index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
5,800 | 30e7fc169eceb3d8cc1a4fa6bb65d81a4403f2c7 | from selenium import webdriver
from time import sleep
import os.path
import time
import datetime
# Periodically screenshot the Google Maps live-traffic view of a fixed area.
# NOTE(review): chromedriver path and output directory are machine-specific.
driver = webdriver.Chrome(executable_path=r'C:/Users/Pathak/Downloads/chromedriver_win32/chromedriver.exe')
counter = 0
try:
    while True:
        driver.get("https://www.google.co.in/maps/@18.9967228,73.118955,21z/data=!5m1!1e1?hl=en&authuser=0")
        out_dir = 'C://Users//Pathak//Downloads//chromedriver_win32'
        # Unique file name: running counter plus epoch timestamp.
        file_name = str(counter) + str(time.time()) + '.png'
        driver.get_screenshot_as_file(os.path.join(out_dir, file_name))
        counter += 1
        sleep(20)  # one capture every 20 seconds
finally:
    # BUGFIX: driver.quit() was unreachable after the infinite loop; running
    # it in a finally block closes the browser on Ctrl-C or any error.
    driver.quit()
|
5,801 | 3b11d514b15775e4c818a7a2adf9a80e89dca968 | import requests
from bs4 import BeautifulSoup
from urllib.request import urlretrieve
import json
import time
#功能一:下载单一歌曲、歌词
def single_song(song_id,path,song_name):
    """Download one track by NetEase id into *path* as <song_name>.mp3."""
    source = "http://music.163.com/song/media/outer/url?id=%s" % song_id
    target = path + '\\' + song_name + '.mp3'
    urlretrieve(source, target)
    print("歌曲下载完成:" + song_name)
def save2txt(songname, lyric,path):
    """Append *lyric* to <path>\\<songname>.txt, UTF-8 encoded."""
    print("歌词下载完成:"+songname)
    target = path + '\\' + songname + '.txt'
    with open(target, 'a', encoding='utf-8') as out:
        out.write(lyric)
def single_song_lyric(song_id,path,song_name):
    """Fetch the lyric of one track from the NetEase lyric API and save it."""
    api = 'http://music.163.com/api/song/lyric?id={}&lv=-1&kv=-1&tv=-1'.format(song_id)
    headers = {
        'User-Agent':
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}
    payload = json.loads(requests.get(api, headers=headers).text)
    raw_lyric = payload['lrc']['lyric']
    # Drop the "[mm:ss.xx]" timestamp tags, keeping only the lyric text.
    lyric = re.sub(re.compile(r'\[.*\]'), '', raw_lyric).strip()
    save2txt(song_name, lyric, path)
    time.sleep(1)  # throttle requests
#功能二:根据歌单url下载
def songs_from_list(url,path):
    """Download every song of a playlist page into directory *path*.

    url:  playlist address such as https://music.163.com/#/playlist?id=...
          (artist pages and toplists use the same markup)
    path: local target directory
    """
    new_url = url.replace('/#', '')
    header = {
        'Host': 'music.163.com',
        'Referer': 'https://music.163.com/',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0'
    }
    res = requests.get(new_url, headers=header).text
    soup = BeautifulSoup(res, "html.parser")
    music_dict = {}
    for music in soup.find('ul', {'class', 'f-hide'}).find_all('a'):
        print(music)
        # BUGFIX: str.strip('/song?id=') removes a *character set*, not a
        # prefix — fragile if an id ever starts/ends with one of those chars.
        # Take everything after '=' instead.
        music_id = music.get('href').split('=')[-1]
        music_dict[music_id] = music.text
    for song_id in music_dict:
        song_url = "http://music.163.com/song/media/outer/url?id=%s" % song_id
        down_path = path + '\\' + music_dict[song_id] + '.mp3'
        print("正在下载:%s" % music_dict[song_id])
        urlretrieve(song_url, down_path)
def get_lyrics(songids):
    """Return the plain-text lyric for a song id, timestamp tags removed."""
    api = 'http://music.163.com/api/song/lyric?id={}&lv=-1&kv=-1&tv=-1'.format(songids)
    headers = {
        'User-Agent':
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}
    body = requests.get(api, headers=headers).text
    raw = json.loads(body)['lrc']['lyric']
    timestamp_tag = re.compile(r'\[.*\]')
    return re.sub(timestamp_tag, '', raw).strip()
def lyrics_from_list(url,path):
    """Download the lyric of every song in a playlist *url* into *path*."""
    new_url = url.replace('/#', '')
    header = {
        'Host': 'music.163.com',
        'Referer': 'https://music.163.com/',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0'
    }
    page = requests.get(new_url, headers=header).text
    parsed = BeautifulSoup(page, "html.parser")
    music_dict = {}
    for anchor in parsed.find('ul', {'class', 'f-hide'}).find_all('a'):
        print(anchor)
        music_dict[anchor.get('href').strip('/song?id=')] = anchor.text
    for song_id in music_dict.keys():
        save2txt(music_dict[song_id], get_lyrics(song_id), path)
        time.sleep(1)  # throttle the lyric API
#功能三:根据歌手下载
#获取歌手信息和id
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
import csv
import re
# chrome_driver = "D:\\software\\chromedriver_win32\\chromedriver.exe" #chromedriver的文件位置
# browser = webdriver.Chrome(executable_path = chrome_driver)
# wait = WebDriverWait(browser, 5) # 设置等待时间
def get_singer(url):
    """Return zip of (singer name, singer id) from an artist listing page."""
    chrome_driver = "D:\\software\\chromedriver_win32\\chromedriver.exe"  # chromedriver location
    browser = webdriver.Chrome(executable_path=chrome_driver)
    wait = WebDriverWait(browser, 5)  # wait budget (handle kept as original)
    browser.get(url)
    browser.switch_to.frame('g_iframe')  # the listing lives inside an iframe
    page = BeautifulSoup(browser.page_source, 'lxml')
    names = []
    ids = []
    for node in page.select('.nm.nm-icn.f-thide.s-fc0'):
        names.append(node.get_text())
        # extract the numeric id from the anchor's href with a regex
        ids.append(str(re.findall('href="(.*?)"', str(node))).split('=')[1].split('\'')[0])
    return zip(names, ids)
def get_data(url):
    """Collect [{'歌手名字': name, '歌手ID': id}] rows from one artist page."""
    rows = []
    for singer_name, singer_id in get_singer(url):
        rows.append({'歌手名字': singer_name, '歌手ID': singer_id})
    return rows
def save2csv(url):
    """Append singer name/id rows scraped from *url* to singer.csv."""
    print('保存歌手信息中...请稍后查看')
    # append mode so repeated category pages accumulate in one file
    with open('singer.csv', 'a', newline='', encoding='utf-8-sig') as f:
        writer = csv.DictWriter(f, fieldnames=['歌手名字', '歌手ID'])
        writer.writeheader()
        data = get_data(url)
        print(data)
        writer.writerows(data)
    print('保存成功')
def download_singer():
    """Dump artist name/id pairs for every NetEase artist category page."""
    category_ids = [1001, 1002, 1003, 2001, 2002, 2003, 4001, 4002, 4003, 6001, 6002, 6003, 7001, 7002, 7003]
    for category in category_ids:
        save2csv('https://music.163.com/#/discover/artist/cat?id={}&initial=-1'.format(category))
def get_id(singer_name):
    """Look up a singer's id by exact name in lib\\singer_info.csv."""
    file = "lib\\singer_info.csv"
    names = []
    ids = []
    with open(file, 'r', encoding='utf-8-sig') as f:
        for row in csv.reader(f):
            names.append(row[0])
            ids.append(row[1])
    # raises ValueError if the name is absent, as the original did
    return ids[names.index(singer_name)]
#根据歌手姓名下载
def get_html(url):
    """GET *url* through a hard-coded HTTP proxy; return body text, or None
    on any failure (the original swallowed all errors the same way)."""
    proxy_addr = {'http': '61.135.217.7:80'}
    # NOTE(review): free proxy IP — replace when it stops working.
    headers = {
        'User-Agent':
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}
    try:
        return requests.get(url, headers=headers, proxies=proxy_addr).text
    except BaseException:
        print('request error')
def get_top50(html):
    """Parse an artist page; return zip of (song name, song id) for the
    pre-cached top-50 song list."""
    soup = BeautifulSoup(html, 'lxml')
    anchors = soup.select('.f-hide #song-list-pre-cache a')
    names = []
    ids = []
    # one pass instead of two over the same node list
    for anchor in anchors:
        names.append(anchor.getText())
        # regex works on the str() of the node, as in the original
        ids.append(str(re.findall('href="(.*?)"', str(anchor))).strip().split('=')[-1].split('\'')[0])
    return zip(names, ids)
def lyrics_from_singername(name,path):
    """Save lyrics of a singer's top-50 songs, looked up by singer name."""
    singer_id = get_id(name)
    top50url = 'https://music.163.com/artist?id={}'.format(singer_id)
    for song_name, song_id in get_top50(get_html(top50url)):
        save2txt(song_name, get_lyrics(song_id), path)
        time.sleep(1)
def save_song(songurl, path,songname):
    """Fetch *songurl* to *path*; report success or failure by song name."""
    try:
        urlretrieve(songurl, path)
    except BaseException:
        print('下载失败:' + songname)
    else:
        print('歌曲下载完成:' + songname)
def songs_from_singername(name,path):
    """Download a singer's top-50 tracks into *path*, looked up by name."""
    singer_id = get_id(name)
    html = get_html('https://music.163.com/artist?id={}'.format(singer_id))
    for song_name, song_id in get_top50(html):
        song_url = 'http://music.163.com/song/media/outer/url?id={}.mp3'.format(song_id)
        save_song(song_url, path + '\\' + song_name + '.mp3', song_name)
        time.sleep(1)
def lyrics_from_singerid(id,path):
    """Save lyrics of the top-50 songs for singer *id* into *path*."""
    html = get_html('https://music.163.com/artist?id={}'.format(id))
    for song_name, song_id in get_top50(html):
        save2txt(song_name, get_lyrics(song_id), path)
        time.sleep(1)
def songs_from_singerid(id,path):
    """Download the top-50 tracks for singer *id* into *path*."""
    html = get_html('https://music.163.com/artist?id={}'.format(id))
    for song_name, song_id in get_top50(html):
        song_url = 'http://music.163.com/song/media/outer/url?id={}.mp3'.format(song_id)
        save_song(song_url, path + '\\' + song_name + '.mp3', song_name)
        time.sleep(1)
#功能四:下载mv
import requests
import os
import sys
from urllib.parse import urlparse,parse_qs
def http_get(api):
    """GET *api* with a NetEase 'appver' cookie set; return parsed JSON."""
    my_cookie = {
        "version": 0,
        "name": 'appver',
        "value": '1.5.0.75771',
        "port": None,
        "domain": 'www.mydomain.com',
        "path": '/',
        "secure": False,
        "expires": None,
        "discard": True,
        "comment": None,
        "comment_url": None,
        "rest": {},
        "rfc2109": False
    }
    session = requests.Session()
    session.headers.update({'Referer': "http://music.163.com/"})
    session.cookies.set(**my_cookie)
    response = session.get(api)
    return json.loads(response.text)
def download_single_mv(id):
    """Download one MV by id into <artist>/[<size>]<song>.mp4, preferring 720p."""
    size = "720"  # preferred resolution
    api = "http://music.163.com/api/mv/detail?id=" + str(id) + "&type=mp4"
    json_data = http_get(api)
    if json_data["code"] == 200:
        offered = list(json_data["data"]["brs"].keys())
        if size not in offered:
            size = offered[0]  # fall back to the first offered resolution
        mvurl = json_data["data"]["brs"][size]       # mv download url
        artist = json_data["data"]["artistName"]     # used as directory name
        song = json_data["data"]["name"]
        filename = '%s/[%s]%s.mp4' % (artist, size, song)
        if not os.path.exists(filename):
            if not os.path.exists(artist):
                os.makedirs(artist)

            def reporthook(blocknum, blocksize, totalsize):
                # urlretrieve progress callback: render a percentage to stderr.
                readsofar = blocknum * blocksize
                if totalsize > 0:
                    percent = readsofar * 1e2 / totalsize
                    s = "\r%5.1f%% %*d / %d" % (
                        percent, len(str(totalsize)), readsofar, totalsize)
                    sys.stderr.write(s)
                    if readsofar >= totalsize:  # near the end
                        sys.stderr.write("\n")
                else:  # total size is unknown
                    sys.stderr.write("read %d\n" % (readsofar,))

            print("downloading " + filename)
            urlretrieve(mvurl, filename, reporthook)
def download_mv_from_list(url):
    """Download MVs referenced by *url*.

    A playlist url downloads every track's MV; an album url downloads the MVs
    of songs that have one; any other url is treated as a single MV id.
    """
    input = url.replace("#", "")
    id = parse_qs(urlparse(input).query)["id"][0]
    if "playlist" in input:
        playlist_api = "http://music.163.com/api/playlist/detail?id=%s" % (id)
        json_data = http_get(playlist_api)
        for idx, mv in enumerate(json_data["result"]["tracks"]):
            download_single_mv(mv["mvid"])
            print("downloaded:" + str(idx))
    elif "album" in input:
        playlist_api = "http://music.163.com/api/album/%s" % (id)
        json_data = http_get(playlist_api)
        for idx, mv in enumerate(json_data["album"]["songs"]):
            if mv["mvid"] is not None and mv["mvid"] != 0:
                download_single_mv(mv["mvid"])
                print("downloaded:" + str(idx))
    else:
        # BUGFIX: this call previously ran unconditionally, re-requesting the
        # playlist/album id as if it were an MV id after the batch download.
        download_single_mv(id)
#功能五:爬取歌曲评论并生成词云图
from jieba import posseg
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import wordcloud
def _content_generator(music_id):
    """Yield cleaned comment texts for *music_id*, paging through the API."""
    url = 'http://music.163.com/api/v1/resource/comments/R_SO_4_%s' % music_id
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Cache-Control': 'max-age=0',
        'Host': 'music.163.com',
        'Proxy-Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'Cookie': '__f_=1544879495065; _ntes_nnid=ec5f372598a44f7d45726f800d3c244b,1544879496275; _ntes_nuid=ec5f372598a44f7d45726f800d3c244b; _iuqxldmzr_=32; __utmc=94650624; WM_TID=SjPgpIfajWhEUVQQAVYoLv%2BJSutc41%2BE; __utma=94650624.1212198154.1546091705.1546142549.1546173830.4; __utmz=94650624.1546173830.4.4.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; WM_NI=fjy1sURvfoc29LFwx6VN7rVC6wTgq5EA1go8oNGPt2OIoPoLBInGAKxG9Rc6%2BZ%2F6HQPKefTD2kdeQesFU899HSQfRmRPbGmc6lxhGHcRpZAVtsYhGxIWtlaVLL1c0Z7HYUc%3D; WM_NIKE=9ca17ae2e6ffcda170e2e6ee89ef48839ff7a3f0668abc8aa3d15b938b8abab76ab6afbab4db5aacaea290c52af0fea7c3b92aa6b6b7d2f25f92aaaa90e23afb948a98fb3e9692f993d549f6a99c88f43f879fff88ee34ad9289b1f73a8d97a1b1ee488297a2a8c441bc99f7b3e23ee986e1d7cb5b9495ab87d750f2b5ac86d46fb19a9bd9bc338c8d9f87d1679290aea8f069f6b4b889c644a18ec0bbc45eb8ad9789c6748b89bc8de45e9094ff84b352f59897b6e237e2a3; __utmb=94650624.8.10.1546173830; JSESSIONID-WYYY=JhDousUg2D2BV1f%2Bvq6Ka6iQHAWfFvQOPdvf5%5CPMQISbc5nnfzqQAJDcQsezW82Cup2H5n1grdeIxXp79veCgoKA68D6CSkgCXcOFkI04Hv8hEXG9tWSMKuRx0XZ4Bp%5C%5CSbZzeRs6ey4FxADkuPVlIIVSGn%2BTq8mYstxPYBIg0f2quO%5C%3A1546177369761',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36',
    }
    page_size = 20
    offset = 0
    # keep only CJK text, CJK/full-width punctuation and ASCII alphanumerics
    cleaner = re.compile(r'[^\u4E00-\u9FA5^\u3000-\u303F^\uFF00-\uFFEF^0-9^a-z^A-Z]')
    while True:
        params = {
            'limit': page_size,
            'offset': offset,
        }
        offset += page_size
        reply = requests.get(url, headers=headers, params=params)
        comments = reply.json()['comments']
        has_more = reply.json()['more']
        for item in comments:
            yield cleaner.subn('', item['content'])[0]
        if not has_more:
            break
class WangYiMusicWordCloud:
    """Build a word cloud from the comments of one NetEase Cloud Music song."""
    stop_words = ['首歌']

    def __init__(self, music_id, mask=None, font_path=None, stop_words=None):
        self.music_id = music_id   # song id whose comments are analysed
        self.mask = mask           # optional background/mask image path
        self.font_path = font_path # font file (required for CJK rendering)
        if stop_words is not None:
            # BUGFIX: `self.stop_words += stop_words` mutated the shared
            # *class* attribute in place, so extra stop words leaked into
            # every other instance.  Build a new per-instance list instead.
            self.stop_words = self.stop_words + stop_words
        self.img_wordcloud = None

    def _cut_word(self, comment):
        # Segment a comment with jieba; keep first occurrence, drop stop words.
        word_pairs = posseg.lcut(comment, HMM=False)
        result = []
        for t in word_pairs:
            if not (t.word in result or t.word in self.stop_words):
                result.append(t.word)
        return '/'.join(result)

    def get_words_text(self):
        """Return segmented comment text, from cache file <music_id>.txt if
        present, otherwise by crawling the comment API (and caching)."""
        if os.path.isfile(f'{self.music_id}.txt'):
            print('评论文件已存在,读取文件...')
            with open(f'{self.music_id}.txt', 'r', encoding='utf-8') as f:
                return f.read()
        else:
            print('没有默认评论文件,开始爬取评论...')
            count = 0
            text = []
            comments = _content_generator(self.music_id)
            for t in comments:
                text.append(self._cut_word(t))
                count += 1
                print(f'\r已爬取 {count}条评论', end='')
                if count % 100 == 0:
                    print(f'\r已爬取 {count}条评论, 休息 2s', end='')
                    time.sleep(2)  # be polite to the API
            str_text = '\n'.join(text)
            with open(f'{self.music_id}.txt', 'w', encoding='utf-8') as f:
                f.write(str_text)
            print(f'\r共爬取 {count}条评论,已写入文件 {self.music_id}.txt')
            return str_text

    def generate(self, **kwargs):
        """Build the WordCloud; kwargs override the defaults below."""
        default_kwargs = {
            'background_color': "white",
            'width': 1000,
            'height': 860,
            'margin': 2,
            'max_words': 50,
            'stopwords': wordcloud.STOPWORDS,
        }
        if self.mask is not None:
            default_kwargs['mask'] = np.array(Image.open(self.mask))
        if self.font_path is not None:
            default_kwargs['font_path'] = self.font_path
        elif 'font_path' not in kwargs:
            raise ValueError('缺少参数 font_path')
        default_kwargs.update(kwargs)
        str_text = self.get_words_text()
        self.wordcloud = wordcloud.WordCloud(**default_kwargs)
        self.img_wordcloud = self.wordcloud.generate(str_text)

    def show_wordcloud(self):
        # Render with matplotlib, generating on first use.
        if self.img_wordcloud is None:
            self.generate()
        plt.axis('off')
        plt.imshow(self.img_wordcloud)
        plt.show()

    def to_file(self, filename):
        # Save the rendered cloud, generating on first use.
        if not hasattr(self, 'wordcloud'):
            self.generate()
        self.wordcloud.to_file(filename)
def get_wordcloud(music_id,mask,font,path):
    """Build, display and save a comment word cloud for one song."""
    builder = WangYiMusicWordCloud(music_id, mask=mask, font_path=font)
    builder.show_wordcloud()
    builder.to_file(path + '\\' + 'result.jpg')
|
5,802 | 807e19f09f4a46b6c39457b8916714e2c54c3e8d | # -*- coding:utf-8 -*-
'''
@author:oldwai
'''
# email: frankandrew@163.com
def multipliers():
    """Return four callables m_i with m_i(x) == x * i for i in 0..3.

    BUGFIX: the original `return lab1(x)` raised NameError (`x` is undefined
    here) and would have returned plain ints, while both callers — the
    commented `[m(2) for m in multipliers()]` and func1 — call each element.
    `i=i` binds the loop value as a default to avoid the classic late-binding
    closure pitfall.
    """
    return [lambda x, i=i: x * i for i in range(4)]
def lab1(x):
    """Return [x*0, x*1, x*2, x*3]."""
    products = []
    for factor in range(4):
        products.append(x * factor)
    return products
#print ([m(2) for m in multipliers()])
def func1(x):
    """Apply every callable returned by multipliers() to *x*."""
    return [m(x) for m in multipliers()]
print(func1(3)) |
5,803 | 676caabb103f67c631bc191b11ab0d2d8ab25d1e | import json
from django.core.management import call_command
from django.http import JsonResponse
from django.test import TestCase
from django.urls import reverse
URLS = ['api_v1:categories', 'api_v1:main_categories', 'api_v1:articles']
class GetJsonData(TestCase):
    """Smoke-test that the API list endpoints answer anonymous GETs with JSON."""

    def test_post_not_login_no_pk(self):
        for url in URLS:
            response = self.client.get(reverse(url))
            self.check_redirect(response)

    def check_redirect(self, response):
        # NOTE(review): name says "redirect" but a 200 JSON response is what
        # is actually asserted — consider renaming when callers allow.
        self.assertEqual(response.status_code, 200)
        # BUGFIX/idiom: assertIsInstance replaces the exact-type equality
        # check and also accepts JsonResponse subclasses.
        self.assertIsInstance(response, JsonResponse)
class UnLoginGetArticleJsonTestCase(TestCase):
    """Verify the JSON payload shape of article/category endpoints using fixtures."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Load users first, then the content dump that references them.
        call_command('loaddata', 'fixtures/auth.json', verbosity=0)
        call_command('loaddata', 'fixtures/dump.json', verbosity=0)

    def test_article_success_data(self):
        self.response = self.client.get(reverse('api_v1:articles'))
        data = json.loads(self.response.content)
        self.assertTrue(len(data) >= 1)
        for key in ('pk', 'title', 'description', 'category_id', 'user_id', 'image'):
            self.assertIn(key, data[0])

    def test_get_main_category_json_data(self):
        self.response = self.client.get(reverse('api_v1:main_categories'))
        data = json.loads(self.response.content)
        self.assertTrue(len(data) >= 1)
        for key in ('pk', 'title'):
            self.assertIn(key, data[0])

    def test_get_json_category_success_data(self):
        self.response = self.client.get(reverse('api_v1:categories'))
        data = json.loads(self.response.content)
        self.assertTrue(len(data) >= 1)
        for key in ('pk', 'title', 'parent_id'):
            self.assertIn(key, data[0])
|
5,804 | 2bf057621df3b860c8f677baf54673d2da8c2bd1 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.container_infrastructure_management.v1 import (
cluster_certificate,
)
from openstack.tests.unit import base
# Canned GET /certificates/<uuid> payload: a cluster's CA certificate.
coe_cluster_ca_obj = dict(
cluster_uuid="43e305ce-3a5f-412a-8a14-087834c34c8c",
pem="-----BEGIN CERTIFICATE-----\nMIIDAO\n-----END CERTIFICATE-----\n",
bay_uuid="43e305ce-3a5f-412a-8a14-087834c34c8c",
links=[],
)
# Canned POST /certificates payload: a signed certificate plus the CSR it
# was issued for.
coe_cluster_signed_cert_obj = dict(
cluster_uuid='43e305ce-3a5f-412a-8a14-087834c34c8c',
pem='-----BEGIN CERTIFICATE-----\nMIIDAO\n-----END CERTIFICATE-----',
bay_uuid='43e305ce-3a5f-412a-8a14-087834c34c8c',
links=[],
csr=(
'-----BEGIN CERTIFICATE REQUEST-----\nMIICfz=='
'\n-----END CERTIFICATE REQUEST-----\n'
),
)
class TestCOEClusters(base.TestCase):
    """Cloud-layer tests for COE cluster certificate operations."""

    def _compare_cluster_certs(self, exp, real):
        # Compare as plain dicts, ignoring computed attributes.
        expected = cluster_certificate.ClusterCertificate(**exp)
        self.assertDictEqual(
            expected.to_dict(computed=False),
            real.to_dict(computed=False),
        )

    def get_mock_url(
        self,
        service_type='container-infrastructure-management',
        base_url_append=None,
        append=None,
        resource=None,
    ):
        # Default every mock URL to the container-infra service type.
        return super(TestCOEClusters, self).get_mock_url(
            service_type=service_type,
            resource=resource,
            append=append,
            base_url_append=base_url_append,
        )

    def test_get_coe_cluster_certificate(self):
        self.register_uris([
            dict(
                method='GET',
                uri=self.get_mock_url(
                    resource='certificates',
                    append=[coe_cluster_ca_obj['cluster_uuid']],
                ),
                json=coe_cluster_ca_obj,
            )
        ])
        ca_cert = self.cloud.get_coe_cluster_certificate(
            coe_cluster_ca_obj['cluster_uuid'])
        self._compare_cluster_certs(coe_cluster_ca_obj, ca_cert)
        self.assert_calls()

    def test_sign_coe_cluster_certificate(self):
        self.register_uris([
            dict(
                method='POST',
                uri=self.get_mock_url(resource='certificates'),
                json={
                    "cluster_uuid": coe_cluster_signed_cert_obj['cluster_uuid'],
                    "csr": coe_cluster_signed_cert_obj['csr'],
                },
            )
        ])
        self.cloud.sign_coe_cluster_certificate(
            coe_cluster_signed_cert_obj['cluster_uuid'],
            coe_cluster_signed_cert_obj['csr'],
        )
        self.assert_calls()
|
5,805 | c585b1439217fff42945eeb9e02512d73f8ba19f | import DB as db
import os
from Chart import Chart
import matplotlib.pyplot as plt
import numpy as np
table = db.get_researcher_copy()
chart_path = '../charts/discipline '
def get_discipline_with_more_female():
    """Print each discipline where women outnumber men (among researchers
    with a known gender) and the total count of such disciplines."""
    docs = table.aggregate([
        {'$match': {'gender': {'$exists': 1}}},
        {'$unwind': '$labels'},
        {'$group': {'_id': {'label': '$labels', 'gender': '$gender'}, 'count': {'$sum': 1}}}
    ])
    totals = {}  # label -> [male_count, female_count]
    for doc in docs:
        label = doc['_id']['label']
        if label not in totals:
            totals[label] = [0, 0]
        slot = 0 if doc['_id']['gender'] == 'M' else 1
        totals[label][slot] = doc['count']
    count = 0
    for key in totals:
        if totals[key][0] != 0 and totals[key][1] > totals[key][0]:
            count += 1
            print('%s:' % key)
            print('male {0},female {1}'.format(totals[key][0], totals[key][1]))
    print('number of all:%s' % count)
def discipline_proportion(top_k):
    """Chart the cumulative share of researchers covered by the *top_k*
    most popular disciplines and save it as an EPS file."""
    docs = table.aggregate([
        {'$match': {'gender': {'$exists': 1}}},
        {'$unwind': '$labels'},
        {'$group': {
            '_id': {'label': '$labels'},
            'count': {'$sum': 1}
        }},
        {'$sort': {'count': -1}}])
    ranked = list(docs)[:top_k]
    total = table.count({'gender': {'$exists': 1}})
    count_arr = [doc['count'] for doc in ranked]          # raw researcher counts
    proportion_arr = [doc['count'] / total for doc in ranked]
    # running sum of the per-discipline shares
    cumulative_arr = []
    running = 0
    for share in proportion_arr:
        running += share
        cumulative_arr.append(running)
    labels = [doc['_id']['label'] for doc in ranked]
    chart = Chart(100, 150)
    chart.bar(cumulative_arr, top_k, labels, 'Cumulative propotion of most popular disciplines', 'discipline', 'propotion', True, log=False, fontsize=100)
    chart.save(chart_path + '/cumulative_{0}'.format(top_k), format='eps')
    chart.clear()
def gender_favorite(top_k,sex='M'):
    """Chart the *top_k* disciplines with the most researchers of *sex*."""
    docs = table.aggregate([
        {'$match': {'gender': sex}},
        {'$unwind': '$labels'},
        {'$group': {
            '_id': {'label': '$labels'},
            'count': {'$sum': 1}
        }},
        {'$sort': {'count': -1}}])
    ranked = list(docs)[:top_k]
    count_arr = [doc['count'] for doc in ranked]
    labels = [doc['_id']['label'] for doc in ranked]
    chart = Chart(100, 180)
    # NOTE(review): title says "females'" regardless of *sex* — kept as-is.
    chart.bar(count_arr, top_k, labels, "The Top {0} females' favorite disciplines".format(top_k), 'discipline', 'researcher number', True, log=False, fontsize=120)
    chart.save(chart_path + '/{1}_favorite_{0}'.format(top_k, sex), format='eps')
    chart.clear()
def average_h_index(top_k):
    """Print disciplines (with more than 50 male researchers) whose female
    researchers have a higher average h-index than the male ones.

    BUGFIX: the original referenced an undefined name `copy` (the collection
    handle is the module-level `table`) and repeated the entire accumulation
    block twice per label, double-counting every researcher.  It also divided
    by the female count without checking it was non-zero.
    NOTE(review): `top_k` is accepted for interface compatibility but was
    unused in the original as well.
    """
    all_docs = table.aggregate([
        {'$match': {'gender': {'$exists': True}}},
        {'$project': {'index': 1, 'labels': 1, 'gender': 1, 'count': {'$size': '$pubs'}}}])
    # label -> [male_count, male_index_sum, female_count, female_index_sum]
    d = {}
    for doc in all_docs:
        for label in doc['labels']:
            stats = d.setdefault(label, [0, 0, 0, 0])
            if doc['gender'] == 'M':
                stats[0] += 1
                stats[1] += int(doc['index'])
            else:
                stats[2] += 1
                stats[3] += int(doc['index'])
    for key in d:
        if d[key][0] > 50:
            if d[key][2] == 0:
                continue  # no female researchers: nothing to compare
            male_avg = d[key][1] / d[key][0]
            female_avg = d[key][3] / d[key][2]
            if female_avg > male_avg:
                print(key)
                print(male_avg)
                print(female_avg)
def avarage_publication(top_k):
    """Count, per discipline label, how many researchers with a known gender
    carry that label, and return the counts.

    BUGFIX: the original iterated an undefined `docs` (the cursor is bound to
    `all_docs`), indexed with an undefined `pub`, and raised KeyError on the
    first occurrence of every label.  The commented-out trailer suggests a
    top-k chart was planned; the counts dict is returned so a caller can
    build one.
    NOTE(review): `top_k` kept for interface compatibility; unused here as in
    the original.
    """
    all_docs = table.aggregate([
        {'$match': {'gender': {'$exists': True}}},
        {'$project': {'labels': 1, 'gender': 1, 'count': {'$size': '$pubs'}}}])
    counts = {}
    for doc in all_docs:
        for label in doc['labels']:
            counts[label] = counts.get(label, 0) + 1
    return counts
discipline_proportion(30)
# get_discipline_with_more_female()
# gender_favorite(30)
# gender_favorite(30,'F')
|
5,806 | c69c8ba218935e5bb065b3b925cc7c5f1aa2957b |
import matplotlib.pyplot as plt
import numpy as np
x = [1, 2, 2.5, 3, 4] # x-coordinates for graph
y = [1, 4, 7, 9, 15] # y-coordinates
plt.axis([0, 6, 0, 20]) # axis ranges: x spans 0-6, y spans 0-20
plt.plot(x, y, 'ro') # scatter the raw points as red dots
# The points look roughly linear, so a degree-1 least-squares fit is a
# reasonable model.  np.polyfit(x, y, 1) returns the [slope, intercept] of
# the best-fit line; np.poly1d wraps them into a callable polynomial, which
# is evaluated at the sorted unique x values to draw the line of best fit.
plt.plot(np.unique(x), np.poly1d(np.polyfit(x, y, 1))(np.unique(x)))
plt.show()
|
5,807 | f0fa85f240b74b003ade767ffe8642feacdfaa32 | import argparse
from train import train
from test import infer
if __name__ == '__main__':
    # Command-line front end: dispatch to training or inference.
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', type=str, default='train',
                        help='could be either infer or train')
    parser.add_argument('--model_dir', type=str, default='model',
                        help='directory to save models')
    # BUGFIX/idiom: the int-typed options used *string* defaults ('20', ...).
    # argparse happens to coerce string defaults through `type`, but plain
    # int literals are explicit and cannot break if `type` changes.
    parser.add_argument('--batch_size', type=int, default=20,
                        help='train batch size')
    parser.add_argument('--epoch', type=int, default=10,
                        help='train epoch num')
    parser.add_argument('--nd', type=int, default=100,
                        help='noise dimension')
    parser.add_argument('--num', type=int, default=1,
                        help='which number to infer')
    args = parser.parse_args()
    if args.mode == 'train':
        train(args)
    elif args.mode == 'infer':
        infer(args)
    else:
        print('unknown mode')
5,808 | 2ccc5e01a3b47a77abcb32160dee74a6a74fcfbb | import socket
import sys
from datetime import datetime
from threading import Thread
import logging
class RRConnection():
    """Emulate a RaceResult-style timing box speaking a line protocol on TCP 3601.

    A background thread accepts one client at a time and answers commands
    such as GETSTATUS, GETCONFIG, PASSINGS and passing-range requests.
    Passings are injected via addPassing() and optionally pushed to the
    client when notification is enabled (SETPUSHPASSINGS).
    """

    def __init__(self):
        self._listenerSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._inSock = None           # current client socket, set in inLoop()
        self._inThread = Thread(target=self.inLoop)
        self._isRunning = True
        self._notify = False          # push new passings to the client?
        self._allPassings = []        # protocol-formatted passing lines

    def start(self):
        """Start the accept/receive loop in its background thread."""
        logging.debug("Starting thread for in-loop")
        self._inThread.start()

    def stop(self):
        """Stop the loop and close both sockets."""
        self._isRunning = False
        self._listenerSocket.close()
        # BUGFIX: _inSock is None until a client has connected; closing it
        # unconditionally raised AttributeError when stopping early.
        if self._inSock is not None:
            self._inSock.close()

    def inLoop(self):
        """Accept clients on port 3601 and feed received data to parseCommand."""
        self._listenerSocket.bind(('', 3601))
        self._listenerSocket.listen(1)
        while self._isRunning:
            logging.debug("Starting listener socket on port 3601")
            self._inSock, addr = self._listenerSocket.accept()
            try:
                logging.debug("Got connection from {}".format(addr))
                keepReceiving = True
                while keepReceiving:
                    received = self._inSock.recv(1024 * 1024)
                    if len(received) > 0:
                        self.parseCommand(received.decode())
                    else:
                        keepReceiving = False  # peer closed the connection
            except ConnectionResetError:
                logging.debug("Connection closed, retry")

    def parseCommand(self, cmd):
        """Dispatch each CRLF-separated command line to the matching handler.

        A bare number requests that passing; "<n>:<count>" requests a range;
        any other first word is looked up as a method of this class.
        """
        allCmd = cmd.strip().split("\r\n")
        for oneCmd in allCmd:
            if oneCmd.strip() != "":
                logging.debug("Parsing command {}".format(oneCmd))
                f = oneCmd.split(';')
                if hasattr(self, f[0].strip()):
                    getattr(self, f[0].strip())(oneCmd)
                elif ":" in oneCmd:
                    numbers = oneCmd.split(':')
                    self.sendPassings(int(numbers[0]), int(numbers[1]))
                elif oneCmd.isdigit():
                    self.sendPassings(int(oneCmd), 1)
                else:
                    logging.debug("Function {} not known: {}".format(f[0], cmd))

    def sendAnswer(self, answer):
        """Send one CRLF-terminated reply line to the connected client."""
        if self._inSock:
            logging.debug("Sending: {}".format(answer))
            fullAnswer = answer + "\r\n"
            try:
                self._inSock.send(fullAnswer.encode())
            except socket.error:
                logging.debug("Send error!")
        else:
            logging.debug("Not connected!")

    def addPassing(self, Bib, Date, Time):
        """Record a new passing; push it to the client if notification is on."""
        PassingNo = len(self._allPassings) + 1
        # Fixed/dummy detector fields — only bib, date and time vary.
        EventID = "143722"
        Hits = "1"
        MaxRSSI = "31"
        InternalData = ""
        IsActive = "0"
        Channel = "1"
        LoopID = ""
        LoopOnly = ""
        WakeupCounter = ""
        Battery = ""
        Temperature = ""
        InternalActiveData = ""
        BoxName = "SwimBox"
        FileNumber = "1"
        MaxRSSIAntenna = "1"
        BoxId = "1"
        entry = "{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{}".format(
            PassingNo, Bib, Date, Time, EventID, Hits, MaxRSSI,
            InternalData, IsActive, Channel, LoopID, LoopOnly, WakeupCounter,
            Battery, Temperature, InternalActiveData, BoxName,
            FileNumber, MaxRSSIAntenna, BoxId)
        self._allPassings.append(entry)
        if self._notify:
            self.sendAnswer("#P;{}".format(entry))

    def sendPassings(self, number, count):
        """Send *count* stored passings starting at 1-based index *number*,
        or "ONLY <n>" when the range exceeds what is stored."""
        if number + count - 1 > len(self._allPassings):
            self.sendAnswer("ONLY {}".format(len(self._allPassings)))
        else:
            for i in range(number - 1, number + count - 1):
                self.sendAnswer(self._allPassings[i])

    def SETPROTOCOL(self, str):
        logging.debug("Set protocol: {}".format(str))
        self.sendAnswer("SETPROTOCOL;2.0")

    def GETSTATUS(self, str):
        """Answer a GETSTATUS request with a mostly-static status line.

        Field order (per the protocol):
        GETSTATUS;<Date>;<Time>;<HasPower>;<Antennas>;<IsInOperationMode>;
        <FileNumber>;<GPSHasFix>;<Latitude>,<Longitude>;<ReaderIsHealthy>;
        <BatteryCharge>;<BoardTemperature>;<ReaderTemperature>;<UHFFrequency>;
        <ActiveExtConnected>;[<Channel>];[<LoopID>];[<LoopPower>];
        [<LoopConnected>];[<LoopUnderPower>];<TimeIsRunning>;<TimeSource>;
        <ScheduledStandbyEnabled>;<IsInStandby>;<ErrorFlags>
        """
        logging.debug("Get Status: {}".format(str))
        Date = datetime.now().strftime("%Y-%m-%d")
        Time = datetime.now().strftime("%H:%M:%S.%f")
        HasPower = "0"
        Antennas = "10000000"
        IsInOperationMode = "1"
        FileNumber = "1"
        GPSHasFix = "0"
        Latitude = "0.0"
        Longitude = "0.0"
        ReaderIsHealthy = "1"
        BatteryCharge = "100"
        BoardTemperature = "20"
        ReaderTemperature = "20"
        UHFFrequency = "0"
        ActiveExtConnected = "0"
        Channel = ""
        LoopID = ""
        LoopPower = ""
        LoopConnected = ""
        LoopUnderPower = ""
        TimeIsRunning = "1"
        TimeSource = "0"
        ScheduledStandbyEnabled = "0"
        IsInStandby = "0"
        ErrorFlags = "0"
        self.sendAnswer(
            "GETSTATUS;{};{};{};{};{};{};{};{},{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{};{}".format(
                Date, Time, HasPower, Antennas, IsInOperationMode, FileNumber,
                GPSHasFix, Latitude, Longitude, ReaderIsHealthy, BatteryCharge,
                BoardTemperature, ReaderTemperature, UHFFrequency,
                ActiveExtConnected, Channel, LoopID, LoopPower, LoopConnected,
                LoopUnderPower, TimeIsRunning, TimeSource,
                ScheduledStandbyEnabled, IsInStandby, ErrorFlags))

    def GETCONFIG(self, s):
        """Answer a small fixed set of GETCONFIG keys; anything else errors."""
        parts = s.split(";")
        if parts[1] == "GENERAL":
            if parts[2] == "BOXNAME":
                self.sendAnswer(s.strip() + ";SwimBox;1")
            elif parts[2] == "TIMEZONE":
                self.sendAnswer(s.strip() + ";Europe/Amsterdam")
            else:
                logging.debug("Unknown general request: {}".format(parts[2]))
                self.sendAnswer(s.strip() + ";ERROR")
        elif parts[1] == "DETECTION":
            if parts[2] == "DEADTIME":
                self.sendAnswer(s.strip() + ";10")
            elif parts[2] == "REACTIONTIME":
                self.sendAnswer(s.strip() + ";10")
            elif parts[2] == "NOTIFICATION":
                self.sendAnswer(s.strip() + ";1")
            else:
                logging.debug("Unknown detection request: {}".format(parts[2]))
                self.sendAnswer(s.strip() + ";ERROR")
        else:
            logging.debug("Unknown config category: {}".format(parts[1]))
            self.sendAnswer(s.strip() + ";ERROR")

    def GETFIRMWAREVERSION(self, s):
        self.sendAnswer("GETFIRMWAREVERSION;1.0")

    def GETACTIVESTATUS(self, s):
        self.sendAnswer("GETACTIVESTATUS;ERROR")

    def PASSINGS(self, s):
        self.sendAnswer("PASSINGS;{};1".format(len(self._allPassings)))

    def SETPUSHPASSINGS(self, s):
        """Enable/disable push notification of new passings."""
        parts = s.split(";")
        if parts[1] == "1":
            self._notify = True
        else:
            # BUGFIX: the original assigned `self.notify` (typo), so disabling
            # never actually cleared `_notify` and pushes stayed on.
            self._notify = False
        if parts[2] == "1":
            pass
            # shall send all existing here
        self.sendAnswer(s)
if __name__ == '__main__':
    # Interactive driver: run the emulator and let the operator inject
    # passings by typing a bib number; the current wall-clock time is
    # attached automatically.
    foo = RRConnection()
    foo.start()
    while True:
        try:
            logging.debug("You can enter new passings in the format <bib> (current time will be taken")
            # NOTE(review): non-numeric input raises ValueError, which is not
            # caught here and will abort the loop -- confirm intended.
            newEntry = int(input())
            newTime = datetime.now()
            foo.addPassing(newEntry, newTime.strftime("%Y-%m-%d"), newTime.strftime("%H:%M:%S.%f"))
        except KeyboardInterrupt:
            # Ctrl-C: shut the connection down cleanly before exiting.
            logging.debug("Exiting...")
            foo.stop()
            sys.exit(1)
|
5,809 | 9f1cbc655a5d8f14fa45cf977bb2dcee4874b188 | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 23 20:44:38 2018
@author: user
"""
import fitbit
import gather_keys_oauth2 as Oauth2
import pandas as pd
import datetime as dt
from config import CLIENT_ID, CLIENT_SECRET
#Establish connection to Fitbit API
server = Oauth2.OAuth2Server(CLIENT_ID, CLIENT_SECRET)
server.browser_authorize()
ACCESS_TOKEN = str(server.fitbit.client.session.token['access_token'])
REFRESH_TOKEN = str(server.fitbit.client.session.token['refresh_token'])
auth2_client = fitbit.Fitbit(CLIENT_ID, CLIENT_SECRET, oauth2=True, access_token=ACCESS_TOKEN, refresh_token=REFRESH_TOKEN)
def get_heart_rate(auth2_client, date, granularity='1sec'):
    """
    Query the intraday heart-rate time series for one date.

    granularity: '1sec' or '1min'. Returns a DataFrame with columns
    ['Timestamp', 'Heart Rate'], one row per reading.
    """
    raw = auth2_client.intraday_time_series('activities/heart', base_date=date, detail_level=granularity)
    samples = raw['activities-heart-intraday']['dataset']
    frame = pd.DataFrame({
        'Date': [date] * len(samples),
        'Heart Rate': [sample['value'] for sample in samples],
        'Time': [sample['time'] for sample in samples],
    })
    # Combine date + time-of-day into a single timestamp column.
    frame['Timestamp'] = pd.to_datetime(frame['Date'] + ' ' + frame['Time'])
    return frame[['Timestamp', 'Heart Rate']]
# Pull heart-rate data for every day in [START_DATE, END_DATE] and label
# each reading with whether it fell inside one of the date ranges listed
# in ./data/date_times.csv.
START_DATE = '2018-01-20'
END_DATE = '2018-02-13'
DATES = pd.date_range(start=START_DATE, end=END_DATE).tolist()
DATES = [date.strftime('%Y-%m-%d') for date in DATES]
heart_rate_dfs = []
for date in DATES:
    heart_rate_dfs.append(get_heart_rate(auth2_client, date))
#Concatenate individual heart_rate_dfs for each date into one big df
heart_rate_df = pd.concat(heart_rate_dfs, axis=0, ignore_index=True)
#Label each reading as 0 (not on date) or 1 (on date)
DATE_RANGES = pd.read_csv('./data/date_times.csv')
DATE_RANGES['Start'] = pd.to_datetime(DATE_RANGES['Start'])
DATE_RANGES['End'] = pd.to_datetime(DATE_RANGES['End'])
heart_rate_df['onDate?'] = 0
for i in range(len(DATE_RANGES)):
    # NOTE(review): 'Start'/'End' were already converted to datetime above,
    # and 'Timestamp' is built by pd.to_datetime in get_heart_rate, so the
    # repeated pd.to_datetime calls below are redundant (harmless).
    start = pd.to_datetime(DATE_RANGES['Start'][i])
    end = pd.to_datetime(DATE_RANGES['End'][i])
    mask = (pd.to_datetime(heart_rate_df['Timestamp']) >= start) & (pd.to_datetime(heart_rate_df['Timestamp']) <= end)
    # where(~mask, other=1): keep existing value outside the range, set 1 inside.
    heart_rate_df['onDate?'] = heart_rate_df['onDate?'].where(~mask, other=1)
#Save to CSV
FILEPATH = './data/' + 'heart_rate ' + START_DATE + ' to ' + END_DATE + '.csv'
heart_rate_df.to_csv(FILEPATH, index=False) |
5,810 | 72bbd100a37a86dec7684257f2bec85d7367c009 | from rest_framework import serializers
from .models import *
class VisitaSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Visita
fields = ('id', 'usuario', 'lugar', 'fecha_visita', 'hora_visita') |
5,811 | c9f29a92ec8627593b54f7d9569dcfd589fa7fff | '''
Binary_to_C
Converts any binary data to an array of 'char' type to be used inside of a C program.
The reason to want to do that, is to emulate a 'Windows Resource System' on Linux.
Linux does not allow inclusion of binary data in application (I am OK with that, I like that actually).
Windows, however, does. On that system it is called resource; See 'http://www.winprog.org/tutorial/resources.html'
for details. Sometimes, though a Linux programmer, may want the binary data to be apart of the application
so that end users do not tamper with it. This script will make that possible.
python binary_to_c.py [file] ([commands])
commands
* -output=[file] : outputs the data to a file; default is the screen
* -column=[number] : sets the number of column the array should have (that is, break before making a new row);
0 gives an single row with a infinitly long column;
default is 10.
* -hex=[0, 1, or 2]: 0=decimal, 1=lower-case-hex, 2=upper-case-hex
'''
import sys
import os
## defines
COLUMN = 'column'
OUTPUT = 'output'
HEX = 'hex'
SPACE = ' '
LINE = '\n'
TAB = '\t'
COMMA = ','
DEFAULT_COLUMN = 10
DEFAULT_OUTPUT = sys.stdout
DEFAULT_HEX = 0
## structs
Config = {
COLUMN: DEFAULT_COLUMN,
OUTPUT: DEFAULT_OUTPUT,
HEX: DEFAULT_HEX
}
## functions
def printUse():
    """Print command-line usage for the binary_to_c script.

    Fix: Python-2-only ``print`` statements converted to single-argument
    ``print(...)`` calls, which behave identically under Python 2 and
    also run under Python 3.
    """
    print("python binary_to_c.py " + "[file] ([commands])")
    print("commands")
    print("\t-" + OUTPUT + "=[file] : outputs the data to a file; default is the screen")
    print("\t-" + COLUMN + "=[number] : sets the number of column the array should have (that is, break before making a new row);")
    print("\t" + ' ' * len(COLUMN + "\"=[number]\"") + ": 0 gives an single row with a infinitly long column;")
    print("\t-" + HEX + "=[0, 1, or 2] : 0=decimal, 1=lower-case-hex, 2=upper-case-hex")
def configReset():
    """Close any file opened for output and restore all Config defaults."""
    current_output = Config[OUTPUT]
    if current_output != sys.stdout:
        current_output.close()
    Config.update({COLUMN: DEFAULT_COLUMN, OUTPUT: DEFAULT_OUTPUT, HEX: DEFAULT_HEX})
def checkParam():
    """Exit (after printing usage) unless sys.argv[1] names an existing file.

    Fix: Python-2 ``print`` statement converted to ``print(...)``;
    ``== False`` comparison replaced with the idiomatic ``not``.
    Both changes preserve the original behaviour exactly.
    """
    if not os.path.isfile(sys.argv[1]):
        print(sys.argv[1] + " is not a file")
        printUse()
        sys.exit()
def configReadParam():
    """Parse '-output=', '-column=' and '-hex=' options from sys.argv into Config.

    Malformed numeric values fall back to safe defaults instead of aborting.
    Fixes: Python-2 ``print`` statements converted to ``print(...)`` calls
    (same output, Python-3 compatible) and the bare ``except:`` clauses
    narrowed to ``ValueError``, which is what int() raises for bad input.
    """
    for i in range(len(sys.argv)):
        if i == 0: continue #name of program
        if i == 1: continue #file reading binary from
        Command = sys.argv[i]
        # NOTE(review): substring matching means e.g. an output path containing
        # 'hex' also triggers the hex branch -- confirm whether intended.
        if OUTPUT in Command:
            List = Command.split('=')
            if len(List) < 2:
                print("Error in " + OUTPUT + " command: " + '-' + OUTPUT + '=[file]')
                sys.exit()
            File = List[1].strip()
            Config[OUTPUT] = open(File, 'w')
        if COLUMN in Command:
            List = Command.split('=')
            if len(List) < 2:
                print("Error in " + COLUMN + " command: " + '-' + COLUMN + '=[number]')
                sys.exit()
            Number = List[1].strip()
            try:
                Config[COLUMN] = int(Number)
            except ValueError:
                Config[COLUMN] = 0
        if HEX in Command:
            List = Command.split('=')
            if len(List) < 2:
                print("Error in " + HEX + " command: " + '-' + HEX + '=[0, 1 or 2]')
                sys.exit()
            Number = List[1].strip()
            try:
                Value = int(Number)
                if Value == 1 or Value == 2:
                    Config[HEX] = Value
                else:
                    Config[HEX] = 0
            except ValueError:
                Config[HEX] = 0
def read(File):
    """Return the entire contents of *File* as raw binary data.

    Fix: opened in binary mode ('rb'). The input is arbitrary binary data,
    and text mode would corrupt it on platforms that translate line endings
    (e.g. CRLF handling on Windows).
    """
    with open(File, "rb") as Stream:
        return Stream.read()
def createStringValue(Value):
    """Format one byte value according to the configured radix (Config[HEX])."""
    mode = Config[HEX]
    if mode == 1:
        # lower-case hex: '#' adds the 0x prefix, total width 4 (e.g. 0x0f)
        return "{0:#0{1}x}".format(Value, 4)
    if mode == 2:
        # upper-case hex digits behind an explicit lower-case 0x prefix
        return '0x{0:0{1}X}'.format(Value, 2)
    # decimal, right-aligned in a three-character field
    if Value > 99:
        return str(Value)
    if Value > 9:
        return SPACE + str(Value)
    return SPACE + SPACE + str(Value)
def createArray(String):
    """Map every byte of *String* to its formatted textual representation."""
    return [createStringValue(ord(byte)) for byte in String]
def write(Array):
    """Emit *Array* as a C 'char' array declaration to the configured output.

    Array is a list of pre-formatted value strings (see createStringValue).
    Values are comma-separated and wrapped onto a new row after
    Config[COLUMN] entries; 0 keeps everything on a single row.
    """
    Begin = 'char binary [' + str(len(Array)) + '] = {'
    End = '};'
    String = ''
    String += Begin + LINE + TAB
    Column = 0
    for i, item in enumerate(Array):
        String += item
        # No separator after the final element.
        if i < len(Array) - 1:
            String += COMMA + SPACE
        if Config[COLUMN] > 0:
            Column += 1
            if Column == Config[COLUMN]:
                # Row is full: break the line and indent the next row,
                # unless this was the last element.
                Column = 0
                String += LINE
                if i < len(Array) - 1:
                    String += TAB
    String += LINE + End + LINE
    Config[OUTPUT].write(String)
def main():
    """Entry point: convert the file named on the command line, or print usage."""
    if len(sys.argv) <= 1:
        printUse()
        return
    checkParam()
    configReadParam()
    data = read(sys.argv[1])
    write(createArray(data))
    configReset()
#### Run
if __name__=="__main__":
main()
'''
Reference
https://en.wikibooks.org/wiki/Python_Programming/Text
https://stackoverflow.com/questions/12638408/decorating-hex-function-to-pad-zeros
''' |
5,812 | 9d6516ea099e035fb97e5165071103698a7ec140 | # Generated by Django 2.2.8 on 2019-12-10 10:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the `pole` model.

    Adds `email` and `number` fields and adjusts `avatar`, `body` and
    `title`. NOTE(review): `default=1` on the new CharFields is an int
    used only as a one-off backfill for existing rows
    (preserve_default=False) -- a string default would be cleaner; confirm
    before reuse.
    """
    dependencies = [
        ('fieldsapp', '0003_pole_avatar'),
    ]
    operations = [
        migrations.AddField(
            model_name='pole',
            name='email',
            field=models.CharField(default=1, max_length=50, verbose_name='Email'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='pole',
            name='number',
            field=models.CharField(default=1, max_length=20, verbose_name='Номер'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='pole',
            name='avatar',
            field=models.ImageField(upload_to='', verbose_name='Фото'),
        ),
        migrations.AlterField(
            model_name='pole',
            name='body',
            field=models.TextField(verbose_name='Описание поля'),
        ),
        migrations.AlterField(
            model_name='pole',
            name='title',
            field=models.CharField(max_length=255, verbose_name='Название поля'),
        ),
    ]
|
5,813 | 6a9e18cde94258b01a37f459eceaac58118b4976 | NUM_CLASSES = 31
AUDIO_SR = 16000
AUDIO_LENGTH = 16000
LIBROSA_AUDIO_LENGTH = 22050
EPOCHS = 25
# Mapping from speech-command label to integer class id; the ids are the
# canonical encoding of the model's output classes.
categories = {
    'stop': 0,
    'nine': 1,
    'off': 2,
    'four': 3,
    'right': 4,
    'eight': 5,
    'one': 6,
    'bird': 7,
    'dog': 8,
    'no': 9,
    'on': 10,
    'seven': 11,
    'cat': 12,
    'left': 13,
    'three': 14,
    'tree': 15,
    'bed': 16,
    'zero': 17,
    'happy': 18,
    'sheila': 19,
    'five': 20,
    'down': 21,
    'marvin': 22,
    'six': 23,
    'up': 24,
    'wow': 25,
    'house': 26,
    'go': 27,
    'yes': 28,
    'two': 29,
    '_background_noise_': 30,
}
# Inverse mapping (id -> label), derived from `categories` so the two
# tables can never drift out of sync (previously a second hand-written
# literal that had to be maintained in parallel).
inv_categories = {index: label for label, index in categories.items()}
# Marvin model
INPUT_SHAPE = (99, 40)
TARGET_SHAPE = (99, 40, 1)
PARSE_PARAMS = (0.025, 0.01, 40)
filters = [16, 32, 64, 128, 256]
DROPOUT = 0.25
KERNEL_SIZE = (3, 3)
POOL_SIZE = (2, 2)
DENSE_1 = 512
DENSE_2 = 256
BATCH_SIZE = 128
PATIENCE = 5
LEARNING_RATE = 0.001
|
5,814 | f49c15dca26d987e1d578790e077501a504e560b | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
class TestMark:
    """Demo of pytest marker-based selection (run with `-m demo1` / `-m demo2`).

    NOTE(review): every method prints "testcase1" -- presumably a
    copy-paste slip and the prints were meant to match the method names;
    left unchanged since it is runtime output.
    """
    @pytest.mark.demo1
    def test_case1(self):
        print("testcase1")
    @pytest.mark.demo1
    def test_case2(self):
        print("testcase1")
    @pytest.mark.demo2
    def test_case3(self):
        print("testcase1")
    @pytest.mark.demo2
    def test_case4(self):
        print("testcase1")
if __name__ == '__main__':
pytest.main(['-v','-s','test_mark.py','-m','demo1']) |
5,815 | 6d80a89a47b68fd8d81739787897355671ca94e9 | '''
The replace() function can be used to substitute any word in a string with another word.
Read each line of the learning_python.txt file created in the previous task and, when printing
to the screen, replace the word "Python" with the name of another language, e.g. C. Implement this task as a separate function.
'''
def reader():
    """Print possibilities.txt line by line with 'Python' replaced by 'C++'."""
    with open('possibilities.txt', 'r') as source:
        for raw_line in source:
            print(raw_line.strip().replace('Python', 'C++'))
if __name__ == '__main__':
reader()
|
5,816 | 629353392e3a4f346f734543ae3f2b8dc616a6c3 | #https://docs.python.org/3.4/library/itertools.html#module-itertools
l = [(1, 2, 9), (1, 3, 12), (2, 3, 8), (2, 4, 4), (2, 5, 7), (3, 5, 5), (3, 6, 2), (4, 5, 2), (4, 7, 10),
(5, 6, 11), (5, 7, 2), (6, 8, 4), (7, 8, 4), (7, 9, 3), (8, 9, 13)]
b = ['America', 'Sudan', 'Srilanka', 'Pakistan', 'Nepal', 'India', 'France']
from itertools import groupby, filterfalse, dropwhile, cycle, count, repeat, chain, takewhile, islice, zip_longest
from collections import defaultdict
#NOTE- always use itertools with sorted list if index of element is not issue to your solution
def itertools_groupby_example(list_of_nodes):
    """Group edge tuples by their first element and print the adjacency map.

    Bug fix: the function previously ignored its *list_of_nodes* argument
    and always grouped the module-level list ``l``. Note that groupby only
    merges adjacent runs, so the input should be pre-sorted by the key.
    """
    graph = defaultdict(list)
    for key, group in groupby(list_of_nodes, lambda x: x[0]):
        graph[key].append(list(group))
    print(dict(graph))
def itertools_false_filter_example(iterator):
    """Print the elements of *iterator* that are NOT greater than 10."""
    kept = list(filterfalse(lambda value: value > 10, iterator))
    print(kept)
def itertools_dropwhile_example(iterator):
    """Print *iterator* minus its leading run of elements greater than 10."""
    remaining = list(dropwhile(lambda value: value > 10, iterator))
    print(remaining)
def itertools_takewhile_example(iterator):
    """Print the input, then its leading run of elements greater than 10."""
    print(iterator)
    prefix = list(takewhile(lambda value: value > 10, iterator))
    print(prefix)
def itertools_cycle_example(iterator):
    """Endlessly print the elements of *iterator*, wrapping around.

    WARNING: cycle() never terminates for a non-empty input, so this loop
    runs forever -- demo only, do not call from real code.
    """
    for item in cycle(iterator):
        print(item)
def itertools_count_example():
    """Print 1, 2, 3, ... forever using itertools.count.

    WARNING: count() is an infinite iterator; this loop never terminates --
    demo only, do not call from real code.
    """
    for item in count(start=1, step=1):
        print(item)
def itertools_repeat_example():
    """Print the value 10 five times using itertools.repeat.

    Bug fix: the loop previously printed the literal ``3`` instead of the
    loop variable, so the values produced by repeat(10, 5) were never shown.
    """
    for item in repeat(10, 5):
        print(item)
def itertools_chain_example(iterator1, iterator2):
    """Print the concatenation of two iterables as one list."""
    print(list(chain(iterator1, iterator2)))
def itertools_islice_example(iterator):
    """Print every second element among the first ten of *iterator*."""
    sampled = list(islice(iterator, 0, 10, 2))
    print(sampled)
def itertools_chain_from_iterable_examaple():
    """Print the flattening of a hard-coded list of lists."""
    nested = [[2, 3, 4], [2, 5, 6]]
    print(list(chain.from_iterable(nested)))
def itertools_zip_longest():
    """Print tuples zipping three unequal-length lists, padding with None."""
    colors = ['red', 'orange', 'yellow', 'green', 'blue']
    numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    letters = ['a', 'b', 'c']
    for row in zip_longest(colors, numbers, letters, fillvalue=None):
        print(row)
iterator = [11,15,2,5,8,10,50,8,2,3,90,80,100]
iterator1 = [0,10,20,30,40,50,60,70,80,90,100,5]
iterator2 = ['a','b','c']
#itertools_false_filter_example(iterator1)
#itertools_dropwhile_example(iterator1)
#itertools_cycle_example(iterator1)
#itertools_count_example()
#itertools_repeat_example()
#itertools_chain_example(iterator1, iterator2)
#itertools_takewhile_example(iterator)
#itertools_islice_example(iterator)
#itertools_chain_from_iterable_examaple()
#itertools_zip_longest() |
5,817 | b3ce17401476afe2edfda3011d5602ba492cd705 | import matplotlib.pyplot as pt
import numpy as np
from scipy.optimize import leastsq
####################################
# Setting up test data
def norm(x, media, sd):
    """Gaussian PDF with mean *media* and std-dev *sd*, evaluated over *x*.

    Improvement: vectorized with NumPy instead of a per-element Python
    loop building a list -- same float values, O(1) Python overhead, and
    no longer shadows the function name with a local variable.
    Returns an ndarray with the same shape as *x*.
    """
    return 1.0 / (sd * np.sqrt(2 * np.pi)) * np.exp(-(x - media) ** 2 / (2 * sd ** 2))
media1 = 0
media2 = -2
std1 = 0.5
std2 = 1
x = np.linspace(-20, 20, 500)
y_real = norm(x, media1, std1) + norm(x, media2, std2)
######################################
# Solving
m, dm, sd1, sd2 = [5, 10, 1, 1]
p = [m, dm, sd1, sd2] # Initial guesses for leastsq
y_init = norm(x,m,sd1) + norm(x, m + dm, sd2) # For final comparison plot
def res(p, y, x):
    """Residuals for leastsq: data *y* minus the two-Gaussian model at *x*.

    p = [m, dm, sd1, sd2], where the second component's mean is m + dm.
    Bug fix: the second mean was computed as ``m1 + m`` (i.e. 2*m), which
    left ``dm`` unused and made the fit inconsistent with the model used
    elsewhere in this script (norm(x, m, sd1) + norm(x, m + dm, sd2)).
    """
    m, dm, sd1, sd2 = p
    m1 = m
    m2 = m1 + dm  # was: m1 + m
    y_fit = norm(x, m1, sd1) + norm(x, m2, sd2)
    error = y - y_fit
    return error
plsq = leastsq(res, p, args = (y_real, x))
y_est = norm(x, plsq[0][0], plsq[0][2]) + norm(x, plsq[0][0] + plsq[0][1], plsq[0][3])
|
5,818 | ee22d6226f734c67be91a3ccf1c8c0024bb7dc08 | import numpy as np
from board_specs import *
from board_components import *
import constants
import board_test
# List of resources available to be distributed on the board
RESOURCE_NAMES = constants.RESOURCE_NAMES
# Create a dictionary of each resource and a corresponding number id
res_dict = dict(zip(RESOURCE_NAMES, np.arange(0, len(RESOURCE_NAMES))))
# List of available ports that can be distributed around the board
PORTS_NAMES = constants.PORTS_NAMES
# Create a dictionary of each port and a corresponding number id
port_dict = dict(zip(PORTS_NAMES, np.arange(0, len(PORTS_NAMES))))
class Board:
    def __init__(self):
        """Build a randomized Catan board: shuffled resources, roll numbers
        and ports; robber starts on the desert; terrain/edge/intersection
        graph wired up via assign_specs().

        TODO (pre-existing): ensure 6 and 8 are not next to each other:
        no 6-6 no 6-8 no 8-8
        """
        # Array of each resource id number repeated the amount of times that
        # the resource is available on the board.
        # This will be used to distribute the resources into slots on the board
        self.board_resources = np.array(
            [res_dict["desert"]]
            + [res_dict["brick"]] * 3
            + [res_dict["ore"]] * 3
            + [res_dict["hay"]] * 4
            + [res_dict["wood"]] * 4
            + [res_dict["sheep"]] * 4
        )
        # Shuffle the resource array for randomized distribution
        np.random.shuffle(self.board_resources)
        # replace lines #42 and #44 with the following:
        # self.roll_numbers = board_test.roll_numbers
        # number associated with the desert and 0 can not actually be rolled
        self.roll_numbers = np.array([0, 2, 3, 3, 4, 4, 5, 5, 6, 6, 8, 8, 9, 9, 10, 10, 11, 11, 12])
        # shuffle number options
        np.random.shuffle(self.roll_numbers)
        # Array of the port ids, amount of times each port is available -
        self.ports = np.array(
            [port_dict["3:1"]] * 4
            + [port_dict["2brick:1"]]
            + [port_dict["2ore:1"]]
            + [port_dict["2hay:1"]]
            + [port_dict["2wood:1"]]
            + [port_dict["2sheep:1"]]
        )
        # shuffle the ports for randomized distribution
        np.random.shuffle(self.ports)
        # Zero_tile_nr will represent where the 0 number exists
        zero_tile_nr = np.where(self.roll_numbers == 0)
        # Desert_tile_nr will represent where the desert resource exists
        desert_tile_nr = np.where(self.board_resources == res_dict["desert"])
        # Robber will keep track of where the robber is and it starts in
        # the desert. Robber will be an integer.
        # Numpy returns a tuple of which the first is a list with the index.
        # We'll extract it, and add 1 since terrain keys start at 1, not 0.
        self.robber = desert_tile_nr[0][0] + 1
        # Swap so the tile holding roll-number 0 is the one holding the desert:
        # as the desert tile and replace whatever was already in the desert
        # tile into the empty zero tile
        self.board_resources[zero_tile_nr], self.board_resources[desert_tile_nr] =\
            (self.board_resources[desert_tile_nr], self.board_resources[zero_tile_nr])
        # The following code create the board objects: terrains, edges, intersections.
        # Initialize a list for each attribute type.
        self.edges = self.initialize_edges()
        self.intersections = self.initialize_intersections()
        self.terrains = self.initialize_terrains()
        # Assign the correct attributes for each attribute.
        self.assign_specs()
        """
        Cards are initialized and tracked in catan.py
        self.dev_cards=np.array('knight'*14,'victory point'*5,'road building'*2,'year of plenty'*2,'monopoly'*2)
        self.dev_cards=random.shuffle(dev_cards)
        """
def __str__(self):
# A message, of how the board is displayed.
s = '\nThe board is arranged as follows:\n'
s += ' /\\ /\\ /\\ \n'
s += ' |01|02|03| \n'
s += ' \\/ \\/ \\/ \n'
s += ' /\\ /\\ /\\ /\\ \n'
s += ' |04|05|06|07| \n'
s += ' \\/ \\/ \\/ \\/ \n'
s += ' /\\ /\\ /\\ /\\ /\\ \n'
s += '|08|09|10|11|12| \n'
s += ' \\/ \\/ \\/ \\/ \\/ \n'
s += ' /\\ /\\ /\\ /\\ \n'
s += ' |13|14|15|16| \n'
s += ' \\/ \\/ \\/ \\/ \n'
s += ' /\\ /\\ /\\ \n'
s += ' |17|18|19| \n'
s += ' \\/ \\/ \\/ \n'
# Display each terrains; the identifying numbers correspond to
# the above diagram.
s += 'Following is the content of each terrain:\n\n'
for item in self.terrains:
if self.robber == item:
s += '\nRobber is on the following tile (number {0})'.format(
self.terrains[item].identifier)
s += str(self.terrains[item])
return s
# The following methods will initialize all objects with default
# arguments; their attribute objects will be reassigned later. This
# is because the objects refer each other as attributes, and they
# must exist before being assigned. The objects will be stored in a
# dictionary, with reference numbers as keys.
    def initialize_edges(self):
        """Create all 72 Edge objects, keyed 1..72, with empty relations."""
        edges = {}
        for x in range(1, 73):
            edges[x] = Edge(x, intersections=[], terrains=[])
        return edges
    def initialize_intersections(self):
        """Create all 54 Intersection objects, keyed 1..54, with empty relations."""
        intersections = {}
        for x in range(1, 55):
            intersections[x] = Intersection(x, edges=[], terrains=[])
        return intersections
    def initialize_terrains(self):
        """Create all 19 Terrain objects, keyed 1..19, with placeholder values."""
        terrains = {}
        for x in range(1, 20):
            terrains[x] = Terrain(x, x, 0)
        return terrains
# The following method will assign the correct attributes for each
# object. It does not matter if the object that's assigned already
# has it's own attributes referred to properly, or if it will be
# assigned later. The pointers remain unchanged, and all objects
# will have their proper attributes. This circular relationship is
# interesting. An object's attribute's attribute can be the initial
# object.
    def assign_specs(self) -> None:
        """Wire terrains, edges and intersections to one another and assign
        the shuffled resources, roll numbers and ports, per board_specs."""
        # First, it loops through the list of terrains from the board_specs
        # file. The first item is the key/identifier. Then there are two
        # tuples: the intersections, and the edges.
        for item in terrains_specs:
            # Create a local variable to hold the edges for this terrain.
            local_egdes = []
            for subitem in item[1]:
                # Each integer in the tuple refers to a key in the edges
                # dictionary. This edge will be added to the list.
                # Additionally, this edge's terrains attribute will be updated
                # to hold the terrain we're working on now.
                local_egdes.append(self.edges[subitem])
                self.edges[subitem].terrains.append(self.terrains[item[0]])
            # The same process is repeated for the intersections.
            local_intersections = []
            for subitem in item[2]:
                local_intersections.append(self.intersections[subitem])
                self.intersections[subitem].terrains.append(self.terrains[item[0]])
            # The local lists are converted to tuples and passed to the terrain.
            self.terrains[item[0]].edges = (tuple(local_egdes))
            self.terrains[item[0]].intersections = (tuple(local_intersections))
            # Assign the last landscape and resource number. (The lists
            # were shuffled, so it's random.) I deduct 1 from the list index,
            # since the dictionary uses keys starting at 1, and lists start at 0.
            self.terrains[item[0]].resource = self.board_resources[item[0]-1]
            self.terrains[item[0]].resource_num = self.roll_numbers[item[0]-1]
        # Using the next list from the board_specs file, the intersections and
        # edges will reference each other. Additionally, the ports will be added.
        for item in intersections_specs:
            # It uses the same method as above: loops throught he intersections
            # to add a list of edges, and adds self to the edge being processed.
            local_egdes = []
            for subitem in item[1]:
                local_egdes.append(self.edges[subitem])
                self.edges[subitem].intersections.append(self.intersections[item[0]])
            self.intersections[item[0]].edges = local_egdes
            # If that item contains a port, assign it here.
            if len(item) == 3:
                self.intersections[item[0]].port = self.ports[item[2]]
"""
Cards are initialized and tracked in catan.py
def buy_dev_card(self,current_player):
# pop the card from the dev card and add it to the players dev cards
#TODO need to see if you can purchase not sure how to use that method
self.card=dev_cards.pop()
player(current_player).development_cards.insert(card)
player(current_player).resource_cards.remove('sheep')
player(current_player).resource_cards.remove('wheat')
player(current_player).resource_cards.remove('ore')
"""
# Create and display the board object.
def main():
b = Board()
print(b)
if __name__ == '__main__':
main()
|
5,819 | d3f80deb72ca2bd91fc09b49ad644f54d339f962 | #! /home/joreyna/anaconda2/envs/hla/bin/python
import argparse
import os
import sys
import time
import numpy as np
import copy
import subprocess
import math
project_dir = os.path.join(sys.argv[0], '../../')
project_dir = os.path.abspath(project_dir)
output_dir = os.path.join(project_dir, 'output/', 'pipeline/', 'sample/')
subprocess.call('mkdir -p {}'.format(output_dir), shell=True)
# PARSING commandline arguments
parser = argparse.ArgumentParser(description='Generate a DNA sequence containing a VNTR sequence.')
parser.add_argument('len', metavar='seqLen', type=int, \
help='The length of the sequences.')
parser.add_argument('vntr', metavar='VNTR', type=str, \
help='The VNTR that will be introduced.',
default='GCACGCTGCTGTGTAGTGGAGAAAGGGCAGGCAGCGAGCAAGCGTGTACAAGGTATATACGTGCC')
parser.add_argument('numVNTR', metavar='numVNTR', type=int, \
help='The number of VNTR copies that will be introduced.')
parser.add_argument('numMuts', metavar='numMuts', type=int, \
help='The number of mutations per copy.')
parser.add_argument('--mutation_type', metavar='mutType', type=str, \
choices=['individual_random_mutations', 'group_random_mutations', 'specific_mutations'], \
default='individual_random_mutations',
help='Copies of the VNTR can different mutations. Specify ' + \
'mutation_type to simulate different mutational ' + \
'events in the VNTR copies.\n' + \
'Choices:\n' + \
'individual_random_mutations,\n' + \
'group_random_mutations, and\n' + \
'specific_mutations.')
parser.add_argument('--rlen', metavar='read length', type=int, \
help='The size of the output sequences.', default=150)
parser.add_argument('--loc', metavar='locus', type=int, \
help='The location where the snps are inserted.')
parser.add_argument('--outer_pad', action='store_true', \
help='Adds a padding around the VNTR for visual aid.', default=False)
parser.add_argument('--inner_pad', action='store_true', \
help='Adds a padding between copies of the VNTR for visual aid.', default=False)
parser.add_argument('-o', metavar='outputPrefix', type=str,
help='The prefix of the output filename.')
parser.add_argument('--gen_ref', action='store_true',
help='Generate a reference file as well which has a single copy of the VNTR.')
args = parser.parse_args()
## PRINTING commandline argument values
#print('\n')
#print('ArgParse Argument Values')
#print('--------------------')
#print('len: {}'.format(args.len))
#print('VNTR: {}'.format(args.vntr))
#print('VNTR copies: {}'.format(args.numVNTR))
#print('Mutations per VNTR copy: {}'.format(args.numMuts))
#print('Mutation Type: {}'.format(args.mutation_type))
#print('location: {}'.format(args.loc))
#print('outer pad: {}'.format(args.outer_pad))
#print('inner pad: {}'.format(args.inner_pad))
#print('output prefix: {}'.format(args.o))
#print('\n')
#
#
# DEFINING functions for generating random
# sequences with a VNTR insertion
def generate_mutation(base):
    """Return a random nucleotide different from *base*.

    Raises Exception when *base* is not one of A/C/G/T.
    """
    alphabet = ['A', 'C', 'G', 'T']
    if base not in alphabet:
        raise Exception('base is not a proper DNA nucleotide (ACGT).')
    candidates = [nucleotide for nucleotide in alphabet if nucleotide != base]
    return np.random.choice(candidates)
def introduce_random_mutations(vntr, m):
    """Return a copy of *vntr* with *m* random SNP substitutions.

    Mutation positions are drawn without replacement, so exactly *m*
    characters differ from the input.
    """
    chosen = np.random.choice(range(len(vntr)), m, replace=False)
    mutated = [
        generate_mutation(base) if position in chosen else base
        for position, base in enumerate(vntr)
    ]
    return ''.join(mutated)
def introduce_specific_mutations(vntr, sites, mutations):
    """Return *vntr* with mutations[i] substituted at position sites[i].

    Raises Exception when the two lists differ in length, or when a
    requested "mutation" equals the nucleotide already at that site.
    Sites outside the sequence are silently ignored (unchanged behaviour).
    """
    if len(sites) != len(mutations):
        raise Exception('The number of sites and mutations do not correspond.')
    mutated = list(vntr)
    for position, base in enumerate(mutated):
        if position not in sites:
            continue
        replacement = mutations[sites.index(position)]
        if base == replacement:
            raise Exception('Not a mutation. The current site is {}. The current '
                            'nucleotide is {}. Please use a different nucleotide '
                            'for this site.'.format(position, base))
        mutated[position] = replacement
    return ''.join(mutated)
# SETTING a default value for the location
# of the insert size to the middle of the sequence
loc = args.loc
if loc == None:
loc = args.len / 2
# GENERATE the random sequence
sequence = ''.join(np.random.choice(['A', 'C', 'G', 'T'], size=args.len))
# MUTATE the vntr copies.
vntr = args.vntr
if args.mutation_type == 'individual_random_mutations':
# Testing incomplete
new_vntr = []
for i in range(args.numVNTR):
new_vntr.append(introduce_random_mutations(vntr, args.numMuts))
elif args.mutation_type == 'group_random_mutations':
# Testing incomplete
new_vntr = [introduce_random_mutations(vntr, args.numMuts)] * args.numVNTR
elif args.mutation_type == 'specific_mutations':
# Deprecated. Coding incomplete.
new_vntr = introduce_specific_mutations(vntr, [0], ['C'])
# INSERT inner padding between VNTR copies
if args.inner_pad == True:
new_vntr = ' '.join(new_vntr)
else:
new_vntr = ''.join(new_vntr)
# INSERT outer padding around the VNTR
if args.outer_pad == True:
padding = ' ' * 10
new_vntr = padding + new_vntr + padding
# INSERT the VNTR into the sequence
def generate_sequence_with_vntr(sequence, loc, vntr):
    """Return *sequence* with *vntr* spliced in at index *loc*."""
    return sequence[:loc] + vntr + sequence[loc:]
n_sequence = generate_sequence_with_vntr(sequence, loc, new_vntr)
#print('Processed Variable Values')
#print('--------------------------')
#print('sequence: {}'.format(sequence))
#print('new_vntr: {}'.format(new_vntr))
#print('n_sequence: {}'.format(n_sequence))
#print('\n')
# MAKEDIR for the given sample
sample = os.path.split(args.o)[-1]
sample = sample.split('.')[0]
sample_dir = os.path.join(output_dir, sample)
subprocess.call('mkdir -p {}'.format(sample_dir), shell=True)
# WRITE the sequence file
def write_sequence(fn, rlen, sequence, sequence_name='seq1', write_mode='w'):
    """Write *sequence* to *fn* in FASTA format, wrapped at *rlen* characters.

    write_mode='a' appends an additional record instead of overwriting.
    Fixes: uses integer division (``len(sequence) / rlen`` is a float under
    Python 3, which breaks range() and slicing; ``//`` is identical under
    Python 2) and removes the unused ``fasta_seq`` local. The trailing
    partial line is always written, so a sequence that is an exact multiple
    of rlen produces a trailing blank line -- pre-existing behaviour, kept.
    """
    with open(fn, write_mode) as f:
        f.write('>{}\n'.format(sequence_name))
        div = len(sequence) // rlen  # number of full-width lines
        for i in range(div):
            f.write('{}\n'.format(sequence[i * rlen: (i + 1) * rlen]))
        # remainder (possibly empty) line
        f.write('{}\n'.format(sequence[div * rlen:]))
if args.o != None:
write_sequence(args.o, args.rlen, n_sequence)
# WRITE the reference file and bed file
def critical_copy_number(rlen, clen):
    """Minimum number of VNTR copies so a read of length *rlen* can map
    entirely inside the repeat region (copy length *clen*).

    Fixes: the exact-multiple branch used true division, which returns a
    float under Python 3 (e.g. 3.0 instead of 3); integer division keeps
    the result an int while preserving the Python 2 value.

    Raises Exception when rlen < clen.
    """
    if rlen < clen:
        raise Exception('clen is larger than rlen.')
    if rlen % clen > 0:
        # A partial copy is needed at the end -> round up.
        return int(math.ceil(float(rlen) / clen))
    # Exact multiple: one extra copy guarantees a fully-internal window.
    return 1 + rlen // clen
if args.gen_ref:
# CALCULATE the critical copy number
ccn = critical_copy_number(args.rlen, len(vntr))
# WRITE the reference file
num_seqs = int(math.ceil(float(150)/len(vntr)))
fn = args.o.replace('.fa', '_reference.fa')
if os.path.exists(fn): # REMOVE if already exists
os.remove(fn)
for i in range(0, ccn + 1):
r_sequence = generate_sequence_with_vntr(sequence, loc, vntr * i)
write_sequence(fn, args.rlen, r_sequence, sequence_name='seq{}'.format(i), write_mode='a')
# WRITE the bed file for VNTR and non-VNTR regions
bed_fn = args.o.replace('.fa', '_reference.bed')
with open(bed_fn, 'w') as f:
#print('read length: {}, vntr length: {}'.format(args.rlen, len(vntr)))
#print('critical copy number: {}'.format(ccn))
for i in range(0, ccn + 1):
sequence_name='seq{}'.format(i)
wrt = [sequence_name, loc, loc + len(vntr * i)]
wrt = [str(x) for x in wrt]
f.write('\t'.join(wrt) + '\n')
bed_fn = args.o.replace('.fa', '_non_vntr_reference.bed')
with open(bed_fn, 'w') as f:
#print('read length: {}, vntr length: {}'.format(args.rlen, len(vntr)))
#print('critical copy number: {}'.format(ccn))
for i in range(0, ccn + 1):
sequence_name='seq{}'.format(i)
wrt = [sequence_name, 0, loc]
wrt = [str(x) for x in wrt]
f.write('\t'.join(wrt) + '\n')
wrt = [sequence_name, loc + len(vntr * i), args.len + len(vntr * i)]
wrt = [str(x) for x in wrt]
f.write('\t'.join(wrt) + '\n')
# INDEX the reference file
subprocess.call('bwa index {}'.format(fn), shell=True)
|
5,820 | 0ac99e2b33f676a99674c9a8e5d9d47c5bce084b |
plik=open("nowy_zad_84.txt", "w")
print(" Podaj 5 imion")
for i in range(1,6):
imie=input(f" Podaj imie nr {i} ")
# plik.write(imie)
# plik.write("\n")
plik.write(f" {imie} \n")
plik.close()
plik=open("nowy_zad_84.txt", "a")
for i in range(1,101):
plik.write(str(i))
plik.write("\n")
plik.close()
|
5,821 | f7d0d7dda955acd07b6da010d21dc5f02254e1ed | from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
from . import views
app_name = 'produce'
urlpatterns = [
# Inbound SMS view:
url(r'^sms/$', views.sms, name='sms'),
# List and Detail Views:
url(r'^list/', views.SeasonalView.as_view(), name='list'),
url(r'^(?P<pk>[0-9]+)/$', views.ProduceDetailView.as_view(), name='produce_detail'),
# CRUD for Produce Items:
url(r'^submit/', views.submit_new_produce, name='submit'),
url(r'^thanks/', TemplateView.as_view(template_name='produce/thanks.html')),
url(r'^(?P<pk>[0-9]+)/edit/$', views.ProduceUpdateView.as_view(), name='produce_edit'),
url(r'^(?P<pk>[0-9]+)/delete/$', views.ProduceDeleteView.as_view(), name='produce_delete'),
] |
5,822 | 00228facd19c72bebd9afbbe52597e390233d41e | import requests
import logging
import json
class Handler(object):
    def __init__(self):
        """
        This class is used to handle interaction towards coffee interface.
        """
        super(Handler, self).__init__()
        logging.warning('Initializing coffeeHandler....')
        # get an active token and get prepared for sending request
        # (a requests.Session reuses connections and keeps cookies/headers
        # across the calls made by get_rsp_from_url)
        self.coffee_session = requests.session()
def get_rsp_from_url(self, url, params=None, method='get', data=None):
logging.warning('when using method {}, header is:\n {} \n data is: \n{}.\n'.
format(method, self.coffee_session.headers, data))
rsp = None
if 'get' == method:
rsp = self.coffee_session.get(url, params=params, timeout=10)
elif 'put' == method:
rsp = self.coffee_session.put(url, data=json.dumps(data))
elif 'post' == method:
rsp = self.coffee_session.post(url, data=json.dumps(data))
elif 'delete' == method:
rsp = self.coffee_session.delete(url, data=json.dumps(data))
else:
assert 0, 'We only support get/post/put/delete for now!!!'
logging.warning('\n\n#####\nget rsp from url: \n{} is :\n##### \n{}\n#####\n\ntext is: \n{}\n#####\n'.
format(url, repr(rsp), repr(rsp.text)))
return rsp
def check_rsp(self, origin_rsp, expected_rsp, check_format=False, check_partial_rsp=False, check_length=False,
check_format_ignore_list_length=False, check_format_null_str=False):
if check_format:
logging.warning('Now start to check format for origin_rsp and expected_rsp!')
self._check_format(origin_rsp, expected_rsp, check_format_ignore_list_length, check_format_null_str)
if check_partial_rsp:
self._check_partial_rsp(expected_rsp, origin_rsp)
if check_length is not False:
for key, expected_length in check_length.iteritems():
current_length = len(origin_rsp[key])
assert expected_length == current_length, \
'We expect to see length of \'{}\' in origin_rsp is {}, but now it is {}'.format(
key, expected_length, current_length)
if not any([check_format, check_partial_rsp, check_length]):
sorted_expected_rsp = self._order_json(expected_rsp)
sorted_origin_rsp = self._order_json(origin_rsp)
logging.warning('\nWe expect to see \n\n{}, \n\nand we get \n\n{}.'.format(sorted_expected_rsp,
sorted_origin_rsp))
assert sorted_expected_rsp == sorted_origin_rsp, \
'We don\'t get the expected,please check the log'
logging.warning('\033[0;32m check_rsp done!!! PASS\033[0m')
def _check_format(self, origin_rsp, expected_rsp, check_format_ignore_list_length, check_format_null_str):
logging.warning(u'now compare origin rsp: \n{}'.format(origin_rsp))
logging.warning(u'\nAnd expected_rsp: \n{}'.format(expected_rsp))
if isinstance(origin_rsp, dict) and isinstance(expected_rsp, dict):
assert len(origin_rsp) == len(
expected_rsp), 'Length of dict is not right! Please check the length.\norigin_rsp: ' \
'\n{}\nexpected_rsp: \n{}'.format(origin_rsp, expected_rsp)
for key, value in origin_rsp.iteritems():
assert expected_rsp.get(
key), 'In expected_rsp, there is no key: {} while there is in origin_rsp'.format(str(key))
logging.warning(u'Check value for the same key: [{}] in origin_rsp and expected_rsp'.format(key))
self._check_format(value, expected_rsp.get(key),
check_format_ignore_list_length, check_format_null_str)
elif isinstance(origin_rsp, list) and isinstance(expected_rsp, list):
if expected_rsp:
logging.warning('Length of list is not right! Please check the length.\norigin_rsp: \n{}\nexpected_rsp:'
' \n{}'.format(origin_rsp, expected_rsp))
if check_format_ignore_list_length:
for index in xrange(len(expected_rsp)):
self._check_format(origin_rsp[index], expected_rsp[index],
check_format_ignore_list_length, check_format_null_str)
else:
assert len(origin_rsp) == len(
expected_rsp), 'Length of list is not right! Please check the length.'
for index in xrange(len(origin_rsp)):
self._check_format(origin_rsp[index], expected_rsp[index],
check_format_ignore_list_length, check_format_null_str)
else:
return True
elif isinstance(origin_rsp, int) and isinstance(expected_rsp, int):
return True
elif isinstance(origin_rsp, float) and isinstance(expected_rsp, float):
return True
elif (isinstance(origin_rsp, str) or isinstance(origin_rsp, unicode)) and (
isinstance(expected_rsp, str) or isinstance(expected_rsp, unicode)):
return True
elif check_format_null_str:
if origin_rsp is None and isinstance(expected_rsp, str):
return True
if origin_rsp is None and isinstance(expected_rsp, int):
return True
else:
logging.warning(
'Check format fail!!!! We get different value here!!\norigin_rsp: \n{}\nbut we expect to see in '
'expected_rsp: \n{}'.format(origin_rsp, expected_rsp))
assert 0, 'Check format fail!!!! We get different value here!!'
def _order_json(self, json_string):
"""
Return an ordered list for compare.
:param json_string: string in json format
:return: an ordered list
"""
if isinstance(json_string, dict):
return sorted((k, self._order_json(v)) for k, v in json_string.items())
if isinstance(json_string, list):
return sorted(self._order_json(x) for x in json_string)
else:
return json_string
def _check_partial_rsp(self, exp, ori):
"""
Check partial rsp but not the while rsp.
:param exp: expected rsp
:param ori: origin rsp
:return: None
"""
logging.warning('Start to check if expected_rsp: {} is part of origin_rsp: {}'.format(exp, ori))
# so far, leaf node could be string or list which must be exactly the same
if isinstance(exp, dict):
for k, v in exp.iteritems():
if ori.get(k):
self._check_partial_rsp(exp[k], ori[k])
else:
assert 0, 'key \'{}\' does not exist in original response.'.format(k)
elif isinstance(exp, list):
for index in xrange(len(exp)):
if isinstance(exp[index], dict):
self._assert_dict_contain(exp[index], ori[index])
elif isinstance(exp[index], list):
self._check_partial_rsp(exp[index], ori[index])
else:
assert exp[index] in ori, 'exp: {} does not in ori: {}'.format(exp[index], ori)
else:
assert exp == ori, 'exp: {} does not equal to ori: {}'.format(exp, ori)
@staticmethod
def _assert_dict_contain(subset_dict, whole_dict):
logging.warning('subset_dict is {}, whole_dict is {}'.format(subset_dict, whole_dict))
for key in subset_dict:
if whole_dict.get(key):
continue
else:
assert 0, '{} should be subset of {}, but now it is not!!'.format(subset_dict, whole_dict)
|
5,823 | 890841c8892e89375bb022f0d469fefc27414a2b | from abc import abstractmethod
from anoncreds.protocol.repo.public_repo import PublicRepo
from anoncreds.protocol.types import ClaimDefinition, PublicKey, SecretKey, ID, \
RevocationPublicKey, AccumulatorPublicKey, Accumulator, TailsType, \
RevocationSecretKey, AccumulatorSecretKey, \
TimestampType
from anoncreds.protocol.wallet.wallet import Wallet, WalletInMemory
class IssuerWallet(Wallet):
    """Abstract wallet interface for an anoncreds issuer.

    Extends the generic Wallet with submit/get operations for issuer-side
    artifacts: claim definitions, public/secret key pairs, accumulators and
    context attributes.  Public parts go through the PublicRepo; subclasses
    decide where secret parts are stored.
    """

    def __init__(self, claimDefId, repo: PublicRepo):
        Wallet.__init__(self, claimDefId, repo)

    # SUBMIT
    @abstractmethod
    async def submitClaimDef(self,
                             claimDef: ClaimDefinition) -> ClaimDefinition:
        """Publish a claim definition; return the stored version."""
        raise NotImplementedError

    @abstractmethod
    async def submitPublicKeys(self, claimDefId: ID, pk: PublicKey,
                               pkR: RevocationPublicKey = None) -> (
            PublicKey, RevocationPublicKey):
        """Publish the primary (and optional revocation) public keys."""
        raise NotImplementedError

    @abstractmethod
    async def submitSecretKeys(self, claimDefId: ID, sk: SecretKey,
                               skR: RevocationSecretKey = None):
        """Store the primary (and optional revocation) secret keys."""
        raise NotImplementedError

    @abstractmethod
    async def submitAccumPublic(self, claimDefId: ID,
                                accumPK: AccumulatorPublicKey,
                                accum: Accumulator, tails: TailsType):
        """Publish the accumulator, its public key and the tails."""
        raise NotImplementedError

    @abstractmethod
    async def submitAccumSecret(self, claimDefId: ID,
                                accumSK: AccumulatorSecretKey):
        """Store the accumulator secret key."""
        raise NotImplementedError

    @abstractmethod
    async def submitAccumUpdate(self, claimDefId: ID, accum: Accumulator,
                                timestampMs: TimestampType):
        """Publish an updated accumulator state with its timestamp."""
        raise NotImplementedError

    @abstractmethod
    async def submitContextAttr(self, claimDefId: ID, m2):
        """Store the context attribute (m2) for a claim definition."""
        raise NotImplementedError

    # GET
    @abstractmethod
    async def getSecretKey(self, claimDefId: ID) -> SecretKey:
        """Return the primary secret key for the claim definition."""
        raise NotImplementedError

    @abstractmethod
    async def getSecretKeyRevocation(self,
                                     claimDefId: ID) -> RevocationSecretKey:
        """Return the revocation secret key for the claim definition."""
        raise NotImplementedError

    @abstractmethod
    async def getSecretKeyAccumulator(self,
                                      claimDefId: ID) -> AccumulatorSecretKey:
        """Return the accumulator secret key for the claim definition."""
        raise NotImplementedError

    @abstractmethod
    async def getContextAttr(self, claimDefId: ID):
        """Return the context attribute (m2) for the claim definition."""
        raise NotImplementedError
class IssuerWalletInMemory(IssuerWallet, WalletInMemory):
    """In-memory IssuerWallet: public parts are forwarded to the repo and
    cached; secret parts are kept only in local dicts keyed by claimDefKey.
    """

    def __init__(self, claimDefId, repo: PublicRepo):
        WalletInMemory.__init__(self, claimDefId, repo)

        # other dicts with key=claimDefKey
        self._sks = {}       # primary secret keys
        self._skRs = {}      # revocation secret keys
        self._accumSks = {}  # accumulator secret keys
        self._m2s = {}       # context attributes (m2)
        self._attributes = {}

    # SUBMIT

    async def submitClaimDef(self,
                             claimDef: ClaimDefinition) -> ClaimDefinition:
        """Publish the claim definition via the repo and cache the result."""
        claimDef = await self._repo.submitClaimDef(claimDef)
        self._cacheClaimDef(claimDef)
        return claimDef

    async def submitPublicKeys(self, claimDefId: ID, pk: PublicKey,
                               pkR: RevocationPublicKey = None) -> (
            PublicKey, RevocationPublicKey):
        """Publish public keys via the repo and cache them locally."""
        pk, pkR = await self._repo.submitPublicKeys(claimDefId, pk, pkR)
        await self._cacheValueForId(self._pks, claimDefId, pk)
        if pkR:
            await self._cacheValueForId(self._pkRs, claimDefId, pkR)
        return pk, pkR

    async def submitSecretKeys(self, claimDefId: ID, sk: SecretKey,
                               skR: RevocationSecretKey = None):
        """Secret keys never reach the repo; cache them locally only."""
        await self._cacheValueForId(self._sks, claimDefId, sk)
        if skR:
            await self._cacheValueForId(self._skRs, claimDefId, skR)

    async def submitAccumPublic(self, claimDefId: ID,
                                accumPK: AccumulatorPublicKey,
                                accum: Accumulator,
                                tails: TailsType) -> AccumulatorPublicKey:
        """Publish accumulator data via the repo and cache all parts."""
        accumPK = await self._repo.submitAccumulator(claimDefId, accumPK, accum,
                                                     tails)
        await self._cacheValueForId(self._accums, claimDefId, accum)
        await self._cacheValueForId(self._accumPks, claimDefId, accumPK)
        await self._cacheValueForId(self._tails, claimDefId, tails)
        return accumPK

    async def submitAccumSecret(self, claimDefId: ID,
                                accumSK: AccumulatorSecretKey):
        """Cache the accumulator secret key locally (never published)."""
        await self._cacheValueForId(self._accumSks, claimDefId, accumSK)

    async def submitAccumUpdate(self, claimDefId: ID, accum: Accumulator,
                                timestampMs: TimestampType):
        """Publish an accumulator update and refresh the local cache."""
        await self._repo.submitAccumUpdate(claimDefId, accum, timestampMs)
        await self._cacheValueForId(self._accums, claimDefId, accum)

    async def submitContextAttr(self, claimDefId: ID, m2):
        """Cache the context attribute locally (never published)."""
        await self._cacheValueForId(self._m2s, claimDefId, m2)

    # GET

    async def getSecretKey(self, claimDefId: ID) -> SecretKey:
        return await self._getValueForId(self._sks, claimDefId)

    async def getSecretKeyRevocation(self,
                                     claimDefId: ID) -> RevocationSecretKey:
        return await self._getValueForId(self._skRs, claimDefId)

    async def getSecretKeyAccumulator(self,
                                      claimDefId: ID) -> AccumulatorSecretKey:
        return await self._getValueForId(self._accumSks, claimDefId)

    async def getContextAttr(self, claimDefId: ID):
        return await self._getValueForId(self._m2s, claimDefId)
|
5,824 | ac0f0fbb9bcb450ac24198069ef8bea8b049ef47 | '''
删除排序数组中的重复项:
给定一个排序数组,你需要在原地删除重复出现的元素,使得每个元素只出现一次,返回移除后数组的新长度。
不要使用额外的数组空间,你必须在原地修改输入数组并在使用 O(1) 额外空间的条件下完成。
示例 1:
给定数组 nums = [1,1,2],
函数应该返回新的长度 2, 并且原数组 nums 的前两个元素被修改为 1, 2。
你不需要考虑数组中超出新长度后面的元素。
示例 2:
给定 nums = [0,0,1,1,1,2,2,3,3,4],
函数应该返回新的长度 5, 并且原数组 nums 的前五个元素被修改为 0, 1, 2, 3, 4。
你不需要考虑数组中超出新长度后面的元素。
'''
def delete_sort_array(origin_list):
    """Remove duplicates from a sorted list in place; return the new length.

    Classic two-pointer technique: ``write`` marks the end of the
    de-duplicated prefix while ``read`` scans ahead.  O(n) time, O(1) extra
    space.  (The previous pop-while-iterating version used stale indices
    after each pop and miscounted runs of three or more equal values,
    e.g. [1, 1, 1] -> 2 instead of 1.)

    Args:
        origin_list: A sorted list; modified in place.

    Returns:
        The number of unique elements.
    """
    if not origin_list:
        return 0
    write = 0
    for read in range(1, len(origin_list)):
        if origin_list[read] != origin_list[write]:
            write += 1
            origin_list[write] = origin_list[read]
    # Trim the tail so len(origin_list) equals the returned length,
    # matching the original function's observable behaviour.
    del origin_list[write + 1:]
    return write + 1
print(delete_sort_array([1,1,5,5,6,6,13,14]))
|
5,825 | 386e491f6b10ca27f513d678c632571c29093ad2 | # -*- coding: utf-8 -*-
import numpy as np
from . import BOID_NOSE_LEN
from .utils import normalize_angle, unit_vector
class Individual:
    """A single agent (boid) with a position, a heading and the three
    Couzin-style interaction radii (repulsion / orientation / attraction)."""

    def __init__(self, color, pos, ror, roo, roa, angle=0, speed=1.0, turning_rate=0.2):
        """Create an agent.

        Args:
            color (Color): Display color on the canvas.
            pos (numpy.ndarray): Initial position (length units).
            ror (float): Range of repulsion (length units).
            roo (float): Range of orientation (length units).
            roa (float): Range of attraction (length units).
            angle (float, optional): Initial heading (radians).
            speed (float, optional): Cruise speed (length units per second).
            turning_rate (float, optional): Max angular speed (radians per second).
        """
        self.color = color
        self.pos = np.array(pos, dtype="float")
        self.angle = normalize_angle(angle)  # kept in [-pi, pi)
        self.speed = speed
        self.turning_rate = turning_rate
        # Interaction zone radii.
        self.ror = ror
        self.roo = roo
        self.roa = roa

    @property
    def dir(self):
        """Unit direction vector derived from the current heading.

        Returns:
            numpy.ndarray: The unitary vector of direction.
        """
        return unit_vector(normalize_angle(self.angle))

    @property
    def vel(self):
        """Velocity vector (length units per second).

        Returns:
            numpy.ndarray: speed times the unit direction.
        """
        return self.speed * self.dir

    def turn_by(self, dangle, dt):
        """Rotate by *dangle*, clamped to the maximum turning rate.

        Args:
            dangle (float): Requested angular change (radians).
            dt (float): Simulation time step (seconds).
        """
        limit = dt * self.turning_rate
        # Clamp so the agent cannot out-turn its turning rate, then keep
        # the heading normalized to [-pi, pi).
        self.angle = normalize_angle(self.angle + np.clip(dangle, -limit, limit))

    def turn_to(self, angle, dt):
        """Steer toward the absolute heading *angle* (rate-limited).

        Args:
            angle (float): The desired orientation (radians).
            dt (float): Simulation time step (seconds).
        """
        self.turn_by(normalize_angle(angle - self.angle), dt)

    def tick(self, dt):
        """Advance the position one step using the current velocity.

        Args:
            dt (float): Simulation time step (seconds).
        """
        self.pos += self.vel * dt
|
5,826 | 3ec858c04a7622ae621bf322730b6b3ba9f4d07e | import datetime
from httpx import AsyncClient
from testing.conftest import (LESSONS_PATH, REVIEWS_PATH, pytestmark,
register_and_get_token)
class TestReview:
    """Integration tests for the review endpoints: adding reviews,
    fetching them back, and listing reviews that are due."""

    async def test_review_add_get(self, ac, fill_db):
        """Add reviews (including invalid and duplicate ids) and read them back."""
        # Creating users first
        token = await register_and_get_token(ac)
        token2 = await register_and_get_token(ac, main_user=False)
        # Not providing any lessons for review -> validation error
        for param in [{"lesson_id": []}, None]:
            res = await ac.post(
                REVIEWS_PATH, headers={"Authorization": f"Bearer {token}"}, params=param
            )
            assert res.status_code == 422, res.content
        # Actually adding reviews
        bad = [1488888, 8888888, 99999]  # lesson ids that do not exist
        real = [1, 4, 8, 7]
        res = await ac.post(
            REVIEWS_PATH,
            headers={"Authorization": f"Bearer {token}"},
            params={"lesson_id": bad + real},
        )
        assert res.status_code == 201, res.content
        resulting_reviews = res.json()
        # Getting each review separately and combining them
        added = []
        for review_id in real:
            res = await ac.get(
                REVIEWS_PATH + f"/{review_id}",
                headers={"Authorization": f"Bearer {token}"},
            )
            assert res.status_code == 200, res.content
            for item in res.json():
                added.append(item)
            # Trying to access the review as a different user
            # res = await ac.get(REVIEWS_PATH + f"/{review_id}",
            #                    headers={"Authorization": f"Bearer {token2}"},
            #                    )
            # assert res.status_code == 403, res.content
        # Trying to get a non-existent item
        res = await ac.get(
            REVIEWS_PATH + "/50000",
            headers={"Authorization": f"Bearer {token}"},
        )
        assert res.status_code == 404
        # Compare by lesson_id since ordering is not guaranteed.
        func = lambda x: x.get("lesson_id")
        assert sorted(resulting_reviews["added"], key=func) == sorted(added, key=func)
        assert resulting_reviews["already_added"] == []
        assert sorted(resulting_reviews["non_existent"]) == sorted(bad)
        # Adding duplicates: nothing new is added, everything real is reported
        # as already_added, the bad ids stay non_existent.
        res = await ac.post(
            REVIEWS_PATH,
            headers={"Authorization": f"Bearer {token}"},
            params={"lesson_id": bad + real},
        )
        assert res.status_code == 201, res.content
        resulting_reviews = res.json()
        assert resulting_reviews["added"] == []
        assert sorted(resulting_reviews["already_added"], key=func) == sorted(
            added, key=func
        )
        assert sorted(resulting_reviews["non_existent"]) == sorted(bad)

    async def test_getting_due_reviews(self, ac, mocker, fill_db):
        """Reviews are listed as due only after their scheduled time passes."""
        # Creating a user first
        token = await register_and_get_token(ac)
        # Creating reviews for the user
        to_add = [1, 4, 8, 7]
        res = await ac.post(
            REVIEWS_PATH,
            headers={"Authorization": f"Bearer {token}"},
            params={"lesson_id": to_add},
        )
        assert res.status_code == 201, res.content
        assert res.json()["added"] != []
        assert res.json()["already_added"] == []
        assert res.json()["non_existent"] == []
        # Getting corresponding lessons
        expected_lessons = []
        for lesson_id in to_add:
            res = await ac.get(LESSONS_PATH + f"/{lesson_id}")
            assert res.status_code == 200
            expected_lessons.append(res.json())
        # Getting not yet ready reviews
        res = await ac.get(
            REVIEWS_PATH,
            headers={"Authorization": f"Bearer {token}"},
        )
        assert res.status_code == 200, res.content
        assert res.json() == []
        # Advancing time and getting ready reviews

        class FakeDatetime:
            # Stand-in for the datetime module inside app.routers.review:
            # always reports a far-future "now" so every review is due.
            def now(self=datetime.datetime):
                return datetime.datetime(year=2100, month=1, day=1)

        mocker.patch("app.routers.review.datetime", FakeDatetime)
        res = await ac.get(
            REVIEWS_PATH,
            headers={"Authorization": f"Bearer {token}"},
        )
        assert res.status_code == 200, res.content
        func = lambda x: x.get("lesson_id")
        # TODO: bring back
        # assert sorted(res.json(), key=func) == sorted(expected_lessons, key=func)
        # TODO: check every review
        # Reviewing each lesson
        # Getting not yet ready reviews
        # res = await ac.get(REVIEWS_PATH,
        #                    headers={"Authorization": f"Bearer {token}"},
        #                    )
        # assert res.status_code == 200, res.content
        # assert res.json() == []
|
# Cycle-length puzzle: from N, repeatedly build the next number from the
# ones digit and the last digit of the digit sum, counting steps until the
# starting value reappears.
N = int(input())
start = N
steps = 0
while True:
    tens, ones = divmod(N, 10)
    N = ones * 10 + (tens + ones) % 10
    steps += 1
    if N == start:
        break
print(steps)
5,828 | d55043c2a18b935478d9be442aaf7305231edc7d | from os.path import dirname
import binwalk
from nose.tools import eq_, ok_
def test_firmware_squashfs():
    '''
    Test: scan input-vectors/firmware.squashfs for signatures,
          verify that exactly one module produced results and that
          every result matches the expected offset and description.
    '''
    expected_results = [
        [0, 'DLOB firmware header, boot partition: "dev=/dev/mtdblock/2"'],
        [112, 'LZMA compressed data, properties: 0x5D, dictionary size: 33554432 bytes, uncompressed size: 3466208 bytes'],
        [1179760, 'PackImg section delimiter tag, little endian size: 11548416 bytes; big endian size: 3649536 bytes'],
        [1179792, 'Squashfs filesystem, little endian, version 4.0, compression:lzma, size: 3647665 bytes, 1811 inodes, blocksize: 524288 bytes, created: 2013-09-17 06:43:22'],
    ]

    scan_result = binwalk.scan(
        dirname(__file__) + '/input-vectors/firmware.squashfs',
        signature=True,
        quiet=True,
        extract=True)  # Throws a warning for missing external extractor

    # Exactly one module (the signature scan) should have run.
    eq_(len(scan_result), 1)
    # Result count must match before pairwise comparison.
    eq_(len(scan_result[0].results), len(expected_results))
    # Each result must match its expected offset and description, in order.
    for actual, (offset, description) in zip(scan_result[0].results, expected_results):
        eq_(actual.offset, offset)
        eq_(actual.description, description)
|
5,829 | 778ef68b5270657f75185b27dc8219b35847afa1 | import cv2
import sys
import online as API
def demo(myAPI):
    """Run a live webcam demo: grab frames from camera 0, run the given
    face API on each frame and display the result until 'q' is pressed.

    Args:
        myAPI: API wrapper exposing setAttr() and simple_demo(frame).
    """
    myAPI.setAttr()
    video_capture = cv2.VideoCapture(0)  # default camera
    print("Press q to quit: ")
    while True:
        # Capture frame-by-frame
        # NOTE(review): ret (read-success flag) is ignored; a failed grab
        # returns frame=None and cv2.resize will raise -- confirm handling.
        ret, frame = video_capture.read() #np.array
        frame = cv2.resize(frame, (320, 240))
        # waitKey also paces the loop (~100 ms per frame).
        key = cv2.waitKey(100) & 0xFF
        if key == ord('q'):
            break
        elif key == ord('r'):
            pass  # reserved key; currently a no-op
        frame = myAPI.simple_demo(frame)
        # Display the resulting frame
        cv2.imshow('Video', frame)
    # When everything is done, release the capture
    video_capture.release()
    cv2.destroyAllWindows()
demo(API.FacePlusPlus())
|
def TriSelection(S):
    """In-place selection sort.

    The list is viewed as a sorted prefix followed by an unsorted suffix
    (initially empty / whole list).  Each pass locates the smallest element
    of the suffix and swaps it to the front of the suffix, growing the
    sorted prefix by one.
    """
    n = len(S)
    for i in range(n - 1):
        # Locate the minimum of the unsorted suffix S[i:].
        smallest = i
        for j in range(i + 1, n):
            if S[j] <= S[smallest]:
                smallest = j
        # Move it to the end of the sorted prefix.
        S[i], S[smallest] = S[smallest], S[i]
def TriInsertion(S):
    """In-place insertion sort.

    The list is viewed as a sorted prefix followed by an unsorted suffix.
    Each step takes the first unsorted element, scans the sorted prefix for
    its insertion point, shifts the tail of the prefix one slot to the
    right, and drops the element in.
    """
    for i in range(len(S) - 1):
        value = S[i + 1]
        # First position in the sorted prefix whose element is not < value.
        pos = 0
        while S[pos] < value:
            pos += 1
        # Shift S[pos..i] one slot right, then insert.
        S[pos + 1:i + 2] = S[pos:i + 1]
        S[pos] = value
5,831 | 48affa1b823a2543b6bbda615247324f5c249a69 | onfiguration name="test3" type="PythonConfigurationType" factoryName="Python" temporary="true">
<module name="hori_check" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
<option name="IS_MODULE_SDK" value="true" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test3.py" />
<option name="PARAMETERS" value="" />
<option name="SHOW_COMMAND_LINE" value="false" />
<option name="EMULATE_TERMINAL" value="false" />
<option name="MODULE_MODE" value="false" />
<option name="REDIRECT_INPUT" value="false" />
<option name="INPUT_FILE" value="" />
<method v="2" />
</configuration>
<configuration name="test4" type="PythonConfigurationType" factoryName="Python" temporary="true">
<module name="hori_check" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
<option name="IS_MODULE_SDK" value="true" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/test4.py" />
<option name="PARAMETERS" value="" />
<option name="SHOW_COMMAND_LINE" value="false" />
<option name="EMULATE_TERMINAL" value="false" />
<option name="MODULE_MODE" value="false" />
<option name="REDIRECT_INPUT" value="false" />
<option name="INPUT_FILE" value="" />
<method v="2" />
</configuration>
<list>
<item itemvalue="Python.test1" />
<item itemvalue="Python.test2" />
<item itemvalue="Python.test3" />
<item itemvalue="Python.dir_cut" />
<item itemvalue="Python.test4" />
</list>
<recent_temporary>
<list>
<item itemvalue="Python.test4" />
<item itemvalue="Python.dir_cut" />
<item itemvalue="Python.test1" />
<item itemvalue="Python.test2" />
<item itemvalue="Python.test3" />
</list>
</recent_temporary>
</component>
<component name="SvnConfiguration">
<configuration />
</component>
<component name="TaskManager">
<task active="true" id="Default" summary="Default task">
<changelist id="b9acfeb2-5104-4c03-bdda-fe9dd331ff17" name="Default Changelist" comment="" />
<created>1539654879943</created>
<option name="number" value="Default" />
<option name="presentableId" value="Default" />
<updated>1539654879943</updated>
</task>
<servers />
</component>
<component name="ToolWindowManager">
<frame x="-8" y="-8" width="1382" height="744" extended-state="6" />
<editor active="true" />
<layout>
<window_info content_ui="combo" id="Project" order="0" visible="true" weight |
5,832 | 39197b3f9f85d94457584d7e488ca376e52207f1 | from operator import itemgetter
import math
def get_tf_idf_map(document, max_freq, n_docs, index):
    """Compute an augmented tf-idf weight for every distinct term of *document*.

    Terms absent from the inverted *index* get weight 0 (tf = 0);
    each distinct term is weighted only once.

    Args:
        document: Iterable of terms.
        max_freq: Highest term frequency used for tf normalisation.
        n_docs: Total number of documents in the collection.
        index: Inverted index mapping term -> posting list of (doc, freq).

    Returns:
        dict mapping term -> tf-idf weight.
    """
    weights = {}
    for term in document:
        if term in weights:
            continue  # weight each distinct term only once
        if term in index:
            postings = index[term]
            collection_freq = sum(post[1] for post in postings)
            tf = 0.5 + 0.5 * (collection_freq / max_freq)
            idf = math.log(1 + (n_docs / len(postings)))
        else:
            tf = 0
            idf = math.log(n_docs)
        weights[term] = tf * idf
    return weights
def get_cosinus_simularity(tf_idf_map, key_words):
    """Cosine-style similarity between a weighted document and a keyword set.

    Numerator: sum of document weights of terms present in *key_words*.
    Denominator: Euclidean norm of the document weights plus
    sqrt(len(key_words)).

    Args:
        tf_idf_map: dict term -> tf-idf weight for one document.
        key_words: Collection of query terms.

    Returns:
        The similarity score (float).
    """
    shared_weight = 0
    squared_norm = 0
    for term, weight in tf_idf_map.items():
        if term in key_words:
            shared_weight += weight
        squared_norm += weight ** 2
    return shared_weight / (math.sqrt(squared_norm) + math.sqrt(len(key_words)))
def get_cosinus_ranked_documents(category, tf_idf_map, reference_words, context_words):
    """Rank documents by the product of reference and context similarity.

    Documents whose reference similarity is zero are skipped without even
    computing the context similarity; only non-zero combined scores are kept.

    Args:
        category: Unused; kept for interface compatibility with callers.
        tf_idf_map: dict document -> (dict term -> tf-idf weight).
        reference_words: Terms describing the reference entity.
        context_words: Terms describing the surrounding context.

    Returns:
        List of (document, score) tuples sorted by score, highest first.
    """
    ranked = []
    for document, weights in tf_idf_map.items():
        reference_score = get_cosinus_simularity(weights, reference_words)
        context_score = 0
        if reference_score != 0:
            context_score = get_cosinus_simularity(weights, context_words)
        combined = context_score * reference_score
        if combined != 0:
            ranked.append((document, combined))
    ranked.sort(key=itemgetter(1), reverse=True)
    return ranked
5,833 | ff331dc0c72378222db9195cce7c794f93799401 | from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
# Column names used when loading the Breast Cancer Wisconsin dataset.
# NOTE(review): the raw UCI file has 11 columns (sample id, 9 features,
# class label) but only 9 names are listed here, so pandas will fold the
# surplus leading columns into the index -- confirm the intended mapping.
cols = ['Clump Thickness', 'Uniformity of Cell Size', 'Uniformity of Cell Shape',
        'Marginal Adhesion', 'Single Epithelial Cell Size', 'Bare Nuclei', 'Bland Chromatin',
        'Normal Nucleoli', 'Mitoses']
data = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data',names=cols)
# print(data)
# '?' marks missing values in this dataset; replace with NaN and drop rows.
data.replace(to_replace='?',value=np.nan,inplace=True)
data.dropna(inplace=True)
# Positional (unshuffled) split into train / test.
data_train = data[:600]
data_test = data[600:]
# Classify the data with (unsupervised) k-means clustering into 2 clusters.
kms = KMeans(n_clusters=2)
kms.fit(data_train)
print(kms.predict(data_test))
# NOTE(review): creates an empty figure; nothing is plotted or shown.
plt.figure()
5,834 | c33d625ebd6a40551d2ce0393fd78619601ea7ae |
# This module is used to load pascalvoc datasets (2007 or 2012)
import os
import tensorflow as tf
from configs.config_common import *
from configs.config_train import *
from configs.config_test import *
import sys
import random
import numpy as np
import xml.etree.ElementTree as ET
# Original dataset organisation.
DIRECTORY_ANNOTATIONS = 'Annotations/'
DIRECTORY_IMAGES = 'JPEGImages/'
# TFRecords convertion parameters.
RANDOM_SEED = 4242
SAMPLES_PER_FILES = 200
slim = tf.contrib.slim
class Dataset(object):
    def __init__(self):
        """Set up the TF-Example parsing spec and the slim decoder items.

        ``features`` tells TensorFlow how to parse each serialized Example;
        ``items`` maps the decoded tensors to named dataset items.
        """
        # Descriptions of the image items
        self.items_descriptions = {
            'image': 'A color image of varying height and width.',
            'shape': 'Shape of the image',
            'object/bbox': 'A list of bounding boxes, one per each object.',
            'object/label': 'A list of labels, one per each object.',
        }
        # Features of Pascal VOC TFRecords.
        self.features = {
            'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
            'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'),
            'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
            'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
            'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
            'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32),
            'image/object/bbox/label': tf.VarLenFeature(dtype=tf.int64),
            'image/object/bbox/difficult': tf.VarLenFeature(dtype=tf.int64),
        }
        # Items in Pascal VOC TFRecords.
        self.items = {
            'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'),
            'gt_bboxes': slim.tfexample_decoder.BoundingBox(['ymin','xmin','ymax','xmax'], 'image/object/bbox/'),
            'gt_labels': slim.tfexample_decoder.Tensor('image/object/bbox/label'),
            'difficult_objects': slim.tfexample_decoder.Tensor('image/object/bbox/difficult'),
        }
# This function reads dataset from tfrecords
# Inputs:
# datase_name: pascalvoc_2007
# train_or_test: test
# dataset_path: './tfrecords_test/'
# Outputs:
# loaded dataset
    def read_dataset_from_tfrecords(self, dataset_name, train_or_test, dataset_path):
        """Load a dataset from TFRecords.

        Args:
            dataset_name: 'pascalvoc_2007' or 'pascalvoc_2012'.
            train_or_test: Split name, e.g. 'train' or 'test'.
            dataset_path: Directory containing the .tfrecord shards.

        Returns:
            The loaded slim dataset, or None (implicit) for any other
            dataset name.
        """
        with tf.name_scope(None, "read_dataset_from_tfrecords") as scope:
            if dataset_name == 'pascalvoc_2007' or dataset_name == 'pascalvoc_2012':
                dataset = self.load_dataset(dataset_name, train_or_test, dataset_path)
                return dataset
# This function is used to load pascalvoc2007 or psaclvoc2012 datasets
# Inputs:
# dataset_name: pascalvoc_2007
# train_or_test: test
# dataset_path: './tfrecords_test/'
# Output:
# loaded dataset
    def load_dataset(self, dataset_name, train_or_test, dataset_path):
        """Build a slim Dataset for pascalvoc_2007 or pascalvoc_2012.

        Args:
            dataset_name: 'pascalvoc_2007' or 'pascalvoc_2012'.
            train_or_test: Split name ('train' or 'test').
                NOTE(review): pascalvoc_2012 only defines a 'train' size
                here, so requesting its 'test' split raises KeyError --
                confirm that is intended.
            dataset_path: Directory containing the .tfrecord shards.

        Returns:
            A configured slim.dataset.Dataset.
        """
        # e.g. 'pascalvoc_2007'[6:] == 'voc_2007' -> 'voc_2007_%s_*.tfrecord'
        dataset_file_name = dataset_name[6:] + '_%s_*.tfrecord'
        if dataset_name == 'pascalvoc_2007':
            train_test_sizes = {
                'train': FLAGS.pascalvoc_2007_train_size,
                'test': FLAGS.pascalvoc_2007_test_size,
            }
        elif dataset_name == 'pascalvoc_2012':
            train_test_sizes = {
                'train': FLAGS.pascalvoc_2012_train_size,
            }
        dataset_file_name = os.path.join(dataset_path, dataset_file_name % train_or_test)
        reader = tf.TFRecordReader
        decoder = slim.tfexample_decoder.TFExampleDecoder(self.features, self.items)
        # num_classes excludes the background class (hence the -1).
        return slim.dataset.Dataset(
            data_sources=dataset_file_name,
            reader=reader,
            decoder=decoder,
            num_samples=train_test_sizes[train_or_test],
            items_to_descriptions=self.items_descriptions,
            num_classes=FLAGS.num_classes-1,
            labels_to_names=None)
# This function gets groundtruth bboxes & labels from dataset
# Inputs:
# dataset
# train_or_test: train/test
# Output:
# image, ground-truth bboxes, ground-truth labels, ground-truth difficult objects
    def get_groundtruth_from_dataset(self, dataset, train_or_test):
        """Create a data provider and pull one sample's ground truth.

        Args:
            dataset: slim Dataset as returned by load_dataset().
            train_or_test: 'train' or 'test'; selects queue and shuffle
                settings for the provider.

        Returns:
            [image, gt_bboxes, gt_labels, gt_difficult_objects] tensors.
        """
        # Dataset provider
        with tf.name_scope(None, "get_groundtruth_from_dataset") as scope:
            if train_or_test == 'test':
                provider = slim.dataset_data_provider.DatasetDataProvider(
                    dataset,
                    num_readers=FLAGS.test_num_readers,
                    common_queue_capacity=FLAGS.test_common_queue_capacity,
                    common_queue_min=FLAGS.test_batch_size,
                    shuffle=FLAGS.test_shuffle)
            elif train_or_test == 'train':
                provider = slim.dataset_data_provider.DatasetDataProvider(
                    dataset,
                    num_readers= FLAGS.train_num_readers,
                    common_queue_capacity= FLAGS.train_common_queue_capacity,
                    common_queue_min= 10 * FLAGS.train_batch_size,
                    shuffle=FLAGS.train_shuffle)
            # Get images, groundtruth bboxes & groundtruth labels from database
            [image, gt_bboxes, gt_labels] = provider.get(['image','gt_bboxes','gt_labels'])
            # Discard difficult objects.
            # NOTE(review): the flag consulted is test_discard_difficult_objects
            # even on the 'train' path -- confirm that is intended.
            gt_difficult_objects = tf.zeros(tf.shape(gt_labels), dtype=tf.int64)
            if FLAGS.test_discard_difficult_objects:
                [gt_difficult_objects] = provider.get(['difficult_objects'])
            return [image, gt_bboxes, gt_labels, gt_difficult_objects]
##########################################
# Convert PascalVOC to TF recorsd
# Process a image and annotation file.
# Inputs:
# filename: string, path to an image file e.g., '/path/to/example.JPG'.
# coder: instance of ImageCoder to provide TensorFlow image coding utils.
# Outputs:
# image_buffer: string, JPEG encoding of RGB image.
# height: integer, image height in pixels.
# width: integer, image width in pixels.
def _process_image_PascalVOC(self, directory, name):
# Read the image file.
filename = directory + DIRECTORY_IMAGES + name + '.jpg'
image_data = tf.gfile.FastGFile(filename, 'r').read()
# Read the XML annotation file.
filename = os.path.join(directory, DIRECTORY_ANNOTATIONS, name + '.xml')
tree = ET.parse(filename)
root = tree.getroot()
# Image shape.
size = root.find('size')
shape = [int(size.find('height').text), int(size.find('width').text), int(size.find('depth').text)]
# Find annotations.
bboxes = []
labels = []
labels_text = []
difficult = []
truncated = []
for obj in root.findall('object'):
label = obj.find('name').text
labels.append(int(VOC_LABELS[label][0]))
labels_text.append(label.encode('ascii'))
if obj.find('difficult'):
difficult.append(int(obj.find('difficult').text))
else:
difficult.append(0)
if obj.find('truncated'):
truncated.append(int(obj.find('truncated').text))
else:
truncated.append(0)
bbox = obj.find('bndbox')
bboxes.append((float(bbox.find('ymin').text) / shape[0],
float(bbox.find('xmin').text) / shape[1],
float(bbox.find('ymax').text) / shape[0],
float(bbox.find('xmax').text) / shape[1]
))
return image_data, shape, bboxes, labels, labels_text, difficult, truncated
# Build an Example proto for an image example.
# Args:
# image_data: string, JPEG encoding of RGB image;
# labels: list of integers, identifier for the ground truth;
# labels_text: list of strings, human-readable labels;
# bboxes: list of bounding boxes; each box is a list of integers;
# shape: 3 integers, image shapes in pixels.
# Returns:
# Example proto
def _convert_to_example_PascalVOC(self, image_data, labels, labels_text, bboxes, shape, difficult, truncated):
xmin = []
ymin = []
xmax = []
ymax = []
for b in bboxes:
assert len(b) == 4
# pylint: disable=expression-not-assigned
[l.append(point) for l, point in zip([ymin, xmin, ymax, xmax], b)]
# pylint: enable=expression-not-assigned
image_format = b'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': self.int64_feature(shape[0]),
'image/width': self.int64_feature(shape[1]),
'image/channels': self.int64_feature(shape[2]),
'image/shape': self.int64_feature(shape),
'image/object/bbox/xmin': self.float_feature(xmin),
'image/object/bbox/xmax': self.float_feature(xmax),
'image/object/bbox/ymin': self.float_feature(ymin),
'image/object/bbox/ymax': self.float_feature(ymax),
'image/object/bbox/label': self.int64_feature(labels),
'image/object/bbox/label_text': self.bytes_feature(labels_text),
'image/object/bbox/difficult': self.int64_feature(difficult),
'image/object/bbox/truncated': self.int64_feature(truncated),
'image/format': self.bytes_feature(image_format),
'image/encoded': self.bytes_feature(image_data)}))
return example
# Loads data from image and annotations files and add them to a TFRecord.
# Inputs:
# dataset_dir: Dataset directory;
# name: Image name to add to the TFRecord;
# tfrecord_writer: The TFRecord writer to use for writing.
def _add_to_tfrecord_PascalVOC(self, dataset_dir, name, tfrecord_writer):
image_data, shape, bboxes, labels, labels_text, difficult, truncated = self._process_image_PascalVOC(dataset_dir, name)
example = self._convert_to_example_PascalVOC(image_data, labels, labels_text, bboxes, shape, difficult, truncated)
tfrecord_writer.write(example.SerializeToString())
def _get_output_filename_PascalVOC(output_dir, name, idx):
return '%s/%s_%03d.tfrecord' % (output_dir, name, idx)
# Convert images to tfrecords
# Args:
# dataset_dir: The dataset directory where the dataset is stored.
# output_dir: Output directory.
    def run_PascalVOC(self, dataset_dir, output_dir, name='voc_train', shuffling=False):
        # Convert a PascalVOC dataset directory into sharded TFRecord files.
        # Args:
        #    dataset_dir: dataset root (annotations under DIRECTORY_ANNOTATIONS).
        #    output_dir: directory receiving the <name>_NNN.tfrecord shards.
        #    name: shard filename prefix.
        #    shuffling: when True, shuffle sample order using RANDOM_SEED.
        # NOTE(review): this creates *dataset_dir* (the input) when missing —
        # presumably output_dir was intended; confirm before relying on it.
        if not tf.gfile.Exists(dataset_dir):
            tf.gfile.MakeDirs(dataset_dir)
        # Dataset filenames, and shuffling.
        path = os.path.join(dataset_dir, DIRECTORY_ANNOTATIONS)
        filenames = sorted(os.listdir(path))
        if shuffling:
            # Fixed seed keeps the shuffled order reproducible across runs.
            random.seed(RANDOM_SEED)
            random.shuffle(filenames)
        # Process dataset files.
        # Outer loop opens one shard per iteration; the inner loop fills it
        # with up to SAMPLES_PER_FILES samples. `i` indexes samples globally,
        # `fidx` numbers the shards.
        i = 0
        fidx = 0
        while i < len(filenames):
            # Open new TFRecord file.
            # NOTE(review): this calls self._get_output_filename, not the
            # _get_output_filename_PascalVOC defined in this class — verify a
            # sibling method of that name exists elsewhere in the file.
            tf_filename = self._get_output_filename(output_dir, name, fidx)
            with tf.python_io.TFRecordWriter(tf_filename) as tfrecord_writer:
                j = 0
                while i < len(filenames) and j < SAMPLES_PER_FILES:
                    sys.stdout.write('\r>> Converting image %d/%d' % (i+1, len(filenames)))
                    sys.stdout.flush()
                    # Strip the '.xml' extension to recover the sample name.
                    filename = filenames[i]
                    img_name = filename[:-4]
                    self._add_to_tfrecord_PascalVOC(dataset_dir, img_name, tfrecord_writer)
                    i += 1
                    j += 1
            fidx += 1
        print('\n ImageDB to TF conversion finished. ')
# Wrapper for inserting int64 features into Example proto.
def int64_feature(self, value):
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
# Wrapper for inserting float features into Example proto.
def float_feature(self, value):
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
# Wrapper for inserting bytes features into Example proto.
def bytes_feature(self, value):
if not isinstance(value, list):
value = [value]
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
|
5,835 | edc7c74a19a272bdd6da81b3ce2d214a2b613984 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the Pipeline class."""
# pytype: skip-file
import copy
import platform
import unittest
import mock
import pytest
import apache_beam as beam
from apache_beam import typehints
from apache_beam.coders import BytesCoder
from apache_beam.io import Read
from apache_beam.io.iobase import SourceBase
from apache_beam.options.pipeline_options import PortableOptions
from apache_beam.pipeline import Pipeline
from apache_beam.pipeline import PipelineOptions
from apache_beam.pipeline import PipelineVisitor
from apache_beam.pipeline import PTransformOverride
from apache_beam.portability import common_urns
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.pvalue import AsSingleton
from apache_beam.pvalue import TaggedOutput
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.transforms import CombineGlobally
from apache_beam.transforms import Create
from apache_beam.transforms import DoFn
from apache_beam.transforms import FlatMap
from apache_beam.transforms import Map
from apache_beam.transforms import ParDo
from apache_beam.transforms import PTransform
from apache_beam.transforms import WindowInto
from apache_beam.transforms.display import DisplayDataItem
from apache_beam.transforms.environments import ProcessEnvironment
from apache_beam.transforms.resources import ResourceHint
from apache_beam.transforms.userstate import BagStateSpec
from apache_beam.transforms.window import SlidingWindows
from apache_beam.transforms.window import TimestampedValue
from apache_beam.utils import windowed_value
from apache_beam.utils.timestamp import MIN_TIMESTAMP
class FakeUnboundedSource(SourceBase):
  """Stand-in unbounded source for graph-construction tests.

  Only the boundedness flag is implemented; reading from it at runtime is
  unsupported.
  """
  def is_bounded(self):
    # Reporting False marks PCollections derived from this source unbounded.
    return False
class DoubleParDo(beam.PTransform):
  """Composite transform that multiplies every element by two."""
  def expand(self, input):
    doubled = input | 'Inner' >> beam.Map(lambda element: element * 2)
    return doubled
  def to_runner_api_parameter(self, context):
    # Serialize by pickling instead of registering a dedicated URN payload.
    return self.to_runner_api_pickled(context)
class TripleParDo(beam.PTransform):
  """Composite transform that multiplies every element by three."""
  def expand(self, input):
    # The inner label deliberately matches DoubleParDo's so replacement
    # tests can confirm no label conflict arises from substitution.
    return input | 'Inner' >> beam.Map(lambda element: element * 3)
class ToStringParDo(beam.PTransform):
  """Composite transform that converts each element to its string form."""
  def expand(self, input):
    def stringify(element):
      # copy.copy() keeps the typehint machinery from automatically
      # inferring that the output type is str.
      return copy.copy(str(element))
    return input | 'Inner' >> beam.Map(stringify)
class FlattenAndDouble(beam.PTransform):
  """Flattens its input PCollections, then doubles every element."""
  def expand(self, pcolls):
    flattened = pcolls | beam.Flatten()
    return flattened | 'Double' >> DoubleParDo()
class FlattenAndTriple(beam.PTransform):
  """Flattens its input PCollections, then triples every element."""
  def expand(self, pcolls):
    flattened = pcolls | beam.Flatten()
    return flattened | 'Triple' >> TripleParDo()
class AddWithProductDoFn(beam.DoFn):
  """Emits ``input + a * b`` for side-input operands ``a`` and ``b``."""
  def process(self, input, a, b):
    product = a * b
    yield input + product
class AddThenMultiplyDoFn(beam.DoFn):
  """Emits ``(input + a) * b`` for side-input operands ``a`` and ``b``."""
  def process(self, input, a, b):
    shifted = input + a
    yield shifted * b
class AddThenMultiply(beam.PTransform):
  """Applies AddThenMultiplyDoFn to pvalues[0], feeding pvalues[1] and
  pvalues[2] as singleton side inputs."""
  def expand(self, pvalues):
    main_input, addend, multiplier = pvalues[0], pvalues[1], pvalues[2]
    return main_input | beam.ParDo(
        AddThenMultiplyDoFn(), AsSingleton(addend), AsSingleton(multiplier))
class PipelineTest(unittest.TestCase):
  """Tests for Pipeline construction, graph visiting, and transform overrides."""
  @staticmethod
  def custom_callable(pcoll):
    return pcoll | '+1' >> FlatMap(lambda x: [x + 1])
  # Some of these tests designate a runner by name, others supply a runner.
  # This variation is just to verify that both means of runner specification
  # work and is not related to other aspects of the tests.
  class CustomTransform(PTransform):
    # Minimal composite transform used by several tests below.
    def expand(self, pcoll):
      return pcoll | '+1' >> FlatMap(lambda x: [x + 1])
  class Visitor(PipelineVisitor):
    # Records every PCollection visited plus composite enter/leave order.
    def __init__(self, visited):
      self.visited = visited
      self.enter_composite = []
      self.leave_composite = []
    def visit_value(self, value, _):
      self.visited.append(value)
    def enter_composite_transform(self, transform_node):
      self.enter_composite.append(transform_node)
    def leave_composite_transform(self, transform_node):
      self.leave_composite.append(transform_node)
  def test_create(self):
    with TestPipeline() as pipeline:
      pcoll = pipeline | 'label1' >> Create([1, 2, 3])
      assert_that(pcoll, equal_to([1, 2, 3]))
      # Test if initial value is an iterator object.
      pcoll2 = pipeline | 'label2' >> Create(iter((4, 5, 6)))
      pcoll3 = pcoll2 | 'do' >> FlatMap(lambda x: [x + 10])
      assert_that(pcoll3, equal_to([14, 15, 16]), label='pcoll3')
  def test_flatmap_builtin(self):
    with TestPipeline() as pipeline:
      pcoll = pipeline | 'label1' >> Create([1, 2, 3])
      assert_that(pcoll, equal_to([1, 2, 3]))
      pcoll2 = pcoll | 'do' >> FlatMap(lambda x: [x + 10])
      assert_that(pcoll2, equal_to([11, 12, 13]), label='pcoll2')
      pcoll3 = pcoll2 | 'm1' >> Map(lambda x: [x, 12])
      assert_that(
          pcoll3, equal_to([[11, 12], [12, 12], [13, 12]]), label='pcoll3')
      # FlatMap over a builtin (set) flattens each [x, 12] pair.
      pcoll4 = pcoll3 | 'do2' >> FlatMap(set)
      assert_that(pcoll4, equal_to([11, 12, 12, 12, 13]), label='pcoll4')
  def test_maptuple_builtin(self):
    with TestPipeline() as pipeline:
      pcoll = pipeline | Create([('e1', 'e2')])
      side1 = beam.pvalue.AsSingleton(pipeline | 'side1' >> Create(['s1']))
      side2 = beam.pvalue.AsSingleton(pipeline | 'side2' >> Create(['s2']))
      # A test function with a tuple input, an auxiliary parameter,
      # and some side inputs.
      fn = lambda e1, e2, t=DoFn.TimestampParam, s1=None, s2=None: (
          e1, e2, t, s1, s2)
      assert_that(
          pcoll | 'NoSides' >> beam.core.MapTuple(fn),
          equal_to([('e1', 'e2', MIN_TIMESTAMP, None, None)]),
          label='NoSidesCheck')
      assert_that(
          pcoll | 'StaticSides' >> beam.core.MapTuple(fn, 's1', 's2'),
          equal_to([('e1', 'e2', MIN_TIMESTAMP, 's1', 's2')]),
          label='StaticSidesCheck')
      assert_that(
          pcoll | 'DynamicSides' >> beam.core.MapTuple(fn, side1, side2),
          equal_to([('e1', 'e2', MIN_TIMESTAMP, 's1', 's2')]),
          label='DynamicSidesCheck')
      assert_that(
          pcoll | 'MixedSides' >> beam.core.MapTuple(fn, s2=side2),
          equal_to([('e1', 'e2', MIN_TIMESTAMP, None, 's2')]),
          label='MixedSidesCheck')
  def test_flatmaptuple_builtin(self):
    with TestPipeline() as pipeline:
      pcoll = pipeline | Create([('e1', 'e2')])
      side1 = beam.pvalue.AsSingleton(pipeline | 'side1' >> Create(['s1']))
      side2 = beam.pvalue.AsSingleton(pipeline | 'side2' >> Create(['s2']))
      # A test function with a tuple input, an auxiliary parameter,
      # and some side inputs.
      fn = lambda e1, e2, t=DoFn.TimestampParam, s1=None, s2=None: (
          e1, e2, t, s1, s2)
      assert_that(
          pcoll | 'NoSides' >> beam.core.FlatMapTuple(fn),
          equal_to(['e1', 'e2', MIN_TIMESTAMP, None, None]),
          label='NoSidesCheck')
      assert_that(
          pcoll | 'StaticSides' >> beam.core.FlatMapTuple(fn, 's1', 's2'),
          equal_to(['e1', 'e2', MIN_TIMESTAMP, 's1', 's2']),
          label='StaticSidesCheck')
      assert_that(
          pcoll
          | 'DynamicSides' >> beam.core.FlatMapTuple(fn, side1, side2),
          equal_to(['e1', 'e2', MIN_TIMESTAMP, 's1', 's2']),
          label='DynamicSidesCheck')
      assert_that(
          pcoll | 'MixedSides' >> beam.core.FlatMapTuple(fn, s2=side2),
          equal_to(['e1', 'e2', MIN_TIMESTAMP, None, 's2']),
          label='MixedSidesCheck')
  def test_create_singleton_pcollection(self):
    with TestPipeline() as pipeline:
      pcoll = pipeline | 'label' >> Create([[1, 2, 3]])
      assert_that(pcoll, equal_to([[1, 2, 3]]))
  def test_visit_entire_graph(self):
    pipeline = Pipeline()
    pcoll1 = pipeline | 'pcoll' >> beam.Impulse()
    pcoll2 = pcoll1 | 'do1' >> FlatMap(lambda x: [x + 1])
    pcoll3 = pcoll2 | 'do2' >> FlatMap(lambda x: [x + 1])
    pcoll4 = pcoll2 | 'do3' >> FlatMap(lambda x: [x + 1])
    transform = PipelineTest.CustomTransform()
    pcoll5 = pcoll4 | transform
    visitor = PipelineTest.Visitor(visited=[])
    pipeline.visit(visitor)
    self.assertEqual({pcoll1, pcoll2, pcoll3, pcoll4, pcoll5},
                     set(visitor.visited))
    self.assertEqual(set(visitor.enter_composite), set(visitor.leave_composite))
    self.assertEqual(2, len(visitor.enter_composite))
    self.assertEqual(visitor.enter_composite[1].transform, transform)
    self.assertEqual(visitor.leave_composite[0].transform, transform)
  def test_apply_custom_transform(self):
    with TestPipeline() as pipeline:
      pcoll = pipeline | 'pcoll' >> Create([1, 2, 3])
      result = pcoll | PipelineTest.CustomTransform()
      assert_that(result, equal_to([2, 3, 4]))
  def test_reuse_custom_transform_instance(self):
    # Applying the same transform instance twice without a fresh label
    # must raise, with a message telling the user how to relabel.
    pipeline = Pipeline()
    pcoll1 = pipeline | 'pcoll1' >> Create([1, 2, 3])
    pcoll2 = pipeline | 'pcoll2' >> Create([4, 5, 6])
    transform = PipelineTest.CustomTransform()
    pcoll1 | transform
    with self.assertRaises(RuntimeError) as cm:
      pipeline.apply(transform, pcoll2)
    self.assertEqual(
        cm.exception.args[0],
        'A transform with label "CustomTransform" already exists in the '
        'pipeline. To apply a transform with a specified label write '
        'pvalue | "label" >> transform')
  def test_reuse_cloned_custom_transform_instance(self):
    with TestPipeline() as pipeline:
      pcoll1 = pipeline | 'pc1' >> Create([1, 2, 3])
      pcoll2 = pipeline | 'pc2' >> Create([4, 5, 6])
      transform = PipelineTest.CustomTransform()
      result1 = pcoll1 | transform
      result2 = pcoll2 | 'new_label' >> transform
      assert_that(result1, equal_to([2, 3, 4]), label='r1')
      assert_that(result2, equal_to([5, 6, 7]), label='r2')
  def test_transform_no_super_init(self):
    class AddSuffix(PTransform):
      def __init__(self, suffix):
        # No call to super(...).__init__
        self.suffix = suffix
      def expand(self, pcoll):
        return pcoll | Map(lambda x: x + self.suffix)
    self.assertEqual(['a-x', 'b-x', 'c-x'],
                     sorted(['a', 'b', 'c'] | 'AddSuffix' >> AddSuffix('-x')))
  @unittest.skip("Fails on some platforms with new urllib3.")
  def test_memory_usage(self):
    try:
      import resource
    except ImportError:
      # Skip the test if resource module is not available (e.g. non-Unix os).
      self.skipTest('resource module not available.')
    if platform.mac_ver()[0]:
      # Skip the test on macos, depending on version it returns ru_maxrss in
      # different units.
      self.skipTest('ru_maxrss is not in standard units.')
    def get_memory_usage_in_bytes():
      return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss * (2**10)
    def check_memory(value, memory_threshold):
      memory_usage = get_memory_usage_in_bytes()
      if memory_usage > memory_threshold:
        raise RuntimeError(
            'High memory usage: %d > %d' % (memory_usage, memory_threshold))
      return value
    len_elements = 1000000
    num_elements = 10
    num_maps = 100
    # TODO(robertwb): reduce memory usage of FnApiRunner so that this test
    # passes.
    with TestPipeline(runner='BundleBasedDirectRunner') as pipeline:
      # Consumed memory should not be proportional to the number of maps.
      memory_threshold = (
          get_memory_usage_in_bytes() + (5 * len_elements * num_elements))
      # Plus small additional slack for memory fluctuations during the test.
      memory_threshold += 10 * (2**20)
      biglist = pipeline | 'oom:create' >> Create(
          ['x' * len_elements] * num_elements)
      for i in range(num_maps):
        biglist = biglist | ('oom:addone-%d' % i) >> Map(lambda x: x + 'y')
      result = biglist | 'oom:check' >> Map(check_memory, memory_threshold)
      assert_that(
          result,
          equal_to(['x' * len_elements + 'y' * num_maps] * num_elements))
  def test_aggregator_empty_input(self):
    actual = [] | CombineGlobally(max).without_defaults()
    self.assertEqual(actual, [])
  def test_pipeline_as_context(self):
    def raise_exception(exn):
      raise exn
    with self.assertRaises(ValueError):
      with Pipeline() as p:
        # pylint: disable=expression-not-assigned
        p | Create([ValueError('msg')]) | Map(raise_exception)
  def test_ptransform_overrides(self):
    class MyParDoOverride(PTransformOverride):
      def matches(self, applied_ptransform):
        return isinstance(applied_ptransform.transform, DoubleParDo)
      def get_replacement_transform_for_applied_ptransform(
          self, applied_ptransform):
        ptransform = applied_ptransform.transform
        if isinstance(ptransform, DoubleParDo):
          return TripleParDo()
        raise ValueError('Unsupported type of transform: %r' % ptransform)
    p = Pipeline()
    pcoll = p | beam.Create([1, 2, 3]) | 'Multiply' >> DoubleParDo()
    # Expected values reflect the *replacement* (TripleParDo) behavior.
    assert_that(pcoll, equal_to([3, 6, 9]))
    p.replace_all([MyParDoOverride()])
    p.run()
  def test_ptransform_override_type_hints(self):
    class NoTypeHintOverride(PTransformOverride):
      def matches(self, applied_ptransform):
        return isinstance(applied_ptransform.transform, DoubleParDo)
      def get_replacement_transform_for_applied_ptransform(
          self, applied_ptransform):
        return ToStringParDo()
    class WithTypeHintOverride(PTransformOverride):
      def matches(self, applied_ptransform):
        return isinstance(applied_ptransform.transform, DoubleParDo)
      def get_replacement_transform_for_applied_ptransform(
          self, applied_ptransform):
        return ToStringParDo().with_input_types(int).with_output_types(str)
    for override, expected_type in [(NoTypeHintOverride(), int),
                                    (WithTypeHintOverride(), str)]:
      p = TestPipeline()
      pcoll = (
          p
          | beam.Create([1, 2, 3])
          | 'Operate' >> DoubleParDo()
          | 'NoOp' >> beam.Map(lambda x: x))
      p.replace_all([override])
      self.assertEqual(pcoll.producer.inputs[0].element_type, expected_type)
  def test_ptransform_override_multiple_inputs(self):
    class MyParDoOverride(PTransformOverride):
      def matches(self, applied_ptransform):
        return isinstance(applied_ptransform.transform, FlattenAndDouble)
      def get_replacement_transform(self, applied_ptransform):
        return FlattenAndTriple()
    p = Pipeline()
    pcoll1 = p | 'pc1' >> beam.Create([1, 2, 3])
    pcoll2 = p | 'pc2' >> beam.Create([4, 5, 6])
    pcoll3 = (pcoll1, pcoll2) | 'FlattenAndMultiply' >> FlattenAndDouble()
    assert_that(pcoll3, equal_to([3, 6, 9, 12, 15, 18]))
    p.replace_all([MyParDoOverride()])
    p.run()
  def test_ptransform_override_side_inputs(self):
    class MyParDoOverride(PTransformOverride):
      def matches(self, applied_ptransform):
        return (
            isinstance(applied_ptransform.transform, ParDo) and
            isinstance(applied_ptransform.transform.fn, AddWithProductDoFn))
      def get_replacement_transform(self, transform):
        return AddThenMultiply()
    p = Pipeline()
    pcoll1 = p | 'pc1' >> beam.Create([2])
    pcoll2 = p | 'pc2' >> beam.Create([3])
    pcoll3 = p | 'pc3' >> beam.Create([4, 5, 6])
    result = pcoll3 | 'Operate' >> beam.ParDo(
        AddWithProductDoFn(), AsSingleton(pcoll1), AsSingleton(pcoll2))
    # (x + 2) * 3 — the replacement's semantics.
    assert_that(result, equal_to([18, 21, 24]))
    p.replace_all([MyParDoOverride()])
    p.run()
  def test_ptransform_override_replacement_inputs(self):
    class MyParDoOverride(PTransformOverride):
      def matches(self, applied_ptransform):
        return (
            isinstance(applied_ptransform.transform, ParDo) and
            isinstance(applied_ptransform.transform.fn, AddWithProductDoFn))
      def get_replacement_transform(self, transform):
        return AddThenMultiply()
      def get_replacement_inputs(self, applied_ptransform):
        assert len(applied_ptransform.inputs) == 1
        assert len(applied_ptransform.side_inputs) == 2
        # Swap the order of the two side inputs
        return (
            applied_ptransform.inputs[0],
            applied_ptransform.side_inputs[1].pvalue,
            applied_ptransform.side_inputs[0].pvalue)
    p = Pipeline()
    pcoll1 = p | 'pc1' >> beam.Create([2])
    pcoll2 = p | 'pc2' >> beam.Create([3])
    pcoll3 = p | 'pc3' >> beam.Create([4, 5, 6])
    result = pcoll3 | 'Operate' >> beam.ParDo(
        AddWithProductDoFn(), AsSingleton(pcoll1), AsSingleton(pcoll2))
    # (x + 3) * 2 — side inputs swapped by get_replacement_inputs.
    assert_that(result, equal_to([14, 16, 18]))
    p.replace_all([MyParDoOverride()])
    p.run()
  def test_ptransform_override_multiple_outputs(self):
    class MultiOutputComposite(PTransform):
      def __init__(self):
        self.output_tags = set()
      def expand(self, pcoll):
        def mux_input(x):
          x = x * 2
          if isinstance(x, int):
            yield TaggedOutput('numbers', x)
          else:
            yield TaggedOutput('letters', x)
        multi = pcoll | 'MyReplacement' >> beam.ParDo(mux_input).with_outputs()
        letters = multi.letters | 'LettersComposite' >> beam.Map(
            lambda x: x * 3)
        numbers = multi.numbers | 'NumbersComposite' >> beam.Map(
            lambda x: x * 5)
        return {
            'letters': letters,
            'numbers': numbers,
        }
    class MultiOutputOverride(PTransformOverride):
      def matches(self, applied_ptransform):
        return applied_ptransform.full_label == 'MyMultiOutput'
      def get_replacement_transform_for_applied_ptransform(
          self, applied_ptransform):
        return MultiOutputComposite()
    def mux_input(x):
      if isinstance(x, int):
        yield TaggedOutput('numbers', x)
      else:
        yield TaggedOutput('letters', x)
    with TestPipeline() as p:
      multi = (
          p
          | beam.Create([1, 2, 3, 'a', 'b', 'c'])
          | 'MyMultiOutput' >> beam.ParDo(mux_input).with_outputs())
      letters = multi.letters | 'MyLetters' >> beam.Map(lambda x: x)
      numbers = multi.numbers | 'MyNumbers' >> beam.Map(lambda x: x)
      # Assert that the PCollection replacement worked correctly and that
      # elements are flowing through. The replacement transform first
      # multiples by 2 then the leaf nodes inside the composite multiply by
      # an additional 3 and 5. Use prime numbers to ensure that each
      # transform is getting executed once.
      assert_that(
          letters,
          equal_to(['a' * 2 * 3, 'b' * 2 * 3, 'c' * 2 * 3]),
          label='assert letters')
      assert_that(
          numbers,
          equal_to([1 * 2 * 5, 2 * 2 * 5, 3 * 2 * 5]),
          label='assert numbers')
      # Do the replacement and run the element assertions.
      p.replace_all([MultiOutputOverride()])
    # The following checks the graph to make sure the replacement occurred.
    visitor = PipelineTest.Visitor(visited=[])
    p.visit(visitor)
    pcollections = visitor.visited
    composites = visitor.enter_composite
    # Assert the replacement is in the composite list and retrieve the
    # AppliedPTransform.
    self.assertIn(
        MultiOutputComposite, [t.transform.__class__ for t in composites])
    multi_output_composite = list(
        filter(
            lambda t: t.transform.__class__ == MultiOutputComposite,
            composites))[0]
    # Assert that all of the replacement PCollections are in the graph.
    for output in multi_output_composite.outputs.values():
      self.assertIn(output, pcollections)
    # Assert that all of the "old"/replaced PCollections are not in the graph.
    self.assertNotIn(multi[None], visitor.visited)
    self.assertNotIn(multi.letters, visitor.visited)
    self.assertNotIn(multi.numbers, visitor.visited)
  def test_kv_ptransform_honor_type_hints(self):
    # The return type of this DoFn cannot be inferred by the default
    # Beam type inference
    class StatefulDoFn(DoFn):
      BYTES_STATE = BagStateSpec('bytes', BytesCoder())
      def return_recursive(self, count):
        if count == 0:
          return ["some string"]
        else:
          # NOTE(review): the recursive branch has no `return`, so this
          # yields None for count > 0 — looks intentional for defeating
          # type inference, but confirm.
          self.return_recursive(count - 1)
      def process(self, element, counter=DoFn.StateParam(BYTES_STATE)):
        return self.return_recursive(1)
    with TestPipeline() as p:
      pcoll = (
          p
          | beam.Create([(1, 1), (2, 2), (3, 3)])
          | beam.GroupByKey()
          | beam.ParDo(StatefulDoFn()))
    self.assertEqual(pcoll.element_type, typehints.Any)
    with TestPipeline() as p:
      pcoll = (
          p
          | beam.Create([(1, 1), (2, 2), (3, 3)])
          | beam.GroupByKey()
          | beam.ParDo(StatefulDoFn()).with_output_types(str))
    self.assertEqual(pcoll.element_type, str)
  def test_track_pcoll_unbounded(self):
    pipeline = TestPipeline()
    pcoll1 = pipeline | 'read' >> Read(FakeUnboundedSource())
    pcoll2 = pcoll1 | 'do1' >> FlatMap(lambda x: [x + 1])
    pcoll3 = pcoll2 | 'do2' >> FlatMap(lambda x: [x + 1])
    self.assertIs(pcoll1.is_bounded, False)
    self.assertIs(pcoll2.is_bounded, False)
    self.assertIs(pcoll3.is_bounded, False)
  def test_track_pcoll_bounded(self):
    pipeline = TestPipeline()
    pcoll1 = pipeline | 'label1' >> Create([1, 2, 3])
    pcoll2 = pcoll1 | 'do1' >> FlatMap(lambda x: [x + 1])
    pcoll3 = pcoll2 | 'do2' >> FlatMap(lambda x: [x + 1])
    self.assertIs(pcoll1.is_bounded, True)
    self.assertIs(pcoll2.is_bounded, True)
    self.assertIs(pcoll3.is_bounded, True)
  def test_track_pcoll_bounded_flatten(self):
    pipeline = TestPipeline()
    pcoll1_a = pipeline | 'label_a' >> Create([1, 2, 3])
    pcoll2_a = pcoll1_a | 'do_a' >> FlatMap(lambda x: [x + 1])
    pcoll1_b = pipeline | 'label_b' >> Create([1, 2, 3])
    pcoll2_b = pcoll1_b | 'do_b' >> FlatMap(lambda x: [x + 1])
    merged = (pcoll2_a, pcoll2_b) | beam.Flatten()
    self.assertIs(pcoll1_a.is_bounded, True)
    self.assertIs(pcoll2_a.is_bounded, True)
    self.assertIs(pcoll1_b.is_bounded, True)
    self.assertIs(pcoll2_b.is_bounded, True)
    self.assertIs(merged.is_bounded, True)
  def test_track_pcoll_unbounded_flatten(self):
    # One unbounded input makes the flattened output unbounded.
    pipeline = TestPipeline()
    pcoll1_bounded = pipeline | 'label1' >> Create([1, 2, 3])
    pcoll2_bounded = pcoll1_bounded | 'do1' >> FlatMap(lambda x: [x + 1])
    pcoll1_unbounded = pipeline | 'read' >> Read(FakeUnboundedSource())
    pcoll2_unbounded = pcoll1_unbounded | 'do2' >> FlatMap(lambda x: [x + 1])
    merged = (pcoll2_bounded, pcoll2_unbounded) | beam.Flatten()
    self.assertIs(pcoll1_bounded.is_bounded, True)
    self.assertIs(pcoll2_bounded.is_bounded, True)
    self.assertIs(pcoll1_unbounded.is_bounded, False)
    self.assertIs(pcoll2_unbounded.is_bounded, False)
    self.assertIs(merged.is_bounded, False)
  def test_incompatible_submission_and_runtime_envs_fail_pipeline(self):
    # Force a different SDK base version on every call so the
    # construction-time and runtime environments never match.
    with mock.patch(
        'apache_beam.transforms.environments.sdk_base_version_capability'
    ) as base_version:
      base_version.side_effect = [
          f"beam:version:sdk_base:apache/beam_python3.5_sdk:2.{i}.0"
          for i in range(100)
      ]
      with self.assertRaisesRegex(
          RuntimeError,
          'Pipeline construction environment and pipeline runtime '
          'environment are not compatible.'):
        with TestPipeline() as p:
          _ = p | Create([None])
class DoFnTest(unittest.TestCase):
  """Tests for DoFn parameter plumbing: side inputs and DoFn.*Param values."""
  def test_element(self):
    class TestDoFn(DoFn):
      def process(self, element):
        yield element + 10
    with TestPipeline() as pipeline:
      pcoll = pipeline | 'Create' >> Create([1, 2]) | 'Do' >> ParDo(TestDoFn())
      assert_that(pcoll, equal_to([11, 12]))
  def test_side_input_no_tag(self):
    class TestDoFn(DoFn):
      def process(self, element, prefix, suffix):
        return ['%s-%s-%s' % (prefix, element, suffix)]
    with TestPipeline() as pipeline:
      words_list = ['aa', 'bb', 'cc']
      words = pipeline | 'SomeWords' >> Create(words_list)
      prefix = 'zyx'
      suffix = pipeline | 'SomeString' >> Create(['xyz'])  # side in
      result = words | 'DecorateWordsDoFnNoTag' >> ParDo(
          TestDoFn(), prefix, suffix=AsSingleton(suffix))
      assert_that(result, equal_to(['zyx-%s-xyz' % x for x in words_list]))
  def test_side_input_tagged(self):
    # NOTE(review): near-duplicate of test_side_input_no_tag; the only
    # difference is the explicit DoFn.SideInputParam default on `suffix`.
    class TestDoFn(DoFn):
      def process(self, element, prefix, suffix=DoFn.SideInputParam):
        return ['%s-%s-%s' % (prefix, element, suffix)]
    with TestPipeline() as pipeline:
      words_list = ['aa', 'bb', 'cc']
      words = pipeline | 'SomeWords' >> Create(words_list)
      prefix = 'zyx'
      suffix = pipeline | 'SomeString' >> Create(['xyz'])  # side in
      result = words | 'DecorateWordsDoFnNoTag' >> ParDo(
          TestDoFn(), prefix, suffix=AsSingleton(suffix))
      assert_that(result, equal_to(['zyx-%s-xyz' % x for x in words_list]))
  @pytest.mark.it_validatesrunner
  def test_element_param(self):
    pipeline = TestPipeline()
    input = [1, 2]
    pcoll = (
        pipeline
        | 'Create' >> Create(input)
        | 'Ele param' >> Map(lambda element=DoFn.ElementParam: element))
    assert_that(pcoll, equal_to(input))
    pipeline.run()
  @pytest.mark.it_validatesrunner
  def test_key_param(self):
    pipeline = TestPipeline()
    pcoll = (
        pipeline
        | 'Create' >> Create([('a', 1), ('b', 2)])
        | 'Key param' >> Map(lambda _, key=DoFn.KeyParam: key))
    assert_that(pcoll, equal_to(['a', 'b']))
    pipeline.run()
  def test_window_param(self):
    class TestDoFn(DoFn):
      def process(self, element, window=DoFn.WindowParam):
        yield (element, (float(window.start), float(window.end)))
    with TestPipeline() as pipeline:
      # Each element lands in two overlapping sliding windows.
      pcoll = (
          pipeline
          | Create([1, 7])
          | Map(lambda x: TimestampedValue(x, x))
          | WindowInto(windowfn=SlidingWindows(10, 5))
          | ParDo(TestDoFn()))
      assert_that(
          pcoll,
          equal_to([(1, (-5, 5)), (1, (0, 10)), (7, (0, 10)), (7, (5, 15))]))
      pcoll2 = pcoll | 'Again' >> ParDo(TestDoFn())
      assert_that(
          pcoll2,
          equal_to([((1, (-5, 5)), (-5, 5)), ((1, (0, 10)), (0, 10)),
                    ((7, (0, 10)), (0, 10)), ((7, (5, 15)), (5, 15))]),
          label='doubled windows')
  def test_timestamp_param(self):
    class TestDoFn(DoFn):
      def process(self, element, timestamp=DoFn.TimestampParam):
        yield timestamp
    with TestPipeline() as pipeline:
      pcoll = pipeline | 'Create' >> Create([1, 2]) | 'Do' >> ParDo(TestDoFn())
      assert_that(pcoll, equal_to([MIN_TIMESTAMP, MIN_TIMESTAMP]))
  def test_timestamp_param_map(self):
    with TestPipeline() as p:
      assert_that(
          p | Create([1, 2]) | beam.Map(lambda _, t=DoFn.TimestampParam: t),
          equal_to([MIN_TIMESTAMP, MIN_TIMESTAMP]))
  def test_pane_info_param(self):
    with TestPipeline() as p:
      pc = p | Create([(None, None)])
      assert_that(
          pc | beam.Map(lambda _, p=DoFn.PaneInfoParam: p),
          equal_to([windowed_value.PANE_INFO_UNKNOWN]),
          label='CheckUngrouped')
      assert_that(
          pc | beam.GroupByKey() | beam.Map(lambda _, p=DoFn.PaneInfoParam: p),
          equal_to([
              windowed_value.PaneInfo(
                  is_first=True,
                  is_last=True,
                  timing=windowed_value.PaneInfoTiming.ON_TIME,
                  index=0,
                  nonspeculative_index=0)
          ]),
          label='CheckGrouped')
  def test_incomparable_default(self):
    class IncomparableType(object):
      def __eq__(self, other):
        raise RuntimeError()
      def __ne__(self, other):
        raise RuntimeError()
      def __hash__(self):
        raise RuntimeError()
    # Ensure that we don't use default values in a context where they must be
    # comparable (see BEAM-8301).
    with TestPipeline() as pipeline:
      pcoll = (
          pipeline
          | beam.Create([None])
          | Map(lambda e, x=IncomparableType(): (e, type(x).__name__)))
      assert_that(pcoll, equal_to([(None, 'IncomparableType')]))
class Bacon(PipelineOptions):
  """Options subclass declaring an integer --slices flag."""
  @classmethod
  def _add_argparse_args(cls, parser):
    parser.add_argument('--slices', type=int)
class Eggs(PipelineOptions):
  """Options subclass declaring a --style flag with a default value."""
  @classmethod
  def _add_argparse_args(cls, parser):
    parser.add_argument('--style', default='scrambled')
class Breakfast(Bacon, Eggs):
  """Combines the Bacon and Eggs option sets via multiple inheritance."""
  pass
class PipelineOptionsTest(unittest.TestCase):
  """Tests flag/keyword parsing and view_as projections of PipelineOptions."""
  def test_flag_parsing(self):
    # Unknown flags ('--ignored') are silently dropped.
    options = Breakfast(['--slices=3', '--style=sunny side up', '--ignored'])
    self.assertEqual(3, options.slices)
    self.assertEqual('sunny side up', options.style)
  def test_keyword_parsing(self):
    # Keyword arguments take precedence over parsed flags.
    options = Breakfast(['--slices=3', '--style=sunny side up', '--ignored'],
                        slices=10)
    self.assertEqual(10, options.slices)
    self.assertEqual('sunny side up', options.style)
  def test_attribute_setting(self):
    options = Breakfast(slices=10)
    self.assertEqual(10, options.slices)
    options.slices = 20
    self.assertEqual(20, options.slices)
  def test_view_as(self):
    # All views share underlying state; a write through one view is
    # visible through every other view of the same options.
    generic_options = PipelineOptions(['--slices=3'])
    self.assertEqual(3, generic_options.view_as(Bacon).slices)
    self.assertEqual(3, generic_options.view_as(Breakfast).slices)
    generic_options.view_as(Breakfast).slices = 10
    self.assertEqual(10, generic_options.view_as(Bacon).slices)
    with self.assertRaises(AttributeError):
      generic_options.slices  # pylint: disable=pointless-statement
    with self.assertRaises(AttributeError):
      generic_options.view_as(Eggs).slices  # pylint: disable=expression-not-assigned
  def test_defaults(self):
    options = Breakfast(['--slices=3'])
    self.assertEqual(3, options.slices)
    self.assertEqual('scrambled', options.style)
  def test_dir(self):
    # dir() on a view exposes only the public API plus that view's flags.
    options = Breakfast()
    self.assertEqual({
        'from_dictionary',
        'get_all_options',
        'slices',
        'style',
        'view_as',
        'display_data'
    },
                     {
                         attr
                         for attr in dir(options)
                         if not attr.startswith('_') and attr != 'next'
                     })
    self.assertEqual({
        'from_dictionary',
        'get_all_options',
        'style',
        'view_as',
        'display_data'
    },
                     {
                         attr
                         for attr in dir(options.view_as(Eggs))
                         if not attr.startswith('_') and attr != 'next'
                     })
class RunnerApiTest(unittest.TestCase):
  """Tests for Pipeline.to_runner_api / from_runner_api proto round-trips."""

  def test_parent_pointer(self):
    """from_runner_api must rebuild parent links in the transform tree."""
    class MyPTransform(beam.PTransform):
      def expand(self, p):
        self.p = p
        return p | beam.Create([None])

    p = beam.Pipeline()
    p | MyPTransform()  # pylint: disable=expression-not-assigned
    p = Pipeline.from_runner_api(
        Pipeline.to_runner_api(p, use_fake_coders=True), None, None)
    self.assertIsNotNone(p.transforms_stack[0].parts[0].parent)
    self.assertEqual(
        p.transforms_stack[0].parts[0].parent, p.transforms_stack[0])

  def test_requirements(self):
    """A DoFn using BundleFinalizerParam must add the finalization URN."""
    p = beam.Pipeline()
    _ = (
        p | beam.Create([])
        | beam.ParDo(lambda x, finalize=beam.DoFn.BundleFinalizerParam: None))
    proto = p.to_runner_api()
    # Fixed: this was assertTrue(urn, proto.requirements) — the second
    # argument of assertTrue is the failure *message*, so the check passed
    # vacuously for any truthy urn. The intent is a membership check.
    self.assertIn(
        common_urns.requirements.REQUIRES_BUNDLE_FINALIZATION.urn,
        proto.requirements)

  def test_annotations(self):
    """PTransform.annotations() values must survive proto translation."""
    some_proto = BytesCoder().to_runner_api(None)

    class EmptyTransform(beam.PTransform):
      def expand(self, pcoll):
        return pcoll

      def annotations(self):
        return {'foo': 'some_string'}

    class NonEmptyTransform(beam.PTransform):
      def expand(self, pcoll):
        return pcoll | beam.Map(lambda x: x)

      def annotations(self):
        return {
            'foo': b'some_bytes',
            'proto': some_proto,
        }

    p = beam.Pipeline()
    _ = p | beam.Create([]) | EmptyTransform() | NonEmptyTransform()
    proto = p.to_runner_api()

    seen = 0
    for transform in proto.components.transforms.values():
      if transform.unique_name == 'EmptyTransform':
        seen += 1
        # str annotations are utf-8 encoded during translation.
        self.assertEqual(transform.annotations['foo'], b'some_string')
      elif transform.unique_name == 'NonEmptyTransform':
        seen += 1
        self.assertEqual(transform.annotations['foo'], b'some_bytes')
        self.assertEqual(
            transform.annotations['proto'], some_proto.SerializeToString())
    self.assertEqual(seen, 2)

  def test_transform_ids(self):
    """Generated transform ids must only use URL-safe characters."""
    class MyPTransform(beam.PTransform):
      def expand(self, p):
        self.p = p
        return p | beam.Create([None])

    p = beam.Pipeline()
    p | MyPTransform()  # pylint: disable=expression-not-assigned
    runner_api_proto = Pipeline.to_runner_api(p)

    for transform_id in runner_api_proto.components.transforms:
      self.assertRegex(transform_id, r'[a-zA-Z0-9-_]+')

  def test_input_names(self):
    """Dict-keyed inputs to a composite must keep their tags in the proto."""
    class MyPTransform(beam.PTransform):
      def expand(self, pcolls):
        return pcolls.values() | beam.Flatten()

    p = beam.Pipeline()
    input_names = set('ABC')
    inputs = {x: p | x >> beam.Create([x]) for x in input_names}
    inputs | MyPTransform()  # pylint: disable=expression-not-assigned
    runner_api_proto = Pipeline.to_runner_api(p)

    for transform_proto in runner_api_proto.components.transforms.values():
      if transform_proto.unique_name == 'MyPTransform':
        self.assertEqual(set(transform_proto.inputs.keys()), input_names)
        break
    else:
      self.fail('Unable to find transform.')

  def test_display_data(self):
    """display_data() (including inherited items) must map to LabelledPayloads."""
    class MyParentTransform(beam.PTransform):
      def expand(self, p):
        self.p = p
        return p | beam.Create([None])

      def display_data(self):  # type: () -> dict
        parent_dd = super().display_data()
        parent_dd['p_dd_string'] = DisplayDataItem(
            'p_dd_string_value', label='p_dd_string_label')
        parent_dd['p_dd_string_2'] = DisplayDataItem('p_dd_string_value_2')
        parent_dd['p_dd_bool'] = DisplayDataItem(True, label='p_dd_bool_label')
        parent_dd['p_dd_int'] = DisplayDataItem(1, label='p_dd_int_label')
        return parent_dd

    class MyPTransform(MyParentTransform):
      def expand(self, p):
        self.p = p
        return p | beam.Create([None])

      def display_data(self):  # type: () -> dict
        parent_dd = super().display_data()
        parent_dd['dd_string'] = DisplayDataItem(
            'dd_string_value', label='dd_string_label')
        parent_dd['dd_string_2'] = DisplayDataItem('dd_string_value_2')
        parent_dd['dd_bool'] = DisplayDataItem(False, label='dd_bool_label')
        parent_dd['dd_double'] = DisplayDataItem(1.1, label='dd_double_label')
        return parent_dd

    p = beam.Pipeline()
    p | MyPTransform()  # pylint: disable=expression-not-assigned
    proto_pipeline = Pipeline.to_runner_api(p, use_fake_coders=True)
    my_transform, = [
        transform
        for transform in proto_pipeline.components.transforms.values()
        if transform.unique_name == 'MyPTransform'
    ]
    self.assertIsNotNone(my_transform)
    # Items without an explicit label fall back to their key as the label.
    self.assertListEqual(
        list(my_transform.display_data),
        [
            beam_runner_api_pb2.DisplayData(
                urn=common_urns.StandardDisplayData.DisplayData.LABELLED.urn,
                payload=beam_runner_api_pb2.LabelledPayload(
                    label='p_dd_string_label',
                    key='p_dd_string',
                    namespace='apache_beam.pipeline_test.MyPTransform',
                    string_value='p_dd_string_value').SerializeToString()),
            beam_runner_api_pb2.DisplayData(
                urn=common_urns.StandardDisplayData.DisplayData.LABELLED.urn,
                payload=beam_runner_api_pb2.LabelledPayload(
                    label='p_dd_string_2',
                    key='p_dd_string_2',
                    namespace='apache_beam.pipeline_test.MyPTransform',
                    string_value='p_dd_string_value_2').SerializeToString()),
            beam_runner_api_pb2.DisplayData(
                urn=common_urns.StandardDisplayData.DisplayData.LABELLED.urn,
                payload=beam_runner_api_pb2.LabelledPayload(
                    label='p_dd_bool_label',
                    key='p_dd_bool',
                    namespace='apache_beam.pipeline_test.MyPTransform',
                    bool_value=True).SerializeToString()),
            beam_runner_api_pb2.DisplayData(
                urn=common_urns.StandardDisplayData.DisplayData.LABELLED.urn,
                payload=beam_runner_api_pb2.LabelledPayload(
                    label='p_dd_int_label',
                    key='p_dd_int',
                    namespace='apache_beam.pipeline_test.MyPTransform',
                    int_value=1).SerializeToString()),
            beam_runner_api_pb2.DisplayData(
                urn=common_urns.StandardDisplayData.DisplayData.LABELLED.urn,
                payload=beam_runner_api_pb2.LabelledPayload(
                    label='dd_string_label',
                    key='dd_string',
                    namespace='apache_beam.pipeline_test.MyPTransform',
                    string_value='dd_string_value').SerializeToString()),
            beam_runner_api_pb2.DisplayData(
                urn=common_urns.StandardDisplayData.DisplayData.LABELLED.urn,
                payload=beam_runner_api_pb2.LabelledPayload(
                    label='dd_string_2',
                    key='dd_string_2',
                    namespace='apache_beam.pipeline_test.MyPTransform',
                    string_value='dd_string_value_2').SerializeToString()),
            beam_runner_api_pb2.DisplayData(
                urn=common_urns.StandardDisplayData.DisplayData.LABELLED.urn,
                payload=beam_runner_api_pb2.LabelledPayload(
                    label='dd_bool_label',
                    key='dd_bool',
                    namespace='apache_beam.pipeline_test.MyPTransform',
                    bool_value=False).SerializeToString()),
            beam_runner_api_pb2.DisplayData(
                urn=common_urns.StandardDisplayData.DisplayData.LABELLED.urn,
                payload=beam_runner_api_pb2.LabelledPayload(
                    label='dd_double_label',
                    key='dd_double',
                    namespace='apache_beam.pipeline_test.MyPTransform',
                    double_value=1.1).SerializeToString()),
        ])

  def test_runner_api_roundtrip_preserves_resource_hints(self):
    """Resource hints must survive repeated to/from_runner_api round-trips."""
    p = beam.Pipeline()
    _ = (
        p | beam.Create([1, 2])
        | beam.Map(lambda x: x + 1).with_resource_hints(accelerator='gpu'))
    self.assertEqual(
        p.transforms_stack[0].parts[1].transform.get_resource_hints(),
        {common_urns.resource_hints.ACCELERATOR.urn: b'gpu'})
    for _ in range(3):
      # Verify that DEFAULT environments are recreated during multiple
      # RunnerAPI translations and hints don't get lost.
      p = Pipeline.from_runner_api(Pipeline.to_runner_api(p), None, None)
      self.assertEqual(
          p.transforms_stack[0].parts[1].transform.get_resource_hints(),
          {common_urns.resource_hints.ACCELERATOR.urn: b'gpu'})

  def test_hints_on_composite_transforms_are_propagated_to_subtransforms(self):
    """Outer hints flow into subtransforms; inner/mergeable hints win per rule."""
    class FooHint(ResourceHint):
      urn = 'foo_urn'

    class BarHint(ResourceHint):
      urn = 'bar_urn'

    class BazHint(ResourceHint):
      urn = 'baz_urn'

    class QuxHint(ResourceHint):
      urn = 'qux_urn'

    class UseMaxValueHint(ResourceHint):
      urn = 'use_max_value_urn'

      @classmethod
      def get_merged_value(
          cls, outer_value, inner_value):  # type: (bytes, bytes) -> bytes
        return ResourceHint._use_max(outer_value, inner_value)

    ResourceHint.register_resource_hint('foo_hint', FooHint)
    ResourceHint.register_resource_hint('bar_hint', BarHint)
    ResourceHint.register_resource_hint('baz_hint', BazHint)
    ResourceHint.register_resource_hint('qux_hint', QuxHint)
    ResourceHint.register_resource_hint('use_max_value_hint', UseMaxValueHint)

    @beam.ptransform_fn
    def SubTransform(pcoll):
      return pcoll | beam.Map(lambda x: x + 1).with_resource_hints(
          foo_hint='set_on_subtransform', use_max_value_hint='10')

    @beam.ptransform_fn
    def CompositeTransform(pcoll):
      return pcoll | beam.Map(lambda x: x * 2) | SubTransform()

    p = beam.Pipeline()
    _ = (
        p | beam.Create([1, 2])
        | CompositeTransform().with_resource_hints(
            foo_hint='should_be_overriden_by_subtransform',
            bar_hint='set_on_composite',
            baz_hint='set_on_composite',
            use_max_value_hint='100'))
    options = PortableOptions([
        '--resource_hint=baz_hint=should_be_overriden_by_composite',
        '--resource_hint=qux_hint=set_via_options',
        '--environment_type=PROCESS',
        '--environment_option=process_command=foo',
        '--sdk_location=container',
    ])
    environment = ProcessEnvironment.from_options(options)
    proto = Pipeline.to_runner_api(p, default_environment=environment)
    # Fixed: 'found' was only assigned inside the matching branch, so a
    # missing transform raised NameError instead of failing the assertion.
    found = False
    for t in proto.components.transforms.values():
      if "CompositeTransform/SubTransform/Map" in t.unique_name:
        environment = proto.components.environments.get(t.environment_id)
        self.assertEqual(
            environment.resource_hints.get('foo_urn'), b'set_on_subtransform')
        self.assertEqual(
            environment.resource_hints.get('bar_urn'), b'set_on_composite')
        self.assertEqual(
            environment.resource_hints.get('baz_urn'), b'set_on_composite')
        self.assertEqual(
            environment.resource_hints.get('qux_urn'), b'set_via_options')
        self.assertEqual(
            environment.resource_hints.get('use_max_value_urn'), b'100')
        found = True
    assert found

  def test_environments_with_same_resource_hints_are_reused(self):
    """Transforms with identical hint sets must share one environment."""
    class HintX(ResourceHint):
      urn = 'X_urn'

    class HintY(ResourceHint):
      urn = 'Y_urn'

    class HintIsOdd(ResourceHint):
      urn = 'IsOdd_urn'

    ResourceHint.register_resource_hint('X', HintX)
    ResourceHint.register_resource_hint('Y', HintY)
    ResourceHint.register_resource_hint('IsOdd', HintIsOdd)

    p = beam.Pipeline()
    num_iter = 4
    for i in range(num_iter):
      _ = (
          p
          | f'NoHintCreate_{i}' >> beam.Create([1, 2])
          | f'NoHint_{i}' >> beam.Map(lambda x: x + 1))
      _ = (
          p
          | f'XCreate_{i}' >> beam.Create([1, 2])
          |
          f'HintX_{i}' >> beam.Map(lambda x: x + 1).with_resource_hints(X='X'))
      _ = (
          p
          | f'XYCreate_{i}' >> beam.Create([1, 2])
          | f'HintXY_{i}' >> beam.Map(lambda x: x + 1).with_resource_hints(
              X='X', Y='Y'))
      _ = (
          p
          | f'IsOddCreate_{i}' >> beam.Create([1, 2])
          | f'IsOdd_{i}' >>
          beam.Map(lambda x: x + 1).with_resource_hints(IsOdd=str(i % 2 != 0)))
    proto = Pipeline.to_runner_api(p)
    count_x = count_xy = count_is_odd = count_no_hints = 0
    env_ids = set()

    for _, t in proto.components.transforms.items():
      env = proto.components.environments[t.environment_id]
      if t.unique_name.startswith('HintX_'):
        count_x += 1
        env_ids.add(t.environment_id)
        self.assertEqual(env.resource_hints, {'X_urn': b'X'})

      if t.unique_name.startswith('HintXY_'):
        count_xy += 1
        env_ids.add(t.environment_id)
        self.assertEqual(env.resource_hints, {'X_urn': b'X', 'Y_urn': b'Y'})

      if t.unique_name.startswith('NoHint_'):
        count_no_hints += 1
        env_ids.add(t.environment_id)
        self.assertEqual(env.resource_hints, {})

      if t.unique_name.startswith('IsOdd_'):
        count_is_odd += 1
        env_ids.add(t.environment_id)
        self.assertTrue(
            env.resource_hints == {'IsOdd_urn': b'True'} or
            env.resource_hints == {'IsOdd_urn': b'False'})
    assert count_x == count_is_odd == count_xy == count_no_hints == num_iter
    assert num_iter > 1
    # Expected distinct environments: no-hint, X, XY, IsOdd=True, IsOdd=False.
    self.assertEqual(len(env_ids), 5)

  def test_multiple_application_of_the_same_transform_set_different_hints(self):
    """Re-applying one transform instance must not leak hints between uses."""
    class FooHint(ResourceHint):
      urn = 'foo_urn'

    class UseMaxValueHint(ResourceHint):
      urn = 'use_max_value_urn'

      @classmethod
      def get_merged_value(
          cls, outer_value, inner_value):  # type: (bytes, bytes) -> bytes
        return ResourceHint._use_max(outer_value, inner_value)

    ResourceHint.register_resource_hint('foo_hint', FooHint)
    ResourceHint.register_resource_hint('use_max_value_hint', UseMaxValueHint)

    @beam.ptransform_fn
    def SubTransform(pcoll):
      return pcoll | beam.Map(lambda x: x + 1)

    @beam.ptransform_fn
    def CompositeTransform(pcoll):
      sub = SubTransform()
      return (
          pcoll
          | 'first' >> sub.with_resource_hints(foo_hint='first_application')
          | 'second' >> sub.with_resource_hints(foo_hint='second_application'))

    p = beam.Pipeline()
    _ = (p | beam.Create([1, 2]) | CompositeTransform())
    proto = Pipeline.to_runner_api(p)
    count = 0
    for t in proto.components.transforms.values():
      if "CompositeTransform/first/Map" in t.unique_name:
        environment = proto.components.environments.get(t.environment_id)
        self.assertEqual(
            b'first_application', environment.resource_hints.get('foo_urn'))
        count += 1
      if "CompositeTransform/second/Map" in t.unique_name:
        environment = proto.components.environments.get(t.environment_id)
        self.assertEqual(
            b'second_application', environment.resource_hints.get('foo_urn'))
        count += 1
    assert count == 2

  def test_environments_are_deduplicated(self):
    """merge_compatible_environments must unify environments whose artifacts
    agree on (hash, staged destination), and leave all others distinct."""
    def file_artifact(path, sha256, staged_name):
      # Renamed the second parameter from 'hash' (shadowed the builtin).
      return beam_runner_api_pb2.ArtifactInformation(
          type_urn=common_urns.artifact_types.FILE.urn,
          type_payload=beam_runner_api_pb2.ArtifactFilePayload(
              path=path, sha256=sha256).SerializeToString(),
          role_urn=common_urns.artifact_roles.STAGING_TO.urn,
          role_payload=beam_runner_api_pb2.ArtifactStagingToRolePayload(
              staged_name=staged_name).SerializeToString(),
      )

    proto = beam_runner_api_pb2.Pipeline(
        components=beam_runner_api_pb2.Components(
            transforms={
                f'transform{ix}': beam_runner_api_pb2.PTransform(
                    environment_id=f'e{ix}')
                for ix in range(8)
            },
            environments={
                # Same hash and destination.
                'e1': beam_runner_api_pb2.Environment(
                    dependencies=[file_artifact('a1', 'x', 'dest')]),
                'e2': beam_runner_api_pb2.Environment(
                    dependencies=[file_artifact('a2', 'x', 'dest')]),
                # Different hash.
                'e3': beam_runner_api_pb2.Environment(
                    dependencies=[file_artifact('a3', 'y', 'dest')]),
                # Different destination.
                'e4': beam_runner_api_pb2.Environment(
                    dependencies=[file_artifact('a4', 'y', 'dest2')]),
                # Multiple files with same hash and destinations.
                'e5': beam_runner_api_pb2.Environment(
                    dependencies=[
                        file_artifact('a1', 'x', 'dest'),
                        file_artifact('b1', 'xb', 'destB')
                    ]),
                'e6': beam_runner_api_pb2.Environment(
                    dependencies=[
                        file_artifact('a2', 'x', 'dest'),
                        file_artifact('b2', 'xb', 'destB')
                    ]),
                # Overlapping, but not identical, files.
                'e7': beam_runner_api_pb2.Environment(
                    dependencies=[
                        file_artifact('a1', 'x', 'dest'),
                        file_artifact('b2', 'y', 'destB')
                    ]),
                # Same files as first, but differing other properties.
                'e0': beam_runner_api_pb2.Environment(
                    resource_hints={'hint': b'value'},
                    dependencies=[file_artifact('a1', 'x', 'dest')]),
            }))
    Pipeline.merge_compatible_environments(proto)

    # These environments are equivalent.
    self.assertEqual(
        proto.components.transforms['transform1'].environment_id,
        proto.components.transforms['transform2'].environment_id)
    self.assertEqual(
        proto.components.transforms['transform5'].environment_id,
        proto.components.transforms['transform6'].environment_id)

    # These are not.
    self.assertNotEqual(
        proto.components.transforms['transform1'].environment_id,
        proto.components.transforms['transform3'].environment_id)
    self.assertNotEqual(
        proto.components.transforms['transform4'].environment_id,
        proto.components.transforms['transform3'].environment_id)
    self.assertNotEqual(
        proto.components.transforms['transform6'].environment_id,
        proto.components.transforms['transform7'].environment_id)
    self.assertNotEqual(
        proto.components.transforms['transform1'].environment_id,
        proto.components.transforms['transform0'].environment_id)

    self.assertEqual(len(proto.components.environments), 6)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
  unittest.main()
|
5,836 | a5dcc66ece4e58995fe86c3a399c45975a596b1a | from utilities import SumOneToN, RSS, MSE, R2Score
import numpy as np
import scipy.stats as st
class RidgeLinearModel:
    """Ridge-regularized polynomial regression model in two predictors.

    The design matrix holds every monomial x1**a * x2**b with a + b <= k,
    grouped by total degree; SumOneToN(k + 1) is the number of such terms.
    """

    # Cached model state; the *_updated flags mark whether a cached statistic
    # still corresponds to the current fit.
    covariance_matrix = None          # covariance matrix of the coefficients
    covariance_matrix_updated = False
    beta = None                       # fitted coefficients of the model
    var_vector = None                 # bootstrap variance of each coefficient
    var_vector_updated = False
    CIbeta = None                     # confidence intervals of the betas
    CIbeta_updated = False
    x1 = None                         # first predictor of sample data
    x2 = None                         # second predictor of sample data
    y = None                          # responses of sample data
    y_tilde = None                    # model predictions for the sample data
    y_tilde_updated = False

    def __init__(this, lmb, k):
        this.lmb = lmb  # ridge penalty (emphasis on minimizing variance)
        this.k = k      # degree of the fitted polynomial

    def fit(this, x1, x2, y):
        """Fit the model to (x1, x2, y) with Ridge regression.

        @x1, @x2: arrays of predictor values
        @y: array of responses
        """
        # store the sample for later bootstrap computations
        this.x1 = x1
        this.x2 = x2
        this.y = y

        # dimensions of the design matrix
        m = x1.shape[0]
        n = SumOneToN(this.k + 1)

        # build the design matrix column by column, degree group by group
        this.X = np.ones((m, n))
        for i in range(m):  # TODO: vectorize this loop
            for p in range(this.k):
                for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)):
                    this.X[i][SumOneToN(p + 1) + j] *= x1[i]**(p
                            + 1 - j)*x2[i]**j

        # closed-form ridge solution: (X'X + lmb*I)^-1 X'y (via pseudo-inverse)
        this.beta = np.linalg.pinv(this.X.T.dot(this.X) +
                this.lmb*np.identity(n)).dot(this.X.T).dot(y)

        # stored statistical parameters are no longer valid
        this.set_updated_to_false()

    def predict(this, x1, x2):
        """Return model predictions for predictors (x1, x2), or None if the
        model has not been fitted yet."""
        if this.beta is None:
            print("Error: Model is not fitted.")
            return None
        # start from the constant term, then add each monomial contribution
        y = np.ones(x1.shape)*this.beta[0]
        for p in range(this.k):
            for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)):
                y += this.beta[SumOneToN(p + 1)
                        + j]*x1**(p+1-j)*x2**j
        return y

    def get_RSS(this, x1, x2, y):
        """Return the residual sum of squares on (x1, x2, y)."""
        if this.beta is None:
            print("Error: Model is not fitted.")
            return None
        y_tilde = this.predict(x1, x2)
        # Fixed: previously returned RSS(y, this.y_tilde), using a stale (or
        # never-set) attribute instead of the freshly computed prediction.
        return RSS(y, y_tilde)

    def get_MSE(this, x1, x2, y):
        """Return the mean squared error on sample data (x1, x2, y)."""
        if this.beta is None:
            print("Error: Model is not fitted.")
            return None
        y_tilde = this.predict(x1, x2)
        return MSE(y, y_tilde)

    def get_R2Score(this, x1, x2, y):
        """Return the R2 score on sample data (x1, x2, y)."""
        if this.beta is None:
            print("Error: Model is not fitted.")
            return None
        y_tilde = this.predict(x1, x2)
        return R2Score(y, y_tilde)

    def get_variance_of_betas(this, B=20):
        """Bootstrap-estimate the variance of each coefficient.

        @B: number of bootstrap samples to draw
        """
        m = len(this.x1)
        n = SumOneToN(this.k + 1)
        betasamples = np.zeros((n, B))
        for b in range(B):
            # draw a bootstrap sample (with replacement) from the stored data
            c = np.random.choice(len(this.x1), len(this.x1))
            s_x1 = this.x1[c]
            s_x2 = this.x2[c]
            s_y = this.y[c]
            # promote y to a column vector if it is one-dimensional
            if (len(s_y.shape)) == 1:
                s_y = np.expand_dims(this.y[c], axis=1)

            # rebuild the design matrix for the resampled predictors
            s_X = np.ones((m, n))
            for i in range(m):  # TODO: vectorize this loop
                for p in range(this.k):
                    for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)):
                        s_X[i][SumOneToN(p + 1) + j] *= s_x1[i]**(p
                                + 1 - j)*s_x2[i]**j

            betasamples[:,b] = np.linalg.pinv(s_X.T.dot(s_X) +
                    this.lmb*np.identity(n)).dot(s_X.T).dot(s_y)[:, 0]

        betameans = betasamples.sum(axis=1, keepdims=True)/B

        # sample variance of each coefficient over the bootstrap draws
        this.var_vector = np.sum((betasamples - betameans)**2, axis=1)/B
        this.var_vector_updated = True
        return this.var_vector

    def get_CI_of_beta(this, percentile=.95):
        """Return an n x 2 matrix; row i is the two-sided confidence interval
        [lower, upper] of coefficient i at the given confidence level."""
        if this.beta is None:
            print("Error: Model is not fitted.")
            return None
        if not this.CIbeta_updated:
            # Fixed: compute the bootstrap variances first if they are
            # missing, instead of crashing on a None var_vector.
            if this.var_vector is None or not this.var_vector_updated:
                this.get_variance_of_betas()
            # z-score of the two-sided interval (negative value)
            stdcoeff = st.norm.ppf((1-percentile)/2)
            this.CI_beta = np.zeros((len(this.beta), 2))
            for i in range(len(this.beta)):
                this.CI_beta[i][0] = this.beta[i] + stdcoeff*np.sqrt(this.var_vector[i])
                this.CI_beta[i][1] = this.beta[i] - stdcoeff*np.sqrt(this.var_vector[i])
            this.CIbeta_updated = True
        return this.CI_beta

    def set_updated_to_false(this):
        """Invalidate every cached statistic after a refit."""
        # Fixed: these previously assigned module-local names instead of
        # instance attributes, so cached statistics were never invalidated.
        this.covariance_matrix_updated = False
        this.var_vector_updated = False
        this.y_tilde_updated = False
        this.CIbeta_updated = False
|
5,837 | 885e02cbf78412d77bd17eba64a8a1a52aaed0df | from slacker import Slacker
import vk_api
import time
import logging
from settings import SLACK_TOKEN, VK_LOGIN, VK_PASSWORD, GROUP_ID, TOPIC_ID, ICON_URL
# Module-level Slack client, shared by Vts.run() when posting messages.
slack = Slacker(SLACK_TOKEN)
class Vts:
    """Relays new comments from a VK board topic into a Slack channel."""
    def __init__(self):
        self.last_comment_id = 0  # id of the newest comment already relayed
        self.vk = None            # lazily-initialised VK API client
    def update_vk(self):
        # Lazily authenticate against VK; on failure self.vk stays None and
        # callers are expected to retry on the next poll.
        if self.vk is not None:
            return
        vk_session = vk_api.VkApi(VK_LOGIN, VK_PASSWORD)
        try:
            vk_session.authorization()
        except vk_api.AuthorizationError as error_msg:
            logging.error(error_msg)
            return
        except vk_api.Captcha as captcha:
            logging.error(captcha)
            return
        self.vk = vk_session.get_api()
    def update_last_comment_id(self):
        # Seed last_comment_id with the newest existing comment so only
        # comments posted after startup get relayed.
        self.update_vk()
        if self.vk is None:
            return
        response = self.vk.board.getComments(group_id=GROUP_ID, topic_id=TOPIC_ID, sort='desc', count=1)
        if response['count'] == 0:
            time.sleep(5)
            return
        self.last_comment_id = response['items'][0]['id']
        print('Set initial id to ' + str(self.last_comment_id))
    def get_comments(self):
        # Returns (comments, profiles) starting at last_comment_id; empty
        # lists when the VK client could not be initialised.
        self.update_vk()
        if self.vk is None:
            return [], []
        response = self.vk.board.getComments(group_id=GROUP_ID, topic_id=TOPIC_ID,
                                             start_comment_id=self.last_comment_id, extended=1)
        return response['items'], response['profiles']
    def get_topic(self):
        # NOTE(review): unlike the other methods this does not guard against
        # self.vk being None after a failed authorization — confirm intended.
        self.update_vk()
        response = self.vk.board.getTopics(group_id=GROUP_ID, topic_ids=[TOPIC_ID])
        if response['count'] == 0:
            return None
        return response['items'][0]
    def run(self):
        """Poll loop: fetch new comments and post each one to #random."""
        while True:
            if self.last_comment_id == 0:
                self.update_last_comment_id()
            topic = self.get_topic()
            if topic is None:
                logging.warning('Topic not found')
                time.sleep(60)
                continue
            comments, profiles = self.get_comments()
            if len(comments) == 0:
                time.sleep(5)
                continue
            # index author profiles by their VK id for name lookup below
            users = dict()
            for profile in profiles:
                users[profile['id']] = profile
            for comment in comments:
                id = comment['id']  # NOTE(review): shadows builtin id()
                if id > self.last_comment_id:
                    self.last_comment_id = id
                text = comment['text']
                title = topic['title']
                # from_id is negative when the author is a community
                user_id = abs(comment['from_id'])
                try:
                    user = users[user_id]
                    username = ' '.join([user['first_name'], user['last_name']])
                except KeyError:
                    username = ''
                date = comment['date']
                # Slack <!date ...> token; the trailing text is the fallback
                # shown by clients that cannot render it.
                message_date = '<!date^' + str(date) + '^Posted {date} {time}|Posted 2014-02-18 6:39:42>'
                # Prefix every line of the comment body with Slack's quote marker.
                text = "\n".join(map(lambda s: ">" + s, text.split("\n")))
                message = '>*' + title + '*\n>_' + username + '_ (' + message_date + ')\n' + text
                slack.chat.post_message(channel='#random', text=message, username='vts', icon_url=ICON_URL)
                logging.info('Posted comment_id=%s\n%s', id, message)
# Entry point: run the relay forever, backing off 10 s on dropped
# connections, until interrupted with Ctrl-C.
if __name__ == '__main__':
    vts = Vts()
    try:
        while True:
            try:
                vts.run()
            # NOTE(review): relies on vk_api re-exporting `requests`; verify
            # this attribute path against the installed vk_api version.
            except vk_api.requests.exceptions.ConnectionError:
                time.sleep(10)
    except KeyboardInterrupt:
        pass
|
5,838 | b051a3dbe1c695fda9a0488dd8986d587bbb24a6 | from math import log
import ast
import copy
import csv
import re
from collections import Counter

import carTreePlotter
def calEntropy(dataSet):
    """Return the Shannon entropy of the class labels (last column) of dataSet.

    A larger entropy means the labels are more mixed; used both for the
    information-gain computation and for split-info in C4.5.
    """
    total = len(dataSet)
    labelCounts = Counter(entry[-1] for entry in dataSet)
    entropy = 0.0
    for count in labelCounts.values():
        share = count / total  # fraction of rows carrying this label
        entropy -= share * log(share, 2)
    return entropy
def calGini(dataSet):
    """Return the Gini impurity of the class labels (last column) of dataSet.

    Gini = 1 - sum(p_i^2); larger values mean more mixed labels.
    """
    total = len(dataSet)
    labelCounts = Counter(entry[-1] for entry in dataSet)
    impurity = 1.0
    for count in labelCounts.values():
        share = count / total
        impurity -= share * share
    return impurity
def splitDataSet(dataSet, col, value):
    """Return the rows of dataSet whose value in column ``col`` equals
    ``value``, with that column removed.

    The split procedure is shared by ID3, C4.5 and CART.
    """
    return [
        entry[:col] + entry[col + 1:]
        for entry in dataSet
        if entry[col] == value
    ]
def selectBestAttrIndex(dataSet, algorithm):
    """Dispatch to the split-selection routine for the given algorithm.

    @dataSet: 2-D sample list (last column is the class label)
    @algorithm: one of 'ID3', 'C4.5', 'CART'

    Raises:
        ValueError: for an unknown algorithm name (the original implementation
        silently returned None, which crashed later during tree construction).
    """
    selectors = {
        'ID3': selectBestAttrIndex_ID3,
        'C4.5': selectBestAttrIndex_C45,
        'CART': selectBestAttrIndex_CART,
    }
    if algorithm not in selectors:
        raise ValueError("unknown decision-tree algorithm: %r" % (algorithm,))
    return selectors[algorithm](dataSet)
def selectBestAttrIndex_ID3(dataSet):
    """Return the index of the attribute with maximal information gain,
    or -1 when no attribute yields a positive gain."""
    labelNum = len(dataSet[0])-1  # number of attributes (last column = label)
    oldEntropy = calEntropy(dataSet)
    bestIndex = -1
    maxInfoGain = 0.0
    for index in range(labelNum):
        newEntropy = 0.0
        attrValueList = [entry[index] for entry in dataSet]  # all values of this attribute
        attrValueSet = set(attrValueList)  # distinct values of this attribute
        for uniqueValue in attrValueSet:
            # weighted entropy of the partition induced by this value
            subDataSet = splitDataSet(dataSet, index, uniqueValue)
            p = float(len(subDataSet)) / len(dataSet)  # partition weight
            newEntropy += p * calEntropy(subDataSet)
        infoGain = oldEntropy - newEntropy
        if infoGain > maxInfoGain:
            maxInfoGain = infoGain
            bestIndex = index
    return bestIndex
def selectBestAttrIndex_C45(dataSet):
    """Return the index of the attribute with maximal information-gain
    *ratio* (C4.5), or -1 when no attribute improves on the baseline."""
    labelNum = len(dataSet[0])-1  # number of attributes
    oldEntropy = calEntropy(dataSet)
    bestIndex = -1
    maxInfoGainRotio = 0.0
    for index in range(labelNum):
        newEntropy = 0.0
        splitInfo = 0.0
        attrValueList = [entry[index] for entry in dataSet]
        attrValueSet = set(attrValueList)
        for uniqueValue in attrValueSet:
            subDataSet = splitDataSet(dataSet, index, uniqueValue)
            p = float(len(subDataSet)) / len(dataSet)
            newEntropy += p * calEntropy(subDataSet)
            splitInfo -= p * log(p, 2)  # entropy of the attribute itself
        infoGain = oldEntropy - newEntropy
        if splitInfo == 0.0:
            # single-valued attribute: gain ratio undefined, skip it
            continue
        infoGainRatio = infoGain / splitInfo  # normalized information gain
        if infoGainRatio > maxInfoGainRotio:
            maxInfoGainRotio = infoGainRatio
            bestIndex = index
    return bestIndex
def selectBestAttrIndex_CART(dataSet):
    """Return the index of the attribute whose split minimizes the weighted
    Gini impurity (CART criterion)."""
    labelNum = len(dataSet[0])-1  # number of attributes
    bestIndex = -1
    minGini = float("inf")  # smallest weighted Gini seen so far
    for index in range(labelNum):
        attrValueList = [entry[index] for entry in dataSet]
        attrValueSet = set(attrValueList)
        newGini = 0.0
        for uniqueValue in attrValueSet:
            subDataSet = splitDataSet(dataSet, index, uniqueValue)
            p = float(len(subDataSet)) / len(dataSet)  # partition weight
            newGini += p * calGini(subDataSet)
        if newGini < minGini:
            minGini = newGini
            bestIndex = index
    return bestIndex
def createTree(dataSet, oriAttr, oriAttrUniValSet, algorithm = 'ID3'):
    """Recursively build a decision tree as nested dicts.

    @dataSet: 2-D sample list, last column is the class label
    @oriAttr: attribute names, parallel to the dataSet columns
    @oriAttrUniValSet: per-attribute sets of all possible values
    @algorithm: 'ID3' (default), 'C4.5' or 'CART'

    Returns either a label (leaf) or {attrName: {value: subtree, ...}}.
    """
    attr = oriAttr[:]  # copy so the caller's attribute list is untouched
    attrUniValSet = oriAttrUniValSet[:]
    labelList = [entry[-1] for entry in dataSet]
    if len(labelList) == labelList.count(labelList[0]):  # 1. all labels equal -> leaf with that label
        return labelList[0]
    if len(attr) == 0:  # 2. no attribute left to split on
        return Counter(labelList).most_common(1)[0][0]  # majority label
    bestAttrIndex = selectBestAttrIndex(dataSet, algorithm)  # attribute with the best split gain
    bestAttr = attr[bestAttrIndex]
    resTree = {bestAttr : {}}  # subtree rooted at the chosen attribute
    del(attr[bestAttrIndex])  # drop the used attribute, matching the split dataSet width
    valueSet = attrUniValSet[bestAttrIndex]  # all possible values, incl. unseen ones
    del(attrUniValSet[bestAttrIndex])
    for value in valueSet:  # one branch per possible value
        subDataSet = splitDataSet(dataSet, bestAttrIndex, value)
        if len(subDataSet) == 0:  # 3. empty branch -> predict the parent's majority label
            resTree[bestAttr][value] = Counter(labelList).most_common(1)[0][0]
        else:
            # copy attr per branch so sibling recursions don't share the list
            cpyAttr = attr[:]
            resTree[bestAttr][value] = createTree(subDataSet, cpyAttr, attrUniValSet, algorithm)
    return resTree
def createAttrUniValSet(dataSet):
    """Return, for every attribute column (all but the label column), the set
    of distinct values observed in dataSet."""
    attrCount = len(dataSet[0]) - 1
    return [{entry[i] for entry in dataSet} for i in range(attrCount)]
def classifierVec(testVec, attr, tree):
    """Classify one sample by walking the decision tree to a leaf label.

    @testVec: attribute values of the sample, ordered like @attr
    @attr: attribute names matching the keys used in @tree
    @tree: nested dict {attrName: {value: subtree-or-label}}

    Note: the original implementation deep-copied the whole tree on every
    call; traversal never mutates it, so we walk the shared structure
    directly (O(depth) instead of O(tree size) per sample).
    """
    node = tree
    while isinstance(node, dict):
        attrName = list(node.keys())[0]      # each dict node has one key
        branchValue = testVec[attr.index(attrName)]
        node = node[attrName][branchValue]   # descend along the sample's value
    return node
def classifierSet(testDataSet, attr, tree):
    """Classify every row of testDataSet; returns the list of predicted
    labels in input order."""
    return [classifierVec(sample, attr, tree) for sample in testDataSet]
def saveTree(path, tree):
    """Serialize the decision tree (a nested dict of str/labels) to ``path``
    as its repr(); the counterpart reader is loadTree()."""
    with open(path, 'w') as wf:
        wf.write(repr(tree))
def loadTree(path):
    """Load a decision tree previously written by saveTree().

    Security fix: uses ast.literal_eval instead of eval(). The file only ever
    contains the repr() of nested dicts and strings, which literal_eval
    parses identically — but literal_eval cannot execute arbitrary code if
    the file has been tampered with.
    """
    with open(path, 'r') as rf:
        tree = ast.literal_eval(rf.read())
    return tree
def loadCarDataSet(path):
    """Read a CSV sample file and return (rows, generated attribute names).

    Attribute names are synthesized as 'attr0', 'attr1', ... for every column
    except the last one (the class label).
    """
    with open(path, 'r') as csvfile:
        rows = [row for row in csv.reader(csvfile)]
    attrNames = ['attr' + str(i) for i in range(len(rows[0]) - 1)]
    return rows, attrNames
def saveCarDataRes(path, carDataSetRes):
    """Write prediction results to ``path`` as CSV.

    Fix: the classifier returns a flat list of label *strings*; passing that
    straight to csv.writerows() iterated each string character by character,
    producing one column per letter. Scalar (string) entries are therefore
    wrapped into one-column rows; genuine row lists are passed through.
    """
    rows = [[item] if isinstance(item, str) else item for item in carDataSetRes]
    with open(path, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerows(rows)
def calAccuracy(dataSet, resVec):
    """Return the fraction of rows whose last-column label equals the
    corresponding entry of resVec.

    Prints a warning and returns None when the lengths differ (matching the
    original contract).
    """
    if len(dataSet) != len(resVec):
        print("Length of dataSet no equal length of resVec!")
        return
    matches = sum(
        1 for entry, predicted in zip(dataSet, resVec)
        if entry[-1] == predicted)
    return float(matches) / len(resVec)
# main函数中的选择函数
def mainTrainTree():
    """Train ID3 / C4.5 / CART trees from train.csv and save each tree as a
    .txt dump plus a rendered .png under ./output/."""
    print("说明:训练集是train.csv,验证集是validate.csv,由Car_train.csv随机分配得到,比例为3:1")
    print("使用train.csv建立决策树")
    carDataSet, carAttr = loadCarDataSet('./data/train.csv')
    carUniValSet = createAttrUniValSet(carDataSet)
    print("正在训练ID3决策树...", end='')
    car_ID3_Tree = createTree(carDataSet, carAttr, carUniValSet)
    saveTree('./output/car_ID3_Tree/car_ID3_Tree.txt', car_ID3_Tree)
    print("完成,保存为'./output/car_ID3_Tree/car_ID3_Tree.txt'")
    print("正在绘制ID3决策树图像...", end='')
    carTreePlotter.createPlot(car_ID3_Tree, "./output/car_ID3_Tree/car_ID3_Tree.png")
    print("完成,保存为'./output/car_ID3_Tree/car_ID3_Tree.png'")
    print("正在训练C4.5决策树...", end='')
    car_C45_Tree = createTree(carDataSet, carAttr, carUniValSet, 'C4.5')
    saveTree('./output/car_C45_Tree/car_C45_Tree.txt', car_C45_Tree)
    # Fixed: the messages below used to claim the C4.5/CART files were saved
    # under car_ID3_Tree/, contradicting the actual save paths above.
    print("完成,保存为'./output/car_C45_Tree/car_C45_Tree.txt'")
    print("正在绘制C4.5决策树图像...", end='')
    carTreePlotter.createPlot(car_C45_Tree, "./output/car_C45_Tree/car_C45_Tree.png")
    print("完成,保存为'./output/car_C45_Tree/car_C45_Tree.png'")
    print("正在训练CART决策树...", end='')
    car_CART_Tree = createTree(carDataSet, carAttr, carUniValSet, 'CART')
    saveTree('./output/car_CART_Tree/car_CART_Tree.txt', car_CART_Tree)
    print("完成,保存为'./output/car_CART_Tree/car_CART_Tree.txt'")
    print("正在绘制CART决策树图像...", end='')
    carTreePlotter.createPlot(car_CART_Tree, "./output/car_CART_Tree/car_CART_Tree.png")
    print("完成,保存为'./output/car_CART_Tree/car_CART_Tree.png'")
def mainCalAccu():
    """Score each previously saved tree against validate.csv and print the
    resulting accuracy for ID3, C4.5 and CART."""
    carTestSet, carTestAttr = loadCarDataSet('./data/validate.csv')
    print("正在用ID3决策树计算验证集...", end='')
    car_ID3_Tree = loadTree('./output/car_ID3_Tree/car_ID3_Tree.txt')
    car_ID3_SetRes = classifierSet(carTestSet, carTestAttr, car_ID3_Tree)
    car_ID3_accuracy = calAccuracy(carTestSet, car_ID3_SetRes)
    print("完成,准确率为 %f" % car_ID3_accuracy)
    print("正在用C4.5决策树计算验证集...", end='')
    car_C45_Tree = loadTree('./output/car_C45_Tree/car_C45_Tree.txt')
    car_C45_SetRes = classifierSet(carTestSet, carTestAttr, car_C45_Tree)
    car_C45_accuracy = calAccuracy(carTestSet, car_C45_SetRes)
    print("完成,准确率为 %f" % car_C45_accuracy)
    print("正在用CART决策树计算验证集...", end='')
    car_CART_Tree = loadTree("./output/car_CART_Tree/car_CART_Tree.txt")
    car_CART_SetRes = classifierSet(carTestSet, carTestAttr, car_CART_Tree)
    car_CART_accuracy = calAccuracy(carTestSet, car_CART_SetRes)
    print("完成,准确率为 %f" % car_CART_accuracy)
def mainGenRes():
    """Classify Car_test.csv with each saved tree and write the predictions
    as CSV files under ./output/."""
    carDataSet, carAttr = loadCarDataSet('./data/Car_test.csv')
    print("正在用ID3决策树生成测试集预测结果...", end='')
    car_ID3_Tree = loadTree('./output/car_ID3_Tree/car_ID3_Tree.txt')
    car_ID3_SetRes = classifierSet(carDataSet, carAttr, car_ID3_Tree)
    saveCarDataRes('./output/car_ID3_Tree/ID3_predict.csv', car_ID3_SetRes)
    print("完成,保存为'./output/car_ID3_Tree/ID3_predict.csv'")
    print("正在用C4.5决策树生成测试集预测结果...", end='')
    car_C45_Tree = loadTree('./output/car_C45_Tree/car_C45_Tree.txt')
    car_C45_SetRes = classifierSet(carDataSet, carAttr, car_C45_Tree)
    saveCarDataRes('./output/car_C45_Tree/C45_predict.csv', car_C45_SetRes)
    print("完成,保存为'./output/car_C45_Tree/C45_predict.csv'")
    print("正在用CART决策树生成测试集预测结果...", end='')
    car_CART_Tree = loadTree('./output/car_CART_Tree/car_CART_Tree.txt')
    car_CART_SetRes = classifierSet(carDataSet, carAttr, car_CART_Tree)
    saveCarDataRes('./output/car_CART_Tree/CART_predict.csv', car_CART_SetRes)
    print("完成,保存为'./output/car_CART_Tree/CART_predict.csv'")
def main():
    """Interactive console menu: train trees (1), score them (2), emit test
    predictions (3), or quit (4).

    Fix: ``trained`` now starts False, so options 2/3 are refused with the
    "train first" message until option 1 has run in this session. The
    original initialised it to True, which made that guard unreachable.
    """
    trained = False
    while True:
        activeNumStr = input("1.训练决策树\t2.计算准确率\t3.生成测试集预测结果\t4.退出\n")
        if re.match(r'^[1-4]$', activeNumStr):
            activeNum = int(activeNumStr)
            if activeNum == 1:
                mainTrainTree()
                trained = True
            elif activeNum == 4:
                break
            else:
                if trained:
                    if activeNum == 2:
                        mainCalAccu()
                    elif activeNum == 3:
                        mainGenRes()
                else:
                    print("请先训练决策树")
        else:
            print("输入不匹配:", end='')
# Script entry point. NOTE(review): not wrapped in an `if __name__ ==
# '__main__':` guard, so importing this module starts the menu — confirm
# whether that side effect is intended.
main()
5,839 | 56cae7b7a0338bd4a405cdc3cdcd9945a9df8823 | a = 2
# NOTE(review): `a` is assigned 2 just above, so this loop body never runs —
# presumably placeholder/dead code. `source` and `function` are undefined in
# this file; confirm where they are meant to come from.
while a == 1:
    b = source()
    c=function(b)
|
5,840 | 6306acd1508698687842ba6b55a839743af420cc | from extras.plugins import PluginConfig
from .version import __version__
class QRCodeConfig(PluginConfig):
    """NetBox plugin descriptor for the QR-code generator plugin."""
    # Plugin identity / metadata shown in the NetBox UI.
    name = 'netbox_qrcode'
    verbose_name = 'qrcode'
    description = 'Generate QR codes for the objects'
    version = __version__
    author = 'Nikolay Yuzefovich'
    author_email = 'mgk.kolek@gmail.com'
    required_settings = []
    # Global defaults; the per-model sections ('device', 'rack', 'cable')
    # override 'text_fields' for that object type.
    default_settings = {
        'with_text': True,  # render label text next to the QR image
        'text_fields': ['name', 'serial'],
        'font': 'TahomaBold',
        'custom_text': None,
        'text_location': 'right',
        # qrcode library parameters: symbol version, error-correction level,
        # per-module pixel size, and quiet-zone border width.
        'qr_version': 1,
        'qr_error_correction': 0,
        'qr_box_size': 6,
        'qr_border': 4,
        'device': {
            'text_fields': ['name', 'serial']
        },
        'rack': {
            'text_fields': ['name']
        },
        'cable': {
            'text_fields': [
                '_termination_a_device',
                'termination_a',
                '_termination_b_device',
                'termination_b',
            ]
        }
    }
config = QRCodeConfig # noqa E305
|
5,841 | 111186f1d45b9cf3bf9065c7fa83a8f3f796bbe1 | # -*- coding: utf-8 -*-
"""Labeled entry widget.
The goal of these widgets is twofold: to make it easier for developers
to implement dialogs with compound widgets, and to naturally
standardize the user interface presented to the user.
"""
import logging
import seamm_widgets as sw
import tkinter as tk
import tkinter.ttk as ttk
logger = logging.getLogger(__name__)
options = {
"entry": {
"class_": "class_",
"cursor": "cursor",
"exportselection": "exportselection",
"font": "font",
"invalidcommand": "invalidcommand",
"justify": "justify",
"show": "show",
"style": "style",
"takefocus": "takefocus",
"variable": "textvariable",
"validate": "validate",
"validatecommand": "validatecommand",
"width": "width",
"xscrollcommand": "xscrollcommand",
},
}
class LabeledEntry(sw.LabeledWidget):
    """A ttk.Entry combined with the label machinery of sw.LabeledWidget.

    Entry-specific configuration options (see the module-level `options`
    table) are routed to the internal entry; everything else is passed to
    the parent labeled widget.
    """

    def __init__(self, parent, *args, **kwargs):
        """Initialize the instance"""
        class_ = kwargs.pop("class_", "MLabeledEntry")
        super().__init__(parent, class_=class_)
        interior = self.interior

        # entry
        justify = kwargs.pop("justify", tk.LEFT)
        entrywidth = kwargs.pop("width", 15)
        self.entry = ttk.Entry(interior, justify=justify, width=entrywidth)
        self.entry.grid(row=0, column=0, sticky=tk.EW)

        # interior frame — re-point self.interior at a fresh frame so that
        # subclasses can add widgets to the right of the entry.
        self.interior = ttk.Frame(interior)
        self.interior.grid(row=0, column=1, sticky=tk.NSEW)
        interior.columnconfigure(0, weight=1)
        self.config(**kwargs)

    @property
    def value(self):
        # Convenience alias for get()/set().
        return self.get()

    @value.setter
    def value(self, value):
        self.set(value)

    def show(self, *args):
        """Show only the specified subwidgets.
        'all' or no arguments reverts to showing all"""
        super().show(*args)
        show_all = len(args) == 0 or args[0] == "all"
        if show_all or "entry" in args:
            self.entry.grid(row=0, column=0, sticky=tk.EW)
        else:
            self.entry.grid_forget()

    def set(self, value):
        """Set the value of the entry widget"""
        # Clear first; a None value leaves the entry empty.
        self.entry.delete(0, tk.END)
        if value is None:
            return
        self.entry.insert(0, value)

    def get(self):
        """return the current value"""
        value = self.entry.get()
        return value

    def config(self, **kwargs):
        """Set the configuration of the megawidget"""
        # our options that we deal with
        entry = options["entry"]
        # cannot modify kwargs while iterating over it...
        keys = [*kwargs.keys()]
        for k in keys:
            if k in entry:
                v = kwargs.pop(k)
                # Translate our option name to the ttk.Entry option name.
                self.entry.config(**{entry[k]: v})
        # having removed our options, pass rest to parent
        super().config(**kwargs)
|
5,842 | 33938a28aad29e996255827825a0cdb1db6b70b7 | import tkinter as tk
import random
# Root window plus a frame holding the label/button demo widgets.
root = tk.Tk()
main_frame = tk.Frame(root)
# Text shown in the label; updated in place through the StringVar.
var = tk.StringVar()
ch = [ "hello world" , "HI Pyton", "Mar Java", "Mit Java", "Lut Java" ]
var.set("Hello world I am a Label")
label = tk.Label(main_frame,textvariable=var,
                 bg="black",fg="white",font=("Times New Roman",24,"bold"))
label.pack()
def change_label():
    # Swap the label text for a random phrase from `ch`.
    var.set(random.choice(ch))
b1 = tk.Button(main_frame,text="click",command=change_label,
               font=("Arial",15,'bold'),bg="pink",fg="red")
b1.pack()
# Calculator part: the entry holds an arithmetic expression as text.
expr = tk.StringVar()
e1 = tk.Entry(root,textvariable=expr,font=("Arial",20,'bold'),
              bg='gray',fg='white')
main_frame.pack()
button = tk.Button(root,text="!!EXIT!!",command=root.destroy,
                   font=("Arial",15,'bold'),bg="pink",fg="red")
button.pack()
def slove():
    # Evaluate the typed expression and write the result back into the entry.
    # SECURITY NOTE(review): eval() executes arbitrary Python typed into the
    # entry box — acceptable for a toy demo, never for untrusted input.
    expr.set(eval(expr.get()))
result_button= tk.Button(root,text="!!Result!!",command=slove,
                         font=("Arial",15,'bold'),bg="pink",fg="red")
def clear():
    # Empty the expression entry.
    expr.set("")
clr_button= tk.Button(root,text="!!clear!!",command=clear,
                      font=("Arial",15,'bold'),bg="pink",fg="red")
e1.pack()
result_button.pack()
clr_button.pack(anchor='sw')
# Window chrome: title, min/max size, and initial screen position (+x+y).
root.title("My Appliction")
root.wm_minsize(400,400)
root.wm_maxsize(500,500)
root.geometry("+500+200")
root.mainloop()
|
5,843 | 547844eca9eab097b814b0daa5da96d6a8ccee55 | import numpy as np
import xgboost as xgb
from sklearn.grid_search import GridSearchCV #Performing grid search
import generateVector
from sklearn.model_selection import GroupKFold
from sklearn import preprocessing as pr
positiveFile="../dataset/full_data/positive.csv"
negativeFile="../dataset/full_data/negative.csv"
neutralFile="../dataset/full_data/neutral.csv"
X_model, Y_model = generateVector.loadMatrix(positiveFile, neutralFile, negativeFile, '2', '0', '-2')
X_model_scaled = pr.scale(X_model)
X_model_normalized = pr.normalize(X_model_scaled, norm='l2') # l2 norm
X_model = X_model_normalized
X_model = X_model.tolist()
testFold = []
for i in range(1, len(X_model) + 1):
if (i % 3 == 1) | (i % 3 == 2):
testFold.append(0)
else:
testFold.append(2)
#ps = PredefinedSplit(test_fold=testFold)
gkf = list(GroupKFold(n_splits=2).split(X_model, Y_model, testFold))
def param_Test1():
    # Stage-1 hyperparameter sweep: coarse grid over tree depth and
    # min_child_weight for an XGBoost multiclass model, scored with
    # weighted F1 over the predefined two-group CV split `gkf`.
    # NOTE: Python 2 code (bare `print` statements, sklearn.grid_search).
    global X_model,Y_model,gkf
    param_grid = {
        'max_depth': [2,4,6,8,10],
        'min_child_weight':[1,3,5,7],
        # 'gamma':[i/10.0 for i in range(0,5)],
        # 'subsample': [i / 10.0 for i in range(6, 10)],
        # 'colsample_bytree': [i / 10.0 for i in range(6, 10)],
        # 'reg_alpha': [1e-5, 1e-2, 0.1, 1, 100],
        'n_estimators': [100]}
    xgbclf = xgb.XGBClassifier(silent=0,objective="multi:softmax",learning_rate=0.1)
    # Run Grid Search process
    gs_clf = GridSearchCV(xgbclf, param_grid,n_jobs=-1,scoring='f1_weighted',cv=gkf)
    gs_clf.fit(np.asarray(X_model), Y_model)
    print gs_clf.best_params_,gs_clf.best_score_
    print gs_clf.grid_scores_, gs_clf.best_params_, gs_clf.best_score_
    # Best cell by mean CV score (grid_scores_ tuples are (params, mean, scores)).
    best_parameters, score, _ = max(gs_clf.grid_scores_, key=lambda x: x[1])
    print('score:', score)
    for param_name in sorted(best_parameters.keys()):
        print('%s: %r' % (param_name, best_parameters[param_name]))
#param_Test1()
#{'n_estimators': 100, 'max_depth': 4, 'min_child_weight': 3} 0.767260190997
def param_test2():
global X_model, Y_model, gkf
param_grid = {
'max_depth': [5,6,7],
'min_child_weight':[2,3,4],
# 'gamma':[i/10.0 for i in range(0,5)],
# 'subsample': [i / 10.0 for i in range(6, 10)],
# 'colsample_bytree': [i / 10.0 for i in range(6, 10)],
# 'reg_alpha': [1e-5, 1e-2, 0.1, 1, 100],
'n_estimators': [100]}
xgbclf = xgb.XGBClassifier(silent=0,objective="multi:softmax")
# Run Grid Search process
gs_clf = GridSearchCV(xgbclf, param_grid,
n_jobs=1,
scoring='f1_weighted',cv=gkf)
gs_clf.fit(np.asarray(X_model), Y_model)
print gs_clf.grid_scores_, gs_clf.best_params_, gs_clf.best_score_
best_parameters, score, _ = max(gs_clf.grid_scores_, key=lambda x: x[1])
print('score:', score)
for param_name in sorted(best_parameters.keys()):
print('%s: %r' % (param_name, best_parameters[param_name]))
#param_test2()
def paramTest2a():
global X_model, Y_model, gkf
param_grid = {
#'max_depth': [5, 6, 7],
#'learning_rate': [0.1, 0.15, 0.2, 0.3],
#'min_child_weight':[1,3,5,7],
# 'gamma':[i/10.0 for i in range(0,5)],
'subsample': [i / 10.0 for i in range(6, 10)],
'colsample_bytree': [i / 10.0 for i in range(6, 10)],
# 'reg_alpha': [1e-5, 1e-2, 0.1, 1, 100],
'n_estimators': [100]}
xgbclf = xgb.XGBClassifier(max_depth=5,min_child_weight=2,silent=0,learning_rate=0.1,objective="multi:softmax")
gs_clf = GridSearchCV(xgbclf, param_grid,
n_jobs=1,
scoring='f1_weighted',cv=gkf)
gs_clf.fit(np.asarray(X_model), Y_model)
print gs_clf.grid_scores_, gs_clf.best_params_, gs_clf.best_score_
best_parameters, score, _ = max(gs_clf.grid_scores_, key=lambda x: x[1])
print('score:', score)
for param_name in sorted(best_parameters.keys()):
print('%s: %r' % (param_name, best_parameters[param_name]))
#paramTest2a()
def paramTest2b():
global X_model, Y_model, gkf
param_grid = {
#'max_depth': [5, 6, 7],
# 'learning_rate': [0.1, 0.15, 0.2, 0.3],
#'min_child_weight': [1, 3, 5, 7],
#'gamma':[i/10.0 for i in range(0,5)],
'subsample': [i / 10.0 for i in range(6, 10)],
'colsample_bytree': [i / 10.0 for i in range(6, 10)],
# 'reg_alpha': [1e-5, 1e-2, 0.1, 1, 100],
'n_estimators': [100]}
xgbclf = xgb.XGBClassifier(silent=0, objective="multi:softmax",learning_rate=0.1,max_depth=7,min_child_weight=7)
# Run Grid Search process
gs_clf = GridSearchCV(xgbclf, param_grid,
n_jobs=1,
scoring='f1_weighted',cv=gkf)
gs_clf.fit(np.asarray(X_model), Y_model)
print gs_clf.grid_scores_, gs_clf.best_params_, gs_clf.best_score_
best_parameters, score, _ = max(gs_clf.grid_scores_, key=lambda x: x[1])
print('score:', score)
for param_name in sorted(best_parameters.keys()):
print('%s: %r' % (param_name, best_parameters[param_name]))
#paramTest2b()
def paramTest3():
global X_model, Y_model, gkf
param_grid = {
# 'max_depth': [5, 6, 7],
# 'learning_rate': [0.1, 0.15, 0.2, 0.3],
# 'min_child_weight': [1, 3, 5, 7],
'gamma':[i/10.0 for i in range(0,5)],
#'subsample': [i / 10.0 for i in range(6, 10)],
#'colsample_bytree': [i / 10.0 for i in range(6, 10)],
# 'reg_alpha': [1e-5, 1e-2, 0.1, 1, 100],
'n_estimators': [100]}
xgbclf = xgb.XGBClassifier(silent=0,objective="multi:softmax", learning_rate=0.1, max_depth=7, min_child_weight=7,
colsample_bytree=0.9,subsample=0.9)
# Run Grid Search process
gs_clf = GridSearchCV(xgbclf, param_grid,
n_jobs=1,
scoring='f1_weighted',cv=gkf)
gs_clf.fit(np.asarray(X_model), Y_model)
print gs_clf.grid_scores_, gs_clf.best_params_, gs_clf.best_score_
best_parameters, score, _ = max(gs_clf.grid_scores_, key=lambda x: x[1])
print('score:', score)
for param_name in sorted(best_parameters.keys()):
print('%s: %r' % (param_name, best_parameters[param_name]))
#paramTest3()
def paramTest4a():
global X_model, Y_model,gkf
param_grid = {
# 'max_depth': [5, 6, 7],
# 'learning_rate': [0.1, 0.15, 0.2, 0.3],
# 'min_child_weight': [1, 3, 5, 7],
# 'gamma': [i / 10.0 for i in range(0, 5)],
# 'subsample': [i / 10.0 for i in range(6, 10)],
# 'colsample_bytree': [i / 10.0 for i in range(6, 10)],
'reg_alpha': [1e-5, 1e-2, 0.1, 1, 100],
'n_estimators': [100]}
xgbclf = xgb.XGBClassifier(silent=0, learning_rate=0.1, max_depth=7, min_child_weight=7,gamma=0.1,
colsample_bytree=0.8, subsample=0.6,objective="multi:softmax")
# Run Grid Search process
gs_clf = GridSearchCV(xgbclf, param_grid,
n_jobs=1,
scoring='f1_weighted',cv=gkf)
gs_clf.fit(np.asarray(X_model), Y_model)
print gs_clf.grid_scores_, gs_clf.best_params_, gs_clf.best_score_
best_parameters, score, _ = max(gs_clf.grid_scores_, key=lambda x: x[1])
print('score:', score)
for param_name in sorted(best_parameters.keys()):
print('%s: %r' % (param_name, best_parameters[param_name]))
paramTest4a()
|
5,844 | 4a546222082e2a25296e31f715baf594c974b7ad | #!/usr/bin/env python
#coding=UTF8
'''
@author: devin
@time: 2013-11-23
@desc:
timer
'''
import threading
import time
class Timer(threading.Thread):
    '''
    Run a task repeatedly at a fixed interval until stopped.
    '''
    def __init__(self, seconds, fun, **kwargs):
        '''
        seconds: interval between runs, in seconds
        fun: the callable executed on every tick
        kwargs: keyword arguments forwarded to fun
        '''
        self.sleep_time = seconds
        threading.Thread.__init__(self)
        self.fun = fun
        self.kwargs = kwargs
        # Event doubles as the stop flag and as an interruptible sleep.
        self.is_stop = threading.Event()
    def run(self):
        # Fire immediately, then wait `sleep_time` between runs; the Event
        # wait returns early when stop() sets it, so shutdown is prompt.
        while not self.is_stop.is_set():
            self.fun(**self.kwargs)
            self.is_stop.wait(timeout=self.sleep_time)
    def stop(self, *args):
        # *args lets this be installed directly as a signal handler
        # (receives signum and frame).
        self.is_stop.set()
class CountDownTimer(Timer):
    '''
    A Timer that runs the task a fixed number of times and then exits.
    '''
    def __init__(self, seconds, total_times, fun, **kwargs):
        '''
        total_times: how many times to execute the task
        other parameters: same as Timer
        '''
        self.total_times = total_times
        # BUG FIX: the keyword arguments must be forwarded with ** — the
        # original passed the dict positionally, which raised TypeError in
        # Timer.__init__ (it only accepts **kwargs after `fun`).
        Timer.__init__(self, seconds, fun, **kwargs)
    def run(self):
        # BUG FIX: the original referenced self.is_run and self.args, neither
        # of which exists; Timer defines the is_stop Event and self.kwargs.
        counter = 0
        while counter < self.total_times and not self.is_stop.is_set():
            # Wait first, as the original did; wait() returns True (and we
            # quit early) when stop() sets the event.
            if self.is_stop.wait(timeout=self.sleep_time):
                break
            self.fun(**self.kwargs)
            counter += 1
if __name__ == "__main__":
def test(s):
print s
timer = Timer(2, test, s="a")
timer.start()
import signal
signal.signal(signal.SIGINT, timer.stop)
signal.signal(signal.SIGTERM, timer.stop)
signal.pause()
|
5,845 | 774f5d01cd274755626989c2b58bde68df349d8e | class Solution:
def isToeplitzMatrix(self, matrix: List[List[int]]) -> bool:
h = len(matrix)
w = len(matrix[0])
for curRow in range(h) :
val = matrix[curRow][0]
i = 0
while i < h-curRow and i < w :
# print(curRow+i,i)
if matrix[curRow+i][i] != val :
return False
i += 1
# print('pass')
for curCol in range(w) :
val = matrix[0][curCol]
i = 0
while i < h and i < w-curCol :
if matrix[i][curCol+i] != val :
return False
i += 1
return True |
5,846 | 25b7af2a8036f35a0bca665867d1729b7c9c113c | from ._monitor import TMonitor as TMonitor, TqdmSynchronisationWarning as TqdmSynchronisationWarning
from ._tqdm_pandas import tqdm_pandas as tqdm_pandas
from .cli import main as main
from .gui import tqdm as tqdm_gui, trange as tgrange
from .std import TqdmDeprecationWarning as TqdmDeprecationWarning, TqdmExperimentalWarning as TqdmExperimentalWarning, TqdmKeyError as TqdmKeyError, TqdmMonitorWarning as TqdmMonitorWarning, TqdmTypeError as TqdmTypeError, TqdmWarning as TqdmWarning, tqdm as tqdm, trange as trange
def tqdm_notebook(*args, **kwargs): ...
def tnrange(*args, **kwargs): ...
|
5,847 | a1ebb00d7cda65cb528b2253e817d925214cdce3 | # 1.闭包
# 2.装饰器初识
# 3.标准版装饰器
5,848 | c907f6b954aa3eae21a54eba9d54c116576bd40a | """
Constants to be used throughout this program
stored here.
"""
# Base hosts for the standard and media-upload APIs.
ROOT_URL = "https://api.twitter.com"
UPLOAD_URL = "https://upload.twitter.com"

# Three-legged OAuth 1.0a endpoints.
REQUEST_TOKEN_URL = f'{ROOT_URL}/oauth/request_token'
AUTHENTICATE_URL = f'{ROOT_URL}/oauth/authenticate'
ACCESS_TOKEN_URL = f'{ROOT_URL}/oauth/access_token'

# REST API version segment.
VERSION = '1.1'

# v1.1 REST endpoints.
USER_SEARCH_URL = f'{ROOT_URL}/{VERSION}/users/search.json'
FRIENDSHIP_CREATE_URL = f'{ROOT_URL}/{VERSION}/friendships/create.json'
FRIENDSHIP_DESTROY_URL = f'{ROOT_URL}/{VERSION}/friendships/destroy.json'
FRIENDS_URL = f'{ROOT_URL}/{VERSION}/friends/list.json'
FOLLOWERS_URL = f'{ROOT_URL}/{VERSION}/followers/list.json'
TWEET_SEARCH_URL = f'{ROOT_URL}/{VERSION}/search/tweets.json'
TWEET_LIKE_URL = f'{ROOT_URL}/{VERSION}/favorites/create.json'
TWEET_UNLIKE_URL = f'{ROOT_URL}/{VERSION}/favorites/destroy.json'
# BUG FIX: the v1.1 (un)retweet endpoints live under "statuses/retweet" and
# "statuses/unretweet"; the previous "retweet/create" / "unretweet/create"
# paths do not exist on the API and returned 404.  Fill the placeholder with
# .format(tweet_id=...).
RETWEET_URL = ROOT_URL + "/" + VERSION + "/statuses/retweet/{tweet_id}.json"
REMOVE_RETWEET_URL = ROOT_URL + "/" + \
    VERSION + "/statuses/unretweet/{tweet_id}.json"
FAVOURITED_TWEETS_URL = ROOT_URL + "/" + VERSION + "/favorites/list.json"
STATUS_UPDATE_URL = f'{ROOT_URL}/{VERSION}/statuses/update.json'
MEDIA_UPLOAD_URL = f'{UPLOAD_URL}/{VERSION}/media/upload.json'
TRENDS_URL = f'{ROOT_URL}/{VERSION}/trends/place.json'
|
5,849 | 223413918ba2a49cd13a34026d39b17fb5944572 | from selenium import webdriver
driver = webdriver.Chrome(executable_path=r'D:\Naveen\Selenium\chromedriver_win32\chromedriver.exe')
driver.maximize_window()
driver.get('http://zero.webappsecurity.com/')
# Remember the original tab so we can return to it after the popup work.
parent_window_handle = driver.current_window_handle
# Clicking the first "privacy" link opens a new browser window.
driver.find_element_by_xpath("(//a[contains(text(),'privacy')])[1]").click()
windows = driver.window_handles
#driver.switch_to.window(windows[1])
# Locate the new window by its title instead of assuming handle order.
for window in windows:
    driver.switch_to.window(window)
    if driver.title == "Legal Information | Micro Focus":
        break
driver.find_element_by_link_text('Free Trials').click()
driver.close()
# Back to the original window to continue the sign-in flow.
driver.switch_to.window(parent_window_handle)
driver.find_element_by_id('signin_button').click()
5,850 | 4bd928c16cd0f06931aad5a478f8a911c5a7108b | #source: https://www.pyimagesearch.com/2015/05/25/basic-motion-detection-and-tracking-with-python-and-opencv/
from imutils.video import VideoStream
import argparse
import datetime
import imutils
import time
import cv2
#capture the video file
b="blood.mp4"
c="Center.avi"
d="Deformed.avi"
i="Inlet.avi"
videofile=c
vs = cv2.VideoCapture(videofile)
#width = vs.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)
#height = vs.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)
width = vs.get(3)
height=vs.get(4)
print("Width x: ",width, " Height y: ",height)
print("Frame Number,x coordinate of ROI,Weidth,Height,Width/Height")
# initialize the first frame in the video stream
firstFrame = None
# loop over the frames of the video
j=0
totalframesampled=0
totalcelldetected=0
while True:
j+=1
if j%1000 !=0 :
continue
totalframesampled+=1
# grab the current frame and initialize the occupied/unoccupied
# text
frame = vs.read()
frame = frame[1]
text = "Unoccupied"
# if the frame could not be grabbed, then we have reached the end
# of the video
if frame is None:
break
# resize the frame, convert it to grayscale, and blur it
frame = imutils.resize(frame, width=500)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (21, 21), 0)
# if the first frame is None, initialize it
if firstFrame is None:
firstFrame = gray
continue
# compute the absolute difference between the current frame and
# first frame
frameDelta = cv2.absdiff(firstFrame, gray)
thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
# dilate the thresholded image to fill in holes, then find contours
# on thresholded image
thresh = cv2.dilate(thresh, None, iterations=2)
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
#print(cnts)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
#print("Frame: ",j)
#print(cnts)
# loop over the contours
for c in cnts:
#print("c:",c)
area=cv2.contourArea(c)
#print("Area:",area)
minarea=250
if area<=minarea:
continue
(x, y, w, h) = cv2.boundingRect(c)# top left x,y, wid,hei
condition_center_inlet=x>440 and x<450
condition_deformation=y>240 and y<300
if condition_center_inlet:
totalcelldetected+=1
print("totalcelldetected:",totalcelldetected)
print(j,x,y,w,h,w/h)
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
text = "Occupied"
k=0
frameskip=10 # for center and inlet skip=10
while k<frameskip:
k+=1
temp=vs.read()
break
# if the contour is too small, ignore it
# compute the bounding box for the contour, draw it on the frame,
# and update the text
# draw the text and timestamp on the frame
cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
(10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
# show the frame and record if the user presses a key
cv2.imshow("Security Feed", frame)
cv2.imshow("Thresh", thresh)
cv2.imshow("Frame Delta", frameDelta)
key = cv2.waitKey(1) & 0xFF
# if the `q` key is pressed, break from the lop
if key == ord("q"):
break
# cleanup the camera and close any open windows
vs.release()
cv2.destroyAllWindows()
print("Total frame: ",j-1)
print("Frame sampled: ",totalframesampled)
print("Total object detected: ",totalcelldetected)
|
5,851 | 65b30bbe737b331447235b5c640e9c3f7f6d6f8c | def slope_distance(baseElev, elv2, dist_betwn_baseElev_elv2, projectedDistance):
# Calculate the slope and distance between two Cartesian points.
#
# Input:
# For 2-D graphs,
# dist_betwn_baseElev_elv2, Distance between two elevation points (FLOAT)
# baseElev, Elevation of first cartesian point (FLOAT)
# elv2, Elevation of second cartesian point (FLOAT)
#
# Output:
# For 2-D graphs/profiles,
# slope, Slope betweewn two points. The horizontal plane is the
# plane of origin. Slope above and below the plane are
# positive and negative, respectively. This variable is
# needed for creating 2-D profiles/graphs.
# distance, Cartesian length between two points on a graph/profile.
# Used as 3-D Chainage distance (may differ from survey
# chainage data)
#
# Created: April 24, 2019 (moostang)
import math
numer = elv2 - baseElev # Numerator
denom = dist_betwn_baseElev_elv2
print(numer,denom)
distance = math.sqrt( numer**2 + denom**2)
# Check if denominator is zero, i.e. both points lies on the same
# y-axis plane.
# a. If denominator is zero, then determine if it lies on the
# upper (positive) or bottom (negative) y-axis plane.
# b. If denominator is not zero, then proceed with normal pythagorean
# trigonometric calculations
#
if denom == 0:
print("Denominator is zero")
b = 0
if elv2 > baseElev:
print(" and elv2 > baseElev")
p = 1 # Second point is above first point
theta = math.pi/2
elif elv2 < baseElev:
print(" and elv2 < baseElev")
p = -1 # Second point is below first point
theta = - math.pi/2
else:
print(" and elv2 = baseElev. Both of them are the same points !")
p = 0
b = 0
theta = 0
else:
print("Denominator is NOT zero")
theta = math.atan(numer/denom)
p = math.sin(theta)
b = math.cos(theta)
slope = theta
if projectedDistance != 0 and projectedDistance <= dist_betwn_baseElev_elv2:
b = abs(projectedDistance) # Tackle negative distances (may occur)
newElev = baseElev + b*math.tan(slope)
distance = projectedDistance/math.cos(slope)
else:
newElev = elv2
return slope, distance, newElev
|
5,852 | a283fd1e4098ea8bb3cc3580438c90e5932ba22f | #!/usr/bin/env python
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Workaround for segmentation fault for some versions when ndimage is imported after tensorflow.
import scipy.ndimage as nd
import argparse
import numpy as np
from pybh import tensorpack_utils
import data_record
from pybh import serialization
from pybh import msgpack_utils
from pybh import lmdb_utils
from pybh.utils import argparse_bool, logged_time_measurement
from pybh import log_utils
logger = log_utils.get_logger("reward_learning/split_data_lmdb")
def dict_from_dataflow_generator(df):
for sample in df.get_data():
yield sample[0]
def split_lmdb_dataset(lmdb_input_path, lmdb_output_path1, lmdb_output_path2, split_ratio1,
batch_size, shuffle, serialization_name, compression, compression_arg, max_num_samples=None):
data_dict_df = tensorpack_utils.AutoLMDBData(lmdb_input_path, shuffle=shuffle)
data_dict_df.reset_state()
assert(split_ratio1 > 0)
assert(split_ratio1 < 1)
num_samples = data_dict_df.size()
if max_num_samples is not None and max_num_samples > 0:
num_samples = min(num_samples, max_num_samples)
num_batches = num_samples // batch_size
num_batches1 = round(split_ratio1 * num_samples) // batch_size
num_samples1 = num_batches1 * batch_size
num_batches2 = num_batches - num_batches1
num_samples2 = num_batches2 * batch_size
if num_samples1 <= 0 or num_samples2 <= 0:
import sys
sys.stderr.write("Data split will result in empty data set\n")
sys.exit(1)
logger.info("Splitting {} samples into {} train and {} test samples".format(num_samples, num_samples1, num_samples2))
if num_samples > num_samples1 + num_samples2:
logger.warn("Dropping {} samples from input dataset".format(num_samples - num_samples1 - num_samples2))
fixed_size_df = tensorpack_utils.FixedSizeData(data_dict_df, size=num_samples1, keep_state=True)
with logged_time_measurement(logger, "Writing train dataset to {} ...".format(lmdb_output_path1), log_start=True):
tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df, lmdb_output_path1, batch_size,
write_frequency=10,
serialization_name=serialization_name,
compression=compression,
compression_arg=compression_arg)
fixed_size_df.set_size(num_samples2)
with logged_time_measurement(logger, "Writing test dataset to {} ...".format(lmdb_output_path2), log_start=True):
tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df, lmdb_output_path2, batch_size,
write_frequency=10,
serialization_name=serialization_name,
compression=compression,
compression_arg=compression_arg,
reset_df_state=False)
logger.info("Tagging as train and test")
with lmdb_utils.LMDB(lmdb_output_path1, readonly=False) as lmdb_db:
lmdb_db.put_item("__train__", msgpack_utils.dumps(True))
with lmdb_utils.LMDB(lmdb_output_path2, readonly=False) as lmdb_db:
lmdb_db.put_item("__test__", msgpack_utils.dumps(True))
lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path1)
assert(lmdb_df.size() == num_samples1)
lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path2)
assert(lmdb_df.size() == num_samples2)
def compute_and_update_stats_in_lmdb(lmdb_path, serialization_name):
with logged_time_measurement(logger, "Computing data statistics for {}".format(lmdb_path), log_start=True):
lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_path)
lmdb_df.reset_state()
data_stats_dict = data_record.compute_dataset_stats_from_dicts(dict_from_dataflow_generator(lmdb_df))
# TODO: Hack to get rid of float64 in HDF5 dataset
for key in data_stats_dict:
for key2 in data_stats_dict[key]:
if data_stats_dict[key][key2] is not None:
data_stats_dict[key][key2] = np.asarray(data_stats_dict[key][key2], dtype=np.float32)
serializer = serialization.get_serializer_by_name(serialization_name)
logger.info("Writing data statistics to {}".format(lmdb_path))
with lmdb_utils.LMDB(lmdb_path, readonly=False) as lmdb_db:
data_stats_dump = serializer.dumps(data_stats_dict)
lmdb_db.put_item("__stats__", data_stats_dump)
def run(args):
    """Split the input LMDB into train/test shards and, optionally, compute
    and embed per-shard data statistics (stored under the "__stats__" key)."""
    split_lmdb_dataset(args.lmdb_input_path, args.lmdb_output_path1, args.lmdb_output_path2,
                       args.split_ratio1, args.batch_size,
                       args.shuffle, args.serialization,
                       args.compression, args.compression_arg,
                       args.max_num_samples)
    if args.compute_stats:
        compute_and_update_stats_in_lmdb(args.lmdb_output_path1, args.serialization)
        compute_and_update_stats_in_lmdb(args.lmdb_output_path2, args.serialization)
def main():
    """Parse command-line options and run the LMDB train/test split."""
    # Keep numpy array reprs short in any logged output.
    np.set_printoptions(threshold=5)
    parser = argparse.ArgumentParser(description=None)
    parser.add_argument('-v', '--verbose', action='count',
                        default=0, help='Set verbosity level.')
    parser.add_argument('--lmdb-input-path', required=True, help='Path to input LMDB database.')
    parser.add_argument('--lmdb-output-path1', required=True, help='Path to store train LMDB database.')
    parser.add_argument('--lmdb-output-path2', required=True, help='Path to store test LMDB database.')
    parser.add_argument('--shuffle', type=argparse_bool, default=True)
    parser.add_argument('--serialization', type=str, default="pickle")
    parser.add_argument('--compression', type=str, default="lz4")
    parser.add_argument('--compression-arg', type=str)
    parser.add_argument('--split-ratio1', default=0.8, type=float, help="Ratio of data to write to output path 1")
    parser.add_argument('--batch-size', type=int, default=512)
    parser.add_argument('--compute-stats', type=argparse_bool, default=True)
    parser.add_argument('--max-num-samples', type=int)
    args = parser.parse_args()
    run(args)
if __name__ == '__main__':
main()
|
5,853 | 198beb5a17575d781f7bce1ab36a6213ad7331b3 | import pandas as pd
import numpy as np
import inspect
from script.data_handler.Base.df_plotterMixIn import df_plotterMixIn
from script.util.MixIn import LoggerMixIn
from script.util.PlotTools import PlotTools
DF = pd.DataFrame
Series = pd.Series
class null_clean_methodMixIn:
    """Static strategies for cleaning null values in a DataFrame column."""

    @staticmethod
    def drop_col(df: DF, key):
        # Remove the column entirely.
        return df.drop(key, axis=1)

    @staticmethod
    def fill_major_value_cate(df: DF, key) -> DF:
        # Fill NaNs with the most frequent category ('top' of describe()).
        major_value = df[key].astype(str).describe()['top']
        df[key] = df[key].fillna(major_value)
        return df

    @staticmethod
    def fill_random_value_cate(df: DF, key) -> DF:
        # Fill NaNs with a uniformly random existing category.
        # NOTE(review): np.random.choice is evaluated once when the lambda
        # runs on the whole Series, so every NaN in the column gets the SAME
        # draw — confirm this is intended rather than per-cell sampling.
        values = df[key].value_counts().keys()
        df[key] = df[key].transform(lambda x: x.fillna(np.random.choice(values)))
        # df[key] = df[key].fillna()
        return df

    @staticmethod
    def fill_rate_value_cate(df: DF, key) -> DF:
        # Fill NaNs with a category sampled proportionally to its frequency
        # (same single-draw-per-call caveat as fill_random_value_cate).
        values, count = zip(*list(df[key].value_counts().items()))
        p = np.array(count) / np.sum(count)
        df[key] = df[key].transform(lambda x: x.fillna(np.random.choice(values, p=p)))
        return df
class Base_dfCleaner(LoggerMixIn, null_clean_methodMixIn, df_plotterMixIn):
import_code = """
import pandas as pd
import numpy as np
import random
from script.data_handler.Base_dfCleaner import Base_dfCleaner
DF = pd.DataFrame
Series = pd.Series
"""
class_template = """
class dfCleaner(Base_dfCleaner):
"""
def __init__(self, df: DF, df_Xs_keys, df_Ys_key, silent=False, verbose=0):
LoggerMixIn.__init__(self, verbose)
null_clean_methodMixIn.__init__(self)
df_plotterMixIn.__init__(self)
self.df = df
self.silent = silent
self.df_Xs_keys = df_Xs_keys
self.df_Ys_key = df_Ys_key
self.plot = PlotTools()
def __method_template(self, df: DF, col_key: str, col: DF, series: Series, Xs_key: list, Ys_key: list):
return df
@property
def method_template(self):
method_template = inspect.getsource(self.__method_template)
method_template = method_template.replace('__method_template', '{col_name}')
return method_template
def boilerplate_maker(self, path=None, encoding='UTF8'):
code = [self.import_code]
code += [self.class_template]
df_only_null = self._df_null_include(self.df)
for key in df_only_null.keys():
code += [self.method_template.format(col_name=key)]
code = "\n".join(code)
if path is not None:
with open(path, mode='w', encoding=encoding) as f:
f.write(code)
return code
def clean(self) -> DF:
for key, val in self.__class__.__dict__.items():
if key in self.df.keys():
col = self.df[[key]]
series = self.df[key]
self.df = val(self, self.df, key, col, series, self.df_Xs_keys, self.df_Ys_key)
return self.df
def null_cols_info(self) -> str:
ret = []
for key, val in list(self.__class__.__dict__.items()):
if key in self.df.keys():
info = self._str_null_col_info(self.df, key)
ret += [info]
return "\n\n".join(ret)
def null_cols_plot(self):
df_only_null = self._df_null_include(self.df)
self._df_cols_plot(df_only_null, list(df_only_null.keys()), self.df_Ys_key)
@staticmethod
def _df_null_include(df: DF) -> DF:
null_column = df.columns[df.isna().any()].tolist()
return df.loc[:, null_column]
def _str_null_col_info(self, df: DF, key) -> str:
ret = []
col = df[[key]]
series = df[key]
na_count = series.isna().sum()
total = len(col)
ret += [f'column : "{key}", null ratio:{float(na_count)/float(total):.4f}%, {na_count}/{total}(null/total)']
ret += [col.describe()]
ret += ['value_counts']
ret += [series.value_counts()]
groupby = df[[key, self.df_Ys_key]].groupby(key).agg(['mean', 'std', 'min', 'max', 'count'])
ret += [groupby]
return "\n".join(map(str, ret))
|
5,854 | e9ea48dec40e75f2fc73f8dcb3b5b975065cf8af | class Solution:
def findAndReplacePattern(self, words: List[str], pattern: str) -> List[str]:
def convert(word):
table = {}
count, converted = 0, ''
for w in word:
if w in table:
converted += table[w]
else:
converted += str(count)
table[w] = str(count)
count += 1
return converted
p = convert(pattern)
answer = []
for word in words:
if p == convert(word):
answer.append(word)
return answer
"""
[빠른 풀이]
- zip을 이용해서 길이만 비교!!!
class Solution:
def findAndReplacePattern(self, w: List[str], p: str) -> List[str]:
return [i for i in w if len(set(zip(p,i)))==len(set(p))==len(set(i))]
""" |
5,855 | 399a22450d215638051a7d643fb6d391156779c5 | /home/khang/anaconda3/lib/python3.6/tempfile.py |
5,856 | 91959f6621f05b1b814a025f0b95c55cf683ded3 | from pyparsing import ParseException
from pytest import raises
from easymql.expressions import Expression as exp
class TestComparisonExpression:
    """Tests for translating the EasyMQL CMP() expression to MongoDB's $cmp."""

    def test_cmp(self):
        # CMP maps directly onto the $cmp aggregation operator.
        assert exp.parse('CMP(1, 2)') == {'$cmp': [1, 2]}
        # CMP requires exactly two operands: too few ...
        with raises(ParseException):
            exp.parse('CMP(1)')
        # ... or too many must fail to parse.
        with raises(ParseException):
            exp.parse('CMP(1, 2, 3)')
        # Operands may themselves be expressions (3 + 2 -> $add).
        assert exp.parse('CMP(1, 3 + 2)') == {'$cmp': [1, {'$add': [3, 2]}]}
|
5,857 | f9dd21aac7915b9bbf91eeffb5fd58ffdb43c6c3 | '''
Unit test for `redi.create_summary_report()`
'''
import unittest
import os
import sys
from lxml import etree
from StringIO import StringIO
import time
import redi
file_dir = os.path.dirname(os.path.realpath(__file__))
goal_dir = os.path.join(file_dir, "../")
proj_root = os.path.abspath(goal_dir)+'/'
DEFAULT_DATA_DIRECTORY = os.getcwd()
class TestCreateSummaryReport(unittest.TestCase):
    """Unit test for `redi.create_summary_report()`.

    setUp builds the fixture inputs, the expected XML document, and an XSD
    schema; the test validates the generated report against both.
    """

    def setUp(self):
        """Build report params/data fixtures, alert fixtures, expected XML and XSD."""
        redi.configure_logging(DEFAULT_DATA_DIRECTORY)
        self.test_report_params = {
            'project': 'hcvtarget-uf',
            'report_file_path': proj_root + 'config/report.xml',
            'redcap_uri': 'https://hostname.org'}
        self.test_report_data = {
            'total_subjects': 5,
            'form_details': {
                'Total_chemistry_Forms': 22,
                'Total_cbc_Forms': 53
            },
            'subject_details': {
                '60': {'cbc_Forms': 1, 'chemistry_Forms': 1},
                '61': {'cbc_Forms': 2, 'chemistry_Forms': 1},
                '63': {'cbc_Forms': 11, 'chemistry_Forms': 4},
                '59': {'cbc_Forms': 39, 'chemistry_Forms': 16}
            },
            'errors' : [],
        }
        self.specimen_taken_time_summary = {'total': 15, 'blank': 3}
        self.test_alert_summary = {
            'multiple_values_alert': [
                'This is multiple values alert 1',
                'This is multiple values alert 2',
                'This is multiple values alert 3'],
            'max_event_alert': [
                'This is max event alert 1',
                'This is max event alert 2',
                'This is max event alert 3']
        }
        # Expected serialized report.  The <date> element is generated at run
        # time, so today's date is embedded into the fixture.
        self.expected_xml = '''
<report>
<header>
<project>hcvtarget-uf</project>
<date>'''+time.strftime("%m/%d/%Y")+'''</date>
<redcapServerAddress>https://hostname.org</redcapServerAddress>
</header>
<summary>
<subjectCount>5</subjectCount>
<forms>
<form>
<form_name>Total_cbc_Forms</form_name>
<form_count>53</form_count>
</form>
<form>
<form_name>Total_chemistry_Forms</form_name>
<form_count>22</form_count>
</form>
</forms>
</summary>
<alerts>
<tooManyForms>
<eventAlert>
<message>This is max event alert 1</message>
</eventAlert>
<eventAlert>
<message>This is max event alert 2</message>
</eventAlert>
<eventAlert>
<message>This is max event alert 3</message>
</eventAlert>
</tooManyForms>
<tooManyValues>
<valuesAlert>
<message>This is multiple values alert 1</message>
</valuesAlert>
<valuesAlert>
<message>This is multiple values alert 2</message>
</valuesAlert>
<valuesAlert><message>This is multiple values alert 3</message>
</valuesAlert></tooManyValues>
</alerts>
<subjectsDetails>
<Subject><ID>59</ID>
<forms>
<form>
<form_name>cbc_Forms</form_name>
<form_count>39</form_count>
</form>
<form>
<form_name>chemistry_Forms</form_name>
<form_count>16</form_count>
</form>
</forms>
</Subject>
<Subject>
<ID>60</ID>
<forms>
<form>
<form_name>cbc_Forms</form_name>
<form_count>1</form_count></form>
<form>
<form_name>chemistry_Forms</form_name>
<form_count>1</form_count>
</form>
</forms>
</Subject>
<Subject><ID>61</ID>
<forms>
<form>
<form_name>cbc_Forms</form_name>
<form_count>2</form_count>
</form>
<form>
<form_name>chemistry_Forms</form_name>
<form_count>1</form_count>
</form>
</forms>
</Subject>
<Subject>
<ID>63</ID>
<forms>
<form>
<form_name>cbc_Forms</form_name>
<form_count>11</form_count>
</form>
<form>
<form_name>chemistry_Forms</form_name>
<form_count>4</form_count>
</form>
</forms>
</Subject>
</subjectsDetails>
<errors/>
<summaryOfSpecimenTakenTimes>
<total>15</total>
<blank>3</blank>
<percent>20.0</percent>
</summaryOfSpecimenTakenTimes>
</report>'''
        # XSD describing the structure of the report; used for schema validation
        # before the node-for-node comparison.
        self.schema_str = StringIO('''\
<xs:schema attributeFormDefault="unqualified" elementFormDefault="qualified" xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="report">
<xs:complexType>
<xs:sequence>
<xs:element name="header">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="project"/>
<xs:element type="xs:string" name="date"/>
<xs:element type="xs:string" name="redcapServerAddress"/>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="summary">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:byte" name="subjectCount"/>
<xs:element name="forms">
<xs:complexType>
<xs:sequence>
<xs:element name="form" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="form_name"/>
<xs:element type="xs:byte" name="form_count"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="alerts">
<xs:complexType>
<xs:sequence>
<xs:element name="tooManyForms">
<xs:complexType>
<xs:sequence>
<xs:element name="eventAlert" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="message"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="tooManyValues">
<xs:complexType>
<xs:sequence>
<xs:element name="valuesAlert" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="message"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="subjectsDetails">
<xs:complexType>
<xs:sequence>
<xs:element name="Subject" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:byte" name="ID"/>
<xs:element name="forms">
<xs:complexType>
<xs:sequence>
<xs:element name="form" maxOccurs="unbounded" minOccurs="0">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:string" name="form_name"/>
<xs:element type="xs:byte" name="form_count"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="errors">
</xs:element>
<xs:element name="summaryOfSpecimenTakenTimes">
<xs:complexType>
<xs:sequence>
<xs:element type="xs:byte" name="total"/>
<xs:element type="xs:byte" name="blank"/>
<xs:element type="xs:float" name="percent"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:schema>''')
        return

    def test_create_summary_report(self):
        """Generated XML must validate against the XSD and equal the expected tree."""
        sys.path.append('config')
        self.newpath = proj_root+'config'
        self.configFolderCreatedNow = False
        if not os.path.exists(self.newpath):
            self.configFolderCreatedNow = True
            os.makedirs(self.newpath)
        result = redi.create_summary_report(\
            self.test_report_params, \
            self.test_report_data, \
            self.test_alert_summary, \
            self.specimen_taken_time_summary)
        result_string = etree.tostring(result)
        #print result_string
        xmlschema_doc = etree.parse(self.schema_str)
        xml_schema = etree.XMLSchema(xmlschema_doc)
        # validate the xml against the xsd schema
        self.assertEqual(xml_schema.validate(result), True)
        # validate the actual data in xml but strip the white space first
        parser = etree.XMLParser(remove_blank_text=True)
        clean_tree = etree.XML(self.expected_xml, parser=parser)
        self.expected_xml = etree.tostring(clean_tree)
        self.assertEqual(self.expected_xml, result_string)

    def tearDown(self):
        """Remove the generated report file and any config dir created in setUp."""
        # delete the created xml file
        with open(proj_root + 'config/report.xml'):
            os.remove(proj_root + 'config/report.xml')
        if self.configFolderCreatedNow:
            os.rmdir(self.newpath)
        return

if __name__ == '__main__':
    unittest.main()
|
5,858 | aa15d51760c16181907994d329fb7ceede6a539b | import re
text = "Python is an interpreted high-level general-purpose programming language."
fiveWord = re.findall(r"\b\w{5}\b", text)
print("Following are the words with five Letters:")
for strWord in fiveWord:
print(strWord)
|
5,859 | d077f32061b87a4bfd6a0ac226730957a4000804 | ###
### Copyright 2009 The Chicago Independent Radio Project
### All Rights Reserved.
###
### Licensed under the Apache License, Version 2.0 (the "License");
### you may not use this file except in compliance with the License.
### You may obtain a copy of the License at
###
### http://www.apache.org/licenses/LICENSE-2.0
###
### Unless required by applicable law or agreed to in writing, software
### distributed under the License is distributed on an "AS IS" BASIS,
### WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
### See the License for the specific language governing permissions and
### limitations under the License.
###
"""CHIRP authentication system."""
import base64
import logging
import os
import time
from common import in_prod
from common.autoretry import AutoRetry
# TODO(trow): This is a work-around for problems with PyCrypto on the Mac.
# For more information, see
# http://code.google.com/p/googleappengine/issues/detail?id=1627
# When True, tokens are built and parsed without encryption or signing
# (see _create_security_token / _parse_security_token); dev-only.
_DISABLE_CRYPTO = False
try:
    from Crypto.Cipher import AES
    from Crypto.Hash import HMAC
except ImportError:
    # Only allow crypto to be disabled if we are running in a local
    # development environment.
    if in_prod():
        raise
    _DISABLE_CRYPTO = True
    logging.warn("PyCrypto not found! Operating in insecure mode!")
from django import http
from auth.models import User, KeyStorage
from auth import roles
# Our logout URL.
LOGOUT_URL = "/auth/goodbye/"
# Users are ultimately redirected to the URL after logging out.
_FINAL_LOGOUT_URL = '/auth/hello/'
# The name of the cookie used to store our security token.
_CHIRP_SECURITY_TOKEN_COOKIE = 'chirp_security_token'
# Our security tokens expire after 24 hours.
# TODO(kumar) set this back to two hours after
# all CHIRP volunteers have set initial password?
_TOKEN_TIMEOUT_S = 24 * 60 * 60
class UserNotAllowedError(Exception):
    """Raised when the user is recognized but forbidden from entering.

    Raised by get_current_user() for accounts whose is_active flag is False.
    """
class _Credentials(object):
    """Parsed contents of a security token (built by _parse_security_token)."""
    # Email address that was encoded in the token.
    email = None
    # True once the token has consumed more than half of _TOKEN_TIMEOUT_S.
    security_token_is_stale = False
def _create_security_token(user):
    """Create a CHIRP security token.

    Args:
      user: A User object.

    Returns:
      A string containing an encrypted security token that encodes
      the user's email address as well as a timestamp.
    """
    # Token plaintext is "<hex timestamp> <email>"; _parse_security_token
    # splits it back apart on the space.
    timestamp = int(time.time())
    plaintext = "%x %s" % (timestamp, user.email)
    nearest_mult_of_16 = 16 * ((len(plaintext) + 15) // 16)
    # Pad plaintest with whitespace to make the length a multiple of 16,
    # as this is a requirement of AES encryption.
    plaintext = plaintext.rjust(nearest_mult_of_16, ' ')
    if _DISABLE_CRYPTO:
        # Dev-only fallback: plaintext body with a dummy signature.
        body = plaintext
        sig = "sig"
    else:
        key_storage = KeyStorage.get()
        # NOTE(review): AES.new() relies on PyCrypto's default IV here; the
        # decrypt side in _parse_security_token uses the same defaults.
        body = AES.new(key_storage.aes_key, AES.MODE_CBC).encrypt(plaintext)
        hmac_key = key_storage.hmac_key
        if type(hmac_key) == unicode:
            # Crypto requires byte strings
            hmac_key = hmac_key.encode('utf8')
        sig = HMAC.HMAC(key=hmac_key, msg=body).hexdigest()
    # Wire format is "<hex hmac sig>:<encrypted body>".
    return '%s:%s' % (sig, body)
def _parse_security_token(token):
    """Parse a CHIRP security token.

    Returns:
      A Credentials object, or None if the token is not valid.
      If a Credentials object is returned, its "user" field will not
      be set.
    """
    if not token:
        return None
    # Wire format (see _create_security_token) is "<sig>:<body>".
    if ':' not in token:
        logging.warn('Malformed token: no signature separator')
        return None
    sig, body = token.split(':', 1)
    if _DISABLE_CRYPTO:
        plaintext = body
    else:
        key_storage = KeyStorage.get()
        hmac_key = key_storage.hmac_key
        if type(hmac_key) == unicode:
            # Crypto requires byte strings
            hmac_key = hmac_key.encode('utf8')
        # Verify the HMAC signature before attempting to decrypt.
        computed_sig = HMAC.HMAC(key=hmac_key,
                                 msg=body).hexdigest()
        if sig != computed_sig:
            logging.warn('Malformed token: invalid signature')
            return None
        try:
            plaintext = AES.new(key_storage.aes_key,
                                AES.MODE_CBC).decrypt(body)
        except ValueError:
            logging.warn('Malformed token: wrong size')
            return None
    # Remove excess whitespace (the creation side right-pads with spaces).
    plaintext = plaintext.strip()
    # The plaintext should contain at least one space.
    if ' ' not in plaintext:
        logging.warn('Malformed token: bad contents')
        return None
    parts = plaintext.split(' ')
    if len(parts) != 2:
        logging.warn('Malformed token: bad structure')
        return None
    timestamp, email = parts
    try:
        # Timestamp was serialized as hex ("%x") on the creation side.
        timestamp = int(timestamp, 16)
    except ValueError:
        logging.warn('Malformed token: bad timestamp')
        return None
    # Reject tokens that are too old or which have time-traveled. We
    # allow for 1s of clock skew.
    age_s = time.time() - timestamp
    if age_s < -1 or age_s > _TOKEN_TIMEOUT_S:
        logging.warn('Malformed token: expired (age=%ds)', age_s)
        return None
    cred = _Credentials()
    cred.email = email
    # Past half of the lifetime: callers can use this to reissue the token.
    cred.security_token_is_stale = (age_s > 0.5 * _TOKEN_TIMEOUT_S)
    return cred
def attach_credentials(response, user):
    """Attach a freshly minted security token for *user* to *response*.

    Args:
      response: An HttpResponse object.
      user: A User object.
    """
    token = _create_security_token(user)
    response.set_cookie(_CHIRP_SECURITY_TOKEN_COOKIE, token)
def get_current_user(request):
    """Get the current logged-in user's.

    Returns:
      A User object, or None if the user is not logged in.

    Raises:
      UserNotAllowedError if the user is prohibited from accessing
      the site.
    """
    # First try the security-token cookie.
    cred = None
    token = request.COOKIES.get(_CHIRP_SECURITY_TOKEN_COOKIE)
    if token:
        cred = _parse_security_token(token)
    # If this is a POST, look for a base64-encoded security token in
    # the CHIRP_Auth variable.
    if cred is None and request.method == 'POST':
        token = request.POST.get("CHIRP_Auth")
        if token:
            try:
                token = base64.urlsafe_b64decode(token)
            except TypeError:
                token = None
            if token:
                cred = _parse_security_token(token)
    # No valid token? This is hopeless!
    if cred is None:
        return None
    # Try to find a user for this email address.
    user = User.get_by_email(cred.email)
    if user is None:
        return None
    # Reject inactive users.
    if not user.is_active:
        logging.info('Rejected inactive user %s', user.email)
        raise UserNotAllowedError
    # Stash the parsed credentials on the user for downstream consumers.
    user._credentials = cred
    return user
def create_login_url(path):
    """Returns the URL of a login page that redirects to 'path' on success."""
    return "/auth/hello?redirect={0}".format(path)
def logout(redirect=None):
    """Create an HTTP response that will log a user out.

    The optional *redirect* is a relative URL carried through the logout
    page so the user lands back on the same page after logging in again --
    useful for switching users, e.g. on the playlist tracker page.

    Returns:
      An HttpResponse object that will log the user out.
    """
    # Send the browser to the goodbye page and blank out the token cookie.
    destination = _FINAL_LOGOUT_URL
    if redirect:
        destination = '%s?redirect=%s' % (destination, redirect)
    response = http.HttpResponseRedirect(destination)
    response.set_cookie(_CHIRP_SECURITY_TOKEN_COOKIE, '')
    return response
def get_password_reset_token(user):
    """A URL-safe token that authenticates a user for a password reset.

    Reuses the login security token, so it inherits the _TOKEN_TIMEOUT_S
    expiry enforced by _parse_security_token.
    """
    return base64.urlsafe_b64encode(_create_security_token(user))
def parse_password_reset_token(token):
    """Extracts an email address from a valid password reset token."""
    try:
        decoded = base64.urlsafe_b64decode(str(token))
    except TypeError:
        # Not valid base64 -- treat as an invalid token.
        return None
    credentials = _parse_security_token(decoded)
    return credentials and credentials.email
|
5,860 | f882b73645c6a280a17f40b27c01ecad7e4d85ae | import logging
from datetime import datetime
import boto3
from pytz import timezone
from mliyweb.api.v1.api_session_limiter import session_is_okay
from mliyweb.api.v1.json_view import JsonView
from mliyweb.dns import deleteDnsEntry
from mliyweb.models import Cluster
from mliyweb.resources.clusters import ClusterService
from mliyweb.settings import AWS_REGION
from mliyweb.utils import log_enter_exit
class UserGroupClusters(JsonView):
    '''
    Returns a json struct with the current clusters. If the last updated
    time in the db is greater than the timeout, it returns the current data
    and launches a background thread to refresh and prune the cluster list.
    If called with ?forcerefresh as a url argument it'll refresh regardless
    of the last updated time.
    '''
    logger = logging.getLogger('mliyweb.views.UserClusters')
    cluster_service = ClusterService()

    # global instance refresh time stamp
    @log_enter_exit(logger)
    def get_data(self, context):
        # Clusters visible to the requesting user's groups.  The session
        # rate-limiter decides between a refresh and a cached read.  Errors
        # are logged and swallowed, returning [] so the view degrades
        # gracefully instead of raising.
        user = self.request.user
        try:
            if session_is_okay(self.request.session, "group_clusters"):
                self.logger.info("Updating clusters in database")
                return self.cluster_service.update_by_user_group(user)
            else:
                self.logger.info("Getting clusters from database")
                return self.cluster_service.get_by_user_group(user)
        except Exception as e:
            self.logger.exception(e)
            return []
class UserClusters(JsonView):
    """JSON endpoint returning the clusters owned by the requesting user.

    Refreshes the database copy only when the session rate-limiter allows;
    otherwise serves the cached rows.  Unlike the group view, failures here
    propagate to the caller.
    """
    # TODO There needs to be a Cluster Launch thread cleanup/rework
    logger = logging.getLogger('mliyweb.views.UserClusters')
    cluster_service = ClusterService()

    @log_enter_exit(logger)
    def get_data(self, context):
        requesting_user = self.request.user.username
        try:
            refresh_allowed = session_is_okay(self.request.session, "user_clusters")
            if refresh_allowed:
                self.logger.info("Updating clusters in database")
                return self.cluster_service.update_by_user(requesting_user)
            self.logger.info("Getting clusters from database")
            return self.cluster_service.get_by_user(requesting_user)
        except Exception as e:
            self.logger.exception(e)
            raise
class SingleCluster(JsonView):
    """JSON endpoint for one cluster, addressed by the `pk` URL kwarg.

    Same refresh-or-cache pattern as the list views; failures propagate.
    """
    logger = logging.getLogger('mliyweb.views.SingleCluster')
    cluster_service = ClusterService()

    @log_enter_exit(logger)
    def get_data(self, context):
        requested_id = self.kwargs['pk']
        try:
            refresh_allowed = session_is_okay(self.request.session, "user_clusters")
            if refresh_allowed:
                self.logger.info("Updating clusters in database")
                return self.cluster_service.update_single_cluster(requested_id)
            self.logger.info("Getting clusters from database")
            return self.cluster_service.get_single_cluster(requested_id)
        except Exception as e:
            self.logger.exception(e)
            raise
class ChangeClusterState(JsonView):
    """Terminates a cluster: deletes its CloudFormation stack, closes the
    open billing record, removes its DNS entry and marks it TERMINATED."""
    log = logging.getLogger('mliyweb.views.ChangeClusterState')
    cluster_service = ClusterService()

    @log_enter_exit(log, log_level=10)
    def get_data(self,context):
        client = boto3.client('cloudformation', region_name=AWS_REGION)
        cluster = Cluster.objects.get(cluster_id = self.kwargs['clusterid'])
        client.delete_stack(StackName=cluster.stack_id)
        if cluster.current_bill:
            # Close the open billing period at the moment of termination.
            cluster.current_bill.ongoing = False
            cluster.current_bill.end_time = datetime.now(timezone('UTC'))
            cluster.current_bill.save()
        # NOTE(review): both branches issue the identical deleteDnsEntry
        # call; only the state transition differs.  Looks collapsible into a
        # single unconditional call -- confirm against version history.
        if cluster.state == 'TERMINATED' or cluster.state == 'FAILED':
            deleteDnsEntry(cluster.cluster_id,cluster.master_ip)
        else:
            deleteDnsEntry(cluster.cluster_id,cluster.master_ip)
            cluster.state = "TERMINATED"
            cluster.save()
        return { 'action' : 'terminate', 'status' : 'ok'}
5,861 | c80ae9d2eb07fd716a80a5e2d7b5237925fda02c | # Pose estimation and object detection: OpenCV DNN, ImageAI, YOLO, mpi, caffemodel, tensorflow
# Authors:
# Tutorial by: https://learnopencv.com/deep-learning-based-human-pose-estimation-using-opencv-cpp-python/
# Model file links collection (replace .sh script): Twenkid
# http://posefs1.perception.cs.cmu.edu/OpenPose/models/pose/mpi/pose_iter_160000.caffemodel
#https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/models/pose/mpi/pose_deploy_linevec_faster_4_stages.prototxt
# ImageAI: https://github.com/OlafenwaMoses/ImageAI
# # YOLOv3:
# yolo.h5
# https://github-releases.githubusercontent.com/125932201/1b8496e8-86fc-11e8-895f-fefe61ebb499?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20210813%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20210813T002422Z&X-Amz-Expires=300&X-Amz-Signature=02e6839be131d27b142baf50449d021339cbb334eed67a114ff9b960b8beb987&X-Amz-SignedHeaders=host&actor_id=23367640&key_id=0&repo_id=125932201&response-content-disposition=attachment%3B%20filename%3Dyolo.h5&response-content-type=application%2Foctet-stream
# yolo-tiny.h5
# https://github-releases.githubusercontent.com/125932201/7cf559e6-86fa-11e8-81e8-1e959be261a8?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20210812%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20210812T232641Z&X-Amz-Expires=300&X-Amz-Signature=a5b91876c83b83a6aafba333c63c5f4a880bea9a937b30e52e92bbb0ac784018&X-Amz-SignedHeaders=host&actor_id=23367640&key_id=0&repo_id=125932201&response-content-disposition=attachment%3B%20filename%3Dyolo-tiny.h5&response-content-type=application%2Foctet-stream
# Todor Arnaudov - Twenkid: debug and merging, LearnOpenCV python code had a few misses, 13.8.2021
# It seems the pose model expects only one person so the image must be segmented first! pose1.jpg
# Detect with YOLO or ImageAI etc. then use DNN
# Specify the paths for the 2 files
# I tried with yolo-tiny, but the accuracy of the bounding boxes didn't seem acceptable.
#tf 1.15 for older versions of ImageAI - but tf doesn't support Py 3.8
#ImageAI: older versions require tf 1.x
#tf 2.4 - required by ImageAI 2.1.6 -- no GPU supported on Win 7, tf requires CUDA 11.0 (Win10). Win7: CUDA 10.x. CPU: works
# Set the paths to models, images etc.
# My experiments results: disappointingly bad pose estimation on the images I tested. Sometimes good, sometimes terrible.
import cv2
import tensorflow.compat.v1 as tf
from imageai.Detection import ObjectDetection
import os
# Collected bounding boxes (unused in this script version).
boxes = []

def yolo():
    """Run ImageAI YOLOv3 object detection over a hard-coded input image.

    Writes an annotated copy next to the input and returns a tuple of
    (detections, input_path).  Model file, image name and the probability
    floor are hard-coded for the author's machine (Z:\\ drive).
    """
    #name = "k.jpg"
    root = "Z:\\"
    name = "23367640.png" #t.jpg" #"p1.jpg" #"2w.jpg" #"grigor.jpg" #"2w.jpg" #"pose1.webp" #1.jpg"
    execution_path = os.getcwd()
    yolo_path = "Z:\\yolo.h5"
    #yolo_path = "Z:\\yolo-tiny.h5"
    localdir = False
    detector = ObjectDetection()
    detector.setModelTypeAsYOLOv3()
    #detector.setModelTypeAsTinyYOLOv3()
    if localdir:
        detector.setModelPath(os.path.join(execution_path , yolo_path))
    else:
        detector.setModelPath(yolo_path)
    #dir(detector)
    detector.loadModel()
    #loaded_model = tf.keras.models.load_model("./src/mood-saved-models/"model + ".h5")
    #loaded_model = tf.keras.models.load_model(detector.)
    #path = "E:\capture_023_29092020_150305.jpg" #IMG_20200528_044908.jpg"
    #pathOut = "E:\YOLO_capture_023_29092020_150305.jpg"
    #path = "pose1.webp" #E:\\capture_046_29092020_150628.jpg"
    pathOut = "yolo_out_2.jpg"
    path = root + name
    pathOut = root + name + "yolo_out" + ".jpg"
    # Low minimum probability (10%) so weak detections are kept for review.
    detections = detector.detectObjectsFromImage(input_image=os.path.join(execution_path , path), output_image_path=os.path.join(execution_path , pathOut), minimum_percentage_probability=10) #30)
    for eachObject in detections:
        print(eachObject["name"] , " : ", eachObject["percentage_probability"], " : ", eachObject["box_points"] )
        print("--------------------------------")
    return detections, path
# --- Stage 1: person detection, then crop each confident person region. ---
det,path = yolo()
yoloImage = cv2.imread(path) #crop regions from it
for i in det:
    print(i)
# MPI pose model files (prototxt + caffemodel), hard-coded local paths.
protoFile = "Z:\\pose\\mpi\\pose_deploy_linevec_faster_4_stages.prototxt"
#protoFile = "pose_deploy_linevec_faster_4_stages.prototxt"
#weightsFile = "Z:\\pose\\mpi\\pose_iter_440000.caffemodel"
weightsFile = "Z:\\pose\\mpi\\pose_iter_160000.caffemodel"
#weightsFile = "pose_iter_160000.caffemodel"
#weightsFile = "pose_iter_440000.caffemodel"
# Read the network into Memory
net = cv2.dnn.readNetFromCaffe(protoFile, weightsFile)
# Sample YOLO output from a previous run, kept for reference:
"""
{'name': 'person', 'percentage_probability': 99.86668229103088, 'box_points': [1
8, 38, 153, 397]}
{'name': 'person', 'percentage_probability': 53.89136075973511, 'box_points': [3
86, 93, 428, 171]}
{'name': 'person', 'percentage_probability': 11.339860409498215, 'box_points': [
585, 99, 641, 180]}
{'name': 'person', 'percentage_probability': 10.276197642087936, 'box_points': [
126, 178, 164, 290]}
{'name': 'person', 'percentage_probability': 99.94878768920898, 'box_points': [2
93, 80, 394, 410]}
{'name': 'person', 'percentage_probability': 99.95986223220825, 'box_points': [4
78, 88, 589, 410]}
{'name': 'person', 'percentage_probability': 67.95878410339355, 'box_points': [1
, 212, 39, 300]}
{'name': 'person', 'percentage_probability': 63.609880208969116, 'box_points': [
153, 193, 192, 306]}
{'name': 'person', 'percentage_probability': 23.985233902931213, 'box_points': [
226, 198, 265, 308]}
{'name': 'sports ball', 'percentage_probability': 20.820775628089905, 'box_point
s': [229, 50, 269, 94]}
{'name': 'person', 'percentage_probability': 40.28712213039398, 'box_points': [4
23, 110, 457, 160]}
H, W, Ch 407 211 3
"""
yolo_thr = 70 #in percents, not 0.7
collected = []
bWiden = False
# Crop every confident 'person' box; the pose model expects one person per
# image, so these crops are what gets fed to Detect() later.
for d in det:
    if (d['name'] == 'person') and d['percentage_probability'] > yolo_thr:
        x1,y1,x2,y2 = d['box_points']
        if bWiden:
            # Optionally pad the box to keep limbs inside the crop.
            x1-=20
            x2+=20
            y1-=30
            y2+=30
        cropped = yoloImage[y1:y2, x1:x2]
        cv2.imshow(d['name']+str(x1), cropped)
        collected.append(cropped) #or copy first?
cv2.waitKey()
#x1,y1, ...
# for i in collected: cv2.imshow("COLLECTED?", i); cv2.waitKey() #OK
# --- Stage 2: run the pose network once on the whole frame for comparison. ---
# Read image
#frame = cv2.imread("Z:\\23367640.png") #1.jpg")
#src = "Z:\\2w.jpg" #z:\\pose1.webp" #nacep1.jpg"
#src = "z:\\pose1.webp"
srcs = ["z:\\pose1.webp","Z:\\2w.jpg", "Z:\\grigor.jpg"]
id = 2
#src = srcs[2]
src = path #from first yolo, in order to compare
frame = cv2.imread(src)
cv2.imshow("FRAME"+src, frame)
#frameWidth, frameHeight, _ = frame.shape
frameHeight, frameWidth, ch = frame.shape
print("H, W, Ch", frameHeight, frameWidth, ch)
# Specify the input image dimensions
inWidth = 368 #184 #368
inHeight = 368 #184 #368
# Prepare the frame to be fed to the network
inpBlob = cv2.dnn.blobFromImage(frame, 1.0 / 255, (inWidth, inHeight), (0, 0, 0), swapRB=False, crop=False)
#cv2.imshow("G", inpBlob) #unsupported
#cv2.waitKey(0)
# Set the prepared object as the input blob of the network
net.setInput(inpBlob)
print(inpBlob)
output = net.forward()
print(output)
print("========")
H = output.shape[2]
W = output.shape[3]
# Empty list to store the detected keypoints
points = []
threshold = 0.3
maxKeypoints = 44
Keypoints = output.shape[1]
print("Keypoints from output?", Keypoints)
Keypoints = 15 #MPI ... returns only 15
labels = ["Head", "Neck", "Right Shoulder", "Right Elbow", "Right Wrist", "Left Shoulder", "Left Elbow", "Left Wrist", "Right Hip", "Right Knee", "Right Ankle", "Left Hip", "Left Knee", "Left Ankle", "Chest", "Background"]
#for i in range(len()):
for i in range(Keypoints): #?
    # confidence map of corresponding body's part.
    probMap = output[0, i, :, :]
    # Find global maxima of the probMap.
    minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)
    # Scale the point to fit on the original image
    x = (frameWidth * point[0]) / W
    y = (frameHeight * point[1]) / H
    if prob > threshold :
        cv2.circle(frame, (int(x), int(y)), 5, (0, 255, 255), thickness=-1, lineType=cv2.FILLED)
        cv2.putText(frame, "{}".format(i), (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)
        # Add the point to the list if the probability is greater than the threshold
        print(i, labels[i])
        print(x, y)
        points.append((int(x), int(y)))
    else :
        points.append(None)
print(points)
cv2.imshow("Output-Keypoints",frame)
def Detect(image): #inWidth, Height ... - global, set as params later
    """Run the MPI pose network on one image crop and display the keypoints.

    Draws each keypoint with confidence above `threshold` onto *image* and
    shows it in a window.  Relies on the module-level `net`, `inWidth`,
    `inHeight` globals set above; duplicates the whole-frame stage almost
    verbatim (candidate for refactoring).
    """
    frameHeight, frameWidth, ch = image.shape
    # Prepare the image to be fed to the network
    inpBlob = cv2.dnn.blobFromImage(image, 1.0 / 255, (inWidth, inHeight), (0, 0, 0), swapRB=False, crop=False)
    #cv2.imshow("G", inpBlob) #unsupported
    #cv2.waitKey(0)
    # Set the prepared object as the input blob of the network
    net.setInput(inpBlob)
    print(inpBlob)
    output = net.forward()
    print(output)
    print("========")
    H = output.shape[2]
    W = output.shape[3]
    # Empty list to store the detected keypoints
    points = []
    # Lower threshold than the whole-frame pass (0.1 vs 0.3).
    threshold = 0.1
    maxKeypoints = 44
    Keypoints = output.shape[1]
    print("Keypoints from output?", Keypoints)
    Keypoints = 15 #MPI ... returns only 15
    labels = ["Head", "Neck", "Right Shoulder", "Right Elbow", "Right Wrist", "Left Shoulder", "Left Elbow", "Left Wrist", "Right Hip", "Right Knee", "Right Ankle", "Left Hip", "Left Knee", "Left Ankle", "Chest", "Background"]
    #for i in range(len()):
    for i in range(Keypoints): #?
        # confidence map of corresponding body's part.
        probMap = output[0, i, :, :]
        # Find global maxima of the probMap.
        minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)
        # Scale the point to fit on the original image
        x = (frameWidth * point[0]) / W
        y = (frameHeight * point[1]) / H
        if prob > threshold :
            cv2.circle(image, (int(x), int(y)), 5, (0, 255, 255), thickness=-1, lineType=cv2.FILLED)
            cv2.putText(image, "{}".format(i), (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)
            # Add the point to the list if the probability is greater than the threshold
            print(i, labels[i])
            print(x, y)
            points.append((int(x), int(y)))
        else :
            points.append(None)
    print(points)
    cv2.imshow("Output-Keypoints",image)
    cv2.waitKey()

# Run pose estimation on every person crop collected above.
for i in collected: Detect(i)
cv2.waitKey(0)
cv2.destroyAllWindows()
5,862 | 7d2335c956776fc5890a727d22540eabf2ea4b94 | umur = raw_input("Berapakah umurmu?")
tinggi = raw_input("Berapakah tinggimu?")
berat = raw_input("Berapa beratmu?")
print "Jadi, umurmu adalah %r, tinggumu %r, dan beratmu %r." % (umur, tinggi, berat)
|
5,863 | 84d154afe206fd2c7381a2203affc162c28e21c1 | import sqlite3
from flask_restful import Resource, reqparse
from flask_jwt import JWT, jwt_required
#import base64
import datetime
import psycopg2
class User:
    """Plain data holder for one row of the `users` table, with lookup helpers."""

    def __init__(self, _id, username, password, user_name, address, contact):
        self.id = _id
        self.username = username
        self.password = password
        self.user_name = user_name
        self.address = address
        self.contact = contact

    @classmethod
    def _find_by_column(cls, column, value):
        """Return the first user whose *column* equals *value*, or None.

        *column* must be a trusted identifier (never user input) because it
        is interpolated into the SQL text; *value* itself is parameterized.
        The connection is closed even if the query raises (the original
        copies of this logic leaked the handle on error).
        """
        connection = sqlite3.connect('user.db')
        try:
            cursor = connection.cursor()
            query = "SELECT * FROM users WHERE {}=?".format(column)
            row = cursor.execute(query, (value,)).fetchone()
            return cls(*row) if row is not None else None
        finally:
            connection.close()

    @classmethod
    def find_by_username(cls, username):
        """Look up a user by username; returns a User or None."""
        return cls._find_by_column('username', username)

    @classmethod
    def find_by_id(cls, _id):
        """Look up a user by primary key; returns a User or None."""
        return cls._find_by_column('id', _id)
class PresOrder(Resource):
    """POST endpoint that stores a prescription order (username + image data)."""
    parser = reqparse.RequestParser()
    parser.add_argument('username', type=str, required=True,
                        help="This field cannot be left blank.")
    parser.add_argument('pres', type=str, required=True,
                        help="This field cannot be left blank.")

    #@jwt_required()
    def post(self):
        """Insert a new row into `presorder` and acknowledge with HTTP 200."""
        payload = PresOrder.parser.parse_args()
        db = sqlite3.connect('order.db')
        db.cursor().execute("INSERT INTO presorder VALUES (NULL, ?, ?, 0)",
                            (payload['username'], payload['pres']))
        db.commit()
        db.close()
        return True, 200
class LandmarkAdd(Resource):
    """POST endpoint that validates landmark fields and echoes them back.

    The PostgreSQL insert into the Landmark table is currently disabled;
    the handler only parses, prints and acknowledges the payload.
    """
    parser = reqparse.RequestParser()
    parser.add_argument('landmark_name', type=str, required=True,
                        help="This field cannot be left blank.")
    parser.add_argument('landmark_type', type=str, required=True,
                        help="This field cannot be left blank.")
    parser.add_argument('latitude', type=float, required=True,
                        help="This field cannot be left blank.")
    parser.add_argument('longitude', type=float, required=True,
                        help="This field cannot be left blank.")

    #@jwt_required()
    def post(self):
        """Parse the landmark payload, log it, and return HTTP 200."""
        payload = LandmarkAdd.parser.parse_args()
        print(payload)
        return True, 200
5,864 | 3bf1b4cfce55820605653d9dc57bab839f2dea55 | #!/usr/bin/env python
###############################################################################
# \file
#
# $Id:$
#
# Copyright (C) Brno University of Technology
#
# This file is part of software developed by Robo@FIT group.
#
# Author: Tomas Lokaj
# Supervised by: Michal Spanel (spanel@fit.vutbr.cz)
# Date: 12/09/2012
#
# This file is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this file. If not, see <http://www.gnu.org/licenses/>.
#
import roslib; roslib.load_manifest('srs_interaction_primitives')
import rospy
import actionlib
from std_msgs.msg import *
from visualization_msgs.msg import *
from geometry_msgs.msg import *
from srs_interaction_primitives.msg import *
import random
import time
from srs_interaction_primitives.srv import ClickablePositions
if __name__ == '__main__':
    # Demo client for the clickable-positions action server: builds a goal
    # with a random colour, radius and 2-10 random points, sends it and
    # waits up to 50 s for the result.
    rospy.init_node('clickable_positions_action_client', anonymous=True)
    #===========================================================================
    # Earlier service-based variant kept for reference (superseded by the
    # actionlib client below):
    # rospy.wait_for_service('interaction_primitives/clickable_positions')
    # click_positions = rospy.ServiceProxy('interaction_primitives/clickable_positions', ClickablePositions)
    #
    # color = ColorRGBA()
    # color.r = random.uniform(0, 1)
    # color.g = random.uniform(0, 1)
    # color.b = random.uniform(0, 1)
    # color.a = 1;
    #
    # radius = random.uniform(0, 1)
    #
    # positions = []
    # for i in range(0, random.randint(2, 10)):
    #     positions.append(Point(random.uniform(-10.0, 10.0), random.uniform(-10.0, 10.0), random.uniform(-10.0, 10.0)))
    #
    # frame_id = "/world"
    #
    # topic = str(random.randint(0, 10000))
    #
    # resp = click_positions(frame_id, topic, radius, color, positions)
    #
    #===========================================================================
    client = actionlib.SimpleActionClient("clickable_positions_server", ClickablePositionsAction)
    client.wait_for_server()
    rospy.loginfo("Server ready")
    goal = ClickablePositionsGoal()
    # Random, fully opaque marker colour.
    color = ColorRGBA()
    color.r = random.uniform(0, 1)
    color.g = random.uniform(0, 1)
    color.b = random.uniform(0, 1)
    color.a = 1;
    goal.topic_suffix = str(random.randint(0, 10000))
    goal.color = color
    goal.radius = random.uniform(0, 1)
    # Between 2 and 10 random positions inside a 20 m cube around the origin.
    for i in range(0, random.randint(2, 10)):
        goal.positions.append(Point(random.uniform(-10.0, 10.0), random.uniform(-10.0, 10.0), random.uniform(-10.0, 10.0)))
    goal.frame_id = "/world"
    # Fill in the goal here
    client.send_goal(goal)
    client.wait_for_result(rospy.Duration.from_sec(50.0))
    if client.get_state() == 3:  # 3 == actionlib GoalStatus.SUCCEEDED
        rospy.loginfo("Goal completed:")
        print client.get_result()  # NOTE: Python 2 print statement
    else:
        rospy.logwarn("Action was preempted")
|
5,865 | 3f3ed40bf800eddb2722171d5fd94f6c292162de | #!/usr/bin/env python
import sys
def trim_reads(fastq, selection, extra_cut, orientation, output, outputType,
               seqLen, trim):
    """Trim selected reads at per-read positions and write them out.

    Parameters:
        fastq       -- path to the input FASTQ file.
        selection   -- path to a file with one read id per line (the reads
                       that did not align against KoRV and must be kept).
        extra_cut   -- extra bases to cut beyond the reported trim position.
        orientation -- 3 or 5: which end of the read the positions refer to.
        output      -- path of the file to write.
        outputType  -- 'fasta' writes FASTA records, anything else FASTQ.
        seqLen      -- length of the written sequence; shorter reads are
                       dropped.
        trim        -- tab-separated trimming info, "<read name>\t<position>"
                       lines, optionally preceded by a "read name\t..." header.
    """
    # Ids of reads to keep; a set makes the per-read membership test O(1)
    # instead of the original list's O(n) scan.
    ids = set()
    with open(selection, 'r') as f:
        for line in f:
            ids.add(line.strip())
    # Trimming position for each read id.
    trim_pros = {}
    for line in trim.split('\n'):
        if len(line):
            line = line.split('\t')
            if (line[0] == 'read name'):
                # Header row: the reported column must match the requested
                # orientation (3' trimming uses the end, 5' the start).
                if (line[1] == 'end position' and orientation != 3) or \
                   (line[1] == 'start position' and orientation != 5):
                    # BUG FIX: the message claimed "3'" twice and lacked a
                    # separating space between the concatenated parts.
                    print('Wrong setting! 3\' trimming needs the end position '
                          'and 5\' trimming needs the start position.')
                    sys.exit()
            else:
                trim_pros[line[0]] = int(line[1])
    # Copy a read to the output if:
    # 1. its id is selected (did not align against KoRV),
    # 2. all four FASTQ lines are present, and
    # 3. after trimming the sequence is still at least seqLen bases long.
    with open(output, 'w') as o:
        with open(fastq, 'r') as f:
            while True:
                identifier = f.readline()
                sequence = f.readline()
                plus = f.readline()
                quality = f.readline()
                if not identifier or not sequence or \
                        not plus or not quality:
                    break
                read_id = identifier.strip()[1:].split(' ')[0]
                if read_id in ids:
                    if read_id in trim_pros:
                        if (orientation == 3):
                            # Keep seqLen bases starting after the cut point.
                            cut = trim_pros[read_id] + extra_cut
                            sequence = sequence[cut:(cut + seqLen)].strip()
                            quality = quality[cut:(cut + seqLen)].strip()
                        if (orientation == 5):
                            # Keep up to seqLen bases ending at the cut point.
                            cut = trim_pros[read_id] - extra_cut
                            sequence = sequence[max(cut - seqLen, 0):cut]
                            quality = quality[max(cut - seqLen, 0):cut]
                    if (len(sequence) >= seqLen):
                        if (outputType == 'fasta'):
                            o.write('>' + identifier[1:])
                            o.write(sequence[:seqLen] + '\n')
                        else:
                            o.write(identifier)
                            o.write(sequence[:seqLen] + '\n')
                            o.write(plus)
                            o.write(quality[:seqLen] + '\n')
#############
#   MAIN    #
#############
def main():
    """Entry point: read trimming info from stdin, validate argv, run trim_reads."""
    trim_info = sys.stdin.read()
    if len(sys.argv) <= 7:
        # Not enough positional arguments: print usage and bail out.
        print("trim_reads.py [fastq] [selection] [extracut] [orientation] "
              "[output] [format] [maxlen] < [trimming-info]")
        sys.exit()
    trim_reads(sys.argv[1], sys.argv[2], int(sys.argv[3]),
               int(sys.argv[4]), sys.argv[5], sys.argv[6],
               int(sys.argv[7]), trim_info)
if __name__ == "__main__":
    main()
|
5,866 | 8ec257d5dfe84e363e3c3aa5adee3470c20d1765 | import sys
import time
import numpy as np
import vii
import cnn
from cnn._utils import (FLOAT_DTYPE,
_multi_convolve_image,
_opencl_multi_convolve_image,
_relu_max_pool_image,
_opencl_relu_max_pool_image)
GROUPS = 25, 20, 1
def subsample(x, pool_size):
    """Keep one sample per pool_size x pool_size tile of the first two axes.

    Resolves the original TODO ("Make sure it works with pool size > 2"):
    the trailing remainder of each axis is dropped, then every pool_size-th
    element is kept. For pool_size == 2 — the only size used in this script —
    the result is identical to the previous hard-coded stride of 2.
    """
    dx, dy = [int(p) for p in pool_size * (np.array(x.shape[0:2]) // pool_size)]
    return x[:dx:pool_size, :dy:pool_size]
def probe_time(func):
    """Decorator that prints the wall-clock duration of each call to *func*."""
    # Local import so the decorator stays self-contained without touching the
    # file's import block.
    import functools

    @functools.wraps(func)  # preserve the wrapped function's name/docstring
    def wrapper(*args, **kwargs):
        t0 = time.time()
        res = func(*args, **kwargs)
        dt = time.time() - t0
        print('Time (%s): %f' % (func.__name__, dt))
        return res
    return wrapper
@probe_time
def cpu_multi_convolve_image(*args):
    # Timed wrapper around the pure-CPU convolution kernel.
    return _multi_convolve_image(*args)
@probe_time
def cpu_relu_max_pool_image(*args):
    # Timed wrapper around the pure-CPU ReLU + max-pool kernel.
    return _relu_max_pool_image(*args)
@probe_time
def opencl_multi_convolve_image(*args):
    # Timed wrapper around the OpenCL convolution kernel.
    return _opencl_multi_convolve_image(*args)
@probe_time
def opencl_relu_max_pool_image(*args):
    # Timed wrapper around the OpenCL ReLU + max-pool kernel.
    return _opencl_relu_max_pool_image(*args)
###########################################################################
# Script configuration. Optional CLI arguments:
#   argv[1] = input image file, argv[2] = trained model file,
#   argv[3] = OpenCL device id (a negative id selects the CPU path).
fimg = 'pizza.png'
fmod = 'feb2.h5'
device = 0
brute_force = False  # NOTE(review): unused in this chunk
if len(sys.argv) > 1:
    fimg = sys.argv[1]
if len(sys.argv) > 2:
    fmod = sys.argv[2]
if len(sys.argv) > 3:
    device = int(sys.argv[3])
    if device < 0:
        device = None  # None means "no OpenCL device": use the CPU kernels
img = vii.load_image(fimg)
classif = cnn.load_image_classifier(fmod)
def multi_convolve_image(data, kernel, bias, dil_x, dil_y):
    """Dispatch a multi-channel convolution to the CPU or OpenCL backend.

    BUG FIX: the CLI normalises a negative device id to None, so the old
    `device < 0` test could never select the CPU path and raised TypeError
    on None under Python 3; compare against None instead.
    """
    if device is None:
        return cpu_multi_convolve_image(data, kernel, bias, dil_x, dil_y)
    else:
        return opencl_multi_convolve_image(data, kernel, bias, dil_x, dil_y, device, *(GROUPS[0:2]))
def relu_max_pool_image(data, size_x, size_y, dil_x, dil_y):
    """Dispatch ReLU + max-pooling to the CPU or OpenCL backend.

    BUG FIX: same as multi_convolve_image — a negative device id becomes
    None at the CLI, so `device < 0` never matched (and `None < 0` raises
    TypeError on Python 3); compare against None instead.
    """
    if device is None:
        return cpu_relu_max_pool_image(data, size_x, size_y, dil_x, dil_y)
    else:
        return opencl_relu_max_pool_image(data, size_x, size_y, dil_x, dil_y, device, *GROUPS)
###########################################################################
print('CNN test')
# Pick a random crop matching the classifier's expected input size and
# normalise the pixel values to [0, 1].
x = np.random.randint(img.dims[0] - classif.image_size[0] + 1)
y = np.random.randint(img.dims[1] - classif.image_size[1] + 1)
data = img.get_data().astype(FLOAT_DTYPE)[x:(x + classif.image_size[0]), y:(y + classif.image_size[1])] / 255
# Reference output from the classifier's own implementation.
gold = classif.run(data)
# Re-run the forward pass manually with the timed kernels: convolution
# (border cropped), then ReLU + max-pool and 2x subsampling per conv layer.
flow = data
for i in range(len(classif.conv_filters)):
    kernel, bias = classif.get_weights(i)
    flow = multi_convolve_image(flow, kernel, bias, 1, 1)[1:-1, 1:-1, :]
    flow = subsample(relu_max_pool_image(flow, classif.pool_size, classif.pool_size, 1, 1), 2)
flow = flow.flatten()
# Dense layers: affine transform, ReLU on all but the last layer.
for i in range(len(classif.conv_filters), len(classif.layers)):
    kernel, bias = classif.get_weights(i)
    flow = np.sum(kernel * np.expand_dims(flow, 1), 0) + bias
    if i < (len(classif.layers) - 1):
        flow = np.maximum(flow, 0)
silver = cnn.softmax(flow)
# Maximum absolute deviation between the reference and the manual pipeline.
print('error = %f' % np.max(np.abs(gold - silver)))
|
5,867 | 539431649e54469ddbe44fdbd17031b4449abdd9 | import boto3
import os
from trustedadvisor import authenticate_support
# Cross-account role identifiers come from the Lambda environment; a missing
# variable raises KeyError at import time (fails fast on misconfiguration).
accountnumber = os.environ['Account_Number']
rolename = os.environ['Role_Name']
rolesession = accountnumber + rolename  # STS session name: "<account><role>"
def lambda_handler(event, context):
    """AWS Lambda entry point: assume the configured cross-account role and
    authenticate against the Support API with the temporary credentials."""
    sts_client = boto3.client('sts')
    role_arn = "arn:aws:iam::" + accountnumber + ":role/" + rolename
    assumed = sts_client.assume_role(
        RoleArn=role_arn,
        RoleSessionName=rolesession,
    )
    return authenticate_support(assumed['Credentials'])
|
5,868 | 75c00eec7eacd37ff0b37d26163c2304620bb9db | from django.contrib.auth.hashers import make_password
from django.core import mail
from rest_framework import status
from django.contrib.auth.models import User
import time
from api.tests.api_test_case import CustomAPITestCase
from core.models import Member, Community, LocalCommunity, TransportCommunity, Profile, Notification
class MemberTests(CustomAPITestCase):
    """API tests for community membership: join/leave, listing, acceptance,
    banning and promotion.

    Fix: the deprecated ``assertEquals`` alias (removed in Python 3.12) is
    replaced with ``assertEqual`` throughout.
    """

    def setUp(self):
        """
        Make a user for authenticating and
        testing community actions
        """
        owner = self.user_model.objects.create(password=make_password('user1'), email='user1@test.com',
                                               first_name='1', last_name='User', is_active=True)
        moderator = self.user_model.objects.create(password=make_password('user2'), email='user2@test.com',
                                                   first_name='2', last_name='User', is_active=True)
        member = self.user_model.objects.create(password=make_password('user3'), email='user3@test.com',
                                                first_name='3', last_name='User', is_active=True)
        other = self.user_model.objects.create(password=make_password('user4'), email='user4@test.com',
                                               first_name='4', last_name='User', is_active=True)
        Profile.objects.create(user=owner)
        Profile.objects.create(user=moderator)
        Profile.objects.create(user=member)
        Profile.objects.create(user=other)
        lcom1 = LocalCommunity.objects.create(name='lcom1', description='descl1', city='Paris', country='FR',
                                              gps_x=0, gps_y=0)
        lcom2 = LocalCommunity.objects.create(name='lcom2', description='descl2', city='Paris', country='FR',
                                              gps_x=0, gps_y=0,
                                              auto_accept_member=True)
        lcom3 = LocalCommunity.objects.create(name='lcom3', description='descl3', city='Paris', country='FR',
                                              gps_x=0, gps_y=0)
        lcom4 = LocalCommunity.objects.create(name='lcom4', description='descl4', city='Paris', country='FR',
                                              gps_x=0, gps_y=0,
                                              auto_accept_member=True)
        lcom5 = LocalCommunity.objects.create(name='lcom5', description='descl5', city='Paris', country='FR',
                                              gps_x=0, gps_y=0)
        tcom1 = TransportCommunity.objects.create(name='tcom1', description='desct1', departure='dep1', arrival='arr1',
                                                  auto_accept_member=True)
        tcom2 = TransportCommunity.objects.create(name='tcom2', description='desct2', departure='dep2', arrival='arr2')
        tcom3 = TransportCommunity.objects.create(name='tcom3', description='desct3', departure='dep3', arrival='arr3')
        tcom4 = TransportCommunity.objects.create(name='tcom4', description='desct4', departure='dep4', arrival='arr4')
        tcom5 = TransportCommunity.objects.create(name='tcom5', description='desct5', departure='dep4', arrival='arr5')
        own_mbr = Member.objects.create(user=owner, community=lcom1, role='0', status='1')
        own_mbr = Member.objects.create(user=owner, community=lcom2, role='0', status='1')
        own_mbr = Member.objects.create(user=owner, community=lcom3, role='0', status='1')
        mod_mbr = Member.objects.create(user=moderator, community=lcom3, role='1', status='0')
        spl_mbr = Member.objects.create(user=member, community=lcom3, role='2', status='0')
        own_mbr = Member.objects.create(user=owner, community=lcom4, role='0', status='1')
        mod_mbr = Member.objects.create(user=moderator, community=lcom4, role='1', status='1')
        spl_mbr = Member.objects.create(user=member, community=lcom4, role='2', status='1')
        own_mbr = Member.objects.create(user=owner, community=lcom5, role='0', status='1')
        spl_mbr = Member.objects.create(user=member, community=lcom5, role='2', status='2')
        own_mbr = Member.objects.create(user=owner, community=tcom1, role='0', status='1')
        own_mbr = Member.objects.create(user=owner, community=tcom2, role='0', status='1')
        own_mbr = Member.objects.create(user=owner, community=tcom3, role='0', status='1')
        own_mbr = Member.objects.create(user=owner, community=tcom4, role='0', status='1')
        own_mbr = Member.objects.create(user=owner, community=tcom5, role='0', status='1')

    def test_setup(self):
        """
        Ensure the fixtures created in setUp are all present
        """
        self.assertEqual(4, self.user_model.objects.all().count())
        self.assertEqual(10, Community.objects.all().count())
        self.assertEqual(15, Member.objects.all().count())

    def test_join_wrong_community(self):
        """
        Ensure an authenticated user cannot join a community that does not exists
        """
        url = '/api/v1/communities/15/join_community/'
        response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
        self.assertEqual(15, Member.objects.all().count())

    def test_join_community_not_auto_accept(self):
        """
        Ensure an authenticated user can join a community
        """
        url = '/api/v1/communities/1/join_community/'
        response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))
        self.assertEqual(status.HTTP_201_CREATED, response.status_code)
        self.assertEqual(16, Member.objects.all().count())
        member = Member.objects.get(user=self.user_model.objects.get(id=4))
        community = Community.objects.get(id=1)
        self.assertEqual(community, member.community)
        self.assertEqual("2", member.role)
        self.assertEqual("0", member.status)
        self.assertEqual(1, Notification.objects.count())
        time.sleep(1)
        self.assertEqual(1, len(mail.outbox))
        self.assertEqual(mail.outbox[0].subject,
                         '[SmarTribe] Nouveau membre')
        self.assertTrue('demande à faire' in mail.outbox[0].body)
        response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        self.assertEqual(16, Member.objects.all().count())

    def test_join_community_auto_accept(self):
        """
        Ensure an authenticated user can join a community
        """
        url = '/api/v1/communities/2/join_community/'
        response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user4'))
        self.assertEqual(status.HTTP_201_CREATED, response.status_code)
        self.assertEqual(16, Member.objects.all().count())
        member = Member.objects.get(user=self.user_model.objects.get(id=4))
        community = Community.objects.get(id=2)
        self.assertEqual(community, member.community)
        self.assertEqual("2", member.role)
        self.assertEqual("1", member.status)
        self.assertEqual(1, Notification.objects.count())
        time.sleep(1)
        self.assertEqual(1, len(mail.outbox))
        self.assertEqual(mail.outbox[0].subject,
                         '[SmarTribe] Nouveau membre')
        self.assertTrue('fait désormais' in mail.outbox[0].body)

    def test_leave_community(self):
        """
        Ensure a member can leave a community
        """
        url = '/api/v1/communities/3/leave_community/'
        response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user3'))
        self.assertEqual(status.HTTP_204_NO_CONTENT, response.status_code)
        self.assertEqual(14, Member.objects.all().count())

    def test_leave_community_banned(self):
        """
        Ensure a banned member cannot leave a community
        """
        url = '/api/v1/communities/5/leave_community/'
        response = self.client.post(url, HTTP_AUTHORIZATION=self.auth('user3'))
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)
        self.assertEqual(15, Member.objects.all().count())

    def test_list_my_memberships_without_auth(self):
        """
        Ensure an unauthenticated user cannot list memberships
        """
        url = '/api/v1/communities/0/list_my_memberships/'
        response = self.client.get(url)
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_list_my_memberships_member(self):
        """
        Ensure a user can list all his memberships
        """
        url = '/api/v1/communities/0/list_my_memberships/'
        response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user3'))
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        data = response.data
        self.assertEqual(3, data['count'])
        self.assertEqual(3, data['results'][0]['community']['id'])
        self.assertEqual(4, data['results'][1]['community']['id'])
        self.assertEqual(5, data['results'][2]['community']['id'])
        self.assertEqual('0', data['results'][0]['status'])
        self.assertEqual('1', data['results'][1]['status'])
        self.assertEqual('2', data['results'][2]['status'])
        self.assertEqual('2', data['results'][0]['role'])
        self.assertEqual('2', data['results'][1]['role'])
        self.assertEqual('2', data['results'][2]['role'])

    def test_list_my_memberships_moderator(self):
        """
        Ensure a user can list all his memberships
        """
        url = '/api/v1/communities/0/list_my_memberships/'
        response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        data = response.data
        self.assertEqual(2, data['count'])
        self.assertEqual(3, data['results'][0]['community']['id'])
        self.assertEqual(4, data['results'][1]['community']['id'])
        self.assertEqual('0', data['results'][0]['status'])
        self.assertEqual('1', data['results'][1]['status'])
        self.assertEqual('1', data['results'][0]['role'])
        self.assertEqual('1', data['results'][1]['role'])

    def test_list_my_memberships_owner(self):
        """
        Ensure a user can list all his memberships
        """
        url = '/api/v1/communities/0/list_my_memberships/'
        response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user1'))
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        data = response.data
        self.assertEqual(10, data['count'])

    def test_list_members_without_auth(self):
        """
        Ensure non authenticated user cannot list community members
        """
        url = '/api/v1/communities/3/retrieve_members/'
        response = self.client.get(url)
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_list_members_without_member_rights(self):
        """
        Ensure a non-member authenticated user cannot list community members
        """
        url = '/api/v1/communities/3/retrieve_members/'
        response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user4'))
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_list_members_without_mod_rights(self):
        """
        Ensure a simple user cannot list community members
        """
        url = '/api/v1/communities/3/retrieve_members/'
        response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user3'))
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_list_members_with_mod_rights_not_accepted(self):
        """
        Ensure a moderator can list community members
        """
        url = '/api/v1/communities/3/retrieve_members/'
        # Test before acceptation
        response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_list_members_with_mod_rights(self):
        """
        Ensure a moderator can list community members
        """
        url = '/api/v1/communities/4/retrieve_members/'
        response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user2'))
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        data = response.data
        self.assertEqual(3, data['count'])
        self.assertEqual(6, data['results'][0]['id'])
        self.assertEqual(1, data['results'][0]['user']['id'])
        self.assertEqual('0', data['results'][0]['role'])
        self.assertEqual('1', data['results'][0]['status'])
        self.assertEqual(7, data['results'][1]['id'])
        self.assertEqual(2, data['results'][1]['user']['id'])
        self.assertEqual('1', data['results'][1]['role'])
        self.assertEqual('1', data['results'][1]['status'])
        self.assertEqual(8, data['results'][2]['id'])
        self.assertEqual(3, data['results'][2]['user']['id'])
        self.assertEqual('2', data['results'][2]['role'])
        self.assertEqual('1', data['results'][2]['status'])

    def test_list_members_with_owner_rights(self):
        """
        Ensure an owner can list community members
        """
        url = '/api/v1/communities/4/retrieve_members/'
        response = self.client.get(url, HTTP_AUTHORIZATION=self.auth('user1'))
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        data = response.data
        self.assertEqual(3, data['count'])

    def test_accept_member_without_auth(self):
        """
        Ensure a non authenticated user can not accept members
        """
        url = '/api/v1/communities/3/accept_member/'
        data = {
            'id': 5
        }
        response = self.client.post(url, data, format='json')
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_accept_member_with_simple_member(self):
        """
        Ensure a simple member cannot accept members
        """
        url = '/api/v1/communities/3/accept_member/'
        data = {
            'id': 5
        }
        response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user4'), format='json')
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_accept_member_with_owner(self):
        """
        Ensure an owner can accept members
        """
        url = '/api/v1/communities/3/accept_member/'
        data = {
            'id': 5
        }
        response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user1'), format='json')
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        data = response.data
        self.assertEqual(5, data['id'])
        self.assertEqual('1', data['status'])
        time.sleep(1)
        self.assertEqual(1, len(mail.outbox))
        self.assertEqual(mail.outbox[0].subject,
                         '[Smartribe] Membership accepted')

    def test_accept_member_with_owner_bad_request(self):
        """
        Ensure accept_member request data format
        """
        url = '/api/v1/communities/3/accept_member/'
        data = {
            'lol': 5
        }
        response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user1'), format='json')
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)

    def test_accept_member_with_owner_not_found(self):
        """
        Ensure member exists
        """
        url = '/api/v1/communities/3/accept_member/'
        data = {
            'id': 19
        }
        response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user1'), format='json')
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)

    def test_accept_member_with_not_accepted_moderator(self):
        """
        Ensure an non accepted moderator cannot accept members
        """
        url = '/api/v1/communities/3/accept_member/'
        data = {
            'id': 5
        }
        response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user2'), format='json')
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_accept_member_with_moderator(self):
        """
        Ensure an moderator can accept members
        """
        mod = Member.objects.get(id=4)
        mod.status = '1'
        mod.save()
        url = '/api/v1/communities/3/accept_member/'
        data = {
            'id': 5
        }
        response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user2'), format='json')
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        data = response.data
        self.assertEqual(5, data['id'])
        self.assertEqual('1', data['status'])
        time.sleep(1)
        self.assertEqual(1, len(mail.outbox))
        self.assertEqual(mail.outbox[0].subject,
                         '[Smartribe] Membership accepted')

    def test_ban_member_without_auth(self):
        """
        Ensure a non authenticated user cannot ban members
        """
        url = '/api/v1/communities/4/ban_member/'
        data = {
            'id': 8
        }
        response = self.client.post(url, data, format='json')
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_ban_member_with_non_member(self):
        """
        Ensure a non-member cannot ban members
        """
        url = '/api/v1/communities/4/ban_member/'
        data = {
            'id': 8
        }
        response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user3'), format='json')
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_ban_moderator_with_member(self):
        """
        Ensure a simple member cannot ban a moderator
        """
        url = '/api/v1/communities/4/ban_member/'
        data = {
            'id': 7
        }
        response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user3'), format='json')
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_ban_owner_with_member(self):
        """
        Ensure a simple member cannot ban the owner
        """
        url = '/api/v1/communities/4/ban_member/'
        data = {
            'id': 6
        }
        response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user3'), format='json')
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_ban_member_with_moderator(self):
        """
        Ensure a moderator can ban a simple member
        """
        url = '/api/v1/communities/4/ban_member/'
        data = {
            'id': 8
        }
        response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user2'), format='json')
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        data = response.data
        self.assertEqual(8, data['id'])
        self.assertEqual('2', data['status'])
        time.sleep(1)
        self.assertEqual(1, len(mail.outbox))
        self.assertEqual(mail.outbox[0].subject,
                         '[Smartribe] Membership cancelled')

    def test_ban_member_with_owner(self):
        """
        Ensure the owner can ban a simple member
        """
        url = '/api/v1/communities/4/ban_member/'
        data = {
            'id': 8
        }
        response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user1'), format='json')
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        data = response.data
        self.assertEqual(8, data['id'])
        self.assertEqual('2', data['status'])
        time.sleep(1)
        self.assertEqual(1, len(mail.outbox))
        self.assertEqual(mail.outbox[0].subject,
                         '[Smartribe] Membership cancelled')

    def test_ban_owner_with_moderator(self):
        """
        Ensure a moderator cannot ban the owner
        """
        url = '/api/v1/communities/4/ban_member/'
        data = {
            'id': 6
        }
        response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user2'), format='json')
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_promote_user_without_auth(self):
        """
        Ensure a non authenticated user cannot promote members
        """
        url = '/api/v1/communities/4/promote_moderator/'
        data = {
            'id': 8
        }
        response = self.client.post(url, data, format='json')
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_promote_user_with_user(self):
        """
        Ensure a simple member cannot promote members
        """
        url = '/api/v1/communities/4/promote_moderator/'
        data = {
            'id': 8
        }
        response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user4'), format='json')
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_promote_user_with_moderator(self):
        """
        Ensure a moderator cannot promote members
        """
        url = '/api/v1/communities/4/promote_moderator/'
        data = {
            'id': 8
        }
        response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user2'), format='json')
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_promote_user_with_owner(self):
        """
        Ensure the owner can promote a member to moderator
        """
        url = '/api/v1/communities/4/promote_moderator/'
        data = {
            'id': 8
        }
        response = self.client.post(url, data, HTTP_AUTHORIZATION=self.auth('user1'), format='json')
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        data = response.data
        self.assertEqual(8, data['id'])
        self.assertEqual('1', data['role'])
|
5,869 | e09f914f00e59124ef7d8a8f183bff3f7f74b826 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import asyncio
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from myshell.environment import Environment
from myshell.job import Job, JobState
class JobManager:
    """Job manager that schedules foreground and background jobs."""
    def __init__(self, environment: "Environment"):
        self.foreground_job: Optional[Job] = None
        # Slot 0 is a placeholder and never assigned: job ids start at 1.
        self.background_jobs: list[Optional[Job]] = [None]
        self.environment = environment
    async def execute(self, s: str):
        """Execute one command line, dispatching it to the foreground or background as requested."""
        self.clean_jobs()
        job = Job(s, environment=self.environment)
        if job.initially_background:
            id = self.make_background_place()
            self.background_jobs[id] = job
            await job.execute(id)
        else:
            self.foreground_job = job
            await job.execute()
    async def wait(self):
        """Wait for the foreground job to finish, swallowing `CancelledError`."""
        try:
            if self.foreground_job is not None:
                await self.foreground_job.task
        except asyncio.CancelledError:
            pass
    def pause(self):
        """Suspend the foreground job and move it to the background."""
        if self.foreground_job is None:
            return
        id = self.make_background_place()
        self.background_jobs[id] = self.foreground_job
        self.foreground_job = None
        self.background_jobs[id].pause(id)
    def resume_foreground(self, id: int):
        """Resume a suspended background job in the foreground."""
        assert self.is_job_suspended(id)
        self.foreground_job = self.background_jobs[id]
        self.background_jobs[id] = None
        self.foreground_job.resume_foreground()  # type: ignore
    def resume_background(self, id: int):
        """Resume a suspended job, keeping it in the background."""
        assert self.is_job_suspended(id)
        self.background_jobs[id].resume_background()  # type: ignore
    def move_foreground(self, id: int):
        """Move a running background job to the foreground."""
        assert self.is_job_background(id)
        self.foreground_job = self.background_jobs[id]
        self.background_jobs[id] = None
        self.foreground_job.move_foreground()  # type: ignore
    def stop(self):
        """Terminate the foreground job."""
        if self.foreground_job is None:
            return
        self.foreground_job.stop()
        self.foreground_job = None
    def is_job_available(self, id: int) -> bool:
        """Return whether a job with the given id exists."""
        return id < len(self.background_jobs) and self.background_jobs[id] is not None
    def is_job_suspended(self, id: int) -> bool:
        """Return whether the job with the given id is suspended."""
        return (
            self.is_job_available(id)
            and self.background_jobs[id].state == JobState.suspended  # type: ignore
        )
    def is_job_background(self, id: int) -> bool:
        """Return whether the job with the given id is running in the background."""
        return (
            self.is_job_available(id)
            and self.background_jobs[id].state == JobState.background  # type: ignore
        )
    def make_background_place(self) -> int:
        """Return the index of a free slot in the background job list, appending one if none is free."""
        id = 1
        while id < len(self.background_jobs):
            if self.background_jobs[id] is None:
                break
            id += 1
        if id == len(self.background_jobs):
            self.background_jobs.append(None)
        return id
    def clean_jobs(self):
        """Drop terminated jobs from the background job list."""
        for index, job in enumerate(self.background_jobs):
            if job is not None and job.state == JobState.stopped:
                self.background_jobs[index] = None
|
5,870 | 6e7cca4f766ca89d2e2f82a73f22742b0e8f92a8 | from .ros_publisher import *
|
5,871 | 2269e74c006833976c3a28cd52c238e2dde20051 | from .__main__ import datajson_write, datajson_read
|
5,872 | 0ff96b2314927d7b3e763242e554fd561f3c9343 | #!/usr/bin/env python3
# coding=utf-8
import fire
import json
import os
import time
import requests
import time
import hashlib
import random
root_path, file_name = os.path.split(os.path.realpath(__file__))
ip_list_path = ''.join([root_path, os.path.sep, 'ip_list.json'])
class ProxySwift(object):
    """Thin client for the ProxySwift HTTP API (IP listing and rotation)."""
    server_id = '1'  # all API calls are scoped to this ProxySwift server
    def requerst_get(self, url, data, *p, **kwargs):
        # Signed GET against the ProxySwift API. (Method name typo
        # "requerst" is kept: it is part of the class's public interface.)
        SecretKey = '3JCx8fAF7Bpq5Aj4t9wS7cfVB7hpXZ7j'
        PartnerID = '2017061217350058'
        TimeStamp = int(time.time())
        source_data = {
            'partner_id': PartnerID,
            'timestamp': TimeStamp
        }
        source_data.update(data)
        # Signature: concatenate "<key><value>" pairs sorted by key, append
        # the secret key and take the MD5 hex digest.
        tmp_data = [i for i in source_data.items()]
        tmp_data = sorted(tmp_data, key=lambda i: i[0])
        url_list = ['{}{}'.format(*i) for i in tmp_data]
        # url_list.reverse()
        # sign = ''.join(url_list)
        # sign = ''.join(sorted(sign))
        sign = ''.join(url_list)
        # sign = ''.join(sorted(sign))
        data = sign + SecretKey
        md_5 = hashlib.md5()
        md_5.update(data.encode("utf-8"))
        sign = md_5.hexdigest()
        source_data.update({'sign': sign})
        # NOTE(review): verify=False disables TLS certificate validation.
        return requests.get(url, params=source_data, verify=False, *p, **kwargs)
    def get_ip(self, interface_id='', pool_id=''):
        # Return the proxy IP list for the given interface/pool (parsed JSON).
        url = 'https://api.proxyswift.com/ip/get'
        data = {
            'server_id': self.server_id,
            'pool_id': pool_id,
            'interface_id': interface_id,
        }
        r = self.requerst_get(url, data)
        response = r.json()
        return response
    def get_task(self, task_id):
        # Return the status record of an asynchronous API task.
        url = 'https://api.proxyswift.com/task/get'
        data = {'task_id': task_id}
        r = self.requerst_get(url, data)
        return r.json()
    def changes_ip(self, interface_id, filter=24):
        # Request an IP change and poll the resulting task until it succeeds.
        url = 'https://api.proxyswift.com/ip/change'
        data = {
            'server_id': self.server_id,
            'interface_id': interface_id,
            'filter': filter,
        }
        r = self.requerst_get(url, data)
        task_id = r.json()['taskId']
        #status = self(task_id)['status']
        i = 1
        # NOTE(review): `i` is never updated, so this always sleeps 2 s per
        # poll, and the loop never terminates if the task never succeeds.
        while True:
            time.sleep(i%2+1)
            status = self.get_task(task_id)['status']
            if status == 'success':
                ip_port = self.get_ip(interface_id)
                return ip_port
class ProxyPool(object):
    """Local pool of proxy endpoints, persisted to ip_list.json next to this file."""
    def __init__(self, proxyswift=ProxySwift(), interval=4):
        # NOTE(review): the ProxySwift() default is evaluated once at class
        # definition time and shared by every ProxyPool instance.
        self.interval = interval
        self.ps = proxyswift
        self.count = 0
        self.index = 0
        with open(ip_list_path, 'r', encoding='utf-8') as f:
            self.pool = json.loads(f.read())
    def get(self):
        # Pick a random ip from the pool (re-read from disk on every call).
        with open(ip_list_path, 'r', encoding='utf-8') as f:
            self.pool = json.loads(f.read())
        ip = random.choice(self.pool)
        ip = "{0}:{1}".format(ip['ip'], ip['port'])
        print(ip)
        return ip
    def change_ip(self, proxy_server):
        # Rotate the IP behind *proxy_server* ("http://ip:port"), then refresh.
        for ip in self.pool:
            if proxy_server == "http://%(ip)s:%(port)s" % ip:
                # NOTE(review): pops the FIRST entry rather than the matched
                # one -- looks suspicious; confirm intended behaviour.
                self.pool.pop(0)
                self.ps.changes_ip(ip['id'])
                self.pool = self.ps.get_ip()
                time.sleep(1)
                break
        self.refresh_ip()
    def refresh_ip(self):
        # Re-fetch the IP list from the API and persist it to ip_list.json.
        time.sleep(5)
        self.pool = self.ps.get_ip()
        print(self.pool)
        # os.environ['ip_list'] = json.dumps(self.ps.get_ip())
        with open(ip_list_path, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.ps.get_ip()))
f.write(json.dumps(self.ps.get_ip()))
def main():
    """Command-line entry point: expose ProxyPool via python-fire."""
    fire.Fire(ProxyPool)
if __name__ == '__main__':
    main()
5,873 | 8dbc0b9b80aae4cb5c4101007afc50ac54f7a7e7 | #!/usr/bin/python
def sumbelow(n):
    """Return the sum of all natural numbers below *n* divisible by 3 or 5."""
    return sum(v for v in range(n) if v % 3 == 0 or v % 5 == 0)
if __name__ == '__main__':
    # NOTE: Python 2 print statement -- run this script under Python 2.
    print sumbelow(1000)
    # NOTE(review): 'n' is unused; appears to be leftover scratch code.
    n = 1000
|
5,874 | 7a69a9fd6ee5de704a580e4515586a1c1d2b8017 | # (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from .agent import agent
from .clean import clean
from .config import config
from .create import create
from .dep import dep
from .env import env
from .meta import meta
from .release import release
from .run import run
from .test import test
from .validate import validate
# Tuple collecting every CLI subcommand imported above, exposed as the
# package's command registry.
ALL_COMMANDS = (
    agent,
    clean,
    config,
    create,
    dep,
    env,
    meta,
    release,
    run,
    test,
    validate,
)
|
5,875 | 199872ea459a9dba9975c6531034bdbc1e77f1db | # -*- coding: utf-8 -*-
# caixinjun
import argparse
from sklearn import metrics
import datetime
import jieba
from sklearn.feature_extraction.text import TfidfVectorizer
import pickle
from sklearn import svm
import os
import warnings
warnings.filterwarnings('ignore')
def get_data(train_file):
    """Load a labelled corpus of "<label>\t<text>" lines.

    Lines without a tab-separated text field are skipped. Each text is
    segmented with jieba and returned as a space-joined token string.
    Returns (texts, labels).
    """
    labels = []
    texts = []
    with open(train_file, 'r', encoding='utf-8') as f:
        for raw in f.readlines():
            fields = raw.strip().split("\t")
            if len(fields) == 1:
                continue
            labels.append(int(fields[0]))
            texts.append(fields[1])
    tokenised = [" ".join(jieba.lcut(text)) for text in texts]
    return tokenised, labels
def train(cls, data, target, model_path):
    """Fit estimator *cls* on (data, target) and pickle the fitted model.

    NOTE(review): despite the name, *cls* is an estimator instance (it must
    provide .fit), not a class object.
    """
    cls = cls.fit(data, target)
    with open(model_path, 'wb') as f:
        pickle.dump(cls, f)
def trans(data, matrix_path, stopword_path):
    """Fit a TF-IDF vectorizer on *data* and pickle it to *matrix_path*.

    *stopword_path* is a text file with one stop word per line.
    Returns the sparse TF-IDF feature matrix for *data*.
    """
    with open(stopword_path, 'r', encoding='utf-8') as fs:
        # BUG FIX: the original iterated fs.readline() — a single string —
        # which produced one stop "word" per CHARACTER of the first line.
        # Iterating the file object yields one stop word per line as intended.
        stop_words = [line.strip() for line in fs]
    tfidf = TfidfVectorizer(token_pattern=r"(?u)\b\w+\b", stop_words=stop_words)
    features = tfidf.fit_transform(data)
    with open(matrix_path, 'wb') as f:
        pickle.dump(tfidf, f)
    return features
def load_models(matrix_path, model_path):
    """Load the pickled TF-IDF vectorizer and classifier, if present.

    Returns (tfidf, cls); either entry is None when its file is missing.
    """
    cls = None
    tfidf = None
    if os.path.isfile(model_path):
        with open(model_path, 'rb') as handle:
            cls = pickle.load(handle)
    if os.path.isfile(matrix_path):
        with open(matrix_path, 'rb') as handle:
            tfidf = pickle.load(handle)
    return tfidf, cls
def test(matrix_path, model_path, data_path, outdir):
    """Evaluate the persisted TF-IDF + classifier on a labelled data file.

    Prints accuracy/precision/recall/F1/AUC and writes the predicted labels,
    one per line, to <outdir>/<timestamp>/results.txt.  Aborts with a message
    when either pickled model is missing.
    """
    curr_time = datetime.datetime.now()
    time_str = curr_time.strftime("%Y-%m-%d %H-%M-%S")
    out_path = outdir + '/%s/' % time_str
    out_file = os.path.join(out_path, "results.txt")
    if not os.path.exists(out_path):
        os.makedirs(out_path)
    data, target = get_data(data_path)
    tfidf, cls = load_models(matrix_path, model_path)
    # FIX: identity comparison is the correct None check; `== None` can be
    # intercepted by objects overriding __eq__ (PEP 8: use `is None`).
    if tfidf is None or cls is None:
        print("cannot load models........")
        return
    feature = tfidf.transform(data)
    predicted = cls.predict(feature)
    acc = metrics.accuracy_score(target, predicted)
    pre = metrics.precision_score(target, predicted)
    recall = metrics.recall_score(target, predicted)
    f1 = metrics.f1_score(target, predicted)
    fpr, tpr, thresholds = metrics.roc_curve(target, predicted)
    auc = metrics.auc(fpr, tpr)
    print("accuracy_score: ", acc)
    print("precision_score: ", pre)
    print("recall_score: ", recall)
    print("f1_score: ", f1)
    print("auc: ", auc)
    with open(out_file, 'w', encoding='utf-8') as f:
        for label in predicted:
            f.write(str(label) + '\n')
if __name__ == '__main__':
    # CLI driver: train a LinearSVC text classifier end to end, persist the
    # TF-IDF matrix and model, then evaluate on the held-out test file.
    parser = argparse.ArgumentParser()
    parser.add_argument('--train', type=str, default='./data/train.txt', help='training data')
    parser.add_argument('--test', type=str, default='./data/test.txt', help='test data')
    parser.add_argument('--stopwords', type=str, default='./data/hit_stopwords.txt', help='stop words')
    parser.add_argument('--model', type=str, default='./model/svm_model.pkl', help='classification model')
    parser.add_argument('--matrix', type=str, default='./model/tfidf.pkl', help='tfidf model')
    parser.add_argument('--outpath', type=str, default='./results/', help='out path')
    args = parser.parse_args()
    print("data processing.......")
    data, target = get_data(args.train)
    print("transform data.......")
    features = trans(data, args.matrix, args.stopwords)
    print("training model.......")
    cls = svm.LinearSVC()
    train(cls, features, target, args.model)
    print("test.......")
    test(args.matrix, args.model, args.test, args.outpath)
|
5,876 | a8341bf422a4d31a83ff412c6aac75e5cb8c5e0f | # Задание 1
# Выучите основные стандартные исключения, которые перечислены в данном уроке.
# Задание 2
# Напишите программу-калькулятор, которая поддерживает следующие операции: сложение, вычитание,
# умножение, деление и возведение в степень. Программа должна выдавать сообщения об ошибке и
# продолжать работу при вводе некорректных данных, делении на ноль и возведении нуля в
# отрицательную степень.
# Функция преобразования текстового списка в float
def list_convertion(user_list):
    """Convert every element of user_list to float, mutating it in place.

    Returns the same list object for convenient chaining.  Raises ValueError
    as soon as an element cannot be parsed as a number.
    """
    for position, element in enumerate(user_list):
        user_list[position] = float(element)
    return user_list
# Умножение
def multiplication(user_list):
    """Return the product of all numbers in user_list (1 for an empty list)."""
    product = 1
    position = 0
    while position < len(user_list):
        product = product * user_list[position]
        position += 1
    return product
# Деление. Здесь долго мучился, т.к. нужно первое число оставть и делить его на второе, т.е цикл со второго индекса
# пока не нашел в Интернете запись через slice напр.: for i in collection[1:]
def division(user_list):
    """Divide the first number by each of the remaining ones, left to right.

    Raises ZeroDivisionError when any divisor is zero.
    """
    result = user_list[0]
    position = 1
    while position < len(user_list):
        result = result / user_list[position]
        position += 1
    return result
# Сложение
def adding(user_list):
    """Return the sum of the numbers in user_list (0 for an empty list)."""
    # The built-in sum() replaces the manual accumulator loop; same result
    # (starts from 0), implemented in C.
    return sum(user_list)
# Вычитание. Логика та же, что и в делении
def subtraction(user_list):
    """Subtract every following number from the first one, left to right."""
    result = user_list[0]
    position = 1
    while position < len(user_list):
        result = result - user_list[position]
        position += 1
    return result
# Возведение в степень. Логика та же, что в делении
def powering(user_list):
    """Left-fold exponentiation: ((first ** second) ** third) ** ..."""
    base = user_list[0]
    position = 1
    while position < len(user_list):
        base = base ** user_list[position]
        position += 1
    return base
# Interactive calculator: repeatedly ask for an operation and a list of
# numbers, print the result, and keep going until the user types 'done'.
while True:
    operation = input("Enter operation sign, please (*), (/), (+), (-), (^). \nTo quit, please enter 'done' > ")
    if operation.lower() == "done":  # 'done' is the keyword that exits the loop
        print("Thank you for using the program!")
        break
    else:
        try:
            numbers_list = input("Enter the numbers separated by space > ").split(" ")
            if len(numbers_list) < 2:
                raise IndexError("You have entered less than 2 numbers")
            else:
                numbers_list = list_convertion(numbers_list)  # convert the list from str to float
                try:
                    if "*" in operation:  # tolerant of input like "*" or (*): we only check the operator occurs somewhere in the string
                        print(f"Your multiplication result is {multiplication(numbers_list)}")
                    elif "^" in operation:
                        print(f"Your putting into power result is {powering(numbers_list)}")
                    elif "-" in operation:
                        print(f"Your subtraction result is {subtraction(numbers_list)}")
                    elif "+" in operation:
                        print(f"Your sum result is {adding(numbers_list)}")
                    elif "/" in operation:
                        print(f"Your division result is {division(numbers_list)}")
                    else:
                        raise ValueError("Unsupported operation, please try again")
                except (ValueError, ZeroDivisionError) as e:
                    # division by zero and 0 ** negative both raise ZeroDivisionError
                    print(f"We have an issue. {e}")
        except Exception as e:
            # catches the IndexError above plus float() conversion failures
            print(f"We have an issue. {e}")
|
5,877 | 26fb607623fda333c37e254470ca6d07708671a8 | from app.request import send_tor_signal
from app.utils.session_utils import generate_user_keys
from app.utils.gen_ddg_bangs import gen_bangs_json
from flask import Flask
from flask_session import Session
import json
import os
from stem import Signal
app = Flask(__name__, static_folder=os.path.dirname(
os.path.abspath(__file__)) + '/static')
app.user_elements = {}
app.default_key_set = generate_user_keys()
app.no_cookie_ips = []
app.config['SECRET_KEY'] = os.urandom(32)
app.config['SESSION_TYPE'] = 'filesystem'
app.config['VERSION_NUMBER'] = '0.3.1'
app.config['APP_ROOT'] = os.getenv(
'APP_ROOT',
os.path.dirname(os.path.abspath(__file__)))
app.config['LANGUAGES'] = json.load(open(
os.path.join(app.config['APP_ROOT'], 'misc/languages.json')))
app.config['COUNTRIES'] = json.load(open(
os.path.join(app.config['APP_ROOT'], 'misc/countries.json')))
app.config['STATIC_FOLDER'] = os.getenv(
'STATIC_FOLDER',
os.path.join(app.config['APP_ROOT'], 'static'))
app.config['CONFIG_PATH'] = os.getenv(
'CONFIG_VOLUME',
os.path.join(app.config['STATIC_FOLDER'], 'config'))
app.config['DEFAULT_CONFIG'] = os.path.join(
app.config['CONFIG_PATH'],
'config.json')
app.config['SESSION_FILE_DIR'] = os.path.join(
app.config['CONFIG_PATH'],
'session')
app.config['BANG_PATH'] = os.getenv(
'CONFIG_VOLUME',
os.path.join(app.config['STATIC_FOLDER'], 'bangs'))
app.config['BANG_FILE'] = os.path.join(
app.config['BANG_PATH'],
'bangs.json')
if not os.path.exists(app.config['CONFIG_PATH']):
os.makedirs(app.config['CONFIG_PATH'])
if not os.path.exists(app.config['SESSION_FILE_DIR']):
os.makedirs(app.config['SESSION_FILE_DIR'])
# Generate DDG bang filter, and create path if it doesn't exist yet
if not os.path.exists(app.config['BANG_PATH']):
os.makedirs(app.config['BANG_PATH'])
if not os.path.exists(app.config['BANG_FILE']):
gen_bangs_json(app.config['BANG_FILE'])
Session(app)
# Attempt to acquire tor identity, to determine if Tor config is available
send_tor_signal(Signal.HEARTBEAT)
from app import routes # noqa
|
5,878 | d429f03c0f0c241166d6c0a5a45dc1101bcaec16 | #!/usr/bin/env python3
import matplotlib
from matplotlib.colors import to_hex
from matplotlib import cm
import matplotlib.pyplot as plt
import numpy as np
import itertools as it
from pathlib import Path
import subprocess
from tqdm import tqdm
from koala import plotting as pl
from koala import phase_diagrams as pd
from koala import pointsets, voronization, flux_finder, graph_color
from koala import example_graphs as eg
import functools
def multi_set_symmetric_difference(sets):
    """Return, as a list, the elements occurring in an odd number of the inputs."""
    # Fold symmetric difference across all collections; like the original,
    # reduce() raises TypeError when `sets` is empty.
    return list(functools.reduce(set.symmetric_difference, (set(group) for group in sets)))
def flood_iteration_plaquettes(l, plaquettes):
    """One flood-fill step over plaquettes: the given set plus all neighbours.

    Assumes *l* is a koala lattice whose l.plaquettes[p].adjacent_plaquettes
    lists neighbouring plaquette indices — confirm against the koala API.
    """
    return set(plaquettes) | set(it.chain.from_iterable(l.plaquettes[p].adjacent_plaquettes for p in plaquettes))
def flood_iteration_vertices(l, vertices):
    """One flood-fill step over vertices: the given set plus every vertex
    sharing an edge with a member (gathered via each vertex's adjacent edges'
    endpoint indices).  Assumes the koala lattice layout — confirm.
    """
    return set(vertices) | set(it.chain.from_iterable(i for v in set(vertices) for i in l.edges.indices[l.vertices.adjacent_edges[v]]))
# imports just for this plot
column_width = 3.375
w = 3.375
black_line_widths = 1.5
matplotlib.rcParams.update({'font.size': 13, 'text.usetex': True, 'font.family': 'serif', 'font.serif': ['Computer Modern']})
matplotlib.rcParams.update({"axes.linewidth": black_line_widths})
line_colors = [to_hex(a) for a in cm.inferno([0.25, 0.5, 0.75])]
rng = np.random.default_rng(seed = 10)
l, coloring, ujk = eg.make_amorphous(8, rng = rng)
# l, coloring, ujk = eg.make_honeycomb(8)
plaquettes = [40,]
vertices = [78,]
subprocess.run(["mkdir", "-p", "./animation"])
for n in tqdm(range(15)):
fig, axes = plt.subplots(nrows=1, ncols=2)
fig.set_size_inches(2 * w, 2/2 * w)
for a in axes: a.set(xticks = [], yticks = [])
# pl.plot_vertex_indices(l, ax = ax)
# pl.plot_edge_indices(l, ax = ax)
# pl.plot_plaquette_indices(l, ax = ax)
if n > 0:
vertices = flood_iteration_vertices(l, vertices)
plaquettes = flood_iteration_plaquettes(l, plaquettes)
ax = axes[0]
multi_edges = multi_set_symmetric_difference([l.vertices.adjacent_edges[v] for v in vertices])
if multi_edges: pl.plot_dual(l, ax = ax, color_scheme = line_colors[1:], subset = multi_edges)
pl.plot_edges(l, ax = ax, color = 'k', subset = multi_edges)
pl.plot_vertices(l, ax = ax, subset = list(vertices), s = 5)
pl.plot_edges(l, ax = ax, alpha = 0.1)
pl.plot_dual(l, ax = ax, color_scheme = line_colors[1:], alpha = 0.1)
ax.set(xticks = [], yticks = [])
ax = axes[1]
plaquette_boolean = np.array([i in plaquettes for i in range(l.n_plaquettes)])
fluxes = 1 - 2*plaquette_boolean
ujk = flux_finder.find_flux_sector(l, fluxes, ujk)
fluxes = flux_finder.fluxes_from_bonds(l, ujk)
pl.plot_edges(l, ax = ax, alpha = 0.1)
pl.plot_dual(l, ax = ax, color_scheme = line_colors[1:], alpha = 0.1)
pl.plot_edges(l, ax = ax, subset = (ujk == -1))
if len(plaquettes) > 1: pl.plot_dual(l, ax = ax, color_scheme = line_colors[1:], subset = (ujk == -1), )
pl.plot_plaquettes(l, subset = fluxes == -1, ax = ax, color_scheme = ["orange", "white"], alpha = 0.5);
ax.set(xticks = [], yticks = [])
fig.tight_layout()
if n == 3:
fig.savefig(f'./{Path.cwd().name}.svg', transparent = True)
fig.savefig(f'./{Path.cwd().name}.pdf')
fig.savefig(f"animation/iteration_{n:03}.svg")
plt.close(fig)
subprocess.run(["magick", "animation/*.svg", f'./{Path.cwd().name}.gif'])
subprocess.run(["convert", "-delay", "100", f'./{Path.cwd().name}.gif', f'./{Path.cwd().name}.gif'])
subprocess.run(["rm", "-r", "./animation"]) |
5,879 | 6c6026a7ff0345c37e62de7c0aac0ee3bcde2c82 | import pymongo
# Minimal pymongo walkthrough: connect, insert three documents, query.
myclient = pymongo.MongoClient('mongodb://localhost:27017/')  # We create the database object
mydb = myclient['mydatabase']  # Create a database (created lazily on first write)
mycol = mydb['customers']  # Create a collection into my mydatabase
mydict = [{"name": "Eric", "address": "Highway 37"}, {"name": "Albert", "address": "Highway 37"}, {"name": "Ivan", "address": "Highway 37"}]
x = mycol.insert_many(mydict)
myquery = {'name':'Albert'}  # NOTE(review): defined but never passed to find()
mydoc = mycol.find()
# NOTE(review): find() returns a Cursor; this prints the cursor object, not
# the documents — iterate it (for d in mydoc: print(d)) to see results.
print(mydoc)
|
5,880 | fb1974ad7ac9ae54344812814cb95a7fccfefc66 | # - Generated by tools/entrypoint_compiler.py: do not edit by hand
"""
NGramHash
"""
import numbers
from ..utils.entrypoints import Component
from ..utils.utils import try_set
def n_gram_hash(
hash_bits=16,
ngram_length=1,
skip_length=0,
all_lengths=True,
seed=314489979,
ordered=True,
invert_hash=0,
**params):
"""
**Description**
Extracts NGrams from text and convert them to vector using hashing
trick.
:param hash_bits: Number of bits to hash into. Must be between 1
and 30, inclusive. (settings).
:param ngram_length: Ngram length (settings).
:param skip_length: Maximum number of tokens to skip when
constructing an ngram (settings).
:param all_lengths: Whether to include all ngram lengths up to
ngramLength or only ngramLength (settings).
:param seed: Hashing seed (settings).
:param ordered: Whether the position of each source column should
be included in the hash (when there are multiple source
columns). (settings).
:param invert_hash: Limit the number of keys used to generate the
slot name to this many. 0 means no invert hashing, -1 means
no limit. (settings).
"""
entrypoint_name = 'NGramHash'
settings = {}
if hash_bits is not None:
settings['HashBits'] = try_set(
obj=hash_bits,
none_acceptable=True,
is_of_type=numbers.Real)
if ngram_length is not None:
settings['NgramLength'] = try_set(
obj=ngram_length,
none_acceptable=True,
is_of_type=numbers.Real)
if skip_length is not None:
settings['SkipLength'] = try_set(
obj=skip_length,
none_acceptable=True,
is_of_type=numbers.Real)
if all_lengths is not None:
settings['AllLengths'] = try_set(
obj=all_lengths,
none_acceptable=True,
is_of_type=bool)
if seed is not None:
settings['Seed'] = try_set(
obj=seed,
none_acceptable=True,
is_of_type=numbers.Real)
if ordered is not None:
settings['Ordered'] = try_set(
obj=ordered, none_acceptable=True, is_of_type=bool)
if invert_hash is not None:
settings['InvertHash'] = try_set(
obj=invert_hash,
none_acceptable=True,
is_of_type=numbers.Real)
component = Component(
name=entrypoint_name,
settings=settings,
kind='NgramExtractor')
return component
|
5,881 | 1af6e66c19078a9ee971f608daa93247911d8406 | from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
import numpy as np
# Classify a local image with an ImageNet-pretrained ResNet50.
model = ResNet50(weights='imagenet', # Learned weights on imagenet
                 include_top=True)  # keep the 1000-class classification head
img_input = image.load_img('my_picture.jpg', target_size=(224, 224))  # network expects 224x224 input
img_input = image.img_to_array(img_input)
# np.newaxis adds the batch axis; preprocess_input applies the channel
# preprocessing the pretrained weights expect.
img_input = preprocess_input(img_input[np.newaxis, ...])
preds = model.predict(img_input)
decoded_predictions = decode_predictions(preds, top=10)[0]  # [(class_id, name, score), ...] for the single image
print(decoded_predictions)
5,882 | 6edb1f99ca9af01f28322cbaf13f278e79b94e92 | # -*- coding: utf-8 -*-
c = int(input())
t = input()
m = []
for i in range(12):
aux = []
for j in range(12):
aux.append(float(input()))
m.append(aux)
aux = []
soma = 0
for i in range(12):
soma += m[i][c]
resultado = soma / (t == 'S' and 1 or 12)
print('%.1f' % resultado)
|
5,883 | 9baf55eb2fb70e9fa0d92df22d307962b8d6c6d4 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import json
import urllib2
# this is executed by a cron job on the pi inside the pooltable
# Poll the server for a pending transaction; if one exists, mark it processed
# server-side and (if acknowledged) trigger the coin-release mechanism.
# (Python 2 script: urllib2 and print statements.)
secret ='secret'
baseurl='https://pooltable.mysite.com/'
url = baseurl + 'gettrans.php?secret=' + secret
req = urllib2.Request(url)
f = urllib2.urlopen(req)
response = f.read()
f.close()
print response
# check if there is a transaction
# NOTE(review): response is a str, so `response != False` is always True —
# the real guards are the 'error'/'false' string comparisons.
if response != 'error' and response != 'false' and response != False:
    obj = json.loads(response)
    trans = str(obj['transaction_hash'])
    # move the transaction to the processed table and delete from unprocessed
    url = baseurl + 'deltrans.php?secret=' + secret + '&trans=' + trans
    req = urllib2.Request(url)
    f = urllib2.urlopen(req)
    response = f.read()
    f.close()
    # if transaction was moved correctly, set off the solanoid
    if response == '*ok*':
        print response
        # run solanoid script
|
5,884 | 9f3ca0d5a10a27d926a0f306665889418f8d6a0c | from src.produtos import *
class Estoque(object):
def __init__(self):
self.categorias = []
self.subcategorias = []
self.produtos = []
self.menu_estoque()
def save_categoria(self, categoria):
pass
def save_subcategorias(self, subcategoria):
pass
def save_produtos(self, produto):
pass
def create_categoria(self):
""""
Cria uma categoria através dos dados recolhidos pelo formulário.
Os dados são: Codigo, nome e descrição
"""
print("- Criar CATEGORIA -")
codigo = input("CÓDIGO: ").strip()
nome = input("NOME: ").strip()
descrição = input("DESCRIÇÃO: ").strip()
categoria = Categoria(codigo, nome, descrição)
if categoria not in self.categorias:
self.categorias.append(categoria)
def create_subcategoria(self):
""""
Cria uma categoria através dos dados recolhidos pelo formulário.
Os dados são: Codigo, nome e descrição e a passagem de um objeto categoria
"""
if len(self.categorias) == 0:
print("Você deve criar pelo menos uma CATEGORIA!\n")
self.create_categoria()
print("- Criar SUBCATEGORIA -")
codigo = input("CÓDIGO: ").strip()
nome = input("NOME: ").strip()
descrição = input("DESCRIÇÃO: ").strip()
escolhe = input("CATEGORIA (Nome ou Código): ")
categoria = 0
for cat in self.categorias:
if cat.nome == escolhe or cat.codigo == escolhe:
categoria = cat
break
else:
print("Categoria não Encontrada!\nVocê deve criar uma CATEGORIA!")
self.create_categoria()
subcategoria = Subcategoria(categoria, codigo, nome, descrição)
if subcategoria not in self.subcategorias:
self.subcategorias.append(subcategoria)
def create_produto(self):
""""
Cria produto a ser controlado pelo estoque. Um produto deve pertencer a uma subcategoria.
Produtos são itens que podem ser vendidos.
Possuem subcategoria, codigo, nome, descricao, estoquemax, estoquemin, valorvenda, valorcompra, foto
TODELETE: Por enquanto foto recebe uma string qualquer
"""
# TODO: Implementar a foto no sistemas
if not len(self.subcategorias):
print("Produto deve ter CATEGORIA ou uma SUBCATEGORIA!\n")
self.create_subcategoria()
else:
print("- Cadastrar PRODUTO -")
escolhe = input("SUBCATEGORIA (Nome ou Código): ").lower()
codigo = input("CÓDIGO: ").strip()
nome = input("NOME: ").strip()
descrição = input("DESCRIÇÃO: ").strip()
estoquemax = input("Quantidade Maxima em Estoque: ")
while not produtos.valida_estoque(estoquemax):
print("Valor Inválido!")
estoquemax = input("Valor deve ser Numérico: ")
estoquemin = input("Quantidade Minima em Estoque: ")
while not produtos.valida_estoque(estoquemin):
print("Valor Inválido!")
estoquemin = input("Valor deve ser Numérico: ")
valorvenda = input("Preço Unitário: ")
while not produtos.valida_valorvenda(valorvenda):
print("Valor Inválido!")
estoquemax = input("Valor deve ser Numérico: ")
valorcompra = input("Valor de Compra: ")
while not produtos.valida_valorvenda(valorcompra):
print("Valor Inválido!")
estoquemax = input("Valor deve ser Numérico: ")
foto = input("Arquivo de foto: ")
subcategoria = 0
for scat in self.subcategorias:
if scat.nome.lower() == escolhe or scat.codigo == escolhe:
subcategoria = scat
break
else:
print("Subcategoria não Encontrada!\nDeseja criar uma SUBCATEGORIA?\n1- Sim\n2 - Não")
choice = input()
if choice.lower() == 's' or choice == '1':
self.create_subcategoria()
else:
self.create_produto()
produto = Produtos( subcategoria, codigo, nome, descricao, estoquemax, estoquemin, valorvenda, valorcompra, foto)
if produto not in self.produtos:
self.produtos.append(produto)
# funcionalidade pedida na especificação
def low_stock_alarm(self): # aviso de estoque baixo
pass
def consulta_estoque(self): # exibe itens disponiveis no estoque
print("Exibindo estoque")
if not len(self.categorias):
print("Não há Categorias Registrados!")
else:
for categoria in self.categorias:
print(categoria, end=" ")
print()
if not len(self.subcategorias):
print("Não há Subcategorias Registradas!")
else:
for subcategoria in self.subcategorias:
print(subcategoria, end=" ")
print()
if not len(self.produtos):
print("Não há Produtos Registrados!")
else:
for produto in self.produtos:
print(produto, end=" ")
self.menu_estoque()
def altera_item(self): # altera um item disponivel no estoque
print("alterando item do estoque")
self.menu_estoque()
def remove_item(self): # remove um item disponivel no estoque - n remover se o item ainda tem produtos no estoque
print("Removendo item do estoque")
self.menu_estoque()
def adiciona_item(self): # adiciona novo item ao estoque
print("Adicionando item ao estoque")
while 1:
print("************* Menu Adicionar: ******************")
print("Digite Ação!\n1 - Adicionar Categoria\n2 - Adicionar Subcategoria\n3 - Adicionar Produtos\n4 - Sair")
opcao = input()
while not self.valida_opcao(opcao):
print("Opção Inválida!")
opcao = input()
if opcao == '1':
self.create_categoria()
elif opcao == '2':
self.create_subcategoria()
elif opcao == '3':
pass
elif opcao == '4':
break
self.menu_estoque()
def menu_estoque(self):
print("Sistema de Vendas ao Consumidor")
print("****** MENU DE ESTOQUE *****")
print("Digite Ação!\n1 - Consultar Estoque\n2 - Adicionar\n3 - Remover\n4 - Alterar")
opcao = input()
while not self.valida_opcao(opcao):
print("Opção Inválida!")
opcao = input()
if opcao == '1':
self.consulta_estoque()
elif opcao == '2':
self.adiciona_item()
elif opcao == '3':
self.remove_item()
elif opcao == '4':
self.altera_item()
def valida_opcao(self, opcao):
if opcao.isdigit():
return True
else:
return False
estoque = Estoque()
|
5,885 | 6fa9dfadc60108e1718c6688f07de877b0ac0afd | #!usr/bin/python
# -*- coding:UTF-8 -*-
'''
Introduction:
Implementation of Stack
Created on: Oct 28, 2014
@author: ICY
'''
#-------------------------FUNCTION---------------------------#
class Stack(object):
    """Simple LIFO stack backed by a Python list.

    The top of the stack is the end of the list, so push/pop are O(1).
    """

    def __init__(self):
        self.items = []  # storage; index -1 is the top of the stack

    def is_empty(self):
        """Return True when the stack holds no items."""
        # Idiomatic truthiness check instead of comparing against [].
        return not self.items

    def clear(self):
        """Discard all stored items."""
        self.items = []

    def push(self, item):
        """Place *item* on top of the stack."""
        self.items.append(item)

    def pop(self):
        """Remove and return the top item; raises IndexError when empty."""
        return self.items.pop()

    def size(self):
        """Return the number of stored items."""
        return len(self.items)

    def get_top(self):
        """Return the top item without removing it; raises IndexError when empty."""
        # Negative indexing replaces items[len(items)-1].
        return self.items[-1]
#----------------------------SELF TEST----------------------------#
def main():
    """Smoke test: exercise the Stack API and print intermediate results."""
    s = Stack()
    print(s.is_empty())  # True - freshly created
    s.push(4)
    s.push('dog')
    print(s.get_top())  # 'dog'
    s.push(True)
    print(s.size())  # 3
    print(s.is_empty())  # False
    s.push(8.4)
    print(s.pop())  # 8.4
    print(s.pop())  # True
    print(s.size())  # 2
    pass

if __name__ == '__main__':
    main()
|
5,886 | 4f87c2602e3233889888e419296f67fe40a2db0f | #!/usr/bin/python
#==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#==========================================================================*/
# This script is used to automate the modularization process. The following
# steps are included:
# 1. Move the files in the monolithic ITK into modules of the modularized ITK.
# A manifest text file that lists all the files and their destinations is
# required to run the script.By default, the manifest file is named as
# "Manifest.txt" in the same directory of this script.
# 2. Create CMake Files and put them into modules.
# Modified by Guillaume Pasero <guillaume.pasero@c-s.fr>
# add dependencies in otb-module.cmake
# To run it, type ./modulizer.py OTB_PATH Manifest_PATH
# from the otb-modulizer root directory.
print "*************************************************************************"
print "WARNINGs! This modularization script is still in its experimental stage."
print "Current OTB users should not run this script."
print "*************************************************************************"
import shutil
import os.path as op
import re
import sys
import os
import stat
import glob
import documentationCheck
import analyseAppManifest
import dispatchTests
import dispatchExamples
from subprocess import call
def parseFullManifest(path):
    """Parse the modularization manifest (6 delimiter-separated fields/line).

    The field separator is auto-detected from the first line among ',', ';'
    and TAB.  '#' lines are comments; short lines are skipped with a warning.
    Returns a list of dicts with keys path/group/module/subDir.
    (Python 2 code: print statements, bytes/str conflation via 'rb'.)
    """
    sourceList = []
    nbFields = 6
    fd = open(path,'rb')
    # skip first line and detect separator
    firstLine = fd.readline()
    sep = ','
    if (len(firstLine.split(sep)) != nbFields):
        sep = ';'
    if (len(firstLine.split(sep)) != nbFields):
        sep = '\t'
    if (len(firstLine.split(sep)) != nbFields):
        print "Unknown separator"
        return sourceList
    fd.seek(0)
    # parse file
    for line in fd:
        if (line.strip()).startswith("#"):
            continue
        # tolerate a missing trailing (comment) field: nbFields-1 is enough
        words = line.split(sep)
        if (len(words) < (nbFields-1)):
            print "Wrong number of fields, skipping this line"
            continue
        fullPath = words[0].strip(" ,;\t\n\r")
        groupName = words[2].strip(" ,;\t\n\r")
        moduleName = words[3].strip(" ,;\t\n\r")
        subDir = words[4].strip(" ,;\t\n\r")
        sourceName = op.basename(fullPath)  # NOTE(review): computed but unused
        sourceList.append({"path":fullPath, "group":groupName, "module":moduleName, "subDir":subDir})
    fd.close()
    return sourceList
def parseDescriptions(path):
    """Parse a module-description file into a {module: description} dict.

    Each useful line is `"Module" | "Description"`; '#' lines and lines
    without exactly one '|' separator are skipped.  Quotes and surrounding
    whitespace are stripped from both fields.
    """
    output = {}
    sep = '|'
    nbFields = 2
    # FIX: the file was opened in binary mode ('rb') while being processed
    # with str operations — that conflates bytes and text and crashes under
    # Python 3.  Text mode + `with` guarantees decoding and closing.
    with open(path, 'r') as fd:
        for line in fd:
            if line.strip().startswith("#"):
                continue
            words = line.split(sep)
            if len(words) != nbFields:
                continue
            moduleName = words[0].strip(" \"\t\n\r")
            description = words[1].strip(" \"\t\n\r")
            output[moduleName] = description
    return output
if len(sys.argv) < 4:
print("USAGE: {0} monolithic_OTB_PATH OUTPUT_DIR Manifest_Path [module_dep [test_dep [mod_description]]]".format(sys.argv[0]))
print(" monolithic_OTB_PATH : checkout of OTB repository (will not be modified)")
print(" OUTPUT_DIR : output directory where OTB_Modular and OTB_remaining will be created ")
print(" Manifest_Path : path to manifest file, in CSV-like format. Fields are :")
print(" source_path/current_subDir/group/module/subDir/comment")
print(" module_dep : dependencies between modules")
print(" test_dep : additional dependencies for tests")
print(" mod_description : description for each module")
print(" migration_password : password to enable MIGRATION")
sys.exit(-1)
scriptDir = op.dirname(op.abspath(sys.argv[0]))
HeadOfOTBTree = sys.argv[1]
if (HeadOfOTBTree[-1] == '/'):
HeadOfOTBTree = HeadOfOTBTree[0:-1]
OutputDir = sys.argv[2]
HeadOfModularOTBTree = op.join(OutputDir,"OTB_Modular")
ManifestPath = sys.argv[3]
EdgePath = ""
if len(sys.argv) >= 5:
EdgePath = sys.argv[4]
testDependPath = ""
if len(sys.argv) >= 6:
testDependPath = sys.argv[5]
modDescriptionPath = ""
if len(sys.argv) >= 7:
modDescriptionPath = sys.argv[6]
enableMigration = False
if len(sys.argv) >= 8:
migrationPass = sys.argv[7]
if migrationPass == "redbutton":
enableMigration = True
# copy the whole OTB tree over to a temporary dir
HeadOfTempTree = op.join(OutputDir,"OTB_remaining")
if op.isdir(HeadOfTempTree):
shutil.rmtree(HeadOfTempTree)
if op.isdir(HeadOfModularOTBTree):
shutil.rmtree(HeadOfModularOTBTree)
print("Start to copy" + HeadOfOTBTree + " to ./OTB_remaining ...")
shutil.copytree(HeadOfOTBTree,HeadOfTempTree, ignore = shutil.ignore_patterns('.hg','.hg*'))
print("Done copying!")
# checkout OTB-Modular
cmd ='hg clone http://hg.orfeo-toolbox.org/OTB-Modular '+HeadOfModularOTBTree
os.system(cmd)
logDir = op.join(OutputDir,"logs")
if not op.isdir(logDir):
os.makedirs(logDir)
# read the manifest file
print ("moving files from ./OTB_remaining into modules in {0}".format(HeadOfModularOTBTree))
numOfMissingFiles = 0;
missingf = open(op.join(logDir,'missingFiles.log'),'w')
moduleList=[]
moduleDic={}
sourceList = parseFullManifest(ManifestPath)
for source in sourceList:
# build module list
moduleDic[source["module"]] = source["group"]
# create the path
inputfile = op.abspath(op.join(HeadOfTempTree,source["path"]))
outputPath = op.join(op.join(HeadOfModularOTBTree,"Modules"),op.join(source["group"],op.join(source["module"],source["subDir"])))
if not op.isdir(outputPath):
os.makedirs(outputPath)
# copying files to the destination
if op.isfile(inputfile):
if op.isfile(op.join(outputPath,op.basename(inputfile))):
os.remove(op.join(outputPath,op.basename(inputfile)))
shutil.move(inputfile, outputPath)
else:
missingf.write(inputfile+'\n')
numOfMissingFiles = numOfMissingFiles + 1
missingf.close()
print ("listed {0} missing files to logs/missingFiles.log").format(numOfMissingFiles)
moduleList = moduleDic.keys()
# after move, operate a documentation check
for source in sourceList:
outputPath = op.join(op.join(HeadOfModularOTBTree,"Modules"),op.join(source["group"],op.join(source["module"],source["subDir"])))
outputFile = op.join(outputPath,op.basename(source["path"]))
if op.isfile(outputFile):
if op.splitext(outputFile)[1] == ".h":
nextContent = documentationCheck.parserHeader(outputFile,source["module"])
fd = open(outputFile,'wb')
fd.writelines(nextContent)
fd.close()
# get dependencies (if file is present)
dependencies = {}
testDependencies = {}
exDependencies = {}
for mod in moduleList:
dependencies[mod] = []
testDependencies[mod] = []
exDependencies[mod] = []
if op.isfile(EdgePath):
fd = open(EdgePath,'rb')
for line in fd:
words = line.split(',')
if len(words) == 2:
depFrom = words[0].strip(" ,;\t\n\r")
depTo = words[1].strip(" ,;\t\n\r")
if dependencies.has_key(depFrom):
dependencies[depFrom].append(depTo)
else:
print("Bad dependency : "+depFrom+" -> "+depTo)
fd.close()
if op.isfile(testDependPath):
fd = open(testDependPath,'rb')
for line in fd:
words = line.split(',')
if len(words) == 2:
depFrom = words[0].strip(" ,;\t\n\r")
depTo = words[1].strip(" ,;\t\n\r")
if testDependencies.has_key(depFrom):
testDependencies[depFrom].append(depTo)
else:
print("Bad dependency : "+depFrom+" -> "+depTo)
fd.close()
"""
if op.isfile(exDependPath):
fd = open(exDependPath,'rb')
for line in fd:
words = line.split(',')
if len(words) == 2:
depFrom = words[0].strip(" ,;\t\n\r")
depTo = words[1].strip(" ,;\t\n\r")
if exDependencies.has_key(depFrom):
exDependencies[depFrom].append(depTo)
else:
print("Bad dependency : "+depFrom+" -> "+depTo)
fd.close()
"""
modDescriptions = {}
if op.isfile(modDescriptionPath):
modDescriptions = parseDescriptions(modDescriptionPath)
# list the new files
newf = open(op.join(logDir,'newFiles.log'),'w')
for (root, subDirs, files) in os.walk(HeadOfTempTree):
for afile in files:
newf.write(op.join(root, afile)+'\n')
newf.close()
print ("listed new files to logs/newFiles.log")
###########################################################################
print ('creating cmake files for each module (from the template module)')
#moduleList = os.listdir(HeadOfModularOTBTree)
for moduleName in moduleList:
moduleDir = op.join(op.join(HeadOfModularOTBTree,"Modules"),op.join(moduleDic[moduleName],moduleName))
cmakeModName = "OTB"+moduleName
if op.isdir(moduleDir):
# write CMakeLists.txt
filepath = moduleDir+'/CMakeLists.txt'
if not op.isfile(filepath):
o = open(filepath,'w')
if op.isdir(moduleDir+'/src'):
template_cmakelist = op.join(scriptDir,'templateModule/otb-template-module/CMakeLists.txt')
else:
template_cmakelist = op.join(scriptDir,'templateModule/otb-template-module/CMakeLists-nosrc.txt')
for line in open(template_cmakelist,'r'):
line = line.replace('otb-template-module',cmakeModName)
o.write(line);
o.close()
# write src/CMakeLists.txt
# list of CXX files
if op.isdir(moduleDir+'/src'):
cxxFiles = glob.glob(moduleDir+'/src/*.cxx')
cxxFileList='';
for cxxf in cxxFiles:
cxxFileList = cxxFileList+' '+cxxf.split('/')[-1]+'\n'
# build list of link dependencies
linkLibs = ""
for dep in dependencies[moduleName]:
#verify if dep is a header-onlymodule
depThirdParty = False
try:
moduleDic[dep]
except KeyError:
# this is a ThirdParty module
depThirdParty = True
if not depThirdParty:
depModuleDir = op.join(op.join(HeadOfModularOTBTree,"Modules"),op.join(moduleDic[dep],dep))
depcxx = glob.glob(depModuleDir+'/src/*.cxx')
if depcxx :
linkLibs = linkLibs + " ${OTB"+dep+"_LIBRARIES}" + "\n"
else:
linkLibs = linkLibs + " ${OTB"+dep+"_LIBRARIES}" + "\n"
if len(linkLibs) == 0:
linkLibs = " ${OTBITK_LIBRARIES}"
filepath = moduleDir+'/src/CMakeLists.txt'
if not op.isfile(filepath):
o = open(filepath,'w')
for line in open(op.join(scriptDir,'templateModule/otb-template-module/src/CMakeLists.txt'),'r'):
line = line.replace('otb-template-module',cmakeModName)
line = line.replace('LIST_OF_CXX_FILES',cxxFileList[0:-1]) #get rid of the last \n
line = line.replace('LINK_LIBRARIES_TO_BE_REPLACED',linkLibs)
o.write(line);
o.close()
# write app/CMakeLists.txt
if op.isdir(moduleDir+'/app'):
os.mkdir(moduleDir+'/test')
srcFiles = glob.glob(moduleDir+'/app/*.cxx')
srcFiles += glob.glob(moduleDir+'/app/*.h')
appList = {}
for srcf in srcFiles:
# get App name
appName = analyseAppManifest.findApplicationName(srcf)
if len(appName) == 0:
continue
appList[appName] = {"source":op.basename(srcf)}
# get original location
cmakeListPath = ""
for item in sourceList:
if op.basename(item["path"]) == op.basename(srcf) and \
moduleName == item["module"]:
appDir = op.basename(op.dirname(item["path"]))
cmakeListPath = op.join(HeadOfOTBTree,op.join("Testing/Applications"),op.join(appDir,"CMakeLists.txt"))
break
# get App tests
if not op.isfile(cmakeListPath):
continue
appList[appName]["test"] = analyseAppManifest.findTestFromApp(cmakeListPath,appName)
# build list of link dependencies
linkLibs = ""
for dep in dependencies[moduleName]:
linkLibs = linkLibs + " ${OTB"+dep+"_LIBRARIES}" + "\n"
filepath = moduleDir+'/app/CMakeLists.txt'
if not op.isfile(filepath):
o = open(filepath,'w')
# define link libraries
o.write("set("+cmakeModName+"_LINK_LIBS\n")
o.write(linkLibs)
o.write(")\n")
for appli in appList:
content = "\notb_create_application(\n"
content += " NAME " + appli + "\n"
content += " SOURCES " + appList[appli]["source"] + "\n"
content += " LINK_LIBRARIES ${${otb-module}_LIBRARIES})\n"
o.write(content)
o.close()
filepath = moduleDir+'/test/CMakeLists.txt'
if not op.isfile(filepath):
o = open(filepath,'w')
o.write("otb_module_test()")
for appli in appList:
if not appList[appli].has_key("test"):
continue
o.write("\n#----------- "+appli+" TESTS ----------------\n")
for test in appList[appli]["test"]:
if test.count("${"):
print "Warning : test name contains a variable : "+test
continue
testcode=appList[appli]["test"][test]
testcode=[s.replace('OTB_TEST_APPLICATION', 'otb_test_application') for s in testcode]
o.writelines(testcode)
o.write("\n")
o.close()
# write test/CMakeLists.txt : done by dispatchTests.py
"""
if op.isdir(moduleDir+'/test'):
cxxFiles = glob.glob(moduleDir+'/test/*.cxx')
cxxFileList='';
for cxxf in cxxFiles:
cxxFileList = cxxFileList+cxxf.split('/')[-1]+'\n'
filepath = moduleDir+'/test/CMakeLists.txt'
if not op.isfile(filepath):
o = open(filepath,'w')
for line in open('./templateModule/otb-template-module/test/CMakeLists.txt','r'):
# TODO : refactor for OTB
words= moduleName.split('-')
moduleNameMod='';
for word in words:
moduleNameMod=moduleNameMod + word.capitalize()
line = line.replace('itkTemplateModule',moduleNameMod)
line = line.replace('itk-template-module',moduleName)
line = line.replace('LIST_OF_CXX_FILES',cxxFileList[0:-1]) #get rid of the last \n
o.write(line);
o.close()
"""
# write otb-module.cmake, which contains dependency info
filepath = moduleDir+'/otb-module.cmake'
if not op.isfile(filepath):
o = open(filepath,'w')
for line in open(op.join(scriptDir,'templateModule/otb-template-module/otb-module.cmake'),'r'):
# replace documentation
if line.find("DESCRIPTION_TO_BE_REPLACED") >= 0:
docString = "\"TBD\""
if moduleName in modDescriptions:
descPos = line.find("DESCRIPTION_TO_BE_REPLACED")
limitChar = 80
docString = "\""+modDescriptions[moduleName]+"\""
curPos = 80 - descPos
while curPos < len(docString):
lastSpace = docString[0:curPos].rfind(' ')
if lastSpace > max(0,curPos-80):
docString = docString[0:lastSpace] + '\n' + docString[lastSpace+1:]
else:
docString = docString[0:curPos] + '\n' + docString[curPos:]
curPos += 81
line = line.replace('DESCRIPTION_TO_BE_REPLACED',docString)
# replace module name
line = line.replace('otb-template-module',cmakeModName)
# replace depend list
dependTagPos = line.find("DEPENDS_TO_BE_REPLACED")
if dependTagPos >= 0:
replacementStr = "DEPENDS"
indentStr = ""
for it in range(dependTagPos+2):
indentStr = indentStr + " "
if len(dependencies[moduleName]) > 0:
deplist = dependencies[moduleName]
else:
deplist = ["Common"]
for dep in deplist:
replacementStr = replacementStr + "\n" + indentStr +"OTB"+ dep
line = line.replace('DEPENDS_TO_BE_REPLACED',replacementStr)
# replace test_depend list
testDependTagPos = line.find("TESTDEP_TO_BE_REPLACED")
if testDependTagPos >= 0:
if moduleName.startswith("App"):
# for application : hardcode TestKernel and CommandLine
indentStr = ""
for it in range(testDependTagPos+2):
indentStr = indentStr + " "
replacementStr = "TEST_DEPENDS\n" + indentStr + "OTBTestKernel\n" + indentStr + "OTBCommandLine"
line = line.replace('TESTDEP_TO_BE_REPLACED',replacementStr)
else:
# standard case
if len(testDependencies[moduleName]) > 0:
indentStr = ""
replacementStr = "TEST_DEPENDS"
for it in range(testDependTagPos+2):
indentStr = indentStr + " "
for dep in testDependencies[moduleName]:
replacementStr = replacementStr + "\n" + indentStr +"OTB"+ dep
line = line.replace('TESTDEP_TO_BE_REPLACED',replacementStr)
else:
line = line.replace('TESTDEP_TO_BE_REPLACED','')
# replace example_depend list
exDependTagPos = line.find("EXDEP_TO_BE_REPLACED")
if exDependTagPos >= 0:
if len(exDependencies[moduleName]) > 0:
indentStr = ""
replacementStr = "EXAMPLE_DEPENDS"
for it in range(exDependTagPos+2):
indentStr = indentStr + " "
for dep in exDependencies[moduleName]:
replacementStr = replacementStr + "\n" + indentStr +"OTB"+ dep
line = line.replace('EXDEP_TO_BE_REPLACED',replacementStr)
else:
line = line.replace('EXDEP_TO_BE_REPLACED','')
o.write(line);
o.close()
# call dispatchTests to fill test/CMakeLists
if op.isfile(testDependPath):
dispatchTests.main(["dispatchTests.py",ManifestPath,HeadOfOTBTree,HeadOfModularOTBTree,testDependPath])
"""
# call dispatchExamples to fill example/CMakeLists
if op.isfile(exDependPath):
dispatchExamples.main(["dispatchExamples.py",ManifestPath,HeadOfOTBTree,HeadOfModularOTBTree,exDependPath])
"""
# examples
for i in sorted(os.listdir(HeadOfTempTree + "/Examples")):
if i == "CMakeLists.txt" or i == "README.txt" or i.startswith("DataRepresentation"):
continue
for j in sorted(os.listdir(HeadOfTempTree + "/Examples/" + i)):
if j == "CMakeLists.txt" or j.startswith("otb"):
continue
command = "mv %s/Examples/%s/%s %s/Examples/%s/%s" % ( HeadOfTempTree, i, j, HeadOfModularOTBTree, i, j)
os.system(command)
for i in sorted(os.listdir(HeadOfTempTree + "/Examples/DataRepresentation")):
if i == "CMakeLists.txt" or i == "README.txt":
continue
for j in sorted(os.listdir(HeadOfTempTree + "/Examples/DataRepresentation/" + i)):
if j == "CMakeLists.txt" or j.startswith("otb"):
continue
command = "mv %s/Examples/DataRepresentation/%s/%s %s/Examples/DataRepresentation/%s/%s" % ( HeadOfTempTree, i, j, HeadOfModularOTBTree, i, j)
os.system(command)
# save version without patches (so that we can regenerate patches later)
os.system( "cp -ar " + op.join(OutputDir,"OTB_Modular") + " " + op.join(OutputDir,"OTB_Modular-nopatch") )
# apply patches in OTB_Modular
curdir = op.abspath(op.dirname(__file__))
command = "cd " + op.join(OutputDir,"OTB_Modular") + " && patch -p1 < " + curdir + "/patches/otbmodular.patch"
print "Executing " + command
os.system( command )
# remove Copyright files we don't want to touch later
os.system( "rm -rf %s" % (op.join(HeadOfTempTree,"Copyright") ) )
os.system( "rm -rf %s" % (op.join(HeadOfTempTree,"RELEASE_NOTES.txt") ) )
os.system( "rm -rf %s" % (op.join(HeadOfTempTree,"README") ) )
# PREPARE MIGRATION COMMIT ON A CLONE OF ORIGINAL CHECKOUT
if enableMigration:
print("Executing migration on a clone of original checkout")
HeadOfTempTree = op.abspath(HeadOfTempTree)
OutputDir = op.abspath(OutputDir)
# clone original checkout
outputModular = op.join(OutputDir,"OTB_Modular")
outputMigration = op.join(OutputDir,"OTB_Migration")
if op.exists(outputMigration):
os.removedirs(outputMigration)
command = ["cp","-ar",HeadOfOTBTree,outputMigration]
call(command)
os.chdir(outputMigration)
# walk through OTB_Remaining and delete corresponding files in OTB checkout
print("DELETE STEP...")
for dirPath, dirNames, fileNames in os.walk(HeadOfTempTree):
currentSourceDir = dirPath.replace(HeadOfTempTree,'.')
for fileName in fileNames:
if op.exists(op.join(currentSourceDir,fileName)):
command = ["hg","remove",op.join(currentSourceDir,fileName)]
call(command)
else:
print("Unknown file : "+op.join(currentSourceDir,fileName))
command = ['hg','commit','-m','ENH: Remove files not necessary after modularization']
call(command)
# walk through manifest and rename files
print("MOVE STEP...")
for source in sourceList:
outputPath = op.join("./Modules",op.join(source["group"],op.join(source["module"],source["subDir"])))
command = ['hg','rename',source["path"],op.join(outputPath,op.basename(source["path"]))]
call(command)
command = ['hg','commit','-m','ENH: Move source and test files into their respective module']
call(command)
# add new files from OTB_Modular (files from OTB-Modular repo + generated files)
print("ADD STEP...")
for dirPath, dirNames, fileNames in os.walk(outputModular):
currentSourceDir = dirPath.replace(outputModular,'.')
if currentSourceDir.startswith("./.hg"):
print("skip .hg")
continue
for fileName in fileNames:
# skip hg files
if fileName.startswith(".hg"):
continue
targetFile = op.join(currentSourceDir,fileName)
if not op.exists(targetFile):
if not op.exists(currentSourceDir):
command = ["mkdir","-p",currentSourceDir]
call(command)
shutil.copy(op.join(dirPath,fileName),targetFile)
command = ['hg','add']
call(command)
command = ['hg','commit','-m','ENH: Add new files for modular build system']
call(command)
# apply patches on OTB Checkout
print("PATCH STEP...")
for dirPath, dirNames, fileNames in os.walk(outputModular):
currentSourceDir = dirPath.replace(outputModular,'.')
if currentSourceDir.startswith("./.hg"):
continue
for fileName in fileNames:
# skip hg files
if fileName.startswith(".hg"):
continue
targetFile = op.join(currentSourceDir,fileName)
if op.exists(targetFile):
command = ['cp',op.join(dirPath,fileName),targetFile]
call(command)
command = ['hg','commit','-m','ENH: Apply patches necessary after modularization']
call(command)
|
5,887 | 8c4006ed8f4b1744f0316a61d95458b227653fee | /Users/AbbyPennington/anaconda/lib/python3.5/os.py |
5,888 | 2a6ae615b427a7c970aacf9804865ea7952d065f | # -*- coding: utf-8 -*-
"""
Created on Sun Dec 20 14:48:56 2020
@author: dhk1349
"""
n = int(input())  # target channel number
m = int(input())  # count of broken buttons (read but otherwise unused here)
broken=[int(i) for i in input().split()]  # the broken button digits
normal=[i for i in range(10)]  # digits whose buttons still work
ans=abs(n-100)  # baseline answer: only +/- presses starting from channel 100
# remove every broken digit from the usable set
for i in broken:
    normal.remove(i)
tempnum=0  # accumulator for a candidate channel number
iternum=1  # current decimal place value (1, 10, 100, ...)
def solve(lst, target):
    # NOTE(review): this function looks unfinished. `lst` and `target` are
    # never used, and the body reads and reassigns the module-level
    # n / tempnum / iternum without a `global` declaration, so calling it
    # raises UnboundLocalError on `n`. Confirm intended behavior before use.
    # emit the closest representable number (original comment, translated)
    while n!=0:
        val=n%10
        # NOTE(review): n = n/10 is float division on Python 3; n //= 10 was
        # presumably intended -- with floats the loop only stops once the
        # value underflows to 0.0.
        n=n/10
        if val not in normal:
            tempnum+=(iternum*val)
        iternum*=10
|
5,889 | 8a04166e091e2da348928598b2356c8ad75dd831 | #usage:
#crawl raw weibo text data from sina weibo users(my followees)
#in total, there are 20080 weibo tweets, because there is uplimit for crawler
# -*- coding: utf-8 -*-
import weibo
APP_KEY = 'your app_key'
APP_SECRET = 'your app_secret'
CALL_BACK = 'your call back url'
def run():
    """Crawl up to 3 timeline pages for every followee of two seed accounts
    and append the raw tweet text (UTF-8) to one output file.

    Python 2 code: `wb.text.encode('utf-8')+'\\n'` concatenates bytes with a
    str, which only works on Python 2.
    """
    token = "your access token gotten from call_back url"
    client = weibo.APIClient(APP_KEY, APP_SECRET, CALL_BACK)
    client.set_access_token(token,12345)
    # followees of the first seed account
    followlist = client.friendships.friends.get(screen_name='蜀云Parallelli',count=200)
    wb_raw = open('weibo_raw_userlistweibo_big.txt','w')
    weiboCnt = 0
    # screen names already crawled, to de-duplicate between the two seeds
    usernames = {}
    for fl in followlist.users:
        pg = 1
        # NOTE(review): this request is discarded -- the while loop below
        # fetches page 1 again immediately.
        wbres = client.statuses.user_timeline.get(screen_name=fl.screen_name,page=pg)
        while (pg <= 3):
            wbres = client.statuses.user_timeline.get(screen_name=fl.screen_name,page=pg)
            if fl.screen_name not in usernames:
                usernames[fl.screen_name]=1
            for wb in wbres.statuses:
                weiboCnt += 1
                wb_raw.write(wb.text.encode('utf-8')+'\n')
            pg += 1
    # second seed account; skip followees already crawled above
    followlist = client.friendships.friends.get(screen_name='尹欢欢欢',count=200)
    for fl in followlist.users:
        pg = 1
        if fl.screen_name in usernames:
            continue
        # NOTE(review): same discarded first request as above
        wbres = client.statuses.user_timeline.get(screen_name=fl.screen_name,page=pg)
        while (pg <= 3):
            wbres = client.statuses.user_timeline.get(screen_name=fl.screen_name,page=pg)
            if fl.screen_name not in usernames:
                usernames[fl.screen_name]=1
            for wb in wbres.statuses:
                weiboCnt += 1
                wb_raw.write(wb.text.encode('utf-8')+'\n')
            pg += 1
    # total number of tweets written (Python 2 print statement)
    print weiboCnt
    wb_raw.close()
if __name__ == "__main__":
    # kick off the crawler only when executed as a script
    run()
class mySeq:
    """A tiny read-only sequence of Roman numerals.

    Implements ``__len__`` and ``__getitem__`` so instances support
    ``len()``, indexing, and iteration via the sequence protocol.
    """

    def __init__(self):
        # backing store for the sequence elements
        self.mseq = ['I', 'II', 'III', 'IV']

    def __len__(self):
        """Return the number of elements."""
        return len(self.mseq)

    def __getitem__(self, key):
        """Return the element at *key*.

        Raises IndexError for out-of-range keys. (The original implicitly
        returned None, which broke the iteration protocol: ``for x in seq``
        would yield None forever instead of stopping.)
        """
        if 0 <= key < len(self.mseq):
            return self.mseq[key]
        raise IndexError(key)
if __name__ == '__main__':
    # quick demo: report the length, then print every element
    seq = mySeq()
    print('Len of mySeq : ', len(seq))
    for element in seq.mseq:
        print(element)
5,891 | 38184ed4117b1b7dcf9e135ce8612fa13c44a99c | i = 0
# Interactive lookup: repeatedly read a name, echo it, and print a short
# bio for the known names; unknown names are just echoed back.
while True:
    name = input("Name is: ")
    print(name)
    if name == "Zeal":
        print("""Name_Zeal.
Age_16.
Interested in Programming.""")
    elif name == "HanZaw":
        print("""Name_Han Zaw.
Age_18.
Studying Code at Green Hacker.""")
    elif name == "Murphy":
        print("""Name_Murphy.
Age_17.
Insterested in Editing.""")
    elif name == "Ngal":
        print("""Name_Ngal.
Age_17.
In Loved with Me:p""")
5,892 | f39945f35b13c0918c3ef06224bca65ae6166ebc | import numpy as np
# Build one array out of element-wise repetition followed by whole-array tiling.
a = np.array([1, 2, 3])
b = np.concatenate((np.repeat(a, 3), np.tile(a, 3)))
print(b)
|
5,893 | 87a62f76027e0653f6966f76a42def2ce2a26ba3 | #! /usr/bin/python3
print("content-type: text/html")
print()
import cgi
import subprocess as sp
import requests
import xmltodict
import json
# Read the registration number from the CGI form field "ch".
db = cgi.FieldStorage()
ch=db.getvalue("ch")
# Query the regcheck.org.uk India endpoint (XML response).
# NOTE(review): the username placeholder must be filled in for real use,
# and `ch` is interpolated into the URL unescaped.
url =("http://www.regcheck.org.uk/api/reg.asmx/CheckIndia?RegistrationNumber={}&username=<username>" .format(ch))
url=url.replace(" ","%20")
r = requests.get(url)
# XML -> dict -> JSON string -> dict round trip to get plain dict/list types
n = xmltodict.parse(r.content)
k = json.dumps(n)
df = json.loads(k)
# the API nests a JSON string under Vehicle/vehicleJson; parse it again
l=df["Vehicle"]["vehicleJson"]
p=json.loads(l)
# Assemble the human-readable report returned to the browser.
output="Your car's details are:\n"+"Owner name: "+str(p['Owner'])+"\n"+"Car Company: "+str(p['CarMake']['CurrentTextValue'])+"\n"+"Car Model: "+str(p['CarModel']['CurrentTextValue'])+"\n"+"Fuel Type: "+str(p['FuelType']['CurrentTextValue'])+"\n"+"Registration Year: "+str(p['RegistrationYear'])+"\n"+"Insurance: "+str(p['Insurance'])+"\n"+"Vehicle ID: "+str(p['VechileIdentificationNumber'])+"\n"+"Engine No.: "+str(p['EngineNumber'])+"\n"+"Location RTO: "+str(p['Location'])
print(output)
|
5,894 | 1c6e6394a6bd26b152b2f5ec87eb181a3387f794 | #!/usr/bin/python
# Find minimal distances between clouds in one bin, average these per bin
# Compute geometric and arithmetical mean between all clouds per bin
from netCDF4 import Dataset as NetCDFFile
from matplotlib import pyplot as plt
import numpy as np
from numpy import ma
from scipy import stats
from haversine import haversine
from scipy.spatial import distance
from distance_methods import distances
from CSD_fit import CSD_fit
# Open one cusize output file up-front just to read the size-bin axis,
# which fixes the second dimension of all accumulation arrays below.
cusize = NetCDFFile(
    '/home/vanlaar/HDCP2data/TA_dom4/cusize_output_time41.nc')
size = cusize.variables['size']
begin_time = 41
end_time = 48
# Per-timestep accumulators: one row per timestep, one column per size bin.
D0_all = np.zeros((end_time-begin_time+1,len(size)))
D1_all = np.zeros((end_time-begin_time+1,len(size)))
nclouds_bin_all = np.zeros((end_time-begin_time+1,len(size)))
mindistance_mean_all = np.zeros((end_time-begin_time+1,len(size)))
mindistance_std_all = np.zeros((end_time-begin_time+1,len(size)))
maxdistance_all = np.zeros((end_time-begin_time+1,len(size)))
maxdistanceY_all = np.zeros((end_time-begin_time+1,len(size)))
hn_normalized_all = np.zeros((end_time-begin_time+1,len(size)))
# Gather per-bin distance statistics for every timestep (Python 2 prints).
for time in range(begin_time,end_time+1):
    print 'time:',time
    cusize = NetCDFFile(
        '/home/vanlaar/HDCP2data/TA_dom4/cusize_output_time'+str(time)+'.nc')
    cloudlon = cusize.variables['cloud_lon'][:]
    cloudlat = cusize.variables['cloud_lat'][:]
    nclouds_cusize = cusize.variables['nclouds']
    #size = cusize.variables['size']
    cloud_bin = cusize.variables['cloud_bin'][0,:]
    hn = cusize.variables['hn']
    # normalize the bin counts by the total number of clouds
    hn_normalized_loop = hn/nclouds_cusize[0]
    ncloud_bin = cusize.variables['ncloud_bin']
    ncloudsint = int(nclouds_cusize[0])
    cloud_lon = cloudlon[0,0:ncloudsint]
    cloud_lat = cloudlat[0,0:ncloudsint]
    filledbin=np.argmin(hn[0,:]) # last bin with clouds, rest is empty
    # distances() returns (D0, D1, min-dist mean, min-dist std, nclouds/bin)
    output_distances = distances(filledbin,cloud_lon,cloud_lat,cloud_bin,size,ncloudsint)
    D0_all[time-41] = output_distances[0]
    D1_all[time-41] = output_distances[1]
    mindistance_mean_all[time-41] = output_distances[2]
    mindistance_std_all[time-41] = output_distances[3]
    nclouds_bin_all[time-41] = output_distances[4]
    hn_normalized_all[time-41] = hn_normalized_loop
# Average over all timesteps; min-distance stats converted from m to km.
mindistance_mean = np.mean(mindistance_mean_all,axis=0)/1000
mindistance_std = np.mean(mindistance_std_all,axis=0)/1000
D0 = np.mean(D0_all,axis=0)
D1 = np.mean(D1_all,axis=0)
nclouds = np.mean(nclouds_bin_all,axis=0)
hn_normalized = np.mean(hn_normalized_all,axis=0)
filledbin_all=np.argmin(hn_normalized[:])
# Fit the cloud size distribution on the first 10 bins; CSD_fit returns
# (a, b, c, log-fit values) -- presumably fit parameters, confirm in CSD_fit.
fit = CSD_fit(hn_normalized[0:10],size[0:10])
logfit = fit[3]
a = fit[0]
b = fit[1]
c = fit[2]
print 'a, b, c:'
print a, b, c
print logfit
sizelog = np.log10(size)
# Mask zero counts before log10 so empty bins become NaN instead of -inf.
hnlog = ma.filled(np.log10(ma.masked_equal(hn_normalized, 0)), np.nan)
ncloudslog = ma.filled(np.log10(ma.masked_equal(nclouds, 0)), np.nan)
#res = ma.filled(log2(ma.masked_equal(m, 0)), 0)
# +/- one standard deviation envelope around the mean min-distance
mindistance_plus = mindistance_mean + mindistance_std
mindistance_minus = mindistance_mean - mindistance_std
filledbin = np.argmin(mindistance_mean)
# Linear regression of nearest-neighbour distance against cloud size.
slope, intercept, r_value, p_value, std_err = stats.linregress(size[0:filledbin],mindistance_mean[0:filledbin])
print 'r-squared:',r_value**2
line = intercept + slope*size
print 'slope:',slope
print 'intercept:',intercept
##################################################################
### Plots
# Bins containing less than 0.5% of all clouds are greyed out below.
threshold = 0.005*sum(nclouds)
maxbin = np.min(np.where(nclouds <= threshold))
orange = (1.,0.38,0.01)
blue = (0.53,0.81,1)
# Plot 1: nearest-neighbour distance vs cloud size, with +/- std envelope.
plt.figure(figsize=(14,8))
plt.axis([0, 5500, 0, 120])
plt.xlabel('Cloud size [m]',fontsize=15)
plt.ylabel('Nearest-neighbour distance [km]',fontsize=15)
plt.fill_between(size,mindistance_plus,mindistance_minus,alpha=0.3,color=blue)
plt.scatter(size,mindistance_mean,color='k')
#plt.scatter(size,mindistance_plus,color='g')
#plt.plot(size,line,color='black')
ax = plt.gca()
ax.axvspan(size[maxbin], 5500, alpha=0.2, color='grey')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
plt.savefig('Figures/mindistance.pdf')
plt.savefig('Figures/mindistance.png')
# Plot 2: D1 vs D0 (mean distances).
plt.figure(figsize=(10,8))
#plt.axis([50000,220000, 50000, 220000])
plt.xlabel('D1')
plt.ylabel('D0')
plt.scatter(D1,D0,color='k')
plt.savefig('Figures/D1-D0.pdf')
# Plot 3: cloud size distribution (log-log) plus the fitted values.
plt.figure(figsize=(10,8))
plt.xlabel('log(l) [m]')
plt.ylabel('log(N*(l)) [m-1]')
plt.scatter(sizelog,hnlog)
plt.scatter(sizelog[0:10],logfit[0:10])
plt.savefig('Figures/CSD.pdf')
# Plot 4: distance-to-size ratio.
plt.figure(figsize=(10,8))
plt.xlabel('Cloud size')
plt.ylabel('Ratio distance/size')
plt.axis([0, 5500, 0, 0.02])
ax = plt.gca()
ax.axvspan(size[maxbin], 5500, alpha=0.2, color='grey')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# NOTE(review): this savefig overwrites Figures/mindistance.pdf with an
# empty axes BEFORE the scatter below -- looks like a copy-paste slip.
plt.savefig('Figures/mindistance.pdf')
plt.scatter(size[0:filledbin],mindistance_mean[0:filledbin]/size[0:filledbin])
plt.savefig('Figures/ratio_distance_size.pdf')
# Plot 5: number of clouds per size bin (linear) with the 0.5% threshold.
plt.figure(figsize=(10,8))
plt.xlabel('Cloud size')
plt.ylabel('Number of clouds')
plt.axhline(y=threshold, c='black')
ax = plt.gca()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
plt.scatter(size[0:filledbin],nclouds[0:filledbin])
plt.savefig('Figures/nclouds_size.pdf')
# Plot 6: number of clouds per size bin (log counts).
plt.figure(figsize=(10,8))
plt.xlabel('size')
plt.ylabel('nclouds')
plt.scatter(size[0:filledbin],ncloudslog[0:filledbin])
plt.savefig('Figures/nclouds_size_log.pdf')
|
5,895 | 1f0680c45afb36439c56a1d202537261df5f9afc | from eventnotipy import app
import json
# Load application settings from the JSON config file. A context manager
# guarantees the file handle is closed even if json.load() raises, unlike
# the previous explicit open()/close() pair.
with open('eventnotipy/config.json') as json_data:
    data = json.load(json_data)

# database credentials
username = data['dbuser']
password = data['password']
host = data['dbhost']
db_name = data['database']
# notification transport endpoints
email_host = data['email_host']
email_localhost = data['email_localhost']
sms_host = data['sms_host']
sms_localhost = data['sms_localhost']

# SQLAlchemy / Flask configuration
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://%s:%s@%s/%s' % (username, password, host, db_name)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_ECHO'] = False
app.secret_key = data['session_key']
5,896 | 90324392e763ac6ea78c77b909c4bea667d45e6c | from admin_tools.dashboard.modules import DashboardModule
from nodes.models import Node
from slices.models import Slice
class MyThingsDashboardModule(DashboardModule):
    """
    Controller dashboard module giving the user an overview of the
    nodes and slices belonging to its groups.
    """
    title = "My Things"
    template = "dashboard/modules/mythings.html"

    def init_with_context(self, context):
        user = context['request'].user
        # slices owned by any of the user's groups
        group_pks = user.groups.all().values_list('pk', flat=True)
        slices = Slice.objects.filter(group__in=group_pks)
        context['slices'] = slices
        # per-group node counts, one entry per state (same order as nodes_states)
        nodes = {}
        nodes_states = ['offline', 'safe', 'production']
        for group in user.groups.all():
            group_nodes = Node.objects.filter(group=group)
            nodes[group] = [
                group_nodes.filter(state_set__value=state).count()
                for state in nodes_states
            ]
        context['nodes_states'] = nodes_states
        context['user_nodes'] = nodes
        # remember whether there is anything to show (used by is_empty)
        self.has_data = nodes or slices

    def is_empty(self):
        """Hide the module when the user has neither nodes nor slices."""
        return not self.has_data
|
5,897 | 14e1af3d60efef842c72bf9b55143d0e14f3a7b8 | import asyncio
import json
from functools import lru_cache
from pyrogram import Client
SETTINGS_FILE = "/src/settings.json"  # JSON file holding bot_name/app_id/app_hash
CONN_FILE = "/src/conn.json"  # where the exported session string is written
def load_setting(setting: str):
    """Return the value stored under *setting* in SETTINGS_FILE (JSON)."""
    with open(SETTINGS_FILE) as handle:
        settings = json.load(handle)
        return settings[setting]
@lru_cache()
def get_bot_name():
    """Return the configured bot name (read from disk once, then cached)."""
    return load_setting("bot_name")
@lru_cache()
def get_app_id():
    """Return the Telegram API app id (read from disk once, then cached)."""
    return load_setting("app_id")
@lru_cache()
def get_app_hash():
    """Return the Telegram API app hash (read from disk once, then cached)."""
    return load_setting("app_hash")
async def initialize_client():
    """Log a Pyrogram client in and persist its session string to CONN_FILE.

    Saving the exported session string lets later runs reuse the session
    instead of authenticating again.
    """
    app = Client("testing", get_app_id(), get_app_hash())
    async with app:
        with open(CONN_FILE, "w+") as f:
            f.write(json.dumps({"connection_string": await app.export_session_string()}))
        print("Connection string was saved to conn.json")
if __name__ == "__main__":
    # asyncio.run() replaces the get_event_loop()/run_until_complete()
    # pattern, which is deprecated for this use since Python 3.10.
    asyncio.run(initialize_client())
|
5,898 | bf40b516e202af14469cd4012597ba412e663f56 | import nltk
import A
from collections import defaultdict
from nltk.align import Alignment, AlignedSent
class BerkeleyAligner():
    """Symmetrized IBM-style word aligner (Python 2 code).

    Trains translation (t) and distortion (q) tables in BOTH directions
    with EM, averaging the two directions' fractional counts each
    iteration (Berkeley-aligner style agreement).
    """
    def __init__(self, align_sents, num_iter):
        # t: translation probabilities, q: distortion probabilities
        self.t, self.q = self.train(align_sents, num_iter)
    # Computes the alignments for align_sent, using this model's parameters.
    # Returns an AlignedSent object, with the sentence pair and the
    # alignments computed.
    def align(self, align_sent):
        """Align each word of align_sent.words to its best position in
        align_sent.mots (or None for the NULL alignment)."""
        # #will return german --> english alignments
        alignments = []
        german = align_sent.words
        english = align_sent.mots
        len_g = len(german)
        len_e = len(english)
        for j in range(len_g):
            g = german[j]
            # start from the NULL-alignment probability
            best_prob = (self.t[(g,None)] * self.q[(0,j,len_e,len_g)], None)
            best_alignment_point = None  # NOTE(review): assigned but never used
            for i in range(len_e):
                e = english[i]
                # score the pair in both directions; keep whichever is larger
                ge_prob = (self.t[(e,g)]*self.q[(j,i,len_g,len_e)], i)
                eg_prob = (self.t[(g,e)]*self.q[(i,j,len_e,len_g)], i)
                best_prob = max(best_prob, ge_prob, eg_prob)
            alignments.append((j, best_prob[1]))
        return AlignedSent(align_sent.words, align_sent.mots, alignments)
    # EM algorithm. num_iters is the number of iterations. Returns the
    # translation and distortion parameters as a tuple.
    def train(self, aligned_sents, num_iters):
        """Run num_iters EM iterations over aligned_sents and return (t, q)."""
        MIN_PROB = 1.0e-12  # NOTE(review): defined but never used below
        #INITIALIZATION
        #defining the vocabulary for each language:
        #german = words
        #english = mots
        g_vocab = set()
        e_vocab = set()
        for sentence in aligned_sents:
            g_vocab.update(sentence.words)
            e_vocab.update(sentence.mots)
        # initializing translation table for english --> german and german --> english
        # (uniform over the source vocabulary)
        t = defaultdict(float)
        for g in g_vocab:
            for e in e_vocab:
                t[(g,e)] = 1.0 / float(len(g_vocab))
                t[(e,g)] = 1.0 / float(len(e_vocab))
        # initializing separate alignment tables for english --> german and german --> english
        # keys are (position, position, source length, target length)
        q_eg = defaultdict(float)
        q_ge = defaultdict(float)
        for sentence in aligned_sents:
            len_e=len(sentence.mots)
            len_g=len(sentence.words)
            for i in range(len_e):
                for j in range(len_g):
                    q_eg[(i,j,len_e,len_g)] = 1.0 / float((len_e+1))
                    q_ge[(j,i,len_g,len_e)] = 1.0 / float((len_g+1))
        print 'Initialization complete'
        #INITIALIZATION COMPLETE
        for i in range(num_iters):
            print 'Iteration ' + str(i+1) + ' /' + str(num_iters)
            #E step
            # fractional-count accumulators for both directions
            count_g_given_e = defaultdict(float)
            count_any_g_given_e = defaultdict(float)
            eg_alignment_count = defaultdict(float)
            eg_alignment_count_for_any_i = defaultdict(float)
            count_e_given_g = defaultdict(float)
            count_any_e_given_g = defaultdict(float)
            ge_alignment_count = defaultdict(float)
            ge_alignment_count_for_any_j = defaultdict(float)
            for sentence in aligned_sents:
                g_sentence = sentence.words
                e_sentence = sentence.mots
                len_e = len(sentence.mots)
                len_g = len(sentence.words)
                eg_total = defaultdict(float)
                ge_total = defaultdict(float)
                #E step (a): compute normalization
                for j in range(len_g):
                    g = g_sentence[j]
                    for i in range(len_e):
                        e = e_sentence[i]
                        eg_count = (t[(g_sentence[j],e_sentence[i])] * q_eg[(i,j,len_e,len_g)])
                        eg_total[g] += eg_count
                        ge_count = (t[(e_sentence[i], g_sentence[j])] * q_ge[(j,i,len_g,len_e)])
                        ge_total[e] += ge_count
                # E step (b): collect fractional counts
                for j in range(len_g):
                    g = g_sentence[j]
                    for i in range(len_e):
                        e = e_sentence[i]
                        #English --> German
                        eg_count = (t[(g_sentence[j],e_sentence[i])] * q_eg[(i,j,len_e,len_g)])
                        eg_normalized = eg_count / eg_total[g]
                        #German --> English
                        ge_count = (t[(e_sentence[i], g_sentence[j])] * q_ge[(j,i,len_g,len_e)])
                        ge_normalized = ge_count / ge_total[e]
                        #Averaging the probablities (the "agreement" step)
                        avg_normalized = (eg_normalized + ge_normalized) / 2.0
                        #Storing counts
                        count_g_given_e[(g,e)] += avg_normalized
                        count_any_g_given_e[e] += avg_normalized
                        eg_alignment_count[(i,j,len_e,len_g)] += avg_normalized
                        eg_alignment_count_for_any_i[(j,len_e,len_g)] += avg_normalized
                        count_e_given_g[(e,g)] += avg_normalized
                        count_any_e_given_g[g] += avg_normalized
                        ge_alignment_count[(j,i,len_g,len_e)] += avg_normalized
                        ge_alignment_count_for_any_j[(i,len_g,len_e)] += avg_normalized
            #M step: renormalize counts into probabilities
            q = defaultdict(float)
            for sentence in aligned_sents:
                for e in sentence.mots:
                    for g in sentence.words:
                        #eng --> germ
                        t[(g,e)]= count_g_given_e[(g,e)] / count_any_g_given_e[e]
                        #germ --> eng
                        t[(e,g)]= count_e_given_g[(e,g)] / count_any_e_given_g[g]
                len_e=len(sentence.mots)
                len_g=len(sentence.words)
                for i in range(len_e):
                    for j in range(len_g):
                        #eng --> germ
                        q[(i,j,len_e,len_g)] = eg_alignment_count[(i,j,len_e,len_g)] / eg_alignment_count_for_any_i[(j,len_e, len_g)]
                        #germ --> eng
                        q[(j,i,len_g,len_e)] = ge_alignment_count[(j,i,len_g,len_e)] / ge_alignment_count_for_any_j[(i,len_g,len_e)]
        return (t,q)
def main(aligned_sents):
    """Train a BerkeleyAligner for 10 EM iterations, dump its alignments to
    ba.txt and report the average AER over the first 50 sentence pairs."""
    ba = BerkeleyAligner(aligned_sents, 10)
    A.save_model_output(aligned_sents, ba, "ba.txt")
    avg_aer = A.compute_avg_aer(aligned_sents, ba, 50)
    print ('Berkeley Aligner')
    print ('---------------------------')
    print('Average AER: {0:.3f}\n'.format(avg_aer))
|
5,899 | fbd7868a37a2270e5dc86843adff50a94436404d | from openvino.inference_engine import IENetwork, IECore
import numpy as np
import time
from datetime import datetime
import sys
import os
import cv2
class MotionDetect:
    # Simple frame-difference motion detector: compares each frame against
    # a stored static background.
    def __init__(self):
        # reference background (grayscale, blurred); None until primed
        self.static_back = None

    def detect_motion(self, frame, reset=False):
        """Return True when *frame* shows motion relative to the stored
        background. The first call (or a call with reset=True) only primes
        the background and reports no motion."""
        blurred = cv2.GaussianBlur(
            cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), (21, 21), 0)
        if reset or self.static_back is None:
            self.static_back = blurred
            return False
        delta = cv2.absdiff(self.static_back, blurred)
        mask = cv2.dilate(
            cv2.threshold(delta, 50, 255, cv2.THRESH_BINARY)[1],
            None, iterations=2)
        contours, _ = cv2.findContours(
            mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        # any contour in the thresholded difference counts as motion
        return bool(contours)

    def reset_background(self):
        """Forget the stored background; the next detect_motion() re-primes it."""
        self.static_back = None
class InferenceModel:
    # Factory for 'ExecInferModel' objects (wraps an OpenVINO IECore).
    def __init__(self, device='MYRIAD'):
        # device: OpenVINO device name the network will be loaded onto
        self.ie = IECore()
        self.device = device
    def create_exec_infer_model(self, model_dir, output_dir, num_requests=2):
        """Load (or import a previously exported) IR model from model_dir
        and return an ExecInferModel ready for async inference.

        Returns False when loading/importing fails.
        """
        # Build the paths to the model files
        model_xml = os.path.join(
            model_dir, 'frozen_inference_graph.xml')
        model_bin = os.path.join(
            model_dir, 'frozen_inference_graph.bin')
        exported_model = os.path.join(model_dir, 'exported_model')
        # Load the class labels from 'classes.txt' (one label per line)
        labels = [line.strip() for line in open(
            os.path.join(model_dir, 'classes.txt')).readlines()]
        assert os.path.isfile(model_bin)
        assert os.path.isfile(model_xml)
        # Build the network from the IR files
        net = IENetwork(model=model_xml, weights=model_bin)
        # Determine the input layers: the 4D blob is the image input, an
        # optional 2D blob carries image-info metadata
        img_info_input_blob = None
        feed_dict = {}
        for blob_name in net.inputs:
            if len(net.inputs[blob_name].shape) == 4:
                input_blob = blob_name
            elif len(net.inputs[blob_name].shape) == 2:
                img_info_input_blob = blob_name
            else:
                raise RuntimeError("Unsupported {}D input layer '{}'. Only 2D and 4D input layers are supported"
                                   .format(len(net.inputs[blob_name].shape), blob_name))
        assert len(
            net.outputs) == 1, "Demo supports only single output topologies"
        out_blob = next(iter(net.outputs))
        # Import a previously exported compiled model (if present)
        if os.path.isfile(exported_model):
            print('found model to import')
            try:
                exec_net = self.ie.import_network(
                    model_file=exported_model, device_name=self.device, num_requests=num_requests)
            # NOTE(review): bare except maps ANY failure to False and hides
            # the cause -- consider catching a specific exception and logging
            except:
                return False
        else:
            # otherwise compile the network and export it for next time
            print('creating exec model')
            try:
                exec_net = self.ie.load_network(
                    network=net, num_requests=num_requests, device_name=self.device)
                exec_net.export(exported_model)
            except:
                return False
        nchw = net.inputs[input_blob].shape
        del net
        if img_info_input_blob:
            feed_dict[img_info_input_blob] = [nchw[2], nchw[3], 1]
        # wrap everything in an ExecInferModel, which runs the inference
        return ExecInferModel(exec_net, input_blob, out_blob, feed_dict, nchw, labels, output_dir)
class ExecInferModel:
    def __init__(self, exec_net, input_blob, out_blob, feed_dict, nchw, labels, output_dir):
        # exec_net: loaded OpenVINO executable network with async requests
        self.exec_net = exec_net
        self.labels = labels          # class label strings; index = class_id - 1
        self.input_blob = input_blob  # name of the 4D image input layer
        self.out_blob = out_blob      # name of the single output layer
        self.feed_dict = feed_dict    # extra network inputs (e.g. image-info blob)
        self.n, self.c, self.h, self.w = nchw  # expected input dimensions
        self.current_frames = {}      # request index -> frame currently being inferred
        self.detected_objects = {}    # class_id -> [count, best frame, best probability]
        self.output_dir = output_dir
def infer_frames(self, buffer, threshhold=0.6, view_result=True, n_save=20, save_all=False):
# Status Variablen
n_infered, n_detected, n_saved = 0, 0, 0
# alle Inferenz Requests durchiterieren
for inf_img_ind, infer_request in enumerate(self.exec_net.requests):
res, frame = None, None
# Status der Inferenz für aktuellen Request abfragen
status = infer_request.wait(0)
# 0: ergebnis da, -11: noch nicht gestartet
if status != 0 and status != -11:
continue
# Ergebnis für aktuellen Request holen
if inf_img_ind in self.current_frames:
res = infer_request.outputs[self.out_blob]
frame = self.current_frames[inf_img_ind]
n_infered += 1
# neuen Inferent Request starten
if len(buffer):
self.current_frames[inf_img_ind] = buffer.pop()
in_frame = cv2.resize(
self.current_frames[inf_img_ind], (self.w, self.h))
in_frame = in_frame.transpose((2, 0, 1))
in_frame = in_frame.reshape(
(self.n, self.c, self.h, self.w))
self.feed_dict[self.input_blob] = in_frame
infer_request.async_infer(self.feed_dict)
# Ergebnis verarbeiten
if res is None or frame is None:
continue
height, width = frame.shape[:2]
# inferenz ergebnisse für ein frame durchiterieren
for obj in res[0][0]:
# Threshold prüfen
if obj[2] < threshhold:
continue
n_detected += 1
# Boundig Box koordinalte aus Erg laden
xmin = int(obj[3] * width)
ymin = int(obj[4] * height)
xmax = int(obj[5] * width)
ymax = int(obj[6] * height)
# ID der erkannten Klasse
class_id = int(obj[1])
# Bounding Box in das Bild zeichnen
cv2.rectangle(frame, (xmin, ymin),
(xmax, ymax), color=(0, 255, 255), thickness=2)
cv2.putText(frame, self.labels[class_id - 1] + ' ' + str(round(obj[2] * 100, 1)) + '%', (xmin, ymin - 7),
cv2.FONT_HERSHEY_COMPLEX, 0.6, (0, 255, 255), 1)
# detected_objects dict anlegen mit key:class_id, value:[N, Roi, proba]
if not class_id in self.detected_objects:
self.detected_objects[class_id] = [
0, frame, obj[2]]
else:
self.detected_objects[class_id][0] += 1
# wenn wahrscheinlichkeit höher als bei gespeicherten, ersetzen
if self.detected_objects[class_id][2] < obj[2]:
self.detected_objects[class_id][1] = frame
self.detected_objects[class_id][2] = obj[2]
# nach 'n_save' abspeicher
if self.detected_objects[class_id][0] > n_save:
n_saved += 1
self._save(class_id)
del self.detected_objects[class_id]
if view_result:
cv2.imshow('infer result', frame)
cv2.waitKey(1)
# alle aus 'detected_objects' lokal speichern
if save_all:
print('saving all')
for class_id in self.detected_objects.keys():
self._save(class_id)
n_saved += 1
self.detected_objects = {}
return n_infered, n_detected, n_saved
# Funkiont zum speichern der Bilder
def _save(self, class_id):
class_name = self.labels[class_id - 1]
print('saving ', class_name)
time_stamp = datetime.now().strftime("%d-%b-%Y_%H-%M-%S")
file_name = time_stamp + '_' + class_name + '.jpg'
image_array = self.detected_objects[class_id][1]
# save image local
cv2.imwrite(os.path.join(
self.output_dir, file_name), image_array)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.