index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
25,200 | 0b5ac8335e39d640701338daf4a14243c58b6866 | #!/usr/bin/python3
import datetime

# Emit a shell-style assignment with the current build timestamp, e.g.
#   BUILD_DATE="2024-01-31 12:34:56"
# %F/%T are GNU/glibc shorthands that are missing from Windows' strftime,
# so the equivalent codes are spelled out for portability (same output).
print('BUILD_DATE="' + datetime.datetime.today().strftime("%Y-%m-%d %H:%M:%S") + "\"")
|
25,201 | 4b580268a7c9e7819886bb31282ed29376d5aee0 | import math
# Read two integers and print every number strictly between them whose
# remainder modulo 5 is 2 or 3.
x = int(input())
y = int(input())
small, big = min(x, y), max(x, y)
for value in range(small + 1, big):
    if value % 5 in (2, 3):
        print(value)
|
25,202 | 0d93835991cc5762cd71f8168043a6ef78351aaa | """
FROWNS LICENSE
Copyright (c) 2001-2003, Brian Kelley
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Brian Kelley nor the names of frowns
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Generate the n'th prime:

    from Primes import primes
    nthprime = primes[n]      # indexing extends the cached list as needed
    nthprime = primes.get(n)  # equivalent accessor form
"""
# Primes is a class that computes the list of primes.
# 303 primes are pre-computed and extended as necessary.
# Usually, our graphs are much smaller than this, so
# the sieve of Eratosthenes is actually not used.
class Primes:
    """Lazily extended list of prime numbers.

    The first few hundred primes are precomputed below; indexing past the
    cache extends it on demand by trial division.  Note that ``primes`` is
    a class attribute, so the cache is shared by all instances.
    """
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41,
              43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97,
              101, 103, 107, 109, 113, 127, 131, 137, 139, 149,
              151, 157, 163, 167, 173, 179, 181, 191, 193, 197,
              199, 211, 223, 227, 229, 233, 239, 241, 251, 257,
              263, 269, 271, 277, 281, 283, 293, 307, 311, 313,
              317, 331, 337, 347, 349, 353, 359, 367, 373, 379,
              383, 389, 397, 401, 409, 419, 421, 431, 433, 439,
              443, 449, 457, 461, 463, 467, 479, 487, 491, 499,
              503, 509, 521, 523, 541, 547, 557, 563, 569, 571,
              577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
              641, 643, 647, 653, 659, 661, 673, 677, 683, 691,
              701, 709, 719, 727, 733, 739, 743, 751, 757, 761,
              769, 773, 787, 797, 809, 811, 821, 823, 827, 829,
              839, 853, 857, 859, 863, 877, 881, 883, 887, 907,
              911, 919, 929, 937, 941, 947, 953, 967, 971, 977,
              983, 991, 997, 1009, 1013, 1019, 1021, 1031, 1033,
              1039, 1049, 1051, 1061, 1063, 1069, 1087, 1091,
              1093, 1097, 1103, 1109, 1117, 1123, 1129, 1151,
              1153, 1163, 1171, 1181, 1187, 1193, 1201, 1213,
              1217, 1223, 1229, 1231, 1237, 1249, 1259, 1277,
              1279, 1283, 1289, 1291, 1297, 1301, 1303, 1307,
              1319, 1321, 1327, 1361, 1367, 1373, 1381, 1399,
              1409, 1423, 1427, 1429, 1433, 1439, 1447, 1451,
              1453, 1459, 1471, 1481, 1483, 1487, 1489, 1493,
              1499, 1511, 1523, 1531, 1543, 1549, 1553, 1559,
              1567, 1571, 1579, 1583, 1597, 1601, 1607, 1609,
              1613, 1619, 1621, 1627, 1637, 1657, 1663, 1667,
              1669, 1693, 1697, 1699, 1709, 1721, 1723, 1733,
              1741, 1747, 1753, 1759, 1777, 1783, 1787, 1789,
              1801, 1811, 1823, 1831, 1847, 1861, 1867, 1871,
              1873, 1877, 1879, 1889, 1901, 1907, 1913, 1931,
              1933, 1949, 1951, 1973, 1979, 1987, 1993, 1997,
              1999, 2003, 2011, 2017, 2027, 2029, 2039, 2053,
              2063, 2069, 2081, 2083, 2087, 2089, 2099, 2111,
              2113, 2129, 2131, 2137, 2141, 2143, 2153, 2161,
              2179, 2203, 2207, 2213, 2221, 2237, 2239, 2243,
              2251, 2267, 2269, 2273, 2281, 2287, 2293, 2297,
              2309, 2311, 2333, 2339, 2341, 2347, 2351, 2357,
              2371, 2377, 2381, 2383, 2389, 2393, 2399, 2411,
              2417, 2423, 2437, 2441, 2447, 2459, 2467, 2473,
              2477, 2503, 2521, 2531, 2539, 2543, 2549, 2551,
              2557, 2579, 2591, 2593, 2609, 2617, 2621, 2633,
              2647, 2657, 2659, 2663, 2671, 2677, 2683, 2687,
              2689, 2693, 2699, 2707, 2711, 2713]

    # Simple trial division against the cached primes (the old comment
    # called this a sieve, but no sieve array is ever built).
    def _findNextPrime(self, N):
        """Extend the cached list until it holds at least N primes."""
        primes = self.primes
        nextPrime = primes[-1] + 1
        while len(primes) < N:
            is_prime = 1
            for p in primes:
                if p * p > nextPrime:
                    # No divisor <= sqrt(nextPrime) exists, so it is prime.
                    # (The original compared against nextPrime**2, which
                    # never triggered and made every test scan the whole
                    # cache.)
                    break
                if nextPrime % p == 0:
                    is_prime = 0
                    break
            if is_prime:
                primes.append(nextPrime)
            nextPrime += 1

    def __getitem__(self, i):
        """Return the i-th prime (0-based), extending the cache if needed."""
        assert i >= 0, "Index must be non-negative!"
        if i >= len(self.primes) - 1:
            self._findNextPrime(i + 1)
        return self.primes[i]

    def get(self, i):
        """Alias for indexing: get(n) retrieves the n-th prime."""
        return self[i]

primes = Primes()
|
25,203 | 3ad22f288cc525132eccec04c9ac3616cea7e698 | import FreeCAD
import FreeCADGui
import EB_Auxiliaries
import PySide.QtCore as QtCore
import PySide.QtGui as QtGui
# import drafttaskpanels.task_circulararray
from drafttaskpanels.task_scale import *
# g = WBAuxiliaries.SelectionGate("Face")
# FreeCADGui.Selection.addSelectionGate(g)
# p = drafttaskpanels.task_circulararray.TaskPanelCircularArray()
class panelMy:
    """FreeCAD task panel with buttons for moving a Part object
    point-to-point (the move actions themselves are not implemented yet)."""

    def __init__(self):
        self.form = QtGui.QWidget()
        self.form.setWindowTitle("Move Part Object Point to Point")
        grid = QtGui.QGridLayout(self.form)
        self.InfoLabel = QtGui.QLabel("Info")
        grid.addWidget(self.InfoLabel, 0, 0)
        # All four buttons are currently wired to the same placeholder slot.
        self.btnXYZ = self._make_button("Move XYZ", grid, 1, 1)
        self.btnX = self._make_button("Move X", grid, 1, 0)
        self.btnY = self._make_button("Move Y", grid, 2, 0)
        self.btnZ = self._make_button("Move Z", grid, 3, 0)

    def _make_button(self, label, grid, row, col):
        """Create a push button connected to MoveXYZ and place it in the grid."""
        button = QtGui.QPushButton(label)
        button.clicked.connect(self.MoveXYZ)
        grid.addWidget(button, row, col)
        return button

    def MoveXYZ(self):
        """Placeholder slot for the move actions (not yet implemented)."""
        pass
p = panelMy()
# Use the explicitly imported FreeCADGui module rather than the implicit
# `Gui` alias that only exists inside FreeCAD's GUI Python console, so the
# macro also works when executed outside that console.
FreeCADGui.Control.showDialog(p)
25,204 | f13aeae10a206cf82a647384ca440a4ca676b112 | from .base import * # noqa
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "TESTSEKRET"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
CELERY_EAGER_PROPAGATES_EXCEPTIONS = False  # To test error handling
CELERY_ALWAYS_EAGER = True
BROKER_BACKEND = "memory"
# MD5 keeps test-user creation fast; never use it outside the test settings.
PASSWORD_HASHERS = ("django.contrib.auth.hashers.MD5PasswordHasher",)
ENV_HOSTS = [host for host in env.str("ALLOWED_HOSTS", "").split(",") if host]
ALLOWED_HOSTS = ENV_HOSTS + ["localhost", ".localhost", "127.0.0.1", "0.0.0.0"]
# Plain strings, not tuples: the trailing commas previously turned each of
# these fake credentials into a single-element tuple, which breaks any code
# that expects a string value.
TRANSFERTO_LOGIN = "fake_transferto_login"
TRANSFERTO_TOKEN = "fake_transferto_token"
TRANSFERTO_APIKEY = "fake_transferto_apikey"
TRANSFERTO_APISECRET = "fake_transferto_apisecret"
RABBITMQ_MANAGEMENT_INTERFACE = "http://user:pass@rabbitmq:15672/api/queues//my_vhost/"
|
25,205 | 22cf3252f1e8a806bc8eed7ad13e3bf5420b42a5 | from django.db import models
from datetime import datetime
from wanwenyc.settings import DJANGO_SERVER_YUMING
from django.contrib.auth import get_user_model #导入get_user_model
from testupdatadb.models import UpdateDbData
# The third import above (testupdatadb) is our own package.
User = get_user_model()  # returns the User class named by settings.AUTH_USER_MODEL
# Region
class SpiderHMArea(models.Model):
    """Region (place of origin) tag for a scraped comic."""
    hm_area = models.CharField(max_length=100, default="", null=True, blank=True, verbose_name=u"地区")
    hm_area_url = models.CharField(max_length=1500, default="", null=True, blank=True,verbose_name=u"地区外部链接")
    write_user = models.ForeignKey(User, null=True, blank=True, verbose_name=u"用户名", on_delete=models.PROTECT)
    # auto_now_add=True: timestamp written automatically when the row is first inserted.
    add_time = models.DateTimeField(null=True, blank=True,auto_now_add=True,
                                    verbose_name=u"添加时间")
    # default=datetime.now (no parentheses): evaluated at save time for new rows;
    # unlike auto_now=True it is NOT refreshed on every update.
    update_time = models.DateTimeField(default=datetime.now, null=True, blank=True,
                                       verbose_name=u"更新时间")

    class Meta:
        verbose_name = u"地区"
        verbose_name_plural=verbose_name

    def __str__(self):
        return self.hm_area
# Genre / type
class SpiderHMTag(models.Model):
    """Genre tag for a scraped comic."""
    hm_tag = models.CharField(max_length=100, default="", null=True, blank=True, verbose_name=u"类型")
    hm_tag_url = models.CharField(max_length=1500, default="", null=True, blank=True,verbose_name=u"类型外部链接")
    write_user = models.ForeignKey(User, null=True, blank=True, verbose_name=u"用户名", on_delete=models.PROTECT)
    # auto_now_add=True: timestamp written automatically when the row is first inserted.
    add_time = models.DateTimeField(null=True, blank=True,auto_now_add=True,
                                    verbose_name=u"添加时间")
    # default=datetime.now (no parentheses): evaluated at save time for new rows;
    # unlike auto_now=True it is NOT refreshed on every update.
    update_time = models.DateTimeField(default=datetime.now, null=True, blank=True,
                                       verbose_name=u"更新时间")

    class Meta:
        verbose_name = u"类型"
        verbose_name_plural=verbose_name

    def __str__(self):
        return self.hm_tag
# Book title
class SpiderHMBook(models.Model):
    """A scraped comic book with cover image, region/genre tags and chapters."""
    splider_url = models.CharField(max_length=1500, default="",null=True, blank=True,verbose_name=u"爬取数据URL")  # adding unique=True would make the URL the unique key
    splider_title = models.CharField(max_length=1000, default="爬取漫画数据",null=True, blank=True, verbose_name=u"数据标题")
    # Width/height are written into automatically via ImageField(height_field/width_field).
    img_height = models.CharField(max_length=100, default=75,null=True, blank=True, verbose_name=u"封面图高度")
    img_width = models.CharField(max_length=100, default=75, null=True, blank=True,verbose_name=u"封面图宽度")
    front_cover_img = models.ImageField(upload_to="hanman/fengmian/" , null=True, blank=True,verbose_name=u"封面图片", height_field='img_height',width_field='img_width',max_length=2000)
    chapter_count = models.CharField(max_length=100, default="", null=True, blank=True, verbose_name=u"章节数")
    is_love = models.BooleanField(default=False,verbose_name=u"喜爱")
    is_check = models.BooleanField(default=False,verbose_name=u"检查封面")
    # ManyToManyField has no null parameter; supplying one triggers warning fields.W340.
    hm_area = models.ManyToManyField(SpiderHMArea,default="", blank=True,verbose_name=u"地区")
    hm_tag = models.ManyToManyField(SpiderHMTag,default="",blank=True,verbose_name=u"类型")
    write_user = models.ForeignKey(User, null=True, blank=True, verbose_name=u"用户名", on_delete=models.PROTECT)
    # auto_now_add=True: timestamp written automatically when the row is first inserted.
    add_time = models.DateTimeField(null=True, blank=True,auto_now_add=True,
                                    verbose_name=u"添加时间")
    # default=datetime.now (no parentheses): evaluated at save time for new rows;
    # unlike auto_now=True it is NOT refreshed on every update.
    update_time = models.DateTimeField(default=datetime.now, null=True, blank=True,
                                       verbose_name=u"更新时间")

    def front_cover_img_data(self):
        """Admin list column: small cover thumbnail that expands on click.

        NOTE(review): this returns a complete HTML document (head/body) for
        a single admin cell, and the embedded script relies on jQuery ($)
        being loaded by the admin page -- confirm both are intentional.
        """
        from django.utils.safestring import mark_safe  # render as HTML instead of escaping it
        # html_img = "<a href='{}'><span>{}<span></a><br/><a href='{}/media/{}'> <img src='{}/media/{}' style='width:75px;height:75px;'/></a>".format(self.splider_url,self.chapter_count,DJANGO_SERVER_YUMING,self.front_cover_img,DJANGO_SERVER_YUMING,self.front_cover_img)
        html_tou = """
        <!DOCTYPE html>
        <html>
        <head>
        <meta charset="utf-8" />
        <title>大图</title>
        <script type="text/javascript">
        $(function () {
        var imglist = document.getElementsByTagName("img");
        //安卓4.0+等高版本不支持window.screen.width,安卓2.3.3系统支持
        var _width;
        doDraw();
        window.onresize = function () {
        //捕捉屏幕窗口变化,始终保证图片根据屏幕宽度合理显示
        doDraw();
        }
        function doDraw() {
        _width = window.innerWidth;
        for (var i = 0, len = imglist.length; i < len; i++) {
        DrawImage(imglist[i], _width);
        }
        }
        function DrawImage(ImgD, _width) {
        var image = new Image();
        image.src = ImgD.src;
        image.onload = function () {
        //限制,只对宽高都大于30的图片做显示处理
        if (image.width > 30 && image.height > 30) {
        if (image.width > _width) {
        ImgD.width = _width;
        ImgD.height = (image.height * _width) / image.width;
        } else {
        ImgD.width = image.width;
        ImgD.height = image.height;
        }
        }
        }
        }
        })
        </script>
        </head>
        <body>"""
        html_img = """ <a href='{}'><span>{}<span></a><br/>
        <div onclick='$(".my_set_image_img").hide();$(this).next().show();'>
        <img src='{}' style='width:50px;height:50px;'>
        <br/>点击可看大图
        </div>
        <div class='my_set_image_img' onclick="$('.my_set_image_img').hide()" style="z-index:9999;position:fixed; left: 100px; top:100px;display:none; width:auto; height:auto;">
        <img src='{}' style='width:100px; height:100px;'>
        </div>""".format(self.splider_url,self.chapter_count,self.front_cover_img.url,self.front_cover_img.url)
        html_wei = """
        </body>
        </html>
        """
        html_all = html_tou+html_img+html_wei
        return mark_safe(html_all)
        # return "<a href='http://192.168.212.194:9002/testcase/{}/'>跳转</a>".format(self.id)
    front_cover_img_data.short_description = u"封面图片"  # admin column header

    # Earlier variant that rendered every page image; disabled because it
    # loaded far too slowly.
    # def all_chapter(self):
    #     from django.utils.safestring import mark_safe
    #     html_all = ""
    #     chapter_list = self.spiderhmchapterdata_set.all().order_by("chapter_num")
    #     for chapter_one in chapter_list:
    #         html_chapter_one = "<span>{}</span><br/>".format(chapter_one.splider_title)
    #         html_all = "%s%s" % (html_all, html_chapter_one)
    #         chapter_image_list = chapter_one.spiderhmchapterimage_set.all().order_by("chapter_image_num")
    #         for chapter_image_one in chapter_image_list:
    #             html_chapter_image_one = "<a href='{}/media/{}'> <img src='{}/media/{}' style='width:75px;height:75px;'/></a><br/>".format(
    #                 DJANGO_SERVER_YUMING,chapter_image_one.content_img, DJANGO_SERVER_YUMING,chapter_image_one.content_img
    #             )
    #             html_all = "%s%s" % (html_all, html_chapter_image_one)
    #
    #     return mark_safe(html_all)

    # Only the chapter list is shown here.
    def all_chapter(self):
        """Admin column listing this book's chapters as links to their admin pages."""
        from django.utils.safestring import mark_safe  # render as HTML instead of escaping it
        html_all = ""
        chapter_list = self.spiderhmchapterdata_set.all().order_by("chapter_num")
        for chapter_one in chapter_list:
            html_chapter_one = "<a href='{}/spiderdata/spiderhmchapterdata/{}/'><span>{}</span></a><br/>".format(
                DJANGO_SERVER_YUMING,chapter_one.id,chapter_one.splider_title)
            html_all = "%s%s" % (html_all, html_chapter_one)
            # chapter_image_list = chapter_one.spiderhmchapterimage_set.all().order_by("chapter_image_num")
            # for chapter_image_one in chapter_image_list:
            #     html_chapter_image_one = "<a href='{}/media/{}'> <img src='{}/media/{}' style='width:75px;height:75px;'/></a><br/>".format(
            #         DJANGO_SERVER_YUMING,chapter_image_one.content_img, DJANGO_SERVER_YUMING,chapter_image_one.content_img
            #     )
            #     html_all = "%s%s" % (html_all, html_chapter_image_one)
        return mark_safe(html_all)
    all_chapter.short_description = u"已经存在章节"  # admin column header

    class Meta:
        verbose_name = u"爬取的漫画书"
        verbose_name_plural=verbose_name

    def __str__(self):
        return self.splider_title
# Scraped comic chapter data
class SpiderHMChapterData(models.Model):
    """One chapter of a scraped comic book (SpiderHMBook)."""
    spiderhmbook = models.ForeignKey(SpiderHMBook,null=True, blank=True, verbose_name=u"书目", on_delete=models.PROTECT)
    splider_url = models.CharField(max_length=1500, default="",null=True, blank=True,verbose_name=u"爬取数据URL")  # adding unique=True would make the URL the unique key
    splider_title = models.CharField(max_length=1000, default="爬取漫画数据",null=True, blank=True, verbose_name=u"数据标题")
    chapter_num = models.IntegerField(null=True, blank=True,verbose_name=u"章节数")
    # img_height = models.CharField(max_length=100, default=75,null=True, blank=True, verbose_name=u"封面图高度")
    # img_width = models.CharField(max_length=100, default=75, null=True, blank=True,verbose_name=u"封面图宽度")
    # back_front_cover_img = models.ImageField(upload_to="" , null=True, blank=True,verbose_name=u"补传封面图片", height_field='img_height',width_field='img_width',max_length=2000)
    # front_cover_img = models.CharField(max_length=1500, null=True, blank=True,verbose_name=u"封面图片")
    # prenum = models.CharField(max_length=100, default="", null=True, blank=True, verbose_name=u"编号")
    # long_time = models.CharField(max_length=100, default="", null=True, blank=True, verbose_name=u"时长(分钟)")
    # is_love = models.BooleanField(default=False,verbose_name=u"喜爱")
    # is_check = models.BooleanField(default=False,verbose_name=u"检查封面")
    # # ManyToManyField has no null parameter (warning fields.W340 if given)
    # hm_area = models.ManyToManyField(SpiderHMArea,default="", blank=True,verbose_name=u"地区")
    # hm_tag = models.ManyToManyField(SpiderHMTag,default="",blank=True,verbose_name=u"类型")
    write_user = models.ForeignKey(User, null=True, blank=True, verbose_name=u"用户名", on_delete=models.PROTECT)
    # auto_now_add=True: timestamp written automatically when the row is first inserted.
    add_time = models.DateTimeField(null=True, blank=True,auto_now_add=True,
                                    verbose_name=u"添加时间")
    # default=datetime.now: evaluated when a new instance is saved without a value.
    update_time = models.DateTimeField(default=datetime.now, null=True, blank=True,
                                       verbose_name=u"更新时间")

    def image_data(self):
        """Admin thumbnail column.

        NOTE(review): references self.front_cover_img, but that field is
        commented out above -- invoking this raises AttributeError.
        """
        from django.utils.safestring import mark_safe  # render as HTML instead of escaping it
        return mark_safe("<a href='{}'> <img src='{}' style='width:75px;height:75px;'/></a>".format(self.front_cover_img,self.front_cover_img))
        # return "<a href='http://192.168.212.194:9002/testcase/{}/'>跳转</a>".format(self.id)
    image_data.short_description = u"封面图片"  # admin column header

    def back_image_data(self):
        """Admin column for the re-uploaded cover.

        NOTE(review): self.prenum and self.back_front_cover_img are commented
        out above -- invoking this raises AttributeError.
        """
        from django.utils.safestring import mark_safe
        return mark_safe("<a href='{}'><span>{}<span></a><br/><a href='{}/media/{}'> <img src='{}/media/{}' style='width:75px;height:75px;'/></a>".
                         format(self.splider_url,self.prenum,DJANGO_SERVER_YUMING,self.back_front_cover_img,DJANGO_SERVER_YUMING,self.back_front_cover_img))
        # return "<a href='http://192.168.212.194:9002/testcase/{}/'>跳转</a>".format(self.id)
    back_image_data.short_description = u"补传封面图片"  # admin column header

    def video_link(self):
        """Admin column linking to the chapter's video.

        NOTE(review): self.video is not defined on this model -- this looks
        like dead code copied from another model; confirm before relying on it.
        """
        from django.utils.safestring import mark_safe
        return mark_safe("<a href='{}'>{}</a>".format(self.video,self.video))
        # return "<a href='http://192.168.212.194:9002/testcase/{}/'>跳转</a>".format(self.id)
    video_link.short_description = u"视频地址连接"  # admin column header

    def down_load_link(self):
        """Admin column listing download links.

        Uses the spiderdownload_set reverse relation, which is declared on a
        model outside this file.
        """
        from django.utils.safestring import mark_safe
        html_all = ""
        down_load_list = self.spiderdownload_set.all()
        for down_load in down_load_list:
            html_one = "<a href='{}'>{}</a><br/>".format(down_load.down_load,down_load.down_load)
            html_all = "%s%s"%(html_all,html_one)
        return mark_safe(html_all)
        # return "<a href='http://192.168.212.194:9002/testcase/{}/'>跳转</a>".format(self.id)
    down_load_link.short_description = u"下载地址连接"  # admin column header

    class Meta:
        verbose_name = u"爬取漫画数据查询"
        verbose_name_plural=verbose_name

    def __str__(self):
        return self.splider_title
class SpiderHMChapterImage(models.Model):
    """A single page image belonging to a chapter (SpiderHMChapterData)."""
    spiderhmchapterdata = models.ForeignKey(SpiderHMChapterData,null=True, blank=True, verbose_name=u"章节", on_delete=models.PROTECT)
    splider_img_url = models.CharField(max_length=1500, default="",null=True, blank=True,verbose_name=u"爬取图片URL")  # adding unique=True would make the URL the unique key
    # NOTE(review): default=75 for a title looks copy-pasted from the size fields below -- confirm.
    img_title = models.CharField(max_length=1000, default=75,null=True, blank=True, verbose_name=u"图片标题")
    # Width/height are written into automatically via ImageField(height_field/width_field).
    img_height = models.CharField(max_length=100, default=75,null=True, blank=True, verbose_name=u"图片高度")
    img_width = models.CharField(max_length=100, default=75, null=True, blank=True,verbose_name=u"图片宽度")
    content_img = models.ImageField(upload_to="hanman/content/%Y/%m%d/%H/" , null=True, blank=True,verbose_name=u"图片", height_field='img_height',width_field='img_width',max_length=2000)
    chapter_image_num = models.IntegerField(null=True, blank=True,verbose_name=u"图片编号")
    write_user = models.ForeignKey(User, null=True, blank=True, verbose_name=u"用户名", on_delete=models.PROTECT)
    # auto_now_add=True: timestamp written automatically when the row is first inserted.
    add_time = models.DateTimeField(null=True, blank=True,auto_now_add=True,
                                    verbose_name=u"添加时间")
    # default=datetime.now: evaluated when a new instance is saved without a value.
    update_time = models.DateTimeField(default=datetime.now, null=True, blank=True,
                                       verbose_name=u"更新时间")

    class Meta:
        verbose_name = u"漫画内容"
        verbose_name_plural=verbose_name

    def __str__(self):
        return self.img_title
25,206 | d16b3e02209c3419b99fb8e35a1994a678c6d724 | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 24 10:56:28 2018
@author: mathewspmani
"""
num = 600851475143

# TRIAL DIVISION
def getfactors(num):
    """Return the largest prime factor of ``num`` (num > 1) by trial division."""
    factors = []
    fac = 2
    while num > 1:
        while num % fac == 0:
            factors.append(fac)
            # Integer division keeps num exact; the original used /= which
            # produces floats and loses precision for large integers.
            num //= fac
        fac = fac + 1
    # max() replaces sorting the whole list just to take its last element.
    return max(factors)

getfactors(num)
# IMPROVEMENTS
# All primes after 2 and 3 have the form 6n - 1 or 6n + 1, so other
# candidates can be skipped entirely.
def getfactors(num):
    """Return the largest prime factor of ``num`` (num > 1).

    Identical contract to the version above, but skips trial divisors
    greater than 3 that cannot be prime.
    """
    factors = []
    fac = 2
    while num > 1:
        if fac > 3:
            if (fac + 1) % 6 == 0 or (fac - 1) % 6 == 0:
                while num % fac == 0:
                    factors.append(fac)
                    num //= fac  # integer division: /= would drift into floats
        else:
            while num % fac == 0:
                factors.append(fac)
                num //= fac
        fac = fac + 1
    return max(factors)
getfactors(num)
|
25,207 | 5db1037d5c69c5b8ff44f7fc911c40079ce7df5d | from django.db import models
__author__ = 'aaraokar'
class AuthenticatedUser(models.Model):
    """Basic user record persisted in the legacy `users` table."""
    first_name = models.CharField(max_length=30)
    last_name = models.CharField(max_length=30, blank=True)

    class Meta:
        db_table = 'users'
class Notifications(models.Model):
    """A notification with header, body text and an image URL."""
    header = models.CharField(max_length=150)
    content = models.CharField(max_length=300)
    image_url = models.URLField()

    class Meta:
        db_table = 'notifications'
class QueryNotificationMapping(models.Model):
    """Maps a stored query to the notification it should trigger."""
    query = models.TextField()
    # on_delete is mandatory from Django 2.0 onwards; CASCADE matches the
    # implicit default of earlier Django versions, so behaviour is unchanged.
    notification_id = models.ForeignKey(Notifications, on_delete=models.CASCADE)
    timestamp = models.DateTimeField()
    status = models.BooleanField(default=False)  # whether the notification was delivered

    class Meta:
        db_table = 'query_notification_mapping'
|
25,208 | 6e1eb4fbc3a226d2d1683f215fdefb7c1e95bbf3 | from tkinter import *
import pandas as pd
from typing import Final

# Final (uppercase) is the constant annotation; the previously imported
# lowercase `final` is a decorator for classes/methods, and `final(str)`
# merely returned str at runtime without marking anything as constant.
BACKGROUND_COLOR: Final[str] = "#B1DDC6"
# Words still to be learned; replaced by a list of row dicts in read_csv().
to_learn = {}
window = Tk()
window.config(bg=BACKGROUND_COLOR, padx=50, pady=50)
def translate():
    """Flip the current card to its English side."""
    global count
    canvas.itemconfig(lang, text='English', fill='white')
    canvas.itemconfig(word, text=words_csv.English[count], fill='white')
    canvas.itemconfig(card_bg, image=english_bg)
    # NOTE(review): this reschedules itself every 3 seconds and the handle is
    # never cancelled (`timer` is local, so the id is lost immediately).
    timer = window.after(3000, translate)

# Index of the card currently shown.
count = 0
def create_csv():
    """Persist the words still to learn back to data/words_to_learn.csv."""
    frame = pd.DataFrame(to_learn)
    frame.to_csv('data/words_to_learn.csv', index=False)
def read_csv():
    """Load data/words_to_learn.csv into the module globals."""
    global words_csv, to_learn
    words_csv = pd.read_csv('data/words_to_learn.csv', index_col=False)
    to_learn = words_csv.to_dict(orient='records')

try:
    read_csv()
except FileNotFoundError:
    # NOTE(review): if the file is missing, calling read_csv() here raises
    # FileNotFoundError a second time before create_csv() can ever run; the
    # two calls are almost certainly in the wrong order (or the fallback
    # should read the original source deck instead).
    read_csv()
    create_csv()
# Re-read the deck (assumes the file exists at this point).
words_csv = pd.read_csv('data/words_to_learn.csv')
french = words_csv.French[0]
english = words_csv.English[0]  # NOTE(review): never used afterwards
def is_know():
    """Mark the current card as known: drop it, save, and advance."""
    # NOTE(review): to_learn is a list of dicts after read_csv(); this pops
    # the first entry, which only matches the displayed card while count == 0.
    to_learn.remove(to_learn[0])
    create_csv()
    word_count()
def word_count():
    """Advance to the next French card and schedule the flip to English."""
    global count
    count += 1
    canvas.itemconfig(lang, text='French', fill='black')
    canvas.itemconfig(word, fill='black')
    canvas.itemconfig(card_bg, image=card_front_img)
    french = words_csv.French[count]  # local; shadows the module-level `french`
    canvas.itemconfig(word, text=french)
    # Flip the card to its English side after three seconds.
    window.after(3000, translate)
# --- build the UI ----------------------------------------------------------
canvas = Canvas(width=800, height=526, bg=BACKGROUND_COLOR, highlightthickness=0)
card_front_img = PhotoImage(file='images/card_front.png')
english_bg = PhotoImage(file='images/card_back.png')
card_bg = canvas.create_image(400, 270, image=card_front_img)
canvas.grid(row=0, column=0, columnspan=2)
lang = canvas.create_text(400, 150, text='French', font=('Arial', 40, 'italic'))
word = canvas.create_text(400, 263, text=french, font=('Arial', 60, 'bold'))
wrong_img = PhotoImage(file='images/wrong.png')
# "wrong" keeps the card in the deck and just shows the next one.
btn_wrong = Button(image=wrong_img, highlightthickness=0, command=word_count)
btn_wrong.grid(row=1, column=0, pady=10)
right_img = PhotoImage(file='images/right.png')
# "right" removes the card from the deck before advancing.
btn_right = Button(image=right_img, highlightthickness=0, command=is_know)
btn_right.grid(row=1, column=1, pady=10)
window.mainloop()
|
25,209 | 83649c8cd14c37b1494016663f2ccb5de13b492d | import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'firt_app.settings')
# NOTE(review): the settings module is 'firt_app' but the models below are
# imported from 'first_app' -- one of the two is almost certainly a typo;
# confirm the actual project package name.
import django
django.setup()  # must run before any model import
import random
from first_app.models import Topic, Webpage,AccessRecord
from faker import Faker
|
25,210 | 4b9cb0ad96cb786adfea515123331e31b4c720b8 | #scoreboard
#AusSport user defined functions
import os
import json
import datetime
from time import time, ctime, sleep
import pigpio as GPIO
import s_data
from array import array
#import pygame
#from pygame.mixer import Sound, get_init, pre_init
# class Note(Sound):
#
# def __init__(self, frequency, volume=.9):
# self.frequency = frequency
# Sound.__init__(self, self.build_samples())
# self.set_volume(volume)
#
# def build_samples(self):
# period = int(round(get_init()[0] / self.frequency))
# samples = array("h", [0] * period)
# amplitude = 2 ** (abs(get_init()[1]) - 1) - 1
# for time in range(period):
# if time < period / 2:
# samples[time] = amplitude
#
# print( samples)
# return samples
def log_it(logging, str1):
    """Append *str1* with a timestamp to scorelog.txt when logging is enabled."""
    if not logging:
        return
    with open('scorelog.txt', 'a') as s_log:
        s_log.write("T: {0}: {1}".format(datetime.datetime.now(), str1))
        s_log.write('\r\n')
def check_int(s):
    """Return True if str(s) is an integer literal with an optional +/- sign."""
    s = str(s)
    if s[0] in ('-', '+'):
        return s[1:].isdigit()
    return s.isdigit()

def check_allint(str_to_check):
    """Return 1 when every character AFTER the first is a digit, else 0.

    The first character is a non-integer prefix by protocol and is skipped;
    strings of length <= 1 therefore pass trivially.
    """
    all_int = 1
    if len(str_to_check) > 1:  # must have some integers to check or just return true
        # Check every trailing character; the original loop used
        # range(1, len-1) and silently ignored the final character.
        for ch in str_to_check[1:]:
            if check_int(ch) == False:
                all_int = 0
                break
    return all_int
def get_ziku_row(Ziku, ziku_num, ziku_segs, dot):
    """Return ziku_segs font entries starting at Ziku[ziku_num], each offset by dot."""
    return [segment + dot for segment in Ziku[ziku_num:ziku_num + ziku_segs]]
def set_data_spi(p1, d_vals,vals_strt,vals_end,d_port,port_start, dot):
    """Build the SPI byte string for the digit values d_vals[vals_strt:vals_end].

    Each digit value selects a row of segment bytes from the font table, and
    the rows are cached in d_port starting at index port_start.
    NOTE(review): `Ziku` and `ziku_segs` are free (module-global) names not
    defined in this file -- confirm they are provided elsewhere.
    """
    spistr = ""
    ba = bytearray()
    p_cnt = port_start
    for i in d_vals[vals_strt:vals_end]:
        ziku_num = int(i) * ziku_segs
        if len(d_port) > p_cnt:
            d_port[p_cnt] = get_ziku_row(Ziku, ziku_num, ziku_segs, dot)
            #[Ziku[ziku_num], Ziku[ziku_num+1], Ziku[ziku_num+2], Ziku[ziku_num+3]]
        else:
            d_port.append(get_ziku_row(Ziku, ziku_num, ziku_segs, dot))
            #[Ziku[ziku_num], Ziku[ziku_num+1], Ziku[ziku_num+2], Ziku[ziku_num+3]])
        ba = ba + bytearray(d_port[p_cnt])
        p_cnt = p_cnt + 1
    by = bytes(ba)
    # NOTE(review): str(by) produces the "b'...'" repr, and joining a single
    # string is a no-op, so the result includes the b'' wrapper -- verify the
    # SPI consumer really expects this.
    spistr = "".join(str(by))
    return spistr
def set_chars_spi(p1, d_vals,vals_strt,vals_end,d_port,port_start):
    """Build the SPI byte string for character bitmaps (16 bytes per char).

    NOTE(review): `Bmp` is a free (module-global) name not defined in this
    file -- confirm it is provided elsewhere.  Like set_data_spi, the return
    value is the "b'...'" repr of the bytes.
    """
    spistr = ""
    ba = bytearray()
    p_cnt = port_start
    for i in d_vals[vals_strt:vals_end]:
        bmp_num = int(i) * 16
        if len(d_port) > p_cnt:
            d_port[p_cnt] = [Bmp[bmp_num], Bmp[bmp_num+1], Bmp[bmp_num+2], Bmp[bmp_num+3]]
        else:
            d_port.append([Bmp[bmp_num], Bmp[bmp_num+1], Bmp[bmp_num+2], Bmp[bmp_num+3]])
        ba = ba + bytearray(d_port[p_cnt])
        p_cnt = p_cnt + 1
    by = bytes(ba)
    spistr = "".join(str(by))
    return spistr
def send_digits_spi(p1, freq, chan, spi_flag, latch, b_str):
    """Shift b_str out over SPI, then pulse `latch` to update the display.

    NB Chip Enable lines on SPI are NOT used; latching is done via whatever
    GPIO is in `latch`.  This way flickering is eliminated from the display.
    """
    h1 = p1.spi_open(chan, freq, spi_flag)
    p1.spi_write(h1, b_str)
    p1.spi_close(h1)
    # Rising then falling edge transfers the shifted bits to the outputs.
    p1.write(latch,1)
    p1.write(latch,0)
def send_i2c_digit_data(p1, s1):
    """Push every digit value described by the settings dict s1 to its I2C display."""
    i2c_settings = s1['i2c']['port_settings']
    offset = s1['i2c']['offset']
    for k,v in i2c_settings.items():
        this_port = s_data.get_i2c_port(s1,k)
        # Send all the values for this port.
        # NOTE(review): the inner loop reuses the names k, v, shadowing the
        # outer loop variables.
        for k, v in this_port.items():
            val = int(v['val'])
            p_val = v['i2c_port']
            chan = i2c_settings[p_val]['chan']
            addr = v['i2c'] + offset
            if val == 32: #this means a blank digit
                valreg = s1['i2c']['col_reg']
                val = 0 #by setting colour to BLACK it should go off
            else: #this means it is a valid value
                valreg = s1['i2c']['val_reg']
            # print(chan,addr,valreg,val)
            # sleep(.1)
            send_data_i2c(p1, chan, addr, valreg, val)
def send_i2c_colour_data(p1, s1):
    """Push the configured colour of every digit to its I2C display."""
    i2c_settings = s1['i2c']['port_settings']
    offset = s1['i2c']['offset']
    colreg = s1['i2c']['col_reg']
    for k,v in i2c_settings.items():
        this_port = s_data.get_i2c_port(s1,k)
        # Send all the values for this port (inner loop shadows k, v).
        for k, v in this_port.items():
            p_val = v['i2c_port']
            chan = i2c_settings[p_val]['chan']
            addr = v['i2c'] + offset
            colour = v['colour']
            val = s1['i2c']['colours'][colour]  # colour name -> register value
            send_data_i2c(p1,chan, addr, colreg, val)
def send_i2c_bright_data(p1, s1):
    """Push the global board brightness to every configured I2C display."""
    i2c_settings = s1['i2c']['port_settings']
    offset = int(s1['i2c']['offset'])
    brightreg = int(s1['i2c']['bright_reg'])
    bright = s1['board']['brightness']
    for k,v in i2c_settings.items():
        this_port = s_data.get_i2c_port(s1,k)
        # Send all the values for this port (inner loop shadows k, v).
        for k, v in this_port.items():
            p_val = v['i2c_port']
            chan = i2c_settings[p_val]['chan']
            addr = v['i2c'] + offset
            send_data_i2c(p1, chan, addr, brightreg, bright)
def send_data_i2c(p1, chan1, addr1, reg1, val1):
    """Write one byte (reg1 <- val1) to I2C device addr1 on bus chan1."""
    h1 = p1.i2c_open(chan1, addr1)
    # Disable pigpio exceptions so a missing/NACKing device is reported
    # instead of crashing the scoreboard loop.
    GPIO.exceptions = False
    try:
        p1.i2c_write_byte_data(h1, reg1, val1)
    except GPIO.error as error:
        print(error)
    # NOTE(review): exceptions are set False again here but never re-enabled.
    GPIO.exceptions = False
    p1.i2c_close(h1)
def sound_siren(p1, siren_time, siren_pin):
    """Switch the siren output on and return the time at which it should stop."""
    stop_at = time() + siren_time
    p1.write(siren_pin, 1)
    # pre_init(44100, -16, 1, 1024)
    # pygame.init()
    # Note(450).play(-1)
    # Note(320).play(-1)
    return stop_at
|
25,211 | a2f708ab247c3b80837b9698a93070afac9a4b00 | from django.contrib import admin
from .models import Cancion
class CancionAdmin(admin.ModelAdmin):
    """Admin options for Cancion: auto-fill the slug from the title."""
    prepopulated_fields = {"slug": ("titulo",)}

admin.site.register(Cancion, CancionAdmin)
# Register your models here.
|
25,212 | 132aefd4fc7b4eaf3da0d4e9af45139170a86d49 | ## @package RSM.py
# This module is the python script pulling data from the DHT and sending to SQL server.
#
# usage: RSM.py <device_name> <SQL_ip_address>
#
# where device name is the string identifier for this RSM (defaults to mac ID if not provided)
# where SQL_ip_address is the IP of a SQL server to receive the atmospheric data (defaults to localhost if not provided)
#
# Module Dependencies:
# MySQLdb (sudo apt-get install mysql-server python-mysqldb)
# Adafruit_DHT (python -m pip install --user Adafruit_DHT)
#
# Hardcoded for the DHT11 input on pin GPIO4
#!/usr/bin/python
import sys
import MySQLdb
import uuid
import Adafruit_DHT # https://github.com/adafruit/Adafruit_Python_DHT.git
from datetime import datetime, timedelta
from time import sleep
# HANDLE COMMAND LINE ARGS (IF ANY)
# usage: 1st command line argument is the hostname, defaults to MACID
# 2nd command line argument is the target SQL server, defaults to localhost if none present
## @var _sens_name
# Hostname of this sensor module, stored with each environmental data point.
if len(sys.argv) > 1:
    sens_name = sys.argv[1]
else:
    sens_name = uuid.getnode()  # falls back to the MAC address (as an integer)
## @var _conn_IP
# IP address of the SQL database.
if len(sys.argv) > 2:
    conn_IP = sys.argv[2]
else:
    conn_IP = "127.0.0.1"
# BEGIN CONFIG
print 'Creating connection to SQL database on ', conn_IP, ' as device name ', sens_name, '\n'
record_frequency = timedelta(0,30,0) # 30 seconds between DB writes
# db = MySQLdb.connect("192.168.43.126", "monitor", "monitor", "DRESS_ATMOSPHERIC")
# db = MySQLdb.connect("10.182.128.3", "monitor", "monitor", "DRESS_ATMOSPHERIC")
## @var _db
# this is the connection to the SQL database
db = MySQLdb.connect(conn_IP, "monitor", "monitor", "DRESS_ATMOSPHERIC")
print'Successfully connected to SQL server'
# END CONFIG
dbCursor = db.cursor()
next_record = datetime.now() + record_frequency
temperature_series = []
humidity_series = []
# Sample the DHT11 continuously; average and persist once per record period.
while True:
    humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.DHT11, 4)
    if (humidity is None or temperature is None):
        continue # sometimes the DHT11 will fail to read. Just skip that attempt.
    humidity_series.append(humidity)
    temperature_series.append(temperature)
    if (datetime.now() < next_record):
        continue # don't save to DB unless we have enough samples of data to average
    try:
        record_temperature = sum(temperature_series) / len(temperature_series)
        record_humidity = sum(humidity_series) / len(humidity_series)
        # Parameterised INSERT: the driver escapes the values.
        sql = "INSERT INTO DHT11 (datetime, sensor_number, temperature, humidity) VALUES(NOW(), %s, %s, %s)"
        val = (sens_name, record_temperature, record_humidity)
        dbCursor.execute(sql, val)
        db.commit()
        # Only clear series and wait a minute if the database call succeeded.
        next_record += record_frequency
        del temperature_series[:]
        del humidity_series[:]
        print '{0},{1:0.0f},{2:0.0f}'.format(datetime.now(), record_temperature, record_humidity)
    except:
        # NOTE(review): the bare excepts keep the sampling loop alive at all
        # costs, but also hide programming errors; catching MySQLdb.Error
        # would be safer.
        try:
            db.rollback()
        except:
            # Connection is gone -- reconnect and rebuild the cursor.
            db = MySQLdb.connect(conn_IP, "monitor", "monitor", "DRESS_ATMOSPHERIC")
            dbCursor = db.cursor()
|
25,213 | 05677f7b8074295311b42d64db74023b17288fa5 | # Generated by Django 3.1.5 on 2021-05-17 19:48
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make FacultyApplicant.CV nullable.

    Alters the CV FileField to null=True; the upload directory
    ('facultyApplicant/') is unchanged.
    """

    dependencies = [
        ('CRS', '0006_facultyapplicant_cv'),
    ]

    operations = [
        migrations.AlterField(
            model_name='facultyapplicant',
            name='CV',
            field=models.FileField(null=True, upload_to='facultyApplicant/'),
        ),
    ]
|
25,214 | c64db13f9ab12c8aea209b3afeb17aaaf9012832 | # Generated by Django 2.1.4 on 2018-12-26 05:54
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add ChatRequest.request_type (choice field)."""

    dependencies = [
        ('chat', '0013_userprofile'),
    ]

    operations = [
        migrations.AddField(
            model_name='chatrequest',
            name='request_type',
            # NOTE(review): default=0 is an int for a CharField; with
            # preserve_default=False it was only the one-off value applied to
            # pre-existing rows — confirm those rows look sane.
            field=models.CharField(choices=[('Sales', 'Sales'), ('Services', 'Services'), ('Insurance', 'Insurance'), ('Other', 'Other')], default=0, max_length=10),
            preserve_default=False,
        ),
    ]
|
25,215 | 141dc81b88f39fd94e69dacadc0c90279462cd87 | # -*- coding: utf-8 -*-
"""Module timeActivity
Object about the management of the data about the project and time
"""
from datetime import date
from datetime import datetime
from .personal_logging import PersonalLogging
class RowTime:
    """One row of the time file: an activity plus its start and end markers.

    Both boundaries are logged at debug level on construction.
    """

    def __init__(self, newActivity, newStart, newEnd):
        # Parameter names are part of the public interface; keep them.
        self.end = newEnd
        self.start = newStart
        self.activity = newActivity
        self.log = PersonalLogging()
        # Trace the time span exactly as received.
        self.log.debug("Rowtime", "init", "start:{0}".format(str(self.start)))
        self.log.debug("Rowtime", "init", "end:{0}".format(str(self.end)))

    def __repr__(self):
        return "RowTime:{0}[{1}-{2}]".format(self.activity, self.start, self.end)
|
25,216 | 10c465c185af8a8bd9a1f36debc17410e27743e6 | from django.db import models
# Create your models here.
class User(models.Model):
    """Minimal account record with hand-rolled auth/admin flags."""
    name=models.CharField(max_length=10)
    # NOTE(review): stored in a plain CharField — confirm the password is
    # hashed before save; nothing in this model enforces that.
    password=models.CharField(max_length=100)
    isAdmin=models.BooleanField(default=False)
    isAuthenticated=models.BooleanField(default=False)
    createdttm=models.DateTimeField(auto_now_add=True)  # set once on insert
    updatedttm=models.DateTimeField(auto_now=True)  # refreshed on every save
|
25,217 | 5de808d9ec10deb76005f546665872bb08ece74c | # Exercício 2
# Escreva um algoritmo que leia dois valores numéricos e que pergunte ao
# usuário qual operação ele deseja realizar: adição (+), subtração (-),
# multiplicação (*) ou divisão (/).Exiba na tela o resultado da operação
# desejada.
# Ask which operation to perform, read two integers, print the result.
op = input('Qual operação deseja realizar?\n'
           '\t1. adição (+)\n'
           '\t2. subtração (-)\n'
           '\t3. multiplicação (*)\n'
           '\t4. divisão (/)\n')
if op in ('+', '-', '*', '/'):
    x = int(input('Digite um número inteiro: '))
    y = int(input('Digite outro número inteiro: '))
    if op == '+':
        print(x + y)
    elif op == '-':
        print(x - y)
    elif op == '*':
        print(x * y)
    elif op == '/':
        # Guard the only operation that can raise ZeroDivisionError.
        if y == 0:
            print('Não é possível dividir por zero')
        else:
            print(x / y)
else:
    print('Operação inválida')
print('Encerrando o programa...')
|
25,218 | 64b9c4d50fc1b5e45543c7d9696070f424804eab | # 1627. Graph Connectivity With Threshold
# Hard
# 57
# 9
# Add to List
# Share
# We have n cities labeled from 1 to n. Two different cities with labels x and y are directly connected by a bidirectional road if and only if x and y share a common divisor strictly greater than some threshold. More formally, cities with labels x and y have a road between them if there exists an integer z such that all of the following are true:
# x % z == 0,
# y % z == 0, and
# z > threshold.
# Given the two integers, n and threshold, and an array of queries, you must determine for each queries[i] = [ai, bi] if cities ai and bi are connected (i.e. there is some path between them).
# Return an array answer, where answer.length == queries.length and answer[i] is true if for the ith query, there is a path between ai and bi, or answer[i] is false if there is no path.
# Example 1:
# Input: n = 6, threshold = 2, queries = [[1,4],[2,5],[3,6]]
# Output: [false,false,true]
# Explanation: The divisors for each number:
# 1: 1
# 2: 1, 2
# 3: 1, 3
# 4: 1, 2, 4
# 5: 1, 5
# 6: 1, 2, 3, 6
# Using the underlined divisors above the threshold, only cities 3 and 6 share a common divisor, so they are the
# only ones directly connected. The result of each query:
# [1,4] 1 is not connected to 4
# [2,5] 2 is not connected to 5
# [3,6] 3 is connected to 6 through path 3--6
# Example 2:
# Input: n = 6, threshold = 0, queries = [[4,5],[3,4],[3,2],[2,6],[1,3]]
# Output: [true,true,true,true,true]
# Explanation: The divisors for each number are the same as the previous example. However, since the threshold is 0,
# all divisors can be used. Since all numbers share 1 as a divisor, all cities are connected.
# Example 3:
# Input: n = 5, threshold = 1, queries = [[4,5],[4,5],[3,2],[2,3],[3,4]]
# Output: [false,false,false,false,false]
# Explanation: Only cities 2 and 4 share a common divisor 2 which is strictly greater than the threshold 1, so they are the only ones directly connected.
# Please notice that there can be multiple queries for the same pair of nodes [x, y], and that the query [x, y] is equivalent to the query [y, x].
# Constraints:
# 2 <= n <= 104
# 0 <= threshold <= n
# 1 <= queries.length <= 105
# queries[i].length == 2
# 1 <= ai, bi <= cities
# ai != bi
# This approach does not work
# class Solution:
# def areConnected(self, n: int, threshold: int, queries: List[List[int]]) -> List[bool]:
# self.roots = [num for num in range(0, n+1)]
# # print(self.roots)
# # self.union(1,2)
# for num in self.roots:
# for i in range(1, num+1):
# if num % i == 0:
# self.union(num, i)
# def find(self,x):
# if self.roots[x] == x:
# return x
# self.roots[x] = self.find(self.roots[x])
# return self.roots[x]
# def union(self, x, y):
# x = self.find(x)
# y = self.find(y)
# if x != y:
# if x < y:
# self.roots[y] = x
# else:
# self.roots[x] = y
# This solution works !!!
'''
union find ! to union the numbers, start only from thresholds -> keep adding the same numbers to connect all its divisors
its a trick but when making divisors, dont actually do mod for all numbers but add numbers with for and while loops until
the number gets bigger than the original number
also its 1 indexed, just include 0 as well in making self.roots array to avoid index out of range error - we just dont use it
union all the multiples of num - start from cur = num+num so that we dont start with the same numbers
updating the roots array using index - not value
'''
class Solution:
    """LeetCode 1627: connectivity queries via union-find.

    Every z > threshold links all of its multiples into one component,
    so it suffices to union z with 2z, 3z, ... and answer each query by
    comparing roots.
    """

    def areConnected(self, n: int, threshold: int, queries: List[List[int]]) -> List[bool]:
        """Return, for each (a, b) query, whether a and b are connected."""
        # Parent table; slot 0 is padding so labels index directly.
        self.roots = list(range(n + 1))
        # Link every multiple of z to z; starting at 2*z skips the trivial
        # self-union, and stepping by z enumerates exactly the multiples.
        for z in range(threshold + 1, n + 1):
            for multiple in range(z + z, n + 1, z):
                self.union(z, multiple)
        # Fully compress every path so a direct root comparison suffices.
        for label in range(n + 1):
            self.find(label)
        return [self.roots[a] == self.roots[b] for a, b in queries]

    def find(self, x):
        """Return x's root, compressing the whole path along the way."""
        root = x
        while self.roots[root] != root:
            root = self.roots[root]
        # Second pass: point every node on the path straight at the root.
        while self.roots[x] != root:
            self.roots[x], x = root, self.roots[x]
        return root

    def union(self, x, y):
        """Merge the components of x and y; the smaller root wins."""
        root_x = self.find(x)
        root_y = self.find(y)
        if root_x == root_y:
            return
        if root_x < root_y:
            self.roots[root_y] = root_x
        else:
            self.roots[root_x] = root_y
|
25,219 | d7d4e7253b3599726fcbf1b6f7a82f8bf6c87ba6 | #字典 ,学生管理系统
stu = {
"001":{
"name":"张三",
"hobby":"女生",
"address":"江夏区"
},
"002":{
"name":"李四",
"hobby":"唱歌",
"address":"江汉区"
},
"003":{
"name":"王五",
"hobby":"打球",
"address":"洪山区"
}
}
#遍历学生的所有key
# for x in stu.keys():
# print(x)
for x in stu.values(): # x 取得是 {"name":"张三","hobby":"女生","address":"江夏区" }
# print(x,type(x))
for y in x.keys():
print(y,x[y]) |
25,220 | 0a6add3eb381b681f829b9d840dde76e48242d07 | from django.shortcuts import render
from django.views.generic import TemplateView
from datetime import datetime
from .models import Curso, Tema, Video, Documento
class Inicioviews(TemplateView):
    """Landing page: lists every Curso."""
    template_name = "cursos/index.html"

    def get_context_data(self, **kwargs):
        """Build the template context.

        Accepts **kwargs so URLconf parameters don't raise TypeError
        (TemplateView.get forwards them), matching the sibling views.
        """
        cursos = Curso.objects.all()
        return {
            'title': 'Inicio',
            'year': datetime.now().year,
            'cursos': cursos,
        }
class Cursosviews(TemplateView):
    """Course catalogue: lists every Curso."""
    template_name = 'cursos/courses.html'

    def get_context_data(self, **kwargs):
        """Build the template context.

        Accepts **kwargs so URLconf parameters don't raise TypeError
        (TemplateView.get forwards them), matching the sibling views.
        """
        cursos = Curso.objects.all()
        return {
            'title': 'Cursos',
            'year': datetime.now().year,
            'cursos': cursos,
        }
class Temasviews(TemplateView):
    """Lists every Tema, regardless of course."""
    template_name = "cursos/themes.html"

    def get_context_data(self, **kwargs):
        return {'temas': Tema.objects.all()}
class TemasCursoviews(TemplateView):
    """Lists the Temas of one Curso; id and display name come from the URL."""
    template_name = "cursos/themes.html"

    def get_context_data(self, **kwargs):
        return {
            'temas': Tema.objects.filter(curso_id=kwargs['curso_id']),
            'curso_titulo': kwargs['curso_nombre'],
        }
class ClasesTemaviews(TemplateView):
    """Shows the videos and documents attached to one Tema."""
    template_name = "cursos/class.html"

    def get_context_data(self, **kwargs):
        tema_pk = kwargs['tema_id']
        return {
            'tema_titulo': kwargs['tema_nombre'],
            'videos': Video.objects.filter(tema_id=tema_pk),
            'documentos': Documento.objects.filter(tema_id=tema_pk),
        }
|
25,221 | 33efa7b922f764a795aedd19c575fc3160fba0bb | import pybullet as p
import pybullet_data
import time
import os
import math as m
# Open GUI and set pybullet_data in the path
# Open GUI and set pybullet_data in the path
p.connect(p.GUI)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
# Load plane contained in pybullet_data
planeId = p.loadURDF("plane.urdf")
# Set gravity for simulation
p.setGravity(0,0,-9.8)
# Add path to icub sdf models: every directory (under this script's parent)
# that contains an .sdf file is appended to the search path.
dir_path = os.path.dirname(os.path.realpath(__file__))
for root, dirs, files in os.walk(os.path.dirname(dir_path)):
    for file in files:
        if file.endswith('.sdf'):
            print (root+'/'+str(file))
            p.setAdditionalSearchPath(root)
robotIds = p.loadSDF("../envs/icub_fixed_model.sdf")
icubId = robotIds[0]
# set constraint between base_link and world: pin the base slightly above
# (1.2x) its loaded height, keeping its loaded orientation.
cid = p.createConstraint(icubId,-1,-1,-1,p.JOINT_FIXED,[0,0,0],[0,0,0],
                         [p.getBasePositionAndOrientation(icubId)[0][0],
                          p.getBasePositionAndOrientation(icubId)[0][1],
                          p.getBasePositionAndOrientation(icubId)[0][2]*1.2],
                         p.getBasePositionAndOrientation(icubId)[1])
##init_pos for standing (per-joint values in degrees; converted to radians
## when the sliders are created below)
# without FT_sensors
init_pos = [0]*15 + [-29.4, 28.8, 0, 44.59, 0, 0, 0, 0.47, 0, 0, -29.4, 30.4, 0, 44.59, 0, 0, 0]
# with FT_sensors
#init_pos = [0]*19 + [-29.4, 28.8, 0, 0, 44.59, 0, 0, 0, 0.47, 0, 0, -29.4, 30.4, 0, 0, 44.59, 0, 0, 0]
# all set to zero
#init_pos = [0]*p.getNumJoints(icubId)
# add debug slider: one GUI slider per joint, bounded by the joint limits
# reported in getJointInfo (indices 8/9), starting at the standing pose.
jointIds=[]
paramIds=[]
joints_num = p.getNumJoints(icubId)
for j in range (joints_num):
    info = p.getJointInfo(icubId,j)
    jointName = info[1]
    jointType = info[2]
    jointIds.append(j)
    paramIds.append(p.addUserDebugParameter(jointName.decode("utf-8"), info[8], info[9], init_pos[j]/180*m.pi))
# Main loop: mirror each slider into a position-control target, then step
# the simulation at roughly 100 Hz.
while True:
    for i in range(joints_num):
        p.setJointMotorControl2(icubId, i, p.POSITION_CONTROL,
                                targetPosition=p.readUserDebugParameter(i),
                                targetVelocity=0.0, positionGain=0.25, velocityGain=0.75, force=50)
    p.stepSimulation()
    time.sleep(0.01)
|
25,222 | 1e073c4db23d30835b7230b4b2f0b8ab87c912d2 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for remote_process."""
import os
from unittest import mock
from pyfakefs import fake_filesystem_unittest
from clusterfuzz._internal.bot.untrusted_runner import config
from clusterfuzz._internal.bot.untrusted_runner import file_host
from clusterfuzz._internal.protos import untrusted_runner_pb2
from clusterfuzz._internal.tests.test_libs import helpers as test_helpers
from clusterfuzz._internal.tests.test_libs import test_utils
class FileHostTest(fake_filesystem_unittest.TestCase):
    """FileHost tests.

    Every test drives file_host against a mocked gRPC stub
    (host.stub is patched in setUp) and a pyfakefs filesystem, so no real
    worker or disk is touched.
    """

    def setUp(self):
        # Patch the stub factory; individual tests configure
        # self.mock.stub().<Rpc> return values / side effects.
        test_helpers.patch(self, [
            'clusterfuzz._internal.bot.untrusted_runner.host.stub',
        ])
        test_helpers.patch_environ(self)
        test_utils.set_up_pyfakefs(self)

    def test_create_directory(self):
        """Test file_host.create_directory."""
        result = untrusted_runner_pb2.CreateDirectoryResponse(result=True)
        self.mock.stub().CreateDirectory.return_value = result
        self.assertTrue(file_host.create_directory('/path', True))
        result = untrusted_runner_pb2.CreateDirectoryResponse(result=False)
        self.mock.stub().CreateDirectory.return_value = result
        self.assertFalse(file_host.create_directory('/path', True))

    def test_remove_directory(self):
        """Test file_host.remove_directory."""
        result = untrusted_runner_pb2.RemoveDirectoryResponse(result=True)
        self.mock.stub().RemoveDirectory.return_value = result
        self.assertTrue(file_host.remove_directory('/path', True))
        result = untrusted_runner_pb2.RemoveDirectoryResponse(result=False)
        self.mock.stub().RemoveDirectory.return_value = result
        self.assertFalse(file_host.remove_directory('/path', True))

    def test_copy_file_to_worker(self):
        """Test file_host.copy_file_to_worker."""
        # Three full chunks, so the streaming upload must yield exactly 3 parts.
        contents = (b'A' * config.FILE_TRANSFER_CHUNK_SIZE +
                    b'B' * config.FILE_TRANSFER_CHUNK_SIZE +
                    b'C' * config.FILE_TRANSFER_CHUNK_SIZE)
        self.fs.create_file('/file', contents=contents)

        def mock_copy_file_to(iterator, metadata):
            """Mock copy file to."""
            chunks = [chunk.data for chunk in iterator]
            self.assertEqual(3, len(chunks))
            # The destination path travels in gRPC metadata, not in the stream.
            self.assertEqual([('path-bin', b'/file')], metadata)
            data = b''.join(chunks)
            self.assertEqual(data, contents)
            return untrusted_runner_pb2.CopyFileToResponse(result=True)

        self.mock.stub().CopyFileTo.side_effect = mock_copy_file_to
        self.assertTrue(file_host.copy_file_to_worker('/file', '/file'))

    def test_write_data_to_worker(self):
        """Test file_host.write_data_to_worker."""
        contents = (b'A' * config.FILE_TRANSFER_CHUNK_SIZE +
                    b'B' * config.FILE_TRANSFER_CHUNK_SIZE +
                    b'C' * config.FILE_TRANSFER_CHUNK_SIZE)
        result = untrusted_runner_pb2.CopyFileToResponse(result=True)
        self.mock.stub().CopyFileTo.return_value = result
        self.assertTrue(file_host.write_data_to_worker(contents, '/file'))
        # Inspect the recorded call: kwargs carry the metadata, the first
        # positional arg is the chunk iterator.
        call_args = self.mock.stub().CopyFileTo.call_args
        self.assertEqual(call_args[1], {'metadata': [('path-bin', b'/file')]})
        chunks = [chunk.data for chunk in call_args[0][0]]
        self.assertEqual(len(chunks), 3)
        data = b''.join(chunks)
        self.assertEqual(data, contents)

    def test_copy_file_from_worker(self):
        """Test file_host.copy_file_from_worker."""
        mock_response = mock.MagicMock()
        # Success is signalled via trailing metadata, the payload is streamed.
        mock_response.trailing_metadata.return_value = (('result', 'ok'),)
        mock_response.__iter__.return_value = iter([
            untrusted_runner_pb2.FileChunk(data=b'A'),
            untrusted_runner_pb2.FileChunk(data=b'B'),
            untrusted_runner_pb2.FileChunk(data=b'C'),
        ])
        self.mock.stub().CopyFileFrom.return_value = mock_response
        self.assertTrue(file_host.copy_file_from_worker('/file', '/file'))
        with open('/file') as f:
            self.assertEqual(f.read(), 'ABC')

    def test_copy_file_from_worker_failure(self):
        """Test file_host.copy_file_from_worker (failure)."""
        mock_response = mock.MagicMock()
        mock_response.trailing_metadata.return_value = (('result', 'invalid-path'),)
        self.mock.stub().CopyFileFrom.return_value = mock_response
        self.assertFalse(file_host.copy_file_from_worker('/file', '/file'))
        # On failure nothing may be written locally.
        self.assertFalse(os.path.exists('/file'))

    def test_stat(self):
        """Test file_host.stat."""
        result = untrusted_runner_pb2.StatResponse(
            result=True, st_mode=0, st_size=1, st_atime=2, st_mtime=3, st_ctime=4)
        self.mock.stub().Stat.return_value = result
        self.assertEqual(result, file_host.stat('/path'))

    def test_stat_error(self):
        """Test file_host.stat error."""
        result = untrusted_runner_pb2.StatResponse(
            result=False, st_mode=0, st_size=1, st_atime=2, st_mtime=3, st_ctime=4)
        self.mock.stub().Stat.return_value = result
        self.assertIsNone(file_host.stat('/path'))

    @mock.patch(
        'clusterfuzz._internal.bot.untrusted_runner.file_host.remove_directory')
    @mock.patch(
        'clusterfuzz._internal.bot.untrusted_runner.file_host.copy_file_to_worker'
    )
    def test_copy_directory_to_worker(self, mock_copy_file_to_worker,
                                      mock_remove_directory):
        """Test file_host.copy_directory_to_worker."""
        mock_copy_file_to_worker.return_value = True
        self.fs.create_file('/host/dir/file1')
        self.fs.create_file('/host/dir/file2')
        self.fs.create_file('/host/dir/dir2/file3')
        self.fs.create_file('/host/dir/dir2/file4')
        self.fs.create_file('/host/dir/dir2/dir3/file5')
        self.assertTrue(
            file_host.copy_directory_to_worker('/host/dir', '/worker/copied_dir'))
        mock_copy_file_to_worker.assert_has_calls(
            [
                mock.call('/host/dir/file1', '/worker/copied_dir/file1'),
                mock.call('/host/dir/file2', '/worker/copied_dir/file2'),
                mock.call('/host/dir/dir2/file3', '/worker/copied_dir/dir2/file3'),
                mock.call('/host/dir/dir2/file4', '/worker/copied_dir/dir2/file4'),
                mock.call('/host/dir/dir2/dir3/file5',
                          '/worker/copied_dir/dir2/dir3/file5'),
            ],
            any_order=True)
        # With replace=True the destination must be wiped (recreated) first.
        self.assertTrue(
            file_host.copy_directory_to_worker(
                '/host/dir', '/worker/copied_dir', replace=True))
        mock_copy_file_to_worker.assert_has_calls(
            [
                mock.call('/host/dir/file1', '/worker/copied_dir/file1'),
                mock.call('/host/dir/file2', '/worker/copied_dir/file2'),
                mock.call('/host/dir/dir2/file3', '/worker/copied_dir/dir2/file3'),
                mock.call('/host/dir/dir2/file4', '/worker/copied_dir/dir2/file4'),
                mock.call('/host/dir/dir2/dir3/file5',
                          '/worker/copied_dir/dir2/dir3/file5'),
            ],
            any_order=True)
        mock_remove_directory.assert_called_with(
            '/worker/copied_dir', recreate=True)
        # Any single file-copy failure fails the whole directory copy.
        mock_copy_file_to_worker.return_value = False
        self.assertFalse(
            file_host.copy_directory_to_worker('/host/dir', '/worker/copied_dir2'))

    @mock.patch('clusterfuzz._internal.bot.untrusted_runner.file_host.list_files')
    @mock.patch(
        'clusterfuzz._internal.bot.untrusted_runner.file_host.copy_file_from_worker'
    )
    def test_copy_directory_from_worker(self, mock_copy_file_from_worker,
                                        mock_list_files):
        """Test file_host.copy_directory_from_worker."""
        mock_copy_file_from_worker.return_value = True
        mock_list_files.return_value = [
            '/worker/abc',
            '/worker/def',
            '/worker/dir/ghi',
        ]
        self.assertTrue(file_host.copy_directory_from_worker('/worker', '/host'))
        mock_copy_file_from_worker.assert_has_calls(
            [
                mock.call('/worker/abc', '/host/abc'),
                mock.call('/worker/def', '/host/def'),
                mock.call('/worker/dir/ghi', '/host/dir/ghi'),
            ],
            any_order=True)
        # Paths escaping the source directory must be rejected (path
        # traversal defense), in absolute, '..', and relative forms.
        mock_list_files.return_value = [
            '/escape',
        ]
        self.assertFalse(file_host.copy_directory_from_worker('/worker', '/host'))
        mock_list_files.return_value = [
            '/worker/../escape',
        ]
        self.assertFalse(file_host.copy_directory_from_worker('/worker', '/host'))
        mock_list_files.return_value = [
            '../escape',
        ]
        self.assertFalse(file_host.copy_directory_from_worker('/worker', '/host'))

    def test_get_cf_worker_path(self):
        """Test get worker path."""
        os.environ['WORKER_ROOT_DIR'] = '/worker'
        local_path = os.path.join(os.environ['ROOT_DIR'], 'a', 'b', 'c')
        self.assertEqual(
            file_host.rebase_to_worker_root(local_path), '/worker/a/b/c')
        local_path = os.environ['ROOT_DIR']
        self.assertEqual(file_host.rebase_to_worker_root(local_path), '/worker')

    def test_get_cf_host_path(self):
        """Test get host path."""
        os.environ['ROOT_DIR'] = '/host'
        os.environ['WORKER_ROOT_DIR'] = '/worker'
        worker_path = os.path.join(os.environ['WORKER_ROOT_DIR'], 'a', 'b', 'c')
        self.assertEqual(file_host.rebase_to_host_root(worker_path), '/host/a/b/c')
        worker_path = os.environ['WORKER_ROOT_DIR']
        self.assertEqual(file_host.rebase_to_host_root(worker_path), '/host')
|
25,223 | e766d345a82b33dc42232d75c9efb2944d887af1 | # 1. Генерується список випадкових цілих чисел. Визначається, скільки в ньому парних чисел, а скільки непарних.
# 2. Вихідний список містить позитивні і негативні числа. Потрібно позитивні помістити в один список, а негативні - в інший.
# 3. Дан список цілих чисел. Замінити негативні на -1, позитивні - на число 1, нуль залишити без змін.
# 4. Вводиться нормалізований текст, який крім слів може містити певні знаки пунктуації. Програма будує список слів, знаки пунктуації виключаються.
# Під нормалізованим текстом будемо розуміти текст, в якому пробіл ставиться після знаків пунктуації, за винятком відкриває дужки (пробіл перед нею).
# Read normalized text and build its word list, punctuation excluded.
# Renamed the variable: the original shadowed the builtin 'str'.
text = input("Write down or insert some text:\n")
punctuation = ['.', ',', ':', ';', '!', '?', '(', ')']
for mark in punctuation:
    # Replace with a space (not empty) so "word,word" still splits in two.
    text = text.replace(mark, " ")
wordList = text.split()
print(wordList)
|
25,224 | ffa2217f0c8cb82240887e25725ca48b80671638 | from time import sleep
import json
import unittest
from selenium.webdriver.common.action_chains import ActionChains
import pandas as pd
import xlsxwriter as xw
import os
class Config:
    """Helpers for the job-scraping tests: Excel export and selenium
    cookie persistence.

    NOTE(review): every method is declared @staticmethod yet still takes a
    ``self`` first argument, so callers must pass a placeholder, e.g.
    ``Config.save_excel(None, path, data)``. Signatures are kept unchanged
    for backward compatibility.
    """

    @staticmethod
    def sava_to_excel(self, fileName, data):
        """Flatten ``data`` (an iterable of pages, each a list of job dicts)
        into columns and write them to ``fileName``, appending when the file
        already exists.

        Bug fix: the append branch previously appended the sheet read from
        disk to itself (``ds = pd.DataFrame(df)``) and never wrote the
        freshly scraped rows; it now concatenates the new rows (``dfData``)
        onto the existing ones.
        """
        data = list(data)
        job_name = []
        address = []
        pay = []
        edu = []
        hrname = []
        company = []
        company_size = []
        skills = []
        welfare = []
        # Flatten pages -> rows -> per-column lists.
        for i in range(len(data)):
            for j in range(len(data[i])):
                job_name.append(data[i][j]["job_name"])
                print("job_name %s " % job_name)
                address.append(data[i][j]["address"])
                pay.append(data[i][j]["pay"])
                edu.append(data[i][j]["edu"])
                hrname.append(data[i][j]["hrname"])
                company.append(data[i][j]["company"])
                company_size.append(data[i][j]["company_size"])
                skills.append(data[i][j]["skills"])
                welfare.append(data[i][j]["welfare"])
        dfData = {
            '職位名稱': job_name,
            '工作地址': address,
            '薪資範圍': pay,
            '教育程度': edu,
            'HR名字': hrname,
            '公司名稱': company,
            '公司規模': company_size,
            '所需技能': skills,
            '福利待遇': welfare,
        }
        if os.path.exists(fileName):
            print("文件已存在,追加數據開始!")
            df = pd.read_excel(fileName)
            print(df)
            # Append the NEW rows to the sheet read from disk. pd.concat
            # replaces DataFrame.append (removed in pandas 2.x).
            df = pd.concat([df, pd.DataFrame(dfData)], ignore_index=True)
            df.to_excel(fileName, index=False)
            print("文件已存在,追加數據結束!")
        else:
            print("文件不存在,創建文件!")
            df = pd.DataFrame(dfData)  # build the sheet from scratch
            df.to_excel(fileName, sheet_name='職位搜索表', index=False)
            print("文件存入結束,保存成功!")

    @staticmethod
    def save_excel(self, fileName, data):
        """Write ``data`` (a flat list of job dicts) to a new workbook via
        xlsxwriter, one header row plus one row per job."""
        workbook = xw.Workbook(fileName)  # create workbook
        worksheet1 = workbook.add_worksheet("sheet1")  # create sheet
        worksheet1.activate()  # make it the active sheet
        title = ['職位名稱', '工作地址', '薪資範圍', '教育程度',
                 'HR名字', '公司名稱', '公司規模', '所需技能', '福利待遇']  # header
        worksheet1.write_row('A1', title)  # header goes into row 1
        row_no = 2  # data starts on row 2, right below the header
        for j in range(len(data)):
            insertData = [data[j]["job_name"], data[j]["address"], data[j]["pay"],
                          data[j]["edu"], data[j]["hrname"], data[j]["company"],
                          data[j]["company_size"], data[j]["skills"], data[j]["welfare"]]
            worksheet1.write_row('A' + str(row_no), insertData)
            row_no += 1
        workbook.close()

    @staticmethod
    def get_cookie(self, filePath, driver):
        """Wait 20s (time to log in manually in the browser), then dump the
        driver's cookies to ``filePath`` as JSON."""
        print("开始获取Cookie,等待20s --> ")
        sleep(20)
        # get cookies
        with open(filePath, 'w') as cookief:
            cookief.write(json.dumps(driver.get_cookies()))
        print("获取 Cookie 结束!并写入文件 %s " % filePath)

    @staticmethod
    def add_cookie(self, driver, filepath):
        """Replace the driver's cookies with those stored in ``filepath``,
        then refresh the page so the session takes effect."""
        driver.delete_all_cookies()
        with open(filepath, 'r') as cookief:
            cookieslist = json.load(cookief)
        for cookie in cookieslist:
            # selenium rejects float 'expiry' values; coerce to int.
            if isinstance(cookie.get('expiry'), float):
                cookie['expiry'] = int(cookie['expiry'])
            driver.add_cookie(cookie)
        driver.refresh()
|
25,225 | acb45275aeeb664923bc4a5c15f7151a447c99fb | # -*- coding: utf-8 -*-
"""
Created on Tue May 31 14:38:43 2016
@author: wu34
"""
from scipy.stats import ttest_ind
import sleepAnalysis
#dd_low,dd_high,dd_diff = sleepAnalysis.supportDict()
# Hard-coded per-subject score series (presumably classifier performance
# values exported from sleepAnalysis — confirm provenance before reuse).
a = [0.71,0.62,0.55,0.55,0.54,0.51,0.5,0.48,0.48,0.48,0.48,0.46,0.45,0.45,0.45,0.44,0.43,0.38,0.32]
b = [0.93,0.85,0.5,0.5,0.5,0.5,0.5,0.5,0.49,0.5,0.5,0.49,0.5,0.5,0.5,0.49,0.49,0.23,0.12]
c = [0.51,0.51,0.58,0.58,0.5,0.55,0.51,0.52,0.49,0.49,0.55,0.51,0.6,0.6,0.57,0.52,0.51,0.57,0.59]
#b = []
#for i in dd_low.keys():
## a.append(dd_low[i][0])
# b.append(dd_low[i][1])
## c.append(dd_low[i][2])
#a = [0.84,0.73]
#b = [0.84,0.76]
#c = [0.84,0.74]
# Independent two-sample t-tests between each pair of series.
t_stat1, p_val1 = ttest_ind(a, b)
t_stat2, p_val2 = ttest_ind(a, c)
t_stat3, p_val3 = ttest_ind(b, c)
#d = [0.29,0.38,0.45,0.45,0.46,0.49,0.5,0.52,0.52,0.52,0.52,0.54,0.55,0.55,0.55,0.56,0.57,0.62,0.68]
#e = [0.07,0.15,0.5,0.5,0.5,0.5,0.5,0.5,0.51,0.5,0.5,0.51,0.5,0.5,0.5,0.51,0.51,0.77,0.88]
#f = [0.49,0.49,0.42,0.42,0.5,0.45,0.49,0.48,0.51,0.51,0.45,0.49,0.4,0.4,0.43,0.48,0.49,0.43,0.41]
#
##e = []
##for i in dd_high.keys():
### d.append(dd_high[i][0])
## e.append(dd_high[i][1])
### f.append(dd_high[i][2])
#
#
#t_stat4, p_val4 = ttest_ind(d, e)
#t_stat5, p_val5 = ttest_ind(d, f)
#t_stat6, p_val6 = ttest_ind(e, f)
|
25,226 | 7a0658006fbc9d1b7bcf75cb8cfcf2d601da745c | import concurrent.futures as cf
def possible(v, coins):
    """Return True if some subset of ``coins`` (each used at most once)
    sums to exactly ``v``.

    Replaces the exponential branching recursion with a subset-sum bitset:
    bit k of ``reachable`` is set iff some subset sums to k, and shifting
    by a coin adds it to every previously reachable sum.
    O(len(coins) * v / wordsize) instead of O(2**len(coins)).
    """
    reachable = 1  # bit 0: the empty subset sums to 0, so possible(0, ...) is True
    for c in coins:
        # Coins are positive denominations; ones above v can never help.
        if c <= v:
            reachable |= reachable << c
    return bool((reachable >> v) & 1)
def calc(C, D, V, coins):
    """Count how many denominations must be added so that every value
    1..V is representable; missing denominations are appended to
    ``coins`` in place as they are discovered.
    """
    added = 0
    for value in range(1, V + 1):
        if possible(value, coins):
            continue
        coins.append(value)
        added += 1
    return added
def main():
    """Read T test cases from stdin, solve each in a process pool, and
    print the answers in input order."""
    case_count = int(input())
    futures = []
    with cf.ProcessPoolExecutor(max_workers=8) as executor:
        for _ in range(case_count):
            C, D, V = (int(tok) for tok in input().split())
            denominations = [int(tok) for tok in input().split()]
            futures.append(executor.submit(calc, C, D, V, denominations))
    for index, future in enumerate(futures):
        print('Case #{}: {}'.format(index + 1, future.result()))


if __name__ == '__main__':
    main()
|
25,227 | c695edb47ac88e67c640b933bf9a4187b9631af5 | # Generated by Django 3.1.7 on 2021-04-17 12:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('order', '0015_auto_20210416_1719'),
]
operations = [
migrations.AddField(
model_name='order',
name='is_user_verified',
field=models.BooleanField(default=False),
),
]
|
25,228 | 74a61edfb52e0ecf58108317c09f5c3db43876f1 | # from yarpiz.pso import PSO
# import yarpiz.pso as yp
import argparse
import numpy as np
import pandapower as pp
from pprint import pprint
import matplotlib.pyplot as plt
from pandapower.networks import case14, case_ieee30, case118
import time
from lib import yarpiz_custom_pso as yp
import lib.fpor_tools as fpor
# CLI: bus-system size, number of independent PSO runs, optional plotting.
parser = argparse.ArgumentParser(description='Particle Swarm Optimization')
parser.add_argument('-nb', '--num_bus', type=int, help="Bus number", default=14)
parser.add_argument('-r', '--runs', type=int, help="Number of runs", default=1)
parser.add_argument('-p', '--plot', action='store_true', help="Plot the results")
args = parser.parse_args()
# NOTE(review): 'global' at module scope is a no-op in Python; kept as-is.
global net, net_params
# Map the requested bus count to its pandapower case builder and build it.
net = {
    14:case14,
    30:case_ieee30,
    118:case118
    # Change the number below to select the case.
}[args.num_bus]()
print('\nIEEE System {} bus\n'.format(args.num_bus))
print(net)
net_params = fpor.network_set(net)
global nb, ng, nt, ns
# Problem dimensions: buses, generators, transformer taps, shunts.
nb = net_params['n_bus']
ng = net_params['n_gen']
nt = net_params['n_taps']
ns = net_params['n_shunt']
# Initial Newton-Raphson power flow to validate the case.
pp.runpp(net, algorithm = 'nr', numba = True)
pso_params = {
    'MaxIter': 100,
    'PopSize': 25,
    'c1': 1.5,
    'c2': 2,
    'w': 1,
    'wdamp': 0.995
}
test_params = {
    'Runs': args.runs,
    'lambda_volt': 1e4,
    'lambda_tap': 1e4,
    'lambda_shunt': 1e10,
    'volt_threshold':1e-10,
    'tap_threshold': 1e-10,
    'shunt_threshold':1e-08
}
# lambda -> Multiplies discrete penalties
global lambd_volt, lambd_tap, lambd_shunt, tap_thr, sh_thr
lambd_volt = test_params['lambda_volt']
lambd_tap = test_params['lambda_tap']
lambd_shunt = test_params['lambda_shunt']
volt_thr = test_params['volt_threshold']
tap_thr = test_params['tap_threshold']
sh_thr = test_params['shunt_threshold']
def fitness_function(x):
    """Penalized objective for one particle.

    Runs a power flow for candidate ``x``, then adds weighted penalties
    for voltage-limit violations (pen_v) and for tap/shunt settings far
    from their discrete steps (senoidal / polynomial penalties).
    """
    x = fpor.run_power_flow(x, net, net_params, ng, nt, ns)
    # fopt and boundaries penalty
    f, pen_v = fpor.fopt_and_penalty(net, net_params,n_threshold=volt_thr)
    tap_pen = fpor.senoidal_penalty_taps(x, net_params, n_threshold=tap_thr)
    shunt_pen = fpor.polinomial_penalty_shunts(x, net_params, n_threshold=sh_thr)
    return f + lambd_volt*pen_v + lambd_tap*tap_pen + lambd_shunt*shunt_pen
upper_bounds, lower_bounds = fpor.get_upper_and_lower_bounds_from_net(net, net_params)
n_var = fpor.get_variables_number_from_net(net, net_params)
problem = {
    'CostFunction': fitness_function,
    'nVar': n_var,
    'VarMin': lower_bounds,
    'VarMax': upper_bounds
}
conv_plot = []
results = []
# Independent PSO runs; each run's best particle is post-processed by
# debug_fitness_function and collected for the final statistics.
for run in range(1,test_params['Runs']+1):
    print('Run No {} out of {}'.format(run,test_params['Runs']))
    start = time.time()
    gbest, pop, convergence_points = yp.PSO(problem, **pso_params)
    elapsed = round(time.time() - start, 2)
    print('Run No {} results:'.format(run))
    results.append(\
        fpor.debug_fitness_function(gbest['position'],net,net_params,test_params,elapsed))
    if args.plot:
        conv_plot.append(convergence_points)
# Pick the run with the smallest objective value and report it.
fopt_values = [results[i]['f'] for i in range(len(results))]
ind = np.argmin(fopt_values)
best_result = results[ind]
print("\nFinal results of best run: (Run {})".format(ind+1))
pprint(best_result, sort_dicts=False)
print("\nStatistics:")
pprint(fpor.get_results_statistics(fopt_values))
print("\nTest Parameters:")
pprint(test_params)
print("\nPSO Parameters:")
pprint(pso_params)
if args.plot:
    fpor.plot_results(nb, best_result, voltage_plot=True)
    fpor.plot_results(nb, best_result, voltage_plot=False)
    fpor.plot_convergence(nb, conv_plot[ind])
|
25,229 | 9b21afecb452136109b02bfbcd6254dd54c4e4bd | #/usr/bin/env python
# -*- coding: utf-8 -*
"auto create git repo"
import os,sys
import gitlab
WORKDIR = os.path.split(os.path.realpath(__file__))[0]
PROJECTFILE = os.path.join(WORKDIR, 'project.list')
USER_HOME = os.environ.get('HOME')
def main():
    """Read PROJECTFILE (CSV lines: repo_url,description,user_name) and
    create each repository via create_repo().

    Fixes: the file handle is now closed even on error (with-statement),
    and the unused GitLab login here is dropped — create_repo() performs
    its own login, so this one only cost a needless network round-trip.
    """
    with open(PROJECTFILE, 'r') as fp:
        lines = fp.readlines()
    for line in lines:
        line = line.strip('\n')
        repo_url = line.split(',')[0]
        descri = line.split(',')[1]
        user_name = line.split(',')[2]
        create_repo(repo_url, descri, user_name)
def create_repo(repo_url, descri, user_name):
    """Ensure <parent>/<sub_group>/<project> exists on GitLab and grant
    ``user_name`` master access on newly created projects.

    Exits the process with status 1 (fixed: was 0, i.e. success) when the
    sub-group does not exist.
    """
    ## data: split "https://host/parent/sub/project.git" into components.
    parent_group_name = repo_url.split('/')[3]
    sub_group_name = repo_url.split('/')[4]
    project_name = repo_url.split('/')[5].replace('.git','')
    print (parent_group_name,sub_group_name,project_name)
    # NOTE(review): the caller-supplied description is discarded here and
    # the project name used instead — kept as-is, but confirm it is intended.
    descri = project_name
    ## login
    gl = gitlab.Gitlab.from_config('somewhere', ['{}/.python-gitlab.cfg'.format(USER_HOME)])
    user_id = gl.users.list(search=user_name)[0].id
    group = gl.groups.get(parent_group_name)  # parent group object
    ## find the sub-group under the parent
    subgroup_id = group.subgroups.list(search=sub_group_name)
    if subgroup_id:
        print ("%s subgroup found: %s" % (sub_group_name, subgroup_id))
        subgroup_id_value = subgroup_id[0].id
        subgroup = gl.groups.get(subgroup_id_value)
    else:
        print ("%s subgroup not exist" % sub_group_name)
        sys.exit(1)  # missing sub-group is a failure: exit non-zero (was 0)
    # find the project; create it (plus master member) when absent
    project = subgroup.projects.list(search=project_name)
    if project:
        print ("[INFO]%s project found: %s" % (project_name, project))
    else:
        print ("%s project not exist,create it" % project_name)
        new_project = gl.projects.create({'name': project_name, 'namespace_id': subgroup_id_value, 'description': descri})
        new_project.members.create({'user_id': user_id, 'access_level':
            gitlab.MASTER_ACCESS})


if __name__ == '__main__':
    main()
|
25,230 | 8b673679bb7e7bb9ae4df6da5f904af5317a8663 | import pandas as pd
import numpy as np
from scipy import stats
from sklearn.preprocessing import StandardScaler
def categorize(df, columns, values, remove=True, ordered=True):
    """Encode each listed column as ordered category codes.

    For every (column, categories) pair, a new ``<col>_cat`` column holds
    the integer codes; when ``remove`` is True the original column is
    dropped from ``df`` in place.
    """
    coded = pd.DataFrame()
    for col, cats in zip(columns, values):
        coded[f'{col}_cat'] = pd.Categorical(df[col], ordered=ordered, categories=cats).codes
        if remove:
            df.drop([col], axis=1, inplace=True)
    return pd.concat([df, coded], axis=1, sort=False)
def logit(df, columns, remove=True):
    """Add a ``<col>_log`` = log1p(col) column for each listed column.

    Bug fix: when ``remove`` is True the old code reset the index of the
    remaining columns but not of the new log columns, so frames with a
    non-default index were concatenated misaligned (NaN-padded). Both
    sides now share a reset RangeIndex; default-index frames behave
    exactly as before.
    """
    new_df = pd.DataFrame()
    for e in columns:
        name = e + '_log'
        new_df[name] = df[e].apply(lambda x: np.log1p(x))
    if remove:
        df_no_col = df.drop(columns, axis=1).reset_index(drop=True)
        return pd.concat([df_no_col, new_df.reset_index(drop=True)], axis=1, sort=False)
    return pd.concat([df, new_df], axis=1, sort=False)
def remove_outliers(df, columns, threshold=3):
    """Remove rows whose |z-score| exceeds ``threshold`` in any listed
    column; columns are processed in order and ``df`` is mutated in place.

    Bug fixes: the old code dropped by the *positional* index returned by
    enumerate, which stops matching the labels after the first drop
    (KeyError or wrong rows removed), and its penultimate
    ``df.reset_index()`` discarded its result. Rows are now dropped by
    their actual index labels. As before, the returned frame keeps the
    surviving labels in an 'index' column (``reset_index()``).
    """
    for col in columns:
        # z-scores over the rows that are still present.
        z = np.abs(stats.zscore(df[col]))
        bad_labels = [label for label, score in zip(df.index, z) if score > threshold]
        df.drop(index=bad_labels, inplace=True)
    return df.reset_index()
def standardize(df, columns, remove=True):
    """Append z-scored copies of ``columns`` as ``<col>_st``; when
    ``remove`` is True the originals are dropped from ``df`` in place.
    """
    scaled = StandardScaler().fit_transform(df[columns])
    scaled_df = pd.DataFrame(scaled, columns=[c + '_st' for c in columns])
    if remove:
        df.drop(columns, axis=1, inplace=True)
    return pd.concat([df, scaled_df], axis=1, sort=False)
|
25,231 | 2c7d40670f33014adc4feb1050d1f3eedf8e4067 | '''
main
'''
from sill_system.skill_manager import *
# Script entry: report the skill manager's state — behaviour is defined in
# sill_system.skill_manager (star-imported above); confirm there.
print_skill_manager()
25,232 | 40452ec2c7c7c80cb2e7c2f50f7ada6e0cf15cd8 | from RedBot import *
# Train both RL agents with the same seed (presumably for comparable,
# reproducible runs — confirm against RedBot's train()), then evaluate.
train(RLBot1, seed=21)
train(RLBot2, seed=21)
test()
|
25,233 | 17ccab8c5160598142e4c3e0c5240b2b28a4e18a | from ._column_transformer import columnTransformer
|
25,234 | 192a1955032efa3dabb77e9a49864dd7132874ae | from sys import stdin
from collections import Counter
if __name__ == '__main__':
    # Magic-trick card puzzle: the volunteer's card must be the unique value
    # common to the indicated row of two successive 4x4 deals read from stdin.
    test_cases_n = int(stdin.readline())
    for i in range(0, test_cases_n):
        # First deal: chosen row number (1-4), then four rows of four numbers.
        answer1 = int(stdin.readline())
        row1 = []
        for j in range(0, 4):
            s = stdin.readline().split(' ')
            if j + 1 == answer1:
                row1 = set([int(c) for c in s])
        # Second deal, same format.
        answer2 = int(stdin.readline())
        row2 = []
        for j in range(0, 4):
            s = stdin.readline().split(' ')
            if j + 1 == answer2:
                row2 = set([int(c) for c in s])
        # Intersection of the two chosen rows decides the verdict:
        # exactly one common card, several (can't tell), or none (cheating).
        solution = list((Counter(row1) & Counter(row2)).elements())
        if len(solution) == 1:
            print('Case #{}: {}'.format(i + 1, solution[0]))
        elif len(solution) > 1:
            print('Case #{}: Bad magician!'.format(i + 1))
        else:
            print('Case #{}: Volunteer cheated!'.format(i + 1))
25,235 | 53bf81b381c2d7e5c000fc757f9c6d7a13dae090 | #!/usr/bin/env python
# coding: utf-8
# # Feature engineering
#
# In this notebook I want to try hand-crafting some features that could help to create a model. I want to see what creative ideas I can come up with - and if they indeed seem to work.
# In[16]:
import pandas as pd
df = pd.read_csv('../input/train.csv')
# In[17]:
df.head()
# Below i'm adding features to the dataset that are computed from the comment text. Some i've seen in discussions for this competition, others i came up with while looking at the data. Right now, they are:
#
# * Length of the comment - my initial assumption is that angry people write short messages
# * Number of capitals - observation was many toxic comments being ALL CAPS
# * Proportion of capitals - see previous
# * Number of exclamation marks - i observed several toxic comments with multiple exclamation marks
# * Number of question marks - assumption that angry people might not use question marks
# * Number of punctuation symbols - assumption that angry people might not use punctuation
# * Number of symbols - assumption that words like f*ck or $#* or sh*t mean more symbols in foul language (Thx for tip!)
# * Number of words - angry people might write short messages?
# * Number of unique words - observation that angry comments are sometimes repeated many times
# * Proportion of unique words - see previous
# * Number of (happy) smilies - Angry people wouldn't use happy smilies, right?
# In[18]:
df['total_length'] = df['comment_text'].apply(len)
df['capitals'] = df['comment_text'].apply(lambda comment: sum(1 for c in comment if c.isupper()))
df['caps_vs_length'] = df.apply(lambda row: float(row['capitals'])/float(row['total_length']),
axis=1)
df['num_exclamation_marks'] = df['comment_text'].apply(lambda comment: comment.count('!'))
df['num_question_marks'] = df['comment_text'].apply(lambda comment: comment.count('?'))
df['num_punctuation'] = df['comment_text'].apply(
lambda comment: sum(comment.count(w) for w in '.,;:'))
df['num_symbols'] = df['comment_text'].apply(
lambda comment: sum(comment.count(w) for w in '*&$%'))
df['num_words'] = df['comment_text'].apply(lambda comment: len(comment.split()))
df['num_unique_words'] = df['comment_text'].apply(
lambda comment: len(set(w for w in comment.split())))
df['words_vs_unique'] = df['num_unique_words'] / df['num_words']
df['num_smilies'] = df['comment_text'].apply(
lambda comment: sum(comment.count(w) for w in (':-)', ':)', ';-)', ';)')))
# Let's inspect data - did this work?
# In[19]:
df.head()
# Now we'll calculation correlation between the added features and the to-be-predicted columns, this should be an indication of whether a model could use these features:
# In[20]:
features = ('total_length', 'capitals', 'caps_vs_length', 'num_exclamation_marks',
'num_question_marks', 'num_punctuation', 'num_words', 'num_unique_words',
'words_vs_unique', 'num_smilies', 'num_symbols')
columns = ('toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate')
rows = [{c:df[f].corr(df[c]) for c in columns} for f in features]
df_correlations = pd.DataFrame(rows, index=features)
# Let's output the data:
# In[21]:
df_correlations
# I'll also output the data as a heatmap - that's slightly easier to read.
# In[22]:
import seaborn as sns
ax = sns.heatmap(df_correlations, vmin=-0.2, vmax=0.2, center=0.0)
# So, what have we learned? Some of the feature ideas i had make sense: They correlate with the to-be-predicted data, so a model should be able to use them. Other feature ideas don't correlate - so they look less promising.
#
# For now these feature seem the best candidates:
# * Proportion of capitals
# * Number of unique words
# * Number of exclamation marks
# * Number of punctuations
#
# Hope this could be useful to someone! If you have more (feature) ideas or feedback - please comment, then I can add them here.
#
|
25,236 | 9f08d813c7f2d05cedc06cd41c608ac804109621 | from django.shortcuts import render
from .models import Song
from django.http import HttpResponse
# Create your views here.
def song_list(request):
    """Render the songbook index page listing every Song."""
    all_songs = Song.objects.all()
    context = {'songbook': all_songs}
    return render(request, 'songbook/song_list.html', context)
def song_detail(request, slug):
    """Render the detail page for the Song identified by *slug*."""
    song = Song.objects.get(slug=slug)
    context = {'songbook': song}
    return render(request, 'songbook/song_detail.html', context)
|
25,237 | 1b33d7239685772867f3d3a881286a15ad2a1c67 |
# coding: utf-8
# In[1]:
import pandas as pd
def get_training_data(smol=0.01, seed=1337):
    """Load data.csv, clean wages, label tech titles, return a sample.

    *smol* is the sampled fraction; *seed* seeds the sampling step.

    Bug fix: *seed* was previously ignored — ``random_state`` was
    hard-coded to 1337 in the final ``sample`` call.
    """
    h1b_data = pd.read_csv('data.csv')
    desired_cols = ['JOB_TITLE',
                    'EMPLOYER_NAME',
                    'WORKSITE_STATE',
                    'WORKSITE_CITY',
                    'WAGE_RATE_OF_PAY_FROM']
    # .copy() avoids pandas' SettingWithCopy warning on the rewrite below.
    a = h1b_data[desired_cols].copy()
    # Wage strings look like "75,000.00": strip commas, drop the last three
    # characters (".00"), cast to int.
    a['WAGE_RATE_OF_PAY_FROM'] = a['WAGE_RATE_OF_PAY_FROM'].apply(
        lambda x: int(x.replace(',', '')[:-3]))
    b = a[~(a['WAGE_RATE_OF_PAY_FROM'] < 10000)]
    b = b.dropna()
    c = b.copy()
    TECHIES = set(['SOFTWARE', 'PROGRAMMER', 'DEVELOPER', 'ENGINEER'])
    from numpy.random import rand
    # Label a title as HENRY when any word matches TECHIES, but only ~95%
    # of the time (rand() per row).  NOTE(review): this makes the labels
    # nondeterministic even with a fixed *seed* — confirm that is intended.
    is_a_techie = lambda job_title: any([word in TECHIES for word in job_title.upper().strip().split()]) and rand() <= 0.95
    c['HENRY'] = c['JOB_TITLE'].map(is_a_techie)
    return c.sample(frac=smol, replace=False, random_state=seed)
train = get_training_data(0.001)
print(len(train))
train.head()
# In[2]:
train['JOB_TITLE'].describe()
# In[3]:
from gensim.models import Word2Vec
# In[48]:
h1b_data = pd.read_csv('data.csv')
desired_cols = ['JOB_TITLE',
'EMPLOYER_NAME',
'WORKSITE_STATE',
'WORKSITE_CITY',
'WAGE_RATE_OF_PAY_FROM']
a = h1b_data[desired_cols]
# In[151]:
# a['JOB_TITLE']
# a['WORKSITE_CITY'].describe()
job_freqs = a[['JOB_TITLE', 'WORKSITE_CITY']].groupby('WORKSITE_CITY').count()
# print(job_freqs)
# In[161]:
top_96 = job_freqs[job_freqs['JOB_TITLE']>1000].sort_values('JOB_TITLE', ascending=False)
# In[174]:
pa_jobs = a[a['JOB_TITLE']=='BUSINESS ANALYST'][['JOB_TITLE', 'WORKSITE_CITY']].groupby('WORKSITE_CITY').count()
pa_jobs['WORKSITE_CITY'] = pa_jobs.index
top_96['WORKSITE_CITY'] = top_96.index
print(len(a[a['JOB_TITLE']=='BUSINESS ANALYST']))
pa_jobs.head()
# In[173]:
pd.merge(top_96, pa_jobs, how='left', on='WORKSITE_CITY')
# In[77]:
sentences=map(str, a['JOB_TITLE'].tolist())
print(list(sentences)[:10])
# model = Word2Vec(sentences=sentences)
model = Word2Vec(zip(sentences, ["sentence"]*len(list(sentences))), size=2, min_count=0)
# model.build_vocab(sentences=sentences, keep_raw_vocab=True)
model.wv.vocab
# In[137]:
import gensim
# sentences = ['ASSOCIATE DATA INTEGRATION', 'SENIOR ASSOCIATE', '.NET SOFTWARE PROGRAMMER', 'PROJECT MANAGER', 'ASSOCIATE - ESOTERIC ASSET BACKED SECURITIES', 'CREDIT RISK METRICS SPECIALIST', 'BUSINESS SYSTEMS ANALYST', 'PROGRAMMER ANALYST', 'PROGRAMMER ANALYST', 'PROGRAMMER ANALYST']
sentences = list(map(lambda x: [str(x)], a['JOB_TITLE'].tolist()))
sentence_tokens = list(map(lambda x: str(x).replace(',','').split(), a['JOB_TITLE'].tolist()))
print(sentences[:5])
print(sentence_tokens[:5])
# train word2vec on the two sentences
# model = gensim.models.Word2Vec(sentences, min_count=1)
# model = gensim.models.Word2Vec(zip(sentences, ["sentence"]*len(list(sentences))), size=2, min_count=0, window=1, sg=0, negative=4)
# model = gensim.models.Word2Vec(sentences, size=2, min_count=0, window=1, sg=0, negative=4)
model = gensim.models.Word2Vec(min_count=0, window=1, sg=0)
model.build_vocab(sentence_tokens)
# model.build_vocab(sentences)
model.train(sentences, total_examples=len(sentences), epochs=5)
# model = gensim.models.KeyedVectors.load_word2vec_format("GoogleNews-vectors-negative300.bin", binary=True)
# In[138]:
print(model.wv['ENGINEER'])
# print(dir(model))
model.predict_output_word(['VP'])
# print(model.wv['VP,'])
# print(model.wv['VP, SENIOR PREPARER OF FINANCIAL STATEMENTS'])
# model.most_similar('SOFTWARE DEVELOPER')
# In[18]:
model.save('job2vec.model')
# In[19]:
model = Word2Vec.load('job2vec.model')
# In[35]:
print(model)
print(dir(model))
# print(model.vocab.keys())
# model.scan_vocab(['engineer'])
print(model.raw_vocab)
# model.wv['ENGINEER']
|
25,238 | cf7f542f09354afcdc99b3124c5e2ef17aee737a | import os
# Re-wrap a text file in place so long lines are split into fixed-width
# chunks, via a temp file.  NOTE: Python 2 script (raw_input, int division).
file1 = raw_input('enter the filename: ')
with open(file1) as fobj1:
    with open('temp.txt', 'w') as fobj2:
        for i in fobj1:
            if len(i) > 80:
                num = list(i)
                # Number of whole 80-char chunks (Python 2 integer division).
                count = len(num) / 80
                for i in range(count):
                    # NOTE(review): chunks are 79 chars although the test
                    # above is > 80, any remainder after `count` chunks is
                    # silently dropped, and this loop variable shadows the
                    # outer `i` — confirm intent before reusing this script.
                    fobj2.write("".join(num[:79]))
                    fobj2.write('\n')
                    num = num[79:]
            else:
                # NOTE(review): lines read from a file already end in '\n',
                # so this branch doubles newlines.
                fobj2.write(i)
                fobj2.write('\n')
# Copy the wrapped temp file back over the original.
with open('temp.txt') as fobj2:
    with open(file1, 'w') as fobj1:
        for i in fobj2:
            fobj1.write(i)
#os.remove('temp.txt')
# NOTE(review): redundant — both files were opened with `with` and are
# already closed at this point.
fobj1.close()
fobj2.close()
25,239 | 9908ba2238856a90c0a083b5eea9613925066232 | from django import forms
from .models import WorkTime, WorkPlace
class CreateWorkTime(forms.ModelForm):
    """ModelForm exposing only the start/end timestamps of a WorkTime."""

    class Meta:
        model = WorkTime
        fields = ('date_start', 'date_end')
class ChangeStatusForm(forms.ModelForm):
    """ModelForm over WorkPlace with no editable fields.

    Presumably used only to drive a POST/CSRF round-trip while the view
    changes the status itself — confirm against the consuming view.
    """

    class Meta:
        model = WorkPlace
        fields = []
|
25,240 | 1e9f1549b6b103b295c6a8ef1b9dbadb94b92e85 | # Реализовать функцию str_cap(), принимающую слово из маленьких латинских букв и возвращающую его же, но с прописной
# первой буквой. Например, print(str_cap(‘text’)) -> Text.
# Продолжить работу над заданием. В программу должна попадать строка из слов, разделенных пробелом. Каждое слово состоит
# из латинских букв в нижнем регистре. Сделать вывод исходной строки, но каждое слово должно начинаться с заглавной
# буквы. Необходимо использовать написанную ранее функцию str_cap().
def str_cap(word):
    """ (string) -> string

    Return *word* with its first letter upper-cased.  A non-string
    argument prints an error message and yields None.

    >>> str_cap('example')
    'Example'
    >>> str_cap('9')
    '9'
    """
    try:
        capitalized = word.capitalize()
    except AttributeError:
        print('Ошибочный аргумент.')
        return None
    return capitalized
# Read a sentence of space-separated words and print it back with every
# word capitalized via str_cap().
sentence = input('Введите предложение: ')
newSentence = ''
for word in sentence.split():
    newSentence += str_cap(word) + ' '
# strip() removes the trailing space added by the loop.
print(newSentence.strip())
|
25,241 | 4d4ea32891167ee6ece031e19e987ed876198c14 | # adapted from:
# https://www.kaggle.com/mpearmain/homesite-quote-conversion/xgboost-benchmark/code
import pandas as pd
import numpy as np
import xgboost as xgb
from sklearn import preprocessing
from sklearn.cross_validation import train_test_split
from sklearn.metrics import roc_auc_score
seed = 12345
train = 'train.csv'
test = 'test.csv'
print 'Reading data...'
train = pd.read_csv(train)
test = pd.read_csv(test)
def format_data(d):
    """Prepare a Homesite frame: drop ids, derive date parts, fill NaNs.

    Drops 'QuoteNumber', expands 'Original_Quote_Date' into Year/Month/
    DayOfWeek, removes the raw date columns, and returns the frame with
    missing values replaced by -1.  Mutates *d* in place (column drops).
    """
    d.drop('QuoteNumber', axis=1, inplace=True)
    # Calendar features derived from the quote date.
    quote_date = pd.to_datetime(pd.Series(d['Original_Quote_Date']))
    d['Date'] = quote_date
    d['Year'] = quote_date.dt.year
    d['Month'] = quote_date.dt.month
    d['DayOfWeek'] = quote_date.dt.dayofweek
    for raw_col in ('Original_Quote_Date', 'Date'):
        d.drop(raw_col, axis=1, inplace=True)
    # Sentinel for missing values, consumed downstream by xgboost.
    return d.fillna(-1)
print 'Formatting data...'
y = np.array(train['QuoteConversion_Flag'])
train.drop('QuoteConversion_Flag', axis=1, inplace=True)
train = format_data(train)
submission = test[['QuoteNumber']]
test = format_data(test)
print 'Creating features...'
features = train.columns
# convert categorical features to numeric
for f in features:
if train[f].dtype=='object':
print(f)
lbl = preprocessing.LabelEncoder()
lbl.fit(list(train[f].values) + list(test[f].values))
train[f] = lbl.transform(list(train[f].values))
test[f] = lbl.transform(list(test[f].values))
# create train/eval
train_X, eval_X, train_y, eval_y = train_test_split(train, y, test_size=.05)
dtrain = xgb.DMatrix(train_X, train_y)
deval = xgb.DMatrix(eval_X, eval_y)
watchlist = [(dtrain, 'train'), (deval, 'eval')]
params = {"objective": "binary:logistic",
"booster" : "gbtree",
"eta": 0.08,
"max_depth": 13,
"subsample": 0.7,
"colsample_bytree": 0.7,
"eval_metric": "auc",
"silent": 1
}
rounds = 1600
print 'Training model...'
gbm = xgb.train(params, dtrain, rounds, evals=watchlist, early_stopping_rounds=50, verbose_eval=True)
preds = gbm.predict(deval)
score = roc_auc_score(eval_y, preds)
print 'Evaluation set AUC: {0}'.format(score)
print 'Making submission...'
dtest = xgb.DMatrix(test)
submission_preds = gbm.predict(dtest)
submission['QuoteConversion_Flag'] = submission_preds
submission.to_csv('xgb_submission0005.csv', index=False)
# XGB feature importances
#xgb.plot_importance(gbm)
#mpl.pyplot.savefig('foo.png')
x = pd.Series(gbm.get_fscore())
x.to_csv('feature_score5.csv') |
25,242 | d7c577e7a4155bc2dff8285991e9d7f9b60976a6 | import numpy as np
x = 'baz'
y = 'fbar'
def build_edit_table(x, y):
    """Build the (len(x)+1) x (len(y)+1) Levenshtein DP table.

    D[i, j] holds the edit distance between x[:i] and y[:j].
    """
    rows, cols = len(x) + 1, len(y) + 1
    D = np.zeros((rows, cols))
    # Base cases: distance to/from the empty prefix is the prefix length.
    D[:, 0] = np.arange(rows)
    D[0, :] = np.arange(cols)
    # Fill in row-major order: substitute/match, insert, delete.
    for i in range(1, rows):
        for j in range(1, cols):
            sub_cost = 0 if x[i - 1] == y[j - 1] else 1
            D[i, j] = min(D[i - 1, j - 1] + sub_cost,
                          D[i, j - 1] + 1,
                          D[i - 1, j] + 1)
    return D
def edit_dist(x, y):
    """Return the Levenshtein distance between strings *x* and *y*."""
    table = build_edit_table(x, y)
    return table[len(x), len(y)]
def backtrack_(D, x, y, i, j, path):
    """Append the ops aligning x[:i] with y[:j] to *path*, in reverse.

    Ops: '=' match, 'X' substitute, 'D' consume a char of y only,
    'I' consume a char of x only.  Ties prefer 'D', then diagonal, then
    'I' (strict-< comparisons, matching the table construction).
    """
    # Base cases: one prefix exhausted — the rest is a run of one op.
    if i == 0:
        path.extend('D' * j)
        return
    if j == 0:
        path.extend('I' * i)
        return
    mismatch = x[i - 1] != y[j - 1]
    best_cost = D[i, j - 1] + 1
    best_op = 'D'
    diag_cost = D[i - 1, j - 1] + (1 if mismatch else 0)
    if diag_cost < best_cost:
        best_op = 'X' if mismatch else '='
        best_cost = diag_cost
    if D[i - 1, j] + 1 < best_cost:
        best_op = 'I'
    path.append(best_op)
    if best_op == 'D':
        backtrack_(D, x, y, i, j - 1, path)
    elif best_op in ('=', 'X'):
        backtrack_(D, x, y, i - 1, j - 1, path)
    else:
        backtrack_(D, x, y, i - 1, j, path)
def backtrack(D, x, y):
    """Return the edit-operation string aligning *x* to *y*, forward order."""
    ops = []
    backtrack_(D, x, y, len(x), len(y), ops)
    return ''.join(reversed(ops))
D = build_edit_table(x, y)
print(backtrack(D, x, y))
|
25,243 | e09ffde51a1185dbfd215b89a54476b0e435a00e | from builtins import str
from builtins import object
import re
from w20e.forms.registry import Registry
# Expression for variable subsitution in labels and hints
VAREXP = re.compile('\$\{[^\}]+\}')
def cache(func):
    """Memoise a renderer-lookup method on the instance's _v_registry.

    The wrapped method takes (renderableType, rendererType); results are
    cached under the key "<renderable>::<renderer>".  A None result is
    deliberately not cached, so failed lookups are retried.
    """
    def get_renderer(self, renderableType, rendererType):
        key = "%s::%s" % (renderableType, rendererType)
        cached = self._v_registry.get(key)
        if cached is None:
            cached = func(self, renderableType, rendererType)
            self._v_registry[key] = cached
        return cached
    return get_renderer
class BaseRenderer(object):
    """Shared base for form renderers.

    Resolves per-type renderers through the w20e Registry (memoised per
    instance via @cache) and flattens a renderable into a dict usable as
    a template format map.
    """

    def __init__(self, **kwargs):
        """ Initialize renderer, given global options """
        self.opts = {}
        self.opts.update(kwargs)
        # Per-instance cache used by the @cache decorator on
        # getRendererForType ("<renderable>::<renderer>" -> renderer).
        self._v_registry = {}

    @cache
    def getRendererForType(self, renderableType, rendererType):
        """Instantiate the registered renderer class for the given types."""
        clazz = Registry.get_renderer(renderableType, rendererType)
        return clazz()

    def getType(self, renderable):
        """ Return the renderable's type (or class) """
        if hasattr(renderable, 'type'):
            return renderable.type
        return renderable.__class__.__name__

    def createFormatMap(self, form, renderable, **extras):
        """ Create a dict out of the renderable's properties.

        Substitutes ${var} references in label/hint/text/placeholder with
        current field values, derives CSS-ish state classes (relevant,
        required, readonly, error) from the form model, and assembles the
        'alert' message from *extras*' errors.
        """
        fmtmap = renderable.__dict__.copy()
        fmtmap.update(extras)

        def replaceVars(match):
            # Replace "${field}" (optionally "${field:lexical}") with the
            # field's current value; any failure leaves the token as-is.
            try:
                var = match.group()[2:-1]
                if var and var.endswith(":lexical"):
                    var = var[:-len(":lexical")]
                    value = form.getFieldValue(var, lexical=True) or ''
                else:
                    value = form.getFieldValue(var) or ''
                if not isinstance(value, str):
                    # NOTE(review): decode('utf-8') is a Python 2 remnant
                    # (file is futurized); on Python 3 this raises and falls
                    # through to the except below — confirm intended.
                    if not hasattr(value, "decode"):
                        value = str(value)
                    value = value.decode('utf-8')
                return value
            except Exception:
                # Deliberate best-effort: unknown fields stay "${...}".
                return match.group()

        # process labels and hints
        if 'label' in fmtmap and fmtmap['label'] is not None:
            fmtmap['label'] = VAREXP.sub(replaceVars, fmtmap['label'])
        if 'hint' in fmtmap and fmtmap['hint'] is not None:
            fmtmap['hint'] = VAREXP.sub(replaceVars, fmtmap['hint'])
        if 'text' in fmtmap and fmtmap['text'] is not None:
            fmtmap['text'] = VAREXP.sub(replaceVars, fmtmap['text'])
        if 'placeholder' in fmtmap and fmtmap['placeholder'] is not None:
            fmtmap['placeholder'] = VAREXP.sub(replaceVars,
                                               fmtmap['placeholder'])

        # defaults
        extra_classes = {'relevant': True, 'required': False,
                         'readonly': False, 'error': False}

        # Let's see whether we got properties here...
        try:
            if hasattr(renderable, 'bind') and renderable.bind:
                # Requiredness
                if form.model.isRequired(renderable.bind, form.data):
                    extra_classes["required"] = True
                if not form.model.isRelevant(renderable.bind, form.data):
                    extra_classes["relevant"] = False
                # Read only
                if form.model.isReadonly(renderable.bind, form.data):
                    extra_classes["readonly"] = True
            elif hasattr(renderable, 'getRenderables') and \
                    callable(renderable.getRenderables):
                # Group relevance
                if not form.model.isGroupRelevant(renderable, form.data):
                    extra_classes["relevant"] = False
        except Exception:
            # Model lookups are best-effort; keep the defaults above.
            pass

        # Error state: prefer the renderable's own alert text, otherwise
        # join the bound field's error messages.
        if extras.get("errors", None) and \
                hasattr(renderable, 'bind') and renderable.bind and \
                extras['errors'].get(renderable.bind, None):
            extra_classes['error'] = True
            if getattr(renderable, 'alert', ''):
                fmtmap['alert'] = renderable.alert
            else:
                fmtmap['alert'] = "; ".join(extras['errors'][renderable.bind])
        else:
            fmtmap['alert'] = ''

        # Merge state classes into any pre-existing extra_classes string.
        if "extra_classes" in fmtmap:
            fmtmap['extra_classes'] = " ".join([fmtmap['extra_classes']] +
                                               [key for key in
                                                list(extra_classes.keys())
                                                if extra_classes[key]])
        else:
            fmtmap['extra_classes'] = " ".join([key for key in
                                                list(extra_classes.keys()) if
                                                extra_classes[key]])

        fmtmap['type'] = self.getType(renderable)

        return fmtmap
|
25,244 | 034795cb6cc2b144854299d7491a6c01aa1efbad | from django.contrib import admin
from publish.models import *
# Register your models here.
class Oauth2IntegrationAdmin(admin.ModelAdmin):
    # Default ModelAdmin behaviour; kept as an explicit hook for later
    # customisation of the OAuth2Integration admin.
    pass


# Expose the publish models in the Django admin site.
admin.site.register(OAuth2Integration, Oauth2IntegrationAdmin)
admin.site.register(UserToPublishGroup)
admin.site.register(PublishGroup)
25,245 | b1c9157b5f0089067073af1a87a348c2cb5e4444 | import numpy as np
import pandas as pd
def data_construction():
train = pd.read_json("./Data/train.json")
test = pd.read_json("./Data/test.json")
X_band_1=np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in train["band_1"]])
X_band_2=np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in train["band_2"]])
X_train = np.concatenate([X_band_1[:, :, :, np.newaxis], X_band_2[:, :, :, np.newaxis],((X_band_1+X_band_2)/2)[:, :, :, np.newaxis]], axis=-1)
X_band_test_1=np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in test["band_1"]])
X_band_test_2=np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in test["band_2"]])
X_test = np.concatenate([X_band_test_1[:, :, :, np.newaxis]
, X_band_test_2[:, :, :, np.newaxis]
, ((X_band_test_1+X_band_test_2)/2)[:, :, :, np.newaxis]], axis=-1)
return train,test,X_train,X_test |
25,246 | a63021125d284f227686842e5e4e7789fb168be8 | from django.shortcuts import render
# Create your views here.
# Wendy Griffin
def post_list(request):
    """Render the blog post-list page (currently with an empty context)."""
    return render(request, 'blog/post_list.html', {})
|
25,247 | fc656788dd3795f30dfeb6a4817afbdc76483395 | '''
codes/utilities/decorators.py
'''
### packages
import time
### sys relative to root dir
import sys
from os.path import dirname, realpath
sys.path.append(dirname(dirname(dirname(realpath(__file__)))))
### absolute imports wrt root
from codes.utilities.custom_logging import ezLogging
def stopwatch_decorator(func):
    '''
    Decorator that logs how long a call to *func* takes.

    The wrapped function's return value is passed through unchanged; the
    elapsed wall-clock time is logged via ezLogging at INFO level.

    Fix: functools.wraps is now applied so the wrapped function keeps its
    __name__/__doc__ (the log line and introspection previously saw
    "inner" metadata on stacked decorators).
    '''
    from functools import wraps  # local import: module deps unchanged

    @wraps(func)
    def inner(*args, **kwargs):
        start_clock = time.time()
        output = func(*args, **kwargs)
        end_clock = time.time()
        ezLogging.info("Stopwatch - %s took %.2f seconds" % (func.__name__, end_clock-start_clock))
        return output
    return inner
25,248 | 4179ce20a80295b1f0eced4a0846d7cd95ddd414 | #!/usr/bin/python
def outlierCleaner(predictions, ages, net_worths):
    """
    Clean away the 10% of points that have the largest
    residual errors (difference between the prediction
    and the actual net worth).

    Return a list of tuples named cleaned_data where
    each tuple is of the form (age, net_worth, error).

    Fixes: the cutoff was hard-coded as the 9th-largest squared error
    (only correct for a 90-point data set); it is now 90% of the input
    size.  The print uses function-call syntax (works on Python 2 and 3).
    """
    import numpy as np

    residual_error = predictions - net_worths
    squares = np.ravel(residual_error ** 2)
    # Keep the 90% of points with the smallest squared error.
    n_keep = int(len(squares) * 0.9)
    keep = np.sort(np.argsort(squares)[:n_keep])
    cleaned_data = [(ages[j], net_worths[j], residual_error[j]) for j in keep]
    print('Length of cleaned data={}'.format(len(cleaned_data)))
    return cleaned_data
25,249 | 37e90b1d1df8883d07e23b9f608961d3cd0fbe70 | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 23 22:37:17 2020
@author: 54963
"""
import numpy as np
import pandas as pd
import plotly as py
import plotly.express as px
import plotly.graph_objects as go
import matplotlib.pyplot as plt
#from plotly.offline import iplot, plot, init_notebook_mode
# Load the flights data set
flight = pd.read_csv('Flights dataset.csv')
# Basic frame info (left disabled)
#flight.info()
# NOTE(review): head() result is discarded outside a notebook context.
flight.head()
# Parallel-categories plot over all columns of the frame.
fig = px.parallel_categories(flight)
fig.show()
25,250 | 955017fe39ead7e727abc86138c6679f3e138cbb | import time
def fiveprod(num, window=13):
    """Return the largest product of *window* adjacent digits in *num*.

    *num* is a string of digits of length >= *window*.

    Fixes: the loop bound was hard-coded as range(987), which both tied
    the function to a 1000-digit input and skipped the final 13-digit
    window (988 windows exist); the window length 13 is now a parameter
    with the original default.
    """
    top = 0
    for i in range(len(num) - window + 1):
        product = 1
        for digit in num[i:i + window]:
            product *= int(digit)
        if product > top:
            top = product
    return top
def main():
    # Driver: times the 13-digit-window product search over Project Euler
    # problem 8's fixed 1000-digit number and prints the answer.
    # NOTE: Python 2 print statements — run under python2.
    start=time.time()
    # The problem's fixed 1000-digit input.
    num='7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450'
    answer=fiveprod(num)
    elapsed=time.time()-start
    print answer
    print 'Completed in {elapsed} seconds'.format(elapsed=elapsed)
    return True
25,251 | 60ef69ff8d01b5c10627d52b779cea5c40d64d7c | import numpy as np
import keras.backend as K
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import to_categorical
from keras.callbacks import EarlyStopping, LearningRateScheduler, ModelCheckpoint, \
ReduceLROnPlateau, TensorBoard
from keras.datasets import cifar10, mnist, fashion_mnist
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
from models_ibp import SmallCNN, MediumCNN, LargeCNN, LargeCNN_2, \
ScheduleHyperParamCallback, ConstantSchedule, \
InterpolateSchedule, ibp_loss
import math
import argparse
from pathlib import Path
from datetime import datetime
import json
#######################
# Parse configuration #
#######################
parser = argparse.ArgumentParser()
def add_bool_arg(parser, name, default=True):
    """Register paired --<name>/--no_<name> boolean flags on *parser*.

    The two flags are mutually exclusive, both store into dest=*name*,
    and *default* applies when neither is given.
    """
    flag_group = parser.add_mutually_exclusive_group(required=False)
    for prefix, action in (("--", "store_true"), ("--no_", "store_false")):
        flag_group.add_argument(prefix + name, dest=name, action=action)
    parser.set_defaults(**{name: default})
parser.add_argument("model_name", choices=["SmallCNN", "MediumCNN", "LargeCNN", "LargeCNN_2"])
parser.add_argument("dataset", choices=["MNIST", "CIFAR10", "FASHION_MNIST"])
parser.add_argument("eval_epsilon", type=float)
parser.add_argument("train_epsilon", type=float)
# Model config
parser.add_argument("--num_classes", type=int, default=10)
parser.add_argument("--load_weights_from", type=Path)
add_bool_arg(parser, "elide_final_layer", default=False)
# Training
add_bool_arg(parser, "augmentation", default=False)
parser.add_argument("--epochs", type=int, default=100)
parser.add_argument("--initial_epoch", type=int, default=0)
parser.add_argument("--batch_size", type=int, default=100)
parser.add_argument("--lr", type=float, default=1e-3)
parser.add_argument("--lr_schedule", type=str)
parser.add_argument("--k_warmup", type=int, default=0)
parser.add_argument("--k_rampup", type=int, default=20)
parser.add_argument("--epsilon_warmup", type=int, default=0)
parser.add_argument("--epsilon_rampup", type=int, default=20)
parser.add_argument("--min_k", type=float, default=0.5)
parser.add_argument("--validation_size", type=int, default=5000)
parser.add_argument("--set_gpu", type=int)
# Callbacks
add_bool_arg(parser, "early_stop")
parser.add_argument("--early_stop_patience", type=int, default=30)
add_bool_arg(parser, "lr_reduce")
parser.add_argument("--lr_reduce_patience", type=int, default=10)
parser.add_argument("--lr_reduce_factor", type=float, default=math.sqrt(0.1))
parser.add_argument("--lr_reduce_min", type=float, default=1e-6)
config = parser.parse_args()
######################
# Initialise dataset #
######################
if config.dataset == "CIFAR10":
(x_train, y_train), _ = cifar10.load_data()
elif config.dataset == "MNIST":
(x_train, y_train), _ = mnist.load_data()
x_train = np.expand_dims(x_train, axis=-1)
elif config.dataset == "FASHION_MNIST":
(x_train, y_train), _ = fashion_mnist.load_data()
x_train = np.expand_dims(x_train, axis=-1)
else:
raise ValueError("Unrecognised dataset")
# Leave aside a validation set
x_valid = x_train[-config.validation_size:].astype("float32") / 255
y_valid = to_categorical(y_train[-config.validation_size:], num_classes=10)
x_train = x_train[:-config.validation_size].astype("float32") / 255
y_train = to_categorical(y_train[:-config.validation_size], num_classes=10)
# Input image dimensions
input_shape = x_train.shape[1:]
####################
# Initialise model #
####################
# Restrict GPU memory usage
if config.set_gpu is not None:
conf = tf.ConfigProto()
conf.gpu_options.allow_growth = True
conf.gpu_options.visible_device_list = str(config.set_gpu)
sess = tf.Session(config=conf)
set_session(sess)
del config.set_gpu
eps_train_var = K.variable(config.train_epsilon)
eps = K.in_train_phase(K.stop_gradient(eps_train_var), K.constant(config.eval_epsilon))
k_train_var = K.variable(1)
k = K.in_train_phase(K.stop_gradient(k_train_var), K.constant(config.min_k))
if config.augmentation:
mean, std = x_train.mean(axis=(0, 1, 2)), x_train.std(axis=(0, 1, 2)) + 1e-6
x_train = (x_train - mean) / std
x_valid = (x_valid - mean) / std
print("Normalising channels with values", mean, std)
else:
mean, std = None, None
if config.model_name == "SmallCNN":
model = SmallCNN(input_shape=input_shape)
elif config.model_name == "MediumCNN":
model = MediumCNN(input_shape=input_shape)
elif config.model_name == "LargeCNN":
model = LargeCNN(input_shape=input_shape)
elif config.model_name == "LargeCNN_2":
model = LargeCNN_2(input_shape=input_shape)
else:
raise ValueError("Unrecognised model")
def loss(y_true, y_pred):
    # IBP training loss: closes over the module-level model, normalisation
    # stats and the symbolic eps/k schedule variables defined above; the
    # elision flag comes from the CLI config.
    return ibp_loss(y_true, y_pred, model, eps, k, mean=mean, std=std, elision=config.elide_final_layer)
def robust_acc(y_true, y_pred):
    # Keras metric wrapper: ignores its required arguments and reports the
    # robust_accuracy attribute maintained on the model (set in models_ibp
    # — confirm there).
    return model.robust_accuracy
if config.load_weights_from is not None:
model.load_weights(config.load_weights_from)
metrics = ["accuracy", robust_acc]
model.compile(loss=loss, optimizer=Adam(lr=config.lr), metrics=metrics)
model.summary()
##################
# Setup training #
##################
# Prepare model model saving directory
model_type = config.model_name
elision = "elide" if config.elide_final_layer else "no_elide"
model_name = "IBP_%s_%s_train_%.3f_eval_%.3f_%s" % (config.dataset, model_type, config.train_epsilon, config.eval_epsilon, elision)
if not config.load_weights_from:
save_dir = Path("saved_models") / model_name / datetime.now().strftime("%b%d_%H-%M-%S")
if not save_dir.exists():
save_dir.mkdir(parents=True)
else:
save_dir = config.load_weights_from.parent
file_path = save_dir / "weights_{epoch:03d}_{val_robust_acc:.3f}.h5"
# Save config to json
with open(str(save_dir / ("config_%d.json" % config.initial_epoch)), "w") as fp:
json.dump(vars(config), fp, sort_keys=True, indent=4)
# Set up training callbacks
checkpoint = ModelCheckpoint(filepath=str(file_path),
monitor="val_robust_acc",
period=10,
verbose=1)
tensor_board = TensorBoard(log_dir=save_dir,
histogram_freq=0,
batch_size=config.batch_size,
write_graph=True,
write_grads=False,
write_images=False,
update_freq=5000)
tensor_board.samples_seen = config.initial_epoch * len(x_train)
tensor_board.samples_seen_at_last_write = config.initial_epoch * len(x_train)
callbacks = [checkpoint, tensor_board]
if config.lr_schedule is not None:
    # Parse "lr@epoch,lr@epoch,..." into [(rate, epoch), ...] pairs.
    chunks = config.lr_schedule.split(",")
    schedule = [(float(lr), int(epoch)) for (lr, epoch) in [c.split("@") for c in chunks]]

    def scheduler(epoch, current_lr):
        # Step schedule: use the rate of the last milestone at or before
        # *epoch*; before the first milestone fall back to config.lr.
        # NOTE(review): the early break assumes milestones are listed in
        # ascending epoch order — confirm callers respect that.
        lr = config.lr
        for (rate, e) in schedule:
            if epoch >= e:
                lr = rate
            else:
                break
        return lr

    callbacks.insert(0, LearningRateScheduler(scheduler, verbose=1))
if config.lr_reduce:
callbacks.insert(0, ReduceLROnPlateau(monitor="val_loss",
factor=config.lr_reduce_factor,
cooldown=0,
patience=config.lr_reduce_patience,
min_lr=config.lr_reduce_min,
verbose=1))
if config.early_stop:
callbacks.insert(0, EarlyStopping(monitor="val_loss",
patience=config.early_stop_patience,
verbose=1))
if config.epsilon_rampup > 0:
start = config.epsilon_warmup * len(x_train)
end = start + config.epsilon_rampup * len(x_train)
eps_schedule = InterpolateSchedule(0, config.train_epsilon, start, end)
callbacks.insert(0, ScheduleHyperParamCallback(name="epsilon",
variable=eps_train_var,
schedule=eps_schedule,
update_every=1000,
verbose=0))
if config.k_rampup > 0:
start = config.k_warmup * len(x_train)
end = start + config.k_rampup * len(x_train)
k_schedule = InterpolateSchedule(1, config.min_k, start, end)
callbacks.insert(0, ScheduleHyperParamCallback(name="k",
variable=k_train_var,
schedule=k_schedule,
update_every=1000,
verbose=0))
# Run training, with or without data augmentation.
if not config.augmentation:
print('Not using data augmentation.')
model.fit(x_train, y_train,
validation_data=(x_valid, y_valid),
epochs=config.epochs,
initial_epoch=config.initial_epoch,
batch_size=config.batch_size,
callbacks=callbacks)
else:
print('Using real-time data augmentation.')
shift = 4 if config.dataset == "CIFAR10" else 2
# This will do preprocessing and realtime data augmentation:
datagen = ImageDataGenerator(
# randomly rotate images in the range (deg 0 to 30)
# rotation_range=30,
# randomly shift images horizontally
width_shift_range=shift,
# randomly shift images vertically
height_shift_range=shift,
# set mode for filling points outside the input boundaries
fill_mode="constant" if config.dataset == "CIFAR10" else "nearest",
cval=0,
# randomly flip images
horizontal_flip=True)
# Fit the model on the batches generated by datagen.flow().
model.fit_generator(datagen.flow(x_train, y_train, batch_size=config.batch_size),
validation_data=(x_valid, y_valid), steps_per_epoch=(len(x_train) / config.batch_size),
epochs=config.epochs, initial_epoch=config.initial_epoch,
verbose=1, workers=4, callbacks=callbacks)
|
25,252 | 22ce4a54977889aaed694bcb2a221f01e93ad96e | def lengthOfBigWord(sent):
#1 use a split function to get a list of words
words = sent.split(" ")
#2 use a loop to compare word sizes
largest_word = words[0]
for word in words:
if len(word) > len(largest_word):
largest_word = word
#3 return largest word
return print(largest_word)
sentence = "We'll start with an overview of how machine learning models work and how they are used. This may feel basic if you've done statistical modeling or machine learning before. Don't worry, we will progress to building powerful models soon."
lengthOfBigWord(sentence) |
# Static lookup table of gaming platforms.
# Each entry maps: id (scraper/database platform id), name (display name),
# shortcode (emulator/system short name) and alias (URL slug).
_3do = {'id':25, 'name':'3DO', 'shortcode':'3do','alias':'3do' }
_amiga = {'id':4911, 'name':'Amiga', 'shortcode':'amiga' ,'alias':'amiga' }
_amstrad = {'id':4914, 'name':'Amstrad CPC', 'shortcode':'amstrad' ,'alias':'amstrad-cpc' }
_android = {'id':4916, 'name':'Android', 'shortcode':'android' ,'alias':'android' }
# NOTE: _mame4all and _advmame share id 23 (both are "Arcade" variants).
_mame4all = {'id':23, 'name':'Arcade', 'shortcode':'MAME4ALL' ,'alias':'arcade' }
_advmame = {'id':23, 'name':'Arcade', 'shortcode':'AdvMAME' ,'alias':'arcade' }
_2600 = {'id':22, 'name':'Atari 2600' , 'shortcode':'Atari 2600' ,'alias':'atari-2600' }
_5200 = {'id':26, 'name':'Atari 5200', 'shortcode':'Atari 5200' ,'alias':'atari-5200' }
_7800 = {'id':27, 'name':'Atari 7800', 'shortcode':'Atari 7800' ,'alias':'atari-7800' }
# NOTE: _fba and _neogeo share id 24.
_fba = {'id':24, 'name':'Arcade', 'shortcode':'Final Burn' ,'alias':'fba' }
_jaguar = {'id':28, 'name':'Atari Jaguar', 'shortcode':'Atari Jaguar' ,'alias':'atari-jaguar' }
_jaguar_cd = {'id':29, 'name':'Atari Jaguar CD', 'shortcode':'Atari Jaguar CD' ,'alias':'atari-jaguar-cd' }
_lynx = {'id':4924, 'name':'Atari Lynx', 'shortcode':'Atari Lynx' ,'alias':'atari-lynx' }
_xe = {'id':30, 'name':'Atari XE', 'shortcode':'Atari XE' ,'alias':'atari-xe' }
_colecovision = {'id':31, 'name':'Colecovision', 'shortcode':'Colecovision' ,'alias':'colecovision' }
_commodore64 = {'id':40, 'name':'Commodore 64', 'shortcode':'Commodore 64' ,'alias':'commodore-64' }
_intellivision = {'id':32, 'name':'Intellivision', 'shortcode':'Intellivision' ,'alias':'intellivision' }
_ios = {'id':4915, 'name':'iOS', 'shortcode':'iOS' ,'alias':'ios' }
_mac = {'id':37, 'name':'Mac OS', 'shortcode':'Mac' ,'alias':'mac-os' }
_xbox = {'id':14, 'name':'Microsoft Xbox', 'shortcode':'xbox' ,'alias':'microsoft-xbox' }
_360 = {'id':15, 'name':'Microsoft Xbox 360', 'shortcode':'xbox 360' ,'alias':'microsoft-xbox-360' }
_xb1 = {'id':4920, 'name':'Microsoft Xbox One', 'shortcode':'xbox one' ,'alias':'microsoft-xbox-one' }
_neogeo_pocket = {'id':4922, 'name':'Neo Geo Pocket', 'shortcode':'Neo Geo pocket' ,'alias':'neo-geo-pocket' }
_neogeo_pocket_color = {'id':4923, 'name':'Neo Geo Pocket Color', 'shortcode':'Neo Geo Pocket Color' ,'alias':'neo-geo-pocket-color' }
_neogeo = {'id':24, 'name':'NeoGeo', 'shortcode':'Neo Geo' ,'alias':'neogeo' }
_3ds = {'id':4912, 'name':'Nintendo 3DS', 'shortcode':'Nintendo 3DS' ,'alias':'nintendo-3ds' }
_n64 = {'id':3, 'name':'Nintendo 64', 'shortcode':'Nintendo 64' ,'alias':'nintendo-64' }
_ds = {'id':8, 'name':'Nintendo DS', 'shortcode':'Nintendo DS' ,'alias':'nintendo-ds' }
_nes = {'id':7, 'name':'Nintendo Entertainment System (NES)', 'shortcode':'NES' ,'alias':'nintendo-entertainment-system-nes' }
_gameboy = {'id':4, 'name':'Nintendo Game Boy', 'shortcode':'Gameboy' ,'alias':'nintendo-gameboy' }
_gba = {'id':5, 'name':'Nintendo Game Boy Advance', 'shortcode':'Gameboy Advance' ,'alias':'nintendo-gameboy-advance' }
_gbc = {'id':41, 'name':'Nintendo Game Boy Color', 'shortcode':'GBC' ,'alias':'nintendo-gameboy-color' }
_gamecube = {'id':2, 'name':'Nintendo GameCube', 'shortcode':'GC' ,'alias':'nintendo-gamecube' }
_nvb = {'id':4918, 'name':'Nintendo Virtual Boy', 'shortcode':'Virtual Boy' ,'alias':'nintendo-virtual-boy' }
_wii = {'id':9, 'name':'Nintendo Wii', 'shortcode':'Wii' ,'alias':'nintendo-wii' }
_wii_u = {'id':38, 'name':'Nintendo Wii U', 'shortcode':'Wii U' ,'alias':'nintendo-wii-u' }
_ouya = {'id':4921, 'name':'Ouya', 'shortcode':'Ouya' ,'alias':'ouya' }
_pc = {'id':1, 'name':'PC', 'shortcode':'Computer' ,'alias':'pc' }
_phillips = {'id':4917, 'name':'Philips CD-i', 'shortcode':'Phillips CD-i' ,'alias':'philips-cd-i' }
_32x = {'id':33, 'name':'Sega 32X', 'shortcode':'32x' ,'alias':'sega-32x' }
_sega_cd = {'id':21, 'name':'Sega CD', 'shortcode':'Sega CD' ,'alias':'sega-cd' }
_dreamcast = {'id':16, 'name':'Sega Dreamcast', 'shortcode':'Dreamcast' ,'alias':'sega-dreamcast' }
_game_gear = {'id':20, 'name':'Sega Game Gear', 'shortcode':'Game Gear' ,'alias':'sega-game-gear' }
_genesis = {'id':18, 'name':'Sega Genesis', 'shortcode':'Genesis' ,'alias':'sega-genesis' }
_master_system = {'id':35, 'name':'Sega Master System', 'shortcode':'Sega Master' ,'alias':'sega-master-system' }
_mega_drive = {'id':36, 'name':'Sega Mega Drive', 'shortcode':'Mega Drive' ,'alias':'sega-mega-drive' }
_saturn = {'id':17, 'name':'Sega Saturn', 'shortcode':'Saturn' ,'alias':'sega-saturn' }
_sinclair = {'id':4913, 'name':'Sinclair ZX Spectrum', 'shortcode':'ZX Spectrum' ,'alias':'sinclair-zx-spectrum' }
_ps1 = {'id':10, 'name':'Sony Playstation', 'shortcode':'Playstation 1' ,'alias':'sony-playstation' }
_ps2 = {'id':11, 'name':'Sony Playstation 2', 'shortcode':'PS2' ,'alias':'sony-playstation-2' }
_ps3 = {'id':12, 'name':'Sony Playstation 3', 'shortcode':'PS3' ,'alias':'sony-playstation-3' }
_ps4 = {'id':4919, 'name':'Sony Playstation 4', 'shortcode':'PS4' ,'alias':'sony-playstation-4' }
_vita = {'id':39, 'name':'Sony Playstation Vita', 'shortcode':'Vita' ,'alias':'sony-playstation-vita' }
_psp = {'id':13, 'name':'Sony PSP', 'shortcode':'PSP' ,'alias':'sony-psp' }
_snes = {'id':6, 'name':'Super Nintendo (SNES)', 'shortcode':'SNES' ,'alias':'super-nintendo-snes' }
_turbografx = {'id':34, 'name':'TurboGrafx 16', 'shortcode':'Turbo Graphix 16' ,'alias':'turbografx-16' }
_swan = {'id':4925, 'name':'WonderSwan', 'shortcode':'wonderswan' ,'alias':'wonderswan' }
_swan_color = {'id':4926, 'name': 'WonderSwan Color', 'shortcode':'Wonderswan Color' ,'alias':'wonderswan-color' }
_scummvm = {'id': 99999, 'name': 'Scumm VM', 'shortcode': 'scummvm', 'alias': 'scumm-vm' }
# All known platforms.
# NOTE(review): _scummvm is defined above but not included in full_list —
# confirm whether that omission is intentional.
full_list = [_3do, _amiga,_amstrad, _android, _mame4all, _advmame, _2600,
             _5200, _7800, _fba, _jaguar, _jaguar_cd, _lynx, _xe, _colecovision,
             _commodore64, _intellivision, _ios, _mac, _xbox, _360, _xb1,
             _neogeo_pocket, _neogeo_pocket_color, _neogeo, _3ds, _n64,
             _ds, _nes, _gameboy, _gba, _gbc, _gamecube, _nvb, _wii,
             _wii_u, _ouya, _pc, _phillips, _32x, _sega_cd, _dreamcast,
             _game_gear, _genesis, _master_system, _mega_drive, _saturn,
             _sinclair, _ps1, _ps2, _ps3, _ps4, _vita, _psp, _snes, _turbografx,
             _swan, _swan_color]
25,254 | 2369790b956efe0902ae39785448d1d22f9ee11c | from flask import Response
import jsonpickle
def api_response(result):
    """Serialize *result* to JSON via jsonpickle and wrap it in a Flask Response.

    ``unpicklable=False`` emits plain JSON (no py/object markers); the
    Content-Type header is set explicitly to ``application/json``.
    """
    body = jsonpickle.encode(result, unpicklable=False)
    resp = Response(body)
    resp.headers['Content-Type'] = 'application/json'
    return resp
25,255 | 63b10b2417f1b1e0c81dc890c52a0f5ca3538161 |
from cyres import *
import numpy as np
# Demo: minimize f(x) = x^8 with Ceres' gradient-based solver via the cyres bindings.
# FirstOrderFunction(num_parameters, cost_fn, gradient_fn): the cost is x[0]**8
# and the analytic gradient is 8*x[0]**7.
ff = FirstOrderFunction(1, lambda x: x[0]**8, lambda x: np.array([8*x[0]**7], dtype=np.float64))
print(ff.evaluate(np.array([2], dtype=np.float64)))  # sanity check: 2**8 = 256
prob = GradientProblem(ff)
options = GradientProblemSolverOptions()
solver = GradientProblemSolver()
# Starting point x0 = 10; `init` is presumably updated in place with the
# minimizer (hence the final print) — behaviour of cyres' solve().
init = np.array([10], dtype=np.float64)
summary = solver.solve(options, prob, init)
print(summary.fullReport())
print(init)
|
25,256 | f1e1e142b08593700d648365071cfc2a9423ae29 | from __future__ import unicode_literals
from os import path
import os
import shutil
import pytest
import headers_workaround
def dir_exists(directory):
    """Return True iff *directory* exists and is a directory."""
    if not path.exists(directory):
        return False
    return path.isdir(directory)
def file_exists(loc):
    """Return True iff *loc* exists and is not a directory."""
    if path.isdir(loc):
        return False
    return path.exists(loc)
def local_path(filename):
    """Return the path of *filename* relative to this module's directory."""
    here = path.dirname(__file__)
    return path.join(here, filename)
@pytest.fixture
def headers_dir():
    """Provide an empty ``headers_dir`` next to this file, recreated per use."""
    target = local_path('headers_dir')
    if path.exists(target):
        # a leftover from a previous run must be a directory; wipe it
        assert path.isdir(target)
        shutil.rmtree(target)
    os.mkdir(target)
    return target
def test_numpy(headers_dir):
    """install_headers should copy numpy's C headers into the fixture directory."""
    headers_workaround.install_headers('numpy', include_dir=headers_dir)
    assert dir_exists(headers_dir)
    assert dir_exists(path.join(headers_dir, 'numpy'))
    # Test some arbitrary files --- if any break, add them to the test later...
    assert file_exists(path.join(headers_dir, 'numpy', 'ndarrayobject.h'))
    assert file_exists(path.join(headers_dir, 'numpy', 'npy_endian.h'))
    assert file_exists(path.join(headers_dir, 'numpy', 'npy_math.h'))
def test_murmurhash(headers_dir):
    """install_headers should copy murmurhash's C headers into the fixture directory."""
    headers_workaround.install_headers('murmurhash', include_dir=headers_dir)
    assert dir_exists(headers_dir)
    assert dir_exists(path.join(headers_dir, 'murmurhash'))
    assert file_exists(path.join(headers_dir, 'murmurhash', 'MurmurHash2.h'))
    assert file_exists(path.join(headers_dir, 'murmurhash', 'MurmurHash3.h'))
|
25,257 | 2762d21207a02a2827c4e903d3e33d47ac662170 | /Users/lishixuan/anaconda/lib/python3.6/re.py |
25,258 | a0f27abaacb433fe901f4956b21e59b386ef360e | import logging
import json
from collections import defaultdict
from services.background_worker import BackgroundWorker
from multiprocessing import Queue
from entities.message import Message
class ChangeUnitsWorker(BackgroundWorker):
    """Background worker converting metric units in messages to imperial.

    Reads a JSON payload with optional 'temperature' (Celsius) and/or
    'distance' (km) keys and emits a message on topic 'Output' with the
    converted Fahrenheit / miles values.
    """

    def __init__(self,
                 input_queue: Queue = None,
                 output_queue: Queue = None,
                 sleep_time: float = 0.01):
        super().__init__(input_queue, output_queue, sleep_time)

    def _target(self, message: Message):
        """Convert one message; malformed payloads are logged and dropped."""
        try:
            input_payload = json.loads(message.payload)
            payload = {}
            # `is not None` instead of truthiness: 0 degC and 0 km are valid
            # inputs and were previously (incorrectly) skipped.
            if input_payload.get('temperature') is not None:
                payload['temperature'] = self.to_fahrenheit(input_payload['temperature'])
            if input_payload.get('distance') is not None:
                payload['distance'] = self.km_to_miles(input_payload['distance'])
            if payload:
                self._output_queue.put(
                    Message(topic='Output', payload=json.dumps(payload).encode()))
        except Exception as e:
            # best-effort worker: never let one bad message kill the loop
            logging.error(e)

    @staticmethod
    def to_fahrenheit(celsius: float = 0) -> float:
        """Celsius -> Fahrenheit."""
        return celsius * 1.8 + 32

    @staticmethod
    def km_to_miles(kilometers: int) -> float:
        """Kilometres -> miles (coarse 0.62 factor kept for compatibility)."""
        return kilometers * 0.62
25,259 | b4009fbc4ba65cdc6196815494fe29fdb7385d1b | import csv
import json
import pandas as pd
# Average the Value column per TIME bucket and dump [time, mean] rows to JSON.
csv_data = "data.csv"
# NOTE: a second, never-closed handle used to be opened here (`open(csv_data)`)
# and was never used; pandas manages the file itself, so it is removed.
df = pd.read_csv(csv_data)
df = df.groupby('TIME')["Value"].agg("mean")
jsonfile = json.loads(df.to_json(orient="index"))
# Re-shape {time: mean} into [[time, mean], ...] rows.
# (Previously an index loop over parallel key/value lists that also
# shadowed the builtin `set`.)
data = [[time_key, mean_value] for time_key, mean_value in jsonfile.items()]
headers = ["Time", "Value"]
df = pd.DataFrame(data, columns=headers)
jsonfile = json.loads(df.to_json(orient="records"))
print(jsonfile)
with open('Week4data.json', 'w') as outfile:
    json.dump(jsonfile, outfile)
25,260 | 82e5ac358cb9947c80bda6409eeeb649ec982660 | from django.shortcuts import render, get_object_or_404
from django.views.generic import ListView
from web_search.models import Overview, ItemForm, Spec_item, Offer_detail
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.views.generic import DetailView
import exe_query as query
import json
# Create your views here.
def input_item(request):
    """Echo back the requested item name from the query string."""
    item_name = request.GET['item_name']
    return HttpResponse('will be showing item ' + item_name)
def search_item(request):
    """Show the item-spec page for a valid GET query, otherwise the search form.

    An invalid GET keeps the bound form (so validation errors are available);
    a non-GET request gets a fresh unbound form.
    """
    if request.method != 'GET':
        unbound = ItemForm()
        return render(request, 'web_search/index.html', {'form': unbound,})
    bound = ItemForm(request.GET)
    if bound.is_valid():
        return render(request, 'web_search/specs_item.html', {'form': bound,})
    return render(request, 'web_search/index.html', {'form': bound,})
class show_result(ListView):
    """Search-results view: queries the external API, caches category ids in a cookie.

    Python 2 / old Django code (print statements, COOKIES.has_key).
    """
    model = Overview
    template_name = 'web_search/result.html'
    context_object_name = 'overview'
    # NOTE(review): class-level mutable list is shared across requests/instances.
    refer_list = []
    def get(self, *args, **kwargs):
        # Load the 'refer' cookie (recently viewed category ids) before querying.
        if (not self.request.COOKIES.has_key('refer')):
            self.refer_list = []
        else:
            # NOTE(review): assigns a LOCAL `refer_list`, not self.refer_list —
            # the cookie value is only used to reset on empty string.
            refer_list = self.request.COOKIES['refer']
            if refer_list == '':
                self.refer_list = []
        self.object_list = self.get_queryset()
        allow_empty = self.get_allow_empty()
        if not allow_empty and len(self.object_list) == 0:
            # NOTE(review): `_` (gettext) is never imported, and the message
            # has a typo ("Flase") and a malformed %(...)s placeholder —
            # this raise would itself crash with NameError.
            raise Http404(_(u"Empty list and '%(class_name's.allow_empty' is Flase.") % {'class_name':self.__class__.__name__})
        context = self.get_context_data(object_list = self.object_list)
        response = self.render_to_response(context)
        # Assign a fixed id cookie on first visit; persist the refer list.
        if (self.request.COOKIES.has_key('id')):
            print self.request.COOKIES['id']
        else:
            response.set_cookie('id', '1234556')
        response.set_cookie('refer', self.refer_list)
        print "cookie %s"%(self.refer_list)
        return response
    def get_queryset(self):
        # Collect sem3 ids from the external query, remembering category ids.
        # NOTE(review): `list` shadows the builtin.
        list = []
        #print type(self.refer_list)
        if (self.request.GET):
            form = ItemForm(self.request.GET)
            if form.is_valid():
                item_name = form.cleaned_data['item_name']
                #print item_name
                code, result_list, result_count = query.make_query(item_name)
                for i in range(result_count):
                    deal = result_list[i]
                    result = query.send_to_database(deal)
                    list.append(result)
                    #self.refer_list = result
                    if not deal['cat_id'] in self.refer_list:
                        self.refer_list.append(deal['cat_id'])
                    print result
                return Overview.objects.filter(sem3_id__in = list)
        elif self.refer_list != []:
            # Fall back to the cookie-stored categories.
            # NOTE(review): convert_cookie is defined on `index`, not on this
            # class or ListView — this call would raise AttributeError.
            refer = self.convert_cookie(self.request.COOKIES['refer'])
            refer_list = Overview.objects.filter(cat_id__in = refer)
            return Overview.objects.filter(sem3_id__in = refer_list)
    def get_context_data(self, ** kwargs):
        context = super(show_result, self).get_context_data(**kwargs)
        return context
class index(ListView):
    """Landing page: shows up to 12 Overview items, biased by the 'refer' cookie."""
    model = Overview
    template_name = 'web_search/index.html'
    context_object_name = 'offer'
    def convert_cookie(self, cookie):
        # Parse the stringified Python list stored in the cookie, e.g.
        # "[u'123', u'456']" -> ['123', '456'].
        temp = cookie.replace('[', '')
        temp = temp.replace(']', '')
        # NOTE(review): this strips EVERY 'u' character, not just the unicode
        # prefix — any id/value containing the letter 'u' gets corrupted.
        temp = temp.replace('u', '')
        temp = temp.replace('\'', '')
        temp = temp.replace(' ', '')
        temp = temp.split(',')
        return temp
    def get_queryset(self):
        # NOTE(review): `list` shadows the builtin and is unused below.
        list = []
        #print type(self.refer_list)
        #
        #
        #         form = ItemForm(self.request.GET)
        #         if form.is_valid():
        #             item_name = form.cleaned_data['item_name']
        #             #print item_name
        #             code, result_list, result_count = query.make_query(item_name)
        #
        #             for i in range(result_count):
        #                 deal = result_list[i]
        #                 result = query.send_to_database(deal)
        #                 list.append(result)
        #                 self.refer_list = [result]
        #             print "there"
        #             return Offer_detail.objects.filter(sem3_id__in = list)
        # else:
        try:
            # Prefer items from the cookie-recorded categories.
            refer = self.convert_cookie(self.request.COOKIES['refer'])
            print "here"
            refer_list = Overview.objects.filter(cat_id__in = refer)[:12]
            return refer_list
        except:
            # No/invalid cookie: show any 12 items.
            # NOTE(review): bare except also swallows programming errors.
            print "there"
            return Overview.objects.all()[:12]
    def get_context_data(self, ** kwargs):
        context = super(index, self).get_context_data(**kwargs)
        offer = context['offer']
        print type(offer)
        # Build string indices "0".."len-2" for the template.
        # NOTE(review): range(0, len(offer)-1) drops the last item — off-by-one?
        offer_count = []
        for i in range(0, len(offer)-1):
            offer_count.append(str(i))
        context['offer_count'] = offer_count
        return context
class show_offer(ListView):
    """List all Offer_detail rows for the sem3 id given in the URL (`pk`)."""
    model = Offer_detail
    template_name = 'web_search/offers.html'
    context_object_name = 'offers'
    def get_queryset(self):
        temp = Offer_detail.objects.filter(sem3_id = self.kwargs['pk'])
        print temp
        return temp
class show_spec_item(DetailView):
    """Detail view of one Spec_item; flattens its JSON `features` for the template."""
    model = Spec_item
    template_name = 'web_search/specs_item.html'
    context_object_name = 'specs_item'
    def get_context_data(self, **kwargs):
        print type(self)
        context = super(show_spec_item, self).get_context_data(**kwargs)
        features = get_object_or_404(Spec_item, sem3_id = self.kwargs[self.pk_url_kwarg])
        # features.features is a JSON object; only its values are shown.
        temp = (json.loads(features.features))
        # NOTE(review): `list` shadows the builtin.
        list = []
        for a in temp:
            list.append(temp[a])
        context['features'] = list
        return context
|
25,261 | 99a02cafd5e5d4e2e8f4174c7fd1564da71b126a | import random
import os
from player import Player, Computer
from card import Card
from abilities_constants import *
from constants import *
from deck import (
PlayerStartDeck,
testDeck,
RealDeck,
print_card_attrs,
persistant_game_hand,
)
from shuffle_mixin import ShuffleGameCardMixin
from print_mixin import PrintMixin
from abilities_mixin import AbilitiesMixin
from input_mixin import InputMixin
from colors import (
print_yellow, print_red, print_blue,
print_green, print_purple,print_color_table,
)
# created for mixin use
# mixin's are used here for code organization
class BaseGame(object):
    """Core deck-building game state: shared decks, hands, discards and
    card lookup by instance id (iid). Python 2 code (xrange)."""

    # class-level defaults; most are shadowed by instance attributes in __init__
    card = None
    points = 0
    turn = 0 # player's turn
    game_active = True
    debug = False
    selected_card = None #card being banished, copied, etc. Used for testing, not displayed
    debug_counter = 0
    active_card = None
    extra_turn = False
    round = 0
    num_turns = 0

    def __init__(self, points, players=None, deck=None):
        self.log = {}
        self.players = []
        self.points = points
        self.discard = []
        self.hand = []
        # doing some weird stuff with next_iid so we have to init phand and
        # deck before this
        self.phand = []
        self.deck = []
        self.played_user_cards = []
        self.active_card = []
        self.test_deck = test_deck()
        self.deck = deck or self.test_deck
        # make 50 copies of each game persistent and then put them in the
        # game's persistent hand
        self.buy_3_deck = []
        self.buy_2_deck = []
        self.kill_1_deck = []
        for c in persistant_game_hand:
            for i in xrange(50):
                if c['cid'] == STARTING_CARD_BUY_3:
                    self.buy_3_deck.append(Card(iid=self.next_iid, **c))
                elif c['cid'] == STARTING_CARD_BUY_2:
                    self.buy_2_deck.append(Card(iid=self.next_iid, **c))
                elif c['cid'] == STARTING_CARD_KILL_1:
                    self.kill_1_deck.append(Card(iid=self.next_iid, **c))
        self.phand.append(self.buy_3_deck.pop())
        self.phand.append(self.buy_2_deck.pop())
        self.phand.append(self.kill_1_deck.pop())
        # players must come after deck and phand is created
        self.players = players or test_players(game=self)
        self.init_player_decks()
        # all iid's must be assigned before shuffling
        self.shuffle_deck()
        self.new_hand()
        # tokens are used to override player's buy or kill powers,
        # or add special abilities
        self.token = {}
        self.used_tokens = {}
        # token erasers denote when to erase each token
        self.token_erasers = {}
        self.actions = ACTION_NORMAL

    @property
    def next_iid(self):
        # next unique card id: derived from the count of cards currently
        # tracked across the game decks and every player's deck
        cnt = len(self.deck) + len(self.buy_3_deck) + len(self.buy_2_deck) + len(self.kill_1_deck) + len(self.phand)
        for p in self.players:
            cnt += len(p.deck)
        return cnt

    @property
    def hand_iids(self):
        # instance ids of the shared hand (for logging/snapshots)
        return [c.iid for c in self.hand]

    @property
    def phand_iids(self):
        return [c.iid for c in self.phand]

    @property
    def discard_iids(self):
        return [c.iid for c in self.discard]

    @property
    def played_user_cards_iids(self):
        return [c.iid for c in self.played_user_cards]

    def get_card_by_iid(self, iid):
        """Linear scan of every zone (game + all players) for a card by iid;
        returns None when no card matches."""
        if self.selected_card:
            if self.selected_card.iid == iid:
                return self.selected_card
        for c in self.deck:
            if c.iid == iid:
                return c
        for c in self.hand:
            if c.iid == iid:
                return c
        for c in self.phand:
            if c.iid == iid:
                return c
        for c in self.discard:
            if c.iid == iid:
                return c
        for c in self.played_user_cards:
            if c.iid == iid:
                return c
        for c in self.active_card:
            if c.iid == iid:
                return c
        for p in self.players:
            for c in p.deck:
                if c.iid == iid:
                    return c
            for c in p.phand:
                if c.iid == iid:
                    return c
            for c in p.hand:
                if c.iid == iid:
                    return c
            for c in p.discard:
                if c.iid == iid:
                    return c
        return None

    def init_player_decks(self):
        for p in self.players:
            p.init_deck(self)
class Game(
    BaseGame,
    ShuffleGameCardMixin,
    PrintMixin,
    InputMixin,
    AbilitiesMixin
):
    """Gameplay layer on top of BaseGame: turn order, tokens, card play
    and the main game loop. Python 2 code (print statements, iteritems)."""

    def next_player_turn(self):
        """change player, get new hand, and start turn"""
        if self.extra_turn:
            # same player acts again; just reset the played pile
            self.played_user_cards = []
            self.active_player.start_turn()
            self.extra_turn = False
            return
        self.turn += 1
        self.num_turns += 1
        if self.turn >= len(self.players):
            self.turn = 0
        self.played_user_cards = []
        self.active_player.start_turn()

    @property
    def active_player(self):
        # the player whose turn it currently is
        return self.players[self.turn]

    def set_token(self, kind, value, end):
        # numeric tokens accumulate; everything else is overwritten
        if isinstance( value, (int, long)) and self.token.get(kind):
            self.token[kind] += value
            self.token_erasers[kind] = end
        else:
            self.token[kind] = value
            self.token_erasers[kind] = end

    def remove_token(self, token):
        # drop a token (no-op if absent) and re-mark card eligibility
        try:
            del self.token[token]
            del self.token_erasers[token]
        except KeyError:
            pass
        self.check_cards_eligibility()

    def use_token(self, token):
        # consume a token: record it in used_tokens, then remove it
        try:
            self.used_tokens[token] = self.token[token]
            self.remove_token(token)
        except KeyError:
            pass

    def check_tokens_for_use_once(self):
        # clear out tokens that are use once
        to_delete = []
        for k, v in self.token_erasers.iteritems():
            if v == END_OF_ACTION:
                del self.token[k]
                to_delete.append(k)
        for t in to_delete:
            del self.token_erasers[t]

    def change_action(self, actions):
        # switch the set of allowed actions and refresh card eligibility
        print 'CHANGING ACTION'
        print 'from:', ','.join([ACTION_DICT[a] for a in self.actions])
        self.actions = actions
        print 'to:', ','.join([ACTION_DICT[a] for a in self.actions])
        self.check_cards_eligibility()
        print 'game tokens', self.token

    def play_abilities(self, card):
        # dispatch the card's ability handler (if any) then return to normal mode
        if not card.abilities:
            self.selected_card = None
            self.change_action(ACTION_NORMAL)
            return
        self.selected_card = None
        getattr(self,ABILITY_MAP.get(card.abilities))(card=card)
        self.change_action(ACTION_NORMAL)

    def check_tokens_for_card_played(self, card):
        # token-triggered bonuses that fire when matching cards are played
        if card.in_faction(self, MECHANA) and card.card_type == CARD_TYPE_PERSISTENT:
            if PER_TURN_WHEN_PLAY_MECHANA_CONSTRUCT_DRAW_1_INCLUDING_THIS_ONE in self.token:
                self.draw_1()
                self.use_token(PER_TURN_WHEN_PLAY_MECHANA_CONSTRUCT_DRAW_1_INCLUDING_THIS_ONE)
        if card.in_faction(self, LIFEBOUND):
            if (
                PER_TURN_PLUS_1_BUY_FIRST_LIFEBOUND_HERO_PLUS_1_POINT in self.token and
                card.card_type == CARD_TYPE_HERO
            ):
                self.active_player.points += 1
                self.use_token(PER_TURN_PLUS_1_BUY_FIRST_LIFEBOUND_HERO_PLUS_1_POINT)

    # XXX not unit tested
    def play_all_user_cards(self, selection):
        if len(self.active_player.hand) == 0:
            print_red('No cards left to play')
        os.system(['clear','cls'][os.name == 'nt'])
        # play all cards until there are no more
        while self.active_player.hand:
            self.play_user_card(selection='c0')

    def check_cards_eligibility(self):
        """go through each card and mark eligiblity for current actions"""
        for c in self.hand:
            c.check_actions(self)
        for c in self.phand:
            c.check_actions(self)
        for c in self.discard:
            c.check_actions(self)
        for c in self.active_player.phand:
            c.check_actions(self)
        for c in self.active_player.hand:
            c.check_actions(self)
        for c in self.active_player.discard:
            c.check_actions(self)
        for c in self.played_user_cards:
            c.check_actions(self)
        if ACTION_KEEP in self.actions:
            # KEEP mode also considers every player's cards
            for p in self.players:
                for c in p.phand:
                    c.check_actions(self)
                for c in p.hand:
                    c.check_actions(self)
                for c in p.discard:
                    c.check_actions(self)

    def play_user_card_effects(self, card):
        # apply the card's instant effects, then its abilities/token triggers
        self.active_player.killing_power += card.instant_kill
        self.active_player.buying_power += card.instant_buy
        self.active_player.points += card.instant_worth
        self.points -= card.instant_worth
        if self.points < 0:
            self.points = 0
        print_blue('PLAYED CARD %s' % card)
        self.play_abilities(card)
        self.check_tokens_for_card_played(card)

    def log_action(self, card, deck, action, iid):
        # append a snapshot of the current game state to self.log[round][turn]
        if self.round not in self.log:
            self.log[self.round] = {}
        if self.turn not in self.log[self.round]:
            self.log[self.round][self.turn] = []
        print self.round, self.turn
        self.log[self.round][self.turn].append({
            'game_actions': self.actions,
            'performed_action': (str(card), deck, action, iid),
            'points': self.active_player.points,
            'killing_power': self.active_player.killing_power,
            'buying_power': self.active_player.buying_power,
            'tokens': self.token,
            'player_hand': self.active_player.hand_iids,
            'player_discard': self.active_player.discard_iids,
            'player_phand': self.active_player.phand_iids,
            'hand': self.hand_iids,
            'discard': self.discard_iids,
            'played_user_cards': self.played_user_cards_iids,
            'readable_action': '%s on %s' % (ACTION_DICT.get(action), card),
        })

    def player_can_do_actions(self):
        # debug prints showing which zones still contain eligible cards ...
        if any([c.eligible(self) for c in self.active_player.hand]):
            print 'playerhand', self.active_player.hand
        if any([c.eligible(self) for c in self.active_player.phand]):
            print 'playerphand', self.active_player.phand
        if any([c.eligible(self) for c in self.active_player.discard]):
            print 'discard', self.active_player.discard
        if any([c.eligible(self) for c in self.hand]):
            print 'hand', self.hand
        if any([c.eligible(self) for c in self.phand]):
            print 'phand', self.phand
        if any([c.eligible(self) for c in self.discard]):
            print 'discard', self.discard
        if any([c.eligible(self) for c in self.played_user_cards]):
            print 'playedcards', self.played_user_cards
            for c in self.played_user_cards:
                print c.actions
        # ... then report whether ANY zone has an eligible card
        return (any([c.eligible(self) for c in self.active_player.hand]) or
                any([c.eligible(self) for c in self.active_player.phand]) or
                any([c.eligible(self) for c in self.active_player.discard]) or
                any([c.eligible(self) for c in self.discard]) or
                any([c.eligible(self) for c in self.hand]) or
                any([c.eligible(self) for c in self.phand]) or
                any([c.eligible(self) for c in self.played_user_cards])
                )

    def player_loop(self):
        # one step of the main loop: let the active player act, then check game over
        print_red('remaining points %s' % self.points)
        if self.active_player.active:
            self.normal_action()
        else:
            self.next_player_turn()
        if self.points <= 0:
            self.game_active = False
            print_red('-----GAME OVER------')
            self.print_results()

    def game_loop(self):
        while self.game_active:
            self.player_loop()
def test_players(game, num_players=2):
    """Create *num_players* Computer players attached to *game*."""
    return [Computer(name='Player %s' % idx, game=game)
            for idx in xrange(0, num_players)]
def test_deck():
    """Return the card list of a freshly built testDeck."""
    return testDeck().deck
def main():
    """Build a real-deck 55-point game and run the main loop."""
    deck = RealDeck().deck
    game = Game(deck=deck, points=55)
    game.played_user_cards = []
    # calling end_turn here to reset player hand on start up
    for p in game.players:
        p.game = game
        p.end_turn()
    game.active_player.start_turn()
    game.game_loop()


if __name__ == '__main__':
    main()
|
25,262 | b8001f4f095fc652491aecdf2c866f38a96bc32a | # Generated by Django 3.1.2 on 2020-11-05 15:43
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: switch Resume.resfile to an ImageField
    stored under images/ (with a throwaway one-off default of 'h')."""

    dependencies = [
        ('abouts', '0004_auto_20201104_2014'),
    ]

    operations = [
        migrations.AlterField(
            model_name='resume',
            name='resfile',
            field=models.ImageField(default='h', upload_to='images/'),
            preserve_default=False,
        ),
    ]
|
25,263 | 29f2885ed3d041526246bdfae5d832cc2a5cb907 | #coding:utf-8
from django.conf.urls import patterns, url
from ueditor.views import UploadFile, ImageManager, CatchRemoteImage,\
UploadScrawl, UploadImage
# URL routes for the ueditor upload/management views (old-style `patterns()` API).
# NOTE(review): the images/upload and files/upload patterns lack a trailing `$`
# anchor (the others have one) — confirm whether the prefix match is intended.
urlpatterns = patterns('',
    url(r'^images/upload/(?P<uploadpath>.*)', UploadImage.as_view(), {'action': 'image'}),
    url(r'^images/list/(?P<imagepath>.*)$', ImageManager.as_view()),
    url(r'^images/fetch/(?P<imagepath>.*)$', CatchRemoteImage.as_view()),
    url(r'^scrawl/upload/(?P<uploadpath>.*)$', UploadScrawl.as_view()),
    url(r'^files/upload/(?P<uploadpath>.*)', UploadFile.as_view()),
)
|
25,264 | e7a251128d679c6f6c0fe76d4b168ad2afec663e | i = j[0][0]
|
25,265 | 851212312500775667101ae036691816f116c1a7 | import requests
from bs4 import BeautifulSoup
import smtplib
from email.message import EmailMessage
class Scraper:
    """Scrapes a Memory Express search-results page and emails products
    whose sale price falls inside a [LOW, HIGH] range."""

    def __init__(self, url, low, high, sale):
        self.URL = url
        self.LOW = low
        self.HIGH = high
        self.SALE = sale
        self.body = ''  # filled by run(); consumed by send_email()

    def run(self):
        """Collect product info from the page and build the e-mail body."""
        items = self.get_page().find_all(class_='c-shca-icon-item')
        productsInfo = self.get_info(items)
        classProducts = [StoreProduct(i[0], i[1], i[2]) for i in productsInfo]
        emailProducts = []
        for finalProduct in classProducts:
            try:
                emailProducts.append("".join(finalProduct.return_info(self.HIGH, self.LOW, self.SALE)))
            except TypeError:
                # return_info returns None (not joinable) for out-of-range items
                pass
        self.body = "\n".join(emailProducts)
        print(self.body)

    def get_page(self):
        """Fetch self.URL and return a parsed BeautifulSoup document.

        Raises ConnectionError (with the original cause chained) on any
        request failure; previously a bare ``except:`` also swallowed
        KeyboardInterrupt and hid the underlying error.
        """
        print("Accessing Page...")
        try:
            page = requests.get(self.URL)
        except requests.exceptions.RequestException as exc:
            raise ConnectionError("Error Connecting to Page!") from exc
        return BeautifulSoup(page.content, 'html.parser')

    def get_info(self, items):
        """Return [(list_price, sale_price, name), ...] for the given item tags."""
        return [self.sort_info(item) for item in items]

    @staticmethod
    def sort_info(item):
        """Extract (list_price, sale_price, name) from one product tag.

        Prices are sliced as "$" plus up to 6 following characters from the
        savings/regular summary text.
        """
        name = (item.find(class_='c-shca-icon-item__body-name-brand').next_sibling.strip())
        unformSP = (item.find(class_='c-shca-icon-item__summary-rebate-savings').get_text())
        salePrice = (unformSP[unformSP.find("$"):(unformSP.find("$")) + 7])
        unformLP = (item.find(class_='c-shca-icon-item__summary-regular').get_text())
        listPrice = (unformLP[unformLP.find("$"):(unformLP.find("$")) + 7])
        return listPrice, salePrice, name

    def send_email(self, sender, password, to):
        """E-mail self.body from *sender* to *to* via Gmail SSL SMTP."""
        self.body = self.body.replace("™", "").replace(" ", " ").replace(" -", " -")
        if self.SALE:
            showing = f"Showing products that are on sale between ${self.LOW} - ${self.HIGH}"
        else:
            showing = f"Showing all products between ${self.LOW} - ${self.HIGH}"
        # Recover the search term from the URL's first query parameter.
        if '&' in self.URL:
            search = self.URL[self.URL.find('=') + 1:self.URL.find('&')]
        else:
            search = self.URL[self.URL.find('=') + 1:]
        msg = EmailMessage()
        msg.set_content(f"\nSearch: {search.replace('+', ' ')} "
                        f"\nLink: {self.URL} "
                        f"\n{showing} "
                        f"\n\n"
                        f"{self.body} "
                        f"\n \n-Your Bot Program")
        msg['Subject'] = "Memory Express Search"
        msg['From'] = sender
        msg['To'] = to
        try:
            server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
            server.ehlo()
            server.login(sender, password)
            server.send_message(msg)
            server.quit()
            print("Email Sent!")
        except (smtplib.SMTPException, OSError):
            # was a bare `except:`; keep best-effort behaviour but only
            # swallow SMTP/network failures
            print("Something went wrong")
class StoreProduct:
    """One product's list price, sale price and name, pre-formatted for e-mail output."""

    def __init__(self, price, sale, name):
        def as_number(tag):
            # "$1,234.56" -> 1234.56
            return float(tag[1:].replace(",", ""))

        self.price = price
        self.sale = sale
        self.name = name
        self.priceInt = as_number(price)
        self.saleInt = as_number(sale)
        self.fix_format()

    def fix_format(self):
        """Pad sub-$100 prices for column alignment; blank the sale price
        when it equals the list price; prefix the name with '- '."""
        def padded(tag):
            return '$ ' + tag[1:]

        if self.priceInt < 100:
            self.price = padded(self.price)
        if self.saleInt == self.priceInt:
            self.sale = " "
        elif self.saleInt < 100:
            self.sale = padded(self.sale)
        self.name = '- ' + self.name

    def return_info(self, high, low, sale):
        """Return (price, sale, name) with trailing spaces when the sale
        price lies in [low, high]; implicitly None otherwise."""
        in_range = low <= self.saleInt <= high
        if in_range:
            return (self.price + ' ', self.sale + ' ', self.name)
|
25,266 | 7521b4fc8df6099c946ac901c05c7fd7daf56cd1 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# author: "Dev-L"
# file: logger.py
# Time: 2018/8/14 15:24
"""
处理所有日志相关事务
"""
import logging
import os
from conf import settings
class Logger:
    """Factory for loggers that write to both the console and a per-type log file."""

    @staticmethod
    def get_logger(log_type):
        """Return a logger named *log_type* with console + file handlers attached.

        Handlers are attached only on the first call for a given name.
        Previously they were re-added on EVERY call, so each message was
        emitted once per get_logger() invocation (duplicated log lines).
        """
        logger = logging.getLogger(log_type)
        logger.setLevel(settings.LOG_LEVEL)
        if not logger.handlers:
            # console handler at the configured level
            ch = logging.StreamHandler()
            ch.setLevel(settings.LOG_LEVEL)
            # file handler: <LOG_PATH>/<log_type>.log
            log_file = os.path.join(settings.LOG_PATH, '%s.log' % log_type)
            fh = logging.FileHandler(log_file)
            fh.setLevel(settings.LOG_LEVEL)
            # shared log-record format
            formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
            ch.setFormatter(formatter)
            fh.setFormatter(formatter)
            logger.addHandler(ch)
            logger.addHandler(fh)
        return logger
|
25,267 | 97eedd38dc1f98f027e111f360fb5448f8e60249 | import setuptools
# Package metadata/configuration for the simplelayout distribution (src layout).
setuptools.setup(
    name="simplelayout-meitounao110",  # Replace with your own username
    version="0.0.1",
    author="meitounao110",
    author_email="431041317@qq.com",
    description="A simplelayout package",
    url="https://github.com/idrl-assignment/3-simplelayout-package-meitounao110",
    # sources live under src/
    package_dir={'': 'src'},
    packages=setuptools.find_packages(where='src'),
    install_requires=['matplotlib', 'numpy', 'scipy', 'pytest'],
    entry_points={
        'console_scripts': [  # register the `simplelayout` command-line entry point
            'simplelayout = simplelayout.__main__:main'
        ]
    },
)
|
25,268 | b3f0e9e1f458e176967b338cdeb81048cf006bb1 | import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.nn.init as init
import torch.optim as optim
import numpy as np
import os
import sys
import cv2
from PIL import Image
import easydict
sys.path.append('./Whatiswrong')
sys.path.append('./Nchar_clf')
import Trans
import Nchar_utils
import Extract
import utils
import evaluate
import torch.nn.functional as F
from torch.utils.data import *
import easydict
import torchvision
import tensorflow as tf
import pickle
import time
import os
import Decoder
import Encoder
import GlyphGen
class Basemodel(nn.Module):
    """Text-recognition model: optional TPS rectification -> ResNet encoder
    -> attention decoder -> glyph generator."""

    def __init__(self, opt, device):
        super(Basemodel, self).__init__()
        if opt.TPS:
            self.TPS = Trans.TPS_SpatialTransformerNetwork(F = opt.num_fiducial,
                                                           i_size = (opt.img_h, opt.img_w),
                                                           i_r_size= (opt.img_h, opt.img_w),
                                                           i_channel_num= 3, #input channel
                                                           device = device)
        else:
            # BUG FIX: self.TPS was previously left unset when opt.TPS was
            # falsy, so forward()'s `if self.TPS:` raised AttributeError.
            self.TPS = None
        self.encoder = Encoder.Resnet_encoder(opt)
        self.decoder = Decoder.Decoder(opt,device)
        self.generator = GlyphGen.Generator(opt, device)

    def forward(self, img, Input, is_train):
        if self.TPS is not None:
            img = self.TPS(img)
        feature_map_list, holistic_states = self.encoder(img)
        logits, masks, glimpses = self.decoder(feature_map_list[-1], holistic_states, Input, is_train)
        glyphs, embedding_ids = self.generator(feature_map_list, masks, glimpses)
        return logits, glyphs, embedding_ids
25,269 | 382085fb46fd8374754d6d731cf7c1336c31b6e0 | # -*- coding: utf-8 -*-
#
# 相关配置
# Author: alex
# Created Time: 2018年06月13日 星期三 16时35分25秒
# Arabic数字与中文数字的映射
# Mapping from Arabic numerals '0'-'9' to the corresponding Chinese numeral
# characters; the index of each character in the string is its value.
arabic_num_map = {str(value): char
                  for value, char in enumerate('零一二三四五六七八九')}
|
25,270 | e4bad736f2deb347fde8dee9d69873c5b679d109 | import sys
import requests
import random
import string
import re
import time
try:
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
except ImportError:
print("""
Install selenium for python. `pip install -U selenium`. You also have to download
Selenium Gecko Webdirver binary from https://github.com/mozilla/geckodriver/releases.
How to install this driver can be found https://selenium-python.readthedocs.io/installation.html#drivers.\n
For Linux and Mac, you can just unzip and copy the driver into /usr/local/bin/.
For Windows, you can follow the instructions in the page.
""")
exit(-1)
DEBUG = 0
def random_tag(n=4):
    """Return a random tag of *n* uppercase letters and digits."""
    alphabet = string.ascii_uppercase + string.digits
    return ''.join(random.choice(alphabet) for _ in range(n))
def get_tags(receipt_e):
    """Return the text of every tag value inside a receipt element."""
    elements = receipt_e.find_elements_by_class_name('tagValue')
    return [element.text for element in elements]
def get_all_receipts(driver):
    """
    Parse all the receipts in a page
    $($('#receiptList')[0], '.receipt')

    Yields one dict per receipt row with keys 'merchant', 'amount'
    and 'tags' (list of tag strings).
    """
    # One generator item per `.receipt` element under #receiptList.
    for rs in driver.find_elements_by_css_selector('#receiptList > .receipt'):
        m = rs.find_element_by_class_name('merchant').text
        a = rs.find_element_by_class_name('amount').text
        tags = get_tags(rs)
        # created = rs.find_element_by_class_name('created').text
        yield {
            'merchant': m,
            'amount': a,
            'tags': tags,
            # 'created': created
        }
def add_receipts(driver):
    """Fill and save a new receipt with a random merchant name and a
    random amount; return the (merchant, amount) pair used."""
    e = driver.find_element_by_id('add-receipt')
    e.click()
    m = 'M__' + random_tag(3)
    # Random amount with two decimal places in [0, 100).
    a = int(random.random() * 10000)/100
    driver.find_element_by_id('merchant').send_keys(str(m))
    driver.find_element_by_id('amount').send_keys(str(a))
    driver.find_element_by_id('save-receipt').click()
    return m, a
def add_tag(e, driver):
    """ Adds a random tag to te element e

    Types an 8-character random tag into the tag input of receipt
    element *e* and confirms with ENTER; returns the tag string."""
    tag = random_tag(8)
    e.find_element_by_class_name('add-tag').click()
    driver.find_element_by_class_name('tag_input')\
        .send_keys(tag)
    driver.find_element_by_class_name('tag_input')\
        .send_keys(Keys.ENTER)
    # driver.find_elements_by_class_name('save-tag').click()
    return tag
def set_up(url):
    """Launch Firefox via geckodriver, open *url* and return the driver.

    The 1-second implicit wait applies to every element lookup."""
    driver = webdriver.Firefox()
    driver.implicitly_wait(1)
    driver.get(url)
    return driver
def test_add_receipts(driver):
    """
    Adds a receipt and checks if the receipt is available in the page
    or not.

    Returns 0 on success, -1 on failure.
    """
    print("-"*80)
    print("Test: Adding a receipt")
    print("-"*80)
    driver = driver
    time.sleep(1)
    old_receipts = list(get_all_receipts(driver))
    m, a = add_receipts(driver)
    if DEBUG>=2:
        driver.refresh()
    time.sleep(1)
    new_receipts = list(get_all_receipts(driver))
    # The page must contain exactly one more receipt than before.
    if len(old_receipts) + 1 != len(new_receipts):
        print("old_receipts={}\n>> new_receipts={}"
              .format(old_receipts, new_receipts))
        return -1
    found = False
    # Compare as strings: the DOM only exposes text.
    for rs in new_receipts:
        if str(rs['merchant']) == str(m) and str(rs['amount']) == str(a):
            found = True
            break
        elif DEBUG:
            print("Found (but not testing):", rs)
    if not found:
        print(
            "ERROR: I don't see the receipt I just inserted with \n"
            "merchant={!r} and amount={!r}".format(m, a)
        )
        return -1
    print("Success!!!")
    print('<>'*40 + '\n')
    return 0
def test_add_tag(driver):
    """
    Adds tag to a randomly chosen receipts, and test if the tag appears in
    the page.

    Returns 0 on success, -1 on failure.
    """
    print("-"*80)
    print("Test: Adding a tag")
    print("-"*80)
    time.sleep(1)
    # Get all receipts
    receipts = driver.find_elements_by_class_name('receipt')
    # Choose a receipt randomly to add tag
    i = random.randint(0, len(receipts)-1)
    e = receipts[i]
    # Click on the add-tag element
    old_tags = get_tags(e)
    tag = add_tag(e, driver)
    if DEBUG>=2:
        driver.refresh() # Probably don't require
    time.sleep(1)
    # Fetch the new receipts again (the DOM may have been redrawn).
    receipts = driver.find_elements_by_class_name('receipt')
    e = receipts[i]
    new_tags = get_tags(e)
    # Exactly the one new tag should have appeared on that receipt.
    added_tags_ = list(set(new_tags) - set(old_tags))
    if len(added_tags_) != 1 or tag not in added_tags_[0]:
        print("""
        ERROR: The number of newly added tags did not match.
        Expected: {!r}, Found: {!r}""".format([tag], added_tags_))
        return -1
    print("Success!!!")
    print('<>'*40 + '\n')
    return 0
def test_del_tag(driver):
    """
    Selects a random receipt and delets its one of the tag.

    Returns 0 on success, -1 on failure.
    """
    print("-"*80)
    print("Test: Deleting a tag")
    print("-"*80)
    # Select a random receipt
    receipts = driver.find_elements_by_class_name('receipt')
    index_of_random_receipt = random.randint(0, len(receipts)-1)
    e = receipts[index_of_random_receipt]
    # Click on the add-tag element
    tags = get_tags(e)
    # Make sure the receipt has at least one tag to delete.
    if not tags:
        add_tag(e, driver)
        tags = get_tags(e)
    e_tag = random.choice(e.find_elements_by_class_name('tagValue'))
    tag = e_tag.text
    # Clicking a tag deletes it in this app.
    e_tag.click(); time.sleep(1)
    # Receipts DOM might have been deleted or re-drawn, pull it again
    receipts = driver.find_elements_by_class_name('receipt')
    e = receipts[index_of_random_receipt]
    new_tags = get_tags(e)
    removed_tag_ = list(set(tags) - set(new_tags))
    if len(removed_tag_) != 1 or removed_tag_[0] != tag:
        print(""" Removed tags: {} (Should be only [{}])"
        """.format(removed_tag_, tag))
        print("""This error might not be your fault. Either my code, or
        the Selenium driver is buggy. Report this problem to us. We will
        fix it, but in the mean time make sure the deletion works on UI.""")
        return -1
    else:
        print("Success!!!")
        print('<>'*40 + '\n')
        return 0
def test_no_duplicate_tag(driver):
    """
    Tests that no duplicate tags are present in any of the receipt rows.

    Returns 0 when every row is duplicate-free, -1 otherwise.
    """
    for i, rs in enumerate(driver.find_elements_by_class_name('receipt')):
        row_tags = list(get_tags(rs))
        # A set collapses duplicates; a size mismatch means a repeat.
        if len(row_tags) != len(set(row_tags)):
            print("There are duplicate tags in the {}-th receipt line"
                  .format(i))
            print("Found tag: {!r}".format(row_tags))
            return -1
    return 0
def tearDown(driver):
    """Close the browser and release the webdriver session."""
    driver.quit()
def extract_netid_and_url(line):
    """Parse one README roster line; return (netid, url, circleurl).

    On a non-matching line the line is printed and the process exits."""
    pattern = (r'\* \[.*\]\(.*\) - (?P<netid>\w+) \- \[.+\]'
               r'\((?P<url>http.+)\)\s*\[\!\[CircleCI\]'
               r'\((?P<circleurl>.*)\)\]\(.*\)\s*')
    match = re.match(pattern, line)
    if match is None:
        print(line)
        exit(-1)
    return match.group('netid', 'url', 'circleurl')
def get_github_student_url(netid):
    """
    Obtain the student list from the github page.

    Downloads the course README and returns the (netid, url, circleurl)
    triple for the first line containing *netid*, or (None, None, None)
    when no line matches.
    """
    url = 'https://raw.githubusercontent.com/CT-CS5356-Fall2017/cs5356/master/README.md'
    r = requests.get(url)
    assert r.ok
    text = r.text
    for l in text.split('\n'):
        if netid in l:
            return extract_netid_and_url(l)
    return None, None, None
if __name__ == "__main__":
    # Parse commandline
    USAGE = """
    $ python {0} -github <netid> # To test the final submission
    or
    $ python {0} <url> # For just testing the url you created is working or not.
    """.format(sys.argv[0])
    url = None
    netid=None
    r = 0
    if len(sys.argv)<2:
        print(USAGE)
        exit(-1)
    if len(sys.argv)>2 and sys.argv[1] == '-github':
        # BUG FIX: the result used to be bound to `URL` while `url` stayed
        # None, so set_up(None) crashed in -github mode.
        netid, url, circleurl = get_github_student_url(sys.argv[2])
    else:
        url = sys.argv[1]
    driver = set_up(url)
    # Each passing test adds 1 + 0; a failure adds 1 + (-1) = 0 and stops
    # the later tests from running.
    r = 0
    try:
        r += 1 + test_add_receipts(driver)
        if (r>=0):
            r += 1 + test_add_tag(driver)
        if (r>0):
            r += 1 + test_del_tag(driver)
        if (r>0):
            r += 1 + test_no_duplicate_tag(driver)
    except (AssertionError, ImportError) as e:
        print("=======")
        print("Error:", e)
        print("=======\n")
        print("Something went wrong. Test the test by manually and see if it\n"
              "is working. If yes, and check the IDs and class names in your html\n"
              "file matches what is dictated in teh README file. I will add the\n"
              "meaning of the error. \n")
        print("\"Element not visible\": Your server might be too slow. Find the line\n"
              "'implicitly_wait' in the auto-grader and change the wait time from\n"
              " 5 sec to something more like 15 or 20.")
    finally:
        tearDown(driver)
# BUG FIX: `print "Hi"` was Python-2 syntax and a SyntaxError in this
# otherwise Python-3 script.
print("Hi")
|
25,271 | 0688519320c938701b4ed993c4c7e957393745c8 | # title: determine-if-two-strings-are-close
# detail: https://leetcode.com/submissions/detail/420756048/
# datetime: Mon Nov 16 12:45:35 2020
# runtime: 132 ms
# memory: 14.9 MB
class Solution:
    def closeStrings(self, word1: str, word2: str) -> bool:
        """Return True when word1 and word2 are "close": they use exactly
        the same set of characters and the same multiset of character
        frequencies (LeetCode 1657).
        """
        # Local import: the original relied on a `collections` global that
        # LeetCode injects but a plain module does not have.
        from collections import Counter
        if len(word1) != len(word2):
            return False
        c1, c2 = Counter(word1), Counter(word2)
        # Same character set, same sorted frequency profile.  (The original
        # additionally compared counter lengths, which set equality implies.)
        return set(c1) == set(c2) and sorted(c1.values()) == sorted(c2.values())
25,272 | 2e067ab6753c0aa574552acd67d2e0b8536973a6 | # -*- coding: utf-8 -*-
# Odoo/OpenERP addon manifest for the "custom account" module.
{
    'name': 'custom account',
    'version': '0.1',
    'category': 'Accounting & Finance',
    'description': """custome stock module""",
    'author': 'chengdh (cheng.donghui@gmail.com)',
    'website': '',
    'license': 'AGPL-3',
    # Requires the voucher accounting addon to be installed first.
    'depends': ['account_voucher'],
    'init_xml': [],
    # Views and workflow definitions loaded on module update.
    'update_xml': ['account_voucher_view.xml','account_voucher_workflow.xml'],
    'demo_xml': [],
    'active': False,
    'installable': True,
    'web':True,
    'css': [
    ],
    'js': [
    ],
    'xml': [
    ],
}
25,273 | 3790d639977e65220f96faa1b6ab530d9801eaf7 | # -*- coding: utf-8 -*-
"""
遇到最大的坑是scrapy.Request()中的cookies必须通过cookies传递,不像requests可以直接放在headers中
当我们使用requests的时候,一般可以直接把Cookies放在Headers里面,随着请求一并提交,
但是,如果使用Scrapy的时候需要Cookies,就不能把Cookies放在Headers里面。在Scrapy发起请求的时候,有一个单独的参数来设置Cookies:
并且, cookies参数的值为一个字典,需要把原来Chrome中的字符串Cookies,先按分号分为不同的段,每一段再根据等号拆分为key和value。
settings中的COOKIES_ENABLED参数默认是被注释的,说明不启用cookies,解除注释并且设置为Flase,说明开启cookie,但是不用scrapy内置的cookie,自己在
DEFAULT_REQUEST_HEADERS中设置的Cookie才会生效。如果设为True,则失败,不知道设为True有什么用
"""
import scrapy
from baiduzhidao.items import BaiduzhidaoItem
class ZhidaoSpider(scrapy.Spider):
    """Spider for the Baidu Zhidao question list.

    NOTE: unlike `requests`, Scrapy ignores a Cookie header; cookies must
    be passed as a dict via the `cookies=` argument of scrapy.Request
    (see the commented-out example in start_requests)."""
    name = 'zhidao'
    # allowed_domains = ['www.zhidao.baidu.com']
    # start_urls = ['http://www.zhidao.baidu.com/']
    headers={
        'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'
    }
    def start_requests(self):
        # Entry point: fetch the question list page (callback defaults to parse).
        url="https://zhidao.baidu.com/list?fr=daohang"
        yield scrapy.Request(url)
        # cookies='BIDUPSID=E815020B2450FB2C5A2A883D52C36950; PSTM=1534739412; BAIDUID=6C1F5B57817225D5683A81001322601C:FG=1; shitong_key_id=2; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; delPer=0; H_PS_PSSID=1444_21105_30210_18560_26350; PSINO=5; ZD_ENTRY=baidu; Hm_lvt_6859ce5aaf00fb00387e6434e4fcc925=1576134017,1576304600; Hm_lpvt_6859ce5aaf00fb00387e6434e4fcc925=1576304603; shitong_data=fb93080e6adce017f657e0ac08cbe25e60e50f648057e4ed0cbf0c937382bc9ada75d54602143a25296db1947076de41a52a1b073f5753b85a1922f018747c57870191529937f0878becf1516b06b859a257d596477e9dde37d573ed84c1afcf8596ec0873bdf7742153361067a890dcc72ba37ecf6c2173f1617b36d916a7c4; shitong_sign=4c046287'
        # # The cookies= argument takes a dict or list; build it from the raw string:
        # mycookies={}
        # for c in cookies.split(';'):
        #     mycookies[c.split('=')[0].strip()]=c.split('=')[1].strip()
        # yield scrapy.Request(url,cookies=mycookies,callback=self.parse)
    def parse(self, response):
        # Extract every question row from the list page.
        # print(response,'*'*100)
        # print(response.text.find("question-list-item"),'*'*100)
        # ques_list=response.xpath('//ul[@class="question-list-ul"]/li[@class="question-list-item"]')
        ques_list=response.xpath('//ul[@class="question-list-ul"]/li[@class="question-list-item"]') # NOTE(review): if this matches nothing the page may be cookie-gated
        # print(ques_list,'*'*100)
        # print(ques_list,'*'*100)
        item=BaiduzhidaoItem()
        for ques in ques_list:
            # print('*'*100)
            item["TitleName"]=ques.xpath('div[1]/div//a/text()').extract()[0]
            yield item # yield (not return) so every question is emitted, not just the first
|
25,274 | 138f6126f90b86b6f42293e27c66e729f29f19c5 | import numpy as np
from constants import GLOBAL_SEED
def flipImagesLR(image_set, image_set_y):
    """Mirror every image horizontally; labels are carried over unchanged.

    Returns (flipped_images, labels) as numpy arrays."""
    count = len(image_set)
    flipped = [np.fliplr(image_set[idx]) for idx in range(count)]
    labels = [image_set_y[idx] for idx in range(count)]
    return np.array(flipped), np.array(labels)
def flipImagesUD(image_set, image_set_y):
    """Mirror every image vertically; labels are carried over unchanged.

    Returns (flipped_images, labels) as numpy arrays."""
    count = len(image_set)
    flipped = [np.flipud(image_set[idx]) for idx in range(count)]
    labels = [image_set_y[idx] for idx in range(count)]
    return np.array(flipped), np.array(labels)
def addGaussianNoise(image_set, image_set_y):
    """Add zero-mean Gaussian noise (sigma = 250) to every image.

    Returns (noisy_images, labels) as numpy arrays."""
    # Fixed seed so the generated noise is reproducible across runs.
    np.random.seed(GLOBAL_SEED)
    count = len(image_set)
    noisy = []
    labels = []
    for idx in range(count):
        img = image_set[idx]
        noisy.append(img + np.random.normal(0, 250, img.shape))
        labels.append(image_set_y[idx])
    return np.array(noisy), np.array(labels)
def augmentRotation(image_set, image_set_y):
    """For each image emit three copies: rotated 90° CCW, 90° CW and 180°.

    Returns (rotated_images, labels) as numpy arrays; every label appears
    three times, once per rotation."""
    rotated = []
    labels = []
    for idx in range(len(image_set)):
        img = image_set[idx]
        rotated += [np.rot90(img), np.rot90(img, 3), np.rot90(img, 2)]
        labels += [image_set_y[idx]] * 3
    return np.array(rotated), np.array(labels)
def augmentTranslation(image_set, image_set_y, offset_x=0.25, offset_y=0.25):
    """For each image emit four wrap-around shifted copies: +x, +y, -x, -y.

    NOTE(review): the x shift amount is derived from shape[0] (rows) and the
    y shift from shape[1] (columns), matching the original code — this looks
    swapped for non-square images; confirm before changing."""
    shifted = []
    labels = []
    for idx in range(len(image_set)):
        img = image_set[idx]
        dx = int(img.shape[0] * offset_x)
        dy = int(img.shape[1] * offset_y)
        shifted += [
            np.roll(img, dx, axis=1),
            np.roll(img, dy, axis=0),
            np.roll(img, -dx, axis=1),
            np.roll(img, -dy, axis=0),
        ]
        labels += [image_set_y[idx]] * 4
    return np.array(shifted), np.array(labels)
|
25,275 | 9b4a2847b31a389afcaac02870951b83c6d6fbb4 | import commands
import re
def GetLocalInfrastructureCarVersion():
    # Python 2 only: uses the `commands` module and print statements.
    # Returns the locally installed Foglight Infrastructure cartridge
    # version string (e.g. "5.9.2-20180101").
    # Use "./fglcmd.sh -cmd cartridge:list to get all cartridges' information
    allLocalCartridgesInformation = commands.getoutput('cd /home/admin/Dell/Foglight/bin && ./fglcmd.sh -cmd cartridge:list')
    # Use regex to get the Infrastructure Cartridge part information
    # (the line containing "Infrastructure" plus the next three lines).
    matchPosition = re.search('.*Infrastructure\n.*\n.*\n.*', allLocalCartridgesInformation).span()
    infrastructureCarInformation = allLocalCartridgesInformation[matchPosition[0]:matchPosition[1]]
    # Use regex to get the Infrastructure Cartridge version build
    # NOTE(review): re.search raises AttributeError here if nothing matches.
    matchPosition = re.search('\d(\.\d)+\-\d+.*', infrastructureCarInformation).span()
    infrastructureCarVersion = infrastructureCarInformation[matchPosition[0]:matchPosition[1]]
    print "\nThe local Infrastructure version is: " + infrastructureCarVersion + "\n"
    return infrastructureCarVersion
|
25,276 | f6089938fdf5085fc7055adc735b12e5d91fdff6 | import pkgutil
from sanic.log import logger
IDIOM_PACKAGE = 'idiomfinder.validator'
IDIOM_FILE = 'data/idioms.3w.txt'
class IdiomValidator:
    """
    IdiomValidator examines a given string to see if it is a Chinese idiom.
    It does so by set membership against a word list bundled with the
    package.
    """

    def __init__(self):
        raw = pkgutil.get_data(IDIOM_PACKAGE, IDIOM_FILE)
        lines = raw.decode('utf-8').strip().splitlines()
        self.all_idioms = set(lines)
        logger.debug('Idioms loaded from {}/{}'.format(IDIOM_PACKAGE, IDIOM_FILE))

    def is_valid(self, s):
        """Return True when *s* appears in the idiom list."""
        return s in self.all_idioms
|
25,277 | b5b4fd7ca5650b3a06881bda7b5d7304a4daa561 | ''' Multiple plot groups layout
'''
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
import pyqtgraph as pg

# Grid dimensions: view_* describe the grid of plot groups, plot_* the
# grid of plots inside each group.
view_rows = 2
view_cols = 2
plot_rows = 3
# BUG FIX: this line originally re-assigned `view_cols = 2`; paired with
# `plot_rows` above it was clearly meant to be the plot-grid column count.
plot_cols = 2

app = QtGui.QApplication([])
view = pg.GraphicsView()
layout = pg.GraphicsLayout(border=(100,100,100))
view.setCentralItem(layout)
view.show()
view.setWindowTitle('AAAAA')
view.resize(1600,1200)
25,278 | 2e3e7c4269affd1c9ea640d166ebc4ac63c4395a | import sys
import os
import shutil
import random
import time
# captcha是用于生成验证码图片的库,可以 pip install captcha 来安装它
from captcha.image import ImageCaptcha
# 用于生成验证码的字符集
CHAR_SET = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
# 字符集的长度
CHAR_SET_LEN = 10
# 验证码的长度,每个验证码由4个数字组成
CAPTCHA_LEN = 4
# 验证码图片的存放路径
CAPTCHA_IMAGE_PATH = '../pic/captcha_test/'
# 用于模型测试的验证码图片的存放路径,测试集
TEST_IMAGE_PATH = '../pic/captcha_test/'
# 用于模型测试的验证码图片的个数,从生成的验证码图片中取出来放入测试集中
TEST_IMAGE_NUMBER = 50
# 生成验证码图片,4位的十进制数字可以有10000种验证码
def generate_captcha_image(charSet=CHAR_SET, charSetLen=CHAR_SET_LEN, captchaImgPath=CAPTCHA_IMAGE_PATH):
    """Generate 4-digit captcha images into *captchaImgPath*.

    At most *total* images are written.  BUG FIX: the original `break`
    only exited the innermost loop, so generation never actually stopped
    at the limit; returning exits all nested loops.  The dead loop that
    computed charSetLen**CAPTCHA_LEN before overwriting it with 1000 was
    removed.
    """
    total = 1000  # cap on generated images (full space is charSetLen**CAPTCHA_LEN)
    k = 0
    for _ in range(3, 4):
        for i in range(charSetLen):
            for j in range(charSetLen):
                for m in range(charSetLen):
                    for n in range(charSetLen):
                        if k == total:
                            return
                        captcha_text = charSet[i] + charSet[j] + charSet[m] + charSet[n]
                        image = ImageCaptcha()
                        image.write(captcha_text, captchaImgPath + captcha_text + str(_) + '.jpg')
                        k += 1
                        sys.stdout.write("\rCreating %d/%d" % (k, total))
                        sys.stdout.flush()
# 从验证码的图片集中取出一部分作为测试集,这些图片不参加训练,只用于模型的测试
def prepare_test_set():
    """Move TEST_IMAGE_NUMBER randomly chosen captcha images from the
    training directory to the test directory so they are excluded from
    training."""
    names = [path.split('/')[-1] for path in os.listdir(CAPTCHA_IMAGE_PATH)]
    random.seed(time.time())
    random.shuffle(names)
    for name in names[:TEST_IMAGE_NUMBER]:
        shutil.move(CAPTCHA_IMAGE_PATH + name, TEST_IMAGE_PATH + name)
if __name__ == '__main__':
    # Generate the captcha images, then report completion on stdout.
    generate_captcha_image(CHAR_SET, CHAR_SET_LEN, CAPTCHA_IMAGE_PATH)
    sys.stdout.write("\nFinished")
    sys.stdout.flush()
25,279 | 302841193f1c8284bc19de1a3fb14572ae4588a6 | import pytest
from flask import url_for
from tests.factories import job_factory
@pytest.mark.usefixtures('test_ctx', 'database')
class TestJobs:
    """Integration tests for the jobs listing endpoint."""

    def test_returns_200(self, client):
        """A seeded job makes the listing respond with HTTP 200."""
        job_factory()
        response = client.get(url_for('jobs.jobs'))
        assert response.status_code == 200

    def test_returns_correct_data(self, client):
        """The listing serialises the seeded job as JSON."""
        job_factory()
        payload = client.get(url_for('jobs.jobs')).json
        assert payload == [{'id': 1, 'name': 'Test job', 'prefix': '/test-job'}]
|
25,280 | 7a90220af4d00391e12e4e0cb0319a2b92b48196 | from selenium import webdriver #le driver qui te permet d'aller sur un explorateur internet
from selenium.webdriver.common.keys import Keys #ce qui te permet de rentrer des touches du clavier
import getpass #le module de mot de passe
driver = webdriver.Firefox()  # launch Firefox
driver.get("https://www.kickstarter.com/discover/advanced?category_id=16")  # open the Kickstarter category page
# Click "Load more" once to switch the page into infinite-scroll mode.
button_load_more = driver.find_element_by_xpath('//*[@id="projects"]/div[2]/div[2]/a')
button_load_more.click()
# Scroll until the page height stops growing.  BUG FIX: the original
# `while True: try: scroll except: break` looped forever, because
# execute_script never raises here so the bare except could not fire.
import time
last_height = driver.execute_script("return document.body.scrollHeight")
while True:
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
    time.sleep(2)  # give the page time to load the next batch
    new_height = driver.execute_script("return document.body.scrollHeight")
    if new_height == last_height:
        break
    last_height = new_height
25,281 | 28496dc1d46806d263adb497602b371c97152efd | from constants import *
from node import Node
from node_agg_average import NodeAggAverage
from node_average import NodeAverage
from node_count import NodeCount
from node_file_scan import NodeFileScan
from node_join import NodeJoin
from node_limit import NodeLimit
from node_distinct import NodeDistinct
from node_projection import NodeProjection
from node_selection import NodeSelection
from node_sort import NodeSort
from node_test_scan import NodeTestScan
def process(query):
    """Execute a query plan: wrap it in a root Node, print every row it
    produces until the iterator is exhausted, then close the plan."""
    root = Node()
    root.set_children([query])
    while True:
        row = root.next()
        if row is None:
            break
        print(row)
    root.close()
# Example query plans (nested lists of operator nodes; the first element is
# the operator, the following elements are its child subtrees).
# q = [NodeLimit(3),
#         [NodeProjection(["title"]),
#             NodeFileScan("data/movies_head.csv")
#         ] ]
# q = [NodeSelection("title", EQUALS, "Jumanji (1995)"),
#         [ NodeJoin("movieId", EQUALS, "movieId"),
#             NodeFileScan("data/ratings_head.csv"), NodeFileScan("data/movies_head.csv"),
#         ]
#     ]
# q = [ NodeJoin("movieId", EQUALS, "movieId"),
#         NodeFileScan("data/movies_head.csv"), NodeFileScan("data/ratings_head.csv"),
#     ]
# Active plan: average rating of "Jumanji (1995)" — select the movie,
# join against ratings on movieId, project rating, average.
q = [ NodeAverage(),
        [ NodeProjection(["rating"]),
            [ NodeJoin("movieId", EQUALS, "movieId"),
                [ NodeSelection("title", EQUALS, "Jumanji (1995)"), NodeFileScan("data/movies_head.csv")],
                NodeFileScan("data/ratings_head.csv"),
            ]
        ]
    ]
process(q)
|
25,282 | 3ce6104283c8e4edab37f58557c645cd4cecc5fb | import time
import rados
from multiprocessing import Process, Value, Pool
import os
import sys
def append_data_to_objects_t(prefix, bts, object_num):
    """Worker: append *bts* to *object_num* objects named prefix0..N-1 in
    the 'scbench' pool; return the total number of bytes appended."""
    cluster = rados.Rados(conffile='/etc/ceph/ceph.conf')
    cluster.connect()
    ioctx = cluster.open_ioctx('scbench')
    written = 0
    for index in range(object_num):
        ioctx.append(prefix + str(index), bts)
        written += len(bts)
    return written
def append_data_to_objects(append_size, thread_num, object_num):
    """Spawn *thread_num* worker processes, each appending *append_size*
    bytes to *object_num* objects, then print the aggregate MB/s."""
    # BUG FIX: the original opened './data' without ever closing it; also
    # read only the bytes needed instead of the whole file plus a slice.
    with open('./data', 'rb') as f:
        bts = f.read(append_size)
    pl = Pool(thread_num)
    arguments = [('Thread_' + str(i), bts, object_num) for i in range(thread_num)]
    start = time.time()
    results = pl.starmap(append_data_to_objects_t, arguments)
    stop = time.time()
    # Aggregate throughput in MiB/s across all workers.
    print(sum(results)/(stop-start)/1024/1024)
    time.sleep(5)

# CLI: append_size, thread num, object num
append_data_to_objects(int(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3]))
25,283 | 37293721eef7ead67acbd0355fc3e866ac58720a | from django.db import models
# Create your models here.
class Movie(models.Model):
    """An actor together with one of their movies and its genre."""
    actor = models.CharField(max_length=30)
    actor_movie = models.CharField(max_length=50)
    gener = models.CharField(max_length=50)  # NOTE(review): likely a typo for "genre"; renaming needs a migration

    def __str__(self):
        # e.g. "Actor---Movie---Genre"
        return '---'.join([self.actor, self.actor_movie, self.gener])
25,284 | 787a66ddac5755b3e449b84587c6230bc2e3a10c | # note on windows get a package from http://www.lfd.uci.edu/~gohlke/pythonlibs/#python-ldap
# and install it like
#
# cd c:\Python27\Scripts
# pip install python_ldap-2.4.25-cp27-none-win32.whl
#
try:
import ldap
import ldap.filter
except:
pass
#
#
#
from ad_dns import DnsDigger
from ad_realm import RealmReader
from ad_rootdse import LdapRootDSE
#
#
#
class LdapDetector:
    """Auto-detects Active Directory LDAP settings for this host: the
    Kerberos realm, a base DN, a predefined bind user and up to two
    domain-controller addresses (probed via their RootDSE)."""

    def __init__(self):
        # reset the members
        self.clear()

    def clear(self):
        # reset the members
        self.realm = ""
        self.basedn = ""
        self.binduser = ""
        self.server1 = ""
        self.server2 = ""

    def collect_realm(self):
        # assign realm
        self.realm = RealmReader().read()
        # and predefined user name
        self.binduser = "squid@%s" % self.realm.lower()

    def collect_servers(self):
        # construct ldap server dns name (SRV record for the realm)
        name = "_ldap._tcp.%s" % self.realm.lower()
        name = name.lower()
        # make DNS query to using dig
        servers = DnsDigger().dig(name)
        if len(servers) == 0:
            # no servers at all
            return
        if len(servers) == 1:
            # only one server, see its host and port
            (s0, p0) = servers[0]
            # and assign
            self.server1 = s0
        if len(servers) > 1:
            # two or more servers, get only two
            (s0, p0) = servers[0]
            (s1, p1) = servers[1]
            # and assign
            self.server1 = s0
            self.server2 = s1

    def collect_rest(self):
        # Probe both candidate servers' RootDSE; keep a responding server
        # in server1 and take basedn/curtime from it.
        # these are local copies of two servers
        s0 = self.server1
        s1 = self.server2
        # inspect both servers
        (d0, c0) = LdapRootDSE().inspect(s0)
        (d1, c1) = LdapRootDSE().inspect(s1)
        # all of d0, c0, d1, c1 may be empty, when for example the domain controllers are switched off
        if not d0 or not c0:
            s0 = ""
        if not d1 or not c1:
            s1 = ""
        # check the servers
        if not s0 and not s1:
            # we could not get information from any server, construct predefined value based on the dn
            self.basedn = "dc=" + ',dc='.join(self.realm.lower().split('.'))
            self.curtime = ""
        if s0:
            # the first server replied, assign the values
            self.basedn = d0
            self.curtime = c0
            # and move it up
            self.server1 = s0
            self.server2 = s1
        if s1:
            # the second server replied, assign the values
            # NOTE(review): when both replied, this branch overwrites the
            # s0 assignments and promotes s1 — confirm that preferring the
            # second server is intentional.
            self.basedn = d1
            self.curtime = c1
            # and move it up
            self.server1 = s1
            self.server2 = s0

    def detect(self):
        # Run the full detection pipeline and return the settings dict.
        # reset the members
        self.clear()
        # fill all members
        self.collect_realm()
        self.collect_servers()
        self.collect_rest()
        # assign and return
        data = {
            'basedn'  : self.basedn,
            'binduser': self.binduser,
            'server1' : self.server1,
            'server2' : self.server2,
            'curtime' : self.curtime
        }
        # and return
        return data

    def inspect_rootdse(self, server_addr):
        # Anonymous RootDSE query; returns (defaultNamingContext, currentTime),
        # both "" on any failure.  NOTE(review): duplicates LdapRootDSE.inspect
        # used by collect_rest, and `attrs.iteritems()` is Python-2-only.
        defaultNamingContext = ""
        currentTime = ""
        if len(server_addr) > 0:
            # try to anonymously bind to RootDSE
            try:
                uri = "ldap://%s:389" % server_addr
                conn = ldap.initialize(uri)
                # we bind anonymously which is allowed for the RootDSE only
                conn.simple_bind_s('', '')
                # do the search
                entries = conn.search_s("", ldap.SCOPE_BASE, "objectclass=*", None)
                for (dn, attrs) in entries:
                    for key, value in attrs.iteritems():
                        if key == "defaultNamingContext":
                            defaultNamingContext = value
                        if key == "currentTime":
                            currentTime = value
            except Exception as e:
                print (str(e))
                pass
        return (defaultNamingContext, currentTime)
#
# test some stuff
#
#if __name__ == "__main__":
#
# print "LdapDetector::detect() =>"
# print LdapDetector().detect()
|
25,285 | 7eec8fbc6b5db71d66bc912a92a86f2209d7bdb5 | class Permission:
def __init__(self, match, scopes, **kwargs):
self.id = kwargs.get('id', None)
self.match = match
self.scopes = scopes or list()
def __repr__(self):
return 'Perm(id={!r}, match={!r}, scopes={!r})'.format(
self.id, self.match, self.scopes)
@classmethod
def parse(cls, json):
if not isinstance(json.get('scopes', []), list):
raise ValueError('scopes must be a list')
return Permission(
id=json.get('id', None),
match=json.get('match', None),
scopes=json.get('scopes', list())
)
def tabular(self):
return {
'id': self.id,
'match': self.match,
'scopes': ','.join(self.scopes)
}
|
25,286 | 6ff8ec4eaf19dd9aedad402d86aef04f69de0e65 |
import json
import boto3
#ec2 = boto3.resource('ec2', region_name='eu-west-1')
#client = boto3.client('ec2')
# Show available profiles in ~/.aws/credentials
# Show available profiles in ~/.aws/credentials
print (boto3.session.Session().available_profiles)
# Show buckets in the 'default' profile
s3 = boto3.resource('s3')
for bucket in s3.buckets.all():
    print("NMI: " + bucket.name)
# Change the profile of the default session in code
# Use profile 'meir3'
boto3.setup_default_session(profile_name='meir3')
s3meir3 = boto3.resource('s3')
print()
for bucket in s3meir3.buckets.all():
    print('Personal buckets: ' + bucket.name )
# Switch again to the 'meir' profile and list its buckets.
boto3.setup_default_session(profile_name='meir')
s3meir = boto3.resource('s3')
print()
for bucket in s3meir.buckets.all():
    print('buckets of "meir" profile: ' + bucket.name )
print('\n\n')
# Finally, iterate every configured profile and list its buckets.
for prof in boto3.session.Session().available_profiles:
    boto3.setup_default_session(profile_name=prof)
    s3prof = boto3.resource('s3')
    print()
    for bucket in s3prof.buckets.all():
        print('buckets of profile {}: {}'.format(prof, bucket.name))
25,287 | ea4279215c6f1915456d4e1b0b22e3199f3db846 | from smskeeper import keeper_constants, keeper_strings
from smskeeper import sms_util
from smskeeper import analytics
from smskeeper import time_utils
def process(user, msg, requestDict, keeperNumber):
    """Handle inbound messages while the user is in the stopped state.

    Only "start" does anything: it restores the normal state, sends the
    start response and logs the event.  Every other message is ignored.
    """
    if msg.lower() != "start":
        # Ignore all other messages while stopped.
        return True, keeper_constants.CLASS_NONE, dict()
    # Need to do this to by-pass user.setState protocols
    user.state = keeper_constants.STATE_NORMAL
    user.setState(keeper_constants.STATE_NORMAL)
    user.save()
    sms_util.sendMsg(user, keeper_strings.START_RESPONSE, None, keeperNumber)
    analytics.logUserEvent(
        user,
        "Stop/Start",
        {
            "Action": "Start",
            "Hours Paused": time_utils.totalHoursAgo(user.last_state_change),
        }
    )
    return True, keeper_constants.CLASS_STOP, dict()
25,288 | 9fe665091c5496690d28fbcc3e57bdbd7b5d6509 | '''
Знайти добуток всіх елементів масиву дійсних чисел, менших заданого
числа. Розмірність масиву - 10. Заповнення масиву здійснити випадковими числами
від 50 до 100.
Виконав : Канюка Р. 122В
'''
import random
import numpy as np
# Repeatedly: fill a 10-element array with random ints in [50, 100],
# read a threshold from the user and print the product of all elements
# below it.
while True:
    # Initialise the array
    X = np.zeros(10)
    while True:
        try:
            keyword = float(input('Введіть задане число : '))
            break
        except ValueError :
            print('Введіть число!')
    for i in range(len(X)):
        X[i] = random.randint(50,100)
    print(X)
    # Compute the product of all elements below the threshold
    result = 1
    for i in range(len(X)):
        if X[i] < keyword :
            result *= X[i]
    if (result == 1):
        # No element was below the threshold
        print('Данних елементів не знайдено')
    else:
        print(f'Добуток елементів менших за {keyword} = {result}')
    quest = input('Завершити програму? Y/N : ')
    if(quest == 'Y' or quest == 'y'):
        break
25,289 | 8fbeafdcc7b393ed46802193d167e43ff5f8f3e5 | import pandas as pd
import numpy as np
import matplotlib as mp
import json
#from pandas.io.json import json_normalize
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer , TfidfVectorizer
# TfidfVectorizer is used for checking terms
from sklearn import svm
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
import random
from sklearn.model_selection import GridSearchCV
import pickle
class Sentiment:
NEGATIVE ="NEGATIVE"
NEUTRAL = "NEUTRAL"
POSITIVE = "POSITIVE"
class Review:
    """A single product review: raw text, star score and timestamp.

    The sentiment label is derived from the score at construction time."""

    def __init__(self, text, score, time):
        self.text = text
        self.score = score
        self.time = time
        self.sentiment = self.get_sentiment()

    def get_sentiment(self):
        """Map the star score to a label: <=2 negative, 3 neutral, else positive."""
        if self.score <= 2:
            return Sentiment.NEGATIVE
        if self.score == 3:
            return Sentiment.NEUTRAL
        return Sentiment.POSITIVE
class ReviewContainer:
    """Holds a list of Review objects and can class-balance them."""

    def __init__(self, reviews):
        self.reviews = reviews

    def evenly_distribute(self):
        """Shrink the positive class to the size of the negative class,
        drop neutrals, and shuffle the result in place."""
        positive = [r for r in self.reviews if r.sentiment == Sentiment.POSITIVE]
        negative = [r for r in self.reviews if r.sentiment == Sentiment.NEGATIVE]
        self.reviews = negative + positive[:len(negative)]
        random.shuffle(self.reviews)

    def get_text(self):
        """Return the review texts in container order."""
        return [r.text for r in self.reviews]

    def get_sentiment(self):
        """Return the sentiment labels in container order."""
        return [r.sentiment for r in self.reviews]
filename = 'Books_small_10000.json'
reviews =[]
# Each line of the file is one JSON review record.
with open(filename) as f:
    for line in f:
        review = json.loads(line)
        reviews.append(Review(review['reviewText'], review['overall'],review['reviewTime']))
#print(reviews[5].sentiment)
#print(len(reviews))
# PREPARE DATA TRAINING TESTING
# spliting our data for training and testing
training,test=train_test_split(reviews, test_size=0.33, random_state=42)
train_container = ReviewContainer(training)
test_container = ReviewContainer(test)
#print("Length of training data : ",len(training))
#print("Length of testing data : ",len(test))
#print(training[0].sentiment)
# Balance positives/negatives in both splits before vectorizing.
train_container.evenly_distribute()
train_x = train_container.get_text()
train_y = train_container.get_sentiment()
test_container.evenly_distribute()
test_x = test_container.get_text()
test_y = test_container.get_sentiment()
train_y.count(Sentiment.POSITIVE)
train_y.count(Sentiment.NEGATIVE)
# Bag of words vectorization
vectorizer = CountVectorizer()
# it will fit and transform your model
train_x_vectors = vectorizer.fit_transform(train_x)
test_x_vectors = vectorizer.transform(test_x)
#print(train_x[0])
#print(train_x_vector[0])
# Classification google it
# Linear SVM
clf_svm = svm.SVC(kernel='linear')
clf_svm.fit(train_x_vectors,train_y)
# NOTE(review): this second fit is redundant — it retrains the same model
# on the same data.
clf_svm.fit(train_x_vectors, train_y)
#print(test_x[0])
#print(clf_svm.predict(test_x_vectors[90]))
# DECISION TREE
clf_dec = DecisionTreeClassifier()
clf_dec.fit(train_x_vectors,train_y)
clf_dec.predict(test_x_vectors[0])
# GAUSSIAN NAIVE BAYES,
# NOTE(review): despite the heading, clf_gnb is a DecisionTreeClassifier,
# not GaussianNB (GaussianNB would need dense input via .toarray()).
clf_gnb =DecisionTreeClassifier()
clf_gnb.fit(train_x_vectors,train_y)
clf_gnb.predict(test_x_vectors[0])
# LOGISTIC REGRESSION
clf_log = LogisticRegression()
clf_log.fit(train_x_vectors,train_y)
clf_log.predict(test_x_vectors[0])
# EVALUATION every model
# Mean accuracy
'''
print(clf_svm.score(test_x_vectors,test_y))
print(clf_dec.score(test_x_vectors,test_y))
print(clf_gnb.score(test_x_vectors,test_y))
print(clf_log.score(test_x_vectors,test_y),"\n")
'''
# F1 SCORES
#'''
print(f1_score(test_y, clf_svm.predict(test_x_vectors), average=None, labels=[Sentiment.POSITIVE, Sentiment.NEGATIVE]))
print(f1_score(train_y, clf_svm.predict(train_x_vectors), average=None, labels=[Sentiment.POSITIVE, Sentiment.NEGATIVE]))
#print(f1_score(test_y, clf_dec.predict(test_x_vectors), average=None, labels=[ Sentiment.POSITIVE, Sentiment.NEGATIVE]))
#print(f1_score(test_y, clf_gnb.predict(test_x_vectors), average=None, labels=[Sentiment.POSITIVE, Sentiment.NEGATIVE]))
#print(f1_score(test_y, clf_log.predict(test_x_vectors), average=None, labels=[Sentiment.POSITIVE, Sentiment.NEGATIVE]))
#'''
#var= input()
#test_set = [var]
# Quick smoke test on hand-written phrases.
test_set=['I thouroughly enjoy this, 5 star',"bad look do not but", 'horrible waste of time','I love this book']
new_test = vectorizer.transform(test_set)
print(clf_svm.predict(new_test))
# Improving our model
'''
print(train_y.count(Sentiment.NEGATIVE))
print(train_y.count(Sentiment.POSITIVE))
print(test_y.count(Sentiment.POSITIVE))
print(test_y.count(Sentiment.NEGATIVE))
'''
# Tunning our model (with grid search)
parameters = {'kernel':('linear','rbf'), 'C':(1,4,8,16,32)}
svc=svm.SVC()
clf = GridSearchCV(svc, parameters, cv=5)
print(clf.fit(train_x_vectors,train_y))
# More improvement
# Model saving (NOTE(review): filename says "classifer"/".pk1" — typos kept
# so saving and loading stay consistent)
with open('./sentiment_classifer.pk1','wb') as f:
    pickle.dump(clf,f)
# Load model
with open('./sentiment_classifer.pk1','rb') as f:
    loaded_clf = pickle.load(f)
print(test_x[0])
print(loaded_clf.predict(test_x_vectors[0]))
25,290 | 15b26855c5cde8d8ff61680362ad9abde142f71e | from django.shortcuts import render
from django.http import HttpResponse
from location.models import Locations, Images
# Create your views here.
def location(request, id):
    """Render the detail page for a single location.

    Looks up the Locations row by primary key and gathers its Images in
    random database order, then renders ``locations.html`` with both.
    """
    loc = Locations.objects.get(pk = id)
    # Randomly ordered images for this location ('?' lets the DB shuffle).
    img = Images.objects.filter(location_id= id).order_by('?')[:]
    context = {
        'loc' : loc,
        'img' : img,
    }
    return render(request, 'locations.html', context)
|
25,291 | b7ec91a3158d7faf1876b6e008cb668272dce9e4 | # CS122: Auto-completing keyboard using Tries
# Distribution
#
# Matthew Wachs
# Autumn 2014
#
# Revised: August 2015, AMR
# December 2017, AMR
#
# Rhedintza Audryna
import os
import sys
from sys import exit
import autocorrect_shell
class EnglishDictionary(object):
    """A dictionary of English words backed by a trie, supporting
    membership tests and prefix completion."""

    def __init__(self, wordfile):
        '''
        Constructor

        Inputs:
          wordfile (string): name of the file with the words.
        '''
        self.words = TrieNode()

        with open(wordfile) as f:
            for w in f:
                w = w.strip()
                # Skip blank lines and duplicates so each node's count
                # reflects the number of *distinct* words below it.
                if w != "" and not self.is_word(w):
                    self.words.add_word(w)

    def is_word(self, w):
        '''
        Is the string a word?

        Inputs:
          w (string): the word to check

        Returns: boolean
        '''
        # Walk the trie once (the original called last_node twice).
        node = self.words.last_node(w)
        return node.final if node is not None else False

    def num_completions(self, prefix):
        '''
        How many words in the dictionary start with the specified
        prefix?

        Inputs:
          prefix (string): the prefix

        Returns: int
        '''
        node = self.words.last_node(prefix)
        return node.count if node is not None else 0

    def get_completions(self, prefix):
        '''
        Get the suffixes in the dictionary of words that start with the
        specified prefix.

        Inputs:
          prefix (string): the prefix

        Returns: list of strings.
        '''
        node = self.words.last_node(prefix)
        if node is None:
            return []
        suffixes = node.trie_to_words('')
        # The empty suffix counts when the prefix is itself a word.
        return ([''] + suffixes) if node.final else suffixes
class TrieNode(object):
    """One node of a character trie.

    Attributes:
      count: number of words stored in the subtree rooted here
      final: True if a word ends exactly at this node
      children: mapping from next character to child TrieNode
    """

    def __init__(self):
        '''
        Constructor for a TrieNode
        '''
        self.count = 0
        self.final = False
        self.children = {}

    def add_word(self, word):
        '''
        Adds a word to the trie

        Inputs:
          word (string): the word to be added
        '''
        self.count += 1
        if not word:
            self.final = True
        else:
            # Look the child up once; only allocate a new node when it is
            # missing (the original built a throwaway TrieNode per call).
            child = self.children.get(word[0])
            if child is None:
                child = self.children[word[0]] = TrieNode()
            child.add_word(word[1:])

    def last_node(self, prefix):
        '''
        Returns the node for the last letter in the prefix,
        if it exists

        Inputs:
          prefix (string): the prefix

        Returns: (object) TrieNode if exists, None otherwise
        '''
        # Iterative walk: same result as the recursive version without
        # paying a Python stack frame per character.
        node = self
        for ch in prefix:
            node = node.children.get(ch)
            if node is None:
                return None
        return node

    def trie_to_words(self, prev):
        '''
        A list of final words for a given Trie node

        Inputs:
          prev (str): the string accumulated on the path to this node

        Returns: list of strings
        '''
        one_down = []
        children = []
        for letter, node in self.children.items():
            if node.final:
                one_down.append(prev + letter)
            children += node.trie_to_words(prev + letter)
        return one_down + children
# Launch the interactive autocomplete shell when run as a script,
# backed by this module's EnglishDictionary implementation.
if __name__ == "__main__":
    autocorrect_shell.go("english_dictionary")
|
25,292 | 8b0794d23a8bcd11265d87f3f735431c5be20c14 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 20 13:30:16 2018
@author: chrisconroy
"""
import numpy as np # import numpy package for calculations
import matplotlib.pyplot as plt
# Draw 10*1000 standard-normal samples and arrange them as a 10x1000 matrix.
# BUG FIX: the original drew only (1*10) == 10 samples, which cannot be
# reshaped to (10, 1000) and raised a ValueError at runtime.
M = np.random.normal(0, 1, 10 * 1000)
M = np.reshape(M, (10, 1000))
M_Idx = np.shape(M)
def Fib(n):
    """Return the n-th Fibonacci number (Fib(0) == 0, Fib(1) == 1).

    Iterative implementation: O(n) time instead of the original naive
    double recursion, which is exponential in n. Same values for n >= 0.
    """
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a
# First ten Fibonacci numbers; index 0 keeps its zero initialiser,
# which already equals Fib(0).
Sequence = np.zeros(10)
for idx in range(1, 10):
    Sequence[idx] = Fib(idx)
25,293 | b2608f49ced44ab866e858d85059fb3a6070243a | """website URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.auth import views as auth_views
import users.views as user_views
# Top-level URL routing; MEDIA files are served by appending the static()
# patterns (development-style media serving).
urlpatterns = [
    # authentication + django admin
    url(r'^signup/$', user_views.user_signup, name='signup'),
    url(r'^login/$', user_views.user_login, name='login'),
    url(r'^logout/$', auth_views.logout, {'next_page': '/'}, name='logout'),
    url(r'^admin/', admin.site.urls),
    # main website apps
    url(r'^$', include('homepage.urls')),
    url(r'^food/', include('food.urls')),
    url(r'^beauty/', include('beauty.urls')),
    url(r'^fitness/', include('fitness.urls')),
    url(r'^travel/', include('travel.urls')),
    url(r'^search/', include('search.urls')),
    url(r'^users/', include('users.urls')),
    # additional tools (rich-text editor uploads)
    url(r'^ckeditor/', include('ckeditor_uploader.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
25,294 | c866fb602ff750fdf83ee99a26eda224a634bed5 | def loanAmort(initial, apr, time):
## ADD TOTAL CUMULATIVE PAYMENT, REORDER THE TOTAL PAID/INTEREST PAID SECTION
#print which month it is, counter starting at one stopping at term length
#calculate monthly payment for current month (counter)
#calculate interest left remaining on loan, print
#calculate principle left remaining on loan, print
pir = (apr / time)
totalInt = 0
totalPaid = 0
prinPayment = (initial / time)
remaining = initial
for x in range(1, (time + 1)):
monthlyPayment = (initial * pir) / (1 - (1 + pir)**(-time))
intThisMonth = (monthlyPayment - prinPayment)
remaining += intThisMonth
remaining -= monthlyPayment
#print month > interest this month > principle (monthly payment w/o int) > remaining balance on principle (initial - totalpaid)
if(x < 10):
print(str(x) + " " + "%.2f" % intThisMonth + " " + "%.2f" % prinPayment + " " + "%.2f" % (remaining))
else:
print(str(x) + " " + "%.2f" % intThisMonth + " " + "%.2f" % prinPayment + " " + "%.2f" % (remaining))
#print(str(prinPayment) + " is principal payment for month " + str(x))
totalPaid += monthlyPayment
totalInt += (monthlyPayment - prinPayment)
print(" ")
print("Total Paid | Total Interest Paid")
print("--------------------------------------")
print("%.2f" % totalPaid + " " + "%.2f" % totalInt)
#print total amount paid between interest and principle, then print only the interest paid on the loan
#must print month number, interest amount paid, principle paid (equals to sum of the payment) and priciple remaining at end of month. then output total amount of interest paid and total amount paid (interest and principle)
# Monthly payment is calculated using this formula:
# P = (Pv*R) / [1 - (1 + R)^(-n)] (** for exponent in python syntax)
# pir = (apr / time)
# monthlyPayment = (initial * pir) / (1 - (1 + pir)**(-time))
# where :
# Pv = Present Value (amount of loan, initial principle)
# APR = Annual percentage rate
# R = Periodic interest rate = APR/ interest periods per year (time)
# P = Monthly Payment
# n = # of interest periods for overall time period
return
# check for exceptions
# NOTE(review): inputs are not validated — non-numeric entry raises ValueError.
initialPrinciple = float(input("Please enter your initial Principle: "))
apr = float(input("Enter your APR (in percentage. eg. 12.5) Do not use '%' symbol: "))
# Convert the percentage entered by the user into a fraction.
apr = (apr / 100)
time = int(input("Enter the length of your term (in months) : "))
# Table header, then the amortization schedule itself.
print("Month | Interest Owed | Principle Owed | Principle Remaining ")
print("-------------------------------------------------------------------------------")
loanAmort(initialPrinciple, apr, time)
25,295 | 45bdf5bcff0a14453803ea39b6c764be5cb668b4 | import webapp2
from src.backup.backup_scheduler import BackupScheduler
from src.commons.config.configuration import configuration
class OrganizationBackupHandler(webapp2.RequestHandler):
    """Cron endpoint that schedules backups for every dataset."""

    def get(self):
        # Kick off backup scheduling across all datasets in one pass.
        BackupScheduler().iterate_over_all_datasets_and_schedule_backups()
# WSGI routing: hitting /cron/backup triggers the backup scheduler handler.
app = webapp2.WSGIApplication([
    ('/cron/backup', OrganizationBackupHandler)
], debug=configuration.debug_mode)
|
25,296 | 4206fb32afb5d8dac681734d30f5d712cae4dd10 | """
PISA pi stage for the calculation of earth layers and osc. probabilities
Maybe it would amke sense to split this up into a seperate earth layer stage
and an osc. stage....todo
"""
from __future__ import absolute_import, print_function, division
import numpy as np
from numba import guvectorize
from pisa import FTYPE, TARGET
from pisa.core.pi_stage import PiStage
from pisa.utils.profiler import profile
from pisa.stages.osc.pi_osc_params import OscParams
from pisa.stages.osc.layers import Layers
from pisa.stages.osc.prob3numba.numba_osc import propagate_array, fill_probs
from pisa.utils.numba_tools import WHERE
from pisa.utils.resources import find_resource
class pi_prob3(PiStage):
    """
    prob3 osc PISA Pi class

    Computes earth layers per event/bin in setup, then 3-flavour
    oscillation probabilities in compute, and folds them into the
    event weights in apply.

    Parameters
    ----------
    params
        Expected params .. ::

            detector_depth : float
            earth_model : PREM file path
            prop_height : quantity (dimensionless)
            YeI : quantity (dimensionless)
            YeO : quantity (dimensionless)
            YeM : quantity (dimensionless)
            theta12 : quantity (angle)
            theta13 : quantity (angle)
            theta23 : quantity (angle)
            deltam21 : quantity (mass^2)
            deltam31 : quantity (mass^2)
            deltacp : quantity (angle)

    **kwargs
        Other kwargs are handled by PiStage
    """
    def __init__(
        self,
        data=None,
        params=None,
        input_names=None,
        output_names=None,
        debug_mode=None,
        input_specs=None,
        calc_specs=None,
        output_specs=None,
    ):
        expected_params = (
            'detector_depth',
            'earth_model',
            'prop_height',
            'YeI',
            'YeO',
            'YeM',
            'theta12',
            'theta13',
            'theta23',
            'deltam21',
            'deltam31',
            'deltacp',
        )

        input_names = ()
        output_names = ()

        # what are the keys used from the inputs during apply
        input_apply_keys = ('weights', 'nu_flux')
        # what are keys added or altered in the calculation used during apply
        output_calc_keys = ('prob_e', 'prob_mu')
        # what keys are added or altered for the outputs during apply
        output_apply_keys = ('weights',)

        # init base class
        super().__init__(
            data=data,
            params=params,
            expected_params=expected_params,
            input_names=input_names,
            output_names=output_names,
            debug_mode=debug_mode,
            input_specs=input_specs,
            calc_specs=calc_specs,
            output_specs=output_specs,
            input_apply_keys=input_apply_keys,
            output_calc_keys=output_calc_keys,
            output_apply_keys=output_apply_keys,
        )

        assert self.input_mode is not None
        assert self.calc_mode is not None
        assert self.output_mode is not None

        # Populated in setup_function.
        self.layers = None
        self.osc_params = None

    def setup_function(self):
        """Pre-compute earth-layer densities/distances per container and
        allocate the probability output arrays."""

        # object for oscillation parameters
        self.osc_params = OscParams()

        # setup the layers
        #if self.params.earth_model.value is not None:
        earth_model = find_resource(self.params.earth_model.value)
        YeI = self.params.YeI.value.m_as('dimensionless')
        YeO = self.params.YeO.value.m_as('dimensionless')
        YeM = self.params.YeM.value.m_as('dimensionless')
        prop_height = self.params.prop_height.value.m_as('km')
        detector_depth = self.params.detector_depth.value.m_as('km')
        self.layers = Layers(earth_model, detector_depth, prop_height)
        self.layers.setElecFrac(YeI, YeO, YeM)

        # set the correct data mode
        self.data.data_specs = self.calc_specs

        # --- calculate the layers ---
        if self.calc_mode == 'binned':
            # speed up calculation by adding links
            # as layers don't care about flavour
            self.data.link_containers('nu', ['nue_cc', 'numu_cc', 'nutau_cc',
                                             'nue_nc', 'numu_nc', 'nutau_nc',
                                             'nuebar_cc', 'numubar_cc', 'nutaubar_cc',
                                             'nuebar_nc', 'numubar_nc', 'nutaubar_nc'])

        for container in self.data:
            self.layers.calcLayers(container['true_coszen'].get('host'))
            container['densities'] = self.layers.density.reshape((container.size, self.layers.max_layers))
            container['distances'] = self.layers.distance.reshape((container.size, self.layers.max_layers))

        # don't forget to un-link everything again
        self.data.unlink_containers()

        # --- setup empty arrays ---
        # Probabilities only depend on nu vs nubar, so link accordingly.
        if self.calc_mode == 'binned':
            self.data.link_containers('nu', ['nue_cc', 'numu_cc', 'nutau_cc',
                                             'nue_nc', 'numu_nc', 'nutau_nc'])
            self.data.link_containers('nubar', ['nuebar_cc', 'numubar_cc', 'nutaubar_cc',
                                                'nuebar_nc', 'numubar_nc', 'nutaubar_nc'])
        for container in self.data:
            # Full 3x3 oscillation probability matrix per event/bin.
            container['probability'] = np.empty((container.size, 3, 3), dtype=FTYPE)
        self.data.unlink_containers()

        # setup more empty arrays (per-flavour slices filled in compute)
        for container in self.data:
            container['prob_e'] = np.empty((container.size), dtype=FTYPE)
            container['prob_mu'] = np.empty((container.size), dtype=FTYPE)

    def calc_probs(self, nubar, e_array, rho_array, len_array, out):
        ''' wrapper to execute osc. calc '''
        propagate_array(self.osc_params.dm_matrix, # pylint: disable = unexpected-keyword-arg, no-value-for-parameter
                        self.osc_params.mix_matrix_complex,
                        self.osc_params.nsi_eps,
                        nubar,
                        e_array.get(WHERE),
                        rho_array.get(WHERE),
                        len_array.get(WHERE),
                        out=out.get(WHERE)
                       )
        out.mark_changed(WHERE)

    @profile
    def compute_function(self):
        """Refresh mixing parameters and fill prob_e / prob_mu arrays."""

        # set the correct data mode
        self.data.data_specs = self.calc_specs

        if self.calc_mode == 'binned':
            # speed up calculation by adding links
            self.data.link_containers('nu', ['nue_cc', 'numu_cc', 'nutau_cc',
                                             'nue_nc', 'numu_nc', 'nutau_nc'])
            self.data.link_containers('nubar', ['nuebar_cc', 'numubar_cc', 'nutaubar_cc',
                                                'nuebar_nc', 'numubar_nc', 'nutaubar_nc'])

        # --- update mixing params ---
        self.osc_params.theta12 = self.params.theta12.value.m_as('rad')
        self.osc_params.theta13 = self.params.theta13.value.m_as('rad')
        self.osc_params.theta23 = self.params.theta23.value.m_as('rad')
        self.osc_params.dm21 = self.params.deltam21.value.m_as('eV**2')
        self.osc_params.dm31 = self.params.deltam31.value.m_as('eV**2')
        self.osc_params.deltacp = self.params.deltacp.value.m_as('rad')

        for container in self.data:
            self.calc_probs(container['nubar'],
                            container['true_energy'],
                            container['densities'],
                            container['distances'],
                            out=container['probability'],
                           )

        # the following is flavour specific, hence unlink
        self.data.unlink_containers()

        for container in self.data:
            # initial electrons (0)
            fill_probs(container['probability'].get(WHERE),
                       0,
                       container['flav'],
                       out=container['prob_e'].get(WHERE),
                      )
            # initial muons (1)
            fill_probs(container['probability'].get(WHERE),
                       1,
                       container['flav'],
                       out=container['prob_mu'].get(WHERE),
                      )

            container['prob_e'].mark_changed(WHERE)
            container['prob_mu'].mark_changed(WHERE)

    @profile
    def apply_function(self):
        # update the outputted weights: fold flux * probability into them
        for container in self.data:
            apply_probs(container['nu_flux'].get(WHERE),
                        container['prob_e'].get(WHERE),
                        container['prob_mu'].get(WHERE),
                        out=container['weights'].get(WHERE))
            container['weights'].mark_changed(WHERE)
# vectorized function to apply (flux * prob)
# must be outside class
# vectorized function to apply (flux * prob)
# must be outside class
# Pick the guvectorize signature matching PISA's configured float precision.
if FTYPE == np.float64:
    signature = '(f8[:], f8, f8, f8[:])'
else:
    signature = '(f4[:], f4, f4, f4[:])'

@guvectorize([signature], '(d),(),()->()', target=TARGET)
def apply_probs(flux, prob_e, prob_mu, out):
    # weight *= flux_e * P(e -> x) + flux_mu * P(mu -> x), in place.
    out[0] *= (flux[0] * prob_e) + (flux[1] * prob_mu)
|
25,297 | 5b72250583cd073d1a1efe085c1c3cf14b9ad94d | import requests
from bs4 import BeautifulSoup
import pandas as pd
# Fetch the "in theaters" listing from Yahoo Movies Taiwan.
response = requests.get('https://movies.yahoo.com.tw/movie_intheaters.html')
# print(response.text)
# print(response.status)
rank=[]
name=[]
soup = BeautifulSoup(response.text, "lxml")
# Chinese movie titles (<li> elements)
chinese_name =soup.find_all("li")
# English movie titles (<a> elements)
# NOTE(review): english_name is collected but never used below.
english_name =soup.find_all("a")
#
for index in chinese_name:
    if index.div != None:
        # print(index.div['class'])
        # Only <li> entries whose first <div> carries class "num" are
        # ranked movie rows; grab rank from the div, title from the span.
        if (index.div['class']==['num']):
            rank.append(index.div.text)
            name.append(index.span.text)
            # print('電影名稱: '+str(index.span.text))
data = {
    'rank': rank,
    'name': name,
}
movie_df = pd.DataFrame(data)
# Write the ranking to a CSV file in the current directory (Big5-encoded).
movie_df.to_csv('電影排行.csv', encoding = 'big5')
print(movie_df)
|
25,298 | c2079fd37d74e894a102c8710ae0f14c20f718e3 | """
Test /answer
"""
from unittest.mock import patch
from django.urls.base import reverse_lazy
from rest_framework import status
from breathecode.tests.mocks import (
GOOGLE_CLOUD_PATH,
apply_google_cloud_client_mock,
apply_google_cloud_bucket_mock,
apply_google_cloud_blob_mock,
)
from ..mixins import MediaTestCase
class MediaTestSuite(MediaTestCase):
    """Tests for the media:info_id_resolution endpoint (GET)."""

    @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())
    @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())
    @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())
    def test_info_id_resolution_without_auth(self):
        """Unauthenticated requests are rejected with 401."""
        url = reverse_lazy('media:info_id_resolution', kwargs={'media_id': 1})
        response = self.client.get(url)

        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())
    @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())
    @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())
    def test_info_id_resolution_wrong_academy(self):
        """An Academy header alone (no authentication) still yields 401."""
        url = reverse_lazy('media:info_id_resolution', kwargs={'media_id': 1})
        response = self.client.get(url, **{'HTTP_Academy': 1})

        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())
    @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())
    @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())
    def test_info_id_resolution_without_capability(self):
        """Authenticated user lacking read_media_resolution gets 403."""
        self.headers(academy=1)
        url = reverse_lazy('media:info_id_resolution', kwargs={'media_id': 1})
        self.generate_models(authenticate=True)
        response = self.client.get(url)
        json = response.json()

        self.assertEqual(
            json, {
                'detail': "You (user: 1) don't have this capability: read_media_resolution for academy 1",
                'status_code': 403
            })
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())
    @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())
    @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())
    def test_info_id_without_data(self):
        """A valid request for a nonexistent media id yields 404."""
        self.headers(academy=1)
        model = self.generate_models(authenticate=True,
                                     profile_academy=True,
                                     capability='read_media_resolution',
                                     role='potato')
        url = reverse_lazy('media:info_id_resolution', kwargs={'media_id': 1})
        response = self.client.get(url)
        json = response.json()

        self.assertEqual(json, {'detail': 'media-not-found', 'status_code': 404})
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        self.assertEqual(self.all_media_dict(), [])

    @patch(GOOGLE_CLOUD_PATH['client'], apply_google_cloud_client_mock())
    @patch(GOOGLE_CLOUD_PATH['bucket'], apply_google_cloud_bucket_mock())
    @patch(GOOGLE_CLOUD_PATH['blob'], apply_google_cloud_blob_mock())
    def test_info_id_resolution_get_with_id(self):
        """Happy path: existing media returns its resolutions list."""
        self.headers(academy=1)
        model = self.generate_models(authenticate=True,
                                     media_resolution=True,
                                     media=True,
                                     capability='read_media_resolution',
                                     role='potato',
                                     profile_academy=True,
                                     media_kwargs={'hash': 'abc'},
                                     media_resolution_kwargs={'hash': 'abc'})
        model_dict = self.remove_dinamics_fields(model['media_resolution'].__dict__)
        url = reverse_lazy('media:info_id_resolution', kwargs={'media_id': model['media'].id})
        response = self.client.get(url)
        json = response.json()

        expected = [{
            'id': model['media_resolution'].id,
            'hash': model['media'].hash,
            'width': model['media_resolution'].width,
            'height': model['media_resolution'].height,
            'hits': model['media_resolution'].hits,
        }]

        self.assertEqual(json, expected)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(self.count_media_resolution(), 1)
        self.assertEqual(self.get_media_resolution_dict(1), model_dict)
|
25,299 | e7f661ad2d24fd454d84916eff5b877c433628d5 | #!/usr/bin/env python2
# Fade each half of lamp to 2 random colors.
import opc, time, colorsys, random
# Total pixels on the strip driven through the OPC server.
numpixels = 47

client = opc.Client('localhost:7890')

# Push the initial all-off frame twice so the server interpolates from black.
pixels = [ (0,0,0) ] * numpixels
client.put_pixels(pixels)
client.put_pixels(pixels)

# Endlessly pick two random colors and paint one onto each half of the strip.
# NOTE(review): indices 22 and 46 are never assigned (range(0,22) stops at 21,
# range(23,46) stops at 45) — confirm whether those pixels should stay dark.
while 1:
    r = random.randint(0, 255)
    g = random.randint(0, 255)
    b = random.randint(0, 255)
    r2 = random.randint(0, 255)
    g2 = random.randint(0, 255)
    b2 = random.randint(0, 255)
    for i in range(0,22):
        pixels[i] = (r,g,b)
    for i in range(23,46):
        pixels[i] = (r2,g2,b2)
    client.put_pixels(pixels)
    time.sleep(4)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.