blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e5b5f9e58967eed42a29cb3a831bbdeee870ef8d | 54dac70ba80063250ef66bf973ce14226514aa9a | /releasetools/flare_img_from_target_files | 6864b1acb4c17a9e89353fd77e3e65b8c9a0dbea | [] | no_license | ravikirancg/A21 | 22e5ffa994be1b46959793fbf1692cd507903af3 | e0509d617fcb9902ee67258da750a0f9bee690b3 | refs/heads/master | 2016-09-05T08:47:36.598970 | 2013-06-03T13:23:16 | 2013-06-03T13:23:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,382 | #!/usr/bin/env python
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Given a target-files zipfile, produces an image zipfile suitable for
use with 'fastboot update'.
Usage: img_from_target_files [flags] input_target_files output_image_zip
-b (--board_config) <file>
Deprecated.
"""
import sys

# Refuse to run on interpreters older than Python 2.4.
if sys.hexversion < 0x02040000:
  print >> sys.stderr, "Python 2.4 or newer is required."
  sys.exit(1)

import errno
import os
import re
import shutil
import subprocess
import tempfile
import zipfile

# missing in Python 2.4 and before
if not hasattr(os, "SEEK_SET"):
  os.SEEK_SET = 0

# Device-specific build helpers (Run, CheckSize, Add*...).
import trebon_common as common

# Global option/state holder shared with the common module.
OPTIONS = common.OPTIONS
def AddUserdata(output_zip):
  """Create an empty userdata image and store it in output_zip.

  Uses mkuserimg.sh for ext* filesystems (per the target's fstab entry for
  /data) and falls back to mkyaffs2image otherwise.
  """
  print "creating userdata.img..."

  # The name of the directory it is making an image out of matters to
  # mkyaffs2image.  So we create a temp dir, and within it we create an
  # empty dir named "data", and build the image from that.
  temp_dir = tempfile.mkdtemp()
  user_dir = os.path.join(temp_dir, "data")
  os.mkdir(user_dir)
  img = tempfile.NamedTemporaryFile()

  build_command = []
  if OPTIONS.info_dict["fstab"]["/data"].fs_type.startswith("ext"):
    build_command = ["mkuserimg.sh",
                     user_dir, img.name,
                     OPTIONS.info_dict["fstab"]["/data"].fs_type, "data"]
    # Pass an explicit partition size when the target defines one.
    if "userdata_size" in OPTIONS.info_dict:
      build_command.append(str(OPTIONS.info_dict["userdata_size"]))
  else:
    build_command = ["mkyaffs2image", "-f"]
    extra = OPTIONS.info_dict.get("mkyaffs2_extra_flags", None)
    if extra:
      build_command.extend(extra.split())
    build_command.append(user_dir)
    build_command.append(img.name)

  p = common.Run(build_command)
  p.communicate()
  assert p.returncode == 0, "build userdata.img image failed"

  common.CheckSize(img.name, "userdata.img", OPTIONS.info_dict)
  output_zip.write(img.name, "userdata.img")
  # NamedTemporaryFile removes the image file on close.
  img.close()
  os.rmdir(user_dir)
  os.rmdir(temp_dir)
def AddSystem(output_zip):
"""Turn the contents of SYSTEM into a system image and store it in
output_zip."""
print "creating system.img..."
img = tempfile.NamedTemporaryFile()
# The name of the directory it is making an image out of matters to
# mkyaffs2image. It wants "system" but we have a directory named
# "SYSTEM", so create a symlink.
try:
os.symlink(os.path.join(OPTIONS.input_tmp, "SYSTEM"),
os.path.join(OPTIONS.input_tmp, "system"))
except OSError, e:
if (e.errno == errno.EEXIST):
pass
build_command = []
if OPTIONS.info_dict["fstab"]["/system"].fs_type.startswith("ext"):
build_command = ["mkuserimg.sh",
os.path.join(OPTIONS.input_tmp, "system"), img.name,
OPTIONS.info_dict["fstab"]["/system"].fs_type, "system"]
if "system_size" in OPTIONS.info_dict:
build_command.append(str(OPTIONS.info_dict["system_size"]))
else:
build_command = ["mkyaffs2image", "-f"]
extra = OPTIONS.info_dict.get("mkyaffs2_extra_flags", None)
if extra:
build_command.extend(extra.split())
build_command.append(os.path.join(OPTIONS.input_tmp, "system"))
build_command.append(img.name)
p = common.Run(build_command)
p.communicate()
assert p.returncode == 0, "build system.img image failed"
img.seek(os.SEEK_SET, 0)
data = img.read()
img.close()
common.CheckSize(data, "system.img", OPTIONS.info_dict)
common.ZipWriteStr(output_zip, "system.img", data)
def CopyInfo(output_zip):
  """Copy android-info.txt from the unpacked target-files into the output."""
  info_path = os.path.join(OPTIONS.input_tmp, "OTA", "android-info.txt")
  output_zip.write(info_path, "android-info.txt")
def main(argv):
  """Parse flags, then build the fastboot image zip from a target-files zip."""

  def option_handler(o, a):
    # Accept but ignore the deprecated -b/--board_config flag.
    if o in ("-b", "--board_config"):
      pass      # deprecated
    else:
      return False
    return True

  args = common.ParseOptions(argv, __doc__,
                             extra_opts="b:",
                             extra_long_opts=["board_config="],
                             extra_option_handler=option_handler)

  # Exactly two positional args: input target-files zip and output image zip.
  if len(args) != 2:
    common.Usage(__doc__)
    sys.exit(1)

  OPTIONS.input_tmp = common.UnzipTemp(args[0])
  input_zip = zipfile.ZipFile(args[0], "r")
  OPTIONS.info_dict = common.LoadInfoDict(input_zip)

  output_zip = zipfile.ZipFile(args[1], "w", compression=zipfile.ZIP_DEFLATED)

  # Assemble each image into the output zip.
  common.AddBoot(output_zip, OPTIONS.info_dict)
  common.AddRecovery(output_zip, OPTIONS.info_dict)
  AddSystem(output_zip)
  AddUserdata(output_zip)
  CopyInfo(output_zip)

  print "cleaning up..."
  output_zip.close()
  shutil.rmtree(OPTIONS.input_tmp)

  print "done."


if __name__ == '__main__':
  try:
    main(sys.argv[1:])
  except common.ExternalError, e:
    # Print a readable error for expected build failures.
    print
    print "   ERROR: %s" % (e,)
    print
    sys.exit(1)
| [
"ravikirancg1@gmail.com"
] | ravikirancg1@gmail.com | |
8904beb072f5f0d6c02deb340ad9e1bde96aa958 | 6509c398816baffafa4a1fcfb2855e1bc9d1609b | /sistema-operacional/diretorios/pathlib/exemplos/pathlib-4.py | 7986086bea5a528b646fbaa9b9c5e9fc10c68789 | [] | no_license | marcoswebermw/learning-python | 6b0dfa81a0d085f4275865dce089d9b53b494aa5 | 931ed2985b8a3fec1a48c660c089e290aaac123d | refs/heads/master | 2021-10-27T21:19:46.013020 | 2019-04-19T23:25:46 | 2019-04-19T23:25:46 | 87,670,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | # Listando só os arquivos de um diretório.
from pathlib import Path

# List only the files (not subdirectories) in the current directory.
# A plain for-loop is used instead of a list comprehension: building a
# throwaway list of None just for print() side effects is an anti-pattern.
directory = Path('.')
for entry in directory.iterdir():
    if entry.is_file():
        print(entry)
"marcoswebermw@gmail.com"
] | marcoswebermw@gmail.com |
ca2213d6fef6166939b64addb386fc49ba7aa2d8 | 47a5a4642fd15b8c16e7b295807b4aac6c830d20 | /_python/project/basics/list tuple set.py | 106541a403da2648f11da3571800432a348ecba0 | [] | no_license | hakalar/network_automation | 6ed03b1c5d7aad53157cbf241f003c2c5225d8cf | 831a3e5cfe467aeafabe64df9cfb504a87254e3f | refs/heads/master | 2021-07-07T07:17:06.756647 | 2021-05-03T11:15:40 | 2021-05-03T11:15:40 | 239,247,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,060 | py | # Empty Lists
# Empty lists
empty_list = []
empty_list = list()

# Empty tuples
empty_tuple = ()
empty_tuple = tuple()

# Empty sets -- careful: {} is an empty DICT, not a set
empty_set = set()

########################
# Lists are mutable: both names refer to the same object
list_1 = ['History', 'Math', 'Physics', 'CompSci']
list_2 = list_1
print(list_1)
print(list_2)

# changing list_1 also changes list_2 (same underlying object)
list_1[0] = 'Art'
print(list_1)
print(list_2)

###########################
# Tuples are immutable
tuple_1 = ('History', 'Math', 'Physics', 'CompSci')
tuple_2 = tuple_1
print(tuple_1)
print(tuple_2)

# Item assignment on a tuple raises TypeError; demonstrate without crashing
try:
    tuple_1[0] = 'Art'
except TypeError as e:
    print('tuples are immutable:', e)

###########################
# Sets - unordered and no duplicates:
# the duplicate 'Math' below is silently dropped
cs_courses = {'History', 'Math', 'Physics', 'CompSci', 'Math'}
print(cs_courses)

cs_courses = {'History', 'Math', 'Physics', 'CompSci'}
art_courses = {'History', 'Math', 'Art', 'Design'}
print(cs_courses.intersection(art_courses))
print(cs_courses.difference(art_courses))
print(cs_courses.union(art_courses))

######################
# List operations
courses = ['History', 'Math', 'Physics', 'CompSci']
courses_2 = ['Art', 'Education']
# extend() adds each element; append(courses_2) would nest the whole list
# (and later sort() would then fail comparing list to str)
courses.extend(courses_2)
courses.remove('Math')
# pop() takes an index, not a value: pop('Physics') is a TypeError
courses.pop(courses.index('Physics'))

nums = [1, 5, 6, 9, 3, 4]

courses.reverse()
courses.sort()
courses.sort(reverse=True)

print(max(nums))
print(min(nums))
print(sum(nums))

# sorted() returns a new list, leaving the original untouched
sorted_courses = sorted(courses)

# index() raises a ValueError if 'Art' does not exist
print(courses.index('Art'))

# membership test - returns True or False, useful in conditionals
print('Art' in courses)

for item in courses:
    print(item)

# enumerate yields (index, value) pairs
for index, item in enumerate(courses):
    print(index, item)

# start=1 numbers from 1 instead of 0
for index, item in enumerate(courses, start=1):
    print(index, item)

course_str = ', '.join(courses)
print(course_str)

new_list = course_str.split(', ')
print(new_list)
"rhakala@gmail.com"
] | rhakala@gmail.com |
3a17bed6668f80c35ddbbc13374f04955a26cb23 | 2891cbe8156de37575f951e8391a8bbde2405f29 | /boot.py | 4b86a371e56113c0d00ce68e8bfbe716484b1b9c | [] | no_license | ayoy/micropython-led-controller | ceab70dfbad0227c300dd7e80f6ec9045dda2248 | 7e630ccd5eacfaba46d9c315ad1d0c00e9aaf074 | refs/heads/master | 2021-01-24T01:52:35.039757 | 2018-02-25T10:26:32 | 2018-02-25T10:26:32 | 122,825,883 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 979 | py | import machine
from keychain import WLAN_SSID, WLAN_PASSKEY
from helpers import setup_rtc

# (ssid, passkey) pairs this board is allowed to join.
known_nets = [(WLAN_SSID, WLAN_PASSKEY)]

if machine.reset_cause() != machine.SOFT_RESET:  # needed to avoid losing connection after a soft reboot
    from network import WLAN
    wl = WLAN()
    # save the default ssid and auth so AP fallback can reuse them
    original_ssid = wl.ssid()
    original_auth = wl.auth()
    wl.mode(WLAN.STA)
    available_nets = wl.scan()
    # Intersect the networks in range with the ones we know credentials for.
    nets = frozenset([e.ssid for e in available_nets])
    known_nets_names = frozenset([e[0] for e in known_nets])
    net_to_use = list(nets & known_nets_names)
    try:
        net_to_use = net_to_use[0]  # IndexError here when no known net is in range
        pwd = dict(known_nets)[net_to_use]
        sec = [e.sec for e in available_nets if e.ssid == net_to_use][0]
        wl.connect(net_to_use, (sec, pwd), timeout=10000)
        setup_rtc()
    # Bare except is deliberate: ANY failure (no known net, bad credentials,
    # connect timeout) falls back to access-point mode with the defaults.
    except:
        wl.init(mode=WLAN.AP, ssid=original_ssid, auth=original_auth, channel=6, antenna=WLAN.INT_ANT)

# Hand control to the application script after boot configuration.
machine.main('main.py')
"dominik@kapusta.cc"
] | dominik@kapusta.cc |
a5fa5c8e3314f669eaee16227e3c06770520da90 | f7525456f5451926282cba840dae1c4adba3573f | /Moduli/second.py | 4689e122237787d5b945fc6e503329138dff93b8 | [
"MIT"
] | permissive | AlPus108/Python_lessons | 1f2ac6e1ce5a344e0c900249ef7864d257424436 | 0e96117d9a8b76fd651e137fc126ddedaa6accd9 | refs/heads/master | 2021-02-27T06:37:10.239125 | 2020-05-14T21:27:29 | 2020-05-14T21:27:29 | 245,588,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,613 | py | # 3 Пришли из файла first.py
# С самого начала делаем импорт модуля first
# 3: reached from first.py.
# Importing the module executes first.py's top-level code immediately.
import first

print('Top level is second.py')

# Call a function from the imported module.
first.function_1()

# Same __name__ check as in first.py, with "first" replaced by "second".
if __name__ == '__main__':  # this module was run directly
    print('second.py is being run directly')
else:  # this module was imported by another module
    print('second.py has been imported')
# --> 4 first.py
# 5: back from first.py.
# Running this module (second.py) directly prints:
#   Top level in first.py
#   first.py has been imported
#   Top level is second.py
#   function_1() from first.py
#   second.py is being run directly
#
# Why: importing first runs first.py's top-level print, giving
# "Top level in first.py".  first.py then checks its own __name__;
# since it was imported rather than executed, it prints
# "first.py has been imported".  Control returns here: this module
# prints "Top level is second.py" and calls function_1 from first,
# which reports "function_1() from first.py".  Finally this module's
# own __name__ check runs; because second.py was executed directly,
# __name__ == '__main__' and the first branch prints
# "second.py is being run directly".
# 6 --> 4_name_main
| [
"alex.pustovoy@gmail.com"
] | alex.pustovoy@gmail.com |
cc5a061ef4e8cefaa17ca4bae5fbf52fa9173d90 | 01e18a939fc96cf20653bb950667c87299b4837f | /bulletPointAdder.py | 8f61af279197257c8ee55a73f132defc771595c9 | [] | no_license | MasterBroda/automate-boring-stuff-answers | 3eb6f21447cdb62fe06f2cc3fba9108f4f80aec8 | bb9b230c99f8e20b8b4f5be89a7b0134d03afcfb | refs/heads/master | 2021-01-12T14:07:23.315971 | 2016-10-06T15:31:23 | 2016-10-06T15:31:23 | 70,167,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | import pyperclip
text = pyperclip.paste()
print text
l = list()
l = text.split("\n")
print "~~~~~~~~~~"
text = "".join(["* " +i for i in l])
print text
pyperclip.copy(text) | [
"gabsubercool@gmail.com"
] | gabsubercool@gmail.com |
d42e7fa9d71f6b303b09b0127e5624b5af3ce7db | 2290eed5c494202beea0da1b9257a38b7a4403d2 | /script/[83]删除排序链表中的重复元素.py | 626a835b78ae9354dd10fbd846316d0fbec6778a | [] | no_license | DSXiangLi/Leetcode_python | 4b1c9848ea774955fb252b9bd796ba8d46ad728e | a2ef0ba5e86405dbf68dbc1ffeb086c7d864db1d | refs/heads/main | 2022-09-01T04:34:04.260402 | 2022-08-20T01:12:27 | 2022-08-20T01:12:27 | 445,347,891 | 1 | 0 | null | 2022-07-23T06:32:14 | 2022-01-07T00:15:20 | Python | UTF-8 | Python | false | false | 1,070 | py | # 给定一个已排序的链表的头 head , 删除所有重复的元素,使每个元素只出现一次 。返回 已排序的链表 。
#
#
#
# 示例 1:
#
#
# 输入:head = [1,1,2]
# 输出:[1,2]
#
#
# 示例 2:
#
#
# 输入:head = [1,1,2,3,3]
# 输出:[1,2,3]
#
#
#
#
# 提示:
#
#
# 链表中节点数目在范围 [0, 300] 内
# -100 <= Node.val <= 100
# 题目数据保证链表已经按升序 排列
#
# Related Topics 链表
# 👍 827 👎 0
# leetcode submit region begin(Prohibit modification and deletion)
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def deleteDuplicates(self, head: ListNode) -> ListNode:
        """Remove consecutive duplicate values from a sorted linked list.

        The list is sorted, so equal values are always adjacent; a single
        pass that splices out repeated neighbours suffices. Returns the
        (possibly unchanged) head.
        """
        node = head
        while node is not None and node.next is not None:
            if node.next.val == node.val:
                # Splice out the duplicate but stay on this node,
                # in case more copies of the same value follow.
                node.next = node.next.next
            else:
                node = node.next
        return head
# leetcode submit region end(Prohibit modification and deletion)
| [
"37739462+DSXiangLi@users.noreply.github.com"
] | 37739462+DSXiangLi@users.noreply.github.com |
a2dc346969291165e642caa49c93d0a72350c328 | 71ca3a86a3e1a77e0b5036278026e27744fac488 | /env/bin/easy_install-2.7 | 8019eedd5b0cfc489bc426737fc40b212c3bd85a | [] | no_license | Harbinger55555/Thompsons-Lexical-Analyzer | 45d41eb1441bfea15dfa18dcba16bfda7614c79e | 179bbc71cbc27964b6fc6ebed5177c7e162251c2 | refs/heads/master | 2020-05-01T06:24:12.662955 | 2019-03-23T19:03:27 | 2019-03-23T19:03:27 | 177,329,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | 7 | #!/home/codio/workspace/pr01/python001/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"zaykha.kyawsan@gmail.com"
] | zaykha.kyawsan@gmail.com |
d9efe7caf1b897b057e1ac0d96f4ba7c6b25aa7d | f67fa61e0291934fdf8f895a44bc455690462d74 | /mx_shop/extra_apps/xadmin/plugins/ueditor.py | 9625caaecf2d299d39031ee25acaf8b362fa9952 | [] | no_license | wuxinsuoqiu/myfirst_git | bbbaf16ebd99e0f3e539607ea2c577a88998dc8e | 96a4fae6c8326923ddcd9fe99124df3ee43485ed | refs/heads/master | 2020-03-27T12:35:30.737730 | 2018-08-29T06:39:53 | 2018-08-29T06:39:53 | 146,555,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,435 | py | # _*_ coding: utf-8 _*_
__author__ = 'mk'
__date__ = '2018/8/12 16:56'
import xadmin
from xadmin.views import BaseAdminPlugin, CreateAdminView, ModelFormAdminView, UpdateAdminView
from DjangoUeditor.models import UEditorField
from DjangoUeditor.widgets import UEditorWidget
from django.conf import settings
class XadminUEditorWidget(UEditorWidget):
    """UEditorWidget variant used inside xadmin.

    Keeps the raw options around and suppresses the widget's own Media js,
    because UeditorPlugin injects the editor scripts once per page instead.
    """
    def __init__(self,**kwargs):
        self.ueditor_options=kwargs
        # NOTE(review): this assigns on the shared Media class, not the
        # instance -- presumably intentional, to stop Django's form-media
        # machinery from emitting the scripts per field; confirm.
        self.Media.js = None
        super(XadminUEditorWidget,self).__init__(kwargs)
class UeditorPlugin(BaseAdminPlugin):
    """xadmin plugin that renders UEditorField model fields with UEditor."""

    def get_field_style(self, attrs, db_field, style, **kwargs):
        # Only intercept fields explicitly styled as 'ueditor'.
        if style == 'ueditor':
            if isinstance(db_field, UEditorField):
                # Merge the field's configured ueditor settings with any
                # widget attrs and hand them to our xadmin-aware widget.
                widget = db_field.formfield().widget
                param = {}
                param.update(widget.ueditor_settings)
                param.update(widget.attrs)
                return {'widget': XadminUEditorWidget(**param)}
        return attrs

    def block_extrahead(self, context, nodes):
        # Inject the UEditor scripts once per page, from our own static dir.
        js = '<script type="text/javascript" src="%s"></script>' % (settings.STATIC_URL + "ueditor/ueditor.config.js")  # served from our own static directory
        js += '<script type="text/javascript" src="%s"></script>' % (settings.STATIC_URL + "ueditor/ueditor.all.js")  # served from our own static directory
        nodes.append(js)

# Activate the plugin on both the edit and the add admin views.
xadmin.site.register_plugin(UeditorPlugin, UpdateAdminView)
xadmin.site.register_plugin(UeditorPlugin, CreateAdminView)
"1468585071@qq.com"
] | 1468585071@qq.com |
36513be124791d00ea08a18ec72dbd64a86346f0 | 21d7a6d0566b07dc4103a97be1c91e05e6784128 | /configs/urls.py | 86638c81a91a2d2b7c2a08a8d65f35947abdd0ae | [] | no_license | simonw/squirrelspotter | 60c28dfcf3851676a37457db6e28704f2dfdcdf2 | 4d56e3843b6de9caf23f180b0cee19f4cd0a6b73 | refs/heads/master | 2021-03-19T18:09:02.960853 | 2012-09-25T14:56:44 | 2012-09-25T14:56:44 | 5,900,675 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 977 | py | from django.conf.urls import patterns, url, include
from django.conf import settings

from django.contrib import admin
admin.autodiscover()

# Route table for the squirrel-spotter app
# (legacy string-view patterns() style, Django < 1.8).
urlpatterns = patterns('',
    url(r'^$', 'spotter.views.index'),
    # Login / logout flow.
    url(r'^login/$', 'spotter.views.login'),
    url(r'^login2/$', 'spotter.views.login2'),
    url(r'^credits/$', 'spotter.views.credits'),
    url(r'^debug/$', 'spotter.views.debug'),
    url(r'^scores/$', 'spotter.views.scores'),
    url(r'^login/done/$', 'spotter.views.done'),
    url(r'^logout/$', 'spotter.views.logout'),
    # Squirrel spotting.
    url(r'^spot/(\d+)/$', 'spotter.views.spot'),
    url(r'^spotted/$', 'spotter.views.spotted'),
    # Inbound SMS webhook from Twilio.
    url(r'^twilio/sms/$', 'spotter.views.twilio_sms'),
    url(r'^robots\.txt$', 'spotter.views.robots_txt'),
    url(r'^channel\.html$', 'spotter.views.channel_html'),
    # Serve static files directly (development-style, not for production).
    url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {
        'document_root': settings.STATIC_ROOT
    }),
    (r'^admin-peeps/', include(admin.site.urls)),
)
| [
"simon@lanyrd.com"
] | simon@lanyrd.com |
fae106553bb36f13c29797fcc875a07f47f4c0c6 | ea6444f69fd3d8015c0ef31b7e8846af9529caef | /ebooksapi/ebooksapi/urls.py | 6d34a8dfc611e45ca9e5e65ee992a8e9037540d1 | [
"MIT"
] | permissive | hovsepyanvahe/django_rest_api_ebooks | 265468817e45cbe8046e35d0c9c907f584366ef1 | 5859aec588e0bcded37edbec2ada0b4b89c887a0 | refs/heads/master | 2023-08-20T04:11:05.581038 | 2020-04-11T18:51:15 | 2020-04-11T18:51:15 | 254,921,279 | 0 | 0 | MIT | 2021-09-22T18:52:01 | 2020-04-11T17:37:59 | Python | UTF-8 | Python | false | false | 173 | py | from django.contrib import admin
from django.urls import path, include

# Project-level routes: the Django admin plus the ebooks REST API.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('api/', include('ebooks.api.urls')),
]
| [
"vahikhovsepyan95@gmail.com"
] | vahikhovsepyan95@gmail.com |
965af6ee43cba4c918c74cea08cb02b1bda6aa7c | 6040c08fc1ae8a11e1c4a93f5d95bd45018118f8 | /crawling.py | b6ca9595284455f8c408e8f92612cf89d81807c9 | [] | no_license | tilthend/kjkintheworld | e7051c4094100507587058b630a750fe51ba6bf8 | 157ce27668fe9b87012de6304a046cce4127a66e | refs/heads/main | 2023-04-16T04:31:59.537070 | 2021-04-28T11:00:06 | 2021-04-28T11:00:06 | 318,044,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 866 | py | import requests
from bs4 import BeautifulSoup

# Identify as a desktop browser; Naver rejects the default requests UA.
headers = {'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}
data = requests.get('https://movie.naver.com/movie/sdb/rank/rmovie.nhn?sel=pnt&date=20200303',headers=headers)

soup = BeautifulSoup(data.text, 'html.parser')

# One <tr> per chart row; divider rows have no title link, so skip them.
# (Only one select is needed -- the old duplicate `points` query was unused.)
trs = soup.select('#old_content > table > tbody > tr')

for tr in trs:
    a_tag = tr.select_one('td.title > div > a')
    if a_tag is not None:
        rank = tr.select_one('td:nth-child(1) > img')['alt']
        title = a_tag.text
        stars = tr.select_one('td.point').text
        print(rank, title, stars)
| [
"noreply@github.com"
] | noreply@github.com |
6996aae73bd4990f83077d72224280151bb76dcb | 436517f8ab142a94944cc50e93ebcb3596b910a6 | /CodeUp/기초 100제/6023_17390916(AC).py | 259e83e4698bb2864c18ca74d045d428c84aa1be | [] | no_license | uni2237/Algorithm | 143267706f74404602bf5b166f4d108496a459d0 | 28408cfc9b8f8dd86bd437d30bb2a4f9c6d408f2 | refs/heads/master | 2023-08-21T05:11:41.418007 | 2021-09-19T15:30:06 | 2021-09-19T15:30:06 | 276,294,036 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 80 | py | import sys
# Read a time formatted "H:M:S" from stdin and print only the minutes field.
# Call sys.stdin.readline directly instead of rebinding the built-in input().
line = sys.stdin.readline()
hours, minutes, seconds = line.split(':')
print(minutes)
| [
"a01022371341@gmail.com"
] | a01022371341@gmail.com |
a131ba7539f611d39f887a2a8cf1e415b50fddde | 18339252dc6e4931ed3f7a0c671b232b184e56e6 | /BlogPs/asgi.py | 9a11c55c6848f4501f72a531a178dd2ebaa98df9 | [] | no_license | ibk2510/food_blog | 8e17f663dc5741d823d74fdac13fbf5c0eeabfb8 | c20dcf503692dd0fa71c8af8915033a79cbd4741 | refs/heads/main | 2023-06-06T09:34:47.795881 | 2021-06-18T14:58:04 | 2021-06-18T14:58:04 | 378,182,374 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | """
ASGI config for BlogPs project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os

from django.core.asgi import get_asgi_application

# Point Django at the project settings before building the ASGI callable.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'BlogPs.settings')

application = get_asgi_application()
| [
"kalilibrahimjh@gmail.com"
] | kalilibrahimjh@gmail.com |
f6890569412e03a13d41215a891e3d25ad0e2ee1 | a4ab889cbcd077dae29eb2ca62c9078aa6c0d555 | /benchmark/torch/qmix/train.py | 290b56a9498d83e3e044225acd35849e95afa0f1 | [
"Apache-2.0"
] | permissive | rical730/PARL | 909721c347ce05d0c43ad8b3d4b38edd63f47a44 | 88e43d309278c34ca857939fe251813d4cad4b03 | refs/heads/develop | 2022-04-19T19:29:15.269466 | 2022-04-01T05:51:46 | 2022-04-01T05:51:46 | 482,826,065 | 0 | 0 | Apache-2.0 | 2022-04-18T11:44:11 | 2022-04-18T11:44:11 | null | UTF-8 | Python | false | false | 6,621 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from smac.env import StarCraft2Env
from env_wrapper import SC2EnvWrapper
from replay_buffer import EpisodeExperience, EpisodeReplayBuffer
from qmixer_model import QMixerModel
from rnn_model import RNNModel
from parl.algorithms import QMIX
from qmix_agent import QMixAgent
import parl
from parl.utils import logger
from parl.utils import summary
import numpy as np
from copy import deepcopy
from qmix_config import QMixConfig
logger.set_dir('./log_path')
def run_train_episode(env, agent, rpm, config):
    """Run one exploratory episode, store it in the replay buffer, and train.

    Returns (episode_reward, episode_step, is_win, mean_loss, mean_td_error);
    the two means are None until the buffer holds memory_warmup_size episodes.
    """
    episode_limit = config['episode_limit']
    agent.reset_agent()
    episode_reward = 0.0
    episode_step = 0
    terminated = False
    state, obs = env.reset()
    episode_experience = EpisodeExperience(episode_limit)

    while not terminated:
        available_actions = env.get_available_actions()
        actions = agent.sample(obs, available_actions)
        next_state, next_obs, reward, terminated = env.step(actions)
        episode_reward += reward
        episode_step += 1
        # Final 0 marks this transition as real (not padding).
        episode_experience.add(state, actions, [reward], [terminated], obs,
                               available_actions, [0])
        state = next_state
        obs = next_obs

    # Pad the episode out to episode_limit with zeroed transitions so every
    # stored episode has a fixed length; the trailing 1 flags padding.
    state_zero = np.zeros_like(state, dtype=state.dtype)
    actions_zero = np.zeros_like(actions, dtype=actions.dtype)
    obs_zero = np.zeros_like(obs, dtype=obs.dtype)
    available_actions_zero = np.zeros_like(
        available_actions, dtype=available_actions.dtype)
    reward_zero = 0
    terminated_zero = True

    for _ in range(episode_step, episode_limit):
        episode_experience.add(state_zero, actions_zero, [reward_zero],
                               [terminated_zero], obs_zero,
                               available_actions_zero, [1])
    rpm.add(episode_experience)

    is_win = env.win_counted

    # After warm-up, take two gradient steps per collected episode.
    mean_loss = []
    mean_td_error = []
    if rpm.count > config['memory_warmup_size']:
        for _ in range(2):
            s_batch, a_batch, r_batch, t_batch, obs_batch, available_actions_batch,\
                    filled_batch = rpm.sample_batch(config['batch_size'])
            loss, td_error = agent.learn(s_batch, a_batch, r_batch, t_batch,
                                         obs_batch, available_actions_batch,
                                         filled_batch)
            mean_loss.append(loss)
            mean_td_error.append(td_error)
    mean_loss = np.mean(mean_loss) if mean_loss else None
    mean_td_error = np.mean(mean_td_error) if mean_td_error else None

    return episode_reward, episode_step, is_win, mean_loss, mean_td_error
def run_evaluate_episode(env, agent):
    """Roll out one greedy (no-exploration) episode.

    Returns (episode_reward, episode_step, is_win) for the rollout.
    """
    agent.reset_agent()
    total_reward = 0.0
    steps = 0
    done = False
    state, obs = env.reset()

    while not done:
        avail = env.get_available_actions()
        actions = agent.predict(obs, avail)
        state, obs, reward, done = env.step(actions)
        steps += 1
        total_reward += reward

    return total_reward, steps, env.win_counted
def main():
    """Train QMIX on the configured SMAC scenario, logging eval metrics."""
    config = deepcopy(QMixConfig)

    # Build the StarCraft II multi-agent environment and copy its
    # dimensions into the config for the models below.
    env = StarCraft2Env(
        map_name=config['scenario'], difficulty=config['difficulty'])
    env = SC2EnvWrapper(env)
    config['episode_limit'] = env.episode_limit
    config['obs_shape'] = env.obs_shape
    config['state_shape'] = env.state_shape
    config['n_agents'] = env.n_agents
    config['n_actions'] = env.n_actions

    rpm = EpisodeReplayBuffer(config['replay_buffer_size'])

    # Per-agent recurrent Q-network plus the mixing network.
    agent_model = RNNModel(config['obs_shape'], config['n_actions'],
                           config['rnn_hidden_dim'])
    qmixer_model = QMixerModel(
        config['n_agents'], config['state_shape'], config['mixing_embed_dim'],
        config['hypernet_layers'], config['hypernet_embed_dim'])
    algorithm = QMIX(agent_model, qmixer_model, config['double_q'],
                     config['gamma'], config['lr'], config['clip_grad_norm'])
    qmix_agent = QMixAgent(
        algorithm, config['exploration_start'], config['min_exploration'],
        config['exploration_decay'], config['update_target_interval'])

    # Warm up: collect episodes until the buffer is large enough to train.
    while rpm.count < config['memory_warmup_size']:
        train_reward, train_step, train_is_win, train_loss, train_td_error\
                = run_train_episode(env, qmix_agent, rpm, config)

    total_steps = 0
    last_test_step = -1e10
    while total_steps < config['training_steps']:
        train_reward, train_step, train_is_win, train_loss, train_td_error\
                = run_train_episode(env, qmix_agent, rpm, config)
        total_steps += train_step

        # Periodically evaluate greedily and log averaged metrics.
        if total_steps - last_test_step >= config['test_steps']:
            last_test_step = total_steps
            eval_is_win_buffer = []
            eval_reward_buffer = []
            eval_steps_buffer = []
            for _ in range(3):
                eval_reward, eval_step, eval_is_win = run_evaluate_episode(
                    env, qmix_agent)
                eval_reward_buffer.append(eval_reward)
                eval_steps_buffer.append(eval_step)
                eval_is_win_buffer.append(eval_is_win)

            summary.add_scalar('train_loss', train_loss, total_steps)
            summary.add_scalar('eval_reward', np.mean(eval_reward_buffer),
                               total_steps)
            summary.add_scalar('eval_steps', np.mean(eval_steps_buffer),
                               total_steps)
            summary.add_scalar('eval_win_rate', np.mean(eval_is_win_buffer),
                               total_steps)
            summary.add_scalar('exploration', qmix_agent.exploration,
                               total_steps)
            summary.add_scalar('replay_buffer_size', rpm.count, total_steps)
            summary.add_scalar('target_update_count',
                               qmix_agent.target_update_count, total_steps)
            summary.add_scalar('train_td_error:', train_td_error, total_steps)


if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | noreply@github.com |
ce3eb9f306532e3d901fc2acb81877bb8a80fbde | b70f00927b9ed862252ad7345ca39f9d44ae87a2 | /exec -l /bin/bash/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/command_lib/filestore/operations/flags.py | a2422ffca4165e41c34c82b6d85667ae3393cac0 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | sparramore/Art-Roulette | 7654dedad6e9423dfc31bd0f807570b07a17a8fc | c897c9ec66c27ccab16f1a12213d09fe982d4a95 | refs/heads/master | 2021-07-06T13:04:22.141681 | 2018-07-12T23:30:13 | 2018-07-12T23:30:13 | 139,061,941 | 0 | 2 | null | 2020-07-25T11:32:11 | 2018-06-28T19:49:24 | Python | UTF-8 | Python | false | false | 1,156 | py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Flags and helpers for the Cloud Filestore operations commands."""
from __future__ import unicode_literals
OPERATIONS_LIST_FORMAT = """\
table(
name.basename():label=OPERATION_NAME,
name.segment(3):label=LOCATION,
metadata.verb:label=TYPE,
metadata.target.basename(),
done.yesno(yes='DONE', no='RUNNING'):label=STATUS,
metadata.createTime.date():sort=1,
duration(start=metadata.createTime,end=metadata.endTime,precision=0,calendar=false).slice(2:).join("").yesno(no="<1S"):label=DURATION
)"""
| [
"djinnie24@gmail.com"
] | djinnie24@gmail.com |
d59670d01436234a5c4ea1be119b112db5626c4e | 5c49ee8ab371cfae2af2e080bb12be48517be8b7 | /Coding_Interviews/Python/KP/ah.py | bc8ff673ff0f647ab7bd290d95fe6c461a244c26 | [] | no_license | gonzalob24/Learning_Central | 7aa07d6b5cd50d47ba791dedafed856f3e335697 | 1843c80249c8313543757131ecb12a476e687b24 | refs/heads/master | 2023-06-08T10:43:13.646824 | 2023-05-26T11:55:31 | 2023-05-26T11:55:31 | 241,561,705 | 0 | 0 | null | 2023-03-05T14:31:04 | 2020-02-19T07:41:36 | Python | UTF-8 | Python | false | false | 1,020 | py |
def doctor_visit(jon_ah, doc_ah):
    """Print "go" when the doctor's "ah" is shorter than Jon's, else "no"."""
    verdict = "go" if len(doc_ah) < len(jon_ah) else "no"
    print(verdict)
while True:
    # A valid "ah" string is 0-999 'a' characters followed by a single 'h'.
    jon = input("Jon: ")
    doc = input("Doc: ")
    # Guard against empty input before peeking at the last character, and
    # require BOTH strings to end in 'h' (the original used `or` here).
    if jon and doc and jon[-1] == 'h' and doc[-1] == 'h':
        if len(jon) - 1 <= 999 and len(doc) - 1 <= 999:
            # set(s) <= {'a', 'h'} accepts both "a...ah" and a bare "h".
            # (The original `== {'a','h'} or == {'h'} and ...` chain was
            # broken by operator precedence: `and` binds tighter than `or`.)
            if set(jon) <= {'a', 'h'} and set(doc) <= {'a', 'h'}:
                doctor_visit(jon, doc)
                break
    # Invalid input: loop back and prompt again.
| [
"gonzalobetancourt@me.com"
] | gonzalobetancourt@me.com |
6dddd15cb1bc357c82a2804ea351a1d8db0a777f | 707baf19fbfb0f9b20eefe5032e9810265c8d4e4 | /test/functional/p2p-leaktests.py | 6e196ac59dbe0e777008292a6926863856e9adf2 | [
"MIT"
] | permissive | tmiholdings/tmi | dc397c895fd8d801e2769fa18feae30f6ddceafc | f1b6027f025dafc40616cde076df2f4b8cdae8a2 | refs/heads/master | 2020-06-30T23:09:46.155886 | 2019-08-07T05:40:59 | 2019-08-07T05:40:59 | 200,978,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,855 | py | #!/usr/bin/env python3
# Copyright (c) 2017 The TMIcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test message sending before handshake completion.
A node should never send anything other than VERSION/VERACK/REJECT until it's
received a VERACK.
This test connects to a node and sends it a few messages, trying to intice it
into sending us something it shouldn't.
Also test that nodes that send unsupported service bits to tmicoind are disconnected
and don't receive a VERACK. Unsupported service bits are currently 1 << 5 and
1 << 7 (until August 1st 2018)."""
from test_framework.mininode import *
from test_framework.test_framework import TMIcoinTestFramework
from test_framework.util import *
banscore = 10
class CLazyNode(NodeConnCB):
    """P2P peer that expects the node under test to send it nothing at all.

    Until the version/verack handshake completes, the node must not send
    anything other than VERSION/VERACK/REJECT, so every message handler
    below routes to bad_message(), which flags the leak in
    ``unexpected_msg`` for the test to assert on later.
    """
    def __init__(self):
        super().__init__()
        self.unexpected_msg = False  # True once the node leaks any message
        self.ever_connected = False  # True once the connection has opened

    def bad_message(self, message):
        """Record that the node sent a message it should not have."""
        self.unexpected_msg = True
        self.log.info("should not have received message: %s" % message.command)

    def on_open(self, conn):
        self.connected = True
        self.ever_connected = True

    def on_version(self, conn, message): self.bad_message(message)
    def on_verack(self, conn, message): self.bad_message(message)
    def on_reject(self, conn, message): self.bad_message(message)
    def on_inv(self, conn, message): self.bad_message(message)
    def on_addr(self, conn, message): self.bad_message(message)
    def on_alert(self, conn, message): self.bad_message(message)
    def on_getdata(self, conn, message): self.bad_message(message)
    def on_getblocks(self, conn, message): self.bad_message(message)
    def on_tx(self, conn, message): self.bad_message(message)
    def on_block(self, conn, message): self.bad_message(message)
    def on_getaddr(self, conn, message): self.bad_message(message)
    def on_headers(self, conn, message): self.bad_message(message)
    def on_getheaders(self, conn, message): self.bad_message(message)
    def on_ping(self, conn, message): self.bad_message(message)
    # Fixed: the original declared on_mempool(self, conn) but its body
    # referenced the undefined name `message`, raising NameError whenever a
    # mempool message arrived.  Use the same (conn, message) signature as
    # every other handler so dispatch stays uniform.
    def on_mempool(self, conn, message): self.bad_message(message)
    def on_pong(self, conn, message): self.bad_message(message)
    def on_feefilter(self, conn, message): self.bad_message(message)
    def on_sendheaders(self, conn, message): self.bad_message(message)
    def on_sendcmpct(self, conn, message): self.bad_message(message)
    def on_cmpctblock(self, conn, message): self.bad_message(message)
    def on_getblocktxn(self, conn, message): self.bad_message(message)
    def on_blocktxn(self, conn, message): self.bad_message(message)
# Node that never sends a version. We'll use this to send a bunch of messages
# anyway, and eventually get disconnected.
class CNodeNoVersionBan(CLazyNode):
    """Peer that spams veracks before the handshake to get itself banned."""

    # Send a burst of premature veracks as soon as the connection opens;
    # `banscore` of them should trip the node's ban threshold.
    # NOTE: implementation-specific check here. Remove if tmicoind ban behavior changes
    def on_open(self, conn):
        super().on_open(conn)
        for _ in range(banscore):
            self.send_message(msg_verack())

    # Rejects are expected in response to the verack spam; don't count them.
    def on_reject(self, conn, message):
        pass
# Node that never sends a version. This one just sits idle and hopes to receive
# any message (it shouldn't!)
class CNodeNoVersionIdle(CLazyNode):
    """Completely silent peer: sends nothing and should receive nothing."""
    def __init__(self):
        super().__init__()
# Node that sends a version but not a verack.
class CNodeNoVerackIdle(CLazyNode):
    """Peer that completes only half the handshake (version, no verack)."""
    def __init__(self):
        # Set once the node has replied to our version message.
        self.version_received = False
        super().__init__()
    # Reject/verack traffic is expected during the half-handshake, so these
    # override bad_message() and are not counted as leaks.
    def on_reject(self, conn, message): pass
    def on_verack(self, conn, message): pass
    # When version is received, don't reply with a verack. Instead, see if the
    # node will give us a message that it shouldn't. This is not an exhaustive
    # list!
    def on_version(self, conn, message):
        self.version_received = True
        conn.send_message(msg_ping())
        conn.send_message(msg_getaddr())
class P2PLeakTest(TMIcoinTestFramework):
    """Checks that the node leaks no messages pre-handshake and disconnects
    peers advertising unsupported service bits (1<<5 and 1<<7) before the
    August 1st 2018 mocktime cutoff, while allowing them afterwards."""
    def __init__(self):
        super().__init__()
        self.num_nodes = 1
        # Low ban threshold so the verack-spamming peer gets banned quickly.
        self.extra_args = [['-banscore='+str(banscore)]]
    def run_test(self):
        # Peers that must never be spoken to by the node.
        no_version_bannode = CNodeNoVersionBan()
        no_version_idlenode = CNodeNoVersionIdle()
        no_verack_idlenode = CNodeNoVerackIdle()
        unsupported_service_bit5_node = CLazyNode()
        unsupported_service_bit7_node = CLazyNode()
        self.nodes[0].setmocktime(1501545600) # August 1st 2017
        connections = []
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], no_version_bannode, send_version=False))
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], no_version_idlenode, send_version=False))
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], no_verack_idlenode))
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], unsupported_service_bit5_node, services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_5))
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], unsupported_service_bit7_node, services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_7))
        no_version_bannode.add_connection(connections[0])
        no_version_idlenode.add_connection(connections[1])
        no_verack_idlenode.add_connection(connections[2])
        unsupported_service_bit5_node.add_connection(connections[3])
        unsupported_service_bit7_node.add_connection(connections[4])
        NetworkThread().start() # Start up network handling in another thread
        # Wait until each peer has at least opened its connection (or, for the
        # half-handshake peer, received the node's version).
        assert wait_until(lambda: no_version_bannode.ever_connected, timeout=10)
        assert wait_until(lambda: no_version_idlenode.ever_connected, timeout=10)
        assert wait_until(lambda: no_verack_idlenode.version_received, timeout=10)
        assert wait_until(lambda: unsupported_service_bit5_node.ever_connected, timeout=10)
        assert wait_until(lambda: unsupported_service_bit7_node.ever_connected, timeout=10)
        # Mine a block and make sure that it's not sent to the connected nodes
        self.nodes[0].generate(1)
        #Give the node enough time to possibly leak out a message
        time.sleep(5)
        #This node should have been banned
        assert not no_version_bannode.connected
        # These nodes should have been disconnected
        assert not unsupported_service_bit5_node.connected
        assert not unsupported_service_bit7_node.connected
        [conn.disconnect_node() for conn in connections]
        # Make sure no unexpected messages came in
        assert(no_version_bannode.unexpected_msg == False)
        assert(no_version_idlenode.unexpected_msg == False)
        assert(no_verack_idlenode.unexpected_msg == False)
        assert not unsupported_service_bit5_node.unexpected_msg
        assert not unsupported_service_bit7_node.unexpected_msg
        self.log.info("Service bits 5 and 7 are allowed after August 1st 2018")
        self.nodes[0].setmocktime(1533168000) # August 2nd 2018
        # After the cutoff the same service bits must complete the handshake.
        allowed_service_bit5_node = NodeConnCB()
        allowed_service_bit7_node = NodeConnCB()
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], allowed_service_bit5_node, services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_5))
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], allowed_service_bit7_node, services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_7))
        allowed_service_bit5_node.add_connection(connections[5])
        allowed_service_bit7_node.add_connection(connections[6])
        # Receiving a verack proves the node accepted the connection.
        assert wait_until(lambda: allowed_service_bit5_node.message_count["verack"], timeout=10)
        assert wait_until(lambda: allowed_service_bit7_node.message_count["verack"], timeout=10)
if __name__ == '__main__':
    # Entry point: run the leak test via the shared test-framework runner.
    P2PLeakTest().main()
| [
"tmiholdings@daum.net"
] | tmiholdings@daum.net |
283f2f2e55f77f3757ba14cc672c22e63e8862b9 | 48f297637ae79f0242e4cbc0944b68e5ae331a13 | /0692_top-k-frequent-words.py | a9780c797a441405b28176cc181ba2a5d9bcdcdd | [] | no_license | bolatov/leetcode | a1ff46be79ccb46e7c8e8b754ef35579ce4bd863 | 8da24f993535cf22ae1af5b8a98fd9dada22b498 | refs/heads/master | 2020-04-17T08:59:23.516987 | 2019-12-19T14:50:15 | 2019-12-19T14:50:15 | 65,895,687 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,682 | py | from collections import Counter
def is_before(u, v):
"""Check if pair `u` comes before `v`"""
u_freq, u_word = u
v_freq, v_word = v
if u_freq > v_freq:
return True
if u_freq < v_freq:
return False
return u_word < v_word
def bubble_up(heap, i):
"""Move pair of (frequence, word) at `i` up the tree as much as possible."""
parent = (i-1) // 2
if parent >= 0 and is_before(heap[i], heap[parent]):
heap[i], heap[parent] = heap[parent], heap[i]
bubble_up(heap, parent)
def bubble_down(heap, i):
"""Move pair of (frequence, word) at `i` down the tree as much as possible."""
index = i
left_child = i*2 + 1
right_child = i*2 + 2
if left_child < len(heap) and is_before(heap[left_child], heap[index]):
index = left_child
if right_child < len(heap) and is_before(heap[right_child], heap[index]):
index = right_child
if index != i:
heap[index], heap[i] = heap[i], heap[index]
bubble_down(heap, index)
def build_heap(counter):
"""Build max heap"""
heap = []
for word, freq in counter.items():
heap.append((freq, word))
bubble_up(heap, len(heap)-1)
return heap
def pop(heap):
"""Pop the most frequent word out of the heap."""
item = heap[0]
heap[0], heap[-1] = heap[-1], heap[0]
heap.pop()
bubble_down(heap, 0)
return item
class Solution:
    def topKFrequent(self, words: List[str], k: int) -> List[str]:
        """Return the `k` most frequent words; ties break alphabetically."""
        heap = build_heap(Counter(words))
        top_words = []
        for _ in range(k):
            top_words.append(pop(heap)[1])
        return top_words
| [
"noreply@github.com"
] | noreply@github.com |
c0a92dc535604ff0d59ca55ad866c155ae5511b9 | f0117325b7a40779965b35ec6cefc8d12353d779 | /python_exercises/loop_ex/2_n_m.py | 81273ae8b7d52d861cd6273fb57acc0b161df574 | [] | no_license | joshwestbury/Digital_Crafts | 4188e71ad631439dcb2cca9eea63d29400c37dc0 | 66c06f198d110388781a30c0ecb7902d3a8daf5a | refs/heads/master | 2021-07-24T05:37:02.370341 | 2017-11-04T14:36:39 | 2017-11-04T14:36:39 | 103,189,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py | #Same as the previous problem, except you will prompt the user for the number to start on and the number to end on.
a = int(input("What number should I start on? "))
b = int(input("What number should I end on? " ))
for i in range (a, b):
print(i)
#figure out how to put the input statements on the same line
| [
"joshwestbury@gmail.com"
] | joshwestbury@gmail.com |
00276f52576015f87cc070be920aae247664a053 | 1ec4f0c82a6342063fef847474dc5924c4fe71c6 | /dcmslurm_make.py | 968184969ec567b87cb2e4f3974abdd3778b97c1 | [] | no_license | leelabhub/dcmslurm | 5b97ba794eb02567cce3f08c234aa24d9de5c10d | df5e2b1994c5524f0fa90fc3138f953bdd1432f8 | refs/heads/master | 2021-07-14T17:59:39.328578 | 2017-10-21T18:00:08 | 2017-10-21T18:00:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,565 | py | """dcmuslurm_make.py
This module generates scripts to run spectral DCM on a SLURM cluster.
The functions can be called independently, but the typical usage is to call
make_scripts_all in a script in which the relevant keywords are given and the
parameters are defined in a separate script. The general use is to generate the
parameters using the dcmslurm_make_params module. An example such script
follows below.
Lawrence Chiou
20 October 2017
_______________________________________________________________________________
Example script:
path_dcmslurm = '/home/usr/dcm/dcmslurm'
path_spm = '/home/usr/dcm/spm12beta'
path_raw = '/scratch/users/usr/dcm_data'
path_raw_file = '/scratch/users/usr/dcm_data/brain_data.mat'
path_parsed = '/scratch/users/usr/dcm_data/brain_data_parsed'
import os
import sys
sys.path.append(path_dcmslurm)
from dcmslurm_make import make_scripts_all
from dcmslurm_make_params import make_params
directory_output = '/scratch/users/usr/dcm_data/brain_data_output'
prefix_output = 'brain_data'
path_params_list = make_params( \
filename='%s_params' % prefix_output, \
path_output=os.path.dirname(path_parsed), \
n_in=5, \
free_connects=1, \
self_connect=True, \
dominant_nodes=[1])
for i in range(len(path_params_list)):
make_scripts_all(
path_params = path_params_list[i], \
directory_output = directory_output, \
prefix_output = '%s_%s' % (prefix_output, str(i+1)), \
path_dcmslurm = path_dcmslurm, \
path_spm = path_spm, \
path_raw = path_raw, \
path_raw_file = path_raw_file, \
path_parsed = path_parsed, \
save_in_path_parsed = 'false', \
labels = "{'cond 1', 'cond 2', 'cond 3'}", \
subjects = 16, \
em_steps_max = 150, \
time = '00:10:00', \
email = 'user@email.com', \
partition = 'normal', \
nodes = 1, \
memory = 700, \
overwrite = True)
"""
import os
def replace_in_outline(**kwargs):
    """Load a text outline and substitute keyword placeholders into it.

    Placeholders have the form ``$KEYWORD$`` (upper-case, bookended by
    dollar signs).  Every keyword argument is substituted; if
    ``path_output_filename`` is given, the substituted text is also
    written to disk.

    Args:
        **kwargs:
            - path_outline: file path to the outline (required)
            - overwrite: if False, never clobber an existing output file
              (default True)
            - path_output_filename: optional file path to write the result
            - any other keywords are substituted into the outline
              (not case sensitive)
    Returns:
        outline_contents: contents of the outline, after substitution,
        as a single string
    """
    # Outlines ship next to this script (the dcmslurm folder); chdir there so
    # the relative outline filenames used by the make_* helpers resolve.
    path_script = os.path.abspath(__file__)
    os.chdir(os.path.dirname(path_script))

    with open(kwargs['path_outline'], 'r') as outline:
        outline_contents = outline.read()

    for key in kwargs:
        key_format = '$%s$' % key.upper()
        outline_contents = outline_contents.replace(key_format, str(kwargs[key]))

    # Option to prevent overwriting if the file already exists.
    overwrite = kwargs.get('overwrite', True)

    if 'path_output_filename' in kwargs:
        path_output_filename = kwargs['path_output_filename']
        # Write unless the caller disabled overwriting and the file exists
        # (equivalent to the original `overwrite or (not exists and not
        # overwrite)` condition, simplified).
        if overwrite or not os.path.exists(path_output_filename):
            # Create the output directory if it does not exist already; the
            # empty-string guard avoids makedirs('') for bare filenames.
            directory_output = os.path.dirname(path_output_filename)
            if directory_output and not os.path.exists(directory_output):
                os.makedirs(directory_output)
            # Text mode 'w': the original opened with 'wb', which raises
            # TypeError when writing str on Python 3.
            with open(path_output_filename, 'w') as script:
                script.write(outline_contents)

    return outline_contents
def make_parse(filename, **kwargs):
    """Write the shell script that parses the raw MATLAB data file.

    Fills 'outline_sh.txt' with the commands from 'commands_parse.txt'
    (keywords substituted from **kwargs) and writes the result next to the
    parsed-data directory.

    Args:
        filename: output file path
        **kwargs:
            - overwrite: True if overwriting of an existing file is desired
            - keywords to replace in the outline (not case sensitive)
    Returns:
        None
    """
    parse_commands = replace_in_outline(path_outline='commands_parse.txt',
                                        **kwargs)
    output_path = os.path.join(os.path.dirname(kwargs['path_parsed']),
                               filename)
    replace_in_outline(path_outline='outline_sh.txt',
                       path_output_filename=output_path,
                       commands=parse_commands,
                       **kwargs)
def make_estimate(filename, commands_variable, **kwargs):
    """Write an sbatch script that runs 'dcmslurm_estimate.m' for one job.

    Fills 'outline_sbatch.txt' with the commands from
    'commands_estimate.txt', substituting `commands_variable` for the
    $VARIABLE$ placeholder, and writes the result into the output directory.

    Args:
        filename: output file path
        commands_variable: variable commands (label/subject arguments)
        **kwargs:
            - overwrite: True if overwriting of an existing file is desired
            - keywords to replace in the outline (not case sensitive)
    Returns:
        None
    """
    stem = os.path.splitext(filename)[0]
    stem_path = os.path.join(kwargs['path_output'], stem)
    estimate_commands = replace_in_outline(path_outline='commands_estimate.txt',
                                           variable=commands_variable,
                                           **kwargs)
    replace_in_outline(
        path_outline='outline_sbatch.txt',
        path_output_filename=os.path.join(kwargs['path_output'], filename),
        script_name=stem,
        path_log='%s.log' % stem_path,
        path_err='%s.err' % stem_path,
        commands=estimate_commands,
        **kwargs)
def make_favg(filename, **kwargs):
    """Write an sbatch script that averages F across subjects.

    Fills 'outline_sbatch.txt' with the commands from 'commands_favg.txt'
    and writes the result into the output directory.

    Args:
        filename: output file path
        **kwargs:
            - overwrite: True if overwriting of an existing file is desired
            - keywords to replace in the outline (not case sensitive)
    Returns:
        None
    """
    stem = os.path.splitext(filename)[0]
    stem_path = os.path.join(kwargs['path_output'], stem)
    favg_commands = replace_in_outline(path_outline='commands_favg.txt',
                                       **kwargs)
    replace_in_outline(
        path_outline='outline_sbatch.txt',
        path_output_filename=os.path.join(kwargs['path_output'], filename),
        script_name=stem,
        path_log='%s.log' % stem_path,
        path_err='%s.err' % stem_path,
        commands=favg_commands,
        **kwargs)
def make_ttest(filename, **kwargs):
    """Write an sbatch script that t-tests among subjects.

    Fills 'outline_sbatch.txt' with the commands from 'commands_ttest.txt'
    (recommended only as a preliminary measure) and writes the result into
    the output directory.

    Args:
        filename: output file path
        **kwargs:
            - overwrite: True if overwriting of an existing file is desired
            - keywords to replace in the outline (not case sensitive)
    Returns:
        None
    """
    stem = os.path.splitext(filename)[0]
    stem_path = os.path.join(kwargs['path_output'], stem)
    ttest_commands = replace_in_outline(path_outline='commands_ttest.txt',
                                        **kwargs)
    replace_in_outline(
        path_outline='outline_sbatch.txt',
        path_output_filename=os.path.join(kwargs['path_output'], filename),
        script_name=stem,
        path_log='%s.log' % stem_path,
        path_err='%s.err' % stem_path,
        commands=ttest_commands,
        **kwargs)
def make_run(filename, script_list_estimate, script_list_post, **kwargs):
    """Write the shell script that submits all batch jobs for one model.

    Estimation jobs are submitted first; post-processing jobs (e.g. F
    averaging, t-tests) are submitted with a SLURM dependency so they only
    start after every estimation job has finished.

    Args:
        filename: output filename
        script_list_estimate: list of scripts calling dcmslurm_estimate.m
        script_list_post: list of scripts for "post-processing" (e.g. t-test)
        **kwargs:
            - overwrite: True if overwriting of an existing file is desired
            - keywords to replace in the outline (not case sensitive)
    Returns:
        None
    """
    submit_lines = []
    # Accumulates job ids so post-processing runs only after DCM fitting.
    dependency_string = '--dependency=afterany'
    for idx, script in enumerate(script_list_estimate, start=1):
        submit_lines.append('j%s=$(sbatch %s)\n' % (idx, script))
        dependency_string += ':${j%s:20}' % idx
    commands_run = ''.join(submit_lines)
    commands_run += '\necho %s jobs submitted\n' % filename
    # Post-processing jobs continue the same j<N> numbering.
    next_idx = len(script_list_estimate) + 1
    for offset, script in enumerate(script_list_post):
        commands_run += '\nj%s=$(sbatch %s %s)' \
            % (next_idx + offset, dependency_string, script)
    replace_in_outline(
        path_outline='outline_sh.txt',
        path_output_filename=os.path.join(kwargs['path_output'], filename),
        commands=commands_run,
        **kwargs)
def make_run_all(filename, script_list_run, **kwargs):
    """Write the "master" shell script that runs every per-model run script.

    Each run script is executed only if its directory contains no .mat file
    yet (i.e. its jobs have not already produced output).

    Args:
        filename: output filename
        script_list_run: list of shell scripts to be run
        **kwargs:
            - overwrite: True if overwriting of an existing file is desired
            - keywords to replace in the outline (not case sensitive)
    Returns:
        None
    """
    pieces = []
    for script in script_list_run:
        script_dir = os.path.dirname(script)
        pieces.append('hasmatfile=$(find %s -type f \( -name "*.mat" \))\n'
                      % script_dir)
        pieces.append('if [ ${#hasmatfile} -eq 0 ]; then sh %s; fi\n\n'
                      % script)
    replace_in_outline(
        path_outline='outline_sh.txt',
        path_output_filename=os.path.join(
            os.path.dirname(kwargs['path_parsed']), filename),
        commands=''.join(pieces),
        **kwargs)
def make_scripts(include_parse=True, include_favg=True, include_ttest=True, \
    **kwargs):
    """Makes all individual scripts (for DCM estimation and "post-processing")
    and accompanying shell scripts for submitting and running all scripts for a
    single set of parameters.
    Args:
      include_parse: True if scripts for parsing the MATLAB data file should be
        included
      include_favg: True if scripts for calculting the average F score should
        be included
      include_ttest: True if scripts for t-testing should be included
      **kwargs
        - overwrite: True if overwriting of an existing file is desired
        - job_name: job name that should prefix each filename
        - path_output: output directory
        - path_raw: path to the MATLAB data file
        - labels: labels for the experimental conditions, as a MATLAB-style
          cell-array string, e.g. "{'cond 1', 'cond 2'}"
        - subjects: number of subjects
        - keywords to replace in the outline (not case sensitive)
    Returns:
      None
    """
    job_name = kwargs['job_name']
    path_output = kwargs['path_output']
    path_raw = kwargs['path_raw']
    # list of scripts for make_run
    script_list_estimate = []
    script_list_post = []
    if include_parse:
        # The parse script is named after the raw-data file/directory itself.
        script_name_parse = '%s-parse.sh' \
            % os.path.basename(os.path.normpath(path_raw))
        make_parse(filename=script_name_parse, **kwargs)
    # create estimate scripts
    label_string = kwargs['labels']
    # NOTE(review): assumes `labels` is exactly of the form "{'a', 'b'}"
    # (single quotes, comma+space separators); other spacing or quoting
    # breaks this slicing -- confirm upstream format.
    labels = [x[1:-1] for x in label_string[1:-1].split(', ')]
    subjects = kwargs['subjects']
    # One sbatch script per (condition label, subject) pair; the
    # commands_variable string becomes the MATLAB call arguments.
    for label in labels:
        for subject in range(1, subjects+1):
            script_name_estimate = '%s-%s-%s.sbatch' \
                % (job_name, label, str(subject))
            commands_variable = "'%s', %d, %d" % (label, subject, subject)
            script_list_estimate.append(os.path.join(path_output, \
                script_name_estimate))
            make_estimate(filename=script_name_estimate, \
                commands_variable=commands_variable, **kwargs)
    if include_favg:
        script_name_favg = '%s-favg.sbatch' % job_name
        script_list_post.append(os.path.join(path_output, script_name_favg))
        make_favg(filename=script_name_favg, **kwargs)
    if include_ttest:
        script_name_ttest = '%s-ttest.sbatch' % job_name
        script_list_post.append(os.path.join(path_output, script_name_ttest))
        make_ttest(filename=script_name_ttest, **kwargs)
    # Shell script that submits everything above with SLURM dependencies.
    script_name_run = '%s-run.sh' % job_name
    make_run(script_name_run, script_list_estimate, script_list_post, **kwargs)
def make_scripts_all(**kwargs):
    """Makes all individual scripts (for DCM estimation and "post-processing")
    and accompanying shell scripts for submitting and running all scripts for
    many parameters.

    Args:
        **kwargs
            - path_params: path to the parameter file ('&'-separated fields:
              matrix_A & matrix_C & matrix_hidden, one model per line)
            - directory_output: output directory
            - prefix_output: any prefix that should go at the beginning of
              file names
            - keywords to replace in the outline (not case sensitive)
    Returns:
        None
    """
    path_params = kwargs['path_params']
    directory_output = kwargs['directory_output']
    prefix_output = kwargs['prefix_output']

    # Each valid parameter line holds exactly three '&'-separated matrices.
    list_params = []
    with open(path_params, 'r') as file_params:
        for line in file_params:
            line_split = line.split('&')
            if len(line_split) == 3:
                list_params.append({
                    'matrix_A': line_split[0],
                    'matrix_C': line_split[1],
                    'matrix_hidden': line_split[2].replace('\n', ''),
                })

    script_list_run = []
    for params in list_params:
        # job_name is matrix_A and matrix_hidden converted from a binary
        # string, zero-padded to the width of the all-ones value so names
        # sort consistently.
        string_raw = params['matrix_A'] + params['matrix_hidden']
        # ''.join(...) around filter(): on Python 3 filter() returns an
        # iterator, which int(..., 2) and str.replace cannot consume (the
        # original bare filter() raised TypeError there).
        string_bin = ''.join(filter(lambda d: d.isdigit(), string_raw))
        width = len(str(int(string_bin.replace('0', '1'), 2)))
        job_name = '%s_%s' % (prefix_output,
                              str(int(string_bin, 2)).zfill(width))
        path_output = os.path.join(directory_output, job_name)
        make_scripts(
            path_output=path_output,
            job_name=job_name,
            matrix_A=params['matrix_A'],
            matrix_C=params['matrix_C'],
            matrix_hidden=params['matrix_hidden'],
            **kwargs)
        script_list_run.append('%s-run.sh' % os.path.join(path_output, job_name))

    # Master script that runs every per-model run script.
    script_name_run_all = '%s-run_all.sh' % prefix_output
    make_run_all(script_name_run_all, script_list_run, **kwargs)
"noreply@github.com"
] | noreply@github.com |
ffaa66a27642c062e9fbe349e489cc69524e50a9 | ef955c7e5f4a560c95f42202a459449308450c59 | /spinup/algos/tf1/bootstrapped_pg/klucb_sac.py | f5a690c882eab28562a1380ff43787f98eab0b95 | [
"MIT"
] | permissive | anandrajasekar18/spinningup | 4e1527bdaf24328c91696bc2c6eb9b28007f1ef3 | c14daf6579e0341a387358adf4230de36d6e7cd4 | refs/heads/master | 2021-04-24T00:37:14.995757 | 2020-03-25T18:08:49 | 2020-03-25T18:08:49 | 250,045,253 | 0 | 0 | MIT | 2020-03-25T17:26:04 | 2020-03-25T17:26:03 | null | UTF-8 | Python | false | false | 16,128 | py | import numpy as np
import tensorflow as tf
import gym
import time
from spinup.algos.bootstrapped_pg import core
from spinup.algos.sac.core import get_vars
from spinup.utils.logx import EpochLogger
# from SMPyBandits.Policies.klUCB_forGLR import klUCB_forGLR
# from SMPyBandits.Arms.kullback import klucbGauss
# from SMPyBandits.Policies.LM_DSEE import parameter_ell
from SMPyBandits.Policies.LM_DSEE import LM_DSEE
from SMPyBandits.Policies.SWHash_UCB import alpha_for_slowly_varying_env
from SMPyBandits.Environment.Result import Result
class ReplayBuffer:
    """
    A simple FIFO experience replay buffer for SAC agents.

    Transitions are written into pre-allocated float32 arrays; once the
    buffer is full, new transitions overwrite the oldest ones.
    """

    def __init__(self, obs_dim, act_dim, size):
        # Pre-allocate one array per transition component.
        self.obs1_buf = np.zeros([size, obs_dim], dtype=np.float32)
        self.obs2_buf = np.zeros([size, obs_dim], dtype=np.float32)
        self.acts_buf = np.zeros([size, act_dim], dtype=np.float32)
        self.rews_buf = np.zeros(size, dtype=np.float32)
        self.done_buf = np.zeros(size, dtype=np.float32)
        self.ptr = 0        # next slot to write
        self.size = 0       # number of valid transitions stored
        self.max_size = size

    def store(self, obs, act, rew, next_obs, done):
        """Insert one transition, overwriting the oldest slot when full."""
        slot = self.ptr
        self.obs1_buf[slot] = obs
        self.obs2_buf[slot] = next_obs
        self.acts_buf[slot] = act
        self.rews_buf[slot] = rew
        self.done_buf[slot] = done
        self.ptr = (slot + 1) % self.max_size
        self.size = min(self.size + 1, self.max_size)

    def sample_batch(self, batch_size=32):
        """Return a dict of `batch_size` uniformly sampled transitions."""
        picks = np.random.randint(0, self.size, size=batch_size)
        return dict(obs1=self.obs1_buf[picks],
                    obs2=self.obs2_buf[picks],
                    acts=self.acts_buf[picks],
                    rews=self.rews_buf[picks],
                    done=self.done_buf[picks])
"""
Soft Actor-Critic
(With slight variations that bring it closer to TD3)
"""
def klucb_bs_sac(env_fn, actor_critic=core.mlp_actor_critic, ac_kwargs=dict(), seed=0,
steps_per_epoch=5000, epochs=100, replay_size=int(1e6), gamma=0.99,
polyak=0.995, lr=1e-3, alpha=0.2, batch_size=100, start_steps=10000,
max_ep_len=1000, logger_kwargs=dict(), save_freq=1):
"""
Args:
env_fn : A function which creates a copy of the environment.
The environment must satisfy the OpenAI Gym API.
actor_critic: A function which takes in placeholder symbols
for state, ``x_ph``, and action, ``a_ph``, and returns the main
outputs from the agent's Tensorflow computation graph:
=========== ================ ======================================
Symbol Shape Description
=========== ================ ======================================
``mu`` (batch, act_dim) | Computes mean actions from policy
| given states.
``pi`` (batch, act_dim) | Samples actions from policy given
| states.
``logp_pi`` (batch,) | Gives log probability, according to
| the policy, of the action sampled by
| ``pi``. Critical: must be differentiable
| with respect to policy parameters all
| the way through action sampling.
``q1`` (batch,) | Gives one estimate of Q* for
| states in ``x_ph`` and actions in
| ``a_ph``.
``q2`` (batch,) | Gives another estimate of Q* for
| states in ``x_ph`` and actions in
| ``a_ph``.
``q1_pi`` (batch,) | Gives the composition of ``q1`` and
| ``pi`` for states in ``x_ph``:
| q1(x, pi(x)).
``q2_pi`` (batch,) | Gives the composition of ``q2`` and
| ``pi`` for states in ``x_ph``:
| q2(x, pi(x)).
``v`` (batch,) | Gives the value estimate for states
| in ``x_ph``.
=========== ================ ======================================
ac_kwargs (dict): Any kwargs appropriate for the actor_critic
function you provided to SAC.
seed (int): Seed for random number generators.
steps_per_epoch (int): Number of steps of interaction (state-action pairs)
for the agent and the environment in each epoch.
epochs (int): Number of epochs to run and train agent.
replay_size (int): Maximum length of replay buffer.
gamma (float): Discount factor. (Always between 0 and 1.)
polyak (float): Interpolation factor in polyak averaging for target
networks. Target networks are updated towards main networks
according to:
.. math:: \\theta_{\\text{targ}} \\leftarrow
\\rho \\theta_{\\text{targ}} + (1-\\rho) \\theta
where :math:`\\rho` is polyak. (Always between 0 and 1, usually
close to 1.)
lr (float): Learning rate (used for both policy and value learning).
alpha (float): Entropy regularization coefficient. (Equivalent to
inverse of reward scale in the original SAC paper.)
batch_size (int): Minibatch size for SGD.
start_steps (int): Number of steps for uniform-random action selection,
before running real policy. Helps exploration.
max_ep_len (int): Maximum length of trajectory / episode / rollout.
logger_kwargs (dict): Keyword args for EpochLogger.
save_freq (int): How often (in terms of gap between epochs) to save
the current policy and value function.
"""
logger = EpochLogger(**logger_kwargs)
logger.save_config(locals())
tf.set_random_seed(seed)
np.random.seed(seed)
env, test_env = env_fn(), env_fn()
obs_dim = env.observation_space.shape[0]
act_dim = env.action_space.shape[0]
# Action limit for clamping: critically, assumes all dimensions share the same bound!
act_limit = env.action_space.high[0]
# Share information about action space with policy architecture
ac_kwargs['action_space'] = env.action_space
# Inputs to computation graph
x_ph, a_ph, x2_ph, r_ph, d_ph = core.placeholders(obs_dim, act_dim, obs_dim, None, None)
# Main outputs from computation graph
with tf.variable_scope('main'):
mu, pi, logp_pi, q1, q2, q1_pi, q2_pi, v = actor_critic(x_ph, a_ph, **ac_kwargs)
# Target value network
with tf.variable_scope('target'):
_, _, _, _, _, _, _, v_targ = actor_critic(x2_ph, a_ph, **ac_kwargs)
# mu = tf.squeeze(mu,axis=1)
# pi = tf.squeeze(pi,axis=1)
# Experience buffer
replay_buffer = ReplayBuffer(obs_dim=obs_dim, act_dim=act_dim, size=replay_size)
# Count variables
var_counts = tuple(core.count_vars(scope) for scope in
['main/pi', 'main/q1', 'main/q2', 'main/v', 'main'])
print(('\nNumber of parameters: \t pi: %d, \t' + \
'q1: %d, \t q2: %d, \t v: %d, \t total: %d\n')%var_counts)
print (mu.shape, pi.shape, logp_pi.shape, q1.shape, q2.shape, q1_pi.shape, q2_pi.shape, v.shape, tf.expand_dims(d_ph,1).shape, tf.expand_dims(d_ph,1).shape, v_targ.shape)
# Min Double-Q:
min_q_pi = tf.minimum(q1_pi, q2_pi)
# Targets for Q and V regression
q_backup = tf.stop_gradient(tf.expand_dims(r_ph,1) + gamma*(1-tf.expand_dims(d_ph,1))*v_targ)
# q_backup = tf.stop_gradient(r_ph + gamma*(1-d_ph))
v_backup = tf.stop_gradient(min_q_pi - alpha * logp_pi)
# Soft actor-critic losses
pi_loss = tf.reduce_mean(alpha * logp_pi - q1_pi)
q1_loss = 0.5 * tf.reduce_mean((q_backup - q1)**2)
q2_loss = 0.5 * tf.reduce_mean((q_backup - q2)**2)
v_loss = 0.5 * tf.reduce_mean((v_backup - v)**2)
value_loss = q1_loss + q2_loss + v_loss
# Policy train op
# (has to be separate from value train op, because q1_pi appears in pi_loss)
pi_optimizer = tf.train.AdamOptimizer(learning_rate=lr)
train_pi_op = pi_optimizer.minimize(pi_loss, var_list=get_vars('main/pi'))
# Value train op
# (control dep of train_pi_op because sess.run otherwise evaluates in nondeterministic order)
value_optimizer = tf.train.AdamOptimizer(learning_rate=lr)
value_params = get_vars('main/q') + get_vars('main/v')
with tf.control_dependencies([train_pi_op]):
train_value_op = value_optimizer.minimize(value_loss, var_list=value_params)
# Polyak averaging for target variables
# (control flow because sess.run otherwise evaluates in nondeterministic order)
with tf.control_dependencies([train_value_op]):
target_update = tf.group([tf.assign(v_targ, polyak*v_targ + (1-polyak)*v_main)
for v_main, v_targ in zip(get_vars('main'), get_vars('target'))])
# All ops to call during one training step
step_ops = [pi_loss, q1_loss, q2_loss, v_loss, q1, q2, v, logp_pi,
train_pi_op, train_value_op, target_update]
# Initializing targets to match main variables
target_init = tf.group([tf.assign(v_targ, v_main)
for v_main, v_targ in zip(get_vars('main'), get_vars('target'))])
sess = tf.Session()
sess.run(tf.global_variables_initializer())
sess.run(target_init)
# Setup model saving
logger.setup_tf_saver(sess, inputs={'x': x_ph, 'a': a_ph},
outputs={'mu': mu, 'pi': pi, 'q1': q1, 'q2': q2, 'v': v})
def get_action(o, head, deterministic=False):
# act_op = mu[:,p_head,:] if deterministic else pi[:,p_head,:]
act_op = mu if deterministic else pi
return sess.run(act_op, feed_dict={x_ph: o.reshape(1,-1)})[0,head,:]
def test_agent(n, head):
global sess, mu, pi, q1, q2, q1_pi, q2_pi
for j in range(n):
o, r, d, ep_ret, ep_len = test_env.reset(), 0, False, 0, 0
# head = np.random.randint(num_heads, size = 1)[0]
while not(d or (ep_len == max_ep_len)):
# Take deterministic actions at test time
o, r, d, _ = test_env.step(get_action(o, head, True))
ep_ret += r
ep_len += 1
logger.store(TestEpRet=ep_ret, TestEpLen=ep_len)
start_time = time.time()
o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0
total_steps = steps_per_epoch * epochs
policy = LM_DSEE(ac_kwargs['num_heads'], rho = 0.49, lower = -50, amplitude= 4450)
policy.startGame()
returns = []
choices = []
head = policy.choice()
# print ('Total number of heads', ac_kwargs['num_heads'])
# Main loop: collect experience in env and update/log each epoch
train_end = start_time
for t in range(total_steps):
"""
Until start_steps have elapsed, randomly sample actions
from a uniform distribution for better exploration. Afterwards,
use the learned policy.
"""
if t > start_steps:
a = get_action(o, head)
else:
a = env.action_space.sample()
# a = env.action_space.sample()
# Step the env
o2, r, d, _ = env.step(a)
ep_ret += r
ep_len += 1
# Ignore the "done" signal if it comes from hitting the time
# horizon (that is, when it's an artificial terminal signal
# that isn't based on the agent's state)
d = False if ep_len==max_ep_len else d
# Store experience to replay buffer
replay_buffer.store(o, a, r, o2, d)
# Super critical, easy to overlook step: make sure to update
# most recent observation!
o = o2
if d or (ep_len == max_ep_len):
"""
Perform all SAC updates at the end of the trajectory.
This is a slight difference from the SAC specified in the
original paper.
"""
train_start = time.time()
# print (t//steps_per_epoch, "Playing time", train_start - train_end)
policy.getReward(head, ep_ret)
returns.append(ep_ret)
choices.append(head)
head = policy.choice()
for j in range(ep_len):
batch = replay_buffer.sample_batch(batch_size)
feed_dict = {x_ph: batch['obs1'],
x2_ph: batch['obs2'],
a_ph: batch['acts'],
r_ph: batch['rews'],
d_ph: batch['done'],
}
# tic = time.time()
outs = sess.run(step_ops, feed_dict)
# toc = time.time()
# print (toc-tic)
logger.store(LossPi=outs[0], LossQ1=outs[1], LossQ2=outs[2],
LossV=outs[3], Q1Vals=outs[4], Q2Vals=outs[5],
VVals=outs[6], LogPi=outs[7])
logger.store(EpRet=ep_ret, EpLen=ep_len)
o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0
train_end = time.time()
# print (t//steps_per_epoch, "Training time", train_end - train_start)
# End of epoch wrap-up
if t > 0 and t % steps_per_epoch == 0:
test_start = time.time()
epoch = t // steps_per_epoch
# Save model
if (epoch % save_freq == 0) or (epoch == epochs-1):
logger.save_state({'env': env}, None)
# Test the performance of the deterministic version of the agent.
head = policy.choice()
test_agent(n = 10, head = head)
# Log info about epoch
logger.log_tabular('Epoch', epoch)
logger.log_tabular('EpRet', with_min_and_max=True)
logger.log_tabular('TestEpRet', with_min_and_max=True)
logger.log_tabular('EpLen', average_only=True)
logger.log_tabular('TestEpLen', average_only=True)
logger.log_tabular('TotalEnvInteracts', t)
logger.log_tabular('Q1Vals', with_min_and_max=True)
logger.log_tabular('Q2Vals', with_min_and_max=True)
logger.log_tabular('VVals', with_min_and_max=True)
logger.log_tabular('LogPi', with_min_and_max=True)
logger.log_tabular('LossPi', average_only=True)
logger.log_tabular('LossQ1', average_only=True)
logger.log_tabular('LossQ2', average_only=True)
logger.log_tabular('LossV', average_only=True)
logger.log_tabular('Time', time.time()-start_time)
logger.dump_tabular()
test_end = time.time()
# print (t//steps_per_epoch, "Testing time", test_end - test_start)
# print ("*"*30)
print (returns, choices)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--env', type=str, default='HalfCheetah-v2')
parser.add_argument('--hid', type=int, default=300)
parser.add_argument('--l', type=int, default=1)
parser.add_argument('--gamma', type=float, default=0.99)
parser.add_argument('--seed', '-s', type=int, default=0)
parser.add_argument('--epochs', type=int, default=50)
parser.add_argument('--exp_name', type=str, default='sac')
args = parser.parse_args()
from spinup.utils.run_utils import setup_logger_kwargs
logger_kwargs = setup_logger_kwargs(args.exp_name, args.seed)
sac(lambda : gym.make(args.env), actor_critic=core.mlp_actor_critic,
ac_kwargs=dict(hidden_sizes=[args.hid]*args.l),
gamma=args.gamma, seed=args.seed, epochs=args.epochs,
logger_kwargs=logger_kwargs) | [
"anand.rajasekar18@gmail.com"
] | anand.rajasekar18@gmail.com |
cb8fa7993d28e68af554a2ed1f56341714a22a4f | 478d095966166ae77bd39238c43b3e428e9a48e7 | /4 Строки 18.09/num_pi.py | 190e48067ac48fcc87f3dcf182a6995a1084207a | [] | no_license | AAlexIII/Welcome_to_Py | 3b07c6f2d953b7a31def6299e42d03acda4300e9 | b545144806c4d8092821a6e27b5876c02448fcad | refs/heads/master | 2021-07-11T06:10:39.470409 | 2020-10-09T08:53:55 | 2020-10-09T08:53:55 | 208,157,546 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | import math
def not_pi(k):
return round(math.pi, k)
print(f"Вот то что вы хотели {not_pi(int(input('Сколько знаков пи вас интересует? ')))}")
| [
"a.alex.2000@mail.ru"
] | a.alex.2000@mail.ru |
a4386519e6338b9c3dd8af64aa1c10a2e7c08b79 | 6549cf82178a3d2f9ba231cb9851fd20dc6ccea3 | /model.py | 7be69cbdc0b0d1306f34ee35caf286abe62cc2ae | [] | no_license | rxhxt/R-5-ASSIST | 9cf35814ab328b5e283452f4d638e997ad150bbf | 15d9b134d4dab6753a1207f68bb5d4c6837a1cc8 | refs/heads/master | 2023-01-01T08:30:03.470079 | 2020-10-10T11:37:47 | 2020-10-10T11:37:47 | 297,110,235 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,728 | py | from imports import *
from mail import *
model = T5ForConditionalGeneration.from_pretrained('t5-small')
tokenizer = T5Tokenizer.from_pretrained('t5-small')
device = torch.device('cpu')
nltk.download('punkt')
def cleanText(text):
text = re.sub(r"@[A-Za-z0-9]+", ' ', text)
text = re.sub(r"https?://[A-Za-z0-9./]+", ' ', text)
text = re.sub(r"[^a-zA-z.!?'0-9]", ' ', text)
text = re.sub('\t', ' ', text)
text = re.sub(r" +", ' ', text)
return text
def getSummary(text,tokenizer):
preprocess_text = text.strip().replace("\n","")
t5_prepared_Text = "summarize: "+preprocess_text
tokenized_text = tokenizer.encode(t5_prepared_Text, return_tensors="pt").to(device)
summary_ids = model.generate(tokenized_text,
num_beams=4,
no_repeat_ngram_size=2,
min_length=30,
max_length=100,
early_stopping=True)
output = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
print(output)
return output
def sentenceCorrection(text):
correctedText = ""
parser = GingerIt()
sentences = sent_tokenize(text, language='english')
for sentence in sentences:
sentenceDict = parser.parse(sentence)
sentence = str(sentenceDict['result'])
correctedText += sentence
return correctedText
def summaryGeneration(mailid):
try:
txtFiles = []
for filename in os.listdir(app.config["PDF_UPLOADS"]):
if fnmatch.fnmatch(filename, 'pdf_fileChapter*.txt'):
print(filename)
txtFiles.append(filename)
for fname in txtFiles:
summary = ""
print("Summarising: ", fname)
text = ""
with open(os.path.join(app.config['PDF_UPLOADS'] + '/' + fname), 'r', encoding="utf-8") as f:
textLines = f.readlines()
for line in textLines:
line = cleanText(line)
line = line.replace("\n", " ")
text += line
textTokens = word_tokenize(text)
totalTokens = len(textTokens)
chunkCounter = 0
maxTokenLen = 1000
chunkList = []
start = 0
end = maxTokenLen
if(totalTokens % maxTokenLen) == 0:
totalChunks = int(totalTokens / maxTokenLen)
for i in range(0,totalChunks):
tempTokens = textTokens[start:end]
chunkText = ' '.join([str(elem) for elem in tempTokens])
chunkList.append(chunkText)
start = end
end += maxTokenLen
chunkText = ""
else:
totalChunks = int(totalTokens / maxTokenLen) + 1
for i in range(0,(totalChunks-1)):
tempTokens = textTokens[start:end]
chunkText = ' '.join([str(elem) for elem in tempTokens])
chunkList.append(chunkText)
start = end
end += maxTokenLen
chunkText = ""
tempTokens = textTokens[start:totalTokens]
chunkText = ' '.join([str(elem) for elem in tempTokens])
chunkList.append(chunkText)
for chunk in chunkList:
tempSummary = getSummary(chunk, tokenizer)
print(tempSummary)
summary += tempSummary
summary = sentenceCorrection(summary)
print("Summarisation complete!")
fileName = fname[:-4] + "_summary.txt"
with open(os.path.join(app.config['PDF_UPLOADS'] + '/' + fileName), 'w', encoding="utf-8") as f1:
f1.write(summary)
print("Summary written to file!")
f1.close()
f.close()
os.remove(os.path.join(app.config['PDF_UPLOADS'] + '/' + fname))
makezipAndCleanUp(mailid)
except Exception as e:
print(e)
send_fail(mailid)
def makezipAndCleanUp(mailid):
# function to compress all summary text files into single zip file
# call mail function and send zip file
shutil.make_archive('summarized_chapters', 'zip', app.config['PDF_UPLOADS'])
for file in os.listdir(app.config['PDF_UPLOADS']):
os.remove(os.path.join(app.config['PDF_UPLOADS'] + '/' + file))
send_mail('summarized_chapters.zip', mailid) | [
"jineshparakh@hotmail.com"
] | jineshparakh@hotmail.com |
7a57b9d8fc4353b0116d5eb59291d529fd673296 | 91e98f30ab87f13cbd533c276e24690912690b35 | /BlaineFry/Phys_707_Model_Selection_v2.py | a908f6a9aff841b250489f5e5527751582a51e48 | [] | no_license | ladosamushia/PHYS707 | a5a3f4954746722a3c7e530730a7cbd01caeb5f4 | 968e143022d49bfe477590b38e40184e3affed02 | refs/heads/master | 2020-07-20T06:38:42.914658 | 2019-12-23T12:27:43 | 2019-12-23T12:27:43 | 206,591,395 | 1 | 4 | null | null | null | null | UTF-8 | Python | false | false | 3,413 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 22 09:52:28 2019
@author: Blaine Fry
"""
# import packages
import numpy as np
from numpy import random as rand
from matplotlib import pyplot as plt
#%% generate some data
Npoints = 50
mu_0 = 1.0
sigma_0 = 1.0
data = rand.normal(loc=mu_0,scale=sigma_0,size=Npoints)
# and make a histogram of it
xbounds = [-5,5]
Nbins = 20
Bins = np.linspace(xbounds[0],xbounds[1],num=Nbins+1)
plt.figure(1)
plt.xlim(xbounds[0],xbounds[1])
plt.xlabel('value')
plt.ylabel('Normalized Frequency')
plt.title('Data Histogram')
plt.grid(alpha=0.5)
plt.hist(data,bins=Bins,alpha=0.8,color='m',normed=True,label='Data') # may need to switch normed to density if this line calls an error
plt.legend()
#%% define the models to test out
# first, a general gaussian
def gauss(x,mu,sigma):
return (1/(sigma*np.sqrt(2*np.pi)))*np.exp(-((x-mu)*(x-mu))/(2*sigma*sigma))
# then the models
def Gauss_A(x):
return gauss(x,1.0,1.0)
def Gauss_B(x):
return gauss(x,1.2,1.0)
x = np.linspace(xbounds[0],xbounds[1],num=1000)
plt.plot(x,Gauss_A(x),'c-',label='Gauss A')
plt.plot(x,Gauss_B(x),'b-',label='Gauss B')
plt.legend()
#%% start comparing models
# P({x_i}) = P(x_1)*P(x_2)*P(x_3)*...
# logs would be better... consider revising
Ntrials = 1000
def compare_models(actual_dist,mu_ex,sigma_ex,model_1,model_2): # actual_dist = 'Gauss' or 'Cauchy'
log_ratios = []
for i in range(Ntrials):
if actual_dist is 'Gauss':
data = rand.normal(loc=mu_ex,scale=sigma_ex,size=Npoints)
else:
data = rand.standard_cauchy(size=Npoints)
# find the probability of the data set given model 1
prob1 = 1
for i in range(Npoints):
prob1 *= model_1(data[i])
# find the probability of the data set given model 2
prob2 = 1
for i in range(Npoints):
prob2 *= model_2(data[i])
log_ratios.append(np.log10(prob1/prob2))
return log_ratios
ratios_A = compare_models('Gauss',1.0,1.0,Gauss_A,Gauss_B) # compare the models if A is true
ratios_B = compare_models('Gauss',1.2,1.0,Gauss_A,Gauss_B) # compare the models if B is true
plt.figure(2)
plt.title('Model Comparison')
plt.ylabel('Normalized Frequency')
plt.xlabel(r'$\log_{10} \left(\frac{f_A}{f_B}\right)$')
plt.hist(ratios_A,bins=Ntrials/10,alpha=0.7,normed=True,label='A is True')
plt.hist(ratios_B,bins=Ntrials/10,alpha=0.7,normed=True,label='B is True')
plt.legend()
#%% Now we want to do the same, but with Cauchy vs Gauss
mu_star = 0
sigma_star = 1
def GAUSS(x):
return gauss(x,mu_star,sigma_star)
def CAUCHY(x):
return 1.0/((np.pi*sigma_star)*(1.0+(((x-mu_star)/sigma_star)**2)))
plt.figure(3)
plt.title('Example Distributions')
x = np.linspace(-5,5,100)
plt.plot(x,GAUSS(x),'b',label='Gauss')
plt.plot(x,CAUCHY(x),'r-',label='Cauchy')
plt.legend()
ratios_Gauss = compare_models('Gauss',0.0,1.0,GAUSS,CAUCHY)
ratios_Cauchy = compare_models('Cauchy',0.0,1.0,GAUSS,CAUCHY)
plt.figure(4)
plt.title('Gauss vs Cauchy')
plt.ylabel('Normalized Frequency')
plt.xlabel(r'$\log_{10} \left(\frac{f_{Gauss}}{f_{Cauchy}}\right)$')
plt.hist(ratios_Gauss,bins=Ntrials/10,alpha=0.7,normed=True,label='Gauss is True')
plt.hist(ratios_Cauchy,bins=Ntrials/10,alpha=0.7,normed=True,label='Cauchy is True')
plt.legend()
| [
"noreply@github.com"
] | noreply@github.com |
9c0cf1c8261160763e6b9bd9b4485efef74b2d8d | 2990b0841b63f300a722107933c01c7237a7976b | /all_xuef/code/leetcode/TOOLS/BinaryTree-master/test/__init__.py | a503b9948e4bb811046d2e3362574e7f4bed412b | [] | no_license | xuefengCrown/Files_01_xuef | 8ede04751689e0495e3691fc5d8682da4d382b4d | 677329b0189149cb07e7ba934612ad2b3e38ae35 | refs/heads/master | 2021-05-15T04:34:49.936001 | 2019-01-23T11:50:54 | 2019-01-23T11:50:54 | 118,802,861 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 60 | py | from test_binary_tree import *
from test_tree_node import *
| [
"643472092@qq.com"
] | 643472092@qq.com |
b8b654ff6871dffa670be45c300d5129b9e7f7ab | 527dd4b545b7062c1939827b4513f9b215d4b0f2 | /listp.py | 3e8ac70da0d6cf5760d1eb1afcf38f966c00f93b | [] | no_license | Suraj-Kumar-Dubey/python | d02d6f2bfedb9729878b4e4f7d3a8e9a06087b6e | d92942559c7865788d34735882200fd8bbf12766 | refs/heads/master | 2021-01-09T05:19:56.761877 | 2017-02-02T18:27:02 | 2017-02-02T18:27:02 | 80,753,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 159 | py | rng = input("enter the range")
list = []
for i in range(rng) :
x = input("enter the number")
list.append(x)
print list
list.sort()
print list
print list[-2]
| [
"surajdubey302@rediffmail.com"
] | surajdubey302@rediffmail.com |
fc6c7e17a5a0737e41e1d3fda989998f0019ad5e | ea2f33ac71a144c9c39fad6f229cb93d87de9fa8 | /exercise02_write.py | f24425c3aeb5e9c67439917e5ea72ef8d5aa659d | [] | no_license | ym7979/aid-1 | 1301235d0e6f43d68fff3cd4e83ad8ee2bdb88fe | f460cea87449a15ab06af517eba671e4a643274d | refs/heads/master | 2020-12-13T03:02:20.021658 | 2020-01-16T10:49:02 | 2020-01-16T10:49:02 | 234,293,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | import pymysql
# 连接数据库
db=pymysql.connect(host="localhost",
port=3306,
user="root",
password="123456",
database="stu",
charset="utf8")
#生成游标对象(操作数据库,执行sql语句,获取结果)
cur=db.cursor()
# 执行各种sql操作
sql = "insert into cls (name,age,sex,score) values(%s,%s,%s,%s);"
l=[('Dave', 17, 'm',81),('Ala',18,'w',84),('Eva',19,'w',91)]
try:
# 法1
# for i in l:
# cur.execute(sql,i)
# 法2
cur.executemany(sql,l)
db.commit()
except:
db.rollback()
# 关闭游标和数据库连接
cur.close()
db.close() | [
"1176195161@qq.com"
] | 1176195161@qq.com |
00bfc63f1bf21d0a15cb4d8c99e4ebae6038a881 | e0c257a6846ffac4303a733ba2420cfc1dc7526b | /src/199. 二叉树的右视图.py | bdd7c21f3fae0a40eacf2259dd62b7ebf00f87e7 | [] | no_license | Hanlen520/Leetcode-4 | 1d21112e23d8a782e0bfe82d3b1cc216fc1ef350 | 389e004052ba2c0951ffd66af97ac368f7de84d4 | refs/heads/master | 2022-03-31T21:48:49.675426 | 2020-01-06T17:21:45 | 2020-01-06T17:21:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,594 | py | """
给定一棵二叉树,想象自己站在它的右侧,按照从顶部到底部的顺序,返回从右侧所能看到的节点值。
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def rightSideView(self, root: TreeNode) -> List[int]:
if not root:
return []
stack = []
ans = [root.val]
if root.left:
stack.append((root.left, 1))
if root.right:
stack.append((root.right, 1))
while stack:
curr = stack.pop(0)
if not stack or curr[1] != stack[0][1]:
ans.append(curr[0].val)
if curr[0].left:
stack.append((curr[0].left, curr[1]+1))
if curr[0].right:
stack.append((curr[0].right, curr[1]+1))
return ans
# 我的解答是把每一层的元素从左往右都写出来,但是为什么不从右往左,每次只写第一个元素呢?
# ```py
# class Solution:
# def rightSideView(self, root):
# """
# :type root: TreeNode
# :rtype: List[int]
# """
# result = []
# self.recursive(root,1,result)
# return result
# def recursive(self, root, depth, result):
# if root is None:
# return
# if depth > len(result):
# result.append(root.val)
# self.recursive(root.right,depth+1, result)
# self.recursive(root.left,depth+1, result)
# ```
| [
"bjwu@zju.edu.cn"
] | bjwu@zju.edu.cn |
46832810e66777462aaa65daf4a2fd3b539daa67 | 1e2630724d366a801988178a174905961797248c | /formating.py | e72ee38b5d89cb8049609bcb5b787253056de7db | [] | no_license | cesarmaina/python-scratches | 8d290831e413aae316b1995168dda3c4a2df0f01 | 85a847c89b43026cfa778b3fae793e5f5c23899b | refs/heads/main | 2023-07-11T16:14:01.344388 | 2021-08-12T08:27:28 | 2021-08-12T08:27:28 | 383,376,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | #removing parts of the naming string
import os
print("eg /root/Videos")
print()
dir=input("Enter the file directory: ")
os.chdir(dir)
print(os.getcwd())
print()
print("files available are:")
for x in os.listdir(os.getcwd()):
a,b=os.path.splitext(x)
i,j,k,l =a.split(" ")
new_name="{}_{}".format(i,l)
print(new_name)
os.rename(x,new_name)
| [
"noreply@github.com"
] | noreply@github.com |
37cf2e93f4c27892d6d3d0cf64c3981e549f5aa5 | ba850639122e1bc2f0e9798986ab34e43befa635 | /examples/p3can/p3can_demo.py | 3ac7e2dc6eb09907b818ae4dfcce1da76b1722a2 | [
"MIT"
] | permissive | KDriesen/tribology | 5a491bcd7c9f1b57e17d1efcdf3aae99841b1ef5 | 09bf75d670fb3d86575ca4bdced00d5dce2d4af7 | refs/heads/master | 2022-02-27T01:59:39.771791 | 2019-08-19T09:08:48 | 2019-08-19T09:08:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | """
Short demonstration of how to use P3CAN module of the tribology package.
"""
from tribology.p3can.p3can import p3can, generate_input_file
import os
def p3can_demo():
"""
Generate a P3CAN input file, then start a simulation run. This function
performs a number of simulation runs for different simulation types;
see the P3CAN documentation for more information (also have a look at the
P3CAN input file).
A directory `results` will be created in the current working directory that
contains the simulation output files. See terminal output for more
information.
"""
for sim_type in (3, 5, 6, 7, 8):
out_file = generate_input_file(
sim_type,
os.getcwd() + os.sep + 'demo.txt')
p3can(out_file)
if __name__ == "__main__":
p3can_demo()
| [
"moritz.ploss@googlemail.com"
] | moritz.ploss@googlemail.com |
5565adee97331eac2b4139117a7071131dc8f1f0 | 07cdf16998da4bb0c1173ec968d835b073ddd7de | /stream_example/comenzi.py | 2599e356c85cd043866c8ca44ba687c587648cd3 | [] | no_license | AndreiCherechesu/eestec9 | 4e90735a7fe6b4f3f6dcda6e775637c813410045 | bc23f5b6269084e9e4c4f4fda44a1f3e99216da8 | refs/heads/master | 2020-04-29T08:01:50.920214 | 2019-03-19T17:37:11 | 2019-03-19T17:37:11 | 175,972,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,268 | py | import cv2, sys
import requests
import json
import time
import socketio
from utils import printTimeDiff, initTimeDiff
from client import startListening, curFrame, frameFragments
def example(frame):
#TODO: do something with your frame
#render frame to our screen
cv2.imshow('client', frame)
cv2.waitKey(1)
# sio = socketio.Client()
# sio.connect("http://10.81.176.105")
def sendCommand(url, body):
global sio
try:
# print(body)
resp = requests.post(url, json = body)
# print(resp.status_code)
# resp = sio.emit(url, body)
# print(resp.json())
return resp.json()
except requests.ConnectionError as e:
print(e)
url_admin = "http://10.81.176.97/admin"
url_config = "http://10.81.176.97/stream_config"
url_pselect = "http://10.81.176.97/player_select"
url_command = "http://10.81.176.97/command"
url_status = "http://10.81.176.97/get_status"
status = {
"key" : "ztobuh98copz35ka"
}
menu_down = {
"key": "nuicho9423hlcx80",
"type": "menu_command",
"menu_key": "down",
"is_player_2": False
}
menu_up = {
"key": "nuicho9423hlcx80",
"type": "menu_command",
"menu_key": "up",
"is_player_2": False
}
menu_left = {
"key": "nuicho9423hlcx80",
"type": "menu_command",
"menu_key": "left",
"is_player_2": False
}
menu_right = {
"key": "nuicho9423hlcx80",
"type": "menu_command",
"menu_key": "right",
"is_player_2": False
}
menu_enter = {
"key": "nuicho9423hlcx80",
"type": "menu_command",
"menu_key": "enter",
"is_player_2": False
}
menu_escape = {
"key": "nuicho9423hlcx80",
"type": "menu_command",
"menu_key": "escape",
"is_player_2": False
}
select_scorpio = {
"key": "ztobuh98copz35ka",
"champion": "scorpio"
}
select_subzero = {
"key": "ztobuh98copz35ka",
"champion": "subzero"
}
game_up = {
"key": "ztobuh98copz35ka",
"commands": {
"up": True
}
}
game_up_disable = {
"key": "ztobuh98copz35ka",
"commands": {
"up": False
}
}
game_down = {
"key": "ztobuh98copz35ka",
"commands": {
"down": True
}
}
game_down_disable = {
"key": "ztobuh98copz35ka",
"commands": {
"down": False
}
}
game_left = {
"key": "ztobuh98copz35ka",
"commands": {
"left": True
}
}
game_left_disable = {
"key": "ztobuh98copz35ka",
"commands": {
"left": False
}
}
game_right = {
"key": "ztobuh98copz35ka",
"commands": {
"right": True
}
}
game_right_disable = {
"key": "ztobuh98copz35ka",
"commands": {
"right": False
}
}
game_front_punch = {
"key": "ztobuh98copz35ka",
"commands": {
"front_punch": True
}
}
game_front_punch_disable = {
"key": "ztobuh98copz35ka",
"commands": {
"front_punch": False
}
}
game_back_punch = {
"key": "ztobuh98copz35ka",
"commands": {
"back_punch": True
}
}
game_back_punch_disable = {
"key": "ztobuh98copz35ka",
"commands": {
"back_punch": False
}
}
game_front_kick = {
"key": "ztobuh98copz35ka",
"commands": {
"front_kick": True
}
}
game_front_kick_disable = {
"key": "ztobuh98copz35ka",
"commands": {
"front_kick": False
}
}
game_back_kick = {
"key": "ztobuh98copz35ka",
"commands": {
"back_kick": True
}
}
game_back_kick_disable = {
"key": "ztobuh98copz35ka",
"commands": {
"back_kick": False
}
}
game_interact = {
"key": "ztobuh98copz35ka",
"commands": {
"interact": True
}
}
game_interact_disable = {
"key": "ztobuh98copz35ka",
"commands": {
"interact": False
}
}
game_throw = {
"key": "ztobuh98copz35ka",
"commands": {
"throw": True
}
}
game_throw_disable = {
"key": "ztobuh98copz35ka",
"commands": {
"throw": False
}
}
game_block = {
"key": "ztobuh98copz35ka",
"commands": {
"block": True
}
}
game_block_disable = {
"key": "ztobuh98copz35ka",
"commands": {
"block": False
}
}
shortcut_igtmm = {
"key": "nuicho9423hlcx80",
"type": "in_game_to_main_menu"
# "hide_post_game_details": True
}
shortcut_2p = {
"key": "nuicho9423hlcx80",
"type": "new_2p_game"
# "hide_post_game_details": True
}
shortcut_pselect = {
"key": "nuicho9423hlcx80",
"type": "start_player_select"
# "hide_post_game_details": True
}
def escape():
sendCommand(url_admin, menu_escape)
time.sleep(0.3)
sendCommand(url_admin, menu_up)
time.sleep(0.3)
sendCommand(url_admin, menu_up)
time.sleep(0.3)
sendCommand(url_admin, menu_up)
time.sleep(0.4)
sendCommand(url_admin, menu_enter)
time.sleep(0.2)
def low_kick():
sendCommand(url_command, game_right)
time.sleep(0.2)
sendCommand(url_command, game_left)
time.sleep(0.2)
sendCommand(url_command, game_back_kick)
sendCommand(url_command, game_right_disable)
sendCommand(url_command, game_left_disable)
sendCommand(url_command, game_back_kick_disable)
time.sleep(0.2)
def teleport_right():
sendCommand(url_command, game_down)
time.sleep(0.1)
sendCommand(url_command, game_left)
time.sleep(0.1)
sendCommand(url_command, game_front_kick)
time.sleep(0.1)
sendCommand(url_command, game_down_disable)
time.sleep(0.1)
sendCommand(url_command, game_left_disable)
time.sleep(0.1)
sendCommand(url_command, game_front_kick_disable)
time.sleep(0.4)
def teleport_left():
sendCommand(url_command, game_down)
time.sleep(0.1)
sendCommand(url_command, game_right)
time.sleep(0.1)
sendCommand(url_command, game_front_kick)
time.sleep(0.1)
sendCommand(url_command, game_down_disable)
time.sleep(0.1)
sendCommand(url_command, game_right_disable)
time.sleep(0.1)
sendCommand(url_command, game_front_kick_disable)
time.sleep(0.4)
def spear_right():
sendCommand(url_command, game_left)
time.sleep(0.1)
sendCommand(url_command, game_right)
time.sleep(0.1)
sendCommand(url_command, game_front_punch)
time.sleep(0.1)
sendCommand(url_command, game_left_disable)
time.sleep(0.1)
sendCommand(url_command, game_right_disable)
time.sleep(0.1)
sendCommand(url_command, game_front_punch_disable)
time.sleep(0.4)
def spear_left():
sendCommand(url_command, game_right)
time.sleep(0.1)
sendCommand(url_command, game_left)
time.sleep(0.1)
sendCommand(url_command, game_front_punch)
time.sleep(0.1)
sendCommand(url_command, game_right_disable)
time.sleep(0.1)
sendCommand(url_command, game_left_disable)
time.sleep(0.1)
sendCommand(url_command, game_front_punch_disable)
time.sleep(0.4)
def forward2_right():
sendCommand(url_command, game_right)
time.sleep(0.2)
sendCommand(url_command, game_back_punch)
time.sleep(0.2)
sendCommand(url_command, game_back_punch_disable)
sendCommand(url_command, game_right_disable)
time.sleep(0.4)
def forward2_left():
sendCommand(url_command, game_left)
time.sleep(0.2)
sendCommand(url_command, game_back_punch)
time.sleep(0.2)
sendCommand(url_command, game_back_punch_disable)
sendCommand(url_command, game_left_disable)
time.sleep(0.4)
def takedown_left():
sendCommand(url_command, game_right)
time.sleep(0.2)
sendCommand(url_command, game_left)
time.sleep(0.2)
sendCommand(url_command, game_back_kick)
time.sleep(0.2)
sendCommand(url_command, game_left_disable)
sendCommand(url_command, game_right_disable)
sendCommand(url_command, game_back_kick_disable)
time.sleep(0.4)
def takedown_right():
sendCommand(url_command, game_left)
time.sleep(0.2)
sendCommand(url_command, game_right)
time.sleep(0.2)
sendCommand(url_command, game_back_kick)
time.sleep(0.2)
sendCommand(url_command, game_right_disable)
sendCommand(url_command, game_left_disable)
sendCommand(url_command, game_back_kick_disable)
time.sleep(0.4)
def move_right():
sendCommand(url_command, game_right)
time.sleep(0.2)
sendCommand(url_command, game_right_disable)
time.sleep(0.2)
def fpunch():
sendCommand(url_command, game_front_punch)
time.sleep(0.2)
sendCommand(url_command, game_front_punch_disable)
time.sleep(0.2)
def bpunch():
sendCommand(url_command, game_back_punch)
time.sleep(0.2)
sendCommand(url_command, game_back_punch_disable)
time.sleep(0.2)
def fkick():
sendCommand(url_command, game_front_kick)
time.sleep(0.2)
sendCommand(url_command, game_front_kick_disable)
time.sleep(0.2)
def bkick():
sendCommand(url_command, game_back_kick)
time.sleep(0.2)
sendCommand(url_command, game_back_kick_disable)
time.sleep(0.2)
def block():
sendCommand(url_command, game_block)
time.sleep(0.2)
sendCommand(url_command, game_block_disable)
def upsword_left():
sendCommand(url_command, game_right)
time.sleep(0.1)
sendCommand(url_command, game_back_punch)
time.sleep(0.1)
sendCommand(url_command, game_right_disable)
sendCommand(url_command, game_back_punch_disable)
def upsword_right():
sendCommand(url_command, game_left)
time.sleep(0.1)
sendCommand(url_command, game_back_punch)
time.sleep(0.1)
sendCommand(url_command, game_right_disable)
sendCommand(url_command, game_back_punch_disable)
def uppercut():
sendCommand(url_command, game_down)
time.sleep(0.1)
sendCommand(url_command, game_back_punch)
time.sleep(0.1)
sendCommand(url_command, game_down_disable)
sendCommand(url_command, game_back_punch_disable)
time.sleep(0.1)
# UDP_IP = "0.0.0.0"
# UDP_PORT = 5005
# if (len(sys.argv) > 1):
# UDP_PORT = int(sys.argv[1])
# startListening(UDP_IP, UDP_PORT, example)
if __name__ == "__main__":
# move_right()
# while(True):
# teleport_right()
# time.sleep(2)
# teleport_left()
# time.sleep(2)
while(True):
line = sys.stdin.readline().strip()
if (line == "mup"):
sendCommand(url_admin, menu_up)
if (line == "mdown"):
sendCommand(url_admin, menu_down)
if (line == "mleft"):
sendCommand(url_admin, menu_left)
if (line == "mright"):
sendCommand(url_admin, menu_right)
if (line == "menter"):
sendCommand(url_admin, menu_enter)
if (line == "mescape"):
sendCommand(url_admin, menu_escape)
if (line == "scorpio"):
sendCommand(url_pselect, select_scorpio)
if (line == "subzero"):
sendCommand(url_pselect, select_subzero)
if (line == "reset"):
sendCommand(url_admin, shortcut_igtmm)
if (line == "pvp"):
sendCommand(url_admin, shortcut_2p)
if (line == "select"):
sendCommand(url_admin, shortcut_pselect)
if (line == "gup"):
sendCommand(url_command, game_up)
time.sleep(0.1)
sendCommand(url_command, game_up_disable)
time.sleep(0.1)
if (line == "gdown"):
sendCommand(url_command, game_down)
time.sleep(0.7)
sendCommand(url_command, game_down_disable)
time.sleep(0.1)
if (line == "gleft"):
sendCommand(url_command, game_left)
time.sleep(0.1)
sendCommand(url_command, game_left_disable)
time.sleep(0.1)
if (line == "gright"):
sendCommand(url_command, game_right)
time.sleep(0.1)
sendCommand(url_command, game_right_disable)
time.sleep(0.1)
if (line == "fpunch"):
sendCommand(url_command, game_front_punch)
time.sleep(0.1)
sendCommand(url_command, game_front_punch_disable)
time.sleep(0.1)
if (line == "bpunch"):
sendCommand(url_command, game_back_punch)
time.sleep(0.1)
sendCommand(url_command, game_back_punch_disable)
time.sleep(0.1)
if (line == "fkick"):
sendCommand(url_command, game_front_kick)
time.sleep(0.1)
sendCommand(url_command, game_front_kick_disable)
time.sleep(0.1)
if (line == "bkick"):
sendCommand(url_command, game_back_kick)
time.sleep(0.1)
sendCommand(url_command, game_back_kick_disable)
time.sleep(0.1)
if (line == "throw"):
sendCommand(url_command, game_throw)
time.sleep(0.1)
sendCommand(url_command, game_throw_disable)
time.sleep(0.1)
if (line == "block"):
sendCommand(url_command, game_block)
time.sleep(0.1)
sendCommand(url_command, game_block_disable)
time.sleep(0.1)
if (line == "interact"):
sendCommand(url_command, game_interact)
# sendCommand(url_admin, menu_escape)
# sendCommand(url_command, game_left)
# time.sleep(3)
# sendCommand(url_command, game_left_disable)
# count = 10
# while (count > 0):
# count -= 1
# forward2_right()
# sendCommand(url_command, game_up)
# time.sleep(0.2)
# sendCommand(url_command, game_up_disable)
# time.sleep(0.2)
# sendCommand(url_command, game_right)
# time.sleep(0.2)
# sendCommand(url_command, game_right_disable)
# time.sleep(0.4)
# sendCommand(url_command, game_front_punch)
# time.sleep(0.2)
# sendCommand(url_command, game_front_punch_disable)
# time.sleep(0.2)
# sendCommand(url_command, game_back_kick)
# time.sleep(0.2)
# sendCommand(url_command, game_back_kick_disable)
# time.sleep(0.4)
# teleport_right()
# time.sleep(0.2)
# sendCommand(url_command, game_back_punch)
# time.sleep(0.2)
# sendCommand(url_command, game_back_punch_disable)
# time.sleep(0.2)
# sendCommand(url_command, game_front_punch)
# time.sleep(0.2)
# sendCommand(url_command, game_front_punch_disable)
# time.sleep(0.4)
# spear_right()
# time.sleep(0.4)
# takedown_right()
# time.sleep(0.2)
# teleport_right()
# time.sleep(0.1)
# spear_left()
# time.sleep(0.2)
# takedown_left()
# time.sleep(0.2)
# teleport_left()
# time.sleep(0.1)
# while(True):
# sendCommand(url_command, game_down)
# time.sleep(0.2)
# sendCommand(url_command, game_down_disable)
# time.sleep(0.1)
# escape()
# sendCommand(url_admin, menu_enter)
# sendCommand(url_pselect, select_scorpio)
| [
"andrei.cherechesu@gmail.com"
] | andrei.cherechesu@gmail.com |
35a61af3b0f6f690a4eac432d69103781468c94e | bba3a68b188fed58e3226fe1efa83eee808d68ad | /umfrageseite/umfrageseite/urls.py | 49ad26b164e287109f6c78e775487c7e78e29a7d | [] | no_license | Maurice73-Tech/Test-Project1 | d82c8dfe534caffec3f94e5b6f2edb35d96f027b | 9c17ea6bf4d74e6cad02c81134e3f12c8b28a3c4 | refs/heads/main | 2023-09-04T07:22:13.271975 | 2021-10-31T18:38:44 | 2021-10-31T18:38:44 | 422,291,771 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 799 | py | """umfrageseite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('polls.urls'))
]
| [
"maurice.neidhart97@gmail.com"
] | maurice.neidhart97@gmail.com |
9c97105142970516183c7db5852d5d9ff221957b | b66f090edbccaa217c28f5ebc85a98258837cf87 | /lattice_calculator_procedural2.py | 4e40934ef4d459115a08291abbcd26425657e42b | [] | no_license | dquintana/PyCrysFML | d2bc0536a01e29f0744891e0c2ecff4b85910cb2 | d86eedc4401af539a3f40d179b440b4fa6b2e242 | refs/heads/master | 2020-04-16T22:38:19.011436 | 2013-06-12T19:06:19 | 2013-06-12T19:06:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 64,425 | py | import numpy as N
import math
import unittest
eps=1e-3
pi=N.pi
#Interfaces to the user are to be in degrees--internally, we may sometimes convert to radians
def sign(x): # I think this one include in numpy
    """Signum of x: 1 for positive, -1 for negative, 0 for zero.

    (numpy's N.sign does the same for array input; this scalar helper is kept
    for the existing callers.)
    """
    if x == 0:
        return 0
    return 1 if x > 0 else -1
def blkdiag(g):
    """Assemble a block-diagonal matrix from a list of square matrices *g*.

    The result is an (n x n) zero matrix, n being the sum of the block sizes,
    with each input matrix placed along the diagonal in order.
    """
    total = 0
    for block in g:
        total += block.shape[0]
    out = N.zeros((total, total))
    pos = 0
    for block in g:
        size = block.shape[0]
        # copy the whole square block onto the diagonal in one slice assignment
        out[pos:pos + size, pos:pos + size] = block
        pos += size
    return out
def similarity_transform(A, B):
    """Return the similarity transform A * B * A^T as a new matrix."""
    return N.dot(A, N.dot(B, A.transpose()))
def CleanArgs(**args):
    """Pad every keyword argument to the length of the longest one.

    Scalars are first promoted to 1-element float64 arrays.  Shorter arrays
    (or lists) are extended by repeating their last value.  Returns the same
    dict of keyword arguments with every entry at the common length.
    NOTE: Python 2 only (dict.iteritems).
    """
    npts=[]
    # First pass: promote scalars to arrays and record each entry's length.
    for name,val in args.iteritems():
        if type(val) in [type(13),type(13.0)]:
            args[name]=N.array([val],'float64')
        if type(val)==type([1,2,3]):
            npts.append(len(val))
        else:
            npts.append(args[name].shape[0])
    maxpts=max(npts)
    # Second pass: pad everything shorter than maxpts with its last value.
    for name,val in args.iteritems():
        if type(val)==type([1,2,3]):
            # plain lists are extended in place
            for i in range(maxpts-len(val)):
                args[name].append(val[-1])
        else:
            if val.shape[0]<maxpts:
                last_val=val[-1]
                if len(val.shape)==1:
                    # 1-D: append copies of the final scalar value
                    addendum=last_val*N.ones((maxpts-val.shape[0],),'float64')
                    args[name]=N.concatenate((val,addendum))
                elif len(val.shape)==2:
                    # 2-D: tile the final row
                    addendum=N.tile(last_val,(maxpts-val.shape[0],1))
                    args[name]=N.concatenate((val,addendum))
                #print name, len(val.shape),args[name]
    return args
class Instrument(object):
    """Lookup table of |tau| values (inverse Angstroms) for common
    monochromator/analyzer reflections."""
    def __init__(self):
        # Keyed by reflection name as it appears in EXP['mono']['tau'] /
        # EXP['ana']['tau']; '2axis' is a near-zero placeholder.
        self.tau_list = {
            'pg(002)': 1.87325,
            'pg(004)': 3.74650,
            'ge(111)': 1.92366,
            'ge(220)': 3.14131,
            'ge(311)': 3.68351,
            'be(002)': 3.50702,
            '2axis': 1e-4,
            'pg(110)': 5.49806,
            'cu(220)': 4.91593,
        }
    def get_tau(self, tau):
        """Return |tau| for the named reflection, e.g. 'pg(002)'."""
        return self.tau_list[tau]
class Orientation(object):
    """Container for the two (hkl) vectors that span the scattering plane."""
    def __init__(self, orient1, orient2):
        # First and second orienting vectors (Miller indices).
        self.orient1 = orient1
        self.orient2 = orient2
class Lattice(object):
    """Crystal lattice described by cell constants a, b, c (Angstroms) and
    angles alpha, beta, gamma.

    Every cell parameter is exposed as a property; assigning to any of them
    re-runs setvals(), which recomputes the reciprocal cell (star) and both
    metric tensors (gtensor).  All parameters are stored as arrays so many
    lattices can be handled at once (CleanArgs pads them to a common length).
    NOTE(review): setvals applies N.radians to alpha/beta/gamma, so the
    angles are expected in degrees here even though __init__'s original
    docstring said radians — confirm against callers.
    """
    # Property getters: each simply returns the backing attribute.
    def _get_a(self): return self._a
    def _get_b(self): return self._b
    def _get_c(self): return self._c
    def _get_alpha(self): return self._alpha
    def _get_beta(self): return self._beta
    def _get_gamma(self): return self._gamma
    #def _get_orient1(self): return self._orient1
    #def _get_orient2(self): return self._orient2
    #def _get_orientation(self): return self._orientation
    # Property setters: each stores the value, then recomputes all derived
    # quantities so the object never holds a stale reciprocal cell.
    def _set_a(self,x):
        self._a=x
        self.setvals()
    def _set_b(self,x):
        self._b=x
        self.setvals()
    def _set_c(self,x):
        self._c=x
        self.setvals()
    def _set_alpha(self,x):
        self._alpha=x
        self.setvals()
    def _set_beta(self,x):
        self._beta=x
        self.setvals()
    def _set_gamma(self,x):
        self._gamma=x
        self.setvals()
    #def _set_orient1(self,x):
    #    self._orient1=x
    #    self.setvals()
    #def _set_orient2(self,x):
    #    self._orient2=x
    #    self.setvals()
    #def _set_orientation(self,x):
    #    self._orientation=x
    #    self._orient1=x.orient1
    #    self._orient2=x.orient2
    #    self.setvals()
    a=property(_get_a,_set_a)
    b=property(_get_b,_set_b)
    c=property(_get_c,_set_c)
    alpha=property(_get_alpha,_set_alpha)
    beta=property(_get_beta,_set_beta)
    gamma=property(_get_gamma,_set_gamma)
    #orient1=property(_get_orient1,_set_orient1)
    #orient2=property(_get_orient2,_set_orient2)
    #orientation=property(_get_orientation, _set_orientation)
    def setvals(self):
        """Normalize the stored cell parameters (pad to a common length via
        CleanArgs), convert the angles to radians, and recompute the
        reciprocal cell and both metric tensors."""
        #if self._orient1.shape[0]==1:
        #    self.orient1=self.orient1.transpose()
        #if self._orient2.shape[0]==1:
        #    self.orient2=self.orient2.transpose()
        print "initing"
        print self._a, self._b, self._c, self._alpha, self._gamma, self._beta
        newinput=CleanArgs(a=self._a,
                           b=self._b,
                           c=self._c,
                           alpha=self._alpha,
                           beta=self._beta,
                           gamma=self._gamma,
                           #orient1=self._orient1,
                           #orient2=self._orient2
                           )
        self._a=newinput['a']
        self._b=newinput['b']
        self._c=newinput['c']
        self._alpha=newinput['alpha']
        self._beta=newinput['beta']
        self._gamma=newinput['gamma']
        # radian copies of the cell angles used by star()/gtensor()
        self.alphar=N.radians(newinput['alpha'])
        self.betar=N.radians(newinput['beta'])
        self.gammar=N.radians(newinput['gamma'])
        self.star()
        self.gtensor('lattice')
        self.gtensor('latticestar')
        #self.npts=N.size(self.a)
        #self._orient1=newinput['orient1']
        #self._orient2=newinput['orient2']
        #self._orientation=Orientation(newinput['orient1'],newinput['orient2'])
        #self._orientation.orient1=newinput['orient1']
        #self._orientation.orient2=newinput['orient2']
        #print self.npts
        #self._orient1=self._orient1.reshape((self.npts,1))
        #self._orient2=self._orient2.reshape((self.npts,1))
        #if newinput['orient1'].shape[0]==1:
        #    self.orient1=newinput['orient1'].transpose()
        #else:
        #    self.orient1=newinput['orient1']
        #if newinput['orient2'].shape[0]==1:
        #    self.orient2=newinput['orient2'].transpose()
        #else:
        #    self.orient2=newinput['orient2']
        #    self.StandardSystem()
    def __init__(self, a=None, \
                 b=None, \
                 c=None, \
                 alpha=None, \
                 beta=None, \
                 gamma=None, \
                 orientation=None, \
                 ):
        """a, b, c in Angstroms; alpha, beta, gamma in degrees (setvals
        converts with N.radians).  All parameters are vectors.
        The orientation argument is currently accepted but unused (the
        orientation handling below is commented out)."""
        self._a=a
        self._b=b
        self._c=c
        self._alpha=alpha
        self._beta=beta
        self._gamma=gamma
        #self._orient1=orientation.orient1
        #self._orient2=orientation.orient2
        #self.instrument=Instrument()
        self.setvals()
        return
    def star(self):
        "Calculate unit cell volume, reciprocal cell volume, reciprocal lattice parameters"
        # Cell volume via the standard half-angle-sum product formula.
        V=2*self.a*self.b*self.c*\
        N.sqrt(N.sin((self.alphar+self.betar+self.gammar)/2)*\
               N.sin((-self.alphar+self.betar+self.gammar)/2)*\
               N.sin((self.alphar-self.betar+self.gammar)/2)*\
               N.sin((self.alphar+self.betar-self.gammar)/2))
        # NOTE(review): Vstar carries a (2*pi)**3 factor while astar/bstar/cstar
        # do not carry 2*pi — verify the intended reciprocal-lattice convention.
        self.Vstar=(2*N.pi)**3/V;
        self.astar=self.b*self.c*N.sin(self.alphar)/V
        self.bstar=self.a*self.c*N.sin(self.betar)/V
        self.cstar=self.b*self.a*N.sin(self.gammar)/V
        # Reciprocal angles from the direct ones (standard relations).
        self.alphastar_r=N.arccos((N.cos(self.betar)*N.cos(self.gammar)-\
                                   N.cos(self.alphar))/ \
                                  (N.sin(self.betar)*N.sin(self.gammar)))
        self.betastar_r= N.arccos((N.cos(self.alphar)*N.cos(self.gammar)-\
                                   N.cos(self.betar))/ \
                                  (N.sin(self.alphar)*N.sin(self.gammar)))
        self.gammastar_r=N.arccos((N.cos(self.alphar)*N.cos(self.betar)-\
                                   N.cos(self.gammar))/ \
                                  (N.sin(self.alphar)*N.sin(self.betar)))
        self.V=V
        self.alphastar=N.degrees(self.alphastar_r)
        self.betastar=N.degrees(self.betastar_r)
        self.gammastar=N.degrees(self.gammastar_r)
        return
    def calc_twotheta(self,wavelength,h,k,l):
        """Two-theta scattering angle (degrees) of reflection (h,k,l) at the
        given wavelength, via Bragg's law with d* from calc_dhkl_star."""
        dr=self.calc_dhkl_star(h,k,l)
        return N.degrees(2*N.arcsin(wavelength*dr/2))
    def calc_dhkl_star(self,h,k,l):
        """Length of the reciprocal vector (h,k,l) in the reciprocal metric
        (general triclinic formula)."""
        dr=N.sqrt(h**2*self.astar**2+k**2*self.bstar**2+l**2*self.cstar**2
                  +2*k*l*self.bstar*self.cstar*N.cos(self.alphastar_r)
                  +2*l*h*self.cstar*self.astar*N.cos(self.betastar_r)
                  +2*h*k*self.astar*self.bstar*N.cos(self.gammastar_r)
                  )
        return dr
    def gtensor(self, latticetype):
        "calculates the metric tensor of a lattice"
        # g is 3x3xnpts: one metric tensor per stored lattice.
        g=N.zeros((3, 3, N.size(self.a)), 'Float64')
        #print 'shape ', g.shape
        if latticetype=='lattice':
            a=self.a
            b=self.b
            c=self.c
            alphar=self.alphar
            betar=self.betar
            gammar=self.gammar
        if latticetype=='latticestar':
            a=self.astar
            b=self.bstar
            c=self.cstar
            alphar=self.alphastar_r
            betar=self.betastar_r
            gammar=self.gammastar_r
        g[0, 0, :]=a**2;
        g[0, 1, :]=a*b*N.cos(gammar)
        g[0, 2, :]=a*c*N.cos(betar)
        # the metric tensor is symmetric; mirror the off-diagonal entries
        g[1, 0, :]=g[0, 1, :]
        g[1, 1, :]=b**2
        g[1, 2, :]=c*b*N.cos(alphar)
        g[2, 0, :]=g[0, 2, :]
        g[2, 1, :]=g[1, 2, :]
        g[2, 2, :]=c**2
        if latticetype=='lattice':
            self.g=g
        if latticetype=='latticestar':
            self.gstar=g
        return
def scalar(x1, y1, z1, x2, y2, z2, latticetype, lattice):
    """Scalar (dot) product of two vectors given in cell coordinates.

    latticetype selects the metric used: 'lattice' for fractional direct-cell
    coordinates, 'latticestar' for Miller indices in the reciprocal cell.
    """
    if latticetype == 'lattice':
        la, lb, lc = lattice.a, lattice.b, lattice.c
        aa, bb, gg = lattice.alphar, lattice.betar, lattice.gammar
    if latticetype == 'latticestar':
        la, lb, lc = lattice.astar, lattice.bstar, lattice.cstar
        aa, bb, gg = lattice.alphastar_r, lattice.betastar_r, lattice.gammastar_r
    # general triclinic dot product: diagonal terms plus the three
    # cross terms weighted by the cosines of the cell angles
    diag = x1 * x2 * la ** 2 + y1 * y2 * lb ** 2 + z1 * z2 * lc ** 2
    cross = ((x1 * y2 + x2 * y1) * la * lb * N.cos(gg)
             + (x1 * z2 + x2 * z1) * la * lc * N.cos(bb)
             + (z1 * y2 + z2 * y1) * lc * lb * N.cos(aa))
    return diag + cross
def angle2(x, y, z, h, k, l, lattice):
    """Angle (radians) between a real-space vector and a reciprocal one.

    (x, y, z) are fractional cell coordinates of the first vector;
    (h, k, l) are Miller indices of the second vector.
    """
    dot = 2 * pi * (h * x + k * y + l * z)
    norm1 = modvec(x, y, z, 'lattice', lattice)
    norm2 = modvec(h, k, l, 'latticestar', lattice)
    return N.arccos(dot / norm1 / norm2)
def angle(x1, y1, z1, x2, y2, z2, latticetype, lattice):
    """Angle (radians) between two vectors, both given either as fractional
    cell coordinates (latticetype='lattice') or as Miller indices
    (latticetype='latticestar').

    BUGFIX: the denominator previously divided by modvec(x1, y1, z1) twice
    instead of by |v1|*|v2|, giving wrong angles whenever the two vectors
    had different lengths; the second factor now uses (x2, y2, z2).
    """
    phi = N.arccos(scalar(x1, y1, z1, x2, y2, z2, latticetype, lattice)
                   / modvec(x1, y1, z1, latticetype, lattice)
                   / modvec(x2, y2, z2, latticetype, lattice))
    return phi
def modvec(x, y, z, latticetype, lattice):
    """Length of a vector given by fractional cell coordinates
    ('lattice') or Miller indices ('latticestar'): sqrt(v . v)."""
    return N.sqrt(scalar(x, y, z, x, y, z, latticetype, lattice))
def reciprocate(x, y, z, latticetype, lattice):
    """Contract (x, y, z) with the metric tensor: converts fractional cell
    coordinates to Miller indices (or the reverse with 'latticestar')."""
    if latticetype == 'lattice':
        g = lattice.g
    if latticetype == 'latticestar':
        g = lattice.gstar
    # one output component per column of the (symmetric) metric tensor
    comps = []
    for col in range(3):
        comps.append(g[0, col, :] * x + g[1, col, :] * y + g[2, col, :] * z)
    return comps[0], comps[1], comps[2]
def vector(x1, y1, z1, x2, y2, z2, latticetype,lattice):
    """Vector (cross) product of two vectors, returned as fractional cell
    coordinates or Miller indices.

    The two input vectors are defined by their fractional cell coordinates
    (latticetype='lattice') or Miller indices (latticetype='latticestar');
    the result lives in the same space and is computed by contracting with
    the metric tensor of the dual lattice.
    """
    # note the dual: a cross product of direct vectors is expressed through
    # the reciprocal metric, and vice versa
    if latticetype=='lattice':
        g=lattice.gstar
        V=lattice.Vstar
    if latticetype=='latticestar':
        g=lattice.g
        V=lattice.V
    g=g*V/(2*N.pi)**2
    x=y1*z2*g[0, 0, :]-z1*y2*g[0, 0, :]-x1*z2*g[1, 0, :]+z1*x2*g[1, 0, :]\
       +x1*y2*g[2, 0, :]-y1*x2*g[2, 0, :]
    y=y1*z2*g[0, 1, :]-z1*y2*g[0, 1, :]-x1*z2*g[1, 1, :]+z1*x2*g[1, 1, :]\
       +x1*y2*g[2, 1, :]-y1*x2*g[2, 1, :]
    z=y1*z2*g[0, 2, :]-z1*y2*g[0, 2, :]-x1*z2*g[1, 2, :]+z1*x2*g[1, 2, :]\
       +x1*y2*g[2, 2, :]-y1*x2*g[2, 2, :]
    return x,y,z
def StandardSystem(orient1,orient2,lattice):
    """Build an orthonormal Cartesian basis (x, y, z) in reciprocal space by
    Gram-Schmidt: x along orient1, y in the (orient1, orient2) plane, z
    perpendicular to both.  orient1/orient2 are 3xN column vectors (row
    vectors are transposed automatically).  Returns (x, y, z) or None if
    the orienting vectors are collinear.
    """
    #orient1=self._orient1.T
    #orient2=self._orient2.T
    try:
        modx=modvec(orient1[0, :], orient1[1, :], orient1[2, :], 'latticestar',lattice)
    except IndexError:
        # inputs were 1xN row vectors; transpose to the expected 3xN layout
        orient1=orient1.transpose()
        orient2=orient2.transpose()
        modx=modvec(orient1[0, :], orient1[1, :], orient1[2, :], 'latticestar',lattice)
    x=N.copy(orient1)
    x[0, :]=x[0, :]/modx; # First unit basis vector
    x[1, :]=x[1, :]/modx;
    x[2, :]=x[2, :]/modx;
    # subtract orient2's projection onto x to get the second basis direction
    proj=scalar(orient2[0, :], orient2[1, :], orient2[2, :], \
                x[0, :], x[1, :], x[2, :], 'latticestar',lattice)
    y=N.copy(orient2)
    y[0, :]=y[0, :]-x[0, :]*proj;
    y[1, :]=y[1, :]-x[1, :]*proj;
    y[2, :]=y[2, :]-x[2, :]*proj;
    mody=modvec(y[0, :], y[1, :], y[2, :], 'latticestar',lattice);
    #check for collinearity of orienting vectors
    try:
        if N.where(mody<=eps)[0].size>0:
            print 'ValueError'
            raise ValueError
        y[0, :]=y[0, :]/mody; # Second unit basis vector
        y[1, :]=y[1, :]/mody;
        y[2, :]=y[2, :]/mody;
        # z starts as the component-wise cross product of x and y ...
        z=N.copy(y);
        z[0, :]=x[1, :]*y[2, :]-y[1, :]*x[2, :];
        z[1, :]=x[2, :]*y[0, :]-y[2, :]*x[0, :];
        z[2, :]=-x[1, :]*y[0, :]+y[1, :]*x[0, :];
        # ... then is re-orthogonalized against x and y in the lattice
        # metric (the naive cross product is not exact in oblique cells)
        proj=scalar(z[0, :], z[1, :], z[2, :], x[0, :], x[1, :], x[2, :], 'latticestar',lattice);
        z[0, :]=z[0, :]-x[0, :]*proj;
        z[1, :]=z[1, :]-x[1, :]*proj;
        z[2, :]=z[2, :]-x[2, :]*proj;
        proj=scalar(z[0, :], z[1, :], z[2, :], y[0, :], y[1, :], y[2, :], 'latticestar',lattice);
        z[0, :]=z[0, :]-y[0, :]*proj;
        z[1, :]=z[1, :]-y[1, :]*proj;
        z[2, :]=z[2, :]-y[2, :]*proj;
        modz=modvec(z[0, :], z[1, :], z[2, :], 'latticestar',lattice);
        z[0, :]=z[0, :]/modz; #% Third unit basis vector
        z[1, :]=z[1, :]/modz;
        z[2, :]=z[2, :]/modz;
        return x,y,z
    except ValueError:
        print 'ORIENTATION VECTORS ARE COLLINEAR x,y,z not set'
        return None
def S2R(qx, qy, qz, x, y, z):
    """Convert Cartesian (S-system) coordinates (qx, qy, qz) to Miller
    indices using the orthonormal basis vectors x, y, z (each 3xN arrays,
    as returned by StandardSystem).  Also returns the modulus q."""
    # each Miller index is the combination of the corresponding row of
    # the three basis vectors
    H = qx * x[0, :] + qy * y[0, :] + qz * z[0, :]
    K = qx * x[1, :] + qy * y[1, :] + qz * z[1, :]
    L = qx * x[2, :] + qy * y[2, :] + qz * z[2, :]
    q = N.sqrt(qx * qx + qy * qy + qz * qz)
    return H, K, L, q
def R2S(H, K, L, x, y, z, lattice):
    """Project the reciprocal-space vector (H, K, L) onto the orthonormal
    Cartesian basis x, y, z; returns (qx, qy, qz) and the modulus q."""
    qx = scalar(H, K, L, x[0, :], x[1, :], x[2, :], 'latticestar', lattice)
    qy = scalar(H, K, L, y[0, :], y[1, :], y[2, :], 'latticestar', lattice)
    qz = scalar(H, K, L, z[0, :], z[1, :], z[2, :], 'latticestar', lattice)
    q = modvec(H, K, L, 'latticestar', lattice)
    return qx, qy, qz, q
def SpecWhere(M2,S1,S2,A2,EXP,lattice,orientation,instrument):
    """For given values of the M2, S1, S2 and A2 spectrometer motors (AKA
    M2, M3, M4 and M6) and the spectrometer/sample parameters in EXP,
    calculate the wave-vector transfer (H, K, L), its modulus Q, the energy
    transfer E, and the incident and final neutron energies Ei, Ef.

    Motor angles are supplied in DEGREES (they are converted with N.radians
    below).  EXP is a list of per-point configuration dicts with at least
    'mono' and 'ana' reflection names.
    """
    newinput=CleanArgs(a=lattice.a,b=lattice.b,c=lattice.c,alpha=lattice.alpha,beta=lattice.beta,\
                       gamma=lattice.gamma,orient1=orientation.orient1,orient2=orientation.orient2,M2=N.radians(M2),S1=N.radians(S1),S2=N.radians(S2),A2=N.radians(A2))
    neworientation=Orientation(newinput['orient1'],newinput['orient2'])
    newlattice=Lattice(a=newinput['a'],b=newinput['b'],c=newinput['c'],alpha=newinput['alpha'],\
                       beta=newinput['beta'],gamma=newinput['gamma'],orientation=neworientation\
                       )
    M2=newinput['M2']
    S1=newinput['S1']
    S2=newinput['S2']
    A2=newinput['A2']
    npts=len(EXP)
    taum=N.empty(npts,'Float64')
    taua=N.empty(npts,'Float64')
    for ind in range(npts):
        taum[ind]=instrument.get_tau(EXP[ind]['mono']['tau'])
    for ind in range(npts):
        taua[ind]=instrument.get_tau(EXP[ind]['ana']['tau'])
    # Bragg condition at the monochromator/analyzer: k = tau / (2 sin(theta)),
    # written via the chord length sqrt(2 - 2 cos(2theta)).
    ki=taum/N.sqrt(2.0-2*N.cos(M2))
    Ei=2.072142*ki**2   # meV from inverse Angstroms
    kf=taua/N.sqrt(2.0-2*N.cos(A2))
    Ef=2.072142*kf**2
    E=Ei-Ef
    # law of cosines on the scattering triangle
    Q=N.sqrt(ki**2+kf**2-2*ki*kf*N.cos(S2))
    x,y,z=StandardSystem(neworientation.orient1,neworientation.orient2,newlattice)
    orienta=x
    orientb=y
    #phi=-atan2(-kf.*sin(S2), ki-kf.*cos(S2)); %Angle from ki to Q
    delta=N.absolute(N.arccos( (Q**2+ki**2-kf**2)/(2*ki*Q)))  # angle from ki to Q
    psi=S1+delta-pi/2 #Angle from first orienting vector to to Q
    # decompose Q in the scattering plane and map back to Miller indices
    qx=Q*N.cos(psi)
    qy=Q*N.sin(psi)
    H=qx*orienta[0]+qy*orientb[0]
    K=qx*orienta[1]+qy*orientb[1]
    L=qx*orienta[2]+qy*orientb[2]
    return H,K,L,E,Q,Ei,Ef
def SpecGoTo(H,K,L,E,EXP,lattice,orientation):
    """Calculate spectrometer shaft angles for momentum transfer (H, K, L)
    and energy transfer E (meV), given sample/spectrometer parameters in EXP.

    EXP is a list of per-point configuration dicts (keys 'mono', 'ana',
    'efixed', and optionally 'infin', 'dir1', 'dir2', 'mondir').
    Returns (M1, M2, S1, S2, A1, A2) in DEGREES; points where the
    scattering triangle cannot close come back as NaN.

    Fixes relative to the original:
      * 'dir2'/'mondir' were looked up in the list EXP instead of the
        per-point dict EXP[ind], so those settings were always ignored;
      * the bad-point mask used Python 'or' on numpy arrays, which raises
        ValueError; it now uses elementwise '|';
      * the per-point parameter arrays were (npts, 1)-shaped, which
        broadcast taum/(2*ki) into an (npts, npts) matrix; they are 1-D now.
    """
    newinput=CleanArgs(a=lattice.a,b=lattice.b,c=lattice.c,alpha=lattice.alpha,beta=lattice.beta,\
                       gamma=lattice.gamma,orient1=orientation.orient1,orient2=orientation.orient2,H=H,K=K,L=L,E=E)
    neworientation=Orientation(newinput['orient1'],newinput['orient2'])
    newlattice=Lattice(a=newinput['a'],b=newinput['b'],c=newinput['c'],alpha=newinput['alpha'],\
                       beta=newinput['beta'],gamma=newinput['gamma'])
    H=newinput['H']
    K=newinput['K']
    L=newinput['L']
    E=newinput['E']
    CONVERT2=2.072  # meV per (inverse Angstrom)^2: E = CONVERT2 * k**2
    npts=len(EXP)
    taum=N.empty((npts,),'float64')
    taua=N.empty((npts,),'float64')
    # per-point scattering-sense flags and fixed energies (1-D, see docstring)
    infin=-1*N.ones((npts,),'float64')
    dir1=N.ones((npts,),'float64')
    dir2=N.ones((npts,),'float64')
    mondir=N.ones((npts,),'float64')
    efixed=N.empty((npts,),'float64')
    instrument=Instrument()
    for ind in range(npts):
        taum[ind]=instrument.get_tau(EXP[ind]['mono']['tau'])
        taua[ind]=instrument.get_tau(EXP[ind]['ana']['tau'])
        if ('infin' in EXP[ind]):
            infin[ind]=EXP[ind]['infin']
        if ('dir1' in EXP[ind]):
            dir1[ind]=EXP[ind]['dir1']
        if ('dir2' in EXP[ind]):      # BUGFIX: was "in EXP" (the list)
            dir2[ind]=EXP[ind]['dir2']
        if ('mondir' in EXP[ind]):    # BUGFIX: was "in EXP" (the list)
            mondir[ind]=EXP[ind]['mondir']
        efixed[ind]=EXP[ind]['efixed']
    x,y,z=StandardSystem(neworientation.orient1,neworientation.orient2,newlattice)
    qx,qy,qz,Q=R2S(H,K,L,x,y,z,newlattice)
    # rotation senses of mono / sample / analyzer (kept for the commented-out
    # sign corrections below; renamed so it no longer shadows builtin dir)
    scatt_sense=N.zeros((3,npts),'float64')
    scatt_sense[0,:]=mondir
    scatt_sense[1,:]=-scatt_sense[0,:]*dir1
    scatt_sense[2,:]=-scatt_sense[1,:]*dir2
    # incident/final energies; infin>0 means fixed incident energy
    ei=efixed+E
    ef=efixed+0*E
    change=N.where(infin>0)
    if N.size(change)!=0:
        ef[change]=efixed[change]-E[change]
        ei[change]=efixed[change]
    ki = N.sqrt(ei/CONVERT2)
    kf = N.sqrt(ef/CONVERT2)
    M1=N.arcsin(taum/(2*ki))#.*sign(dir(1,:));
    M2=2*M1
    A1=N.arcsin(taua/(2*kf))#.*sign(dir(3,:));
    A2=2*A1
    S2=N.arccos((ki**2+kf**2-Q**2)/(2*ki*kf))#.*sign(dir(2,:));
    delta=N.absolute(N.arccos( (Q**2+ki**2-kf**2)/(2*ki*Q)))  # angle from ki to Q
    psi=N.arctan2(qy,qx)  # angle from first orienting vector to Q
    S1=psi-delta+pi/2
    #TODO: Add checks to make sure that the scattering triangle closed
    # BUGFIX: elementwise | instead of Python 'or' (which raises on arrays)
    bad=N.where((ei<0) | (ef<0) |
                (N.abs(taum/(2*ki))>1) |
                (N.abs(taua/(2.*kf))>1) |
                (N.abs((ki**2+kf**2-Q**2)/(2*ki*kf))>1))[0]
    M1[bad]=N.NaN
    M2[bad]=N.NaN
    S1[bad]=N.NaN
    S2[bad]=N.NaN
    A1[bad]=N.NaN
    A2[bad]=N.NaN
    return N.degrees(M1),N.degrees(M2),N.degrees(S1),N.degrees(S2),N.degrees(A1),N.degrees(A2)
class TestLattice(unittest.TestCase):
    """Unit tests for Lattice on a cubic cell with a = b = c = 2*pi.

    NOTE(review): setUp passes angles through N.radians, but Lattice.setvals
    applies N.radians again, so the cell is built from alpha = radians(pi/2)
    rather than 90 degrees — confirm the intended units.
    """
    def setUp(self):
        # cubic cell, a = 2*pi so that astar/bstar/cstar come out near 1
        a=N.array([2*pi],'Float64')
        b=N.array([2*pi],'Float64')
        c=N.array([2*pi],'Float64')
        alpha=N.radians(N.array([90],'Float64'))
        beta=N.radians(N.array([90],'Float64'))
        gamma=N.radians(N.array([90],'Float64'))
        orient1=N.array([[1,0,0]],'Float64')
        orient2=N.array([[0,1,1]],'Float64')
        orientation=Orientation(orient1,orient2)
        self.fixture = Lattice(a=a,b=b,c=c,alpha=alpha,beta=beta,gamma=gamma,\
                               orientation=orientation)
    def test_astar(self):
        self.assertAlmostEqual(self.fixture.astar[0],1.0,2,'astar Not equal to '+str(1.0))
    def test_bstar(self):
        self.assertAlmostEqual(self.fixture.bstar[0],1.0,2,'bstar Not equal to '+str(1.0))
    def test_cstar(self):
        self.assertAlmostEqual(self.fixture.cstar[0],1.0,2,'cstar '+str(self.fixture.cstar[0])+' Not equal to '+str(1.0))
    # NOTE(review): Lattice stores alphastar/betastar/gammastar in degrees,
    # yet these tests compare against pi/2 — verify expected units.
    def test_alphastar(self):
        self.assertAlmostEqual(self.fixture.alphastar[0],pi/2,2,'alphastar Not equal to '+str(pi/2))
    def test_betastar(self):
        self.assertAlmostEqual(self.fixture.betastar[0],pi/2,2,'betastar Not equal to '+str(pi/2))
    def test_gammastar(self):
        self.assertAlmostEqual(self.fixture.gammastar[0],pi/2,2,'gammastar Not equal to '+str(pi/2))
    def test_V(self):
        # (2*pi)**3 ~ 248.05 for the cubic cell
        self.assertAlmostEqual(self.fixture.V[0],248.0502,2,'V Not equal to '+str(248.0502))
    def test_Vstar(self):
        self.assertAlmostEqual(self.fixture.Vstar[0],1.0,2,'Vstar Not equal to '+str(1.0))
    def test_g(self):
        # direct metric tensor should be (2*pi)**2 ~ 39.478 on the diagonal
        #print self.fixture.g
        self.assertAlmostEqual((self.fixture.g[:,:,0][0,0]),39.4784*(N.eye(3)[0,0]) ,2,'g Not equal to '+str(39.4784 ))
    def test_gstar(self):
        #print self.fixture.gstar
        self.assertAlmostEqual(self.fixture.gstar[:,:,0][0,0],1.0*N.eye(3)[0,0] ,2,'gstar Not equal to '+str(1.0 ))
    # NOTE(review): Lattice never sets a .x attribute (StandardSystem is a
    # module-level function now) — this test looks stale; verify.
    def test_StandardSystem_x(self):
       # #print self.fixture.gstar
        self.assertAlmostEqual(self.fixture.x[0],1.0 ,2,'Standard System x Not equal to '+str(1.0 ))
class TestLatticeCubic(unittest.TestCase):
def setUp(self):
a=N.array([6.283],'Float64')
b=N.array([6.283],'Float64')
c=N.array([6.283],'Float64')
alpha=N.radians(N.array([90],'Float64'))
beta=N.radians(N.array([90],'Float64'))
gamma=N.radians(N.array([90],'Float64'))
orient1=N.array([[1,0,0]],'Float64')
orient2=N.array([[0,0,1]],'Float64')
orientation=Orientation(orient1,orient2)
self.fixture=Lattice(a=a,b=b,c=c,alpha=alpha,beta=beta,gamma=gamma,\
orientation=orientation)
EXP={}
EXP['ana']={}
EXP['ana']['tau']='pg(002)'
EXP['mono']={}
EXP['mono']['tau']='pg(002)';
EXP['ana']['mosaic']=30
EXP['mono']['mosaic']=30
EXP['sample']={}
EXP['sample']['mosaic']=10
EXP['sample']['vmosaic']=10
EXP['hcol']=N.array([40, 10, 20, 80],'Float64')
EXP['vcol']=N.array([120, 120, 120, 120],'Float64')
EXP['infix']=-1 #positive for fixed incident energy
EXP['efixed']=14.7
EXP['method']=0
setup=[EXP]
self.fixture.EXP=EXP
def test_cubic1(self):
#setup lattice
orient1=N.array([[1,0,0]],'Float64')
orient2=N.array([[0,0,1]],'Float64')
orientation=Orientation(orient1,orient2)
self.fixture.orientation=orientation
#setup spectrometer
self.fixture.EXP['infix']=1 # Fixed Ef
self.fixture.EXP['efixed']=5.0
#test the angles
M2=N.radians(N.array([74.169]))
A2=N.radians(N.array([74.169]))
S1=N.radians(N.array([97.958]))
S2=N.radians(N.array([89.131]))
H,K,L,E,Q,Ei,Ef=self.fixture.SpecWhere(M2,S1,S2,A2,[self.fixture.EXP])
print 'H ',H
print 'K ',K
print 'L ',L
print 'E ',E
print 'Q ',Q
print 'Ei ',Ei
print 'Ef ',Ef
self.assertAlmostEqual(H[0],1.2999,4, 'H Not equal to '+ str(1.2999))
self.assertAlmostEqual(K[0],0.0000,4, 'K Not equal to '+ str(0.0000))
self.assertAlmostEqual(L[0],1.7499,4, 'L Not equal to '+ str(1.7499))
self.assertAlmostEqual(E[0],0.0000,4, 'E Not equal to '+ str(0.0000))
#self.assertAlmostEqual(Q[0],2.1799,4, 'Q Not equal to '+ str(2.1799))
self.assertAlmostEqual(Ei[0],4.9995,4,'Ei Not equal to '+str(4.9995))
self.assertAlmostEqual(Ef[0],4.9995,4,'Ef Not equal to '+str(4.9995))
def test_cubic2(self):
"""test different Energy Transfer"""
#setup lattice
orient1=N.array([[1,0,0]],'Float64')
orient2=N.array([[0,0,1]],'Float64')
orientation=Orientation(orient1,orient2)
self.fixture.orientation=orientation
#setup spectrometer
self.fixture.EXP['infix']=1 # Fixed Ef
self.fixture.EXP['efixed']=5.0
#test the angles
M2=N.radians(N.array([52.420]))
A2=N.radians(N.array([74.169]))
S1=N.radians(N.array([101.076]))
S2=N.radians(N.array([70.881]))
H,K,L,E,Q,Ei,Ef=self.fixture.SpecWhere(M2,S1,S2,A2,[self.fixture.EXP])
print 'H ',H
print 'K ',K
print 'L ',L
print 'E ',E
print 'Q ',Q
print 'Ei ',Ei
print 'Ef ',Ef
self.assertAlmostEqual(H[0],1.2999,4, 'H Not equal to '+ str(1.2999))
self.assertAlmostEqual(K[0],0.0000,4, 'K Not equal to '+ str(0.0000))
self.assertAlmostEqual(L[0],1.7499,4, 'L Not equal to '+ str(1.7499))
self.assertAlmostEqual(E[0],4.3195,4, 'E Not equal to '+ str(4.3195))
#self.assertAlmostEqual(Q[0],2.1799,4, 'Q Not equal to '+ str(2.1799))
self.assertAlmostEqual(Ei[0],9.3190,4,'Ei Not equal to '+str(9.3190))
self.assertAlmostEqual(Ef[0],4.9995,4,'Ef Not equal to '+str(4.9995))
def test_cubic3(self):
"""test different Orientation"""
#setup lattice
orient1=N.array([[1,1,0]],'Float64')
orient2=N.array([[0,0,1]],'Float64')
orientation=Orientation(orient1,orient2)
self.fixture.orientation=orientation
#setup spectrometer
self.fixture.EXP['infix']=1 # Fixed Ef
self.fixture.EXP['efixed']=5.0
#test the angles
M2=N.radians(N.array([74.169]))
A2=N.radians(N.array([74.169]))
S1=N.radians(N.array([98.375]))
S2=N.radians(N.array([109.575]))
H,K,L,E,Q,Ei,Ef=self.fixture.SpecWhere(M2,S1,S2,A2,[self.fixture.EXP])
print 'H ',H
print 'K ',K
print 'L ',L
print 'E ',E
print 'Q ',Q
print 'Ei ',Ei
print 'Ef ',Ef
self.assertAlmostEqual(H[0],1.2999,4, 'H Not equal to '+ str(1.2999))
self.assertAlmostEqual(K[0],1.2999,4, 'K Not equal to '+ str(1.2999))
self.assertAlmostEqual(L[0],1.7499,4, 'L Not equal to '+ str(1.7499))
self.assertAlmostEqual(E[0],0.0000,4, 'E Not equal to '+ str(0.0000))
self.assertAlmostEqual(Ei[0],4.9995,4,'Ei Not equal to '+str(4.9995))
self.assertAlmostEqual(Ef[0],4.9995,4,'Ef Not equal to '+str(4.9995))
def test_cubic4(self):
"""Switch order of orientations, compare with test 3"""
#setup lattice
orient1=N.array([[0,0,1]],'Float64')
orient2=N.array([[1,1,0]],'Float64')
orientation=Orientation(orient1,orient2)
self.fixture.orientation=orientation
#setup spectrometer
self.fixture.EXP['infix']=1 # Fixed Ef
self.fixture.EXP['efixed']=5.0
#test the angles
M2=N.radians(N.array([74.169]))
A2=N.radians(N.array([74.169]))
S1=N.radians(N.array([101.200])) #Note that this angle has changed
#This is a consequence of the fact that ICP defines the first orientation vector
#to be at half of the detector two theta angle. The second orientation vector is always
#at higher a3
S2=N.radians(N.array([109.575]))
H,K,L,E,Q,Ei,Ef=self.fixture.SpecWhere(M2,S1,S2,A2,[self.fixture.EXP])
print 'H ',H
print 'K ',K
print 'L ',L
print 'E ',E
print 'Q ',Q
print 'Ei ',Ei
print 'Ef ',Ef
self.assertAlmostEqual(H[0],1.2999,4, 'H Not equal to '+ str(1.2999))
self.assertAlmostEqual(K[0],1.2999,4, 'K Not equal to '+ str(1.2999))
self.assertAlmostEqual(L[0],1.7499,4, 'L Not equal to '+ str(1.7499))
self.assertAlmostEqual(E[0],0.0000,4, 'E Not equal to '+ str(0.0000))
self.assertAlmostEqual(Ei[0],4.9995,4,'Ei Not equal to '+str(4.9995))
self.assertAlmostEqual(Ef[0],4.9995,4,'Ef Not equal to '+str(4.9995))
def test_cubic5(self):
"""Test another energy, compare with test 4"""
#setup lattice
orient1=N.array([[0,0,1]],'Float64')
orient2=N.array([[1,1,0]],'Float64')
orientation=Orientation(orient1,orient2)
self.fixture.orientation=orientation
#setup spectrometer
self.fixture.EXP['infix']=1 # Fixed Ef
self.fixture.EXP['efixed']=5.0
#test the angles
M2=N.radians(N.array([48.661]))
A2=N.radians(N.array([74.169]))
S1=N.radians(N.array([99.257]))
S2=N.radians(N.array([80.722]))
H,K,L,E,Q,Ei,Ef=self.fixture.SpecWhere(M2,S1,S2,A2,[self.fixture.EXP])
print 'H ',H
print 'K ',K
print 'L ',L
print 'E ',E
print 'Q ',Q
print 'Ei ',Ei
print 'Ef ',Ef
self.assertAlmostEqual(H[0],1.2999,4, 'H Not equal to '+ str(1.2999))
self.assertAlmostEqual(K[0],1.2999,4, 'K Not equal to '+ str(1.2999))
self.assertAlmostEqual(L[0],1.7499,4, 'L Not equal to '+ str(1.7499))
self.assertAlmostEqual(E[0],5.7097,4, 'E Not equal to '+ str(5.7097))
self.assertAlmostEqual(Ei[0],10.7092,4,'Ei Not equal to '+str(10.7092))
self.assertAlmostEqual(Ef[0],4.9995,4,'Ef Not equal to '+str(4.9995))
def test_cubic6(self):
"""test different energies, compare with test 3"""
#setup lattice
orient1=N.array([[1,1,0]],'Float64')
orient2=N.array([[0,0,1]],'Float64')
orientation=Orientation(orient1,orient2)
self.fixture.orientation=orientation
#setup spectrometer
self.fixture.EXP['infix']=1 # Fixed Ef
self.fixture.EXP['efixed']=5.0
#test the angles
M2=N.radians(N.array([48.661],'Float64'))
A2=N.radians(N.array([74.169],'Float64'))
S1=N.radians(N.array([96.433],'Float64'))
S2=N.radians(N.array([80.722],'Float64'))
H,K,L,E,Q,Ei,Ef=self.fixture.SpecWhere(M2,S1,S2,A2,[self.fixture.EXP])
print 'H ',H
print 'K ',K
print 'L ',L
print 'E ',E
print 'Q ',Q
print 'Ei ',Ei
print 'Ef ',Ef
self.assertAlmostEqual(H[0],1.2999,4, 'H Not equal to '+ str(1.2999))
self.assertAlmostEqual(K[0],1.2999,4, 'K Not equal to '+ str(1.2999))
self.assertAlmostEqual(L[0],1.7499,4, 'L Not equal to '+ str(1.7499))
self.assertAlmostEqual(E[0],5.7097,4, 'E Not equal to '+ str(5.7097))
self.assertAlmostEqual(Ei[0],10.7092,4,'Ei Not equal to '+str(10.7092))
self.assertAlmostEqual(Ef[0],4.9995,4,'Ef Not equal to '+str(4.9995))
def test_tetragonal1(self):
"""test the tetragonal cell"""
#setup lattice
self.fixture.a=N.array([6.283],'Float64')
self.fixture.b=N.array([6.283],'Float64')
self.fixture.c=N.array([11.765],'Float64')
orient1=N.array([[1,0,0]],'Float64')
orient2=N.array([[0,0,1]],'Float64')
orientation=Orientation(orient1,orient2)
self.fixture.orientation=orientation
#setup spectrometer
self.fixture.EXP['infix']=1 # Fixed Ef
self.fixture.EXP['efixed']=5.0
#test the angles
M2=N.radians(N.array([74.169],'Float64'))
A2=N.radians(N.array([74.169],'Float64'))
S1=N.radians(N.array([76.720],'Float64'))
S2=N.radians(N.array([110.480],'Float64'))
H,K,L,E,Q,Ei,Ef=self.fixture.SpecWhere(M2,S1,S2,A2,[self.fixture.EXP])
print 'H ',H
print 'K ',K
print 'L ',L
print 'E ',E
print 'Q ',Q
print 'Ei ',Ei
print 'Ef ',Ef
self.assertAlmostEqual(H[0],2.3749,4, 'H Not equal to '+ str(2.3749))
self.assertAlmostEqual(K[0],0.0000,4, 'K Not equal to '+ str(0.0000))
self.assertAlmostEqual(L[0],1.7499,4, 'L Not equal to '+ str(1.7499))
self.assertAlmostEqual(E[0],0.0000,4, 'E Not equal to '+ str(0.0000))
self.assertAlmostEqual(Ei[0],4.9995,4,'Ei Not equal to '+str(4.9995))
self.assertAlmostEqual(Ef[0],4.9995,4,'Ef Not equal to '+str(4.9995))
def test_tetragonal2(self):
"""test the tetragonal cell, change E transfer"""
#setup lattice
self.fixture.a=N.array([6.283],'Float64')
self.fixture.b=N.array([6.283],'Float64')
self.fixture.c=N.array([11.765],'Float64')
orient1=N.array([[1,0,0]],'Float64')
orient2=N.array([[0,0,1]],'Float64')
orientation=Orientation(orient1,orient2)
self.fixture.orientation=orientation
#setup spectrometer
self.fixture.EXP['infix']=1 # Fixed Ef
self.fixture.EXP['efixed']=5.0
#test the angles
M2=N.radians(N.array([49.633],'Float64'))
A2=N.radians(N.array([74.169],'Float64'))
S1=N.radians(N.array([74.345],'Float64'))
S2=N.radians(N.array([82.717],'Float64'))
H,K,L,E,Q,Ei,Ef=self.fixture.SpecWhere(M2,S1,S2,A2,[self.fixture.EXP])
print 'H ',H
print 'K ',K
print 'L ',L
print 'E ',E
print 'Q ',Q
print 'Ei ',Ei
print 'Ef ',Ef
self.assertAlmostEqual(H[0],2.3749,4, 'H Not equal to '+ str(2.3749))
self.assertAlmostEqual(K[0],0.0000,4, 'K Not equal to '+ str(0.0000))
self.assertAlmostEqual(L[0],1.7499,4, 'L Not equal to '+ str(1.7499))
self.assertAlmostEqual(E[0],5.3197,4, 'E Not equal to '+ str(5.3197))
self.assertAlmostEqual(Ei[0],10.3192,4,'Ei Not equal to '+str(10.3192))
self.assertAlmostEqual(Ef[0],4.9995,4,'Ef Not equal to '+str(4.9995))
def test_tetragonal3(self):
"""test the tetragonal cell, change Ei, orientation vectors"""
#setup lattice
self.fixture.a=N.array([6.283],'Float64')
self.fixture.b=N.array([6.283],'Float64')
self.fixture.c=N.array([11.765],'Float64')
orient1=N.array([[1,1,0]],'Float64')
orient2=N.array([[0,0,1]],'Float64')
orientation=Orientation(orient1,orient2)
self.fixture.orientation=orientation
#setup spectrometer
self.fixture.EXP['infix']=-1 # Fixed Ei
self.fixture.EXP['efixed']=3.7
#test the angles
M2=N.radians(N.array([89.008],'Float64'))
A2=N.radians(N.array([89.008],'Float64'))
S1=N.radians(N.array([100.569],'Float64'))
S2=N.radians(N.array([148.389],'Float64'))
H,K,L,E,Q,Ei,Ef=self.fixture.SpecWhere(M2,S1,S2,A2,[self.fixture.EXP])
print 'H ',H
print 'K ',K
print 'L ',L
print 'E ',E
print 'Q ',Q
print 'Ei ',Ei
print 'Ef ',Ef
self.assertAlmostEqual(H[0],1.6289,4, 'H Not equal to '+ str(1.6289))
self.assertAlmostEqual(K[0],1.6289,4, 'K Not equal to '+ str(1.6289))
self.assertAlmostEqual(L[0],2.1389,4, 'L Not equal to '+ str(2.1389))
self.assertAlmostEqual(E[0],0.0000,4, 'E Not equal to '+ str(0.0000))
self.assertAlmostEqual(Ei[0],3.6997,4,'Ei Not equal to '+str(3.6997))
self.assertAlmostEqual(Ef[0],3.6997,4,'Ef Not equal to '+str(3.6997))
def test_tetragonal4(self):
"""test the tetragonal cell, change Energy transfer, compare with tetragonal3"""
#setup lattice
self.fixture.a=N.array([6.283],'Float64')
self.fixture.b=N.array([6.283],'Float64')
self.fixture.c=N.array([11.765],'Float64')
orient1=N.array([[1,1,0]],'Float64')
orient2=N.array([[0,0,1]],'Float64')
orientation=Orientation(orient1,orient2)
self.fixture.orientation=orientation
#setup spectrometer
self.fixture.EXP['infix']=-1 # Fixed Ei
self.fixture.EXP['efixed']=3.7
#test the angles
M2=N.radians(N.array([89.008],'Float64'))
A2=N.radians(N.array([98.663],'Float64'))
S1=N.radians(N.array([91.561],'Float64'))
S2=N.radians(N.array([117.979],'Float64'))
H,K,L,E,Q,Ei,Ef=self.fixture.SpecWhere(M2,S1,S2,A2,[self.fixture.EXP])
print 'H ',H
print 'K ',K
print 'L ',L
print 'E ',E
print 'Q ',Q
print 'Ei ',Ei
print 'Ef ',Ef
self.assertAlmostEqual(H[0],1.3329,4, 'H Not equal to '+ str(1.3329))
self.assertAlmostEqual(K[0],1.3329,4, 'K Not equal to '+ str(1.3329))
self.assertAlmostEqual(L[0],2.1389,4, 'L Not equal to '+ str(2.1389))
self.assertAlmostEqual(E[0],0.5400,4, 'E Not equal to '+ str(0.5400))
self.assertAlmostEqual(Ei[0],3.6997,4,'Ei Not equal to '+str(3.6997))
self.assertAlmostEqual(Ef[0],3.1597,4,'Ef Not equal to '+str(3.1597))
def test_tetragonal5(self):
"""test the tetragonal cell, swap orientation vectors"""
#setup lattice
self.fixture.a=N.array([6.283],'Float64')
self.fixture.b=N.array([6.283],'Float64')
self.fixture.c=N.array([11.765],'Float64')
orient1=N.array([[0,0,1]],'Float64')
orient2=N.array([[1,1,0]],'Float64')
orientation=Orientation(orient1,orient2)
self.fixture.orientation=orientation
#setup spectrometer
self.fixture.EXP['infix']=-1 # Fixed Ei
self.fixture.EXP['efixed']=3.7
#test the angles
M2=N.radians(N.array([89.008],'Float64'))
A2=N.radians(N.array([98.663],'Float64'))
S1=N.radians(N.array([119.133],'Float64')) #recall how icp chooses the a3 angle based on first orientation vector
S2=N.radians(N.array([117.979],'Float64'))
H,K,L,E,Q,Ei,Ef=self.fixture.SpecWhere(M2,S1,S2,A2,[self.fixture.EXP])
print 'H ',H
print 'K ',K
print 'L ',L
print 'E ',E
print 'Q ',Q
print 'Ei ',Ei
print 'Ef ',Ef
self.assertAlmostEqual(H[0],1.3329,4, 'H Not equal to '+ str(1.3329))
self.assertAlmostEqual(K[0],1.3329,4, 'K Not equal to '+ str(1.3329))
self.assertAlmostEqual(L[0],2.1389,4, 'L Not equal to '+ str(2.1389))
self.assertAlmostEqual(E[0],0.5400,4, 'E Not equal to '+ str(0.5400))
self.assertAlmostEqual(Ei[0],3.6997,4,'Ei Not equal to '+str(3.6997))
self.assertAlmostEqual(Ef[0],3.1597,4,'Ef Not equal to '+str(3.1597))
def test_tetragonal6(self):
"""test the tetragonal cell, swap orientation vectors compared with tetragonal 4"""
#setup lattice
self.fixture.a=N.array([6.283],'Float64')
self.fixture.b=N.array([6.283],'Float64')
self.fixture.c=N.array([11.765],'Float64')
orient1=N.array([[0,0,1]],'Float64')
orient2=N.array([[1,1,0]],'Float64')
orientation=Orientation(orient1,orient2)
self.fixture.orientation=orientation
#setup spectrometer
self.fixture.EXP['infix']=-1 # Fixed Ei
self.fixture.EXP['efixed']=3.7
#test the angles
M2=N.radians(N.array([89.008],'Float64'))
A2=N.radians(N.array([89.008],'Float64'))
S1=N.radians(N.array([137.820],'Float64')) #recall how icp chooses the a3 angle based on first orientation vector
S2=N.radians(N.array([148.389],'Float64'))
H,K,L,E,Q,Ei,Ef=self.fixture.SpecWhere(M2,S1,S2,A2,[self.fixture.EXP])
print 'H ',H
print 'K ',K
print 'L ',L
print 'E ',E
print 'Q ',Q
print 'Ei ',Ei
print 'Ef ',Ef
self.assertAlmostEqual(H[0],1.6289,4, 'H Not equal to '+ str(1.6289))
self.assertAlmostEqual(K[0],1.6289,4, 'K Not equal to '+ str(1.6289))
self.assertAlmostEqual(L[0],2.1389,4, 'L Not equal to '+ str(2.1389))
self.assertAlmostEqual(E[0],0.0000,4, 'E Not equal to '+ str(0.0000))
self.assertAlmostEqual(Ei[0],3.6997,4,'Ei Not equal to '+str(3.6997))
self.assertAlmostEqual(Ef[0],3.6997,4,'Ef Not equal to '+str(3.6997))
def test_orthorhombic1(self):
"""test the orthorhombic cell"""
#setup lattice
self.fixture.a=N.array([6.283],'Float64')
self.fixture.b=N.array([5.7568],'Float64')
self.fixture.c=N.array([11.765],'Float64')
orient1=N.array([[1,0,0]],'Float64')
orient2=N.array([[0,0,2]],'Float64')
orientation=Orientation(orient1,orient2)
self.fixture.orientation=orientation
#setup spectrometer
self.fixture.EXP['infix']=1 # Fixed Ef
self.fixture.EXP['efixed']=4.5
#test the angles
M2=N.radians(N.array([78.930],'Float64'))
A2=N.radians(N.array([78.930],'Float64'))
S1=N.radians(N.array([89.644],'Float64'))
S2=N.radians(N.array([86.470],'Float64'))
H,K,L,E,Q,Ei,Ef=self.fixture.SpecWhere(M2,S1,S2,A2,[self.fixture.EXP])
print 'H ',H
print 'K ',K
print 'L ',L
print 'E ',E
print 'Q ',Q
print 'Ei ',Ei
print 'Ef ',Ef
self.assertAlmostEqual(H[0],1.3919,4, 'H Not equal to '+ str(1.3919))
self.assertAlmostEqual(K[0],0.0000,4, 'K Not equal to '+ str(0.0000))
self.assertAlmostEqual(L[0],2.7379,4, 'L Not equal to '+ str(2.7379))
self.assertAlmostEqual(E[0],0.0000,4, 'E Not equal to '+ str(0.0000))
self.assertAlmostEqual(Ei[0],4.4996,4,'Ei Not equal to '+str(4.4996))
self.assertAlmostEqual(Ef[0],4.4996,4,'Ef Not equal to '+str(4.4996))
def test_orthorhombic2(self):
"""test the orthorhombic cell, change E transfer"""
#setup lattice
self.fixture.a=N.array([6.283],'Float64')
self.fixture.b=N.array([5.7568],'Float64')
self.fixture.c=N.array([11.765],'Float64')
orient1=N.array([[1,0,0]],'Float64')
orient2=N.array([[0,0,2]],'Float64')
orientation=Orientation(orient1,orient2)
self.fixture.orientation=orientation
#setup spectrometer
self.fixture.EXP['infix']=1 # Fixed Ef
self.fixture.EXP['efixed']=4.5
#test the angles
M2=N.radians(N.array([46.305],'Float64'))
A2=N.radians(N.array([78.930],'Float64'))
S1=N.radians(N.array([98.405],'Float64'))
S2=N.radians(N.array([57.515],'Float64'))
H,K,L,E,Q,Ei,Ef=self.fixture.SpecWhere(M2,S1,S2,A2,[self.fixture.EXP])
print 'H ',H
print 'K ',K
print 'L ',L
print 'E ',E
print 'Q ',Q
print 'Ei ',Ei
print 'Ef ',Ef
self.assertAlmostEqual(H[0],1.3919,4, 'H Not equal to '+ str(1.3919))
self.assertAlmostEqual(K[0],0.0000,4, 'K Not equal to '+ str(0.0000))
self.assertAlmostEqual(L[0],2.7379,4, 'L Not equal to '+ str(2.7379))
self.assertAlmostEqual(E[0],7.2594,4, 'E Not equal to '+ str(7.2594))
self.assertAlmostEqual(Ei[0],11.7590,4,'Ei Not equal to '+str(11.7590))
self.assertAlmostEqual(Ef[0],4.4996,4,'Ef Not equal to '+str(4.4996))
def test_orthorhombic3(self):
"""test the orthorhombic cell, change orientation vectors, compare with ortho2"""
#setup lattice
self.fixture.a=N.array([6.283],'Float64')
self.fixture.b=N.array([5.7568],'Float64')
self.fixture.c=N.array([11.765],'Float64')
orient1=N.array([[1,1,0]],'Float64')
orient2=N.array([[0,0,2]],'Float64')
orientation=Orientation(orient1,orient2)
self.fixture.orientation=orientation
#setup spectrometer
self.fixture.EXP['infix']=1 # Fixed Ef
self.fixture.EXP['efixed']=4.5
#test the angles
M2=N.radians(N.array([78.930],'Float64'))
A2=N.radians(N.array([78.930],'Float64'))
S1=N.radians(N.array([91.228],'Float64'))
S2=N.radians(N.array([105.102],'Float64'))
H,K,L,E,Q,Ei,Ef=self.fixture.SpecWhere(M2,S1,S2,A2,[self.fixture.EXP])
print 'H ',H
print 'K ',K
print 'L ',L
print 'E ',E
print 'Q ',Q
print 'Ei ',Ei
print 'Ef ',Ef
self.assertAlmostEqual(H[0],1.2339,4, 'H Not equal to '+ str(1.2339))
self.assertAlmostEqual(K[0],1.2339,4, 'K Not equal to '+ str(1.2339))
self.assertAlmostEqual(L[0],2.7379,4, 'L Not equal to '+ str(2.7379))
self.assertAlmostEqual(E[0],0.0000,4, 'E Not equal to '+ str(0.0000))
self.assertAlmostEqual(Ei[0],4.4996,4,'Ei Not equal to '+str(4.4966))
self.assertAlmostEqual(Ef[0],4.4996,4,'Ef Not equal to '+str(4.4996))
def test_orthorhombic4(self):
"""test the orthorhombic cell, change E, compare with ortho3"""
#setup lattice
self.fixture.a=N.array([6.283],'Float64')
self.fixture.b=N.array([5.7568],'Float64')
self.fixture.c=N.array([11.765],'Float64')
orient1=N.array([[1,1,0]],'Float64')
orient2=N.array([[0,0,2]],'Float64')
orientation=Orientation(orient1,orient2)
self.fixture.orientation=orientation
#setup spectrometer
self.fixture.EXP['infix']=1 # Fixed Ef
self.fixture.EXP['efixed']=4.5
#test the angles
M2=N.radians(N.array([47.030],'Float64'))
A2=N.radians(N.array([78.930],'Float64'))
S1=N.radians(N.array([92.030],'Float64'))
S2=N.radians(N.array([71.391],'Float64'))
H,K,L,E,Q,Ei,Ef=self.fixture.SpecWhere(M2,S1,S2,A2,[self.fixture.EXP])
print 'H ',H
print 'K ',K
print 'L ',L
print 'E ',E
print 'Q ',Q
print 'Ei ',Ei
print 'Ef ',Ef
self.assertAlmostEqual(H[0],1.2339,4, 'H Not equal to '+ str(1.2339))
self.assertAlmostEqual(K[0],1.2339,4, 'K Not equal to '+ str(1.2339))
self.assertAlmostEqual(L[0],2.7379,4, 'L Not equal to '+ str(2.7379))
self.assertAlmostEqual(E[0],6.9194,4, 'E Not equal to '+ str(6.9194))
self.assertAlmostEqual(Ei[0],11.4190,4,'Ei Not equal to '+str(11.4190))
self.assertAlmostEqual(Ef[0],4.4996,4,'Ef Not equal to '+str(4.4996))
def test_orthorhombic5(self):
"""test the orthorhombic cell, swap orientation vectors, compare with ortho4"""
#setup lattice
self.fixture.a=N.array([6.283],'Float64')
self.fixture.b=N.array([5.7568],'Float64')
self.fixture.c=N.array([11.765],'Float64')
orient1=N.array([[0,0,2]],'Float64')
orient2=N.array([[1,1,0]],'Float64')
orientation=Orientation(orient1,orient2)
self.fixture.orientation=orientation
#setup spectrometer
self.fixture.EXP['infix']=1 # Fixed Ef
self.fixture.EXP['efixed']=4.5
#test the angles
M2=N.radians(N.array([47.030],'Float64'))
A2=N.radians(N.array([78.930],'Float64'))
S1=N.radians(N.array([104.676],'Float64')) #recalll how icp defines a3 in terms of a4 of orient1
S2=N.radians(N.array([71.391],'Float64'))
H,K,L,E,Q,Ei,Ef=self.fixture.SpecWhere(M2,S1,S2,A2,[self.fixture.EXP])
print 'H ',H
print 'K ',K
print 'L ',L
print 'E ',E
print 'Q ',Q
print 'Ei ',Ei
print 'Ef ',Ef
self.assertAlmostEqual(H[0],1.2339,4, 'H Not equal to '+ str(1.2339))
self.assertAlmostEqual(K[0],1.2339,4, 'K Not equal to '+ str(1.2339))
self.assertAlmostEqual(L[0],2.7379,4, 'L Not equal to '+ str(2.7379))
self.assertAlmostEqual(E[0],6.9194,4, 'E Not equal to '+ str(6.9194))
self.assertAlmostEqual(Ei[0],11.4190,4,'Ei Not equal to '+str(11.4190))
self.assertAlmostEqual(Ef[0],4.4996,4,'Ef Not equal to '+str(4.4996))
def test_orthorhombic6(self):
"""test the orthorhombic cell, swap orient vectors, compare with ortho3"""
#setup lattice
self.fixture.a=N.array([6.283],'Float64')
self.fixture.b=N.array([5.7568],'Float64')
self.fixture.c=N.array([11.765],'Float64')
orient1=N.array([[0,0,2]],'Float64')
orient2=N.array([[1,1,0]],'Float64')
orientation=Orientation(orient1,orient2)
self.fixture.orientation=orientation
#setup spectrometer
self.fixture.EXP['infix']=1 # Fixed Ef
self.fixture.EXP['efixed']=4.5
#test the angles
M2=N.radians(N.array([78.930],'Float64'))
A2=N.radians(N.array([78.930],'Float64'))
S1=N.radians(N.array([103.874],'Float64')) #recalll how icp defines a3 in terms of a4 of orient1
S2=N.radians(N.array([105.102],'Float64'))
H,K,L,E,Q,Ei,Ef=self.fixture.SpecWhere(M2,S1,S2,A2,[self.fixture.EXP])
print 'H ',H
print 'K ',K
print 'L ',L
print 'E ',E
print 'Q ',Q
print 'Ei ',Ei
print 'Ef ',Ef
self.assertAlmostEqual(H[0],1.2339,4, 'H Not equal to '+ str(1.2339))
self.assertAlmostEqual(K[0],1.2339,4, 'K Not equal to '+ str(1.2339))
self.assertAlmostEqual(L[0],2.7379,4, 'L Not equal to '+ str(2.7379))
self.assertAlmostEqual(E[0],0.0000,4, 'E Not equal to '+ str(0.0000))
self.assertAlmostEqual(Ei[0],4.4996,4,'Ei Not equal to '+str(4.4996))
self.assertAlmostEqual(Ef[0],4.4996,4,'Ef Not equal to '+str(4.4996))
def test_monoclinic1(self):
"""test monoclinic 1"""
#setup lattice
self.fixture.a=N.array([6.283],'Float64')
self.fixture.b=N.array([5.7568],'Float64')
self.fixture.c=N.array([11.765],'Float64')
self.fixture.beta=N.radians(N.array([100.0],'Float64'))
orient1=N.array([[1,0,0]],'Float64')
orient2=N.array([[0,0,1]],'Float64')
orientation=Orientation(orient1,orient2)
self.fixture.orientation=orientation
#setup spectrometer
self.fixture.EXP['infix']=1 # Fixed Ef
self.fixture.EXP['efixed']=4.5
#test the angles
M2=N.radians(N.array([78.930],'Float64'))
A2=N.radians(N.array([78.930],'Float64'))
S1=N.radians(N.array([108.743],'Float64')) #recalll how icp defines a3 in terms of a4 of orient1
S2=N.radians(N.array([130.130],'Float64'))
H,K,L,E,Q,Ei,Ef=self.fixture.SpecWhere(M2,S1,S2,A2,[self.fixture.EXP])
print 'H ',H
print 'K ',K
print 'L ',L
print 'E ',E
print 'Q ',Q
print 'Ei ',Ei
print 'Ef ',Ef
self.assertAlmostEqual(H[0],1.5829,4, 'H Not equal to '+ str(1.5829))
self.assertAlmostEqual(K[0],0.0000,4, 'K Not equal to '+ str(0.0000))
self.assertAlmostEqual(L[0],3.4558,4, 'L Not equal to '+ str(3.4558))
self.assertAlmostEqual(E[0],0.0000,4, 'E Not equal to '+ str(0.0000))
self.assertAlmostEqual(Ei[0],4.4996,4,'Ei Not equal to '+str(4.4996))
self.assertAlmostEqual(Ef[0],4.4996,4,'Ef Not equal to '+str(4.4996))
def test_monoclinic2(self):
"""test monoclinic 2, change E"""
#setup lattice
self.fixture.a=N.array([6.283],'Float64')
self.fixture.b=N.array([5.7568],'Float64')
self.fixture.c=N.array([11.765],'Float64')
self.fixture.beta=N.radians(N.array([100.0],'Float64'))
orient1=N.array([[1,0,0]],'Float64')
orient2=N.array([[0,0,1]],'Float64')
orientation=Orientation(orient1,orient2)
self.fixture.orientation=orientation
#setup spectrometer
self.fixture.EXP['infix']=1 # Fixed Ef
self.fixture.EXP['efixed']=4.5
#test the angles
M2=N.radians(N.array([74.186],'Float64'))
A2=N.radians(N.array([78.930],'Float64'))
S1=N.radians(N.array([106.473],'Float64')) #recalll how icp defines a3 in terms of a4 of orient1
S2=N.radians(N.array([123.991],'Float64'))
H,K,L,E,Q,Ei,Ef=self.fixture.SpecWhere(M2,S1,S2,A2,[self.fixture.EXP])
print 'H ',H
print 'K ',K
print 'L ',L
print 'E ',E
print 'Q ',Q
print 'Ei ',Ei
print 'Ef ',Ef
self.assertAlmostEqual(H[0],1.5829,4, 'H Not equal to '+ str(1.5829))
self.assertAlmostEqual(K[0],0.0000,4, 'K Not equal to '+ str(0.0000))
self.assertAlmostEqual(L[0],3.4558,4, 'L Not equal to '+ str(3.4558))
self.assertAlmostEqual(E[0],0.4979,4, 'E Not equal to '+ str(0.4979))
self.assertAlmostEqual(Ei[0],4.9976,4,'Ei Not equal to '+str(4.9976))
self.assertAlmostEqual(Ef[0],4.4996,4,'Ef Not equal to '+str(4.4996))
def test_monoclinic3(self):
"""test monoclinic 2, change orient vectors"""
#setup lattice
self.fixture.a=N.array([6.283],'Float64')
self.fixture.b=N.array([5.7568],'Float64')
self.fixture.c=N.array([11.765],'Float64')
self.fixture.beta=N.radians(N.array([100.0],'Float64'))
orient1=N.array([[1,2,0]],'Float64')
orient2=N.array([[0,0,1]],'Float64')
orientation=Orientation(orient1,orient2)
self.fixture.orientation=orientation
#setup spectrometer
self.fixture.EXP['infix']=1 # Fixed Ef
self.fixture.EXP['efixed']=4.5
#test the angles
M2=N.radians(N.array([78.930],'Float64'))
A2=N.radians(N.array([78.930],'Float64'))
S1=N.radians(N.array([70.753],'Float64')) #recalll how icp defines a3 in terms of a4 of orient1
S2=N.radians(N.array([91.256],'Float64'))
H,K,L,E,Q,Ei,Ef=self.fixture.SpecWhere(M2,S1,S2,A2,[self.fixture.EXP])
print 'H ',H
print 'K ',K
print 'L ',L
print 'E ',E
print 'Q ',Q
print 'Ei ',Ei
print 'Ef ',Ef
self.assertAlmostEqual(H[0],0.7650,4, 'H Not equal to '+ str(0.7650))
self.assertAlmostEqual(K[0],1.5299,4, 'K Not equal to '+ str(1.5299))
self.assertAlmostEqual(L[0],1.6539,4, 'L Not equal to '+ str(1.6539))
self.assertAlmostEqual(E[0],0.0000,4, 'E Not equal to '+ str(0.0000))
self.assertAlmostEqual(Ei[0],4.4996,4,'Ei Not equal to '+str(4.49996))
self.assertAlmostEqual(Ef[0],4.4996,4,'Ef Not equal to '+str(4.4996))
def test_monoclinic4(self):
"""test monoclinic 2, change E"""
#setup lattice
self.fixture.a=N.array([6.283],'Float64')
self.fixture.b=N.array([5.7568],'Float64')
self.fixture.c=N.array([11.765],'Float64')
self.fixture.beta=N.radians(N.array([100.0],'Float64'))
orient1=N.array([[1,2,0]],'Float64')
orient2=N.array([[0,0,1]],'Float64')
orientation=Orientation(orient1,orient2)
self.fixture.orientation=orientation
#setup spectrometer
self.fixture.EXP['infix']=1 # Fixed Ef
self.fixture.EXP['efixed']=4.5
#test the angles
M2=N.array([56.239],'Float64')
A2=N.array([78.930],'Float64')
S1=N.array([73.059],'Float64') #recalll how icp defines a3 in terms of a4 of orient1
S2=N.array([73.305],'Float64')
H,K,L,E,Q,Ei,Ef=self.fixture.SpecWhere(M2,S1,S2,A2,[self.fixture.EXP])
print 'H ',H
print 'K ',K
print 'L ',L
print 'E ',E
print 'Q ',Q
print 'Ei ',Ei
print 'Ef ',Ef
self.assertAlmostEqual(H[0],0.7650,4, 'H Not equal to '+ str(0.7650))
self.assertAlmostEqual(K[0],1.5299,4, 'K Not equal to '+ str(1.5299))
self.assertAlmostEqual(L[0],1.6539,4, 'L Not equal to '+ str(1.6539))
self.assertAlmostEqual(E[0],3.6838,4, 'E Not equal to '+ str(3.6838))
self.assertAlmostEqual(Ei[0],8.1834,4,'Ei Not equal to '+str(8.1834))
self.assertAlmostEqual(Ef[0],4.4996,4,'Ef Not equal to '+str(4.4996))
if __name__=="__main__":
    # Ad-hoc smoke test (not the unittest entry point): build a hexagonal
    # lattice (a == b, gamma == 120) and run SpecWhere once.
    #mylattice = Lattice(2*pi,2*pi,2*pi,90,90,90)
    mylattice = Lattice(5.96520,5.96520,11.702,90,90,120)
    mylattice.calc_twotheta(2.35916,N.array([0.]),N.array([0.]),N.array([2.0]))
    # Spectrometer configuration: PG(002) monochromator and analyzer,
    # mosaics in minutes of arc, Soller collimations in minutes.
    EXP={}
    EXP['ana']={}
    EXP['ana']['tau']='pg(002)'
    EXP['mono']={}
    EXP['mono']['tau']='pg(002)';
    EXP['ana']['mosaic']=30
    EXP['mono']['mosaic']=30
    EXP['sample']={}
    EXP['sample']['mosaic']=10
    EXP['sample']['vmosaic']=10
    EXP['hcol']=N.array([40, 10, 20, 80],'Float64')
    EXP['vcol']=N.array([120, 120, 120, 120],'Float64')
    EXP['infix']=-1 #positive for fixed incident energy
    EXP['efixed']=14.7
    EXP['method']=0
    instrument=Instrument()
    if 1:
        #test the angles
        # forward problem: spectrometer angles -> (H, K, L, E)
        orientation=Orientation(N.array([[1,0,0]],'Float64'),N.array([[0,0,1]],'Float64'))
        M2=N.array([41.177])
        A2=N.array([41.177])
        S1=N.array([77.6])
        S2=N.array([43.5])
        H,K,L,E,Q,Ei,Ef=SpecWhere(M2,S1,S2,A2,[EXP],mylattice,orientation,instrument)
        print H,K,L
    if 0:
        # inverse problem (disabled): (H, K, L, W) -> spectrometer angles
        orientation=Orientation(N.array([[1,0,0]],'Float64'),N.array([[0,1,0]],'Float64'))
        H=N.array([1.0])
        K=N.array([1.0])
        L=N.array([0.0])
        W=N.array([0.0])
        M1,M2,S1,S2,A1,A2=SpecGoTo(H,K,L,W,[EXP],mylattice,orientation)
        print M1,M2
        print S1,S2
        print A1,A2
#if __name__=="__main__":
# if 0:
# a=N.array([2*pi,2*pi],'Float64')
# b=N.array([8],'Float64')
# c=N.array([11],'Float64')
# alpha=N.radians(N.array([87],'Float64'))
# beta=N.radians(N.array([52],'Float64'))
# gamma=N.radians(N.array([100],'Float64'))
# orient1=N.array([[0,1,0]],'Float64')
# orient2=N.array([[1,0,0]],'Float64')
# orientation=Orientation(orient1,orient2)
# self.fixture.orientation=orientation
# mylattice=Lattice(a=a,b=b,c=c,alpha=alpha,beta=beta,gamma=gamma,\
# orientation=orientation)
# H=N.array([1],'Float64');K=N.array([0],'Float64');L=N.array([0],'Float64');W=N.array([0],'Float64')
# EXP={}
# EXP['ana']={}
# EXP['ana']['tau']='pg(002)'
# EXP['mono']={}
# EXP['mono']['tau']='pg(002)';
# EXP['ana']['mosaic']=30
# EXP['mono']['mosaic']=30
# EXP['sample']={}
# EXP['sample']['mosaic']=10
# EXP['sample']['vmosaic']=10
# EXP['hcol']=N.array([40, 10, 20, 80],'Float64')
# EXP['vcol']=N.array([120, 120, 120, 120],'Float64')
# EXP['infix']=-1 #positive for fixed incident energy
# EXP['efixed']=14.7
# EXP['method']=0
# setup=[EXP]
# M2=N.radians(N.array([41.177]))
# A2=N.radians(N.array([41.177]))
# S1=N.radians(N.array([66.4363]))
# S2=N.radians(N.array([37.6547]))
# H,K,L,E,Q,Ei,Ef=mylattice.SpecWhere(M2,S1,S2,A2,setup)
# print 'H ',H
# print 'K ',K
# print 'L ',L
# print 'E ',E
# print 'Q ',Q
# print 'Ei ',Ei
# print 'Ef ',Ef
# M1,M2,S1,S2,A1,A2=mylattice.SpecGoTo(H,K,L,E,setup)
# print 'M2 ',N.degrees(M2)
# print 'A2 ',N.degrees(A2)
# print 'M1 ',N.degrees(M1)
# print 'A1 ',N.degrees(A1)
# print 'S1 ',N.degrees(S1)
# print 'S2 ',N.degrees(S2)
# if 1:
# unittest.main()
| [
"dquintan@andrew.cmu.edu"
] | dquintan@andrew.cmu.edu |
4365938a5db92558c7c18ea93d358dfe9ffed5bd | 0b0d3246d39974cb8faff7d269da2d539415afab | /problem_python/p49.py | 88e5b7863a391d3d438604eab2bbd0cc41c6c173 | [] | no_license | xionghhcs/leetcode | 972e7ae4ca56b7100223630b294b5a97ba5dd7e8 | 8bd43dcd995a9de0270b8cea2d9a48df17ffc08b | refs/heads/master | 2020-03-07T17:18:08.465559 | 2019-09-29T11:11:26 | 2019-09-29T11:11:26 | 127,607,564 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | class Solution:
def groupAnagrams(self, strs):
import copy
strs_cp = copy.deepcopy(strs)
for i, item in enumerate(strs_cp):
item = list(item)
item.sort()
item = ''.join(item)
strs_cp[i] = item
table = dict()
for i, item in enumerate(strs_cp):
if item not in table:
table[item] = []
table[item].append(strs[i])
ans = []
for k in table:
ans.append(table[k])
return ans
| [
"xionghhcs@163.com"
] | xionghhcs@163.com |
c1665ca29eb89bec54f23abc0101df65132a1288 | a359924a5c3e4e74b6a96199d0daf59b09d950ab | /http_server_socket.py | a3429391513c183be401aff136cff9fb86214977 | [] | no_license | Sbregiuz/trainchecker | c8b4b5ac98fc6c8b75cfba1ce96e0ac7e806c710 | 68ca3a58ddf797fb2f959d0caa7e27b1a3415ee4 | refs/heads/master | 2020-05-09T10:25:20.850033 | 2019-04-12T09:23:10 | 2019-04-12T09:23:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,782 | py | #!/usr/bin/python3
import socket
import threading
import datetime
class HttpServerSocket(socket.socket):
    """Minimal threaded HTTP server built directly on a TCP socket.

    A daemon thread accepts connections; every accepted client is served
    on its own daemon thread by the registered handlers (objects exposing
    ``handle_request(raw_request)`` that return either ``False`` or a
    ``(status_code, content_type, response_text)`` tuple).
    """

    def __init__(self, ip, port):
        socket.socket.__init__(self, socket.AF_INET, socket.SOCK_STREAM)
        self.bind((ip, port))
        self.listen(5)
        self.accept_thread = threading.Thread(target=self.worker_thread)
        self.accept_thread.daemon = True
        self.accept_thread.start()
        self.http_handlers = []
        print('[HttpServerSocket] socket started on %s:%d\n' % (ip, port))

    def bind_http_handler(self, handler):
        """Register a handler consulted (in registration order) per request."""
        self.http_handlers.append(handler)

    def send_response(self, client_socket, response):
        """Serialize (status_code, content_type, response_text) as an
        HTTP/1.1 response, send it, and close the connection."""
        (status_code, content_type, response_text) = response
        raw = 'HTTP/1.1 %d\n' % (status_code)
        # NOTE(review): '%-' strftime modifiers are glibc-only, and HTTP
        # formally requires CRLF line endings rather than '\n' -- confirm
        # the intended clients tolerate both.
        raw += 'Date: %s\n' % (datetime.datetime.today().strftime('%a, %-d %b %Y %-H:%-M:%-S GMT'))
        raw += 'Server: trainchecker-webserver\n'
        raw += 'Content-Type: %s\n' % (content_type)
        if len(response_text) > 0:
            raw += 'Content-Length: %d\n\n%s\n' % (len(response_text), response_text)
        # BUGFIX: socket.send() requires bytes on Python 3; sending the str
        # directly raised TypeError on every response.
        client_socket.send(raw.encode('utf-8'))
        client_socket.close()

    def handle_client(self, client_socket, client_address):
        """Read one request and dispatch it to the first willing handler."""
        request = client_socket.recv(1024)
        #print('[HttpServerSocket] received -> {}'.format(request))
        for handler in self.http_handlers:
            response = handler.handle_request(request)
            if response == False:
                continue
            self.send_response(client_socket, response)
            return
        # BUGFIX: previously the socket was leaked (left open) when no
        # handler produced a response.
        client_socket.close()

    def worker_thread(self):
        """Accept loop: spawn one daemon thread per incoming connection."""
        print('[HttpServerSocket] accepting worker thread started\n')
        while True:
            (client_socket, client_address) = self.accept()
            print('[HttpServerSocket] accepted incoming connection from %s:%d\n' % (client_address))
            client_thread = threading.Thread(target=self.handle_client, args=(client_socket, client_address,))
            client_thread.daemon = True
            client_thread.start()
| [
"aesir@Aesir-Laptop.localdomain"
] | aesir@Aesir-Laptop.localdomain |
6036b4e9fe5bce86b786985656d485851ebc000e | 78918441c6735b75adcdf20380e5b6431891b21f | /api/views.py | 7db0c4896ac2f5194e3116b1611b1bf43493ac47 | [] | no_license | dede-20191130/PracticeDjango_2 | eba40532d5ce8bd4fd13fbd15d94f31942111cfa | 23593c0fa4c4dff04bd76583e8176e600ca69014 | refs/heads/master | 2020-12-23T14:23:08.039363 | 2020-02-27T12:16:00 | 2020-02-27T12:16:00 | 237,176,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,502 | py | import json
from collections import OrderedDict
from django.http import HttpResponse
from mybook.models import Book
def render_json_response(request, data, status=None):
    """Return *data* serialized as a JSON (or JSONP) HTTP response.

    When a ``callback`` parameter is present in the GET (or, failing that,
    the POST) data, the payload is wrapped as JSONP and served with a
    JavaScript content type; otherwise plain JSON is returned.
    """
    payload = json.dumps(data, ensure_ascii=False, indent=2)
    # JSONP callback may arrive via GET or via POST
    callback = request.GET.get('callback') or request.POST.get('callback')
    if callback:
        payload = "%s(%s)" % (callback, payload)
        content_type = 'application/javascript; charset=UTF-8'
    else:
        content_type = 'application/json; charset=UTF-8'
    return HttpResponse(payload, content_type=content_type, status=status)
def book_list(request):
    """Return every book together with its impressions (comments) as JSON."""
    books = []
    for book in Book.objects.all().order_by('id'):
        impressions = [
            OrderedDict([
                ('impression_id', impression.id),
                ('comment', impression.comment),
            ])
            for impression in book.impressions.order_by('id')
        ]
        books.append(OrderedDict([
            ('book_id', book.id),
            ('name', book.name),
            ('publisher', book.publisher),
            ('page', book.page),
            ('impressions', impressions),
        ]))
    return render_json_response(request, OrderedDict([('books', books)]))
| [
"1044adad@gmail.com"
] | 1044adad@gmail.com |
454182b170715cbea8bc2fdce4acf5bf5e53df24 | b6feefafed3147e13b029d8cd7f7bd2985460eb9 | /count_words.py | c1d159bd96637557b4fa96544ff18e50354cc23f | [
"MIT"
] | permissive | jmmL/misc | a44ed531a3d3bc2ff247f1959fff81dd379ef330 | 6bdbcd977fd29ea9a73f99364ff21caccd30d3d0 | refs/heads/master | 2020-12-24T14:00:44.096337 | 2015-01-24T23:05:25 | 2015-01-24T23:05:25 | 24,642,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | def main():
""" A very naive word-counter"""
wordy_string = input("Please enter a string:\n")
alphabet = ["q","w","e", "r", "t", "y", "u", "i", "o", "p", "a", "s",
"d", "f", "g", "h", "j", "k", "l", "z", "x", "c", "v", "b", "n", "m",]
word_count = 0
for i in range(len(alphabet)):
if alphabet[i] in wordy_string:
word_count = 1
for i in wordy_string:
if i == " ":
word_count += 1
print(word_count)
main()
| [
"jamie.lawler@gmail.com"
] | jamie.lawler@gmail.com |
222be11de59c1ac1e77cc58956a505be286371f8 | 51b3ad4581e3b451c3ff02aa66e252dfa191490f | /Exercicios python - Avaliacao/rm86180exer8.py | e15299b3b05d964ce7465b4724d35a8218904a14 | [] | no_license | xmelo/Projects-_Python | 2fd925130b5f29517b5f22eb85159eb44c0f95ac | e966ff7ae6e17ad1568bd9a87d61f0bd2e2401db | refs/heads/main | 2023-06-04T05:07:41.298564 | 2021-06-23T15:53:45 | 2021-06-23T15:53:45 | 379,651,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | n1=float(input("Digite um número real:"))
n2=float(input("Digite outro número real:"))
resul= n1 + n2
# Report whether the sum of the two numbers read above exceeds 10
# (the two branches together cover every value of resul).
if (resul > 10):
    print("O número é maior que 10!")
elif (resul <= 10):
    print("O número é menor ou igual a 10!")
"noreply@github.com"
] | noreply@github.com |
d37fe7dd2eeed9cd1671d7f0927797b718040ff6 | b4166044870d1c026e86c95ac41e3e3613ee424f | /python_basic/abc035_a.py | cb3e26984addc996cbe33c23443e49ee4d0229ba | [] | no_license | nsakki55/AtCoder | 2cbb785415a7c0b9df9953ddc3706c90a5716a03 | 03c428e8eb8f24b8560d00e2388ba75509619690 | refs/heads/master | 2020-05-31T04:33:06.400697 | 2020-01-19T13:41:41 | 2020-01-19T13:41:41 | 190,099,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 72 | py | w,h=map(int,input().split())
# Print '16:9' when w*h is divisible by 144, otherwise '4:3' (AtCoder ABC035 A).
print('16:9' if w*h%144==0 else '4:3')
| [
"n.sakki55@gmail.com"
] | n.sakki55@gmail.com |
890771890284fc4e615b8f2f73d9492d94c535d0 | 90e6e97d94a660c01d30cd04d539f86da3fa7f89 | /client/flash.py | b2a5eb69bcbd6de06eddd8f27ad3baa486aea060 | [
"MIT"
] | permissive | webfpga/icestorm-server | 679643beb4bbce55b11f5535cc1506ad2e769de1 | a5a93dc48cc88a896cd5a0fe5739341e206d71dd | refs/heads/master | 2021-06-13T06:08:05.243501 | 2020-04-30T09:09:48 | 2020-04-30T09:09:48 | 202,099,251 | 7 | 2 | MIT | 2021-05-29T13:16:08 | 2019-08-13T08:28:03 | C++ | UTF-8 | Python | false | false | 55 | py | #!/usr/bin/env python
print("not implemented yet :(")
| [
"ryan@rmj.us"
] | ryan@rmj.us |
0bdbc4e81f4ff6d06481139ee1a7e5694826e4e9 | eece2111c05d8aaa9325557be95057b11e20320d | /2019_CSE4020_2017029716/Assignment10/2017029716-10-1.py | 762418385b5b9f7d64875a6128e35e09ef9b4939 | [] | no_license | hyedoii/computerGraphics | 4e1559da144087892c77c4ef68ff3215c0de2f79 | 4799704c2987cbaaf786b27f4c744d9992ebbed4 | refs/heads/master | 2020-09-03T12:52:25.200554 | 2019-11-04T10:07:47 | 2019-11-04T10:07:47 | 219,466,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,740 | py | ###################################################
# [Practice] Euler Angles in OpenGL
import glfw
from OpenGL.GL import *
from OpenGL.GLU import *
import numpy as np
from OpenGL.arrays import vbo
import ctypes
# Camera azimuth (radians) and eye height, consumed by gluLookAt in render().
gCamAng = 0.
gCamHeight = 1.
# Euler angles (radians) composed as Rz(zang) @ Rx(xang) @ Rz(nang) in render();
# adjusted at runtime from key_callback().
xang = np.radians(0)
nang = np.radians(0)
zang = np.radians(0)
def drawCube_glVertex():
    """Draw a 2x2x2 cube in immediate mode (glBegin/glEnd).

    Each of the six faces is emitted as two triangles sharing the face
    normal; vertex order matches the original hand-unrolled version.
    """
    # (face normal, six vertices = two triangles)
    faces = (
        ((0, 0, 1),  ((-1, 1, 1), (1, -1, 1), (1, 1, 1),
                      (-1, 1, 1), (-1, -1, 1), (1, -1, 1))),
        ((0, 0, -1), ((-1, 1, -1), (1, 1, -1), (1, -1, -1),
                      (-1, 1, -1), (1, -1, -1), (-1, -1, -1))),
        ((0, 1, 0),  ((-1, 1, 1), (1, 1, 1), (1, 1, -1),
                      (-1, 1, 1), (1, 1, -1), (-1, 1, -1))),
        ((0, -1, 0), ((-1, -1, 1), (1, -1, -1), (1, -1, 1),
                      (-1, -1, 1), (-1, -1, -1), (1, -1, -1))),
        ((1, 0, 0),  ((1, 1, 1), (1, -1, 1), (1, -1, -1),
                      (1, 1, 1), (1, -1, -1), (1, 1, -1))),
        ((-1, 0, 0), ((-1, 1, 1), (-1, -1, -1), (-1, -1, 1),
                      (-1, 1, 1), (-1, 1, -1), (-1, -1, -1))),
    )
    glBegin(GL_TRIANGLES)
    for normal, vertices in faces:
        glNormal3f(*normal)
        for vertex in vertices:
            glVertex3f(*vertex)
    glEnd()
def createVertexArraySeparate():
    """Build the cube's vertex data as an interleaved float32 array.

    Returns an array of shape (72, 3): for each of the 36 triangle
    vertices, one row holds the normal and the following row holds the
    position, matching the layout expected by drawCube_glDrawArray().
    """
    # (face normal, six vertices = two triangles), same order as the
    # immediate-mode cube
    faces = (
        ((0, 0, 1),  ((-1, 1, 1), (1, -1, 1), (1, 1, 1),
                      (-1, 1, 1), (-1, -1, 1), (1, -1, 1))),
        ((0, 0, -1), ((-1, 1, -1), (1, 1, -1), (1, -1, -1),
                      (-1, 1, -1), (1, -1, -1), (-1, -1, -1))),
        ((0, 1, 0),  ((-1, 1, 1), (1, 1, 1), (1, 1, -1),
                      (-1, 1, 1), (1, 1, -1), (-1, 1, -1))),
        ((0, -1, 0), ((-1, -1, 1), (1, -1, -1), (1, -1, 1),
                      (-1, -1, 1), (-1, -1, -1), (1, -1, -1))),
        ((1, 0, 0),  ((1, 1, 1), (1, -1, 1), (1, -1, -1),
                      (1, 1, 1), (1, -1, -1), (1, 1, -1))),
        ((-1, 0, 0), ((-1, 1, 1), (-1, -1, -1), (-1, -1, 1),
                      (-1, 1, 1), (-1, 1, -1), (-1, -1, -1))),
    )
    rows = []
    for normal, vertices in faces:
        for vertex in vertices:
            rows.append(normal)
            rows.append(vertex)
    return np.array(rows, 'float32')
def drawCube_glDrawArray():
    """Draw the cube from the prebuilt interleaved array via client-side
    vertex arrays (glDrawArrays)."""
    global gVertexArraySeparate
    varr = gVertexArraySeparate
    glEnableClientState(GL_VERTEX_ARRAY)
    glEnableClientState(GL_NORMAL_ARRAY)
    # Interleaved layout is [nx ny nz x y z] per vertex, hence a stride of
    # 6 floats; normals start at offset 0 ...
    glNormalPointer(GL_FLOAT, 6*varr.itemsize, varr)
    # ... and positions start 3 floats into each record.
    glVertexPointer(3, GL_FLOAT, 6*varr.itemsize, ctypes.c_void_p(varr.ctypes.data + 3*varr.itemsize))
    # 6 floats per vertex -> varr.size/6 vertices in total
    glDrawArrays(GL_TRIANGLES, 0, int(varr.size/6))
def render():
    """Render one frame: camera, light, the Euler-rotated model transform,
    and four shaded cubes plus the global axes."""
    global gCamAng, gCamHeight, xang, zang, nang
    glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
    glEnable(GL_DEPTH_TEST)

    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    gluPerspective(45, 1, 1,10)

    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
    # orbiting camera: radius 5 around the y axis, looking at the origin
    gluLookAt(5*np.sin(gCamAng),gCamHeight,5*np.cos(gCamAng), 0,0,0, 0,1,0)

    # draw global frame
    drawFrame()

    glEnable(GL_LIGHTING)
    glEnable(GL_LIGHT0)
    glEnable(GL_RESCALE_NORMAL)  # keep normals unit-length under glScalef

    # set light properties
    lightPos = (4.,5.,6.,1.)
    glLightfv(GL_LIGHT0, GL_POSITION, lightPos)

    ambientLightColor = (.1,.1,.1,1.)
    diffuseLightColor = (1.,1.,1.,1.)
    specularLightColor = (1.,1.,1.,1.)
    glLightfv(GL_LIGHT0, GL_AMBIENT, ambientLightColor)
    glLightfv(GL_LIGHT0, GL_DIFFUSE, diffuseLightColor)
    glLightfv(GL_LIGHT0, GL_SPECULAR, specularLightColor)

    # Euler-angle model rotation composed as Rz(zang) @ Rx(xang) @ Rz(nang)
    # (a Z-X-Z sequence; a stale comment previously called this "ZYX").
    # An unused `t = glfw.get_time()` and a dead triple-quoted Ry block
    # were removed.
    M = np.identity(4)
    Rx = np.array([[1,0,0],
                   [0, np.cos(xang), -np.sin(xang)],
                   [0, np.sin(xang), np.cos(xang)]])
    Rz = np.array([[np.cos(zang), -np.sin(zang), 0],
                   [np.sin(zang), np.cos(zang), 0],
                   [0,0,1]])
    Rn = np.array([[np.cos(nang), -np.sin(nang), 0],
                   [np.sin(nang), np.cos(nang), 0],
                   [0,0,1]])
    M[:3,:3] = Rz @ Rx @ Rn
    glMultMatrixf(M.T)  # OpenGL expects column-major, hence the transpose

    glScalef(.25,.25,.25)

    # draw cubes: gray at the origin, then red/green/blue offset copies
    glMaterialfv(GL_FRONT, GL_AMBIENT_AND_DIFFUSE, (.5,.5,.5,1.))
    drawCube_glDrawArray()
    glTranslatef(2.5,0,0)
    glMaterialfv(GL_FRONT, GL_AMBIENT_AND_DIFFUSE, (1.,0.,0.,1.))
    drawCube_glDrawArray()
    glTranslatef(-2.5,2.5,0)
    glMaterialfv(GL_FRONT, GL_AMBIENT_AND_DIFFUSE, (0.,1.,0.,1.))
    drawCube_glDrawArray()
    glTranslatef(0,-2.5,2.5)
    glMaterialfv(GL_FRONT, GL_AMBIENT_AND_DIFFUSE, (0.,0.,1.,1.))
    drawCube_glDrawArray()

    glDisable(GL_LIGHTING)
def drawFrame():
    """Draw the global coordinate frame as three colored unit axes
    (x = red, y = green, z = blue)."""
    axes = (
        ((255, 0, 0), (1., 0., 0.)),
        ((0, 255, 0), (0., 1., 0.)),
        ((0, 0, 255), (0., 0., 1.)),
    )
    glBegin(GL_LINES)
    for color, tip in axes:
        glColor3ub(*color)
        glVertex3fv(np.array([0., 0., 0.]))
        glVertex3fv(np.array(tip))
    glEnd()
def key_callback(window, key, scancode, action, mods):
    """GLFW key handler: A/Z, S/X and D/C step the z-, x- and second
    z-Euler angles by +/-10 degrees; V resets all three to zero."""
    global gCamAng, gCamHeight, xang, zang, nang
    if action not in (glfw.PRESS, glfw.REPEAT):
        return
    step = np.radians(10)
    if key == glfw.KEY_A:
        zang += step
    elif key == glfw.KEY_Z:
        zang -= step
    elif key == glfw.KEY_S:
        xang += step
    elif key == glfw.KEY_X:
        xang -= step
    elif key == glfw.KEY_D:
        nang += step
    elif key == glfw.KEY_C:
        nang -= step
    elif key == glfw.KEY_V:
        zang = 0
        xang = 0
        nang = 0
# Shared handle to the cube vertex/normal array; filled in by main() once a
# GL context exists and read by the render loop.
gVertexArraySeparate = None
def main():
    """Create the GLFW window and drive the render loop until it is closed."""
    global gVertexArraySeparate
    if not glfw.init():
        return
    window = glfw.create_window(480,480,'2017029716-10-1', None,None)
    if not window:
        glfw.terminate()
        return
    glfw.make_context_current(window)
    glfw.set_key_callback(window, key_callback)
    glfw.swap_interval(1)  # v-sync: one buffer swap per display refresh
    # Vertex arrays can only be built after a context is current.
    gVertexArraySeparate = createVertexArraySeparate()
    while not glfw.window_should_close(window):
        glfw.poll_events()
        render()
        glfw.swap_buffers(window)
    glfw.terminate()
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | noreply@github.com |
ce624f97e6b59f461ccb016aeca61ba1ca5ceb9b | fb48a5567c094b0d7a1b83a2ea48fd8259450ee6 | /Diffle Hellman/Diffie-Hellman Starter 1.py | cea0e070c079682400845c83f97767c1ffc1a25c | [] | no_license | zoeyy-enabs/Challenges | a89c10b8ba0c2c9df3017deec95e458ae70437c2 | 729a7e9383335a5455e16c8184a817fcd254c552 | refs/heads/master | 2023-02-09T19:14:16.473922 | 2021-01-10T09:19:29 | 2021-01-10T09:19:29 | 300,939,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19 | py | inverse(209, 991)
| [
"noreply@github.com"
] | noreply@github.com |
2503cf4464e2c19eefba93d69073f600d9ab89e3 | ffe20630810998085cb9ac71a9145e9fe4b528b1 | /recommenderModels/POImodels.py | 4ef0fecc1f385d8bba404b252429c0ada02cccb4 | [] | no_license | DankoGVizlore/LCIficontent-GAE | 632df88b0223aad157587a3f4b9c4cbdddd38daa | 9f08d85b6e387423971313f3a9f971f81c1502ed | refs/heads/master | 2021-03-12T20:30:41.224761 | 2014-09-12T17:20:32 | 2014-09-12T17:20:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | from google.appengine.ext import ndb
class POI(ndb.Model):
    """ Model for basic POI
        for days we use 1 - Monday; 2 - Tuesday; 3 - Wednesday; 4 - Thursday; 5 - Friday; 6 - Saturday; 7 - Sunday;
        categoryID is a ID from category table
    """
    name = ndb.StringProperty()        # display name of the point of interest
    categoryID = ndb.IntegerProperty() # id into the category table (see docstring)
    lat = ndb.FloatProperty()          # latitude in decimal degrees -- datum not stated here, presumably WGS84; confirm
    lon = ndb.FloatProperty()          # longitude in decimal degrees
    description = ndb.TextProperty()   # free-form description (TextProperty: unindexed)
    open = ndb.StringProperty()        # opening time stored as a free-form string; format not enforced here
    close = ndb.StringProperty()       # closing time; same free-form format as `open`
    days = ndb.IntegerProperty()       # day-of-week code 1-7 (see docstring); a single int -- verify how callers encode multiple days
    address = ndb.StringProperty()     # street address
    website = ndb.StringProperty()     # website URL
"danko.gutesa@vizlore.com"
] | danko.gutesa@vizlore.com |
b83130ad17b33804cafb2e56e57a0338e7906436 | 68323482aaa3bd7f7cad23e47b836cff28ac7b81 | /env/lib/python2.7/site-packages/djangocms_text_ckeditor/__init__.py | 80e1f3d09d72a2c193e3708d32198290cfe5f97e | [] | no_license | addsimm/cms8 | d8add85985f025993b885b1901afbc981850837d | dc19b010169c046dc2981009c402ad914182fa41 | refs/heads/master | 2021-01-20T11:00:06.630720 | 2015-12-05T17:23:33 | 2015-12-05T17:23:33 | 47,465,287 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | # -*- coding: utf-8 -*-
# Package version string, consumed by packaging metadata.
__version__ = "2.8.0"
# Points Django at the AppConfig to use when the app is listed bare in
# INSTALLED_APPS (the pre-Django-3.2 default_app_config convention).
default_app_config = 'djangocms_text_ckeditor.apps.TextCkeditorConfig'
| [
"adamfsimon@gmail.com"
] | adamfsimon@gmail.com |
4fed593d5f025735e0ad7e586d3fa993077381f3 | 5e5252812e67393a75830b313cd0d746c912123b | /python/Calculating with Functions.py | 5da5ee3fd6e14cf8e4c65e409919b2cbc840f9a6 | [] | no_license | Konohayui/Codewars | 20dfc6b147d2afd68172d5f5824b6c8c8dfa05f1 | 97291462e7b2e42e437355fb676e9152013a5e3a | refs/heads/master | 2021-10-19T18:07:26.973873 | 2019-02-22T22:52:33 | 2019-02-22T22:52:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 915 | py | '''
Modified Xueyimei's solution
for better understanding
'''
# Codewars "Calculating with Functions": each digit is callable and either
# returns its value (rightmost digit) or feeds it into the pending operation,
# e.g. seven(times(five())) == 35.
def zero(f = None):
    """Return 0, or apply the pending operation *f* to 0.

    The original version carried leftover debug print() calls here and in
    one(); they are removed so every digit helper behaves identically.
    """
    return 0 if not f else f(0)
def one(f = None):
    return 1 if not f else f(1)
def two(f = None):
    return 2 if not f else f(2)
def three(f = None):
    return 3 if not f else f(3)
def four(f = None):
    return 4 if not f else f(4)
def five(f = None):
    return 5 if not f else f(5)
def six(f = None):
    return 6 if not f else f(6)
def seven(f = None):
    return 7 if not f else f(7)
def eight(f = None):
    return 8 if not f else f(8)
def nine(f = None):
    return 9 if not f else f(9)
# Operators return the closure the left-hand digit applies to itself;
# int() truncates the division result toward zero, as the kata requires.
def plus(y): return lambda x: int(x+y)
def minus(y): return lambda x: int(x-y)
def times(y): return lambda x: int(x*y)
def divided_by(y): return lambda x: int(x/y)
| [
"noreply@github.com"
] | noreply@github.com |
0ce4d017a693b7e97b1e5d0155a0b3359b9d6cd9 | 845cfc4e60934bc91c837c1123ed6df19c45da2f | /banking.py | fdaeaa996c6ac0208a0d6d8ba3b405c068bf0e77 | [] | no_license | macsee87/bank_system | 28233d7f7c9392ccaee9518fcb101d9e7f9b1fa0 | 7702148b0c16240b5e79e6fe16b7707d9207c0a8 | refs/heads/master | 2022-11-25T15:39:43.953679 | 2020-07-12T17:06:59 | 2020-07-12T17:06:59 | 279,108,254 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,007 | py | import sqlite3
import random
import os.path
# check if the database already exists
# NOTE(review): both branches open the same connection/cursor; only the
# CREATE TABLE differs, so `CREATE TABLE IF NOT EXISTS` would collapse this.
if not os.path.isfile('card.s3db'):
    conn = sqlite3.connect('card.s3db') # Make the connection with the database
    c = conn.cursor() # Create a cursor
    # Create table
    c.execute('''CREATE TABLE card(id INTEGER,number TEXT,pin TEXT,balance INTEGER DEFAULT 0);''')
else:
    # Make connection with the database and create a cursor
    conn = sqlite3.connect('card.s3db')
    c = conn.cursor()
class Bank:
    """Console banking state machine backed by the module-level sqlite3 cursor ``c``.

    ``status`` selects which menu ``main_menu()`` presents; ``login_card``
    remembers the card number the user authenticated with.

    NOTE(review): several methods interpolate card numbers straight into SQL
    with ``%`` formatting (see select_balance/add_income/do_transfer/
    check_account/close_account).  With user-typed card numbers this is SQL
    injection; log_in() shows the correct ``?`` placeholder style.
    """
    # State-machine states; only OFF / IDLE / USER_LOGGED are used below.
    STATUS_OFF = -1
    STATUS_IDLE = 0
    STATUS_WAIT_CARD_NUMBER = 1
    STATUS_WAIT_PIN = 2
    STATUS_USER_LOGGED = 3
    # Menu texts and user-visible messages.
    _MENU_MAIN = " << MENU >>\n1. Create an account\n2. Log into account\n0. Exit\n"
    _MENU_USER = "\n1. Balance\n2. Add income\n3. Do transfer\n4. Close account\n5. Log out\n0. Exit"
    _MSG_LOGIN_SUCCESS = "\nYou have successfully logged in!"
    _MSG_LOGIN_FAIL = "\nWrong card number or PIN!"
    _MSG_SUCCESS = "\nSuccess!"
    _MSG_NOT_ENOUGH_MONEY = "Not enough money!"
    _MSG_ANOTHER_ACCOUNT = "\nYou can't transfer money to the same account!"
    _MSG_CHECKSUM_FAIL = "\nProbably you made mistake in the card number. Please try again!"
    _MSG_INCOME_AMOUNT = "\nEnter income:\n"
    _MSG_INCOME_ADDED = "Income was added!\n"
    _MSG_CLOSE_ACCOUNT = "\nThe account has been closed!"
    _MSG_BYE = "\nBye!"
    def __init__(self):
        """Start in the idle (not-logged-in) state showing the main menu."""
        self.status = Bank.STATUS_IDLE
        self.prompt = Bank._MENU_MAIN
        self.login_card = ""
    def main_menu(self):
        """Read one menu choice from stdin and dispatch it.

        Returns a message string for some actions (e.g. log out) and None
        otherwise; '0' exits the process outright via exit().
        """
        if self.status == Bank.STATUS_IDLE:
            main_choice = input()
            if main_choice == '0':
                exit()
            if main_choice == '1':
                self.create_account()
            elif main_choice == '2':
                self.log_in()
        elif self.status == Bank.STATUS_USER_LOGGED:
            logged_choice = input()
            if logged_choice == '0':
                print(Bank._MSG_BYE)
                exit()
            if logged_choice == '1':
                # NOTE(review): status is already USER_LOGGED in this branch;
                # these reassignments are no-ops.
                self.status = Bank.STATUS_USER_LOGGED
                print(f"Balance: {self.select_balance(self.login_card)}\n")
            if logged_choice == '2':
                self.status = Bank.STATUS_USER_LOGGED
                # int() raises ValueError on non-numeric input -- unhandled.
                n_income = int(input(Bank._MSG_INCOME_AMOUNT))
                self.add_income(n_income, self.login_card)
            if logged_choice == '3':
                self.status = Bank.STATUS_USER_LOGGED
                to_account = input("Transfer\nEnter card number:\n")
                # Validate the Luhn check digit before touching the database;
                # to_account[15] raises IndexError on inputs shorter than 16 chars.
                if self.checksum(number=to_account[0:15]) == to_account[15]:
                    if self.check_account(to_account) == to_account:
                        if self.login_card != to_account:
                            n_transfer = int(input("\nEnter how much money you want to transfer:\n"))
                            if self.select_balance(card_number=self.login_card) >= n_transfer:
                                self.do_transfer(to_account, n_transfer, card_number=self.login_card)
                                return
                            else:
                                return print(Bank._MSG_NOT_ENOUGH_MONEY)
                        else:
                            return print(Bank._MSG_ANOTHER_ACCOUNT)
                    else:
                        # NOTE(review): this is "account not found", but the
                        # checksum-failure message is reused for it.
                        return print(Bank._MSG_CHECKSUM_FAIL)
                else:
                    return print(Bank._MSG_CHECKSUM_FAIL)
            if logged_choice == '4':
                self.status = Bank.STATUS_USER_LOGGED
                self.close_account(card_number=self.login_card)
                self.status = Bank.STATUS_IDLE
                self.prompt = Bank._MENU_MAIN
                return
            if logged_choice == '5':
                self.status = Bank.STATUS_IDLE
                self.prompt = Bank._MENU_MAIN
                return '\nYou have successfully logged out!\n'
    def create_account(self):
        """Create a card with a random Luhn-valid number and 4-digit PIN, then print both."""
        new_card_number = self.card_number_generator()
        new_pin = f"{random.randint(0, 9999):04}"
        self.save_new_account(new_card_number, new_pin)
        print("\nYour card has been created\nYour card number:\n" f"{new_card_number}"
              "\nYour card PIN:\n" f"{new_pin}")
    def log_in(self):
        """Prompt for card number and PIN; switch to the user menu on a match."""
        self.login_card = input('\nEnter your card number:\n')
        login_pin = input('Enter your PIN:\n')
        # Correctly parameterized query (the f-string prefix is redundant here).
        c.execute(f'SELECT * FROM card WHERE number = ? AND pin = ?', (self.login_card, login_pin))
        if c.fetchone() is None:
            print(Bank._MSG_LOGIN_FAIL)
        else:
            self.status = Bank.STATUS_USER_LOGGED
            self.prompt = Bank._MENU_USER
            # NOTE(review): self-assignment below is a no-op.
            self.login_card = self.login_card
    def card_number_generator(self):
        """Return a 16-digit card number: IIN 400000 + 9 random digits + Luhn check digit."""
        iin = '400000'
        can = ''
        for i in range(9):
            n = str(random.randint(0, 9))
            can += n
        checksum = self.checksum(iin + can)
        return iin + can + checksum
    def checksum(self, number):
        """Return the Luhn check digit (as a string) for a 15-digit number string."""
        number_list = [int(num) for num in number]
        # Luhn: double every second digit starting from the leftmost,
        # subtracting 9 when the doubled digit exceeds 9.
        for index in range(0, 15, 2):
            number_list[index] *= 2
            if number_list[index] > 9:
                number_list[index] -= 9
        # The check digit is whatever makes the total a multiple of 10.
        checker = 0
        while (checker + sum(number_list)) % 10 != 0:
            checker += 1
        return str(checker)
    def save_new_account(self, card_number, pin):
        """Insert a new card row (balance defaults to 0) and commit."""
        c.execute(f'INSERT INTO card(number, pin) VALUES (?,?)', (card_number, pin))
        conn.commit()
    def select_balance(self, card_number):
        """Return the balance for *card_number* as an int.

        SECURITY(review): the card number is %-interpolated into the SQL --
        injectable; use a ? placeholder as log_in() does.
        """
        c.execute(f'select balance from card where number = %s' % card_number)
        return int(c.fetchone()[0])
    def add_income(self, n_income, card_number):
        """Add *n_income* to the card's balance and commit.

        SECURITY(review): %-interpolated SQL -- injectable via card_number.
        """
        c.execute(f'update card set balance = (balance + %d) where number = %s' % (n_income, card_number))
        conn.commit()
        return print(Bank._MSG_INCOME_ADDED)
    def do_transfer(self, to_account: str, n_transfer: int, card_number):
        """Move *n_transfer* from *card_number* to *to_account*.

        NOTE(review): the two updates commit separately, so a crash between
        them would debit without crediting; SQL is %-interpolated (injectable).
        """
        c.execute('update card set balance = (balance - %d) where number = %s' % (n_transfer, card_number))
        conn.commit()
        c.execute('update card set balance = (balance + %d) where number = %s' % (n_transfer, to_account))
        conn.commit()
        return print(Bank._MSG_SUCCESS)
    def check_account(self, to_account: str) -> "str | None":
        """Return *to_account* if a matching card row exists, else print a message and return None.

        NOTE(review): the bare except also hides non-lookup failures such as
        SQL errors; SQL is %-interpolated (injectable).
        """
        c.execute('select number from card where number = %s' % to_account)
        try:
            return c.fetchone()[0]
        except:
            print('Not exists card!')
            return
    def close_account(self, card_number):
        """Delete the card row and commit.

        SECURITY(review): %-interpolated SQL -- injectable via card_number.
        """
        c.execute('delete from card where number = %s' % card_number)
        conn.commit()
        return Bank._MSG_CLOSE_ACCOUNT
# Interactive driver: show the current menu and process one choice per pass
# until the bank is switched off (exit() inside main_menu ends the process).
my_bank = Bank()
while my_bank.status != Bank.STATUS_OFF:
    print(my_bank.prompt)
    # main_menu() only returns a message for some actions (e.g. log out).
    # The original `print(my_bank.main_menu())` printed a literal "None"
    # after every action without a message.
    result = my_bank.main_menu()
    if result is not None:
        print(result)
conn.close()
| [
"noreply@github.com"
] | noreply@github.com |
6ade2e993792afdb890bf1237f2f646754a03800 | 2d5ac089c41dc5b495ca63e4f064038013fb8a57 | /text.py | 930048c277c41a51b51caf8a7e7806e981ca1ab4 | [] | no_license | NoNamesUserName/homework | 21c8fc84c311d2f85f66a8285bf3e9d72ce64777 | 78ecdd5c1d49b251fec3efef0ed12e5f707e38b3 | refs/heads/master | 2020-04-18T08:06:09.220477 | 2019-05-02T14:08:03 | 2019-05-02T14:08:03 | 167,384,365 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,638 | py | """
Noah
Rivera
Block1
"""
"""
file_stats.py
A program to determine the following statistics about the file:
The number of lines in the file
The number of words in the file
The number of 4 letter words in the file
The number of each of the vowels in the file
The longest word in the file
"""
"""
open/close.txt
"""
def opent():
    """Open the analysed text file for reading and return the file object."""
    return open ("programs3.txt","r")
def close():
    # Closes whatever file object the module-level global `txtf` currently
    # holds; callers must have assigned txtf = opent() beforehand.
    txtf.close()
"""
open/close.txt/split
"""
"""
length
of lines
"""
# Count and print the number of lines in the file.
txtf=opent()
lines = txtf.readlines()
numol = len(lines)
close()
print (numol)
"""
length
of lines
"""
"""
num of words
"""
def numw():
    """Return the number of whitespace-separated words in the file.

    The original called ``close()`` after ``return``, which is unreachable,
    so the file handle leaked; a ``with`` block closes it deterministically.
    """
    with opent() as txtf:
        return len(txtf.read().split())
"""
num of words
"""
"""
number of each vowel
"""
# Count each vowel by reading the file one character at a time, then print
# the five per-vowel counts followed by the combined total.
countA=0
countE=0
countI=0
countO=0
countU=0
countALL=0
aA="a"
eE="e"
iI="i"
oO="o"
uU="u"
# First pass: only to learn the file length in characters.
txtf=opent()
x=0
ntxt=len(txtf.read())
close()
# Second pass: classify characters one by one (case-insensitively).
txtf=opent()
while x <= ntxt:
    # NOTE(review): <= iterates ntxt+1 times; the extra read(1) returns ""
    # at EOF and matches no branch, so the counts stay correct.
    x+=1
    newtx=txtf.read(1).lower()
    if aA == newtx:
        countA+=1
    elif eE in newtx:
        # `in` on a single-character read behaves like == here.
        countE+=1
    elif iI in newtx:
        countI+=1
    elif oO in newtx:
        countO+=1
    elif uU in newtx:
        countU+=1
    countALL=countA+countE+countI+countO+countU
close()
print (countA)
print (countE)
print (countI)
print (countO)
print (countU)
print (countALL)
"""
number of each vowel
"""
"""
num of 4 letter words
"""
def fl():
    """Return the number of 4-letter words in the file.

    The original returned the never-defined name ``nfl`` (a NameError at
    call time) and had an unreachable ``close()`` after the return; this
    version actually counts the words and closes the file via ``with``.
    """
    with opent() as txtf:
        return sum(1 for word in txtf.read().split() if len(word) == 4)
"""
num of 4 letter words
"""
"""
longest word
"""
"""
longest word
"""
| [
"noreply@github.com"
] | noreply@github.com |
362bd2587bf53f17a12c58f928ca7a559f73578e | 8db400391078d9325b928e3c92fb61ed275164e7 | /pypong.py | 26da7d9271f25399b76ff3a3f1ee03bca561abf6 | [] | no_license | Andy87730/tests | e631671e81cb6bbd86df162ef413f104a09abb79 | 2b9d2fccea84d5d471635ec63afe40e6dff27d16 | refs/heads/master | 2020-05-04T03:20:53.568831 | 2020-04-29T19:52:25 | 2020-04-29T19:52:25 | 178,945,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,906 | py | import pygame
import random
import sys
# Window size in pixels.
x = 500
y = 500
# Paddle ("Schlaeger") geometry and position: sbreite = width, shoehe = height.
sbreite = 100
shoehe = 15
sx = 200
sy = 450
# Ball position (centre), radius and per-frame velocities.
bx = int(x/2)
by = int(y/2)
brad = 15
speed = 0        # paddle velocity set by the arrow keys
bxspeed = 1
byspeed = -2
leben = 3        # remaining lives
pygame.init()
screen = pygame.display.set_mode([x,y])
screen.fill((0,0,0))
pygame.draw.circle(screen, (255,255,0), (bx,by), brad, 0)
pygame.draw.rect(screen, (255,40,0), (sx,sy,sbreite,shoehe), 0)
pygame.display.flip()
def sblock():
    # Stop the paddle when it reaches either side wall.
    global speed
    if sx <= 0 or sx >= x-sbreite:
        speed = 0
def ballbewegung():
    # Advance the ball by its current velocity.
    global bx,by
    bx += bxspeed
    by += byspeed
def reset():
    # Put paddle and ball back to the start, randomise the ball direction
    # (never zero on either axis), redraw, and pause one second.
    global byspeed,bxspeed,leben,bx,by,sx,sy,speed
    sx = 200
    sy = 450
    bx = int(x/2)
    by = int(y/2)
    speed = 0
    bxspeed = random.randint(-2,2)
    if bxspeed == 0:
        bxspeed = 1
    byspeed = random.randint(-2,2)
    if byspeed == 0:
        byspeed = 2
    screen.fill((0,0,0))
    pygame.draw.circle(screen, (255,255,0), (bx,by), brad, 0)
    pygame.draw.rect(screen, (255,40,0), (sx,sy,sbreite,shoehe), 0)
    pygame.display.flip()
    pygame.time.wait(1000)
def ballblock():
    # Bounce off the top and side walls; in the y band just above the paddle,
    # either bounce (ball over the paddle, with a 15px grace margin) or lose
    # a life and reset.
    global byspeed,bxspeed,leben
    if by-brad <= 0:
        byspeed *= -1
    if bx-brad <= 0:
        bxspeed *= -1
    if bx+brad >= x:
        bxspeed *= -1
    if by >= 435 and by <= 440:
        if bx >= sx-15 and bx <= sx+sbreite+15:
            byspeed *= -1
        else:
            leben -= 1
            reset()
def sbewegung():
    # Advance the paddle by its current velocity.
    global sx
    sx += speed
# Main loop: poll input, update paddle and ball, redraw, ~5 ms per frame.
while leben>0:
    for event in pygame.event.get():
        if event.type == pygame.QUIT: sys.exit()
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_LEFT:
                speed = -2
            if event.key == pygame.K_RIGHT:
                speed = 2
    screen.fill((0,0,0))
    sbewegung()
    sblock()
    pygame.draw.rect(screen, (255,40,0), (sx,sy,sbreite,shoehe), 0)
    ballbewegung()
    ballblock()
    pygame.draw.circle(screen, (255,255,0), (bx,by), brad, 0)
    pygame.display.flip()
    pygame.time.wait(5)
print("haha verloren")
"noreply@github.com"
] | noreply@github.com |
df3ecbe593627c41b7a80ce190bed5815b414e04 | cf182785179d785ee30ccc2e73cabe30008f711c | /tools/c7n_mailer/tests/test_azure.py | 65d173cd9006c29c9141408c80844fb0b51124ae | [
"Apache-2.0"
] | permissive | ChrisRx/cloud-custodian | c320dd39797add280e926ce5642e5f49895ee22d | f2fe25e75f9c863f276da5d04e54f59981eac38a | refs/heads/master | 2020-03-27T14:10:28.371559 | 2018-08-28T21:33:09 | 2018-08-28T21:33:09 | 134,552,314 | 0 | 1 | Apache-2.0 | 2018-06-28T14:31:50 | 2018-05-23T10:19:24 | Python | UTF-8 | Python | false | false | 3,416 | py | # Copyright 2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import json
import unittest
import zlib
from c7n_azure.storage_utils import StorageUtilities
from c7n_mailer.azure.azure_queue_processor import MailerAzureQueueProcessor
from common import MAILER_CONFIG_AZURE, ASQ_MESSAGE, logger
from mock import MagicMock, patch
class AzureTest(unittest.TestCase):
    """Unit tests for MailerAzureQueueProcessor with delivery and storage mocked out.

    Note: @patch decorators apply bottom-up, so the *last* decorator
    corresponds to the *first* injected mock argument.
    """
    def setUp(self):
        # A fake Azure queue message whose content is the zlib+base64
        # encoding the processor expects to decode.
        self.compressed_message = MagicMock()
        self.compressed_message.content = base64.b64encode(
            zlib.compress(ASQ_MESSAGE.encode('utf8')))
        self.loaded_message = json.loads(ASQ_MESSAGE)
    @patch('c7n_mailer.azure.sendgrid_delivery.SendGridDelivery.sendgrid_handler')
    @patch('c7n_mailer.azure.sendgrid_delivery.SendGridDelivery.get_to_addrs_sendgrid_messages_map')
    def test_process_azure_queue_message_success(self, mock_get_addr, mock_handler):
        # Handler reports success -> the processor should return True.
        mock_handler.return_value = True
        mock_get_addr.return_value = 42
        # Run the process messages method
        azure_processor = MailerAzureQueueProcessor(MAILER_CONFIG_AZURE, logger)
        self.assertTrue(azure_processor.process_azure_queue_message(self.compressed_message))
        # Verify mock calls were correct
        mock_get_addr.assert_called_with(self.loaded_message)
        mock_handler.assert_called_with(self.loaded_message, 42)
    @patch('c7n_mailer.azure.sendgrid_delivery.SendGridDelivery.sendgrid_handler')
    @patch('c7n_mailer.azure.sendgrid_delivery.SendGridDelivery.get_to_addrs_sendgrid_messages_map')
    def test_process_azure_queue_message_failure(self, mock_get_addr, mock_handler):
        # Handler reports failure -> the processor should return False.
        mock_handler.return_value = False
        mock_get_addr.return_value = 42
        # Run the process messages method
        azure_processor = MailerAzureQueueProcessor(MAILER_CONFIG_AZURE, logger)
        self.assertFalse(azure_processor.process_azure_queue_message(self.compressed_message))
        # Verify mock calls were correct
        mock_get_addr.assert_called_with(self.loaded_message)
        mock_handler.assert_called_with(self.loaded_message, 42)
    @patch.object(MailerAzureQueueProcessor, 'process_azure_queue_message')
    @patch.object(StorageUtilities, 'get_queue_client_by_uri')
    @patch.object(StorageUtilities, 'delete_queue_message')
    @patch.object(StorageUtilities, 'get_queue_messages')
    def test_run(self, mock_get_messages, mock_delete, mock_client, mock_process):
        # First poll yields one message, second poll is empty and ends the run.
        mock_get_messages.side_effect = [[self.compressed_message], []]
        mock_client.return_value = (None, None)
        mock_process.return_value = True
        # Run the 'run' method
        azure_processor = MailerAzureQueueProcessor(MAILER_CONFIG_AZURE, logger)
        azure_processor.run(False)
        self.assertEqual(2, mock_get_messages.call_count)
        self.assertEqual(1, mock_process.call_count)
        mock_delete.assert_called()
| [
"noreply@github.com"
] | noreply@github.com |
d939e0132b157eb8dafacb0512eb6e9d390ebdfc | 1e86ae73b8340c453946ca9ccc39802743dc52ff | /src/utils/viewsets.py | 7e3a0c1890f77c6f61cc2353ab18866ffb1a411b | [] | no_license | pranavkneeraj/contact-backend | 65afc3619766d233d68c8c551ee0d3e0f0891ced | 19707c075d149dc95bedb8b14cddc23317e2f889 | refs/heads/master | 2020-04-05T14:02:50.182024 | 2017-06-29T12:25:57 | 2017-06-29T12:25:57 | 94,749,586 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | """
Common Viewsets
"""
from rest_framework_extensions.mixins import NestedViewSetMixin
from rest_framework import viewsets
from rest_framework.views import APIView
from rest_framework.response import Response
from .pagination import StandardResultsSetPagination
class PaginatedViewSetMixin(NestedViewSetMixin, viewsets.ModelViewSet): # pylint: disable=too-many-ancestors
"""
Viewset for Paginated Values
"""
pagination_class = StandardResultsSetPagination
| [
"pranav.k@amazatic.com"
] | pranav.k@amazatic.com |
c1cbec664bbc7d9d3fbeb57dda8c5cd6a4e338e9 | eb8660d8a7c7557af0fd681a4cce305e1fc73ef9 | /client-samples/asterisk/eagi-ru.py | a3fc941af833160c49e4b1a678ce0de03ecb2971 | [
"Apache-2.0"
] | permissive | morfeusys/vosk-server | d4639eaaae7b2e171bd99618513100100d94e773 | 955517bfcc8a7ef3f93ed5ace50052234aa3bf74 | refs/heads/master | 2021-01-14T19:18:55.803416 | 2020-02-24T12:57:55 | 2020-02-24T12:57:55 | 242,727,733 | 1 | 0 | Apache-2.0 | 2020-02-24T12:14:17 | 2020-02-24T12:14:16 | null | UTF-8 | Python | false | false | 1,260 | py | #!/usr/bin/python3
import json
import os
import subprocess
import uuid

from asterisk.agi import *
from websocket import create_connection
AUDIO_FD = 3                                       # EAGI delivers the caller's audio on file descriptor 3
CONTENT_TYPE = 'audio/l16; rate=8000; channels=1'  # NOTE(review): unused in this script -- kept for reference?
ACCEPT = 'audio/pcm'                               # NOTE(review): unused in this script
def play_text(agi, text):
    """Synthesise *text* with espeak, resample it to 8 kHz and play it to the caller.

    Args:
        agi: the AGI session used to stream the rendered file.
        text: phrase to speak; may contain recognised caller speech.
    """
    fn = str(uuid.uuid4())
    wav22 = "/tmp/%s.22.wav" % fn
    wav8 = "/tmp/%s.wav" % fn
    # Pass arguments as a list instead of interpolating *text* into a shell
    # command line: the original os.system() calls allowed shell injection
    # through quotes or metacharacters in the recognised text.
    subprocess.run(["espeak", "-v", "ru", "-w", wav22, text])
    subprocess.run(["sox", wav22, "-r", "8000", wav8])
    agi.stream_file("/tmp/%s" % fn)
    os.remove(wav22)
    os.remove(wav8)
    os.read(AUDIO_FD, 1000000)  # Read remaining chunks
def process_chunk(agi, ws, buf):
    """Send one chunk of caller audio to the recogniser and act on the reply.

    Blocks on ws.recv(); when the decoder returns a final result (a 'text'
    key in the JSON), the recognised phrase is read back to the caller.
    """
    ws.send_binary(buf)
    res = json.loads(ws.recv())
    agi.verbose(str(res))
    if 'text' in res:
        play_text(agi, "Распознано " + res['text'])
def startAGI():
    """Greet the caller, then stream audio from the EAGI fd to the websocket
    recogniser until the channel hangs up, closing the socket on exit."""
    agi = AGI()
    agi.verbose("EAGI script started...")
    ani = agi.env['agi_callerid']
    did = agi.env['agi_extension']
    agi.verbose("Call answered from: %s to %s" % (ani, did))
    play_text(agi, "Привет")
    ws = create_connection("ws://localhost:2700")
    try:
        while True:
            # 8000 bytes of 16-bit / 8 kHz mono is ~0.5 s of audio per chunk
            # (per CONTENT_TYPE above).
            data = os.read(AUDIO_FD, 8000)
            if not data:
                break  # EOF on the audio fd: the caller hung up
            process_chunk(agi, ws, data)
    finally:
        ws.close()
startAGI()
| [
"nshmyrev@gmail.com"
] | nshmyrev@gmail.com |
5b729775f69c7b6aab5d04d8874c1847ce0303e3 | 232c4c67ebe5241aa180d9434fca8ae290c9aad1 | /cogs/poeng.py | 147b96756c5dc6edf87e7aaaad6dc4787239d742 | [
"MIT"
] | permissive | msingstad/ProgBott | 4049d3ab9c707f0d6ee7a49c4139281440c1d78c | 3ae6daf3aebef87f58a00c1c3fad7abeabcf182d | refs/heads/master | 2023-08-25T10:11:32.031560 | 2021-07-24T20:28:26 | 2021-07-24T20:31:03 | 415,925,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,100 | py | # Discord Packages
import discord
from discord.ext import commands
from discord.ui import View
# Bot Utilities
from cogs.utils.defaults import easy_embed
import asyncio
import codecs
import json
import os
import time
# Define a simple View that gives us a confirmation menu
class Confirm(View):
    """Two-button Ja/Nei confirmation view attached to a bot reply.

    ``value`` ends up True (Ja), False (Nei), or stays None if the view
    times out before anyone presses a button.
    """
    def __init__(self, parent: discord.Message):
        super().__init__()
        self.value = None     # interaction result; None until a button is pressed
        self.parent = parent  # the message whose author may answer
    async def interaction_check(self, interaction: discord.Interaction):
        # Only the original author or the hard-coded user id (presumably the
        # bot owner -- confirm) may press the buttons; everyone else gets an
        # ephemeral rejection and the implicit falsy (None) return blocks them.
        if interaction.user.id in [self.parent.author.id, 120970603556503552]:
            return True
        else:
            await interaction.response.send_message("Dette er ikke din melding", ephemeral=True)
    @discord.ui.button(label="Ja", style=discord.ButtonStyle.green)
    async def confirm(self, button: discord.ui.Button, interaction: discord.Interaction):
        self.value = True
        self.stop()
    @discord.ui.button(label="Nei", style=discord.ButtonStyle.grey)
    async def cancel(self, button: discord.ui.Button, interaction: discord.Interaction):
        self.value = False
        self.stop()
class Poeng(commands.Cog):
    """Cog that awards and tracks "stars" (points) for helpful members.

    Stars are stored per message id in ``teller_data["meldinger"]`` and
    periodically flushed to disk; the set of "thanks" trigger phrases lives
    in ``settings_data["takk"]``.  Norwegian docstrings below are shown as
    user-facing command help in Discord and are therefore left untouched.
    """
    def __init__(self, bot):
        self.bot = bot
        self.teller_data = {}
        self.cache_time = time.time()
        self.settings_file = bot.data_dir + "/poeng/innstilinger.json"
        self.teller_file = bot.data_dir + "/poeng/teller.json"
        self.load_json("settings")
        self.load_json("teller")
        # Background task that periodically persists the star counter.
        self.bot.loop.create_task(self.cache_loop())
    @commands.Cog.listener()
    async def on_reaction_add(self, reaction, user):
        # A medal reaction on someone else's untracked, non-bot message
        # starts the star-confirmation flow with the author as the helper.
        if reaction.message.author.id == user.id:
            pass
        elif not str(reaction.message.id) in self.teller_data["meldinger"] and reaction.emoji == "🏅":
            if not reaction.message.author.bot:
                # NOTE(review): dudes=None here means confirm_star will crash
                # on dudes["id"] if the message contains mentions -- verify.
                return await self.confirm_star(message=reaction.message, giver=user.id,
                                               hjelper=[reaction.message.author.id], dudes=None)
    @commands.Cog.listener()
    async def on_message(self, message):
        # Only human messages that mention someone can trigger the flow.
        if not message.author.bot and message.mentions:
            await self._filter(message)
    @commands.Cog.listener()
    async def on_message_edit(self, before, after):
        # Re-check edits made within 60 s of posting; AttributeError covers
        # payloads without an edited_at timestamp.
        try:
            if not after.author.bot and after.mentions \
                    and (after.edited_at.timestamp() - before.created_at.timestamp()) < 60:
                await self._filter(after, before=before)
        except AttributeError:
            pass
    # TODO: half-stars?
    async def _filter(self, message, before=None, **kwarg):
        """Start the confirmation flow when the message looks like a thank-you."""
        def check(message):
            # A message matches when a thanks-phrase occurs inside a help
            # channel/category, or starts/ends the message (optionally with
            # one trailing character, e.g. punctuation).
            for word in self.settings_data["takk"]:
                word_ = word.lower()
                content_ = message.content.lower()
                if (
                    word_ in content_ and
                    (
                        "hjelp" in
                        (
                            message.channel.name or
                            message.channel.category.name).lower()
                    )
                ) or (
                    content_.startswith(word_) or
                    content_.endswith(word_) or
                    content_[:-1].endswith(word_)
                ):
                    return True
        if not before:
            if check(message):
                return await self.confirm_star(message)
        elif before:
            # For edits: skip if the pre-edit text already matched.
            if check(before):
                return
            if check(message):
                return await self.confirm_star(message)
    async def confirm_star(self, message, dudes={"id": [], "mention": []}, **kwarg):
        """Ask via a Ja/Nei view whether to record a star for the mentioned helpers.

        BUG(review): ``dudes`` is a mutable default argument, so the id and
        mention lists accumulate across calls that rely on the default.
        Use a None sentinel and build the dict inside the function.
        """
        for dude in message.mentions:
            if dude is self.bot.user:
                continue
            if dude is message.author:
                continue
            dudes["id"].append(dude.id)
            dudes["mention"].append(dude.mention)
        msg_data = {
            "hjelper": kwarg.get("hjelper") or dudes["id"],
            "giver": kwarg.get("giver") or message.author.id,
            "link": message.jump_url
        }
        conf_view = Confirm(parent=message)
        reply = await message.reply("Registrer stjerne?", view=conf_view)
        await message.channel.trigger_typing()
        await conf_view.wait()
        if conf_view.value is None:
            # Timed out without an answer: drop the prompt silently.
            return await reply.delete()
        elif conf_view.value:
            menn = [self.bot.get_user(person).mention for person in msg_data["hjelper"]]
            embed = easy_embed(self, message)
            embed.title = "Ny stjerne tildelt!"
            embed.description = f'<@{msg_data["giver"]}> ga {", ".join(menn)} en stjerne!'
            self.teller_data["meldinger"][str(message.id)] = msg_data
            self.cacher()
            await reply.edit(content=None, embed=embed, view=[])
        else:
            return await reply.delete()
    @commands.guild_only()
    @commands.group(name="stjerne")
    async def pGroup(self, ctx):
        """
        Kategori for styring av poeng
        """
        if ctx.invoked_subcommand is None:
            await ctx.send_help(ctx.command)
    @pGroup.command(name="sjekk")
    async def check(self, ctx, user: discord.Member = None):
        """
        Komanndo for å sjekke stjerner
        """
        if not user:
            user = ctx.author
        embed = easy_embed(self, ctx)
        counter = 0
        # Count every star recorded for this user; embed links only for the
        # first five to keep the embed within size limits.
        for msg in self.teller_data["meldinger"]:
            for helper in self.teller_data["meldinger"][msg]["hjelper"]:
                if helper == user.id:
                    counter += 1
                    if counter <= 5:
                        fyr = "Ukjent bruker"
                        try:
                            fyr = self.bot.get_user(self.teller_data["meldinger"][msg]["giver"]).name
                        except AttributeError:
                            pass
                        embed.add_field(
                            name=f"Hjalp {fyr} her:",
                            value=f'[Link]({self.teller_data["meldinger"][msg]["link"]})',
                            inline=False
                        )
        embed.title = "Boken"
        # Escalating flavour text; later thresholds overwrite earlier ones.
        desc = f"{user.mention} har {counter} stjerner i boka."
        if counter == 1:
            desc = f"{user.mention} har {counter} stjerne i boka"
        if 5 <= counter:
            desc = f"{user.mention} har {counter} stjerner i boka"
        if 10 <= counter:
            desc = f"{user.mention} har jobbet bra, her er det {counter} stjerner i boka!"
        if 15 <= counter:
            desc = f"{user.mention} har lagt inn en fantastisk jobb, {counter} stjerner i boka!"
        if embed.fields:
            desc += f"\n\nViser de {len(embed.fields)} første:"
        embed.description = desc
        await ctx.send(embed=embed)
    @commands.is_owner()
    @pGroup.group()
    async def admin(self, ctx):
        """
        Kategori for instillinger
        """
        if ctx.invoked_subcommand is None:
            await ctx.send_help(ctx.command)
    @admin.command(name="takk")
    async def set_thanks(self, ctx, thanks_phrase):
        """
        Kommando for å sette takkeord
        """
        # Append the phrase, creating the list on first use (KeyError path).
        try:
            self.settings_data["takk"].append(thanks_phrase)
            await ctx.send(f"La til {thanks_phrase} i lista")
        except KeyError:
            self.settings_data["takk"] = []
            self.settings_data["takk"].append(thanks_phrase)
        except Exception:
            return self.bot.logger.error("Failed to set thanks_phrase: %s" % thanks_phrase)
        self.save_json("settings")
        self.load_json("settings")
    async def cache_loop(self):
        # Flush/reload the counter every five hours.
        while True:
            self.cacher()
            await asyncio.sleep(60*60*5)
    def cacher(self):
        # Persist and reload the counter, but at most once per two minutes.
        if time.time() - 120 > float(self.cache_time):
            self.save_json("teller")
            self.load_json("teller")
            self.bot.logger.debug("Reloaded data cache")
            self.cache_time = time.time()
    def load_json(self, mode):
        """Load either the counter file or the settings file into memory."""
        if mode == "teller":
            with codecs.open(self.teller_file, "r", encoding="utf8") as json_file:
                self.teller_data = json.load(json_file)
        elif mode == "settings":
            with codecs.open(self.settings_file, "r", encoding="utf8") as json_file:
                self.settings_data = json.load(json_file)
    def save_json(self, mode):
        """Write the counter or settings data to disk as pretty-printed JSON.

        NOTE(review): json.dump writes into the already-truncated file, so a
        serialization error can still leave a truncated file despite the log
        message; logger.warn is also the deprecated alias of logger.warning.
        """
        if mode == "teller":
            try:
                with codecs.open(self.teller_file, "w", encoding="utf8") as outfile:
                    json.dump(self.teller_data, outfile, indent=4, sort_keys=True)
            except Exception as e:
                return self.bot.logger.warn("Failed to validate JSON before saving:\n%s\n%s" % (e, self.teller_data))
        elif mode == "settings":
            try:
                with codecs.open(self.settings_file, "w", encoding="utf8") as outfile:
                    json.dump(self.settings_data, outfile, indent=4, sort_keys=True)
            except Exception as e:
                return self.bot.logger.warn("Failed to validate JSON before saving:\n%s\n%s" % (e, self.settings_data))
def check_folder(data_dir):
    """Make sure the cog's data folder <data_dir>/poeng exists."""
    folder = f"{data_dir}/poeng"
    if not os.path.exists(folder):
        os.makedirs(folder)
def check_files(data_dir):
    """Seed the poeng data files with valid JSON defaults when they are missing.

    Existing files are left untouched (only FileNotFoundError triggers a
    write; a corrupt file still raises on json.load, exactly as before).
    """
    defaults = {
        f"{data_dir}/poeng/teller.json": {"meldinger": {}},
        f"{data_dir}/poeng/innstilinger.json": {"takk": []},
    }
    for path, default in defaults.items():
        try:
            with codecs.open(path, "r", encoding="utf8") as fp:
                json.load(fp)
        except FileNotFoundError:
            with codecs.open(path, "w", encoding="utf8") as fp:
                json.dump(default, fp)
def setup(bot):
    """discord.py extension entry point: prepare data files and register the cog."""
    check_folder(bot.data_dir)
    check_files(bot.data_dir)
    bot.add_cog(Poeng(bot))
# TODO: make it save on unload
# def teardown(bot):
# obj = Poeng(bot)
# obj.save_json("teller")
# print("I am being unloaded!")
| [
"me@roxedus.dev"
] | me@roxedus.dev |
b8057bfd90277d7f954e3713e2198773a6ce19d8 | 78ade3f3f334593e601ea78c1e6fd8575f0fe86b | /tfx/examples/chicago_taxi_pipeline/taxi_utils_test.py | 466676a43ebd5b80bfdecd3f72a58490953f907b | [
"Apache-2.0"
] | permissive | rmothukuru/tfx | 82725e20a7d71265f791122ec3ec5d7708443761 | f46de4be29e96c123e33f90245dc5021d18f8294 | refs/heads/master | 2023-01-11T08:50:20.552722 | 2020-11-06T11:11:47 | 2020-11-06T11:11:47 | 279,754,672 | 1 | 1 | Apache-2.0 | 2020-07-15T03:37:39 | 2020-07-15T03:37:39 | null | UTF-8 | Python | false | false | 7,554 | py | # Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.examples.chicago_taxi_pipeline.taxi_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import types
import apache_beam as beam
import tensorflow as tf
import tensorflow_model_analysis as tfma
import tensorflow_transform as tft
from tensorflow_transform import beam as tft_beam
from tensorflow_transform.tf_metadata import dataset_metadata
from tensorflow_transform.tf_metadata import dataset_schema
from tensorflow_metadata.proto.v0 import schema_pb2
from tfx.components.trainer import executor as trainer_executor
from tfx.examples.chicago_taxi_pipeline import taxi_utils
from tfx.utils import io_utils
from tfx.utils import path_utils
class TaxiUtilsTest(tf.test.TestCase):
  def setUp(self):
    """Resolve the shared components/testdata directory used by the tests."""
    super(TaxiUtilsTest, self).setUp()
    # Walk three directory levels up from this file, then into
    # components/testdata.
    self._testdata_path = os.path.join(
        os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
        'components/testdata')
def testUtils(self):
key = 'fare'
xfm_key = taxi_utils._transformed_name(key)
self.assertEqual(xfm_key, 'fare_xf')
def testPreprocessingFn(self):
schema_file = os.path.join(self._testdata_path, 'schema_gen/schema.pbtxt')
schema = io_utils.parse_pbtxt_file(schema_file, schema_pb2.Schema())
feature_spec = taxi_utils._get_raw_feature_spec(schema)
working_dir = self.get_temp_dir()
transform_graph_path = os.path.join(working_dir, 'transform_graph')
transformed_examples_path = os.path.join(
working_dir, 'transformed_examples')
# Run very simplified version of executor logic.
# TODO(kestert): Replace with tft_unit.assertAnalyzeAndTransformResults.
# Generate legacy `DatasetMetadata` object. Future version of Transform
# will accept the `Schema` proto directly.
legacy_metadata = dataset_metadata.DatasetMetadata(
dataset_schema.from_feature_spec(feature_spec))
decoder = tft.coders.ExampleProtoCoder(legacy_metadata.schema)
with beam.Pipeline() as p:
with tft_beam.Context(temp_dir=os.path.join(working_dir, 'tmp')):
examples = (
p
| 'ReadTrainData' >> beam.io.ReadFromTFRecord(
os.path.join(self._testdata_path, 'csv_example_gen/train/*'),
coder=beam.coders.BytesCoder(),
# TODO(b/114938612): Eventually remove this override.
validate=False)
| 'DecodeTrainData' >> beam.Map(decoder.decode))
(transformed_examples, transformed_metadata), transform_fn = (
(examples, legacy_metadata)
| 'AnalyzeAndTransform' >> tft_beam.AnalyzeAndTransformDataset(
taxi_utils.preprocessing_fn))
# WriteTransformFn writes transform_fn and metadata to subdirectories
# tensorflow_transform.SAVED_MODEL_DIR and
# tensorflow_transform.TRANSFORMED_METADATA_DIR respectively.
# pylint: disable=expression-not-assigned
(transform_fn
|
'WriteTransformFn' >> tft_beam.WriteTransformFn(transform_graph_path))
encoder = tft.coders.ExampleProtoCoder(transformed_metadata.schema)
(transformed_examples
| 'EncodeTrainData' >> beam.Map(encoder.encode)
| 'WriteTrainData' >> beam.io.WriteToTFRecord(
os.path.join(transformed_examples_path,
'train/transformed_examples.gz'),
coder=beam.coders.BytesCoder()))
# pylint: enable=expression-not-assigned
# Verify the output matches golden output.
# NOTE: we don't verify that transformed examples match golden output.
expected_transformed_schema = io_utils.parse_pbtxt_file(
os.path.join(
self._testdata_path,
'transform/transform_graph/transformed_metadata/schema.pbtxt'),
schema_pb2.Schema())
transformed_schema = io_utils.parse_pbtxt_file(
os.path.join(transform_graph_path, 'transformed_metadata/schema.pbtxt'),
schema_pb2.Schema())
# Clear annotations so we only have to test main schema.
transformed_schema.ClearField('annotation')
for feature in transformed_schema.feature:
feature.ClearField('annotation')
self.assertEqual(transformed_schema, expected_transformed_schema)
def testTrainerFn(self):
temp_dir = os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
self._testMethodName)
schema_file = os.path.join(self._testdata_path, 'schema_gen/schema.pbtxt')
output_dir = os.path.join(temp_dir, 'output_dir')
trainer_fn_args = trainer_executor.TrainerFnArgs(
train_files=os.path.join(self._testdata_path,
'transform/transformed_examples/train/*.gz'),
transform_output=os.path.join(self._testdata_path,
'transform/transform_graph'),
output_dir=output_dir,
serving_model_dir=os.path.join(temp_dir, 'serving_model_dir'),
eval_files=os.path.join(self._testdata_path,
'transform/transformed_examples/eval/*.gz'),
schema_file=schema_file,
train_steps=1,
eval_steps=1,
verbosity='INFO',
base_model=None)
schema = io_utils.parse_pbtxt_file(schema_file, schema_pb2.Schema())
training_spec = taxi_utils.trainer_fn(trainer_fn_args, schema)
estimator = training_spec['estimator']
train_spec = training_spec['train_spec']
eval_spec = training_spec['eval_spec']
eval_input_receiver_fn = training_spec['eval_input_receiver_fn']
self.assertIsInstance(estimator,
tf.estimator.DNNLinearCombinedClassifier)
self.assertIsInstance(train_spec, tf.estimator.TrainSpec)
self.assertIsInstance(eval_spec, tf.estimator.EvalSpec)
self.assertIsInstance(eval_input_receiver_fn, types.FunctionType)
# Test keep_max_checkpoint in RunConfig
self.assertGreater(estimator._config.keep_checkpoint_max, 1)
# Train for one step, then eval for one step.
eval_result, exports = tf.estimator.train_and_evaluate(
estimator, train_spec, eval_spec)
self.assertGreater(eval_result['loss'], 0.0)
self.assertEqual(len(exports), 1)
self.assertGreaterEqual(len(tf.io.gfile.listdir(exports[0])), 1)
# Export the eval saved model.
eval_savedmodel_path = tfma.export.export_eval_savedmodel(
estimator=estimator,
export_dir_base=path_utils.eval_model_dir(output_dir),
eval_input_receiver_fn=eval_input_receiver_fn)
self.assertGreaterEqual(len(tf.io.gfile.listdir(eval_savedmodel_path)), 1)
# Test exported serving graph.
with tf.compat.v1.Session() as sess:
metagraph_def = tf.compat.v1.saved_model.loader.load(
sess, [tf.saved_model.SERVING], exports[0])
self.assertIsInstance(metagraph_def, tf.compat.v1.MetaGraphDef)
# Standard TensorFlow test entry point: run all tests when executed directly.
if __name__ == '__main__':
  tf.test.main()
| [
"tensorflow-extended-team@google.com"
] | tensorflow-extended-team@google.com |
21f7189af2c3e27c6e21660e5a8e8bd14728887b | cd1fbb71d8ec429b7364f9dd90448efe37e45ffb | /code/bert4keras/models.py | 7785715466d81aad2031616974999360f43f72b9 | [] | no_license | chenxichen95/Tianchi2020ChineseMedicineQuestionGeneration | bfc4d47df12f77947f7e8a65a8766468694fb45b | 101b41bca42eebda74ac554cefd1dfe40a1cab04 | refs/heads/main | 2023-01-29T00:38:15.143691 | 2020-12-10T11:57:28 | 2020-12-10T11:57:28 | 320,256,029 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 69,730 | py | #! -*- coding: utf-8 -*-
# 主要模型
import numpy as np
from bert4keras.layers import *
from bert4keras.snippets import insert_arguments
from bert4keras.snippets import delete_arguments
from bert4keras.snippets import is_string
from keras.models import Model
import json
class Transformer(object):
    """Base class for all Transformer-style models in this module."""
    def __init__(
        self,
        vocab_size,  # vocabulary size
        hidden_size,  # hidden (encoding) dimension
        num_hidden_layers,  # total number of Transformer layers
        num_attention_heads,  # number of attention heads
        intermediate_size,  # hidden dimension of the FeedForward blocks
        hidden_act,  # activation of the FeedForward hidden layer
        dropout_rate=None,  # dropout rate
        embedding_size=None,  # optional explicit embedding size
        attention_key_size=None,  # head_size of Q, K inside attention
        sequence_length=None,  # optional fixed sequence length
        keep_tokens=None,  # list of token ids to keep (vocabulary pruning)
        compound_tokens=None,  # extra compound tokens extending the embedding
        layers=None,  # externally supplied Keras layers (enables layer reuse)
        prefix=None,  # prefix for layer names
        name=None,  # model name
        **kwargs
    ):
        # When tokens are pruned/extended, the effective vocab size changes.
        if keep_tokens is not None:
            vocab_size = len(keep_tokens)
        if compound_tokens is not None:
            vocab_size += len(compound_tokens)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.attention_head_size = hidden_size // num_attention_heads
        self.attention_key_size = attention_key_size or self.attention_head_size
        self.intermediate_size = intermediate_size
        self.dropout_rate = dropout_rate or 0
        self.hidden_act = hidden_act
        self.embedding_size = embedding_size or hidden_size
        self.sequence_length = sequence_length
        self.keep_tokens = keep_tokens
        self.compound_tokens = compound_tokens
        self.attention_mask = None
        self.position_bias = None
        self.layers = {} if layers is None else layers
        self.prefix = prefix or ''
        self.name = name
        self.built = False
    def build(
        self,
        layer_norm_cond=None,
        layer_norm_cond_hidden_size=None,
        layer_norm_cond_hidden_act=None,
        additional_input_layers=None,
        **kwargs
    ):
        """Build the Keras model.

        The layer_norm_* arguments implement Conditional Layer
        Normalization, i.e. a conditional BERT that is conditioned on a
        fixed-length vector.
        """
        if self.built:
            return None
        # Input
        inputs = self.get_inputs()
        self.set_inputs(inputs, additional_input_layers)
        # Other
        self.layer_norm_conds = [
            layer_norm_cond,
            layer_norm_cond_hidden_size,
            layer_norm_cond_hidden_act or 'linear',
        ]
        # Call
        outputs = self.call(inputs)
        self.set_outputs(outputs)
        # Model
        self.model = Model(self.inputs, self.outputs, name=self.name)
        self.built = True
    def call(self, inputs):
        """Define the model's forward flow: embeddings, then the stack of
        main layers, then the task-specific final layers."""
        # Embedding
        outputs = self.apply_embeddings(inputs)
        # Main
        for i in range(self.num_hidden_layers):
            outputs = self.apply_main_layers(outputs, i)
        # Final
        outputs = self.apply_final_layers(outputs)
        return outputs
    def prefixed(self, name):
        """Prepend the layer-name prefix (if a name is given)."""
        if name is not None:
            return self.prefix + name
    def apply(self, inputs=None, layer=None, arguments=None, **kwargs):
        """Call a layer; layers with the same name are reused automatically.

        inputs: output of the previous layer;
        layer: the layer class to call;
        arguments: passed to layer.call;
        kwargs: passed to the layer's constructor.
        """
        # Skip creating Dropout layers entirely when the rate is 0.
        if layer is Dropout and self.dropout_rate == 0:
            return inputs
        arguments = arguments or {}
        name = self.prefixed(kwargs.get('name'))
        kwargs['name'] = name
        if name not in self.layers:
            layer = layer(**kwargs)
            name = layer.name
            self.layers[name] = layer
        # With no inputs, return the (possibly cached) layer object itself.
        if inputs is None:
            return self.layers[name]
        else:
            return self.layers[name](inputs, **arguments)
    def get_inputs(self):
        raise NotImplementedError
    def apply_embeddings(self, inputs):
        raise NotImplementedError
    def apply_main_layers(self, inputs, index):
        raise NotImplementedError
    def apply_final_layers(self, inputs):
        raise NotImplementedError
    def compute_attention_mask(self, inputs=None):
        """Attention mask applied at every layer (None means no mask)."""
        return self.attention_mask
    def compute_position_bias(self, inputs=None):
        """Per-layer position bias (typically for relative position
        encodings)."""
        return self.position_bias
    def set_inputs(self, inputs, additional_input_layers=None):
        """Set the `input` and `inputs` attributes."""
        if inputs is None:
            inputs = []
        elif not isinstance(inputs, list):
            inputs = [inputs]
        inputs = inputs[:]
        if additional_input_layers is not None:
            if not isinstance(additional_input_layers, list):
                additional_input_layers = [additional_input_layers]
            inputs.extend(additional_input_layers)
        self.inputs = inputs
        if len(inputs) > 1:
            self.input = inputs
        else:
            self.input = inputs[0]
    def set_outputs(self, outputs):
        """Set the `output` and `outputs` attributes."""
        if not isinstance(outputs, list):
            outputs = [outputs]
        outputs = outputs[:]
        self.outputs = outputs
        if len(outputs) > 1:
            self.output = outputs
        else:
            self.output = outputs[0]
    @property
    def initializer(self):
        """Truncated normal initialization by default (stddev 0.02)."""
        return keras.initializers.TruncatedNormal(stddev=0.02)
    def simplify(self, inputs):
        """Filter None entries out of a list; unwrap a single element."""
        inputs = [i for i in inputs if i is not None]
        if len(inputs) == 1:
            inputs = inputs[0]
        return inputs
    def load_embeddings(self, embeddings):
        """Post-process embedding weights for kept/compound tokens."""
        if self.keep_tokens is not None:
            embeddings = embeddings[self.keep_tokens]
        if self.compound_tokens is not None:
            # Each compound token's embedding is the mean of its parts.
            ext_embeddings = np.array([
                embeddings[idxs].mean(0) for idxs in self.compound_tokens
            ])
            embeddings = np.concatenate([embeddings, ext_embeddings], 0)
        return embeddings
    def load_variable(self, checkpoint, name):
        """Load a single variable from a TF checkpoint."""
        return tf.train.load_variable(checkpoint, name)
    def create_variable(self, name, value):
        """Create a TF variable (initialized with the default initializer)."""
        return K.variable(self.initializer(value.shape), name=name)
    def variable_mapping(self):
        """Mapping between Keras layer names and checkpoint variable names."""
        return {}
    def load_weights_from_checkpoint(self, checkpoint, mapping=None):
        """Load weights from a checkpoint according to `mapping`."""
        mapping = mapping or self.variable_mapping()
        mapping = {self.prefixed(k): v for k, v in mapping.items()}
        mapping = {k: v for k, v in mapping.items() if k in self.layers}
        weight_value_pairs = []
        for layer, variables in mapping.items():
            layer = self.layers[layer]
            weights = layer.trainable_weights
            values = [self.load_variable(checkpoint, v) for v in variables]
            if isinstance(layer, MultiHeadAttention):
                """If key_size differs from head_size, project the relevant
                weights to the appropriate shape via an orthogonal matrix.
                """
                count = 2
                if layer.use_bias:
                    count += 2
                heads = self.num_attention_heads
                head_size = self.attention_head_size
                key_size = self.attention_key_size
                # QR gives an orthogonal projection head_size -> key_size.
                W = np.linalg.qr(np.random.randn(key_size, head_size))[0].T
                if layer.attention_scale:
                    # Compensate for the 1/sqrt(key_size) attention scaling.
                    W = W * key_size**0.25 / head_size**0.25
                for i in range(count):
                    w, v = weights[i], values[i]
                    w_shape, v_shape = K.int_shape(w), v.shape
                    if w_shape[-1] != v_shape[-1]:
                        pre_shape = w_shape[:-1]
                        v = v.reshape(pre_shape + (heads, head_size))
                        v = np.dot(v, W)
                        v = v.reshape(pre_shape + (heads * key_size,))
                        values[i] = v
            weight_value_pairs.extend(zip(weights, values))
        K.batch_set_value(weight_value_pairs)
    def save_weights_as_checkpoint(self, filename, mapping=None):
        """Save the current weights as a TF checkpoint per `mapping`."""
        mapping = mapping or self.variable_mapping()
        mapping = {self.prefixed(k): v for k, v in mapping.items()}
        mapping = {k: v for k, v in mapping.items() if k in self.layers}
        with tf.Graph().as_default():
            all_variables, all_values = [], []
            for layer, variables in mapping.items():
                layer = self.layers[layer]
                values = K.batch_get_value(layer.trainable_weights)
                for name, value in zip(variables, values):
                    all_variables.append(self.create_variable(name, value))
                    all_values.append(value)
            with tf.Session() as sess:
                K.batch_set_value(zip(all_variables, all_values))
                saver = tf.train.Saver()
                saver.save(sess, filename)
class LM_Mask(object):
    """Mixin providing a lower-triangular attention mask (language models)."""
    def compute_attention_mask(self, inputs=None):
        """Build the causal mask once by comparing position indices."""
        if self.attention_mask is not None:
            return self.attention_mask

        def lm_mask(s):
            # Position j may attend to position i only when i <= j.
            length = K.shape(s)[1]
            positions = K.arange(0, length)
            tril = positions[None, :] <= positions[:, None]
            tril = K.cast(tril, K.floatx())
            return tril[None, None]

        self.attention_mask = self.apply(
            inputs=self.inputs[0],
            layer=Lambda,
            function=lm_mask,
            name='Attention-LM-Mask'
        )
        return self.attention_mask
class UniLM_Mask(object):
    """Mixin providing the UniLM attention mask (for Seq2Seq models).

    The source/target partition is indicated by the segment ids.
    UniLM: https://arxiv.org/abs/1905.03197
    """
    def compute_attention_mask(self, inputs=None):
        """Build the seq2seq mask once from cumulative-summed segment ids."""
        if self.attention_mask is not None:
            return self.attention_mask

        def unilm_mask(s):
            # Source tokens (segment 0) see each other bidirectionally;
            # target tokens (segment 1) see source plus preceding targets.
            cum = K.cumsum(s, axis=1)
            visible = cum[:, None, :] <= cum[:, :, None]
            visible = K.cast(visible, K.floatx())
            return visible[:, None]

        self.attention_mask = self.apply(
            inputs=self.inputs[1],
            layer=Lambda,
            function=unilm_mask,
            name='Attention-UniLM-Mask'
        )
        return self.attention_mask
class BERT(Transformer):
    """Build the BERT model."""
    def __init__(
        self,
        max_position,  # maximum sequence length
        segment_vocab_size=2,  # number of segment (token-type) ids
        with_pool=False,  # whether to include the Pooler part
        with_nsp=False,  # whether to include the NSP head
        with_mlm=False,  # whether to include the MLM head
        custom_position_ids=False,  # whether position ids are passed in
        shared_segment_embeddings=False,  # if True, segments share the token embedding
        **kwargs  # remaining arguments
    ):
        super(BERT, self).__init__(**kwargs)
        self.max_position = max_position
        self.segment_vocab_size = segment_vocab_size
        self.with_pool = with_pool
        self.with_nsp = with_nsp
        self.with_mlm = with_mlm
        self.custom_position_ids = custom_position_ids
        self.shared_segment_embeddings = shared_segment_embeddings
        # NSP is computed on top of the pooled CLS vector, so it needs Pooler.
        if self.with_nsp and not self.with_pool:
            self.with_pool = True
    def get_inputs(self):
        """BERT's inputs are token_ids and segment_ids
        (position ids may also be supplied externally for special needs).
        """
        x_in = self.apply(
            layer=Input, shape=(self.sequence_length,), name='Input-Token'
        )
        inputs = [x_in]
        if self.segment_vocab_size > 0:
            s_in = self.apply(
                layer=Input,
                shape=(self.sequence_length,),
                name='Input-Segment'
            )
            inputs.append(s_in)
        if self.custom_position_ids:
            p_in = self.apply(
                layer=Input,
                shape=(self.sequence_length,),
                name='Input-Position'
            )
            inputs.append(p_in)
        return inputs
    def apply_embeddings(self, inputs):
        """BERT's embedding is the sum of token, position and segment
        embeddings, followed by LayerNorm and Dropout."""
        inputs = inputs[:]
        x = inputs.pop(0)
        if self.segment_vocab_size > 0:
            s = inputs.pop(0)
        if self.custom_position_ids:
            p = inputs.pop(0)
        else:
            p = None
        # Optional condition vector for Conditional Layer Normalization.
        z = self.layer_norm_conds[0]
        x = self.apply(
            inputs=x,
            layer=Embedding,
            input_dim=self.vocab_size,
            output_dim=self.embedding_size,
            embeddings_initializer=self.initializer,
            mask_zero=True,
            name='Embedding-Token'
        )
        if self.segment_vocab_size > 0:
            if self.shared_segment_embeddings:
                name = 'Embedding-Token'
            else:
                name = 'Embedding-Segment'
            s = self.apply(
                inputs=s,
                layer=Embedding,
                input_dim=self.segment_vocab_size,
                output_dim=self.embedding_size,
                embeddings_initializer=self.initializer,
                name=name
            )
            x = self.apply(
                inputs=[x, s], layer=Add, name='Embedding-Token-Segment'
            )
        x = self.apply(
            inputs=self.simplify([x, p]),
            layer=PositionEmbedding,
            input_dim=self.max_position,
            output_dim=self.embedding_size,
            merge_mode='add',
            embeddings_initializer=self.initializer,
            custom_position_ids=self.custom_position_ids,
            name='Embedding-Position'
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='Embedding-Norm'
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='Embedding-Dropout'
        )
        # Project up to hidden_size when a smaller embedding_size is used
        # (ALBERT-style factorized embedding).
        if self.embedding_size != self.hidden_size:
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                kernel_initializer=self.initializer,
                name='Embedding-Mapping'
            )
        return x
    def apply_main_layers(self, inputs, index):
        """BERT's trunk is a self-attention block.
        Order: Att --> Add --> LN --> FFN --> Add --> LN
        """
        x = inputs
        z = self.layer_norm_conds[0]
        attention_name = 'Transformer-%d-MultiHeadSelfAttention' % index
        feed_forward_name = 'Transformer-%d-FeedForward' % index
        attention_mask = self.compute_attention_mask(index)
        # Self Attention
        xi, x, arguments = x, [x, x, x], {'a_mask': None}
        if attention_mask is not None:
            arguments['a_mask'] = True
            x.append(attention_mask)
        x = self.apply(
            inputs=x,
            layer=MultiHeadAttention,
            arguments=arguments,
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            key_size=self.attention_key_size,
            kernel_initializer=self.initializer,
            name=attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % attention_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % attention_name
        )
        # Feed Forward
        xi = x
        x = self.apply(
            inputs=x,
            layer=FeedForward,
            units=self.intermediate_size,
            activation=self.hidden_act,
            kernel_initializer=self.initializer,
            name=feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % feed_forward_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % feed_forward_name
        )
        return x
    def apply_final_layers(self, inputs):
        """Decide the outputs according to the with_pool/with_nsp/with_mlm
        flags."""
        x = inputs
        z = self.layer_norm_conds[0]
        outputs = [x]
        if self.with_pool:
            # Pooler (extracts the CLS vector)
            x = outputs[0]
            x = self.apply(
                inputs=x,
                layer=Lambda,
                function=lambda x: x[:, 0],
                name='Pooler'
            )
            # with_pool may also carry a custom activation name.
            pool_activation = 'tanh' if self.with_pool is True else self.with_pool
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                activation=pool_activation,
                kernel_initializer=self.initializer,
                name='Pooler-Dense'
            )
            if self.with_nsp:
                # Next Sentence Prediction head
                x = self.apply(
                    inputs=x,
                    layer=Dense,
                    units=2,
                    activation='softmax',
                    kernel_initializer=self.initializer,
                    name='NSP-Proba'
                )
            outputs.append(x)
        if self.with_mlm:
            # Masked Language Model head (ties weights with Embedding-Token)
            x = outputs[0]
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.embedding_size,
                activation=self.hidden_act,
                kernel_initializer=self.initializer,
                name='MLM-Dense'
            )
            x = self.apply(
                inputs=self.simplify([x, z]),
                layer=LayerNormalization,
                conditional=(z is not None),
                hidden_units=self.layer_norm_conds[1],
                hidden_activation=self.layer_norm_conds[2],
                hidden_initializer=self.initializer,
                name='MLM-Norm'
            )
            x = self.apply(
                inputs=x,
                layer=Embedding,
                arguments={'mode': 'dense'},
                name='Embedding-Token'
            )
            x = self.apply(inputs=x, layer=BiasAdd, name='MLM-Bias')
            outputs.append(x)
        if len(outputs) == 1:
            outputs = outputs[0]
        elif len(outputs) == 2:
            outputs = outputs[1]
        else:
            outputs = outputs[1:]
        return outputs
    def load_variable(self, checkpoint, name):
        """Load a single variable from the checkpoint (with embedding
        post-processing and NSP weight transposition)."""
        variable = super(BERT, self).load_variable(checkpoint, name)
        if name in [
            'bert/embeddings/word_embeddings',
            'cls/predictions/output_bias',
        ]:
            return self.load_embeddings(variable)
        elif name == 'cls/seq_relationship/output_weights':
            return variable.T
        else:
            return variable
    def create_variable(self, name, value):
        """Create a variable in TensorFlow (transposing NSP weights back)."""
        if name == 'cls/seq_relationship/output_weights':
            value = value.T
        return super(BERT, self).create_variable(name, value)
    def variable_mapping(self):
        """Mapping to the official BERT checkpoint variable names."""
        mapping = {
            'Embedding-Token': ['bert/embeddings/word_embeddings'],
            'Embedding-Segment': ['bert/embeddings/token_type_embeddings'],
            'Embedding-Position': ['bert/embeddings/position_embeddings'],
            'Embedding-Norm': [
                'bert/embeddings/LayerNorm/beta',
                'bert/embeddings/LayerNorm/gamma',
            ],
            'Embedding-Mapping': [
                'bert/encoder/embedding_hidden_mapping_in/kernel',
                'bert/encoder/embedding_hidden_mapping_in/bias',
            ],
            'Pooler-Dense': [
                'bert/pooler/dense/kernel',
                'bert/pooler/dense/bias',
            ],
            'NSP-Proba': [
                'cls/seq_relationship/output_weights',
                'cls/seq_relationship/output_bias',
            ],
            'MLM-Dense': [
                'cls/predictions/transform/dense/kernel',
                'cls/predictions/transform/dense/bias',
            ],
            'MLM-Norm': [
                'cls/predictions/transform/LayerNorm/beta',
                'cls/predictions/transform/LayerNorm/gamma',
            ],
            'MLM-Bias': ['cls/predictions/output_bias'],
        }
        for i in range(self.num_hidden_layers):
            prefix = 'bert/encoder/layer_%d/' % i
            mapping.update({
                'Transformer-%d-MultiHeadSelfAttention' % i: [
                    prefix + 'attention/self/query/kernel',
                    prefix + 'attention/self/query/bias',
                    prefix + 'attention/self/key/kernel',
                    prefix + 'attention/self/key/bias',
                    prefix + 'attention/self/value/kernel',
                    prefix + 'attention/self/value/bias',
                    prefix + 'attention/output/dense/kernel',
                    prefix + 'attention/output/dense/bias',
                ],
                'Transformer-%d-MultiHeadSelfAttention-Norm' % i: [
                    prefix + 'attention/output/LayerNorm/beta',
                    prefix + 'attention/output/LayerNorm/gamma',
                ],
                'Transformer-%d-FeedForward' % i: [
                    prefix + 'intermediate/dense/kernel',
                    prefix + 'intermediate/dense/bias',
                    prefix + 'output/dense/kernel',
                    prefix + 'output/dense/bias',
                ],
                'Transformer-%d-FeedForward-Norm' % i: [
                    prefix + 'output/LayerNorm/beta',
                    prefix + 'output/LayerNorm/gamma',
                ],
            })
        return mapping
class ALBERT(BERT):
    """Build the ALBERT model (all layers share one set of weights)."""
    def apply_main_layers(self, inputs, index):
        """ALBERT's trunk is a self-attention block.
        Order: Att --> Add --> LN --> FFN --> Add --> LN

        The layer names carry no index, so `apply` reuses the same layers
        for every call: this is how cross-layer parameter sharing works.
        """
        x = inputs
        z = self.layer_norm_conds[0]
        attention_name = 'Transformer-MultiHeadSelfAttention'
        feed_forward_name = 'Transformer-FeedForward'
        attention_mask = self.compute_attention_mask(index)
        # Self Attention
        xi, x, arguments = x, [x, x, x], {'a_mask': None}
        if attention_mask is not None:
            arguments['a_mask'] = True
            x.append(attention_mask)
        x = self.apply(
            inputs=x,
            layer=MultiHeadAttention,
            arguments=arguments,
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            key_size=self.attention_key_size,
            kernel_initializer=self.initializer,
            name=attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % attention_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % attention_name
        )
        # Feed Forward
        xi = x
        x = self.apply(
            inputs=x,
            layer=FeedForward,
            units=self.intermediate_size,
            activation=self.hidden_act,
            kernel_initializer=self.initializer,
            name=feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % feed_forward_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % feed_forward_name
        )
        return x
    def variable_mapping(self):
        """Mapping to the official ALBERT checkpoint variable names."""
        mapping = super(ALBERT, self).variable_mapping()
        prefix = 'bert/encoder/transformer/group_0/inner_group_0/'
        mapping.update({
            'Transformer-MultiHeadSelfAttention': [
                prefix + 'attention_1/self/query/kernel',
                prefix + 'attention_1/self/query/bias',
                prefix + 'attention_1/self/key/kernel',
                prefix + 'attention_1/self/key/bias',
                prefix + 'attention_1/self/value/kernel',
                prefix + 'attention_1/self/value/bias',
                prefix + 'attention_1/output/dense/kernel',
                prefix + 'attention_1/output/dense/bias',
            ],
            'Transformer-MultiHeadSelfAttention-Norm': [
                prefix + 'LayerNorm/beta',
                prefix + 'LayerNorm/gamma',
            ],
            'Transformer-FeedForward': [
                prefix + 'ffn_1/intermediate/dense/kernel',
                prefix + 'ffn_1/intermediate/dense/bias',
                prefix + 'ffn_1/intermediate/output/dense/kernel',
                prefix + 'ffn_1/intermediate/output/dense/bias',
            ],
            'Transformer-FeedForward-Norm': [
                prefix + 'LayerNorm_1/beta',
                prefix + 'LayerNorm_1/gamma',
            ],
        })
        return mapping
class ALBERT_Unshared(BERT):
    """Untie ALBERT's weight-sharing constraint, using it like BERT.

    Each of the num_hidden_layers layers gets its own Keras layers, but
    they are all initialized from the single shared ALBERT checkpoint
    block (every layer maps to the same checkpoint variables).
    """
    def variable_mapping(self):
        """Mapping to the official ALBERT checkpoint variable names."""
        mapping = super(ALBERT_Unshared, self).variable_mapping()
        prefix = 'bert/encoder/transformer/group_0/inner_group_0/'
        for i in range(self.num_hidden_layers):
            mapping.update({
                'Transformer-%d-MultiHeadSelfAttention' % i: [
                    prefix + 'attention_1/self/query/kernel',
                    prefix + 'attention_1/self/query/bias',
                    prefix + 'attention_1/self/key/kernel',
                    prefix + 'attention_1/self/key/bias',
                    prefix + 'attention_1/self/value/kernel',
                    prefix + 'attention_1/self/value/bias',
                    prefix + 'attention_1/output/dense/kernel',
                    prefix + 'attention_1/output/dense/bias',
                ],
                'Transformer-%d-MultiHeadSelfAttention-Norm' % i: [
                    prefix + 'LayerNorm/beta',
                    prefix + 'LayerNorm/gamma',
                ],
                'Transformer-%d-FeedForward' % i: [
                    prefix + 'ffn_1/intermediate/dense/kernel',
                    prefix + 'ffn_1/intermediate/dense/bias',
                    prefix + 'ffn_1/intermediate/output/dense/kernel',
                    prefix + 'ffn_1/intermediate/output/dense/bias',
                ],
                'Transformer-%d-FeedForward-Norm' % i: [
                    prefix + 'LayerNorm_1/beta',
                    prefix + 'LayerNorm_1/gamma',
                ],
            })
        return mapping
class NEZHA(BERT):
    """NEZHA model released by Huawei.

    Reference: https://arxiv.org/abs/1909.00204
    NEZHA replaces BERT's learned absolute position embedding with a
    fixed sinusoidal relative position bias inside the attention layers.
    """
    def apply_embeddings(self, inputs):
        """NEZHA's embedding is the sum of token and segment embeddings.

        Unlike BERT there is no absolute position embedding here; the
        position information is injected later as a relative position
        bias (see compute_position_bias).
        """
        inputs = inputs[:]
        x = inputs.pop(0)
        if self.segment_vocab_size > 0:
            s = inputs.pop(0)
        # Optional condition vector for Conditional Layer Normalization.
        z = self.layer_norm_conds[0]
        x = self.apply(
            inputs=x,
            layer=Embedding,
            input_dim=self.vocab_size,
            output_dim=self.embedding_size,
            embeddings_initializer=self.initializer,
            mask_zero=True,
            name='Embedding-Token'
        )
        if self.segment_vocab_size > 0:
            if self.shared_segment_embeddings:
                name = 'Embedding-Token'
            else:
                name = 'Embedding-Segment'
            # Fix: use the configured segment_vocab_size instead of the
            # hard-coded 2, consistent with BERT.apply_embeddings.
            # Backward compatible: the default segment_vocab_size is 2.
            s = self.apply(
                inputs=s,
                layer=Embedding,
                input_dim=self.segment_vocab_size,
                output_dim=self.embedding_size,
                embeddings_initializer=self.initializer,
                name=name
            )
            x = self.apply(
                inputs=[x, s], layer=Add, name='Embedding-Token-Segment'
            )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='Embedding-Norm'
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='Embedding-Dropout'
        )
        if self.embedding_size != self.hidden_size:
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                kernel_initializer=self.initializer,
                name='Embedding-Mapping'
            )
        return x
    def apply_main_layers(self, inputs, index):
        """NEZHA's trunk is a self-attention block.
        Order: Att --> Add --> LN --> FFN --> Add --> LN
        """
        x = inputs
        z = self.layer_norm_conds[0]
        attention_name = 'Transformer-%d-MultiHeadSelfAttention' % index
        feed_forward_name = 'Transformer-%d-FeedForward' % index
        attention_mask = self.compute_attention_mask(index)
        position_bias = self.compute_position_bias(x)
        # Self Attention (with typical relative position bias)
        xi, x = x, [x, x, x, position_bias]
        arguments = {'a_mask': None, 'p_bias': 'typical_relative'}
        if attention_mask is not None:
            arguments['a_mask'] = True
            x.insert(3, attention_mask)
        x = self.apply(
            inputs=x,
            layer=MultiHeadAttention,
            arguments=arguments,
            heads=self.num_attention_heads,
            head_size=self.attention_head_size,
            key_size=self.attention_key_size,
            kernel_initializer=self.initializer,
            name=attention_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % attention_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % attention_name
        )
        # Feed Forward
        xi = x
        x = self.apply(
            inputs=x,
            layer=FeedForward,
            units=self.intermediate_size,
            activation=self.hidden_act,
            kernel_initializer=self.initializer,
            name=feed_forward_name
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='%s-Dropout' % feed_forward_name
        )
        x = self.apply(
            inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
        )
        x = self.apply(
            inputs=self.simplify([x, z]),
            layer=LayerNormalization,
            conditional=(z is not None),
            hidden_units=self.layer_norm_conds[1],
            hidden_activation=self.layer_norm_conds[2],
            hidden_initializer=self.initializer,
            name='%s-Norm' % feed_forward_name
        )
        return x
    def compute_position_bias(self, inputs=None):
        """Classical (clipped) relative position encoding, shared by all
        layers and built only once."""
        if self.position_bias is None:
            def sinusoidal(shape, dtype=None):
                """NEZHA uses fixed sin-cos position vectors directly."""
                vocab_size, depth = shape
                embeddings = np.zeros(shape)
                for pos in range(vocab_size):
                    for i in range(depth // 2):
                        theta = pos / np.power(10000, 2. * i / depth)
                        embeddings[pos, 2 * i] = np.sin(theta)
                        embeddings[pos, 2 * i + 1] = np.cos(theta)
                return embeddings
            x = inputs
            self.position_bias = self.apply(
                inputs=[x, x],
                layer=RelativePositionEmbedding,
                # Relative distance is clipped to [-64, 64] (NEZHA default),
                # hence 2 * 64 + 1 distinct embeddings.
                input_dim=2 * 64 + 1,
                output_dim=self.attention_head_size,
                embeddings_initializer=sinusoidal,
                name='Embedding-Relative-Position',
                trainable=False
            )
        return self.position_bias
class ELECTRA(BERT):
    """ELECTRA model released by Google.

    Reference: https://arxiv.org/abs/2003.10555
    """
    @insert_arguments(with_discriminator=False)
    @delete_arguments('with_pool', 'with_mlm')
    def __init__(
        self,
        max_position,  # maximum sequence length
        **kwargs  # remaining arguments
    ):
        super(ELECTRA, self).__init__(max_position, **kwargs)
    def apply_final_layers(self, inputs):
        """Optionally append the discriminator head (replaced-token
        detection)."""
        x = inputs
        if self.with_discriminator:
            if self.with_discriminator is True:
                final_activation = 'sigmoid'
            else:
                # with_discriminator may also carry a custom activation.
                final_activation = self.with_discriminator
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                activation=self.hidden_act,
                kernel_initializer=self.initializer,
                name='Discriminator-Dense'
            )
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=1,
                activation=final_activation,
                kernel_initializer=self.initializer,
                name='Discriminator-Prediction'
            )
        return x
    def load_variable(self, checkpoint, name):
        """Load a single variable from the checkpoint (with embedding
        post-processing)."""
        variable = super(ELECTRA, self).load_variable(checkpoint, name)
        if name == 'electra/embeddings/word_embeddings':
            return self.load_embeddings(variable)
        else:
            return variable
    def variable_mapping(self):
        """Mapping to the official ELECTRA checkpoint variable names
        (reuses BERT's mapping with the 'electra/' scope)."""
        mapping = super(ELECTRA, self).variable_mapping()
        mapping['Embedding-Mapping'] = [
            'electra/embeddings_project/kernel',
            'electra/embeddings_project/bias',
        ]
        mapping = {
            k: [i.replace('bert/', 'electra/') for i in v]
            for k, v in mapping.items()
        }
        mapping['Discriminator-Dense'] = [
            'discriminator_predictions/dense/kernel',
            'discriminator_predictions/dense/bias',
        ]
        mapping['Discriminator-Prediction'] = [
            'discriminator_predictions/dense_1/kernel',
            'discriminator_predictions/dense_1/bias',
        ]
        return mapping
class GPT_OpenAI(LM_Mask, BERT):
    """Build the (original OpenAI) GPT model.

    Reference: https://github.com/openai/finetune-transformer-lm
    Uses the causal LM mask from LM_Mask on top of BERT's trunk.
    """
    @insert_arguments(final_activation='softmax')
    @delete_arguments('with_pool', 'with_mlm')
    def __init__(self, **kwargs):
        super(GPT_OpenAI, self).__init__(**kwargs)
    def apply_embeddings(self, inputs):
        """GPT's embedding is the sum of token, position and segment
        embeddings.
        The main difference from BERT is that no LayerNormalization is
        applied after summing the three.
        """
        inputs = inputs[:]
        x = inputs.pop(0)
        if self.segment_vocab_size > 0:
            s = inputs.pop(0)
        if self.custom_position_ids:
            p = inputs.pop(0)
        else:
            p = None
        x = self.apply(
            inputs=x,
            layer=Embedding,
            input_dim=self.vocab_size,
            output_dim=self.embedding_size,
            embeddings_initializer=self.initializer,
            mask_zero=True,
            name='Embedding-Token'
        )
        if self.segment_vocab_size > 0:
            if self.shared_segment_embeddings:
                name = 'Embedding-Token'
            else:
                name = 'Embedding-Segment'
            s = self.apply(
                inputs=s,
                layer=Embedding,
                input_dim=self.segment_vocab_size,
                output_dim=self.embedding_size,
                embeddings_initializer=self.initializer,
                name=name
            )
            x = self.apply(
                inputs=[x, s], layer=Add, name='Embedding-Token-Segment'
            )
        x = self.apply(
            inputs=self.simplify([x, p]),
            layer=PositionEmbedding,
            input_dim=self.max_position,
            output_dim=self.embedding_size,
            merge_mode='add',
            embeddings_initializer=self.initializer,
            custom_position_ids=self.custom_position_ids,
            name='Embedding-Position'
        )
        x = self.apply(
            inputs=x,
            layer=Dropout,
            rate=self.dropout_rate,
            name='Embedding-Dropout'
        )
        if self.embedding_size != self.hidden_size:
            x = self.apply(
                inputs=x,
                layer=Dense,
                units=self.hidden_size,
                kernel_initializer=self.initializer,
                name='Embedding-Mapping'
            )
        return x
    def apply_final_layers(self, inputs):
        """Remaining parts: the tied-embedding LM head."""
        x = inputs
        # Language Model head (weights tied with Embedding-Token)
        x = self.apply(
            inputs=x,
            layer=Embedding,
            arguments={'mode': 'dense'},
            name='Embedding-Token'
        )
        x = self.apply(
            inputs=x,
            layer=Activation,
            activation=self.final_activation,
            name='LM-Activation'
        )
        return x
    def variable_mapping(self):
        """Mapping to the TF-version GPT checkpoint variable names
        (reuses BERT's mapping with the 'gpt/transformer' scope)."""
        mapping = super(GPT_OpenAI, self).variable_mapping()
        mapping = {
            k: [
                i.replace('bert/', 'gpt/').replace('encoder', 'transformer')
                for i in v
            ]
            for k, v in mapping.items()
        }
        return mapping
class GPT2_ML(LM_Mask, Transformer):
"""构建GPT2_ML模型
链接: https://github.com/imcaspar/gpt2-ml
"""
def __init__(
self,
max_position, # 序列最大长度
final_activation='softmax', # 预测分布的激活函数
**kwargs # 其余参数
):
super(GPT2_ML, self).__init__(**kwargs)
self.max_position = max_position
self.final_activation = final_activation
def get_inputs(self):
"""GPT2_ML的输入是token_ids
"""
x_in = self.apply(
layer=Input, shape=(self.sequence_length,), name='Input-Token'
)
return x_in
def apply_embeddings(self, inputs):
"""GPT2_ML的embedding是token、position两者embedding之和
"""
x = inputs
z = self.layer_norm_conds[0]
x = self.apply(
inputs=x,
layer=Embedding,
input_dim=self.vocab_size,
output_dim=self.embedding_size,
embeddings_initializer=self.initializer,
mask_zero=True,
name='Embedding-Token'
)
x = self.apply(
inputs=x,
layer=PositionEmbedding,
input_dim=self.max_position,
output_dim=self.embedding_size,
merge_mode='add',
embeddings_initializer=self.initializer,
name='Embedding-Position'
)
x = self.apply(
inputs=self.simplify([x, z]),
layer=LayerNormalization,
epsilon=1e-5,
conditional=(z is not None),
hidden_units=self.layer_norm_conds[1],
hidden_activation=self.layer_norm_conds[2],
hidden_initializer=self.initializer,
name='Embedding-Norm'
)
if self.embedding_size != self.hidden_size:
x = self.apply(
inputs=x,
layer=Dense,
units=self.hidden_size,
kernel_initializer=self.initializer,
name='Embedding-Mapping'
)
return x
def apply_main_layers(self, inputs, index):
"""GPT2_ML的主体是基于Self-Attention的模块
顺序:Att --> LN --> FFN --> Add --> LN
"""
x = inputs
z = self.layer_norm_conds[0]
attention_name = 'Transformer-%d-MultiHeadSelfAttention' % index
feed_forward_name = 'Transformer-%d-FeedForward' % index
attention_mask = self.compute_attention_mask(index)
# Self Attention
xi, x, arguments = x, [x, x, x, attention_mask], {'a_mask': True}
x = self.apply(
inputs=x,
layer=MultiHeadAttention,
arguments=arguments,
heads=self.num_attention_heads,
head_size=self.attention_head_size,
key_size=self.attention_key_size,
kernel_initializer=self.initializer,
name=attention_name
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='%s-Dropout' % attention_name
)
x = self.apply(
inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
)
# Feed Forward
xi = x
x = self.apply(
inputs=self.simplify([x, z]),
layer=LayerNormalization,
epsilon=1e-5,
conditional=(z is not None),
hidden_units=self.layer_norm_conds[1],
hidden_activation=self.layer_norm_conds[2],
hidden_initializer=self.initializer,
name='%s-Norm-0' % feed_forward_name
)
x = self.apply(
inputs=x,
layer=FeedForward,
units=self.intermediate_size,
activation=self.hidden_act,
kernel_initializer=self.initializer,
name=feed_forward_name
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='%s-Dropout' % feed_forward_name
)
x = self.apply(
inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
)
x = self.apply(
inputs=self.simplify([x, z]),
layer=LayerNormalization,
epsilon=1e-5,
conditional=(z is not None),
hidden_units=self.layer_norm_conds[1],
hidden_activation=self.layer_norm_conds[2],
hidden_initializer=self.initializer,
name='%s-Norm-1' % feed_forward_name
)
return x
def apply_final_layers(self, inputs):
"""剩余部分
"""
x = inputs
# Language Model部分
x = self.apply(
inputs=x,
layer=Embedding,
arguments={'mode': 'dense'},
name='Embedding-Token'
)
x = self.apply(
inputs=x,
layer=Activation,
activation=self.final_activation,
name='LM-Activation'
)
return x
def load_variable(self, checkpoint, name):
"""加载单个变量的函数
"""
variable = super(GPT2_ML, self).load_variable(checkpoint, name)
if name == 'newslm/embeddings/word_embed':
return self.load_embeddings(variable)
else:
return variable
def variable_mapping(self):
"""映射到官方GPT2_ML权重格式
"""
mapping = {
'Embedding-Token': ['newslm/embeddings/word_embed'],
'Embedding-Position': ['newslm/embeddings/pos_embed'],
'Embedding-Norm': [
'newslm/embeddings/LayerNorm_embed_norm/beta',
'newslm/embeddings/LayerNorm_embed_norm/gamma',
],
}
for i in range(self.num_hidden_layers):
prefix = 'newslm/layer%02d/' % i
mapping.update({
'Transformer-%d-MultiHeadSelfAttention' % i: [
prefix + 'query_layer/kernel',
prefix + 'query_layer/bias',
prefix + 'key_layer/kernel',
prefix + 'key_layer/bias',
prefix + 'value_layer/kernel',
prefix + 'value_layer/bias',
prefix + 'context_projection_layer/kernel',
prefix + 'context_projection_layer/bias',
],
'Transformer-%d-FeedForward-Norm-0' % i: [
prefix + 'LayerNorm_mlp_ln0/beta',
prefix + 'LayerNorm_mlp_ln0/gamma',
],
'Transformer-%d-FeedForward' % i: [
prefix + 'intermediate/kernel',
prefix + 'intermediate/bias',
prefix + 'output/kernel',
prefix + 'output/bias',
],
'Transformer-%d-FeedForward-Norm-1' % i: [
prefix + 'LayerNorm_mlp_ln1/beta',
prefix + 'LayerNorm_mlp_ln1/gamma',
],
})
return mapping
class T5_Base(Transformer):
"""Google的T5模型(基类)
"""
def load_variable(self, checkpoint, name):
"""加载单个变量的函数
"""
variable = super(T5_Base, self).load_variable(checkpoint, name)
if name == 'shared/embedding':
return self.load_embeddings(variable)
elif 'relative_attention_bias' in name:
return variable.T
else:
return variable
def create_variable(self, name, value):
"""在tensorflow中创建一个变量
"""
if 'relative_attention_bias' in name:
value = value.T
return super(T5_Base, self).create_variable(name, value)
def variable_mapping(self):
"""映射到官方T5权重格式
"""
mapping = {
'Embedding-Token': ['shared/embedding'],
'Encoder-Embedding-Relative-Position': [
'encoder/block_000/layer_000/SelfAttention/relative_attention_bias'
],
'Encoder-Output-Norm': ['encoder/final_layer_norm/scale'],
'Decoder-Embedding-Relative-Position': [
'decoder/block_000/layer_000/SelfAttention/relative_attention_bias',
],
'Decoder-Output-Norm': ['decoder/final_layer_norm/scale'],
}
for i in range(self.num_hidden_layers):
# Encoder主体
prefix = 'encoder/block_%03d/' % i
mapping.update({
'Encoder-Transformer-%d-MultiHeadSelfAttention' % i: [
prefix + 'layer_000/SelfAttention/q',
prefix + 'layer_000/SelfAttention/k',
prefix + 'layer_000/SelfAttention/v',
prefix + 'layer_000/SelfAttention/o',
],
'Encoder-Transformer-%d-MultiHeadSelfAttention-Norm' % i: [
prefix + 'layer_000/layer_norm/scale',
],
'Encoder-Transformer-%d-FeedForward' % i: [
prefix + 'layer_001/DenseReluDense/wi/kernel',
prefix + 'layer_001/DenseReluDense/wo/kernel',
],
'Encoder-Transformer-%d-FeedForward-Norm' % i: [
prefix + 'layer_001/layer_norm/scale',
],
})
# Decoder主体
prefix = 'decoder/block_%03d/' % i
mapping.update({
'Decoder-Transformer-%d-MultiHeadSelfAttention' % i: [
prefix + 'layer_000/SelfAttention/q',
prefix + 'layer_000/SelfAttention/k',
prefix + 'layer_000/SelfAttention/v',
prefix + 'layer_000/SelfAttention/o',
],
'Decoder-Transformer-%d-MultiHeadSelfAttention-Norm' % i: [
prefix + 'layer_000/layer_norm/scale',
],
'Decoder-Transformer-%d-MultiHeadCrossAttention' % i: [
prefix + 'layer_001/EncDecAttention/q',
prefix + 'layer_001/EncDecAttention/k',
prefix + 'layer_001/EncDecAttention/v',
prefix + 'layer_001/EncDecAttention/o',
],
'Decoder-Transformer-%d-MultiHeadCrossAttention-Norm' % i: [
prefix + 'layer_001/layer_norm/scale',
],
'Decoder-Transformer-%d-FeedForward' % i: [
prefix + 'layer_002/DenseReluDense/wi/kernel',
prefix + 'layer_002/DenseReluDense/wo/kernel',
],
'Decoder-Transformer-%d-FeedForward-Norm' % i: [
prefix + 'layer_002/layer_norm/scale',
],
})
return mapping
class T5_Encoder(T5_Base):
"""Google的T5模型(Encoder)
"""
def get_inputs(self):
"""T5的Encoder的输入只有token_ids
"""
x_in = self.apply(
layer=Input,
shape=(self.sequence_length,),
name='Encoder-Input-Token'
)
return x_in
def apply_embeddings(self, inputs):
"""T5的embedding只有token embedding,
并把relative position embedding准备好,待attention使用。
"""
x = inputs
x = self.apply(
inputs=x,
layer=Embedding,
input_dim=self.vocab_size,
output_dim=self.embedding_size,
embeddings_initializer=self.initializer,
mask_zero=True,
name='Embedding-Token'
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='Encoder-Embedding-Dropout'
)
if self.embedding_size != self.hidden_size:
x = self.apply(
inputs=x,
layer=Dense,
units=self.hidden_size,
kernel_initializer=self.initializer,
name='Encoder-Embedding-Mapping'
)
return x
def apply_main_layers(self, inputs, index):
"""T5的Encoder的主体是基于Self-Attention的模块
顺序:LN --> Att --> Add --> LN --> FFN --> Add
"""
x = inputs
z = self.layer_norm_conds[0]
attention_name = 'Encoder-Transformer-%d-MultiHeadSelfAttention' % index
feed_forward_name = 'Encoder-Transformer-%d-FeedForward' % index
attention_mask = self.compute_attention_mask(index)
position_bias = self.compute_position_bias(x)
# Self Attention
xi = x
x = self.apply(
inputs=self.simplify([x, z]),
layer=LayerNormalization,
center=False,
epsilon=1e-6,
conditional=(z is not None),
hidden_units=self.layer_norm_conds[1],
hidden_activation=self.layer_norm_conds[2],
hidden_initializer=self.initializer,
name='%s-Norm' % attention_name
)
x = self.apply(
inputs=[x, x, x, position_bias],
layer=MultiHeadAttention,
arguments={'p_bias': 't5_relative'},
heads=self.num_attention_heads,
head_size=self.attention_head_size,
key_size=self.attention_key_size,
use_bias=False,
attention_scale=False,
kernel_initializer=self.initializer,
name=attention_name
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='%s-Dropout' % attention_name
)
x = self.apply(
inputs=[xi, x], layer=Add, name='%s-Add' % attention_name
)
# Feed Forward
xi = x
x = self.apply(
inputs=self.simplify([x, z]),
layer=LayerNormalization,
center=False,
epsilon=1e-6,
conditional=(z is not None),
hidden_units=self.layer_norm_conds[1],
hidden_activation=self.layer_norm_conds[2],
hidden_initializer=self.initializer,
name='%s-Norm' % feed_forward_name
)
x = self.apply(
inputs=x,
layer=FeedForward,
units=self.intermediate_size,
activation=self.hidden_act,
use_bias=False,
kernel_initializer=self.initializer,
name=feed_forward_name
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='%s-Dropout' % feed_forward_name
)
x = self.apply(
inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
)
return x
def apply_final_layers(self, inputs):
"""剩余部分
"""
x = inputs
z = self.layer_norm_conds[0]
x = self.apply(
inputs=self.simplify([x, z]),
layer=LayerNormalization,
center=False,
epsilon=1e-6,
conditional=(z is not None),
hidden_units=self.layer_norm_conds[1],
hidden_activation=self.layer_norm_conds[2],
hidden_initializer=self.initializer,
name='Encoder-Output-Norm'
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='Encoder-Output-Dropout'
)
return x
def compute_position_bias(self, inputs=None):
"""T5相对位置编码
"""
if self.position_bias is None:
x = inputs
p = self.apply(
inputs=[x, x],
layer=RelativePositionEmbeddingT5,
input_dim=32,
output_dim=self.num_attention_heads,
bidirectional=True,
embeddings_initializer=self.initializer,
name='Encoder-Embedding-Relative-Position'
)
self.position_bias = p
return self.position_bias
class T5_Decoder(LM_Mask, T5_Base):
"""Google的T5模型(Decoder)
"""
def __init__(self, with_lm=True, **kwargs):
super(T5_Decoder, self).__init__(**kwargs)
self.with_lm = with_lm
def get_inputs(self):
"""T5的Decoder的输入为context序列和token_ids
"""
c_in = self.apply(
layer=Input,
shape=(self.sequence_length, self.hidden_size),
name='Input-Context'
)
x_in = self.apply(
layer=Input,
shape=(self.sequence_length,),
name='Decoder-Input-Token'
)
return [c_in, x_in]
def apply_embeddings(self, inputs):
"""T5的embedding只有token embedding,
并把relative position embedding准备好,待attention使用。
"""
c, x = inputs
x = self.apply(
inputs=x,
layer=Embedding,
input_dim=self.vocab_size,
output_dim=self.embedding_size,
embeddings_initializer=self.initializer,
mask_zero=True,
name='Embedding-Token'
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='Decoder-Embedding-Dropout'
)
if self.embedding_size != self.hidden_size:
x = self.apply(
inputs=x,
layer=Dense,
units=self.hidden_size,
kernel_initializer=self.initializer,
name='Decoder-Embedding-Mapping'
)
return [c, x]
def apply_main_layers(self, inputs, index):
"""T5的Dencoder主体是基于Self-Attention、Cross-Attention的模块
顺序:LN --> Att1 --> Add --> LN --> Att2 --> Add --> LN --> FFN --> Add
"""
c, x = inputs
z = self.layer_norm_conds[0]
self_attention_name = 'Decoder-Transformer-%d-MultiHeadSelfAttention' % index
cross_attention_name = 'Decoder-Transformer-%d-MultiHeadCrossAttention' % index
feed_forward_name = 'Decoder-Transformer-%d-FeedForward' % index
attention_mask = self.compute_attention_mask(index)
position_bias = self.compute_position_bias([x, c])
# Self Attention
xi = x
x = self.apply(
inputs=self.simplify([x, z]),
layer=LayerNormalization,
center=False,
epsilon=1e-6,
conditional=(z is not None),
hidden_units=self.layer_norm_conds[1],
hidden_activation=self.layer_norm_conds[2],
hidden_initializer=self.initializer,
name='%s-Norm' % self_attention_name
)
x = self.apply(
inputs=[x, x, x, attention_mask, position_bias[0]],
layer=MultiHeadAttention,
arguments={
'a_mask': True,
'p_bias': 't5_relative'
},
heads=self.num_attention_heads,
head_size=self.attention_head_size,
key_size=self.attention_key_size,
use_bias=False,
attention_scale=False,
kernel_initializer=self.initializer,
name=self_attention_name
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='%s-Dropout' % self_attention_name
)
x = self.apply(
inputs=[xi, x], layer=Add, name='%s-Add' % self_attention_name
)
# Cross Attention
xi = x
x = self.apply(
inputs=self.simplify([x, z]),
layer=LayerNormalization,
center=False,
epsilon=1e-6,
conditional=(z is not None),
hidden_units=self.layer_norm_conds[1],
hidden_activation=self.layer_norm_conds[2],
hidden_initializer=self.initializer,
name='%s-Norm' % cross_attention_name
)
x = self.apply(
inputs=[x, c, c, position_bias[1]],
layer=MultiHeadAttention,
arguments={
'a_mask': None,
'p_bias': 't5_relative'
},
heads=self.num_attention_heads,
head_size=self.attention_head_size,
key_size=self.attention_key_size,
use_bias=False,
attention_scale=False,
kernel_initializer=self.initializer,
name=cross_attention_name
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='%s-Dropout' % cross_attention_name
)
x = self.apply(
inputs=[xi, x], layer=Add, name='%s-Add' % cross_attention_name
)
# Feed Forward
xi = x
x = self.apply(
inputs=self.simplify([x, z]),
layer=LayerNormalization,
center=False,
epsilon=1e-6,
conditional=(z is not None),
hidden_units=self.layer_norm_conds[1],
hidden_activation=self.layer_norm_conds[2],
hidden_initializer=self.initializer,
name='%s-Norm' % feed_forward_name
)
x = self.apply(
inputs=x,
layer=FeedForward,
units=self.intermediate_size,
activation=self.hidden_act,
use_bias=False,
kernel_initializer=self.initializer,
name=feed_forward_name
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='%s-Dropout' % feed_forward_name
)
x = self.apply(
inputs=[xi, x], layer=Add, name='%s-Add' % feed_forward_name
)
return [c, x]
def apply_final_layers(self, inputs):
"""剩余部分
"""
c, x = inputs
z = self.layer_norm_conds[0]
x = self.apply(
inputs=self.simplify([x, z]),
layer=LayerNormalization,
center=False,
epsilon=1e-6,
conditional=(z is not None),
hidden_units=self.layer_norm_conds[1],
hidden_activation=self.layer_norm_conds[2],
hidden_initializer=self.initializer,
name='Decoder-Output-Norm'
)
x = self.apply(
inputs=x,
layer=Dropout,
rate=self.dropout_rate,
name='Decoder-Output-Dropout'
)
x = self.apply(
inputs=x,
layer=Lambda,
function=lambda x: x / np.sqrt(self.hidden_size),
name='Decoder-Output-Scale'
)
if self.with_lm:
# 预测token概率部分
if self.embedding_size != self.hidden_size:
x = self.apply(
inputs=x,
layer=Dense,
units=self.embedding_size,
kernel_initializer=self.initializer,
name='Decoder-Output-Mapping'
)
x = self.apply(
inputs=x,
layer=Embedding,
arguments={'mode': 'dense'},
name='Embedding-Token'
)
lm_activation = 'softmax' if self.with_lm is True else self.with_lm
x = self.apply(
inputs=x,
layer=Activation,
activation=lm_activation,
name='Dencoder-Output-LM-Activation'
)
return x
def compute_position_bias(self, inputs=None):
"""T5相对位置编码
"""
if self.position_bias is None:
x, c = inputs
p1 = self.apply(
inputs=[x, x],
layer=RelativePositionEmbeddingT5,
input_dim=32,
output_dim=self.num_attention_heads,
bidirectional=False,
embeddings_initializer=self.initializer,
name='Decoder-Embedding-Relative-Position'
)
p2 = self.apply(
inputs=[x, c],
layer=RelativePositionEmbeddingT5,
input_dim=32,
output_dim=self.num_attention_heads,
bidirectional=False,
embeddings_initializer=self.initializer,
name='Decoder-Embedding-Relative-Position'
)
self.position_bias = (p1, p2)
return self.position_bias
class T5(T5_Base):
"""Google的T5模型(Encoder-Decoder)
"""
def __init__(self, **kwargs):
super(T5, self).__init__(**kwargs)
kwargs['layers'] = self.layers
e_name, d_name = 'Encoder', 'Decoder'
if 'name' in kwargs:
e_name = '%s_%s' % (kwargs['name'], e_name)
d_name = '%s_%s' % (kwargs['name'], d_name)
del kwargs['name'] # 防止重复传参
self._encoder = T5_Encoder(name=e_name, **kwargs)
self._decoder = T5_Decoder(name=d_name, **kwargs)
def build(self, **kwargs):
"""同时构建Encoder和Decoder
"""
self._encoder.build(**kwargs)
self._decoder.build(**kwargs)
self.encoder = self._encoder.model
self.decoder = self._decoder.model
self.inputs = self.encoder.inputs + self.decoder.inputs[1:]
self.outputs = self.decoder(
self.encoder.outputs + self.decoder.inputs[1:]
)
self.model = Model(self.inputs, self.outputs)
def extend_with_language_model(BaseModel):
"""添加下三角的Attention Mask(语言模型用)
"""
class LanguageModel(LM_Mask, BaseModel):
"""带下三角Attention Mask的派生模型
"""
def __init__(self, *args, **kwargs):
super(LanguageModel, self).__init__(*args, **kwargs)
self.with_mlm = self.with_mlm or True
return LanguageModel
def extend_with_unified_language_model(BaseModel):
"""添加UniLM的Attention Mask(Seq2Seq模型用)
"""
class UnifiedLanguageModel(UniLM_Mask, BaseModel):
"""带UniLM的Attention Mask的派生模型
UniLM: https://arxiv.org/abs/1905.03197
"""
def __init__(self, *args, **kwargs):
super(UnifiedLanguageModel, self).__init__(*args, **kwargs)
self.with_mlm = self.with_mlm or True
return UnifiedLanguageModel
def build_transformer_model(
config_path=None,
checkpoint_path=None,
model='bert',
application='encoder',
return_keras_model=True,
**kwargs
):
"""根据配置文件构建模型,可选加载checkpoint权重
"""
configs = {}
if config_path is not None:
configs.update(json.load(open(config_path)))
configs.update(kwargs)
if 'max_position' not in configs:
configs['max_position'] = configs.get('max_position_embeddings', 512)
if 'dropout_rate' not in configs:
configs['dropout_rate'] = configs.get('hidden_dropout_prob')
if 'segment_vocab_size' not in configs:
configs['segment_vocab_size'] = configs.get('type_vocab_size', 2)
models = {
'bert': BERT,
'albert': ALBERT,
'albert_unshared': ALBERT_Unshared,
'roberta': BERT,
'nezha': NEZHA,
'electra': ELECTRA,
'gpt_openai': GPT_OpenAI,
'gpt2_ml': GPT2_ML,
't5': T5,
't5_encoder': T5_Encoder,
't5_decoder': T5_Decoder,
}
if is_string(model):
model = model.lower()
MODEL = models[model]
else:
MODEL = model
application = application.lower()
if application in ['lm', 'unilm'] and model in ['electra', 't5']:
raise ValueError(
'"%s" model can not be used as "%s" application.\n' %
(model, application)
)
if application == 'lm':
MODEL = extend_with_language_model(MODEL)
elif application == 'unilm':
MODEL = extend_with_unified_language_model(MODEL)
transformer = MODEL(**configs)
transformer.build(**configs)
if checkpoint_path is not None:
transformer.load_weights_from_checkpoint(checkpoint_path)
if return_keras_model:
return transformer.model
else:
return transformer
| [
"xichen_ghost@outlook.com"
] | xichen_ghost@outlook.com |
3dba9cc472654cbd43ec5366ccd01fa7bd6f03de | 9edaf93c833ba90ae9a903aa3c44c407a7e55198 | /travelport/models/type_start_end_time.py | 9a4b3e1df4ef31fe8155d79f3f8f6c17d3a73f86 | [] | no_license | tefra/xsdata-samples | c50aab4828b8c7c4448dbdab9c67d1ebc519e292 | ef027fe02e6a075d8ed676c86a80e9647d944571 | refs/heads/main | 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 | Python | UTF-8 | Python | false | false | 1,920 | py | from __future__ import annotations
from dataclasses import dataclass, field
__NAMESPACE__ = "http://www.travelport.com/schema/vehicle_v52_0"
@dataclass
class TypeStartEndTime:
    """
    Used to specify earliest and latest pickup/dropoff times for a vehicle.
    Parameters
    ----------
    time
        The time in 24 hour clock format.
    requirement_passed
        When true, the time requirement has been met.
    mon
    tue
    wed
    thu
    fri
    sat
    sun
        mon..sun are optional per-day flags — presumably marking which
        days of the week this time window applies to; confirm against the
        Travelport vehicle schema.
    """
    # xsdata binding metadata: maps this dataclass to the XML complex
    # type "typeStartEndTime".
    class Meta:
        name = "typeStartEndTime"
    # XML attribute "Time" (required by the schema): time in 24-hour
    # clock format. The format itself is not validated here.
    time: None | str = field(
        default=None,
        metadata={
            "name": "Time",
            "type": "Attribute",
            "required": True,
        }
    )
    # Optional XML attribute "RequirementPassed": true once the time
    # requirement has been met.
    requirement_passed: None | bool = field(
        default=None,
        metadata={
            "name": "RequirementPassed",
            "type": "Attribute",
        }
    )
    # Optional day-of-week XML attributes "Mon".."Sun" (one field each).
    mon: None | bool = field(
        default=None,
        metadata={
            "name": "Mon",
            "type": "Attribute",
        }
    )
    tue: None | bool = field(
        default=None,
        metadata={
            "name": "Tue",
            "type": "Attribute",
        }
    )
    wed: None | bool = field(
        default=None,
        metadata={
            "name": "Wed",
            "type": "Attribute",
        }
    )
    thu: None | bool = field(
        default=None,
        metadata={
            "name": "Thu",
            "type": "Attribute",
        }
    )
    fri: None | bool = field(
        default=None,
        metadata={
            "name": "Fri",
            "type": "Attribute",
        }
    )
    sat: None | bool = field(
        default=None,
        metadata={
            "name": "Sat",
            "type": "Attribute",
        }
    )
    sun: None | bool = field(
        default=None,
        metadata={
            "name": "Sun",
            "type": "Attribute",
        }
    )
| [
"chris@komposta.net"
] | chris@komposta.net |
40a852ce1416267d85f30763734bc0bd82b6157b | d71b7866fecab36e5116318fa671265b7471cc08 | /substrabac/substrapp/serializers/algo.py | e18f32498b0a5e979e4be4bef43cf98e72acd56b | [
"Apache-2.0"
] | permissive | sibeshkar/substrabac | dc00bf9a4190720391811dcdf6de2c91c880d760 | c8a7623cbc4454da0caff99311e415f647a9edab | refs/heads/master | 2020-03-23T05:02:46.054483 | 2018-06-28T07:32:30 | 2018-06-28T07:32:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | from rest_framework import serializers
from substrapp.models import Algo
class AlgoSerializer(serializers.ModelSerializer):
    """DRF ModelSerializer exposing every field of the Algo model."""
    class Meta:
        # '__all__' serializes all model fields; any field later added to
        # Algo is exposed automatically through this serializer.
        model = Algo
        fields = '__all__'
| [
"guillaumecisco@gmail.com"
] | guillaumecisco@gmail.com |
3fb4d4b5cef44e193280b2afc153c3aae446bfcc | d19469cce07cb05673d0d5269d6a84c555bc1eb8 | /IDS_Model-master/Extra_code/2.py | b5fda321018d3a3b3e5ea45bd77855e40cde2146 | [] | no_license | s3r-be/test_files | 4c438731812e1ae0238dd9ea2cad9e55c7d79404 | ac415175531034c0ee900f5fbbff906121e821ec | refs/heads/master | 2022-12-12T21:59:29.486073 | 2020-06-22T10:42:42 | 2020-06-22T10:42:42 | 239,291,484 | 0 | 0 | null | 2022-12-08T06:24:34 | 2020-02-09T11:16:31 | Python | UTF-8 | Python | false | false | 1,067 | py | import pandas as pd
import time

import pandas as pd

start = time.time()

# Load the capture and pull out the human-readable packet summary column.
df = pd.read_csv('first_dataset_3_attacks.csv', sep=',')
info = list(df["Info"])
df = df.drop(axis=1, columns="Info")

# Extract one value per kept packet:
#   - HTTP GET rows: the first query-string value ("GET /?t=25.3 ..." -> "25.3")
#   - ICMP Echo rows: the literal "ping"
# All other rows are dropped from the frame.
num = []
rows_to_drop = []
for i, line in enumerate(info):
    if line.startswith("GET"):
        # NOTE(review): assumes every GET summary contains '=' —
        # raises IndexError otherwise (unchanged from original behavior).
        after_eq = line.split("=")[1]
        num.append(after_eq.split(" ")[0])
    elif line.startswith("Echo"):
        num.append("ping")
    else:
        rows_to_drop.append(i)
# Drop all unmatched rows in one call (O(n)) instead of one df.drop(i)
# per row (O(n^2)); also removes the dead `i += 1` that had no effect
# inside the for loop, and the unused `s = []` accumulator.
df = df.drop(index=rows_to_drop)
print(len(num))

# Label each extracted value:
#   0 = normal, 1 = wrong setup (numeric value < 2),
#   2 = ddos (ping), 3 = data type probing (non-numeric; value set to -1)
normality = []
for i, value in enumerate(num):
    if value.replace(".", "", 1).isdigit():
        if float(value) < 2:
            normality.append(1)  # 1 - wrong setup
        else:
            normality.append(0)  # normal
    else:
        if value == "ping":
            normality.append(2)  # ddos
        else:
            normality.append(3)  # data type probing
            num[i] = -1

# After the drop, len(num) == len(df); list assignment is positional.
df["Value"] = num
df["normality"] = normality
df.to_csv('c_dataset.csv', index=False)
print (time.time()-start) | [
"rhish9h@gmail.com"
] | rhish9h@gmail.com |
ea67b257d486b0630025a0e3b1ae137a45ba25a4 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/sieve-big-3533.py | 9879255d0fdf486ca055ce49640b6af70af43a61 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,754 | py | # A resizable list of integers
class Vector(object):
    # Backing storage; always at least one allocated slot after __init__.
    items: [int] = None
    # Logical number of elements (<= capacity).
    size: int = 0

    def __init__(self:"Vector"):
        self.items = [0]

    # Returns current capacity (allocated slots, not logical size)
    def capacity(self:"Vector") -> int:
        return len(self.items)

    # Increases capacity of vector by one element; returns new capacity
    def increase_capacity(self:"Vector") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Appends one item to end of vector, growing storage if needed
    def append(self:"Vector", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends many items to end of vector
    def append_all(self:"Vector", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)

    # Removes the item at index idx; out-of-range indices are ignored.
    def remove_at(self:"Vector", idx: int) -> object:
        # Fix: also reject idx >= size. The original decremented size for
        # any idx >= size - shrinking the vector without removing anything.
        if idx < 0 or idx >= self.size:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Retrieves an item at a given index (idx must be < size)
    def get(self:"Vector", idx: int) -> int:
        return self.items[idx]

    # Retrieves the current size of the vector
    def length(self:"Vector") -> int:
        return self.size
# A resizable list of integers (synthetic variant with duplicated methods)
class Vector2(object):
    # Backing storage; always at least one allocated slot after __init__.
    items: [int] = None
    # Unused duplicate field (never assigned by any method here).
    items2: [int] = None
    # Logical number of elements (<= capacity).
    size: int = 0
    # Unused duplicate field.
    size2: int = 0

    def __init__(self:"Vector2"):
        self.items = [0]

    # Returns current capacity (allocated slots, not logical size)
    def capacity(self:"Vector2") -> int:
        return len(self.items)

    # Duplicate of capacity()
    def capacity2(self:"Vector2") -> int:
        return len(self.items)

    # Increases capacity of vector by one element; returns new capacity
    def increase_capacity(self:"Vector2") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Duplicate of increase_capacity()
    def increase_capacity2(self:"Vector2") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Appends one item to end of vector, growing storage if needed
    def append(self:"Vector2", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Same as append(); item2 is accepted but unused
    def append2(self:"Vector2", item: int, item2: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends many items to end of vector
    def append_all(self:"Vector2", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)

    # Same as append_all(); new_items2 is accepted but unused
    def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
        item:int = 0
        item2:int = 0
        for item in new_items:
            self.append(item)

    # Removes the item at index idx; out-of-range indices are ignored.
    def remove_at(self:"Vector2", idx: int) -> object:
        # Fix: also reject idx >= size. The original decremented size for
        # any idx >= size - shrinking the vector without removing anything.
        if idx < 0 or idx >= self.size:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Same as remove_at() (same fix applied); idx2 is accepted but unused
    def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
        if idx < 0 or idx >= self.size:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Retrieves an item at a given index (idx must be < size)
    def get(self:"Vector2", idx: int) -> int:
        return self.items[idx]

    # Same as get(); idx2 is accepted but unused
    def get2(self:"Vector2", idx: int, idx2: int) -> int:
        return self.items[idx]

    # Retrieves the current size of the vector
    def length(self:"Vector2") -> int:
        return self.size

    # Duplicate of length()
    def length2(self:"Vector2") -> int:
        return self.size
# A resizable list of integers (synthetic variant with triplicated methods)
class Vector3(object):
    # Backing storage; always at least one allocated slot after __init__.
    items: [int] = None
    # Unused duplicate fields (never assigned by any method here).
    items2: [int] = None
    items3: [int] = None
    # Logical number of elements (<= capacity).
    size: int = 0
    # Unused duplicate fields.
    size2: int = 0
    size3: int = 0

    def __init__(self:"Vector3"):
        self.items = [0]

    # Returns current capacity (allocated slots, not logical size)
    def capacity(self:"Vector3") -> int:
        return len(self.items)

    # Duplicate of capacity()
    def capacity2(self:"Vector3") -> int:
        return len(self.items)

    # Duplicate of capacity()
    def capacity3(self:"Vector3") -> int:
        return len(self.items)

    # Increases capacity of vector by one element; returns new capacity
    def increase_capacity(self:"Vector3") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Duplicate of increase_capacity()
    def increase_capacity2(self:"Vector3") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Duplicate of increase_capacity()
    def increase_capacity3(self:"Vector3") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Appends one item to end of vector, growing storage if needed
    def append(self:"Vector3", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Same as append(); item2 is accepted but unused
    def append2(self:"Vector3", item: int, item2: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Same as append(); item2 and item3 are accepted but unused
    def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends many items to end of vector
    def append_all(self:"Vector3", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)

    # Same as append_all(); new_items2 is accepted but unused
    def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
        item:int = 0
        item2:int = 0
        for item in new_items:
            self.append(item)

    # Same as append_all(); new_items2 and new_items3 are accepted but unused
    def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        for item in new_items:
            self.append(item)

    # Removes the item at index idx; out-of-range indices are ignored.
    def remove_at(self:"Vector3", idx: int) -> object:
        # Fix: also reject idx >= size. The original decremented size for
        # any idx >= size - shrinking the vector without removing anything.
        if idx < 0 or idx >= self.size:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Same as remove_at() (same fix applied); idx2 is accepted but unused
    def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
        if idx < 0 or idx >= self.size:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Same as remove_at() (same fix applied); idx2 and idx3 are unused
    def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
        if idx < 0 or idx >= self.size:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Retrieves an item at a given index (idx must be < size)
    def get(self:"Vector3", idx: int) -> int:
        return self.items[idx]

    # Same as get(); idx2 is accepted but unused
    def get2(self:"Vector3", idx: int, idx2: int) -> int:
        return self.items[idx]

    # Same as get(); idx2 and idx3 are accepted but unused
    def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
        return self.items[idx]

    # Retrieves the current size of the vector
    def length(self:"Vector3") -> int:
        return self.size

    # Duplicate of length()
    def length2(self:"Vector3") -> int:
        return self.size

    # Duplicate of length()
    def length3(self:"Vector3") -> int:
        return self.size
# A resizable list of integers
# (generated benchmark code: the *2/*3/*4 members are intentional
# duplicates and their extra fields/parameters are deliberately unused)
class Vector4(object):
    items: [int] = None
    items2: [int] = None
    items3: [int] = None
    items4: [int] = None
    size: int = 0
    size2: int = 0
    size3: int = 0
    size4: int = 0
    def __init__(self:"Vector4"):
        # Start with capacity 1 (a single zeroed slot); size stays 0.
        self.items = [0]
    # Returns current capacity
    def capacity(self:"Vector4") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity2(self:"Vector4") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity3(self:"Vector4") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity4(self:"Vector4") -> int:
        return len(self.items)
    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity2(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity3(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity4(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Appends one item to end of vector
    def append(self:"Vector4", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector
    def append2(self:"Vector4", item: int, item2: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector
    def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector
    def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends many items to end of vector
    def append_all(self:"Vector4", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector
    def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
        item:int = 0
        item2:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector
    def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        for item in new_items:
            self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for $ID in new_items:
self.append(item)
    # Removes an item from the middle of vector
    # NOTE(review): any idx >= 0 decrements size even when idx >= size;
    # callers are expected to pass in-range indices.
    def remove_at(self:"Vector4", idx: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector
    def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector
    def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector
    def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Retrieves an item at a given index
    def get(self:"Vector4", idx: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index
    def get2(self:"Vector4", idx: int, idx2: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index
    def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index
    def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
        return self.items[idx]
    # Retrieves the current size of the vector
    def length(self:"Vector4") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length2(self:"Vector4") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length3(self:"Vector4") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length4(self:"Vector4") -> int:
        return self.size
# A resizable list of integers
# (generated benchmark code: the *2..*5 members are intentional duplicates
# and their extra fields/parameters are deliberately unused)
class Vector5(object):
    items: [int] = None
    items2: [int] = None
    items3: [int] = None
    items4: [int] = None
    items5: [int] = None
    size: int = 0
    size2: int = 0
    size3: int = 0
    size4: int = 0
    size5: int = 0
    def __init__(self:"Vector5"):
        # Start with capacity 1 (a single zeroed slot); size stays 0.
        self.items = [0]
    # Returns current capacity
    def capacity(self:"Vector5") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity2(self:"Vector5") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity3(self:"Vector5") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity4(self:"Vector5") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity5(self:"Vector5") -> int:
        return len(self.items)
    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity2(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity3(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity4(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity5(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Appends one item to end of vector
    def append(self:"Vector5", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector
    def append2(self:"Vector5", item: int, item2: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector
    def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector
    def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector
    def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends many items to end of vector
    def append_all(self:"Vector5", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector
    def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
        item:int = 0
        item2:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector
    def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector
    def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        item4:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector
    def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        item4:int = 0
        item5:int = 0
        for item in new_items:
            self.append(item)
    # Removes an item from the middle of vector
    # NOTE(review): any idx >= 0 decrements size even when idx >= size;
    # callers are expected to pass in-range indices.
    def remove_at(self:"Vector5", idx: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector
    def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector
    def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector
    def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector
    def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Retrieves an item at a given index
    def get(self:"Vector5", idx: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index
    def get2(self:"Vector5", idx: int, idx2: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index
    def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index
    def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index
    def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
        return self.items[idx]
    # Retrieves the current size of the vector
    def length(self:"Vector5") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length2(self:"Vector5") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length3(self:"Vector5") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length4(self:"Vector5") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length5(self:"Vector5") -> int:
        return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
    doubling_limit:int = 1000
    # Overriding to do fewer resizes: double the backing list while it is
    # small, falling back to one-slot growth once the doubling limit is hit.
    def increase_capacity(self:"DoublingVector") -> int:
        if (self.capacity() > self.doubling_limit // 2):
            # Doubling limit reached: standard single-element growth.
            self.items = self.items + [0]
        else:
            # Still below the limit: double the capacity in one step.
            self.items = self.items + self.items
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
# NOTE: all DoublingVectorN variants below subclass the original Vector
# (not VectorN); the numbered increase_capacity* methods are generated
# duplicates of increase_capacity.
class DoublingVector2(Vector):
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector2") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity2(self:"DoublingVector2") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector3(Vector):
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    doubling_limit3:int = 1000
    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector3") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity2(self:"DoublingVector3") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity3(self:"DoublingVector3") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector4(Vector):
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    doubling_limit3:int = 1000
    doubling_limit4:int = 1000
    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector4") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity2(self:"DoublingVector4") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity3(self:"DoublingVector4") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity4(self:"DoublingVector4") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector5(Vector):
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    doubling_limit3:int = 1000
    doubling_limit4:int = 1000
    doubling_limit5:int = 1000
    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity2(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity3(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity4(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity5(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
    v:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
# (the *2..*5 variants below are generated duplicates: the extra i*/j*
# parameters and v* locals are deliberately unused)
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
    v:Vector = None
    v2:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
    v:Vector = None
    v2:Vector = None
    v3:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
    v:Vector = None
    v2:Vector = None
    v3:Vector = None
    v4:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
    v:Vector = None
    v2:Vector = None
    v3:Vector = None
    v4:Vector = None
    v5:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
# Sieve of Eratosthenes (not really)
# Trial-division filter: for each surviving element k = v.get(i), remove
# every later element divisible by k, mutating v in place. Starting from
# [2, n) this leaves exactly the primes. The *2..*5 variants below are
# generated duplicates whose extra vector arguments are deliberately unused.
def sieve(v:Vector) -> object:
    i:int = 0
    j:int = 0
    k:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
def sieve2(v:Vector, v2:Vector) -> object:
    i:int = 0
    i2:int = 0
    j:int = 0
    j2:int = 0
    k:int = 0
    k2:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
    i:int = 0
    i2:int = 0
    i3:int = 0
    j:int = 0
    j2:int = 0
    j3:int = 0
    k:int = 0
    k2:int = 0
    k3:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
    i:int = 0
    i2:int = 0
    i3:int = 0
    i4:int = 0
    j:int = 0
    j2:int = 0
    j3:int = 0
    j4:int = 0
    k:int = 0
    k2:int = 0
    k3:int = 0
    k4:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
    i:int = 0
    i2:int = 0
    i3:int = 0
    i4:int = 0
    i5:int = 0
    j:int = 0
    j2:int = 0
    j3:int = 0
    j4:int = 0
    j5:int = 0
    k:int = 0
    k2:int = 0
    k3:int = 0
    k4:int = 0
    k5:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
# Input parameter
n:int = 50
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
# (v2..v5 are built only as extra benchmark workload; only v is sieved
# and printed)
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print
# Prints the primes in [2, 50), one per line.
while i < v.length():
    print(v.get(i))
    i = i + 1
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
b55a5033285fe85e350014f77edb19070e901038 | 947acace352c4b2e719e94600f7447d1382adfe2 | /env/Scripts/painter.py | 3b388efab78cf5cdae0a57d979b2692a59b44692 | [] | no_license | skconan/autoPlayAtariBreakout | ac9de1ef3342e81b57519fe588eb88e9bb6c6695 | 3a7167f31d810951b099c30bfceed0da6dcdf12f | refs/heads/master | 2022-12-21T15:21:44.552250 | 2017-10-10T08:54:18 | 2017-10-10T08:54:18 | 106,190,698 | 2 | 2 | null | 2022-12-11T06:28:01 | 2017-10-08T16:18:04 | Python | UTF-8 | Python | false | false | 2,215 | py | #!c:\users\skconan\desktop\พี่สอนน้อง\env\scripts\python.exe
#
# The Python Imaging Library
# $Id$
#
# this demo script illustrates pasting into an already displayed
# photoimage. note that the current version of Tk updates the whole
# image every time we paste, so to get decent performance, we split
# the image into a set of tiles.
#
import sys
if sys.version_info[0] > 2:
import tkinter
else:
import Tkinter as tkinter
from PIL import Image, ImageTk
#
# painter widget
class PaintCanvas(tkinter.Canvas):
    """Canvas displaying a PIL image as a grid of 32x32 tiles.

    Tk repaints the whole PhotoImage on every paste, so the image is split
    into tiles and only the tiles under an edit are re-pasted. Dragging
    with mouse button 1 converts the area under the cursor to grayscale.
    """
    def __init__(self, master, image):
        tkinter.Canvas.__init__(self, master,
                                width=image.size[0], height=image.size[1])
        # fill the canvas
        self.tile = {}
        self.tilesize = tilesize = 32
        xsize, ysize = image.size
        for x in range(0, xsize, tilesize):
            for y in range(0, ysize, tilesize):
                # clamp each tile's box at the right/bottom image edges
                box = x, y, min(xsize, x+tilesize), min(ysize, y+tilesize)
                tile = ImageTk.PhotoImage(image.crop(box))
                self.create_image(x, y, image=tile, anchor=tkinter.NW)
                self.tile[(x, y)] = box, tile
        self.image = image
        self.bind("<B1-Motion>", self.paint)
    def paint(self, event):
        """Mouse-drag handler: grayscale the 20x20 box around the cursor."""
        xy = event.x - 10, event.y - 10, event.x + 10, event.y + 10
        im = self.image.crop(xy)
        # process the image in some fashion
        im = im.convert("L")
        self.image.paste(im, xy)
        self.repair(xy)
    def repair(self, box):
        """Re-paste every tile overlapping `box` so the screen matches
        self.image."""
        # update canvas
        dx = box[0] % self.tilesize
        dy = box[1] % self.tilesize
        for x in range(box[0]-dx, box[2]+1, self.tilesize):
            for y in range(box[1]-dy, box[3]+1, self.tilesize):
                try:
                    xy, tile = self.tile[(x, y)]
                    tile.paste(self.image.crop(xy))
                except KeyError:
                    pass # outside the image
        self.update_idletasks()
#
# main
# Usage: painter <imagefile> — opens the image (converted to RGB if
# needed) in a paintable Tk window.
if len(sys.argv) != 2:
    print("Usage: painter file")
    sys.exit(1)
root = tkinter.Tk()
im = Image.open(sys.argv[1])
if im.mode != "RGB":
    im = im.convert("RGB")
PaintCanvas(root, im).pack()
root.mainloop()
| [
"supakit.kr@gmail.com"
] | supakit.kr@gmail.com |
66a7397ff8de2cf95bd95e3e9c8d7c740854c517 | d59521ab1a25df17f8b945847e1bc51745ff29c9 | /data/01_DataCleaning.py | 29b1af4982599419c08fb3b7af0baeb08dc6f923 | [] | no_license | geneveeves/foreign-aid-analysis | 45d0b40b34d9209edb4c6b3030b3e63b19c2cc98 | 94c7adb86e57cf3d4b49322553c58bd72d98d485 | refs/heads/master | 2020-12-29T10:26:29.331410 | 2020-03-14T00:42:37 | 2020-03-14T00:42:37 | 238,573,717 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 833 | py | import numpy as np
import pandas as pd
#Data sets to clean:
# 1. GINI Index (World Bank)
# 2. Internally Displaced Persons (World Bank)
# 3. Total Population (World Bank)
# 4. Gender Parity Index - School Enrollment (World Bank)
# US Foreign Aid Spending (USAID)
# Target Data:
# Governance Indicators
#Function to clean & pickle first 4 datasets
def WorldBankData(filename, varname, start_year=2002, end_year=2019):
    """Extract 'Country Name', 'Country Code' and the year columns
    [start_year, end_year) from a raw World Bank CSV and pickle the
    result to ./interim/<varname>.pkl.

    Rows 0 and 2 of the raw file are skipped (metadata rows in the World
    Bank exports); row 1 supplies the header. The year range is now
    parameterized; the defaults preserve the original 2002-2018 span.
    Returns None (the return value of DataFrame.to_pickle).
    """
    import os  # local import keeps the module-level imports unchanged
    years = [str(year) for year in range(start_year, end_year)]
    df = pd.read_csv(filename, skiprows=[0,2])
    # Robustness: make sure the output directory exists before writing.
    os.makedirs('./interim', exist_ok=True)
    return df[['Country Name','Country Code'] + years].to_pickle(
        './interim/' + varname + '.pkl')
# Clean and pickle each raw World Bank extract (expects the ./raw/*.csv
# inputs to exist; writes ./interim/<name>.pkl).
WorldBankData('./raw/GINI.csv', 'gini')
WorldBankData('./raw/InternallyDisplacedPersons.csv', 'displaced')
WorldBankData('./raw/Population.csv', 'population')
WorldBankData('./raw/SchoolEnrollment_GenderParityIndex.csv', 'gpi_school') | [
"gmmcguire2@gmail.com"
] | gmmcguire2@gmail.com |
0738fce7d88f73a4db36cdadc81d56e408103033 | 370d1cef798460dc6d0682d78b3b619acdb24506 | /HER/rcnn/large_dataset.py | 53b5ba3419fb234043ce03ec4f9baa4263510cb8 | [] | no_license | xuexidi/ActiveVisionManipulation | 9d3c83c2db5225bc0ca17c0ca77370affd3af132 | 3ec06f7bce2a35c56674d11302ba36c30baa5b08 | refs/heads/master | 2023-04-01T21:21:02.813851 | 2019-05-23T19:11:01 | 2019-05-23T19:11:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,478 | py | import sys
# Plotting/visualization imports are optional: skip them for the RL entry
# points, which run headless and must not require matplotlib.
if ('HER/examples/run.py' not in sys.argv[0]) and ('HER/examples/visualize.py' not in sys.argv[0]):
    try:
        import matplotlib
        matplotlib.use('Agg')  # non-interactive backend (no display needed)
        import matplotlib.pyplot as plt
        import visualize
    # Fixed: catch Exception instead of a bare `except:` so that
    # KeyboardInterrupt/SystemExit still propagate; the import remains
    # best-effort by design.
    except Exception:
        print ('import failed')
from Mask_RCNN.utils import Dataset
import numpy as np
import sys
sys.path.append('Mask_RCNN')
from time import time
import pickle
import os
from scipy.misc import imread, imsave
#things to check
#1. need to preprocess/normalize image/mask input?
NUMTHREADS=8  # worker processes used by the __main__ generation loop
DATADIR = 'ap-v3_data'  # root directory for generated images/masks
# A hack to get around not being able to use tkinter: replace plt.show
# with a call that writes the current figure to disk instead.
def make_save(name):
    """Monkey-patch plt.show so the next show() saves the figure to `name`
    and clears it.

    Uses a named closure instead of the original tuple-returning lambda,
    so the patched show() returns None like the real plt.show.
    """
    def _save_instead_of_show():
        plt.savefig(name, bbox_inches = 'tight', dpi = 300)
        plt.clf()
    plt.show = _save_instead_of_show
class Paths(object):
    """Filesystem layout for a dataset directory.

    <datadir>/img/<i>.png holds RGB renders and <datadir>/mask/<i>.npz
    holds the compressed mask arrays for sample id `i`.
    """
    def __init__(self, datadir):
        self.datadir = datadir
        self._ensure_dir(datadir)
        self.maskdir = os.path.join(datadir, 'mask')
        self._ensure_dir(self.maskdir)
        self.imgdir = os.path.join(datadir, 'img')
        self._ensure_dir(self.imgdir)
    def _ensure_dir(self, path):
        # Create `path` only if it is missing (idempotent across runs).
        if not os.path.exists(path):
            os.mkdir(path)
    def mask_fn_for_id(self, i):
        return os.path.join(self.maskdir, '%d.npz' % i)
    def img_fn_for_id(self, i):
        return os.path.join(self.imgdir, '%d.png' % i)
    def save_img_to_id(self, i, img):
        imsave(self.img_fn_for_id(i), img)
    def load_img_from_id(self, i):
        return imread(self.img_fn_for_id(i))
    def save_mask_to_id(self, i, mask):
        np.savez_compressed(self.mask_fn_for_id(i), mask)
    def load_mask_from_id(self, i):
        return np.load(self.mask_fn_for_id(i))['arr_0']
class MujocoData(Dataset, Paths):
    """Mask_RCNN Dataset backed by the on-disk layout provided by Paths."""
    def __init__(self, datadir='data'):
        Dataset.__init__(self)
        Paths.__init__(self, datadir)
    def load_mujoco(self, start_id = 0, end_id = 1000):
        # Register the single "box" class and one image record per id.
        self.add_class("mujoco", 1, "box")
        for i in range(start_id, end_id):
            self.add_image("mujoco", i, None)
    def load_image(self, i):
        return self.load_img_from_id(i)
    def load_mask(self, i):
        """Return (mask, class_ids); masks are stacked along the last axis."""
        mask = self.load_mask_from_id(i)
        num_objs = mask.shape[-1]
        # Every instance is class 1 ("box"). Fixed the duplicated
        # `cls = cls = ...` assignment typo from the original.
        cls = np.array([1]*num_objs).astype(np.int32)
        return mask, cls
class GenerateData(Paths):
    # Renders randomized scenes with `renderer` and writes image/mask
    # pairs into the Paths directory layout.
    def __init__(self, renderer, datadir):
        self.renderer = renderer
        super().__init__(datadir)
    def generate(self, count=1000, tid = None):
        # Worker `tid` (if given) handles ids tid, tid+NUMTHREADS, ..., so
        # parallel workers partition [0, count) without overlap.
        print ('tid is', tid)
        iterator = (range(count)
                    if tid is None
                    else range(tid, count, NUMTHREADS))
        for i in iterator:
            print("generating image %d" %i)
            img, masks = self._generate()
            self.save_mask_to_id(i, masks)
            self.save_img_to_id(i, img)
    def _generate(self):
        # Randomize the scene, then render the RGB frame and the box masks.
        # NOTE(review): render_box() appears to be amodal by default and
        # override_amodal=False the visible (modal) mask — confirm against
        # the Renderer implementation.
        self.renderer.reset()
        self.renderer.rand_state()
        img = self.renderer.render_rgb()
        box = self.renderer.render_box()
        box_modal = self.renderer.render_box(override_amodal = False)
        THRESHOLD = 50 #number of pixels visible
        exists_box = np.sum(box) > THRESHOLD
        #modality
        # Additionally require the modal/amodal pixel ratio to exceed 10%.
        RATIO_THRESHOLD = 0.1
        exists_box = exists_box and (float(np.sum(box_modal)) / np.sum(box) > RATIO_THRESHOLD)
        cls = []
        masks = []
        if exists_box:
            masks.append(box)
            cls.append(1)
        else:
            print('warning: no box found')
        if exists_box:
            masks = np.stack(masks, axis = 2)
        else:
            # No detection: return an empty (h, w, 0) mask stack.
            h, w, _ = img.shape
            masks = np.zeros((h, w, 0)).astype(np.float32)
        #cls = np.array(cls).astype(np.int32)
        return img, masks#, cls
def visualize_data(data):
    """Save mask-overlay visualizations for the first ten images in `data`
    under debug/ (via the plt.show monkey-patch installed by make_save)."""
    for sample_id in data.image_ids[:10]:
        rgb = data.load_image(sample_id)
        mask, class_ids = data.load_mask(sample_id)
        make_save("debug/%s" % sample_id)
        visualize.display_top_masks(rgb, mask, class_ids, data.class_names, limit = 1)
def get_train_set():
    """Training split: image ids [0, 8000) loaded from DATADIR."""
    dataset = MujocoData(DATADIR)
    dataset.load_mujoco(0, 8000)
    dataset.prepare()
    return dataset
def get_val_set():
    """Validation split: image ids [8000, 10000) loaded from DATADIR."""
    dataset = MujocoData(DATADIR)
    dataset.load_mujoco(8000, 10000)
    dataset.prepare()
    return dataset
def generate_wrapper(tid = None):
    """Build the gym env + renderer and generate samples as worker `tid`.

    NOTE(review): `datadir` and `count` are module globals assigned only
    inside the __main__ block below, so this function assumes that block
    has already run — verify before importing it from elsewhere.
    """
    import HER.envs
    import gym
    env = gym.make('active_pusher-v3')
    from renderer import Renderer
    renderer = Renderer(env)
    generator = GenerateData(renderer, datadir)
    generator.generate(count, tid)
if __name__ == '__main__':
    do_generate = True   # produce images/masks
    do_vis = False       # dump debug visualizations instead
    count = 10000        # total number of samples, shared across workers
    datadir=DATADIR
    if do_generate:
        if False:  # flip to run single-process (easier debugging)
            generate_wrapper()
        else:
            from multiprocessing import Process
            processes = []
            for j in range(NUMTHREADS):
                # Fixed: pass j via args= instead of a lambda closure —
                # lambdas are not picklable under the "spawn" start method,
                # and a closure reads j late instead of at submission time.
                process = Process(target = generate_wrapper, args = (j,))
                process.start()
                processes.append(process)
            # Wait for all workers to finish before exiting.
            for process in processes:
                process.join()
    if do_vis:
        data = MujocoData(datadir)
        data.load_mujoco(0, count)
        data.prepare()
        visualize_data(data)
| [
"ricsonc@andrew.cmu.edu"
] | ricsonc@andrew.cmu.edu |
751bdf6f60dff41304f0776cb5d494154868f8d8 | e8f5940c3ebf36dcbe8afa6f2c84b6ee13059fcf | /007-dictionaries.py | feb48935b3261065e0178c5e57f8d86365f863a6 | [] | no_license | thilinasandaru1/python-fundamentals | 26c4071a0a00bbe116766d908cfd9ecda5483993 | c28077520d249684dee356b4a554306a82d04247 | refs/heads/master | 2021-05-27T07:50:40.711658 | 2020-04-11T02:17:10 | 2020-04-11T02:17:10 | 254,238,047 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 825 | py | # A dictionary is a collection which is unordered, changeable and indexed
# No duplicate members.
# create dictionary
person = {
'first_name': 'John',
'last_name': 'Doe',
'age': 30
}
# create dictionary using a constructor
person2 = dict(
first_name='John',
last_name='Doe',
age=30
)
# get value
print(person['first_name'])
print(person.get('last_name'))
# add key/value
person['phone'] = '555-5555'
# get keys
print(person.keys())
# get items
print(person.items())
# make a copy
person3 = person.copy()
# remove item
del(person['age'])
person.pop('phone')
# clear whole dictionary
person.clear()
# get length
print(len(person3))
# list of dictionary
people = [
{'name': 'John', 'age': 30},
{'name': 'Ross', 'age': 27},
{'name': 'Monica', 'age': 28}
]
print(people[1]['name']) | [
"thilinasandaru1@gmail.com"
] | thilinasandaru1@gmail.com |
0309e59b8905481fd8c3f3b9a0f8e0f48aa72140 | 514eb959e0e1118f0ebb3709f74811cd5d603a4f | /scores.py | c2c1588c9ee6715b780a49370f348a889b0bf8f9 | [] | no_license | llr489/lesson2 | 68daa950ba001cdd8510b37dded7e864a0a36f33 | ad42d400d84f7a9bbd50bd231841ad25a12abf8d | refs/heads/master | 2020-04-26T14:46:11.047985 | 2019-03-05T20:25:29 | 2019-03-05T20:25:29 | 173,625,473 | 0 | 0 | null | 2019-03-06T15:00:29 | 2019-03-03T20:22:07 | Python | UTF-8 | Python | false | false | 925 | py | students_scores = [
{'school_class': '4a', 'scores': [3, 4, 4, 5, 2]},
{'school_class': '4b', 'scores': [5, 3, 4, 4, 1, 2]},
{'school_class': '5a', 'scores': [4, 5, 5, 3]},
{'school_class': '5b', 'scores': [4, 4, 3, 5, 3, 2, 4, 4, 3]}
]
def count_average(students_scores):
    """Print the average score for each class and for the whole school.

    `students_scores` is a list of dicts with keys 'school_class' (str)
    and 'scores' (non-empty list of ints). Raises ZeroDivisionError if
    the input contains no scores at all (same as the original).
    """
    sum_school = 0
    school_counter = 0
    for i_class in students_scores:
        scores = i_class['scores']
        # Built-in sum()/len() replace the manual accumulation loops;
        # the printed values are identical.
        sum_class = sum(scores)
        sum_school += sum_class
        school_counter += len(scores)
        average_class = sum_class / len(scores)
        print(f"Средняя оценка по классу {i_class['school_class']}: {average_class}")
    average_school = sum_school/school_counter
    print(f"Средняя оценка по школе: {average_school}")
# Print the per-class and school-wide averages for the sample data above.
count_average(students_scores)
| [
"noreply@github.com"
] | noreply@github.com |
2a1493835df66ac6521310eadc4cd9c73ea5cef8 | 955c2caeb13ac0b493d0ef147c44a0d76b5ede28 | /27_52/36_mutable_string/mutablestring.py | 9982257483f5f200ce65f0b5ea7670be31344f84 | [] | no_license | ryanh153/Morsels | dcb079cafec63d895160a028fffdb799112cf364 | 087d8f4e548e31223acb39862b7491b819542320 | refs/heads/master | 2022-05-27T22:45:02.492518 | 2022-03-23T22:39:22 | 2022-03-23T22:39:22 | 214,299,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 739 | py | from collections import UserString
class MutableString(UserString):
    """A UserString whose characters can be modified in place.

    Each mutator rebuilds the underlying `self.data` string, so every
    operation is O(len(self)). Replaced the repeated
    `[c for c in self.data]` pattern with the idiomatic `list(...)`.
    """
    def __setitem__(self, index, value):
        """Replace the character(s) at `index` (int or slice) with `value`."""
        chars = list(self.data)
        chars[index] = value
        self.data = "".join(chars)
    def __delitem__(self, index):
        """Delete the character(s) at `index` (int or slice)."""
        chars = list(self.data)
        del chars[index]
        self.data = "".join(chars)
    def append(self, value):
        """Append `value` to the end of the string."""
        self.data = self.data + value
    def insert(self, index, value):
        """Insert `value` before position `index`."""
        chars = list(self.data)
        chars.insert(index, value)
        self.data = "".join(chars)
    def pop(self, index=-1):
        """Remove and return the character(s) at `index` (default: last)."""
        popped = self.data[index]
        chars = list(self.data)
        del chars[index]
        self.data = "".join(chars)
        return MutableString(popped)
| [
"rhorton@scitec.com"
] | rhorton@scitec.com |
cd8d85c2591edd6b143fb0be9a9eb0c296fb06e0 | 82410399e57cbfafc03564b6a6ad58b50ace34f3 | /edit/migrations/0003_auto_20210215_0835.py | 39a66e454a46a06d2a9a5fd6eca7cccf4a11fad3 | [] | no_license | sw-baek/Cafedb_modification_web_project | 87042b821eba234e27afff5078f10f862b6e87c6 | c47473ff0d41789f61d1be34c0f62220a52d4180 | refs/heads/master | 2023-03-08T12:06:41.500078 | 2021-02-19T06:26:35 | 2021-02-19T06:26:35 | 340,264,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 993 | py | # Generated by Django 2.2.5 on 2021-02-14 23:35
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration for the ``edit`` app. It re-declares
    # four ShopInfo fields so that each carries a Korean ``verbose_name``
    # label (the field names themselves indicate the meanings below); the
    # column types stay CharField with the stated max_length values.
    dependencies = [
        ('edit', '0002_remove_shopinfo_recent_update'),
    ]
    operations = [
        # '위치' = location/address; 300-character string column.
        migrations.AlterField(
            model_name='shopinfo',
            name='address',
            field=models.CharField(max_length=300, verbose_name='위치'),
        ),
        # '전화번호' = phone number.
        migrations.AlterField(
            model_name='shopinfo',
            name='direct_number',
            field=models.CharField(max_length=200, verbose_name='전화번호'),
        ),
        # '영업시간' = opening hours.
        migrations.AlterField(
            model_name='shopinfo',
            name='opening_hours',
            field=models.CharField(max_length=200, verbose_name='영업시간'),
        ),
        # '가게이름' = shop name.
        migrations.AlterField(
            model_name='shopinfo',
            name='shop_name',
            field=models.CharField(max_length=200, verbose_name='가게이름'),
        ),
    ]
| [
"tight729@naver.com"
] | tight729@naver.com |
d3f7e5a38010e610526dfe18104e43a8f58375e6 | c4ecc70400f3c4375dd4b2335673137dd36b72b4 | /venv/lib/python3.6/site-packages/xero_python/accounting/models/contact_groups.py | 44c5b00b3e94d4523d3baf225c292a9d849de367 | [
"MIT"
] | permissive | TippyFlitsUK/FarmXero | 1bb3496d164d66c940bd3012e36e1763990ff30d | 881b1e6648e927631b276e66a4c5287e4de2cbc1 | refs/heads/main | 2023-07-05T14:49:57.186130 | 2021-08-19T19:33:48 | 2021-08-19T19:33:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,766 | py | # coding: utf-8
"""
Accounting API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
Contact: api@xero.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
from xero_python.models import BaseModel
class ContactGroups(BaseModel):
    """Wrapper model for a list of Xero ``ContactGroup`` objects.

    NOTE: This class is auto generated by OpenAPI Generator
    (https://openapi-generator.tech); do not edit the class manually.

    Attributes:
        openapi_types (dict): maps attribute name to attribute type.
        attribute_map (dict): maps attribute name to the JSON key used in
            the API definition.
    """

    openapi_types = {"contact_groups": "list[ContactGroup]"}
    attribute_map = {"contact_groups": "ContactGroups"}

    def __init__(self, contact_groups=None):  # noqa: E501
        """ContactGroups - a model defined in OpenAPI"""  # noqa: E501
        self._contact_groups = None
        self.discriminator = None
        if contact_groups is not None:
            self.contact_groups = contact_groups

    @property
    def contact_groups(self):
        """Return the wrapped list of ContactGroup models (or None).

        :rtype: list[ContactGroup]
        """
        return self._contact_groups

    @contact_groups.setter
    def contact_groups(self, contact_groups):
        """Store the list of ContactGroup models.

        :type contact_groups: list[ContactGroup]
        """
        self._contact_groups = contact_groups
| [
"ben.norquay@gmail.com"
] | ben.norquay@gmail.com |
3dcc07b5fde5a9285464010165fea7791cbd6733 | 2cdca093365c498cd11f7f03385e9c9310a80113 | /reltest/kernel.py | 99fb677cf1078662bf3165d54babc6868d60fc9c | [
"MIT"
] | permissive | jenninglim/model-comparison-test | 3565a98253e05950d1dc55c39e0b8480c12b6166 | 0024d1ff76ef71a25610b368cc364a59bc672961 | refs/heads/master | 2020-07-20T21:59:02.231673 | 2020-01-08T18:13:28 | 2020-01-08T18:13:28 | 206,715,600 | 5 | 2 | MIT | 2020-01-08T18:13:29 | 2019-09-06T04:54:06 | Jupyter Notebook | UTF-8 | Python | false | false | 22,154 | py | """Module containing kernel related classes"""
from __future__ import division
from builtins import str
from past.utils import old_div
from builtins import object
from future.utils import with_metaclass
__author__ = 'wittawat'
from abc import ABCMeta, abstractmethod
import autograd
import autograd.numpy as np
#import numpy as np
import kgof.config as config
import kgof.util as util
class Kernel(with_metaclass(ABCMeta, object)):
    """Abstract class for kernels. Inputs to all methods are numpy arrays."""
    # Concrete kernels must provide both the full Gram matrix (eval) and the
    # cheaper row-paired evaluation (pair_eval).
    @abstractmethod
    def eval(self, X, Y):
        """
        Evaluate the kernel on data X and Y
        X: nx x d where each row represents one point
        Y: ny x d
        return nx x ny Gram matrix
        """
        pass
    @abstractmethod
    def pair_eval(self, X, Y):
        """Evaluate k(x1, y1), k(x2, y2), ...
        X: n x d where each row represents one point
        Y: n x d
        return a 1d numpy array of length n.
        """
        pass
class KSTKernel(with_metaclass(ABCMeta, Kernel)):
    """
    Interface specifiying methods a kernel has to implement to be used with
    the Kernelized Stein discrepancy test of Chwialkowski et al., 2016 and
    Liu et al., 2016 (ICML 2016 papers) See goftest.KernelSteinTest.
    """
    # All three methods below evaluate derivatives of k for every pair
    # (x in X, y in Y), hence the nx x ny return shapes.
    @abstractmethod
    def gradX_Y(self, X, Y, dim):
        """
        Compute the gradient with respect to the dimension dim of X in k(X, Y).
        X: nx x d
        Y: ny x d
        Return a numpy array of size nx x ny.
        """
        raise NotImplementedError()
    @abstractmethod
    def gradY_X(self, X, Y, dim):
        """
        Compute the gradient with respect to the dimension dim of Y in k(X, Y).
        X: nx x d
        Y: ny x d
        Return a numpy array of size nx x ny.
        """
        raise NotImplementedError()
    @abstractmethod
    def gradXY_sum(self, X, Y):
        """
        Compute \sum_{i=1}^d \frac{\partial^2 k(x, Y)}{\partial x_i \partial y_i}
        evaluated at each x_i in X, and y_i in Y.
        X: nx x d numpy array.
        Y: ny x d numpy array.
        Return a nx x ny numpy array of the derivatives.
        """
        raise NotImplementedError()
# end KSTKernel
class LinearKSTKernel(with_metaclass(ABCMeta, Kernel)):
    """
    Interface specifiying methods a kernel has to implement to be used with
    the linear-time version of Kernelized Stein discrepancy test of
    Liu et al., 2016 (ICML 2016).
    """
    # Unlike KSTKernel, these methods evaluate derivatives only on paired
    # rows (x_i, y_i) — hence the n x d / length-n return shapes.
    @abstractmethod
    def pair_gradX_Y(self, X, Y):
        """
        Compute the gradient with respect to X in k(X, Y), evaluated at the
        specified X and Y.
        X: n x d
        Y: n x d
        Return a numpy array of size n x d
        """
        raise NotImplementedError()
    @abstractmethod
    def pair_gradY_X(self, X, Y):
        """
        Compute the gradient with respect to Y in k(X, Y), evaluated at the
        specified X and Y.
        X: n x d
        Y: n x d
        Return a numpy array of size n x d
        """
        raise NotImplementedError()
    @abstractmethod
    def pair_gradXY_sum(self, X, Y):
        """
        Compute \sum_{i=1}^d \frac{\partial^2 k(X, Y)}{\partial x_i \partial y_i}
        evaluated at each x_i in X, and y_i in Y.
        X: n x d numpy array.
        Y: n x d numpy array.
        Return a one-dimensional length-n numpy array of the derivatives.
        """
        raise NotImplementedError()
class DifferentiableKernel(with_metaclass(ABCMeta, Kernel)):
    def gradX_y(self, X, y):
        """
        Gradient with respect to X (the first kernel argument), computed via
        autograd as a default implementation for convenience. Subclasses
        should override when this does not work for their eval().
        X: nx x d numpy array.
        y: numpy array of length d.
        Return a numpy array G of size nx x d, the derivative of k(X, y)
        with respect to X.
        """
        y_row = np.reshape(y, (1, -1))
        kernel_vs_y = lambda Xs: self.eval(Xs, y_row)
        grad_fn = autograd.elementwise_grad(kernel_vs_y)
        G = grad_fn(X)
        assert G.shape[0] == X.shape[0]
        assert G.shape[1] == X.shape[1]
        return G
# end class KSTKernel
class KDiagGauss(Kernel):
    """
    A Gaussian kernel with diagonal covariance structure, i.e. one Gaussian
    width per input dimension.
    """

    def __init__(self, sigma2s):
        """
        sigma2s: a one-dimensional array of length d containing one width
            squared for each of the d dimensions.
        """
        self.sigma2s = sigma2s

    def _scale(self, M):
        # Dividing each column by its width (not width^2) reduces the
        # diagonal kernel to a unit-width isotropic Gaussian kernel on the
        # scaled inputs.
        return M / np.sqrt(self.sigma2s)

    def eval(self, X, Y):
        """Evaluate the kernel on all pairs of rows from X and Y."""
        return KGauss(1.0).eval(self._scale(X), self._scale(Y))

    def pair_eval(self, X, Y):
        """Evaluate k(x1, y1), k(x2, y2), ..."""
        return KGauss(1.0).pair_eval(self._scale(X), self._scale(Y))
# end class KDiagGauss
class KIMQ(DifferentiableKernel, KSTKernel):
    """
    The inverse multiquadric (IMQ) kernel studied in
    Measure Sample Quality with Kernels
    Jackson Gorham, Lester Mackey
    k(x,y) = (c^2 + ||x-y||^2)^b
    where c > 0 and b < 0. Following a theorem in the paper, this kernel is
    convergence-determining only when -1 < b < 0. In the experiments,
    the paper sets b = -1/2 and c = 1.
    """
    def __init__(self, b=-0.5, c=1.0):
        # b: the (strictly negative) exponent; c: the positive offset.
        if not b < 0:
            raise ValueError('b has to be negative. Was {}'.format(b))
        if not c > 0:
            raise ValueError('c has to be positive. Was {}'.format(c))
        self.b = b
        self.c = c
    def eval(self, X, Y):
        """Evaluate the kernel on data X and Y; returns an nx x ny matrix."""
        b = self.b
        c = self.c
        # D2[i, j] = ||X[i] - Y[j]||^2 (pairwise squared distances)
        D2 = util.dist2_matrix(X, Y)
        K = (c**2 + D2)**b
        return K
    def pair_eval(self, X, Y):
        """Evaluate k(x1, y1), k(x2, y2), ...
        """
        assert X.shape[0] == Y.shape[0]
        b = self.b
        c = self.c
        return (c**2 + np.sum((X-Y)**2, 1))**b
    def gradX_Y(self, X, Y, dim):
        """
        Compute the gradient with respect to the dimension dim of X in k(X, Y).
        X: nx x d
        Y: ny x d
        Return a numpy array of size nx x ny.
        """
        D2 = util.dist2_matrix(X, Y)
        # 1d array of length nx
        Xi = X[:, dim]
        # 1d array of length ny
        Yi = Y[:, dim]
        # nx x ny
        dim_diff = Xi[:, np.newaxis] - Yi[np.newaxis, :]
        b = self.b
        c = self.c
        # d/dx_dim (c^2 + ||x-y||^2)^b
        #   = 2b (c^2 + ||x-y||^2)^(b-1) (x_dim - y_dim)
        Gdim = ( 2.0*b*(c**2 + D2)**(b-1) )*dim_diff
        assert Gdim.shape[0] == X.shape[0]
        assert Gdim.shape[1] == Y.shape[0]
        return Gdim
    def gradY_X(self, X, Y, dim):
        """
        Compute the gradient with respect to the dimension dim of Y in k(X, Y).
        X: nx x d
        Y: ny x d
        Return a numpy array of size nx x ny.
        """
        # k depends on x - y only, so the y-gradient is the negated x-gradient.
        return -self.gradX_Y(X, Y, dim)
    def gradXY_sum(self, X, Y):
        """
        Compute
        \sum_{i=1}^d \frac{\partial^2 k(X, Y)}{\partial x_i \partial y_i}
        evaluated at each x_i in X, and y_i in Y.
        X: nx x d numpy array.
        Y: ny x d numpy array.
        Return a nx x ny numpy array of the derivatives.
        """
        b = self.b
        c = self.c
        D2 = util.dist2_matrix(X, Y)
        # d = input dimension
        d = X.shape[1]
        c2D2 = c**2 + D2
        # Closed form of the mixed second derivatives summed over dimensions:
        # -4b(b-1) D2 (c^2+D2)^(b-2) - 2bd (c^2+D2)^(b-1)
        T1 = -4.0*b*(b-1)*D2*(c2D2**(b-2) )
        T2 = -2.0*b*d*c2D2**(b-1)
        return T1 + T2
# end class KIMQ
class KGauss(DifferentiableKernel, KSTKernel, LinearKSTKernel):
    """
    The standard isotropic Gaussian kernel.
    Parameterization is the same as in the density of the standard normal
    distribution. sigma2 is analogous to the variance.
    """
    def __init__(self, sigma2):
        # sigma2: squared bandwidth; plays the role of the variance.
        assert sigma2 > 0, 'sigma2 must be > 0. Was %s'%str(sigma2)
        self.sigma2 = sigma2
    def eval(self, X, Y):
        """
        Evaluate the Gaussian kernel on the two 2d numpy arrays.

        Parameters
        ----------
        X : n1 x d numpy array
        Y : n2 x d numpy array

        Return
        ------
        K : a n1 x n2 Gram matrix.
        """
        #(n1, d1) = X.shape
        #(n2, d2) = Y.shape
        #assert d1==d2, 'Dimensions of the two inputs must be the same'
        # Pairwise squared distances via ||x||^2 - 2 x.y + ||y||^2, which
        # avoids materializing an n1 x n2 x d difference tensor.
        sumx2 = np.reshape(np.sum(X**2, 1), (-1, 1))
        sumy2 = np.reshape(np.sum(Y**2, 1), (1, -1))
        D2 = sumx2 - 2*np.dot(X, Y.T) + sumy2
        # old_div is plain true division here (float operands).
        K = np.exp(old_div(-D2,(2.0*self.sigma2)))
        return K
    def gradX_Y(self, X, Y, dim):
        """
        Compute the gradient with respect to the dimension dim of X in k(X, Y).
        X: nx x d
        Y: ny x d
        Return a numpy array of size nx x ny.
        """
        sigma2 = self.sigma2
        K = self.eval(X, Y)
        Diff = X[:, [dim]] - Y[:, [dim]].T
        #Diff = np.reshape(X[:, dim], (-1, 1)) - np.reshape(Y[:, dim], (1, -1))
        # d/dx_dim exp(-||x-y||^2/(2 sigma2)) = -(x_dim - y_dim)/sigma2 * k(x, y)
        G = -K*Diff/sigma2
        return G
    def pair_gradX_Y(self, X, Y):
        """
        Compute the gradient with respect to X in k(X, Y), evaluated at the
        specified X and Y.
        X: n x d
        Y: n x d
        Return a numpy array of size n x d
        """
        sigma2 = self.sigma2
        Kvec = self.pair_eval(X, Y)
        # n x d
        Diff = X - Y
        G = -Kvec[:, np.newaxis]*Diff/sigma2
        return G
    def gradY_X(self, X, Y, dim):
        """
        Compute the gradient with respect to the dimension dim of Y in k(X, Y).
        X: nx x d
        Y: ny x d
        Return a numpy array of size nx x ny.
        """
        # k depends only on x - y, so this is the negated x-gradient.
        return -self.gradX_Y(X, Y, dim)
    def pair_gradY_X(self, X, Y):
        """
        Compute the gradient with respect to Y in k(X, Y), evaluated at the
        specified X and Y.
        X: n x d
        Y: n x d
        Return a numpy array of size n x d
        """
        return -self.pair_gradX_Y(X, Y)
    def gradXY_sum(self, X, Y):
        r"""
        Compute \sum_{i=1}^d \frac{\partial^2 k(X, Y)}{\partial x_i \partial y_i}
        evaluated at each x_i in X, and y_i in Y.
        X: nx x d numpy array.
        Y: ny x d numpy array.
        Return a nx x ny numpy array of the derivatives.
        """
        (n1, d1) = X.shape
        (n2, d2) = Y.shape
        assert d1==d2, 'Dimensions of the two inputs must be the same'
        d = d1
        sigma2 = self.sigma2
        D2 = np.sum(X**2, 1)[:, np.newaxis] - 2*np.dot(X, Y.T) + np.sum(Y**2, 1)
        K = np.exp(old_div(-D2,(2.0*sigma2)))
        # Closed form: k(x,y)/sigma2 * (d - ||x-y||^2/sigma2)
        G = K/sigma2*(d - old_div(D2,sigma2))
        return G
    def pair_gradXY_sum(self, X, Y):
        """
        Compute \sum_{i=1}^d \frac{\partial^2 k(X, Y)}{\partial x_i \partial y_i}
        evaluated at each x_i in X, and y_i in Y.
        X: n x d numpy array.
        Y: n x d numpy array.
        Return a one-dimensional length-n numpy array of the derivatives.
        """
        d = X.shape[1]
        sigma2 = self.sigma2
        D2 = np.sum( (X-Y)**2, 1)
        Kvec = np.exp(old_div(-D2,(2.0*self.sigma2)))
        # Same closed form as gradXY_sum, restricted to paired rows.
        G = Kvec/sigma2*(d - old_div(D2,sigma2))
        return G
    def pair_eval(self, X, Y):
        """
        Evaluate k(x1, y1), k(x2, y2), ...

        Parameters
        ----------
        X, Y : n x d numpy array

        Return
        -------
        a numpy array with length n
        """
        (n1, d1) = X.shape
        (n2, d2) = Y.shape
        assert n1==n2, 'Two inputs must have the same number of instances'
        assert d1==d2, 'Two inputs must have the same dimension'
        D2 = np.sum( (X-Y)**2, 1)
        Kvec = np.exp(old_div(-D2,(2.0*self.sigma2)))
        return Kvec
    def __str__(self):
        return "KGauss(%.3f)"%self.sigma2
class KMixGauss(DifferentiableKernel, KSTKernel):
    """Mixture of isotropic Gaussian kernels; see __init__ for the form."""
    def __init__(self, sigma2s, wts=None):
        """
        Mixture of isotropic Gaussian kernels:
            sum wts[i] * exp(- ||x - y||^2 / (2 * sigma2s[i]))
        sigma2s: a list/array of squared bandwidths
        wts: a list/array of weights. Defaults to equal weights summing to 1.
        """
        self.sigma2s = sigma2s = np.asarray(sigma2s)
        assert len(sigma2s) > 0
        if wts is None:
            self.wts = wts = np.full(len(sigma2s), 1/len(sigma2s))
        else:
            self.wts = wts = np.asarray(wts)
            assert len(wts) == len(sigma2s)
            assert all(w >= 0 for w in wts)
    def eval(self, X, Y):
        """
        Evaluate the kernel on data X and Y
        X: nx x d where each row represents one point
        Y: ny x d
        return nx x ny Gram matrix
        """
        # Pairwise squared distances via ||x||^2 - 2 x.y + ||y||^2.
        sumx2 = np.sum(X**2, axis=1)[:, np.newaxis]
        sumy2 = np.sum(Y**2, axis=1)[np.newaxis, :]
        D2 = sumx2 - 2 * np.dot(X, Y.T) + sumy2
        # Broadcast over a leading mixture axis (one slice per bandwidth),
        # then contract that axis against the weights.
        return np.tensordot(
            self.wts,
            np.exp(
                D2[np.newaxis, :, :]
                / (-2 * self.sigma2s[:, np.newaxis, np.newaxis])),
            1)
    def pair_eval(self, X, Y):
        """Evaluate k(x1, y1), k(x2, y2), ...
        X: n x d where each row represents one point
        Y: n x d
        return a 1d numpy array of length n.
        """
        n1, d1 = X.shape
        n2, d2 = Y.shape
        assert n1 == n2, 'Two inputs must have the same number of instances'
        assert d1 == d2, 'Two inputs must have the same dimension'
        D2 = np.sum((X - Y)**2, axis=1)
        return np.tensordot(
            self.wts,
            np.exp(D2[np.newaxis, :] / (-2 * self.sigma2s[:, np.newaxis])),
            1)
    def gradX_Y(self, X, Y, dim):
        """
        Compute the gradient with respect to the dimension dim of X in k(X, Y).
        X: nx x d
        Y: ny x d
        Return a numpy array of size nx x ny.
        """
        # diffs[i, j] = y_j[dim] - x_i[dim]; per-component gradient is
        # diffs / sigma2 times the component kernel value.
        diffs = -X[:, [dim]] + Y[:, [dim]].T
        exps = np.exp(diffs[np.newaxis, :, :] ** 2
                      / (-2 * self.sigma2s[:, np.newaxis, np.newaxis]))
        return np.einsum('w,wij,ij->ij', self.wts / self.sigma2s, exps, diffs)
    def gradY_X(self, X, Y, dim):
        """
        Compute the gradient with respect to the dimension dim of Y in k(X, Y).
        X: nx x d
        Y: ny x d
        Return a numpy array of size nx x ny.
        """
        return -self.gradX_Y(X, Y, dim)
    def gradXY_sum(self, X, Y):
        r"""
        Compute \sum_{i=1}^d \frac{\partial^2 k(x, Y)}{\partial x_i \partial y_i}
        evaluated at each x_i in X, and y_i in Y.
        X: nx x d numpy array.
        Y: ny x d numpy array.
        Return a nx x ny numpy array of the derivatives.
        """
        d = X.shape[1]
        sumx2 = np.sum(X**2, axis=1)[:, np.newaxis]
        sumy2 = np.sum(Y**2, axis=1)[np.newaxis, :]
        D2 = sumx2 - 2 * np.dot(X, Y.T) + sumy2
        # s = D2 / sigma2 per mixture component (leading axis w).
        s = (D2[np.newaxis, :, :] / self.sigma2s[:, np.newaxis, np.newaxis])
        # Per component: exp(-s/2)/sigma2 * (d - s); weighted-summed over w.
        return np.einsum('w,wij,wij->ij',
                         self.wts / self.sigma2s, np.exp(s / -2), d - s)
class KPoly(DifferentiableKernel, KSTKernel):
    """
    Polynomial kernel k(x, y) = (gamma * x^T y + coef0)^degree.
    """
    def __init__(self, degree=3, gamma=None, coef0=1):
        """
        Polynomial kernel
            (gamma X^T Y + coef0)^degree
        degree: positive integer, default 3
        gamma: default 1/dim (resolved at evaluation time)
        coef0: float, default 1
        """
        self.degree = degree
        self.gamma = gamma
        self.coef0 = coef0
        if degree <= 0:
            raise ValueError("KPoly needs positive degree")
        if not np.allclose(degree, int(degree)):
            raise ValueError("KPoly needs integral degree")
    def eval(self, X, Y):
        """
        Evaluate the kernel on data X and Y
        X: nx x d where each row represents one point
        Y: ny x d
        return nx x ny Gram matrix
        """
        dot = np.dot(X, Y.T)
        gamma = 1/X.shape[1] if self.gamma is None else self.gamma
        return (gamma * dot + self.coef0) ** self.degree
    def pair_eval(self, X, Y):
        """Evaluate k(x1, y1), k(x2, y2), ...
        X: n x d where each row represents one point
        Y: n x d
        return a 1d numpy array of length n.
        """
        n1, d1 = X.shape
        n2, d2 = Y.shape
        assert n1 == n2, 'Two inputs must have the same number of instances'
        assert d1 == d2, 'Two inputs must have the same dimension'
        dot = np.einsum('id,id->i', X, Y)
        gamma = 1/d1 if self.gamma is None else self.gamma
        return (gamma * dot + self.coef0) ** self.degree
    def gradX_Y(self, X, Y, dim):
        """
        Compute the gradient with respect to the dimension dim of X in k(X, Y).
        X: nx x d
        Y: ny x d
        Return a numpy array of size nx x ny.
        """
        gamma = 1/X.shape[1] if self.gamma is None else self.gamma
        if self.degree == 1: # optimization, other expression is valid too
            # d/dx_dim (gamma x.y + c) = gamma * y_dim for every x.
            out = gamma * Y[np.newaxis, :, dim] # 1 x ny
            return np.repeat(out, X.shape[0], axis=0)
        dot = np.dot(X, Y.T)
        return (self.degree * (gamma * dot + self.coef0) ** (self.degree - 1)
                * gamma * Y[np.newaxis, :, dim])
    def gradY_X(self, X, Y, dim):
        """
        Compute the gradient with respect to the dimension dim of Y in k(X, Y).
        X: nx x d
        Y: ny x d
        Return a numpy array of size nx x ny.
        """
        gamma = 1/X.shape[1] if self.gamma is None else self.gamma
        if self.degree == 1: # optimization, other expression is valid too
            out = gamma * X[:, dim, np.newaxis] # nx x 1
            return np.repeat(out, Y.shape[0], axis=1)
        dot = np.dot(X, Y.T)
        return (self.degree * (gamma * dot + self.coef0) ** (self.degree - 1)
                * gamma * X[:, dim, np.newaxis])
    def gradXY_sum(self, X, Y):
        r"""
        Compute \sum_{i=1}^d \frac{\partial^2 k(x, Y)}{\partial x_i \partial y_i}
        evaluated at each x_i in X, and y_i in Y.
        X: nx x d numpy array.
        Y: ny x d numpy array.
        Return a nx x ny numpy array of the derivatives.
        """
        gamma = 1/X.shape[1] if self.gamma is None else self.gamma
        if self.degree == 1:
            # BUG FIX: for degree 1, d^2 k / dx_i dy_i = gamma for each of
            # the d dimensions, so the sum is d * gamma for every (x, y)
            # pair and the result must be nx x ny. The previous fast path
            # returned np.tile(gamma, (X.shape[0], X.shape[1])) — the wrong
            # value (missing the factor d) with the wrong shape (nx x d).
            # This now matches the general branch evaluated at degree == 1.
            return np.full((X.shape[0], Y.shape[0]), X.shape[1] * gamma)
        dot = np.dot(X, Y.T)
        inside = gamma * dot + self.coef0
        to_dminus2 = inside ** (self.degree - 2)
        to_dminus1 = to_dminus2 * inside
        # General closed form:
        #   deg (deg-1) gamma^2 (x.y) inside^(deg-2) + d gamma deg inside^(deg-1)
        return (
            (self.degree * (self.degree-1) * gamma**2) * to_dminus2 * dot
            + (X.shape[1] * gamma * self.degree) * to_dminus1
        )
class KMixture(KSTKernel, LinearKSTKernel, DifferentiableKernel):
    """
    A weighted mixture of kernels: k(x, y) = sum_i wts[i] * ks[i](x, y).
    Every operation delegates to the component kernels and combines the
    per-component results with the mixture weights.
    """

    def __init__(self, ks, wts=None):
        """
        ks: list of component kernels.
        wts: mixture weights; defaults to uniform weights summing to one.
        """
        self.ks = ks
        if wts is None:
            self.wts = np.full(len(ks), 1/len(ks))
        else:
            self.wts = np.asarray(wts)

    def _mix(self, apply_kernel):
        # Weighted sum of a per-component computation over all kernels.
        return sum(w * apply_kernel(k) for w, k in zip(self.wts, self.ks))

    def eval(self, X, Y):
        """Gram matrix (nx x ny) of the mixture kernel on X (nx x d), Y (ny x d)."""
        return self._mix(lambda k: k.eval(X, Y))

    def pair_eval(self, X, Y):
        """Evaluate k(x1, y1), k(x2, y2), ...; returns a length-n array."""
        return self._mix(lambda k: k.pair_eval(X, Y))

    def gradX_Y(self, X, Y, dim):
        """Gradient w.r.t. dimension dim of X in k(X, Y); nx x ny array."""
        return self._mix(lambda k: k.gradX_Y(X, Y, dim))

    def gradY_X(self, X, Y, dim):
        """Gradient w.r.t. dimension dim of Y in k(X, Y); nx x ny array."""
        return self._mix(lambda k: k.gradY_X(X, Y, dim))

    def gradXY_sum(self, X, Y):
        r"""Pairwise \sum_i d^2 k / dx_i dy_i; nx x ny array."""
        return self._mix(lambda k: k.gradXY_sum(X, Y))

    def pair_gradX_Y(self, X, Y):
        """Row-paired gradient w.r.t. X; n x d array."""
        return self._mix(lambda k: k.pair_gradX_Y(X, Y))

    def pair_gradY_X(self, X, Y):
        """Row-paired gradient w.r.t. Y; n x d array."""
        return self._mix(lambda k: k.pair_gradY_X(X, Y))

    def pair_gradXY_sum(self, X, Y):
        r"""Row-paired \sum_i d^2 k / dx_i dy_i; length-n array."""
        return self._mix(lambda k: k.pair_gradXY_sum(X, Y))

    def gradX_y(self, X, y):
        """Gradient of k(X, y) w.r.t. X; nx x d array."""
        return self._mix(lambda k: k.gradX_y(X, y))
| [
"noreply@github.com"
] | noreply@github.com |
7b50233f4eb7c169e4d344cce9f00d54f79501c2 | 345cd996d2de63d9ab36dce7516ba1c5674248c0 | /eval.py | 63dffc8c1c7d17b00795cee1de75e4e6ec532a72 | [
"BSD-2-Clause"
] | permissive | Ping-C/certifiedpatchdefense | a66c1d673094417eda2abd389d52d6598a67c882 | f1dbb7e399c320413c17e1412d2fb0ee0d6c812a | refs/heads/master | 2021-01-04T03:58:40.349977 | 2020-09-18T14:50:10 | 2020-09-18T14:50:10 | 240,373,545 | 34 | 5 | null | null | null | null | UTF-8 | Python | false | false | 3,360 | py | ## Copyright (C) 2019, Huan Zhang <huan@huan-zhang.com>
## Hongge Chen <chenhg@mit.edu>
## Chaowei Xiao <xiaocw@umich.edu>
##
## This program is licenced under the BSD 2-Clause License,
## contained in the LICENCE file in this directory.
##
import sys
import copy
import torch
from torch.nn import Sequential, Linear, ReLU, CrossEntropyLoss
import numpy as np
from datasets import loaders
from model_defs import Flatten, model_mlp_any, model_cnn_1layer, model_cnn_2layer, model_cnn_4layer, model_cnn_3layer
from bound_layers import BoundSequential
import torch.optim as optim
# from gpu_profile import gpu_profile
import time
from datetime import datetime
from config import load_config, get_path, config_modelloader, config_dataloader
from argparser import argparser
from train import Train, Logger
# sys.settrace(gpu_profile)
def main(args):
    """
    Evaluate every (pre-trained) model listed in the config on its test set
    and print summary statistics of robust and clean error rates.
    args: command-line namespace consumed by load_config.
    """
    config = load_config(args)
    global_eval_config = config["eval_params"]
    models, model_names = config_modelloader(config, load_pretrain = True)
    # Wrap each model in BoundSequential so bound computation can be applied.
    converted_models = [BoundSequential.convert(model) for model in models]
    robust_errs = []
    errs = []
    for model, model_id, model_config in zip(converted_models, model_names, config["models"]):
        model = model.cuda()
        # make a copy of global training config, and update per-model config
        eval_config = copy.deepcopy(global_eval_config)
        if "eval_params" in model_config:
            eval_config.update(model_config["eval_params"])
        # read training parameters from config file
        method = eval_config["method"]
        verbose = eval_config["verbose"]
        eps = eval_config["epsilon"]
        # parameters specific to a training method
        method_param = eval_config["method_params"]
        norm = float(eval_config["norm"])
        train_data, test_data = config_dataloader(config, **eval_config["loader_params"])
        model_name = get_path(config, model_id, "model", load = False)
        print(model_name)
        # Per-model evaluation log file.
        model_log = get_path(config, model_id, "eval_log")
        logger = Logger(open(model_log, "w"))
        logger.log("evaluation configurations:", eval_config)
        logger.log("Evaluating...")
        # evaluate
        # NOTE(review): epoch 0, train=False, and identical eps for all three
        # epsilon arguments appear to put Train() into pure evaluation mode —
        # confirm against the Train() signature in train.py.
        robust_err, err = Train(model, model_id, 0, test_data, eps, eps, eps, norm, logger, verbose, False, None, method, **method_param)
        robust_errs.append(robust_err)
        errs.append(err)
    print('model robust errors (for robustly trained models, not valid for naturally trained models):')
    print(robust_errs)
    robust_errs = np.array(robust_errs)
    print('min: {:.4f}, max: {:.4f}, median: {:.4f}, mean: {:.4f}'.format(np.min(robust_errs), np.max(robust_errs), np.median(robust_errs), np.mean(robust_errs)))
    print('clean errors for models with min, max and median robust errors')
    i_min = np.argmin(robust_errs)
    i_max = np.argmax(robust_errs)
    # index of the model whose robust error is the median
    i_median = np.argsort(robust_errs)[len(robust_errs) // 2]
    print('for min: {:.4f}, for max: {:.4f}, for median: {:.4f}'.format(errs[i_min], errs[i_max], errs[i_median]))
    print('model clean errors:')
    print(errs)
    print('min: {:.4f}, max: {:.4f}, median: {:.4f}, mean: {:.4f}'.format(np.min(errs), np.max(errs), np.median(errs), np.mean(errs)))
if __name__ == "__main__":
    args = argparser()
    main(args)
| [
"pchiang@umd.edu"
] | pchiang@umd.edu |
9f0bed1563c3f15bd8460ed295a3e02f73417726 | 32004989025697aec4d7097e65213fbdefaf1cde | /models/preprocess.py | a8e84602dd28604b2a3ed43f2d2cee51c1091588 | [] | no_license | jianhongwu/Kaggle-Quora-Insincere-Questions-Classification | 7552c66b48d29a8cbfd9e684a6696b7a79df3ac3 | 0d58ea0f435047953a651c53f44a6e7afaaeba87 | refs/heads/master | 2020-04-30T00:59:02.091460 | 2019-03-19T13:32:56 | 2019-03-19T13:32:56 | 176,516,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,540 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 19 21:00:56 2019
@author: WJH
"""
import pandas as pd
import numpy as np
import re
import time
import gc
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from cfg import *
from sklearn import metrics
#%%
puncts = [',', '.', '"', ':', ')', '(', '-', '!', '?', '|', ';', "'", '$', '&', '/', '[', ']', '>', '%', '=', '#', '*', '+', '\\', '•', '~', '@', '£',
'·', '_', '{', '}', '©', '^', '®', '`', '<', '→', '°', '€', '™', '›', '♥', '←', '×', '§', '″', '′', 'Â', '█', '½', 'à', '…',
'“', '★', '”', '–', '●', 'â', '►', '−', '¢', '²', '¬', '░', '¶', '↑', '±', '¿', '▾', '═', '¦', '║', '―', '¥', '▓', '—', '‹', '─',
'▒', ':', '¼', '⊕', '▼', '▪', '†', '■', '’', '▀', '¨', '▄', '♫', '☆', 'é', '¯', '♦', '¤', '▲', 'è', '¸', '¾', 'Ã', '⋅', '‘', '∞',
'∙', ')', '↓', '、', '│', '(', '»', ',', '♪', '╩', '╚', '³', '・', '╦', '╣', '╔', '╗', '▬', '❤', 'ï', 'Ø', '¹', '≤', '‡', '√', ]
mispell_dict = {"ain't": "is not", "aren't": "are not","can't": "cannot", "'cause": "because", "could've": "could have",
"couldn't": "could not", "didn't": "did not", "doesn't": "does not", "don't": "do not", "hadn't": "had not",
"hasn't": "has not", "haven't": "have not", "he'd": "he would","he'll": "he will", "he's": "he is",
"how'd": "how did", "how'd'y": "how do you", "how'll": "how will", "how's": "how is", "I'd": "I would",
"I'd've": "I would have", "I'll": "I will", "I'll've": "I will have","I'm": "I am", "I've": "I have",
"i'd": "i would", "i'd've": "i would have", "i'll": "i will", "i'll've": "i will have","i'm": "i am",
"i've": "i have", "isn't": "is not", "it'd": "it would", "it'd've": "it would have", "it'll": "it will",
"it'll've": "it will have","it's": "it is", "let's": "let us", "ma'am": "madam", "mayn't": "may not",
"might've": "might have","mightn't": "might not","mightn't've": "might not have", "must've": "must have",
"mustn't": "must not", "mustn't've": "must not have", "needn't": "need not", "needn't've": "need not have",
"o'clock": "of the clock", "oughtn't": "ought not", "oughtn't've": "ought not have", "shan't": "shall not",
"sha'n't": "shall not", "shan't've": "shall not have", "she'd": "she would", "she'd've": "she would have",
"she'll": "she will", "she'll've": "she will have", "she's": "she is", "should've": "should have",
"shouldn't": "should not", "shouldn't've": "should not have", "so've": "so have","so's": "so as",
"this's": "this is","that'd": "that would", "that'd've": "that would have", "that's": "that is",
"there'd": "there would", "there'd've": "there would have", "there's": "there is", "here's": "here is",
"they'd": "they would", "they'd've": "they would have", "they'll": "they will", "they'll've": "they will have",
"they're": "they are", "they've": "they have", "to've": "to have", "wasn't": "was not", "we'd": "we would",
"we'd've": "we would have", "we'll": "we will", "we'll've": "we will have", "we're": "we are", "we've": "we have",
"weren't": "were not", "what'll": "what will", "what'll've": "what will have", "what're": "what are",
"what's": "what is", "what've": "what have", "when's": "when is", "when've": "when have", "where'd": "where did",
"where's": "where is", "where've": "where have", "who'll": "who will", "who'll've": "who will have",
"who's": "who is", "who've": "who have", "why's": "why is", "why've": "why have", "will've": "will have",
"won't": "will not", "won't've": "will not have", "would've": "would have", "wouldn't": "would not",
"wouldn't've": "would not have", "y'all": "you all", "y'all'd": "you all would","y'all'd've": "you all would have",
"y'all're": "you all are","y'all've": "you all have","you'd": "you would", "you'd've": "you would have",
"you'll": "you will", "you'll've": "you will have", "you're": "you are", "you've": "you have",
'colour': 'color', 'centre': 'center', 'favourite': 'favorite', 'travelling': 'traveling',
'counselling': 'counseling', 'theatre': 'theater', 'cancelled': 'canceled', 'labour': 'labor',
'organisation': 'organization', 'wwii': 'world war 2', 'citicise': 'criticize', 'youtu ': 'youtube ',
'Qoura': 'Quora', 'sallary': 'salary', 'Whta': 'What', 'narcisist': 'narcissist', 'howdo': 'how do',
'whatare': 'what are', 'howcan': 'how can', 'howmuch': 'how much', 'howmany': 'how many', 'whydo': 'why do',
'doI': 'do I', 'theBest': 'the best', 'howdoes': 'how does', 'mastrubation': 'masturbation',
'mastrubate': 'masturbate', "mastrubating": 'masturbating', 'pennis': 'penis', 'Etherium': 'Ethereum',
'narcissit': 'narcissist', 'bigdata': 'big data', '2k17': '2017', '2k18': '2018', 'qouta': 'quota',
'exboyfriend': 'ex boyfriend', 'airhostess': 'air hostess', "whst": 'what', 'watsapp': 'whatsapp',
'demonitisation': 'demonetization', 'demonitization': 'demonetization', 'demonetisation': 'demonetization'}
def clean_text(x):
    """
    Surround every punctuation mark listed in the module-level ``puncts``
    table with spaces so the tokenizer treats it as a separate token.
    """
    text = str(x)
    for punct in puncts:
        text = text.replace(punct, f' {punct} ')
    return text
def clean_numbers(x):
    """
    Mask digit runs with '#' placeholders: runs of 5+ digits become
    '#####' first, then remaining 4-, 3-, and 2-digit runs become
    '####', '###', and '##'. Single digits are left untouched.
    """
    masks = (
        ('[0-9]{5,}', '#####'),
        ('[0-9]{4}', '####'),
        ('[0-9]{3}', '###'),
        ('[0-9]{2}', '##'),
    )
    for pattern, replacement in masks:
        x = re.sub(pattern, replacement, x)
    return x
def _get_mispell(mispell_dict):
mispell_re = re.compile('(%s)' % '|'.join(mispell_dict.keys()))
return mispell_dict, mispell_re
mispellings, mispellings_re = _get_mispell(mispell_dict)
def replace_typical_misspell(text):
    """Rewrite every known misspelling in ``text`` to its correction using
    the module-level ``mispellings`` map and ``mispellings_re`` regex."""
    return mispellings_re.sub(lambda match: mispellings[match.group(0)], text)
def add_features(df):
    """
    Add simple statistical text features to ``df`` in place and return it.

    Expects a 'question_text' column; values are coerced to str first.
    Columns added:
        total_length      - number of characters
        capitals          - number of upper-case characters
        caps_vs_length    - capitals / total_length (NaN for empty text
                            instead of raising ZeroDivisionError)
        num_words         - whitespace-separated token count
        num_unique_words  - distinct token count
        words_vs_unique   - num_unique_words / num_words
    """
    df['question_text'] = df['question_text'].apply(lambda x: str(x))
    df['total_length'] = df['question_text'].apply(len)
    df['capitals'] = df['question_text'].apply(
        lambda comment: sum(1 for c in comment if c.isupper()))
    # Vectorized division: unlike the previous per-row float()/float()
    # version this does not raise ZeroDivisionError on empty questions
    # (0/0 yields NaN) and matches the style of words_vs_unique below.
    df['caps_vs_length'] = df['capitals'] / df['total_length']
    # Raw string fixes the invalid '\S' escape in the regex literal.
    df['num_words'] = df.question_text.str.count(r'\S+')
    df['num_unique_words'] = df['question_text'].apply(
        lambda comment: len(set(w for w in comment.split())))
    df['words_vs_unique'] = df['num_unique_words'] / df['num_words']
    return df
def load_and_prec():
    """Load the train/test CSVs, clean the text, tokenize and pad.

    Returns (train_X, test_X, train_y, word_index).

    Relies on module-level names defined elsewhere in this file:
    ``max_features``, ``max_len``, ``SEED``, ``elapsed``, ``Tokenizer``
    and ``pad_sequences`` (the latter two presumably from Keras --
    confirm against the file's import block).
    """
    start_time = time.time()
    print("+++++loading and precessing data+++++")
    train_df = pd.read_csv("../input/train.csv")
    test_df = pd.read_csv("../input/test.csv")
    print("Train shape : ",train_df.shape)
    print("Test shape : ",test_df.shape)
    # lower-case everything before cleaning
    train_df["question_text"] = train_df["question_text"].apply(lambda x: x.lower())
    test_df["question_text"] = test_df["question_text"].apply(lambda x: x.lower())
    # Clean the text (pad punctuation with spaces)
    train_df["question_text"] = train_df["question_text"].apply(lambda x: clean_text(x))
    test_df["question_text"] = test_df["question_text"].apply(lambda x: clean_text(x))
    # Clean numbers (mask digit runs with '#')
    train_df["question_text"] = train_df["question_text"].apply(lambda x: clean_numbers(x))
    test_df["question_text"] = test_df["question_text"].apply(lambda x: clean_numbers(x))
    # Clean spellings via the misspelling table
    train_df["question_text"] = train_df["question_text"].apply(lambda x: replace_typical_misspell(x))
    test_df["question_text"] = test_df["question_text"].apply(lambda x: replace_typical_misspell(x))
    ## fill up the missing values
    train_X = train_df["question_text"].fillna("_##_").values
    test_X = test_df["question_text"].fillna("_##_").values
    ## add features (statistical features currently disabled)
    # train = add_features(train_df)
    # test = add_features(test_df)
    # features = train[['caps_vs_length', 'words_vs_unique']].fillna(0)
    # test_features = test[['caps_vs_length', 'words_vs_unique']].fillna(0)
    # ss = StandardScaler()
    # ss.fit(np.vstack((features, test_features)))
    # features = ss.transform(features)
    # test_features = ss.transform(test_features)
    ## Tokenize the sentences (vocabulary is fit on the training set only)
    tokenizer = Tokenizer(num_words=max_features)
    tokenizer.fit_on_texts(list(train_X))
    train_X = tokenizer.texts_to_sequences(train_X)
    test_X = tokenizer.texts_to_sequences(test_X)
    ## Pad the sentences to a fixed length of max_len tokens
    train_X = pad_sequences(train_X, maxlen=max_len)
    test_X = pad_sequences(test_X, maxlen=max_len)
    ## Get the target values
    train_y = train_df['target'].values
    # shuffling the data (seeded for reproducibility)
    np.random.seed(SEED)
    trn_idx = np.random.permutation(len(train_X))
    train_X = train_X[trn_idx]
    train_y = train_y[trn_idx]
    print(f"+++++loading and precossing finished: {elapsed(time.time()-start_time)}+++++")
    return train_X, test_X, train_y, tokenizer.word_index
def load_word_embeddings(word_index,name='glove'):
    """Build an embedding matrix for *word_index* from a pretrained file.

    name: one of 'glove', 'fasttext', 'paragram', 'word2vec'; anything
    else raises NameError. Unknown words are initialized from a normal
    distribution matching the pretrained vectors' mean/std.

    Relies on module-level ``max_features``, ``elapsed`` and (for
    'word2vec') ``word2vec`` from gensim, defined elsewhere in the file.
    """
    start_time = time.time()
    print(f"+++++loading {name}+++++")
    def get_coefs(word,*arr):
        # Parse one "<token> <300 floats>" line into (token, vector).
        return word, np.asarray(arr,dtype='float32')
    # len(o)>100 skips header/short lines in the vector files.
    if name =='glove':
        EMBEDDING_FILE = '../input/embeddings/glove.840B.300d/glove.840B.300d.txt'
        embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE) if len(o)>100)
    elif name == 'fasttext':
        EMBEDDING_FILE = '../input/embeddings/wiki-news-300d-1M/wiki-news-300d-1M.vec'
        embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE) if len(o)>100)
    elif name == 'paragram':
        EMBEDDING_FILE = '../input/embeddings/paragram_300_sl999/paragram_300_sl999.txt'
        embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE, encoding="utf8", errors='ignore') if len(o)>100)
    elif name =='word2vec':
        # NOTE(review): KeyedVectors.wv.vocab is deprecated in newer
        # gensim releases -- confirm the pinned gensim version.
        word2vecDict = word2vec.KeyedVectors.load_word2vec_format("../input/embeddings/GoogleNews-vectors-negative300/GoogleNews-vectors-negative300.bin", binary=True)
        embeddings_index={}
        for word in word2vecDict.wv.vocab:
            embeddings_index[word] = word2vecDict.word_vec(word)
    else:
        raise NameError
    all_embs = np.stack(embeddings_index.values())
    emb_mean,emb_std = all_embs.mean(),all_embs.std()
    embed_size = all_embs.shape[1]
    # NOTE(review): Keras word_index is 1-based, so when
    # len(word_index) < max_features an index i == num_words can occur
    # below and raise IndexError; in practice the vocabulary is much
    # larger than max_features, which hides this -- confirm.
    num_words = min(max_features,len(word_index))
    embedding_matrix = np.random.normal(emb_mean,emb_std,(num_words, embed_size))
    for word,i in word_index.items():
        if i >= max_features:
            continue
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            embedding_matrix[i] = embedding_vector
    print(f"+++++loading {name} finished: {elapsed(time.time()-start_time)}+++++")
    # Free the (large) raw index before returning.
    del(embeddings_index)
    gc.collect()
    return embedding_matrix
def read_data_from_path():
    """Run the full preprocessing pipeline from the raw input files and
    return (train_X, test_X, train_y, mean_embedding_matrix).

    The embedding matrix is the element-wise average of the GloVe and
    paragram matrices.
    """
    seed_everything()
    train_X, test_X, train_y, word_index = load_and_prec()
    # Average the two pretrained embeddings into one matrix.
    glove = load_word_embeddings(word_index, 'glove')
    paragram = load_word_embeddings(word_index, 'paragram')
    embedding_matrix_mean = np.mean([glove, paragram], axis=0)
    gc.collect()
    return train_X, test_X, train_y, embedding_matrix_mean
def save_data_to_disk(train_X, test_X, train_y, features, test_features, word_index,embedding_matrix_glove,embedding_matrix_paragram,embedding_matrix_mean):
    """Persist every preprocessing artefact as a .npy file in the
    current working directory (np.save appends '.npy' when missing)."""
    outputs = (
        ("train_X", train_X),
        ("test_X", test_X),
        ("train_y", train_y),
        ("features", features),
        ("test_features", test_features),
        ("word_index.npy", word_index),
        ("embedding_matrix_mean", embedding_matrix_mean),
        ("embedding_matrix_glove", embedding_matrix_glove),
        ("embedding_matrix_paragram", embedding_matrix_paragram),
    )
    for target, payload in outputs:
        np.save(target, payload)
def read_data_from_disk():
    """Load the cached preprocessing outputs (see save_data_to_disk)
    from the Kaggle dataset directory and return
    (train_X, test_X, train_y, features, test_features,
    embedding_matrix_mean)."""
    prefix = "../input/final-version/"
    train_X = np.load(prefix + "train_X.npy")
    test_X = np.load(prefix + "test_X.npy")
    train_y = np.load(prefix + "train_y.npy")
    features = np.load(prefix + "features.npy")
    test_features = np.load(prefix + "test_features.npy")
    embedding_matrix_mean = np.load(prefix + "embedding_matrix_mean.npy")
    for label, payload in (("train_X", train_X),
                           ("test_X", test_X),
                           ("train_y", train_y),
                           ("embedding_matrix_mean", embedding_matrix_mean)):
        print(f"{label} shape: {payload.shape}")
    return train_X,test_X,train_y,features,test_features,embedding_matrix_mean
| [
"jianhong.wu0322@qq.com"
] | jianhong.wu0322@qq.com |
f9604e416d904cd2705d5cfd31893ee121898710 | 1ffb9f2ea9f0935a39eded2871d0747f60c8548c | /blogapi/models.py | a8413266df7f31f983ad2e3c94674faf8c61f4b4 | [] | no_license | alexmzirai/blogapi | 0e58bf049e235a2975b9f5df1ffe16dc2a153511 | 2e1544f79422eaefbeb9380fad0eb26bd5f191e6 | refs/heads/master | 2020-03-07T20:01:44.881420 | 2018-04-02T01:22:14 | 2018-04-02T01:22:14 | 127,686,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15 | py | from django.db
| [
"alexmzirai@gmail.com"
] | alexmzirai@gmail.com |
6e7c0f98f01d2d3be2f846dcffa5bf7cddd2be01 | b2163bfd263d37a0e0c3ac49f4c04507cc579c7f | /runs/new_ggaa_receptors/overlay_receptors.py | d6440053e3b295c3bae7b1ceab9e6fdfb84bdb94 | [] | no_license | jyesselm/SimulateTectos | 2443af83a7f64b619599f6c584d99b7c01d33fda | ba8e32eb2ec01a8de6d41dc6870796d4549a1bc2 | refs/heads/master | 2021-01-13T14:30:35.450395 | 2019-06-03T05:57:19 | 2019-06-03T05:57:19 | 72,880,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | from rnamake import motif
from rnamake import resource_manager as rm
m = motif.file_to_motif("../../data/ggaa_models/GGAA_tetraloop_round2_model221.motif")
rm.manager.add_motif(motif=m)
m_new = rm.manager.get_motif(name="new_ggaa_tetraloop", end_name="A7-A22")
m_new.to_pdb("test2.pdb")
m_org = rm.manager.get_motif(name="GGAA_tetraloop", end_name="A7-A22")
m_org.to_pdb("org.pdb") | [
"jyesselm@stanford.edu"
] | jyesselm@stanford.edu |
872717b41ab8b999c249c90ecf54b77774dbe70a | decd69b811379750a5e2458494accdd2d8749dba | /test/simulation_test.py | df4315081ddb0d9da5ed725cc8bc5f87b2a8a442 | [
"MIT"
] | permissive | skyskys00/hornet-model | 93e3e02a27839ac402cc3520bc5eedc6ef0e9605 | d6c7dce26de241a17fd7534b0e98b6a0112bf67f | refs/heads/main | 2023-04-21T20:15:05.609729 | 2021-05-05T04:15:13 | 2021-05-05T04:15:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,096 | py | import unittest
import numpy as np
from src.simulate import simulate
class SimulationTest(unittest.TestCase):
    """Smoke tests that run short simulations from random starting hives."""

    def test_sim_no_geo(self):
        """Simulate ten randomly placed hives near Seattle, no geometry."""
        base_lat, base_long = 47.6062, -122.3321
        coords = [(base_lat + np.random.random(), base_long + np.random.random())
                  for _ in range(10)]
        df = simulate(coords, 2020, 10)
        print(df.head())
        print(df.tail())

    def test_sim_geo(self):
        """Same as above, but constrained by a state-boundary shapefile."""
        base_lat, base_long = 47.6062, -122.3321
        coords = [(base_lat + np.random.random(), base_long + np.random.random())
                  for _ in range(10)]
        df = simulate(coords, 2020, 10,
                      shape_file='../data/states_reduced/states_reduced.shp')
        print(df.head())
        print(df.tail())
if __name__ == '__main__':
    # Allow running this test module directly: python simulation_test.py
    unittest.main()
| [
"18237687+rustygentile@users.noreply.github.com"
] | 18237687+rustygentile@users.noreply.github.com |
7b47974d7c6dff9d2d526ea771620b522c940bca | 5f4da925312f9ad4b4de36e7d1861031d3f03731 | /app.py | 964943b9a931d3e43f46b67109b0c953a4cb9dad | [] | no_license | geofferyj/PROJECT1 | 1b1c0cad5c3766589af8291b0c2635d15cfd599d | 89cdfe42e27c3176dbdce79654d1161013e041cf | refs/heads/master | 2021-01-01T12:28:51.167516 | 2020-03-02T15:59:41 | 2020-03-02T15:59:41 | 239,279,791 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,679 | py | import os, requests
from functools import wraps
from flask import Flask, session, redirect, render_template, url_for, request, flash, jsonify, make_response, abort
from flask_session import Session
from sqlalchemy import create_engine, exc
from sqlalchemy.orm import scoped_session, sessionmaker
app = Flask(__name__)
dbstring = "postgres://fabeidpsjarnlm:080cd5f8a5a7ce8dd8d6c71863c76924e7a26ebcab39588e6dc637a1741bf496@ec2-3-234-109-123.compute-1.amazonaws.com:5432/de693jkmt9rih3"
# Configure session to use filesystem
app.config['SECRET_KEY'] = "efd432e0aca715610c505c533037b95d6fb22f5692a0d33820ab7b19ef06f513"
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
# Set up database
engine = create_engine(dbstring)
db = scoped_session(sessionmaker(bind=engine))
db.execute("""CREATE TABLE IF NOT EXISTS users(uid SERIAL PRIMARY KEY,
name VARCHAR NOT NULL,
username VARCHAR NOT NULL UNIQUE,
email VARCHAR NOT NULL UNIQUE,
password VARCHAR NOT NULL)""")
db.execute("""CREATE TABLE IF NOT EXISTS books(isbn VARCHAR PRIMARY KEY,
title VARCHAR NOT NULL,
author VARCHAR NOT NULL,
year INTEGER NOT NULL)""")
db.execute("""CREATE TABLE IF NOT EXISTS reviews(id SERIAL PRIMARY KEY,
uid INTEGER NOT NULL REFERENCES users(uid),
isbn VARCHAR NOT NULL REFERENCES books(isbn),
review VARCHAR NOT NULL,
rating INTEGER CHECK(rating > 0 AND rating <= 5) NOT NULL,
review_date TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT unique_uid_isbn UNIQUE(uid,isbn)
)""")
db.commit()
def login_required(func):
    """View decorator: redirect anonymous users to the login page.

    The originally requested URL is passed along as the ``next`` query
    argument so login() can send the user back afterwards.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        # A logged-in session is marked by the presence of 'uid'.
        if 'uid' not in session:
            return redirect(url_for('login', next=request.url))
        return func(*args, **kwargs)
    return wrapper
@app.route("/", methods = ["POST", "GET"])
@login_required
def index():
    """Home page: show the search form; on POST, list matching books.

    Matches title, author or ISBN as a case-insensitive substring.
    """
    if request.method == "POST":
        search = request.form.get("search")
        # User input is bound as a parameter (no SQL injection); the
        # surrounding % make it a substring match. NOTE(review): % or _
        # typed by the user still act as LIKE wildcards.
        data = db.execute("SELECT * FROM books WHERE title ILIKE :search OR author ILIKE :search OR isbn ILIKE :search", {"search": '%' + search + '%'}).fetchall()
        if data:
            return render_template('index.html', data=data)
        else:
            flash("Sorry No match was found for your search")
            return render_template('index.html', data=data)
    return render_template('index.html')
@app.route("/login/", methods = ["POST", "GET"])
def login():
    """Authenticate a user by email and password and start a session.

    BUG FIX: the original looked the user up by email only and never
    checked the submitted password, so anyone who knew a registered
    email could log in. The password is now part of the lookup.
    NOTE(review): passwords are stored and compared in plain text to
    match signup(); moving to salted hashes needs a coordinated change
    in both views.
    """
    if request.method == "POST":
        form = request.form
        email = form["email"]
        password = form["password"]
        next_url = form["next"]
        user = db.execute(
            "SELECT uid FROM users WHERE email = :email AND password = :password",
            {"email": email, "password": password}).fetchone()
        if user:
            session["uid"] = user.uid
            if next_url:
                flash("Login successful")
                # NOTE(review): redirecting to a client-supplied URL is
                # an open-redirect risk -- consider validating next_url.
                return redirect(next_url)
            return redirect(url_for("index"))
        else:
            flash("user not found")
            return redirect(url_for("login"))
    return render_template("login.html")
@app.route("/logout/")
def logout():
    """End the session (if any) and return to the login page."""
    # pop with a default so logging out twice is harmless.
    session.pop("uid", None)
    return redirect(url_for("login"))
@app.route("/signup/", methods = ["POST", "GET"])
def signup():
    """Register a new user and redirect to the login page.

    NOTE(review): the password is stored in plain text (see the users
    table definition); hashing it requires a matching change in login().
    """
    if request.method == "POST":
        form = request.form
        username = form["username"]
        name = form["name"]
        email = form["email"]
        password = form["password"]
        try:
            db.execute("INSERT INTO users(name, username, email, password) VALUES(:name, :username, :email, :password)", {
                "name": name, "username": username, "email": email, "password": password})
            db.commit()
            return redirect(url_for('login'))
        except exc.IntegrityError:
            # BUG FIX: roll back the failed INSERT so the scoped session
            # stays usable -- without this, SQLAlchemy raises on every
            # subsequent query in the same session.
            db.rollback()
            # NOTE(review): both username and email carry UNIQUE
            # constraints, so this message is wrong for duplicate emails.
            flash('Username Already exists')
            return redirect(url_for('signup'))
    return render_template('signup.html')
@app.route("/book/<isbn>/", methods = ["GET", "POST"])
@login_required
def book_details(isbn):
    """Show a book's details, its Goodreads stats and user reviews;
    on POST, record the current user's review (one per user per book,
    enforced by the unique_uid_isbn constraint)."""
    if request.method == "POST":
        review = request.form.get("review")
        rating = request.form.get("rating")
        uid = session["uid"]
        try:
            db.execute("INSERT INTO reviews (uid, isbn, review, rating) VALUES(:uid, :isbn, :review, :rating)", {"uid": uid, "isbn": isbn, "review": review, "rating": rating})
            db.commit()
        except exc.IntegrityError:
            # BUG FIX: roll back the failed INSERT so the scoped session
            # stays usable for the queries below and later requests.
            db.rollback()
            flash('You have already revied this book')
            return redirect(url_for('book_details', isbn=isbn))
    reviews = db.execute("SELECT name, review, rating FROM users, reviews WHERE users.uid = reviews.uid AND reviews.isbn = :isbn ORDER BY reviews.review_date", {"isbn":isbn})
    details = db.execute("SELECT * FROM books WHERE isbn = :isbn", {"isbn":isbn}).fetchone()
    # NOTE(review): the Goodreads API key is hard-coded in source --
    # move it to configuration.
    res = requests.get("https://www.goodreads.com/book/review_counts.json", params={"key": "e9hh8mpJf995M7SzMfst5A", "isbns": isbn}).json()
    # Keeps the original last-item-wins behaviour. NOTE(review): if
    # res['books'] is empty, gr_data is never bound and the render below
    # raises NameError.
    for i in res['books']:
        gr_data = i
    return render_template("book_details.html", details=details, reviews=reviews, gr_data=gr_data)
@app.route("/api/<isbn>/", methods=['GET'])
def api(isbn):
    """JSON endpoint: book metadata plus review count and average score.

    Responds 404 when the ISBN is not in the books table.
    """
    if request.method == 'GET':
        book = db.execute('SELECT * FROM books WHERE isbn = :isbn', {'isbn': isbn}).fetchone()
        if book:
            rating = db.execute("SELECT ROUND( AVG(rating), 2) FROM reviews WHERE isbn = :isbn", {'isbn':isbn}).fetchone()
            review = db.execute("SELECT COUNT(review) FROM reviews WHERE isbn = :isbn", {'isbn':isbn}).fetchone()
            # Each fetchone() yields a one-column row; the loops below
            # unpack that single value, substituting 0 when AVG returns
            # NULL (i.e. no reviews yet).
            for i in rating:
                if i:
                    avg_rating = float(i)
                else:
                    avg_rating = 0
            for i in review:
                if i:
                    review_count = int(i)
                else:
                    review_count = 0
            return make_response(jsonify({
                "title": book.title,
                "author": book.author,
                "year": book.year,
                "isbn": book.isbn,
                "review_count": review_count,
                "average_score": avg_rating,
            }))
        else:
            return abort(404)
@app.shell_context_processor
def make_shell_context():
    """Expose the database session as ``db`` inside ``flask shell``."""
    return {'db': db}

if __name__ == "__main__":
    # NOTE(review): debug mode must be disabled in production.
    app.debug = True
    app.run()
"geofferyjoseph1@gmail.com"
] | geofferyjoseph1@gmail.com |
7aad1cf9d6acb0465048e076f816b50cf784ec85 | 1fbd992e8056548a8baaed804cd5aa93ba57651a | /Q2_PGM4.py | b418ab2bd95cc9568ec48b25fae045bbca389116 | [] | no_license | dhanusrikar/Assignment-2 | 8e26318569b1553c82ec7ba4555bcd842611c0ab | 0da1e752a5350aed1c653079075773869ae10026 | refs/heads/master | 2022-12-20T10:09:36.934918 | 2020-09-22T08:45:34 | 2020-09-22T08:45:34 | 297,587,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | #!/usr/bin/env python
# coding: utf-8
# In[5]:
from mpl_toolkits import mplot3d
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as ani
theta0 = np.array(pd.read_csv('batch_1.csv',usecols = ['theta0']))
theta1 = np.array(pd.read_csv('batch_1.csv',usecols = ['theta1']))
theta2 = np.array(pd.read_csv('batch_1.csv',usecols = ['theta2']))
print(len(theta0))
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.scatter(theta1,theta2,theta0)
plt.ylabel('Theta1')
plt.xlabel('Theta2')
plt.show()
# In[ ]:
| [
"gvsrikar08@gmail.com"
] | gvsrikar08@gmail.com |
d1a3312fd06cdd1c33319651970db66ccf6feaff | 844501294ca37f1859b9aa0a258e6dd6b1bf2349 | /snipe/__init__.py | ed31be10c2f86531161797372795b4dd3a2ba4bb | [
"MIT",
"BSD-2-Clause"
] | permissive | 1ts-org/snipe | 2ac1719bc8f6b3b158c04536464f866c34051253 | ad84a629e9084f161e0fcf811dc86ba54aaf9e2b | refs/heads/master | 2021-06-04T22:32:36.038607 | 2020-03-27T05:18:36 | 2020-04-05T21:50:42 | 18,642,653 | 6 | 3 | NOASSERTION | 2019-10-08T02:02:50 | 2014-04-10T16:01:32 | Python | UTF-8 | Python | false | false | 1,377 | py | # -*- encoding: utf-8 -*-
# Copyright © 2014 the Snipe contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
| [
"kcr@1ts.org"
] | kcr@1ts.org |
2e406c239b4072c96a15888b2a2eb83f59ab7dd9 | 69397ce9f2aa2c66d58d2f9eb3fdb9ef74c26651 | /blogsite/apps/blog/models.py | 438b7eb2749cd9038db91785686f2c95b43aa0e2 | [] | no_license | wangjojo/second-blogsite | 8d939749c5655cdcbb4dec21bf3e8e2443ea5a9a | 1c83486d9a321cc6592131de512f398cd1a3dab2 | refs/heads/master | 2020-03-16T20:06:53.316656 | 2018-05-27T06:29:02 | 2018-05-27T06:29:02 | 132,946,912 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,389 | py | from datetime import datetime
from django.db import models
from users.models import UserProfile
from DjangoUeditor.models import UEditorField
# Create your models here.
class Category(models.Model):
    # Blog category ("类别"); shown in the site navigation when
    # nav_display is True; is_admin marks categories needing admin
    # permission; add_time records creation time.
    # NOTE(review): default=datetime.now here vs auto_now_add in Tag --
    # confirm whether the inconsistency is intentional.
    name = models.CharField(max_length=20,verbose_name='类别')
    nav_display = models.BooleanField(default=True,verbose_name='是否导航显示')
    is_admin = models.BooleanField(default=False,verbose_name='管理权限')
    add_time = models.DateTimeField(default=datetime.now,verbose_name='添加时间')
    class Meta:
        verbose_name = "分类"
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.name
class Tag(models.Model):
    # Free-form blog tag ("标签"); add_time is set once at creation.
    name = models.CharField(max_length=20,verbose_name='标签')
    add_time = models.DateTimeField(auto_now_add=True,verbose_name='添加时间')
    class Meta:
        verbose_name = "标签"
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.name
class Blog(models.Model):
    # A blog post: rich-text content (DjangoUeditor), publication flags,
    # click/favourite counters, optional banner/carousel flag, and
    # relations to Category, Tag and the authoring UserProfile.
    # NOTE(review): the ForeignKey fields have no on_delete argument, so
    # this only works on Django < 2.0 -- confirm the pinned version.
    title = models.CharField(max_length=50,verbose_name='标题')
    content = UEditorField(height=400, width=850,default='', blank=True, imagePath="uploads/images/",toolbars='besttome', filePath='uploads/files/',verbose_name='内容')
    excerpt = models.CharField(blank=True, max_length=100,verbose_name='摘要')
    is_pub = models.BooleanField(default=True,verbose_name='是否发表')
    add_time = models.DateTimeField(auto_now_add=True,editable=True,verbose_name='发表时间')
    update_time = models.DateTimeField(auto_now=True,null=True,verbose_name='更新时间')
    image = models.ImageField(upload_to='images/blog/%Y/%m',default='images/blog_default.png',blank=True,max_length=100,verbose_name='封面')
    fav_nums = models.IntegerField(default=0,verbose_name='收藏数')
    click_nums = models.IntegerField(default=0,verbose_name='点击数')
    want_to_say = models.CharField(blank=True, max_length=200,verbose_name='想说的话')
    is_banner = models.BooleanField(default=False,verbose_name='是否轮播')
    category = models.ForeignKey(Category,verbose_name='分类')
    tags = models.ManyToManyField(Tag,blank=True,verbose_name='标签')
    author = models.ForeignKey(UserProfile,verbose_name='作者')
    class Meta:
        verbose_name = "博客"
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.title
"fg498572715@163.com"
] | fg498572715@163.com |
b9fb546b5d064c6ff627572b2cd8bb3bad4cb08a | 7b71fc437d1be709bb34b64ca150769b529cc093 | /pyroms/build/lib/pyroms/remapping/roms2z.py | da6d38d78da9ead278f78f803c217f9d5e7024b6 | [
"BSD-3-Clause"
] | permissive | ongrjoe/pyroms-osx | 96495cb868d7e16f45eda21f982bc499d523f664 | 10480275941a84f51357136b5eb3b5230ec38447 | refs/heads/master | 2020-05-24T20:22:50.547174 | 2019-05-19T08:57:01 | 2019-05-19T08:57:01 | 187,454,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,735 | py | # encoding: utf-8
import numpy as np
import _interp
def roms2z(var, grd, grdz, Cpos='rho', irange=None, jrange=None, \
           spval=1e37, mode='linear'):
    """
    varz = roms2z(var, grd, grdz)

    optional switch:
      - Cpos='rho', 'u', 'v' or 'w'  specify the C-grid position where
                                     the variable lies
      - irange                       specify grid sub-sample for i direction
      - jrange                       specify grid sub-sample for j direction
      - spval=1e37                   define spval value (fill/mask value)
      - mode='linear' or 'spline'    specify the type of interpolation

    Interpolate the variable from ROMS grid grd to z vertical grid grdz.
    """
    import warnings

    # Work on a copy so the caller's array is not modified.
    var = var.copy()

    assert len(var.shape) == 3, 'var must be 3D'

    if mode=='linear':
        imode=0
    elif mode=='spline':
        imode=1
    else:
        imode=0
        # BUG FIX: the original *raised* this Warning, which aborted the
        # call and made the documented "defaulting to linear" fallback
        # unreachable. Emit a real warning and continue instead.
        warnings.warn('%s not supported, defaulting to linear' % mode)

    # Pick the depths and land/sea mask for the requested C-grid position.
    # u/v positions are obtained by averaging adjacent rho points.
    # BUG FIX: these comparisons used "is", which tests object identity
    # and only works by accident of CPython string interning.
    if Cpos == 'rho':
        z = grd.vgrid.z_r[0,:]
        depth = grdz.vgrid.z
        mask = grd.hgrid.mask_rho
    elif Cpos == 'u':
        z = 0.5 * (grd.vgrid.z_r[0,:,:,:-1] + grd.vgrid.z_r[0,:,:,1:])
        depth = 0.5 * (grdz.vgrid.z[:,:,:-1] + grdz.vgrid.z[:,:,1:])
        mask = grd.hgrid.mask_u
    elif Cpos == 'v':
        z = 0.5 * (grd.vgrid.z_r[0,:,:-1,:] + grd.vgrid.z_r[0,:,1:,:])
        depth = 0.5 * (grdz.vgrid.z[:,:-1,:] + grdz.vgrid.z[:,1:,:])
        mask = grd.hgrid.mask_v
    elif Cpos == 'w':
        z = grd.vgrid.z_w[0,:]
        depth = grdz.vgrid.z
        mask = grd.hgrid.mask_rho
    else:
        # Kept as 'raise Warning' for caller compatibility.
        # NOTE(review): ValueError would be the conventional type here.
        raise Warning('%s unknown position. Cpos must be rho, u, v or w.' % Cpos)

    Nm, Mm, Lm = var.shape
    nlev = grdz.vgrid.N

    # Duplicate the second-to-last level and cap it at z=100 so the
    # interpolator has a bracket above the surface.
    var = np.concatenate((var, var[-2:-1,:,:]), 0)
    z = np.concatenate((z, 100*np.ones((1,z.shape[1], z.shape[2]))), 0)

    if irange is None:
        irange = (0,Lm)
    else:
        assert var.shape[2] == irange[1]-irange[0], \
               'var shape and irange must agree'

    if jrange is None:
        jrange = (0,Mm)
    else:
        assert var.shape[1] == jrange[1]-jrange[0], \
               'var shape and jrange must agree'

    varz = np.zeros((nlev, jrange[1]-jrange[0], irange[1]-irange[0]))

    # Horizontal slice at each target depth via the Fortran extension.
    for k in range(nlev):
        varz[k,:,:] = _interp.xhslice(var, \
                          z[:,jrange[0]:jrange[1], irange[0]:irange[1]], \
                          depth[k,jrange[0]:jrange[1], irange[0]:irange[1]], \
                          mask[jrange[0]:jrange[1], irange[0]:irange[1]], \
                          imode, spval)

    # Snap values numerically close to spval exactly onto spval (mask).
    idx = np.where(abs((varz-spval)/spval)<=1e-5)
    varz[idx] = spval

    return varz
return varz
| [
"ongrjoe@gmail.com"
] | ongrjoe@gmail.com |
766cac3c3014e959f87d729de6f1f13a4b3b5bcc | 6f2380fe4087783603a36ce966ab9b5e99ac4cbb | /demo6/demo6/wsgi.py | 426879a0e0f24332489b4f2b78726f72154dbfa2 | [] | no_license | zhlxcl/Python1904xcl | aab0d4989d9065bff5d5334a1a1e1188cbd3619d | f4cc21ed0fb6d40349490a084b983b4e897c8d91 | refs/heads/master | 2022-12-14T20:12:16.014524 | 2019-07-19T08:47:19 | 2019-07-19T08:47:19 | 194,625,840 | 0 | 0 | null | 2022-11-22T04:07:44 | 2019-07-01T07:53:59 | JavaScript | UTF-8 | Python | false | false | 387 | py | """
WSGI config for demo6 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings before building the WSGI app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'demo6.settings')
# Module-level WSGI callable looked up by servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| [
"2870175885@qq.com"
] | 2870175885@qq.com |
787cb77276053925d5fdaec7ddd69b65a7f0b570 | 8e7b3a72d238ef49e763b39237bc8a2344c7ca50 | /venv/Scripts/easy_install-script.py | 2f05806577b4b6aa1cbe5506110d02d5c283457d | [] | no_license | Uyanc/CatVsDog | 72f85083f85c173b5e6b153c5aa484a0e9085d56 | c1d524295e4ed703db4161a73dbcac3324ad9f4b | refs/heads/master | 2020-05-15T16:00:45.919171 | 2019-04-20T01:48:49 | 2019-04-20T01:48:49 | 182,382,712 | 1 | 0 | null | 2019-04-20T08:48:37 | 2019-04-20T08:48:37 | null | UTF-8 | Python | false | false | 451 | py | #!E:\PycharmProjects\CatVsDog\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==28.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==28.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==28.8.0', 'console_scripts', 'easy_install')()
)
| [
"952066073@qq.com"
] | 952066073@qq.com |
52415f46784183380504b7540edef423115ddea4 | 55266a178575bc89ed30b5fdc0fb084c4c4907a1 | /URLGrab.py | 7ddf1667da87e4e358a37891da1d6ce20e085106 | [] | no_license | camerse/WebQueries | 8673ec45c8f8b2e844cae61f04a1867aa1754ac4 | d3a168f78599d65b5ca0feb87f8efd6bb46ee406 | refs/heads/master | 2020-08-05T05:34:02.303985 | 2017-05-17T17:56:31 | 2017-05-17T17:56:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 926 | py | # -*- coding: utf-8 -*-
#! /usr/bin/env python3
'''
This code is modified from pymbook.readthedocs.io
I'm using it as a jumping-off point
'''
import requests
import os
import sys
def download_url(url):
    """Fetch *url* and save its body to a file (named after the last
    URL path segment) in the current directory.

    Skips the download when the server answers 404 or when a file of
    the same name already exists locally.
    """
    response = requests.get(url)
    # requests.get returns a requests.models.Response object.
    if response.status_code == 404:
        print(f"No such file found at {url}")
        return
    target = url.split('/')[-1]
    if target in os.listdir():
        print(f"Filename {target} already exists!")
        return
    with open(target, 'w') as out:
        out.write(response.text)
    print("Download over.")
if __name__ == '__main__':
    # Expect exactly one command-line argument: the URL to fetch.
    if len(sys.argv) != 2:
        print("One URL argument please.")
        sys.exit(0)
    download_url(sys.argv[1])
| [
"klyons19@gmail.com"
] | klyons19@gmail.com |
4d786b9ccda027cddd2f875212827ce276a4d821 | fc6c6437bda3ba5ba75218d8184c53f74352a1a2 | /firmware/Iconograph/mergeHex.py | 269e62217f443a9a91c75c703ae434b3a17ae75d | [] | no_license | siliconsparrow/Iconograph | 36d6675d134e1a264e3e99e198661b2ac9119860 | c07c579a87fb5abb1e568f499c21706b82b30652 | refs/heads/master | 2023-07-21T02:15:54.590115 | 2021-09-01T22:25:13 | 2021-09-01T22:25:13 | 367,541,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 629 | py | #!/usr/bin/env python3
# Python script to merge two or more HEX files.
#
# by Adam Pierce <adam@levaux.com>
# for Levaux Pty. Ltd. Australia
#
# Usage: mergeHex.py <file1.hex> <file2.hex> [<file3.hex> ...]
# Outputs merged file to STDOUT.
import sys
from intelhex import IntelHex
# Get the list of filenames to merge.
filenames = sys.argv[1:]
# Load the HEX files.
files = []
for fn in filenames:
    x = IntelHex()
    x.fromfile(fn, format='hex')
    files.append(x)
# Merge them all into the first file. overlap='replace' resolves
# overlapping address ranges in favour of the later file on the
# command line.
for i in range(1,len(files)):
    files[0].merge(files[i], overlap='replace')
# Output the result to STDOUT.
files[0].write_hex_file(sys.stdout)
| [
"adam@siliconsparrow.com"
] | adam@siliconsparrow.com |
5bf4ffe1e9ffbf78397b69434f6a761fc3e01e4d | 89712fbed6b3a96e4933b6c610a3dc496668b1db | /migrations/versions/df235ca39b01_.py | 5ac6d4d7bf4cb42e8ae908c00244e7a5eb0e379a | [] | no_license | Sanketkalode/Flask_webapp | bb3a80a605adad373fb3b0528cfb7f47a3ce9ec1 | 798742853f4972011efe24d19afc1cc66c68f633 | refs/heads/master | 2023-05-11T01:32:49.650602 | 2022-04-26T08:46:11 | 2022-04-26T08:46:11 | 254,264,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 931 | py | """empty message
Revision ID: df235ca39b01
Revises: c76b7add00f0
Create Date: 2020-06-29 17:08:14.447160
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'df235ca39b01'
down_revision = 'c76b7add00f0'
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Adds API-token support to users: a unique, indexed token string
    # plus its expiry timestamp (both nullable for existing rows).
    op.add_column('users', sa.Column('token', sa.String(length=32), nullable=True))
    op.add_column('users', sa.Column('token_expiration', sa.DateTime(), nullable=True))
    op.create_index(op.f('ix_users_token'), 'users', ['token'], unique=True)
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Reverse of upgrade(): drop the index first, then the columns.
    op.drop_index(op.f('ix_users_token'), table_name='users')
    op.drop_column('users', 'token_expiration')
    op.drop_column('users', 'token')
    # ### end Alembic commands ###
| [
"sanket_kalode@persistent.co.in"
] | sanket_kalode@persistent.co.in |
74b74b0586d3c4a3c9553e59397d45f07e266132 | 6122ceafdb93067a5ae25732d76f170e229378e8 | /Week2Final.py | 13fbcfa38b3af7deeb89fc3d4a897a1e1a3e8a91 | [] | no_license | SG2016UW/MachineLearning | 7239b6f3459344fe3e96a370032b355087195eb1 | ba4c7ca19f1fc4d3b33143a649d3961c52feda2b | refs/heads/master | 2020-12-24T12:40:00.622073 | 2016-11-06T03:21:03 | 2016-11-06T03:21:03 | 72,966,254 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,871 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 17 19:33:26 2016
@author: gujju
"""
import pandas as pd
import numpy as np
import xml.etree.cElementTree as ET
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn import linear_model
import codecs
import sys
#FUNCTION WHICH WILL PRINT XML
def write_contents(pointer):
user_id = test_profile_df['userid'].iloc[pointer-1]
root.set(items[0],str(user_id))
root.set(items[1],str(target_age))
if(output_gender_predicted[pointer-1] == 1.0):
root.set(items[2],'female')
else:
root.set(items[2],'male')
root.set(items[3],str(output_traits_predicted_df.loc[pointer-1,'ext']))
root.set(items[4],str(output_traits_predicted_df.loc[pointer-1,'neu']))
root.set(items[5],str(output_traits_predicted_df.loc[pointer-1,'agr']))
root.set(items[6],str(output_traits_predicted_df.loc[pointer-1,'con']))
root.set(items[7],str(output_traits_predicted_df.loc[pointer-1,'ope']))
tree = ET.ElementTree(root)
output_path = output_folder + user_id + '.xml'
tree.write(output_path, short_empty_elements = True)
# ---------------------------------------------------------------------------
# Batch prediction script: trains on /data/training, then predicts gender
# (Naive Bayes over status text), Big-Five traits (linear regression over
# LIWC features) and a majority-class age group for the users under the test
# path, finally writing one XML file per test user via write_contents()
# (defined earlier in this file).
# NOTE(review): indentation was reconstructed from a flattened source dump.
# ---------------------------------------------------------------------------
input_training_path = "/data/training"
# Input path to the test data, taken from the command line.
# NOTE(review): the original comments mention argv[0]/argv[1] but the code
# reads argv[2] and argv[4] -- confirm the expected invocation.
input_test_path = sys.argv[2]
training_profile_df = pd.read_csv(input_training_path + '/profile/profile.csv')
test_profile_df = pd.read_csv(input_test_path + '/profile/profile.csv')

#######################################################################
# COUNT VECTORIZER (NAIVE BAYES) FOR PREDICTING GENDER
#######################################################################
training_gender_df = training_profile_df.loc[:, ['userid', 'gender']]
userIds_train = training_gender_df['userid']
input_training_text_arr = []
input_train_text_loc = input_training_path + "/text/"
for userId in userIds_train:
    # One status-text file per user; ignore undecodable bytes.
    path = input_train_text_loc + userId + ".txt"
    with codecs.open(path, 'r', encoding='utf-8', errors='ignore') as fo:
        input_training_text_arr.append(fo.read())

# Train a Naive Bayes model on bag-of-words counts.
count_vect = CountVectorizer()
input_status_train = count_vect.fit_transform(input_training_text_arr)
input_gender_train = training_gender_df['gender']
clf = MultinomialNB()
clf.fit(input_status_train, input_gender_train)

test_gender_df = test_profile_df.loc[:, ['userid', 'gender']]
userIds_test = test_gender_df['userid']
input_test_text_arr = []
input_test_text_Loc = input_test_path + "/text/"
for userId in userIds_test:
    filename = input_test_text_Loc + userId + ".txt"
    with codecs.open(filename, 'r', encoding='utf-8', errors='ignore') as fo:
        input_test_text_arr.append(fo.read())
input_status_test = count_vect.transform(input_test_text_arr)
output_gender_predicted = clf.predict(input_status_test)

#######################################################################
# LINEAR REGRESSION FOR PREDICTING PERSONALITY TRAITS (LIWC features)
#######################################################################
input_training_path = "/data/training"
input_train_liwc_loc = input_training_path + "/LIWC/LIWC.csv"
training_traits_df = pd.read_csv(input_train_liwc_loc)
training_traits_df.columns = training_traits_df.columns.str.lower()
training_traits_df = pd.merge(training_traits_df, training_profile_df,
                              how='inner', on='userid')
input_test_liwc_loc = input_test_path + "/LIWC/LIWC.csv"
test_traits_df = pd.read_csv(input_test_liwc_loc, sep=',')
test_traits_df.columns = test_traits_df.columns.str.lower()

big5 = ['ope', 'ext', 'con', 'agr', 'neu']
# Every merged column except the regression targets and bookkeeping columns.
feature_list = [x for x in training_traits_df.columns.tolist() if x not in big5]
feature_list.remove('userid')
feature_list.remove('age')
feature_list.remove('gender')
feature_list.remove('Unnamed: 0')
feature_list.remove('seg')
sLength = len(test_traits_df['userid'])
for trait in big5:
    # Placeholder column, overwritten with the predictions below.
    test_traits_df[trait] = pd.Series(np.random.randn(sLength),
                                      index=test_traits_df.index)
    input_train_liwc = training_traits_df[feature_list]
    input_train_traits = training_traits_df[trait]
    regr = linear_model.LinearRegression()
    regr.fit(input_train_liwc, input_train_traits)
    input_test_liwc = test_traits_df[feature_list]
    output_traits_predicted = regr.predict(input_test_liwc)
    test_traits_df[trait] = output_traits_predicted
output_traits_predicted_df = test_traits_df[['ope', 'ext', 'con', 'agr', 'neu']]

#######################################################################
# BASELINE (MAJORITY CLASS) FOR PREDICTING AGE GROUP
#######################################################################
target_age = "xx-24"
age_series = training_profile_df['age']
first_AG = age_series[age_series < 25].count()
second_AG = age_series[(age_series >= 25) & (age_series < 35)].count()
third_AG = age_series[(age_series >= 35) & (age_series < 50)].count()
fourth_AG = age_series[age_series >= 50].count()
list1 = [first_AG, second_AG, third_AG, fourth_AG]
max_AG = max(list1)
if max_AG == first_AG:
    target_age = 'xx-24'
elif max_AG == second_AG:
    target_age = '25-34'
elif max_AG == third_AG:
    target_age = '35-49'
else:
    target_age = '50-xx'

# Output path from the command line (see note above about argv indices).
output_folder = sys.argv[4]
items = np.array(['id', 'age_group', 'gender', 'extrovert', 'neurotic',
                  'agreeable', 'conscientious', 'open'])
# gender -> output_gender_predicted
# age_group -> target_age (majority class)
# traits -> output_traits_predicted_df columns
root = ET.Element('user')
files_count = len(test_profile_df.index)
for k in range(1, files_count + 1):
    write_contents(k)
| [
"sgvm2015@gmail.com"
] | sgvm2015@gmail.com |
d6f9aae369f645e06dd5a81e0da92deb03d22e25 | 350d6b7246d6ef8161bdfccfb565b8671cc4d701 | /Insert Interval.py | fdec8d8c9a46850b59d3f652b43f6c85e069796d | [] | no_license | YihaoGuo2018/leetcode_python_2 | 145d5fbe7711c51752b2ab47a057b37071d2fbf7 | 2065355198fd882ab90bac6041c1d92d1aff5c65 | refs/heads/main | 2023-02-14T14:25:58.457991 | 2021-01-14T15:57:10 | 2021-01-14T15:57:10 | 329,661,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,191 | py | class Solution:
def insert(self, intervals: 'List[Interval]', newInterval: 'Interval') -> 'List[Interval]':
    """Insert newInterval into sorted non-overlapping intervals, merging overlaps.

    O(n): copy intervals that start before the new one, merge the new
    interval in, then append/merge the remainder. Endpoints in the output
    may be mutated in place.
    """
    # init data
    new_start, new_end = newInterval
    idx, n = 0, len(intervals)
    output = []

    # add all intervals starting before newInterval
    while idx < n and new_start > intervals[idx][0]:
        output.append(intervals[idx])
        idx += 1

    # add newInterval:
    # if there is no overlap, just add the interval
    if not output or output[-1][1] < new_start:
        output.append(newInterval)
    # if there is an overlap, merge with the last interval
    else:
        output[-1][1] = max(output[-1][1], new_end)

    # add next intervals, merging with the tail if needed
    while idx < n:
        interval = intervals[idx]
        start, end = interval
        idx += 1
        if output[-1][1] < start:
            output.append(interval)
        else:
            output[-1][1] = max(output[-1][1], end)
    return output
"yihao_guo@gwmail.gwu.edu"
] | yihao_guo@gwmail.gwu.edu |
4f8f0416c1adbe6562d11d2c4658f0914196dac1 | 6bf55c4bc480a48bbbe723bbeb1a5a36b173b394 | /examples/mnist_pruning.py | 9ca57ad48b3cc50096643365dad98f21978d6188 | [
"Apache-2.0"
] | permissive | timwillhack/dm-haikuBah2 | 1b3b71c58e7467b6ddcd4b93968596b3e3f73dd4 | b76a3db3a39b82c8a1ae5a81a8a0173c23c252e5 | refs/heads/main | 2023-08-31T22:54:38.639767 | 2021-11-12T17:27:54 | 2021-11-12T17:27:54 | 427,442,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,435 | py | # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""MNIST classifier with pruning as in https://arxiv.org/abs/1710.01878 ."""
import functools
from typing import Callable, Generator, Mapping, Sequence, Tuple
from absl import app
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
import tensorflow_datasets as tfds
# Type aliases used throughout the pruning example.
Batch = Mapping[str, np.ndarray]  # one batch of named arrays, e.g. "image"/"label"
Predicate = Callable[[str, str, jnp.ndarray], bool]  # (module_name, param_name, value) -> prune this leaf?
PredicateMap = Mapping[Predicate, jnp.ndarray]
ModuleSparsity = Sequence[Tuple[Predicate, jnp.ndarray]]  # (predicate, target sparsity) pairs
def topk_mask(value: jnp.ndarray, density_fraction: float) -> jnp.ndarray:
  """Return a mask with 1s marking the top fraction of value.

  Note: This routine takes care to make sure that ties are handled without
  bias toward smaller indices. This can be a problem when pruning large
  embedding matrices, or global pruning where all parameters in the model
  are concatenated together and pruned at once.

  Args:
    value: An array. Must contain sortable values (i.e. not complex).
    density_fraction: A float. What fraction of value should be kept.

  Returns:
    A mask containing 1s where the topk elements of value are. k is
    determined based on density_fraction and the size of value.
  """

  def topk_mask_internal(value):
    assert value.ndim == 1
    indices = jnp.argsort(value)
    k = jnp.round(density_fraction * jnp.size(value)).astype(jnp.int32)
    # 1 for the last k positions in ascending order == the k largest values.
    mask = jnp.greater_equal(np.arange(value.size), value.size - k)
    # Scatter back to the original positions.
    mask = jnp.zeros_like(mask).at[indices].set(mask)
    return mask.astype(np.int32)

  # Shuffle value so that identical values aren't always pruned with a bias
  # to lower indices. jax.random.shuffle was removed from JAX; use
  # jax.random.permutation, which is equivalent for a 1-D array.
  orig_shape = value.shape
  value = jnp.reshape(value, -1)
  shuffled_indices = jax.random.permutation(
      jax.random.PRNGKey(42), jnp.arange(0, jnp.size(value), dtype=jnp.int32))
  shuffled_mask = topk_mask_internal(value[shuffled_indices])
  mask = jnp.zeros_like(shuffled_mask).at[shuffled_indices].set(shuffled_mask)
  mask = jnp.reshape(mask, orig_shape)
  return mask
def zhugupta_func(progress: jnp.ndarray) -> jnp.ndarray:
  """Cubic sparsity schedule from 'To Prune or Not To Prune'.

  Maps training progress in [0, 1] to the fraction of the final sparsity
  target that should be applied at this point (0 at start, 1 at end).
  """
  return 1. - (1. - progress)**3
def _create_partitions(
    module_sparsity: 'ModuleSparsity', params: 'hk.Params'
) -> 'Tuple[Sequence[hk.Params], Sequence[jnp.ndarray], hk.Params]':
  """Partition params based on sparsity_predicate_map.

  Args:
    module_sparsity: A Sequence of (Predicate, float) pairs. Predicate
      functions take module_name, name, value as arguments. The floats are
      the sparsity level to apply to leaves matching Predicate.
    params: A Haiku param.

  Returns:
    A tuple containing:
    - A list of len(module_sparsity), where each element is a disjoint subset
      of the `params` to be pruned.
    - A list of len(module_sparsity) where each element is the sparsity level.
    - The remaining elements of `params` not being pruned such that the union
      of the first list and this element contains the elements of `params`.
  """
  list_of_trees = []
  sparsity_list = []
  tail = params
  # Greedily match so that no parameter can be matched more than once.
  for predicate, sparsity in module_sparsity:
    head, tail = hk.data_structures.partition(predicate, tail)
    list_of_trees.append(head)
    sparsity_list.append(sparsity)
  return list_of_trees, sparsity_list, tail
def sparsity_ignore(m: str, n: str, v: jnp.ndarray) -> bool:
  """Any parameter matching these conditions should generally not be pruned.

  True for biases (n == 'b'), 1-D vectors, and batch-norm parameters.
  """
  return n == "b" or v.ndim == 1 or "batchnorm" in m or "batch_norm" in m
@functools.partial(jax.jit, static_argnums=2)
def apply_mask(params: 'hk.Params', masks: 'Sequence[hk.Params]',
               module_sparsity: 'ModuleSparsity') -> 'hk.Params':
  """Apply existing masks to params based on sparsity_predicate_map.

  Some of params may not be masked depending on the content of
  module_sparsity. masks must have the same structure as implied by
  module_sparsity.

  Args:
    params: Tree to mask, can be a superset of masks.
    masks: Tree of masks to apply to params. This must match the result of
      applying module_sparsity to params.
    module_sparsity: A dictionary mapping predicates to sparsity levels. Any
      leaf matching a predicate key will be pruned to the resulting sparsity
      level.

  Returns:
    A tree of masked params.
  """
  params_to_prune, _, params_no_prune = _create_partitions(
      module_sparsity, params)
  pruned_params = []
  for value, mask in zip(params_to_prune, masks):
    # tree_multimap was removed from JAX; tree_map is the n-ary replacement.
    pruned_params.append(
        jax.tree_util.tree_map(lambda x, y: x * y, value, mask))
  params = hk.data_structures.merge(*pruned_params, params_no_prune)
  return params
@functools.partial(jax.jit, static_argnums=2)
def update_mask(params: 'hk.Params', sparsity_fraction: float,
                module_sparsity: 'ModuleSparsity') -> 'Sequence[hk.Params]':
  """Generate masks based on module_sparsity and sparsity_fraction.

  Each matched sub-tree is pruned (by magnitude) to
  `sparsity * sparsity_fraction`, so masks anneal toward the final target
  as sparsity_fraction goes from 0 to 1.
  """
  params_to_prune, sparsities, _ = _create_partitions(module_sparsity, params)
  masks = []

  def map_fn(x: jnp.ndarray, sparsity: float) -> jnp.ndarray:
    # Keep the largest-magnitude fraction of each leaf.
    return topk_mask(jnp.abs(x), 1. - sparsity * sparsity_fraction)

  for tree, sparsity in zip(params_to_prune, sparsities):
    map_fn_sparsity = functools.partial(map_fn, sparsity=sparsity)
    mask = jax.tree_util.tree_map(map_fn_sparsity, tree)
    masks.append(mask)
  return masks
@jax.jit
def get_sparsity(params: 'hk.Params'):
  """Calculate the total sparsity and tensor-wise sparsity of params.

  Returns (total_param_count, total_nonzero_count, per-leaf zero fraction).
  Uses jax.tree_util (jax.tree_leaves / jax.tree_map were removed from the
  top-level jax namespace).
  """
  total_params = sum(jnp.size(x) for x in jax.tree_util.tree_leaves(params))
  total_nnz = sum(jnp.sum(x != 0.) for x in jax.tree_util.tree_leaves(params))
  leaf_sparsity = jax.tree_util.tree_map(
      lambda x: jnp.sum(x == 0) / jnp.size(x), params)
  return total_params, total_nnz, leaf_sparsity
def net_fn(batch: 'Batch') -> jnp.ndarray:
  """Standard LeNet-300-100 MLP network.

  Expects batch["image"] with uint8 pixel values; returns 10 class logits.
  """
  x = batch["image"].astype(jnp.float32) / 255.
  mlp = hk.Sequential([
      hk.Flatten(),
      hk.Linear(300), jax.nn.relu,
      hk.Linear(100), jax.nn.relu,
      hk.Linear(10),
  ])
  return mlp(x)
def load_dataset(
    split: str,
    *,
    is_training: bool,
    batch_size: int,
) -> 'Generator[Batch, None, None]':
  """Loads the MNIST dataset as an infinite generator of batches."""
  ds = tfds.load("mnist:3.*.*", split=split).cache().repeat()
  if is_training:
    # Shuffle only for training; fixed seed for reproducibility.
    ds = ds.shuffle(10 * batch_size, seed=0)
  ds = ds.batch(batch_size)
  return iter(tfds.as_numpy(ds))
def main(_):
  """Train the MLP with magnitude pruning and report accuracy/sparsity."""
  # Make the network and optimiser.
  net = hk.without_apply_rng(hk.transform(net_fn))
  opt = optax.adam(1e-3)

  # Define layerwise sparsities.
  def module_matching(s):
    # Match params of the module named `s`, excluding biases/batch-norm.
    def match_func(m, n, k):
      return m.endswith(s) and not sparsity_ignore(m, n, k)
    return match_func

  module_sparsity = ((module_matching("linear"), 0.98),
                     (module_matching("linear_1"), 0.9))

  # Training loss (cross-entropy).
  @jax.jit
  def loss(params: 'hk.Params', batch: 'Batch') -> jnp.ndarray:
    """Compute the loss of the network, including L2."""
    logits = net.apply(params, batch)
    labels = jax.nn.one_hot(batch["label"], 10)
    l2_loss = 0.5 * sum(
        jnp.sum(jnp.square(p)) for p in jax.tree_util.tree_leaves(params))
    softmax_xent = -jnp.sum(labels * jax.nn.log_softmax(logits))
    softmax_xent /= labels.shape[0]
    return softmax_xent + 1e-4 * l2_loss

  # Evaluation metric (classification accuracy).
  @jax.jit
  def accuracy(params: 'hk.Params', batch: 'Batch') -> jnp.ndarray:
    predictions = net.apply(params, batch)
    return jnp.mean(jnp.argmax(predictions, axis=-1) == batch["label"])

  @jax.jit
  def get_updates(
      params: 'hk.Params',
      opt_state: 'optax.OptState',
      batch: 'Batch',
  ) -> 'Tuple[hk.Params, optax.OptState]':
    """Learning rule (stochastic gradient descent)."""
    grads = jax.grad(loss)(params, batch)
    updates, opt_state = opt.update(grads, opt_state)
    return updates, opt_state

  # We maintain avg_params, the exponential moving average of the "live"
  # params, used only for evaluation (cf. https://doi.org/10.1137/0330046).
  @jax.jit
  def ema_update(params, avg_params):
    return optax.incremental_update(params, avg_params, step_size=0.001)

  # Make datasets.
  train = load_dataset("train", is_training=True, batch_size=1000)
  train_eval = load_dataset("train", is_training=False, batch_size=10000)
  test_eval = load_dataset("test", is_training=False, batch_size=10000)

  # Implementation note: It is possible to avoid pruned_params and just use
  # a single params which progressively gets pruned. The current form mimics
  # the TF implementation, which allows previously inactivated connections
  # to become active again.
  pruned_params = params = avg_params = net.init(
      jax.random.PRNGKey(42), next(train))
  masks = update_mask(params, 0., module_sparsity)
  opt_state = opt.init(params)

  # Train/eval loop.
  for step in range(10001):
    if step % 1000 == 0:
      # Periodically evaluate classification accuracy on train & test sets.
      avg_params = apply_mask(avg_params, masks, module_sparsity)
      train_accuracy = accuracy(avg_params, next(train_eval))
      test_accuracy = accuracy(avg_params, next(test_eval))
      total_params, total_nnz, per_layer_sparsities = get_sparsity(avg_params)
      train_accuracy, test_accuracy, total_nnz, per_layer_sparsities = (
          jax.device_get(
              (train_accuracy, test_accuracy, total_nnz,
               per_layer_sparsities)))
      print(f"[Step {step}] Train / Test accuracy: "
            f"{train_accuracy:.3f} / {test_accuracy:.3f}.")
      print(f"Non-zero params / Total: {total_nnz} / {total_params}; "
            f"Total Sparsity: {1. - total_nnz / total_params:.3f}")

    # Do SGD on a batch of training examples.
    pruned_params = apply_mask(params, masks, module_sparsity)
    updates, opt_state = get_updates(pruned_params, opt_state, next(train))
    # Applying a straight-through estimator here (i.e. not masking the
    # updates) leads to much worse performance.
    updates = apply_mask(updates, masks, module_sparsity)
    params = optax.apply_updates(params, updates)

    # We start pruning at iteration 1000 and end at iteration 8000.
    progress = min(max((step - 1000.) / 8000., 0.), 1.)
    if step % 200 == 0:
      sparsity_fraction = zhugupta_func(progress)
      masks = update_mask(params, sparsity_fraction, module_sparsity)
    avg_params = ema_update(params, avg_params)
  print(per_layer_sparsities)
if __name__ == "__main__":
  # absl handles flag parsing before calling main.
  app.run(main)
| [
"timwillhack@gmail.com"
] | timwillhack@gmail.com |
38c7b8ae0a1fe7c519e2cb5f2fca8b9894080414 | bcc00e164c3d20b3c0ac1099741a71491af0e302 | /.history/neotropical_datasetAPI_20191014144558.py | 7ff867bf62e30070273816b13537d6b29785d50f | [] | no_license | manasa151/Toshokan | cff2af75c480bd629b49ce39c17857b316102e45 | 192c7eaf8523e38fa5821affdec91eb60ae5b7ce | refs/heads/master | 2020-08-05T14:56:10.285024 | 2019-10-15T17:07:09 | 2019-10-15T17:07:09 | 212,586,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,422 | py | import csv
from os import makedirs
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import NoSuchElementException
import time
from io import BytesIO
from PIL import _imaging
from PIL import Image
import requests
import datetime
import os
import time
from os.path import getsize, join
import imghdr
import os
from os.path import getsize, join
from selenium import webdriver
from selenium.webdriver.common.by import By
import time
from io import BytesIO
from PIL import Image
import requests
import datetime
import os
import time
from os.path import getsize, join
import imghdr
import os
from os.path import getsize, join
def search_bySpecies():
    """Find the hard-coded species in request_species.csv and create its
    family/order/species directory tree, printing each matching row.

    CSV columns: family, order, species, taxonomy code (header skipped).
    """
    species = 'Semioptera wallacii'
    with open('request_species.csv', 'r') as csv_file:
        csv_reader = csv.reader(csv_file)
        next(csv_reader)  # skip the header row
        for row in csv_reader:
            if species == row[2]:
                # makedirs(f'testing/{row[1]}', exist_ok=True)
                makedirs(f'{row[0]}/{row[1]}/{row[2]}', exist_ok=True)
                # download_species_byOrder(row[0], row[1], row[2])
                print(row)
def NEW_download_from_CSV():
    """For every row of request_species.csv (family, order, species, tax
    code), create the output directory tree and download the species'
    images from eBird, pausing between species."""
    with open('request_species.csv', 'r') as csv_file:
        csv_reader = csv.reader(csv_file)
        next(csv_reader)  # skip the header row
        for row in csv_reader:
            makedirs(f'{row[0]}/{row[1]}/{row[2]}', exist_ok=True)
            download_species_byOrder(row[0], row[1], row[2], row[3])
            time.sleep(10)  # be polite between species downloads
def download_species_byOrder(bird_family, bird_order, bird_species, tax_code):
    """Scrape all photos for one species from ebird.org/species/<tax_code>
    and save them under <family>/<order>/<species>/.

    NOTE(review): indentation reconstructed from a flattened dump; the
    placement of the "show more" click inside the while loop is the most
    plausible reading -- confirm against the original script.
    """
    # Initiate the web driver.
    ebird_url = f'https://ebird.org/species/{tax_code}'
    chromeDriver = 'C:\\Users\\jmentore\\Documents\\Selenium Chrome Driver\\chromedriver.exe'
    driver = webdriver.Chrome(executable_path=chromeDriver)
    driver.get(ebird_url)
    driver.maximize_window()
    time.sleep(3)

    # Click the "view all" link to reach the media gallery.
    view_all = driver.find_element(
        By.XPATH, '/html/body/div/div[7]/div/div/div[2]/div[1]/a')
    time.sleep(5)
    view_all.click()

    ids = driver.find_elements_by_tag_name('img')
    sci_name = bird_species
    family = bird_family
    order = bird_order
    ebird_counter = 0
    file_ext = '.jpg'
    messages = []  # fix: was referenced in the except clause but never defined

    show_more = driver.find_element_by_id('show_more')
    while show_more.is_displayed():
        try:
            for ii in ids:
                download_link = ii.get_attribute('src')
                r = requests.get(download_link)
                img = Image.open(BytesIO(r.content))
                ebird_counter = ebird_counter + 1
                img.save(
                    f'{family}/{order}/{sci_name}/{sci_name}-{ebird_counter}{file_ext}')
                time.sleep(5)
                print(download_link)
            time.sleep(5)
            driver.find_element_by_xpath('//*[@id="show_more"]').click()
        except Exception as e:
            messages.append(e)
            time.sleep(1)
    if not show_more.is_displayed():
        print(f'Total url extracted: {ebird_counter}')
        driver.quit()
def post_safe(url, params):
    """POST `params` to `url`, retrying up to 3 times.

    Returns True on success, False if all attempts failed (after printing
    a timestamped report of the captured exceptions).
    """
    done = False
    tries_left = 3
    messages = []
    while tries_left and not done:
        tries_left -= 1
        try:
            response = requests.post(url, data=params)
            done = True
        except Exception as e:
            messages.append(e)
            time.sleep(1)
    if not done:
        # fix: the module does `import datetime`, so the class must be
        # qualified; the bare `datetime.now()` raised AttributeError.
        output = "%s\n" % (
            datetime.datetime.now().strftime('%Y-%m-%d %H:%M'),)
        output += "requests() failed 3 times:\n"
        for m in messages:
            # fix: exceptions must be stringified before concatenation.
            output += str(m) + "\n"
        print(output)
    return done
def test(tax_code):
    """Open a species page on eBird and click its "view all" media link.

    Manual smoke test for the Selenium navigation used by
    download_species_byOrder; leaves the browser window open.
    """
    ebird_url = f'https://ebird.org/species/{tax_code}'
    chromeDriver = 'C:\\Users\\jmentore\\Documents\\Selenium Chrome Driver\\chromedriver.exe'
    driver = webdriver.Chrome(executable_path=chromeDriver)
    driver.get(ebird_url)
    driver.maximize_window()
    time.sleep(3)
    # Click the "view all" link.
    view_all = driver.find_element(
        By.XPATH, '/html/body/div/div[7]/div/div/div[2]/div[1]/a')
    time.sleep(5)
    view_all.click()
# Script entry point: note these run at import time (no __main__ guard)
# and drive a live Selenium browser session.
NEW_download_from_CSV()
# search_bySpecies()
test('walsta2')
# search_byTaxcode('zimant1')
| [
"cornerstoneconnections@gmail.com"
] | cornerstoneconnections@gmail.com |
b842f83dd100eea2e938b1fbf74a35f90ec6129e | c3ceffabcbb5ddfd47921661744f998a90c36bc9 | /lib/ouestjob.py | 17b2925862d7368f53e541889db3be284d98697c | [] | no_license | elominp/internship_offers_search | b1595464603876b155ce40e456dedbfe1fec30d1 | 620aa9c3c200dfaca5859f9cd5e623354e538b05 | refs/heads/master | 2021-04-28T08:15:47.191804 | 2016-08-25T10:55:53 | 2016-08-25T10:55:53 | 122,244,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,963 | py | #!/bin/env python3
# -*- coding: utf-8 -*-
import http.client
import html.parser
import copy
import json
class OuestJobParser(html.parser.HTMLParser):
    """HTML parser extracting job offers from ouestjob.com listing pages.

    Each <section class="annonce ..."> starts a new offer entry; the
    <a class="lien-annonce"> provides the link and flags the following
    <strong> as the title; <span itemprop="name"> holds the organisation.
    Results accumulate in self.offer_entries.
    """

    def __init__(self, *kwargs):
        self.offer_entries = []
        self.offer_entry = None
        # One-shot callback for the next text node (title/organisation).
        self.data_handler = None
        self.starttag_handlers = {"section": self.handle_startsection,
                                  "a": self.handle_starta,
                                  "strong": self.handle_startstrong,
                                  "span": self.handle_startspan}
        self.next_starthandler = None
        self.handle_title_text = False
        super().__init__()

    def handle_starttag(self, tag, attrs):
        if tag in self.starttag_handlers:
            self.starttag_handlers[tag](dict(attrs))
        elif self.next_starthandler is not None:
            self.next_starthandler(tag, dict(attrs))
            self.next_starthandler = None

    def handle_startsection(self, attrs):
        # A section whose first class is "annonce" begins a new offer.
        if "class" in attrs and \
                attrs["class"].split()[0] == "annonce":
            self.offer_entry = {"summary": ""}
            self.offer_entries.append(self.offer_entry)

    def handle_starta(self, attrs):
        if "class" in attrs:
            classes = attrs["class"].split()
            if len(classes) > 0 and classes[0] == "lien-annonce":
                if "href" in attrs:
                    self.offer_entry["link"] = "http://www.ouestjob.com" + \
                        attrs["href"]
                # The next <strong> contains the offer title.
                self.handle_title_text = True

    def handle_startstrong(self, attrs):
        if self.handle_title_text is True:
            self.data_handler = self.handle_title
            self.handle_title_text = False

    def handle_startspan(self, attrs):
        if "itemprop" in attrs and attrs["itemprop"] == "name":
            self.data_handler = self.handle_organisation

    def handle_data(self, data):
        if self.data_handler is not None:
            self.data_handler(data)
            self.data_handler = None

    def handle_title(self, data):
        self.offer_entry["title"] = data

    def handle_organisation(self, data):
        self.offer_entry["organisation"] = data
def parse_ouestjob_offers():
    """Fetch the Rennes IT-internship search page and parse its offers.

    Returns a deep copy of the parsed offer dicts so the parser's internal
    state cannot be mutated by callers.
    """
    ouestjob_connection = http.client.HTTPConnection("www.ouestjob.com")
    ouestjob_connection.connect()
    ouestjob_connection.request("GET",
                                "/emplois/recherche.html?l=Rennes+35000&f=Informatique_dev_hard&f=Informatique_dev&f=Informatique_syst_info&f=Informatique_syst_reseaux&c=Stage")
    ouestjob_response = ouestjob_connection.getresponse()
    parser = OuestJobParser()
    parser.feed(ouestjob_response.read().decode("utf-8"))
    ouestjob_connection.close()
    return copy.deepcopy(parser.offer_entries)
def ouestjob_offers_to_json():
    """Serialise the scraped offers to a JSON string."""
    return json.dumps(parse_ouestjob_offers())


if __name__ == "__main__":
    print(ouestjob_offers_to_json().encode("utf-8"))
| [
"pirou_g@epitech.eu"
] | pirou_g@epitech.eu |
36d1358c9061802c0181d07fff98dcc46196842a | 4b6f0adb1748d5192d29839fe8274b57adcaa480 | /gec/word_correct_model/data_utility.py | 53d7fabfed3ea44283f6df9a45a00aee7b92dc5c | [] | no_license | IvyTang/code | 4a03aca05d55caaf434d899b8b2cb2771987ed70 | d1926677bf78bf964abca04c3fa8c40c5e18d288 | refs/heads/master | 2020-04-11T11:31:30.518413 | 2018-12-14T07:50:21 | 2018-12-14T07:50:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,662 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
class DataUtility:
    """Vocabulary lookups and id<->token conversion for a spelling/correction
    model: an input word vocabulary, an input letter vocabulary and an
    output word vocabulary, each loaded from "token##id" files."""

    def __init__(self, vocab_file_in_words=None, vocab_file_in_letters=None,
                 vocab_file_out=None):
        self.start_str = "<start>"
        self.unk_str = "<unk>"
        self.num_str = "<num>"
        self.pun_str = "<pun>"
        self.pad_id = 0
        # Vocabularies are only loaded when all three files are supplied.
        if vocab_file_in_words and vocab_file_in_letters and vocab_file_out:
            self.id2token_in_words, self.token2id_in_words = \
                self._read_vocab(vocab_file_in_words)
            print("in words vocabulary size =",
                  str(len(self.token2id_in_words)))
            self.in_words_count = len(self.token2id_in_words)

            self.id2token_in_letters, self.token2id_in_letters = \
                self._read_vocab(vocab_file_in_letters)
            print("in letters vocabulary size =",
                  str(len(self.token2id_in_letters)))
            self.start_id = self.token2id_in_letters[self.start_str]
            print("in vocabulary size =",
                  str(len(self.id2token_in_words) +
                      len(self.id2token_in_letters)))
            self.in_letters_count = len(self.token2id_in_letters)

            self.id2token_out, self.token2id_out = \
                self._read_vocab(vocab_file_out)
            self.out_words_count = len(self.token2id_out)
            print("out vocabulary size =", str(len(self.token2id_out)))

    @staticmethod
    def _read_vocab(path):
        """Read a vocabulary file with one "token##id" entry per line."""
        id2token, token2id = {}, {}
        with open(path, mode="r") as f:
            for line in f:
                token, token_id = line.strip().split("##")
                token_id = int(token_id)
                id2token[token_id] = token
                token2id[token] = token_id
        return id2token, token2id

    def word2id(self, word):
        """Map a word to its input-vocabulary id.

        Single letters and in-vocabulary words map to themselves; numbers
        map to <num>, pure punctuation to <pun>, everything else to <unk>.
        """
        if re.match("^[a-zA-Z]$", word) or (word in self.token2id_in_words):
            word_out = word
        elif re.match("^[+-]*[0-9]+.*[0-9]*$", word):
            word_out = self.num_str
        elif re.match("^[^a-zA-Z0-9']*$", word):
            word_out = self.pun_str
        else:
            word_out = self.unk_str
        rid = self.token2id_in_words.get(word_out, -1)
        if rid == -1:
            # Covers single letters that are not themselves vocabulary entries.
            return self.token2id_in_words[self.unk_str]
        return rid

    def words2ids(self, words):
        return [self.word2id(word) for word in words if len(word) > 0]

    def letters2ids(self, words):
        """Per word: <start> id + up to 19 letter ids, padded to length 20.

        NOTE(review): the truncation uses the letter-id list but the padding
        uses len(letter_split); words longer than 19 letters therefore get
        no padding -- preserved as-is from the original.
        """
        return [[self.start_id] +
                [self.token2id_in_letters.get(
                    letter.lower(), self.token2id_in_letters[self.unk_str])
                 for letter in letter_split if len(letter) > 0][:19] +
                [self.pad_id] * (19 - len(letter_split))
                for letter_split in words]

    def outword2id(self, outword):
        return self.token2id_out.get(outword, self.token2id_out[self.unk_str])

    def ids2outwords(self, ids_out):
        return [self.id2token_out.get(id, self.unk_str) for id in ids_out]

    def ids2inwords(self, ids_in):
        return [self.id2token_in_words.get(int(id), self.unk_str)
                for id in ids_in]

    def data2ids_line(self, data_line):
        """Split a "letters|#|words" training line into ids and lengths."""
        data_line_split = re.split("\\|#\\|", data_line)
        letters_line = data_line_split[0].replace(" ", "").split("\t")
        raw_words_line = data_line_split[1].strip().split("\t")
        words_line = []
        for i in range(len(raw_words_line)):
            words_line.append(raw_words_line[i])
        words_ids = self.words2ids(words_line)
        letters_ids = self.letters2ids(letters_line)
        words_num = len(words_line)
        letters_num = [len(letter) for letter in letters_line]
        return (raw_words_line, words_line, letters_line, words_ids,
                letters_ids, words_num, letters_num)

    def sentence2ids(self, sentence):
        """Convert a whitespace-separated sentence to word/letter ids."""
        words_array = re.split('\\s+', sentence)
        words_num = len(words_array)
        letters_num = [len(letters) for letters in words_array]
        words_ids = self.words2ids(words_array)
        letters_ids = self.letters2ids(words_array)
        return words_ids, letters_ids, words_num, letters_num
| [
"bruce.lee@kikatech.com"
] | bruce.lee@kikatech.com |
77c9a4aa2fe8cce936dbee351e0ce51904c6acdd | 3d6e9adecff041d49fe1f5174eb55b365508b324 | /State_Recognition_Code/old code/State_Checker.py | b53da8dfe291d49f33a5b68e89c9ff482ca311bc | [] | no_license | SebCurrie/Graph_State_Recognition | 0d3a688dd0f8002d837f55c4453f4fe98140ce78 | 2047db10936abcc86156cde8a368fe9fb555307c | refs/heads/master | 2022-12-04T09:15:44.350769 | 2020-08-26T23:42:00 | 2020-08-26T23:42:00 | 278,402,424 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,666 | py | import numpy as np
import itertools
import copy
import Graph_Printers as gp
import math
#Two qubit matrix definitions
# Single-qubit matrix definitions (unnormalised Hadamard).
hadamard = np.array([[1, 1], [1, -1]])
identity = np.array([[1, 0], [0, 1]])
Z = np.array([[1, 0], [0, -1]])
X = np.array([[0, 1], [1, 0]])

n = 3  # number of qubits
input_coeffs = [1, 1, 1, -1, -1, -1, -1, -1]
# All possible combinations of bit strings; useful later where we need index
# positions of states with one 1, two 1's etc.
input_strings = ["".join(seq) for seq in itertools.product("01", repeat=3)]
print(input_strings)
if len(input_coeffs) != len(input_strings):
    print('WARNING: Coefficient array and String array have different lengths')
#Need to write a function that tests whether REW under hadamards
#This function needs to test all possible combinations of Identity tensor hadamard
# III HII IHI ....HHH etc
def is_state_LEQ_REW(state):
    """Search all tensor products of {Hadamard, Identity} over n qubits for
    one that maps `state` to a real equally-weighted (REW) state.

    Returns (REW_state, operator); the operator is None when the input is
    already REW. Implicitly returns None when no combination works.

    Fixes vs. the original: the early-return referenced an undefined `_`,
    discarded the REW_TEST result, and multiplied the global `input_coeffs`
    instead of the `state` argument.
    """
    no_zero_coeffs = state.count(0)
    # Fully-supported input: check whether it is already REW as-is.
    if no_zero_coeffs == 0 and REW_TEST(state):
        print('input state:', state, 'is already a REW state')
        return state, None
    checks = itertools.product([hadamard, identity], repeat=n)
    checks = [list(p) for p in checks]
    for i in range(2**n):
        temp = checks[i]
        # Collapse the n single-qubit factors into one 2^n x 2^n operator.
        for k in range(n - 1):
            temp[0] = np.kron(temp[0], temp[1])
            del temp[1]
        LU_State = np.matmul(temp[0], state)
        if REW_TEST(LU_State):
            print('State', state, ' is LU to the REW state', LU_State,
                  ' under the action of \n', temp[0], '\n')
            return LU_State, temp[0]
def REW_TEST(state):
    """True iff every coefficient has the same magnitude as the first one
    (i.e. the state is real equally weighted).

    Rewritten from the original break/else loop; an empty state now returns
    True instead of raising NameError on the unbound result flag.
    """
    return all(abs(state[0]) == abs(c) for c in state)
#This is probably far from the best implementation of this
#Trying numericall rather than symbolically reduces to the problem of writing a general CZ for arbritray targets
#Needs EDGE TESTING
#Takes the coefficients and strings of a REW state,
#and returns the operations to take you to the associated hypergraph
def Graph_From_REW(coeffs, strings, n):
    """Given the coefficients and bit strings of a REW state, return the
    operations (hyperedges) taking you to the associated hypergraph state.

    Mutates `coeffs` in place while cancelling signs. Probably far from the
    best implementation of this.
    """
    ops = []
    for t in range(n):  # first check terms with one 1, then two, ...
        for k in range(len(coeffs)):
            # Negative coefficient whose string has exactly t+1 ones.
            if coeffs[k] < 0 and strings[k].count('1') == t + 1:
                # Positions of the 1's within the string, e.g. -1100 has
                # ones at indexes 0, 1.
                indices = [i for i, x in enumerate(strings[k]) if x == "1"]
                ops.append(indices)
                # Apply a sign flip (CZ-like) to every element whose set of
                # 1-positions contains these indices.
                for z in range(len(coeffs)):
                    to_be_tested_indices = [
                        i for i, x in enumerate(strings[z]) if x == "1"]
                    if set(indices).issubset(set(to_be_tested_indices)):
                        coeffs[z] = -coeffs[z]
    print('The edges in the graph are:', ops, '\n')
    return ops
return ops
# Run the pipeline on the module-level example state: find an LU-equivalent
# REW state, derive the hypergraph edges, then draw them via Graph_Printers.
state_coeffs,LU=is_state_LEQ_REW(input_coeffs)
edges=Graph_From_REW(input_coeffs, input_strings,n)
gp.get_hypergraph_from_edges(edges,n)
"noreply@github.com"
] | noreply@github.com |
0f484847c5c71e263b71fb80674cba95ca9d9be7 | 35ffed61e1c4507a5cda24a8f6675fc10665d260 | /01_1_wordvec_start.py | edab20ef7fedf07f079d8cca581535faeaebeebe | [] | no_license | fanrong33/nlp | 1df0058a79361168593c892d7d8f550d57c916fc | ad239956e82ee38dd124e706bab12a8425516a77 | refs/heads/master | 2021-05-12T07:49:09.567602 | 2018-01-15T13:33:12 | 2018-01-15T13:33:12 | 117,259,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,729 | py | # encoding: utf-8
# word2vec的模型是基于神经网络来训练词向量模型;
# word2vec的主要的应用还是自然语言的处理,通过训练出来的词向量,可以进行聚类等处理,或者作为其他深入学习的输入。
from gensim.models import Word2Vec
from gensim.models.word2vec import LineSentence
# 1、加载句子集合
sentences = [['this', 'is', 'the', 'first', 'sentence', 'for', 'word2vec'],
['this', 'is', 'the', 'second', 'sentence'],
['yet', 'another', 'sentence'],
['one', 'more', 'sentence'],
['and', 'the', 'final', 'sentence']]
# 2、训练模型
model = Word2Vec(sentences, min_count=1)
# PS:sentences 有不同的处理类可以使用,方便从文件加载处理为句子集合
print(model)
''' Word2Vec(vocab=14, size=100, alpha=0.025) '''
# 总结词汇
words = list(model.wv.vocab)
print(words)
''' ['this', 'is', 'the', 'first', 'sentence', 'for', 'word2vec', 'second', 'yet', 'another', 'one', 'more', 'and', 'final'] '''
# 查看某个词的词向量
print(model['first'])
'''
[-0.00250688 -0.00162008 0.00250901 -0.00097651 -0.0048413 -0.00068062
0.00435694 -0.00375766 -0.00431498 0.0019443 -0.00128378 -0.00343237
...
-0.00034512 0.00334434 0.00412283 0.00035618 -0.00126278 -0.00482794
0.00386906 -0.00355957 -0.00194974 -0.00251286]
'''
print(len(model['first']))
''' 100 词向量为100维度 '''
# 所以,则为所有的词的向量集合,理解word2vec的结构!
print(model[model.wv.vocab])
'''
[[ 3.35454009e-03 -2.96757789e-03 8.95642443e-04 ..., 4.16836003e-03
-3.26405023e-03 -1.91481831e-03]
...,
[ 7.19302261e-05 1.70022575e-03 3.59526509e-03 ..., 1.11010019e-03
3.70053225e-03 -3.61868995e-03]]
'''
# 3、持久化模型
model.save('sample.en.text.model')
model.wv.save_word2vec_format('sample.en.text.vector', binary=True)
'''
save() 函数保存的完整的模型?额
wv.save_word2vec_format() 函数保存的其实就是词汇和对应向量,不过会丢失tree信息,所以无法进行增量训练
'''
# 4、加载持久化的模型,需与上面持久化的模型对应,此为方法一
new_model = Word2Vec.load('sample.en.text.model')
print(new_model)
# 4、加载持久化模型,方法二
from gensim.models import KeyedVectors
filename = 'sample.en.text.vector'
new_model = KeyedVectors.load_word2vec_format(filename, binary=True)
# 参考:
# [word2vec学习小记](https://www.jianshu.com/p/418f27df3968)
# [How to Develop Word Embeddings in Python with Gensim](https://machinelearningmastery.com/develop-word-embeddings-python-gensim/)
# [gensim.model.word2vec API](https://radimrehurek.com/gensim/models/word2vec.html)
| [
"fanrong33@qq.com"
] | fanrong33@qq.com |
10d919ed0109a6401f4dd3ac01502930a7d4097e | 80383bd5f39fd7eacff50f4b0fcc3c5e7c8329e0 | /reddwarf/tests/api/instances_delete.py | 9bef213d56cfc68c4ac1598aaffd3bb0d1ab7020 | [] | no_license | imsplitbit/reddwarf | 646409a2365459515b37f70445c0acb22610898d | 2f50d9a12a390c6016aad6a612a14bd6c34b66fd | refs/heads/master | 2020-05-19T15:45:26.733102 | 2013-01-08T21:37:10 | 2013-01-08T21:37:10 | 2,270,590 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,257 | py | import time
from proboscis import after_class
from proboscis import before_class
from proboscis import test
from proboscis.asserts import *
from proboscis.decorators import time_out
from reddwarfclient import exceptions
from reddwarf.tests.util import create_dbaas_client
from reddwarf.tests.util import poll_until
from reddwarf.tests.util import test_config
from reddwarf.tests.util.users import Requirements
class TestBase(object):
    """Shared helpers for instance-deletion tests: admin client setup,
    instance lifecycle actions, and status polling."""

    def set_up(self):
        """Authenticate as an admin user and build a DBaaS client."""
        admin_reqs = Requirements(is_admin=True)
        self.user = test_config.users.find_user(admin_reqs)
        self.dbaas = create_dbaas_client(self.user)

    def create_instance(self, name, size=1):
        """Provision an instance with a volume of *size* and return its id."""
        created = self.dbaas.instances.create(name, 1, {'size': size}, [], [])
        return created.id

    def wait_for_instance_status(self, instance_id, status="ACTIVE"):
        """Block (up to 10s) until the instance reaches *status*."""
        poll_until(lambda: self.dbaas.instances.get(instance_id),
                   lambda inst: inst.status == status,
                   time_out=10)

    def wait_for_instance_task_status(self, instance_id, description):
        """Block (up to 10s) until the instance's task matches *description*."""
        poll_until(lambda: self.dbaas.management.show(instance_id),
                   lambda inst: inst.task_description == description,
                   time_out=10)

    def is_instance_deleted(self, instance_id):
        """Poll every half second until the instance 404s; returns True."""
        while True:
            try:
                self.dbaas.instances.get(instance_id)
            except exceptions.NotFound:
                return True
            else:
                time.sleep(.5)

    def get_task_info(self, instance_id):
        """Return (status, task_description) for the instance."""
        details = self.dbaas.management.show(instance_id)
        return details.status, details.task_description

    def delete_instance(self, instance_id, assert_deleted=True):
        """Issue a delete; optionally wait for the instance to disappear."""
        self.dbaas.instances.get(instance_id).delete()
        if assert_deleted:
            assert_true(self.is_instance_deleted(instance_id))

    def delete_errored_instance(self, instance_id):
        """Wait for ERROR state, sanity-check it, then delete the instance."""
        self.wait_for_instance_status(instance_id, 'ERROR')
        status, desc = self.get_task_info(instance_id)
        assert_equal(status, "ERROR")
        self.delete_instance(instance_id)
@test(runs_after_groups=["services.initialize"],
      groups=['dbaas.api.instances.delete'])
class ErroredInstanceDelete(TestBase):
    """
    Test that an instance in an ERROR state is actually deleted when delete
    is called.
    """

    @before_class
    def set_up(self):
        """Create some flawed instances."""
        super(ErroredInstanceDelete, self).set_up()
        # Create an instance that fails during server prov.
        self.server_error = self.create_instance('test_SERVER_ERROR')
        # Create an instance that fails during volume prov.
        # NOTE(review): size=9 presumably triggers the volume failure in the
        # test fake -- confirm against the fake's configuration.
        self.volume_error = self.create_instance('test_VOLUME_ERROR', size=9)
        # Create an instance that fails during DNS prov.
        #self.dns_error = self.create_instance('test_DNS_ERROR')
        # Create an instance that fails while it's been deleted the first time.
        self.delete_error = self.create_instance('test_ERROR_ON_DELETE')

    @test
    @time_out(20)
    def delete_server_error(self):
        """An instance that errored during server provisioning is deletable."""
        self.delete_errored_instance(self.server_error)

    @test
    @time_out(20)
    def delete_volume_error(self):
        """An instance that errored during volume provisioning is deletable."""
        self.delete_errored_instance(self.volume_error)

    @test(enabled=False)
    @time_out(20)
    def delete_dns_error(self):
        # Disabled: relies on self.dns_error, which set_up leaves commented out.
        self.delete_errored_instance(self.dns_error)

    @test
    @time_out(20)
    def delete_error_on_delete_instance(self):
        # NOTE: `id` shadows the builtin; kept as-is (byte-identical test code).
        id = self.delete_error
        self.wait_for_instance_status(id, 'ACTIVE')
        self.wait_for_instance_task_status(id, 'No tasks for the instance.')
        instance = self.dbaas.management.show(id)
        assert_equal(instance.status, "ACTIVE")
        assert_equal(instance.task_description, 'No tasks for the instance.')
        # Try to delete the instance. This fails the first time due to how
        # the test fake is setup.
        self.delete_instance(id, assert_deleted=False)
        instance = self.dbaas.management.show(id)
        assert_equal(instance.status, "SHUTDOWN")
        assert_equal(instance.task_description, "Deleting the instance.")
        # Try a second time. This will succeed.
        self.delete_instance(id)
| [
"tim.simpson@rackspace.com"
] | tim.simpson@rackspace.com |
fc9c235e3d4f8607eaf02246e0cb7385120abb75 | 17c280ade4159d4d8d5a48d16ba3989470eb3f46 | /18/mc/ExoDiBosonResonances/EDBRTreeMaker/test/crab3_analysisM4500_R_0-7.py | ae645a54658b7cd536c87077e75805da8681f2d2 | [] | no_license | chengchen1993/run2_ntuple | 798ff18489ff5185dadf3d1456a4462e1dbff429 | c16c2b203c05a3eb77c769f63a0bcdf8b583708d | refs/heads/master | 2021-06-25T18:27:08.534795 | 2021-03-15T06:08:01 | 2021-03-15T06:08:01 | 212,079,804 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,201 | py | from WMCore.Configuration import Configuration
# CRAB3 job configuration for the M4500, R=0.7 Wkk->WRadion->WWW MC sample.
name = 'WWW'
steam_dir = 'xulyu'
config = Configuration()

# --- General: request bookkeeping ---
config.section_("General")
config.General.requestName = 'M4500_R0-7_off'
config.General.transferLogs = True

# --- JobType: analysis plugin, JEC text files shipped with each job ---
config.section_("JobType")
config.JobType.pluginName = 'Analysis'
config.JobType.inputFiles = ['Autumn18_V19_MC_L1FastJet_AK4PFchs.txt','Autumn18_V19_MC_L2Relative_AK4PFchs.txt','Autumn18_V19_MC_L3Absolute_AK4PFchs.txt','Autumn18_V19_MC_L1FastJet_AK8PFchs.txt','Autumn18_V19_MC_L2Relative_AK8PFchs.txt','Autumn18_V19_MC_L3Absolute_AK8PFchs.txt','Autumn18_V19_MC_L1FastJet_AK8PFPuppi.txt','Autumn18_V19_MC_L2Relative_AK8PFPuppi.txt','Autumn18_V19_MC_L3Absolute_AK8PFPuppi.txt','Autumn18_V19_MC_L1FastJet_AK4PFPuppi.txt','Autumn18_V19_MC_L2Relative_AK4PFPuppi.txt','Autumn18_V19_MC_L3Absolute_AK4PFPuppi.txt' ]
#config.JobType.inputFiles = ['PHYS14_25_V2_All_L1FastJet_AK4PFchs.txt','PHYS14_25_V2_All_L2Relative_AK4PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK4PFchs.txt','PHYS14_25_V2_All_L1FastJet_AK8PFchs.txt','PHYS14_25_V2_All_L2Relative_AK8PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK8PFchs.txt']
# Name of the CMSSW configuration file
#config.JobType.psetName = 'bkg_ana.py'
config.JobType.psetName = 'analysis.py'
#config.JobType.allowUndistributedCMSSW = True
config.JobType.allowUndistributedCMSSW = True

# --- Data: input MiniAOD dataset and lumi-based splitting ---
config.section_("Data")
#config.Data.inputDataset = '/WJetsToLNu_13TeV-madgraph-pythia8-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM'
config.Data.inputDataset = '/WkkToWRadionToWWW_M4500-R0-7_TuneCP5_13TeV-madgraph/RunIIAutumn18MiniAOD-102X_upgrade2018_realistic_v15-v1/MINIAODSIM'
#config.Data.inputDBS = 'global'
config.Data.inputDBS = 'global'
config.Data.splitting = 'LumiBased'
config.Data.unitsPerJob =5
# -1 processes the whole dataset.
config.Data.totalUnits = -1
config.Data.publication = False
config.Data.outLFNDirBase='/store/group/phys_b2g/chench/cc/'#chench/'# = '/store/group/dpg_trigger/comm_trigger/TriggerStudiesGroup/STEAM/' + steam_dir + '/' + name + '/'
# This string is used to construct the output dataset name
config.Data.outputDatasetTag = 'M4500_R0-7_off'

# --- Site: output storage element ---
config.section_("Site")
# Where the output files will be transmitted to
config.Site.storageSite = 'T2_CH_CERN'
| [
"c.chen@cern.ch"
] | c.chen@cern.ch |
65fe59176ecaeee3ac73de2fdf870260bfd2994a | 882b7b01a9d5e5d3db5120645d8de4a8be713813 | /P0101_data_exploration.py | e4e66a9b68ec47e5d6860e72846714221c7c59e3 | [] | no_license | zhangzhanluo/data_set_for_rnn | 14b37953f7c9812b51b5d05ac81cf9baae1274f7 | 64d1fb69bb361f16e58166ef04b886ab711f1487 | refs/heads/master | 2022-12-15T01:50:15.359548 | 2020-08-19T03:24:41 | 2020-08-19T03:24:41 | 288,621,217 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 722 | py | import pandas as pd
from matplotlib import pyplot as plt
# Photovoltaic data exploration: sanity-check the power column and look at
# the signal's periodicity.  The first CSV column is parsed as the datetime
# index.
file_path = 'Raw Data/Dataset_Photovoltaic_power_prediction_PV_data_all_2020_08_18.csv'
raw_data = pd.read_csv(file_path, header=0, index_col=0, parse_dates=[0])

# Check the identity: PV panel power = voltage x current.
raw_data['PowerCal'] = raw_data['Current_V'] * raw_data['Voltage']
power_data = raw_data.loc[:, ['TruePower', 'PowerCal']].copy()
power_data.loc['20180301':'20180303'].plot()
plt.show()
# Finding: small discrepancies, but the identity basically holds.

# How periodic is the PV power output?
raw_data['TruePower'].loc['20180801':'20180807'].plot()
plt.show()
# Finding: a clear daily cycle.
# Next: relate power to temperature, irradiance, and lagged history...
| [
"zhangzhanluo@outlook.com"
] | zhangzhanluo@outlook.com |
35c2b8691c4f2a9410a8d0e1802611fc977daf88 | f313176349f441bb8f72759d0f84f320d274d5d7 | /xjerab13/blogs_download2/web_rss_old/mako/_ast_util.py | 8eb22158d52de316cc3de5f6b2e55fb40b621200 | [] | no_license | iotrusina/M-Eco-WP3-package | 641876974268cda458bf096e702b0bfe18596d67 | 83d76dfd2afdae93856e094879464327abf1faa0 | refs/heads/master | 2021-01-01T05:35:57.200860 | 2012-10-03T19:57:32 | 2012-10-03T19:57:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,454 | py | # mako/_ast_util.py
# Copyright (C) 2006-2012 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
ast
~~~
The `ast` module helps Python applications to process trees of the Python
abstract syntax grammar. The abstract syntax itself might change with
each Python release; this module helps to find out programmatically what
the current grammar looks like and allows modifications of it.
An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as
a flag to the `compile()` builtin function or by using the `parse()`
function from this module. The result will be a tree of objects whose
classes all inherit from `ast.AST`.
A modified abstract syntax tree can be compiled into a Python code object
using the built-in `compile()` function.
Additionally various helper functions are provided that make working with
the trees simpler. The main intention of the helper functions and this
module in general is to provide an easy to use interface for libraries
that work tightly with the python syntax (template engines for example).
:copyright: Copyright 2008 by Armin Ronacher.
:license: Python License.
"""
from _ast import *
# Mapping tables from _ast operator node classes to their surface syntax.
# SourceGenerator consults these when rendering an AST back to source text.
BOOLOP_SYMBOLS = {
    And: 'and',
    Or: 'or'
}

BINOP_SYMBOLS = {
    Add: '+',
    Sub: '-',
    Mult: '*',
    Div: '/',
    FloorDiv: '//',
    Mod: '%',
    LShift: '<<',
    RShift: '>>',
    BitOr: '|',
    BitAnd: '&',
    BitXor: '^'
}

CMPOP_SYMBOLS = {
    Eq: '==',
    Gt: '>',
    GtE: '>=',
    In: 'in',
    Is: 'is',
    IsNot: 'is not',
    Lt: '<',
    LtE: '<=',
    NotEq: '!=',
    NotIn: 'not in'
}

UNARYOP_SYMBOLS = {
    Invert: '~',
    Not: 'not',
    UAdd: '+',
    USub: '-'
}

# Convenience union of all four operator tables.
ALL_SYMBOLS = {}
ALL_SYMBOLS.update(BOOLOP_SYMBOLS)
ALL_SYMBOLS.update(BINOP_SYMBOLS)
ALL_SYMBOLS.update(CMPOP_SYMBOLS)
ALL_SYMBOLS.update(UNARYOP_SYMBOLS)
def parse(expr, filename='<unknown>', mode='exec'):
    """Compile *expr* only as far as its abstract syntax tree and return it."""
    # PyCF_ONLY_AST tells compile() to stop after parsing instead of
    # producing a code object.
    ast_only = PyCF_ONLY_AST
    return compile(expr, filename, mode, ast_only)
def to_source(node, indent_with=' ' * 4):
    """Render *node* (and its subtree) back into python source text.

    Useful for debugging, especially with hand-built or rewritten ASTs.
    The generated source may be evaluable even when the tree itself is not
    compilable, because an AST carries extra data that plain source drops
    during conversion.

    Each indentation level is emitted as *indent_with* (default: four
    spaces, per PEP 8); pass a different string to match another
    styleguide.
    """
    writer = SourceGenerator(indent_with)
    writer.visit(node)
    return ''.join(writer.result)
def dump(node):
    """Return a very verbose string rendering of *node*, for debugging."""
    def _render(value):
        # AST nodes become ClassName(field=..., ...); lists are bracketed;
        # everything else falls back to repr().
        if isinstance(value, AST):
            fields = ', '.join('%s=%s' % (name, _render(child))
                               for name, child in iter_fields(value))
            return '%s(%s)' % (value.__class__.__name__, fields)
        if isinstance(value, list):
            return '[%s]' % ', '.join(_render(entry) for entry in value)
        return repr(value)
    if not isinstance(node, AST):
        raise TypeError('expected AST, got %r' % node.__class__.__name__)
    return _render(node)
def copy_location(new_node, old_node):
    """Copy `lineno`/`col_offset` from *old_node* onto *new_node*.

    Attributes are copied only when both nodes declare them and the old
    node actually carries a value.  Returns *new_node*.
    """
    for attr in ('lineno', 'col_offset'):
        if attr not in old_node._attributes or attr not in new_node._attributes:
            continue
        if hasattr(old_node, attr):
            setattr(new_node, attr, getattr(old_node, attr))
    return new_node
def fix_missing_locations(node):
    """
    Some nodes require a line number and the column offset. Without that
    information the compiler will abort the compilation. Because it can be
    a dull task to add appropriate line numbers and column offsets when
    adding new nodes this function can help. It copies the line number and
    column offset of the parent node to the child nodes without this
    information.

    Unlike `copy_location` this works recursive and won't touch nodes that
    already have a location information.
    """
    def _fix(node, lineno, col_offset):
        # Fill in lineno when the node supports but lacks it; otherwise the
        # node's own lineno becomes the default for its whole subtree.
        if 'lineno' in node._attributes:
            if not hasattr(node, 'lineno'):
                node.lineno = lineno
            else:
                lineno = node.lineno
        # Same propagation rule for col_offset.
        if 'col_offset' in node._attributes:
            if not hasattr(node, 'col_offset'):
                node.col_offset = col_offset
            else:
                col_offset = node.col_offset
        # Recurse with the (possibly updated) defaults.
        for child in iter_child_nodes(node):
            _fix(child, lineno, col_offset)
    # Root defaults: line 1, column 0.
    _fix(node, 1, 0)
    return node
def increment_lineno(node, n=1):
    """
    Increment the line numbers of all nodes by `n` if they have line number
    attributes. This is useful to "move code" to a different location in a
    file.
    """
    # Bug fix: the old ``for node in zip((node,), walk(node))`` rebound
    # *node* to a ``(node, node)`` tuple, so ``node._attributes`` raised
    # AttributeError on the very first iteration.  walk() already yields the
    # root node itself, so iterating it directly covers the whole tree.
    for child in walk(node):
        if 'lineno' in child._attributes:
            child.lineno = getattr(child, 'lineno', 0) + n
def iter_fields(node):
    """Yield ``(name, value)`` pairs for every field present on *node*."""
    # _fields may be absent or empty (CPython 2.5 compat); treat both as
    # "no fields".
    for name in (getattr(node, '_fields', None) or ()):
        try:
            yield name, getattr(node, name)
        except AttributeError:
            # Declared but unset field -- skip it.
            continue
def get_fields(node):
    """Like `iter_fields` but returns a dict."""
    return {name: value for name, value in iter_fields(node)}
def iter_child_nodes(node):
    """Yield every direct AST child of *node*, flattening list fields."""
    for _, value in iter_fields(node):
        if isinstance(value, list):
            for element in value:
                if isinstance(element, AST):
                    yield element
        elif isinstance(value, AST):
            yield value
def get_child_nodes(node):
    """Like `iter_child_nodes` but returns a list."""
    return [child for child in iter_child_nodes(node)]
def get_compile_mode(node):
    """
    Get the mode for `compile` of a given node. If the node is not a `mod`
    node (`Expression`, `Module` etc.) a `TypeError` is thrown.
    """
    if not isinstance(node, mod):
        raise TypeError('expected mod node, got %r' % node.__class__.__name__)
    # Bug fix: the fallback used to be 'expr', which compile() rejects --
    # the only valid modes are 'exec', 'eval' and 'single'.  Module (and any
    # other mod subtype without a dedicated mode) compiles with 'exec'.
    return {
        Expression: 'eval',
        Interactive: 'single'
    }.get(node.__class__, 'exec')
def get_docstring(node):
    """
    Return the docstring for the given node or `None` if no docstring can be
    found. If the node provided does not accept docstrings a `TypeError`
    will be raised.
    """
    if not isinstance(node, (FunctionDef, ClassDef, Module)):
        raise TypeError("%r can't have docstrings" % node.__class__.__name__)
    # NOTE(review): in CPython's _ast a body statement is an Expr wrapper
    # (Expr(value=Str)), so isinstance(node.body[0], Str) looks like it can
    # never be true -- stdlib ast.get_docstring inspects body[0].value.
    # Confirm against the _ast version this vendored copy targets before
    # relying on this helper.  Str itself is deprecated/removed in modern
    # CPython (replaced by Constant).
    if node.body and isinstance(node.body[0], Str):
        return node.body[0].s
def walk(node):
    """Breadth-first iterator over *node* and every descendant.

    Handy when you only mutate nodes in place and don't care about the
    order in which they are produced.
    """
    from collections import deque
    pending = deque((node,))
    while pending:
        current = pending.popleft()
        pending.extend(iter_child_nodes(current))
        yield current
class NodeVisitor(object):
    """Base class that walks an AST and dispatches to per-node-type handlers.

    For each node the dispatcher looks for a method named
    ``visit_<NodeClassName>`` (a `TryFinally` node goes to
    ``visit_TryFinally``); override `get_visitor` to change that scheme.
    When no handler exists, `generic_visit` recurses into the node's
    children instead.  Whatever a handler returns is forwarded by `visit`.

    Use this for read-only traversals; `NodeTransformer` is the variant
    that supports modifying the tree while walking it.
    """

    def get_visitor(self, node):
        """Return the handler bound to *node*'s class, or None if absent."""
        return getattr(self, 'visit_' + node.__class__.__name__, None)

    def visit(self, node):
        """Dispatch *node* to its handler (falling back to generic_visit)."""
        handler = self.get_visitor(node)
        if handler is None:
            return self.generic_visit(node)
        return handler(node)

    def generic_visit(self, node):
        """Fallback handler: visit every AST child of *node*."""
        for _, value in iter_fields(node):
            if isinstance(value, AST):
                self.visit(value)
            elif isinstance(value, list):
                for element in value:
                    if isinstance(element, AST):
                        self.visit(element)
class NodeTransformer(NodeVisitor):
    """
    Walks the abstract syntax tree and allows modifications of nodes.

    The `NodeTransformer` will walk the AST and use the return value of the
    visitor functions to replace or remove the old node. If the return
    value of the visitor function is `None` the node will be removed
    from the previous location otherwise it's replaced with the return
    value. The return value may be the original node in which case no
    replacement takes place.

    Here an example transformer that rewrites all `foo` to `data['foo']`::

        class RewriteName(NodeTransformer):

            def visit_Name(self, node):
                return copy_location(Subscript(
                    value=Name(id='data', ctx=Load()),
                    slice=Index(value=Str(s=node.id)),
                    ctx=node.ctx
                ), node)

    Keep in mind that if the node you're operating on has child nodes
    you must either transform the child nodes yourself or call the generic
    visit function for the node first.

    Nodes that were part of a collection of statements (that applies to
    all statement nodes) may also return a list of nodes rather than just
    a single node.

    Usually you use the transformer like this::

        node = YourTransformer().visit(node)
    """

    def generic_visit(self, node):
        for field, old_value in iter_fields(node):
            # Redundant re-read of the field (iter_fields already yielded the
            # value); kept byte-identical to upstream.
            old_value = getattr(node, field, None)
            if isinstance(old_value, list):
                # Rebuild the statement list: None drops a node, a non-AST
                # return value is treated as a list of replacement nodes.
                new_values = []
                for value in old_value:
                    if isinstance(value, AST):
                        value = self.visit(value)
                        if value is None:
                            continue
                        elif not isinstance(value, AST):
                            new_values.extend(value)
                            continue
                    new_values.append(value)
                # Splice in place so other references to the list stay valid.
                old_value[:] = new_values
            elif isinstance(old_value, AST):
                new_node = self.visit(old_value)
                if new_node is None:
                    # None means "remove this child entirely".
                    delattr(node, field)
                else:
                    setattr(node, field, new_node)
        return node
class SourceGenerator(NodeVisitor):
    """
    This visitor is able to transform a well formed syntax tree into python
    sourcecode. For more details have a look at the docstring of the
    `to_source` function.
    """

    def __init__(self, indent_with):
        # result collects string fragments; ''.join(result) is the output.
        self.result = []
        self.indent_with = indent_with
        self.indentation = 0
        # Number of pending newlines to flush before the next write().
        self.new_lines = 0

    def write(self, x):
        # Flush any pending newlines plus the current indentation first.
        if self.new_lines:
            if self.result:
                self.result.append('\n' * self.new_lines)
            self.result.append(self.indent_with * self.indentation)
            self.new_lines = 0
        self.result.append(x)

    def newline(self, n=1):
        # Request up to *n* line breaks; write() performs the actual flush.
        self.new_lines = max(self.new_lines, n)

    def body(self, statements):
        # NOTE(review): this sets `self.new_line` (singular), an attribute
        # nothing reads -- presumably `self.newline()` or `self.new_lines`
        # was intended.  Kept byte-identical to upstream.
        self.new_line = True
        self.indentation += 1
        for stmt in statements:
            self.visit(stmt)
        self.indentation -= 1

    def body_or_else(self, node):
        self.body(node.body)
        if node.orelse:
            self.newline()
            self.write('else:')
            self.body(node.orelse)

    def signature(self, node):
        # want_comma acts as a mutable flag shared with the closure below:
        # empty on the first argument, non-empty afterwards.
        want_comma = []
        def write_comma():
            if want_comma:
                self.write(', ')
            else:
                want_comma.append(True)
        # Align defaults with the trailing arguments they belong to.
        padding = [None] * (len(node.args) - len(node.defaults))
        for arg, default in zip(node.args, padding + node.defaults):
            write_comma()
            self.visit(arg)
            if default is not None:
                self.write('=')
                self.visit(default)
        if node.vararg is not None:
            write_comma()
            self.write('*' + node.vararg)
        if node.kwarg is not None:
            write_comma()
            self.write('**' + node.kwarg)

    def decorators(self, node):
        for decorator in node.decorator_list:
            self.newline()
            self.write('@')
            self.visit(decorator)

    # Statements

    def visit_Assign(self, node):
        self.newline()
        for idx, target in enumerate(node.targets):
            if idx:
                self.write(', ')
            self.visit(target)
        self.write(' = ')
        self.visit(node.value)

    def visit_AugAssign(self, node):
        self.newline()
        self.visit(node.target)
        self.write(BINOP_SYMBOLS[type(node.op)] + '=')
        self.visit(node.value)

    def visit_ImportFrom(self, node):
        self.newline()
        # node.level counts leading dots for relative imports.
        self.write('from %s%s import ' % ('.' * node.level, node.module))
        for idx, item in enumerate(node.names):
            if idx:
                self.write(', ')
            self.write(item)

    def visit_Import(self, node):
        self.newline()
        for item in node.names:
            self.write('import ')
            self.visit(item)

    def visit_Expr(self, node):
        self.newline()
        self.generic_visit(node)

    def visit_FunctionDef(self, node):
        self.newline(n=2)
        self.decorators(node)
        self.newline()
        self.write('def %s(' % node.name)
        self.signature(node.args)
        self.write('):')
        self.body(node.body)

    def visit_ClassDef(self, node):
        # have_args mirrors want_comma in signature(): it also tracks
        # whether the opening parenthesis has been written yet.
        have_args = []
        def paren_or_comma():
            if have_args:
                self.write(', ')
            else:
                have_args.append(True)
                self.write('(')
        self.newline(n=3)
        self.decorators(node)
        self.newline()
        self.write('class %s' % node.name)
        for base in node.bases:
            paren_or_comma()
            self.visit(base)
        # XXX: the if here is used to keep this module compatible
        # with python 2.6.
        if hasattr(node, 'keywords'):
            for keyword in node.keywords:
                paren_or_comma()
                self.write(keyword.arg + '=')
                self.visit(keyword.value)
            if node.starargs is not None:
                paren_or_comma()
                self.write('*')
                self.visit(node.starargs)
            if node.kwargs is not None:
                paren_or_comma()
                self.write('**')
                self.visit(node.kwargs)
        self.write(have_args and '):' or ':')
        self.body(node.body)

    def visit_If(self, node):
        self.newline()
        self.write('if ')
        self.visit(node.test)
        self.write(':')
        self.body(node.body)
        # Collapse single-If orelse chains into `elif` clauses.
        while True:
            else_ = node.orelse
            if len(else_) == 1 and isinstance(else_[0], If):
                node = else_[0]
                self.newline()
                self.write('elif ')
                self.visit(node.test)
                self.write(':')
                self.body(node.body)
            else:
                # NOTE(review): when else_ is empty this still emits a bare
                # `else:` with no body -- presumably `if else_:` should guard
                # this branch.  Kept byte-identical to upstream.
                self.newline()
                self.write('else:')
                self.body(else_)
                break

    def visit_For(self, node):
        self.newline()
        self.write('for ')
        self.visit(node.target)
        self.write(' in ')
        self.visit(node.iter)
        self.write(':')
        self.body_or_else(node)

    def visit_While(self, node):
        self.newline()
        self.write('while ')
        self.visit(node.test)
        self.write(':')
        self.body_or_else(node)

    def visit_With(self, node):
        self.newline()
        self.write('with ')
        self.visit(node.context_expr)
        if node.optional_vars is not None:
            self.write(' as ')
            self.visit(node.optional_vars)
        self.write(':')
        self.body(node.body)

    def visit_Pass(self, node):
        self.newline()
        self.write('pass')

    def visit_Print(self, node):
        # XXX: python 2.6 only
        self.newline()
        self.write('print ')
        want_comma = False
        if node.dest is not None:
            self.write(' >> ')
            self.visit(node.dest)
            want_comma = True
        for value in node.values:
            if want_comma:
                self.write(', ')
            self.visit(value)
            want_comma = True
        if not node.nl:
            self.write(',')

    def visit_Delete(self, node):
        self.newline()
        self.write('del ')
        # NOTE(review): `enumerate(node)` looks wrong -- AST nodes are not
        # iterable; presumably `enumerate(node.targets)` was intended.
        # Kept byte-identical to upstream.
        for idx, target in enumerate(node):
            if idx:
                self.write(', ')
            self.visit(target)

    def visit_TryExcept(self, node):
        self.newline()
        self.write('try:')
        self.body(node.body)
        for handler in node.handlers:
            self.visit(handler)

    def visit_TryFinally(self, node):
        self.newline()
        self.write('try:')
        self.body(node.body)
        self.newline()
        self.write('finally:')
        self.body(node.finalbody)

    def visit_Global(self, node):
        self.newline()
        self.write('global ' + ', '.join(node.names))

    def visit_Nonlocal(self, node):
        self.newline()
        self.write('nonlocal ' + ', '.join(node.names))

    def visit_Return(self, node):
        self.newline()
        self.write('return ')
        self.visit(node.value)

    def visit_Break(self, node):
        self.newline()
        self.write('break')

    def visit_Continue(self, node):
        self.newline()
        self.write('continue')

    def visit_Raise(self, node):
        # XXX: Python 2.6 / 3.0 compatibility
        self.newline()
        self.write('raise')
        # Python 3 style: `raise exc from cause`.
        if hasattr(node, 'exc') and node.exc is not None:
            self.write(' ')
            self.visit(node.exc)
            if node.cause is not None:
                self.write(' from ')
                self.visit(node.cause)
        # Python 2 style: `raise type, inst, tback`.
        elif hasattr(node, 'type') and node.type is not None:
            self.visit(node.type)
            if node.inst is not None:
                self.write(', ')
                self.visit(node.inst)
            if node.tback is not None:
                self.write(', ')
                self.visit(node.tback)

    # Expressions

    def visit_Attribute(self, node):
        self.visit(node.value)
        self.write('.' + node.attr)

    def visit_Call(self, node):
        want_comma = []
        def write_comma():
            if want_comma:
                self.write(', ')
            else:
                want_comma.append(True)
        self.visit(node.func)
        self.write('(')
        for arg in node.args:
            write_comma()
            self.visit(arg)
        for keyword in node.keywords:
            write_comma()
            self.write(keyword.arg + '=')
            self.visit(keyword.value)
        if node.starargs is not None:
            write_comma()
            self.write('*')
            self.visit(node.starargs)
        if node.kwargs is not None:
            write_comma()
            self.write('**')
            self.visit(node.kwargs)
        self.write(')')

    def visit_Name(self, node):
        self.write(node.id)

    def visit_Str(self, node):
        self.write(repr(node.s))

    def visit_Bytes(self, node):
        self.write(repr(node.s))

    def visit_Num(self, node):
        self.write(repr(node.n))

    def visit_Tuple(self, node):
        self.write('(')
        # idx stays -1 for an empty tuple so the `,)` branch below fires.
        idx = -1
        for idx, item in enumerate(node.elts):
            if idx:
                self.write(', ')
            self.visit(item)
        self.write(idx and ')' or ',)')

    def sequence_visit(left, right):
        # Factory producing a visitor for bracketed element sequences;
        # deleted from the class namespace after use.
        def visit(self, node):
            self.write(left)
            for idx, item in enumerate(node.elts):
                if idx:
                    self.write(', ')
                self.visit(item)
            self.write(right)
        return visit

    visit_List = sequence_visit('[', ']')
    visit_Set = sequence_visit('{', '}')
    del sequence_visit

    def visit_Dict(self, node):
        self.write('{')
        for idx, (key, value) in enumerate(zip(node.keys, node.values)):
            if idx:
                self.write(', ')
            self.visit(key)
            self.write(': ')
            self.visit(value)
        self.write('}')

    def visit_BinOp(self, node):
        self.write('(')
        self.visit(node.left)
        self.write(' %s ' % BINOP_SYMBOLS[type(node.op)])
        self.visit(node.right)
        self.write(')')

    def visit_BoolOp(self, node):
        self.write('(')
        for idx, value in enumerate(node.values):
            if idx:
                self.write(' %s ' % BOOLOP_SYMBOLS[type(node.op)])
            self.visit(value)
        self.write(')')

    def visit_Compare(self, node):
        self.write('(')
        self.visit(node.left)
        for op, right in zip(node.ops, node.comparators):
            self.write(' %s ' % CMPOP_SYMBOLS[type(op)])
            self.visit(right)
        self.write(')')

    def visit_UnaryOp(self, node):
        self.write('(')
        op = UNARYOP_SYMBOLS[type(node.op)]
        self.write(op)
        # `not` needs a separating space; `~`, `+`, `-` do not.
        if op == 'not':
            self.write(' ')
        self.visit(node.operand)
        self.write(')')

    def visit_Subscript(self, node):
        self.visit(node.value)
        self.write('[')
        self.visit(node.slice)
        self.write(']')

    def visit_Slice(self, node):
        if node.lower is not None:
            self.visit(node.lower)
        self.write(':')
        if node.upper is not None:
            self.visit(node.upper)
        if node.step is not None:
            self.write(':')
            if not (isinstance(node.step, Name) and node.step.id == 'None'):
                self.visit(node.step)

    def visit_ExtSlice(self, node):
        # NOTE(review): this unpacks `node.dims` directly instead of
        # `enumerate(node.dims)`, so it likely fails at runtime; kept
        # byte-identical to upstream.
        for idx, item in node.dims:
            if idx:
                self.write(', ')
            self.visit(item)

    def visit_Yield(self, node):
        self.write('yield ')
        self.visit(node.value)

    def visit_Lambda(self, node):
        self.write('lambda ')
        self.signature(node.args)
        self.write(': ')
        self.visit(node.body)

    def visit_Ellipsis(self, node):
        self.write('Ellipsis')

    def generator_visit(left, right):
        # Factory for comprehension visitors; removed after use like
        # sequence_visit above.
        def visit(self, node):
            self.write(left)
            self.visit(node.elt)
            for comprehension in node.generators:
                self.visit(comprehension)
            self.write(right)
        return visit

    visit_ListComp = generator_visit('[', ']')
    visit_GeneratorExp = generator_visit('(', ')')
    visit_SetComp = generator_visit('{', '}')
    del generator_visit

    def visit_DictComp(self, node):
        self.write('{')
        self.visit(node.key)
        self.write(': ')
        self.visit(node.value)
        for comprehension in node.generators:
            self.visit(comprehension)
        self.write('}')

    def visit_IfExp(self, node):
        self.visit(node.body)
        self.write(' if ')
        self.visit(node.test)
        self.write(' else ')
        self.visit(node.orelse)

    def visit_Starred(self, node):
        self.write('*')
        self.visit(node.value)

    def visit_Repr(self, node):
        # XXX: python 2.6 only
        self.write('`')
        self.visit(node.value)
        self.write('`')

    # Helper Nodes

    def visit_alias(self, node):
        self.write(node.name)
        if node.asname is not None:
            self.write(' as ' + node.asname)

    def visit_comprehension(self, node):
        self.write(' for ')
        self.visit(node.target)
        self.write(' in ')
        self.visit(node.iter)
        if node.ifs:
            for if_ in node.ifs:
                self.write(' if ')
                self.visit(if_)

    def visit_excepthandler(self, node):
        self.newline()
        self.write('except')
        if node.type is not None:
            self.write(' ')
            self.visit(node.type)
        if node.name is not None:
            self.write(' as ')
            self.visit(node.name)
        self.write(':')
        self.body(node.body)
| [
"otrusina@gmail.com"
] | otrusina@gmail.com |
139a215386fd73c93520824d0d8e1a4d7e908698 | 7dbbde919349fdc3651eff1a7be744aed25eea30 | /scripts/multiprocessing_example.py | 13f78ecf32de52baab2847baa7991b1bf9d173e0 | [] | no_license | adrn/scicoder-notebooks | 06ca10a12c4f89a5c2e4062c70b6e4eb3bc0b1b0 | 7c8a5850200c3fb78aca1c336af7ed47ad52c52a | refs/heads/master | 2021-03-12T21:37:39.597404 | 2013-07-08T04:37:23 | 2013-07-08T04:37:23 | 11,226,565 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | # coding: utf-8
""" Demonstration of the built-in multiprocessing package """
from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import multiprocessing
# A 'worker' function: the unit of work that gets called over and over,
# once per input item.
def task(x):
    """Square *x* -- a stand-in for an expensive per-item computation."""
    return x * x
if __name__ == "__main__":
    # The __main__ guard is required: multiprocessing re-imports this module
    # inside the worker processes.
    # Fix: the original left the Pool open until interpreter exit (leaked
    # worker processes).  Using the pool as a context manager terminates the
    # workers as soon as the work is done.
    with multiprocessing.Pool(processes=4) as pool:
        # Run task() on every value in range(10000), split across the pool.
        result = pool.map(task, range(10000))
    print(result)
"adrian.prw@gmail.com"
] | adrian.prw@gmail.com |
89c6d8623bc995463d87eec782ddc6b345abcf8e | f93ecd348d5464e0b87826ae4544d8af82a030e7 | /server/django/pages/urls.py | 569e13c343eb215410d7235057f4380842edb9d0 | [
"MIT"
] | permissive | wanyaworld/SearchThisBackend | 1fe5fb4400422ea8a408379c2de2d7bb0d0fa0aa | 30c4e6d688ffb2ac3fa7916a2a949b64d25aaca6 | refs/heads/main | 2023-02-23T00:17:28.334729 | 2021-01-28T15:58:21 | 2021-01-28T15:58:21 | 333,405,124 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | from django.urls import path
from django.conf.urls import include, url
from .views import *
# URL routing table for the pages app (order matters for resolution):
#   ''          -> homePageView      (reverse name: 'home')
#   'document'  -> documentPageView  (reverse name: 'document')
urlpatterns = [
    path('', homePageView, name='home'),
    path('document', documentPageView, name='document'),
]
| [
"6812skiii@gmail.com"
] | 6812skiii@gmail.com |
b8d77957b7c1d7078636cd405133292880642ca8 | 832920bf45ea5e759be24fe276496ff9310c3e43 | /Python/URI 1070.py | c5b337863224f5e679a184d7860dd8e4260eb9ee | [
"MIT"
] | permissive | carvalhopedro22/Programacao-URI-Online-Judge | a5e23bd708244b0de1b33b9617e72780d047c502 | 4a58141418de7589d7d81c3fffb41664efd2ffc6 | refs/heads/main | 2023-03-26T16:21:14.583253 | 2021-03-22T22:28:39 | 2021-03-22T22:28:39 | 302,116,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | num = int(input())
# URI 1070: print the first six odd numbers that are >= num
# (num was read from stdin just above this block).
printed = 0
while printed < 6:
    if num % 2 != 0:
        print(num)
        printed += 1
    num += 1
"pedro.mundim@ufv.br"
] | pedro.mundim@ufv.br |
03bc75f30bd988e5219152210d4569a1242e179d | 32d30e05e646351b3459ed2711a84487ea8b2445 | /priorityqueue.py | 97db9e28e2903b881b72aa99c33f0d85c65de976 | [] | no_license | Silveryu/ekains | 71eaad8c287cd60e4c228342dfd44f624954a8f2 | beb4f31f1c2b224c543e549c6b4023be07c1500d | refs/heads/master | 2020-06-02T18:03:32.952111 | 2019-06-12T17:04:38 | 2019-06-12T17:04:38 | 191,258,804 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 676 | py | import bisect
class PriorityQueue:
def __init__(self, f):
self.queue = []
self.f = f
def append(self, item ):
bisect.insort(self.queue, (self.f(item), item))
def __len__(self):
return len(self.queue)
def pop(self):
return self.queue.pop(0)[1]
def __contains__(self, item):
return any(item == pair[1] for pair in self.queue)
def __getitem__(self, key):
for _, item in self.queue:
if item == key:
return item
def __delitem__(self, key):
for i, (value, item) in enumerate(self.queue):
if item == key:
self.queue.pop(i)
| [
"p.m.s@ua.pt"
] | p.m.s@ua.pt |
2fbb5fdd65ad8826645ab7e7e699ebefa0f72824 | 22cd0e79f4dd999e40a1d6ff829f8ef4f8d9df9a | /samples/kuka_maze_planner/devel/lib/python2.7/dist-packages/rll_msgs/msg/_DefaultMoveIfaceActionResult.py | 8cdca9ad71548b62110f5d3b6c4fddf5b2278554 | [] | no_license | anupamkaul/robond | b3f9cefbf0205d3c1db14b9982a95f61644a49b1 | f5b1e4323c0bf0ccd7d78dbef8c9e0ddf1a85d17 | refs/heads/master | 2021-06-21T14:06:09.368041 | 2020-11-30T19:51:36 | 2020-11-30T19:51:36 | 146,023,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,018 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from rll_msgs/DefaultMoveIfaceActionResult.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import rll_msgs.msg
import genpy
import actionlib_msgs.msg
import std_msgs.msg
class DefaultMoveIfaceActionResult(genpy.Message):
  """Autogenerated genpy wrapper for the DefaultMoveIface action result.

  Generated from an action definition (see the "DO NOT MODIFY" banner in
  ``_full_text``) — regenerate from the .action file instead of hand-editing.
  Wire layout (little-endian): std_msgs/Header, actionlib_msgs/GoalStatus,
  then the (empty) DefaultMoveIfaceResult. Strings are uint32-length-prefixed.
  """
  # Message identity/metadata used by the ROS middleware.
  _md5sum = "1eb06eeff08fa7ea874431638cb52332"
  _type = "rll_msgs/DefaultMoveIfaceActionResult"
  _has_header = True #flag to mark the presence of a Header object
  # Full concatenated message definition (verbatim; used for type negotiation).
  _full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
Header header
actionlib_msgs/GoalStatus status
DefaultMoveIfaceResult result
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: actionlib_msgs/GoalStatus
GoalID goal_id
uint8 status
uint8 PENDING = 0 # The goal has yet to be processed by the action server
uint8 ACTIVE = 1 # The goal is currently being processed by the action server
uint8 PREEMPTED = 2 # The goal received a cancel request after it started executing
# and has since completed its execution (Terminal State)
uint8 SUCCEEDED = 3 # The goal was achieved successfully by the action server (Terminal State)
uint8 ABORTED = 4 # The goal was aborted during execution by the action server due
# to some failure (Terminal State)
uint8 REJECTED = 5 # The goal was rejected by the action server without being processed,
# because the goal was unattainable or invalid (Terminal State)
uint8 PREEMPTING = 6 # The goal received a cancel request after it started executing
# and has not yet completed execution
uint8 RECALLING = 7 # The goal received a cancel request before it started executing,
# but the action server has not yet confirmed that the goal is canceled
uint8 RECALLED = 8 # The goal received a cancel request before it started executing
# and was successfully cancelled (Terminal State)
uint8 LOST = 9 # An action client can determine that a goal is LOST. This should not be
# sent over the wire by an action server
#Allow for the user to associate a string with GoalStatus for debugging
string text
================================================================================
MSG: actionlib_msgs/GoalID
# The stamp should store the time at which this goal was requested.
# It is used by an action server when it tries to preempt all
# goals that were requested before a certain time
time stamp
# The id provides a way to associate feedback and
# result message with specific goal requests. The id
# specified must be unique.
string id
================================================================================
MSG: rll_msgs/DefaultMoveIfaceResult
# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
"""
  __slots__ = ['header','status','result']
  _slot_types = ['std_msgs/Header','actionlib_msgs/GoalStatus','rll_msgs/DefaultMoveIfaceResult']

  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommended
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.

    The available fields are:
        header, status, result

    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
        to set specific fields.
    """
    if args or kwds:
      super(DefaultMoveIfaceActionResult, self).__init__(*args, **kwds)
      # message fields cannot be None, assign default values for those that are
      if self.header is None:
        self.header = std_msgs.msg.Header()
      if self.status is None:
        self.status = actionlib_msgs.msg.GoalStatus()
      if self.result is None:
        self.result = rll_msgs.msg.DefaultMoveIfaceResult()
    else:
      # No arguments given: default-construct every field.
      self.header = std_msgs.msg.Header()
      self.status = actionlib_msgs.msg.GoalStatus()
      self.result = rll_msgs.msg.DefaultMoveIfaceResult()

  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types

  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      # Fixed-width fields are packed with cached Struct codecs; each string
      # is written as a uint32 length prefix followed by utf-8 bytes.
      _x = self
      buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
      _x = self.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      buff.write(_get_struct_2I().pack(_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs))
      _x = self.status.goal_id.id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      buff.write(_get_struct_B().pack(self.status.status))
      _x = self.status.text
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    try:
      if self.header is None:
        self.header = std_msgs.msg.Header()
      if self.status is None:
        self.status = actionlib_msgs.msg.GoalStatus()
      if self.result is None:
        self.result = rll_msgs.msg.DefaultMoveIfaceResult()
      # Walk the buffer with a [start:end) window, mirroring serialize().
      end = 0
      _x = self
      start = end
      end += 12
      (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.header.frame_id = str[start:end]
      _x = self
      start = end
      end += 8
      (_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.status.goal_id.id = str[start:end].decode('utf-8')
      else:
        self.status.goal_id.id = str[start:end]
      start = end
      end += 1
      (self.status.status,) = _get_struct_B().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.status.text = str[start:end].decode('utf-8')
      else:
        self.status.text = str[start:end]
      # self.result has no fields (empty message), so nothing more to read.
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill

  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      # Identical to serialize(); this message has no array fields, so the
      # numpy module is unused here.
      _x = self
      buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
      _x = self.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      buff.write(_get_struct_2I().pack(_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs))
      _x = self.status.goal_id.id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      buff.write(_get_struct_B().pack(self.status.status))
      _x = self.status.text
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    try:
      # Identical to deserialize(); no array fields, so numpy is unused here.
      if self.header is None:
        self.header = std_msgs.msg.Header()
      if self.status is None:
        self.status = actionlib_msgs.msg.GoalStatus()
      if self.result is None:
        self.result = rll_msgs.msg.DefaultMoveIfaceResult()
      end = 0
      _x = self
      start = end
      end += 12
      (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.header.frame_id = str[start:end]
      _x = self
      start = end
      end += 8
      (_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.status.goal_id.id = str[start:end].decode('utf-8')
      else:
        self.status.goal_id.id = str[start:end]
      start = end
      end += 1
      (self.status.status,) = _get_struct_B().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.status.text = str[start:end].decode('utf-8')
      else:
        self.status.text = str[start:end]
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
# Shared genpy codec for a single little-endian uint32 (string length prefixes).
_struct_I = genpy.struct_I
def _get_struct_I():
  global _struct_I
  return _struct_I
# Lazily-built, module-level caches for the fixed-layout struct codecs used
# by the (de)serializers above. Each getter compiles its Struct on first use
# and returns the cached instance afterwards.
_struct_3I = None
def _get_struct_3I():
  # Little-endian 3 x uint32 (header.seq + timestamp).
  global _struct_3I
  _struct_3I = _struct_3I or struct.Struct("<3I")
  return _struct_3I
_struct_B = None
def _get_struct_B():
  # Single unsigned byte (GoalStatus.status).
  global _struct_B
  _struct_B = _struct_B or struct.Struct("<B")
  return _struct_B
_struct_2I = None
def _get_struct_2I():
  # Little-endian 2 x uint32 (goal_id timestamp).
  global _struct_2I
  _struct_2I = _struct_2I or struct.Struct("<2I")
  return _struct_2I
| [
"anupam.kaul@yahoo.com"
] | anupam.kaul@yahoo.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.