blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3d91821e5a8a69cd511d74bc1715f78c9f263421 | 19a2e8444edba5dcf7405e665112359496f7eeab | /build/counter_node/catkin_generated/pkg.installspace.context.pc.py | fb6d507c2e278b74cf27338a4edf80f92a268518 | [] | no_license | okreng/ProjectCourseTasks | cd57c7c25fecd5198e57464d4354b1bb812e5f99 | d8b79149bb1c2457324651a8e259a5ed4c4fe537 | refs/heads/master | 2021-07-08T11:54:24.902722 | 2017-10-03T04:16:22 | 2017-10-03T04:16:22 | 105,580,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated catkin pkg-config context for the "counter_node" package.
# Values are substituted by CMake at configure time; do not edit by hand.
CATKIN_PACKAGE_PREFIX = ""
# Guarded split: an empty substitution must yield [] rather than [''].
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/okrengel/catkin_ws/install/include".split(';') if "/home/okrengel/catkin_ws/install/include" != "" else []
# Semicolon-separated catkin run dependencies (empty for this package).
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
# Libraries exported by the package, with linker prefixes (none here).
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "counter_node"
# Root of the install space this context was generated for.
PROJECT_SPACE_DIR = "/home/okrengel/catkin_ws/install"
PROJECT_VERSION = "0.0.0"
| [
"okrengel46@gmail.com"
] | okrengel46@gmail.com |
4db4ead4d9e8fc01e987b14c56aca330364ea2b7 | 6920575dc95c6800fb8e8088719afa88931281b5 | /tests/validator/test_validating_a_motion.py | 8f6a6c4882be98bfdbdde320c887b6a363fa5e59 | [] | no_license | pipermerriam/ethereum-alarm-governance | 091de3a3f72c95e097a24bd05a1d7ebe4738a01b | 1b4a9a0355976f92e88396582a64fdfb50bbe858 | refs/heads/master | 2023-08-28T09:39:44.191755 | 2016-02-15T22:18:18 | 2016-02-15T22:18:18 | 47,724,489 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,704 | py | import pytest
# No contracts are auto-deployed by the test harness for this module.
deploy_contracts = []
# Seconds in one day; used as the minimum debate period in the fixtures below.
ONE_DAY = 24 * 60 * 60
# Each row: (quorium, pass_percentage, debate_period, executor, expected).
# The validator is configured with minimums (10000, 51, ONE_DAY); rows at or
# above every minimum with a non-zero executor address validate True, and each
# False row violates exactly one constraint.
@pytest.mark.parametrize(
    "quorium,pass_percentage,debate_period,executor,expected",
    (
        (10000, 51, ONE_DAY, "0xd3cda913deb6f67967b99d67acdfa1712c293601", True),
        (10001, 51, ONE_DAY, "0xd3cda913deb6f67967b99d67acdfa1712c293601", True),
        (10000, 100, ONE_DAY, "0xd3cda913deb6f67967b99d67acdfa1712c293601", True),
        (10000, 100, ONE_DAY + 1, "0xd3cda913deb6f67967b99d67acdfa1712c293601", True),
        (9999, 100, ONE_DAY, "0xd3cda913deb6f67967b99d67acdfa1712c293601", False),
        (10000, 50, ONE_DAY, "0xd3cda913deb6f67967b99d67acdfa1712c293601", False),
        (10000, 51, ONE_DAY - 1, "0xd3cda913deb6f67967b99d67acdfa1712c293601", False),
        (10000, 51, ONE_DAY, "0x0000000000000000000000000000000000000000", False),
    )
)
def test_validation_with_acceptable_values(deploy_contract, contracts,
                                           deploy_client, accounts,
                                           get_log_data, deploy_coinbase,
                                           Status, quorium, debate_period,
                                           pass_percentage, executor,
                                           expected):
    """Validator.validate must accept only motions meeting every minimum."""
    # Deploy the validator and set its minimum acceptance thresholds.
    validator = deploy_contract(contracts.Validator)
    validator.setMinimumQuorum.s(10000)
    validator.setMinimumPassPercentage.s(51)
    validator.setMinimumDebatePeriod.s(ONE_DAY)
    motion = deploy_contract(
        contracts.Motion,
        constructor_args=(deploy_coinbase,),
    )
    # Configure the motion with the parametrized values under test.
    motion.configure.s(quorium, debate_period, pass_percentage, executor)
    actual = validator.validate(motion._meta.address)
    assert actual is expected
| [
"pipermerriam@gmail.com"
] | pipermerriam@gmail.com |
d968232fc5e52bb05f5ea5243f36ae7afc71561c | 67384b87b1f3932f989b559d832809f1292cc33c | /pythonquickstart/build/lib/bin/pythonquickstart.py | ddae685bda5098ce601ff39d2e0513d8fa641240 | [] | no_license | weiwongfaye/experiment_projects | e88f174faff3946e83e85464ef3ca14a4175e45f | af6036dc38121b9e3379a085caf72b67c36121a1 | refs/heads/master | 2020-04-05T23:17:23.714438 | 2017-08-01T08:41:50 | 2017-08-01T08:41:50 | 30,822,002 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | #!/usr/bin/env python2
from pythonquickstart import *
def main():
    # Entry point placeholder; project logic not implemented yet.
    pass
if __name__ == "__main__":
    main()
"weiwongfaye@gmail.com"
] | weiwongfaye@gmail.com |
a0c8241bd312fe171868b2198fe72f2c99ce62f1 | 71ee46f14a263159fdde9390c3f87eb744c0d035 | /core/admin.py | c83330a230888f85f6c8c0d6cf682ddc8d5a54a8 | [] | no_license | hoffmannchen/CourseSystem | 8a6aefdcf8fabafbff442ec9dcbcf13b69e71501 | c0bfeedc94c9c51c5e25714734408dbd1e5f8392 | refs/heads/master | 2023-07-25T05:27:32.433398 | 2021-08-26T23:38:54 | 2021-08-26T23:38:54 | 399,674,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,247 | py | from interface import admin_interface
def register():
    """Prompt repeatedly for admin credentials and register them.

    Loops forever: each pass asks for a username, a password and its
    confirmation, and forwards matching credentials to the admin
    registration interface; mismatched passwords just re-prompt.
    """
    while True:
        name = input("请输入用户名: ").strip()
        pwd = input("请密码: ").strip()
        confirm = input("请确认密码: ").strip()
        # Guard clause: mismatch -> warn and start over.
        if pwd != confirm:
            print('两次密码不一致,请重新输入')
            continue
        admin_interface.admin_register_interface(name, pwd)
def login():
    # TODO: admin login flow not implemented yet (menu entry '2').
    pass
def create_school():
    # TODO: school creation not implemented yet (menu entry '3').
    pass
def create_course():
    # TODO: course creation (school must be chosen first) not implemented
    # yet (menu entry '4').
    pass
def create_teacher():
    # TODO: teacher creation not implemented yet (menu entry '5').
    pass
def admin_view():
    """Run the interactive admin menu until the user enters 'q'."""
    # Menu number -> (label shown to the user, handler to invoke).
    menu = {
        '1': ('注册', register),
        '2': ('登录', login),
        '3': ('创建学校', create_school),
        '4': ('创建课程(先选择学校)', create_course),
        '5': ('创建讲师', create_teacher),
    }
    while True:
        print("=====管理员功能=====")
        for number, (label, _handler) in menu.items():
            print(f"{number}: {label}")
        print("===== end =====")
        choice = input('请输入功能编号: ').strip()
        if choice == 'q':
            return
        if choice in menu:
            menu[choice][1]()
        else:
            print("请输入正确的编号!")
| [
"hoffmannchen@gmail.com"
] | hoffmannchen@gmail.com |
bba328c66530bfcd80a42b528fe3afbcb6179a03 | 0b3d3e6473215d5400f2ce26f0d651bd86aca31a | /build/ros_controllers/diff_drive_controller/catkin_generated/pkg.develspace.context.pc.py | 57980708cca8e7ca4504ece70eb678093d7c2b33 | [] | no_license | DonJons42/Astek-Projet-Tondeuse | eb7e39d6b7d727fb5abeff48be74ac9b5507aa51 | ef545ac4a1dbb139766fa5cf2a9df5d1cae20f1b | refs/heads/master | 2020-04-20T01:18:55.908951 | 2019-01-31T15:07:34 | 2019-01-31T15:07:34 | 168,541,973 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 673 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated catkin pkg-config context for "diff_drive_controller".
# Values are substituted by CMake at configure time; do not edit by hand.
CATKIN_PACKAGE_PREFIX = ""
# Develspace include dirs (devel tree plus the package's own include/).
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/stagiaire019/astek_ws/devel/include;/home/stagiaire019/astek_ws/src/ros_controllers/diff_drive_controller/include".split(';') if "/home/stagiaire019/astek_ws/devel/include;/home/stagiaire019/astek_ws/src/ros_controllers/diff_drive_controller/include" != "" else []
# Semicolon-separated catkin run dependencies (empty for this package).
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
# Exported libraries with linker prefixes.
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-ldiff_drive_controller".split(';') if "-ldiff_drive_controller" != "" else []
PROJECT_NAME = "diff_drive_controller"
# Root of the devel space this context was generated for.
PROJECT_SPACE_DIR = "/home/stagiaire019/astek_ws/devel"
PROJECT_VERSION = "0.14.2"
| [
"adsquall@gmail.com"
] | adsquall@gmail.com |
61653cbca41f2b6c3bc62c1719bfcf84ebf6fd36 | e8be0846b32edc67fd436fffa91d9888935f7104 | /common/communication.py | b0dee48da48dcbabafcf8d1285c3fc40c08a9f05 | [] | no_license | usnehal/split_models | a3de0a0d0aaa03f3d17d3653608803103a8df801 | f1415b87ccc7ff6826f5a735ac80730162752a1b | refs/heads/main | 2023-07-07T15:18:32.721002 | 2021-08-14T06:02:24 | 2021-08-14T06:02:24 | 395,880,824 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,375 | py | import socket
import json
import zlib
import threading
from common.logger import Logger
class Client:
    """TCP client that sends JSON request headers (and optional raw tensor
    data) to the inference server and returns its replies.

    ``cfg`` must expose ``server_ip`` and ``server_port``.
    """
    def __init__(self,cfg):
        self.cfg = cfg
        # Socket is created lazily by connect(); one connection per request.
        # self.s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
    def connect(self):
        """Open a fresh TCP connection to the configured server."""
        Logger.debug_print("connect:Entry")
        self.s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
        Logger.debug_print("connect:Connect")
        self.s.connect((self.cfg.server_ip,int(self.cfg.server_port)))
    def disconnect(self):
        """Shut down both directions and close the current connection."""
        Logger.debug_print("disconnect:Entry")
        self.s.shutdown(socket.SHUT_RDWR)
        Logger.debug_print("disconnect:Connect")
        self.s.close()
    def reconnect(self):
        """Re-establish the connection (same as connect, without logging)."""
        self.s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
        self.s.connect((self.cfg.server_ip,int(self.cfg.server_port)))
    def send_load_model_request(self,data_info):
        """Send a JSON load-model request; return "OK" or "Error".

        NOTE(review): the socket is intentionally left open here, matching
        the original behaviour; the next connect() replaces it.
        """
        self.connect()
        Logger.debug_print("send_data:send data_info")
        self.s.send(data_info.encode())
        Logger.debug_print("send_data:receive response")
        confirmation = self.s.recv(1024).decode()
        Logger.debug_print("send_data:confirmation = "+confirmation)
        if confirmation == "OK":
            Logger.debug_print('send_data:Sending data')
            return "OK"
        else:
            # BUG FIX: confirmation is already a str (decoded above); the
            # original called .decode() on it, raising AttributeError on the
            # error path instead of reporting the server's message.
            print("Received error from server, %s" % confirmation)
            return "Error"
    def send_data(self,data_info, data_buffer):
        """Send a JSON header describing data_buffer, then the buffer itself.

        Returns the server's decoded reply on success; returns None when the
        server rejects the header (original behaviour preserved).
        """
        self.connect()
        Logger.debug_print("send_data:send data_info")
        self.s.send(data_info.encode())
        Logger.debug_print("send_data:receive response")
        confirmation = self.s.recv(1024).decode()
        Logger.debug_print("send_data:confirmation = "+confirmation)
        if confirmation == "OK":
            Logger.debug_print('send_data:Sending data')
            self.s.sendall(data_buffer)
            Logger.debug_print('send_data:successfully sent data.')
            pred_caption = self.s.recv(1024)
            Logger.debug_print('send_data:received '+pred_caption.decode())
            self.s.shutdown(socket.SHUT_RDWR)
            self.s.close()
            Logger.debug_print(pred_caption.decode())
            return pred_caption.decode()
            # self.reconnect()
        else:
            # BUG FIX: same as above -- confirmation is a str, not bytes.
            print("Received error from server, %s" % confirmation)
class Server:
    """Threaded TCP server: accepts connections and dispatches each request
    to a registered callback keyed by the JSON ``request`` field."""
    def __init__(self,cfg):
        # cfg must expose server_port; the listen address is all interfaces.
        self.cfg = cfg
        self.s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
        self.request_count = 0
        # Maps request-name string -> handler callable.
        self.callbacks = {}
        # self.accept_connections()
    def register_callback(self, obj, callback):
        """Register (or replace) the handler for request type ``obj``."""
        # print('register_callback obj='+obj)
        if obj not in self.callbacks:
            self.callbacks[obj] = None
        self.callbacks[obj] = callback
        # print('register_callback self.callbacks=%s' % (str(self.callbacks)))
    def accept_connections(self):
        """Bind, listen and spawn a handler thread per accepted connection.

        Blocks forever; Ctrl+C closes the listening socket and exits.
        """
        ip = ''
        port = self.cfg.server_port
        Logger.milestone_print('Running on IP: '+ip)
        Logger.milestone_print('Running on port: '+str(port))
        self.s.bind((ip,port))
        self.s.listen(100)
        while 1:
            try:
                c, addr = self.s.accept()
            except KeyboardInterrupt as e:
                print("\nctrl+c,Exiting gracefully")
                self.s.shutdown(socket.SHUT_RDWR)
                self.s.close()
                exit(0)
            # print(c)
            threading.Thread(target=self.handle_client,args=(c,addr,)).start()
    def handle_client(self,c,addr):
        """Handle one client request on connection ``c``.

        Two request shapes are supported: 'load_model_request' (handled from
        the header alone) and data uploads (header + ``data_size`` bytes of
        payload, optionally quantized and/or zlib-compressed -- the payload
        is passed on to the callback for decoding).

        NOTE(review): the header read assumes the whole JSON arrives in one
        1024-byte recv -- confirm for larger headers.
        """
        # global request_count
        # print(addr)
        print(' [%d]' % (self.request_count), end ="\r")
        self.request_count = self.request_count + 1
        Logger.debug_print("handle_client:Entry")
        received_data = c.recv(1024).decode()
        Logger.debug_print("handle_client:received_data="+received_data)
        obj = json.loads(received_data)
        Logger.debug_print(obj)
        request = obj['request']
        if(request == 'load_model_request'):
            model_type = obj['model']
            model_path = None
            if('model_path' in obj.keys()):
                model_path = obj['model_path']
            response = ''
            if request in self.callbacks :
                callback = self.callbacks[request]
                response = callback(model_type,model_path)
            Logger.debug_print("handle_client:sending pred_caption" + response)
            c.send(response.encode())
            # candidate = pred_caption.split()
            Logger.debug_print ('response:' + response)
        else:
            # Data-upload request: read the header fields, ack with "OK",
            # then receive exactly data_size payload bytes.
            tensor_shape = obj['data_shape']
            zlib_compression = False
            if 'zlib_compression' in obj.keys():
                if(obj['zlib_compression'] == 'yes'):
                    zlib_compression = True
            quantized = False
            if 'quantized' in obj.keys():
                if(obj['quantized'] == 'yes'):
                    quantized = True
            reshape_image_size = obj['reshape_image_size']
            Logger.debug_print("handle_client:sending OK")
            c.send("OK".encode())
            max_data_to_be_received = obj['data_size']
            total_data = 0
            msg = bytearray()
            while 1:
                # print("handle_client:calling recv total_data=%d data_size=%d" % (total_data, max_data_to_be_received))
                if(total_data >= max_data_to_be_received):
                    Logger.debug_print("handle_client:received all data")
                    break
                data = c.recv(1024)
                # print(type(data))
                msg.extend(data)
                if not data:
                    # Peer closed early; stop with a partial payload.
                    Logger.debug_print("handle_client:while break")
                    break
                total_data += len(data)
            Logger.debug_print('total size of msg=%d' % (len(msg)))
            response = ''
            if request in self.callbacks :
                callback = self.callbacks[request]
                response = callback(msg,tensor_shape,reshape_image_size,quantized,zlib_compression)
            Logger.debug_print("handle_client:sending pred_caption" + response)
            c.send(response.encode())
            # candidate = pred_caption.split()
            Logger.debug_print ('response:' + response)
"snehal.v.uphale@gmail.com"
] | snehal.v.uphale@gmail.com |
ef9c3946f5e3c8e36462e5f64ccd0988feaba25b | f89a102381207924e1d625fa6f1610c243dcc9da | /clientes/migrations/0001_initial.py | fa055216a9f5489cba7c3583200705a33e67976b | [] | no_license | mrshoga/gestao-clientes-django | 1d1db30ae504d60dff7697820bfb232cc7b274a0 | 8b22038c47bdaf76d7b1881d4c149a1f9c347169 | refs/heads/master | 2020-06-28T20:31:27.350738 | 2019-08-03T04:44:58 | 2019-08-03T04:44:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 728 | py | # Generated by Django 2.2 on 2019-04-01 20:43
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Person',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=30)),
('last_name', models.CharField(max_length=30)),
('age', models.IntegerField()),
('salary', models.DecimalField(decimal_places=2, max_digits=50)),
('bio', models.TextField()),
],
),
]
| [
"rrgaya@gmail.com"
] | rrgaya@gmail.com |
d87bd0a26c4dea8453e2441d07db03be5e45f3ff | f480d8db3bf81d8cb02ece914f156d3efd074b5f | /tests/unit_tests/repobee/plugin_tests/test_gitlab.py | 9160d23993693600ef4110319ba1dad7376ad0cf | [
"MIT"
] | permissive | tohanss/repobee | e9dc31eb5fed91a399e968ebfb41105d80f49dcb | cf5eb1e83e62c20bbca00c8ad9f798a612e1664f | refs/heads/master | 2023-01-06T20:10:16.157976 | 2020-10-29T17:50:35 | 2020-10-29T17:50:35 | 272,424,721 | 0 | 0 | MIT | 2020-06-15T11:49:50 | 2020-06-15T11:49:49 | null | UTF-8 | Python | false | false | 18,561 | py | from collections import namedtuple
import itertools
import requests.exceptions
import pytest
import gitlab
import repobee_plug as plug
import _repobee
import constants
PAGE_SIZE = 10
class Group:
    """Class mimicking a gitlab.Group"""
    _Members = namedtuple("_Members", ("create", "list"))
    _Member = namedtuple("_Member", ("id", "access_level", "username"))
    # A plain string field name is accepted by namedtuple.
    _Projects = namedtuple("_Projects", "list")
    def __init__(self, id, name, path, parent_id, users_read_only):
        self.id = id
        self.name = name
        self.path = path
        self.parent_id = parent_id
        # Expose a gitlab-like ``members`` manager backed by private methods.
        self.members = self._Members(
            create=self._create_member, list=self._list_members
        )
        self._member_list = []
        self._project_list = []
        self._group_list = []
        self._users_read_only = users_read_only or {}
        # the group owner is always added to the group with max access level
        owner_id = [
            uid
            for uid, user in self._users_read_only.items()
            if user.username == constants.USER
        ][0]
        owner_data = {"user_id": owner_id, "access_level": gitlab.OWNER_ACCESS}
        self._create_member(owner_data)
        self._deleted = False
    def _create_member(self, data):
        """Add a member, raising 409 like GitLab when it already exists."""
        user_id = data["user_id"]
        access_level = data["access_level"]
        if user_id in [m.id for m in self._list_members()]:
            raise gitlab.exceptions.GitlabCreateError(
                response_code=409, error_message="Member already exists"
            )
        self._member_list.append(
            self._Member(
                id=user_id,
                access_level=access_level,
                username=self._users_read_only[user_id].username,
            )
        )
    def _list_members(self, all=False):
        # Without all=True only the first PAGE_SIZE members are returned,
        # mimicking GitLab's pagination.
        return list(self._member_list)[: (PAGE_SIZE if not all else None)]
    def _list_projects(self, all=False, include_subgroups=False):
        """List this group's projects, optionally including direct subgroups."""
        projects = list(self._project_list)
        if include_subgroups:
            projects += list(
                itertools.chain.from_iterable(
                    [list(g._project_list) for g in self._group_list]
                )
            )
        return projects[: (PAGE_SIZE if not all else None)]
    def delete(self):
        # Only flags deletion; tests inspect tests_only_deleted.
        self._deleted = True
    @property
    def tests_only_deleted(self):
        """Test-only flag: whether delete() has been called."""
        return self._deleted
    @property
    def projects(self):
        return self._Projects(list=self._list_projects)
class Project:
    """Lightweight stand-in for a ``gitlab.Project`` object."""

    def __init__(
        self, id, name, path, description, visibility, namespace_id, http_url
    ):
        """Store the project fields.

        ``http_url`` has no counterpart in the real GitLab API; it exists
        purely so tests can fabricate clone URLs.
        """
        for field, value in (
            ("id", id),
            ("name", name),
            ("path", path),
            ("description", description),
            ("visibility", visibility),
            ("namespace_id", namespace_id),
        ):
            setattr(self, field, value)
        self.attributes = {"http_url_to_repo": http_url}
User = namedtuple("User", ("id", "username"))
class GitLabMock:
    """Class representing a GitLab instance, with the subset of functionality
    required for RepoBee to work. It may seem counterintuitive to create this
    much logic to test a class, but from experience it became much more
    complicated to try to mock out individual pieces, when those individual
    pieces exhibit interconnected behavior.
    The _groups and _users dictionaries are indexed by id, while the _projects
    dictionary is indexed by the full path to it.
    """
    _Groups = namedtuple("_Groups", ("create", "list", "get"))
    # ("list") is a plain string, not a 1-tuple; namedtuple accepts both.
    _Users = namedtuple("_Users", ("list"))
    _Projects = namedtuple("_Projects", "create get".split())
    def __init__(self, url, private_token, ssl_verify):
        # One user per student plus the acting user (constants.USER).
        self._users = {
            id: User(id=id, username=str(grp))
            for id, grp in enumerate(constants.STUDENTS + (constants.USER,))
        }
        self._owner, *_ = [
            usr
            for usr in self._users.values()
            if usr.username == constants.USER
        ]
        self._user = self._owner
        self._base_url = url
        self._private_token = private_token
        self._groups = {}
        self._projects = {}
        # ids continue after the user ids so they never collide.
        self._id = len(self._users)
        self._create_group({"name": TARGET_GROUP, "path": TARGET_GROUP})
        # this is only for testing purposes, does not exist in the real class
        self._target_group_id = list(self._groups.keys())[0]
    def auth(self):
        """Mimic Gitlab.auth: validate url and token against the fixtures."""
        if self._base_url != BASE_URL:
            raise requests.exceptions.ConnectionError("could not connect")
        if self._private_token != TOKEN:
            raise gitlab.exceptions.GitlabAuthenticationError(
                "could not authenticate token"
            )
    @property
    def user(self):
        # The currently authenticated user (tests may overwrite _user).
        return self._user
    @property
    def tests_only_target_group_id(self):
        """Test-only accessor for the id of the target group."""
        return self._target_group_id
    @property
    def groups(self):
        # gitlab-like manager facade over the private group methods.
        return self._Groups(
            create=self._create_group,
            list=self._list_groups,
            get=self._get_group,
        )
    @property
    def users(self):
        return self._Users(list=self._list_users)
    @property
    def projects(self):
        return self._Projects(
            get=self._get_project, create=self._create_project
        )
    def _get_project(self, full_path_or_id):
        """Look up a project by full path first, then by id; 404 otherwise."""
        if full_path_or_id in self._projects:
            return self._projects[full_path_or_id]
        for project in self._projects.values():
            if project.id == full_path_or_id:
                return project
        raise gitlab.exceptions.GitlabGetError(
            response_code=404, error_message="Project Not Found"
        )
    def _create_project(self, data):
        """Note that the self._projects dict is indexed by full path, as
        opposed to id!
        """
        name = data["name"]
        path = data["path"]
        description = data["description"]
        visibility = data["visibility"]
        namespace_id = data["namespace_id"]
        # ensure namespace exists
        try:
            group = self._get_group(namespace_id)
        except gitlab.exceptions.GitlabGetError:
            raise gitlab.exceptions.GitlabCreateError(
                response_code=400,
                error_message="{'namespace': ['is not valid'], "
                "'limit_reached': []}",
            )
        # ensure no other project in the namespace has the same path
        if path in [p.path for p in group.projects.list(all=True)]:
            raise gitlab.exceptions.GitlabCreateError(
                response_code=400,
                error_message="Failed to save project "
                "{:path=>['has already been taken']}",
            )
        id = self._next_id()
        full_path = "{}/{}".format(self._group_endpoint(namespace_id), path)
        http_url = "{}/{}.git".format(self._base_url, full_path)
        self._projects[full_path] = Project(
            id=id,
            name=name,
            path=path,
            description=description,
            visibility=visibility,
            namespace_id=namespace_id,
            http_url=http_url,
        )
        # Register the project with its owning group as well.
        group._project_list.append(self._projects[full_path])
        return self._projects[full_path]
    def _group_endpoint(self, group_id):
        """Build a url endpoint for a given group by recursively iterating
        through its parents.
        """
        group = self._groups[group_id]
        if group.parent_id:
            prefix = self._group_endpoint(group.parent_id)
            return "{}/{}".format(prefix, group.path)
        return group.path
    def _next_id(self):
        # Monotonically increasing id shared by groups and projects.
        cur_id = self._id
        self._id += 1
        return cur_id
    def _create_group(self, kwargs):
        """Create a group, enforcing parent existence and unique paths."""
        name = kwargs["name"]
        path = kwargs["path"]
        parent_id = kwargs.get("parent_id")
        if parent_id and parent_id not in self._groups:
            raise gitlab.exceptions.GitlabCreateError(
                response_code=404, error_message="Group Not found"
            )
        if path in [g.path for g in self._groups.values()]:
            raise gitlab.exceptions.GitlabCreateError(
                response_code=400, error_message="Path has already been taken"
            )
        group_id = self._next_id()
        self._groups[group_id] = Group(
            id=group_id,
            name=name,
            path=path,
            parent_id=parent_id,
            users_read_only=self._users,
        )
        if parent_id:
            self._groups[parent_id]._group_list.append(self._groups[group_id])
        return self._groups[group_id]
    def _list_groups(self, *, id=None, search=None, all=False):
        """Filter groups by parent id and/or exact name; paginate like GitLab."""
        groups = self._groups.values()
        if id:
            groups = filter(lambda g: g.parent_id == id, groups)
        if search:
            groups = filter(lambda g: g.name == search, groups)
        return list(groups)[: (PAGE_SIZE if not all else None)]
    def _get_group(self, id):
        if id not in self._groups:
            raise gitlab.exceptions.GitlabGetError(
                response_code=404, error_message="Group Not Found"
            )
        return self._groups[id]
    def _list_users(self, username=None):
        """List all users, or just the ones matching ``username`` exactly."""
        if username:
            return [
                usr for usr in self._users.values() if usr.username == username
            ]
        return list(self._users.values())
# Canonical fixtures: the mock only authenticates this url/token pair, and
# GitLabMock pre-creates TARGET_GROUP at construction time.
BASE_URL = "https://some-host.com"
TOKEN = "3049fplktdufpdl23"
TARGET_GROUP = "repobee-testing"
MASTER_GROUP = "repobee-master"
# Replace the real python-gitlab client with GitLabMock for every test.
@pytest.fixture(autouse=True)
def api_mock(mocker):
    return mocker.patch(
        "_repobee.ext.gitlab.gitlab.Gitlab", side_effect=GitLabMock
    )
# A GitLabAPI wired to the mocked client and the pre-created target group.
@pytest.fixture
def api(api_mock):
    yield _repobee.ext.gitlab.GitLabAPI(BASE_URL, TOKEN, TARGET_GROUP)
# Student team names as plain strings.
@pytest.fixture
def team_names():
    return [str(s) for s in constants.STUDENTS]
def raise_(error):
    """Build a callable that ignores its arguments and raises ``error``."""

    def _raiser(*_args, **_kwargs):
        raise error

    return _raiser
@pytest.fixture
def repo_names(api, assignment_names):
    """Setup repo tuples along with groups for the repos to be created in.

    Creates one subgroup per student team under the target group, then one
    project per (team, assignment) pair, and returns the generated names.
    """
    target_group_id = api._gitlab.tests_only_target_group_id
    groups = [
        api._gitlab.groups.create(
            dict(name=str(team), path=str(team), parent_id=target_group_id)
        )
        for team in constants.STUDENTS
    ]
    repo_names = []
    for group, assignment in itertools.product(groups, assignment_names):
        repo_name = plug.generate_repo_name(group.name, assignment)
        api._gitlab.projects.create(
            dict(
                name=repo_name,
                path=repo_name,
                description="Some description",
                visibility="private",
                namespace_id=group.id,
            )
        )
        repo_names.append(repo_name)
    return repo_names
class TestInit:
    """Tests for the GitLabAPI constructor."""
    def test_raises_api_error_when_target_group_cant_be_found(self):
        # GitLabMock only knows TARGET_GROUP, so "fake-name" must 404.
        with pytest.raises(plug.NotFoundError):
            _repobee.ext.gitlab.GitLabAPI(BASE_URL, TOKEN, "fake-name")
# Fixed set of assignment (master repo) names used across the url tests.
@pytest.fixture
def assignment_names():
    return ["task-1", "task-2", "task-3"]
class TestGetRepoUrls:
    """Tests for get_repo_urls (template, custom org and student repos)."""
    def test_get_template_repo_urls(self, assignment_names):
        """When supplied with only assignment_names, get_repo_urls should
        return urls for those master repos, expecting them to be in the target
        group.
        """
        # arrange
        api = _repobee.ext.gitlab.GitLabAPI(BASE_URL, TOKEN, TARGET_GROUP)
        expected_urls = [
            api._insert_auth("{}/{}/{}.git".format(BASE_URL, TARGET_GROUP, mn))
            for mn in assignment_names
        ]
        assert (
            expected_urls
        ), "there must be at least some urls for this test to make sense"
        # act
        actual_urls = api.get_repo_urls(assignment_names, insert_auth=True)
        # assert
        assert sorted(actual_urls) == sorted(expected_urls)
    def test_get_template_repo_urls_in_master_group(self, assignment_names):
        """When supplied with assignment_names and org_name, the urls
        generated should go to the group named org_name instead of the default
        target group.
        """
        # arrange
        master_group = "master-" + TARGET_GROUP # guaranteed != TARGET_GROUP
        api = _repobee.ext.gitlab.GitLabAPI(BASE_URL, TOKEN, TARGET_GROUP)
        expected_urls = [
            api._insert_auth("{}/{}/{}.git".format(BASE_URL, master_group, mn))
            for mn in assignment_names
        ]
        assert (
            expected_urls
        ), "there must be at least some urls for this test to make sense"
        # act
        actual_urls = api.get_repo_urls(
            assignment_names, org_name=master_group, insert_auth=True
        )
        # assert
        assert sorted(actual_urls) == sorted(expected_urls)
    def test_get_student_repo_urls(self, assignment_names):
        """When supplied with the students argument, the generated urls should
        go to the student repos related to the supplied master repos.
        """
        # arrange
        api = _repobee.ext.gitlab.GitLabAPI(BASE_URL, TOKEN, TARGET_GROUP)
        expected_urls = [
            api._insert_auth(
                "{}/{}/{}/{}.git".format(
                    BASE_URL,
                    TARGET_GROUP,
                    str(student_group),
                    plug.generate_repo_name(str(student_group), mn),
                )
            )
            for student_group in constants.STUDENTS
            for mn in assignment_names
        ]
        assert (
            expected_urls
        ), "there must be at least some urls for this test to make sense"
        # act
        actual_urls = api.get_repo_urls(
            assignment_names,
            team_names=[t.name for t in constants.STUDENTS],
            insert_auth=True,
        )
        # assert
        assert sorted(actual_urls) == sorted(expected_urls)
class TestGetRepos:
    """Tests for get_repos."""
    def test_get_all_repos(self, api, repo_names):
        """get_repos should return all repos when called without an
        argument.
        """
        # repo_names fixture has pre-created one project per (team, task).
        assert len(list(api.get_repos())) == len(repo_names)
class TestInsertAuth:
    """Tests for insert_auth."""
    def test_inserts_into_https_url(self, api):
        # Token is injected as oauth2 basic-auth credentials in the url.
        url = f"{BASE_URL}/some/repo"
        authed_url = api.insert_auth(url)
        assert authed_url.startswith(f"https://oauth2:{TOKEN}")
    def test_raises_on_non_platform_url(self, api):
        # Urls outside BASE_URL must be rejected with InvalidURL.
        url = "https://somedomain.com"
        with pytest.raises(plug.InvalidURL) as exc_info:
            api.insert_auth(url)
        assert "url not found on platform" in str(exc_info.value)
class TestVerifySettings:
    """Tests for GitLabAPI.verify_settings: each failure mode plus success."""
    def test_raises_if_token_is_empty(self):
        with pytest.raises(plug.BadCredentials):
            _repobee.ext.gitlab.GitLabAPI.verify_settings(
                user=None, org_name=TARGET_GROUP, base_url=BASE_URL, token=""
            )
    def test_raises_on_failed_connection(self):
        # GitLabMock.auth raises ConnectionError for any url != BASE_URL.
        with pytest.raises(plug.PlatformError) as exc_info:
            _repobee.ext.gitlab.GitLabAPI.verify_settings(
                user=None,
                org_name=TARGET_GROUP,
                base_url="https://garbage-url",
                token=TOKEN,
            )
        assert "please check the URL" in str(exc_info.value)
    def test_raises_on_bad_token(self):
        with pytest.raises(plug.BadCredentials) as exc_info:
            _repobee.ext.gitlab.GitLabAPI.verify_settings(
                user=None,
                org_name=TARGET_GROUP,
                base_url=BASE_URL,
                token="wrong-token",
            )
        assert "Could not authenticate token" in str(exc_info.value)
    def test_raises_if_group_cant_be_found(self):
        non_existing_group = "some-garbage-group"
        with pytest.raises(plug.NotFoundError) as exc_info:
            _repobee.ext.gitlab.GitLabAPI.verify_settings(
                user=None,
                org_name=non_existing_group,
                base_url=BASE_URL,
                token=TOKEN,
            )
        assert "Could not find group with slug {}".format(
            non_existing_group
        ) in str(exc_info.value)
    def test_raises_if_master_group_cant_be_found(self):
        non_existing_group = "some-garbage-group"
        with pytest.raises(plug.NotFoundError) as exc_info:
            _repobee.ext.gitlab.GitLabAPI.verify_settings(
                user=None,
                org_name=TARGET_GROUP,
                base_url=BASE_URL,
                token=TOKEN,
                template_org_name=non_existing_group,
            )
        assert "Could not find group with slug {}".format(
            non_existing_group
        ) in str(exc_info.value)
    def test_raises_when_user_is_not_member(self, mocker):
        # Swap the authenticated user for one that is not in TARGET_GROUP.
        gl = GitLabMock(BASE_URL, TOKEN, False)
        gl.groups.create(dict(name=MASTER_GROUP, path=MASTER_GROUP))
        user = User(id=9999, username="some-random-user")
        gl._user = user
        mocker.patch(
            "_repobee.ext.gitlab.gitlab.Gitlab",
            side_effect=lambda base_url, private_token, ssl_verify: gl,
        )
        with pytest.raises(plug.BadCredentials) as exc_info:
            _repobee.ext.gitlab.GitLabAPI.verify_settings(
                user=None,
                org_name=TARGET_GROUP,
                base_url=BASE_URL,
                token=TOKEN,
                template_org_name=MASTER_GROUP,
            )
        assert f"{user.username} is not a member of {TARGET_GROUP}" in str(
            exc_info.value
        )
    def test_happy_path(self, mocker):
        """Test that the great success message is printed if all is as it
        should.
        """
        gl = GitLabMock(BASE_URL, TOKEN, False)
        gl.groups.create(dict(name=MASTER_GROUP, path=MASTER_GROUP))
        mocker.patch(
            "_repobee.ext.gitlab.gitlab.Gitlab",
            side_effect=lambda base_url, private_token, ssl_verify: gl,
        )
        log_mock = mocker.patch("repobee_plug.log.info")
        _repobee.ext.gitlab.GitLabAPI.verify_settings(
            user=None,
            org_name=TARGET_GROUP,
            base_url=BASE_URL,
            token=TOKEN,
            template_org_name=MASTER_GROUP,
        )
        log_mock.assert_called_with("GREAT SUCCESS: All settings check out!")
| [
"noreply@github.com"
] | tohanss.noreply@github.com |
2eb08aeab2d31864011a33b28fee95b6b159ea67 | b011f9a78db79f3c68d6f53f74e83df3970bf8e0 | /authl/__version__.py | f161374a9541bffb46c771e0f6ed93bd66566ff9 | [
"MIT"
] | permissive | zaxtax/Authl | 7504b1eef264da66085b12f1995b0d17aabbb95a | 21bdacfff2761f91e3859ac0735e5902d3763e88 | refs/heads/master | 2021-04-21T01:45:53.256468 | 2020-02-29T03:32:59 | 2020-02-29T03:32:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54 | py | """ current library version """
__version__ = '0.3.5'
| [
"fluffy@beesbuzz.biz"
] | fluffy@beesbuzz.biz |
7135df943d2a6a55839d6f0e1abf8cfb5bbe52e2 | 2fffd51ba941c0848733097bc825549cc04faf65 | /src/dqn/dqn_predator_model.py | 685e65fedc8a2f257e218a3585f63b6d514585d5 | [] | no_license | WouterHuygen/multi-agent-reinforcement-learning | 1c483ab5d92881744d6d264377491770f1409df4 | 63b2292c1e3f8170f3190e17b9b9e381faac5a36 | refs/heads/main | 2023-02-08T04:42:05.062186 | 2020-12-29T18:03:00 | 2020-12-29T18:03:00 | 308,330,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,389 | py | from torch import nn, cat
from ray.rllib.utils.annotations import override
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from gym.spaces import Discrete, Box
class DQNPredatorModel(nn.Module, TorchModelV2):
    """Fully-connected Q-network for the predator agents (RLlib custom model).

    Builds Linear+ReLU blocks sized by
    model_config["custom_model_config"]["network_size"], followed by a final
    Linear layer producing num_outputs Q-values.
    """
    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name):
        TorchModelV2.__init__(self, obs_space, action_space, num_outputs,
                              model_config, name)
        nn.Module.__init__(self)
        self.obs_space = obs_space
        self.action_space = action_space
        self.model_config = model_config
        self.name = name
        # Hidden-layer widths, e.g. [64, 64].
        self.network_size = model_config["custom_model_config"]["network_size"]
        if isinstance(self.obs_space, Box):
            self.obs_shape = obs_space.shape[0]
        else:
            # NOTE(review): for non-Box spaces this stores the space object
            # itself (not a size), and the shape[0] access below would fail;
            # confirm only Box observation spaces are used with this model.
            self.obs_shape = self.obs_space
        self.layers = nn.Sequential()
        last_size = self.obs_space.shape[0]
        i = 0
        # One (Linear, ReLU) pair per configured hidden layer.
        for layer_size in self.network_size:
            self.layers.add_module("linear_{}".format(i), nn.Linear(last_size, layer_size))
            self.layers.add_module("relu_{}".format(i), nn.ReLU())
            last_size = layer_size
            i += 1
        # Output head: no activation, raw Q-values.
        self.layers.add_module("linear_{}".format(i), nn.Linear(last_size, num_outputs))
    @override(TorchModelV2)
    def forward(self, obs):
        # obs is expected to be a flat (batch, obs_dim) tensor.
        return self.layers(obs)
| [
"berk.ayaz@hotmail.com"
] | berk.ayaz@hotmail.com |
ce88bf7af10a89c8474dffc2a82e54e04c1d6a2b | 56ffce29f0d27f83206e11870d95982c38524aae | /apweb/site/configure.py | e40a59ea7cfb45c55048ca631afb93938572dc5f | [] | no_license | adamandpaul/apweb | cce365085e2ee58cfbc31544c5a7414e67ad56b4 | b1bb81fa7d7b39f19e187462aa3447ff482b46af | refs/heads/master | 2022-10-19T02:09:52.437906 | 2021-05-21T06:10:08 | 2021-05-21T06:10:08 | 201,398,036 | 0 | 3 | null | 2022-09-21T21:39:41 | 2019-08-09T05:41:06 | Python | UTF-8 | Python | false | false | 1,316 | py | # -*- coding:utf-8 -*-
from .password_login_provider import PasswordLoginProvider
from .resource import Site
import apweb.authentication
def site_factory(request):
"""Return a default site factory"""
return Site.from_request(request)
def get_user_for_unauthenticated_userid(request):
email = request.unauthenticated_userid
return request.site["users"].get_user_by_email(email)
def get_identifiers(request):
    """Return the base identifiers for *request*, plus the user's uuid."""
    found = list(apweb.authentication.get_identifiers(request))
    current_user = request.user
    if current_user:
        found.append(("user_uuid", str(current_user.user_uuid)))
    return found
def get_roles(request):
    """Return the base roles for *request*, plus per-user assigned roles."""
    found = list(apweb.authentication.get_roles(request))
    current_user = request.user
    if current_user:
        found.extend(current_user.assigned_roles)
        found.append('authenticated')
    return found
def includeme(config):
    """Configuration entry point for the site package (Pyramid-style hook).

    Wires the base ``apweb`` configuration, exposes ``site``, ``user``,
    ``identifiers`` and ``roles`` as cached request attributes, registers
    password login, and pulls in the view configuration.
    """
    config.include("apweb")
    # reify=True: each property is computed once per request, then cached.
    config.add_request_method(site_factory, "site", reify=True)
    config.add_request_method(get_user_for_unauthenticated_userid, "user", reify=True)
    config.add_request_method(get_identifiers, "identifiers", reify=True)
    config.add_request_method(get_roles, "roles", reify=True)
    config.register_login_provider(PasswordLoginProvider())
    config.include(".view")
    config.commit()
| [
"arterrey@gmail.com"
] | arterrey@gmail.com |
def countBirths(first_year=1880, last_year=2015):
    """Total the recorded births per year from the names/yobYYYY.txt files.

    Each file contains comma-separated rows of ``name,sex,births``.

    Parameters (defaults reproduce the original hard-coded 1880-2014 range):
    first_year -- first year to read (inclusive)
    last_year  -- upper bound year (exclusive, matching range())

    Returns a list of (year, total_births) tuples, one per year.
    """
    ret = []
    for y in range(first_year, last_year):
        count = 0
        filename = 'names/yob%d.txt' % y
        with open(filename, 'r') as f:
            # Iterate lazily instead of readlines(); strip() removes the
            # trailing newline the original removed by hand.
            for line in f:
                record = line.strip()
                if not record:
                    continue  # tolerate stray blank lines
                count += int(record.split(',')[2])
        ret.append((y, count))
    return ret
# Aggregate yearly birth totals and persist them as CSV rows.
birth_totals = countBirths()
with open('birth_by_year.csv', 'w') as csv_file:
    for year, births in birth_totals:
        row = f"{year},{births}\n"
        print(row)  # echo each row for quick inspection
        csv_file.write(row)
| [
"jsh2333@gmail.com"
] | jsh2333@gmail.com |
cca5dc568c4108d5f0343a262c80979e232932b0 | bdccb421c292d4ce78814c81c80aae7165f2a625 | /2018-1-9 functions.py | b196292ad61d940258bd54ca67114307ad22112b | [] | no_license | SunTXone/PythonCodes | f2199309f61509eeb0a7bfa74c65098ad4d4e2ff | ba69ee823670c1242d82736abb38b96f2654ba06 | refs/heads/master | 2021-04-27T18:33:44.504312 | 2018-02-24T01:05:45 | 2018-02-24T01:05:45 | 122,341,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,668 | py | Python 3.6.4 (v3.6.4:d48eceb, Dec 19 2017, 06:04:45) [MSC v.1900 32 bit (Intel)] on win32
Type "copyright", "credits" or "license()" for more information.
>>> #传递函数
>>> def ba():
print('I am in bar()')
>>> def foo(func):
func()
>>> foo(ba)
I am in bar()
>>> def power_seq(func,seq):
return [func(i) for i in seq]
>>> def pingfang(x):
return x**2
>>> __name__
'__main__'
>>> if __name== "__main__":
num_seq = [111,3.14,2.91]
r = power_seq[pingfang,num_seq]
print(num_seq)
print(r)
Traceback (most recent call last):
File "<pyshell#20>", line 1, in <module>
if __name== "__main__":
NameError: name '__name' is not defined
>>>
KeyboardInterrupt
>>> if __name__ == "__main__":
num_seq = [111,3.14,2.91]
r = power_seq[pingfang,num_seq]
print(num_seq)
print(r)
Traceback (most recent call last):
File "<pyshell#26>", line 3, in <module>
r = power_seq[pingfang,num_seq]
TypeError: 'function' object is not subscriptable
>>> if __name__ == "__main__":
num_seq = [111,3.14,2.91]
r = power_seq(pingfang,num_seq)
print(num_seq)
print(r)
[111, 3.14, 2.91]
[12321, 9.8596, 8.468100000000002]
>>> pingfang(i) for i in num_seq
SyntaxError: invalid syntax
>>> [pingfang(i) for i in num_seq]
[12321, 9.8596, 8.468100000000002]
>>> (pingfang(i) for i in num_seq)
<generator object <genexpr> at 0x0251D930>
>>>
>>> #嵌套函数
>>> def foo():
def bar():
print("bar() is running")
print("foo() is running")
>>> foo()
foo() is running
>>> #上例在foo函数中定义了bar函数,在foo函数中没有调用bar函数,所有没执行bar函数的输出
>>>
>>> #新例子,在函数中定义函数,并调用
>>> def foo():
def far():
print("bar() is running")
bar() # 在foo函数的定义中显式执行bar函数
print('foo() is running')
>>> foo()
Traceback (most recent call last):
File "<pyshell#53>", line 1, in <module>
foo()
File "<pyshell#52>", line 4, in foo
bar() # 在foo函数的定义中显式执行bar函数
NameError: name 'bar' is not defined
>>> #嵌套函数的名字写错误,重试
>>> def foo():
def bar():
print("bar() is running")
bar() # 在foo函数的定义中显式执行bar函数
print('foo() is running')
>>> foo()
bar() is running
foo() is running
>>> #嵌套函数定义、演示成功
>>> #注意:函数内嵌套定义的函数,不能在定义函数外部单独调用。如在主函数外调用主函数的嵌套函数,显示“NameError”
>>>
>>> #为方便描述,将有嵌套函数的函数成为“主函数”或“定义函数”,主函数内定义的嵌套函数称为“嵌套函数”
>>> '''嵌套函数中的变量与主函数的变量关系、作用范围和函数中的变量与全局变量的关系、作用范围类似 '''
'嵌套函数中的变量与主函数的变量关系、作用范围和函数中的变量与全局变量的关系、作用范围类似 '
>>> #1、嵌套函数可以直接使用主函数的变量
>>> #2、当嵌套函数使用主函数的变量时,如果不能使python解析器自动认定变量为主函数变量,可以使用nonlocal关键字标识主函数变量
>>>
>>> #计算重力例子
>>> def weight(g):
def cal_mg(m):
return m*g
return cal_mg
>>> w = weight(10) #g=10
>>> type(w)
<class 'function'>
>>> dir(w)
['__annotations__', '__call__', '__class__', '__closure__', '__code__', '__defaults__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__get__', '__getattribute__', '__globals__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__kwdefaults__', '__le__', '__lt__', '__module__', '__name__', '__ne__', '__new__', '__qualname__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__']
>>> mg = w(10)
>>> type(mg)
<class 'int'>
>>> mg
100
>>> print(mg)
100
"""
据说:执行w=weight(10)后,w所引用的是一个函数对象(cal_mg),而W(10)_则是向所引用的函数对象cal_mg传递了一个参数10,从而计算m*g并返回结果。
"""
>>> g0 = 9.78046 #赤道海平面上的重力加速度
>>> w0 = weight(g0)
>>> mg0 = w0(10)
>>> print(mg0)
SyntaxError: invalid character in identifier
>>> print(mg0)
97.8046
>>> ### 我目前没看懂这个例子???????????????
>>> ### 据说:这个嵌套函数,其实能够制作一个动态的函数对象——cal_mg。这个话题延伸下去,就是所谓的“闭包”。
>>> ### 2018-1-9 22:27
>>>
>>>
| [
"noreply@github.com"
] | SunTXone.noreply@github.com |
150f4bc532ec71ec3ec5e293645728741c8f2514 | 15feee7c7d3822f23f831df1447e03d9812f4651 | /simbion_mvc/entity/pendaftaran.py | cf82854ca4dcab433d83254bf5abcac0e1de395f | [] | no_license | yerlandinata/simbion | cf3e3d2256ac76af99b614d25bff3bb3601138cc | 222b714502f0761ac9658651441da7c7fd170b39 | refs/heads/master | 2020-03-15T09:42:25.493802 | 2018-05-21T12:01:27 | 2018-05-21T12:01:27 | 132,081,553 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,588 | py | from .entity import Entity
class Pendaftaran(Entity):
    """A scholarship application (pendaftaran) by a student (mahasiswa).

    Links an active scholarship scheme with the applying student and the
    registration metadata, storing plain values in the Entity data dict
    while keeping the rich objects privately for delegated lookups.
    """
    def __init__(self,skema_beasiswa_aktif,mahasiswa,waktu_daftar,status_daftar,status_terima):
        super().__init__({
            'skema_beasiswa_aktif':skema_beasiswa_aktif.data,
            'mahasiswa':mahasiswa.data,
            'waktu_daftar':waktu_daftar,
            'status_daftar':status_daftar,
            'status_terima':status_terima
        })
        # Keep the full objects (not just .data) so getters can delegate.
        self.__skema_beasiswa_aktif = skema_beasiswa_aktif
        self.__mahasiswa = mahasiswa
    def getNoUrut(self):
        """Sequence number of the active scholarship scheme."""
        return self.__skema_beasiswa_aktif.getNoUrut()
    def getKodeSkemaBeasiswa(self):
        """Code of the underlying scholarship scheme."""
        return self.__skema_beasiswa_aktif.getSkemaBeasiswa().getKode()
    def getNPM(self):
        """Student id (NPM) of the applicant."""
        return self.__mahasiswa.getNpm()
    def getWaktuDaftar(self):
        """Registration timestamp."""
        return self.data['waktu_daftar']
    def getStatusDaftar(self):
        """Registration status."""
        return self.data['status_daftar']
    def getStatusTerima(self):
        """Acceptance status."""
        return self.data['status_terima']
    def getMahasiswa(self):
        """The applying student object."""
        return self.__mahasiswa
    def __repr__(self) :
        # NOTE: the backslash-continued literal embeds the leading spaces of
        # its second line in the output; kept as-is for compatibility.
        return 'Pendaftaran : nomor urut = {}, kode skema beasiswa = {} ,NPM Mahasiswa = {}, \
        waktu daftar = {} , status daftar = {} , status_terima = {}'.format(self.getNoUrut(),self.getKodeSkemaBeasiswa(),
        self.getNPM(),self.getWaktuDaftar(),
        self.getStatusDaftar(),self.getStatusTerima())
    def __str__(self) :
        return self.__repr__()
| [
"adrika.novrialdi@gmail.com"
] | adrika.novrialdi@gmail.com |
4bee8633074dd1f2174f4054fd7cd3ce218bf717 | d094ba0c8a9b1217fbf014aa79a283a49aabe88c | /env/lib/python3.6/site-packages/nilearn/mass_univariate/tests/test_permuted_least_squares.py | e873dc0a17226b11dce52a062647e51e0e9f3452 | [
"Apache-2.0"
] | permissive | Raniac/NEURO-LEARN | d9274e0baadd97bb02da54bdfcf6ca091fc1c703 | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | refs/heads/master | 2022-12-25T23:46:54.922237 | 2020-09-06T03:15:14 | 2020-09-06T03:15:14 | 182,013,100 | 9 | 2 | Apache-2.0 | 2022-12-09T21:01:00 | 2019-04-18T03:57:00 | CSS | UTF-8 | Python | false | false | 23,016 | py | """
Tests for the permuted_ols function.
"""
# Author: Virgile Fritsch, <virgile.fritsch@inria.fr>, Feb. 2014
import numpy as np
from scipy import stats
from sklearn.utils import check_random_state
from numpy.testing import (assert_almost_equal, assert_array_almost_equal,
assert_array_less, assert_equal)
from nilearn.mass_univariate import permuted_ols
from nilearn.mass_univariate.permuted_least_squares import (
_t_score_with_covars_and_normalized_design, orthonormalize_matrix)
def get_tvalue_with_alternative_library(tested_vars, target_vars, covars=None):
    """Utility function to compute t-values with statsmodels or numpy.linalg

    Massively univariate linear model (= each target is considered
    independently).

    Parameters
    ----------
    tested_vars: array-like, shape=(n_samples, n_regressors)
      Tested variates, the associated coefficient of which are to be tested
      independently with a t-test, resulting in as many t-values.

    target_vars: array-like, shape=(n_samples, n_targets)
      Target variates, to be approximated with a linear combination of
      the tested variates and the confounding variates.

    covars: array-like, shape=(n_samples, n_confounds), optional
      Confounding variates, to be fitted but not to be tested

    Returns
    -------
    t-values: np.ndarray, shape=(n_targets, n_regressors)

    """
    ### set up design
    n_samples, n_regressors = tested_vars.shape
    n_targets = target_vars.shape[1]
    if covars is not None:
        n_covars = covars.shape[1]
        design_matrix = np.hstack((tested_vars, covars))
    else:
        n_covars = 0
        design_matrix = tested_vars
    # Each tested regressor is fitted along all covars but no other regressor.
    mask_covars = np.ones(n_regressors + n_covars, dtype=bool)
    mask_covars[:n_regressors] = False
    # Contrast selecting the (single) tested coefficient.
    test_matrix = np.array([[1.] + [0.] * n_covars])

    ### t-values computation
    # Bug fix: the original wrapped the whole statsmodels branch in a bare
    # "except:", which silently fell back to linalg on *any* error (masking
    # genuine OLS bugs).  Only the import failure should trigger the fallback.
    try:  # try with statsmodels if available (more concise)
        from statsmodels.regression.linear_model import OLS
    except ImportError:
        OLS = None

    if OLS is not None:
        t_values = np.empty((n_targets, n_regressors))
        for i in range(n_targets):
            current_target = target_vars[:, i].reshape((-1, 1))
            for j in range(n_regressors):
                current_tested_mask = mask_covars.copy()
                current_tested_mask[j] = True
                current_design_matrix = design_matrix[:, current_tested_mask]
                ols_fit = OLS(current_target, current_design_matrix).fit()
                t_values[i, j] = np.ravel(ols_fit.t_test(test_matrix).tvalue)
    else:  # use linalg if statsmodels is not available
        from numpy import linalg
        lost_dof = n_covars + 1  # fit all tested variates independently
        t_values = np.empty((n_targets, n_regressors))
        for i in range(n_regressors):
            current_tested_mask = mask_covars.copy()
            current_tested_mask[i] = True
            current_design_matrix = design_matrix[:, current_tested_mask]
            invcov = linalg.pinv(current_design_matrix)
            normalized_cov = np.dot(invcov, invcov.T)  # = (X'X)^-1
            t_val_denom_aux = np.diag(
                np.dot(test_matrix, np.dot(normalized_cov, test_matrix.T)))
            t_val_denom_aux = t_val_denom_aux.reshape((-1, 1))
            for j in range(n_targets):
                current_target = target_vars[:, j].reshape((-1, 1))
                res_lstsq = linalg.lstsq(current_design_matrix, current_target)
                residuals = (current_target
                             - np.dot(current_design_matrix, res_lstsq[0]))
                t_val_num = np.dot(test_matrix, res_lstsq[0])
                t_val_denom = np.sqrt(
                    np.sum(residuals ** 2, 0) / float(n_samples - lost_dof)
                    * t_val_denom_aux)
                t_values[j, i] = np.ravel(t_val_num / t_val_denom)
    return t_values
### Tests t-scores computation ################################################
def test_t_score_with_covars_and_normalized_design_nocovar(random_state=0):
    """Check nilearn's t-score routine against an independent OLS fit
    (no covariates, pre-normalized regressor and target)."""
    rng = check_random_state(random_state)
    ### Normalized data
    n_samples = 50
    # generate data
    var1 = np.ones((n_samples, 1)) / np.sqrt(n_samples)  # unit-norm constant
    var2 = rng.randn(n_samples, 1)
    var2 = var2 / np.sqrt(np.sum(var2 ** 2, 0)) # normalize
    # compute t-scores with nilearn routine
    t_val_own = _t_score_with_covars_and_normalized_design(var1, var2)
    # compute t-scores with linalg or statsmodels
    t_val_alt = get_tvalue_with_alternative_library(var1, var2)
    assert_array_almost_equal(t_val_own, t_val_alt)
def test_t_score_with_covars_and_normalized_design_withcovar(random_state=0):
    """Check nilearn's t-score routine against an independent OLS fit
    when orthonormalized covariates are included in the design.
    """
    rng = check_random_state(random_state)
    ### Normalized data
    n_samples = 50
    # generate data
    var1 = np.ones((n_samples, 1)) / np.sqrt(n_samples) # normalized
    var2 = rng.randn(n_samples, 1)
    var2 = var2 / np.sqrt(np.sum(var2 ** 2, 0)) # normalize
    covars = np.eye(n_samples, 3) # covars is orthogonal
    covars[3] = -1 # covars is orthogonal to var1
    covars = orthonormalize_matrix(covars)
    # nilearn t-score
    own_score = _t_score_with_covars_and_normalized_design(var1, var2, covars)
    # compute t-scores with linalg or statsmodels
    ref_score = get_tvalue_with_alternative_library(var1, var2, covars)
    assert_array_almost_equal(own_score, ref_score)
### General tests for permuted_ols function ###################################
def test_permuted_ols_check_h0_noeffect_labelswap(random_state=0):
    """Under a no-effect design, the permutation null (label swapping) must
    match the theoretical t distribution, and the CDF MSE must shrink as the
    number of permutations grows.
    """
    rng = check_random_state(random_state)
    # design parameters
    n_samples = 100
    # create dummy design with no effect
    target_var = rng.randn(n_samples, 1)
    tested_var = np.arange(n_samples, dtype='f8').reshape((-1, 1))
    tested_var_not_centered = tested_var.copy()
    tested_var -= tested_var.mean(0) # centered
    # permuted OLS
    # We check that h0 is close to the theoretical distribution, which is
    # known for this simple design (= t(n_samples - dof)).
    perm_ranges = [10, 100, 1000] # test various number of permutations
    # we use two models (with and without intercept modelling)
    all_kstest_pvals = []
    all_kstest_pvals_intercept = []
    all_kstest_pvals_intercept2 = []
    # we compute the Mean Squared Error between cumulative Density Function
    # as a proof of consistency of the permutation algorithm
    all_mse = []
    all_mse_intercept = []
    all_mse_intercept2 = []
    for i, n_perm in enumerate(np.repeat(perm_ranges, 10)):
        ### Case no. 1: no intercept in the model
        pval, orig_scores, h0 = permuted_ols(
            tested_var, target_var, model_intercept=False,
            n_perm=n_perm, two_sided_test=False, random_state=i)
        assert_equal(h0.size, n_perm)
        # Kolmogorov-Smirnov test
        kstest_pval = stats.kstest(h0, stats.t(n_samples - 1).cdf)[1]
        all_kstest_pvals.append(kstest_pval)
        mse = np.mean(
            (stats.t(n_samples - 1).cdf(np.sort(h0))
             - np.linspace(0, 1, h0.size + 1)[1:]) ** 2)
        all_mse.append(mse)
        ### Case no. 2: intercept in the model
        pval, orig_scores, h0 = permuted_ols(
            tested_var, target_var, model_intercept=True,
            n_perm=n_perm, two_sided_test=False, random_state=i)
        assert_array_less(pval, 1.) # pval should not be significant
        # Kolmogorov-Smirnov test
        kstest_pval = stats.kstest(h0, stats.t(n_samples - 2).cdf)[1]
        all_kstest_pvals_intercept.append(kstest_pval)
        mse = np.mean(
            (stats.t(n_samples - 2).cdf(np.sort(h0))
             - np.linspace(0, 1, h0.size + 1)[1:]) ** 2)
        all_mse_intercept.append(mse)
        ### Case no. 3: intercept in the model, no centering of tested vars
        pval, orig_scores, h0 = permuted_ols(
            tested_var_not_centered, target_var, model_intercept=True,
            n_perm=n_perm, two_sided_test=False, random_state=i)
        assert_array_less(pval, 1.) # pval should not be significant
        # Kolmogorov-Smirnov test
        kstest_pval = stats.kstest(h0, stats.t(n_samples - 2).cdf)[1]
        all_kstest_pvals_intercept2.append(kstest_pval)
        mse = np.mean(
            (stats.t(n_samples - 2).cdf(np.sort(h0))
             - np.linspace(0, 1, h0.size + 1)[1:]) ** 2)
        all_mse_intercept2.append(mse)
    all_kstest_pvals = np.array(all_kstest_pvals).reshape(
        (len(perm_ranges), -1))
    all_kstest_pvals_intercept = np.array(all_kstest_pvals_intercept).reshape(
        (len(perm_ranges), -1))
    # Consistency fix: this accumulator was the only one left as a plain
    # list; reshape it like its siblings (values are unchanged).
    all_kstest_pvals_intercept2 = np.array(
        all_kstest_pvals_intercept2).reshape((len(perm_ranges), -1))
    all_mse = np.array(all_mse).reshape((len(perm_ranges), -1))
    all_mse_intercept = np.array(all_mse_intercept).reshape(
        (len(perm_ranges), -1))
    all_mse_intercept2 = np.array(all_mse_intercept2).reshape(
        (len(perm_ranges), -1))
    # check that a difference between distributions is not rejected by KS test
    assert_array_less(0.01, all_kstest_pvals)
    assert_array_less(0.01, all_kstest_pvals_intercept)
    assert_array_less(0.01, all_kstest_pvals_intercept2)
    # consistency of the algorithm: the more permutations, the less the MSE
    assert_array_less(np.diff(all_mse.mean(1)), 0)
    assert_array_less(np.diff(all_mse_intercept.mean(1)), 0)
    assert_array_less(np.diff(all_mse_intercept2.mean(1)), 0)
def test_permuted_ols_check_h0_noeffect_signswap(random_state=0):
    """Under a no-effect intercept-only design, the permutation null (sign
    swapping) must match the theoretical t distribution, with the CDF MSE
    shrinking as the number of permutations grows."""
    rng = check_random_state(random_state)
    # design parameters
    n_samples = 100
    # create dummy design with no effect
    target_var = rng.randn(n_samples, 1)
    tested_var = np.ones((n_samples, 1))
    # permuted OLS
    # We check that h0 is close to the theoretical distribution, which is
    # known for this simple design (= t(n_samples - dof)).
    perm_ranges = [10, 100, 1000] # test various number of permutations
    all_kstest_pvals = []
    # we compute the Mean Squared Error between cumulative Density Function
    # as a proof of consistency of the permutation algorithm
    all_mse = []
    for i, n_perm in enumerate(np.repeat(perm_ranges, 10)):
        pval, orig_scores, h0 = permuted_ols(
            tested_var, target_var, model_intercept=False,
            n_perm=n_perm, two_sided_test=False, random_state=i)
        assert_equal(h0.size, n_perm)
        # Kolmogorov-Smirnov test
        kstest_pval = stats.kstest(h0, stats.t(n_samples).cdf)[1]
        all_kstest_pvals.append(kstest_pval)
        mse = np.mean(
            (stats.t(n_samples).cdf(np.sort(h0))
             - np.linspace(0, 1, h0.size + 1)[1:]) ** 2)
        all_mse.append(mse)
    all_kstest_pvals = np.array(all_kstest_pvals).reshape(
        (len(perm_ranges), -1))
    all_mse = np.array(all_mse).reshape((len(perm_ranges), -1))
    # check that a difference between distributions is not rejected by KS test
    assert_array_less(0.01 / (len(perm_ranges) * 10.), all_kstest_pvals)
    # consistency of the algorithm: the more permutations, the less the MSE
    assert_array_less(np.diff(all_mse.mean(1)), 0)
### Tests for labels swapping permutation scheme ##############################
def test_permuted_ols_nocovar(random_state=0):
    """Compare permuted_ols scores (no covariate) against an independent OLS
    fit, including the raveled-input and model_intercept variants."""
    rng = check_random_state(random_state)
    # design parameters
    n_samples = 50
    # create design
    target_var = rng.randn(n_samples, 1)
    tested_var = rng.randn(n_samples, 1)
    # compute t-scores with linalg or statsmodels
    ref_score = get_tvalue_with_alternative_library(tested_var, target_var)
    # permuted OLS
    _, own_score, _ = permuted_ols(
        tested_var, target_var, model_intercept=False,
        n_perm=0, random_state=random_state)
    assert_array_almost_equal(ref_score, own_score, decimal=6)
    # test with ravelized tested_var
    _, own_score, _ = permuted_ols(
        np.ravel(tested_var), target_var, model_intercept=False,
        n_perm=0, random_state=random_state)
    assert_array_almost_equal(ref_score, own_score, decimal=6)
    ### Adds intercept (should be equivalent to centering variates)
    # permuted OLS
    _, own_score_intercept, _ = permuted_ols(
        tested_var, target_var, model_intercept=True,
        n_perm=0, random_state=random_state)
    target_var -= target_var.mean(0)
    tested_var -= tested_var.mean(0)
    # compute t-scores with linalg or statsmodels
    ref_score_intercept = get_tvalue_with_alternative_library(
        tested_var, target_var, np.ones((n_samples, 1)))
    assert_array_almost_equal(ref_score_intercept, own_score_intercept,
                              decimal=6)
def test_permuted_ols_withcovar(random_state=0):
    """Compare permuted_ols scores (with confounds) against an independent
    OLS fit, with and without intercept modelling.
    """
    rng = check_random_state(random_state)
    # design parameters
    n_samples = 50
    # create design
    target_var = rng.randn(n_samples, 1)
    tested_var = rng.randn(n_samples, 1)
    confounding_vars = rng.randn(n_samples, 2)
    # compute t-scores with linalg or statsmodels
    ref_score = get_tvalue_with_alternative_library(tested_var, target_var,
                                                    confounding_vars)
    # permuted OLS
    _, own_score, _ = permuted_ols(
        tested_var, target_var, confounding_vars, model_intercept=False,
        n_perm=0, random_state=random_state)
    assert_array_almost_equal(ref_score, own_score, decimal=6)
    ### Adds intercept
    # permuted OLS
    _, own_scores_intercept, _ = permuted_ols(
        tested_var, target_var, confounding_vars, model_intercept=True,
        n_perm=0, random_state=random_state)
    # compute t-scores with linalg or statsmodels
    confounding_vars = np.hstack((confounding_vars, np.ones((n_samples, 1))))
    alt_score_intercept = get_tvalue_with_alternative_library(
        tested_var, target_var, confounding_vars)
    assert_array_almost_equal(alt_score_intercept, own_scores_intercept,
                              decimal=6)
def test_permuted_ols_nocovar_multivariate(random_state=0):
    """Test permuted_ols with multiple tested variates and no covariate.

    It is equivalent to fitting several models with only one tested variate.
    """
    rng = check_random_state(random_state)
    # design parameters
    n_samples = 50
    n_targets = 10
    n_regressors = 2
    # create design
    target_vars = rng.randn(n_samples, n_targets)
    tested_var = rng.randn(n_samples, n_regressors)
    # compute t-scores with linalg or statsmodels
    ref_scores = get_tvalue_with_alternative_library(tested_var, target_vars)
    # permuted OLS
    _, own_scores, _ = permuted_ols(
        tested_var, target_vars, model_intercept=False,
        n_perm=0, random_state=random_state)
    assert_array_almost_equal(ref_scores, own_scores, decimal=6)
    ### Adds intercept (should be equivalent to centering variates)
    # permuted OLS
    _, own_scores_intercept, _ = permuted_ols(
        tested_var, target_vars, model_intercept=True,
        n_perm=0, random_state=random_state)
    target_vars -= target_vars.mean(0)
    tested_var -= tested_var.mean(0)
    # compute t-scores with linalg or statsmodels
    ref_scores_intercept = get_tvalue_with_alternative_library(
        tested_var, target_vars, np.ones((n_samples, 1)))
    assert_array_almost_equal(ref_scores_intercept, own_scores_intercept,
                              decimal=6)
def test_permuted_ols_withcovar_multivariate(random_state=0):
    """Test permuted_ols with multiple tested variates and covariates.

    It is equivalent to fitting several models with only one tested variate.
    """
    rng = check_random_state(random_state)
    # design parameters
    n_samples = 50
    n_targets = 10
    n_covars = 2
    # create design
    target_vars = rng.randn(n_samples, n_targets)
    tested_var = rng.randn(n_samples, 1)
    confounding_vars = rng.randn(n_samples, n_covars)
    # compute t-scores with linalg or statsmodels
    ref_scores = get_tvalue_with_alternative_library(tested_var, target_vars,
                                                     confounding_vars)
    # permuted OLS
    _, own_scores, _ = permuted_ols(
        tested_var, target_vars, confounding_vars, model_intercept=False,
        n_perm=0, random_state=random_state)
    assert_almost_equal(ref_scores, own_scores, decimal=6)
    ### Adds intercept
    # permuted OLS
    _, own_scores_intercept, _ = permuted_ols(
        tested_var, target_vars, confounding_vars, model_intercept=True,
        n_perm=0, random_state=random_state)
    # compute t-scores with linalg or statsmodels
    confounding_vars = np.hstack((confounding_vars, np.ones((n_samples, 1))))
    ref_scores_intercept = get_tvalue_with_alternative_library(
        tested_var, target_vars, confounding_vars)
    assert_array_almost_equal(ref_scores_intercept,
                              own_scores_intercept, decimal=6)
### Tests for sign swapping permutation scheme ##############################
def test_permuted_ols_intercept_nocovar(random_state=0):
    """Intercept-only design: scores must match the reference OLS fit and
    sign-swap p-values must stay non-significant."""
    rng = check_random_state(random_state)
    # design parameters
    n_samples = 50
    # create design
    target_var = rng.randn(n_samples, 1)
    tested_var = np.ones((n_samples, 1))
    # compute t-scores with linalg or statsmodels
    t_val_ref = get_tvalue_with_alternative_library(tested_var, target_var)
    # permuted OLS
    neg_log_pvals, orig_scores, _ = permuted_ols(
        tested_var, target_var, confounding_vars=None, n_perm=10,
        random_state=random_state)
    assert_array_less(neg_log_pvals, 1.) # ensure sign swap is correctly done
    # same thing but with model_intercept=True to check it has no effect
    _, orig_scores_addintercept, _ = permuted_ols(
        tested_var, target_var, confounding_vars=None, model_intercept=True,
        n_perm=0, random_state=random_state)
    assert_array_almost_equal(t_val_ref, orig_scores, decimal=6)
    assert_array_almost_equal(orig_scores, orig_scores_addintercept, decimal=6)
def test_permuted_ols_intercept_statsmodels_withcovar(random_state=0):
    """Intercept-only design with confounds: permuted_ols scores must match
    the reference OLS fit, with and without intercept modelling."""
    rng = check_random_state(random_state)
    # design parameters
    n_samples = 50
    # create design
    target_var = rng.randn(n_samples, 1)
    tested_var = np.ones((n_samples, 1))
    confounding_vars = rng.randn(n_samples, 2)
    # compute t-scores with linalg or statsmodels
    ref_scores = get_tvalue_with_alternative_library(tested_var, target_var,
                                                     confounding_vars)
    # permuted OLS
    _, own_scores, _ = permuted_ols(
        tested_var, target_var, confounding_vars, n_perm=0,
        random_state=random_state)
    # same thing but with model_intercept=True to check it has no effect
    _, own_scores_intercept, _ = permuted_ols(
        tested_var, target_var, confounding_vars, model_intercept=True,
        n_perm=0, random_state=random_state)
    assert_array_almost_equal(ref_scores, own_scores, decimal=6)
    assert_array_almost_equal(ref_scores, own_scores_intercept, decimal=6)
def test_permuted_ols_intercept_nocovar_multivariate(random_state=0):
    """Intercept-only design with several targets: scores must match the
    reference OLS fit, with and without intercept modelling."""
    rng = check_random_state(random_state)
    # design parameters
    n_samples = 50
    n_targets = 10
    # create design
    target_vars = rng.randn(n_samples, n_targets)
    tested_vars = np.ones((n_samples, 1))
    # compute t-scores with the reference (linalg or statsmodels) routine
    ref_scores = get_tvalue_with_alternative_library(tested_vars, target_vars)
    # permuted OLS
    _, own_scores, _ = permuted_ols(
        tested_vars, target_vars, confounding_vars=None, n_perm=0,
        random_state=random_state)
    # same thing but with model_intercept=True to check it has no effect
    _, own_scores_intercept, _ = permuted_ols(
        tested_vars, target_vars, confounding_vars=None, model_intercept=True,
        n_perm=0, random_state=random_state)
    assert_array_almost_equal(ref_scores, own_scores, decimal=6)
    assert_array_almost_equal(own_scores, own_scores_intercept, decimal=6)
def test_permuted_ols_intercept_withcovar_multivariate(random_state=0):
    """Intercept-only design with confounds and several targets: scores must
    match the reference OLS fit, with and without intercept modelling."""
    rng = check_random_state(random_state)
    # design parameters
    n_samples = 50
    n_targets = 10
    n_covars = 2
    # create design
    target_vars = rng.randn(n_samples, n_targets)
    tested_var = np.ones((n_samples, 1))
    confounding_vars = rng.randn(n_samples, n_covars)
    # compute t-scores with linalg or statsmodels
    ref_scores = get_tvalue_with_alternative_library(tested_var, target_vars,
                                                     confounding_vars)
    # permuted OLS
    _, own_scores, _ = permuted_ols(
        tested_var, target_vars, confounding_vars, n_perm=0,
        random_state=random_state)
    # same thing but with model_intercept=True to check it has no effect
    _, own_scores_intercept, _ = permuted_ols(
        tested_var, target_vars, confounding_vars, model_intercept=True,
        n_perm=0, random_state=random_state)
    assert_almost_equal(ref_scores, own_scores, decimal=6)
    assert_array_almost_equal(own_scores, own_scores_intercept, decimal=6)
### Test one-sided versus two-sided ###########################################
def test_sided_test(random_state=0):
    """Check that a positive effect is always better recovered with one-sided.
    """
    rng = check_random_state(random_state)
    # design parameters
    n_samples = 50
    # create design
    target_var = rng.randn(n_samples, 100)
    tested_var = rng.randn(n_samples, 1)
    # permuted OLS
    # one-sided
    neg_log_pvals_onesided, _, _ = permuted_ols(
        tested_var, target_var, model_intercept=False,
        two_sided_test=False, n_perm=100, random_state=random_state)
    # two-sided
    neg_log_pvals_twosided, _, _ = permuted_ols(
        tested_var, target_var, model_intercept=False,
        two_sided_test=True, n_perm=100, random_state=random_state)
    # where the one-sided test found an effect, the two-sided p-value must
    # never be more significant (count of violations must be zero)
    positive_effect_location = neg_log_pvals_onesided > 1
    assert_equal(
        np.sum(neg_log_pvals_twosided[positive_effect_location]
               - neg_log_pvals_onesided[positive_effect_location] > 0),
        0)
def test_sided_test2(random_state=0):
    """Check that two-sided can actually recover positive and negative effects.
    """
    # create design
    target_var1 = np.arange(0, 10).reshape((-1, 1)) # positive effect
    target_var = np.hstack((target_var1, - target_var1))
    tested_var = np.arange(0, 20, 2)
    # permuted OLS
    # one-sided
    neg_log_pvals_onesided, _, _ = permuted_ols(
        tested_var, target_var, model_intercept=False,
        two_sided_test=False, n_perm=100, random_state=random_state)
    # one-sided (other side)
    neg_log_pvals_onesided2, _, _ = permuted_ols(
        tested_var, -target_var, model_intercept=False,
        two_sided_test=False, n_perm=100, random_state=random_state)
    # two-sided
    neg_log_pvals_twosided, _, _ = permuted_ols(
        tested_var, target_var, model_intercept=False,
        two_sided_test=True, n_perm=100, random_state=random_state)
    assert_array_almost_equal(neg_log_pvals_onesided[0],
                              neg_log_pvals_onesided2[0][::-1])
    assert_array_almost_equal(neg_log_pvals_onesided + neg_log_pvals_onesided2,
                              neg_log_pvals_twosided)
| [
"leibingye@outlook.com"
] | leibingye@outlook.com |
f2780023b958c24ce9a96cc053a798ca125226ef | 456e815c7ae4b72ae223cbc165f3cff7e6829582 | /scoring.py | 646fbd1850883a435912b6cf1f716d9a80cfeb94 | [] | no_license | Luche/Tesis | 25043a0528ee9714b0a2e134310304bfcfb06aba | b270fbf1caaa2a9c78b19785f60b0d7537d583b0 | refs/heads/main | 2023-08-05T13:15:39.096120 | 2021-10-08T08:54:41 | 2021-10-08T08:54:41 | 342,509,451 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,808 | py | import glob
import nltk
import numpy as np
from datasets import load_metric
from bert_score import score
nltk.download('punkt')  # sentence-tokenizer model required by nltk.sent_tokenize
metric = load_metric("rouge")  # module-level ROUGE metric shared by compute_rouge
def load_data(path=""):
    """Read reference, candidate and source sentence lists from text files.

    path -- optional prefix prepended to the three fixed file names
            ("ref.txt", "out.txt", "src.txt").

    Returns (ref, can, src), each a list of stripped lines.
    """
    def _read_lines(file_path):
        # One stripped sentence per line; explicit encoding for portability.
        with open(file_path, 'r', encoding='utf-8') as f:
            return [line.replace('\n', '').strip() for line in f]

    # NOTE(review): the candidate list is read from "ref.txt" and the
    # reference list from "out.txt" -- the naming looks swapped; confirm
    # how the files are produced before relying on the labels.
    can_path = path + "ref.txt"
    ref_path = path + "out.txt"
    src_path = path + "src.txt"
    can = _read_lines(can_path)
    ref = _read_lines(ref_path)
    src = _read_lines(src_path)
    return ref, can, src
def compute_rouge(decoded_preds, decoded_labels):
    """Return the ROUGE mid f-measures (as rounded percentages)."""
    # Rouge expects each sentence on its own line.
    preds = ["\n".join(nltk.sent_tokenize(pred.strip())) for pred in decoded_preds]
    labels = ["\n".join(nltk.sent_tokenize(label.strip())) for label in decoded_labels]
    scores = metric.compute(predictions=preds, references=labels, use_stemmer=True)
    # Keep only the mid f-measure of each ROUGE variant, scaled to percent.
    scores = {key: value.mid.fmeasure * 100 for key, value in scores.items()}
    return {key: round(val, 4) for key, val in scores.items()}
def compute_metrics():
    """Evaluate the candidate summaries against the references.

    Loads the evaluation files via ``load_data`` and prints system-level
    BERTScore (multilingual BERT) and ROUGE results.
    """
    references, candidates, _sources = load_data()

    print("Compute BERTScore...")
    precision, recall, f1 = score(
        candidates, references,
        model_type="bert-base-multilingual-cased", verbose=True)
    print(f"System level F1 score: {f1.mean():.4f}")
    print(f"System level Precision score: {precision.mean():.4f}")
    print(f"System level Recall score: {recall.mean():.4f}")

    print("Compute ROUGE...")
    rouge_result = compute_rouge(candidates, references)
    print(rouge_result)
compute_metrics() | [
"henrylucky68@gmail.com"
] | henrylucky68@gmail.com |
81e1ff36d764ebf2d6e179a45efa2e2ac9a119f4 | 71a7ed5dd56b89aea1795a3db1469e3006be783f | /configs/body/2d_kpt_sview_rgb_img/deeppose/coco/res50_coco_256x192.py | 2d2b20fa49d0e23356d688081388809183364474 | [
"Apache-2.0"
] | permissive | RunningLeon/mmpose | b6091416a2565778a1438b23edaa6620a2088a3e | 9ef202c0a4b2c28add32b530caf2ea0354843fe4 | refs/heads/master | 2023-06-30T04:46:52.917877 | 2021-08-05T08:54:24 | 2021-08-05T08:54:24 | 335,275,684 | 0 | 0 | Apache-2.0 | 2021-02-02T12:06:57 | 2021-02-02T12:06:57 | null | UTF-8 | Python | false | false | 3,956 | py | log_level = 'INFO'
# Runtime settings: no checkpoint to start from, NCCL distributed backend.
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=10)
evaluation = dict(interval=10, metric='mAP', save_best='AP')
# Optimizer: plain Adam, no gradient clipping.
optimizer = dict(
    type='Adam',
    lr=5e-4,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[170, 200])
total_epochs = 210
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# COCO keypoints: 17 joints, all of them used for training and inference.
channel_cfg = dict(
    num_output_channels=17,
    dataset_joints=17,
    dataset_channel=[
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
    ],
    inference_channel=[
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
    ])
# model settings
# Top-down regression model: ResNet-50 backbone -> global average pooling ->
# DeepPose-style direct coordinate regression head with SmoothL1 loss.
model = dict(
    type='TopDown',
    pretrained='torchvision://resnet50',
    backbone=dict(type='ResNet', depth=50, num_stages=4, out_indices=(3, )),
    neck=dict(type='GlobalAveragePooling'),
    keypoint_head=dict(
        type='DeepposeRegressionHead',
        in_channels=2048,
        num_joints=channel_cfg['num_output_channels'],
        loss_keypoint=dict(type='SmoothL1Loss', use_target_weight=True)),
    train_cfg=dict(),
    test_cfg=dict(flip_test=True))
# Data settings: 192x256 person crops; evaluation uses detected boxes from
# the bundled detection-results file rather than ground-truth boxes.
data_cfg = dict(
    image_size=[192, 256],
    heatmap_size=[48, 64],
    num_output_channels=channel_cfg['num_output_channels'],
    num_joints=channel_cfg['dataset_joints'],
    dataset_channel=channel_cfg['dataset_channel'],
    inference_channel=channel_cfg['inference_channel'],
    soft_nms=False,
    nms_thr=1.0,
    oks_thr=0.9,
    vis_thr=0.2,
    use_gt_bbox=False,
    det_bbox_thr=0.0,
    bbox_file='data/coco/person_detection_results/'
    'COCO_val2017_detections_AP_H_56_person.json',
)
# Training pipeline: flip/half-body/rotation-scale augmentation, then
# ImageNet normalization and regression-target generation.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='TopDownRandomFlip', flip_prob=0.5),
    dict(
        type='TopDownHalfBodyTransform',
        num_joints_half_body=8,
        prob_half_body=0.3),
    dict(
        type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
    dict(type='TopDownAffine'),
    dict(type='ToTensor'),
    dict(
        type='NormalizeTensor',
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]),
    dict(type='TopDownGenerateTargetRegression'),
    dict(
        type='Collect',
        keys=['img', 'target', 'target_weight'],
        meta_keys=[
            'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
            'rotation', 'bbox_score', 'flip_pairs'
        ]),
]
# Validation/test pipeline: no augmentation, same normalization.
val_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='TopDownAffine'),
    dict(type='ToTensor'),
    dict(
        type='NormalizeTensor',
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]),
    dict(
        type='Collect',
        keys=['img'],
        meta_keys=[
            'image_file', 'center', 'scale', 'rotation', 'bbox_score',
            'flip_pairs'
        ]),
]
test_pipeline = val_pipeline
data_root = 'data/coco'
data = dict(
    samples_per_gpu=64,
    workers_per_gpu=2,
    val_dataloader=dict(samples_per_gpu=32),
    test_dataloader=dict(samples_per_gpu=32),
    train=dict(
        type='TopDownCocoDataset',
        ann_file=f'{data_root}/annotations/person_keypoints_train2017.json',
        img_prefix=f'{data_root}/train2017/',
        data_cfg=data_cfg,
        pipeline=train_pipeline),
    val=dict(
        type='TopDownCocoDataset',
        ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
        img_prefix=f'{data_root}/val2017/',
        data_cfg=data_cfg,
        pipeline=val_pipeline),
    test=dict(
        type='TopDownCocoDataset',
        ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
        img_prefix=f'{data_root}/val2017/',
        data_cfg=data_cfg,
        pipeline=val_pipeline),
)
| [
"noreply@github.com"
] | RunningLeon.noreply@github.com |
6903e350a0ee38e643f722f9a5177bdc7d7da553 | 92f9fd4397d88619073c17174f3d52f5f489d4e4 | /qa/rpc-tests/bip9-softforks.py | 5b15b06663bbf376dc660a6a92e09ebf5fea6278 | [
"LicenseRef-scancode-other-permissive",
"MIT"
] | permissive | diablax2/bts | 380df7562d73a292e641faaff1b0d1e17a10f0a8 | fe3c727ce607e11bee64bb03afadb653e9bd23fd | refs/heads/master | 2020-04-24T21:57:40.173603 | 2019-02-25T06:33:48 | 2019-02-25T06:33:48 | 172,295,667 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,030 | py | #!/usr/bin/env python3
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.blockstore import BlockStore
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import CScript, OP_1NEGATE, OP_CHECKSEQUENCEVERIFY, OP_DROP
from io import BytesIO
import time
import itertools
'''
This test is meant to exercise BIP forks
Connect to a single node.
regtest lock-in with 108/144 block signalling
activation after a further 144 blocks
mine 2 block and save coinbases for later use
mine 141 blocks to transition from DEFINED to STARTED
mine 100 blocks signalling readiness and 44 not in order to fail to change state this period
mine 108 blocks signalling readiness and 36 blocks not signalling readiness (STARTED->LOCKED_IN)
mine a further 143 blocks (LOCKED_IN)
test that enforcement has not triggered (which triggers ACTIVE)
test that enforcement has triggered
'''
class BIP9SoftForksTest(ComparisonTestFramework):
    """Exercise BIP9 versionbits state transitions for the CSV soft fork.

    Walks a single node through DEFINED -> STARTED -> LOCKED_IN -> ACTIVE,
    checking getblocktemplate / getblockchaininfo at every stage, then
    verifies the new rules are enforced once active.
    """

    def __init__(self):
        super().__init__()
        self.num_nodes = 1

    def setup_network(self):
        """Start a single whitelisted node for the comparison framework."""
        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
                                 extra_args=[['-debug', '-whitelist=127.0.0.1']],
                                 binary=[self.options.testbinary])

    def run_test(self):
        self.test = TestManager(self, self.options.tmpdir)
        self.test.add_all_connections(self.nodes)
        NetworkThread().start()  # Start up network handling in another thread
        self.test.run()

    def create_transaction(self, node, coinbase, to_address, amount):
        """Build an unsigned nVersion-2 tx spending *coinbase*'s first output."""
        from_txid = node.getblock(coinbase)['tx'][0]
        inputs = [{"txid": from_txid, "vout": 0}]
        outputs = {to_address: amount}
        rawtx = node.createrawtransaction(inputs, outputs)
        tx = CTransaction()
        f = BytesIO(hex_str_to_bytes(rawtx))
        tx.deserialize(f)
        # nVersion >= 2 is required for BIP68 relative lock-time semantics.
        tx.nVersion = 2
        return tx

    def sign_transaction(self, node, tx):
        """Sign *tx* with the node's wallet and return the signed CTransaction."""
        signresult = node.signrawtransaction(bytes_to_hex_str(tx.serialize()))
        tx = CTransaction()
        f = BytesIO(hex_str_to_bytes(signresult['hex']))
        tx.deserialize(f)
        return tx

    def generate_blocks(self, number, version, test_blocks=None):
        """Mine *number* blocks with nVersion=*version* on top of self.tip.

        Appends [block, True] entries to *test_blocks* and returns it.

        Bug fix: the original used a mutable default (``test_blocks=[]``),
        so blocks from earlier no-argument calls silently accumulated in the
        shared default list across calls within a single test run.
        """
        if test_blocks is None:
            test_blocks = []
        for i in range(number):
            block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
            block.nVersion = version
            block.rehash()
            block.solve()
            test_blocks.append([block, True])
            self.last_block_time += 1
            self.tip = block.sha256
            self.height += 1
        return test_blocks

    def get_bip9_status(self, key):
        """Return the node's BIP9 status dict for softfork *key*."""
        info = self.nodes[0].getblockchaininfo()
        return info['bip9_softforks'][key]

    def test_BIP(self, bipName, activated_version, invalidate, invalidatePostSignature, bitno):
        """Drive one full BIP9 deployment cycle for *bipName* and verify it.

        *invalidate* mutates a tx pre-signing and *invalidatePostSignature*
        post-signing, so that the tx is valid before activation but invalid
        after; *bitno* is the versionbit being deployed.
        """
        # generate some coins for later
        self.coinbase_blocks = self.nodes[0].generate(2)
        self.height = 3  # height of the next block to build
        self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
        self.nodeaddress = self.nodes[0].getnewaddress()
        self.last_block_time = int(time.time())

        assert_equal(self.get_bip9_status(bipName)['status'], 'defined')
        tmpl = self.nodes[0].getblocktemplate({})
        assert(bipName not in tmpl['rules'])
        assert(bipName not in tmpl['vbavailable'])
        assert_equal(tmpl['vbrequired'], 0)
        assert_equal(tmpl['version'], 0x20000000)

        # Test 1
        # Advance from DEFINED to STARTED
        test_blocks = self.generate_blocks(141, 4)
        yield TestInstance(test_blocks, sync_every_block=False)

        assert_equal(self.get_bip9_status(bipName)['status'], 'started')
        tmpl = self.nodes[0].getblocktemplate({})
        assert(bipName not in tmpl['rules'])
        assert_equal(tmpl['vbavailable'][bipName], bitno)
        assert_equal(tmpl['vbrequired'], 0)
        assert(tmpl['version'] & activated_version)

        # Test 2
        # Fail to achieve LOCKED_IN 100 out of 144 signal bit 1
        # using a variety of bits to simulate multiple parallel softforks
        test_blocks = self.generate_blocks(50, activated_version) # 0x20000001 (signalling ready)
        test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not)
        test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000101 (signalling ready)
        test_blocks = self.generate_blocks(24, 4, test_blocks) # 0x20010000 (signalling not)
        yield TestInstance(test_blocks, sync_every_block=False)

        assert_equal(self.get_bip9_status(bipName)['status'], 'started')
        tmpl = self.nodes[0].getblocktemplate({})
        assert(bipName not in tmpl['rules'])
        assert_equal(tmpl['vbavailable'][bipName], bitno)
        assert_equal(tmpl['vbrequired'], 0)
        assert(tmpl['version'] & activated_version)

        # Test 3
        # 108 out of 144 signal bit 1 to achieve LOCKED_IN
        # using a variety of bits to simulate multiple parallel softforks
        test_blocks = self.generate_blocks(58, activated_version) # 0x20000001 (signalling ready)
        test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not)
        test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000101 (signalling ready)
        test_blocks = self.generate_blocks(10, 4, test_blocks) # 0x20010000 (signalling not)
        yield TestInstance(test_blocks, sync_every_block=False)

        assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
        tmpl = self.nodes[0].getblocktemplate({})
        assert(bipName not in tmpl['rules'])

        # Test 4
        # 143 more version 536870913 blocks (waiting period-1)
        test_blocks = self.generate_blocks(143, 4)
        yield TestInstance(test_blocks, sync_every_block=False)

        assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
        tmpl = self.nodes[0].getblocktemplate({})
        assert(bipName not in tmpl['rules'])

        # Test 5
        # Check that the new rule is enforced
        spendtx = self.create_transaction(self.nodes[0],
                self.coinbase_blocks[0], self.nodeaddress, 1.0)
        invalidate(spendtx)
        spendtx = self.sign_transaction(self.nodes[0], spendtx)
        spendtx.rehash()
        invalidatePostSignature(spendtx)
        spendtx.rehash()
        block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
        block.nVersion = activated_version
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()

        self.last_block_time += 1
        self.tip = block.sha256
        self.height += 1
        yield TestInstance([[block, True]])

        assert_equal(self.get_bip9_status(bipName)['status'], 'active')
        tmpl = self.nodes[0].getblocktemplate({})
        assert(bipName in tmpl['rules'])
        assert(bipName not in tmpl['vbavailable'])
        assert_equal(tmpl['vbrequired'], 0)
        assert(not (tmpl['version'] & (1 << bitno)))

        # Test 6
        # Check that the new sequence lock rules are enforced
        spendtx = self.create_transaction(self.nodes[0],
                self.coinbase_blocks[1], self.nodeaddress, 1.0)
        invalidate(spendtx)
        spendtx = self.sign_transaction(self.nodes[0], spendtx)
        spendtx.rehash()
        invalidatePostSignature(spendtx)
        spendtx.rehash()
        block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
        block.nVersion = 5
        block.vtx.append(spendtx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()
        self.last_block_time += 1
        # The tx violating the now-active rules must be rejected.
        yield TestInstance([[block, False]])

        # Restart all: wipe the datadir so the next test_BIP call starts clean.
        self.test.block_store.close()
        stop_nodes(self.nodes)
        wait_BTSds()
        shutil.rmtree(self.options.tmpdir)
        self.setup_chain()
        self.setup_network()
        self.test.block_store = BlockStore(self.options.tmpdir)
        self.test.clear_all_connections()
        self.test.add_all_connections(self.nodes)
        NetworkThread().start()  # Start up network handling in another thread

    def get_tests(self):
        """Run the CSV deployment cycle three times, one invalidation each."""
        for test in itertools.chain(
                self.test_BIP('csv', 0x20000001, self.sequence_lock_invalidate, self.donothing, 0),
                self.test_BIP('csv', 0x20000001, self.mtp_invalidate, self.donothing, 0),
                self.test_BIP('csv', 0x20000001, self.donothing, self.csv_invalidate, 0)
        ):
            yield test

    def donothing(self, tx):
        """No-op invalidator placeholder."""
        return

    def csv_invalidate(self, tx):
        '''Modify the signature in vin 0 of the tx to fail CSV
        Prepends -1 CSV DROP in the scriptSig itself.
        '''
        tx.vin[0].scriptSig = CScript([OP_1NEGATE, OP_CHECKSEQUENCEVERIFY, OP_DROP] +
                                      list(CScript(tx.vin[0].scriptSig)))

    def sequence_lock_invalidate(self, tx):
        '''Modify the nSequence to make it fails once sequence lock rule is activated (high timespan)
        '''
        tx.vin[0].nSequence = 0x00FFFFFF
        tx.nLockTime = 0

    def mtp_invalidate(self, tx):
        '''Modify the nLockTime to make it fails once MTP rule is activated
        '''
        # Disable Sequence lock, Activate nLockTime
        tx.vin[0].nSequence = 0x90FFFFFF
        tx.nLockTime = self.last_block_time
if __name__ == '__main__':
BIP9SoftForksTest().main()
| [
"47169271+BitcoinSDN@users.noreply.github.com"
] | 47169271+BitcoinSDN@users.noreply.github.com |
a77514a354202ea060c3808641ab83dce4cf439f | c29ff32eb5755c45d656162bb91df428eeab489d | /Main.py | ca4ecdf20d8df96838023848bc5e4b4aa30c2822 | [] | no_license | Larionov0/Listo4ek | f5f441dcd908dbd1f55768e3c62515a385ad4cb1 | c30dd00c999239ea205e6a5140b188de6553376f | refs/heads/master | 2021-06-27T17:04:02.950956 | 2021-01-15T14:26:49 | 2021-01-15T14:26:49 | 197,660,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,028 | py | import telebot
from telebot import types
import emoji
from data_handler import data as waves_data, units_data, weapons_data
from random import choice, randint
from funks import *
delete_data = {}
def add_delete_message(chat_id, message_id):
    """Queue *message_id* for later bulk deletion in chat *chat_id*.

    The queued ids are drained (deleted from Telegram) at the start of the
    next incoming text/callback update.
    """
    # setdefault creates the per-chat list on first use in a single step,
    # replacing the original if/else membership check.
    delete_data.setdefault(chat_id, []).append(message_id)
data = {
000000: {
"name": "Larko",
"lvl": 10,
"coins": 0,
"armament": {
"active": {
"location": {
"soldiers": 100,
"turrets": 10,
"tanks": 16,
}
},
"passive": {
"soldiers": 100,
"turrets": 10,
"tanks": 16,
}
},
"location": "wasteland",
"action": None
},
"count_of_lands":0
}
bot = telebot.TeleBot("625314496:AAEQ_L7mcsmhdB8DytiMXfEc3CAGEJaI_iE")
@bot.message_handler(commands = ["start"])
def start(message):
    """Handle /start: greet the user and begin registration or show the menu."""
    # Greeting is queued for deletion so the chat stays clean.
    add_delete_message(message.chat.id, bot.send_message(message.chat.id, "😃🥳😈👾💣⚙💥\nHello, ۞today you will play the best game ever!").message_id)
    if message.chat.id in data:
        # Returning player: jump straight to the main menu.
        menu(message.chat.id)
    else:
        # New player: the string sentinel "creating" marks that the next text
        # message will be taken as the player's name (see text_callback).
        data[message.chat.id] = "creating"
        id = bot.send_message(message.chat.id, "Please input your name:").message_id
        add_delete_message(message.chat.id,id)
    # Remove the user's /start command message itself.
    bot.delete_message(message.chat.id, message.message_id)
@bot.message_handler(commands=["help"])
def help(message):
    """Reply to /help with an inline button linking to the maintainer's chat.

    NOTE: the handler name shadows the builtin ``help``; harmless here since
    the module never calls the builtin.
    """
    contact_button = types.InlineKeyboardButton('message Larko', url='https://t.me/Larko_0')
    markup = types.InlineKeyboardMarkup()
    markup.add(contact_button)
    bot.send_message(message.chat.id, "Please, tell me about your problem", reply_markup=markup)
@bot.message_handler(func = lambda call: True, content_types = ['text'])
def text_callback(message):
chat_id = message.chat.id
for chat in delete_data:
for i in range(len(delete_data[chat])):
id = delete_data[chat][0]
bot.delete_message(chat,id)
delete_data[chat].remove(id)
bot.delete_message(chat_id,message.message_id)
if chat_id in data:
if data[chat_id] == "creating":
location = "wasteland " + str(data["count_of_lands"])
data["count_of_lands"] += 1
data[chat_id] = {
"name":message.text,
"lvl":1,
"wave":1,
"coins":100,
"armament": {
"active": {
location: {
"soldiers": {"hp":[],"count":3},
},
},
"passive": {
"soldiers": 5,
"turrets": 1,
}
},
"location": location,
"locations": [location],
"action": "start",
}
menu(chat_id)
elif data[chat_id]["action"] == "wave - count shot":
num = num_validator(message.text)
if num != False:
if num > data[chat_id]["attack points"][data[chat_id]["choosed armament"]]:
num = data[chat_id]["attack points"][data[chat_id]["choosed armament"]]
num1 = num
while num > 0 and data[chat_id]["target"] in data[chat_id]["enemies"]:
enemies = data[chat_id]["enemies"]
armament = data[chat_id]["choosed armament"]
for weapon in units_data[armament]["weapons"]:
for temp in range(units_data[armament]["weapons"][weapon]):
one_shot(chat_id, weapon)
num -= 1
data[chat_id]["attack points"][data[chat_id]["choosed armament"]] -= num1 - num
check_after_shot(chat_id)
else:
id = bot.send_message(chat_id,"Please input correct number").message_id
add_delete_message(chat_id,id)
elif data[chat_id]["action"] == "set armament":
num = num_validator(message.text)
if num!=False:
set_armament(chat_id,data[chat_id]["target"],num)
menu(chat_id)
else:
add_delete_message(chat_id, bot.send_message(chat_id,"Please input correct number:").message_id)
@bot.callback_query_handler(lambda call: True)
def get_callback(call):
chat_id = call.message.chat.id
if chat_id not in data:
return
player = data[chat_id]
for chat in delete_data:
for i in range(len(delete_data[chat])):
id = delete_data[chat][0]
bot.delete_message(chat,id)
delete_data[chat].remove(id)
bot.delete_message(chat_id,call.message.message_id)
if data[chat_id]["action"] == "Pick menu: Wave, Map, Set":
if call.data == "WAVE":
bot.answer_callback_query(call.id)
data[chat_id]["action"] = "wave"
wawe = waves_data[player["lvl"]][player["wave"]]
enemies = choice(wawe["enemies"])
data[chat_id]["enemies"] = {}
for enemy in enemies:
data[chat_id]["enemies"][enemy[-1]] = {}
for i in range(2): #pick count and coins
if "-" in enemy[i]:
coins = enemy[i].split("-")
coins = randint(int(coins[0]),int(coins[1]))
else:
coins = int(enemy[i])
point = ["count","coins"][i]
data[chat_id]["enemies"][enemy[-1]][point] = coins
if data[chat_id]["enemies"][enemy[-1]]["count"] <= 0:
data[chat_id]["enemies"].pop(enemy[-1])
else:
data[chat_id]["enemies"][enemy[-1]]["hp"] = []
data[chat_id]["reward"] = {}
wave_func(chat_id)
elif call.data == "SET ARMAMENT":
data[chat_id]["action"] = "set armament"
keyboard = types.InlineKeyboardMarkup()
for unit in data[chat_id]["armament"]["passive"]:
keyboard.add(types.InlineKeyboardButton("{} x {}".format(unit,data[chat_id]["armament"]["passive"][unit]),callback_data=unit))
keyboard.add(types.InlineKeyboardButton("Cancel",callback_data="CANCEL"))
bot.send_message(chat_id,"Pick units:",reply_markup=keyboard)
elif call.data == "MAP":
keyboard = types.InlineKeyboardMarkup()
keyboard.add(types.InlineKeyboardButton())
elif call.data == "loose":
bot.answer_callback_query(chat_id)
elif data[chat_id]["action"] == "set armament":
if call.data == "CANCEL":
return menu(chat_id)
elif call. data == "set armament - 1":
set_armament(chat_id,data[chat_id]["target"],1)
menu(chat_id)
elif call.data == "set armament - count":
add_delete_message(chat_id, bot.send_message(chat_id, "Please input count of units to set:").message_id)
elif call.data == "set armament - all":
set_armament(chat_id, data[chat_id]["target"], data[chat_id]["armament"]["passive"][data[chat_id]["target"]])
menu(chat_id)
elif call.data == "set armament - cancel":
menu(chat_id)
else:
data[chat_id]["target"] = call.data
keyboard = types.InlineKeyboardMarkup()
keyboard.row(
types.InlineKeyboardButton("1", callback_data="set armament - 1"),
types.InlineKeyboardButton("Input count", callback_data="set armament - count"),
)
keyboard.row(
types.InlineKeyboardButton("All", callback_data="set armament - all"),
types.InlineKeyboardButton("Cancel", callback_data="set armament - cancel"),
)
bot.send_message(chat_id,"How many units want you to set?🔥",reply_markup=keyboard)
elif data[chat_id]["action"].startswith("wave - "):
if data[chat_id]["action"] == "wave - pick armament":
if call.data != "cancel":
print(1)
bot.answer_callback_query(call.id)
unit = call.data
data[chat_id]["choosed armament"] = unit
choose_target_class(chat_id)
else:
wave_menu(chat_id)
elif data[chat_id]["action"] == "wave - pick enemy":
bot.answer_callback_query(call.id)
if call.data != "cancel":
keyboard = types.InlineKeyboardMarkup()
keyboard.row(
types.InlineKeyboardButton("😐 SINGLE SHOT",callback_data="SINGLE"),
types.InlineKeyboardButton("😈 CHOOSE COUNT of shots",callback_data = "COUNT")
)
keyboard.row(
types.InlineKeyboardButton("😱 SHOOT TO KILL",callback_data="KILL"),
types.InlineKeyboardButton("🥺 CANCEL",callback_data = "CANCEL")
)
data[chat_id]["action"] = "wave - pick shot option"
data[chat_id]["target"] = call.data
bot.send_message(chat_id,text = "{} -> {}\nChoose way to destroy:".format(data[chat_id]["choosed armament"],data[chat_id]["target"]),reply_markup=keyboard)
else:
wave_menu(chat_id)
elif data[chat_id]["action"] == "wave - pick shot option":
if call.data != "CANCEL":
bot.answer_callback_query(call.id)
shot(chat_id, call.data)
else:
choose_target_class(chat_id)
elif data[chat_id]["action"] == "wave - enemies turn ends":
bot.answer_callback_query(call.id)
wave_func(chat_id)
def choose_target_class(chat_id):
    """Show a keyboard of enemy unit classes for the player to target.

    Each button lists the enemy class, its count, the HP of wounded units vs.
    max HP, and its armor values. Sets the game state to "wave - pick enemy".
    """
    player = data[chat_id]
    markup = types.InlineKeyboardMarkup()
    for enemy, info in player["enemies"].items():
        wounded_hp = ";".join(str(hp) for hp in info["hp"])
        label = "{} x {} 💚 {} / {} 🛡 {}".format(
            enemy, info["count"], wounded_hp,
            units_data[enemy]["hp"], ";".join(units_data[enemy]["armor"]))
        markup.add(types.InlineKeyboardButton(label, callback_data=enemy))
    markup.add(types.InlineKeyboardButton("cancel", callback_data="cancel"))
    player["action"] = "wave - pick enemy"
    chosen = player["choosed armament"]
    prompt = "Good! (Armament: {} (x{} left))\n Now choose class of enemies armament to 🗡destroy it:".format(
        chosen, player["attack points"][chosen])
    bot.send_message(chat_id, text=prompt, reply_markup=markup)
def level_reward(chat_id):
    """Grant the level-up coin bonus (50 coins per level) and announce it."""
    player = data[chat_id]
    bonus = player["lvl"] * 50
    player["coins"] += bonus
    notice = bot.send_message(chat_id, "😁☝NEW LEVEL : {}\n\n💰 + {}".format(player["lvl"], bonus))
    add_delete_message(chat_id, notice.message_id)
def victory(chat_id):
    """Finish a won wave: clear battle state, pay rewards, advance wave/level,
    and charge upkeep for the units stationed at the current location."""
    # Drop all transient battle state from the player record.
    data[chat_id].pop("attack points")
    data[chat_id].pop("choosed armament")
    data[chat_id].pop("target")
    data[chat_id].pop("enemies")
    data[chat_id].pop("reward")
    data[chat_id]["action"] = "wave - victory"
    # One random reward bundle from this wave's reward table.
    rewards = choice(waves_data[data[chat_id]["lvl"]][data[chat_id]["wave"]]["reward"])
    add_delete_message(chat_id, bot.send_message(chat_id, "🥳Victoryy!!!\n🎁 REWARD 🎁").message_id)
    for reward in rewards:
        # reward is a (amount, kind) pair; kind "coins" pays money,
        # anything else adds units to the passive (reserve) armament.
        if reward[1] == "coins":
            data[chat_id]["coins"] += int(reward[0])
            add_delete_message(chat_id, bot.send_message(chat_id, "You got {} 💸".format(reward[0])).message_id)
        else:
            add_delete_message(chat_id, bot.send_message(chat_id, "👍 You got {} {}".format(reward[0],reward[1])).message_id)
            if reward[1] in data[chat_id]["armament"]["passive"]:
                data[chat_id]["armament"]["passive"][reward[1]] += int(reward[0])
            else:
                data[chat_id]["armament"]["passive"][reward[1]] = int(reward[0])
    # Advance to the next wave, or to the next level when the level's waves
    # are exhausted (level-up also pays the level bonus).
    if data[chat_id]["wave"]+1 in waves_data[data[chat_id]["lvl"]]:
        data[chat_id]["wave"] += 1
    else:
        data[chat_id]["lvl"] += 1
        level_reward(chat_id)
        data[chat_id]["wave"] = 1
    # Upkeep: each active unit at the current location costs its "pay" value.
    # NOTE(review): local name `sum` shadows the builtin within this function.
    sum = 0
    for unit in data[chat_id]["armament"]["active"][data[chat_id]["location"]]:
        sum += units_data[unit]["pay"] * data[chat_id]["armament"]["active"][data[chat_id]["location"]][unit]["count"]
    add_delete_message(chat_id, bot.send_message(chat_id,"You need to pay {} $".format(sum)).message_id)
    # NOTE(review): coins may go negative here — no balance check; confirm intended.
    data[chat_id]["coins"] -= sum
    menu(chat_id)
def enemies_turn(chat_id,start = True):
    """Run (or resume) the enemies' attack phase.

    With start=True, announces the phase and grants each enemy class one
    attack point per living unit; then every enemy class with points left
    picks a random player unit class and fires until its points run out or
    the target class is wiped.
    """
    if start:
        add_delete_message(chat_id, bot.send_message(chat_id, "-----=== Enemies turn😡: ").message_id )
        data[chat_id]["attack points"] = {}
        for unit in data[chat_id]["enemies"]:
            data[chat_id]["attack points"][unit] = data[chat_id]["enemies"][unit]["count"]
    for enemy in data[chat_id]["enemies"]:
        # Skip classes that already spent their points on a previous resume.
        if enemy not in data[chat_id]["attack points"]:
            continue
        location = data[chat_id]["location"]
        # Random player unit class at the current location becomes the target.
        target = choice(list(data[chat_id]["armament"]["active"][location].keys()))
        data[chat_id]["target"] = target
        player_armament = data[chat_id]["armament"]["active"][location]
        # Spend attack points; each point fires every weapon the enemy carries.
        while data[chat_id]["attack points"][enemy] > 0 and target in player_armament:
            for weapon in units_data[enemy]["weapons"]:
                for temp in range(units_data[enemy]["weapons"][weapon]):
                    one_shot(chat_id, weapon, mode = "enemies")
            data[chat_id]["attack points"][enemy] -= 1
        if data[chat_id]["attack points"][enemy] == 0:
            data[chat_id]["attack points"].pop(enemy)
        # check_after_shot handles defeat / end-of-turn transitions; on False
        # the turn is over (or the player lost) and we must stop iterating.
        if check_after_shot(chat_id, "enemies") == False:
            return
def set_armament(chat_id, unit, count):
    """Move up to *count* units of class *unit* from the passive reserve into
    the active garrison at the player's current location."""
    # Clamp to what the reserve actually holds.
    if count > data[chat_id]["armament"]["passive"][unit]:
        count = data[chat_id]["armament"]["passive"][unit]
    data[chat_id]["armament"]["passive"][unit] -= count
    location = data[chat_id]["location"]
    # Drop the reserve entry entirely once it is empty.
    if data[chat_id]["armament"]["passive"][unit] == 0:
        data[chat_id]["armament"]["passive"].pop(unit)
    # Merge into the existing garrison entry, or create a fresh one
    # (hp list tracks only wounded units, so it starts empty).
    if unit in data[chat_id]["armament"]["active"][location]:
        data[chat_id]["armament"]["active"][location][unit]["count"] += count
    else:
        data[chat_id]["armament"]["active"][location][unit] = {
            "hp":[],
            "count":count
        }
def loose(chat_id):
    """Handle player defeat (name means "lose"): notify the player and
    permanently delete their save from the global data store."""
    keyboard = types.InlineKeyboardMarkup()
    keyboard.add(types.InlineKeyboardButton("OK :(", callback_data="loose"))
    bot.send_message(chat_id, "You loose(", reply_markup=keyboard)
    # Removing the record forces a fresh registration on the next /start.
    data.pop(chat_id)
def check_after_shot(chat_id, mode = "player"):
    """Advance the battle state machine after a shot.

    mode="player": win check, hand over to the enemies' turn when the
    player is out of attack points, or re-prompt for a target.
    mode="enemies": defeat check, end-of-turn prompt, or resume the
    enemies' loop.

    NOTE(review): every path ends at the trailing ``return False`` (or the
    explicit one after loose), so the function always returns False — callers
    that test ``== False`` always take that branch; confirm this is intended.
    """
    if mode == "player":
        if len(data[chat_id]["enemies"].keys()) == 0:
            # All enemies dead: wave won.
            victory(chat_id)
        elif not any([data[chat_id]["attack points"][unit] > 0 for unit in data[chat_id]["attack points"]]):
            # Player spent every attack point: enemies act next.
            data[chat_id]["action"] = "wave - enemies turn"
            data[chat_id]["attack points"] = {}
            enemies_turn(chat_id)
        elif data[chat_id]["attack points"][data[chat_id]["choosed armament"]] > 0:
            # Current armament can still fire: ask for the next target.
            choose_target_class(chat_id)
        else:
            # Current armament exhausted: back to armament selection.
            wave_menu(chat_id)
    elif mode == "enemies":
        location = data[chat_id]["location"]
        if len(data[chat_id]["armament"]["active"][location].keys()) == 0:
            # Player's garrison wiped out: defeat.
            loose(chat_id)
            return False
        elif not any([data[chat_id]["attack points"][unit] > 0 for unit in data[chat_id]["attack points"]]):
            # Enemies spent all points: wait for the player to acknowledge.
            data[chat_id]["action"] = "wave - enemies turn ends"
            keyboard = types.InlineKeyboardMarkup()
            keyboard.add(types.InlineKeyboardButton("OK",callback_data="OK"))
            bot.send_message(chat_id,"OK? :)", reply_markup=keyboard)
        else:
            # Enemies still have points: continue their loop without re-announcing.
            enemies_turn(chat_id, False)
    return False
def shot(chat_id, option):
if data[chat_id]["attack points"][data[chat_id]["choosed armament"]] > 0:
data[chat_id]["action"] = "wave - shot"
if option == "SINGLE":
data[chat_id]["attack points"][data[chat_id]["choosed armament"]] -= 1
enemies = data[chat_id]["enemies"]
armament = data[chat_id]["choosed armament"]
for weapon in units_data[armament]["weapons"]:
for temp in range(units_data[armament]["weapons"][weapon]):
one_shot(chat_id, weapon)
check_after_shot(chat_id)
elif option == "COUNT":
data[chat_id]["action"] = "wave - count shot"
id = bot.send_message(chat_id, "Input count ({} left):".format(data[chat_id]["attack points"][data[chat_id]["choosed armament"]])).message_id
add_delete_message(chat_id,id)
elif option == "KILL":
while data[chat_id]["attack points"][data[chat_id]["choosed armament"]] > 0 and data[chat_id]["target"] in data[chat_id]["enemies"]:
enemies = data[chat_id]["enemies"]
armament = data[chat_id]["choosed armament"]
for weapon in units_data[armament]["weapons"]:
for temp in range(units_data[armament]["weapons"][weapon]):
one_shot(chat_id, weapon)
data[chat_id]["attack points"][data[chat_id]["choosed armament"]] -= 1
check_after_shot(chat_id)
else:
add_delete_message(chat_id, bot.send_message(chat_id,"😭Count of loaded {} = 0".format(data[chat_id]["choosed armament"])).message_id)
wave_menu(chat_id)
def one_shot(chat_id, weapon, mode = "player"):
if mode == "player":
enemies = data[chat_id]["enemies"]
elif mode == "enemies":
enemies = data[chat_id]["armament"]["active"][data[chat_id]["location"]]
if weapons_data[weapon]["control"] == "on":
target = data[chat_id]["target"]
target_index = enemies[target]["count"] - 1
elif weapons_data[weapon]["control"] == "off":
target = choice([key for key in enemies.keys() if enemies[key]["count"] != 0])
target_index = randint(0,enemies[target]["count"] - 1)
hp = units_data[target]["hp"]
armor = units_data[target]["armor"][["physical","magic","fire"].index(weapons_data[weapon]["type"])]
if "-" in armor:
armor = armor.split("-")
armor = randint(int(armor[0]),int(armor[1]))
else:
armor = int(armor)
damage = weapons_data[weapon]["damage"]
if "-" in damage:
damage = damage.split("-")
damage = randint(int(damage[0]),int(damage[1]))
else:
damage = int(damage)
if target_index < enemies[target]["count"] - len(enemies[target]["hp"]):
target_hp = hp - (damage - armor)
if target_hp > 0:
id = bot.send_message(chat_id,"{} ranen by {} ({} / {})".format(target, weapon, target_hp, units_data[target]["hp"])).message_id
add_delete_message(chat_id, id)
enemies[target]["hp"].insert(0,target_hp)
else:
kill(chat_id,weapon,target,enemies,mode)
check_effects(chat_id,weapon,target,"post kill",mode,-target_hp)
else:
target_index = enemies[target]["count"] - target_index - 1
enemies[target]["hp"][target_index] -= (damage - armor)
if enemies[target]["hp"][target_index] <= 0:
kill(chat_id, weapon, target, enemies, mode)
target_hp = -enemies[target]["hp"][target_index]
enemies[target]["hp"].pop(target_index)
check_effects(chat_id,weapon,target,"post kill",mode,target_hp)
else:
id = bot.send_message(chat_id,"{} ranen by {} ({} / {})".format(target, weapon, enemies[target]["hp"][target_index], units_data[target]["hp"])).message_id
add_delete_message(chat_id, id)
print(enemies)
if enemies[target]["count"] <= 0:
enemies.pop(target)
def check_effects(chat_id,weapon,target,clss,mode = "player", stock_dmg = False):
    """Apply weapon special effects for the trigger class *clss*.

    Currently only "post kill" / "full damage" is handled: overkill damage
    (*stock_dmg*) is carried over as an extra "splinter" shot.

    NOTE(review): this mutates the shared, module-level ``weapons_data`` dict
    ("splinter" entry) — not per-player state; concurrent battles would
    overwrite each other's splinter. Consider a local weapon record instead.
    NOTE(review): ``check_after_shot`` appears to always return False (see its
    trailing return), which would make the splinter shot below unreachable —
    verify against gameplay.
    """
    if clss == "post kill":
        if "full damage" in weapons_data[weapon]["effects"] and stock_dmg > 0:
            # Synthesize a one-off weapon carrying the leftover damage,
            # inheriting damage type / control / effects from the killer.
            weapons_data["splinter"] = {
                "place":"0",
                "damage":str(stock_dmg),
                "type":weapons_data[weapon]["type"],
                "control":weapons_data[weapon]["control"],
                "effects":weapons_data[weapon]["effects"]
            }
            print(weapons_data["splinter"])
            if check_after_shot(chat_id,mode) == False:
                return
            one_shot(chat_id,"splinter",mode)
def kill(chat_id, weapon, target, enemies, mode = "player"):
    """Register the death of one *target* unit in *enemies*.

    In player mode the target's coin bounty is credited to the player.
    The kill announcement is queued for later cleanup.
    """
    text = "{} 🤯killed by {}".format(target, weapon)
    if mode == "player":
        # Bounty only when the player made the kill.
        coins = data[chat_id]["enemies"][target]["coins"]
        data[chat_id]["coins"] += coins
        text += " (+{}$ now {})".format(coins, data[chat_id]["coins"])
    # NOTE(review): local name `id` shadows the builtin within this function.
    id = bot.send_message(chat_id, text).message_id
    add_delete_message(chat_id, id)
    enemies[target]["count"] -= 1
def wave_start(chat_id):
    """Open a wave's first turn: only units carrying a weapon with the
    "first blood" effect get attack points, then show the wave menu."""
    data[chat_id]["attack points"] = {}
    location = data[chat_id]["location"]
    for unit in data[chat_id]["armament"]["active"][location]:
        # One attack point per unit, but only for "first blood" armament.
        if any(["first blood" in weapons_data[weapon]["effects"] for weapon in units_data[unit]["weapons"]]):
            data[chat_id]["attack points"][unit] = data[chat_id]["armament"]["active"][location][unit]["count"]
    wave_menu(chat_id)
def wave_func(chat_id):
    """Start a regular player turn: every active unit at the current location
    gets one attack point per unit, then show the wave menu."""
    data[chat_id]["attack points"] = {}
    location = data[chat_id]["location"]
    for unit in data[chat_id]["armament"]["active"][location]:
        data[chat_id]["attack points"][unit] = data[chat_id]["armament"]["active"][location][unit]["count"]
    wave_menu(chat_id)
def wave_menu(chat_id):
text = "Your enemies 😡 :\n"
for enemy in data[chat_id]["enemies"]:
text += "{} : {} 💚 {} / {} 🛡 {}\n".format(enemy, data[chat_id]["enemies"][enemy]["count"],";".join([str(hp) for hp in data[chat_id]["enemies"][enemy]["hp"]]),
units_data[enemy]["hp"], ";".join(units_data[enemy]["armor"]))
add_delete_message(chat_id, bot.send_message(chat_id, text).message_id)
keyboard = types.InlineKeyboardMarkup()
location = data[chat_id]["location"]
for unit in data[chat_id]["attack points"]:
text = "{} x{}".format(unit, data[chat_id]["attack points"][unit])
if data[chat_id]["armament"]["active"][location][unit]["hp"] != []:
text += " 💚 "
for hp in data[chat_id]["armament"]["active"][location][unit]["hp"]:
text += " {};".format(hp)
text = text[:-1] + " / " + str(units_data[unit]["hp"])
min_dmg = sum([int(units_data[unit]["weapons"][weapon]*(weapons_data[weapon]["damage"].split("-")[0])) for weapon in units_data[unit]["weapons"]])
max_dmg = sum([int(units_data[unit]["weapons"][weapon] * (weapons_data[weapon]["damage"].split("-")[-1])) for weapon in units_data[unit]["weapons"]])
text += " {} - {}⚔".format(min_dmg, max_dmg)
keyboard.add(types.InlineKeyboardButton(text, callback_data=unit))
keyboard.add(types.InlineKeyboardButton("cancel", callback_data="cancel"))
data[chat_id]["action"] = "wave - pick armament"
bot.send_message(chat_id, "Pick armament for destroy:", reply_markup=keyboard)
def menu(id):
keyboard = types.InlineKeyboardMarkup()
keyboard.row(
types.InlineKeyboardButton("🤜Fight wave {} . {}".format(data[id]["lvl"],data[id]["wave"]), callback_data = "WAVE"),
types.InlineKeyboardButton("👁Watch map", callback_data = "MAP")
)
keyboard.row(
types.InlineKeyboardButton("🙄Set armament".format(data[id]["lvl"], data[id]["wave"]),
callback_data="SET ARMAMENT"),
)
text = "Mr👁️🗨️ --={}=--\n".format(data[id]["name"])
for key in data[id]:
if key == "":
pass
elif key != 'name':
if type(data[id][key]) == dict:
text += "\n---- {}:\n".format(key)
for key1 in data[id][key]:
if key1 == "active":
text += "-- active:\n"
location = data[id]["location"]
for unit in data[id]["armament"]["active"][location]:
text += " {} - {}".format(unit, data[id]["armament"]["active"][location][unit]["count"])
if data[id]["armament"]["active"][location][unit]["hp"] != []:
text += " 💚 "
for hp in data[id]["armament"]["active"][location][unit]["hp"]:
text += " {};".format(hp)
text = text[:-1] + " / " + str(units_data[unit]["hp"]) + "\n"
else:
text += "\n"
else:
if type(data[id][key][key1]) == dict:
text += "-- {}:\n".format(key1)
for key2 in data[id][key][key1]:
text += " {} - {}\n".format(key2, data[id][key][key1][key2])
else:
text += "{} - {}\n".format(key1, data[id][key][key1])
else:
if key == "coins":
text += "\n💰{} - {}\n".format(key, data[id][key])
else:
text += "\n{} - {}\n".format(key, data[id][key])
data[id]["action"] = "Pick menu: Wave, Map, Set"
bot.send_message(id, text=text,
reply_markup = keyboard)
bot.polling(none_stop=True)
| [
"larionov1001@gmail.com"
] | larionov1001@gmail.com |
66966bcb8d5c95f3387b0ff7cb5433c094feea13 | d862f807a5dce63b917945cef10ecf95f7139a9d | /OnlinerParser/urls.py | 653993e81f539159eec28bc501c6da593da012ce | [] | no_license | al1enjesus/onliner_parser | 4e8123be5b57078d154bdbc46be515b9d9cd9ab9 | 986f11858346018d8622eb8ad2266ee9780ab62f | refs/heads/master | 2023-04-21T10:20:11.499842 | 2021-05-04T18:10:33 | 2021-05-04T18:10:33 | 364,343,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 755 | py | """OnlinerParser URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
| [
"wdybelievein@gmail.com"
] | wdybelievein@gmail.com |
f09d13ea1ba4616512b010c728325a334bb69c4c | ac70b3e5545a50366b532116edabcd4c0fc109c6 | /src/siamese_stage/infer.py | 01e14d08d81229660eb0e747516125bb87dcdc05 | [
"MIT"
] | permissive | CrueLu/DogEmbeddings | 46d234646bac0414500514241274b757ed89617d | 10936d0fa6d2a8461d3f14f091f27b54511d2fdb | refs/heads/master | 2022-01-11T19:24:43.464627 | 2018-04-03T06:40:06 | 2018-04-03T07:11:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py | from siamese import SiameseNetwork
# Construct Siamese network
model, base_network = SiameseNetwork()
model.load_weights('/book/working/models/siamese.h5')
def intermediate(embs):
return base_network.predict(embs)
| [
"ericlzhao28@gmail.com"
] | ericlzhao28@gmail.com |
1d0267680a1567f9e4051c363035858665d4a384 | 11cb1f15a6b9850b9575b5bfd745e5053eba7c84 | /pooldin/app/convert.py | edcb0eab8a585d06eddb26057acd2e3e45909f71 | [] | no_license | pooldin/pooldin | fa39e61aa28e7042bb8814032fd5739c0ed3625e | 3287b9e6039844161ed103d588a32fc635768c85 | refs/heads/master | 2020-05-18T23:28:06.807082 | 2012-10-09T00:38:57 | 2012-10-11T18:05:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | def to_bool(value):
if value in ['True', 'true', '1']:
return True
if value in ['False', 'false', '0']:
return False
return bool(value)
def to_int(value):
try:
return int(value)
except (TypeError, ValueError):
return None
| [
"watson.collin@gmail.com"
] | watson.collin@gmail.com |
bdb4d72815badc2db40b8e3ded2e1bcad504b4f4 | fedc097c85f12de797d4becd31de47f80013a488 | /Python_Test/PySample1/com/djs/learn/profile/TestDebugger.py | 36c76cceb3f6629f951ff8e4cfb90166a4857737 | [
"Apache-2.0"
] | permissive | djsilenceboy/LearnTest | 6291e272da2abb873720dfa9a55d58cdc7556d35 | 91c1ee4875a740d8be48fc9d74098a37e2f5cae6 | refs/heads/master | 2023-07-05T14:47:52.223970 | 2023-06-29T11:51:14 | 2023-06-29T11:51:14 | 61,952,430 | 3 | 3 | Apache-2.0 | 2022-12-16T09:01:24 | 2016-06-25T16:43:34 | Java | UTF-8 | Python | false | false | 271 | py | '''
Created on May 2, 2017
@author: dj
'''
def calc(a, b, c):
print("This is beginning.")
import pdb
pdb.set_trace()
print("To be end.")
calc(1, 2, 3)
# PDB command:
# bt, up, down
# step, next, return, continue
if __name__ == '__main__':
pass
| [
"djdarkguardian@gmail.com"
] | djdarkguardian@gmail.com |
86fcf6c74099436224d26ca4ccd9c27e498fcc0c | 11f7add72635ad985b3e98fd77e9426e8c74ab08 | /growthintel/growthintel.py | 7d641b9bad3fbb521fa21f28b0a5e9ec17603326 | [] | no_license | harshdarji/python | afa6b11338504567ece8bb1e78e841d13716ff14 | 8bad854304f423264b7b0724b87c7cd7de748cd6 | refs/heads/master | 2020-12-31T01:48:04.439466 | 2012-09-13T09:22:58 | 2012-09-13T09:22:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,219 | py | APIKey="00a5f2164e5e49b29981ddcbe9df7d7c"
import parse
import json
output={"type":"FeatureCollection","features":[]}
postcodes=["AB","AL","B","BA","BB","BD","BH","BL","BN","BR","BS","BT","CA","CB","CF","CH","CM","CO","CR","CT","CV","CW","DA","DD","DE","DG","DH","DL","DN","DT","DY","E","EC","EH","EN","EX","FK","FY","G","GL","GU","HA","HD","HG","HP","HR","HS","HU","HX","IG","IM","IP","IV","KA","KT","KW","KY","L","LA","LD","LE","LL","LN","LS","LU","M","ME","MK","ML","N","NE","NG","NN","NP","NR","NW","OL","OX","PA","PE","PH","PL","PO","PR","RG","RH","RM","S","SA","SE","SG","SK","SL","SM","SN","SO","SP","SR","SS","ST","SW","SY","TA","TD","TF","TN","TQ","TR","TS","TW","UB","W","WA","WC","WD","WF","WN","WR","WS","WV","YO","ZE","BR","CR","DA","E","EC","EN","HA","IG","KT","N","NW","RM","SE","SM","SW","TW","UB","W","WC","WD"]
stuburl="http://geocoding.cloudmade.com/"+APIKey+"/geocoding/v2/find.js?query=postcode:"
urlend="&return_geometry=true"
for postcode in postcodes:
url=stuburl+postcode+urlend;
data=json.loads(htmlify(url))
print postcode,len(url)
if ("features" in data):
output["features"].append(data["features"][0])
else:
print "no data found for "+postcode
| [
"jeromecukier@gmail.com"
] | jeromecukier@gmail.com |
276d31696b5798d10682159add96252a4f789158 | e0b53df2148aa7d18478d1c3735d2186048496e6 | /sly_lexer_parser/fr_lexer.py | 678aacd0ca587cf5d400ebe5d7c98e5faa831dfb | [] | no_license | fherbine/trainingPython-Kivy | bb677150900f2d1c885ef712ac4f6c28ab275568 | 10d973bc939bc2b48b16acc59d259f955513409e | refs/heads/master | 2020-04-01T02:00:21.776997 | 2019-10-07T13:13:08 | 2019-10-07T13:13:08 | 152,761,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | from sly import Lexer
class FrenchCalcLexer(Lexer):
tokens = { ID, NOMBRE, PLUS, MOINS, MULTIPLIER,
DIVISER, ASSIGNER, GPAREN, RPAREN }
ignore = ' \t'
ID = r'[a-zA-Z_][a-zA-Z0-9_]*'
NOMBRE = r'\d+'
PLUS = r'\+'
MOINS = r'-'
MULTIPLIER = r'\*'
DIVISER = r'/'
ASSIGNER = r'='
GPAREN = r'\('
RPAREN = r'\)'
if __name__ == '__main__':
lexer = FrenchCalcLexer()
while True:
data = input('>>> ')
for token in lexer.tokenize(data):
print('type={t_type}, value={t_val}'.format(
t_type=token.type,
t_val=repr(token.value),
))
| [
"felix.herbinet@yahoo.com"
] | felix.herbinet@yahoo.com |
acc028e66e697d07b577ed54511081c0cd448e1f | 2af2779df24a94d80ee578b23b4cb41c6a5b520c | /scripts/blink_led2moorse.py | 13bce0d22bcee960c1854801853b712cf599281c | [] | no_license | vigneshragupathy/blog | 81221405383531270d8d86b97227965fdadd1239 | 0a63af59e2e113fdd8c30e2acee56fc857421932 | refs/heads/master | 2023-01-03T09:40:18.091683 | 2020-10-18T12:47:49 | 2020-10-18T12:47:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,254 | py | # Import the GPIO and time libraries
import RPi.GPIO as GPIO
import time
#####Moorse code definition######
CODE = {' ': ' ',
"'": '.----.',
'(': '-.--.-',
')': '-.--.-',
',': '--..--',
'-': '-....-',
'.': '.-.-.-',
'/': '-..-.',
'0': '-----',
'1': '.----',
'2': '..---',
'3': '...--',
'4': '....-',
'5': '.....',
'6': '-....',
'7': '--...',
'8': '---..',
'9': '----.',
':': '---...',
';': '-.-.-.',
'?': '..--..',
'A': '.-',
'B': '-...',
'C': '-.-.',
'D': '-..',
'E': '.',
'F': '..-.',
'G': '--.',
'H': '....',
'I': '..',
'J': '.---',
'K': '-.-',
'L': '.-..',
'M': '--',
'N': '-.',
'O': '---',
'P': '.--.',
'Q': '--.-',
'R': '.-.',
'S': '...',
'T': '-',
'U': '..-',
'V': '...-',
'W': '.--',
'X': '-..-',
'Y': '-.--',
'Z': '--..',
'_': '..--.-'}
######End of moorse code definition######
# Set the pin designation type.
# In this case, we use BCM- the GPIO number- rather than the pin number itself.
GPIO.setmode (GPIO.BCM)
# So that you don't need to manage non-descriptive numbers,
# set "LIGHT" to 4 so that our code can easily reference the correct pin.
LIGHT = 4
# Because GPIO pins can act as either digital inputs or outputs,
# we need to designate which way we want to use a given pin.
# This allows us to use functions in the GPIO library in order to properly send and receive signals.
GPIO.setup(LIGHT,GPIO.OUT)
def dot():
GPIO.output(LIGHT,True)
time.sleep(0.2)
GPIO.output(LIGHT,False)
time.sleep(0.2)
def dash():
GPIO.output(LIGHT,True)
time.sleep(0.5)
GPIO.output(LIGHT,False)
time.sleep(0.2)
try:
while True:
input = raw_input('What would you like to send? ')
for letter in input:
for symbol in CODE[letter.upper()]:
if symbol == '-':
dash()
elif symbol == '.':
dot()
else:
time.sleep(0.5)
time.sleep(0.5)
except KeyboardInterrupt:
GPIO.cleanup()
| [
"me@vikki.in"
] | me@vikki.in |
32d654a348320c21302268c73278d97e9540c221 | 82fce9aae9e855a73f4e92d750e6a8df2ef877a5 | /Lab/venv/lib/python3.8/site-packages/OpenGL/raw/GLES2/NV/framebuffer_blit.py | ada66373b0f3e2262f88cc60a536d0ce0840c18b | [] | no_license | BartoszRudnik/GK | 1294f7708902e867dacd7da591b9f2e741bfe9e5 | 6dc09184a3af07143b9729e42a6f62f13da50128 | refs/heads/main | 2023-02-20T19:02:12.408974 | 2021-01-22T10:51:14 | 2021-01-22T10:51:14 | 307,847,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,008 | py | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p
from OpenGL.constant import Constant as _C
# End users want this...
from OpenGL.raw.GLES2 import _errors
# Code generation uses this
from OpenGL.raw.GLES2 import _types as _cs
_EXTENSION_NAME = 'GLES2_NV_framebuffer_blit'
def _f(function):
return _p.createFunction(function, _p.PLATFORM.GLES2, 'GLES2_NV_framebuffer_blit',
error_checker=_errors._error_checker)
GL_DRAW_FRAMEBUFFER_BINDING_NV = _C('GL_DRAW_FRAMEBUFFER_BINDING_NV', 0x8CA6)
GL_DRAW_FRAMEBUFFER_NV = _C('GL_DRAW_FRAMEBUFFER_NV', 0x8CA9)
GL_READ_FRAMEBUFFER_BINDING_NV = _C('GL_READ_FRAMEBUFFER_BINDING_NV', 0x8CAA)
GL_READ_FRAMEBUFFER_NV = _C('GL_READ_FRAMEBUFFER_NV', 0x8CA8)
@_f
@_p.types(None, _cs.GLint, _cs.GLint, _cs.GLint, _cs.GLint, _cs.GLint, _cs.GLint, _cs.GLint, _cs.GLint, _cs.GLbitfield,
_cs.GLenum)
def glBlitFramebufferNV(srcX0,srcY0,srcX1,srcY1,dstX0,dstY0,dstX1,dstY1,mask,filter):pass
| [
"rudnik49@gmail.com"
] | rudnik49@gmail.com |
c520dd7bddfc90caab08af4bb0a8517af01ac98a | 0422bca79c61ee4630f7c762e77ca9780b05e3ff | /pattern/text/nl/parser/__init__.py | 590127d45bbbde61c6675e815462d4cc4e8842d0 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | kkoch986/pattern | 69ac11d12ea4fd2bb581e3343bca021cf6e46c92 | db807c888dcd15f515afe31753c9b0345a11b542 | refs/heads/master | 2021-01-18T04:26:13.078631 | 2013-02-14T03:29:25 | 2013-02-14T03:29:25 | 8,186,866 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,508 | py | #### PATTERN | NL | RULE-BASED SHALLOW PARSER ######################################################
# Copyright (c) 2010 Jeroen Geertzen and University of Antwerp, Belgium
# Authors: Jeroen Geertzen (Dutch language model), Tom De Smedt <tom@organisms.be>
# License: BSD (see LICENSE.txt for details).
# http://www.clips.ua.ac.be/pages/pattern
####################################################################################################
import re
import os
try:
MODULE = os.path.dirname(__file__)
except:
MODULE = ""
# The tokenizer, chunker and relation finder are inherited from pattern.en.parser.
# The tagger is based on Jeroen Geertzen's Dutch language model Brill-NL
# (brill-bigrams.txt, brill-contextual.txt, brill-lexical.txt, brill-lexicon.txt):
# http://cosmion.net/jeroen/software/brill_pos/
# Accuracy is reported around 92%, but Pattern scores may vary from Geertzen's original
# due to WOTAN => Penn Treebank mapping etc.
try:
from ...en.parser import Lexicon
from ...en.parser import PUNCTUATION, tokenize as _en_tokenize, parse as _en_parse, TaggedString
from ...en.parser import commandline
except:
import sys; sys.path.insert(0, os.path.join(MODULE, "..", ".."))
from en.parser import Lexicon
from en.parser import PUNCTUATION, tokenize as _en_tokenize, parse as _en_parse, TaggedString
from en.parser import commandline
#### TOKENIZER #####################################################################################
abbreviations = [
"a.d.h.v.", "afb.", "a.u.b.", "bv.", "b.v.", "bijv.", "blz.", "ca.", "cfr.", "dhr.", "dr.",
"d.m.v.", "d.w.z.", "e.a.", "e.d.", "e.g.", "enz.", "etc.", "e.v.", "evt.", "fig.", "i.e.",
"i.h.b.", "ir.", "i.p.v.", "i.s.m.", "m.a.w.", "max.", "m.b.t.", "m.b.v.", "mevr.", "min.",
"n.a.v.", "nl.", "n.o.t.k.", "n.t.b.", "n.v.t.", "o.a.", "ong.", "pag.", "ref.", "t.a.v.",
"tel.", "zgn."]
def tokenize(s, punctuation=PUNCTUATION, abbreviations=abbreviations, replace={"'n": " 'n"}):
# 's in Dutch preceded by a vowel indicates plural ("auto's"): don't replace.
s = _en_tokenize(s, punctuation, abbreviations, replace)
s = [re.sub(r"' s (ochtends|morgens|middags|avonds)", "'s \\1", s) for s in s]
return s
_tokenize = tokenize
#### LEMMATIZER ####################################################################################
# Word lemmas using singularization and verb conjugation from the inflect module.
try:
from ..inflect import singularize, conjugate, predicative
except:
try:
sys.path.append(os.path.join(MODULE, ".."))
from inflect import singularize, conjugate, predicative
except:
try:
from pattern.nl.inflect import singularize, conjugate, predicative
except:
singularize = lambda w: w
conjugate = lambda w, t: w
predicative = lambda w: w
def lemma(word, pos="NN"):
if pos == "NNS":
return singularize(word)
if pos.startswith(("VB","MD")):
return conjugate(word, "infinitive") or word
if pos.startswith("JJ") and word.endswith("e"):
return predicative(word)
return word
def find_lemmata(tagged):
for token in tagged:
token.append(lemma(token[0].lower(), pos=len(token) > 1 and token[1] or None))
return tagged
#### PARSER ########################################################################################
# pattern.en.find_tags() has an optional "lexicon" parameter.
# We'll pass the Dutch lexicon to it instead of the default English lexicon:
lexicon = LEXICON = Lexicon()
lexicon.path = os.path.join(MODULE, "brill-lexicon.txt")
lexicon.lexical_rules.path = os.path.join(MODULE, "brill-lexical.txt")
lexicon.contextual_rules.path = os.path.join(MODULE, "brill-contextual.txt")
lexicon.named_entities.tag = "N(eigen,ev)"
# WOTAN tagset:
# http://lands.let.ru.nl/literature/hvh.1999.2.ps
PENN = PENNTREEBANK = TREEBANK = "penntreebank"
WOTAN = "wotan"
wotan = {
"N(": [("eigen,ev","NNP"), ("eigen,mv","NNPS"), ("ev","NN"), ("mv","NNS")],
"V(": [("hulp","MD"), ("ott,3","VBZ"), ("ott","VBP"), ("ovt","VBD"), ("verldw","VBN"), ("tegdw","VBG"), ("imp","VB"), ("inf","VB")],
"Adj(": [("stell","JJ"), ("vergr","JJR"), ("overtr","JJS")],
"Adv(": [("deel","RP"), ("gew","RB"), ("pro","RB")],
"Art(": "DT",
"Conj(": "CC",
"Num(": "CD",
"Prep(": [("voorinf","TO"), ("", "IN")],
"Pron(": [("bez","PRP$"), ("","PRP")],
"Punc(": [("komma",","), ("haakopen","("), ("haaksluit",")"), ("",".")],
"Int": "UH",
"Misc": [("symbool","SYM"), ("vreemd","FW")]
}
def wotan2penntreebank(tag):
""" Converts a WOTAN tag to Penn Treebank II tag.
For example: bokkenrijders N(soort,mv,neut) => bokkenrijders/NNS
"""
for k,v in wotan.iteritems():
if tag.startswith(k):
if not isinstance(v, list):
return v
for a,b in v:
if a in tag.replace("_",""): return b
return tag
return tag
def parse(s, tokenize=True, tags=True, chunks=True, relations=False, lemmata=False, encoding="utf-8", **kwargs):
""" Takes a string (sentences) and returns a tagged Unicode string.
Sentences in the output are separated by newlines.
"""
if tokenize:
s = _tokenize(s)
# Reuse the English parser:
kwargs.update({
"lemmata": False,
"light": False,
"lexicon": LEXICON,
"language": "nl",
"default": "N(soort,ev,neut)",
"map": kwargs.get("tagset", "") != WOTAN and wotan2penntreebank or None,
})
s = _en_parse(s, False, tags, chunks, relations, **kwargs)
# Use pattern.nl.inflect for lemmatization:
if lemmata:
p = [find_lemmata(sentence) for sentence in s.split()]
s = TaggedString(p, tags=s.tags+["lemma"], language="nl")
return s
def tag(s, tokenize=True, encoding="utf-8"):
""" Returns a list of (token, tag)-tuples from the given string.
"""
tags = []
for sentence in parse(s, tokenize, True, False, False, False, encoding).split():
for token in sentence:
tags.append((token[0], token[1]))
return tags
#### COMMAND LINE ##################################################################################
# From the folder that contains the "pattern" folder:
# python -m pattern.nl.parser xml -s "De kat wil wel vis eten maar geen poot nat maken." -OTCLI
if __name__ == "__main__":
commandline(parse) | [
"tom@organisms.be"
] | tom@organisms.be |
cfd7b02747987b034cf4487a4f0d184a14a698da | 43a4fd934407a963c876dc1b15fd7052e693592b | /GearBot/Util/RaidHandling/RaidShield.py | 2920e7cee1e644e445fd3d504d578a43fb9c6170 | [
"MIT"
] | permissive | mrkirby153/GearBot | aa02fdd32bec292a54e8e3805bd829f16522cdc8 | a61d936c12e906c7bcfaa840a585ee25ffc06138 | refs/heads/master | 2020-05-29T18:59:55.450101 | 2019-06-02T09:54:18 | 2019-06-02T09:54:18 | 189,318,104 | 0 | 0 | MIT | 2019-05-30T00:31:06 | 2019-05-30T00:31:06 | null | UTF-8 | Python | false | false | 1,444 | py | from Util import GearbotLogging
from Util.RaidHandling import RaidActions
class RaidShield:
def __init__(self, shield_info) -> None:
self.shield_name=shield_info["name"]
self.start_actions = [action for action in shield_info["actions"]["triggered"]]
self.raider_actions = [action for action in shield_info["actions"]["raider"]]
self.termination_actions = [action for action in shield_info["actions"]["terminated"]]
async def raid_detected(self, bot, guild, raid_id, raider_ids, shield):
GearbotLogging.log_to(guild.id, "raid_shield_triggered", raid_id=raid_id, name=self.shield_name)
await self.handle_actions(self.start_actions, bot, guild, raid_id, raider_ids, shield)
async def handle_raider(self, bot, raider, raid_id, raider_ids, shield):
await self.handle_actions(self.raider_actions, bot, raider, raid_id, raider_ids, shield)
async def shield_terminated(self, bot, guild, raid_id, raider_ids, shield):
GearbotLogging.log_to(guild.id, "raid_shield_terminated", raid_id=raid_id, name=self.shield_name)
await self.handle_actions(self.termination_actions, bot, guild, raid_id, raider_ids, shield)
async def handle_actions(self, actions, bot, o, raid_id, raider_ids, shield):
for a in actions:
action = RaidActions.handlers[a["type"]]
await action.execute(bot, o, a["action_data"], raid_id, raider_ids, shield)
| [
"aenterprise2@gmail.com"
] | aenterprise2@gmail.com |
c14063ae8b5ff7c11f6410404f09ba2e0c88559e | 8c20ced083224b8afb7c8337265810a350b916f6 | /api/predictor.py | 77a427701068cff081e62c3750c613ac0cc0ae26 | [] | no_license | girginsoft/soyaddanparti | d0947797335cf00b9295335abde3686b67d86ac1 | 4a95ac6f487ab2e893cc36e6b7904e5cd1445127 | refs/heads/master | 2020-05-09T21:14:26.422251 | 2019-04-15T08:03:14 | 2019-04-15T08:03:14 | 181,436,415 | 5 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,186 | py | from nltk import ngrams
from sklearn.externals import joblib
from collections import OrderedDict
from flask import jsonify
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
def load_feature_list():
response = S3.get_object(Bucket=BUCKET_NAME, Key=FEATURE_LIST_FILE)
json_str = response['Body'].read()
return json.loads(json_str)
def load_model(key):
# Load model from S3 bucket
response = S3.get_object(Bucket=BUCKET_NAME, Key=key)
model_str = response['Body'].read()
model = pickle.loads(model_str)
return model
S3 = boto3.client('s3', region_name='eu-central-1')
BUCKET_NAME = 'predictorr'
MODEL_FILE_NAME = 'decision_tree.sav'
FEATURE_LIST_FILE = 'feature_list.json'
MODEL_ = load_model(MODEL_FILE_NAME)
FEATURES_ = load_feature_list()
def prepare_input_data(surname, conservative, opposition):
grams = ngrams(surname, 2)
features = FEATURES_
inp = np.zeros((1, len(features)))
for gram in grams:
bigram = str(gram[0]) + str(gram[1])
try:
index = features.index(bigram)
inp[0][index] = 1
except:
pass
conv_index = features.index("conservative")
ops_index = features.index("opposition")
inp[0][conv_index] = conservative
inp[0][ops_index] = (1 - opposition)
return inp
@app.route('/', methods=['POST', 'GET', 'OPTIONS'])
def index():
# Parse request body for model input
body_dict = request.get_json(silent=True)
try:
surname = body_dict['surname']
conserv = body_dict['conservative']
opposition = body_dict['opposition']
inp = prepare_input_data(surname, int(conserv), int(opposition))
# Load model
model = MODEL_
prediction = model.predict_proba(inp)
result = {}
for i in range(0, len(model.classes_)):
result[model.classes_[i]] = prediction[0][i]
result = {'prediction': result}
response = jsonify(result)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
except:
return jsonify({'result': True})
if __name__ == '__main__':
# listen on all IPs
app.run(host='0.0.0.0')
| [
"noreply@github.com"
] | girginsoft.noreply@github.com |
3f4bbf3c6260dc4cc2bb2f44c44e22db6ea458bb | 214c3dccf41fc5e178a9c95083c82d0555b774d4 | /logic/LSTM.py | 845b95b2418811d74fc8e3c33eae94414332cb25 | [] | no_license | MichalSzewczyk/neural-network | 098d4d6797ddb1c598cb905e70f13a29d89fbad1 | 4eef8db1cf55d2616ecc1f75335a40bf048e0eba | refs/heads/master | 2020-04-17T14:37:53.019355 | 2019-01-27T16:59:05 | 2019-01-27T16:59:05 | 166,664,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,673 | py | import torch
from torch import nn, FloatTensor
class LSTM(nn.Module):
def __init__(self, word2embed, id2train, id2valid, batch_size, max_len_sent, embedding_dim, hidden_dim, output_dim):
super(LSTM, self).__init__()
self.word2embed = word2embed
self.id2train = id2train
self.id2valid = id2valid
self.batch_size = batch_size
self.max_len_sent = max_len_sent
self.input_dim = embedding_dim
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers=1, batch_first=True, dropout=0.1)
self.hidden2out = nn.Linear(hidden_dim, output_dim)
self.hidden = self.init_hidden(self.hidden_dim, batch_size)
def init_hidden(self, hidden_size, batch_size=1):
return (torch.zeros(1, batch_size, hidden_size),
torch.zeros(1, batch_size, hidden_size))
def get_embeddings_for_batch(self, sentences, mode):
print(sentences.shape)
id2words = self.id2train if mode == 'train' else self.id2valid
batch = FloatTensor()
for i, sent in enumerate(sentences):
for w in sent:
batch = torch.cat((batch, self.word2embed[id2words[w.item()]]), dim=-1)
return batch.reshape(self.batch_size, self.max_len_sent, -1)
def forward(self, sentences, mode):
word_embeds = self.get_embeddings_for_batch(sentences, mode)
lstm_out, self.hidden = self.lstm(word_embeds, self.hidden)
sent_embeds = self.hidden2out(self.hidden[0].contiguous().view((self.batch_size, -1)))
print(sent_embeds.shape)
return sent_embeds
| [
"szewczykm94@gmail.com"
] | szewczykm94@gmail.com |
882aa794bd871d4fa726624fca83d3207874b12b | 75351b49a226e6af5e46f6ab2d75e4ae22549cab | /mega/helpers/gdrive.py | e6a868d9ac089654fe8d6b446acf6f7f1f71a3ae | [] | no_license | wonkru-bot/megadlbot_oss | 31331570ae88de26260b7bd63e67c08a4243b23b | 9fa5d50b651c428048edd65d08943990095271f7 | refs/heads/master | 2022-12-25T14:39:07.526253 | 2020-10-06T12:03:17 | 2020-10-06T12:03:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,691 | py | import os
import base64
import logging
import aiofiles
import mimetypes
from googleapiclient.discovery import build
from google.oauth2 import service_account
from googleapiclient.http import MediaFileUpload
from mega.database.users import MegaUsers
from datetime import datetime, timedelta, timezone
class Gdrive:
def __init__(self):
self.scope = ["https://www.googleapis.com/auth/drive"]
async def upload_file(self, user_id: int, file: str):
user_details = await MegaUsers().get_user(user_id)
if "gdrive_key" in user_details:
key_file_location = f"mega/{user_details['gdrive_key_location']}"
if os.path.isfile(key_file_location) is not True:
async with aiofiles.open(key_file_location, mode='wb') as key_file_aio:
await key_file_aio.write(base64.decodebytes(user_details["gdrive_key"]))
credentials = service_account.Credentials.from_service_account_file(
filename=key_file_location, scopes=self.scope,
)
service = build('drive', 'v3', credentials=credentials)
file_name = os.path.basename(file)
file_metadata = {
'name': file_name,
'mimeType': (mimetypes.guess_type(file_name))[0]
}
media = MediaFileUpload(key_file_location,
mimetype=(mimetypes.guess_type(file_name))[0],
resumable=True)
gfile = service.files().create(body=file_metadata,
media_body=media,
fields='id').execute()
service.permissions().create(body={"role": "reader", "type": "anyone"}, fileId=gfile.get("id")).execute()
await Gdrive().clean_old_files(service)
return gfile
@staticmethod
async def clean_old_files(service: build):
twelve_hours_since_now = datetime.now(tz=timezone.utc) - timedelta(hours=12)
twelve_hours_since_now.strftime("%Y-%m-%dT%H:%M:%SZ")
date_str = twelve_hours_since_now.isoformat(timespec='milliseconds')
date_str = str(date_str).replace('+00:00', '')
response = service.files().list(q=f"modifiedTime < '{date_str}'",
spaces="drive",
fields="nextPageToken, files(id, name)").execute()
old_files = response.get("files")
for file in old_files:
logging.info(f"I am removing {file} from google drive since its already old enough.")
service.files().delete(fileId=file.get("id")).execute()
| [
"eyaadh@gmail.com"
] | eyaadh@gmail.com |
e00ea1cfb0a012d2762bd8d9ab89d6d147f0c170 | adb6755eb1a3d91375e6b4e9b8c1afd07f85313b | /venv/Lib/site-packages/pandas/tests/io/test_excel.py | c527451cc6d27da20440880d1bc8730d44c74e32 | [] | no_license | Atwinenickson/Socialmediaclassifier- | af54b559569e80004c441fc90dc44b0ee945555d | 05c5abbaba8694d9bf95d745ffca75c17ac69621 | refs/heads/master | 2022-12-15T01:33:18.073259 | 2019-06-07T15:38:18 | 2019-06-07T15:38:18 | 190,616,071 | 1 | 0 | null | 2022-12-08T05:13:29 | 2019-06-06T16:41:17 | Python | UTF-8 | Python | false | false | 102,135 | py | from collections import OrderedDict
import contextlib
from datetime import date, datetime, time, timedelta
from distutils.version import LooseVersion
from functools import partial
import os
import warnings
from warnings import catch_warnings
import numpy as np
from numpy import nan
import pytest
from pandas.compat import PY36, BytesIO, iteritems, map, range, u
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
from pandas.core.config import get_option, set_option
import pandas.util.testing as tm
from pandas.util.testing import ensure_clean, makeCustomDataframe as mkdf
from pandas.io.common import URLError
from pandas.io.excel import (
ExcelFile, ExcelWriter, _OpenpyxlWriter, _XlsxWriter, _XlwtWriter,
read_excel, register_writer)
from pandas.io.formats.excel import ExcelFormatter
from pandas.io.parsers import read_csv
# Module-level reference data, built once at import time.  Tests take
# ``.copy()``'s of these in ``SharedItems.setup_method`` so per-test
# mutations never leak between test cases.
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
# Truncated to the first 10 rows — presumably to keep Excel round-trips
# fast; TODO confirm the fixture files match this length.
_frame = DataFrame(_seriesd)[:10]
# Same data as _frame but with the column order reversed.
_frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])[:10]
_tsframe = tm.makeTimeDataFrame()[:5]
_mixed_frame = _frame.copy()
_mixed_frame['foo'] = 'bar'  # adds a string column, making the dtypes mixed
@contextlib.contextmanager
def ignore_xlrd_time_clock_warning():
    """
    Suppress the ``DeprecationWarning`` that xlrd triggers on Python 3.7
    about ``time.clock`` being deprecated.

    The previous warning filters are restored on exit, so the suppression
    is scoped strictly to the ``with`` body.
    """
    with warnings.catch_warnings():
        warnings.filterwarnings(
            'ignore',
            message='time.clock has been deprecated',
            category=DeprecationWarning,
        )
        yield
@td.skip_if_no('xlrd', '1.0.0')
class SharedItems(object):

    @pytest.fixture(autouse=True)
    def setup_method(self, datapath):
        # Fresh copies per test so in-place edits cannot bleed across tests.
        self.dirpath = datapath("io", "data")
        self.frame = _frame.copy()
        self.frame2 = _frame2.copy()
        self.tsframe = _tsframe.copy()
        self.mixed_frame = _mixed_frame.copy()

    def get_csv_refdf(self, basename):
        """
        Read the CSV counterpart of a fixture file with the Python engine
        and return it as the reference DataFrame.

        Parameters
        ----------
        basename : str
            File base name, excluding file extension.

        Returns
        -------
        DataFrame
        """
        ref_path = os.path.join(self.dirpath, basename + '.csv')
        return read_csv(ref_path, index_col=0, parse_dates=True,
                        engine='python')

    def get_excelfile(self, basename, ext):
        """
        Open a fixture workbook and return it as an ``ExcelFile``.

        Parameters
        ----------
        basename : str
            File base name, excluding file extension.
        ext : str
            File extension, including the leading dot.

        Returns
        -------
        io.excel.ExcelFile
        """
        return ExcelFile(os.path.join(self.dirpath, basename + ext))

    def get_exceldf(self, basename, ext, *args, **kwds):
        """
        Read a fixture workbook via ``read_excel`` and return the result.

        Parameters
        ----------
        basename : str
            File base name, excluding file extension.
        ext : str
            File extension, including the leading dot.
        *args, **kwds
            Forwarded verbatim to ``read_excel``.

        Returns
        -------
        DataFrame
        """
        return read_excel(os.path.join(self.dirpath, basename + ext),
                          *args, **kwds)
class ReadingTestsBase(SharedItems):
# This is based on ExcelWriterBase
@pytest.fixture(autouse=True, params=['xlrd', None])
def set_engine(self, request):
func_name = "get_exceldf"
old_func = getattr(self, func_name)
new_func = partial(old_func, engine=request.param)
setattr(self, func_name, new_func)
yield
setattr(self, func_name, old_func)
@td.skip_if_no("xlrd", "1.0.1") # see gh-22682
def test_usecols_int(self, ext):
df_ref = self.get_csv_refdf("test1")
df_ref = df_ref.reindex(columns=["A", "B", "C"])
# usecols as int
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
with ignore_xlrd_time_clock_warning():
df1 = self.get_exceldf("test1", ext, "Sheet1",
index_col=0, usecols=3)
# usecols as int
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
with ignore_xlrd_time_clock_warning():
df2 = self.get_exceldf("test1", ext, "Sheet2", skiprows=[1],
index_col=0, usecols=3)
# parse_cols instead of usecols, usecols as int
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
with ignore_xlrd_time_clock_warning():
df3 = self.get_exceldf("test1", ext, "Sheet2", skiprows=[1],
index_col=0, parse_cols=3)
# TODO add index to xls file)
tm.assert_frame_equal(df1, df_ref, check_names=False)
tm.assert_frame_equal(df2, df_ref, check_names=False)
tm.assert_frame_equal(df3, df_ref, check_names=False)
    @td.skip_if_no('xlrd', '1.0.1')  # GH-22682
    def test_usecols_list(self, ext):
        # usecols given as a list of positional indices; position 0 is the
        # index column, so only "B" and "C" remain as data columns.
        dfref = self.get_csv_refdf('test1')
        dfref = dfref.reindex(columns=['B', 'C'])
        df1 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0,
                               usecols=[0, 2, 3])
        df2 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
                               index_col=0, usecols=[0, 2, 3])
        # parse_cols is the deprecated alias and must warn
        with tm.assert_produces_warning(FutureWarning):
            with ignore_xlrd_time_clock_warning():
                df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
                                       index_col=0, parse_cols=[0, 2, 3])
        # TODO: add index to xls file
        tm.assert_frame_equal(df1, dfref, check_names=False)
        tm.assert_frame_equal(df2, dfref, check_names=False)
        tm.assert_frame_equal(df3, dfref, check_names=False)
    @td.skip_if_no('xlrd', '1.0.1')  # GH-22682
    def test_usecols_str(self, ext):
        # usecols given as Excel-style letter strings: a range ("A:D"),
        # a comma list ("A,C,D"), and a mix of both ("A,C:D").
        dfref = self.get_csv_refdf('test1')
        df1 = dfref.reindex(columns=['A', 'B', 'C'])
        df2 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0,
                               usecols='A:D')
        df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
                               index_col=0, usecols='A:D')
        # parse_cols is the deprecated alias and must warn
        with tm.assert_produces_warning(FutureWarning):
            with ignore_xlrd_time_clock_warning():
                df4 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
                                       index_col=0, parse_cols='A:D')
        # TODO: add index to xls, read xls ignores index name ?
        tm.assert_frame_equal(df2, df1, check_names=False)
        tm.assert_frame_equal(df3, df1, check_names=False)
        tm.assert_frame_equal(df4, df1, check_names=False)
        df1 = dfref.reindex(columns=['B', 'C'])
        df2 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0,
                               usecols='A,C,D')
        df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
                               index_col=0, usecols='A,C,D')
        # TODO: add index to xls file
        tm.assert_frame_equal(df2, df1, check_names=False)
        tm.assert_frame_equal(df3, df1, check_names=False)
        df1 = dfref.reindex(columns=['B', 'C'])
        df2 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0,
                               usecols='A,C:D')
        df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
                               index_col=0, usecols='A,C:D')
        tm.assert_frame_equal(df2, df1, check_names=False)
        tm.assert_frame_equal(df3, df1, check_names=False)
    @pytest.mark.parametrize("usecols", [
        [0, 1, 3], [0, 3, 1],
        [1, 0, 3], [1, 3, 0],
        [3, 0, 1], [3, 1, 0],
    ])
    def test_usecols_diff_positional_int_columns_order(self, ext, usecols):
        # The ordering of positional usecols must not matter: columns always
        # come back in sheet order (position 0 becomes the index).
        expected = self.get_csv_refdf("test1")[["A", "C"]]
        result = self.get_exceldf("test1", ext, "Sheet1",
                                  index_col=0, usecols=usecols)
        tm.assert_frame_equal(result, expected, check_names=False)
    @pytest.mark.parametrize("usecols", [
        ["B", "D"], ["D", "B"]
    ])
    def test_usecols_diff_positional_str_columns_order(self, ext, usecols):
        # Label-based usecols are order-insensitive as well.
        expected = self.get_csv_refdf("test1")[["B", "D"]]
        expected.index = range(len(expected))
        result = self.get_exceldf("test1", ext, "Sheet1", usecols=usecols)
        tm.assert_frame_equal(result, expected, check_names=False)
def test_read_excel_without_slicing(self, ext):
expected = self.get_csv_refdf("test1")
result = self.get_exceldf("test1", ext, "Sheet1", index_col=0)
tm.assert_frame_equal(result, expected, check_names=False)
def test_usecols_excel_range_str(self, ext):
expected = self.get_csv_refdf("test1")[["C", "D"]]
result = self.get_exceldf("test1", ext, "Sheet1",
index_col=0, usecols="A,D:E")
tm.assert_frame_equal(result, expected, check_names=False)
def test_usecols_excel_range_str_invalid(self, ext):
msg = "Invalid column name: E1"
with pytest.raises(ValueError, match=msg):
self.get_exceldf("test1", ext, "Sheet1", usecols="D:E1")
def test_index_col_label_error(self, ext):
msg = "list indices must be integers.*, not str"
with pytest.raises(TypeError, match=msg):
self.get_exceldf("test1", ext, "Sheet1", index_col=["A"],
usecols=["A", "C"])
    def test_index_col_empty(self, ext):
        # see gh-9208: a multi-column index_col over an empty sheet builds an
        # empty MultiIndex instead of failing.
        result = self.get_exceldf("test1", ext, "Sheet3",
                                  index_col=["A", "B", "C"])
        expected = DataFrame(columns=["D", "E", "F"],
                             index=MultiIndex(levels=[[]] * 3,
                                              codes=[[]] * 3,
                                              names=["A", "B", "C"]))
        tm.assert_frame_equal(result, expected)
    @pytest.mark.parametrize("index_col", [None, 2])
    def test_index_col_with_unnamed(self, ext, index_col):
        # see gh-18792: an unnamed first column survives as "Unnamed: 0" and
        # any column can still be selected as the index by position.
        result = self.get_exceldf("test1", ext, "Sheet4",
                                  index_col=index_col)
        expected = DataFrame([["i1", "a", "x"], ["i2", "b", "y"]],
                             columns=["Unnamed: 0", "col1", "col2"])
        if index_col:
            expected = expected.set_index(expected.columns[index_col])
        tm.assert_frame_equal(result, expected)
def test_usecols_pass_non_existent_column(self, ext):
msg = ("Usecols do not match columns, "
"columns expected but not found: " + r"\['E'\]")
with pytest.raises(ValueError, match=msg):
self.get_exceldf("test1", ext, usecols=["E"])
def test_usecols_wrong_type(self, ext):
msg = ("'usecols' must either be list-like of "
"all strings, all unicode, all integers or a callable.")
with pytest.raises(ValueError, match=msg):
self.get_exceldf("test1", ext, usecols=["E1", 0])
def test_excel_stop_iterator(self, ext):
parsed = self.get_exceldf('test2', ext, 'Sheet1')
expected = DataFrame([['aaaa', 'bbbbb']], columns=['Test', 'Test1'])
tm.assert_frame_equal(parsed, expected)
def test_excel_cell_error_na(self, ext):
parsed = self.get_exceldf('test3', ext, 'Sheet1')
expected = DataFrame([[np.nan]], columns=['Test'])
tm.assert_frame_equal(parsed, expected)
    def test_excel_passes_na(self, ext):
        # keep_default_na=False keeps literal "NA"-like strings as data,
        # keep_default_na=True converts them to NaN; na_values adds extras.
        excel = self.get_excelfile('test4', ext)
        parsed = read_excel(excel, 'Sheet1', keep_default_na=False,
                            na_values=['apple'])
        expected = DataFrame([['NA'], [1], ['NA'], [np.nan], ['rabbit']],
                             columns=['Test'])
        tm.assert_frame_equal(parsed, expected)
        parsed = read_excel(excel, 'Sheet1', keep_default_na=True,
                            na_values=['apple'])
        expected = DataFrame([[np.nan], [1], [np.nan], [np.nan], ['rabbit']],
                             columns=['Test'])
        tm.assert_frame_equal(parsed, expected)
        # see gh-13967: strings resembling default NA tokens
        excel = self.get_excelfile('test5', ext)
        parsed = read_excel(excel, 'Sheet1', keep_default_na=False,
                            na_values=['apple'])
        expected = DataFrame([['1.#QNAN'], [1], ['nan'], [np.nan], ['rabbit']],
                             columns=['Test'])
        tm.assert_frame_equal(parsed, expected)
        parsed = read_excel(excel, 'Sheet1', keep_default_na=True,
                            na_values=['apple'])
        expected = DataFrame([[np.nan], [1], [np.nan], [np.nan], ['rabbit']],
                             columns=['Test'])
        tm.assert_frame_equal(parsed, expected)
    @td.skip_if_no('xlrd', '1.0.1')  # GH-22682
    def test_deprecated_sheetname(self, ext):
        # gh-17964: "sheetname" still works but warns FutureWarning;
        # "sheet" was never a valid keyword and raises TypeError.
        excel = self.get_excelfile('test1', ext)
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            read_excel(excel, sheetname='Sheet1')
        with pytest.raises(TypeError):
            read_excel(excel, sheet='Sheet1')
    @td.skip_if_no('xlrd', '1.0.1')  # GH-22682
    def test_excel_table_sheet_by_index(self, ext):
        # Sheets are addressable by integer position via both read_excel and
        # ExcelFile.parse; skipfooter trims trailing rows and skip_footer is
        # its deprecated alias; an unknown sheet name raises xlrd.XLRDError.
        excel = self.get_excelfile('test1', ext)
        dfref = self.get_csv_refdf('test1')
        df1 = read_excel(excel, 0, index_col=0)
        df2 = read_excel(excel, 1, skiprows=[1], index_col=0)
        tm.assert_frame_equal(df1, dfref, check_names=False)
        tm.assert_frame_equal(df2, dfref, check_names=False)
        df1 = excel.parse(0, index_col=0)
        df2 = excel.parse(1, skiprows=[1], index_col=0)
        tm.assert_frame_equal(df1, dfref, check_names=False)
        tm.assert_frame_equal(df2, dfref, check_names=False)
        df3 = read_excel(excel, 0, index_col=0, skipfooter=1)
        tm.assert_frame_equal(df3, df1.iloc[:-1])
        # the deprecated spelling must warn but give the same result
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            df4 = read_excel(excel, 0, index_col=0, skip_footer=1)
            tm.assert_frame_equal(df3, df4)
        df3 = excel.parse(0, index_col=0, skipfooter=1)
        tm.assert_frame_equal(df3, df1.iloc[:-1])
        import xlrd
        with pytest.raises(xlrd.XLRDError):
            read_excel(excel, 'asdf')
    def test_excel_table(self, ext):
        # Basic sheet parsing against the CSV reference; skipfooter drops
        # the final data row.
        dfref = self.get_csv_refdf('test1')
        df1 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0)
        df2 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
                               index_col=0)
        # TODO: add index to file
        tm.assert_frame_equal(df1, dfref, check_names=False)
        tm.assert_frame_equal(df2, dfref, check_names=False)
        df3 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0,
                               skipfooter=1)
        tm.assert_frame_equal(df3, df1.iloc[:-1])
    def test_reader_special_dtypes(self, ext):
        # Type inference, convert_float behaviour, per-column index_col and
        # converters on a sheet mixing ints, floats, bools, strings, dates.
        expected = DataFrame.from_dict(OrderedDict([
            ("IntCol", [1, 2, -3, 4, 0]),
            ("FloatCol", [1.25, 2.25, 1.83, 1.92, 0.0000000005]),
            ("BoolCol", [True, False, True, True, False]),
            ("StrCol", [1, 2, 3, 4, 5]),
            # GH5394 - this is why convert_float isn't vectorized
            ("Str2Col", ["a", 3, "c", "d", "e"]),
            ("DateCol", [datetime(2013, 10, 30), datetime(2013, 10, 31),
                         datetime(1905, 1, 1), datetime(2013, 12, 14),
                         datetime(2015, 3, 14)])
        ]))
        basename = 'test_types'
        # should read in correctly and infer types
        actual = self.get_exceldf(basename, ext, 'Sheet1')
        tm.assert_frame_equal(actual, expected)
        # if not coercing number, then int comes in as float
        float_expected = expected.copy()
        float_expected["IntCol"] = float_expected["IntCol"].astype(float)
        float_expected.loc[float_expected.index[1], "Str2Col"] = 3.0
        actual = self.get_exceldf(basename, ext, 'Sheet1', convert_float=False)
        tm.assert_frame_equal(actual, float_expected)
        # check setting Index (assuming xls and xlsx are the same here)
        for icol, name in enumerate(expected.columns):
            actual = self.get_exceldf(basename, ext, 'Sheet1', index_col=icol)
            exp = expected.set_index(name)
            tm.assert_frame_equal(actual, exp)
        # convert_float and converters should be different but both accepted
        expected["StrCol"] = expected["StrCol"].apply(str)
        actual = self.get_exceldf(
            basename, ext, 'Sheet1', converters={"StrCol": str})
        tm.assert_frame_equal(actual, expected)
        no_convert_float = float_expected.copy()
        no_convert_float["StrCol"] = no_convert_float["StrCol"].apply(str)
        actual = self.get_exceldf(basename, ext, 'Sheet1', convert_float=False,
                                  converters={"StrCol": str})
        tm.assert_frame_equal(actual, no_convert_float)
    # GH8212 - support for converters and missing values
    def test_reader_converters(self, ext):
        # Converters may be keyed by column name or position and are applied
        # cell-by-cell, including to empty cells.
        basename = 'test_converters'
        expected = DataFrame.from_dict(OrderedDict([
            ("IntCol", [1, 2, -3, -1000, 0]),
            ("FloatCol", [12.5, np.nan, 18.3, 19.2, 0.000000005]),
            ("BoolCol", ['Found', 'Found', 'Found', 'Not found', 'Found']),
            ("StrCol", ['1', np.nan, '3', '4', '5']),
        ]))
        converters = {'IntCol': lambda x: int(x) if x != '' else -1000,
                      'FloatCol': lambda x: 10 * x if x else np.nan,
                      2: lambda x: 'Found' if x != '' else 'Not found',
                      3: lambda x: str(x) if x else '',
                      }
        # should read in correctly and set types of single cells (not array
        # dtypes)
        actual = self.get_exceldf(basename, ext, 'Sheet1',
                                  converters=converters)
        tm.assert_frame_equal(actual, expected)
    def test_reader_dtype(self, ext):
        # GH 8212: explicit dtype mapping; an impossible cast (a column
        # containing NaN to int64) must raise ValueError.
        basename = 'testdtype'
        actual = self.get_exceldf(basename, ext)
        expected = DataFrame({
            'a': [1, 2, 3, 4],
            'b': [2.5, 3.5, 4.5, 5.5],
            'c': [1, 2, 3, 4],
            'd': [1.0, 2.0, np.nan, 4.0]}).reindex(
                columns=['a', 'b', 'c', 'd'])
        tm.assert_frame_equal(actual, expected)
        actual = self.get_exceldf(basename, ext,
                                  dtype={'a': 'float64',
                                         'b': 'float32',
                                         'c': str})
        expected['a'] = expected['a'].astype('float64')
        expected['b'] = expected['b'].astype('float32')
        expected['c'] = ['001', '002', '003', '004']
        tm.assert_frame_equal(actual, expected)
        with pytest.raises(ValueError):
            self.get_exceldf(basename, ext, dtype={'d': 'int64'})
    @pytest.mark.parametrize("dtype,expected", [
        (None,
         DataFrame({
             "a": [1, 2, 3, 4],
             "b": [2.5, 3.5, 4.5, 5.5],
             "c": [1, 2, 3, 4],
             "d": [1.0, 2.0, np.nan, 4.0]
         })),
        ({"a": "float64",
          "b": "float32",
          "c": str,
          "d": str
          },
         DataFrame({
             "a": Series([1, 2, 3, 4], dtype="float64"),
             "b": Series([2.5, 3.5, 4.5, 5.5], dtype="float32"),
             "c": ["001", "002", "003", "004"],
             "d": ["1", "2", np.nan, "4"]
         })),
    ])
    def test_reader_dtype_str(self, ext, dtype, expected):
        # see gh-20377: dtype=str preserves leading zeros but keeps NaN.
        basename = "testdtype"
        actual = self.get_exceldf(basename, ext, dtype=dtype)
        tm.assert_frame_equal(actual, expected)
def test_reading_all_sheets(self, ext):
# Test reading all sheetnames by setting sheetname to None,
# Ensure a dict is returned.
# See PR #9450
basename = 'test_multisheet'
dfs = self.get_exceldf(basename, ext, sheet_name=None)
# ensure this is not alphabetical to test order preservation
expected_keys = ['Charlie', 'Alpha', 'Beta']
tm.assert_contains_all(expected_keys, dfs.keys())
# Issue 9930
# Ensure sheet order is preserved
assert expected_keys == list(dfs.keys())
    def test_reading_multiple_specific_sheets(self, ext):
        # Test reading specific sheetnames by specifying a mixed list
        # of integers and strings, and confirm that duplicated sheet
        # references (positions/names) are removed properly.
        # Ensure a dict is returned
        # See PR #9450
        basename = 'test_multisheet'
        # Explicitly request duplicates. Only the set should be returned.
        # (position 2 and 'Charlie' refer to the same sheet)
        expected_keys = [2, 'Charlie', 'Charlie']
        dfs = self.get_exceldf(basename, ext, sheet_name=expected_keys)
        expected_keys = list(set(expected_keys))
        tm.assert_contains_all(expected_keys, dfs.keys())
        assert len(expected_keys) == len(dfs.keys())
def test_reading_all_sheets_with_blank(self, ext):
# Test reading all sheetnames by setting sheetname to None,
# In the case where some sheets are blank.
# Issue #11711
basename = 'blank_with_header'
dfs = self.get_exceldf(basename, ext, sheet_name=None)
expected_keys = ['Sheet1', 'Sheet2', 'Sheet3']
tm.assert_contains_all(expected_keys, dfs.keys())
# GH6403
def test_read_excel_blank(self, ext):
actual = self.get_exceldf('blank', ext, 'Sheet1')
tm.assert_frame_equal(actual, DataFrame())
def test_read_excel_blank_with_header(self, ext):
expected = DataFrame(columns=['col_1', 'col_2'])
actual = self.get_exceldf('blank_with_header', ext, 'Sheet1')
tm.assert_frame_equal(actual, expected)
    @td.skip_if_no("xlwt")
    @td.skip_if_no("openpyxl")
    @pytest.mark.parametrize("header,expected", [
        (None, DataFrame([np.nan] * 4)),
        (0, DataFrame({"Unnamed: 0": [np.nan] * 3}))
    ])
    def test_read_one_empty_col_no_header(self, ext, header, expected):
        # xref gh-12292: read a single column of empty cells; header=0
        # consumes the first (empty) cell as an unnamed column label.
        filename = "no_header"
        df = pd.DataFrame(
            [["", 1, 100],
             ["", 2, 200],
             ["", 3, 300],
             ["", 4, 400]]
        )
        with ensure_clean(ext) as path:
            df.to_excel(path, filename, index=False, header=False)
            result = read_excel(path, filename, usecols=[0], header=header)
            tm.assert_frame_equal(result, expected)
@td.skip_if_no("xlwt")
@td.skip_if_no("openpyxl")
@pytest.mark.parametrize("header,expected", [
(None, DataFrame([0] + [np.nan] * 4)),
(0, DataFrame([np.nan] * 4))
])
def test_read_one_empty_col_with_header(self, ext, header, expected):
filename = "with_header"
df = pd.DataFrame(
[["", 1, 100],
["", 2, 200],
["", 3, 300],
["", 4, 400]]
)
with ensure_clean(ext) as path:
df.to_excel(path, 'with_header', index=False, header=True)
result = read_excel(path, filename, usecols=[0], header=header)
tm.assert_frame_equal(result, expected)
    @td.skip_if_no('openpyxl')
    @td.skip_if_no('xlwt')
    def test_set_column_names_in_parameter(self, ext):
        # GH 12870 : pass down column names associated with
        # keyword argument names
        refdf = pd.DataFrame([[1, 'foo'], [2, 'bar'],
                              [3, 'baz']], columns=['a', 'b'])
        with ensure_clean(ext) as pth:
            with ExcelWriter(pth) as writer:
                refdf.to_excel(writer, 'Data_no_head',
                               header=False, index=False)
                refdf.to_excel(writer, 'Data_with_head', index=False)
            # `names` should override both a missing and a written header
            refdf.columns = ['A', 'B']
            with ExcelFile(pth) as reader:
                xlsdf_no_head = read_excel(reader, 'Data_no_head',
                                           header=None, names=['A', 'B'])
                xlsdf_with_head = read_excel(reader, 'Data_with_head',
                                             index_col=None, names=['A', 'B'])
            tm.assert_frame_equal(xlsdf_no_head, refdf)
            tm.assert_frame_equal(xlsdf_with_head, refdf)
    def test_date_conversion_overflow(self, ext):
        # GH 10001 : pandas.ExcelFile ignore parse_dates=False
        # A value beyond the Timestamp range stays numeric instead of raising.
        expected = pd.DataFrame([[pd.Timestamp('2016-03-12'), 'Marc Johnson'],
                                 [pd.Timestamp('2016-03-16'), 'Jack Black'],
                                 [1e+20, 'Timothy Brown']],
                                columns=['DateColWithBigInt', 'StringCol'])
        result = self.get_exceldf('testdateoverflow', ext)
        tm.assert_frame_equal(result, expected)
    @td.skip_if_no("xlrd", "1.0.1")  # see gh-22682
    def test_sheet_name_and_sheetname(self, ext):
        # gh-10559: Minor improvement: Change "sheet_name" to "sheetname"
        # gh-10969: DOC: Consistent var names (sheetname vs sheet_name)
        # gh-12604: CLN GH10559 Rename sheetname variable to sheet_name
        # gh-20920: ExcelFile.parse() and pd.read_xlsx() have different
        # behavior for "sheetname" argument
        filename = "test1"
        sheet_name = "Sheet1"
        df_ref = self.get_csv_refdf(filename)
        df1 = self.get_exceldf(filename, ext,
                               sheet_name=sheet_name, index_col=0)  # doc
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            with ignore_xlrd_time_clock_warning():
                df2 = self.get_exceldf(filename, ext, index_col=0,
                                       sheetname=sheet_name)  # backward compat
        excel = self.get_excelfile(filename, ext)
        df1_parse = excel.parse(sheet_name=sheet_name, index_col=0)  # doc
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            df2_parse = excel.parse(index_col=0,
                                    sheetname=sheet_name)  # backward compat
        # all four spellings must parse to the same frame
        tm.assert_frame_equal(df1, df_ref, check_names=False)
        tm.assert_frame_equal(df2, df_ref, check_names=False)
        tm.assert_frame_equal(df1_parse, df_ref, check_names=False)
        tm.assert_frame_equal(df2_parse, df_ref, check_names=False)
def test_sheet_name_both_raises(self, ext):
with pytest.raises(TypeError, match="Cannot specify both"):
self.get_exceldf('test1', ext, sheetname='Sheet1',
sheet_name='Sheet1')
excel = self.get_excelfile('test1', ext)
with pytest.raises(TypeError, match="Cannot specify both"):
excel.parse(sheetname='Sheet1',
sheet_name='Sheet1')
def test_excel_read_buffer(self, ext):
pth = os.path.join(self.dirpath, 'test1' + ext)
expected = read_excel(pth, 'Sheet1', index_col=0)
with open(pth, 'rb') as f:
actual = read_excel(f, 'Sheet1', index_col=0)
tm.assert_frame_equal(expected, actual)
with open(pth, 'rb') as f:
xls = ExcelFile(f)
actual = read_excel(xls, 'Sheet1', index_col=0)
tm.assert_frame_equal(expected, actual)
def test_bad_engine_raises(self, ext):
bad_engine = 'foo'
with pytest.raises(ValueError, match="Unknown engine: foo"):
read_excel('', engine=bad_engine)
    @tm.network
    def test_read_from_http_url(self, ext):
        # Reading directly from an HTTP URL must match the local fixture.
        url = ('https://raw.github.com/pandas-dev/pandas/master/'
               'pandas/tests/io/data/test1' + ext)
        url_table = read_excel(url)
        local_table = self.get_exceldf('test1', ext)
        tm.assert_frame_equal(url_table, local_table)
    @td.skip_if_not_us_locale
    def test_read_from_s3_url(self, ext, s3_resource):
        # Bucket "pandas-test" created in tests/io/conftest.py
        # Upload the fixture file, then read it back via an s3:// URL.
        file_name = os.path.join(self.dirpath, 'test1' + ext)
        with open(file_name, "rb") as f:
            s3_resource.Bucket("pandas-test").put_object(Key="test1" + ext,
                                                         Body=f)
        url = ('s3://pandas-test/test1' + ext)
        url_table = read_excel(url)
        local_table = self.get_exceldf('test1', ext)
        tm.assert_frame_equal(url_table, local_table)
    @pytest.mark.slow
    # ignore warning from old xlrd
    @pytest.mark.filterwarnings("ignore:This metho:PendingDeprecationWarning")
    def test_read_from_file_url(self, ext):
        # Reading via a file:// URL must match reading the path directly.
        localtable = os.path.join(self.dirpath, 'test1' + ext)
        local_table = read_excel(localtable)
        try:
            url_table = read_excel('file://localhost/' + localtable)
        except URLError:
            # fails on some systems
            import platform
            pytest.skip("failing on %s" %
                        ' '.join(platform.uname()).strip())
        tm.assert_frame_equal(url_table, local_table)
@td.skip_if_no('pathlib')
def test_read_from_pathlib_path(self, ext):
# GH12655
from pathlib import Path
str_path = os.path.join(self.dirpath, 'test1' + ext)
expected = read_excel(str_path, 'Sheet1', index_col=0)
path_obj = Path(self.dirpath, 'test1' + ext)
actual = read_excel(path_obj, 'Sheet1', index_col=0)
tm.assert_frame_equal(expected, actual)
    @td.skip_if_no('py.path')
    def test_read_from_py_localpath(self, ext):
        # GH12655: read_excel accepts py.path.local objects.
        from py.path import local as LocalPath
        str_path = os.path.join(self.dirpath, 'test1' + ext)
        expected = read_excel(str_path, 'Sheet1', index_col=0)
        abs_dir = os.path.abspath(self.dirpath)
        path_obj = LocalPath(abs_dir).join('test1' + ext)
        actual = read_excel(path_obj, 'Sheet1', index_col=0)
        tm.assert_frame_equal(expected, actual)
def test_reader_closes_file(self, ext):
pth = os.path.join(self.dirpath, 'test1' + ext)
f = open(pth, 'rb')
with ExcelFile(f) as xlsx:
# parses okay
read_excel(xlsx, 'Sheet1', index_col=0)
assert f.closed
    @td.skip_if_no("xlwt")
    @td.skip_if_no("openpyxl")
    def test_creating_and_reading_multiple_sheets(self, ext):
        # see gh-9450
        #
        # Test reading multiple sheets, from a runtime-created Excel file
        # with multiple sheets.
        def tdf(col_sheet_name):
            # one tiny frame per sheet, labelled by its sheet name
            d, i = [11, 22, 33], [1, 2, 3]
            return DataFrame(d, i, columns=[col_sheet_name])
        sheets = ["AAA", "BBB", "CCC"]
        dfs = [tdf(s) for s in sheets]
        dfs = dict(zip(sheets, dfs))
        with ensure_clean(ext) as pth:
            with ExcelWriter(pth) as ew:
                for sheetname, df in iteritems(dfs):
                    df.to_excel(ew, sheetname)
            dfs_returned = read_excel(pth, sheet_name=sheets, index_col=0)
            for s in sheets:
                tm.assert_frame_equal(dfs[s], dfs_returned[s])
    def test_reader_seconds(self, ext):
        # Test reading times with and without milliseconds. GH5945.
        # Same expectation for both the 1900 and 1904 Excel date systems.
        expected = DataFrame.from_dict({"Time": [time(1, 2, 3),
                                                 time(2, 45, 56, 100000),
                                                 time(4, 29, 49, 200000),
                                                 time(6, 13, 42, 300000),
                                                 time(7, 57, 35, 400000),
                                                 time(9, 41, 28, 500000),
                                                 time(11, 25, 21, 600000),
                                                 time(13, 9, 14, 700000),
                                                 time(14, 53, 7, 800000),
                                                 time(16, 37, 0, 900000),
                                                 time(18, 20, 54)]})
        actual = self.get_exceldf('times_1900', ext, 'Sheet1')
        tm.assert_frame_equal(actual, expected)
        actual = self.get_exceldf('times_1904', ext, 'Sheet1')
        tm.assert_frame_equal(actual, expected)
    def test_read_excel_multiindex(self, ext):
        # see gh-4679: MultiIndex round-trips for columns, index, both,
        # named/unnamed levels, integer level values, and with skiprows.
        mi = MultiIndex.from_product([["foo", "bar"], ["a", "b"]])
        mi_file = os.path.join(self.dirpath, "testmultiindex" + ext)
        # "mi_column" sheet
        expected = DataFrame([[1, 2.5, pd.Timestamp("2015-01-01"), True],
                              [2, 3.5, pd.Timestamp("2015-01-02"), False],
                              [3, 4.5, pd.Timestamp("2015-01-03"), False],
                              [4, 5.5, pd.Timestamp("2015-01-04"), True]],
                             columns=mi)
        actual = read_excel(mi_file, "mi_column", header=[0, 1], index_col=0)
        tm.assert_frame_equal(actual, expected)
        # "mi_index" sheet
        expected.index = mi
        expected.columns = ["a", "b", "c", "d"]
        actual = read_excel(mi_file, "mi_index", index_col=[0, 1])
        tm.assert_frame_equal(actual, expected, check_names=False)
        # "both" sheet
        expected.columns = mi
        actual = read_excel(mi_file, "both", index_col=[0, 1], header=[0, 1])
        tm.assert_frame_equal(actual, expected, check_names=False)
        # "mi_index_name" sheet
        expected.columns = ["a", "b", "c", "d"]
        expected.index = mi.set_names(["ilvl1", "ilvl2"])
        actual = read_excel(mi_file, "mi_index_name", index_col=[0, 1])
        tm.assert_frame_equal(actual, expected)
        # "mi_column_name" sheet
        expected.index = list(range(4))
        expected.columns = mi.set_names(["c1", "c2"])
        actual = read_excel(mi_file, "mi_column_name",
                            header=[0, 1], index_col=0)
        tm.assert_frame_equal(actual, expected)
        # see gh-11317
        # "name_with_int" sheet
        expected.columns = mi.set_levels(
            [1, 2], level=1).set_names(["c1", "c2"])
        actual = read_excel(mi_file, "name_with_int",
                            index_col=0, header=[0, 1])
        tm.assert_frame_equal(actual, expected)
        # "both_name" sheet
        expected.columns = mi.set_names(["c1", "c2"])
        expected.index = mi.set_names(["ilvl1", "ilvl2"])
        actual = read_excel(mi_file, "both_name",
                            index_col=[0, 1], header=[0, 1])
        tm.assert_frame_equal(actual, expected)
        # "both_skiprows" sheet
        actual = read_excel(mi_file, "both_name_skiprows", index_col=[0, 1],
                            header=[0, 1], skiprows=2)
        tm.assert_frame_equal(actual, expected)
    def test_read_excel_multiindex_header_only(self, ext):
        # see gh-11733.
        #
        # Don't try to parse a header name if there isn't one.
        mi_file = os.path.join(self.dirpath, "testmultiindex" + ext)
        result = read_excel(mi_file, "index_col_none", header=[0, 1])
        exp_columns = MultiIndex.from_product([("A", "B"), ("key", "val")])
        expected = DataFrame([[1, 2, 3, 4]] * 2, columns=exp_columns)
        tm.assert_frame_equal(result, expected)
    @td.skip_if_no("xlsxwriter")
    def test_read_excel_multiindex_empty_level(self, ext):
        # see gh-12453: empty strings in a column MultiIndex level round-trip
        # as "Unnamed: ..." labels rather than being dropped.
        with ensure_clean(ext) as path:
            df = DataFrame({
                ("One", "x"): {0: 1},
                ("Two", "X"): {0: 3},
                ("Two", "Y"): {0: 7},
                ("Zero", ""): {0: 0}
            })
            expected = DataFrame({
                ("One", "x"): {0: 1},
                ("Two", "X"): {0: 3},
                ("Two", "Y"): {0: 7},
                ("Zero", "Unnamed: 4_level_1"): {0: 0}
            })
            df.to_excel(path)
            actual = pd.read_excel(path, header=[0, 1], index_col=0)
            tm.assert_frame_equal(actual, expected)
            df = pd.DataFrame({
                ("Beg", ""): {0: 0},
                ("Middle", "x"): {0: 1},
                ("Tail", "X"): {0: 3},
                ("Tail", "Y"): {0: 7}
            })
            expected = pd.DataFrame({
                ("Beg", "Unnamed: 1_level_1"): {0: 0},
                ("Middle", "x"): {0: 1},
                ("Tail", "X"): {0: 3},
                ("Tail", "Y"): {0: 7}
            })
            df.to_excel(path)
            actual = pd.read_excel(path, header=[0, 1], index_col=0)
            tm.assert_frame_equal(actual, expected)
    @td.skip_if_no("xlsxwriter")
    @pytest.mark.parametrize("c_idx_names", [True, False])
    @pytest.mark.parametrize("r_idx_names", [True, False])
    @pytest.mark.parametrize("c_idx_levels", [1, 3])
    @pytest.mark.parametrize("r_idx_levels", [1, 3])
    def test_excel_multindex_roundtrip(self, ext, c_idx_names, r_idx_names,
                                       c_idx_levels, r_idx_levels):
        # see gh-4679: round-trip frames over all combinations of named /
        # unnamed, single / 3-level row and column indexes, with NaNs in
        # the first and last rows.
        with ensure_clean(ext) as pth:
            if c_idx_levels == 1 and c_idx_names:
                pytest.skip("Column index name cannot be "
                            "serialized unless it's a MultiIndex")
            # Empty name case current read in as
            # unnamed levels, not Nones.
            check_names = r_idx_names or r_idx_levels <= 1
            df = mkdf(5, 5, c_idx_names, r_idx_names,
                      c_idx_levels, r_idx_levels)
            df.to_excel(pth)
            act = pd.read_excel(pth, index_col=list(range(r_idx_levels)),
                                header=list(range(c_idx_levels)))
            tm.assert_frame_equal(df, act, check_names=check_names)
            df.iloc[0, :] = np.nan
            df.to_excel(pth)
            act = pd.read_excel(pth, index_col=list(range(r_idx_levels)),
                                header=list(range(c_idx_levels)))
            tm.assert_frame_equal(df, act, check_names=check_names)
            df.iloc[-1, :] = np.nan
            df.to_excel(pth)
            act = pd.read_excel(pth, index_col=list(range(r_idx_levels)),
                                header=list(range(c_idx_levels)))
            tm.assert_frame_equal(df, act, check_names=check_names)
    def test_excel_old_index_format(self, ext):
        # see gh-4679: files written before 0.17 lack index-name rows; make
        # sure both named and unnamed single/multi indexes still parse.
        filename = "test_index_name_pre17" + ext
        in_file = os.path.join(self.dirpath, filename)
        # We detect headers to determine if index names exist, so
        # that "index" name in the "names" version of the data will
        # now be interpreted as rows that include null data.
        data = np.array([[None, None, None, None, None],
                         ["R0C0", "R0C1", "R0C2", "R0C3", "R0C4"],
                         ["R1C0", "R1C1", "R1C2", "R1C3", "R1C4"],
                         ["R2C0", "R2C1", "R2C2", "R2C3", "R2C4"],
                         ["R3C0", "R3C1", "R3C2", "R3C3", "R3C4"],
                         ["R4C0", "R4C1", "R4C2", "R4C3", "R4C4"]])
        columns = ["C_l0_g0", "C_l0_g1", "C_l0_g2", "C_l0_g3", "C_l0_g4"]
        mi = MultiIndex(levels=[["R0", "R_l0_g0", "R_l0_g1",
                                 "R_l0_g2", "R_l0_g3", "R_l0_g4"],
                                ["R1", "R_l1_g0", "R_l1_g1",
                                 "R_l1_g2", "R_l1_g3", "R_l1_g4"]],
                        codes=[[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]],
                        names=[None, None])
        si = Index(["R0", "R_l0_g0", "R_l0_g1", "R_l0_g2",
                    "R_l0_g3", "R_l0_g4"], name=None)
        expected = pd.DataFrame(data, index=si, columns=columns)
        actual = pd.read_excel(in_file, "single_names", index_col=0)
        tm.assert_frame_equal(actual, expected)
        expected.index = mi
        actual = pd.read_excel(in_file, "multi_names", index_col=[0, 1])
        tm.assert_frame_equal(actual, expected)
        # The analogous versions of the "names" version data
        # where there are explicitly no names for the indices.
        data = np.array([["R0C0", "R0C1", "R0C2", "R0C3", "R0C4"],
                         ["R1C0", "R1C1", "R1C2", "R1C3", "R1C4"],
                         ["R2C0", "R2C1", "R2C2", "R2C3", "R2C4"],
                         ["R3C0", "R3C1", "R3C2", "R3C3", "R3C4"],
                         ["R4C0", "R4C1", "R4C2", "R4C3", "R4C4"]])
        columns = ["C_l0_g0", "C_l0_g1", "C_l0_g2", "C_l0_g3", "C_l0_g4"]
        mi = MultiIndex(levels=[["R_l0_g0", "R_l0_g1", "R_l0_g2",
                                 "R_l0_g3", "R_l0_g4"],
                                ["R_l1_g0", "R_l1_g1", "R_l1_g2",
                                 "R_l1_g3", "R_l1_g4"]],
                        codes=[[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]],
                        names=[None, None])
        si = Index(["R_l0_g0", "R_l0_g1", "R_l0_g2",
                    "R_l0_g3", "R_l0_g4"], name=None)
        expected = pd.DataFrame(data, index=si, columns=columns)
        actual = pd.read_excel(in_file, "single_no_names", index_col=0)
        tm.assert_frame_equal(actual, expected)
        expected.index = mi
        actual = pd.read_excel(in_file, "multi_no_names", index_col=[0, 1])
        tm.assert_frame_equal(actual, expected, check_names=False)
def test_read_excel_bool_header_arg(self, ext):
# GH 6114
for arg in [True, False]:
with pytest.raises(TypeError):
pd.read_excel(os.path.join(self.dirpath, 'test1' + ext),
header=arg)
def test_read_excel_chunksize(self, ext):
# GH 8011
with pytest.raises(NotImplementedError):
pd.read_excel(os.path.join(self.dirpath, 'test1' + ext),
chunksize=100)
    @td.skip_if_no("xlwt")
    @td.skip_if_no("openpyxl")
    def test_read_excel_parse_dates(self, ext):
        # see gh-11544, gh-12051
        # Round-trip date strings and parse them back, both with the default
        # parser and with an explicit date_parser.
        df = DataFrame(
            {"col": [1, 2, 3],
             "date_strings": pd.date_range("2012-01-01", periods=3)})
        df2 = df.copy()
        df2["date_strings"] = df2["date_strings"].dt.strftime("%m/%d/%Y")
        with ensure_clean(ext) as pth:
            df2.to_excel(pth)
            res = read_excel(pth, index_col=0)
            tm.assert_frame_equal(df2, res)
            res = read_excel(pth, parse_dates=["date_strings"], index_col=0)
            tm.assert_frame_equal(df, res)
            date_parser = lambda x: pd.datetime.strptime(x, "%m/%d/%Y")
            res = read_excel(pth, parse_dates=["date_strings"],
                             date_parser=date_parser, index_col=0)
            tm.assert_frame_equal(df, res)
def test_read_excel_skiprows_list(self, ext):
# GH 4903
actual = pd.read_excel(os.path.join(self.dirpath,
'testskiprows' + ext),
'skiprows_list', skiprows=[0, 2])
expected = DataFrame([[1, 2.5, pd.Timestamp('2015-01-01'), True],
[2, 3.5, pd.Timestamp('2015-01-02'), False],
[3, 4.5, pd.Timestamp('2015-01-03'), False],
[4, 5.5, pd.Timestamp('2015-01-04'), True]],
columns=['a', 'b', 'c', 'd'])
tm.assert_frame_equal(actual, expected)
actual = pd.read_excel(os.path.join(self.dirpath,
'testskiprows' + ext),
'skiprows_list', skiprows=np.array([0, 2]))
tm.assert_frame_equal(actual, expected)
def test_read_excel_nrows(self, ext):
# GH 16645
num_rows_to_pull = 5
actual = pd.read_excel(os.path.join(self.dirpath, 'test1' + ext),
nrows=num_rows_to_pull)
expected = pd.read_excel(os.path.join(self.dirpath,
'test1' + ext))
expected = expected[:num_rows_to_pull]
tm.assert_frame_equal(actual, expected)
def test_read_excel_nrows_greater_than_nrows_in_file(self, ext):
# GH 16645
expected = pd.read_excel(os.path.join(self.dirpath,
'test1' + ext))
num_records_in_file = len(expected)
num_rows_to_pull = num_records_in_file + 10
actual = pd.read_excel(os.path.join(self.dirpath, 'test1' + ext),
nrows=num_rows_to_pull)
tm.assert_frame_equal(actual, expected)
def test_read_excel_nrows_non_integer_parameter(self, ext):
# GH 16645
msg = "'nrows' must be an integer >=0"
with pytest.raises(ValueError, match=msg):
pd.read_excel(os.path.join(self.dirpath, 'test1' + ext),
nrows='5')
    def test_read_excel_squeeze(self, ext):
        # GH 12157: squeeze=True returns a Series only when exactly one
        # data column remains after index handling.
        f = os.path.join(self.dirpath, 'test_squeeze' + ext)
        actual = pd.read_excel(f, 'two_columns', index_col=0, squeeze=True)
        expected = pd.Series([2, 3, 4], [4, 5, 6], name='b')
        expected.index.name = 'a'
        tm.assert_series_equal(actual, expected)
        # two data columns -> still a DataFrame despite squeeze=True
        actual = pd.read_excel(f, 'two_columns', squeeze=True)
        expected = pd.DataFrame({'a': [4, 5, 6],
                                 'b': [2, 3, 4]})
        tm.assert_frame_equal(actual, expected)
        actual = pd.read_excel(f, 'one_column', squeeze=True)
        expected = pd.Series([1, 2, 3], name='a')
        tm.assert_series_equal(actual, expected)
@pytest.mark.parametrize("ext", ['.xls', '.xlsx', '.xlsm'])
class TestXlrdReader(ReadingTestsBase):
    """
    This is the base class for the xlrd tests, and 3 different file formats
    are supported: xls, xlsx, xlsm
    """
    @td.skip_if_no("xlwt")
    def test_read_xlrd_book(self, ext):
        # A pre-opened xlrd Book can be handed to ExcelFile and to
        # read_excel directly.
        import xlrd
        df = self.frame
        engine = "xlrd"
        sheet_name = "SheetA"
        with ensure_clean(ext) as pth:
            df.to_excel(pth, sheet_name)
            book = xlrd.open_workbook(pth)
            with ExcelFile(book, engine=engine) as xl:
                result = read_excel(xl, sheet_name, index_col=0)
                tm.assert_frame_equal(df, result)
            result = read_excel(book, sheet_name=sheet_name,
                                engine=engine, index_col=0)
            tm.assert_frame_equal(df, result)
class _WriterBase(SharedItems):
    # Shared setup for writer test classes: per-test engine option and a
    # temporary output path.
    @pytest.fixture(autouse=True)
    def set_engine_and_path(self, request, merge_cells, engine, ext):
        """Fixture to set engine and open file for use in each test case

        Rather than requiring `engine=...` to be provided explicitly as an
        argument in each test, this fixture sets a global option to dictate
        which engine should be used to write Excel files. After executing
        the test it rolls back said change to the global option.

        It also uses a context manager to open a temporary excel file for
        the function to write to, accessible via `self.path`

        Notes
        -----
        This fixture will run as part of each test method defined in the
        class and any subclasses, on account of the `autouse=True`
        argument
        """
        option_name = 'io.excel.{ext}.writer'.format(ext=ext.strip('.'))
        prev_engine = get_option(option_name)
        set_option(option_name, engine)
        with ensure_clean(ext) as path:
            self.path = path
            yield
        set_option(option_name, prev_engine)  # Roll back option change
@pytest.mark.parametrize("merge_cells", [True, False])
@pytest.mark.parametrize("engine,ext", [
pytest.param('openpyxl', '.xlsx', marks=pytest.mark.skipif(
not td.safe_import('openpyxl'), reason='No openpyxl')),
pytest.param('openpyxl', '.xlsm', marks=pytest.mark.skipif(
not td.safe_import('openpyxl'), reason='No openpyxl')),
pytest.param('xlwt', '.xls', marks=pytest.mark.skipif(
not td.safe_import('xlwt'), reason='No xlwt')),
pytest.param('xlsxwriter', '.xlsx', marks=pytest.mark.skipif(
not td.safe_import('xlsxwriter'), reason='No xlsxwriter'))
])
class TestExcelWriter(_WriterBase):
# Base class for test cases to run with different Excel writers.
    def test_excel_sheet_by_name_raise(self, *_):
        # Reading an existing sheet by position works; requesting a sheet
        # *name* that does not exist must raise xlrd's XLRDError.
        import xlrd
        gt = DataFrame(np.random.randn(10, 2))
        gt.to_excel(self.path)
        xl = ExcelFile(self.path)
        df = read_excel(xl, 0, index_col=0)
        tm.assert_frame_equal(gt, df)
        # "0" here is a (nonexistent) sheet name, not a position.
        with pytest.raises(xlrd.XLRDError):
            read_excel(xl, "0")
def test_excel_writer_context_manager(self, *_):
with ExcelWriter(self.path) as writer:
self.frame.to_excel(writer, "Data1")
self.frame2.to_excel(writer, "Data2")
with ExcelFile(self.path) as reader:
found_df = read_excel(reader, "Data1", index_col=0)
found_df2 = read_excel(reader, "Data2", index_col=0)
tm.assert_frame_equal(found_df, self.frame)
tm.assert_frame_equal(found_df2, self.frame2)
def test_roundtrip(self, merge_cells, engine, ext):
self.frame['A'][:5] = nan
self.frame.to_excel(self.path, 'test1')
self.frame.to_excel(self.path, 'test1', columns=['A', 'B'])
self.frame.to_excel(self.path, 'test1', header=False)
self.frame.to_excel(self.path, 'test1', index=False)
# test roundtrip
self.frame.to_excel(self.path, 'test1')
recons = read_excel(self.path, 'test1', index_col=0)
tm.assert_frame_equal(self.frame, recons)
self.frame.to_excel(self.path, 'test1', index=False)
recons = read_excel(self.path, 'test1', index_col=None)
recons.index = self.frame.index
tm.assert_frame_equal(self.frame, recons)
self.frame.to_excel(self.path, 'test1', na_rep='NA')
recons = read_excel(self.path, 'test1', index_col=0, na_values=['NA'])
tm.assert_frame_equal(self.frame, recons)
# GH 3611
self.frame.to_excel(self.path, 'test1', na_rep='88')
recons = read_excel(self.path, 'test1', index_col=0, na_values=['88'])
tm.assert_frame_equal(self.frame, recons)
self.frame.to_excel(self.path, 'test1', na_rep='88')
recons = read_excel(self.path, 'test1', index_col=0,
na_values=[88, 88.0])
tm.assert_frame_equal(self.frame, recons)
# GH 6573
self.frame.to_excel(self.path, 'Sheet1')
recons = read_excel(self.path, index_col=0)
tm.assert_frame_equal(self.frame, recons)
self.frame.to_excel(self.path, '0')
recons = read_excel(self.path, index_col=0)
tm.assert_frame_equal(self.frame, recons)
# GH 8825 Pandas Series should provide to_excel method
s = self.frame["A"]
s.to_excel(self.path)
recons = read_excel(self.path, index_col=0)
tm.assert_frame_equal(s.to_frame(), recons)
def test_mixed(self, merge_cells, engine, ext):
self.mixed_frame.to_excel(self.path, 'test1')
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1', index_col=0)
tm.assert_frame_equal(self.mixed_frame, recons)
def test_ts_frame(self, *_):
df = tm.makeTimeDataFrame()[:5]
df.to_excel(self.path, "test1")
reader = ExcelFile(self.path)
recons = read_excel(reader, "test1", index_col=0)
tm.assert_frame_equal(df, recons)
def test_basics_with_nan(self, merge_cells, engine, ext):
self.frame['A'][:5] = nan
self.frame.to_excel(self.path, 'test1')
self.frame.to_excel(self.path, 'test1', columns=['A', 'B'])
self.frame.to_excel(self.path, 'test1', header=False)
self.frame.to_excel(self.path, 'test1', index=False)
@pytest.mark.parametrize("np_type", [
np.int8, np.int16, np.int32, np.int64])
def test_int_types(self, merge_cells, engine, ext, np_type):
# Test np.int values read come back as int
# (rather than float which is Excel's format).
frame = DataFrame(np.random.randint(-10, 10, size=(10, 2)),
dtype=np_type)
frame.to_excel(self.path, "test1")
reader = ExcelFile(self.path)
recons = read_excel(reader, "test1", index_col=0)
int_frame = frame.astype(np.int64)
tm.assert_frame_equal(int_frame, recons)
recons2 = read_excel(self.path, "test1", index_col=0)
tm.assert_frame_equal(int_frame, recons2)
# Test with convert_float=False comes back as float.
float_frame = frame.astype(float)
recons = read_excel(self.path, "test1",
convert_float=False, index_col=0)
tm.assert_frame_equal(recons, float_frame,
check_index_type=False,
check_column_type=False)
@pytest.mark.parametrize("np_type", [
np.float16, np.float32, np.float64])
def test_float_types(self, merge_cells, engine, ext, np_type):
# Test np.float values read come back as float.
frame = DataFrame(np.random.random_sample(10), dtype=np_type)
frame.to_excel(self.path, "test1")
reader = ExcelFile(self.path)
recons = read_excel(reader, "test1", index_col=0).astype(np_type)
tm.assert_frame_equal(frame, recons, check_dtype=False)
@pytest.mark.parametrize("np_type", [np.bool8, np.bool_])
def test_bool_types(self, merge_cells, engine, ext, np_type):
# Test np.bool values read come back as float.
frame = (DataFrame([1, 0, True, False], dtype=np_type))
frame.to_excel(self.path, "test1")
reader = ExcelFile(self.path)
recons = read_excel(reader, "test1", index_col=0).astype(np_type)
tm.assert_frame_equal(frame, recons)
def test_inf_roundtrip(self, *_):
frame = DataFrame([(1, np.inf), (2, 3), (5, -np.inf)])
frame.to_excel(self.path, "test1")
reader = ExcelFile(self.path)
recons = read_excel(reader, "test1", index_col=0)
tm.assert_frame_equal(frame, recons)
def test_sheets(self, merge_cells, engine, ext):
self.frame['A'][:5] = nan
self.frame.to_excel(self.path, 'test1')
self.frame.to_excel(self.path, 'test1', columns=['A', 'B'])
self.frame.to_excel(self.path, 'test1', header=False)
self.frame.to_excel(self.path, 'test1', index=False)
# Test writing to separate sheets
writer = ExcelWriter(self.path)
self.frame.to_excel(writer, 'test1')
self.tsframe.to_excel(writer, 'test2')
writer.save()
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1', index_col=0)
tm.assert_frame_equal(self.frame, recons)
recons = read_excel(reader, 'test2', index_col=0)
tm.assert_frame_equal(self.tsframe, recons)
assert 2 == len(reader.sheet_names)
assert 'test1' == reader.sheet_names[0]
assert 'test2' == reader.sheet_names[1]
def test_colaliases(self, merge_cells, engine, ext):
self.frame['A'][:5] = nan
self.frame.to_excel(self.path, 'test1')
self.frame.to_excel(self.path, 'test1', columns=['A', 'B'])
self.frame.to_excel(self.path, 'test1', header=False)
self.frame.to_excel(self.path, 'test1', index=False)
# column aliases
col_aliases = Index(['AA', 'X', 'Y', 'Z'])
self.frame2.to_excel(self.path, 'test1', header=col_aliases)
reader = ExcelFile(self.path)
rs = read_excel(reader, 'test1', index_col=0)
xp = self.frame2.copy()
xp.columns = col_aliases
tm.assert_frame_equal(xp, rs)
def test_roundtrip_indexlabels(self, merge_cells, engine, ext):
self.frame['A'][:5] = nan
self.frame.to_excel(self.path, 'test1')
self.frame.to_excel(self.path, 'test1', columns=['A', 'B'])
self.frame.to_excel(self.path, 'test1', header=False)
self.frame.to_excel(self.path, 'test1', index=False)
# test index_label
frame = (DataFrame(np.random.randn(10, 2)) >= 0)
frame.to_excel(self.path, 'test1',
index_label=['test'],
merge_cells=merge_cells)
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1',
index_col=0,
).astype(np.int64)
frame.index.names = ['test']
assert frame.index.names == recons.index.names
frame = (DataFrame(np.random.randn(10, 2)) >= 0)
frame.to_excel(self.path,
'test1',
index_label=['test', 'dummy', 'dummy2'],
merge_cells=merge_cells)
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1',
index_col=0,
).astype(np.int64)
frame.index.names = ['test']
assert frame.index.names == recons.index.names
frame = (DataFrame(np.random.randn(10, 2)) >= 0)
frame.to_excel(self.path,
'test1',
index_label='test',
merge_cells=merge_cells)
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1',
index_col=0,
).astype(np.int64)
frame.index.names = ['test']
tm.assert_frame_equal(frame, recons.astype(bool))
self.frame.to_excel(self.path,
'test1',
columns=['A', 'B', 'C', 'D'],
index=False, merge_cells=merge_cells)
# take 'A' and 'B' as indexes (same row as cols 'C', 'D')
df = self.frame.copy()
df = df.set_index(['A', 'B'])
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1', index_col=[0, 1])
tm.assert_frame_equal(df, recons, check_less_precise=True)
def test_excel_roundtrip_indexname(self, merge_cells, engine, ext):
df = DataFrame(np.random.randn(10, 4))
df.index.name = 'foo'
df.to_excel(self.path, merge_cells=merge_cells)
xf = ExcelFile(self.path)
result = read_excel(xf, xf.sheet_names[0],
index_col=0)
tm.assert_frame_equal(result, df)
assert result.index.name == 'foo'
def test_excel_roundtrip_datetime(self, merge_cells, *_):
# datetime.date, not sure what to test here exactly
tsf = self.tsframe.copy()
tsf.index = [x.date() for x in self.tsframe.index]
tsf.to_excel(self.path, "test1", merge_cells=merge_cells)
reader = ExcelFile(self.path)
recons = read_excel(reader, "test1", index_col=0)
tm.assert_frame_equal(self.tsframe, recons)
def test_excel_date_datetime_format(self, merge_cells, engine, ext):
# see gh-4133
#
# Excel output format strings
df = DataFrame([[date(2014, 1, 31),
date(1999, 9, 24)],
[datetime(1998, 5, 26, 23, 33, 4),
datetime(2014, 2, 28, 13, 5, 13)]],
index=["DATE", "DATETIME"], columns=["X", "Y"])
df_expected = DataFrame([[datetime(2014, 1, 31),
datetime(1999, 9, 24)],
[datetime(1998, 5, 26, 23, 33, 4),
datetime(2014, 2, 28, 13, 5, 13)]],
index=["DATE", "DATETIME"], columns=["X", "Y"])
with ensure_clean(ext) as filename2:
writer1 = ExcelWriter(self.path)
writer2 = ExcelWriter(filename2,
date_format="DD.MM.YYYY",
datetime_format="DD.MM.YYYY HH-MM-SS")
df.to_excel(writer1, "test1")
df.to_excel(writer2, "test1")
writer1.close()
writer2.close()
reader1 = ExcelFile(self.path)
reader2 = ExcelFile(filename2)
rs1 = read_excel(reader1, "test1", index_col=0)
rs2 = read_excel(reader2, "test1", index_col=0)
tm.assert_frame_equal(rs1, rs2)
# Since the reader returns a datetime object for dates,
# we need to use df_expected to check the result.
tm.assert_frame_equal(rs2, df_expected)
def test_to_excel_interval_no_labels(self, *_):
# see gh-19242
#
# Test writing Interval without labels.
frame = DataFrame(np.random.randint(-10, 10, size=(20, 1)),
dtype=np.int64)
expected = frame.copy()
frame["new"] = pd.cut(frame[0], 10)
expected["new"] = pd.cut(expected[0], 10).astype(str)
frame.to_excel(self.path, "test1")
reader = ExcelFile(self.path)
recons = read_excel(reader, "test1", index_col=0)
tm.assert_frame_equal(expected, recons)
def test_to_excel_interval_labels(self, *_):
# see gh-19242
#
# Test writing Interval with labels.
frame = DataFrame(np.random.randint(-10, 10, size=(20, 1)),
dtype=np.int64)
expected = frame.copy()
intervals = pd.cut(frame[0], 10, labels=["A", "B", "C", "D", "E",
"F", "G", "H", "I", "J"])
frame["new"] = intervals
expected["new"] = pd.Series(list(intervals))
frame.to_excel(self.path, "test1")
reader = ExcelFile(self.path)
recons = read_excel(reader, "test1", index_col=0)
tm.assert_frame_equal(expected, recons)
def test_to_excel_timedelta(self, *_):
# see gh-19242, gh-9155
#
# Test writing timedelta to xls.
frame = DataFrame(np.random.randint(-10, 10, size=(20, 1)),
columns=["A"], dtype=np.int64)
expected = frame.copy()
frame["new"] = frame["A"].apply(lambda x: timedelta(seconds=x))
expected["new"] = expected["A"].apply(
lambda x: timedelta(seconds=x).total_seconds() / float(86400))
frame.to_excel(self.path, "test1")
reader = ExcelFile(self.path)
recons = read_excel(reader, "test1", index_col=0)
tm.assert_frame_equal(expected, recons)
def test_to_excel_periodindex(self, merge_cells, engine, ext):
frame = self.tsframe
xp = frame.resample('M', kind='period').mean()
xp.to_excel(self.path, 'sht1')
reader = ExcelFile(self.path)
rs = read_excel(reader, 'sht1', index_col=0)
tm.assert_frame_equal(xp, rs.to_period('M'))
def test_to_excel_multiindex(self, merge_cells, engine, ext):
frame = self.frame
arrays = np.arange(len(frame.index) * 2).reshape(2, -1)
new_index = MultiIndex.from_arrays(arrays,
names=['first', 'second'])
frame.index = new_index
frame.to_excel(self.path, 'test1', header=False)
frame.to_excel(self.path, 'test1', columns=['A', 'B'])
# round trip
frame.to_excel(self.path, 'test1', merge_cells=merge_cells)
reader = ExcelFile(self.path)
df = read_excel(reader, 'test1', index_col=[0, 1])
tm.assert_frame_equal(frame, df)
# GH13511
def test_to_excel_multiindex_nan_label(self, merge_cells, engine, ext):
frame = pd.DataFrame({'A': [None, 2, 3],
'B': [10, 20, 30],
'C': np.random.sample(3)})
frame = frame.set_index(['A', 'B'])
frame.to_excel(self.path, merge_cells=merge_cells)
df = read_excel(self.path, index_col=[0, 1])
tm.assert_frame_equal(frame, df)
# Test for Issue 11328. If column indices are integers, make
# sure they are handled correctly for either setting of
# merge_cells
def test_to_excel_multiindex_cols(self, merge_cells, engine, ext):
frame = self.frame
arrays = np.arange(len(frame.index) * 2).reshape(2, -1)
new_index = MultiIndex.from_arrays(arrays,
names=['first', 'second'])
frame.index = new_index
new_cols_index = MultiIndex.from_tuples([(40, 1), (40, 2),
(50, 1), (50, 2)])
frame.columns = new_cols_index
header = [0, 1]
if not merge_cells:
header = 0
# round trip
frame.to_excel(self.path, 'test1', merge_cells=merge_cells)
reader = ExcelFile(self.path)
df = read_excel(reader, 'test1', header=header,
index_col=[0, 1])
if not merge_cells:
fm = frame.columns.format(sparsify=False,
adjoin=False, names=False)
frame.columns = [".".join(map(str, q)) for q in zip(*fm)]
tm.assert_frame_equal(frame, df)
def test_to_excel_multiindex_dates(self, merge_cells, engine, ext):
# try multiindex with dates
tsframe = self.tsframe.copy()
new_index = [tsframe.index, np.arange(len(tsframe.index))]
tsframe.index = MultiIndex.from_arrays(new_index)
tsframe.index.names = ['time', 'foo']
tsframe.to_excel(self.path, 'test1', merge_cells=merge_cells)
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1',
index_col=[0, 1])
tm.assert_frame_equal(tsframe, recons)
assert recons.index.names == ('time', 'foo')
def test_to_excel_multiindex_no_write_index(self, merge_cells, engine,
ext):
# Test writing and re-reading a MI witout the index. GH 5616.
# Initial non-MI frame.
frame1 = DataFrame({'a': [10, 20], 'b': [30, 40], 'c': [50, 60]})
# Add a MI.
frame2 = frame1.copy()
multi_index = MultiIndex.from_tuples([(70, 80), (90, 100)])
frame2.index = multi_index
# Write out to Excel without the index.
frame2.to_excel(self.path, 'test1', index=False)
# Read it back in.
reader = ExcelFile(self.path)
frame3 = read_excel(reader, 'test1')
# Test that it is the same as the initial frame.
tm.assert_frame_equal(frame1, frame3)
def test_to_excel_float_format(self, *_):
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=["A", "B"], columns=["X", "Y", "Z"])
df.to_excel(self.path, "test1", float_format="%.2f")
reader = ExcelFile(self.path)
result = read_excel(reader, "test1", index_col=0)
expected = DataFrame([[0.12, 0.23, 0.57],
[12.32, 123123.20, 321321.20]],
index=["A", "B"], columns=["X", "Y", "Z"])
tm.assert_frame_equal(result, expected)
def test_to_excel_output_encoding(self, merge_cells, engine, ext):
# Avoid mixed inferred_type.
df = DataFrame([[u"\u0192", u"\u0193", u"\u0194"],
[u"\u0195", u"\u0196", u"\u0197"]],
index=[u"A\u0192", u"B"],
columns=[u"X\u0193", u"Y", u"Z"])
with ensure_clean("__tmp_to_excel_float_format__." + ext) as filename:
df.to_excel(filename, sheet_name="TestSheet", encoding="utf8")
result = read_excel(filename, "TestSheet",
encoding="utf8", index_col=0)
tm.assert_frame_equal(result, df)
def test_to_excel_unicode_filename(self, merge_cells, engine, ext):
with ensure_clean(u("\u0192u.") + ext) as filename:
try:
f = open(filename, "wb")
except UnicodeEncodeError:
pytest.skip("No unicode file names on this system")
else:
f.close()
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=["A", "B"], columns=["X", "Y", "Z"])
df.to_excel(filename, "test1", float_format="%.2f")
reader = ExcelFile(filename)
result = read_excel(reader, "test1", index_col=0)
expected = DataFrame([[0.12, 0.23, 0.57],
[12.32, 123123.20, 321321.20]],
index=["A", "B"], columns=["X", "Y", "Z"])
tm.assert_frame_equal(result, expected)
# def test_to_excel_header_styling_xls(self, merge_cells, engine, ext):
# import StringIO
# s = StringIO(
# """Date,ticker,type,value
# 2001-01-01,x,close,12.2
# 2001-01-01,x,open ,12.1
# 2001-01-01,y,close,12.2
# 2001-01-01,y,open ,12.1
# 2001-02-01,x,close,12.2
# 2001-02-01,x,open ,12.1
# 2001-02-01,y,close,12.2
# 2001-02-01,y,open ,12.1
# 2001-03-01,x,close,12.2
# 2001-03-01,x,open ,12.1
# 2001-03-01,y,close,12.2
# 2001-03-01,y,open ,12.1""")
# df = read_csv(s, parse_dates=["Date"])
# pdf = df.pivot_table(values="value", rows=["ticker"],
# cols=["Date", "type"])
# try:
# import xlwt
# import xlrd
# except ImportError:
# pytest.skip
# filename = '__tmp_to_excel_header_styling_xls__.xls'
# pdf.to_excel(filename, 'test1')
# wbk = xlrd.open_workbook(filename,
# formatting_info=True)
# assert ["test1"] == wbk.sheet_names()
# ws = wbk.sheet_by_name('test1')
# assert [(0, 1, 5, 7), (0, 1, 3, 5), (0, 1, 1, 3)] == ws.merged_cells
# for i in range(0, 2):
# for j in range(0, 7):
# xfx = ws.cell_xf_index(0, 0)
# cell_xf = wbk.xf_list[xfx]
# font = wbk.font_list
# assert 1 == font[cell_xf.font_index].bold
# assert 1 == cell_xf.border.top_line_style
# assert 1 == cell_xf.border.right_line_style
# assert 1 == cell_xf.border.bottom_line_style
# assert 1 == cell_xf.border.left_line_style
# assert 2 == cell_xf.alignment.hor_align
# os.remove(filename)
# def test_to_excel_header_styling_xlsx(self, merge_cells, engine, ext):
# import StringIO
# s = StringIO(
# """Date,ticker,type,value
# 2001-01-01,x,close,12.2
# 2001-01-01,x,open ,12.1
# 2001-01-01,y,close,12.2
# 2001-01-01,y,open ,12.1
# 2001-02-01,x,close,12.2
# 2001-02-01,x,open ,12.1
# 2001-02-01,y,close,12.2
# 2001-02-01,y,open ,12.1
# 2001-03-01,x,close,12.2
# 2001-03-01,x,open ,12.1
# 2001-03-01,y,close,12.2
# 2001-03-01,y,open ,12.1""")
# df = read_csv(s, parse_dates=["Date"])
# pdf = df.pivot_table(values="value", rows=["ticker"],
# cols=["Date", "type"])
# try:
# import openpyxl
# from openpyxl.cell import get_column_letter
# except ImportError:
# pytest.skip
# if openpyxl.__version__ < '1.6.1':
# pytest.skip
# # test xlsx_styling
# filename = '__tmp_to_excel_header_styling_xlsx__.xlsx'
# pdf.to_excel(filename, 'test1')
# wbk = openpyxl.load_workbook(filename)
# assert ["test1"] == wbk.get_sheet_names()
# ws = wbk.get_sheet_by_name('test1')
# xlsaddrs = ["%s2" % chr(i) for i in range(ord('A'), ord('H'))]
# xlsaddrs += ["A%s" % i for i in range(1, 6)]
# xlsaddrs += ["B1", "D1", "F1"]
# for xlsaddr in xlsaddrs:
# cell = ws.cell(xlsaddr)
# assert cell.style.font.bold
# assert (openpyxl.style.Border.BORDER_THIN ==
# cell.style.borders.top.border_style)
# assert (openpyxl.style.Border.BORDER_THIN ==
# cell.style.borders.right.border_style)
# assert (openpyxl.style.Border.BORDER_THIN ==
# cell.style.borders.bottom.border_style)
# assert (openpyxl.style.Border.BORDER_THIN ==
# cell.style.borders.left.border_style)
# assert (openpyxl.style.Alignment.HORIZONTAL_CENTER ==
# cell.style.alignment.horizontal)
# mergedcells_addrs = ["C1", "E1", "G1"]
# for maddr in mergedcells_addrs:
# assert ws.cell(maddr).merged
# os.remove(filename)
@pytest.mark.parametrize("use_headers", [True, False])
@pytest.mark.parametrize("r_idx_nlevels", [1, 2, 3])
@pytest.mark.parametrize("c_idx_nlevels", [1, 2, 3])
def test_excel_010_hemstring(self, merge_cells, engine, ext,
c_idx_nlevels, r_idx_nlevels, use_headers):
def roundtrip(data, header=True, parser_hdr=0, index=True):
data.to_excel(self.path, header=header,
merge_cells=merge_cells, index=index)
xf = ExcelFile(self.path)
return read_excel(xf, xf.sheet_names[0], header=parser_hdr)
# Basic test.
parser_header = 0 if use_headers else None
res = roundtrip(DataFrame([0]), use_headers, parser_header)
assert res.shape == (1, 2)
assert res.iloc[0, 0] is not np.nan
# More complex tests with multi-index.
nrows = 5
ncols = 3
from pandas.util.testing import makeCustomDataframe as mkdf
# ensure limited functionality in 0.10
# override of gh-2370 until sorted out in 0.11
df = mkdf(nrows, ncols, r_idx_nlevels=r_idx_nlevels,
c_idx_nlevels=c_idx_nlevels)
# This if will be removed once multi-column Excel writing
# is implemented. For now fixing gh-9794.
if c_idx_nlevels > 1:
with pytest.raises(NotImplementedError):
roundtrip(df, use_headers, index=False)
else:
res = roundtrip(df, use_headers)
if use_headers:
assert res.shape == (nrows, ncols + r_idx_nlevels)
else:
# First row taken as columns.
assert res.shape == (nrows - 1, ncols + r_idx_nlevels)
# No NaNs.
for r in range(len(res.index)):
for c in range(len(res.columns)):
assert res.iloc[r, c] is not np.nan
def test_duplicated_columns(self, *_):
# see gh-5235
df = DataFrame([[1, 2, 3], [1, 2, 3], [1, 2, 3]],
columns=["A", "B", "B"])
df.to_excel(self.path, "test1")
expected = DataFrame([[1, 2, 3], [1, 2, 3], [1, 2, 3]],
columns=["A", "B", "B.1"])
# By default, we mangle.
result = read_excel(self.path, "test1", index_col=0)
tm.assert_frame_equal(result, expected)
# Explicitly, we pass in the parameter.
result = read_excel(self.path, "test1", index_col=0,
mangle_dupe_cols=True)
tm.assert_frame_equal(result, expected)
# see gh-11007, gh-10970
df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]],
columns=["A", "B", "A", "B"])
df.to_excel(self.path, "test1")
result = read_excel(self.path, "test1", index_col=0)
expected = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]],
columns=["A", "B", "A.1", "B.1"])
tm.assert_frame_equal(result, expected)
# see gh-10982
df.to_excel(self.path, "test1", index=False, header=False)
result = read_excel(self.path, "test1", header=None)
expected = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]])
tm.assert_frame_equal(result, expected)
msg = "Setting mangle_dupe_cols=False is not supported yet"
with pytest.raises(ValueError, match=msg):
read_excel(self.path, "test1", header=None, mangle_dupe_cols=False)
def test_swapped_columns(self, merge_cells, engine, ext):
# Test for issue #5427.
write_frame = DataFrame({'A': [1, 1, 1],
'B': [2, 2, 2]})
write_frame.to_excel(self.path, 'test1', columns=['B', 'A'])
read_frame = read_excel(self.path, 'test1', header=0)
tm.assert_series_equal(write_frame['A'], read_frame['A'])
tm.assert_series_equal(write_frame['B'], read_frame['B'])
def test_invalid_columns(self, *_):
# see gh-10982
write_frame = DataFrame({"A": [1, 1, 1],
"B": [2, 2, 2]})
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
write_frame.to_excel(self.path, "test1", columns=["B", "C"])
expected = write_frame.reindex(columns=["B", "C"])
read_frame = read_excel(self.path, "test1", index_col=0)
tm.assert_frame_equal(expected, read_frame)
with pytest.raises(KeyError):
write_frame.to_excel(self.path, "test1", columns=["C", "D"])
def test_comment_arg(self, *_):
# see gh-18735
#
# Test the comment argument functionality to read_excel.
# Create file to read in.
df = DataFrame({"A": ["one", "#one", "one"],
"B": ["two", "two", "#two"]})
df.to_excel(self.path, "test_c")
# Read file without comment arg.
result1 = read_excel(self.path, "test_c", index_col=0)
result1.iloc[1, 0] = None
result1.iloc[1, 1] = None
result1.iloc[2, 1] = None
result2 = read_excel(self.path, "test_c", comment="#", index_col=0)
tm.assert_frame_equal(result1, result2)
def test_comment_default(self, merge_cells, engine, ext):
# Re issue #18735
# Test the comment argument default to read_excel
# Create file to read in
df = DataFrame({'A': ['one', '#one', 'one'],
'B': ['two', 'two', '#two']})
df.to_excel(self.path, 'test_c')
# Read file with default and explicit comment=None
result1 = read_excel(self.path, 'test_c')
result2 = read_excel(self.path, 'test_c', comment=None)
tm.assert_frame_equal(result1, result2)
def test_comment_used(self, *_):
# see gh-18735
#
# Test the comment argument is working as expected when used.
# Create file to read in.
df = DataFrame({"A": ["one", "#one", "one"],
"B": ["two", "two", "#two"]})
df.to_excel(self.path, "test_c")
# Test read_frame_comment against manually produced expected output.
expected = DataFrame({"A": ["one", None, "one"],
"B": ["two", None, None]})
result = read_excel(self.path, "test_c", comment="#", index_col=0)
tm.assert_frame_equal(result, expected)
def test_comment_empty_line(self, merge_cells, engine, ext):
# Re issue #18735
# Test that read_excel ignores commented lines at the end of file
df = DataFrame({'a': ['1', '#2'], 'b': ['2', '3']})
df.to_excel(self.path, index=False)
# Test that all-comment lines at EoF are ignored
expected = DataFrame({'a': [1], 'b': [2]})
result = read_excel(self.path, comment='#')
tm.assert_frame_equal(result, expected)
def test_datetimes(self, merge_cells, engine, ext):
# Test writing and reading datetimes. For issue #9139. (xref #9185)
datetimes = [datetime(2013, 1, 13, 1, 2, 3),
datetime(2013, 1, 13, 2, 45, 56),
datetime(2013, 1, 13, 4, 29, 49),
datetime(2013, 1, 13, 6, 13, 42),
datetime(2013, 1, 13, 7, 57, 35),
datetime(2013, 1, 13, 9, 41, 28),
datetime(2013, 1, 13, 11, 25, 21),
datetime(2013, 1, 13, 13, 9, 14),
datetime(2013, 1, 13, 14, 53, 7),
datetime(2013, 1, 13, 16, 37, 0),
datetime(2013, 1, 13, 18, 20, 52)]
write_frame = DataFrame({'A': datetimes})
write_frame.to_excel(self.path, 'Sheet1')
read_frame = read_excel(self.path, 'Sheet1', header=0)
tm.assert_series_equal(write_frame['A'], read_frame['A'])
def test_bytes_io(self, merge_cells, engine, ext):
# see gh-7074
bio = BytesIO()
df = DataFrame(np.random.randn(10, 2))
# Pass engine explicitly, as there is no file path to infer from.
writer = ExcelWriter(bio, engine=engine)
df.to_excel(writer)
writer.save()
bio.seek(0)
reread_df = read_excel(bio, index_col=0)
tm.assert_frame_equal(df, reread_df)
def test_write_lists_dict(self, *_):
# see gh-8188.
df = DataFrame({"mixed": ["a", ["b", "c"], {"d": "e", "f": 2}],
"numeric": [1, 2, 3.0],
"str": ["apple", "banana", "cherry"]})
df.to_excel(self.path, "Sheet1")
read = read_excel(self.path, "Sheet1", header=0, index_col=0)
expected = df.copy()
expected.mixed = expected.mixed.apply(str)
expected.numeric = expected.numeric.astype("int64")
tm.assert_frame_equal(read, expected)
def test_true_and_false_value_options(self, *_):
# see gh-13347
df = pd.DataFrame([["foo", "bar"]], columns=["col1", "col2"])
expected = df.replace({"foo": True, "bar": False})
df.to_excel(self.path)
read_frame = read_excel(self.path, true_values=["foo"],
false_values=["bar"], index_col=0)
tm.assert_frame_equal(read_frame, expected)
def test_freeze_panes(self, *_):
# see gh-15160
expected = DataFrame([[1, 2], [3, 4]], columns=["col1", "col2"])
expected.to_excel(self.path, "Sheet1", freeze_panes=(1, 1))
result = read_excel(self.path, index_col=0)
tm.assert_frame_equal(result, expected)
    def test_path_path_lib(self, merge_cells, engine, ext):
        # A pathlib.Path should be accepted for both writing and reading;
        # round_trip_pathlib wraps the temp file path in pathlib.Path.
        df = tm.makeDataFrame()
        writer = partial(df.to_excel, engine=engine)
        reader = partial(pd.read_excel, index_col=0)
        result = tm.round_trip_pathlib(writer, reader,
                                       path="foo.{ext}".format(ext=ext))
        tm.assert_frame_equal(result, df)
def test_path_local_path(self, merge_cells, engine, ext):
df = tm.makeDataFrame()
writer = partial(df.to_excel, engine=engine)
reader = partial(pd.read_excel, index_col=0)
result = tm.round_trip_pathlib(writer, reader,
path="foo.{ext}".format(ext=ext))
tm.assert_frame_equal(result, df)
@td.skip_if_no('openpyxl')
@pytest.mark.parametrize("merge_cells,ext,engine", [
(None, '.xlsx', 'openpyxl')])
class TestOpenpyxlTests(_WriterBase):
def test_to_excel_styleconverter(self, merge_cells, ext, engine):
from openpyxl import styles
hstyle = {
"font": {
"color": '00FF0000',
"bold": True,
},
"borders": {
"top": "thin",
"right": "thin",
"bottom": "thin",
"left": "thin",
},
"alignment": {
"horizontal": "center",
"vertical": "top",
},
"fill": {
"patternType": 'solid',
'fgColor': {
'rgb': '006666FF',
'tint': 0.3,
},
},
"number_format": {
"format_code": "0.00"
},
"protection": {
"locked": True,
"hidden": False,
},
}
font_color = styles.Color('00FF0000')
font = styles.Font(bold=True, color=font_color)
side = styles.Side(style=styles.borders.BORDER_THIN)
border = styles.Border(top=side, right=side, bottom=side, left=side)
alignment = styles.Alignment(horizontal='center', vertical='top')
fill_color = styles.Color(rgb='006666FF', tint=0.3)
fill = styles.PatternFill(patternType='solid', fgColor=fill_color)
number_format = '0.00'
protection = styles.Protection(locked=True, hidden=False)
kw = _OpenpyxlWriter._convert_to_style_kwargs(hstyle)
assert kw['font'] == font
assert kw['border'] == border
assert kw['alignment'] == alignment
assert kw['fill'] == fill
assert kw['number_format'] == number_format
assert kw['protection'] == protection
def test_write_cells_merge_styled(self, merge_cells, ext, engine):
from pandas.io.formats.excel import ExcelCell
sheet_name = 'merge_styled'
sty_b1 = {'font': {'color': '00FF0000'}}
sty_a2 = {'font': {'color': '0000FF00'}}
initial_cells = [
ExcelCell(col=1, row=0, val=42, style=sty_b1),
ExcelCell(col=0, row=1, val=99, style=sty_a2),
]
sty_merged = {'font': {'color': '000000FF', 'bold': True}}
sty_kwargs = _OpenpyxlWriter._convert_to_style_kwargs(sty_merged)
openpyxl_sty_merged = sty_kwargs['font']
merge_cells = [
ExcelCell(col=0, row=0, val='pandas',
mergestart=1, mergeend=1, style=sty_merged),
]
with ensure_clean(ext) as path:
writer = _OpenpyxlWriter(path)
writer.write_cells(initial_cells, sheet_name=sheet_name)
writer.write_cells(merge_cells, sheet_name=sheet_name)
wks = writer.sheets[sheet_name]
xcell_b1 = wks['B1']
xcell_a2 = wks['A2']
assert xcell_b1.font == openpyxl_sty_merged
assert xcell_a2.font == openpyxl_sty_merged
@pytest.mark.parametrize("mode,expected", [
('w', ['baz']), ('a', ['foo', 'bar', 'baz'])])
def test_write_append_mode(self, merge_cells, ext, engine, mode, expected):
import openpyxl
df = DataFrame([1], columns=['baz'])
with ensure_clean(ext) as f:
wb = openpyxl.Workbook()
wb.worksheets[0].title = 'foo'
wb.worksheets[0]['A1'].value = 'foo'
wb.create_sheet('bar')
wb.worksheets[1]['A1'].value = 'bar'
wb.save(f)
writer = ExcelWriter(f, engine=engine, mode=mode)
df.to_excel(writer, sheet_name='baz', index=False)
writer.save()
wb2 = openpyxl.load_workbook(f)
result = [sheet.title for sheet in wb2.worksheets]
assert result == expected
for index, cell_value in enumerate(expected):
assert wb2.worksheets[index]['A1'].value == cell_value
@td.skip_if_no('xlwt')
@pytest.mark.parametrize("merge_cells,ext,engine", [
    (None, '.xls', 'xlwt')])
class TestXlwtTests(_WriterBase):
    """xlwt engine tests: MultiIndex handling, style conversion, append mode."""

    def test_excel_raise_error_on_multiindex_columns_and_no_index(
            self, merge_cells, ext, engine):
        # MultiIndex as columns is not yet implemented (GH 9794).
        columns = MultiIndex.from_tuples([('site', ''),
                                          ('2014', 'height'),
                                          ('2014', 'weight')])
        frame = DataFrame(np.random.randn(10, 3), columns=columns)
        with pytest.raises(NotImplementedError):
            with ensure_clean(ext) as path:
                frame.to_excel(path, index=False)

    def test_excel_multiindex_columns_and_index_true(self, merge_cells, ext,
                                                     engine):
        # Writing MultiIndex columns succeeds when the index is kept.
        columns = MultiIndex.from_tuples([('site', ''),
                                          ('2014', 'height'),
                                          ('2014', 'weight')])
        frame = pd.DataFrame(np.random.randn(10, 3), columns=columns)
        with ensure_clean(ext) as path:
            frame.to_excel(path, index=True)

    def test_excel_multiindex_index(self, merge_cells, ext, engine):
        # A MultiIndex on the row axis works, so no error expected (GH 9794).
        rows = MultiIndex.from_tuples([('site', ''),
                                       ('2014', 'height'),
                                       ('2014', 'weight')])
        frame = DataFrame(np.random.randn(3, 10), index=rows)
        with ensure_clean(ext) as path:
            frame.to_excel(path, index=False)

    def test_to_excel_styleconverter(self, merge_cells, ext, engine):
        """Openpyxl-style dicts are translated into xlwt style objects."""
        import xlwt
        style_dict = {"font": {"bold": True},
                      "borders": {"top": "thin",
                                  "right": "thin",
                                  "bottom": "thin",
                                  "left": "thin"},
                      "alignment": {"horizontal": "center", "vertical": "top"}}
        converted = _XlwtWriter._convert_to_style(style_dict)
        assert converted.font.bold
        for side in ('top', 'right', 'bottom', 'left'):
            assert getattr(converted.borders, side) == xlwt.Borders.THIN
        assert converted.alignment.horz == xlwt.Alignment.HORZ_CENTER
        assert converted.alignment.vert == xlwt.Alignment.VERT_TOP

    def test_write_append_mode_raises(self, merge_cells, ext, engine):
        # xlwt cannot append to an existing workbook.
        msg = "Append mode is not supported with xlwt!"
        with ensure_clean(ext) as path:
            with pytest.raises(ValueError, match=msg):
                ExcelWriter(path, engine=engine, mode='a')
@td.skip_if_no('xlsxwriter')
@pytest.mark.parametrize("merge_cells,ext,engine", [
    (None, '.xlsx', 'xlsxwriter')])
class TestXlsxWriterTests(_WriterBase):
    """xlsxwriter engine specifics: column formats and append-mode rejection."""

    @td.skip_if_no('openpyxl')
    def test_column_format(self, merge_cells, ext, engine):
        """A column number format set via xlsxwriter survives a round trip.

        The file is written with xlsxwriter and read back with openpyxl; the
        try/except chains below cover several openpyxl API generations, so
        their order matters.
        """
        # Test that column formats are applied to cells. Test for issue #9167.
        # Applicable to xlsxwriter only.
        with warnings.catch_warnings():
            # Ignore the openpyxl lxml warning.
            warnings.simplefilter("ignore")
            import openpyxl
        with ensure_clean(ext) as path:
            frame = DataFrame({'A': [123456, 123456],
                               'B': [123456, 123456]})
            writer = ExcelWriter(path)
            frame.to_excel(writer)
            # Add a number format to col B and ensure it is applied to cells.
            num_format = '#,##0'
            write_workbook = writer.book
            write_worksheet = write_workbook.worksheets()[0]
            col_format = write_workbook.add_format({'num_format': num_format})
            write_worksheet.set_column('B:B', None, col_format)
            writer.save()
            read_workbook = openpyxl.load_workbook(path)
            # Newer openpyxl supports dict-style sheet access; fall back to
            # the removed get_sheet_by_name() on old versions.
            try:
                read_worksheet = read_workbook['Sheet1']
            except TypeError:
                # compat
                read_worksheet = read_workbook.get_sheet_by_name(name='Sheet1')
            # Get the number format from the cell.
            try:
                cell = read_worksheet['B2']
            except TypeError:
                # compat
                cell = read_worksheet.cell('B2')
            # Attribute location of the number format also moved across
            # openpyxl versions.
            try:
                read_num_format = cell.number_format
            except Exception:
                read_num_format = cell.style.number_format._format_code
            assert read_num_format == num_format

    def test_write_append_mode_raises(self, merge_cells, ext, engine):
        """mode='a' must be rejected: xlsxwriter cannot append."""
        msg = "Append mode is not supported with xlsxwriter!"
        with ensure_clean(ext) as f:
            with pytest.raises(ValueError, match=msg):
                ExcelWriter(f, engine=engine, mode='a')
class TestExcelWriterEngineTests(object):
    """ExcelWriter engine dispatch by extension and custom writer registration."""

    @pytest.mark.parametrize('klass,ext', [
        pytest.param(_XlsxWriter, '.xlsx', marks=pytest.mark.skipif(
            not td.safe_import('xlsxwriter'), reason='No xlsxwriter')),
        pytest.param(_OpenpyxlWriter, '.xlsx', marks=pytest.mark.skipif(
            not td.safe_import('openpyxl'), reason='No openpyxl')),
        pytest.param(_XlwtWriter, '.xls', marks=pytest.mark.skipif(
            not td.safe_import('xlwt'), reason='No xlwt'))
    ])
    def test_ExcelWriter_dispatch(self, klass, ext):
        """The file extension selects the matching writer class."""
        with ensure_clean(ext) as path:
            writer = ExcelWriter(path)
            if ext == '.xlsx' and td.safe_import('xlsxwriter'):
                # xlsxwriter has preference over openpyxl if both installed
                assert isinstance(writer, _XlsxWriter)
            else:
                assert isinstance(writer, klass)

    def test_ExcelWriter_dispatch_raises(self):
        """An unknown extension raises ValueError (no engine can handle it)."""
        with pytest.raises(ValueError, match='No engine'):
            ExcelWriter('nothing')

    @pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
    def test_register_writer(self):
        """register_writer() makes a custom ExcelWriter subclass dispatchable.

        DummyClass records save/write_cells invocations in the enclosing
        lists; check_called() asserts both happened and resets the lists, so
        the call order below is significant.
        """
        # some awkward mocking to test out dispatch and such actually works
        called_save = []
        called_write_cells = []

        class DummyClass(ExcelWriter):
            called_save = False
            called_write_cells = False
            supported_extensions = ['test', 'xlsx', 'xls']
            engine = 'dummy'

            def save(self):
                called_save.append(True)

            def write_cells(self, *args, **kwargs):
                called_write_cells.append(True)

        def check_called(func):
            # Run func, assert both hooks fired, then reset the recorders.
            func()
            assert len(called_save) >= 1
            assert len(called_write_cells) >= 1
            del called_save[:]
            del called_write_cells[:]

        with pd.option_context('io.excel.xlsx.writer', 'dummy'):
            register_writer(DummyClass)
            writer = ExcelWriter('something.test')
            assert isinstance(writer, DummyClass)
            df = tm.makeCustomDataframe(1, 1)
            with catch_warnings(record=True):
                panel = tm.makePanel()
                func = lambda: df.to_excel('something.test')
                check_called(func)
                check_called(lambda: panel.to_excel('something.test'))
                check_called(lambda: df.to_excel('something.xlsx'))
                check_called(
                    lambda: df.to_excel(
                        'something.xls', engine='dummy'))
@pytest.mark.parametrize('engine', [
    pytest.param('xlwt',
                 marks=pytest.mark.xfail(reason='xlwt does not support '
                                                'openpyxl-compatible '
                                                'style dicts')),
    'xlsxwriter',
    'openpyxl',
])
def test_styler_to_excel(engine):
    """Styler.to_excel applies CSS-derived styles per engine.

    Writes four sheets (plain, unstyled Styler, default-converted styles,
    custom-converted styles), reads everything back with openpyxl, and
    compares cell-by-cell.  Cell references below (B2, C3, ...) correspond
    to the rows of the ``style`` frame.
    """
    def style(df):
        """One CSS declaration per targeted cell; blank elsewhere."""
        # XXX: RGB colors not supported in xlwt
        return DataFrame([['font-weight: bold', '', ''],
                          ['', 'color: blue', ''],
                          ['', '', 'text-decoration: underline'],
                          ['border-style: solid', '', ''],
                          ['', 'font-style: italic', ''],
                          ['', '', 'text-align: right'],
                          ['background-color: red', '', ''],
                          ['number-format: 0%', '', ''],
                          ['', '', ''],
                          ['', '', ''],
                          ['', '', '']],
                         index=df.index, columns=df.columns)

    def assert_equal_style(cell1, cell2, engine):
        """Assert two openpyxl cells carry identical style attributes."""
        if engine in ['xlsxwriter', 'openpyxl']:
            pytest.xfail(reason=("GH25351: failing on some attribute "
                                 "comparisons in {}".format(engine)))
        # XXX: should find a better way to check equality
        assert cell1.alignment.__dict__ == cell2.alignment.__dict__
        assert cell1.border.__dict__ == cell2.border.__dict__
        assert cell1.fill.__dict__ == cell2.fill.__dict__
        assert cell1.font.__dict__ == cell2.font.__dict__
        assert cell1.number_format == cell2.number_format
        assert cell1.protection.__dict__ == cell2.protection.__dict__

    def custom_converter(css):
        """Map any non-empty CSS string to bold, everything else to no style."""
        # use bold iff there is custom style attached to the cell
        if css.strip(' \n;'):
            return {'font': {'bold': True}}
        return {}

    pytest.importorskip('jinja2')
    pytest.importorskip(engine)

    # Prepare spreadsheets

    df = DataFrame(np.random.randn(11, 3))
    with ensure_clean('.xlsx' if engine != 'xlwt' else '.xls') as path:
        writer = ExcelWriter(path, engine=engine)
        df.to_excel(writer, sheet_name='frame')
        df.style.to_excel(writer, sheet_name='unstyled')
        styled = df.style.apply(style, axis=None)
        styled.to_excel(writer, sheet_name='styled')
        ExcelFormatter(styled, style_converter=custom_converter).write(
            writer, sheet_name='custom')
        writer.save()

        if engine not in ('openpyxl', 'xlsxwriter'):
            # For other engines, we only smoke test
            return

        # Everything below reads the result back with openpyxl.
        openpyxl = pytest.importorskip('openpyxl')
        wb = openpyxl.load_workbook(path)

        # (1) compare DataFrame.to_excel and Styler.to_excel when unstyled
        n_cells = 0
        for col1, col2 in zip(wb['frame'].columns,
                              wb['unstyled'].columns):
            assert len(col1) == len(col2)
            for cell1, cell2 in zip(col1, col2):
                assert cell1.value == cell2.value
                assert_equal_style(cell1, cell2, engine)
                n_cells += 1

        # ensure iteration actually happened:
        assert n_cells == (11 + 1) * (3 + 1)

        # (2) check styling with default converter

        # XXX: openpyxl (as at 2.4) prefixes colors with 00, xlsxwriter with FF
        alpha = '00' if engine == 'openpyxl' else 'FF'

        n_cells = 0
        for col1, col2 in zip(wb['frame'].columns,
                              wb['styled'].columns):
            assert len(col1) == len(col2)
            for cell1, cell2 in zip(col1, col2):
                ref = '%s%d' % (cell2.column, cell2.row)
                # XXX: this isn't as strong a test as ideal; we should
                #      confirm that differences are exclusive
                if ref == 'B2':
                    assert not cell1.font.bold
                    assert cell2.font.bold
                elif ref == 'C3':
                    assert cell1.font.color.rgb != cell2.font.color.rgb
                    assert cell2.font.color.rgb == alpha + '0000FF'
                elif ref == 'D4':
                    # This fails with engine=xlsxwriter due to
                    # https://bitbucket.org/openpyxl/openpyxl/issues/800
                    if engine == 'xlsxwriter' \
                            and (LooseVersion(openpyxl.__version__) <
                                 LooseVersion('2.4.6')):
                        pass
                    else:
                        assert cell1.font.underline != cell2.font.underline
                        assert cell2.font.underline == 'single'
                elif ref == 'B5':
                    assert not cell1.border.left.style
                    assert (cell2.border.top.style ==
                            cell2.border.right.style ==
                            cell2.border.bottom.style ==
                            cell2.border.left.style ==
                            'medium')
                elif ref == 'C6':
                    assert not cell1.font.italic
                    assert cell2.font.italic
                elif ref == 'D7':
                    assert (cell1.alignment.horizontal !=
                            cell2.alignment.horizontal)
                    assert cell2.alignment.horizontal == 'right'
                elif ref == 'B8':
                    assert cell1.fill.fgColor.rgb != cell2.fill.fgColor.rgb
                    assert cell1.fill.patternType != cell2.fill.patternType
                    assert cell2.fill.fgColor.rgb == alpha + 'FF0000'
                    assert cell2.fill.patternType == 'solid'
                elif ref == 'B9':
                    assert cell1.number_format == 'General'
                    assert cell2.number_format == '0%'
                else:
                    assert_equal_style(cell1, cell2, engine)

                assert cell1.value == cell2.value
                n_cells += 1

        assert n_cells == (11 + 1) * (3 + 1)

        # (3) check styling with custom converter
        n_cells = 0
        for col1, col2 in zip(wb['frame'].columns,
                              wb['custom'].columns):
            assert len(col1) == len(col2)
            for cell1, cell2 in zip(col1, col2):
                ref = '%s%d' % (cell2.column, cell2.row)
                # custom_converter bolds exactly the cells that had any CSS.
                if ref in ('B2', 'C3', 'D4', 'B5', 'C6', 'D7', 'B8', 'B9'):
                    assert not cell1.font.bold
                    assert cell2.font.bold
                else:
                    assert_equal_style(cell1, cell2, engine)

                assert cell1.value == cell2.value
                n_cells += 1

        assert n_cells == (11 + 1) * (3 + 1)
@td.skip_if_no('openpyxl')
@pytest.mark.skipif(not PY36, reason='requires fspath')
class TestFSPath(object):
    """os.fspath() integration for ExcelFile and ExcelWriter."""

    def test_excelfile_fspath(self):
        with tm.ensure_clean('foo.xlsx') as path:
            DataFrame({"A": [1, 2]}).to_excel(path)
            excel_file = ExcelFile(path)
            assert os.fspath(excel_file) == path

    def test_excelwriter_fspath(self):
        with tm.ensure_clean('foo.xlsx') as path:
            excel_writer = ExcelWriter(path)
            assert os.fspath(excel_writer) == str(path)
| [
"atwiinenicxon@gmail.com"
] | atwiinenicxon@gmail.com |
99a885ced2259ddd980d7178fc0d7ae23cd5c7cc | 1334505cc093b2796c80c83a358f39aaa9bb7392 | /things/parmed/LES/run.py | 20454ca47cbeedcf04b143b5b04b152fa4dd1b15 | [
"BSD-2-Clause"
] | permissive | hainm/amber_things | 031703f1b39c1c1ceae3f1186cb1020b68260e13 | c4c6de411e6b83f1d0dd723d892253a88f5125e5 | refs/heads/master | 2021-01-21T04:47:49.404282 | 2016-07-10T23:19:59 | 2016-07-10T23:19:59 | 36,897,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 61 | py | import parmed as pmd
# Parse the 'new.parm7'/'new.rst7' pair with ParmEd -- presumably an Amber
# topology + coordinate set produced earlier; loading them checks they parse.
pmd.load_file('new.parm7', 'new.rst7')
| [
"hainm.comp@gmail.com"
] | hainm.comp@gmail.com |
71ac256aa2adf47b4dd47df90dc1e02cc82de548 | 34039eb012beca4fbd0ed96d523bf88bf39cffdf | /potFieldsClassCode/controller - Matthew Chun.py | b0e9208b74749729c11a2753870aa37f24f5e007 | [] | no_license | hsudakar/racecar2019 | c72392edc9e415d584ab2e621b843b8361f03a30 | 40e45bb0490709e5468f303bf37ec19fcf156230 | refs/heads/master | 2020-06-26T03:05:00.533385 | 2019-07-29T17:06:41 | 2019-07-29T17:06:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,047 | py | #!/usr/bin/env python
import numpy as np
import sys, math, random, copy
import rospy, copy, time
from sensor_msgs.msg import LaserScan
from ackermann_msgs.msg import AckermannDriveStamped
import time
class Racecar:
    """Potential-field driver: repels from every LIDAR point and publishes
    the resulting steering/speed on the ackermann drive topic.

    All work is event-driven: the LaserScan subscriber callback (scan)
    triggers drive() on every incoming scan.
    """
    SCAN_TOPIC = '/scan'
    DRIVE_TOPIC = '/drive'
    def __init__(self):
        self.data = None          # latest LaserScan message
        self.data_len = None      # number of range readings in that scan
        self.cmd = AckermannDriveStamped()
        #write your publishers and subscribers here; they should be the same as the wall follower's
        self.laser_sub = rospy.Subscriber(self.SCAN_TOPIC, LaserScan, self.scan, queue_size=1)
        self.drive_pub = rospy.Publisher(self.DRIVE_TOPIC, AckermannDriveStamped, queue_size=1)
        #cartesian points -- to be filled (tuples)
        self.cartPoints = None
        #[speed, angle]
        self.finalVector = [0.5, 0]
        self.prev_angle = None    # used only by the commented-out PD code below
        self.prev_time = None
    def scan(self, data):
        """LaserScan callback: cache the scan and immediately drive."""
        self.data = data
        self.data_len = len(data.ranges)
        self.cartPoints = [None for x in range(self.data_len)]
        self.drive()
    def drive(self):
        '''Publishes drive commands'''
        self.speed, self.angle = self.controller()
        #rospy.loginfo("speed: {} angle: {}".format(self.speed, self.angle))
        # Large negative "speed" (the repulsion magnitude) means an obstacle
        # is very close ahead: back up briefly with mirrored steering.
        if self.speed < -1000: #-500
            self.cmd.drive.speed = -0.5
            self.cmd.drive.steering_angle = - self.angle
            s = time.time()
            while time.time() - s < 0.1:
                self.drive_pub.publish(self.cmd)
        else:
            self.cmd.drive.speed = 5
            self.cmd.drive.steering_angle = self.angle
            self.drive_pub.publish(self.cmd)
    def controller(self):
        """Turn the summed repulsion vector into (magnitude, steering angle).

        Returns (y, angle): y is the forward component of the field (used by
        drive() as a proximity signal), angle the scaled steering command.
        NOTE(review): np.arctan(x/y) divides by y -- assumes the forward
        component is never exactly zero; confirm against real scan data.
        """
        self.convertPoints()
        self.calcFinalVector(self.cartPoints)
        x = self.finalVector[0]
        y = self.finalVector[1]
        angle = np.arctan((x/y))
        angle = -np.rad2deg(angle) / 60 #90
        #self.prev_angle = angle
        rospy.loginfo("a: {}".format(angle))
        # speed = (y + 1000) / 250
        # speed = -100/y + 1
        speed = max(0.5,y/20)
        # rospy.loginfo("y: {} a: {}".format(y, angle))
        # if self.prev_angle == None:
        #     deriv = 0
        #
        # else:
        #
        #     deriv = (angle - self.prev_angle) / (time.time() - self.prev_time) / 100 # .04-.05
        #
        # self.prev_time = time.time()
        # self.prev_angle = angle - deriv
        #
        # rospy.loginfo("a: {} d: {} o: {}".format(angle, deriv, angle-deriv))
        return y, angle #-deriv
    def convertPoints(self):
        '''Convert all current LIDAR data to cartesian coordinates'''
        # x = r*sin(theta), y = r*cos(theta): y is the robot's forward axis.
        for i in range(self.data_len):
            x = self.data.ranges[i] * np.sin((i * self.data.angle_increment) + self.data.angle_min)
            y = self.data.ranges[i] * np.cos((i * self.data.angle_increment) + self.data.angle_min)
            # rospy.loginfo(str(np.rad2deg((i * self.data.angle_increment) + self.data.angle_min)))
            self.cartPoints[i] = (x, y)
            # rospy.loginfo(str(self.cartPoints[i]))
        # self.cartPoints[-1] = (0, -.1)
    def calcFinalVector(self, points):
        '''Calculate the final driving speed and angle'''
        # Each point repels with magnitude 1/r (components -x/r^2, -y/r^2);
        # points with x == 0 are skipped entirely.
        vector = [0, 0]
        for i in range(len(points)):
            x = points[i][0]
            y = points[i][1]
            mag = ((x**2) + (y**2)) ** 0.5
            if x != 0:
                vector[0] += -(x/(mag ** 2))
                vector[1] += -(y/(mag ** 2))
                # rospy.loginfo("mag: {} x: {} y: {} vec_x: {} vec_y: {}".format(mag, x, y, -(x/(mag ** 2)), -(y/(mag ** 2))))
        self.finalVector = vector
# Script entry: register the ROS node; steering happens entirely inside the
# LaserScan subscriber callback (Racecar.scan -> drive), so this loop only
# keeps the process alive at 60 Hz.
rospy.init_node('controller')
rate = rospy.Rate(60)
rc = Racecar()
while not rospy.is_shutdown():
    # TODO implement controller logic here
    rate.sleep()
| [
"bpaez@g.hmc.edu"
] | bpaez@g.hmc.edu |
c35ac0e38e1bf86dd30321937444199e6b545717 | 478009111504d54f45490f71c1e61190ecfede3f | /sesion_4/test_xl2.py | 54fc999f79dc94a336ef6161315e7708b6b13766 | [] | no_license | BeatrizInGitHub/python-sci | 9938c406e9642385d602971e985668765dfabbfa | d8ed9656184b5f08d41430ed5a770dc6f4550893 | refs/heads/master | 2020-05-22T19:46:08.065601 | 2017-02-26T02:15:31 | 2017-02-26T02:15:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | from openpyxl import load_workbook
# Python 2 script: read the rectangular range C4:F10 of an .xlsx workbook
# into a nested list (rows of cell values) using openpyxl.
wb = load_workbook("Libro1.xlsx")
ws = wb["Hoja1"]
# A 2-D range yields a tuple of row tuples of Cell objects.
cells = ws["C4:F10"]
mat = []
for r in cells:
    row = []
    for cell in r:
        x = cell.value
        row.append(x)
    mat.append(row)
print mat
# Demo: the same top-left value fetched via the sheet and via the matrix.
x = ws["C4"].value
x = mat[0][0]
"badillo.soft@hotmail.com"
] | badillo.soft@hotmail.com |
51cf140ce032eb19a30ee3990d6bf89971df2ab8 | c1fcdd80101aeae0ba8b4ae0e229f58ed58fd41f | /testPredictor.py | fe7bea830d5e4f619513d78c2e0e5b9ab4ea4023 | [] | no_license | luizgh/ModelEvaluation | 367432f0456052f539c61766b00f1f59bc5fa3a5 | 2933918276d0dcc42d484d9163c4b6c9ea32f26a | refs/heads/master | 2016-09-06T04:15:11.140896 | 2014-03-19T00:07:46 | 2014-03-19T00:07:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | import unittest
from Predictor import Predictor
from TestUtils import *
class testPredictor(unittest.TestCase):
    """Unit tests for Predictor.getPredictions."""

    def testAll(self):
        # Each row holds per-class (log) probabilities; the predictor must
        # return the index of the largest entry of every row.
        logProbabilities = numpy.asarray([[0.4, 0.80, 0.50],
                                          [0.45, 0.4, 0.41],
                                          [0.4, 0.41, 0.45]])
        expected = [1, 0, 2]
        target = Predictor()
        # assertEqual: assertEquals is a deprecated alias (removed in 3.12).
        self.assertEqual(expected, target.getPredictions(logProbabilities))


if __name__ == '__main__':
    unittest.main()
| [
"luiz.gh@gmail.com"
] | luiz.gh@gmail.com |
27686e6e5f8eccbf4f84eb31338d61161279c57e | 19e062dd35430ba371305f9ab3bac00eada00bf8 | /manage.py | 99b7e3d68a71c1ea4567ed2bd4909c0316b881dd | [] | no_license | amitraikkr/djangodock_prj | 9b0d5166bff6d29d3979281d0dac3965f2fed65b | 4bb89a9ecb7a650f4a507a5f7c4fdb73d16d2b7c | refs/heads/master | 2023-03-27T02:52:39.001425 | 2021-03-23T00:54:20 | 2021-03-23T00:54:20 | 349,776,732 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point for Django's command-line administrative tasks."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangodock_prj.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"amitraikkr760@gmail.com"
] | amitraikkr760@gmail.com |
a14a5c7fab69f053d5297c0667fd620b3f5ce400 | 107a379811077c75bf884bc925ca28b82e62a0da | /monitor/etc/graphite/local_settings.py | a07530334f21fbde10fea535b79df6539e09d1a6 | [] | no_license | stackdio-formulas/monitoring-formula | f11aff5196b07e48328723e13f3215b5b72e4efa | 0582b28c0c43eb796bd94c5dd893ed0874335c0d | refs/heads/master | 2021-04-22T06:44:34.350717 | 2017-10-16T14:22:00 | 2017-10-16T14:22:00 | 25,693,796 | 1 | 2 | null | 2017-10-16T14:22:01 | 2014-10-24T15:04:15 | Ruby | UTF-8 | Python | false | false | 9,004 | py | {%- set secret_key = salt['pillar.get']('monitor:graphite:secret_key') -%}
{%- set storage_dir = salt['pillar.get']('monitor:graphite:storage_dir') -%}
{%- set db_password = salt['pillar.get']('monitor:graphite:db_password') -%}
## Graphite local_settings.py
# Edit this file to customize the default Graphite webapp settings
#
# Additional customizations to Django settings can be added to this file as well
#####################################
# General Configuration #
#####################################
# Set this to a long, random unique string to use as a secret key for this
# install. This key is used for salting of hashes used in auth tokens,
# CRSF middleware, cookie storage, etc. This should be set identically among
# instances if used behind a load balancer.
#SECRET_KEY = 'UNSAFE_DEFAULT'
SECRET_KEY = '{{ secret_key }}'
# In Django 1.5+ set this to the list of hosts your graphite instances is
# accessible as. See:
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-ALLOWED_HOSTS
#ALLOWED_HOSTS = [ '*' ]
# Set your local timezone (Django's default is America/Chicago)
# If your graphs appear to be offset by a couple hours then this probably
# needs to be explicitly set to your local timezone.
#TIME_ZONE = 'America/Los_Angeles'
TIME_ZONE = 'America/Chicago'
# Override this to provide documentation specific to your Graphite deployment
#DOCUMENTATION_URL = "http://graphite.readthedocs.org/"
# Logging
# True see: https://answers.launchpad.net/graphite/+question/159731
LOG_RENDERING_PERFORMANCE = True
LOG_CACHE_PERFORMANCE = True
LOG_METRIC_ACCESS = True
# Enable full debug page display on exceptions (Internal Server Error pages)
#DEBUG = True
# If using RRD files and rrdcached, set to the address or socket of the daemon
#FLUSHRRDCACHED = 'unix:/var/run/rrdcached.sock'
# This lists the memcached servers that will be used by this webapp.
# If you have a cluster of webapps you should ensure all of them
# have the *exact* same value for this setting. That will maximize cache
# efficiency. Setting MEMCACHE_HOSTS to be empty will turn off use of
# memcached entirely.
#
# You should not use the loopback address (127.0.0.1) here if using clustering
# as every webapp in the cluster should use the exact same values to prevent
# unneeded cache misses. Set to [] to disable caching of images and fetched data
#MEMCACHE_HOSTS = ['10.10.10.10:11211', '10.10.10.11:11211', '10.10.10.12:11211']
#DEFAULT_CACHE_DURATION = 60 # Cache images and data for 1 minute
#####################################
# Filesystem Paths #
#####################################
# Change only GRAPHITE_ROOT if your install is merely shifted from /opt/graphite
# to somewhere else
GRAPHITE_ROOT = '/usr/share/graphite-web'
# Most installs done outside of a separate tree such as /opt/graphite will only
# need to change these three settings. Note that the default settings for each
# of these is relative to GRAPHITE_ROOT
CONF_DIR = '/etc/graphite'
#STORAGE_DIR = '/var/lib/graphite/whisper'
STORAGE_DIR = '{{ storage_dir }}/whisper'
CONTENT_DIR = '/usr/share/graphite-web/static'
# To further or fully customize the paths, modify the following. Note that the
# default settings for each of these are relative to CONF_DIR and STORAGE_DIR
#
## Webapp config files
#DASHBOARD_CONF = '/opt/graphite/conf/dashboard.conf'
#GRAPHTEMPLATES_CONF = '/opt/graphite/conf/graphTemplates.conf'
## Data directories
# NOTE: If any directory is unreadable in DATA_DIRS it will break metric browsing
#WHISPER_DIR = '/var/lib/graphite/whisper'
WHISPER_DIR = '{{ storage_dir }}/whisper'
#RRD_DIR = '/opt/graphite/storage/rrd'
#DATA_DIRS = [WHISPER_DIR, RRD_DIR] # Default: set from the above variables
LOG_DIR = '/var/log/graphite'
#INDEX_FILE = '/var/lib/graphite/search_index' # Search index file
INDEX_FILE = '{{ storage_dir }}/search_index' # Search index file
#####################################
# Email Configuration #
#####################################
# This is used for emailing rendered Graphs
# Default backend is SMTP
#EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
#EMAIL_HOST = 'localhost'
#EMAIL_PORT = 25
#EMAIL_HOST_USER = ''
#EMAIL_HOST_PASSWORD = ''
#EMAIL_USE_TLS = False
# To drop emails on the floor, enable the Dummy backend:
#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
#####################################
# Authentication Configuration #
#####################################
## LDAP / ActiveDirectory authentication setup
#USE_LDAP_AUTH = True
#LDAP_SERVER = "ldap.mycompany.com"
#LDAP_PORT = 389
# OR
#LDAP_URI = "ldaps://ldap.mycompany.com:636"
#LDAP_SEARCH_BASE = "OU=users,DC=mycompany,DC=com"
#LDAP_BASE_USER = "CN=some_readonly_account,DC=mycompany,DC=com"
#LDAP_BASE_PASS = "readonly_account_password"
#LDAP_USER_QUERY = "(username=%s)" #For Active Directory use "(sAMAccountName=%s)"
#
# If you want to further customize the ldap connection options you should
# directly use ldap.set_option to set the ldap module's global options.
# For example:
#
#import ldap
#ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_ALLOW)
#ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, "/etc/ssl/ca")
#ldap.set_option(ldap.OPT_X_TLS_CERTFILE, "/etc/ssl/mycert.pem")
#ldap.set_option(ldap.OPT_X_TLS_KEYFILE, "/etc/ssl/mykey.pem")
# See http://www.python-ldap.org/ for further details on these options.
## REMOTE_USER authentication. See: https://docs.djangoproject.com/en/dev/howto/auth-remote-user/
USE_REMOTE_USER_AUTHENTICATION = True
# Override the URL for the login link (e.g. for django_openid_auth)
#LOGIN_URL = '/account/login'
##########################
# Database Configuration #
##########################
# By default sqlite is used. If you cluster multiple webapps you will need
# to setup an external database (such as MySQL) and configure all of the webapp
# instances to use the same database. Note that this database is only used to store
# Django models such as saved graphs, dashboards, user preferences, etc.
# Metric data is not stored here.
#
# DO NOT FORGET TO RUN 'manage.py syncdb' AFTER SETTING UP A NEW DATABASE
#
# The following built-in database engines are available:
# django.db.backends.postgresql # Removed in Django 1.4
# django.db.backends.postgresql_psycopg2
# django.db.backends.mysql
# django.db.backends.sqlite3
# django.db.backends.oracle
#
# The default is 'django.db.backends.sqlite3' with file 'graphite.db'
# located in STORAGE_DIR
#
#DATABASES = {
# 'default': {
# 'NAME': '/var/lib/graphite/graphite.db',
# 'ENGINE': 'django.db.backends.sqlite3',
# 'USER': '',
# 'PASSWORD': '',
# 'HOST': '',
# 'PORT': ''
# }
#}
DATABASES = {
'default': {
'NAME': 'graphite',
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'USER': 'graphite',
'PASSWORD': '{{ db_password }}',
'HOST': '127.0.0.1',
'PORT': ''
}
}
#########################
# Cluster Configuration #
#########################
# (To avoid excessive DNS lookups you want to stick to using IP addresses only in this entire section)
#
# This should list the IP address (and optionally port) of the webapp on each
# remote server in the cluster. These servers must each have local access to
# metric data. Note that the first server to return a match for a query will be
# used.
#CLUSTER_SERVERS = ["10.0.2.2:80", "10.0.2.3:80"]
## These are timeout values (in seconds) for requests to remote webapps
#REMOTE_STORE_FETCH_TIMEOUT = 6 # Timeout to fetch series data
#REMOTE_STORE_FIND_TIMEOUT = 2.5 # Timeout for metric find requests
#REMOTE_STORE_RETRY_DELAY = 60 # Time before retrying a failed remote webapp
#REMOTE_FIND_CACHE_DURATION = 300 # Time to cache remote metric find results
## Remote rendering settings
# Set to True to enable rendering of Graphs on a remote webapp
#REMOTE_RENDERING = True
# List of IP (and optionally port) of the webapp on each remote server that
# will be used for rendering. Note that each rendering host should have local
# access to metric data or should have CLUSTER_SERVERS configured
#RENDERING_HOSTS = []
#REMOTE_RENDER_CONNECT_TIMEOUT = 1.0
# If you are running multiple carbon-caches on this machine (typically behind a relay using
# consistent hashing), you'll need to list the ip address, cache query port, and instance name of each carbon-cache
# instance on the local machine (NOT every carbon-cache in the entire cluster). The default cache query port is 7002
# and a common scheme is to use 7102 for instance b, 7202 for instance c, etc.
#
# You *should* use 127.0.0.1 here in most cases
#CARBONLINK_HOSTS = ["127.0.0.1:7002:a", "127.0.0.1:7102:b", "127.0.0.1:7202:c"]
#CARBONLINK_TIMEOUT = 1.0
#####################################
# Additional Django Settings #
#####################################
# Uncomment the following line for direct access to Django settings such as
# MIDDLEWARE_CLASSES or APPS
#from graphite.app_settings import *
| [
"charlie.penner@digitalreasoning.com"
] | charlie.penner@digitalreasoning.com |
ad90f70fbd5560a5d6802f1070e7578b594b569f | ed6324dbe6ef35b21c8c11f11d866ac60b4b0132 | /registry_project/apps/main/models.py | ff3224414822e90e1f9ff37203bb73a97373dd78 | [] | no_license | sdcasas/registry | 2c41a252f18f1b9ab6c80ae77790720d6b07603c | 5919298392e6e048a35d76bae9002ce57b4bff47 | refs/heads/master | 2020-03-28T21:53:22.551255 | 2018-10-01T20:17:08 | 2018-10-01T20:17:08 | 149,189,645 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,967 | py | from django.db import models
from apps.util import models as utilmodels
class Module(models.Model):
    """A named course module with an optional free-form description."""
    name = models.CharField(max_length=300)
    # blank=True/null=True: optional field.  The original blank=None/null=None
    # was a typo (None is not a valid option value; sibling Course uses True).
    # NOTE: this fix requires a schema migration for the NULL constraint.
    description = models.TextField(blank=True, null=True)

    class Meta:
        verbose_name = "Module"
        verbose_name_plural = "Modules"

    def __str__(self):
        return self.name
class Mentor(utilmodels.People):
    """A mentor; extends the shared People base with a Discord handle."""
    # Optional Discord username used for community contact.
    username_discord = models.CharField(max_length=50, null=True, blank=True)
    class Meta:
        verbose_name = "Mentor"
        verbose_name_plural = "Mentores"
class Student(utilmodels.People):
    """A student; extends the shared People base with a Discord handle."""
    # Optional Discord username used for community contact.
    username_discord = models.CharField(max_length=50, null=True, blank=True)
    class Meta:
        verbose_name = "Estudiante"
        verbose_name_plural = "Estudiantes"
        # Assumes the People base defines last_name/first_name -- TODO confirm.
        ordering = ('last_name', 'first_name')
class Payment(models.Model):
    """A payment made by a student (receipt, amount, covered period)."""
    # DO_NOTHING: deleting a Student leaves payments pointing at it.
    student = models.ForeignKey(Student, on_delete=models.DO_NOTHING)
    # Date the payment was made.
    date_pay = models.DateField(null=True, blank=True)
    # Billing period the payment covers (stored as a date).
    period = models.DateField(null=True, blank=True)
    # Free-form description of what was paid for.
    concept = models.TextField(null=True, blank=True)
    # Receipt identifier.
    receipt = models.CharField(max_length=50)
    value = models.PositiveIntegerField(null=True, blank=True)
    class Meta:
        verbose_name = "Pago"
        verbose_name_plural = "Pagos"
    def __str__(self):
        return "{} {}".format(self.student, self.value)
class Teen(models.Model):
    """A named group of students (verbose_name "Equipo" suggests the model
    name is a typo for "Team"; kept as-is for schema compatibility)."""
    name = models.CharField(max_length=200)
    students = models.ManyToManyField(Student, related_name='teen')
    class Meta:
        verbose_name = "Equipo"
        verbose_name_plural = "Equipos"
    def __str__(self):
        return self.name
class Course(models.Model):
    """A course with an optional description and enrolled students."""
    name = models.CharField(max_length=300)
    description = models.TextField(blank=True, null=True)
    # blank=True lets a course be saved with no students enrolled yet.
    students = models.ManyToManyField(Student, blank=True)
    class Meta:
        verbose_name = "Curso"
        verbose_name_plural = "Cursos"
    def __str__(self):
        return self.name
| [
"sdcasas@gmail.com"
] | sdcasas@gmail.com |
3bda4a5bb9ba317a71ccec87961bb03d971ff69d | 3927b532310d8bc236d102854efac7cf11f27855 | /superlists/lists/urls.py | 0e6b25bf7dc2278394c1ddb3ed0a6a12a141bc02 | [] | no_license | lepirescomp/tdd_python_studies | 8ba60a11092b03a0a3495ed93bd019b8159e8d1a | b253599cc78590acd9a66d0fcf0be44ff7bb5f59 | refs/heads/master | 2023-05-15T06:09:36.771539 | 2020-07-25T22:10:07 | 2020-07-25T22:10:07 | 280,016,882 | 0 | 0 | null | 2021-06-10T23:15:19 | 2020-07-16T00:55:32 | JavaScript | UTF-8 | Python | false | false | 247 | py | from django.conf.urls import url
from lists import views
urlpatterns = [
url(r'^new$', views.new_list, name='new_list'),
url(r'^(\d+)/$', views.view_list, name='view_list'),
url(r'^(\d+)/add_item$', views.add_item, name='add_item')
]
| [
"lepirescomp@gmail.com"
] | lepirescomp@gmail.com |
8ccabfc5878446d9961222bfabb5e529f1876ecb | 00f18153e38eef712e8c4c139623226e540a356a | /Credit_Calculator_2.py | 93cef3251f6489ccb05d2013e37c75bf74d6e6ed | [] | no_license | plojhi/Python_Credit_Calculator | 086cb071202105bd23719aeef75b7b38f134a36d | 43a29fcf8772cd08c7f8db16925b5ff6b708e194 | refs/heads/master | 2020-12-27T02:36:39.087863 | 2020-02-02T07:39:23 | 2020-02-02T07:39:23 | 237,735,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,968 | py | import math
import sys
def credit_type():
    """Parse the --key=value CLI options from the global ``args`` and
    dispatch to the matching calculation.

    Annuity mode needs exactly three of (principal, payment, periods) plus
    the interest; diff mode needs principal, periods and interest (a payment
    makes no sense there).  Any negative value is rejected.
    """
    P = 0.0   # --principal
    A = 0.0   # --payment (annuity)
    n = 0     # --periods (months)
    i = 0.0   # --interest, converted to a monthly fraction
    for option in args:
        key, sep, value = option.partition("=")
        if not sep:
            continue
        if key == "--principal":
            P = float(value)
        elif key == "--payment":
            A = float(value)
        elif key == "--interest":
            # Annual percentage -> monthly fractional rate.
            i = float(value) / (12 * 100)
        elif key == "--periods":
            n = int(value)
    if P < 0 or A < 0 or i < 0 or n < 0:
        print("Incorrect parameters.")
    elif args[1] == "--type=annuity":
        if P != 0 and A != 0 and i != 0:
            return count_of_months(P, A, i)
        elif P != 0 and n != 0 and i != 0:
            return monthly_payment(P, n, i)
        elif A != 0 and n != 0 and i != 0:
            return credit_principal(A, n, i)
        elif P != 0 and A != 0 and n != 0:
            # All three amounts given but no interest: interest is mandatory
            # for annuity math.  (Fixes the original duplicated "A != 0" test.)
            print("Incorrect parameters.")
    elif args[1] == "--type=diff":
        if A != 0:
            # Differentiated payments vary by month; a fixed payment is invalid.
            print("Incorrect parameters.")
        else:
            return differentiated_payment(P, n, i)
def count_of_months(P, A, i):
    """Print how long repaying principal P takes with annuity payment A.

    i is the monthly interest rate as a fraction.  Also prints the total
    overpayment (sum of payments minus principal).
    """
    # n = log_{1+i}(A / (A - i*P)), rounded up to whole months.
    n = math.ceil(math.log(A / (A - i * P), 1 + i))
    years, months = divmod(n, 12)
    parts = []
    if years:
        parts.append(f"{years} year{'' if years == 1 else 's'}")
    if months:
        # Proper singular/plural (the original printed "1 months"); the old
        # ceil(n % 12) == 12 branch was unreachable and has been dropped.
        parts.append(f"{months} month{'' if months == 1 else 's'}")
    if not parts:
        # Degenerate case n == 0: keep the original "0 months" wording.
        parts.append("0 months")
    print(f"You need {' and '.join(parts)} to repay this credit!")
    print(f"Overpayment = {(n * A - P):.0f}")
def monthly_payment(P, n, i):
    """Print the annuity payment for principal P over n months at monthly
    rate i (a fraction), followed by the total overpayment."""
    growth = (1 + i) ** n
    annuity = math.ceil(P * (i * growth / (growth - 1)))
    print(f"Your annuity payment = {annuity}!")
    print(f"Overpayment = {(n * annuity - P):.0f}")
def credit_principal(A, n, i):
    """Print the principal affordable with annuity payment A over n months
    at monthly rate i (a fraction), followed by the total overpayment."""
    growth = (1 + i) ** n
    principal = A / (i * growth / (growth - 1))
    print(f"Your credit principal = {principal:.0f}!")
    print(f"Overpayment = {(n * A - principal):.0f}")
def differentiated_payment(P, n, i):
    """Print each month's differentiated payment for principal P over n
    months at monthly rate i (a fraction), then the total overpayment."""
    total = 0
    for month in range(1, n + 1):
        # Fixed principal share plus interest on the remaining balance.
        payment = math.ceil(P / n + i * (P - (P * (month - 1)) / n))
        print(f"Month {month}: paid out {payment:.0f}")
        total += payment
    print(f"Overpayment = {(total - P):.0f}")
# Script entry: expect exactly 5 argv entries (program, --type=..., and
# three --key=value parameters) and a recognized calculation type.  The
# original nested double-negative dispatch (with an unreachable final else)
# is flattened to a simple membership test -- behavior is unchanged.
args = sys.argv
if len(args) != 5:
    print("Incorrect parameters.")
elif args[1] in ("--type=annuity", "--type=diff"):
    credit_type()
else:
    print("Incorrect parameters.")
| [
"noreply@github.com"
] | plojhi.noreply@github.com |
a0b726ca16865f5077e8d0a563538ac2f6752e45 | 874153914c2784eb164c271e4a68c01265e5ce7f | /tool/module/arkon_config.py | a6a18adaf5c5f245e552d73e4e0c95d158b9cff6 | [
"Apache-2.0"
] | permissive | random-archer/mkinitcpio-systemd-tool | 3c6f572244fb84e9eab4fa1291d4e5526a614751 | 5685070783975c934c517d603bcfd614f8b194a3 | refs/heads/master | 2023-08-17T20:52:13.040297 | 2022-12-21T14:20:10 | 2022-12-21T14:20:10 | 59,916,137 | 108 | 29 | NOASSERTION | 2023-08-13T09:35:00 | 2016-05-28T22:00:58 | Python | UTF-8 | Python | false | false | 2,750 | py | #!/usr/bin/env python
#
# shared config for build/setup/verify cycle
#
import os
import datetime
import nspawn
# detect build system
def has_ci_azure():
    """Return True when running under an Azure Pipelines CI agent, detected
    via the AZURE_EXTENSION_DIR environment variable."""
    return os.environ.get("AZURE_EXTENSION_DIR") is not None
# no http proxy in azure: Azure build agents have no proxy, so disable both
# host- and machine-level proxy use in the nspawn configuration
if has_ci_azure():
    nspawn.CONFIG['proxy']['use_host_proxy'] = 'no'
    nspawn.CONFIG['proxy']['use_machine_proxy'] = 'no'
# report state of non-buffered stdout/stderr (CI logs interleave otherwise)
print(f"### PYTHONUNBUFFERED={os.environ.get('PYTHONUNBUFFERED', None)}")
# location of machine resources (nspawn storage root)
nspawn_store = nspawn.CONFIG['storage']["root"]
# location of this module
this_dir = os.path.dirname(os.path.abspath(__file__))
# arch linux archive iso image date
build_epoch = datetime.datetime(year=2020, month=3, day=1)
# kernel version used by arch linux archive iso
# (f-string has no placeholders; kept as-is for token fidelity)
kernel_version = f"5.5.6-arch1-1"
# current user account
host_user = os.getenv('USER', "root")
# azure or local resource location
user_home = os.getenv('HOME', "/root")
# location of source repository (asks git for the work-tree top level)
project_repo = os.popen("git rev-parse --show-toplevel").read().strip()
# location of disk mount shared host/machine
# contains output of mkinitcpio: vmlinuz, linux-initramfs.img
project_boot = f"{project_repo}/boot"
# location of disk mount shared host/machine
# contains extracted content of linux-initramfs.img
project_data = f"{project_repo}/data"
# location of sysroot produced by this tool (with azure cache)
media_store = f"{nspawn_store}/resource/media"
# location of images produced by this tool (with azure cache)
image_store = f"{nspawn_store}/resource/image"
#
# container definitions: each test machine gets a name, a tarball path under
# image_store, and a file:// URL pointing at that tarball
#
# image base for unit test
base_machine = "arch-base"
base_image_path = f"{image_store}/{base_machine}/default.tar.gz"
base_image_url = f"file://localhost/{base_image_path}"
# unit test: cryptsetup
cryptsetup_machine = "test-cryptsetup"
cryptsetup_image_path = f"{image_store}/{cryptsetup_machine}/default.tar.gz"
cryptsetup_image_url = f"file://localhost/{cryptsetup_image_path}"
# unit test: dropbear
dropbear_machine = "test-dropbear"
dropbear_image_path = f"{image_store}/{dropbear_machine}/default.tar.gz"
dropbear_image_url = f"file://localhost/{dropbear_image_path}"
# unit test: tinysshd
tinysshd_machine = "test-tinysshd"
tinysshd_image_path = f"{image_store}/{tinysshd_machine}/default.tar.gz"
tinysshd_image_url = f"file://localhost/{tinysshd_image_path}"
# unit test: nftables
nftables_machine = "test-nftables"
nftables_image_path = f"{image_store}/{nftables_machine}/default.tar.gz"
nftables_image_url = f"file://localhost/{nftables_image_path}"
# unit test: anything else
unitada_machine = "test-unitada"
unitada_image_path = f"{image_store}/{unitada_machine}/default.tar.gz"
unitada_image_url = f"file://localhost/{unitada_image_path}"
| [
"andrei.pozolotin@gmail.com"
] | andrei.pozolotin@gmail.com |
bd42285b2775c2bc449211d34086b9219ceab4fb | 1c76e59804d5686f4f6407d572b638f5669ad9ab | /leetcode/valid parentheses/faster_valid_parentheses.py | 5f7089758f8ee61c894179454e1c0e34dd79ea13 | [] | no_license | Turall/leetcode | 28b9ccd24b32309ad77c0ad5e8a72f1b11a3bafd | c510cc2cc494070542b35d744eafb381fe7d0994 | refs/heads/master | 2020-05-21T06:19:28.899393 | 2019-05-15T06:46:28 | 2019-05-15T06:46:28 | 185,941,823 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | def check(s):
    """Return True iff every bracket in s is closed in the correct order.

    Classic stack validator: push openers; each closer must match the most
    recent unmatched opener.
    """
    # An empty (or falsy) input is trivially balanced.
    if not s:
        return True
    temp = []  # stack of currently open brackets
    breckets = {"(": ")", "{": "}", "[": "]"}  # opener -> matching closer
    for parenthese in s:
        if parenthese in breckets:
            # Opening bracket: remember it until its closer arrives.
            temp.append(parenthese)
        elif len(temp) == 0 or breckets[temp.pop()] != parenthese:
            # Closer with no pending opener, or the wrong closer: invalid.
            return False
    # Balanced only if no opener is left unclosed.
    return len(temp) == 0
print(check("()]"))
| [
"tural_m@hotmail.com"
] | tural_m@hotmail.com |
9b66a0e02508ee60fbd50d1de623179d0ef4f34b | 9cec93a18ea94504947820205d0faae4d67ecd8d | /H2TauTau/python/eventContent/common_cff.py | cd998b53c2a17602f3659cbf83da20caa949c0ea | [] | no_license | DESY-CMS-SUS/cmgtools-lite | de88b1d5dc20a925ed5b7c7be69fa3ef677955c6 | db52d50047178563a0eb7f5858ae100aa408ec68 | refs/heads/8_0_25 | 2021-05-23T04:36:22.900460 | 2017-11-09T10:32:41 | 2017-11-09T10:32:41 | 60,184,794 | 3 | 9 | null | 2021-02-17T23:22:12 | 2016-06-01T14:37:18 | Python | UTF-8 | Python | false | false | 1,693 | py | import copy
common = [
# 'drop *',
'keep double_fixedGridRho*_*_*',
'keep edmTriggerResults_TriggerResults_*_*',
'keep patPackedTriggerPrescales_*_*_*',
'keep patElectrons_slimmedElectrons_*_*',
'keep patJets_slimmedJets_*_*',
'keep patJets_patJetsReapplyJEC_*_*',
'keep patMETs_slimmedMETs_*_*',
'keep patMuons_slimmedMuons_*_*',
# 'keep patPacked*_*_*_*',
'keep patPackedCandidate*_*packedPFCandidates*_*_*', # RIC: agreed to keep it to: 1. tau vtx 2. possibly compute isolations at analysis level
'keep patTaus_slimmedTaus_*_*',
'keep patTrigger*_*_*_*',
'keep recoVertexs_*_*_*',
'keep cmgMETSignificances_*_*_*',
'keep patCompositeCandidates_cmg*CorSVFitFullSel_*_H2TAUTAU',
'keep patJets_patJetsAK4PF_*_*',
'keep PileupSummaryInfos_*_*_*',
'keep recoGenParticles_prunedGenParticles_*_*',
'keep patPackedGenParticles_packedGenParticles__*', # these are status 1
'keep recoGsfElectronCores_*_*_*', # needed?
'keep recoSuperClusters_*_*_*', # for electron MVA ID
'keep recoGenJets_slimmedGenJets_*_*',
'keep *_slimmedSecondaryVertices_*_*',
'keep patPackedCandidates_packedPFCandidates__*',
'keep *_puppi_*_*',
'keep *_slimmedMETsPuppi_*_*',
'keep *_generator_*_*',
'keep *_genEvtWeightsCounter_*_H2TAUTAU',
'keep *_offlineBeamSpot_*_*',
'keep *_reducedEgamma_reducedConversions_*',
'keep LHEEventProduct_*_*_*',
'keep *_l1extraParticles_*_*',
# 'keep *_mvaMETTauMu_*_H2TAUTAU'
]
commonDebug = copy.deepcopy(common)
commonDebug.extend([
'keep patCompositeCandidates_*_*_*', # keep all intermediate di-taus
'keep patElectrons_*_*_*'
])
| [
"jan.steggemann@cern.ch"
] | jan.steggemann@cern.ch |
28e9a28d0506b14afc1df502489b8cf5a7abf67b | cbe3a71a33d783e68f6da5a8cf43f30991fc0646 | /app/translate.py | 1c96a509daf1da8d9617e3d31f7999a653e0b32c | [] | no_license | garethbreeze1993/flask_microblog_tutorial | 65d0e3eb2113ef6a60a432aa63942695f4b520aa | 44ad685fb5c76fc2dca0274b32f5dccd2abd6250 | refs/heads/main | 2023-01-22T20:51:41.508328 | 2020-12-05T16:51:48 | 2020-12-05T16:51:48 | 318,836,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,183 | py | import json
import requests
from flask_babel import _
from flask import current_app
def translate(text, source_language, dest_language):
    """Translate `text` from `source_language` to `dest_language` with the
    Microsoft Translator v3 REST API.

    Returns the translated string, or a localized error message when the
    service key is missing/empty or the HTTP request fails.
    """
    if 'MS_TRANSLATOR_KEY' not in current_app.config or not current_app.config['MS_TRANSLATOR_KEY']:
        return _('Error the translation service is not configured')
    # The API expects a JSON array of {"Text": ...} objects.
    payload = json.dumps([{'Text': text}])
    # Fix: the original also computed len(payload.encode('utf-8')) into an
    # unused local; requests sets Content-Length itself, so it was removed.
    headers = {
        'Ocp-Apim-Subscription-Key': current_app.config['MS_TRANSLATOR_KEY'],
        'Ocp-Apim-Subscription-Region': 'westeurope',
        "Content-Type": "application/json; charset=UTF-8",
    }
    r = requests.post(
        'https://api.cognitive.microsofttranslator.com/translate?api-version=3.0&to={}&from={}'.format(
            dest_language, source_language),
        headers=headers, data=payload)
    if r.status_code != 200:
        return _('Error: the translation service failed.')
    # utf-8-sig strips the BOM the service may prepend; the decoded body is a
    # JSON list with one entry per input text.
    python_response = json.loads(r.content.decode('utf-8-sig'))
    return python_response[0].get('translations')[0].get('text')
| [
"gareth.breeze1993@gmail.com"
] | gareth.breeze1993@gmail.com |
0247f8d4f45c85ecf055e2b5e59598a913f4f131 | 12404199bbc771ca8ddd3fd02c5d599100633502 | /apps/maap/migrations/0001_initial.py | 4b7ca5168c01833484c7bec5ca8630bc49466aa6 | [] | no_license | apanruning/nomenclador | 9712e6248a2788dc75cf84fdf3d7a425fab6cbec | f27251a86ee52e5267295df239ca068ae6fc0f3f | refs/heads/master | 2016-09-05T20:21:17.926508 | 2011-03-21T01:08:50 | 2011-03-21T01:08:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,085 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Auto-generated South schema migration: initial creation of the `maap`
    app's tables (metadata, model, category tree, point/area/zone/multiline
    geometries, icons) plus the model<->category M2M join table."""

    def forwards(self, orm):
        """Create all maap tables (applied when migrating forward)."""
        # Adding model 'MaapMetadata'
        db.create_table('maap_maapmetadata', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('key', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('value', self.gf('django.db.models.fields.CharField')(max_length=100)),
        ))
        db.send_create_signal('maap', ['MaapMetadata'])
        # Adding model 'MaapModel'
        db.create_table('maap_maapmodel', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('slug', self.gf('django.db.models.fields.SlugField')(max_length=50, null=True, db_index=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('description', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('changed', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('creator', self.gf('django.db.models.fields.related.ForeignKey')(related_name='creators', to=orm['auth.User'])),
            ('editor', self.gf('django.db.models.fields.related.ForeignKey')(related_name='editors', to=orm['auth.User'])),
            ('tags', self.gf('tagging.fields.TagField')(default='')),
            ('banner_slots', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
            ('default_layers', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
            ('metadata', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['maap.MaapMetadata'], null=True, blank=True)),
        ))
        db.send_create_signal('maap', ['MaapModel'])
        # Adding M2M table for field category on 'MaapModel'
        db.create_table('maap_maapmodel_category', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('maapmodel', models.ForeignKey(orm['maap.maapmodel'], null=False)),
            ('maapcategory', models.ForeignKey(orm['maap.maapcategory'], null=False))
        ))
        db.create_unique('maap_maapmodel_category', ['maapmodel_id', 'maapcategory_id'])
        # Adding model 'MaapCategory' (MPTT tree: lft/rght/tree_id/level)
        db.create_table('maap_maapcategory', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=50, db_index=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=35)),
            ('parent', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='children', null=True, to=orm['maap.MaapCategory'])),
            ('is_public', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('show_all', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('lft', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
            ('rght', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
            ('tree_id', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
            ('level', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
        ))
        db.send_create_signal('maap', ['MaapCategory'])
        # Adding model 'MaapPoint' (srid=900913: spherical-mercator geometries)
        db.create_table('maap_maappoint', (
            ('maapmodel_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['maap.MaapModel'], unique=True, primary_key=True)),
            ('geom', self.gf('django.contrib.gis.db.models.fields.PointField')(srid=900913)),
            ('icon', self.gf('django.db.models.fields.related.ForeignKey')(default=185, to=orm['maap.Icon'], blank=True)),
            ('closest', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('popup_text', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal('maap', ['MaapPoint'])
        # Adding model 'MaapArea'
        db.create_table('maap_maaparea', (
            ('maapmodel_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['maap.MaapModel'], unique=True, primary_key=True)),
            ('geom', self.gf('django.contrib.gis.db.models.fields.PolygonField')(srid=900913)),
        ))
        db.send_create_signal('maap', ['MaapArea'])
        # Adding model 'MaapZone'
        db.create_table('maap_maapzone', (
            ('maaparea_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['maap.MaapArea'], unique=True, primary_key=True)),
        ))
        db.send_create_signal('maap', ['MaapZone'])
        # Adding model 'MaapMultiLine'
        db.create_table('maap_maapmultiline', (
            ('maapmodel_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['maap.MaapModel'], unique=True, primary_key=True)),
            ('geom', self.gf('django.contrib.gis.db.models.fields.MultiLineStringField')(srid=900913)),
        ))
        db.send_create_signal('maap', ['MaapMultiLine'])
        # Adding model 'Icon'
        db.create_table('maap_icon', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100)),
        ))
        db.send_create_signal('maap', ['Icon'])

    def backwards(self, orm):
        """Drop all maap tables (applied when unapplying this migration)."""
        # Deleting model 'MaapMetadata'
        db.delete_table('maap_maapmetadata')
        # Deleting model 'MaapModel'
        db.delete_table('maap_maapmodel')
        # Removing M2M table for field category on 'MaapModel'
        db.delete_table('maap_maapmodel_category')
        # Deleting model 'MaapCategory'
        db.delete_table('maap_maapcategory')
        # Deleting model 'MaapPoint'
        db.delete_table('maap_maappoint')
        # Deleting model 'MaapArea'
        db.delete_table('maap_maaparea')
        # Deleting model 'MaapZone'
        db.delete_table('maap_maapzone')
        # Deleting model 'MaapMultiLine'
        db.delete_table('maap_maapmultiline')
        # Deleting model 'Icon'
        db.delete_table('maap_icon')

    # South's frozen ORM snapshot: the model state as of this migration,
    # used to build the `orm` object passed to forwards()/backwards().
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'maap.icon': {
            'Meta': {'object_name': 'Icon'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'maap.maaparea': {
            'Meta': {'ordering': "('created', 'name')", 'object_name': 'MaapArea', '_ormbases': ['maap.MaapModel']},
            'geom': ('django.contrib.gis.db.models.fields.PolygonField', [], {'srid': '900913'}),
            'maapmodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['maap.MaapModel']", 'unique': 'True', 'primary_key': 'True'})
        },
        'maap.maapcategory': {
            'Meta': {'object_name': 'MaapCategory'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '35'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['maap.MaapCategory']"}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'show_all': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'maap.maapmetadata': {
            'Meta': {'object_name': 'MaapMetadata'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'maap.maapmodel': {
            'Meta': {'ordering': "('created', 'name')", 'object_name': 'MaapModel'},
            'banner_slots': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'category': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'maapmodel_set'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['maap.MaapCategory']"}),
            'changed': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'creators'", 'to': "orm['auth.User']"}),
            'default_layers': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'editor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'editors'", 'to': "orm['auth.User']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'metadata': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['maap.MaapMetadata']", 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True', 'db_index': 'True'}),
            'tags': ('tagging.fields.TagField', [], {'default': "''"})
        },
        'maap.maapmultiline': {
            'Meta': {'ordering': "('created', 'name')", 'object_name': 'MaapMultiLine', '_ormbases': ['maap.MaapModel']},
            'geom': ('django.contrib.gis.db.models.fields.MultiLineStringField', [], {'srid': '900913'}),
            'maapmodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['maap.MaapModel']", 'unique': 'True', 'primary_key': 'True'})
        },
        'maap.maappoint': {
            'Meta': {'ordering': "('created', 'name')", 'object_name': 'MaapPoint', '_ormbases': ['maap.MaapModel']},
            'closest': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'geom': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': '900913'}),
            'icon': ('django.db.models.fields.related.ForeignKey', [], {'default': '185', 'to': "orm['maap.Icon']", 'blank': 'True'}),
            'maapmodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['maap.MaapModel']", 'unique': 'True', 'primary_key': 'True'}),
            'popup_text': ('django.db.models.fields.TextField', [], {'blank': 'True'})
        },
        'maap.maapzone': {
            'Meta': {'ordering': "('created', 'name')", 'object_name': 'MaapZone', '_ormbases': ['maap.MaapArea']},
            'maaparea_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['maap.MaapArea']", 'unique': 'True', 'primary_key': 'True'})
        }
    }

    complete_apps = ['maap']
| [
"maturburu@gmail.com"
] | maturburu@gmail.com |
f2823cca9d54e10f9edbd92f0ac793533752b9a5 | ca3e17da908ab06b27b451e2a7cd5593211e0dda | /solutions/01/kepler.py | 2caace095ec801519f8d37bc9b4c970828fc7725 | [] | no_license | ArmaanSethi/Physical-Modeling | e34ffee50ee2bf61716b08a4abe2e416773965e8 | 031d7f0fe994b3a6dcd5e91917dbdc5fcb38ea05 | refs/heads/master | 2020-04-11T05:13:16.950401 | 2018-12-12T20:43:32 | 2018-12-12T20:43:32 | 161,541,923 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,429 | py | #=============================================================
# Test bank for ODE integrators: Kepler problem.
#
# Contains the functions:
# get_planetdata : returns basic information about planets
# set_odepar : setting global variables for get_dydx()
# get_odepar : getting global variables for get_dydx()
# get_dydx : the RHS of the ODEs
# ode_init : initializes ODE problem, setting functions and initial values
# ode_check : performs tests on results (plots, sanity checks)
# main : calls the rest.
#
# Arguments:
# --stepper [euler,rk2,rk4]
#=============================================================
# required libraries
import argparse # allows us to deal with arguments to main()
from argparse import RawTextHelpFormatter
import numpy as np # numerical routines (arrays, math functions etc)
import matplotlib.pyplot as plt # plotting commands
import p358utilities as util # for rescaleplot
import ode_integrators as odeint # contains the drivers for ODE integration
import ode_step as step # the stepper functions
#=============================================================
# interface for global variables
#=============================================================
# function set_odepar()
def set_odepar(par):
    """Store the ODE parameter vector (G followed by the body masses) in the
    module-global `odepar`, for later retrieval by get_odepar()/get_dydx()."""
    global odepar
    odepar = par
#=============================================================
# function get_odepar()
def get_odepar():
    """Return the module-global ODE parameter vector stored by set_odepar()."""
    global odepar
    return odepar
#==============================================================
# function mass,eps,rap,vorb,torb = get_planetdata(which)
#
# Returns planetary orbit data
#
# input:
# which: integer array with elements between 1 and 8, with 1: Mercury...8: Neptune
# output:
# mass: planet mass in kg
# eps : eccentricity
# rap : aphelion distance (in km)
# vorb: aphelion velocity (in km/s)
# torb: orbital period (in years)
#---------------------------------------------------------------
def get_planetdata(which):
    """Return (mass, eps, rap, vorb, torb) for the Sun plus selected planets.

    `which` holds planet indices (1=Mercury ... 8=Neptune). Element 0 of every
    returned array is the Sun: its mass in slot 0, zeros for the orbital
    columns. Units: kg, dimensionless, m, m/s, yr.
    """
    # Solar-system tables, index 0 = Sun, 1..8 = Mercury..Neptune.
    mass  = np.array([1.989e30,3.3011e23,4.8675e24,5.972e24,6.41e23,1.89819e27,5.6834e26,8.6813e25,1.02413e26])
    eps   = np.array([0.0,0.205,0.0067,0.0167,0.0934,0.0489,0.0565,0.0457,0.0113])
    rap   = np.array([0.0,6.9816e10,1.0894e11,1.52139e11,2.49232432e11,8.1662e11,1.5145e12,3.00362e12,4.54567e12])
    vorb  = np.array([0.0,3.87e4,3.479e4,2.929e4,2.197e4,1.244e4,9.09e3,6.49e3,5.37e3])
    yrorb = np.array([0.0,0.241,0.615,1.0,1.881,1.1857e1,2.9424e1,8.3749e1,1.6373e2])
    # Prepend the Sun's slot to the rows picked out by `which`.
    rmass  = np.concatenate(([mass[0]], mass[which]))
    reps   = np.concatenate(([0.0], eps[which]))
    rrap   = np.concatenate(([0.0], rap[which]))
    rvorb  = np.concatenate(([0.0], vorb[which]))
    ryrorb = np.concatenate(([0.0], yrorb[which]))
    return rmass, reps, rrap, rvorb, ryrorb
#==============================================================
# function dydx = get_dydx(x,y,dx)
#
# Calculates ODE RHS for Kepler problem via direct summation.
#
# input:
# x,y : position and values for RHS.
# If we have three bodies, y has the shape
# [x1,y1,x2,y2,x3,y3,vx1,vy1,vx2,vy2,vx3,vy3], for bodies 1,2,3.
# with the cartesian positions (x,y) and their
# velocities (v_x,v_y).
# global:
# G : grav constant (par[0])
# masses : masses of bodies (par[1:npar])
#
# output:
# dydx : vector of results as in y'=f(x,y)
#--------------------------------------------------------------
def get_dydx(x, y, dx):
    """Right-hand side of the planar N-body Kepler problem.

    `y` packs the state as [x_0, y_0, ..., x_{N-1}, y_{N-1},
    vx_0, vy_0, ..., vx_{N-1}, vy_{N-1}]; the parameter vector from
    get_odepar() supplies G and the N masses. Returns dy/dx of the same
    shape: velocities for the position slots, direct-summation gravitational
    accelerations for the velocity slots.
    """
    nbodies = y.size // 4  # four state variables per body
    par = get_odepar()     # [G, m_0, ..., m_{N-1}]
    gnewton = par[0]
    masses = par[1:par.size]
    dydx = np.zeros(y.size)
    # Index vectors into the packed state layout described above.
    body = np.arange(nbodies)
    ix = 2 * body
    iy = 2 * body + 1
    ivx = ix + 2 * nbodies
    ivy = iy + 2 * nbodies
    # d(position)/dt = velocity
    dydx[ix] = y[ivx]
    dydx[iy] = y[ivy]
    # d(velocity)/dt = acceleration from every other body (O(N^2) pair sum)
    for k in range(nbodies):
        ax = 0.0
        ay = 0.0
        for j in range(nbodies):
            if j == k:
                continue
            sx = y[ix[k]] - y[ix[j]]
            sy = y[iy[k]] - y[iy[j]]
            r3 = np.power(sx * sx + sy * sy, 1.5)
            ax = ax - gnewton * masses[j] * sx / r3
            ay = ay - gnewton * masses[j] * sy / r3
        dydx[ivx[k]] = ax
        dydx[ivy[k]] = ay
    return dydx
#==============================================================
# function fRHS,x0,y0,x1 = ode_init(stepper)
#
# Initializes derivative function, parameters, and initial conditions
# for ODE integration.
#
# input:
# stepper: euler
# rk2
# rk4
# rk45
# output:
# fINT : function handle for integrator (problem) type: initial or boundary value problem (ode_ivp or ode_bvp)
# fORD : function handle for integrator order (euler, rk2, rk4, rk45).
# Note: symplectic integrators require euler.
# fRHS : function handle for RHS of ODE. Needs to return vector dydx of same size as y0.
# fBVP : function handle for boundary values in case fINT == ode_bvp.
# x0 : starting x
# y0 : starting y(x0)
# x1 : end x
#--------------------------------------------------------------
def ode_init(stepper):
    """Set up the Kepler problem for integration.

    Maps `stepper` ('euler'|'rk2'|'rk4'|'rk45') to a stepping function,
    builds code-unit initial conditions for Sun+Earth starting at aphelion,
    stores the parameter vector via set_odepar(), and returns
    (fINT, fORD, fRHS, fBVP, x0, y0, x1, nstep).
    """
    fBVP = 0 # default is IVP, but see below.
    if (stepper == 'euler'):
        fORD = step.euler
    elif (stepper == 'rk2'):
        fORD = step.rk2
    elif (stepper == 'rk4'):
        fORD = step.rk4
    elif (stepper == 'rk45'):
        fORD = step.rk45
    else:
        raise Exception('[ode_init]: invalid stepper value: %s' % (stepper))
    print('[ode_init]: initializing Kepler problem')
    # We set the initial positions, assuming orbit starts at aphel.
    # Units are different here. We set G=1, L=1AU, t=1yr. This results
    # a set scale for the mass, as below.
    AU = 1.495979e11 # AU in meters
    year = 3.6e3*3.65e2*2.4e1 # year in seconds
    mass,eps,r_aphel,v_orb,yr_orb = get_planetdata(np.array([3]))  # Sun + Earth
    gnewton = 6.67408e-11
    # Code-unit scales: length in AU, time in years; the mass unit then
    # follows from requiring G=1 in code units.
    uLeng = AU
    uTime = year
    uVelo = uLeng/uTime
    uAcce = uVelo/uTime
    uMass = uAcce*uLeng*uLeng/gnewton
    masscu = mass/uMass
    rapcu = r_aphel/uLeng
    velcu = v_orb/uVelo
    # Set initial conditions. All objects are aligned along x-axis, with planets to positive x, sun to negative x.
    # (The Sun's position/velocity are chosen so the center of mass stays at rest.)
    rapcu[0]= -np.sum(masscu*rapcu)/masscu[0]
    velcu[0]= -np.sum(masscu*velcu)/masscu[0]
    nstepyr = 100 # number of steps per year
    nyears = int(np.ceil(np.max(yr_orb)))
    x0 = 0.0 # starting at t=0
    x1 = nyears*year/uTime # end time in years
    nstep = nyears*nstepyr # thus, each year is resolved by nstepyr integration steps
    nbodies = mass.size # number of objects
    y0 = np.zeros(4*nbodies)
    par = np.zeros(nbodies+1) # number of parameters
    par[0] = 1.0  # G = 1 in code units
    for k in range(nbodies): # fill initial condition array and parameter array
        y0[2*k] = rapcu[k]
        y0[2*k+1] = 0.0
        y0[2*(nbodies+k)] = 0.0
        y0[2*(nbodies+k)+1] = velcu[k]
        par[k+1] = masscu[k]
    fINT = odeint.ode_ivp
    fRHS = get_dydx
    set_odepar(par)
    return fINT,fORD,fRHS,fBVP,x0,y0,x1,nstep
#==============================================================
# function ode_check(x,y)
#
# input:
# iinteg : integrator type
# x : independent variable
# y : integration result
# it : number of substeps used. Only meaningful for RK45 (iinteg = 4).
#--------------------------------------------------------------
def ode_check(x,y,it):
    """Diagnostics for the Kepler integration.

    Prints total energy / angular momentum (normalized to their initial
    values) and center-of-mass position/velocity at every step, then plots
    the orbits and the conservation errors.

    Bug fix: the log-error plots previously computed
    np.log10(np.abs(Eplot-1.0)) although Eplot is already E/E0-1 — i.e. they
    plotted log|E/E0-2| instead of the labeled log|E/E0-1| (a flat line near
    zero). Same for the angular-momentum curve. Both now use np.abs(Eplot)
    and np.abs(Lplot), matching the axis labels.
    """
    # for the direct Kepler problem, we check for energy and angular momentum conservation,
    # and for the center-of-mass position and velocity
    color   = ['black','green','cyan','blue','red','black','black','black','black']
    n       = x.size
    par     = get_odepar()
    npar    = par.size
    nbodies = par.size-1
    gnewton = par[0]
    masses  = par[1:npar]
    Egrav   = np.zeros(n)
    indx    = 2*np.arange(nbodies)
    indy    = 2*np.arange(nbodies)+1
    indvx   = 2*np.arange(nbodies)+2*nbodies
    indvy   = 2*np.arange(nbodies)+2*nbodies+1
    E       = np.zeros(n) # total energy
    Lphi    = np.zeros(n) # angular momentum
    R       = np.sqrt(np.power(y[indx[0],:]-y[indx[1],:],2)+np.power(y[indy[0],:]-y[indy[1],:],2))
    Rs      = np.zeros(n) # center of mass position
    vs      = np.zeros(n) # center of mass velocity
    # Kinetic energy, angular momentum and center-of-mass data per step.
    for k in range(n):
        E[k]    = 0.5*np.sum(masses*(np.power(y[indvx,k],2)+np.power(y[indvy,k],2)))
        Lphi[k] = np.sum(masses*(y[indx,k]*y[indvy,k]-y[indy,k]*y[indvx,k]))
        Rsx     = np.sum(masses*y[indx,k])/np.sum(masses)
        Rsy     = np.sum(masses*y[indy,k])/np.sum(masses)
        vsx     = np.sum(masses*y[indvx,k])/np.sum(masses)
        vsy     = np.sum(masses*y[indvy,k])/np.sum(masses)
        Rs[k]   = np.sqrt(Rsx*Rsx+Rsy*Rsy)
        vs[k]   = np.sqrt(vsx*vsx+vsy*vsy)
    # Gravitational potential energy over all distinct pairs.
    for j in range(nbodies):
        for i in range(j): # preventing double summation. Still O(N^2) though.
            dx    = y[indx[j],:]-y[indx[i],:]
            dy    = y[indy[j],:]-y[indy[i],:]
            Rt    = np.sqrt(dx*dx+dy*dy)
            Egrav = Egrav - gnewton*masses[i]*masses[j]/Rt
    E    = E + Egrav
    E    = E/E[0]
    Lphi = Lphi/Lphi[0]
    for k in range(n):
        print('k=%7i t=%13.5e E/E0=%20.12e L/L0=%20.12e Rs=%10.2e vs=%10.2e'
              % (k,x[k],E[k],Lphi[k],Rs[k],vs[k]))
    Eplot = E-1.0
    Lplot = Lphi-1.0
    # Fixed: log of the *relative error* itself (was np.abs(Eplot-1.0)).
    logE  = np.log10(np.abs(Eplot))
    logL  = np.log10(np.abs(Lplot))
    # now plot everything
    # (1) the orbits
    xmin = np.min(y[indx,:])
    xmax = np.max(y[indx,:])
    ymin = np.min(y[indy,:])
    ymax = np.max(y[indy,:])
    plt.figure(num=1,figsize=(6,6),dpi=100,facecolor='white')
    plt.subplot(111)
    plt.xlim(1.05*xmin,1.05*xmax)
    plt.ylim(1.05*ymin,1.05*ymax)
    for k in range(nbodies):
        plt.plot(y[indx[k],:],y[indy[k],:],color=color[k],linewidth=1.0,linestyle='-')
    plt.axes().set_aspect('equal')
    plt.xlabel('x [AU]')
    plt.ylabel('y [AU]')
    # (2) the checks (total energy and angular momentum)
    plt.figure(num=2,dpi=100,facecolor='white')
    plt.subplot(211)
    plt.plot(x,Eplot,linestyle='-',color='black',linewidth=1.0,label='E')
    plt.plot(x,Lplot,linestyle='--',color='black',linewidth=1.0,label='L')
    plt.xlabel('t [yr]')
    plt.ylabel('E/E0-1, L/L0-1')
    plt.legend()
    plt.subplot(212)
    plt.plot(x,logE,linestyle='-',color='black',linewidth=1.0,label='E')
    plt.plot(x,logL,linestyle='--',color='black',linewidth=1.0,label='L')
    plt.xlabel('t [yr]')
    plt.ylabel('log|E/E0-1|, log|L/L0-1|')
    plt.legend()
    plt.show()
#==============================================================
#==============================================================
# main
#
# parameters:
def main():
    """Command-line entry point: pick a stepper, integrate, and check.

    Parses the stepper name from the command line, asks the module-level
    ``ode_init`` for the integrator/problem setup, runs the integration,
    and hands the trajectory to ``ode_check`` for diagnostics/plots.
    """
    parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter)
    # BUG FIX: a positional argument only honors its default when declared
    # with nargs='?'; previously default='euler' was dead code and the
    # argument was mandatory despite the help text implying a default.
    parser.add_argument("stepper", type=str, nargs='?', default='euler',
                        help="stepping function:\n"
                             "   euler: Euler step\n"
                             "   rk2  : Runge-Kutta 2nd order\n"
                             "   rk4  : Runge-Kutta 4th order\n")
    args = parser.parse_args()
    stepper = args.stepper
    # ode_init returns: integrator driver, order/stepper fn, RHS, boundary
    # handler, start/end abscissae, initial state, and step count.
    fINT, fORD, fRHS, fBVP, x0, y0, x1, nstep = ode_init(stepper)
    x, y, it = fINT(fRHS, fORD, fBVP, x0, y0, x1, nstep)
    ode_check(x, y, it)
#==============================================================
main()
| [
"armaan.sethi@gmail.com"
] | armaan.sethi@gmail.com |
f16e00871bb29e5f1bbe0af1268aab7a20e5c408 | 9e80ef3b434e26c50878a7c5d6ef3b33e0b8d457 | /given_armstrong_number_or_not.py | 0feffbd6ad2c93c65c496b8e380cb638b5b9a0b1 | [] | no_license | nikhilrane1992/Python_Basic_Exercise | 8a337dc0d3cb80fa6560a9de98ae4bbf1c9e91ad | bd3759b0c2e24a140a417f952c8fdff4f995e6d7 | refs/heads/master | 2021-08-08T13:20:34.702325 | 2017-11-10T10:49:47 | 2017-11-10T10:49:47 | 105,880,075 | 1 | 4 | null | 2017-10-11T05:58:04 | 2017-10-05T11:07:28 | Python | UTF-8 | Python | false | false | 597 | py | # Python program to check if the number provided by the user is an
# Armstrong number or not
# Method 1
num = raw_input("Enter a number: ")
n, t_sum = len(num), 0
for i in num:
t_sum += int(i) ** n
if t_sum == int(num):
print "{} is armstrong".format(num)
else:
print "Number is not armstrong"
print "-" * 20
# Method 2
num = int(raw_input("Enter a number: "))
tmp, t_sum, n = num, 0, len(str(num))
for i in range(n):
digit = tmp % 10
t_sum += digit ** n
tmp //= 10
if t_sum == num:
print "{} is armstrong".format(num)
else:
print "Number is not armstrong"
| [
"nikhilrane1992@gmail.com"
] | nikhilrane1992@gmail.com |
2e29fe62e754d295f00d590bcd2ce5ca0afddcf2 | 5ef2ebd4334769955cff15a03f5956b0fac6ba52 | /docs/conf.py | 15332c991b4e2357f380f776c35bd8c2c007fe0e | [
"BSD-3-Clause-Modification"
] | permissive | smurfix/repoze.retry | b58bf0b1e96e995d8631e03d1eb51cea84bb2a3c | 0b7eae20b2ae29180bc36a5549ae9e54e5b6a7bd | refs/heads/master | 2021-01-17T08:00:17.562716 | 2013-12-03T09:16:39 | 2013-12-03T09:20:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,741 | py | # -*- coding: utf-8 -*-
#
# repoze.retry documentation build configuration file, created by
# sphinx-quickstart on Mon Aug 9 08:03:46 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
parent = os.path.dirname(os.path.dirname(__file__))
sys.path.append(os.path.abspath(parent))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'repoze.retry'
copyright = u'2010, Agendaless Consulting, Inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Derive the docs' version string from the installed distribution's
# metadata so it can never drift from setup.py.
# NOTE(review): pkg_resources is deprecated in modern setuptools;
# importlib.metadata is the current replacement — confirm before porting.
from pkg_resources import require
dist = require('repoze.retry')[0]
version = dist.version
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'repoze.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'repozeretrydoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'repozeretry.tex', u'repoze.retry Documentation',
u'Agendaless Consulting, Inc.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| [
"tseaver@palladion.com"
] | tseaver@palladion.com |
cf586d21d70ab55c9b7d5a8e1e146b94a0e7493b | 4ba18540bfd8c523fe39bbe7d6c8fa29d4ec0947 | /atlas/foundations_events/src/test/consumers/jobs/queued/test_queued_job_notifier.py | d0480777c4e91860a0d9142ea598f1e067386f54 | [
"BSD-3-Clause",
"MIT",
"CC0-1.0",
"Apache-2.0",
"BSD-2-Clause",
"MPL-2.0"
] | permissive | yottabytt/atlas | c9d8ef45a0921c9f46d3ed94d42342f11488a85e | b040e574fbc64c833039b003f8a90345dd98e0eb | refs/heads/master | 2022-10-14T11:12:12.311137 | 2020-06-13T13:19:35 | 2020-06-13T13:19:35 | 272,008,756 | 0 | 0 | Apache-2.0 | 2020-06-13T12:55:29 | 2020-06-13T12:55:28 | null | UTF-8 | Python | false | false | 1,369 | py |
import unittest
from mock import Mock
from foundations_spec.helpers import *
from foundations_spec.helpers.spec import Spec
class TestQueuedJobNotifier(Spec):
    """Spec for the queued-job notifier consumer.

    Verifies that ``JobNotifier.call`` formats a "Job Queued" message
    (job id, human-readable UTC-style timestamp, project name) and sends
    it through the injected notifier mock. The expected message strings
    below are compared byte-for-byte, including their leading newline and
    column-0 body lines — do not re-indent them.
    NOTE(review): "qeueud" in the test method names is a typo kept as-is;
    renaming would change which tests the runner discovers.
    """
    # Mock injected into the consumer under test; assertions inspect
    # its send_message call.
    job_notifier = let_mock()
    @let
    def consumer(self):
        # Imported lazily so the Spec class can be collected without the
        # foundations_events package being importable at module load.
        from foundations_events.consumers.jobs.queued.job_notifier import JobNotifier
        return JobNotifier(self.job_notifier)
    @let
    def job_id(self):
        from uuid import uuid4
        return uuid4()
    @let
    def project_name(self):
        return self.faker.sentence()
    def test_call_sends_notification_with_qeueud_message(self):
        # 1551457960.22515 epoch seconds == 2019-03-01 11:32:40.225150
        # (in the timezone the formatter uses — presumably UTC; verify).
        time = 1551457960.22515
        self.consumer.call({'job_id': self.job_id, 'project_name': self.project_name}, time, None)
        self.job_notifier.send_message.assert_called_with(
            """
Job Queued
Job Id: {}
Timestamp: 2019-03-01 11:32:40.225150
Project Name: {}
""".format(self.job_id, self.project_name)
        )
    def test_call_sends_notification_with_qeueud_message_different_time_stamp(self):
        # Second timestamp proves the formatted time tracks the input
        # rather than being hard-coded.
        time = 1551458381.9642663
        self.consumer.call({'job_id': self.job_id, 'project_name': self.project_name}, time, None)
        self.job_notifier.send_message.assert_called_with(
            """
Job Queued
Job Id: {}
Timestamp: 2019-03-01 11:39:41.964266
Project Name: {}
""".format(self.job_id, self.project_name)
        )
"mislam@squareup.com"
] | mislam@squareup.com |
61afb06b14cebb1b2e48fc3470a43ba7a3778d28 | 614c28bcf7e0443065e9172e0de04ac6494d38ef | /core/forms.py | 8e5e79253477780939b0053571098d029ae05742 | [] | no_license | inood/foodgram-project | a7a26d2d4c9c89a773a8084bfcc3a25ea1e65fe1 | 00d066cbacaa781838474a6e36db3e47618a1bc9 | refs/heads/master | 2023-02-11T19:18:47.371882 | 2021-01-11T13:16:34 | 2021-01-11T13:16:34 | 315,265,172 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | from django import forms
from core.models import Recipe
from foodgram.settings import ITEM_COUNT
class RecipeForm(forms.ModelForm):
    """ModelForm for creating/editing a Recipe.

    Exposes title, description, cooking time, tags and image; the
    ingredient relation (if any) is handled outside this form. The
    description textarea height is driven by the project-wide
    ITEM_COUNT setting, and tags render as checkboxes instead of the
    default multi-select widget.
    """
    class Meta:
        model = Recipe
        fields = ('title', 'description', 'cooking_time', 'tags', 'image')
        widgets = {
            'description': forms.Textarea({'rows': ITEM_COUNT}),
            'tags': forms.CheckboxSelectMultiple()
        }
| [
"inood@yandex.ru"
] | inood@yandex.ru |
eb8e03b50b9cf04c181f5fd214e1461f869755bb | beea119ff63911711c906ea82cd7c8fa0b33f149 | /src/oscar/apps/dashboard/catalogue/mixins.py | b924a7dbbda53e107a9875e6c67e2c80b06ce6bc | [
"MIT"
] | permissive | benjaminbills/fundizshop | 6157316182ffa0045d68737315682c361f968ce9 | b5d09f70c0a728c74efa72a37b03efef2bed9d3d | refs/heads/master | 2023-06-21T19:05:08.120925 | 2021-07-13T05:00:15 | 2021-07-13T05:00:15 | 379,179,978 | 1 | 4 | MIT | 2021-07-07T09:18:29 | 2021-06-22T07:27:11 | Python | UTF-8 | Python | false | false | 529 | py | class PartnerProductFilterMixin:
def filter_queryset(self, queryset):
"""
Restrict the queryset to products the given user has access to.
A staff user is allowed to access all Products.
A non-staff user is only allowed access to a product if they are in at
least one stock record's partner user list.
"""
user = self.request.user
if user.is_staff:
return queryset
return queryset.filter(stockrecords__partner__users__pk=user.pk).distinct()
| [
"paulkush7777@gmail.com"
] | paulkush7777@gmail.com |
992d734c0a60e30c0ac311832377a1bb0b471d35 | f56bf63efa7f6e8d00f3f593ebef9ae15a97a57b | /pythonday2.py | 8496de757c1a17a0d04f7f11cf63512585af1f97 | [] | no_license | thomhard-lab/pythonday1 | c8822ea7b9ef2fbf1cfc46a2efb234b6dda7da8d | 4e02696cc001612c9305b9cbfe2d957aa11eddc9 | refs/heads/main | 2023-02-06T13:34:57.385450 | 2020-12-17T14:28:05 | 2020-12-17T14:28:05 | 321,659,624 | 0 | 0 | null | 2020-12-15T12:21:44 | 2020-12-15T12:21:43 | null | UTF-8 | Python | false | false | 4,718 | py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
kardus = ['aqua', 'sprite', 'cocacola', 'fanta']
print(kardus)
# In[2]:
botol = 'sprite'
print(botol)
# In[3]:
print(kardus[3])
# In[4]:
print(kardus[-1])
# In[5]:
kardus[0]='cleo'
print(kardus)
# In[6]:
kardus[-1]=80
print(kardus)
# In[7]:
print(kardus[1:3])
# In[8]:
print(kardus[1:4])
# In[9]:
kardus[1:3]=['chitato', 'chiki', 'cheetos', 'jetzet', 'qtela', 'oreo', 'malkist', 'doritos', 'taro']
print(kardus)
# In[10]:
kasir=('aqua', 'sprite', 'cocacola', 'fanta')
kasir[0]='chitato'
# In[11]:
kardus_label={'aqua':'nurdeka', 'sprite':'arif', 'cocacola':'thomas', 'fanta':'tio'}
print(kardus_label)
# In[12]:
print(kardus_label['aqua'])
# In[13]:
print(kardus_label['fanta'])
# In[14]:
kardus_label['dancow']='khafid'
print(kardus_label)
# In[15]:
kardus_label['baterai']=['ajie','husein']
print(kardus_label)
# In[16]:
kardus_label['mars']={'lampu': 'mars', 'gunting':'mars'}
print(kardus_label)
# In[21]:
if 'fanta' in kardus :
print('fanta ada di dalam kardus')
else :
print('fanta tidak ada di dalam kardus')
print('kardus berisi ')
print(kardus)
# In[77]:
while 'pringles' not in kardus:
if 'pringles' in kardus :
print('pringles ada di dalam kardus')
kardus.remove('pringles')
print('kardus berisi ')
print(kardus)
else :
print('pringles tidak ada di dalam kardus')
print('mencari taro')
if 'taro' in kardus :
print ('Taro ada di dalam kardus')
kardus.remove('taro')
if 'Qtela' in kardus :
print ('Qtela ada di dalam kardus')
kardus.remove('Qtela')
elif 'oreo' in kardus :
print ('Oreo ada di dalam kardus')
kardus.remove('oreo')
else :
print ('Qtela dan Oreo tidak ada di dalam kardus')
print ('membeli Qtela, oreo dan pringles')
kardus.extend(['qtela','oreo','pringles'])
print (' kardus berisi')
print(kardus)
else :
print ('Taro tidak ada di dalam kardus')
print ('membeli taro')
kardus.append('taro')
print('kardus berisi ')
print(kardus)
# In[57]:
kardus.remove('pringles')
# In[80]:
import random
a = 0
print('kardus berisi ' + str(len(kardus)) + ' barang')
print(kardus)
if 'pringles' in kardus:
kardus.remove('pringles')
print('mengambil pringles')
while 'pringles' not in kardus:
a = a + 1
if 'pringles' in kardus :
print('pringles ada di dalam kardus')
del kardus[random.randint(0,len(kardus)-1)]
print('kardus berisi ' + str(len(kardus)) + ' barang')
print(kardus)
else :
print('pringles tidak ada di dalam kardus')
print('mencari taro')
if 'taro' in kardus :
print ('Taro ada di dalam kardus')
del kardus[random.randint(0,len(kardus)-1)]
if 'Qtela' in kardus :
print ('Qtela ada di dalam kardus')
del kardus[random.randint(0,len(kardus)-1)]
print (kardus)
elif 'oreo' in kardus :
print ('Oreo ada di dalam kardus')
del kardus[random.randint(0,len(kardus)-1)]
print (kardus)
else :
print ('Qtela dan Oreo tidak ada di dalam kardus')
print ('membeli Qtela, oreo dan pringles')
kardus.extend(['qtela','oreo','pringles'])
print('kardus berisi ' + str(len(kardus)) + ' barang')
print(kardus)
else :
print ('Taro tidak ada di dalam kardus')
print ('membeli taro')
kardus.append('taro')
print('kardus berisi ' + str(len(kardus)) + ' barang')
print(kardus)
print('terjadi perulangan ' + str(a) + ' kali' )
# In[93]:
# Subtract 40 from 210 until the running value is no longer positive,
# then print the final (negative) value: 210 -> 170 -> ... -> 10 -> -30.
n = 210
while n > 0:
    n -= 40
print(n)
# In[94]:
# Echo user input in lower case until the user types "stop".
# (Prompt text is Indonesian: "Type a character:".)
while True:
    msg = input("Ketikan karakter:").lower()
    print(msg)
    if msg == "stop":
        break
# In[103]:
# Convert a temperature typed as e.g. "45F" or "120C" to the other scale.
temp = input ("Ketikkan temperatur yang ingin dikonversi, eg.45F, 120C: ")
# BUG FIX: temp[:1] took only the FIRST character, so "45F" was parsed as
# 4 degrees. Everything except the trailing unit letter is the number.
degree = int(temp[:-1])
i_convertion = temp[-1]
if i_convertion == "C":
    # Celsius -> Fahrenheit: round the whole converted value
    # (the original rounded 9*degree alone, then truncated with int()).
    result = int(round(9 * degree / 5 + 32))
elif i_convertion == "F":
    # Fahrenheit -> Celsius: likewise round after the full conversion.
    result = int(round((degree - 32) * 5 / 9))
else:
    print("masukan input yang benar")
if i_convertion == "C":
    print("temperaturenya adalah ", result, "derajat Fahrenhait")
elif i_convertion == "F":
    print("temperaturenya adalah ", result, "derajat Celcius")
# In[ ]:
| [
"noreply@github.com"
] | thomhard-lab.noreply@github.com |
de2cee78d88fad86b3dfd3e9394490c17a9be289 | 3df663b68b55ea6ebac628aba7b5489a6a36d25e | /validadorCPF.py | 99a7e808e7816a16de454892c16262e232b3e68e | [] | no_license | adroaldo-jr/Validador-de-CPF | eeda5e8eefdaede6058098b5d2b67bd8af395755 | 1e5a6e53b0d5973b8d5776bff85e850f2f399e6f | refs/heads/main | 2023-06-04T03:34:43.220465 | 2021-06-29T00:21:51 | 2021-06-29T00:21:51 | 381,184,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,251 | py | while True:
cpf = input('Digite um CPF (somente números): ').strip()
# Faz os cálculos para validar o CPF
novoCpf = cpf[:-2]
reverso = 10
total = 0
for index in range(19):
if index > 8:
index -= 9
total += int(novoCpf[index]) * reverso
reverso -= 1
if reverso < 2:
reverso = 11
digito = 11 - (total % 11)
if digito > 9:
digito = 0
total = 0
novoCpf += str(digito)
# Verifica se o cpf inserido não é apenas sequência de um único número
sequencia = novoCpf == str(novoCpf[0]) * len(cpf)
# Verifica se o resultado é válido ou inválido
if cpf == novoCpf and not sequencia:
print('Válido')
else:
print('Inválido')
# Verifica se o programa deve continuar rodando
continuar = input('Deseja validar outro CPF? [S/N] ').strip().upper()
try:
if continuar == 'S':
continue
else:
quit()
except:
while continuar not in 'SN':
continuar = input('Deseja validar outro CPF? [S/N] ').strip().upper()
if continuar == 'S':
continue
else:
quit() | [
"adroaldosouto@gmail.com"
] | adroaldosouto@gmail.com |
40ff46ac61dab69c55f35bf02697ae1ce5adca82 | 88841c4b8c8a2d8c12186105510d325ff84324a5 | /scripts/artifacts/bluetoothOther.py | 111a4684b9a7165e034b4f586b0c34ae67aa8dad | [
"MIT"
] | permissive | mastenp/iLEAPP | 404b8aca6b6bc6fab04240fdccf822839bade1e1 | ee40ef7505b36d0b9b04131f284a9d4d036514a5 | refs/heads/master | 2022-12-26T07:55:17.905307 | 2020-09-29T11:58:16 | 2020-09-29T11:58:16 | 272,011,420 | 1 | 0 | MIT | 2020-09-29T11:58:17 | 2020-06-13T13:12:05 | Python | UTF-8 | Python | false | false | 1,198 | py | import glob
import os
import sys
import stat
import pathlib
import plistlib
import sqlite3
import json
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, tsv, timeline, is_platform_windows
def get_bluetoothOther(files_found, report_folder, seeker):
    """Parse paired/seen Bluetooth LE "other devices" from an iOS artifact DB.

    Reads the OtherDevices table from the first file in *files_found*
    (a SQLite database), then writes an HTML artifact report and a TSV
    export into *report_folder*. *seeker* is accepted for interface
    uniformity with other artifact parsers but is not used here.
    """
    file_found = str(files_found[0])
    # World-writable chmod — presumably to guarantee SQLite can open the
    # extracted copy; NOTE(review): 0o777 is broader than needed.
    os.chmod(file_found, 0o0777)
    db = sqlite3.connect(file_found)
    cursor = db.cursor()
    # NOTE(review): LastSeenTime is selected but never placed in the
    # report rows below (only columns 0, 1 and 3 are used).
    cursor.execute(
    """
    SELECT
    Name,
    Address,
    LastSeenTime,
    Uuid
    FROM
    OtherDevices
    order by Name desc
    """)
    all_rows = cursor.fetchall()
    usageentries = len(all_rows)
    data_list = []
    if usageentries > 0:
        for row in all_rows:
            # (Name, Address, Uuid) — LastSeenTime (row[2]) intentionally skipped.
            data_list.append((row[0], row[1], row[3]))
        description = ''
        report = ArtifactHtmlReport('Bluetooth Other LE')
        report.start_artifact_report(report_folder, 'Other LE', description)
        report.add_script()
        data_headers = ('Name','Address','UUID' )
        report.write_artifact_data_table(data_headers, data_list, file_found)
        report.end_artifact_report()
        tsvname = 'Bluetooth Other LE'
        tsv(report_folder, data_headers, data_list, tsvname)
    else:
        logfunc('No data available for Bluetooth Other')
    db.close()
    return
| [
"abrignoni@gmail.com"
] | abrignoni@gmail.com |
b77dc7690fac0f34094f9a7ce9bdcf7977eb6be0 | 4c8b7a9a11310ac7aede6fa738f8e631f1193766 | /pywinusb/hid/tools.py | 35c4419a9de9386447c577a19b802be7558426b6 | [] | no_license | yshome-hb/hid_tester | a62550a790f4d2775bbd5baac54e473c072197ec | c1d53f45ebdfda1d0f00ad44d0797268db0c1db1 | refs/heads/main | 2023-09-02T06:34:46.445905 | 2021-11-11T07:29:31 | 2021-11-11T07:29:31 | 426,536,271 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,441 | py | # -*- coding: utf-8 -*-
"""
Other helper functions.
"""
from __future__ import absolute_import
from operator import attrgetter
from . import usage_pages, helpers, winapi
def write_documentation(self, output_file):
    """Write a human-readable HID capabilities report to *output_file*.

    Emits a reST-style summary of the device's top-level identifiers,
    report lengths/counts, and then a per-report-kind (input / output /
    feature) dump of every button/value capability. The device must
    already be opened, since the data comes from the live HID caps.
    """
    if not self.is_opened():
        raise helpers.HIDError("Device has to be opened to get documentation")
    #format
    class CompundVarDict(object):
        """Compound variables dictionary.
        Keys are strings mapping variables.
        If any string has a '.' on it, it means that is an
        object with an attribute. The attribute name will be
        used then as the returned item value.
        """
        def __init__(self, parent):
            self.parent = parent
        def __getitem__(self, key):
            # Plain key: direct dictionary lookup. Dotted key
            # ("hid_caps.usage"): walk attributes off the first segment,
            # which lets the %-template below reach into objects.
            if '.' not in key:
                return self.parent[key]
            else:
                all_keys = key.split('.')
                curr_var = self.parent[all_keys[0]]
                for item in all_keys[1:]:
                    new_var = getattr(curr_var, item)
                    curr_var = new_var
                return new_var
    dev_vars = vars(self)
    dev_vars['main_usage_str'] = repr(
            usage_pages.HidUsage(self.hid_caps.usage_page,
                self.hid_caps.usage) )
    output_file.write( """\n\
HID device documentation report
===============================
Top Level Details
-----------------
Manufacturer String: %(vendor_name)s
Product Sting: %(product_name)s
Serial Number: %(serial_number)s
Vendor ID: 0x%(vendor_id)04x
Product ID: 0x%(product_id)04x
Version number: 0x%(version_number)04x
Device Path: %(device_path)s
Device Instance Id: %(instance_id)s
Parent Instance Id: %(parent_instance_id)s
Top level usage: Page=0x%(hid_caps.usage_page)04x, Usage=0x%(hid_caps.usage)02x
Usage identification: %(main_usage_str)s
Link collections: %(hid_caps.number_link_collection_nodes)d collection(s)
Reports
-------
Input Report
~~~~~~~~~~~~
Length: %(hid_caps.input_report_byte_length)d byte(s)
Buttons: %(hid_caps.number_input_button_caps)d button(s)
Values: %(hid_caps.number_input_value_caps)d value(s)
Output Report
~~~~~~~~~~~~~
length: %(hid_caps.output_report_byte_length)d byte(s)
Buttons: %(hid_caps.number_output_button_caps)d button(s)
Values: %(hid_caps.number_output_value_caps)d value(s)
Feature Report
~~~~~~~~~~~~~
Length: %(hid_caps.feature_report_byte_length)d byte(s)
Buttons: %(hid_caps.number_feature_button_caps)d button(s)
Values: %(hid_caps.number_feature_value_caps)d value(s)
""" % CompundVarDict(dev_vars)) #better than vars()!
    #return
    # inspect caps
    for report_kind in [winapi.HidP_Input,
            winapi.HidP_Output, winapi.HidP_Feature]:
        all_usages = self.usages_storage.get(report_kind, [])
        if all_usages:
            output_file.write('*** %s Caps ***\n\n' % {
                    winapi.HidP_Input : "Input",
                    winapi.HidP_Output : "Output",
                    winapi.HidP_Feature : "Feature"
                }[report_kind])
            # normalize usages to allow sorting by usage or min range value
            for item in all_usages:
                if getattr(item, 'usage', None) != None:
                    item.flat_id = item.usage
                elif getattr(item, 'usage_min', None) != None:
                    item.flat_id = item.usage_min
                else:
                    item.flat_id = None
            # NOTE(review): sorted() returns a NEW list which is discarded
            # here, so all_usages is never actually re-ordered; the intent
            # was presumably all_usages.sort(key=...) — confirm before fixing.
            sorted(all_usages, key=attrgetter('usage_page', 'flat_id'))
            for usage_item in all_usages:
                # remove helper attribute
                del usage_item.flat_id
                all_items = usage_item.inspect()
                # sort first by 'usage_page'...
                usage_page = all_items["usage_page"]
                del all_items["usage_page"]
                if "usage" in all_items:
                    # Single-usage capability: print its id and decoded name.
                    usage = all_items["usage"]
                    output_file.write(" Usage {0} ({0:#x}), "\
                        "Page {1:#x}\n".format(usage, usage_page))
                    output_file.write(" ({0})\n".format(
                        repr(usage_pages.HidUsage(usage_page, usage))) )
                    del all_items["usage"]
                elif 'usage_min' in all_items:
                    # Ranged capability: print the inclusive usage id span.
                    usage = (all_items["usage_min"], all_items["usage_max"])
                    output_file.write(" Usage Range {0}~{1} ({0:#x}~{1:#x}),"
                        " Page {2:#x} ({3})\n".format(
                            usage[0], usage[1], usage_page,
                            str(usage_pages.UsagePage(usage_page))) )
                    del all_items["usage_min"]
                    del all_items["usage_max"]
                else:
                    raise AttributeError("Expecting any usage id")
                # Remaining inspected attributes, alphabetically; anything
                # usage-related is also shown in hex.
                attribs = list( all_items.keys() )
                attribs.sort()
                for key in attribs:
                    if 'usage' in key:
                        output_file.write("{0}{1}: {2} ({2:#x})\n".format(' '*8,
                            key, all_items[key]))
                    else:
                        output_file.write("{0}{1}: {2}\n".format(' '*8,
                            key, all_items[key]))
                output_file.write('\n')
"hustshenyang@126.com"
] | hustshenyang@126.com |
d2cb686f16745b3f32e57a8074aa9761232ddbb2 | e9b40496c500793d3ab0886295d4cc7c2538c009 | /code/chap3/regTwoVarPlot.py | 43ca9869b2b65f27598fe6d186c043f23f2fd2ba | [] | no_license | gurbuxanink/Python-Companion-to-ISLR | e27ca0342f3ffc7df2bffc857a106535d34c7513 | f46aa0d35551e323a08c201def5ddd6154b09370 | refs/heads/master | 2021-09-11T02:07:54.072651 | 2021-09-06T06:43:42 | 2021-09-06T06:43:42 | 184,629,289 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,334 | py | # Three dimensional setting with two predictors and one response
# We simulate data used in plotting
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import statsmodels.formula.api as smf
np.random.seed(911)
x1 = np.random.uniform(0, 5, 50)
x2 = np.random.uniform(0, 5, x1.size)
y = x1 + x2 + np.random.normal(loc=0, scale=4, size=x1.size)
my_df = pd.DataFrame({'x1': x1 , 'x2': x2, 'y': y})
reg_model = smf.ols(formula='y ~ x1 + x2', data=my_df)
reg_fit = reg_model.fit()
y_fit = reg_fit.fittedvalues
x1_array = np.linspace(0, 5, 20)
x2_array = np.linspace(0, 5, 20)
x1_grid, x2_grid = np.meshgrid(x1_array, x2_array)
y_series = reg_fit.predict(exog=dict(x1=x1_grid.ravel(),
x2=x2_grid.ravel()))
y_grid = y_series.values.reshape(x1_grid.shape)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_wireframe(x1_grid, x2_grid, y_grid, alpha=0.3)
for i in range(x1.size):
ax.plot([x1[i], x1[i]], [x2[i], x2[i]], [y_fit[i], y[i]], color='grey')
ax.scatter(x1, x2, y, c='r')
ax.set_xlabel(r'$X_1$')
ax.set_ylabel(r'$X_2$')
ax.set_zlabel(r'$Y$')
ax.set_xticks([])
ax.set_yticks([])
ax.set_zticks([])
ax.grid(False)
ax.xaxis.pane.fill = False
ax.yaxis.pane.fill = False
ax.zaxis.pane.fill = False
fig.tight_layout()
| [
"naresh_gurbuxani@hotmail.com"
] | naresh_gurbuxani@hotmail.com |
d7f81f1b456b403d5cfaa53ae22d927009042b7d | 8a73cde463081afd76427d5af1e6837bfa51cc47 | /service/surf/apps/filters/metadata.py | 49cdddb85d42d447a04530c4871e5959605a61e7 | [
"MIT"
] | permissive | surfedushare/search-portal | 8af4103ec6464e255c5462c672b30f32cd70b4e1 | 63e30ad0399c193fcb686804062cedf3930a093c | refs/heads/acceptance | 2023-06-25T13:19:41.051801 | 2023-06-06T13:37:01 | 2023-06-06T13:37:01 | 254,373,874 | 2 | 1 | MIT | 2023-06-06T12:04:44 | 2020-04-09T13:07:12 | Python | UTF-8 | Python | false | false | 2,079 | py | from collections import defaultdict
import requests
from django.conf import settings
from django.utils.functional import cached_property
class MetadataTree(object):
    """Client-side cache of the harvester's metadata tree API.

    Fetches the (possibly large) metadata tree once per instance via
    authenticated HTTP and exposes lazily-computed, cached views of it:
    the full tree, a truncated tree, a flat value->node cache per field,
    and per-field translation maps.
    """
    harvester_url = None
    api_token = None
    def __init__(self, harvester_url, api_token, warm_up_cache=False):
        self.harvester_url = harvester_url
        self.api_token = api_token
        # Touching .translations forces tree + cache + translations to be
        # computed eagerly (all are cached_property).
        if warm_up_cache:
            self._warm_up_cache = self.translations  # result should be ignored as it only fills the cache
    def _fetch(self, url):
        """GET *url* with token auth and return the decoded JSON body.

        Raises ValueError on any non-200 response.
        """
        response = requests.get(url, headers={"Authorization": f"Token {self.api_token}"})
        if response.status_code != requests.status_codes.codes.ok:
            raise ValueError(f"Failed request: {response.status_code}")
        return response.json()
    @cached_property
    def tree(self):
        # Full metadata tree for the current site; fetched at most once.
        return self._fetch(f"{self.harvester_url}metadata/tree/?site_id={settings.SITE_ID}")
    @cached_property
    def partial_tree(self):
        # Same tree, but with each node truncated to at most 20 children.
        return self._fetch(f"{self.harvester_url}metadata/tree/?site_id={settings.SITE_ID}&max_children=20")
    @cached_property
    def cache(self):
        # Flatten the tree into {field_value: {node_value: node}} with a
        # "_field" entry per field holding the field node itself.
        cache = defaultdict(dict)
        def _cache_children(field_name, children):
            for child in children:
                cache[field_name][child["value"]] = child
                _cache_children(field_name, child["children"])
        for field in self.tree:
            field_name = field["value"]
            cache[field_name]["_field"] = field
            _cache_children(field_name, field["children"])
        return cache
    @cached_property
    def translations(self):
        # {field_value: {node_value: translation}} for every cached node.
        # NOTE(review): the "_field" bookkeeping entry is included in each
        # map, so every field also gets a "_field" translation key.
        return {
            field["value"]: {
                value: child["translation"]
                for value, child in self.cache[field["value"]].items()
            }
            for field in self.tree
        }
    def get_field(self, field_name):
        # Return the field node itself (stored under the "_field" key).
        return self.cache[field_name]["_field"]
    def get_filter_field_names(self):
        # Values of all fields that are not flagged as manually curated.
        return [
            field["value"]
            for field in self.tree if not field["is_manual"]
        ]
| [
"email@fakoberkers.nl"
] | email@fakoberkers.nl |
3ea01b02f1d2247da82de5a03180b328416801ca | 1a80c38ea020a8b18bb2c61b55caff8a38f553b9 | /SWEA/trying/6058.py | 647d2ed1e2f5a17a5950b75fb675a9047cc7a63c | [] | no_license | jiwookseo/problem-solving | 775a47825dc73f8a29616ef7011e8ee7be346f80 | eefbefb21608ae0a2b3c75c010ae14995b7fc646 | refs/heads/master | 2020-04-19T03:11:02.659816 | 2019-08-14T08:59:06 | 2019-08-14T08:59:06 | 167,926,883 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | # 6058. 새해 축하 파티 D5
# https://swexpertacademy.com/main/code/problem/problemDetail.do?contestProbId=AWbHe_w6AHIDFAV0&categoryId=AWbHe_w6AHIDFAV0&categoryType=CODE
def gen():
    """Yield the running triangular sums 1, 3, 6, ... for 1..400."""
    running_total = 0
    for step in range(1, 401):
        running_total += step
        yield running_total
# Per-testcase input loop (solution left unfinished by the author).
for tc in range(1,int(input())+1):
    inp=list(map(int,input().split()))
    # presumably: b = 3 * first input value, l and k the remaining two
    # problem parameters — TODO confirm against the problem statement.
    b,l,k=3*inp[0],inp[1],inp[2]
    genN=gen()
    # (original note, translated from Korean) Too hard. Graph? Tree?
"spjo21@naver.com"
] | spjo21@naver.com |
b1a0b4815b9bfa38c9c14e9d2b66274c1cfce7da | 156ede071b75ce824dcac0dddb0162b6f83963ee | /03 Conditionals and Control Flow/04_how_the_tables_have_turned.py | fc37dfe32312e542870f5ada12c8207e4d9cd3b9 | [] | no_license | mindovermiles262/codecademy-python | c834b4a7f3b1381dd3e65db27b2edb15eabda9c8 | b6a6aa4db10192264ec2c88ddf4d1d16db66c533 | refs/heads/master | 2021-01-12T03:31:04.388402 | 2017-01-11T16:58:06 | 2017-01-11T16:58:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 625 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 11 08:35:31 2017
@author: mindovermiles262
Codecademy Python
For each boolean value in the editor, write an expression that evaluates to
that value.
Remember, comparators are: ==, !=, >, >=, <, and <=.
Use at least three different ones!
Don't just use True and False! That's cheating!
"""
# Create comparative statements as appropriate on the lines below!
# Each expression below evaluates to the requested boolean using a
# mix of comparators (>, >=, <=, <, ==).
# Make me true!
bool_one = 5 > 3  # We already did this one for you!
# Make me false!
bool_two = 3 >= 5
# Make me true!
bool_three = 3 <= 5
# Make me false!
bool_four = 5 < 3
# Make me true!
bool_five = 4 == 4
"mindovermiles262@gmail.com"
] | mindovermiles262@gmail.com |
d534c147b2b1de14fbf5db3b13e01bc44e796f15 | 96b3290c5b10222fc9fef339815388d572849845 | /manage.sjark | cf27c7640d8493a476b82ddca403ea5f4b84f9b1 | [] | no_license | hlindset/divensis | 7114aa10a89f6dbae7adb792de6800973c0d99e4 | b12b6fd35dbfa6727166c0b8ba68dd057a355927 | refs/heads/master | 2020-12-25T11:41:20.492308 | 2011-02-10T22:37:07 | 2011-02-10T22:37:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | sjark | #!/usr/bin/env python
import site
site.addsitedir('/home/www/.virtualenvs/d3/lib/python2.5/site-packages')
from django.core.management import execute_manager
try:
import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
sys.exit(1)
if __name__ == "__main__":
execute_manager(settings)
| [
"torkil@gmail.com"
] | torkil@gmail.com |
3147ed8320b6303dfd503e350957275f7d9e0b10 | 8337fec2d4e3ea85dd49f87da9f00024b6095190 | /addressbook/settings/local.py | f770a5215e8ada6e81cbdba43fc0d498653ee90b | [] | no_license | ValleyForgeTech/effective-django-tutorial | 12be30c9977b42083d75ae8697c33fbba07d8a13 | 5169c417664b88e29fb7beede6caa7f8d8411423 | refs/heads/master | 2021-01-16T19:17:39.593567 | 2014-06-10T03:13:09 | 2014-06-10T03:13:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 507 | py | DATABASES = {
    # Local development database: a SQLite file in the working directory.
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'address.db', # For sqlite3 this is the database file path.
        'USER': '', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    }
}
| [
"mathew.goldsborough@foreveroceans.com"
] | mathew.goldsborough@foreveroceans.com |
8688ac67867a981e531391ddbcc4d45d57031d25 | 89c4a43a505df8fdf1f0d7386988c4896c2e631b | /google/ads/googleads/v6/services/services/ad_group_criterion_label_service/transports/grpc.py | 3858d13b56d6f9146bc2f4cc05078782cd0906a2 | [
"Apache-2.0"
] | permissive | hurricanelennane/google-ads-python | a0a1fed690776a8bb2e81f637eb7eae10fb4992f | 310a488b6fdad9d5beea8fa4b166edce779a2511 | refs/heads/master | 2023-07-04T03:07:53.344466 | 2021-07-16T19:06:36 | 2021-07-16T19:06:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,967 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google import auth # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v6.resources.types import ad_group_criterion_label
from google.ads.googleads.v6.services.types import (
ad_group_criterion_label_service,
)
from .base import AdGroupCriterionLabelServiceTransport, DEFAULT_CLIENT_INFO
class AdGroupCriterionLabelServiceGrpcTransport(
    AdGroupCriterionLabelServiceTransport
):
    """gRPC backend transport for AdGroupCriterionLabelService.
    Service to manage labels on ad group criteria.
    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.
    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """
    def __init__(
        self,
        *,
        host: str = "googleads.googleapis.com",
        credentials: credentials.Credentials = None,
        credentials_file: str = None,
        scopes: Sequence[str] = None,
        channel: grpc.Channel = None,
        api_mtls_endpoint: str = None,
        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
        ssl_channel_credentials: grpc.ChannelCredentials = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]): The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for grpc channel. It is ignored if ``channel`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        self._ssl_channel_credentials = ssl_channel_credentials
        if channel:
            # Sanity check: Ensure that channel and credentials are not both
            # provided.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        elif api_mtls_endpoint:
            # Deprecated mTLS path: build SSL credentials from the supplied
            # client certificate callback (or application defaults).
            warnings.warn(
                "api_mtls_endpoint and client_cert_source are deprecated",
                DeprecationWarning,
            )
            host = (
                api_mtls_endpoint
                if ":" in api_mtls_endpoint
                else api_mtls_endpoint + ":443"
            )
            if credentials is None:
                credentials, _ = auth.default(
                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
                )
            # Create SSL credentials with client_cert_source or application
            # default SSL credentials.
            if client_cert_source:
                cert, key = client_cert_source()
                ssl_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )
            else:
                ssl_credentials = SslCredentials().ssl_credentials
            # create a new channel. The provided one is ignored.
            self._grpc_channel = type(self).create_channel(
                host,
                credentials=credentials,
                credentials_file=credentials_file,
                ssl_credentials=ssl_credentials,
                scopes=scopes or self.AUTH_SCOPES,
                quota_project_id=quota_project_id,
                options=[
                    # -1 lifts gRPC's default message-size caps.
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            self._ssl_channel_credentials = ssl_credentials
        else:
            host = host if ":" in host else host + ":443"
            if credentials is None:
                credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
            # create a new channel. The provided one is ignored.
            self._grpc_channel = type(self).create_channel(
                host,
                credentials=credentials,
                ssl_credentials=ssl_channel_credentials,
                scopes=self.AUTH_SCOPES,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # Cache of gRPC stub callables, populated lazily by the properties below.
        self._stubs = {}  # type: Dict[str, Callable]
        # Run the base constructor.
        super().__init__(
            host=host, credentials=credentials, client_info=client_info,
        )
    @classmethod
    def create_channel(
        cls,
        host: str = "googleads.googleapis.com",
        credentials: credentials.Credentials = None,
        scopes: Optional[Sequence[str]] = None,
        **kwargs,
    ) -> grpc.Channel:
        """Create and return a gRPC channel object.
        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            grpc.Channel: A gRPC channel object.
        """
        return grpc_helpers.create_channel(
            host,
            credentials=credentials,
            scopes=scopes or cls.AUTH_SCOPES,
            **kwargs,
        )
    @property
    def grpc_channel(self) -> grpc.Channel:
        """Return the channel designed to connect to this service.
        """
        return self._grpc_channel
    @property
    def get_ad_group_criterion_label(
        self,
    ) -> Callable[
        [ad_group_criterion_label_service.GetAdGroupCriterionLabelRequest],
        ad_group_criterion_label.AdGroupCriterionLabel,
    ]:
        r"""Return a callable for the get ad group criterion label method over gRPC.
        Returns the requested ad group criterion label in
        full detail.
        Returns:
            Callable[[~.GetAdGroupCriterionLabelRequest],
                ~.AdGroupCriterionLabel]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_ad_group_criterion_label" not in self._stubs:
            self._stubs[
                "get_ad_group_criterion_label"
            ] = self.grpc_channel.unary_unary(
                "/google.ads.googleads.v6.services.AdGroupCriterionLabelService/GetAdGroupCriterionLabel",
                request_serializer=ad_group_criterion_label_service.GetAdGroupCriterionLabelRequest.serialize,
                response_deserializer=ad_group_criterion_label.AdGroupCriterionLabel.deserialize,
            )
        return self._stubs["get_ad_group_criterion_label"]
    @property
    def mutate_ad_group_criterion_labels(
        self,
    ) -> Callable[
        [ad_group_criterion_label_service.MutateAdGroupCriterionLabelsRequest],
        ad_group_criterion_label_service.MutateAdGroupCriterionLabelsResponse,
    ]:
        r"""Return a callable for the mutate ad group criterion
        labels method over gRPC.
        Creates and removes ad group criterion labels.
        Operation statuses are returned.
        Returns:
            Callable[[~.MutateAdGroupCriterionLabelsRequest],
                ~.MutateAdGroupCriterionLabelsResponse]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "mutate_ad_group_criterion_labels" not in self._stubs:
            self._stubs[
                "mutate_ad_group_criterion_labels"
            ] = self.grpc_channel.unary_unary(
                "/google.ads.googleads.v6.services.AdGroupCriterionLabelService/MutateAdGroupCriterionLabels",
                request_serializer=ad_group_criterion_label_service.MutateAdGroupCriterionLabelsRequest.serialize,
                response_deserializer=ad_group_criterion_label_service.MutateAdGroupCriterionLabelsResponse.deserialize,
            )
        return self._stubs["mutate_ad_group_criterion_labels"]
# Explicit public API of this module.
__all__ = ("AdGroupCriterionLabelServiceGrpcTransport",)
| [
"noreply@github.com"
] | hurricanelennane.noreply@github.com |
b50c1c5b1c3f1af01da5a81fe144e3b6e3bccdd9 | 43c9603d5314a7ba5f1a6e2f7445266002b54b77 | /account_parser.py | 2a9978f8c1e3050fcb456c50819e10b797899549 | [] | no_license | madjabal/notyouraveragebank | 6cc0dc26737765370a8cbae04395e58300f04694 | 62cfd29031d364424bc5aa75cc64ea7082f4aed9 | refs/heads/master | 2020-04-05T17:16:53.364069 | 2018-11-11T06:15:46 | 2018-11-11T06:15:46 | 157,052,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,772 | py | import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import statsmodels.api as sm
def find_still_cash(dataframe, give=False):
    """Estimate the cash that could have been safely invested.

    The "still cash" floor is the minimum observed balance minus one
    (sample) standard deviation of the balance history, rounded to cents.

    Args:
        dataframe: pandas DataFrame with a numeric 'Balance' column.
        give: when True, return the value; when False, print a short report
            (assuming ~2 months of history at 1% per month).

    Returns:
        The still-cash amount (float) when ``give`` is True, else None.
    """
    balance = dataframe['Balance']
    stddev = balance.std()
    balance_min = balance.min()  # renamed: the original shadowed the builtin `min`
    still_cash = round(balance_min - stddev, 2)
    if not give:
        print("Cash safe to be invested over the past 2 months: " + str(still_cash))
        print("This could have yielded: " + str(still_cash * (1.01)**2))
        print("Or an extra " + str(still_cash * (1.01)**2 - still_cash))
    else:
        return still_cash
def plot(dataframe):
    """Draw the balance history as a date/balance line chart and display it."""
    dates = dataframe['Date']
    balances = dataframe['Balance']
    sns.lineplot(x=dates, y=balances)
    plt.show()
def check_savings(dataframe, give=False):
    """Assess the savings trend by fitting a straight line to the balances.

    An OLS line is fitted against time; the (rounded) R-squared of the fit
    is mapped to a human-readable assessment string.

    Args:
        dataframe: pandas DataFrame with a numeric 'Balance' column.
        give: when True, return ``(r2, message)``; otherwise print the message.

    Returns:
        Tuple ``(r2, message)`` when ``give`` is True, else None.
    """
    balances = np.array(dataframe['Balance'])
    time_axis = sm.add_constant(list(range(len(dataframe['Balance']))))
    fit = sm.OLS(balances, time_axis).fit()
    r2 = round(fit.rsquared, 3)
    if r2 > 0.4:
        assessment = "Extra income is high, consider a monthly deposit to investments or savings, "
    else:
        # Ordered bands: the first lower bound that r2 reaches wins.
        bands = [
            (0.2, "Savings are strong, consider a monthly deposit to investments or savings, "),
            (0.1, "Fair strength savings, consider monthly contribution to investments, "),
            (0.0, "Savings are fair, if you don't have a savings account, consider re-budgeting, "),
            (-0.1, "Savings are somewhat weak, if you don't have a savings account, consider re-budgeting, "),
            (-0.2, "Savings are fairly weak, consider re-budgeting, "),
            (-0.4, "Weak savings, strongly consider re-budgeting, "),
        ]
        assessment = "Account in need of severe re-budgeting, "
        for lower_bound, text in bands:
            if r2 >= lower_bound:
                assessment = text
                break
    returnstring = assessment + "score from -1 to 1: " + str(r2)
    if not give:
        print(returnstring)
    else:
        return r2, returnstring
# def stability(dataframe, give=False):
# Demo/driver section: load a sample account history and print usage hints.
# NOTE(review): raises FileNotFoundError when 'account_history.csv' is absent.
account_history = pd.read_csv('account_history.csv')
# print(account_history.describe())
# print(check_savings(account_history))
# print(account_history)
# print(find_still_cash(account_history))
print("Use pd.read_csv() to enter account history")
print("Methods: find_still_cash(), plot(), check_savings()")
| [
"madjabal@bu.edu"
] | madjabal@bu.edu |
831d02112e7ceee36ced42de31fb8ccfb82dfac5 | 2247218311804a8e6ca0d07840ab918cd749376d | /tests/func/test_glr_parsing.py | 00ca40063da92a24e7c3ae3f8c0c690526bf186a | [
"Python-2.0",
"MIT"
] | permissive | morganjk/parglare | e48067ee99ead524d9bb311be80bc173e50ef5d2 | 39b8034d50d328844dfd91fcd94e23166056398f | refs/heads/master | 2021-07-18T09:44:39.758588 | 2017-10-22T15:08:56 | 2017-10-22T15:08:56 | 107,910,367 | 1 | 0 | null | 2017-10-22T23:17:58 | 2017-10-22T23:17:58 | null | UTF-8 | Python | false | false | 5,101 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
from parglare import GLRParser, Grammar, Parser
from parglare.exceptions import SRConflicts
def test_lr2_grammar():
    """An LR(2) grammar is rejected by the LR(1) parser but handled by GLR."""
    grammar = """
    Model: Prods EOF;
    Prods: Prod | Prods Prod;
    Prod: ID "=" ProdRefs;
    ProdRefs: ID | ProdRefs ID;
    ID: /\w+/;
    """
    parsed_grammar = Grammar.from_string(grammar)
    # Choosing between "reduce Prod" and "extend ProdRefs" on an ID needs a
    # second token of lookahead (is the next token '='?), so building an
    # LR(1) table must report shift/reduce conflicts.
    with pytest.raises(SRConflicts):
        Parser(parsed_grammar)
    # GLR forks on the conflict and converges on a single tree.
    glr_parser = GLRParser(parsed_grammar)
    model_text = """
    First = One Two three
    Second = Foo Bar
    Third = Baz
    """
    trees = glr_parser.parse(model_text)
    assert len(trees) == 1
def test_expressions():
    """Ambiguity counts for an expression grammar, before and after adding
    priorities and associativities."""
    evaluate = {
        "E": [
            lambda _, nodes: nodes[0] + nodes[2],
            lambda _, nodes: nodes[0] * nodes[2],
            lambda _, nodes: nodes[1],
            lambda _, nodes: int(nodes[0])
        ]
    }
    # With no priorities/associativities this grammar is highly ambiguous.
    grammar = """
    E: E "+" E | E "*" E | "(" E ")" | /\d+/;
    """
    expr_grammar = Grammar.from_string(grammar)
    parser = GLRParser(expr_grammar, actions=evaluate, debug=True)
    # Two readings already: (4 + 2) * 3 == 18 and 4 + (2 * 3) == 10.
    values = parser.parse("4 + 2 * 3")
    assert len(values) == 2
    assert 18 in values
    assert 10 in values
    # Each extra operand multiplies the readings: 5, then 14. In general
    # the count is the Catalan number of the number of operations, which
    # grows very fast (16796 for 10 operations).
    # https://en.wikipedia.org/wiki/Catalan_number
    values = parser.parse("4 + 2 * 3 + 8")
    assert len(values) == 5
    values = parser.parse("4 + 2 * 3 + 8 * 5")
    assert len(values) == 14
    # Raising '*' priority to 15 (default production priority is 10)
    # removes most of the ambiguity...
    grammar = """
    E: E "+" E | E "*" E {15}| "(" E ")" | /\d+/;
    """
    expr_grammar = Grammar.from_string(grammar)
    parser = GLRParser(expr_grammar, actions=evaluate)
    # ...but '+' associativity is still undefined, so two readings remain:
    # (4 + (2*3)) + 8 and 4 + ((2*3) + 8).
    values = parser.parse("4 + 2 * 3 + 8")
    assert len(values) == 2
    # Declaring both operators left-associative resolves every ambiguity.
    grammar = """
    E: E "+" E {left}| E "*" E {left, 15}| "(" E ")" | /\d+/;
    """
    expr_grammar = Grammar.from_string(grammar)
    parser = GLRParser(expr_grammar, actions=evaluate)
    values = parser.parse("4 + 2 * 3 + 8 * 5 * 3")
    assert len(values) == 1
    assert values[0] == 4 + 2 * 3 + 8 * 5 * 3
def test_epsilon_grammar():
    """A grammar whose Prods may derive EMPTY parses both text and ""."""
    grammar = """
    Model: Prods EOF;
    Prods: Prod | Prods Prod | EMPTY;
    Prod: ID "=" ProdRefs;
    ProdRefs: ID | ProdRefs ID;
    ID: /\w+/;
    """
    parsed_grammar = Grammar.from_string(grammar)
    glr_parser = GLRParser(parsed_grammar, debug=True)
    model_text = """
    First = One Two three
    Second = Foo Bar
    Third = Baz
    """
    trees = glr_parser.parse(model_text)
    assert len(trees) == 1
    # The EMPTY alternative lets the empty input parse unambiguously too.
    trees = glr_parser.parse("")
    assert len(trees) == 1
def test_non_eof_grammar_nonempty():
    """Without an EOF anchor, prefixes of the input may also parse, so a
    single input can yield several successful (partial) parses."""
    grammar_nonempty = """
    Model: Prods;
    Prods: Prod | Prods Prod;
    Prod: ID "=" ProdRefs;
    ProdRefs: ID | ProdRefs ID;
    ID: /\w+/;
    """
    parsed_grammar = Grammar.from_string(grammar_nonempty)
    model_text = """
    First = One Two three
    Second = Foo Bar
    Third = Baz
    """
    glr_parser = GLRParser(parsed_grammar, debug=True)
    trees = glr_parser.parse(model_text)
    # Three successful parses: e.g. one consumes 'First = One Two three
    # Second' and stops because the next token is '='; since the model is
    # not terminated with EOF, such partial parses still succeed.
    assert len(trees) == 3
def test_non_eof_grammar_empty():
    """With an EMPTY alternative and no EOF anchor, real input yields
    several partial parses while the empty string parses uniquely."""
    grammar_empty = """
    Model: Prods;
    Prods: Prod | Prods Prod | EMPTY;
    Prod: ID "=" ProdRefs;
    ProdRefs: ID | ProdRefs ID;
    ID: /\w+/;
    """
    parsed_grammar = Grammar.from_string(grammar_empty)
    model_text = """
    First = One Two three
    Second = Foo Bar
    Third = Baz
    """
    glr_parser = GLRParser(parsed_grammar, debug=True)
    trees = glr_parser.parse(model_text)
    assert len(trees) == 3
    trees = glr_parser.parse("")
    assert len(trees) == 1
| [
"igor.dejanovic@gmail.com"
] | igor.dejanovic@gmail.com |
c55b6c702c78501e61600c16337b8dad5c3dddb6 | 13754ebfa1517a8a847709c00db04578f4f5754f | /IT/214_database_management_system/p_m_jat/2019/lab_sessions/201601020_201601132_code1/throughput_plot.py | 30c5f7c6cf3404387033908431a24cf42bffc25c | [
"MIT"
] | permissive | shivani-nandani/AcadVault | 53c23be3ba763514fc7defda8eebba76100d6e1b | a89bad12cc498aa484d6a6005e036fb56985a406 | refs/heads/master | 2023-09-05T21:29:09.966860 | 2021-11-14T19:04:09 | 2021-11-14T19:04:09 | 260,447,543 | 0 | 2 | MIT | 2020-05-01T11:54:00 | 2020-05-01T11:53:59 | null | UTF-8 | Python | false | false | 1,379 | py | import matplotlib.pyplot as plt
import math
# Throughput plot: read (size, time) pairs from paired benchmark result
# files and plot size/time against log2(size) for two kernels.
# NOTE: this script targets Python 2 (it uses the `long` builtin).
fp = open("parallel_vector_add.txt", "r")
f = open("serial_vector_add.txt", "r")
sizes_add = []
tp_add = []
for serial_line, parallel_line in zip(f, fp):
    t = serial_line.split()     # serial result: <size> <time>
    tp = parallel_line.split()  # parallel result: <size> <time>
    sizes_add.append(math.log(long(t[0]), 2))  # x axis: log2 of problem size
    if tp[1] == "inf":
        # Unmeasurable parallel run: record zero throughput.
        # (Bug fix: the original appended to the then-undefined name `sup`
        # here, which raised NameError whenever this branch was taken.)
        tp_add.append(0)
    elif t[1] == "inf":
        tp_add.append(float("inf"))
    else:
        tp_add.append(long(t[0]) / float(t[1]))
f.close()
fp.close()
plt.plot(sizes_add[0:16], tp_add[0:16], marker='^', label="addition")
fp = open("parallel_square.txt", "r")
f = open("serial_square.txt", "r")
sizes_sq = []
tp_sq = []
for serial_line, parallel_line in zip(f, fp):
    t = serial_line.split()
    tp = parallel_line.split()
    sizes_sq.append(math.log(long(t[0]), 2))
    if tp[1] == "inf":
        tp_sq.append(0)
    elif t[1] == "inf":
        tp_sq.append(float("inf"))
    else:
        tp_sq.append(long(t[0]) / float(t[1]))
f.close()
fp.close()
plt.plot(sizes_sq[0:16], tp_sq[0:16], marker='o', label="square")
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, mode="expand", borderaxespad=0.)
plt.ylabel('throughput')
plt.xlabel('Size (Bytes)')
plt.savefig('plot_tp.png')
plt.show() | [
"noreply@github.com"
] | shivani-nandani.noreply@github.com |
94fa79eb815b275ca6a5e5dadddd86fa66dfc38e | 48718ffbc6edc032eb3381ead91dacc71e0e7bfd | /timg/timglib/errors.py | 2b875feb06bee9ef87db346e86e92e05630ff8bb | [
"MIT"
] | permissive | 0xC4SE/timg | 4e1a7977372844d72e0b0131bacde962fca10257 | 063985a905862bdfeb79b2d9b1ab62d473bc2d9b | refs/heads/main | 2023-03-02T04:19:36.623481 | 2021-02-14T12:49:34 | 2021-02-14T12:49:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 304 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: thepoy
# @Email: thepoy@aliyun.com
# @File Name: errors.py
# @Created: 2021-02-08 17:04:25
# @Modified: 2021-02-10 14:24:27
class UploadFailed(Exception):
    """Raised when an upload operation fails."""
class OverSizeError(Exception):
    """Raised when a payload exceeds the allowed size limit."""
class NotLogin(Exception):
    """Raised when an operation requires a logged-in session and none exists."""
| [
"thepoy@163.com"
] | thepoy@163.com |
76875bec9b96853a5c5cb052cce66c86f04acb9a | 41ece9eafe777948758fa3c5d12bc238829164bc | /Python/Avito_parser/main.py | 05d89ab061ee7ff0ba70579be8cec111abb3f505 | [] | no_license | edition89/projects | d4d3a6ecb80cad18e74d9402c401d85077eb974a | d2052d89b2b460f8b6cb9d0f3a08904c50df7862 | refs/heads/main | 2023-04-06T13:02:30.321554 | 2021-04-19T08:46:20 | 2021-04-19T08:46:20 | 359,360,035 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,354 | py | import requests
from bs4 import BeautifulSoup
import csv
def get_html(url):
    """Download a page through the hard-coded SOCKS5 proxy and return its body text."""
    response = requests.get(url=url, proxies={'http': "socks5://99.109.24.9:1080"})
    return response.text
def get_total_pages(html):
    """Parse a results page and return the number of the last pagination page."""
    parsed = BeautifulSoup(html, 'lxml')
    print(parsed.find('p', class_='text').text)
    # The second-to-last <span> of the pagination widget carries the last page number.
    last_page = parsed.find('div', attrs={"data-marker" : "pagination-button"}).find_all('span')[-2].text
    return int(last_page)
def write_csv(data):
    """Append one scraped ad as a row of avito.csv.

    Args:
        data: dict with 'title', 'price', 'metro', 'photo' and 'url' keys.
    """
    # Bug fix: the original started with `print(end)`, and `end` is never
    # defined, so every call raised NameError before writing anything.
    # newline='' stops the csv module from emitting blank rows on Windows.
    with open('avito.csv', 'a', newline='') as f:
        writer = csv.writer(f)
        writer.writerow((data['title'],
                         data['price'],
                         data['metro'],
                         data['photo'],
                         data['url']))
def get_page_data(html):
    """Extract every ad from one search-results page and append each to the CSV.

    Missing fields fall back to placeholder strings so that a single
    malformed ad does not abort the crawl.
    """
    soup = BeautifulSoup(html, 'lxml')
    ads = soup.find('div', class_='snippet-list').find_all('div', class_='item_table')
    for ad in ads:
        # Each lookup is guarded: BeautifulSoup's find() returns None for a
        # missing node and attribute access on None raises AttributeError.
        try:
            title = ad.find('a', class_="snippet-link").text
        except AttributeError:
            title = "Загаловок пустой"
        try:
            url = 'https://www.avito.ru' + ad.find('a', class_="snippet-link").get('href')
        except AttributeError:
            url = "Нет ссылки"
        try:
            price = ad.find('div', class_="snippet-price-row").find('span', class_="snippet-price").text.strip()
        except AttributeError:
            price = "Нет цены"
        try:
            metro = ad.find('div', class_="snippet-date-row").find('div', class_="snippet-date-info").text.strip()
        except AttributeError:
            metro = "Неизвестная дата"
        try:
            # Bug fix: the original referenced the undefined name `soup_in`,
            # so the photo lookup always failed and every row got "Нет фото".
            photo = ad.find('div', class_='item-photo').find('img').get('src')
        except AttributeError:
            photo = "Нет фото"
        data = {'title': title,
                'price': price,
                'metro': metro,
                'photo': photo,
                'url': url}
        write_csv(data)
def main():
    """Crawl every result page of the hard-coded Avito search query."""
    base_url = "https://www.avito.ru/rossiya/muzykalnye_instrumenty/dlya_studii_i_kontsertov-ASgBAgICAUTEAsgK?d=1&pmax=5000&q=dj+контроллер&p=1"
    total_pages = get_total_pages(get_html(base_url))
    # The URL ends in "p=1"; stripping the final digit leaves a prefix the
    # current page number can be appended to.
    page_prefix = base_url[:-1]
    for page in range(1, total_pages + 1):
        get_page_data(get_html(page_prefix + str(page)))
if __name__ == '__main__':
    main()
| [
"roman_gabibullae@mail.ru"
] | roman_gabibullae@mail.ru |
1e741b4e3772f4de6d267d636bf813ac0c28d13d | c82c7b04df004c5f0cb371e56ec2a11d632032c1 | /simpy_airport.py | a0ad21e06217bf7d8c3cc05998ae3bb1c9e65e40 | [] | no_license | krangan2587/algorithms-master | ed560a56a496d6aa838712d411c7e410628b8450 | d6a2ef5ab059fc843e2d35480ffbc6bb3ac57a6f | refs/heads/master | 2021-07-05T12:38:33.617957 | 2021-05-08T16:37:59 | 2021-05-08T16:37:59 | 56,648,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,326 | py |
####### Homework 9 Question 4 using Simpy ######
# ---------- Import modules -----------
# import SimPy module
import simpy
# Import random module
import random
# ------------ Set constants ---------------
# Model parameters shared by every replication.
numCheckers = 35 # number of boarding-pass checkers
numScanners = 35 # number of scanners
arrRate = 50 # arrival rate (passengers per minute)
checkRate = 0.75 # boarding-pass check rate (minutes per passenger)
minScan = 0.5 # scanner minimum time for uniform distribution
maxScan = 1.0 # scanner maximum time for uniform distribution
runTime = 100 # run time (minutes) per simulation
replications = 100 # number of replications
# ------------ Initialize global variables ----------
# One entry appended per replication by the run loop below.
avgCheckTime = [] # average boarding-pass check time (for each replication)
avgScanTime = [] # average scan time (for each replication)
avgWaitTime = [] # average total wait time (for each replication)
avgSystemTime = [] # average total time in system (for each replication)
# ------------ Create model ------------------
# System class
class System(object):
    """Security checkpoint: a pooled bank of boarding-pass checkers plus
    scanners that each keep their own queue."""
    def __init__(self, env):
        self.env = env
        # All checkers share one resource (and therefore a single queue).
        self.checker = simpy.Resource(env, numCheckers)
        # One capacity-1 resource per scanner so every scanner queues separately.
        self.scanner = [simpy.Resource(env, 1) for _ in range(numScanners)]
    def check(self, passenger):
        # Exponential service time; expovariate's argument is the rate (1/mean).
        yield self.env.timeout(random.expovariate(1.0 / checkRate))
    def scan(self, passenger):
        # Uniform service time between minScan and maxScan minutes.
        yield self.env.timeout(random.uniform(minScan, maxScan))
# Passenger process through system
def passenger(env, name, s):
    """One passenger's path: checker queue, then the shortest scanner queue."""
    # Module-level accumulators this process updates.
    global checkWait
    global scanWait
    global sysTime
    global totThrough
    arrived = env.now
    # Stage 1: boarding-pass check (single shared queue).
    with s.checker.request() as request:
        yield request
        service_start = env.now
        yield env.process(s.check(name))
        checkTime.append(env.now - service_start)
    # Stage 2: pick the scanner with the shortest queue. min() returns the
    # first minimal index, matching a left-to-right strict '<' scan.
    minq = min(range(numScanners), key=lambda idx: len(s.scanner[idx].queue))
    with s.scanner[minq].request() as request:
        yield request
        service_start = env.now
        yield env.process(s.scan(name))
        scanTime.append(env.now - service_start)
    sysTime.append(env.now - arrived)
    totThrough += 1
# Passenger arrival process
def setup(env):
    """Feed passengers into the system until the simulation run ends."""
    system = System(env)
    arrivals = 0
    while True:
        # Exponential inter-arrival gaps => Poisson arrival process.
        yield env.timeout(random.expovariate(arrRate))
        arrivals += 1
        env.process(passenger(env, 'Passenger %d' % arrivals, system))
# ------------------ Run the model --------------------
# Run `replications` independent simulations; each gets its own seed,
# environment and freshly reset global accumulators.
for i in range(replications):
    # choose random seed (deterministic per replication)
    random.seed(i)
    # create environment
    env = simpy.Environment()
    # initialize global variables (mutated by passenger())
    totThrough = 0
    checkTime = []
    scanTime = []
    sysTime = []
    # run the simulation
    env.process(setup(env)) # start passenger arrival process
    env.run(until=runTime) # run for runTime simulated minutes
    # Calculate average times for this replication
    # NOTE(review): the [1:totThrough] slice drops the first passenger's
    # sample yet still divides by totThrough -- confirm this is intended.
    avgSystemTime.append(sum(sysTime[1:totThrough]) / totThrough)
    avgCheckTime.append(sum(checkTime[1:totThrough]) / totThrough)
    avgScanTime.append(sum(scanTime[1:totThrough]) / totThrough)
    avgWaitTime.append(avgSystemTime[i] - avgCheckTime[i] - avgScanTime[i])
    print('%d : Replication %d times %.2f %.2f %.2f %.2f' % (
        totThrough, i + 1, avgSystemTime[i], avgCheckTime[i], avgScanTime[i], avgWaitTime[i]))
# Calculate overall averages across all replications
print('-----')
print('Average system time = %.2f' % (sum(avgSystemTime) / replications))
print('Average check time = %.2f' % (sum(avgCheckTime) / replications))
print('Average scan time = %.2f' % (sum(avgScanTime) / replications))
print('Average wait time = %.2f' % (sum(avgWaitTime) / replications))
| [
"kasturiranganp25@gmail.com"
] | kasturiranganp25@gmail.com |
f33fdb9037716d0a59e8f804848a001ca2e547e0 | b4dc0f5f0360f3b24ee5ada5cb90c14528ce66a7 | /책 나눠주기.py | bfce62d358b2e8706f4b637e21720ab0b89fecf9 | [] | no_license | YG-creator/Greedy_Algorithm | 508c92610adf1078317f7790aed365888d9ffe80 | 6614f8f2fcb1c333907a1dbeac550b692aa81bab | refs/heads/master | 2023-04-21T10:02:18.536358 | 2021-04-12T13:57:44 | 2021-04-12T13:57:44 | 357,213,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 841 | py | T = int(input()) #테스트 케이스 갯수
for _ in range(T):
    N, M = map(int,input().split()) # N: number of books, M: number of students
    books = []
    for _ in range(M):
        a, b = map(int, input().split()) # student accepts any book numbered a..b (inclusive)
        books.append( [a, b] )
    # Greedy: sort requests by upper bound b, breaking ties by lower bound a,
    # so the most constrained students (smallest upper bound) are served first.
    books.sort(key = lambda x: (x[1], x[0]))
    # In that order, hand each student the smallest book in range not yet given.
    nl = [False for i in range(N+1)] # nl[i]: book i already handed out (index 0 unused)
    result = 0
    for book in books:
        for i in range(book[0], book[1]+1): # scan the acceptable range for a free book
            if nl[i] == False:
                nl[i] = True
                result += 1
                break
    print(result)
| [
"2016272046@yonsei.ac.kr"
] | 2016272046@yonsei.ac.kr |
832c7aa91adfe0a5eaf875f4ef9aa36ef34176c8 | 49772506f78205290fb3229532bb81dae7afd6b2 | /src/subjects/serializers.py | 938a37462326353e5c062cfa8022d387153bd684 | [
"MIT"
] | permissive | rooneyrulz/explore_restframework_level_2 | 5fb06924ecff4550d9c4c5e510e99fc802207d87 | c2f0fab99a950e51d0dc49e8ab38dc6422e3f685 | refs/heads/master | 2021-09-23T07:44:36.957201 | 2019-12-04T18:58:32 | 2019-12-04T18:58:32 | 225,658,826 | 1 | 0 | MIT | 2021-09-22T18:05:08 | 2019-12-03T15:53:11 | Python | UTF-8 | Python | false | false | 457 | py | from rest_framework import serializers
from .models import Subject
class SubjectSerializer(serializers.ModelSerializer):
    """Serializer exposing a Subject's id, name and teacher; id is read-only."""
    class Meta:
        model = Subject
        fields = ('id', 'name', 'teacher',)
        read_only_fields = ('id',)
    # Commented-out case-insensitive uniqueness validation, kept for reference:
    # def validate_name(self, value):
    #     qs = Teacher.objects.filter(name__iexact=value)
    #     if qs.exists():
    #         raise serializers.ValidationError('name already exists!')
    #     return value
| [
"ruzny@ruzny.com"
] | ruzny@ruzny.com |
f08d3bbd0dd875736377148ef15b65d78292cf9f | 638f2a3d825ddd36b5a501c00d1a2c317a56b7ce | /src/model.py | 5cde5ba891ac829576462dbb16287f4bb97fecdc | [] | no_license | vmccready/fraud-detection | 7742b4521fb8e15741575a9e88f30f567b9b01a2 | a549eca9ec2a3567c4846597c47819fe56c310c9 | refs/heads/main | 2023-04-05T14:11:39.430531 | 2021-04-21T20:06:00 | 2021-04-21T20:06:00 | 358,333,809 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 913 | py | import numpy as np
import pandas as pd
import pickle
# from pipeline import get_X, get_y
from sklearn.ensemble import RandomForestClassifier
# BEST MODEL
# features - 'previous_payout', 'premium', 'no_payout_name',
# 'payout_toself', 'user_age'
def create_model(data, pipeline, model_path='models/test_model.pickle'):
'''
Takes raw training data as dataframe, runs through pipeline, and trains a model.
Input:
data (pd.DataFrame) - Dataframe of raw training data that has been read
in from json file.
Output:
returns none - best model will be saved to models folder
'''
# Create X and y from raw data
X = pipeline.get_X(data)
y = pipeline.get_y(data)
# Create and train model
model = RandomForestClassifier()
model.fit(X,y)
# Output model as pickle file to input path.
pickle.dump(model, open(model_path, 'wb')) | [
"vinson.mcc@gmail.com"
] | vinson.mcc@gmail.com |
a083f7ad8469cb81abd1209db8dd1c48afc39efa | 52d9f9fb6348c51e14c470e4405a79204ea98980 | /unit5_Auto_Test_Model/test5_3模块化与参数化/read_json.py | cf0bae59ca450578251cece4a4810693c6a6ce5f | [] | no_license | UULIN/automation_test | 0a815d9b1032d8d393e4f340bd294f17e20d194d | 5a193bb897d653a44e0376bcb6592d7b1811a424 | refs/heads/master | 2023-01-05T15:07:54.133329 | 2020-11-02T04:58:11 | 2020-11-02T04:58:11 | 280,456,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | """
读取json
"""
import json
import os
path = os.path.abspath("config")
with open(path+"\\user_info.json", "r") as f:
data = f.read()
userlist = json.loads(data)
print(userlist) | [
"1036190402@qq.com"
] | 1036190402@qq.com |
2d2ab5a792bc6425cc0658ad058e03676e4f4f14 | 3467bd7ea78649ebfb39746cf05663b7913691b3 | /ComputerAgent.py | 265b5a5e49695a79f10ead93674a1618f00b1f61 | [] | no_license | anindyy/halma-ai | a04b8bc50a73945c43309eb810d9e2bdfabb2d59 | 7113e5d3c2497c1da0d5a6b284ce75bda3342d30 | refs/heads/main | 2023-02-24T14:27:35.838865 | 2021-01-22T03:49:28 | 2021-01-22T03:49:28 | 331,825,614 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,109 | py | from Agent import Agent
from Tree import Tree
from math import inf
from time import time
class MinimaxAgent(Agent):
def __init__(self, id, _time):
super().__init__(id, _time)
def play(self, board):
print("MINIMAX PLAYS")
bestval = board.value(self.id)
bestboard = board
deadline = time() + self.time
#print("current value", bestval)
cdepth = 0
while (time() <= deadline):
print("depth", cdepth)
finboard, finval = self.minimax(board, True, cdepth, -inf, inf, time() + self.time)
if (finval > bestval):
bestval = finval
bestboard = finboard
cdepth += 1
board.update(bestboard)
#print("new value", self.id, board.value(self.id))
def minimax(self, board, maximizing, depth, a, b, deadline):
moves = Tree(board).generateChild(self.id)
bestmove = board
# a adalah nilai terbaik untuk maximizing (paling besar) sepanjang path ke root
# b adalah nilai terbaik untuk minimizing (paling kecil) sepanjang path ke root
if (depth == 0 or len(moves) == 0 or board.winCheck() != 0 or time() > deadline):
return board, board.value(self.id)
elif (maximizing):
bestvalue = -inf
for m in moves:
if (time() > deadline):
return bestmove, bestvalue
m.changePlayerTurn()
_, mvalue = self.minimax(m, not maximizing, depth-1, a, b, deadline)
if (mvalue > bestvalue):
bestvalue = mvalue
bestmove = m
a = max(a, mvalue)
if (b <= a):
break
else:
bestvalue = inf
for m in moves:
if (time() > deadline):
return bestmove, bestvalue
m.changePlayerTurn()
_, mvalue = self.minimax(m, not maximizing, depth-1, a, b, deadline)
if (mvalue < bestvalue):
bestvalue = mvalue
bestmove = m
b = min(b, mvalue)
if (b <= a):
break
return bestmove, bestvalue
class LocalSearchAgent(Agent):
def __init__(self, id, _time):
super().__init__(id, _time)
def play(self,board,listSize,T):
print("LOCAL SEARCH PLAYS... id =", self.id)
deadline = time() + self.time
bestval = board.value(self.id)
bestboard = board
cdepth = 0
while (time() <= deadline):
print("depth", cdepth)
finboard, finval = self.minimaxlocal(board, True, cdepth, -inf, inf, time() + self.time, listSize, T)
if (finval > bestval):
bestval = finval
bestboard = finboard
cdepth+=1
board.update(bestboard)
print("value",board.value(self.id))
def minimaxlocal(self,board,maximizing,depth,a,b,deadline,listSize,T):
# a adalah nilai terbaik untuk maximizing (paling besar) sepanjang path ke root
# b adalah nilai terbaik untuk minimizing (paling kecil) sepanjang path ke root
# memanggil simulate untuk generate child
moves = Tree(board).generateChildLocal(listSize,T,self.id)
bestmove = board
if (depth == 0 or len(moves) == 0 or board.winCheck()!=0 or time() > deadline):
if (time() > deadline):
print("timeout")
return board, board.value(self.id)
elif (maximizing):
bestvalue = -inf
for m in moves:
if (time() > deadline):
#print("timeout at maximizing")
return bestmove, bestvalue
m.changePlayerTurn()
_, mvalue = self.minimaxlocal(m, not maximizing, depth-1, a, b, deadline, listSize, T)
if (mvalue > bestvalue):
bestvalue = mvalue
bestmove = m
a = max(a, mvalue)
if (b <= a):
break
else:
bestvalue = inf
for m in moves:
if (time() > deadline):
#print("timeout at minimizing")
return bestmove, bestvalue
m.changePlayerTurn()
_, mvalue = self.minimaxlocal(m, not maximizing, depth-1, a, b, deadline,listSize,T)
if (mvalue < bestvalue):
bestvalue = mvalue
bestmove = m
b = min(b, mvalue)
if (b <= a):
break
return bestmove, bestmove.value(self.id)
| [
"pamsrewari@gmail.com"
] | pamsrewari@gmail.com |
088f8b051d9ebba440d3ead3bdbd22c41891d7ee | 98371fb1fe6300ad42d18bfbfd029ac1a30d1eb6 | /d21.py | a3ec5fa8269a866ef3874f8f7e5e6a0e396babe8 | [] | no_license | Springo/AdventOfCode2020 | f96db67da9028524b4ce5dcb09509b13c93ffc36 | f5488d5c6f45f1d8b0ae7dbe8586ee4dce479c3a | refs/heads/main | 2023-02-03T22:55:36.681541 | 2020-12-25T05:42:30 | 2020-12-25T05:42:30 | 317,372,233 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,688 | py | def readFile(filename):
lines = []
with open(filename, 'r') as f:
for line in f:
lines.append(line[:-1])
return lines
lines = readFile("d21input.txt")
al_list = dict()
ing_lines = dict()
al_lines = dict()
for i in range(len(lines)):
line = lines[i]
ls1 = line.split(" (contains ")
ings = ls1[0].split()
algs = ls1[1][:-1].split(', ')
for ing in ings:
if ing not in ing_lines:
ing_lines[ing] = []
ing_lines[ing].append(i)
for alg in algs:
if alg not in al_list:
al_list[alg] = set()
al_list[alg].add(ing)
for alg in algs:
if alg not in al_lines:
al_lines[alg] = []
al_lines[alg].append(i)
for alg in al_list:
rem_list = []
for ing in al_list[alg]:
for li in al_lines[alg]:
if li not in ing_lines[ing]:
rem_list.append(ing)
break
for ing in rem_list:
al_list[alg].remove(ing)
invalid = dict()
for alg in al_list:
for ing in al_list[alg]:
invalid[ing] = True
total = 0
for ing in ing_lines:
if ing not in invalid:
total += len(ing_lines[ing])
print(total)
for iter in range(10):
for alg in al_list:
if len(al_list[alg]) == 1:
bad_ing = list(al_list[alg])[0]
for alg2 in al_list:
if alg2 != alg and bad_ing in al_list[alg2]:
al_list[alg2].remove(bad_ing)
ans_list = []
for alg in al_list:
ans_list.append((list(al_list[alg])[0], alg))
ans_list = sorted(ans_list, key=lambda tup: tup[1])
for a, b in ans_list:
print("{},".format(a), end='')
| [
"kevinmxia@gmail.com"
] | kevinmxia@gmail.com |
ad012277c0b78fc13734a65394c57cb8435f2783 | 2a5ebd4e349011b12dd25b3b7cd989f8731cce07 | /common.py | 8f7f588ad4a6c80f60c5071b09a6dc16d3a9a7d1 | [
"MIT"
] | permissive | Unknowncmbk/NeuralNetwork | d865c3f8d1c5985b6aee49235e10a3154c5cc5db | 1c05cbb36cd87c87f2d8ff9be32542e8ab908e57 | refs/heads/master | 2020-06-02T05:53:25.361402 | 2014-11-27T19:26:31 | 2014-11-27T19:26:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 695 | py | #
# Common numerical functions useful for perceptrons and neural networks.
#
# Compiled against Python 2.7
# Author: Stephen Bahr (sbahr@bu.edu)
def dot(a, b):
"""Return the dot (inner) product of vectors a and b."""
assert len(a) == len(b), "Lengths of %s and %s do not match" % (a, b)
return sum((ai * bi) for ai, bi in zip(a,b))
def scale_and_add(v0, a, v1):
"""Scale v1 by a and add to v0."""
assert len(v0) == len(v1)
for i in range(len(v0)):
v0[i] += a * v1[i]
def step(v):
return 1 if v > 0 else 0
def average(lst):
"""Return average of an iterable of numbers."""
total = 0.0
count = 0
for elt in lst:
total += elt
count += 1
return total / count
| [
"sbahr@bu.edu"
] | sbahr@bu.edu |
7a28ea1553bdc4de0d169a474e8f80f39f34399a | bf66a28310d934fd1b8b14f762b336b9ec893a04 | /spaces/management/commands/seed_detail_spaces.py | f9b9f8cc4f792d7c46b1fa631cb36906e2b90db2 | [] | no_license | jeongmin14/enter_cloud-backend | 0664a6563ea090926a8522b454b762afed1f5c9d | 789b358e8c3cf5be8505185c048e10556bfd9e0a | refs/heads/main | 2023-03-26T03:54:39.565076 | 2021-03-17T13:09:32 | 2021-03-17T13:09:32 | 348,644,724 | 0 | 0 | null | 2021-03-17T13:08:07 | 2021-03-17T09:09:14 | null | UTF-8 | Python | false | false | 3,291 | py | import csv
import random
import bcrypt
from django_seed import Seed
from faker import Faker
from django.contrib.admin.utils import flatten
from django.core.management.base import BaseCommand
from django.utils import timezone
from django.db.models import Count
from users.models import Host
from spaces.models import (Space,
Tag,
Facility,
BreakDay,
Type,
SubImage,
SpaceFacility,
SpaceTag,
SpaceBreakday,
DetailSpace,
DetailType,
DetailFacility)
from my_settings import (name_list,
simple_info_list,
main_info_list,
detail_name_list,
detail_facility_list)
class Command(BaseCommand):
help = "create detail spaces"
def add_arguments(self, parser):
parser.add_argument("--number", type=int, default=1)
def handle(self, *args, **options):
number = options.get("number")
spaces = Space.objects.all()
file = open("all.csv", mode="r")
reader = file.readlines()
image_length = len(reader)
detail_types = DetailType.objects.all()
detail_facilities = DetailFacility.objects.all()
seeder = Seed.seeder()
seeder.add_entity(
DetailSpace,
number,
{
"space": lambda x : random.choice(spaces) if spaces.aggregate(Count("detailspace"))["detailspace__count"] < 3 else random.choice(spaces),
"name": lambda x : random.choice(detail_name_list),
"information": lambda x : random.choice(main_info_list),
"image": lambda x : reader[random.randint(0, image_length-1)],
"min_reservation_time": lambda x : random.randint(2, 5),
"min_people": lambda x : random.randint(1, 2),
"max_people": lambda x : random.randint(4, 10),
"price": lambda x : random.randint(5, 40) * 1000
}
)
seed_detail_space = seeder.execute()
detail_space_id_list = flatten(seed_detail_space.values())
for detail_space_id in detail_space_id_list:
detail_space = DetailSpace.objects.get(id = detail_space_id)
random_number = random.randint(1, len(detail_types))
detail_type_list = detail_types[random_number:random_number + 2]
detail_space.detailtype_set.set(detail_type_list)
random_number = random.randint(1, len(detail_facilities))
detail_facility_list = detail_facilities[random_number:random_number + 6]
detail_space.detailfacility_set.set(detail_facility_list)
self.stdout.write(self.style.SUCCESS(f'spaces created {number}')) | [
"sol35352000@gmail.com"
] | sol35352000@gmail.com |
60d915aacf5bb27433fa991ce2f4ff01a97940b8 | 40f6aa355384efd0eb05ccf91f832e5b2aaa10bc | /mingw32/bin/gtester-report-script.py | ec1fefeae3480add88d609d268018c2bf5eaddf9 | [] | no_license | Persimmon-Consulting/git-sdk-32 | efd764dd562d8ef0c13558d5fd3111fa4d2723a5 | 5a2146abed2bd6a20f82e28e96f815ca8c95e60c | refs/heads/main | 2023-02-25T08:15:00.190961 | 2021-02-01T03:13:50 | 2021-02-01T03:13:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,114 | py | #!C:/git-sdk-32-ci/mingw32/bin/python3.exe
# GLib Testing Framework Utility -*- Mode: python; -*-
# Copyright (C) 2007 Imendio AB
# Authors: Tim Janik
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <http://www.gnu.org/licenses/>.
# Deprecated: Since GLib 2.62, gtester and gtester-report have been deprecated
# in favour of TAP.
import datetime
import optparse
import sys, re, xml.dom.minidom
try:
import subunit
from subunit import iso8601
from testtools.content import Content, ContentType
mime_utf8 = ContentType('text', 'plain', {'charset': 'utf8'})
except ImportError:
subunit = None
# xml utilities
def find_child (node, child_name):
for child in node.childNodes:
if child.nodeName == child_name:
return child
return None
def list_children (node, child_name):
rlist = []
for child in node.childNodes:
if child.nodeName == child_name:
rlist += [ child ]
return rlist
def find_node (node, name = None):
if not node or node.nodeName == name or not name:
return node
for child in node.childNodes:
c = find_node (child, name)
if c:
return c
return None
def node_as_text (node, name = None):
if name:
node = find_node (node, name)
txt = ''
if node:
if node.nodeValue:
txt += node.nodeValue
for child in node.childNodes:
txt += node_as_text (child)
return txt
def attribute_as_text (node, aname, node_name = None):
node = find_node (node, node_name)
if not node:
return ''
attr = node.attributes.get (aname, '')
if hasattr (attr, 'value'):
return attr.value
return ''
# HTML utilities
def html_indent_string (n):
uncollapsible_space = ' ' # HTML won't compress alternating sequences of ' ' and ' '
string = ''
for i in range (0, int((n + 1) / 2)):
string += uncollapsible_space
return string
# TestBinary object, instantiated per test binary in the log file
class TestBinary:
def __init__ (self, name):
self.name = name
self.testcases = []
self.duration = 0
self.success_cases = 0
self.skipped_cases = 0
self.file = '???'
self.random_seed = ''
# base class to handle processing/traversion of XML nodes
class TreeProcess:
def __init__ (self):
self.nest_level = 0
def trampoline (self, node):
name = node.nodeName
if name == '#text':
self.handle_text (node)
else:
try: method = getattr (self, 'handle_' + re.sub ('[^a-zA-Z0-9]', '_', name))
except: method = None
if method:
return method (node)
else:
return self.process_recursive (name, node)
def process_recursive (self, node_name, node):
self.process_children (node)
def process_children (self, node):
self.nest_level += 1
for child in node.childNodes:
self.trampoline (child)
self.nest_level += 1
# test report reader, this class collects some statistics and merges duplicate test binary runs
class ReportReader (TreeProcess):
def __init__ (self):
TreeProcess.__init__ (self)
self.binary_names = []
self.binaries = {}
self.last_binary = None
self.info = {}
def binary_list (self):
lst = []
for name in self.binary_names:
lst += [ self.binaries[name] ]
return lst
def get_info (self):
return self.info
def handle_info (self, node):
dn = find_child (node, 'package')
self.info['package'] = node_as_text (dn)
dn = find_child (node, 'version')
self.info['version'] = node_as_text (dn)
dn = find_child (node, 'revision')
if dn is not None:
self.info['revision'] = node_as_text (dn)
def handle_testcase (self, node):
self.last_binary.testcases += [ node ]
result = attribute_as_text (node, 'result', 'status')
if result == 'success':
self.last_binary.success_cases += 1
if bool (int (attribute_as_text (node, 'skipped') + '0')):
self.last_binary.skipped_cases += 1
def handle_text (self, node):
pass
def handle_testbinary (self, node):
path = node.attributes.get ('path', None).value
if self.binaries.get (path, -1) == -1:
self.binaries[path] = TestBinary (path)
self.binary_names += [ path ]
self.last_binary = self.binaries[path]
dn = find_child (node, 'duration')
dur = node_as_text (dn)
try: dur = float (dur)
except: dur = 0
if dur:
self.last_binary.duration += dur
bin = find_child (node, 'binary')
if bin:
self.last_binary.file = attribute_as_text (bin, 'file')
rseed = find_child (node, 'random-seed')
if rseed:
self.last_binary.random_seed = node_as_text (rseed)
self.process_children (node)
class ReportWriter(object):
"""Base class for reporting."""
def __init__(self, binary_list):
self.binaries = binary_list
def _error_text(self, node):
"""Get a string representing the error children of node."""
rlist = list_children(node, 'error')
txt = ''
for enode in rlist:
txt += node_as_text (enode)
if txt and txt[-1] != '\n':
txt += '\n'
return txt
class HTMLReportWriter(ReportWriter):
# Javascript/CSS snippet to toggle element visibility
cssjs = r'''
<style type="text/css" media="screen">
.VisibleSection { }
.HiddenSection { display: none; }
</style>
<script language="javascript" type="text/javascript"><!--
function toggle_display (parentid, tagtype, idmatch, keymatch) {
ptag = document.getElementById (parentid);
tags = ptag.getElementsByTagName (tagtype);
for (var i = 0; i < tags.length; i++) {
tag = tags[i];
var key = tag.getAttribute ("keywords");
if (tag.id.indexOf (idmatch) == 0 && key && key.match (keymatch)) {
if (tag.className.indexOf ("HiddenSection") >= 0)
tag.className = "VisibleSection";
else
tag.className = "HiddenSection";
}
}
}
message_array = Array();
function view_testlog (wname, file, random_seed, tcase, msgtitle, msgid) {
txt = message_array[msgid];
var w = window.open ("", // URI
wname,
"resizable,scrollbars,status,width=790,height=400");
var doc = w.document;
doc.write ("<h2>File: " + file + "</h2>\n");
doc.write ("<h3>Case: " + tcase + "</h3>\n");
doc.write ("<strong>Random Seed:</strong> <code>" + random_seed + "</code> <br /><br />\n");
doc.write ("<strong>" + msgtitle + "</strong><br />\n");
doc.write ("<pre>");
doc.write (txt);
doc.write ("</pre>\n");
doc.write ("<a href=\'javascript:window.close()\'>Close Window</a>\n");
doc.close();
}
--></script>
'''
def __init__ (self, info, binary_list):
ReportWriter.__init__(self, binary_list)
self.info = info
self.bcounter = 0
self.tcounter = 0
self.total_tcounter = 0
self.total_fcounter = 0
self.total_duration = 0
self.indent_depth = 0
self.lastchar = ''
def oprint (self, message):
sys.stdout.write (message)
if message:
self.lastchar = message[-1]
def handle_info (self):
if 'package' in self.info and 'version' in self.info:
self.oprint ('<h3>Package: %(package)s, version: %(version)s</h3>\n' % self.info)
if 'revision' in self.info:
self.oprint ('<h5>Report generated from: %(revision)s</h5>\n' % self.info)
def handle_text (self, node):
self.oprint (node.nodeValue)
def handle_testcase (self, node, binary):
skipped = bool (int (attribute_as_text (node, 'skipped') + '0'))
if skipped:
return # skipped tests are uninteresting for HTML reports
path = attribute_as_text (node, 'path')
duration = node_as_text (node, 'duration')
result = attribute_as_text (node, 'result', 'status')
rcolor = {
'success': 'bgcolor="lightgreen"',
'failed': 'bgcolor="red"',
}.get (result, '')
if result != 'success':
duration = '-' # ignore bogus durations
self.oprint ('<tr id="b%u_t%u_" keywords="%s all" class="HiddenSection">\n' % (self.bcounter, self.tcounter, result))
self.oprint ('<td>%s %s</td> <td align="right">%s</td> \n' % (html_indent_string (4), path, duration))
perflist = list_children (node, 'performance')
if result != 'success':
txt = self._error_text(node)
txt = re.sub (r'"', r'\\"', txt)
txt = re.sub (r'\n', r'\\n', txt)
txt = re.sub (r'&', r'&', txt)
txt = re.sub (r'<', r'<', txt)
self.oprint ('<script language="javascript" type="text/javascript">message_array["b%u_t%u_"] = "%s";</script>\n' % (self.bcounter, self.tcounter, txt))
self.oprint ('<td align="center"><a href="javascript:view_testlog (\'%s\', \'%s\', \'%s\', \'%s\', \'Output:\', \'b%u_t%u_\')">Details</a></td>\n' %
('TestResultWindow', binary.file, binary.random_seed, path, self.bcounter, self.tcounter))
elif perflist:
presults = []
for perf in perflist:
pmin = bool (int (attribute_as_text (perf, 'minimize')))
pmax = bool (int (attribute_as_text (perf, 'maximize')))
pval = float (attribute_as_text (perf, 'value'))
txt = node_as_text (perf)
txt = re.sub (r'&', r'&', txt)
txt = re.sub (r'<', r'>', txt)
txt = '<strong>Performance(' + (pmin and '<em>minimized</em>' or '<em>maximized</em>') + '):</strong> ' + txt.strip() + '<br />\n'
txt = re.sub (r'"', r'\\"', txt)
txt = re.sub (r'\n', r'\\n', txt)
presults += [ (pval, txt) ]
presults.sort()
ptxt = ''.join ([e[1] for e in presults])
self.oprint ('<script language="javascript" type="text/javascript">message_array["b%u_t%u_"] = "%s";</script>\n' % (self.bcounter, self.tcounter, ptxt))
self.oprint ('<td align="center"><a href="javascript:view_testlog (\'%s\', \'%s\', \'%s\', \'%s\', \'Test Results:\', \'b%u_t%u_\')">Details</a></td>\n' %
('TestResultWindow', binary.file, binary.random_seed, path, self.bcounter, self.tcounter))
else:
self.oprint ('<td align="center">-</td>\n')
self.oprint ('<td align="right" %s>%s</td>\n' % (rcolor, result))
self.oprint ('</tr>\n')
self.tcounter += 1
self.total_tcounter += 1
self.total_fcounter += result != 'success'
def handle_binary (self, binary):
self.tcounter = 1
self.bcounter += 1
self.total_duration += binary.duration
self.oprint ('<tr><td><strong>%s</strong></td><td align="right">%f</td> <td align="center">\n' % (binary.name, binary.duration))
erlink, oklink = ('', '')
real_cases = len (binary.testcases) - binary.skipped_cases
if binary.success_cases < real_cases:
erlink = 'href="javascript:toggle_display (\'ResultTable\', \'tr\', \'b%u_\', \'failed\')"' % self.bcounter
if binary.success_cases:
oklink = 'href="javascript:toggle_display (\'ResultTable\', \'tr\', \'b%u_\', \'success\')"' % self.bcounter
if real_cases != 0:
self.oprint ('<a %s>ER</a>\n' % erlink)
self.oprint ('<a %s>OK</a>\n' % oklink)
self.oprint ('</td>\n')
perc = binary.success_cases * 100.0 / real_cases
pcolor = {
100 : 'bgcolor="lightgreen"',
0 : 'bgcolor="red"',
}.get (int (perc), 'bgcolor="yellow"')
self.oprint ('<td align="right" %s>%.2f%%</td>\n' % (pcolor, perc))
self.oprint ('</tr>\n')
else:
self.oprint ('Empty\n')
self.oprint ('</td>\n')
self.oprint ('</tr>\n')
for tc in binary.testcases:
self.handle_testcase (tc, binary)
def handle_totals (self):
self.oprint ('<tr>')
self.oprint ('<td><strong>Totals:</strong> %u Binaries, %u Tests, %u Failed, %u Succeeded</td>' %
(self.bcounter, self.total_tcounter, self.total_fcounter, self.total_tcounter - self.total_fcounter))
self.oprint ('<td align="right">%f</td>\n' % self.total_duration)
self.oprint ('<td align="center">-</td>\n')
if self.total_tcounter != 0:
perc = (self.total_tcounter - self.total_fcounter) * 100.0 / self.total_tcounter
else:
perc = 0.0
pcolor = {
100 : 'bgcolor="lightgreen"',
0 : 'bgcolor="red"',
}.get (int (perc), 'bgcolor="yellow"')
self.oprint ('<td align="right" %s>%.2f%%</td>\n' % (pcolor, perc))
self.oprint ('</tr>\n')
def printout (self):
self.oprint ('<html><head>\n')
self.oprint ('<title>GTester Unit Test Report</title>\n')
self.oprint (self.cssjs)
self.oprint ('</head>\n')
self.oprint ('<body>\n')
self.oprint ('<h2>GTester Unit Test Report</h2>\n')
self.handle_info ()
self.oprint ('<p style="color:red;font-weight:bold"><blink>'
'Deprecated: Since GLib 2.62, gtester and gtester-report are '
'deprecated. Port to TAP.</blink></p>\n');
self.oprint ('<table id="ResultTable" width="100%" border="1">\n<tr>\n')
self.oprint ('<th>Program / Testcase </th>\n')
self.oprint ('<th style="width:8em">Duration (sec)</th>\n')
self.oprint ('<th style="width:5em">View</th>\n')
self.oprint ('<th style="width:5em">Result</th>\n')
self.oprint ('</tr>\n')
for tb in self.binaries:
self.handle_binary (tb)
self.handle_totals()
self.oprint ('</table>\n')
self.oprint ('</body>\n')
self.oprint ('</html>\n')
class SubunitWriter(ReportWriter):
"""Reporter to output a subunit stream."""
def printout(self):
reporter = subunit.TestProtocolClient(sys.stdout)
for binary in self.binaries:
for tc in binary.testcases:
test = GTestCase(tc, binary)
test.run(reporter)
class GTestCase(object):
"""A representation of a gtester test result as a pyunit TestCase."""
def __init__(self, case, binary):
"""Create a GTestCase for case 'case' from binary program 'binary'."""
self._case = case
self._binary = binary
# the name of the case - e.g. /dbusmenu/glib/objects/menuitem/props_boolstr
self._path = attribute_as_text(self._case, 'path')
def id(self):
"""What test is this? Returns the gtester path for the testcase."""
return self._path
def _get_details(self):
"""Calculate a details dict for the test - attachments etc."""
details = {}
result = attribute_as_text(self._case, 'result', 'status')
details['filename'] = Content(mime_utf8, lambda:[self._binary.file])
details['random_seed'] = Content(mime_utf8,
lambda:[self._binary.random_seed])
if self._get_outcome() == 'addFailure':
# Extract the error details. Skips have no details because its not
# skip like unittest does, instead the runner just bypasses N test.
txt = self._error_text(self._case)
details['error'] = Content(mime_utf8, lambda:[txt])
if self._get_outcome() == 'addSuccess':
# Successful tests may have performance metrics.
perflist = list_children(self._case, 'performance')
if perflist:
presults = []
for perf in perflist:
pmin = bool (int (attribute_as_text (perf, 'minimize')))
pmax = bool (int (attribute_as_text (perf, 'maximize')))
pval = float (attribute_as_text (perf, 'value'))
txt = node_as_text (perf)
txt = 'Performance(' + (pmin and 'minimized' or 'maximized'
) + '): ' + txt.strip() + '\n'
presults += [(pval, txt)]
presults.sort()
perf_details = [e[1] for e in presults]
details['performance'] = Content(mime_utf8, lambda:perf_details)
return details
def _get_outcome(self):
if int(attribute_as_text(self._case, 'skipped') + '0'):
return 'addSkip'
outcome = attribute_as_text(self._case, 'result', 'status')
if outcome == 'success':
return 'addSuccess'
else:
return 'addFailure'
def run(self, result):
time = datetime.datetime.utcnow().replace(tzinfo=iso8601.Utc())
result.time(time)
result.startTest(self)
try:
outcome = self._get_outcome()
details = self._get_details()
# Only provide a duration IFF outcome == 'addSuccess' - the main
# parser claims bogus results otherwise: in that case emit time as
# zero perhaps.
if outcome == 'addSuccess':
duration = float(node_as_text(self._case, 'duration'))
duration = duration * 1000000
timedelta = datetime.timedelta(0, 0, duration)
time = time + timedelta
result.time(time)
getattr(result, outcome)(self, details=details)
finally:
result.stopTest(self)
# main program handling
def parse_opts():
"""Parse program options.
:return: An options object and the program arguments.
"""
parser = optparse.OptionParser()
parser.version = '2.66.4'
parser.usage = "%prog [OPTIONS] <gtester-log.xml>"
parser.description = "Generate HTML reports from the XML log files generated by gtester."
parser.epilog = "gtester-report (GLib utils) version %s."% (parser.version,)
parser.add_option("-v", "--version", action="store_true", dest="version", default=False,
help="Show program version.")
parser.add_option("-s", "--subunit", action="store_true", dest="subunit", default=False,
help="Output subunit [See https://launchpad.net/subunit/"
" Needs python-subunit]")
options, files = parser.parse_args()
if options.version:
print(parser.epilog)
return None, None
if len(files) != 1:
parser.error("Must supply a log file to parse.")
if options.subunit and subunit is None:
parser.error("python-subunit is not installed.")
return options, files
def main():
options, files = parse_opts()
if options is None:
return 0
print("Deprecated: Since GLib 2.62, gtester and gtester-report are "
"deprecated. Port to TAP.", file=sys.stderr)
xd = xml.dom.minidom.parse (files[0])
rr = ReportReader()
rr.trampoline (xd)
if not options.subunit:
HTMLReportWriter(rr.get_info(), rr.binary_list()).printout()
else:
SubunitWriter(rr.get_info(), rr.binary_list()).printout()
if __name__ == '__main__':
main()
| [
"ci@git-for-windows.build"
] | ci@git-for-windows.build |
0285552898e2f3c4ef99eed1b5f9c01d312dad95 | 5c41836471e08252f11bc51ce54e4d449f6a4f88 | /cajas/migrations/0004_auto_20180807_1115.py | b0877a004b33455f8b8122267004dd4dcc2bc3f3 | [] | no_license | wahello/dr_amor_app | 1e9be88681d94bda5425d006a769fd3c55edbadb | 3604f7992df97e981f6f72fd844b58186ebed6ef | refs/heads/master | 2020-04-27T10:46:46.453354 | 2019-02-25T04:50:34 | 2019-02-25T04:50:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | # Generated by Django 2.0.2 on 2018-08-07 16:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cajas', '0003_auto_20180801_2003'),
]
operations = [
migrations.AlterField(
model_name='billetemoneda',
name='activo',
field=models.BooleanField(default=False),
),
]
| [
"fabio.garcia.sanchez@gmail.com"
] | fabio.garcia.sanchez@gmail.com |
fa7186feee14a377eaf7cacdfd7aafc2db7bccca | 6518c74441a68fc99b2b08423b5ea11480806499 | /tests/tracking/test_artifact_utils.py | 42df335dbdc72043cd5ea533d5a16b1c13f91b18 | [
"Apache-2.0"
] | permissive | criteo-forks/mlflow | da58e64d09700623810da63999a1aca81b435b90 | 499284d8dc9e9ec79d8d9dbd03c58d162a2b7eaa | refs/heads/master | 2023-04-14T17:59:29.997458 | 2022-01-11T09:50:26 | 2022-01-11T09:50:26 | 191,391,769 | 5 | 4 | Apache-2.0 | 2023-04-07T15:16:20 | 2019-06-11T14:44:00 | Python | UTF-8 | Python | false | false | 5,304 | py | import os
from unittest import mock
from unittest.mock import ANY
import mlflow
from mlflow.tracking.artifact_utils import (
_download_artifact_from_uri,
_upload_artifacts_to_databricks,
)
def test_artifact_can_be_downloaded_from_absolute_uri_successfully(tmpdir):
artifact_file_name = "artifact.txt"
artifact_text = "Sample artifact text"
local_artifact_path = tmpdir.join(artifact_file_name).strpath
with open(local_artifact_path, "w") as out:
out.write(artifact_text)
logged_artifact_path = "artifact"
with mlflow.start_run():
mlflow.log_artifact(local_path=local_artifact_path, artifact_path=logged_artifact_path)
artifact_uri = mlflow.get_artifact_uri(artifact_path=logged_artifact_path)
downloaded_artifact_path = os.path.join(
_download_artifact_from_uri(artifact_uri), artifact_file_name
)
assert downloaded_artifact_path != local_artifact_path
assert downloaded_artifact_path != logged_artifact_path
with open(downloaded_artifact_path, "r") as f:
assert f.read() == artifact_text
def test_download_artifact_from_absolute_uri_persists_data_to_specified_output_directory(tmpdir):
artifact_file_name = "artifact.txt"
artifact_text = "Sample artifact text"
local_artifact_path = tmpdir.join(artifact_file_name).strpath
with open(local_artifact_path, "w") as out:
out.write(artifact_text)
logged_artifact_subdir = "logged_artifact"
with mlflow.start_run():
mlflow.log_artifact(local_path=local_artifact_path, artifact_path=logged_artifact_subdir)
artifact_uri = mlflow.get_artifact_uri(artifact_path=logged_artifact_subdir)
artifact_output_path = tmpdir.join("artifact_output").strpath
os.makedirs(artifact_output_path)
_download_artifact_from_uri(artifact_uri=artifact_uri, output_path=artifact_output_path)
assert logged_artifact_subdir in os.listdir(artifact_output_path)
assert artifact_file_name in os.listdir(
os.path.join(artifact_output_path, logged_artifact_subdir)
)
with open(
os.path.join(artifact_output_path, logged_artifact_subdir, artifact_file_name), "r"
) as f:
assert f.read() == artifact_text
def test_download_artifact_with_special_characters_in_file_name_and_path(tmpdir):
    """Spaces and punctuation in file and directory names survive log/download."""
    file_name = " artifact_ with! special characters.txt"
    sub_dir = " path with ! special characters"
    content = "Sample artifact text"
    source_dir = os.path.join(tmpdir, sub_dir)
    os.makedirs(source_dir)
    source_path = os.path.join(source_dir, file_name)
    with open(source_path, "w") as source_file:
        source_file.write(content)
    subdir = "logged_artifact"
    with mlflow.start_run():
        mlflow.log_artifact(local_path=source_path, artifact_path=subdir)
        artifact_uri = mlflow.get_artifact_uri(artifact_path=subdir)
    # The destination directory name also contains special characters.
    output_dir = os.path.join(tmpdir, "artifact output path!")
    os.makedirs(output_dir)
    _download_artifact_from_uri(artifact_uri=artifact_uri, output_path=output_dir)
    assert subdir in os.listdir(output_dir)
    assert file_name in os.listdir(os.path.join(output_dir, subdir))
    with open(os.path.join(output_dir, subdir, file_name), "r") as fetched_file:
        assert fetched_file.read() == content
def test_upload_artifacts_to_databricks():
    """Source artifacts are downloaded from the tracking workspace and staged
    under the registry workspace's DBFS tmp-external-source area."""
    import_root = "mlflow.tracking.artifact_utils"
    download_target = f"{import_root}._download_artifact_from_uri"
    repo_target = f"{import_root}.DbfsRestArtifactRepository"
    with mock.patch(download_target) as download_mock:
        with mock.patch(repo_target) as repo_mock:
            new_source = _upload_artifacts_to_databricks(
                "dbfs:/original/sourcedir/",
                "runid12345",
                "databricks://tracking",
                "databricks://registry:ws",
            )
    download_mock.assert_called_once_with("dbfs://tracking@databricks/original/sourcedir/", ANY)
    repo_mock.assert_called_once_with(
        "dbfs://registry:ws@databricks/databricks/mlflow/tmp-external-source/"
    )
    assert new_source == "dbfs:/databricks/mlflow/tmp-external-source/runid12345/sourcedir"
def test_upload_artifacts_to_databricks_no_run_id():
    """Without a run id, a freshly generated UUID names the staging directory."""
    from uuid import UUID
    import_root = "mlflow.tracking.artifact_utils"
    fixed_uuid = UUID("4f746cdcc0374da2808917e81bb53323")
    with mock.patch(f"{import_root}._download_artifact_from_uri") as download_mock:
        with mock.patch(f"{import_root}.DbfsRestArtifactRepository") as repo_mock:
            with mock.patch("uuid.uuid4", return_value=fixed_uuid):
                new_source = _upload_artifacts_to_databricks(
                    "dbfs:/original/sourcedir/", None, "databricks://tracking:ws", "databricks://registry"
                )
    download_mock.assert_called_once_with(
        "dbfs://tracking:ws@databricks/original/sourcedir/", ANY
    )
    repo_mock.assert_called_once_with(
        "dbfs://registry@databricks/databricks/mlflow/tmp-external-source/"
    )
    assert (
        new_source == "dbfs:/databricks/mlflow/tmp-external-source/"
        "4f746cdcc0374da2808917e81bb53323/sourcedir"
    )
| [
"noreply@github.com"
] | criteo-forks.noreply@github.com |
ff25ff702dbe8fbc4ba5a8fae2eeef3dc4c65831 | c6b85409d9e1e7a4cdac266d78c42715ec4f6fea | /blog/feeds.py | ed31d70ce9d3a537dab5139a56def1981d7c0e34 | [] | no_license | LissaAlissa/Django_Blog | d35cd73727bb8d8af294ca7d8ae732470e1fd92b | 90522a8ff0cc08548502a077f84a5321880e7247 | refs/heads/master | 2020-08-26T19:02:48.588274 | 2019-10-23T17:00:37 | 2019-10-23T17:00:37 | 217,111,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 450 | py | from django.contrib.syndication.views import Feed
from django.template.defaultfilters import truncatewords
from .models import Post
class LatestPostsFeed(Feed):
    """RSS feed exposing the five most recently published blog posts."""
    title = "My blog"
    link = '/blog/'
    description = 'New posts of my blog.'

    def items(self):
        # Only the five newest published posts appear in the feed.
        latest_posts = Post.published.all()[:5]
        return latest_posts

    def item_title(self, item):
        return item.title

    def item_description(self, item):
        # Trim the post body down to a 30-word teaser.
        return truncatewords(item.body, 30)
"davidovich.olga.v@mail.ru"
] | davidovich.olga.v@mail.ru |
c21c873a7bb3165f4fe9bd3f91f7f19f0165c4cc | 9f80330a51af33578c64c2a7f3db84c7190eaf68 | /core/serializers/user.py | 041cba00741807c73a2c6b7b508b9f2ad80c611b | [] | no_license | MashuqurRahman/bokshi-chat-dev | 501c0906e3a60716d2f7a07887c6bee21d699fab | 0b03ff3637ea8ac036b411c6a6c6dd2b12a22aa2 | refs/heads/main | 2023-02-28T03:56:59.475570 | 2021-02-11T09:06:26 | 2021-02-11T09:06:26 | 337,929,883 | 1 | 1 | null | 2021-02-11T09:06:27 | 2021-02-11T04:34:36 | Python | UTF-8 | Python | false | false | 375 | py | # core/serializers/user.py
from django.contrib.auth.models import User
from rest_framework import serializers
class UserListSerializer(serializers.ModelSerializer):
    """Serializes users for list endpoints, exposing only id and username."""
    class Meta:
        model = User
        fields = ('id', 'username')
class UserRetrieveSerializer(serializers.ModelSerializer):
    """Serializes a single user for detail endpoints; currently identical to
    UserListSerializer, kept separate so the two views can diverge later."""
    class Meta:
        model = User
        fields = ('id', 'username')
| [
"shahid.ice.nstu@gmail.com"
] | shahid.ice.nstu@gmail.com |
df611eb84cc4a732b0e4702ae54d667788b7c362 | 9eef3e4cf39a659268694cf08a4a799af8fb13e2 | /inference/surrogate/nn_surrogate/learning/training.py | 313ed4ae3a7b98e3561490f4cf3964ce074c4d2c | [] | no_license | cselab/tRBC-UQ | c30ec370939b949c989d2e9cd30137073b53e7d2 | cd7711b76c76e86bc6382914111f4fa42aa78f2c | refs/heads/master | 2023-04-18T03:06:49.175259 | 2022-10-25T15:45:07 | 2022-10-25T15:45:07 | 483,407,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,563 | py | #!/usr/bin/env python
from copy import deepcopy
import torch
from torch.utils.data import DataLoader
def train_model(model,
                data_loader: DataLoader,
                x_valid: torch.tensor,
                y_valid: torch.tensor,
                criterion = torch.nn.MSELoss(),
                lr=0.1,
                max_epoch=5000,
                info_every=100,
                device=torch.device('cpu')):
    """Train `model` with Adam, early stopping and learning-rate reduction.

    Each epoch first evaluates the validation loss; when it has not improved
    for `patience` consecutive epochs, the best weights are restored and the
    learning rate is multiplied by `lr_reduction_factor`. After
    `max_number_of_rounds` such reductions, training stops.

    :param model: the network to train (moved to `device` in place)
    :param data_loader: yields (x, y) training mini-batches
    :param x_valid: validation inputs
    :param y_valid: validation targets
    :param criterion: loss module; the shared default MSELoss is stateless,
        so the mutable-default sharing across calls is harmless
    :param lr: initial Adam learning rate
    :param max_epoch: maximum number of epochs
    :param info_every: print losses every this many epochs
    :param device: device to train on
    :return: (model, train_losses, valid_losses) with per-epoch float losses
    """
    model.to(device)
    x_valid, y_valid = x_valid.to(device), y_valid.to(device)

    # Snapshot of the best-so-far weights, restored on early stopping.
    with torch.no_grad():
        model_bck = deepcopy(model)

    best_valid_loss = 1e20
    num_worse_valid_losses = 0
    patience = 10
    max_number_of_rounds = 5
    number_of_rounds = 0
    lr_reduction_factor = 0.1

    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    # trace
    train_losses = list()
    valid_losses = list()

    for epoch in range(max_epoch):
        # Validation needs no autograd graph: evaluating under no_grad saves
        # memory (the original built an unused graph every epoch).
        with torch.no_grad():
            valid_loss = criterion(model(x_valid), y_valid).item()

        if valid_loss < best_valid_loss:
            best_valid_loss = valid_loss
            num_worse_valid_losses = 0
            model_bck.load_state_dict(model.state_dict())
        else:
            num_worse_valid_losses += 1

        if num_worse_valid_losses > patience:
            # Restore the best weights and shrink the learning rate.
            model.load_state_dict(model_bck.state_dict())
            num_worse_valid_losses = 0
            number_of_rounds += 1
            lr *= lr_reduction_factor
            print(f"reduced lr from {lr/lr_reduction_factor:e} to {lr:e}")

            # set the learning rate
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr

            if number_of_rounds >= max_number_of_rounds:
                break

        train_loss = 0.0
        for x_batch, y_batch in data_loader:
            x_batch, y_batch = x_batch.to(device), y_batch.to(device)
            batch_loss = criterion(model(x_batch), y_batch)
            optimizer.zero_grad()
            batch_loss.backward()
            # Epoch 0 records the untrained model's losses: no update step.
            if epoch > 0:
                optimizer.step()
            # .item() avoids the original's tensor -> numpy round trip.
            train_loss += batch_loss.item()
        train_loss /= len(data_loader)

        if epoch % info_every == 0:
            print(f"epoch {epoch:05d}: training loss {train_loss:e}, valid loss {valid_loss:e}")

        # save trace
        train_losses.append(train_loss)
        valid_losses.append(valid_loss)

    return model, train_losses, valid_losses
| [
"lucas.amoudruz@wanadoo.fr"
] | lucas.amoudruz@wanadoo.fr |
4d708ea3bd225f0c0c2dc484df2259a106ba8471 | f0fe4f17b5bbc374656be95c5b02ba7dd8e7ec6d | /all_functions/linux server/python GUI/menus/popupMenu.py | 1f03b6eadd92ccda984447a8fd4cfb671036029d | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | Heroku-elasa/heroku-buildpack-python-ieee-new | f46a909ebc524da07f8e15c70145d1fe3dbc649b | 06ec2fda04d9e478ed2506400e460489b0ca91ab | refs/heads/master | 2022-12-10T13:14:40.742661 | 2020-01-29T14:14:10 | 2020-01-29T14:14:10 | 60,902,385 | 0 | 0 | MIT | 2022-12-07T23:34:36 | 2016-06-11T10:36:10 | Python | UTF-8 | Python | false | false | 2,177 | py | # submenu.py
import wx
########################################################################
class MyForm(wx.Frame):
    """Frame that shows a context (popup) menu wherever the user right-clicks."""

    #----------------------------------------------------------------------
    def __init__(self):
        """Build the frame, its panel/label, and bind the context-menu event."""
        wx.Frame.__init__(self, None, title="Popup Menu Tutorial")
        panel = wx.Panel(self, wx.ID_ANY)
        wx.StaticText(panel, label="Right click anywhere!")
        self.Bind(wx.EVT_CONTEXT_MENU, self.onContext)

    #----------------------------------------------------------------------
    def onContext(self, event):
        """
        Create and show a Context Menu
        """
        # only do this part the first time so the events are only bound once
        if not hasattr(self, "popupID1"):
            self.popupID1 = wx.NewId()
            self.itemTwoId = wx.NewId()
            self.itemThreeId = wx.NewId()

            self.Bind(wx.EVT_MENU, self.onPopup, id=self.popupID1)
            self.Bind(wx.EVT_MENU, self.onPopup, id=self.itemTwoId)
            self.Bind(wx.EVT_MENU, self.onExit, id=self.itemThreeId)

        # build the menu (Append's return values were unused, so not bound)
        menu = wx.Menu()
        menu.Append(self.popupID1, "ItemOne")
        menu.Append(self.itemTwoId, "ItemTwo")
        menu.Append(self.itemThreeId, "Exit")

        # show the popup menu, then free it
        self.PopupMenu(menu)
        menu.Destroy()

    #----------------------------------------------------------------------
    def onExit(self, event):
        """
        Exit program
        """
        self.Close()

    #----------------------------------------------------------------------
    def onPopup(self, event):
        """
        Print the label of the menu item selected
        """
        itemId = event.GetId()
        menu = event.GetEventObject()
        menuItem = menu.FindItemById(itemId)
        # Parenthesized call works on both Python 2 and 3 (the bare
        # `print x` statement was Python-2-only).
        print(menuItem.GetLabel())
#----------------------------------------------------------------------
# Run the program
if __name__ == "__main__":
    app = wx.App(False)
    # Show() returns a bool, not the frame; binding it to `frame` was misleading.
    MyForm().Show()
    app.MainLoop()
"soheil_paper@yahoo.com"
] | soheil_paper@yahoo.com |
67b110211c48f58f63f581bf59716e00f685a415 | c4a01824e750f7a710fa5488128ec2694ad3f585 | /tensorflow_projects/ML_python_ch10.py | 7234eda5fa0583c6d53b2475df4fed0202f35a3e | [] | no_license | karsevar/tensorflow_projects | 4887391046c1bfc124a07a4f8599c8ef0b41face | 97d093f4dfeea83185c92d4cbf73d8b1c28cbacf | refs/heads/master | 2020-03-28T18:13:49.562167 | 2018-09-15T02:44:53 | 2018-09-15T02:44:53 | 148,862,971 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,173 | py | ###Hands on machine learning with tensorflow and scikit learn:
###Chapter 10 Introduction to Artificial Neural Networks
##The Perceptron:
#The Perceptron is one of the simplest ANN architectures. It is based on a slightly
#different artificial neuron called a linear threshold unit: The inputs
#and output are now numbers and each input connection is associated with a weight
#The LTU computes a weighted sum of its inputs, then applies a step function
#to the sum and outputs the results
#A single LTU can be used for simple linear binary classification. It
#computes a linear combination of the inputs and if the result exceeds
#a threshold, it outputs the positive class or else outputs the negative class.
#Much like gradient descent, training a LTU neural network revolves around
#finding the best W_0, w_1, w_2 values for the dataset.
#Training algorithm (Hebbian learning): The perceptron is fed one training
#instance at a time, and for each instance it makes it predictions. For
#every output neuron that produced a wrong prediction, it reinforces the connection
#weights from the inputs that would have contributed to the correct
#prediction.
#Look at page 259 for the equation.
#The decision boundary of each output neuron is linear, so Perceptrons are incapable
#of learning complex patterns.
##Creating a single LTU network using the Perceptron class:
import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron

# Single-LTU Perceptron demo: binary "is it Iris setosa?" classification
# from petal length and width.
iris = load_iris()
X = iris.data[:, (2, 3)]  # petal length, petal width
# Fix: the np.int alias was deprecated and removed in NumPy 1.24; the
# builtin int is the drop-in replacement.
y = (iris.target == 0).astype(int)

per_clf = Perceptron(random_state=42)
per_clf.fit(X, y)

# Predict the class of an iris with petal length 2 and petal width 0.5.
y_pred = per_clf.predict([[2, 0.5]])
print(y_pred)
# The SGDClassifier can perform the same computation with
# loss="perceptron", learning_rate="constant", eta0=1 and penalty=None.
##Multi-layer perceptron and backpropagation:
#An MLP is composed of one input layer, one or more layers of LTUs,
#called hidden layers, and one final layer of LTUs called the output layer.
#Every layer except the output layer includes a bias neuron and is fully
#connected to the next layer.
#Backpropagation explained: for each training instance the back propagation
#algorithm first makes a prediction (forward pass), measures the error, then goes
#through each layer in reverse to measure the error contribution from each
#connect (reverse pass), and finally slightly tweaks the connection weights
#to reduce the error (Gradient Descent step).
#The early activation function was replaced by the sigmoid equation
#(1/1 + exp(-z)) as a means to make gradient descent work with the
#model.
##Important reason why the author in Fundamentals of deep learning used
#softmax logistic regression: When classes are exclusive, the output layer
#is typically modified by replacing the individual activation functions by
#a shared soft max function. The output of each neuron corresponds to the estimated
#probability of the corresponding class.
##Training an MLP with Tensorflow's High-level API:
#The DNNClassifier class makes it fairly easy to train a deep neural
#network with any number of hidden layers, and a softmax output layer
#to output extimated class probabilities.
import tensorflow as tf
#(X_train, y_train), (X_test, y_test) = tf.contrib.keras.datasets.mnist.load_data()
#X_train = X_train.astype(np.float32).reshape(-1, 28*28)/255.0
#X_test = X_test.astype(np.float32).reshape(-1, 28*28) / 255.0
#y_train = y_train.astype(np.int32)
#y_test = y_test.astype(np.int32)
#X_valid, X_train = X_train[:5000], X_train[5000:]
#y_valid, y_train = y_train[:5000], y_train[5000:]
# Load MNIST (70k 28x28 grayscale digit images) via scikit-learn.
from sklearn.datasets import fetch_mldata
# NOTE(review): fetch_mldata was removed from modern scikit-learn;
# fetch_openml("mnist_784") is the replacement — confirm before rerunning.
mnist = fetch_mldata("MNIST original")
X, y = mnist["data"], mnist["target"]
X_train, X_test = X[:50000], X[50000:]
y_train, y_test = y[:50000], y[50000:]
# Flatten each image to a 784-long vector and scale pixels to [0, 1].
X_train = X_train.astype(np.float32).reshape(-1, 28*28)/255.0
X_test = X_test.astype(np.float32).reshape(-1, 28*28) / 255.0
y_train = y_train.astype(np.int32)
y_test = y_test.astype(np.int32)
# Carve a 5k-example validation set off the front of the training data.
X_valid, X_train = X_train[:5000], X_train[5000:]
y_valid, y_train = y_train[:5000], y_train[5000:]
# High-level estimator API (TF 1.x): two hidden layers (300 and 100 units)
# and a softmax output over the 10 digit classes.
feature_cols = [tf.feature_column.numeric_column("X", shape=[28 * 28])]
dnn_clf = tf.contrib.learn.DNNClassifier(hidden_units = [300, 100], n_classes = 10,
	feature_columns= feature_cols)
input_fn = tf.estimator.inputs.numpy_input_fn(
	x={"X": X_train}, y=y_train, num_epochs = 40, batch_size=50, shuffle=True)
#dnn_clf.train(input_fn=input_fn)
# Author note: switching the dataset source to sklearn.datasets works fine,
# but dnn_clf.train() did not run — needs further investigation.
tf.reset_default_graph()  # clear the estimator's graph before building one by hand
#Under the hood, the DNNClassifier class creates all the neuron layers, based on
#the ReLu acitivation function (we can change this by setting the activation_fn hyperparameter).
#The output layer relies on the softmax function, and the cost function is
#cross entropy.
##Training a DNN Using Plain Tensorflow:
##construction phase:
# Construction phase: network dimensions and input placeholders.
n_inputs = 28 * 28  # one input per MNIST pixel (one feature per pixel)
n_hidden1 = 300
n_hidden2 = 100
n_outputs = 10  # one logit per digit class
# No bias placeholder is needed here: each layer adds its own bias neuron.
X = tf.placeholder(tf.float32, shape = (None, n_inputs), name = "X")
y = tf.placeholder(tf.int32, shape = (None), name = "y")
def neuron_layer(X, n_neurons, name, activation = None):
    """Create one fully connected layer of `n_neurons` units on top of `X`.

    Weights are drawn from a truncated normal with stddev 2/sqrt(n_inputs)
    to keep activations well scaled; biases start at zero.
    Returns activation(X @ W + b), or the raw affine output if `activation`
    is None.
    """
    # Bug fix: the original passed the literal string "name" to
    # tf.name_scope, so every layer ended up in the same graph scope instead
    # of its own ("hidden1", "hidden2", "outputs").
    with tf.name_scope(name):
        n_inputs = int(X.get_shape()[1])
        stddev = 2 / np.sqrt(n_inputs)
        init = tf.truncated_normal((n_inputs, n_neurons), stddev = stddev)
        W = tf.Variable(init, name = "kernel")
        # The bias term: one value per neuron in this layer.
        b = tf.Variable(tf.zeros([n_neurons]), name = "bias")
        Z = tf.matmul(X, W) + b
        if activation is not None:
            return activation(Z)
        else:
            return Z
#this neural network is using the same code as the ReLU activation
#neural network on page 247. thus meaning that this model is using
#the relu activation function to create the neural network.
#the stddev = 2 / np.sqrt(n_inputs) init = tf.truncated_normal((n_inputs, n_neurons), stddev = stddev)
#parts of the function is a Gaussian initialization number generator with a
#standard deviation of 2/sqrt(n_inputs)
#the first hidden layer takes X as its input. The second takes the output
#of the first hidden layer as its input. And finally, the output layer takes
#the output of the second hidden layer as its input.
# Wire the network: hidden1 (ReLU) -> hidden2 (ReLU) -> logits. The output
# layer has no activation here; softmax is folded into the loss below.
with tf.name_scope("dnn"):
    hidden1 = neuron_layer(X, n_hidden1, name = "hidden1",
                           activation=tf.nn.relu)
    hidden2 = neuron_layer(hidden1, n_hidden2, name = "hidden2",
                           activation=tf.nn.relu)
    logits = neuron_layer(hidden2, n_outputs, name = "outputs")
#Tensorflow's tf.layers.dense() function creates a fully connected layer, where
#all the inputs are connected to al the neurons in the layer. It takes care of
#creating the weights and biases variables, named kernel and bias respectively
#using the appropriate initialization strategy, and you can set the
#activation function using the activation argument.
#with tf.name_scope("dnn"):
#hidden1 = neuron_layer(X, n_hidden1, name = "hidden1",
#activation=tf.nn.relu)
#hidden2 = neuron_layer(hidden1, n_hidden2, name = "hidden2",
#activation=tf.nn.relu)
#logits = neuron_layer(hidden2, n_outputs, name = "outputs")
#As you can see this creates the same neural network without having to
#create a neuron function that specifies the variables within each
#neuron in a specific layer.
#After this we will need to assign a penality term within the equation.
#this this model we will use cross entropy. We will use sparse_soft_max_entropy_with_logits():
#it computes the cross entropy based on the logits. We can use tensorflow's
#reduce_mean() to compute the mean cross entropy over all instances.
# Cost function: sparse softmax cross entropy computed from the raw logits,
# averaged over the mini-batch with reduce_mean.
with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name = "loss")

# Optimizer setup, as in the chapter-9 linear-regression graph.
learning_rate = 0.01

with tf.name_scope("train"):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    training_op = optimizer.minimize(loss)

# Evaluation: a prediction is correct when the target class is the top
# (k=1) logit. in_top_k yields booleans, so cast to float and average to
# get the batch accuracy.
with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
def fetch_batch(epoch, batch_index, batch_size):
    """Sample a random training mini-batch (with replacement).

    `epoch` and `batch_index` are unused: batches are drawn independently
    at random rather than by slicing the dataset.
    """
    sample_ids = np.random.randint(0, len(X_train), size = batch_size)
    return X_train[sample_ids], y_train[sample_ids]
# Execution phase: mini-batch gradient descent over the hand-built graph.
n_epochs = 60
batch_size = 50
n_batches = int(np.ceil(int(X_train.shape[0]) / batch_size))
init = tf.global_variables_initializer()
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for batch_index in range(n_batches):
            X_batch, y_batch = fetch_batch(epoch, batch_index, batch_size)
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        # acc_train is measured on the *last* mini-batch only, so it is a
        # noisy estimate of the true training accuracy.
        acc_train = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
        acc_valid = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Train accuracy: ", acc_train, "valid accuracy:", acc_valid)
# Author note: test accuracy plateaued around 62% even with 100 epochs,
# possibly because the train/test indices were not shuffled before splitting.
| [
"masonkarsevar@gmail.com"
] | masonkarsevar@gmail.com |
a47695bab212efdb53838f47f47917c0959cdcb5 | 216da5e56b0d9139e220c423abb90ccf8a293ab6 | /colossalai/nn/layer/parallel_3d/_vit.py | ffe7a146af714eac4f707f6f443ebe65e2d6bca5 | [
"Apache-2.0"
] | permissive | hierarchyJK/ColossalAI | 0cb88eb6b238553bdd86b773d916d792086e219c | 0aa07e600c7119498d3a49492c9cebcdbe3e89a3 | refs/heads/main | 2023-08-25T17:22:24.620883 | 2021-11-04T06:26:28 | 2021-11-04T06:26:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,482 | py | import math
from typing import Tuple
import torch
import torch.distributed as dist
from colossalai.context import ParallelMode, seed
from colossalai.core import global_context as gpc
from colossalai.registry import LAYERS
from colossalai.utils import checkpoint, get_current_device
from torch import Tensor, dtype, nn
from .._common_utils import ACT2FN, divide, set_tensor_parallel_attribute
from ..vanilla_vision_transformer.layers import to_2tuple
from ._utils import get_depth_from_env
from .layers import Linear3D
@LAYERS.register_module
class ViTPatchEmbedding3D(nn.Module):
    """ 3D Image to Patch Embedding

    Projects image patches with a Conv2d whose output channels hold this
    rank's 1/depth slice of the embedding dimension, prepends a class token,
    and adds learnable position embeddings (both likewise partitioned).

    :param img_size: image size
    :type img_size: int
    :param patch_size: patch size
    :type patch_size: int
    :param in_chans: number of channels of input image
    :type in_chans: int
    :param embed_size: dimension of embedding
    :type embed_size: int
    :param drop_prob: dropout probability
    :type drop_prob: float
    :param flatten: whether to flatten output tensor, defaults to True
    :type flatten: bool, optional
    """
    def __init__(self,
                 img_size: int,
                 patch_size: int,
                 in_chans: int,
                 embed_size: int,
                 drop_prob: float,
                 flatten: bool = True):
        super().__init__()
        self.depth = get_depth_from_env()  # the 3D parallelism depth (q in the comments below)
        self.input_parallel_mode = ParallelMode.PARALLEL_3D_INPUT
        self.weight_parallel_mode = ParallelMode.PARALLEL_3D_WEIGHT
        self.output_parallel_mode = ParallelMode.PARALLEL_3D_OUTPUT
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        self.img_size = img_size
        self.patch_size = patch_size
        self.grid_size = (img_size[0] // patch_size[0],
                          img_size[1] // patch_size[1])
        self.embed_size = embed_size
        # Each rank stores only embed_size / depth embedding channels.
        self.embed_size_per_partition = divide(self.embed_size, self.depth)
        self.num_patches = self.grid_size[0] * self.grid_size[1]
        self.flatten = flatten

        with seed(ParallelMode.TENSOR):
            # stride == kernel == patch size, so each output position is the
            # embedding of one non-overlapping patch.
            self.proj = nn.Conv2d(in_chans,
                                  self.embed_size_per_partition,
                                  kernel_size=patch_size,
                                  stride=patch_size)

        self.cls_token = nn.Parameter(
            torch.zeros(1, 1, self.embed_size_per_partition))
        self.pos_embed = nn.Parameter(
            torch.zeros(1, self.num_patches + 1,
                        self.embed_size_per_partition))
        self.pos_drop = nn.Dropout(drop_prob)

        self._sync_parameters()
        # These parameters are replicated across the input and weight groups,
        # so their gradients must be all-reduced over both groups.
        self.proj.weight.register_hook(self._sync_grad_hook)
        self.proj.bias.register_hook(self._sync_grad_hook)
        self.cls_token.register_hook(self._sync_grad_hook)
        self.pos_embed.register_hook(self._sync_grad_hook)
        self._set_tensor_parallel_attribute()

    def _set_tensor_parallel_attribute(self):
        # Tag the parameters so the framework treats them as tensor-parallel.
        set_tensor_parallel_attribute(self.proj.weight)
        set_tensor_parallel_attribute(self.proj.bias)
        set_tensor_parallel_attribute(self.cls_token)
        set_tensor_parallel_attribute(self.pos_embed)

    def groups_for_next_layer(self) -> Tuple[ParallelMode, ParallelMode]:
        """Parallel modes the following layer should use as (input, weight)."""
        return self.input_parallel_mode, self.weight_parallel_mode

    def _sync_parameters(self):
        """Broadcast the projection weights from rank 0 of the weight group,
        then of the input group, so every replica starts identical."""
        self.to(get_current_device())
        weight_src_rank = gpc.get_ranks_in_group(self.weight_parallel_mode)[0]
        dist.broadcast(self.proj.weight,
                       src=weight_src_rank,
                       group=gpc.get_group(self.weight_parallel_mode))
        dist.broadcast(self.proj.bias,
                       src=weight_src_rank,
                       group=gpc.get_group(self.weight_parallel_mode))
        input_src_rank = gpc.get_ranks_in_group(self.input_parallel_mode)[0]
        dist.broadcast(self.proj.weight,
                       src=input_src_rank,
                       group=gpc.get_group(self.input_parallel_mode))
        dist.broadcast(self.proj.bias,
                       src=input_src_rank,
                       group=gpc.get_group(self.input_parallel_mode))
        # NOTE(review): these four calls duplicate _set_tensor_parallel_attribute(),
        # which __init__ also calls right after this method; likely redundant.
        set_tensor_parallel_attribute(self.proj.weight)
        set_tensor_parallel_attribute(self.proj.bias)
        set_tensor_parallel_attribute(self.cls_token)
        set_tensor_parallel_attribute(self.pos_embed)

    def _sync_grad_hook(self, grad) -> None:
        # Sum gradient contributions over both replication groups.
        dist.all_reduce(grad, group=gpc.get_group(self.input_parallel_mode))
        dist.all_reduce(grad, group=gpc.get_group(self.weight_parallel_mode))
        return grad

    def forward(self, x: Tensor) -> Tensor:
        B, C, H, W = x.shape
        assert H == self.img_size[0] and W == self.img_size[1], \
            f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        x = self.proj(x)
        if self.flatten:
            x = x.flatten(2).transpose(1, 2)  # BCHW -> BNC
        # split a partition from embedded states: each rank keeps its 1/depth
        # slice of the batch along the weight group, then again along the
        # input group, so the local batch is B / depth^2.
        x = torch.chunk(x, self.depth, dim=0)[gpc.get_local_rank(
            self.weight_parallel_mode)].contiguous()
        x = torch.chunk(x, self.depth, dim=0)[gpc.get_local_rank(
            self.input_parallel_mode)].contiguous()
        # add cls token & pos embedding
        # [b/q^2,s,h/q] --> [b/q^2, 1+s, h/q]
        cls_token = self.cls_token.expand(x.shape[0], -1, -1)
        x = torch.cat((cls_token, x), dim=1)
        with seed(ParallelMode.TENSOR):
            x = self.pos_drop(x + self.pos_embed)
        return x
@LAYERS.register_module
class ViTSelfAttention3D(nn.Module):
    """Self-attention layer for 3D parallel Vision Transformer

    Attention heads are partitioned across the parallel depth: each rank
    computes num_attention_heads / depth heads over its input shard, with a
    fused QKV projection and an output projection (both Linear3D).

    :param hidden_size: hidden size
    :type hidden_size: int
    :param num_attention_heads: number of attention heads
    :type num_attention_heads: int
    :param attention_probs_dropout_prob: dropout probability for attention layers
    :type attention_probs_dropout_prob: float
    :param hidden_dropout_prob: dropout probability for hidden layers
    :type hidden_dropout_prob: float
    :param dtype: dtype of parameters, defaults to None
    :type dtype: dtype, optional
    :param bias: whether to add bias, defaults to True
    :type bias: bool, optional
    :param checkpoint: whether to use activation checkpointing, defaults to False
    :type checkpoint: bool, optional
    """
    def __init__(self,
                 hidden_size: int,
                 num_attention_heads: int,
                 attention_probs_dropout_prob: float,
                 hidden_dropout_prob: float,
                 dtype: dtype = None,
                 bias: bool = True,
                 checkpoint: bool = False):
        super().__init__()
        self.depth = get_depth_from_env()
        self.input_parallel_mode = ParallelMode.PARALLEL_3D_INPUT
        self.weight_parallel_mode = ParallelMode.PARALLEL_3D_WEIGHT
        self.output_parallel_mode = ParallelMode.PARALLEL_3D_OUTPUT
        self.hidden_size = hidden_size
        # Heads are sharded across the depth; head size is computed from the
        # full (global) head count.
        self.num_attention_heads = divide(num_attention_heads, self.depth)
        self.attention_head_size = divide(hidden_size, num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.checkpoint = checkpoint

        # Fused projection: one matmul produces queries, keys and values.
        self.query_key_value = Linear3D(self.hidden_size,
                                        3 * self.hidden_size,
                                        self.input_parallel_mode,
                                        self.weight_parallel_mode,
                                        dtype=dtype,
                                        bias=bias)
        self.attention_dropout = nn.Dropout(attention_probs_dropout_prob)
        self.dense = Linear3D(self.hidden_size,
                              self.hidden_size,
                              self.output_parallel_mode,
                              self.weight_parallel_mode,
                              dtype=dtype,
                              bias=bias)
        self.dropout = nn.Dropout(hidden_dropout_prob)
        self.softmax = nn.Softmax(dim=-1)

    def groups_for_next_layer(self) -> Tuple[ParallelMode, ParallelMode]:
        """Parallel modes the following layer should use as (input, weight)."""
        return self.input_parallel_mode, self.weight_parallel_mode

    def _forward(self, hidden_states: Tensor) -> Tensor:
        query_key_value = self.query_key_value(hidden_states)
        # (..., 3*hidden) -> (..., heads, 3*head_size), then move the head
        # axis before the sequence axis.
        new_qkv_shape = query_key_value.shape[:-1] + \
            (self.num_attention_heads, 3 * self.attention_head_size)
        query_key_value = query_key_value.view(new_qkv_shape)
        query_key_value = query_key_value.permute((0, 2, 1, 3))
        query_layer, key_layer, value_layer = torch.chunk(query_key_value,
                                                          3,
                                                          dim=-1)
        # Scaled dot-product attention.
        attention_scores = torch.matmul(query_layer,
                                        key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(
            self.attention_head_size)
        attention_probs = self.softmax(attention_scores)
        # Dropout under the tensor-parallel seed so all ranks stay consistent.
        with seed(ParallelMode.TENSOR):
            attention_probs = self.attention_dropout(attention_probs)

        context_layer = torch.matmul(attention_probs, value_layer)
        # Merge the per-head outputs back into one hidden dimension.
        context_layer = context_layer.transpose(1, 2)
        new_context_layer_shape = context_layer.size()[:-2] + (
            self.all_head_size, )
        context_layer = context_layer.reshape(new_context_layer_shape)

        output = self.dense(context_layer)
        with seed(ParallelMode.TENSOR):
            output = self.dropout(output)
        return output

    def _checkpoint_forward(self, hidden_states: Tensor) -> Tensor:
        # Recompute activations in the backward pass to save memory.
        return checkpoint(self._forward, hidden_states)

    def forward(self, hidden_states: Tensor) -> Tensor:
        if self.checkpoint:
            return self._checkpoint_forward(hidden_states)
        else:
            return self._forward(hidden_states)
@LAYERS.register_module
class ViTMLP3D(nn.Module):
    """Feed-forward (MLP) block for 3D parallel Vision Transformer:
    dense -> activation -> dense -> dropout, with an intermediate width of
    mlp_ratio * hidden_size.

    :param hidden_size: hidden size
    :type hidden_size: int
    :param mlp_ratio: hidden size of MLP divided by embedding dim
    :type mlp_ratio: int
    :param hidden_dropout_prob: dropout probability for hidden layers
    :type hidden_dropout_prob: float
    :param hidden_act: activation function for hidden layers
    :type hidden_act: str
    :param dtype: dtype of parameters, defaults to None
    :type dtype: dtype, optional
    :param bias: whether to add bias, defaults to True
    :type bias: bool, optional
    :param checkpoint: whether to use activation checkpointing, defaults to False
    :type checkpoint: bool, optional
    """
    def __init__(self,
                 hidden_size: int,
                 mlp_ratio: int,
                 hidden_dropout_prob: float,
                 hidden_act: str = 'gelu',
                 dtype: dtype = None,
                 bias: bool = True,
                 checkpoint: bool = False):
        super().__init__()
        self.depth = get_depth_from_env()
        self.input_parallel_mode = ParallelMode.PARALLEL_3D_INPUT
        self.weight_parallel_mode = ParallelMode.PARALLEL_3D_WEIGHT
        self.output_parallel_mode = ParallelMode.PARALLEL_3D_OUTPUT
        self.hidden_size = hidden_size
        self.mlp_ratio = mlp_ratio
        self.checkpoint = checkpoint

        # Expansion projection, followed by the configured activation.
        self.dense_1 = Linear3D(self.hidden_size,
                                self.mlp_ratio * self.hidden_size,
                                self.input_parallel_mode,
                                self.weight_parallel_mode,
                                dtype=dtype,
                                bias=bias)
        self.activation_func = ACT2FN[hidden_act]
        # Contraction projection back to hidden_size.
        self.dense_2 = Linear3D(self.mlp_ratio * self.hidden_size,
                                self.hidden_size,
                                self.output_parallel_mode,
                                self.weight_parallel_mode,
                                dtype=dtype,
                                bias=bias)
        self.dropout = nn.Dropout(hidden_dropout_prob)

    def groups_for_next_layer(self) -> Tuple[ParallelMode, ParallelMode]:
        """Parallel modes the following layer should use as (input, weight)."""
        return self.input_parallel_mode, self.weight_parallel_mode

    def _forward(self, hidden_states: Tensor) -> Tensor:
        intermediate_output = self.dense_1(hidden_states)
        intermediate_output = self.activation_func(intermediate_output)
        output = self.dense_2(intermediate_output)
        # Dropout under the tensor-parallel seed so all ranks stay consistent.
        with seed(ParallelMode.TENSOR):
            output = self.dropout(output)
        return output

    def _checkpoint_forward(self, hidden_states: Tensor) -> Tensor:
        # Recompute activations in the backward pass to save memory.
        return checkpoint(self._forward, hidden_states)

    def forward(self, hidden_states: Tensor) -> Tensor:
        if self.checkpoint:
            return self._checkpoint_forward(hidden_states)
        else:
            return self._forward(hidden_states)
@LAYERS.register_module
class ViTHead3D(nn.Module):
    """Output layer for 3D parallel Vision Transformer

    Classifies from the [CLS] token. num_classes is padded up to a multiple
    of depth**2 so the classifier weight can be partitioned evenly; the
    padding logits are trimmed from the returned slice.

    :param in_features: size of input tensor
    :type in_features: int
    :param num_classes: number of classes
    :type num_classes: int
    :param dtype: dtype of parameters, defaults to None
    :type dtype: dtype, optional
    :param bias: whether to add bias, defaults to True
    :type bias: bool, optional
    """
    def __init__(self,
                 in_features: int,
                 num_classes: int,
                 dtype: dtype = None,
                 bias: bool = True):
        super().__init__()
        self.depth = get_depth_from_env()
        self.input_parallel_mode = ParallelMode.PARALLEL_3D_INPUT
        self.weight_parallel_mode = ParallelMode.PARALLEL_3D_WEIGHT
        self.output_parallel_mode = ParallelMode.PARALLEL_3D_OUTPUT
        self.in_features = in_features
        self.num_classes = num_classes
        # Round the class count up to a multiple of depth^2 for even sharding.
        out_features = math.ceil(self.num_classes /
                                 (self.depth**2)) * (self.depth**2)
        self.num_classes_per_partition = divide(self.num_classes, self.depth)
        self.linear = Linear3D(self.in_features,
                               out_features,
                               self.input_parallel_mode,
                               self.weight_parallel_mode,
                               dtype=dtype,
                               bias=bias)

    def groups_for_next_layer(self) -> Tuple[ParallelMode, ParallelMode]:
        """Parallel modes the following layer should use as (input, weight)."""
        return self.linear.groups_for_next_layer()

    def forward(self, x: Tensor) -> Tensor:
        # [b/q^2, s, h/q] --> [b/q^2, h/q]: keep only the [CLS] token.
        x = x[:, 0]
        # [b/q^2, h/q] --> [b/q^2, c/q]
        x = self.linear(x)
        # NOTE(review): each rank returns its first num_classes/depth logit
        # columns; the full logits are assembled across the output group —
        # confirm against Linear3D's partition layout.
        return x[:, :self.num_classes_per_partition]

    def extra_repr(self):
        return 'in_features={}, num_classes={}'.format(self.in_features,
                                                       self.num_classes)
| [
"kurisusnowdeng@gmail.com"
] | kurisusnowdeng@gmail.com |
4fb0267024365fe73642c4b39046867cd3b64c55 | 7412328d5ddf175ca4186a5d8015e03fcc12ad58 | /website_blocker.py | 5b3db032c4e22df099017d6fcacbff5bf6d95442 | [] | no_license | Subho25/Website_Blocker | e3f2e1bb62a37d80556445f2b9cedfaa6b19b1c2 | 320b444382ce45302e26dd9f4bdb2b7b999d903d | refs/heads/main | 2023-04-24T06:16:23.655970 | 2021-05-06T11:22:22 | 2021-05-06T11:22:22 | 325,788,598 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,212 | py | import time
from datetime import datetime as dt
# Hosts-file website blocker: during "working hours" (08:00-17:00 local time)
# the sites in `website_list` are redirected to localhost; outside that window
# the redirect entries are removed again. Needs write access to the Windows
# hosts file (i.e. run with administrator rights).
hosts_path = r"C:\Windows\System32\drivers\etc\hosts"
# NOTE(review): `hosts_temp` appears unused in this script.
hosts_temp = "hosts"
redirect = "127.0.0.1"  # blocked hostnames resolve to localhost
website_list = [
    "www.facebook.com",
    "facebook.com",
    "twitter.com",
    "www.twitter.com",
    "instagram.com",
    "www.instagram.com"
]
# Poll forever, reconciling the hosts file with the current time every 5 seconds.
while True:
    if dt(dt.now().year, dt.now().month, dt.now().day, 8) \
            < dt.now() < dt(dt.now().year, dt.now().month, dt.now().day, 17):
        print("Working Hours...")
        # 'r+' then read(): the file pointer ends at EOF, so the writes below
        # append new redirect lines without touching existing content.
        with open(hosts_path, 'r+') as file:
            content = file.read()
            for website in website_list:
                if website in content:
                    pass  # already blocked; skip
                else:
                    file.write(redirect+" "+website+"\n")
    else:
        print("Fun Hours...")
        # Rewrite the file in place, dropping every line that mentions a
        # blocked site; truncate() discards leftover bytes of the longer original.
        with open(hosts_path, 'r+') as file:
            content = file.readlines()
            file.seek(0)
            for line in content:
                if not any(website in line for website in website_list):
                    file.write(line)
            file.truncate()
    time.sleep(5)
| [
"01chandrasubhodeep@gmail.com"
] | 01chandrasubhodeep@gmail.com |
3f87c89ac30e6067d6d60050f99e7ddc4417d01a | 0435b6282cfc7cb27c5c5f2d7d2bcbf160ca8d7d | /Project_2/linear.py | 7c9b6e7ce2bec81bd7b80dcd64aeeecc566bf256 | [] | no_license | zx-joe/EPFL-Deep-Learning | 39d97b1d02c2d4b5fdee471ffe41ce06328e2f9a | 8d2b1aa94608e6cdc2dcc60fa6c5f4e3b7e69e36 | refs/heads/master | 2022-12-11T11:11:07.960851 | 2020-09-08T09:03:21 | 2020-09-08T09:03:21 | 190,944,017 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,972 | py | import torch
import math
from torch import Tensor, FloatTensor
import matplotlib.pyplot as plt
from module import Module
class Linear(Module):
    """A single fully-connected layer (y = W x + b) with manually managed gradients.

    Weights/bias can be initialized to zeros, from a normal distribution with
    std `eps`, or (default) with Xavier initialization.
    """
    def __init__(self, in_dim, out_dim, eps=1., method='xavier'):
        """Create the layer.

        :param in_dim: size of the input vector
        :param out_dim: size of the output vector
        :param eps: std used by the 'normal' initialization
        :param method: 'zero', 'normal', or anything else for Xavier (default)
        """
        self.in_dim = in_dim
        self.out_dim = out_dim
        # Parameters and their accumulated gradients.
        self.w = FloatTensor(out_dim, in_dim)
        self.dw = FloatTensor(out_dim, in_dim)
        self.b = FloatTensor(out_dim)
        self.db = FloatTensor(out_dim)
        # Initialization: defaulted to Xavier.
        if method == 'zero':
            self.w = self.w.fill_(0)
            # Bug fix: previously `self.b` was rebound to `self.w.fill_(0)`,
            # i.e. the bias became an alias of the zeroed weight *matrix*
            # (wrong shape). Zero the bias tensor itself instead.
            self.b = self.b.fill_(0)
        elif method == 'normal':
            self.w = self.w.normal_(mean=0, std=eps)
            # Bug fix: previously this line assigned `self.b.normal_(...)` to
            # `self.w`, overwriting the weights with the bias vector and
            # leaving the bias attribute pointing at the same storage.
            self.b = self.b.normal_(mean=0, std=eps)
        else:
            # Xavier: std = sqrt(2 / (fan_in + fan_out)).
            temp_std = 1. / math.sqrt((self.in_dim + self.out_dim) / 2)
            self.w = self.w.normal_(mean=0, std=temp_std)
            self.b = self.b.normal_(mean=0, std=temp_std)
        # Gradients start accumulated at zero.
        self.dw = self.dw.zero_()
        self.db = self.db.zero_()
    def forward(self, x):
        """Compute y = W x + b, caching the input for the backward pass."""
        self.input = x.clone()
        self.output = self.w.matmul(self.input) + self.b
        return self.output
    def backward(self, gradwrtoutput):
        """Accumulate dW and db from dL/dy and return dL/dx."""
        temp_wt = self.w.t()
        # dW += (dL/dy) outer-product x
        temp_dw = gradwrtoutput.view(-1, 1).mm(self.input.view(1, -1))
        self.dw.add_(temp_dw)
        # db += dL/dy
        temp_db = gradwrtoutput.clone()
        self.db.add_(temp_db)
        # dx = W^T (dL/dy)
        temp_dx = temp_wt.matmul(gradwrtoutput)
        return temp_dx
    def param(self):
        """Return [w, dw, b, db]: each parameter paired with its gradient."""
        return [self.w, self.dw, self.b, self.db]
    def zero_grad(self):
        """Reset accumulated gradients to zero in place."""
        self.dw.zero_()
        self.db.zero_()
"xiao.zhou@epfl.ch"
] | xiao.zhou@epfl.ch |
29732b4e97e41afa600a1a52c1ba800ee682dec3 | 94146fef03b1b7e4f3bb5d02067772a3d3e114c4 | /Token.py | c70abacf27485d8df47e869f60da678d2a7c0d0a | [
"MIT"
] | permissive | PMARINA/Canvas-Autograder | a3654d15f8af5b80c7f8b37181487e3ddd931239 | 9a10b0872061202ea6311b6df257f1bbc071b798 | refs/heads/main | 2023-03-07T02:12:24.332766 | 2021-02-21T08:16:37 | 2021-02-21T08:16:37 | 340,815,943 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,980 | py | """Handles the user token for making authenticated requests to the canvas api."""
import os
import pickle
import pyperclip # type: ignore
from loguru import logger
import Canvas_Request
import Variables
token_fp: str = os.path.abspath(Variables.TOKEN_FILE)
token_string: str = ""
def verify() -> None:
    """Save token if none found. Verify new or existing token by requesting the list of courses available.
    Raises:
        ValueError: Using the token results in a non-ok http status code
    """
    global token_fp, token_string
    if not os.path.exists(token_fp):
        # First run: pull the token from the system clipboard and persist it
        # (pickled) so later runs can load it from disk.
        error_msg: str = f"Token was not found at {token_fp}"
        logger.info(error_msg)
        input("Please copy the token. When ready, please hit enter. >>> ")
        token_string = pyperclip.paste().strip()
        with open(token_fp, "wb") as f:
            pickle.dump(token_string, f)
        # NOTE(review): this message says "Exiting" but execution continues
        # into the verification request below.
        logger.success(f"Saved token to {token_fp}. Exiting")
    # Smoke-test the token against the API: list courses and require HTTP 200.
    stat_code: int = Canvas_Request.get_endpoint("courses").status_code
    if stat_code != 200:
        raise ValueError(
            (
                "Canvas server indicates a bad token."
                f" Please delete {token_fp} and try again."
            ),
        )
def load() -> None:
    """Read the pickled Canvas token from disk into the module-level cache.
    Raises:
        EnvironmentError: No token file exists at ``token_fp``.
    """
    global token_fp, token_string
    if not os.path.exists(token_fp):
        raise EnvironmentError(f"Token was not found at {token_fp}")
    with open(token_fp, "rb") as handle:
        token_string = pickle.load(handle)
def get() -> str:
    """Return the user's token, lazily loading it from disk on first use.
    Subsequent calls hit the in-memory cache, so only the first call may be slow.
    Returns:
        str: The user's token
    """
    global token_string
    if token_string == "":
        load()
    return token_string
| [
"bluerat33@gmail.com"
] | bluerat33@gmail.com |
cc29d5398e6d41ed19ef958c63351c898a368bb6 | 11cd362cdd78c2fc48042ed203614b201ac94aa6 | /desktop/core/ext-py3/boto-2.49.0/boto/configservice/exceptions.py | 58aa550f9cd9b165a42667b8d7135033fda20174 | [
"CC-BY-3.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0",
"Unlicense",
"LGPL-3.0-only",
"CC0-1.0",
"LicenseRef-scancode-other-permissive",
"CNRI-Python",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-2.0-or-later",
"Python-2.0",
"GPL-3.0... | permissive | cloudera/hue | b42343d0e03d2936b5a9a32f8ddb3e9c5c80c908 | dccb9467675c67b9c3399fc76c5de6d31bfb8255 | refs/heads/master | 2023-08-31T06:49:25.724501 | 2023-08-28T20:45:00 | 2023-08-28T20:45:00 | 732,593 | 5,655 | 2,244 | Apache-2.0 | 2023-09-14T03:05:41 | 2010-06-21T19:46:51 | JavaScript | UTF-8 | Python | false | false | 2,528 | py | # Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.exception import BotoServerError
class InvalidLimitException(BotoServerError):
    """Raised when the service returns an ``InvalidLimitException`` error."""
    pass
class NoSuchBucketException(BotoServerError):
    """Raised when the service returns a ``NoSuchBucketException`` error."""
    pass
class InvalidSNSTopicARNException(BotoServerError):
    """Raised when the service returns an ``InvalidSNSTopicARNException`` error."""
    pass
class ResourceNotDiscoveredException(BotoServerError):
    """Raised when the service returns a ``ResourceNotDiscoveredException`` error."""
    pass
class MaxNumberOfDeliveryChannelsExceededException(BotoServerError):
    """Raised when the service returns a ``MaxNumberOfDeliveryChannelsExceededException`` error."""
    pass
class LastDeliveryChannelDeleteFailedException(BotoServerError):
    """Raised when the service returns a ``LastDeliveryChannelDeleteFailedException`` error."""
    pass
class InsufficientDeliveryPolicyException(BotoServerError):
    """Raised when the service returns an ``InsufficientDeliveryPolicyException`` error."""
    pass
class InvalidRoleException(BotoServerError):
    """Raised when the service returns an ``InvalidRoleException`` error."""
    pass
class InvalidTimeRangeException(BotoServerError):
    """Raised when the service returns an ``InvalidTimeRangeException`` error."""
    pass
class NoSuchDeliveryChannelException(BotoServerError):
    """Raised when the service returns a ``NoSuchDeliveryChannelException`` error."""
    pass
class NoSuchConfigurationRecorderException(BotoServerError):
    """Raised when the service returns a ``NoSuchConfigurationRecorderException`` error."""
    pass
class InvalidS3KeyPrefixException(BotoServerError):
    """Raised when the service returns an ``InvalidS3KeyPrefixException`` error."""
    pass
class InvalidDeliveryChannelNameException(BotoServerError):
    """Raised when the service returns an ``InvalidDeliveryChannelNameException`` error."""
    pass
class NoRunningConfigurationRecorderException(BotoServerError):
    """Raised when the service returns a ``NoRunningConfigurationRecorderException`` error."""
    pass
class ValidationException(BotoServerError):
    """Raised when the service returns a ``ValidationException`` error."""
    pass
class NoAvailableConfigurationRecorderException(BotoServerError):
    """Raised when the service returns a ``NoAvailableConfigurationRecorderException`` error."""
    pass
class InvalidNextTokenException(BotoServerError):
    """Raised when the service returns an ``InvalidNextTokenException`` error."""
    pass
class InvalidConfigurationRecorderNameException(BotoServerError):
    """Raised when the service returns an ``InvalidConfigurationRecorderNameException`` error."""
    pass
class NoAvailableDeliveryChannelException(BotoServerError):
    """Raised when the service returns a ``NoAvailableDeliveryChannelException`` error."""
    pass
class MaxNumberOfConfigurationRecordersExceededException(BotoServerError):
    """Raised when the service returns a ``MaxNumberOfConfigurationRecordersExceededException`` error."""
    pass
| [
"noreply@github.com"
] | cloudera.noreply@github.com |
232ac4debdccb67b46d0441af9c4ba867812edf9 | 3539d0e3ddd7849a14876e95f0332428ec28ebf7 | /Data Scientist Career Path/11. Foundations of Machine Learning Supervised Learning/4. Classification/1. KNN/7. classify.py | 9009fe68fc2f9cd150796ea094d743cfcc322a16 | [
"MIT"
] | permissive | DincerDogan/Data-Science-Learning-Path | ff146de2cf4ebc5fedfa9377babf959208dfe7e6 | 2ba0f104bc67ab6ef0f8fb869aa12aa02f5f1efb | refs/heads/main | 2023-05-08T10:53:47.449974 | 2021-06-06T21:27:31 | 2021-06-06T21:27:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,141 | py | from movies import movie_dataset, movie_labels, normalize_point
def distance(movie1, movie2):
    """Euclidean distance between two equal-length feature vectors."""
    total = sum((movie1[i] - movie2[i]) ** 2 for i in range(len(movie1)))
    return total ** 0.5
def classify(unknown, dataset, labels, k):
    """Label `unknown` by majority vote of its k nearest neighbours in `dataset`.

    `dataset` maps titles to feature vectors and `labels` maps titles to
    0 (bad) or 1 (good). Returns 1 only when good votes strictly outnumber
    bad votes, otherwise 0.
    """
    # Rank every title by its distance to the query point (ties break on title).
    ranked = sorted([distance(features, unknown), title]
                    for title, features in dataset.items())
    # Collect the labels of the k closest titles and tally the votes.
    votes = [labels[title] for _, title in ranked[:k]]
    num_good = votes.count(1)
    num_bad = votes.count(0)
    return 1 if num_good > num_bad else 0
# Sanity check: confirm the title exists in the dataset before classifying.
print("Call Me By Your Name" in movie_dataset)
# Raw features: [budget, runtime, release year] -- assumed order; verify against movies.py.
my_movie = [3500000, 132, 2017]
normalized_my_movie = normalize_point(my_movie)
print(normalized_my_movie)
# Predict with the 5 nearest neighbours (1 = good movie, 0 = bad).
print(classify(normalized_my_movie, movie_dataset, movie_labels, 5))
| [
"aristyanto2320@gmail.com"
] | aristyanto2320@gmail.com |
39457cf330a0cc0001c530272b46b72c44f37a05 | ef37e8e9ac656e211ccf3354734261395907a12e | /sfotipy/urls.py | 6d76bd2cc401975533a9449802442d0465b3469d | [] | no_license | seethersan/sfotipy | 655846fd9b4e7c255b2ce328bda767597d6aa89b | f0d23cfd82df7c65fb4cdd9b1d3dac3bfed32cd7 | refs/heads/master | 2021-01-10T04:15:19.577795 | 2016-01-07T00:56:15 | 2016-01-07T00:56:15 | 49,170,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,172 | py | """sfotipy URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls import url, include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url, patterns, include
from django.contrib import admin, auth
from django.conf import settings
from artists.views import ArtistDetailView, ArtistViewSet, ArtistListView
from albums.views import AlbumViewSet
from tracks.views import TrackViewSet
from rest_framework import routers
# DRF router: exposes /api/artists, /api/albums and /api/tracks endpoints.
router = routers.DefaultRouter()
router.register(r'artists', ArtistViewSet)
router.register(r'albums', AlbumViewSet)
router.register(r'tracks', TrackViewSet)
# URL table (legacy Django `patterns()` syntax with dotted-string views).
urlpatterns = patterns('',
    url(r'^admin/password_reset/$', 'django.contrib.auth.views.password_reset', name='admin_password_reset'),
    url(r'^admin/password_reset/done/$', 'django.contrib.auth.views.password_reset_done', name='password_reset_done'),
    url(r'^$', 'home.views.index'),
    url(r'^grappelli/', include('grappelli.urls')),
    url(r'^admin/', admin.site.urls),
    url(r'^tracks/(?P<title>[\w\-\W]+)/', 'tracks.views.track_view', name='track_view'),
    url(r'^artists/(?P<pk>[\d]+)/', ArtistDetailView.as_view()),
    url(r'^artists/', ArtistListView.as_view()),
    url(r'^signup/', 'userprofiles.views.signup', name='signup'),
    # NOTE(review): unlike the other patterns, r'signin/' is not anchored with
    # '^', so it matches any path containing "signin/" -- confirm intended.
    url(r'signin/', 'userprofiles.views.signin', name='signin'),
    url(r'^api/', include(router.urls)),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
)
# Serve uploaded media through Django itself only during development.
if settings.DEBUG:
    urlpatterns += patterns('',
        url(r'^media/(?P<path>.*)$', 'django.views.static.serve',{'document_root': settings.MEDIA_ROOT,}),
    )
| [
"carlos_jcez@hotmail.com"
] | carlos_jcez@hotmail.com |
aa73120bf09c8c6af195213100bedac6b5e7e642 | 54934cfe32ce5aa5c2e718b0c5c2afa4b458fe75 | /25ch/graph.py | 617c1d6ad6e8616a8e8c0e6ea2ca8c171b8b289c | [] | no_license | mccarvik/intro_to_algorithms | 46d0ecd20cc93445e0073eb0041d481a29322e82 | c2d41706150d2bb477220b6f929510c4fc4ba30b | refs/heads/master | 2021-04-12T12:25:14.083434 | 2019-11-09T05:26:28 | 2019-11-09T05:26:28 | 94,552,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,475 | py | from random import randint
import pdb
class Graph:
    """Weighted graph stored as adjacency sets plus an edge-weight map.

    Vertices are assumed to be the contiguous integers 0..n-1. Undirected
    graphs are represented by storing each edge in both directions.
    """
    def __init__(self, fname=None, numVertices=None, numEdges=None, weightRange=None, directed=True):
        """Build a graph either from an edge-list file or randomly.

        Without `fname`, missing size parameters are prompted for on stdin.
        """
        self.adjacent = {}
        self.weight = {}
        if fname is not None:
            self.loadGraph(fname, directed)
        else:
            if any(arg is None for arg in (numVertices, numEdges, weightRange)):
                numVertices, numEdges, weightRange = map(int, input("numVertices, numEdges, weightRange: ").split())
            self.randomGraph(numVertices, numEdges, weightRange, directed)
    def numVertices(self):
        """Number of vertices in the graph."""
        return len(self.adjacent)
    def vertices(self):
        """Vertex ids as a range 0..n-1."""
        return range(self.numVertices())
    def edges(self):
        """Yield every (fromVertex, toVertex) pair in the graph."""
        for source in self.vertices():
            for target in self.adjacent[source]:
                yield (source, target)
    def addDirectedEdge(self, fromVertex, toVertex, weight):
        """Insert a weighted edge fromVertex -> toVertex."""
        neighbours = self.adjacent.setdefault(fromVertex, set())
        neighbours.add(toVertex)
        self.weight[(fromVertex, toVertex)] = weight
    def addUndirectedEdge(self, fromVertex, toVertex, weight):
        """Insert the edge in both directions with the same weight."""
        for a, b in ((fromVertex, toVertex), (toVertex, fromVertex)):
            self.addDirectedEdge(a, b, weight)
    def randomGraph(self, numVertices, numEdges, weightRange, directed):
        """Populate the graph with `numEdges` random self-loop-free edges."""
        addEdge = self.addDirectedEdge if directed else self.addUndirectedEdge
        for vertex in range(numVertices):
            self.adjacent[vertex] = set()
        for _ in range(numEdges):
            # Redraw until the endpoints differ (no self-loops).
            fromVertex = toVertex = None
            while fromVertex == toVertex:
                fromVertex = randint(0, numVertices - 1)
                toVertex = randint(0, numVertices - 1)
            addEdge(fromVertex, toVertex, randint(0, weightRange))
    def loadGraph(self, fname, directed):
        """Load a graph from a file: first line is the vertex count, then
        one 'fromVertex toVertex weight' triple per line."""
        addEdge = self.addDirectedEdge if directed else self.addUndirectedEdge
        with open(fname, 'r') as f:
            for vertex in range(int(f.readline())):
                self.adjacent[vertex] = set()
            for line in f:
                fromVertex, toVertex, weight = map(int, line.split())
                addEdge(fromVertex, toVertex, weight)
    def adjacentStr(self, fromVertex):
        """Comma-separated '(neighbour, weight)' pairs for one vertex."""
        pairs = [f"({toVertex}, {self.weight[(fromVertex, toVertex)]})"
                 for toVertex in self.adjacent[fromVertex]]
        return ", ".join(pairs)
    def __str__(self):
        """One 'vertex: (n1, w1), (n2, w2), ...' line per vertex."""
        lines = [f"{vertex}: {self.adjacentStr(vertex)}" for vertex in self.vertices()]
        return "\n".join(lines)
    def __repr__(self):
        """Interactive representation matches `str`."""
        return str(self)
| [
"ec2-user@ip-172-31-91-31.ec2.internal"
] | ec2-user@ip-172-31-91-31.ec2.internal |
d3edd991480a17a75bc5d77d28df7498dfa829ef | f4271ab6fd88b2c05ccc45cdaa7611f06ba8a489 | /jianda/extra_plugins/qiniudemo/qiniudemo/settings.py | f5887a9d1db9958c9bbec1fe2ecf248c55f09752 | [] | no_license | wanganzhi1986/wantest | 245914f4a2ffcfd6d9a0e17576eedeecbe2156db | 6512e59816d3a111b602ca21f07bddc45b2ee022 | refs/heads/master | 2020-06-17T21:40:58.305121 | 2017-07-15T05:45:00 | 2017-07-15T05:45:00 | 74,971,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,134 | py | """
Django settings for qiniudemo project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# Project base directory: two levels up from this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '7^$h*vrw&j%=6effp0z$r6*pb#hw=u*5(65t#pp=^(gk3e6p=8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'video',  # project-local app
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'qiniudemo.urls'
WSGI_APPLICATION = 'qiniudemo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
# Development database: single SQLite file in the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
# for static files
# Note: both settings below are single-item tuples -- the trailing comma matters.
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, "./static"),
)
TEMPLATE_DIRS = (os.path.join(BASE_DIR, "./templates"),)
| [
"wangwei03@puhuifinance.com"
] | wangwei03@puhuifinance.com |
01f71854fb9d777d10176477c617eccd675ac52c | a3f1e80179c23d9202d72b75dd37a49b44785f45 | /api/client/test/test_api_catalog_upload_item.py | f3b1cbbfc6b43d84d22085c8e5fe119b0da41801 | [
"Apache-2.0"
] | permissive | pvaneck/mlx | b1e82fae5ac8aaa1dddac23aaa38c46f6e6cfc27 | 6edaa0bd77787c56b737322a0c875ae30de6cd49 | refs/heads/main | 2023-05-14T06:08:38.404133 | 2021-05-04T01:41:11 | 2021-05-04T01:41:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,515 | py | # Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
MLX API
MLX API Extension for Kubeflow Pipelines # noqa: E501
OpenAPI spec version: 0.1.25-related-assets
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.api_catalog_upload_item import ApiCatalogUploadItem # noqa: E501
from swagger_client.rest import ApiException
class TestApiCatalogUploadItem(unittest.TestCase):
    """ApiCatalogUploadItem unit test stubs"""
    # Swagger-codegen-generated scaffold: fixtures are intentionally empty.
    def setUp(self):
        # No fixtures required for the stub test.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    def testApiCatalogUploadItem(self):
        """Test ApiCatalogUploadItem"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.api_catalog_upload_item.ApiCatalogUploadItem() # noqa: E501
        pass
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| [
"82406273+mlx-bot@users.noreply.github.com"
] | 82406273+mlx-bot@users.noreply.github.com |
311778ed50cc91a89d77cd08a94fd8589d8562ed | 55ba24f74f70e8a3bf2ab28dac0655eacfb8cdb9 | /src/feature_extraction.py | d0ec331c6264a6f87ccff30553b2cdef154acff3 | [
"MIT"
] | permissive | grantgasser/vehicle-detection | 7afd9130745dffbe8bcc19598b03cc759308389e | 812932d545eba088f530fa77dc9ef0399d30b713 | refs/heads/master | 2023-02-28T13:58:52.225288 | 2021-02-10T02:58:05 | 2021-02-10T02:58:05 | 318,337,853 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,300 | py | from tqdm import tqdm
import numpy as np
import cv2
from skimage.feature import hog
def get_color_hist_features(img, bins=32, range=(0, 256)):
    """Concatenated per-channel color histograms of a 3-channel image.

    Note: the `range` parameter deliberately keeps its original name (it
    shadows the builtin `range`) so existing keyword callers keep working.
    """
    per_channel = [np.histogram(img[:, :, channel], bins=bins, range=range)[0]
                   for channel in (0, 1, 2)]
    return np.concatenate(per_channel)
def change_color_space(img, color_space='RGB'):
    """Convert an RGB image into the requested color space.

    Raises ValueError for an unrecognised `color_space` name.
    """
    if color_space == 'RGB':
        return img
    conversions = {
        'LUV': cv2.COLOR_RGB2LUV,
        'HLS': cv2.COLOR_RGB2HLS,
        'YCrCb': cv2.COLOR_RGB2YCrCb,
        'HSV': cv2.COLOR_RGB2HSV,
        'YUV': cv2.COLOR_RGB2YUV,
    }
    if color_space not in conversions:
        raise ValueError('Invalid color space:', color_space)
    return cv2.cvtColor(img, conversions[color_space])
def get_spatial_features(img, size=(32, 32)):
    """Spatially bin `img` down to `size` and flatten into a 1-D feature vector."""
    return cv2.resize(img, size).ravel()
def get_hog_features(img):
    """Histogram-of-oriented-gradients feature vector (skimage `hog` defaults)."""
    return hog(img)
def extract_features(images, hist_bins, color_space, spatial_size):
    """Build one concatenated feature vector per image.

    Each vector is [color histogram | spatial bins | HOG], computed after
    converting the image to `color_space`.
    """
    features = []
    for raw in images:
        converted = change_color_space(raw, color_space=color_space)
        vector = np.concatenate((
            get_color_hist_features(converted, bins=hist_bins),
            get_spatial_features(converted, size=spatial_size),
            get_hog_features(converted),
        ))
        features.append(vector)
    return features
"glgasser@gmail.com"
] | glgasser@gmail.com |
d6eff62f0cf2c08de8264ef0002a1498a2529538 | 4d53f9c03ce769c51c4baa7c1920045c0f45f799 | /moon/viewer/layout.py | d3bc0c3b3ad282478e581481c9d2a8e264bc197a | [
"MIT"
] | permissive | epunzal2/moon-board-climbing | 9b206e001d1d0eb49a259dfba8d3d43c270be46e | ae149586099f4e0a0ee3664404f6493f19f6ad61 | refs/heads/master | 2023-08-15T03:11:08.297803 | 2020-06-08T21:31:37 | 2020-06-08T21:31:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,565 | py | import os
import sys
import tkinter as tk
from PIL import Image, ImageTk
from climbset import Climbset
import_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + "/types"
sys.path.append(import_path)
base_image = Image.open(os.path.dirname(os.path.realpath(__file__)) + "/cleaned.png")
def fix_transparency(climb_image):
climb_image = climb_image.convert("RGBA")
datas = climb_image.getdata()
# Deals one by one with pixels
# Black goes to transparent
# White goes to semi transparent black
newData = []
for item in datas:
if item[0] == 255 and item[1] == 255 and item[2] == 255:
newData.append((0, 0, 0, 100))
else:
newData.append((0, 0, 0, 0))
climb_image.putdata(newData)
return climb_image
def format_image(climb_image):
    """Overlay an 11x18 climb grid onto the board background and scale for display."""
    # Keep only the 11x18 hold grid, then upscale it to the background's scale.
    holds = climb_image.crop((0, 0, 11, 18)).resize((540, 900), Image.ANTIALIAS)
    # Convert holds to transparent regions.
    holds = fix_transparency(holds)
    # Position the holds on a transparent canvas matching the background size.
    canvas = Image.new("RGBA", (650, 1000))
    canvas.paste(holds, (75, 65))
    # Composite the transparent holds over the board photo.
    composited = Image.alpha_composite(base_image, canvas)
    # Finally scale down for display.
    return composited.resize((400, 615), Image.ANTIALIAS)
class ClimbsetNavigator:
    """Tkinter viewer for browsing, deleting and saving the climbs in a Climbset."""
    def __init__(self, climbset):
        """Build the window, widgets and key bindings for `climbset`."""
        self.app_root = tk.Tk()
        # Create the variables for managing the display
        self.climb_num = 0
        self.overlay_visible = True
        self.climbset = climbset
        # Bind keypress events
        self.app_root.bind("<Left>", self.left_event)
        self.app_root.bind("<Right>", self.right_event)
        self.app_root.bind("<Escape>", self.close_window)
        self.app_root.bind("<Delete>", self.delete_event)
        # Create the display elements
        self.top_label = tk.Label(
            self.app_root, text="Image 1 of {}".format(len(self.climbset.climbs))
        )
        self.grade_label = tk.Label(self.app_root, text="Grade: ")
        self.left_button = tk.Button(self.app_root, text="<-", command=self.last_image)
        self.right_button = tk.Button(self.app_root, text="->", command=self.next_image)
        self.toggle_button = tk.Button(self.app_root, text="Toggle", command=self.toggle_overlay)
        self.delete_button = tk.Button(self.app_root, text="Delete", command=self.delete_current)
        self.save_button = tk.Button(self.app_root, text="Save Climbs", command=self.save_all)
        self.main_image = tk.Label(self.app_root)
        # Manage the layout
        self.top_label.grid(column=0, row=0, columnspan=3, padx=10, pady=10)
        self.grade_label.grid(column=3, row=0, columnspan=2, padx=10, pady=10)
        self.left_button.grid(column=0, row=1, padx=10, pady=10)
        self.right_button.grid(column=4, row=1, padx=10, pady=10)
        self.toggle_button.grid(column=0, row=2, columnspan=2, padx=10, pady=10)
        self.delete_button.grid(column=2, row=2, padx=10, pady=10)
        self.save_button.grid(column=3, row=2, columnspan=2, padx=10, pady=10)
        self.main_image.grid(column=1, row=1, columnspan=3, padx=10, pady=10)
        # Manage the initial state of buttons
        # We start at the first climb, so "previous" is never available; with a
        # single climb, "next" and "delete" are disabled too.
        self.left_button.config(state=tk.DISABLED)
        if len(self.climbset.climbs) == 1:
            self.right_button.config(state=tk.DISABLED)
            self.delete_button.config(state=tk.DISABLED)
        self.set_image_from_index()
    def left_event(self, event):
        """Keyboard shortcut: show the previous climb."""
        self.last_image()
    def right_event(self, event):
        """Keyboard shortcut: show the next climb."""
        self.next_image()
    def close_window(self, event):
        """Keyboard shortcut (Escape): hide the window."""
        self.app_root.withdraw()
    def delete_event(self, event):
        """Keyboard shortcut (Delete): remove the current climb."""
        self.delete_current()
    def save_all(self):
        """Pickle the whole climbset to ../data/climbsets/climbs.pkl."""
        from pathlib import Path
        import pickle
        save_dir = str(Path().resolve().parent) + "/data/climbsets/"
        save_name = "climbs.pkl"
        with open(save_dir + save_name, "wb") as handle:
            pickle.dump(self.climbset, handle)
    def delete_current(self):
        """Remove the displayed climb, keeping at least one climb in the set."""
        if len(self.climbset.climbs) > 1:
            self.climbset.climbs.pop(self.climb_num)
            if self.climb_num == 0:
                self.process_image_change()
            else:
                self.last_image()
        if len(self.climbset.climbs) <= 1:
            self.delete_button.config(state=tk.DISABLED)
    def toggle_overlay(self):
        """Flip the overlay flag.
        NOTE(review): `overlay_visible` is never read when rendering, so this
        button currently has no visible effect -- confirm intended behaviour.
        """
        self.overlay_visible = not self.overlay_visible
    def set_image_from_index(self):
        # Change the image which is displayed to match the index
        image = self.climbset.climbs[self.climb_num].as_image()
        image = format_image(image)
        # Round-trip through a file because PhotoImage here is built from a path;
        # keeping the PhotoImage on `self` prevents it being garbage-collected.
        image.save("current.png")
        self.img = ImageTk.PhotoImage(file="current.png")
        self.main_image.configure(image=self.img)
        climb = self.climbset.climbs[self.climb_num]
        self.grade_label.configure(text=f"Grade: {climb.grade} - {climb.grade.grade_number}")
    def run(self):
        # Show the app
        self.app_root.mainloop()
    def next_image(self):
        # Move to next image
        if self.climb_num < len(self.climbset.climbs) - 1:
            self.climb_num += 1
            self.process_image_change()
    def last_image(self):
        # Move to previous image
        if self.climb_num > 0:
            self.climb_num += -1
            self.process_image_change()
    def process_image_change(self):
        """Redraw the image and refresh labels/buttons after `climb_num` changes."""
        self.set_image_from_index()
        self.update_view_state()
    def update_view_state(self):
        # Check that the title at the top and the left and right buttons are in the correct state
        self.top_label.configure(
            text="Image {} of {}".format(self.climb_num + 1, len(self.climbset.climbs))
        )
        # Disable "previous" at the first climb and "next" at the last one.
        if self.climb_num == 0:
            self.left_button.config(state=tk.DISABLED)
        elif self.climb_num >= 1:
            self.left_button.config(state=tk.NORMAL)
        if self.climb_num == len(self.climbset.climbs) - 1:
            self.right_button.config(state=tk.DISABLED)
        elif self.climb_num <= len(self.climbset.climbs):
            self.right_button.config(state=tk.NORMAL)
if __name__ == "__main__":
    # Demo entry point: build a sample climbset from encoded hold strings
    # and open the navigator window.
    example_no_string = ["ChDlHnGjEr", "JbIeDhDjCmEoBr", "FeHhJhJhHkEjEmEnIhEoDqEr"] if False else ["ChDlHnGjEr", "JbIeDhDjCmEoBr", "FeHhJhHkEjEmEnIhEoDqEr"]
    loaded_climbset = Climbset(example_no_string, "sample")
    app = ClimbsetNavigator(loaded_climbset)
    app.run()
| [
"houghtonandrew0@gmail.com"
] | houghtonandrew0@gmail.com |
b75c4770199293de8d847af50386a6f6211d23b6 | 26c0f80688f75a188097a232c229a73c8e7cc6ed | /user/migrations/0031_alter_profile_zipcode.py | 26fb0ef6fbd873151e7d3d517d0ac2cbcf69cd3b | [] | no_license | creep1g/DjangoWebstore | 8207d7ea53c478fb7e5745e1c6ae6699102b5df5 | bd27340b86bf2289b8c14216462d932ccdf4986d | refs/heads/main | 2023-05-06T09:50:04.846489 | 2021-05-28T14:40:40 | 2021-05-28T14:40:40 | 371,730,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | # Generated by Django 3.2 on 2021-05-12 15:52
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: redefines Profile.zipcode as a plain
    # (non-null) IntegerField.
    dependencies = [
        ('user', '0030_merge_0024_alter_profile_card_cvc_0029_profile_city'),
    ]
    operations = [
        migrations.AlterField(
            model_name='profile',
            name='zipcode',
            field=models.IntegerField(),
        ),
    ]
| [
"thorgilshjalmarsson@gmail.com"
] | thorgilshjalmarsson@gmail.com |
a86a3fd42452beca316aaf1e2ab27e00942906ad | 266c55fd4f94fdf85652f8000d4e0f3d7ded68a5 | /opener-project/get-er-done.py | cf1c838153bcd3724046f49cb514c5ec7c105a4f | [] | no_license | yuqing123/compjour-hw | 5a30c210518cc26e8b971267f1019a5ef5d57a50 | 16d5992b1a90f104273c6baa59a568e38bdbf736 | refs/heads/master | 2021-01-22T19:44:52.475524 | 2015-06-10T04:03:13 | 2015-06-10T04:03:13 | 33,498,215 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,055 | py | import os.path
# Homework checklist: report which of the expected project files exist and
# have non-trivial content (>= 100 bytes), then summarise what is missing.
filenames = [
    "hello-world.py",
    "imports-and-setup.py",
    "stanford-news-download.py",
    "stanford-news-heds.py",
    "stanford-news-topics.py",
    "get-er-done.py",
    "fetch-and-unpack-twitter-data.py",
    "read-sunlight-csv.py",
    "read-twitter-json.py",
    "twitter_foo.py",
    "twitter_foo_fun.py",
    "twitter-tablemaker.py",
    "twitter-word-tweets.py"
]
# Warn when the script is not being run from the expected directory name.
current_path = os.path.dirname(os.path.realpath(__file__))
if current_path.split('/')[-1] != 'opener-project':
    print("Warning: the project directory needs to be named: opener-project")
missing_files = []
for fn in filenames:
    # A file counts as finished only if it exists AND holds >= 100 bytes
    # (short-circuit keeps getsize from running on a missing file).
    finished = os.path.exists(fn) and os.path.getsize(fn) >= 100
    if finished:
        print(fn, "...Finished!")
    else:
        missing_files.append(fn)
        print(fn, "...Unfinished")
if missing_files:
    print("###################")
    print("{} missing files".format(len(missing_files)))
    for fn in missing_files:
        print(fn)
else:
    print("""
All done (theoretically...)!
You should now be able to turn in the assignment by pushing it to Github
""")
"yuqingp@stanford.edu"
] | yuqingp@stanford.edu |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.