index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
7,200 | b69e3f5e57adc8e89b6ff22fb4a10d2539e13ca3 |
import json
import datetime
import requests
import pymysql
import pymongo
def insert_category(conn):
    """Insert the fixed set of goods categories into the MySQL database.

    conn: an open pymysql-style connection.  One row is inserted per
    category and the transaction is committed at the end.
    """
    # Category id -> display name; 1000 is the catch-all "other" bucket.
    categories_dict = {
        66: "手机",
        327: "腕表配饰",
        65: "电脑办公",
        67: "相机单反",
        217: "平板数码",
        179: "运动户外",
        255: "家电家居",
        1000: "其他",
    }
    insert_sql = "insert into goods_category (category_id, category_name, create_time) values (%s, %s, %s)"
    with conn.cursor() as cursor:
        for cat_id, cat_name in categories_dict.items():
            stamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            cursor.execute(insert_sql, (cat_id, cat_name, stamp))
        conn.commit()
def insert_brand(conn):
    """Fetch brand lists per category from fenqile and insert them into MySQL.

    conn: an open pymysql-style connection.  Brands are de-duplicated by
    brand_id; each successful insert is committed individually so a failure
    late in the run keeps earlier rows.
    """
    brand_list = []
    category_id_list = [66, 327, 65, 67, 217, 179, 255]
    brand_url = ("https://channel.fenqile.com/product/query_filter_list.json"
                 "?line_type=category_id_1&category_id={category_id}")
    for category_id in category_id_list:
        try:
            res = requests.get(brand_url.format(category_id=category_id))
            # brands is the list of brand dicts embedded in the JSON response.
            brands = json.loads(res.content.decode("utf-8")).get("brand_list")
            brand_list += brands
        except Exception:
            # BUG FIX: was a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit.  A failed category is still
            # skipped (best-effort), matching the original behaviour.
            print("出错了:category_id:", category_id)
            print()
            continue
    sql = "insert into goods_brand values (%s, %s, %s, %s, %s, %s)"
    with conn.cursor() as cursor:
        seen_brand_ids = set()  # de-duplication by brand_id
        for brand in brand_list:
            brand_id = int(brand.get("brand_id"))
            print(brand_id)
            if brand_id in seen_brand_ids:
                continue
            create_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            brand_name = brand.get("brand_name")
            # Fall back to the generic name when localized names are missing.
            brand_name_ch = brand.get("brand_name_ch") or brand_name
            brand_name_en = brand.get("brand_name_en") or brand_name
            category_id = int(brand.get("category_id_1"))
            if category_id not in category_id_list:
                category_id = 1000  # unknown categories go to the "other" bucket
            result = cursor.execute(
                sql, (brand_id, create_time, brand_name, brand_name_ch,
                      brand_name_en, category_id))
            print(result)
            conn.commit()
            seen_brand_ids.add(brand_id)
def insert_goods(conn, GOODS):
    """Copy goods documents from a MongoDB collection into the MySQL table.

    conn: open pymysql-style connection; GOODS: pymongo collection of crawled
    goods.  Malformed documents are printed and skipped (best effort).
    """
    # All 21 source fields; the first 16 are copied straight from
    # detail_data, the last 5 need special handling below.
    kws = ("product_name", "category_id_1", "brand_id", "product_desc",
           "short_product_name", "sku_key_1", "sku_key_2", "sku_key_3", "product_flag",
           "min_firstpay", "is_product_up_down", "real_amount", "mart_amount", "fq_num",
           "product_info", "delivery_time", "gift_list", "fe_params", "slider_imgs",
           "detail_imgs", "create_time")
    # Insert every column except the auto-generated goods id.
    sql = "insert into goods (good_name,category_id,brand_id,product_name,short_product_name," \
          "sku_key_1,sku_key_2,sku_key_3,product_flag,min_firstpay,is_product_up_down,real_amount," \
          "mart_amount,fq_num,product_info,delivery_time,gift_list,fe_params,slider_imgs,detail_imgs," \
          "create_time) values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
    # PERF FIX: the valid brand/category id sets are static during the run,
    # so fetch them once up front instead of re-running both full-table
    # SELECTs for every single good (was O(goods x rows)).
    with conn.cursor() as cursor:
        cursor.execute("select brand_id from goods_brand")
        all_brand_ids = {row[0] for row in cursor.fetchall()}
        cursor.execute("select category_id from goods_category")
        all_category_ids = {row[0] for row in cursor.fetchall()}
    for good in GOODS.find():
        try:
            data = []
            for kw in kws[:-5]:
                data.append(good["detail_data"].get(kw))
            # gift_list: keep only the values of the last gift entry.
            gift_list = " ".join(str(s) for s in good["detail_data"].get("gift_list")[-1].values())
            data.append(gift_list)
            data.append(json.dumps(good["detail_data"].get("fe_params")))
            data.append("||".join(good["slider_imgs"]))
            data.append("||".join(good["detail_imgs"]))
            data.append(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
            # Map missing/unknown ids onto the catch-all rows (1000 / 10000).
            data[1] = data[1] if data[1] else 1000
            data[2] = data[2] if data[2] else 10000
            data[1] = 1000 if int(data[1]) not in all_category_ids else int(data[1])
            data[2] = 10000 if int(data[2]) not in all_brand_ids else int(data[2])
            with conn.cursor() as cursor:
                cursor.execute(sql, tuple(data))
            conn.commit()
        except Exception as e:
            # NOTE(review): deliberately broad so a single malformed document
            # does not abort the whole import; the error is printed and the
            # document skipped.
            print(e)
            continue
def main():
    """Entry point: open MySQL and MongoDB connections and run the goods import."""
    # MySQL connection (autocommit off: the insert helpers commit themselves).
    conn = pymysql.connect(host="127.0.0.1", port=3306, user="root", password="123456",
                           db="test", charset="utf8", autocommit=False)
    # Insert the categories into the database (currently disabled).
    # insert_category(conn)
    # Insert the brands into the database (currently disabled).
    # insert_brand(conn)
    # Insert the goods into the database.
    # MongoDB connection holding the crawled goods documents.
    CONN = pymongo.MongoClient(host='10.7.152.75', port=27017)
    GOODS = CONN["fenqile"]["goods"]
    insert_goods(conn, GOODS)
    conn.close()
if __name__ == "__main__":
main()
|
7,201 | 30c24b9a4738c1952fc5d36a4bc36d8d3576ed3b | from django.db import models
from django.utils.translation import ugettext_lazy as _
from apps.sources.models.mixins.page_numbers import PageNumbersMixin
from apps.sources.models.source import Source
PIECE_TYPES = (('essay', 'Essay'),)
TYPE_MAX_LENGTH: int = 10
class Piece(Source, PageNumbersMixin):
    """A piece (e.g., essay)."""

    # Kind of piece; 'essay' is currently the only choice (see PIECE_TYPES).
    type = models.CharField(
        verbose_name=_('piece type'),
        max_length=TYPE_MAX_LENGTH,
        choices=PIECE_TYPES,
        default=PIECE_TYPES[0][0],
    )

    def __html__(self) -> str:
        """Return the piece's citation HTML string."""
        # Order: attributee, quoted linked title, then the date (if any).
        components = [
            self.attributee_html,
            f'"{self.linked_title}"',
            self.date.string if self.date else '',
        ]
        return self.components_to_html(components)
|
7,202 | 3cc473f6bb4b2e1dd806edb8b096a6118fe7056a | # This file is part of the printrun suite.
#
# printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with printrun. If not, see <http://www.gnu.org/licenses/>.
import traceback
import logging
import wx
class NoViz:
    """Null G-code visualizer: exposes the viewer API but renders nothing."""

    showall = False

    def clear(self, *a):
        """No-op."""

    def addfile_perlayer(self, gcode, showall = False):
        """Yield every layer index of *gcode* in order, then a final None."""
        for layer_idx in range(len(gcode.all_layers)):
            yield layer_idx
        yield None

    def addfile(self, *a, **kw):
        """No-op."""

    def addgcode(self, *a, **kw):
        """No-op."""

    def addgcodehighlight(self, *a, **kw):
        """No-op."""

    def Refresh(self, *a):
        """No-op."""

    def setlayer(self, *a):
        """No-op."""
class NoVizWindow:
    """Stand-in for the visualization window when visualization is disabled."""

    def __init__(self):
        # .p mimics the real window's viewer panel attribute (a NoViz stub).
        self.p = NoViz()

    def Destroy(self):
        # Real wx windows require Destroy(); nothing to release here.
        pass
class VizPane(wx.BoxSizer):
    """Sizer that builds and attaches the G-code visualization for *root*.

    Depending on root.settings this installs a 3D viewer, a 2D viewer, or the
    NoViz stubs on root.gviz, and prepares the standalone view window
    (root.gwindow), falling back to 2D when 3D initialization fails.
    """

    def __init__(self, root, parentpanel = None):
        super(VizPane, self).__init__(wx.VERTICAL)
        if not parentpanel: parentpanel = root.panel
        # "None" disables visualization entirely: install the null stubs.
        if root.settings.mainviz == "None":
            root.gviz = NoViz()
            root.gwindow = NoVizWindow()
            return
        use2dview = root.settings.mainviz == "2D"
        if root.settings.mainviz == "3D":
            try:
                import printrun.gcview
                root.gviz = printrun.gcview.GcodeViewMainWrapper(parentpanel, root.build_dimensions_list, root = root, circular = root.settings.circular_bed, antialias_samples = int(root.settings.antialias3dsamples))
                root.gviz.clickcb = root.show_viz_window
            except:
                # NOTE(review): bare except also catches KeyboardInterrupt;
                # kept as-is since the failure is logged with a backtrace and
                # the code falls back to the 2D viewer.
                use2dview = True
                logging.error("3D view mode requested, but we failed to initialize it.\n"
                              + "Falling back to 2D view, and here is the backtrace:\n"
                              + traceback.format_exc())
        if use2dview:
            from printrun import gviz
            # Explicit 2D mode, or fallback after a failed 3D init.
            root.gviz = gviz.Gviz(parentpanel, (300, 300),
                                  build_dimensions = root.build_dimensions_list,
                                  grid = (root.settings.preview_grid_step1, root.settings.preview_grid_step2),
                                  extrusion_width = root.settings.preview_extrusion_width,
                                  bgcolor = root.bgcolor)
            # NOTE(review): `_` is presumably installed into builtins as the
            # gettext translation function at application startup — confirm.
            root.gviz.SetToolTip(wx.ToolTip(_("Click to examine / edit\n layers of loaded file")))
            root.gviz.showall = 1
            root.gviz.Bind(wx.EVT_LEFT_DOWN, root.show_viz_window)
        use3dview = root.settings.viz3d
        if use3dview:
            try:
                import printrun.gcview
                objects = None
                # Share the already-loaded model objects with the view frame.
                if isinstance(root.gviz, printrun.gcview.GcodeViewMainWrapper):
                    objects = root.gviz.objects
                root.gwindow = printrun.gcview.GcodeViewFrame(None, wx.ID_ANY, 'Gcode view, shift to move view, mousewheel to set layer', size = (600, 600), build_dimensions = root.build_dimensions_list, objects = objects, root = root, circular = root.settings.circular_bed, antialias_samples = int(root.settings.antialias3dsamples))
            except:
                # Same bare-except fallback pattern as above.
                use3dview = False
                logging.error("3D view mode requested, but we failed to initialize it.\n"
                              + "Falling back to 2D view, and here is the backtrace:\n"
                              + traceback.format_exc())
        if not use3dview:
            from printrun import gviz
            root.gwindow = gviz.GvizWindow(build_dimensions = root.build_dimensions_list,
                                           grid = (root.settings.preview_grid_step1, root.settings.preview_grid_step2),
                                           extrusion_width = root.settings.preview_extrusion_width,
                                           bgcolor = root.bgcolor)
        # Closing the standalone window only hides it so it can be reopened.
        root.gwindow.Bind(wx.EVT_CLOSE, lambda x: root.gwindow.Hide())
        if not isinstance(root.gviz, NoViz):
            self.Add(root.gviz.widget, 1, flag = wx.EXPAND | wx.ALIGN_CENTER_HORIZONTAL)
|
7,203 | c96ebfe41b778e85e954e2b7d6de4b078e72c81f | # The Minion Game
# Kevin and Stuart want to play the 'The Minion Game'.
# Your task is to determine the winner of the game and their score.
"""
Game Rules
Both players are given the same string, S.
Both players have to make substrings using the letters of the string S.
Stuart has to make words starting with consonants.
Kevin has to make words starting with vowels.
The game ends when both players have made all possible substrings.
Scoring
A player gets +1 point for each occurrence of the substring in the string S.
For Example:
String = BANANA
Kevin's vowel beginning word = ANA
Here, ANA occurs twice in BANANA. Hence, Kevin will get 2 Points.
Output Format
Print one line: the name of the winner and their score separated by a space.
If the game is a draw, print Draw.
"""
string = "BANANA"
vowels = "AEIOU"
Stuart = 0
Kevin = 0
for i in range(len(string)):
if string[i] in vowels:
Kevin += (len(string)-i)
else:
Stuart += (len(string)-i)
if Kevin > Stuart:
print("Kevin", Kevin)
elif Kevin<Stuart:
print("Stuart", Stuart)
else: print("Draw")
# string = "BANANA"
# N, *z = len(string), 0, 0
# for i,c in enumerate(string):
# z[int(c in "AEIOU")]+=N-i
# print(*reversed(max(zip(z,["Stuart","Kevin"]))) if z[0]!=z[1] else ["Draw"]) |
7,204 | a9e0659c6a18ffc954079845b7d0de04c46a78c9 | # -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Maps service instances to locations. See class.__doc__ """
from collections import defaultdict
from datetime import datetime
from sys import maxsize
from sqlalchemy import (Column, Integer, Sequence, DateTime, ForeignKey,
UniqueConstraint, CheckConstraint)
from sqlalchemy.orm import (relation, deferred, backref, defer, undefer,
lazyload, contains_eager, object_session)
from sqlalchemy.sql import and_, or_, null, case
from sqlalchemy.sql.functions import coalesce
from aquilon.exceptions_ import InternalError, AquilonError
from aquilon.aqdb.model import (Base, Location, Desk, Rack, Room, Bunker,
Building, City, Campus, Country, Continent, Hub,
Organization, ServiceInstance, Network, Personality,
PersonalityServiceListItem, HostEnvironment)
_TN = 'service_map'
# TODO: We could calculate this map by building a graph of Location subclasses
# using Location.valid_parents as edges, and then doing a topological sort
# NOTE: The actual values here are unimportant, what matters is their order
_LOCATION_PRIORITY = {
# Rack and Desk are at the same level
Rack: 1000,
Desk: 1000,
Room: 1100,
Bunker: 1200,
Building: 1300,
City: 1400,
Campus: 1500,
Country: 1600,
Continent: 1700,
Hub: 1800,
Organization: 1900,
}
# NOTE: The actual value here is unimportant, what matters is the order wrt.
# location-based priorities
_NETWORK_PRIORITY = 100
# NOTE: The actual values here are unimportant, only their order matters
_TARGET_PERSONALITY = 10
_TARGET_ENVIRONMENT = 100
_TARGET_GLOBAL = 1000
class ServiceMap(Base):
    """ Service Map: mapping a service_instance to a location.

    The rows in this table assert that an instance is a valid useable
    default that clients can choose as their provider during service
    autoconfiguration.

    The contained information is actually a triplet:

      - The service instance to use,
      - Rules for the scope where the map is valid,
      - Rules for which objects does the map apply.
    """

    __tablename__ = _TN

    id = Column(Integer, Sequence('%s_id_seq' % _TN), primary_key=True)

    # The mapped service instance; rows die with the instance (CASCADE).
    service_instance_id = Column(ForeignKey(ServiceInstance.id,
                                            ondelete='CASCADE'),
                                 nullable=False)

    # Target-object selectors: at most one of these may be set (enforced by
    # the check constraint below); both NULL means the map applies globally.
    personality_id = Column(ForeignKey(Personality.id, ondelete='CASCADE'),
                            nullable=True, index=True)
    host_environment_id = Column(ForeignKey(HostEnvironment.id), nullable=True)

    # Scope selectors: exactly one of location/network must be set
    # (enforced in __init__, not by a DB constraint).
    location_id = Column(ForeignKey(Location.id, ondelete='CASCADE'),
                         nullable=True, index=True)
    network_id = Column(ForeignKey(Network.id, ondelete='CASCADE'),
                        nullable=True, index=True)

    creation_date = deferred(Column(DateTime, default=datetime.now,
                                    nullable=False))

    service_instance = relation(ServiceInstance, innerjoin=True,
                                backref=backref('service_map',
                                                cascade="all, delete-orphan",
                                                passive_deletes=True))
    personality = relation(Personality)
    host_environment = relation(HostEnvironment)
    location = relation(Location)
    network = relation(Network)

    __table_args__ = (UniqueConstraint(service_instance_id,
                                       personality_id, host_environment_id,
                                       location_id, network_id,
                                       name='%s_uk' % _TN),
                      # At most one of personality_id and host_environment_id
                      # can be not NULL
                      CheckConstraint(case([(personality_id != null(), 1)], else_=0) +
                                      case([(host_environment_id != null(), 1)], else_=0) <= 1,
                                      name='%s_target_ck' % _TN))

    @property
    def service(self):
        """The service this map's instance belongs to."""
        return self.service_instance.service

    @property
    def scope_priority(self):
        """Priority of the scope selector; networks beat any location level."""
        if self.network:
            return _NETWORK_PRIORITY
        else:
            try:
                return _LOCATION_PRIORITY[type(self.location)]
            except KeyError:  # pragma: no cover
                raise InternalError("The service map is not prepared to handle "
                                    "location class %r" % type(self.location))

    @property
    def object_priority(self):
        """Priority of the target selector: personality < environment < global."""
        if self.personality:
            return _TARGET_PERSONALITY
        elif self.host_environment:
            return _TARGET_ENVIRONMENT
        else:
            return _TARGET_GLOBAL

    @property
    def priority(self):
        """Combined sort key; lower (more specific) tuples win."""
        return (self.object_priority, self.scope_priority)

    @property
    def scope(self):
        """The location or network this map is bound to."""
        if self.location:
            return self.location
        else:
            return self.network

    def __init__(self, service_instance, network=None, location=None, personality=None,
                 host_environment=None):
        # Enforce the "exactly one scope, at most one target" invariants early.
        if network and location:  # pragma: no cover
            raise AquilonError("A service can't be mapped to a Network and a "
                               "Location at the same time")
        if network is None and location is None:  # pragma: no cover
            raise AquilonError("A service should by mapped to a Network or a "
                               "Location")
        if personality and host_environment:  # pragma: no cover
            raise AquilonError("A service can't be mapped to a Personality and "
                               "a HostEnvironment at the same time")
        super(ServiceMap, self).__init__(service_instance=service_instance,
                                         network=network, location=location,
                                         personality=personality,
                                         host_environment=host_environment)

    @staticmethod
    def get_location_mapped_instances(dbservice, dblocation):
        # Simplified service map lookup - single service, location-based maps
        # only, no client bindings
        session = object_session(dbservice)

        # Consider the location itself plus all its ancestors.
        location_ids = [loc.id for loc in dblocation.parents]
        location_ids.append(dblocation.id)

        q = session.query(ServiceMap)
        # Global maps only: no personality/environment targeting.
        q = q.filter(and_(ServiceMap.personality_id == null(),
                          ServiceMap.host_environment_id == null()))
        q = q.filter(ServiceMap.location_id.in_(location_ids))
        q = q.join(ServiceInstance)
        q = q.filter_by(service=dbservice)
        q = q.options(contains_eager('service_instance'),
                      defer('service_instance.comments'),
                      lazyload('service_instance.service'))

        instances = []
        # Sentinel tuple compares greater than any real priority tuple.
        min_seen_priority = (maxsize,)

        # We want the instance(s) with the lowest priority
        for map in q:
            si = map.service_instance
            if min_seen_priority > map.priority:
                instances = [si]
                min_seen_priority = map.priority
            elif min_seen_priority == map.priority:
                instances.append(si)

        return instances

    @staticmethod
    def get_mapped_instance_cache(dbservices, dbstage, dblocation,
                                  dbnetwork=None):
        """Returns dict of requested services to closest mapped instances."""
        session = object_session(dblocation)

        # Consider the location itself plus all its ancestors.
        location_ids = [loc.id for loc in dblocation.parents]
        location_ids.append(dblocation.id)

        PSLI = PersonalityServiceListItem

        q = session.query(ServiceMap)
        q = q.join(ServiceInstance)
        q = q.filter(ServiceInstance.service_id.in_(srv.id for srv in dbservices))
        # Pull in any per-personality service list entry to resolve the
        # effective host environment for the stage.
        q = q.outerjoin(PSLI, and_(PSLI.personality_stage_id == dbstage.id,
                                   PSLI.service_id == ServiceInstance.service_id))

        # Rules for filtering by target object
        q = q.filter(or_(
            and_(ServiceMap.personality_id == null(),
                 ServiceMap.host_environment_id == null()),
            ServiceMap.personality == dbstage.personality,
            ServiceMap.host_environment_id == coalesce(
                PSLI.host_environment_id,
                dbstage.personality.host_environment.id)))

        # Rules for filtering by location/scope
        if dbnetwork:
            q = q.filter(or_(ServiceMap.location_id.in_(location_ids),
                             ServiceMap.network_id == dbnetwork.id))
        else:
            q = q.filter(ServiceMap.location_id.in_(location_ids))

        q = q.options(contains_eager('service_instance'),
                      defer('service_instance.comments'),
                      undefer('service_instance._client_count'),
                      lazyload('service_instance.service'))

        instance_cache = {}
        # Per-service sentinel that compares greater than any real priority.
        instance_priority = defaultdict(lambda: (maxsize,))

        # For every service, we want the instance(s) with the lowest priority
        for map in q:
            si = map.service_instance
            service = si.service
            if instance_priority[service] > map.priority:
                instance_cache[service] = [si]
                instance_priority[service] = map.priority
            elif instance_priority[service] == map.priority:
                instance_cache[service].append(si)

        return instance_cache
|
7,205 | 5aecd021297fee4407d6b529c24afb3c6398f7ba | """
@File : jump.py
@copyright : GG
@Coder: Leslie_s
@Date: 2020/1/26
"""
import requests
from lxml import html
import pandas as pd
import time
import pandas as pd
import datetime
import re
import json
# Request headers mimicking a desktop Chrome browser.
# NOTE(review): 'upgrade - insecure - requests' contains stray spaces and so
# is not the standard 'Upgrade-Insecure-Requests' header — confirm intent.
headers = {
    'accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange',
    'accept-encoding':'gzip, deflate, br',
    'accept-language':'zh-CN,zh;q=0.8',
    'upgrade - insecure - requests': '1',
    'user-agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',# fill in as needed
}
# DXY pneumonia (2020 outbreak) statistics page.
url = 'https://3g.dxy.cn/newh5/view/pneumonia?scene=2&clicktime=1579582238&enterid=1579582238&from=timeline&isappinstalled=0'
r = requests.get(url, headers=headers,timeout=15,allow_redirects=False)
r.encoding='utf-8'
t1 = html.fromstring(r.text)
doc = r.text
# Regex capturing province name / short name / confirmed count from the JSON
# embedded in the page (unused below; gg_a is the variant actually applied).
test_com = r'(?P<first>"provinceName":"[\u4e00-\u9fa5]{1,9}"),(?P<second>"provinceShortName":"[\u4e00-\u9fa5]{1,9}"),(?P<three>"confirmedCount":\d{1,9})'
iter_dict = {}
gg_a = r'provinceName":(?P<first>"[\u4e00-\u9fa5]{1,9}"),"provinceShortName":(?P<second>"[\u4e00-\u9fa5]{1,9}"),"confirmedCount":(?P<three>\d{1,9})'
# NOTE(review): this rebinds ``r`` (previously the HTTP response object) to
# the regex match iterator — a distinct name would be clearer.
r=re.finditer(gg_a,doc)
train = re.findall(gg_a,doc)
for i in r:
    print(i.group(1))
    # Groups 1 and 2 still carry their surrounding double quotes
    # (e.g. '"湖北省"'); group 3 is the bare digit string.
    provinceName=i.group(1)
    provinceShortName=i.group(2)
    confirmedCount=i.group(3)
    # setdefault keeps the first count seen for each short name.
    iter_dict.setdefault( provinceShortName,confirmedCount)
#
# result = re.finditer(test_com,r.text)
# for i in result:
# print(i.group(1))
#
# search = re.finditer(test_com, r.text)
# print('group 0:', search.group(0))
# list_provincename=[]
# list_confircount=[]
# for match in matches_pro:
# print(match.group(1))
# list_provincename.append(match.group(1))
# for match in matches_confirmedCount:
# print(match.group(1))
# list_confircount.append(match.group(1))
#
# dic_result = dict(zip(list_confircount,list_provincename))
#
|
7,206 | 6a1f58af26bbc4d584ffd699c512ef433ffb80d8 | from selenium import webdriver
# Open the app and check that the language dropdown shows Chinese.
driver = webdriver.Chrome()
driver.get("http://192.168.1.248:9079/#/")
lanuage = driver.find_element_by_class_name("el-dropdown-trigger-text")
print(type(lanuage))
print(lanuage.text)
# BUG FIX: the original evaluated ``....text == "中文"`` inside a try block and
# discarded the result, then caught EOFError (which this code can never
# raise), so "符合要求" was printed unconditionally.  Compare explicitly.
if driver.find_element_by_class_name("el-dropdown-trigger-text").text == "中文":
    print("符合要求")
else:
    print("不是中文")
# driver.find_element_by_link_text("简体中文")
|
7,207 | cd322f9771f1ac90931a7229ffd5effd1cae1a54 | print("hello world")
print("welcome to london") |
7,208 | e886b88a0b7e8c06772fe8a9554cab1bfe9e94a7 | '''
runSPP.py - wrap spp peak caller
========================================
:Tags: Python
Purpose
-------
Runs the spp peak caller.
The workflow follows the tutorial at:
http://compbio.med.harvard.edu/Supplements/ChIP-seq/tutorial.html
Usage
-----
Documentation
-------------
Requirements:
* spp >= ?
* snow >= 0.3.13
* bedtools >= 2.21.0
Code
----
'''
import os
import sys
import subprocess
import collections
from cgatcore import experiment as E
from rpy2.robjects import r as R
def bamToBed(infile, outfile):
    '''Convert *infile* (BAM) to *outfile* (BED) with bedtools' bamToBed.

    Returns *outfile* on success.  Raises OSError when the child process is
    killed by a signal or exits with a non-zero status.
    '''
    statement = "bamToBed -i %(infile)s > %(outfile)s" % locals()

    E.debug("executing statement '%s'" % statement)

    retcode = subprocess.call(statement,
                              cwd=os.getcwd(),
                              shell=True)
    if retcode < 0:
        raise OSError("Child was terminated by signal %i: \n%s\n" %
                      (-retcode, statement))
    elif retcode > 0:
        # BUG FIX: previously a bamToBed failure with a non-zero exit status
        # was silently ignored and the missing/truncated outfile returned.
        raise OSError("Child returned non-zero exit status %i: \n%s\n" %
                      (retcode, statement))
    return outfile
# One refined SPP peak call; fields mirror the caller's 12-column table
# minus the leading PEAKID column.
SPPPeak = collections.namedtuple(
    "SPPPeak",
    "contig unrefined_start unrefined_end strand "
    "posterior summit height refined_start refined_end median fdr")


def iteratePeaks(infile):
    '''iterate of zinba peaks in infile.'''

    for line in infile:
        # Skip comment lines, the header row and blank lines.
        if line.startswith(("#", "PEAKID\tChrom", "\n")):
            continue

        data = line[:-1].split("\t")
        if len(data) != 12:
            raise ValueError("could not parse line %s" % line)

        # Start-like coordinates arrive 1-based: shift to 0-based, clamp at 0
        # (columns: unrefined start, summit, refined start).
        for col in (2, 6, 8):
            data[col] = max(int(data[col]) - 1, 0)
        # Plain integer columns: end, height, refined end, median.
        for col in (3, 7, 9, 10):
            data[col] = int(data[col])
        data[5] = float(data[5])    # posterior
        data[11] = float(data[11])  # fdr / qvalue

        # Drop the PEAKID column and build the record.
        yield SPPPeak._make(data[1:])
def main(argv=None):
    """script main.

    parses command line options in sys.argv, unless *argv* is given.

    Expects two positional arguments: the sample BAM file and the output
    prefix.  Drives the SPP peak caller through an embedded R session and
    writes cross-correlation, broadPeak, summit and narrowPeak files.
    """

    if not argv:
        argv = sys.argv

    # setup command line parser
    parser = E.OptionParser(version="%prog version: $Id$",
                            usage=globals()["__doc__"])

    parser.add_option("-f", "--input-format", dest="input_format",
                      type="choice",
                      choices=("bam",),
                      help="input file format [default=%default].")

    parser.add_option("-w", "--window-size", dest="window_size", type="int",
                      help="window size [default=%default].")

    parser.add_option("-c", "--control-filename",
                      dest="control_filename",
                      type="string",
                      help="filename of input/control data in "
                      "bed format [default=%default].")

    parser.add_option("-t", "--threads", dest="threads", type="int",
                      help="number of threads to use [default=%default].")

    parser.add_option("-q", "--fdr-threshold",
                      dest="fdr_threshold", type="float",
                      help="fdr threshold [default=%default].")

    parser.add_option("-z", "--spp-z-threshold", dest="z_threshold", type="float",
                      help="z threshold [default=%default].")

    parser.add_option("--bin", dest="bin", type="int",
                      help="bin tags within the specified number "
                      " of basepairs to speed up calculation;"
                      " increasing bin size decreases the accuracy "
                      "of the determined parameters [default=%default]")

    parser.add_option("--spp-srange-min", dest="srange_min", type="float",
                      help="srange gives the possible range for the "
                      " size of the protected region;"
                      " srange should be higher than tag length; "
                      " making the upper boundary too high"
                      " will increase calculation time [%default]")

    parser.add_option("--spp-srange-max", dest="srange_max", type="float",
                      help="srange gives the possible range for the "
                      " size of the protected region;"
                      " srange should be higher than tag length; "
                      " making the upper boundary too high"
                      " will increase calculation time [%default]")

    parser.set_defaults(
        input_format="bam",
        threads=1,
        fdr_threshold=0.05,
        window_size=1000,
        offset=125,
        srange_min=50,
        srange_max=500,
        bin=5,
        z_threshold=3,
    )

    # add common options (-h/--help, ...) and parse command line
    (options, args) = E.start(parser, argv=argv)

    if len(args) != 2:
        raise ValueError(
            "please specify a filename with sample data and an output file")

    filename_sample, filename_output = args[0], args[1]
    filename_control = options.control_filename

    # load the spp and snow R packages into the embedded session
    R.library('spp')
    R.library('snow')

    # read data
    E.info("reading data")
    R('''chip.data <- read.bam.tags('%s')''' % filename_sample)
    R('''input.data <- read.bam.tags('%s')''' % filename_control)
    R('''cluster = makeCluster( %i )''' % (options.threads))

    E.info("computing binding characteristics")
    # get binding info from cross-correlation profile

    # srange gives the possible range for the size of the protected region;
    # srange should be higher than tag length; making the upper boundary too
    # high will increase calculation time

    # bin - bin tags within the specified number of basepairs to speed
    # up calculation; increasing bin size decreases the accuracy of
    # the determined parameters
    srange_min, srange_max = options.srange_min, options.srange_max
    bin = options.bin
    R('''binding.characteristics <- get.binding.characteristics(chip.data,
    srange=c(%(srange_min)i,%(srange_max)i),
    bin=%(bin)s,
    cluster=cluster);''' % locals())

    # print out binding peak separation distance
    options.stdout.write(
        "shift\t%i\n" % R('''binding.characteristics$peak$x''')[0])

    ##################################################
    ##################################################
    ##################################################
    E.info("plot cross correlation profile")
    # plot cross-correlation profile
    R('''pdf(file="%s.crosscorrelation.pdf",width=5,height=5)''' %
      filename_output)
    R('''par(mar = c(3.5,3.5,1.0,0.5), mgp = c(2,0.65,0), cex = 0.8);''')
    R('''plot(binding.characteristics$cross.correlation,
    type='l',
    xlab="strand shift",
    ylab="cross-correlation");''')
    R('''abline(v=binding.characteristics$peak$x,lty=2,col=2)''')
    R('''dev.off();''')

    E.info("selecting informative tags based on the binding characteristics")
    # select informative tags based on the binding characteristics
    R('''chip.data <- select.informative.tags(
    chip.data,binding.characteristics);''')
    R('''input.data <- select.informative.tags(
    input.data,binding.characteristics);''')

    E.info("outputting broad peaks")
    window_size, z_threshold = options.window_size, options.z_threshold
    R('''broad.clusters <- get.broad.enrichment.clusters(chip.data,input.data,
    window.size=%(window_size)i,
    z.thr=%(z_threshold)f,
    tag.shift=round(binding.characteristics$peak$x/2))''' % locals())
    # write out in broadPeak format
    R('''write.broadpeak.info(broad.clusters,"%s.broadpeak.txt")''' %
      filename_output)

    # binding detection parameters desired FDR (1%). Alternatively, an
    # E-value can be supplied to the method calls below instead of the
    # fdr parameter the binding.characteristics contains the optimized
    # half-size for binding detection window
    R('''detection.window.halfsize <- binding.characteristics$whs;''')

    # determine binding positions using wtd method
    E.info("determining binding positions using wtd method")
    fdr = options.fdr_threshold
    R('''bp <- find.binding.positions(
    signal.data=chip.data,control.data=input.data,
    fdr=%(fdr)f,whs=detection.window.halfsize,cluster=cluster)''' % locals())

    options.stdout.write("detected_peaks\t%i\n" % R(
        '''sum(unlist(lapply(bp$npl,function(d) length(d$x))))''')[0])

    # output detected binding positions
    R('''output.binding.results(bp,"%s.summit.txt");''' % filename_output)

    R('''bp <- add.broad.peak.regions(chip.data,input.data,bp,
    window.size=%(window_size)i,z.thr=%(z_threshold)f)''' % locals())

    # output using narrowPeak format
    R('''write.narrowpeak.binding(bp,"%s.narrowpeak.txt")''' %
      filename_output)

    # write footer and output benchmark information.
    E.stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
7,209 | 72abba6fa40441ab172bccb9065aaa0af5fefd64 | import requests
import json
import pyttsx
# Text-to-speech engine (NOTE(review): pyttsx is the Python-2-era package;
# pyttsx3 is its Python 3 successor — confirm target interpreter).
engine = pyttsx.init()
engine.say('Hello from Eliq.')
engine.runAndWait()
power_value = 0
power_value_int = 0
prompt=0
Eliq_just_NOW ={}
# NOTE(review): placeholder — must be replaced with a real Eliq API token.
accesstoken = "xxxxxxxxxxxxxxxxxxxxxx"
# Say warning for power use over this limit in Watts
level_warning = 2000
# Query the current power reading for the fixed channel.
Eliq_request_string = ('https://my.eliq.io/api/datanow?accesstoken={}&channelid=32217'.format(accesstoken))
response = requests.get (Eliq_request_string)
Eliq_just_NOW = (response.json())
power_value = Eliq_just_NOW['power']
power_value_int = int (float (power_value))
power_str = ('Power is {} Watts'.format(power_value_int))
print (power_str)
# Speak the reading; append "Warning." above the threshold, "Good." below.
if power_value_int > level_warning:
    engine.say(power_str)
    engine.say('Warning.')
    engine.runAndWait()
else:
    engine.say(power_str)
    engine.say('Good.')
    engine.runAndWait()
|
7,210 | 54002bc7e2a1991d2405acbe1d399e8803ac5582 | ##
# hunt_and_kill.py
# 05 Oct 2021
# Generates a maze using the hunt and kill algorithm
# S
from sys import argv
from enum import Enum
import random
# Cardinal directions, can be OR'd and AND'd
DIRS = {
'N': 1 << 0,
'E': 1 << 1,
'S': 1 << 2,
'W': 1 << 3
}
O_DIRS = {
'N': 'S',
'E': 'W',
'S': 'N',
'W': 'E'
}
def init_maze(width: int, height: int) -> list[int]:
    """
    Build a flat width*height grid of zeroes — an empty maze with no
    passages carved yet.
    """
    return [0 for _ in range(width * height)]
def walk_maze(maze: list[int], width: int, height: int, start: tuple[int, int]) -> None:
    """
    Does a random walk, setting the cells as it goes, until it cant find a
    path.

    Mutates *maze* in place: each visited cell gets direction bits OR'd in
    (see DIRS) describing which of its walls are open.
    """
    # Shortcut for accessing maze
    maze_idx = lambda p: p[1] * width + p[0]
    # Shortcut funcs for surrounding points
    north = lambda p: (p[0] , p[1] -1)
    east = lambda p: (p[0] +1, p[1] )
    south = lambda p: (p[0] , p[1] +1)
    west = lambda p: (p[0] -1, p[1] )

    def check_neighbours(pt, visited=False) -> dict[tuple[int, int], str]:
        """
        Returns a dict of possible neighbours (point -> direction letter).
        Can pass arg to only count visited neighbours
        """
        # Points will be added to this list if they havent been traversed yet
        possible_points = dict()
        # -- NORTH
        p_pt = north(pt)
        # This mess of a condition will evaluate to true if the cell is visited and the user is asking for a visited cell. Viceversa.
        # (``False or visited`` is just ``visited``; the cell's visited state
        # must match what the caller asked for.)
        if pt[1] > 0 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):
            possible_points[p_pt] = "N"
        # -- EAST
        p_pt = east(pt)
        if pt[0] < width - 1 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):
            possible_points[p_pt] = "E"
        # -- SOUTH
        p_pt = south(pt)
        if pt[1] < height - 1 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):
            possible_points[p_pt] = "S"
        # -- WEST
        p_pt = west(pt)
        if pt[0] > 0 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):
            possible_points[p_pt] = "W"
        return possible_points

    # First, connect to a random neighbour that has been visited.
    # (Links this new walk into the already-carved part of the maze.)
    starting_n = check_neighbours(start, True)
    if starting_n:
        neigh, dire = random.choice(tuple(starting_n.items()))
        maze[maze_idx(neigh)] |= DIRS[O_DIRS[dire]]
        maze[maze_idx(start)] |= DIRS[dire]

    step = start
    # Walk randomly until out of options
    while possible_n := check_neighbours(step):
        next_step, direction = random.choice(tuple(possible_n.items()))
        # Connect the two cells
        maze[maze_idx(step)] |= DIRS[direction]
        maze[maze_idx(next_step)] |= DIRS[O_DIRS[direction]]
        # Go to next
        step = next_step
def gen_maze(width: int, height: int) -> list[int]:
    """
    Hunt-and-kill driver: scan the grid row by row and start a new random
    walk from every still-unvisited cell, until the whole maze is carved.
    """
    grid = init_maze(width, height)
    for row in range(height):
        for col in range(width):
            if grid[row * width + col] == 0:
                walk_maze(grid, width, height, (col, row))
    return grid
def print_maze(maze: list[int], width: int, height: int) -> None:
    """Render the maze as ASCII art, one text line per row of cells."""
    idx = lambda p: p[1] * width + p[0]
    # Top border
    print(' ' + '_' * (2 * width - 1))
    for y in range(height):
        row_chars = []
        for x in range(width):
            cell = maze[idx((x, y))]
            # West side of the cell: open passage, floor, or wall.
            if cell & DIRS["W"]:
                # Leave the gap fully open if we can also go down.
                row_chars.append(' ' if cell & DIRS["S"] else '_')
            else:
                row_chars.append('|')
            # Floor of the cell: open when a south passage exists.
            row_chars.append(' ' if cell & DIRS["S"] else '_')
        # Right border closes every row.
        print(''.join(row_chars) + '|')
def main():
    """Parse optional CLI size arguments, build a maze, print and return it."""
    if len(argv) > 2:
        width, height = int(argv[1]), int(argv[2])
    else:
        # Default to a 10x10 maze when no size is supplied.
        width = height = 10
    print(f"Generating maze size {width}x{height}")
    generated = gen_maze(width, height)
    print_maze(generated, width, height)
    return generated


if __name__ == "__main__":
    main()
|
7,211 | 5a0a8205977e59ff59a5d334a487cf96eee514d2 | from flask import render_template
from database import db
from api import app
from models import create_models
# Create a URL route in application for "/"
@app.route('/')
def home():
    """Render the landing page template for the site root."""
    return render_template('home.html')
# If in stand alone mode, run the application
if __name__ == '__main__':
    db.connect()     # open the database connection before serving requests
    create_models()  # make sure the model tables exist
    app.run(debug=True)  # NOTE(review): debug=True is unsafe for production
7,212 | 5ca990bdcbe9378747e438015beb46760b1e987b | import vigra
import os
import sys
import time
import json
from simpleference.inference.inference import run_inference_n5
# from simpleference.backends.pytorch import PyTorchPredict
from simpleference.backends.pytorch import InfernoPredict
from simpleference.backends.pytorch.preprocess import preprocess
def single_gpu_inference(sample, gpu):
    """
    Run block-wise affinity prediction for one CREMI sample on one GPU worker.

    Loads the offset list assigned to this gpu id, runs the Inferno model
    over the raw n5 volume and writes predictions plus a timing file into
    the (pre-existing) output n5 container.

    :param sample: CREMI sample identifier substituted into the fixed paths
    :param gpu: worker id; selects the offset file and names the timing file
    :return: True on completion
    """
    raw_path = '/groups/saalfeld/home/papec/Work/neurodata_hdd/cremi_warped/sample%s_inference.n5' % sample
    model_path = '/groups/saalfeld/home/papec/Work/neurodata_hdd/networks/neurofire/mws/unet-1/Weights'
    out_file = '/groups/saalfeld/home/papec/Work/neurodata_hdd/networks/neurofire/mws/unet-1/Predictions/prediction_sample%s.n5' % sample
    # The output container must already exist; this job only fills it in.
    assert os.path.exists(out_file)
    offset_file = './offsets_sample%s/list_gpu_%i.json' % (sample, gpu)
    with open(offset_file, 'r') as f:
        offset_list = json.load(f)
    # Input is larger than output: the network crops its prediction.
    input_shape = (40, 405, 405)
    output_shape = (32, 320, 320)
    # NOTE(review): gpu=0 is hard-coded while the `gpu` arg only picks files —
    # presumably each worker sees a single device (e.g. CUDA_VISIBLE_DEVICES);
    # confirm before reusing.
    prediction = InfernoPredict(model_path, crop=output_shape, gpu=0)
    postprocess = None
    t_predict = time.time()
    run_inference_n5(prediction,
                     preprocess,
                     postprocess,
                     raw_path,
                     out_file,
                     offset_list,
                     input_key='data',
                     target_keys='full_affs',
                     input_shape=input_shape,
                     output_shape=output_shape,
                     channel_order=[list(range(19))])
    t_predict = time.time() - t_predict
    # Record wall-clock inference time next to the predictions.
    with open(os.path.join(out_file, 't-inf_gpu%i.txt' % gpu), 'w') as f:
        f.write("Inference with gpu %i in %f s" % (gpu, t_predict))
    return True
if __name__ == '__main__':
    # CLI: <sample> <gpu-id>
    sample = sys.argv[1]
    gpu = int(sys.argv[2])
    single_gpu_inference(sample, gpu)
|
7,213 | f5d285b3a82151b5d7efdcd07d56cc5aaaac5836 | import sys
import requests
def ggwave(message: str, protocolId: int = 1, sampleRate: float = 48000, volume: int = 50, payloadLength: int = -1):
    """
    Encode *message* into an audio waveform via the ggwave web service.

    :param message: text to encode
    :param protocolId: transmission protocol to use
    :param sampleRate: output sample rate in Hz
    :param volume: output volume
    :param payloadLength: if positive, use fixed-length encoding
    :return: the requests Response whose ``content`` is the audio file
    :raises SyntaxError: when the request fails or returns an empty body
    """
    url = 'https://ggwave-to-file.ggerganov.com/'
    params = {
        'm': message,        # message to encode
        'p': protocolId,     # transmission protocol to use
        's': sampleRate,     # output sample rate
        'v': volume,         # output volume
        'l': payloadLength,  # if positive - use fixed-length encoding
    }
    response = requests.get(url, params=params)
    # BUG FIX: a requests.Response never compares equal to '', so the old
    # `response == ''` check could not detect failures. Check the HTTP status
    # and an empty body instead (SyntaxError kept for caller compatibility).
    if not response.ok or not response.content:
        raise SyntaxError('Request failed')
    return response
# Example usage: encode a message and stream the raw audio file to stdout.
result = ggwave("Hello world!")
sys.stdout.buffer.write(result.content)
|
7,214 | 3f3db7e8813f49fe0265e110236b6dc4fed6cd1b | import inspect
import json
import os
import re
import urllib.request
from functools import wraps
from ..errors import NotFoundError
class API:
    """Endpoint URL container; also loads the library version and brawler ids."""

    def __init__(self, base_url, version=1):
        """
        Derive every endpoint from *base_url* (or the default host when falsy)
        and eagerly fetch the brawler constants table.

        :param base_url: custom API root url, or None/'' for the default host
        :param version: API version interpolated into the default root url
        """
        self.BASE = base_url or 'https://api.starlist.pro/v{}'.format(version)
        self.PROFILE = self.BASE + '/player'
        self.CLUB = self.BASE + '/club'
        self.LEADERBOARD = self.BASE + '/leaderboards'
        self.EVENTS = self.BASE + '/events'
        self.MISC = self.BASE + '/misc'
        self.BATTLELOG = self.PROFILE + '/battlelog'
        self.CLUB_SEARCH = self.CLUB + '/search'
        self.CONSTANTS = 'https://fourjr.herokuapp.com/bs/constants/'
        # self.BRAWLERS = [
        #     'shelly', 'nita', 'colt', 'bull', 'jessie', # league reward 0-500
        #     'brock', 'dynamike', 'bo', 'tick', '8-bit' # league reward 1000+
        #     'el primo', 'barley', 'poco', 'rosa', # rare
        #     'rico', 'penny', 'darryl', 'carl', # super rare
        #     'frank', 'pam', 'piper', 'bibi', # epic
        #     'mortis', 'tara', 'gene', # mythic
        #     'spike', 'crow', 'leon' # legendary
        # ]
        # Read __version__ out of the package's __init__.py (one dir up).
        path = os.path.join(os.path.dirname(__file__), os.path.pardir)
        with open(os.path.join(path, '__init__.py')) as f:
            self.VERSION = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', f.read(), re.MULTILINE).group(1)
        # Fetch the brawler constants; fall back to an empty mapping when the
        # constants service is unreachable or returns nothing.
        try:
            data = json.loads(urllib.request.urlopen(self.CONSTANTS).read())
        except (TypeError, urllib.error.HTTPError, urllib.error.URLError):
            self.BRAWLERS = {}
        else:
            if data:
                # name -> scId rendered with a '0' spliced in after the first
                # two digits (matches the API's brawler id formatting).
                self.BRAWLERS = {b['tID'].lower(): str(b['scId'])[:2] + '0' + str(b['scId'])[2:] for b in data['characters'] if b['tID']}
            else:
                self.BRAWLERS = {}
def bstag(tag):
    """Normalise a player/club tag and validate it against the tag alphabet.

    Strips '#' wrappers, upper-cases, and maps the letter O to the digit 0.
    Raises NotFoundError (404) for tags that are too short or contain
    characters outside the Brawl Stars tag alphabet.
    """
    VALID_CHARS = '0289PYLQGRJCUV'
    cleaned = tag.strip('#').upper().replace('O', '0')
    if len(cleaned) < 3:
        raise NotFoundError('Tag less than 3 characters.', 404)
    bad_chars = [ch for ch in cleaned if ch not in VALID_CHARS]
    if bad_chars:
        raise NotFoundError(bad_chars, 404)
    return cleaned
def typecasted(func):
    '''Decorator that applies each parameter's annotation as a converter to
    the incoming arguments before calling *func*.
    Adapted from: https://github.com/cgrok/clashroyale/blob/master/clashroyale/official_api/utils.py#L11'''
    params = inspect.signature(func).parameters.items()

    @wraps(func)
    def wrapper(*args, **kwargs):
        remaining = list(args)
        cast_args = []
        cast_kwargs = {}
        for _, param in params:
            cast = param.annotation
            if cast is inspect._empty:
                # Unannotated parameter: pass the value through untouched.
                cast = lambda value: value
            if param.kind is param.POSITIONAL_OR_KEYWORD:
                if remaining:
                    cast_args.append(cast(remaining.pop(0)))
            elif param.kind is param.VAR_POSITIONAL:
                # Convert everything left over for *args parameters.
                cast_args.extend(cast(value) for value in remaining)
            else:
                # Keyword parameters: the converter maps (key, value) pairs.
                for key, value in kwargs.items():
                    new_key, new_value = cast(key, value)
                    cast_kwargs[new_key] = new_value
        return func(*cast_args, **cast_kwargs)
    return wrapper
|
7,215 | fe13b57484e0f0796164fda99c0d759238a67153 | from population import Population
class REvolution:
    """(1+1)-style evolutionary loop around a single-individual population."""

    def __init__(self, original_ind, combine_params, mutate_params, fitness, pop_params, method):
        self.population = Population(1, fitness, pop_params)
        self.combine_params = combine_params
        self.mutate_params = mutate_params
        self.fitness = fitness
        self.method = method
        self.result = []  # best fitness recorded after each epoch
        self.original_ind = original_ind

    def _evolve(self, epochs, random_mutation):
        # Shared epoch loop; `random_mutation` selects the mutation operator.
        for epoch in range(epochs):
            parent = self.population.individuals[0]
            child = parent.compare(self.original_ind, self.combine_params, self.fitness)
            if random_mutation:
                child.mutate_random(self.mutate_params)
            else:
                child.mutate(self.mutate_params)
            self.population.arrange_population([child])
            print("Epoch {}: {}".format(epoch, self.get_pop()))
            self.result.append(self.fitness(self.population.individuals[-1].value))

    def run_random(self, epochs):
        """Evolve for *epochs* using random mutation."""
        self._evolve(epochs, random_mutation=True)

    def run_1_1(self, epochs):
        """Evolve for *epochs* using the standard (1+1) mutation."""
        self._evolve(epochs, random_mutation=False)

    def get_pop(self):
        """Return human-readable 'x => y' strings for every individual."""
        return ["x: {} => y: {}".format("%.3f" % ind.value[0], "%.3f" % self.fitness(ind.value))
                for ind in self.population.individuals]
|
7,216 | 2730b2a1016f306936dcac3c3b44a3fd7194bac6 | #
# LeetCode
# ver.Python
#
# Created by GGlifer
#
# Open Source
"""
21. Merge Two Sorted Lists
"""
from typing import List
import sys
# Definition for singly-linked list.
class ListNode:
    """One node of a singly-linked list."""

    def __init__(self, val=0, next=None):
        self.val = val    # payload stored in this node
        self.next = next  # following node, or None at the tail
class Solution:
    def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
        """Merge two ascending lists; merged nodes are freshly allocated."""
        dummy = ListNode(-sys.maxsize)  # sentinel head, dropped on return
        tail = dummy
        while l1 and l2:
            if l1.val <= l2.val:
                tail.next = ListNode(l1.val)
                l1 = l1.next
            else:
                tail.next = ListNode(l2.val)
                l2 = l2.next
            tail = tail.next
        # At most one list still has nodes; attach its remainder as-is.
        tail.next = l1 if l1 else l2
        return dummy.next
def make_list(_l: List):
    """Build a linked list from a non-empty Python list; return its head."""
    head = ListNode(_l[0])
    tail = head
    for item in _l[1:]:
        tail.next = ListNode(item)
        tail = tail.next
    return head
def print_list(l: ListNode):
    """Print node values space-separated on a single line."""
    node = l
    while node is not None:
        print(node.val, end=' ')
        node = node.next
    print()
if __name__ == "__main__":
    # Manual check: merging [1,2,4] and [1,3,4] should print "1 1 2 3 4 4".
    solution = Solution()
    _l1 = [1, 2, 4]
    l1 = make_list(_l1)
    _l2 = [1, 3, 4]
    l2 = make_list(_l2)
    print_list(solution.mergeTwoLists(l1, l2))
|
7,217 | b7f6207fe6c013a964258255445004c3f4e0adbb | #!/usr/bin/env python
# -*- coding: utf-8 -*-
class CacheDecorator:
    """Class-based memoisation decorator: results are cached per args tuple."""

    def __init__(self):
        self.cache = {}   # maps argument tuples to computed results
        self.func = None  # the wrapped function, set in __call__

    def cachedFunc(self, *args):
        """Return the cached result for *args*, computing it on first use."""
        if args in self.cache:
            print("Ergebnis geladen")
        else:
            print("Ergebnis berechnet")
            self.cache[args] = self.func(*args)
        return self.cache[args]

    def __call__(self, func):
        # Using the instance as a decorator stores the target function and
        # hands back the caching wrapper.
        self.func = func
        return self.cachedFunc
@CacheDecorator()
def fak(n):
    """Return n! computed iteratively (results are memoised by the decorator)."""
    product = 1
    factor = 2
    while factor <= n:
        product *= factor
        factor += 1
    return product
# Demo: the repeated calls for 20 and 10 are served from the cache.
print(fak(10))
print(fak(20))
print(fak(20))
print(fak(10))
|
7,218 | 999c19fd760ffc482a15f5a14e188d416fcc5f21 | from django import template
from apps.account.models import User, Follow, RequestFollow
from apps.post.models import Post
# Registry that every template tag in this module attaches to.
register = template.Library()
@register.inclusion_tag('user/user_list.html')
def user_list():
    """Render the user-list template with every registered user."""
    return {"users": User.objects.all()}
# @register.inclusion_tag('user/following_post_list.html')
# def following_post_list(pk):
# """show user following
# input:pk user
# output: list following
# """
# following = Follow.objects.following(pk)
# posts = Post.objects.filter(author__email__in=following).values('title', 'author__email')
# return {'posts': posts}
# @register.simple_tag()
# def send_request_follow(pk_login_user, pk_other_user):
# """
# Follow the user
# :param pk_login_user:
# :param pk_other_user:
# :return: message
# """
# return RequestFollow.objects.request_following_user(pk_login_user, pk_other_user)
@register.simple_tag()
def accept_request(pk_login_user, pk_other_user):
    """Accept the follow request from pk_other_user and report it."""
    RequestFollow.objects.accept_request(pk_login_user, pk_other_user)
    return "accept request"
@register.simple_tag()
def delete_request(pk_login_user, pk_other_user):
    """Delete the follow request from pk_other_user and report it."""
    RequestFollow.objects.delete_request(pk_login_user, pk_other_user)
    return "delete request"
@register.simple_tag()
def count_followers(pk):
    """Return how many users follow the user identified by pk."""
    return len(Follow.objects.followers(pk))
@register.simple_tag()
def count_following(pk):
    """Return how many users the user identified by pk follows."""
    return len(Follow.objects.following(pk))
|
7,219 | 9690366a88a87951f5c51902118888cce8159ffc | from SPARQLWrapper import SPARQLWrapper, JSON
# Shared client pointing at the local Fuseki query endpoint.
sparql = SPARQLWrapper(
    'http://localhost:3030/ds/query'
)
# Pizzas
def get_response_pizzas():
    """Return the Spanish labels of every pizza name (saidi:NamePizza)."""
    sparql.setQuery('''
        PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
        PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>
        SELECT DISTINCT ?name
        WHERE {
            ?s rdfs:subClassOf saidi:NamePizza .
            ?s rdfs:label ?name
            FILTER (lang(?name) = 'es')
        }
    ''')
    sparql.setReturnFormat(JSON)
    return sparql.query().convert()
# CarnesTopping
def get_response_carnes():
    """Return the Spanish labels of every meat topping (saidi:CarnesTopping)."""
    sparql.setQuery('''
        PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
        PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>
        SELECT DISTINCT ?name
        WHERE {
            ?s rdfs:subClassOf saidi:CarnesTopping .
            ?s rdfs:label ?name
            FILTER (lang(?name) = 'es')
        }
    ''')
    sparql.setReturnFormat(JSON)
    return sparql.query().convert()
# EmbutidosTopping
def get_response_embutidos():
    """Return the Spanish labels of every sausage topping (saidi:EmbutidosTopping)."""
    sparql.setQuery('''
        PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
        PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>
        SELECT DISTINCT ?name
        WHERE {
            ?s rdfs:subClassOf saidi:EmbutidosTopping .
            ?s rdfs:label ?name
            FILTER (lang(?name) = 'es')
        }
    ''')
    sparql.setReturnFormat(JSON)
    return sparql.query().convert()
# EspeciasTopping
def get_response_especias():
    """Return the Spanish labels of every spice topping (saidi:EspeciasTopping)."""
    sparql.setQuery('''
        PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
        PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>
        SELECT DISTINCT ?name
        WHERE {
            ?s rdfs:subClassOf saidi:EspeciasTopping .
            ?s rdfs:label ?name
            FILTER (lang(?name) = 'es')
        }
    ''')
    sparql.setReturnFormat(JSON)
    return sparql.query().convert()
# FrutasTopping
def get_response_frutas():
    """Return the Spanish labels of every fruit topping (saidi:FrutasTopping)."""
    sparql.setQuery('''
        PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
        PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>
        SELECT DISTINCT ?name
        WHERE {
            ?s rdfs:subClassOf saidi:FrutasTopping .
            ?s rdfs:label ?name
            FILTER (lang(?name) = 'es')
        }
    ''')
    sparql.setReturnFormat(JSON)
    return sparql.query().convert()
# QuesosTopping
def get_response_quesos():
    """Return the Spanish labels of every cheese topping (saidi:QuesosTopping)."""
    sparql.setQuery('''
        PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
        PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>
        SELECT DISTINCT ?name
        WHERE {
            ?s rdfs:subClassOf saidi:QuesosTopping .
            ?s rdfs:label ?name
            FILTER (lang(?name) = 'es')
        }
    ''')
    sparql.setReturnFormat(JSON)
    return sparql.query().convert()
# SalsasTopping
def get_response_salsas():
    """Return the Spanish labels of every sauce topping (saidi:SalsasTopping)."""
    sparql.setQuery('''
        PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
        PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>
        SELECT DISTINCT ?name
        WHERE {
            ?s rdfs:subClassOf saidi:SalsasTopping .
            ?s rdfs:label ?name
            FILTER (lang(?name) = 'es')
        }
    ''')
    sparql.setReturnFormat(JSON)
    return sparql.query().convert()
# VegetalesTopping
def get_response_vegetales():
    """Return the Spanish labels of every vegetable topping (saidi:VegetalesTopping)."""
    sparql.setQuery('''
        PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
        PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>
        SELECT DISTINCT ?name
        WHERE {
            ?s rdfs:subClassOf saidi:VegetalesTopping .
            ?s rdfs:label ?name
            FILTER (lang(?name) = 'es')
        }
    ''')
    sparql.setReturnFormat(JSON)
    return sparql.query().convert()
if __name__ == '__main__':
    # Smoke test: run every query once against the local endpoint.
    get_response_pizzas()
    get_response_carnes()
    get_response_embutidos()
    get_response_especias()
    get_response_frutas()
    get_response_quesos()
    get_response_salsas()
    get_response_vegetales()
|
7,220 | a336434abc526357db0536955885cf076ee60f59 | # import tensorflow as tf
# from tensorflow.examples.tutorials.mnist import input_data
# mnist = input_data.read_data_sets('/tmp/data/',one_hot=True)
# def build_CNN_clasifier(x):
# x_image = tf.reshape (x, [-1,28,28,1])
#
# #layer1
# w_conv1 = tf.Variable(tf.truncated_normal(shape = [5,5,1,32],stddev= 5e-2))
# b_conv1 = tf.Variable(tf.constant(0.1,shape=[32]))
# h_conv1 = tf.nn.relu(tf.nn.conv2d(x_image,w_conv1,stride=[1,1,1,1,],padding='SAME')+b_conv1)
# h_pool1 = tf.nn.max_pool(h_conv1,ksize=[1,2,2,1],strides = [1,2,2,1],padding='SAME')
#
# #layer2
# w_conv2 = tf.Variable(tf.truncated_normal(shape=[5,5,32,64],stddev = 5e-2))
# b_conv2 = tf.Variable(tf.constant(0.1,shape=[64]))
# h_conv2 = tf.nn.relu(tf.nn.conv2d(h_conv1,w_conv2,strides=[1,1,1,1],padding='SAME')+b_conv2)
#
# h_pool2 = tf.nn.max_pool(h_conv2,ksize=[1,2,2,1],strides= [1,2,2,1],padding='SAME')
#
# #fully-connected layer
# w_fc_1 = tf.Variable(tf.truncated_normal(shape=[7*7*64,1024],stddev=5e-2))
# b_fc_1 = tf.Variable(tf.constant(0.1,shape=[1024]))
# h_pool2_flat= tf.reshape(h_pool2,[-1,7*7*64])
# h_fc_1 = tf.nn.relu(tf.matmul(h_pool2_flat,w_fc_1)+b_fc_1)
#
#
#
#
# with tf.Session() as sess:
# sess.run(x_image, feed_dict={x:mnist})
# print(x_image)
# print(x_image.shape)
import numpy as np
def conv1d(x, w, p=0, s=1):
    """
    1-D discrete convolution of signal x with kernel w.

    :param x: input sequence
    :param w: kernel (flipped internally, so this is true convolution)
    :param p: number of zeros padded onto each end of x
    :param s: stride
    :return: np.ndarray holding the convolution result
    """
    kernel = np.array(w[::-1])  # flip kernel: convolution, not correlation
    padded = np.array(x)
    if p > 0:
        pad = np.zeros(shape=p)
        padded = np.concatenate([pad, padded, pad])
    # Number of output positions for the given padding and stride.
    n_out = (len(x) + 2 * p - len(w)) // s + 1
    return np.array([
        np.sum(padded[i * s:i * s + kernel.shape[0]] * kernel)
        for i in range(n_out)
    ])
## Testing:
x = [1, 0, 2, 3, 0, 1, 1]
w = [2, 1, 3]
# Cross-check the hand-rolled convolution against numpy's reference impl.
print('Conv1d Implementation: ', conv1d(x, w, p=0, s=1))
print('Numpy Results: ', np.convolve(x, w, mode='valid'))
import tensorflow as tf

# Same convolution via TensorFlow 1.x: reshape the signal into the
# [batch, width, channels] layout and the kernel into [width, in, out]
# that tf.nn.conv1d expects, then evaluate inside a session.
i = tf.constant([1, 0, 2, 3, 0, 1, 1], dtype=tf.float32, name='i')
k = tf.constant([2, 1, 3], dtype=tf.float32, name='k')
print(i, '\n', k, '\n')
data = tf.reshape(i, [1, int(i.shape[0]), 1], name='data')
kernel = tf.reshape(k, [int(k.shape[0]), 1, 1], name='kernel')
print(data, '\n', kernel, '\n')
res = tf.squeeze(tf.nn.conv1d(data, kernel, 1, 'VALID'))
#res = tf.squeeze(tf.nn.conv1d(data, kernel, 1, 'SAME'))
#res = tf.squeeze(tf.nn.conv1d(data, kernel, 2, 'SAME'))
#res = tf.nn.conv1d(data, kernel, 2, 'SAME')
with tf.Session() as sess:
    print(sess.run(res))
    print(sess.run(data))
7,221 | 44175d2559f9c7d6171b6e45d24719d50dc80fb7 | import cv2
import numpy as np
# THRESHOLDING FUNCTION IMPLEMENTATION
def thresholding(img):
    """Return a binary mask of the white lane pixels in a BGR frame."""
    # Work in HSV; the bounds below were picked interactively with the
    # trackbars in ColorPickerScript.py.
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    lower = np.array([80, 0, 0])
    upper = np.array([255, 160, 255])
    return cv2.inRange(hsv, lower, upper)
# WARPING FUNCTION IMPLEMENTATION
def warpImg(img, points, w, h, inv=False):
    """Perspective-warp img so `points` map onto a w x h rectangle.

    With inv=True the inverse transform (rectangle back to `points`)
    is applied instead.
    """
    src = np.float32(points)
    # Corner coordinates of the warped output image.
    dst = np.float32([[0, 0], [w, 0], [0, h], [w, h]])
    if inv:
        matrix = cv2.getPerspectiveTransform(dst, src)
    else:
        matrix = cv2.getPerspectiveTransform(src, dst)
    return cv2.warpPerspective(img, matrix, (w, h))
# trackbar change will call nothing()
def nothing(a):
    """No-op trackbar callback (OpenCV requires one to be registered)."""
    pass
# Creating the trackbars to find the optimal warping points.
# Care should be taken to choose points which are not very far from our current position
# ie. mostly lying in the bottom half region of the image since we should only confidently
# predict the lane warp present on the road at this point of time.
# create trackbars
def initializeTrackbars(initialTrackbarVals, wT=480, hT=240):
    """Create the warp-point trackbar window seeded with the given values.

    wT/hT are the dimensions of the target (video) window.
    """
    cv2.namedWindow("Trackbars")
    cv2.resizeWindow("Trackbars", 360, 240)
    # Width sliders span half the frame: the points are mirrored around it.
    bars = (("Width Top", initialTrackbarVals[0], wT // 2),
            ("Height Top", initialTrackbarVals[1], hT),
            ("Width Bottom", initialTrackbarVals[2], wT // 2),
            ("Height Bottom", initialTrackbarVals[3], hT))
    for name, start, upper in bars:
        cv2.createTrackbar(name, "Trackbars", start, upper, nothing)
# find the value of trackbars (real-time)
def valTrackbars(wT=480, hT=240):
    """Read the four trackbars and return the mirrored warp source points."""
    w_top = cv2.getTrackbarPos("Width Top", "Trackbars")
    h_top = cv2.getTrackbarPos("Height Top", "Trackbars")
    w_bot = cv2.getTrackbarPos("Width Bottom", "Trackbars")
    h_bot = cv2.getTrackbarPos("Height Bottom", "Trackbars")
    # Points are symmetric about the vertical centre line of the frame.
    return np.float32([(w_top, h_top), (wT - w_top, h_top),
                       (w_bot, h_bot), (wT - w_bot, h_bot)])
# draw the warp points as red circles
def drawPoints(img, points):
    """Draw the four warp points on img as filled red circles; return img."""
    for point in points[:4]:
        centre = (int(point[0]), int(point[1]))
        cv2.circle(img, centre, 12, (0, 0, 255), cv2.FILLED)
    return img
# HISTOGRAM IMPLEMENTATION (TO FIND CURVE TURNING LEFT/RIGHT)
def getHistogram(img, minPer=0.1, display= False, region=1):
    """
    Locate the lane base point from a column-sum histogram of a binary image.

    Columns summing below minPer * max are treated as noise (the threshold
    is derived from the live data rather than hard-coded).  region=1 uses
    the whole image; region=n uses only the bottom 1/n of it.  With
    display=True a visualisation image is returned alongside the base point.
    """
    if region == 1:
        # Sum every pixel column over the full image height.
        histValues = np.sum(img, axis=0)
    else:
        # Sum only the bottom (1/region)-th of the image.
        histValues = np.sum(img[img.shape[0] // region:, :], axis=0)
    threshold = minPer * np.max(histValues)
    # Base point = average index of all columns above the noise threshold.
    qualifying = np.where(histValues >= threshold)
    basePoint = int(np.average(qualifying))
    if display:
        imgHist = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)
        for col, intensity in enumerate(histValues):
            cv2.line(imgHist, (col, img.shape[0]),
                     (col, img.shape[0] - intensity // 255 // region),
                     (255, 0, 255), 1)
        cv2.circle(imgHist, (basePoint, img.shape[0]), 20, (0, 255, 255), cv2.FILLED)
        return basePoint, imgHist
    return basePoint
# stack all the display windows
# (ONLY FOR DISPLAY PURPOSES, NO EFFECT ON PROGRAM)
def stackImages(scale,imgArray):
    """
    Stack a flat list or a 2-D grid of images into one composite image.

    :param scale: resize factor applied to every image
    :param imgArray: flat list (stacked horizontally) or list of rows of
                     images (stacked into a grid)
    :return: the composite image
    """
    rows = len(imgArray)
    cols = len(imgArray[0])
    # A list of lists means a 2-D grid of images was passed.
    rowsAvailable = isinstance(imgArray[0], list)
    width = imgArray[0][0].shape[1]
    height = imgArray[0][0].shape[0]
    if rowsAvailable:
        for x in range (0, rows):
            for y in range(0, cols):
                # Resize each tile; mismatched tiles are forced to the
                # first image's dimensions so hstack/vstack line up.
                if imgArray[x][y].shape[:2] == imgArray[0][0].shape [:2]:
                    imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0), None, scale, scale)
                else:
                    imgArray[x][y] = cv2.resize(imgArray[x][y], (imgArray[0][0].shape[1], imgArray[0][0].shape[0]), None, scale, scale)
                # Promote grayscale tiles to BGR so channel counts match.
                if len(imgArray[x][y].shape) == 2: imgArray[x][y] = cv2.cvtColor( imgArray[x][y], cv2.COLOR_GRAY2BGR)
        imageBlank = np.zeros((height, width, 3), np.uint8)
        hor = [imageBlank]*rows
        hor_con = [imageBlank]*rows
        # Stack each row horizontally, then stack the rows vertically.
        for x in range(0, rows):
            hor[x] = np.hstack(imgArray[x])
        ver = np.vstack(hor)
    else:
        for x in range(0, rows):
            if imgArray[x].shape[:2] == imgArray[0].shape[:2]:
                imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale, scale)
            else:
                imgArray[x] = cv2.resize(imgArray[x], (imgArray[0].shape[1], imgArray[0].shape[0]), None,scale, scale)
            if len(imgArray[x].shape) == 2: imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)
        # Flat list: a single horizontal strip is the result.
        hor= np.hstack(imgArray)
        ver = hor
    return ver
7,222 | 51a4d8f1be7009b69f0b69bdd51a0077256304a9 | # -*- coding: utf-8 -*-
# Odoo addon manifest for the Islamic (Hijri) datepicker widget.
{
    'name': 'Islamic Datepicker',
    'category': 'Extra Tools',
    'author': 'Mostafa Mohamed',
    'website': 'https://eg.linkedin.com/in/mostafa-mohammed-449a8786',
    'price': 25.00,
    'currency': 'EUR',
    'version': '9.0.1.0.1',
    # Requires the web client for the frontend widget assets.
    'depends': ['base','web'],
    'data': [
        'views/islamic_template.xml',
    ],
    # Legacy qweb asset declaration (Odoo 9 style).
    'qweb': [
        "static/src/xml/islamice_date_widget.xml",
    ],
    'auto_install': False,
    'installable': True
}
|
7,223 | 4bd2923381cd3ead9a5605363a86f41b3743bf27 |
def largestVar(s: str) -> int:
    """
    Return the largest "variance" over all substrings of s, where the
    variance of a substring is the largest difference between the counts
    of two characters that both occur in it.

    BUG FIX + completion: the original built `freq` with `chr(i) - 'a'`,
    which raises TypeError (strings cannot be subtracted), and the pair
    loop was left unfinished.  This implements the standard Kadane-style
    scan per ordered character pair, O(k^2 * n) for k distinct characters.

    :param s: input string
    :return: the maximum variance; 0 when no two distinct chars co-occur
    """
    from collections import Counter
    from itertools import permutations

    freq = Counter(s)
    max_var = 0
    for major, minor in permutations(freq, 2):
        count_major = 0           # occurrences of `major` in the window
        count_minor = 0           # occurrences of `minor` in the window
        rest_minor = freq[minor]  # `minor` occurrences not yet scanned
        for ch in s:
            if ch == major:
                count_major += 1
            if ch == minor:
                count_minor += 1
                rest_minor -= 1
            # Only substrings actually containing the minor char count.
            if count_minor > 0:
                max_var = max(max_var, count_major - count_minor)
            # Restart the window when it turns unprofitable, but only if
            # another `minor` still lies ahead (otherwise keep the window).
            if count_major < count_minor and rest_minor > 0:
                count_major = 0
                count_minor = 0
    return max_var
7,224 | 12d59697d5c2ec69d019c64dac762385c8c0cb66 | import sys
import networkx as nx
import bcube.generator_bcube as bcube
import dcell.generate_dcell as dcell
import fat_tree.generate_fat_tree as fatTree
import cayley_graphs.generate_bubble_sort as bubbleSort
import cayley_graphs.generate_hypercube as hypercube
import cayley_graphs.generate_pancake as pancake
import cayley_graphs.generate_transposition as transposition
import cayley_graphs.generate_star as star
import cayley_graphs.generate_butterfly as butterfly
import slim_fly.generate_slim_fly as slimFly
# CLI: <graph_name> <p> <q> <output-name>
graph_name = sys.argv[1]
p = int(sys.argv[2])
q = int(sys.argv[3])
path = 'temp/' + sys.argv[4]
# integers q, p parameterise the size of each topology
if graph_name == "bcube":
    # constraints: q < p and q < 4
    G = bcube.generate_bcube(q,p)
elif graph_name == "dcell":
    # constraints: q < p and q < 4
    G = dcell.generate_dcell(p,q)
elif graph_name == "fat_tree":
    # integer p; constraint: p must be even
    G = fatTree.generate_fat_tree(p)
elif graph_name == "bubble_sort":
    G = bubbleSort.generate_bubble_sort(p)
elif graph_name == "hypercube":
    G = hypercube.generate_hypercube(p)
elif graph_name == "pancake":
    G = pancake.generate_pancake_graph(p)
elif graph_name == "transposition":
    G = transposition.generate_transposition_graph(p)
elif graph_name == "star":
    G = star.generate_star_graph(p)
elif graph_name == "butterfly":
    G = butterfly.generate_butterfly(p)
elif graph_name == "slim_fly":
    # p = 5,7,11,17,19,25,29,35,43,47,55,79
    G = slimFly.generate_slim_fly(p)
edges = G.edges()
#print(G.nodes(data=True))
H = nx.from_edgelist(edges)
# changing color of nodes
#H.node[1]['co']='red'
nodes = len(H.nodes(data=True))
# FIX: print as a function call so the script also runs under Python 3
# (prints the identical single value under Python 2).
print(nodes)
nx.write_graphml(H, path + str(nodes))
7,225 | f28b47e1b07011ce9d0708331f68d7f16195c567 | """
Contains generic code for use in main menus. Currently this is a function which turns dictionaries of functions
into a menu. Any further menu functions are expected to live here too, so don't expect it to run like a pipeline but
rather like a suite of individual menus.

TODO - refactor the spider selection function; it is currently far too long
     - incorporate the spider selector into the config manager options
"""
import re
import os
import json
from datetime import date, datetime
from collections import defaultdict
import pandas as pd
from HousingPriceScraper.HousingPriceScraper.functions.basic_functions import return_false, end_process, \
alphabet_list_length, flatten_list_of_lists
from HousingPriceScraper.HousingPriceScraper.functions.data_management import save_list_to_txt
def final_option(dict_of_options, back):
    """
    Append the terminating menu entry to a dictionary of options.

    :param dict_of_options: readable labels mapped to zero-argument functions
    :param back: True adds a 'back' entry, False adds 'end_process'
    :return: the same dict with the final key appended
    """
    label, action = ('back', return_false) if back else ('end_process', end_process)
    dict_of_options[label] = action
    return dict_of_options
def basic_menu(dict_of_options, back=False):
    """
    basic text based user interface, allows user to select a function to run from a dictionary of options, by using a
    simple numeric code. User will see this dictionaries keys enumerated on screen to choose from.
    :param dict_of_options: dictionary with readable labels as keys and uncalled functions as values. It is important
    that these functions don't require parameters.
    :param back: boolean. choose between scrolling back to previous menu or ending the process entirely. defaults to
    ending process since that's how the main menu does it and unsure of where else function will be called
    :return: run the chosen function
    """
    choose = True
    # Append the terminating entry ('back' or 'end_process') to the dict.
    dict_of_options = final_option(dict_of_options, back)
    list_of_options = list(dict_of_options.keys())
    while choose:
        print('The following options are available:\n')
        for option in enumerate(list_of_options):
            print('\t{} - {}'.format(option[0], option[1]))
        pick = input('\nType the numeric code you wish to run\n\n')
        if pick in [str(i) for i in range((len(dict_of_options)))]:
            # The chosen function's return value decides whether the menu
            # loop continues (truthy) or exits (falsy, e.g. return_false).
            choose = dict_of_options[list_of_options[int(pick)]]()
        else:
            print('{} is not currently an option!\n'.format(pick))
def basic_menu_non_functional(list_of_options):
    """
    basic text based user interface, allows user to select multiple options from a list of available choices.
    :param list_of_options: list of available choices (left unmodified)
    :return: a list of chosen strings, or True when the user picks 'back'
    """
    # BUG FIX: work on a copy — the original appended 'back' directly to the
    # caller's list, mutating it and growing it by one entry per invocation.
    options = list(list_of_options) + ['back']
    while True:
        print('The following options are available:\n')
        for index, option in enumerate(options):
            print('\t{} - {}'.format(index, option))
        picks = input('\nType the numeric codes you wish to run\n\n').split(',')
        # BUG FIX: the 'back' sentinel sits at index len(options) - 1; the
        # original compared against len(...) taken after the append, so the
        # displayed 'back' code returned ['back'] instead of True.
        if str(len(options) - 1) in picks:
            return True
        choice = []
        for pick in picks:
            if pick in [str(i) for i in range(len(options) - 1)]:
                choice.append(options[int(pick)])
            else:
                print('{} is not currently an option!\n'.format(pick))
        if len(choice) > 0:
            return choice
def select_spiders(spiders_dict):
    """
    select from spiders available. allows user to select all spiders, select all spiders within a
    project group, select some comma separated list of individual/groups of spiders, or by prefixing a
    given selection with "-", the user can remove a spider from his or her selection.
    :param spiders_dict: dictionary who's keys are broad options and values are lists of spiders
    :return: list containing the spiders the user has selected to run, or False to go back
    """
    print('Available spiders include:\n')
    enumerated_keys = list(enumerate(spiders_dict.keys()))
    # Print each group with a numeric code and each spider inside it with a
    # number+letter code (e.g. "0a" = first spider of group 0).
    for key_group in enumerated_keys:
        print('{} - {}'.format(key_group[0], key_group[1]))
        for spider in zip(alphabet_list_length(len(key_group[1])), spiders_dict[key_group[1]]):
            print('\t{}{} - {}'.format(key_group[0], spider[0], spider[1].name))
    print('{} - run all'.format(len(spiders_dict.keys())))
    print('{} - back'.format(len(spiders_dict.keys())+1))
    choices = input('\nfor multiple, comma separate. To remove, use "-" prefix\ni.e.: 0,-0a to run all of group 0 except the first\n').replace(' ', '').split(',')
    # 'back' sentinel chosen anywhere aborts the selection.
    if str(len(spiders_dict.keys())+1) in choices:
        return False
    if str(len(spiders_dict.keys())) in choices:
        # 'run all': start from every group's full spider list.
        chosen_spiders = list(spiders_dict.values())
    else:
        chosen_spiders = []
        # Positive selections: bare digits select a whole group; a
        # digit+letter code selects one spider within a group.
        for choice in choices:
            if choice.isdigit():
                if choice in [str(i[0]) for i in enumerated_keys]:
                    chosen_spiders.append(spiders_dict[enumerated_keys[int(choice)][1]])
                else:
                    print('{} is not an option!'.format(choice))
            elif '-' not in choice:
                numeric = re.findall(r'\d+', choice)
                if len(numeric) == 1:
                    # Convert the letter suffix back into a list index.
                    alpha = choice.split(numeric[0])[1]
                    alpha = len(alphabet_list_length(0, index=alpha))-1
                    try:
                        chosen_spiders.append(spiders_dict[enumerated_keys[int(numeric[0])][1]][alpha])
                    except IndexError:
                        print('{} is not an option!'.format(choice))
                else:
                    print('{} is not an option!'.format(choice))
    # Whole-group selections are lists of spiders: flatten and de-duplicate.
    if any(isinstance(el, list) for el in chosen_spiders):
        chosen_spiders = flatten_list_of_lists(chosen_spiders, make_set=True)
    else:
        chosen_spiders = list(set(chosen_spiders))
    # Negative selections ('-' prefix) remove whole groups or single spiders.
    to_remove = [choice for choice in choices if '-' in choice]
    if len(to_remove) > 0:
        for removee in to_remove:
            if removee.replace('-', '').isdigit():
                if removee.replace('-', '') in [str(i[0]) for i in enumerated_keys]:
                    for spider in spiders_dict[enumerated_keys[int(removee.replace('-', ''))][1]]:
                        chosen_spiders.remove(spider)
                else:
                    print('{} is not an option!'.format(removee))
            else:
                numeric = re.findall(r'\d+', removee)
                if len(numeric) == 1:
                    alpha = removee.split(numeric[0])[1]
                    alpha = len(alphabet_list_length(0, index=alpha)) - 1
                    try:
                        chosen_spiders.remove(spiders_dict[enumerated_keys[int(numeric[0])][1]][alpha])
                    except IndexError:
                        print('{} is not an option!'.format(removee))
                else:
                    print('{} is not an option!'.format(removee))
    if len(chosen_spiders) > 0:
        return chosen_spiders
    else:
        print("You haven't selected any spiders!")
        return False
def project_visibility_menu():
    """
    creates menu to allow user to set which project groups are visible in the run_scrapers menu
    :return: creates a txt file containing the list of desired project names, one per row.
    """
    # NOTE(review): the [:-1] slice drops the last directory entry —
    # presumably a cache/__init__ artefact; confirm the listing order holds.
    projects = [i.split('.')[0] for i in os.listdir('HousingPriceScraper/HousingPriceScraper/spiders/SpiderGroups')[:-1]]
    print('Available projects are:\n')
    for project in enumerate(projects):
        print('\t{} - {}'.format(project[0], project[1]))
    print('\t{} - back'.format(len(projects)))
    choices = input('\nType the options you wish to select.\nFor multiple, comma separate\n\n').split(',')
    if str(len(projects)) in choices:
        # 'back' chosen: leave the current visibility file untouched.
        return True
    else:
        choice_list = []
        # Silently ignore anything that is not a valid project index.
        for choice in choices:
            if choice.isdigit() and int(choice) in range(len(projects)):
                choice_list.append(projects[int(choice)])
        print('You have selected to display the following spider groupings:\n\t{}\n'.format(choice_list))
        save_list_to_txt(choice_list, 'HousingPriceScraper/HousingPriceScraper/configs/visible_projects_to_scrape.txt')
        return True
def set_config():
    """
    menu to set the url configs.

    Reads the available url config names from the descriptions txt file,
    lets the user pick one or more, merges the chosen config json files
    (flattening any nested url lists), back-fills any missing keys from
    defaults.json, and writes the result to chosen_urls.json.
    :return: True once finished (or immediately if the user backs out).
    """
    # Close the descriptions file deterministically instead of leaking the handle.
    with open('HousingPriceScraper/HousingPriceScraper/configs/input_url_config_descriptions.txt', 'r') as available_configs:
        options = available_configs.readlines()
    options_dict = {}
    print('available configs include:\n')
    for option in enumerate(options):
        # Each line looks like "<config_name>: <description>"; key by menu index.
        options_dict[option[0]] = option[1].split(':')[0]
        print('\t{} - {}'.format(option[0], option[1].replace('\n', '')))
    print('\t{} - back'.format(len(options)))
    chosen = input('\ncomma separate for multiple\n').split(',')
    if (str(len(options)) in chosen) or (chosen == ['']):
        return True
    configs = []
    for choice in chosen:
        # Guard with isdigit() so non-numeric input is skipped instead of
        # crashing with ValueError on int(choice) (bug in the original).
        if choice.strip().isdigit() and int(choice) in options_dict:
            with open('HousingPriceScraper/HousingPriceScraper/configs/input_urls/{}.json'.format(options_dict[int(choice)])) as f:
                configs.append(json.load(f))
    final_config = defaultdict(list)
    for config in configs:
        for key, value in config.items():
            if key in final_config:
                final_config[key] += value
            else:
                final_config[key] = value
    for key, value in final_config.items():
        # Some configs store lists of url lists; flatten them and de-duplicate.
        if any(isinstance(val, list) for val in value):
            final_config[key] = flatten_list_of_lists(value, make_set=True)
    with open('HousingPriceScraper/HousingPriceScraper/configs/input_urls/defaults.json') as default_urls_json:
        default_dict = json.load(default_urls_json)
    for key, value in default_dict.items():
        if key not in final_config.keys():
            final_config[key] = value
    with open('HousingPriceScraper/HousingPriceScraper/configs/chosen_urls.json', 'w') as fp:
        json.dump(final_config, fp, sort_keys=True, indent=4)
    return True
def append_recent_urls():
    """
    function for appending recent scraped urls to default urls json
    :return: default.json is updated
    """
    with open('HousingPriceScraper/HousingPriceScraper/configs/input_urls/defaults.json') as default_urls_json:
        default_dict = json.load(default_urls_json)
    with open('HousingPriceScraper/HousingPriceScraper/configs/input_urls/recent_urls.json') as recent_urls_json:
        recent_dict = json.load(recent_urls_json)
    # setdefault handles keys that exist only in recent_urls.json; existing
    # url lists are extended, never replaced.
    for key, value in recent_dict.items():
        default_dict.setdefault(key, []).extend(value)
    # Overwrite defaults.json in place with the merged url lists.
    with open('HousingPriceScraper/HousingPriceScraper/configs/input_urls/defaults.json', 'w') as fp:
        json.dump(default_dict, fp, sort_keys=True, indent=4)
def replace_default_urls():
    """
    function for replacing default urls config with recent scrapes
    :return: defaults.json is updated
    """
    with open('HousingPriceScraper/HousingPriceScraper/configs/input_urls/defaults.json') as default_urls_json:
        default_dict = json.load(default_urls_json)
    with open('HousingPriceScraper/HousingPriceScraper/configs/input_urls/recent_urls.json') as recent_urls_json:
        recent_dict = json.load(recent_urls_json)
    # Overwrite (or add) every key present in recent_urls.json; keys that only
    # exist in defaults.json are left untouched.
    for key, value in recent_dict.items():
        default_dict[key] = value
    with open('HousingPriceScraper/HousingPriceScraper/configs/input_urls/defaults.json', 'w') as fp:
        json.dump(default_dict, fp, sort_keys=True, indent=4)
def create_new_config():
    """
    function which creates a whole new config file to store recent scraped urls in
    :return: new config is created
    """
    with open('HousingPriceScraper/HousingPriceScraper/configs/input_urls/recent_urls.json') as recent_urls_json:
        urls_dict = json.load(recent_urls_json)
    # Sanitise the name: spaces are replaced for safer filenames and ':' is
    # stripped because it is the delimiter of the descriptions file below.
    config_name = input('Type a name for the new config file:\n').replace(' ', '_').replace(':', '')
    config_desc = input('Type a brief description for the new config file:\n')
    with open('HousingPriceScraper/HousingPriceScraper/configs/input_urls/{}.json'.format(config_name), 'w') as fp:
        json.dump(urls_dict, fp, sort_keys=True, indent=4)
    # Register the new config so set_config() can offer it as a menu option.
    with open('HousingPriceScraper/HousingPriceScraper/configs/input_url_config_descriptions.txt', 'a') as input_descs:
        input_descs.write('\n{}: {}'.format(config_name, config_desc))
    print('\nSuccessfully saved recently scraped urls to new config: {}.json'.format(config_name))
def clear_recent_urls():
    """
    function which bleaches the recent urls config in order to start fresh next time
    :return: recent_urls will become an empty dictionary.
    """
    recent_path = 'HousingPriceScraper/HousingPriceScraper/configs/input_urls/recent_urls.json'
    # Keep every key but reset each url list to empty.
    with open(recent_path) as recent_urls_json:
        emptied = {key: [] for key in json.load(recent_urls_json)}
    with open(recent_path, 'w') as fp:
        json.dump(emptied, fp, sort_keys=True, indent=4)
def select_date_interval_menu():
    """
    Prompt the user for a start and an end date defining an interval.

    Both dates are read in dd-mm-yyyy form; an empty end date means today.
    :return: list of every date in the interval, formatted as 'ddmmyyyy' strings.
    """
    while True:
        start_date = input('\nInput desired start date with format dd-mm-yyyy:\n')
        try:
            start_date = datetime.strptime(start_date, '%d-%m-%Y')
            break
        except ValueError:
            print('invalid start date selected')
    while True:
        # Bug fix: this prompt previously asked for the *start* date again.
        end_date = input('\nInput desired end date with format dd-mm-yyyy,\nor hit enter to select todays date\n')
        if end_date == '':
            end_date = date.today()
            break
        else:
            try:
                end_date = datetime.strptime(end_date, '%d-%m-%Y')
                break
            except ValueError:
                print('invalid end date selected')
    # Inclusive daily range between the two chosen dates.
    list_of_dates = pd.date_range(start_date, end_date, freq='d')
    list_of_dates = [i.strftime('%d%m%Y') for i in list_of_dates]
    return list_of_dates
|
class State(object):
    """Simple value object pairing a state's name with its level."""

    def __init__(self, stateName, stateLevel):
        # Attribute names kept as-is so existing callers keep working;
        # stray trailing semicolons removed.
        self.stateName = stateName
        self.stateLevel = stateLevel
|
7,227 | d00873c3ee72b55cb5b74f78a98de61a25b3cc21 | __doc__
def fizz_buzz(num1, num2, end_range):
    """Print and return the FizzBuzz sequence for 1 .. end_range-1.

    Multiples of num1 map to "Fizz", multiples of num2 to "Buzz", and common
    multiples to "FizzBuzz"; every other value stays an int. Returns an error
    string when any argument is not a non-negative integer.
    """
    valid = all(isinstance(arg, int) for arg in (num1, num2, end_range))
    if not valid or min(num1, num2, end_range) < 0:
        return "Input should be a positive integer"
    # Collect outputs so callers/tests can inspect the whole sequence.
    result = []
    for value in range(1, end_range):
        if value % num1 == 0 and value % num2 == 0:
            token = "FizzBuzz"
        elif value % num1 == 0:
            token = "Fizz"
        elif value % num2 == 0:
            token = "Buzz"
        else:
            token = value
        result.append(token)
        print(token)
    return result
def test_answer():
    """Exercise fizz_buzz with two valid calls and one invalid call."""
    import sys
    answer1 = None
    answer2 = None
    answer3 = None
    try:
        answer1 = fizz_buzz(3, 5, 16)
        answer2 = fizz_buzz(2, 7, 20)
        # Missing required arguments: expected to raise, leaving answer3 as None.
        answer3 = fizz_buzz(100)
    except:
        print("An error occurred:", sys.exc_info()[1])
    assert answer1 == [1, 2, "Fizz", 4, "Buzz", "Fizz", 7, 8,
                       "Fizz", "Buzz", 11, "Fizz", 13, 14, "FizzBuzz"]
    assert answer2 == [1, "Fizz", 3, "Fizz", 5, "Fizz", "Buzz", "Fizz", 9,
                       "Fizz", 11, "Fizz", 13, "FizzBuzz", 15, "Fizz", 17,
                       "Fizz", 19]
    assert answer3 == None
|
class Port(object):
    """Minimal wrapper for a switch/network port identified by its MAC address."""

    def __init__(self, mac):
        # MAC address identifying this port.
        self.mac = mac
|
def reorderAssetsByTypes(nodePath, colorNode=True, alignNode=True):
    """Colour and lay out the children of *nodePath* by asset type.

    Asset types (sets/chars/props) are read from the nask casting CSV.
    colorNode toggles recolouring, alignNode toggles the grid re-layout.
    Runs inside Houdini (uses the `hou` module).
    """
    def getNaskCasting():
        """Map asset name -> lower-cased top-level asset type from the casting CSV."""
        path = "E:/WIP/Work/casting-nask.csv"
        # `with` closes the handle even on error (original leaked it and
        # shadowed the builtin `file`).
        with open(path, "r") as csv_file:
            fileText = csv_file.readlines()
        fileText.pop(0)  # drop the CSV header row
        assetDic = {}
        for line in fileText:
            columns = line.split(",")
            assetName = columns[2]
            # Column 1 holds e.g. "chars/extra"; keep only the top-level type.
            assetDic[assetName] = columns[1].split("/")[0].lower()
        return assetDic

    assetList = getNaskCasting()
    colorList = {"sets": (0, 0.4, 1), "chars": (0.4, 1, 0.4), "props": (0.6, 0.4, 1)}
    assetTypeList = {"sets": [], "props": [], "chars": []}
    nodeChildren = hou.node(nodePath).children()
    # Colourize nodes by asset type (renamed from `type` to avoid shadowing the builtin).
    for child in list(nodeChildren):
        if str(child) in assetList.keys():
            asset_type = assetList[str(child)]
            if colorNode == True:
                child.setColor(hou.Color(colorList[asset_type]))
            assetTypeList[asset_type].append(child)
    # Reorder node layout by asset type: one column per type, columns 3 units apart.
    if alignNode == True:
        u = 0
        v = 0
        for asset_type in sorted(assetTypeList.keys()):
            v = 0
            for asset in sorted(assetTypeList[asset_type]):
                pos = hou.Vector2(u, v)
                asset.setPosition(pos)
                v -= 1
            u -= 3


reorderAssetsByTypes("/obj/geo1")
|
# NOTE(review): the module previously opened in.txt / out.py eagerly here and
# never closed those handles. Both files are (re)opened below inside a `with`
# statement, so the leaking duplicate opens were removed.
def opstr(op):
    """Translate a wiring-spec operator keyword into its Python operator token."""
    translation = {
        'RSHIFT': '>>',
        'LSHIFT': '<<',
        'OR': '|',
        'AND': '&',
        'NOT': '~',
    }
    try:
        return translation[op]
    except KeyError:
        raise RuntimeError('Unknown {0}'.format(op))
def funstr(fun):
    """Return the generated-function name for wire *fun*."""
    return fun + '_fn'
def topython(line):
    """Compile one "expr -> wire" spec line into memoised Python source.

    The generated function is named "<wire>_fn" and caches its result in the
    module-level `precalc` dict so each wire is evaluated at most once.
    UPPERCASE tokens become operators, lowercase tokens become calls to the
    corresponding wire functions, everything else is passed through verbatim.
    """
    # Bug fix: the original called line.strip() without binding the result
    # (str.strip returns a new string), so it was a no-op.
    line = line.strip()
    body, result = line.split('->')
    fun_name = funstr(result.strip())
    fun_body = '''
def {name}():
    result = precalc.get(\'{name}\')
    if result is None:
        result = {calc}
        precalc[\'{name}\'] = result
    return result
'''
    calc = str()
    for part in body.strip().split(' '):
        if part.isupper():
            calc += opstr(part) + ' '
        elif part.islower():
            calc += funstr(part) + '() '
        else:
            calc += part + ' '
    return fun_body.format(name=fun_name, calc=calc)
# Transpile every wiring line from in.txt into a memoised Python function in
# out.py, seeded with the shared `precalc` cache dict.
# NOTE(review): `input` and `output` shadow the builtins inside this block.
with open('in.txt') as input, open('out.py', 'w+') as output:
    output.write('precalc = dict()')
    for line in input:
        output.write(topython(line) + '\n')
    output.write('print(a_fn())\n')
# Importing the generated module executes it, printing wire `a`'s value.
import out
|
7,231 | 28c4c09b81d63785750cee36a8efd77760cac451 |
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import tarfile
import tensorflow as tf
from IPython.display import display, Image
from scipy import ndimage
from sklearn.linear_model import LogisticRegression
from six.moves.urllib.request import urlretrieve
from six.moves import cPickle as pickle
from PIL import Image
from six.moves import range
train_folder = './data/train'
test_folder = './data/valid'
dimensions = (229, 229)
max_angle = 15
# rotating image
# rotating image
def rotate_img(image, angle, color, filter = Image.NEAREST):
    """Rotate *image* by *angle* degrees over a solid *color* background.

    A matte mask is rotated alongside the image so the corners uncovered by
    the rotation are filled with *color* instead of black.
    """
    if image.mode == "P" or filter == Image.NEAREST:
        matte = Image.new("1", image.size, 1) # mask
    else:
        matte = Image.new("L", image.size, 255) # true matte
    bg = Image.new(image.mode, image.size, color)
    bg.paste(
        image.rotate(angle, filter),
        matte.rotate(angle, filter)
    )
    return bg
# make gray_scale image or 1channel image
def make_greyscale_white_bg(im, r, b, g):
    """Replace every pixel matching the given colour with white, then convert to greyscale.

    NOTE(review): the parameter order is (r, b, g) — the comparison below pairs
    r with red, b with blue and g with green, so callers must pass blue/green
    in that swapped order.
    """
    im = im.convert('RGBA') # Convert to RGBA
    data = np.array(im) # "data" is a height x width x 4 numpy array
    red, green, blue, alpha = data.T # Temporarily unpack the bands for readability
    # Replace grey with white... (leaves alpha values alone...)
    grey_areas = (red == r) & (blue == b) & (green == g)
    data[..., :-1][grey_areas.T] = (255, 255, 255) # Transpose back needed
    im2 = Image.fromarray(data)
    im2 = im2.convert('L') # convert to greyscale image
    #im2.show()
    return im2
def process_images(folder):
    """Resize every image under *folder*'s class sub-folders to `dimensions`, in place."""
    classes = [os.path.join(folder, d) for d in sorted(os.listdir(folder))] # get list of all sub-folders in folder
    img_cnt = 0
    for class_x in classes:
        if os.path.isdir(class_x):
            # get paths to all the images in this folder
            images = [os.path.join(class_x, i) for i in sorted(os.listdir(class_x)) if i != '.DS_Store']
            for image in images:
                img_cnt = img_cnt + 1
                # Progress heartbeat every 1000 images.
                if(img_cnt % 1000 == 0):
                    print("Processed %s images" % str(img_cnt))
                im = Image.open(image)
                im = im.resize(dimensions) # resize image according to dimensions set
                im.save(image) # overwrite previous image file with new image
    print("Finished processing images, images found = ")
    print(img_cnt)
# Normalise both datasets on import, before any pickling below.
process_images(test_folder)
process_images(train_folder)
print('ok')
image_size = 229 # Pixel width and height.
pixel_depth = 255.0 # Number of levels per pixel.
def load_letter(folder, min_num_images):
    """Load every image in *folder* into one normalised float32 tensor.

    Pixel values are rescaled to roughly [-0.5, 0.5]. Unreadable images are
    skipped; raises if fewer than *min_num_images* load successfully.
    NOTE(review): scipy.ndimage.imread was removed in SciPy >= 1.2; this code
    requires an old SciPy (or porting to imageio.imread).
    NOTE(review): the shape-mismatch Exception below is NOT caught by the
    IOError handler, so a single malformed image aborts the whole load —
    confirm that is intended.
    """
    image_files = os.listdir(folder)
    dataset = np.ndarray(shape=(len(image_files), image_size, image_size, 3), dtype=np.float32)
    print(dataset.shape)
    num_images = 0
    for image_index, image in enumerate(image_files):
        image_file = os.path.join(folder, image)
        try:
            # Centre pixel values around zero: (x - 127.5) / 255.
            image_data = (ndimage.imread(image_file).astype(float) - pixel_depth / 2) / pixel_depth
            print(image_data.shape)
            if image_data.shape != (image_size, image_size, 3):
                raise Exception('Unexpected image shape: %s' % str(image_data.shape))
            dataset[num_images, :, :] = image_data
            num_images = num_images + 1
        except IOError as e:
            print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.')
    # Trim the rows left unused by skipped images.
    dataset = dataset[0:num_images, :, :]
    if num_images < min_num_images:
        raise Exception('Many fewer images than expected: %d < %d' %
                        (num_images, min_num_images))
    print('Full dataset tensor:', dataset.shape)
    print('Mean:', np.mean(dataset))
    print('Standard deviation:', np.std(dataset))
    return dataset
def maybe_pickle(data_folders, min_num_images_per_class, force=False):
    """Pickle each class folder under *data_folders* to '<class>.pickle'.

    Existing pickles are kept unless *force* is True.
    :return: list of pickle file paths, one per class folder.
    """
    dataset_names = []
    folders_list = os.listdir(data_folders)
    for folder in folders_list:
        #print(os.path.join(data_folders, folder))
        curr_folder_path = os.path.join(data_folders, folder)
        if os.path.isdir(curr_folder_path):
            set_filename = curr_folder_path + '.pickle'
            dataset_names.append(set_filename)
            if os.path.exists(set_filename) and not force:
                # # You may override by setting force=True.
                print('%s already present - Skipping pickling.' % set_filename)
            else:
                print('Pickling %s.' % set_filename)
                dataset = load_letter(curr_folder_path, min_num_images_per_class)
                try:
                    with open(set_filename, 'wb') as f:
                        pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)
                        # NOTE(review): f.close() is redundant inside the with block.
                        f.close()
                except Exception as e:
                    print('Unable to save data to', set_filename, ':', e)
    return dataset_names
# force=True: re-pickle both datasets on every run.
train_datasets = maybe_pickle(train_folder, 89, True)
test_datasets = maybe_pickle(test_folder, 10, True)
def make_arrays(nb_rows, img_size):
    """Allocate an uninitialised (nb_rows, img_size, img_size, 3) float32 dataset
    plus an int32 label vector; returns (None, None) when nb_rows is falsy."""
    if not nb_rows:
        return None, None
    dataset = np.ndarray((nb_rows, img_size, img_size, 3), dtype=np.float32)
    labels = np.ndarray(nb_rows, dtype=np.int32)
    return dataset, labels
def merge_datasets(pickle_files, train_size, valid_size=0):
    """Combine per-class pickles into stratified train/validation tensors.

    After shuffling each class, the first `valid_size // num_classes` rows go
    to validation and the next `train_size // num_classes` rows to training;
    labels are the class indices in `pickle_files` order.
    :return: (valid_dataset, valid_labels, train_dataset, train_labels)
    """
    num_classes = len(pickle_files)
    valid_dataset, valid_labels = make_arrays(valid_size, image_size)
    train_dataset, train_labels = make_arrays(train_size, image_size)
    vsize_per_class = valid_size // num_classes
    tsize_per_class = train_size // num_classes
    start_v, start_t = 0, 0
    end_v, end_t = vsize_per_class, tsize_per_class
    end_l = vsize_per_class+tsize_per_class
    for label, pickle_file in enumerate(pickle_files):
        try:
            with open(pickle_file, 'rb') as f:
                letter_set = pickle.load(f)
                f.close()
                # let's shuffle the letters to have random validation and training set
                np.random.shuffle(letter_set)
                if valid_dataset is not None:
                    valid_letter = letter_set[:vsize_per_class, :, :]
                    valid_dataset[start_v:end_v, :, :] = valid_letter
                    valid_labels[start_v:end_v] = label
                    start_v += vsize_per_class
                    end_v += vsize_per_class
                train_letter = letter_set[vsize_per_class:end_l, :, :]
                train_dataset[start_t:end_t, :, :] = train_letter
                train_labels[start_t:end_t] = label
                start_t += tsize_per_class
                end_t += tsize_per_class
        except Exception as e:
            print('Unable to process data from', pickle_file, ':', e)
            raise
    return valid_dataset, valid_labels, train_dataset, train_labels
# Build the train/validation split from the training pickles only.
train_size = 89
valid_size = 10
valid_dataset, valid_labels, train_dataset, train_labels = merge_datasets(
    train_datasets, train_size, valid_size)
# _, _, test_dataset, test_labels = merge_datasets(test_datasets, test_size)
print('Training:', train_dataset.shape, train_labels.shape)
print('Validation:', valid_dataset.shape, valid_labels.shape)
# print('Testing:', test_dataset.shape, test_labels.shape)
def randomize(dataset, labels):
    """Shuffle dataset rows and labels with one shared random permutation."""
    order = np.random.permutation(labels.shape[0])
    return dataset[order, :, :], labels[order]
# Shuffle so the per-class ordering from the pickles does not leak into
# batch order downstream.
train_dataset, train_labels = randomize(train_dataset, train_labels)
# test_dataset, test_labels = randomize(test_dataset, test_labels)
valid_dataset, valid_labels = randomize(valid_dataset, valid_labels)
pickle_file = './bacteria.pickle'
try:
    save = {
        'train_dataset': train_dataset,
        'train_labels': train_labels,
        'valid_dataset': valid_dataset,
        'valid_labels': valid_labels,
    }
    # Context manager closes the file even when pickle.dump raises
    # (the original leaked the handle on failure).
    with open(pickle_file, 'wb') as f:
        pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
except Exception as e:
    print('Unable to save data to', pickle_file, ':', e)
    raise
statinfo = os.stat(pickle_file)
print('Compressed pickle size:', statinfo.st_size)
|
7,232 | d1864f454b1909196fd9a6e2279b23f4c4148917 | # MEDIUM
# TLE if decrement divisor only
# Bit manipulation.
# input: 100 / 3
# times = 0
# 3 << 0 = 3
# 3 << 1 = 6
# 3 << 2 = 12
# 3 << 3 = 24
# 3 << 4 = 48
# 3 << 5 = 96
# 3 << 6 = 192 => greater than dividend 100 => stop here
# times -=1 because 3 << 6 is too big
# result += 1 << times => divided by 32
# set dividend to dividend -= divisor << times
# times O(log N) Space O(1)
class Solution:
    def divide(self, dividend: int, divisor: int) -> int:
        """Integer division without *, /, or %, truncating toward zero.

        Repeatedly subtracts the largest left-shifted multiple of the divisor,
        accumulating the matching power of two; the single 32-bit overflow
        case (-2**31 / -1) is clamped to 2**31 - 1.
        O(log^2 N) time, O(1) space.
        """
        INT_MIN, INT_MAX = -2 ** 31, 2 ** 31 - 1
        if dividend == INT_MIN and divisor == -1:
            return INT_MAX  # would overflow the 32-bit range
        if dividend == 0:
            return 0
        # The quotient is negative iff the operands differ in sign.
        negative = (dividend < 0) != (divisor < 0)
        remaining, base = abs(dividend), abs(divisor)
        quotient = 0
        while remaining >= base:
            shift = 0
            while remaining >= base << shift:
                shift += 1
            # base << shift overshot, so use the previous power of two.
            quotient += 1 << (shift - 1)
            remaining -= base << (shift - 1)
        return -quotient if negative else quotient
|
7,233 | 65c0d940bacc2d016121812c435cc60f3fc1ba90 | #!usr/bin/env python
#-*- coding:utf-8 -*-
# this model is for decision tree
# objective: To cluster different service
# JialongLi 2017/03/18
import re
import os
import sys
import pickle
import copy
import random
import pydotplus
USER_NUM = 1000
reload(sys)
sys.setdefaultencoding( "utf-8" )
from sklearn import tree
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.cluster import KMeans
# 0 represent Sunday, 1: Monday, 6: Saturday, 0: Sunday
day_index = {'0507': 1, '0508': 2, '0509': 3, '0510': 4, '0511': 5, '0512': 6, '0513': 0,
'0604': 1, '0605': 2, '0606': 3, '0607': 4, '0608': 5, '0609': 6, '0610': 0,
'0702': 1, '0703': 2, '0704': 3, '0705': 4, '0706': 5, '0707': 6, '0708': 0,
'0806': 1, '0807': 2, '0808': 3, '0809': 4, '0810': 5, '0811': 6, '0812': 0}
service_type = ['I', 'F', 'W', 'G', 'S', 'V']
# get activity_dict
# user's activity: default value is 'F'
# format: {id_1:{'0507': [24/PERIOD], '0508': ['I', 'W', 'G']}, id_2}
def get_activity_dict(activity_dict_path):
    """Load the pickled activity dict: {real_id: {date: [service per period]}}."""
    pkl_file = open(activity_dict_path, 'rb')
    activity_dict = pickle.load(pkl_file)
    pkl_file.close()
    return activity_dict
# data are divided into train data and test data
# first three weeks: train data; last week: test data
# train_dict and test_dict are subset of activity_dict, id format is different
# activity_dict format: {real id_1:{'0507': [24/PERIOD], '0508': ['I', 'W', 'G']}, id_2}
# user_id_index: key = number, value = real id
def data_segement(activity_dict, train_dict_path, test_dict_path, user_id_index_path):
    """Split activity_dict into train (non-August) and test (August) pickles.

    Real user ids are replaced by sequential ints; the {number: real_id}
    mapping is pickled to user_id_index_path.
    """
    train_dict = {}
    test_dict = {}
    user_count = 0
    user_id_index = {}
    for key_0, value_0 in activity_dict.items(): # key_0: real user_id
        train_dict[user_count] = {}
        test_dict[user_count] = {}
        user_id_index[user_count] = key_0
        for key, value in value_0.items():
            if key[1] == '8': # data of August, test set
                test_dict[user_count][key] = value
            else:
                train_dict[user_count][key] = value # train set
        user_count += 1
    output_1 = open(train_dict_path, 'wb')
    pickle.dump(train_dict, output_1)
    output_2 = open(test_dict_path, 'wb')
    pickle.dump(test_dict, output_2)
    output_3 = open(user_id_index_path, 'wb')
    pickle.dump(user_id_index, output_3)
    output_1.close()
    output_2.close()
    output_3.close()
# get train data and test data
# train_dict, test_dict format: {number id_1:{'0507': [24/PERIOD], '0508': ['I', 'W', 'G']}, id_2}
def get_data(train_dict_path, test_dict_path, user_id_index_path):
    """Load the train/test activity dicts and the id mapping from their pickles."""
    pkl_file_1 = open(train_dict_path, 'rb')
    pkl_file_2 = open(test_dict_path, 'rb')
    pkl_file_3 = open(user_id_index_path, 'rb')
    train_dict = pickle.load(pkl_file_1)
    test_dict = pickle.load(pkl_file_2)
    user_id_index = pickle.load(pkl_file_3)
    pkl_file_1.close()
    pkl_file_2.close()
    pkl_file_3.close()
    return train_dict, test_dict, user_id_index
# get profile
def get_profile(profile_path):
    """Load the {real user_id: [gender, age, edu, job]} profile dict from a pickle.

    The original never closed the file handle; `with` closes it even on error.
    """
    with open(profile_path, 'rb') as pkl_file:
        return pickle.load(pkl_file)
# select different features
# feature format: [user_id, gender, age, edu, job, hour, date], 7 features
# profile: dict, {real user_id: [gender, age, edu, job]}
# feature format: double list, outer list element is a sample: [number user_id, gender, age, edu, job, hour, date]
# category format: list, element is service type, length = feature
def feature_select(data_dict, profile, user_id_index, is_over_sampling):
    """Flatten activity dicts into per-hour samples and binary labels ('F'/'O').

    Each sample is [user_id, gender, age, edu, job, hour, weekday]. With
    is_over_sampling set, non-'F' samples are duplicated over_sampling_num
    times with the job feature randomly perturbed (currently 0 copies, i.e.
    over-sampling is effectively disabled).
    """
    feature = []
    category = []
    over_sampling_num = 0
    for user_id, all_dates in data_dict.items():
        real_user_id = user_id_index[user_id]
        one_user_profile = copy.deepcopy(profile[real_user_id]) # gender, age, edu, job
        one_user_profile.insert(0, user_id) # insert user_id
        for date, activity in all_dates.items():
            for i in range(len(activity)):
                if 1: #activity[i] != 'F': # do not add 'F'
                    sample = copy.deepcopy(one_user_profile)
                    #del(sample[1:4])
                    sample.append(i) #(int(i/6)) # i represents hour
                    sample.append(day_index[date]) # day_index: 7 days in one week
                    feature.append(sample)
                    #category.append(activity[i])
                    # Collapse all non-idle services into a single 'O' class.
                    if activity[i] == 'F':
                        category.append('F')
                    else:
                        category.append('O')
                    if is_over_sampling and len(sample) > 5: # make sure that features are completed
                        if activity[i] != 'F':
                            sample_over = [[] for k in range(over_sampling_num)]
                            for j in range(over_sampling_num):
                                sample_over[j] = copy.deepcopy(sample)
                                sample_over[j][-3] = random.randint(0, 8) # random disturbance in job feature
                                feature.append(sample_over[j])
                                category.append('O')
    return feature, category
# build features, all features
# False means test data do not need over sampling
def feature_build(train_dict, test_dict, profile, user_id_index):
    """Build (feature, category) pairs for the train and test sets.

    Over-sampling is enabled for training only; the test data keeps its
    natural class balance.
    """
    feature_train, category_train = feature_select(train_dict, profile, user_id_index, True)
    feature_test, category_test = feature_select(test_dict, profile, user_id_index, False)
    return feature_train, feature_test, category_train, category_test
# calculating the hit rate
def cal_hit_rate(category_predict, category_test):
    """Print overall prediction accuracy (fraction of exact label matches)."""
    hit_count = 0
    sample_test_count = len(category_predict)
    for i in range(sample_test_count):
        if category_predict[i] == category_test[i]:
            hit_count += 1
    hit_rate = float(hit_count) / float(sample_test_count)
    print 'hit rate: ' + str(round(hit_rate, 4) * 100) + '%'
# calculating F value
def calculating_F_value(category_predict, category_test):
    """Print precision, recall and F1, treating 'F' (idle) as the negative class."""
    n_predict = 0
    n_origin = 0
    hit_count = 0
    for item in category_predict:
        if item != 'F':
            n_predict += 1
    for item in category_test:
        if item != 'F':
            n_origin += 1
    # True positives: predicted non-idle and exactly matching the ground truth.
    for i in range(len(category_predict)):
        if category_predict[i] != 'F' and category_predict[i] == category_test[i]:
            hit_count += 1
    precision = float(hit_count) / float(n_predict)
    recall = float(hit_count) / float(n_origin)
    F_value = 2 * precision * recall / (precision + recall)
    print 'n_predict: ' + str(n_predict)
    print 'n_origin: ' + str(n_origin)
    print 'precision: ' + str(round(precision, 3))
    print 'recall: ' + str(round(recall, 3))
    print 'F_value: ' + str(round(F_value, 3))
# 1. select the service type using most in that period in past days
# 2. if user did not use service in that period before, select the service type using most in past days
# 3. if user did not use service before, select service randomly
# service_count_hour: key = (user_id, hour, service_type) value = count
# service_count_past: key = (user_id, service_type) value = count
# service_hour: key = (user_id, hour), value = [service_type, count]
# service_past: key = user_id, value = [service_type, count]
def conventional_method_Mused(feature_train, feature_test, category_train):
    """Baseline: predict each user's most-used service per hour.

    Fallbacks: the user's overall most-used service, then a random service.
    """
    if len(feature_train[0]) != 7:
        print 'feature wrong'
    service_count_hour = {}
    service_count_past = {}
    for i in range(len(feature_train)):
        # (user_id, hour, service) -> occurrence count.
        key_hour = (feature_train[i][0], feature_train[i][5], category_train[i])
        if key_hour not in service_count_hour:
            service_count_hour[key_hour] = 1
        else:
            service_count_hour[key_hour] += 1
        # (user_id, service) -> occurrence count across all hours.
        key_past = (feature_train[i][0], category_train[i])
        if key_past not in service_count_past:
            service_count_past[key_past] = 1
        else:
            service_count_past[key_past] += 1
    service_hour = {}
    service_past = {}
    # Reduce the per-(user, hour) counts to the single most frequent service.
    for key, value in service_count_hour.items():
        key_hour = (key[0], key[1])
        if key_hour not in service_hour:
            service_hour[key_hour] = [key[2], value]
        else:
            if value > service_hour[key_hour][1]:
                service_hour[key_hour] = [key[2], value]
            else:
                pass
    # Same reduction per user over all hours.
    for key, value in service_count_past.items():
        key_past = key[0]
        if key_past not in service_past:
            service_past[key_past] = [key[1], value]
        else:
            if value > service_past[key_past][1]:
                service_past[key_past] = [key[1], value]
            else:
                pass
    category_predict = []
    for i in range(len(feature_test)):
        key_0 = (feature_test[i][0], feature_test[i][5])
        key_1 = feature_test[i][0]
        if key_0 in service_hour:
            value_0 = service_hour[key_0]
            category_predict.append(value_0[0])
        elif key_1 in service_past:
            value_1 = service_past[key_1]
            category_predict.append(value_1[0])
        else:
            random_num = random.randint(0, len(service_type)-1)
            category_predict.append(service_type[random_num])
    return category_predict
# method 2: service in last week
def conventional_method_Lweek(feature_train, feature_test, category_train):
    """Baseline: predict each (user, weekday, hour) slot with the service seen
    there most recently in training (later weeks overwrite earlier ones)."""
    if len(feature_train[0]) != 7:
        print 'feature wrong'
    # 'FFF' is a placeholder for slots never observed in training.
    category_predict = ['FFF' for i in range(len(feature_test))]
    for i in range(len(feature_train)):
        sample = feature_train[i]
        user_id = sample[0]
        hour = sample[-2]
        date = sample[-1]
        if date == 0: # 0 means it is Sunday and should be the last
            date = 7
        else:
            pass
        # 168 = 24 hours * 7 days per user.
        service_position = user_id * 168 + (date - 1) * 24 + hour
        category_predict[service_position] = category_train[i]
    return category_predict
# decision tree
def decision_tree(feature_train, feature_test, category_train):
    """Fit a decision tree and binarise predictions to 'F' (idle) vs 'O' (active)."""
    clf = tree.DecisionTreeClassifier()
    clf = clf.fit(feature_train, category_train)
    category_predict = clf.predict(feature_test) # the format of category_predict is weird
    # Copy the (numpy) prediction array into a plain list of 'F'/'O' labels.
    category_Dtree = []
    for item in category_predict:
        if item == 'F':
            category_Dtree.append('F')
        else:
            category_Dtree.append('O')
    return category_Dtree
# random forests
def random_forests(feature_train, feature_test, category_train):
    """Fit an 80-tree random forest and binarise predictions to 'F' vs 'O'."""
    clf = RandomForestClassifier(n_estimators = 80)
    clf = clf.fit(feature_train, category_train)
    category_predict = clf.predict(feature_test)
    # Copy the (numpy) prediction array into a plain list of 'F'/'O' labels.
    category_RF = []
    for item in category_predict:
        if item == 'F':
            category_RF.append('F')
        else:
            category_RF.append('O')
    return category_RF
# save user_activity as pkl file for migration.py
def user_activity_save(user_activity, user_activity_path):
    """Pickle the per-user weekly activity dict for migration.py."""
    output = open(user_activity_path, 'wb')
    pickle.dump(user_activity, output)
    output.close()
# user_activity is for migration.py
# key = user_id, range(1000), value = ['F', 'G'...], length is 7 * 24 = 168
def activity_restore(feature, category):
    """Rebuild {user_id: 168-slot weekly activity list} from flat samples.

    Slots never covered by a sample keep the 'FFF' placeholder.
    """
    if len(feature[0]) != 7:
        print 'feature wrong'
    user_activity = {}
    for i in range(USER_NUM):
        user_activity[i] = ['FFF' for j in range(168)]
    for i in range(len(feature)):
        sample = feature[i]
        user_id = sample[0]
        hour = sample[5]
        date = sample[-1]
        if date == 0: # 0 means it is Sunday and should be the last
            date = 7
        else:
            pass
        # Weekly slot index: (weekday - 1) * 24 + hour, 0..167.
        position = (date - 1) * 24 + hour
        user_activity[user_id][position] = category[i]
    return user_activity
def counting_accuate_rate(category_Dtree, category_test):
    """Print the 2x2 confusion counts for active ('O') vs idle ('F') labels."""
    on_on = 0
    on_off = 0
    off_on = 0
    off_off = 0
    print len(category_test)
    print len(category_Dtree)
    # NOTE(review): 21504 is a hard-coded sample count replacing
    # len(category_Dtree); verify it matches the actual test-set size.
    for i in range(21504): #(len(category_Dtree)):
        if category_Dtree[i] == 'O' and category_test[i] == 'O':
            on_on += 1
        elif category_Dtree[i] == 'O' and category_test[i] == 'F':
            on_off += 1
        elif category_Dtree[i] == 'F' and category_test[i] == 'O':
            off_on += 1
        else:
            off_off += 1
    print 'on_on' + '\t' + str(on_on)
    print 'on_off' + '\t' + str(on_off)
    print 'off_on' + '\t' + str(off_on)
    print 'off_off' + '\t' + str(off_off)
# save file for sleep.py
def save_file_for_sleep(category_predict, category_test):
    """Pickle the predicted and true category lists for sleep.py."""
    category_predict_path = '../data/category_predict_Dtree.pkl'
    category_test_path = '../data/category_test.pkl'
    output_1 = open(category_predict_path, 'wb')
    pickle.dump(category_predict, output_1)
    output_2 = open(category_test_path, 'wb')
    pickle.dump(category_test, output_2)
    output_1.close()
    output_2.close()
if __name__ == '__main__':
    # Pipeline driver: load the pickled train/test split, build features,
    # train a classifier, and (optionally, via the commented sections below)
    # export activity predictions for migration.py / sleep.py.
    '''
    activity_dict_path = '../data/activity_dict.pkl'
    activity_dict = get_activity_dict(activity_dict_path)
    train_dict_path = '../data/train_dict.pkl'
    test_dict_path = '../data/test_dict.pkl'
    user_id_index_path = '../data/user_id_index.pkl'
    data_segement(activity_dict, train_dict_path, test_dict_path, user_id_index_path)
    '''
    train_dict_path = '../data/train_dict.pkl'
    test_dict_path = '../data/test_dict.pkl'
    user_id_index_path = '../data/user_id_index.pkl'
    train_dict, test_dict, user_id_index = get_data(train_dict_path, test_dict_path, user_id_index_path)
    profile_path = '../data/profile.pkl'
    profile = get_profile(profile_path)
    feature_train, feature_test, category_train, category_test = feature_build(train_dict, test_dict, profile, user_id_index)
    print 'feature_train sample: ' + str(feature_train[1000])
    print 'feature_test sample: ' + str(feature_test[1000])
    # decision tree
    category_Dtree = decision_tree(feature_train, feature_test, category_train)
    # random_forests
    #category_RF = random_forests(feature_train, feature_test, category_train)
    # conventional method: most-used service
    #category_Mused = conventional_method_Mused(feature_train, feature_test, category_train)
    # conventional method: last-week service
    #category_Lweek = conventional_method_Lweek(feature_train, feature_test, category_train)
    #cal_hit_rate(category_Dtree, category_test)
    #calculating_F_value(category_Dtree, category_test)
    #counting_accuate_rate(category_Dtree, category_test)
    #save_file_for_sleep(category_Dtree, category_test)
    # this part is for migration.py
    '''
    # origin data, user_activity_origin is users' real behavior
    user_activity_origin_path = '../data/user_activity_test/user_activity_origin.pkl'
    user_activity_origin = activity_restore(feature_test, category_test)
    user_activity_save(user_activity_origin, user_activity_origin_path)
    '''
    '''
    # predition data using decision_tree
    user_activity_Dtree_path = '../data/user_activity_test/user_activity_Dtree.pkl'
    user_activity_Dtree = activity_restore(feature_test, category_Dtree)
    user_activity_save(user_activity_Dtree, user_activity_Dtree_path)
    '''
    '''
    # predition data according to users' most-used service
    user_activity_Mused_path = '../data/user_activity_test/user_activity_Mused.pkl'
    user_activity_Mused = activity_restore(feature_test, category_Mused)
    user_activity_save(user_activity_Mused, user_activity_Mused_path)
    '''
    '''
    # predition data according to users' last-week service
    user_activity_Lweek_path = '../data/user_activity_test/user_activity_Lweek.pkl'
    user_activity_Lweek = activity_restore(feature_test, category_Lweek)
    user_activity_save(user_activity_Lweek, user_activity_Lweek_path)
    '''
7,234 | 5dc201f743705d6a57dfb61ec2cc2a827db0ba25 | # -*- coding:utf-8 -*-
# Classe com os dados de um cliente que entra no sistema simulado.
class Client:
    """Holds the data of one client moving through the simulated system."""
    def __init__(self, id, color):
        # Client identifier, used by the correctness test.
        self.id = id
        # Arrival time at each server (queue 1 and queue 2).
        self.arrival = {}
        # Departure time from each server (queue 1 and queue 2).
        self.leave = {}
        # Time spent in each server (queue 1 and queue 2).
        self.server = {}
        # Which queue the client currently occupies.
        self.queue = 0
        # Whether the client has already been served and left the system.
        self.served = 0
        # Client colour (TRANSIENT or EQUILIBRIUM phase marker).
        self.color = color
    def set_arrival(self, arrival):
        self.arrival[self.queue] = arrival
    def set_leave(self, leave):
        self.leave[self.queue] = leave
    def set_server(self, server):
        self.server[self.queue] = server
    def set_queue(self, queue):
        self.queue = queue
    def set_served(self, served):
        self.served = served
    # Queue waiting time = time of leaving the queue for the server minus
    # time of arrival at the queue.
    def wait(self, queue):
        return (self.leave[queue] - self.arrival[queue])
|
7,235 | c5e003d625d7798eaf4ef5bca28f6311edccb316 | #!/usr/bin/env python3
# This is a tool to export the WA framework answers to a XLSX file
#
# This code is only for use in Well-Architected labs
# *** NOT FOR PRODUCTION USE ***
#
# Licensed under the Apache 2.0 and MITnoAttr License.
#
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
# https://aws.amazon.com/apache2.0/
import botocore
import boto3
import json
import datetime
import logging
import jmespath
import xlsxwriter
import argparse
from pkg_resources import packaging
import urllib.request
from bs4 import BeautifulSoup, NavigableString, Tag
__author__ = "Eric Pullen"
__email__ = "eppullen@amazon.com"
__copyright__ = "Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved."
__credits__ = ["Eric Pullen"]
__version__ = "0.1"
# Default region listed here
REGION_NAME = "us-east-1"

# Module-level placeholders referenced by the helper functions below.
# NOTE(review): several helpers fall back to this module-level `response`
# if their API call raises -- see the per-function notes.
blankjson = {}
response = ""

# Setup Logging
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
)
logger = logging.getLogger()
# Quiet the very chatty AWS/HTTP libraries.
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
logging.getLogger('s3transfer').setLevel(logging.CRITICAL)
logging.getLogger('urllib3').setLevel(logging.CRITICAL)

# Command-line interface definition.
PARSER = argparse.ArgumentParser(
    formatter_class=argparse.RawDescriptionHelpFormatter,
    description='''\
This utility has two options to run:
------------------------------------
1) If you provide a workloadid, this will gather all of the answers across all Well-Architected Lenss and export them to a spreadsheet.
2) If you do not provide a workloadid, the utility will generate a TEMP workload and auto-answer every question. It will then generate a spreadsheet with all of the questions, best practices, and even the improvement plan links for each.
'''
)
PARSER.add_argument('-p','--profile', required=False, default="default", help='AWS CLI Profile Name')
PARSER.add_argument('-r','--region', required=False, default="us-east-1", help='From Region Name. Example: us-east-1')
PARSER.add_argument('-w','--workloadid', required=False, default="", help='Workload Id to use instead of creating a TEMP workload')
PARSER.add_argument('-k','--keeptempworkload', action='store_true', help='If you want to keep the TEMP workload created at the end of the export')
PARSER.add_argument('-f','--fileName', required=True, default="./demo.xlsx", help='FileName to export XLSX')
PARSER.add_argument('-v','--debug', action='store_true', help='print debug messages to stderr')
ARGUMENTS = PARSER.parse_args()

# Materialize parsed arguments as module-level constants used throughout.
PROFILE = ARGUMENTS.profile
FILENAME = ARGUMENTS.fileName
REGION_NAME = ARGUMENTS.region
WORKLOADID = ARGUMENTS.workloadid
KEEPTEMP = ARGUMENTS.keeptempworkload
# -v/--debug raises the log level from INFO to DEBUG.
if ARGUMENTS.debug:
    logger.setLevel(logging.DEBUG)
else:
    logger.setLevel(logging.INFO)

# To map our short hand names in the console to the API defined pillars
# Example: print(PILLAR_PARSE_MAP['performance'])
PILLAR_PARSE_MAP = {
    "operationalExcellence": "OPS",
    "security": "SEC",
    "reliability": "REL",
    "performance": "PERF",
    "costOptimization": "COST"
}
# Human-readable pillar names used in the spreadsheet output.
PILLAR_PROPER_NAME_MAP = {
    "operationalExcellence": "Operational Excellence",
    "security": "Security",
    "reliability": "Reliability",
    "performance": "Performance Efficiency",
    "costOptimization": "Cost Optimization"
}
# Helper class to convert a datetime item to JSON.
class DateTimeEncoder(json.JSONEncoder):
    """JSON encoder that serializes datetime objects as their str() form."""

    def default(self, z):
        # Only datetimes get special treatment; everything else defers to
        # the base class (which raises TypeError for unknown types).
        if not isinstance(z, datetime.datetime):
            return super().default(z)
        return str(z)
def CreateNewWorkload(
    waclient,
    workloadName,
    description,
    reviewOwner,
    environment,
    awsRegions,
    lenses,
    tags,
    pillarPriorities,
    notes="",
    nonAwsRegions=[],
    architecturalDesign='',
    industryType='',
    industry='',
    accountIds=[]
):
    """Create a Well-Architected workload and return (workloadId, workloadARN).

    If a workload with the same name already exists, look it up and return
    the existing workload's id/ARN instead of failing.

    NOTE(review): `tags` and `pillarPriorities` are accepted but never passed
    to the API call. The [] defaults are mutable and shared across calls.
    NOTE(review): after ParamValidationError/ClientError execution falls
    through to the response[...] lookups below, which will fail against the
    module-level `response` placeholder.
    """
    # Create your workload
    try:
        response=waclient.create_workload(
            WorkloadName=workloadName,
            Description=description,
            ReviewOwner=reviewOwner,
            Environment=environment,
            AwsRegions=awsRegions,
            Lenses=lenses,
            NonAwsRegions=nonAwsRegions,
            ArchitecturalDesign=architecturalDesign,
            IndustryType=industryType,
            Industry=industry,
            Notes=notes,
            AccountIds=accountIds
        )
    except waclient.exceptions.ConflictException as e:
        # Name collision: reuse the existing workload instead of failing.
        workloadId,workloadARN = FindWorkload(waclient,workloadName)
        logger.error("ERROR - The workload name %s already exists as workloadId %s" % (workloadName, workloadId))
        return workloadId, workloadARN
    except botocore.exceptions.ParamValidationError as e:
        logger.error("ERROR - Parameter validation error: %s" % e)
    except botocore.exceptions.ClientError as e:
        logger.error("ERROR - Unexpected error: %s" % e)
    workloadId = response['WorkloadId']
    workloadARN = response['WorkloadArn']
    return workloadId, workloadARN
def FindWorkload(
    waclient,
    workloadName
):
    """Look up a workload by name prefix; return (workloadId, workloadArn).

    Uses the first match returned by list_workloads.
    NOTE(review): raises IndexError if no workload matches, and uses the
    module-level `response` if the API call itself raised.
    """
    # Finding your WorkloadId
    try:
        response=waclient.list_workloads(
            WorkloadNamePrefix=workloadName
        )
    except botocore.exceptions.ParamValidationError as e:
        logger.error("ERROR - Parameter validation error: %s" % e)
    except botocore.exceptions.ClientError as e:
        logger.error("ERROR - Unexpected error: %s" % e)
    # print("Full JSON:",json.dumps(response['WorkloadSummaries'], cls=DateTimeEncoder))
    workloadId = response['WorkloadSummaries'][0]['WorkloadId']
    workloadArn = response['WorkloadSummaries'][0]['WorkloadArn']
    # print("WorkloadId",workloadId)
    return workloadId, workloadArn
def DeleteWorkload(
    waclient,
    workloadId
):
    """Delete the given workload; errors are logged but not re-raised."""
    # Delete the WorkloadId
    try:
        response=waclient.delete_workload(
            WorkloadId=workloadId
        )
    except botocore.exceptions.ParamValidationError as e:
        logger.error("ERROR - Parameter validation error: %s" % e)
    except botocore.exceptions.ClientError as e:
        logger.error("ERROR - Unexpected error: %s" % e)
def GetWorkload(
    waclient,
    workloadId
):
    """Fetch and return the 'Workload' dict for workloadId.

    Exits the process on ClientError (e.g. workload not found).
    """
    # Get the WorkloadId
    try:
        response=waclient.get_workload(
            WorkloadId=workloadId
        )
    except botocore.exceptions.ParamValidationError as e:
        logger.error("ERROR - Parameter validation error: %s" % e)
    except botocore.exceptions.ClientError as e:
        logger.error("ERROR - Unexpected error: %s" % e)
        exit()
    # print("Full JSON:",json.dumps(response['Workload'], cls=DateTimeEncoder))
    workload = response['Workload']
    # print("WorkloadId",workloadId)
    return workload
def listLens(
    waclient
):
    """Return the list of all available lens aliases (LensAlias strings)."""
    # List all lenses currently available
    try:
        response=waclient.list_lenses()
    except botocore.exceptions.ParamValidationError as e:
        logger.error("ERROR - Parameter validation error: %s" % e)
    except botocore.exceptions.ClientError as e:
        logger.error("ERROR - Unexpected error: %s" % e)
    # print(json.dumps(response))
    lenses = jmespath.search("LensSummaries[*].LensAlias", response)
    return lenses
def getCurrentLensVersion(
    waclient,
    lensAlias
):
    """Return the LensVersion string for the given lens alias.

    NOTE(review): raises IndexError if the alias is unknown (empty search).
    """
    # List all lenses currently available
    try:
        response=waclient.list_lenses()
    except botocore.exceptions.ParamValidationError as e:
        logger.error("ERROR - Parameter validation error: %s" % e)
    except botocore.exceptions.ClientError as e:
        logger.error("ERROR - Unexpected error: %s" % e)
    # print(json.dumps(response))
    # Filter the lens summaries down to the one matching alias.
    searchString = "LensSummaries[?LensAlias==`"+lensAlias+"`].LensVersion"
    lenses = jmespath.search(searchString, response)
    return lenses[0]
def findAllQuestionId(
    waclient,
    workloadId,
    lensAlias
):
    """Return every AnswerSummary for a lens, gathered pillar by pillar.

    Follows NextToken pagination for each pillar.
    """
    answers = []
    # Due to a bug in some lenses, I have to iterate over each pillar in order to
    # retrieve the correct results.
    for pillar in PILLAR_PARSE_MAP:
        logger.debug("Grabbing answers for %s %s" % (lensAlias, pillar))
        # Find a questionID using the questionTitle
        try:
            response=waclient.list_answers(
                WorkloadId=workloadId,
                LensAlias=lensAlias,
                PillarId=pillar
            )
        except botocore.exceptions.ParamValidationError as e:
            logger.error("ERROR - Parameter validation error: %s" % e)
        except botocore.exceptions.ClientError as e:
            logger.error("ERROR - Unexpected error: %s" % e)
        answers.extend(response["AnswerSummaries"])
        # Keep paging until the API stops returning a NextToken.
        while "NextToken" in response:
            try:
                response = waclient.list_answers(WorkloadId=workloadId,LensAlias=lensAlias,PillarId=pillar,NextToken=response["NextToken"])
            except botocore.exceptions.ParamValidationError as e:
                logger.error("ERROR - Parameter validation error: %s" % e)
            except botocore.exceptions.ClientError as e:
                logger.error("ERROR - Unexpected error: %s" % e)
            answers.extend(response["AnswerSummaries"])
    return answers
def getQuestionDetails(
    waclient,
    workloadId,
    lensAlias,
    questionId
):
    """Fetch one answer and return a 4-tuple of its fields:
    (QuestionDescription, ImprovementPlanUrl, HelpfulResourceUrl, Notes).

    Each element may be None if the field is absent from the API response.
    """
    # Find a answer for a questionId
    try:
        response=waclient.get_answer(
            WorkloadId=workloadId,
            LensAlias=lensAlias,
            QuestionId=questionId
        )
    except botocore.exceptions.ParamValidationError as e:
        logger.error("ERROR - Parameter validation error: %s" % e)
    except botocore.exceptions.ClientError as e:
        logger.error("ERROR - Unexpected error: %s" % e)
    qDescription = jmespath.search("Answer.QuestionDescription", response)
    qImprovementPlanUrl = jmespath.search("Answer.ImprovementPlanUrl", response)
    qHelpfulResourceUrl = jmespath.search("Answer.HelpfulResourceUrl", response)
    qNotes = jmespath.search("Answer.Notes", response)
    return qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes
def updateAnswersForQuestion(
    waclient,
    workloadId,
    lensAlias,
    questionId,
    selectedChoices,
    notes
):
    """Set the selected choices and notes on one question.

    Returns the list of SelectedChoices as echoed back by the API.
    """
    # Update a answer to a question
    try:
        response=waclient.update_answer(
            WorkloadId=workloadId,
            LensAlias=lensAlias,
            QuestionId=questionId,
            SelectedChoices=selectedChoices,
            Notes=notes
        )
    except botocore.exceptions.ParamValidationError as e:
        logger.error("ERROR - Parameter validation error: %s" % e)
    except botocore.exceptions.ClientError as e:
        logger.error("ERROR - Unexpected error: %s" % e)
    # print(json.dumps(response))
    jmesquery = "Answer.SelectedChoices"
    answers = jmespath.search(jmesquery, response)
    return answers
def getImprovementPlanItems(
    waclient,
    workloadId,
    lensAlias,
    QuestionId,
    PillarId,
    ImprovementPlanUrl,
    ChoiceList
):
    """Scrape the improvement-plan web page for per-choice documentation links.

    Returns a dict mapping each choice id found in the page's HTML to the
    href of the first anchor on that line.
    NOTE(review): waclient, workloadId, lensAlias, QuestionId and PillarId
    are accepted but unused in the current implementation.
    """
    # This will parse the IP Items to gather the links we need
    response = {}
    htmlString = ""
    # unanswered = getUnansweredForQuestion(waclient,workloadId,'wellarchitected',QuestionId)
    urlresponse = urllib.request.urlopen(ImprovementPlanUrl)
    htmlBytes = urlresponse.read()
    htmlStr = htmlBytes.decode("utf8")
    # Scan the page line-by-line for lines mentioning a choice id.
    htmlSplit = htmlStr.split('\n')
    ipHTMLList = {}
    for line in htmlSplit:
        for uq in ChoiceList:
            if uq in line:
                parsed = BeautifulSoup(line,features="html.parser")
                ipHTMLList.update({uq: str(parsed.a['href'])})
    return ipHTMLList
def getImprovementPlanHTMLDescription(
    ImprovementPlanUrl,
    PillarId
):
    """Fetch the improvement-plan page and extract one step's HTML.

    The URL fragment (after '#') identifies the step anchor; everything
    between that anchor and the next step (or a closing </div>) is
    collected and returned as (parsed HTML, question id text).
    """
    logger.debug("ImprovementPlanUrl: %s for pillar %s " % (ImprovementPlanUrl,PILLAR_PARSE_MAP[PillarId]))
    stepRaw = ImprovementPlanUrl.rsplit('#')[1]
    # Grab the number of the step we are referencing
    # This will work as long as their are less than 99 steps.
    # NOTE(review): for a two-digit step like "step12" this keeps only the
    # tens digit -- looks wrong; confirm against real anchor names.
    if len(stepRaw) <= 5:
        stepNumber = stepRaw[-1]
    else:
        stepNumber = stepRaw[-2]
    #Generate the string for the step number
    firstItem = "step"+stepNumber
    secondItem = ("step"+str((int(stepNumber)+1)))
    logger.debug ("Going from %s to %s" % (firstItem, secondItem))
    urlresponse = urllib.request.urlopen(ImprovementPlanUrl)
    htmlBytes = urlresponse.read()
    htmlStr = htmlBytes.decode("utf8")
    htmlSplit = htmlStr.split('\n')
    foundit = 0
    ipString = ""
    questionIdText = ""
    # Copy every line between the firstItem anchor and the secondItem anchor
    # (or a closing </div>) into ipString; also pull the question id text
    # from any line mentioning the pillar's shorthand (OPS/SEC/...).
    for i in htmlSplit:
        if PILLAR_PARSE_MAP[PillarId] in i:
            bsparse = BeautifulSoup(i,features="html.parser")
            questionIdText = str(bsparse.text).split(':')[0].strip()
        if (secondItem in i) or ("</div>" in i):
            foundit = 0
        if firstItem in i:
            foundit = 1
            ipString+=i
        elif foundit:
            ipString+=i
    prettyHTML = BeautifulSoup(ipString,features="html.parser")
    # Need to remove all of the "local glossary links" since they point to relative paths
    for a in prettyHTML.findAll('a', 'glossref'):
        a.replaceWithChildren()
    return prettyHTML, questionIdText
def lensTabCreation(
    WACLIENT,
    workloadId,
    lens,
    workbook,
    allQuestionsForLens,
    workloadName="",
    AWSAccountId="",
    workloadDescription=""
):
    """Render one worksheet (tab) in `workbook` for the given lens.

    Writes a workload-overview header, then one row per best-practice
    choice, grouped by pillar and question, alternating row colors per
    question. When the module-level WORKLOADID is set, existing answers
    (SELECTED) and notes are filled in; otherwise blanks are written for
    the reader to complete.
    """
    # Setup some formatting for the workbook
    bold = workbook.add_format({'bold': True})
    bold_border = workbook.add_format({
        'border': 1,
        'border_color': 'black',
        'text_wrap': True
    })
    bold_border_bold = workbook.add_format({
        'border': 1,
        'border_color': 'black',
        'text_wrap': True,
        'font_size': 20,
        'bold': True
    })
    heading = workbook.add_format({
        'font_size': 24,
        'bold': True
    })
    # lineA/lineB: the two alternating row colors (blue/green).
    lineA = workbook.add_format({
        'border': 1,
        'border_color': 'black',
        'bg_color': '#E0EBF6',
        'align': 'top',
        'text_wrap': True
    })
    lineB = workbook.add_format({
        'border': 1,
        'border_color': 'black',
        'bg_color': '#E4EFDC',
        'align': 'top',
        'text_wrap': True
    })
    # "noborder" variants: no bottom border, used for the first row of a
    # question so consecutive rows look merged.
    lineAnoborder = workbook.add_format({
        'border': 0,
        'top': 1,
        'left': 1,
        'right': 1,
        'border_color': 'black',
        'bg_color': '#E0EBF6',
        'align': 'top',
        'text_wrap': True
    })
    lineBnoborder = workbook.add_format({
        'border': 0,
        'top': 1,
        'left': 1,
        'right': 1,
        'border_color': 'black',
        'bg_color': '#E4EFDC',
        'align': 'top',
        'text_wrap': True
    })
    # "hidden" variants: same text written for autofilter purposes but
    # pushed out of sight with an extreme indent.
    lineAhidden = workbook.add_format({
        'border': 0,
        'left': 1,
        'right': 1,
        'border_color': 'black',
        'bg_color': '#E0EBF6',
        'align': 'top',
        'text_wrap': False,
        'indent': 100
    })
    lineBhidden = workbook.add_format({
        'border': 0,
        'left': 1,
        'right': 1,
        'border_color': 'black',
        'bg_color': '#E4EFDC',
        'align': 'top',
        'text_wrap': False,
        'indent': 100
    })
    sub_heading = workbook.add_format()
    sub_heading.set_font_size(20)
    sub_heading.set_bold(True)
    small_font = workbook.add_format()
    small_font.set_font_size(9)
    # Get the current version of Lens
    logger.debug("Getting lens version for '"+lens+"'")
    versionString = getCurrentLensVersion(WACLIENT,lens)
    logger.debug("Adding worksheet using version "+versionString)
    # Truncate the alias so the tab name stays within Excel's limits.
    lensName = lens[0:18]
    worksheet = workbook.add_worksheet((lensName+' v'+versionString))
    # Print in landscape
    worksheet.set_landscape()
    # Set to 8.5x11 paper size
    worksheet.set_paper(1)
    # Set the column widths
    worksheet.set_column('A:A', 11)
    worksheet.set_column('B:B', 32)
    worksheet.set_column('C:C', 56)
    worksheet.set_column('D:D', 29)
    worksheet.set_column('E:E', 57)
    worksheet.set_column('F:F', 18)
    worksheet.set_column('G:G', 70)
    # Top of sheet
    worksheet.merge_range('A1:G1', 'Workload Overview', heading)
    worksheet.merge_range('A3:B3', 'Workload Name', bold_border_bold)
    worksheet.merge_range('A4:B4', 'AWS Account ID', bold_border_bold)
    worksheet.merge_range('A5:B5', 'Workload Description', bold_border_bold)
    # If we are using an existing workload, then display the Name, ID, and Description at the top
    # or else just make it blank
    if WORKLOADID:
        worksheet.write('C3', workloadName, bold_border)
        # AWSAccountId is the workload ARN here; the account id is field 5.
        accountIdParsed = AWSAccountId.split(':')[4]
        worksheet.write('C4', accountIdParsed, bold_border)
        worksheet.write('C5', workloadDescription, bold_border)
    else:
        worksheet.write('C3', '', bold_border)
        worksheet.write('C4', '', bold_border)
        worksheet.write('C5', '', bold_border)
        worksheet.write('D3', 'Enter the name of system', small_font)
        worksheet.write('D4', 'Enter 12-degit AWS account ID', small_font)
        worksheet.write('D5', 'Briefly describe system architecture and workload, flow etc.', small_font)
    # Subheadings for columns
    worksheet.write('A8', 'Pillar', sub_heading)
    worksheet.write('B8', 'Question', sub_heading)
    worksheet.write('C8', 'Explanation', sub_heading)
    worksheet.write('D8', 'Choice (Best Practice)', sub_heading)
    worksheet.write('E8', 'Detail', sub_heading)
    worksheet.write('F8', 'Response', sub_heading)
    worksheet.write('G8', 'Notes (optional)', sub_heading)
    # Freeze the top of the sheet
    worksheet.freeze_panes(8,0)
    # AutoFilter on the first two columns
    worksheet.autofilter('A8:B8')
    # Make it easier to print
    worksheet.repeat_rows(1, 8)
    worksheet.fit_to_pages(1, 99)
    # Starting point for pillar questions
    cellPosition = 8
    # Starting cell look with lineA. Will switch back and forth
    myCell = lineA
    myCellhidden = lineAhidden
    myCellnoborder = lineAnoborder
    for pillar in PILLAR_PARSE_MAP:
        # This is the question number for each pillar (ex: OPS1, OPS2, etc)
        qNum = 1
        # The query will return all questions for a lens and pillar
        jmesquery = "[?PillarId=='"+pillar+"']"
        allQuestionsForPillar = jmespath.search(jmesquery, allQuestionsForLens)
        # For each of the possible answers, parse them and put into the Worksheet
        for answers in allQuestionsForPillar:
            # List all best practices
            questionTitle = PILLAR_PARSE_MAP[answers['PillarId']]+str(qNum)+" - "+answers['QuestionTitle']
            qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes = getQuestionDetails(WACLIENT,workloadId,lens,answers['QuestionId'])
            # Some of the questions have extra whitespaces and I need to remove those to fit into the cell
            qDescription = qDescription.replace('\n        ','').replace('  ','').replace('\t', '').replace('\n', '')
            qDescription = qDescription.rstrip()
            qDescription = qDescription.strip()
            logger.debug("Working on '"+questionTitle+"'")
            logger.debug("It has answers of: "+json.dumps(answers['SelectedChoices']))
            cellID = cellPosition + 1
            # If the question has been answered (which we do for the TEMP workload) we grab the URL and parse for the HTML content
            if qImprovementPlanUrl:
                jmesquery = "[?QuestionId=='"+answers['QuestionId']+"'].Choices[].ChoiceId"
                choiceList = jmespath.search(jmesquery, allQuestionsForLens)
                ipList = getImprovementPlanItems(WACLIENT,workloadId,lens,answers['QuestionId'],answers['PillarId'],qImprovementPlanUrl,choiceList)
            else:
                ipList = []
            startingCellID=cellID
            # If its the first time through this particular pillar question:
            # I want to only write the name once, but I need to fill in
            # each cell with the same data so the autosort works properly
            # (else it will only show the first best practice)
            firstTimePillar=True
            for choices in answers['Choices']:
                # Write the pillar name and question in every cell for autosort, but only show the first one
                cell = 'A'+str(cellID)
                if firstTimePillar:
                    worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar], myCellnoborder)
                    cell = 'B'+str(cellID)
                    worksheet.write(cell, questionTitle, myCellnoborder)
                    firstTimePillar=False
                else:
                    worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar], myCellhidden)
                    cell = 'B'+str(cellID)
                    worksheet.write(cell, questionTitle, myCellhidden)
                # Start writing each of the BP's, details, etc
                cell = 'D'+str(cellID)
                Title = choices['Title'].replace('  ','').replace('\t', '').replace('\n', '')
                # If we scraped an improvement-plan link for this choice,
                # write the title as a hyperlink plus a cell comment.
                if any(choices['ChoiceId'] in d for d in ipList):
                    worksheet.write_url(cell, ipList[choices['ChoiceId']], myCell, string=Title)
                    #ipItemHTML, questionIdText = getImprovementPlanHTMLDescription(ipList[choices['ChoiceId']],answers['PillarId'])
                    #htmlString = ipItemHTML.text
                    htmlString = ""
                    htmlString = htmlString.replace('\n        ','').replace('  ','').replace('\t', '').strip().rstrip()
                    # print(htmlString)
                    worksheet.write_comment(cell, htmlString, {'author': 'Improvement Plan'})
                else:
                    worksheet.write(cell,Title,myCell)
                # Add all Details for each best practice/choice
                cell = 'E'+str(cellID)
                # Remove all of the extra spaces in the description field
                Description = choices['Description'].replace('\n        ','')
                Description = Description.replace('\n  ','')
                Description = Description.replace('  ','').replace('\t', '').replace('\n', '')
                Description = Description.rstrip()
                Description = Description.strip()
                worksheet.write(cell, Description ,myCell)
                # If this is an existing workload, we will show SELECTED if the have it checked
                # I would love to use a XLSX checkbox, but this library doesn't support it
                cell = 'F'+str(cellID)
                responseText = ""
                if choices['ChoiceId'] in answers['SelectedChoices']:
                    responseText = "SELECTED"
                else:
                    responseText = ""
                worksheet.write(cell, responseText ,myCell)
                cellID+=1
            # We are out of the choice/detail/response loop, so know how many rows were consumed
            # and we can create the explanation and notes field to span all of them
            # Explanantion field
            cellMerge = 'C'+str(startingCellID)+':C'+str(cellID-1)
            worksheet.merge_range(cellMerge, qDescription,myCell)
            # Notes field
            cellMerge = 'G'+str(startingCellID)+':G'+str(cellID-1)
            if WORKLOADID:
                worksheet.merge_range(cellMerge, qNotes, myCell)
            else:
                worksheet.merge_range(cellMerge, "", myCell)
            cellID-=1
            # Increase the question number
            qNum += 1
            # Reset the starting cellPosition to the last cellID
            cellPosition = cellID
            # Reset the cell formatting to alternate between the two colors
            if myCell == lineA:
                myCell = lineB
                myCellhidden = lineBhidden
                myCellnoborder = lineBnoborder
            else:
                myCell = lineA
                myCellhidden = lineAhidden
                myCellnoborder = lineAnoborder
def main():
    """Drive the export.

    With -w/--workloadid: read the existing workload's answers and export
    them. Without it: create a TEMP workload, auto-answer every question,
    export, then delete the TEMP workload (unless -k was given).
    """
    boto3_min_version = "1.16.38"
    # Verify if the version of Boto3 we are running has the wellarchitected APIs included
    if (packaging.version.parse(boto3.__version__) < packaging.version.parse(boto3_min_version)):
        logger.error("Your Boto3 version (%s) is less than %s. You must ugprade to run this script (pip3 upgrade boto3)" % (boto3.__version__, boto3_min_version))
        exit()
    logger.info("Script version %s" % __version__)
    logger.info("Starting Boto %s Session" % boto3.__version__)
    # Create a new boto3 session
    SESSION1 = boto3.session.Session(profile_name=PROFILE)
    # Initiate the well-architected session using the region defined above
    WACLIENT = SESSION1.client(
        service_name='wellarchitected',
        region_name=REGION_NAME,
    )
    # If this is an existing workload, we need to query for the various workload properties
    if WORKLOADID:
        logger.info("User specified workload id of %s" % WORKLOADID)
        workloadJson = GetWorkload(WACLIENT,WORKLOADID)
        LENSES = workloadJson['Lenses']
        logger.info("Lenses for %s: %s" % (WORKLOADID, json.dumps(LENSES)))
        WORKLOADNAME = workloadJson['WorkloadName']
        DESCRIPTION = workloadJson['Description']
        REVIEWOWNER = workloadJson['ReviewOwner']
        ENVIRONMENT= workloadJson['Environment']
        AWSREGIONS = workloadJson['AwsRegions']
        workloadId = WORKLOADID
        workloadARN = workloadJson['WorkloadArn']
    else:
        # In order to gather all of the questions, you must create a TEMP Workload
        logger.info("No workload ID specified, we will create a TEMP workload")
        # Grab all lenses that are currently available
        LENSES = listLens(WACLIENT)
        logger.info("Lenses available: "+json.dumps(LENSES))
        # Set the needed workload variables before we create it
        WORKLOADNAME = 'TEMP DO NOT USE WORKLOAD'
        DESCRIPTION = 'TEMP DO NOT USE WORKLOAD'
        REVIEWOWNER = 'WA Python Script'
        ENVIRONMENT= 'PRODUCTION'
        AWSREGIONS = [REGION_NAME]
        # Creating the TEMP workload
        logger.info("Creating a new workload to gather questions and answers")
        workloadId, workloadARN = CreateNewWorkload(WACLIENT,WORKLOADNAME,DESCRIPTION,REVIEWOWNER,ENVIRONMENT,AWSREGIONS,LENSES,"[]","[]")
    # Create an new xlsx file and add a worksheet.
    logger.info("Creating xlsx file '"+FILENAME+"'")
    workbook = xlsxwriter.Workbook(FILENAME)
    workbook.set_size(2800, 1600)
    # Simple hack to get Wellarchitected base framework first (reverse sort)
    # This will no longer work if we ever have a lens that starts with WB*, X, Y, or Z :)
    LENSES.sort(reverse=True)
    # Iterate over each lens that we either have added or is in the workload
    for lens in LENSES:
        # Grab all questions for a particular lens
        allQuestions = findAllQuestionId(WACLIENT,workloadId,lens)
        if WORKLOADID:
            # If this is an existing workload, just go ahead and create the Tab and cells
            logger.debug("Not answering questions for existing workload")
            lensTabCreation(WACLIENT,workloadId,lens,workbook,allQuestions,WORKLOADNAME,workloadARN,DESCRIPTION)
        else:
            # If this is the TEMP workload, we need to first gather all of the questionIDs possible
            jmesquery = "[*].{QuestionId: QuestionId, PillarId: PillarId, Choices: Choices[].ChoiceId}"
            allQuestionIds = jmespath.search(jmesquery, allQuestions)
            # Next we answer all of the questions across all lenses in the TEMP workload
            for question in allQuestionIds:
                logger.debug("Answering question %s in the %s lens" % (question['QuestionId'], lens))
                updateAnswersForQuestion(WACLIENT,workloadId,lens,question['QuestionId'],question['Choices'],'TEMP WORKLOAD - Added by export script')
            # Once the questions have been answered, we go ahead and create the tab for each
            lensTabCreation(WACLIENT,workloadId,lens,workbook,allQuestions)
    # Close out the workbook file
    logger.info("Closing Workbook File")
    workbook.close()
    # If this is TEMP workload, we may remove it if it has not been set to keep
    if not WORKLOADID:
        if not KEEPTEMP:
            logger.info("Removing TEMP Workload")
            DeleteWorkload(WACLIENT, workloadId)
    logger.info("Done")
if __name__ == "__main__":
main()
|
7,236 | df3208a00f7a5dd1ddd76542ac0de85762cc45ab | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
try:
import Image
except ImportError:
from PIL import Image
import sys
sys.path.append(os.path.abspath(os.path.join(__file__, os.pardir, os.pardir, 'DropPy.Common')))
from file_tools import get_file_paths_from_directory
class Task(object):
    """
    Documentation: https://docs.droppyapp.com/tasks/image-rotate
    """
    def __init__(self, input_dir, output_dir, **kwargs):
        # Keyword arguments (str() keeps the keys native strings under
        # `unicode_literals` on Python 2).
        degrees = kwargs.get(str('degrees'), 90.0)
        expand_arg = kwargs.get(str('expand'), True)

        # Translate the boolean flag into PIL's integer `expand` parameter;
        # anything other than a real bool aborts the task.
        if expand_arg is True:
            expand = 1
        elif expand_arg is False:
            expand = 0
        else:
            sys.exit('Argument expand invalid')

        # Rotate top-level files directly; for each first-level directory,
        # mirror it under output_dir and rotate the files it contains.
        for entry_name in os.listdir(input_dir):
            entry_path = os.path.join(input_dir, entry_name)
            if os.path.isfile(entry_path):
                self.rotate_file(entry_path, output_dir, degrees, expand)
            elif os.path.isdir(entry_path):
                mirrored_dir = os.path.join(output_dir, entry_name)
                os.makedirs(mirrored_dir)
                for nested_file in get_file_paths_from_directory(entry_path):
                    self.rotate_file(nested_file, mirrored_dir, degrees, expand)

    @staticmethod
    def rotate_file(input_file, output_dir, degrees, expand):
        """Rotate one image by `degrees` and save it under output_dir."""
        destination = os.path.join(output_dir, os.path.basename(input_file))
        source_image = Image.open(input_file)
        rotated_image = source_image.rotate(degrees, expand=expand)
        rotated_image.save(destination)
|
class Book:
    """Class that defines book model."""

    def __init__(self, title, authors, pub_year):
        # Book title.
        self.title = title
        # Authors of the book (collection of names — assumed; confirm with callers).
        self.authors = authors
        # Year of publication.
        self.pub_year = pub_year

    def __repr__(self):
        """Unambiguous representation for debugging and logging."""
        return (f"{type(self).__name__}(title={self.title!r}, "
                f"authors={self.authors!r}, pub_year={self.pub_year!r})")
|
7,238 | 15eb205e6bd36844fdfc8c05efbc3a3d584c122d | import os , sys , time
# Banner (box-drawing art; original in-string spacing was lost in this copy).
print("""
███████████████████████████████
█ █
█═╬═════════════════════════╬═█
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█ ║░░░░Wi-fi Fucker Tool░░░░║ █
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█ ║░░░░░coded by arda6░░░░░░║ █
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█═╬═════════════════════════╬═█
█ █
███████████████████████████████
""")
# Detect and announce the host platform.
pla = sys.platform
if pla == "win32":
    win = "Windows"
    print(" [!] Your Platform is " +win+ "\n")
elif pla == "darwin":
    mac = "MacOs"
    print(" [+] Your Platform is " +mac+ "\n")
elif pla == "linux":
    mac = "Linux"
    print(" [+] Your Platform is " +mac+"\n")
# Windows is unsupported: warn, pause, and exit with a pointer to the repo.
if pla == "win32":
    print(" [!] Not Suitable For Tool Windows \n")
    time.sleep(3)
    exit(" [#] https://www.github/arda6")
print("")
# Attack selection menu.
print("""
1) Wep Cracking
2) Wpa2 Cracking
3) Deauth Attack
""")
soru = input("root@eyll:~# ")
# Dispatch the chosen attack to its helper script in the same directory.
if soru == '1':
    os.system("python3 main.py")
    exit()
elif soru == '2':
    os.system("python3 wpa2.py")
elif soru == '3':
    os.system("python3 attack.py")
|
7,239 | 7d65e4e925e90d6b013ae2c059cde58538884d22 |
# Read the number as a string so its individual digits can be indexed.
two_digit_number=input("Type a two digit number: ")
# First and second characters are the tens and units digits.
first_digit=two_digit_number[0]
second_digit=two_digit_number[1]
# Convert each digit back to int and print their sum.
print(int(first_digit)+int(second_digit))
7,240 | 07095bc815f5342b66ef4ca74b769321f3ef2ec5 | '''
Calculations used by algorithms
All calculations for training shall have a standard API that takes in `batch` from algorithm.sample() method and return np array for calculation.
`batch` is a dict containing keys to any data type you wish, e.g. {rewards: np.array([...])}
'''
from slm_lab.lib import logger, util
import numpy as np
import torch
import pydash as ps
logger = logger.get_logger(__name__)
# Policy Gradient calc
# advantage functions
def calc_returns(batch, gamma):
    '''
    Calculate the simple returns (full rollout) for advantage,
    i.e. the discounted sum of rewards up to episode termination.
    '''
    rewards = batch['rewards']
    assert not np.any(np.isnan(rewards))
    # Mask so the discounted sum never crosses an episode boundary.
    not_dones = 1 - batch['dones']
    num_steps = len(rewards)
    rets = np.empty(num_steps, 'float32')
    running_ret = 0.0
    t = num_steps - 1
    while t >= 0:
        running_ret = rewards[t] + gamma * running_ret * not_dones[t]
        rets[t] = running_ret
        t -= 1
    return torch.from_numpy(rets).float()
def calc_gammas(batch, gamma):
    '''Calculate the gammas raised to the right power for multiplication with rewards'''
    # A "new" flag marks the first step of each episode: the very first
    # step, plus every step following a done.
    news = torch.cat([torch.ones((1,)), batch['dones'][:-1]])
    gammas = torch.empty_like(news)
    running = 1.0
    for idx, is_new in enumerate(news):
        # Reset to 1 at an episode start; otherwise keep discounting.
        running = is_new * 1.0 + (1 - is_new) * running * gamma
        gammas[idx] = running
    return gammas
def calc_nstep_returns(batch, gamma, n, v_preds):
    '''
    Calculate the n-step returns for advantage.
    see n-step return in: http://www-anw.cs.umass.edu/~barto/courses/cs687/Chapter%207.pdf
    i.e. for each timestep t: sum discounted rewards up till step n (0 to
    n-1), then add the value prediction at step n as the final term.
    '''
    full_rets = calc_returns(batch, gamma)
    horizon = len(full_rets)
    zero_pad = torch.zeros((n,))
    # Subtract the portion of the full rollout beyond the n-step horizon.
    shifted_rets = torch.cat([full_rets[n:], zero_pad])[:horizon]
    # Add back the bootstrap value estimate at step n, discounted correctly.
    gammas = calc_gammas(batch, gamma)
    bootstrap = gammas * v_preds
    bootstrap = torch.cat([bootstrap[n:], zero_pad])[:horizon]
    nstep_rets = full_rets - shifted_rets + bootstrap
    assert not np.isnan(nstep_rets).any(), f'N-step returns has nan: {nstep_rets}'
    return nstep_rets
def calc_gaes(rewards, v_preds, next_v_preds, gamma, lam):
    '''
    Calculate GAE (Generalized Advantage Estimation).
    See http://www.breloff.com/DeepRL-OnlineGAE/ for clear example.
    v_preds are values predicted for current states,
    next_v_preds are values predicted for next states.
    NOTE for standardization trick, do it out of here
    '''
    num_steps = len(rewards)
    assert not np.any(np.isnan(rewards))
    assert num_steps == len(v_preds)
    gaes = np.empty(num_steps, 'float32')
    running_gae = 0.0
    for t in reversed(range(num_steps)):
        # One-step TD error, then fold into the exponentially-weighted sum.
        td_error = rewards[t] + gamma * next_v_preds[t] - v_preds[t]
        running_gae = td_error + gamma * lam * running_gae
        gaes[t] = running_gae
    assert not np.isnan(gaes).any(), f'GAE has nan: {gaes}'
    return torch.from_numpy(gaes).float()
|
7,241 | fe83b45bdc5970d63deab66b26b16752cd8ad8ef | from zope import schema
from zope import interface
from zope import component
from raptus.mailcone.rules_regex import _
from raptus.mailcone.rules import interfaces
class IRegexItem(interfaces.IConditionItem):
    """ Interface for regex match filter
    """

    # The regular expression to match with.
    regex = schema.TextLine(title=_('Regex'),
                            required=True,
                            description=_('a regular expression'))

    # Which mail attribute the regex is applied to; choices come from the
    # named vocabulary registered by raptus.mailcone.mails.
    source = schema.Choice(title=_('Source'),
                           vocabulary='raptus.mailcone.mails.mailattributes',
                           required=True)
7,242 | 742b655ee6aad2575f67e7329ed7a14c4fb6aa06 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import random
# Path to the local chromedriver binary used by Selenium.
PATH = "C:\\Program Files (x86)\\chromedriver.exe"
# Site whose stories will be upvoted.
destination = "https://news.ycombinator.com/"
class hackernewsUpvoter():
    """Selenium bot that signs in to Hacker News and upvotes stories.

    NOTE(review): uses the selenium-3 `find_element(s)_by_*` API, which was
    removed in selenium 4 -- confirm the pinned selenium version.
    """

    def __init__(self, username, password, website):
        # Launches a Chrome instance immediately; `website` is stored but
        # not used by the methods below.
        self.driver = webdriver.Chrome(PATH)
        self.username = username
        self.password = password
        self.website = website

    def sign_in(self, login_page="https://news.ycombinator.com/login"):
        """Fill in and submit the HN login form."""
        # Go to hackernews's website
        self.driver.get(login_page)
        time.sleep(2)
        # Enter username
        account = self.driver.find_element_by_name('acct')
        account.send_keys(self.username)
        # Enter password
        password = self.driver.find_element_by_name('pw')
        password.send_keys(self.password)
        # Random 1.1-3.4s pause to look less bot-like before submitting.
        time.sleep(random.randrange(11,35)/10)
        # Click enter key
        password.send_keys(Keys.RETURN)

    def upvoter(self):
        """Click every upvote arrow on the current page."""
        upvoteButtons = self.driver.find_elements_by_class_name("votearrow")
        # Click every upvote buttons in the page
        for button in upvoteButtons:
            try:
                button.click()
                time.sleep(1)
            except:
                # Arrows already voted on (or hidden) raise; skip them.
                print("The upvote button wasn't clickable")
                pass

    def goto_page(self, page):
        """Navigate directly to listing page number `page`."""
        self.driver.get("https://news.ycombinator.com/news?p={}".format(page))

    def next_page(self):
        """Advance one page by clicking the 'More' link."""
        more = self.driver.find_elements_by_class_name("morelink")
        more[0].click()
# Credentials are read from stdin: first the username, then the password.
bot = hackernewsUpvoter(input(), input(), destination)
bot.sign_in()
# Upvote the current page, then jump to listing pages 3 and 4, pausing a
# random 3-5 seconds between pages.
for i in range(3,5):
    bot.upvoter()
    bot.goto_page(i)
    time.sleep(random.randrange(300,500)/100)
7,243 | 85b8ffe1bca879acd86251e4662b33648b713588 | from django.db import models
from django.contrib.auth.models import AbstractUser, BaseUserManager
class UserManager(BaseUserManager):
    """Custom manager so accounts are created with email only (no username)."""

    # Needed to remove username from the required fields
    def create_user(self, email, password, **kwargs):
        user = self.model(email=email, **kwargs)
        user.set_password(password)
        user.save()
        return user

    def create_superuser(self, email, password, **kwargs):
        # Superusers additionally get staff + superuser flags.
        user = self.model(email=email, is_staff=True, is_superuser=True, **kwargs)
        user.set_password(password)
        user.save()
        return user
class Utilizator(AbstractUser):
    """User profile table.

    nume - extracted automatically from the email ([nume]@gmail.com)
    email - the user logs in with their email address
    parola - ***
    descriere - information the user writes about themselves for potential flatmates
    ocupatie - type of job
    sex - m/f
    varsta - age
    buget - budget
    imagine_profil - profile picture
    cont_admin - backend access; the admin can manage users and listings
    cont_premium: regular: free account, may have only one active listing,
                  premium: paid account, may have one or more active listings,
                           can see statistics about its listings and
                           receives an email when a listing is posted
    A user can have one or more posted listings and/or favourite listings.
    """
    email = models.EmailField(unique=True)
    descriere = models.CharField(max_length=255, blank=True)
    ocupatie = models.CharField(max_length=50, blank=True, default="nespecificat")
    nume = models.CharField(max_length=50, blank=True)
    sex = models.CharField(max_length=1, blank=True, default="N")
    varsta = models.PositiveIntegerField(blank=True, null=True)
    buget = models.PositiveIntegerField(blank=False, null=True)
    telefon = models.CharField(max_length=20, blank=True, default="nespecificat")
    imagine_profil = models.ImageField(blank=True, upload_to="utilizatori/", default="utilizatori/imagine_profil.svg")
    cont_premium = models.BooleanField(default=False)
    token = models.CharField(max_length=1, blank=True)
    # Remove inherited columns that this model does not use.
    first_name = None
    last_name = None
    # Needed so email replaces username as the login field.
    username = None
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = []
    objects = UserManager()

    def __str__(self):
        return f"{self.email}"

    class Meta:
        verbose_name_plural = "Utilizatori"
|
7,244 | 002f65fd77ce5043d1a0495ed13c15e3b4d2fb76 | #!/usr/bin/env python3
import asyncio
import bs4
import itertools
import logging
import sys
import os
import zipfile
from asyncio import TimeoutError
from aiohttp import ClientSession, ClientConnectionError
from aiohttp.client_exceptions import ContentTypeError, ServerDisconnectedError
from bs4 import BeautifulSoup
# Ulrichsweb title-detail page; formatted with a numeric profile id.
ROOT_URL = 'https://ulrichsweb.serialssolutions.com/titleDetails/{}'
DEFAULT_START_ID = 12515
DEFAULT_END_ID = 835018
DEFAULT_RANGE_1 = range(DEFAULT_START_ID, DEFAULT_END_ID)
DEFAULT_RANGE_2 = range(15793473, 15798807)
# Full set of profile ids to crawl (two disjoint id ranges).
DEFAULT_RANGE_IDS = itertools.chain(DEFAULT_RANGE_1, DEFAULT_RANGE_2)
DEFAULT_DIR_HTML = 'data/ulrich/html/'
DEFAULT_MAX_ATTEMPTS = 5
DEFAULT_MODE = 'collect'
DEFAULT_NUM_THREADS = 4
# Max number of simultaneously in-flight requests (asyncio semaphore size).
DEFAULT_SEMAPHORE_LIMIT = 2
# Basic-description attributes extracted into the TSV (emitted in sorted order).
DEFAULT_ATTRS = {'bd_Title', 'bd_ISSN', 'bd_Format', 'bd_Frequency', 'bd_Country'}
def _find_all_tr_pairs(key: str, title_details, profile_id):
try:
return title_details.find('div', {'id': key}).find('table', {'class': 'resultsTable'}).find_all('tr')
except AttributeError:
logging.warning('ID %s (KEY) %s doest not have resultsTable' % (profile_id, key))
def _split_journal_attrs(attrs):
if attrs:
return [t.text.replace(':', '').strip().split('\n') for t in
[k for k in attrs if isinstance(k, bs4.element.Tag)]]
return []
def _get_title_history(history_attrs):
all_td = []
if history_attrs:
for h in history_attrs:
all_td.extend(h.find_all('td'))
if len(all_td) > 0:
return '#'.join([''.join([a.strip() for a in k.text.split('\n')]) for k in all_td if isinstance(k, bs4.element.Tag)])
return ''
def _get_pair_key_values(splitted_attrs, prefix: str):
tmp_dict = {}
for j in splitted_attrs:
tmp_dict[prefix + j[0].replace('\t', ' ')] = '#'.join(
[k.strip().replace('\t', ' ').replace('#', ' ') for k in j[1:] if k.strip() != ''])
return tmp_dict
def html2dict(path_zip_file: str):
    """
    Open, read and parse one zipped html page.

    :param path_zip_file: path of the zip file ('<profile_id>.zip')
    :return: a flat list: [profile_id, <values of sorted(DEFAULT_ATTRS)>,
             title_history] — ready to be written as one TSV row
    """
    # zip is named '<profile_id>.zip'; the archive member keeps the
    # original collection path ('data/ulrich/html/<id>.html')
    profile_id = path_zip_file.split('/')[-1].split('.')[0]
    inner_html_path = 'data/ulrich/html/' + profile_id + '.html'
    html_content = zipfile.ZipFile(path_zip_file).open(inner_html_path).read()
    parsed_data = [profile_id]
    soupped_html = BeautifulSoup(html_content, 'html.parser')
    title_details = soupped_html.find('div', {'id': 'resultPane'})
    basic_description_attrs = _find_all_tr_pairs('basicDescriptionContainer', title_details, profile_id)
    title_history_attrs = _find_all_tr_pairs('titleHistoryContainer', title_details, profile_id)
    bd_splitted = _split_journal_attrs(basic_description_attrs)
    dict_bd = _get_pair_key_values(bd_splitted, 'bd_')
    title_history = _get_title_history(title_history_attrs)
    # emit attributes in a fixed (sorted) order so TSV columns line up
    for k in sorted(DEFAULT_ATTRS):
        parsed_data.append(dict_bd.get(k, ''))
    parsed_data.append(title_history)
    return parsed_data
def save_tsv_file(parsed_data):
    """
    Append one parsed journal as a tab-separated line.

    :param parsed_data: list of string fields as produced by html2dict()

    NOTE(review): relies on the module-level `result_file` handle that is
    opened in the 'parse' branch of the __main__ block.
    """
    result_file.write('\t'.join(parsed_data) + '\n')
def save_into_html_file(path_html_file: str, response):
    """
    Write `response` (page text) to `path_html_file`, then replace the plain
    .html file with a deflate-compressed .zip next to it.

    :param path_html_file: destination path ending in '.html'
    :param response: page body as text
    """
    # Context manager guarantees the handle is closed before zipping
    # (the original used open/close and a redundant zf.close() inside `with`).
    with open(path_html_file, 'w') as html_file:
        html_file.writelines(response)
    with zipfile.ZipFile(path_html_file.replace('.html', '.zip'), 'w') as zf:
        zf.write(path_html_file, compress_type=zipfile.ZIP_DEFLATED)
    # The .html is only an intermediate artifact; keep just the zip.
    os.remove(path_html_file)
async def fetch(url, session):
    """
    Fetch `url` and persist a 200 response as a zipped html file.

    The status handling is retried up to DEFAULT_MAX_ATTEMPTS times;
    per-profile network errors are logged inside the loop, generic ones
    by the outer handlers.
    """
    try:
        async with session.get(url) as response:
            profile_id = url.split('/')[-1]
            print('COLLECTING %s' % profile_id)
            for attempt in range(DEFAULT_MAX_ATTEMPTS):
                try:
                    if response.status == 200:
                        text = await response.text(errors='ignore')
                        save_into_html_file(DEFAULT_DIR_HTML + profile_id + '.html', text)
                        logging.info('COLLECTED: %s' % profile_id)
                        break
                    elif response.status == 500 and attempt == DEFAULT_MAX_ATTEMPTS - 1:
                        # BUGFIX: `attempt` never reaches DEFAULT_MAX_ATTEMPTS
                        # inside range(DEFAULT_MAX_ATTEMPTS), so the original
                        # condition was unreachable; compare against the last
                        # index so the final failed attempt is logged.
                        logging.info('RESPONSE_ERROR_500: %s' % profile_id)
                    elif response.status == 404:
                        logging.info('RESPONSE_ERROR_404: %s' % profile_id)
                except ServerDisconnectedError:
                    logging.info('SERVER_DISCONNECTED_ERROR: %s' % profile_id)
                except TimeoutError:
                    logging.info('TIMEOUT_ERROR: %s' % profile_id)
                except ContentTypeError:
                    logging.info('CONTENT_TYPE_ERROR: %s' % profile_id)
    except TimeoutError:
        logging.info('GENERALIZED_TIMEOUT_ERROR')
    except ClientConnectionError:
        logging.info('GENERALIZED_CLIENT_CONNECTION_ERROR')
    except ServerDisconnectedError:
        logging.info('GENERALIZED_SERVER_DISCONNECTED_ERROR')
    except ContentTypeError:
        logging.info('GENERALIZED_CONTENT_TYPE_ERROR')
async def bound_fetch(sem, url, session):
    """
    Limits the collecting task to a semaphore.

    :param sem: asyncio.Semaphore capping concurrent fetches
    :param url: profile-detail url to collect
    :param session: shared aiohttp ClientSession
    """
    async with sem:
        await fetch(url, session)
async def run():
    """
    Create one collection task per profile id in DEFAULT_RANGE_IDS and await
    them all, sharing a single HTTP session.
    """
    # cap concurrency so the server is not hammered
    sem = asyncio.Semaphore(DEFAULT_SEMAPHORE_LIMIT)
    tasks = []
    async with ClientSession() as session:
        for u in [ROOT_URL.format(jid) for jid in DEFAULT_RANGE_IDS]:
            task = asyncio.ensure_future(bound_fetch(sem, u, session))
            tasks.append(task)
        responses = asyncio.gather(*tasks)
        await responses
if __name__ == "__main__":
    logging.basicConfig(filename='ulrich.log', level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
    # usage: script.py <mode: collect|parse> <html_dir> [mode-specific args]
    MODE = sys.argv[1]
    DIR_HTML = sys.argv[2]
    if MODE == 'collect':
        DEFAULT_DIR_HTML = DIR_HTML
        os.makedirs(DEFAULT_DIR_HTML, exist_ok=True)
        # optional 3rd arg: resume collecting from this profile id
        if len(sys.argv) == 4:
            start_id = int(sys.argv[3])
            DEFAULT_RANGE_IDS = itertools.chain(range(start_id, DEFAULT_END_ID), DEFAULT_RANGE_2)
        loop = asyncio.get_event_loop()
        future = asyncio.ensure_future(run())
        loop.run_until_complete(future)
    elif MODE == 'parse':
        DEFAULT_DIR_HTML = DIR_HTML
        # parse only the [START, END) slice of the sorted directory listing,
        # so several parser processes can split the work
        START = int(sys.argv[3])
        END = int(sys.argv[4])
        if END > len(os.listdir(DEFAULT_DIR_HTML)):
            END = len(os.listdir(DEFAULT_DIR_HTML))
        htmls = sorted([DEFAULT_DIR_HTML + h for h in os.listdir(DIR_HTML)])[START:END]
        # one TSV per slice, named after its start offset
        result_file = open(DEFAULT_DIR_HTML + '../' + str(START) + '.tsv', 'w')
        result_file.write('\t'.join(['Profile Identifier'] + sorted(DEFAULT_ATTRS) + ['title_history']) + '\n')
        for i, h in enumerate(sorted(htmls)):
            print('\r%d / %d' % (i + 1 + START, START + len(htmls)), end='')
            parsed = html2dict(h)
            save_tsv_file(parsed)
        result_file.close()
|
7,245 | eabc81cacacc40d55234b60927b17069980a08f8 | #!/usr/bin/env python
# https://github.com/git/git/blob/master/Documentation/githooks.txt#L181
# This hook is called by 'git push' and can be used to prevent a push from taking
# place. The hook is called with two parameters which provide the name and
# location of the destination remote, if a named remote is not being used both
# values will be the same.
# Information about what is to be pushed is provided on the hook's standard
# input with lines of the form:
# <local ref> SP <local sha1> SP <remote ref> SP <remote sha1> LF
# For instance, if the command +git push origin master:foreign+ were run the
# hook would receive a line like the following:
# refs/heads/master 67890 refs/heads/foreign 12345
# although the full, 40-character SHA-1s would be supplied. If the foreign ref
# does not yet exist the `<remote SHA-1>` will be 40 `0`. If a ref is to be
# deleted, the `<local ref>` will be supplied as `(delete)` and the `<local
# SHA-1>` will be 40 `0`. If the local commit was specified by something other
# than a name which could be expanded (such as `HEAD~`, or a SHA-1) it will be
# supplied as it was originally given.
# If this hook exits with a non-zero status, 'git push' will abort without
# pushing anything. Information about why the push is rejected may be sent
# to the user by writing to standard error.
import gitta_hook
import sys
# git passes the per-ref details on stdin, not argv — fold them into argv
sys.argv.extend(sys.stdin.read().split())  # add stdin arguments
remote_dest_uri = sys.argv[1]  # generally the name of the remote, but may be the url
remote_dest_url = sys.argv[2]  # remote destination location
kwargs = {'remote_dest_uri': remote_dest_uri,
          'remote_dest_url': remote_dest_url}
# ref/sha details are only present when something is actually being pushed
if len(sys.argv) > 3:  # this can fail if pre-push is going to fail anyways?
    local_ref = sys.argv[3]   # there was no 4th argument in sys.argv; had to
                              # add by reading from stdin
    local_sha1 = sys.argv[4]
    remote_ref = sys.argv[5]
    remote_sha1 = sys.argv[6]
    kwargs.update({'local_ref': local_ref, 'local_sha1': local_sha1,
                   'remote_ref': remote_ref, 'remote_sha1': remote_sha1})
gitta_hook.trigger(*sys.argv, **kwargs)
|
7,246 | 7901a2bd4ae1070c8263d3cd97351b01ffbf7bb1 | from .facebook import *
|
7,247 | f66f79cd4132b23c082149a3a1d887f661fd7ee5 | from fgpio import GPIO
import boards
|
7,248 | 52e43f795c864340734de2640e3c1a70b05e8ea0 | # -*- coding:utf-8 -*-
import json
from datetime import datetime
from math import ceil, floor
from os.path import abspath, join, pardir
from struct import pack
from .global_settings import (
DEBUG, DEBUG_POLY_STOP, INPUT_JSON_FILE_NAME, INVALID_ZONE_ID, NR_BYTES_H, NR_BYTES_I, NR_SHORTCUTS_PER_LAT,
NR_SHORTCUTS_PER_LNG, TIMEZONE_NAMES_FILE,
)
# # # keep in mind: the faster numba optimized helper fct. cannot be used here,
# # # because numpy classes are not being used at this stage yet!
from .helpers import coord2int, inside_polygon, int2coord
# from helpers import coord2int, inside_polygon, int2coord
# from global_settings import (
# DEBUG, DEBUG_POLY_STOP, INPUT_JSON_FILE_NAME, INVALID_ZONE_ID, NR_BYTES_H, NR_BYTES_I, NR_SHORTCUTS_PER_LAT,
# NR_SHORTCUTS_PER_LNG, TIMEZONE_NAMES_FILE,
# )
# import sys
# from os.path import dirname
#
# sys.path.insert(0, dirname(__file__))
# from helpers import coord2int, int2coord, inside_polygon
"""
TODO write tests
USE INSTRUCTIONS:
- download the latest timezones.geojson.zip file from github.com/evansiroky/timezone-boundary-builder/releases
- unzip and place the combined.json inside this timezonefinder folder
- run this file_converter.py as a script until the compilation of the binary files is completed.
IMPORTANT: all coordinates (floats) are being converted to int32 (multiplied by 10^7). This makes computations faster
and it takes a lot less space, without losing too much accuracy (min accuracy (=at the equator) is still 1 cm!)
B = unsigned char (1byte = 8bit Integer)
H = unsigned short (2 byte integer)
I = unsigned 4byte integer
i = signed 4byte integer
Binaries being written:
[POLYGONS:] there are approx. 1k Polygons (evansiroky/timezone-boundary-builder 2017a)
poly_zone_ids: the related zone_id for every polygon ('<H')
poly_coord_amount: the amount of coordinates in every polygon ('<I')
poly_adr2data: address in poly_data.bin where data for every polygon starts ('<I')
poly_max_values: boundaries for every polygon ('<iiii': xmax, xmin, ymax, ymin)
poly_data: coordinates for every polygon (multiple times '<i') (for every polygon first all x then all y values!)
poly_nr2zone_id: the polygon number of the first polygon from every zone('<H')
[HOLES:] number of holes (162 evansiroky/timezone-boundary-builder 2018d)
hole_poly_ids: the related polygon_nr (=id) for every hole ('<H')
hole_coord_amount: the amount of coordinates in every hole ('<H')
hole_adr2data: address in hole_data.bin where data for every hole starts ('<I')
hole_data: coordinates for every hole (multiple times '<i')
[SHORTCUTS:] the surface of the world is split up into a grid of shortcut rectangles.
-> there are a total of 360 * NR_SHORTCUTS_PER_LNG * 180 * NR_SHORTCUTS_PER_LAT shortcuts
shortcut here means storing for every cell in a grid of the world map which polygons are located in that cell
they can therefore be used to drastically reduce the amount of polygons which need to be checked in order to
decide which timezone a point is located in.
the list of polygon ids in each shortcut is sorted after freq. of appearance of their zone id
this is critical for ruling out zones faster (as soon as just polygons of one zone are left this zone can be returned)
shortcuts_entry_amount: the amount of polygons for every shortcut ('<H')
shortcuts_adr2data: address in shortcut_data.bin where data for every shortcut starts ('<I')
shortcuts_data: polygon numbers (ids) for every shortcut (multiple times '<H')
shortcuts_unique_id: the zone id if only polygons from one zone are present,
a high number (with no corresponding zone) if not ('<H').
the majority of zones either have no polygons at all (sea) or just one zone.
this zone then can be instantly returned without actually testing polygons.
also stored extra binary if only one zone (to directly return that zone without checking)
statistics: (data version 2018g)
maximal amount of coordinates in one polygon: 139130
amount_of_holes: 219
amount of polygons: 1177
shortcut statistics:
highest entry amount is 46
frequencies of entry amounts (from 0 to max entries):
[76359, 45216, 7204, 710, 81, 17, 4, 1, 3, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
relative accumulated frequencies [%]:
[58.92, 93.81, 99.37, 99.91, 99.98, 99.99, 99.99, 99.99, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0,
100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0,
100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0]
[41.08, 6.19, 0.63, 0.09, 0.02, 0.01, 0.01, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0]
58.92 % of all shortcuts are empty
highest amount of different zones in one shortcut is 7
frequencies of entry amounts (from 0 to max):
[76359, 45555, 6963, 672, 43, 6, 1, 1]
relative accumulated frequencies [%]:
[58.92, 94.07, 99.44, 99.96, 99.99, 100.0, 100.0, 100.0]
[41.08, 5.93, 0.56, 0.04, 0.01, 0.0, 0.0, 0.0]
--------------------------------
The number of filled shortcut zones are: 53241 (= 41.08 % of all shortcuts)
The number of polygons is: 1177
The number of floats in all the polygons is (2 per point): 10887056
writing file " poly_nr2zone_id.bin "
Done
writing file " poly_zone_ids.bin "
writing file " poly_max_values.bin "
writing file " poly_data.bin "
writing file " poly_adr2data.bin "
writing file " poly_coord_amount.bin "
writing file " shortcuts_entry_amount.bin "
writing file " shortcuts_adr2data.bin "
writing file " shortcuts_data.bin "
writing file " shortcuts_unique_id.bin "
writing file " hole_poly_ids.bin "
writing file " hole_coord_amount.bin "
writing file " hole_adr2data.bin "
writing file " hole_data.bin "
the polygon data makes up 97.11 % of the data
the shortcuts make up 2.01 % of the data
holes make up 0.88 % of the data
"""
# ---- module-level state, filled while parsing the input JSON ----
nr_of_lines = -1        # total number of polygons (set after parsing)
all_tz_names = []       # timezone name of every zone, in input order
poly_zone_ids = []      # zone id of every polygon
all_boundaries = []     # (xmax, xmin, ymax, ymin) per polygon
all_coords = []         # (x_coords, y_coords) per polygon
all_lengths = []        # number of coordinates per polygon
amount_of_holes = 0     # total number of holes found
polynrs_of_holes = []   # polygon nr that each hole belongs to
all_holes = []          # (x_coords, y_coords) per hole
all_hole_lengths = []   # number of coordinates per hole
list_of_pointers = []
poly_nr2zone_id = []    # polygon nr at which each zone starts
shortcuts = {}          # (x_shortcut, y_shortcut) -> list of polygon ids
def x_shortcut(lng):
    """Column index of the shortcut grid for longitude `lng`."""
    # higher (=lng) means higher x shortcut!!! 0 (-180deg lng) -> 360 (180deg)
    # if lng < -180 or lng >= 180:
    #     raise ValueError('longitude out of bounds', lng)
    return floor((lng + 180) * NR_SHORTCUTS_PER_LNG)
def y_shortcut(lat):
    """Row index of the shortcut grid for latitude `lat`."""
    # lower y (=lat) means higher y shortcut!!! 0 (90deg lat) -> 180 (-90deg)
    # if lat < -90 or lat >= 90:
    #     raise ValueError('this latitude is out of bounds', lat)
    return floor((90 - lat) * NR_SHORTCUTS_PER_LAT)
def big_zone(xmax, xmin, ymax, ymin):
    """True if a zone with those boundaries could span more than 4 shortcuts."""
    return xmax - xmin > 2 / NR_SHORTCUTS_PER_LNG and ymax - ymin > 2 / NR_SHORTCUTS_PER_LAT
def percent(numerator, denominator):
    """Return numerator/denominator as a percentage, rounded to 2 decimals."""
    ratio = numerator / denominator
    return round(100 * ratio, 2)
def accumulated_frequency(int_list):
    """Running cumulative sums of int_list, each expressed as % of the total."""
    total = sum(int_list)
    running = 0
    result = []
    for value in int_list:
        running += value
        # inlined percent(): round((running / total) * 100, 2)
        result.append(round((running / total) * 100, 2))
    return result
def ints_of(line=0):
    """Return polygon `line`'s (x, y) coordinate lists converted to int32 repr."""
    x_coords, y_coords = all_coords[line]
    return [coord2int(x) for x in x_coords], [coord2int(x) for x in y_coords]
def contained(x, y, x_coords, y_coords):
    """True if point (x, y) lies inside the polygon given by the coordinate lists."""
    return inside_polygon(x, y, [x_coords, y_coords])
def unique(iterable):
    """Return the distinct items of `iterable`, keeping first-seen order.

    Uses dict-key uniqueness (dicts are insertion-ordered) instead of the
    previous O(n^2) list-membership scan. Items must be hashable — here
    they are integer zone ids.
    """
    return list(dict.fromkeys(iterable))
def point_between(p1, p2):
    """Midpoint of the segment from p1 to p2 (as an (x, y) tuple)."""
    x1, y1 = p1[0], p1[1]
    x2, y2 = p2[0], p2[1]
    return x1 + (x2 - x1) / 2, y1 + (y2 - y1) / 2
def get_shortcuts(x, y):
    """Polygon ids registered for shortcut cell (x, y); [] when the cell is empty."""
    return shortcuts.get((x, y), [])
def _polygons(id_list):
    """Yield the (x_coords, y_coords) tuple of every polygon id in `id_list`."""
    for i in id_list:
        yield all_coords[i]
def not_empty(iterable):
    """True if `iterable` yields at least one item (may consume one element)."""
    sentinel = object()
    return next(iter(iterable), sentinel) is not sentinel
def polys_of_one_zone():
    """Yield, per zone, the list of polygon numbers belonging to that zone.

    NOTE(review): reads a global `timezone_names` that is not defined in
    this module's visible scope — presumably the loaded zone-name list;
    confirm where it is assigned before relying on this helper.
    """
    for i in range(len(timezone_names)):
        start = poly_nr2zone_id[i]
        end = poly_nr2zone_id[i + 1]
        yield list(range(start, end))
def replace_entry(iterable, entry, substitute):
    """Replace every occurrence of `entry` in the sequence in place; return it."""
    for index, value in enumerate(iterable):
        if value == entry:
            iterable[index] = substitute
    return iterable
def _holes_in_poly(poly_nr):
    """Yield the coordinate tuples of all holes belonging to polygon `poly_nr`.

    polynrs_of_holes[i] is the owning polygon of all_holes[i], so a simple
    parallel walk over both lists suffices.
    """
    for hole_index, owner_nr in enumerate(polynrs_of_holes):
        if owner_nr == poly_nr:
            yield all_holes[hole_index]
def parse_polygons_from_json(path=INPUT_JSON_FILE_NAME):
    """Parse the timezone-boundary GeoJSON and fill the module-level lists.

    Fills all_tz_names, all_coords, all_lengths, all_boundaries,
    poly_zone_ids and the hole bookkeeping lists, then validates that the
    collected sizes still fit the binary field widths.

    :param path: path of the combined.json input file
    :raises ValueError: when any count exceeds its binary representation
    """
    global amount_of_holes
    global nr_of_lines
    global poly_zone_ids
    print('Parsing data from {}\nthis could take a while...\n'.format(path))
    tz_list = json.loads(open(path).read()).get('features')
    # this counter just counts polygons, not holes!
    polygon_counter = 0
    current_zone_id = 0
    print('holes found at: (poly_nr zone_name)')
    for tz_dict in tz_list:
        if DEBUG and polygon_counter > DEBUG_POLY_STOP:
            break
        tz_name = tz_dict.get('properties').get("tzid")
        all_tz_names.append(tz_name)
        geometry = tz_dict.get("geometry")
        if geometry.get('type') == 'MultiPolygon':
            # depth is 4
            multipolygon = geometry.get("coordinates")
        else:
            # depth is 3 (only one polygon, possibly with holes!)
            multipolygon = [geometry.get("coordinates")]
        # multipolygon has depth 4
        # assert depth_of_array(multipolygon) == 4
        for poly_with_hole in multipolygon:
            # assert len(poly_with_hole) > 0
            # the first entry is the outer polygon ring
            x_coords, y_coords = list(zip(*poly_with_hole.pop(0)))
            # IMPORTANT: do not use the last value (is equal to the first)!
            x_coords = list(x_coords)
            y_coords = list(y_coords)
            x_coords.pop(-1)
            y_coords.pop(-1)
            all_coords.append((x_coords, y_coords))
            # assert len(x_coords) > 0
            all_lengths.append(len(x_coords))
            all_boundaries.append((max(x_coords), min(x_coords), max(y_coords), min(y_coords)))
            poly_zone_ids.append(current_zone_id)
            # everything else is interpreted as a hole!
            for hole in poly_with_hole:
                print(polygon_counter, tz_name)
                # keep track of how many holes there are
                amount_of_holes += 1
                polynrs_of_holes.append(polygon_counter)
                x_coords, y_coords = list(zip(*hole))
                # IMPORTANT: do not use the last value (is equal to the first)!
                x_coords = list(x_coords)
                y_coords = list(y_coords)
                x_coords.pop(-1)
                y_coords.pop(-1)
                all_holes.append((x_coords, y_coords))
                all_hole_lengths.append(len(x_coords))
            polygon_counter += 1
        current_zone_id += 1
    # ---- sanity checks: everything must fit its binary field width ----
    if max(all_lengths) >= 2 ** (8 * NR_BYTES_I):
        # 34621 in tz_world 2016d (small enough for int16)
        # 137592 in evansiroky/timezone-boundary-builder 2017a (now int32 is needed!)
        raise ValueError('amount of coords cannot be represented by int32 in poly_coord_amount.bin:',
                         max(all_lengths))
    if max(all_hole_lengths) >= 2 ** (8 * NR_BYTES_H):
        # 21071 in evansiroky/timezone-boundary-builder 2017a (int16 still enough)
        raise ValueError('amount of coords cannot be represented by short (int16) in hole_coord_amount.bin:',
                         max(all_hole_lengths))
    nr_of_lines = len(all_lengths)
    if polygon_counter != nr_of_lines:
        raise ValueError('polygon counter and entry number in all_length is different:', polygon_counter, nr_of_lines)
    if nr_of_lines >= 2 ** (8 * NR_BYTES_H):
        # 24k in tz_world 2016d
        # 1022 in evansiroky/timezone-boundary-builder 2017a
        raise ValueError('polygon id cannot be encoded as short (int16) in hole_coord_amount.bin! there are',
                         nr_of_lines, 'polygons')
    if poly_zone_ids[-1] > 2 ** (8 * NR_BYTES_H):
        # 420 different zones in evansiroky/timezone-boundary-builder 2017a
        # used in shortcuts_unique_id and poly_zone_ids
        raise ValueError('zone id cannot be encoded as char (int8). the last id is',
                         poly_zone_ids[-1])
    if 0 in all_lengths:
        raise ValueError()
    print('... parsing done.')
    print('maximal amount of coordinates in one polygon:', max(all_lengths))
    print('amount_of_holes:', amount_of_holes)
    print('amount of polygons:', nr_of_lines)
    print('\n')
def update_zone_names(path=TIMEZONE_NAMES_FILE):
    """Write the collected timezone names to `path` and build poly_nr2zone_id.

    poly_nr2zone_id[i] is the polygon number at which zone i starts; a final
    sentinel entry marks the end of the last zone.

    :raises ValueError: when poly_zone_ids is not monotonically non-decreasing
    """
    global poly_zone_ids
    global list_of_pointers
    global all_boundaries
    global all_coords
    global all_lengths
    global polynrs_of_holes
    print('updating the zone names in {} now...'.format(path))
    # store the zone names as a JSON list
    with open(abspath(path), 'w') as f:
        f.write(json.dumps(all_tz_names))
    print('...Done.\n\nComputing where zones start and end...')
    i = 0
    last_id = -1
    for zone_id in poly_zone_ids:
        if zone_id != last_id:
            poly_nr2zone_id.append(i)
            if zone_id < last_id:
                raise ValueError()
            last_id = zone_id
        i += 1
    poly_nr2zone_id.append(i)
    print('...Done.\n')
def compile_binaries():
global nr_of_lines
global shortcuts
def print_shortcut_statistics():
    """Print fill-level and zone-mixing statistics of the shortcut grid.

    NOTE(review): reads `nr_of_entries_in_shortcut`, `shortcut_entries` and
    `amount_of_shortcuts` from the enclosing compile_binaries() scope.
    """
    frequencies = []
    max_val = max(*nr_of_entries_in_shortcut)
    print('shortcut statistics:')
    print('highest entry amount is', max_val)
    # count how many shortcuts hold exactly k polygons, for k = max..0
    while max_val >= 0:
        frequencies.append(nr_of_entries_in_shortcut.count(max_val))
        max_val -= 1
    frequencies.reverse()
    print('frequencies of entry amounts (from 0 to max entries):')
    print(frequencies)
    empty_shortcuts = frequencies[0]
    print('relative accumulated frequencies [%]:')
    acc = accumulated_frequency(frequencies)
    print(acc)
    print([round(100 - x, 2) for x in acc])
    print(percent(empty_shortcuts, amount_of_shortcuts), '% of all shortcuts are empty\n')
    # now the same statistic, but counting distinct zones per shortcut
    amount_of_different_zones = []
    for entry in shortcut_entries:
        registered_zone_ids = []
        for polygon_nr in entry:
            id = poly_zone_ids[polygon_nr]
            if id not in registered_zone_ids:
                registered_zone_ids.append(id)
        amount_of_different_zones.append(len(registered_zone_ids))
    frequencies = []
    max_val = max(*amount_of_different_zones)
    print('highest amount of different zones in one shortcut is', max_val)
    while max_val >= 1:
        frequencies.append(amount_of_different_zones.count(max_val))
        max_val -= 1
    # show the proper amount of shortcuts with 0 zones (=nr of empty shortcuts)
    frequencies.append(empty_shortcuts)
    frequencies.reverse()
    print('frequencies of entry amounts (from 0 to max):')
    print(frequencies)
    print('relative accumulated frequencies [%]:')
    acc = accumulated_frequency(frequencies)
    print(acc)
    print([round(100 - x, 2) for x in acc])
    print('--------------------------------\n')
def included_shortcut_row_nrs(max_lat, min_lat):
    """All shortcut row indices overlapped by the latitude span [min_lat, max_lat]."""
    return list(range(y_shortcut(max_lat), y_shortcut(min_lat) + 1))
def included_shortcut_column_nrs(max_lng, min_lng):
    """All shortcut column indices overlapped by the longitude span [min_lng, max_lng]."""
    return list(range(x_shortcut(min_lng), x_shortcut(max_lng) + 1))
def longitudes_to_check(max_lng, min_lng):
    """Longitudes of every shortcut-grid column border inside (min_lng, max_lng).

    Steps in 1/NR_SHORTCUTS_PER_LNG increments from the first border above
    min_lng; the last border (<= max_lng) is always included.
    """
    output_list = []
    step = 1 / NR_SHORTCUTS_PER_LNG
    current = ceil(min_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG
    end = floor(max_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG
    while current < end:
        output_list.append(current)
        current += step
    output_list.append(end)
    return output_list
def latitudes_to_check(max_lat, min_lat):
    """Latitudes of every shortcut-grid row border inside (min_lat, max_lat).

    Same scheme as longitudes_to_check(), with 1/NR_SHORTCUTS_PER_LAT steps.
    """
    output_list = []
    step = 1 / NR_SHORTCUTS_PER_LAT
    current = ceil(min_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT
    end = floor(max_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT
    while current < end:
        output_list.append(current)
        current += step
    output_list.append(end)
    return output_list
def compute_x_intersection(y, x1, x2, y1, y2):
    """Return the x value where the horizontal line at `y` crosses the
    segment (x1, y1)-(x2, y2); x1 is returned for a horizontal segment."""
    dy = y2 - y1
    if dy == 0:
        return x1
    return x1 + (y - y1) * (x2 - x1) / dy
def compute_y_intersection(x, x1, x2, y1, y2):
    """Return the y value where the vertical line at `x` crosses the
    segment (x1, y1)-(x2, y2).

    BUGFIX: the degenerate vertical-segment branch (delta_x == 0) returned
    x1 — an x value, copy-pasted from compute_x_intersection(). It now
    returns y1. Callers (y_intersections) only invoke this after detecting
    a strict x-crossing, so the branch was unreachable; this is defensive.
    """
    delta_x = x2 - x1
    if delta_x == 0:
        return y1
    return ((x - x1) * (y2 - y1) / delta_x) + y1
def x_intersections(y, x_coords, y_coords):
    """x coordinates where the horizontal line at `y` crosses the polygon edges.

    Edges are consecutive coordinate pairs; the asymmetric comparison
    (<= on one side, > on the other) counts each crossing exactly once.
    """
    intersects = []
    for i in range(len(y_coords) - 1):
        iplus1 = i + 1
        if y_coords[i] <= y:
            if y_coords[iplus1] > y:
                # this was a crossing. compute the intersect
                intersects.append(
                    compute_x_intersection(y, x_coords[i], x_coords[iplus1], y_coords[i], y_coords[iplus1]))
        else:
            if y_coords[iplus1] <= y:
                # this was a crossing. compute the intersect
                intersects.append(compute_x_intersection(y, x_coords[i], x_coords[iplus1], y_coords[i],
                                                         y_coords[iplus1]))
    return intersects
def y_intersections(x, x_coords, y_coords):
    """y coordinates where the vertical line at `x` crosses the polygon edges.

    Mirror of x_intersections(), testing the x coordinates for crossings.
    """
    intersects = []
    for i in range(len(y_coords) - 1):
        iplus1 = i + 1
        if x_coords[i] <= x:
            if x_coords[iplus1] > x:
                # this was a crossing. compute the intersect
                intersects.append(
                    compute_y_intersection(x, x_coords[i], x_coords[iplus1], y_coords[i], y_coords[iplus1]))
        else:
            if x_coords[iplus1] <= x:
                # this was a crossing. compute the intersect
                intersects.append(compute_y_intersection(x, x_coords[i], x_coords[iplus1], y_coords[i],
                                                         y_coords[iplus1]))
    return intersects
def compute_exact_shortcuts(xmax, xmin, ymax, ymin, line):
    """Compute the exact set of shortcut cells that polygon `line` touches.

    Walks every shortcut-grid border latitude and longitude crossing the
    polygon's bounding box, intersects it with the polygon edges and marks
    the cells on the inside of each intersection interval.
    """
    shortcuts_for_line = set()
    x_longs, y_longs = ints_of(line)
    # close the ring: append the first point again at the end
    y_longs.append(y_longs[0])
    x_longs.append(x_longs[0])
    step = 1 / NR_SHORTCUTS_PER_LAT
    # check all the grid latitudes crossing the bounding box
    for lat in latitudes_to_check(ymax, ymin):
        intersects = sorted([int2coord(x) for x in
                             x_intersections(coord2int(lat), x_longs, y_longs)])
        nr_of_intersects = len(intersects)
        if nr_of_intersects % 2 != 0:
            raise ValueError('an uneven number of intersections has been accounted')
        for i in range(0, nr_of_intersects, 2):
            possible_longitudes = []
            # collect all the zones between two intersections [in,out,in,out,...]
            iplus = i + 1
            intersection_in = intersects[i]
            intersection_out = intersects[iplus]
            if intersection_in == intersection_out:
                # the polygon has a point exactly on the border of a shortcut zone here!
                # only select the top shortcut if it is actually inside the polygon (point a little up is inside)
                if contained(coord2int(intersection_in), coord2int(lat) + 1, x_longs,
                             y_longs):
                    shortcuts_for_line.add((x_shortcut(intersection_in), y_shortcut(lat) - 1))
                # the bottom shortcut is always selected
                shortcuts_for_line.add((x_shortcut(intersection_in), y_shortcut(lat)))
            else:
                # add all the shortcuts for the whole found area of intersection
                possible_y_shortcut = y_shortcut(lat)
                # both shortcuts should only be selected when the polygon doesn't stay on the border
                middle = intersection_in + (intersection_out - intersection_in) / 2
                if contained(coord2int(middle), coord2int(lat) + 1, x_longs,
                             y_longs):
                    while intersection_in < intersection_out:
                        possible_longitudes.append(intersection_in)
                        intersection_in += step
                    possible_longitudes.append(intersection_out)
                    # the shortcut above and below of the intersection should be selected!
                    possible_y_shortcut_min1 = possible_y_shortcut - 1
                    for possible_x_coord in possible_longitudes:
                        shortcuts_for_line.add((x_shortcut(possible_x_coord), possible_y_shortcut))
                        shortcuts_for_line.add((x_shortcut(possible_x_coord), possible_y_shortcut_min1))
                else:
                    # polygon does not cross the border!
                    while intersection_in < intersection_out:
                        possible_longitudes.append(intersection_in)
                        intersection_in += step
                    possible_longitudes.append(intersection_out)
                    # only the shortcut above of the intersection should be selected!
                    for possible_x_coord in possible_longitudes:
                        shortcuts_for_line.add((x_shortcut(possible_x_coord), possible_y_shortcut))
    # same procedure horizontally, along the grid longitudes
    step = 1 / NR_SHORTCUTS_PER_LAT
    for lng in longitudes_to_check(xmax, xmin):
        intersects = sorted([int2coord(y) for y in
                             y_intersections(coord2int(lng), x_longs, y_longs)])
        nr_of_intersects = len(intersects)
        if nr_of_intersects % 2 != 0:
            raise ValueError('an uneven number of intersections has been accounted')
        possible_latitudes = []
        for i in range(0, nr_of_intersects, 2):
            # collect all the zones between two intersections [in,out,in,out,...]
            iplus = i + 1
            intersection_in = intersects[i]
            intersection_out = intersects[iplus]
            if intersection_in == intersection_out:
                # the polygon has a point exactly on the border of a shortcut here!
                # only select the left shortcut if it is actually inside the polygon (point a little left is inside)
                if contained(coord2int(lng) - 1, coord2int(intersection_in), x_longs,
                             y_longs):
                    shortcuts_for_line.add((x_shortcut(lng) - 1, y_shortcut(intersection_in)))
                # the right shortcut is always selected
                shortcuts_for_line.add((x_shortcut(lng), y_shortcut(intersection_in)))
            else:
                # add all the shortcuts for the whole found area of intersection
                possible_x_shortcut = x_shortcut(lng)
                # both shortcuts should only be selected when the polygon doesn't stay on the border
                middle = intersection_in + (intersection_out - intersection_in) / 2
                if contained(coord2int(lng) - 1, coord2int(middle), x_longs,
                             y_longs):
                    while intersection_in < intersection_out:
                        possible_latitudes.append(intersection_in)
                        intersection_in += step
                    possible_latitudes.append(intersection_out)
                    # both shortcuts right and left of the intersection should be selected!
                    possible_x_shortcut_min1 = possible_x_shortcut - 1
                    for possible_latitude in possible_latitudes:
                        shortcuts_for_line.add((possible_x_shortcut, y_shortcut(possible_latitude)))
                        shortcuts_for_line.add((possible_x_shortcut_min1, y_shortcut(possible_latitude)))
                else:
                    while intersection_in < intersection_out:
                        possible_latitudes.append(intersection_in)
                        intersection_in += step
                    # only the shortcut right of the intersection should be selected!
                    possible_latitudes.append(intersection_out)
                    for possible_latitude in possible_latitudes:
                        shortcuts_for_line.add((possible_x_shortcut, y_shortcut(possible_latitude)))
    return shortcuts_for_line
def construct_shortcuts():
    """Populate the enclosing ``shortcuts`` dict: (x_shortcut, y_shortcut) -> [polygon nrs].

    For every polygon (one bounding box per entry in ``all_boundaries``) the
    shortcut grid cells it may cover are registered.  For "big" zones the
    exact set of covered cells is computed from the polygon outline instead
    of blindly using the whole bounding box.
    """
    print('building shortucts...')
    print('currently at polygon nr:')
    line = 0
    for xmax, xmin, ymax, ymin in all_boundaries:
        # xmax, xmin, ymax, ymin = boundaries_of(line=line)
        # progress output every 100 polygons
        if line % 100 == 0:
            print(line)
        # print([xmax, xmin, ymax, ymin])
        # all shortcut grid columns/rows touched by this bounding box
        column_nrs = included_shortcut_column_nrs(xmax, xmin)
        row_nrs = included_shortcut_row_nrs(ymax, ymin)
        if big_zone(xmax, xmin, ymax, ymin):
            # print('line ' + str(line))
            # print('This is a big zone! computing exact shortcuts')
            # print('Nr of entries before')
            # print(len(column_nrs) * len(row_nrs))
            # print('columns and rows before optimisation:')
            # print(column_nrs)
            # print(row_nrs)
            # print(ints_of(line))
            # This is a big zone! compute exact shortcuts with the whole polygon points
            shortcuts_for_line = compute_exact_shortcuts(xmax, xmin, ymax, ymin, line)
            # n += len(shortcuts_for_line)
            min_x_shortcut = column_nrs[0]
            max_x_shortcut = column_nrs[-1]
            min_y_shortcut = row_nrs[0]
            max_y_shortcut = row_nrs[-1]
            shortcuts_to_remove = []
            # remove shortcuts from outside the possible/valid area
            # (the exact computation may have produced cells beyond the bounding box)
            for x, y in shortcuts_for_line:
                if x < min_x_shortcut or x > max_x_shortcut or y < min_y_shortcut or y > max_y_shortcut:
                    shortcuts_to_remove.append((x, y))
            for s in shortcuts_to_remove:
                shortcuts_for_line.remove(s)
            # print('and after:')
            # print(len(shortcuts_for_line))
            # print(shortcuts_for_line)
            # column_nrs_after = set()
            # row_nrs_after = set()
            # for x, y in shortcuts_for_line:
            # column_nrs_after.add(x)
            # row_nrs_after.add(y)
            # print(column_nrs_after)
            # print(row_nrs_after)
            # print(shortcuts_for_line)
            # sanity checks: the exact computation must never grow the cell set,
            # and a "big" zone is expected to span at least 3 cells
            if len(shortcuts_for_line) > len(column_nrs) * len(row_nrs):
                raise ValueError(
                    'there are more shortcuts than before now. there is something wrong with the algorithm!')
            if len(shortcuts_for_line) < 3:
                raise ValueError('algorithm not valid! less than 3 zones detected (should be at least 3)')
        else:
            # small zone: simply mark every cell of the bounding box
            shortcuts_for_line = []
            for column_nr in column_nrs:
                for row_nr in row_nrs:
                    shortcuts_for_line.append((column_nr, row_nr))
        # print(shortcuts_for_line)
        # register this polygon nr in every covered shortcut cell
        for shortcut in shortcuts_for_line:
            shortcuts[shortcut] = shortcuts.get(shortcut, []) + [line]
        line += 1
# print('collected entries:')
# print(n)
start_time = datetime.now()
construct_shortcuts()
end_time = datetime.now()
print('calculating the shortcuts took:', end_time - start_time, '\n')
# there are two floats per coordinate (lng, lat)
nr_of_floats = 2 * sum(all_lengths)
# write number of entries in shortcut field (x,y)
nr_of_entries_in_shortcut = []
shortcut_entries = []
amount_filled_shortcuts = 0
def sort_poly_shortcut(poly_nrs):
    """Sort the polygon nrs of one shortcut by frequency of their zone id.

    The list of polygon ids in each shortcut is sorted after the frequency of
    appearance of their zone id.  This is critical for ruling out zones faster
    (as soon as just polygons of one zone are left this zone can be returned).
    Only around 5% of all shortcuts include polygons from more than one zone;
    in most of those cases there are only two types of zones and one of them
    has only one entry (important to check the zone with one entry first!).
    """
    # TODO write test
    polygon_ids = [poly_zone_ids[poly_nr] for poly_nr in poly_nrs]
    # FIX: the comprehension variable was named ``id``, shadowing the builtin
    id_freq = [polygon_ids.count(zone_id) for zone_id in polygon_ids]
    zipped = list(zip(polygon_nrs, polygon_ids, id_freq)) if False else list(zip(poly_nrs, polygon_ids, id_freq))
    # also make sure polygons with the same zone freq. are ordered after their zone id
    # (polygons from different zones should not get mixed up);
    # Python's sort is stable, so sorting by zone id first and frequency second achieves this
    sort = sorted(sorted(zipped, key=lambda x: x[1]), key=lambda x: x[2])
    return [x[0] for x in sort]  # take only the polygon nrs
# count how many shortcut addresses will be written:
# flatten out the shortcuts in one list in the order they are going to be written inside the polygon file
for x in range(360 * NR_SHORTCUTS_PER_LNG):
for y in range(180 * NR_SHORTCUTS_PER_LAT):
try:
shortcuts_this_entry = shortcuts[(x, y)]
shortcut_entries.append(sort_poly_shortcut(shortcuts_this_entry))
amount_filled_shortcuts += 1
nr_of_entries_in_shortcut.append(len(shortcuts_this_entry))
# print((x,y,this_lines_shortcuts))
except KeyError:
nr_of_entries_in_shortcut.append(0)
amount_of_shortcuts = len(nr_of_entries_in_shortcut)
print_shortcut_statistics()
if amount_of_shortcuts != 360 * 180 * NR_SHORTCUTS_PER_LNG * NR_SHORTCUTS_PER_LAT:
print(amount_of_shortcuts)
raise ValueError('this number of shortcut zones is wrong')
print('The number of filled shortcut zones are:', amount_filled_shortcuts, '(=',
round((amount_filled_shortcuts / amount_of_shortcuts) * 100, 2), '% of all shortcuts)')
# for every shortcut <H and <I is written (nr of entries and address)
shortcut_space = 360 * NR_SHORTCUTS_PER_LNG * 180 * NR_SHORTCUTS_PER_LAT * (NR_BYTES_H + NR_BYTES_I)
for nr in nr_of_entries_in_shortcut:
# every line in every shortcut takes up 2bytes
shortcut_space += NR_BYTES_H * nr
print('The number of polygons is:', nr_of_lines)
print('The number of floats in all the polygons is (2 per point):', nr_of_floats)
path = 'poly_nr2zone_id.bin'
print('writing file', path)
output_file = open(path, 'wb')
for zone_id in poly_nr2zone_id:
output_file.write(pack(b'<H', zone_id))
output_file.close()
print('Done\n')
# write zone_ids
path = 'poly_zone_ids.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for zone_id in poly_zone_ids:
output_file.write(pack(b'<H', zone_id))
output_file.close()
# write boundary_data
path = 'poly_max_values.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for xmax, xmin, ymax, ymin in all_boundaries:
output_file.write(pack(b'<iiii', coord2int(xmax), coord2int(xmin), coord2int(ymax), coord2int(ymin)))
output_file.close()
# write polygon_data, addresses and number of values
path = 'poly_data.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
addresses = []
i = 0
for x_coords, y_coords in all_coords:
addresses.append(output_file.tell())
if all_lengths[i] != len(x_coords):
raise ValueError('x_coords do not have the expected length!', all_lengths[i], len(x_coords))
for x in x_coords:
output_file.write(pack(b'<i', coord2int(x)))
for y in y_coords:
output_file.write(pack(b'<i', coord2int(y)))
i += 1
output_file.close()
path = 'poly_adr2data.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for adr in addresses:
output_file.write(pack(b'<I', adr))
output_file.close()
path = 'poly_coord_amount.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for length in all_lengths:
output_file.write(pack(b'<I', length))
output_file.close()
# [SHORTCUT AREA]
# write all nr of entries
path = 'shortcuts_entry_amount.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for nr in nr_of_entries_in_shortcut:
if nr > 300:
raise ValueError("There are too many polygons in this shortcut:", nr)
output_file.write(pack(b'<H', nr))
output_file.close()
# write Address of first Polygon_nr in shortcut field (x,y)
# Attention: 0 is written when no entries are in this shortcut
adr = 0
path = 'shortcuts_adr2data.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for nr in nr_of_entries_in_shortcut:
if nr == 0:
output_file.write(pack(b'<I', 0))
else:
output_file.write(pack(b'<I', adr))
# each line_nr takes up 2 bytes of space
adr += 2 * nr
output_file.close()
# write Line_Nrs for every shortcut
path = 'shortcuts_data.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for entries in shortcut_entries:
for entry in entries:
if entry > nr_of_lines:
raise ValueError(entry)
output_file.write(pack(b'<H', entry))
output_file.close()
# write corresponding zone id for every shortcut (iff unique)
path = 'shortcuts_unique_id.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
if poly_zone_ids[-1] >= INVALID_ZONE_ID:
raise ValueError(
'There are too many zones for this data type (H). The shortcuts_unique_id file need a Invalid Id!')
for x in range(360 * NR_SHORTCUTS_PER_LNG):
for y in range(180 * NR_SHORTCUTS_PER_LAT):
try:
shortcuts_this_entry = shortcuts[(x, y)]
unique_id = poly_zone_ids[shortcuts_this_entry[0]]
for nr in shortcuts_this_entry:
if poly_zone_ids[nr] != unique_id:
# there is a polygon from a different zone (hence an invalid id should be written)
unique_id = INVALID_ZONE_ID
break
output_file.write(pack(b'<H', unique_id))
except KeyError:
# also write an Invalid Id when there is no polygon at all
output_file.write(pack(b'<H', INVALID_ZONE_ID))
output_file.close()
# [HOLE AREA, Y = number of holes (very few: around 22)]
hole_space = 0
# '<H' for every hole store the related line
path = 'hole_poly_ids.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
i = 0
for line in polynrs_of_holes:
if line > nr_of_lines:
raise ValueError(line, nr_of_lines)
output_file.write(pack(b'<H', line))
i += 1
hole_space += output_file.tell()
output_file.close()
if i > amount_of_holes:
raise ValueError('There are more related lines than holes.')
# '<H' Y times [H unsigned short: nr of values (coordinate PAIRS! x,y in int32 int32) in this hole]
path = 'hole_coord_amount.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for length in all_hole_lengths:
output_file.write(pack(b'<H', length))
hole_space += output_file.tell()
output_file.close()
# '<I' Y times [ I unsigned int: absolute address of the byte where the data of that hole starts]
adr = 0
path = 'hole_adr2data.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for length in all_hole_lengths:
output_file.write(pack(b'<I', adr))
# each pair of points takes up 8 bytes of space
adr += 2 * NR_BYTES_I * length
hole_space += output_file.tell()
output_file.close()
# Y times [ 2x i signed ints for every hole: x coords, y coords ]
# write hole polygon_data
path = 'hole_data.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for x_coords, y_coords in all_holes:
for x in x_coords:
output_file.write(pack(b'<i', coord2int(x)))
for y in y_coords:
output_file.write(pack(b'<i', coord2int(y)))
hole_space += output_file.tell()
output_file.close()
polygon_space = nr_of_floats * NR_BYTES_I
total_space = polygon_space + hole_space + shortcut_space
print('the polygon data makes up', percent(polygon_space, total_space), '% of the data')
print('the shortcuts make up', percent(shortcut_space, total_space), '% of the data')
print('holes make up', percent(hole_space, total_space), '% of the data')
print('Success!')
return
if __name__ == '__main__':
    # build pipeline: parse JSON -> normalize zone names -> reload names -> compile binaries
    # parsing the data from the .json into RAM
    parse_polygons_from_json(path=INPUT_JSON_FILE_NAME)
    # update all the zone names and set the right ids to be written in the poly_zone_ids.bin
    # sort data according to zone_id
    update_zone_names(path=TIMEZONE_NAMES_FILE)
    # IMPORTANT: import the newly compiled timezone_names pickle!
    # the compilation process needs the new version of the timezone names
    with open(abspath(join(__file__, pardir, TIMEZONE_NAMES_FILE)), 'r') as f:
        timezone_names = json.loads(f.read())
    # compute shortcuts and write everything into the binaries
    compile_binaries()
|
7,249 | 381b59ab9fa85561932a9bfb9ab8cef635901a35 | #!/usr/bin/env python
from collections import defaultdict
from cluster.common import Cluster
from cluster.tools import print_table
def check_status(args):
    """ Print node details

    Builds one table row per cluster node (name, states, load, core/memory
    usage, and an ASCII usage bar), optionally replacing the bar with a
    per-user job listing, then prints everything as a table.

    :param args: Arguments from argparse
    :type args: argparse.Namespace
    """
    cluster = Cluster(jobs_qstat=True, nodes=True, link=True)
    nodes = []
    # optionally narrow the node list to the requested (comma separated) states
    if args.filter_states:
        cluster.filter_node_states(set(args.filter_states.lower().split(',')))
    for node in cluster.nodes:
        nodes.append([
            node.name,
            node.states,
            node.load,
            # guard against division by zero when a node reports no cores/memory
            "%3d/%3d (%3d%%)" % (
                node.cpu_res, node.cpu_all, 1. * node.cpu_res / node.cpu_all * 100.) if node.cpu_all else 'N/A',  # Cores
            "%5.1f/%5.1fG (%3d%%)" % (
                node.mem_res, node.mem_all, node.mem_res / node.mem_all * 100.) if node.mem_all else 'N/A',  # Memory
            # usage bar: one '*' per reserved core, '-' per free core
            ''.join(('*' * node.cpu_res) + ('-' * (node.cpu_all - node.cpu_res)))
        ])
        if args.show_job_owners:
            # replace the usage bar with per-user job ids;
            # extra users go on continuation rows with the first 5 columns empty
            nodes[-1][-1] = ''
            empty = [''] * 5
            users = defaultdict(list)
            for job in node.jobs_qstat:
                users[job.user].append(job)
            # jobs without a known owner are grouped under a pseudo-user
            for orphan in node.orphans:
                users['ORPHANS'].append(orphan)
            for idx, uitem in enumerate(users.items()):
                u, jobs = uitem
                column_data = '%s: %s' % (u, ' '.join([str(j.job_id) for j in jobs]))
                if idx:
                    # continuation row for every user after the first
                    nodes.append(empty + [column_data])
                else:
                    # first user goes on the node's own row
                    nodes[-1][-1] = column_data
    # Printing bits
    print_table(['Node', 'Status', 'Load', 'Used cores', 'Used memory', 'Jobs'], nodes)
def main():
    """Parse the command line and print the node status table."""
    # noinspection PyCompatibility
    import argparse

    arg_parser = argparse.ArgumentParser(description='Check nodes status.')
    arg_parser.add_argument('-o', '--show-job-owners', action='store_true',
                            help='List jobs running on nodes')
    arg_parser.add_argument('-s', '--filter-states',
                            help='Display only nodes in FILTER_STATES (comma separated).')
    check_status(arg_parser.parse_args())
if __name__ == '__main__':
main()
|
7,250 | 71ab4ada4062ecde1463f2a766b5951860d0f2fb | from django.test import TestCase
from .models import Seller, Product
from rest_framework.test import APIClient
import json
class SellerModelTests(TestCase):
    """Unit tests for the Seller model."""

    def test_class_str(self):
        """__str__ returns the seller's name."""
        seller = Seller()
        seller.name = "Bruna"
        # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12)
        self.assertEqual(seller.__str__(), "Bruna")

    def test_to_dict(self):
        """to_dict serializes id, name and email; id is None before saving."""
        seller = Seller()
        seller.name = "Bruna"
        seller.email = "bruna@example.com"
        result_seller = {
            "id": None,
            "name": "Bruna",
            "email": "bruna@example.com"
        }
        self.assertEqual(seller.to_dict(), result_seller)

    def test_class_str_without_name(self):
        """__str__ of a blank seller is the empty string."""
        seller = Seller()
        self.assertEqual(seller.__str__(), "")
class ProductModelTests(TestCase):
    """Unit tests for the Product model.

    BUGFIX: these methods were named ``product_class_str`` and
    ``product_to_dict``, so the unittest runner never discovered or executed
    them.  They are renamed with the required ``test_`` prefix so they run.
    """

    def test_class_str(self):
        """__str__ returns the product's name."""
        product = Product()
        product.name = "Cadeira"
        # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12)
        self.assertEqual(product.__str__(), "Cadeira")

    def test_to_dict(self):
        """to_dict serializes all product fields; id is None before saving."""
        product = Product()
        product.name = "Cadeira"
        product.price = 2000
        # NOTE(review): assigning a plain string assumes ``seller`` is serialized
        # as-is by to_dict — confirm against the Product model definition.
        product.seller = "Bruna"
        product.quantity = 10
        product.status = "Active"
        result_product = {
            "id": None,
            "name": "Cadeira",
            "price": 2000,
            "seller": "Bruna",
            "quantity": 10,
            "status": "Active"
        }
        self.assertEqual(product.to_dict(), result_product)
class SellerViewTests(TestCase):
    """Integration tests for the seller list endpoint."""

    @classmethod
    def setUpTestData(cls):
        # one pre-existing seller shared by all tests in this class
        Seller.objects.create(name="Bruna", email="bruna@example.com")

    def test_get(self):
        """GET lists the existing seller with its fields."""
        client = APIClient()
        response = client.get('/produtos/sellers/')
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)
        self.assertEqual(data.get('count'), 1)
        seller_first = data.get('results')[0]
        self.assertEqual(seller_first.get("name"), "Bruna")
        self.assertEqual(seller_first.get("email"), "bruna@example.com")

    def test_post(self):
        """POST creates a second seller."""
        client = APIClient()
        response = client.post('/produtos/sellers/', {
            "name": "Bruna",
            "email": "bruna@example.com"
        })
        self.assertEqual(response.status_code, 201)
        # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12)
        self.assertEqual(Seller.objects.count(), 2)
        self.assertEqual(Seller.objects.last().name, "Bruna")
class ProductViewTests(TestCase):
    """Integration tests for the product list endpoint."""

    @classmethod
    def setUpTestData(cls):
        # one pre-existing product shared by all tests in this class
        Product.objects.create(name="Cadeira", price=250, quantity=2, status="Active")

    def test_get(self):
        """GET lists the existing product with its fields."""
        client = APIClient()
        response = client.get('/produtos/')
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)
        self.assertEqual(data.get('count'), 1)
        product_first = data.get('results')[0]
        self.assertEqual(product_first.get("name"), "Cadeira")
        self.assertEqual(product_first.get("price"), 250)
        self.assertEqual(product_first.get("quantity"), 2)
        self.assertEqual(product_first.get("status"), "Active")

    def test_post(self):
        """POST creates a second product."""
        client = APIClient()
        response = client.post('/produtos/', {
            "name": "Mesa",
            "price": 300,
            "quantity": 2,
            "status": "ACTIVE"
        })
        self.assertEqual(response.status_code, 201)
        # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12)
        self.assertEqual(Product.objects.count(), 2)
        self.assertEqual(Product.objects.last().name, "Mesa")
|
7,251 | abb08956f55fd1e8af27ce12fa94a4137d7d908e | g7=int(input())
h7=g7/2
i=g7-1
print(int(h7*i))
|
7,252 | d48f02d8d5469b966f109e8652f25352bc9b3b80 | from django import forms
from django.forms import ModelForm
from django.contrib.auth.models import User
from .models import Attendance, Holidays
#I think the update forms are not required here. They might be required in the profiles app. For this app, update attendance option can be available to the staff and faculty members. Update outpass form is required for the student to update the outpass if he/she wants to accomodate the query so as to get the permission without any trouble
#class UserUpdateForm(ModelForm):
# class Meta:
# model = User
# fields = ('first_name', 'last_name', 'email', 'username')
#only available for student
#for staff and faculty
class HolidaysCreateForm(ModelForm):
    """ModelForm for declaring a new holiday (date + reason).

    Per the module comments, intended for staff and faculty users.
    """
    class Meta:
        model = Holidays
        fields = ('date_of_holiday', 'reason')
#for staff and faculty. Only available to selected faculty, but all hostel staff
class HolidaysUpdateForm(ModelForm):
    """ModelForm for editing an existing holiday entry (same fields as create).

    Per the module comments, available only to selected faculty but all hostel staff.
    """
    class Meta:
        model = Holidays
        fields = ('date_of_holiday', 'reason')
#for hostel staff or faculty
class AttendanceUpdateForm(ModelForm):
    """ModelForm for hostel staff/faculty to update a student's attendance record."""
    class Meta:
        model = Attendance
        fields = ('enrollment_id', 'date', 'present', 'absent', 'outpass')
|
7,253 | 70cb5673a13967247b6da1fa5948000db39a92c8 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 5 11:56:41 2017
@author: cgao
"""
from beautifultable import BeautifulTable
#1. 新旧税率Bracket
def tax_calculator(taxable_income, bracket, rate):
    """Compute a progressive tax.

    ``bracket`` holds the lower bound of each marginal band (a list starting
    at 0); ``rate`` holds the matching marginal rates.  Income above the last
    bound is taxed at the last rate (implicit upper bound of infinity).
    """
    upper_bounds = bracket[1:] + [float('Inf')]
    total = 0
    for lower, upper, marginal_rate in zip(bracket, upper_bounds, rate):
        # portion of income falling inside [lower, upper): clamp to the band width
        taxed_here = min(max(0, taxable_income - lower), upper - lower)
        total += taxed_here * marginal_rate
    return total
def old_bracket(taxable_income, joint = True):
    """Federal income tax under the old (pre-2018) seven-rate brackets."""
    rate = [0.1, 0.15, 0.25, 0.28, 0.33, 0.35, 0.396]
    if joint:
        bracket = [0, 18650, 75900, 153100, 233350, 416700, 470700]
    else:
        bracket = [0, 9325, 37950, 91900, 191650, 416700, 418400]
    return tax_calculator(taxable_income, bracket, rate)
def new_bracket(taxable_income, joint = True):
    """Federal income tax under the proposed new four-rate brackets."""
    rate = [0.12, 0.25, 0.35, 0.396]
    if joint:
        bracket = [0, 90000, 260000, 1000000]
    else:
        bracket = [0, 45000, 200000, 500000]
    return tax_calculator(taxable_income, bracket, rate)
def AMT_bracket(taxable_income, joint = True):
    """Alternative Minimum Tax: two marginal rates (26% then 28%)."""
    rate = [0.26, 0.28]
    bracket = [0, 187800] if joint else [0, 93900]
    return tax_calculator(taxable_income, bracket, rate)
#2. 增加标准扣除(Standard Deduction)额度
'''
if joint:
old_standard_deduction = 12600
new_standard_deduction = 24000
else:
old_standard_deduction = 6300
new_standard_deduction = 12000
'''
#3. 减少利息扣除
def MTG_IR_deduction_old(UPB, rate):
    """Old-law mortgage-interest deduction: interest on at most $1M of balance."""
    capped_balance = UPB if UPB < 1000000.0 else 1000000.0
    return capped_balance * rate
def MTG_IR_deduction_new(UPB, rate, existing_mtg = False):
    """New-law mortgage-interest deduction.

    existing_mtg=True: an existing loan is grandfathered at the old $1M limit;
    new loans are capped at $750K of balance.
    """
    cap = 1000000.0 if existing_mtg else 750000.0
    return min(cap, UPB) * rate
#4. 减少州与地方税收(房产税等)扣除
def SALT_deduction_old(taxable_income, efficient_state_rate, local_tax):
    """Old-law state-and-local-tax deduction: uncapped state tax plus local tax."""
    state_tax = taxable_income * efficient_state_rate
    return state_tax + local_tax
def SALT_deduction_new(taxable_income, efficient_state_rate, local_tax):
    """New-law state-and-local-tax deduction, capped at $10,000."""
    total = taxable_income * efficient_state_rate + local_tax
    return total if total < 10000.0 else 10000.0
#5. 取消Personal Exemption
def PersonalExemption_deduction_old(taxable_income, member, joint = True):
    """Old-law personal exemption: $4050 per family member with phase-out.

    The exemption is reduced 2% per $2500 of income above the threshold
    (capped at 100%).  The tiny 1e-7 nudge compensates for float error at
    exact multiples of $2500 before rounding.
    """
    threshold = 311300 if joint else 259000
    excess = max(taxable_income - threshold, 0)
    phaseout = min(0.02 * round(excess / 2500 + 1e-7), 1)
    return int(4050 * member * (1 - phaseout))
def PersonalExemption_deduction_new():
    """New-law personal exemption: eliminated entirely, so always 0."""
    return 0
#6. Child Care Tax Credit
def ChildCare_Credit_old(taxable_income, child, joint = True):
    """Old-law child care tax credit: $1000 per child, phased out $1 per $20
    of income above the threshold (the 1e-7 nudge stabilizes rounding)."""
    threshold = 110000 if joint else 55000
    phaseout = round(max(taxable_income - threshold, 0) / 20 + 1e-7)
    return int(max(0, 1000 * child - phaseout))
def ChildCare_Credit_new(taxable_income, child, joint = True):
    """New-law child care tax credit: $1600 per child, phased out $1 per $20
    of income above the (raised) threshold."""
    threshold = 230000 if joint else 115000
    phaseout = round(max(taxable_income - threshold, 0) / 20 + 1e-7)
    return int(max(0, 1600 * child - phaseout))
#7. 取消AMT (Alternative Minimum Tax)
def AMT_exemption(taxable_income, joint = True):
    """AMT exemption amount: base exemption reduced by 25 cents per dollar
    of income above the threshold, floored at zero."""
    if joint:
        base, threshold = 84500, 160900
    else:
        base, threshold = 54300, 120700
    return max(0, base - max(taxable_income - threshold, 0) / 4)
#8. 逐步取消遗产税 (Estate Tax)
#9. 综合影响
def tax_comparison(taxable_income, member, child, UPB, rate, efficient_state_rate, local_tax, joint = True, existing_mtg = False, display = True, detail = False):
    """Compare total federal tax under the old (2017) law and the new law.

    Computes standard-deduction, itemized and (old-law only) AMT tax for the
    same household, then takes old = max(min(standard, itemized), AMT) and
    new = min(standard, itemized).

    :param taxable_income: gross taxable income
    :param member: number of family members (personal exemptions)
    :param child: number of children (child care credit)
    :param UPB: unpaid mortgage principal balance
    :param rate: mortgage interest rate
    :param efficient_state_rate: effective state/city income tax rate
    :param local_tax: local (e.g. property) tax amount
    :param joint: married filing jointly if True
    :param existing_mtg: existing (grandfathered) mortgage if True
    :param display: print a summary of the computed taxes
    :param detail: print input recap and a deduction-by-deduction table
    :return: [tax_old, tax_new, old_tax_standard, new_tax_standard,
              old_tax_itemized, new_tax_itemized, old_tax_AMT]
    """
    # Personal exemption (applied to both standard and itemized)
    old_PersonalExemption_deduction = PersonalExemption_deduction_old(taxable_income, member, joint = joint)
    # Child care tax credit (applied to both standard and itemized)
    old_ChildCare_Credit = ChildCare_Credit_old(taxable_income, child, joint = joint)
    new_ChildCare_Credit = ChildCare_Credit_new(taxable_income, child, joint = joint)
    # Mortgage Interest Rate deduction (applied to itemized and AMT)
    old_MTG_IR_deduction= MTG_IR_deduction_old(UPB, rate)
    new_MTG_IR_deduction= MTG_IR_deduction_new(UPB, rate, existing_mtg = existing_mtg)
    # State and local tax (applied to itemized only)
    old_SALT_deduction = SALT_deduction_old(taxable_income, efficient_state_rate, local_tax)
    new_SALT_deduction = SALT_deduction_new(taxable_income, efficient_state_rate, local_tax)
    # calculate standard tax
    if joint:
        old_standard_deduction = 12600
        new_standard_deduction = 24000
    else:
        old_standard_deduction = 6300
        new_standard_deduction = 12000
    # tax before Child care credit
    old_tax_beforeCCTC_standard = old_bracket(taxable_income - old_standard_deduction - old_PersonalExemption_deduction, joint = joint)
    new_tax_beforeCCTC_standard = new_bracket(taxable_income - new_standard_deduction, joint = joint)
    # tax after the Child care credit is applied (never below zero)
    old_tax_standard = max(0, old_tax_beforeCCTC_standard - old_ChildCare_Credit)
    new_tax_standard = max(0, new_tax_beforeCCTC_standard - new_ChildCare_Credit)
    # calculate itemized tax
    # tax before Child care credit
    old_tax_beforeCCTC_itemized = old_bracket(taxable_income - old_MTG_IR_deduction - old_SALT_deduction - old_PersonalExemption_deduction, joint = joint)
    new_tax_beforeCCTC_itemized = new_bracket(taxable_income - new_MTG_IR_deduction - new_SALT_deduction, joint = joint)
    # tax after the Child care credit is applied (never below zero)
    old_tax_itemized = max(0, old_tax_beforeCCTC_itemized - old_ChildCare_Credit)
    new_tax_itemized = max(0, new_tax_beforeCCTC_itemized - new_ChildCare_Credit)
    # calculate AMT tax (old law only; the new law drops the AMT here)
    AMT_exemption_amount = AMT_exemption(taxable_income, joint = joint)
    # tax before Child care credit
    old_tax_beforeCCTC_AMT = AMT_bracket(taxable_income - AMT_exemption_amount - old_MTG_IR_deduction, joint = joint)
    # tax after the Child care credit is applied (never below zero)
    old_tax_AMT = max(0, old_tax_beforeCCTC_AMT - old_ChildCare_Credit)
    # old law: pay at least the AMT; otherwise the cheaper of standard/itemized
    tax_old = max(min(old_tax_standard, old_tax_itemized),old_tax_AMT)
    tax_new = min(new_tax_standard, new_tax_itemized)
    if display:
        print("Current Tax Should Pay: $%3.2f"%tax_old)
        print(" Standard: $%3.2f"%old_tax_standard)
        print(" Itemized: $%3.2f"%old_tax_itemized)
        print(" AMT tax: $%3.2f"%old_tax_AMT)
        print("New Tax Should Pay: $%3.2f"%tax_new)
        print(" Standard: $%3.2f"%new_tax_standard)
        print(" Itemized: $%3.2f"%new_tax_itemized)
    if detail:
        # input recap followed by a side-by-side deduction table
        print("***********************************************")
        print("${:,} taxable income".format(taxable_income) + ', joint = %r'%joint)
        print("%d Family Member, %d child(ren)"%(member, child))
        print('Existing Mortgage: %r'%existing_mtg + ', ${:,} Mortgage Balance'.format(UPB) + ', %3.2f%% Interest Rate'%(rate*100),)
        print('${:,} Local Tax'.format(local_tax) + ', %d%% State/City Tax Rate'%(efficient_state_rate*100),)
        print("***********************************************")
        table = BeautifulTable()
        table.column_headers = ["Item", "Current", "New"]
        table.append_row(["Standard Deduction", old_standard_deduction, new_standard_deduction])
        table.append_row(["Personal Exemption", old_PersonalExemption_deduction, 'NA'])
        table.append_row(["Child Care Tax Credit", old_ChildCare_Credit, new_ChildCare_Credit])
        table.append_row(["Mortgage Interest Deduction", old_MTG_IR_deduction, new_MTG_IR_deduction])
        table.append_row(["State and Local Tax Deduction", old_SALT_deduction, new_SALT_deduction])
        table.append_row(["AMT Exemption (not including MTG Interest)", AMT_exemption_amount, "NA"])
        table.append_row(["Tax", tax_old, tax_new])
        print(table)
    return [tax_old, tax_new, old_tax_standard, new_tax_standard, old_tax_itemized, new_tax_itemized, old_tax_AMT]
|
7,254 | 75023c7600fcceda0dc225992e7c433291b1a190 | '''
THROW with or without parameters
Which of the following is true about the THROW statement?
Answer the question
50XP
Possible Answers
- The THROW statement without parameters should be placed within a CATCH block.
- The THROW statement with parameters can only be placed within a CATCH block.
- The THROW statement without parameters can't re-throw an original error.
Answer : The THROW statement without parameters should be placed within a CATCH block.
'''
|
7,255 | 9021fa440561461ee179f333aa04a155d06c6e86 |
from xai.brain.wordbase.nouns._teleconference import _TELECONFERENCE
# class header
class _TELECONFERENCES(_TELECONFERENCE, ):
    """Plural noun entry "teleconferences" — thin wrapper over the singular
    _TELECONFERENCE word class (auto-generated wordbase style)."""
    def __init__(self,):
        _TELECONFERENCE.__init__(self)
        # display name of this word entry
        self.name = "TELECONFERENCES"
        # part-of-speech bucket
        self.specie = 'nouns'
        # base (singular) form
        self.basic = "teleconference"
        # extra payload; empty by default
        self.jsondata = {}
|
7,256 | 6f6f57ff317d7e3c6e6ae4d450c6fdf0e22eb4eb | from django.db.models import Sum, Count
from django.db.models.functions import Coalesce
from django.utils.timezone import localtime
from .models import Quote, Vote
import pygal
from pygal.style import Style
# shared pygal theme used by every chart in this module
style = Style(
    background='transparent',
    plot_background='transparent',
    foreground='#3d3d3d',
    foreground_strong='#303030',
    foreground_subtle='#939393',
    opacity='.8',
    opacity_hover='.9',
    colors=('#fa5555', '#888'),
    label_font_size=15,
    major_label_font_size=15,
    title_font_size=20,
    legend_font_size=15
)
# x-axis labels for the per-month chart
MONTHS = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
# approved quotes annotated with their vote score and vote count
quotes = Quote.objects.annotate(score=Coalesce(Sum('vote__value'), 0), votes=Count('vote')).filter(approved=True)
votes = Vote.objects
class QuotesOverTime():
    """Cumulative line chart of the number of approved quotes over time."""

    def __init__(self):
        self.chart = pygal.DateTimeLine(
            title='Quotes over Time',
            x_label_rotation=90,
            x_value_formatter=lambda dt: dt.strftime('%b %Y'),
            margin=20,
            show_legend=False,
            show_dots=False,
            fill=True,
            style=style
        )

    def pull(self):
        """Map unix timestamp -> quotes created at that instant, in chronological order."""
        counts = {}
        for quote in quotes.order_by('timestamp'):
            ts = quote.timestamp.timestamp()
            counts[ts] = counts.get(ts, 0) + 1
        return counts

    def generate(self):
        """Render the cumulative series as a unicode SVG string.

        Each point carries the running total seen *before* its timestamp,
        matching the original accumulation order.
        """
        counts = self.pull()
        points = []
        running_total = 0
        for ts, n in counts.items():
            points.append((ts, running_total))
            running_total += n
        self.chart.add('quotes', points)
        return self.chart.render(is_unicode=True)
class QuotesByHour():
    """Bar chart: number of approved quotes per local-time hour of the day."""

    def __init__(self):
        self.chart = pygal.Bar(
            title='Quotes by Hour',
            x_labels=[str(hour) for hour in range(24)],
            margin=20,
            show_legend=False,
            style=style
        )

    def pull(self):
        """Count quotes per hour (list indexed 0-23)."""
        per_hour = [0] * 24
        for quote in quotes:
            per_hour[localtime(quote.timestamp).hour] += 1
        return per_hour

    def generate(self):
        """Render the chart as a unicode SVG string."""
        self.chart.add('quotes', self.pull())
        return self.chart.render(is_unicode=True)
class QuotesByMonth():
    """Bar chart: number of approved quotes per calendar month."""

    def __init__(self):
        self.chart = pygal.Bar(
            title='Quotes by Month',
            x_labels=MONTHS,
            margin=20,
            show_legend=False,
            style=style
        )

    def pull(self):
        """Count quotes per month (list indexed 0=Jan .. 11=Dec)."""
        per_month = [0] * 12
        for quote in quotes:
            per_month[localtime(quote.timestamp).month - 1] += 1
        return per_month

    def generate(self):
        """Render the chart as a unicode SVG string."""
        self.chart.add('quotes', self.pull())
        return self.chart.render(is_unicode=True)
class QuotesByRating():
    """Histogram of quote scores (sum of votes per quote)."""

    def __init__(self):
        self.chart = pygal.Histogram(
            title='Quotes by Rating',
            margin=20,
            show_legend=False,
            style=style
        )

    def pull(self):
        """Map score -> number of quotes with that score."""
        counts = {}
        for quote in quotes:
            counts[quote.score] = counts.get(quote.score, 0) + 1
        return counts

    def generate(self):
        """Render histogram bars (height, x_start, x_end), one per score."""
        counts = self.pull()
        bars = [(count, score, score + 1) for score, count in counts.items()]
        self.chart.add('quotes', bars)
        return self.chart.render(is_unicode=True)
class VoteDistribution():
    """Donut chart of upvotes vs downvotes."""

    def __init__(self):
        self.chart = pygal.Pie(
            title='Vote Distribution',
            margin=20,
            inner_radius=.7,
            style=style
        )

    def pull(self):
        """Return {'up': n_upvotes, 'down': n_downvotes}."""
        return {
            'up': votes.filter(value=1).count(),
            'down': votes.filter(value=-1).count(),
        }

    def generate(self):
        """Render the chart; each slice is labelled with its count."""
        counts = self.pull()
        for label, count in counts.items():
            self.chart.add('{} ({})'.format(label, count), count)
        return self.chart.render(is_unicode=True)
|
7,257 | b8b50ef021c4b25edbab355e1db5d62d3c5a28ad | import logging
import os
import logzero
from gunicorn.glogging import Logger
# resolve the desired log level from the environment (default: info)
_log_level = os.environ.get("LOG_LEVEL", "info").upper()
log_level = getattr(logging, _log_level)
# colorized prefix: single-letter level, timestamp and logger name
log_format = "%(color)s[%(levelname)1.1s %(asctime)s %(name)s]%(end_color)s %(message)s"
formatter = logzero.LogFormatter(fmt=log_format)
logger_args = dict(level=log_level, formatter=formatter)
# HACK: blanks logzero's module __name__ — presumably so its internal logger
# name does not appear in the output prefix; confirm before removing
logzero.__name__ = ""
logzero.setup_logger(**logger_args)
logzero.setup_default_logger(**logger_args)
# application logger used throughout this package
logger = logzero.setup_logger("alertmanager_telegram", **logger_args)
class GunicornLogger(Logger):
    """Gunicorn logger subclass that routes gunicorn's error log through
    logzero so its output matches the application's log format."""
    def __init__(self, cfg):
        super().__init__(cfg)
        # replace gunicorn's error logger with a logzero-configured one
        self.error_log = logzero.setup_logger("gunicorn", **logger_args)
7,258 | fde4c10e2ed0ed38d683a220e2985c3f3f336601 | #! /usr/bin/env python
import sys
import socket
def handle_connection(sock):
    """Drain and echo all data currently available on a non-blocking socket.

    Python 2 code.  Returns True when the socket should stop being monitored
    (peer closed, or a line containing '.\r\n' was echoed and the socket was
    closed); returns False when the socket is still live but has no data.
    """
    do_close = False
    while 1:
        try:
            data = sock.recv(4096)
            if not data: # closed! stop monitoring this socket.
                do_close = True
                break
            print 'data:', (data,)
            # echo everything back to the peer
            sock.sendall(data)
            # a lone '.' line is the client's end-of-session marker
            if '.\r\n' in data:
                sock.close()
                do_close = True # stop monitoring this socket.
                break
        except socket.error:
            # non-blocking recv with nothing pending raises socket.error
            print 'no data waiting...'
            break
    return do_close
if __name__ == '__main__':
    # Python 2 non-blocking echo server: usage <interface> <port>.
    # NOTE(review): the accept/process loop below never sleeps, so it
    # busy-spins at 100% CPU while idle — acceptable only for a demo.
    interface, port = sys.argv[1:3]
    port = int(port)
    print 'binding', interface, port
    sock = socket.socket()
    sock.bind( (interface, port) )
    sock.listen(5)
    # non-blocking listening socket: accept() raises socket.error when idle
    sock.setblocking(0)
    connections = []
    while 1:
        # loop, doing two things:
        # first, get a new connection
        # second, process (receive/send) data for each existing connection
        # first, do we have a new connection waiting?
        try:
            print 'testing for new connection'
            (client_sock, client_address) = sock.accept()
            # if this succeeds, we got a new connection... no new connection
            # raises a 'socket.error'
            print 'got connection', client_address
            client_sock.setblocking(0)
            connections.append((client_sock, client_address))
        except socket.error: # no new connection! do nothing.
            pass
        # now, process data for existing connections.
        # rebuild the list, keeping only sockets that should stay monitored
        open_connections = []
        for (client_sock, client_address) in connections:
            print 'processing data for', client_address
            do_close = handle_connection(client_sock)
            if not do_close:
                open_connections.append((client_sock, client_address))
        connections = open_connections
|
7,259 | 13e7484a80e4e45ee911f15837b9d82a1ef4d0b1 | from django.db import models
# TODO: there needs to be an easier ticker -> ticker_id conversion, or a decision on how to work with ticker.name;
# in most cases only the ticker name is available, not the id.
class User(models.Model):
    """A user who can trade assets."""
    name = models.CharField(max_length=200)
    # available cash balance
    saldo = models.DecimalField(max_digits=12, decimal_places=3)

    def __unicode__(self):
        return "User(%s, %.3f)" %(self.name, self.saldo)
class Stock(models.Model):
    """Represents a tradable asset."""
    # exchange ticker symbol; unique per asset
    ticker = models.CharField(max_length=8, unique=True)
    name = models.CharField(max_length=200)

    def __unicode__(self):
        return "Stock(%s, %s)" %(self.ticker, self.name)
class Order(models.Model):
    """An order created by a user, which goes into the book of a given asset."""
    # order side codes (C = buy/"Compra", V = sell/"Venda")
    ORDER_BUY = 'C'
    ORDER_SELL = 'V'
    # lifecycle status codes
    STATUS_NEW = 'N'
    STATUS_OPEN = 'A'
    STATUS_PARTIAL = 'P'
    STATUS_FINALIZED = 'F'
    STATUS_CANCELLED = 'C'
    ORDER_TYPES = [
        (ORDER_BUY, "Compra"),
        (ORDER_SELL, "Venda") ]
    ORDER_STATUS = [
        (STATUS_NEW, "Nova"),
        (STATUS_OPEN, "Aberta"),
        (STATUS_PARTIAL, "Parcialmente Executada"),
        (STATUS_FINALIZED, "Finalizada"),
        (STATUS_CANCELLED, "Cancelada") ]
    user = models.ForeignKey(User)
    stock = models.ForeignKey(Stock)
    # side of the order: buy or sell
    tipo = models.CharField(max_length=1, choices=ORDER_TYPES)
    status = models.CharField(max_length=1, choices=ORDER_STATUS, default=STATUS_NEW)
    # quantity originally requested vs. quantity still open
    # (presumably qty decreases as the order fills — confirm against matching logic)
    original_qty = models.IntegerField()
    qty = models.IntegerField()
    # order price per share
    value = models.DecimalField(max_digits=6, decimal_places=3)
    # timestamp of order inclusion (set once on creation)
    included = models.DateTimeField(auto_now_add = True)
    cancel_reason = models.CharField(max_length=255)

    def __unicode__(self):
        return "Order(%c, %d, %s, %s, %s | %s)" %(self.tipo, self.qty, self.stock.ticker, self.value, self.user.name, self.status)
class PortfolioItem(models.Model):
    """A position (asset + quantity) inside a user's portfolio."""
    user = models.ForeignKey(User)
    stock = models.ForeignKey(Stock)
    qty = models.IntegerField()
    def __unicode__(self):
        return "PortfolioItem(%s, %s, %d)" %(self.user.name, self.stock.ticker, self.qty)
class Historical(models.Model):
    """Records one executed trade (a matched buy and sell)."""
    stock = models.ForeignKey(Stock)
    qty = models.IntegerField()
    value = models.DecimalField(max_digits=6, decimal_places=3)
    # Distinct related_names so a User can reach both sides of their trades.
    user_buy = models.ForeignKey(User, related_name='buy_historical')
    user_sell = models.ForeignKey(User, related_name='sell_historical')
    timestamp = models.DateTimeField(auto_now_add = True)
    def __unicode__(self):
        return "Historical(%s, %d, %s, %s, %s)" %\
            (self.stock.ticker, self.qty, self.value, self.user_buy.name, self.user_sell.name)
|
7,260 | e4f7e0c40edde4aac6ba0a7529a2e028a09689ae | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# menuScrnTxt.py
# Created on Mon Mar 8 16:17:50 2021
# @author: jcj52436999
# menuScrnTxt.py-2021-03-08-1641-just noting a general restart in efforts here
import sys
def printTest2():
    """Print the top-of-program banner.

    NOTE(review): this definition is immediately shadowed by a second
    ``printTest2`` defined right below it, so as written it can never be
    called. It was presumably meant to have a different name (printTest1?).
    """
    if 0 == 0 :
        print(" ")
        print("# jcj-jcj-jcj- TOP START OF PROGRAM - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj")
        thisProgramIs = "menuScrnTxt.py"
        print(("Top of Start of program " + thisProgramIs))
        print(" ")
    return
def printTest2():
    """Print a small banner identifying this function and module (debug aid)."""
    program_name = "menuScrnTxt.py"
    print(" ")
    print("# jcj-jcj-jcj- printTest2() - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj")
    print("printTest2() " + program_name)
    print(" ")
    return
# import
def menuInit(cmdArray):
    """Print the menu-initialisation banner.

    ``cmdArray`` is accepted but not used by the current implementation.
    """
    program_name = "menuScrnTxt.py"
    print(" ")
    print("# jcj-jcj-jcj- TOP START OF def menuInit - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj")
    print("start of menuInit of program " + program_name)
    print(" ")
    return
# jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj the Start of main jcj-jcjjcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj
def main(argv=None):
    """Entry point: print banners, prompt the user, build the command grid.

    Returns early (before the grid is built) if the user confirms "stop".
    """
    #import sys
    if argv is None:
        argv = sys.argv
    lenArgv = len(sys.argv)
    pyScriptProgramName = sys.argv[0]
    print(" ")
    print("# jcj-jcj-jcj- START OF PROGRAM IN MAIN - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj")
    thisProgramIs = "menuScrnTxt.py"
    print(("Start of program in Main " + thisProgramIs))
    print(" ")
    # NOTE(review): none of the modules imported below are used in this
    # function body — they appear to be placeholders for planned features.
    # import sys
    import curses
    import getpass
    import os
    import shutil
    import subprocess
    import pprint
    # import pformat
    from subprocess import Popen, PIPE, STDOUT
    # import urwid
    import numpy
    import pygame
    import tkinter
    print (" ")
    # Trying to install a favorite set of Ubu software.
    #tempHold = tempHold[1]
    ## print( tempHold )
    ## cmdArray = " " ;
    ## cmdArray = menuLineReactions[ tempHold ]();
    # Interactive confirmation; answering "y"/"Y" exits immediately.
    reEntered = (input( "Stop chosen, all RAM data will be lost, are you sure? y or n: " ))
    if reEntered == "y" or reEntered == "Y":
        return #sys.exit() sys.exit()
    else:
        print( "Staying for more entry. ")
    #
    w = 5
    h = 99
    cmdArrayWidth = w
    cmdArrayHeight = h
    # Dict keyed by (w, h) tuples covering the full 5 x 99 grid; the
    # comprehension's loop variables shadow the outer ``w`` and ``h``.
    cmdArray = {( w, h): " " for w in range(cmdArrayWidth) for h in range(cmdArrayHeight)}
    menuInit( cmdArray )
    # out_bytes.wait()
    out_bytes = " "
    # Note: the double parentheses make this print a tuple.
    print(("# jcj-jcj-jcj-" + thisProgramIs + " Function Main is ending with sys.exit(): ", out_bytes))
    print(" ")
    print("# jcj-jcj-jcj- END OF PROGRAM - jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj")
    print(" ")
# jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj the End of main jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj-jcj
if __name__ == "__main__":
    # main() returns None, so the process exit code is 0.
    sys.exit(main())
# =============================================================================
#
# def main():
# ...
#
# if __name__ == "__main__":
# main()
#
#
# =============================================================================
|
7,261 | 237f5e2e37187e26b5628032e37d3a525ef72b9a | """
Mount /sys/fs/cgroup Option
"""
from typing import Callable
import click
def cgroup_mount_option(command: Callable[..., None]) -> Callable[..., None]:
    """
    Option for choosing to mount `/sys/fs/cgroup` into the container.
    """
    # Build the click decorator first, then apply it to the command.
    decorator = click.option(
        '--mount-sys-fs-cgroup/--no-mount-sys-fs-cgroup',
        default=True,
        show_default=True,
        help=(
            'Mounting ``/sys/fs/cgroup`` from the host is required to run '
            'applications which require ``cgroup`` isolation. '
            'Choose to not mount ``/sys/fs/cgroup`` if it is not available on '
            'the host.'
        ),
    )
    function = decorator(command)  # type: Callable[..., None]
    return function
|
7,262 | 73e4346007acae769b94a55ef53a48a9d3325002 | class Node:
def __init__ (self, val):
self.childleft = None
self.childright = None
self.nodedata = val
# Build a small five-node tree by hand:
#        Kaif
#       /    \
#    name     !
#    /  \
#  My    is
root = Node("Kaif")
root.childleft = Node("name")
root.childright = Node("!")
root.childleft.childleft = Node("My")
root.childleft.childright = Node("is")
# Interactive prompt: the traversal only runs if the user types "root".
message = input("Solve In order traversal, Type root: ")
print()
def trying():
    # Guard on the module-level answer typed by the user above.
    if message == "root":
        def InOrd(root):
            # Classic recursive in-order walk: left subtree, node, right subtree.
            if root:
                InOrd(root.childleft)
                print(root.nodedata)
                InOrd(root.childright)
        InOrd(root)
trying()
7,263 | d763485e417900044d7ce3a63ef7ec2def115f05 | from kafka import KafkaProducer
import json
msg_count = 50

# Producer for a local single-broker Kafka; note that send() is asynchronous
# and only enqueues the record into an internal buffer.
producer = KafkaProducer(bootstrap_servers=['localhost:9092'])
for i in range(0, msg_count):
    msg = {'id': i + 20, 'payload': 'Here is test message {}'.format(i + 20)}
    sent = producer.send('test-topic2', bytes(json.dumps(msg), 'utf-8'))
# Bug fix: without flush() the script can exit before the buffered records
# are actually delivered to the broker, silently dropping messages.
producer.flush()
7,264 | 443bf59bc3c5ed2114f0c276aa7134ff5bf7fb64 | import dash
import dash_core_components as dcc
import dash_html_components as html
app = dash.Dash()

# Static demo layout: a multi-select dropdown, a radio group and a slider,
# pre-populated with Turkish city / licence-plate-number pairs.
app.layout = html.Div(
    children=[
        html.Label('Dropdowm'),
        dcc.Dropdown(
            id='my-dropdown',
            options=[
                {'label': 'İstanbul', 'value': 34},  # options are given as a list of dicts
                {'label': 'Ankara', 'value': 6},
                {'label': 'Erzurum', 'value': 25},
            ],
            multi=True,
            value=34,
            searchable=True,
        ),
        html.Label('Radio'),
        dcc.RadioItems(
            id='my-radio',
            options=[
                {'label': 'İstanbul', 'value': 34},
                {'label': 'Ankara', 'value': 6},
                {'label': 'Erzurum', 'value': 25},
            ],
            value=34,
        ),
        html.Label('Slider'),
        dcc.Slider(
            id='my-slider',
            min=0,
            max=20,
            step=0.5,
            value=10,
            # Tick marks at every integer 0..20.
            # NOTE(review): Dash typically expects string labels in ``marks``
            # — confirm integer labels render as intended.
            marks={i: i for i in range(0, 21)}
        ),
    ]
)
if __name__ == '__main__':
    app.run_server()
|
7,265 | 1ae8d78c6581d35cd82194e2565e7a11edda1487 | from django.urls import path, include
from .views import StatusAPIView, StateAPIView, LogAPIView
# API endpoints; the ``name`` values are used for reverse() URL lookups.
urlpatterns = [
    path('status/', StatusAPIView.as_view(), name='status'),
    path('log/', LogAPIView.as_view(), name='log'),
    path('state/', StateAPIView.as_view(), name='state'),
]
|
7,266 | 9a5ba88a61f5c27c0bc7b980fa9d865b52cbbb20 | from .submit import *
from .fck import * |
7,267 | 4b672ad420bb67b8e2726102939ed6d369683150 | from telethon import events
from var import Var
from pathlib import Path
from ub.config import Config
import re, logging, inspect, sys, json, os
from asyncio import create_subprocess_shell as asyncsubshell, subprocess as asyncsub
from os import remove
from time import gmtime, strftime
from traceback import format_exc
from typing import List
from ub.javes_main.heroku_var import *
from ub import *
from sys import *
from telethon.errors.rpcerrorlist import PhoneNumberInvalidError
from telethon import TelegramClient, functions, types
from telethon.tl.types import InputMessagesFilterDocument
import traceback
import asyncio, time, io, math, os, logging, asyncio, shutil, re
def zzaacckkyy(**args):
    """Decorator factory that registers a handler for new Telegram messages.

    Keyword args are massaged into ``telethon.events.NewMessage`` arguments:
    the pattern is made case-insensitive, sudo users may be whitelisted, and
    the command name is recorded in the module-level ``CMD_LIST`` registry.
    """
    # Ignore messages that were sent through an inline bot.
    args["func"] = lambda e: e.via_bot_id is None
    # Derive the plugin name from the *caller's* file name (stack frame 1).
    stack = inspect.stack()
    previous_stack_frame = stack[1]
    file_test = Path(previous_stack_frame.filename)
    file_test = file_test.stem.replace(".py", "")
    pattern = args.get("pattern", None)
    allow_sudo = args.get("allow_sudo", None)
    allow_edited_updates = args.get('allow_edited_updates', False)
    args["incoming"] = args.get("incoming", False)
    args["outgoing"] = True
    if "trigger_on_inline" in args:
        del args['trigger_on_inline']
    # incoming and outgoing are mutually exclusive here.
    if bool(args["incoming"]):
        args["outgoing"] = False
    try:
        # Force case-insensitive matching unless the pattern already asks for it.
        if pattern is not None and not pattern.startswith('(?i)'):
            args['pattern'] = '(?i)' + pattern
    except:
        pass
    reg = re.compile('(.*)')
    if not pattern == None:
        try:
            # Record a cleaned-up command name for the help/command listing.
            cmd = re.search(reg, pattern)
            try:
                cmd = cmd.group(1).replace("$", "").replace("\\", "").replace("^", "")
            except:
                pass
            try:
                CMD_LIST[file_test].append(cmd)
            except:
                CMD_LIST.update({file_test: [cmd]})
        except:
            pass
    if allow_sudo:
        args["from_users"] = list(Var.SUDO_USERS)
        args["incoming"] = True
        del allow_sudo
    try:
        del args["allow_sudo"]
    except:
        pass
    if "allow_edited_updates" in args:
        del args['allow_edited_updates']
    def decorator(func):
        # Register on the main client and any extra logged-in clients.
        bot.add_event_handler(func, events.NewMessage(**args))
        if client2:
            client2.add_event_handler(func, events.NewMessage(**args))
        if client3:
            client3.add_event_handler(func, events.NewMessage(**args))
        # Track handlers per plugin so remove_plugin() can unregister them.
        try:
            LOAD_PLUG[file_test].append(func)
        except:
            LOAD_PLUG.update({file_test: [func]})
        return func
    return decorator
async def a():
    """Download every document message from the ``cIient`` chat into ub/modules/.

    NOTE(review): ``cIient`` (capital I), ``client`` and ``borg`` are three
    different module-level names used here — the lookalike spelling is easy
    to misread; confirm all three are actually defined at call time.
    """
    test1 = await bot.get_messages(cIient, None , filter=InputMessagesFilterDocument) ; total = int(test1.total) ; total_doxx = range(0, total)
    for ixo in total_doxx:
        mxo = test1[ixo].id ; await client.download_media(await borg.get_messages(cIient, ids=mxo), "ub/modules/")
def load_module(shortname):
    """Import (or re-import) the plugin file ``ub/modules/<shortname>.py``.

    Names starting with ``__`` are skipped entirely; names ending with ``_``
    are executed bare (no globals injected); all other plugins get the bot
    client and helper globals injected into their module namespace first.
    """
    if shortname.startswith("__"):
        pass
    elif shortname.endswith("_"):
        import ub.events
        import sys
        import importlib
        from pathlib import Path
        path = Path(f"ub/modules/{shortname}.py")
        name = "ub.modules.{}".format(shortname)
        spec = importlib.util.spec_from_file_location(name, path)
        mod = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(mod)
        print("Successfully (re)imported "+shortname)
    else:
        import ub.events
        import sys
        import importlib
        from pathlib import Path
        path = Path(f"ub/modules/{shortname}.py")
        name = "ub.modules.{}".format(shortname)
        spec = importlib.util.spec_from_file_location(name, path)
        mod = importlib.util.module_from_spec(spec)
        # Inject the client objects and helpers the plugins expect to find.
        mod.bot = bot
        mod.tgbot = bot.tgbot
        mod.Var = Var
        mod.command = command
        mod.logger = logging.getLogger(shortname)
        # Compatibility aliases so plugins written for uniborg also load.
        sys.modules["uniborg.util"] = ub.events
        mod.Config = Config
        mod.borg = bot
        sys.modules["ub.events"] = ub.events
        spec.loader.exec_module(mod)
        sys.modules["ub.modules."+shortname] = mod
        print("Successfully (re)imported "+shortname)
def remove_plugin(shortname):
    """Unregister every event handler belonging to plugin ``shortname``.

    Raises ValueError if neither removal strategy succeeds.
    """
    try:
        try:
            # Fast path: handlers were tracked in LOAD_PLUG at register time.
            for i in LOAD_PLUG[shortname]:
                bot.remove_event_handler(i)
            del LOAD_PLUG[shortname]
        except:
            # Fallback: scan the client's internal builder list for callbacks
            # whose module matches; iterate in reverse so deletion is safe.
            name = f"ub.modules.{shortname}"
            for i in reversed(range(len(bot._event_builders))):
                ev, cb = bot._event_builders[i]
                if cb.__module__ == name:
                    del bot._event_builders[i]
    except:
        raise ValueError
def rekcah05(pattern=None, **args):
    """Build a ``NewMessage`` event for admin commands invoked as ``.<pattern>``.

    Unlike the decorator factories, this returns the event object directly
    (for use as ``@bot.on(rekcah05(...))``).
    """
    args["func"] = lambda e: e.via_bot_id is None
    # Plugin name comes from the caller's file (stack frame 1).
    stack = inspect.stack()
    previous_stack_frame = stack[1]
    file_test = Path(previous_stack_frame.filename)
    file_test = file_test.stem.replace(".py", "")
    allow_sudo = args.get("allow_sudo", False)
    if pattern is not None:
        if pattern.startswith("\#"):
            args["pattern"] = re.compile(pattern)
        else:
            # Default prefix: commands are triggered as ".<pattern>".
            args["pattern"] = re.compile("\." + pattern)
            cmd = "." + pattern
            # Record the command for the help listing.
            try:
                CMD_LIST[file_test].append(cmd)
            except:
                CMD_LIST.update({file_test: [cmd]})
    if "trigger_on_inline" in args:
        del args['trigger_on_inline']
    args["outgoing"] = True
    if allow_sudo:
        args["from_users"] = list(Config.SUDO_USERS)
        args["incoming"] = True
        del args["allow_sudo"]
    elif "incoming" in args and not args["incoming"]:
        args["outgoing"] = True
    allow_edited_updates = False
    if "allow_edited_updates" in args and args["allow_edited_updates"]:
        allow_edited_updates = args["allow_edited_updates"]
        del args["allow_edited_updates"]
    is_message_enabled = True
    return events.NewMessage(**args)
def javess(**args):
    """Decorator factory: register an async handler with error reporting.

    Unlike ``zzaacckkyy`` this wraps the handler so unexpected exceptions are
    caught, formatted into a report file and uploaded to the log chat (when
    enabled) instead of propagating.
    """
    args["func"] = lambda e: e.via_bot_id is None
    # Plugin name from the caller's file (stack frame 1).
    stack = inspect.stack()
    previous_stack_frame = stack[1]
    file_test = Path(previous_stack_frame.filename)
    file_test = file_test.stem.replace(".py", "")
    pattern = args.get("pattern", None)
    pattern = args.get('pattern', None)
    disable_edited = args.get('disable_edited', True)
    groups_only = args.get('groups_only', False)
    trigger_on_fwd = args.get('trigger_on_fwd', False)
    trigger_on_inline = args.get('trigger_on_inline', False)
    disable_errors = args.get('disable_errors', False)
    reg = re.compile('(.*)')
    if not pattern == None:
        try:
            # Record a cleaned-up command name for the help listing.
            cmd = re.search(reg, pattern)
            try:
                cmd = cmd.group(1).replace("$", "").replace("\\", "").replace("^", "")
            except:
                pass
            try:
                CMD_LIST[file_test].append(cmd)
            except:
                CMD_LIST.update({file_test: [cmd]})
        except:
            pass
    # Force case-insensitive matching unless the pattern already asks for it.
    if pattern is not None and not pattern.startswith('(?i)'):
        args['pattern'] = '(?i)' + pattern
    # Strip the custom kwargs before they reach events.NewMessage(...).
    if "trigger_on_inline" in args:
        del args['trigger_on_inline']
    if "disable_edited" in args:
        del args['disable_edited']
    if "groups_only" in args:
        del args['groups_only']
    if "disable_errors" in args:
        del args['disable_errors']
    if "trigger_on_fwd" in args:
        del args['trigger_on_fwd']
    def decorator(func):
        async def wrapper(check):
            # NOTE(review): send_to is only bound when LOGSPAMMER is truthy;
            # the send_file below would NameError otherwise (and be swallowed).
            if LOGSPAMMER:
                send_to = BOTLOG_CHATID
            if not trigger_on_fwd and check.fwd_from:
                return
            if check.via_bot_id and not trigger_on_inline:
                return
            if groups_only and not check.is_group:
                await check.respond("`I don't think this is a group.`")
                return
            try:
                await func(check)
            except events.StopPropagation:
                raise events.StopPropagation
            except KeyboardInterrupt:
                pass
            except BaseException:
                # Build a diagnostic report and upload it to the log chat.
                if not disable_errors:
                    date = strftime("%Y-%m-%d %H:%M:%S", gmtime())
                    text = "**JAVES ERROR REPORT**\n"
                    text += "Send this to @errorsender_bot if you cant find issue\n"
                    ftext = "========== DISCLAIMER =========="
                    ftext += "\nThis file uploaded only logchat,"
                    ftext += "\nreport to admin this error if you cant find any issue"
                    ftext += "\n---------------------------------\n"
                    ftext += "================================\n\n"
                    ftext += "--------BEGIN LOG--------\n"
                    ftext += "\nDate: " + date
                    ftext += "\nChat ID: " + str(check.chat_id)
                    ftext += "\nSender ID: " + str(check.sender_id)
                    ftext += "\n\nEvent Trigger:\n"
                    ftext += str(check.text)
                    ftext += "\n\nTraceback info:\n"
                    ftext += str(format_exc())
                    ftext += "\n\nError text:\n"
                    ftext += str(sys.exc_info()[1])
                    ftext += "\n\n--------END LOG--------"
                    # Append recent git history to help pinpoint regressions.
                    command = "git log --pretty=format:\"%an: %s\" -10"
                    ftext += "\n\n\nLast 10 commits:\n"
                    process = await asyncsubshell(command,
                                                  stdout=asyncsub.PIPE,
                                                  stderr=asyncsub.PIPE)
                    stdout, stderr = await process.communicate()
                    result = str(stdout.decode().strip()) \
                        + str(stderr.decode().strip())
                    ftext += result
                    file = open("javes_error.log", "w+")
                    file.write(ftext)
                    file.close()
                    try:
                        await check.client.send_file(send_to, "javes_error.log", caption=text)
                        remove("javes_error.log")
                    except:
                        pass
                else:
                    pass
        # Register for edits too unless disabled, and on every extra client.
        if not disable_edited:
            bot.add_event_handler(wrapper, events.MessageEdited(**args))
        bot.add_event_handler(wrapper, events.NewMessage(**args))
        if client2:
            client2.add_event_handler(wrapper, events.NewMessage(**args))
        if client3:
            client3.add_event_handler(wrapper, events.NewMessage(**args))
        return wrapper
    return decorator
# Public aliases: expose the client and the decorator factories under the
# names plugins actually import (admin_cmd, command, register, javes05).
borg = javes = bot ; admin_cmd = rekcah05 ; command = zzaacckkyy ; register = javes05 = javess
def errors_handler(func):
    """Wrap async handler *func* so any exception it raises is swallowed.

    The wrapped handler returns func's result, or None when func raised.
    """
    async def _guarded(event):
        try:
            return await func(event)
        except Exception:
            # Deliberate best-effort: handler errors are silently ignored.
            return None
    return _guarded
async def progress(current, total, event, start, type_of_ps, file_name=None):
    """Edit *event* with a textual transfer-progress bar.

    Updates are throttled: the message is only edited when the elapsed time
    is near a 10-second multiple, or when the transfer has completed.
    """
    now = time.time()
    diff = now - start
    if round(diff % 10.00) == 0 or current == total:
        percentage = current * 100 / total
        speed = current / diff
        elapsed_time = round(diff) * 1000
        time_to_completion = round((total - current) / speed) * 1000
        estimated_total_time = elapsed_time + time_to_completion
        # 10-segment bar: solid blocks for done, light blocks for remaining.
        progress_str = "[{0}{1}] {2}%\n".format(
            ''.join(["█" for i in range(math.floor(percentage / 10))]),
            ''.join(["░" for i in range(10 - math.floor(percentage / 10))]),
            round(percentage, 2))
        tmp = progress_str + \
            "{0} of {1}\nETA: {2}".format(
                humanbytes(current),
                humanbytes(total),
                time_formatter(estimated_total_time)
            )
        if file_name:
            await event.edit("{}\nFile Name: `{}`\n{}".format(
                type_of_ps, file_name, tmp))
        else:
            await event.edit("{}\n{}".format(type_of_ps, tmp))
def humanbytes(size):
    """Render a byte count as a human-readable string ("2.0 KiB", "5.0 MiB"...).

    Falsy sizes (0, None) yield the empty string, matching the original
    behaviour of this helper.
    """
    if not size:
        return ""
    units = {0: "", 1: "Ki", 2: "Mi", 3: "Gi", 4: "Ti"}
    step = 2 ** 10
    exponent = 0
    while size > step:
        size /= step
        exponent += 1
    return "{} {}B".format(round(size, 2), units[exponent])
def time_formatter(milliseconds: int) -> str:
    """Convert a millisecond duration to "D day(s), H hour(s), ..." text.

    Zero-valued units are omitted; 0 ms yields the empty string.
    """
    seconds, milliseconds = divmod(int(milliseconds), 1000)
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    pieces = []
    for amount, unit in ((days, "day"), (hours, "hour"), (minutes, "minute"),
                         (seconds, "second"), (milliseconds, "millisecond")):
        if amount:
            pieces.append("{} {}(s)".format(amount, unit))
    return ", ".join(pieces)
class Loader():
    """Thin helper that registers *func* as a NewMessage handler on ``bot``.

    Instantiating the class performs the registration as a side effect.
    """
    def __init__(self, func=None, **args):
        self.Var = Var
        bot.add_event_handler(func, events.NewMessage(**args))
data = json.load(open("ub/javes_main/extra/meaning.json"))
def meaning(w):
    """Return the stored definition(s) for word *w* (case-insensitive).

    Returns None when the word is not in the loaded dictionary ``data``.
    """
    return data.get(w.lower())
|
7,268 | 7e287eca041cf27d99292a331604fef9e9f90fc2 | #Nianzu Wang
#Email: wangn89@gmail.com
#for_while.py: demonstrates some fun things with for and while loops
def starsFor(x):
array = range(x, 0, -1)
array2 = range(1, x)
for num in array2:
print "*" * num
for num in array:
print "*" * num
def starsWhile(n):
a = 1
while a < n:
print "*" * a
a += 1
while n > 0:
print "*" * n
n -= 1
return
def countdown(x):
array = range(x, -1, -1)
for num in array:
if num == 0:
print "Blastoff!!!"
else:
print num
def parade(string):
lineUp = list(string)
print """Welcome to the Dragon*Con Parade!
The marching order is as follows:"""
for num in lineUp:
if num == "1":
print num + " - Klingons"
elif num == "2":
print num + " - Storm Troopers"
elif num == "3":
print num + " - SGC Officers"
elif num == "4":
print num + " - Superheroes"
elif num == "5":
print num + " - Elves"
elif num == "6":
print num + " - Video Game Characters"
else:
print num + " - Not in Parade"
def walkOfFame():
    # Interactively collect celebrity names; reaction depends on name length.
    # NOTE(review): Python-2 input() EVALUATES what the user types — a
    # security hazard; raw_input() with int() would be safer here.
    celeb = input("""We're off to see celebrities!
How many guests would you like to see?""")
    if celeb == 0:
        return "Maybe you should try gaming..."
    nums = range(celeb, 0, -1)
    array = " "
    for num in nums:
        name = raw_input("Please enter the guest's name.")
        # Longer names are treated as more famous.
        if len(name) <= 6:
            print name + " ...Who?"
            array += (" " + name)
        elif len(name) <= 9:
            print name + " Hey, I've seen them in that show!"
            array += (" " + name)
        elif len(name) <= 12:
            print name + " Ooh, getting to the big time!"
            array += (" " + name)
        else:
            print name + " Wow! They are super famous. Definitely worth the autograph."
            array += (" " + name)
    print
    print "Get those autograph books ready, you get to see: "
    print array
|
7,269 | 05f143e28ff9c7397376ad598529c1dfb7528ee3 | #!/usr/bin/env python
# coding: utf-8
# Predicting Surviving the Sinking of the Titanic
# -----------------------------------------------
#
#
# This represents my first attempt at training up some classifiers for the titanic dataset.
# In[ ]:
# data analysis and wrangling
import pandas as pd
import numpy as np
import random as rnd
# visualization
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().magic(u'matplotlib inline')
sns.set_style("whitegrid")
# machine learning
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.neural_network import MLPClassifier
# In[ ]:
# get titanic & test csv files as a DataFrame
train_df = pd.read_csv("../input/train.csv")
test_df = pd.read_csv("../input/test.csv")
combine = [train_df, test_df]
# # Data exploration #
#
# First get some summary statistics about the datasets.
# In[ ]:
# view column labels
print(train_df.columns.values)
# In[ ]:
# preview the data
train_df.head()
# Now transpose the first few rows in order to see all attributes more easily as row labels.
# In[ ]:
train_df.head(3).T
# In[ ]:
# missing values, data types
train_df.info()
print('-'*40)
test_df.info()
# The above info shows that columns (from training data) with missing/empty values are:
#
# - Age (177 missing values)
# - Cabin (687 missing values)
# - Embarked (2 missing values)
# In[ ]:
# describe numeric columns
train_df.describe()
# In the training dataset there are 891 passengers with an overall survival rate of 38.4%.
# The oldest person is 80 years and the youngest is 5 months (0.42*12). The average fare is 32.20 dollars but the median fare is 14.45. This suggests outliers at the upper end of the fare, and indeed the maximum fare is $512.33.
# In[ ]:
# describe categorical columns
train_df.describe(include=['O'])
# In[ ]:
# just for fun, examine the records of ten year olds (there are only two)
train_df[train_df.Age == 10].stack()
# # Detailed data investigation #
#
# A closer look at each of the attributes (columns) and their relationship to survival.
# ##Sex##
#
# Sex is a *nominal* attribute with two categories (i.e. it is dichotomous). Let's plot some counts and survival rates by sex. Note that survival values are 0/1, thus rates can be be calculated simply via the mean survive value.
# In[ ]:
# count passengers by sex
plt.subplot(211) # 3 digit convenience notation for arguments (last digit represents plot number)
sns.countplot(x='Sex', data=train_df, palette='Greens_d')
# survival rate by sex
# note that barplot plots mean() on y by default
plt.subplot(212)
sns.barplot(x='Sex', y='Survived', data=train_df, palette='Greens_d')
# **Observations:**
#
# - Many more males than females
# - Survival rate of females much greater than males
#
# Let's get the actual numbers below using pandas.
# In[ ]:
# count passengers by sex
train_df.groupby('Sex').size()
# In[ ]:
# survival rates by sex
train_df.groupby(['Sex'])['Survived'].mean().sort_values()
# Thus, 18.9% of males (from the training set) survived compared to 74.2% of females.
# ##Passenger class##
#
# Passenger class (Pclass) is an *ordinal* attribute with three categories, 1, 2 and 3. The three categories have an order (representing socioeconomic status) but although the categories are given numeric labels, this attribute *is not* numeric! To see this, consider that 3rd class = 1st + 2nd class is a nonsense. This will be important later when we construct features. Again, let's plot some counts and survival rates.
# In[ ]:
# size of groups in passenger class
plt.subplots(figsize=(8,6))
plt.subplot(211)
sns.countplot(x='Pclass', data=train_df, palette='Purples_d') # _d = dark palette
# survival rate by sex
plt.subplot(212)
sns.barplot(x='Pclass', y='Survived', data=train_df, palette='Purples_d')
# **Observations:**
#
# - Three classes
# - Most passengers travelled by 3rd class (more than half; see below)
# - Survival rate increases with class
#
# Again, let's get the actual numbers below using pandas.
# In[ ]:
# count passengers by passenger class
train_df.groupby(['Pclass']).size()
# In[ ]:
# survival rates by passenger class
train_df.groupby(['Pclass'])['Survived'].mean().sort_values(ascending=False)
# ##Age##
#
# Age is a *ratio* attribute (it is properly numeric, see [Types of data measurement scales][1]). Ages < 1 indicate age in months.
#
#
# [1]: http://www.mymarketresearchmethods.com/types-of-data-nominal-ordinal-interval-ratio/
# In[ ]:
# count the number of passengers for first 25 ages
train_df.groupby('Age').size().head(25)
# another way to do the above
#train_df['Age'].value_counts().sort_index().head(25)
# In[ ]:
# convert ages to ints
age = train_df[['Age','Survived']].dropna() # returns a copy with blanks removed
age['Age'] = age['Age'].astype(int) # floors floats
# count passengers by age (smoothed via gaussian kernels)
plt.subplots(figsize=(18,6))
plt.subplot(311)
sns.kdeplot(age['Age'], shade=True, cut=0)
# count passengers by age (no smoothing)
plt.subplot(312)
sns.countplot(x='Age', data=age, palette='GnBu_d')
# survival rates by age
plt.subplot(313)
sns.barplot(x='Age', y='Survived', data=age, ci=None, palette='Oranges_d') # takes mean by default
# Observations:
#
# - Under 16s tend to have the highest survival rates
# - Very high survival rates at 53, 63 and 80
# - Survival of over 16s is fairly noisy. Possible that survival might increase with age.
# ## Survival by age group and sex ##
#
# Now let's look at survival by age groups *and* sex to see if any patterns become clearer.
# In[ ]:
# bin age into groups
train_df['AgeGroup'] = pd.cut(train_df['Age'],[0,4,15,25,35,45,65,100])
test_df['AgeGroup'] = pd.cut(test_df['Age'],[0,4,15,25,35,45,65,100])
# survival by age group
train_df.groupby('AgeGroup')['Survived'].mean()
# In[ ]:
# survival by age group and sex
train_df[['Survived','AgeGroup', 'Sex']].groupby(['Sex', 'AgeGroup']).mean()
# In[ ]:
# count passengers by age group and sex
sns.factorplot(x='AgeGroup', col='Sex', data=train_df, kind='count')
# survival by age group and sex
sns.factorplot(x='AgeGroup', y='Survived', col='Sex', data=train_df, kind='bar')
# The relationship between survival and age group looks very different for males and females:
#
# - Males: survival rates increase *inversely* with age for (0, 25] and (25, 100). That is, younger boys fare better than older boys and younger men survive more than older men.
# - Females: no obvious relationship between surviving and age. In particular, girls and baby girls do not fare better than women; in fact, girls (4, 15] have the *lowest* survival rates of females.
#
# A feature space containing (child, man, woman) would do a decent job of representing this relationship to survivability.
#
# Non-linear classifiers (e.g. decision trees, multi-layer nn, nearest neighbour) applied to both sex and age group might do even better because of the noticeable relationship between survivability and age group for males.
# ## Family Size##
#
# We create a new feature, FamilySize, that sums Parch and SibSp. This will enable us to drop Parch and SibSp from the datasets.
# In[ ]:
# calculate family size
train_df['FamilySize'] = train_df['SibSp'] + train_df['Parch'] + 1
test_df['FamilySize'] = test_df['SibSp'] + test_df['Parch'] + 1
# count passengers by age group and sex
plt.subplot(211)
sns.countplot(x='FamilySize', data=train_df)
# survival by age group and sex
plt.subplot(212)
sns.barplot(x='FamilySize', y='Survived', data=train_df)
# Survival increases with family size, until families of size 4. Family sizes of 5 and above have reduced survival.
# Deck
# ----
#
# Cabin might be conceivably be related to survival, but unfortunately most values are missing. Nevertheless, by way of an exercise, we will extract the feature, Deck, from cabin by taking the first character of the label and analyze survival rates by deck.
# In[ ]:
# deck is the first letter of cabin
train_df['Deck'] = train_df['Cabin'].dropna().apply(lambda x: str(x)[0])
train_df[['PassengerId','Name', 'Cabin', 'Deck']].head(2).T
# In[ ]:
# count passengers by the deck their cabin is on
plt.subplots(figsize=(8,6))
plt.subplot(211)
sns.countplot(x='Deck', data=train_df)
# survival rate by deck
plt.subplot(212)
sns.barplot(x='Deck', y='Survived', data=train_df)
# ## Other attributes ##
# For this first attempt, I am ignoring the attributes below as they seem unlikely to be related to survival:
#
# - PassengerId
# - Name (however, extracting titles from names might be informative)
# - Ticket
# - Fare (could be related to socioeconomic status but we already have a class attribute)
# - Embarked
# # Data wrangling - Age group#
#
# Fill missing age group values. We don't want to drop them as this would lose many rows. Instead, we will randomly generate age groups according to the frequency that they occur in the data. We will calculate the frequency separately for males and females.
# In[ ]:
# number of males/females without an age
def get_na(dataset):
    """Count rows with a missing AgeGroup, separately for each sex.

    Returns a dict {'male': n, 'female': n}.
    """
    missing = {}
    for sex in ('male', 'female'):
        missing[sex] = dataset.loc[dataset.Sex == sex, 'AgeGroup'].isnull().sum()
    return missing
# number of males and females by age group
def get_counts(dataset):
    """Number of passengers in each (Sex, AgeGroup) cell of *dataset*."""
    grouped = dataset.groupby(['Sex', 'AgeGroup'])
    return grouped.size()
# randomly generate a list of age groups based on age group frequency (for each sex separately)
def generate_age_groups(num, freq):
    """Sample ``num[sex]`` age-group labels for each sex.

    Labels are drawn with replacement, weighted by the observed counts in
    ``freq[sex]`` (a Series indexed by age group).
    """
    sampled = {}
    for sex in ('male', 'female'):
        weights = freq[sex] / freq[sex].sum()
        sampled[sex] = np.random.choice(freq[sex].index, size=num[sex],
                                        replace=True, p=weights)
    return sampled
# insert the new age group values
def insert_age_group_values(dataset, age_groups):
    """Fill AgeGroup for the rows whose Age is missing, one sex at a time.

    ``age_groups`` maps sex -> array of sampled labels, assigned positionally
    to the selected rows; ``combine_first`` then writes them back so only the
    null cells are filled.
    """
    for sex in ['male','female']:
        tmp = pd.DataFrame(dataset[(dataset.Sex == sex) & dataset.Age.isnull()]) # filter on sex and null ages
        # Positional assignment: assumes len(age_groups[sex]) equals the
        # number of selected rows — TODO confirm callers guarantee this.
        tmp['AgeGroup'] = age_groups[sex] # index age group values
        dataset = dataset.combine_first(tmp) # uses tmp to fill holes
    return dataset
# fill holes for train_df
na = get_na(train_df)
counts = get_counts(train_df)
counts['female']
age_groups = generate_age_groups(na, counts)
age_groups['female']
train_df = insert_age_group_values(train_df, age_groups)
train_df.info() # check all nulls have been filled
print('-'*40)
# repeat for test_df
na = get_na(test_df)
counts = get_counts(train_df) # reuse the frequencies taken over the training data as it is larger
age_groups = generate_age_groups(na, counts)
test_df = insert_age_group_values(test_df, age_groups)
test_df.info() # check all nulls have been filled
# # Feature engineering #
#
# Now that we've explored the data let's create some features:
#
# - **Sex:** Convert to a single binary feature, Female. No need to create a feature for Male, that would be redundant.
# - **Pclass:** Convert to two binary features, PClass_1 and PClass_2. Similar to Male above, having a PClass_3 would be redundant.
# - **Age group:** The age attribute binned using separators [0, 4, 15, 25, 35, 45, 65, 100]. Convert to a number of binary features, one for each age group.
# - **Family size:** The sum of SibSp and Parch plus 1.
# In[ ]:
# Sex -> Female
# training set
dummy = pd.get_dummies(train_df['Sex'])
dummy.columns = ['Female','Male']
train_df = train_df.join(dummy['Female'])
# test set
dummy = pd.get_dummies(test_df['Sex'])
dummy.columns = ['Female','Male']
test_df = test_df.join(dummy['Female'])
train_df[['Name', 'Sex', 'Female']].head(2).T
#train_df.columns
# In[ ]:
# Pclass -> PClass_1, PClass_2
# training set
dummy = pd.get_dummies(train_df['Pclass'])
dummy.columns = ['PClass_1','PClass_2','PClass_3']
train_df = train_df.join(dummy[['PClass_1', 'PClass_2']])
# test set
dummy = pd.get_dummies(test_df['Pclass'])
dummy.columns = ['PClass_1','PClass_2','PClass_3']
test_df = test_df.join(dummy[['PClass_1', 'PClass_2']])
train_df[['Name', 'Pclass', 'PClass_1', 'PClass_2']].head(2).T
#train_df.columns
# In[ ]:
# AgeGroup -> binary features
# training set
dummy = pd.get_dummies(train_df['AgeGroup'])
dummy.columns = ['Ages_4','Ages_15','Ages_25','Ages_35','Ages_45','Ages_65','Ages_100']
train_df = train_df.join(dummy)
# test set
dummy = pd.get_dummies(test_df['AgeGroup'])
dummy.columns = ['Ages_4','Ages_15','Ages_25','Ages_35','Ages_45','Ages_65','Ages_100']
test_df = test_df.join(dummy)
# ## Experimental features ##
# Some additional features to explore.
# In[ ]:
# Fare
# there is a single missing "Fare" value
test_df['Fare'].fillna(test_df['Fare'].median(), inplace=True)
# convert from float to int (floor)
#train_df['Fare'] = train_df['Fare'].astype(int)
#test_df['Fare'] = test_df['Fare'].astype(int)
# In[ ]:
# Embarked -> PortC, PortQ
# Fill missing values with the most occurred value
print(train_df.groupby('Embarked').size().sort_values())
train_df['Embarked'] = train_df['Embarked'].fillna('S')
# training set
dummy = pd.get_dummies(train_df['Embarked'])
#dummy.columns
dummy.columns = ['Port_C','Port_Q','Port_S']
#train_df = train_df.join(dummy[['Port_C','Port_Q']])
# test set
dummy = pd.get_dummies(test_df['Embarked'])
dummy.columns = ['Port_C','Port_Q','Port_S']
#test_df = test_df.join(dummy[['Port_C','Port_Q']])
# ## Dropping attributes ##
# Drop unused attributes to avoid detecting spurious relationships.
# In[ ]:
# drop the attributes that will be unused
train_df.drop(['PassengerId', 'Pclass', 'Name', 'Sex', 'Age',
'SibSp', 'Parch', 'Ticket', 'Cabin', 'Fare',
'Embarked', 'Deck', 'AgeGroup'], axis=1, inplace=True)
test_df.drop(['Pclass', 'Name', 'Sex', 'Age',
'SibSp', 'Parch', 'Ticket', 'Cabin', 'Fare',
'Embarked', 'AgeGroup'], axis=1, inplace=True)
train_df.head(10).T
# The sample above shows the features and their values for the first ten training examples.
# # Modeling #
#
# Our task is a binary classification problem: we want to formulate a relationship that predicts an output (Survived or not) from engineered features (Sex, Age group, Family size...). This is type of learning is supervised learning, since a model will be trained on a dataset containing pairs of inputs and outputs.
#
# Suitable methods for performing classification include:
#
# - Logistic Regression*
# - Perceptron*
# - Support Vector Machines (SVMs)*
# - Naive Bayes classifier*
# - KNN or k-Nearest Neighbors
# - Decision Tree
# - Random Forrest
# - Artificial neural network
# - Relevance Vector Machine
#
# The methods marked * either discover linear classification boundaries (logistic regression, perceptron, and SVMs if using linear kernels) or assume no relationship between features (naive bayes) and thus are not expected to perform as well (see the section above on the relationship between survival, age group and sex).
# ## Training data ##
# Let's use cross validation to perform the evaluation. This method will give a reasonable indication of predictive accuracy as evaluation will take place on data that is not seen during training. The package **`sklearn.model_selection`** includes support for cross validation.
# In[ ]:
# split the datasets into matched input and ouput pairs
X_train = train_df.drop("Survived", axis=1) # X = inputs
Y_train = train_df["Survived"] # Y = outputs
X_test = test_df.drop("PassengerId", axis=1).copy()
X_train.shape, Y_train.shape, X_test.shape
# Model fitting
# ----------
# (Some of this section is based on [this titanic tutorial][1].)
#
# Logistic Regression is a useful model to run early in the workflow. Logistic regression measures the relationship between the categorical dependent variable (feature) and one or more independent variables (features) by estimating probabilities using a logistic function, which is the cumulative logistic distribution. See [Logistic regression on Wikipedia][2].
#
# Note the confidence score generated by the model based on our training dataset.
#
#
# [1]: https://www.kaggle.com/startupsci/titanic/titanic-data-science-solutions
# [2]: https://en.wikipedia.org/wiki/Logistic_regression
# In[ ]:
# Logistic Regression
logreg = LogisticRegression()
scores = cross_val_score(logreg, X_train, Y_train, cv=10)
acc_log = round(scores.mean() * 100, 2)
acc_log
#Y_pred = logreg.predict(X_test)
# We can use Logistic Regression to validate our assumptions and decisions for feature creating and completing goals. This can be done by calculating the coefficient of the features in the decision function.
# Positive coefficients increase the log-odds of the response (and thus increase the probability), and negative coefficients decrease the log-odds of the response (and thus decrease the probability).
# In[ ]:
logreg.fit(X_train, Y_train)
coeff_df = pd.DataFrame(train_df.columns.delete(0))
coeff_df.columns = ['Feature']
coeff_df["Correlation"] = pd.Series(logreg.coef_[0])
coeff_df.sort_values(by='Correlation', ascending=False)
# In[ ]:
# Gaussian Naive Bayes
gaussian = GaussianNB()
scores = cross_val_score(gaussian, X_train, Y_train, cv=10)
acc_gaussian = round(scores.mean() * 100, 2)
acc_gaussian
# In[ ]:
# Perceptron (a single layer neural net)
perceptron = Perceptron()
scores = cross_val_score(perceptron, X_train, Y_train, cv=10)
acc_perceptron = round(scores.mean() * 100, 2)
acc_perceptron
# In[ ]:
# Neural Network (a multi layer neural net)
neural_net = MLPClassifier()
scores = cross_val_score(neural_net, X_train, Y_train, cv=10)
acc_neural_net = round(scores.mean() * 100, 2)
acc_neural_net
# In[ ]:
# Stochastic Gradient Descent
sgd = SGDClassifier()
scores = cross_val_score(sgd, X_train, Y_train, cv=10)
acc_sgd = round(scores.mean() * 100, 2)
acc_sgd
# In[ ]:
# Linear SVC
linear_svc = LinearSVC()
scores = cross_val_score(linear_svc, X_train, Y_train, cv=10)
acc_linear_svc = round(scores.mean() * 100, 2)
acc_linear_svc
# In[ ]:
# Support Vector Machine
svc = SVC() # uses a rbf kernel by default (i.e. can discover non-linear boundaries)
scores = cross_val_score(svc, X_train, Y_train, cv=10)
acc_svc = round(scores.mean() * 100, 2)
acc_svc
# In[ ]:
# Decision Tree
decision_tree = DecisionTreeClassifier()
scores = cross_val_score(decision_tree, X_train, Y_train, cv=10)
acc_decision_tree = round(scores.mean() * 100, 2)
acc_decision_tree
# In[ ]:
# Random Forest - an ensemble model
random_forest = RandomForestClassifier(n_estimators=100)
scores = cross_val_score(random_forest, X_train, Y_train, cv=10)
acc_random_forest = round(scores.mean() * 100, 2)
acc_random_forest
# In[ ]:
# AdaBoost - an ensemble method
ada_boost = AdaBoostClassifier(n_estimators=100)
scores = cross_val_score(ada_boost, X_train, Y_train, cv=10)
acc_ada_boost = round(scores.mean() * 100, 2)
acc_ada_boost
# In[ ]:
# k-Nearest Neighbors - a non-parametric method
knn = KNeighborsClassifier(n_neighbors = 5)
scores = cross_val_score(knn, X_train, Y_train, cv=10)
acc_knn = round(scores.mean() * 100, 2)
acc_knn
# Model evaluation
# ----------------
#
# We now rank the models and choose a high performing one for our problem. The Support Vector Machine consistently tops the chart.
#
# Decision Tree and Random Forest also both score high, but we prefer Random Forest as it avoids overfitting to the training set better than a decision tree and is therefore likely to perform better on the test dataset.
# In[ ]:
models = pd.DataFrame({
'Model': ['Support Vector Machine', 'kNN', 'Logistic Regression',
'Random Forest', 'Naive Bayes', 'Perceptron',
'Stochastic Gradient Descent', 'Linear SVC',
'Decision Tree', 'AdaBoost', 'Neural Network'],
'Score': [acc_svc, acc_knn, acc_log,
acc_random_forest, acc_gaussian, acc_perceptron,
acc_sgd, acc_linear_svc, acc_decision_tree,
acc_ada_boost, acc_neural_net]})
models.sort_values(by='Score', ascending=False)
# In[ ]:
# using random forest for submission
random_forest.fit(X_train, Y_train)
Y_pred = random_forest.predict(X_test)
submission = pd.DataFrame({
"PassengerId": test_df["PassengerId"],
"Survived": Y_pred
})
submission.to_csv('titanic_submission_1.csv', index=False)
#pd.set_option('display.max_rows', len(submission))
#submission
# Use cross validation to assess predictive accuracy
# --------------------------------------------------
#
# We can easily improve the above scores by evaluating on the training data (compare the random forest scores above and below). However, scores produced like this are not truly indicative of predictive accuracy and should be avoided. To see why, consider that a classifier that simply memorizes each input and output pair will score perfectly but be unable to generalise to other examples.
#
# In[ ]:
# Random Forest : scoring on training data
random_forest = RandomForestClassifier(n_estimators=100)
random_forest.fit(X_train, Y_train)
acc_random_forest = round(random_forest.score(X_train, Y_train) * 100, 2)
acc_random_forest
# What next?
# -------------------------------
#
# **_More feature exploration:_**
# Including *Fare* significantly increases the best accuracy to about 92% when *fare* is floored and 94% otherwise. Additionally including *Embarked* brings it up to 95%. It may worth be investigating if any relationship between these attributes and survival can be detected, especially for *fare*.
#
# Other possibilities for features include *Deck* and *Title*, which can be extracted from *Cabin* and *Name* respectively.
#
# Could also try two or more overlapping binnings for age groups (e.g. bins as defined by cutting on [0,4,15,25,35,45,65,100] and [10,20,30,40,55,100]). If going down this path, focus on introducing extra bins for age groups that contain many passengers and have a steeper gradient on the survival curve (such as for the twenties, e.g. cut on [10,20,30]).
#
# **_Refitting:_**
# Most of the models above used their default parameters. Choose a few promising models and attempt to optimize their (hyper-)parameters. The sklearn library used above offers a couple of ways to do this automatically (via grid search and cross-validated models, see [Model selection][1] and [Tuning the hyper-parameters of an estimator][2]).
#
#
# [1]: http://scikit-learn.org/stable/tutorial/statistical_inference/model_selection.html
# [2]: http://scikit-learn.org/stable/modules/grid_search.html#grid-search
|
7,270 | 3af78dcc0bb0b6f253af01d2945ad6ada02ca7a0 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
class Vertex():
    """One node of a directed graph; stores outgoing weighted edges."""

    def __init__(self, key):
        self.id = key
        self.connections = {}  # neighbor Vertex -> edge weight

    def add_neighbor(self, nbr, weight=0):
        # Adding an existing neighbor simply overwrites its weight.
        self.connections[nbr] = weight

    def get_connections(self):
        return self.connections.keys()

    def get_id(self):
        return self.id

    def get_weight(self, nbr):
        return self.connections[nbr]

    def __str__(self):
        neighbor_ids = str([nbr.id for nbr in self.connections])
        return f'{str(self.id)} connected to: {neighbor_ids}'


class Graph():
    """Directed graph keyed by vertex id; edge endpoints are created lazily."""

    def __init__(self):
        self.vertices = {}  # key -> Vertex
        self.num_vertices = 0

    def add_vertex(self, key):
        vertex = Vertex(key)
        self.num_vertices = self.num_vertices + 1
        self.vertices[key] = vertex
        return vertex

    def get_vertex(self, key):
        # Return None rather than raising for unknown keys.
        return self.vertices.get(key)

    def add_edge(self, origin, dest, weight=0):
        # Auto-create any endpoint that has not been seen yet.
        for endpoint in (origin, dest):
            if endpoint not in self.vertices:
                self.add_vertex(endpoint)
        self.vertices[origin].add_neighbor(self.vertices[dest], weight)

    def get_vertices(self):
        return self.vertices.keys()

    def __iter__(self):
        return iter(self.vertices.values())

    def __contains__(self, n):
        return n in self.vertices
if __name__ == '__main__':
    # Smoke test: build a small graph, add one edge, and dump each vertex.
    g = Graph()
    for i in range(6):
        g.add_vertex(i)
    print(g.vertices)
    g.add_edge(0, 1, 2)
    for vertex in g:
        print(vertex)
        # Fix: call the method — the original printed the bound-method object itself.
        print(vertex.get_connections())
        print('---------------------')
|
7,271 | 21e83369c4100c41885e9ee8a8d7310556bfe51d | from src.MultiValueDictApp import MultiValueDictApp
def main():
    """Interactive command loop for the multi-value dictionary application."""
    app = MultiValueDictApp()
    banner = (
        "Welcome to Multivalue Dictionary App",
        "COMMANDS and format:",
        "KEYS",
        "MEMBERS key",
        "ADD key value",
        "REMOVE key value",
        "REMOVEALL key",
        "CLEAR",
        "KEYEXISTS key",
        "VALUEEXISTS key value",
        "ALLMEMBERS",
        "ITEMS",
        "EXIT",
        "Enter COMMAND key value below",
        "---------------------------------------",
        "",
    )
    for line in banner:
        print(line)
    # Read "COMMAND [args...]" lines forever.
    # NOTE(review): the loop never breaks — presumably app.run terminates the
    # process on EXIT; confirm against MultiValueDictApp.run.
    while True:
        command, *args = input().split(' ')
        app.run(command, args)


if __name__ == "__main__":
    main()
7,272 | 36ce0de4cb760632959392a9f982532436bd37b0 | # -*- coding: utf-8 -*-
"""overview.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/examples/style_transfer/overview.ipynb
##### Copyright 2019 The TensorFlow Authors.
"""
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # FATAL
logging.getLogger('tensorflow').setLevel(logging.FATAL)
import tensorflow as tf
print(tf.__version__)
import IPython.display as display
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = (12,12)
mpl.rcParams['axes.grid'] = False
import numpy as np
import time
import functools
import image_grabber
import def_grabber
import cv2
import random
"""Download the content and style images, and the pre-trained TensorFlow Lite models."""
paths = ["data/style01.jpg", "data/style02.jpg", "data/style03.jpg"]
style_path = random.choice(paths)
style_predict_path = tf.keras.utils.get_file('style_predict.tflite', 'https://tfhub.dev/google/lite-model/magenta/arbitrary-image-stylization-v1-256/int8/prediction/1?lite-format=tflite')
style_transform_path = tf.keras.utils.get_file('style_transform.tflite', 'https://tfhub.dev/google/lite-model/magenta/arbitrary-image-stylization-v1-256/int8/transfer/1?lite-format=tflite')
"""## Pre-process the inputs
* The content image and the style image must be RGB images with pixel values being float32 numbers between [0..1].
* The style image size must be (1, 256, 256, 3). We central crop the image and resize it.
* The content image must be (1, 384, 384, 3). We central crop the image and resize it.
"""
# Function to load an image from a file, and add a batch dimension.
def load_img(path_to_img):
    """Read an image file into a float32 RGB tensor with a leading batch axis."""
    raw = tf.io.read_file(path_to_img)
    decoded = tf.io.decode_image(raw, channels=3)
    scaled = tf.image.convert_image_dtype(decoded, tf.float32)  # pixels in [0, 1]
    return scaled[tf.newaxis, :]
# Function to pre-process by resizing an central cropping it.
def preprocess_image(image, target_dim):
    """Resize so the short side equals target_dim, then center-crop to a square."""
    hw = tf.cast(tf.shape(image)[1:-1], tf.float32)
    # Scale chosen so the shorter spatial dimension becomes exactly target_dim.
    scale_factor = target_dim / min(hw)
    resized = tf.image.resize(image, tf.cast(hw * scale_factor, tf.int32))
    # Central crop to target_dim x target_dim.
    return tf.image.resize_with_crop_or_pad(resized, target_dim, target_dim)
"""## Run style transfer with TensorFlow Lite
### Style prediction
"""
# Function to run style prediction on preprocessed style image.
def run_style_predict(preprocessed_style_image):
    """Run the TFLite style-prediction model and return the style bottleneck."""
    interpreter = tf.lite.Interpreter(model_path=style_predict_path)
    interpreter.allocate_tensors()
    first_input = interpreter.get_input_details()[0]
    interpreter.set_tensor(first_input["index"], preprocessed_style_image)
    interpreter.invoke()
    first_output = interpreter.get_output_details()[0]
    return interpreter.tensor(first_output["index"])()
"""### Style transform"""
# Run style transform on preprocessed style image
def run_style_transform(style_bottleneck, preprocessed_content_image):
    """Stylize a content image with the TFLite transform model and bottleneck."""
    interpreter = tf.lite.Interpreter(model_path=style_transform_path)
    inputs = interpreter.get_input_details()
    interpreter.allocate_tensors()
    # Input 0 is the content image, input 1 the style bottleneck.
    interpreter.set_tensor(inputs[0]["index"], preprocessed_content_image)
    interpreter.set_tensor(inputs[1]["index"], style_bottleneck)
    interpreter.invoke()
    output_index = interpreter.get_output_details()[0]["index"]
    return interpreter.tensor(output_index)()
def art_grab(term):
    """Fetch an image for *term*, stylize it with the chosen style, return an ndarray."""
    content_path = image_grabber.im_grab(term, DISP=0)
    # Load the content image and the (module-level) style image.
    content_image = load_img(content_path)
    style_image = load_img(style_path)
    # Model input sizes: 384 for content, 256 for style.
    content_ready = preprocess_image(content_image, 384)
    style_ready = preprocess_image(style_image, 256)
    bottleneck = run_style_predict(style_ready)
    stylized = run_style_transform(bottleneck, content_ready)
    # Drop the batch axis, if present, so callers get a plain HWC image.
    if len(stylized.shape) > 3:
        stylized = tf.squeeze(stylized, axis=0)
    return np.array(stylized)
|
7,273 | 901f87752026673c41a70655e987ecc2d5cb369f | #include os
import math  # kept from the original header; unused in the visible code

output_file = 'output/mvnt'


def file_writeout(srvN, pos):
    """Append a 'servo -> position' movement record to the output log.

    Fixes the original, which was syntactically invalid (`def ...;`) and
    passed multiple arguments to write() (a TypeError at runtime).
    """
    with open(output_file, 'a') as f:
        f.write(f'{srvN} to {pos}\n')
    return 0


class leg:
    """Names the three servo channels belonging to leg *legN*.

    NOTE(review): the original `class leg(legN)` / `def __init__(legN)` was not
    valid Python; this reconstruction stores the channel names as attributes —
    confirm intended semantics.
    """

    def __init__(self, legN):
        self.srvHY = 'srv' + legN + 'HY'
        self.srvHX = 'srv' + legN + 'HX'
        self.srvEY = 'srv' + legN + 'EY'
|
7,274 | 6492f1eda79fd3116058f29647dc5f09e903f637 | ##
## Originally created by https://www.reddit.com/user/AlekseyP
## Seen at: https://www.reddit.com/r/technology/comments/43fi39/i_set_up_my_raspberry_pi_to_automatically_tweet
##
#!/usr/bin/python
import os
import sys
import csv
import datetime
import time
import twitter
#Configuration
# Twitter
ACCESS_TOKEN=""
ACCESS_TOKEN_SECRET=""
CONSUMER_KEY=""
CONSUMER_SECRET=""
# Minimum network speed
min_net_speed = 10
# Speedtest client absolute path
speedtest_path = "/home/alberto/Desarrollo/Proyectos/Scripts/SpeedTest/speedtest-cli"
csv_output_file_path = "/home/alberto/Desarrollo/Proyectos/Scripts/SpeedTest/"
def test():
    """Run speedtest-cli, log (ping, down, up) to CSV, and tweet when speed is poor.

    Python 2 script. Speeds are parsed out of the three-line `--simple` output
    by fixed character offsets; a failed connection is recorded as 0/0 speed.
    """
    #run speedtest-cli
    print 'running test'
    a = os.popen("python %s --simple"%(speedtest_path)).read()
    print 'ran'
    #split the 3 line result (ping,down,up)
    lines = a.split('\n')
    print a
    ts = time.time()
    date =datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
    #if speedtest could not connect set the speeds to 0
    if "Cannot" in a:
        p = 100
        d = 0
        u = 0
    #extract the values for ping down and up values
    # NOTE(review): fixed slices assume speedtest-cli's exact output layout.
    else:
        p = lines[0][6:11]
        d = lines[1][10:14]
        u = lines[2][8:12]
    print date,p, d, u
    #save the data to file for local network plotting
    out_file = open(csv_output_file_path + 'data.csv', 'a')
    writer = csv.writer(out_file)
    writer.writerow((ts*1000,p,d,u))
    out_file.close()
    my_auth = twitter.OAuth(ACCESS_TOKEN,ACCESS_TOKEN_SECRET,CONSUMER_KEY,CONSUMER_SECRET)
    twit = twitter.Twitter(auth=my_auth)
    #try to tweet if speedtest couldnt even connet. Probably wont work if the internet is down
    if "Cannot" in a:
        try:
            tweet="Hey @Comcast @ComcastCares why is my internet down? I pay for 150down\\10up in Washington DC? #comcastoutage #comcast"
##            twit.statuses.update(status=tweet)
            print tweet
        except:
            pass
    # tweet if down speed is less than whatever I set
    # NOTE(review): eval() on the parsed text works here, but float(d) would be safer.
    elif eval(d)<min_net_speed:
        print "trying to tweet"
        try:
            # i know there must be a better way than to do (str(int(eval())))
            tweet="Hey @Comcast why is my internet speed " + str(int(eval(d))) + "down\\" + str(int(eval(u))) + "up when I pay for 150down\\10up in Washington DC? @ComcastCares @xfinity #comcast #speedtest"
##            twit.statuses.update(status=tweet)
            print tweet
        except Exception,e:
            print str(e)
            pass
    return


if __name__ == '__main__':
    test()
    print 'completed'
|
7,275 | a10403d7809b97c1bcdfa73224b8c365519cc456 | a = ['a','b','c','d','e']
# Join the list elements into one semicolon-separated string: prints "a;b;c;d;e".
print(';'.join(a))
|
7,276 | a9c0251b3422457b2c0089b70308a70b09cfa0e0 | # Copyright (C) 2014 Abhay Vardhan. All Rights Reserved.
"""
Author: abhay.vardhan@gmail.com
We have not yet added tests which exercise the HTTP GET directly.
"""
__author__ = 'abhay'
from nose.tools import *
import test_data
import search_index
class TestClass:
    """Nose test suite for the food-truck search index built from sample data."""

    def setUp(self):
        # Rebuild the module-level index before every test for isolation.
        search_index.buildIndex(test_data.sample_food_trucks_data)

    def tearDown(self):
        pass

    def test_case_query_index(self):
        assert_equals(search_index.query_index, test_data.sample_query_index)

    def test_case_lat_index(self):
        assert_equals(search_index.sorted_latitudes, test_data.sample_latitude_index)

    def test_case_lng_index(self):
        assert_equals(search_index.sorted_longitudes, test_data.sample_longitude_index)

    def test_case_search_query(self):
        assert_equals(search_index.searchQuery('cold'), set([2, 3]))

    def test_case_search_query_case(self):
        # Text search should be case-insensitive.
        assert_equals(search_index.searchQuery('Cold'), set([2, 3]))

    def test_case_search_find_le(self):
        # find_le: index of the rightmost element <= the given value.
        assert_equals(search_index.find_le([10, 20, 30, 40], 20), 1)
        assert_equals(search_index.find_le([10, 20, 30, 40], 20.1), 1)
        assert_equals(search_index.find_le([10, 20, 30, 40], 30), 2)

    def test_case_search_find_ge(self):
        # find_ge: index of the leftmost element >= the given value.
        assert_equals(search_index.find_ge([10, 20, 30, 40], 20), 1)
        assert_equals(search_index.find_ge([10, 20, 30, 40], 30), 2)
        assert_equals(search_index.find_ge([10, 20, 30, 40], 20.1), 2)

    def test_case_search_lat(self):
        # Set of indices whose values fall inside the closed range [lo, hi].
        assert_equals(search_index.find_array_range_matching([10, 20, 30, 40], 20, 30), set([1, 2]))
        assert_equals(search_index.find_array_range_matching([10, 20, 30, 40], 19, 35), set([1, 2]))
        assert_equals(search_index.find_array_range_matching([10, 20, 30, 40], 9, 50), set([0, 1, 2, 3]))

    def test_case_search1(self):
        # A bounding box covering every sample truck returns all results.
        all_objectids = [x['objectid'] for x in search_index.all_results]
        results = search_index.search('', 37.7860914634251, -122.398658184604, 37.7901490737255, -122.3934729318)
        assert_equals([x['objectid'] for x in results],
                      all_objectids)

    def test_case_search2(self):
        # Tighter box keeps only the first three sample trucks.
        all_objectids = [x['objectid'] for x in search_index.all_results[0:3]]
        results = search_index.search('', 37.7879000978181, -122.398658184604, 37.7901490737255, -122.394594036205)
        assert_equals([x['objectid'] for x in results],
                      all_objectids)

    def test_case_search3(self):
        all_objectids = [x['objectid'] for x in search_index.all_results[0:3]]
        results = search_index.search('', 37.787, -122.398658184604, 37.7901490737255, -122.394)
        assert_equals([x['objectid'] for x in results],
                      all_objectids)

    def test_case_search4(self):
        # Combined text query and bounding box.
        all_objectids = [x['objectid'] for x in search_index.all_results[2:4]]
        results = search_index.search('cold', 37.7860914634251, -122.398658184604, 37.7901490737255, -122.3934729318)
        assert_equals([x['objectid'] for x in results],
                      all_objectids)

    def test_case_search5(self):
        all_objectids = [x['objectid'] for x in search_index.all_results[1:2]]
        results = search_index.search('cheese', 37.7860914634251, -122.398658184604, 37.7901490737255, -122.3934729318)
        assert_equals([x['objectid'] for x in results],
                      all_objectids)
|
7,277 | 5b0252dd862fe1e46c0c1df41935db16ae691dff | from django.db import models
# Create your models here.
class Products(models.Model):
    """A product listing — presumably a used vehicle, given the fields; confirm."""

    title = models.CharField(max_length=255)
    year = models.IntegerField(default=0)           # 0 appears to mean "unknown"
    feature = models.CharField(max_length=30)
    usage_status = models.CharField(max_length=25)  # free text, not a choices field
    kms_driven = models.CharField(max_length=10)    # stored as text, not a number
    price = models.CharField(max_length=10)         # stored as text, not a number
def sort_descending(numbers):
    """Reorder *numbers* in place from largest to smallest; returns None."""
    numbers[:] = sorted(numbers, reverse=True)
|
7,279 | 1446268583bf9fa3375319eae3c21cf47f47faca | from convert_data2 import array_rule
from convert_data2 import array_packet
import tensorflow as tf
import numpy as np
train_x, train_y = array_packet()
x_input, input_ip = array_rule()
n_nodes_hl1 = 210
n_nodes_hl2 = 210
n_nodes_hl3 = 210
n_classes = 2
batch_size = 500
hm_epochs = 20
x = tf.placeholder('float')
y = tf.placeholder('float')
z = tf.placeholder('float')
hidden_1_layer = {'f_fum': n_nodes_hl1,
'weight': tf.Variable(tf.random_normal([train_x.shape[1], n_nodes_hl1])),
'bias': tf.Variable(tf.random_normal([n_nodes_hl1]))}
hidden_2_layer = {'f_fum': n_nodes_hl2,
'weight': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
'bias': tf.Variable(tf.random_normal([n_nodes_hl2]))}
hidden_3_layer = {'f_fum': n_nodes_hl3,
'weight': tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
'bias': tf.Variable(tf.random_normal([n_nodes_hl3]))}
output_layer = {'f_fum': None,
'weight': tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
'bias': tf.Variable(tf.random_normal([n_classes])), }
def neural_network_model(data):
    """Forward pass: three ReLU hidden layers plus a linear output layer.

    Weights and biases come from the module-level layer dicts; *data* must be
    a batch of rows whose width matches train_x.shape[1]. Returns unscaled
    logits of shape (batch, n_classes).
    """
    l1 = tf.add(tf.matmul(data, hidden_1_layer['weight']), hidden_1_layer['bias'])
    l1 = tf.nn.relu(l1)
    l2 = tf.add(tf.matmul(l1, hidden_2_layer['weight']), hidden_2_layer['bias'])
    l2 = tf.nn.relu(l2)
    l3 = tf.add(tf.matmul(l2, hidden_3_layer['weight']), hidden_3_layer['bias'])
    l3 = tf.nn.relu(l3)
    output = tf.matmul(l3, output_layer['weight']) + output_layer['bias']
    return output
def train_neural_network(x):
    """Train the network on (train_x, train_y), then predict classes for x_input.

    *x* is the module-level input placeholder; the training data, batch size
    and epoch count are module globals. Returns a 1-D numpy array of predicted
    class indices for x_input.
    """
    prediction = neural_network_model(x)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        # Mini-batch training loop.
        for epoch in range(hm_epochs):
            epoch_loss = 0
            i = 0
            while i < len(train_x):
                start = i
                end = i + batch_size
                batch_x = np.array(train_x[start:end])
                batch_y = np.array(train_y[start:end])
                _, c = sess.run([optimizer, cost], feed_dict={x: batch_x,
                                                              y: batch_y})
                epoch_loss += c
                i += batch_size
            print('Epoch', epoch + 1, 'completed out of', hm_epochs, 'loss:', epoch_loss)
        result_array = np.array([])
        batch_x = np.array(x_input)
        print(batch_x)
        # BUG FIX: the prediction graph is built on placeholder x, so x must be
        # fed at inference time. The original fed the unrelated placeholder z,
        # which leaves x unfed and fails when the graph is evaluated.
        result = (sess.run(tf.argmax(prediction.eval(feed_dict={x: batch_x}), 1)))
        result_array = np.append(result_array, result)
    return result_array
train_neural_network(x)
|
7,280 | 421837698b7fc188c84a3221271f11a40d1625d9 |
# Trigger a log upload via the project's LogUpload helper.
from logupload import *

log = LogUpload()
# NOTE(review): the argument 4 is undocumented here — presumably a count or
# device id; confirm against LogUpload.uploadLogs.
log.uploadLogs(4)
7,281 | 34a8fc38ed875e1c564f535348dc0d5d88c76ab1 | # 1로 만들기
import sys

# "Make it one" (translated from Korean "1로 만들기"): minimum number of
# operations (divide by 3, divide by 2, subtract 1) needed to reduce N to 1.
N = int(sys.stdin.readline())
# dp_table[i] = minimum operations to reduce i to 1 (base cases: 2 and 3 take one).
dp_table = [0 for _ in range(10**6 + 1)]
dp_table[2], dp_table[3] = 1, 1
for i in range(4,N+1):
    # Sentinels larger than any reachable count, used when a division does not apply.
    two_per = 10**6
    three_per = 10**6
    if i % 3 ==0:
        three_per = dp_table[i//3] + 1
    if i % 2 ==0:
        two_per = dp_table[i//2] + 1
    minus = dp_table[i-1] + 1
    dp_table[i] = min(minus, two_per, three_per)
    # print(i, dp_table[i])
print(dp_table[N])
7,282 | 41aebc4ee9cb058c3351029773be05cdc4f84ffa |
##outcome: Hello, my name is B-max
print("Hello", end="")
print(", my name ", end="")
print("is B-max", end="")
print()
##outcome: ****************************************
for i in range(40):
print('*', end="")
print()
##outcome: x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*
for i in range(20):
print("x*", end="")
print()
##outcome: x*x*x*x*x*
##outcome: *x*x*x*x*x
##outcome: x*x*x*x*x*
##outcome: *x*x*x*x*x
##outcome: x*x*x*x*x*
##outcome: *x*x*x*x*x
##outcome: x*x*x*x*x*
##outcome: *x*x*x*x*x
##outcome: x*x*x*x*x*
##outcome: *x*x*x*x*x
for i in range(5):
for i in range(5):
print("x*", end="")
print()
for i in range(5):
print("*x", end="")
print()
|
7,283 | 8fcc2a13fd5a803e2d755a567c78c8274bd88aad | import json
import time
from typing import Dict
import threading
"""
Note: każdy request uruchamia osobny wątek.
Przegląd: `top -H -p <process_id>`
"""
from flask import Flask, jsonify, request
app = Flask(__name__)
# https://www.tutorialspoint.com/flask/flask_http_methods.htm
# ładowanie konfiguracji aplikacji (opcjonalne, ale to dobry pomysł);
# po zbudowaniu aplikacji (poniżej) file "config.json" powinien się znajdować w folderze aplikacji
with open('config.json', 'r') as f:
loaded = json.load(f)
magic = loaded['magic']
@app.route('/status')
def get_json_data():
    """Health-check endpoint; echoes the 'magic' value loaded from config.json."""
    return jsonify({'comment': f'App działa OK; magic:{magic}'})


# available at: http://localhost:5001/compute?a=10&b=0
@app.route('/compute')
def compute():
    """Return sum/difference/quotient of query params a and b (HTTP 400 if b == 0)."""
    a = int(request.args.get('a'))
    b = int(request.args.get('b'))
    # Each request runs on its own thread (see module docstring); the sleep
    # below makes the per-request threads visible in `top -H`.
    print(f'request a={a}, thread:{threading.current_thread().name}')
    time.sleep(10.0)
    if b == 0:
        # Return an error message together with HTTP status 400 (BAD_REQUEST).
        return jsonify({'comment': 'b==0, cannot divide'}), 400
    return jsonify({'sum': a + b, 'difference': a - b, 'division': a / b})


# available at: http://localhost:5001/welcome/roadrunner/suffix/nice%20to%20meet%20you
@app.route('/welcome/<username>/suffix/<message>')
def welcome(username, message):
    """Greet *username* with the given *message* (both taken from the URL path)."""
    return jsonify({'comment': f'Hello {username}, {message}!'})


class Auth:
    """Plain credentials record: user name plus password."""

    def __init__(self, user: str, pass_: str):
        self.user = user
        self.pass_ = pass_
# Exercise: collect users in some structure (e.g. a list, dict or set) and
# return an error when creating a user whose "user" field is already taken.
# Solution: keep registered users in a dict keyed by user name.
users: Dict[str, Auth] = {}


# Available via Postman as a POST request to:
#   localhost:5001/user/create
# with body "raw -> JSON", for example:
# {
#     "user": "Xi Wuhan",
#     "pass_": "123"
# }
@app.route('/user/create', methods=['POST'])
def create_user():
    """Register a new user; respond with HTTP 400 when the name is taken."""
    data = request.json
    k = Auth(**data)
    # Idiomatic membership test (was users.keys().__contains__(k.user)).
    if k.user in users:
        return jsonify({'comment': 'This user name already exists!'}), 400
    users[k.user] = k
    return jsonify(k.__dict__)
app.run(host='localhost', port=5001, debug=None, load_dotenv=False) # can skip all args
# możliwa kompilacja do pojedynczego pliku wykonywalnego:
# `pyinstaller _zero.py -n my_flask_app --onefile
|
7,284 | cf4170760fe6210d8b06f179484258f4ae3f8796 | #!/usr/bin/python3
from datetime import datetime
import time
import smbus
# Template used to render one frame of sensor readings.
SENSOR_DATA_FORMAT = "Speed: {} km/h\nSteering: {}\nThrottle: {}\nTemperature: {} C"


class SensorDataFrame:
    """One timestamped sample of (speed, steering, throttle, temperature)."""

    def __init__(self, data):
        self.speed, self.steering, self.throttle, self.temp = data
        self.timestamp = datetime.now()  # capture time of this frame

    def __str__(self):
        fields = (self.speed, self.steering, self.throttle, self.temp)
        return SENSOR_DATA_FORMAT.format(*fields)
def i2c_test():
    """Continuously poll the sensor at I2C address 0x66 and print decoded frames."""
    bus = smbus.SMBus(1)
    # bus.write_block_data(0x66, 0, [2, 32, 1, 0, 23])
    try:
        while True:
            start = time.time()
            try:
                # Read 4 bytes: (speed, steering, throttle, temp) — see SensorDataFrame.
                data = bus.read_i2c_block_data(0x66, 0, 4)
                print(str(SensorDataFrame(data)))
            except (IOError, TimeoutError, OSError):
                # Transient bus errors happen; skip this frame and keep polling.
                pass
            # NOTE(review): 0.028 s gives roughly a 35 Hz poll rate — confirm intended.
            time.sleep(0.028)
            print("")
            print("Time: " + str(time.time() - start))
            print("")
    except KeyboardInterrupt:
        pass
    finally:
        # Always release the bus handle, even on Ctrl-C.
        bus.close()


def main():
    """Entry point: run the I2C polling loop until interrupted."""
    i2c_test()


if __name__ == "__main__":
    main()
|
7,285 | 9339d3bc0c3005880b1c8d1c9914d6e28d39dbbd | from django import template
import ast
register = template.Library()
@register.simple_tag()
def multiplication(value, arg, *args, **kwargs):
    """Template tag: return value * arg (extra positional/keyword args ignored)."""
    return value * arg


@register.filter
def in_category(things, category):
    """Template filter: narrow *things* (a queryset) to the given category."""
    return things.filter(category=category)


@register.simple_tag()
def division(value, arg, *args, **kwargs):
    """Template tag: return value / arg; raises ZeroDivisionError when arg == 0."""
    return value / arg


@register.simple_tag()
def add(value, arg, *args, **kwargs):
    """Template tag: return value + arg."""
    return value + arg
7,286 | ff081a5ff46ab37dc5a144fb4616c06ef3bca490 | n, a, b = map(int, input().split())
cl = list(map(int, input().split()))
# Print the 1-based index of every element equal to a + b.
for i in range(n):
    if cl[i] == a + b:
        print(i + 1)
        # NOTE(review): all matches are printed; add `break` here if only the
        # first occurrence is wanted.
|
7,287 | 37d5696c402737bfafe21b20b90a49e2753fdc4f | import pandas as pd
import os
import openpyxl
from collections import defaultdict,deque
# 調節用パラメータ
filename = 'kaito7.xlsx' # 入力ファイル名
Output = 'output7.xlsx' # 出力ディレクトリ
wb = openpyxl.load_workbook(filename)
sheets = wb.sheetnames
days = []
names = []
dict = defaultdict(dict)
for sheet in sheets:
sh = wb[sheet]
i = 3
while True:
tmp = sh.cell(row=1,column=i).value
if tmp:
days.append(tmp)
else:
break
i += 1
print(days)
days.pop()
i = 2
while True:
tmp = sh.cell(row=i,column=2).value
if tmp:
names.append(tmp)
else:
break
i += 1
W = len(days)
H = len(names)
for y in range(2,2+H):
for x in range(3,3+W):
tmp = sh.cell(row=y,column=x).value
dict[names[y-2]][days[x-3]] = tmp
times = dict['しまむら']['7/10(水)'].split(', ')
ans = [[' ', ' '] + names]
for d in days:
for t in times:
tmpl = [d,t]
for n in names:
if dict[n][d] and t in dict[n][d]:
tmpl.append(1)
else:
tmpl.append(0)
ans.append(tmpl)
for a in ans:
print(a)
wb = openpyxl.load_workbook(Output)
sheets = wb.sheetnames
sheet = wb[sheets[0]]
def write_list_2d(sheet, l_2d, start_row, start_col):
    """Write 2-D list *l_2d* into *sheet* with its top-left cell at (start_row, start_col)."""
    for row_offset, row in enumerate(l_2d):
        for col_offset, value in enumerate(row):
            sheet.cell(row=start_row + row_offset,
                       column=start_col + col_offset,
                       value=value)
write_list_2d(sheet,ans,1,1)
wb.save(Output)
print(sheets[0])
|
7,288 | 6dda23cc5d0083e72520b0664b6550ccb48e4b4f | from count_freqs import *
from eval_gene_tagger import *
'''
Using gene.train gene.counts prediction file to evaluate the performance
Usage: python viterbi.py gene.counts gene.dev gene_dev.p1.out
'''
if __name__ == "__main__":
    # Command-line argument handling is disabled; the input/output paths are
    # hard-coded below (see the module docstring for the intended CLI usage).
    #if len(sys.argv)!=2: # Expect exactly one argument: the training data file
    #    usage()
    #    sys.exit(2)
    #try:
    #    input_counts = open(sys.argv[1],"r")
    #    dev_file = open(sys.argv[2],"r+")
    #    output_file2 = open(sys.argv[3],"w")
    #except IOError:
    #    sys.stderr.write("ERROR: Cannot read inputfile %s.\n" % arg)
    #    sys.exit(1)
    ########### Read gene.counts and write prediction into 'gene_dev.p1.out' #########
    # Hmm(3): HMM from count_freqs — presumably a trigram tagger; confirm in count_freqs.
    counter1 = Hmm(3)
    input_counts = open('gene_NoClass.counts','r')
    dev_file = open('gene.dev',"r+")
    output_file2 = open('gene_dev.NoClass.out.p2',"w")
    print('dev_file read')
    print('start training viterbi')
    # Reads the counts, runs Viterbi over the dev file, writes tagged output.
    counter1.train_viterbi( input_counts, dev_file, output_file2)
    print('finished training in viterbi')
    dev_file.close()
    output_file2.close()
    input_counts.close()
    print("gene_dev.p2.out file created")
######### Evaluate the result ############
'''
if len(sys.argv)!=3:
usage()
sys.exit(1)
gs_iterator = corpus_iterator(open(sys.argv[1]))
pred_iterator = corpus_iterator(open(sys.argv[4]), with_logprob = False)
evaluator = Evaluator()
evaluator.compare(gs_iterator, pred_iterator)
evaluator.print_scores()
''' |
7,289 | 2cdee8799678e8ead21a0f81c42eb7ce209cfec7 |
class thrs:
    """Adaptive detection threshold over a 1-D signal.

    After each detection, setNewPos() re-seeds the parameters from the local
    signal amplitude; getThrs() returns a threshold that is pinned to the
    local maximum during a refractory window of D0 samples and decays
    exponentially afterwards.
    """
    def __init__(self, input_wave):
        # NOTE(review): most of these numpy names are never used in this class.
        from numpy import mod, array, sqrt, dot,median,convolve
        self.D0 = 20            # refractory window length (samples)
        self.last_det = 0       # sample index of the most recent detection
        self.mu = 0.6           # constant used when deriving the decay rate
        self.a_up = 0.2         # smoothing factor when the signal rises above z_cumulative
        self.a_down = 0.6       # smoothing factor when it falls below z_cumulative
        self.z_cumulative = 10  # smoothed signal level (asymmetric EWMA)
        self.n_max = max(input_wave[:1000])
        self.input_wave = input_wave
        self.rr = 60            # last inter-detection distance (samples)
        self.setNewPos(0)
        # NOTE(review): this overwrites the lmbda just computed by setNewPos(0),
        # while self.A keeps the value derived from the old lmbda — confirm
        # this reset is intentional.
        self.lmbda = 10
    def setNewPos(self, pos):
        """Register a detection at sample `pos` and re-derive the threshold parameters."""
        self.rr = pos-self.last_det
        self.last_det = pos
        # numpy's max; it also shadows the builtin for the inner max([...]) call below.
        from numpy import max
        # Local amplitude: 110% of the peak in [pos-400, pos+1000), clipped to the signal.
        self.n_max = max(self.input_wave[max([pos-400,0]):min([pos+1000,len(self.input_wave)])])*1.1
        # Asymmetric smoothing: fast-ish rise (a_up), faster fall (a_down).
        if self.input_wave[pos]-self.z_cumulative > 0:
            self.z_cumulative = float(self.z_cumulative + self.a_up * (self.input_wave[pos]-self.z_cumulative))
        else:
            self.z_cumulative = float(self.z_cumulative + self.a_down * (self.input_wave[pos]-self.z_cumulative))
        from numpy import log,e
        # When rr == D0 the denominator is 0.0; log() returns a numpy float, so the
        # division yields inf (RuntimeWarning, not ZeroDivisionError) ...
        lmbda2 = log(self.mu)/((self.D0-self.rr)/2)
        from numpy import isinf
        # ... which this guard filters out, keeping the previous lmbda.
        # NOTE(review): on the very first call (from __init__) self.lmbda does
        # not exist yet, so an inf here would raise AttributeError below.
        if not isinf(lmbda2):
            self.lmbda = lmbda2
        # Scale chosen so the decaying curve equals z_cumulative at distance D0.
        self.A = self.z_cumulative/e**(-self.lmbda*self.D0)
        return
    def getThrs(self, pos):
        """Return the detection threshold at sample `pos`."""
        # Inside the refractory window: pin the threshold at the local maximum.
        if pos-self.last_det < self.D0:
            return self.n_max
        #elif pos-self.last_det < self.D1:
        #    return self.n_max - (self.n_max-self.z_cumulative)/(self.D1-self.D1)(pos-self.last_det)
        else:
            from numpy import e
            # Exponential decay with distance from the last detection.
            return self.A * e**(-self.lmbda * (pos-self.last_det))
|
7,290 | 30f02b956af68960804f0cb57695bdbf8510bc43 | Album,artist,year,songs="More Mayhem","Imelda May",2001,((1,"pulling the rug"),(2,"psycho"),(3,"mayhem"),(4,"kentisch town waltz"))
# Each entry of `songs` (defined above) is a (track_number, title) pair.
for song in songs:
    track,title=song
    print(" track number {}\t, title {}".format(track,title))
7,291 | 938c4325480608b904bfbe0b11c081166aad694b | # 체크는 오른쪽+아래로만 체크합니다.
def check22(y, x, board) :
    """Return the four coordinates of the 2x2 block anchored at (y, x) when all
    four cells hold the same non-'0' value; otherwise return False.

    Only the right, down and diagonal neighbours are probed, so each 2x2
    square is detected exactly once — by its top-left corner.
    """
    anchor = board[y][x]
    height, width = len(board), len(board[0])
    cells = [(y, x)]
    for dy, dx in ((0, 1), (1, 0), (1, 1)):
        ny, nx = y + dy, x + dx
        inside = 0 <= ny < height and 0 <= nx < width
        if not (inside and board[ny][nx] != '0' and anchor == board[ny][nx]):
            return False
        cells.append((ny, nx))
    return cells  # the caller deletes all of these in one sweep
def dropdown(board) :
    """Apply gravity: in every column, slide blocks down into the '0' gaps
    below them. Mutates `board` in place and returns it."""
    for col in range(len(board[0])):
        gap = 0  # number of empty cells seen so far below the current row
        # Scan bottom-up so lower gaps are counted before the blocks above them.
        for row in range(len(board) - 1, -1, -1):
            if board[row][col] == '0':
                gap += 1
            elif gap:
                # Floating block: drop it by exactly `gap` cells.
                board[row + gap][col] = board[row][col]
                board[row][col] = '0'
    return board
def deleteBoard(delete, board) :
    """Blank out every (row, col) coordinate listed in `delete` (in place) and
    return the board."""
    for row, col in delete:
        board[row][col] = '0'
    return board
def solution(m, n, board):
    """Count the total number of cells removed by repeatedly clearing every
    matching 2x2 block and letting the remaining blocks fall.

    `m` x `n` is the board size (kept for the problem signature; the loops use
    len() directly). `board` arrives as a list of strings and is converted to
    mutable rows in place.
    """
    # Strings -> mutable character rows (mutating the caller's list, as before).
    for idx, row_str in enumerate(board):
        board[idx] = list(row_str)
    removed_total = 0
    while True:
        to_clear = set()
        for row in range(len(board)):
            for col in range(len(board[0])):
                found = check22(row, col, board)
                if found:
                    to_clear.update(found)
        if not to_clear:
            break  # stable: no 2x2 block left to clear
        removed_total += len(to_clear)
        # Clear this round's cells, then let everything fall before re-scanning.
        board = dropdown(deleteBoard(list(to_clear), board))
    return removed_total
|
7,292 | b233d212f3a6c453786dc54b2d43578e1faae417 |
import json
from flask import current_app, request, jsonify, make_response
from flask_cors import cross_origin
from alerta.auth.utils import is_authorized, create_token, get_customer
from alerta.utils.api import absolute_url, deepmerge
from . import auth
try:
import saml2
import saml2.entity
import saml2.metadata
import saml2.config
import saml2.client
import saml2.saml
except ImportError:
pass # saml2 authentication will not work
def spConfig():
    # NOTE(review): returns a brand-new, *unconfigured* Config on every call.
    # A caller that loads settings into one instance and then calls spConfig()
    # again gets a fresh empty object, not the configured one.
    return saml2.config.Config()
def saml_client():
    """Build a SAML2 service-provider client from the app's SAML2_CONFIG.

    The default assertion-consumer-service endpoint below is merged with (and
    can be overridden by) ``current_app.config['SAML2_CONFIG']``.
    """
    saml2_config_default = {
        'entityid': absolute_url(),
        'service': {
            'sp': {
                'endpoints': {
                    'assertion_consumer_service': [
                        (absolute_url('/auth/saml'), saml2.BINDING_HTTP_POST)
                    ]
                }
            }
        }
    }
    # Bug fix: previously the merged settings were loaded into a throwaway
    # Config object while a second, *unconfigured* spConfig() instance was
    # handed to Saml2Client. Load and use the same instance.
    config = spConfig()
    config.load(deepmerge(saml2_config_default, current_app.config['SAML2_CONFIG']))
    return saml2.client.Saml2Client(config=config)
@auth.route('/auth/saml', methods=['GET'])
def saml_redirect_to_idp():
    """Start SP-initiated login: 302-redirect the browser to the IdP.

    A ``usePostMessage`` query argument is tunnelled through RelayState so the
    POST handler knows to reply with a postMessage page instead of JSON.
    """
    relay_state = None if request.args.get('usePostMessage') is None else 'usePostMessage'
    # prepare_for_authenticate() returns the redirect target in result['headers'].
    (session_id, result) = saml_client().prepare_for_authenticate(relay_state=relay_state)
    return make_response('', 302, result['headers'])
@auth.route('/auth/saml', methods=['OPTIONS', 'POST'])
@cross_origin(supports_credentials=True)
def saml_response_from_idp():
    """Consume the IdP's SAML response and issue an application token.

    Replies either as JSON or — when RelayState requested it and the client
    accepts HTML — as a small page that posts the result to the opener window.
    """
    def _make_response(resp_obj, resp_code):
        # postMessage flow: the login happened in a popup, so hand the result
        # back to the opening window and close the popup.
        if 'usePostMessage' in request.form.get('RelayState', '') and 'text/html' in request.headers.get('Accept', ''):
            origins = current_app.config.get('CORS_ORIGINS', [])
            # {origins} and {msg_data} are the only format placeholders in the template.
            response = make_response(
                '''<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Authenticating...</title>
<script type="application/javascript">
var origins = {origins};
// in case when API and WebUI are on the same origin
if (origins.indexOf(window.location.origin) < 0)
origins.push(window.location.origin);
// only one will succeed
origins.forEach(origin => window.opener.postMessage({msg_data}, origin));
window.close();
</script>
</head>
<body></body>
</html>'''.format(msg_data=json.dumps(resp_obj), origins=json.dumps(origins)),
                resp_code
            )
            response.headers['Content-Type'] = 'text/html'
            return response
        else:
            return jsonify(**resp_obj), resp_code
    authn_response = saml_client().parse_authn_request_response(
        request.form['SAMLResponse'],
        saml2.entity.BINDING_HTTP_POST
    )
    identity = authn_response.get_identity()
    email = identity['emailAddress'][0]
    domain = email.split('@')[1]
    # Each identity attribute is a list; take the first value of each for formatting.
    name = (current_app.config.get('SAML2_USER_NAME_FORMAT', '{givenName} {surname}')).format(**dict(map(lambda x: (x[0], x[1][0]), identity.items())))
    groups = identity.get('groups', [])
    # NOTE(review): is_authorized() is treated here as truthy when the user is
    # NOT authorized — confirm against alerta.auth.utils.is_authorized.
    if is_authorized('ALLOWED_SAML2_GROUPS', groups):
        return _make_response({'status': 'error', 'message': 'User {} is not authorized'.format(email)}, 403)
    customer = get_customer(email, groups=[domain])
    token = create_token(email, name, email, provider='saml2', customer=customer, groups=groups)
    return _make_response({'status': 'ok', 'token': token.tokenize}, 200)
@auth.route('/auth/saml/metadata.xml', methods=['GET'])
def saml_metadata():
    """Serve this service provider's SAML metadata XML."""
    # NOTE(review): spConfig() returns an *unconfigured* Config (SAML2_CONFIG
    # is never loaded here), so the generated entity descriptor may be empty —
    # verify against the configuration built in saml_client().
    edesc = saml2.metadata.entity_descriptor(spConfig())
    response = make_response(str(edesc))
    response.headers['Content-Type'] = 'text/xml; charset=utf-8'
    return response
|
7,293 | 0fb8a9b1073446a62b46a802da69b66e78533c2a | sheik=['a','e','i','o','u','A','E','I','O','U']
# Python 2 script (raw_input): classify a single character.
s = raw_input()
if s in sheik:
    print('Vowel')
# Bug fix: the old condition was `s != sheik` — a string compared with a list,
# which is always true, so 'invalid' was unreachable and any non-vowel input
# (including '42' or '') was reported as a consonant. Only a single
# alphabetic character counts as a consonant now.
elif len(s) == 1 and s.isalpha():
    print('Consonant')
else:
    print('invalid')
7,294 | 43196258b61801799b8d6b7d23f5816d84cb5dff | import csv
import os

# Copy sample.csv to sample1.csv with a fixed header, asking before overwriting.
with open("sample.csv") as rf:
    csv_reader = csv.DictReader(rf)
    csv_headers = ['fname', 'lname', 'email']
    # Bug fix: the old code opened sample1.csv with "w" *before* checking
    # whether it existed — open(..., "w") creates/truncates immediately, so
    # os.path.isfile() was always True and the old file was destroyed before
    # the user could decline.
    if os.path.isfile('sample1.csv'):
        q = input("File already exists. Do you want to overwrite?")
        proceed = q.lower() == 'yes'
    else:
        proceed = True
    if proceed:
        with open("sample1.csv", "w") as wf:
            csv_writer = csv.DictWriter(wf, fieldnames=csv_headers, delimiter=',')
            csv_writer.writeheader()
            for l in csv_reader:
                csv_writer.writerow(l)
    else:
        print("Please try with a different file name")
7,295 | d058c3df8513e07e4ff7035aa5c5885819e43687 | from modeller import *
from modeller.automodel import *
# This part was within the script loop_modelling_2
# Here is is in a separate file for loop_modelling_3 so the script can be run in parallel
class MyLoop(dopehr_loopmodel):
    """DOPE-HR loop model restricted to residues 218-231 (the second loop)."""
    def select_atoms(self):
        # Here only the second loop atoms are allowed to move so we do not mess with the first loop we have previously refined
        return selection(self.residue_range('218:', '231:'))
    def select_loop_atoms(self):
        # Same residue window for the loop-refinement stage.
        return selection(self.residue_range('218:', '231:'))
|
7,296 | c05994471d6608b5e48b71d253304a43100d583f | import numpy as np
import scipy
class node(object):
    """A single decision-tree node.

    `j` is the index of the feature this node splits on; `children`, `c`
    (split value) and `vals` start as None and are filled in during training.
    """
    def __init__(self, feature_idx):
        super(node, self).__init__()
        self.j = feature_idx
        # Populated later, during fitting.
        self.children = None
        self.c = None
        self.vals = None
class decisionTree(object):
    """Skeleton decision-tree estimator; fit/predict are not implemented yet."""
    def __init__(self, arg):
        super(decisionTree, self).__init__()
        self.arg = arg
    def fit(self, X, y):
        """Train on (X, y). Placeholder — does nothing and returns None."""
        pass
    def predict(self, X):
        """Predict labels for X. Placeholder — does nothing and returns None."""
        pass
def information_gain(x, y):
    """Information gain of x given y.

    Still a stub: only the entropy helper is defined and nothing is returned
    yet (TODO: compute H(x) - H(x|y)).
    """
    def entropy(x):
        # Bug fix: prob() takes `return_labels`, not `return_counts`; the old
        # keyword raised TypeError as soon as entropy() was called.
        px = prob(x, return_labels=False)
        return scipy.stats.entropy(px)
def prob(x, return_labels=True):
    """Empirical distribution of the values in array `x`.

    Returns (probabilities, unique_values) by default, or just the probability
    vector when `return_labels` is False.
    """
    labels, counts = np.unique(x, return_counts=True)
    px = counts / x.shape[0]
    return (px, labels) if return_labels else px
def conditional_prob(x, y):
    """
    calculate p(x|y)

    Placeholder: not implemented yet — the body is only this docstring, so the
    function currently returns None.
    """
def conditional_entropy(x, y, px=None):
pass |
7,297 | c8f2df1471a9581d245d52437470b6c67b341ece | class Solution:
def maxSideLength(self, mat: List[List[int]], threshold: int) -> int:
def squareSum(r1: int, c1: int, r2: int, c2: int) -> int:
return prefixSum[r2 + 1][c2 + 1] - prefixSum[r1][c2 + 1] - prefixSum[r2 + 1][c1] + prefixSum[r1][c1]
m = len(mat)
n = len(mat[0])
ans = 0
prefixSum = [[0] * (n + 1) for _ in range(m + 1)]
for i in range(1, m + 1):
for j in range(1, n + 1):
prefixSum[i][j] = mat[i - 1][j - 1] + prefixSum[i][j - 1] + \
prefixSum[i - 1][j] - prefixSum[i - 1][j - 1]
for i in range(m):
for j in range(n):
for length in range(ans, min(m - i, n - j)):
if squareSum(i, j, i + length, j + length) > threshold:
break
ans = max(ans, length + 1)
return ans
|
7,298 | 965bb4c8e7d6650dab7f002645dceacab59a0c5c | import FWCore.ParameterSet.Config as cms
# Process every event in the input files (-1 = no limit).
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
# Primary and secondary (parent) input file lists, populated by extend() below.
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
readFiles.extend( [
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/FE26AAB2-D90B-E211-AD0F-0025902009B8.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/FCB6A333-C70B-E211-8C99-001E67396D51.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/FA9CB2B5-D90B-E211-82B1-001E67397B07.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F8F81697-E90B-E211-9A48-002590200834.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F88B7838-C70B-E211-8971-001E673968F1.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F6481280-E90B-E211-8349-002590200B34.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F4DAB680-B90B-E211-BE7E-003048D47A6C.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F2F3F436-C70B-E211-A3A4-002481E1511E.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/F03693B3-D90B-E211-8CFB-001E67398633.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EEF20E3B-C70B-E211-953A-002590200970.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EEA69380-E90B-E211-833A-002590200970.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EE92E708-A90B-E211-BE6A-001E67397B07.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EC8A6530-C70B-E211-9D59-002590200840.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EAE34E85-B90B-E211-B5AD-003048673F3A.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/EACF738F-E90B-E211-8D44-00259020081C.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/E43E9F40-C70B-E211-8CFE-001E67396644.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DEF585B4-D90B-E211-AD4B-002590200B38.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DE991237-C70B-E211-A065-001E67397003.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DE69B13F-B90B-E211-A320-002481E1511E.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DCDE4B42-C70B-E211-9F88-003048D4602A.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DC7EDE05-A90B-E211-B465-0025902008F4.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/DAFF741D-A90B-E211-B24E-001E673969D2.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D865D241-B90B-E211-A391-003048673F26.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D6C4A74C-C70B-E211-B449-003048D45F78.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D4EB5C31-C70B-E211-AC1B-002590200AD0.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D49F4B34-C70B-E211-99F4-0025B3E06400.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D2C6963C-C70B-E211-9D24-002590200908.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D26CAF3C-C70B-E211-A812-002590200930.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/D026AE93-B90B-E211-9E76-002481E14D76.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/CCD8F139-C70B-E211-B2E8-003048D47A4C.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/CA85ABB8-D90B-E211-A2BB-001E67397E13.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/CA63512E-C70B-E211-8DDF-001E672CC1E7.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/C480E406-A90B-E211-8B58-001E67397D00.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/C0D5E949-9C0B-E211-A208-001E673967C5.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/C0D100AE-D90B-E211-8962-001E67396DBA.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/BCF27C36-C70B-E211-876B-002590200A6C.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/B4E711BB-D90B-E211-A42C-001E67396E3C.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/B2A005DE-D90B-E211-94B3-001E67397D91.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/AC3D508B-B90B-E211-AB8D-003048D45F2C.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/AAC74E91-B90B-E211-A9FF-002590200A98.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/AAAE1D0A-A90B-E211-907D-001E67398CB9.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/A89EA633-C70B-E211-AF12-0025902009A4.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/A836D251-C70B-E211-BFDD-0025902008E4.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/A448E095-E90B-E211-8CED-001E673969D2.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9EBE5A69-C70B-E211-A36E-001E67398E12.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9C84FAB1-D90B-E211-8EDF-001E67396874.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9AFA54C5-D90B-E211-9C13-001E67396568.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/9A4A0F32-C70B-E211-A372-002590200898.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/94BE773D-C70B-E211-836F-001E67398CE1.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/944B6544-B90B-E211-920A-002481E1511E.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/90CBB57E-E90B-E211-AB2F-0025902009C0.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8E16A8F3-D90B-E211-83D6-002590200B0C.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8ACD8F9C-B90B-E211-8F86-002590200B4C.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8ABCBFC1-D90B-E211-9C77-002590200B68.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8AAE9A07-A90B-E211-ABCF-001E673967C5.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/8A5DE24C-C70B-E211-9271-002590200988.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/88CA0942-C70B-E211-A894-0025B31E3CC0.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/7EFFEF3A-C70B-E211-A78B-001E67396A63.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/7CDD1A9C-B90B-E211-99CE-003048D45FD8.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/7A951BB1-D90B-E211-B97A-003048D476B4.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/78C1620C-A90B-E211-AF89-001E67396761.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/748AA33D-C70B-E211-AA21-001E67398390.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/74013EE7-D90B-E211-B929-001E673970C1.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/726A8A0A-A90B-E211-86C8-001E67397094.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/70698A49-C70B-E211-BE12-002590200A28.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6E05B863-C70B-E211-B476-002590200938.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6E03CAFF-D90B-E211-96B9-001E67396C52.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6CB7A769-C70B-E211-A569-002590200A80.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6C3E469D-B90B-E211-93ED-003048D45FE8.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/68D8E30B-A90B-E211-9884-003048673F24.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/6644544A-C70B-E211-B9D8-001E67398E49.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/60FAAC62-9C0B-E211-B091-002590200B00.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5E932B7F-F60B-E211-A37C-001E67398C05.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5E2DFB9D-B90B-E211-8767-0025B31E3C3C.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5C9369BA-D90B-E211-AB39-001E67397396.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5485BB36-C70B-E211-A62A-002590200A98.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/54439593-B90B-E211-AF3D-001E67398011.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/540D144A-9C0B-E211-BE2D-001E67396C9D.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/5087B43E-B90B-E211-834E-003048D45FB6.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/50119B4C-C70B-E211-BC7A-00259020083C.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4EBF2B87-B90B-E211-8020-003048D476C4.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4E90C544-B90B-E211-92CF-001E67396DCE.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4E42EA41-C70B-E211-89E7-002590200900.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4E24ECEB-D90B-E211-B732-001E67397CCE.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4A3C00E2-D90B-E211-81B6-0025902009B0.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/487FA490-B90B-E211-B401-003048D45FE8.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/46C80D32-C70B-E211-ADC0-003048D45F98.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4680304C-B90B-E211-9E05-003048D479F2.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4442750C-A90B-E211-982C-001E67396644.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/428A4E96-B90B-E211-8098-002590200B74.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/4048B9E0-D90B-E211-AD88-001E67397B07.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3ECD1D4C-B90B-E211-BCE7-003048D46034.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3EA42648-9C0B-E211-96A1-001E673972F6.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3E5C2DB5-D90B-E211-AFAA-9C8E991A143E.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3C91E824-A90B-E211-A981-001E67397D00.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3AF523B1-D90B-E211-A075-001E67396BB7.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3A3BB131-C70B-E211-AE83-001E67396DB5.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/3642D7AF-D90B-E211-A79C-0030486740BA.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/30A91F44-9C0B-E211-ABA7-001E6739811A.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/30897650-C70B-E211-9F69-0025902008D8.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/304AEF43-C70B-E211-8856-003048D45F98.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2E24DE3A-B90B-E211-ACC7-0025B3E06556.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2CE679E6-D90B-E211-B835-002590200B0C.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2C7439E9-D90B-E211-8919-002590200930.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2AFB4848-B90B-E211-A519-001E673965FE.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2A854B08-A90B-E211-9851-001E67397701.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/2817039C-B90B-E211-9F8D-0025B31E3C58.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/241AF10A-A90B-E211-BB12-001E67397CCE.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/240A3B43-B90B-E211-BA5F-002481E14FFC.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/20986287-B90B-E211-942A-003048D47A4C.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/1EB30D07-DA0B-E211-BE8F-001E67398E62.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/1E2DEC38-B90B-E211-B323-003048D476C2.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/1C490588-B90B-E211-99B7-003048D45FAE.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0E69E144-B90B-E211-AFD2-0025B3E05DB6.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0CF5EAB8-D90B-E211-AD4B-002590200AD0.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0CBE6239-B90B-E211-8155-001E67396A18.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/08A93150-9C0B-E211-9BF5-001E67396EAA.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0639D68C-B90B-E211-953D-003048D4609E.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/060DDA6A-C70B-E211-BF0C-001E67396D4C.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/02C8D108-DA0B-E211-8141-001E67397396.root',
'/store/mc/Summer12_DR53X/TTH_Inclusive_M-115_8TeV_pythia6/AODSIM/PU_S10_START53_V7A-v1/00000/0078C0C1-D90B-E211-83A4-001E67396E32.root' ] );
secFiles.extend( [
] )
|
7,299 | 5b860144a592505fea3a8849f5f5429a39ab9053 | #!/usr/bin/env python
"""
mahjong.playerhand
"""
from collections import Counter
from melds import (DiscardedBy, Chow, Pung, Kong)
from shanten import (
count_shanten_13_orphans,
count_shanten_seven_pairs,
count_shanten_std)
import tiles
from walls import TileWallAgent
class PlayerHand:
"""Player's hand."""
    def __init__(self, concealed, exposed=None, initial_update=True):
        """Build a hand.

        `concealed` may be a tile-notation string, a Counter, or any iterable
        of tiles (a Counter is stored as-is, shared with the caller).
        `exposed` is an optional list of melds. `initial_update` controls
        whether the claim caches and shanten counts are computed immediately;
        the commit_* paths defer it.
        """
        if isinstance(concealed, str):
            concealed = tiles.tiles(concealed)
        if isinstance(concealed, Counter):
            self._concealed = concealed
        else:
            self._concealed = Counter(concealed)
        self._exposed = exposed or []
        # Claimable-tile caches, rebuilt by update_claimable_tiles().
        self.claimable_chow = {}
        self.claimable_pung = {}
        self.claimable_kong = {}
        self.claimable_win = {}
        # Shanten counts for the standard, seven-pairs and 13-orphans forms.
        self.shanten_std, self.shanten_7, self.shanten_13 = None, None, None
        if initial_update:
            self.update()
    def __str__(self):
        """Render as '<concealed tiles> <exposed melds> <N> シャンテン'."""
        # concealed part
        concealed_part = tiles.format_tiles(self.concealed_part.elements())
        # exposed part
        exposed_part = ' '.join(str(meld) for meld in self.exposed_parts)
        return f'{concealed_part} {exposed_part} {self.shanten} シャンテン'
    @property
    def is_concealed(self) -> bool:
        """Determine if all the tile is concealed."""
        # return not self._exposed
        # NOTE(review): decided by counting 13 concealed tiles rather than by
        # checking the exposed-meld list (see the commented-out form); confirm
        # how a concealed Kong is meant to be treated here.
        return sum(self.concealed_part.values()) == 13
    @property
    def concealed_part(self):
        """Return concealed tiles.

        A live Counter mapping tile -> count; the commit_* methods mutate it
        in place.
        """
        return self._concealed
    def get_concealed_part_by_class(self, tile_class) -> Counter:
        """Return the part that consists of specific tiles"""
        # Counter intersection keeps only the tiles selected by the class filter.
        return self.concealed_part & tiles.get_filter(tile_class)
    @property
    def exposed_parts(self):
        """Return exposed melds.

        A live list; the commit_* methods append/replace melds in it.
        """
        return self._exposed
    @property
    def shanten(self):
        """Return the shanten number"""
        # Seven pairs and thirteen orphans require a fully concealed hand, so
        # an open hand falls back to the standard-form count alone.
        if not self.is_concealed:
            return self.shanten_std
        return min(self.shanten_std, self.shanten_7, self.shanten_13)
    def can_claim_chow(self, discarded_tile: tiles.Tile) -> bool:
        """Test if the player can claim for a Chow.

        >>> [PlayerHand('12m東南').can_claim_chow(
        ...     tiles.tiles('3{}'.format(i))) for i in 'mps']
        [True, False, False]
        >>> [PlayerHand('89m東南').can_claim_chow(
        ...     tiles.tiles('7{}'.format(i))) for i in 'mps']
        [True, False, False]
        >>> [PlayerHand('35p東南').can_claim_chow(
        ...     tiles.tiles('4{}'.format(i))) for i in 'mps']
        [False, True, False]
        >>> [PlayerHand('4567m').can_claim_chow(
        ...     tiles.tiles('{}m'.format(i))) for i in range(1, 10)]
        [False, False, True, True, True, True, True, True, False]
        >>> any(PlayerHand('258p西').can_claim_chow(
        ...     tiles.tiles('{}p'.format(i))) for i in range(1, 10))
        False
        >>> all(PlayerHand('1112345678999s').can_claim_chow(
        ...     tiles.tiles('{}s'.format(i))) for i in range(1, 10))
        True
        """
        # O(1) lookup; claimable_chow is precomputed by update_claimable_tiles_chow().
        return discarded_tile in self.claimable_chow
    def can_claim_pung(self, discarded_tile: tiles.Tile):
        """Test if the player can claim for a Pung.

        >>> hand = PlayerHand('149m66s発発')
        >>> hand.can_claim_pung(tiles.tiles('1s'))
        False
        >>> hand.can_claim_pung(tiles.tiles('6s'))
        True
        >>> hand.can_claim_pung(tiles.tiles('発'))
        True
        >>> hand = PlayerHand('9m66s2p発発発')
        >>> hand.can_claim_pung(tiles.tiles('6s'))
        True
        >>> hand.can_claim_pung(tiles.tiles('発'))
        True
        >>> PlayerHand('149m6s白発中').can_claim_pung(tiles.tiles('発'))
        False
        >>> [PlayerHand('1112345678999m').can_claim_pung(
        ...     tiles.tiles(f'{i}m')) for i in range(1, 10)]
        [True, False, False, False, False, False, False, False, True]
        """
        # O(1) lookup; claimable_pung is precomputed by the update_claimable_* pass.
        return discarded_tile in self.claimable_pung
    def can_claim_kong(self, target_tile: tiles.Tile):
        """Test if the player can claim for a Kong (melded or concealed).

        >>> PlayerHand('149m66s発発').can_claim_kong(tiles.tiles('発'))
        False
        >>> PlayerHand('9m66s2p発発発').can_claim_kong(tiles.tiles('発'))
        True
        """
        # O(1) lookup; claimable_kong is precomputed by the update_claimable_* pass.
        return target_tile in self.claimable_kong
    def commit_chow(
            self,
            new_tile: tiles.Tile,
            tile1: tiles.Tile,
            tile2: tiles.Tile):
        """Add a Chow to the exposed part.

        `new_tile` is the claimed discard; `tile1` and `tile2` come from this
        hand and are removed from the concealed part. No DiscardedBy argument
        is taken (the Chow meld carries it — the doctest shows LEFT).

        >>> player_hand = PlayerHand('12457789m45p346s')
        >>> target_tile = tiles.tiles('8m')
        >>> tile1, tile2 = tiles.tiles('7m'), tiles.tiles('9m')
        >>> player_hand.commit_chow(target_tile, tile1, tile2)
        >>> chow = player_hand.exposed_parts[0]
        >>> isinstance(chow, Chow)
        True
        >>> chow.concealed
        False
        >>> print(chow.discarded_by)
        DiscardedBy.LEFT
        >>> player_hand.concealed_part[tile1]
        1
        >>> player_hand.concealed_part[target_tile]
        1
        >>> player_hand.concealed_part[tile2]
        0
        """
        self.exposed_parts.append(Chow([new_tile, tile1, tile2], False))
        # Only the two tiles from the hand leave the concealed part.
        self.concealed_part.subtract([tile1, tile2])
        # Deliberately deferred; the caller refreshes state later.
        # self.update()
    def commit_pung(self, tile: tiles.Tile, discarded_by: DiscardedBy):
        """Add a Pung to the exposed part.

        The claimed discard plus two tiles from this hand form the meld, so
        the concealed count of `tile` drops by 2.

        >>> player_hand = PlayerHand('2457789m248p14s白')
        >>> target_tile = tiles.tiles('7m')
        >>> player_hand.commit_pung(target_tile, DiscardedBy.CENTER)
        >>> pung = player_hand.exposed_parts[0]
        >>> assert isinstance(pung, Pung)
        >>> assert pung.tileinfo == target_tile
        >>> pung.concealed
        False
        >>> print(pung.discarded_by)
        DiscardedBy.CENTER
        >>> player_hand.concealed_part[target_tile]
        0
        """
        self.exposed_parts.append(Pung(tile, False, discarded_by))
        self.concealed_part.subtract({tile: 2})
        # Deliberately deferred; the caller refreshes state later.
        # self.update()
def commit_kong(self, tile: tiles.Tile, discarded_by: DiscardedBy):
    """Add/extend a Kong.

    Determine if the claiming for this Kong is a melded, concealed or
    extension Kong by this hand and ``discarded_by``: a truthy
    ``discarded_by`` means a melded Kong from that player's discard;
    otherwise four concealed copies make a concealed Kong, and failing
    that an existing exposed Pung of ``tile`` is extended.

    Example 1: melded Kong (大明槓)

    >>> hand = PlayerHand(tiles.tiles('479m378p568s東東東白'))
    >>> hand.commit_kong(tiles.tiles('東'), DiscardedBy.CENTER)
    >>> hand.concealed_part - Counter(tiles.tiles('479m378p568s白'))
    Counter()
    >>> kong = hand.exposed_parts[-1]
    >>> print(kong.discarded_by)
    DiscardedBy.CENTER

    Example 2: concealed Kong (暗槓)

    >>> hand = PlayerHand(tiles.tiles('479m378p568s東東東東'))
    >>> hand.commit_kong(tiles.tiles('東'), None)
    >>> hand.concealed_part - Counter(tiles.tiles('479m378p568s'))
    Counter()
    >>> kong = hand.exposed_parts[-1]
    >>> print(kong.discarded_by)
    None

    Example 3: extension Kong (加槓)

    >>> hand = PlayerHand(tiles.tiles('479m378p568s白'),
    ...     [Pung(tiles.tiles('東'), True, DiscardedBy.RIGHT)])
    >>> hand.commit_kong(tiles.tiles('東'), None)
    >>> kong = hand.exposed_parts[-1]
    >>> isinstance(kong, Kong)
    True
    >>> kong.tileinfo == tiles.tiles('東')
    True
    >>> print(kong.discarded_by)
    DiscardedBy.RIGHT
    """
    if discarded_by:
        # A melded Kong: three concealed copies plus the claimed discard.
        self.exposed_parts.append(Kong(tile, False, discarded_by))
        self.concealed_part.subtract({tile: 3})
    elif self.concealed_part.get(tile, 0) == 4:
        # A concealed Kong: all four copies are already in hand.
        self.exposed_parts.append(Kong(tile, True, None))
        self.concealed_part.subtract({tile: 4})
    else:
        # A melded Pung is extended to a melded Kong.
        # NOTE(review): this branch assigns through ``self._exposed`` while
        # the branches above append to ``self.exposed_parts`` — presumably
        # the latter is a view over the former; confirm they alias the same
        # list, else the extension is silently lost.
        # NOTE(review): the ``tileinfo`` match has no isinstance(meld, Pung)
        # guard — confirm Chow melds can never compare equal here.
        for i, meld in enumerate(self.exposed_parts):
            if meld.tileinfo == tile:
                self._exposed[i] = meld.extend_to_kong()
                break
    # Note: update_shanten() must not be called until the replacement tile
    # has been drawn from the dead wall (リンシャン).
    # self.update()
def update(self):
    """Refresh all derived state: claimable tiles first, then shanten."""
    self.update_claimable_tiles()
    self.update_shanten()
def update_claimable_tiles(self):
    """Recompute the claimable-tile sets for Chow, Pung and Kong."""
    self.update_claimable_tiles_chow()
    self.update_claimable_tiles_pung()
    self.update_claimable_tiles_kong()
def update_claimable_tiles_chow(self):
    """Update information for claiming a Chow.

    A tile is claimable when the concealed part of the same suit holds
    two tiles that complete a run with it.

    >>> player_hand = PlayerHand('26m334568p38s東発発')
    >>> player_hand.update_claimable_tiles_chow()
    >>> set(tiles.tiles('234567p')) == player_hand.claimable_chow
    True
    """
    def _mate_pairs(tile):
        # The three two-tile combinations that form a run with ``tile``.
        return ((tile - 2, tile - 1),
                (tile - 1, tile + 1),
                (tile + 1, tile + 2))

    claimable = set()
    for suit_range, suit_filter in (
            (tiles.TILE_RANGE_CHARACTERS, tiles.FILTER_CHARACTERS),
            (tiles.TILE_RANGE_CIRCLES, tiles.FILTER_CIRCLES),
            (tiles.TILE_RANGE_BAMBOOS, tiles.FILTER_BAMBOOS)):
        part = self.concealed_part & suit_filter
        claimable.update(
            tile for tile in suit_range
            if any(lo in part and hi in part
                   for lo, hi in _mate_pairs(tile)))
    self.claimable_chow = claimable
def update_claimable_tiles_pung(self):
    """Update information for claiming a Pung.

    A tile is claimable when the concealed part holds at least a pair
    of it.

    >>> player_hand = PlayerHand('26m334568p38s東発発')
    >>> player_hand.update_claimable_tiles_pung()
    >>> set(tiles.tiles('3p発')) == player_hand.claimable_pung
    True
    """
    self.claimable_pung = {
        tile for tile, count in self.concealed_part.items()
        if count >= 2}
def update_claimable_tiles_kong(self):
    """Update information for claiming a Kong.

    A tile is claimable when either the concealed part holds three
    copies (melded Kong, 大明槓) or four copies (concealed Kong, 暗槓),
    or an exposed Pung of it can be extended (加槓).

    >>> player_hand = PlayerHand('26m333368p38s発発発')
    >>> player_hand.update_claimable_tiles_kong()
    >>> set(tiles.tiles('3p発')) == player_hand.claimable_kong
    True
    """
    counter = self.concealed_part
    # Melded Kong (大明槓) or concealed Kong (暗槓).
    self.claimable_kong = set(
        tile for tile in counter if counter[tile] in (3, 4))
    # Extension Kong (加槓) from an exposed Pung.
    # BUG FIX: the original called set.union(), which returns a new set
    # and leaves the receiver unchanged, so Pung-extension tiles were
    # never registered; update() mutates in place.
    self.claimable_kong.update(
        meld.tileinfo for meld in self.exposed_parts
        if isinstance(meld, Pung))
def update_shanten(self):
    """Update the shanten numbers from the current concealed tiles.

    The seven-pairs and thirteen-orphans counts only apply to a fully
    concealed hand; otherwise they are cleared to ``None``.
    """
    concealed = self.concealed_part
    self.shanten_std = count_shanten_std(concealed)
    if not self.is_concealed:
        self.shanten_7 = None
        self.shanten_13 = None
    else:
        self.shanten_7 = count_shanten_seven_pairs(concealed)
        self.shanten_13 = count_shanten_13_orphans(concealed)
def main():
    """Smoke test: deal hands from a fresh wall and print each one."""
    agent = TileWallAgent()
    hands = [PlayerHand(Counter(dealt)) for dealt in agent.build()]
    for hand in hands:
        print(hand)


if __name__ == '__main__':
    main()
# NOTE(review): the lines below are non-code residue (dataset-viewer
# boilerplate accidentally appended to this module); commented out so the
# file parses — safe to delete.
# |
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.