text stringlengths 38 1.54M |
|---|
def uniqueCharacters(document):
    """Return the distinct characters of *document*, sorted ascending.

    Equivalent to keeping each character's first occurrence and sorting,
    but building a set is O(n log n) instead of the original O(n^2)
    per-character membership scan.
    """
    return sorted(set(document))


print(uniqueCharacters("Todd told Tom to trot to the timber"))
|
#! python3.7
# DevOps server-template filler: opens the "recently updated" work items
# view of the Azure DevOps project and clicks the context-menu link.
from selenium import webdriver
import sys

# Open the DevOps work-items board in Firefox.
browser = webdriver.Firefox()
browser.get('https://dev.azure.com/CEMA-NRW/Projekte%20Diakonie%20Ruhr/_workitems/recentlyupdated/')
# BUG FIX: find_element_by_class_name() accepts a single class name only;
# "msContextualMenu-link root-77" is a compound class and raises an
# InvalidSelectorException.  Target both classes with a CSS selector.
linkElem = browser.find_element_by_css_selector(".msContextualMenu-link.root-77")
linkElem.click()
|
from tkinter import *

# Simple login-style form: name and password entries plus a comments box
# with an attached vertical scrollbar.  (UI text is Spanish, kept verbatim.)
root_window = Tk()

main_frame = Frame(root_window, width=800, height=400)
main_frame.pack()

# Name entry on row 0; .grid() places widgets on a row/column lattice.
name_entry = Entry(main_frame)
name_entry.grid(row=0, column=1)

# Password entry on row 1; show='*' masks every typed character.
password_entry = Entry(main_frame)
password_entry.grid(row=1, column=1)
password_entry.config(show='*')

# Multi-line comments box on row 2, scrollbar wired to its yview;
# sticky='nsew' stretches the scrollbar to match the text widget's height.
comments_text = Text(main_frame, width=10, height=5)
comments_text.grid(row=2, column=1)
v_scroll = Scrollbar(main_frame, command=comments_text.yview)
v_scroll.grid(row=2, column=2, sticky='nsew')
comments_text.config(yscrollcommand=v_scroll.set)

# Field labels in column 0.
name_label = Label(main_frame, text='Ingrese el nombre: ')
name_label.grid(row=0, column=0)
password_label = Label(main_frame, text='Ingrese contraseña: ')
password_label.grid(row=1, column=0)
comments_label = Label(main_frame, text='Comentarios: ')
comments_label.grid(row=2, column=0)

root_window.mainloop()
import imp
import itertools
import os
import sys
import platform
_IS_PYTHON_3_3 = (platform.python_version() >= "3.3")
from logbook import Logger # pylint: disable=F0401
_logger = Logger(__name__)
class NoInitFileFound(Exception):
    """Raised when no __init__.py is found while resolving a path to a package."""
    pass
def import_file(filename):
    """Import the Python file at *filename* and return the module object."""
    name = _setup_module_name_for_import(filename)
    # fromlist=[''] forces __import__ to return the leaf module, not the
    # top-level synthetic package.
    return __import__(name, fromlist=[''])
_package_name_generator = ('_{0}'.format(x) for x in itertools.count())
def _generate_package_name():
for suggested in _package_name_generator:
if not _package_name_exists(suggested):
return suggested
def _package_name_exists(pkg_name):
return pkg_name in sys.modules
def _setup_module_name_for_import(filename):
    # Thin indirection: always allocate (or reuse a cached) synthetic
    # package name for the file's containing directory.
    return _create_new_module_name(filename)
# Maps a non-package directory -> the synthetic package name created for it,
# so files under the same directory share one synthetic package.
_cached_package_names = {}


def _create_new_module_name(filename):
    """Return a fully-qualified module name for *filename*.

    The directory above the outermost package gets a synthetic package
    module registered in sys.modules; the result is
    '<synthetic_pkg>.<dotted.module.path>'.
    """
    _logger.debug("Creating new package for {0}", filename)
    base_dir, dotted = _split_nonpackage_dir(filename)
    _logger.debug("After split: {0}, {1}", base_dir, dotted)
    pkg = _cached_package_names.get(base_dir)
    if pkg is None:
        pkg = _generate_package_name()
        sys.modules[pkg] = _create_package_module(pkg, base_dir)
        _cached_package_names[base_dir] = pkg
    return '{0}.{1}'.format(pkg, dotted)
def _split_nonpackage_dir(path):
    """Split *path* into (non-package base dir, dotted module name).

    Walks upward while the current directory contains an __init__.py,
    prepending each traversed directory to the dotted name.  Raises
    NoInitFileFound when no package component is found at all.
    """
    if not os.path.isdir(path):
        # A file: start from its directory and seed the dotted parts
        # from the filename itself.
        nonpackage_dir, module = os.path.split(os.path.normpath(os.path.abspath(path)))
        module = _make_module_name(module).split(".")
    else:
        nonpackage_dir = path
        module = []
    while os.path.isfile(os.path.join(nonpackage_dir, "__init__.py")):
        if '.' in os.path.split(nonpackage_dir)[-1]:
            # we cannot import from such packages, stop traversing upwards...
            break
        nonpackage_dir, current_component = os.path.split(nonpackage_dir)
        module.insert(0, current_component)
        _logger.debug("Now at {0}, {1}", nonpackage_dir, module)
    if not module:
        raise NoInitFileFound("Could not find __init__.py file in {0}".format(path))
    return nonpackage_dir, ".".join(module)
def _make_module_name(filename):
assert filename.endswith('.py') or filename.endswith('.pyc')
return filename.rsplit(".", 1)[0].replace(os.path.sep, ".")
def _create_package_module(name, path):
    """Create and register a synthetic package module *name* rooted at *path*.

    NOTE(review): relies on the deprecated `imp` module (removed in 3.12);
    a port would use importlib/types.ModuleType.
    """
    # Serialize against other importers while mutating sys.modules.
    imp.acquire_lock()
    try:
        if _IS_PYTHON_3_3:
            # the package import machinery works a bit differently in
            # python 3.3
            returned = imp.new_module(name)
            returned.__path__ = [path]
            sys.modules[name] = returned
        else:
            returned = imp.load_module(name, None, path, ('', '', imp.PKG_DIRECTORY))
    finally:
        imp.release_lock()
    return returned
|
# -*- coding: utf-8 -*-
"""
#
# ENCODING
#
import json
from stalker import db, Task, Project
from anima.utils import task_hierarchy_io
db.setup()
t = Task.query.get(12106)
data = json.dumps(t, cls=task_hierarchy_io.StalkerEntityEncoder, check_circular=False, indent=4)
#
# DECODING
#
project = Project.query.filter(Project.code == 'TD').first()
decoder = task_hierarchy_io.StalkerEntityDecoder(project=project)
entity = decoder.loads(data)
"""
import json
class StalkerEntityEncoder(json.JSONEncoder):
    """JSON Encoder for Stalker Classes.

    Serializes SQLAlchemy-mapped Stalker entities into plain dicts,
    skipping the attributes listed in ``ignore_fields`` and emitting
    ``{"$ref": id}`` stubs for entities already visited (cycle breaking).
    """

    # Attribute names excluded from serialization: derived/computed values,
    # DB bookkeeping columns and back-references that would bloat the output
    # or create reference cycles.
    ignore_fields = [
        # Generic
        "defaults",
        # SimpleEntity
        "id",
        "entity_id",
        "nice_name",
        # Task
        "absolute_path",
        "allocation_strategy",
        "alternative_resources",
        "bid_timing",
        "bid_unit",
        "children",
        "computed_duration",
        "computed_end",
        "computed_resources",
        "computed_start",
        "computed_total_seconds",
        "create_time_log",
        "created_by",
        "created_by_id",
        "date_created",
        "date_updated",
        "dependent_of",
        "depends",
        "duration",
        "end",
        "entity_groups",
        "generic_data",
        "generic_text",
        "good",
        "good_id",
        "hold",
        "html_class",
        "html_style",
        "is_container",
        "is_leaf",
        "is_milestone",
        "is_root",
        "is_scheduled",
        "least_meaningful_time_unit",
        "level",
        "notes",
        "open_tickets",
        "parent",
        "parent_id",
        "parents",
        "percent_complete",
        "persistent_allocation",
        "plural_class_name",
        "priority",
        "project",
        "project_id",
        "query",
        "references",
        "remaining_seconds",
        "resources",
        "responsible",
        "review_number",
        "reviews",
        "schedule_seconds",
        "start",
        "status",
        "status_id",
        "status_list",
        "status_list_id",
        "tags",
        "task_dependent_of",
        "task_depends_to",
        "thumbnail",
        "thumbnail_id",
        "tickets",
        "time_logs",
        "tjp_abs_id",
        "tjp_id",
        "to_tjp",
        "total_logged_seconds",
        "total_seconds",
        "updated_by",
        "updated_by_id",
        "walk_dependencies",
        "walk_hierarchy",
        "walk_inputs",
        "watchers",
        # Shot
        "image_format",
        "image_format_id",
        "source_in",
        "source_out",
        "sequences",
        # Version
        "absolute_full_path",
        "inputs",
        "latest_published_version",
        "latest_version",
        "link_id",
        "max_version_number",
        "naming_parents",
        "task",
        "task_id",
        "outputs",
        "version_id",
    ]

    def __init__(self, *args, **kwargs):
        super(StalkerEntityEncoder, self).__init__(*args, **kwargs)
        # ids of entities already serialized in this encoder instance;
        # later occurrences become {"$ref": id} stubs.
        self._visited_obj_ids = []

    def default(self, obj):
        """Serialize *obj*; SQLAlchemy-mapped instances become attribute dicts.

        Anything else is delegated to JSONEncoder.default(); values that
        still cannot be encoded are silently replaced by None.
        """
        from sqlalchemy.ext.declarative import DeclarativeMeta
        if isinstance(obj.__class__, DeclarativeMeta):
            # don't re-visit self
            if obj.id in self._visited_obj_ids:
                return {"$ref": obj.id}
            # do not append if this is a type instance
            if obj.entity_type != "Type":
                self._visited_obj_ids.append(obj.id)
            # an SQLAlchemy class
            fields = {}
            for field in [
                x for x in dir(obj) if not x.startswith("_") and x != "metadata"
            ]:
                # skip ignore fields
                if field in self.ignore_fields:
                    continue
                # skip callables
                if callable(obj.__getattribute__(field)):
                    continue
                try:
                    fields[field] = obj.__getattribute__(field)
                except (AttributeError, TypeError, NotImplementedError, RuntimeError):
                    # attribute could not be read; omit it from the output
                    pass
            # a json-encodable dict
            return fields
        try:
            # return json.JSONEncoder.default(self, obj)
            return super(StalkerEntityEncoder, self).default(obj)
        except TypeError:
            # NOTE(review): unencodable values are dropped to None rather
            # than raising - confirm this lossy behavior is intended.
            return None
class StalkerEntityDecoder(object):
    """Decoder for Stalker classes.

    Rebuilds a Task/Asset/Shot/Sequence hierarchy (plus Versions) from the
    JSON produced by StalkerEntityEncoder, committing each created entity
    to the database via DBSession.
    """

    def __init__(self, project, parent=None):
        # Stalker Project the decoded entities are attached to.
        self.project = project
        self.parent = parent
        # Maps the JSON "id" -> already created/found entity so repeated
        # references resolve to the same object.
        self._created_obj = {}

    def loads(self, data, parent=None):
        """Decodes Stalker data

        :param data: Raw json data.
        :param parent: The parent node to attach the newly created data to.
        :return:
        """
        from stalker.db.session import DBSession
        from stalker import Asset, Task, Shot, Sequence, Version, Type
        if isinstance(data, str):
            data = json.loads(data)
        json_id = None
        if "id" in data:
            json_id = data.pop("id")
        # Already decoded? Return the cached entity.
        if json_id is not None and json_id in self._created_obj:
            return self._created_obj[json_id]
        # get the entity_type
        try:
            entity_type = data["entity_type"]
        except KeyError:
            return None
        # set default entity class to Task
        entity_class = Task
        # entity_type_dict = {
        #     'Asset': Asset,
        #     'Shot': Shot,
        #     'Sequence': Sequence
        # }
        if entity_type == "Asset":
            entity_class = Asset
        elif entity_type == "Shot":
            entity_class = Shot
            # TODO: this is a bug
            data["sequences"] = []
        elif entity_type == "Sequence":
            entity_class = Sequence
        # TODO: We shouldn't need the following code for Type anymore
        # get the type
        if "type" in data:
            type_data = data["type"]
            if type_data and not isinstance(type_data, Type):
                type_name = type_data["name"]
                type_ = Type.query.filter(Type.name == type_name).first()
                if not type_:
                    # create a Type
                    type_ = Type(**type_data)
                data["type"] = type_
        # store version data
        version_data = sorted(data["versions"], key=lambda x: x["version_number"])
        data["versions"] = []
        data["project"] = self.project
        # check if the data exists before creating it
        entity = (
            entity_class.query.filter(entity_class.project == self.project)
            .filter(entity_class.parent == parent)
            .filter(entity_class.name == data["name"])
            .first()
        )
        if not entity:
            # then create it
            entity = entity_class(**data)
            DBSession.add(entity)
            DBSession.commit()
        # NOTE(review): truthiness test - a json_id of 0 would be skipped;
        # confirm ids are always non-zero.
        if json_id:
            self._created_obj[json_id] = entity
        # create Versions
        if version_data:
            for v_data in version_data:
                v_json_id = None
                if "id" in v_data:
                    v_json_id = v_data.pop("id")
                # check version number and take name
                # if there is a version with the same version_number
                # don't create it
                take_name = v_data["take_name"]
                version_number = v_data["version_number"]
                v = (
                    Version.query.filter(Version.task == entity)
                    .filter(Version.take_name == take_name)
                    .filter(Version.version_number == version_number)
                    .first()
                )
                if not v:
                    # then create it
                    # get Version info
                    v_data["task"] = entity
                    v = Version(**v_data)
                    # update version_number
                    v.version_number = v_data["version_number"]
                    v.is_published = v_data["is_published"]
                    self._created_obj[v_json_id] = v
                    DBSession.commit()
        # for each child task call a new StalkerEntityDecoder
        for t in data["tasks"]:
            self.loads(t, parent=entity)
        if parent:
            entity.parent = parent
        return entity
|
# Report the longest word in the file and its length.
filename = 'fars.text'
with open(filename, 'r') as infile:
    words = infile.read().split()

# BUG FIX: the original computed max(words, key=len) twice; find the
# longest word once and take len() of the result.
max_word = max(words, key=len)
max_len = len(max_word)
print(max_word, max_len)
# NOTE: Python 2 code (print statements, raw_input); it will not run
# under Python 3.
from sys import argv

# Unpack exactly three positional command-line arguments; raises
# ValueError if a different number is supplied.
script, first, second, third = argv

# Prompt the user; raw_input returns the typed line as a str.
blah = raw_input("How you doin? ")

print "The script is called ", script
print "The first variable is called ", first
print "The second variable is called ", second
print "The third variable is called ", third
print "How you doin? ", blah
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 30 10:17:56 2019
@author: Rupesh S

Agglomerative (Ward) clustering of mall customers on columns 3 and 4
(annual income, spending score): show a dendrogram to justify k=5, then
scatter-plot the five clusters.
"""
import pandas as pd

df = pd.read_csv("D:\\Mall_Customers.csv")
x = df.iloc[:, [3, 4]].values

import scipy.cluster.hierarchy as sch
import matplotlib.pyplot as plt

# Dendrogram (Ward linkage) used to pick the number of clusters.
dendrogram = sch.dendrogram(sch.linkage(x, method='ward'))
plt.xlabel('customers')
plt.ylabel('euclidean distance')  # BUG FIX: typo 'distane'
plt.show()

from sklearn.cluster import AgglomerativeClustering
hc = AgglomerativeClustering(n_clusters=5, affinity='euclidean', linkage='ward')
y_hc = hc.fit_predict(x)

# BUG FIX: every series was labelled 'Cluster1', and the trailing bare
# plt.scatter() call raised a TypeError (x and y are required).  Plot one
# series per cluster with its own label, then show the legend.
colors = ['red', 'blue', 'cyan', 'black', 'green']
for i, color in enumerate(colors):
    plt.scatter(x[y_hc == i, 0], x[y_hc == i, 1], s=100, c=color,
                label='Cluster%d' % (i + 1))
plt.legend()
plt.show()
# -*- coding: utf-8 -*-
from flask import Flask,render_template,request,abort,jsonify
from ApacheConfigParser.ApacheConfig import ApacheParser
import json
app = Flask(__name__)
#----------------API后端----------------------
# GET endpoint example: read and return a file's contents.
@app.route('/api/readfile/')
def readfile():
    """Return the demo Apache config file's text, prefixed with a label."""
    # BUG FIX: the response embeds Chinese text; open with an explicit
    # UTF-8 encoding so the read does not depend on (and fail under) a
    # non-UTF-8 platform default encoding.
    with open('ApacheConfigParser/examples/test_apache_config.conf', 'r', encoding='utf-8') as f:
        text = f.read()
    result = '读取到的文件内容:\n' + text
    return result
# POST endpoint example: change the ServerName in an Apache config.
@app.route('/api/change/',methods=['POST'])
def change():
    """Update every VirtualHost's ServerName to the posted 'name' and
    return the config text before and after the change."""
    name = request.form['name']  # 'name' is the posted form parameter
    # Parse with ApacheConfigParser; note the parser requires the file to
    # be opened in binary ('rb') mode.
    with open('ApacheConfigParser/examples/test_apache_config.conf','rb') as f:
        parsed = ApacheParser(f)
    result = '要修改为的ServerName:' + name + '\n'
    result += '============修改前的内容============\n'
    result += parsed.render().decode('utf-8')
    # Rewrite the ServerName directive of every VirtualHost block.
    parsed.findAll('VirtualHost').findChildren('ServerName').update(name)
    result += '============修改后的内容============\n'
    result += parsed.render().decode('utf-8')
    return result
# JSON transfer endpoint example.
@app.route('/api/transferjson/',methods=['POST'])
def transferjson():
    """Echo the posted JSON payload back, tagged with the backend framework."""
    data = request.json  # JSON body sent by the front end
    result = {
        '后端接收到的数据': data,
        '后端框架': 'Flask'
    }
    return jsonify(result)  # respond with JSON
# Persistence endpoint example: save posted data to settings.json.
@app.route('/api/savedata/',methods=['POST'])
def savedata():
    """Persist the posted JSON body to settings.json (pretty-printed UTF-8)."""
    data = request.json  # JSON body sent by the front end
    # BUG FIX: ensure_ascii=False emits raw non-ASCII characters, so the
    # file must be opened with an explicit UTF-8 encoding or the write can
    # raise UnicodeEncodeError under a non-UTF-8 platform default.
    with open('settings.json', 'w', encoding='utf-8') as f:
        f.write(json.dumps(data, ensure_ascii=False, indent=2))
    return '保存到settings.json成功'
@app.route('/api/loaddata/')
def loaddata():
    """Read settings.json back and return its contents as JSON."""
    # BUG FIX: settings.json is written as UTF-8 by savedata; read it with
    # an explicit encoding so non-ASCII content survives on platforms
    # whose default encoding differs.
    with open('settings.json', 'r', encoding='utf-8') as f:
        data = json.load(f)
    return jsonify(data)  # respond with the stored data as JSON
# ---------------- Pages ----------------------
# Home page
@app.route('/')
def index():
    """Render the site home page."""
    return render_template('index.html')

# About page
@app.route('/about')
def about():
    """Render the about page."""
    return render_template('about.html')

# Contact page
@app.route('/contact')
def contact():
    """Render the contact page."""
    return render_template('contact.html')
# ---------------- Start the server ----------------------
if __name__ == '__main__':
    # debug=True enables the auto-reloader on file changes (development only).
    app.run(debug=True)
|
# -*- coding: utf-8 -*-
import requests
import csv
import sys
from lib import csv_io
def write_csv(file_name, content):
    """Write *content* (an iterable of rows) to *file_name* as CSV.

    BUG FIX: the csv module requires the file to be opened with
    newline=''; without it each row is followed by a blank line on
    Windows.
    """
    with open(file_name, 'w', newline='') as output_file:
        writer = csv.writer(output_file)
        writer.writerows(content)
if __name__ == '__main__':
    # Merge the existing dengue CSV with two freshly downloaded Tainan
    # open-data CSVs, normalize the rows, and write the result back.
    data = csv_io.read_csv('../data/dengue_all.csv')
    urls = ['http://data.tainan.gov.tw/dataset/3ad9da64-0c29-4299-b769-320b57a09be8/resource/7bf16e0a-2445-4ccf-a0a0-ae06a8fda4ac/download/z104104121207.csv', 'http://data.tainan.gov.tw/dataset/3ad9da64-0c29-4299-b769-320b57a09be8/resource/d4af5055-3d2c-420f-ad12-373cfae430d3/download/z104104121208.csv']
    for u in urls:
        # BUG FIX: report the URL being fetched (the original printed the
        # whole list on every iteration).
        print(u)
        data += csv_io.req_csv(u, 'utf-8')[1:]  # drop each file's header row

    # BUG FIX: the original `del item` only unbound the loop variable and
    # left the row in `data`; build a filtered list instead so rows with
    # an empty first field are actually removed.
    cleaned = []
    for item in data:
        if not item[0]:
            continue
        # Pad short rows so all rows have at least 7 columns.
        if len(item) < 7:
            item.insert(0, '')
        # Convert ROC-calendar years to CE (ROC + 1911).
        # BUG FIX: ROC 105 is 2016; it was wrongly mapped to 2015.
        if '105' in item[1]:
            item[1] = item[1].replace('105', '2016')
        if '104' in item[1]:
            item[1] = item[1].replace('104', '2015')
        try:
            # Some rows have the last two columns swapped; a small final
            # numeric value (< 50) signals the swap.
            if float(item[-1]) < 50:
                item[-1], item[-2] = item[-2], item[-1]
        except (ValueError, IndexError):
            # Non-numeric final column: leave the row untouched.
            pass
        cleaned.append(item)
    data = cleaned

    print(data[-1])
    # Drop the trailing row before writing out.
    data = data[:-1]
    write_csv('../data/dengue_all.csv', data)
|
import argparse
import glob
import json
import os
import numpy as np
def split_data(image_path, prefix=""):
    """Randomly split the files under *image_path* into train/valid sets.

    Takes the basenames of everything matching `{image_path}/*.*`,
    reserves 10% (rounded down) for validation, optionally prepends
    *prefix* to every name, and returns {"train": [...], "valid": [...]}.
    """
    names = np.array([os.path.basename(p) for p in glob.glob(f"{image_path}/*.*")])

    total_len = len(names)
    print(f"Total {total_len} data")
    valid_len = int(total_len * 0.1)

    # One random permutation; the first 10% of it becomes the valid split.
    order = np.random.permutation(total_len)
    train_names = [f"{prefix}{n}" for n in names[order[valid_len:]].tolist()]
    valid_names = [f"{prefix}{n}" for n in names[order[:valid_len]].tolist()]

    print(f"Train has {len(train_names)} data from {image_path}")
    print(f"Valid has {len(valid_names)} data from {image_path}")
    return {"train": train_names, "valid": valid_names}
def paired_data(data_path):
    """Split paired data: names come from trainA and must also exist in trainB."""
    splits = split_data(f"{data_path}/trainA")
    # Every split name must have its counterpart image under trainB.
    for key in ["train", "valid"]:
        counterparts = (f"{data_path}/trainB/{name}" for name in splits[key])
        assert all(os.path.isfile(p) for p in counterparts)
    return splits
def unpaired_data(data_path):
    """Split unpaired data: independent trainA/trainB splits, merged per key."""
    distorted = split_data(f"{data_path}/trainA", prefix="trainA/")
    enhanced = split_data(f"{data_path}/trainB", prefix="trainB/")
    return {key: distorted[key] + enhanced[key] for key in ("train", "valid")}
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="PyTorch FUnIE-GAN Data Splitting")
    parser.add_argument("-d", "--data", default="", type=str, metavar="PATH",
                        help="path to data (default: none)")
    parser.add_argument("-p", "--pair", action="store_true",
                        help="Set if data in pairs")
    args = parser.parse_args()

    # Fixed seed so the random split is reproducible across runs.
    seed = 42
    np.random.seed(seed)

    if args.pair:
        print("Create paired dataset...")
        splits = paired_data(args.data)
    else:
        print("Create unpaired dataset...")
        splits = unpaired_data(args.data)

    # Write out
    output_path = f"{args.data}/splits.json"
    with open(output_path, "w") as f:
        json.dump(splits, f)
    print(f"Write splits to JSON {output_path}")
|
import praw
import re
import urllib2
import json
import random
def dimentionsCorrect(w, h, settings):
    """Decide whether an image of size (w, h) suits the requested target.

    Acceptable when the image meets or exceeds the target in both
    dimensions, or when the worst relative shortfall stays below the
    configured 'criticalDelta' fraction.
    """
    shortfall_w = settings["targetW"] - w
    shortfall_h = settings["targetH"] - h
    worst = float(max(shortfall_w, shortfall_h))
    # this doesn't care about data size because the largest image from
    # this database is not that big
    if worst <= 0:
        return True
    # Normalise the worst shortfall by the matching image dimension.
    reference = w if shortfall_w > shortfall_h else h
    return worst / reference < settings["criticalDelta"]
def getPage(link, key, user):
    """Fetch *link* with the API auth headers and return the response body.

    NOTE: Python 2 code (urllib2); the response is closed explicitly.
    """
    h = {
        "apiauth-apikey": key,
        "apiauth-apiuser": user
    }
    request = urllib2.Request(link, headers=h)
    r = urllib2.urlopen(request)
    page = r.read()
    r.close()
    return page
def getNewImage(w, h, settings):
    """Search the NatGeo photo-of-the-day feed for an acceptably sized image.

    Returns a {'src', 'credit'} dict for the first rendition that passes
    dimentionsCorrect(), or None after settings["maxSearch"] attempts.

    NOTE: Python 2 code - random.shuffle(rands) relies on range() returning
    a list; under Python 3 this raises TypeError.
    """
    # This doesn't seem to change?
    apiKey = "9fa5d22ad7b354fe0f9be5597bcf153df56e2ca5"
    apiUser = "pod_archive"
    # Pick a random archive page (48 results per page).
    page = str(random.randint(1, 10))
    link = "https://relay.nationalgeographic.com/proxy/distribution/feed/v1?\
format=jsonapi&content_type=featured_image&fields=image,uri&\
collection=fd5444cc-4777-4438-b9d4-5085c0564b44\
&subjects=af70464f-6035-346a-bb85-479b03df4a5e\
&publication_datetime__from=2009-01-01T18:30:02Z&page="+page+"&limit=48"
    j = json.loads(getPage(link, apiKey, apiUser))
    # Visit the page's entries in random order.
    rands = range(0, len(j["data"]))
    random.shuffle(rands)
    for i in range(0, int(settings["maxSearch"])):
        imgIndex = rands[i]
        sizes = j["data"][imgIndex]["attributes"]["image"]["renditions"]
        ar = float(j["data"][imgIndex]["attributes"]["image"]["aspect_ratio"])
        # NOTE(review): the inner loop rebinds the w/h parameters with each
        # rendition's dimensions; the caller's target w/h are lost here
        # (size checks still work via `settings`).
        for size in sizes:
            w = int(size["width"])
            # Height is derived from width and the reported aspect ratio.
            h = int(int(w) * (1/ar))
            if(dimentionsCorrect(w, h, settings)):
                img = {
                    "src": size["uri"],
                    "credit" : "https://www.nationalgeographic.com/photography/photo-of-the-day/archive/"
                }
                return img
    return None
def simpleSearch(w, h):
    """Find an image roughly matching (w, h); fall back to a fixed wallpaper.

    :param w: target width in pixels
    :param h: target height in pixels
    :return: dict with 'src' (image URL) and 'credit' (attribution URL)
    """
    settings = {}
    settings['targetW'] = w
    settings['targetH'] = h
    settings['criticalDelta'] = 0.5
    settings['maxSearch'] = 10
    img = getNewImage(w, h, settings)
    # Idiom fix: compare with None using 'is' (was '== None'); the dead
    # initial 'img = None' assignment is also removed.
    if img is None:
        img = {
            "src": "http://cdn.wallpapersafari.com/17/34/42nLhc.jpg",
            "credit" : "https://wallpapersafari.com/free-mountain-wallpaper-backgrounds/"
        }
    return img
if __name__ == "__main__":
    # Manual smoke test: search for a 2560x1600 image and print the result.
    print(simpleSearch(2560,1600))
|
#a Imports
import os, time
from pathlib import Path
from .verbose import Verbose
from .options import Options
from .log import Log
from .exceptions import *
from .base import GripBase
from typing import Type, List, Dict, Iterable, Optional, Any, Tuple, cast
from .git import branch_upstream, branch_head
from .git import Repository as GitRepo
from .git import Url as GitUrl
from .descriptor import StageDependency as StageDependency
from .descriptor import RepositoryDescriptor
from .descriptor import ConfigurationDescriptor
from .descriptor import GripDescriptor as GripDescriptor
from .configstate import GripConfigStateInitial, GripConfigStateConfigured
from .repo import Repository, GripRepository
from .types import PrettyPrinter, Documentation, MakefileStrings, EnvDict
#a Classes
#a Toplevel grip repository class - this describes/contains the whole thing
#c Toplevel class
class Toplevel(GripBase):
    """Toplevel grip repository class - describes/contains the whole thing."""
    #v Instance properties
    invocation : str
    # repo_desc: GripRepoDescriptor
    # repo_desc_config : Optional[ConfigurationDescriptor] - configured_config_state.config_desc
    # repo_config : Optional[GripConfig]
    # grip_git_url : Optional[GitUrl]
    # BUG FIX: this annotation was misspelled 'intial_config_state';
    # __init__ assigns 'initial_config_state', so the annotation described
    # an attribute that never exists.
    initial_config_state : GripConfigStateInitial
    configured_config_state : GripConfigStateConfigured
    repo_instance_tree : GripRepository
    _is_configured : bool
#f find_git_repo_of_grip_root
    @classmethod
    def find_git_repo_of_grip_root(cls, path:Path, options:Options, log:Log) -> GitRepo:
        """Walk upward from *path* to the git repo whose root holds '.grip'.

        Recurses parent-ward until a git repository containing a '.grip'
        directory at its root is found, and returns that GitRepo.
        """
        git_repo = GitRepo(path=path, permit_no_remote=True, options=options, log=log)
        path = git_repo.path()
        if not path.joinpath(Path(".grip")).is_dir():
            # Not the grip root - retry from the parent directory.
            path = path.parent
            return cls.find_git_repo_of_grip_root(path, options=options, log=log)
        return git_repo
#f clone - classmethod to perform a git clone and then create an instance
    @classmethod
    def clone(cls, repo_url:str, dest:Optional[Path], branch:Optional[str], options:Optional[Options]=None, log:Optional[Log]=None, invocation:str="")-> 'Toplevel':
        """Git-clone *repo_url* and return a Toplevel for the fresh clone.

        The instance is created with ensure_configured=False: configuring
        the grip repo is a separate, later step.
        """
        if options is None: options=Options()
        if log is None: log = Log()
        git_repo = GitRepo.clone(repo_url, new_branch_name="", branch=branch, dest=dest, options=options, log=log)
        return cls(path=git_repo.path(), git_repo=git_repo, options=options, log=log, invocation=invocation, ensure_configured=False)
#f path - get a path relative to the repository
    def path(self, path:Optional[Path]=None) -> Path:
        """Return *path* resolved relative to the repository (its root if None)."""
        return self.git_repo.path(path)
#f __init__
    def __init__(self, options:Options, log:Log, path:Path, git_repo:Optional[GitRepo]=None, ensure_configured:bool=True, invocation:str="", error_handler:ErrorHandler=None):
        """Create a Toplevel for the grip repo at/above *path*.

        Locates the enclosing git repo if *git_repo* is not supplied, reads
        the initial description/state, and - when a config file is present -
        builds the configured state too.  Raises if *ensure_configured* is
        set and the repo turns out not to be configured.
        """
        if git_repo is None:
            try:
                git_repo = Toplevel.find_git_repo_of_grip_root(path, options=options, log=log)
                pass
            except Exception as e:
                # Surface the lookup failure before re-raising it.
                print(str(e))
                raise e
            pass
            pass
        if git_repo is None:
            raise NotGripError("Not within a git repository, so not within a grip repository either")
        GripBase.__init__(self, options=options, log=log, git_repo=git_repo, branch_name=None)
        # Timestamped invocation string recorded in the log for auditing.
        self.invocation = time.strftime("%Y_%m_%d_%H_%M_%S") + ": " + invocation
        self.log.add_entry_string(self.invocation)
        self.initial_config_state = GripConfigStateInitial(self)
        self.initial_config_state.read_desc_state(error_handler=error_handler)
        self._is_configured = False
        if self.initial_config_state.has_config_file():
            # A config file exists: select and load the configured state.
            self.initial_config_state.select_current_configuration()
            self.configured_config_state = GripConfigStateConfigured(self.initial_config_state)
            self.configured_config_state.read_desc(error_handler=error_handler)
            if options.get("debug_config",False):
                import sys
                self.configured_config_state.dump_to_file(sys.stdout)
            self._is_configured = True
            pass
        if ensure_configured and not self._is_configured:
            raise Exception("Die:ensure_configured and not self._is_configured:")
        self.make_branch_name()
        pass
#f make_branch_name
    def make_branch_name(self) -> None:
        """
        Set branch name; if not configured, then generate a new name
        If configured then use the branch name in the local config state
        """
        if self.branch_name is None:
            if self.initial_config_state.config_file.branch is None:
                # No recorded branch: synthesize WIP__<name>[_<config>]_<time>.
                time_str = time.strftime("%Y_%m_%d_%H_%M_%S")
                base = self.initial_config_state.initial_repo_desc.get_name()
                if self.initial_config_state.config_file.config is not None:
                    base += "_" + self.initial_config_state.config_file.config
                    pass
                branch_name = "WIP__%s_%s"%(base, time_str)
                self.set_branch_name(branch_name)
                self.verbose.message("New branch name '%s'"%(branch_name))
                pass
            else:
                # Reuse the branch recorded in the local config state.
                self.set_branch_name(self.initial_config_state.config_file.branch)
                pass
            pass
        pass
#f update_state
    def update_state(self) -> None:
        """Refresh the state from the instantiated subrepo tree."""
        self.configured_config_state.update_state(self.repo_instance_tree)
        pass
    #f write_state
    def write_state(self) -> None:
        """Write the (possibly updated) state file to disk."""
        self.configured_config_state.write_state()
        pass
    #f update_config
    def update_config(self) -> None:
        """Refresh the in-memory grip configuration."""
        self.configured_config_state.update_config()
        pass
    #f write_config
    def write_config(self) -> None:
        """Write the grip configuration file to disk."""
        self.configured_config_state.write_config()
        pass
#f get_repo_desc
    def get_repo_desc(self) -> GripDescriptor:
        """Return the full descriptor if configured, else the initial one."""
        if self.is_configured():
            return self.configured_config_state.full_repo_desc
        return self.initial_config_state.initial_repo_desc
    #f debug_repo_desc
    def debug_repo_desc(self) -> str:
        """Pretty-print the current repo descriptor for debugging."""
        def p(acc:str, s:str, indent:int=0) -> str:
            # Accumulate one line per call, indented by *indent* spaces.
            return acc+"\n"+(" "*indent)+s
        return cast(str,self.get_repo_desc().prettyprint("",p))
    #f get_name
    def get_name(self) -> str:
        """Return the grip repository's name from its descriptor."""
        return self.get_repo_desc().get_name()
    #f get_doc
    def get_doc(self) -> Documentation:
        """
        Return list of (name, documentation) strings
        If configured, list should include current configuration and repos
        If not configured, list should include all configurations
        List should always start with (None, repo.doc) if there is repo doc
        """
        if self.is_configured():
            return self.configured_config_state.config_desc.get_doc()
        return self.initial_config_state.initial_repo_desc.get_doc()
    #f get_configurations
    def get_configurations(self) -> List[str]:
        """Return the names of all configurations in the descriptor."""
        return self.get_repo_desc().get_configs()
    #f is_configured
    def is_configured(self) -> bool:
        """True once a configuration has been selected and loaded."""
        return self._is_configured
    #f get_config_name
    def get_config_name(self) -> str:
        """Return the selected configuration's name; raises if unconfigured."""
        if self.is_configured():
            return self.configured_config_state.config_name
        raise Exception("Repo is not configured so has no config name")
#f configure
    def configure(self, config_name:Optional[str]=None) -> None:
        """Configure the grip repo as *config_name* (or the default config).

        Sets up toplevel branches, clones all subrepos, then writes out
        state/config files and the grip makefiles.  Refuses to configure an
        already-configured repo unless the 'force_configure' option is set.
        """
        force_configure = self.options.get("force_configure", default=False)
        self.add_log_string("Configuring repo %s with config %s"%(str(self.git_repo.path),config_name))
        if self.is_configured():
            if not force_configure:
                raise UserError("Grip repository is already configured - cannot configure it again, a new clone of the grip repo must be used instead")
            if (config_name is not None) and (config_name!=self.get_config_name()):
                raise UserError("Grip repository is already configured with config '%s' and cannot be configured with a different config name '%s'"%(self.get_config_name(),config_name))
            pass
        config_name = self.initial_config_state.select_configuration(config_name)
        assert config_name is not None
        self.configured_config_state = GripConfigStateConfigured(self.initial_config_state)
        self.add_log_string("...configuring toplevel for repo %s config %s"%(str(self.git_repo.path), config_name))
        self.configure_toplevel_repo()
        if not force_configure:
            self.check_clone_permitted()
            pass
        self.add_log_string("...cloning subrepos for repo %s"%(str(self.git_repo.path)))
        errors = self.clone_subrepos()
        if len(errors)>0:
            if not force_configure:
                for e in errors:
                    self.verbose.error("Error from cloning subrepo: %s"%e)
                    pass
                raise ConfigurationError("Failed to clone required subrepos")
            else:
                # Forced: report clone failures as warnings and continue.
                for e in errors:
                    self.verbose.warning("Failed to clone subrepo (but forcing configuration anyway): %s"%e)
                    pass
                pass
            pass
        self.write_state()
        self.write_config()
        self.add_log_string("...rereading config and state for repo %s"%(str(self.git_repo.path)))
        self.configured_config_state.read_desc()
        self.add_log_string("...updating configuration for repo %s"%(str(self.git_repo.path)))
        self.create_subrepos()
        self._is_configured = True
        # Persist the post-clone state/config and emit env + makefiles.
        self.update_state()
        self.write_state()
        self.update_config()
        self.write_config()
        self.grip_env_write()
        self.create_grip_makefiles()
        pass
#f configure_toplevel_repo - set toplevel git repo to have correct branches if it does not already
    def configure_toplevel_repo(self) -> None:
        """
        Must only be invoked if the grip repository is not yet configured
        In some circumstances the repository could have been git cloned by hand
        In this case we need to ensure it is unmodified, and set the required branches
        appropriately
        """
        assert self.branch_name is not None
        if self.git_repo.is_modified():
            raise ConfigurationError("Git repo is modified and cannot be configured")
        # The next bit is really for workflow single I think
        try:
            branch = self.git_repo.get_branch_name()
            pass
        except:
            raise ConfigurationError("Git repo is not at the head of a branch and so cannot be configured")
        has_upstream = self.git_repo.has_cs(branch_name=branch_upstream)
        has_wip_branch = self.git_repo.has_cs(branch_name=self.branch_name)
        # Find the remote to merge with, preferring the upstream branch.
        if has_upstream:
            remote = self.git_repo.get_branch_remote_and_merge(branch_upstream)
            pass
        else:
            remote = self.git_repo.get_branch_remote_and_merge(branch)
            pass
        if remote is None:
            raise ConfigurationError("Git repo branch does not have a remote to merge with and so cannot be configured")
        if has_upstream and has_wip_branch: return
        # Point any missing upstream/WIP branches at the current head.
        cs = self.git_repo.get_cs(branch_head)
        if not has_upstream:
            self.verbose.warning("Expected subrepo to already have upstream branch '%s'; will create one"%(branch_upstream))
            self.git_repo.change_branch_ref(branch_name=branch_upstream, ref=cs)
            pass
        if not has_wip_branch:
            self.verbose.message("Setting branches '%s' and '%s' to point at current head"%(branch_upstream, self.branch_name))
            self.git_repo.change_branch_ref(branch_name=self.branch_name, ref=cs)
            self.git_repo.checkout_cs(changeset=self.branch_name)
            pass
        self.git_repo.set_upstream_of_branch(branch_name=branch_upstream, remote=remote)
        pass
#f reconfigure
    def reconfigure(self) -> None:
        """Re-run configuration output steps on an already-configured repo."""
        if not self.is_configured():
            raise Exception("Grip repository is not properly configured - cannot reconfigure unless it has been")
        self.create_subrepos()
        for r in self.configured_config_state.config_desc.iter_repos():
            # NOTE(review): r_state is computed but never used - looks like
            # leftover or missing logic; confirm intent.
            r_state = self.configured_config_state.state_file_config.get_repo_state(self.configured_config_state.config_desc, r.name)
        # Persist refreshed state/config and regenerate env + makefiles.
        self.update_state()
        self.write_state()
        self.update_config()
        self.write_config()
        self.grip_env_write()
        self.create_grip_makefiles()
        pass
#f check_clone_permitted
    def check_clone_permitted(self) -> None:
        """Verify every configured subrepo may be cloned to its destination."""
        for r in self.configured_config_state.config_desc.iter_repos():
            dest = self.git_repo.path(r.path())
            if not GitRepo.check_clone_permitted(r.url, branch=r.branch, dest=dest, log=self.log):
                raise UserError("Not permitted to clone '%s' to '%s"%(r.url, dest))
            pass
        pass
#f clone_subrepos - git clone the subrepos to the correct changesets
    def clone_subrepos(self, force_shallow:bool=False) -> List[str]:
        """Clone every subrepo at its recorded branch/changeset.

        Returns a list of error strings (one per failed clone) rather than
        raising, so the caller decides whether failures are fatal.
        NOTE(review): *force_shallow* is currently unused; shallowness comes
        only from each repo descriptor's is_shallow().
        """
        assert self.branch_name is not None
        errors = []
        # Clone all subrepos to the correct paths from url / branch at correct changeset
        # Use shallow if required
        for r in self.initial_config_state.iter_repos():
            # r : RepositoryDescriptor
            # NOTE(review): state comes from initial_config_state but the
            # descriptor passed in is configured_config_state.config_desc -
            # confirm this mixed usage is intentional.
            r_state = self.initial_config_state.state_file_config.get_repo_state(self.configured_config_state.config_desc, r.name)
            assert r_state is not None
            dest = self.git_repo.path(r.path())
            self.verbose.info("Cloning '%s' branch '%s' cs '%s' in to path '%s'"%(r.get_git_url_string(), r_state.branch, r_state.changeset, str(dest)))
            depth = None
            if r.is_shallow(): depth=1
            try:
                GitRepo.clone(repo_url=r.get_git_url_string(),
                              new_branch_name=self.branch_name,
                              branch=r_state.branch,
                              dest=dest,
                              depth = depth,
                              changeset = r_state.changeset,
                              options = self.options,
                              log = self.log )
                pass
            except Exception as e:
                # Collect the failure and keep cloning the rest.
                errors.append(str(e))
                pass
            pass
        return errors
#f create_subrepos - create python objects that correspond to the checked-out subrepos
    def create_subrepos(self) -> None:
        """Build the Repository object tree mirroring the checked-out subrepos.

        Subrepos whose working tree is missing produce a warning instead of
        failing, so partial checkouts remain usable.
        """
        self.repo_instance_tree = GripRepository(name="<toplevel>", grip_repo=self, git_repo=self.git_repo, parent=None, workflow=self.configured_config_state.full_repo_desc.workflow )
        for rd in self.configured_config_state.config_desc.iter_repos():
            # rd : RepositoryDescriptor
            try:
                repo_path = self.git_repo.path(rd.path())
                gr = GitRepo(path=repo_path, options=self.options, log=self.log)
                # Attach the subrepo to the toplevel tree.
                sr = Repository(name=rd.name, grip_repo=self, parent=self.repo_instance_tree, git_repo=gr, workflow=rd.workflow)
                pass
            except SubrepoError as e:
                self.verbose.warning("Subrepo '%s' could not be found - is this grip repo a full checkout?"%(rd.name))
                pass
            pass
        self.repo_instance_tree.install_hooks()
        pass
#f get_makefile_stamp_path
    def get_makefile_stamp_path(self, rd:StageDependency) -> Path:
        """
        Get an absolute path to a makefile stamp filename
        """
        rd_tgt = rd.target_name()
        # Stamp lives inside the grip makefile-stamps directory.
        rd_tgt_path = Path(self.grip_path(self.makefile_stamps_dirname)).joinpath(Path(rd_tgt))
        return rd_tgt_path
    #f create_grip_makefiles
    def create_grip_makefiles(self) -> None:
        """
        Repositories are all ready.
        Create makefile stamp directory
        Create makefile.env and makefile
        Delete makefile stamps
        """
        # Stage dependencies resolve their stamp files through this callback
        StageDependency.set_makefile_path_fn(self.get_makefile_stamp_path)
        self.add_log_string("Cleaning makefile stamps directory '%s'"%self.grip_path(self.makefile_stamps_dirname))
        makefile_stamps = self.grip_path(self.makefile_stamps_dirname)
        try:
            os.mkdir(makefile_stamps)
            pass
        except FileExistsError:
            # Directory already present -- reuse it
            pass
        self.add_log_string("Creating makefile environment file '%s'"%self.grip_path(self.grip_makefile_env_filename))
        with open(self.grip_path(self.grip_makefile_env_filename),"w") as f:
            # GQ/GQE control make quietness / echoing in the generated rules
            print("GQ=@",file=f)
            print("GQE=@echo",file=f)
            # Configuration-level environment variables
            for (n,v) in self.configured_config_state.config_desc.get_env_as_makefile_strings():
                print("%s=%s"%(n,v),file=f)
                pass
            # Per-repo wishes are emitted as comments only, for diagnosis
            for r in self.configured_config_state.config_desc.iter_repos():
                for (n,v) in r.get_env_as_makefile_strings():
                    print("# REPO %s wants %s=%s"%(r.name, n,v),file=f)
                    pass
                pass
            pass
        # create makefiles
        self.add_log_string("Creating makefile '%s'"%self.grip_path(self.grip_makefile_filename))
        with open(self.grip_path(self.grip_makefile_filename),"w") as f:
            print("THIS_MAKEFILE = %s\n"%(self.grip_path(self.grip_makefile_filename)), file=f)
            # -include: missing env file must not abort make
            print("-include %s"%(self.grip_path(self.grip_makefile_env_filename)), file=f)
            def log_and_verbose(s:str) -> None:
                # Mirror each generated-entry message to the log and the console
                self.add_log_string(s)
                self.verbose.info(s)
                pass
            self.configured_config_state.config_desc.write_makefile_entries(f, verbose=log_and_verbose)
            pass
        # clean out make stamps
        pass
#f get_root
def get_root(self) -> Path:
"""
Get path to grip repository
"""
return self.git_repo.path()
#f get_grip_env
def get_grip_env(self) -> EnvDict:
"""
Get immutable environment dictionary (not including OS environment)
"""
return self.configured_config_state.config_desc.get_env()
#f grip_env_iter
def grip_env_iter(self) -> Iterable[Tuple[str,str]]:
"""
Iterate through the grip env in alphabetically-sorted key order
"""
d = self.get_grip_env()
dk = list(d.keys())
dk.sort()
for k in dk:
yield(k,d[k])
pass
pass
    #f grip_env_write
    def grip_env_write(self) -> None:
        """
        Write shell environment file

        Writes one `KEY="value" ; export KEY` line per grip env entry,
        in sorted key order, to the grip env filename.
        """
        self.configured_config_state.write_environment()
        with open(self.grip_path(self.grip_env_filename), "w") as f:
            for (k,v) in self.grip_env_iter():
                print('%s="%s" ; export %s'%(k,v,k), file=f)
                pass
            pass
        pass
#f invoke_shell - use created environment file to invoke a shell
def invoke_shell(self, shell:str, args:List[str]=[]) -> None:
env = {}
for (k,v) in os.environ.items():
env[k] = v
pass
env["GRIP_SHELL"] = shell
cmd_line = ["grip_shell"]
cmd_line += ["-c", "source %s; %s %s"%(self.grip_path(self.grip_env_filename), shell, " ".join(args))]
os.execvpe("bash", cmd_line, env)
    #f status
    def status(self) -> None:
        """Report the status of every subrepo (builds the repo tree first)."""
        self.create_subrepos()
        self.repo_instance_tree.status()
        pass
    #f commit
    def commit(self) -> None:
        """
        Commit every subrepo, then record and write the updated grip state.

        The grip repo itself is NOT committed; the final message tells the
        user how to do that manually.
        """
        self.create_subrepos()
        self.repo_instance_tree.commit()
        self.verbose.message("All repos commited")
        self.update_state()
        self.write_state()
        self.verbose.message("Updated state")
        self.verbose.message("**** Now run 'git commit' and 'git push origin HEAD:master' if you wish to commit the GRIP repo itself and push in a 'single' workflow ****")
        pass
    #f fetch
    def fetch(self) -> None:
        """Fetch every subrepo from its remote (no merge, no state change)."""
        self.create_subrepos()
        self.repo_instance_tree.fetch()
        pass
    #f update
    def update(self) -> None:
        """Update every subrepo, then record and write the updated grip state."""
        self.create_subrepos()
        self.repo_instance_tree.update()
        self.verbose.message("All subrepos updated")
        self.update_state()
        self.write_state()
        self.verbose.message("Updated state")
        pass
    #f merge
    def merge(self) -> None:
        """
        Merge every subrepo, then record and write the updated grip state.

        The grip repo itself still needs a manual commit/push, as the final
        message reminds the user.
        """
        self.create_subrepos()
        self.repo_instance_tree.merge()
        self.verbose.message("All subrepos merged")
        self.update_state()
        self.write_state()
        self.verbose.message("Updated state")
        self.verbose.message("**** Now run 'git commit' and 'git push origin HEAD:master' if you wish to commit the GRIP repo itself and push in a 'single' workflow ****")
        pass
    #f publish
    def publish(self, prepush_only:bool=False) -> None:
        """
        Prepush (and, unless prepush_only, push) every subrepo, then record
        and write the updated grip state.

        prepush_only : stop after the prepush verification step.
        """
        self.create_subrepos()
        self.repo_instance_tree.prepush()
        self.verbose.message("All subrepos prepushed")
        if prepush_only: return
        self.repo_instance_tree.push()
        self.verbose.message("All subrepos pushed")
        self.update_state()
        self.write_state()
        self.verbose.message("Updated state")
        self.verbose.message("**** Now run 'git commit' and 'git push origin HEAD:master' if you wish to commit the GRIP repo itself and push in a 'single' workflow ****")
        pass
#f All done
|
import sys


def swapcase_reverse(words):
    """Swap the case of every character in each word, join the words with
    single spaces, and return the resulting string reversed.

    Equivalent to the original per-character isupper()/lower()/upper() loop,
    but using the built-in str.swapcase.
    """
    return " ".join(w.swapcase() for w in words)[::-1]


def main():
    # Original behavior: exit silently when no arguments are given
    if len(sys.argv) < 2:
        sys.exit()
    print(swapcase_reverse(sys.argv[1:]))


if __name__ == "__main__":
    main()
|
"""
x = int(input("Oppgi verdien til x: "))
y = int(input("Oppgi verdien til y: "))
differanse = x-y
print("Differansen mellom x og y er: ", differanse)
"""
inp = input("Oppgi verdien til x:\n> ")
x = int(inp)
inp = input("Oppgi verdien til y:\n> ")
y = int(inp)
print("Differansen mellom x og y er", x - y)
|
from django.contrib import admin
from .models import Flight, Airport, Passenger
@admin.register(Flight)
class FlightAdmin(admin.ModelAdmin):
    # Columns shown on the Flight changelist page
    list_display = ['origin', 'destination']
@admin.register(Airport)
class AirportAdmin(admin.ModelAdmin):
    # Columns shown on the Airport changelist page
    list_display = ['name', 'code']
@admin.register(Passenger)
class PassengerAdmin(admin.ModelAdmin):
    # Columns shown on the Passenger changelist page
    list_display = ['first', 'last']
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Rename the Service model's `typ` field to `type` (column rename only)."""

    dependencies = [
        ('agent', '0009_packages'),
    ]

    operations = [
        migrations.RenameField(
            model_name='service',
            old_name='typ',
            new_name='type',
        ),
    ]
|
# Candidate feature-column sets (Korean column names from the source data),
# dated 2023-03-23.  Reading the lists:
#   _1/_2 : with / without '쉰시간' (rest time)
#   _3/_4 : additionally include '종료온도' (end temperature)
#   _5/_6 : additionally include '시간(총)' (total time)
feature_0323_1 = ['장입중량총합', '장입최대중량', '장입소재개수', '시작온도', '쉰시간']
feature_0323_2 = ['장입중량총합', '장입최대중량', '장입소재개수', '시작온도']
feature_0323_3 = ['장입중량총합', '장입최대중량', '장입소재개수', '시작온도', '쉰시간', '종료온도']
feature_0323_4 = ['장입중량총합', '장입최대중량', '장입소재개수', '시작온도', '종료온도']
feature_0323_5 = ['장입중량총합', '장입최대중량', '장입소재개수', '시작온도', '쉰시간', '종료온도', '시간(총)']
feature_0323_6 = ['장입중량총합', '장입최대중량', '장입소재개수', '시작온도', '종료온도', '시간(총)']
# (furnace-id list as string, target column, label) triples
f2 = [['[1]', '에너지', '에너지'], ['[1]', '시간(총)', '시간'],
      ['[2, 3]', '에너지', '에너지'], ['[2, 3]', '시간(총)', '시간'],
      ['[4, 5, 6]', '에너지', '에너지'], ['[4, 5, 6]', '시간(총)', '시간'],
      ['[2, 3, 4, 5, 6]', '에너지', '에너지'], ['[2, 3, 4, 5, 6]', '시간(0제외)', '시간'],
      ['[17, 18, 19, 20]', '에너지', '에너지'], ['[17, 18, 19, 20]', '시간(0제외)', '시간']]
# Output sub-directory variants ("all / sensitive-only / excl-sensitive",
# with / without the 1-hour cases)
path_1 = [['전부/1h제외'], ['전부/1h포함'],
          ['민감만/1h제외'], ['민감만/1h포함'],
          ['민감제외/1h제외'], ['민감제외/1h포함']]
# 2023-03-31 experiment: targets x feature set x (no filter) x tags
fl_0331 = [[[['에너지', '시간(총)', '시간(0제외)'], feature_0323_1, None, '쉰시간포함', '에너지']]]
# Furnace groupings paired with the 03-31 experiment definition
f3 = [['[1]', fl_0331],
      ['[2, 3]', fl_0331],
      ['[4, 5, 6]', fl_0331],
      ['[2, 3, 4, 5, 6]', fl_0331],
      ['[17, 18, 19, 20]', fl_0331]]
# Pre-filtered dataset variants paired with the 03-31 experiment definition
f4 = [['4_filtered', fl_0331],
      ['5_filtered', fl_0331],
      ['6_filtered', fl_0331]]
# (target, feature set, filter, tag) experiment rows, grouped by feature set
feature_list_0323_1 = [['에너지', feature_0323_1, None, '쉰시간포함'],
                       ['시간(총)', feature_0323_1, None, '쉰시간포함'],
                       ['시간(0제외)', feature_0323_1, None, '쉰시간포함']]
feature_list_0323_2 = [['에너지', feature_0323_2, None, '쉰시간제외'],
                       ['시간(총)', feature_0323_2, None, '쉰시간제외'],
                       ['시간(0제외)', feature_0323_2, None, '쉰시간제외']]
feature_list_0323_3 = [['에너지', feature_0323_3, None, '쉰시간포함'],
                       ['시간(총)', feature_0323_3, None, '쉰시간포함'],
                       ['시간(0제외)', feature_0323_3, None, '쉰시간포함']]
feature_list_0323_4 = [['에너지', feature_0323_4, None, '쉰시간제외'],
                       ['시간(총)', feature_0323_4, None, '쉰시간제외'],
                       ['시간(0제외)', feature_0323_4, None, '쉰시간제외']]
feature_list_0323_5 = [['에너지', feature_0323_5, None, '쉰시간포함']]
feature_list_0323_6 = [['에너지', feature_0323_6, None, '쉰시간제외']]
# 03-25 experiment bundles: with/without rest time, end temperature, total time
feature_list_0325 = [feature_list_0323_1, feature_list_0323_2]
feature_list_0325_2 = [feature_list_0323_3, feature_list_0323_4]
feature_list_0325_3 = [feature_list_0323_5, feature_list_0323_6]
|
from django.shortcuts import render, redirect, get_object_or_404, reverse
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from .models import *
from .forms import *
from .utility import *
from django.contrib import messages
import os
def homePage(request):
    """Render the static landing page."""
    return render(request, 'core/home.html')
def loginPage(request):
    """
    Authenticate a user from the posted credentials.

    Superusers are sent to the manager dashboard, everyone else to their
    personal page; invalid credentials re-render the form with an error.
    """
    if request.method == 'POST':
        username = request.POST.get('username')
        password = request.POST.get('password')
        user = authenticate(request, username=username, password=password)
        if user is not None:
            login(request, user)
            if user.is_superuser:
                return redirect('core:manager')
            return redirect('core:user', username)
        messages.error(request, 'Usuário Inválido')
    context = {}
    return render(request, 'core/login.html', context)
@login_required(login_url='core:login')
def userPage(request, user):
    """Show a client's personal page with all of their contracts."""
    account = User.objects.get(username=user)
    client = account.client
    context = {'client': client, 'contracts': client.contract_set.all()}
    return render(request, 'core/user.html', context)
@login_required(login_url='core:login')
def managerPage(request):
    """
    Manager dashboard: lists all clients plus a form for adding contracts.

    A POST adds the contract (addContract) and re-renders the dashboard.
    The original had two identical `return render(...)` lines; the
    duplicate has been removed.
    """
    users = Client.objects.all()
    form = ContractForm()
    context = {'username': request.user, 'clients': users, 'form': form}
    if request.method == 'POST':
        addContract(request)
    return render(request, 'core/manager.html', context)
@login_required(login_url='core:login')
def logoutPage(request):
    """Log the current user out and return to the landing page."""
    logout(request)
    return redirect('core:home')
@login_required(login_url='core:login')
def userCreatePage(request):
    """
    First step of user creation: validate username/password, then hand off
    to clientCreatePage for the profile data.

    NOTE(review): the unsaved user's raw password is forwarded as a URL
    parameter -- it will appear in logs/history; confirm this is acceptable.
    """
    if request.method == 'POST':
        form = UserForm(request.POST)
        if form.is_valid():
            # commit=False: the User row is only saved once the client
            # profile is completed in clientCreatePage
            user = form.save(commit=False)
            return redirect('core:clientCreate', user=user.username, password=user.password)
        else:
            messages.error(request, form.errors)
            return redirect('core:userCreate')
    form = UserForm()
    context = {'form':form, "username":request.user}
    return render(request, 'core/userCreate.html', context)
@login_required(login_url='core:login')
def clientCreatePage(request, user, password):
    """
    Second step of user creation: collect the client profile for the
    pending user `user` and persist both User and Client on success.
    """
    if request.method == 'POST':
        # NOTE(review): the password is stored as received (no set_password
        # hashing) -- confirm this matches the authentication backend.
        new_user = User(username=user, password=password)
        client = Client(user=new_user)
        form = ClientForm(request.POST, request.FILES, instance=client)
        if form.is_valid():
            new_user.save()
            form.save()
            # messages.success(request, 'Cadastro realizado com sucesso!')
            return redirect('core:manager')
        else:
            messages.error(request, form.errors)
            # Bug fix: the URL kwarg is `user` (as in userCreatePage's
            # redirect), not `username` -- the old call raised NoReverseMatch.
            return redirect('core:clientCreate', user=user, password=password)
    form = ClientForm()
    context = {'form':form, "username":request.user}
    return render(request, 'core/clientCreate.html', context)
@login_required(login_url='core:login')
def userSettingsPage(request, user, info):
    """
    Edit either the account ('user') or the profile ('client') of `user`.

    info selects which form is shown/processed.
    NOTE(review): any other `info` value falls through and returns None
    (a 500 in Django) -- confirm the URLconf restricts the choices.
    """
    user = User.objects.get(username=user)
    if info == 'user':
        form = UserForm(instance=user)
        if request.method == 'POST':
            form = UserForm(request.POST, instance=user)
            if form.is_valid():
                form.save()
                return redirect('core:user', user.username)
        context = {'user':user, 'form':form}
        return render(request, 'core/userSettings.html', context)
    elif info == 'client':
        form = ClientForm(instance=user.client)
        if request.method == 'POST':
            form = ClientForm(request.POST, request.FILES, instance=user.client)
            if form.is_valid():
                form.save()
                return redirect('core:user', user.username)
        context = {'user':user, 'form':form}
        return render(request, 'core/userSettings.html', context)
@login_required(login_url='core:login')
def managerSettingsPage(request, user, info):
    """
    Manager-side variant of userSettingsPage: edit a user's account
    ('user') or profile ('client'), returning to the manager dashboard.

    NOTE(review): like userSettingsPage, an unexpected `info` value
    returns None -- confirm the URLconf restricts the choices.
    """
    user = User.objects.get(username=user)
    if info == 'user':
        form = UserForm(instance=user)
        if request.method == 'POST':
            form = UserForm(request.POST, instance=user)
            if form.is_valid():
                form.save()
                return redirect('core:manager')
        context = {'user':user, 'form':form}
        return render(request, 'core/managerUserSetting.html', context)
    elif info == 'client':
        form = ClientForm(instance=user.client)
        if request.method == 'POST':
            form = ClientForm(request.POST, request.FILES, instance=user.client)
            if form.is_valid():
                form.save()
                return redirect('core:manager')
        context = {'user':user, 'form':form}
        return render(request, 'core/managerUserSetting.html', context)
@login_required(login_url='core:login')
def userDetailPage(request, user):
    """
    Manager view of a single user: contracts and searches.

    A POST carrying 'del' deletes the user (userDelete) and returns to the
    manager dashboard.
    """
    if request.method == 'POST':
        if request.POST.get('del'):
            userDelete(user)
            return redirect('core:manager')
    user = User.objects.get(username=user)
    contracts = user.client.contract_set.all()
    searchs = user.client.search_set.all()
    context = {'user':user, 'contracts':contracts,
               'n_cont': len(contracts), 'searchs':searchs}
    return render(request, 'core/userDetail.html', context)
@login_required(login_url='core:login')
def downloadPage(request, file):
    """Stream the file of contract `file` (primary key) back to the browser."""
    contract = Contract.objects.get(pk=file)
    path = contract.file.path
    return download_file(request, path, os.path.basename(path))
@login_required(login_url='core:login')
def deleteContractPage(request, user, file):
    """Delete a contract (DB row and file on disk), then show the user detail page."""
    owner = User.objects.get(username=user)
    contract = Contract.objects.get(pk=file)
    os.remove(contract.file.path)
    contract.delete()
    return redirect('core:userDetail', user=owner)
@login_required(login_url='core:login')
def searchPage(request, user, key):
    """Run a search for `user` with term `key`, then show the user detail page."""
    account = User.objects.get(username=user)
    search(account.id, key)
    return redirect('core:userDetail', user=account)
from lxml import html
import cssselect
import requests
from feedgen.feed import FeedGenerator
import boto3
import json
# Module-level AWS handles and destinations (created once per Lambda container)
sqs = boto3.resource('sqs')
s3 = boto3.resource('s3')
bucket = 'github-trends.ryotarai.info'  # S3 bucket holding languages.json
queue_name = 'github_trends_worker'     # SQS queue consumed by the worker
def handle(event, context):
    """
    Lambda entry point: scrape the GitHub trending page for the language
    list, publish it to S3 as languages.json, then enqueue one SQS job per
    (language, since) pair for the worker.
    """
    page = requests.get('https://github.com/trending')
    tree = html.fromstring(page.content)
    languages = []
    # Two synthetic entries not present in the language dropdown
    languages.append({"url": "https://github.com/trending", "name": "All languages", "key": "all"})
    languages.append({"url": "https://github.com/trending/unknown", "name": "Unknown", "key": "unknown"})
    # NOTE(review): [1] assumes the second select-menu-list is the language
    # list -- fragile against GitHub markup changes.
    aa = tree.cssselect("div.select-menu-list")[1].cssselect("a")
    for a in aa:
        url = a.get("href")
        key = url.split("/")[-1]  # last path segment is the language key
        languages.append({"url": url, "name": a.cssselect("span")[0].text, "key": key})
    s3.Object(bucket, "languages.json").put(Body=json.dumps(languages), ContentType="application/json")
    queue = sqs.get_queue_by_name(QueueName=queue_name)
    # Fan out one worker job per language and time window
    for since in ["daily", "weekly", "monthly"]:
        for language in languages:
            body = json.dumps({"language": language, "since": since})
            print(body)
            queue.send_message(MessageBody=body)

if __name__ == '__main__':
    handle(None, None)
|
import re, random
from collections import defaultdict, deque
#base markov chain from https://github.com/Codecademy/markov_python
class MarkovChain:
    """
    Word-level Markov chain text generator.

    Keys of lookup_dict are tuples of `key_words` consecutive words; values
    are the lists of words observed to follow each key.

    Base implementation from https://github.com/Codecademy/markov_python,
    ported to Python 3: `xrange` -> `range`, and `dict.keys()` is a view
    that must be converted to a list before indexing.
    """

    def __init__(self, key_words=2):
        self.key_words = key_words
        self.lookup_dict = defaultdict(list)
        # Raw string avoids invalid-escape warnings; characters listed here
        # are treated as word separators
        self._punctuation_regex = re.compile(r'[.!;\?\:\-\[\]\n]+')
        self._seeded = False
        self.__seed_me()

    def __seed_me(self, rand_seed=None):
        # Seed the RNG once; later calls are no-ops because _seeded is set.
        if self._seeded is False:
            try:
                if rand_seed is not None:
                    random.seed(rand_seed)
                else:
                    random.seed()
                self._seeded = True
            except NotImplementedError:
                self._seeded = False

    def add_file(self, file_path):
        """Feed the contents of a text file into the chain."""
        with open(file_path, 'r') as fh:
            self.__add_source_data(fh.read())

    def add_string(self, str):
        """Feed a string into the chain."""
        self.__add_source_data(str)

    def __add_source_data(self, text):
        # Normalize punctuation to spaces, lowercase, then record each
        # (key-tuple -> following word) transition.
        clean_str = self._punctuation_regex.sub(' ', text).lower()
        for key, following in self.__generate_tuple_keys(clean_str.split()):
            self.lookup_dict[key].append(following)

    def __generate_tuple_keys(self, data):
        if len(data) < self.key_words:
            return
        for i in range(len(data) - self.key_words):
            yield [tuple(data[i:i + self.key_words]), data[i + self.key_words]]

    def generate_text(self, max_length=20):
        """
        Generate up to `max_length` words of text from the chain.

        Returns a list of words; it may be shorter than max_length if the
        walk reaches a key with no recorded successors, and is empty when
        the chain holds no data.
        """
        context = deque()
        output = []
        if len(self.lookup_dict) > 0:
            self.__seed_me(rand_seed=len(self.lookup_dict))
            # Pick a random key as the starting context
            idx = random.randint(0, len(self.lookup_dict) - 1)
            chain_head = list(list(self.lookup_dict.keys())[idx])
            context.extend(chain_head)
            while len(output) < (max_length - self.key_words):
                # .get avoids defaultdict inserting empty lists for
                # unseen contexts while generating
                next_choices = self.lookup_dict.get(tuple(context), [])
                if len(next_choices) > 0:
                    next_word = random.choice(next_choices)
                    context.append(next_word)
                    output.append(context.popleft())
                else:
                    break
            output.extend(list(context))
        return output
|
import _winreg as winreg
import itertools
from serial import SerialException, Serial
# Line terminators: devices send CRLF, we transmit a bare LF
TERM_SEQ_READ = '\r\n'
TERM_SEQ_WRITE = '\n'

class SerialLineTransceiverException(Exception): pass
class SerialLineTransceiver:
    """
    Wraps a serial-port-like object with line-oriented read/write using
    configurable terminator sequences.
    """
    def __init__(
        self,
        ser,
        term_seq_read=TERM_SEQ_READ,
        term_seq_write=TERM_SEQ_WRITE,
    ):
        self.ser = ser  # object exposing read()/write() (e.g. serial.Serial)
        self.term_seq_read=term_seq_read
        self.term_seq_write=term_seq_write

    def write_line(self,s):
        """Transmit `s` followed by the write terminator."""
        self.ser.write(s + self.term_seq_write)

    def read_line(self):
        """
        Read until the read terminator arrives; return the line without it.

        Raises SerialLineTransceiverException when the port read times out
        (read() returning the empty string).
        """
        tsr = self.term_seq_read
        s = ''
        while True:
            c = self.ser.read()
            if c == '':
                raise SerialLineTransceiverException('timeout before line read')
            s += c
            if s[-1 * len(tsr):] == tsr:
                s = s[:-1*len(tsr)]
                return s

    @staticmethod
    def enumerate_serial_ports():
        """ Uses the Win32 registry to return a iterator of serial
            (COM) ports existing on this computer.
        """
        path = 'HARDWARE\\DEVICEMAP\\SERIALCOMM'
        try:
            key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, path)
        except WindowsError:
            # Bug fix: the original raised the undefined name IterationError
            # (a NameError at runtime); ending the generator is the correct
            # "no ports" behavior.
            return
        for i in itertools.count():
            try:
                val = winreg.EnumValue(key, i)
                yield (str(val[1]), str(val[0]))
            except EnvironmentError:
                # EnumValue raises when the index runs past the last value
                break
class HandshakeSerialDeviceException(Exception): pass

# Defaults for device communication
BAUDRATE = 115200  # serial line speed
TIMEOUT = 5        # seconds, for reads after the handshake phase
class HandshakeSerialDevice(SerialLineTransceiver):
    """
    A SerialLineTransceiver bound to the first serial port on which a
    device completes the handshake and reports the requested device id.
    """
    def __init__(self,handshake_char,id_char,id,baudrate=BAUDRATE,timeout=TIMEOUT):
        SerialLineTransceiver.__init__(
            self,
            self._init(
                handshake_char,
                id_char,
                id,
                baudrate,
                timeout
            )
        )

    @classmethod
    def _init(cls,handshake_char,id_char,device_id,baudrate,timeout):
        # Probe every known serial port for a device that handshakes and
        # reports device_id; reopen with the caller's timeout on success.
        for port,_ in cls.enumerate_serial_ports():
            try:
                ser = Serial(
                    port=port,
                    baudrate=baudrate,
                    timeout=2 # give the device this much time to handshake
                )
            except SerialException:
                # Bug fix: `except SerialException, s` is Python-2-only
                # syntax (a SyntaxError on Python 3); the bound name was
                # never used anyway.
                continue
            try:
                line_trans = SerialLineTransceiver(ser)
                if cls._handshakes(line_trans,handshake_char) and device_id == cls._get_id(line_trans,id_char):
                    # Reopen with the requested timeout and consume the
                    # handshake the device re-emits on (re)connection.
                    ser.close()
                    ser = Serial(
                        port=port,
                        baudrate=baudrate,
                        timeout=timeout
                    )
                    line_trans = SerialLineTransceiver(ser)
                    cls._handshakes(line_trans,handshake_char)
                    return ser
            except SerialLineTransceiverException:
                continue
        raise HandshakeSerialDeviceException('device id %d not found' % device_id)

    @staticmethod
    def _handshakes(line_trans,handshake_char):
        # True when the device's first line equals the expected handshake char
        handshake = line_trans.read_line()
        return handshake == handshake_char

    @staticmethod
    def _get_id(line_trans,id_char):
        # Request the id; the device answers with the id then a return code
        line_trans.write_line(id_char)
        id = int(line_trans.read_line())
        return_code = line_trans.read_line()  # read to consume; deliberately ignored
        return id
|
"""
File: daemon_conf.py
Description: Pycollector configuration file.
"""
#PID SETTINGS
#------------
#PID_PATH: LogCollector pid path/filename
PID_FILE_PATH="/var/run/pycollector/pycollector.pid"
#APPLICATION LOG SETTINGS
#------------------------
#LOG_FILE_PATH: sets where the logs will be located.
LOGS_PATH="/var/log/pycollector/"
#LOG_SEVERITY: changes the level of severity for all application logs created.
#Options: DEBUG, INFO, WARNING, ERROR, CRITICAL
LOG_SEVERITY="DEBUG"
#LOG_FORMATTER: sets the format of application log lines.
#For a complete reference of options, see:
#http://docs.python.org/library/logging.html#logrecord-attributes
LOG_FORMATTER="%(asctime)s - %(filename)s (%(lineno)d) [(%(threadName)-10s)] %(levelname)s - %(message)s"
#LOG_ROTATING: describes when the log files will be rotated.
#Options:
#'S' Seconds
#'M' Minutes
#'H' Hours
#'D' Days
#'W' Week day (0=Monday)
#'midnight' Roll over at midnight
LOG_ROTATING="midnight"
|
import math


def third_side(a, b, angle_deg):
    """Length of the third side of a triangle given two sides and the
    included angle in degrees (law of cosines):
    c = sqrt(a^2 + b^2 - 2*a*b*cos(gamma))."""
    gamma = math.radians(angle_deg)
    return math.sqrt(a * a + b * b - 2 * a * b * math.cos(gamma))


def main():
    a = float(input('Введи длину первой стороны треугольника \n'))
    b = float(input('Введи длину второй стороны треугольника \n'))
    c = float(input('Введи угол между этими сторонами в градусах \n'))
    # Bug fix: the original printed a*b*cos(c), which is not a side length;
    # the law of cosines gives the third side.
    print("Третья сторона равна " + str(third_side(a, b, c)))


if __name__ == "__main__":
    main()
|
# Generated by Django 2.2.11 on 2023-04-22 15:28
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ambulance details and patient category to ShiftingRequest."""

    dependencies = [
        ("facility", "0348_merge_20230421_1917"),
    ]

    operations = [
        migrations.AddField(
            model_name="shiftingrequest",
            name="ambulance_driver_name",
            field=models.TextField(blank=True, default=""),
        ),
        migrations.AddField(
            model_name="shiftingrequest",
            name="ambulance_number",
            field=models.TextField(blank=True, default=""),
        ),
        migrations.AddField(
            model_name="shiftingrequest",
            name="ambulance_phone_number",
            # Accepts Indian mobile numbers with optional +91/91/0 prefix
            field=models.CharField(
                blank=True,
                default="",
                max_length=14,
                validators=[
                    django.core.validators.RegexValidator(
                        code="invalid_mobile",
                        message="Please Enter 10/11 digit mobile number or landline as 0<std code><phone number>",
                        regex="^((\\+91|91|0)[\\- ]{0,1})?[456789]\\d{9}$",
                    )
                ],
            ),
        ),
        migrations.AddField(
            model_name="shiftingrequest",
            name="patient_category",
            field=models.CharField(
                choices=[
                    ("Comfort", "Comfort Care"),
                    ("Stable", "Stable"),
                    ("Moderate", "Abnormal"),
                    ("Critical", "Critical"),
                ],
                max_length=8,
                null=True,
            ),
        ),
    ]
|
#!/usr/bin/env python3
import os, sys
import time
try:
from .api import ApiClient
except ImportError:
from api import ApiClient
class SolverExceptions(Exception):
    """Root of the solver exception hierarchy."""
    pass

class ValidationException(SolverExceptions):
    """Bad or missing input parameters (raised before any network call)."""
    pass

class NetworkException(SolverExceptions):
    """Transient failure: network error or captcha not solved yet."""
    pass

class ApiException(SolverExceptions):
    """The 2captcha API returned an error or unrecognized response."""
    pass

class TimeoutException(SolverExceptions):
    """Polling for a captcha result exceeded the allotted timeout."""
    pass
class TwoCaptcha():
    """
    Client for the 2captcha.com solving service.

    Each captcha-type wrapper (normal, recaptcha, hcaptcha, ...) prepares
    type-specific parameters and delegates to solve(), which submits the
    task and polls for the answer unless a callback URL is configured.
    """
    def __init__(self,
                 apiKey,
                 softId=None,
                 callback=None,
                 defaultTimeout=120,
                 recaptchaTimeout=600,
                 pollingInterval=10,
                 server = '2captcha.com'):

        self.API_KEY = apiKey
        self.soft_id = softId
        self.callback = callback            # global pingback URL (skips polling)
        self.default_timeout = defaultTimeout
        self.recaptcha_timeout = recaptchaTimeout
        self.polling_interval = pollingInterval
        self.api_client = ApiClient(post_url = str(server))
        self.max_files = 9                  # API limit for multi-file uploads
        self.exceptions = SolverExceptions  # exposed for callers' except clauses

    def normal(self, file, **kwargs):
        '''
        Wrapper for solving normal captcha (image)

        Required:
            file                (image or base64)

        Optional params:
            phrase
            numeric
            minLen
            maxLen
            phrase
            caseSensitive
            calc
            lang
            hintText
            hintImg
            softId
            callback
            proxy = {'type': 'HTTPS', 'uri': 'login:password@IP_address:PORT'})
        '''
        method = self.get_method(file)
        result = self.solve(**method, **kwargs)
        return result

    def text(self, text, **kwargs):
        '''
        Wrapper for solving text captcha

        Required:
            text

        Optional params:
            lang
            softId
            callback
        '''
        result = self.solve(text=text, method='post', **kwargs)
        return result

    def recaptcha(self, sitekey, url, version='v2', enterprise=0, **kwargs):
        '''
        Wrapper for solving recaptcha (v2, v3)

        Required:
            sitekey
            url

        Optional params:
            invisible
            version
            enterprise
            action
            score
            softId
            callback
            proxy = {'type': 'HTTPS', 'uri': 'login:password@IP_address:PORT'})
        '''
        params = {
            'googlekey': sitekey,
            'url': url,
            'method': 'userrecaptcha',
            'version': version,
            'enterprise': enterprise,
            **kwargs,
        }
        # Recaptcha has its own, longer timeout
        result = self.solve(timeout=self.recaptcha_timeout, **params)
        return result

    def funcaptcha(self, sitekey, url, **kwargs):
        '''
        Wrapper for solving funcaptcha

        Required:
            sitekey
            url

        Optional params:
            surl
            userAgent
            softId
            callback
            proxy = {'type': 'HTTPS', 'uri': 'login:password@IP_address:PORT'})
            **{'data[key]': 'anyStringValue'}
        '''
        result = self.solve(publickey=sitekey,
                            url=url,
                            method='funcaptcha',
                            **kwargs)
        return result

    def geetest(self, gt, challenge, url, **kwargs):
        '''
        Wrapper for solving geetest captcha

        Required:
            gt
            challenge
            url

        Optional params:
            apiServer
            softId
            callback
            proxy = {'type': 'HTTPS', 'uri': 'login:password@IP_address:PORT'})
        '''
        result = self.solve(gt=gt,
                            challenge=challenge,
                            url=url,
                            method='geetest',
                            **kwargs)
        return result

    def hcaptcha(self, sitekey, url, **kwargs):
        '''
        Wrapper for solving hcaptcha

        Required:
            sitekey
            url

        Optional params:
            invisible
            data
            softId
            callback
            proxy = {'type': 'HTTPS', 'uri': 'login:password@IP_address:PORT'})
        '''
        result = self.solve(sitekey=sitekey,
                            url=url,
                            method='hcaptcha',
                            **kwargs)
        return result

    def keycaptcha(self, s_s_c_user_id, s_s_c_session_id,
                   s_s_c_web_server_sign, s_s_c_web_server_sign2, url,
                   **kwargs):
        '''
        Wrapper for solving

        Required:
            s_s_c_user_id
            s_s_c_session_id
            s_s_c_web_server_sign
            s_s_c_web_server_sign2
            url

        Optional params:
            softId
            callback
            proxy = {'type': 'HTTPS', 'uri': 'login:password@IP_address:PORT'})
        '''
        params = {
            's_s_c_user_id': s_s_c_user_id,
            's_s_c_session_id': s_s_c_session_id,
            's_s_c_web_server_sign': s_s_c_web_server_sign,
            's_s_c_web_server_sign2': s_s_c_web_server_sign2,
            'url': url,
            'method': 'keycaptcha',
            **kwargs,
        }
        result = self.solve(**params)
        return result

    def capy(self, sitekey, url, **kwargs):
        '''
        Wrapper for solving capy

        Required:
            sitekey
            url

        Optional params:
            softId
            callback
            proxy = {'type': 'HTTPS', 'uri': 'login:password@IP_address:PORT'})
        '''
        result = self.solve(captchakey=sitekey,
                            url=url,
                            method='capy',
                            **kwargs)
        return result

    def grid(self, file, **kwargs):
        '''
        Wrapper for solving grid captcha (image)

        Required:
            file                (image or base64)

        Optional params:
            rows
            cols
            previousId
            canSkip
            lang
            hintImg
            hintText
            softId
            callback
            proxy = {'type': 'HTTPS', 'uri': 'login:password@IP_address:PORT'})
        '''
        method = self.get_method(file)
        params = {
            'recaptcha': 1,
            **method,
            **kwargs,
        }
        result = self.solve(**params)
        return result

    def canvas(self, file, **kwargs):
        '''
        Wrapper for solving canvas captcha (image)

        Required:
            file                (image or base64)

        Optional params:
            previousId
            canSkip
            lang
            hintImg
            hintText
            softId
            callback
            proxy = {'type': 'HTTPS', 'uri': 'login:password@IP_address:PORT'})
        '''
        # Canvas requires at least one hint for the workers
        if not ('hintText' in kwargs or 'hintImg' in kwargs):
            raise ValidationException(
                'parameters required: hintText and/or hintImg')
        method = self.get_method(file)
        params = {
            'recaptcha': 1,
            'canvas': 1,
            **method,
            **kwargs,
        }
        result = self.solve(**params)
        return result

    def coordinates(self, file, **kwargs):
        '''
        Wrapper for solving coordinates captcha (image)

        Required:
            file                (image or base64)

        Optional params:
            hintImg
            hintText
            lang
            softId
            callback
            proxy = {'type': 'HTTPS', 'uri': 'login:password@IP_address:PORT'})
        '''
        method = self.get_method(file)
        params = {
            'coordinatescaptcha': 1,
            **method,
            **kwargs,
        }
        result = self.solve(**params)
        return result

    def rotate(self, files, **kwargs):
        '''
        Wrapper for solving rotate captcha (image)

        Required:
            files               (images)

        Optional params:
            angle
            lang
            hintImg
            hintText
            softId
            callback
            proxy = {'type': 'HTTPS', 'uri': 'login:password@IP_address:PORT'})
        '''
        # NOTE(review): any other `files` type silently returns None --
        # confirm whether a ValidationException would be preferable.
        if isinstance(files, str):
            file = self.get_method(files)['file']
            result = self.solve(file=file, method='rotatecaptcha', **kwargs)
            return result
        elif isinstance(files, dict):
            files = list(files.values())
            files = self.extract_files(files)
            result = self.solve(files=files, method='rotatecaptcha', **kwargs)
            return result

    def solve(self, timeout=0, polling_interval=0, **kwargs):
        '''
        sends captcha, receives result

        Parameters
        ----------
        timeout : float
        polling_interval : int

        **kwargs : all captcha params

        Returns
        -------
        result : string
        '''
        id_ = self.send(**kwargs)
        result = {'captchaId': id_}

        # NOTE(review): only the instance-level callback skips polling; a
        # per-call `callback` kwarg still polls -- confirm that is intended.
        if self.callback is None:
            timeout = float(timeout or self.default_timeout)
            sleep = int(polling_interval or self.polling_interval)

            code = self.wait_result(id_, timeout, sleep)
            result.update({'code': code})

        return result

    def wait_result(self, id_, timeout, polling_interval):
        # Poll for the answer until timeout; NetworkException means
        # "not ready yet", so sleep and retry.
        max_wait = time.time() + timeout

        while time.time() < max_wait:
            try:
                return self.get_result(id_)
            except NetworkException:
                time.sleep(polling_interval)

        raise TimeoutException(f'timeout {timeout} exceeded')

    def get_method(self, file):
        # Decide whether `file` is a base64 payload or a path on disk.
        if not file:
            raise ValidationException('File required')

        # Heuristic: long strings without a dot are treated as base64
        if not '.' in file and len(file) > 50:
            return {'method': 'base64', 'body': file}

        if not os.path.exists(file):
            raise ValidationException(f'File not found: {file}')

        return {'method': 'post', 'file': file}

    def send(self, **kwargs):
        # Submit a captcha; returns the captcha id from the "OK|<id>" reply.
        params = self.default_params(kwargs)
        params = self.rename_params(params)

        params, files = self.check_hint_img(params)

        response = self.api_client.in_(files=files, **params)

        if not response.startswith('OK|'):
            raise ApiException(f'cannot recognize response {response}')

        return response[3:]

    def get_result(self, id_):
        # Fetch a result; raises NetworkException while it is not ready.
        response = self.api_client.res(key=self.API_KEY, action='get', id=id_)

        if response == 'CAPCHA_NOT_READY':
            raise NetworkException

        if not response.startswith('OK|'):
            raise ApiException(f'cannot recognize response {response}')

        return response[3:]

    def balance(self):
        '''
        get my balance

        Returns
        -------
        balance : float
        '''
        response = self.api_client.res(key=self.API_KEY, action='getbalance')
        return float(response)

    def report(self, id_, correct):
        '''
        report of solved captcha: good/bad

        Parameters
        ----------
        id_ : captcha ID
        correct : True/False

        Returns
        -------
        None.
        '''
        rep = 'reportgood' if correct else 'reportbad'
        self.api_client.res(key=self.API_KEY, action=rep, id=id_)
        return

    def rename_params(self, params):
        # Map the library's camelCase names onto the raw API field names.
        replace = {
            'caseSensitive': 'regsense',
            'minLen': 'min_len',
            'maxLen': 'max_len',
            'hintText': 'textinstructions',
            'hintImg': 'imginstructions',
            'url': 'pageurl',
            'score': 'min_score',
            'text': 'textcaptcha',
            'rows': 'recaptcharows',
            'cols': 'recaptchacols',
            'previousId': 'previousID',
            'canSkip': 'can_no_answer',
            'apiServer': 'api_server',
            'softId': 'soft_id',
            'callback': 'pingback',
        }

        new_params = {
            v: params.pop(k)
            for k, v in replace.items() if k in params
        }

        # proxy dicts are flattened into the two API fields
        proxy = params.pop('proxy', '')
        proxy and new_params.update({
            'proxy': proxy['uri'],
            'proxytype': proxy['type']
        })

        new_params.update(params)

        return new_params

    def default_params(self, params):
        # Inject the API key and the instance-level callback/softId unless
        # overridden per call.
        params.update({'key': self.API_KEY})

        callback = params.pop('callback', self.callback)
        soft_id = params.pop('softId', self.soft_id)

        if callback: params.update({'callback': callback})
        if soft_id: params.update({'softId': soft_id})

        self.has_callback = bool(callback)

        return params

    def extract_files(self, files):
        # Validate a multi-file upload and key the files as file_1..file_N.
        if len(files) > self.max_files:
            raise ValidationException(
                f'Too many files (max: {self.max_files})')

        not_exists = [f for f in files if not (os.path.exists(f))]
        if not_exists:
            raise ValidationException(f'File not found: {not_exists}')

        files = {f'file_{e+1}': f for e, f in enumerate(files)}
        return files

    def check_hint_img(self, params):
        # If a hint image is a file path, move it into the files payload;
        # base64 hints stay in params.
        hint = params.pop('imginstructions', None)
        files = params.pop('files', {})

        if not hint:
            return params, files

        if not '.' in hint and len(hint) > 50:
            return params, files

        if not os.path.exists(hint):
            raise ValidationException(f'File not found: {hint}')

        if not files:
            files = {'file': params.pop('file', {})}

        files.update({'imginstructions': hint})

        return params, files
if __name__ == '__main__':
    # Smoke test: construct a solver with the API key from the command line
    key = sys.argv[1]
    sol = TwoCaptcha(key)
|
import re
def get_effective_date(string):
    """
    Extract the effective-date clause from legal text.

    Captures the text following "shall ... effective" or "effective date"
    (case-insensitive, per line) and returns the shortest candidate --
    heuristically the most specific clause -- stripped of leading
    whitespace and trailing '.'/':'.  Returns None when nothing matches.
    """
    dates = re.findall(r"(?:shall.+effective|effective date):?(.+)(?=[\.:]?)",
                       string, re.I | re.M)
    try:
        # key=len replaces the original lambda; min() raises ValueError
        # on an empty candidate list
        shortest_date = min(dates, key=len)
    except ValueError:
        return None
    else:
        return shortest_date.lstrip().rstrip('.:')
def get_portal_paragraph(full_text):
    """
    Return the (stripped) lines of `full_text` that look like a mandate to
    establish or maintain a portal/website, or None when there are none.

    Lines longer than 2000 characters are ignored.
    """
    candidates = [
        line.strip()
        for line in full_text.split('\n')
        if 'shall' in line
        and (('portal' in line) or ('website' in line))
        and (('establish' in line) or ('maintain' in line))
        and len(line) <= 2000
    ]
    return candidates or None
def get_portal_url(full_text):
    """
    Return the set of lower-cased domain names found in `full_text`
    (gov/org/com/net/info/edu/io TLDs), or None when none are present.
    """
    pattern = r"((?:\w{2,}\.)?\w{2,}\.(?:gov|org|com|net|info|edu|io))"
    found = {match.lower() for match in re.findall(pattern, full_text, re.I)}
    return found or None
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 The SymbiFlow Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
import edalize
import os
import re
import subprocess
from utils.utils import Timed, have_exec
from toolchains.symbiflow import NextpnrGeneric
YOSYS_REGEXP = re.compile("(Yosys [a-z0-9+.]+) (\(git sha1) ([a-z0-9]+),.*")
class NextpnrOxide(NextpnrGeneric):
'''Nextpnr PnR + Yosys synthesis'''
    def __init__(self, rootdir):
        """Configure the generic nextpnr flow for the Lattice Nexus (Oxide) arch."""
        NextpnrGeneric.__init__(self, rootdir)
        self.toolchain = "nextpnr-nexus"
        self.carries = (True, False)
        self.nextpnr_log = "next.log"
def resources(self):
'''resources map for nexus arch'''
res_map_nexus = {
'LUT': ('OXIDE_COMB'),
'FF': ('OXIDE_FF'),
'CARRY': ('CCU2'),
'IOB': ('SEIO33_CORE'),
'PLL': ('PLL_CORE'),
'BRAM': ('OXIDE_EBR'),
'LRAM': ('LRAM_CORE'),
}
resources_count = {
"LUT": 0,
"FF": 0,
"BRAM": 0,
"LRAM": 0,
"CARRY": 0,
"PLL": 0,
"IOB": 0,
}
res = self.get_resources()
for res_type, res_name in res_map_nexus.items():
if res_name in res:
resources_count[res_type] += res[res_name]
return resources_count
def prepare_edam(self):
os.makedirs(self.out_dir, exist_ok=True)
for f in self.srcs:
self.files.append(
{
'name': os.path.realpath(f),
'file_type': 'verilogSource'
}
)
if self.pdc is not None:
self.files.append(
{
'name': os.path.realpath(self.pdc),
'file_type': 'PDC'
}
)
args = f"--device {self.device} "
args += "--timing-allow-fail "
if self.seed:
args += " --seed %u" % (self.seed, )
edam = {
'files': self.files,
'name': self.project_name,
'toplevel': self.top,
'tool_options': {
'oxide': {
'nextpnr_options': args.split(),
}
}
}
self.env_script = os.path.abspath(
'env.sh'
) + ' nextpnr lattice-' + self.device
return edam
def run(self):
with Timed(self, 'total'):
with Timed(self, 'prepare'):
self.edam = self.prepare_edam()
os.environ["EDALIZE_LAUNCHER"] = f"source {self.env_script} &&"
self.backend = edalize.get_edatool('oxide')(
edam=self.edam, work_root=self.out_dir
)
self.backend.configure("")
try:
with Timed(self, 'fasm'):
self.backend.build_main(self.project_name + '.fasm')
with Timed(self, 'bitstream'):
self.backend.build_main(self.project_name + '.bit')
finally:
del os.environ['EDALIZE_LAUNCHER']
self.add_runtimes()
self.add_wirelength()
@staticmethod
def yosys_ver():
# Yosys 0.7+352 (git sha1 baddb017, clang 3.8.1-24 -fPIC -Os)
yosys_version = subprocess.check_output(
"yosys -V", shell=True, universal_newlines=True
).strip()
m = YOSYS_REGEXP.match(yosys_version)
assert m
return "{} {} {})".format(m.group(1), m.group(2), m.group(3))
@staticmethod
def nextpnr_version():
'''
nextpnr-nexus -V
'''
return subprocess.check_output(
"nextpnr-nexus -V || true",
shell=True,
universal_newlines=True,
stderr=subprocess.STDOUT
).strip()
def versions(self):
return {
'yosys': self.yosys_ver(),
'nextpnr-nexus': self.nextpnr_version(),
}
@staticmethod
def seedable():
return True
@staticmethod
def check_env():
return {
'yosys': have_exec('yosys'),
'nextpnr-nexus': have_exec('nextpnr-nexus'),
}
|
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 6 14:48:15 2020
@author: Jinsung
"""
# Make the Chapter-14 encoder-decoder module importable from a hard-coded
# local path, then pull in its classes (np is assumed to come from this
# star import -- TODO confirm).
import sys
sys.path.insert(0, 'C:\\Users\\Jinsung\\Documents\\Deep_Learning_Code\\Chap14-encoder decoder')
from encoder_decoder import *
class Gan(RnnExtModel):
    """Generative adversarial network built on RnnExtModel.

    The class body is intentionally empty; all behavior is attached below
    via monkey-patching (e.g. ``Gan.train_step = gan_train_step``),
    following the incremental style of the surrounding tutorial code.
    """
    pass
# 파라미터 생성 메서드 재정의
def gan_init_parameters(self, hconfigs):
    """Build the generator and discriminator subnets from *hconfigs*.

    hconfigs must provide 'generator', 'discriminor' (sic -- the key is
    spelled this way throughout this codebase) and 'seed_shape'.  The
    generator must map seed_shape to the dataset's input shape; the
    discriminator must map the input shape to a single logit.
    """
    gconf = hconfigs['generator']
    dconf = hconfigs['discriminor']
    # Allow a single layer spec as shorthand for a one-layer subnet.
    if not isinstance(gconf[0], list):
        gconf = [gconf]
    if not isinstance(dconf[0], list):
        dconf = [dconf]
    self.seed_shape = hconfigs['seed_shape']
    input_shape = self.dataset.input_shape
    pmg, gen_shape = self.build_subnet(gconf, self.seed_shape)
    pmd, bin_shape = self.build_subnet(dconf, input_shape)
    # Sanity-check the subnet geometry before training starts.
    assert tuple(gen_shape) == tuple(input_shape)
    assert tuple(bin_shape) == tuple([1])
    self.gconfigs, self.dconfigs = gconf, dconf
    self.pm_gen, self.pm_dis = pmg, pmd
    self.seqout = False
    self.pm_output = None
# Reuse the autoencoder's subnet builder and install the initializer.
Gan.build_subnet = autoencoder_build_subnet
Gan.init_parameters = gan_init_parameters
# 미니배치 학습 메서드 재정의
def gan_train_step(self, x, y):
    """Run one adversarial minibatch: discriminator pass, then generator.

    The supplied labels *y* are ignored -- real/fake targets are built
    inside the two training passes.  Returns ([d_loss, g_loss], 0).
    """
    self.is_training = True
    losses = [self.train_discriminor(x), self.train_generator(len(x))]
    self.is_training = False
    return losses, 0
Gan.train_step = gan_train_step
# 판별기 학습 메서드 정의
def gan_train_discriminor(self, real_x):
    """Train the discriminator on a half-real / half-fake batch.

    The generator produces mb_size fakes that are stacked under the real
    samples; labels are 1 for real, 0 for fake.  Returns the loss.
    """
    mb_size = len(real_x)
    fake_x, _ = self.forward_generator(mb_size)
    mixed_x = np.vstack([real_x, fake_x])
    output, aux_dis = self.forward_discriminor(mixed_x)
    y = np.zeros([2*mb_size, 1])
    y[0:mb_size, 0] = 1.0  # the first half of the batch is real
    d_loss, aux_pp = self.forward_postproc(output, y)
    G_loss = 1.0  # dL/dL seed for backpropagation
    G_output = self.backprop_postproc(G_loss, aux_pp)
    self.backprop_discriminor(G_output, aux_dis)
    return d_loss
Gan.train_discriminor = gan_train_discriminor
# 생성기 학습 메서드 정의
def gan_train_generator(self, mb_size):
    """Train the generator to make the discriminator answer "real" (1).

    Gradients flow back through the discriminator with is_training
    toggled off, so only generator parameters are updated (see
    gan_update_param).  Returns the generator loss.
    """
    fake_x, aux_gen = self.forward_generator(mb_size)
    output, aux_dis = self.forward_discriminor(fake_x)
    y = np.ones([mb_size, 1])  # generator wants the fakes labelled real
    g_loss, aux_pp = self.forward_postproc(output, y)
    G_loss = 1.0  # dL/dL seed for backpropagation
    G_output = self.backprop_postproc(G_loss, aux_pp)
    # Freeze discriminator updates while propagating through its layers.
    self.is_training = False
    G_fake_x = self.backprop_discriminor(G_output, aux_dis)
    self.is_training = True
    self.backprop_generator(G_fake_x, aux_gen)
    return g_loss
Gan.train_generator = gan_train_generator
# 판별기에 대한 순전파 및 역전파 처리 메서드 정의
def gan_forward_discriminor(self, x):
    """Forward *x* through the discriminator stack.

    Returns the final activation plus the per-layer auxiliary data
    needed later by backpropagation.
    """
    activation, aux_records = x, []
    for idx, layer_cfg in enumerate(self.dconfigs):
        activation, aux = self.forward_layer(activation, layer_cfg, self.pm_dis[idx])
        aux_records.append(aux)
    return activation, aux_records
def gan_backprop_discriminor(self, G_hidden, aux_dis):
    """Propagate gradients back through the discriminator layers in reverse."""
    for idx in range(len(self.dconfigs) - 1, -1, -1):
        G_hidden = self.backprop_layer(
            G_hidden, self.dconfigs[idx], self.pm_dis[idx], aux_dis[idx])
    return G_hidden
# Install the discriminator passes on the class.
Gan.forward_discriminor = gan_forward_discriminor
Gan.backprop_discriminor = gan_backprop_discriminor
# 생성기에 대한 순전파 및 역전파 처리 메서드 정의
def gan_forward_generator(self, mb_size):
    """Sample mb_size uniform seeds and map them through the generator stack."""
    fake = np.random.uniform(-1.0, 1.0, size=[mb_size] + self.seed_shape)
    aux_records = []
    for idx, layer_cfg in enumerate(self.gconfigs):
        fake, aux = self.forward_layer(fake, layer_cfg, self.pm_gen[idx])
        aux_records.append(aux)
    return fake, aux_records
def gan_backprop_generator(self, G_hidden, aux_gen):
    """Propagate generator gradients through its layers in reverse order."""
    for idx in range(len(self.gconfigs) - 1, -1, -1):
        G_hidden = self.backprop_layer(
            G_hidden, self.gconfigs[idx], self.pm_gen[idx], aux_gen[idx])
    return G_hidden
# Install the generator passes on the class.
Gan.forward_generator = gan_forward_generator
Gan.backprop_generator = gan_backprop_generator
# 파라미터 수정 메서드 재정의
def gan_update_param(self, pm, key, G_key):
    """Apply a parameter update, but only while training is active.

    While the generator backpropagates through the discriminator,
    is_training is False and the update is skipped.
    """
    if self.is_training:
        super(Gan, self).update_param(pm, key, G_key)
Gan.update_param = gan_update_param
# 정확도 계산 메서드 재정의
def gan_eval_accuracy(self, real_x, y, output=None):
    """Evaluate discriminator and generator accuracy.

    Bug fixes vs. the original:
    * the second discriminator output was assigned to a misspelled
      variable (``otuput``), so the generator accuracy was computed from
      the stale mixed-batch output instead of the fake-batch output;
    * the module-level binding read ``gan_eval_accuracy = gan_eval_accuracy``
      (a no-op), so the method was never attached to ``Gan``.

    Returns [discriminator_accuracy, generator_accuracy].
    """
    mb_size = len(real_x)
    # Discriminator accuracy on a half-real / half-fake batch.
    fake_x, _ = self.forward_generator(mb_size)
    mixed_x = np.vstack([real_x, fake_x])
    output, aux_dis = self.forward_discriminor(mixed_x)
    y = np.zeros([2*mb_size, 1])
    y[0:mb_size] = 1.0
    d_acc = self.dataset.eval_accuracy(mixed_x, y, output)
    # Generator accuracy: how often freshly generated fakes pass as real.
    fake_x, _ = self.forward_generator(mb_size)
    output, aux_dis = self.forward_discriminor(fake_x)
    y = np.ones([mb_size, 1])
    g_acc = self.dataset.eval_accuracy(fake_x, y, output)
    return [d_acc, g_acc]
Gan.eval_accuracy = gan_eval_accuracy
# 시각화 메서드 재정의
def gan_visualize(self, num):
    """Show *num* real samples stacked above *num* generated ones."""
    real_batch, _ = self.dataset.get_visualize_data(num)
    fake_batch, _ = self.forward_generator(num)
    combined = np.vstack([real_batch, fake_batch])
    self.dataset.visualize(combined)
Gan.visualize = gan_visualize
|
"""
Purpose: Project Euler problems
Date created: 2019-11-08
Contributor(s): Mark M.
ID: 243
Title: Resilience
URI: https://projecteuler.net/problem=243
Difficulty: ?
Status: Incomplete
Problem:
A positive fraction whose numerator is less than its denominator is called a
proper fraction.
For any denominator, d, there will be d−1 proper fractions; for example, with d = 12:
1/12, 2/12, 3/12, 4/12, 5/12, 6/12, 7/12, 8/12, 9/12, 10/12, 11/12.
We shall call a fraction that cannot be cancelled down a resilient fraction.
Furthermore we shall define the resilience of a denominator, R(d), to be the
ratio of its proper fractions that are resilient; for example, R(12) = 4/11.
In fact, d = 12 is the smallest denominator having a resilience R(d) < 4/10.
Find the smallest denominator d, having a resilience R(d) < 15499/94744.
"""
from time import sleep
from os import getcwd
from os.path import join as pjoin
import typing as T
# Generic type variables used by the signatures below.
Q = T.TypeVar('Q', int, str, float)
N = T.TypeVar('N', int, float)
# Convenience aliases for homogeneous vectors.
q_vec = T.List[Q]
n_vec = T.List[N]
s_vec = T.List[str]
StrFileOutput = T.NewType('StrFileOutput', str)
# The d = 12 example from the problem statement; used by the import-time
# sanity check further down.
sample: s_vec = [
    '1/12',
    '2/12',
    '3/12',
    '4/12',
    '5/12',
    '6/12',
    '7/12',
    '8/12',
    '9/12',
    '10/12',
    '11/12',
]
def mABS(n: N) -> N:
    """Return the absolute value of *n*.

    Kept for backward compatibility with existing call sites; delegates
    to the built-in abs() instead of the manual sign flip.
    """
    return abs(n)
def HCF(x: N, y: N) -> N:
    """Return the highest common factor (GCD) of x and y.

    Uses the remainder-based Euclidean algorithm, which needs
    O(log min(x, y)) steps instead of the O(max(x, y)) repeated
    subtraction it replaces -- this function dominates the runtime of
    the resilience search.  HCF(0, 0) == 0, matching the original.
    """
    x, y = abs(x), abs(y)
    while y:
        x, y = y, x % y
    return x
def print_frac(n: int, d: int) -> None:
    """Print the fraction n/d to stdout."""
    print(str(n) + "/" + str(d))
def format_frac(n: int, d: int) -> str:
    """Return the fraction n/d rendered as the string "n/d"."""
    return "{}/{}".format(n, d)
def split_em(x: str, split_by: str) -> T.Iterator[str]:
    """Split *x* on *split_by* and yield each piece lazily."""
    yield from x.split(split_by)
def fraction_generator(n: int) -> T.Iterable[str]:
    """Yield the n - 1 proper fractions with denominator *n* as strings."""
    yield from (f'{numerator}/{n}' for numerator in range(1, n))
#-- Affirm fraction_generator output
# Import-time sanity check: the generator must reproduce the d = 12
# example from the problem statement.
assert ([i for i in fraction_generator(12)] == sample), 'Fraction generator error!'
def split_n_check(itr: T.Collection[str], split_on: str = '/') -> T.Tuple[int, float]:
    """Count the resilient (non-cancellable) fractions in *itr*.

    Each element is split on *split_on* into numerator and denominator;
    a fraction is resilient when HCF(numerator, denominator) == 1.
    Returns (resilient_count, total_count).
    """
    total = len(itr)
    resilient = 0
    for frac in itr:
        parts = list(split_em(frac, split_on))
        numer, denom = int(parts[0]), int(parts[1])
        if HCF(numer, denom) == 1:
            resilient += 1
    return resilient, total
def R(n: int) -> str:
    """Return the resilience of denominator *n* as an "a/b" string."""
    resilient, total = split_n_check(list(fraction_generator(n)))
    return format_frac(resilient, int(total))
def _frac_value(frac: str) -> float:
    """Parse an "a/b" string into the float a/b (replaces eval())."""
    numer, denom = frac.split('/')
    return int(numer) / int(denom)
def eval_R(target_resilience: str, start: int = 10) -> T.Tuple[int, str]:
    """Find the smallest n >= start whose resilience R(n) is below target.

    Both the target and the computed resiliences are "a/b" strings and
    are compared numerically.  The original compared them via eval(),
    which is a code-injection hazard and slower; explicit fraction
    parsing is behaviorally identical (Python 3 true division).

    Returns (n, R(n) as a string).
    """
    max_resilience: float = _frac_value(target_resilience)
    n: int = start
    current_frac: str = R(n)  # seed the first comparison
    while _frac_value(current_frac) >= max_resilience:
        n += 1
        current_frac = R(n)
    return n, current_frac
# res, frac = eval_R('4/10')
# res, frac = eval_R('15499/94744')
def print_to_file(result: int, fraction: str, fn: str) -> None:
    """Write the problem-243 results to the file at path *fn*.

    Bug fix: the original passed the path string directly as print()'s
    ``file=`` argument, which raises AttributeError because print()
    requires a writable file object, not a path.  The file is now opened
    for writing (and closed) explicitly.
    """
    with open(fn, 'w') as out:
        print(f'Results of problem 243:\n\nN value: {result}\n\nFraction: {fraction}', file = out)
def print_msg(msg: str) -> None:
    """Print *msg* to stdout (thin wrapper kept for symmetric call sites)."""
    print(msg)
if __name__ == '__main__':
    # Search parameters: start probing at the target's denominator and
    # stop at the first denominator whose resilience drops below target.
    start_level = 94744
    target = '15499/94744'
    filepath: str = pjoin(getcwd(), r'incomplete\problem_243_results.txt')
    print(f'Script running...\nFinding largest fraction below {target}')
    # The original wrapped this call in a `while True:` loop that broke
    # unconditionally after the first iteration; the no-op loop was removed.
    res, frac = eval_R(target, start_level)
    print_msg('Processing complete!')
    print_to_file(res, frac, filepath)
    print_msg('Goodbye!')
    sleep(2)
|
#! /usr/bin/env python
# encoding: utf-8
"""
Django settings for bongo project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
import os
from config import *
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Project root: two directory levels above this settings file.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Application definition
INSTALLED_APPS = (
    'django.contrib.messages',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.staticfiles',
    'social.apps.django_app.default',
    'file_server',
    'utils',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
)
ROOT_URLCONF = 'bongo.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.core.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.debug',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
                'social.apps.django_app.context_processors.backends',
                'social.apps.django_app.context_processors.login_redirect',
            ],
        },
    },
]
WSGI_APPLICATION = 'bongo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Cache
# https://docs.djangoproject.com/en/1.3/ref/settings/#std:setting-CACHES
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        # 'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
        # 'LOCATION': '/var/tmp/bongo_cache',
    }
}
# Sessions are stored in the cache backend configured above.
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'CET'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_ROOT = '/var/www/bongo/static/'
STATIC_URL = '/static/'
# GitHub-team OAuth first, Django's local accounts as fallback.
AUTHENTICATION_BACKENDS = (
    'social.backends.github.GithubTeamOAuth2',
    'django.contrib.auth.backends.ModelBackend',
)
STATICFILES_DIRS = (
    # Twitter Bootstrap stuff
    os.path.join(BASE_DIR, "bootstrap/dist"),
    os.path.join(BASE_DIR, "bootstrap/assets")
)
SOCIAL_AUTH_GITHUB_TEAM_SCOPE = ['read:org']
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/'
SOCIAL_AUTH_LOGIN_URL = '/'
|
"""
Реализовать функцию int_func(), принимающую слово из
маленьких латинских букв и возвращающую его же, но с прописной первой буквой.
Например, print(int_func(‘text’)) -> Text.
Продолжить работу над заданием
В программу должна попадать строка из слов, разделенных пробелом.
Каждое слово состоит из латинских букв в нижнем регистре.
Сделать вывод исходной строки, но каждое слово должно начинаться с заглавной буквы.
Необходимо использовать написанную ранее функцию int_func().
"""
def int_func(word):
    """Return *word* with its first letter upper-cased, rest unchanged.

    Unlike str.capitalize(), the tail of the word is left exactly as-is.
    Robustness fix: slicing with word[:1] makes the empty string return
    '' instead of raising IndexError.
    """
    return word[:1].upper() + word[1:]
# Read a space-separated sentence and echo it with each word capitalized.
sentence = input("enter any words: ").split()
print(" ".join([int_func(word) for word in sentence]))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-24 18:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add the boolean ``is_current`` flag (default False)
    to the ``member`` model."""
    dependencies = [
        ('api', '0006_member_valid_through'),
    ]
    operations = [
        migrations.AddField(
            model_name='member',
            name='is_current',
            field=models.BooleanField(default=False),
        ),
    ]
|
# coding:utf-8
"""
题目:打印出如下图案(菱形)
*
***
*****
*******
*****
***
*
"""
# Print the 7-row diamond by walking the row widths directly (ascending
# odd widths, then descending); each row is left-padded so the stars
# stay centred.
for width in (1, 3, 5, 7, 5, 3, 1):
    print(" " * (4 - (width + 1) // 2), "*" * width)
from functools import reduce
def pipe(*args):
    """Compose *args* left-to-right: pipe(f, g)(x) == g(f(x)).

    With no functions supplied, the returned callable is the identity.
    """
    def _chained(val):
        for fn in args:
            val = fn(val)
        return val
    return _chained
|
from flask import Flask, request, \
redirect, url_for, \
render_template, jsonify
from flask_socketio import SocketIO, emit
from flask_yarn import Yarn
from flask_cas import CAS
from db import *
# Flask application wired with CAS single sign-on, Yarn asset handling and
# Socket.IO push updates.  APP_SECRET_KEY / CAS_SERVER / CAS_AFTER_LOGIN
# presumably come from db's star import -- TODO confirm.
app = Flask(__name__)
app.config['SECRET_KEY'] = APP_SECRET_KEY
app.config['CAS_SERVER'] = CAS_SERVER
app.config['CAS_AFTER_LOGIN'] = CAS_AFTER_LOGIN
socketIO = SocketIO(app)
Yarn(app)
cas = CAS(app)
@socketIO.on('connect')
def test_connect():
    # Acknowledge each newly connected websocket client.
    emit('my response', {'data': 'Connected'})
@socketIO.on('disconnect')
def test_disconnect():
    # Connection teardown is only logged, no state to clean up.
    print('Client disconnected')
@app.route('/query')
def query_records():
    """Return records as JSON, filtered by the `since`/`number` query params."""
    since = request.args.get('since')
    number = request.args.get('number')
    records = Records(number, since)
    return jsonify(records.get_records_dict())
@app.route('/add', methods=['POST'])
def add_record():
    """Store a submitted record and broadcast it to all connected clients."""
    nickname = request.form.get('nickname')
    content = request.form.get('content')
    remark = request.form.get('remark')
    result = Records.add_record((nickname, content, remark))
    # Push the new record to every websocket client, not just the sender.
    socketIO.emit('recordUpdate', result, broadcast=True)
    return jsonify(result)
@app.route('/')
def index():
    """Render the landing page; offer login or logout depending on
    whether a CAS user is currently signed in."""
    action = 'login' if cas.username is None else 'logout'
    return render_template('index.html', action=action)
@app.errorhandler(405)
def page_not_found(_):
    # Method-not-allowed requests are bounced back to the landing page.
    return redirect(url_for('index'))
# Development entry point; HOST/PORT/DEBUG presumably come from db's star
# import -- TODO confirm.
if __name__ == '__main__':
    socketIO.run(app, host=HOST, port=PORT, debug=DEBUG)
|
from urllib import FancyURLopener
from bs4 import BeautifulSoup
import re
PHONE_SITE = 'http://gsd-auth-callinfo.s3-website.us-east-2.amazonaws.com/'
class ValidUAOpener(FancyURLopener):
    # Override urllib's default user agent so the site serves the page to
    # what looks like a regular browser.  (Python 2 module.)
    version = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11)'
class PhoneNumberEntry:
    """One scraped report: phone number, report count and first comment.

    Python 2 class: __str__/__repr__ delegate to __unicode__ and encode
    to UTF-8 bytes.
    """
    def __init__(self, phone_number, report_count, comment):
        self.area_code = phone_number[:3]  # first three digits of the number
        self.phone_number = phone_number
        self.report_count = report_count
        # Escape double quotes so __unicode__ emits well-formed JSON-like text.
        self.comment = comment.replace('"', '\\"')
    def __unicode__(self):
        skeleton = u'{{ "area_code": "{}", "phone_number": "{}", "report_count": "{}", "comment": "{}" }}'
        return skeleton.format(self.area_code, self.phone_number, self.report_count, self.comment)
    def __str__(self):
        return unicode(self).encode('utf-8')
    def __repr__(self):
        return unicode(self).encode('utf-8')
class Parser:
    """Extract PhoneNumberEntry objects from the scraped listing HTML."""
    def __init__(self, html):
        self.soup = BeautifulSoup(html, 'html.parser')
    def entry_parse(self, html):
        """Parse a single list-item element into a PhoneNumberEntry."""
        num_of_reports = html.find(class_='oos_previewSide').getText()
        number = html.find(class_='oos_previewHeader').getText()
        #remove non-numbers (dashes are kept)
        for letter in number:
            if not letter.isdigit() and (letter != '-'):
                number = number.replace(letter,"")
        #Remove nested tags: take only the element's own direct text
        comment = html.find('div', class_='oos_previewBody').find(text=True,recursive=False).strip()
        return PhoneNumberEntry(number, num_of_reports, comment)
    def parse(self):
        """Return entries for every preview item on the page."""
        latest_entries = self.soup.find('ul', id='previews').find_all('li', class_='oos_listItem')
        #print latest_entries
        return map(self.entry_parse, latest_entries)
## Main
# Fetch the listing page with the spoofed user agent and print the parsed
# entries (Python 2 print statement).
if __name__ == "__main__":
    parser = Parser(ValidUAOpener().open(PHONE_SITE).read())
    print parser.parse()
|
# In a group of N people (labelled 0, 1, 2, ..., N-1), each person has different amounts of money, and different levels of quietness.
# For convenience, we'll call the person with label x, simply "person x".
# We'll say that richer[i] = [x, y] if person x definitely has more money than person y. Note that richer may only be a subset of valid observations.
# Also, we'll say quiet[x] = q if person x has quietness q.
# Now, return answer, where answer[x] = y if y is the least quiet person (that is, the person y with the smallest value of quiet[y]), among all people who definitely have equal to or more money than person x.
# 1 <= quiet.length = N <= 500
# 0 <= quiet[i] < N, all quiet[i] are different.
# 0 <= richer.length <= N * (N-1) / 2
# 0 <= richer[i][j] < N
# richer[i][0] != richer[i][1]
# richer[i]'s are all different.
# The observations in richer are all logically consistent.
class Solution:
    def loudAndRich(self, richer, quiet):
        """For each person x, find the quietest person among those at
        least as rich as x (LeetCode 851).

        :type richer: List[List[int]]  # [x, y] means x is richer than y
        :type quiet: List[int]
        :rtype: List[int]

        Fixes vs. the original: removed the leftover debug print in
        createBanks and replaced `== None` with the idiomatic `is None`.
        """
        numberPeople = len(quiet)
        # personalBanks[p] lists people known to be directly richer than p.
        self.personalBanks = [[] for _ in range(numberPeople)]
        self.quiet = quiet
        self.richer = richer
        # Memo: loudestRichest[p] is the quietest person >= p in wealth
        # (None means not yet computed).
        self.loudestRichest = [None] * numberPeople
        self.createBanks()
        return [self.getLoudestRichest(i) for i in range(numberPeople)]
    def createBanks(self):
        """Build the direct richer-than adjacency lists."""
        for rich, poor in self.richer:
            self.personalBanks[poor].append(rich)
    def getLoudestRichest(self, person):
        """Return (memoized) the quietest person at least as rich as *person*."""
        if self.loudestRichest[person] is None:
            # A person is always a candidate for themselves.
            self.loudestRichest[person] = person
            self.updateLoudestRichest(person)
        return self.loudestRichest[person]
    def updateLoudestRichest(self, person):
        """BFS through everyone richer than *person*, improving the memo."""
        queue = self.personalBanks[person]
        while queue:
            next_level = []
            for richer in queue:
                self.checkNewLoudestRichest(person, richer)
                next_level.extend(self.personalBanks[richer])
            queue = next_level
    def checkNewLoudestRichest(self, person, richer):
        """Adopt *richer*'s best candidate if quieter than the current one."""
        current = self.loudestRichest[person]
        challenger = self.getLoudestRichest(richer)
        if self.quiet[challenger] < self.quiet[current]:
            self.loudestRichest[person] = challenger
# Ad-hoc smoke tests: each case prints the computed answer followed by the
# expected answer on the next line.
driver = Solution()
print("_1_")
print(driver.loudAndRich([[1,0],[2,1],[3,1],[3,7],[4,3],[5,3],[6,3]],[3,2,5,4,6,1,7,0]))
print("[5, 5, 2, 5, 4, 5, 6, 7]\n")
print("_2_")
print(driver.loudAndRich([],[0,1]))
print("[0, 1]\n")
print("_3_")
print(driver.loudAndRich([[0,1]],[0,1]))
print("[0, 0]\n") |
#!/usr/bin/python
#-*- coding: UTF-8 -*-
#Only limited by top-level domain
import dns.resolver
# Prompt for a domain and print its NS records.  Python 2 script:
# raw_input and print-statement syntax.
domain = raw_input("Please input an domain: ")
ns = dns.resolver.query(domain, 'NS')
for i in ns.response.answer:
    for j in i.items:
        print j.to_text()
|
class Solution:
    def distributeCandies(self, candyType: List[int]) -> int:
        """Maximum number of distinct candy types Alice can eat given
        she may eat only half of the candies (LeetCode 575).

        The answer is min(distinct types, len(candyType) // 2); a set
        yields the distinct count directly, replacing the original
        hand-rolled dict-based counter.
        """
        return min(len(candyType) // 2, len(set(candyType)))
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated initial migration for the Item app: creates
    ClasseItem, Item and Marca (the b'' verbose names appear to be
    escaped UTF-8 Portuguese labels)."""
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='ClasseItem',
            fields=[
                ('codigo', models.AutoField(serialize=False, primary_key=True)),
                ('descricao', models.CharField(max_length=100, verbose_name=b'Descri\xc3\xa7\xc3\xa3o')),
            ],
        ),
        migrations.CreateModel(
            name='Item',
            fields=[
                ('codigo', models.AutoField(serialize=False, primary_key=True)),
                ('nome', models.CharField(max_length=255, verbose_name=b'Nome do Item')),
                ('tipo', models.IntegerField(verbose_name=b'Tipo Item', choices=[(0, b'Material'), (1, b'Servi\xc3\xa7o')])),
                ('classe', models.ForeignKey(verbose_name=b'Categoria', to='Item.ClasseItem')),
            ],
        ),
        migrations.CreateModel(
            name='Marca',
            fields=[
                ('codigo', models.AutoField(serialize=False, primary_key=True)),
                ('descricao', models.CharField(max_length=255, verbose_name=b'Descri\xc3\xa7\xc3\xa3o')),
                ('observacao', models.TextField(verbose_name=b'Observa\xc3\xa7\xc3\xa3o')),
            ],
        ),
        migrations.AddField(
            model_name='item',
            name='marca',
            field=models.ForeignKey(verbose_name=b'Marca', to='Item.Marca'),
        ),
    ]
|
import math
import numpy as np
from qsim.codes import qubit
from qsim.tools import tools
class State(np.ndarray):
    """ndarray subclass that tags a quantum state with bookkeeping
    metadata: ket vs density-matrix flag, qudit code, dimension and
    independent-set-subspace information."""
    def __new__(cls, state, is_ket=None, code=qubit, IS_subspace=False, graph=None):
        # TODO: add code manipulation tools as class attributes
        # Input array is an already formed ndarray instance
        # We first cast to be our class type
        arr = state.view(cls).astype(np.complex128, copy=False)
        # add the new attribute to the created instance
        # Assume is_ket is a static attribute
        if is_ket is None:
            # Infer ket vs density matrix from the array itself.
            arr.is_ket = tools.is_ket(state)
        else:
            arr.is_ket = is_ket
        arr.code = code
        arr.dimension = state.shape[0]
        arr.IS_subspace = IS_subspace
        # Identify the number of physical qudits
        # TODO: identify how to deal with this if you're on a graph
        arr.graph = graph
        if IS_subspace:
            # NOTE(review): logical and physical counts are both graph.n in
            # this branch -- confirm that is intended.
            arr.number_physical_qudits = graph.n
            arr.number_logical_qudits = graph.n
        else:
            arr.number_physical_qudits = int(math.log(state.shape[0], code.d))
            arr.number_logical_qudits = int(math.log(state.shape[0], code.d) / code.n)
        # Finally, we must return the newly created object:
        return arr
    def __array_finalize__(self, arr):
        # numpy hook invoked for views/copies: propagate the metadata
        # (falls back to None when the source was not a State).
        if arr is None: return
        self.is_ket = getattr(arr, 'is_ket', None)
        self.dimension = getattr(arr, 'dimension', None)
        self.code = getattr(arr, 'code', None)
        self.IS_subspace = getattr(arr, 'IS_subspace', None)
        self.graph = getattr(arr, 'graph', None)
        self.number_logical_qudits = getattr(arr, 'number_logical_qudits', None)
        self.number_physical_qudits = getattr(arr, 'number_physical_qudits', None)
|
# Copyright (c) 2020-2021 impersonator.org authors (Wen Liu and Zhixin Piao). All rights reserved.
"""
Assume that we put the iPER data into $FashionVideo_root_dir
1. Download the FashionVideo dataset, https://vision.cs.ubc.ca/datasets/fashion/
1.1 download the `fashion_train.txt` into $FashionVideo_root_dir:
https://vision.cs.ubc.ca/datasets/fashion/resources/fashion_dataset/fashion_train.txt
1.2 download the `fashion_test.txt` into $FashionVideo_root_dir:
https://vision.cs.ubc.ca/datasets/fashion/resources/fashion_dataset/fashion_test.txt
1.3 crawl each video in `fashion_train.txt`, as well as `fashion_test.txt` and
save them into $FashionVideo_root_dir/videos
The file structure of $FashionVideo_root_dir will be:
$FashionVideo_root_dir:
--fashion_train.txt
--fashion_test.txt
--videos:
2. Preprocess all videos in $FashionVideo_root_dir/videos.
3. Reorganize the processed data for evaluations, https://github.com/iPERDance/his_evaluators
"""
import sys
import os
import subprocess
import argparse
from tqdm import tqdm
import requests
from urllib.request import urlopen
from iPERCore.services.options.options_setup import setup
from iPERCore.services.options.process_info import ProcessInfo
from iPERCore.tools.utils.filesio.persistence import mkdir
from iPERCore.services.preprocess import human_estimate, digital_deform
# Command-line configuration for the FashionVideo preprocessing run.
parser = argparse.ArgumentParser()
parser.add_argument("--output_dir", type=str, required=True, help="the root directory of iPER dataset.")
parser.add_argument("--gpu_ids", type=str, default="9", help="the gpu ids.")
parser.add_argument("--image_size", type=int, default=512, help="the image size.")
parser.add_argument("--model_id", type=str, default="FashionVideo_Preprocess", help="the renamed model.")
parser.add_argument("--Preprocess.Cropper.src_crop_factor", type=float, default=0, help="directly resize on iPER.")
parser.add_argument("--ref_path", type=str, default="", help="set this empty when preprocessing training dataset.")
args = parser.parse_args()
args.cfg_path = os.path.join("./assets", "configs", "deploy.toml")
# Directory/file layout under --output_dir (see the module docstring).
FashionVideo_root_dir = mkdir(args.output_dir)
FashionVideo_video_dir = mkdir(os.path.join(FashionVideo_root_dir, "videos"))
FashionVideo_train_url_txt = os.path.join(FashionVideo_root_dir, "fashion_train.txt")
FashionVideo_test_url_txt = os.path.join(FashionVideo_root_dir, "fashion_test.txt")
TRAIN_list_txt = os.path.join(FashionVideo_root_dir, "train.txt")
TEST_list_txt = os.path.join(FashionVideo_root_dir, "val.txt")
TRAIN_URL = "https://vision.cs.ubc.ca/datasets/fashion/resources/fashion_dataset/fashion_train.txt"
TEST_URL = "https://vision.cs.ubc.ca/datasets/fashion/resources/fashion_dataset/fashion_test.txt"
def raise_error(msg):
    """Print *msg* plus the manual-download instructions, then abort.

    Bug fix: the original exited with status 0, which made failures look
    like success to shell scripts and CI; the exit status is now 1.
    """
    instruction_url = "https://vision.cs.ubc.ca/datasets/fashion/resources/fashion_dataset"
    print(f"{msg} Please manually download all stuffs follow the instruction in {instruction_url}")
    sys.exit(1)
def download_from_url_to_file(url, file_path):
    """Download *url* into *file_path*; return True iff HTTP 200.

    Fixes vs. the original: the body was written to disk before the
    status code was checked (leaving a bogus file behind on failure),
    and despite stream=True the whole body was buffered in memory via
    r.content.  The status is now checked first and the body streamed
    to disk in chunks.
    """
    print(f"Download {url}")
    r = requests.get(url, stream=True)
    success = (r.status_code == 200)
    if success:
        with open(file_path, "wb") as f:
            for chunk in r.iter_content(chunk_size=64 * 1024):
                f.write(chunk)
    return success
def download_from_url(url, dst):
    """Download *url* to *dst*, resuming from a partial file if present.

    Args:
        url (str): url to download file
        dst (str): dst place to put the file

    Returns:
        bool: True when the file on disk covers the size reported by the
        server (no Content-Length reported counts as complete).

    Fixes vs. the original: the progress bar advanced by a fixed 1024
    bytes per chunk (overcounting short/final chunks) and per-chunk
    debug prints cluttered stdout; both are corrected/removed.
    """
    file_size = int(urlopen(url).info().get("Content-Length", -1))
    if os.path.exists(dst):
        first_byte = os.path.getsize(dst)
    else:
        first_byte = 0
    if first_byte >= file_size:
        return True
    # Request only the missing byte range so the download resumes.
    header = {"Range": "bytes=%s-%s" % (first_byte, file_size)}
    pbar = tqdm(
        total=file_size, initial=first_byte,
        unit="B", unit_scale=True, desc=url.split("/")[-1])
    req = requests.get(url, headers=header, stream=True)
    content_size = first_byte
    with open(dst, "ab") as f:
        for chunk in req.iter_content(chunk_size=1024):
            if chunk:
                f.write(chunk)
                content_size += len(chunk)
                # Advance by the actual chunk length, not a fixed 1024.
                pbar.update(len(chunk))
    pbar.close()
    return content_size >= file_size
def download_train_test_url_txt():
    """Fetch the train/test video-url lists, aborting the run on failure."""
    global FashionVideo_root_dir, FashionVideo_train_url_txt, FashionVideo_test_url_txt, TRAIN_URL, TEST_URL
    success = download_from_url_to_file(TRAIN_URL, FashionVideo_train_url_txt)
    if not success or not os.path.exists(FashionVideo_train_url_txt):
        raise_error(f"Download {TRAIN_URL} failed.")
    success = download_from_url_to_file(TEST_URL, FashionVideo_test_url_txt)
    if not success or not os.path.exists(FashionVideo_test_url_txt):
        raise_error(f"Download {TEST_URL} failed.")
def crawl_videos(url_txt_file):
    """Download every video listed in *url_txt_file* into the videos dir.

    Args:
        url_txt_file (str): the txt file contains all video urls.

    Returns:
        list[str]: the urls downloaded successfully.  The whole run
        aborts via raise_error on the first failure.
    """
    global FashionVideo_video_dir
    video_urls = []
    with open(url_txt_file, "r") as reader:
        # TODO, convert this to multi-thread or multi-process?
        for vid_url in tqdm(reader.readlines()):
            vid_url = vid_url.rstrip()
            file_name = os.path.split(vid_url)[-1]
            video_path = os.path.join(FashionVideo_video_dir, file_name)
            success = download_from_url(vid_url, video_path)
            if success and os.path.exists(video_path):
                print(f"crawl {vid_url}")
                video_urls.append(vid_url)
            else:
                raise_error(f"crawl {vid_url} failed.")
    return video_urls
def extract_one_video(video_path, save_dir):
    """Dump every frame of *video_path* into *save_dir* as
    frame_00000000.png, frame_00000001.png, ... via ffmpeg."""
    os.makedirs(save_dir, exist_ok=True)
    frame_pattern = save_dir + "/frame_%08d.png"
    cmd = ["ffmpeg", "-i", video_path, "-start_number", "0",
           frame_pattern, "-loglevel", "quiet"]
    print(" ".join(cmd))
    subprocess.run(cmd)
def get_video_dirs(txt_file):
    """Read one video name per line from *txt_file*, stripping trailing
    whitespace/newlines."""
    with open(txt_file, "r") as reader:
        return [line.rstrip() for line in reader]
def prepare_src_path():
    """Build one "path?=...,name?=..." source descriptor per downloaded
    video; the joined result becomes args.src_path downstream."""
    global FashionVideo_video_dir
    template_path = "path?={path},name?={name}"
    src_paths = []
    for vid_name in os.listdir(FashionVideo_video_dir):
        vid_path = os.path.join(FashionVideo_video_dir, vid_name)
        assert os.path.exists(vid_path)
        path = template_path.format(path=vid_path, name=vid_name)
        src_paths.append(path)
        print(path)
    return src_paths
def get_video_names(video_urls):
    """Map each video url to its file name, asserting every file exists
    locally (i.e. the download succeeded)."""
    global FashionVideo_video_dir
    names = []
    for vid_url in video_urls:
        basename = os.path.split(vid_url)[-1]
        local_path = os.path.join(FashionVideo_video_dir, basename)
        assert os.path.exists(local_path), f"download {vid_url} failed."
        names.append(basename)
    return names
def download():
    """Download the url lists and all videos, then write train.txt and
    val.txt containing the local video file names."""
    global FashionVideo_train_url_txt, FashionVideo_test_url_txt, TRAIN_list_txt, TEST_list_txt
    download_train_test_url_txt()
    train_urls = crawl_videos(FashionVideo_train_url_txt)
    test_urls = crawl_videos(FashionVideo_test_url_txt)
    train_names = get_video_names(train_urls)
    test_names = get_video_names(test_urls)
    # Videos appearing in both splits, printed for manual inspection.
    same_set = set(train_names) & set(test_names)
    print(same_set)
    with open(TRAIN_list_txt, "w") as writer:
        train_lines = "\n".join(train_names)
        writer.writelines(train_lines)
    with open(TEST_list_txt, "w") as writer:
        test_lines = "\n".join(test_names)
        writer.writelines(test_lines)
def process_data():
    """Run iPERCore preprocessing over all downloaded videos and report
    any videos whose processing did not complete."""
    # 1. preprocess
    src_paths = prepare_src_path()
    args.src_path = "|".join(src_paths)
    print(args.src_path)
    # set this as empty when preprocessing the training dataset.
    args.ref_path = ""
    cfg = setup(args)
    # 1. human estimation, including 2D pose, tracking, 3D pose, parsing, and front estimation.
    human_estimate(opt=cfg)
    # 2. digital deformation.
    digital_deform(opt=cfg)
    # 3. check
    meta_src_proc = cfg.meta_data["meta_src"]
    invalid_meta_process = []
    for meta_proc in meta_src_proc:
        process_info = ProcessInfo(meta_proc)
        process_info.deserialize()
        # check it has been processed successfully
        if not process_info.check_has_been_processed(process_info.vid_infos, verbose=False):
            invalid_meta_process.append(meta_proc)
    num_invalid = len(invalid_meta_process)
    if num_invalid > 0:
        for meta_proc in invalid_meta_process:
            print(f"invalid meta proc {meta_proc}")
    else:
        print(f"process successfully.")
def reorganize():
    """Placeholder: reorganize processed data for his_evaluators
    evaluation (see module docstring, step 3)."""
    # TODO, support evaluations
    pass
if __name__ == "__main__":
download()
process_data()
reorganize()
|
from infrastructure.common.database import DBSession # Base
def resolve_db_session():
    """Dependency provider: return a new session from the session factory."""
    # Base.metadata.create_all()  (schema creation intentionally disabled here)
    return DBSession()
|
from utils import enter_depend_test, STEPS, RESULT, SETUP
enter_depend_test()
from depend_test_framework.test_object import Mist, MistDeadEndException, MistClearException
from depend_test_framework.dependency import Provider, Consumer
def set_ivshmem_device(params, env):
    """
    Add the ivshmem device in guest xml.

    Logs the XML snippet to the test document.  When the shmem name
    contains '/', returns a Mist that documents the expected start/attach
    failures (shmem names must not contain '/'); otherwise returns None.
    """
    if params.ivshmem.model != 'ivshmem-plain':
        raise NotImplementedError
    params.doc_logger.info(STEPS + """
    # cat ivshmem.xml
    <shmem name='%s'>
    <model type='%s'/>
    <size unit='KiB'>%d</size>
    </shmem>
    """ % (params.ivshmem.name,
           params.ivshmem.model,
           params.ivshmem.size if params.ivshmem.size else 4096))
    if '/' in params.ivshmem.name:
        # Provider transitions for the "start a guest with the device"
        # path and the "attach to a running guest" path.
        start = [Provider('$guest_name.active', Provider.CLEAR),
                 Provider('$guest_name.active.ivshmem', Provider.CLEAR)]
        end = [Provider('$guest_name.active.ivshmem', Provider.SET)]
        start2 = [Provider('$guest_name.active', Provider.SET),
                  Provider('$guest_name.active.ivshmem', Provider.CLEAR)]
        end2 = [Provider('$guest_name.active.ivshmem', Provider.SET)]
        def use_invald_ivshmem_name(name, func, params, env):
            """
            Guest should fail to start with a invalid ivshmem device
            """
            if name == 'start':
                params.doc_logger.info(STEPS + "# virsh start %s", params.guest_name)
                params.doc_logger.info(RESULT + """
                error: Failed to start domain %s
                error: unsupported configuration: shmem name '%s' must not contain '/'
                """ % (params.guest_name, params.ivshmem.name))
            elif name == 'attach':
                params.doc_logger.info(STEPS + "# virsh attach-device %s ivshmem.xml --live", params.guest_name)
                params.doc_logger.info(RESULT + """
                error: Failed to attach device from ivshmem.xml
                error: unsupported configuration: shmem name '%s' must not contain '/'
                """ % params.ivshmem.name)
            # The test path ends here: the invalid device can never attach.
            raise MistDeadEndException
        return Mist({"start": (start, end), "attach": (start2, end2)}, use_invald_ivshmem_name)
def check_ivshmem_cmdline(params, env):
    """
    check qemu command line:

    Documents grepping the qemu process for the memory-backend-file/ivshmem
    options matching the configured device.
    """
    log = params.doc_logger.info
    # qemu reports the share size in bytes; 4 MiB is the default
    shm_size = params.ivshmem.size if params.ivshmem.size else 4194304
    log(STEPS + "# ps aux|grep %s" % params.guest_name)
    log(RESULT + """
    ...
    -object memory-backend-file,id=shmmem-shmem0,mem-path=/dev/shm/%s,size=%d,share=yes -device ivshmem-plain,id=shmem0,memdev=shmmem-shmem0
    ...
    """ % (params.ivshmem.name, shm_size))
def check_ivshmem_in_guest(params, env):
    """
    Verify the ivshmem device function in guest

    Both the step and the result are still unwritten placeholders.
    """
    for prefix in (STEPS, RESULT):
        params.doc_logger.info(prefix + "TODO")
def check_ivshmem_audit(params, env):
    """
    Check the audit system

    Returns a Mist that wraps the 'attach', 'start' and 'detach' transitions
    with an audit-log verification step.
    """
    params.doc_logger.info(STEPS + "Make sure the auditd is running")
    # hot attach: guest already active, ivshmem goes CLEAR -> SET
    start1 = [Provider('$guest_name.active', Provider.SET),
              Provider('$guest_name.active.ivshmem', Provider.CLEAR)]
    end1 = [Provider('$guest_name.active.ivshmem', Provider.SET)]
    # cold start: guest inactive, ivshmem goes CLEAR -> SET
    start2 = [Provider('$guest_name.active', Provider.CLEAR),
              Provider('$guest_name.active.ivshmem', Provider.CLEAR)]
    end2 = [Provider('$guest_name.active.ivshmem', Provider.SET)]
    # hot detach: ivshmem goes SET -> CLEAR while the guest stays active
    start3 = [Provider('$guest_name.active.ivshmem', Provider.SET)]
    end3 = [Provider('$guest_name.active.ivshmem', Provider.CLEAR),
            Provider('$guest_name.active', Provider.SET)]
    def check_audit_log(name, func, params, env):
        """
        Check the audit log

        Runs the wrapped operation first, then documents the ausearch check;
        `name` ('attach'/'start'/'detach') is also the audit `reason` field.
        """
        func(params, env)
        active_info = env.get_data('$guest_name.active').data
        params.doc_logger.info("")
        params.doc_logger.info(STEPS + "# ausearch -m VIRT_RESOURCE -ts recent")
        params.doc_logger.info(RESULT + """
    ...
    type=VIRT_RESOURCE ... msg='virt=kvm resrc=shmem reason=%s vm="%s" uuid=%s size=%d path=/dev/shm/%s exe="/usr/sbin/libvirtd" hostname=? addr=? terminal=? res=success'
    ...
    """ % (name, active_info.get('name'),
           active_info.get('uuid'),
           params.ivshmem.size if params.ivshmem.size else 4194304,
           params.ivshmem.name,))
    return Mist({"attach": (start1, end1), "start": (start2, end2), "detach": (start3, end3)}, check_audit_log)
def hot_plug_ivshmem(params, env):
    """
    Hot plug a ivshmem device

    Logs the device XML and the virsh attach-device step with its expected
    success message.
    """
    log = params.doc_logger.info
    # XML sizes are in KiB; 4096 KiB is the default
    size_kib = params.ivshmem.size if params.ivshmem.size else 4096
    log("ivshmem.xml:")
    log("""
    <shmem name='%s'>
    <model type='%s'/>
    <size unit='KiB'>%d</size>
    </shmem>
    """ % (params.ivshmem.name, params.ivshmem.model, size_kib))
    log(STEPS + "# virsh attach-device %s ivshmem.xml --live" % params.guest_name)
    log(RESULT + "Device attached successfully")
def hot_unplug_ivshmem(params, env):
    """
    Hot unplug ivshmem device

    Logs the device XML and the virsh detach-device step with its expected
    success message.
    """
    log = params.doc_logger.info
    # XML sizes are in KiB; 4096 KiB is the default
    size_kib = params.ivshmem.size if params.ivshmem.size else 4096
    log("ivshmem.xml:")
    log("""
    <shmem name='%s'>
    <model type='%s'/>
    <size unit='KiB'>%d</size>
    </shmem>
    """ % (params.ivshmem.name, params.ivshmem.model, size_kib))
    log(STEPS + "# virsh detach-device %s ivshmem.xml --live" % params.guest_name)
    log(RESULT + "Device detached successfully")
|
"""All sorts of geo utils for measurement project
"""
import math, sys, getopt
def distance(origin, destination, radius=6371):
    """Great-circle distance between two (lat, lon) points, via Haversine.

    The Haversine formula gives great-circle distances between two points on a
    sphere from their latitudes and longitudes; applied to the Earth this is an
    approximation, since the Earth is not a perfect sphere. The default radius
    (6371 km, the FAI aviation value) makes the result kilometers.
    """
    lat1, lon1 = origin
    lat2, lon2 = destination
    # half-angle sines of the latitude/longitude deltas
    sin_dlat = math.sin(math.radians(lat2 - lat1) / 2)
    sin_dlon = math.sin(math.radians(lon2 - lon1) / 2)
    a = (sin_dlat * sin_dlat
         + math.cos(math.radians(lat1)) * math.cos(math.radians(lat2))
         * sin_dlon * sin_dlon)
    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    return radius * c
if __name__ == "__main__":
    # parse command line options
    # NOTE: Python 2 syntax throughout this block (`except ..., msg` and
    # print statements) — this snippet predates Python 3.
    try:
        opts, args = getopt.getopt(sys.argv[1:], "h", ["help"])
    except getopt.error, msg:
        print msg
        print "for help use --help"
        sys.exit(2)
    # process options
    for o, a in opts:
        if o in ("-h", "--help"):
            print __doc__
            sys.exit(0)
    # demo: distance (km) between Seattle and Olympia, WA
    seattle = [47.621800, -122.350326]
    olympia = [47.041917, -122.893766]
    print "distance:", distance(seattle, olympia)
## {{{ http://code.activestate.com/recipes/577360/ (r1)
import threading
def threaded_map(func, data, timeout=None):
    """
    Similar to the builtin function map(). But spawn a thread for each argument
    and apply `func` concurrently.

    Note: unlike map(), we cannot take an iterable argument. `data` should be an
    indexable sequence.

    :param func: callable applied to each element of `data`
    :param data: indexable sequence of inputs
    :param timeout: optional per-thread join timeout (seconds); a slot stays
        None if its thread has not finished within the timeout
    :return: list of results, one slot per input element
    """
    N = len(data)
    result = [None] * N
    # wrapper to dispose the result in the right slot
    def task_wrapper(i):
        result[i] = func(data[i])
    # BUG FIX: `xrange` is Python-2-only; `range` behaves identically here
    # on both Python 2 and Python 3.
    threads = [threading.Thread(target=task_wrapper, args=(i,)) for i in range(N)]
    # daemon threads so a hung worker cannot block interpreter exit
    for t in threads:
        t.daemon = True
        t.start()
    for t in threads:
        t.join(timeout) if timeout else t.join()
    return result
## end of http://code.activestate.com/recipes/577360/ }}}
def outputException(e):
    """Pretty-print an exception and its traceback (currently disabled).

    NOTE(review): the unconditional `return` below short-circuits the whole
    body, so this function is effectively a no-op — presumably debug output
    that was deliberately switched off; confirm before re-enabling. The body
    is Python 2 code (print statements, `rpyc.core.async`).
    """
    import rpyc, traceback
    try:
        return
        if type(e) is rpyc.core.async.AsyncResultTimeout:
            print 'Result Timeout'
        elif type(e) is EOFError:
            print e, '-----------'
        else:
            print e
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_tb(exc_traceback)
    except:
        # swallow any error raised while reporting — reporting must never crash
        return
|
# ch10_5.py
from tkinter import *
from tkinter import messagebox
def myMsg1():
    """Show a retry/cancel dialog and print the user's choice (True/False)."""
    ret = messagebox.askretrycancel("Test1","安裝失敗,再試一次?")
    print("安裝失敗",ret)
def myMsg2():
    """Show a yes/no/cancel dialog and print the user's choice (True/False/None)."""
    ret = messagebox.askyesnocancel("Test2","編輯完成,是或否或取消?")
    print("編輯完成",ret)
root = Tk()
root.title("ch10_5")            # window title
# two buttons, each opening one of the message boxes above
Button(root,text="安裝失敗",command=myMsg1).pack()
Button(root,text="編輯完成",command=myMsg2).pack()
root.mainloop()
|
"""
Author: Justin Cappos
Description:
It should be okay to put __ in a doc string...
"""
#pragma repy
# Intentionally trivial fixture: verifies double underscores are legal in a
# function docstring (do not change the docstring text — it IS the test data).
def foo():
    """__ should also be allowed here__"""
    pass
# Same fixture idea for a class docstring (docstring text is the test data).
class bar:
    """__ and here__"""
    pass
|
import gzip
import os, shutil, wget
# STOLEN from NeMo
def prepare_arpa_file(arpa):
    """Return a local path to a plain-text ARPA language model.

    Known OpenSLR model names are downloaded/unpacked; a Colab-mounted
    ``/mydrive/*.gz`` path is unpacked in place; anything else is assumed to
    already be a usable ARPA file and returned unchanged.
    """
    openslr_models = ("3-gram.arpa", "4-gram.arpa",
                      "3-gram.pruned.1e-7.arpa", "3-gram.pruned.3e-7.arpa")
    if arpa in openslr_models:
        return preprocess_openslr_arpa(arpa)
    if arpa.endswith(".gz") and arpa.startswith("/mydrive"):  # very hacky! just for colab!
        lm_path = arpa.replace('.gz', '').split("/")[-1]
        assert os.system(f"zcat {arpa} > {lm_path}") == 0
        return lm_path
    return arpa
def preprocess_openslr_arpa(arpa):
    """Download (if needed), unzip and lowercase an OpenSLR ARPA model.

    Each step is skipped when its output already exists; returns the path of
    the lowercased .arpa file.
    """
    lm_gzip_path = f'{arpa}.gz'
    # step 1: fetch the gzipped model unless it (or the plain file) is present
    if os.path.exists(lm_gzip_path) or os.path.isfile(arpa):
        print('Pruned .arpa.gz already exists.')
    else:
        print('Downloading pruned 3-gram model.')
        lm_url = f'http://www.openslr.org/resources/11/{lm_gzip_path}'
        lm_gzip_path = wget.download(lm_url)
        print(f'Downloaded {lm_gzip_path}')
    # step 2: decompress to the plain .arpa file
    uppercase_lm_path = f'{arpa}'
    if os.path.exists(uppercase_lm_path):
        print('Unzipped .arpa already exists.')
    else:
        with gzip.open(lm_gzip_path, 'rb') as f_zipped:
            with open(uppercase_lm_path, 'wb') as f_unzipped:
                shutil.copyfileobj(f_zipped, f_unzipped)
        print('Unzipped the 3-gram language model.')
    # step 3: write a lowercased copy for the decoder
    lm_path = f'lowercase_{arpa}'
    if not os.path.exists(lm_path):
        with open(uppercase_lm_path, 'r') as f_upper:
            with open(lm_path, 'w') as f_lower:
                for line in f_upper:
                    f_lower.write(line.lower())
        print('Converted language model file to lowercase.')
    return lm_path
if __name__ == '__main__':
    # demo: fetch/unpack/lowercase the pruned OpenSLR 3-gram model
    arpa = '3-gram.pruned.1e-7.arpa'
    prepare_arpa_file(arpa)
import random
class Alice:
    """Receiver side of a 1-out-of-2 oblivious-transfer style exchange.

    Alice holds a credit score `x` and a random blinding value `k`; Bob's
    public key (e, N) and random messages (x0, x1) arrive via
    recv_pubkey_rand, and recv_threshold picks which promo code to request.
    """

    def __init__(self, x):
        """Store Alice's credit score and draw the random blinding value k."""
        self.x = x
        self.k = random.randint(10, 99)  # Toned down to the level Python can handle
        self.e = 0
        self.N = 0
        self.x0 = 0
        self.x1 = 0
        self.sel = -1  # -1 until a promo has been selected
        print(f"Alice: x={x}")

    def recv_pubkey_rand(self, e, N, x0, x1):
        """Record Bob's public key (e, N) and his two random messages."""
        self.e, self.N = e, N
        self.x0, self.x1 = x0, x1

    def recv_threshold(self, y_enc):
        """Compare the encrypted threshold with x and return the blinded choice v."""
        comparison = y_enc >= self.x
        if comparison.get_plain_text().item() == 1.0:
            self.sel, chosen = 1, self.x1
            print("Alice: x is lower or equal to y, selecting promo1")
        else:
            self.sel, chosen = 0, self.x0
            print("Alice: x is greater than y, selecting promo0")
        return (chosen + self.k ** self.e) % self.N

    def recv_promos(self, p0, p1):
        """Receive both obfuscated promo codes and report the selected one."""
        # NOTE(review): `promo` is picked but never decrypted or returned here —
        # presumably the caller handles the reveal; confirm against the driver.
        promo = p1 if self.sel != 0 else p0
        print(f"Alice: obtained promo code {self.sel}")
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from selenium import webdriver
import time
# driver = webdriver.Chrome()
driver = webdriver.Firefox()
driver.get("http://www.youdao.com")
# add a session cookie with the given name and value
driver.add_cookie({'name':'key-aaaaaaa', 'value':'value-bbbbbb'})
# iterate over all cookies and print each name -> value pair,
# including the one added above
for cookie in driver.get_cookies():
    print("%s -> %s" % (cookie['name'], cookie['value']))
|
import numpy as np
from scipy.special import gamma, assoc_laguerre, sph_harm
from sympy.physics.wigner import wigner_3j, wigner_6j
from sympy import N
import matplotlib.pyplot as plt
import mcint
import random
import math
from scipy import integrate
import time
def f(r_pol):
    """Integrand over (u, theta); theta is accepted but unused.

    Computes -ln(u) * exp(-(ln u)^2) / u for the Monte-Carlo estimate below.
    """
    u, _theta = r_pol
    log_u = np.log(u)
    return -log_u * np.exp(-(log_u ** 2)) / u
def sampler():
    """Yield an endless stream of (u, theta) samples.

    u is uniform on [0, 1]; theta is uniform on [0, 2*pi].
    """
    while True:
        # same draw order as before: u first, then theta
        yield (random.uniform(0, 1), random.uniform(0, 2 * math.pi))
# Monte-Carlo integration driver: average f over the (u, theta) domain.
domainsize = 2*math.pi
nmcs = []
elapsed = []
errors = []
for nmc in [5**10]:
    start = time.time()
    # mcint draws samples from sampler() and scales the mean by `measure`
    result,trash = mcint.integrate(f, sampler(), measure=domainsize, n=nmc)
    nmcs.append(nmc)
    elapsed.append(time.time()-start)
    # the estimate is compared against np.pi to record the error
    errors.append(result-np.pi)
print(errors)
|
import sys
import subprocess

# BUG FIX: the original module body contained a bare shell command
# (`python SitemapPyContent.py samsung galaxy s3 pantalla blanca > ...`),
# which is a Python SyntaxError, plus the Python-2-only `commands` module.
# Reconstructed as an equivalent subprocess call whose stdout goes to the
# same log file (subprocess supersedes `commands`).
with open(r"C:\Dev\Python\logsSFMS\custom.txt", "w") as log_file:
    subprocess.run(
        [sys.executable, "SitemapPyContent.py",
         "samsung", "galaxy", "s3", "pantalla", "blanca"],
        stdout=log_file,
        check=False,
    )
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    """Find the node at which two singly linked lists intersect."""

    def length(self, A):
        """Return the number of nodes in the list headed by A."""
        count = 0
        node = A
        while node is not None:
            node = node.next
            count += 1
        return count

    # @param A : head node of linked list
    # @param B : head node of linked list
    # @return the head node in the linked list
    def getIntersectionNode(self, A, B):
        """Advance the longer list by the length gap, then walk in lockstep.

        Returns the first common node, or None when the lists never meet.
        """
        gap = self.length(A) - self.length(B)
        node_a, node_b = A, B
        # skip the extra prefix of whichever list is longer
        for _ in range(abs(gap)):
            if gap > 0:
                node_a = node_a.next
            else:
                node_b = node_b.next
        # both pointers are now equidistant from the end; walk together
        while node_a != node_b:
            node_a = node_a.next
            node_b = node_b.next
        return node_a
|
import os
ope = os.path.exists  # short alias for path-existence checks
import numpy as np
import socket
import warnings
warnings.filterwarnings('ignore')
# report which machine the job runs on (the UDP socket itself is unused here)
sk = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
hostname = socket.gethostname()
print('run on %s' % hostname)
# cluster-specific absolute data/result/pretrained-model locations
RESULT_DIR = "/data4/data/siim_open/result"
DATA_DIR = "/data5/data/siim_open"
PRETRAINED_DIR = "/data5/data/pretrained"
# numeric constants
PI = np.pi
INF = np.inf
EPS = 1e-12
# column names used across the pipeline
ID = 'ImageId'
TARGET = 'EncodedPixels'
IMG_SIZE = 1024
CROP_ID = 'CropImageId'
MASK_AREA = 'MaskArea'
DATASET = 'dataset'
PNEUMOTHORAX = 'Pneumothorax'
# class labels (names suggest the NIH chest X-ray label set — confirm)
NIH_LABELS = [
    'Atelectasis', 'Cardiomegaly', 'Consolidation', 'Edema', 'Effusion',
    'Emphysema', 'Fibrosis', 'Hernia', 'Infiltration', 'Mass',
    'No Finding', 'Nodule', 'Pleural_Thickening', 'Pneumonia', PNEUMOTHORAX
]
|
from django.db import models
from django.conf import settings
# Create your models here.
class Blog(models.Model):
    """A single blog post authored by a site user."""
    text = models.TextField(max_length=10000)  # post body
    title = models.CharField(max_length=40)  # post title
    # posts are deleted together with their author (CASCADE)
    author = models.ForeignKey(settings.AUTH_USER_MODEL,on_delete=models.CASCADE,verbose_name='作者')
    time = models.DateField()  # publication date
    def __str__(self):
        """Readable representation: the post title."""
        return self.title
import warnings
# Public names exported by `import *` from this package.
# BUG FIX: two missing commas made Python silently concatenate adjacent string
# literals ("parameters" "model" -> "parametersmodel" and
# "SequentialEvaluator" 'ReplicatorModel' -> "SequentialEvaluatorReplicatorModel"),
# and "peform_experiments" was a typo for the actually-imported
# perform_experiments (see the .evaluators import below).
__all__ = ["ema_parallel", "parameters",
           "model", "outcomes", "samplers",
           "Model", 'FileModel', "ModelEnsemble",
           "ScalarOutcome", "TimeSeriesOutcome", "Constraint",
           "RealParameter", "IntegerParameter", "CategoricalParameter",
           "BooleanParameter", "Scenario", "Policy", "Experiment",
           "Constant", "create_parameters",
           "parameters_to_csv", "Category", "SobolSampler", "MorrisSampler",
           "get_SALib_problem", "FASTSampler",
           "perform_experiments", 'optimize', "IpyparallelEvaluator",
           "MultiprocessingEvaluator", "SequentialEvaluator",
           'ReplicatorModel', "EpsilonProgress", "HyperVolume",
           "Convergence", "ArchiveLogger", "ArrayOutcome"]
from .outcomes import (ScalarOutcome, TimeSeriesOutcome, Constraint,
ArrayOutcome)
from .model import Model, FileModel, ReplicatorModel, Replicator, SingleReplication
from .parameters import (RealParameter, IntegerParameter, CategoricalParameter,
BooleanParameter, Scenario, Policy, Constant,
Experiment, create_parameters, parameters_to_csv,
Category, experiment_generator)
from .samplers import (MonteCarloSampler, FullFactorialSampler, LHSSampler,
PartialFactorialSampler, sample_levers,
sample_uncertainties)
from .salib_samplers import (SobolSampler, MorrisSampler, FASTSampler,
get_SALib_problem)
from .evaluators import (perform_experiments, optimize,
MultiprocessingEvaluator, SequentialEvaluator)
from .optimization import (Convergence, HyperVolume, EpsilonProgress,
ArchiveLogger)
# ipyparallel is an optional dependency: fall back to None and warn so the
# rest of the package still imports without it.
try:
    from .evaluators import IpyparallelEvaluator
except ImportError:
    IpyparallelEvaluator = None
    warnings.warn("ipyparallel not available", ImportWarning)
# warnings is only needed at import time; drop it from the module namespace
del warnings
|
# -*- coding: utf-8 -*-
import geojson
# import shapely.wkt
from hashids import Hashids
from pydal.objects import Table
from py4web import Field
from geomet import wkt
import mercantile as mc
import h3
def geojsonFeature(id, geometry, properties):
    """Bundle geometry, properties and id into a geojson.Feature."""
    feature_kwargs = dict(geometry=geometry, properties=properties, id=id)
    return geojson.Feature(**feature_kwargs)
def raise_error(err):
    """Raise *err*.

    Exists so exceptions can be raised from expression context, e.g. inside
    a lambda or at the end of an ``or`` chain.
    """
    raise err
hids = Hashids(min_length=1)
class PlanetTable(Table):
    """pydal Table that decorates rows with hashid and GeoJSON virtual fields."""
    def __init__(self, *args, **kwargs):
        super(PlanetTable, self).__init__(*args, **kwargs)
        self._set_encoder()
        self._set_feature_co()
    def _set_encoder(self):
        """Install the hashid encode/decode helpers into self._extra."""
        self._extra = {'encoder': hids}
        # decode or raise when the input is not a valid hashid string
        self._extra["_decode"] = lambda encoded: self._extra['encoder'].decode(encoded) or \
            raise_error(Exception('String expected, "{}" found'.format(encoded)))
        # a hashid packs (src_id, id); expose it as a keyword dict
        self._extra["decode"] = lambda encoded: dict(zip(['src_id', 'id'], self._extra['encoder'].decode(encoded)))
        self._extra["get_by_hash"] = lambda encoded: self(**self._extra["decode"](encoded))
        # self._extra["fetch_info"] = lambda encoded: info(id=self._extra["_decode"](encoded)[1])
    def _set_alias(self, alias, fieldname):
        """ Sets an alias for the given field value """
        setattr(self, alias, Field.Virtual(alias,
            lambda row: row[self._tablename][fieldname]
        ))
    def _set_hashid(self, fieldname, first, *ofields):
        """Add a virtual field encoding `first` + `ofields` into one hashid."""
        setattr(self, fieldname, Field.Virtual(fieldname,
            lambda row: self._extra['encoder'].encode(
                row[self._tablename][first],
                *map(lambda ff: row[self._tablename][ff], ofields)
            )
        ))
    def _set_feat_properties(self, **kwargs):
        """Add the feat_properties virtual field (GeoJSON feature properties)."""
        def _props(row):
            # base properties come from the row, plus its hashid as `id`
            properties = dict(
                row[self._tablename].properties or row[self._tablename].tags,
                id = row[self._tablename].hashid,
                # **{"_{}_".format(row[self._tablename].source_name): row[self._tablename].source_id}
            )
            # extra entries: either a field name to copy or a callable of the row
            properties.update({
                k: row[self._tablename][v] if not callable(v) else v(row[self._tablename])\
                for k,v in kwargs.items()})
            return properties
        setattr(self, 'feat_properties', Field.Virtual('feat_properties', _props))
    def _set_geometry(self):
        """Add feat_geometry (WKT -> GeoJSON) when the table has a geom column."""
        if 'geom' in self.fields and self['geom'].type=='geometry()':
            self.feat_geometry = Field.Virtual('feat_geometry', lambda row: wkt.loads(row[self._tablename].geom))
    def _set_feature_co(self):
        """Wire up all feature-related virtual fields for this table."""
        self._set_hashid('hashid', 'src_id', 'id')
        self._set_feat_properties()
        self._set_geometry()
        if 'geom' in self.fields and self['geom'].type=='geometry()':
            self.feature = Field.Virtual('feature', lambda row: geojsonFeature(
                geometry = row[self._tablename].feat_geometry,
                properties = row[self._tablename].feat_properties,
                id = row[self._tablename].hashid
            ))
def get_tile(lon, lat, zoom, classic=True):
    """Return the tile containing (lon, lat).

    classic is True  -> mercantile XYZ tile;
    classic is False -> H3 cell at the given resolution;
    any other value  -> None (identity checks preserved deliberately).
    """
    if classic is True:
        return mc.tile(lon, lat, zoom)
    if classic is False:
        return h3.geo_to_h3(lat, lon, resolution=zoom)
    # non-bool `classic` falls through and yields None, exactly as before
# def foo(row, zoom, classic=True):
# import pdb; pdb.set_trace()
class PlanetPointTable(PlanetTable):
    """PlanetTable for point geometries, adding a per-row tile() method."""
    def __init__(self, *args, **kwargs):
        super(PlanetPointTable, self).__init__(*args, **kwargs)
        self._set_tile()
    def _set_tile(self):
        """Add a row method computing the tile from the point's WKT coordinates."""
        self.tile = Field.Method(
            'tile',
            lambda row, zoom, classic=True: get_tile(
                *wkt.loads(row[self._tablename].geom)["coordinates"],
                zoom = zoom,
                classic = classic
            )
        )
class PlanetGraphTable(PlanetTable):
    """PlanetTable for graph edges: adds source/target node hashids and weight."""
    def _set_encoder(self):
        """Extend the base codec with a dedicated node encoder."""
        PlanetTable._set_encoder(self)
        self._extra['node_encoder'] = hids
    def _set_node_hashid(self, fieldname, first, *ofields):
        """Add a virtual field encoding node ids with the node encoder."""
        setattr(self, fieldname, Field.Virtual(fieldname,
            lambda row: self._extra['node_encoder'].encode(
                row[self._tablename][first],
                *map(lambda ff: row[self._tablename][ff], ofields)
            )
        ))
    def _set_feature_co(self):
        """Wire up edge hashids, node hashids and the GeoJSON feature field."""
        self._set_hashid('hashid', 'src_id', 'id')
        # source/target node hashids for the edge endpoints
        self._set_node_hashid('shashid', 'src_id', 'sinfo_id')
        self._set_node_hashid('thashid', 'src_id', 'tinfo_id')
        # self._set_alias("shid", 'shashid')
        # self._set_alias("thid", 'thashid')
        # expose the edge length (rounded) as the feature weight
        self._set_feat_properties(weight=lambda row: round(row.len, 3))
        if 'geom' in self.fields and self['geom'].type=='geometry()':
            self.feature = Field.Virtual('feature', lambda row: geojson.Feature(
                geometry = wkt.loads(row[self._tablename].geom),
                properties = row[self._tablename].feat_properties,
                id = row[self._tablename].hashid
            ))
|
import xlsxwriter
from docx import Document
from docx.oxml import parse_xml
from docx.shared import Inches, Pt, Mm, RGBColor
from docx.dml.color import ColorFormat
from docx.enum.table import WD_ALIGN_VERTICAL, WD_TABLE_ALIGNMENT
from docx.enum.text import WD_ALIGN_PARAGRAPH
from docx.oxml.ns import qn, nsdecls
import re
import pandas as pd
import numpy as np
from openpyxl import load_workbook
from word_tool import plt_bar, plt_plot, plt_pie, plt_scatter
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
def initBaogao(file='./demo.docx'):
    """Create the evaluation-report skeleton (three chapters) and save to *file*."""
    document = Document()
    # grab the default (Normal) style
    style = document.styles['Normal']
    # western (Latin) font
    style.font.name = u'宋体' # alternative: 'Times New Roman'
    # East-Asian (Chinese) font
    style.element.rPr.rFonts.set(qn('w:eastAsia'), u'宋体') # alternative: u'微软雅黑'
    style = document.styles['Heading 1']
    font = style.font
    # paragraph format of the heading style
    paragraph_format = style.paragraph_format
    # first-line indent of 0.74 cm, i.e. two (Chinese) characters
    paragraph_format.first_line_indent = Mm(7.4)
    # heading levels
    heading1 = 0
    heading2 = 1
    sections = document.sections
    current_section = sections[-1]
    # Chapter 1
    p = document.add_heading('第一章 本期评测工作概述', heading1)
    '''
    p1 = document.add_paragraph()
    run = p1.add_run(u'第一章 本期评测综述')
    run.font.name = u'宋体'
    r = run._element
    r.rPr.rFonts.set(qn('w:eastAsia'), u'微软雅黑')
    '''
    p = document.add_paragraph('A plain paragraph having some ')
    p.paragraph_format.first_line_indent = Mm(7.4)
    p.add_run('bold').bold = True
    p.add_run(' and some ')
    p.add_run('italic.').italic = True
    heading_first = ['一、参评数量',
                     '二、评测结果',
                     '2、获优秀奖节目和不达标节目:',
                     '3、以上评测结果将提交考核办,按相关文件要求,对节目考核和奖惩;并在OA网、北楼大屏公示两周。',
                     '三、本期说明',
                     '本期参评节目来源',]
    # one placeholder paragraph under every chapter-1 heading
    for heading in heading_first:
        head = document.add_heading(heading, heading2)
        # head.element.rPr.rFonts.set(qn('w:eastAsia'), u'微软雅黑')
        p = document.add_paragraph('please input some words.')
        p.paragraph_format.first_line_indent = Mm(7.4)
    p_shuoming = ['台外录制:以台外演播室录制为主,包括北京的演播室、联合录制、西院五楼新媒体演播室',
                  '台内录制:以台内演播室录制为主,包括800演播厅、4heading10演播厅、300演播厅、260演播厅、120演播室、110演播室、70演播室',
                  '台外制作:是指在台外制作、但不包括在北京制作',
                  '包装制作:在我台云平台高清制作网、由电视制作中心包装制作人员完成',
                  '高清自制:在我台云平台高清制作网、由编辑人员自行完成',
                  '120自制:在电视制作中心120演播室、由编辑人员自行完成',
                  '影视自制:在影视频道制作网、由编辑人员自行完成',
                  '少儿自制:在少儿频道制作网、由编辑人员自行完成',
                  '农民自制:在农民制作网、由编辑人员自行完成',
                  '广告自制:在广告发展公司制作']
    # explanatory paragraphs listing the production-source categories
    for p in p_shuoming:
        i = document.add_paragraph(p)
        i.paragraph_format.first_line_indent = Mm(7.4)
    # Chapter 2
    # document.add_page_break()
    document.add_section()
    document.add_heading('第二章 本期评测数据分析', heading1)
    heading_first = ['一、按综合得分分析',
                     '得分分析图',
                     '综合得分排序表',
                     '二、按首播频道分析',
                     '首播频道分析表',
                     '首播频道排序表',
                     '三、按录制地点分析',
                     '录制地点分析表',
                     '录制地点排序表',
                     '三、按制作方式分析',
                     '制作方式分析表',
                     '制作方式排序表',
                     '专家意见及建议']
    for heading in heading_first:
        document.add_heading(heading, heading2)
        p = document.add_paragraph('please input some words.')
        p.paragraph_format.first_line_indent = Mm(7.4)
    # Chapter 3
    # document.add_page_break()
    document.add_section()
    document.add_heading('第三章 专家评语', heading1)
    heading_first = ['一、优秀节目评语',
                     '二、后十名节目评语',]
    for heading in heading_first:
        document.add_heading(heading, heading2)
        p = document.add_paragraph('please input some words.')
        p.paragraph_format.first_line_indent = Mm(7.4)
    document.save(file)
def move_table_after(table, paragraph):
    """Detach *table*'s XML element and re-insert it right after *paragraph*."""
    paragraph._p.addnext(table._tbl)
def move_paragraph_after(pnew, pold):
    """Detach *pnew*'s XML element and re-insert it right after *pold*."""
    pold._p.addnext(pnew._p)
def setCellBackgroundColor(self, cell, rgbColor):
    """Set the background (shading) color of a table cell.

    :param self: unused; kept for call-compatibility with the original signature
    :param cell: python-docx table cell whose shading element is appended
    :param rgbColor: color object exposing integer ``r``/``g``/``b`` channels
    :return: None; prints a warning and returns early on a bad color argument
    """
    # BUG FIX: the original tested `isinstance(rgbColor, RGBValue)`, but
    # RGBValue is never imported or defined, so every call raised NameError.
    # Duck-type the channels instead: accept any object with r/g/b.
    if not all(hasattr(rgbColor, channel) for channel in ("r", "g", "b")):
        print('rgbColor has no r/g/b channels...', type(rgbColor))
        return
    # BUG FIX: str(hex(v))[-2:] yields e.g. 'x5' for channel values < 16;
    # format each channel as exactly two lowercase hex digits instead.
    colorStr = "%02x%02x%02x" % (int(rgbColor.r) & 0xFF,
                                 int(rgbColor.g) & 0xFF,
                                 int(rgbColor.b) & 0xFF)
    # print(colorStr)
    shading_elm_1 = parse_xml(r'<w:shd {} w:fill="{color_value}"/>'.format(nsdecls('w'), color_value=colorStr))
    cell._tc.get_or_add_tcPr().append(shading_elm_1)
def canping_shuliang(file='demo.docx'):
    """Insert the participating-programme count sentence under the
    '一、参评数量' heading of the report document."""
    df = pd.read_excel('database.xlsx')
    df = df[['序号', '节目名称', '播出时间', '频道']]
    counts = df['频道'].value_counts()
    print(counts)
    pindao = ['卫视', '经济', '都市', '影视', '少儿', '公共', '农民']
    # build the sentence: total count plus a per-channel breakdown
    myText = "1、抽测节目共计{}个。包括:".format(df.shape[0])
    for i in pindao:
        myText = '{}河北{}{}个、'.format(myText, i, counts[i])
    # replace the trailing separator with a full stop
    myText = '{}。'.format(myText[:-1])
    print(myText)
    document = Document(file)
    # insert the paragraph right after the target heading
    for p in document.paragraphs:
        if re.match("^Heading \d+$", p.style.name):
            if p.text == '一、参评数量':
                print(p.text)
                pNew = document.add_paragraph(myText)
                pNew.paragraph_format.first_line_indent = Mm(7.4)
                move_paragraph_after(pNew, p)
    document.save(file)
def defen_dengji(file='demo.docx'):
    """Insert the grade-distribution summary sentence under the
    '二、评测结果' heading of the report document."""
    df = pd.read_excel('database.xlsx')
    df = df[['序号', '节目名称', '播出时间', '等级']]
    counts = df['等级'].value_counts()
    print(counts)
    dengji = ['良好', '良', '及格', '不及格']
    # lead with the '优秀' (excellent) count and rate, then the other grades
    myText = '1、本期常规节目综合评分为{}的节目{}个,'.format('优秀', counts['优秀'])
    myText = '{}{}率为{}%;'.format(myText, '优秀', round(counts['优秀'] / df.shape[0] * 100, 2))
    for i in range(len(counts)-1):
        myText = '{}综合评分为{}的节目{}个,'.format(myText, dengji[i], counts[dengji[i]])
        myText = '{}占比为{}%;'.format(myText, round(counts[dengji[i]] / df.shape[0] * 100, 2))
    # replace the trailing separator with a full stop
    myText = '{}。'.format(myText[:-1])
    print(myText)
    document = Document(file)
    # insert the paragraph right after the target heading
    for p in document.paragraphs:
        if re.match("^Heading \d+$", p.style.name):
            if p.text == '二、评测结果':
                print(p.text)
                pNew = document.add_paragraph(myText)
                pNew.paragraph_format.first_line_indent = Mm(7.4)
                move_paragraph_after(pNew, p)
    document.save(file)
def canping_program(file='demo.docx'):
    """Insert the two-column programme/source table under the
    '本期参评节目来源' heading of the report document."""
    df = pd.read_excel('database.xlsx')
    df = df[['序号', '节目名称', '节目来源']]
    # sort by programme number
    df = df.sort_values(by=['序号'], ascending=True)
    df.reset_index(drop=True, inplace=True)
    print(df)
    # rows needed once the programmes are split into two side-by-side columns
    inRownumber = df.shape[0]
    if inRownumber % 2 :
        inRownumber += 1
    inRow = inRownumber // 2
    document = Document(file)
    # insert the table right after the target heading
    for p in document.paragraphs:
        if re.match("^Heading \d+$", p.style.name):
            if p.text == '本期参评节目来源':
                print(p.text)
                # one extra row because the header occupies a row
                table = document.add_table(rows=inRow + 1, cols=7, style='Table Grid')
                move_table_after(table, p)
                table.alignment = WD_TABLE_ALIGNMENT.CENTER
                # column 3 is a merged spacer between the two halves
                table.cell(0, 3).merge(table.cell(inRow, 3))
                table.autofit = False
                table.columns[0].width = Mm(12)
                table.columns[1].width = Mm(42)
                table.columns[2].width = Mm(24)
                table.columns[3].width = Mm(4)
                table.columns[4].width = Mm(12)
                table.columns[5].width = Mm(42)
                table.columns[6].width = Mm(24)
                columns = df.columns.to_list()
                # write the header twice: once per half of the table
                for i in range(2):
                    for column in columns:
                        cell1 = table.cell(0, i * 4 + columns.index(column))
                        cell1.text = column
                        cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                        cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                        # header background color
                        shading_elm_1 = parse_xml(r'<w:shd {} w:fill="{color_value}"/>'.format(nsdecls('w'), color_value='#FDEADA'))
                        cell1._tc.get_or_add_tcPr().append(shading_elm_1)
                        # font size
                        for run in cell1.paragraphs[0].runs:
                            font = run.font
                            font.size = Pt(12)
                            font.bold = True
                # fixed row heights so the text wraps consistently
                table.rows[0].height = Mm(7.2)  # header row height
                for index, row in df.iterrows():
                    table.rows[index % inRow + 1].height = Mm(7.2)  # data row height
                    for i in range(3):
                        # first half fills columns 0-2, second half columns 4-6
                        cell1 = table.cell(index % inRow + 1, index // inRow * 4 + i)
                        cell1.text = str(row[columns[i]])
                        cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                        cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                        # font size
                        for run in cell1.paragraphs[0].runs:
                            font = run.font
                            font.size = Pt(12)
    document.save(file)
def fenxi_zhanbi(file='demo.docx'):
    """Build the per-channel grade/score analysis: write charts and tables to
    the Excel workbook and insert the summary table into the report docx."""
    df = pd.read_excel('database.xlsx')
    df = df[['频道', '节目名称', '播出时间', '录制地点',
             '制作方式', '制片人', '主观', '客观', '总分', '等级']]
    df['主观'] = df['主观'].round(0)
    df['客观'] = df['客观'].astype(np.int64)
    df['总分'] = df['总分'].astype(np.int64)
    pindao = ['卫视', '经济', '都市', '影视', '少儿', '公共', '农民']
    fenxi = []
    for i in pindao:
        # filter rows for this channel
        df_temp = df[df['频道'] == i]
        # count programmes per grade
        temp = df_temp.loc[:, '等级'].value_counts()
        # rename the series after the channel
        temp = temp.rename(i)
        # max, min and mean of the total score
        s = pd.Series([df_temp['总分'].max(), df_temp['总分'].min(), df_temp['总分'].mean()],
                      index=['最高分', '最低分', '平均分'])
        # rename the series after the channel
        s = s.rename(i)
        # append the score stats to the grade counts
        temp = temp.append(s)
        # print(temp)
        # collect the per-channel series
        fenxi.append(temp)
    # build a DataFrame with one row per channel
    data = pd.DataFrame(fenxi)
    # fill grades with no data with 0
    data.fillna(0, inplace=True)
    # per-channel programme counts
    temp = df.loc[:, '频道'].value_counts()
    # merge the counts as the first column
    data.insert(0, '节目数量', temp)
    # add any expected column absent from the data
    s = data.columns.to_list()
    dengji = ['节目数量', '优秀', '良好', '良', '及格', '不及格', '平均分', '最高分', '最低分']
    for i in dengji:
        if i in s:
            pass
        else:
            data[i] = 0
    data = data[dengji]
    # derived ratios: excellent rate and pass rate
    data.insert(6, '优秀率', data[['节目数量', '优秀']].apply(lambda x: x['优秀'] / x['节目数量'], axis=1))
    data.insert(6, '达标率', data[['节目数量', '优秀', '良好', '良']].apply(
        lambda x: (x['优秀'] + x["良好"] + x['良']) / x['节目数量'], axis=1))
    # integer dtypes for count/score columns
    dengji = ['节目数量', '优秀', '良好', '良', '及格', '不及格', '最高分', '最低分']
    data[dengji] = data[dengji].astype(np.int64)
    data['平均分'] = data['平均分'].round(2)
    data['优秀率%'] = data['优秀率'].apply(lambda x: format(x, '.2%'))
    data['达标率%'] = data['达标率'].apply(lambda x: format(x, '.2%'))
    data.reset_index(inplace=True)
    data = data.rename({'index': '频道'}, axis='columns')
    # print(data)
    # data.insert(0, '频道', pindao)
    # data.reset_index(drop=True,inplace=True)
    # print(data)
    # write the table data to Excel
    # read the existing workbook (all sheets)
    souce = pd.read_excel('database.xlsx', sheet_name=None)
    new_sheet = '按频道分'
    if new_sheet in souce:
        souce.pop(new_sheet)
    with pd.ExcelWriter('database.xlsx', engine='xlsxwriter') as writer:
        # re-write all existing sheets, then append the new analysis sheet
        for i in souce:
            souce[i].to_excel(writer, sheet_name=i, index=False)
        data.to_excel(writer, sheet_name=new_sheet, index=False)
        data[['频道', '达标率%', '优秀率%']].to_excel(writer, sheet_name='按频道分', startrow=data.shape[0] + 3, index=False)
        workbook = writer.book
        worksheet = writer.sheets[new_sheet]
        # column chart: pass rate and excellent rate per channel
        chart = workbook.add_chart({'type': 'column'})
        chart.add_series({
            'name': "=按频道分!$B$11",
            'categories': '=按频道分!$A$12:$C$18',
            'values': '=按频道分!$B$12:$B$18',
        })
        chart.add_series({
            'name': "=按频道分!$C$11",
            'categories': '=按频道分!$A$12:$C$18',
            'values': '=按频道分!$C$12:$C$18',
        })
        chart.set_title({'name': '各频道达标率、优秀率'})
        # chart.set_x_axis({'name': 'Test number'})
        # chart.set_y_axis({'name': 'Sample length (mm)'})
        chart.set_style(10)
        chart.height = 600
        chart.width = 960
        worksheet.insert_chart('D2', chart, {'x_offset': 25, 'y_offset': 10})
        data[['频道', '最高分', '最低分', '平均分']].to_excel(writer, sheet_name='按频道分',
                                         startrow=(data.shape[0] + 3) * 2, index=False)
        # line chart: max/min/mean score per channel
        chart = workbook.add_chart({'type': 'line'})
        chart.add_series({
            'name': "=按频道分!$B$21",
            'categories': '=按频道分!$A$22:$C$28',
            'values': '=按频道分!$B$22:$B$28',
        })
        chart.add_series({
            'name': "=按频道分!$C$21",
            'categories': '=按频道分!$A$22:$C$28',
            'values': '=按频道分!$C$22:$C$28',
        })
        chart.add_series({
            'name': "=按频道分!$D$21",
            'categories': '=按频道分!$A$22:$C$28',
            'values': '=按频道分!$D$22:$D$28',
        })
        chart.set_title({'name': '各频道分数比较'})
        # chart.set_x_axis({'name': 'Test number'})
        # chart.set_y_axis({'name': 'Sample length (mm)'})
        chart.set_style(10)
        chart.height = 600
        chart.width = 960
        worksheet.insert_chart('D38', chart, {'x_offset': 25, 'y_offset': 10})
    # write the summary table into the report docx
    dengji = ['频道', '节目数量', '优秀', '良好', '良', '及格', '不及格', '达标率%', '优秀率%', '平均分']
    df = data[dengji]
    print(df)
    document = Document(file)
    # insert the table right after the target heading
    for p in document.paragraphs:
        if re.match("^Heading \d+$", p.style.name):
            if p.text == '专家意见及建议':
                print(p.text)
                table = document.add_table(rows=df.shape[0] + 2, cols=df.shape[1], style='Table Grid')
                move_table_after(table, p)
                table.alignment = WD_TABLE_ALIGNMENT.CENTER
                table.autofit = False
                # header
                # merge the single-level header cells across both header rows
                for i in [0, 1, 7, 8, 9]:
                    table.cell(0, i).merge(table.cell(1, i))
                cell1 = table.cell(0, 2).merge(table.cell(0, 4))
                cell1.text = '技术质量达标'
                cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                # font size
                for run in cell1.paragraphs[0].runs:
                    font = run.font
                    font.size = Pt(10)
                    font.bold = True
                cell1 = table.cell(0, 5).merge(table.cell(0, 6))
                cell1.text = '技术质量不达标'
                cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                # header background color
                shading_elm_1 = parse_xml(r'<w:shd {} w:fill="{color_value}"/>'.format(nsdecls('w'), color_value='#8DB4E2'))
                cell1._tc.get_or_add_tcPr().append(shading_elm_1)
                # font size
                for run in cell1.paragraphs[0].runs:
                    font = run.font
                    font.size = Pt(10)
                    font.bold = True
                # column widths (mm)
                table_width = {'频道': 10.9, '节目数量': 14, '优秀': 14, '良好': 14, '良': 14,
                               '及格': 16, '不及格': 16, '达标率%': 17, '优秀率%': 16, '平均分': 16}
                # column names
                columns_name = df.columns.to_list()
                for i in columns_name:
                    # set the column width
                    table.columns[columns_name.index(i)].width = Mm(table_width[i])
                    cell1 = table.cell(1, columns_name.index(i))
                    cell1.text = i
                    cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                    cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                    # font size
                    for run in cell1.paragraphs[0].runs:
                        font = run.font
                        font.size = Pt(10)
                        font.bold = True
                for index, row in df.iterrows():
                    for i in range(len(row)):
                        cell1 = table.cell(index + 2, i)
                        cell1.text = str(row[columns_name[i]])
                        cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                        cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                        # font size
                        for run in cell1.paragraphs[0].runs:
                            font = run.font
                            font.size = Pt(10)
    '''
    # make picture
    dengji = ['达标率', '优秀率']
    df_lv = data[dengji]
    # df_lv[u'线损率'] = df_lv[u'线损率'].str.strip('%').astype(float) / 100
    df_lv.index = pindao
    print(df_lv.info())
    x_names = ['a','b','c']
    y_values = [1,2,3]
    plt_bar(x_names, y_values, "柱状图.png")
    plt_plot(x_names, y_values, "折线图.png")
    plt_scatter(x_names, y_values, "散点图.png")
    labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'
    sizes = [15, 30, 45, 10]
    plt_pie(labels, sizes, "饼状图.png")
    document.add_picture('柱状图.png', width=Inches(6.25))
    document.add_picture('折线图.png', width=Inches(6.25))
    document.add_picture('饼状图.png', width=Inches(6.25))
    document.add_picture('散点图.png', width=Inches(6.25))
    #设置表格宋体大小
    for row in table.rows:
        for cell in row.cells:
            paragraphs = cell.paragraphs
            for paragraph in paragraphs:
                for run in paragraph.runs:
                    font = run.font
                    font.size = Pt(10)
    '''
    document.save(file)
def fenxi_youxiu(file='demo.docx'):
    """Insert the "award-winning and failing programs" table into the report.

    Reads 'database.xlsx', sorts programs by total / subjective / objective
    score (descending, ties broken by sequence number ascending), keeps only
    rows whose total score is >= 90 (award) or < 85 (failing), and writes
    them as a centered table right after the heading paragraph
    '2、获优秀奖节目和不达标节目:'.  The document is saved in place.

    Args:
        file: path of the .docx report to update.
    """
    df = pd.read_excel('database.xlsx')
    df = df[['等级', '排名', '节目名称', '播出时间', '总分', '主观', '客观', '序号']]
    # df['播出时间'] = pd.to_datetime(df['播出时间'])
    # df['播出时间'] = df['播出时间'].apply(lambda x: x.strftime('%Y年%m月%d日'))
    # Sort by total / subjective / objective score and sequence number.
    df = df.sort_values(by=['总分', '主观', '客观', '序号'], ascending=[False, False, False, True])
    df = df[['等级', '排名', '节目名称', '播出时间', '总分']]
    # Keep award winners (>= 90) and failing programs (< 85) only.
    df = df[(df['总分'] >= 90) | (df['总分'] < 85)]
    df.reset_index(drop=True, inplace=True)
    # Rename the score column for display.
    df.rename({'总分': '综合得分', }, axis='columns', inplace=True)
    document = Document(file)
    # Locate the target heading; all table work happens inside the matched
    # branch (the original ran it for every heading-styled paragraph, which
    # raises NameError on `table` when a different heading comes first).
    for p in document.paragraphs:
        # Raw string so '\d' is a regex digit class, not an invalid escape.
        if re.match(r"^Heading \d+$", p.style.name):
            if p.text == '2、获优秀奖节目和不达标节目:':
                print(p.text)
                # Create the table and move it right below the heading.
                table = document.add_table(rows=df.shape[0] + 1, cols=df.shape[1], style='Table Grid')
                move_table_after(table, p)
                table.alignment = WD_TABLE_ALIGNMENT.CENTER
                table.autofit = False
                # Column widths in millimetres, keyed by column name.
                table_width = {'排名': 17.7, '序号': 7.4, '节目名称': 43.2, '频道': 10.9, '播出时间': 39.2, '录制地点': 20.6,
                               '制作方式': 18, '制片人': 14.4, '主观': 7.4, '客观': 9.1, '综合得分': 21.7, '总分': 21.7, '等级': 19.5}
                columns_name = df.columns.to_list()
                # Header row: column name, centered, shaded, bold 12pt.
                for i in columns_name:
                    table.columns[columns_name.index(i)].width = Mm(table_width[i])
                    cell1 = table.cell(0, columns_name.index(i))
                    cell1.text = i
                    cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                    cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                    # Header background colour.
                    shading_elm_1 = parse_xml(r'<w:shd {} w:fill="{color_value}"/>'.format(nsdecls('w'), color_value='#8DB4E2'))
                    cell1._tc.get_or_add_tcPr().append(shading_elm_1)
                    for run in cell1.paragraphs[0].runs:
                        font = run.font
                        font.size = Pt(12)
                        font.bold = True
                table.rows[0].height = Mm(7.5)  # header row height
                # Data rows: one table row per DataFrame row, centered 12pt.
                for index, row in df.iterrows():
                    table.rows[index + 1].height = Mm(7.5)  # data row height
                    for i in range(len(row)):
                        cell1 = table.cell(index + 1, i)
                        cell1.text = str(row[columns_name[i]])
                        cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                        cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                        for run in cell1.paragraphs[0].runs:
                            font = run.font
                            font.size = Pt(12)
    document.save(file)
def fenxi_dabiao(file='demo.docx'):
    """Insert the "failing programs" (total score < 85) table into the report.

    Reads 'database.xlsx', formats the broadcast date, sorts by total /
    subjective / objective score (descending, ties broken by sequence number
    ascending), keeps rows with a total score below 85, and writes them as a
    centered table right after the heading '2、不达标节目'.

    Args:
        file: path of the .docx report to update in place.
    """
    df = pd.read_excel('database.xlsx')
    df = df[['节目名称', '播出时间', '总分', '主观', '客观', '序号']]
    # Format the broadcast date as 'YYYY年MM月DD日'.
    df['播出时间'] = pd.to_datetime(df['播出时间'])
    df['播出时间'] = df['播出时间'].apply(lambda x: x.strftime('%Y年%m月%d日'))
    # Sort by total / subjective / objective score and sequence number.
    df = df.sort_values(by=['总分', '主观', '客观', '序号'], ascending=[False, False, False, True])
    df = df[['节目名称', '播出时间', '总分']]
    # Failing programs only.
    df = df[df['总分'] < 85]
    df.reset_index(drop=True, inplace=True)
    document = Document(file)
    # Locate the target heading; the table work is nested inside the matched
    # branch (the original dedented it so it ran for every heading paragraph).
    for p in document.paragraphs:
        # Raw string so '\d' is a regex digit class, not an invalid escape.
        if re.match(r"^Heading \d+$", p.style.name):
            if p.text == '2、不达标节目':
                print(p.text)
                table = document.add_table(rows=df.shape[0] + 1, cols=df.shape[1], style='Table Grid')
                move_table_after(table, p)
                table.alignment = WD_TABLE_ALIGNMENT.CENTER
                table.autofit = False
                # Column widths in millimetres, keyed by column name.
                table_width = {'排名': 7.4, '序号': 7.4, '节目名称': 50, '频道': 10.9, '播出时间': 33.2, '录制地点': 20.6,
                               '制作方式': 18, '制片人': 14.4, '主观': 7.4, '客观': 9.1, '总分': 11.3, '等级': 10.9}
                columns_name = df.columns.to_list()
                # Header row: column name, centered, shaded, bold 10pt.
                for i in columns_name:
                    table.columns[columns_name.index(i)].width = Mm(table_width[i])
                    cell1 = table.cell(0, columns_name.index(i))
                    cell1.text = i
                    cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                    cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                    # Header background colour.
                    shading_elm_1 = parse_xml(r'<w:shd {} w:fill="{color_value}"/>'.format(nsdecls('w'), color_value='#8DB4E2'))
                    cell1._tc.get_or_add_tcPr().append(shading_elm_1)
                    for run in cell1.paragraphs[0].runs:
                        font = run.font
                        font.size = Pt(10)
                        font.bold = True
                table.rows[0].height = Mm(7.5)  # header row height
                # Data rows.
                for index, row in df.iterrows():
                    table.rows[index + 1].height = Mm(7.5)  # data row height
                    for i in range(len(row)):
                        cell1 = table.cell(index + 1, i)
                        cell1.text = str(row[columns_name[i]])
                        cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                        cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                        for run in cell1.paragraphs[0].runs:
                            font = run.font
                            font.size = Pt(10)
    document.save(file)
def zonghe_fen(file='demo.docx'):
    """Insert the overall ranking table ('综合得分排序表') into the report.

    Reads 'database.xlsx', ranks every program by total / subjective /
    objective score (descending, ties broken by sequence number ascending),
    writes the ranking as a centered table after the heading '综合得分排序表',
    and merges the grade ('等级') cells of adjacent rows with equal grade.

    Args:
        file: path of the .docx report to update in place.
    """
    df = pd.read_excel('database.xlsx')
    # Sort by total / subjective / objective score and sequence number.
    df = df.sort_values(by=['总分', '主观', '客观', '序号'], ascending=[False, False, False, True])
    df.reset_index(drop=True, inplace=True)
    # Rank = position in the sorted frame + 1.  (Bug fix: the original
    # `df.apply(lambda x: x.index + 1)` applies column-wise and does not
    # produce a per-row rank; derive it from the fresh RangeIndex instead.)
    df['排名'] = df.index + 1
    myColumnts = ['排名', '节目名称', '首播频道', '播出时间', '录制地点',
                  '制作方式', '制片人', '主观', '客观', '总分', '等级']
    # .copy() so the in-place column mutations below do not act on a slice.
    df = df[myColumnts].copy()
    df['主观'] = df['主观'].round(0).astype(np.int64)
    df['客观'] = df['客观'].astype(np.int64)
    df['总分'] = df['总分'].astype(np.int64)
    # Display '总分' as '综合'.
    df.rename({'总分': '综合'}, axis='columns', inplace=True)
    document = Document(file)
    # Locate the target heading; the table work is nested inside the matched
    # branch (the original dedented it so it ran for every heading paragraph).
    for p in document.paragraphs:
        # Raw string so '\d' is a regex digit class, not an invalid escape.
        if re.match(r"^Heading \d+$", p.style.name):
            if p.text == '综合得分排序表':
                print(p.text)
                table = document.add_table(rows=df.shape[0] + 1, cols=df.shape[1], style='Table Grid')
                move_table_after(table, p)
                table.alignment = WD_TABLE_ALIGNMENT.CENTER
                table.autofit = False
                # Column widths in millimetres, keyed by column name.
                table_width = {'排名': 9.5, '序号': 8.6, '节目名称': 28, '频道': 11.2, '首播频道': 19.2, '播出时间': 22.3, '录制地点': 20.9,
                               '制作方式': 18.2, '制片人': 19.3, '主观': 8.1, '客观': 10.1, '总分': 8, '综合': 8, '等级': 7}
                columns_name = df.columns.to_list()
                # Header row: column name, centered, shaded, bold 10pt.
                for i in columns_name:
                    table.columns[columns_name.index(i)].width = Mm(table_width[i])
                    cell1 = table.cell(0, columns_name.index(i))
                    cell1.text = i
                    cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                    cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                    for run in cell1.paragraphs[0].runs:
                        font = run.font
                        font.size = Pt(10)
                        font.bold = True
                    # Header background colour.
                    shading_elm_1 = parse_xml(r'<w:shd {} w:fill="{color_value}"/>'.format(nsdecls('w'), color_value='#8DB4E2'))
                    cell1._tc.get_or_add_tcPr().append(shading_elm_1)
                table.rows[0].height = Mm(15)  # header row height
                # Data rows.
                for index, row in df.iterrows():
                    table.rows[index + 1].height = Mm(7.5)  # data row height
                    for i in range(len(row)):
                        cell1 = table.cell(index + 1, i)
                        cell1.text = str(row[columns_name[i]])
                        cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                        cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                        for run in cell1.paragraphs[0].runs:
                            font = run.font
                            font.size = Pt(10)
                # Merge the grade cells: rows are sorted by score, so equal
                # grades are contiguous; merge each run into one cell.
                temp = df.loc[:, '等级'].value_counts()
                temp1 = temp.index.tolist()
                temp2 = ['优秀', '良好', '良', '及格', '不及格']
                dengji = []
                for i in temp2:
                    if i in temp1:
                        dengji.append(i)
                j = 1
                inDengji = myColumnts.index('等级')  # column position of the grade column
                for i in dengji:
                    cell1 = table.cell(j, inDengji).merge(table.cell(j + temp[i] - 1, inDengji))
                    cell1.text = i
                    cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                    cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                    j += temp[i]
                    for run in cell1.paragraphs[0].runs:
                        font = run.font
                        font.size = Pt(10)
    document.save(file)
def rank_pindao(file='demo.docx'):
    """Insert the per-channel ranking table ('首播频道排序表') into the report.

    Reads 'database.xlsx', sorts all programs by score, then regroups them
    by first-broadcast channel, giving each program a rank within its own
    channel.  The result is written as a centered table after the heading
    '首播频道排序表' and the channel cells of each group are merged.

    Args:
        file: path of the .docx report to update in place.
    """
    df = pd.read_excel('database.xlsx')
    # Sort by total / subjective / objective score and sequence number.
    df = df.sort_values(by=['总分', '主观', '客观', '序号'], ascending=[False, False, False, True])
    df.reset_index(drop=True, inplace=True)
    # .copy() so the column mutations below do not act on a slice.
    df = df[['首播频道', '节目名称', '播出时间', '录制地点',
             '制作方式', '制片人', '主观', '客观', '总分', '等级']].copy()
    df['主观'] = df['主观'].round(0).astype(np.int64)
    df['客观'] = df['客观'].astype(np.int64)
    df['总分'] = df['总分'].astype(np.int64)
    # Rename for display.
    df.rename({'首播频道': '频道', '总分': '综合'}, axis='columns', inplace=True)
    pindao = ['河北卫视', '河北经济', '河北都市', '河北影视', '河北少儿', '河北公共', '农民']
    result = {}
    for i in pindao:
        # Rows belonging to this channel.
        df_temp = df[df['频道'] == i].copy()
        # Per-channel rank (the frame is already globally sorted by score).
        df_temp.insert(1, '排名', df_temp['综合'].rank(ascending=False, method='first', ))
        df_temp['排名'] = df_temp['排名'].astype(np.int32)
        result[i] = df_temp
    # Concatenate the channel groups.
    df = pd.concat(result)
    df.reset_index(drop=True, inplace=True)
    document = Document(file)
    # Locate the target heading; the table work is nested inside the matched
    # branch (the original dedented it so it ran for every heading paragraph).
    for p in document.paragraphs:
        # Raw string so '\d' is a regex digit class, not an invalid escape.
        if re.match(r"^Heading \d+$", p.style.name):
            if p.text == '首播频道排序表':
                print(p.text)
                table = document.add_table(rows=df.shape[0] + 1, cols=df.shape[1], style='Table Grid')
                move_table_after(table, p)
                table.alignment = WD_TABLE_ALIGNMENT.CENTER
                table.autofit = False
                # Column widths in millimetres, keyed by column name.
                table_width = {'排名': 9.1, '序号': 9.1, '节目名称': 29.6, '频道': 9.1, '播出时间': 23.6, '录制地点': 22.1,
                               '制作方式': 19.2, '制片人': 20.4, '主观': 8.6, '客观': 10.7, '总分': 8.4, '综合': 8.4, '等级': 11.6}
                columns_name = df.columns.to_list()
                # Header row: column name, centered, shaded, bold 10pt.
                for i in columns_name:
                    table.columns[columns_name.index(i)].width = Mm(table_width[i])
                    cell1 = table.cell(0, columns_name.index(i))
                    cell1.text = i
                    cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                    cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                    for run in cell1.paragraphs[0].runs:
                        font = run.font
                        font.size = Pt(10)
                        font.bold = True
                    # Header background colour.
                    shading_elm_1 = parse_xml(r'<w:shd {} w:fill="{color_value}"/>'.format(nsdecls('w'), color_value='#8DB4E2'))
                    cell1._tc.get_or_add_tcPr().append(shading_elm_1)
                # Data rows.
                table.rows[0].height = Mm(15)  # header row height
                for index, row in df.iterrows():
                    table.rows[index + 1].height = Mm(7.5)  # data row height
                    for i in range(len(row)):
                        cell1 = table.cell(index + 1, i)
                        cell1.text = str(row[columns_name[i]])
                        cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                        cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                        for run in cell1.paragraphs[0].runs:
                            font = run.font
                            font.size = Pt(10)
                # Merge the channel cells (each channel's rows are contiguous).
                temp = df.loc[:, '频道'].value_counts()
                j = 1
                for i in pindao:
                    cell1 = table.cell(j, 0).merge(table.cell(j + temp[i] - 1, 0))
                    cell1.text = i
                    cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                    cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                    j += temp[i]
                    for run in cell1.paragraphs[0].runs:
                        font = run.font
                        font.size = Pt(10)
    document.save(file)
def fenxi_pindao(file='demo.docx'):
    """Build the per-channel analysis ('首播频道分析表'), write it to Excel
    with charts, and insert the summary table into the report.

    For every channel in the fixed channel list this computes the number of
    programs per grade, the max/min/mean total score, and the pass and
    excellence rates.  The aggregate is written to the '按频道分' sheet of
    'database.xlsx' (with two embedded charts) and, as a two-row-header
    table, after the heading '首播频道分析表' in *file*.

    Args:
        file: path of the .docx report to update in place.
    """
    df = pd.read_excel('database.xlsx')
    # .copy() so the column mutations below do not act on a slice.
    df = df[['首播频道', '节目名称', '播出时间', '录制地点',
             '制作方式', '制片人', '主观', '客观', '总分', '等级']].copy()
    df['主观'] = df['主观'].round(0)
    df['客观'] = df['客观'].astype(np.int64)
    df['总分'] = df['总分'].astype(np.int64)
    pindao = ['河北卫视', '河北经济', '河北都市', '河北影视', '河北少儿', '河北公共', '农民']
    fenxi = []
    for i in pindao:
        # Rows for this channel.
        df_temp = df[df['首播频道'] == i]
        # Count programs per grade.
        temp = df_temp.loc[:, '等级'].value_counts()
        # Name the series after the channel (becomes the row label below).
        temp = temp.rename(i)
        # Max / min / mean total score for the channel.
        s = pd.Series([df_temp['总分'].max(), df_temp['总分'].min(), df_temp['总分'].mean()],
                      index=['最高分', '最低分', '平均分'])
        s = s.rename(i)
        # Series.append was removed in pandas 2.0 — use pd.concat instead.
        temp = pd.concat([temp, s])
        fenxi.append(temp)
    # One row per channel, columns from the series indexes.
    data = pd.DataFrame(fenxi)
    data.fillna(0, inplace=True)
    # Number of evaluated programs per channel.
    temp = df.loc[:, '首播频道'].value_counts()
    data.insert(0, '测评节目数量', temp)
    # Make sure every expected column exists, even if no program got that grade.
    s = data.columns.to_list()
    dengji = ['测评节目数量', '优秀', '良好', '良', '及格', '不及格', '平均分', '最高分', '最低分']
    for i in dengji:
        if i not in s:
            data[i] = 0
    data = data[dengji].copy()
    # Excellence rate and pass rate ('优秀' + '良好' + '良' count as passing).
    data.insert(6, '优秀率', data[['测评节目数量', '优秀']].apply(lambda x: x['优秀'] / x['测评节目数量'], axis=1))
    data.insert(6, '达标率', data[['测评节目数量', '优秀', '良好', '良']].apply(
        lambda x: (x['优秀'] + x["良好"] + x['良']) / x['测评节目数量'], axis=1))
    # Integer columns and formatted percentage columns.
    dengji = ['测评节目数量', '优秀', '良好', '良', '及格', '不及格', '最高分', '最低分']
    data[dengji] = data[dengji].astype(np.int64)
    data['平均分'] = data['平均分'].round(2)
    data['达标率%'] = data['达标率'].apply(lambda x: format(x, '.2%'))
    data['优秀率%'] = data['优秀率'].apply(lambda x: format(x, '.2%'))
    data.reset_index(inplace=True)
    data = data.rename({'index': '首播频道'}, axis='columns')
    # Write the analysis to Excel, preserving all other sheets.
    souce = pd.read_excel('database.xlsx', sheet_name=None)
    new_sheet = '按频道分'
    if new_sheet in souce:
        souce.pop(new_sheet)
    with pd.ExcelWriter('database.xlsx', engine='xlsxwriter') as writer:
        for i in souce:
            souce[i].to_excel(writer, sheet_name=i, index=False)
        data.to_excel(writer, sheet_name=new_sheet, index=False)
        # Chart source data below the main table; the hard-coded cell
        # references in the chart definitions depend on these start rows.
        data[['首播频道', '达标率%', '优秀率%']].to_excel(writer, sheet_name='按频道分', startrow=data.shape[0] + 3, index=False)
        workbook = writer.book
        worksheet = writer.sheets[new_sheet]
        chart = workbook.add_chart({'type': 'column'})
        chart.add_series({
            'name': "=按频道分!$B$11",
            'categories': '=按频道分!$A$12:$C$18',
            'values': '=按频道分!$B$12:$B$18',
        })
        chart.add_series({
            'name': "=按频道分!$C$11",
            'categories': '=按频道分!$A$12:$C$18',
            'values': '=按频道分!$C$12:$C$18',
        })
        chart.set_title({'name': '各频道达标率、优秀率'})
        chart.set_style(10)
        chart.height = 600
        chart.width = 960
        worksheet.insert_chart('D2', chart, {'x_offset': 25, 'y_offset': 10})
        data[['首播频道', '最高分', '最低分', '平均分']].to_excel(writer, sheet_name='按频道分',
                                                    startrow=(data.shape[0] + 3) * 2, index=False)
        chart = workbook.add_chart({'type': 'line'})
        chart.add_series({
            'name': "=按频道分!$B$21",
            'categories': '=按频道分!$A$22:$C$28',
            'values': '=按频道分!$B$22:$B$28',
        })
        chart.add_series({
            'name': "=按频道分!$C$21",
            'categories': '=按频道分!$A$22:$C$28',
            'values': '=按频道分!$C$22:$C$28',
        })
        chart.add_series({
            'name': "=按频道分!$D$21",
            'categories': '=按频道分!$A$22:$C$28',
            'values': '=按频道分!$D$22:$D$28',
        })
        chart.set_title({'name': '各频道分数比较'})
        chart.set_style(10)
        chart.height = 600
        chart.width = 960
        worksheet.insert_chart('D38', chart, {'x_offset': 25, 'y_offset': 10})
    # Insert the summary table into the report document.
    dengji = ['首播频道', '测评节目数量', '优秀', '良好', '良', '及格', '不及格', '达标率%', '优秀率%', '平均分']
    df = data[dengji]
    print(df)
    document = Document(file)
    # Locate the target heading; the table work is nested inside the matched
    # branch (the original dedented it so it ran for every heading paragraph).
    for p in document.paragraphs:
        # Raw string so '\d' is a regex digit class, not an invalid escape.
        if re.match(r"^Heading \d+$", p.style.name):
            if p.text == '首播频道分析表':
                print(p.text)
                table = document.add_table(rows=df.shape[0] + 2, cols=df.shape[1], style='Table Grid')
                move_table_after(table, p)
                table.alignment = WD_TABLE_ALIGNMENT.CENTER
                table.autofit = False
                # Row heights: two header rows plus one row per channel.
                for i in range(df.shape[0] + 2):
                    table.rows[i].height = Mm(10)
                # Columns that span both header rows.
                for i in [0, 1, 7, 8, 9]:
                    table.cell(0, i).merge(table.cell(1, i))
                # Group header: passing grades.
                cell1 = table.cell(0, 2).merge(table.cell(0, 4))
                cell1.text = '技术质量达标'
                cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                shading_elm_1 = parse_xml(r'<w:shd {} w:fill="{color_value}"/>'.format(nsdecls('w'), color_value='#D6E3BC'))
                cell1._tc.get_or_add_tcPr().append(shading_elm_1)
                for run in cell1.paragraphs[0].runs:
                    font = run.font
                    font.size = Pt(12)
                    font.bold = True
                # Group header: failing grades.
                cell1 = table.cell(0, 5).merge(table.cell(0, 6))
                cell1.text = '技术质量不达标'
                cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                shading_elm_1 = parse_xml(r'<w:shd {} w:fill="{color_value}"/>'.format(nsdecls('w'), color_value='#E5B8B7'))
                cell1._tc.get_or_add_tcPr().append(shading_elm_1)
                for run in cell1.paragraphs[0].runs:
                    font = run.font
                    font.size = Pt(12)
                    font.bold = True
                # Column widths (mm) and per-column header colours.
                table_width = {'频道': 10.9, '首播频道': 22, '测评节目数量': 14, '优秀': 13, '良好': 13, '良': 13,
                               '及格': 16, '不及格': 19, '达标率%': 22, '优秀率%': 22, '平均分': 19}
                table_colors = ['#8DB3E2', '#8DB3E2', '#3AA315', '#9BBB59', '#943634', '#C0504D', '#D8D8D8',
                                '#C6D9F1', '#C6D9F1', '#C6D9F1']
                columns_name = df.columns.to_list()
                for i in columns_name:
                    table.columns[columns_name.index(i)].width = Mm(table_width[i])
                    cell1 = table.cell(1, columns_name.index(i))
                    # Drop the trailing '%' from the rate column headers.
                    if i in ['优秀率%', '达标率%']:
                        cell1.text = i[:-1]
                    else:
                        cell1.text = i
                    cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                    cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                    # Per-column header colour.
                    table_color = table_colors[columns_name.index(i)]
                    shading_elm_1 = parse_xml(r'<w:shd {} w:fill="{color_value}"/>'.format(nsdecls('w'), color_value=table_color))
                    cell1._tc.get_or_add_tcPr().append(shading_elm_1)
                    for run in cell1.paragraphs[0].runs:
                        font = run.font
                        font.size = Pt(12)
                        font.bold = True
                        # White text on the dark grade columns.
                        if i in ['优秀', '良好', '良', '及格']:
                            font.color.rgb = RGBColor(0xff, 0xff, 0xff)
                # Data rows.
                for index, row in df.iterrows():
                    for i in range(len(row)):
                        cell1 = table.cell(index + 2, i)
                        cell1.text = str(row[columns_name[i]])
                        cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                        cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                        for run in cell1.paragraphs[0].runs:
                            font = run.font
                            font.size = Pt(12)
    document.save(file)
def rank_didian(file='demo.docx'):
    """Insert the recording-location ranking table ('录制地点排序表').

    Reads 'database.xlsx', sorts programs by score, regroups them by
    recording category ('引进节目', '外景录制', '台外录制', '台内录制'),
    with the in-house ('台内录制') group additionally ordered by studio
    size.  The result is inserted as a centered table after the heading
    '录制地点排序表', merging the category cells and the in-house studio
    cells.

    Args:
        file: path of the .docx report to update in place.
    """
    df = pd.read_excel('database.xlsx')
    # Sort by total / subjective / objective score and sequence number.
    df = df.sort_values(by=['总分', '主观', '客观', '序号'], ascending=[False, False, False, True])
    df.reset_index(drop=True, inplace=True)
    didian = ['引进节目', '外景录制', '台外录制', '台内录制']
    result = {}
    for i in didian:
        # Rows for this recording category.
        df_temp = df[df['地点'] == i].copy()
        if i == '台内录制':
            # Order in-house programs by studio size — the numeric prefix of
            # '录制地点' (e.g. '800演播室' -> 800) — then by score.
            df_temp['演播室'] = df_temp['录制地点'].apply(lambda x: x[:-3])
            df_temp['演播室'] = df_temp['演播室'].astype(np.int64)
            df_temp = df_temp.sort_values(by=['演播室', '总分', '主观', '客观', '序号'],
                                          ascending=[False, False, False, False, True])
            df_temp.drop(['演播室'], axis=1, inplace=True)
        result[i] = df_temp
    # NOTE: df_temp intentionally still holds the last ('台内录制') subset
    # after the loop; the studio-cell merge below depends on it.
    df = pd.concat(result)
    df.reset_index(drop=True, inplace=True)
    # Columns shown in the report; .copy() so the mutation below is safe.
    df = df[['地点', '录制地点', '节目名称', '首播频道', '总分', '等级']].copy()
    df['总分'] = df['总分'].astype(np.int64)
    document = Document(file)
    # Locate the target heading; the table work is nested inside the matched
    # branch (the original dedented it so it ran for every heading paragraph).
    for p in document.paragraphs:
        # Raw string so '\d' is a regex digit class, not an invalid escape.
        if re.match(r"^Heading \d+$", p.style.name):
            if p.text == '录制地点排序表':
                print(p.text)
                table = document.add_table(rows=df.shape[0] + 1, cols=df.shape[1], style='Table Grid')
                move_table_after(table, p)
                table.alignment = WD_TABLE_ALIGNMENT.CENTER
                table.autofit = False
                # Display names for the score/grade columns.
                df.rename({'等级': '测评等级', '总分': '综合得分'}, axis='columns', inplace=True)
                # Column widths in millimetres, keyed by column name.
                table_width = {'地点': 20.6, '频道': 7.4, '节目名称': 42.7, '首播频道': 24.7, '播出时间': 21.5, '录制地点': 23.5,
                               '制作方式': 18, '制片人': 14.4, '主观': 7.4, '客观': 9.1, '综合得分': 20.4, '测评等级': 19.3}
                columns_name = df.columns.to_list()
                for i in columns_name:
                    table.columns[columns_name.index(i)].width = Mm(table_width[i])
                    cell1 = table.cell(0, columns_name.index(i))
                    # Header text: '地点' displays as '形式', '录制地点' as '录制方式'.
                    if i == '地点':
                        cell1.text = '形式'
                    elif i == '录制地点':
                        cell1.text = '录制方式'
                    else:
                        cell1.text = i
                    cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                    cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                    for run in cell1.paragraphs[0].runs:
                        font = run.font
                        font.size = Pt(10)
                        font.bold = True
                    # Header background colour.
                    shading_elm_1 = parse_xml(r'<w:shd {} w:fill="{color_value}"/>'.format(nsdecls('w'), color_value='#8DB4E2'))
                    cell1._tc.get_or_add_tcPr().append(shading_elm_1)
                # Data rows.
                table.rows[0].height = Mm(7.5)  # header row height
                for index, row in df.iterrows():
                    table.rows[index + 1].height = Mm(7.5)  # data row height
                    for i in range(len(row)):
                        cell1 = table.cell(index + 1, i)
                        cell1.text = str(row[columns_name[i]])
                        cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                        cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                        for run in cell1.paragraphs[0].runs:
                            font = run.font
                            font.size = Pt(10)
                # Merge the category cells (rows of a category are contiguous).
                temp = df.loc[:, '地点'].value_counts()
                j = 1
                for i in didian:
                    cell1 = table.cell(j, 0).merge(table.cell(j + temp[i] - 1, 0))
                    cell1.text = i
                    cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                    cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                    j += temp[i]
                    for run in cell1.paragraphs[0].runs:
                        font = run.font
                        font.size = Pt(10)
                # Merge the studio cells of the in-house group, which occupies
                # the last len(df_temp) rows of the table.
                temp = df_temp['录制地点'].value_counts()
                j = len(df) - len(df_temp) + 1
                tai_inter = ['800演播室', '400演播室', '300演播室', '260演播室', '120演播室', '110演播室', '70演播室', ]
                for i in tai_inter:
                    cell1 = table.cell(j, 1).merge(table.cell(j + temp[i] - 1, 1))
                    cell1.text = i
                    cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                    cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                    j += temp[i]
                    for run in cell1.paragraphs[0].runs:
                        font = run.font
                        font.size = Pt(10)
    document.save(file)
def fenxi_didian(file='demo.docx'):
    """Build the recording-location analysis ('录制地点分析表'), write it to
    Excel with a chart, and insert the summary table into the report.

    Computes per-category (and, for in-house production, per-studio) grade
    counts, max/min/mean total scores, and pass/excellence rates.  The
    aggregate is written to the '按地点分' sheet of 'database.xlsx' and, as
    a two-row-header table, after the heading '录制地点分析表' in *file*.

    Args:
        file: path of the .docx report to update in place.
    """
    df = pd.read_excel('database.xlsx')
    # .copy() so the column mutation below does not act on a slice.
    df = df[['地点', '录制地点', '节目名称', '频道', '总分', '等级']].copy()
    df['总分'] = df['总分'].astype(np.int64)
    fenxi = []
    didian = ['引进节目', '外景录制', '台外录制', '台内录制']
    for i in didian:
        # Rows for this recording category.
        df_temp = df[df['地点'] == i]
        # Count programs per grade.
        temp = df_temp.loc[:, '等级'].value_counts()
        # Name the series after the category (becomes the row label below).
        temp = temp.rename(i)
        # Max / min / mean total score for the category.
        s = pd.Series([df_temp['总分'].max(), df_temp['总分'].min(), df_temp['总分'].mean()],
                      index=['最高分', '最低分', '平均分'])
        s = s.rename(i)
        # Series.append was removed in pandas 2.0 — use pd.concat instead.
        temp = pd.concat([temp, s])
        fenxi.append(temp)
    tai_inter = ['800演播室', '400演播室', '300演播室', '260演播室', '120演播室', '110演播室', '70演播室', ]
    for i in tai_inter:
        # Same statistics per in-house studio.
        df_temp = df[df['录制地点'] == i]
        temp = df_temp.loc[:, '等级'].value_counts()
        temp = temp.rename(i)
        s = pd.Series([df_temp['总分'].max(), df_temp['总分'].min(), df_temp['总分'].mean()],
                      index=['最高分', '最低分', '平均分'])
        s = s.rename(i)
        temp = pd.concat([temp, s])
        fenxi.append(temp)
    # One row per category/studio.
    data = pd.DataFrame(fenxi)
    data.fillna(0, inplace=True)
    # Number of programs per category, and per studio for in-house rows.
    temp = df.loc[:, '地点'].value_counts()
    temp1 = df[df['地点'] == '台内录制'].loc[:, '录制地点'].value_counts()
    temp = pd.concat([temp, temp1])
    data.insert(0, '节目数量', temp)
    # Make sure every expected column exists, even if no program got that grade.
    s = data.columns.to_list()
    dengji = ['节目数量', '优秀', '良好', '良', '及格', '不及格', '平均分', '最高分', '最低分']
    for i in dengji:
        if i not in s:
            data[i] = 0
    data = data[dengji].copy()
    # Excellence rate and pass rate ('优秀' + '良好' + '良' count as passing).
    data.insert(6, '优秀率', data[['节目数量', '优秀']].apply(lambda x: x['优秀'] / x['节目数量'], axis=1))
    data.insert(6, '达标率', data[['节目数量', '优秀', '良好', '良']].apply(
        lambda x: (x['优秀'] + x["良好"] + x['良']) / x['节目数量'], axis=1))
    # Integer columns and formatted percentage columns.
    dengji = ['节目数量', '优秀', '良好', '良', '及格', '不及格', '最高分', '最低分']
    data[dengji] = data[dengji].astype(np.int64)
    data['平均分'] = data['平均分'].round(2)
    data['达标率%'] = data['达标率'].apply(lambda x: format(x, '.2%'))
    data['优秀率%'] = data['优秀率'].apply(lambda x: format(x, '.2%'))
    data.reset_index(inplace=True)
    data = data.rename({'index': '地点'}, axis='columns')
    # Write the analysis to Excel, preserving all other sheets.
    souce = pd.read_excel('database.xlsx', sheet_name=None)
    new_sheet = '按地点分'
    if new_sheet in souce:
        souce.pop(new_sheet)
    with pd.ExcelWriter('database.xlsx', engine='xlsxwriter') as writer:
        for i in souce:
            souce[i].to_excel(writer, sheet_name=i, index=False)
        data.to_excel(writer, sheet_name=new_sheet, index=False)
        # Chart source data; the hard-coded cell references in the chart
        # definition below depend on these start rows.
        tu1 = data[['地点', '达标率%', '优秀率%']]
        tu1 = tu1[tu1['地点'].isin(didian)]
        tu1.to_excel(writer, sheet_name='按地点分', startrow=14, index=False)
        tu2 = data[['地点', '达标率%', '优秀率%']]
        tu2 = tu2[tu2['地点'].isin(tai_inter)]
        tu2.to_excel(writer, sheet_name='按地点分', startrow=19, index=False)
        workbook = writer.book
        worksheet = writer.sheets[new_sheet]
        chart = workbook.add_chart({'type': 'column'})
        chart.add_series({
            'name': "=按地点分!$B$15",
            'categories': '=按地点分!$A$16:$C$19',
            'values': '=按地点分!$B$16:$B$19',
        })
        chart.add_series({
            'name': "=按地点分!$C$15",
            'categories': '=按地点分!$A$16:$C$19',
            'values': '=按地点分!$C$16:$C$19',
        })
        chart.set_title({'name': '各录制地点达标率、优秀率'})
        chart.set_style(10)
        chart.height = 600
        chart.width = 960
        worksheet.insert_chart('D2', chart, {'x_offset': 25, 'y_offset': 10})
    # Insert the summary table into the report document.
    dengji = ['地点', '节目数量', '优秀', '良好', '良', '及格', '不及格', '达标率%', '优秀率%', ]
    # .copy() so the in-place rename below does not act on a slice.
    df = data[dengji].copy()
    print(df)
    document = Document(file)
    # Locate the target heading; the table work is nested inside the matched
    # branch (the original dedented it so it ran for every heading paragraph).
    for p in document.paragraphs:
        # Raw string so '\d' is a regex digit class, not an invalid escape.
        if re.match(r"^Heading \d+$", p.style.name):
            if p.text == '录制地点分析表':
                print(p.text)
                table = document.add_table(rows=df.shape[0] + 2, cols=df.shape[1], style='Table Grid')
                move_table_after(table, p)
                table.alignment = WD_TABLE_ALIGNMENT.CENTER
                table.autofit = False
                # Display names for the header.
                df.rename({'节目数量': '测评节目数量', '地点': '录制部门'}, axis='columns', inplace=True)
                # Row heights: two header rows plus one row per entry.
                for i in range(df.shape[0] + 2):
                    table.rows[i].height = Mm(10)
                # Columns that span both header rows.
                for i in [0, 1, 7, 8]:
                    table.cell(0, i).merge(table.cell(1, i))
                # Group header: passing grades.
                cell1 = table.cell(0, 2).merge(table.cell(0, 4))
                cell1.text = '技术质量达标'
                cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                shading_elm_1 = parse_xml(r'<w:shd {} w:fill="{color_value}"/>'.format(nsdecls('w'), color_value='#D6E3BC'))
                cell1._tc.get_or_add_tcPr().append(shading_elm_1)
                for run in cell1.paragraphs[0].runs:
                    font = run.font
                    font.size = Pt(12)
                    font.bold = True
                # Group header: failing grades.
                cell1 = table.cell(0, 5).merge(table.cell(0, 6))
                cell1.text = '技术质量不达标'
                cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                shading_elm_1 = parse_xml(r'<w:shd {} w:fill="{color_value}"/>'.format(nsdecls('w'), color_value='#E5B8B7'))
                cell1._tc.get_or_add_tcPr().append(shading_elm_1)
                for run in cell1.paragraphs[0].runs:
                    font = run.font
                    font.size = Pt(12)
                    font.bold = True
                # Column widths (mm) and per-column header colours.
                table_width = {'录制部门': 25.3, '测评节目数量': 14, '优秀': 13, '良好': 13, '良': 13,
                               '及格': 16, '不及格': 19, '达标率%': 22, '优秀率%': 22, '平均分': 19}
                table_colors = ['#8DB3E2', '#8DB3E2', '#3AA315', '#9BBB59', '#943634', '#C0504D', '#D8D8D8',
                                '#C6D9F1', '#C6D9F1', '#C6D9F1']
                columns_name = df.columns.to_list()
                for i in columns_name:
                    table.columns[columns_name.index(i)].width = Mm(table_width[i])
                    cell1 = table.cell(1, columns_name.index(i))
                    # Drop the trailing '%' from the rate column headers.
                    if i in ['优秀率%', '达标率%']:
                        cell1.text = i[:-1]
                    else:
                        cell1.text = i
                    cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                    cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                    # Per-column header colour.
                    table_color = table_colors[columns_name.index(i)]
                    shading_elm_1 = parse_xml(r'<w:shd {} w:fill="{color_value}"/>'.format(nsdecls('w'), color_value=table_color))
                    cell1._tc.get_or_add_tcPr().append(shading_elm_1)
                    for run in cell1.paragraphs[0].runs:
                        font = run.font
                        font.size = Pt(12)
                        font.bold = True
                        # White text on the dark grade columns.
                        if i in ['优秀', '良好', '良', '及格']:
                            font.color.rgb = RGBColor(0xff, 0xff, 0xff)
                # Data rows; the first four rows (the category totals) are bold.
                for index, row in df.iterrows():
                    for i in range(len(row)):
                        cell1 = table.cell(index + 2, i)
                        cell1.text = str(row[columns_name[i]])
                        cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                        cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                        for run in cell1.paragraphs[0].runs:
                            font = run.font
                            font.size = Pt(12)
                            if index in [0, 1, 2, 3]:
                                font.bold = True
    document.save(file)
def rank_fangshi(file='demo.docx'):
    """Insert the production-method ranking table ('制作方式排序表').

    Reads 'database.xlsx', sorts programs by score, regroups them into
    off-site ('台外制作') and in-house ('台内制作') production — ordering
    each group by a fixed production-method list — and inserts the result
    as a centered table after the heading '制作方式排序表', merging the
    group and production-method cells.

    Args:
        file: path of the .docx report to update in place.
    """
    df = pd.read_excel('database.xlsx')
    # Sort by total / subjective / objective score and sequence number.
    df = df.sort_values(by=['总分', '主观', '客观', '序号'], ascending=[False, False, False, True])
    df.reset_index(drop=True, inplace=True)
    df['总分'] = df['总分'].astype(np.int64)
    fangshi = ['台外制作', '台内制作']
    # Production methods in display order, per group.
    tai_outer = ['北京制作', '台外制作', '联合制作']
    tai_inter = ['包装制作', '高清自制', '直播', '120自制', '影视自制', '少儿自制', '农民自制', '广告自制', '录播']
    result = {}
    for i in fangshi:
        # Rows for this production group.
        df_temp = df[df['方式'] == i].copy()
        # Order rows by the position of their production method in the
        # group's fixed list, then by score.
        if i == '台内制作':
            df_temp['方式1'] = df_temp['制作方式'].apply((lambda x: tai_inter.index(x)))
        else:
            df_temp['方式1'] = df_temp['制作方式'].apply((lambda x: tai_outer.index(x)))
        df_temp = df_temp.sort_values(by=['方式1', '总分', '主观', '客观', '序号'],
                                      ascending=[True, False, False, False, True])
        df_temp.drop(['方式1'], axis=1, inplace=True)
        result[i] = df_temp
    df = pd.concat(result)
    df.reset_index(drop=True, inplace=True)
    # Columns shown in the report; .copy() so the rename below is safe.
    df = df[['方式', '制作方式', '节目名称', '首播频道', '总分', '等级']].copy()
    document = Document(file)
    # Locate the target heading; the table work is nested inside the matched
    # branch (the original dedented it so it ran for every heading paragraph).
    for p in document.paragraphs:
        # Raw string so '\d' is a regex digit class, not an invalid escape.
        if re.match(r"^Heading \d+$", p.style.name):
            if p.text == '制作方式排序表':
                print(p.text)
                table = document.add_table(rows=df.shape[0] + 1, cols=df.shape[1], style='Table Grid')
                move_table_after(table, p)
                table.alignment = WD_TABLE_ALIGNMENT.CENTER
                table.autofit = False
                # Display names for the score/grade columns.
                df.rename({'等级': '测评等级', '总分': '综合得分'}, axis='columns', inplace=True)
                # Column widths in millimetres, keyed by column name.
                table_width = {'方式': 20.6, '节目名称': 47.7, '首播频道': 23.2, '播出时间': 21.5, '录制地点': 23.5,
                               '制作方式': 20, '制片人': 14.4, '主观': 7.4, '客观': 9.1, '综合得分': 18.9, '测评等级': 23.7}
                columns_name = df.columns.to_list()
                for i in columns_name:
                    table.columns[columns_name.index(i)].width = Mm(table_width[i])
                    cell1 = table.cell(0, columns_name.index(i))
                    # Header text: '方式' displays as '形式'.
                    if i == '方式':
                        cell1.text = '形式'
                    else:
                        cell1.text = i
                    cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                    cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                    # Header background colour.
                    shading_elm_1 = parse_xml(r'<w:shd {} w:fill="{color_value}"/>'.format(nsdecls('w'), color_value='#8DB4E2'))
                    cell1._tc.get_or_add_tcPr().append(shading_elm_1)
                    for run in cell1.paragraphs[0].runs:
                        font = run.font
                        font.size = Pt(10)
                        font.bold = True
                # Data rows.
                table.rows[0].height = Mm(7.5)  # header row height
                for index, row in df.iterrows():
                    table.rows[index + 1].height = Mm(7.5)  # data row height
                    for i in range(len(row)):
                        cell1 = table.cell(index + 1, i)
                        cell1.text = str(row[columns_name[i]])
                        cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                        cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                        for run in cell1.paragraphs[0].runs:
                            font = run.font
                            font.size = Pt(10)
                # Merge the group ('方式') cells (each group's rows are contiguous).
                temp = df.loc[:, '方式'].value_counts()
                j = 1
                for i in fangshi:
                    cell1 = table.cell(j, 0).merge(table.cell(j + temp[i] - 1, 0))
                    cell1.text = i
                    cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                    cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                    j += temp[i]
                    for run in cell1.paragraphs[0].runs:
                        font = run.font
                        font.size = Pt(10)
                # Merge the production-method cells.
                temp = df['制作方式'].value_counts()  # rows per production method
                # Full display order: off-site methods followed by in-house.
                tai_outer.extend(tai_inter)
                # Drop methods absent from this report.
                tai = []
                for i in tai_outer:
                    if i in temp.index.to_list():
                        tai.append(i)
                j = 1  # first data row of the merge
                for i in tai:
                    cell1 = table.cell(j, 1).merge(table.cell(j + temp[i] - 1, 1))
                    cell1.text = i
                    cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                    cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                    j += temp[i]
                    for run in cell1.paragraphs[0].runs:
                        font = run.font
                        font.size = Pt(10)
    document.save(file)
def fenxi_fangshi(file='demo.docx'):
    """Analyse programme scores grouped by production mode.

    Reads database.xlsx, aggregates grade counts and pass/excellence rates
    per production mode (方式) and per production method (制作方式), writes the
    aggregated table and a bar chart to the '按方式分' sheet of database.xlsx,
    then inserts a summary table into the Word report *file* directly under
    the heading '制作方式分析表'.
    """
    df = pd.read_excel('database.xlsx')
    df = df[['方式', '制作方式', '节目名称', '频道', '总分', '等级']]
    # df['主观'] = df['主观'].round(0).astype(np.int64)
    # df['客观'] = df['客观'].astype(np.int64)
    df['总分'] = df['总分'].astype(np.int64)
    fenxi = []
    # top-level grouping: outside-station vs in-station production
    fangshi = ['台外制作', '台内制作']
    tai_outer = ['北京制作', '台外制作', '联合制作']
    tai_inter = ['包装制作', '高清自制', '直播', '120自制', '影视自制', '少儿自制', '农民自制', '广告自制', '录播']
    tai_outer.extend(tai_inter)  # tai_outer now lists every production method, outer-station ones first
    for i in fangshi:
        # filter rows for this production mode
        df_temp = df[df['方式'] == i]
        # count programmes per grade
        temp = df_temp.loc[:, '等级'].value_counts()
        # name the series after the mode
        temp = temp.rename(i)
        # max / min / mean of the total score
        s = pd.Series([df_temp['总分'].max(), df_temp['总分'].min(), df_temp['总分'].mean()],
                      index=['最高分', '最低分', '平均分'])
        # name the series after the mode
        s = s.rename(i)
        # append the score stats to the grade counts
        # NOTE(review): Series.append was removed in pandas 2.x -- confirm
        # the pandas version this project pins.
        temp = temp.append(s)
        # print(temp)
        # collect one summary row per mode
        fenxi.append(temp)
        # second level: production methods occurring within this mode
        temp1 = df_temp.drop_duplicates(subset='制作方式', keep='first')
        temp2 = temp1['制作方式'].tolist()
        temp = []
        for k in tai_outer:
            if k in temp2:
                temp.append(k)
        # `temp` is rebound inside this loop body, but iteration continues
        # over the original list object built above, so this is safe.
        for j in temp:
            # filter rows for this production method
            df_temp = df[df['制作方式'] == j]
            # count programmes per grade
            temp = df_temp.loc[:, '等级'].value_counts()
            # name the series after the method
            temp = temp.rename(j)
            # max / min / mean of the total score
            s = pd.Series([df_temp['总分'].max(), df_temp['总分'].min(), df_temp['总分'].mean()],
                          index=['最高分', '最低分', '平均分'])
            # name the series after the method
            s = s.rename(j)
            # append the score stats to the grade counts
            temp = temp.append(s)
            # print(temp)
            # collect one summary row per method
            fenxi.append(temp)
    # build a DataFrame: one row per mode/method
    data = pd.DataFrame(fenxi)
    # absent grades become 0 instead of NaN
    data.fillna(0, inplace=True)
    # add any expected column missing entirely from the data
    s = data.columns.to_list()
    dengji = ['节目数量', '优秀', '良好', '良', '及格', '不及格', '平均分', '最高分', '最低分']
    for i in dengji:
        if i in s:
            pass
        else:
            data[i] = 0
    # total programme count per row
    data['节目数量'] = data[['优秀', '良好', '良', '及格', '不及格']].sum(axis=1)
    # data = data[dengji]
    # NOTE(review): these divisions raise ZeroDivisionError when 节目数量 is 0
    # -- confirm every mode/method always has at least one programme.
    data.insert(6, '优秀率', data[['节目数量', '优秀']].apply(lambda x: x['优秀'] / x['节目数量'], axis=1))
    data.insert(6, '达标率', data[['节目数量', '优秀', '良好', '良']].apply(
        lambda x: (x['优秀'] + x["良好"] + x['良']) / x['节目数量'], axis=1))
    # cast count columns to integer
    dengji = ['节目数量', '优秀', '良好', '良', '及格', '不及格', '最高分', '最低分']
    data[dengji] = data[dengji].astype(np.int64)
    data['平均分'] = data['平均分'].round(2)
    data['达标率%'] = data['达标率'].apply(lambda x: format(x, '.2%'))
    data['优秀率%'] = data['优秀率'].apply(lambda x: format(x, '.2%'))
    data.reset_index(inplace=True)
    data = data.rename({'index': '方式'}, axis='columns')
    # data.insert(0, '频道', pindao)
    # data.reset_index(drop=True,inplace=True)
    # print(data)
    # write the aggregated table back to Excel; re-read all existing sheets
    # first because the xlsxwriter engine rewrites the whole file
    souce = pd.read_excel('database.xlsx', sheet_name=None)
    new_sheet = '按方式分'
    if new_sheet in souce:
        souce.pop(new_sheet)
    with pd.ExcelWriter('database.xlsx', engine='xlsxwriter') as writer:
        for i in souce:
            souce[i].to_excel(writer, sheet_name=i, index=False)
        data.to_excel(writer, sheet_name=new_sheet, index=False)
        # chart source data
        tu1 = data[['方式', '达标率%', '优秀率%']]
        tu1 = tu1[tu1['方式'].isin(tai_outer)]
        tu1.to_excel(writer, sheet_name='按方式分', startrow=14, index=False)
        tu2 = data[['方式', '达标率%', '优秀率%']]
        tu2 = tu2[tu2['方式'].isin(tai_inter)]
        tu2.to_excel(writer, sheet_name='按方式分', startrow=19, index=False)
        # insert the chart
        workbook = writer.book
        worksheet = writer.sheets[new_sheet]
        chart = workbook.add_chart({'type': 'column'})
        # NOTE(review): the ranges below are hard-coded to rows 11-18; they
        # only line up when the main table has the expected number of rows.
        chart.add_series({
            'name': "=按方式分!$B$11",
            'categories': '=按方式分!$A$12:$C$18',
            'values': '=按方式分!$B$12:$B$18',
        })
        chart.add_series({
            'name': "=按方式分!$C$11",
            'categories': '=按方式分!$A$12:$C$18',
            'values': '=按方式分!$C$12:$C$18',
        })
        chart.set_title({'name': '各制作方式达标率、优秀率'})
        # chart.set_x_axis({'name': 'Test number'})
        # chart.set_y_axis({'name': 'Sample length (mm)'})
        chart.set_style(10)
        chart.height = 600
        chart.width = 960
        worksheet.insert_chart('D2', chart, {'x_offset': 25, 'y_offset': 10})
    # write the summary table into the Word report
    dengji = ['方式', '节目数量', '优秀', '良好', '良', '及格', '不及格', '达标率%', '优秀率%', '平均分']
    df = data[dengji]
    print(df)
    document = Document(file)
    # insert the table right after the target heading
    for p in document.paragraphs:
        if re.match("^Heading \d+$", p.style.name):
            if p.text == '制作方式分析表':
                print(p.text)
                table = document.add_table(rows=df.shape[0] + 2, cols=df.shape[1], style='Table Grid')
                move_table_after(table, p)
                table.alignment = WD_TABLE_ALIGNMENT.CENTER
                table.autofit = False
                # header captions
                # NOTE(review): renaming on a column selection of `data` may
                # trigger SettingWithCopyWarning -- df is a slice here.
                df.rename({'节目数量': '测评节目数量', '方式': '制作地点'}, axis='columns', inplace=True)
                # row heights
                for i in range(df.shape[0] + 2):
                    table.rows[i].height = Mm(10)
                # merge the two header rows for single-level columns
                for i in [0, 1, 7, 8, 9]:
                    table.cell(0, i).merge(table.cell(1, i))
                cell1 = table.cell(0, 2).merge(table.cell(0, 4))
                cell1.text = '技术质量达标'
                cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                # header background colour
                shading_elm_1 = parse_xml(r'<w:shd {} w:fill="{color_value}"/>'.format(nsdecls('w'), color_value='#D6E3BC'))
                cell1._tc.get_or_add_tcPr().append(shading_elm_1)
                # font size
                for run in cell1.paragraphs[0].runs:
                    font = run.font
                    font.size = Pt(12)
                    font.bold = True
                cell1 = table.cell(0, 5).merge(table.cell(0, 6))
                cell1.text = '技术质量不达标'
                cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                # header background colour
                shading_elm_1 = parse_xml(r'<w:shd {} w:fill="{color_value}"/>'.format(nsdecls('w'), color_value='#E5B8B7'))
                cell1._tc.get_or_add_tcPr().append(shading_elm_1)
                # font size
                for run in cell1.paragraphs[0].runs:
                    font = run.font
                    font.size = Pt(12)
                    font.bold = True
                # column widths (mm) and per-column header colours
                table_width = {'制作地点': 21.4, '测评节目数量': 14, '优秀': 13, '良好': 13, '良': 13,
                               '及格': 16, '不及格': 19, '达标率%': 22, '优秀率%': 22, '平均分': 19}
                table_colors = ['#8DB3E2', '#8DB3E2', '#3AA315', '#9BBB59', '#943634', '#C0504D', '#D8D8D8',
                                '#C6D9F1', '#C6D9F1', '#C6D9F1']
                # column names
                columns_name = df.columns.to_list()
                for i in columns_name:
                    # column width
                    table.columns[columns_name.index(i)].width = Mm(table_width[i])
                    cell1 = table.cell(1, columns_name.index(i))
                    if i in ['优秀率%', '达标率%']:
                        cell1.text = i[:-1]
                    else:
                        cell1.text = i
                    cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                    cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                    # header cell colour
                    table_color = table_colors[columns_name.index(i)]
                    shading_elm_1 = parse_xml(r'<w:shd {} w:fill="{color_value}"/>'.format(nsdecls('w'), color_value=table_color))
                    cell1._tc.get_or_add_tcPr().append(shading_elm_1)
                    # font size (white text on the darker grade columns)
                    for run in cell1.paragraphs[0].runs:
                        font = run.font
                        font.size = Pt(12)
                        font.bold = True
                        if i in ['优秀', '良好', '良', '及格']:
                            font.color.rgb = RGBColor(0xff, 0xff, 0xff)
                for index, row in df.iterrows():
                    for i in range(len(row)):
                        cell1 = table.cell(index + 2, i)
                        cell1.text = str(row[columns_name[i]])
                        cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                        cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                        # font size; bold the two top-level (mode) summary rows
                        for run in cell1.paragraphs[0].runs:
                            font = run.font
                            font.size = Pt(12)
                            if index in [0, 3]:
                                font.bold = True
                '''
                # make picture
                dengji = ['达标率', '优秀率']
                df_lv = data[dengji]
                # df_lv[u'线损率'] = df_lv[u'线损率'].str.strip('%').astype(float) / 100
                df_lv.index = pindao
                print(df_lv.info())
                x_names = ['a','b','c']
                y_values = [1,2,3]
                plt_bar(x_names, y_values, "柱状图.png")
                plt_plot(x_names, y_values, "折线图.png")
                plt_scatter(x_names, y_values, "散点图.png")
                labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'
                sizes = [15, 30, 45, 10]
                plt_pie(labels, sizes, "饼状图.png")
                document.add_picture('柱状图.png', width=Inches(6.25))
                document.add_picture('折线图.png', width=Inches(6.25))
                document.add_picture('饼状图.png', width=Inches(6.25))
                document.add_picture('散点图.png', width=Inches(6.25))
                #设置表格宋体大小
                for row in table.rows:
                    for cell in row.cells:
                        paragraphs = cell.paragraphs
                        for paragraph in paragraphs:
                            for run in paragraph.runs:
                                font = run.font
                                font.size = Pt(10)
                '''
    document.save(file)
def Experts_zongping_youxiu(file='demo.docx'):
    """Insert the expert-review table for programmes graded 优秀 (excellent).

    Each programme occupies four table rows: one data row plus a merged
    three-row block holding the expert comment (评语). The table is placed
    right after the heading '一、优秀节目评语' in the Word report *file*.
    """
    df = pd.read_excel('database.xlsx')
    # sort by total score, ties broken by subjective, objective, sequence no.
    df = df.sort_values(by=['总分', '主观', '客观', '序号'], ascending=[False, False, False, True])
    df.reset_index(drop=True, inplace=True)
    df['主观'] = df['主观'].round(0).astype(np.int64)
    df['客观'] = df['客观'].astype(np.int64)
    df['总分'] = df['总分'].astype(np.int64)
    # keep only programmes graded 优秀
    df = df[df['等级'].isin(['优秀'])]
    # temp1 = df[:11]
    # temp2 = df[-11:]
    # df = pd.concat([temp1, temp2])
    # df = df.sort_values(by='总分', ascending=False)
    df.reset_index(drop=True, inplace=True)
    print(df)
    df = df[['排名', '节目名称', '首播频道', '播出时间',
             '制片人', '主观', '客观', '总分', '等级', '评语']]
    df.rename({'总分': '综合'}, axis='columns', inplace=True)
    document = Document(file)
    # insert the table right after the target heading
    for p in document.paragraphs:
        if re.match("^Heading \d+$", p.style.name):
            if p.text == '一、优秀节目评语':
                print(p.text)
                # four rows per programme: data row + merged comment block
                table = document.add_table(rows=df.shape[0]*4+1, cols=df.shape[1]-1, style='Table Grid')
                # move the table to the target position
                move_table_after(table, p)
                # centre the table
                table.alignment = WD_TABLE_ALIGNMENT.CENTER
                table.autofit = False
                # column widths (mm)
                table_width = {'排名': 12.5, '节目名称': 33.2, '首播频道': 21.1, '播出时间': 26.5, '录制地点': 21.6,
                               '制作方式': 18, '制片人': 18.3, '主观': 12.7, '客观': 12.7, '综合': 12.7, '等级': 13.3}
                # column names; 评语 gets a merged block, not its own column
                columns_name = df.columns.to_list()
                columns_name.pop(columns_name.index('评语'))
                for i in columns_name:
                    # column width
                    table.columns[columns_name.index(i)].width = Mm(table_width[i])
                    # header cell
                    cell1 = table.cell(0, columns_name.index(i))
                    # caption
                    cell1.text = i
                    # centred horizontally
                    cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                    # and vertically
                    cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                    # header background colour
                    shading_elm_1 = parse_xml(r'<w:shd {} w:fill="{color_value}"/>'.format(nsdecls('w'), color_value='#8DB4E2'))
                    cell1._tc.get_or_add_tcPr().append(shading_elm_1)
                    # font size
                    for run in cell1.paragraphs[0].runs:
                        font = run.font
                        font.size = Pt(12)
                        font.bold = True
                # data rows
                table.rows[0].height = Mm(12.5)  # header row height
                for index, row in df.iterrows():
                    for j in range(1):
                        table.rows[index*4+j+1].height = Mm(7.5)  # data row height
                        # print(index*4+j+1)
                    print(index+1, str(row['节目名称']))
                    for i in range(len(row)-1):
                        # programme data
                        cell1 = table.cell(index * 4 + 1, i)
                        cell1.text = str(row[columns_name[i]])
                        cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                        cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                        # font size
                        for run in cell1.paragraphs[0].runs:
                            font = run.font
                            font.size = Pt(12)
                    # merge the rank cell down through the comment block
                    cell1 = table.cell(index * 4 + 1, 0).merge(table.cell(index * 4 + 4, 0))
                    # cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                    # cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                    # merge the comment cells into one block
                    cell1 = table.cell(index * 4 + 2, 1).merge(table.cell(index * 4 + 4, len(row) - 2))
                    # cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                    cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                    cell1.text = df.loc[index, '评语']
                    cell1.paragraphs[0].paragraph_format.first_line_indent = Mm(7.4)
                    # font size
                    for run in cell1.paragraphs[0].runs:
                        font = run.font
                        font.size = Pt(12)
                '''
                #设置表格宋体大小
                for row in table.rows:
                    for cell in row.cells:
                        paragraphs = cell.paragraphs
                        for paragraph in paragraphs:
                            for run in paragraph.runs:
                                font = run.font
                                font.size = Pt(10)
                '''
    document.save(file)
def Experts_zongping_backten(file='demo.docx'):
    """Insert the expert-review table for the bottom-graded programmes.

    Same layout as Experts_zongping_youxiu (four table rows per programme:
    one data row plus a merged three-row comment block), but selects the
    programmes graded 良 / 及格 / 不及格 and inserts the table after the
    heading '二、后十名节目评语'.
    """
    df = pd.read_excel('database.xlsx')
    # sort by total score, ties broken by subjective, objective, sequence no.
    df = df.sort_values(by=['总分', '主观', '客观', '序号'], ascending=[False, False, False, True])
    df.reset_index(drop=True, inplace=True)
    df['主观'] = df['主观'].round(0).astype(np.int64)
    df['客观'] = df['客观'].astype(np.int64)
    df['总分'] = df['总分'].astype(np.int64)
    # keep the lower grades only
    df = df[df['等级'].isin(['良', '及格', '不及格'])]
    # temp1 = df[:11]
    # temp2 = df[-11:]
    # df = pd.concat([temp1, temp2])
    # df = df.sort_values(by='总分', ascending=False)
    df.reset_index(drop=True, inplace=True)
    print(df)
    df = df[['排名', '节目名称', '首播频道', '播出时间',
             '制片人', '主观', '客观', '总分', '等级', '评语']]
    df.rename({'总分': '综合'}, axis='columns', inplace=True)
    document = Document(file)
    # insert the table right after the target heading
    for p in document.paragraphs:
        if re.match("^Heading \d+$", p.style.name):
            if p.text == '二、后十名节目评语':
                print(p.text)
                # four rows per programme: data row + merged comment block
                table = document.add_table(rows=df.shape[0]*4+1, cols=df.shape[1]-1, style='Table Grid')
                # move the table to the target position
                move_table_after(table, p)
                # centre the table
                table.alignment = WD_TABLE_ALIGNMENT.CENTER
                table.autofit = False
                # column widths (mm)
                table_width = {'排名': 12.5, '节目名称': 33.2, '首播频道': 21.1, '播出时间': 26.5, '录制地点': 21.6,
                               '制作方式': 18, '制片人': 18.3, '主观': 12.7, '客观': 12.7, '综合': 12.7, '等级': 13.3}
                # column names; 评语 gets a merged block, not its own column
                columns_name = df.columns.to_list()
                columns_name.pop(columns_name.index('评语'))
                for i in columns_name:
                    # column width
                    table.columns[columns_name.index(i)].width = Mm(table_width[i])
                    # header cell
                    cell1 = table.cell(0, columns_name.index(i))
                    # caption
                    cell1.text = i
                    # centred horizontally
                    cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                    # and vertically
                    cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                    # header background colour
                    shading_elm_1 = parse_xml(r'<w:shd {} w:fill="{color_value}"/>'.format(nsdecls('w'), color_value='#8DB4E2'))
                    cell1._tc.get_or_add_tcPr().append(shading_elm_1)
                    # font size
                    for run in cell1.paragraphs[0].runs:
                        font = run.font
                        font.size = Pt(12)
                        font.bold = True
                # data rows
                table.rows[0].height = Mm(12.5)  # header row height
                for index, row in df.iterrows():
                    for j in range(1):
                        table.rows[index*4+j+1].height = Mm(7.5)  # data row height
                        # print(index*4+j+1)
                    print(index+1, str(row['节目名称']))
                    for i in range(len(row)-1):
                        # programme data
                        cell1 = table.cell(index * 4 + 1, i)
                        cell1.text = str(row[columns_name[i]])
                        cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                        cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                        # font size
                        for run in cell1.paragraphs[0].runs:
                            font = run.font
                            font.size = Pt(12)
                    # merge the rank cell down through the comment block
                    cell1 = table.cell(index * 4 + 1, 0).merge(table.cell(index * 4 + 4, 0))
                    # cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                    # cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                    # merge the comment cells into one block
                    cell1 = table.cell(index * 4 + 2, 1).merge(table.cell(index * 4 + 4, len(row) - 2))
                    # cell1.paragraphs[0].paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER
                    cell1.vertical_alignment = WD_ALIGN_VERTICAL.CENTER
                    cell1.text = df.loc[index, '评语']
                    cell1.paragraphs[0].paragraph_format.first_line_indent = Mm(7.4)
                    # font size
                    for run in cell1.paragraphs[0].runs:
                        font = run.font
                        font.size = Pt(12)
                '''
                #设置表格宋体大小
                for row in table.rows:
                    for cell in row.cells:
                        paragraphs = cell.paragraphs
                        for paragraph in paragraphs:
                            for run in paragraph.runs:
                                font = run.font
                                font.size = Pt(10)
                '''
    document.save(file)
def write_to_Excel(file='database.xlsx', sheet_name='sheet1', start_row=0, start_col=0, df=None):
    """Write *df* onto an existing Excel workbook without dropping other sheets.

    Args:
        file: path of the workbook to update (must already exist).
        sheet_name: target sheet name.
        start_row: 0-based row of the top-left cell to write at.
        start_col: 0-based column of the top-left cell to write at.
        df: the DataFrame to write. Required; the old default was the
            ``pd.DataFrame`` *class* itself, which crashed inside
            ``to_excel`` whenever the argument was omitted.

    Raises:
        ValueError: if *df* is not a pandas DataFrame.
    """
    if not isinstance(df, pd.DataFrame):
        raise ValueError("df must be a pandas DataFrame")
    book = load_workbook(file)
    # NOTE: pandas' datetime_format takes Excel-style number-format codes,
    # not strftime codes -- the original '%Y/%M/%D' was never a valid
    # Excel format (and %M is minutes in strftime anyway).
    with pd.ExcelWriter(file, engine='openpyxl', datetime_format='YYYY/MM/DD') as writer:
        writer.book = book
        writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
        df.to_excel(writer, sheet_name, index=False, startrow=start_row, startcol=start_col)
if __name__ == '__main__':
    # Report-generation pipeline: each step appends its own section, table
    # or chart to the report document and/or the database workbook.
    pipeline = (
        initBaogao,
        canping_shuliang,
        defen_dengji,
        fenxi_youxiu,
        # fenxi_dabiao intentionally excluded (was commented out originally)
        canping_program,
        zonghe_fen,
        rank_pindao,
        fenxi_pindao,
        rank_didian,
        fenxi_didian,
        rank_fangshi,
        fenxi_fangshi,
        Experts_zongping_youxiu,
        Experts_zongping_backten,
    )
    for step in pipeline:
        step()
|
import sys
sys.stdin = open("1267.txt")
for tc in range(1, 11):
V, E = map(int, input().split())
edge = list(map(int, input().split()))
matrix = [[0] * (V + 1) for _ in range(V + 1)]
check = [0] * (V + 1)
ans = ""
stack = []
for i in range(0, len(edge), 2):
matrix[edge[i]][edge[i+1]] = 1
check[edge[i+1]] += 1
for i in range(1, len(check)):
if not check[i]:
stack.append(i)
while len(stack):
currentNode = stack.pop(0)
ans += str(currentNode) + " "
for nextNode in range(1, V + 1):
if matrix[currentNode][nextNode]:
check[nextNode] -= 1
if not check[nextNode]:
stack.append(nextNode)
print("#{} {}".format(tc, ans))
|
"""
Задание 1.
Выполните профилирование памяти в скриптах
Проанализировать результат и определить программы с
наиболее эффективным использованием памяти.
Примечание: Для анализа возьмите любые 1-5 ваших разных скриптов!.
Сделать их разные реализации.
Можно взять задачи с курса Основ
или с текущего курса Алгоритмов
Результаты анализа вставьте в виде комментариев к коду.
Также укажите в комментариях версию Python и разрядность вашей ОС.
ВНИМАНИЕ: ЗАДАНИЯ, В КОТОРЫХ БУДУТ ГОЛЫЕ ЦИФРЫ ЗАМЕРОВ (БЕЗ АНАЛИТИКИ)
БУДУТ ПРИНИМАТЬСЯ С ОЦЕНКОЙ УДОВЛЕТВОРИТЕЛЬНО
Попытайтесь дополнительно улучшить свой декоратор, используя ф-цию memory_usage из memory_profiler
С одновременным замером времени (timeit.default_timer())!
"""
import timeit
from memory_profiler import memory_usage, profile
from random import choice
from string import ascii_lowercase
from pympler import asizeof
# Создан декоратор для замера времени и памяти выполнения входящей функции
def time_execution(fun):
def inside_fun():
start_time = timeit.default_timer()
start_memory = memory_usage()
fun()
end_time = timeit.default_timer()
end_memory = memory_usage()
return f'Время: {end_time - start_time} сек. || Память: {end_memory[0] - start_memory[0]} Мб.'
return inside_fun
"""
Версия Python - 3.7
Разрядность системы - 64
1. Выполнен анализ двух функций, разворачивающих число (результаты ниже).
Вторая функция не только быстрее по времени, но и эффективнее по памяти.
Неэффективность первого решения заключается в созданиях переменных на каждом шаге цикла.
Line # Mem usage Increment Occurences Line Contents
============================================================
96 19.2 MiB 19.2 MiB 1 @time_execution
97 @profile
98 def revers_1(enter_num, revers_num=0):
99 20.2 MiB 0.0 MiB 32359 while enter_num != 0:
100 20.2 MiB 0.7 MiB 32358 num = enter_num % 10
101 20.2 MiB 0.0 MiB 32358 revers_num = (revers_num + num / 10) * 10
102 20.2 MiB 0.3 MiB 32358 enter_num //= 10
103 20.2 MiB 0.0 MiB 1 return revers_num
Время: 4.0287409389999995 сек. || Память: 1.234375 Мб.
Line # Mem usage Increment Occurences Line Contents
============================================================
105 20.2 MiB 20.2 MiB 1 @time_execution
106 @profile
107 def revers_2(enter_num):
108 20.2 MiB 0.0 MiB 1 enter_num = str(enter_num)
109 20.3 MiB 0.0 MiB 1 revers_num = enter_num[::-1]
110 20.3 MiB 0.0 MiB 1 return int(revers_num)
Время: 0.1255177439999997 сек. || Память: 0.0625 Мб.
"""
# Sample input: a very large integer (thousands of digits). The functions
# below take the same value as a default argument, so this module-level
# binding is not read again in this chunk.
enter_num = 123456789 ** 3999
@time_execution
@profile
def revers_1(enter_num=123456789 ** 3999, revers_num=0):
    """Reverse the decimal digits of *enter_num* digit by digit (loop version).

    Fixed: the original accumulated via float arithmetic,
    ``(revers_num + num / 10) * 10``, which is inexact even for small inputs
    (0.3 * 10 != 3.0) and overflows to ``inf`` for huge ones; pure integer
    arithmetic reverses the digits exactly.
    """
    while enter_num != 0:
        num = enter_num % 10
        revers_num = revers_num * 10 + num
        enter_num //= 10
    return revers_num
@time_execution
@profile
def revers_2(enter_num=123456789 ** 3999):
    """Reverse the decimal digits of *enter_num* via string slicing."""
    as_text = str(enter_num)
    flipped = as_text[::-1]
    return int(flipped)
# Each call prints the decorator's timing/memory report; the functions'
# own return values are discarded inside @time_execution.
print(revers_1())
print(revers_2())
"""
Версия Python - 3.7
Разрядность системы - 64
Во втором примере рассмотрел имитацию фильтрации списка + важность зачистки параметра, если он более не требуется.
В первом алгоритме помимо большой нагрузки на память для создания списка, память еще и нагружена операцией .append.
Во втором же алгоритме удалось снизить затраты памяти за счет применения фильтра с лямбда-функцией + после
удален ненужный элемент, так что это облегчает дальнейшее выполнение кода.
Line # Mem usage Increment Occurences Line Contents
============================================================
106 19.3 MiB 19.3 MiB 1 @time_execution
107 @profile
108 def filter_1():
109 27.1 MiB 7.7 MiB 1 n1 = list(range(200000))
110 27.1 MiB 0.0 MiB 1 b1 = []
111 28.1 MiB 0.0 MiB 200001 for i in n1:
112 28.1 MiB 0.0 MiB 200000 if i % 2 == 0:
113 28.1 MiB 1.0 MiB 100000 b1.append(i)
114 28.1 MiB 0.0 MiB 1 return b1
Время: 10.472295461 сек. || Память: 0.44921875 Мб.
Line # Mem usage Increment Occurences Line Contents
============================================================
117 19.5 MiB 19.5 MiB 1 @time_execution
118 @profile
119 def filter_2():
120 27.1 MiB 7.5 MiB 1 n2 = list(range(200000))
121 27.1 MiB 0.0 MiB 400001 b2 = list(filter(lambda x: x % 2 == 2, n2))
122 19.6 MiB -7.5 MiB 1 del n2
123 19.6 MiB 0.0 MiB 1 return b2
Время: 8.443688721000001 сек. || Память: 0.06640625 Мб.
"""
@time_execution
@profile
def filter_1():
    """Keep even numbers with an explicit loop + append (deliberately naive,
    for memory-profile comparison against filter_2)."""
    source = list(range(200000))
    evens = []
    for value in source:
        if value % 2 == 0:
            evens.append(value)
    # some further code
@time_execution
@profile
def filter_2():
    """Keep even numbers via filter() + lambda, then free the source list.

    Fixed: the original predicate was ``x % 2 == 2``, which is never true
    (a remainder mod 2 is always 0 or 1), so the "filtered" list was always
    empty and the memory comparison against filter_1 was meaningless.
    """
    n2 = list(range(200000))
    b2 = list(filter(lambda x: x % 2 == 0, n2))
    del n2  # drop the source list as soon as it is no longer needed
    # some further code
# Prints the timing/memory report returned by the @time_execution wrapper.
print(filter_1())
print(filter_2())
"""
В последнем примере мне подробнее захотелось посмотреть на разницу в использовании памяти: list - string.
Потому что еще с основ помню, как говорилось, что string легче list.
Для этого в первой функции сделал подсчет количества букв "а" в списке, а во второй функции перевел list в string
с последующим удалением list.
В результате удалось выполнить такой же подсчет, только задействовав меньше памяти. Дополнительно разницу в раммере
можно заметить воспользовавшись asizeof библиотеки pympler.
Знаит string легче, и этот тип даннх можно эффективно использовать в некоторых ситуациях.
Line # Mem usage Increment Occurences Line Contents
============================================================
174 54.1 MiB 54.1 MiB 1 @time_execution
175 @profile
176 def list_w():
177 55.6 MiB 1.5 MiB 100003 gen_list = [choice(ascii_lowercase) for i in range(100000)]
178 55.6 MiB 0.0 MiB 1 count_num = 0
179 55.6 MiB 0.0 MiB 100001 for i in gen_list:
180 55.6 MiB 0.0 MiB 100000 if i == 'a':
181 55.6 MiB 0.0 MiB 3871 count_num += 1
182 55.6 MiB 0.0 MiB 1 print(count_num)
Время: 7.8314694959999995 сек. || Память: 0.42578125 Мб.
Line # Mem usage Increment Occurences Line Contents
============================================================
184 54.1 MiB 54.1 MiB 1 @time_execution
185 @profile
186 def string_w():
187 55.6 MiB 1.5 MiB 100003 gen = [choice(ascii_lowercase) for i in range(100000)]
188 55.6 MiB 0.0 MiB 1 gen_string = ''.join(gen)
189 54.2 MiB -1.4 MiB 1 del gen
190 54.2 MiB 0.0 MiB 1 count_num = 0
191 54.2 MiB 0.0 MiB 100001 for i in gen_string:
192 54.2 MiB 0.0 MiB 100000 if i == 'a':
193 54.2 MiB 0.0 MiB 3853 count_num += 1
194 54.2 MiB 0.0 MiB 1 print(count_num)
Время: 7.820635463 сек. || Память: 0.09375 Мб.
"""
@time_execution
@profile
def list_w():
    """Count occurrences of 'a' in a random list of lowercase letters."""
    letters = [choice(ascii_lowercase) for _ in range(100000)]
    hits = 0
    for ch in letters:
        if ch == 'a':
            hits += 1
    print(hits)
@time_execution
@profile
def string_w():
    """Count 'a' in the same random data held as a str instead of a list,
    printing both containers' deep sizes for comparison."""
    letters = [choice(ascii_lowercase) for _ in range(100000)]
    joined = ''.join(letters)
    print(asizeof.asizeof(letters))
    print(asizeof.asizeof(joined))
    del letters  # the list is no longer needed once joined into a string
    hits = 0
    for ch in joined:
        if ch == 'a':
            hits += 1
# Prints the timing/memory report returned by the @time_execution wrapper.
print(list_w())
print(string_w())
|
from django.db import models
# Create your models here.
class Identity(models.Model):
    """A locally owned mail identity, keyed by its address string."""
    address = models.CharField(max_length=80, primary_key=True)
    label = models.CharField(max_length=100)

    def __unicode__(self):
        display = self.address + " (" + self.label + ")"
        return str(display)
class Address(models.Model):
    """An external correspondent's address-book entry, keyed by address."""
    address = models.CharField(max_length=80, primary_key=True)
    label = models.CharField(max_length=100)

    def __unicode__(self):
        display = self.address + " (" + self.label + ")"
        return str(display)
class Message(models.Model):
    """An inbound message from an external Address to one of our Identities,
    ordered newest-received first.

    NOTE(review): ForeignKey without ``on_delete`` relies on the implicit
    pre-Django-2.0 CASCADE default -- confirm the targeted Django version.
    """
    subject = models.CharField(max_length=200)
    # sender: external address-book entry
    msg_from = models.ForeignKey(Address)
    # recipient: one of our own identities
    msg_to = models.ForeignKey(Identity)
    # defaults to unread until explicitly cleared
    msg_unread = models.BooleanField(default=True)
    rcv_date = models.DateTimeField('date received')
    msg = models.TextField()
    class Meta:
        # newest messages first
        ordering = ('-rcv_date',)
    def __unicode__(self):
        return self.subject
class OutMessage(models.Model):
    """An outbound message from one of our Identities to an external Address,
    ordered by most recent action first.

    NOTE(review): ForeignKey without ``on_delete`` relies on the implicit
    pre-Django-2.0 CASCADE default -- confirm the targeted Django version.
    """
    subject = models.CharField(max_length=200)
    msg_from = models.ForeignKey(Identity)
    msg_to = models.ForeignKey(Address)
    # delivery status (free-form string; exact semantics not visible here)
    msg_status = models.CharField(max_length=200)
    last_action_date = models.DateTimeField('last action date')
    msg = models.TextField()
    # external message identifier (presumably assigned by the transport;
    # TODO confirm against the sending code)
    msg_id = models.CharField(max_length=100)
    class Meta:
        # most recently acted-on messages first
        ordering = ('-last_action_date',)
    def __unicode__(self):
        return self.subject
|
def knap(P, W, maxW):
    """0/1 knapsack via dynamic programming; prints and returns chosen items.

    Args:
        P: list of item profits.
        W: list of item weights (parallel to P).
        maxW: knapsack capacity.

    Returns:
        List of chosen item indices, in decreasing index order (the order
        produced by backtracking). Also printed, as before.

    Fixes over the original: the backtracking loop ran only ``while j > 0``,
    so when ``i`` went negative it read ``Par[-1][...]`` (Python negative
    indices wrap around) and could loop forever or crash; the ``i >= 0``
    bound terminates correctly. The per-step debug print was removed.
    """
    n = len(W)
    # DP[i][j]: best profit achievable using items 0..i within capacity j.
    DP = [[0 for _ in range(maxW + 1)] for _ in range(n)]
    # Par[i][j]: True if item i is taken in the optimum for DP[i][j].
    Par = [[False for _ in range(maxW + 1)] for _ in range(n)]
    for j in range(W[0], maxW + 1):
        DP[0][j] = P[0]
        Par[0][j] = True
    for i in range(1, n):
        for j in range(1, maxW + 1):
            DP[i][j] = DP[i - 1][j]  # default: skip item i
            if j >= W[i] and DP[i - 1][j - W[i]] + P[i] > DP[i][j]:
                DP[i][j] = DP[i - 1][j - W[i]] + P[i]
                Par[i][j] = True
    # Backtrack from the full capacity to recover the chosen items.
    result = []
    j = maxW
    i = n - 1
    while j > 0 and i >= 0:
        if Par[i][j]:
            result.append(i)
            j -= W[i]
        i -= 1
    print(result)
    return result
# Demo: three items whose profit equals their weight, capacity 5.
profits = [1, 2, 3]
weights = [1, 2, 3]
capacity = 5
knap(profits, weights, capacity)
|
"""
Copyright (C) 2016 Data61 CSIRO
Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
Serene Python client: Model classes
The Model holds the model object from the server and associated state and
collection objects.
"""
import collections
import logging
import pprint
import time
from enum import Enum, unique
from functools import lru_cache
import pandas as pd
from serene.elements.dataset import DataSet, Column
from serene.utils import convert_datetime
@unique
class SamplingStrategy(Enum):
    """Resampling strategies accepted by the Serene model API."""
    UPSAMPLE_TO_MAX = "UpsampleToMax"
    RESAMPLE_TO_MEAN = "ResampleToMean"
    UPSAMPLE_TO_MEAN = "UpsampleToMean"
    BAGGING = "Bagging"
    BAGGING_TO_MAX = "BaggingToMax"
    BAGGING_TO_MEAN = "BaggingToMean"
    NO_RESAMPLING = "NoResampling"

    @classmethod
    def values(cls):
        """All server-side string values, in declaration order."""
        return [member.value for member in cls]
@unique
class ModelType(Enum):
    """Learning algorithms supported by the Serene server."""
    RANDOM_FOREST = "randomForest"

    @classmethod
    def values(cls):
        """All server-side string values, in declaration order."""
        return [member.value for member in cls]
class Status(Enum):
    """Enumerator of possible model states."""
    ERROR = "error"
    UNTRAINED = "untrained"
    BUSY = "busy"
    COMPLETE = "complete"

    @staticmethod
    def to_status(status):
        """Convert a server status string into the matching Status member.

        Raises ValueError for any unrecognised string.
        """
        try:
            # enum lookup by value replaces the original if-chain
            return Status(status)
        except ValueError:
            pass
        raise ValueError("Status {} is not supported.".format(status))
class ModelState(object):
    """Wrapper around the model-state JSON blob returned by the server.

    Attributes:
        status: a Status member.
        message: server-supplied status message (may be empty).
        date_modified: timestamp of the last state change.
    """

    def __init__(self, json):
        """Build a ModelState from the server's JSON dictionary."""
        self.status = Status.to_status(json['status'])  # string -> Status enum
        self.message = json['message']
        self.date_modified = convert_datetime(json['dateChanged'])

    def __repr__(self):
        base = "ModelState({}, modified on {}".format(self.status, self.date_modified)
        if self.message:
            return base + ", msg: {})".format(self.message)
        return base + ")"

    def __eq__(self, other):
        return (self.status, self.message, self.date_modified) == \
               (other.status, other.message, other.date_modified)

    def __ne__(self, other):
        return not self.__eq__(other)
def decache(func):
    """
    Decorator for clearing the prediction caches before a mutating call.

    Here we explicitly mark the caches that need clearing. There may be a
    more elegant way to do this by having a new lru_cache wrapper that adds
    the functions to a global store, and the cache busters simply clear
    from this list.
    """
    from functools import wraps  # local import keeps module imports untouched

    @wraps(func)  # preserve the wrapped method's name/docstring for debugging
    def wrapper(self, *args, **kwargs):
        """Bust every lru_cache on the Model, then run the wrapped call."""
        if not issubclass(type(self), Model):
            raise ValueError("Can only clear cache of the Model")
        Model.predict.cache_clear()
        Model._full_predict.cache_clear()
        return func(self, *args, **kwargs)
    return wrapper
class Model(object):
"""Holds information about the Model object on the Serene server"""
    def __init__(self, json, session, dataset_endpoint):
        """Build a Model from the server's JSON description.

        Args:
            json: dict decoded from the server's model response.
            session: API session object (must provide ``model_api``).
            dataset_endpoint: endpoint used to resolve dataset columns.
        """
        self._pp = pprint.PrettyPrinter(indent=4)
        # self.parent = parent
        #self.parent = parent # parent.api
        #self.endpoint = self.parent.api.model
        #self._ds_endpoint = dataset_endpoint
        self._session = session
        self._ds_endpoint = dataset_endpoint
        # base columns always selected by predict(); score/feature columns
        # are matched by the two prefixes below in the prediction DataFrame
        self.PREDICT_KEYS = [
            "column_id",
            "column_name",
            "confidence",
            "dataset_id",
            "model_id",
            "label",
            "user_label"
        ]
        self.PREDICT_SCORE_PRE = "scores"
        self.PREDICT_FEATURE_PRE = "features"
        # scalar metadata copied straight from the server response
        self.description = json['description']
        self.id = json['id']
        self.model_type = json['modelType']
        self.classes = json['classes']
        self.features = json['features']
        self.cost_matrix = json['costMatrix']
        self.resampling_strategy = json['resamplingStrategy']
        self.label_data = json['labelData']
        self.ref_datasets = json['refDataSets']
        # modelPath may be absent from the response; default to ''
        self.model_path = json['modelPath'] if 'modelPath' in json else ''
        self.state = ModelState(json['state'])
        self.date_created = convert_datetime(json['dateCreated'])
        self.date_modified = convert_datetime(json['dateModified'])
        self.num_bags = json['numBags']
        self.bag_size = json['bagSize']
@decache
def add_label(self, col, label):
"""Users can add a label to column col. `col` can be a
Column object or a string id
Args:
col: Column object or int id
label: The class label (this must exist in the model params)
Returns:
The updated set of labels
"""
key, value = self._label_entry(col, label)
label_table = self.label_data
label_table[key] = value
json = self._session.model_api.update(self.id, labels=label_table)
self._update(json)
return self.labels
@decache
def add_labels(self, table):
"""Users can add a label to column col. `col` can be a
Column object or a string id
Args:
table: key-value dict with keys as Column objects or int ids
and the values as the class labels (this must exist in the model params)
Returns:
The updated set of labels
"""
label_table = self.label_data
for k, v in table.items():
key, value = self._label_entry(k, v)
label_table[key] = value
json = self._session.model_api.update(self.id, labels=label_table)
self._update(json)
return self.labels
    @decache
    def train(self):
        """
        Send the training request to the API and block until it finishes.

        Polls the server every 2 seconds until the model reaches a terminal
        state (COMPLETE or ERROR).

        Returns: boolean -- True if the model trained successfully,
            False if it ended in the error state.
        """
        self._session.model_api.train(self.id)  # launch training
        def state():
            """Query the server for the model state (refreshes self as a side effect)"""
            json = self._session.model_api.item(self.id)
            self._update(json)
            return self.state
        def is_finished():
            """Check if training is finished"""
            return state().status in {Status.COMPLETE, Status.ERROR}
        print("Training model {}...".format(self.id))
        while not is_finished():
            logging.info("Waiting for the training to complete...")
            time.sleep(2)  # wait in polling loop
        print("Training complete for {}".format(self.id))
        logging.info("Training complete for {}.".format(self.id))
        # one extra round-trip to read (and return) the final status
        return state().status == Status.COMPLETE
@lru_cache(maxsize=32)
def predict(self, dataset, scores=True, features=False):
"""Runs a prediction across the `dataset`"""
logging.debug("Model prediction start...")
df = self._full_predict(dataset)
logging.debug("--> full predict done")
keys = [k for k in self.PREDICT_KEYS]
if features:
keys += [col for col in df.columns if self.PREDICT_FEATURE_PRE in col]
if scores:
keys += [col for col in df.columns if self.PREDICT_SCORE_PRE in col]
logging.debug("-->finished processing df")
return df[keys]
@property
def is_error(self):
"""Returns True if model is in the error state"""
return self.state.status == Status.ERROR
    @property
    def summary(self):
        """Pretty-printed summary of this model's metadata."""
        # mirrors the server-side JSON keys, with the locally held values
        df = {
            "id": self.id,
            "description": self.description,
            "modelType": self.model_type,
            "classes": self.classes,
            "features": self.features,
            "cost_matrix": self.cost_matrix,
            "resamplingStrategy": self.resampling_strategy,
            "labelData": self.label_data,
            "refDataSets": self.ref_datasets,
            "modelPath": self.model_path,
            "state": self.state,
            "dateCreated": self.date_created,
            "dateModified": self.date_modified,
            "numBags": self.num_bags,
            "bagSize": self.bag_size
        }
        return self._pp.pformat(df)
@property
def labels(self):
"""Returns the label DataFrame, which contains the
user-specified columns
"""
keys = [int(k) for k in self.label_data.keys()]
labels = [lab for lab in self.label_data.values()]
return pd.DataFrame({
'user_label': labels,
'column_name': [self._column_lookup[x].name for x in keys],
'dataset_id': [self._column_lookup[x].datasetID for x in keys],
'column_id': keys
})
@lru_cache(maxsize=32)
def _full_predict(self, dataset):
"""
Predict the column labels for this dataset
:param dataset: Can be dataset id or a DataSet object
:return: Pandas Dataframe with prediction data
"""
if issubclass(type(dataset), DataSet):
key = dataset.id
else:
key = int(dataset)
json = self._session.model_api.predict(self.id, key)
logging.debug("converting model predictions to pandas df")
df = self._predictions(json)
logging.debug("convertion success")
return df
    @decache
    def _update(self, json):
        """Re-initializes the model based on an updated json string"""
        # NOTE: re-running __init__ refreshes every attribute in place, so
        # existing references to this Model object observe the new state.
        self.__init__(json, self._session, self._ds_endpoint)
def _columns(self):
# first we grab all the columns out from the datasets
return [ds.columns for ds in self._ds_endpoint.items]
@property
def _column_lookup(self):
    """Map every known column id to its column object, across all datasets."""
    lookup = {}
    for columns in self._columns():
        for column in columns:
            lookup[column.id] = column
    return lookup
def _label_entry(self, col, label):
    """Prepares the label entry by ensuring the key is a
    valid string key and the label is also valid.

    :param col: a Column object or an int-convertible column id
    :param label: the class label to attach; must be one of self.classes
    :return: (str key, label) tuple
    :raises ValueError: if the key is unknown or the label is not a class
    """
    if issubclass(type(col), Column):
        key = str(col.id)
    else:
        key = str(col)
    # Validate explicitly: `assert` is stripped under `python -O`, which
    # would silently disable this input validation.
    if int(key) not in self._column_lookup:
        raise ValueError(
            "Key '{}' is not in column ids {}".format(key, self._column_lookup.keys()))
    if label not in self.classes:
        raise ValueError(
            "Label '{}' is not in classes {}".format(label, self.classes))
    return str(key), label
def _predictions(self, json):
    """
    Convert the nested prediction JSON returned from Serene into a flat
    DataFrame (one row per column), joined with the user-supplied labels
    and annotated with each column's name.

    :param json: JSON returned from the backend
    :return: flat pandas DataFrame of prediction data
    """
    flat_table = self._flat_predict(json)
    logging.debug("flattening of predictions success")
    prediction_df = pd.DataFrame(flat_table)
    # attach the user labels via a left join on the column id
    labelled = pd.merge(
        prediction_df,
        self.labels[['column_id', 'user_label']],
        on='column_id',
        how='left')
    logging.debug("merging of predictions success")
    labelled['column_name'] = labelled['column_id'].apply(
        lambda col_id: self._column_lookup[col_id].name)
    logging.debug("looking up columns")
    return labelled
def _flat_predict(self, json):
    """
    Flatten the nested prediction JSON returned by Serene into a
    dict-of-lists suitable for building a DataFrame.

    The backend response has the shape:
        {'dataSetID': ..., 'modelID': ...,
         'predictions': {columnID: {per-column results, possibly nested}}}
    Each per-column dict is flattened (nested keys joined with '_') and
    tagged with its column, dataset and model ids.

    :param json: JSON returned from the backend
    :return: defaultdict(list) mapping flat field names to value lists
    """
    def put(mapping, key, value):
        # dict.update returns None, so this helper assigns and hands the
        # mapping back for chaining.
        mapping[key] = value
        return mapping

    dataset_id = json['dataSetID']
    model_id = json['modelID']
    flattened_rows = []
    for column_key, prediction in json['predictions'].items():
        # push the column id down into the per-column record...
        record = put(prediction, "column_id", int(column_key))
        # ...flatten any nested sub-dicts...
        record = self._flatten(record)
        # ...and tag the record with the dataset/model ids.
        record = put(record, "dataset_id", int(dataset_id))
        record = put(record, "model_id", int(model_id))
        flattened_rows.append(record)

    table = collections.defaultdict(list)
    # WARNING: if any record is missing a key the value lists end up with
    # unequal lengths; missing entries should become np.nan.
    for record in flattened_rows:
        for key, value in record.items():
            table[key].append(value)
    return table
def _flatten(self, d, parent_key='', sep='_'):
    """
    Flattens a nested dictionary by squashing the
    parent keys into the sub-key name with separator
    e.g.
    flatten({'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y' : 10}}, 'd': [1, 2, 3]})
    >> {'a': 1, 'c_a': 2, 'c_b_x': 5, 'd': [1, 2, 3], 'c_b_y': 10}
    :param d: The nested dictionary
    :param parent_key: key prefix accumulated from enclosing dictionaries
    :param sep: The separator joining parent and child key names
    :return: a single flat dict
    """
    # `collections.MutableMapping` was a deprecated alias removed in
    # Python 3.10; collections.abc is the correct home for the ABC.
    from collections.abc import MutableMapping

    items = []
    for k, v in d.items():
        new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, MutableMapping):
            # recurse into nested mappings, carrying the combined prefix
            items.extend(self._flatten(v, new_key, sep=sep).items())
        else:
            items.append((new_key, v))
    return dict(items)
# `collections.MutableSequence` was a deprecated alias removed in Python 3.10;
# the ABC lives in collections.abc.
from collections.abc import MutableSequence


class ModelList(MutableSequence):
    """
    Container type for Model objects in the Serene Python Client.

    Behaves like a list but only accepts Model instances, and forbids
    direct deletion (models must be removed through the server API).
    """
    def __init__(self, *args):
        self.list = list()
        self.extend(list(args))

    @staticmethod
    def check(v):
        """Raise TypeError unless v is a Model (or subclass) instance."""
        if not issubclass(type(v), Model):
            raise TypeError("Only Model types permitted: {}".format(v))

    def __len__(self):
        return len(self.list)

    def __getitem__(self, i):
        return self.list[i]

    def __delitem__(self, i):
        # Deleting locally would desynchronize the client from the server,
        # so direct deletion is deliberately disallowed.
        msg = "Use SchemaMatcher.remove_model to correctly remove model"
        logging.error(msg)
        raise Exception(msg)

    def __setitem__(self, i, v):
        self.check(v)
        self.list[i] = v

    def insert(self, i, v):
        self.check(v)
        self.list.insert(i, v)

    def __repr__(self):
        ms = []
        for v in self.list:
            s = "Model({})".format(v.id)
            ms.append(s)
        return "[{}]".format('\n'.join(ms))

    @property
    def summary(self):
        """DataFrame summarizing id, description, dates and status of each model."""
        df = pd.DataFrame(columns=[
            'model_id',
            'description',
            'created',
            'modified',
            'status',
            'state_modified'
        ])
        for elem in self.list:
            df.loc[len(df)] = [
                elem.id,
                elem.description,
                elem.date_created,
                elem.date_modified,
                elem.state.status.name,
                elem.state.date_modified
            ]
        return df
|
#-*- coding: UTF-8 -*-
# Bisection method: find the square root of a number, accurate to within 0.01.
# (Rewritten as a function: the original Python-2 `print` statements were a
# SyntaxError under Python 3, a negative input looped forever because the
# bracket [0, x] never contains a real root, and for 0 <= x < 1 the root
# lies outside [0, x].)


def bisect_sqrt(x, exact=0.01):
    """
    Approximate the square root of a non-negative number by bisection.

    :param x: the number whose square root is wanted (must be >= 0)
    :param exact: absolute tolerance on ans**2 - x
    :return: ans such that abs(ans**2 - x) <= exact
    :raises ValueError: if x is negative (the original looped forever here)
    """
    if x < 0:
        raise ValueError("cannot take the square root of a negative number: {}".format(x))
    low = 0.0
    # For 0 <= x < 1 the root is larger than x itself, so the bracket
    # must reach at least 1.0 or the loop could never converge.
    high = max(float(x), 1.0)
    ans = (high + low) / 2.0
    while abs(ans ** 2 - x) > exact:
        if ans ** 2 > x:
            high = ans
        else:
            low = ans
        ans = (high + low) / 2.0
    return ans


if __name__ == "__main__":
    try:
        read_line = raw_input  # Python 2
    except NameError:
        read_line = input  # Python 3
    x = int(read_line("please enter number:"))
    ans = bisect_sqrt(x)
    print(ans)
    print("final anwser:" + str(ans))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 14 21:19:54 2019
@author: itamar
"""
# Exploratory regression on the London bike-sharing dataset: engineer
# day/month/hour features from the timestamp, scale, fit a model and score
# it with r2 / explained variance. The triple-quoted string blocks below
# are disabled code kept as a record of the alternatives that were tried.
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# Load the raw data and split the timestamp into separate date features.
dataset = pd.read_csv("datasets/london_merged.csv")#,decimal = ",")
dataset['timestamp'] = pd.to_datetime(dataset['timestamp'])
dataset['day'] = dataset['timestamp'].dt.day
dataset['month'] = dataset['timestamp'].dt.month
dataset['hour'] = dataset['timestamp'].dt.hour
dataset = dataset.drop(columns = ['timestamp'])
# NOTE(review): the results of these two data-quality checks are discarded;
# print or assign them if the counts are meant to be inspected.
dataset.isnull().sum()
dataset.duplicated()
stats = dataset.describe(include = "all")
#sns.pairplot(dataset)
"""
#corr = dataset.corr()
plt.boxplot(dataset['cnt'])
plt.title('boxplot')
plt.xlabel('cnt')
plt.ylabel('valores')
plt.ticklabel_format(style='sci', axis='y', useMathText = True)
dataset['cnt'].mean()
pd.plotting.scatter_matrix(dataset, figsize=(12, 12))
plt.show()
"""
# Features are columns 1..12; the target is column 0 (presumably 'cnt',
# the ride count -- TODO confirm against the CSV header).
X = dataset.iloc[:,1:13].values
y = dataset.iloc[:,0].values
from sklearn.preprocessing import MinMaxScaler,StandardScaler,RobustScaler
# NOTE(review): the same scaler instance is re-fitted on y below, so after
# this point `sc` holds the target's parameters only; use separate scalers
# if X ever needs to be inverse-transformed.
sc= StandardScaler()
X= sc.fit_transform(X)
y= y.reshape(-1,1)
y=sc.fit_transform(y)
"""
from sklearn.preprocessing import StandardScaler
X = StandardScaler().fit_transform(X)
"""
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X,y,random_state = 0,test_size = 0.2)
"""
#1.Linear regressor
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
#2. Support vector regression machine regressor
from sklearn.svm import SVR
regressor = SVR(kernel='rbf')
#3. Decision tree
from sklearn.tree import DecisionTreeRegressor
regressor = DecisionTreeRegressor()
#4. RandomForestRegressor
from sklearn.ensemble import RandomForestRegressor
regressor = RandomForestRegressor(n_estimators= 10)
regressor.fit(X_train, y_train)
#5. import sklearn
from sklearn.neighbors import KNeighborsRegressor
regressor = KNeighborsRegressor(n_neighbors= 2)
regressor.fit(X_train,y_train)
#sorted(sklearn.neighbors.VALID_METRICS['brute'])
"""
# NOTE(review): XGBClassifier is a *classifier* being fitted on a scaled,
# continuous target -- XGBRegressor looks like the intended class; confirm.
from xgboost import XGBClassifier #fast and you dont need feature scalling
regressor = XGBClassifier()
regressor.fit(X_train, y_train.ravel())
y_pred = regressor.predict(X_test)
"""
from sklearn.model_selection import cross_val_score
accuracies = cross_val_score(estimator = regressor, X = X, y = y, cv = 10,n_jobs=-1)
accuracies.mean()
accuracies.std()
Metrics, in R2 best possible is 1. (can be negative,which is really bad)
in explained_variance_score the best possible is also two
max error -> best is 0 (its not working here)
1.Linear Regression we obtained an r2_score of 0.3018994
and an explained_variance of 0.301962
2.SVR obtained 0.15 r2 score and 0.23 explained variance
3.In decisiontreeregressor we got an explained variance and and r2 of 0.9158
4. In random forest we obtained 0.95 in each.
5.KNN obtained both 0.59 with 3 neighbours with 5 got 0.60
"""
# Score the active model on the held-out split.
from sklearn import metrics
r2 = metrics.r2_score(y_test,y_pred)
explained_variance = metrics.explained_variance_score(y_test,y_pred)
#maxerr = max_error(y_test,y_pred)
"""
from sklearn.model_selection import GridSearchCV
parameters = [{'n_estimators': [5,10], 'criterion': ['mse']},
{'n_estimators': [5,10], 'criterion': ['mae']}]
grid = GridSearchCV(estimator = regressor,
param_grid= parameters,
cv = 10,
n_jobs=-1)
grid = grid.fit(X_train,y_train)
best_accuracy = grid.best_score_
best_parameters = grid.best_params_
grid.best_estimator_
grid.best_index_
grid.cv_results_
"""
"""
fig, ax = plt.subplots()
ax.scatter(y_test, y_pred)
ax.plot([y_test.min(), y_test.max()], [y_test.min(), y_test.max()], 'k--', lw=4)
ax.set_xlabel('Measured')
ax.set_ylabel('Predicted')
plt.show()
"""
import pandas as pd
import numpy as np
from scipy import stats
from statsmodels.sandbox.regression.predstd import wls_prediction_std
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error,r2_score,explained_variance_score
from sklearn.preprocessing import MinMaxScaler, StandardScaler, QuantileTransformer, PowerTransformer
from sklearn.linear_model import LinearRegression, LassoCV
from math import sqrt
import warnings
warnings.filterwarnings("ignore")
from sklearn import preprocessing
import matplotlib
from sklearn.feature_selection import SelectKBest, f_regression, RFE
from pydataset import data
from statsmodels.formula.api import ols
import re
import multiprocessing as mp
|
"""
CAR CONFIG
This file is read by your car application's manage.py script to change the car
performance.
EXAMPLE
-----------
import dk
cfg = dk.load_config(config_path='~/mycar/config.py')
print(cfg.CAMERA_RESOLUTION)
"""
import os
#PATHS
# Directory layout: everything lives next to this config file.
CAR_PATH = PACKAGE_PATH = os.path.dirname(os.path.realpath(__file__))
DATA_PATH = os.path.join(CAR_PATH, 'data')
MODELS_PATH = os.path.join(CAR_PATH, 'models')
#VEHICLE
DRIVE_LOOP_HZ = 20 # the vehicle loop will pause if faster than this speed.
MAX_LOOPS = None # the vehicle loop can abort after this many iterations, when given a positive integer.
#CAMERA
CAMERA_TYPE = "PICAM" # (PICAM|WEBCAM|CVCAM|CSIC|V4L|D435|MOCK|IMAGE_LIST)
IMAGE_W = 160
IMAGE_H = 120
IMAGE_DEPTH = 3 # default RGB=3, make 1 for mono
CAMERA_FRAMERATE = DRIVE_LOOP_HZ # capture rate is tied to the vehicle loop rate
CAMERA_VFLIP = False
CAMERA_HFLIP = False
CAMERA_INDEX = 0 # used for 'WEBCAM' and 'CVCAM' when there is more than one camera connected
# For CSIC camera - If the camera is mounted in a rotated position, changing the below parameter will correct the output frame orientation
CSIC_CAM_GSTREAMER_FLIP_PARM = 0 # (0 => none , 4 => Flip horizontally, 6 => Flip vertically)
BGR2RGB = False # true to convert from BRG format to RGB format; requires opencv
SHOW_PILOT_IMAGE = False # show the image used to do the inference when in autopilot mode
# For IMAGE_LIST camera
# PATH_MASK = "~/mycar/data/tub_1_20-03-12/*.jpg"
#9865, over rides only if needed, ie. TX2..
PCA9685_I2C_ADDR = 0x40 #I2C address, use i2cdetect to validate this number
PCA9685_I2C_BUSNUM = None #None will auto detect, which is fine on the pi. But other platforms should specify the bus num.
#SSD1306_128_32
USE_SSD1306_128_32 = False # Enable the SSD_1306 OLED Display
SSD1306_128_32_I2C_ROTATION = 0 # 0 = text is right-side up, 1 = rotated 90 degrees clockwise, 2 = 180 degrees (flipped), 3 = 270 degrees
SSD1306_RESOLUTION = 1 # 1 = 128x32; 2 = 128x64
#
# DRIVE_TRAIN_TYPE
# These options specify which chassis and motor setup you are using.
# See Actuators documentation https://docs.donkeycar.com/parts/actuators/
# for a detailed explanation of each drive train type and its configuration.
# Choose one of the following and then update the related configuration section:
#
# "PWM_STEERING_THROTTLE" uses two PWM output pins to control a steering servo and an ESC, as in a standard RC car.
# "MM1" Robo HAT MM1 board
# "SERVO_HBRIDGE_2PIN" Servo for steering and HBridge motor driver in 2pin mode for motor
# "SERVO_HBRIDGE_3PIN" Servo for steering and HBridge motor driver in 3pin mode for motor
# "DC_STEER_THROTTLE" uses HBridge pwm to control one steering dc motor, and one drive wheel motor
# "DC_TWO_WHEEL" uses HBridge in 2-pin mode to control two drive motors, one on the left, and one on the right.
# "DC_TWO_WHEEL_L298N" using HBridge in 3-pin mode to control two drive motors, one of the left and one on the right.
# "MOCK" no drive train. This can be used to test other features in a test rig.
# "VESC" VESC Motor controller to set servo angle and duty cycle
# (deprecated) "SERVO_HBRIDGE_PWM" use ServoBlaster to output pwm control from the PiZero directly to control steering,
# and HBridge for a drive motor.
# (deprecated) "PIGPIO_PWM" uses Raspberrys internal PWM
# (deprecated) "I2C_SERVO" uses PCA9685 servo controller to control a steering servo and an ESC, as in a standard RC car
#
DRIVE_TRAIN_TYPE = "PWM_STEERING_THROTTLE" # selects which of the config sections below is actually used
#
# PWM_STEERING_THROTTLE
#
# Drive train for RC car with a steering servo and ESC.
# Uses a PwmPin for steering (servo) and a second PwmPin for throttle (ESC)
# Base PWM Frequency is presumed to be 60hz; use PWM_xxxx_SCALE to adjust pulse width for non-standard PWM frequencies
#
PWM_STEERING_THROTTLE = {
    "PWM_STEERING_PIN": "PCA9685.1:40.1", # PWM output pin for steering servo
    "PWM_STEERING_SCALE": 1.0, # used to compensate for PWM frequency differing from 60hz; NOT for adjusting steering range
    "PWM_STEERING_INVERTED": False, # True if hardware requires an inverted PWM pulse
    "PWM_THROTTLE_PIN": "PCA9685.1:40.0", # PWM output pin for ESC
    "PWM_THROTTLE_SCALE": 1.0, # used to compensate for PWM frequency differences from 60hz; NOT for increasing/limiting speed
    "PWM_THROTTLE_INVERTED": False, # True if hardware requires an inverted PWM pulse
    "STEERING_LEFT_PWM": 460, #pwm value for full left steering
    "STEERING_RIGHT_PWM": 290, #pwm value for full right steering
    "THROTTLE_FORWARD_PWM": 500, #pwm value for max forward throttle
    "THROTTLE_STOPPED_PWM": 370, #pwm value for no movement
    "THROTTLE_REVERSE_PWM": 220, #pwm value for max reverse throttle
}
#
# I2C_SERVO (deprecated in favor of PWM_STEERING_THROTTLE)
#
# NOTE(review): STEERING_CHANNEL, STEERING_LEFT_PWM and STEERING_RIGHT_PWM
# are assigned AGAIN further down this file (SERVO_HBRIDGE_PWM section); the
# later assignment wins, so STEERING_CHANNEL effectively ends up 0, not 1.
STEERING_CHANNEL = 1 #(deprecated) channel on the 9685 pwm board 0-15
STEERING_LEFT_PWM = 460 #pwm value for full left steering
STEERING_RIGHT_PWM = 290 #pwm value for full right steering
THROTTLE_CHANNEL = 0 #(deprecated) channel on the 9685 pwm board 0-15
THROTTLE_FORWARD_PWM = 500 #pwm value for max forward throttle
THROTTLE_STOPPED_PWM = 370 #pwm value for no movement
THROTTLE_REVERSE_PWM = 220 #pwm value for max reverse throttle
#
# PIGPIO_PWM (deprecated in favor of PWM_STEERING_THROTTLE)
#
STEERING_PWM_PIN = 13 #(deprecated) Pin numbering according to Broadcom numbers
STEERING_PWM_FREQ = 50 #Frequency for PWM
STEERING_PWM_INVERTED = False #If PWM needs to be inverted
THROTTLE_PWM_PIN = 18 #(deprecated) Pin numbering according to Broadcom numbers
THROTTLE_PWM_FREQ = 50 #Frequency for PWM
THROTTLE_PWM_INVERTED = False #If PWM needs to be inverted
#
# SERVO_HBRIDGE_2PIN
# - configures a steering servo and an HBridge in 2pin mode (2 pwm pins)
# - Servo takes a standard servo PWM pulse between 1 millisecond (fully reverse)
# and 2 milliseconds (full forward) with 1.5ms being neutral.
# - the motor is controlled by two pwm pins,
# one for forward and one for backward (reverse).
# - the pwm pin produces a duty cycle from 0 (completely LOW)
# to 1 (100% completely high), which is proportional to the
# amount of power delivered to the motor.
# - in forward mode, the reverse pwm is 0 duty_cycle,
# in backward mode, the forward pwm is 0 duty cycle.
# - both pwms are 0 duty cycle (LOW) to 'detach' motor and
# and glide to a stop.
# - both pwms are full duty cycle (100% HIGH) to brake
#
# Pin specifier string format:
# - use RPI_GPIO for RPi/Nano header pin output
# - use BOARD for board pin numbering
# - use BCM for Broadcom GPIO numbering
# - for example "RPI_GPIO.BOARD.18"
# - use PIPGIO for RPi header pin output using pigpio server
# - must use BCM (broadcom) pin numbering scheme
# - for example, "PIGPIO.BCM.13"
# - use PCA9685 for PCA9685 pin output
# - include colon separated I2C channel and address
# - for example "PCA9685.1:40.13"
# - RPI_GPIO, PIGPIO and PCA9685 can be mixed arbitrarily,
# although it is discouraged to mix RPI_GPIO and PIGPIO.
#
# Pin map used when DRIVE_TRAIN_TYPE == "SERVO_HBRIDGE_2PIN".
SERVO_HBRIDGE_2PIN = {
    "FWD_DUTY_PIN": "RPI_GPIO.BOARD.18", # provides forward duty cycle to motor
    "BWD_DUTY_PIN": "RPI_GPIO.BOARD.16", # provides reverse duty cycle to motor
    "PWM_STEERING_PIN": "RPI_GPIO.BOARD.33", # provides servo pulse to steering servo
    "PWM_STEERING_SCALE": 1.0, # used to compensate for PWM frequency differing from 60hz; NOT for adjusting steering range
    "PWM_STEERING_INVERTED": False, # True if hardware requires an inverted PWM pulse
    "STEERING_LEFT_PWM": 460, # pwm value for full left steering (use `donkey calibrate` to measure value for your car)
    "STEERING_RIGHT_PWM": 290, # pwm value for full right steering (use `donkey calibrate` to measure value for your car)
}
#
# SERVO_HBRIDGE_3PIN
# - configures a steering servo and an HBridge in 3pin mode (2 ttl pins, 1 pwm pin)
# - Servo takes a standard servo PWM pulse between 1 millisecond (fully reverse)
# and 2 milliseconds (full forward) with 1.5ms being neutral.
# - the motor is controlled by three pins,
# one ttl output for forward, one ttl output
# for backward (reverse) enable and one pwm pin
# for motor power.
# - the pwm pin produces a duty cycle from 0 (completely LOW)
# to 1 (100% completely high), which is proportional to the
# amount of power delivered to the motor.
# - in forward mode, the forward pin is HIGH and the
# backward pin is LOW,
# - in backward mode, the forward pin is LOW and the
# backward pin is HIGH.
# - both forward and backward pins are LOW to 'detach' motor
# and glide to a stop.
# - both forward and backward pins are HIGH to brake
#
# Pin specifier string format:
# - use RPI_GPIO for RPi/Nano header pin output
# - use BOARD for board pin numbering
# - use BCM for Broadcom GPIO numbering
# - for example "RPI_GPIO.BOARD.18"
# - use PIPGIO for RPi header pin output using pigpio server
# - must use BCM (broadcom) pin numbering scheme
# - for example, "PIGPIO.BCM.13"
# - use PCA9685 for PCA9685 pin output
# - include colon separated I2C channel and address
# - for example "PCA9685.1:40.13"
# - RPI_GPIO, PIGPIO and PCA9685 can be mixed arbitrarily,
# although it is discouraged to mix RPI_GPIO and PIGPIO.
#
# Pin map used when DRIVE_TRAIN_TYPE == "SERVO_HBRIDGE_3PIN".
SERVO_HBRIDGE_3PIN = {
    "FWD_PIN": "RPI_GPIO.BOARD.18", # ttl pin, high enables motor forward
    "BWD_PIN": "RPI_GPIO.BOARD.16", # ttl pin, high enables motor reverse
    "DUTY_PIN": "RPI_GPIO.BOARD.35", # provides duty cycle to motor
    "PWM_STEERING_PIN": "RPI_GPIO.BOARD.33", # provides servo pulse to steering servo
    "PWM_STEERING_SCALE": 1.0, # used to compensate for PWM frequency differing from 60hz; NOT for adjusting steering range
    "PWM_STEERING_INVERTED": False, # True if hardware requires an inverted PWM pulse
    "STEERING_LEFT_PWM": 460, # pwm value for full left steering (use `donkey calibrate` to measure value for your car)
    "STEERING_RIGHT_PWM": 290, # pwm value for full right steering (use `donkey calibrate` to measure value for your car)
}
#
# DRIVETRAIN_TYPE == "SERVO_HBRIDGE_PWM" (deprecated in favor of SERVO_HBRIDGE_2PIN)
# - configures a steering servo and an HBridge in 2pin mode (2 pwm pins)
# - Uses ServoBlaster library, which is NOT installed by default, so
# you will need to install it to make this work.
# - Servo takes a standard servo PWM pulse between 1 millisecond (fully reverse)
# and 2 milliseconds (full forward) with 1.5ms being neutral.
# - the motor is controlled by two pwm pins,
# one for forward and one for backward (reverse).
# - the pwm pins produce a duty cycle from 0 (completely LOW)
# to 1 (100% completely high), which is proportional to the
# amount of power delivered to the motor.
# - in forward mode, the reverse pwm is 0 duty_cycle,
# in backward mode, the forward pwm is 0 duty cycle.
# - both pwms are 0 duty cycle (LOW) to 'detach' motor and
# and glide to a stop.
# - both pwms are full duty cycle (100% HIGH) to brake
#
HBRIDGE_PIN_FWD = 18 # provides forward duty cycle to motor
HBRIDGE_PIN_BWD = 16 # provides reverse duty cycle to motor
# NOTE(review): these three names were already assigned earlier in this file
# (I2C_SERVO section, with STEERING_CHANNEL = 1); these later assignments
# overwrite them, so STEERING_CHANNEL is effectively 0.
STEERING_CHANNEL = 0 # PCA 9685 channel for steering control
STEERING_LEFT_PWM = 460 # pwm value for full left steering (use `donkey calibrate` to measure value for your car)
STEERING_RIGHT_PWM = 290 # pwm value for full right steering (use `donkey calibrate` to measure value for your car)
#VESC controller, primarily need to change VESC_SERIAL_PORT and VESC_MAX_SPEED_PERCENT
VESC_MAX_SPEED_PERCENT =.2 # Max speed as a percent of the actual speed
VESC_SERIAL_PORT= "/dev/ttyACM0" # Serial device to use for communication. Can check with ls /dev/tty*
VESC_HAS_SENSOR= True # Whether or not the bldc motor is using a hall effect sensor
VESC_START_HEARTBEAT= True # Whether or not to automatically start the heartbeat thread that will keep commands alive.
VESC_BAUDRATE= 115200 # baudrate for the serial communication. Shouldn't need to change this.
VESC_TIMEOUT= 0.05 # timeout for the serial communication
VESC_STEERING_SCALE= 0.5 # VESC accepts steering inputs from 0 to 1. Joystick is usually -1 to 1. This changes it to -0.5 to 0.5
VESC_STEERING_OFFSET = 0.5 # VESC accepts steering inputs from 0 to 1. Coupled with above change we move Joystick to 0 to 1
#
# DC_STEER_THROTTLE with one motor as steering, one as drive
# - uses L298N type motor controller in two pin wiring
# scheme utilizing two pwm pins per motor; one for
# forward(or right) and one for reverse (or left)
#
# GPIO pin configuration for the DRIVE_TRAIN_TYPE=DC_STEER_THROTTLE
# - use RPI_GPIO for RPi/Nano header pin output
# - use BOARD for board pin numbering
# - use BCM for Broadcom GPIO numbering
# - for example "RPI_GPIO.BOARD.18"
# - use PIPGIO for RPi header pin output using pigpio server
# - must use BCM (broadcom) pin numbering scheme
# - for example, "PIGPIO.BCM.13"
# - use PCA9685 for PCA9685 pin output
# - include colon separated I2C channel and address
# - for example "PCA9685.1:40.13"
# - RPI_GPIO, PIGPIO and PCA9685 can be mixed arbitrarily,
# although it is discouraged to mix RPI_GPIO and PIGPIO.
#
# Pin map used when DRIVE_TRAIN_TYPE == "DC_STEER_THROTTLE".
DC_STEER_THROTTLE = {
    "LEFT_DUTY_PIN": "RPI_GPIO.BOARD.18", # pwm pin produces duty cycle for steering left
    "RIGHT_DUTY_PIN": "RPI_GPIO.BOARD.16", # pwm pin produces duty cycle for steering right
    "FWD_DUTY_PIN": "RPI_GPIO.BOARD.15", # pwm pin produces duty cycle for forward drive
    "BWD_DUTY_PIN": "RPI_GPIO.BOARD.13", # pwm pin produces duty cycle for reverse drive
}
#
# DC_TWO_WHEEL pin configuration
# - configures L298N_HBridge_2pin driver
# - two wheels as differential drive, left and right.
# - each wheel is controlled by two pwm pins,
# one for forward and one for backward (reverse).
# - each pwm pin produces a duty cycle from 0 (completely LOW)
# to 1 (100% completely high), which is proportional to the
# amount of power delivered to the motor.
# - in forward mode, the reverse pwm is 0 duty_cycle,
# in backward mode, the forward pwm is 0 duty cycle.
# - both pwms are 0 duty cycle (LOW) to 'detach' motor and
# and glide to a stop.
# - both pwms are full duty cycle (100% HIGH) to brake
#
# Pin specifier string format:
# - use RPI_GPIO for RPi/Nano header pin output
# - use BOARD for board pin numbering
# - use BCM for Broadcom GPIO numbering
# - for example "RPI_GPIO.BOARD.18"
# - use PIPGIO for RPi header pin output using pigpio server
# - must use BCM (broadcom) pin numbering scheme
# - for example, "PIGPIO.BCM.13"
# - use PCA9685 for PCA9685 pin output
# - include colon separated I2C channel and address
# - for example "PCA9685.1:40.13"
# - RPI_GPIO, PIGPIO and PCA9685 can be mixed arbitrarily,
# although it is discouraged to mix RPI_GPIO and PIGPIO.
#
# Pin map used when DRIVE_TRAIN_TYPE == "DC_TWO_WHEEL" (differential drive).
DC_TWO_WHEEL = {
    "LEFT_FWD_DUTY_PIN": "RPI_GPIO.BOARD.18", # pwm pin produces duty cycle for left wheel forward
    "LEFT_BWD_DUTY_PIN": "RPI_GPIO.BOARD.16", # pwm pin produces duty cycle for left wheel reverse
    "RIGHT_FWD_DUTY_PIN": "RPI_GPIO.BOARD.15", # pwm pin produces duty cycle for right wheel forward
    "RIGHT_BWD_DUTY_PIN": "RPI_GPIO.BOARD.13", # pwm pin produces duty cycle for right wheel reverse
}
#
# DC_TWO_WHEEL_L298N pin configuration
# - configures L298N_HBridge_3pin driver
# - two wheels as differential drive, left and right.
# - each wheel is controlled by three pins,
# one ttl output for forward, one ttl output
# for backward (reverse) enable and one pwm pin
# for motor power.
# - the pwm pin produces a duty cycle from 0 (completely LOW)
# to 1 (100% completely high), which is proportional to the
# amount of power delivered to the motor.
# - in forward mode, the forward pin is HIGH and the
# backward pin is LOW,
# - in backward mode, the forward pin is LOW and the
# backward pin is HIGH.
# - both forward and backward pins are LOW to 'detach' motor
# and glide to a stop.
# - both forward and backward pins are HIGH to brake
#
# GPIO pin configuration for the DRIVE_TRAIN_TYPE=DC_TWO_WHEEL_L298N
# - use RPI_GPIO for RPi/Nano header pin output
# - use BOARD for board pin numbering
# - use BCM for Broadcom GPIO numbering
# - for example "RPI_GPIO.BOARD.18"
# - use PIPGIO for RPi header pin output using pigpio server
# - must use BCM (broadcom) pin numbering scheme
# - for example, "PIGPIO.BCM.13"
# - use PCA9685 for PCA9685 pin output
# - include colon separated I2C channel and address
# - for example "PCA9685.1:40.13"
# - RPI_GPIO, PIGPIO and PCA9685 can be mixed arbitrarily,
# although it is discouraged to mix RPI_GPIO and PIGPIO.
#
# Pin map used when DRIVE_TRAIN_TYPE == "DC_TWO_WHEEL_L298N" (3-pin HBridge).
DC_TWO_WHEEL_L298N = {
    "LEFT_FWD_PIN": "RPI_GPIO.BOARD.16", # TTL output pin enables left wheel forward
    "LEFT_BWD_PIN": "RPI_GPIO.BOARD.18", # TTL output pin enables left wheel reverse
    "LEFT_EN_DUTY_PIN": "RPI_GPIO.BOARD.22", # PWM pin generates duty cycle for left motor speed
    "RIGHT_FWD_PIN": "RPI_GPIO.BOARD.15", # TTL output pin enables right wheel forward
    "RIGHT_BWD_PIN": "RPI_GPIO.BOARD.13", # TTL output pin enables right wheel reverse
    "RIGHT_EN_DUTY_PIN": "RPI_GPIO.BOARD.11", # PWM pin generates duty cycle for right wheel speed
}
#ODOMETRY
HAVE_ODOM = False # Do you have an odometer/encoder
ENCODER_TYPE = 'GPIO' # What kind of encoder? GPIO|Arduino|Astar
MM_PER_TICK = 12.7625 # How much travel with a single tick, in mm. Roll you car a meter and divide total ticks measured by 1,000
ODOM_PIN = 13 # if using GPIO, which GPIO board mode pin to use as input
ODOM_DEBUG = False # Write out values on vel and distance as it runs
# #LIDAR
USE_LIDAR = False
LIDAR_TYPE = 'RP' #(RP|YD)
LIDAR_LOWER_LIMIT = 90 # angles that will be recorded. Use this to block out obstructed areas on your car, or looking backwards. Note that for the RP A1M8 Lidar, "0" is in the direction of the motor
LIDAR_UPPER_LIMIT = 270
# TFMINI
HAVE_TFMINI = False
TFMINI_SERIAL_PORT = "/dev/serial0" # tfmini serial port, can be wired up or use usb/serial adapter
#TRAINING
# The default AI framework to use. Choose from (tensorflow|pytorch)
DEFAULT_AI_FRAMEWORK = 'tensorflow'
# The DEFAULT_MODEL_TYPE will choose which model will be created at training
# time. This chooses between different neural network designs. You can
# override this setting by passing the command line parameter --type to the
# python manage.py train and drive commands.
# tensorflow models: (linear|categorical|tflite_linear|tensorrt_linear)
# pytorch models: (resnet18)
DEFAULT_MODEL_TYPE = 'linear'
BATCH_SIZE = 128 #how many records to use when doing one pass of gradient decent. Use a smaller number if your gpu is running out of memory.
TRAIN_TEST_SPLIT = 0.8 #what percent of records to use for training. the remaining used for validation.
MAX_EPOCHS = 100 #how many times to visit all records of your data
SHOW_PLOT = True #would you like to see a pop up display of final loss?
VERBOSE_TRAIN = True #would you like to see a progress bar with text during training?
USE_EARLY_STOP = True #would you like to stop the training if we see it's not improving fit?
EARLY_STOP_PATIENCE = 5 #how many epochs to wait before no improvement
MIN_DELTA = .0005 #early stop will want this much loss change before calling it improved.
PRINT_MODEL_SUMMARY = True #print layers and weights to stdout
OPTIMIZER = None #adam, sgd, rmsprop, etc.. None accepts default
LEARNING_RATE = 0.001 #only used when OPTIMIZER specified
LEARNING_RATE_DECAY = 0.0 #only used when OPTIMIZER specified
SEND_BEST_MODEL_TO_PI = False #change to true to automatically send best model during training
CREATE_TF_LITE = True # automatically create tflite model in training
CREATE_TENSOR_RT = False # automatically create tensorrt model in training
SAVE_MODEL_AS_H5 = False # if old keras format should be used instead of savedmodel
CACHE_IMAGES = True # if images are cached in training for speed up
PRUNE_CNN = False               #This will remove weights from your model. The primary goal is to increase performance.
PRUNE_PERCENT_TARGET = 75       # The desired percentage of pruning.
PRUNE_PERCENT_PER_ITERATION = 20 # Percentage of pruning that is performed per iteration.
PRUNE_VAL_LOSS_DEGRADATION_LIMIT = 0.2 # The max amount of validation loss that is permitted during pruning.
PRUNE_EVAL_PERCENT_OF_DATASET = .05  # percent of dataset used to perform evaluation of model.
#
# Augmentations and Transformations
#
# - Augmentations are changes to the image that are only applied during
# training and are applied randomly to create more variety in the data.
# Available augmentations are:
# - BRIGHTNESS - modify the image brightness. See [albumentations](https://albumentations.ai/docs/api_reference/augmentations/transforms/#albumentations.augmentations.transforms.RandomBrightnessContrast)
# - BLUR - blur the image. See [albumentations](https://albumentations.ai/docs/api_reference/augmentations/blur/transforms/#albumentations.augmentations.blur.transforms.Blur)
#
# - Transformations are changes to the image that apply both in
# training and at inference. They are always applied and in
# the configured order. Available image transformations are:
# - Apply a mask to the image:
# - 'CROP' - apply rectangular mask to borders of image
# - 'TRAPEZE' - apply a trapezoidal mask to image
# - Apply an enhancement to the image
# - 'CANNY' - apply canny edge detection
# - 'BLUR' - blur the image
# - resize the image
# - 'RESIZE' - resize to given pixel width and height
# - 'SCALE' - resize by given scale factor
# - change the color space of the image
# - 'RGB2BGR' - change color model from RGB to BGR
# - 'BGR2RGB' - change color model from BGR to RGB
# - 'RGB2HSV' - change color model from RGB to HSV
# - 'HSV2RGB' - change color model from HSV to RGB
# - 'BGR2HSV' - change color model from BGR to HSV
# - 'HSV2BGR' - change color model from HSV to BGR
# - 'RGB2GRAY' - change color model from RGB to greyscale
# - 'BGR2GRAY' - change color model from BGR to greyscale
# - 'HSV2GRAY' - change color model from HSV to greyscale
# - 'GRAY2RGB' - change color model from greyscale to RGB
# - 'GRAY2BGR' - change color model from greyscale to BGR
#
# You can create custom transformations and insert them into the pipeline.
# - Use a transformer label that begins with `CUSTOM`, like `CUSTOM_CROP`
#   and add that to the TRANSFORMATIONS or POST_TRANSFORMATIONS list.
# So for the custom crop example, that might look like this;
# `POST_TRANSFORMATIONS = ['CUSTOM_CROP']`
# - Set configuration properties for the module and class that
# implement your custom transformation.
# - The module config will begin with the transformer label
# and end with `_MODULE`, like `CUSTOM_CROP_MODULE`. It's value is
# the absolute file path to the python file that has the transformer
# class. For instance, if you called the file
# `my_custom_transformer.py` and put in in the root of
# your `mycar` folder, next to `myconfig.py`, then you would add
# the following to your myconfig.py file (keeping with the crop example);
# `CUSTOM_CROP_MODULE = "/home/pi/mycar/my_custom_transformer.py"`
# The actual path will depend on what OS you are using and what
# your user name is.
# - The class config will begin with the transformer label and end with `_CLASS`,
#   like `CUSTOM_CROP_CLASS`. So if your class is called `CustomCropTransformer`
#   then you would add the following property to your `myconfig.py` file:
#   `CUSTOM_CROP_CLASS = "CustomCropTransformer"`
# - Your custom class' constructor will take the Config object as an
#   argument. So you can add whatever configuration properties
#   you need to your myconfig.py, then read them in the constructor.
# You can name the properties anything you want, but it is good practice
# to prefix them with the custom transformer label so they don't conflict
# with any other config and so it is easy to see what they go with.
# For instance, in the custom crop example, we would want the border
# values, so that could look like;
# ```
# CUSTOM_CROP_TOP = 45 # rows to ignore on the top of the image
# CUSTOM_CROP_BOTTOM = 5 # rows ignore on the bottom of the image
# CUSTOM_CROP_RIGHT = 10 # pixels to ignore on the right of the image
# CUSTOM_CROP_LEFT = 10 # pixels to ignore on the left of the image
# ```
# - Your custom class must have a `run` method that takes an image and
# returns an image. It is in this method where you will implement your
# transformation logic.
# - For example, a custom crop that did a blur after the crop might look like;
# ```
# from donkeycar.parts.cv import ImgCropMask, ImgSimpleBlur
#
# class CustomCropTransformer:
# def __init__(self, config) -> None:
# self.top = config.CUSTOM_CROP_TOP
# self.bottom = config.CUSTOM_CROP_BOTTOM
# self.left = config.CUSTOM_CROP_LEFT
# self.right = config.CUSTOM_CROP_RIGHT
# self.crop = ImgCropMask(self.left, self.top, self.right, self.bottom)
# self.blur = ImgSimpleBlur()
#
# def run(self, image):
# image = self.crop.run(image)
# return self.blur.run(image)
# ```
#
AUGMENTATIONS = [] # changes to image only applied in training to create
# more variety in the data.
TRANSFORMATIONS = [] # changes applied _before_ training augmentations,
# such that augmentations are applied to the transformed image,
POST_TRANSFORMATIONS = [] # transformations applied _after_ training augmentations,
# such that changes are applied to the augmented image
# Settings for brightness and blur, use 'MULTIPLY' and/or 'BLUR' in
# AUGMENTATIONS
AUG_BRIGHTNESS_RANGE = 0.2 # this is interpreted as [-0.2, 0.2]
AUG_BLUR_RANGE = (0, 3)
# "CROP" Transformation
# Apply mask to borders of the image
# defined by a rectangle.
# If these crops values are too large, they will cause the stride values to
# become negative and the model with not be valid.
# # # # # # # # # # # # #
# xxxxxxxxxxxxxxxxxxxxx #
# xxxxxxxxxxxxxxxxxxxxx #
# xx xx # top
# xx xx #
# xx xx #
# xxxxxxxxxxxxxxxxxxxxx # bottom
# xxxxxxxxxxxxxxxxxxxxx #
# # # # # # # # # # # # #
ROI_CROP_TOP = 45 # the number of rows of pixels to ignore on the top of the image
ROI_CROP_BOTTOM = 0 # the number of rows of pixels to ignore on the bottom of the image
ROI_CROP_RIGHT = 0 # the number of rows of pixels to ignore on the right of the image
ROI_CROP_LEFT = 0 # the number of rows of pixels to ignore on the left of the image
# "TRAPEZE" tranformation
# Apply mask to borders of image
# defined by a trapezoid.
# # # # # # # # # # # # # #
# xxxxxxxxxxxxxxxxxxxxxxx #
# xxxx ul ur xxxxxxxx # min_y
# xxx xxxxxxx #
# xx xxxxxx #
# x xxxxx #
# ll lr xx # max_y
# # # # # # # # # # # # # #
ROI_TRAPEZE_LL = 0
ROI_TRAPEZE_LR = 160
ROI_TRAPEZE_UL = 20
ROI_TRAPEZE_UR = 140
ROI_TRAPEZE_MIN_Y = 60
ROI_TRAPEZE_MAX_Y = 120
# "CANNY" Canny Edge Detection tranformation
CANNY_LOW_THRESHOLD = 60 # Canny edge detection low threshold value of intensity gradient
CANNY_HIGH_THRESHOLD = 110 # Canny edge detection high threshold value of intensity gradient
CANNY_APERTURE = 3 # Canny edge detect aperture in pixels, must be odd; choices=[3, 5, 7]
# "BLUR" transformation (not this is SEPARATE from the blur augmentation)
BLUR_KERNEL = 5 # blur kernel horizontal size in pixels
BLUR_KERNEL_Y = None # blur kernel vertical size in pixels or None for square kernel
BLUR_GAUSSIAN = True # blur is gaussian if True, simple if False
# "RESIZE" transformation
RESIZE_WIDTH = 160 # horizontal size in pixels
RESIZE_HEIGHT = 120 # vertical size in pixels
# "SCALE" transformation
SCALE_WIDTH = 1.0 # horizontal scale factor
SCALE_HEIGHT = None # vertical scale factor or None to maintain aspect ratio
#Model transfer options
#When copying weights during a model transfer operation, should we freeze a certain number of layers
#to the incoming weights and not allow them to change during training?
FREEZE_LAYERS = False #default False will allow all layers to be modified by training
NUM_LAST_LAYERS_TO_TRAIN = 7 #when freezing layers, how many layers from the last should be allowed to train?
#WEB CONTROL
WEB_CONTROL_PORT = int(os.getenv("WEB_CONTROL_PORT", 8887)) # which port to listen on when making a web controller
WEB_INIT_MODE = "user" # which control mode to start in. one of user|local_angle|local. Setting local will start in ai mode.
#JOYSTICK
USE_JOYSTICK_AS_DEFAULT = False #when starting the manage.py, when True, will not require a --js option to use the joystick
JOYSTICK_MAX_THROTTLE = 0.5 #this scalar is multiplied with the -1 to 1 throttle value to limit the maximum throttle. This can help if you drop the controller or just don't need the full speed available.
JOYSTICK_STEERING_SCALE = 1.0 #some people want a steering that is less sensitve. This scalar is multiplied with the steering -1 to 1. It can be negative to reverse dir.
AUTO_RECORD_ON_THROTTLE = True #if true, we will record whenever throttle is not zero. if false, you must manually toggle recording with some other trigger. Usually circle button on joystick.
CONTROLLER_TYPE = 'xbox' #(ps3|ps4|xbox|pigpio_rc|nimbus|wiiu|F710|rc3|MM1|custom) custom will run the my_joystick.py controller written by the `donkey createjs` command
USE_NETWORKED_JS = False #should we listen for remote joystick control over the network?
NETWORK_JS_SERVER_IP = None #when listening for network joystick control, which ip is serving this information
JOYSTICK_DEADZONE = 0.01 # when non zero, this is the smallest throttle before recording triggered.
JOYSTICK_THROTTLE_DIR = -1.0 # use -1.0 to flip forward/backward, use 1.0 to use joystick's natural forward/backward
USE_FPV = False # send camera data to FPV webserver
JOYSTICK_DEVICE_FILE = "/dev/input/js0" # this is the unix file use to access the joystick.
#For the categorical model, this limits the upper bound of the learned throttle
#it's very IMPORTANT that this value is matched from the training PC config.py and the robot.py
#and ideally wouldn't change once set.
MODEL_CATEGORICAL_MAX_THROTTLE_RANGE = 0.8
#RNN or 3D
SEQUENCE_LENGTH = 3 #some models use a number of images over time. This controls how many.
#IMU
HAVE_IMU = False #when true, this add a Mpu6050 part and records the data. Can be used with a
IMU_SENSOR = 'mpu6050' # (mpu6050|mpu9250)
IMU_ADDRESS = 0x68 # if AD0 pin is pulled high them address is 0x69, otherwise it is 0x68
IMU_DLP_CONFIG = 0 # Digital Lowpass Filter setting (0:250Hz, 1:184Hz, 2:92Hz, 3:41Hz, 4:20Hz, 5:10Hz, 6:5Hz)
#SOMBRERO
HAVE_SOMBRERO = False #set to true when using the sombrero hat from the Donkeycar store. This will enable pwm on the hat.
#PIGPIO RC control
STEERING_RC_GPIO = 26
THROTTLE_RC_GPIO = 20
DATA_WIPER_RC_GPIO = 19
PIGPIO_STEERING_MID = 1500         # Adjust this value if your car cannot run in a straight line
PIGPIO_MAX_FORWARD = 2000          # Max throttle to go forward. The bigger the faster
PIGPIO_STOPPED_PWM = 1500          # pulse width sent when stopped (between MAX_REVERSE and MAX_FORWARD)
PIGPIO_MAX_REVERSE = 1000          # Max throttle to go reverse. The smaller the faster
PIGPIO_SHOW_STEERING_VALUE = False # print the steering value as it is read
PIGPIO_INVERT = False              # invert the direction of the RC input
PIGPIO_JITTER = 0.025              # threshold below which no signal is reported
#ROBOHAT MM1
MM1_STEERING_MID = 1500         # Adjust this value if your car cannot run in a straight line
MM1_MAX_FORWARD = 2000          # Max throttle to go forward. The bigger the faster
MM1_STOPPED_PWM = 1500          # pulse width sent when stopped (between MAX_REVERSE and MAX_FORWARD)
MM1_MAX_REVERSE = 1000          # Max throttle to go reverse. The smaller the faster
MM1_SHOW_STEERING_VALUE = False # print the steering value as it is read
# Serial port
# -- Default Pi: '/dev/ttyS0'
# -- Jetson Nano: '/dev/ttyTHS1'
# -- Google coral: '/dev/ttymxc0'
# -- Windows: 'COM3', Arduino: '/dev/ttyACM0'
# -- MacOS/Linux:please use 'ls /dev/tty.*' to find the correct serial port for mm1
# eg.'/dev/tty.usbmodemXXXXXX' and replace the port accordingly
MM1_SERIAL_PORT = '/dev/ttyS0' # Serial Port for reading and sending MM1 data.
#LOGGING
HAVE_CONSOLE_LOGGING = True
LOGGING_LEVEL = 'INFO' # (Python logging level) 'NOTSET' / 'DEBUG' / 'INFO' / 'WARNING' / 'ERROR' / 'FATAL' / 'CRITICAL'
LOGGING_FORMAT = '%(message)s' # (Python logging format - https://docs.python.org/3/library/logging.html#formatter-objects
#TELEMETRY
HAVE_MQTT_TELEMETRY = False
TELEMETRY_DONKEY_NAME = 'my_robot1234'
TELEMETRY_MQTT_TOPIC_TEMPLATE = 'donkey/%s/telemetry'
TELEMETRY_MQTT_JSON_ENABLE = False
TELEMETRY_MQTT_BROKER_HOST = 'broker.hivemq.com'
TELEMETRY_MQTT_BROKER_PORT = 1883
TELEMETRY_PUBLISH_PERIOD = 1
TELEMETRY_LOGGING_ENABLE = True
TELEMETRY_LOGGING_LEVEL = 'INFO' # (Python logging level) 'NOTSET' / 'DEBUG' / 'INFO' / 'WARNING' / 'ERROR' / 'FATAL' / 'CRITICAL'
TELEMETRY_LOGGING_FORMAT = '%(message)s' # (Python logging format - https://docs.python.org/3/library/logging.html#formatter-objects
TELEMETRY_DEFAULT_INPUTS = 'pilot/angle,pilot/throttle,recording'
TELEMETRY_DEFAULT_TYPES = 'float,float'
# PERF MONITOR
HAVE_PERFMON = False
#RECORD OPTIONS
RECORD_DURING_AI = False #normally we do not record during ai mode. Set this to true to get image and steering records for your Ai. Be careful not to use them to train.
AUTO_CREATE_NEW_TUB = False #create a new tub (tub_YY_MM_DD) directory when recording or append records to data directory directly
#LED
HAVE_RGB_LED = False #do you have an RGB LED like https://www.amazon.com/dp/B07BNRZWNF
LED_INVERT = False #COMMON ANODE? Some RGB LED use common anode. like https://www.amazon.com/Xia-Fly-Tri-Color-Emitting-Diffused/dp/B07MYJQP8B
#LED board pin number for pwm outputs
#These are physical pinouts. See: https://www.raspberrypi-spy.co.uk/2012/06/simple-guide-to-the-rpi-gpio-header-and-pins/
LED_PIN_R = 12
LED_PIN_G = 10
LED_PIN_B = 16
#LED status color, 0-100
LED_R = 0
LED_G = 0
LED_B = 1
#LED Color for record count indicator
REC_COUNT_ALERT = 1000 #how many records before blinking alert
REC_COUNT_ALERT_CYC = 15 #how many cycles of 1/20 of a second to blink per REC_COUNT_ALERT records
REC_COUNT_ALERT_BLINK_RATE = 0.4 #how fast to blink the led in seconds on/off
#first number is record count, second tuple is color ( r, g, b) (0-100)
#when record count exceeds that number, the color will be used
RECORD_ALERT_COLOR_ARR = [ (0, (1, 1, 1)),
(3000, (5, 5, 5)),
(5000, (5, 2, 0)),
(10000, (0, 5, 0)),
(15000, (0, 5, 5)),
(20000, (0, 0, 5)), ]
#LED status color, 0-100, for model reloaded alert
MODEL_RELOADED_LED_R = 100
MODEL_RELOADED_LED_G = 0
MODEL_RELOADED_LED_B = 0
#BEHAVIORS
#When training the Behavioral Neural Network model, make a list of the behaviors,
#Set the TRAIN_BEHAVIORS = True, and use the BEHAVIOR_LED_COLORS to give each behavior a color
TRAIN_BEHAVIORS = False
BEHAVIOR_LIST = ['Left_Lane', "Right_Lane"]         # one label per behavior; order matches BEHAVIOR_LED_COLORS
BEHAVIOR_LED_COLORS = [(0, 10, 0), (10, 0, 0)] #RGB tuples 0-100 per channel
#Localizer
#The localizer is a neural network that can learn to predict its location on the track.
#This is an experimental feature that needs more development. But it can currently be used
#to predict the segment of the course, where the course is divided into NUM_LOCATIONS segments.
TRAIN_LOCALIZER = False
NUM_LOCATIONS = 10
BUTTON_PRESS_NEW_TUB = False #when enabled, makes it easier to divide our data into one tub per track length if we make a new tub on each X button press.
#DonkeyGym
#Only on Ubuntu linux, you can use the simulator as a virtual donkey and
#issue the same python manage.py drive command as usual, but have them control a virtual car.
#This enables that, and sets the path to the simulator and the environment.
#You will want to download the simulator binary from: https://github.com/tawnkramer/donkey_gym/releases/download/v18.9/DonkeySimLinux.zip
#then extract that and modify DONKEY_SIM_PATH.
DONKEY_GYM = False
DONKEY_SIM_PATH = "path to sim" #"/home/tkramer/projects/sdsandbox/sdsim/build/DonkeySimLinux/donkey_sim.x86_64" when racing on virtual-race-league use "remote", or user "remote" when you want to start the sim manually first.
DONKEY_GYM_ENV_NAME = "donkey-generated-track-v0" # ("donkey-generated-track-v0"|"donkey-generated-roads-v0"|"donkey-warehouse-v0"|"donkey-avc-sparkfun-v0")
GYM_CONF = { "body_style" : "donkey", "body_rgb" : (128, 128, 128), "car_name" : "car", "font_size" : 100} # body style(donkey|bare|car01) body rgb 0-255
GYM_CONF["racer_name"] = "Your Name"
GYM_CONF["country"] = "Place"
GYM_CONF["bio"] = "I race robots."
SIM_HOST = "127.0.0.1" # when racing on virtual-race-league use host "trainmydonkey.com"
SIM_ARTIFICIAL_LATENCY = 0 # this is the millisecond latency in controls. Can use useful in emulating the delay when useing a remote server. values of 100 to 400 probably reasonable.
# Save info from Simulator (pln)
SIM_RECORD_LOCATION = False
SIM_RECORD_GYROACCEL= False
SIM_RECORD_VELOCITY = False
SIM_RECORD_LIDAR = False
#publish camera over network
#This is used to create a tcp service to publish the camera feed
PUB_CAMERA_IMAGES = False
#When racing, to give the ai a boost, configure these values.
AI_LAUNCH_DURATION = 0.0 # the ai will output throttle for this many seconds
AI_LAUNCH_THROTTLE = 0.0 # the ai will output this throttle value
AI_LAUNCH_ENABLE_BUTTON = 'R2' # this keypress will enable this boost. It must be enabled before each use to prevent accidental trigger.
AI_LAUNCH_KEEP_ENABLED = False # when False ( default) you will need to hit the AI_LAUNCH_ENABLE_BUTTON for each use. This is safest. When this True, is active on each trip into "local" ai mode.
#Scale the output of the throttle of the ai pilot for all model types.
AI_THROTTLE_MULT = 1.0 # this multiplier will scale every throttle value for all output from NN models
#Path following
PATH_FILENAME = "donkey_path.pkl" # the path will be saved to this filename
PATH_SCALE = 5.0 # the path display will be scaled by this factor in the web page
PATH_OFFSET = (0, 0) # 255, 255 is the center of the map. This offset controls where the origin is displayed.
PATH_MIN_DIST = 0.3 # after travelling this distance (m), save a path point
PID_P = -10.0 # proportional mult for PID path follower
PID_I = 0.000 # integral mult for PID path follower
PID_D = -0.2 # differential mult for PID path follower
PID_THROTTLE = 0.2 # constant throttle value during path following
USE_CONSTANT_THROTTLE = False # whether or not to use the constant throttle or variable throttle captured during path recording
SAVE_PATH_BTN = "cross" # joystick button to save path
RESET_ORIGIN_BTN = "triangle" # joystick button to press to move car back to origin
# Intel Realsense D435 and D435i depth sensing camera
REALSENSE_D435_RGB = True # True to capture RGB image
REALSENSE_D435_DEPTH = True # True to capture depth as image array
REALSENSE_D435_IMU = False # True to capture IMU data (D435i only)
REALSENSE_D435_ID = None # serial number of camera or None if you only have one camera (it will autodetect)
# Stop Sign Detector
STOP_SIGN_DETECTOR = False
STOP_SIGN_MIN_SCORE = 0.2
STOP_SIGN_SHOW_BOUNDING_BOX = True
STOP_SIGN_MAX_REVERSE_COUNT = 10 # How many times should the car reverse when detected a stop sign, set to 0 to disable reversing
STOP_SIGN_REVERSE_THROTTLE = -0.5 # Throttle during reversing when detected a stop sign
# FPS counter
SHOW_FPS = False
FPS_DEBUG_INTERVAL = 10 # the interval in seconds for printing the frequency info into the shell
|
import numpy as np
import os
import os.path
import re
# Format of photon results
#pt yield yield*vn_cos[1] yield*vn_sin[1] yield*vn_cos[2] yield*vn_sin[2] yield*vn_cos[3] yield*vn_sin[3] yield*vn_cos[4] yield*vn_sin[4] yield*vn_cos[5] yield*vn_sin[5] yield*vn_cos[6] yield*vn_sin[6]
# Directory containing this script; every relative data path below is
# resolved against it so the script works regardless of the CWD.
base_directory=os.path.dirname(os.path.realpath(__file__))
##########################################################
############## Where are the calculations?? ##############
##########################################################
def get_rate_filelist(rate_type, system, cent_class, event):
    """Return the list of photon-rate files to combine for one rate label.

    Delegates to the builder stored in the module-level ``rate_dict`` under
    ``rate_type``.  ``cent_class`` and ``event`` are coerced to strings
    because the stored builders use them as path components.
    """
    return rate_dict[rate_type](system,str(cent_class),str(event))
def get_hadron_Qns_path(system,cent_class,event):
    """Return the path of the hadronic Q_n-vector file for one hydro event.

    Points at the pion Qn-vector file (``Qn_vectors_pions_smash.dat``) under
    ``../../raw_hydro_calcs/<system>/<cent_class>/<event>/``, resolved
    relative to this script's directory.
    """
    file_dir="../../raw_hydro_calcs/"
    return os.path.join(base_directory,file_dir, system, cent_class, event, "Qn_vectors_pions_smash.dat")
# Dictionary mapping a photon rate label to a builder
#   f(system, cent_class, event) -> [filepath, filepath, ...]
# that returns the list of files whose contents are summed to form that rate.
rate_dict={
#'prompt':lambda system, cent_class, event : [os.path.join("/home/jp401/Dropbox/work/my_papers/mcgill_qm2019/prompt_photons",system,cent_class,event, "prompt.dat")]
}
# Don't fill the dictionary by hand for thermal photons --- it's too much work
# Just loop over all the possible thermal photon rates
#for_rate_crosscheck/ photons_T100-150/ photons_T110-150_nx200/ photons_T130-150/ photons_T140-150_nx200/ photons_above_Tfr_nx200/
#for_rate_crosscheck_total/ photons_T100-150_nx200/ photons_T120-150/ photons_T130-150_nx200/ photons_above_Tfr/
#get_res.sh photons_T110-150/ photons_T120-150_nx200/ photons_T140-150/ photons_above_Tfr2/
# Sub-directories holding the thermal rates for temperature windows below T=150
thermal_rate_types_below_T150=[
"photons_T100-150_nx200",
"photons_T120-150_nx200",
"photons_T140-150_nx200",
]
# Sub-directory holding the rate for photons emitted above the freeze-out T
thermal_rate_above_T150="photons_above_Tfr_nx200"
# Root of the raw hydro calculations.  Defined ONCE, before any of the
# lambdas below: they read it as a global at call time, and previously it
# was only assigned inside the loops further down, so the first entry
# worked purely by late binding.
file_dir="../../raw_hydro_calcs/"
rate_dict["photons_above_Tfr_nx200"]=lambda system, cent_class, event : [ os.path.join(base_directory,file_dir, system, cent_class, event, thermal_rate_above_T150, "vn_rate_thermal_ideal.dat") ]
# Files to average for the thermal rates.
# NOTE: "thermal_rate_type=thermal_rate_type" binds the loop variable as a
# default argument, so each lambda keeps the value of its own iteration
# instead of the late-bound final value.
for thermal_rate_type in thermal_rate_types_below_T150:
    #vn_rate_hg_ideal_Turbide_fit_noPiPi_tabulated.dat
    #vn_rate_hg_pion_brem_ideal_Rapp_fit_tabulated.dat
    #vn_rate_thermal_ideal.dat
    # hadron-gas (Turbide fit, no PiPi) rate plus the above-Tfr contribution
    rate_dict["22_with_photons_above_Tfr_"+thermal_rate_type]=lambda system, cent_class, event, thermal_rate_type=thermal_rate_type : [ os.path.join(base_directory,file_dir, system, cent_class, event, thermal_rate_type,"vn_rate_hg_ideal_Turbide_fit_noPiPi_tabulated.dat"), os.path.join(base_directory,file_dir, system, cent_class, event, thermal_rate_above_T150,"vn_rate_thermal_ideal.dat") ]
    # pion bremsstrahlung (Rapp fit) rate plus the above-Tfr contribution
    rate_dict["brem_with_photons_above_Tfr_"+thermal_rate_type]=lambda system, cent_class, event, thermal_rate_type=thermal_rate_type : [ os.path.join(base_directory,file_dir,system,cent_class,event, thermal_rate_type,"vn_rate_hg_pion_brem_ideal_Rapp_fit_tabulated.dat"), os.path.join(base_directory,file_dir, system, cent_class, event, thermal_rate_above_T150,"vn_rate_thermal_ideal.dat") ]
    # full thermal rate plus the above-Tfr contribution
    rate_dict["tot_with_photons_above_Tfr_"+thermal_rate_type]=lambda system, cent_class, event, thermal_rate_type=thermal_rate_type : [ os.path.join(base_directory,file_dir,system,cent_class,event, thermal_rate_type,"vn_rate_thermal_ideal.dat"), os.path.join(base_directory,file_dir, system, cent_class, event, thermal_rate_above_T150,"vn_rate_thermal_ideal.dat") ]
    # "22_" + "brem_" entries plus the above-Tfr contribution.  This relies on
    # the "22_*"/"brem_*" keys filled by the NEXT loop, which is safe because
    # rate_dict is fully populated before any builder is invoked.
    rate_dict["tot2_with_photons_above_Tfr_"+thermal_rate_type]=lambda system, cent_class, event, thermal_rate_type=thermal_rate_type : get_rate_filelist("22_"+thermal_rate_type, system, cent_class, event) + get_rate_filelist("brem_"+thermal_rate_type, system, cent_class, event) + [ os.path.join(base_directory,file_dir, system, cent_class, event, thermal_rate_above_T150,"vn_rate_thermal_ideal.dat") ]
# Same rate combinations as above, but WITHOUT the above-Tfr contribution
for thermal_rate_type in thermal_rate_types_below_T150:
    #vn_rate_hg_ideal_Turbide_fit_noPiPi_tabulated.dat
    #vn_rate_hg_pion_brem_ideal_Rapp_fit_tabulated.dat
    #vn_rate_thermal_ideal.dat
    # hadron-gas (Turbide fit, no PiPi) rate only
    rate_dict["22_"+thermal_rate_type]=lambda system, cent_class, event, thermal_rate_type=thermal_rate_type: [ os.path.join(base_directory,file_dir, system, cent_class, event, thermal_rate_type,"vn_rate_hg_ideal_Turbide_fit_noPiPi_tabulated.dat") ]
    # pion bremsstrahlung (Rapp fit) rate only
    rate_dict["brem_"+thermal_rate_type]=lambda system, cent_class, event, thermal_rate_type=thermal_rate_type: [ os.path.join(base_directory,file_dir,system,cent_class,event, thermal_rate_type,"vn_rate_hg_pion_brem_ideal_Rapp_fit_tabulated.dat") ]
    # full thermal rate only
    rate_dict["tot_"+thermal_rate_type]=lambda system, cent_class, event, thermal_rate_type=thermal_rate_type: [ os.path.join(base_directory,file_dir,system,cent_class,event, thermal_rate_type,"vn_rate_thermal_ideal.dat") ]
    # "22_" + "brem_" entries
    rate_dict["tot2_"+thermal_rate_type]=lambda system, cent_class, event, thermal_rate_type=thermal_rate_type: get_rate_filelist("22_"+thermal_rate_type, system, cent_class, event) + get_rate_filelist("brem_"+thermal_rate_type, system, cent_class, event)
####################################################
############## Systems & centralities ##############
####################################################
# Collision systems and centrality classes to process.
system_list=["PbPb2760"]
cent_class_list_calc=["C10-20"]  # classes for which hydro events actually exist on disk
additional_cent_class_list=[]    # combined classes assembled from the calculated ones
cent_class_list=cent_class_list_calc+additional_cent_class_list
# Which calculated centrality classes are merged to build each combined class.
cent_class_combination_info={
'C0-20':['C0-5','C5-10','C10-20'],
'C20-40':['C20-30','C30-40'],
'C40-60':['C40-50','C50-60']
}
####################################################
############## Systems & centralities ##############
####################################################
def make_destination_dir(tmp_dir):
    """Create the output directory ``tmp_dir`` unless it already exists.

    When the directory is already present a warning is printed and the
    script keeps running (the hard abort is deliberately commented out).
    """
    if not os.path.isdir(tmp_dir):
        os.makedirs(tmp_dir)
        return
    print("Destination directory ",tmp_dir, " already exists... Aborting.")
    #exit(1)
# Assume format e.g. "C0-5"
def centrality_class_weight(cent_string):
    """Return the width (in percent) of a centrality class string.

    Parses strings of the form ``C<low>-<high>`` (e.g. "C0-5", "C10-20")
    and returns ``high - low``, used as the relative weight of the class
    when combining centrality bins.  Exits the program if the string
    cannot be parsed.
    """
    # [0-9]+ instead of [0-9]{1,2}: also accepts 3-digit bounds such as
    # "C0-100" (the old pattern silently rejected them).
    subdir_regex = re.compile("C([0-9]+)-([0-9]+)")
    match=subdir_regex.match(cent_string)
    if (match != None):
        low_bound=int(match.group(1))
        high_bound=int(match.group(2))
    else:
        print("Can't parse centrality string",cent_string)
        exit(1)
    return high_bound-low_bound
# Loop over photon channel combinations specified in 'extract_dict'
#for result_name, file_function_list in extract_dict.items():
for result_name in rate_dict.keys():
for system in system_list:
pre_event_list_dict={}
###############################################################################################
############### First make a list of events available for each centrality class ###############
###############################################################################################
for cent_class in cent_class_list_calc:
# Find all events
#tmp_ref_dir=os.path.dirname(os.path.dirname(os.path.dirname(get_rate_filelist(result_name,system,cent_class,"1")[0])))
tmp_ref_dir=os.path.join("../../raw_hydro_calcs/",system,cent_class)
all_file_in_local_dir=os.listdir(path=tmp_ref_dir)
subdir_regex = re.compile('([0-9]+)')
event_list=[]
for tmp_file in all_file_in_local_dir:
if (not os.path.isdir(os.path.join(tmp_ref_dir,tmp_file))):
continue
match=subdir_regex.match(tmp_file)
if (match != None):
event=int(float(match.group(1)))
filelist=get_rate_filelist(result_name,system,cent_class,event)
#filelist=[]
#for fct_name in file_function_list:
# filelist+=fct_name(system,cent_class,str(event))
# filelist=get_kompost_photons_path(system,cent_class,str(event))
# filelist+=get_thermal_photons_path(system,cent_class,str(event))
#print(filelist)
file_exists=[os.path.isfile(filepath) for filepath in filelist]
hadron_Qns_path=get_hadron_Qns_path(system,cent_class,str(event))
if (all(file_exists)):
if (os.path.isfile(hadron_Qns_path)):
event_list.append((cent_class,event))
else:
print("No hadron Q_s's in ",hadron_Qns_path, "... Will skip the event, but be careful about this...")
else:
print("No photon in directory ",os.path.join(tmp_ref_dir,tmp_file))
print(filelist)
print(file_exists)
#exit(1)
pre_event_list_dict[cent_class]=event_list
##########################################################################################################
############### Second, determine which event should be averaged for each centrality class ###############
##########################################################################################################
event_list_dict={}
for cent_class in cent_class_list:
#
if (cent_class in cent_class_list_calc):
event_list_dict[cent_class]=pre_event_list_dict[cent_class]
else:
# Figure out which centralities to combine
cent_class_combine=cent_class_combination_info[cent_class]
#sub_cent_class_dict={}
#num_events_per_cent_class=[]
min_event_per_cent_percent=np.inf
for sub_cent_class in cent_class_combine:
#sub_cent_class_dict[sub_cent_class]={}
# Figure out the weight
tmp_weight=centrality_class_weight(sub_cent_class)
#sub_cent_class_dict[sub_cent_class]['weight']=tmp_num
tmp_num_events=len(pre_event_list_dict[sub_cent_class])
tmp_min_event=tmp_num_events*1.0/tmp_weight
# min_event_per_cent_percent=np.min(min_event_per_cent_percent,tmp_min_event)
if (tmp_min_event < min_event_per_cent_percent):
min_event_per_cent_percent=tmp_min_event
tmp_event_list=[]
for sub_cent_class in cent_class_combine:
tmp_weight=centrality_class_weight(sub_cent_class)
num_event_to_take=int(min_event_per_cent_percent*tmp_weight)
#print("pre-batard",sub_cent_class,pre_event_list_dict[sub_cent_class],pre_event_list_dict[sub_cent_class][:num_event_to_take])
tmp_event_list=tmp_event_list+pre_event_list_dict[sub_cent_class][:num_event_to_take]
event_list_dict[cent_class]=tmp_event_list
#print("batard",cent_class,tmp_event_list)
#exit(1)
## print(cent_class)
# print(event_list_dict.keys())
# #print(event_list_dict)
# print("miaw")
# print(event_list_dict['C0-20'])
for cent_class_label in cent_class_list:
event_list=event_list_dict[cent_class_label]
#print(event_list)
num_events=len(event_list)
print("Averaging ",str(num_events)," events for "+system+" "+cent_class_label)
#print("event_list", event_list)
if (num_events < 1):
continue
res_dict={
'pT':None,
'yield':None,
'v1':None,
'v2':None,
'v3':None,
'v4':None,
'v5':None,
'v6':None,
}
#################################################
############## Average over events ##############
#################################################
# Read one of the photon files to figure out the size of all the arrays once and for all
tmp_cent, tmp_ev = event_list[0]
filename=get_rate_filelist(result_name,system,tmp_cent,str(tmp_ev))[0]
tmp_res=np.loadtxt(filename)
NpT, Ncol= tmp_res.shape
N_harmonics=int((Ncol-2)/2)
pT_list=tmp_res[:,0]
photon_event_vn=np.zeros((N_harmonics,NpT))
photon_event_Psin=np.zeros((N_harmonics,NpT))
photon_yield=np.zeros((1,NpT))
photon_vn_rms=np.zeros((N_harmonics,NpT))
photon_vn_sp=np.zeros((N_harmonics,NpT))
photon_vn_sp_num=np.zeros((N_harmonics,NpT))
photon_vn_sp_denum=np.zeros((N_harmonics,NpT))
photon_cos_psis=np.zeros((N_harmonics,NpT))
#for result_name, file_function_list in extract_dict.items():
#
# for system in system_list:
#
# for cent_class in cent_class_list_calc:
#
#################################################################################
############## Average calculations over all existing hydro events ##############
#################################################################################
# For each event
for cent_class, event in event_list:
# The photon calculations are arranged such that one can simply
# sum over the files containing each channel's results to
# combine the different photon production channels together
file_list=get_rate_filelist(result_name,system,cent_class,str(event))
#file_list=[]
#for fct_name in file_function_list:
# file_list+=fct_name(system,cent_class,str(event))
# Sum over all channels
filename=file_list[0]
result=np.loadtxt(filename)
## pT = result[:,0]
## print(pT)
for photon_file in file_list[1:]:
filename=photon_file
#tpT,ty,tyv1c,tyv1s,tyv2c,tyv2s,tyv3c,tyv3s,tyv4c,tyv4s,tyv5c,tyv5s,tyv6c,tyv6s = np.transpose(np.loadtxt(filename))
result+=np.loadtxt(filename)
tpT, y, yv1c, yv1s, yv2c, yv2s, yv3c, yv3s, yv4c, yv4s, yv5c, yv5s, yv6c, yv6s = np.transpose(result)
yvn_array=[
[1, yv1c, yv1s],
[2, yv2c, yv2s],
[3, yv3c, yv3s],
[4, yv4c, yv4s],
[5, yv5c, yv5s],
[6, yv6c, yv6s]
]
# This computes the RMS v_n
for n, yc, ys in yvn_array:
photon_event_vn[n-1,:]=np.sqrt((yc*yc+ys*ys)/(y*y))
photon_event_Psin[n-1,:]=np.arctan2(ys,yc)/n
# print(np.cos(2*photon_event_Psin))
####################################################
############## Get the hadronic Q_n's ##############
####################################################
# Current hadron Qn format:
# n Qn_real Qn_imag
n, Qn_real, Qn_im=np.loadtxt(get_hadron_Qns_path(system,cent_class,str(event))).T
#Qn_real=np.float(Qn_real)
#Qn_im=np.float(Qn_im)
Psin_hadrons=np.arctan2(Qn_im, Qn_real)/n
vn_hadrons=np.zeros_like(Psin_hadrons)+1.0
#vn_hadrons=Qns_raw[1:,5]
#Psin_hadrons=Qns_raw[1:,6]
# print(np.cos(2*Psin_hadrons))
# print(vn_hadrons[1],photon_event_vn[1,0])
# print(Psin_hadrons[1],photon_event_Psin[1,0])
# exit(1)
################################################################
############## Compute photon average over events ##############
################################################################
# Yield is always just a sum over events
photon_yield+=y
# Compute v_n with different definitions
for n, yc, ys in yvn_array:
# This computes the RMS v_n
photon_vn_rms[n-1,:]+=np.power(photon_event_vn[n-1,:],2)
# This computes the scalar product v_n
photon_vn_sp_num[n-1,:]+=photon_event_vn[n-1,:]*vn_hadrons[n-1]*np.cos(n*(photon_event_Psin[n-1,:]-Psin_hadrons[n-1]))
photon_vn_sp_denum[n-1,:]+=np.power(vn_hadrons[n-1],2)
#photon_vn_sp_num[n-1,:]+=np.cos(n*(photon_event_Psin[n-1,:]-Psin_hadrons[n-1]))
#photon_vn_sp_denum[n-1,:]+=1
#photon_vn_sp_num[n-1,:]+=photon_event_vn[n-1,:]*photon_event_vn[n-1,:]*np.cos(n*(photon_event_Psin[n-1,:]-photon_event_Psin[n-1,:]))
#photon_vn_sp_denum[n-1,:]+=np.power(photon_event_vn[n-1,:],2)
photon_cos_psis[n-1,:]+=np.cos(n*(photon_event_Psin[n-1,:]-Psin_hadrons[n-1]))
# Post processing
num_events=len(event_list)
photon_yield/=num_events
# For RMS v_n
photon_vn_rms=np.sqrt(photon_vn_rms/num_events)
# For scalar product v_n
photon_vn_sp=(photon_vn_sp_num/num_events)/np.sqrt(photon_vn_sp_denum/num_events)
# For <cos(n(Psi_n^h-Psi_n^g(p_T)))>
photon_cos_psis=photon_cos_psis/num_events
#print(pT_list)
#print(photon_yield)
#print(photon_vn)
destination_dir=os.path.join(".","results",result_name,system,cent_class_label)
make_destination_dir(destination_dir)
np.savetxt(os.path.join(destination_dir,"average_rms.dat"),np.transpose(np.concatenate(([pT_list],photon_yield,photon_vn_rms))))
np.savetxt(os.path.join(destination_dir,"average_sp.dat"),np.transpose(np.concatenate(([pT_list],photon_yield,photon_vn_sp))))
np.savetxt(os.path.join(destination_dir,"average_cosnspsis.dat"),np.transpose(np.concatenate(([pT_list],photon_yield,photon_cos_psis))))
|
import os
import pytest
from polyglotdb import CorpusContext
# def test_run_script(acoustic_utt_config, praat_path, praatscript_test_dir):
# with CorpusContext(acoustic_utt_config) as g:
# g.config.praat_path = praat_path
# script_path = os.path.join(praatscript_test_dir, 'COG.praat')
# sibilantfile_path = os.path.join(textgrid_test_dir, 'acoustic_corpus_sib1.wav')
# output = run_script(g.config.praat_path, script_path, sibilantfile_path, 0.0, 0.137, '1', '2')
# output = output.strip()
# assert (float(output) == 4654.12)
# assert (output.replace('.','').isnumeric())
#
# def test_analyze_script_file(acoustic_utt_config, praat_path, praatscript_test_dir):
# with CorpusContext(acoustic_utt_config) as g:
# g.config.praat_path = praat_path
# script_path = os.path.join(praatscript_test_dir, 'COG.praat')
# sibilantfile_path = os.path.join(textgrid_test_dir, 'acoustic_corpus_sib1.wav')
# output = g.analyze_script_file(script_path, sibilantfile_path, 0.0, 0.137, None, '1', '2')
# assert(output == 4654.12)
@pytest.mark.acoustic
def test_analyze_script(acoustic_utt_config, praat_path, praatscript_test_dir):
    """Run the sibilant Praat script over the corpus and check that the
    expected acoustic measures are attached to every sibilant phone."""
    with CorpusContext(acoustic_utt_config) as corpus:
        corpus.config.praat_path = praat_path
        corpus.encode_class(['s', 'z', 'sh', 'zh'], 'sibilant')
        script = os.path.join(praatscript_test_dir, 'sibilant_jane.praat')
        measured = corpus.analyze_script(
            subset='sibilant',
            annotation_type="phone",
            script_path=script,
            stop_check=None,
            call_back=None,
            multiprocessing=False,
        )
        # The script is expected to produce exactly these four measures.
        assert measured == sorted(['cog', 'peak', 'slope', 'spread'])
        # Every sibilant phone should carry a non-empty row for each
        # spot-checked measure.
        for column in ('peak', 'spread'):
            query = corpus.query_graph(corpus.phone).filter(corpus.phone.subset == 'sibilant')
            query = query.columns(corpus.phone.begin, corpus.phone.end,
                                  getattr(corpus.phone, column))
            rows = query.all()
            assert len(rows) > 0
            for row in rows:
                assert row.values
@pytest.mark.acoustic
def test_analyze_track_script(acoustic_utt_config, praat_path, praatscript_test_dir):
    """Encode a custom formant track via a Praat script, verify it is
    queryable on vowel phones, then reset it again."""
    measure = 'formants_other'
    with CorpusContext(acoustic_utt_config) as corpus:
        corpus.reset_acoustics()
        corpus.config.praat_path = praat_path
        corpus.encode_class(['ih', 'iy', 'ah', 'uw', 'er', 'ay', 'aa', 'ae', 'eh', 'ow'], 'vowel')
        script = os.path.join(praatscript_test_dir, 'formants.praat')
        track_props = [('F1', float), ('F2', float), ('F3', float)]
        script_args = [0.01, 0.025, 5, 5500]
        corpus.analyze_track_script(measure, track_props, script,
                                    phone_class='vowel', file_type='vowel',
                                    arguments=script_args)
        # The measure must be registered and present for the first discourse.
        assert measure in corpus.hierarchy.acoustics
        assert corpus.discourse_has_acoustics(measure, corpus.discourses[0])
        query = corpus.query_graph(corpus.phone).filter(corpus.phone.label == 'ow')
        query = query.columns(corpus.phone.begin, corpus.phone.end,
                              corpus.phone.formants_other.track)
        rows = query.all()
        assert len(rows) > 0
        print(len(rows))
        for row in rows:
            # Each 'ow' phone should have a non-empty formant track.
            assert len(row.track)
        # Resetting the measure must remove the stored tracks again.
        corpus.reset_acoustic_measure(measure)
        assert not corpus.discourse_has_acoustics(measure, corpus.discourses[0])
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2006 José de Paula Eufrásio Junior (jose.junior@gmail.com) AND
# Yves Junqueira (yves.junqueira@gmail.com)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Provides installed software information for win32 computers
"""
import win32com.client
class packages:
    """Inventory of software installed on the local Windows machine.

    Uses WMI (Win32_Product) via win32com.

    Attributes:
        installed: list of installed product captions.
        installed_ver: dict mapping product caption -> version string.
        update_candidates: reserved for callers; not populated here.
    """
    # Class-level defaults kept for backward compatibility with callers that
    # read them off the class; real values are (re)bound per instance below.
    installed = []
    installed_ver = {}
    update_candidates = {}
    def __init__(self):
        self.installed_ver = self._get_win32_product()
        # list() keeps this a real list under both Python 2 and Python 3.
        self.installed = list(self.installed_ver.keys())
        # Own dict per instance: the mutable class-level default would
        # otherwise be shared (and mutated) across all instances.
        self.update_candidates = {}
    def _get_win32_product(self):
        """Query Win32_Product on the local host.

        :return: dict mapping each product caption to its version string.
        """
        strComputer = "."
        objWMIService = win32com.client.Dispatch("WbemScripting.SWbemLocator")
        # Raw string: don't rely on '\c' happening not to be an escape.
        objSWbemServices = objWMIService.ConnectServer(strComputer, r"root\cimv2")
        colItems = objSWbemServices.ExecQuery("Select * from Win32_Product")
        installed_ver = {}
        for objItem in colItems:
            installed_ver[objItem.Caption] = objItem.Version
        return installed_ver
if __name__ == '__main__':
    # Demo: dump the installed-software inventory.
    # print() call syntax works under both Python 2 and Python 3; the
    # original `print x` statements are a SyntaxError on Python 3.
    s = packages()
    print(s.installed_ver)
    print(s.installed)
from restaurant_entities.models.menu import Allergen
from django.core.exceptions import ValidationError
from .base import BaseForm
class AllergenForm(BaseForm):
    """Model form for creating and editing Allergen menu entries."""
    class Meta:
        model = Allergen
        # Only the allergen's number and its internal (staff-facing) name
        # are editable through this form.
        fields = [
            'number',
            'internal_name'
        ]
|
# Read a count, then that many single-character lines; report the sum of
# their Unicode code points.
count = int(input())
total = sum(ord(input()) for _ in range(count))
print(f"The sum equals: {total}")
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 9 09:44:37 2018
@author: Dartoon
Test Background remove.
"""
# Synthetic test image: 100 Gaussian sources on a flat background.
from photutils.datasets import make_100gaussians_image
data = make_100gaussians_image()
import matplotlib.pyplot as plt
from astropy.visualization import SqrtStretch
from astropy.visualization.mpl_normalize import ImageNormalize
# Square-root stretch makes faint sources visible in the display.
norm = ImageNormalize(stretch=SqrtStretch())
plt.imshow(data, norm=norm, origin='lower', cmap='Greys_r')
plt.show()
import numpy as np
from astropy.stats import biweight_location
# Compare plain vs. robust estimators of the background level.
print(np.median(data))
print(biweight_location(data))
from astropy.stats import mad_std
# Robust standard deviation via the median absolute deviation.
print(mad_std(data))
from astropy.stats import sigma_clipped_stats
# NOTE(review): `iters=` was renamed `maxiters=` in later astropy
# releases -- confirm the pinned astropy version.
mean, median, std = sigma_clipped_stats(data, sigma=3.0, iters=5)
print((mean, median, std))
from photutils import make_source_mask
# Masking detected sources first gives cleaner background statistics.
mask = make_source_mask(data, snr=2, npixels=5, dilate_size=11)
mean, median, std = sigma_clipped_stats(data, sigma=3.0, mask=mask)
print((mean, median, std))
# Add an artificial smooth gradient to simulate a varying background.
ny, nx = data.shape
y, x = np.mgrid[:ny, :nx]
gradient = x * y / 5000.
data2 = data + gradient
plt.imshow(data2, norm=norm, origin='lower', cmap='Greys_r')
plt.show()
from astropy.stats import SigmaClip
from photutils import Background2D, SExtractorBackground
# 2-D background estimation on a 50x50 mesh with a 3x3 median filter.
sigma_clip = SigmaClip(sigma=3., iters=10)
bkg_estimator = SExtractorBackground()
bkg = Background2D(data2, (50, 50), filter_size=(3, 3),
                   sigma_clip=sigma_clip, bkg_estimator=bkg_estimator)
plt.imshow(data2, norm=norm, origin='lower', cmap='Greys_r')
bkg.plot_meshes(outlines=True, color='#1f77b4')
plt.show()
print(bkg.background_median)
print(bkg.background_rms_median)
plt.imshow(bkg.background, origin='lower', cmap='Greys_r')
plt.show()
#from scipy.ndimage import rotate
#data3 = rotate(data2, -45.)
#norm = ImageNormalize(stretch=SqrtStretch())
#plt.imshow(data3, origin='lower', cmap='Greys_r', norm=norm)
#plt.show()
#mask = (data3 == 0)
#bkg3 = Background2D(data3, (25, 25), filter_size=(3, 3), mask=mask)
#
#back3 = bkg3.background * ~mask
#norm = ImageNormalize(stretch=SqrtStretch())
#plt.imshow(back3, origin='lower', cmap='Greys_r', norm=norm)
#plt.show()
#plt.imshow(data3, origin='lower', cmap='Greys_r', norm=norm)
#bkg3.plot_meshes(outlines=True, color='#1f77b4')
#plt.show() |
import falcon
from wsgiref import simple_server
class DynamicInfo(object):
    """Root resource: replies to every GET with a fixed greeting."""
    def on_get(self, req, resp):
        # Static payload; `req` is unused but required by falcon's
        # responder signature.
        resp.body = "hola que tal"
        resp.status = falcon.HTTP_200
# Wire the resource at the application root.
app = falcon.API()
app.add_route('/', DynamicInfo())
if __name__ == '__main__':
    # Development server only; use a production WSGI server for deployment.
    httpd = simple_server.make_server('127.0.0.1', 8000, app)
    httpd.serve_forever()
|
from django.urls import path
from . import views
# Post API routes: list/create at the collection root,
# retrieve/update/delete of a single post at /<pk>/.
urlpatterns = [
    path('', views.ListCreatePostAPIView.as_view(), name='get_post'),
    path('<int:pk>/', views.RetrieveUpdateDestroyPostAPIView.as_view(), name='get_delete_update_post'),
] |
import similarity_utilities as su
import load_utilities as lu
import test_functions as test_f
import time
from sklearn.metrics import precision_recall_curve
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
import numpy as np
import sklearn as sk
# With 50 queries -> 70-100 sec per query ==> 1-1.4 h total runtime.
N_QUERY = 14
# Precomputed tf-idf matrices (documents x terms, queries x terms).
tf = lu.load_sparse("Intermidiate-data-structure/tfidf_doc_matrix.npz")
qtf = lu.load_sparse("Intermidiate-data-structure/tfidf_query_matrix.npz")
terms = lu.load_terms("Intermidiate-data-structure/termID_mapping_list.txt")
# IDs of the queries to evaluate.
QQ = [3,7,14,22,25,26,37,41,46,56,61,68,75,93]
relevant_doc = [] # indexed by query
all_doc = set()
# Collect the union of all documents relevant to any query; scoring is
# restricted to this pool.
for z,i in enumerate(QQ):
    relevant_doc.append( lu.load_relevant_for_query(i, "Test/Dataset/rlv-ass.txt"))
    all_doc.update(relevant_doc[z])
all_doc = list(all_doc)
#print(len(all_doc))
N_DOC = len(all_doc)
# final_score[q][d] = (similarity score, document id)
final_score = np.zeros((N_QUERY,N_DOC,2))
final_score_cos_sim = np.zeros((N_QUERY,N_DOC,2))
#print(all_doc)
print("\nTASK 1 : Computing scores on query, doc : "+str(N_QUERY-1)+" "+str(N_DOC)+"\n")
count=0
for z,i in enumerate(QQ):
    start = time.time()
    query = qtf[i]
    print("\nQuery N: "+str(i)+" on "+str(N_QUERY-1))
    for j in range(N_DOC):
        print(j)  # NOTE(review): per-document debug print; very noisy for large N_DOC
        doc = tf[ all_doc[j] ]
        #s = su.gvsm_approx_similarity(doc, query, terms, su.custom_similarity)
        s=sk.metrics.pairwise.cosine_similarity(doc,query)
        final_score[z][j][0] = s
        final_score[z][j][1] = np.int32(all_doc[j])
        #print(str(s)+" "+str(all_doc[j]))
        count+=1
        if (count > 20):
            print("--- Complete : "+str(100*j/N_DOC)+"%")
            #print("Doc N: "+str(j)+" on "+str(N_DOC))
            count = 0
    print("time : "+str(time.time()-start))
print("--- Complete : " + str(100 ) + "%")
print("\nTASK 2 : Sorting scores\n")
# Sort each query's (score, docID) rows by ascending score; the ranking is
# consumed from the end (highest score first) below.
for i in range (0,N_QUERY):
    final_score[i] = final_score[i][final_score[i][:,0].argsort()]
precision = [0]*10 # i-th index is the precision at recall level (i+1)*0.1
single_query = [0]*10
file = open("gvsm-report-customsimilaritycs2-NQuery "+str(N_QUERY)+".txt","w")
print("\nTASK 3 : Precision-Recall\n")
# Interpolated 10-point precision/recall per query.
for i in range(0,N_QUERY):
    print("\n\n\n\nRelevant docs for the query : "+str(i))
    file.write("\n\n\n\nRelevant docs for the query : "+str(i)+"\n")
    den_recall = len(relevant_doc[i])
    den_precision = 0.0
    num_recall = 0.0
    recall_level = 1
    tot_relevant = 0.0
    # Walk the ranking from highest to lowest score.
    for j in reversed(range(N_DOC)):
        den_precision+=1
        #print("score : "+str(final_score[i][j][0]))
        #print("docID : " + str(final_score[i][j][1]))
        if (final_score[i][j][1] in relevant_doc[i]):
            print(" docID : "+str(final_score[i][j][1])+" score : "+str(final_score[i][j][0]))
            file.write(" docID : "+str(final_score[i][j][1])+" score : "+str(final_score[i][j][0])+"\n")
            num_recall+=1
            #print("recall"+str(num_recall/den_recall))
            #print("precision "+str(num_recall/den_precision))
            #print("recall level "+str(recall_level*0.1))
            if (recall_level*0.1<= num_recall/den_recall and recall_level<11 ):
                #print("RECALL"+str(num_recall/den_recall))
                #print("PRECISION "+str(num_recall/den_precision))
                # Recall reached 100%: fill all remaining recall levels
                # with the current precision and stop early.
                if (num_recall/den_recall >=1):
                    print(num_recall/den_recall)
                    print(recall_level)
                    for l in range(recall_level,11):
                        print(precision[l - 1])
                        precision[l - 1] += num_recall / den_precision
                        single_query[l - 1] = num_recall / den_precision
                    break
                precision[recall_level-1] += num_recall/den_precision
                single_query[recall_level - 1] = num_recall / den_precision
                recall_level += 1
    print("The single score for the query : " + str(single_query))
    file.write("The single score for the query : " + str(single_query) + "\n")
#print(precision)
# NOTE(review): precision was accumulated over N_QUERY queries but is
# averaged by N_QUERY-1 -- confirm whether this off-by-one is intentional.
precision = list(map(lambda x: x/(N_QUERY-1), precision))
print("The final score for all the queries : "+str(precision))
file.write("\n\nThe final score for all the queries : "+str(precision)+"\n")
file.close()
#print(su.gvsm_approx_similarity(tf[813], qtf[4], terms, su.f))
#score : 0.31237573126322477
#docID : 813.0
'''
The (score docs) for the query : 2
score : 0.4007771964051852
docID : 1239.0
recall level 0.1
The (score docs) for the query : 1
score : 0.02021351690760601
docID : 4569.0
'''
#print(su.gvsm_approx_similarity(tf[4569], qtf[1], terms, su.sim))
# Plot average precision against the 10 standard recall levels.
plt.plot([0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0] ,precision, '-o')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.1])
plt.xlim([0.0, 1.1])
plt.show()
|
'''
Created on Mar 10, 2013
@author: io
'''
import psutil
from logging import getLogger
from time import gmtime, strftime
from beecell.uwsgi_sys.wrapper import uwsgi_util
from datetime import datetime
from beecell.simple import truncate
import traceback
class UwsgiManagerError(Exception):
    """Raised for any failure while inspecting or controlling uwsgi."""
    pass
class UwsgiManager(object):
    """Inspect and control a running uwsgi instance (process tree, worker
    statistics, reload) using psutil and the uwsgi wrapper utilities.
    """
    def __init__(self):
        # Logger named after "<module>.<class>" for targeted filtering.
        self.logger = getLogger(self.__class__.__module__+ \
                                '.'+self.__class__.__name__)
    def _get_proc_infos(self, p, extended=False):
        """Internal function to get process infos.

        Builds a nested dict describing the process and, recursively, all
        of its children.

        :param p: process instance (psutil.Process)
        :param extended: if True also include the process's memory maps
        :return: dictionary with process infos
        :raise UwsgiManagerError: if any psutil call fails
        """
        try:
            io_counters = p.io_counters()
            mem = p.memory_full_info()
            files = p.open_files()
            conns = p.connections(kind='all')
            # NOTE(review): the 'cpu' and 'mem' keys are each defined twice
            # in this literal; the later entries ({'affinity': ...} and
            # {'use': ...}) silently overwrite the earlier ones, so the
            # cpu_percent() and memory_full_info() values are discarded.
            # NOTE(review): 'uids'/'gids' store the bound methods p.uids /
            # p.gids rather than their return values -- confirm intent.
            res = {'type':'process',
                   'pid':p.pid,
                   'ppid':p.ppid(),
                   'name':p.name(),
                   'exe':p.exe(),
                   'cmdline':p.cmdline(),
                   'environ':p.environ(),
                   'create_time':datetime.fromtimestamp(p.create_time()).strftime("%Y-%m-%d %H:%M:%S"),
                   'status':p.status(),
                   'state':p.is_running(),
                   'cwd':p.cwd(),
                   'user':{'name':p.username(),
                           'uids':p.uids,
                           'gids':p.gids},
                   'stats':{'io':{'read':{'count':io_counters.read_count,
                                          'bytes':io_counters.read_bytes},
                                  'write':{'count':io_counters.write_count,
                                           'bytes':io_counters.write_bytes}}},
                   'ctx_switches':p.num_ctx_switches(),
                   'cpu':p.cpu_percent(interval=0.1),
                   'mem':{'rss':mem.rss,
                          'vms':mem.vms,
                          'shared':mem.shared,
                          'text':mem.text,
                          'lib':mem.lib,
                          'data':mem.data,
                          'dirty':mem.dirty,
                          'uss':mem.uss,
                          'pss':mem.pss,
                          'swap':mem.swap},
                   'fds':{'num':p.num_fds(),
                          'files':[{'path':f.path,
                                    'fd':f.fd,
                                    'position':f.position,
                                    'mode':f.mode,
                                    'flags':f.flags} for f in files]},
                   'cpu':{'affinity':p.cpu_affinity()},
                   'mem':{'use':p.memory_percent(memtype="rss")},
                   'conn':[{'fd':c.fd,
                            'family':c.family,
                            'type':c.type,
                            'laddr':c.laddr,
                            'raddr':c.raddr,
                            'status':c.status}for c in conns],
                   'threads':{'num':p.num_threads(), 'list':p.threads()},
                   'children':[]}
            if extended is True:
                # Memory maps are expensive to collect; only on request.
                res['mem']['maps'] = p.memory_maps()
            self.logger.debug('Get process: %s' % p)
            # Recurse into direct children (children(False) is non-recursive;
            # the recursion happens through this call).
            for child in p.children(False):
                res['children'].append(self._get_proc_infos(child,
                                                            extended=extended))
            return res
        except:
            # NOTE(review): bare except also traps SystemExit and
            # KeyboardInterrupt; the traceback is logged before re-raising
            # as a domain error.
            self.logger.error(traceback.format_exc())
            raise UwsgiManagerError('Can not get process %s info' % p)
    def info(self, extended=False):
        """Get uwsgi instance infos.

        Returns the process tree rooted at the uwsgi master. Per-process
        field glossary (from psutil):
        memory:
            rss: aka "Resident Set Size", this is the non-swapped physical memory a process has used. On UNIX it matches "top"'s RES column (see doc). On Windows this is an alias for wset field and it matches "Mem Usage" column of taskmgr.exe.
            vms: aka "Virtual Memory Size", this is the total amount of virtual memory used by the process. On UNIX it matches "top"'s VIRT column (see doc). On Windows this is an alias for pagefile field and it matches "Mem Usage" "VM Size" column of taskmgr.exe.
            shared: (Linux) memory that could be potentially shared with other processes. This matches "top"'s SHR column (see doc).
            text (Linux, BSD): aka TRS (text resident set) the amount of memory devoted to executable code. This matches "top"'s CODE column (see doc).
            data (Linux, BSD): aka DRS (data resident set) the amount of physical memory devoted to other than executable code. It matches "top"'s DATA column (see doc).
            lib (Linux): the memory used by shared libraries.
            dirty (Linux): the number of dirty pages.
        open files:
            path: the absolute file name.
            fd: the file descriptor number; on Windows this is always -1.
            position (Linux): the file (offset) position.
            mode (Linux): a string indicating how the file was opened, similarly open's mode argument. Possible values are 'r', 'w', 'a', 'r+' and 'a+'. There's no distinction between files opened in bynary or text mode ("b" or "t").
            flags (Linux): the flags which were passed to the underlying os.open C call when the file was opened (e.g. os.O_RDONLY, os.O_TRUNC, etc).
        connections:
            fd: the socket file descriptor. This can be passed to socket.fromfd() to obtain a usable socket object. This is only available on UNIX; on Windows -1 is always returned.
            family: the address family, either AF_INET, AF_INET6 or AF_UNIX.
            type: the address type, either SOCK_STREAM or SOCK_DGRAM.
            laddr: the local address as a (ip, port) tuple or a path in case of AF_UNIX sockets.
            raddr: the remote address as a (ip, port) tuple or an absolute path in case of UNIX sockets. When the remote endpoint is not connected you'll get an empty tuple (AF_INET) or None (AF_UNIX). On Linux AF_UNIX sockets will always have this set to None.
            status: represents the status of a TCP connection. The return value is one of the psutil.CONN_* constants. For UDP and UNIX sockets this is always going to be psutil.CONN_NONE.
        :param extended: if True print processes memory maps
        :return: nested dict describing the master process and its children
        :raise UwsgiManagerError:
        """
        master_proc = psutil.Process(int(uwsgi_util.masterpid()))
        resp = self._get_proc_infos(master_proc, extended=extended)
        self.logger.debug('Get uwsgi processes: %s' % truncate(resp))
        return resp
    def stats(self):
        """Get uwsgi instance statistics.

        :return: dict with timestamp, worker list, master pid, total
                 request count and memory usage
        :raise UwsgiManagerError:
        """
        try:
            timestamp = strftime("%d %b %Y %H:%M:%S +0000", gmtime())
            resp = {'timestamp':timestamp,
                    'workers':uwsgi_util.workers(),
                    'masterpid':uwsgi_util.masterpid(),
                    'tot_requests':uwsgi_util.total_requests(),
                    #'applist':uwsgi.applist,
                    #'options':uwsgi.get_option(),
                    'mem':uwsgi_util.mem()}
            self.logger.debug('Get uwsgi workers stats: %s' % truncate(resp))
            return resp
        except:
            # NOTE(review): bare except; original error details are lost.
            raise UwsgiManagerError('Can not get info for uwsgi server')
    def reload(self):
        """Reload uwsgi instance.

        :return: dict with timestamp and the reload call's result message
        :raise UwsgiManagerError:
        """
        try:
            pid = uwsgi_util.masterpid()
            timestamp = strftime("%d %b %Y %H:%M:%S +0000", gmtime())
            reloadState = uwsgi_util.reload()
            #mem_info = info.getMemInfo()
            resp = {'timestamp':timestamp, 'msg':str(reloadState)}
            self.logger.debug('Reload uwsgi instance %s: %s' % (pid, resp))
            return resp
        except:
            # NOTE(review): bare except; original error details are lost.
            raise UwsgiManagerError('Can not reload uwsgi server')
# Berechnen Sie nun für n= 1,...,500 den Fehler der Riemann-Summe,
# und plotten Sie diesen Fehler in einer logarithmischen Skala gegen n.
# Werten Sie dazu einmal die Funktion an den Anfangspunkten der Teilintervalle
# aus (d.h.q= 0), ein anderes Mal an deren Mittelpunkten (d.h. q= 0.5).
# Vergleichen Sie Ihre Ergebnisse. Was beobachten Sie?
from matplotlib import pyplot as plt
from scipy import special
import math
import numpy as np
f1 = lambda x: math.e ** (-x ** 2)
def riemann(I, f, n, q):
    """Compute the Riemann sum of ``f`` over the interval ``I``.

    The interval is split into ``n`` equal subintervals; within each one
    the evaluation point is xi_k = x_{k-1} + q*(x_k - x_{k-1})
    (q=0: left endpoints, q=0.5: midpoints, q=1: right endpoints).

    Parameter:
    @param I: Integrationsintervall
    @type I: tuple
    @param f: Funktion
    @param n: Anzahl der Teilintervalle
    @type n: int
    @param q: legt die Lage des Wertes xi_k fest, 0 <= q <= 1
    @type q: float
    @rtype: float
    @return die Riemannsumme
    """
    grid = np.linspace(I[0], I[1], n + 1)
    sample_points = _get_xi_k_values(grid, q)
    return _get_sum(f, grid, sample_points)
def _get_xi_k_values(list, q):
"""
Berechnet die Werte, die durch ξ_k=x_{k−1}+q(x_k−x_k−1) die
Lage des Wertes ξ_k festlegen.
Parameter:
@param list: Liste der x-Werte
@type list: list
@param q: Benötigt zur Berechnung von xi. Es muss gelten
0 ≤ q ≤ 1
@type q: float
@rtype: list
@return Eine Liste von xi-Werten
"""
qv = []
for i in range(len(list) - 1):
qv.append(list[i] + q * (list[i + 1] - list[i]))
return qv
def _get_sum(f, x, q):
"""
Berechnet die Riemann-Summe.
Parameter:
@param f: Funktion
@param x: Eine Liste der x-Werte
@type x: list
@param q: Eine Liste der xi-Werte
@type q: list
@rtype: float
@return Die Riemann-Summe
"""
result = 0
for i in range(len(q)):
result += f(q[i]) * (x[i + 1] - x[i])
return result
def _make_plot_list_with_riemann_failure(I, f, n, q):
    """Build (n-values, error-values) lists for plotting the Riemann error.

    The error is signed and measured against the exact value of the
    integral of e^(-x^2) over [0, 1], i.e. 0.5*sqrt(pi)*erf(1).

    @param I: Integrationsintervall
    @type I: tuple
    @param f: Funktion
    @param n: largest number of subintervals to evaluate
    @type n: int
    @param q: relative evaluation position within each subinterval
    @type q: float
    @rtype: tuple of lists
    @return: tuple (list of n values 1..n, list of signed errors)
    """
    exact = 0.5 * special.erf(1) * math.sqrt(math.pi)
    ns = list(np.arange(1, n + 1, 1))
    errors = [riemann(I, f, k, q) - exact for k in ns]
    return (ns, errors)
def make_plot(I, f, n, q, c):
    """Plot the Riemann-sum error against n into the current figure.

    @param c: matplotlib format/colour string for the curve
    """
    ns, errors = _make_plot_list_with_riemann_failure(I, f, n, q)
    plt.plot(ns, errors, c, label='q=%s' % q)
def _plot_riemann_intervall():
    """Visualise left/middle/right Riemann sums of f1 on [0, 1] with N=7
    subintervals in three side-by-side subplots.

    Adapted from
    https://www.math.ubc.ca/~pwalls/math-python/integration/riemann-sums/
    """
    f = f1
    a = 0; b = 1; N = 7
    n = 10*N+1 # Use n*N+1 points to plot the function smoothly
    x = np.linspace(a,b,N+1)
    y = f(x)
    X = np.linspace(a,b,n*N+1)
    Y = f(X)
    plt.figure(figsize=(15,5))
    # Left Riemann sum: evaluate at the left endpoint of each bar.
    plt.subplot(1,3,1)
    plt.plot(X,Y,'b')
    x_left = x[:-1] # Left endpoints
    y_left = y[:-1]
    plt.plot(x_left,y_left,'b.',markersize=10)
    plt.bar(x_left,y_left,width=(b-a)/N,alpha=0.2,align='edge',edgecolor='b')
    plt.title('Linke Riemann-Summe, n= {}'.format(N))
    # Midpoint Riemann sum.
    plt.subplot(1,3,2)
    plt.plot(X,Y,'b')
    x_mid = (x[:-1] + x[1:])/2 # Midpoints
    y_mid = f(x_mid)
    plt.plot(x_mid,y_mid,'b.',markersize=10)
    plt.bar(x_mid,y_mid,width=(b-a)/N,alpha=0.2,edgecolor='b')
    plt.title('Mittlere Riemann-Summe, n = {}'.format(N))
    # Right Riemann sum: negative bar width draws bars leftwards.
    plt.subplot(1,3,3)
    plt.plot(X,Y,'b')
    x_right = x[1:] # Right endpoints
    y_right = y[1:]
    plt.plot(x_right,y_right,'b.',markersize=10)
    plt.bar(x_right,y_right,width=-(b-a)/N,alpha=0.2,align='edge',edgecolor='b')
    plt.title('Rechte Riemann-Summe, n = {}'.format(N))
    #plt.show()
# Plot the Riemann-sum error for n = 1..500 on a logarithmic n axis.
I = (0, 1)
n = 500
# q = 0 -> left endpoints, q = 0.5 -> midpoints, q = 1 -> right endpoints
make_plot(I, f1, n, 0, 'r')
make_plot(I, f1, n, 0.5, 'b')
make_plot(I, f1, n, 1, '--g')
plt.xscale("log")
plt.grid(which='both')
plt.title('Fehler der Riemann-Summe für\n left-points(q=0) und mid-points(q=0.5)')
plt.legend()
plt.xlabel('n=%i (logarithmische Skala)' %n)
_plot_riemann_intervall()
plt.show()
|
import asyncio
import pytest
from liualgotrader.common.database import create_db_connection
from liualgotrader.models.accounts import Accounts
@pytest.fixture
def event_loop():
    # Provide an event loop per test and make sure the DB connection pool
    # exists before any test body runs.
    loop = asyncio.get_event_loop()
    loop.run_until_complete(create_db_connection())
    yield loop
    loop.close()
# NOTE(review): these tests return True, but pytest ignores (and newer
# versions warn about) non-None return values from test functions.
@pytest.mark.asyncio
@pytest.mark.devtest
async def test_create() -> bool:
    """Happy path: creating an account with a credit line succeeds."""
    account_id = await Accounts.create(
        balance=1000.0,
        allow_negative=True,
        credit_line=2000.0,
    )
    print(f"new account_id:{account_id}")
    return True
@pytest.mark.asyncio
@pytest.mark.devtest
async def test_negative_create() -> bool:
    """allow_negative without a credit_line is presumably rejected by
    create(); the raised exception is only printed here -- TODO confirm
    that an exception is actually expected."""
    try:
        await Accounts.create(
            balance=1000.0,
            allow_negative=True,
        )
    except Exception as e:
        print(e)
    return True
@pytest.mark.asyncio
@pytest.mark.devtest
async def test_balance() -> bool:
    """get_balance() returns the balance the account was created with."""
    balance = 1234.0
    account_id = await Accounts.create(
        balance=balance,
        allow_negative=True,
        credit_line=2000.0,
    )
    print(f"new account_id:{account_id}")
    if balance != await Accounts.get_balance(account_id):
        raise AssertionError("get_balance() did not return the expect value")
    print(f"balance {balance}")
    return True
@pytest.mark.asyncio
@pytest.mark.devtest
async def test_add_transaction() -> bool:
    """A positive transaction increases the balance by its amount."""
    balance = 1234.0
    account_id = await Accounts.create(
        balance=balance,
        allow_negative=True,
        credit_line=2000.0,
    )
    amount = 1000.0
    await Accounts.add_transaction(account_id, amount)
    if balance + amount != await Accounts.get_balance(account_id):
        raise AssertionError(
            "test_add_transaction(): get_balance() did not return the expect value"
        )
    return True
@pytest.mark.asyncio
@pytest.mark.devtest
async def test_add_transaction2() -> bool:
    """Equal credit and debit transactions cancel out (negative allowed)."""
    balance = 1234.0
    account_id = await Accounts.create(
        balance=balance,
        allow_negative=True,
        credit_line=2000.0,
    )
    amount = 1000.0
    await Accounts.add_transaction(account_id, amount)
    await Accounts.add_transaction(account_id, -amount)
    if balance != await Accounts.get_balance(account_id):
        raise AssertionError(
            "test_add_transaction(): get_balance() did not return the expect value"
        )
    return True
@pytest.mark.asyncio
@pytest.mark.devtest
async def test_add_transaction3() -> bool:
    """Equal credit and debit also cancel when negatives are disallowed,
    since the balance never dips below zero in this order."""
    balance = 1234.0
    account_id = await Accounts.create(
        balance=balance,
        allow_negative=False,
    )
    amount = 1000.0
    await Accounts.add_transaction(account_id, amount)
    await Accounts.add_transaction(account_id, -amount)
    if balance != await Accounts.get_balance(account_id):
        raise AssertionError(
            "test_add_transaction3(): get_balance() did not return the expect value"
        )
    return True
@pytest.mark.asyncio
@pytest.mark.devtest
async def test_negative_add_transaction() -> bool:
    """An overdrawing debit on a no-negative account must be rejected and
    leave the balance unchanged."""
    balance = 1234.0
    account_id = await Accounts.create(
        balance=balance,
        allow_negative=False,
    )
    amount = 1000.0
    await Accounts.add_transaction(account_id, amount)
    await Accounts.add_transaction(account_id, -amount)
    try:
        # Overdraw attempt: expected to raise on a no-negative account.
        await Accounts.add_transaction(account_id, -10000.0)
    except Exception as e:
        print(e)
    if balance != await Accounts.get_balance(account_id):
        raise AssertionError(
            "test_negative_add_transaction(): get_balance() did not return the expect value"
        )
    return True
@pytest.mark.asyncio
@pytest.mark.devtest
async def test_add_transaction5() -> bool:
    """A debit within the credit line may take the balance negative."""
    balance = 100.0
    account_id = await Accounts.create(
        balance=balance, allow_negative=True, credit_line=5000.0
    )
    amount = 1000.0
    await Accounts.add_transaction(account_id, -amount)
    if balance - amount != await Accounts.get_balance(account_id):
        raise AssertionError(
            "test_add_transaction5(): get_balance() did not return the expect value"
        )
    return True
@pytest.mark.asyncio
@pytest.mark.devtest
async def test_negative_add_transaction2() -> bool:
    """A debit beyond the credit line must be rejected and leave the
    balance unchanged."""
    balance = 100.0
    account_id = await Accounts.create(
        balance=balance, allow_negative=True, credit_line=5000.0
    )
    amount = 10000.0
    try:
        await Accounts.add_transaction(account_id, -amount)
    except Exception as e:
        print(e)
    if balance != await Accounts.get_balance(account_id):
        raise AssertionError(
            "test_add_transaction5(): get_balance() did not return the expect value"
        )
    return True
@pytest.mark.asyncio
@pytest.mark.devtest
async def test_clear_balance() -> bool:
    """clear_balance() resets the account balance to the given value (0)."""
    print("test_clear_balance")
    balance = 1234.0
    account_id = await Accounts.create(
        balance=balance,
        allow_negative=True,
        credit_line=2000.0,
    )
    print(f"new account_id:{account_id}")
    if balance != await Accounts.get_balance(account_id):
        raise AssertionError("get_balance() did not return the expect value")
    print(f"balance {balance}")
    await Accounts.clear_balance(account_id, 0)
    if await Accounts.get_balance(account_id) != 0.0:
        raise AssertionError("clear balance failed")
    return True
@pytest.mark.asyncio
@pytest.mark.devtest
async def test_clear_transactions() -> bool:
    """clear_account_transactions() leaves the transaction log empty."""
    print("test_clear_transactions")
    balance = 100.0
    account_id = await Accounts.create(
        balance=balance, allow_negative=True, credit_line=5000.0
    )
    amount = 1000.0
    await Accounts.add_transaction(account_id, -amount)
    if balance - amount != await Accounts.get_balance(account_id):
        raise AssertionError(
            "test_clear_transactions(): get_balance() did not return the expect value"
        )
    await Accounts.clear_account_transactions(account_id)
    _df = await Accounts.get_transactions(account_id)
    if not _df.empty:
        raise AssertionError(
            "test_clear_transactions(): failed to clear account_transactions"
        )
    return True
|
import json
import random
import argparse
import numpy as np
from config import Config
import torch
import torch.nn as nn
import os
import pickle
import numpy as np
import random
import torch.nn.functional as F
import math
from Model import build_model
from utils import EnvHndler, bool_flag
from trainer import Trainer
from evaluator import Evaluator
# Make numpy raise on every floating-point error (division by zero,
# overflow, underflow, invalid operation) instead of warning silently.
np.seterr(all='raise')
def set_seed(config):
    """Seed every RNG (python, numpy, torch, CUDA) from ``config.env_seed``.

    If ``config.env_seed`` is -1, a fresh random seed is drawn and written
    back onto ``config`` so the run stays reproducible afterwards.
    """
    if config.env_seed == -1:
        config.env_seed = np.random.randint(1_000_000_000)
    seed = config.env_seed
    # Seed every source of randomness the training run touches.
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        # Trade some speed for reproducible cuDNN kernel selection.
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
    # Pin hash randomization too (only effective for subprocesses spawned
    # after this point).
    os.environ["PYTHONHASHSEED"] = str(seed)
    print(f'set seed to {config.env_seed}')
def get_parser():
    """
    Generate a parameters parser.

    Returns an argparse.ArgumentParser covering experiment, model, training,
    data-reload, checkpoint, and evaluation options.
    """
    # parse parameters
    parser = argparse.ArgumentParser(description="Language transfer")
    # main parameters
    parser.add_argument("--save_periodic", type=int, default=0,
                        help="Save the model periodically (0 to disable)")
    parser.add_argument("--exp_id", type=int, default=0,
                        help="Experiment ID (if 0, generate a new one)")
    # model parameters
    parser.add_argument("--model_type", type=str, default="Transformers",
                        help="Set the model type {Transformers, Performers}")
    parser.add_argument("--model_dim", type=int, default=512,
                        help="Embedding and other layers size")
    parser.add_argument("--num_enc_layer", type=int, default=6,
                        help="Number of Transformer layers in the encoder")
    parser.add_argument("--num_dec_layer", type=int, default=6,
                        help="Number of Transformer layers in the decoder")
    parser.add_argument("--forward_expansion", type=int, default=4,
                        help="The ratio of the hidden size of the Feed Forward net to model_dim")
    parser.add_argument("--max_position", type=int, default=4096,
                        help="The maximum number of positions we have in the data")
    parser.add_argument("--num_head", type=int, default=8,
                        help="Number of Transformer heads")
    parser.add_argument("--share_inout_emb", type=bool_flag, default=True,
                        help="Share input and output embeddings")
    # training parameters
    parser.add_argument("--env_seed", type=int, default=0,
                        help="Base seed for environments (-1 to use timestamp seed)")
    parser.add_argument("--batch_size", type=int, default=32,
                        help="Number of sentences per batch")
    parser.add_argument("--learning_rate", type=float, default=0.0001,
                        help="Learning rate for Adam optimizer")
    parser.add_argument("--clip_grad_norm", type=float, default=5,
                        help="Clip gradients norm (0 to disable)")
    parser.add_argument("--epoch_size", type=int, default=300000,
                        help="Epoch size / evaluation frequency")
    parser.add_argument("--max_epoch", type=int, default=100000,
                        help="Number of epochs")
    # reload data
    parser.add_argument("--train_reload_size", type=int, default=10000,
                        help="Reloaded training set size (-1 for everything)")
    # Fixed help text: it previously duplicated the training-set description.
    parser.add_argument("--test_reload_size", type=int, default=500,
                        help="Reloaded test set size (-1 for everything)")
    # reload pretrained model / checkpoint
    parser.add_argument("--load_model", type=bool_flag, default=False,
                        help="Load a pretrained model")
    parser.add_argument("--reload_checkpoint", type=str, default="",
                        help="Reload a checkpoint")
    # evaluation
    parser.add_argument("--eval_only", type=bool_flag, default=False,
                        help="Only run evaluations")
    parser.add_argument("--eval_verbose", type=int, default=0,
                        help="Export evaluation details")
    parser.add_argument("--eval_verbose_print", type=bool_flag, default=False,
                        help="Print evaluation details")
    return parser
def main(args):
    """
    Main function contains the main procedure of training and evaluation.

    Builds the experiment Config from the parsed args, seeds all RNGs,
    constructs the environment, model, trainer and evaluator, then either
    runs evaluation only or loops over training epochs.

    args: a parser namespace that contains the main configurations of the
        current experiment
    """
    config = Config(args)
    set_seed(config)
    logger = config.get_logger()
    env = EnvHndler(config)
    # Free any cached CUDA memory before building the model
    torch.cuda.empty_cache()
    model = build_model(config)
    trainer = Trainer(config, env, model)
    evaluator = Evaluator(trainer)
    # evaluation-only mode: score once, log, and exit without training
    if config.eval_only:
        scores = evaluator.run_all_evals()
        for k, v in scores.items():
            logger.info("%s -> %.6f" % (k, v))
        logger.info("__log__:%s" % json.dumps(scores))
        exit()
    # training
    for epoch in range(config.max_epoch):
        logger.info("============ Starting epoch %i ... ============" % trainer.epoch)
        trainer.n_equations = 0
        torch.cuda.empty_cache()
        # one epoch = epoch_size equations processed by enc_dec_step()
        while trainer.n_equations < trainer.epoch_size:
            # training steps
            torch.cuda.empty_cache()
            trainer.enc_dec_step()
            trainer.iter()
        logger.info("============ End of epoch %i ============" % trainer.epoch)
        # evaluate perplexity
        scores = evaluator.run_all_evals()
        # print / JSON log
        for k, v in scores.items():
            logger.info("%s -> %.6f" % (k, v))
        logger.info("__log__:%s" % json.dumps(scores))
        # end of epoch: checkpointing and bookkeeping
        trainer.save_best_model(scores)
        trainer.save_periodic()
        trainer.end_epoch(scores)
        # NOTE(review): every 10th epoch blocks on interactive stdin input;
        # unattended runs will hang here -- confirm this is intended.
        if epoch%10 == 0:
            while True:
                t = input("Continue training? [y/n]")
                if t not in ['y', 'n']:
                    print('Invalid input')
                    continue
                elif t == 'y':
                    break
                else:
                    exit()
if __name__ == '__main__':
    # generate parser / parse parameters, then run training/evaluation.
    # (Removed a leftover debug print statement.)
    args = get_parser()
    args = args.parse_args()
    main(args)
|
class Solution:
    def lengthOfLongestSubstring(self, s: str) -> int:
        """Return the length of the longest substring of s with no repeated characters.

        Replaces the original O(n^2) restart-per-index scan with an O(n)
        sliding window: `start` is the left edge of the current repeat-free
        window and `last_seen` maps each character to its most recent index.
        """
        last_seen = {}  # char -> most recent index seen
        start = 0       # left edge of the current repeat-free window
        longest = 0
        for i, ch in enumerate(s):
            if ch in last_seen and last_seen[ch] >= start:
                # ch repeats inside the window: slide the left edge just
                # past its previous occurrence.
                start = last_seen[ch] + 1
            last_seen[ch] = i
            longest = max(longest, i - start + 1)
        return longest
|
# coding: utf-8
# # Implementing a Route Planner
# In this project you will use A\* search to implement a "Google-maps" style
# route planning algorithm.
# In[1]:
# Run this cell first!
from helpers import Map, load_map, show_map
from student_code import shortest_path
#get_ipython().run_line_magic('load_ext', 'autoreload')
#get_ipython().run_line_magic('autoreload', '2')
# ### Map Basics
# In[2]:
map_10 = load_map('map_10.json')
show_map(map_10)
# The map above (run the code cell if you don't see it) shows a disconnected
# network of 10 intersections. The two intersections on the left are connected
# to each other but they are not connected to the rest of the road network.
#
# These `Map` objects have two properties you will want to use to implement A\*
# search: `intersections` and `roads`
#
# **Intersections**
#
# The `intersections` are represented as a dictionary.
#
# In this example, there are 10 intersections, each identified by an x,y
# coordinate. The coordinates are listed below. You can hover over each dot
# in the map above to see the intersection number.
# In[3]:
map_10.intersections
# **Roads**
#
# The `roads` property is a list where `roads[i]` contains a list of the
# intersections that intersection `i` connects to.
# In[4]:
# this shows that intersection 0 connects to intersections 7, 6, and 5
map_10.roads[0]
# In[5]:
# This shows the full connectivity of the map
map_10.roads
# In[6]:
# map_40 is a bigger map than map_10
map_40 = load_map('map-40.pickle')
show_map(map_40)
# ### Advanced Visualizations
#
# The map above shows a network of roads which spans 40 different intersections
# (labeled 0 through 39).
#
# The `show_map` function which generated this map also takes a few optional
# parameters which might be useful for visualizing the output of the search
# algorithm you will write.
#
# * `start` - The "start" node for the search algorithm.
# * `goal` - The "goal" node.
# * `path` - An array of integers which corresponds to a valid sequence of
#   intersection visits on the map.
# In[7]:
# run this code, note the effect of including the optional
# parameters in the function call.
show_map(map_40, start=5, goal=34, path=[5,16,37,12,34])
# ### Writing your algorithm
# You should open the file `student_code.py` in another tab and work on your
# algorithm there. Do that by selecting `File > Open` and then selecting the
# appropriate file.
#
# The algorithm you write will be responsible for generating a `path` like the
# one passed into `show_map` above. In fact, when called with the same map,
# start and goal, as above your algorithm should produce the path `[5, 16, 37,
# 12, 34]`
#
# ```bash
# > shortest_path(map_40, 5, 34)
# [5, 16, 37, 12, 34]
# ```
# In[8]:
# Sanity check: compare the student implementation against the known answer.
path = shortest_path(map_40, 5, 34)
if path == [5, 16, 37, 12, 34]:
    print("great! Your code works for these inputs!")
else:
    print("something is off, your code produced the following:")
    print(path)
# ### Testing your Code
# If the code below produces no errors, your algorithm is behaving correctly.
# You are almost ready to submit! Before you submit, go through the following
# submission checklist:
#
# **Submission Checklist**
#
# 1. Does my code pass all tests?
# 2. Does my code implement `A*` search and not some other search algorithm?
# 3. Do I use an **admissible heuristic** to direct search efforts towards the
#    goal?
# 4. Do I use data structures which avoid unnecessarily slow lookups?
#
# When you can answer "yes" to all of these questions, submit by pressing the
# Submit button in the lower right!
# In[9]:
from test import test
test(shortest_path)
|
# MIT License
#
# Copyright (c) 2021 Jacob Miller
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Utility functions that are only used for tests"""
import pytest
import numpy as np
import torch
Tensor = torch.Tensor

def complete_binary_dataset(str_len: int) -> Tensor:
    """
    Generate dataset with all binary strings of given length.

    Returns a long tensor of shape (2**str_len, str_len) whose i-th row is
    the (zero-padded, big-endian) binary expansion of i.
    """
    num_strings = 2 ** str_len
    rows = np.zeros((num_strings, str_len), dtype="int32")
    for value in range(num_strings):
        bits = format(value, "b").zfill(str_len)
        for pos, bit in enumerate(bits):
            rows[value, pos] = int(bit)
    return torch.tensor(rows).long()
def allcloseish(arr1: Tensor, arr2: Tensor, tol=1e-4) -> bool:
    """
    Same as `torch.allclose`, but less nit-picky.

    Non-tensor inputs are converted to tensors first; `tol` is used for both
    the relative and the absolute tolerance.
    """
    def as_tensor(a):
        return a if isinstance(a, torch.Tensor) else torch.tensor(a)
    return torch.allclose(as_tensor(arr1), as_tensor(arr2), rtol=tol, atol=tol)
def group_name(name: str):
    """Convenience wrapper for setting pytest benchmark group names."""
    marker = pytest.mark.benchmark(group=name)
    return marker
|
class Solution:
    def runningSum(self, nums: list[int]) -> list[int]:
        """Return the running (prefix) sums of nums.

        Note: the original annotation used typing.List without importing it,
        which raises NameError when the class is defined outside a
        LeetCode-style environment; the builtin list generic fixes that.
        """
        prefix_sums: list[int] = []
        total = 0
        for value in nums:
            total += value
            prefix_sums.append(total)
        return prefix_sums
from django.urls import path
from . import views
# URL routes for this app: auth pages, project CRUD, bug CRUD, and the user
# page. Routes with <str:pk> operate on a single project or bug by key.
urlpatterns = [
    path('', views.home,name='home'),
    path('loginPage/', views.loginPage,name='login'),
    path('register/', views.register,name='register'),
    path('logoutUser/', views.logoutUser,name='logout'),
    path('addProject/', views.addProject,name='addProject'),
    path('projectReport/', views.projectReport,name='projectReport'),
    path('updateProject/<str:pk>/', views.updateProject,name='updateProject'),
    path('deleteProject/<str:pk>/', views.deleteProject,name='deleteProject'),
    path('addBug/<str:pk>/', views.addBug,name='addBug'),
    path('bugReport/<str:pk>/', views.bugReport,name='bugReport'),
    path('updateBug/<str:pk>/', views.updateBug,name='updateBug'),
    path('deleteBug/<str:pk>/', views.deleteBug,name='deleteBug'),
    path('user/', views.userPage,name='userPage'),
]
|
from testcase.page.home_page.all_home_page import AllHomePage
from testcase.page.learn_center.grammer.all_gra_page import AllGraPage
from testcase.page.learn_center.listening.all_listening_page import AllListenPage
from testcase.page.learn_center.reading.all_read_page import AllReadPage
from testcase.page.learn_center.writing.all_writing_page import AllWritingPage
from testcase.page.learn_center.words_lists.words_lists_AllResultPage2 import WordsListsAllAnswerPage
class AllPage(AllHomePage, AllListenPage, AllReadPage, AllGraPage, AllWritingPage, WordsListsAllAnswerPage):
    """Aggregate page object: combines every section's page mixins into one entry point."""
    pass
if __name__ == '__main__':
    pass
"""Module for all insertable Typhon tools"""
__all__ = ['TyphonConsole', 'TyphonLogDisplay', 'TyphonTimePlot']
from .console import TyphonConsole
from .plot import TyphonTimePlot
from .log import TyphonLogDisplay
|
"""
What happens if the program exits.
It just stops until you restart the hardware.
"""
from microbit import *
display.scroll('abc')
|
import math
m=600851475143
def isprime(n):
    """Return True if n is a prime number (trial division up to sqrt(n))."""
    if n < 2:
        return False
    # math.isqrt computes the bound once (the original re-evaluated
    # math.sqrt(n) on every loop iteration) and avoids float rounding
    # issues for large n.
    for i in range(2, math.isqrt(n) + 1):
        if n % i == 0:
            return False
    return True
# Largest prime factor of m (Project Euler #3): repeatedly strip the smallest
# prime factor from m until what remains is itself prime.
maxprime=1
while not isprime(m):
    for i in range(2,m):
        if (isprime(i) and m%i==0):
            maxprime = max(maxprime,i)
            # NOTE(review): int(m/i) goes through float division; m//i would
            # be exact. Works for this m, but verify before reusing with
            # larger inputs where the float loses precision.
            m=int(m/i)
            break
# the remaining m is prime and may be the largest factor
maxprime = max(maxprime,m)
print(maxprime)
|
#fileencoding=utf-8
#!/usr/bin/env python3
"""
Multithread Echo protocol server, with ability to add delays to byte stream.
python echo_thread_stream_delay.py max_delay
max_delay -- seconds; the server will draw random delays from the [0 .. max_delay] interval.
             If this parameter is 0, the server will not add delays to the streams.
"""
import socket
from threading import Thread, Event
import time
import random
import sys
from queue import Queue
SERVER_ADDR = '127.0.0.1'   # listen on loopback only
SERVER_TCP_PORT = 9000      # TCP port clients connect to
BUFFER_SIZE = 8192          # max bytes read per recv()
MAX_BACKLOG = 32            # listen() backlog of pending connections
# Printer thread
def printer(printerQ):
    """Consume strings from printerQ forever and print each one.

    Serializes console output coming from multiple threads through a single
    queue so lines never interleave.
    """
    while True:
        message = printerQ.get()  # blocks until a string arrives
        print(message)
def process_client_connection(client_sock, max_delay, printerQ):
    """Echo every request back to the client until the peer disconnects.

    When max_delay > 0, each reply is split at its midpoint and the second
    half is sent after a random delay of up to max_delay seconds, so the
    client sees a fragmented byte stream.
    """
    while True:
        req = client_sock.recv(BUFFER_SIZE)
        if req == b'':
            # recv() returning b'' means the peer closed the connection
            break
        else:
            remote_addr = client_sock.getpeername()
            printerQ.put('req from {}:{} (len:{})\n {!r}'.format(
                remote_addr[0],
                remote_addr[1],
                len(req),
                req))
            if max_delay == 0:
                # plain echo; sendall() retries on partial writes (send() may
                # transmit fewer bytes than requested)
                client_sock.sendall(req)
            else:
                # Split the reply into two halves. BUGFIX: the original split
                # at len(req) % 2 (always 0 or 1), so the first fragment was
                # at most one byte instead of half the reply.
                mid = len(req) // 2
                client_sock.sendall(req[:mid])
                time.sleep(random.randint(0, max_delay))
                client_sock.sendall(req[mid:])
    client_sock.close()
# separate thread for server
# blocking calls are moved to his thread
# this gives chance for main thread to process KeyboardInterrupt
def server(server_sock, server_stop_event, max_delay, printerQ):
    """Accept client connections until server_stop_event is set.

    Runs in its own thread so the main thread stays free to handle
    KeyboardInterrupt; each accepted client is served by a daemon thread.
    """
    while not server_stop_event.is_set():
        client_sock, client_addr = server_sock.accept()
        printerQ.put('Accepted conection from {}:{}'.format(client_addr[0], client_addr[1]))
        client_thread = Thread(
            target=process_client_connection,
            args=(client_sock, max_delay, printerQ)
        )
        # Daemon threads do not block interpreter exit, so the program can
        # terminate without waiting for client handlers to finish.
        # BUGFIX: the original assigned `client_thread.deamon` (misspelled),
        # which only created an unused attribute and left the thread
        # non-daemon.
        client_thread.daemon = True
        client_thread.start()
if __name__ == '__main__':
    print('Starting ...<Ctrl-C> to stop.')
    # Require the max_delay argument and reject negative values.
    if len(sys.argv) < 2:
        print(__doc__)
        sys.exit(1)
    else:
        max_delay = int(sys.argv[1])
        if max_delay < 0 :
            print(__doc__)
            sys.exit(1)
    # Printer thread: serializes console output from all other threads.
    printerQ = Queue()
    printer_thread = Thread(
        target=printer,
        args=(printerQ, )
    )
    printer_thread.daemon = True
    printer_thread.start()
    # Preparing server Socket
    server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    #
    # The SO_REUSEADDR flag tells the kernel to reuse a local socket in
    # TIME_WAIT state, without waiting for its natural timeout to expire.
    #
    # Without this flag, restarting the server while some sockets were still
    # not closed produced the following error:
    #   [Errno 98] Address already in use
    #
    server_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server_sock.bind((SERVER_ADDR, SERVER_TCP_PORT))
    server_sock.listen(MAX_BACKLOG)
    local_addr = server_sock.getsockname()
    print('Listening on {}:{}'.format(local_addr[0], local_addr[1]))
    server_stop_event = Event()
    # Starting server Thread
    server_thread = Thread(
        target=server,
        args=(server_sock, server_stop_event, max_delay, printerQ)
    )
    # The entire Python program exits only when no non-daemon thread is left.
    # The main thread sets the stop event before exiting, but the server
    # thread is usually blocked in accept(), so that alone does not let the
    # program terminate:
    #
    # In [2]: server_thread.is_alive()
    # Out[2]: True
    #
    # Only with daemon = True on the server thread can the program exit
    # normally (tested on Win10 Python 3.7.3).
    server_thread.daemon = True
    server_thread.start()
    try:
        # Sleep loop keeps the main thread responsive to KeyboardInterrupt.
        while True:
            time.sleep(0.1)
    except KeyboardInterrupt:
        print ('Stopping ...')
        server_stop_event.set()
        sys.exit(1)
|
import urllib.request
from urllib.error import URLError, HTTPError
from venv import create
import Grid
#validLink
def validLink(theLink):
    """Return True if theLink is a URL that can currently be opened.

    Any parse failure (ValueError) or network/HTTP failure (URLError,
    HTTPError) is treated as an invalid link. The original version leaked
    the opened connection and had an unreachable `else` branch on the try;
    the context manager closes the response and the three identical except
    clauses are merged.
    """
    try:
        with urllib.request.urlopen(theLink):
            return True
    except (ValueError, HTTPError, URLError):
        return False
def createLinkList(linkSource):  # input: string path to a text document
    """Read candidate URLs (one per line) from the file at linkSource and
    return a list containing only those that validLink() accepts."""
    with open(linkSource) as infile:
        candidates = (line.rstrip('\n') for line in infile)
        return [url for url in candidates if validLink(url)]
#!/usr/bin/env python
"""
Removes Vowels from the input String.
Ex:
$ ./anti_vowel.py
Enter your string: Hello World
This is your string with all vowels removed: Hll Wrld
"""
def anti_vowel(string):
    """Return string with every vowel (a, e, i, o, u, either case) removed."""
    return "".join(ch for ch in string if ch.lower() not in "aeiou")
# NOTE: Python 2 script (raw_input and print-statement syntax); run with python2.
string=raw_input("Enter your string: ")
print "This is your string with all vowels removed: "+anti_vowel(string)
|
"Calculates the nth Fibonacci number."
# Create a memoization cache so our recursion is faster.
# Memoization cache shared across calls so each fib(k) is computed only once.
fib_cache = {}
def fib(n):
    """Return the nth Fibonacci number (fib(0)=0, fib(1)=fib(2)=1)."""
    if n in fib_cache:
        return fib_cache[n]
    if n == 0:
        result = 0
    elif n in (1, 2):
        result = 1
    else:
        result = fib(n-1) + fib(n-2)
    # BUGFIX: the original stored fib(n-1)+fib(n-2) in the cache and then
    # recomputed the same sum for the return value; store once and reuse.
    fib_cache[n] = result
    return result
# Python 2 print syntax: show "n : fib(n)" for n = 1..500.
for n in range(1, 501):
    print "%s : %s" % (n, fib(n))
from functions.generators.generators import *
from functions.miscellanea import _write_nested, _plotter, GridDisplay
import torch
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from functions.miscellanea import _write_nested, _plotter, GridDisplay, _basic_univar_distplot
from functions.generators.generators import DatasetSampler
from itertools import product
from scipy.interpolate import UnivariateSpline
# Shared 1-D evaluation grid used by the visualization helpers below.
X = torch.linspace(-5,5,1000).numpy()

def scale_xy(x, func):
    """Apply func to x and return (x, y), each standardized to zero mean and unit std."""
    def standardize(arr):
        return (arr - arr.mean(0)) / arr.std(0)
    y = standardize(func(x))
    x = standardize(x)
    return x, y
def round_(val):
    """Truncate val -- or each element of a list/tuple/ndarray/tensor -- to 3
    decimal places.

    Note: values are truncated toward zero (via int()), not rounded.
    """
    def truncate(x, precision=3):
        factor = 10 ** precision
        return int(factor * x) / factor
    if isinstance(val, (list, tuple, np.ndarray, torch.Tensor)):
        return [truncate(v) for v in val]
    return truncate(val)
def viz_mechanisms(num=10):
    """Plot `num` random draws from each mechanism family of MechanismSampler.

    Each figure overlays standardized (x, f(x)) curves for one family;
    plt.pause gives the window time to render between families.
    """
    for _ in range(num):
        ms = MechanismSampler(X) ; mech = ms.RbfGP(bounds=(2,10))
        plt.plot(*scale_xy(X,mech))
    plt.title('Randomized RBF GP Quantile Sums')
    plt.legend()  # NOTE(review): no labels are set, so this draws an empty legend
    plt.show()
    plt.pause(1)
    for _ in range(num):
        ms = MechanismSampler(X) ; mech = ms.SigmoidAM()
        plt.plot(*scale_xy(X,mech))
    plt.title('Sigmoid AM')
    plt.show()
    plt.pause(1)
    for _ in range(num):
        ms = MechanismSampler(X) ; mech = ms.CubicSpline()
        plt.plot(*scale_xy(X,mech))
    plt.title('Cubic Spline')
    plt.show()
    plt.pause(1)
    for _ in range(num):
        ms = MechanismSampler(X) ; mech = ms.tanhSum()
        plt.plot(*scale_xy(X,mech))
    plt.title('Shift/Scale/Amplitude Tanh Sum')
    plt.show()
def viz_cause(num=10):
    """Show a 5-column grid of `num` sample histograms for each cause
    distribution family exposed by CauseSampler."""
    i = 0  # NOTE(review): shadowed by the loop variable below; appears unused
    def callback(ax,X,i):
        # Draw one distplot per sample, with axis limits padded by one std.
        hist_vals, _ = np.histogram(X,bins='auto', density=True)
        sns.distplot(X, ax=ax, color=f'C{i}')
        low_x, up_x, low_y, up_y = X.min()-X.std(), X.max()+X.std(), 0, hist_vals.max()*1.07
        plt.axis([low_x,up_x,low_y,up_y])
        plt.xticks([], []); plt.yticks([], [])
        plt.tight_layout()
    display = GridDisplay(num_items=10, nrows=-1, ncols=5)
    for i in range(num):
        n = 1000 ; s = CauseSampler(sample_size=n)
        X = s.uniform()
        display.add_plot(callback=(lambda ax: callback(ax,X,i)))
    display.fig.suptitle('Uniform', fontsize=20)
    display.fig.tight_layout(rect=[0, 0.03, 1, 0.95])
    plt.show()
    display = GridDisplay(num_items=10, nrows=-1, ncols=5)
    for i in range(num):
        n = 1000 ; s = CauseSampler(sample_size=n)
        X = s.uniform_mixture()
        display.add_plot(callback=(lambda ax: callback(ax,X,i)))
    display.fig.suptitle('Uniform Mixture', fontsize=20)
    display.fig.tight_layout(rect=[0, 0.03, 1, 0.95])
    plt.show()
    display = GridDisplay(num_items=10, nrows=-1, ncols=5)
    for i in range(num):
        n = 1000 ; s = CauseSampler(sample_size=n)
        X = s.gaussian_mixture()
        display.add_plot(callback=(lambda ax: callback(ax,X,i)))
    display.fig.suptitle('Gaussian Mixture', fontsize=20)
    display.fig.tight_layout(rect=[0, 0.03, 1, 0.95])
    plt.show()
    display = GridDisplay(num_items=10, nrows=-1, ncols=5)
    for i in range(num):
        n = 1000 ; s = CauseSampler(sample_size=n)
        X = s.subgaussian_mixture()
        display.add_plot(callback=(lambda ax: callback(ax,X,i)))
    display.fig.suptitle('Sub Gaussian Mixture', fontsize=20)
    display.fig.tight_layout(rect=[0, 0.03, 1, 0.95])
    plt.show()
    display = GridDisplay(num_items=10, nrows=-1, ncols=5)
    for i in range(num):
        n = 1000 ; s = CauseSampler(sample_size=n)
        X = s.supergaussian_mixture()
        display.add_plot(callback=(lambda ax: callback(ax,X,i)))
    display.fig.suptitle('Super Gaussian Mixture', fontsize=20)
    display.fig.tight_layout(rect=[0, 0.03, 1, 0.95])
    plt.show()
    display = GridDisplay(num_items=10, nrows=-1, ncols=5)
    for i in range(num):
        n = 1000 ; s = CauseSampler(sample_size=n)
        X = s.subsupgaussian_mixture()
        display.add_plot(callback=(lambda ax: callback(ax,X,i)))
    display.fig.suptitle('Sub & Super Gaussian Mixture', fontsize=20)
    display.fig.tight_layout(rect=[0, 0.03, 1, 0.95])
    plt.show()
def viz_pair(save=True):
    """Render 5-pair scatter grids for every combination of cause, base
    noise, and mechanism drawn from DatasetSampler.

    A univariate spline fit (red dashed) is overlaid on each scatter. With
    save=True the figures are written under ./tests/data/fcm_examples/pairs/;
    otherwise each figure is shown interactively.
    """
    SEED = 1020
    torch.manual_seed(SEED)
    np.random.seed(SEED)
    causes = ['gmm', 'subgmm','supgmm','subsupgmm','uniform','mixtunif']
    base_noises = ['normal', 'student', 'triangular', 'uniform',
                   'beta']
    mechanisms = ['spline','sigmoidam','tanhsum','rbfgp']
    anms = [False, True]
    for anm,c,bn,m in product(anms,causes,base_noises,mechanisms):
        print(f'anm? {anm}, cause: {c}, base_noise: {bn}, mechanism: {m}')
        DtSpl = DatasetSampler(N=5, n=1000, anm=anm,
                               base_noise=bn,
                               cause_type=c,
                               mechanism_type=m,
                               with_labels=False)
        display = GridDisplay(num_items=5, nrows=-1, ncols=5)
        for pair in DtSpl:
            # NOTE(review): the outer lambda closes over the loop variable
            # `pair` late-bound; this is only correct if add_plot invokes the
            # callback immediately -- confirm against GridDisplay.
            def callback(ax, pair):
                ax.scatter(pair[0],pair[1], s=10, facecolor='none', edgecolor='k')
                idx = np.argsort(pair[0])
                x,y = pair[0][idx], pair[1][idx] ; spl = UnivariateSpline(x, y)
                x_display = np.linspace(x.min(), x.max(), 1000)
                ax.plot(x_display, spl(x_display), 'r--')
            display.add_plot(callback=(lambda ax: callback(ax,pair)))
        display.fig.suptitle(f'anm? {anm}, cause: {c}, base_noise: {bn}, mechanism: {m}', fontsize=20)
        display.fig.tight_layout(rect=[0, 0.03, 1, 0.93])
        if save:
            _write_nested(f'./tests/data/fcm_examples/pairs/anm_{anm}_c_{c}_bn_{bn}_m_{m}',
                          callback= lambda fp: plt.savefig(fp,dpi=70))
            #plt.savefig(f'./data/fcm_examples/pairs/anm_{anm}_c_{c}_bn_{bn}_m_{m}', dpi=40)
        else:
            plt.show()
def viz_confounded(save=True):
    """Render 5-pair scatter grids for confounded dataset samples over every
    combination of confounder type, per-variable base noises, and mechanisms.

    The confounder's (x, y) sample (red) is overlaid on each scatter. With
    save=True the figures are written under ./tests/data/fcm_examples/pairs/;
    otherwise each figure is shown interactively.
    """
    SEED = 1020
    torch.manual_seed(SEED)
    np.random.seed(SEED)
    causes = ['gmm', 'subgmm','supgmm','subsupgmm','uniform','mixtunif']
    base_noises = ['normal', 'student', 'triangular', 'uniform',
                   'beta']
    mechanisms = ['spline','sigmoidam','tanhsum','rbfgp']
    anms = [ False,True]
    for anm,c,bn_x,bn_y,m_x,m_y in product(anms,causes,
                                           base_noises, base_noises,
                                           mechanisms,mechanisms):
        print(f'anm? {anm}, cause: {c}, base_noise: {bn_x,bn_y}, mechanism: {m_x,m_y}')
        DtSpl = ConfoundedDatasetSampler(N=5, n=1000, anm=anm,
                                         base_noise=[bn_x,bn_y],
                                         confounder_type=c,
                                         mechanism_type=[m_x,m_y],
                                         with_labels=False)
        display = GridDisplay(num_items=5, nrows=-1, ncols=5)
        for pair in DtSpl:
            # NOTE(review): same late-binding caveat as viz_pair -- the outer
            # lambda captures the loop variable `pair`.
            def callback(ax, pair):
                ax.scatter(pair[0],pair[1], s=10, facecolor='none', edgecolor='k')
                idx = np.argsort(DtSpl.pSampler.x_sample)
                ax.scatter(DtSpl.pSampler.x_sample[idx], DtSpl.pSampler.y_sample[idx], facecolor='r', s=14, alpha=0.7)
            display.add_plot(callback=(lambda ax: callback(ax,pair)))
        display.fig.suptitle(f'Confounded: anm? {anm}, cause: {c}, base_noise: {bn_x,bn_y}, mechanism: {m_x,m_y}', fontsize=20)
        display.fig.tight_layout(rect=[0, 0.03, 1, 0.93])
        if save:
            _write_nested(f'./tests/data/fcm_examples/pairs/cdf_anm_{anm}_c_{c}_bn_{bn_x}+{bn_y}_m_{m_x}+{m_y}',
                          callback= lambda fp: plt.savefig(fp,dpi=70))
            #plt.savefig(f'./data/fcm_examples/pairs/anm_{anm}_c_{c}_bn_{bn}_m_{m}', dpi=40)
        else:
            plt.show()
if __name__ == '__main__':
    # Uncomment any line below to visualize samples from that family.
    # viz_cause()
    # viz_mechanisms()
    # viz_pair(save=True)
    # viz_confounded(save=False)
    pass
|
import tkinter as tk
import coler as c
import random
class Game(tk.Frame):
    """A Tkinter 2048 game on an n x n board (n is read from stdin).

    Board state lives in self.matrix (list of lists of ints, 0 = empty);
    the widgets mirroring it live in self.cells.
    """
    def __init__(self):
        tk.Frame.__init__(self)
        self.grid()
        self.master.title("2048 3D")
        # Board dimension: read from stdin before the window is built.
        self.n = int(input())
        self.mainGrid = tk.Frame(
            self, bg=c.GridColor, bd=3, width=600, height=600
        )
        self.mainGrid.grid(pady=(100, 0))
        self.makeGUI()
        self.startGame()
        self.master.bind("<Left>", self.left)
        self.master.bind("<Right>", self.right)
        self.master.bind("<Up>", self.up)
        self.master.bind("<Down>", self.down)
        self.mainloop()

    def makeGUI(self):
        """Build the n x n grid of cell frames/labels and the score header."""
        self.cells = []
        for i in range(self.n):
            row = []
            for j in range(self.n):
                cellFrame = tk.Frame(
                    self.mainGrid,
                    bg=c.EmptyCellColor,
                    width=600 // self.n,
                    height=600 // self.n
                )
                cellFrame.grid(row=i, column=j, padx=5, pady=5)
                cellNumber = tk.Label(self.mainGrid, bg=c.EmptyCellColor)
                cellNumber.grid(row=i, column=j)
                cellData = {"frame": cellFrame, "number": cellNumber}
                row.append(cellData)
            self.cells.append(row)
        # make score header
        scoreFrame = tk.Frame(self)
        scoreFrame.place(relx=0.5, y=45, anchor="center")
        tk.Label(
            scoreFrame,
            text="Score",
            font=c.ScoreLabelFont
        ).grid(row=0)
        self.scoreLabel = tk.Label(scoreFrame, text="0", font=c.ScoreFont)
        self.scoreLabel.grid(row=1)

    def startGame(self):
        """Create the empty matrix and place the two initial '2' tiles."""
        self.matrix = [[0] * self.n for _ in range(self.n)]
        # first tile on a random cell
        row = random.randint(0, self.n - 1)
        col = random.randint(0, self.n - 1)
        self.matrix[row][col] = 2
        self.cells[row][col]["frame"].configure(bg=c.CellColors[2])
        self.cells[row][col]["number"].configure(
            bg=c.CellColors[2],
            fg=c.CellNumberColors[2],
            font=c.CellNumberFonts[2],
            text="2"
        )
        # second tile: re-roll until an empty cell is found.
        # BUGFIX: the original used random.randint(0, 3) here, which raises
        # IndexError for boards smaller than 4x4 and confines the second
        # tile to the top-left 4x4 corner on larger boards.
        while self.matrix[row][col] != 0:
            row = random.randint(0, self.n - 1)
            col = random.randint(0, self.n - 1)
        self.matrix[row][col] = 2
        self.cells[row][col]["frame"].configure(bg=c.CellColors[2])
        self.cells[row][col]["number"].configure(
            bg=c.CellColors[2],
            fg=c.CellNumberColors[2],
            font=c.CellNumberFonts[2],
            text="2"
        )
        self.score = 0

    # Matrix functions
    def stack(self):
        """Slide all non-zero tiles of every row to the left (no merging)."""
        newMatrix = [[0] * self.n for _ in range(self.n)]
        for i in range(self.n):
            fillPosition = 0
            for j in range(self.n):
                if self.matrix[i][j] != 0:
                    newMatrix[i][fillPosition] = self.matrix[i][j]
                    fillPosition = fillPosition + 1
        self.matrix = newMatrix

    def combine(self):
        """Merge equal horizontally-adjacent tiles (call after stack())."""
        for i in range(self.n):
            for j in range(self.n - 1):
                if self.matrix[i][j] != 0 and self.matrix[i][j] == self.matrix[i][j + 1]:
                    self.matrix[i][j] *= 2
                    self.matrix[i][j + 1] = 0
                    self.score += self.matrix[i][j]

    def reverse(self):
        """Mirror the matrix left-to-right."""
        newMatrix = []
        for i in range(self.n):
            newMatrix.append([])
            for j in range(self.n):
                newMatrix[i].append(self.matrix[i][self.n - j - 1])
        self.matrix = newMatrix

    def transpose(self):
        """Transpose the matrix (rows become columns)."""
        newMatrix = [[0] * self.n for _ in range(self.n)]
        for i in range(self.n):
            for j in range(self.n):
                newMatrix[i][j] = self.matrix[j][i]
        self.matrix = newMatrix

    # Add new tile
    def addTile(self):
        """Place a 2 or 4 on a random empty cell.

        NOTE(review): loops forever if the board is full -- the move handlers
        call this unconditionally; confirm a full board is always caught by
        gameOver() first.
        """
        row = random.randint(0, (self.n) - 1)
        col = random.randint(0, (self.n) - 1)
        while (self.matrix[row][col] != 0):
            row = random.randint(0, (self.n) - 1)
            col = random.randint(0, (self.n) - 1)
        self.matrix[row][col] = random.choice([2, 4])

    # Update the GUI
    def updateGUI(self):
        """Repaint every cell and the score label from the matrix."""
        for i in range(self.n):
            for j in range(self.n):
                cellValue = self.matrix[i][j]
                if cellValue == 0:
                    self.cells[i][j]["frame"].configure(bg=c.EmptyCellColor)
                    self.cells[i][j]["number"].configure(bg=c.EmptyCellColor, text="")
                else:
                    self.cells[i][j]["frame"].configure(bg=c.CellColors[cellValue])
                    self.cells[i][j]["number"].configure(
                        bg=c.CellColors[cellValue],
                        fg=c.CellNumberColors[cellValue],
                        font=c.CellNumberFonts[cellValue],
                        text=str(cellValue)
                    )
        self.scoreLabel.configure(text=self.score)
        self.update_idletasks()

    # arrow press functions: each move = reorient, stack, combine, re-stack,
    # restore orientation, then spawn a tile and repaint.
    def left(self, event):
        self.stack()
        self.combine()
        self.stack()
        self.addTile()
        self.updateGUI()
        self.gameOver()

    def right(self, event):
        self.reverse()
        self.stack()
        self.combine()
        self.stack()
        self.reverse()
        self.addTile()
        self.updateGUI()
        self.gameOver()

    def up(self, event):
        self.transpose()
        self.stack()
        self.combine()
        self.stack()
        self.transpose()
        self.addTile()
        self.updateGUI()
        self.gameOver()

    def down(self, event):
        self.transpose()
        self.reverse()
        self.stack()
        self.combine()
        self.stack()
        self.reverse()
        self.transpose()
        self.addTile()
        self.updateGUI()
        self.gameOver()

    # check if any move is possible
    def horizMove(self):
        """Return True if any two horizontally-adjacent tiles are equal."""
        for i in range(self.n):
            for j in range(self.n - 1):
                if self.matrix[i][j] == self.matrix[i][j + 1]:
                    return True
        return False

    def vertiMove(self):
        """Return True if any two vertically-adjacent tiles are equal."""
        for i in range(self.n - 1):
            for j in range(self.n):
                if self.matrix[i][j] == self.matrix[i + 1][j]:
                    return True
        return False

    # check game is over
    def gameOver(self):
        """Show a win banner if 2048 is present, or a lose banner if no move is left."""
        if any(2048 in row for row in self.matrix):
            gameOverFrame = tk.Frame(self.mainGrid, borderwidth=2)
            gameOverFrame.place(relx=0.5, rely=0.5, anchor="center")
            tk.Label(
                gameOverFrame,
                text="You Win!!!",
                bg=c.WinnerBG,
                fg=c.GameOverFontColor,
                font=c.GameOverFont
            ).pack()
        elif not any(0 in row for row in self.matrix) and not self.horizMove() and not self.vertiMove():
            gameOverFrame = tk.Frame(self.mainGrid, borderwidth=2)
            gameOverFrame.place(relx=0.5, rely=0.5, anchor="center")
            tk.Label(
                gameOverFrame,
                text="You Lose....",
                bg=c.LoserBG,
                fg=c.GameOverFontColor,
                font=c.GameOverFont
            ).pack()
def main():
    """Entry point: construct the Game, which runs the Tk main loop until the window closes."""
    Game()
if __name__ =="__main__":
    main()
from utils.exceptions import FormattingException, FileException, BizdaysException
from utils.logging import LoggingMixin
import datetime
from dateutil.relativedelta import relativedelta
import csv
import re
import os
class Calendar(LoggingMixin):
"""
Calendar class will create an object with a list of holidays and
it will allow to do some date validation.
:param holidays_file: The file path for holidays calendar file. The calendar file
must contain strings with the following format YYYYMMDD or YYMMDD and using
the following separators: - or / or . or _
:type holidays_file: string
:param output_format: The string output format for dates. It must follow
strftime() and strptime() behavior.
:type output_format: string
"""
def __init__(self, holidays_file=None, output_format='%Y/%m/%d'):
self.holidays_file = holidays_file
self.holidays = []
self.date_output_format_validation(output_format)
self.output_format = output_format
self.open_file()
def open_file(self):
"""
Functions that reads holidays_file and append each line, if valid, to attribute holidays
"""
try:
with open(self.holidays_file, mode="r") as infile:
reader = csv.reader(infile)
for line in reader:
self.holidays.append(self.string_to_datetime(line[0]))
except:
self.log.debug('Calendar file {} is not valid. Holidays will not be defined.'.format(self.holidays_file))
def date_input_format_validation(self, date_string):
"""
Functions that validates if a date string is following the formats YYYYMMDD or YYMMDD
and using the following separators: - or / or . or _
:param date_string: A string representation of a date
:type date_string: string
"""
date_string_validate = re.compile('^(\d{2,4})([-/:._|])(0[1-9]|1[0-2])([-/:._|])(3[01]|[12][0-9]|0[1-9])$')
if not date_string:
raise FormattingException("Output format cannot be None.")
if not date_string_validate.match(date_string):
raise FormattingException("\"{}\" is not a valid date format. Please choose between: \"%Y/%m/%d\","
" \"%Y-%m-%d\" or check strftime and strptime behavior.".format(date_string))
def string_to_datetime(self, sourcedate):
"""
Functions that converts string to datetime object
:param sourcedate: A string representation of date
:type sourcedate: string
:return: datetime object
"""
self.date_input_format_validation(sourcedate)
filtered_date = re.sub(r"^(\d{2,4})([-/:._|])(0[1-9]|1[0-2])([-/:._|])(3[01]|[12][0-9]|0[1-9])$", r"\1\3\5",
sourcedate)
for fmt in ('%y%m%d', '%Y%m%d'):
try:
return datetime.datetime.strptime(filtered_date, fmt)
except ValueError:
self.log.debug(
'The date {0} does not have a valid date format {1}. Returned none.'.format(filtered_date, fmt))
@staticmethod
def datetime_to_string(sourcedate, output_format="%Y/%m/%d"):
    """
    Convert a datetime object to its string representation.

    :param sourcedate: the date to format (any object with ``strftime``,
        e.g. ``datetime.datetime`` — the original docstring wrongly said
        this is a string)
    :type sourcedate: datetime.datetime
    :param output_format: The string output format for dates. It must follow strftime() and strptime() behavior.
    :type output_format: string
    :return: ``sourcedate`` rendered through ``output_format``
    :rtype: string
    """
    return sourcedate.strftime(output_format)
@staticmethod
def date_output_format_validation(date_string):
"""
Functions that validates if string output format for dates follows
strftime() and strptime() behavior.
:param date_string: A string representation of a date
:type date_string: string
"""
date_string_validate = re.compile('(.*)(%-?[\w])(.)(%-?[\w])(.)(%-?[\w])(.*)')
if not date_string:
raise FormattingException("Output format cannot be None.")
if not date_string_validate.match(date_string):
raise FormattingException("\"{}\" is not a valid date format. Please choose between: \"%Y/%m/%d\", "
"\"%Y-%m-%d\" or check strftime and strptime behavior.".format(date_string))
def get_holidays(self):
    """
    Return the holidays loaded from the calendar file.

    :return: list of parsed holiday dates (empty if the calendar file
        could not be read).
    """
    # NOTE(review): this returns the internal list itself, not a copy,
    # so callers can mutate self.holidays through it.
    return self.holidays
class Businessdays(Calendar):
    """
    Businessdays is derived from Calendar class.
    Businessdays class will create an object with a list of holidays, allow to do some date calculation
    and check if the result is a business day or not.
    :param holidays_file: The file path for holidays calendar file. The calendar file
        must contain strings with the following format YYYYMMDD or YYMMDD and using
        the following separators: - or / or . or _
    :type holidays_file: string
    :param output_format: The string output format for dates. It must follow
        strftime() and strptime() behavior.
    :type output_format: string
    :param sourcedate: A string representation of a date with the following
        format YYYYMMDD or YYMMDD and using the following separators: - or / or . or _
        An already-parsed datetime.datetime is also accepted.
    :type sourcedate: string or datetime.datetime
    """

    def __init__(self, sourcedate, holidays_file, output_format='%Y/%m/%d'):
        self.date_output_format_validation(output_format)
        self.output_format = output_format
        # Accept either a ready-made datetime or a date string. Only
        # strings are validated and parsed: the original validated
        # unconditionally, which made its datetime branch unreachable
        # (re.match raises TypeError on non-string input).
        if isinstance(sourcedate, datetime.datetime):
            self.sourcedate = sourcedate
        else:
            # string_to_datetime() runs date_input_format_validation()
            # itself, so no separate validation call is needed.
            self.sourcedate = self.string_to_datetime(sourcedate)
        if not holidays_file or not os.path.isfile(holidays_file) or os.stat(holidays_file).st_size == 0:
            # Message typo fixed: "Existing" -> "Exiting".
            raise FileException("Holiday file {} is empty. Exiting because Businessdays need it."
                                .format(holidays_file))
        self.holidays_file = holidays_file
        super(Businessdays, self).__init__(self.holidays_file, self.output_format)

    def date_calculator(self, years=0, months=0, days=0):
        """
        Shift sourcedate by the given offsets (negative values go backwards).

        The three deltas are applied sequentially (years, then months,
        then days), preserving the original behavior; the order matters
        around month-end clamping (e.g. Feb 29).

        :param years: number of years to add (may be negative)
        :type years: int
        :param months: number of months to add (may be negative)
        :type months: int
        :param days: number of days to add (may be negative)
        :type days: int
        :return: self, so calls can be chained
        :raises FormattingException: if the date arithmetic fails
        """
        try:
            self.sourcedate = self.sourcedate + relativedelta(years=years)
            self.sourcedate = self.sourcedate + relativedelta(months=months)
            self.sourcedate = self.sourcedate + relativedelta(days=days)
        except (ValueError, TypeError) as e_info:
            # Message fixed: the original claimed "Returned none." even
            # though this raises.
            raise FormattingException('The date calculation for {0} failed: {1}.'.format(self.sourcedate, e_info))
        return self

    def get_date(self):
        """
        Return sourcedate formatted with the configured output format.

        Unlike the original implementation this does NOT overwrite
        self.sourcedate with a string, so the date methods
        (is_business_day(), next_business_day(), format_date(), ...)
        keep working after get_date() has been called.

        :return: sourcedate attribute as string
        """
        if isinstance(self.sourcedate, datetime.datetime):
            return self.datetime_to_string(self.sourcedate, output_format=self.output_format)
        return self.sourcedate

    def is_business_day(self):
        """
        Return True if sourcedate is a business day: not a Saturday (6)
        or Sunday (7), and not listed in the holidays calendar.
        """
        return self.sourcedate.isoweekday() not in {6, 7} and self.sourcedate not in self.holidays

    def next_business_day(self):
        """
        Move sourcedate forward to the next business day.
        :return: self, so calls can be chained (the original docstring
            wrongly said a string is returned)
        """
        return self.__move_business_day__(1)

    def prev_business_day(self):
        """
        Move sourcedate back to the previous business day.
        :return: self, so calls can be chained
        """
        return self.__move_business_day__(-1)

    def get_business_day(self, timedelta=0):
        """
        Shift sourcedate by `timedelta` days, then keep stepping in the
        same direction until a business day is reached.

        :param timedelta: signed number of days to move
        :type timedelta: int
        :return: self, so calls can be chained
        """
        return self.__move_business_day__(timedelta)

    def __move_business_day__(self, days_to_move):
        """
        Shift sourcedate by days_to_move days and keep stepping by the
        same amount until landing on a business day.

        :param days_to_move: signed day step; must not be None, and must
            not be 0 when sourcedate is a non-business day (the loop
            could never terminate)
        :type days_to_move: int
        :return: self, so calls can be chained
        :raises BizdaysException: on an invalid days_to_move
        """
        if days_to_move is None:
            raise BizdaysException("days_to_move argument cannot be None.")
        self.sourcedate = self.sourcedate + relativedelta(days=days_to_move)
        if not self.is_business_day() and days_to_move == 0:
            raise BizdaysException("days_to_move argument cannot be 0 if sourcedate is a non business day.")
        while not self.is_business_day():
            self.sourcedate = self.sourcedate + relativedelta(days=days_to_move)
        return self

    def format_date(self, output_string):
        """
        Takes an output string and outputs that string
        with the source date with the specified time format

        :param output_string: output string format E.g. %Y-%m-%d
        :type output_string: str
        :return: sourcedate rendered through the given format string
        :rtype: str
        >>> Businessdays('2017/09/25', './test/test.cal').format_date("~/dev/code.%y.%m.%d")
        '~/dev/code.17.09.25'
        >>> Businessdays('2017/06/30', './test/test.cal').format_date("~/dev/code/file%Y%m%d")
        '~/dev/code/file20170630'
        """
        self.date_output_format_validation(output_string)
        return self.sourcedate.strftime(output_string)
|
import pytest
from votesmart.methods.committee import *
def test_Committee():
    """Committee must refuse direct construction with NotImplementedError."""
    with pytest.raises(NotImplementedError):
        # No point binding the (never-created) instance to an unused
        # local, as the original `method = ...` did.
        Committee(api_instance='test')
|
from django.shortcuts import render
from django.views import View
from .models import CourseOrg,CityDict,Teacher
from apps.courses.models import Course
#分页
from pure_pagination import Paginator, EmptyPage, PageNotAnInteger
# Create your views here.
class OrglistView(View):
    """Paginated list of course organizations, filterable by city and
    category and sortable by student or course count."""

    def get(self, request):
        all_orgs = CourseOrg.objects.all()
        # Typo fixed locally (was `all_cirys`); the template context key
        # "citys" is kept unchanged so templates still work.
        all_cities = CityDict.objects.all()
        # Top-3 organizations by click count for the "hot" sidebar.
        hot_orgs = all_orgs.order_by("-click_nums")[:3]
        # Optional city filter (?city=<id>). isdigit() guards against a
        # non-numeric parameter, which previously raised ValueError (500).
        city_id = request.GET.get('city', "")
        if city_id.isdigit():
            all_orgs = all_orgs.filter(city_id=int(city_id))
        # Optional category filter (?ct=<category>).
        org_ct = request.GET.get('ct', "")
        if org_ct:
            all_orgs = all_orgs.filter(category=org_ct)
        org_nums = all_orgs.count()
        # Optional ordering (?sort=students|courses); the values are
        # mutually exclusive, so elif instead of two independent ifs.
        sort = request.GET.get('sort', "")
        if sort == "students":
            all_orgs = all_orgs.order_by("-students")
        elif sort == "courses":
            all_orgs = all_orgs.order_by("-course_nums")
        # Paginate the organizations, 5 per page.
        try:
            page = request.GET.get('page', 1)
        except PageNotAnInteger:
            page = 1
        # Provide Paginator with the request object for complete querystring generation
        p = Paginator(all_orgs, 5, request=request)
        orgs = p.page(page)
        return render(request, "org-list.html", {
            "orgs": orgs,
            "citys": all_cities,
            "org_nums": org_nums,
            "city_id": city_id,
            "org_ct": org_ct,
            "hot_orgs": hot_orgs,
            "sort": sort})
class OrgDetailView(View):
    """Organization home page: top-3 courses, top-3 teachers and the
    organization description."""

    def get(self, request, org_id):
        courses = Course.objects.filter(course_org=org_id)[:3]
        teachers = Teacher.objects.filter(org_id=org_id)[:3]
        # The description is read off the organization's courses (last
        # course wins, as in the original loop). Initializing desc to ""
        # fixes the NameError the original raised when the org had no
        # courses or org_id was falsy.
        # NOTE(review): reading Course.desc here looks odd for an org
        # description — confirm it shouldn't be CourseOrg.desc.
        desc = ""
        for course in Course.objects.filter(course_org=org_id):
            desc = course.desc
        return render(request, "org-detail-homepage.html",
                      {"teachers": teachers, "courses": courses,
                       "desc": desc, "org_id": org_id})
class CourseDetailView(View):
    """Paginated list of all courses offered by one organization."""

    def get(self, request, org_id):
        # The original `if org_id:` guard had no else branch, so a falsy
        # org_id crashed with UnboundLocalError at render time; the
        # filter handles any org_id value safely.
        courses = Course.objects.filter(course_org=org_id)
        # Paginate the courses, 3 per page.
        try:
            page = request.GET.get('page', 1)
        except PageNotAnInteger:
            page = 1
        # Provide Paginator with the request object for complete querystring generation
        p = Paginator(courses, 3, request=request)
        courses = p.page(page)
        return render(request, "org-detail-course.html",
                      {"courses": courses, "org_id": org_id})
class OrgDescView(View):
    """Organization description page."""

    def get(self, request, org_id):
        # Same "last course wins" description lookup as OrgDetailView,
        # with a default so an org without courses no longer raises
        # NameError, and a falsy org_id no longer falls into the old
        # `else: pass` (which returned None and produced a 500).
        desc = ""
        for course in Course.objects.filter(course_org=org_id):
            desc = course.desc
        return render(request, "org-detail-desc.html",
                      {"desc": desc, "org_id": org_id})
class OrgTeacherView(View):
    """Organization teachers page."""

    def get(self, request, org_id):
        # The original `if org_id:` guard had no else branch, so a falsy
        # org_id crashed with UnboundLocalError; filtering directly is
        # safe for any org_id value.
        teachers = Teacher.objects.filter(org_id=org_id)
        return render(request, "org-detail-teachers.html",
                      {"teachers": teachers, "org_id": org_id})
# Read the available budget and the cost items, then report whether the
# budget covers the total cost of meal (RU) tickets plus bus passes.
budget = float(input("valor:"))
ticket_count = int(input("quantidade de tickets do RU:"))
ticket_price = float(input("valor de tickets:"))
pass_count = int(input("quantidade de passes de onibus:"))
pass_price = float(input("valor dos passes:"))
total_cost = ticket_count * ticket_price + pass_count * pass_price
# Strictly greater than: an exact match counts as insufficient, exactly
# as in the original comparison.
verdict = "SUFICIENTE" if budget > total_cost else "INSUFICIENTE"
print(verdict)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.