text stringlengths 38 1.54M |
|---|
from src.py.AST.ASTNode import ASTNodeType, pointerType
from src.py.AST.AST import AST
from src.py.ST.SymbolTable import SymbolTable, SymbolMapping
from src.py.UTIL.VarTypes import *
from src.py.SA.ErrorMsgHandler import ErrorMsgHandler
class TypeDeductor:
	"""Static helpers that derive the VarTypes type of an AST expression node."""

	@staticmethod
	def deductType(node, symbolTable):
		"""
		Returns the type of the (rvalue) node.

		Dispatches on node.type: literals map to their concrete types,
		identifiers/calls are resolved through the symbol table, and
		compound expressions recurse into their children.
		Raises Exception for node types with no deduction rule.
		"""
		if node.type == ASTNodeType.RValueChar:
			return CharType()
		elif node.type == ASTNodeType.RValueInt:
			return IntType()
		elif node.type == ASTNodeType.RValueFloat:
			return FloatType()
		elif node.type == ASTNodeType.RValueBool:
			return BoolType()
		elif node.type == ASTNodeType.RValueID:
			nodeType = symbolTable.lookupSymbol(node.value).type
			# Return the type, except if the ID references an array (without element access)
			return nodeType if not(type(nodeType) is ArrayType) else nodeType.addressOf()
		elif node.type == ASTNodeType.LValue:
			# Same array-decay rule as RValueID above.
			nodeType = symbolTable.lookupSymbol(node.value).type
			return nodeType if not(type(nodeType) is ArrayType) else nodeType.addressOf()
		elif node.type == ASTNodeType.Addition:
			return TypeDeductor.checkTypeChildrenExpression(node.children, symbolTable)
		elif node.type == ASTNodeType.Subtraction:
			return TypeDeductor.checkTypeChildrenExpression(node.children, symbolTable)
		elif node.type == ASTNodeType.Mul:
			return TypeDeductor.checkTypeChildrenExpression(node.children, symbolTable)
		elif node.type == ASTNodeType.Div:
			return TypeDeductor.checkTypeChildrenExpression(node.children, symbolTable)
		elif node.type == ASTNodeType.Brackets:
			return TypeDeductor.deductType(node.children[0], symbolTable)
		elif node.type == ASTNodeType.FunctionCall:
			return symbolTable.lookupSymbol(node.value).type
		elif node.type == ASTNodeType.RValueArrayElement:
			# Element access: unwrap the array's element type (.type.type).
			return symbolTable.lookupSymbol(node.value).type.type
		elif node.type == ASTNodeType.LValueArrayElement:
			return symbolTable.lookupSymbol(node.value).type.type
		elif node.type == ASTNodeType.RValueAddress:
			return symbolTable.lookupSymbol(node.children[0].value).type.addressOf()
		elif node.type == ASTNodeType.Dereference:
			return TypeDeductor.checkDereferenceValidity(node, symbolTable)
		elif node.type == ASTNodeType.Greater or \
			node.type == ASTNodeType.GreaterOrEqual or \
			node.type == ASTNodeType.Or or \
			node.type == ASTNodeType.And or \
			node.type == ASTNodeType.Equals or \
			node.type == ASTNodeType.NotEquals or \
			node.type == ASTNodeType.Less or \
			node.type == ASTNodeType.LessOrEqual:
			# Check to see if the children are of equal type
			return TypeDeductor.checkTypeChildrenExpression(node.children, symbolTable)
		elif node.type == ASTNodeType.Not or \
			node.type == ASTNodeType.Brackets or \
			node.type == ASTNodeType.NegateBrackets:
			# NOTE(review): Brackets is already handled above, so it is
			# unreachable here; only Not and NegateBrackets take this branch.
			return TypeDeductor.deductType(node.children[0], symbolTable)
		elif node.type == ASTNodeType.Negate:
			# Unary minus is only defined for the numeric types.
			childType = TypeDeductor.deductType(node.children[0], symbolTable)
			if not(childType == IntType() or childType == FloatType()):
				ErrorMsgHandler.negateInvalid(node, childType)
			return childType
		elif node.type == ASTNodeType.Condition:
			# Don't need to typecheck further, handled later
			return BoolType()
		else:
			raise Exception("Could not deduct type of node '" + str(node.type.name) + "'.")

	@staticmethod
	def checkDereferenceValidity(node, symbolTable):
		"""
		Checks the validity of dereferencing.
		Returns the type after dereferencing.
		"""
		def getPtrCount(node, originalNode, stopAtExpressionOperator = False):
			# Count the '*' operators on the path from node up to originalNode.
			# When stopAtExpressionOperator is set, counting stops at the first
			# arithmetic/logical operator encountered on the way up.
			operations = [ASTNodeType.Negate, ASTNodeType.Addition, ASTNodeType.Subtraction, \
				ASTNodeType.Mul, ASTNodeType.Div, ASTNodeType.Or, ASTNodeType.And, ASTNodeType.Not, \
				ASTNodeType.Equals, ASTNodeType.NotEquals, ASTNodeType.Greater, ASTNodeType.GreaterOrEqual, \
				ASTNodeType.Less, ASTNodeType.LessOrEqual]
			count = 0
			while node != originalNode:
				node = node.parent
				if node.type == ASTNodeType.Dereference:
					count += node.value.count("*")
				elif node.type in operations and stopAtExpressionOperator == True:
					return count
			return count

		# Walk the whole dereferenced expression tree.
		queue = [node]
		derefType = None        # resulting type after the dereference(s)
		typeDerefCount = 0      # deref depth already folded into derefType
		while len(queue) != 0:
			currentNode = queue.pop()
			[queue.append(i) for i in currentNode.children]
			if currentNode.type == ASTNodeType.Dereference:
				pass
			elif currentNode.type == ASTNodeType.RValueID or currentNode.type == ASTNodeType.RValueArrayElement or currentNode.type == ASTNodeType.FunctionCall:
				newType = TypeDeductor.deductType(currentNode, symbolTable)
				# Normalise arrays/references/functions to their value type.
				if type(newType) is ArrayType:
					newType = newType.addressOf()
				elif type(newType) is ReferenceType:
					newType = newType.referencedType
				elif type(newType) is FunctionType:
					newType = newType.returnType
				totalCount = getPtrCount(currentNode, node)
				partialCount = getPtrCount(currentNode, node, True)
				if partialCount == 0:
					# No '*' between this operand and the nearest operator:
					# the operand takes part in pointer arithmetic, so it
					# must be an int.
					if not(newType == IntType()):
						ErrorMsgHandler.addressOpNonInt(currentNode, newType)
				elif type(newType) != PointerType:
					ErrorMsgHandler.derefNonPointer(currentNode)
				elif newType.ptrCount == 0:
					ErrorMsgHandler.derefNonPointer(currentNode)
				elif newType.ptrCount < partialCount:
					ErrorMsgHandler.overDereferencing(currentNode, newType.ptrCount, partialCount)
				# Keep the deepest valid dereference result seen so far.
				if typeDerefCount < totalCount and newType.ptrCount >= totalCount:
					typeDerefCount = totalCount
					derefType = newType.dereference(totalCount)
				elif typeDerefCount < partialCount and newType.ptrCount >= partialCount:
					typeDerefCount = partialCount
					derefType = newType.dereference(partialCount)
				elif derefType != None and newType.ptrCount > derefType.ptrCount:
					derefType = newType.dereference(typeDerefCount)
			else:
				# All other nodes are part of an expression
				rType = type(TypeDeductor.deductType(currentNode, symbolTable))
				if not(rType is IntType) and not(rType is PointerType):
					ErrorMsgHandler.derefInvalidExpression(node)
		return derefType

	@staticmethod
	def checkTypeChildrenExpression(children, symbolTable):
		"""
		Compares the types of the children. If the types differ: throw Exception.
		Returns the type of the children if equal.
		"""
		type1 = TypeDeductor.deductType(children[0], symbolTable)
		type2 = TypeDeductor.deductType(children[1], symbolTable)
		if (type1 != type2):
			ErrorMsgHandler.typesOperationWrong(children[0], type1, type2, children[0].parent)
		return type1
from glob import glob

# SCons build script: generates the font header, compiles every C/asm source
# into NXOS_BASEPLATE objects, and optionally runs Doxygen.
Import('env')

# TODO: This should be fixed to play nicer with scons, I think.
font = env.Command('_font.h', ['_font.h.base', 'font.8x5.png'],
                   './scripts/generate_fonts.py base/font.8x5.png '
                   'base/_font.h.base base/_font.h')

# Compile every .c/.S file in the top level, drivers/ and lib/*/.
for source in glob('*.[cS]')+glob('drivers/*.[cS]')+glob('lib/*/*.[cS]'):
    obj = env.Object(source.split('.')[0], source)
    env.Append(NXOS_BASEPLATE=obj)
    # display.c uses the generated font header, so rebuild it when the
    # font target changes.
    if source == 'display.c':
        env.Depends(obj, font)

if env['WITH_DOXYGEN']:
    env.Doxygen('Doxyfile')
|
# Generated by Django 2.2.2 on 2019-07-04 13:09
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds an optional free-text ``comment`` field to the ``entity`` model."""

    dependencies = [
        ('myknowapp', '0021_auto_20190704_1004'),
    ]

    operations = [
        migrations.AddField(
            model_name='entity',
            name='comment',
            # blank=True/null=True: optional at both form and database level.
            field=models.TextField(blank=True, null=True),
        ),
    ]
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution(object):
    def middleNode(self, head):
        """
        :type head: ListNode
        :rtype: ListNode

        Two-pointer walk: the fast pointer advances two links per step while
        the slow pointer advances one, so when the fast pointer runs off the
        end the slow pointer sits on the middle node (the second middle node
        for even-length lists). Returns None for an empty list.
        """
        slow = fast = head
        while fast is not None and fast.next is not None:
            slow = slow.next
            fast = fast.next.next
        return slow
#!/bin/python3
import os
import sys
#
# Complete the waiter function below.
#
def waiter(number, q):
    """Simulate the 'Waiter' plate-stacking game.

    For each of the first q primes, plates are popped from the current pile
    and those divisible by the prime are moved to an answer pile; the rest
    form the next pile. After q rounds the remaining pile is appended too.

    Args:
        number: list of plate values (the initial pile, bottom to top).
        q: number of prime-partition rounds.

    Returns:
        The plate values in the order they end up stacked (top-first per pile).
    """
    # Generate the first q primes by trial division against earlier primes.
    # (The original looped `while len(primes) in range(q)` and kept dead
    # bookkeeping variables; trial division by known primes is equivalent.)
    primes = []
    candidate = 2
    while len(primes) < q:
        if all(candidate % p != 0 for p in primes):
            primes.append(candidate)
        candidate += 1

    output = []
    for i in range(q):
        # Plates divisible by the current prime, in pop (top-down) order.
        output.append([num for num in number[::-1] if num % primes[i] == 0])
        # Remaining plates form the next pile; reversal models restacking.
        number = [num for num in number if num % primes[i] != 0][::-1]
    output.append(number)
    # Each pile is read top-first when answers are collected.
    return [x for y in output for x in y[::-1]]
if __name__ == '__main__':
    # HackerRank harness: results are written to the path in OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')

    nq = input().split()
    n = int(nq[0])  # plate count (only used for parsing the header line)
    q = int(nq[1])  # number of prime-partition rounds

    number = list(map(int, input().rstrip().split()))

    result = waiter(number, q)

    fptr.write('\n'.join(map(str, result)))
    fptr.write('\n')
    fptr.close()
|
"""Things here get run first.
This initializes the Celery app, and makes sure that environment
variables are loaded for it.
"""
from __future__ import absolute_import # for Python 2.x workers
import os
import warnings
import dotenv
# Load environment variables from .env file
with warnings.catch_warnings():
warnings.filterwarnings("error")
try:
dotenv.read_dotenv(
os.path.join(os.path.dirname(os.path.dirname(__file__)), ".env")
)
except UserWarning:
raise FileNotFoundError("Could not find .env!")
# Initialize Celery (see
# http://docs.celeryproject.org/en/latest/django/first-steps-with-django.html)
from saltant.celery import app as celery_app
__all__ = ("celery_app",)
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements a utility for parsing path templates."""
# This is ported over from endpoints.api_config_manager.
from __future__ import absolute_import
import base64
import re
# Internal constants
_PATH_VARIABLE_PATTERN = r'[a-zA-Z_][a-zA-Z_.\d]*'
_PATH_VALUE_PATTERN = r'[^/?#\[\]{}]*'
RegexError = re.error # convenient alias
def _to_safe_path_param_name(matched_parameter):
    """Creates a safe string to be used as a regex group name.

    Only alphanumeric characters and underscore are allowed in variable name
    tokens, and numeric are not allowed as the first character.

    We cast the matched_parameter to base32 (since the alphabet is safe),
    strip the padding (= not safe) and prepend with _, since we know a token
    can begin with underscore.

    Args:
      matched_parameter: A string containing the parameter matched from the URL
        template.

    Returns:
      A string that's safe to be used as a regex group name.
    """
    # base64.b32encode requires bytes on Python 3; the variable-name pattern
    # restricts the parameter to ASCII, so encode/decode is lossless and the
    # behaviour is unchanged on Python 2.
    encoded = base64.b32encode(matched_parameter.encode('ascii'))
    return '_' + encoded.decode('ascii').rstrip('=')
def compile_path_pattern(pattern):
    r"""Generates a compiled regex pattern for a path pattern.

    e.g. '/MyApi/v1/notes/{id}'
    returns re.compile(r'/MyApi/v1/notes/(?P<id>[^/?#\[\]{}]*)')

    Args:
      pattern: A string, the parameterized path pattern to be checked.

    Returns:
      A compiled regex object to match this path pattern.
    """

    def replace_variable(match):
        """Replaces a {variable} with a regex to match it by name.

        Changes the string corresponding to the variable name to the base32
        representation of the string, prepended by an underscore. This is
        necessary because we can have message variable names in URL patterns
        (e.g. via {x.y}) but the character '.' can't be in a regex group name.

        Args:
          match: A regex match object, the matching regex group as sent by
            re.sub().

        Returns:
          A string regex to match the variable by name, if the full pattern was
          matched.
        """
        # Group 1 is the leading '/' (or start-of-string); group 2 is the
        # variable name. lastindex > 1 means the name group actually matched.
        if match.lastindex > 1:
            var_name = _to_safe_path_param_name(match.group(2))
            return '%s(?P<%s>%s)' % (match.group(1), var_name,
                                     _PATH_VALUE_PATTERN)
        return match.group(0)

    # A {variable} must be preceded by '/' or start-of-string and followed by
    # '/', end-of-string or ':'; the lookahead keeps the trailing delimiter
    # available for an adjacent variable.
    pattern = re.sub('(/|^){(%s)}(?=/|$|:)' % _PATH_VARIABLE_PATTERN,
                     replace_variable, pattern)
    # Allow an optional trailing slash and anchor the match at the end.
    return re.compile(pattern + '/?$')
|
##InputQuestions##
# The month/category pair selects which block of prompts below is executed.
Month = str(input("What Month:"))
Category = str(input("Which Accounting Category:"))
def Add(num1, num2, num3, num4, num5):
    """Return the sum of the five amounts."""
    return sum((num1, num2, num3, num4, num5))
def subtract(num1, num2, num3, num4, num5):
    """Return num1 minus each of the remaining four amounts, in order."""
    result = num1
    for amount in (num2, num3, num4, num5):
        result -= amount
    return result
def multiply(num1, num2, num3, num4, num5):
    """Return the product of the five amounts."""
    product = num1
    for amount in (num2, num3, num4, num5):
        product *= amount
    return product
def divide(num1, num2, num3, num4, num5):
    """Return num1 divided successively by the other four amounts (left to
    right, true division)."""
    result = num1
    for amount in (num2, num3, num4, num5):
        result /= amount
    return result
##JanuarySetup##
# Category label lists (informational; the prompts below collect amounts).
January_Income_List = [
    'January - Paycheck/Salary',
    'January - Other Income',
    'January - ATM Deposits',
    'January - Transfers',
    'January - Reimbursements']
January_Expense_List = [
    'January - Automotive Expenses',
    'January - Miscellaneous Expenses',
    'January - Food Expenses',
    'January - Medical Expenses',
    'January - Clothing Expenses']

if Month == 'January' and Category == 'Income':
    ##Input configuration for Monthly Income##
    January_Primary_Income = float(input("January Primary Income:"))
    January_Other_Income = float(input("January Other Income:"))
    January_ATM_Deposits = float(input("January ATM Deposits:"))
    January_Transfers = float(input("January Transfers:"))
    January_Reimbursements = float(input("January Reimbursements:"))
    ##Arithmetic Operations##
    print("Select operation.")
    print("1.Add")
    print("2.Subtract")
    print("3.Multiply")
    print("4.Divide")
    operation = input("Enter operation(1/2/3/4): ")

if Month == 'January' and Category == 'Expenses':
    ##Input configuration for january monthly expenses##
    January_Automotive_Expenses = float(input("January Automotive Expenses:"))
    January_Miscellaneous_Expenses = float(input("January Miscellaneous Expenses:"))
    January_Food_Expenses = float(input("January Food Expenses:"))
    January_Medical_Expenses = float(input("January Medical Expenses:"))
    January_Clothing_Expenses = float(input("January Clothing Expenses:"))
    ##Arithmetic Operations##
    print("Select operation.")
    print("1.Add")
    print("2.Subtract")
    print("3.Multiply")
    print("4.Divide")
    operation = input("Enter operation(1/2/3/4): ")

##FebruarySetup##
February_Income_List = [
    'February - Paycheck/Salary',
    'February - Other Income',
    'February - ATM Deposits',
    'February - Transfers',
    'February - Reimbursements']
February_Expenses_List = [
    'February - Automotive Expenses',
    'February - Miscellaneous Expenses',
    'February - Food Expenses',
    'February - Medical Expenses',
    'February - Clothing Expenses']

if Month == 'February' and Category == 'Income':
    ##Input configuration for february monthly income##
    February_Primary_Income = float(input("February Primary Income:"))
    February_Other_Income = float(input("February Other Income:"))
    February_ATM_Deposits = float(input("February ATM Deposits:"))
    February_Transfers = float(input("February Transfers:"))
    February_Reimbursements = float(input("February Reimbursements:"))
    ##Arithmetic Operations##
    print("Select operation.")
    print("1.Add")
    print("2.Subtract")
    print("3.Multiply")
    print("4.Divide")
    operation = input("Enter operation(1/2/3/4): ")

if Month == 'February' and Category == 'Expenses':
    ##Input configuration for february monthly expenses##
    February_Automotive_Expenses = float(input("February Automotive Expenses:"))
    February_Miscellaneous_Expenses = float(input("February Miscellaneous Expenses:"))
    February_Food_Expenses = float(input("February Food Expenses:"))
    February_Medical_Expenses = float(input("February Medical Expenses:"))
    February_Clothing_Expenses = float(input("February Clothing Expenses:"))
    ##Arithmetic Operations##
    print("Select operation.")
    print("1.Add")
    print("2.Subtract")
    print("3.Multiply")
    print("4.Divide")
    operation = input("Enter operation(1/2/3/4): ")
###arithmetic operations setup using Month/Category/Operation##
# Bind num1..num5 to the amounts collected above, print the selected
# arithmetic result, then exit.
while True:
    if Month == 'January' and Category == 'Income' and operation in ('1', '2', '3', '4'):
        num1 = January_Primary_Income
        num2 = January_Other_Income
        num3 = January_ATM_Deposits
        num4 = January_Transfers
        num5 = January_Reimbursements
    elif Month == 'January' and Category == 'Expenses' and operation in ('1', '2', '3', '4'):
        num1 = January_Automotive_Expenses
        num2 = January_Miscellaneous_Expenses
        num3 = January_Food_Expenses
        num4 = January_Medical_Expenses
        num5 = January_Clothing_Expenses
    elif Month == 'February' and Category == 'Income' and operation in ('1', '2', '3', '4'):
        num1 = February_Primary_Income
        num2 = February_Other_Income
        num3 = February_ATM_Deposits
        num4 = February_Transfers
        num5 = February_Reimbursements
    elif Month == 'February' and Category == 'Expenses' and operation in ('1', '2', '3', '4'):
        # BUG FIX: this branch was missing, so February/Expenses sessions
        # reached the prints below with num1..num5 undefined (NameError).
        num1 = February_Automotive_Expenses
        num2 = February_Miscellaneous_Expenses
        num3 = February_Food_Expenses
        num4 = February_Medical_Expenses
        num5 = February_Clothing_Expenses
    else:
        # Unknown month/category/operation: report and stop instead of
        # looping forever.
        print("Invalid Input")
        break
    if operation == '1':
        print(num1, "+", num2, "+", num3, "+", num4, "+", num5, "=", Add(num1, num2, num3, num4, num5))
    if operation == '2':
        print(num1, "-", num2, "-", num3, "-", num4, "-", num5, "=", subtract(num1, num2, num3, num4, num5))
    if operation == '3':
        print(num1, "*", num2, "*", num3, "*", num4, "*", num5, "=", multiply(num1, num2, num3, num4, num5))
    if operation == '4':
        print(num1, "/", num2, "/", num3, "/", num4, "/", num5, "=", divide(num1, num2, num3, num4, num5))
    break
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import json
import requests
from HTMLParser import HTMLParser
import re
from stopwords import STOPWORDS
ES_URL = "http://localhost:9200/"
SOURCE_INDEX = "music"
SUGGESTION_INDEX = "music_suggest"
ALL_QUERY = "*:*"
SCROLL_TIME = "2m"
CHUNK_RE = re.compile(r'[\.\,\?\!\;\:]')
WORD_RE = re.compile(r"\w+(?:[\-\_\']+\w+)?")
TEXT_FIELDS = [ "body", "title" ]
META_FIELDS = [ "viewcount", "answercount" ]
MIN_WORD_LEN = 2
MAX_WORD_LEN = 30
MAX_SHINGLE_LEN = 3
class ShingleData:
    """Aggregated statistics for a single shingle (word n-gram)."""

    def __init__(self, shingle):
        self.shingle = shingle        # the shingle text itself
        self.length = len(shingle)    # character length (indexed for filtering)
        self.freq = 0                 # total occurrences accumulated so far
        self.metadata = {}            # metadata dicts, de-duplicated by content

    def update(self, freq, metadata):
        """Add *freq* occurrences and record *metadata* once per distinct content."""
        self.freq += freq
        content_key = hash(json.dumps(metadata, sort_keys=True))
        self.metadata.setdefault(content_key, metadata)
def read_index(index):
    """Scroll through every document in the source index and build the
    shingle table.

    Returns a dict mapping shingle text -> ShingleData.

    NOTE(review): the `index` parameter is ignored; the URL is built from
    the SOURCE_INDEX module constant instead -- confirm intent.
    """
    processed = 0
    shingle_data = {}

    # fetch 100 items from the index
    url = "{0}{1}/_search".format(ES_URL, SOURCE_INDEX)
    response = requests.get(url, params={
        "q": ALL_QUERY,
        "scroll": SCROLL_TIME,
        "size": 100
    })
    response.raise_for_status()
    respobj = response.json()
    hits = respobj['hits']['hits']
    process_documents(hits, shingle_data)
    processed += len(hits)

    # fetch more until we have processed them all
    while True:
        url = "{0}_search/scroll".format(ES_URL)
        body = json.dumps({
            "scroll_id": respobj["_scroll_id"],
            "scroll": SCROLL_TIME
        })
        response = requests.post(url, data=body)
        response.raise_for_status()
        respobj = response.json()
        hits = respobj['hits']['hits']
        if len(hits) == 0:
            break
        process_documents(hits, shingle_data)
        processed += len(hits)
        # Python 2 print with trailing comma + '\r': in-place progress line.
        print "processed", processed, "docs, shingle_data size:", len(shingle_data), "\r",
        sys.stdout.flush()
    print
    return shingle_data
def process_documents(docs, shingle_data):
    """Extract shingles from each hit's text fields and fold them into
    shingle_data (dict: shingle text -> ShingleData)."""
    for doc in docs:
        source = doc["_source"]
        # Keep only the meta fields that are present and truthy.
        metadata = { field: source.get(field) for field in META_FIELDS
                     if source.get(field) }
        for field in TEXT_FIELDS:
            text = stripHTML(source.get(field, '').lower())
            for shin, freq in get_shingles(text).iteritems():
                shingle_data.setdefault(shin, ShingleData(shin)).update(freq, metadata)
class MLStripper(HTMLParser):
    """HTMLParser subclass that keeps only the text between tags."""

    def __init__(self):
        # NOTE(review): HTMLParser.__init__ is never called; under Python 2's
        # HTMLParser, reset() performs the needed initialisation -- confirm
        # before porting to Python 3.
        self.reset()
        self.fed = []  # text fragments accumulated by handle_data

    def handle_data(self, d):
        self.fed.append(d)

    def get_data(self):
        """Return all collected text fragments joined together."""
        return ''.join(self.fed)
def stripHTML(text):
    """Return *text* with all HTML markup removed."""
    stripper = MLStripper()
    stripper.feed(text)
    return stripper.get_data()
class RejectShingle(Exception):
    """Raised internally by get_shingles to discard an unwanted shingle."""
    pass
def get_shingles(text):
    """Return a dict mapping each accepted shingle in *text* to its frequency.

    A shingle is a run of 1..MAX_SHINGLE_LEN consecutive words within one
    clause; it is rejected if any word is too short, too long, or a stop word.
    """
    shingles = {}
    # extract shingles from within sentences and clauses
    for chunk in CHUNK_RE.split(text):
        words = WORD_RE.findall(chunk)
        for size in xrange(1, MAX_SHINGLE_LEN + 1):
            for i in xrange(len(words) + 1 - size):
                try:
                    shin = words[i:i + size]
                    for word in shin:
                        if len(word) < MIN_WORD_LEN or len(word) > MAX_WORD_LEN or word in STOPWORDS:
                            raise RejectShingle
                    shin = ' '.join(shin)
                    # increase the shingle freq
                    shingles[shin] = shingles.get(shin, 0) + 1
                except RejectShingle:
                    pass
    return shingles
def create_suggestion_index(shingle_data, index):
    """Bulk-index every shingle into the suggestion index.

    Each suggestion document carries the shingle text, its frequency, its
    character length, and the de-duplicated metadata values.
    """
    batch = []
    for _id, item in enumerate(shingle_data.itervalues()):
        # Bulk API format: one action line followed by one source line.
        batch.append(json.dumps({
            "index": { "_index": index, "_type": "suggestion", "_id": _id }
        }))
        batch.append(json.dumps({
            "suggestion": item.shingle,
            "freq": item.freq,
            "length": item.length,
            "meta": item.metadata.values()
        }))
        # 100 lines == 50 documents per bulk request.
        if len(batch) == 100:
            index_batch(batch)
            batch = []
            print "indexed", _id, "of", len(shingle_data), "\r",
            sys.stdout.flush()
    if len(batch) > 0:
        index_batch(batch)
    print "\ndone"
def index_batch(batch):
    """POST one _bulk request; *batch* is a list of action/source JSON lines."""
    # The bulk API requires a trailing newline after the last line.
    body = "\n".join(batch) + "\n"
    response = requests.post(ES_URL + "_bulk", data=body)
    response.raise_for_status()
def main():
    """Build the shingle table from the source index, then index suggestions."""
    shingle_data = read_index(SOURCE_INDEX)
    create_suggestion_index(shingle_data, SUGGESTION_INDEX)

if __name__ == "__main__":
    main()
|
# print_hyer
# Created by JKChang
# 03/08/2017, 15:42
# Tag:
# Description:
from xml.dom import minidom
import xml.etree.ElementTree as ET
def perf_func(elem, func, level=0):
    """Apply func(element, depth) to *elem* and, pre-order, to every
    descendant element, with *level* tracking the tree depth.

    Args:
        elem: an xml.etree.ElementTree.Element.
        func: callable taking (element, level).
        level: depth of *elem* (0 for the root of the traversal).
    """
    func(elem, level)
    # Element objects are directly iterable over their children;
    # getchildren() was deprecated and removed in Python 3.9.
    for child in elem:
        perf_func(child, func, level + 1)
def print_level(elem, level):
    """Print the element's tag indented by one tab per tree level."""
    indentation = '\t' * level
    print(indentation + elem.tag)
#======================================================
def prettify(elem):
    """Return a pretty-printed XML string for the Element, one node per
    line, indented with tabs."""
    raw = ET.tostring(elem, 'utf-8')
    dom = minidom.parseString(raw)
    return dom.toprettyxml(indent="\t")
import scrapy
# Open the database connection
# from test import time_mk
from mingyan.util.minyanitem import getMinyanItem
city_name = '大连'  # Dalian (matches the dl.ke.com domain); attached to items

# Filter slugs combined into every crawl URL. NOTE(review): presumably the
# site's price (p), area (a), year (y) and floor (lc) filter bands -- confirm
# against ke.com's URL scheme.
p_list = ['p1', 'p2', 'p3', 'p4', 'p5', 'p6', 'p7']
a_list = ['a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'a7']
y_list = ['y4', 'y5']
lc_list = ['lc1', 'lc2', 'lc3', 'lc4', 'lc5']

proxy_ip = ''  # empty string: no proxy configured
class WeatherSpider(scrapy.Spider):
    """Scrapes closed-deal (chengjiao) listings from dl.ke.com."""

    # https://sz.ke.com/chengjiao/nanshanqu/pg2/
    # name = "beike_all_area_of_chengjiao_by_city_2"
    allowed_domains = ["dl.ke.com"]
    start_urls = ['https://dl.ke.com']

    def start_requests(self):
        # Example for Wuhan second-hand homes: https://wh.ke.com/chengjiao/pg2/
        url = self.start_urls[0] + "/chengjiao/"
        yield scrapy.Request(url=url, callback=self.parse_a, meta={'proxy': proxy_ip})

    def parse_a(self, response):
        """For every district link, fan out one request per filter combination."""
        select_area_href_list_first = response.xpath(
            '//*[@data-role="ershoufang"]/div[1]/a[@class=" CLICKDATA"]/@href').extract()
        for j in range(len(select_area_href_list_first) - 1, -1, -1):
            area_i = select_area_href_list_first[j]
            # if str(area_i).__contains__('chaoyang'):
            # Cartesian product of all filter slugs, appended to page 1 of
            # each district.
            for p_index in range(0, len(p_list)):
                for a_index in range(0, len(a_list)):
                    for y_index in range(0, len(y_list)):
                        for lc_index in range(0, len(lc_list)):
                            tiaojian = p_list[p_index] + a_list[a_index] + y_list[y_index] + lc_list[lc_index]
                            url = self.start_urls[0] + area_i + "pg1" + tiaojian + '/'
                            # print(url)
                            yield scrapy.Request(url=url, callback=self.parse_b,
                                                 meta={'tiaojian': tiaojian, 'proxy': proxy_ip}, dont_filter=True)

    def parse_b(self, response):
        """Read the result count for one filter combination and request every
        result page of it."""
        total_num = response.xpath(
            '//*[@data-component="listOverview"]/div[@class="resultDes clear"]/div[@class="total fl"]/span/text()').extract()
        if len(total_num) > 0:
            total_num = total_num[0].replace(' ', '').replace('\n', '')
            select_area_list = response.xpath(
                '//*[@data-role="ershoufang"]/div[1]/a[@class="selected CLICKDATA"]/@href').extract()
            areaname = select_area_list[0]
            # areaname = areaname.replace(' ', '').replace('\n', '')
            if int(total_num) > 0:
                # 30 listings per page; +2 covers the remainder page.
                num_avg = int(int(total_num) / 30)
                total_page = num_avg + 2
                if total_page > 101:
                    total_page = 101  # site pagination appears capped at 100 pages
                for i in range(total_page - 1, -1, -1):
                    url = self.start_urls[0] + areaname + "pg" + str(i) + '/'
                    print("请求url:" + url)
                    # time.sleep(0.5)
                    yield scrapy.Request(url=url, callback=self.parse_first, meta={'proxy': proxy_ip}, dont_filter=True)

    def parse_first(self, response):
        """Extract the listing fields from one results page and emit one item
        per listing."""
        select_area_list = response.xpath(
            '//*[@data-role="ershoufang"]/div[1]/a[@class="selected CLICKDATA"]/text()').extract()
        if isinstance(select_area_list, list) and len(select_area_list) == 1:
            area = select_area_list[0]
            # area = area.replace(' ', '').replace('\n', '')
            common_str = '//*[@data-component="list"]/ul/li/div[@class="info"]'
            ListTitle = response.xpath(
                common_str + '/div[@class="title"]/a/text()').extract()
            ListMaidian = response.xpath(
                common_str + '/div[@class="title"]/a/@href').extract()
            ListdealDate = response.xpath(
                common_str + '/div[@class="address"]/div[@class="dealDate"]/text()').extract()
            ListtotalPrice = response.xpath(
                common_str + '/div[@class="address"]/div[@class="totalPrice"]/span/text()').extract()
            ListUnitPrice = response.xpath(
                common_str + '/div[@class="flood"]/div[@class="unitPrice"]/span/text()').extract()
            ListHouseAge = response.xpath(
                common_str + '/div[@class="flood"]/div[1]/text()').extract()
            ListGuapai_price = response.xpath(
                common_str + '/div[@class="dealCycleeInfo"]/span[@class="dealCycleTxt"][1]/span[1]/text()').extract()
            Listdealcycle_date = response.xpath(
                common_str + '/div[@class="dealCycleeInfo"]/span[@class="dealCycleTxt"][1]/span[2]/text()').extract()
            size = len(ListTitle)
            size_house_age = len(ListHouseAge)
            # flag: presumably True when the house-age column yields two text
            # nodes per listing -- confirm against getMinyanItem.
            flag = size_house_age == size * 2
            for i in range(size):
                item = getMinyanItem(i, ListMaidian, ListTitle, ListdealDate, ListtotalPrice, ListUnitPrice,
                                     ListGuapai_price,
                                     Listdealcycle_date, ListHouseAge, flag, area, city_name)
                yield item
|
__author__ = 'ANKIT VERMA'
import requests
import operator
from bs4 import BeautifulSoup
# Running tally of word -> occurrence count across all scraped link texts.
keywords = {}

# ADD THE WORDS WHOSE COUNT IS NOT REQUIRED IN THE LIST BELOW
not_require_keywords = ['how', 'and', 'when', 'why', 'which', 'who', 'a', 'all', 'or', 'an', 'the', 'aboard', 'about',
                        'above',
                        'across', 'after', 'against', 'ahead of', 'along', 'amid', 'amidst', 'among', 'around', 'as',
                        'as far as', 'as of', 'aside from', 'at', 'athwart', 'atop ', 'barring', 'because of', 'before',
                        'behind', 'below', 'beneath', 'beside', 'besides', 'between', 'beyond', 'but', 'by',
                        'by means of ', 'circa', 'concerning ', 'despite', 'down', 'during ', 'except', 'except for',
                        'excluding ', 'far from', 'following', 'for', 'from ', 'in', 'in accordance with',
                        'in addition to', 'in case of', 'in front of', 'in lieu of', 'in place of', 'in spite of',
                        'including', 'inside', 'instead of', 'into ', 'like ', 'minus ', 'near', 'next to',
                        'notwithstanding ', 'of', 'off', 'on', 'on account of', 'on behalf of', 'on top of', 'onto',
                        'opposite', 'out', 'out of', 'outside', 'over ', 'past', 'plus', 'prior to ', 'regarding',
                        'regardless of ', 'save', 'since ', 'than', 'through', 'till', 'to', 'toward', 'towards ',
                        'under', 'underneath', 'unlike', 'until', 'up', 'upon ', 'versus', 'via ', 'with',
                        'with regard to', 'within', 'without']
def start(url):
    """Collect every word from the anchor (<a>) texts of the page at *url*
    and pass them on for cleaning and counting."""
    word_list = []
    source_code = requests.get(url).text
    soup = BeautifulSoup(source_code, "html.parser")
    for post_text in soup.findAll('a'):
        content = post_text.string
        # .string is None for anchors with nested markup; skip those.
        if content:
            words = content.lower().split()
            for each_word in words:
                word_list.append(each_word)
    clean_word(word_list)
def clean_word(word_list):
    """Strip punctuation and digit characters from each word and forward the
    non-empty results to get_count()."""
    symbols = ",./;\'-!@#$%^&*()\"_+=|`[]>>:{}?><:1234567890"
    # Deleting every symbol character in a single pass is equivalent to the
    # original chain of per-character replace() calls.
    strip_table = str.maketrans('', '', symbols)
    clean_word_list = []
    for word in word_list:
        stripped = word.translate(strip_table)
        if stripped:
            clean_word_list.append(stripped)
    get_count(clean_word_list)
def get_count(clean_word_list):
    """Tally each word into the module-level `keywords` dict, skipping the
    words listed in `not_require_keywords`."""
    for word in clean_word_list:
        if word in not_require_keywords:
            continue
        keywords[word] = keywords.get(word, 0) + 1
url = input("ENTER URL TO GET WORD FREQUENCY IN HYPERLINKS:\n")
start(url)
for key, value in sorted(keywords.items(), key=operator.itemgetter(1), reverse=True):
print(key + ':', value)
|
import random
import ACTIONS
from Strategy import Strategy
class RandomStrategy(Strategy):
    """Strategy that picks uniformly at random among the legal moves."""

    def __init__(self, robot, world):
        self.robot = robot  # robot whose state supplies the (x, y) position
        self.world = world  # world providing the sumo_grid occupancy map

    def choose_action(self):
        """Return a random move into any adjacent cell whose grid value is
        not -9.

        NOTE(review): assumes (x, y) is never on the grid border -- an edge
        position would index outside sumo_grid. -9 appears to mark blocked
        cells; confirm against the world implementation.
        """
        actions = []
        x, y = self.robot.state
        grid = self.world.sumo_grid
        if grid[x + 1][y] != -9:
            actions.append(ACTIONS.MOVE_EAST)
        if grid[x - 1][y] != -9:
            actions.append(ACTIONS.MOVE_WEST)
        if grid[x][y - 1] != -9:
            actions.append(ACTIONS.MOVE_NORTH)
        if grid[x][y + 1] != -9:
            actions.append(ACTIONS.MOVE_SOUTH)
        choice = random.choice(actions)
        return choice

    def __str__(self):
        return "Random"
|
first_name = 'Cesar'
last_name = 'Romero'

# Classic concatenation. BUG FIX: added the space before '[' so the output
# matches the f-string version below (it previously printed "Cesar[Romero]").
mensaje = first_name + ' [' + last_name + '] es programador'

# Formatted string (f-string)
msg = f'{first_name} [{last_name}] es programador'

print(mensaje)
print(msg)
import tkinter as tk
from tkinter import ttk
mainfrm = tk.Tk()

# Spacer frame so the window opens at a usable size.
ttk.Frame(height=80, width=200).pack()

# Styled buttons: bold right-anchored Enter, italic left-anchored Cancel.
ttk.Style().configure("enter.TButton", font=("Times", "10", "bold"), anchor="e")
ttk.Style().configure("cancel.TButton", font=("Times", "10", "italic"), anchor="w")

btnEnter = ttk.Button(mainfrm, text="Enter", style="enter.TButton").place(x=10, y=30)
# BUG FIX: button label typo "Cencle" -> "Cancel".
btnCancel = ttk.Button(mainfrm, text="Cancel", style="cancel.TButton").place(x=100, y=30)

mainfrm.mainloop()
#BOJ1524 세준세비 20210113
def main():
    """BOJ 1524: for each test case, repeatedly eliminate the weaker of the
    two armies' weakest soldiers and print the surviving side (S or B)."""
    cases = int(input())
    for _ in range(cases):
        input()  # consume the blank separator line before each case
        n, m = map(int, input().rstrip().split(" "))
        army_s = list(map(int, input().rstrip().split(" ")))
        army_b = list(map(int, input().rstrip().split(" ")))
        army_s.sort()
        army_b.sort()
        idx_s = 0
        idx_b = 0
        # Advance past the weaker front soldier until one army is exhausted.
        while idx_s < n and idx_b < m:
            if army_s[idx_s] < army_b[idx_b]:
                idx_s += 1
            else:
                idx_b += 1
        winner = "B" if idx_s == n else "S"
        print(winner)


if __name__ == '__main__':
    main()
#!/usr/bin/env python
from lv import LV
from player import Player
import os
class LVService():
    """Ties the LV schedule client to the hardware Player via GPIO callbacks."""

    def __init__(self):
        self.lv = LV()
        self.lv.adminResetToNull()
        # Only bring up the player hardware when a non-empty schedule exists.
        if self.refreshSchedule():
            # setup the player using custom callback functions
            self.player = Player(TILT_SWITCH_CALLBACK=self.tiltSwitchCallback,
                                 EXT_SWITCH_CALLBACK=self.extSwitchCallback)
            self.player.setup()

    def refreshSchedule(self):
        """
        Gets the current schedule, downloads its audio files, and caches the
        highest bid's audio filename. Returns True on success, False when the
        received schedule is empty.
        """
        self.schedule = self.lv.getSchedule()
        if self.schedule:
            print "Got a schedule containing: %d item(s)" % len(self.schedule['schedule'])
            self.urls = self.lv.getDlUrls(self.schedule)
            #print "List of audio URLs to download \n %s \n" % u
            # Only confirm retrieval once every file downloaded successfully.
            if self.lv.dlAllFiles(self.urls):
                self.lv.confirmScheduleRetrieval()
            self.highestBid = self.lv.getHighestBid(self.schedule)
            self.audiofile = os.path.basename(self.highestBid['filename'])
            print self.highestBid
            print self.audiofile
            return True
        else:
            print "I've got nothing to do because I've received an empty schedule!"
            return False

    def tiltSwitchCallback(self, channel):
        """
        Custom callback method passed to the Player.
        Should be executed when tilt event is detected
        """
        if self.player.input(channel):
            print "tilt switch callback"
            self.player.toggleRedLed()
            self.player.playMp3(self.audiofile)

    def extSwitchCallback(self, channel):
        """
        Custom callback method passed to the Player.
        Should be executed when an event from external switch is detected
        """
        if self.player.input(channel):
            print "ext switch callback"
            self.player.toggleGreenLed()
            self.refreshSchedule()
if __name__ == "__main__":
srvs=LVService()
|
import maya.cmds as cmds

# Place a locator at the centre of the combined world-space bounding box of
# the currently selected objects.
sels = cmds.ls(sl=True)

# exactWorldBoundingBox returns [xmin, ymin, zmin, xmax, ymax, zmax].
bbox = cmds.exactWorldBoundingBox(sels)
xLoc = (bbox[0] + bbox[3]) / 2
yLoc = (bbox[1] + bbox[4]) / 2
zLoc = (bbox[2] + bbox[5]) / 2

locator = cmds.spaceLocator(p=[0, 0, 0])
cmds.xform(locator, ws=True, t=[xLoc, yLoc, zLoc])
|
from django.contrib import admin
from django.conf import settings
# Brand the Django admin with the configured club name, e.g. "[xSACdb:MyClub]".
admin.site.site_title = '[xSACdb:{club_name}]'.format(club_name=settings.CLUB.get('name'))
admin.site.site_header = admin.site.site_title
|
from openCurrents.interfaces import common
from openCurrents.interfaces.ocuser import OcUser
from openCurrents.interfaces.orgs import OrgUserInfo
from openCurrents.interfaces.orgadmin import OrgAdmin
from openCurrents.models import OrgUser
from django.contrib.auth.models import User
from csv import writer
from datetime import datetime, timedelta
from pytz import utc
# Collect per-admin volunteer-hour metrics for every NPF org user and dump
# them to CSV. NOTE: Python 2 script (`data.keys()[0]`, `iteritems()`, and the
# bare `print` statement below are not valid Python 3).
orgusers = OrgUser.objects.filter(org__status='npf')

data = {}
for orguser in orgusers:
    user = orguser.user
    orguserinfo = OrgUserInfo(user.id)
    is_approved = orguserinfo.is_org_admin()

    orgadmin = OrgAdmin(user.id)
    hours_req = orgadmin.get_hours_requested()
    hours_app = orgadmin.get_hours_approved()
    hours_total_pending = orgadmin.get_total_hours_pending()
    hours_total_approved_all = orgadmin.get_total_hours_issued()
    # Hours approved within the last 7 days, summed from event durations.
    hours_total_approved_last_week = sum([
        common.diffInHours(
            rec.usertimelog.event.datetime_start,
            rec.usertimelog.event.datetime_end
        )
        for rec in hours_app
        if rec.date_created > datetime.now(tz=utc) - timedelta(weeks=1)
    ])

    data[user.id] = {
        'org': orguser.org.name,
        'admin_email': user.email,
        'admin_first_name': user.first_name.encode('utf-8'),
        'admin_last_name': user.last_name.encode('utf-8'),
        'is_approved': is_approved,
        'admin hours total (pending)': hours_total_pending,
        'admin hours total (approved)': hours_total_approved_all,
        'admin hours total (approved, last week)': hours_total_approved_last_week,
        'date last hour approved':
            hours_app[0].date_created.strftime('%m/%d/%Y') if hours_app else None,
        'date_joined': user.date_joined.strftime('%m/%d/%Y'),
        'date_last_login': user.last_login.strftime('%m/%d/%Y') if user.last_login else None
    }

# Header row taken from the first record's keys; one row per admin.
with open('./metrics/scripts/org-user/orguser-data.csv', 'w') as f:
    wr = writer(f)
    wr.writerow(['id'] + data[data.keys()[0]].keys())
    for idx, metrics in data.iteritems():
        print idx
        wr.writerow([idx] + metrics.values())
|
'''
This module provides some simple, shared utility functions
Created on 8 Sep 2015
@author: Tobias Meggendorfer
'''
from pulp.pulp import lpSum, LpProblem, LpVariable
from pulp.constants import LpMaximize, LpStatusOptimal, LpStatusInfeasible,\
LpStatusUnbounded, LpStatusUndefined, LpStatusNotSolved
from vote.society import Lottery
from itertools import chain, combinations
def createLpSum(choiceClass, choiceNames, choiceVariables):
    """Build the LP expression summing the variables of every choice in choiceClass."""
    terms = (choiceVariables[choiceNames[choice]] for choice in choiceClass)
    return lpSum(terms)
def checkPulpStatus(status,
                    errorInfeasible=True, errorUnbounded=True,
                    errorUndefined=True, errorNotSolved=True):
    """Validate a PuLP solver status code.

    Returns the status unchanged when it is LpStatusOptimal, or when it is a
    known non-optimal status whose corresponding error* flag is False.

    @param status: one of the pulp.constants.LpStatus* codes
    @raise ValueError: for a known status whose error* flag is set
        (message names the status), or for an unknown status code.
    """
    if status == LpStatusOptimal:
        return status
    # Map each known non-optimal status to (should-raise flag, message).
    checks = {
        LpStatusInfeasible: (errorInfeasible, "Infeasible"),
        LpStatusUnbounded: (errorUnbounded, "Unbounded"),
        LpStatusUndefined: (errorUndefined, "Undefined"),
        # Fixed typo: the message previously read "Not solver".
        LpStatusNotSolved: (errorNotSolved, "Not solved"),
    }
    if status not in checks:
        raise ValueError("Unknown status " + repr(status))
    shouldRaise, message = checks[status]
    if shouldRaise:
        raise ValueError(message)
    return status
def getAllSubsets(elements, startSize=1):
    """Return an iterable of all subsets of *elements* with size >= startSize.

    Subsets are produced as tuples in increasing-size order. Returns an empty
    set when startSize exceeds the number of elements.

    Fix: the previous range() upper bound excluded len(elements), so the
    full-size subset was only ever returned through the startSize ==
    len(elements) special case and was silently missing for smaller
    startSize values.
    """
    if startSize > len(elements):
        return set()
    # +1 so combinations(elements, len(elements)) — the full set — is included.
    return chain.from_iterable(combinations(elements, i)
                               for i in range(startSize, len(elements) + 1))
def getUniqueNames(objects, prefix="U_"):
    """Map each object to a unique name of the form <prefix><counter>.

    The counter starts at 0 and follows the iteration order of *objects*.
    """
    return {obj: prefix + str(number) for number, obj in enumerate(objects)}
def findLottery(vote, classHeights, solverSettings):
    '''
    Returns a Lottery satisfying all constraints specified by the classHeights parameter
    @type vote: vote.society.Vote
    @type classHeights: dict(vote.society.ChoiceClass, float)
    @type solverSettings: vote.solver.settings.SolverSettings
    @rtype: vote.society.Lottery
    @raise ValueError: If the constraints are not satisfiable
    '''
    # Stable LP-variable names for every choice class and every choice.
    classNames = getUniqueNames(classHeights.keys(), prefix="Class ")
    choiceNames = getUniqueNames(vote.getChoices(), prefix="Choice ")
    problem = LpProblem("Lambda", LpMaximize)
    # One non-negative probability variable per choice.
    choiceVariables = LpVariable.dicts("p", choiceNames.values(), lowBound=0)
    # Probabilities must sum to at most one (sub-distribution constraint).
    problem += lpSum(choiceVariables) <= 1, "Distribution"
    # Each choice class must receive at least its required height.
    for choiceClass, height in classHeights.items():
        problem += createLpSum(choiceClass, choiceNames, choiceVariables) >= \
            height, classNames[choiceClass] + " height"
    # Maximise total probability mass subject to the constraints above.
    problem.setObjective(lpSum(choiceVariables.values()))
    # checkPulpStatus raises ValueError when the program is not optimal.
    checkPulpStatus(problem.solve(solverSettings.getSolver()))
    # uncomment to print the linear program
    # print repr(problem)
    choiceValues = dict()
    for choice, choiceName in choiceNames.items():
        choiceValues[choice.getObject()] = choiceVariables[choiceName].value()
    return Lottery(choiceValues, solverSettings)
|
# from sys import argv
# script, user_name = argv
# prompt = ">"
# user_name = input()
# print(f"Hi {user_name}, I'm the {script} script.")
# print("I would like to ask you a few questions.")
# print(f"Do you like me {user_name}")
# like = input(prompt)
# print(f"Where do you live {user_name}")
# live = input(prompt)
# print(f"What kind of computer do you have {user_name}")
# computer = input(prompt)
# print(f"""
# So alright you said {like} about me.
# You live in {live}, No sure where that is
# And You have a {computer} computer. Nice
# """)
# Interactive questionnaire: reads first/last name from argv, asks four
# travel questions, then prints a summary.
from sys import argv

script, firstName, lastName = argv
prompt = ">>>"
# Consistency: use an f-string like the summary below (identical output to
# the previous str.format call).
print(f"Hi {firstName} {lastName}, I'm the {script} script")
print("I wish to answer some questions....")
print("Do you like to travel the world? ")
like = input(prompt)
print("What is your favorite destination? ")
destination = input(prompt)
print("Do you care to be solely in the world? ")
alone = input(prompt)
print("Espagne or France or USA Or Russia which destination do you prefer? ")
preference = input(prompt)
print(f"""
Alright, so you said: travel the world {like}.
Your favorite destination is {destination}.
To be alone: {alone}.
And your favorite destination is {preference}
""")
|
from django.contrib import admin
from .models import Especialidade
# Register your models here.
@admin.register(Especialidade)
class EspecialidadeAdmin(admin.ModelAdmin):
    """Admin configuration for Especialidade: list view shows the name column."""
    list_display = ('nome',)
import base64
from PIL import Image
import os
# Wraps images in HTML and inserts them one after the other
def figures_to_paragraph(fig_names, fig_dir):
    """Return HTML embedding each figure as a base64-encoded <img> paragraph.

    Args:
        fig_names: iterable of image file names, relative to fig_dir.
        fig_dir: directory containing the figures.

    Returns:
        str: concatenated '<p><img ...></p>' elements, one per figure.
    """
    figures_html = ""
    for fig in fig_names:
        # Join paths instead of os.chdir so the process CWD is never mutated
        # (the old chdir was not restored on exception), and use a context
        # manager so the file handle is closed (previously leaked).
        with open(os.path.join(fig_dir, fig), 'rb') as im:
            encoded = base64.b64encode(im.read()).decode('utf-8')
        figures_html += f'<p><img src="data:image/png;base64,{encoded}"></p>'
    return figures_html
# HTML report structure
def weekly_report_wrapper(table, fig_names, fig_dir):
    """Wrap the embedded figures and the given table into a full HTML document."""
    figures_as_html = figures_to_paragraph(fig_names, fig_dir)
    document_parts = [
        "<html>",
        "<head>",
        "<title>Weekly report</title>",
        "</head>",
        "<body>",
        figures_as_html,
        f"<p>{table}</p>",
        "</body>",
        "</html>",
    ]
    return "".join(document_parts)
|
import sys
# paritybench-generated preamble: expose this flattened module object so the
# original repository's submodule names can all be aliased onto it below.
_module = sys.modules[__name__]
del sys
# Aliases standing in for the original repo's module layout; every submodule
# name resolves to this single flattened module.
create_datasets = _module
metric = _module
confusionmatrix = _module
iou = _module
SmaAt_UNet = _module
layers = _module
regression_lightning = _module
unet_parts = _module
unet_parts_depthwise_separable = _module
unet_precip_regression_lightning = _module
test_precip_lightning = _module
train_SmaAtUNet = _module
train_precip_lightning = _module
utils = _module
data_loader_precip = _module
dataset_VOC = _module
dataset_precip = _module
from _paritybench_helpers import _mock_config, patch_functional
from unittest.mock import mock_open, MagicMock
from torch.autograd import Function
from torch.nn import Module
import abc, collections, copy, enum, functools, inspect, itertools, logging, math, matplotlib, numbers, numpy, pandas, queue, random, re, scipy, sklearn, string, tensorflow, time, torch, torchaudio, torchtext, torchvision, types, typing, uuid, warnings
import numpy as np
from torch import Tensor
patch_functional()
# Stub out I/O and CLI plumbing so the module can be imported in isolation;
# file opens, yaml/logging/argparse and sys.argv all resolve to mocks.
open = mock_open()
yaml = logging = sys = argparse = MagicMock()
ArgumentParser = argparse.ArgumentParser
# Any config object the generated code reads resolves to one mock config.
_global_config = args = argv = cfg = config = params = _mock_config()
argparse.ArgumentParser.return_value.parse_args.return_value = _global_config
yaml.load.return_value = _global_config
sys.argv = _global_config
__version__ = '1.0.0'
# Python-2 compatibility shims used by some generated code paths.
xrange = range
wraps = functools.wraps
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from torch import optim
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
import torch.nn as nn
import matplotlib.pyplot as plt
from torchvision import transforms
import time
import random
from torch.utils.data import Dataset
import torchvision.transforms.functional as TF
class Flatten(nn.Module):
    """Collapse every dimension after the batch dimension into one."""

    def forward(self, x):
        batch = x.size(0)
        return x.view(batch, -1)
class ChannelAttention(nn.Module):
    """CBAM channel attention: gate each channel using pooled descriptors."""

    def __init__(self, input_channels, reduction_ratio=16):
        super(ChannelAttention, self).__init__()
        self.input_channels = input_channels
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)
        hidden = input_channels // reduction_ratio
        # One MLP shared by the average- and max-pooled descriptors.
        self.MLP = nn.Sequential(
            Flatten(),
            nn.Linear(input_channels, hidden),
            nn.ReLU(),
            nn.Linear(hidden, input_channels),
        )

    def forward(self, x):
        pooled_avg = self.avg_pool(x)
        pooled_max = self.max_pool(x)
        attention = self.MLP(pooled_avg) + self.MLP(pooled_max)
        gate = torch.sigmoid(attention).unsqueeze(2).unsqueeze(3).expand_as(x)
        return x * gate
class SpatialAttention(nn.Module):
    """CBAM spatial attention: gate each location using channel statistics."""

    def __init__(self, kernel_size=7):
        super(SpatialAttention, self).__init__()
        assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
        # Preserve spatial size: kernel 7 -> pad 3, kernel 3 -> pad 1.
        padding = 3 if kernel_size == 7 else 1
        self.conv = nn.Conv2d(2, 1, kernel_size=kernel_size, padding=padding, bias=False)
        self.bn = nn.BatchNorm2d(1)

    def forward(self, x):
        mean_map = torch.mean(x, dim=1, keepdim=True)
        max_map = torch.max(x, dim=1, keepdim=True)[0]
        stacked = torch.cat([mean_map, max_map], dim=1)
        attention = self.bn(self.conv(stacked))
        return x * torch.sigmoid(attention)
class CBAM(nn.Module):
    """Convolutional Block Attention Module: channel then spatial attention."""

    def __init__(self, input_channels, reduction_ratio=16, kernel_size=7):
        super(CBAM, self).__init__()
        self.channel_att = ChannelAttention(input_channels, reduction_ratio=reduction_ratio)
        self.spatial_att = SpatialAttention(kernel_size=kernel_size)

    def forward(self, x):
        # Apply the two attention gates sequentially, channel first.
        return self.spatial_att(self.channel_att(x))
class DepthwiseSeparableConv(nn.Module):
    """Depthwise convolution followed by a 1x1 pointwise convolution."""

    def __init__(self, in_channels, output_channels, kernel_size, padding=0, kernels_per_layer=1):
        super(DepthwiseSeparableConv, self).__init__()
        depth_channels = in_channels * kernels_per_layer
        # groups=in_channels makes the first conv operate per input channel.
        self.depthwise = nn.Conv2d(in_channels, depth_channels,
                                   kernel_size=kernel_size, padding=padding,
                                   groups=in_channels)
        self.pointwise = nn.Conv2d(depth_channels, output_channels, kernel_size=1)

    def forward(self, x):
        return self.pointwise(self.depthwise(x))
class DoubleConvDS(nn.Module):
    """(convolution => [BN] => ReLU) * 2"""

    def __init__(self, in_channels, out_channels, mid_channels=None, kernels_per_layer=1):
        super().__init__()
        # Default the intermediate width to the output width.
        mid_channels = mid_channels or out_channels
        self.double_conv = nn.Sequential(
            DepthwiseSeparableConv(in_channels, mid_channels, kernel_size=3,
                                   kernels_per_layer=kernels_per_layer, padding=1),
            nn.BatchNorm2d(mid_channels),
            nn.ReLU(inplace=True),
            DepthwiseSeparableConv(mid_channels, out_channels, kernel_size=3,
                                   kernels_per_layer=kernels_per_layer, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        return self.double_conv(x)
class DownDS(nn.Module):
    """Downscaling with maxpool then double conv"""

    def __init__(self, in_channels, out_channels, kernels_per_layer=1):
        super().__init__()
        self.maxpool_conv = nn.Sequential(
            # Halve the spatial resolution, then convolve.
            nn.MaxPool2d(2),
            DoubleConvDS(in_channels, out_channels, kernels_per_layer=kernels_per_layer),
        )

    def forward(self, x):
        return self.maxpool_conv(x)
class OutConv(nn.Module):
    """Final 1x1 convolution projecting features to the output channels."""

    def __init__(self, in_channels, out_channels):
        super(OutConv, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)

    def forward(self, x):
        return self.conv(x)
class UpDS(nn.Module):
    """Upscaling then double conv"""

    def __init__(self, in_channels, out_channels, bilinear=True, kernels_per_layer=1):
        super().__init__()
        if bilinear:
            # Parameter-free upsampling; the double conv reduces channels.
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
            self.conv = DoubleConvDS(in_channels, out_channels, in_channels // 2,
                                     kernels_per_layer=kernels_per_layer)
        else:
            self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2)
            self.conv = DoubleConvDS(in_channels, out_channels,
                                     kernels_per_layer=kernels_per_layer)

    def forward(self, x1, x2):
        x1 = self.up(x1)
        # Pad the upsampled map so it matches the skip connection spatially.
        height_diff = x2.size()[2] - x1.size()[2]
        width_diff = x2.size()[3] - x1.size()[3]
        x1 = F.pad(x1, [width_diff // 2, width_diff - width_diff // 2,
                        height_diff // 2, height_diff - height_diff // 2])
        return self.conv(torch.cat([x2, x1], dim=1))
class SmaAt_UNet(nn.Module):
    """SmaAt-UNet: a UNet built from depthwise-separable convolutions with
    CBAM attention applied to the skip connections.

    Args:
        n_channels: number of input channels.
        n_classes: number of output channels/classes.
        kernels_per_layer: depth multiplier for the depthwise convolutions.
        bilinear: use bilinear upsampling instead of transposed convolutions.
        reduction_ratio: channel-reduction factor inside the CBAM blocks.
    """

    def __init__(self, n_channels, n_classes, kernels_per_layer=2, bilinear=True, reduction_ratio=16):
        super(SmaAt_UNet, self).__init__()
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.bilinear = bilinear
        # (Removed two no-op self-assignments of the local parameters
        # kernels_per_layer and reduction_ratio that had no effect.)
        self.inc = DoubleConvDS(self.n_channels, 64, kernels_per_layer=kernels_per_layer)
        self.cbam1 = CBAM(64, reduction_ratio=reduction_ratio)
        self.down1 = DownDS(64, 128, kernels_per_layer=kernels_per_layer)
        self.cbam2 = CBAM(128, reduction_ratio=reduction_ratio)
        self.down2 = DownDS(128, 256, kernels_per_layer=kernels_per_layer)
        self.cbam3 = CBAM(256, reduction_ratio=reduction_ratio)
        self.down3 = DownDS(256, 512, kernels_per_layer=kernels_per_layer)
        self.cbam4 = CBAM(512, reduction_ratio=reduction_ratio)
        # With bilinear upsampling the deepest map keeps half the channels so
        # decoder concatenations line up.
        factor = 2 if self.bilinear else 1
        self.down4 = DownDS(512, 1024 // factor, kernels_per_layer=kernels_per_layer)
        self.cbam5 = CBAM(1024 // factor, reduction_ratio=reduction_ratio)
        self.up1 = UpDS(1024, 512 // factor, self.bilinear, kernels_per_layer=kernels_per_layer)
        self.up2 = UpDS(512, 256 // factor, self.bilinear, kernels_per_layer=kernels_per_layer)
        self.up3 = UpDS(256, 128 // factor, self.bilinear, kernels_per_layer=kernels_per_layer)
        self.up4 = UpDS(128, 64, self.bilinear, kernels_per_layer=kernels_per_layer)
        self.outc = OutConv(64, self.n_classes)

    def forward(self, x):
        # Encoder: the un-attended features feed the next stage; the CBAM
        # outputs are used only as skip connections to the decoder.
        x1 = self.inc(x)
        x1Att = self.cbam1(x1)
        x2 = self.down1(x1)
        x2Att = self.cbam2(x2)
        x3 = self.down2(x2)
        x3Att = self.cbam3(x3)
        x4 = self.down3(x3)
        x4Att = self.cbam4(x4)
        x5 = self.down4(x4)
        x5Att = self.cbam5(x5)
        # Decoder with the attended skip connections.
        x = self.up1(x5Att, x4Att)
        x = self.up2(x, x3Att)
        x = self.up3(x, x2Att)
        x = self.up4(x, x1Att)
        logits = self.outc(x)
        return logits
class DepthToSpace(nn.Module):
    """Rearrange groups of block_size**2 channels into spatial positions."""

    def __init__(self, block_size):
        super().__init__()
        self.bs = block_size

    def forward(self, x):
        batch, channels, height, width = x.size()
        bs = self.bs
        out_channels = channels // (bs * bs)
        # Split channels into (bs, bs, out_channels), move the block factors
        # next to their spatial dimensions, then merge.
        expanded = x.view(batch, bs, bs, out_channels, height, width)
        expanded = expanded.permute(0, 3, 4, 1, 5, 2).contiguous()
        return expanded.view(batch, out_channels, height * bs, width * bs)
class SpaceToDepth(nn.Module):
    """Fold block_size x block_size spatial tiles into the channel dimension."""

    def __init__(self, block_size):
        super().__init__()
        self.bs = block_size

    def forward(self, x):
        batch, channels, height, width = x.size()
        bs = self.bs
        # Split each spatial dimension into (tiles, bs), move the two block
        # factors ahead of the channels, then merge into channels.
        tiled = x.view(batch, channels, height // bs, bs, width // bs, bs)
        tiled = tiled.permute(0, 3, 5, 1, 2, 4).contiguous()
        return tiled.view(batch, channels * bs * bs, height // bs, width // bs)
class DoubleDense(nn.Module):
    """Three fully connected layers; ReLU after the first two."""

    def __init__(self, in_channels, hidden_neurons, output_channels):
        super(DoubleDense, self).__init__()
        self.dense1 = nn.Linear(in_channels, out_features=hidden_neurons)
        self.dense2 = nn.Linear(in_features=hidden_neurons, out_features=hidden_neurons // 2)
        self.dense3 = nn.Linear(in_features=hidden_neurons // 2, out_features=output_channels)

    def forward(self, x):
        # Flatten everything after the batch dimension before the MLP.
        flat = x.view(x.size(0), -1)
        hidden = F.relu(self.dense1(flat))
        hidden = F.relu(self.dense2(hidden))
        return self.dense3(hidden)
class DoubleDSConv(nn.Module):
    """(convolution => [BN] => ReLU) * 2"""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        stages = [
            DepthwiseSeparableConv(in_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            DepthwiseSeparableConv(out_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        ]
        self.double_ds_conv = nn.Sequential(*stages)

    def forward(self, x):
        return self.double_ds_conv(x)
class DoubleConv(nn.Module):
    """(convolution => [BN] => ReLU) * 2"""

    def __init__(self, in_channels, out_channels, mid_channels=None):
        super().__init__()
        # Default the intermediate width to the output width.
        mid_channels = mid_channels or out_channels
        self.double_conv = nn.Sequential(
            nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(mid_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        return self.double_conv(x)
class Down(nn.Module):
    """Downscaling with maxpool then double conv"""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.maxpool_conv = nn.Sequential(
            # Halve the spatial resolution, then convolve.
            nn.MaxPool2d(2),
            DoubleConv(in_channels, out_channels),
        )

    def forward(self, x):
        return self.maxpool_conv(x)
class Up(nn.Module):
    """Upscaling then double conv"""

    def __init__(self, in_channels, out_channels, bilinear=True):
        super().__init__()
        if bilinear:
            # Parameter-free upsampling; the double conv reduces channels.
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
            self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
        else:
            self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2)
            self.conv = DoubleConv(in_channels, out_channels)

    def forward(self, x1, x2):
        x1 = self.up(x1)
        # Pad the upsampled map so it matches the skip connection spatially.
        height_diff = x2.size()[2] - x1.size()[2]
        width_diff = x2.size()[3] - x1.size()[3]
        x1 = F.pad(x1, [width_diff // 2, width_diff - width_diff // 2,
                        height_diff // 2, height_diff - height_diff // 2])
        return self.conv(torch.cat([x2, x1], dim=1))
import torch
from torch.nn import MSELoss, ReLU
from _paritybench_helpers import _mock_config, _mock_layer, _paritybench_base, _fails_compile
# Auto-generated paritybench fixtures: one entry per module under test,
# giving constructor kwargs, forward-call args, and whether TorchScript
# compilation is expected to succeed.
TESTCASES = [
    # (nn.Module, init_args, forward_args, jit_compiles)
    (CBAM,
     lambda: ([], {'input_channels': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (ChannelAttention,
     lambda: ([], {'input_channels': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (DepthwiseSeparableConv,
     lambda: ([], {'in_channels': 4, 'output_channels': 4, 'kernel_size': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (DoubleConv,
     lambda: ([], {'in_channels': 4, 'out_channels': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (DoubleConvDS,
     lambda: ([], {'in_channels': 4, 'out_channels': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (DoubleDSConv,
     lambda: ([], {'in_channels': 4, 'out_channels': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (DoubleDense,
     lambda: ([], {'in_channels': 4, 'hidden_neurons': 4, 'output_channels': 4}),
     lambda: ([torch.rand([4, 4])], {}),
     True),
    (Down,
     lambda: ([], {'in_channels': 4, 'out_channels': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (DownDS,
     lambda: ([], {'in_channels': 4, 'out_channels': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (Flatten,
     lambda: ([], {}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (OutConv,
     lambda: ([], {'in_channels': 4, 'out_channels': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (SmaAt_UNet,
     lambda: ([], {'n_channels': 4, 'n_classes': 4}),
     lambda: ([torch.rand([4, 4, 64, 64])], {}),
     True),
    (SpaceToDepth,
     lambda: ([], {'block_size': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     False),
    (SpatialAttention,
     lambda: ([], {}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (Up,
     lambda: ([], {'in_channels': 4, 'out_channels': 4}),
     lambda: ([torch.rand([4, 1, 4, 4]), torch.rand([4, 3, 4, 4])], {}),
     True),
    (UpDS,
     lambda: ([], {'in_channels': 4, 'out_channels': 4}),
     lambda: ([torch.rand([4, 1, 4, 4]), torch.rand([4, 3, 4, 4])], {}),
     True),
]
class Test_HansBambel_SmaAt_UNet(_paritybench_base):
    """Auto-generated parity tests: one test method per TESTCASES entry,
    each delegating to the paritybench _check harness."""

    def test_000(self):
        self._check(*TESTCASES[0])

    def test_001(self):
        self._check(*TESTCASES[1])

    def test_002(self):
        self._check(*TESTCASES[2])

    def test_003(self):
        self._check(*TESTCASES[3])

    def test_004(self):
        self._check(*TESTCASES[4])

    def test_005(self):
        self._check(*TESTCASES[5])

    def test_006(self):
        self._check(*TESTCASES[6])

    def test_007(self):
        self._check(*TESTCASES[7])

    def test_008(self):
        self._check(*TESTCASES[8])

    def test_009(self):
        self._check(*TESTCASES[9])

    def test_010(self):
        self._check(*TESTCASES[10])

    def test_011(self):
        self._check(*TESTCASES[11])

    def test_012(self):
        self._check(*TESTCASES[12])

    def test_013(self):
        self._check(*TESTCASES[13])

    def test_014(self):
        self._check(*TESTCASES[14])

    def test_015(self):
        self._check(*TESTCASES[15])
|
#!/usr/bin/python3
def merge_bed(bedFile):
    """Sum interval lengths per feature id in a BED file and print the totals.

    Each line is expected as: chrom <TAB> start <TAB> end <TAB> id [...].
    Prints one "<id>\t<total_length>" line per id, in first-seen order.

    Args:
        bedFile: path to the BED file.
    """
    totals = {}
    # 'with' guarantees the handle is closed (it was previously leaked), and
    # the locals no longer shadow the builtins `chr` and `id`.
    with open(bedFile) as handle:
        for line in handle:
            fields = line.rstrip().split('\t')
            begin = int(fields[1])
            end = int(fields[2])
            feature_id = fields[3]
            if feature_id not in totals:
                totals[feature_id] = 0
            totals[feature_id] += end - begin
    for feature_id in totals:
        print('%s\t%i' % (feature_id, totals[feature_id]))
def main():
    """CLI entry point: validate the argument count and merge the given BED file."""
    import sys
    if len(sys.argv) != 2:
        # Exit with a usage message when the BED path is missing.
        sys.exit('python3 %s <bed>' % (sys.argv[0]))
    merge_bed(sys.argv[1])
# Run the CLI only when executed directly, not on import.
if __name__ == '__main__':
    main()
|
from getpass import getpass
import os
import shutil
import sys
import click
from colorama import Back, Fore, Style
import electionday.config as config
import electionday.navigation as navigation
import electionday.voter as voter_model
import electionday.party as party_model
import electionday.database as db
# Use UI menu.
# Shared menu instance from config; used by main() and the --option help text.
menu = config.MENU
@click.command()
@click.option('-o', '--option', default='',
              help=f'Menu option selector ('
                   f"{', '.join(selector for selector in menu.selectors)})")
@click.option('-n', '--name', default='', help='Name of voter')
def main(option: str, name: str):
    """Python voting system prototype

    Login to vote on a party and view current results.
    To vote you must be a registered voter and enter your name and voter ID.
    You can run the application with or without any options.
    Any option values will be reset inside the program loop after first iteration.
    """
    PASSWORD: str = config.PASSWORD
    error_msg: str = ''
    selected_option: str = ''
    user_name: str = ''
    try:
        while True:
            clear()
            header('MAIN MENU')
            menu.view()
            # Show (then reset) any error carried over from the last iteration.
            if error_msg:
                print(Fore.RED, pad(error_msg),Style.RESET_ALL, sep='',
                      end='\n\n')
                error_msg = ''
            # CLI-provided option takes precedence on the first iteration only;
            # it is cleared below once consumed or rejected.
            if option:
                selected_option = option
            else:
                selected_option = get_option()
            if selected_option not in menu.selectors:
                error_msg = (f'Invalid selector ({selected_option}), please try'
                             ' again.')
                option = ''
            elif selected_option == '3':
                # Break out of loop to exit program.
                break
            elif selected_option == '1':
                # --- Voting flow ---
                # Prompt user for name and voter ID.
                if not name:
                    user_name = prompt('Name: ')
                else:
                    user_name = name
                # getpass hides the voter ID while typing.
                voter_id: str = getpass(pad('Voter ID: '))
                # Validate user.
                if not voter_model.is_valid(user_name, voter_id):
                    error_msg = 'Invalid credentials.'
                    name, option = '', ''
                    continue
                valid_voter = voter_model.get_by_voter_id(voter_id)
                # Check if voter has voted.
                if valid_voter.has_voted:
                    error_msg = 'You have already voted.'
                    name, option = '', ''
                    continue
                clear()
                # Display parties without votes.
                parties = party_model.select_all()
                header('CAST VOTE')
                for i, party in enumerate(parties):
                    # Alternate row colours for readability.
                    COLOR = Fore.CYAN if i % 2 == 0 else Fore.GREEN
                    print(
                        COLOR, pad(f'{party.selector} {party.name}'), sep='')
                print(Style.RESET_ALL)
                # Inner loop: keep prompting until a vote is confirmed or
                # the user confirms cancellation.
                while True:
                    print(pad('Select a party to cast your vote.'))
                    print(pad('Enter C to cancel.'))
                    selector: str = prompt('').lower()
                    confirm_cancel: bool = False
                    if selector == 'c':
                        confirm_cancel = prompt('Return to menu? Y/n ').lower() == 'y'
                        if confirm_cancel:
                            break
                    selected_party = party_model.get_by_selector(parties, selector)
                    if selector == 'c':
                        # User has regretted cancelling and should be
                        # prompted to select a party without seeing
                        # invalid selection message.
                        continue
                    elif selected_party is None:
                        print(Fore.RED, pad('Invalid selection.'), Style.RESET_ALL,
                              sep='')
                        continue
                    print(pad(f'You have selected: {selected_party.name.upper()}'))
                    # Annotation fixed: prompt().lower() yields a str, not a bool.
                    confirm_selection: str = prompt('Confirm vote? Y/n ').lower()
                    if confirm_selection != 'y':
                        continue
                    break
                if confirm_cancel:
                    # Cancelled: back to the main menu without voting.
                    continue
                cast_vote(voter=valid_voter, party=selected_party)
                print(pad('Thank you for voting!'))
                print(pad(f'Use password "{config.PASSWORD}" to access current'
                          ' results.'), end='\n\n')
                name, option = '', ''
                go_back()
            elif selected_option == '2':
                # --- Results flow ---
                # Prompt voter for password.
                password = getpass(pad('Enter password to view results: '))
                if password != PASSWORD:
                    error_msg = 'Invalid password.'
                    option = ''
                    continue
                clear()
                parties = party_model.select_results()
                header('CURRENT RESULTS')
                for i, party in enumerate(parties):
                    COLOR = Fore.CYAN if i % 2 == 0 else Fore.GREEN
                    print(
                        COLOR, pad(f'Votes: {party.votes} {party.name}'),
                        sep='')
                print(Style.RESET_ALL)
                winning_parties = party_model.select_winners()
                # Only announce winners once at least one vote exists.
                if winning_parties[0].votes:
                    print(pad('Winning'
                              f" part{'y' if len(winning_parties) == 1 else 'ies'}:"
                              f" {', '.join(party.name for party in winning_parties)}"))
                else:
                    print(pad('No votes'))
                print()
                go_back()
                password = ''
            option = ''
        exit_program()
    except Exception as e:
        print(repr(e))
        raise e
    except KeyboardInterrupt:
        # Ctrl-C anywhere exits cleanly instead of printing a traceback.
        exit_program()
def pad(string: str) -> str:
    """Pad string with blank spaces.

    Args:
        string (str): String to pad

    Returns:
        str: Padded string
    """
    padder = navigation.add_padding(padding=2, direction='left')
    return padder(string)
def clear() -> None:
    """Wrapper for os.system to clear previous output."""
    # Fall back to Windows' `cls` when `clear` is not on PATH.
    if shutil.which('clear') is None:
        os.system('cls')
    else:
        os.system('clear')
def prompt(string: str) -> str:
    """Wrapper for padded input prompt.

    Args:
        string (str): Input message

    Returns:
        string (str): User input value
    """
    padded_message = pad(string)
    return input(padded_message)
def go_back() -> None:
    """Pause until the user confirms returning to the main menu.

    Temporarily halts the program loop so previously printed output stays
    visible until the user presses enter.

    Returns:
        None
    """
    prompt('Back to main menu >')
def get_option() -> str:
    """Ask the user for a menu option.

    Returns:
        str: Selected option selector
    """
    return prompt('Select menu option: ')
def header(string: str) -> None:
    """Print *string* as a padded section header with surrounding blank lines."""
    text = pad(string)
    print(f'\n{text}', end='\n\n')
@db.connect_with_cursor
def cast_vote(cursor: db.sqlite3.Cursor,
              voter: voter_model.Voter,
              party: party_model.Party) -> None:
    """Cast a vote by incrementing party votes by 1 and setting voter
    has_voted attribute/value to True/1.

    Args:
        cursor (db.sqlite3.Cursor): Database connection cursor
        voter (voter_model.Voter): Voter to vote
        party (party_model.Party): Party to vote for

    Raises:
        Exception: Generic exception
    """
    try:
        # Both updates use the same cursor supplied by the decorator
        # (transaction semantics depend on db.connect_with_cursor — confirm).
        voter_model.vote(cursor, voter)
        party_model.add_vote(cursor, party)
    except Exception as e:
        # Log the failure, then re-raise so the caller can react.
        print(repr(e))
        raise e
def exit_program():
    """Close database connection and exit program via sys.exit()."""
    farewell = pad('Goodbye')
    print('\n\n', farewell, sep='', end='\n\n')
    db.CONNECTION.close()
    sys.exit()
# Invoke the click command only when run directly, not on import.
if __name__ == '__main__':
    main()
|
"""empty message
Revision ID: ee248674f637
Revises: ebf728dc4d0d
Create Date: 2017-05-31 15:07:32.715000
"""
# revision identifiers, used by Alembic.
revision = 'ee248674f637'
down_revision = 'ebf728dc4d0d'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply this revision. Alembic auto-detected no schema changes."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade():
    """Revert this revision. Alembic auto-detected no schema changes."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
|
# NOTE(review): this import is unused.
from random import random
# Read a space-separated list of integers, then a round count y. Each round
# takes the last element and moves it back toward the front by its own value.
inp = str(input())
x = list(map(int,inp.split(" ")))
y = int(input())
for i in range(y):
    # Insert a copy of the last value at index len(x)-1-value. When the value
    # exceeds len(x)-1 the index goes negative and Python's insert counts
    # from the end — presumably intended; confirm against the exercise spec.
    x.insert(len(x)-1-x[len(x)- 1],x[len(x)-1])
    # Drop the now-duplicated last element: after reversing, x[0] is that
    # element and remove() deletes its first occurrence (index 0 itself).
    x.reverse()
    x.remove(x[0])
    x.reverse()
print(x)
from flask import render_template,request,redirect,url_for
from . import main
from ..request import get_source,get_articles
from ..models import Source,Articles
# Views
@main.route('/')
def index():
    '''
    View root page function that returns the index page and its data i.e news source
    '''
    category_names = ['general', 'science', 'business', 'technology',
                      'health', 'entertainment', 'sports']
    # Fetch each category's sources and pass every result as a keyword
    # argument named after its category, exactly as the template expects.
    sources = {category: get_source(category) for category in category_names}
    return render_template('index.html', **sources)
@main.route('/sources/<id>')
def articles(id):
    '''
    view articles page
    '''
    source_articles = get_articles(id)
    return render_template('articles.html', articles=source_articles)
|
from src import app
class Restaurant :
    """Plain data holder mirroring one row of the `restaurant` table."""

    # NOTE(review): the `zip` and `rid`/`id` names shadow builtins/conventions;
    # kept as-is to preserve the constructor's keyword interface.
    def __init__(self,rid, name, phone, zip, street,latitude,longitude):
        self.rid=rid
        self.name = name
        self.phone = phone
        self.zip = zip
        self.street = street
        self.latitude = latitude
        self.longitude = longitude
def fetchRestaurant(id):
    """Load the restaurant with primary key *id* from the database.

    Args:
        id: value of the `rid` column to look up.

    Returns:
        Restaurant: object built from the first matching row.
        Raises IndexError when no row matches (same as before).
    """
    mycursor = app.config['DATABASE'].cursor()
    # Parameterised query instead of string concatenation — the old code was
    # vulnerable to SQL injection. %s is the paramstyle of MySQL-style
    # drivers; confirm it matches the configured DB-API driver.
    query = "SELECT * FROM restaurant WHERE rid=%s"
    mycursor.execute(query, (id,))
    row = mycursor.fetchall()[0]
    return Restaurant(row[0], row[1], row[2], row[3], row[4], row[5], row[6])
|
from django.db import models
from django.utils import timezone
# Create your models here.
class Doctors(models.Model):
    """A doctor profile: identity, speciality, photo and availability window."""

    doctor_name=models.CharField(max_length=120)
    speciality=models.CharField(max_length=250)
    # Uploaded photos are stored under MEDIA_ROOT/images.
    image = models.ImageField(upload_to="images")
    specialist=models.CharField(max_length=120)
    about=models.CharField(max_length=500)
    # Defaults to the current date when the record is created.
    date=models.DateField(default=timezone.now)
    # NOTE(review): max_length appears meaningless on TimeField — confirm and
    # consider removing it from the two fields below.
    start_time=models.TimeField(max_length=250)
    end_time=models.TimeField(max_length=250)
    availability=models.CharField(max_length=120)
|
import logging
from collections import defaultdict
import numpy as np
from PySide import QtGui, QtCore
from PySide.QtCore import Qt
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from pubsub import pub
from ...functions import utility, calculations
from ...settings import settings
from ...models import model
class AsymmetryWidget(QtGui.QWidget):
    """Container widget laying out three AsymmetryView panels side by side.

    Redraws its views only while it is the active widget, as signalled on the
    pubsub "active_widget" topic.
    """

    def __init__(self, parent):
        super(AsymmetryWidget, self).__init__(parent)
        self.label = QtGui.QLabel("Asymmetry")
        self.parent = parent
        self.active = False
        # Each view compares the summed metrics of two groups of contact
        # labels (e.g. front: label 0 vs label 2).
        self.asymmetry_front = AsymmetryView(self, "Asymmetry Front", compare=[[0],[2]])
        self.asymmetry_hind = AsymmetryView(self, "Asymmetry Hind", compare=[[1],[3]])
        self.asymmetry_pt = AsymmetryView(self, "Asymmetry PT", compare=[[0,2],[1,3]])
        self.asymmetry_list = [self.asymmetry_front,
                               self.asymmetry_hind,
                               self.asymmetry_pt]
        self.asymmetry_layout = QtGui.QHBoxLayout()
        self.asymmetry_layout.addWidget(self.asymmetry_front)
        self.asymmetry_layout.addWidget(self.asymmetry_hind)
        self.asymmetry_layout.addWidget(self.asymmetry_pt)
        self.main_layout = QtGui.QHBoxLayout()
        self.main_layout.addLayout(self.asymmetry_layout)
        self.setLayout(self.main_layout)
        pub.subscribe(self.active_widget, "active_widget")

    def active_widget(self, widget):
        """Redraw all views when this widget becomes the active one."""
        self.active = False
        if self == widget:
            self.active = True
            progress = 0
            pub.sendMessage("update_progress", progress=progress)
            for view in self.asymmetry_list:
                view.draw()
            pub.sendMessage("update_progress", progress=100)
class AsymmetryView(QtGui.QWidget):
    """Panel showing asymmetry-index (ASI) statistics for one comparison.

    `compare` holds two groups of contact labels. Per measurement, each
    metric is summed over both groups and passed to
    calculations.asymmetry_index; the mean +/- std across measurements is
    shown in one text box per metric.
    """

    def __init__(self, parent, label, compare):
        super(AsymmetryView, self).__init__(parent)
        label_font = settings.settings.label_font()
        self.label = QtGui.QLabel(label)
        self.label.setFont(label_font)
        self.parent = parent
        self.model = model.model
        self.compare = compare
        self.frame = -1
        self.length = 0
        self.ratio = 1
        self.average_toggle = False
        self.labels = {}
        self.text_boxes = {}
        # Metrics for which an asymmetry index is computed and displayed.
        self.columns = ["peak_force","peak_pressure","peak_surface","vertical_impulse",
                        "stance_duration","stance_percentage","step_duration","step_length"]
        self.asi_layout = QtGui.QGridLayout()
        self.asi_layout.setSpacing(10)
        self.asi_layout.addWidget(self.label, 0, 0, columnSpan=1)
        for index, column in enumerate(self.columns):
            label = QtGui.QLabel(" ".join([word.title() for word in column.split("_")]))
            self.labels[column] = label
            self.asi_layout.addWidget(label, index+1, 0)
            text_box = QtGui.QLineEdit("0.0")
            self.text_boxes[column] = text_box
            self.asi_layout.addWidget(text_box, index+1, 1)
        # This adds stretch to an empty column
        self.asi_layout.setColumnStretch(2, 1)
        self.main_layout = QtGui.QVBoxLayout(self)
        self.main_layout.addLayout(self.asi_layout)
        self.main_layout.addStretch(1)
        self.setLayout(self.main_layout)
        pub.subscribe(self.clear_cached_values, "clear_cached_values")
        pub.subscribe(self.filter_outliers, "filter_outliers")

    def filter_outliers(self):
        """Recompute the view when outlier filtering changes (active parent only)."""
        if self.parent.active:
            self.clear_cached_values()
            self.draw()

    def draw(self):
        """Compute and display mean +/- std asymmetry index per metric column."""
        if len(self.model.dataframe) == 0:
            return
        asi = defaultdict(list)
        index = self.model.dataframe.index
        if self.model.outlier_toggle:
            index = self.model.dataframe[self.model.dataframe["filtered"]==False].index
        # I probably should calculate this in the model as well
        # .loc replaces the deprecated (and since removed) DataFrame.ix;
        # the selection here is label-based, so behaviour is identical.
        for measurement_id, measurement_group in self.model.dataframe.loc[index].groupby("measurement_id"):
            contact_group = measurement_group.groupby("contact_label")
            # Check if all the compare contacts are present
            present = True
            for l in self.compare[0]:
                if l not in contact_group.groups:
                    present = False
            for r in self.compare[1]:
                if r not in contact_group.groups:
                    present = False
            if not present:
                continue
            for column in self.columns:
                left = 0.
                right = 0.
                for l in self.compare[0]:
                    if l in contact_group.groups:
                        left += np.mean(contact_group.get_group(l)[column].dropna())
                for r in self.compare[1]:
                    if r in contact_group.groups:
                        right += np.mean(contact_group.get_group(r)[column].dropna())
                # Somehow one or the other can have an opposite sign, so make them absolute
                if column == "step_length":
                    left = abs(left)
                    right = abs(right)
                # Only calculate the ASI if we've progressed from the default
                if left > 0 and right > 0:
                    asi[column].append(calculations.asymmetry_index(left, right))
        for column in self.columns:
            self.text_boxes[column].setText("{:>6} +/- {:>5}".format("{:.2f}".format(np.mean(asi[column])),
                                                                     "{:.2f}".format(np.std(asi[column]))))

    def clear_cached_values(self):
        """Blank all text boxes ahead of a redraw."""
        # Put the screen to black
        for column, text_box in self.text_boxes.items():
            text_box.setText("")

    def resizeEvent(self, event=None):
        # No custom resize behaviour; accept the default.
        pass
|
from PyQt5.Qt import *
from history import Ui_Form
from s1 import history,gameend
from PyQt5 import QtWidgets
class HistoryPane(QWidget, Ui_Form):
    """History browser for finished games, paginated four records at a time."""
    show_search_exit_signal = pyqtSignal()
    show_nexthistory_exit_signal = pyqtSignal()
    show_uppagehistory_exit_signal = pyqtSignal()
    history_exit_signal = pyqtSignal()

    def __init__(self, parent=None, *args, **kwargs):
        super().__init__(parent, *args, **kwargs)
        self.setAttribute(Qt.WA_StyledBackground, True)
        self.setupUi(self)
        self.setWindowTitle('欢乐十三水')
        self.setWindowIcon(QIcon('./1.png'))

    def _page_count(self):
        """Number of pages needed for self.res, 4 records per page.

        BUGFIX: the original used float division (len/4), which skewed the
        paging comparisons and made the last page unreachable.
        """
        return (len(self.res) + 3) // 4

    def _show_page(self, key='id'):
        """Render the current page into the four label slots.

        `key` selects the field shown in the first label ('id' for the
        history list, 'name' for a game-detail lookup). Slots past the end
        of the data are blanked (the original raised IndexError on a short
        last page).
        """
        slots = ((self.id_label_1, self.score_label_1, self.card_label_1),
                 (self.id_label_2, self.score_label_2, self.card_label_2),
                 (self.id_label_3, self.score_label_3, self.card_label_3),
                 (self.id_label_4, self.score_label_4, self.card_label_4))
        base = (self.page - 1) * 4
        for offset, (id_label, score_label, card_label) in enumerate(slots):
            if base + offset < len(self.res):
                record = self.res[base + offset]
                id_label.setText(str(record[key]))
                score_label.setText(str(record['score']))
                card_label.setText(str(record['card']))
            else:
                id_label.setText('')
                score_label.setText('')
                card_label.setText('')

    def history(self, token, user_id):
        """Load the history list for user_id and show its first page."""
        self.pushButton.setEnabled(True)
        self.pushButton_2.setEnabled(True)
        for label in (self.label_7, self.label_16, self.label_17, self.label_21):
            label.setText('战局id:')
        self.page = 1
        # `history` here resolves to the imported backend call, not this method.
        self.res = history(token, user_id)['data']
        self.long = self._page_count()
        self._show_page()

    def previewhistory(self, token, user_id):
        """Go one page back, or tell the user this is already the first page."""
        if self.page >= 2:
            self.page -= 1
            self.long = self._page_count()
            self._show_page()
        else:
            msg_box = QtWidgets.QMessageBox
            # BUGFIX: buttons were 'Yes | Yes' with default 'No' (not offered).
            msg_box.question(self, '温馨提醒', '当前已是第一页', msg_box.Yes | msg_box.No, msg_box.No)

    def nexthistory(self, token, user_id):
        """Advance one page, or tell the user this is already the last page."""
        self.long = self._page_count()
        if self.page < self.long:
            self.page += 1
            self._show_page()
        else:
            msg_box = QtWidgets.QMessageBox
            # BUGFIX: same 'Yes | Yes' button mask as previewhistory.
            msg_box.question(self, '温馨提醒', '当前已是最后一页', msg_box.Yes | msg_box.No, msg_box.No)

    def search_id(self, token):
        """Look up one game by id and show its per-player detail rows."""
        game_id = self.search_le.text()  # renamed: `id` shadowed the builtin
        print(game_id)
        for label in (self.label_7, self.label_16, self.label_17, self.label_21):
            label.setText('Name:')
        self.res = gameend(token, game_id)
        print(self.res['status'])
        if self.res['status'] == 0:
            self.res = self.res['data']['detail']
            self.page = 1
            self._show_page(key='name')
            # Paging buttons make no sense on a single game's detail view.
            self.pushButton.setEnabled(False)
            self.pushButton_2.setEnabled(False)

    def search(self):
        self.show_search_exit_signal.emit()

    def uppage(self):
        self.show_uppagehistory_exit_signal.emit()

    def nextpage(self):
        self.show_nexthistory_exit_signal.emit()

    def exit(self):
        self.history_exit_signal.emit()
if __name__ == '__main__':
    # Manual smoke test: launch the history pane as a standalone window.
    import sys
    application = QApplication(sys.argv)
    pane = HistoryPane()
    pane.show()
    sys.exit(application.exec_())
#!/usr/bin/env python3
import numpy as np
import struct
if __name__ == '__main__':
    # Reference implementation of the target's linear recurrence, kept for
    # documentation (each step multiplies the state by a fixed 4x4 matrix,
    # element-wise mod 0x0096433D):
    '''
    (x0,x1,x2,x3) = (0,0,0,1)
    for i in range(20):
        print(i, x0,x1,x2,x3)
        y0 = x0*16 + x1*15 + x2*14 + x3*13
        y1 = x0*12 + x1*11 + x2*10 + x3* 9
        y2 = x0* 8 + x1* 7 + x2* 6 + x3* 5
        y3 = x0* 4 + x1* 3 + x2* 2 + x3* 1
        x0 = y0 % 0x0096433D
        x1 = y1 % 0x0096433D
        x2 = y2 % 0x0096433D
        x3 = y3 % 0x0096433D
    '''
    # Number of recurrence steps to jump ahead in O(log N) squarings.
    N = 1234567890123456789
    m = np.matrix([[16,15,14,13],[12,11,10,9],[8,7,6,5],[4,3,2,1]])
    # Binary expansion of N, most significant bit first.
    N_bits = []
    while N > 0:
        N_bits.append(N&1)
        N = N>>1
    N_bits.reverse()
    # Square-and-multiply: after the loop, p == m**N (mod 0x0096433D).
    p = m
    for i in N_bits[1:]:
        p = (p*p) % 0x0096433D
        if i == 1:
            p = (p*m) % 0x0096433D
    # Pack each matrix row into a 128-bit value, one 32-bit lane per entry —
    # presumably mirroring the target binary's xmm register layout.
    xmm0 = (p.item(0,3)<<0) | (p.item(0,2)<<32) | (p.item(0,1)<<64) | (p.item(0,0)<<96)
    xmm1 = (p.item(1,3)<<0) | (p.item(1,2)<<32) | (p.item(1,1)<<64) | (p.item(1,0)<<96)
    xmm2 = (p.item(2,3)<<0) | (p.item(2,2)<<32) | (p.item(2,1)<<64) | (p.item(2,0)<<96)
    xmm3 = (p.item(3,3)<<0) | (p.item(3,2)<<32) | (p.item(3,1)<<64) | (p.item(3,0)<<96)
    # 32 key bytes read from the challenge binary at file offset 0x1090.
    f = open('326c15f8884fcc13d18a60e2fb933b0e35060efa8a44214e06d589e4e235fe34','rb').read()[0x00001090:0x000010b0]
    res = int.to_bytes(xmm3,16,'little') + int.to_bytes(xmm2,16,'little') + int.to_bytes(xmm1,16,'little') + int.to_bytes(xmm0,16,'little')
    # XOR every other byte-pair of the computed state against the file bytes.
    ans = b''
    for i in range(0,32,2):
        ans = ans + bytes([res[2*i+0]^f[i],res[2*i+1]^f[i+1]])
    print(ans)
|
from django.urls import path
from . import views
# URL routes: index choice page, list view, create view, and delete-by-id.
urlpatterns = [
    path('', views.url_choice, name = 'choice'),
    path('show', views.show, name = 'show'),
    path('add', views.add, name = 'add'),
    path('delete/<int:id>', views.delete, name = 'delete')  # id captured as int
]
##Yea Won Yoon
##B3
##903076043
##yyoon75/yw1013@gatech.edu
##I worked on the homework assignment alone, using only this semester's course materials.
def tallEnough(height):
    """Return True when height (int-convertible), scaled by the assignment's
    1/0.38370 factor into cm, lands in the inclusive band [120, 190]."""
    cm = int(height) / 0.38370
    return 120 <= cm <= 190
def whereIsWaldo(Int1,Int2):
    """Check a guess at Waldo's (x, y) position; he is at (5, 4).

    BUGFIX: the original immediately overwrote both parameters with
    input() prompts, so the caller's arguments were silently ignored;
    the passed coordinates are now actually used.
    """
    if int(Int1) == 5 and int(Int2) == 4:
        return 'You found Waldo.'
    else:
        return 'Couldn\'t find Waldo. Better luck next time'
import string
def allLetters(userString):
    """Return userString with everything except ASCII letters removed."""
    return "".join(ch for ch in userString if ch in string.ascii_letters)
def replaceLetter(aString,aLetter):
    """Interactively pick a character that occurs in aString, then print
    aString with every occurrence of that character replaced by aLetter."""
    chosen = input("Input a letter")
    # Re-prompt until the chosen character matches some character in aString.
    while not any(ch == chosen for ch in aString):
        chosen = input("Letter not in string. Input a letter")
    pieces = []
    for ch in aString:
        pieces.append(aLetter if ch == chosen else ch)
    print("".join(pieces))
def CountUp(start,end,increment):
    """Print start, start+increment, ... while <= end, then 'Done!'."""
    current = start
    while current <= end:
        print(current)
        current += increment
    print("Done!")
def numMountainRange(X):
    """Print X rows: row k is k's digit repeated k times, a gap of
    2*(X-k) spaces, then the digit repeated k times again."""
    for row in range(1, X + 1):
        wing = str(row) * row
        gap = " " * ((X - row) * 2)
        print(wing + gap + wing)
def printTimestable():
    """Print the standard 9x9 multiplication table.

    Consistency fix: this body duplicated printTimes(9) line for line;
    it now delegates to the general routine, producing identical output.
    """
    printTimes(9)
    return None
def printTimes(N):
    """Print an N-by-N multiplication table with a 'Times:' header row."""
    print("Times: ",end = "")
    for col in range(1, N + 1):
        print(col,"\t",end = "")
    for row in range(1, N + 1):
        print("\n",row,"\t",end = "")
        for col in range(1, N + 1):
            print(row * col,"\t",end = "")
    return None
|
# Read "START,STOP,STEP" from the user; re-prompt until start < stop.
# NOTE(review): step is never validated — step <= 0 raises or yields an
# empty range; confirm whether that needs guarding.
numbers = input('Podaj strat, stop i step (w formacie START,STOP,STEP): ')
while int(numbers.split(",")[0]) >= int(numbers.split(",")[1]):
    print('start ma byc mniejszy niz stop')
    numbers = input('Podaj strat, stop i step (w formacie START,STOP,STEP): ')
start = int(numbers.split(",")[0])
stop = int(numbers.split(",")[1])
step = int(numbers.split(",")[2])
# Count even and odd values over the inclusive [start, stop] range.
count_even=0
count_noteven=0
for i in range(start, stop+1, step):
    if i % 2:
        count_noteven += 1
    else:
        count_even += 1
print('parzystych: ', count_even, ' nieparzystych: ', count_noteven)
from random import randrange as randomNum
from random import choice as randomChar
from datetime import datetime as when
from time import sleep as snooze
import string, re
def EventGen():
    """Append one synthetic SCADA event line to the demo Splunk log.

    Line format: "<timestamp> <category> <type> <4-char text>".
    """
    categories = ['ADVISORY', 'ALARM', 'CLEARED']
    category = categories[randomNum(3)]
    types = ['ALFA', 'BRAVO']
    # Renamed from 'type' to avoid shadowing the builtin.
    event_type = types[randomNum(2)]
    textLetters = string.ascii_uppercase
    # NOTE(review): randomNum(9) yields 0-8, so the digit 9 never appears
    # in the text field — confirm whether randomNum(10) was intended.
    t1 = randomChar(textLetters)
    t2 = str(randomNum(9))
    t3 = str(randomNum(9))
    t4 = randomChar(textLetters)
    text = t1 + t2 + t3 + t4
    # BUGFIX: regex patterns should be raw strings — '\s' in a plain string
    # literal is an invalid escape and warns on modern Pythons.
    time = re.sub(r'\s', 'T', str(when.now()))
    time = time[0:23]
    event = time + ' ' + category + ' ' + event_type + ' ' + text
    # NOTE: This app was just a quick effort to play with a use case, so I didn't go to hard on this script.
    # If necessary, change the following line to the correct directory where this script is installed!
    with open('/opt/splunk/etc/apps/scada_use_case/log/scada.log', 'a') as log:
        log.write(event + '\n')
# Emit six events, pausing 9 seconds between consecutive ones.
for _ in range(5):
    EventGen()
    snooze(9)
EventGen()
|
# Copyright (c) 2019 fortiss GmbH
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
import numpy as np
from bark.viewer import Viewer
from bark.geometry import *
from bark.models.dynamic import *
class BaseViewer(Viewer):
    """Abstract drawing backend: reads visualization parameters and walks the
    world; subclasses implement the draw* primitives (no-ops here)."""

    def __init__(self, params=None):
        Viewer.__init__(self)
        # color parameters
        # agents
        self.color_agents = params["Visualization"]["Agents"]["ColorVehicle", "Color of agents", (0,102/255,0)]
        self.alpha_agents = params["Visualization"]["Agents"]["AlphaVehicle", "Alpha of agents", 0.8]
        self.route_color = params["Visualization"]["Agents"]["ColorRoute", "Color of agents routes", (0.2,0.2,0.2)]
        self.draw_route = params["Visualization"]["Agents"]["DrawRoute", "Draw Route of each agent", True]
        # map
        self.color_lane_boundaries = params["Visualization"]["Map"]["Lanes"]["Boundaries"]["Color", "Color of agents except ego vehicle", (0.7,0.7,0.7)]
        self.alpha_lane_boundaries = params["Visualization"]["Map"]["Lanes"]["Boundaries"]["Alpha", "Color of agents except ego vehicle", 1.0]
        self.plane_color = params["Visualization"]["Map"]["Plane"]["Color", "Color of the background plane", (1, 1, 1, 1)]
        self.plane_alpha = params["Visualization"]["Map"]["Plane"]["Alpha", "Alpha of the background plane", 1.0]
        self.parameters = params

    # --- primitive drawing hooks, intentionally no-ops in the base class ---
    def drawPoint2d(self, point2d, color, alpha):
        pass
    def drawLine2d(self, line2d, color, alpha):
        pass
    def drawPolygon2d(self, polygon, color, alpha):
        pass
    def drawTrajectory(self, trajectory, color):
        pass
    def drawObstacle(self, obstacle):
        pass
    def getColor(self, color):
        pass
    def show(self,block=False):
        pass
    def clear(self):
        pass

    def drawAgents(self, world):
        """Draw every agent in the world."""
        for _, agent in world.agents.items():
            self.drawAgent(agent)

    def drawWorld(self, world):
        """Draw the map, then all agents."""
        self.drawMap(world.map.get_open_drive_map())
        # Consistency fix: reuse drawAgents instead of duplicating its loop.
        self.drawAgents(world)

    def drawMap(self, map):
        # draw the boundary of each lane
        for _, road in map.get_roads().items():
            for lane_section in road.lane_sections:
                for _, lane in lane_section.get_lanes().items():
                    self.drawLine2d(lane.line, self.color_lane_boundaries, self.alpha_lane_boundaries)

    def drawAgent(self, agent):
        """Draw a polygon-shaped agent at its current pose (x, y, theta)."""
        shape = agent.shape
        if isinstance(shape, Polygon2d):
            pose = np.zeros(3)
            # pybind creates column based vectors, initialization maybe row-based -> we consider both
            state = agent.state
            pose[0] = state[int(StateDefinition.X_POSITION)]
            pose[1] = state[int(StateDefinition.Y_POSITION)]
            pose[2] = state[int(StateDefinition.THETA_POSITION)]
            transformed_polygon = shape.transform(pose)
            # NOTE(review): alpha is hard-coded to 1.0 although
            # self.alpha_agents is configured above — confirm which is intended.
            self.drawPolygon2d(transformed_polygon, self.color_agents, 1.0)
            if self.draw_route:
                self.drawRoute(agent)

    def drawDrivingCorridor(self, corridor, color):
        """Draw the center, inner, and outer lines of one driving corridor."""
        self.drawLine2d(corridor.center, color, 1)
        self.drawLine2d(corridor.inner, color, 1)
        self.drawLine2d(corridor.outer, color, 1)

    def drawRoute(self, agent):
        # TODO(@hart): visualize the global as well as the local driving corridor
        self.drawDrivingCorridor(agent.local_map.get_driving_corridor(), self.route_color)
        self.drawDrivingCorridor(agent.local_map.get_horizon_driving_corridor(), (0.8, 0.72, 0.2))
|
import numpy as np
import time
def find_root(x):
    """Union-find root lookup with path compression on the global `zpar` map.

    `zpar[r, c]` holds the [row, col] representative of pixel (r, c);
    a pixel whose entry equals itself is a root.
    """
    if np.array_equal(zpar[x[0], x[1]], x):
        return x
    # Path compression: point this pixel directly at its root.
    zpar[x[0], x[1]] = find_root(zpar[x[0], x[1]])
    return zpar[x[0], x[1]]
def neighbors(p):
    """4-connected neighbors of pixel p=[row, col], clipped to the image
    bounds given by the module-level `width` and `height`.

    Order is preserved from the original: up, down, left, right.
    """
    row, col = p[0], p[1]
    result = []
    if row > 0:
        result.append([row - 1, col])
    if row < width - 1:
        result.append([row + 1, col])
    if col > 0:
        result.append([row, col - 1])
    if col < height - 1:
        result.append([row, col + 1])
    return result
def reverse_sort(f):
    """Coordinates of f's pixels ordered by decreasing gray value.

    Returns an array of [row, col] pairs; within one gray level the order
    is np.where's row-major order.
    """
    ordered = []
    for level in range(f.max(), f.min() - 1, -1):
        coords = np.array(np.where(f == level)).T
        ordered.extend(list(coords))
    return np.array(ordered)
def compute_tree(f):
    """Build a parent forest over f's pixels, processed from brightest to
    darkest (a union-find, max-tree-style construction).

    Uses the module-level arrays `parent` and `zpar` (union-find auxiliary)
    and the `undef` sentinel; returns the processing order R and `parent`.
    """
    R = reverse_sort(f)
    for p in R:
        # Each pixel starts as its own tree node / union-find root.
        parent[p[0], p[1]] = p
        zpar[p[0], p[1]] = p
        for n in neighbors(p):
            # Only merge neighbors that were already processed (zpar set).
            if not np.array_equal(zpar[n[0], n[1]], np.array([undef, undef])):
                r = np.array(find_root(n))
                if not np.array_equal(r, p):
                    # Attach the neighboring component's root below p.
                    parent[r[0], r[1]] = p
                    zpar[r[0], r[1]] = p
    canonize_tree(parent, f, R)
    return R, parent
def canonize_tree(parent, f, R):
    """Canonize the parent links: walking R from lowest to highest value,
    re-point each node past its parent q whenever q and q's own parent
    share the same gray level."""
    for p in R[::-1]:
        q = parent[p[0], p[1]]
        grand = parent[q[0], q[1]]
        if f[grand[0], grand[1]] == f[q[0], q[1]]:
            parent[p[0], p[1]] = parent[q[0], q[1]]
import cv2
# f = cv2.imread('lena.bmp', 0)
# Tiny test image (2 rows x 5 cols).
f = np.array([[3, 3, 1, 4, 2], [4, 1, 2, 3, 1]])
# f = np.array([[15,13,16],[12,12,10],[16,12,14]])
undef = -1
width, height = f.shape
# parent/zpar hold one [row, col] pair per pixel; `undef` marks unprocessed.
parent = np.ones((width, height, 2), dtype=int) * undef
zpar = np.ones((width, height, 2), dtype=int) * undef
# print compute_tree(f)
t = time.time()
R, parent = compute_tree(f)
t = time.time() - t
# BUGFIX: Python 2 print statements -> print() calls so the file parses
# under Python 3 (single-argument print() output is unchanged on either).
print(t)
print(R)
print(parent)
|
from Wizard import Step, Wizard
from tkinter import *
from datetime import datetime
def set_up_participant_info(self, my_frame):
    """Populate my_frame with a start-time row plus participant/session ID
    entry rows, and return the configured frame.

    Every keystroke in an entry is mirrored into the wizard's data dict
    via self.updateEntry.
    """
    current_row = 0
    start_time_label = Label(my_frame, text="Start Time")
    start_time_label.grid(row=current_row, sticky=W)
    # BUGFIX: the original round-tripped str(datetime.now()) through
    # strptime('%Y-%m-%d %H:%M:%S.%f'), which raises ValueError whenever
    # the microsecond field is exactly 0 (str() then omits the ".%f" part).
    # The direct string is identical in every other case.
    start_time = str(datetime.now())
    start_time_timestamp = Label(my_frame, text=start_time)
    start_time_timestamp.grid(row=current_row, column=1, sticky=E)
    current_row += 1
    participant_id_label = Label(my_frame, text="Participant ID")
    session_id_label = Label(my_frame, text="Session ID")
    participant_id_stringvar = StringVar()
    participant_id_stringvar.trace("w", lambda name, index, mode, sv=participant_id_stringvar: self.updateEntry("participant_id", participant_id_stringvar))
    participant_id_input = Entry(my_frame, textvariable=participant_id_stringvar)
    session_id_stringvar = StringVar()
    session_id_stringvar.trace("w", lambda name, index, mode, sv=session_id_stringvar: self.updateEntry("session_id", session_id_stringvar))
    session_id_input = Entry(my_frame, textvariable=session_id_stringvar)
    participant_id_label.grid(row=current_row, sticky=W)
    participant_id_input.grid(row=current_row, column=1, sticky=E)
    current_row += 1
    session_id_label.grid(row=current_row, sticky=W)
    session_id_input.grid(row=current_row, column=1, sticky=E)
    current_row += 1
    # Give every grid cell a minimum size so the layout doesn't collapse.
    col_count, row_count = my_frame.grid_size()
    for col in range(col_count):
        my_frame.grid_columnconfigure(col, minsize=20)
    for row in range(row_count):
        my_frame.grid_rowconfigure(row, minsize=30)
    return my_frame
class ParticipantInfo(Step):
    """Wizard step that collects participant and session identifiers."""

    def __init__(self, parent, data, stepname, step_label):
        super().__init__(parent, data, stepname)
        self.step_label = step_label
        # Bold title at the top of the step.
        title = Label(self, text=self.step_label, font="bold")
        title.pack(side="top", fill="x", padx=5, pady=5)
        # Build and mount the entry form.
        form = Frame(self, width=825)
        form = set_up_participant_info(self, form)
        form.pack(padx=15, pady=15)
        self.data[self.stepname]["step_label"] = self.step_label

    def updateChecklist(self, field_label, state):
        # Persist a checkbox state under this step's data bucket.
        self.data[self.stepname][field_label] = state

    def updateEntry(self, field_label, field_input_text):
        # Persist the current text of an entry widget (a StringVar).
        self.data[self.stepname][field_label] = field_input_text.get()
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
# Use the README as the long description; fall back to empty when absent.
try:
    with open(os.path.join(here, 'README.md')) as f:
        README = f.read()
except IOError:
    # BUGFIX: the original also assigned a stray, never-used VERSION here
    # (the package version is the literal passed to setup() below).
    README = ''

install_requires = ['pygithub']

setup(
    name='github_star_purge',
    version='0.1.0',
    description="A utility to help you remove all of the Github stars from your profile",
    long_description=README,
    packages=find_packages(),
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 3.6',
    ],
    keywords='github',
    author='Darin Gordon',
    author_email='dkcdkg@gmail.com',
    url='https://www.github.com/dowwie/github_star_purge',
    license='Apache License 2.0',
    install_requires=install_requires,
    entry_points="""\
    [console_scripts]
    github_star_purge = github_star_purge.star_purge:main
    """
)
|
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 28 14:58:06 2018
@author: Mikw
"""
import sys
sys.version
#Import Libraries
import tensorflow as tf
import pandas as pd
import numpy as np
import os
import matplotlib
import matplotlib.pyplot as plt
import random
from pandas import DataFrame
from pandas import concat
import tensorflow as tf
import shutil
import tensorflow.contrib.learn as tflearn
import tensorflow.contrib.layers as tflayers
from tensorflow.contrib.learn.python.learn import learn_runner
import tensorflow.contrib.metrics as metrics
import tensorflow.contrib.rnn as rnn
#Main File for doing shit
#import panda
import talib
import algo.get
import algo.getpast
import common.config
import common.args
import datetime
tf.__version__
buy = []
sell = []
trade = []
# Walk the prediction series and track a single open position.
for ii in range(201, 4990):
    if np.sum(training_y_pred[0, ii]) > 4:  # buy signal
        if len(buy) == 0 and len(sell) == 0:
            # BUGFIX: 'buy.append=close[ii]' assigned to the bound method
            # instead of calling it (AttributeError at runtime); same for
            # the two appends below.
            buy.append(close[ii])
    if np.sum(training_y_pred[0, ii]) < 4:  # sell signal
        if len(buy) == 0 and len(sell) == 0:
            sell.append(close[ii])  # no trades open: short the currency
        if len(buy) > 0:
            # NOTE(review): closes against sell[0], which is empty whenever a
            # long is open — original logic preserved; confirm intent.
            trade.append(close[ii] - sell[0])  # if long trade open, then close this
import expression as xp
import settings as st
# Operator precedence table: +/- bind loosest (0), multiplicative ops (1),
# exponentiation (2); anything else (parens, atoms) ranks highest (3).
precedents = {
    "+": 0,
    "-": 0,
    "*": 1,
    "/": 1,
    '\u00B7': 1,
    "^": 2
}

def pre(op):
    """Return the precedence of op, defaulting to 3 for parens and numbers."""
    # Idiom: dict.get with a default replaces the membership-test-then-index.
    return precedents.get(op, 3)
def wrap(stri,foReallyThough):
    """Parenthesize stri when foReallyThough is truthy; otherwise pass through."""
    return "(" + stri + ")" if foReallyThough else stri
def wrapBrack(stri,foReallyThough):
    """Brace-wrap stri when foReallyThough is truthy; otherwise pass through."""
    return "{" + stri + "}" if foReallyThough else stri
def clearCursor(exp):
    """Recursively clear the cursor flag on exp and, for composite
    Expressions, on every child node."""
    exp.cursor = False
    # Exact type check kept deliberately (matches the original's semantics).
    if type(exp) == xp.Expression:
        for child_exp in exp.expList:
            clearCursor(child_exp)
def expToStr(exp):
    """Render an expression tree as a string, splicing "|" in at the cursor
    position and parenthesizing children that bind more loosely than their
    parent operator.
    """
    if(type(exp)==xp.NoOpExpression):
        # Leaf node: its own string representation, cursor spliced in.
        if(exp.strRep == " "):
            if exp.cursor:
                return "|"
            return ""
        sr = exp.strRep
        if exp.cursor:
            return sr[:exp.cursor_idx] + "|" + sr[exp.cursor_idx:]
        else:
            return sr
    if(exp.op.strRep == "()"):
        return wrap(expToStr(exp.expList[0]), True)
    if(exp.op.strRep == "{}"):
        return wrapBrack(expToStr(exp.expList[0]), True)
    if(exp.op.strRep == "("):
        # Half-open paren, closed with a white square bracket.
        return "(" + expToStr(exp.expList[0]) + "\u2986"
    if(exp.op.strRep == ")"):
        return "\u2985" + expToStr(exp.expList[0]) + ")"
    elif exp.op.strRep in precedents:
        # Does either child bind more loosely than this operator?
        firstLower = pre(exp.expList[0].op) < pre(exp.op)
        secondLower = pre(exp.expList[1].op) < pre(exp.op)
        if exp.op.strRep == "*":
            # Implicit multiplication: juxtapose the children.
            return wrap(expToStr(exp.expList[0]),firstLower) + wrap(expToStr(exp.expList[1]),secondLower)
        if exp.op.strRep == '\u00B7':
            # Explicit dot multiplication rendered as '*'.
            return wrap(expToStr(exp.expList[0]),firstLower) + '*' + wrap(expToStr(exp.expList[1]),secondLower)
        if exp.op.strRep == "/":
            if exp.cursor:
                # BUGFIX: the operands are expression nodes, not strings —
                # the original concatenated them directly and raised
                # TypeError; they must be rendered via expToStr first.
                numer = expToStr(exp.expList[0])
                denom = expToStr(exp.expList[1])
                if exp.cursor_idx == 0:
                    return "|" + numer + "/" + denom
                if exp.cursor_idx == 1:
                    # NOTE(review): the original emitted no "|" for
                    # cursor_idx == 1 — that behavior is preserved; confirm
                    # where the marker was meant to go.
                    return numer + "/" + denom
        return wrap(expToStr(exp.expList[0]),firstLower) + exp.op.strRep + wrap(expToStr(exp.expList[1]),secondLower)
|
#!/usr/bin/python3
#------------------------------------------
# loop #
#-----------------input()------------------
#messages = input("input name:")
#print (messages)
#------------------------------------------
#num = input("input a zheng num:")
#num = int(num) #用int()将字符串转为整型
#
#if num%2 == 0:
# print (True)
#else :
# print (False)
#------------------------------------------
# while #
#------------------------------------------
#for循环用于针对集合中的每个元素都一个代码块,而while循环不断地运行,直到指定的条件不满足为止。
#num = input("input a number:")
#num = int(num)
#while num < 5:
# num = int(input('continue:'))
#------------------------------------------
#num = 0
#while num < 5:
# print(str(num))
# if num == 2:
# break #用break来终止循环
# num += 1 #continue类似
#------------------------------------------
#names = ['lz','atom','lz','tom','lz']
#while 'lz' in names:
# names.remove('lz') #用while和remove来剔除多个重复项
#print (names)
#------------------------------------------
#users = { #字典中包含字典
# 'atom':{
# 'first':'A',
# 'last':'atom',
# 'love':'c'
# },
#
# 'lz':{
# 'first':'L',
# 'last':'z',
# 'love':'python'
# }
# }
#
#for user_name,user_info in users.items():
# print ("\nuser:"+user_name)
# full_name = user_info['first'] + user_info['last']
# love = user_info['love']
#
# print ('full name:'+full_name)
# print ('love:'+love)
#------------------------------------
# responses = {}
# polling_active = True
# while polling_active:
# name = input("\nWhat is your name?:")
# response = input("Which mountain would you like to climb someday?")
# responses[name] = response
# repeat = input("Would you like to let respond ?(yes/no)")
# if repeat == 'no':
# polling_active = False
# print("\n --Poll Results ---")
# for name,response in responses.items():
# print(name + "would like to climb " + response + ".")
#-----------------------------------
|
"""
# 예시코드
# 1. 이미지 좌표 추출
from select_ROI import ROI2csv
ROI2csv = ROI2csv('data/ElectricityMeter_ROI_screen') # roi 좌표를 저장할 디렉토리 입력하기
ROI2csv.roi2csv('./data/ElectricityMeter', 0, 20) # 읽어올 이미지의 디렉토리, 인덱스 처음, 끝 입력
# 적절한 roi 를 드래그하고 esc 를 눌러 좌표 추출을 반복함
# 2. 좌표를 이미지에 표시
from select_ROI import ROI2Img
r = ROI2Img(roi_path='./data/roi') # csv 디렉토리 입력
r.roi2img('./data/ElectricityMeter', 0, 20) # 읽어올 이미지의 디렉토리, 인덱스 처음, 끝 입력
"""
# 패키지 임포트
import os
import cv2
import csv
from prep_img import GetImg
# 예외처리 클래스
# Custom exception carrying a user-facing (Korean) message.
class MyError(Exception):
    def __init__(self, msg='init_error_msg'):
        # Message to display when the error is printed.
        self.msg = msg

    def __str__(self):
        # Printing the exception yields just the stored message.
        return self.msg
# 경로가 없으면 생성해주는 클래스
class CreatePath:
    """Create a directory path when it does not already exist."""

    def create_path(self, file_path):
        """Ensure file_path exists as a directory; print a notice on failure."""
        try:
            # exist_ok avoids the check-then-create race of the original
            # `if not os.path.exists(...)` pattern.
            os.makedirs(file_path, exist_ok=True)
        except OSError:
            print(f"{file_path}는 없는 경로입니다. 해당 이미지의 text_roi 폴더를 생성합니다.")
# 디렉토리 내의 파일이름을 리스트로 가져오는 클래스
class Dir2File:
    """List the file names inside a directory, with extensions dropped."""

    def __init__(self, file_path):
        # Directory holding the image files.
        self.file_path = file_path
        # Raw directory entries as reported by the OS.
        self.file_names = os.listdir(self.file_path)

    def filename(self):
        """Return each entry's name truncated at its first '.'."""
        stems = []
        for entry in self.file_names:
            stems.extend(entry.split('.')[:1])
        return stems
# 이미지에서 마우스로 ROI 를 추출하고 esc 키를 누르면 좌표가 csv 파일로 저장하는 클래스
class ROI2csv:
    """Interactively drag ROIs on images and save each ROI's corner
    coordinates to a CSV file per image."""

    # Directory where the ROI coordinate files will be saved.
    def __init__(self, roi_path):
        self.roi_path = roi_path

    # Run ROI extraction for images n through m in the directory.
    def roi2csv(self, file_path, n, m):
        """For each image, let the user drag an ROI, draw it, and write
        [x1, y1, x2, y2] to '<roi_path>/<image name>'; ESC advances."""
        try:
            if not os.path.exists(file_path):
                raise MyError(f"{file_path}는 없는 경로입니다. 이미지 경로를 확인하십시오.")
            if not os.path.exists(self.roi_path):
                raise MyError(f"{self.roi_path}는 없는 경로입니다. 좌표를 저장할 경로를 확인하십시오.")
        except MyError as e:
            print(e)
        # Only runs when both directories exist.
        else:
            dir2file = Dir2File(file_path=file_path)
            file_path = dir2file.file_path
            src_names = dir2file.filename()
            # Load the image data.
            for i, src_name in enumerate(src_names[n:m]):
                src_file = file_path + '/' + src_name + '.jpg'
                print(f"- {(i + 1) / (m - n) * 100:.1f}%.....{i + 1}번째_수행파일:{src_file}")  # progress
                src = cv2.imread(src_file, cv2.IMREAD_GRAYSCALE)
                # Resize the image.
                src_height, src_width = src.shape[:2]
                # Scale so the image width becomes 700.
                ratio = 700 / src_width
                src_height, src_width = int(src_height * ratio), int(src_width * ratio)
                # cv2.resize takes (width, height) order.
                src = cv2.resize(src, (src_width, src_height))
                while True:
                    # Drag the desired template region on the image;
                    # roi --> (x, y, width, height).
                    roi = cv2.selectROI("src", src)
                    # Convert to corner coordinates.
                    x1 = roi[0]
                    y1 = roi[1]
                    x2 = roi[0] + roi[2]
                    y2 = roi[1] + roi[3]
                    coordinates = [x1, y1, x2, y2]
                    # Echo the ROI coordinates.
                    print("- roi 좌표: ", coordinates, "[x1, y1, x2, y2]")
                    # Draw the rectangle.
                    rect = cv2.rectangle(src, (x1, y1), (x2, y2), (0, 0, 255), thickness=2)
                    cv2.imshow('roi', rect)
                    # Save the coordinates; the file is named after the image.
                    roi_file = self.roi_path + '/' + src_name
                    with open(roi_file, 'w', newline='') as out:
                        csv.writer(out).writerow(coordinates)
                    # After dragging, press ESC to move on to the next image.
                    key = cv2.waitKey()
                    if key == 27:  # ESC key
                        break
            cv2.destroyAllWindows()
# 이미지에 ROI 좌표를 표시하는 클래스
class ROI2Img:
    """Draw stored ROI coordinates (and a derived meter-data box) on images."""

    # Directory the ROI coordinate files are read from.
    def __init__(self, roi_path):
        self.roi_path = roi_path

    # Read back the saved ROI coordinates for one image.
    def read_roi(self, src_name):
        """Return [x1, y1, x2, y2] as ints from '<roi_path>/<src_name>'."""
        roi_file = self.roi_path + '/' + src_name
        with open(roi_file, 'r', newline='') as coordinates:
            coo_obj = csv.reader(coordinates, delimiter=',')  # reader object
            # Rows can only be consumed while the file is open, so the
            # flattening (list of rows -> flat list) stays inside the with.
            coo = sum([c for c in coo_obj], [])
        # Convert the string fields to numbers.
        coo = [int(a) for a in coo]
        return coo  # x1, y1, x2, y2

    # Draw the stored ROI for images n through m in the directory.
    def roi2img(self, file_path, n, m):
        """For each image, draw its saved ROI plus the enlarged meter-data
        region; ESC advances to the next image."""
        try:
            if not os.path.exists(file_path):
                raise MyError(f"{file_path}는 없는 경로입니다. 이미지 경로를 확인하십시오.")
            if not os.path.exists(self.roi_path):
                raise MyError(f"{self.roi_path}는 없는 경로입니다. 좌표를 저장할 경로를 확인하십시오.")
        except MyError as e:
            print(e)
        # Only runs when both directories exist.
        else:
            dir2file = Dir2File(file_path=file_path)
            file_path = dir2file.file_path
            src_names = dir2file.filename()
            # Load the image data.
            for i, src_name in enumerate(src_names[n:m]):
                src_file = file_path + '/' + src_name + '.jpg'
                print(f"- {(i + 1) / (m - n) * 100:.1f}%.....{i + 1}번째_수행파일:{src_file}")  # progress
                # src = cv2.imread(src_file, cv2.IMREAD_COLOR)
                x = GetImg(src_name)
                src = x.printHSV()
                # Resize the image.
                src = x.resize_check(src)
                while True:
                    # Read the stored coordinates.
                    coo = self.read_roi(src_name)
                    x1, y1, x2, y2 = coo
                    # Rectangle around the ROI itself.
                    rect = cv2.rectangle(src, (x1, y1), (x2, y2), (0, 255, 0), thickness=2)
                    cv2.imshow('roi', rect)
                    # Rectangle around the meter-data area: the ROI grown by
                    # fixed factors in each direction.
                    width = x2 - x1
                    height = y2 - y1
                    x1_ = int(x1 - 0.4*width)
                    x2_ = int(x2 + 0.4*width)
                    y1_ = int(y1 - 0.7*height)
                    y2_ = int(y2 + 3*height)
                    rect = cv2.rectangle(src, (x1_, y1_), (x2_, y2_), (0, 255, 0), thickness=2)
                    cv2.imshow('roi', rect)
                    # Press ESC to move on to the next image.
                    key = cv2.waitKey()
                    if key == 27:  # ESC key
                        break
            cv2.destroyAllWindows()
|
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 12 22:22:20 2019
@author: arian
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.animation as animation
#==============================================================================
# Méthodes fournies
#==============================================================================
def read_file ( filename ):
    """Read the Old Faithful geyser data file.

    Skips the header up to the "eruptions waiting" column-title line, then
    parses each remaining line as "<row#> <eruption> <waiting>" and returns
    an (n, 2) array of (eruption, waiting) pairs.
    """
    with open(filename, "r") as infile:
        # Consume the header, including the column-title line itself.
        for line in infile:
            if line.find("eruptions waiting") != -1:
                break
        # Collect eruption/waiting pairs from the remaining lines.
        values = []
        for line in infile:
            nb_ligne, eruption, waiting = [float(tok) for tok in line.split()]
            values.append(eruption)
            values.append(waiting)
    # Flat list -> 2D array, two columns per observation.
    data = np.asarray(values)
    data.shape = (int(data.size / 2), 2)
    return data
def dessine_1_normale(params):
    """Contour plot of one bivariate normal over its +/-2-sigma bounding box.

    params = (mu_x, mu_z, sigma_x, sigma_z, rho).
    """
    # Unpack the parameters.
    mu_x, mu_z, sigma_x, sigma_z, rho = params
    # Figure corners: two standard deviations around each mean.
    x_min = mu_x - 2 * sigma_x
    x_max = mu_x + 2 * sigma_x
    z_min = mu_z - 2 * sigma_z
    z_max = mu_z + 2 * sigma_z
    # Build the evaluation grid.
    x = np.linspace(x_min, x_max, 100)
    z = np.linspace(z_min, z_max, 100)
    X, Z = np.meshgrid(x, z)
    # Evaluate the density on the grid.
    # NOTE(review): this fills norm[i,j] = f(x[i], z[j]) while
    # dessine_normales fills norm[j,i] — the contour here may be
    # transposed relative to the axes; confirm which is intended.
    norm = X.copy()
    for i in range(x.shape[0]):
        for j in range(z.shape[0]):
            norm[i,j] = normale_bidim(x[i], z[j], params)
    # Display.
    fig, ax = plt.subplots()
    ax.contour(X, Z, norm, cmap=cm.autumn)
    # Added to keep the same scale on both axes.
    ax.set_aspect("equal")
def dessine_normales ( data, params, weights, bounds, ax ):
    """Plot both mixture components as contours plus the data points on ax.

    params[k] = (mu_x, mu_z, sigma_x, sigma_z, rho) for component k;
    bounds = (x_min, x_max, z_min, z_max); weights is currently unused
    (the weighted terms are commented out below).
    """
    # Unpack both components' parameters.
    mu_x0, mu_z0, sigma_x0, sigma_z0, rho0 = params[0]
    mu_x1, mu_z1, sigma_x1, sigma_z1, rho1 = params[1]
    # Figure corners come from the caller-supplied bounds.
    x_min = bounds[0]
    x_max = bounds[1]
    z_min = bounds[2]
    z_max = bounds[3]
    # Build the evaluation grid.
    nb_x = nb_z = 100
    x = np.linspace ( x_min, x_max, nb_x )
    z = np.linspace ( z_min, z_max, nb_z )
    X, Z = np.meshgrid(x, z)
    # Evaluate both densities on the grid.
    norm0 = np.zeros ( (nb_x,nb_z) )
    for j in range ( nb_z ):
        for i in range ( nb_x ):
            norm0[j,i] = normale_bidim ( x[i], z[j], params[0] )# * weights[0]
    norm1 = np.zeros ( (nb_x,nb_z) )
    for j in range ( nb_z ):
        for i in range ( nb_x ):
            norm1[j,i] = normale_bidim ( x[i], z[j], params[1] )# * weights[1]
    # Draw the two contour sets and the dataset points.
    ax.contour ( X, Z, norm0, cmap=cm.winter, alpha = 0.5 )
    ax.contour ( X, Z, norm1, cmap=cm.autumn, alpha = 0.5 )
    for point in data:
        ax.plot ( point[0], point[1], 'k+' )
def find_bounds(data, params):
    """Bounding box (x_min, x_max, z_min, z_max) covering both components'
    +/-2-sigma extents as well as every data point."""
    mu_x0, mu_z0, sigma_x0, sigma_z0, _rho0 = params[0]
    mu_x1, mu_z1, sigma_x1, sigma_z1, _rho1 = params[1]
    x_lo = min(mu_x0 - 2 * sigma_x0, mu_x1 - 2 * sigma_x1, data[:, 0].min())
    x_hi = max(mu_x0 + 2 * sigma_x0, mu_x1 + 2 * sigma_x1, data[:, 0].max())
    z_lo = min(mu_z0 - 2 * sigma_z0, mu_z1 - 2 * sigma_z1, data[:, 1].min())
    z_hi = max(mu_z0 + 2 * sigma_z0, mu_z1 + 2 * sigma_z1, data[:, 1].max())
    return (x_lo, x_hi, z_lo, z_hi)
# calcul des bornes pour contenir toutes les lois normales calculées
def find_video_bounds ( data, res_EM ):
    """Union of the bounds of every EM state, so an animation keeps one
    fixed frame across all iterations."""
    bounds = np.asarray(find_bounds(data, res_EM[0][0]))
    for state_params, _state_pi in res_EM:
        candidate = find_bounds(data, state_params)
        for axis in (0, 2):  # minima (x_min, z_min)
            bounds[axis] = min(bounds[axis], candidate[axis])
        for axis in (1, 3):  # maxima (x_max, z_max)
            bounds[axis] = max(bounds[axis], candidate[axis])
    return bounds
#==============================================================================
# Méthodes demandés
#==============================================================================
def normale_bidim(x, z, params):
    """Density of a bivariate normal evaluated at (x, z).

    params = (mu_x, mu_z, sigma_x, sigma_z, rho), rho the correlation.
    """
    mu_x, mu_z, sigma_x, sigma_z, rho = params
    dx = (x - mu_x) / sigma_x
    dz = (z - mu_z) / sigma_z
    one_minus_r2 = 1 - rho ** 2
    coeff = 1 / (2 * np.pi * sigma_x * sigma_z * np.sqrt(one_minus_r2))
    exponent = -(dx ** 2 - 2 * rho * dx * dz + dz ** 2) / (2 * one_minus_r2)
    return coeff * np.exp(exponent)
def Q_i(tab, params, pi):
    """E-step: per-row responsibilities of the two mixture components.

    Returns an (n, 2) array whose rows sum to 1.
    """
    responsibilities = np.zeros((tab.shape[0], 2))
    for row in range(tab.shape[0]):
        x_val, z_val = tab[row, 0], tab[row, 1]
        a0 = pi[0] * normale_bidim(x_val, z_val, params[0, :])
        a1 = pi[1] * normale_bidim(x_val, z_val, params[1, :])
        total = a0 + a1
        responsibilities[row, 0] = a0 / total
        responsibilities[row, 1] = a1 / total
    return responsibilities
def M_step(data, qi, params, pi):
    """M-step: re-estimate (mu, sigma, rho) per component and the mixing
    weights from the responsibilities qi.

    `params` and `pi` are accepted for interface symmetry with the E-step
    but are not read (the body never references them).
    """
    n = data.shape[0]
    w0, w1 = qi[:, 0], qi[:, 1]
    s0, s1 = w0.sum(), w1.sum()
    x, z = data[:, 0], data[:, 1]
    mux0 = (w0 * x).sum() / s0
    mux1 = (w1 * x).sum() / s1
    muz0 = (w0 * z).sum() / s0
    muz1 = (w1 * z).sum() / s1
    sigmax0 = np.sqrt((w0 * (x - mux0) ** 2).sum() / s0)
    sigmax1 = np.sqrt((w1 * (x - mux1) ** 2).sum() / s1)
    sigmaz0 = np.sqrt((w0 * (z - muz0) ** 2).sum() / s0)
    sigmaz1 = np.sqrt((w1 * (z - muz1) ** 2).sum() / s1)
    ro0 = (w0 * (x - mux0) * (z - muz0) / (sigmax0 * sigmaz0)).sum() / s0
    ro1 = (w1 * (x - mux1) * (z - muz1) / (sigmax1 * sigmaz1)).sum() / s1
    new_params = np.array([[mux0, muz0, sigmax0, sigmaz0, ro0],
                           [mux1, muz1, sigmax1, sigmaz1, ro1]])
    new_pi = np.array([s0 / n, s1 / n])
    return new_params, new_pi
def algo_EM(data, initial_params, initial_pi, tours = 4, affiche = False):
    """Run `tours` EM iterations; return the list of (params, pi) per step.

    When `affiche` is true, each step (including the initial guess) is drawn
    on its own figure.
    """
    def _plot(params, pi):
        # one fresh figure per EM step
        figure = plt.figure()
        axis = figure.add_subplot(111)
        _bounds = find_bounds(data, params)
        dessine_normales(data, params, pi, _bounds, axis)

    res_EM = [(initial_params, initial_pi)]
    if affiche:
        _plot(initial_params, initial_pi)
    current_params, current_pi = initial_params, initial_pi
    for _ in range(tours):
        qi = Q_i(data, current_params, current_pi)
        current_params, current_pi = M_step(data, qi, current_params, current_pi)
        res_EM.append((current_params, current_pi))
        if affiche:
            _plot(current_params, current_pi)
    return res_EM
if __name__ == "__main__":
    # Load the Old Faithful eruption data set (duration, waiting time).
    data = read_file("2015_tme4_faithful.txt")
    # print(normale_bidim(1, 2, (1, 2, 3, 4, 0)))
    # print(np.allclose(normale_bidim(1, 2, (1, 2, 3, 4, 0)), \
    # 0.013262911924324612))
    #
    # print(normale_bidim(1, 0, (1, 2, 1, 2, 0.7)))
    # print(np.allclose(normale_bidim(1, 0, (1, 2, 1, 2, 0.7)), \
    # 0.041804799427614503))
    #
    # dessine_1_normale((-3.0, -5.0, 3.0, 2.0, 0.7))
    # dessine_1_normale((-3.0, -5.0, 3.0, 2.0, 0.2))
    # displaying the data: compute means and std deviations of the 2 columns
    # mean1 = data[:,0].mean ()
    # mean2 = data[:,1].mean ()
    # std1 = data[:,0].std ()
    # std2 = data[:,1].std ()
    #
    # the parameters of the 2 normals are placed around these means
    # params = np.array ( [(mean1 - 0.2, mean2 - 1, std1, std2, 0),
    # (mean1 + 0.2, mean2 + 1, std1, std2, 0)] )
    # weights = np.array ( [0.4, 0.6] )
    # bounds = find_bounds ( data, params )
    #
    # display the figure
    # fig = plt.figure ()
    # ax = fig.add_subplot(111)
    # dessine_normales ( data, params, weights, bounds, ax)
    # plt.show ()
    #current_params = np.array ( [(mu_x, mu_z, sigma_x, sigma_z, rho), # params of the 1st normal
    # (mu_x, mu_z, sigma_x, sigma_z, rho)] ) # params of the 2nd normal
    # current_params = np.array([[ 3.28778309, 69.89705882, 1.13927121, 13.56996002, 0. ],
    # [ 3.68778309, 71.89705882, 1.13927121, 13.56996002, 0. ]])
    #
    # current_weights = np.array ( [ pi_0, pi_1 ] )
    # current_weights = np.array ( [ 0.5, 0.5 ] )
    #
    # print(Q_i(data, current_params, current_weights))
    #
    # current_params = np.array([[ 3.2194684, 67.83748075, 1.16527301, 13.9245876, 0.9070348 ],
    # [ 3.75499261, 73.9440348, 1.04650191, 12.48307362, 0.88083712]])
    # current_weights = np.array ( [ 0.49896815, 0.50103185] )
    # print(Q_i ( data, current_params, current_weights ))
    #
    # current_params = np.array([(2.51460515, 60.12832316, 0.90428702, 11.66108819, 0.86533355),
    # (4.2893485, 79.76680985, 0.52047055, 7.04450242, 0.58358284)])
    # current_weights = np.array([ 0.45165145, 0.54834855])
    # Q = Q_i ( data, current_params, current_weights )
    # print(M_step ( data, Q, current_params, current_weights))

    # Initial guess: two normals placed slightly apart around the sample means.
    mean1 = data[:,0].mean ()
    mean2 = data[:,1].mean ()
    std1 = data[:,0].std ()
    std2 = data[:,1].std ()
    params = np.array ( [(mean1 - 0.2, mean2 - 1, std1, std2, 0),
                         (mean1 + 0.2, mean2 + 1, std1, std2, 0)] )
    weights = np.array ( [ 0.5, 0.5 ] )
    res_EM = algo_EM(data, params, weights, tours = 20)
    bounds = find_video_bounds ( data, res_EM )

    # build the animation: first create the figure that will be animated
    fig = plt.figure ()
    ax = fig.gca (xlim=(bounds[0], bounds[1]), ylim=(bounds[2], bounds[3]))

    # function called at each time step to render one frame of the animation
    def animate ( i ):
        # clear the axis, then draw the normals estimated at EM step i
        ax.cla ()
        dessine_normales (data, res_EM[i][0], res_EM[i][1], bounds, ax)
        ax.text(5, 40, 'step = ' + str ( i ))
        print ("step animate = %d" % ( i ))

    # run the animation
    anim = animation.FuncAnimation(fig, animate,
                                   frames = len ( res_EM ), repeat = False)
    plt.show ()
    # optionally, save the animation as a video
    # anim.save('old_faithful.avi', bitrate=4000)
|
from typing import List
class Solution:
    def maxSubArray(self, nums: List[int]) -> int:
        """Return the maximum sum over all contiguous subarrays (Kadane).

        Mirrors the original behavior, including returning None for an
        empty input list.
        """
        best_ending_here = 0
        best_overall = None
        for value in nums:
            # either extend the running subarray or restart at this element
            best_ending_here = max(best_ending_here + value, value)
            if best_overall is None or best_ending_here > best_overall:
                best_overall = best_ending_here
        return best_overall
|
# -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Text normalization
"""
import re
from typing import List, Union
from pythainlp import thai_above_vowels as above_v
from pythainlp import thai_below_vowels as below_v
from pythainlp import thai_follow_vowels as follow_v
from pythainlp import thai_lead_vowels as lead_v
from pythainlp import thai_tonemarks as tonemarks
from pythainlp.tokenize import word_tokenize
# Thai characters that cannot stand alone at the start of a word:
# above/below vowels, tone marks, plus Phinthu (U+0E3A), Thanthakhat
# (U+0E4C), Nikhahit (U+0E4D), and Yamakkan (U+0E4E).
_DANGLING_CHARS = f"{above_v}{below_v}{tonemarks}\u0e3a\u0e4c\u0e4d\u0e4e"
# Matches a run of such characters at the very beginning of the text.
_RE_REMOVE_DANGLINGS = re.compile(f"^[{_DANGLING_CHARS}]+")

_ZERO_WIDTH_CHARS = "\u200b\u200c"  # ZWSP, ZWNJ

# (pattern, replacement) pairs applied in order by reorder_vowels().
_REORDER_PAIRS = [
    ("\u0e40\u0e40", "\u0e41"),  # Sara E + Sara E -> Sara Ae
    (
        f"([{tonemarks}\u0e4c]+)([{above_v}{below_v}]+)",
        "\\2\\1",
    ),  # TONE/Thanthakhat + ABV/BLW VOWEL -> ABV/BLW VOWEL + TONE/Thanthakhat
    (
        f"\u0e4d([{tonemarks}]*)\u0e32",
        "\\1\u0e33",
    ),  # Nikhahit + TONEMARK + Sara Aa -> TONEMARK + Sara Am
    (
        f"([{follow_v}]+)([{tonemarks}]+)",
        "\\2\\1",
    ),  # FOLLOW VOWEL + TONEMARK+ -> TONEMARK + FOLLOW VOWEL
    ("([^\u0e24\u0e26])\u0e45", "\\1\u0e32"),  # Lakkhangyao -> Sara Aa
]

# VOWELS + Phinthu, Thanthakhat, Nikhahit, Yamakkan
_NOREPEAT_CHARS = (
    f"{follow_v}{lead_v}{above_v}{below_v}\u0e3a\u0e4c\u0e4d\u0e4e"
)
# For each character above: collapse repeats (optionally separated by
# spaces) down to a single occurrence of that character.
_NOREPEAT_PAIRS = list(
    zip([f"({ch}[ ]*)+{ch}" for ch in _NOREPEAT_CHARS], _NOREPEAT_CHARS)
)

# A run of one or more tone marks (reduced to its last mark by _last_char).
_RE_TONEMARKS = re.compile(f"[{tonemarks}]+")

# A newline with any surrounding spaces/newlines (collapsed to one "\n").
_RE_REMOVE_NEWLINES = re.compile("[ \n]*\n[ \n]*")
def _last_char(matchobj): # to be used with _RE_NOREPEAT_TONEMARKS
return matchobj.group(0)[-1]
def remove_dangling(text: str) -> str:
    """
    Remove Thai non-base characters at the beginning of text.

    This is a common "typo", especially in form input fields, because these
    non-base characters can be visually hidden, so a user may type them in
    without noticing.

    A character is removed when it is both:

    * a tone mark, above vowel, below vowel, or non-base sign, AND
    * located at the beginning of the text

    :param str text: input text
    :return: text without dangling Thai characters at the beginning
    :rtype: str

    :Example:
    ::

        from pythainlp.util import remove_dangling

        remove_dangling('๊ก')
        # output: 'ก'
    """
    stripped = _RE_REMOVE_DANGLINGS.sub("", text)
    return stripped
def remove_dup_spaces(text: str) -> str:
    """
    Collapse duplicate whitespace.

    Runs of spaces become a single space; runs of newlines (and any spaces
    around them) become a single newline. Leading/trailing whitespace is
    stripped.

    :param str text: input text
    :return: text without duplicated spaces and newlines
    :rtype: str

    :Example:
    ::

        from pythainlp.util import remove_dup_spaces

        remove_dup_spaces('ก    ข    ค')
        # output: 'ก ข ค'
    """
    # equivalent to repeatedly replacing double spaces until none remain
    text = re.sub(" {2,}", " ", text)
    text = _RE_REMOVE_NEWLINES.sub("\n", text)
    return text.strip()
def remove_tonemark(text: str) -> str:
    """
    Remove all Thai tone marks from the text.

    Thai script has four tone marks indicating four tones as follows:

    * Down tone (Thai: ไม้เอก _่ )
    * Falling tone (Thai: ไม้โท _้ )
    * High tone (Thai: ไม้ตรี _๊ )
    * Rising tone (Thai: ไม้จัตวา _๋ )

    Putting the wrong tone mark is a common mistake in Thai writing.
    Removing tone marks from a string makes it usable for approximate
    string matching.

    :param str text: input text
    :return: text without Thai tone marks
    :rtype: str

    :Example:
    ::

        from pythainlp.util import remove_tonemark

        remove_tonemark('สองพันหนึ่งร้อยสี่สิบเจ็ดล้านสี่แสนแปดหมื่นสามพันหกร้อยสี่สิบเจ็ด')
        # output: สองพันหนึงรอยสีสิบเจ็ดลานสีแสนแปดหมืนสามพันหกรอยสีสิบเจ็ด
    """
    # str.replace already removes every occurrence, so the original
    # `while ch in text:` loop around it was redundant.
    for ch in tonemarks:
        text = text.replace(ch, "")
    return text
def remove_zw(text: str) -> str:
    """
    Remove zero-width characters.

    These non-visible characters may cause unexpected results from the
    user's point of view. Removing them makes string matching more robust.

    Characters to be removed:

    * Zero-width space (ZWSP)
    * Zero-width non-joiner (ZWNJ)

    :param str text: input text
    :return: text without zero-width characters
    :rtype: str
    """
    # str.replace already removes every occurrence, so the original
    # `while ch in text:` loop around it was redundant.
    for ch in _ZERO_WIDTH_CHARS:
        text = text.replace(ch, "")
    return text
def reorder_vowels(text: str) -> str:
    """
    Reorder vowels and tone marks to the standard logical order/spelling.

    Characters in the input are reordered/transformed according to:

    * Sara E + Sara E -> Sara Ae
    * Nikhahit + Sara Aa -> Sara Am
    * tone mark + non-base vowel -> non-base vowel + tone mark
    * follow vowel + tone mark -> tone mark + follow vowel

    :param str text: input text
    :return: text with vowels and tone marks in the standard logical order
    :rtype: str
    """
    for pattern, replacement in _REORDER_PAIRS:
        text = re.sub(pattern, replacement, text)
    return text
def remove_repeat_vowels(text: str) -> str:
    """
    Remove repeating vowels, tone marks, and signs.

    Calls reorder_vowels() first so that a double Sara E is converted to
    Sara Ae rather than being collapsed away.

    :param str text: input text
    :return: text without repeating Thai vowels, tone marks, and signs
    :rtype: str
    """
    text = reorder_vowels(text)
    for pattern, replacement in _NOREPEAT_PAIRS:
        text = re.sub(pattern, replacement, text)
    # collapse consecutive tone marks, keeping only the last one
    return _RE_TONEMARKS.sub(_last_char, text)
def normalize(text: str) -> str:
    """
    Normalize and clean Thai text with normalizing rules as follows:

    * Remove zero-width spaces
    * Remove duplicate spaces
    * Reorder tone marks and vowels to standard order/spelling
    * Remove duplicate vowels and signs
    * Remove duplicate tone marks
    * Remove dangling non-base characters at the beginning of text

    normalize() simply calls remove_zw(), remove_dup_spaces(),
    remove_repeat_vowels(), and remove_dangling(), in that order.

    If a user wants to customize the selection or the order of rules
    to be applied, they can choose to call those functions by themselves.

    Note: for Unicode normalization, see unicodedata.normalize().

    :param str text: input text
    :return: normalized text according to the rules
    :rtype: str

    :Example:
    ::

        from pythainlp.util import normalize

        normalize('เเปลก')  # starts with two Sara E
        # output: แปลก

        normalize('นานาาา')
        # output: นานา
    """
    text = remove_zw(text)
    text = remove_dup_spaces(text)
    text = remove_repeat_vowels(text)
    text = remove_dangling(text)
    return text
def maiyamok(sent: Union[str, List[str]]) -> List[str]:
    """
    Expand Thai MaiYaMok.

    MaiYaMok (ๆ) marks word duplication in Thai. This function replaces
    each MaiYaMok token with a repetition of the preceding word.

    :param Union[str, List[str]] sent: input sentence (list or str);
        a string is tokenized with word_tokenize() first
    :return: list of words with MaiYaMok expanded
    :rtype: List[str]

    :Example:
    ::

        from pythainlp.util import maiyamok

        maiyamok("เด็กๆชอบไปโรงเรียน")
        # output: ['เด็ก', 'เด็ก', 'ชอบ', 'ไป', 'โรงเรียน']

        maiyamok(["ทำไม","คน","ดี"," ","ๆ","ๆ"," ","ถึง","ทำ","ไม่ได้"])
        # output: ['ทำไม', 'คน', 'ดี', 'ดี', 'ดี', ' ', 'ถึง', 'ทำ', 'ไม่ได้']
    """
    if isinstance(sent, str):
        sent = word_tokenize(sent)
    _list_word = []
    i = 0  # mirrors len(_list_word); used to index the previous word
    for j, text in enumerate(sent):
        # skip whitespace that directly precedes a MaiYaMok token;
        # the bounds check fixes an IndexError on a trailing space token
        if text.isspace() and j + 1 < len(sent) and "ๆ" in sent[j + 1]:
            continue
        if " ๆ" in text:
            text = text.replace(" ๆ", "ๆ")
        if "ๆ" == text:
            # bare MaiYaMok: duplicate the previously emitted word
            text = _list_word[i - 1]
        elif "ๆ" in text:
            # word fused with MaiYaMok, e.g. "เด็กๆ": emit the word once
            # here and once more below
            text = text.replace("ๆ", "")
            _list_word.append(text)
            i += 1
        _list_word.append(text)
        i += 1
    return _list_word
|
from bs4 import BeautifulSoup, SoupStrainer
import requests
class Ara():
    """Search pages linked from `url` for the keyword `kelime`.

    Matching link URLs are collected in `self.url_list` and also printed.
    """

    def __init__(self, url, kelime):
        self.page = requests.get(url)
        self.data = self.page.text
        self.soup = BeautifulSoup(self.data, features="lxml")
        self.url = url
        self.kelime = kelime
        # per-instance result list; was a class attribute, which made all
        # instances (and repeated calls) share and accumulate results
        self.url_list = []

    def mynet_ara(self):
        """Follow every <a href> and record pages whose <p> text contains the keyword."""
        for a in self.soup.find_all('a'):
            link = None  # ensure the name exists for the error message below
            try:
                link = a.get('href')
                res = requests.get(link)
                html_page = res.content
                soup = BeautifulSoup(html_page, features="lxml")
                for i in soup.find_all('p'):
                    if self.kelime in i.get_text():
                        self.url_list.append(link)
                        print(link)
            except Exception:
                # best-effort crawl: skip broken/relative/missing links, but
                # don't swallow KeyboardInterrupt/SystemExit like a bare except
                print("error: " + str(link))
        mylist = list(set(self.url_list))
        return mylist

    def sondakika_ara(self):
        """Like mynet_ara(), but hrefs are site-relative (leading '/' stripped)."""
        for a in self.soup.find_all('a'):
            link = a.get('href')[1:]
            url2 = self.url + link
            res = requests.get(url2)
            html_page = res.content
            soup = BeautifulSoup(html_page, 'html.parser')
            for i in soup.find_all('p'):
                if self.kelime in i.get_text():
                    self.url_list.append(url2)
                    print(url2)
        mylist = list(set(self.url_list))
        return mylist
|
# Print a confirmation only when `input` matches `real`.
input = 11  # NOTE(review): shadows the builtin input(); rename if this grows
real = 11
if real == input:
    print("True!!")
import requests

# Fetch a reachable page: print the status code, then raise_for_status(),
# which returns None for a successful (2xx) response — so this prints None.
r = requests.get('http://www.google.com')
print(r.status_code)
print(r.raise_for_status())
# 192.0.2.1 is a TEST-NET-1 address (RFC 5737): this request cannot succeed
# and eventually raises requests.exceptions.ConnectionError / ConnectTimeout.
r = requests.get('http://192.0.2.1')
|
import asyncio
import json
import logging
from sinks.base_bot_request_handler import AsyncRequestHandler
logger = logging.getLogger(__name__)
class webhookReceiver(AsyncRequestHandler):
    """Receive GitHub-style webhook POSTs and relay an HTML summary.

    The target conversation/user id is the first path segment:
    /<conv_or_user_id>
    """
    _bot = None  # bot reference; presumably injected by the sink framework — TODO confirm

    @asyncio.coroutine
    def process_request(self, path, query_string, content):
        """Parse the webhook payload and forward a push summary to the target."""
        path = path.split("/")
        conv_or_user_id = path[1] if len(path) > 1 else None
        # covers a missing segment as well as an empty string (the original
        # `is None` check could never be true for a str.split() result)
        if not conv_or_user_id:
            logger.error(
                "conversation or user id must be provided as part of path")
            return

        try:
            payload = json.loads(content)
        except Exception:
            logger.exception("invalid payload")
            # must bail out: continuing raised NameError on `payload`
            return

        if "repository" in payload and "commits" in payload and "pusher" in payload:
            # push event: summarise pusher, commit count, and repository link
            html = '<b>{0}</b> has <a href="{2}">pushed</a> {1} commit(s)<br />'.format(
                payload["pusher"]["name"],
                len(payload["commits"]),
                payload["repository"]["url"])
            for commit in payload["commits"]:
                # the template only uses {0} (message) and {2} (url)
                html += '* <i>{0}</i> <a href="{2}">link</a><br />'.format(
                    commit["message"],
                    commit["author"]["name"],
                    commit["url"],
                    commit["timestamp"],
                    commit["id"])
            yield from self.send_data(conv_or_user_id, html)
        elif "zen" in payload:
            # GitHub "ping" events carry a zen aphorism
            logger.info("github zen received: {}".format(payload["zen"]))
        else:
            logger.error("unrecognised payload: {}".format(payload))
|
class Error(Exception):
    """Base class for all OCR-related errors in this module."""
    pass
class BaiduOCRError(Error):
    """Error reported by the Baidu OCR API.

    Attributes:
        code: error code returned by the API
        message: human-readable error description
    """

    def __init__(self, code, message):
        # pass the message to Exception so str(exc) is not empty
        super().__init__(message)
        self.code = code
        self.message = message
class SogouOCRError(Error):
    """Error reported by the Sogou OCR API.

    Attributes:
        code: error code returned by the API
        message: human-readable error description
    """

    def __init__(self, code, message):
        # pass the message to Exception so str(exc) is not empty
        super().__init__(message)
        self.code = code
        self.message = message
class AliOCRError(Error):
    """Error reported by the Alibaba OCR API.

    Attributes:
        code: error code returned by the API
        message: human-readable error description
    """

    def __init__(self, code, message):
        # pass the message to Exception so str(exc) is not empty
        super().__init__(message)
        self.code = code
        self.message = message
class OCREngineUnknownError(Error):
    """Raised when the requested OCR engine name is not recognised."""
    pass
|
#!/usr/bin/env python
class contig(object):
    """This class will hold all short reads within each contig, which is generated into separate files"""
    def __init__(self):
        # list to hold short reads per output file generated
        self.lists = []
# Short reads generated in the output file are read in from 'shortread.txt'
# and collected into the module-level `lists` (one read per line, with the
# trailing newline stripped).
with open('shortread.txt', 'r') as read_file:
    lists = [line.rstrip('\n') for line in read_file]
# Retains a record for each fragment.
class fragmentRecord(object):
    """One short read (fragment) belonging to a contig."""

    def __init__(self, sequence):
        self.sequence = sequence

    def printRecord(self):
        # report the stored read sequence
        print("Sequence: " + self.sequence)
# Use printRecord to display each short read from `lists`: wrap each read in
# a fragmentRecord (no manual input needed), then report each read's length.
print("---shortreadinformation---")
for read in lists:
    # fresh name per iteration; the original rebound the loop variable
    fragmentRecord(sequence=read).printRecord()
for read in lists:
    # single f-string replaces five chained print(..., end='') calls;
    # output is byte-identical, e.g. "Length of: ACGT is 4 base pairs."
    print(f"Length of: {read} is {len(read)} base pairs.")
|
#!/usr/bin/python3
"""This is the state class"""
from models.base_model import BaseModel
from models.base_model import Base
#from models.engine.file_storage import FileStorage
import os
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.orm import relationship
class State(BaseModel, Base):
    """This is the class for State

    Attributes:
        name: input name
    """
    # .get() returns None when the variable is unset, so the file-storage
    # branch is used instead of crashing with KeyError at import time
    os_env = os.environ.get('HBNB_TYPE_STORAGE')
    if os_env == "db":
        __tablename__ = "states"
        name = Column(String(128), nullable=False)
        cities = relationship('City', cascade="all, delete", backref='state')
    else:
        name = ""

        @property
        def get_cities(self):
            """Return the City objects linked to this State (file storage).

            NOTE(review): the lookup is commented out pending FileStorage
            integration, so this currently always returns an empty list.
            """
            # my_storage = FileStorage()
            my_list = []
            # dict_city = my_storage.all(City)
            # for key, value in dict_city.items():
            #     if self.id == dict_city['state_id']:
            #         my_list.append(value)
            return my_list
|
#!/home/vagrant/rpmbuild/BUILD/build-toolchain/opt/toolchains/stbgcc-4.8-1.5/python-runtime/bin/python2.7
# Launcher for the IDLE shell (Python 2 idlelib; the module was renamed
# idlelib.pyshell in Python 3).
from idlelib.PyShell import main
if __name__ == '__main__':
    main()
|
# =========================================================================================
# Copyright 2016 Community Information Online Consortium (CIOC) and KCL Software Solutions Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================================
# stdlib
import logging
from collections import OrderedDict
from xml.etree import ElementTree as ET
# 3rd party
from pyramid.httpexceptions import (
HTTPUnauthorized,
HTTPInternalServerError,
HTTPNotFound,
)
from pyramid.view import view_config
# this app
from cioc.core import i18n, syslanguage
from cioc.core.format import textToHTML
from cioc.core.stat import insert_stat
from cioc.web.vol import viewbase
log = logging.getLogger(__name__)
_ = i18n.gettext
def make_headers(extra_headers=None):
    """Return a fresh response-header dict, copying extra_headers if given."""
    if extra_headers is None:
        return {}
    return dict(extra_headers)
def make_401_error(message, realm="CIOC RPC"):
    """Build a plain-text 401 response carrying a Basic-auth challenge."""
    challenge = {"WWW-Authenticate": 'Basic realm="%s"' % realm}
    error = HTTPUnauthorized(headers=make_headers(challenge))
    error.content_type = "text/plain"
    error.text = message
    return error
def make_internal_server_error(message):
    """Build a plain-text 500 response with the given message body."""
    response = HTTPInternalServerError()
    response.content_type = "text/plain"
    response.text = message
    return response
@view_config(route_name="rpc_oppdetails", renderer="json")
@view_config(route_name="rpc_oppdetails_opid", renderer="json")
class RpcOrgDetails(viewbase.VolViewBase):
def __call__(self):
request = self.request
user = request.user
if not user:
return make_401_error("Access Denied")
if "realtimestandard" not in user.vol.ExternalAPIs:
return make_401_error("Insufficient Permissions")
opid = request.matchdict.get("opid")
vnum = request.matchdict.get("vnum")
if not opid and not vnum:
raise HTTPNotFound()
tmp_culture = cur_culture = request.params.get("TmpLn")
restore_culture = request.language.Culture
log.debug("Culture: %s", request.language.Culture)
if cur_culture and syslanguage.is_record_culture(cur_culture):
request.language.setSystemLanguage(cur_culture)
else:
cur_culture = request.language.Culture
tmp_culture = None
viewdata = request.viewdata.vol
with request.connmgr.get_connection() as conn:
if opid and not vnum:
vnum = conn.execute(
"SELECT VNUM FROM VOL_Opportunity WHERE OP_ID=?", opid
).fetchone()
if not vnum:
raise HTTPNotFound()
vnum = vnum.VNUM
fields = conn.execute(
"EXEC sp_VOL_View_DisplayFields ?, ?, ?, ?, ?",
viewdata.ViewType,
False,
vnum,
False,
None,
).fetchall()
sql = [
"""DECLARE @ViewType int
SET @ViewType = ?
SELECT bt.MemberID, vo.OP_ID, vod.OPD_ID,
dbo.fn_VOL_RecordInView(vo.VNUM,@ViewType,vod.LangID,0,GETDATE()) AS IN_VIEW,
dbo.fn_CIC_RecordInView(bt.NUM,?,btd.LangID,0,GETDATE()) AS IN_CIC_VIEW,
vo.VNUM, vod.OPD_ID, vo.RECORD_OWNER, vo.NUM,
vod.NON_PUBLIC,
cioc_shared.dbo.fn_SHR_GBL_DateString(vod.MODIFIED_DATE) AS MODIFIED_DATE,
cioc_shared.dbo.fn_SHR_GBL_DateString(vod.UPDATE_DATE) AS UPDATE_DATE,
cioc_shared.dbo.fn_SHR_GBL_DateString(vod.UPDATE_SCHEDULE) AS UPDATE_SCHEDULE,
cioc_shared.dbo.fn_SHR_GBL_DateString(vod.DELETION_DATE) AS DELETION_DATE,
cioc_shared.dbo.fn_SHR_GBL_DateString(vo.DISPLAY_UNTIL) AS DISPLAY_UNTIL,
(SELECT Culture,LangID,LanguageName,LanguageAlias,LCID,Active
FROM STP_Language LANG WHERE LangID<>@@LANGID AND dbo.fn_VOL_RecordInView(vo.VNUM,@ViewType,LangID,0,GETDATE())=1 AND """,
"ActiveRecord=1"
if viewdata.ViewOtherLangs
else "EXISTS(SELECT * FROM VOL_View_Description WHERE ViewType=@ViewType AND LangID=LANG.LangID)",
""" ORDER BY CASE WHEN Active=1 THEN 0 ELSE 1 END, LanguageName FOR XML AUTO) AS RECORD_LANG,""",
]
exclude = {
"OP_ID",
"VNUM",
"NUM",
"RECORD_OWNER",
"NON_PUBLIC",
"DELETION_DATE",
"MODIFIED_DATE",
"UPDATE_DATE",
"UPDATE_SCHEDULE",
"DISPLAY_UNTIL",
"POSITION_TITLE",
}
if viewdata.DataMgmtFields:
sql.append(
"\ncioc_shared.dbo.fn_SHR_GBL_DateString(vod.CREATED_DATE) AS CREATED_DATE,"
)
exclude.add("CREATED_DATE")
sql.append(
",".join(x.FieldSelect for x in fields if x.FieldName not in exclude)
)
sql.append(
""", vod.POSITION_TITLE, vod.VNUM AS LangVNUM
FROM VOL_Opportunity vo
LEFT JOIN VOL_Opportunity_Description vod ON vo.VNUM=vod.VNUM AND vod.LangID=@@LANGID
INNER JOIN GBL_BaseTable bt ON vo.NUM=bt.NUM
LEFT JOIN GBL_BaseTable_Description btd ON bt.NUM=btd.NUM AND btd.LangID=(SELECT TOP 1 LangID FROM GBL_BaseTable_Description WHERE NUM=btd.NUM ORDER BY CASE WHEN LangID=@@LANGID THEN 0 ELSE 1 END, LangID)
WHERE vo.VNUM=?
"""
)
sql = "".join(sql)
log.debug(sql)
data = conn.execute(
sql, viewdata.ViewType, request.viewdata.cic.ViewType, vnum
).fetchone()
request.language.setSystemLanguage(restore_culture)
if not data:
raise HTTPNotFound()
if not data.OPD_ID:
# TODO other langs interface?
raise HTTPNotFound()
if not data.IN_VIEW:
return make_401_error(_("Record not in view", request))
request.language.setSystemLanguage(cur_culture)
if request.params.get("texttohtml"):
def htmlvalue(field, data=data):
field_contents = getattr(data, field.FieldName)
if not field_contents:
return None
if field.CheckMultiline or field.CheckHTML:
field_contents = textToHTML(field_contents)
return field_contents
else:
def htmlvalue(field, data=data):
return getattr(data, field.FieldName)
fields = [
OrderedDict(
[
("value", htmlvalue(x)),
("name", x.FieldName),
("display_name", x.FieldDisplay),
("allow_html", x.CheckHTML),
]
)
for x in fields
]
makeLink = request.passvars.makeLink
route_url = request.passvars.route_url
format = request.params.get("format")
if format and format.lower() == "xml":
extra_link_args = [("format", "xml")]
else:
extra_link_args = []
if data.RECORD_LANG:
xml = ET.fromstring(
("<langs>%s</langs>" % data.RECORD_LANG).encode("utf-8")
)
other_langs = [
OrderedDict(
[
("name", x.get("LanguageName")),
("culture", x.get("Culture")),
(
"link",
route_url(
"rpc_oppdetails",
vnum=vnum,
_query=[
(
"Ln" if x.get("Active") == "1" else "TmpLn",
x.get("Culture"),
)
]
+ extra_link_args,
),
),
]
)
for x in xml.findall("./LANG")
]
else:
other_langs = []
full_info = OrderedDict(
[
("orgname", data.ORG_NAME_FULL),
("position_title", data.POSITION_TITLE),
(
"feedback_link",
"https://"
+ request.host
+ makeLink(
"~/volunteer/feedback.asp",
[("VNUM", vnum), ("UpdateLn", cur_culture)] + extra_link_args,
),
),
("non_public", data.NON_PUBLIC),
("deletion_date", data.DELETION_DATE),
]
)
if data.IN_CIC_VIEW:
full_info["org_link"] = request.host_url + request.passvars.makeDetailsLink(
data.NUM,
[("TmpLn" if tmp_culture else "Ln", cur_culture)] + extra_link_args,
)
if viewdata.LastModifiedDate:
full_info.update(
[
("modified_date", data.MODIFIED_DATE),
("update_date", data.UPDATE_DATE),
]
)
if viewdata.DataMgmtFields:
full_info.update(
[
("update_schedule", data.UPDATE_SCHEDULE),
("created_date", data.CREATED_DATE),
]
)
full_info.update(
[
("orgname", data.ORG_NAME_FULL),
("other_languages", other_langs),
("fields", fields),
(
"volunteer_api_url",
makeLink(
"~/volunteer/volunteer2.asp", [("api", "on")] + extra_link_args
),
),
]
)
with request.connmgr.get_connection("admin") as conn:
insert_stat(request, data.OP_ID, data.VNUM, api=True)
format = request.params.get("format")
if format and format.lower() == "xml":
request.override_renderer = "cioc:xml"
return full_info
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urlparse,
)
from ..utils import (
determine_ext,
ExtractorError,
find_xpath_attr,
fix_xml_ampersands,
GeoRestrictedError,
HEADRequest,
int_or_none,
parse_duration,
remove_start,
strip_or_none,
try_get,
unified_strdate,
unified_timestamp,
update_url_query,
urljoin,
xpath_text,
)
class RaiBaseIE(InfoExtractor):
    """Shared helpers for Rai extractors: relinker resolution and subtitles."""
    _UUID_RE = r'[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}'
    _GEO_COUNTRIES = ['IT']
    _GEO_BYPASS = False

    def _extract_relinker_info(self, relinker_url, video_id):
        """Resolve a relinker URL into formats, duration and live status.

        Queries the relinker for each delivery platform and merges the
        discovered HLS/HDS/HTTP formats. Raises geo-restriction when no
        format is found and the relinker flagged geoprotection.
        """
        if not re.match(r'https?://', relinker_url):
            return {'formats': [{'url': relinker_url}]}

        formats = []
        geoprotection = None
        is_live = None
        duration = None

        for platform in ('mon', 'flash', 'native'):
            relinker = self._download_xml(
                relinker_url, video_id,
                note='Downloading XML metadata for platform %s' % platform,
                transform_source=fix_xml_ampersands,
                query={'output': 45, 'pl': platform},
                headers=self.geo_verification_headers())

            # these flags are the same for every platform; read them once
            if not geoprotection:
                geoprotection = xpath_text(
                    relinker, './geoprotection', default=None) == 'Y'

            if not is_live:
                is_live = xpath_text(
                    relinker, './is_live', default=None) == 'Y'
            if not duration:
                duration = parse_duration(xpath_text(
                    relinker, './duration', default=None))

            url_elem = find_xpath_attr(relinker, './url', 'type', 'content')
            if url_elem is None:
                continue

            media_url = url_elem.text

            # This does not imply geo restriction (e.g.
            # http://www.raisport.rai.it/dl/raiSport/media/rassegna-stampa-04a9f4bd-b563-40cf-82a6-aad3529cb4a9.html)
            if '/video_no_available.mp4' in media_url:
                continue

            ext = determine_ext(media_url)
            # skip mismatched platform/extension combinations
            if (ext == 'm3u8' and platform != 'mon') or (ext == 'f4m' and platform != 'flash'):
                continue

            if ext == 'm3u8' or 'format=m3u8' in media_url or platform == 'mon':
                formats.extend(self._extract_m3u8_formats(
                    media_url, video_id, 'mp4', 'm3u8_native',
                    m3u8_id='hls', fatal=False))
            elif ext == 'f4m' or platform == 'flash':
                manifest_url = update_url_query(
                    media_url.replace('manifest#live_hds.f4m', 'manifest.f4m'),
                    {'hdcore': '3.7.0', 'plugin': 'aasp-3.7.0.39.44'})
                formats.extend(self._extract_f4m_formats(
                    manifest_url, video_id, f4m_id='hds', fatal=False))
            else:
                bitrate = int_or_none(xpath_text(relinker, 'bitrate'))
                # guard against a missing <bitrate>: int_or_none() yields
                # None, and `None > 0` raises TypeError on Python 3
                formats.append({
                    'url': media_url,
                    'tbr': bitrate if bitrate and bitrate > 0 else None,
                    'format_id': 'http-%d' % bitrate if bitrate and bitrate > 0 else 'http',
                })

        if not formats and geoprotection is True:
            self.raise_geo_restricted(countries=self._GEO_COUNTRIES)

        formats.extend(self._create_http_urls(relinker_url, formats))

        return dict((k, v) for k, v in {
            'is_live': is_live,
            'duration': duration,
            'formats': formats,
        }.items() if v is not None)

    def _create_http_urls(self, relinker_url, fmts):
        """Derive direct HTTPS mp4 format entries from the relinker URL."""
        _RELINKER_REG = r'https?://(?P<host>[^/]+?)/(?:i/)?(?P<extra>[^/]+?)/(?P<path>.+?)/(?P<id>\w+)(?:_(?P<quality>[\d\,]+))?(?:\.mp4|/playlist\.m3u8).+?'
        _MP4_TMPL = '%s&overrideUserAgentRule=mp4-%s'
        _QUALITY = {
            # tbr: w, h
            '250': [352, 198],
            '400': [512, 288],
            '700': [512, 288],
            '800': [700, 394],
            '1200': [736, 414],
            '1800': [1024, 576],
            '2400': [1280, 720],
            '3200': [1440, 810],
            '3600': [1440, 810],
            '5000': [1920, 1080],
            '10000': [1920, 1080],
        }

        def test_url(url):
            # HEAD probe: False => unusable, URL string => redirect target,
            # None => plain 2xx-but-not-200/错误 outcome (treated as usable)
            resp = self._request_webpage(
                HEADRequest(url), None, headers={'User-Agent': 'Rai'},
                fatal=False, errnote=False, note=False)

            if resp is False:
                return False

            if resp.code == 200:
                return False if resp.url == url else resp.url
            return None

        def get_format_info(tbr):
            import math
            br = int_or_none(tbr)
            if len(fmts) == 1 and not br:
                br = fmts[0].get('tbr')
            # `br` may still be None (e.g. when the quality placeholder is
            # '*'): fall back to the lowest quality rather than raising
            # TypeError on `None > 300`
            if br and br > 300:
                tbr = compat_str(math.floor(br / 100) * 100)
            else:
                tbr = '250'

            # try extracting info from available m3u8 formats
            format_copy = None
            if br:
                for f in fmts:
                    if f.get('tbr'):
                        br_limit = math.floor(br / 100)
                        if br_limit - 1 <= math.floor(f['tbr'] / 100) <= br_limit + 1:
                            format_copy = f.copy()
            return {
                'width': format_copy.get('width'),
                'height': format_copy.get('height'),
                'tbr': format_copy.get('tbr'),
                'vcodec': format_copy.get('vcodec'),
                'acodec': format_copy.get('acodec'),
                'fps': format_copy.get('fps'),
                'format_id': 'https-%s' % tbr,
            } if format_copy else {
                'width': _QUALITY[tbr][0],
                'height': _QUALITY[tbr][1],
                'format_id': 'https-%s' % tbr,
                'tbr': int(tbr),
            }

        # the wildcard mp4 URL must redirect somewhere usable
        loc = test_url(_MP4_TMPL % (relinker_url, '*'))
        if not isinstance(loc, compat_str):
            return []

        mobj = re.match(
            _RELINKER_REG,
            test_url(relinker_url) or '')
        if not mobj:
            return []

        available_qualities = mobj.group('quality').split(',') if mobj.group('quality') else ['*']
        available_qualities = [i for i in available_qualities if i]

        formats = []
        for q in available_qualities:
            fmt = {
                'url': _MP4_TMPL % (relinker_url, q),
                'protocol': 'https',
                'ext': 'mp4',
            }
            fmt.update(get_format_info(q))
            formats.append(fmt)
        return formats

    @staticmethod
    def _extract_subtitles(url, video_data):
        """Collect subtitle entries; STL tracks also get a derived SRT URL."""
        STL_EXT = 'stl'
        SRT_EXT = 'srt'
        subtitles = {}
        subtitles_array = video_data.get('subtitlesArray') or []
        for k in ('subtitles', 'subtitlesUrl'):
            subtitles_array.append({'url': video_data.get(k)})
        for subtitle in subtitles_array:
            sub_url = subtitle.get('url')
            if sub_url and isinstance(sub_url, compat_str):
                sub_lang = subtitle.get('language') or 'it'
                sub_url = urljoin(url, sub_url)
                sub_ext = determine_ext(sub_url, SRT_EXT)
                subtitles.setdefault(sub_lang, []).append({
                    'ext': sub_ext,
                    'url': sub_url,
                })
                if STL_EXT == sub_ext:
                    subtitles[sub_lang].append({
                        'ext': SRT_EXT,
                        'url': sub_url[:-len(STL_EXT)] + SRT_EXT,
                    })
        return subtitles
class RaiPlayIE(RaiBaseIE):
    """Extractor for raiplay.it on-demand pages (.html or .json URLs)."""
    _VALID_URL = r'(?P<base>https?://(?:www\.)?raiplay\.it/.+?-(?P<id>%s))\.(?:html|json)' % RaiBaseIE._UUID_RE
    _TESTS = [{
        'url': 'http://www.raiplay.it/video/2014/04/Report-del-07042014-cb27157f-9dd0-4aee-b788-b1f67643a391.html',
        'md5': '8970abf8caf8aef4696e7b1f2adfc696',
        'info_dict': {
            'id': 'cb27157f-9dd0-4aee-b788-b1f67643a391',
            'ext': 'mp4',
            'title': 'Report del 07/04/2014',
            'alt_title': 'St 2013/14 - Espresso nel caffè - 07/04/2014',
            'description': 'md5:d730c168a58f4bb35600fc2f881ec04e',
            'thumbnail': r're:^https?://.*\.jpg$',
            'uploader': 'Rai Gulp',
            'duration': 6160,
            'series': 'Report',
            'season': '2013/14',
            'subtitles': {
                'it': 'count:2',
            },
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # 1080p direct mp4 url
        'url': 'https://www.raiplay.it/video/2021/03/Leonardo-S1E1-b5703b02-82ee-475a-85b6-c9e4a8adf642.html',
        'md5': '2e501e8651d72f05ffe8f5d286ad560b',
        'info_dict': {
            'id': 'b5703b02-82ee-475a-85b6-c9e4a8adf642',
            'ext': 'mp4',
            'title': 'Leonardo - S1E1',
            'alt_title': 'St 1 Ep 1 - Episodio 1',
            'description': 'md5:f5360cd267d2de146e4e3879a5a47d31',
            'thumbnail': r're:^https?://.*\.jpg$',
            'uploader': 'Rai 1',
            'duration': 3229,
            'series': 'Leonardo',
            'season': 'Season 1',
        },
    }, {
        'url': 'http://www.raiplay.it/video/2016/11/gazebotraindesi-efebe701-969c-4593-92f3-285f0d1ce750.html?',
        'only_matching': True,
    }, {
        # subtitles at 'subtitlesArray' key (see #27698)
        'url': 'https://www.raiplay.it/video/2020/12/Report---04-01-2021-2e90f1de-8eee-4de4-ac0e-78d21db5b600.html',
        'only_matching': True,
    }, {
        # DRM protected
        'url': 'https://www.raiplay.it/video/2020/09/Lo-straordinario-mondo-di-Zoey-S1E1-Lo-straordinario-potere-di-Zoey-ed493918-1d32-44b7-8454-862e473d00ff.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        base, video_id = re.match(self._VALID_URL, url).groups()

        # the page metadata is also served as JSON at the same base URL
        media = self._download_json(
            base + '.json', video_id, 'Downloading video JSON')

        # bail out early on DRM-protected content
        if try_get(
                media,
                (lambda x: x['rights_management']['rights']['drm'],
                 lambda x: x['program_info']['rights_management']['rights']['drm']),
                dict):
            raise ExtractorError('This video is DRM protected.', expected=True)

        title = media['name']
        video = media['video']

        relinker_info = self._extract_relinker_info(video['content_url'], video_id)
        self._sort_formats(relinker_info['formats'])

        thumbnails = []
        for _, value in media.get('images', {}).items():
            if value:
                thumbnails.append({
                    'url': urljoin(url, value),
                })

        # combine separate date/time fields into one timestamp string
        date_published = media.get('date_published')
        time_published = media.get('time_published')
        if date_published and time_published:
            date_published += ' ' + time_published

        subtitles = self._extract_subtitles(url, video)

        program_info = media.get('program_info') or {}
        season = media.get('season')

        info = {
            'id': remove_start(media.get('id'), 'ContentItem-') or video_id,
            'display_id': video_id,
            'title': self._live_title(title) if relinker_info.get(
                'is_live') else title,
            'alt_title': strip_or_none(media.get('subtitle')),
            'description': media.get('description'),
            'uploader': strip_or_none(media.get('channel')),
            'creator': strip_or_none(media.get('editor') or None),
            'duration': parse_duration(video.get('duration')),
            'timestamp': unified_timestamp(date_published),
            'thumbnails': thumbnails,
            'series': program_info.get('name'),
            'season_number': int_or_none(season),
            # a numeric season string is exposed via season_number instead
            'season': season if (season and not season.isdigit()) else None,
            'episode': media.get('episode_title'),
            'episode_number': int_or_none(media.get('episode')),
            'subtitles': subtitles,
        }

        info.update(relinker_info)
        return info
class RaiPlayLiveIE(RaiPlayIE):
    """Extractor for RaiPlay live streams (raiplay.it/dirette/...).

    The class body only overrides the URL pattern and tests; all extraction
    logic is inherited from RaiPlayIE.
    """
    _VALID_URL = r'(?P<base>https?://(?:www\.)?raiplay\.it/dirette/(?P<id>[^/?#&]+))'
    _TESTS = [{
        'url': 'http://www.raiplay.it/dirette/rainews24',
        'info_dict': {
            'id': 'd784ad40-e0ae-4a69-aa76-37519d238a9c',
            'display_id': 'rainews24',
            'ext': 'mp4',
            'title': 're:^Diretta di Rai News 24 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
            'description': 'md5:4d00bcf6dc98b27c6ec480de329d1497',
            'uploader': 'Rai News 24',
            'creator': 'Rai News 24',
            'is_live': True,
        },
        'params': {
            'skip_download': True,
        },
    }]
class RaiPlayPlaylistIE(InfoExtractor):
    """Turns a RaiPlay programme page into a playlist of RaiPlayIE entries."""
    _VALID_URL = r'(?P<base>https?://(?:www\.)?raiplay\.it/programmi/(?P<id>[^/?#&]+))'
    _TESTS = [{
        'url': 'http://www.raiplay.it/programmi/nondirloalmiocapo/',
        'info_dict': {
            'id': 'nondirloalmiocapo',
            'title': 'Non dirlo al mio capo',
            'description': 'md5:98ab6b98f7f44c2843fd7d6f045f153b',
        },
        'playlist_mincount': 12,
    }]

    def _real_extract(self, url):
        base, playlist_id = re.match(self._VALID_URL, url).groups()
        program = self._download_json(
            base + '.json', playlist_id, 'Downloading program JSON')

        entries = []
        # A programme is organised as blocks -> content sets -> items; each
        # item points at an individual video page.
        for block in (program.get('blocks') or []):
            for content_set in (block.get('sets') or []):
                set_id = content_set.get('id')
                if not set_id:
                    continue
                medias = self._download_json(
                    '%s/%s.json' % (base, set_id), set_id,
                    'Downloading content set JSON', fatal=False)
                if not medias:
                    continue
                for item in (medias.get('items') or []):
                    path_id = item.get('path_id')
                    if not path_id:
                        continue
                    video_url = urljoin(url, path_id)
                    entries.append(self.url_result(
                        video_url, ie=RaiPlayIE.ie_key(),
                        video_id=RaiPlayIE._match_id(video_url)))

        return self.playlist_result(
            entries, playlist_id, program.get('name'),
            try_get(program, lambda x: x['program_info']['description']))
class RaiIE(RaiBaseIE):
    """Generic extractor for rai.it / rai.tv / rainews.it pages.

    The video UUID may appear in the URL itself, in meta tags, in inline
    JavaScript calls, or in an embedded iframe; _real_extract tries each
    location in turn and finally falls back to a raw relinker URL scraped
    from the page source.
    """
    _VALID_URL = r'https?://[^/]+\.(?:rai\.(?:it|tv)|rainews\.it)/.+?-(?P<id>%s)(?:-.+?)?\.html' % RaiBaseIE._UUID_RE
    _TESTS = [{
        # var uniquename = "ContentItem-..."
        # data-id="ContentItem-..."
        'url': 'http://www.raisport.rai.it/dl/raiSport/media/rassegna-stampa-04a9f4bd-b563-40cf-82a6-aad3529cb4a9.html',
        'info_dict': {
            'id': '04a9f4bd-b563-40cf-82a6-aad3529cb4a9',
            'ext': 'mp4',
            'title': 'TG PRIMO TEMPO',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 1758,
            'upload_date': '20140612',
        },
        'skip': 'This content is available only in Italy',
    }, {
        # with ContentItem in many metas
        'url': 'http://www.rainews.it/dl/rainews/media/Weekend-al-cinema-da-Hollywood-arriva-il-thriller-di-Tate-Taylor-La-ragazza-del-treno-1632c009-c843-4836-bb65-80c33084a64b.html',
        'info_dict': {
            'id': '1632c009-c843-4836-bb65-80c33084a64b',
            'ext': 'mp4',
            'title': 'Weekend al cinema, da Hollywood arriva il thriller di Tate Taylor "La ragazza del treno"',
            'description': 'I film in uscita questa settimana.',
            'thumbnail': r're:^https?://.*\.png$',
            'duration': 833,
            'upload_date': '20161103',
        }
    }, {
        # with ContentItem in og:url
        'url': 'http://www.rai.it/dl/RaiTV/programmi/media/ContentItem-efb17665-691c-45d5-a60c-5301333cbb0c.html',
        'md5': '06345bd97c932f19ffb129973d07a020',
        'info_dict': {
            'id': 'efb17665-691c-45d5-a60c-5301333cbb0c',
            'ext': 'mp4',
            'title': 'TG1 ore 20:00 del 03/11/2016',
            'description': 'TG1 edizione integrale ore 20:00 del giorno 03/11/2016',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 2214,
            'upload_date': '20161103',
        }
    }, {
        # initEdizione('ContentItem-...'
        'url': 'http://www.tg1.rai.it/dl/tg1/2010/edizioni/ContentSet-9b6e0cba-4bef-4aef-8cf0-9f7f665b7dfb-tg1.html?item=undefined',
        'info_dict': {
            'id': 'c2187016-8484-4e3a-8ac8-35e475b07303',
            'ext': 'mp4',
            'title': r're:TG1 ore \d{2}:\d{2} del \d{2}/\d{2}/\d{4}',
            'duration': 2274,
            'upload_date': '20170401',
        },
        'skip': 'Changes daily',
    }, {
        # HLS live stream with ContentItem in og:url
        'url': 'http://www.rainews.it/dl/rainews/live/ContentItem-3156f2f2-dc70-4953-8e2f-70d7489d4ce9.html',
        'info_dict': {
            'id': '3156f2f2-dc70-4953-8e2f-70d7489d4ce9',
            'ext': 'mp4',
            'title': 'La diretta di Rainews24',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # ContentItem in iframe (see #12652) and subtitle at 'subtitlesUrl' key
        'url': 'http://www.presadiretta.rai.it/dl/portali/site/puntata/ContentItem-3ed19d13-26c2-46ff-a551-b10828262f1b.html',
        'info_dict': {
            'id': '1ad6dc64-444a-42a4-9bea-e5419ad2f5fd',
            'ext': 'mp4',
            'title': 'Partiti acchiappavoti - Presa diretta del 13/09/2015',
            'description': 'md5:d291b03407ec505f95f27970c0b025f4',
            'upload_date': '20150913',
            'subtitles': {
                'it': 'count:2',
            },
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # Direct MMS URL
        'url': 'http://www.rai.it/dl/RaiTV/programmi/media/ContentItem-b63a4089-ac28-48cf-bca5-9f5b5bc46df5.html',
        'only_matching': True,
    }, {
        'url': 'https://www.rainews.it/tgr/marche/notiziari/video/2019/02/ContentItem-6ba945a2-889c-4a80-bdeb-8489c70a8db9.html',
        'only_matching': True,
    }]

    def _extract_from_content_id(self, content_id, url):
        """Build the info dict for a ContentItem UUID via the JSON API."""
        media = self._download_json(
            'http://www.rai.tv/dl/RaiTV/programmi/media/ContentItem-%s.html?json' % content_id,
            content_id, 'Downloading video JSON')
        title = media['name'].strip()
        media_type = media['type']
        if 'Audio' in media_type:
            # Audio items carry a direct URL; no relinker round-trip needed.
            relinker_info = {
                'formats': [{
                    'format_id': media.get('formatoAudio'),
                    'url': media['audioUrl'],
                    'ext': media.get('formatoAudio'),
                }]
            }
        elif 'Video' in media_type:
            relinker_info = self._extract_relinker_info(media['mediaUri'], content_id)
        else:
            raise ExtractorError('not a media file')
        self._sort_formats(relinker_info['formats'])
        thumbnails = []
        for image_type in ('image', 'image_medium', 'image_300'):
            thumbnail_url = media.get(image_type)
            if thumbnail_url:
                thumbnails.append({
                    'url': compat_urlparse.urljoin(url, thumbnail_url),
                })
        subtitles = self._extract_subtitles(url, media)
        info = {
            'id': content_id,
            'title': title,
            'description': strip_or_none(media.get('desc')),
            'thumbnails': thumbnails,
            'uploader': media.get('author'),
            'upload_date': unified_strdate(media.get('date')),
            'duration': parse_duration(media.get('length')),
            'subtitles': subtitles,
        }
        info.update(relinker_info)
        return info

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        content_item_id = None
        # First look for a ContentItem UUID in the page's meta tags.
        content_item_url = self._html_search_meta(
            ('og:url', 'og:video', 'og:video:secure_url', 'twitter:url',
             'twitter:player', 'jsonlink'), webpage, default=None)
        if content_item_url:
            content_item_id = self._search_regex(
                r'ContentItem-(%s)' % self._UUID_RE, content_item_url,
                'content item id', default=None)
        if not content_item_id:
            # Fall back to inline JS calls, data attributes and iframes.
            content_item_id = self._search_regex(
                r'''(?x)
                    (?:
                        (?:initEdizione|drawMediaRaiTV)\(|
                        <(?:[^>]+\bdata-id|var\s+uniquename)=|
                        <iframe[^>]+\bsrc=
                    )
                    (["\'])
                    (?:(?!\1).)*\bContentItem-(?P<id>%s)
                ''' % self._UUID_RE,
                webpage, 'content item id', default=None, group='id')
        content_item_ids = set()
        if content_item_id:
            content_item_ids.add(content_item_id)
        # The URL id itself is also a candidate ContentItem id.
        if video_id not in content_item_ids:
            content_item_ids.add(video_id)
        for content_item_id in content_item_ids:
            try:
                return self._extract_from_content_id(content_item_id, url)
            except GeoRestrictedError:
                raise
            except ExtractorError:
                # Try the next candidate id before giving up.
                pass
        # Last resort: scrape a relinker URL straight out of the page source.
        relinker_url = self._proto_relative_url(self._search_regex(
            r'''(?x)
                (?:
                    var\s+videoURL|
                    mediaInfo\.mediaUri
                )\s*=\s*
                ([\'"])
                (?P<url>
                    (?:https?:)?
                    //mediapolis(?:vod)?\.rai\.it/relinker/relinkerServlet\.htm\?
                    (?:(?!\1).)*\bcont=(?:(?!\1).)+)\1
            ''',
            webpage, 'relinker URL', group='url'))
        relinker_info = self._extract_relinker_info(
            urljoin(url, relinker_url), video_id)
        self._sort_formats(relinker_info['formats'])
        title = self._search_regex(
            r'var\s+videoTitolo\s*=\s*([\'"])(?P<title>[^\'"]+)\1',
            webpage, 'title', group='title',
            default=None) or self._og_search_title(webpage)
        info = {
            'id': video_id,
            'title': title,
        }
        info.update(relinker_info)
        return info
|
import cx_Oracle
from ddl_loader import db_conf
# Connection settings come from the project-level configuration module.
CURRENT_DB_CONNECTION: str = db_conf.DB_NAME
OWNER: str = db_conf.USER.upper()
_user: str = OWNER
_pwd: str = db_conf.PASSWORD
# NOTE(review): the service name comes from db_conf.NAME while
# CURRENT_DB_CONNECTION uses db_conf.DB_NAME -- confirm both are intended.
_service: str = db_conf.NAME
try:
    # Initialise the Oracle Instant Client; a ProgrammingError means it was
    # already initialised in this process, which is safe to ignore.
    cx_Oracle.init_oracle_client(lib_dir=r"D:\src\instantclient_19_3")
except cx_Oracle.ProgrammingError:
    pass
# Single module-level connection shared by the service functions below.
_con = cx_Oracle.connect(_user, _pwd, _service)
def get_object_ddl_script_service(object_type: str, object_name: str) -> str:
    """Return the DDL script of a schema object, fetched via DBMS_METADATA.

    Oracle's double quotes around identifiers are stripped from the
    returned script.
    """
    with _con.cursor() as cur:
        def _clob_as_var(cursor, name, defaultType, size, precision, scale):
            # Oracle output-type handler: fetch CLOB columns as cursor vars.
            if defaultType == cx_Oracle.CLOB:
                return cursor.var(cx_Oracle.CLOB, arraysize=cursor.arraysize)

        _con.outputtypehandler = _clob_as_var
        # Strip segment/storage clauses from generated table DDL.
        cur.execute(
            """
            begin
            DBMS_METADATA.SET_TRANSFORM_PARAM(DBMS_METADATA.SESSION_TRANSFORM, 'STORAGE', false);
            end;
            """
        )
        # Standard Oracle DBMS_METADATA API: fetch the object's DDL script.
        binds = {
            'object_type': object_type.upper(),
            'object_name': object_name.upper(),
            'owner': _user.upper(),
        }
        cur.execute(
            """
            select DBMS_METADATA.GET_DDL(:object_type, :object_name, :owner)
            from dual
            """, binds
        )
        row = cur.fetchone()
        ddl_clob = row[0]
        return ddl_clob.read().replace('"', '')
def get_object_error_msg_service(object_type: str, object_name: str) -> list:
    """Fetch compilation-error details for an invalid schema object.

    Returns the rows of (object name, error line, error message, offending
    source fragment) produced by the query below, ordered by error sequence.
    """
    with _con.cursor() as cur:
        # Join the error rows with the matching source line so the offending
        # text can be shown starting at the reported error position.
        cur.execute(
            """
            select
            err.NAME
            , err.LINE as ERROR_LINE
            , err.TEXT as ERROR_MSG
            , substr(src.TEXT, err.POSITION) as ERROR_TEXT
            from ALL_OBJECTS obj
            join ALL_ERRORS err on obj.OBJECT_TYPE = err.TYPE and obj.OBJECT_NAME = err.NAME and err.OWNER = obj.OWNER
            join ALL_SOURCE src on err.OWNER = src.OWNER and src.TYPE = err.TYPE and err.LINE = src.LINE and err.NAME = src.NAME
            where 1 = 1
            and obj.OWNER = :owner
            and obj.STATUS = 'INVALID'
            and obj.OBJECT_TYPE = :object_type
            and obj.OBJECT_NAME = :object_name
            order by err.OWNER, err.TYPE, err.NAME, err.SEQUENCE
            """, {'object_type': object_type.upper(), 'object_name': object_name.upper(), 'owner': _user.upper()}
        )
        return cur.fetchall()
|
#!/usr/bin/env python
# coding: utf-8
# In[3]:
import simpy
import numpy as np
SIM_TIME = 1_000_000          # total simulated time units per run
IAT_MEAN = 3                  # mean inter-arrival time (exponential)
PROCESS_TIMES = [30, 50, 40]  # mean service time for each process type
CAPACITY_LIMIT = 0.9          # factor used when sizing the machine pool below
def source(env, process):
    """Feed the simulation with arrivals at exponential intervals forever."""
    while True:
        inter_arrival = np.random.exponential(IAT_MEAN)
        yield env.timeout(inter_arrival)
        env.process(operation(env, process))
def operation(env, process):
    """Serve one arrival: wait for a machine, then hold it for the service time."""
    with process.request() as req:
        yield req
        # NOTE(review): relies on the module-level loop variable
        # `process_to_run` leaking out of the __main__ loop below --
        # confirm this coupling is intentional.
        yield env.timeout(np.random.exponential(PROCESS_TIMES[process_to_run]))
if __name__ == "__main__":
    # Run one independent simulation per process type and report the machine
    # count and the resulting utilisation figure.
    for process_to_run, process_time in enumerate(PROCESS_TIMES):
        # Number of machines derived from the simulation horizon, the
        # capacity factor and the mean service time.
        num_machines = int(SIM_TIME * CAPACITY_LIMIT / process_time)
        print(f"프로세스 {process_to_run + 1}을 위한 기계 수: {num_machines}")
        env = simpy.Environment()
        process = simpy.Resource(env, capacity=num_machines)
        env.process(source(env, process))
        env.run(until=SIM_TIME)
        # NOTE(review): Resource.count is the number of machines in use at
        # the instant the run stops, not a time-average -- confirm this is
        # the intended utilisation metric.
        utilization = process.count / num_machines
        print(f"프로세스 {process_to_run + 1}의 이용률: {utilization}\n")
# In[ ]:
# In[ ]:
|
import unittest
from congressionalrecord.fdsys import cr_parser as cr
from congressionalrecord.fdsys import downloader as dl
import random
import os
from datetime import datetime,timedelta
import json
import re
import logging
# Log everything the parser does to a local file for post-mortem debugging.
logging.basicConfig(filename='tests.log',level=logging.DEBUG)
"""
These tests make sure that basic parser functions
run as expected, generating files full of JSON output
such that nothing that looks like
a speech exists outside of a "speech" JSON item.
"""
class testCRDir(unittest.TestCase):
    """Checks that ParseCRDir remembers the directory it was pointed at."""

    def setUp(self):
        pass

    def test_crdir(self):
        """CRDir pointed at correct path"""
        target = 'tests/test_files/CREC-2005-07-20'
        parsed = cr.ParseCRDir(target)
        self.assertEqual(parsed.cr_dir, target)
class testCRFile(unittest.TestCase):
    """Checks that a randomly chosen HTML file parses into a complete crdoc."""

    def setUp(self):
        # Pick one random html file from the fixture directory so each run
        # exercises a different document.
        input_string = 'tests/test_files/CREC-2005-07-20'
        self.crdir = cr.ParseCRDir(input_string)
        input_dir = os.path.join(input_string,'html')
        input_file = random.choice(os.listdir(input_dir))
        self.input_path = os.path.join(input_dir,input_file)

    def test_top_level_keys(self):
        """
        CRFile has all the right fixins' in the crdoc
        """
        crfile = cr.ParseCRFile(self.input_path,self.crdir)
        for x in ['doc_title','header','content','id']:
            self.assertIn(x,crfile.crdoc.keys(),msg='{0} not in crdoc!'.format(x))

    def test_content_length(self):
        crfile = cr.ParseCRFile(self.input_path,self.crdir)
        # BUG FIX: assertGreater(crfile.crdoc['content'], 0) compared a
        # container to an int, which raises TypeError on Python 3 (and was
        # vacuously true on Python 2); compare the length instead.
        self.assertGreater(len(crfile.crdoc['content']),0,msg='No items in content!')
class testLineBreak(unittest.TestCase):
    """Checks that no speaker-attribution line appears outside 'speech' items."""

    def setUp(self):
        # Matches the start of a speech: optional indent or <bullet> marker
        # followed by a speaker designation ("Mr. SMITH of Texas",
        # "The SPEAKER pro tempore", "The PRESIDING OFFICER", ...) and a
        # terminating period.
        self.sp = re.compile(r'^(\s{1,2}|<bullet>)(?P<name>((((Mr)|(Ms)|(Mrs)|(Miss))\. (([-A-Z\'])(\s)?)+( of [A-Z][a-z]+)?)|((The ((VICE|ACTING|Acting) )?(PRESIDENT|SPEAKER|CHAIR(MAN)?)( pro tempore)?)|(The PRESIDING OFFICER)|(The CLERK)|(The CHIEF JUSTICE)|(The VICE PRESIDENT)|(Mr\. Counsel [A-Z]+))( \([A-Za-z.\- ]+\))?))\.')

    def test_fixedLineBreak(self):
        rootdir = 'tests/test_files/CREC-2005-07-20/json'
        for apath in os.listdir(rootdir):
            thepath = os.path.join(rootdir,apath)
            with open(thepath,'r') as thefile:
                thejson = json.load(thefile)
            for item in thejson['content']:
                if item['kind'] != 'speech':
                    # Non-speech items must not contain any speaker line.
                    for line in item['text'].split('\n'):
                        self.assertFalse(
                            self.sp.match(line),
                            'Check {0}'.format(apath))
|
#!/usr/bin/python
import paramiko, sys, os, socket
import threading
import subprocess
# Command-line arguments: target host, SSH user name, total number of login
# attempts, a fake date to set on the target ("none" to skip), and the base
# VM type ('kvm' or 'aws').
attacked_addr = sys.argv[1]
username = sys.argv[2]
number = sys.argv[3]
time = sys.argv[4]
basevm_type = sys.argv[5]
class myThread (threading.Thread):
    # Worker thread that performs its share of the SSH login attempts.
    def __init__(self, threadID, name):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.assign_number = 0  # attempts assigned to this thread

    def run(self):
        print "Starting " + self.name
        # Split `number` attempts over 5 threads; thread 5 takes the remainder
        # (Python 2 integer division).
        if(self.threadID != 5):
            self.assign_number = int(number)/5
        else:
            self.assign_number = int(number) - (int(number)/5)*4
        for i in range(0, self.assign_number):
            try:
                response = ssh_connect()
                if response == 1:
                    print "{}: {}".format(self.name, i)
                elif response == 2:
                    print "socket error"
            except Exception, e:
                # Keep going even if a single attempt blows up.
                print e
                pass
        print "Exiting " + self.name
def ssh_connect():
    # Perform one SSH login attempt with a fixed password against the target.
    # Returns 1 on authentication failure, 2 on socket error.
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    # NOTE(review): if connect() succeeds (or raises an exception other than
    # the two caught below), `response` is never assigned and the final
    # `return response` raises NameError -- confirm whether a success value
    # is expected here.
    try:
        ssh.connect(attacked_addr, port=22, username=username, password="abcd")
    except paramiko.AuthenticationException:
        response = 1
    except socket.error:
        response = 2
    ssh.close()
    return response
# Set the target's system date to the requested (fake) date before the run,
# so the log entries generated by the attempts carry that timestamp.
if time != "none":
    if basevm_type == 'kvm':
        os.system("ssh root@{0} date +%Y%m%d -s {1}".format(attacked_addr, time))
    elif basevm_type == 'aws':
        os.system("ssh -i TESTKEY.pem ec2-user@{0} date +%Y%m%d -s {1}".format(attacked_addr, time))
# Create new threads
thread1 = myThread(1, "Thread-1")
thread2 = myThread(2, "Thread-2")
thread3 = myThread(3, "Thread-3")
thread4 = myThread(4, "Thread-4")
thread5 = myThread(5, "Thread-5")
# Start new Threads
thread1.start()
thread2.start()
thread3.start()
thread4.start()
thread5.start()
# Wait until threads are finished
thread1.join()
thread2.join()
thread3.join()
thread4.join()
thread5.join()
# Restore the target's system date/time to the real current values, then
# re-sort /var/log/secure so the back-dated entries appear in order.
if time != "none":
    if basevm_type == 'kvm':
        correct_date = subprocess.check_output("date +%Y%m%d", shell=True)
        correct_time = subprocess.check_output("date +%T", shell=True)
        os.system("ssh root@{0} date +%Y%m%d -s {1}".format(attacked_addr, correct_date))
        os.system("ssh root@{0} date +%T -s {1}".format(attacked_addr, correct_time))
        os.system("ssh root@{0} sort --stable --reverse --key=1,2 /var/log/secure -o /var/log/secure".format(attacked_addr))
    elif basevm_type == 'aws':
        correct_date = subprocess.check_output("date +%Y%m%d", shell=True)
        correct_time = subprocess.check_output("date +%T", shell=True)
        os.system("ssh -i TESTKEY.pem ec2-user@{0} sudo date +%Y%m%d -s {1}".format(attacked_addr, correct_date))
        os.system("ssh -i TESTKEY.pem ec2-user@{0} sudo date +%T -s {1}".format(attacked_addr, correct_time))
        os.system("ssh -i TESTKEY.pem ec2-user@{0} sudo sort --stable --reverse --key=1,2 /var/log/secure -o /var/log/secure".format(attacked_addr))
|
from tensorflow.python.keras.layers import Conv2D, Layer
from tensorflow.python.keras.layers import MaxPooling2D, BatchNormalization
from tensorflow.python.keras import backend as K
import tensorflow as tf
class PreprocessImage(Layer):
    """Casts raw grayscale images into the conv layers' input format.

    1. Resize/reshape:
       (batch size, image height, image width) -> (batch size, image height, image width, 1)
    2. Type conversion:
       tf.uint8 -> tf.float32
    3. Normalisation:
       pixel range [0, 255] -> [-1., 1.]
    """
    def __init__(self, height=64, normalize=True, zero_mean=True, **kwargs):
        # height: target image height after resizing
        # normalize: whether to rescale pixels out of [0, 255]
        # zero_mean: if True map to [-1, 1], otherwise to [0, 1]
        self.height = height
        self.normalize = normalize
        self.zero_mean = zero_mean
        # The layer has no weights, so it is not trainable by default.
        kwargs.setdefault('trainable', False)
        super().__init__(**kwargs)

    def call(self, inputs, **kwargs):
        inputs = self.resize_inputs(inputs)
        # Build a padding mask: 0 where an entire axis-1 slice is zero,
        # 1 elsewhere.  NOTE(review): reduce_all runs over axis=1 (height) --
        # confirm the padding convention is column-wise as assumed here.
        mask = 1 - tf.cast(
            tf.reduce_all(tf.equal(inputs, 0.),
                          axis=1, keepdims=True), tf.float32)
        mask = tf.tile(mask, [1, tf.shape(inputs)[1], 1, 1])
        if self.normalize:
            if self.zero_mean:
                inputs = (inputs - 127.5) / 127.5
            else:
                inputs = inputs / 255.
        return inputs, mask

    def resize_inputs(self, inputs):
        b, h, w = tf.unstack(tf.shape(inputs)[:3])  # Dynamic Shape
        inputs = tf.cast(inputs, tf.float32)
        inputs = tf.reshape(inputs, (b, h, w, 1))

        def resize_by_height(inputs):
            # Scale the width proportionally to keep the aspect ratio.
            new_h = self.height
            new_w = tf.cast(tf.math.ceil(w / h * new_h), tf.int32)
            inputs = tf.image.resize(inputs, (new_h, new_w))
            return inputs

        # Skip the resize when the input is already at the target height.
        inputs = tf.cond(tf.equal(h, self.height),
                         lambda: inputs,
                         lambda: resize_by_height(inputs))
        inputs.set_shape([None, self.height, None, 1])  # Dynamic Shape to Static Shape
        return inputs

    def get_config(self):
        config = super().get_config()
        config.update({
            "height": self.height,
            "normalize": self.normalize,
            "zero_mean": self.zero_mean
        })
        return config
class ConvFeatureExtractor(Layer):
    """Image encoder.

    The input resolution differs from the original implementation's, so a
    deeper feature extractor is used.  Changes:
    1. VGG-style stages replaced with ResNet-style residual blocks
    2. 7 VGG layers replaced by 7 blocks (== 14 conv layers)
    3. Batch Normalization added for faster convergence

    Args:
        filters: number of filters per stage.
        strides: pooling stride per stage; must match ``filters`` in length.
    """
    def __init__(self,
                 filters=(32, 64, 128, 128, 256, 256),
                 strides=(2, 2, 1, 2, 1, 2),
                 **kwargs):
        assert len(filters) == len(strides), "filters의 리스트 크기와 strides의 리스트 크기는 동일해야 합니다."
        # BUG FIX: Layer.__init__ must run before sub-layers are assigned as
        # attributes -- tf.keras tracks sub-layers via __setattr__ and raises
        # a RuntimeError if the base class is not initialised first.  This
        # also matches the order already used by ResidualLayer below.
        super().__init__(**kwargs)
        self.filters = filters
        self.strides = strides
        f = filters[0]
        s = strides[0]
        # First stage: two plain conv+BN layers followed by max pooling.
        self.conv1_1 = Conv2D(f, (3, 3), padding='same', use_bias=False)
        self.norm1_1 = BatchNormalization()
        self.conv1_2 = Conv2D(f, (3, 3), padding='same', use_bias=False)
        self.norm1_2 = BatchNormalization()
        self.maxpool1 = MaxPooling2D((s, s), (s, s), padding='same')
        # Remaining stages: residual blocks, one per (filters, strides) pair.
        self.blocks = []
        for f, s in zip(self.filters[1:], self.strides[1:]):
            self.blocks.append(ResidualLayer(f, s))
        self.final_conv = Conv2D(self.filters[-1], (2, 2), padding='valid')
        self.final_norm = BatchNormalization()

    def call(self, inputs, **kwargs):
        x = inputs[0]
        mask = inputs[1]
        x = self.conv1_1(x)
        x = self.norm1_1(x)
        x = K.relu(x)
        x = self.conv1_2(x)
        x = self.norm1_2(x)
        x = K.relu(x)
        x = self.maxpool1(x)
        for block in self.blocks:
            x = block(x)
        x = self.final_conv(x)
        x = self.final_norm(x)
        outputs = K.relu(x)
        # Scale the padding mask down to the output's spatial resolution.
        mask = self.resize_mask(mask, outputs)
        return outputs, mask

    def resize_mask(self, mask, target):
        """Resize the binary mask to the spatial shape of ``target``."""
        target_shape = tf.shape(target)[1:3]
        mask = tf.image.resize(mask, target_shape,
                               tf.image.ResizeMethod.BILINEAR)
        # Re-binarise after the bilinear interpolation.
        mask = tf.cast(mask > 0.5, tf.float32)
        return mask

    def get_config(self):
        config = super().get_config()
        config.update({
            "strides": self.strides,
            "filters": self.filters,
        })
        return config
class ResidualLayer(Layer):
    """ResNet block: two conv+BN layers with a 1x1-projected skip connection,
    followed by max pooling with the given stride."""

    def __init__(self, filters, strides=1, **kwargs):
        self.filters = filters
        self.strides = strides
        super().__init__(**kwargs)
        width, stride = self.filters, self.strides
        self.skip = Conv2D(width, (1, 1), padding='same', use_bias=False)
        self.conv1 = Conv2D(width, (3, 3), padding='same', use_bias=False)
        self.bn1 = BatchNormalization()
        self.conv2 = Conv2D(width, (3, 3), padding='same', use_bias=False)
        self.bn2 = BatchNormalization()
        self.pool = MaxPooling2D((stride, stride), (stride, stride), padding='same')

    def call(self, inputs, **kwargs):
        shortcut = self.skip(inputs)
        out = self.bn1(self.conv1(inputs))
        out = K.relu(out)
        out = self.bn2(self.conv2(out))
        # Merge the skip path before the final activation, then downsample.
        out = K.relu(shortcut + out)
        return self.pool(out)

    def get_config(self):
        base = super().get_config()
        base.update({
            "filters": self.filters,
            "strides": self.strides
        })
        return base
class Map2Sequence(Layer):
    """Converts CNN feature maps into the RNN layer's input format.

    Done with a transpose followed by a reshape:
        CNN output shape -> RNN input shape
        (batch size, height, width, channels)
            -> (batch size, width, height * channels)
    """
    def call(self, inputs, **kwargs):
        x = inputs[0]
        mask = inputs[1]
        b, _, w, _ = tf.unstack(tf.shape(x))  # dynamic batch size and width
        _, h, _, f = x.shape.as_list()        # static height and channels
        # Make width the sequence axis, then flatten height x channels into
        # the feature axis.
        x = K.permute_dimensions(x, (0, 2, 1, 3))
        outputs = tf.reshape(x, shape=[b, w, h * f])
        mask = K.permute_dimensions(mask, (0, 2, 1, 3))
        mask = tf.reshape(mask, shape=[b, w, h])
        # A timestep is valid only if its whole column was unmasked.
        mask = tf.reduce_all(mask > 0.5, axis=-1)
        return outputs, mask
class SpatialTransformer(Layer):
    """
    * CAUTION *
    In the RARE model, including the STN made no significant performance
    difference, and including it forces a fixed input image size, so it was
    removed from the final pipeline.
    Spatial Transformer Layer
    Implements a spatial transformer layer as described in [1]_.
    Borrowed from [4]_:
    downsample_fator : float
        A value of 1 will keep the orignal size of the image.
        Values larger than 1 will down sample the image. Values below 1 will
        upsample the image.
        example image: height= 100, width = 200
        downsample_factor = 2
        output image will then be 50, 100
    References
    ----------
    .. [1] Spatial Transformer Networks
           Max Jaderberg, Karen Simonyan, Andrew Zisserman, Koray Kavukcuoglu
           Submitted on 5 Jun 2015
    .. [2] https://github.com/skaae/transformer_network/blob/master/transformerlayer.py
    .. [3] https://github.com/EderSantana/seya/blob/keras1/seya/layers/attention.py
    .. [4] https://github.com/sbillburg/CRNN-with-STN/blob/master/STN/spatial_transformer.py
    """
    def __init__(self,
                 localization_net,
                 output_size,
                 **kwargs):
        # localization_net: sub-network predicting the 2x3 affine parameters
        # output_size: (height, width) of the transformed output
        self.loc_net = localization_net
        self.output_size = output_size
        super(SpatialTransformer, self).__init__(**kwargs)

    def build(self, input_shape):
        self.loc_net.build(input_shape)
        # NOTE(review): directly assigning trainable_weights only works on
        # older Keras versions; in TF2 it is a read-only property -- confirm
        # the TF version this layer targets.
        self.trainable_weights = self.loc_net.trainable_weights

    def compute_output_shape(self, input_shape):
        output_size = self.output_size
        return (None,
                int(output_size[0]),
                int(output_size[1]),
                int(input_shape[-1]))

    def call(self, X, mask=None):
        # Predict per-sample affine parameters, then resample the input.
        affine_transformation = self.loc_net.call(X)
        output = self._transform(affine_transformation, X, self.output_size)
        return output

    def _repeat(self, x, num_repeats):
        # Repeat each element of x num_repeats times (flattened result).
        ones = tf.ones((1, num_repeats), dtype=tf.int32)
        x = tf.reshape(x, shape=(-1,1))
        x = tf.matmul(x, ones)
        return tf.reshape(x, [-1])

    def _interpolate(self, image, x, y, output_size):
        """Bilinearly sample `image` at normalised coordinates (x, y)."""
        batch_size = tf.shape(image)[0]
        height = tf.shape(image)[1]
        width = tf.shape(image)[2]
        num_channels = tf.shape(image)[3]
        x = tf.cast(x, dtype=tf.float32)
        y = tf.cast(y, dtype=tf.float32)
        height_float = tf.cast(height, dtype=tf.float32)
        width_float = tf.cast(width, dtype=tf.float32)
        output_height = output_size[0]
        output_width = output_size[1]
        # Map [-1, 1] coordinates to pixel coordinates.
        x = .5*(x + 1.0)*width_float
        y = .5*(y + 1.0)*height_float
        # The four neighbouring pixel indices, clamped to the image bounds.
        x0 = tf.cast(tf.floor(x), tf.int32)
        x1 = x0 + 1
        y0 = tf.cast(tf.floor(y), tf.int32)
        y1 = y0 + 1
        max_y = tf.cast(height - 1, dtype=tf.int32)
        max_x = tf.cast(width - 1, dtype=tf.int32)
        zero = tf.zeros([], dtype=tf.int32)
        x0 = tf.clip_by_value(x0, zero, max_x)
        x1 = tf.clip_by_value(x1, zero, max_x)
        y0 = tf.clip_by_value(y0, zero, max_y)
        y1 = tf.clip_by_value(y1, zero, max_y)
        # Flatten the image so each corner can be gathered with a 1-D index.
        flat_image_dimensions = width*height
        pixels_batch = tf.range(batch_size)*flat_image_dimensions
        flat_output_dimensions = output_height*output_width
        base = self._repeat(pixels_batch, flat_output_dimensions)
        base_y0 = base + y0*width
        base_y1 = base + y1*width
        indices_a = base_y0 + x0
        indices_b = base_y1 + x0
        indices_c = base_y0 + x1
        indices_d = base_y1 + x1
        flat_image = tf.reshape(image, shape=(-1, num_channels))
        flat_image = tf.cast(flat_image, dtype=tf.float32)
        pixel_values_a = tf.gather(flat_image, indices_a)
        pixel_values_b = tf.gather(flat_image, indices_b)
        pixel_values_c = tf.gather(flat_image, indices_c)
        pixel_values_d = tf.gather(flat_image, indices_d)
        x0 = tf.cast(x0, tf.float32)
        x1 = tf.cast(x1, tf.float32)
        y0 = tf.cast(y0, tf.float32)
        y1 = tf.cast(y1, tf.float32)
        # Bilinear weights: each corner weighted by the opposite sub-area.
        area_a = tf.expand_dims(((x1 - x) * (y1 - y)), 1)
        area_b = tf.expand_dims(((x1 - x) * (y - y0)), 1)
        area_c = tf.expand_dims(((x - x0) * (y1 - y)), 1)
        area_d = tf.expand_dims(((x - x0) * (y - y0)), 1)
        output = tf.add_n([area_a*pixel_values_a,
                           area_b*pixel_values_b,
                           area_c*pixel_values_c,
                           area_d*pixel_values_d])
        return output

    def _meshgrid(self, height, width):
        # Homogeneous sampling grid over [-1, 1] x [-1, 1]: rows are the
        # x coordinates, y coordinates and a row of ones.
        x_linspace = tf.linspace(-1., 1., width)
        y_linspace = tf.linspace(-1., 1., height)
        x_coordinates, y_coordinates = tf.meshgrid(x_linspace, y_linspace)
        x_coordinates = tf.reshape(x_coordinates, shape=(1, -1))
        y_coordinates = tf.reshape(y_coordinates, shape=(1, -1))
        ones = tf.ones_like(x_coordinates)
        indices_grid = tf.concat([x_coordinates, y_coordinates, ones], 0)
        return indices_grid

    def _transform(self, affine_transformation, input_shape, output_size):
        """Apply the predicted affine transforms to the input batch."""
        batch_size, _, _, num_channels = tf.unstack(tf.shape(input_shape))
        affine_transformation = tf.reshape(affine_transformation, shape=(batch_size,2,3))
        # NOTE(review): the second reshape below repeats the one above with
        # the same (2, 3) target -- appears redundant.
        affine_transformation = tf.reshape(affine_transformation, (-1, 2, 3))
        affine_transformation = tf.cast(affine_transformation, tf.float32)
        output_height = output_size[0]
        output_width = output_size[1]
        indices_grid = self._meshgrid(output_height, output_width)
        indices_grid = tf.expand_dims(indices_grid, 0)
        indices_grid = tf.reshape(indices_grid, [-1]) # flatten?
        # Replicate the grid once per batch element.
        indices_grid = tf.tile(indices_grid, tf.stack([batch_size]))
        indices_grid = tf.reshape(indices_grid, tf.stack([batch_size, 3, -1]))
        # transformed_grid = tf.batch_matmul(affine_transformation, indices_grid)
        transformed_grid = tf.matmul(affine_transformation, indices_grid)
        x_s = tf.slice(transformed_grid, [0, 0, 0], [-1, 1, -1])
        y_s = tf.slice(transformed_grid, [0, 1, 0], [-1, 1, -1])
        x_s_flatten = tf.reshape(x_s, [-1])
        y_s_flatten = tf.reshape(y_s, [-1])
        transformed_image = self._interpolate(input_shape,
                                              x_s_flatten,
                                              y_s_flatten,
                                              output_size)
        transformed_image = tf.reshape(transformed_image, shape=(batch_size,
                                                                 output_height,
                                                                 output_width,
                                                                 num_channels))
        return transformed_image
class DecodeImageContent(Layer):
    """
    Decodes an image compressed in a standard format (jpg, jpeg, png)
    into a Tensorflow array.
    """
    def call(self, inputs, **kwargs):
        # From Rank 1 to Rank 0
        inputs = tf.reshape(inputs,shape=())
        image = tf.io.decode_image(inputs, channels=1,
                                   expand_animations=False)
        # Add a batch axis in front, drop the trailing channel axis.
        image = tf.expand_dims(image, axis=0)
        image = tf.squeeze(image, axis=-1)
        return image
__all__ = ["PreprocessImage",
"ConvFeatureExtractor",
"Map2Sequence",
"SpatialTransformer",
"DecodeImageContent"] |
#Image Transfer 완성본!
#기존에 있던 알고리즘을 사용하여 우리 프로젝트의 목적에 맞게 수정하고 함수 및 코드들을 추가해서 만들었다.
#라이브러러 준비
import os
import cv2
import sys
import numpy as np
import scipy.io
import scipy.misc
import tensorflow as tf # 텐져플로우 라이브러리 준비
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
from PIL import Image
#%matplotlib inline
#---------------------------- image preparation
# Directory for the finished output images.
OUTPUT_DIR = 'output11/'
# File name of the style image.
STYLE_IMAGE = 'Ss.jpg'
# File name of the content image (the image to be transformed).
CONTENT_IMAGE = 'OPENCV (1).jpg'
# Image dimensions constants.
IMAGE_WIDTH = 300
IMAGE_HEIGHT = 400
COLOR_CHANNELS = 3
# Inputs for the OpenCV steps below.
img = CONTENT_IMAGE
original = "art.jpg"
#----------------------------openCV과정
def combine_two(input1, input2):
    """Composite the watershed cut-out (input1) onto the original (input2).

    Expected usage: combine_two(wartershed(img), original) -- input1 is the
    black-background cut-out and input2 the source photo.  Writes the result
    to 'Final.jpg' and deletes the intermediate 'middle.jpg'.
    """
    # combine_two( wartershed(img), original)
    # original is the source image, img the transformed one
    # img1 = cv2.imread(input1, -1)  # black-background image
    # img2 = cv2.imread(input2, -1)  # original photo
    img1 = cv2.imread(input1)
    img2 = cv2.imread(input2)
    # Resize both images to a common size.
    img1 = cv2.resize(img1, dsize=(300, 400))
    img2 = cv2.resize(img2, dsize=(300, 400))
    rows, cols, channels = (img1.shape)
    roi = img2[0:rows, 0:cols]
    # Threshold the cut-out to get a mask of its non-black (foreground) pixels.
    img2gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(mask)
    img1_fg = cv2.bitwise_and(img1, img1, mask=mask)
    img2_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)
    # Adding the two removes the background and keeps only the cut-out region.
    dst = cv2.add(img1_fg, img2_bg)
    # Put the combined region back into the original image.
    img2[0:rows, 0:cols] = dst
    plt.imshow(img2)
    # NOTE(review): cv2.waitKey without an open cv2 window returns
    # immediately -- confirm these calls are intentional.
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    cv2.waitKey(0)
    # Remove the intermediate file.
    os.remove('middle.jpg')
    # Save the final image.
    cv2.imwrite('Final.jpg', img2)
def wartershed(input_im):
    """Segment the image with the watershed algorithm and black out every
    region except marker region 3; writes the result to 'middle.jpg' and
    returns that file name.

    (Function name kept as-is -- presumably a typo for "watershed" -- since
    callers use it.)
    """
    img = cv2.imread(input_im)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Otsu threshold + morphological opening for a clean binary foreground.
    ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    kernel = np.ones((3, 3), np.uint8)
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)
    sure_bg = cv2.dilate(opening, kernel, iterations=3)
    # Distance transform picks out the "sure foreground" cores.
    dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
    ret, sure_fg = cv2.threshold(dist_transform, 0.5 * dist_transform.max(), 255, 0)
    sure_fg = np.uint8(sure_fg)
    unknown = cv2.subtract(sure_bg, sure_fg)
    # Label the sure-foreground components; 0 is reserved for the unknown band.
    ret, markers = cv2.connectedComponents(sure_fg)
    markers = markers + 1
    markers[unknown == 255] = 0
    markers = cv2.watershed(img, markers)
    # Watershed boundaries are labelled -1; paint them black.
    img[markers == -1] = [0, 0, 0]
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    print(ret)
    # markers == 1 is the background
    ret = int(ret)
    # Black out every region except marker 3.
    # NOTE(review): the kept region index (3) is hard-coded -- confirm it is
    # the intended subject for all inputs.
    for i in range(ret + 1):
        if i != 3:
            img[markers == i] = [0, 0, 0]
    cv2.imwrite('middle.jpg', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    return "middle.jpg"
#------------------------- image-transfer tuning knobs
# Fraction of noise mixed into the initial generated image.
NOISE_RATIO = 0.6
# Constant to put more emphasis on content loss.
BETA = 5
# Constant to put more emphasis on style loss.
ALPHA = 100
# Path to the deep learning model. This is more than 500MB so will not be
# included in the repository, but available to download at the model Zoo:
# Link: https://github.com/BVLC/caffe/wiki/Model-Zoo
#
# Pick the VGG 19-layer model by from the paper "Very Deep Convolutional
# Networks for Large-Scale Image Recognition".
VGG_MODEL = 'imagenet-vgg-verydeep-19.mat'
# The mean to subtract from the input to the VGG model. This is the mean that
# when the VGG was used to train. Minor changes to this will make a lot of
# difference to the performance of model.
MEAN_VALUES = np.array([123.68, 116.779, 103.939]).reshape((1,1,1,3))
#-------------------------IMAGE TRANSFER VGG MODEL 준비
def load_vgg_model(path):
    """
    Returns a model for the purpose of 'painting' the picture.
    Takes only the convolution layer weights and wrap using the TensorFlow
    Conv2d, Relu and AveragePooling layer. VGG actually uses maxpool but
    the paper indicates that using AveragePooling yields better results.
    The last few fully connected layers are not used.
    Here is the detailed configuration of the VGG model:
        0 is conv1_1 (3, 3, 3, 64)
        1 is relu
        2 is conv1_2 (3, 3, 64, 64)
        3 is relu
        4 is maxpool
        5 is conv2_1 (3, 3, 64, 128)
        6 is relu
        7 is conv2_2 (3, 3, 128, 128)
        8 is relu
        9 is maxpool
        10 is conv3_1 (3, 3, 128, 256)
        11 is relu
        12 is conv3_2 (3, 3, 256, 256)
        13 is relu
        14 is conv3_3 (3, 3, 256, 256)
        15 is relu
        16 is conv3_4 (3, 3, 256, 256)
        17 is relu
        18 is maxpool
        19 is conv4_1 (3, 3, 256, 512)
        20 is relu
        21 is conv4_2 (3, 3, 512, 512)
        22 is relu
        23 is conv4_3 (3, 3, 512, 512)
        24 is relu
        25 is conv4_4 (3, 3, 512, 512)
        26 is relu
        27 is maxpool
        28 is conv5_1 (3, 3, 512, 512)
        29 is relu
        30 is conv5_2 (3, 3, 512, 512)
        31 is relu
        32 is conv5_3 (3, 3, 512, 512)
        33 is relu
        34 is conv5_4 (3, 3, 512, 512)
        35 is relu
        36 is maxpool
        37 is fullyconnected (7, 7, 512, 4096)
        38 is relu
        39 is fullyconnected (1, 1, 4096, 4096)
        40 is relu
        41 is fullyconnected (1, 1, 4096, 1000)
        42 is softmax
    """
    # Load the Caffe-exported VGG-19 weights from the MATLAB .mat file.
    vgg = scipy.io.loadmat(path)
    vgg_layers = vgg['layers']

    def _weights(layer, expected_layer_name):
        """
        Return the weights and bias from the VGG model for a given layer.
        """
        # The nested indexing follows the MATLAB cell-array structure of
        # the .mat file; [-2] holds the layer's name for sanity checking.
        W = vgg_layers[0][layer][0][0][0][0][0]
        b = vgg_layers[0][layer][0][0][0][0][1]
        layer_name = vgg_layers[0][layer][0][0][-2]
        assert layer_name == expected_layer_name
        return W, b

    def _relu(conv2d_layer):
        """
        Return the RELU function wrapped over a TensorFlow layer. Expects a
        Conv2d layer input.
        """
        return tf.nn.relu(conv2d_layer)

    def _conv2d(prev_layer, layer, layer_name):
        """
        Return the Conv2D layer using the weights, biases from the VGG
        model at 'layer'.
        """
        # Weights are frozen (tf.constant): only the input image is trained.
        W, b = _weights(layer, layer_name)
        W = tf.constant(W)
        b = tf.constant(np.reshape(b, (b.size)))
        return tf.nn.conv2d(
            prev_layer, filter=W, strides=[1, 1, 1, 1], padding='SAME') + b

    def _conv2d_relu(prev_layer, layer, layer_name):
        """
        Return the Conv2D + RELU layer using the weights, biases from the VGG
        model at 'layer'.
        """
        return _relu(_conv2d(prev_layer, layer, layer_name))

    def _avgpool(prev_layer):
        """
        Return the AveragePooling layer.
        """
        return tf.nn.avg_pool(prev_layer, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    # Constructs the graph model. The numeric arguments below are the layer
    # indices from the table in the docstring (relu/pool entries are skipped).
    graph = {}
    # The input is the only trainable variable in the whole graph.
    graph['input'] = tf.Variable(np.zeros((1, IMAGE_HEIGHT, IMAGE_WIDTH, COLOR_CHANNELS)), dtype = 'float32')
    graph['conv1_1'] = _conv2d_relu(graph['input'], 0, 'conv1_1')
    graph['conv1_2'] = _conv2d_relu(graph['conv1_1'], 2, 'conv1_2')
    graph['avgpool1'] = _avgpool(graph['conv1_2'])
    graph['conv2_1'] = _conv2d_relu(graph['avgpool1'], 5, 'conv2_1')
    graph['conv2_2'] = _conv2d_relu(graph['conv2_1'], 7, 'conv2_2')
    graph['avgpool2'] = _avgpool(graph['conv2_2'])
    graph['conv3_1'] = _conv2d_relu(graph['avgpool2'], 10, 'conv3_1')
    graph['conv3_2'] = _conv2d_relu(graph['conv3_1'], 12, 'conv3_2')
    graph['conv3_3'] = _conv2d_relu(graph['conv3_2'], 14, 'conv3_3')
    graph['conv3_4'] = _conv2d_relu(graph['conv3_3'], 16, 'conv3_4')
    graph['avgpool3'] = _avgpool(graph['conv3_4'])
    graph['conv4_1'] = _conv2d_relu(graph['avgpool3'], 19, 'conv4_1')
    graph['conv4_2'] = _conv2d_relu(graph['conv4_1'], 21, 'conv4_2')
    graph['conv4_3'] = _conv2d_relu(graph['conv4_2'], 23, 'conv4_3')
    graph['conv4_4'] = _conv2d_relu(graph['conv4_3'], 25, 'conv4_4')
    graph['avgpool4'] = _avgpool(graph['conv4_4'])
    graph['conv5_1'] = _conv2d_relu(graph['avgpool4'], 28, 'conv5_1')
    graph['conv5_2'] = _conv2d_relu(graph['conv5_1'], 30, 'conv5_2')
    graph['conv5_3'] = _conv2d_relu(graph['conv5_2'], 32, 'conv5_3')
    graph['conv5_4'] = _conv2d_relu(graph['conv5_3'], 34, 'conv5_4')
    graph['avgpool5'] = _avgpool(graph['conv5_4'])
    return graph
#-----------------------------------------CONTENTS 이미지 손실 계산
def content_loss_func(sess, model):
    """
    Content loss function as defined in the paper.

    Compares the generated image's conv4_2 activations (symbolic) against
    the content image's activations (evaluated now via sess.run).
    """
    p = sess.run(model['conv4_2'])   # content features, fixed numpy array
    x = model['conv4_2']             # generated-image features, symbolic
    # N is the number of filters, M the height*width of the feature map.
    n_filters = p.shape[3]
    map_size = p.shape[1] * p.shape[2]
    # Interestingly, the paper uses 0.5 * sum((x - p)^2) instead; that form
    # "paints" very slowly, so the same 1/(4NM) normalization constant used
    # in the style loss is replicated here.
    return (1 / (4 * n_filters * map_size)) * tf.reduce_sum(tf.pow(x - p, 2))
#-----------------------------------------
# Layers to use. We will use these layers as advised in the paper.
# To have softer features, increase the weight of the higher layers
# (conv5_1) and decrease the weight of the lower layers (conv1_1).
# To have harder features, decrease the weight of the higher layers
# (conv5_1) and increase the weight of the lower layers (conv1_1).
# Each entry is (layer name in the VGG graph, loss weight for that layer).
STYLE_LAYERS = [
    ('conv1_1', 0.5),
    ('conv2_1', 1.0),
    ('conv3_1', 1.5),
    ('conv4_1', 3.0),
    ('conv5_1', 4.0),
]
def style_loss_func(sess, model):
    """
    Style loss function as defined in the paper: a weighted sum of
    Gram-matrix differences over STYLE_LAYERS.
    """
    def _gram(F, N, M):
        """Gram matrix of the feature map reshaped to (M, N)."""
        feats = tf.reshape(F, (M, N))
        return tf.matmul(tf.transpose(feats), feats)

    def _layer_loss(a, x):
        """Style loss contribution of one layer (a: style, x: generated)."""
        N = a.shape[3]               # number of filters at this layer
        M = a.shape[1] * a.shape[2]  # height * width of the feature map
        A = _gram(a, N, M)           # style representation of the original
        G = _gram(x, N, M)           # style representation of the generated
        return (1 / (4 * N**2 * M**2)) * tf.reduce_sum(tf.pow(G - A, 2))

    total = 0
    for layer_name, weight in STYLE_LAYERS:
        total += weight * _layer_loss(sess.run(model[layer_name]), model[layer_name])
    return total
#------------------------
def generate_noise_image(content_image, noise_ratio = NOISE_RATIO):
    """
    Returns a noise image intermixed with the content image at a certain ratio.

    noise_ratio weights the white-noise component; (1 - noise_ratio) weights
    the content image.
    """
    shape = (1, IMAGE_HEIGHT, IMAGE_WIDTH, COLOR_CHANNELS)
    noise = np.random.uniform(-20, 20, shape).astype('float32')
    # Weighted average of the white noise and the content representation.
    return noise * noise_ratio + content_image * (1 - noise_ratio)
def load_image(path):
    """Load an image and prepare it as a one-image VGG input batch.

    NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2; this code
    requires an old SciPy (with Pillow) or a migration to imageio -- confirm
    the pinned SciPy version.
    """
    image = scipy.misc.imread(path)
    # Resize the image for convnet input, there is no change but just
    # add an extra dimension.
    image = np.reshape(image, ((1,) + image.shape))
    # Input to the VGG model expects the mean to be subtracted.
    image = image - MEAN_VALUES
    return image
def save_image(path, image):
    """Undo the VGG preprocessing and write the image to *path*.

    NOTE(review): scipy.misc.imsave was removed in SciPy >= 1.2 -- same
    version constraint as load_image above.
    """
    # Output should add back the mean.
    image = image + MEAN_VALUES
    # Get rid of the first useless dimension, what remains is the image.
    image = image[0]
    image = np.clip(image, 0, 255).astype('uint8')
    scipy.misc.imsave(path, image)
#---------------------------------- TensorFlow session setup
sess = tf.InteractiveSession()
#---------------------------------- load the content image
content_image = load_image(CONTENT_IMAGE)
imshow(content_image[0])  # display the content image
#---------------------------------- load the style image
style_image = load_image(STYLE_IMAGE)
imshow(style_image[0])  # display the style image
#---------------------------------- build the (frozen) VGG model
model = load_vgg_model(VGG_MODEL)
#----------------------------------
print(model)  # optional: inspect the layer tensors
#---------------------------------- seed image: content + noise
input_image = generate_noise_image(content_image)
imshow(input_image[0])  # display the initial input
#---------------------------------- content loss (content image loaded first)
sess.run(tf.initialize_all_variables())
sess.run(model['input'].assign(content_image))
content_loss = content_loss_func(sess, model)
#---------------------------------- style loss (style image loaded first)
sess.run(model['input'].assign(style_image))
style_loss = style_loss_func(sess, model)
#---------------------------------- equation 7 of the paper
total_loss = BETA * content_loss + ALPHA * style_loss
#----------------------------------
optimizer = tf.train.AdamOptimizer(2.0)
train_step = optimizer.minimize(total_loss)
#---------------------------------- re-initialize (now including the Adam
# slot variables) and seed the trainable input with the noisy content image.
# NOTE: the original ran this init/assign pair twice back to back; the
# duplicate was removed.
sess.run(tf.initialize_all_variables())
sess.run(model['input'].assign(input_image))
#---------------------------------- number of optimization steps
ITERATIONS = 1000
#---------------------------------- optimization loop ------------------------
for it in range(ITERATIONS):
    sess.run(train_step)
    if it % 100 == 0:
        # Report progress and snapshot the image every 100 iterations.
        mixed_image = sess.run(model['input'])
        print('Iteration %d' % (it))
        print('sum : ', sess.run(tf.reduce_sum(mixed_image)))
        print('cost: ', sess.run(total_loss))
        if not os.path.exists(OUTPUT_DIR):
            os.mkdir(OUTPUT_DIR)
        # BUG FIX: snapshots previously went to a hard-coded 'output11/'
        # path even though OUTPUT_DIR was the directory being created.
        filename = os.path.join(OUTPUT_DIR, '%d.png' % (it))
        save_image(filename, mixed_image)
# ---------------------------------- save the final stylized image
save_image('art.jpg', mixed_image)
# ---------------------------------- partial composite using art.jpg
combine_two(wartershed(img), original)
|
#!/usr/bin/env python
"""Packaging script for fix_dict (dict sanitizer for MongoDB/BSON)."""
from setuptools import setup

VERSION = '0.1.0'
DESCRIPTION = "fix_dict: fix your dict and insert it into MongoDB"
# PyPI long description (Markdown).
# BUG FIX: the install command named the wrong package ('fixdict') and the
# Quick Start code fence was never closed, breaking the PyPI rendering.
LONG_DESCRIPTION = """
Removes dots "." from keys, as mongo doesn't like that.
Also, convert ints more than 8-bytes to string cause BSON can only handle up to 8-bytes ints.
Finally, your lovely MongoDB can accept your dict and store it.
# Installation
```
$ pip install fix_dict
```
# Quick Start
```python
>>>from fix_dict import fix_dict
>>>a = {"sen.li":112132312312}
>>>b = fix_dict(a)
>>> b
{'sen_li': '112132312312'}
```
"""
CLASSIFIERS = [
    'Development Status :: 5 - Production/Stable',
    'Intended Audience :: Developers',
    'Natural Language :: English',
    'License :: OSI Approved :: MIT License',
    'Programming Language :: Python',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: Python :: Implementation :: CPython',
]
setup(
    name="fix_dict",
    version=VERSION,
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    classifiers=CLASSIFIERS,
    keywords=('dict',
              'python',
              'mongodb',
              'data structure',
              'transform',
              'fix',
              'fix dict',
              'remove dot'),
    author="Ryan Luo",
    author_email="luo_senmu@163.com",
    url="https://github.com/Senmumu/fix_dict",
    license="MIT License",
    platforms=['any'],
    test_suite="",
    zip_safe=True,
    install_requires=[],
    packages=['fix_dict']
)
|
import pylab
# print('*****')
# import theano.sandbox.cuda
# theano.sandbox.cuda.use('gpu0')
# print('*****')
from keras.callbacks import ModelCheckpoint, Callback, EarlyStopping
from keras.objectives import binary_crossentropy
from keras.regularizers import l2
from theano.ifelse import ifelse
from theano.scalar import float32, float64
from dirt_track.klass_map import show_stats, ROAD_CLASS, DIRT_CLASS, show_for_dataset, HOUSE_CLASS, SNOW_CLASS, \
TREE_CLASS, TOTAL_CLASSES, MODEL_NAME, GEO_NAME, L_CHANNEL_ONLY, MASKS, NAMES
from theano_test.helpers import path_pycharm_env
# path_pycharm_env()
import os
import theano
import theano.tensor as T
import matplotlib.pyplot as plt
import pickle
import numpy as np
from keras import backend as K
import datetime
from keras.datasets import mnist
from keras.models import Sequential, Graph
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.utils import np_utils
# Small convolutional network: conv -> relu -> maxpool -> dropout -> flatten
# -> large L2-regularized dense layer -> single-unit output. The model is
# never compiled or trained here (note the commented-out compile call); it
# exists only to probe the dense layer below.
model = Sequential()
model.add(Convolution2D(200, 2, 2, border_mode='same', input_shape=(1,20,20)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
# Keep a handle on the dense layer so its input/output tensors can be wired
# into a standalone Theano function below.
d = Dense(1500, W_regularizer=l2(1e-3), activation='relu')
model.add(d)
model.add(Dropout(0.5))
model.add(Dense(1))
# model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# Compile a Theano function mapping the dense layer's input to its output,
# then run it on random data to exercise the layer in isolation.
# NOTE(review): the (1, 20000) input presumably matches the flattened conv
# output size -- confirm against d.input_shape printed below.
c = theano.function([d.get_input(train=False)], d.get_output(train=False))
o = c(np.random.random((1,20000)).astype('float32'))
print(d.input_shape)
|
# Generated by Django 2.1 on 2019-07-26 17:29
from django.db import migrations, models
class Migration(migrations.Migration):
    """Rename Herb name fields to *_Name casing, set Category ordering and
    verbose names, and add an optional description field to Herb."""

    dependencies = [
        ('nijaherbs', '0001_initial'),
    ]

    operations = [
        # Order categories alphabetically and define singular/plural names.
        migrations.AlterModelOptions(
            name='category',
            options={'ordering': ('name',), 'verbose_name': 'category', 'verbose_name_plural': 'categories'},
        ),
        migrations.RenameField(
            model_name='herb',
            old_name='botanicalName',
            new_name='botanical_Name',
        ),
        migrations.RenameField(
            model_name='herb',
            old_name='englishName',
            new_name='english_Name',
        ),
        migrations.RenameField(
            model_name='herb',
            old_name='localName',
            new_name='local_Name',
        ),
        # null=True so existing rows pass without a default value.
        migrations.AddField(
            model_name='herb',
            name='description',
            field=models.TextField(help_text='Enter description of the herb', null=True),
        ),
    ]
|
import sqlite3

# Initial grocery-items catalogue: (id, name, quantity, unit price).
ITEMS_SEED = [
    (101, 'dal', 0, 68),
    (102, 'wheat', 300, 45),
    (103, 'barley', 500, 54),
    (104, 'oats', 40, 54),
    (105, 'peanuts', 450, 45),
    (106, 'onion', 3, 54),
    (107, 'garlic', 44, 56),
    (108, 'ginger', 440, 54),
    (109, 'mustard', 430, 34),
    (110, 'jeera', 443, 43),
    (111, 'salt', 434, 54),
    (112, 'pepper', 443, 34),
    (113, 'black salt', 434, 34),
    (114, 'black pepper', 345, 43),
    (115, 'potato', 345, 34),
]

conn = sqlite3.connect("NaaSClue_DATABASE.db")
try:
    conn.execute(
        "CREATE TABLE ITEMS("
        "I_ID INT PRIMARY KEY NOT NULL,"
        "I_NAME TEXT NOT NULL,"
        "QUANTITY INT NOT NULL,"
        "PRICE INT NOT NULL);"
    )
    # Parameterized bulk insert replaces 15 hand-written INSERT statements.
    conn.executemany(
        "INSERT INTO ITEMS (I_ID,I_NAME,QUANTITY,PRICE) VALUES (?,?,?,?)",
        ITEMS_SEED,
    )
    print("thank you \n database created")
    conn.commit()
finally:
    # BUG FIX: the connection was never closed in the original script.
    conn.close()
# ---------------------------------------------------------
# author: Greg Linkowski
# project: Metapath Analysis
# for the KnowEnG big data center at UIUC
# funded by the NIH
#
# functions used in Pre-Processing of the network
#
# These functions were created to aid in the
# pre-processing of a network, prior to calculated the
# contained metapaths.
#
# Functions provided:
# readKeepFile(fname)
# readEdgeFile(datafile)
# applyCorrections(edges, fname)
# applyNormalization(edges, lowBound)
# applyThreshold(edges, threshold)
# applyKeepEdges(edges, kEdges)
# applyKeepLists(edges, lGenes, kEdges, iEdges)
# createNodeLists(edges, aGenes)
# createModEdgeFileName(name, kEdges, kGenes, tHold)
# writeModEdgeFilePlus(path, oname, nDict, gList, eArray)
# createGeneMapping(gList)
# createCountDict(aList)
# createMatrixList(eArray, kEdges, iEdges, gList, nDict)
# saveMatrixText(matrix, mname, mpath, integer)
# saveMatrixNumpy(matrix, mname, mpath, integer)
# clearFilesInDirectory(path)
# saveMatrixList(mList, mNames, mGenes, mpath)
# saveMatrixListPlus(mList, mNames, mGenes, mpath)
# saveKeyFile(mDict, path)
# saveGeneFile(mGenes, path)
# calcPathSimMatrix(matrix)
# createMPLengthOne(pList, pNames, path)
# createMPLengthTwo(pList, pNames, path)
# createMPLengthThree(pList, pNames, path)
# createMPLengthThreeFast(pList, path)
# createMPLengthFour(pList, pNames, path)
# createMetaPaths(pList, pNames, gList, depth, path)
# readPrimaryMatrices(nName, nPath)
# saveSelectGeneDegrees(nPath, nName, edgeArray, genesAll, humanRegex)
# ---------------------------------------------------------
import os.path
#import os
import sys
import numpy as np
import re
import time
import gzip
####### ####### ####### #######
# PARAMETERS

# Data type used when loading edge file to memory:
#   'a30' = fixed-width 30-byte strings (length mirrored in nodeDTSize)
nodeDT = np.dtype('a30')
nodeDTSize = 30
# Whether to use the data-type for the matrices:
speedVsMemory = False  # True favors speed, disables dtype
# Data-type for the path matrices:
matrixDT = np.float32  #TODO: any considerations here?
# Length to pad the matrix file names:
keyZPad = 5
# Considering consecutive edges of same type
keepDouble = True
keepTriple = True
# File extension to use when saving the matrix
matrixExt = '.gz'  # '.txt' or '.gz' (gz is compressed)
# Whether to save a uncompressed text copy of the matrices
saveTextCopy = False
# Data delimiter to use in the output file:
textDelim = '\t'
# Whether to print non-error messages within these funcs
verbose = True
####### ####### ####### #######
######## ######## ######## ########
# Functions to set the global library parameters
# Input ----
# NOTE: an input value of...
# -1 keeps the parameter unchanged
# -2 resets parameter to default
def setParamVerbose(newVal) :
    """Set the module-level `verbose` flag from any truthy/falsy value.

    Input ----
        newVal: anything; coerced with bool()
    """
    global verbose
    # bool() replaces the original if/else ladder (same semantics).
    verbose = bool(newVal)
    return
#end def ######## ######## ########
def setParamTextDelim(newVal) :
    """Set the module-level output text delimiter.

    Input ----
        newVal: -1 keeps the current delimiter unchanged; -2 resets it to
            the default tab; any other value is converted to str and used.
    """
    global textDelim
    flag = str(newVal)
    if flag == '-2' :
        textDelim = '\t'
    elif flag != '-1' :
        textDelim = str(newVal)
    # '-1': leave textDelim unchanged (the original's no-op self-assignment
    # was removed)
    return
#end def ######## ######## ########
# def setParamFileZeroPad(newMval, newOval) :
# global fnMatrixZPad
# global fnOutputZPad
# if newMval == -1 :
# fnMatrixZPad = fnMatrixZPad
# elif newMval == -2 :
# fnMatrixZPad = 5
# else :
# fnMatrixZPad = newMval
# #end if
# if newOval == -1 :
# fnOutputZPad = fnOutputZPad
# elif newOval == -2 :
# fnOutputZPad = 3
# else :
# fnOutputZPad = newMval
# #end if
# return
# #end def ######## ######## ########
def setParamMatrixDT(newVal) :
    """Set the dtype used for the path matrices.

    newVal of -1 keeps the current dtype, -2 resets to the default
    np.float32, anything else becomes the new dtype.
    """
    global matrixDT
    if newVal == -2 :
        matrixDT = np.float32
    elif newVal != -1 :
        matrixDT = newVal
    # -1: keep the current value
    return
#end def ######## ######## ########
def setParamSaveTextCopy(newVal) :
    """Set whether an uncompressed text copy of each matrix is saved.

    Input ----
        newVal: anything; coerced with bool()
    """
    global saveTextCopy
    # bool() replaces the original if/else ladder (same semantics).
    saveTextCopy = bool(newVal)
    return
#end def ######## ######## ########
######## ######## ######## ########
# Function: Read in the keep file corresponding to
# the specified network edge file
# The keep file specifies which genes & edges to
# keep in the final product, and provides the
# necessary regex characters to identify them.
# Input ----
# fname, str: path & name to keep file
# Returns ----
# keepGenes: list of Regex expressions for the
# genes to keep specified in file
# keepEdges: list of Regex expressions for the
# edges to keep specified in file
# indirEdges: list of indirect edge types
def readKeepFile(fname) :
    """Parse a network "keep" file describing which genes & edges to retain.

    The file has one throw-away header line, then tab-separated rows under
    all-caps section headers (GENE TYPES, EDGE TYPES, THRESHOLD).

    Input ----
        fname, str: path & name of the keep file
    Returns ----
        humanGenes: regex prefixes of kept genes whose type starts w/ 'human'
        keepGenes:  regex prefixes of genes to keep
        loseGenes:  regex prefixes of genes to ignore
        keepEdges:  names of edge types to keep
        indirEdges: names of kept edge types that are indirect
        tHold, float: edge-weight threshold from the THRESHOLD row
    """
    # Make sure the keep file exists
    if not os.path.isfile(fname) :
        print("Please create a keep file: ", fname)
        sys.exit()
    #end if
    # The lists to return
    humanGenes = list()
    keepGenes = list()
    loseGenes = list()
    keepEdges = list()
    indirEdges = list()
    tHold = 0.0
    # BUG FIX: the file handle was never closed; use a context manager.
    with open(fname, "r") as f :
        f.readline()  # throw away the first (header) line
        section = 'header'  # the section of the file being read
        # read file line by line
        for line in f :
            line = line.rstrip()
            if line == '':
                continue
            #end if
            # split the line by columns
            lv = line.split('\t')
            # Section headers (defined by all-caps) set the behavior
            # for the lines that follow
            if lv[0] == 'GENE TYPES' :
                section = 'gene'
            elif lv[0] == 'EDGE TYPES' :
                section = 'edge'
            elif lv[0] == 'THRESHOLD' :
                section = 'threshold'
                tHold = float(lv[2])
            elif section == 'gene' :
                # sort genes between kept & ignored
                if (lv[2] == 'keep') or (lv[2] == 'yes') :
                    keepGenes.append(lv[1])
                    if lv[0].startswith('human') :
                        humanGenes.append(lv[1])
                else :
                    loseGenes.append(lv[1])
                #end if
            elif section == 'edge' :
                # sort kept edges & note indirect edges
                if (lv[2] == 'keep') or (lv[2] == 'yes') :
                    keepEdges.append(lv[0])
                    if (lv[1] != 'direct') and (lv[1] != 'yes') :
                        indirEdges.append(lv[0])
            #end if
        #end loop
    return humanGenes, keepGenes, loseGenes, keepEdges, indirEdges, tHold
#end def ######## ######## ########
######## ######## ######## ########
# Function: Read in the Knowledge Graph
# Input ----
# fname, str: path & name to the network edge file
# Returns ----
# Edges: (Nx4) matrix of char strings
# each row is: node, node, edge weight, edge type
# Nodes: dictionary of nodes in the edge list
# key = node name
# value = list of indices of the rows in
# the edge matrix where node (key) appears
# matrix of Edges (N,4), set of Vertices
def readEdgeFile(datafile) :
    """Read the knowledge-graph edge list into memory.

    Input ----
        datafile, str: path & name of the network edge file (tab-separated
            columns: node, node, edge weight, edge type)
    Returns ----
        Edges, (N,4) array of str: one row per edge
        Nodes, dict: node name -> list of row indices in Edges where the
            node appears
    """
    # get the number of lines in the file
    # BUG FIX: the counting pass previously leaked an open file handle
    with open(datafile, "r") as cf :
        nLines = sum( 1 for line in cf )
    # assign space for edge list (fixed-width byte strings, dtype nodeDT)
    Edges = np.empty( (nLines,4), dtype=nodeDT)
    # dictionary to hold node -> row indices
    Nodes = dict()
    # Read the edges; the data file is closed automatically on exit.
    with open(datafile, "r") as df :
        for i, line in enumerate(df) :
            # extract the data from the file
            lv = line.rstrip().split('\t')
            # insert into the edge list
            Edges[i,0] = lv[0]
            Edges[i,1] = lv[1]
            Edges[i,2] = lv[2]
            Edges[i,3] = lv[3]
            # record where each endpoint occurs (setdefault replaces the
            # separate membership set used previously; self-loops still
            # record the row once per endpoint, as before)
            Nodes.setdefault(lv[0], list()).append(i)
            Nodes.setdefault(lv[1], list()).append(i)
        #end loop
    if verbose :
        print(" file contained {:,} lines".format(nLines))
    #TODO: check for Python v 2/3 before running this line ?
    # Decode the edge array from type=bytes to type=str
    Edges = np.char.decode(Edges, 'ascii')
    return Edges, Nodes
#end def ######## ######## ########
######## ######## ######## ########
# Function: Fix known spelling mistakes, as outlined
# in the corrections file. The file contains two
# columns: the typo, the correction. If no file,
# then leave the array unchanged.
# Input ----
# edges, array (N,4): edge array
# col 0: node, 1: node, 2: weight, 3: edge type
# Returns ----
# nothing
# Makes in-place corrections to the array
def applyCorrections(edges, fname) :
    """Fix known spelling mistakes in the edge array, in place.

    The corrections file has two tab-separated columns: the typo and its
    correction. If no file is found the array is left unchanged.

    Input ----
        edges, array (N,4): col 0: node, 1: node, 2: weight, 3: edge type
        fname, str: path & name of the corrections file
    Returns ----
        nothing; makes in-place corrections to the array
    """
    # Make no changes if no file found
    if not os.path.isfile(fname) :
        print("No file found, no corrections made.")
        return
    #end if
    # typo -> correction lookup (a dict replaces the original set plus two
    # parallel index structures)
    fixes = dict()
    # BUG FIX: the corrections file was never closed; use a context manager.
    with open(fname, 'r') as cf :
        for line in cf :
            lv = line.rstrip().split('\t')
            fixes[lv[0]] = lv[1]
    #end loop
    if verbose :
        print(set(fixes.keys()))
        print(edges.shape)
    # Apply corrections to both node columns and the edge-type column
    for i in range(0, edges.shape[0]) :
        for col in (0, 1, 3) :
            if edges[i,col] in fixes :
                edges[i,col] = fixes[edges[i,col]]
        #end loop
    #end loop
    return
#end def ######## ######## ########
######## ######## ######## ########
# Function: For each edge type, collect the weights
# and normalize to [0,1].
# Input ----
# edges, array (N,4): edge array
# col 0: node, 1: node, 2: weight, 3: edge type
# lowBound, int: indicates what should be used as
# the lower bound on the weight. If 0, then set
# vmin = 0. Else, vmin = lowest weight.
# Returns ----
# nothing
# Makes in-place corrections to the array
def applyNormalization(edges, lowBound) :
    """Normalize edge weights to [0,1] within each edge type, in place.

    Input ----
        edges, array (N,4): col 0: node, 1: node, 2: weight, 3: edge type
        lowBound, int: if 0, use 0 as the bottom of the scale; otherwise
            use the smallest observed weight for that edge type
    Returns ----
        nothing; weights are rewritten in place as strings
    """
    #TODO: ? check & fix lower bound condition. Don't want edges
    #   to be given a weight of 0. There should be a small
    #   weight applied even to the lowest-weighted edge
    # get the unique edge types in the network
    eTypes = list( np.unique(edges[:,3]) )
    eTypes.sort()
    if verbose :
        print("Normalizing edge type ...")
    # Normalize values within each edge type in the graph
    for et in eTypes :
        if verbose :
            print("  {}".format(et))
        # PERF FIX: iterate only this type's rows instead of re-scanning
        # the whole edge array once per edge type
        rowIdx = np.nonzero(edges[:,3] == et)[0]
        # convert this type's weights from str to float
        weightFlt = np.array([float(edges[i,2]) for i in rowIdx])
        if lowBound == 0 :
            vmin = 0
        else :
            vmin = float(np.amin(weightFlt))
        #end if
        vmax = float(np.amax(weightFlt))
        vdif = vmax - vmin
        for i in rowIdx :
            # calculate normalized value & update array
            if (vdif == 0) :
                # if vmin=vmax, then there's only one value
                temp = 1
            else :
                # else change value to range [0,1]
                temp = (float(edges[i,2]) - vmin) / vdif
            #end if
            edges[i,2] = str(temp)
        #end loop
    #end loop
    return
#end def ######## ######## ########
######## ######## ######## ########
# Function: Examine the edge weights. Throw out edges
# that are below the threshold value. (Only keep
# those that are at or above.)
# Input ----
# edges, array (N,4): edge array
# col 0: node, 1: node, 2: weight, 3: edge type
# threshold, float: the value against which to test
# edge weights; throw out edges that are below
# Returns ----
# newEdges, str array: the modified edge array
def applyThreshold(edges, threshold) :
    """Drop edges whose weight is below `threshold` (keep those >= it).

    Meant to be applied after normalization, so weights lie in [0,1].

    Input ----
        edges, array (N,4): col 0: node, 1: node, 2: weight, 3: edge type
        threshold, float: minimum edge weight to keep
    Returns ----
        newEdges, str array: the filtered edge array; the input array is
            returned unaltered when threshold <= 0 or when no edge passes
    """
    # ERROR CHECK: threshold meant to be applied after normalization
    if threshold > 1 :
        print("WARNING: threshold value {}".format(threshold)
            + " is outside the normalized range.")
        print("   If network has been normalized, all"
            + " edges will be kept.")
    #end if
    # BUG FIX: a non-positive threshold previously still did the full
    # filtering pass and then tried to re-decode the (already decoded)
    # input array; return the input unchanged up front instead.
    if threshold <= 0 :
        return edges
    #end if
    # Count how many edge weights are at/above the threshold value
    count = 0
    for i in range(0, edges.shape[0]) :
        if float(edges[i,2]) >= threshold :
            count += 1
        #end if
    #end loop
    # ERROR CHECK: If no edges are above threshold value ...
    if count == 0 :
        print("\nWARNING: No edge weights above threshold"
            + " {}, returning unaltered array.\n".format(threshold))
        return edges
    #end if
    # temporary array to hold the new matrix
    newEdges = np.empty([count,4], dtype=nodeDT)
    # Copy each passing edge into the temp array
    index = 0
    for i in range(0, edges.shape[0]) :
        if float(edges[i,2]) >= threshold :
            newEdges[index] = edges[i]
            index += 1
    #end loop
    # Decode the edge array from type=bytes to type=str
    return np.char.decode(newEdges, 'ascii')
#end def ######## ######## ########
######## ######## ######## ########
# Function: Examine the edge weights. Throw out edges
# that are below the threshold value. (Only keep
# those that are at or above.)
# Input ----
# edges, str array (N,4): edge array
# col 0: node, 1: node, 2: weight, 3: edge type
# kGenes, str list: regex of genes to keep
# each entry is just the first four chars of
# the gene name
# kEdges, str list: edge types (full) to keep
# threshold, float: the value against which to test
# edge weights; throw out edges that are below
# Returns ----
# newEdges, str array: the modified edge array
# NOTE: This assumes the network has been altered
# to just gene-gene edges !!!
def applyKeepEdges(edges, kEdges) :
    """Return only the rows of `edges` whose edge type appears in kEdges.

    Input ----
        edges, str array (N,4): col 0: node, 1: node, 2: weight, 3: edge type
        kEdges, str list: edge types (full names) to keep
    Returns ----
        newEdges, str array: the filtered edge array
    NOTE: This assumes the network has been altered to just gene-gene edges.
    """
    wanted = set(kEdges)
    # collect the row indices whose type column matches a kept edge type
    keepIndex = [i for i in range(edges.shape[0]) if edges[i,3] in wanted]
    return edges[keepIndex, :]
#end def ######## ######## ########
######## ######## ######## ########
# Function:
# Input ----
# edges, str array (N,4): edge array
# col 0: node, 1: node, 2: weight, 3: edge type
# kGenes, str list: regex of genes to keep
# each entry is just the first four chars of
# the gene name
# kEdges, str list: edge types (full) to keep
# threshold, float: the value against which to test
# edge weights; throw out edges that are below
# Returns ----
# newEdges, str array: the modified edge array
def applyKeepLists(edges, lGenes, kEdges, iEdges) :
    """Filter the edge array by kept edge types and a gene discard-list.

    An edge survives when: its type is in kEdges; its second node matches
    no regex in lGenes; and, for direct edge types only, its first node
    matches none either. ASSUMPTION: for indirect edges (iEdges), col 0
    contains a non-gene node, so it is never tested against lGenes.

    Input ----
        edges, str array (N,4): col 0: node, 1: node, 2: weight, 3: edge type
        lGenes, str list: regex prefixes of genes to DISCARD
        kEdges, str list: edge types (full) to keep
        iEdges, str list: kept edge types that are indirect
    Returns ----
        newEdges, str array: the modified edge array
    """
    keepIndex = list()
    kEdgeSet = set(kEdges)
    iEdgeSet = set(iEdges)  # PERF: set membership instead of a list scan
    for i in range(0, edges.shape[0]) :
        # Throw out non-kept edge types
        if edges[i,3] not in kEdgeSet :
            continue
        #end if
        # Drop the edge if node 2 matches the discard list
        # (PERF: any() short-circuits; the old code built full match lists)
        if any(re.match(gt, edges[i,1]) for gt in lGenes) :
            continue
        # For direct edges, also test node 1 against the discard list
        if edges[i,3] not in iEdgeSet :
            if any(re.match(gt, edges[i,0]) for gt in lGenes) :
                continue
            #end if
        # Finally, if no objections, keep this edge
        keepIndex.append(i)
    #end loop
    newEdges = edges[keepIndex, :]
    return newEdges
#end def ######## ######## ########
######## ######## ######## ########
# Function: Create a node dictionary from the current
# edge list.
# Input ----
# edges, str array (N,4): edge array
# col 0: node, 1: node, 2: weight, 3: edge type
# Returns ----
# nodeDict: node dictionary
# key: str, name of node
# value: list of int, list of indices where
# these nodes occur in the edge list
def createNodeLists(edges, aGenes) :
    """Build the node-to-rows index and the sorted list of gene nodes.

    Input ----
        edges, str array (N,4): col 0: node, 1: node, 2: weight, 3: edge type
        aGenes, str list: regex prefixes identifying gene nodes
    Returns ----
        nodeDict, dict: node name -> list of row indices in `edges` where
            the node appears
        geneList, str list: sorted names of nodes matching a gene regex
    """
    nodeDict = dict()
    geneSet = set()
    # PERF: compile each gene regex once instead of re-matching the raw
    # pattern string on every edge row
    genePatterns = [re.compile(gt) for gt in aGenes]
    for i in range(0, edges.shape[0]) :
        node0 = edges[i,0]
        node1 = edges[i,1]
        # Record this row under both endpoint nodes (setdefault replaces
        # the original dict + membership-set pair)
        nodeDict.setdefault(node0, list()).append(i)
        nodeDict.setdefault(node1, list()).append(i)
        # A regex match means the node is a gene; add it to the set
        if any(p.match(node0) for p in genePatterns) :
            geneSet.add(node0)
        if any(p.match(node1) for p in genePatterns) :
            geneSet.add(node1)
    #end loop
    geneList = list(geneSet)
    geneList.sort()
    return nodeDict, geneList
#end def ######## ######## ########
######## ######## ######## ########
# Function: creates the name to use when saving the file
# Input ----
# name, str: the name of the original edge file
# kEdges, str list: list of edge types kept
# kGenes, str list: list of gene types kept
# tHold, float: threshold value for kept edge weights
# Returns ----
# oname, str: the new network name for the new files
def createModEdgeFileName(name, kEdges, kGenes, tHold) :
    """Build the modified-network name: <name>_g<#genes>e<#edges>t<thold%>.

    Input ----
        name, str: the name of the original edge file
        kEdges, str list: list of edge types kept
        kGenes, str list: list of gene types kept
        tHold, float: threshold value for kept edge weights
    Returns ----
        oname, str: the new network name for the new files
    """
    suffix = "_g{}e{}t{}".format(len(kGenes), len(kEdges), int(tHold * 100))
    return name + suffix
#end def ######## ######## ########
######## ######## ######## ########
# Function: write the modified network to a file
# This includes the nodeDict, geneList, and edge types
# Input ----
# path, str: path to the folder to save the file
# oname, str: new network name
# nDict, dict:
# keys, node names as strings
# values, int list of indexes: rows in eArray where
# the node appears
# eArray, (Nx4) array: the network
# Returns ----
def writeModEdgeFilePlus(path, oname, nDict, gList, eArray) :
	"""Write the modified network to <path><oname>/ as four files.

	genes.txt   - ordered gene names, one per line
	indices.txt - "<gene>\\t<comma-separated row indices into eArray>"
	edges.txt   - the unique edge types, one per line
	network.txt - the edge list, tab-separated (4 columns)

	path, str: path to the folder to save the files
	oname, str: new network name (becomes the folder name)
	nDict, dict: node name -> int list of rows in eArray where it appears
	gList, str list: ordered list of gene names
	eArray, (Nx4) array: the network
	Returns nothing.
	"""
	newPath = path + oname + '/'
	# If folder doesn't exist, create it
	if not os.path.exists(newPath) :
		os.makedirs(newPath)
	#end if
	# Save the ordered gene list and, per gene, the rows of eArray
	#	where that gene appears.  Newlines go BEFORE every line but
	#	the first, so the files never end with a blank line.
	with open(newPath + 'genes.txt', 'w') as gf, \
			open(newPath + 'indices.txt', 'w') as nf :
		first = True
		for gene in gList :
			if first :
				first = False
			else :
				gf.write("\n")
				nf.write("\n")
			#end if
			gf.write("{}".format(gene))
			# BUG FIX: dropped a stray second argument previously passed
			#	to format(); the row indices are written (comma-separated)
			#	right after the tab
			nf.write("{}\t".format(gene))
			nf.write(",".join("{}".format(idx) for idx in nDict[gene]))
		#end loop
	# Save output: sorted list of unique edge types
	eTypes = np.unique(eArray[:,3])
	eTypes.sort()
	with open(newPath + 'edges.txt', 'w') as ef :
		ef.write("\n".join("{}".format(et) for et in eTypes))
	# Save output: the network (as a tab-separated edge list)
	with open(newPath + 'network.txt', 'w') as of :
		rows = ("{}\t{}\t{}\t{}".format(eArray[i,0], eArray[i,1],
			eArray[i,2], eArray[i,3]) for i in range(0, eArray.shape[0]))
		of.write("\n".join(rows))
	return
#end def ######## ######## ########
######## ######## ######## ########
# Function: create a mapping of genes to matrix indices
# ASSUMPTION: gList is properly ordered to match matrix
# Input ----
# gList, str list: ordered list of gene names
# Returns ----
# gDict, dict
# key, str: gene names
# value, int: row/col where gene appears in matrix
def createGeneMapping(gList) :
	"""Map each gene name to its matrix row/col index.

	ASSUMPTION: gList is properly ordered to match the matrix.
	gList, str list: ordered list of gene names
	Returns a dict: gene name -> row/col index in the matrix.
	"""
	return {gene: idx for idx, gene in enumerate(gList)}
#end def ######## ######## ########
######## ######## ######## ########
# Function: from a list of items, create a dictionary
# giving the count for how many times each appears
# Input ----
# aList, XX list: the list of items (with repetitions)
# Returns ----
# aDict, dict
# key, XX: unique items from the list
# value, int: how many times that item appears in the list
def createCountDict(aList) :
	"""Count how many times each item appears in a list.

	aList, XX list: the list of items (with repetitions)
	Returns a dict: unique item -> number of occurrences.
	"""
	# dict.get() covers the first-seen case directly; the previous
	#	version kept a parallel set that just duplicated the dict's keys
	aDict = dict()
	for item in aList :
		aDict[item] = aDict.get(item, 0) + 1
	#end loop
	return aDict
#end def ######## ######## ########
######## ######## ######## ########
# Function: create a list of the primary matrices
# Input ----
#   eArray, (Nx4) array: the network edge list
#   kEdges, str list: edge types to keep
#   iEdges, str list: edge types to treat as indirect
#   gList, str list: ordered list of gene names
#   nDict, dict:
#       keys, node names as strings
#       values, int list of indexes: rows in eArray where
#           the node appears
# Returns ----
def createMatrixList(eArray, kEdges, iEdges, gList,
	nDict):
	"""Build the primary gene-gene path-count matrices.

	eArray, (Nx4) array: the network edge list
	kEdges, str list: edge types to keep
	iEdges, str list: edge types treated as indirect
		(ASSUMPTION: for these, col 0 is the non-gene node)
	gList, str list: ordered gene names (rows/cols of each matrix)
	nDict, dict: node name -> list of row indices into eArray

	Each indirect edge type is split into three matrices (_SM/_MD/_LG)
	by the degree of its non-gene node relative to the mean degree for
	that edge type; direct edge types get one matrix each.

	Uses module-level settings: speedVsMemory, verbose, warnVal, and
	the dtypes dt / matrixDT.  NOTE(review): the indirect branches use
	dtype=dt while the direct branch uses dtype=matrixDT — confirm
	which is intended.

	Returns (mList, mNames): parallel lists of matrices & their names.
	"""
	# How far from Std Dev will be kept as "Medium"
	stdGap = 0.75
	# Get a gene-to-index mapping to use with the
	#	newly-created matrices
	gDict = createGeneMapping(gList)
	numG = len(gDict.keys())
	# Create a set for faster membership checks
	gSet = set(gList)
	mList = list()
	mNames = list()
	iEdges.sort()
	kEdges.sort()
	# Define cut-offs for indirect edges
	# ASSUMPTION: indirect edges, col 0 is non-gene node
	iNames = dict()
	# key: edge type + sm/md/lg
	# value: integer tuple w/ low & high cutoffs
	for et in iEdges :
		# Get the degree of each node for this edge type
		nodeList = list(eArray[eArray[:,3]==et, 0])
		nodeCount = createCountDict(nodeList)
		# Get the mean & std dev for the node degrees
		# NOTE(review): in Python 3 dict.values() is a view; np.mean()
		#	on it may fail — probably needs list(nodeCount.values()).
		#	TODO confirm the targeted Python / numpy versions.
		mean = int(round(np.mean(nodeCount.values())))
		std = int(round(np.std(nodeCount.values())))
		stdPart = int(round(std * stdGap))
		# Create the breakdown by small/med/large
		tSml = [max(0, mean-(2*std)), mean - stdPart]
		tMed = [tSml[1]+1, mean + stdPart]
		tLrg = [tMed[1]+1, mean+(2*std)]
		# Save those tuples to the look-up dict
		iNames[et+"_SM"] = tSml
		iNames[et+"_MD"] = tMed
		iNames[et+"_LG"] = tLrg
	#end loop
	# Start creating matrices
	for et in kEdges :
		# Remove indirect edges if needed
		if et in iEdges :
			# Create lists corresponding to the new edge types
			term_sm = list()
			term_md = list()
			term_lg = list()
			# Separate out the edges of type et
			thisArray = eArray[eArray[:,3]==et]
			# Get the count of each node with that edge
			checkSet = set()
			termDict = dict()
			for row in thisArray :
				# TODO: what to do about bad gene mappings?
				# Skip improperly mapped genes
				if row[1] not in gSet :
					continue
				# Only add if the node is not a gene
				if row[0] not in gSet :
					# Else, increment count by 1
					if row[0] in checkSet :
						termDict[row[0]] += 1
					# If not yet added, then set = 1
					else :
						checkSet.add(row[0])
						termDict[row[0]] = 1
					#end if
				#end if
			#end loop
			# Assign to groups according to node degree
			for term in termDict.keys() :
				# get the tuples to check
				# NOTE(review): `row` is left over from the loop above
				#	(the last row of thisArray); since all rows of
				#	thisArray have edge type et, row[3] == et holds, but
				#	using `et` directly would be clearer — confirm.
				tSml = iNames[row[3] + "_SM"]
				tMed = iNames[row[3] + "_MD"]
				tLrg = iNames[row[3] + "_LG"]
				if (tSml[0] <= termDict[term] <= tSml[1]) :
					term_sm.append(term)
				elif (tMed[0] <= termDict[term] <= tMed[1]) :
					term_md.append(term)
				elif (tLrg[0] <= termDict[term] <= tLrg[1]) :
					term_lg.append(term)
				#end if
			#end loop
			# Create the first (small) matrix
			if speedVsMemory :
				thisM = np.zeros([numG, numG])
			else :
				thisM = np.zeros([numG,numG], dtype=dt)
			#end if
			if verbose :
				print("  building {}, at {} bytes".format(et, thisM.nbytes))
			count = 0
			for term in term_sm :
				# list of all edges with this term
				rowList = nDict[term]
				# Join all genes connected to term X
				for i in range(0, len(rowList)) :
					#TODO: should gene connect back to self through the term?
					for j in range(i+1, len(rowList)) :
						# Find two genes joined by term
						# ASSUME: terms always in col 0, genes in col 1
						# NOTE(review): these index eArray by the loop
						#	counters i/j rather than rowList[i]/rowList[j];
						#	that reads the FIRST rows of eArray, not the
						#	rows for this term — looks like a bug; confirm.
						gA = eArray[i,1]
						gB = eArray[j,1]
						# TODO: what to do about bad gene mappings?
						# Skip improperly mapped genes
						if gA not in gSet :
							continue
						elif gB not in gSet :
							continue
						# Increment the entry(s) in the array (symmetric)
						thisM[gDict[gA],gDict[gB]] += 1
						thisM[gDict[gB],gDict[gA]] += 1
						count += 1
					#end loop
				#end loop
			#end loop
			if (count > 0) :
				# ERROR CHECK: verify counts fit within
				#	specified data type
				if np.amax(thisM) > warnVal :
					print("WARNING: Path counts exceed" +
						"{}, change data-type.".format(warnVal))
				#end if
				mList.append(thisM)
				mNames.append(et+"_SM")
			#end if
			# Create the second (medium) matrix
			if speedVsMemory :
				thisM = np.zeros([numG, numG])
			else :
				thisM = np.zeros([numG,numG], dtype=dt)
			#end if
			count = 0
			for term in term_md :
				# list of all edges with this term
				rowList = nDict[term]
				# Join all genes connected to term X
				for i in range(0, len(rowList)) :
					#TODO: should gene connect back to self through the term?
					for j in range(i+1, len(rowList)) :
						# Find two genes joined by term
						# ASSUME: terms always in col 0, genes in col 1
						# NOTE(review): same i/j indexing concern as the
						#	_SM block above.
						gA = eArray[i,1]
						gB = eArray[j,1]
						# TODO: what to do about bad gene mappings?
						# Skip improperly mapped genes
						if gA not in gSet :
							continue
						elif gB not in gSet :
							continue
						# Increment the entry(s) in the array (symmetric)
						thisM[gDict[gA],gDict[gB]] += 1
						thisM[gDict[gB],gDict[gA]] += 1
						count += 1
					#end loop
				#end loop
			#end loop
			if (count > 0) :
				# ERROR CHECK: verify counts fit within
				#	specified data type
				if np.amax(thisM) > warnVal :
					print("WARNING: Path counts exceed" +
						"{}, change data-type.".format(warnVal))
				#end if
				mList.append(thisM)
				mNames.append(et+"_MD")
			#end if
			# Create the third (large) matrix
			if speedVsMemory :
				thisM = np.zeros([numG, numG])
			else :
				thisM = np.zeros([numG,numG], dtype=dt)
			#end if
			count = 0
			for term in term_lg :
				# list of all edges with this term
				rowList = nDict[term]
				# Join all genes connected to term X
				for i in range(0, len(rowList)) :
					#TODO: should gene connect back to self through the term?
					for j in range(i+1, len(rowList)) :
						# Find two genes joined by term
						# ASSUME: terms always in col 0, genes in col 1
						# NOTE(review): same i/j indexing concern as the
						#	_SM block above.
						gA = eArray[i,1]
						gB = eArray[j,1]
						# TODO: what to do about bad gene mappings?
						# Skip improperly mapped genes
						if gA not in gSet :
							continue
						elif gB not in gSet :
							continue
						# Increment the entry(s) in the array (symmetric)
						thisM[gDict[gA],gDict[gB]] += 1
						thisM[gDict[gB],gDict[gA]] += 1
						count += 1
					#end loop
				#end loop
			#end loop
			if (count > 0) :
				# ERROR CHECK: verify counts fit within
				#	specified data type
				if np.amax(thisM) > warnVal :
					print("WARNING: Path counts exceed" +
						"{}, change data-type.".format(warnVal))
				#end if
				mList.append(thisM)
				mNames.append(et+"_LG")
			#end if
		# If already direct, create the matrix
		else :
			if speedVsMemory :
				thisM = np.zeros([numG, numG])
			else :
				thisM = np.zeros([numG,numG], dtype=matrixDT)
			#end if
			count = 0
			thisArray = eArray[eArray[:,3]==et]
			# increment entry at (i,j) = (gene0,gene1)
			for row in thisArray :
				# TODO: what to do about bad gene mappings?
				# Skip improperly mapped genes
				if row[0] not in gSet :
					continue
				elif row[1] not in gSet :
					continue
				thisM[gDict[row[0]],gDict[row[1]]] += 1
				thisM[gDict[row[1]],gDict[row[0]]] += 1
				count += 1
			#end loop
			# ERROR CHECK: verify counts fit within
			#	specified data type
			if np.amax(thisM) > warnVal :
				print("WARNING: Path counts exceed" +
					"{}, change data-type.".format(warnVal))
			#end if
			mList.append(thisM)
			mNames.append(et)
		#end if
	#end loop
	return mList, mNames
#end def ######## ######## ########
######## ######## ######## ########
# Function: create a list of the primary matrices
# Input ----
#   eArray, (Nx4) array: the network edge list
#   kEdges, str list: edge types to keep
#   iEdges, str list: edge types to treat as indirect
#   gList, str list: ordered list of gene names
#   nDict, dict:
#       keys, node names as strings
#       values, int list of indexes: rows in eArray where
#           the node appears
# Returns ----
def createMatrixListNoBinning(eArray, kEdges, iEdges, gList, nDict):
	"""Build one gene-gene path-count matrix per kept edge type,
	without splitting indirect edges into degree bins.

	eArray, (Nx4) array: the network edge list
	kEdges, str list: edge types to keep
	iEdges, str list: edge types treated as indirect
		(ASSUMPTION: for these, col 0 is the non-gene node)
	gList, str list: ordered gene names (rows/cols of each matrix)
	nDict, dict: node name -> list of row indices into eArray

	Uses module-level settings: speedVsMemory, verbose, matrixDT.

	Returns mList, dict: edge type -> (numG x numG) symmetric matrix.
	"""
	# Get a gene-to-index mapping to use with the
	#	newly-created matrices
	gDict = createGeneMapping(gList)
	numG = len(gDict.keys())
	# Create a set for faster membership checks
	gSet = set(gList)
	mList = dict()
	iEdges.sort()
	kEdges.sort()
	# Start creating matrices
	for et in kEdges :
		if verbose :
			print("  creating primary matrix {}".format(et))
		#end if
		# Convert indirect edges to direct
		if et in iEdges :
			# Separate out the edges of type et
			thisArray = eArray[eArray[:,3]==et]
			# Get the count of each node with that edge
			checkSet = set()
			termDict = dict()
			for row in thisArray :
				# Only add if the node is not a gene
				if row[0] not in gSet :
					# Else, increment count by 1
					if row[0] in checkSet :
						termDict[row[0]] += 1
					# If not yet added, then set = 1
					else :
						checkSet.add(row[0])
						termDict[row[0]] = 1
					#end if
			# Create the matrix
			if speedVsMemory :
				thisM = np.zeros([numG, numG])
			else :
				thisM = np.zeros([numG,numG], dtype=matrixDT)
			#end if
			if verbose :
				print("  building {} (indirect), at {} bytes".format(et, thisM.nbytes))
			#TODO: build from here.
			count = 0
			for term in termDict.keys() :
				# list of all edges with this term
				rowList = nDict[term]
				# Join all genes connected to term X
				for i in range(0, len(rowList)) :
					#TODO: should gene connect back to self through the term?
					for j in range(i+1, len(rowList)) :
						# Find two genes joined by term
						# ASSUME: terms always in col 0, genes in col 1
						# NOTE(review): indexes eArray by the loop counters
						#	i/j instead of rowList[i]/rowList[j] — reads the
						#	first rows of eArray, not the rows for this
						#	term; looks like a bug — confirm.
						gA = eArray[i,1]
						gB = eArray[j,1]
						# Increment the entry(s) in the array (symmetric)
						thisM[gDict[gA],gDict[gB]] += 1
						thisM[gDict[gB],gDict[gA]] += 1
						count += 1
					#end loop
				#end loop
			#end loop
			if (count > 0) :
				mList[et] = thisM
			#end if
		# If already direct, create the matrix
		else :
			if speedVsMemory :
				thisM = np.zeros([numG, numG])
			else :
				thisM = np.zeros([numG,numG], dtype=matrixDT)
			#end if
			if verbose :
				print("  building {} (direct), at {} bytes".format(et, thisM.nbytes))
			#end if
			count = 0
			thisArray = eArray[eArray[:,3]==et]
			# increment entry at (i,j) = (gene0,gene1)
			for row in thisArray :
				thisM[gDict[row[0]],gDict[row[1]]] += 1
				thisM[gDict[row[1]],gDict[row[0]]] += 1
				count += 1
			#end loop
			mList[et] = thisM
		#end if
	#end loop
	return mList
#end def ######## ######## ########
######## ######## ######## ########
# Function: save the given matrix as a .txt file
# Input ----
# matrix, (NxN) list: the values to save
# mname, str: name of the file to save
# mpath, str: path to the folder to save the file
# integer, bool: True means save values as int()
# Returns ----
# nothing
def saveMatrixText(matrix, mname, mpath, integer) :
	"""Save a matrix to <mpath><mname>.txt, tab-separated, one row per line.

	matrix, (NxN) array: the values to save
	mname, str: name of the file to save (no extension)
	mpath, str: path to the folder to save the file
	integer, bool: True means save values as int()
	Returns nothing.
	"""
	# If folder doesn't exist, create it
	if not os.path.exists(mpath) :
		os.makedirs(mpath)
	#end if
	# `with` guarantees the handle closes even on error
	with open(mpath + mname + ".txt", "w") as fout :
		firstR = True
		for i in range(0, matrix.shape[0]) :
			# every row but the first is preceded by \n
			#	(avoids ending the file with a blank line)
			if firstR :
				firstR = False
			else :
				fout.write("\n")
			#end if
			firstC = True
			for j in range(0, matrix.shape[1]) :
				# every col but the first is preceded by \t
				if firstC :
					firstC = False
				else :
					fout.write("\t")
				#end if
				# BUG FIX: the integer branch previously wrote float()
				if integer :
					fout.write("{}".format( int(matrix[i,j]) ))
				else :
					fout.write("{}".format( matrix[i,j] ))
			#end loop
		#end loop
	return
#end def ######## ######## ########
######## ######## ######## ########
# Function: save the given matrix as a .npy file
# Input ----
# matrix, (NxN) list: the values to save
# mname, str: name of the file to save
# mpath, str: path to the folder to save the file
# integer, bool: True means save values as int()
# Returns ----
# nothing
def saveMatrixNumpy(matrix, mname, mpath, integer) :
	"""Save a matrix to <mpath><mname><matrixExt> via np.savetxt.

	matrix, (NxN) array: the values to save
	mname, str: name of the file to save (no extension)
	mpath, str: path to the folder to save the file
	integer, bool: True means save values as unsigned ints
	Returns nothing.

	NOTE: here the text file from savetxt() is much smaller than the
	binary file np.save() would produce.
	"""
	# If folder doesn't exist, create it
	if not os.path.exists(mpath) :
		os.makedirs(mpath)
	#end if
	#TODO: check if the fmt option is appropriate, or pointless
	fmtCode = '%u' if integer else '%f'
	np.savetxt(mpath + mname + matrixExt, matrix, fmt=fmtCode)
	# VERIFICATION: also save a text-readable "t"-prefixed copy
	if saveTextCopy :
		saveMatrixText(matrix, "t" + mname, mpath, integer)
	#end if
	return
#end def ######## ######## ########
######## ######## ######## ########
# Function: delete the files within a directory
# Input ----
# path, str: the directory to empty
# Returns ----
# nothing
# NOTE: This only works if no sub-folders
def clearFilesInDirectory(path) :
	"""Delete every regular file directly inside *path*.

	path, str: the directory to empty
	Returns nothing.
	NOTE: sub-folders (and their contents) are left untouched.
	"""
	for entry in os.listdir(path) :
		fullPath = os.path.join(path, entry)
		if os.path.isfile(fullPath) :
			os.remove(fullPath)
	#end loop
	return
#end def ######## ######## ########
######## ######## ######## ########
# Function: save a list of matrices
# Input ----
#   mList, dict of NxN matrices: path name -> matrix to save
# mGenes, list of str: names of genes in the matrix
# mpath, str: path to the folder to save the file
# Returns ----
# nothing
# Creates:
#
def saveMatrixList(mList, mGenes, mpath) :
	"""Save a dict of matrices plus the gene list and key file.

	mList, dict of NxN matrices: path name -> matrix to save
	mGenes, list of str: names of genes in the matrix (ordered)
	mpath, str: path to the folder to save the files
	Returns nothing.
	Creates: genes.txt, key.txt, and one numbered matrix file per entry.
	"""
	# If folder doesn't exist, create it
	if not os.path.exists(mpath) :
		os.makedirs(mpath)
	# otherwise, delete files in the directory
	else :
		clearFilesInDirectory(mpath)
	#end if
	# genes.txt gives the gene name for each row/col of the matrices
	#	(rows & cols are the same)
	# BUG FIX: file handles were never closed; `with` closes them
	with open(mpath + "genes.txt", "w") as fgene :
		fgene.write("\n".join("{}".format(gene) for gene in mGenes))
	# key.txt tells which matrix corresponds to which path
	mNames = list(mList.keys())
	mNames.sort()
	with open(mpath + "key.txt", "w") as fkey :
		num = 0
		firstline = True
		for name in mNames :
			# Write to the legend file
			if firstline :
				firstline = False
			else :
				fkey.write("\n")
			#end if
			fkey.write("{:05d}\t{}".format(num, name))
			# Save each matrix as the corresponding number.
			# BUG FIX: saveMatrixNumpy() already writes the "t"-prefixed
			#	text copy when saveTextCopy is set, so the explicit
			#	duplicate saveMatrixText() call was removed.
			saveMatrixNumpy(mList[name], str(num).zfill(keyZPad),
				mpath, True)
			num += 1
		#end loop
	return
#end def ######## ######## ########
######## ######## ######## ########
# Function: save a list of matrices
# Input ----
# mList, list of NxN matrices: the matrices to save
# mNames, dict
# key, str: metapath names
# value, int: corresponding index number for mList
# mGenes, list of str: names of genes in the matrix
# mpath, str: path to the folder to save the file
# Returns ----
# nothing
# Creates:
#
def saveMatrixListPlus(mList, mNames, mGenes, mpath) :
	"""Save a list of matrices plus the gene list and key file.

	mList, list of NxN matrices: the matrices to save
	mNames, dict: metapath name -> [index into mList, transpose flag]
	mGenes, list of str: names of genes in the matrix (ordered)
	mpath, str: path to the folder to save the files
	Returns nothing.
	Creates: genes.txt, key.txt, and one numbered matrix file per entry.
	"""
	# If folder doesn't exist, create it
	if not os.path.exists(mpath) :
		os.makedirs(mpath)
	#end if
	# genes.txt gives the gene name for each row/col of the matrices
	#	(rows & cols are the same)
	# BUG FIX: was opened 'wb' while writing str (TypeError on Py3);
	#	text mode matches the sibling save functions
	with open(mpath + "genes.txt", "w") as fgene :
		firstline = True
		for gene in mGenes :
			if firstline :
				firstline = False
			else :
				fgene.write("\n")
			#end if
			fgene.write("{}".format(gene))
		#end loop
	# Get the sorted list of all paths
	nameList = list(mNames.keys())
	nameList.sort()
	# key.txt tells which matrix corresponds to which path
	with open(mpath + "key.txt", "w") as fkey :
		fkey.write("NOTE: 't' - use matrix transpose\n")
		firstline = True
		for name in nameList :
			if firstline :
				firstline = False
			else :
				fkey.write("\n")
			#end if
			fkey.write("{}".format( str(mNames[name][0]).zfill(keyZPad) ))
			if mNames[name][1] == True :
				fkey.write(",t")
			else :
				fkey.write(", ")
			fkey.write("\t{}".format(name))
		#end loop
	for i in range(0, len(mList)) :
		# Save each matrix as the corresponding number.
		# BUG FIX: saveMatrixNumpy() already writes the "t"-prefixed
		#	text copy when saveTextCopy is set, so the explicit
		#	duplicate saveMatrixText() call was removed.
		saveMatrixNumpy(mList[i], str(i).zfill(keyZPad), mpath, True)
	#end loop
	return
#end def ######## ######## ########
######## ######## ######## ########
# Function: save the key file for the metapath matrices
# Input ----
# mDict, dict
# key, str: metapath names
# value, [int, : corresponding index number for mList
# bool] : True means use matrix transpose
# path, str: path to the folder to save the file
# Returns ---- nothing
# Creates: a legend, mapping the path type on the right
# to the path matrix file on the left, where 't'
# indicates the transpose of that matrix should be used
def saveKeyFile(mDict, path) :
	"""Write key.txt, the metapath-matrix legend.

	mDict, dict: metapath name -> [file number, transpose flag]
		(True flag means the transpose of that matrix should be used)
	path, str: path to the folder to save the file
	Returns nothing.
	Creates: a legend mapping the path type (right) to the matrix file
	number (left), where 't' marks transpose entries.
	"""
	# If folder doesn't exist, create it
	if not os.path.exists(path) :
		os.makedirs(path)
	#end if
	# This file tells which matrix corresponds to which path
	fkey = open(path+"key.txt", "w")
	fkey.write("NOTE: 't' means use matrix transpose\n")
	separator = ""
	for pathName in sorted(mDict.keys()) :
		fileNum, useTranspose = mDict[pathName]
		flag = ",t" if useTranspose == True else ", "
		fkey.write("{}{}{}\t{}".format(
			separator, str(fileNum).zfill(keyZPad), flag, pathName))
		# every line after the first is preceded by \n
		separator = "\n"
	#end loop
	fkey.close()
	return
#end def ######## ######## ########
######## ######## ######## ########
# Function: save the list of genes
# Input ----
# mGenes, list of str: list of genes in matrix
# ASSUMPTION: list is already properly ordered
# path, str: path to the folder to save the file
# Returns ---- nothing
# Creates: file containing ordered list of genes to
# use as the row/col headers for path matrices
def saveGeneFile(mGenes, path) :
	"""Write genes.txt: the ordered gene names used as row/col headers
	for the path matrices, one per line (no trailing newline).

	mGenes, list of str: list of genes in matrix
		ASSUMPTION: list is already properly ordered
	path, str: path to the folder to save the file
	Returns nothing.
	"""
	# If folder doesn't exist, create it
	if not os.path.exists(path) :
		os.makedirs(path)
	#end if
	# one gene per line; join avoids a trailing blank line
	with open(path+"genes.txt", "w") as fgene :
		fgene.write("\n".join("{}".format(gene) for gene in mGenes))
	return
#end def ######## ######## ########
######## ######## ######## ########
# Function: Calculate the gene-gene PathSim metric from
# a given metapath matrix
# Input ----
# matrix, numpy matrixDT: the metapath matrix (#gene x #gene)
# Returns ----
# Sxy, numpy matrixDT: the PathSim matrix
def calcPathSimMatrix(matrix) :
	"""Calculate the gene-gene PathSim metric for a metapath matrix.

	matrix, numpy matrixDT: the metapath matrix (#gene x #gene)
	Returns Sxy, numpy array: the PathSim matrix, where
	Sxy = (Pxy + Pyx) / (Pxx + Pyy + 1); the +1 avoids divide-by-zero.
	"""
	# numerator: Pxy + Pyx
	numerator = np.zeros( matrix.shape, dtype=matrixDT)
	numerator = np.add( numerator, matrix.transpose() )
	numerator = np.add( numerator, matrix)
	# denominator: Pxx + Pyy, built by broadcasting the diagonal
	#	as a row vector against itself as a column vector
	diag = matrix.diagonal()
	rowVec = np.reshape( diag, [1,diag.shape[0]])
	colVec = np.reshape( rowVec, [rowVec.shape[1],1])
	denominator = np.add( colVec, rowVec )
	denominator = np.add( denominator, 1 ) # +1 so no divide by zero
	return np.divide( numerator, denominator )
#end def ######## ######## ########
######## ######## ######## ########
# Function: Read in the key.txt file regarding the
# metapath matrices
# Input ----
# path, str: path to the network files
# name, str: name of the network to use
# Returns ----
# keyDict, dict
# key, str: name of metapath
# value, tuple: int is matrix/file ID number
# bool where True means use matrix transpose
def readKeyFilePP(path) :
	"""Read key.txt describing the metapath matrices.

	path, str: path to the network files; normalized to end in
		'_MetaPaths/' if it doesn't already
	Returns keyDict, dict: metapath name -> [matrix/file ID number,
		transpose flag (True means use matrix transpose)].
	Exits if the key file does not exist.
	"""
	# Normalize the path to point inside the _MetaPaths folder
	if not path.endswith('_MetaPaths/') :
		if path.endswith('/') :
			path = path[0:-1] + '_MetaPaths/'
		else :
			path = path + '_MetaPaths/'
	#end if
	fname = path + "key.txt"
	# ERROR CHECK: verify file exists
	if not os.path.isfile(fname) :
		print( "ERROR: Specified file doesn't exist:" +
			" {}".format(fname) )
		sys.exit()
	#end if
	keyDict = dict()
	with open(fname, "r") as fk :
		allLines = fk.read().splitlines()
	# the first line is a header note — skip it
	for rawLine in allLines[1:] :
		# fields: "<zero-padded num>,<flag>\t<path name>"
		fields = rawLine.rstrip().split('\t')
		numPart = fields[0].split(',')
		keyDict[fields[1]] = [int(numPart[0]), numPart[1] == "t"]
	#end loop
	return keyDict
#end def ######## ######## ########
######## ######## ######## ########
# Function: Load the matrix containing the number of paths
# of this type which join the nodes in the network
# Input ----
# mpTuple [int, bool]: indicates which matrix file to use
# path, str: path to the network files
# name, str: name of the network to use
# Returns ----
# matrix, int array: num paths between node pairs
def getPathMatrix(mpTuple, path, name, sizeOf) :
	"""Load the saved path-count matrix identified by mpTuple.

	mpTuple, [int, bool]: matrix file number; True = use transpose
	path, str: path to the network files
	name, str: name of the network to use
	sizeOf, int: dimension of the (square) matrix
	Returns matrix, array: num paths between node pairs, transposed
	when mpTuple[1] is True.  Exits if neither a .gz nor a .txt file
	exists for that number.
	Uses module-level settings: keyZPad, speedVsMemory, matrixDT.
	"""
	zpad = keyZPad
	prename = (path + name +
		"{}".format(str(mpTuple[0]).zfill(zpad)) )
	if os.path.isfile(prename + '.gz') :
		fname = prename + '.gz'
	elif os.path.isfile(prename + '.txt') :
		fname = prename + '.txt'
	else :
		# ERROR CHECK: verify file exists
		print ( "ERROR: Specified file doesn't exist:" +
			" {}.txt/gz".format(str(mpTuple[0]).zfill(zpad)))
		sys.exit()
	#end if
	# Declare the matrix
	if speedVsMemory :
		matrix = np.zeros([sizeOf, sizeOf])
	else :
		matrix = np.zeros([sizeOf, sizeOf], dtype=matrixDT)
	#end if
	# BUG FIX: .txt files are plain text; opening them with gzip.open
	#	raised "Not a gzipped file" — choose the opener by extension
	fopen = gzip.open if fname.endswith('.gz') else open
	# Read in the file, placing values into matrix
	row = 0
	with fopen(fname, 'rb') as fin :
		for line in fin :
			matrix[row,:] = line.rstrip().split()
			row += 1
	#end with
	# Convert to transpose if flag==True
	if mpTuple[1] :
		return np.transpose(matrix)
	else :
		return matrix
#end def ######## ######## ########
def createMPLengthOne(pList, path) :
	"""Save each length-1 (primary) path matrix and its PathSim matrix,
	numbering them in sorted-name order, then write the key file.

	pList, dict: primary path name -> (numG x numG) matrix
	path, str: folder in which to save the matrices & key.txt
	Returns nothing.
	"""
	mDict = dict()
	for mNum, pName in enumerate(sorted(pList.keys())) :
		fileTag = str(mNum).zfill(keyZPad)
		saveMatrixNumpy(pList[pName], fileTag, path, True)
		saveMatrixNumpy(calcPathSimMatrix(pList[pName]), 'Sxy-' + fileTag, path, False)
		mDict[pName] = [mNum, False]
	#end loop
	saveKeyFile(mDict, path)
	return
#end def ######## ######## ########
def createMPLengthTwo(pList, path) :
	"""Create & save all length-2 metapath matrices.

	pList, dict: primary path name -> (numG x numG) matrix
	path, str: folder holding the saved matrices & key.txt

	Each new matrix (and its PathSim matrix) is saved under the next
	file number; a reversed path reuses the same file with the
	transpose flag set.  key.txt is rewritten at the end.
	Uses module-level settings: keepDouble, keyZPad, matrixExt.
	"""
	mDict = readKeyFilePP(path)
	mNum = len(mDict)
	pNames = list(pList.keys())
	pNames.sort()
	for p1 in pNames :
		for p2 in pNames :
			# Optionally skipping consecutive edges
			if not keepDouble :
				if p1 == p2 :
					continue
			#end if
			# The name of this path & of the reversed path
			name = p1+'-'+p2
			nameRev = p2+'-'+p1
			# Create new matrix if file doesn't already exist
			if not os.path.isfile(path + str(mNum).zfill(keyZPad) + matrixExt) :
				# BUG FIX: removed a dead np.zeros() allocation that was
				#	immediately overwritten by the np.dot() result
				newM = np.dot(pList[p1], pList[p2])
				saveMatrixNumpy(newM, str(mNum).zfill(keyZPad), path, True)
				SxyMatrix = calcPathSimMatrix(newM)
				saveMatrixNumpy(SxyMatrix, 'Sxy-'+str(mNum).zfill(keyZPad), path, False)
			#end if
			# Add the matrix name & number to mDict
			if name == nameRev : # (ie: typeA-typeA)
				# Then add just this matrix to the list
				mDict[name] = [mNum, False]
			else :
				# Add this path & note the reverse path
				mDict[name] = [mNum, False]
				# Reverse path uses transpose
				mDict[nameRev] = [mNum, True]
			#end if
			mNum += 1
		#end loop
	#end loop
	saveKeyFile(mDict, path)
	return
#end def ######## ######## ########
def createMPLengthThree(pList, path) :
	"""Create & save all length-3 metapath matrices.

	pList, dict: primary path name -> (numG x numG) matrix
	path, str: folder holding the saved matrices & key.txt

	Each new matrix (and its PathSim matrix) is saved under the next
	file number; a reversed path reuses the same file with the
	transpose flag set.  key.txt is rewritten at the end.
	Uses module-level settings: keepDouble, keepTriple, keyZPad, matrixExt.
	NOTE(review): numbering starts at len(mDict)+1 here but len(mDict)
	in createMPLengthTwo — confirm which is intended.
	"""
	mDict = readKeyFilePP(path)
	mNum = len(mDict)+1
	pNames = list(pList.keys())
	pNames.sort()
	checkSet = set()
	for p1 in pNames :
		for p2 in pNames :
			# Optionally skipping consecutive edges
			if not keepDouble :
				if p1 == p2 :
					continue
			#end if
			for p3 in pNames :
				# Optionally skipping consecutive edges
				# Skip if i=j=k (three in a row)
				if not keepTriple :
					if (p1 == p2) and (p2 == p3) :
						continue
				if not keepDouble :
					if p2 == p3 :
						continue
				#end if
				# The name of this path & of the reversed path
				name = p1+'-'+p2+'-'+p3
				nameRev = p3+'-'+p2+'-'+p1
				# Verify this path wasn't yet calculated
				#	if it has been, skip it
				if name not in checkSet :
					checkSet.add(name)
					# Create new matrix if file doesn't already exist
					if not os.path.isfile(path + str(mNum).zfill(keyZPad) + matrixExt) :
						# Calculate the matrix
						temp = np.dot(pList[p1], pList[p2])
						newM = np.dot(temp, pList[p3])
						# Save the data
						saveMatrixNumpy(newM, str(mNum).zfill(keyZPad), path, True)
						SxyMatrix = calcPathSimMatrix(newM)
						saveMatrixNumpy(SxyMatrix, 'Sxy-'+str(mNum).zfill(keyZPad), path, False)
					#end if
					mDict[name] = [mNum, False]
					# BUG FIX: the reverse path just reuses this matrix
					#	with transpose=True; previously this branch
					#	re-saved `newM`, which is undefined (or stale)
					#	when the matrix file already existed
					if nameRev not in checkSet :
						checkSet.add(nameRev)
						mDict[nameRev] = [mNum, True]
					#end if
				#end if
				mNum += 1
			#end loop
		#end loop
	#end loop
	saveKeyFile(mDict, path)
	return
#end def ######## ######## ########
def createMPLengthThreeFast(pList, path) :
	"""Create & save all length-3 metapath matrices by multiplying each
	primary (length-1) matrix against the saved length-2 matrices.

	pList, dict: primary path name -> (numG x numG) matrix
	path, str: folder holding the saved matrices & key.txt

	Reads key.txt (via readKeyFilePP) to find the existing length-2
	matrices, reloads each needed one from disk (faster than
	re-multiplying), saves each new matrix and its PathSim matrix,
	and rewrites key.txt with the new entries.

	Uses module-level settings: keepDouble, keepTriple, verbose,
	keyZPad, matrixExt.
	"""
	# Read in the list of matrices already made
	mDict = readKeyFilePP(path)
	# Resume numbering from the highest file number already assigned
	mNum = 0
	for tup in mDict.values() :
		mNum = max( mNum, tup[0] )
	#end loop
	# Get the length-1 paths (primary paths)
	l1Names = list(pList.keys())
	l1Names.sort()
	# Separate out the length-2 paths
	l2Names = list()
	lOthers = list()
	mNames = list(mDict.keys())
	mNames.sort()
	for p in mNames :
		# path length == number of '-'-separated edge types in the name
		length = p.count('-') + 1
		if length == 2 :
			l2Names.append(p)
		elif length > 2 :
			lOthers.append(p)
	#end loop
	if len(lOthers) > 0 :
		print("WARNING: in createMPLengthThreeFast(), list of metapath names" +
			" contains {} that are greater than length-2.".format(len(lOthers)))
	# end if
	# ERROR CHECK: ensure lists aren't empty
	if len(l1Names) == 0 :
		print("ERROR: no paths of length 1 found.")
		sys.exit()
	elif len(l2Names) == 0 :
		print("ERROR: no paths of length 2 found.")
		sys.exit()
	#end if
	# the number of rows in the matrix (for later)
	mRows = pList[l1Names[0]].shape[0]
	# Create the length-3 matrices
	checkSet = set()
	for p1 in l1Names :
		for p2 in l2Names :
			mName = p1+'-'+p2
			mNameSplit = mName.split('-')
			# Optional: skip 2 consecutive edges
			if not keepDouble :
				if (mNameSplit[0] == mNameSplit[1]) or (mNameSplit[1] == mNameSplit[2]) :
					continue
			# Optional: skip 3 consecutive edges
			if not keepTriple :
				if (mNameSplit[0] == mNameSplit[1]) and (mNameSplit[1] == mNameSplit[2]) :
					continue
			#end if
			# Name of the reverse path
			#TODO: there must be a cleaner way to do this
			mNameRev = mNameSplit[2] + '-' + mNameSplit[1] + '-' + mNameSplit[0]
			# Skip if this path was already calculated (is in checkSet)
			if mName not in checkSet :
				mNum += 1
				checkSet.add(mName)
				mDict[mName] = [mNum, False]
				if verbose :
					print("  #{}, {} & {}".format(mNum, mName, mNameRev))
				# Check the reverse path (the transpose)
				if mNameRev not in checkSet :
					checkSet.add(mNameRev)
					mDict[mNameRev] = [mNum, True]
				#end if
				# NOTE: the file may already exist but not be in mDict
				#	ie: the process was interrupted, and key.txt wasn't updated
				# Create new matrix (if file doesn't already exist)
				if not os.path.isfile(path + str(mNum).zfill(keyZPad) + matrixExt) :
					# Calculate the new matrix
					# read in the length-2 from file (faster than re-calculate)
					mTwo = getPathMatrix(mDict[p2], path, '', mRows)
					mThree = np.dot(pList[p1], mTwo)
					# Save the data
					saveMatrixNumpy(mThree, str(mNum).zfill(keyZPad), path, True)
					SxyMatrix = calcPathSimMatrix(mThree)
					saveMatrixNumpy(SxyMatrix, 'Sxy-'+str(mNum).zfill(keyZPad), path, False)
				#end if
			#end if
		#end loop
	#end loop
	saveKeyFile(mDict, path)
	return
#end def ######## ######## ########
def createMPLengthFour(pList, path) :
	"""Create the 4-step metapath matrices and save them to disk.

	pList, dict: primary path name -> NxN matrix (level-1 paths)
	path, str: folder in which to save the matrices & key file
	Returns nothing; appends entries to the key file. A reverse path is
	the transpose of an existing matrix, so it is recorded with the
	transpose flag set rather than saved a second time.
	"""
	mDict = readKeyFilePP(path)
	mNum = len(mDict)
	pNames = list(pList.keys())
	pNames.sort()
	checkSet = set()
	for p1 in pNames :
		for p2 in pNames :
			# Optionally skip two consecutive identical edges
			if not keepDouble :
				if p1 == p2 :
					continue
			#end if
			for p3 in pNames :
				# Skip if p1=p2=p3 (three in a row)
				if not keepTriple :
					if (p1 == p2) and (p2 == p3) :
						continue
				if not keepDouble :
					if p2 == p3 :
						continue
				#end if
				for p4 in pNames:
					# Optionally skip consecutive edges
					if not keepDouble :
						if p3 == p4 :
							continue
					#end if
					# Skip if p2=p3=p4 (three in a row)
					if not keepTriple :
						if (p2 == p3) and (p3 == p4) :
							continue
					#end if
					# Name of this path and of its reverse
					name = p1+'-'+p2+'-'+p3+'-'+p4
					nameRev = p4+'-'+p3+'-'+p2+'-'+p1
					# Skip paths already calculated (or recorded as a reverse)
					if name not in checkSet :
						checkSet.add(name)
						# Create new matrix only if file doesn't already exist
						if not os.path.isfile(path + str(mNum).zfill(keyZPad) + matrixExt) :
							temp1 = np.dot(pList[p1], pList[p2])
							temp2 = np.dot(temp1, pList[p3])
							newM = np.dot(temp2, pList[p4])
							# Save the matrix and its path-sim matrix
							saveMatrixNumpy(newM, str(mNum).zfill(keyZPad), path, True)
							SxyMatrix = calcPathSimMatrix(newM)
							saveMatrixNumpy(SxyMatrix, 'Sxy-'+str(mNum).zfill(keyZPad), path, False)
						#end if
						mDict[name] = [mNum, False]
						# The reverse path shares the same matrix (transposed):
						# record it with the transpose flag; no second save.
						# BUGFIX: previously re-saved 'newM' here, which raised
						# NameError whenever the file already existed above.
						if nameRev not in checkSet :
							checkSet.add(nameRev)
							mDict[nameRev] = [mNum, True]
						#end if
					#end if
					# NOTE: mNum advances every iteration (matches the original
					# numbering scheme), so file indices may contain gaps.
					mNum += 1
				#end loop
			#end loop
		#end loop
	#end loop
	saveKeyFile(mDict, path)
	return
#end def ######## ######## ########
######## ######## ######## ########
# Function: save a list of matrices
# Input ----
# pList, dict
# key, str: primary path names
# ie: the 1-level path matrices
# value, int: corresponding index number for mList
# mGenes, list of str: names of genes in the matrix
# mpath, str: path to the folder to save the file
# Returns ---- nothing
# Creates: The set of path matrices in the appropriate
# folder. These are simply named numerically, with a
# key/legend file provided. The list of genes used as
# row/col headers is also saved to that folder.
def createMetaPaths(pDict, gList, depth, path) :
	"""Create and save the metapath matrices up to the requested depth.

	pDict, dict: primary path name -> NxN matrix (level-1 paths)
	gList, list of str: names of genes (the matrix row/col headers)
	depth, int: number of steps to build; values < 1 are raised to 1,
		values > 4 only produce up to 4 (the implemented maximum)
	path, str: folder in which to save the matrices & key file
	Returns nothing; creates numerically named matrix files, a key file,
	and the gene-name file in the target folder.
	"""
	pNames = list(pDict.keys())
	pNames.sort()
	maxDepth = 4
	if depth > maxDepth :
		print( "WARNING: Can only calculate up to " +
			"{}-step metapaths.".format(maxDepth) )
	elif depth < 1 :
		print( "WARNING: Requested metapaths of length" +
			" {};".format(depth) +
			" Will return only 1-step paths.")
		depth = 1
	#end if
	# If directory already exists, empty it; else create it
	if os.path.exists(path) :
		clearFilesInDirectory(path)
	else :
		os.makedirs(path)
	#end if
	# ERROR CHECK: verify gene list & matrix dimensions
	if len(gList) != pDict[pNames[0]].shape[0] :
		print( "ERROR: The provided list of genes" +
			" does not match the matrix. No paths created.")
		return
	elif pDict[pNames[0]].shape[0] != pDict[pNames[0]].shape[1] :
		print( "ERROR: The primary path matrix passed" +
			" is not square.")
		return
	#end if
	# Save the list of genes to file
	saveGeneFile(gList, path)
	# Build the paths one level at a time, stopping at the requested depth
	createMPLengthOne(pDict, path)
	print(" finished creating paths of length 1")
	if depth < 2 :
		return
	#end if
	createMPLengthTwo(pDict, path)
	print(" finished creating paths of length 2")
	if depth < 3 :
		return
	#end if
	createMPLengthThreeFast(pDict, path)
	print(" finished creating paths of length 3")
	if depth < 4 :
		return
	#end if
	createMPLengthFour(pDict, path)
	print(" finished creating paths of length 4")
	return
#end def ######## ######## ########
######## ######## ######## ########
# Function: Load the matrix containing the number of paths
# of this type which join the nodes in the network
# Input ----
# mpTuple [int, bool]: indicates which matrix file to use
# path, str: path to the network files
# name, str: name of the network to use
# Returns ----
# matrix, int array: num paths between node pairs
def getPrimaryMatrixGZip(mpTuple, path, name, sizeOf) :
	"""Load one metapath matrix from its (possibly gzipped) file.

	mpTuple, [int, bool]: matrix file index & transpose flag
	path, str: path to the network files
	name, str: name of the network to use
	sizeOf, int: dimension of the (square) matrix
	Returns the NxN matrix (transposed when mpTuple[1] is True).
	Exits the program if neither the .gz nor the .txt file exists.
	"""
	prename = (path + name + "_MetaPaths/" +
		"{}".format(str(mpTuple[0]).zfill(keyZPad)) )
	if os.path.isfile(prename + '.gz') :
		fname = prename + '.gz'
	elif os.path.isfile(prename + '.txt') :
		fname = prename + '.txt'
	else :
		# ERROR CHECK: verify file exists
		# BUGFIX: previously formatted the (still undefined) 'fname' here,
		# raising NameError instead of printing the missing path.
		print( "ERROR: Specified file doesn't exist:" +
			" {}".format(prename) )
		sys.exit()
	#end if
	# Declare the matrix; dtype depends on the speed-vs-memory flag
	if speedVsMemory :
		matrix = np.zeros([sizeOf, sizeOf])
	else :
		matrix = np.zeros([sizeOf, sizeOf], dtype=matrixDT)
	#end if
	# Read in the file: one whitespace-delimited row per line
	# (the with-statement also fixes a previously leaked open handle)
	row = 0
	with gzip.open(fname, 'rb') as fin :
		for line in fin :
			line = line.rstrip()
			matrix[row,:] = line.split()[:]
			row += 1
	#end with
	# Convert to transpose if flag==True
	if mpTuple[1] :
		return np.transpose(matrix)
	else :
		return matrix
#end def ######## ######## ########
######## ######## ######## ########
# Function: read in the primary matrices
# Input ----
# nName, str: name of the network
# nPath, str: path to the network
# Returns ----
#TODO: no real need to return pNames
# # pNames, str list: sorted list of edge types / path
# # names included in the pList dict
# pList, dict
# key, str: edge/path name
# value, NxN matrix: the primary path matrix
# ie: the level-1 path matrices
def readPrimaryMatrices(nPath, nName) :
	"""Read in the primary (level-1) path matrices for a network.

	nPath, str: path to the network
	nName, str: name of the network
	Returns pList, dict: edge/path name -> NxN primary path matrix.
	Exits the program if the _Primaries folder or a matrix file is missing.
	"""
	# Check for folder existence
	path = nPath + nName + "_Primaries/"
	if not os.path.exists(path) :
		print("ERROR: Path doesn't exist: {}".format(path))
		sys.exit()
	#end if
	pList = dict()
	# Read the key file; each line: <file index> TAB <path name>
	# (the with-statement also closes a previously leaked file handle)
	with open(path + "key.txt", "r") as fn :
		for line in fn :
			line = line.rstrip()
			lv = line.split('\t')
			if verbose:
				print(" reading matrix {}".format(lv[1]))
			#end if
			# Verify matrix file exists (gzipped or plain text)
			if os.path.isfile( path + lv[0] + '.gz' ) :
				fname = path + lv[0] + '.gz'
			elif os.path.isfile( path + lv[0] + '.txt' ) :
				fname = path + lv[0] + '.txt'
			else :
				print("ERROR: Unknown file name and extension" +
					" for matrix {}.".format(lv[0]))
				sys.exit()
			#end if
			# count # lines in file (size of the square matrix)
			sizeOf = 0
			with gzip.open(fname, 'rb') as fin :
				for mline in fin :
					sizeOf += 1
			#end with
			# Declare the matrix; dtype depends on the speed-vs-memory flag
			if speedVsMemory :
				matrix = np.zeros([sizeOf, sizeOf])
			else :
				matrix = np.zeros([sizeOf, sizeOf], dtype=matrixDT)
			#end if
			# Read in the file, placing values into matrix
			row = 0
			with gzip.open(fname, 'rb') as fin :
				for mline in fin :
					matrix[row,:] = mline.rstrip().split()[:]
					row += 1
			#end with
			pList[lv[1]] = matrix
			if verbose :
				print(" finished loading {}, total: {} bytes".format(lv[1], pList[lv[1]].nbytes))
				print(" current time: {}".format( time.time() ))
	#end loop
	return pList
#end def ######## ######## ########
######## ######## ######## ########
# Function: count degree of specified genes by
# each edge type and save output to file
# Input ----
# nPath, str: path to the network files
# nName, str: name of the network (save location)
# edgeArray: (Nx4) matrix of char strings
# each row: node, node, edge weight, edge type
# !ASSUME: this is the gene-only version of the network
# genesAll, str list: list of all genes in network
# humanRegex, str list: regex for first 4 chars of genes to keep
# Returns ----
# Creates ----
# file called node-degree.txt ... TODO: explain
def saveSelectGeneDegrees(nPath, nName, edgeArray, genesAll, humanRegex) :
	"""Count the degree of selected (human) genes per edge type; save to file.

	nPath, str: path to the network files
	nName, str: name of the network (save location)
	edgeArray: (Nx4) matrix of char strings; each row:
		node, node, edge weight, edge type (gene-only network assumed)
	genesAll, str list: list of all genes in network
	humanRegex, str list: regexes; genes matching any of them are kept
	Creates node-degree.txt (or node-degreeNNN.txt if it already exists):
	one row per kept gene, column 0 = total degree, then one column per
	edge type.
	"""
	# If folder doesn't exist, create it
	if not os.path.exists(nPath+nName+"/") :
		os.makedirs(nPath+nName+"/")
	#end if
	# Build an index dictionary from genes that match any human regex
	genesAll = np.unique(genesAll)
	genesAll.sort()
	gHumanDict = dict()
	index = 0
	for gene in genesAll :
		# any() short-circuits on the first matching expression
		if any(re.match(exp, gene) is not None for exp in humanRegex) :
			gHumanDict[gene] = index
			index += 1
	#end loop
	# Build an index dictionary from the edge types
	eTypes = np.unique(edgeArray[:,3])
	eTypes.sort()
	eDict = dict()
	index = 1	# col 0 reserved for 'all'
	for et in eTypes :
		eDict[et] = index
		index += 1
	#end loop
	# matrix to store degree counts (col 0 reserved for 'all')
	degreeMatrix = np.zeros([ len(gHumanDict), len(eTypes)+1 ])
	# First, count degrees along SPECIFIED edge types; each edge
	# increments the degree of both of its (kept) endpoints
	for row in edgeArray :
		if row[0] in gHumanDict :
			degreeMatrix[ gHumanDict[row[0]], eDict[row[3]] ] += 1
		if row[1] in gHumanDict :
			degreeMatrix[ gHumanDict[row[1]], eDict[row[3]] ] += 1
	#end loop
	# Second, sum degrees along ALL edge types into column 0
	degreeMatrix[:, 0] = np.sum(degreeMatrix, axis=1)
	fname = nPath+nName+'/node-degree.txt'
	# ERROR CHECK: rename if file exists
	if os.path.isfile(fname) :
		print( "WARNING: Specified file already exists:" +
			" {}".format(fname) )
		i = 0
		while os.path.isfile(fname) :
			fname = nPath+nName+"/node-degree{:03d}.txt".format(i)
			i += 1
		print(" using new file name: {}".format(fname))
	#end if
	# Write headers then one row per gene (with-statement closes the file)
	with open(fname, 'w') as outf :
		outf.write("HEADER{}all".format(textDelim))
		for et in eTypes :
			outf.write("{}{}".format(textDelim, et))
		gHumanList = list(gHumanDict.keys())
		gHumanList.sort()
		for i in range(len(gHumanList)) :
			outf.write( "\n{}".format(gHumanList[i]) )
			for j in range(degreeMatrix.shape[1]) :
				outf.write( "{}{:.0f}".format(textDelim, degreeMatrix[i,j]) )
		#end loop
	return
#end def ######## ######## ########
"""Record used to represent a service block of an out of band invitation."""
from typing import Sequence
from marshmallow import EXCLUDE, fields, post_dump
from .....messaging.models.base import BaseModel, BaseModelSchema
from .....messaging.valid import (
DID_KEY_EXAMPLE,
DID_KEY_VALIDATE,
INDY_DID_EXAMPLE,
INDY_DID_VALIDATE,
)
class Service(BaseModel):
    """Record used to represent a service block of an out of band invitation."""

    class Meta:
        """Service metadata."""

        schema_class = "ServiceSchema"

    def __init__(
        self,
        *,
        _id: str = None,
        _type: str = None,
        did: str = None,
        recipient_keys: Sequence[str] = None,
        routing_keys: Sequence[str] = None,
        service_endpoint: str = None,
    ):
        """Initialize a Service instance.

        Args:
            _id: An identifier for this service block
            _type: A type for this service block
            did: A DID for the connection
            recipient_keys: A list of recipient keys in W3C did:key format
            routing_keys: A list of routing keys in W3C did:key format
            service_endpoint: An endpoint for the connection

        """
        self._id = _id
        self._type = _type
        self.did = did
        # Normalize key sequences to (possibly empty) lists
        self.recipient_keys = list(recipient_keys or [])
        self.routing_keys = list(routing_keys or [])
        self.service_endpoint = service_endpoint
class ServiceSchema(BaseModelSchema):
    """Service schema.

    Serializes/deserializes the Service model. Unknown input fields are
    ignored (EXCLUDE), and an empty routingKeys list is dropped on dump.
    """

    class Meta:
        """ServiceSchema metadata."""

        model_class = Service
        unknown = EXCLUDE

    # Leading underscores avoid shadowing Python builtins/keywords;
    # data_key maps the fields back to "id"/"type" on the wire.
    _id = fields.Str(
        required=True, data_key="id", metadata={"description": "Service identifier"}
    )
    _type = fields.Str(
        required=True, data_key="type", metadata={"description": "Service type"}
    )
    # Optional DID for the connection, validated as an Indy DID
    did = fields.Str(
        required=False,
        validate=INDY_DID_VALIDATE,
        metadata={"description": "Service DID", "example": INDY_DID_EXAMPLE},
    )
    # Recipient keys, each validated as a W3C did:key
    recipient_keys = fields.List(
        fields.Str(
            validate=DID_KEY_VALIDATE,
            metadata={
                "description": "Recipient public key",
                "example": DID_KEY_EXAMPLE,
            },
        ),
        data_key="recipientKeys",
        required=False,
        metadata={"description": "List of recipient keys"},
    )
    # Routing keys, each validated as a W3C did:key
    routing_keys = fields.List(
        fields.Str(
            validate=DID_KEY_VALIDATE,
            metadata={"description": "Routing key", "example": DID_KEY_EXAMPLE},
        ),
        data_key="routingKeys",
        required=False,
        metadata={"description": "List of routing keys"},
    )
    service_endpoint = fields.Str(
        data_key="serviceEndpoint",
        required=False,
        metadata={
            "description": "Service endpoint at which to reach this agent",
            "example": "http://192.168.56.101:8020",
        },
    )

    @post_dump
    def post_dump(self, data, **kwargs):
        """Post dump hook: omit routingKeys entirely when the list is empty."""
        if "routingKeys" in data and not data["routingKeys"]:
            del data["routingKeys"]
        return data
|
import cv2 as cv2
import numpy as np

# Alternative test images (uncomment one to switch inputs):
# image = '../test_pictures/textbook/circuit6.png'
# image = '../test_pictures/handwritten/0524_brandnew.jpg'
# image = '../test_pictures/textbook/tough_circuit.png'
# image = '../test_pictures/handwritten/3_loop_circuit_i.jpg'
image = '../test_pictures/handwritten/3_loop_new.jpg'

img = cv2.imread(image)
print(img.shape)
# BUGFIX: interpolation must be passed by keyword; when passed positionally
# it lands in cv2.resize's 'dst' parameter and INTER_AREA is not applied.
resized_img = cv2.resize(
    img,
    (int(img.shape[1] * 0.2), int(img.shape[0] * 0.2)),
    interpolation=cv2.INTER_AREA,
)
loop_detection_img = img.copy()
img_x, img_y, img_z = img.shape
print(resized_img.shape)
cv2.imwrite('../test_pictures/handwritten/3_loop_new_r.jpg', resized_img)
# cv2.imwrite('../test_pictures/textbook/0524_tough.png', resized_img)
|
#encoding: UTF-8
#By Diego
#Compilador de Texto - Graphics
from Graphics import *
from Myro import pickAFile
# -------------------- FILE READING --------------------
def leerDoc():
    """Ask the user to pick a file and return the chosen path."""
    return pickAFile()
# -------------------- COLOR PARSING --------------------
def entradaColor(lineInput):
    """Return the color named in a parsed command line, or "black".

    A color token is present when the line has 6 tokens, or 5 tokens for
    a circle command ("c"); the color is always the last token.
    """
    has_color = len(lineInput) == 6 or (len(lineInput) == 5 and lineInput[0] == "c")
    return lineInput[-1] if has_color else "black"
# -------------------- WINDOW CREATION --------------------
def crearVentana(lineInput):
    """Create a Window from a parsed "v" command: v <width> <height> <title>."""
    width = int(lineInput[1])
    height = int(lineInput[2])
    return Window(lineInput[3], width, height)
# -------------------- RECTANGLE --------------------
def crearRect(lineInput, win):
    """Draw a filled rectangle from a parsed "r" command onto *win*."""
    corner_a = (int(lineInput[1]), int(lineInput[2]))
    corner_b = (int(lineInput[3]), int(lineInput[4]))
    shape = Rectangle(corner_a, corner_b)
    shape.fill = Color(entradaColor(lineInput))
    shape.draw(win)
# -------------------- CIRCLE --------------------
def crearCirc(lineInput, win):
    """Draw a filled circle from a parsed "c" command onto *win*."""
    center = (int(lineInput[1]), int(lineInput[2]))
    radius = int(lineInput[3])
    shape = Circle(center, radius)
    shape.fill = Color(entradaColor(lineInput))
    shape.draw(win)
# -------------------- LINE --------------------
def crearLinea(lineInput, win):
    """Draw a colored line from a parsed "l" command onto *win*."""
    start = (int(lineInput[1]), int(lineInput[2]))
    end = (int(lineInput[3]), int(lineInput[4]))
    segment = Line(start, end)
    segment.color = Color(entradaColor(lineInput))
    segment.draw(win)
def main():
    """Read a drawing script and execute its commands line by line.

    Commands: "v" creates the window; "r", "c", "l" draw a rectangle,
    circle, or line in the most recently created window.
    """
    win = None
    # with-statement closes the file (previously left open)
    with open(leerDoc(), "r") as document:
        for line in document:
            # Skip comment lines (any line containing '#')
            if "#" in line:
                continue
            lineInput = line.split()
            # BUGFIX: blank lines previously crashed on lineInput[0]
            if not lineInput:
                continue
            if lineInput[0] == "v":
                win = crearVentana(lineInput)
            if lineInput[0] == "r":
                crearRect(lineInput, win)
            if lineInput[0] == "c":
                crearCirc(lineInput, win)
            if lineInput[0] == "l":
                crearLinea(lineInput, win)

main()
"""Tests for `rust_phantom_style` settings."""
from rust_test_common import *
class TestPhantomStyle(TestBase):
    # NOTE(review): these tests drive the real plugin save-event handler and
    # rely on the harness (TestBase, UiIntercept, plugin, messages, sublime)
    # provided by rust_test_common.

    def test_style_none(self):
        """`rust_phantom_style = none`: regions are drawn, no phantoms."""
        self._override_setting('rust_phantom_style', 'none')
        self._with_open_file('tests/error-tests/tests/cast-to-unsized-trait-object-suggestion.rs',
            self._test_style_none)

    def _test_style_none(self, view):
        with UiIntercept(passthrough=True) as ui:
            # Trigger a syntax check by simulating a save after a clean build.
            e = plugin.SyntaxCheckPlugin.RustSyntaxCheckEvent()
            self._cargo_clean(view)
            e.on_post_save(view)
            self._get_rust_thread().join()
            # 'none' style: no phantoms should have been created.
            self.assertEqual(len(ui.phantoms), 0)
            regions = ui.view_regions[view.file_name()]
            # Extremely basic check, the number of unique regions displayed.
            rs = [(r.a, r.b) for r in regions]
            self.assertEqual(len(set(rs)), 4)

    def test_style_popup(self):
        """`rust_phantom_style = popup`: messages appear as hover popups."""
        self._override_setting('rust_phantom_style', 'popup')
        self._with_open_file('tests/error-tests/tests/cast-to-unsized-trait-object-suggestion.rs',
            self._test_style_popup)

    def _test_style_popup(self, view):
        with UiIntercept(passthrough=True) as ui:
            # Trigger a syntax check by simulating a save after a clean build.
            e = plugin.SyntaxCheckPlugin.RustSyntaxCheckEvent()
            self._cargo_clean(view)
            e.on_post_save(view)
            self._get_rust_thread().join()
            # Popup style still creates no inline phantoms.
            self.assertEqual(len(ui.phantoms), 0)
            regions = ui.view_regions[view.file_name()]
            # Extremely basic check, the number of unique regions displayed.
            rs = [(r.a, r.b) for r in regions]
            self.assertEqual(len(set(rs)), 4)
            # Trigger popup.
            self.assertEqual(len(ui.popups), 0)
            for region in regions:
                messages.message_popup(view, region.begin(), sublime.HOVER_TEXT)
                popups = ui.popups[view.file_name()]
                self.assertEqual(len(popups), 1)
                self.assertIn('cast to unsized type', popups[0]['content'])
                ui.popups.clear()
            # Trigger gutter hover.
            for lineno in (12, 16):
                pt = view.text_point(lineno - 1, 0)
                messages.message_popup(view, pt, sublime.HOVER_GUTTER)
                popups = ui.popups[view.file_name()]
                self.assertEqual(len(popups), 1)
                self.assertIn('cast to unsized type', popups[0]['content'])
                ui.popups.clear()
|
# -----------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -----------------------------------------------------------------------------
from collections import OrderedDict
import json
import os
import shutil
import sys
from knack.log import get_logger
from knack.prompting import prompt_y_n
from knack.util import CLIError
from azdev.utilities import (
cmd, py_cmd, pip_cmd, display, get_ext_repo_paths, find_files, get_azure_config, get_azdev_config,
require_azure_cli, heading, subheading, EXTENSION_PREFIX)
logger = get_logger(__name__)
def add_extension(extensions):
    """Install extensions (editable/pip -e) from the configured extension repos.

    :param extensions: list of extension names, or ['*'] for all discovered.
    :raises CLIError: if no repo is configured or a name cannot be found.
    """
    ext_paths = get_ext_repo_paths()
    if not ext_paths or ext_paths == ['_NONE_']:
        raise CLIError('Extension repo path is empty. Please try `azdev extension repo add` to add an extension repo')
    all_extensions = find_files(ext_paths, 'setup.py')
    if extensions == ['*']:
        paths_to_add = [os.path.dirname(path) for path in all_extensions
                        if 'site-packages' not in path and 'vendored_sdks' not in path]
    else:
        # BUGFIX: track remaining names on a copy so the caller's list
        # is not mutated as a side effect.
        remaining = list(extensions)
        paths_to_add = []
        for path in all_extensions:
            folder = os.path.dirname(path)
            long_name = os.path.basename(folder)
            if long_name in remaining:
                paths_to_add.append(folder)
                remaining.remove(long_name)
        # raise error if any extension wasn't found
        if remaining:
            raise CLIError('extension(s) not found: {}'.format(' '.join(remaining)))
    for path in paths_to_add:
        result = pip_cmd('install -e {}'.format(path), "Adding extension '{}'...".format(path))
        if result.error:
            raise result.error  # pylint: disable=raising-bad-type
def remove_extension(extensions):
    """Uninstall dev extensions and delete their egg-info/dist-info folders.

    :param extensions: list of extension names, or ['*'] for all installed.
    :raises CLIError: if a requested extension is not installed.
    """
    ext_paths = get_ext_repo_paths()
    installed_paths = find_files(ext_paths, '*.*-info')
    paths_to_remove = []
    names_to_remove = []
    if extensions == ['*']:
        paths_to_remove = [os.path.dirname(path) for path in installed_paths]
        names_to_remove = [os.path.basename(os.path.dirname(path)) for path in installed_paths]
    else:
        # BUGFIX: track remaining names on a copy so the caller's list
        # is not mutated as a side effect.
        remaining = list(extensions)
        for path in installed_paths:
            folder = os.path.dirname(path)
            long_name = os.path.basename(folder)
            if long_name in remaining:
                paths_to_remove.append(folder)
                names_to_remove.append(long_name)
                remaining.remove(long_name)
        # raise error if any extension not installed
        if remaining:
            raise CLIError('extension(s) not installed: {}'.format(' '.join(remaining)))
    # removes any links that may have been added to site-packages.
    for ext in names_to_remove:
        pip_cmd('uninstall {} -y'.format(ext))
    for path in paths_to_remove:
        for d in os.listdir(path):
            # delete the egg-info and dist-info folders to make the extension invisible to the CLI and azdev
            if d.endswith('egg-info') or d.endswith('dist-info'):
                path_to_remove = os.path.join(path, d)
                display("Removing '{}'...".format(path_to_remove))
                shutil.rmtree(path_to_remove)
def _get_installed_dev_extensions(dev_sources):
    """Return [{'name', 'path'}] for dev extensions found under *dev_sources*.

    An installed extension is a directory containing an ``*.egg-info`` entry.
    The scan skips hidden directories and descends at most three levels deep.
    """
    from glob import glob
    found = []

    def _scan(directory, depth=0, max_depth=3):
        # Stop at non-directories, the depth limit, or hidden folders.
        if not os.path.isdir(directory) or depth == max_depth or os.path.split(directory)[-1].startswith('.'):
            return
        egg_info = glob(os.path.join(directory, '*.egg-info'))
        if egg_info:
            ext_path = os.path.dirname(egg_info[0])
            found.append({'name': os.path.split(ext_path)[-1], 'path': ext_path})
        else:
            for entry in os.listdir(directory):
                _scan(os.path.join(directory, entry), depth + 1, max_depth)

    for source in dev_sources:
        _scan(source)
    return found
def list_extensions():
    """List dev extensions across configured dev sources with install state."""
    from glob import glob
    azure_config = get_azure_config()
    dev_sources = azure_config.get('extension', 'dev_sources', None)
    dev_sources = dev_sources.split(',') if dev_sources else []
    installed = _get_installed_dev_extensions(dev_sources)
    installed_names = {item['name'] for item in installed}
    results = []
    for ext_path in find_files(dev_sources, 'setup.py'):
        # skip non-extension packages that may be in the extension folder (for example, from a virtual environment)
        try:
            glob_pattern = os.path.join(os.path.split(ext_path)[0], '{}*'.format(EXTENSION_PREFIX))
            _ = glob(glob_pattern)[0]
        except IndexError:
            continue
        # ignore anything in site-packages folder
        if 'site-packages' in ext_path:
            continue
        folder = os.path.dirname(ext_path)
        long_name = os.path.basename(folder)
        status = 'Installed' if long_name in installed_names else ''
        results.append({'name': long_name, 'install': status, 'path': folder})
    return results
def _get_sha256sum(a_file):
    """Return the SHA-256 hex digest of the file at *a_file*."""
    import hashlib
    sha256 = hashlib.sha256()
    # Hash in 1 MiB chunks so large wheel files are not read into memory whole.
    with open(a_file, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):
            sha256.update(chunk)
    return sha256.hexdigest()
def add_extension_repo(repos):
    """Register local repos as extension dev sources in both config stores."""
    from azdev.operations.setup import _check_repo
    az_config = get_azure_config()
    env_config = get_azdev_config()
    dev_sources = az_config.get('extension', 'dev_sources', None)
    dev_sources = dev_sources.split(',') if dev_sources else []
    for repo in repos:
        abs_repo = os.path.abspath(repo)
        _check_repo(abs_repo)
        # only add repos that are not already registered
        if abs_repo not in dev_sources:
            dev_sources.append(abs_repo)
    # persist to both the azure CLI config and the azdev config
    joined = ','.join(dev_sources)
    az_config.set_value('extension', 'dev_sources', joined)
    env_config.set_value('ext', 'repo_paths', joined)
    return list_extension_repos()
def remove_extension_repo(repos):
    """Deregister extension dev repos; warn about repos that were not registered."""
    az_config = get_azure_config()
    env_config = get_azdev_config()
    dev_sources = az_config.get('extension', 'dev_sources', None)
    dev_sources = dev_sources.split(',') if dev_sources else []
    for repo in repos:
        try:
            dev_sources.remove(os.path.abspath(repo))
        # BUGFIX: list.remove raises ValueError (not CLIError) on a missing
        # item, so the warning below was previously unreachable and a
        # missing repo crashed with an uncaught ValueError.
        except ValueError:
            logger.warning("Repo '%s' was not found in the list of repositories to search.", os.path.abspath(repo))
    az_config.set_value('extension', 'dev_sources', ','.join(dev_sources))
    env_config.set_value('ext', 'repo_paths', ','.join(dev_sources))
    return list_extension_repos()
def list_extension_repos():
    """Return the configured extension dev sources as a list (or the unset value)."""
    dev_sources = get_azure_config().get('extension', 'dev_sources', None)
    if not dev_sources:
        # preserve the unset value (None or empty string) as-is
        return dev_sources
    return dev_sources.split(',')
def update_extension_index(extensions):
    """Add or update entries in the azure-cli-extensions index.json.

    :param extensions: list of strings, each containing the URL of a WHL file.
    :raises CLIError: if the index cannot be found or a URL cannot be parsed.
    """
    import re
    import tempfile

    from .util import get_ext_metadata, get_whl_from_url

    ext_repos = get_ext_repo_paths()
    index_path = next((x for x in find_files(ext_repos, 'index.json') if 'azure-cli-extensions' in x), None)
    if not index_path:
        raise CLIError("Unable to find 'index.json' in your extension repos. Have "
                       "you cloned 'azure-cli-extensions' and added it to you repo "
                       "sources with `azdev extension repo add`?")

    # BUGFIX: dots escaped so the version component must be literal digits
    # separated by '.' (previously '.' matched any character).
    NAME_REGEX = r'.*/([^/]*)-\d+\.\d+\.\d+'

    for extension in extensions:
        # Strip anything before the URL portion of the argument
        extension = extension[extension.index('https'):]
        # Only URL references to WHL files are currently supported
        if not extension.endswith('.whl') or not extension.startswith('https:'):
            raise CLIError('usage error: only URL to a WHL file currently supported.')
        # TODO: extend to consider other options
        ext_path = extension

        # Extract the extension name from the WHL file name
        try:
            extension_name = re.findall(NAME_REGEX, ext_path)[0]
            extension_name = extension_name.replace('_', '-')
        except IndexError:
            raise CLIError('unable to parse extension name')

        # TODO: Update this!
        extensions_dir = tempfile.mkdtemp()
        ext_dir = tempfile.mkdtemp(dir=extensions_dir)
        whl_cache_dir = tempfile.mkdtemp()
        whl_cache = {}
        ext_file = get_whl_from_url(ext_path, extension_name, whl_cache_dir, whl_cache)

        with open(index_path, 'r') as infile:
            curr_index = json.loads(infile.read())
        entry = {
            'downloadUrl': ext_path,
            'sha256Digest': _get_sha256sum(ext_file),
            'filename': ext_path.split('/')[-1],
            'metadata': get_ext_metadata(ext_dir, ext_file, extension_name)
        }

        if extension_name not in curr_index['extensions']:
            logger.info("Adding '%s' to index...", extension_name)
            curr_index['extensions'][extension_name] = [entry]
        else:
            logger.info("Updating '%s' in index...", extension_name)
            curr_index['extensions'][extension_name].append(entry)

        # update index and write back to file
        with open(os.path.join(index_path), 'w') as outfile:
            outfile.write(json.dumps(curr_index, indent=4, sort_keys=True))
def build_extensions(extensions, dist_dir='dist'):
    """Build WHLs for the named extensions into *dist_dir*.

    :param extensions: list of extension names to build.
    :param dist_dir: output folder (relative paths resolve against the cwd).
    :raises CLIError: if a requested extension cannot be found.
    """
    ext_paths = get_ext_repo_paths()
    if not ext_paths or ext_paths == ['_NONE_']:
        raise CLIError('Extension repo path is empty. Please try `azdev extension repo add` to add an extension repo')
    all_extensions = find_files(ext_paths, 'setup.py')
    # BUGFIX: track remaining names on a copy so the caller's list
    # is not mutated as a side effect.
    remaining = list(extensions)
    paths_to_build = []
    for path in all_extensions:
        folder = os.path.dirname(path)
        long_name = os.path.basename(folder)
        if long_name in remaining:
            paths_to_build.append(folder)
            remaining.remove(long_name)
    # raise error if any extension wasn't found
    if remaining:
        raise CLIError('extension(s) not found: {}'.format(' '.join(remaining)))

    original_cwd = os.getcwd()
    dist_dir = os.path.join(original_cwd, dist_dir)
    try:
        for path in paths_to_build:
            os.chdir(path)
            command = 'setup.py bdist_wheel -b bdist -d {}'.format(dist_dir)
            result = py_cmd(command, "Building extension '{}'...".format(path), is_module=False)
            if result.error:
                raise result.error  # pylint: disable=raising-bad-type
    finally:
        # BUGFIX: always restore the working directory, even if a build fails
        # (previously a failed build left the process in the extension folder).
        os.chdir(original_cwd)
def publish_extensions(extensions, storage_account, storage_account_key, storage_container,
                       dist_dir='dist', update_index=False, yes=False):
    """Build extension WHLs and upload them to Azure blob storage.

    extensions: extension names to publish (see build_extensions)
    storage_account / storage_account_key / storage_container: upload target
    dist_dir: local folder that receives the freshly built WHLs
    update_index: when True, also add the uploaded URLs to index.json
    yes: when True, overwrite existing blobs without prompting
    """
    from azure.multiapi.storage.v2018_11_09.blob import BlockBlobService

    heading('Publish Extensions')

    require_azure_cli()

    # rebuild the extensions; clear dist_dir first so stale WHLs are not uploaded
    subheading('Building WHLs')
    try:
        shutil.rmtree(dist_dir)
    except Exception as ex:  # pylint: disable=broad-except
        logger.debug("Unable to clear folder '%s'. Error: %s", dist_dir, ex)
    build_extensions(extensions, dist_dir=dist_dir)

    whl_files = find_files(dist_dir, '*.whl')
    uploaded_urls = []

    subheading('Uploading WHLs')
    for whl_path in whl_files:
        whl_file = os.path.split(whl_path)[-1]
        # NOTE(review): a new blob client is constructed per file; confirm this
        # is intentional before hoisting it out of the loop.
        client = BlockBlobService(account_name=storage_account, account_key=storage_account_key)
        exists = client.exists(container_name=storage_container, blob_name=whl_file)

        # check if extension already exists unless user opted not to
        if not yes:
            if exists:
                if not prompt_y_n(
                        "{} already exists. You may need to bump the extension version. Replace?".format(whl_file),
                        default='n'):
                    logger.warning("Skipping '%s'...", whl_file)
                    continue
        # upload the WHL file
        client.create_blob_from_path(container_name=storage_container, blob_name=whl_file,
                                     file_path=os.path.abspath(whl_path))
        url = client.make_blob_url(container_name=storage_container, blob_name=whl_file)
        logger.info(url)
        uploaded_urls.append(url)

    if update_index:
        subheading('Updating Index')
        update_extension_index(uploaded_urls)
    subheading('Published WHLs')
    for url in uploaded_urls:
        display(url)

    if not update_index:
        logger.warning('You still need to update the index for your changes!')
        logger.warning(' az extension update-index <URL>')
|
#%%
import re
#%%
def replace_korean_book_name_with_english(text: str, default_book_name: str):
    """Replace the first Korean bible book name in *text* with its English name.

    Returns (converted_text, book_name_used). When no Korean book name is
    found and *default_book_name* is given, that name is inserted after each
    verse marker like "[1]". Chapter ("장") and verse ("절") markers are
    rewritten to ":" and " " respectively.
    """
    rules = [
        (r"(창세기)", "Genesis"),
        (r"(출애굽기)", "Exodus"),
        (r"(레위기)", "Leviticus"),
        (r"(민수기)", "Numbers"),
        (r"(신명기)", "Deuteronomy"),
        (r"(여호수아)", "Joshua"),
        (r"(사사기)", "Judges"),
        (r"(룻기)", "Ruth"),
        (r"(사무엘상)", "I Samuel"),
        (r"(사무엘하)", "II Samuel"),
        (r"(열왕기상)", "I Kings"),
        (r"(열왕기하)", "II Kings"),
        (r"(역대상)", "I Chronicles"),
        (r"(역대하)", "II Chronicles"),
        (r"(에스라)", "Ezra"),
        (r"(느헤미야)", "Nehemiah"),
        (r"(에스더)", "Esther"),
        (r"(욥기)", "Job"),
        (r"(시편)", "Psalms"),
        (r"(잠언)", "Proverbs"),
        (r"(전도서)", "Ecclesiastes"),
        (r"(아가)", "Song of Songs"),
        (r"(이사야)", "Isaiah"),
        # BUGFIX: Lamentations must precede Jeremiah; otherwise the
        # "예레미야" rule matches the prefix of "예레미야애가" first and
        # Lamentations can never be produced.
        (r"(예레미야\S?애가)", "Lamentations"),
        (r"(예레미야)", "Jeremiah"),
        (r"(에스겔)", "Ezekiel"),
        (r"(다니엘)", "Daniel"),
        (r"(호세아)", "Hosea"),
        (r"(요엘)", "Joel"),
        (r"(아모스)", "Amos"),
        (r"(오바댜)", "Obadiah"),
        (r"(요나)", "Jonah"),
        (r"(미가)", "Micah"),
        (r"(나훔)", "Nahum"),
        (r"(하박국)", "Habakkuk"),
        (r"(스바냐)", "Zephaniah"),
        (r"(학개)", "Haggai"),
        (r"(스가랴)", "Zechariah"),
        (r"(말라기)", "Malachi"),
        (r"(마태복음)", "Matthew"),
        (r"(마가복음)", "Mark"),
        (r"(누가복음)", "Luke"),
        (r"(요한복음)", "John"),
        (r"(사도행전)", "Acts"),
        (r"(로마서)", "Romans"),
        # NOTE(review): '\S?' tolerates one non-space joining character
        # between name parts; a plain space is NOT matched — confirm intent.
        (r"(고린도\S?전서)", "1 Corinthians"),
        (r"(고린도\S?후서)", "2 Corinthians"),
        (r"(갈라디아서)", "Galatians"),
        (r"(에베소서)", "Ephesians"),
        (r"(빌립보서)", "Philippians"),
        (r"(골로새서)", "Colossians"),
        (r"(데살로니가\S?전서)", "1 Thessalonians"),
        (r"(데살로니가\S?후서)", "2 Thessalonians"),
        (r"(디모데\S?전서)", "1 Timothy"),
        (r"(디모데\S?후서)", "2 Timothy"),
        (r"(디도서)", "Titus"),
        (r"(빌레몬서)", "Philemon"),
        (r"(히브리서)", "Hebrews"),
        (r"(야고보서)", "James"),
        (r"(베드로\S?전서)", "1 Peter"),
        (r"(베드로\S?후서)", "2 Peter"),
        (r"(요한\S?1서)", "1 John"),
        (r"(요한\S?2서)", "2 John"),
        (r"(요한\S?3서)", "3 John"),
        (r"(유다서)", "Jude"),
        (r"(요한계시록)", "Revelation"),
    ]
    book_name_used = None
    for r in rules:
        pattern, book_name = r
        before = text
        text = re.sub(pattern, book_name, text)
        if before != text:
            # Only the first matching rule is applied.
            book_name_used = book_name
            break
    else:
        # No rule matched: tag verse markers with the default book name.
        if default_book_name is not None:
            pattern = r"(\[[0-9]+\])"
            repl = r"\1 " + re.escape(default_book_name)
            text = re.sub(pattern, repl, text)
            book_name_used = default_book_name
    # Rewrite chapter/verse markers: "1장 2절" -> "1: 2 "
    text = text.replace("장", ":")
    text = text.replace("절", " ")
    return text, book_name_used
#%%
def book_name_to_code(name: str):
    """Map an English bible book name to its 3-character book code.

    Accepts both Roman-numeral forms ("I Corinthians") and the
    Arabic-numeral forms emitted by replace_korean_book_name_with_english
    ("1 Corinthians"). Raises KeyError for unknown names.
    """
    rule_map = {
        "Genesis": "gen",
        "Exodus": "exo",
        "Leviticus": "lev",
        "Numbers": "num",
        "Deuteronomy": "deu",
        "Joshua": "jos",
        "Judges": "jdg",
        "Ruth": "rut",
        "I Samuel": "1sa",
        "II Samuel": "2sa",
        "I Kings": "1ki",
        "II Kings": "2ki",
        "I Chronicles": "1ch",
        "II Chronicles": "2ch",
        "Ezra": "ezr",
        "Nehemiah": "neh",
        "Esther": "est",
        "Job": "job",
        "Psalms": "psa",
        "Proverbs": "pro",
        "Ecclesiastes": "ecc",
        "Song of Songs": "sng",
        "Isaiah": "isa",
        "Jeremiah": "jer",
        "Lamentations": "lam",
        "Ezekiel": "ezk",
        "Daniel": "dan",
        "Hosea": "hos",
        "Joel": "jol",
        "Amos": "amo",
        "Obadiah": "oba",
        "Jonah": "jnh",
        "Micah": "mic",
        "Nahum": "nam",
        "Habakkuk": "hab",
        "Zephaniah": "zep",
        "Haggai": "hag",
        "Zechariah": "zec",
        "Malachi": "mal",
        "Matthew": "mat",
        "Mark": "mrk",
        "Luke": "luk",
        "John": "jhn",
        "Acts": "act",
        "Romans": "rom",
        "I Corinthians": "1co",
        "II Corinthians": "2co",
        "Galatians": "gal",
        "Ephesians": "eph",
        "Philippians": "php",
        "Colossians": "col",
        "I Thessalonians": "1th",
        "II Thessalonians": "2th",
        "I Timothy": "1ti",
        "II Timothy": "2ti",
        "Titus": "tit",
        "Philemon": "phm",
        "Hebrews": "heb",
        "James": "jas",
        "I Peter": "1pe",
        "II Peter": "2pe",
        "I John": "1jn",
        "II John": "2jn",
        "III John": "3jn",
        "Jude": "jud",
        "Revelation": "rev",
    }
    # CONSISTENCY FIX: replace_korean_book_name_with_english produces
    # Arabic-numeral names for these books, which previously raised KeyError.
    rule_map.update({
        "1 Corinthians": "1co",
        "2 Corinthians": "2co",
        "1 Thessalonians": "1th",
        "2 Thessalonians": "2th",
        "1 Timothy": "1ti",
        "2 Timothy": "2ti",
        "1 Peter": "1pe",
        "2 Peter": "2pe",
        "1 John": "1jn",
        "2 John": "2jn",
        "3 John": "3jn",
    })
    return rule_map[name]
|
import sys


def emit_alpha_blend(out=None):
    """Write a Verilog alpha-blend lookup module to *out* (default: stdout).

    For each colour channel (r, g, b) a 256-entry ``case`` is generated
    mapping the concatenation {4-bit alpha, 4-bit colour} to the
    pre-multiplied component ``int(colour * alpha / 15)``.

    Bug fix: the original script printed a stray second ``(`` right after
    ``module alpha_blend (``, which made the emitted Verilog unparsable.
    """
    if out is None:
        out = sys.stdout
    write = out.write
    # Module header / port list.
    write("module alpha_blend (\n")
    write("    input clk,\n")
    write("    input [3:0] bg_a,\n")
    write("    input [3:0] bg_r,\n")
    write("    input [3:0] bg_g,\n")
    write("    input [3:0] bg_b,\n")
    write("    output [3:0] bga_r,\n")
    write("    output [3:0] bga_g,\n")
    write("    output [3:0] bga_b\n")
    write(");\n")
    write("\n")
    write("always @(posedge clk) begin\n")
    write("\n")
    for channel in ('r', 'g', 'b'):
        write("case({ bg_a, bg_" + channel + "})\n")
        for alpha in range(16):
            for color in range(16):
                scale = alpha / 15.0          # alpha scaled to [0, 1]
                blended = int(color * scale)  # truncated blended component
                # Case selector is {alpha[3:0], color[3:0]}.
                index = (alpha << 4) | (color & 0x0F)
                write("        'h{:02X}: ".format(index))
                write("bga_" + channel + " <= 4'h{:01X};\n".format(blended))
        write("endcase\n")
    write("end\n")
    write("endmodule\n")


# Preserve the original script behaviour: emit to stdout on execution.
emit_alpha_blend()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# 从spark.sql中导入SparkSession类
from pyspark.sql import SparkSession
# 设置环境变量
import os
import sys
# 导入模块
from pyspark.mllib.recommendation import Rating, ALS, MatrixFactorizationModel
def create_context():
    """Create (or reuse) a local SparkSession and return its SparkContext.

    :return: the SparkContext backing the session
    """
    session = (SparkSession.builder
               .appName("RmdMovieUserExample")
               .master("local[*]")
               .getOrCreate())
    return session.sparkContext
def prepare_data(spark_context):
    """Load MovieLens ``u.item`` and return a {movie_id: title} dict.

    Bug fix: the original read the module-level global ``sc`` instead of
    the ``spark_context`` parameter, so the function only worked when run
    as a script that happened to define ``sc``.

    :param spark_context: SparkContext used to read the data file
    :return: dict mapping movie id (as float) to movie title
    """
    # u.item is pipe-separated: id|title|...
    item_rdd = spark_context.textFile("../ml-100k/u.item")
    movie_title = item_rdd \
        .map(lambda line: line.split("|")) \
        .map(lambda a: (float(a[0]), a[1]))
    # Collect the (id, title) pairs into a plain dict on the driver.
    return movie_title.collectAsMap()
def load_model(spark_context):
    """Load the persisted ALS model; return None (best-effort) on failure.

    Fix: the original swallowed the exception with a bare message, hiding
    the actual cause; it is now included in the printed message.

    :param spark_context: SparkContext the model was saved with
    :return: MatrixFactorizationModel, or None when loading fails
    """
    try:
        model = MatrixFactorizationModel.load(spark_context, '../datas/asl-model')
        print(model)
        return model
    except Exception as e:
        # Keep the best-effort contract (return None) but surface the cause.
        print("加载模型出错: {0}".format(e))
def recommend_movies(als, movies, user_id):
    """Print the top-10 movie recommendations for *user_id*.

    :param als: trained ALS model exposing recommendProducts()
    :param movies: dict mapping movie id to movie title
    :param user_id: target user id
    """
    recommendations = als.recommendProducts(user_id, 10)
    print("针对用户ID: " + str(user_id) + ", 推荐以下电影:")
    for rating in recommendations:
        print("\t 针对用户ID: {0}, 推荐电影: {1}, 推荐评分: {2}".format(rating[0], movies[rating[1]], rating[2]))
def recommend_users(als, movies, movie_id):
    """Print the 10 users most likely to enjoy *movie_id*.

    :param als: trained ALS model exposing recommendUsers()
    :param movies: dict mapping movie id to movie title
    :param movie_id: target movie id
    """
    # Ask the model for the ten best-matching users for this movie.
    candidates = als.recommendUsers(movie_id, 10)
    print("针对电影ID: {0}, 电影名:{1}, 推荐下列十个用户: ".format(movie_id, movies[movie_id]))
    for rating in candidates:
        print('\t 推荐用户ID: {0}, 推荐评分: {1}'.format(rating[0], rating[2]))
def recommend(als_model, movie_dic):
    """Dispatch on the command-line flag in sys.argv[1].

    ``--U user_id``  -> recommend movies to a user
    ``--M movie_id`` -> recommend users for a movie

    Fixes: the two flags are mutually exclusive, so use elif; an
    unrecognised flag is now reported instead of silently doing nothing.
    """
    flag = sys.argv[1]
    if flag == '--U':
        # Recommend movies to the given user.
        recommend_movies(als_model, movie_dic, int(sys.argv[2]))
    elif flag == '--M':
        # Recommend users for the given movie.
        recommend_users(als_model, movie_dic, int(sys.argv[2]))
    else:
        print("未知参数: {0},请使用 --U 或 --M".format(flag))
if __name__ == "__main__":
"""
-a. 加载模型
-b. 为用户对推荐电影
-c. 为电影对推荐用户
"""
# 设置运行python脚本的时候,传递两个参数,决定如何推荐
if len(sys.argv) != 3:
print("请输入2个参数: 要么是: --U user_id,要么是: --M moive_id")
exit(-1)
sc = create_context()
# 数据准备,就是加载电影数据信息,转换字典
print('============= 数据准备 =============')
movie_title_dic = prepare_data(sc)
print('============= 加载模型 =============')
als_load_model = load_model(sc)
type(als_load_model)
print('============= 预测推荐 =============')
recommend(als_load_model, movie_title_dic)
|
from django.shortcuts import render,HttpResponse,redirect
from app01 import models
# Create your views here.
def index(request, *args):
    """Serve the site landing page.

    Extra positional arguments (e.g. captured by permissive URL patterns)
    are accepted and ignored.
    """
    template_name = "index.html"
    return render(request, template_name)
#!/usr/bin/python
import numpy as np
import abc
import random as r
import imp
class Agent:
    """A point-mass agent whose motion is driven by a pluggable behaviour.

    Bug fix: the original used ``np.array([...])`` default arguments for
    ``startPos``/``startVel`` — mutable objects shared by every instance
    created with the defaults; mutating one agent's array in place leaked
    into all later agents.  Defaults are now created per instance.
    """

    ID_base = 0  # class-wide counter used to hand out unique agent IDs

    def __init__(self, behav, maxVelocity=2.0, drag=0.5, startPos=None,
                 startVel=None, timestep=1.0 / 25.0, energ=25):
        """Create an agent.

        :param behav: Behaviour instance providing compute(agent, agents)
        :param maxVelocity: speed cap applied after integration
        :param drag: linear drag coefficient
        :param startPos: initial position, defaults to the origin
        :param startVel: initial velocity, defaults to rest
        :param timestep: integration step in seconds
        :param energ: starting energy budget
        """
        Agent.ID_base += 1
        self.ID = Agent.ID_base
        # Fresh arrays per instance — never share a mutable default.
        self.pos = np.array([0.0, 0.0]) if startPos is None else startPos
        self.vel = np.array([0.0, 0.0]) if startVel is None else startVel
        self.mass = 1.0
        self.behavior = behav
        self.timestep = timestep
        self.maxVel = maxVelocity
        self.dragCoef = drag
        self.energy = energ
        self.distanceTravelled = 0

    def updateAgent(self, agents):
        """Advance one timestep: behaviour force + drag, then clamp speed."""
        force = 10 * self.behavior.compute(self, agents)
        if self.energy <= 0:
            force *= 0  # exhausted agents coast under drag only
        drag = -1.0 * self.dragCoef * self.vel
        force = force + drag
        # Explicit Euler integration of velocity, then position.
        self.vel = self.vel + self.timestep * (force / self.mass)
        velMag = np.sqrt(np.dot(self.vel, self.vel))
        if velMag > self.maxVel:
            self.vel = (self.vel / velMag) * self.maxVel
        self.pos = self.pos + self.timestep * self.vel
class Behaviour:
    """Abstract strategy that computes a steering force for an agent.

    Fix: the fallback used a Python-2-only ``print`` statement; the
    call form below behaves identically on Python 2 and parses on 3.
    NOTE(review): without an ABCMeta metaclass, ``@abc.abstractmethod``
    is not actually enforced — instantiation is still allowed and this
    fallback merely announces misuse.
    """

    @abc.abstractmethod
    def compute(self, aself, agents):
        """Return a force vector for *aself* given all *agents*."""
        print('abstract base class')
class runAwayBehaviour(Behaviour):
    """Prey behaviour: flee everything within a danger radius.

    The prey freezes (zero force) when any other agent is closer than
    the trapped threshold.  (Fix: removed the unused local ``direct``.)
    """

    def __init__(self, distanceDanger=5.0, distanceTrapped=1.0):
        """:param distanceDanger: radius within which agents are fled from
        :param distanceTrapped: radius at which the prey counts as trapped
        """
        self.thresDanger = distanceDanger
        self.thresTrapped = distanceTrapped

    def compute(self, aself, agents):
        """Return a unit flight direction, or a zero vector when trapped/safe."""
        trapped = False
        awayDirection = np.array([0.0, 0.0])
        for a in agents:
            if aself.ID == a.ID:
                continue  # never react to ourselves
            distToA = np.linalg.norm(aself.pos - a.pos)
            if distToA < self.thresTrapped:
                trapped = True
            elif distToA < self.thresDanger:
                # Accumulate a repulsion vector away from each threat;
                # fleeing costs one unit of energy per threat.
                awayDirection += (aself.pos - a.pos)
                aself.energy = max(0, aself.energy - 1)
        dirnorm = np.sqrt(np.dot(awayDirection, awayDirection))
        if trapped:
            return np.array([0, 0])
        elif dirnorm > 0.01:
            return awayDirection / dirnorm
        else:
            return np.array([0, 0])
class randomBehaviour(Behaviour):
    """Wander: emit a uniformly random force in [0, 1)^2 every step."""

    def compute(self, aself, agents):
        """Ignore the surroundings and return a fresh random 2-vector."""
        return np.array([r.random(), r.random()])
class scriptedBehaviour(Behaviour):
    """Hunter behaviour delegated to a user-supplied Python script.

    The script must expose ``compute(preyArr, otherHunterArr, dist)``
    returning a 2-element force.
    """

    def __init__(self, module):
        # NOTE(review): ``imp`` is deprecated (removed in Python 3.12);
        # importlib would be the modern replacement — confirm target runtime.
        self.scriptModule = imp.load_source("scriptedBehav", module)

    def compute(self, aself, agents):
        """Compute this hunter's force from the scripted policy.

        Assumes agents[0] is the prey and agents[1]/agents[2] are the two
        hunters — TODO confirm against the simulation setup.
        """
        # Pick the index of the *other* hunter.
        other = 1
        if aself.ID == agents[1].ID:
            other = 2
        prey = agents[0]
        otherHunter = agents[other]
        # Positions relative to this hunter.
        relativePosPrey = prey.pos - aself.pos
        relativePosHunter = otherHunter.pos - aself.pos
        relatPreyOther = prey.pos - otherHunter.pos
        'distance between prey & other hunter'
        dist = np.linalg.norm(relatPreyOther)
        # Hand plain lists (not numpy arrays) to the external script.
        preyArr = [relativePosPrey[0], relativePosPrey[1]]
        otherHunterArr = [relativePosHunter[0], relativePosHunter[1]]
        force = self.scriptModule.compute(preyArr, otherHunterArr, dist)
        result = np.array(force)
        distToPrey = np.linalg.norm(relativePosPrey)
        if distToPrey < 3.5:
            # Hunting close to the prey drains energy.
            aself.energy = max(0, aself.energy - 1)
        return result
|
from sqlalchemy import create_engine, MetaData
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker,scoped_session,Session
import os
import ulid
from dotenv import load_dotenv
# Load .env so DB_CONNECTION_PATH can come from a local dotenv file.
load_dotenv()

SQLALCHEMY_DATABASE_URL = os.environ.get("DB_CONNECTION_PATH")
# Idiom fix: compare against None with `is`, not `==`.
if SQLALCHEMY_DATABASE_URL is None:
    raise Exception("db connection error")
# Force full-Unicode (4-byte) support on MySQL connections.
SQLALCHEMY_DATABASE_URL += '?charset=utf8mb4'
engine = create_engine(SQLALCHEMY_DATABASE_URL, pool_recycle=360, pool_size=100)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base(bind=engine)
# Thread-local session registry for non-dependency-injected callers.
session = scoped_session(SessionLocal)
# Dependency
def get_db():
    """Dependency generator: yield a fresh ORM session, always closing it."""
    db_session = SessionLocal()
    try:
        yield db_session
    finally:
        db_session.close()  # pylint: disable=no-member
def get_db_instance():
    """Return a brand-new session; the caller owns its lifecycle."""
    return SessionLocal()
def get_ulid():
    """Return a fresh ULID in its canonical 26-character string form."""
    return ulid.new().str
|
from matplotlib import rc
rc("font", family="serif", size=11)
rc("text", usetex=True)

import daft

# Probabilistic graphical model diagram for the HII-region abundance fit.
# Fixes: the "Te" node label was an unterminated LaTeX math string
# (r"$T_{e}" -> r"$T_{e}$"), and the final Python-2 `print` statement is
# now a call (identical output, also valid on Python 3).

# Canvas size: width x height; node positions are [column, row].
pgm = daft.PGM([7, 7], origin=[0.5, 0.5], observed_style="inner")

# Hierarchical (top-row) parameters.
pgm.add_node(daft.Node("aH", r"$a_{H}$", 1, 7, scale=1.2))
pgm.add_node(daft.Node("ne", r"$n_e$", 2, 7, scale=1.2))
pgm.add_node(daft.Node("chbeta", r"$c\left(H\beta\right)$", 3, 7, scale=1.2))
pgm.add_node(daft.Node("xi", r"$\xi$", 5, 7, scale=1.2))
pgm.add_node(daft.Node("tau", r"$\tau$", 6, 7, scale=1.2))
pgm.add_node(daft.Node("aHe", r"$a_{He}$", 7, 7, scale=1.2))
# Intermediate quantities.
pgm.add_node(daft.Node("FH", r"$F_{H}$", 2, 5.5, scale=1.4))
pgm.add_node(daft.Node("FHe", r"$F_{He}$", 6, 5.5, scale=1.4))
pgm.add_node(daft.Node("Te", r"$T_{e}$", 4, 5.5, scale=1.2))
pgm.add_node(daft.Node("chiEw", r"$\chi_{EW}^{2}$", 2, 4, scale=1.4))
pgm.add_node(daft.Node("ChiTem", r"$\chi_{T}^{2}$", 6, 4, scale=1.4))
Latex_Likelihood = r'$\mathcal{L}\left(\left(F_{\lambda}\left(\lambda\right)/F_{H\beta}\right)_{obs}\mid\theta\right)$'
# Observed likelihood node at the bottom.
pgm.add_node(daft.Node("Likelihood", Latex_Likelihood, 4, 3.1, aspect=3.2, observed=True, scale=1.6))

# Edges: parameters feed the fluxes, fluxes feed the likelihood.
pgm.add_edge("aH", "FH")
pgm.add_edge("ne", "FH")
pgm.add_edge("chbeta", "FH")
pgm.add_edge("xi", "FH")
pgm.add_edge("tau", "FH")
pgm.add_edge("Te", "FH")
pgm.add_edge("ne", "FHe")
pgm.add_edge("chbeta", "FHe")
pgm.add_edge("xi", "FHe")
pgm.add_edge("tau", "FHe")
pgm.add_edge("aHe", "FHe")
pgm.add_edge("Te", "FHe")
pgm.add_edge("Te", "ChiTem")
pgm.add_edge("FH", "Likelihood")
pgm.add_edge("FHe", "Likelihood")
pgm.add_edge("ChiTem", "Likelihood")
pgm.add_edge("chiEw", "Likelihood")

# Render and save.
# NOTE(review): hard-coded absolute output path — parameterise if reused.
StoringDataFolder = '/home/vital/Workspace/X_ModelData/MCMC_databases/'
pgm.render()
pgm.figure.savefig(StoringDataFolder + "nogray.png", dpi=150)
print('done')
__author__ = 'sunhao'
import os
import pickle
import warnings
import pandas as pd
import numpy as np
import statsmodels.api as sm
import ps
warnings.filterwarnings("ignore")
class FactorStat(object):
    """Monthly-rebalanced double-sort factor back-test.

    Stocks are split 2-way on free-float market cap and 30/40/30 on the
    factor within each size bucket; high/low legs (size-neutral averages)
    and their long-short spread are simulated net of fees and delisting
    losses, then summarised with Newey-West and plain t-statistics.

    ``context`` is expected to carry the data and settings read below:
    df_factor, df_ret_dict, cannot_buy_dict, cannot_sell_dict,
    delist_stock_dict, factor_name, factor_reciprocal, factor_direction,
    weighted_method, fee, delist_loss, savedir.
    """

    def __init__(self, context):
        self.context = context
        # Full pipeline runs at construction time.
        self._get_pos_hold()
        self._get_group_ret()
        self._get_month_ret()
        self._stat_analysis()

    def _cal_factor_pos(self, date):
        """Build the four corner portfolios (large/small x high/low) for *date*."""
        df = self.context.df_factor.loc[date][['circ_mv', self.context.factor_name]].dropna(axis=0, how='any')
        if self.context.factor_reciprocal:
            # e.g. turn PE into EP so "high" always means the same direction.
            df[self.context.factor_name] = 1 / df[self.context.factor_name]
        # Median split on market cap, then 30/40/30 split on the factor
        # within each size bucket (classic double-sort construction).
        df['mv_cut'] = pd.qcut(df['circ_mv'], 2, ['small', 'large'])
        df['factor_cut'] = None
        df.loc[df['mv_cut'] == 'small', 'factor_cut'] = pd.qcut(df.loc[df['mv_cut'] == 'small', self.context.factor_name], [0, 0.3, 0.7, 1], ['low', 'medium', 'high'])
        df.loc[df['mv_cut'] == 'large', 'factor_cut'] = pd.qcut(df.loc[df['mv_cut'] == 'large', self.context.factor_name], [0, 0.3, 0.7, 1], ['low', 'medium', 'high'])
        # Drop names that cannot be bought on the next trading day.
        buy_date = ps.shift_n_trading_day(date, n=-1)
        df = df.loc[list(set(df.index.tolist()) - set(self.context.cannot_buy_dict[buy_date]))]
        df_large_high = df.query('mv_cut=="large" and factor_cut=="high"')
        df_small_high = df.query('mv_cut=="small" and factor_cut=="high"')
        df_large_low = df.query('mv_cut=="large" and factor_cut=="low"')
        df_small_low = df.query('mv_cut=="small" and factor_cut=="low"')
        if self.context.weighted_method == 'eql_weighted':
            def cal_weight(df):
                # Equal weight: 1/N for every remaining stock.
                return pd.Series(index = df.index, name='eql_weighted').fillna(1/len(df))
            return cal_weight(df_large_high), cal_weight(df_small_high), cal_weight(df_large_low), cal_weight(df_small_low)
        if self.context.weighted_method == 'mv_weighted':
            def cal_weight(df):
                # Cap weight: proportional to free-float market cap.
                return df['circ_mv'] / df['circ_mv'].sum()
            return cal_weight(df_large_high), cal_weight(df_small_high), cal_weight(df_large_low), cal_weight(df_small_low)

    def _get_pos_hold(self):
        """Compute holdings (weight Series keyed by buy date) for each corner."""
        self.large_high_pos = {}
        self.small_high_pos = {}
        self.large_low_pos = {}
        self.small_low_pos = {}
        for month_date in sorted(list(self.context.df_ret_dict.keys())):
            buy_date = ps.shift_n_trading_day(month_date, -1)
            self.large_high_pos[buy_date], self.small_high_pos[buy_date], self.large_low_pos[buy_date], self.small_low_pos[buy_date] =\
                self._cal_factor_pos(month_date)

    def _cal_ret(self, pos_hold_raw):
        """Simulate one portfolio through time.

        Returns (daily return Series, one-way turnover per rebalance date,
        weight lost to forced delistings per rebalance date).
        """
        ret_list = []
        turnover_dict = {}
        delist_turnover_dict = {}
        # Copied because positions get patched in-place for unsellable names.
        pos_hold = pos_hold_raw.copy()
        rebalance_dates = sorted(list(pos_hold.keys()))
        for i, date in enumerate(rebalance_dates):
            pre_date = ps.shift_n_trading_day(date)
            # Daily returns of held names over the period (missing -> 0).
            df = pd.merge(pos_hold[date].to_frame(), self.context.df_ret_dict[pre_date].iloc[:, 1:], left_index=True, right_index=True, how='left').fillna(0)
            ret = df.iloc[:, 1:].apply(np.average, weights=df.iloc[:, 0])
            if i == 0:
                # NOTE(review): Series.append was removed in pandas 2.x —
                # this assumes an older pandas; confirm the pinned version.
                ret = ret.append(pd.Series(index=[date], data=0))
                turnover_dict[date] = 1.0  # initial build = full turnover
            if i < len(rebalance_dates) - 1:
                # End-of-period weights after price drift.
                nav_end = df.iloc[:, 1:].multiply(df.iloc[:, 0], axis=0).add(1).prod(axis=1)
                wgt_end = nav_end / nav_end.sum()
                cannot_sell_stock = self.context.cannot_sell_dict[df.iloc[:, -1].name]
                delist_stock = self.context.delist_stock_dict[df.iloc[:, -1].name]
                delist_stock = set(cannot_sell_stock).intersection(set(delist_stock))
                if len(delist_stock)!=0:
                    cannot_sell_stock = set(cannot_sell_stock) - set(delist_stock)
                    delist_turnover = wgt_end.loc[wgt_end.index.isin(delist_stock)].sum()
                else:
                    delist_turnover = 0
                delist_turnover_dict[date] = delist_turnover
                # Weight stuck in unsellable names is carried into the next
                # period's target portfolio, scaling the rest down.
                wgt_cannot_sell = wgt_end.loc[wgt_end.index.isin(cannot_sell_stock)]
                origin_pos = pos_hold[df.iloc[:, -1].name]
                new_pos = (1 - wgt_cannot_sell.sum()) * origin_pos
                new_pos = new_pos.append(wgt_cannot_sell).groupby(level=0).sum()
                pos_hold[df.iloc[:, -1].name] = new_pos
                # One-way turnover = sum of absolute weight changes.
                turnover_rate = pd.merge(wgt_end.to_frame(), new_pos.to_frame(), left_index=True, right_index=True, how='outer').fillna(0).diff(axis=1).abs().sum().iloc[-1]
                ret_list.append(ret)
                turnover_dict[df.iloc[:, -1].name] = turnover_rate
            else:
                ret_list.append(ret)
        res = pd.concat(ret_list, axis=0).sort_index(ascending=True)
        df_turnover = pd.Series(turnover_dict)
        df_delist_turnover = pd.Series(delist_turnover_dict)
        return res, df_turnover, df_delist_turnover

    def _get_group_ret(self):
        """Combine corner portfolios into size-neutral high/low/diff legs, net of costs."""
        self._large_high_ret, self._large_high_turnover, self._large_high_delist_turnover = self._cal_ret(self.large_high_pos)
        self._small_high_ret, self._small_high_turnover, self._small_high_delist_turnover = self._cal_ret(self.small_high_pos)
        self._large_low_ret, self._large_low_turnover, self._large_low_delist_turnover = self._cal_ret(self.large_low_pos)
        self._small_low_ret, self._small_low_turnover, self._small_low_delist_turnover = self._cal_ret(self.small_low_pos)
        # Fees: turnover * fee plus delisting weight * delisting loss.
        high_fee = (self._large_high_turnover.reindex(self._large_high_ret.index).fillna(0) + self._small_high_turnover.reindex(self._small_high_ret.index).fillna(0)) * self.context.fee +\
            (self._large_high_delist_turnover.reindex(self._large_high_ret.index).fillna(0) + self._small_high_delist_turnover.reindex(self._small_high_ret.index).fillna(0)) * self.context.delist_loss
        low_fee = (self._large_low_turnover.reindex(self._large_low_ret.index).fillna(0) + self._small_low_turnover.reindex(self._small_low_ret.index).fillna(0)) * self.context.fee +\
            (self._large_low_delist_turnover.reindex(self._large_low_ret.index).fillna(0) + self._small_low_delist_turnover.reindex(self._small_low_ret.index).fillna(0)) * self.context.delist_loss
        self._high_ret = (self._large_high_ret + self._small_high_ret) * 0.5
        self._low_ret = (self._large_low_ret + self._small_low_ret) * 0.5
        self.high_ret = self._high_ret - high_fee * 0.5
        self.low_ret = self._low_ret - low_fee * 0.5
        # Long-short orientation follows the declared factor direction.
        if self.context.factor_direction == 'pos':
            self.diff_ret = self._high_ret - self._low_ret - high_fee * 0.5 - low_fee * 0.5
        else:
            self.diff_ret = self._low_ret - self._high_ret - high_fee * 0.5 - low_fee * 0.5

    def _cal_month_ret(self, daily_ret):
        """Compound daily returns into calendar-month returns."""
        daily_ret = daily_ret.copy()
        # Index looks like 'YYYYMMDD' strings; dropping the last two chars
        # groups by month — TODO confirm the index format upstream.
        daily_ret.index = [x[:-2] for x in daily_ret.index]
        month_ret = daily_ret.groupby(level=0).apply(lambda x:x.add(1).prod() - 1)
        return month_ret

    def _get_month_ret(self):
        """Monthly versions of the three leg return series."""
        self.high_month_ret = self._cal_month_ret(self.high_ret)
        self.low_month_ret = self._cal_month_ret(self.low_ret)
        self.diff_month_ret = self._cal_month_ret(self.diff_ret)

    def _ols_stats(self, ret, option='nw'):
        """t/p-value of the mean via OLS on a constant; 'nw' uses HAC errors."""
        if option == 'nw':
            # Standard Newey-West lag rule: 4 * (T/100)^(2/9).
            maxlags = int(np.ceil(4 * (len(ret) / 100) ** (2 / 9)))
            model = sm.OLS(ret.values, np.ones(len(ret))).fit(missing = 'drop', cov_type = 'HAC', cov_kwds={'maxlags':maxlags})
        else:
            model = sm.OLS(ret.values, np.ones(len(ret))).fit()
        return model.tvalues[0], model.pvalues[0]

    def _cal_stats(self, high, low, diff, option='nw'):
        """t- and p-value rows for the high/low/diff legs."""
        t_high, p_high = self._ols_stats(high, option=option)
        t_low, p_low = self._ols_stats(low, option=option)
        t_diff, p_diff = self._ols_stats(diff, option=option)
        return [[t_high, t_low, t_diff],
                [p_high, p_low, p_diff]]

    def _stat_analysis(self):
        """Assemble the summary table: mean + (NW and plain) t/p, monthly & daily."""
        month_stat = np.array([[self.high_month_ret.mean(), self.low_month_ret.mean(), self.diff_month_ret.mean()]] +\
            self._cal_stats(self.high_month_ret, self.low_month_ret, self.diff_month_ret) +\
            self._cal_stats(self.high_month_ret, self.low_month_ret, self.diff_month_ret, option='non-nw'))
        daily_stat = np.array([[self.high_ret.mean(), self.low_ret.mean(), self.diff_ret.mean()]] +\
            self._cal_stats(self.high_ret, self.low_ret, self.diff_ret) +\
            self._cal_stats(self.high_ret, self.low_ret, self.diff_ret, option='non-nw'))
        index = pd.MultiIndex.from_product([[self.context.factor_name], ['mean_ret', 't-value(nw)', 'p-value(nw)', 't-value', 'p-value']])
        columns = pd.MultiIndex.from_product([['monthly', 'daily'], ['high', 'low', 'diff']])
        df_stat = pd.DataFrame(np.concatenate((month_stat, daily_stat), axis=1), index=index, columns=columns)
        # Round every cell to 4 decimals (format to string, then back to float).
        self.df_stat = df_stat.applymap(lambda x: '%.4f' % x).astype(float)

    def save(self, factor_name=None):
        """Pickle the whole object into context.savedir (name defaults to the factor)."""
        if factor_name:
            ps.pickle_save(self, self.context.savedir+"{0}.pkl".format(factor_name))
        else:
            ps.pickle_save(self, self.context.savedir+"{0}.pkl".format(self.context.factor_name))
class FactorStat_size(FactorStat):
    """Size-factor variant of FactorStat.

    Double-sorts on market cap and the reciprocal of *cut_item* (default
    pb -> book-to-price), then compares the small vs large legs, each leg
    averaged over its low/medium/high factor buckets.
    """

    def __init__(self, context, cut_item='pb'):
        self.context = context
        self.cut_item = cut_item  # secondary sort variable (inverted below)
        self._get_pos_hold()
        self._get_group_ret()
        self._get_month_ret()
        self._stat_analysis()

    def _cal_factor_pos(self, date):
        """Build the six portfolios (large/small x low/medium/high) for *date*."""
        if self.context.factor_name == 'circ_mv':
            df = self.context.df_factor.loc[date][[self.context.factor_name, self.cut_item]].dropna(axis=0, how='any')
            # Invert the cut item (e.g. PB -> BP) so "high" is value-like.
            df['bp'] = 1 / df[self.cut_item]
            del df[self.cut_item]
            df['mv_cut'] = pd.qcut(df['circ_mv'], 2, ['small', 'large'])
            df['factor_cut'] = None
            df.loc[df['mv_cut'] == 'small', 'factor_cut'] = pd.qcut(df.loc[df['mv_cut'] == 'small', 'bp'], [0, 0.3, 0.7, 1], ['low', 'medium', 'high'])
            df.loc[df['mv_cut'] == 'large', 'factor_cut'] = pd.qcut(df.loc[df['mv_cut'] == 'large', 'bp'], [0, 0.3, 0.7, 1], ['low', 'medium', 'high'])
            # Drop names that cannot be bought on the next trading day.
            buy_date = ps.shift_n_trading_day(date, n=-1)
            df = df.loc[list(set(df.index.tolist()) - set(self.context.cannot_buy_dict[buy_date]))]
            df_large_high = df.query('mv_cut=="large" and factor_cut=="high"')
            df_small_high = df.query('mv_cut=="small" and factor_cut=="high"')
            df_large_low = df.query('mv_cut=="large" and factor_cut=="low"')
            df_small_low = df.query('mv_cut=="small" and factor_cut=="low"')
            df_large_medium = df.query('mv_cut=="large" and factor_cut=="medium"')
            df_small_medium = df.query('mv_cut=="small" and factor_cut=="medium"')
            if self.context.weighted_method == 'eql_weighted':
                def cal_weight(df):
                    # Equal weight: 1/N for every remaining stock.
                    return pd.Series(index = df.index, name='eql_weighted').fillna(1/len(df))
                return cal_weight(df_large_high), cal_weight(df_small_high), cal_weight(df_large_low), cal_weight(df_small_low), cal_weight(df_large_medium), cal_weight(df_small_medium)
            if self.context.weighted_method == 'mv_weighted':
                def cal_weight(df):
                    # Cap weight: proportional to free-float market cap.
                    return df['circ_mv'] / df['circ_mv'].sum()
                return cal_weight(df_large_high), cal_weight(df_small_high), cal_weight(df_large_low), cal_weight(df_small_low), cal_weight(df_large_medium), cal_weight(df_small_medium)

    def _get_pos_hold(self):
        """Holdings per buy date for all six size/factor portfolios."""
        self.large_high_pos = {}
        self.small_high_pos = {}
        self.large_low_pos = {}
        self.small_low_pos = {}
        self.large_medium_pos = {}
        self.small_medium_pos = {}
        for month_date in sorted(list(self.context.df_ret_dict.keys())):
            buy_date = ps.shift_n_trading_day(month_date, -1)
            self.large_high_pos[buy_date], self.small_high_pos[buy_date], self.large_low_pos[buy_date], self.small_low_pos[buy_date], self.large_medium_pos[buy_date], self.small_medium_pos[buy_date] =\
                self._cal_factor_pos(month_date)

    def _get_group_ret(self):
        """Average the three factor buckets within each size leg, net of costs."""
        self._large_high_ret, self._large_high_turnover, self._large_high_delist_turnover = self._cal_ret(self.large_high_pos)
        self._small_high_ret, self._small_high_turnover, self._small_high_delist_turnover = self._cal_ret(self.small_high_pos)
        self._large_low_ret, self._large_low_turnover, self._large_low_delist_turnover = self._cal_ret(self.large_low_pos)
        self._small_low_ret, self._small_low_turnover, self._small_low_delist_turnover = self._cal_ret(self.small_low_pos)
        self._large_medium_ret, self._large_medium_turnover, self._large_medium_delist_turnover = self._cal_ret(self.large_medium_pos)
        self._small_medium_ret, self._small_medium_turnover, self._small_medium_delist_turnover = self._cal_ret(self.small_medium_pos)
        # Fees: turnover * fee plus delisting weight * delisting loss.
        small_fee = (self._small_high_turnover.reindex(self._small_high_ret.index).fillna(0) + self._small_medium_turnover.reindex(self._small_medium_ret.index).fillna(0) + self._small_low_turnover.reindex(self._small_low_ret.index).fillna(0)) * self.context.fee +\
            (self._small_high_delist_turnover.reindex(self._small_high_ret.index).fillna(0) + self._small_medium_delist_turnover.reindex(self._small_medium_ret.index).fillna(0) + self._small_low_delist_turnover.reindex(self._small_low_ret.index).fillna(0)) * self.context.delist_loss
        large_fee = (self._large_high_turnover.reindex(self._large_high_ret.index).fillna(0) + self._large_medium_turnover.reindex(self._large_medium_ret.index).fillna(0) + self._large_low_turnover.reindex(self._large_low_ret.index).fillna(0)) * self.context.fee +\
            (self._large_high_delist_turnover.reindex(self._large_high_ret.index).fillna(0) + self._large_medium_delist_turnover.reindex(self._large_medium_ret.index).fillna(0) + self._large_low_delist_turnover.reindex(self._large_low_ret.index).fillna(0)) * self.context.delist_loss
        self._small_ret = (self._small_high_ret + self._small_medium_ret + self._small_low_ret) / 3
        self._large_ret = (self._large_high_ret + self._large_medium_ret + self._large_low_ret) / 3
        self.small_ret = self._small_ret - small_fee / 3
        self.large_ret = self._large_ret - large_fee / 3
        # Long-short orientation follows the declared factor direction.
        if self.context.factor_direction == 'pos':
            self.diff_ret = self._small_ret - self._large_ret - small_fee / 3 - large_fee / 3
        else:
            self.diff_ret = self._large_ret - self._small_ret - small_fee / 3 - large_fee / 3

    def _get_month_ret(self):
        """Monthly versions of the small/large/diff return series."""
        self.small_month_ret = self._cal_month_ret(self.small_ret)
        self.large_month_ret = self._cal_month_ret(self.large_ret)
        self.diff_month_ret = self._cal_month_ret(self.diff_ret)

    def _cal_stats(self, small, large, diff, option='nw'):
        """t- and p-value rows for the small/large/diff legs."""
        t_small, p_small = self._ols_stats(small, option=option)
        t_large, p_large = self._ols_stats(large, option=option)
        t_diff, p_diff = self._ols_stats(diff, option=option)
        return [[t_small, t_large, t_diff],
                [p_small, p_large, p_diff]]

    def _stat_analysis(self):
        """Summary table: mean + (NW and plain) t/p, monthly & daily, small/large/diff."""
        month_stat = np.array([[self.small_month_ret.mean(), self.large_month_ret.mean(), self.diff_month_ret.mean()]] +\
            self._cal_stats(self.small_month_ret, self.large_month_ret, self.diff_month_ret) +\
            self._cal_stats(self.small_month_ret, self.large_month_ret, self.diff_month_ret, option='non-nw'))
        daily_stat = np.array([[self.small_ret.mean(), self.large_ret.mean(), self.diff_ret.mean()]] +\
            self._cal_stats(self.small_ret, self.large_ret, self.diff_ret) +\
            self._cal_stats(self.small_ret, self.large_ret, self.diff_ret, option='non-nw'))
        index = pd.MultiIndex.from_product([[self.context.factor_name], ['mean_ret', 't-value(nw)', 'p-value(nw)', 't-value', 'p-value']])
        columns = pd.MultiIndex.from_product([['monthly', 'daily'], ['small', 'large', 'diff']])
        df_stat = pd.DataFrame(np.concatenate((month_stat, daily_stat), axis=1), index=index, columns=columns)
        # Round every cell to 4 decimals (format to string, then back to float).
        self.df_stat = df_stat.applymap(lambda x: '%.4f' % x).astype(float)
class FactorStat_mkt(FactorStat):
    """Whole-market benchmark portfolio (no factor sort), reusing FactorStat's engine."""

    def __init__(self, context):
        self.context = context
        self._get_pos_hold()
        self._get_group_ret()
        self._get_month_ret()
        self._stat_analysis()

    def _get_pos_hold(self):
        """One market-wide holding Series per buy date."""
        self.mkt_pos = {}
        for month_date in sorted(list(self.context.df_ret_dict.keys())):
            buy_date = ps.shift_n_trading_day(month_date, -1)
            self.mkt_pos[buy_date] = self._cal_factor_pos(month_date)

    def _cal_factor_pos(self, date):
        """Weights over every stock with a market cap on *date* (equal or cap weighted)."""
        df = self.context.df_factor.loc[date][['circ_mv']].dropna(axis=0, how='any')
        if self.context.weighted_method == 'eql_weighted':
            return pd.Series(index = df.index, name='eql_weighted').fillna(1/len(df))
        if self.context.weighted_method == 'mv_weighted':
            return df['circ_mv'] / df['circ_mv'].sum()

    def _get_group_ret(self):
        """Market return series net of turnover fees and delisting losses."""
        self._mkt_ret, self._mkt_turnover, self._delist_turnover = self._cal_ret(self.mkt_pos)
        fee = self._mkt_turnover.reindex(self._mkt_ret.index).fillna(0) * self.context.fee + self._delist_turnover.reindex(self._mkt_ret.index).fillna(0) * self.context.delist_loss
        self.mkt_ret = self._mkt_ret - fee

    def _get_month_ret(self):
        """Monthly compounded market returns."""
        self.mkt_month_ret = self._cal_month_ret(self.mkt_ret)

    def _cal_stats(self, mkt, option='nw'):
        """t- and p-value rows for the single market series."""
        t_mkt, p_mkt = self._ols_stats(mkt, option=option)
        return [[t_mkt],
                [p_mkt]]

    def _stat_analysis(self):
        """Summary table: mean + (NW and plain) t/p, monthly & daily."""
        month_stat = np.array([[self.mkt_month_ret.mean()]] +\
            self._cal_stats(self.mkt_month_ret) +\
            self._cal_stats(self.mkt_month_ret, option='non-nw'))
        daily_stat = np.array([[self.mkt_ret.mean()]] +\
            self._cal_stats(self.mkt_ret) +\
            self._cal_stats(self.mkt_ret, option='non-nw'))
        index = pd.MultiIndex.from_product([['mkt'], ['mean_ret', 't-value(nw)', 'p-value(nw)', 't-value', 'p-value']])
        columns = ['monthly', 'daily']
        df_stat = pd.DataFrame(np.concatenate((month_stat, daily_stat), axis=1), index=index, columns=columns)
        # Round every cell to 4 decimals (format to string, then back to float).
        self.df_stat = df_stat.applymap(lambda x: '%.4f' % x).astype(float)
|
from django.conf.urls import include, url

from . import views

# Import custom views.
from student.views import announcement
from student.views import syllabus
from student.views import policy
# Bug fix: `module` and `module_unit` are referenced by urlpatterns below
# but were never imported, so this URLconf failed with NameError on load.
from student.views import module
from student.views import module_unit
from student.views import lecture
from student.views import lecture_note
from student.views import assignment
from student.views import quiz
from student.views import exam
from student.views import discussion
from student.views import peer_review
from student.views import credit

app_name = "student"
# Student-facing course dashboard routes.  All course routes capture the
# course id as <id>; nested numeric groups are passed positionally.
urlpatterns = [
    # Announcement
    url(r'^course/(?P<id>\d+)/dashboard/announcements$', announcement.announcements_page, name='announcements_page'),
    # Syllabus
    url(r'^course/(?P<id>\d+)/dashboard/syllabus$', syllabus.syllabus_page, name='syllabus_page'),
    # Grades & Policy
    url(r'^course/(?P<id>\d+)/dashboard/policy$', policy.policy_page,name='policy_page'),
    # Curriculum Module
    url(r'^course/(?P<id>\d+)/dashboard/modules$', module.modules_page,name='modules_page'),
    url(r'^course/(?P<id>\d+)/dashboard/module$', module.module,name='module'),
    # Curriculum Module unit
    url(r'^course/(?P<id>\d+)/dashboard/module/(\d+)/units$', module_unit.module_units_page,name='module_units_page'),
    url(r'^course/(?P<id>\d+)/dashboard/module/(\d+)/view_module_unit$', module_unit.view_module_unit,name='view_module_unit'),
    # Lecture
    url(r'^course/(?P<id>\d+)/dashboard/lectures$', lecture.lectures_page,name='lectures_page'),
    url(r'^course/(?P<id>\d+)/dashboard/lecture$', lecture.lecture,name='lecture'),
    url(r'^shortcourse/(?P<id>\d+)/lectures$', lecture.shortcourselectures_page,name='shortcourselectures_page'),
    url(r'^shortcourse/(?P<id>\d+)/shortcourselecture$', lecture.shortcourselecture,name='shortcourselecture'),
    # Lecture Notes
    url(r'^course/(?P<id>\d+)/dashboard/lecture/(\d+)/notes$', lecture_note.lecture_notes_page,name='lecture_notes_page'),
    url(r'^course/(?P<id>\d+)/dashboard/lecture/(\d+)/view_lecture_note$', lecture_note.view_lecture_note,name='view_lecture_note'),
    # Assignment(s)
    url(r'^course/(?P<id>\d+)/dashboard/assignments$', assignment.assignments_page,name='assignments_page'),
    url(r'^course/(?P<id>\d+)/dashboard/assignments_table$', assignment.assignments_table,name='assignments_table'),
    url(r'^course/(?P<id>\d+)/dashboard/delete_assignment$', assignment.delete_assignment,name='delete_assignment'),
    # Assignment
    url(r'^course/(?P<id>\d+)/dashboard/assignment/(\d+)$', assignment.assignment_page, name='assignment_page'),
    url(r'^course/(?P<id>\d+)/dashboard/assignment/(\d+)/submit_assignment$', assignment.submit_assignment,name='submit_assignment'),
    url(r'^course/(?P<id>\d+)/dashboard/assignment/(\d+)/submit_e_assignment_answer$', assignment.submit_e_assignment_answer,name='submit_e_assignment_answer'),
    url(r'^course/(?P<id>\d+)/dashboard/assignment/(\d+)/submit_mc_assignment_answer$', assignment.submit_mc_assignment_answer,name='submit_mc_assignment_answer'),
    url(r'^course/(?P<id>\d+)/dashboard/assignment/(\d+)/submit_tf_assignment_answer$', assignment.submit_tf_assignment_answer,name='submit_tf_assignment_answer'),
    url(r'^course/(?P<id>\d+)/dashboard/assignment/(\d+)/submit_r_assignment_answer$', assignment.submit_r_assignment_answer,name='submit_r_assignment_answer'),
    # Quiz(zes)
    url(r'^course/(?P<id>\d+)/dashboard/quizzes$', quiz.quizzes_page,name='quizzes_page'),
    url(r'^course/(?P<id>\d+)/dashboard/quizzes_table$', quiz.quizzes_table,name='quizzes_table'),
    url(r'^course/(?P<id>\d+)/dashboard/quiz_delete$', quiz.delete_quiz,name='delete_quiz'),
    # Quiz
    url(r'^course/(?P<id>\d+)/dashboard/quiz/(\d+)$', quiz.quiz_page,name='quiz_page'),
    url(r'^course/(?P<id>\d+)/dashboard/quiz/(\d+)/submit_quiz$', quiz.submit_quiz,name='submit_quiz'),
    # NOTE(review): this quiz route reuses the *assignment* handler name and
    # route name 'submit_tf_assignment_answer' — presumably it should be
    # quiz-specific ('submit_tf_quiz_answer'); confirm templates before renaming.
    url(r'^course/(?P<id>\d+)/dashboard/quiz/(\d+)/submit_tf_quiz_answer$', quiz.submit_tf_assignment_answer,name='submit_tf_assignment_answer'),
    # Exam(s)
    url(r'^course/(?P<id>\d+)/dashboard/exams$', exam.exams_page,name='exams_page'),
    url(r'^course/(?P<id>\d+)/dashboard/exams_table$', exam.exams_table,name='exams_table'),
    url(r'^course/(?P<id>\d+)/dashboard/delete_exam$', exam.delete_exam,name='delete_exam'),
    # Exam
    url(r'^course/(?P<id>\d+)/dashboard/exam/(\d+)$', exam.exam_page,name='exam_page'),
    url(r'^course/(?P<id>\d+)/dashboard/exam/(\d+)/submit_exam$', exam.submit_exam,name='submit_exam'),
    url(r'^course/(?P<id>\d+)/dashboard/exam/(\d+)/submit_mc_exam_answer$', exam.submit_mc_exam_answer,name='submit_mc_exam_answer'),
    # Peer-Review
    url(r'^course/(?P<id>\d+)/dashboard/peer_reviews$', peer_review.peer_reviews_page,name='peer_reviews_page'),
    url(r'^course/(?P<id>\d+)/dashboard/peer_review/(\d+)$', peer_review.assignment_page,name='assignment_page'),
    url(r'^course/(?P<id>\d+)/dashboard/peer_review/(\d+)/peer_review_modal$', peer_review.peer_review_modal,name='peer_review_modal'),
    url(r'^course/(?P<id>\d+)/dashboard/peer_review/(\d+)/save_peer_review$', peer_review.save_peer_review,name='save_peer_review'),
    url(r'^course/(?P<id>\d+)/dashboard/peer_review/(\d+)/delete_peer_review$', peer_review.delete_peer_review,name='delete_peer_review'),
    url(r'^course/(?P<id>\d+)/dashboard/peer_review/(\d+)/update_assignment_marks$', peer_review.update_assignment_marks,name='update_assignment_marks'),
    # Discussion
    url(r'^course/(?P<id>\d+)/dashboard/discussion$', discussion.discussion_page,name='discussion_page'),
    url(r'^course/(?P<id>\d+)/dashboard/threads_table$', discussion.threads_table,name='threads_table'),
    url(r'^course/(?P<id>\d+)/dashboard/new_thread$', discussion.new_thread_modal,name='new_thread_modal'),
    url(r'^course/(?P<id>\d+)/dashboard/insert_thread$', discussion.insert_thread,name='insert_thread'),
    url(r'^course/(?P<id>\d+)/dashboard/delete_thread$', discussion.delete_thread,name='delete_thread'),
    url(r'^course/(?P<id>\d+)/dashboard/thread/(\d+)$', discussion.thread_page,name='thread_page'),
    url(r'^course/(?P<id>\d+)/dashboard/thread/(\d+)/posts_table$', discussion.posts_table,name='posts_table'),
    url(r'^course/(?P<id>\d+)/dashboard/thread/(\d+)/new_post$', discussion.new_post_modal,name='new_post_modal'),
    url(r'^course/(?P<id>\d+)/dashboard/thread/(\d+)/insert_post$', discussion.insert_post,name='insert_post'),
    # Credit
    url(r'^course/(?P<id>\d+)/dashboard/credit$', credit.credit_page,name='credit_page'),
    url(r'^course/(?P<id>\d+)/dashboard/submit_credit_application$', credit.submit_credit_application,name='submit_credit_application'),
]
|
#!/usr/bin/env python
import smbus, time
# ls /dev/i2c-1 => smbus(1)
bus=smbus.SMBus(1)
# AT=0x21
R1=0x20
R2=0x21
R3=0x23
R4=0x22
R5=0x24
R6=0x25
R7=0x26
R8=0x27
# address check with $> i2cdetect -y 1
# print("test chenillard")
## bus.write_byte_data(R1,0x55,0x55)
## i=0
## mask=1<<i
## #mask=~mask
## M1=~mask
## M2=~(mask>>8)
## M3=~(mask>>16)
## M4=~(mask>>32)
## bus.write_byte_data(R1,M1,M2)
## bus.write_byte_data(R2,M3,M4)
## i+=1
## i%= 1<<32
## print i
def closeAll():
    """De-energise every relay: write 0xff to both ports of all 8 expanders."""
    for register in (R1, R2, R3, R4, R5, R6, R7, R8):
        bus.write_byte_data(register, 0xff, 0xff)
off=0xff
# Channel 1 is Y63, then we go clockwise around the board.
def shift8(i, rev=False):
    """Return an 8-bit mask with exactly one bit cleared.

    The cleared bit is i % 8, or its mirror 7 - (i % 8) when rev is truthy
    (some banks are wired in reverse order).
    """
    bit = 7 - (i % 8) if rev else i % 8
    return ~(1 << bit) & 0xff
## shift8(i, 1) # True
## shift8(i, 0) # False
def channelSelect (i):
    """
    Arbitrary numbering: channel 0 corresponds to Y63, then clockwise
    round the matrix up to 127 (X1).

    Energizes exactly one relay: closeAll() first releases everything, then
    one bit is cleared in one byte of one expander (R1..R8, two 8-bit ports
    each). The per-bank mix of shift8(i, 0)/shift8(i, 1) and of byte order
    mirrors the board wiring. Returns the 8-bit mask that was written.
    """
    # assert(0<=i<128, "channel {} does not exist in [0-127]".format(i))
    assert(0<=i<128)
    closeAll() #<== close all relays
    if i< 1*8 :# bank1
        mask=shift8(i, 1) ; bus.write_byte_data(R1,off,mask)
    elif i< 2*8 :
        mask=shift8(i, 1) ; bus.write_byte_data(R1,mask,off)
    elif i< 3*8 :
        mask=shift8(i, 1) ; bus.write_byte_data(R2,off, mask)
    elif i< 4*8 :
        mask=shift8(i, 1) ; bus.write_byte_data(R2,mask, off)
    elif i< 5*8 :# bank2
        mask=shift8(i, 0) ; bus.write_byte_data(R3,mask,off)
    elif i< 6*8 :
        mask=shift8(i, 1) ; bus.write_byte_data(R3,off,mask)
    elif i< 7*8 :
        mask=shift8(i, 0) ; bus.write_byte_data(R4,mask,off)
    elif i< 8*8 :
        mask=shift8(i, 0) ; bus.write_byte_data(R4,off,mask)
    elif i< 9*8 :# bank3
        mask=shift8(i, 0) ; bus.write_byte_data(R6,mask,off)
    elif i< 10*8 :
        mask=shift8(i, 0) ; bus.write_byte_data(R6,off,mask)
    elif i< 11*8 :
        mask=shift8(i, 1) ; bus.write_byte_data(R5,mask,off)
    elif i< 12*8 :
        mask=shift8(i, 1) ; bus.write_byte_data(R5,off,mask)
    elif i< 13*8 :# bank4
        mask=shift8(i, 0) ; bus.write_byte_data(R8,mask,off)
    elif i< 14*8 :
        mask=shift8(i, 0) ; bus.write_byte_data(R8,off,mask)
    elif i< 15*8 :
        mask=shift8(i, 0) ; bus.write_byte_data(R7,off,mask)
    elif i< 16*8 :
        mask=shift8(i, 0) ; bus.write_byte_data(R7,mask,off)
    return mask
# 1 DOIT ALLUMER y1
# 2 DOIT ALLUMER y2 etc.
# ...
def channelSelectY(y):
    """Select the raw channel that corresponds to signal Yn (1-based).

    Odd Y signals sit in the first half of the numbering, even ones after
    channel 63 (channel 0 is Y63, clockwise — see channelSelect).

    Uses floor division so the channel index stays an int: behavior is
    identical under Python 2, and under Python 3 true division would pass a
    float to channelSelect and break the bit shifts in shift8.
    """
    if y % 2:  # odd
        channelSelect((63 - y) // 2)
    else:
        channelSelect((y // 2) + 63)
def channelSelectX(x):
    """Select the raw channel that corresponds to signal Xn (1-based).

    Uses floor division so the channel index stays an int: behavior is
    identical under Python 2, and under Python 3 true division would pass a
    float to channelSelect and break the bit shifts in shift8.
    """
    if x % 2:  # odd
        channelSelect((65 - x) // 2 + 95)
    else:
        channelSelect((x + 62) // 2)
if __name__ == '__main__':
    # Demo: sweep every X signal, then every Y signal, 0.4 s apart.
    for i in range(1, 65):
        time.sleep(0.4)
        channelSelectX(i)
    for i in range(1, 65):
        time.sleep(0.4)
        channelSelectY(i)
    # channelSelectY(1)
    ## chenillard
    # Then walk all 128 raw channels, printing index, bank and written mask.
    i=0 #8*8-1
    for i in range(0, 128) :
        mask=channelSelect(i);
        print i, i/8, "{:08b}".format(mask)
        time.sleep(0.4)
|
"""
1. open up webpages using requests
2. convert into lxml object
3. Get list of image from xpath - input xpath
4. save image - input save path
"""
import requests
import os
import sys
from requests.adapters import HTTPAdapter
from urllib.parse import urlparse, urljoin
from urllib3.util.retry import Retry
from time import sleep
from lxml import html
from os.path import dirname
from selenium import webdriver
HEADERS = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36",
"Accept": "image/webp,image/apng,image/*,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "en-GB,en-US;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Connection": "keep-alive",
}
URL = "http://quantum.spline.one/ecommerce.html"
def printProgressBar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█'):
    """
    Call in a loop to create terminal progress bar
    @params:
        iteration   - Required  : current iteration (Int)
        total       - Required  : total iterations (Int)
        prefix      - Optional  : prefix string (Str)
        suffix      - Optional  : suffix string (Str)
        decimals    - Optional  : positive number of decimals in percent complete (Int)
        length      - Optional  : character length of bar (Int)
        fill        - Optional  : bar fill character (Str)
    """
    fraction = iteration / float(total)
    percent = f"{100 * fraction:.{decimals}f}"
    done = int(length * iteration // total)
    bar = fill * done + '-' * (length - done)
    # \r rewrites the same terminal line on every call
    print(f'\r{prefix} |{bar}| {percent} {suffix}', end='\r')
    if iteration == total:
        # move to a fresh line once complete
        print()
def chrome_image_download(url, filename):
    """Open *url* in Chrome, click the first <img>, and screenshot it to *filename*.

    Returns the page source on success, None on failure (the error is
    printed, not raised). The browser is always quit if it was started.
    """
    browser = None
    try:
        options = webdriver.ChromeOptions()
        options.arguments.extend(['no-sandbox', 'disable-gpu'])
        browser = webdriver.Chrome(options=options)
        browser.get(url)
        el = browser.find_element_by_xpath("//img")
        if not el:
            raise Exception("No image found")
        el.click()
        # use headless and resize to the max height and width
        # cannot be done without headless
        # https://stackoverflow.com/questions/41721734/take-screenshot-of-full-page-with-selenium-python-with-chromedriver
        with open(filename, 'wb') as f:
            f.write(el.screenshot_as_png)
        return browser.page_source
    except Exception as err:
        print(err)
    finally:
        if browser:
            if browser.session_id:
                browser.quit()
def save_file(folder, filename, resp):
    """Write the response body to folder/filename, creating the folder if needed.

    If the target file already exists, the file is saved as
    "(1)_<filename>", "(2)_<filename>", ... instead of overwriting.

    Fixes over the original: the existence check and the open() call now
    actually use `filename` (the original interpolated a stale placeholder,
    so renaming never changed the checked path and the loop could never
    terminate), and the public `resp.content` is used instead of the
    private `resp._content`.
    """
    os.makedirs(folder, exist_ok=True)
    base = filename
    count = 1  # suffix used to rename if the file already exists
    while os.path.exists(os.path.join(folder, filename)):
        filename = f"({count})_{base}"
        count += 1
    with open(os.path.join(folder, filename), 'wb') as f:
        f.write(resp.content)
def init_session():
    """Build a requests Session that retries transient 5xx server errors
    (up to 5 attempts, exponential backoff) on plain-HTTP requests."""
    session = requests.Session()
    retry_policy = Retry(total=5,
                         backoff_factor=1,
                         status_forcelist=[500, 502, 503, 504])
    session.mount('http://', HTTPAdapter(max_retries=retry_policy))
    return session
def get_image_links(response, el_xpath):
    """Parse the HTML response body and return whatever el_xpath matches
    (for this script: the image src attribute strings)."""
    doc = html.fromstring(response.text)
    links = doc.xpath(el_xpath)
    print(f"{len(links)} image found")
    return links
def main(url, image_xpath, folder):
    """Download every image matched by image_xpath on the page at url into folder."""
    # filename url xpath outputFolder
    # url = sys.argv[1]
    # image_xpath = sys.argv[2]
    # folder = sys.argv[3]
    print(f"{url} {image_xpath} {folder}")
    driver = init_session()
    response = driver.get(url, timeout=45)
    links = get_image_links(response,
                            image_xpath) # default is None
    # resolve relative image links against the page's directory
    base_url = os.path.dirname(url)
    l = len(links)
    if l == 0:
        print(f"{l} links found, terminated.")
        exit()
    printProgressBar(0, l, prefix='Progress:', suffix='Complete', length=50)
    for i, item in enumerate(links):
        filename = str(item).split('/')[-1]
        item_url = urljoin(base_url, item)
        # retry forever on connection-level errors (the session already
        # retries 5xx responses internally)
        while True:
            try:
                response = driver.get(url=item_url, timeout=45)
                break
            except Exception as err:
                print(err)
        save_file(folder, filename, response)
        # fixed 30 s pause between downloads — presumably rate limiting;
        # confirm against the target site's policy
        sleep(30)
        printProgressBar(i + 1, l, prefix='Progress:',
                        suffix='Complete', length=50)
if __name__ == "__main__":
    # args: filename url xpath outputFolder
    # python app_copy.py http://quantum.spline.one/ecommerce.html "//img[@class='img-fluid']/@src" spline_test
    # No arg-count validation: missing args raise IndexError.
    url = sys.argv[1]
    image_xpath = sys.argv[2]
    folder = sys.argv[3]
    main(url, image_xpath, folder)
|
#!/usr/bin/env python3
import time
from flag_char import FlagChar
with open("map", "r") as f:
    lines = f.read().strip().split('\n')
# Unpacking the singleton set asserts every map row has the same length.
height, (width,) = len(lines), set(map(len, lines))
mapp = list(''.join(lines))  # flat, row-major tile list
flag = []
with open("flag.txt", "r") as f:
    tmp = f.read().strip()
for x, i in enumerate(tmp):
    # spread the flag characters horizontally, centered in the map
    flag.append(FlagChar(i, (width // 2) - (len(tmp) // 2) + x, height // 2, width, height))
STARTING_POS = (0,0)
# tile values used in mapp / updated_map
NOTHING = "0"
PIT = "1"
FLAG_CHAR = "2"
pos = STARTING_POS
running = True
updated_map = []
def index(x, y):
    """Flatten 2-D map coordinates into an offset in the row-major tile list."""
    return y * width + x
def game_loop():
    """Main loop: redraw the map, emit hints, read a move, and every second
    valid move let the flag characters flee."""
    global updated_map
    slow = 2  # flag moves once per `slow` player moves
    cnt = 0
    while True:
        if len(flag) == 0:
            print("e\x00Congrats, you caught the whole flag.")
            break
        updated_map = mapp.copy()
        # build map with current flag positions
        for i in flag:
            updated_map[index(i.x, i.y)] = FLAG_CHAR
        output = []
        # img (default room)
        output.append("i")
        # player pos
        x,y = pos
        west, north, east, south = get_neighbor_values(updated_map, x, y)
        if north == PIT or west == PIT or south == PIT or east == PIT:
            # pit: you feel cold
            output.append("breeze")
        if north == FLAG_CHAR or west == FLAG_CHAR or south == FLAG_CHAR or east == FLAG_CHAR:
            # flag: you smell a flag
            output.append("smell")
        # protocol: NUL-separated tokens on one line
        print("\x00".join(output))
        if get_input():
            cnt += 1
            if cnt % slow == 0:
                move_flag()
def move_flag():
    """Advance every flag character one step away from non-empty tiles."""
    global flag, updated_map
    # mark the player's tile ("3") so flag chars treat it as blocked
    updated_map[index(pos[0], pos[1])] = "3"
    for i in flag:
        neighbors = get_neighbor_values(updated_map, i.x, i.y)
        old_idx = index(i.x, i.y)
        # blocked flags, in (west, north, east, south) order
        i.move((neighbors[0] != NOTHING,
                neighbors[1] != NOTHING,
                neighbors[2] != NOTHING,
                neighbors[3] != NOTHING))
        updated_map[old_idx] = NOTHING
        updated_map[index(i.x, i.y)] = FLAG_CHAR
def check_catch():
    """Catch (remove and announce) every flag character on the player's tile.

    Iterates over a snapshot of `flag`: the original removed elements from
    the list while iterating it directly, which makes the iterator skip the
    element after each removal.
    """
    for f in list(flag):
        caught = f.catch(pos[0], pos[1])
        if caught:
            flag.remove(f)
            print(f"f\x00{caught}")
def get_neighbor_values(m, x, y):
    """
    return (west, north, east, south)
    """
    offsets = ((-1, 0), (0, -1), (1, 0), (0, 1))  # west, north, east, south
    return tuple(map_pos(m, x + dx, y + dy) for dx, dy in offsets)
def map_pos(m, x, y):
    """Return the tile of m at (x, y), or "wall" when out of bounds."""
    if 0 <= x < width and 0 <= y < height:
        return m[index(x, y)]
    return "wall"
def get_input():
    """Read movement keys (w/a/s/d) until one produces a valid move.

    Walking into a map edge prints "iw" and keeps prompting. After the
    move, catches any flag character on the new tile; stepping onto a pit
    prints "d" and exits the process. Returns True once a move was made.
    """
    global pos
    valid = False
    while not valid:
        key = input()
        if len(key) > 1:
            print("e\x00Wrong key.")
        x,y = pos
        if key == "w":
            if y == 0:
                print("iw")
            else:
                pos = (x, y - 1)
                valid = True
        elif key == "a":
            if x == 0:
                print("iw")
            else:
                pos = (x - 1, y)
                valid = True
        elif key == "s":
            if y == height - 1:
                print("iw")
            else:
                pos = (x, y + 1)
                valid = True
        elif key == "d":
            if x == width - 1:
                print("iw")
            else:
                pos = (x + 1, y)
                valid = True
        # ignore other keys
    check_catch()
    cur_field = map_pos(updated_map, pos[0], pos[1])
    if cur_field == PIT:
        print("d")
        exit()
    return True
if __name__ == "__main__":
    # Run the interactive game when executed as a script.
    game_loop()
|
# AUTHOR: Caleb Hoffman
# CLASS: Optics and Photonics
# ASSIGNMENT: Computer Problem 1
# REMARKS: The program computes and displays three incident rays on a thin lens
import matplotlib.pyplot as plt
import math
# Main function
def main():
    """Plot three parallel incident rays striking a thin lens.

    Prompts for the incident angle (degrees), then draws the lens and
    three rays of equal slope, spaced d/2 apart vertically, over
    x in [-0.8, 0] cm.
    """
    # Variables
    d = 2  # lens aperture height (cm)
    input_angle = float(input("Enter incident angle: "))*(math.pi/180)  # degrees -> radians
    xDomain = [0.1*x for x in range(-8, 1)]  # x from -0.8 cm up to the lens at 0
    m = math.tan(input_angle)  # common slope of the incident rays
    # Axis labels
    plt.title("Thin Lens-Object at Infinity")
    plt.xlabel("Optical Axis (cm)")
    plt.ylabel("Ray Elevation (cm)")
    # Imposes grid
    plt.grid()
    # Draws lens
    plt.vlines(0, (-d/2), (d/2))
    # This loop draws each ray at equal distance apart striking the lens
    for i in range(3):
        # intercepts are +d/2, 0, -d/2 for i = 0, 1, 2
        zRange = [m*x+((1-i)*(d/2)) for x in xDomain]
        plt.plot(xDomain, zRange, color = "red")
    # Displays the graph with computed ray
    plt.show()
# Invokes main
main()
|
# Server and worker process for asynchronous parallel training
from mpi4py import MPI
from server import Server
from pprint import pprint
import time
import os
def test_intercomm(intercomm,rank):
    """Sanity-check a freshly established 1-to-1 server/worker intercommunicator.

    Both sides must call this; the server (rank 0) broadcasts a marker
    string and each side asserts it sees the expected value. A COMM_NULL
    intercomm is silently skipped.
    """
    if intercomm != MPI.COMM_NULL:
        assert intercomm.remote_size == 1
        assert intercomm.size == 1
        assert intercomm.rank == 0
        if rank == 0: # server
            message = 'from_server'
            root = MPI.ROOT
        else: # worker
            message = None
            root = 0
        message = intercomm.bcast(message, root)
        if rank == 0:
            # on the broadcasting side bcast returns the worker's None
            assert message == None
        else:
            assert message == 'from_server'
class PTBase(object):
    '''
    Base class for the Parallel Training framework.

    Common routine that every device process should execute first:
    process_config -> get_data -> init_device -> build_model.
    '''
    def __init__(self, config, device):
        # MPI state shared by server and workers
        self.comm = MPI.COMM_WORLD
        self.rank = self.comm.rank
        self.size = self.comm.size
        self.config = config
        self.device = device  # e.g. 'gpu0'
        # if self.config['worker_type'] in ['EASGD', 'ASGD']:
        # elif self.config['worker_type'] in ['avg', 'cdd']:
        self.verbose = (self.rank == 0)  # only rank 0 prints
        self.process_config()
        self.get_data()
        self.init_device()
        self.build_model()

    def process_config(self):
        '''
        Load and post-process config items: merge the per-model YAML file,
        derive a unique ZMQ data socket, and create output folders (rank 0).
        '''
        # Add some items in
        self.config['comm'] = self.comm
        self.config['rank'] = self.rank
        self.config['size'] = self.size
        #self.config['syncrule'] = self.syncrule #TODO add syncrule into config
        self.config['device'] = self.device
        pid = os.getpid()
        self.config['worker_id'] = pid
        # derive a (hopefully) unique socket number from the pid
        self.config['sock_data'] = (self.config['sock_data'] + int(pid)) % 65535 #int(self.device[-1])
        self.config['verbose'] = self.verbose
        self.model_name=self.config['name']
        import yaml
        with open(self.model_name+'.yaml', 'r') as f:
            model_config = yaml.load(f)
        # Python 2: dict.items() returns lists, so + merges the two dicts
        self.config = dict(self.config.items()+model_config.items())
        date = '-%d-%d' % (time.gmtime()[1],time.gmtime()[2])
        import socket
        # encode run metadata (model, #gpus, batch size, host, date) in the path
        self.config['weights_dir']+= '-'+self.config['name'] \
                + '-'+str(self.config['size'])+'gpu-' \
                + str(self.config['batch_size'])+'b-' \
                + socket.gethostname() + date + '/'
        self.config['n_subb'] = self.config['file_batch_size']//self.config['batch_size']
        if self.rank == 0:
            if not os.path.exists(self.config['weights_dir']):
                os.makedirs(self.config['weights_dir'])
                if self.verbose: print "Creat folder: " + \
                        self.config['weights_dir']
            else:
                if self.verbose: print "folder exists: " + \
                        self.config['weights_dir']
            if not os.path.exists(self.config['record_dir']):
                os.makedirs(self.config['record_dir'])
                if self.verbose: print "Creat folder: " + \
                        self.config['record_dir']
            else:
                if self.verbose: print "folder exists: " + \
                        self.config['record_dir']
        # if self.config['sync_start'] and self.config['worker_type'] == 'EASGD':
        #     self.config['size'] = 1
        if self.verbose: pprint(self.config)

    def get_data(self):
        '''
        Prepare filename and label lists; stores
        [train_filenames, train_labels, val_filenames, val_labels, img_mean]
        in self.data.
        '''
        from helper_funcs import unpack_configs, extend_data
        (flag_para_load, flag_top_5, train_filenames, val_filenames, \
                train_labels, val_labels, img_mean) = unpack_configs(self.config)
        if self.config['image_mean'] == 'RGB_mean':
            image_mean = img_mean.mean(axis=-1).mean(axis=-1).mean(axis=-1)
            #c01b to # c
            #print 'BGR_mean %s' % image_mean #[ 122.22585297 116.20915222 103.56548309]
            import numpy as np
            image_mean = image_mean[:,np.newaxis,np.newaxis,np.newaxis]
        if self.config['debug']:
            # debug mode: shrink the dataset for quick runs
            train_filenames = train_filenames[:40]
            val_filenames = val_filenames[:8]
        env_train=None
        env_val = None
        train_filenames,train_labels,train_lmdb_cur_list,n_train_files=\
                extend_data(self.config,train_filenames,train_labels,env_train)
        val_filenames,val_labels,val_lmdb_cur_list,n_val_files \
                = extend_data(self.config,val_filenames,val_labels,env_val)
        if self.config['data_source'] == 'hkl':
            self.data = [train_filenames,train_labels,\
                    val_filenames,val_labels,img_mean] # 5 items
        else:
            raise NotImplementedError('wrong data source')
        if self.verbose: print 'train on %d files' % n_train_files
        if self.verbose: print 'val on %d files' % n_val_files

    def init_device(self):
        '''Create a PyCUDA context on this worker's GPU and point Theano at it.'''
        gpuid = int(self.device[-1])
        # pycuda and zmq set up
        import pycuda.driver as drv
        drv.init()
        dev = drv.Device(gpuid)
        ctx = dev.make_context()
        self.drv = drv
        self.dev = dev
        self.ctx = ctx
        import theano.sandbox.cuda
        theano.sandbox.cuda.use(self.config['device'])

    def build_model(self):
        '''Instantiate the model named in the config and attach the image mean.'''
        import theano
        theano.config.on_unused_input = 'warn'
        if self.model_name=='googlenet':
            from models.googlenet import GoogLeNet
            self.model = GoogLeNet(self.config)
        elif self.model_name=='alexnet':
            from models.alex_net import AlexNet
            self.model = AlexNet(self.config)
        elif self.model_name=='vggnet':
            if self.config['pretrain']:
                from models.vggnet_11_shallow import VGGNet_11 as VGGNet
            else:
                if self.config['source'] == 'lasagne':
                    from models.lasagne_model_zoo.vgg import VGG as VGGNet
                elif self.config['source'] == 'Theano-MPI':
                    from models.vggnet_16 import VGGNet_16 as VGGNet
                else:
                    raise NotImplementedError
            self.model = VGGNet(self.config)
        elif self.model_name=='customized':
            from models.customized import Customized
            self.model = Customized(self.config)
        else:
            raise NotImplementedError("wrong model name")
        self.model.img_mean = self.data[4]
class PTServer(Server, PTBase):
'''
Genearl Server class in Parallel Training framework
Manage MPI connection requests from workers
'''
def __init__(self, port, config, device):
Server.__init__(self,port=port)
PTBase.__init__(self,config=config,device=device)
#######
self.info = MPI.INFO_NULL
self.port = MPI.Open_port(self.info)
self.service = 'parallel-training'
MPI.Publish_name(self.service, self.info, self.port)
self.worker_comm = {}
self.worker_rank = {}
self.first_worker_id = None
def close():
MPI.Unpublish_name(self.service, self.info, self.port)
print '[Server] Service unpublished'
MPI.Close_port(self.port)
print '[Server] Service port closed'
def process_request(self, worker_id, message):
# override Server class method, for connection related request
reply = None
if message in ['connect','sync_register']:
if self.first_worker_id == None:
self.first_worker_id = worker_id
print '[Server] recording worker is %s' % worker_id
reply = 'first'
return reply
def action_after(self, worker_id, message):
# override Server class method, for connection related action
if message == 'connect': # Connecting asynchronously started workers
intercomm = MPI.COMM_WORLD.Accept(self.port, self.info, root=0)
self.worker_comm[str(worker_id)] = intercomm #TODO BUG there's a small chance that worker processes started on different node have the same pid
self.worker_rank[str(worker_id)] = 0 # remote size = 1, remote rank=0
test_intercomm(intercomm, rank=0)
print '[Server] connected to worker', worker_id
if 'sync_register' in message: # Connecting synchronously started workers
self.worker_comm[str(worker_id)] = self.comm
worker_rank = self.comm.recv(source = MPI.ANY_SOURCE, tag=int(worker_id))
self.worker_rank[str(worker_id)] = int(worker_rank)
print '[Server] registered worker', worker_id
elif message == 'disconnect':
intercomm = self.worker_comm[str(worker_id)]
try:
intercomm.Disconnect()
except:
pass
self.worker_comm.pop(str(worker_id))
print '[Server] disconnected with worker', worker_id
if bool(self.worker_comm) == False:
# empty dict
self.ctx.pop()
exit(0)
class PTWorker(PTBase):
    '''
    General Worker class in the Parallel Training framework.
    Starts the parallel-loading helper process if needed.
    '''
    def __init__(self, config, device):
        PTBase.__init__(self, config = config, device = device)

    def prepare_para_load(self):
        # Spawn and initialize the parallel data-loading process if enabled.
        if self.config['para_load']:
            self.spawn_load()
            self.para_load_init()

    def spawn_load(self):
        'Spawn the parallel loading process on this host via MPI.'
        num_spawn = 1
        hostname = MPI.Get_processor_name()
        mpiinfo = MPI.Info.Create()
        # pin the spawned process to the same host as this worker
        mpiinfo.Set(key = 'host',value = hostname)
        ninfo = mpiinfo.Get_nkeys()
        if self.verbose: print ninfo
        import sys
        mpicommand = sys.executable
        gpuid = self.device[-1] #str(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
        if self.verbose: print gpuid
        socketnum = 0
        # adjust numactl according to the layout of copper nodes [1-8]
        if int(gpuid) > 3:
            socketnum=1
        printstr = "rank" + str(self.rank) +":numa"+ str(socketnum)
        if self.verbose: print printstr
        # spawn loading process
        # self.icomm= MPI.COMM_SELF.Spawn('numactl', \
        #             args=['-N',str(socketnum),mpicommand,\
        #             '../lib/base/proc_load_mpi.py',gpuid],\
        file_dir = os.path.dirname(os.path.realpath(__file__)) # get the dir of PT.py
        self.icomm= MPI.COMM_SELF.Spawn(mpicommand, \
                args=[file_dir+'/proc_load_mpi.py', gpuid],\
                info = mpiinfo, maxprocs = num_spawn)
        self.config['icomm'] = self.icomm

    def para_load_init(self):
        '''Hand the loading process its config, a CUDA IPC handle to shared_x,
        and the image mean.'''
        # 0. send config dict (can't carry any special objects) to loading process
        self.icomm.isend(self.config,dest=0,tag=99)
        drv = self.drv
        shared_x = self.model.shared_x
        img_mean = self.data[4]
        sock_data = self.config['sock_data']
        import zmq
        sock = zmq.Context().socket(zmq.PAIR)
        sock.connect('tcp://localhost:{0}'.format(sock_data))
        #import theano.sandbox.cuda
        #theano.sandbox.cuda.use(config.device)
        import theano.misc.pycuda_init
        import theano.misc.pycuda_utils
        # pass ipc handle and related information
        gpuarray_batch = theano.misc.pycuda_utils.to_gpuarray(
            shared_x.container.value)
        h = drv.mem_get_ipc_handle(gpuarray_batch.ptr)
        # 1. send ipc handle of shared_x
        sock.send_pyobj((gpuarray_batch.shape, gpuarray_batch.dtype, h))
        # 2. send img_mean
        self.icomm.send(img_mean, dest=0, tag=66)

    def para_load_close(self):
        '''Tell the loading process to stop and tear down the link.'''
        # send an stop mode
        self.icomm.send('stop',dest=0,tag=40) # TODO use this only when loading process is ready to receive mode
        self.icomm.send('stop',dest=0,tag=40)
        self.icomm.Disconnect()
        self.ctx.detach()

    def prepare_train_fn(self):
        # to be defined in different type of PTWorkers child class
        # to make sure model compiles correct updating function and allocate necessary extra param memory that correspond to the selected parallel worker type and parallel update type
        raise NotImplementedError('Need to redefine this function in a child class of PTWorker')

    def run(self):
        # to be defined in child class
        pass
if __name__ == '__main__':
    # BUG FIX: yaml was only imported inside PTBase.process_config, so the
    # module-level yaml.load here raised NameError; import it explicitly.
    import yaml
    with open('config.yaml', 'r') as f:
        config = yaml.load(f)
    #device = 'gpu' + str(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
    server = PTServer(port=5555, config=config, device='gpu7')
    server.run()
|
# define a function for calculating
# the area of a shapes
def calculate_area(name):
    """Prompt for a shape's dimensions and print its area.

    name -- shape name, case-insensitive; one of rectangle, square,
    triangle, circle, parallelogram, trapezoid. Dimensions are read with
    input(); non-positive dimensions are rejected with a message.

    Fixes over the original: removed the stray line-continuation backslash
    after the def header, and the circle branch now says "circle" instead
    of "triangle" in its output.
    """
    # convert all characters into lower case
    name = name.lower()
    # check for the conditions
    if name == "rectangle":
        l = int(input("Enter rectangle's length: "))
        b = int(input("Enter rectangle's breadth: "))
        # calculate area of rectangle
        if l>0 and b>0:
            rect_area = l * b
            print(f"The area of rectangle is {rect_area}.")
        else:
            print("Sorry! Cannot have 0 dimension")
    elif name == "square":
        s = int(input("Enter square's side length: "))
        # calculate area of square
        if s>0:
            sqt_area = s * s
            print(f"The area of square is {sqt_area}.")
        else:
            print("Sorry! Cannot have 0 dimension")
    elif name == "triangle":
        h = int(input("Enter triangle's height length: "))
        b = int(input("Enter triangle's breadth length: "))
        # calculate area of triangle
        if b>0 and h>0:
            tri_area = 0.5 * b * h
            print(f"The area of triangle is {tri_area}.")
        else:
            print("Sorry! Cannot have 0 dimension")
    elif name == "circle":
        r = int(input("Enter circle's radius length: "))
        pi = 3.14
        # calculate area of circle
        if r>0 :
            circ_area = pi * r * r
            print(f"The area of circle is {circ_area}.")
        else:
            print("Sorry! Cannot have 0 dimension")
    elif name == 'parallelogram':
        b = int(input("Enter parallelogram's base length: "))
        h = int(input("Enter parallelogram's height length: "))
        # calculate area of parallelogram
        if b>0 and h>0:
            para_area = b * h
            print(f"The area of parallelogram is {para_area}.")
        else:
            print("Sorry! Cannot have 0 dimension")
    elif name == 'trapezoid':
        b1 = int(input("Enter trapezoid's base length 1: "))
        b2 = int(input("Enter trapezoid's base length 2: "))
        h = int(input("Enter trapezoid's height length: "))
        # calculate area of trapezoid
        if b1>0 and b2>0 and h>0:
            trape_area = (b1+b2) * 0.5 * h
            print(f"The area of trapezoid is {trape_area}.")
        else:
            print("Sorry! Cannot have 0 dimension")
    else:
        print("Sorry! This shape is not available")
|
from django.contrib import admin
from Hanosite.models import Mission
# Register your models here.
# Expose Mission in the Django admin with the default ModelAdmin options.
admin.site.register(Mission)
#!/usr/bin/env python
# coding: utf-8
# import the random module
import random
# Guessing game (Python 2): pick the same hole as the computer to win coins.
life = 4  # number of lives
shut = 8  # number of rounds left to clear
select = ['left', 'right']
coins = 0
while life > 0:
    randomChoice = random.choice(select)
    # keep prompting until the player enters a valid hole name
    while True:
        userChoice = raw_input('请输入您要进入的洞口 [left(左), right(右)](如left): ')
        if userChoice not in select:
            print '无效的选择,请重新选择...'
            continue
        else:
            break
    if userChoice == randomChoice:
        # correct guess: +10 coins, advance one round
        print '您好,我的朋友,送给你10个金币,继续下一关吧!'
        coins += 10
        shut -= 1
    else:
        # wrong guess: lose a life (coins are kept)
        print '去死吧!贪婪的人类!'
        life -= 1
        # coins = 0
    if shut == 0:
        print '干的不错,您已通关'
        break
    else:
        print '您还有%s条命,共有金币%s个, 还剩下%s关' % (life, coins, shut)
        print
else:
    # while-else: reached only when all lives are lost (no break)
    print '我还会再回来的......'
"""Fetch info from the short url click"""
from pygmy.core.logger import log
try:
# Optional package
import geoip2.database
except Exception:
log.exception("ModuleNotFoundError: No module named 'geoip2'")
def parse_request(request):
    """Pass request object and returns parsed data dict.
    Country is fetched from IP using maxmind db.
    :param request:
    :return:
    """
    # prefer the reverse-proxy header, fall back to the socket address
    client_ip = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
    return dict(
        referrer=request.referrer,
        user_agent=request.headers.get("User-Agent"),
        country=ip_country(client_ip),
    )
def parse_header(request):
    """Pass request object and returns parsed data dict.
    Country is fetched from IP using maxmind db.
    :param request:
    :return:
    """
    client_ip = request.headers.get('Pygmy-App-User-Ip')
    # NOTE(review): 'Pygmy-Http-Rreferrer' looks like a typo for
    # 'Referrer', but it is a wire-format header name shared with whatever
    # sets it — confirm both sides before renaming.
    return dict(
        referrer=request.headers.get('Pygmy-Http-Rreferrer'),
        user_agent=request.headers.get('Pygmy-Http-User-Agent'),
        country=ip_country(client_ip),
    )
def ip_country(ip):
    """Get country ISO code from ip. Uses the GeoIP2 country db.

    Returns None if the lookup fails for any reason (missing db, unknown
    ip, geoip2 not installed); errors are logged, not raised. The reader
    is now always closed (the original leaked its file handle).
    :param ip:
    :return: None/str
    """
    c_iso_code = None
    reader = None
    try:
        reader = geoip2.database.Reader('pygmy/app/GeoLite2-Country.mmdb')
        c = reader.country(ip)
        c_iso_code = c.country.iso_code
    except Exception as e:
        log.error(e)
    finally:
        if reader is not None:
            reader.close()
    return c_iso_code
|
def is_pref(pref, Str):
    """Return True iff pref is a prefix of Str."""
    return Str.startswith(pref)
def search(a, b):
    """BFS for index sequences I, J with a[I[0]]+a[I[1]]+... == b[J[0]]+b[J[1]]+...

    Each queue entry describes a pair of partial concatenations where one is
    a prefix of the other:
      q[0] = True if the a-concatenation is the longer one
      q[1] = sequence of a-indexes
      q[2] = sequence of b-indexes
      q[3] = concatenated a-sequence
      q[4] = concatenated b-sequence
      q[5] = the leftover suffix of the longer side
    The reminder sets deduplicate leftovers so each suffix is explored once.

    Returns [[-1], [-1]] when no solution exists. (Debug print statements
    from the original were removed; the hand-rolled prefix test is replaced
    by str.startswith.)
    """
    queue = []
    reminder_a = set()  # leftover a-side suffixes already queued
    reminder_b = set()  # leftover b-side suffixes already queued
    # Seed with every pair (a[i], b[j]) where one is a prefix of the other.
    for i in range(0, len(a)):
        for j in range(0, len(b)):
            if hash(a[i]) == hash(b[j]) and a[i] == b[j]:
                return [[i], [j]]
            if b[j].startswith(a[i]):
                if b[j][len(a[i]):] not in reminder_b:
                    reminder_b.add(b[j][len(a[i]):])
                    queue.append([False, [i], [j], a[i], b[j], b[j][len(a[i]):]])
            if a[i].startswith(b[j]):
                if a[i][len(b[j]):] not in reminder_a:
                    reminder_a.add(a[i][len(b[j]):])
                    queue.append([True, [i], [j], a[i], b[j], a[i][len(b[j]):]])
    while queue != []:
        L = len(queue)
        for _ in range(0, L):
            cur = queue.pop(0)
            if cur[0]:
                # a-side is longer: extend the b-side to consume cur[5]
                for j in range(0, len(b)):
                    if b[j].startswith(cur[5]):
                        if cur[3] == cur[4] + b[j]:
                            return [cur[1], cur[2] + [j]]
                        rem = b[j][len(cur[5]):]
                        if rem not in reminder_b:
                            reminder_b.add(rem)
                            queue.append([False, cur[1], cur[2] + [j], cur[3], cur[4] + b[j], rem])
                    if cur[5].startswith(b[j]):
                        rem = cur[5][len(b[j]):]
                        if rem not in reminder_a:
                            reminder_a.add(rem)
                            queue.append([True, cur[1], cur[2] + [j], cur[3], cur[4] + b[j], rem])
            else:
                # b-side is longer: extend the a-side to consume cur[5]
                for j in range(0, len(a)):
                    if a[j].startswith(cur[5]):
                        if cur[4] == cur[3] + a[j]:
                            return [cur[1] + [j], cur[2]]
                        rem = a[j][len(cur[5]):]
                        if rem not in reminder_a:
                            reminder_a.add(rem)
                            queue.append([True, cur[1] + [j], cur[2], cur[3] + a[j], cur[4], rem])
                    if cur[5].startswith(a[j]):
                        rem = cur[5][len(a[j]):]
                        if rem not in reminder_b:
                            reminder_b.add(rem)
                            queue.append([False, cur[1] + [j], cur[2], cur[3] + a[j], cur[4], rem])
    return [[-1], [-1]]
def main():
    """Read the two string lists from stdin and print the solution found by search()."""
    print("Enter strings a_i in one line divided by space")
    a = input().split()
    print("Enter strings b_i in one line divided by space")
    b = input().split()
    answer = search(a, b)
    if(answer[0][0] == -1):
        print("There's no solution")
    else:
        # print the 1-based indexes chosen from each list
        print("a:", end = " ")
        for i in range(0, len(answer[0])):
            print(answer[0][i] + 1, end=" ")
        print("\nb:", end = " ")
        for i in range(0, len(answer[1])):
            print(answer[1][i] + 1, end=" ")
        print("")
        # spell out the concatenation equation itself
        for i in range(0, len(answer[0])):
            print(a[answer[0][i]], end = "")
            if(i != len(answer[0]) - 1):
                print(" + ", end = "")
            else:
                print(" = ", end = "")
        for i in range(0, len(answer[1])):
            print(b[answer[1][i]], end = "")
            if(i != len(answer[1]) - 1):
                print(" + ", end = "")
if __name__ == "__main__": main() |
# Definite loops: FOR
# for variable in iterable:
#     loop body
for i in [1,2,4]: # list: body runs once per element, value unused here
    print("Hola")
# loop variable takes each element in turn
for i in ["Primavera", "Verano", "Otoño", "Invierno"]:
    print(i)
print("-----------------------")
# same loop with a descriptive variable name (seasons of the year)
for estaciones_agno in ["Primavera", "Verano", "Otoño", "Invierno"]:
    print(estaciones_agno)
from django import forms
from django.db.models import query
from django.forms import fields
from django.contrib.auth.forms import UserCreationForm, UsernameField
from leads.models import Agent, Lead
from django.contrib.auth import get_user_model
class LeadModelForm(forms.ModelForm):
    """ModelForm exposing the editable fields of Lead."""
    class Meta:
        model = Lead
        fields = (
            "first_name",
            "last_name",
            "age",
            "agent",
        )
class LeadForm(forms.Form):
    """Plain (non-model) form mirroring the basic Lead fields."""
    first_name = forms.CharField(label="First Name")
    last_name = forms.CharField(label="Last Name")
    age = forms.IntegerField(min_value=0)
User = get_user_model()
class CustomUserCreationForm(UserCreationForm):
    """UserCreationForm bound to the project's custom user model."""
    class Meta:
        model = User
        fields = ("username",)
        field_classes = {'username': UsernameField}
class AssignAgentForm(forms.Form):
    """Form to pick an agent scoped to the requesting user's organisation."""
    agent = forms.ModelChoiceField( queryset= Agent.objects.none())

    def __init__( self, *args, **kwargs):
        # the view must pass request= so the agent choices can be limited
        # to the logged-in user's organisation
        request = kwargs.pop("request")
        agents = Agent.objects.filter(organisation = request.user.userprofile)
        super(AssignAgentForm, self).__init__(*args, **kwargs)
        self.fields["agent"].queryset = agents
# String, String slicing and other functions in python 3
mystr = "Hello this is string"
print(mystr)
# string slicing
print(mystr[4])
print(mystr[:0])
print(mystr[:5])
# printing part of a string
print(mystr[0:5])
# lenght calculation of a string using len
print(len(mystr))
# string slicing using range [::this] - extended slice
print(mystr[0:5:2])
# with negative index
print(mystr[-4:])
print(mystr[-1:0])
# extended negative slice
print(mystr[::-1]) # a naive way to reverse the string not recommended
print(mystr[::-2])
""" String functions
:type - use in type casting
isalnum - alphanumeric [T/F]
isalpha - alphabetic [T/F]
endswith - checks the end of the string compares and returns [T/F]
count - counts the letters in one string
capitalize - first letter of the string is converted changed to capital
find - find any word in the string [gives the starting of the index to be found]
lower - converts into lowercase
upper - converts into uppercase
replace - (,) takes to params replaces the first one with the second param
"""
|
from django.forms import Form, ModelForm
from django.contrib.auth.forms import UserCreationForm
from account.models import Account, Transaction
class CreateAccountForm(ModelForm):
    """ModelForm for opening a new Account.

    Inherits from ModelForm only: the original also inherited plain Form,
    which is redundant (ModelForm already provides the full form API) and
    depends on fragile MRO ordering to reach ModelForm's __init__/save.
    """
    class Meta:
        model = Account
        fields = ['name', 'balance_start', 'currency']
        labels = {
            'name': ('Name of your Budget'),
            'balance_start': ('Amount'),
            'currency': ('Currency')
        }
class TransactionForm(ModelForm):
    """ModelForm for recording a Transaction.

    Inherits from ModelForm only (the original's extra plain-Form base was
    redundant and MRO-fragile).
    """
    class Meta:
        model = Transaction
        fields = ['amount', 'object', 'comment', 'status']
from django.contrib.auth.models import AbstractUser
from django.db import models
from itsdangerous import TimedJSONWebSignatureSerializer
from tinymce.models import HTMLField
from dailyfresh import settings
from utils.models import BaseModel
class User(BaseModel, AbstractUser):
    """User model class (custom table on top of Django's AbstractUser)."""
    class Meta(object):
        # custom database table name
        db_table = 'df_user'

    def generate_active_token(self):
        """Generate a signed, time-limited account-activation token (str)."""
        # arg 1: secret key — must stay private, it is also used to verify
        # arg 2: token expiry in seconds (1 day)
        serializer = TimedJSONWebSignatureSerializer(
            settings.SECRET_KEY, 3600 * 24)
        # the payload format is free-form; only the user id is essential
        token = serializer.dumps({'confirm': self.id})
        # type conversion: bytes -> str
        return token.decode()
class TestModel(BaseModel):
    """Model used for testing only."""
    name = models.CharField(max_length=20)
    # product detail rich text, stored via the third-party tinymce HTMLField
    goods_detail = HTMLField(default='', verbose_name='商品详情')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.