index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
989,500 | 83f0aafcff15202524b487ec34d57bea3a0e5de3 | from functools import reduce
my_list = [1,2,4]
print( list( map( lambda item: item*2, my_list ) ) )
print( reduce( lambda acc, item: acc + item, my_list ) ) |
989,501 | 769b34dd740aeb7926cf94c22fca0e1f219b1b60 | import textract
# Extract raw text from the Word document; textract returns bytes.
text = textract.process(r"q.doc")
# Decode the bytes into a str once, up front.
text = text.decode()
print(text)
# Persist the extracted text as UTF-8.
with open('111.txt','w',encoding='utf-8') as f:
    # `text` is already a str after .decode(); the previous str() wrapper
    # was redundant.
    f.write(text)
|
989,502 | d28285caa2a70558bde6e904f7eb803d19801ca0 | from django.contrib import admin
# Register your models here.
from .models import App_file
admin.site.register(App_file) |
989,503 | 105e271edfceb543360d596b98a69eaafbc78e19 | n = 118382
# Sort the digits of n in descending order; sorted() on a string yields a
# list of single-character strings.
sorted_num = sorted(str(n), reverse=True)
print(sorted_num)
# NOTE(review): new_num is computed but never used afterwards.
new_num = list(map(int, sorted_num))
# Glue the digit list back into one string and print it as an integer.
print(int("".join(sorted_num)))
# join is the function that merges a list back into a single string
|
989,504 | 4e7dc3018a47c081f378bea5f915f7ef9caee644 | ##################################################
# Finding a Shared Spliced Motif
#
# http://rosalind.info/problems/LCSQ/
#
# Given: Two DNA strings s and t (each having length
# at most 1 kbp) in FASTA format.
#
# Return: A longest common subsequence of s and
# t. (If more than one solution exists, you may
# return any one.)
#
# AUTHOR : dohlee
##################################################
# Your imports here
from GC import Fasta
from GLOB import DPMatrix
# Your codes here
def longest_common_subsequence(seq1, seq2):
    """Compute a longest common subsequence (LCS) of *seq1* and *seq2*.

    Uses an alignment DP with match=1 and zero mismatch/gap penalties, then
    reads the LCS off the matched positions of the aligned (augmented)
    sequences. If several LCSs exist, any one may be returned.
    """
    dp = DPMatrix(seq1, seq2, match=1, mismatch=0, gap=0)
    dp.fill()
    aligned1, aligned2 = dp.augmented_sequences()
    # Characters aligned to themselves form the common subsequence.
    return ''.join(a for a, b in zip(aligned1, aligned2) if a == b)
if __name__ == '__main__':
    # Load the data: expects the FASTA file to hold exactly two records
    # (the unpack raises ValueError otherwise).
    seq1, seq2 = [seq for header, seq in Fasta('../../datasets/rosalind_LCSQ.txt')]
    # Print output to the answers directory alongside other Rosalind results.
    with open('../../answers/rosalind_LCSQ_out.txt', 'w') as outFile:
        print(longest_common_subsequence(seq1, seq2), file=outFile)
|
989,505 | 58cd0f9cec9261cf6b387c07b123f4c3be7341cf | # Source: https://pymotw.com/2/socket/udp.html
import socket, sys, time
# Command-line arguments: target host, target port, number of messages.
host = sys.argv[1]
textport = sys.argv[2]
n = sys.argv[3]
# UDP socket used for sending to the server.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
port = int(textport)
server_address = (host, port)
# Second UDP socket, bound to a fixed local port, used for receiving replies.
d= socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
p = 1007
sa = ('localhost', p)
d.bind(sa)
i = 1
# NOTE(review): the outer `while 1` runs exactly once (unconditional break
# after the send loop); the inner send loop runs up to n times; the
# innermost `while True` handles a single datagram then breaks.
while 1:
    while(i<=int(n)):
        data = "Message " + str(i)
        if not len(data):
            break
        # s.sendall(data.encode('utf-8'))
        s.sendto(data.encode('utf-8'), server_address)
        while True:
            # recvfrom's argument is a receive buffer size in bytes; the
            # port number is used here only as a convenient value -- any
            # datagram larger than `port` bytes would be truncated.
            buf, address = d.recvfrom(port)
            if not len(buf):
                break
            print ("Received %s bytes from %s %s: " % (len(buf), address, buf ))
            break
        i= i + 1
    break
s.shutdown(1)
|
989,506 | 16154401ba585baaa1bd91226ab9b5a0f3c7d53e | import python_sns.config as config
import python_sns.core as core
from cyrm_python_tools_framework.framework import post_parse_log, run_id
import logging
# Construct/register the tool's logger through the wrapper framework.
# NOTE(review): run_id() is called twice and passed to two different
# parameters -- confirm both are meant to receive a (possibly distinct)
# fresh id.
post_parse_log(config.TOOLNAME, "", run_id(), run_id())
LOGGER = logging.getLogger(config.TOOLNAME)
def main():
    """
    Main entry point.

    Validates the configuration set; on failure, logs every validation
    message. On success, runs the tool core and logs (rather than
    propagates) runtime failures so the framework retains control.
    """
    configuration_set = config.Configuration()
    good, messages = configuration_set.valid()
    if not good:
        for msg in messages:
            LOGGER.error(msg)
    else:
        try:
            core.main(configuration_set)
        # Fix: catch Exception, not BaseException -- the old handler also
        # swallowed SystemExit and KeyboardInterrupt, making the process
        # hard to stop cleanly.
        except Exception as err:
            # logger.exception records the traceback, not just str(err).
            LOGGER.exception(err)
if __name__ == "__main__":
    main()
989,507 | 56c30403e8db2c53d61bd992fbfe4a9a216a9cc7 | import sys, zipfile, os, shutil, json, traceback, logging
sys.path.append("../")
from lib.utils import download_and_extract_zip, parse_configurations
from lib.db_handler import db_handler
# Directory containing this script; downloads are placed next to it.
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
# Yearly NVD JSON 1.1 feed archive: BASE_URL + <year> + EXTENSION.
BASE_URL = "https://nvd.nist.gov/feeds/json/cve/1.1/nvdcve-1.1-"
EXTENSION = ".json.zip"
BASE_NAME = "nvdcve-1.1-"
# Known operating systems, loaded once at import time from a sibling file.
OPERATING_SYSTEMS = None
with open("./operating_systems.json") as f:
    OPERATING_SYSTEMS = json.load(f)
def main():
    """Download the yearly NVD CVE 1.1 JSON feeds (2002-2020), parse each
    CVE entry and insert CVEs plus their CWE, vendor, product and OS
    relations into the database via db_handler.

    Feed files are downloaded, parsed and deleted again one year at a time.
    Malformed entries are logged and skipped, not fatal.
    """
    global OPERATING_SYSTEMS
    # (vendor, product) pairs already seen -> avoid re-inserting products.
    duplicates = set()
    # Vendor names already inserted -> reuse their ids on later hits.
    inserted_vendors = set()
    for year in range(2002,2021):
        print("Parsing year %s"% (year,))
        download_and_extract_zip(BASE_URL+str(year)+EXTENSION, DIR_PATH, BASE_NAME+str(year))
        JSON_NAME = BASE_NAME + str(year) + ".json"
        JSON_PATH = os.path.join(DIR_PATH, BASE_NAME+str(year), JSON_NAME)
        # c counts the CVEs successfully inserted for this year.
        c = 0
        cves = []
        with open(JSON_PATH, 'r') as f:
            cves = json.load(f)
        if len(cves) < 1:
            print("File at path %s is empty!" % (JSON_PATH,))
            sys.exit(1)
        # NOTE(review): print_exception is assigned but never used.
        print_exception = False
        for count, cve in enumerate(cves['CVE_Items']):
            try:
                cve_id = cve['cve']['CVE_data_meta']['ID']
                description = cve['cve']['description']['description_data'][0]['value']
                # Rejected CVE candidates carry this marker in the description.
                if "** REJECT ** DO NOT USE THIS CANDIDATE NUMBER." in description:
                    continue
                # "YYYY-MM-DDTHH:MMZ" -> "YYYY-MM-DD HH:MM" (drop trailing Z).
                published_date = cve['publishedDate'].split("T")
                published_date = str(published_date[0]) + ' ' + str(''.join(published_date[1][:-1]))
                last_modified_date = cve['lastModifiedDate'].split("T")
                last_modified_date = str(last_modified_date[0]) + ' ' + str(''.join(last_modified_date[1][:-1]))
                cwes = [x['value'] for x in cve['cve']['problemtype']['problemtype_data'][0]['description']]
                tmp=cve['cve']['references']['reference_data']
                if len(tmp) > 0:
                    # All reference URLs stored as a single '|'-separated string.
                    reference_links= '|'.join([x['url'] for x in tmp])
                else:
                    reference_links=None
                # CVSS metrics default to "absent"; filled from baseMetricV2
                # when present, otherwise from baseMetricV3 below.
                cvss = 0
                exploitability_score=None
                impact_score=None
                severity=None
                cvss_access_vector=None
                cvss_vector_string=None
                cvss_access_complexity=None
                cvss_authentication=None
                cvss_confidentiality_impact=None
                cvss_integrity_impact=None
                cvss_availability_impact=None
                tmp=cve['impact']
                if "baseMetricV2" in tmp:
                    cvss = tmp['baseMetricV2']['cvssV2']['baseScore']
                    tmp = tmp["baseMetricV2"]
                    exploitability_score=tmp['exploitabilityScore']
                    impact_score=tmp['impactScore']
                    severity=tmp['severity']
                    cvss_access_vector=tmp['cvssV2']['accessVector']
                    cvss_vector_string=tmp['cvssV2']['vectorString']
                    cvss_access_complexity=tmp['cvssV2']['accessComplexity']
                    cvss_authentication=tmp['cvssV2']['authentication']
                    cvss_confidentiality_impact=tmp['cvssV2']['confidentialityImpact']
                    cvss_integrity_impact=tmp['cvssV2']['integrityImpact']
                    cvss_availability_impact=tmp['cvssV2']['availabilityImpact']
                elif "baseMetricV3" in tmp:
                    cvss = tmp['baseMetricV3']['cvssV3']['baseScore']
                    tmp = tmp["baseMetricV3"]
                    exploitability_score=tmp['exploitabilityScore']
                    impact_score=tmp['impactScore']
                    severity=tmp['cvssV3']['baseSeverity']
                    # CVSS v3 renamed access* to attack*; values are stored
                    # under the v2-style column names.
                    cvss_access_vector=tmp['cvssV3']['attackVector']
                    cvss_vector_string=tmp['cvssV3']['vectorString']
                    cvss_access_complexity=tmp['cvssV3']['attackComplexity']
                    # v3 has no "authentication" metric; placeholder value.
                    cvss_authentication="-"
                    cvss_confidentiality_impact=tmp['cvssV3']['confidentialityImpact']
                    cvss_integrity_impact=tmp['cvssV3']['integrityImpact']
                    cvss_availability_impact=tmp['cvssV3']['availabilityImpact']
                db_handler.insert_cve(
                    cve_id,
                    description,
                    cvss,
                    published_date,
                    last_modified_date,
                    reference_links,
                    exploitability_score,
                    impact_score,
                    severity,
                    cvss_access_vector,
                    cvss_vector_string,
                    cvss_access_complexity,
                    cvss_authentication,
                    cvss_confidentiality_impact,
                    cvss_integrity_impact,
                    cvss_availability_impact
                )
                for cwe in cwes:
                    db_handler.insert_xref_cve_cwe(cve_id, cwe)
                c+=1
                parsed_config = parse_configurations(cve['configurations']['nodes'])
                if len(parsed_config) == 0:
                    continue
                for config in parsed_config:
                    # Known (vendor, product): only add the CVE<->product link.
                    if (config['vendor'],config['product']) in duplicates:
                        p_id = db_handler.get_product(config['product'])['product_id']
                        if p_id:
                            db_handler.insert_xref_cve_product(cve_id, db_handler.get_product(config['product'])['product_id'])
                        continue
                    else:
                        duplicates.add((config['vendor'],config['product']))
                    vendor_id = None
                    product_id = None
                    # ===============================
                    # Here insert version of the
                    # product later on in development
                    if config['vendor'] not in inserted_vendors:
                        inserted_vendors.add(config['vendor'])
                        vendor_id = db_handler.insert_vendor(str(config["vendor"]))
                        product_id = db_handler.insert_product(str(config["product"]), vendor_id)
                    else:
                        vendor_id = db_handler.get_vendor(str(config['vendor']))['vendor_id']
                        product_id = db_handler.insert_product(str(config['product']), vendor_id)
                    db_handler.insert_xref_cve_product(cve_id, product_id)
                    # If the product matches a known OS entry, record it and
                    # remove the name so it is matched at most once.
                    for n, operating_system in enumerate(OPERATING_SYSTEMS):
                        if config['product'] in operating_system['product_name'] and config['vendor'] in operating_system['vendor_name']:
                            db_handler.insert_operating_system(operating_system['name'], product_id, operating_system["os_type"])
                            OPERATING_SYSTEMS[n]["product_name"].remove(config['product'])
                            break
            except Exception:
                # NOTE(review): traceback.print_exc() prints to stderr and
                # returns None, so logging.warning receives None;
                # traceback.format_exc() was probably intended.
                logging.warning(traceback.print_exc())
                continue
            print("Status[CVE]: %s/%s " % (count+1,len(cves['CVE_Items'])), end="\r")
        print()
        print("Successfully inserted %s CVE-s for year %s" % (c, year))
        # NOTE(review): len(cves) counts the top-level JSON keys, not the CVE
        # entries, so this condition fires almost always; probably
        # len(cves['CVE_Items']) was intended.
        if c != len(cves):
            print("Didn't insert %s cves cause of missformated data" % (len(cves['CVE_Items'])-c,))
        print("Removing files for year %s.." % (year,))
        shutil.rmtree(os.path.join(DIR_PATH, BASE_NAME + str(year)))
        os.remove(os.path.join(DIR_PATH, BASE_NAME + str(year) + ".zip"))
        print("Removed")
        print("="*10)
if __name__ == '__main__':
    main()
else:
    # Importing this module is unsupported; it aborts the importer.
    print("cve.py is used as a standalone executable!")
    sys.exit(1)
|
989,508 | a5a2719dc6157cabd34cf49912add11f86066c4d | #Range
#Range這個型別可用來創建並儲存特定範圍內的整數,故得名Range。
#必須特別注意的是,一旦Range被創建了,裡面的內容是不可修改的
#在Python中,我們有幾個方法可以創造Range。
#Range(stop)
#stop:停止點
#Range(start, stop)
#start:起始點
#stop:停止點
#Range(start, stop, step)
#start:起始點
#stop:停止點
#step:間隔
r1 = range(10)
r2 = range(5, 50, 5)
print(type(r1))
print(r1)
print(r2)
""" 若沒有給起始值,將預設為0
若沒有給間隔,將預設為1
遇到停止點後,創造的過程就會終止,因此Range中的數字將不會包含停止點 """
#Tuple
#Tuple可用來存放一組資料。
#這組資料的個數不限,型別也不須相同。
#同Range,一旦被創造,將無法修改內容。
#值與值之間,要以,隔開。
t1 = 10, 20
# it can hold different types of data
t2 = 10, 'hello world'
print(type(t1))
print(t1)
print(t2)
#List
#List即為Python中的陣列(Array)。
#如果你不知道什麼陣列也沒關係,讓哥來解釋給你聽。
#陣列是一種容器,可用來儲存眾多資料。與Tuple最大的不同處在於,
#針對一個以創建的陣列,你可以隨時增加或減少其內部資料的個數,也可以修改裡面的內容。
arr1 = [1, 2, 3]
arr2 = [10, 'hello world', 8.7]
#arr1[0]從1變成[1,2,3] 已經被取代了
arr1[0] = [1, 2, 3]
arr2.append('新增list數量')
del arr1[1]
print(type(arr1))
print(arr1)
print(arr2)
#向List添加元素,方法有如下4种方法(append(),extend(),insert(), +加号)
""" append() 追加单个元素到List的尾部,只接受一个参数
list1=['a','b']
list1.append('c')
list1
['a', 'b', 'c'] """
#extend() 将一个列表中每个元素分别添加到另一个列表中,只接受一个参数;extend()相当于是将list B 连接到list A上。
""" list1
['a', 'b', 'c']
list1.extend('d')
list1
['a', 'b', 'c', 'd']
"""
#3. insert() 将一个元素插入到列表中,但其参数有两个(如insert(1,"g")),第一个参数是索引点,即插入的位置,第二个参数是插入的元素。
""" list1
['a', 'b', 'c', 'd']
list1.insert(1,'x')
list1
['a', 'x', 'b', 'c', 'd'] """
# + 加号,将两个list相加,会返回到一个新的list对象,注意与前三种的区别。
# 前面三种方法(append, extend, insert)可对列表增加元素的操作,他们没有返回值,是直接修改了原数据对象。
# 注意:将两个list相加,需要创建新的list对象,从而需要消耗额外的内存,
# 特别是当list较大时,尽量不要使用“+”来添加list,而应该尽可能使用List的append()方法。
"""
list1=['a', 'x', 'b', 'c', 'd']
list2=['y','z']
list3=list1+list2
list3
['a', 'x', 'b', 'c', 'd', 'y', 'z'] """
#list 刪除方法del pop 切片删除 remove
""" 使用del删除指定元素
li = [1, 2, 3, 4]
del li[3]
print(li)
# Output [1, 2, 3] """
""" 2. 使用list方法pop删除元素
li = [1, 2, 3, 4]
li.pop(2)
print(li)
# Output [1, 2, 4] """
#注:指定pop参数,将会删除该位置的元素;无参数时默认删除最后一个元素
""" 3. 使用切片删除元素
li = [1, 2, 3, 4]
li = li[:2] + li[3:]
print(li)
# Output [1, 2, 4] """
""" 4. 使用list方法remove删除指定值的元素
li = [1, 2, 3, 4]
li.remove(3)
print(li)
# Output [1, 2, 4]
"""
#remove方法删除指定值的元素,与其他方法不同。
""" 宣告陣列可用[]
陣列內的資料型別不必相同
對於任何序列型別來說,我們可用[index]的語法來存取其中的元素。
那這個index要帶多少才能拿到想要的元素呢? 簡單來說,大部分的程式語言,
容器的index都是從0起算的。因此假設今天有個陣列有三個元素,他們對應的index就為0, 1, 2。
"""
#String (字串)
#其實我們一開始介紹過的基本型別String,也是一種序列型別喔。
#一個比較好理解的方式為,其實String就像是一堆字元排在一起組合而成的 (字元指的為一個字)。
#需要注意的是,字串的內容也是不能修改的。
str1 = 'hello python'
str2 = str1
# str2[0] = 'y'
# a = a + b could be written as a += b
str2 += ' journey'
print(str2 is str1)
print(str1)
result = str2.split(' ')
print(result)
result_back = '***'.join(result)
print(result_back)
""" 如果今天想要用兩個字串,組合成一個新的字串,我們可用+來做到這件事
假設今天我們想判斷兩個變數是否共享記憶體位置 (判斷兩者是否為同一個人),可用is來做到
兩個很重要String方法一定要知道,split & join
split可將一個字串用指定的方式(字串)拆散為陣列。
以上述的例子來說,我們將'hello python journy',
以空白' '隔開成陣列,於是他便成了['hello', 'python', 'journey']
join可將一個陣列,用指定的方式(字串)組合成字串。
以上述的例子來說,我們['hello', 'python', 'journey']用'***'組合,
就成了hello***python***journey
"""
#序列型別的操作
#1. 取出部分的內容
#若是想從一個序列容器中取出一部份的內容來使用,
#我們可以用seq[start:stop:step]這樣的語法來達成 (是不是有點眼熟啊!)
#必需要注意的是,上面的start, stop, step要填入的是元素的index。
str1 = 'hello world'
arr1 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
# mind the stop
arr2 = arr1[0:5]
# -1 represent the last element
arr3 = arr1[0:-1:2]
# you can ignore the args...
arr4 = arr1[:]
print(arr2)
print(arr3)
print(arr4)
print(arr4 is arr1)
print(str1[5:])
# print(arr1[:-1])
""" step不給預設為1
遇到stop則停止取出,所以index為stop的元素不會被取出
可用-1代表最後一個元素的index
start不給則預設為0
stop不給則會將start之後的元素都取出
seq[start:stop:step]會製造新的容器,因此他們不共享記憶體位置
猜猜看,最後一行會印出什麼,拿掉註解再執行,你猜對了嗎? """
""" 常見的序列容器操作方法
操作 描述
x in s 檢查X是否存在於S這個容器之中
x not in s 檢查X是否不存在於S這個容器之中
s + t 容器S與容器T的內容相加
s * n 三個容器S => s + s + s
len(s) 取得容器的長度 (裡面有幾個元素的意思)
min(s) 取得容器內的最小值 (前提是裡面的元素要能比大小啊!)
max(s) 取得容器內的最大值
s.index(x[,i[,j]]) X元素在S容器的索引值,如果有給i, j就只會在index為i~j的範圍找
s.count(x) X這個元素在S這個容器內出現幾次
2. 修改序列容器的內容
操作 描述
s[i] = x index為i的元素的內容置換為X
s[i:j] = t index從i到j的元素內容置換為X
s[i:j:k] = t index從i到j的元素,以step為k的方式,將內容置換為X
del s[i:j] 把index從i到j的元素刪除
del s[i:j:k] index從i到j的元素,以step為k的方式刪除元素
s.append(x) 將X塞到S容器的最後面
s.clear() 將S容器的內容全部刪除(same as del s[:])
s.copy() 複製S容器(same as s[:])
s.extend(t) 同 s = s + t
s.insert(i,x) 在S容器index為i的位置將X插入,原有的元素(們)將會往後移
s.pop([i]) 將index為i的元素取出,並將其移出容器
s.remove(x) 刪除第一個找到的X
s.reverse() 讓容器的內容順序顛倒
今天我們介紹了序列容器,這個東西不論在什麼語言都是非常重要的。
因為大部分的情況下,我們都要同時處理很多筆資料,
因此序列容器的操作都必須非常熟悉才行。
以機器學習來說,通常會取得很多數據給程式來學習,
這時候要怎麼操作儲存這些資料的容器,就是一大重點呢! """ |
989,509 | 6f3ecda1d6c69334283403f3c293893739f852cb | import numpy as np
from scipy.stats import multivariate_normal
from sklearn.cluster import KMeans
import time
def em_mog(X, k, max_iter=20):
    """
    Learn a Mixture of Gaussians model using the EM-algorithm.
    Args:
        X: The data used for training [n, num_features]
        k: The number of gaussians to be used
        max_iter: Maximum number of EM iterations (also passed to KMeans)
    Returns:
        phi: A vector of probabilities for the latent vars z of shape [k]
        mu: A marix of mean vectors of shape [k, num_features]
        sigma: A list of length k of covariance matrices each of shape [num_features, num_features]
        w: A vector of weights for the k gaussians per example of shape [n, k] (result of the E-step)
    """
    # Initialize variables: unit covariances and a uniform component prior.
    mu = None
    sigma = [np.eye(X.shape[1]) for i in range(k)]
    phi = np.ones([k,])/k
    ll_prev = float('inf')
    start = time.time()
    #######################################################################
    # TODO:                                                               #
    # Initialize the means of the gaussians. You can use K-means!         #
    #######################################################################
    # Seed the component means with K-means cluster centers.
    initKmeans = KMeans(n_clusters=k, max_iter=max_iter).fit(X)
    mu = initKmeans.cluster_centers_
    #######################################################################
    #                         END OF YOUR CODE                            #
    #######################################################################
    for l in range(max_iter):
        # E-Step: compute the probabilities p(z==j|x; mu, sigma, phi)
        w = e_step(X, mu, sigma, phi)
        # M-step: Update the parameters mu, sigma and phi
        phi, mu, sigma = m_step(w, X, mu, sigma, phi, k)
        # Check convergence
        ll = log_likelihood(X, mu, sigma, phi)
        print('Iter: {}/{}, LL: {}'.format(l+1, max_iter, ll))
        # NOTE(review): this ratio test assumes successive log-likelihoods
        # keep the same sign; with ll_prev = inf the first check never fires.
        if ll/ll_prev > 0.999:
            print('EM has converged...')
            break
        ll_prev = ll
    # Get stats
    exec_time = time.time()-start
    print('Number of iterations: {}, Execution time: {}s'.format(l+1, exec_time))
    # Compute final assignment under the converged parameters.
    w = e_step(X, mu, sigma, phi)
    return phi, mu, sigma, w
def log_likelihood(X, mu, sigma, phi):
    """
    Return the log-likelihood of the data under the current MoG parameters.

    Computes sum_i log( sum_j phi[j] * N(x_i; mu[j], sigma[j]) ).

    Fix: the previous version used the builtin sum() over the rows, which
    returned a length-1 ndarray instead of a scalar; np.sum + float() now
    yields a plain Python float (callers only print/compare it, so this is
    backward compatible).
    """
    # Per-example mixture density, shape [n, 1].
    density = np.zeros((X.shape[0], 1))
    k = mu.shape[0]
    for j in range(k):
        density += multivariate_normal(mu[j, :], sigma[j]).pdf(X)[:, np.newaxis] * phi[j]
    return float(np.sum(np.log(density)))
def e_step(X, mu, sigma, phi):
    """
    Computes the E-step of the EM algorithm.

    Args:
        X: Data of shape [n, num_features]
        mu: Component means of shape [k, num_features]
        sigma: List of k covariance matrices [num_features, num_features]
        phi: Mixture weights of shape [k]
    Returns:
        w: Responsibilities p(z==j|x; mu, sigma, phi) for the k gaussians
           per example, shape [n, k]; each row sums to 1.
    """
    k = mu.shape[0]
    w = np.zeros((X.shape[0], k))
    for j in range(k):
        # Joint density p(x, z==j) = p(x|z==j) * p(z==j).
        w[:, j] = multivariate_normal(mu[j, :], sigma[j]).pdf(X) * phi[j]
    # Bug fix: normalize each row so w holds the posterior p(z==j|x) the
    # docstring promises (and which the M-step update formulas assume),
    # not the unnormalized joint density. May divide by ~0 if every
    # component's density underflows for some example.
    w /= np.sum(w, axis=1, keepdims=True)
    return w
def m_step(w, X, mu, sigma, phi, k):
    """
    Computes the M-step of the EM algorithm.

    Args:
        w: Responsibilities of shape [n, k]
        X: Data of shape [n, num_features]
        mu, sigma, phi: Current parameters (phi/mu are recomputed; sigma is
            updated in place and returned)
        k: Number of mixture components
    Returns:
        (phi, mu, sigma) updated parameters.
    """
    # Bug fix: the old code used the *builtin* sum(w, 1), which treats 1 as
    # the start value and adds 1.0 to every component's weight total,
    # corrupting phi. np.sum(w, axis=0) is the column-wise total required.
    weight_totals = np.sum(w, axis=0)            # shape [k]
    phi = weight_totals / w.shape[0]
    # Weighted means: mu[j] = sum_i w[i, j] * x_i / sum_i w[i, j].
    mu = np.dot(w.T, X) / weight_totals[:, np.newaxis]
    for j in range(k):
        diff = X - mu[j, :]                      # [n, num_features]
        # Weighted scatter matrix, vectorized (same result as the former
        # per-example Python loop over outer products, but O(n*d^2) in C).
        sigma[j] = np.dot(diff.T, diff * w[:, j][:, np.newaxis]) / weight_totals[j]
    return phi, mu, sigma
|
989,510 | 5c1551247674ff35e211d288233e0438cfbeba16 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""ThreadLocal解决了全局变量需要加锁,局部变量传递麻烦的问题,下例演示了它的用法"""
import threading
# One thread-local object shared by all threads; each thread sees its own
# independent `name` attribute on it (no locking needed).
local=threading.local()
def func(name):
    # NOTE: Python 2 print statements; this script is not Python 3 compatible.
    print 'current thread:%s'%threading.currentThread().name
    # Bind the argument to this thread's private slot on `local`.
    local.name=name
    print '%s in %s' %(local.name,threading.currentThread().name)
t1=threading.Thread(target=func,args=('haobo',))
t2=threading.Thread(target=func,args=('lina',))
t1.start()
t2.start()
# Wait for both workers before the script exits.
t1.join()
t2.join()
|
989,511 | 149bb7e5fce98be6aa57110d76e753472b120c83 | from flask import Flask
import folium
import folium.plugins as plugins
import numpy as np
import pandas as pd
import requests
import geopandas
import branca
from datetime import datetime, timedelta
from folium.plugins import FloatImage
from folium.plugins import Draw
from folium.plugins import MiniMap
from folium.features import GeoJsonPopup, GeoJsonTooltip
app = Flask(__name__)
@app.route('/')
def mapa():
    """Render the interactive glacier map (Folium) as inline HTML."""
    # Fetch the sub-basin polygons from the GeoServer WFS as GeoJSON.
    response = requests.get(
        "https://ide.dataintelligence-group.com/geoserver/glaciares/ows?service=WFS&version=1.0.0&request=GetFeature&typeName=glaciares%3AR14_Subcuencas_Glaciares&maxFeatures=50&outputFormat=application%2Fjson"
    )
    data = response.json()
    states = geopandas.GeoDataFrame.from_features(data, crs="EPSG:4326")
    # return states.to_html(header="true", table_id="table")
    # Base map centered on Santiago de Chile.
    m = folium.Map(location=[-33.48621795345005, -70.66557950912359], zoom_start=4)
    # Glacier raster layer served via WMS, overlaid on the base map.
    w = folium.WmsTileLayer(url = 'https://ide.dataintelligence-group.com/geoserver/glaciares_r14/wms?',
        layers = 'glaciares_r14:2021q1',
        fmt ='image/png',
        transparent = True,
        name = "Glaciares",
        control = True,
        attr = "Mapa de Chile"
    )
    w.add_to(m)
    # Click popup for each sub-basin polygon.
    # NOTE(review): the alias text names a different column (NOM_CUENCA)
    # than the field shown (COD_CUENCA) -- confirm which was intended.
    popup = GeoJsonPopup(
        fields=["COD_CUENCA"],
        aliases=["NOM_CUENCA"],
        localize=True,
        labels=True,
        style="background-color: yellow;",
    )
    # Hover tooltip with the same field.
    tooltip = GeoJsonTooltip(
        fields=["COD_CUENCA"],
        aliases=["NOM_CUENCA:"],
        localize=True,
        sticky=False,
        labels=True,
        style="""
        background-color: #F0EFEF;
        border: 2px solid black;
        border-radius: 3px;
        box-shadow: 3px;
        """,
        max_width=800,
    )
    g = folium.GeoJson(
        states,
        tooltip=tooltip,
        popup=popup
    ).add_to(m)
    folium.LayerControl().add_to(m)
    # Return the map widget rendered as a self-contained HTML snippet.
    return m._repr_html_()
@app.route('/tabla')
def tabla():
    """Render the WFS sub-basin attributes as a plain HTML table."""
    # Same WFS query as the map view; only the presentation differs.
    response = requests.get(
        "https://ide.dataintelligence-group.com/geoserver/glaciares/ows?service=WFS&version=1.0.0&request=GetFeature&typeName=glaciares%3AR14_Subcuencas_Glaciares&maxFeatures=50&outputFormat=application%2Fjson"
    )
    data = response.json()
    states = geopandas.GeoDataFrame.from_features(data, crs="EPSG:4326")
    return states.to_html(header="true", table_id="table")
if __name__ == '__main__':
    # Run the Flask development server.
    app.run()
|
989,512 | 0dd34e0e745813cce7829cda541539c182658ff0 | from django import forms
from .models import Encuesta
class EncuestaForm(forms.ModelForm):
    """Survey form for the Encuesta model.

    Labels and choice values are user-facing Spanish strings and are stored
    as entered.
    """
    # Q1: 1-5 rating rendered as checkboxes (blank labels -> checkbox only).
    CHOICES=[('1',' '),
             ('2',' '),
             ('3',' '),
             ('4',' '),
             ('5',' ')]
    like = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple(attrs={'class' : 'custom'}),choices=CHOICES,label='1. ¿Te gustan las características del equipo?')
    # Q2: 6-10 rating, same checkbox style without extra CSS class.
    CHOICES2=[('6',' '),
              ('7',' '),
              ('8',' '),
              ('9',' '),
              ('10',' ')]
    design = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple,choices=CHOICES2,label='2. ¿Cómo calificarías su diseño?')
    # Q3: free-text answer in a small textarea, capped at 100 characters.
    more_like = forms.CharField(max_length=100, widget=forms.Textarea(attrs={'rows': 3, 'cols': 50}),label="3. ¿Qué es lo que más te gusta?")
    # Q4: single-choice (radio) price opinion.
    CHOICES3=[('11',' '),
              ('12',' '),
              ]
    price = forms.ChoiceField(widget=forms.RadioSelect(attrs={'class' : 'custom2'}),choices=CHOICES3,label='4. ¿Qué te parece su precio? ($2,899.00)')
    class Meta:
        # Bind the form to the Encuesta model and expose these fields.
        model = Encuesta
        fields= ['like','design','more_like','price']
|
989,513 | 2403f08b83156ac7265b0497a09fd2d28f1d3ce8 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import thread
import threading
import RPi.GPIO as GPIO
from classes.ShiftRegister595 import ShiftRegister595
'''A standard pattern like so:
10000000
11000000
01000000
01100000
and so on.
'''
_pattern = [0x80, 0xC0, 0x40, 0x60, 0x20, 0x30, 0x10, 0x18, 0x08, 0x0C, 0x04, 0x06, 0x02, 0x03, 0x01, 0x00]


class FlowingLEDs:
    """Animates a band of LEDs so the light appears to flow back and forth.

    The speed of the animation is controlled by two delays: ``delay``
    determines the pause between switching direction, while the in-between
    delay determines the pause between shifts from one pattern element to
    the next.

    Fix: the descriptive strings were previously bare string statements
    placed at class level *between* methods (no-ops that documented
    nothing); they are now real docstrings inside the class/methods.
    """

    def __init__(self, shift_register, delay, pattern=_pattern):
        """Sets up this class.

        :param shift_register: A reference to a shift register which is used
            to enable / disable the single LEDs.
        :param delay: The delay between reversing the direction of the flow.
        :param pattern: A bit pattern which describes which LEDs are on and
            off per step (defaults to the shared module-level pattern).
        """
        self.__inbetween_loop_delay = 0.035  # 0.03
        self.__loop_delay = delay
        self.__shift_register = shift_register
        self.__pattern = pattern

    def __cleanup(self):
        """Cleans up everything (releases the shift register)."""
        self.__shift_register.cleanup()
        print("FlowingLEDs cleaned up.")

    def __ascending_loop(self, stopper):
        # Walk the pattern from last to first element; abort early on stop.
        for i in range(len(self.__pattern) - 1, -1, -1):
            if stopper.is_set():
                return
            self.__shift_register.serial_write(self.__pattern[i])
            self.__shift_register.output()
            stopper.wait(self.__inbetween_loop_delay)

    def __descending_loop(self, stopper):
        # Walk the pattern from first to last element; abort early on stop.
        for i in range(0, len(self.__pattern), 1):
            if stopper.is_set():
                return
            self.__shift_register.serial_write(self.__pattern[i])
            self.__shift_register.output()
            stopper.wait(self.__inbetween_loop_delay)

    def __loop(self, arg1, stopper):
        # Alternate directions until the stop event fires, then clean up.
        while not stopper.is_set():
            self.__ascending_loop(stopper)
            stopper.wait(self.__loop_delay)
            self.__descending_loop(stopper)
            stopper.wait(self.__loop_delay)
        self.__cleanup()

    def set_pattern(self, pattern):
        """Replace the bit pattern used for the animation."""
        self.__pattern = pattern

    def set_delay(self, delay):
        """Set the delay between direction reversals."""
        self.__loop_delay = delay

    def set_inbetween_delay(self, delay):
        """Set the delay between successive pattern steps."""
        self.__inbetween_loop_delay = delay

    def start_flow(self):
        """Start the animation on a background thread."""
        self.stop_event = threading.Event()
        thread.start_new_thread(self.__loop, (1, self.stop_event))

    def stop_flow(self):
        """Signal the animation thread to stop and give it time to exit."""
        self.stop_event.set()
        time.sleep(0.1)
|
989,514 | 01eaae1e4071dd2c03e8948948b6cb8d8ca30fce | """Rss feed
Revision ID: 96693ddf7038
Revises: 6ba25e05a1c7
Create Date: 2020-06-08 14:59:42.606219
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '96693ddf7038'
down_revision = '6ba25e05a1c7'
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # New table holding RSS feed sources (name + image URL).
    op.create_table('rss_feeds',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(), nullable=True),
    sa.Column('image', sa.String(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # Link news items to their originating feed and record publish metadata.
    op.add_column('news', sa.Column('link', sa.String(), nullable=True))
    op.add_column('news', sa.Column('pub_date', sa.DateTime(), nullable=True))
    op.add_column('news', sa.Column('rss_feed_id', sa.Integer(), nullable=True))
    # name=None -> let the dialect auto-name the foreign-key constraint.
    op.create_foreign_key(None, 'news', 'rss_feeds', ['rss_feed_id'], ['id'])
    op.drop_column('news', 'feedback')
    op.drop_column('news', 'author')
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Restore the columns removed in upgrade().
    op.add_column('news', sa.Column('author', sa.VARCHAR(), autoincrement=False, nullable=True))
    op.add_column('news', sa.Column('feedback', sa.BOOLEAN(), autoincrement=False, nullable=True))
    # NOTE(review): dropping an auto-named FK via name=None only works on
    # dialects where Alembic can resolve it -- confirm on the target DB.
    op.drop_constraint(None, 'news', type_='foreignkey')
    op.drop_column('news', 'rss_feed_id')
    op.drop_column('news', 'pub_date')
    op.drop_column('news', 'link')
    op.drop_table('rss_feeds')
    # ### end Alembic commands ###
|
989,515 | 9ad355c2f6a2ce7d3b9411029c01db43a8a2c2d1 | # Name: Zaki Ahmed, Bryan Rodriguez, John Tran
# Date: 22 Feb 2021
# Class: CS 362
# Assignment: Group Project: Part 1
from task import conv_num, conv_endian
import unittest
import task
import random
import datetime
class TestCase(unittest.TestCase):
def test1(self):
self.assertTrue(True)
def test100(self):
string = "12345"
output = 12345
self.assertEqual(output, conv_num(string))
def test101(self):
string = "-123.45"
output = -123.45
self.assertEqual(output, conv_num(string))
def test102(self):
string = ".45"
output = 0.45
self.assertEqual(output, conv_num(string))
def test103(self):
string = "123."
output = 123.0
self.assertEqual(output, conv_num(string))
def test104(self):
string = "0xAD4"
output = 2772
self.assertEqual(output, conv_num(string))
def test105(self):
string = "0xAZ4"
output = None
self.assertEqual(output, conv_num(string))
def test106(self):
string = "12345A"
output = None
self.assertEqual(output, conv_num(string))
def test107(self):
string = "12.3.45"
output = None
self.assertEqual(output, conv_num(string))
def test108(self):
string = "100000000000000"
output = 100000000000000
self.assertEqual(output, conv_num(string))
def test109(self):
string = ""
output = None
self.assertEqual(output, conv_num(string))
def test110(self):
string = " "
output = None
self.assertEqual(output, conv_num(string))
def test111(self):
string = "-0x."
output = None
self.assertEqual(output, conv_num(string))
def test112(self):
string = "-"
output = None
self.assertEqual(output, conv_num(string))
def test113(self):
string = "."
output = 0.0
self.assertEqual(output, conv_num(string))
def test114(self):
string = "12-34"
output = None
self.assertEqual(output, conv_num(string))
def test115(self):
string = 1234
output = None
self.assertEqual(output, conv_num(string))
def test116(self):
string = "12*34"
output = None
self.assertEqual(output, conv_num(string))
# Function 2 test cases...
def test201(self):
result = task.my_datetime(0)
self.assertEqual(result, "01-01-1970")
def test202(self):
result = task.my_datetime(123456789)
self.assertEqual(result, "11-29-1973")
def test203(self):
result = task.my_datetime(9876543210)
self.assertEqual(result, "12-22-2282")
def test204(self):
result = task.my_datetime(201653971200)
self.assertEqual(result, "02-29-8360")
def test205(self):
result = task.my_datetime(86399)
self.assertEqual(result, "01-01-1970")
def test206(self):
result = task.my_datetime(86400)
self.assertEqual(result, "01-02-1970")
def test207(self):
result = task.my_datetime(86401)
self.assertEqual(result, "01-02-1970")
def test208(self):
result = task.my_datetime(86400 * 2)
self.assertEqual(result, "01-03-1970")
def test209(self):
array1 = []
array2 = []
# Create loop for # of instances to randomly test...
for i in range(0, 10000):
# Generate random number of seconds from 0 till max range...
num = random.randint(0, 253402300799)
# Use Python internal tools to create correct value...
result1 = datetime.datetime.utcfromtimestamp(num)
string1 = result1.strftime("%m-%d-%Y")
# Run my function to obtain is output
string2 = task.my_datetime(num)
# Append random entries into respecitve arrays...
array1.append(string2)
array2.append(string1)
# Compare the two outputs
self.assertEqual(array1, array2)
def test210(self):
result = task.my_datetime(253402300799 * 2)
self.assertEqual(result, "12-31-9999")
# Function 3 test cases
def test301(self):
num = 954786
example = '0E 91 A2'
self.assertEqual(example, conv_endian(num, 'big'))
def test302(self):
num = 954786
example = '0E 91 A2'
self.assertEqual(example, conv_endian(num))
def test303(self):
num = -954786
example = '-0E 91 A2'
self.assertEqual(example, conv_endian(num))
def test304(self):
num = -95
example = '-5F'
self.assertEqual(example, conv_endian(num))
def test305(self):
num = 954786
example = 'A2 91 0E'
self.assertEqual(example, conv_endian(num, 'little'))
def test306(self):
num = -954786
example = '-A2 91 0E'
self.assertEqual(example, conv_endian(num, 'little'))
def test307(self):
example = '-A2 91 0E'
self.assertEqual(example, conv_endian(num=-954786, endian='little'))
def test308(self):
example = None
self.assertEqual(example, conv_endian(num=-954786, endian='small'))
def test309(self):
num = 954786
example = None
self.assertEqual(example, conv_endian(num, endian=''))
def test310(self):
num = 954786
example = None
self.assertEqual(example, conv_endian(num, 'icecream'))
def test311(self):
num = 1
example = '01'
self.assertEqual(example, conv_endian(num, 'big'))
def test312(self):
num = 1
example = '01'
self.assertEqual(example, conv_endian(num, 'little'))
def test313(self):
num = 1
example = None
self.assertEqual(example, conv_endian(num, 'bigfish'))
def test314(self):
num = 1
example = None
self.assertEqual(example, conv_endian(num, 'littlefish'))
def test315(self):
num = -1
example = '-01'
self.assertEqual(example, conv_endian(num, 'big'))
def test316(self):
num = -1
example = '-01'
self.assertEqual(example, conv_endian(num, 'little'))
def test317(self):
num = -1
example = None
self.assertEqual(example, conv_endian(num, 'bigfish'))
def test318(self):
num = -1
example = None
self.assertEqual(example, conv_endian(num, 'littlefish'))
def test319(self):
num = 95
example = '5F'
self.assertEqual(example, conv_endian(num))
if __name__ == '__main__':
unittest.main()
|
989,516 | 0aab06d5442fcc85a0182c71150d6a2a3262c14e | import numpy as np
def get_data(name):
    """Load ``data/<name>.csv`` and split it into features and labels.

    The last column becomes the label vector ``y``; every other column
    makes up the feature matrix ``X``.
    """
    csv_path = 'data/' + str(name) + '.csv'
    table = np.genfromtxt(csv_path, delimiter=',')
    return table[:, :-1], table[:, -1]
989,517 | 44b1a38ed0d67dde28cb391e273d7e2c3800713c | class C(object):
def __init__(self, v):
self.__value = v
def show(self):
print(self.__value)
c1 = C(10)
# Raises AttributeError: name mangling hides the attribute from outside.
#print(c1.__value)
c1.show()
# Externally accessible variables are called "properties" in Python and
# "attributes" in Ruby.
# In Python you can normally access instance variables directly (no
# getters/setters), but naming one __value in __init__ blocks direct
# outside access (including calling functions stored on it).
989,518 | 624994f372911ed86095b96da71164ed0a9ef8e5 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Adds a required `password` CharField to the organization model."""
    dependencies = [
        ('app', '0007_auto_20150803_1532'),
    ]
    operations = [
        migrations.AddField(
            model_name='organization',
            name='password',
            # NOTE(review): default=123 is an int for a CharField; existing
            # rows get the stringified value -- '123' was probably intended.
            field=models.CharField(default=123, max_length=10),
            # One-off default used only to backfill existing rows.
            preserve_default=False,
        ),
    ]
|
989,519 | 8426aa746d50c2fafae9f967be2fbc639c3728e9 | def main():
print("hello there")
def goodby():
    """Print a short farewell to stdout."""
    farewell = "'later"
    print(farewell)
|
989,520 | 2a90cd22c186ab5c1d64f98608b61bd084197b03 |
import ROOT
#Open the rootfile and get the workspace from the exercise_0
# NOTE: Python 2 syntax (print statements); run with a Python 2 PyROOT build.
# Open the rootfile and get the workspace produced by the previous exercise.
fInput = ROOT.TFile("Workspace_mumufit.root")
ws = fInput.Get("ws")
ws.Print()
#You can set constant parameters that are known
#If you leave them floating, the fit procedure will determine their uncertainty
ws.var("meanJpsi").setConstant(1)
#Set the RooModelConfig and let it know what the content of the workspace is about
model = ROOT.RooStats.ModelConfig()
model.SetWorkspace(ws)
model.SetPdf("totPDF")
#Here we explicitly set the value of the parameters for the null hypothesis
#We want no signal contribution, so cross_psi = 0
cross_psi = ws.var("cross_psi")
poi = ROOT.RooArgSet(cross_psi)
# Snapshot freezes the current parameter values for the null hypothesis.
nullParams = poi.snapshot()
nullParams.setRealValue("cross_psi",0.)
#Build the profile likelihood calculator
plc = ROOT.RooStats.ProfileLikelihoodCalculator(ws.data("data"), model)
plc.SetParameters(poi)
plc.SetNullParameters(nullParams)
#We get a HypoTestResult out of the calculator, and we can query it.
htr = plc.GetHypoTest()
print "-------------------------------------------------"
print "The p-value for the null is ", htr.NullPValue()
print "Corresponding to a signifcance of ", htr.Significance()
print "-------------------------------------------------"
#PyROOT sometimes fails cleaning memory, this helps
del plc
|
989,521 | 71292c35b5ff4bebbe9bf51004841cf852fb3b84 | import pytest
import glob
import os
import sys
import pathlib
PATH_HERE = os.path.abspath(os.path.dirname(__file__))
PATH_PROJECT = os.path.abspath(PATH_HERE+"/../")
PATH_DATA = os.path.abspath(PATH_PROJECT+"/data/abfs/")
PATH_HEADERS = os.path.abspath(PATH_PROJECT+"/data/headers/")
try:
# this ensures pyABF is imported from this specific path
sys.path.insert(0, "src")
import pyabf
except:
raise ImportError("couldn't import local pyABF")
@pytest.mark.slow
@pytest.mark.parametrize("abfPath", glob.glob("data/abfs/*.abf"))
def test_saveABF1_forEveryFile(abfPath):
    """Smoke test: every sample ABF can be re-saved in ABF1 format."""
    testOutput = pathlib.Path("testOutput")
    if not testOutput.exists():
        testOutput.mkdir()
    abf = pyabf.ABF(abfPath)
    # don't attempt to save ABFs with variable-length sweeps
    if (abf.nOperationMode == 1):
        return
    abf.saveABF1(f"testOutput/{abf.abfID}.abf")
|
989,522 | c0e4f40e7c2d0da2a872f685dfbd81ce8d25cae1 | import numpy as np
from numba import njit
from PIL import Image, ImageDraw, ImageFont
@njit(cache=True)
def jit_cast(img, type_max=65535, dtype=np.uint16):
    # casting to set dtype
    # Scales img by type_max and casts; assumes img is normalized to
    # [0, 1] so the result fills the integer range -- TODO confirm.
    return (type_max * img).astype(dtype)
def get_user_kp(preset, blue, red, magenta, green, cyan, yellow):
    """Build the key-point color-code string.

    With preset == 'None', keep the letters of 'brmgcy' whose toggle is
    exactly True; otherwise return the fixed combination for the preset.
    """
    if preset != 'None':
        # Named presets map straight to fixed color combinations.
        presets = {
            'full': 'brmgcy',
            'warm': 'ry',
            'cold': 'bg',
            'compBY': 'by',
            'compRC': 'rc',
            'compMG': 'mg',
        }
        return presets[preset]
    # Only the literal boolean True enables a letter (truthy values such
    # as 1 are deliberately ignored, matching the checkbox semantics).
    toggles = (blue, red, magenta, green, cyan, yellow)
    return ''.join(c for c, on in zip('brmgcy', toggles) if on is True)
def draw_axis(image):
    """Paste *image* onto a taller white canvas and draw a 0.0-1.0 axis
    with 11 labelled ticks just below it. Returns the new RGB image."""
    img_width = image.width
    img_height = image.height
    # Extra 17.5% of height below the image holds the axis and labels.
    res_image = Image.new("RGB", (img_width, int(img_height / 0.825)), (255, 255, 255))
    res_image.paste(image, (0, 0))
    draw = ImageDraw.Draw(res_image)
    bias = 0.1
    width_bias = int(img_width * 0.5 * bias / 100)
    str_width = int(img_width * (100 - bias) / 100)
    draw.line((width_bias, int(img_height), str_width + width_bias, int(img_height)), fill=(0, 0, 0), width=2)
    for i in range(0, 11):
        x = int(str_width * i / 10 + width_bias)
        draw.line((x, int(0.95 * img_height), x, int(1.05 * img_height)), fill="black", width=2)
        fontsize = 12
        # NOTE(review): the font is reloaded every iteration; could be hoisted.
        font = ImageFont.truetype("arial", fontsize)
        text_x = x - fontsize / 2
        if text_x < 0:
            text_x = 0
        draw.text((text_x, 1.05 * img_height), str(i / 10), fill="black", font=font)
    return res_image
|
989,523 | 444463239a934de597f38d42c549c61f5d819bc8 | import os
import unittest
from fbs.parser import load
from lang.kt.generate import generate_kt
from lang.py.generate import generate_py
from lang.rust.generate import generate_rust
from lang.swift.generate import generate_swift
from pathlib import Path
class CodeGeneratorTests(unittest.TestCase):
    """Golden-file tests: run each code generator on color.fbs and compare
    its output byte-for-byte against the checked-in expected file."""

    TEST_CASE = "tests/parser-cases/color.fbs"
    TESTS_DIR = Path(__file__).parent.parent.absolute()

    def setUp(self):
        self.maxDiff = None
        # Generators write relative to the CWD, so pin it to the tests root.
        os.chdir(self.TESTS_DIR)

    def tearDown(self):
        # Each test removes its generated files, so 'color' must be empty
        # here; rmdir fails loudly if a test leaked output.
        os.rmdir("color")

    def _assert_matches_golden(self, generate, output_name, golden, extra_outputs=()):
        """Run *generate* on the test schema and diff color/<output_name>
        against *golden*. Generated files are removed even when the
        comparison fails, so tearDown's rmdir still succeeds."""
        generate(self.TEST_CASE, load(self.TEST_CASE))
        output = Path("color") / output_name
        try:
            actual = output.read_text()
        finally:
            output.unlink()
            for name in extra_outputs:
                (Path("color") / name).unlink()
        self.assertEqual(Path(golden).read_text(), actual)

    def test_rust(self):
        self._assert_matches_golden(generate_rust, "color.rs",
                                    "tests/expected/golden-color.rs")

    def test_kotlin(self):
        self._assert_matches_golden(generate_kt, "color.kt",
                                    "tests/expected/golden-color.kt")

    def test_swift(self):
        self._assert_matches_golden(generate_swift, "color.swift",
                                    "tests/expected/golden-color.swift")

    def test_py(self):
        # The Python generator also emits a package __init__.py.
        self._assert_matches_golden(generate_py, "color.py",
                                    "tests/expected/golden-color.py",
                                    extra_outputs=("__init__.py",))
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
989,524 | af227ea45a874b851fc6d56805557e826d35e0a9 | """Docutils transforms used by Sphinx when reading documents."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, cast
from docutils import nodes
from sphinx import addnodes
from sphinx.transforms import SphinxTransform
if TYPE_CHECKING:
from docutils.nodes import Node
from sphinx.application import Sphinx
class RefOnlyListChecker(nodes.GenericNodeVisitor):
    """Raise `nodes.NodeFound` if non-simple list item is encountered.
    Here 'simple' means a list item containing only a paragraph with a
    single reference in it.
    """
    def default_visit(self, node: Node) -> None:
        # Any node type without an explicit visitor below disqualifies the list.
        raise nodes.NodeFound
    def visit_bullet_list(self, node: nodes.bullet_list) -> None:
        # Bullet lists themselves (including nested ones) are allowed.
        pass
    def visit_list_item(self, node: nodes.list_item) -> None:
        # A qualifying item has exactly one visible child: a paragraph whose
        # single child is a pending_xref. Anything else raises NodeFound.
        children: list[Node] = []
        for child in node.children:
            if not isinstance(child, nodes.Invisible):
                children.append(child)
        if len(children) != 1:
            raise nodes.NodeFound
        if not isinstance(children[0], nodes.paragraph):
            raise nodes.NodeFound
        para = children[0]
        if len(para) != 1:
            raise nodes.NodeFound
        if not isinstance(para[0], addnodes.pending_xref):
            raise nodes.NodeFound
        # Item checked out; don't descend into the paragraph's contents.
        raise nodes.SkipChildren
    def invisible_visit(self, node: Node) -> None:
        """Invisible nodes should be ignored."""
        pass
class RefOnlyBulletListTransform(SphinxTransform):
    """Change refonly bullet lists to use compact_paragraphs.

    Specifically implemented for 'Indices and Tables' section, which looks
    odd when html_compact_lists is false.
    """

    default_priority = 100

    def apply(self, **kwargs: Any) -> None:
        if self.config.html_compact_lists:
            return

        def is_refonly(candidate: Node) -> bool:
            """True iff the list contains only single-reference paragraphs."""
            checker = RefOnlyListChecker(self.document)
            try:
                candidate.walk(checker)
            except nodes.NodeFound:
                return False
            return True

        for bullet_list in self.document.findall(nodes.bullet_list):
            if not is_refonly(bullet_list):
                continue
            for item in bullet_list.findall(nodes.list_item):
                para = cast(nodes.paragraph, item[0])
                ref = cast(nodes.reference, para[0])
                replacement = addnodes.compact_paragraph()
                replacement += ref
                item.replace(para, replacement)
def setup(app: Sphinx) -> dict[str, Any]:
    """Register the transform and declare parallel-safety metadata."""
    app.add_transform(RefOnlyBulletListTransform)
    metadata: dict[str, Any] = {
        'version': 'builtin',
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }
    return metadata
|
989,525 | baa50853bea0a3cb2b8e359bda26e35a88cb6da9 | """empty message
Revision ID: 1d0442758bfb
Revises: 2f8cb1401dd0
Create Date: 2015-08-16 21:59:56.248774
"""
# revision identifiers, used by Alembic.
revision = '1d0442758bfb'
down_revision = '2f8cb1401dd0'
from alembic import op
import sqlalchemy as sa
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # This revision intentionally makes no schema changes.
    pass
    ### end Alembic commands ###
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # Nothing to undo.
    pass
    ### end Alembic commands ###
|
989,526 | a6ae7d7164d9b869554a68b10a6cd02a37e5f0a6 | from Arvore_Final import Arvore
# Demo: build a left-heavy tree, then rebalance around the problem node.
arvore = Arvore()
arvore.inserir(50)
arvore.inserir(30)
arvore.inserir(20)
arvore.mostrar()
arvore.balanceamento(30)  # pass the problematic (unbalanced) node
arvore.mostrar()
|
989,527 | cf673db93851e2ddf06e7525b8a3238399bc0d59 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Create: 12-2018 - Carmelo Mordini <carmelo> <carmelo.mordini@unitn.it>
"""Module docstring
"""
import matplotlib.pyplot as plt
import sys
import numpy as np
from uncertainties import unumpy as unp
from scipy.integrate import cumtrapz, quad
from scipy.interpolate import interp1d
from scipy.optimize import brentq
from .functions import g12, g32, g52
from ..constants import pi, kB, mass, z32, z52
from ..constants import scattering_length as a_scatt, interaction_constant_g as g_int
from .hfsolver import solver_harmonic_trap, solver_LDA, physics_solver_mu, physics_solver, integrate_N_harmonic_trap
from .hfsolver import T_crit as _hf_T_crit, lambda_therm
from . import trapped_hartree_fock as thf
thismodule = sys.modules[__name__]
def theta(x):
    """Heaviside step function: 0 for x < 0, 0.5 at 0, 1 for x > 0.

    Works elementwise on numpy arrays as well as scalars.
    """
    return 0.5 * (1.0 + np.sign(x))
def n_crit(T):
    """Critical density for condensation at temperature T (ideal-gas BEC)."""
    return z32 / lambda_therm(T)**3
def T_crit(n, mf_correction=False):
    """Critical temperature at density n; optionally apply the first-order
    mean-field shift proportional to n*a^3."""
    Tc = _hf_T_crit(n)
    if mf_correction:
        Tc *= 1 + 1.3 * n * a_scatt**3
    return Tc
def mu_HF(T, n):
    """
    Returns mu vs T solving the true HF at given density n
    """
    # Broadcast so scalar/array combinations of T and n are both accepted.
    T, n = np.broadcast_arrays(np.atleast_1d(T), np.atleast_1d(n))
    mu, _ = physics_solver_mu(n, T)
    return mu
def eta_HF(n):
    """Dimensionless interaction parameter g*n / (kB * Tc(n))."""
    return g_int * n / kB / T_crit(n)
def mu_HF_approx(t, n):
    """
    Returns mu / gn vs t = T/Tc at lowest order in eta
    """
    eta = eta_HF(n)
    return 1 + t**(3. / 2) - 2 * np.sqrt(pi) / z32 * t * np.sqrt(eta * (1 - t**(3. / 2)))
def p_HF(T, n):
    """
    Returns pressure vs T solving the true HF at given density n
    """
    mu, n0 = physics_solver_mu(n, T)
    # Effective fugacity of the thermal component (mean-field shifted).
    zeta = np.exp((mu - 2 * g_int * n) / kB / T)
    p = g_int * (n**2 - 0.5 * n0**2) + kB * T * g52(zeta) / lambda_therm(T)**3
    return p
def k_HF(T, n, h=1e-3):
    """
    Returns the isothermal compressibility k
    solve HF at density n(1 +/- h) and estimate the derivative dn / dmu
    """
    mu_p, _ = physics_solver_mu(n * (1 + h), T)
    mu_m, _ = physics_solver_mu(n * (1 - h), T)
    dn_dmu = 2 * h / (mu_p - mu_m)  # second order diff
    return dn_dmu / n
def get_pressure(V, n, initial=0):
    """Integrate the density over the potential to get the LDA pressure
    profile, anchored at p = initial at the outermost point."""
    # pressure = cumtrapz(n, x=V, initial=np.nan)
    # pressure = pressure[-1] - pressure
    # pressure = - cumtrapz(n[::-1], x=V[::-1], initial=-initial)[::-1]
    pressure = cumtrapz(n[::-1], x=-V[::-1], initial=0)[::-1]
    return pressure + initial
# The MF_* / TH_* pairs are the condensed (mean-field) and thermal branches
# used piecewise below; `zita` is the inverse fugacity exp(-mu / kB T).
def MF_density(zita, T):
    return z32 / lambda_therm(T)**3 + kB * T / g_int * np.log(1 / zita)
def MF_pressure(zita, T):
    return z52 + lambda_therm(T) / 4 / a_scatt * np.log(zita)**2
def TH_density(zita, T):
    return g32(1 / zita) / lambda_therm(T)**3
def TH_pressure(zita, T):
    return g52(1 / zita)
def n_ideal(mu0, T, V):
    """Density profile in LDA: condensed branch where zita <= 1, thermal above."""
    mu = mu0 - V
    zita = np.exp(-mu / kB / T)
    n = np.piecewise(zita, [zita <= 1, zita > 1],
                     [MF_density, TH_density], T)
    return n
def p_ideal(mu0, T, V):
    """Pressure profile matching n_ideal (dimensionless branch values are
    rescaled to SI by kB*T / lambda^3)."""
    mu = mu0 - V
    zita = np.exp(-mu / kB / T)
    p = np.piecewise(zita, [zita <= 1, zita > 1],
                     [MF_pressure, TH_pressure], T)
    return p * kB * T / lambda_therm(T)**3
def n_semi_ideal(mu0, T, V, split=False):
    """Semi-ideal model: Thomas-Fermi condensate plus an ideal thermal cloud.
    Returns n0 + nt, or the (n0, nt) pair when split=True."""
    mu = mu0 - V
    zita = np.exp(-np.abs(mu) / kB / T)
    nt = g32(zita) / lambda_therm(T)**3
    n0 = np.maximum(0, mu / g_int)
    if split:
        return n0, nt
    else:
        return n0 + nt
def k_semi_ideal(mu0, T, V):
    """Isothermal compressibility of the semi-ideal model, (dn/dmu) / n^2."""
    mu = mu0 - V
    zita = np.exp(-np.abs(mu) / kB / T)
    k0 = 1 / g_int * theta(mu)
    kt = -np.sign(mu) / kB / T / lambda_therm(T)**3 * g12(zita)
    return (k0 + kt) / n_semi_ideal(mu0, T, V)**2
def p_semi_ideal(mu0, T, V):
    """Pressure profile: integrate the density, anchoring the outermost
    point to the full HF pressure at that density."""
    n = n_semi_ideal(mu0, T, V)
    # p = get_pressure(V, n)
    p1 = p_HF(T, n[-1])
    p = get_pressure(V, n, initial=p1)
    return p
def n_hartree_fock(mu0, T, V, split=False, solver_kwargs=None):
    """Hartree-Fock (LDA) density profile.

    mu0, T: SI units; V: external potential array.
    Returns the total density n, or (n0, nt) = (condensate, thermal)
    when split=True.

    `solver_kwargs` exists only for signature parity with
    n_hartree_fock_interp; solver_LDA takes no extra options, so it is
    accepted and ignored. The original mutable default ({}) is replaced
    by None (mutable-default anti-pattern).
    """
    n, n0 = solver_LDA(mu0, T, V)
    if split:
        return n0, n - n0
    return n
def n_hartree_fock_interp(mu0, T, V, split=False, solver_kwargs={}):
    """HF density via the harmonic-trap solver's interpolating functions;
    solver defaults can be overridden through solver_kwargs."""
    solver_kw = dict(omega_ho=2 * pi * 60, dr=3e-7, Rmax=3e-3)
    solver_kw.update(solver_kwargs)
    fun_n_sim, fun_n0_sim, mu_array, r, alpha = solver_harmonic_trap(
        mu0, T, **solver_kw)
    mu_local = np.atleast_1d(mu0 - V)
    n = fun_n_sim(mu_local)
    if split:
        n0 = fun_n0_sim(mu_local)
        nt = n - n0
        return n0, nt
    else:
        return n
def k_hartree_fock(mu0, T, V, solver_kwargs={}):
    """Compressibility from the HF density: -(dn/dV) / n^2 = (dn/dmu) / n^2."""
    n = n_hartree_fock(mu0, T, V, solver_kwargs=solver_kwargs)
    k = - np.gradient(n, V) / n**2
    return k
def p_hartree_fock(mu0, T, V, solver_kwargs={}):
    """HF pressure profile built from the split density components."""
    n0, nt = n_hartree_fock(mu0, T, V, solver_kwargs=solver_kwargs, split=True)
    n = n0 + nt
    mu = mu0 - V - 2 * g_int * n
    zeta = np.exp(mu / kB / T)
    # Clip at 1 to stay on the physical branch of g52.
    zeta = np.clip(zeta, a_min=None, a_max=1)
    p = g_int * (n**2 - 0.5 * n0**2) + kB * T * g52(zeta) / lambda_therm(T)**3
    # n = n_hartree_fock(mu0, T, V, solver_kwargs=solver_kwargs, split=False)
    # mu = mu0 - V[-1] - 2*g_int*n[-1]
    # zeta = np.exp(mu / kB / T)
    # p1 = g_int*n[-1]**2 + kB * T * g52(zeta) / lambda_therm(T)**3
    # p = get_pressure(V, n, initial=p1)
    return p
def get_eos(mu0, T, V, n='hartree_fock', n_std=None):
    """Equation of state: reduced temperature and chemical potential.

    n: either the name of a density model ('hartree_fock', 'semi_ideal',
       'ideal') or a precomputed density array.
    n_std: optional density uncertainties (wrapped with unumpy).
    Returns (t0, u0) = (T / Tc(n), mu_local / (g n)).
    """
    if n in ['hartree_fock', 'semi_ideal', 'ideal']:
        # Dispatch by attribute lookup on this module instead of eval()
        # on a format string (same behavior, no code-injection surface).
        n = getattr(thismodule, f"n_{n}")(mu0, T, V)
        n_std = None
    valid = n > 0
    n = n.copy()
    n[~valid] = np.nan  # mask non-physical densities
    if n_std is not None:
        n = unp.uarray(n, n_std)
        print('-----------------\n', type(n))  # debug output
    mu_local = mu0 - V
    u0 = mu_local / g_int / n
    t0 = T / T_crit(n)
    return t0, u0
def integrate_density(r, n, AR=1):
    """Atom number from a radial density profile.

    Trapezoidal integral of AR * 4*pi*r^2 * n(r) over r, where AR
    rescales the spherical result for an elongated (aspect-ratio) trap.
    """
    integrand = 4 * np.pi * AR * n * r**2
    return np.trapz(integrand, x=r)
# def get_N(mu0, T, omega_rho, AR, solver_kwargs={}):
# """ This requires mu0 and T in SI units"""
# solver_kw = dict(omega_ho=omega_rho, dr=3e-7, Rmax=0.5e-3)
# solver_kw.update(solver_kwargs)
# fun_n_sim, fun_n0_sim, mu_array, r, alpha = solver_harmonic_trap(mu0, T, **solver_kw)
# n = fun_n_sim(mu_array)
# return integrate_density(r, n, AR)
#
# def get_mu0(N, T, omega_rho, AR, mu0_lims=(-10, 100), solver_kwargs={}):
# """
# In: N, T in SI units
# returns mu0 in nK
# """
# def fun(mu0, N, T, omega_rho, AR, solver_kwargs):
# mu0 = mu0*1e-9*kB
# return get_N(mu0, T, omega_rho, AR, solver_kwargs) - N
# return brentq(fun, *mu0_lims, args=(N, T, omega_rho, AR, solver_kwargs))
def get_N(mu0, T, omega_rho, AR, density_model='hartree_fock', split=False, *args, **kwargs):
    """ This requires mu0 and T in SI units"""
    # Radial grid and harmonic potential for the LDA integration.
    Rmax = kwargs.get('Rmax', 3e-3)
    dr = kwargs.get('dr', 3e-7)
    r = np.arange(0, Rmax, dr)
    V = 0.5 * mass * omega_rho**2 * r**2
    # Look up the density model n_<name> defined in this module.
    fun = getattr(thismodule, f"n_{density_model}")
    kw = {'omega_ho': omega_rho, 'dr': dr, 'Rmax': Rmax}
    kw.update(kwargs)
    if split:
        n0, nt = fun(mu0, T, V, split=True, solver_kwargs=kw)
        N0 = integrate_density(r, n0, AR)
        Nt = integrate_density(r, nt, AR)
        return N0, Nt
    else:
        n = fun(mu0, T, V, solver_kwargs=kw)
        return integrate_density(r, n, AR)
def get_mu0(N, T, omega_rho, AR, mu0_lims=(-30, 300), density_model='hartree_fock', *args, **kwargs):
    """
    In: N, T in SI units
    returns mu0 in nK
    kwargs (to get_N):
        dr: spatial res to compute density [default: 2e-6]
        Rmax: max range to compute density [default: 3e-3]
    """
    if mu0_lims == 'auto':
        mu0_lims = _thf_mu0_lims(N, T, omega_rho, AR)
    if density_model == 'hartree_fock':
        # HF has a dedicated, faster root-finder.
        # kw = {'omega_ho': omega_rho, 'dr': 2e-6, 'Rmax': 3e-3}
        # kw.update(kwargs)
        # args += (False, kw,)
        return get_mu0_HF(N, T, omega_rho, AR, mu0_lims=mu0_lims)
    # Generic path: bisect mu0 (in nK) until get_N matches the target N.
    def fun(mu0, N, T, omega_rho, AR, density_model, *args, **kwargs):
        mu0 = mu0 * 1e-9 * kB
        return get_N(mu0, T, omega_rho, AR, density_model, *args, **kwargs) - N
    return brentq(fun, *mu0_lims, args=(N, T, omega_rho, AR, density_model,) + args)
def get_mu0_HF_quad(N, T, omega_rho, AR, mu0_lims=(-30, 300), **kwargs):
    """Find mu0 (nK) such that the quadrature-integrated HF atom number
    matches N; variant of get_mu0_HF using scipy.quad over mu."""
    Rmax = kwargs.get('Rmax', 100e-6)
    T1 = T * 1e9
    mu_max = mu0_lims[1] * 1e-9 * kB
    mu_min = mu_max - 0.5 * mass * omega_rho**2 * Rmax**2
    dmu = 0.3e-9 * kB
    _mu = np.arange(mu_min, mu_max, dmu)
    n, n0 = physics_solver(_mu, T)
    # wrap it to extrapolate: https://stackoverflow.com/a/2745496
    def extrap1d(interpolator):
        xmin = interpolator.x.min()
        def pointwise(x):
            # print(x)
            if x < xmin:
                # Below the tabulated range fall back to the Boltzmann tail
                # (x is in nK here, matching T1).
                return np.exp(x / T1) / lambda_therm(T)**3
            else:
                return interpolator(x)
        def ufunclike(xs):
            # print(type(xs))
            xs = xs if isinstance(xs, np.ndarray) else np.array([xs])
            return np.array(list(map(pointwise, xs)))
        return ufunclike
    density = extrap1d(
        interp1d(_mu * 1e9 / kB, n, kind='linear', fill_value='extrapolate'))
    # import matplotlib.pyplot as plt
    # fig, ax = plt.subplots()
    # # uu = np.arange(-1000, mu0_lims[1], 1)
    # uu = _mu*1e9/kB * 2
    # ax.semilogy(uu, density(uu), '-o')
    # ax.plot(uu, np.exp(uu / T1) / lambda_therm(T)**3, '-o')
    # plt.show()
    def fun(mu0):
        # N(mu0) - N: integrate n(mu) times the harmonic density of states.
        def integrand(mu):
            return 4 * np.pi * AR * 1e-9 * kB * density(mu) * np.sqrt(2 * (mu0 - mu) * 1e-9 * kB) / (mass * omega_rho**2)**(3 / 2)
        ret = quad(integrand, a=-np.inf, b=mu0)
        return ret[0] - N
    return brentq(fun, *mu0_lims)
def get_mu0_HF(N, T, omega_rho, AR, mu0_lims=(-30, 300), **kwargs):
    """
    In: N, T in SI units
    returns mu0 in nK
    kwargs (to hf_solver):
        dr: spatial res to compute density [default: 0.5 um]
        Rmax: max range to compute density [default: 200 um]
    """
    solver_kw = dict(omega_ho=omega_rho, dr=5e-7, Rmax=2e-4)
    solver_kw.update(kwargs)
    # Solve once at the upper mu bound; reuse the (mu, n) table for all
    # trial values of mu0 during root finding.
    mu_upper = mu0_lims[1] * 1e-9 * kB
    fun_n_sim, _, mus, r, _ = solver_harmonic_trap(mu_upper, T, **solver_kw)
    ns = fun_n_sim(mus)
    # import matplotlib.pyplot as plt
    # fig, (ax, ax1) = plt.subplots(1, 2, sharex=True)
    # ax.plot(r, ns)
    # ax1.plot(r, mus * 1e9 / kB)
    # ax1.axhline(mu0_lims[0])
    # plt.show()
    def fun(mu0):
        mu0 = mu0 * 1e-9 * kB
        # Only points with local mu below mu0 contribute to the trap integral.
        where = mus <= mu0
        return AR * integrate_N_harmonic_trap(mu0, omega_rho, ns[where], mus[where]) - N
    return brentq(fun, *mu0_lims)
def _lda_bec_fraction(N, T, omega_rho, AR, mu0_lims=(-1000, 300), full_output=False, raise_error=True, **kwargs):
    """Condensed fraction N0/N at atom number N and temperature T.
    With raise_error=False a failed mu0 root-find returns NaN instead of
    raising; full_output also returns (N0, Nt, mu0)."""
    if mu0_lims == 'auto':
        mu0_lims = _thf_mu0_lims(N, T, omega_rho, AR)
    kw = dict(density_model='hartree_fock', dr=5e-7, Rmax=2e-4)
    kw.update(**kwargs)
    try:
        mu0 = get_mu0(N, T, omega_rho, AR, mu0_lims=mu0_lims, **kw)  # _interp
        N0, Nt = get_N(kB * mu0 * 1e-9, T, omega_rho, AR, split=True, **kw)
    except ValueError as e:
        if raise_error:
            raise(e)
        else:
            print(e)
            return np.nan
    if full_output:
        return N0, Nt, mu0
    else:
        return N0 / (N0 + Nt)
# Vectorized front-end; scalar-like arguments are excluded from broadcasting.
lda_bec_fraction = np.vectorize(_lda_bec_fraction, excluded={'omega_rho', 'AR', 'mu0_lims', 'density_model'})
def _thf_mu0_lims(N, T, omega_rho, AR):
    """Bracket mu0 for root finding using the trapped-HF estimate.
    NOTE(review): reaches into thf's double-underscore-private __mu_t."""
    omega_ho = omega_rho / AR**(1/3)
    eta = thf.eta(N, omega_ho)
    Tc = thf.critical_temperature(N, omega_ho)
    mu0 = thf.__mu_t(T / Tc, eta) * Tc * 1e9  # nK
    # Order the bracket correctly for either sign of the estimate.
    lims = (mu0 * 0.5, mu0 * 2) if mu0 >= 0 else (mu0 * 2, mu0 * 0.5)
    # print(f"estimate mu0 = {mu0:.3f} nK -- lims: ({lims[0]:.2f}, {lims[1]:.2f})")
    return lims
def plot_pressure_data(ax, mu0, T, p, V, *args, **kwargs):
    """Plot reduced pressure versus inverse fugacity on a semilog-x axis."""
    # Default to fitting against V, and plot against (inverse) fugacity.
    zita = np.exp(-(mu0 - V) / kB / T)
    # Non-dimensionalize: p * lambda^3 / (kB T).
    p = p * lambda_therm(T)**3 / kB / T
    return ax.semilogx(zita, p, *args, **kwargs)
def compare_models(mu0, T, r, omega):
    """Side-by-side density / pressure / EOS plots for the three density
    models. Returns (fig, axes)."""
    V = 0.5 * mass * omega**2 * r**2
    fig, axes = plt.subplots(1, 3, figsize=(18, 4))
    ax_n, ax_p, ax_eos = axes
    for model in 'ideal', 'semi_ideal', 'hartree_fock':
        # NOTE(review): eval() on a format string; getattr dispatch would be safer.
        n = eval(f"n_{model}(mu0, T, V)")
        ax_n.plot(r * 1e6, n, label=model)
        p = eval(f"p_{model}(mu0, T, V)")
        plot_pressure_data(ax_p, mu0, T, p, V, label=model)
        t, u = get_eos(mu0, T, V, n)
        ax_eos.plot(t, u, label=model)
    ax_eos.set(xlim=(0, 1.5), ylim=(-2.5, 3),
               xlabel='T/Tc', ylabel='mu/gn')
    ax_n.legend()
    for ax in axes:
        ax.grid(True)
    return fig, axes
if __name__ == '__main__':
    # Demo / scratch plots for the module.
    import matplotlib.pyplot as plt
    plt.rcParams['toolbar'] = 'toolmanager'
    # plt.rcParams['figure.dpi'] = 72
    # import os, sys
    # sys.path.insert(0, os.path.abspath('..'))
    # from visualization import fig2clipboard
    from hfsolver.physics import mass, kB
    tf = 90
    omega = 2 * np.pi * tf
    r = np.arange(0, 50, 0.1) * 1e-6
    Vtrap = 0.5 * mass * omega**2 * r**2
    mu0 = 75 * 1e-9 * kB
    T = 200e-9
    fig, axes = compare_models(mu0, T, r, omega)
    fig.suptitle(f"Compare models\nmu0 = {mu0*1e9/kB:g} nK, T = {T*1e9:g} nK")
    n0, nt = n_hartree_fock(mu0, T, Vtrap, split=True)
    n = n0 + nt
    fig, ax = plt.subplots()
    ax.plot(r * 1e6, n)
    ax.plot(r * 1e6, nt, '--')
    ax.plot(r * 1e6, n0)
    ax.set(xlabel='r [um]', ylabel='n [at/m^3]',)
    ax.grid(True)
    fig.suptitle(f'HF density profile\nmu0 {mu0:g} nK, T {T*1e9:g} nK')
    # mu = np.linspace(-10, 100, 20)
    # AR = 10
    # N = np.empty_like(mu)
    # N0 = np.empty_like(mu)
    # for j, mu0 in enumerate(mu):
    #     mu0 = mu0*1e-9*kB
    #     n0, nt = n_hartree_fock(mu0, T, Vtrap, split=True)
    #     n = n0 + nt
    #     N[j] = integrate_density(r, n, AR)
    #     N0[j] = integrate_density(r, n0, AR)
    #
    # NN = 5e6
    # mu0_N = get_mu0(NN, T, omega, AR)
    # print(f"found mu0 = {mu0_N:.2f}")
    # fig, ax = plt.subplots()
    # ax.plot(mu, N*1e-6, label='N')
    # ax.plot(mu, (N-N0)*1e-6, label='Nth')
    # ax.plot(mu, N0*1e-6, label='Nbec')
    # ax.plot(mu0_N, NN*1e-6, 'or', mew=2, mfc='none')
    # ax.set(xlabel='mu0 [nK]', ylabel='N [M]')
    # ax.legend()
    # ax.grid()
    # fig.suptitle(f"Natoms vs mu0 at {T*1e9:.0f} nK\ntrap freqs: {tf/AR} x {tf} Hz")
    fig, ax = plt.subplots()
    t0, u0 = get_eos(mu0, T, V=Vtrap, n=n)
    Rtf = np.sqrt(2 * mu0 / mass) / omega
    dr = 0.5e-6
    r_bins = np.arange(0.5 * Rtf, 1.5 * Rtf, dr)
    V_bins = 0.5 * mass * omega**2 * r_bins**2
    # NOTE(review): get_eos takes no V_bins parameter and returns 2 values;
    # this call raises TypeError as written. Needs a binned-EOS variant.
    t, u, t_err, u_err = get_eos(mu0, T, V=Vtrap, n=n, V_bins=V_bins)
    ax.plot(t0, u0, 'k')
    ax.errorbar(t, u, xerr=t_err, yerr=u_err, marker='o', ls='')
    ax.set(xlim=(0, 1.5), ylim=(-2.5, 3), xlabel='T/Tc', ylabel='mu/gn')
    ax.grid(True)
    plt.show()
|
989,528 | 5450792ab3d4947d806895ab96a50987d6293900 | from django.db import models
from django.contrib.auth.models import User
#minhas classes e funções
from ifrs.validator import validate_CPF
#############################################################################
##################### SERVIDOR ##############################################
#############################################################################
# Residence situation relative to the Ibirubá campus.
CHOICES_RESIDENCIA = (
    (0, "Já morava em Ibirubá"),
    (1, "Mudou-se para Ibirubá em função do concurso"),
    (2, "Não mora em Ibirubá")
)
# Create your models here.
class Titulacao(models.Model):
    # Academic degree lookup table.
    descricao = models.CharField(max_length=15, verbose_name="Descrição")
    def __str__(self):
        return self.descricao
    class Meta:
        ordering = ["descricao"]  # prefix '-' for descending; also orders selects/comboboxes
        verbose_name="Titulação"  # singular display name
        verbose_name_plural="Titulações"  # plural display name
class RegimeTrabalho(models.Model):
    # Work regime lookup table (e.g. 20h/40h/DE).
    descricao = models.CharField(max_length=6, verbose_name="Descrição")
    def __str__(self):
        return self.descricao
    class Meta:
        ordering = ["descricao"]
        verbose_name="Regime de Trabalho"
        verbose_name_plural="Regimes de Trabalho"
class SituacaoFuncional(models.Model):
    # Employment status lookup table.
    descricao = models.CharField(max_length=15, verbose_name="Descrição")
    def __str__(self):
        return self.descricao
    class Meta:
        ordering = ["descricao"]
        verbose_name="Situação Funcional"
        verbose_name_plural="Situações Funcionais"
class Remuneracao(models.Model):
    # Remuneration type lookup table.
    descricao = models.CharField(max_length=5, verbose_name="Descrição")
    def __str__(self):
        return self.descricao
    class Meta:
        ordering = ["descricao"]
        verbose_name="Remuneração"
        verbose_name_plural="Remunerações"
class Setor(models.Model):
    # Organizational unit; self-referencing FK builds the sector hierarchy.
    descricao = models.CharField(max_length=60, verbose_name="Descrição")
    setor_pai = models.ForeignKey('self', on_delete=models.PROTECT, blank=True, null=True, verbose_name="Setor Pai", related_name="pai")
    remuneracao = models.ForeignKey(Remuneracao, on_delete=models.PROTECT, verbose_name="Remuneração")
    def __str__(self):
        return self.descricao
    class Meta:
        ordering = ["descricao"]
        verbose_name_plural="Setores"
class Servidor(User):
    """Civil servant profile extending Django's User.

    On first save the username is set to the CPF and the initial password
    to the SIAPE number (see save()).
    """
    # CPF: Brazilian taxpayer ID, used as the login username.
    cpf = models.CharField(unique=True, max_length=11, validators=[validate_CPF])
    siape = models.CharField(unique=True, max_length=10)
    titulacao = models.ForeignKey(Titulacao, on_delete=models.PROTECT, verbose_name="Titulação")
    regime_trabalho = models.ForeignKey(RegimeTrabalho, on_delete=models.PROTECT, verbose_name="Regime de Trabalho")
    situacao_funcional = models.ForeignKey(SituacaoFuncional, on_delete=models.PROTECT, verbose_name="Situação Funcional")
    residencia = models.IntegerField(choices=CHOICES_RESIDENCIA, verbose_name="Residência")
    setor = models.ForeignKey(Setor, on_delete=models.PROTECT, verbose_name="Setor de Exercício", null=True, blank=True, help_text="Setor onde o servidor está lotado atualmente.")
    inicio = models.DateField(verbose_name="Início do Exercício", help_text="Data em que o servidor entrou em efetivo exercício no Campus.")
    fim = models.DateField(verbose_name="Término do Exercício", blank=True, null=True, help_text="Último dia que o servidor esteve em efetivo exercício no Campus.")
    observacoes = models.TextField(verbose_name="Observações", blank=True, null=True)
    # Make some inherited User fields mandatory.
    # NOTE(review): this mutates the *global* User model at import time,
    # affecting every form/app that uses User.
    User._meta.get_field('first_name').blank = False
    User._meta.get_field('last_name').blank = False
    User._meta.get_field('email').blank = False
    # Help text for the inherited name fields.
    User._meta.get_field('first_name').help_text = "Por exemplo, se seu nome é 'João Carlos Martins da Silva', preencha neste campo 'João'"
    User._meta.get_field('last_name').help_text = "Por exemplo, se seu nome é 'João Carlos Martins da Silva', preencha neste campo 'Carlos Martins da Silva'"
    # New users are staff members by default.
    User._meta.get_field('is_staff').default = True

    def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
        """Set username=CPF and initial password=SIAPE on first save.

        Accepts the full Model.save() signature: the original override
        dropped `using` and `update_fields`, which broke
        save(update_fields=...) and multi-database saves.
        """
        if self._state.adding:
            self.username = self.cpf  # username is the CPF
            self.set_password(self.siape)  # initial password is the SIAPE
        super().save(force_insert=force_insert, force_update=force_update,
                     using=using, update_fields=update_fields)

    def __str__(self):
        return self.first_name + " " + self.last_name

    class Meta:
        ordering = ["first_name", "last_name"]  # prefix '-' for descending order
        verbose_name_plural="Servidores"  # plural display name
#############################################################################
###################### DOCENTE ##############################################
#############################################################################
class AreaConcurso(models.Model):
    # Hierarchical knowledge-area taxonomy used by hiring notices.
    area = models.CharField(max_length=50, verbose_name="Área")
    area_pai = models.ForeignKey('self', on_delete=models.PROTECT, blank=True, null=True, verbose_name="Área Pai", related_name="pai")
    nivel = models.IntegerField(verbose_name="Nível")
    def __str__(self):
        return self.area
    class Meta:
        ordering = ["area"]
        verbose_name="Área do Concurso"
        verbose_name_plural="Áreas do Concurso"
class Docente(Servidor):
    # Teaching staff: adds pedagogical training flag and hiring area.
    formacao_pedagogica = models.BooleanField(verbose_name="Formação Pedagógica", help_text="Marque está opção se você possui Licenciatura ou Formação Pedagógica")
    area_concurso = models.ForeignKey(AreaConcurso, on_delete=models.PROTECT, verbose_name="Área Concurso", help_text="Área que consta no edital do concurso")
    class Meta:
        ordering = ["first_name", "last_name"]  # prefix '-' for descending order
        verbose_name_plural="Docentes"  # plural display name
class MeuPerfilDocente(Docente):
    # Proxy model: same table as Docente, separate admin entry ("my profile").
    class Meta:
        proxy = True
        ordering = ["first_name", "last_name"]  # prefix '-' for descending order
        verbose_name="Meu Perfil - Docente"  # singular display name
        verbose_name_plural="Meus Perfis - Docente"  # plural display name
#############################################################################
########################## TAE ##############################################
#############################################################################
# Career levels for administrative positions.
CHOICES_NIVEIS = (
    (0, "A"),
    (1, "B"),
    (2, "C"),
    (3, "D"),
    (4, "E")
)
class Cargo(models.Model):
    # Administrative position lookup table.
    descricao = models.CharField(max_length=15, verbose_name="Descrição")
    nivel = models.IntegerField(choices=CHOICES_NIVEIS, verbose_name="Nível")
    def __str__(self):
        return self.descricao
    class Meta:
        ordering = ["descricao"]
class TecnicoAdministrativo(Servidor):
    # Administrative staff: adds the held position.
    cargo = models.ForeignKey(Cargo, on_delete=models.PROTECT)
    class Meta:
        ordering = ["first_name", "last_name"]  # prefix '-' for descending order
        verbose_name ="Técnico Administrativo em Educação"
        verbose_name_plural="Técnicos Administrativos em Educação"  # plural display name
class MeuPerfilTecnicoAdministrativo(TecnicoAdministrativo):
    # Proxy model: same table, separate admin entry ("my profile").
    class Meta:
        proxy = True
        ordering = ["first_name", "last_name"]  # prefix '-' for descending order
        verbose_name="Meu Perfil - TAE"  # singular display name
        verbose_name_plural="Meus Perfis - TAE"  # plural display name
989,529 | 5ef5e14ca0f0fe6a04dcfa14c93c2956ea1fb2b5 | print("Loading save file...")
# Resume an exhaustive Collatz-conjecture check from the counter persisted
# in store.txt; Ctrl-C saves progress and exits.
with open("store.txt", "r") as file:
    counter = int(file.read())
print("Press Control-C to quit.")
while True:
    try:
        print("Checking "+str(counter)+"... ", end="")
        number = counter
        while number != 1:
            if number % 2 == 0:
                # Integer division: the original `/= 2` produced floats,
                # which lose precision (and can loop wrongly) for large
                # counters beyond 2**53.
                number //= 2
            else:
                number = number * 3 + 1
        print("Done. ")
        counter += 1
    except KeyboardInterrupt:
        break
print("Saving data... ", end="")
with open("store.txt", "w") as file:
    file.write(str(counter))
print("Done.")
989,530 | 096cb1c1e29c1ec8ea8451dad319d66e60b487fb | with open('3-input.txt') as f:
lines = [line.rstrip() for line in f]
skip = []
count = 2
for l in lines:
if count % 2 == 0:
skip.append(l)
count += 1
print(len(skip))
# tree = 0
# index = 0
# for l in lines:
# if l[index] == "#":
# tree += 1
# print(index)
# index = (index + 3) % int(len(l))
# print(tree)
def do_slope(right, down, input):
    """Count the '#' cells hit while sledding through *input*.

    Starting at column 0 of row 0, advance *right* columns per visited row;
    the grid wraps horizontally. Rows are visited every *down* steps.
    The original accepted `down` but ignored it (the dead commented-out
    code showed the intent); with down=1 the behavior is unchanged.
    """
    trees = 0
    index = 0
    for row, line in enumerate(input):
        if row % down != 0:
            continue  # this slope skips intermediate rows
        if line[index] == "#":
            trees += 1
        index = (index + right) % len(line)
    return trees
# Part 1: trees hit on the right-3 / down-1 slope.
total = do_slope(3,1,lines)
#* do_slope(3,1,lines) * do_slope(5,1,lines) * do_slope(7,1,lines) * do_slope(1,2,skip)
print(total)
989,531 | e4247a4da31e22e71521f1ada2b0d28a0fdb571f | VERSION = (1, 1, 1)
from .backends import EmailBackend
from .models import PRIORITY
from .utils import send_mail
|
989,532 | 687961844014c7a972d3d37912358f226d1abb50 | from turtle import*
import math
import time
def drawSquare(turtle, x, y, length = 100):
    """Draw a square of side *length* starting at (x, y), first edge
    heading straight down (270 degrees)."""
    turtle.up()
    turtle.goto(x, y)
    turtle.setheading(270)
    turtle.down()
    sides = 4
    while sides > 0:
        turtle.forward(length)
        turtle.right(90)
        sides -= 1
# Demo: a red 100px square and a nested blue 50px square.
ttl = Turtle()
ttl.pencolor('red')
drawSquare(ttl,0,0)
ttl.pencolor('blue')
drawSquare(ttl,50,50,50)
time.sleep(20)
def drawRectangle(turtle, x, y, length = 300, width = 100):
    """Draw a length-by-width rectangle starting at (x, y), first edge
    heading straight down (270 degrees)."""
    turtle.up()
    turtle.goto(x, y)
    turtle.setheading(270)
    turtle.down()
    for _ in (0, 1):
        turtle.forward(length)
        turtle.right(90)
        turtle.forward(width)
        turtle.right(90)
# Demo: a red 300x100 rectangle.
ttl = Turtle()
ttl.pencolor('red')
drawRectangle(ttl,-50,50)
time.sleep(20)
def drawTrap(turtle, x, y, up_length = 300, lo_length = 500, side = 100):
    """Draw a trapezoid with 45-degree legs, bottom edge starting at (x, y)
    heading left; the pen is parked at (-250, 0) afterwards.

    Bug fix: math.cos works in radians, so the original math.cos(45)
    evaluated cos of 45 *radians* (~0.525) instead of the intended
    45-degree projection (~0.707), so the shape never closed properly.
    """
    # Horizontal run of one 45-degree leg of length `side`.
    leg_run = side * math.cos(math.radians(45))
    turtle.up()
    turtle.goto(x, y)
    turtle.setheading(180)
    turtle.down()
    turtle.forward(lo_length)
    turtle.right(135)
    turtle.forward(side)
    turtle.right(45)
    turtle.forward(up_length + leg_run)
    turtle.right(45)
    turtle.forward(side)
    turtle.right(135)
    turtle.forward(up_length + leg_run)
    turtle.up()
    turtle.goto(-250, 0)
# Demo: a blue trapezoid.
ttl = Turtle()
ttl.pencolor('blue')
#drawTrap(ttl, 250, -35)
drawTrap(ttl, 250 ,0)
time.sleep(4)
989,533 | 75e02a65c2f709a0bb6cb8e8fc58aa7a2f14880a | # -*- coding: utf-8 -*-
#
# builds a house xml used for the homepage of current multi definitions
#
import DataUnPacker
from ServerUtils import sanitize
import sys, os, time, codecs
from pprint import pprint
def buildHouseXML(root_path, xml):
    """Stream one ``<house>`` element per sellable house deed into *xml*.

    Reads the POL server's ``itemdesc.cfg`` for house definitions and deed
    items, cross-references the vendor spawn config to keep only deeds the
    merchant actually sells, and writes name, type and material costs for
    each.  Returns False if the item config is missing, True otherwise.
    """
    cfg = os.path.join(root_path,'pkg','std','housing','itemdesc.cfg')
    if not os.path.exists(cfg):
        return False
    houses=dict()       # house name -> (objtype, element)
    items = dict()      # deed objtype -> element
    housetypes = set()  # distinct HouseType strings seen on deeds
    for typ, name, elem in DataUnPacker.DatafileReader(cfg,is_cfg=True):
        if typ == 'House':
            if 'MultiID' in elem:
                houses[elem['Name'][0]]=(int(name,16),elem)
        elif typ == 'Item':
            # Keep only priced house deeds that reference a house type.
            if 'Script' in elem:
                if elem['Script'][0] == 'housedeed':
                    if int(elem.get('VendorSellsFor',['0'])[0]):
                        if 'HouseObjType' in elem:
                            items[int(name,16)] = elem
                            housetypes.add(elem.get('HouseType',['UNKNOWN'])[0])
    housetypes = sorted(list(housetypes))
    pprint(housetypes)
    usedtypes = dict()
    mrcspawn = os.path.join(root_path,'config','mrcspawn.cfg')
    # NOTE(review): this re-checks `cfg` instead of `mrcspawn` -- looks like
    # a copy/paste slip; confirm the intent before relying on it.
    if not os.path.exists(cfg):
        return False
    sellable = []
    # Deed objtypes listed in the merchant 'Deeds_S' product group.
    for typ, name, elem in DataUnPacker.DatafileReader(mrcspawn,is_cfg=True):
        if typ == 'ProductGroup':
            if name == 'Deeds_S':
                for item in elem['Item']:
                    l = item.split()
                    sellable.append(int(l[0],16))
                break
    for deed_obj, deed_elem in items.items():
        if deed_elem['HouseObjType'][0] not in houses:
            continue
        house_elem = houses[deed_elem['HouseObjType'][0]][1]
        multiid = int(house_elem['MultiID'][0],16)
        if (deed_obj not in sellable):
            print('ignoring 0x{:X}: 0x{:X}'.format(multiid,deed_obj))
            continue
        h_type=deed_elem.get('HouseType',['UNKNOWN'])[0]
        usedtypes[h_type]=usedtypes.get(h_type,0)+1
        xml.write(' <house multiid="0x{:X}">\n'.format(multiid))
        # Strip the German "blueprints for a ..." prefixes from the deed name.
        name = deed_elem['Desc'][0]
        name = name.replace( # gnaaa...
            'Baupla%ene/n% fuer einen ','').replace(
            'Baupla%ene/n% fuer eine ','').replace(
            'Baupla%ene/n% fuer ein ','').replace(
            'Baupla%ene/n% fuer ','').replace(
            'Baupla%ene/n% ','').replace(
            'Aufbauanleitung%en% fuer ein ','').strip()
        xml.write('  <name>{}</name>\n' .format( sanitize(name) ))
        xml.write('  <type>{}</type>\n' .format( deed_elem.get( 'HouseType', ['UNKNOWN'] )[0] ))
        xml.write('  <typeid>{}</typeid>\n' .format( housetypes.index(deed_elem.get( 'HouseType', ['UNKNOWN'] )[0] )))
        xml.write('  <gold>{}</gold>\n' .format( deed_elem['VendorSellsFor'][0] ))
        # Material costs (German config keys), defaulting to 0 when absent.
        xml.write('  <ingots>{}</ingots>\n' .format( deed_elem.get( 'Barren', [0] )[0] ))
        xml.write('  <boards>{}</boards>\n' .format( deed_elem.get( 'Bretter', [0] )[0] ))
        xml.write('  <granites>{}</granites>\n' .format( deed_elem.get( 'Granit', [0] )[0] ))
        xml.write('  <leathers>{}</leathers>\n' .format( deed_elem.get( 'Leder', [0] )[0] ))
        xml.write('  <claystones>{}</claystones>\n' .format( deed_elem.get( 'Lehm', [0] )[0] ))
        xml.write('  <marble>{}</marble>\n' .format( deed_elem.get( 'Marmor', [0] )[0] ))
        xml.write('  <sandstones>{}</sandstones>\n' .format( deed_elem.get( 'Sandstein', [0] )[0] ))
        xml.write('  <trunks>{}</trunks>\n' .format( deed_elem.get( 'Staemme', [0] )[0] ))
        xml.write('  <clothes>{}</clothes>\n' .format( deed_elem.get( 'Stoff', [0] )[0] ))
        xml.write('  <straws>{}</straws>\n' .format( deed_elem.get( 'Stroh', [0] )[0] ))
        xml.write('  <crystals>{}</crystals>\n' .format( deed_elem.get( 'Kristall', [0] )[0] ))
        xml.write(' </house>\n')
    pprint(usedtypes)
    return True
def write_xml(server_root, xmlpath):
    """Write the full <houses> XML document for *server_root* to *xmlpath*.

    The document is ISO-8859-1 encoded; the individual <house> elements are
    streamed in by buildHouseXML.  Always returns True.
    """
    with codecs.open(xmlpath, 'w', 'ISO-8859-1') as out:
        out.write('<?xml version="1.0" encoding="ISO-8859-1" ?>\n'
                  '<houses>\n')
        buildHouseXML(server_root, out)
        out.write('</houses>\n')
    return True
if __name__ == '__main__':
    # Stand-alone run: export the house list for the POL game world and
    # report how long the export took.
    path = '/gameworld/Pol/'
    s=time.time()
    write_xml(path,'results/PergonHouses.xml')
    e=time.time()
    print('total time : {}'.format(e-s))
|
989,534 | 81277a09a5fbbec9b011666f40749ece4aaf077c | import torch
from torch import Tensor
from torch.nn import Module, functional
from torch.nn.modules.utils import _ntuple
class PeriodicPadNd(Module):
    """Base module applying circular (periodic) padding.

    Subclasses assign ``self.padding``, a flat tuple in the order expected
    by ``torch.nn.functional.pad`` with ``mode='circular'``.
    """

    def forward(self, x: Tensor) -> Tensor:
        """Wrap *x* periodically along its trailing spatial dimensions."""
        padded = functional.pad(x, self.padding, 'circular')
        return padded

    def extra_repr(self) -> str:
        """Show the padding tuple in the module repr."""
        return f'{self.padding}'
class PeriodicPad2d(PeriodicPadNd):
    """Circular padding for 4-D (NCHW) inputs."""

    def __init__(self, padding) -> None:
        super().__init__()
        # _ntuple(4) accepts a scalar or an explicit 4-tuple
        # (left, right, top, bottom).
        self.padding = _ntuple(4)(padding)
class PeriodicPad3d(PeriodicPadNd):
    """Circular padding for 5-D (NCDHW) inputs."""

    def __init__(self, padding) -> None:
        super().__init__()
        # _ntuple(6) accepts a scalar or an explicit 6-tuple
        # (left, right, top, bottom, front, back).
        self.padding = _ntuple(6)(padding)
def main():
    # Smoke test: pad a 3x3 grid by 2 on every side and print both tensors.
    p = PeriodicPad2d(2)
    x = torch.arange(9).float().reshape(1, 1, 3, 3)
    print(x)
    y = p(x)
    print(y)


if __name__ == '__main__':
    main()
|
989,535 | d4954ee8b72a47c9f73e8212b345ac4bf501f809 | from com.abc.lib.college.student_ops import get_details, get_grade
# Interactive CLI: collect one student record from stdin, then print the
# formatted details and the computed grade.
name = input('Enter name : ')
roll = int(input('Enter roll : '))
marks = float(input('Enter marks : '))
gender = input('Enter gender : ')
print(get_details(name=name, roll=roll, marks=marks, gender=gender))
print(get_grade(marks=marks))
989,536 | 8f730ba3546f9da3107c0b2a8fba06db834b09f2 |
'''
* File Name : date_time_practice.py
* Language : Python
* Creation Date : 05-01-2021
* Last Modified : Wed Jan 6 10:29:09 2021
* Created By : David Hanson
'''
import datetime

# Construct and display a time-of-day value (03:43:02).
print(datetime.time(3, 43, 2))
|
989,537 | 235bbb1a52305c3a1d26bc23aaf947e2b51a1266 | import mysql.connector
from mysql.connector import Error
#hi
def lambda_handler(event, context):
    """AWS Lambda entry point: connect to the 'kart' MySQL database and
    print every row of the ``employees`` table.

    Fixes over the original:
    * ``print db`` was Python 2 syntax (a SyntaxError under Python 3,
      which the rest of this file targets) -- now ``print(db)``.
    * ``os`` was referenced but never imported -- imported locally here.
    * ``db.close()`` in ``finally`` raised NameError when the connection
      attempt itself failed -- the handle is now initialised and guarded.
    """
    import os  # local import: os.environ is used but os was never imported

    db = None
    try:
        db = mysql.connector.connect(host=os.environ['HOST'], user='USERNAME',
                                     passwd='PASSWORD', port='PORT',
                                     database='kart')
        if db.is_connected():
            print('Connected to MySQL database')
            print(db)
            cur = db.cursor()
            cur.execute("select * from employees")
            myresult = cur.fetchall()
            for x in myresult:
                print(x)
    except Error as e:
        print(e)
    finally:
        # Close only if the connection object was actually created.
        if db is not None:
            db.close()
|
989,538 | 6a879f13aa11abf4698a65668dd8391714635559 | import xlsx_parce
import data_types
from typing import List, Tuple
import sys
import os
# number of symbols that must be equal in pns
# `root` bounds the shared-prefix check in compare_pns; `tail` is not
# referenced in this module -- presumably reserved for a suffix check.
root = 8
tail = 4
def compare_pns(components: List[data_types.Component]):
    """
    compares all rows and gets similar pns

    Walks the components sorted by part number and collects three match
    lists: exact duplicates (`equal`), near matches sharing a prefix of
    ``root + 1`` characters plus the same footprint and type (`similar`),
    and rows whose alternative part numbers cross-reference this one
    (`alternative`).  All results are printed, nothing is returned.
    :param components: components loaded from the xlsx files
    :return: None
    """
    equal: List[Tuple[str, int, str, int]] = list()
    similar: List[Tuple[str, int, str, int]] = list()
    alternative: List[Tuple[str, int, str, int]] = list()
    warning: str = ""
    sorted_components = sorted(components, key=lambda x: x.pn)
    # remove components without pns, we can not compare them by pn
    sorted_components = [component for component in sorted_components if component.pn]
    for (index, component) in enumerate(sorted_components[:-1]):
        next_comp = sorted_components[index + 1]
        # Adjacent entries with identical (case-insensitive) part numbers.
        if component.pn.lower() == next_comp.pn.lower():
            similar.append((component.filename, component.row, next_comp.filename, next_comp.row))
            if component.component_type != next_comp.component_type or component.footprint != next_comp.footprint:
                warning += "Components in file %s row %i and file %s row %i have same partnumbers but different " \
                           "type or footprint" % (component.filename, component.row, next_comp.filename, next_comp.row)
            equal.append((component.filename, component.row, next_comp.filename, next_comp.row))
        # Prefix-based similarity only for non-parametrized parts with a
        # meaningful footprint string.
        if component.component_type not in data_types.parametrized and len(component.footprint) >= 4:
            next_index = index + 1
            while next_index < len(sorted_components) and \
                    sorted_components[next_index].pn.startswith(component.pn[:root + 1]):
                if component.footprint == sorted_components[next_index].footprint:
                    if component.component_type == sorted_components[next_index].component_type:
                        similar.append((component.filename, component.row, sorted_components[next_index].filename,
                                        sorted_components[next_index].row))
                next_index += 1
            # Cross-reference against every component's alternative pns.
            for alternative_comp in components:
                crosses = [alt for alt in alternative_comp.pn_alt if component.pn in alt]
                if crosses and component.row != alternative_comp.row:
                    if component.component_type == alternative_comp.component_type:
                        # NOTE(review): `equal` holds 4-tuples, so this
                        # 2-tuple membership test can never succeed --
                        # confirm the intended de-duplication.
                        if (component.row, next_comp.row) not in equal:
                            alternative.append((component.filename, component.row,
                                                alternative_comp.filename, alternative_comp.row))
    if equal:
        print("These rows have equal pns")
        print(equal)
    if similar:
        print("These rows have similar pns")
        print(similar)
    if alternative:
        print('These rows have similar alternative pn')
        print(alternative)
    print(warning)
def compare_capacitors(components: List[data_types.Component]):
    """
    compare capacitors by value and footprint

    Sorts all capacitors with a known pF value and reports every run of
    consecutive entries sharing both the value and the footprint.
    :param components: list of components
    :return: None (prints the matching row pairs, if any)
    """
    matches: List[Tuple[str, int, str, int]] = []
    caps = [comp for comp in components
            if comp.component_type == data_types.ComponentType.CAPACITOR
            and comp.details.absolute_pf_value]
    caps.sort(key=lambda comp: comp.details.absolute_pf_value)
    if len(caps) < 2:
        return
    for pos, cap in enumerate(caps):
        # Pair this capacitor with every following one in the same
        # value/footprint run; stop at the first mismatch.
        for other in caps[pos + 1:]:
            if other.details.absolute_pf_value != cap.details.absolute_pf_value \
                    or other.footprint != cap.footprint:
                break
            matches.append((cap.filename, cap.row, other.filename, other.row))
    if matches:
        print("Similar capacitor rows: ")
        print(matches)
def compare_resistors(components: List[data_types.Component]):
    """
    compare resistors by value and footprint

    Sorts all resistors with a known value and reports every run of
    consecutive entries sharing both the value and the footprint.
    :param components: list of components
    :return: None (prints the matching row pairs, if any)
    """
    matches: List[Tuple[str, int, str, int]] = []
    resistors = [comp for comp in components
                 if comp.component_type == data_types.ComponentType.RESISTOR
                 and comp.details.value]
    resistors.sort(key=lambda comp: comp.details.value)
    if len(resistors) < 2:
        return
    for pos, res in enumerate(resistors):
        # Pair this resistor with every following one in the same
        # value/footprint run; stop at the first mismatch.
        for other in resistors[pos + 1:]:
            if other.details.value != res.details.value or other.footprint != res.footprint:
                break
            matches.append((res.filename, res.row, other.filename, other.row))
    if matches:
        print("Similar resistor rows: ")
        print(matches)
if __name__ == '__main__':
    # CLI entry point: argument 1 is either a directory of .xlsx BOM files
    # or a single .xlsx file.  Loads all components, then runs the three
    # duplicate searches.
    if len(sys.argv) < 2:
        print("No file")
    elif not os.path.exists(sys.argv[1]):
        print("Incorrect filename")
    else:
        target = sys.argv[1]
        components_list: List[data_types.Component] = list()
        if os.path.isdir(target):
            # Collect every readable .xlsx file in the folder.
            for filename in os.listdir(target):
                full_path = os.path.join(target, filename)
                if os.path.splitext(filename)[1].lower() == '.xlsx' \
                        and os.access(full_path, os.R_OK):
                    try:
                        components_list.extend(xlsx_parce.get_components_from_xlxs(full_path))
                    except PermissionError:
                        # Skip files locked by another application (e.g. Excel).
                        pass
        else:
            components_list = xlsx_parce.get_components_from_xlxs(target)
        compare_pns(components_list)
        compare_capacitors(components_list)
        compare_resistors(components_list)
        print("Search completed")  # fixed typo: was "Search complited"
|
989,539 | 7e17b46eee68d8a8406fd035d9b7d0820e14f3ab | print('\033[32m Olá! Seja bem vindo a aula de 66 \033[m')
# Exercise 66: read integers until the user types the sentinel 999, then
# report how many numbers were entered and their sum (999 is not counted).
s = t = 0  # s: running sum, t: count of numbers entered
while True:
    n = int(input('Vem cá, fala aí um número [DIGITE 999 PRA PARAR]: '))
    if n == 999:
        break  # sentinel reached: stop reading
    s += n
    t += 1
print(f'A soma dos números é {s} e o total de números digitados é {t}')
|
989,540 | d42a5957800148056a623d52e667f46957e4ade7 | from django.core.management.base import BaseCommand, CommandError
import zipfile
import re
import csv
from django.utils.six import PY3
from italian_utils.models import Comune
class Command(BaseCommand):
    """Django management command that imports the Italian municipality
    (comune) list from an ISTAT zip archive.

    Usage: ``manage.py <command> <file_zip>``.  The archive is searched
    for a ``*comuni_italiani*.csv`` file; each row with a name and a
    cadastral code becomes a saved ``Comune`` instance.
    """
    args = '<file_zip>'
    help = "Importa l'elenco dei comuni proveniente dal sito istat"

    def handle(self, *args, **options):
        # Raises CommandError (with Italian messages) when the zip is
        # missing/corrupt or contains no municipality CSV.
        try:
            zfile = zipfile.ZipFile(args[0])
            found = None
            # Locate the municipalities CSV inside the archive.
            for filename in zfile.namelist():
                comuni_re = re.compile(r'.+comuni_italiani.+\.csv$')
                if comuni_re.match(filename):
                    found = filename
                    break
            if not found:
                raise CommandError('File dei comuni non trovato')
            csvstring = zfile.read(found)
            # Wrap the raw latin-1 bytes in a text stream; the mechanism
            # differs between Python 3 and Python 2.
            if PY3:
                import io
                csvfile = io.TextIOWrapper(
                    io.BytesIO(csvstring),
                    encoding="latin-1"
                )
            else:
                import StringIO
                csvfile = StringIO.StringIO(csvstring)
            comuni_reader = csv.DictReader(csvfile, delimiter=';')
            for row in comuni_reader:
                nome = row['Solo denominazione in italiano']
                # NOTE: the trailing space in 'Codice Catastale ' matches
                # the ISTAT CSV header exactly -- do not "fix" it.
                codice_catastale = row['Codice Catastale ']
                if nome and codice_catastale:
                    c = Comune(nome=nome, codice_catastale=codice_catastale)
                    c.save()
            self.stdout.write('Comuni importati correttamente')
        except IndexError:
            # args[0] missing: no zip file was given on the command line.
            raise CommandError('Nessun file specificato')
        except IOError:
            raise CommandError('File non trovato o corrotto')
|
989,541 | 6d17806b5d1cea6a57f423bfc4d06d28a666d7ec | #!/usr/bin/env python2.7
import os
from scipy import integrate
from scipy.interpolate import griddata
from astropy.io import fits
from astropy.table import Table
from astropy.time import Time
import astropy.coordinates as coo
import astropy.units as u
import numpy as np, healpy as hp
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.colors import LinearSegmentedColormap
import argparse
#Define Powerlaw and Band
def powerlaw(E, alpha):
    """Unnormalised power-law photon spectrum, N(E) proportional to E**alpha."""
    spectrum = E ** alpha
    return spectrum
def band(E, alpha, beta, Ep):
    """Unnormalised Band GRB photon spectrum with a 100 keV pivot.

    NOTE: energies at or below the break energy (alpha - beta) * Ep come
    first in the returned array, followed by energies above it -- the
    original ordering of E is not preserved when the regimes interleave
    (matching the historical behaviour of this function).
    """
    Ebreak = (alpha - beta) * Ep
    low = E[E <= Ebreak]
    high = E[E > Ebreak]
    low_part = (low / 100) ** alpha * np.exp(-low / Ep)
    amplitude = (Ebreak / 100) ** (alpha - beta) * np.exp(beta - alpha)
    high_part = amplitude * (high / 100) ** beta
    return np.append(low_part, high_part)
#Define function to select spectral type
def spectra(typ, E, alpha, beta, Ep):
    """Dispatch to the requested spectral model.

    'powerlaw' selects the simple power law; anything else falls through
    to the Band function.
    """
    return powerlaw(E, alpha) if typ == 'powerlaw' else band(E, alpha, beta, Ep)
#Function to calculate Total counts for each Thx and Thy
def countrate(typ, alpha, beta, Ep, qmask,inbase):
    """Integrate effective area x model spectrum over energy for every
    (ThetaX, ThetaY) pointing in the effective-area tables.

    Quadrant columns listed in *qmask* are zeroed out before integration.
    Writes 'countrate_thx_thy.txt' as a side effect and returns
    (K, data, angles) where K[:, 2] holds the expected total counts.
    """
    # Energy grid matching the tabulated effective-area files (keV).
    En = np.append(np.arange(30, 90, 10.), np.arange(100, 201,20.))
    angles = np.loadtxt(inbase + 'eff_area_files/angle_list.txt')
    data = {En[k]: np.array([]) for k in range(En.size)}
    for i in range(En.size):
        data[En[i]] = np.loadtxt( inbase + 'eff_area_files/angle_eff_area_%d.txt' % En[i])
    K = np.zeros((angles.shape[0],3))
    K[:,:2] = angles
    for i in range(angles.shape[0]):
        # Per-quadrant effective area at this pointing, one row per energy.
        eff_area = np.zeros((En.size, 4))
        for j in range(En.size):
            eff_area[j,:] = data[En[j]][np.where(np.logical_and(data[En[j]][:,0]==angles[i,0], data[En[j]][:,1]==angles[i,1]))][:,2:]
        eff_area[:,qmask] = 0
        K[i,2] = integrate.simps(eff_area[:,:].sum(1)*spectra(typ, En, alpha, beta, Ep) ,En)
    np.savetxt('countrate_thx_thy.txt', K, '%3.1f\t\t%3.1f\t\t%2.3f', header = ' Thx\t\tThy\t\tN_Total')
    return K, data, angles
#Function to calculate fluxlimits for each Thx, Thy
def calc_fluxlimit(K, data, angles, tbin, typ, alpha, beta, Ep, qmask, inbase, far):
    """Convert the cutoff count rate for time bin *tbin* into a flux limit
    per (ThetaX, ThetaY) pointing.

    Reads the false-alarm-rate histogram FITS file, sums the cutoff rate
    over the unmasked quadrants, normalises by the model count rate K[:, 2]
    and scales the energy-integrated spectrum to erg/cm^2.  Writes
    'Fluxlimit_<tbin>.txt' and returns F with flux limits in column 2.
    """
    En = np.append(np.arange(30, 90, 10.), np.arange(100, 201,20.))
    hdu = fits.open( inbase + 'rates/far{far:1.1f}_{tbin:s}_hist_tbin{tbin:s}.fits'.format(tbin=tbin, far=far))
    # Drop quadrants flagged bad, then sum the remaining cutoff rates.
    quad = np.array(['A', 'B', 'C', 'D'])
    quad = np.delete(quad, qmask)
    cutoff_rate = 0
    for i in range(quad.size):
        cutoff_rate = cutoff_rate + hdu[1].header['RATE_%s' %quad[i]]
    norm = cutoff_rate / K[:,2]
    F = np.zeros((angles.shape[0],3))
    F[:,:2] = angles
    for i in range(angles.shape[0]):
        F[i,2] = integrate.simps(spectra(typ, En, alpha, beta, Ep)*En,En)*(norm[i]*u.keV.to(u.erg))
    np.savetxt('Fluxlimit_{tbin:s}.txt'.format(tbin=tbin), F, '%3.3f\t\t%3.3f\t\t%2.3e', header = ' ThX\t\tThY\t\tFlux')
    return F
#Function to get Thx, Thy, earth angle and roll, rot of given ra,dec
def get_txty(mkfdata, trigtime, ra, dec, window=10):
    """
    Calculate earth ra-dec and satellite ponting using astropy
    Use pitch, roll and yaw information from the MKF file

    Attitude samples within +/-window seconds of trigtime are median-
    combined.  Returns (thetax_deg, thetay_deg, earth, roll, roll_rot)
    where thetax/thetay locate the transient (ra, dec in degrees) in the
    satellite frame.
    """
    # x = -yaw
    # y = +pitch
    # z = +roll
    # Read in the MKF file
    sel = abs(mkfdata['time'] - trigtime) < window
    # Get pitch, roll, yaw
    # yaw is minus x
    pitch = coo.SkyCoord( np.median(mkfdata['pitch_ra'][sel]) * u.deg, np.median(mkfdata['pitch_dec'][sel]) * u.deg )
    roll = coo.SkyCoord( np.median(mkfdata['roll_ra'][sel]) * u.deg, np.median(mkfdata['roll_dec'][sel]) * u.deg )
    roll_rot = np.median(mkfdata['roll_rot'][sel])
    # Flip yaw to get the +x axis direction on the sky.
    yaw_ra = (180.0 + np.median(mkfdata['yaw_ra'][sel]) ) % 360
    yaw_dec = -np.median(mkfdata['yaw_dec'][sel])
    minus_yaw = coo.SkyCoord( yaw_ra * u.deg, yaw_dec * u.deg )
    # Earth - the mkffile has satellite xyz
    earthx = np.median(mkfdata['posx'][sel]) * u.km
    earthy = np.median(mkfdata['posy'][sel]) * u.km
    earthz = np.median(mkfdata['posz'][sel]) * u.km
    earth = coo.SkyCoord(-earthx, -earthy, -earthz, frame='icrs', representation='cartesian')
    # Transient:
    transient = coo.SkyCoord(ra * u.deg, dec * u.deg)
    # Angles from x, y, z axes are:
    ax = minus_yaw.separation(transient)
    ay = pitch.separation(transient)
    az = roll.separation(transient)
    # the components are:
    cx = np.cos(ax.radian) # The .radian is not really needed, but anyway...
    cy = np.cos(ay.radian)
    cz = np.cos(az.radian)
    # Thetax = angle from z axis in ZX plane
    # lets use arctan2(ycoord, xcoord) for this
    thetax = u.rad * np.arctan2(cx, cz)
    thetay = u.rad * np.arctan2(cy, cz)
    return thetax.to(u.deg).value, thetay.to(u.deg).value, earth, roll, roll_rot
#Visibility mask , this excludes angles covered by SXT, UVVIT and LAXPC
def visible(thetax, thetay):
    """
    Return a boolean mask based on what should be considered "visible" for CZTI

    Points are progressively excluded: anything beyond +/-90 degrees, the
    ~30 degree SXT/UVIT exclusion cone, the ~40 degree LAXPC cone, and the
    diagonal seams stitching the two cones together.
    """
    ok = np.repeat(True, len(thetax))
    # Outside +/-90 degrees in either coordinate is never visible.
    ok[np.abs(thetax) > 90] = False
    ok[np.abs(thetay) > 90] = False
    # SXT/UVIT side: 30 degree cutoff.
    ok[(thetax < -22.0) & (thetay >= -thetax)] = False
    ok[(np.abs(thetax) <= 22.0) & (thetay > np.sqrt(30. ** 2 - thetax ** 2))] = False
    # LAXPC side: 40 degree cutoff (slightly conservative).
    ok[(thetay < -30) & (thetax >= -thetay)] = False
    ok[(np.abs(thetay) <= 30) & (thetax > np.sqrt(40. ** 2 - thetay ** 2))] = False
    # Line things up between the LAXPC and SXT/UVIT cutoffs.
    ok[(thetax > 22.0) & (thetay >= thetax)] = False
    ok[(thetay > 30.0) & (thetax >= thetay)] = False
    return ok
#plorring function
def plot_mollview(data, title, czti_theta, czti_phi, roll_rot, plotfile, cmap, cticks=None, log=False, cmax=None):
    """Append two Mollweide sky maps of *data* to *plotfile* (a PdfPages):
    one rotated 180 degrees (LIGO-map compatible), one centred on the CZTI
    boresight.  The CZTI pointing is marked with a red cross.

    cticks: explicit colourbar tick labels (else 5 auto ticks);
    log: plot log(data), labelling ticks with the linear values;
    cmax: optional upper clip for the colour scale.
    """
    RA = np.arange(30, 360, 30)
    Dec = np.arange(-75, 75.1,15)
    plt.figure(0,figsize=(16,9))
    #with 180 deg rotation (LIGO Map Compatible)
    if log:
        data = np.log(data)
    if cmax==None:
        hp.mollview(data, title = '', min= np.nanmin(data), max= np.nanmax(data) ,rot=(180,0), cmap=cmap)
    else:
        hp.mollview(data, title = '', min= np.nanmin(data), max= cmax ,rot=(180,0), cmap=cmap)
    hp.projscatter(czti_theta, czti_phi,color = 'r' ,marker='x', s=80)
    hp.projtext(czti_theta, czti_phi,'CZTI', fontsize=17)
    # Label the graticule in hours of RA and degrees of Dec.
    for i in range(RA.size):
        hp.projtext(RA[i],0, '%2d$^h$' %(RA[i]/15), lonlat= True, fontsize=15)
        hp.projtext(10,Dec[i], '%2d$^\circ$' %Dec[i], lonlat= True, fontsize=15)
    hp.graticule(15, 30)
    fig = plt.gcf()
    fig.suptitle(title, fontsize=10)
    ax = fig.axes[0]
    cb = ax.images[0].colorbar
    clow, chigh = cb.get_clim()
    # Relabel the colourbar: undo the log transform or use caller ticks.
    if cticks == None:
        ticks = np.linspace(clow, chigh, 5)
        if log:
            ticknames = ["{:0.1e}".format(np.exp(x)) for x in ticks]
        else:
            ticknames = ["{:0.1e}".format(x) for x in ticks]
    else:
        ticks = np.linspace(clow, chigh, len(cticks))
        ticknames = cticks
    cb.set_ticks(ticks)
    cb.set_ticklabels(ticknames)
    cb.set_label("Flux Density ( erg cm$^{-2}$ s$^{-1}$ )", fontsize=15)
    cb.ax.tick_params(labelsize=15)
    plotfile.savefig()
    #With CZTI Boresightat centre
    if cmax==None:
        hp.mollview(data, title= '', min=np.nanmin(data), max= np.nanmax(data), rot=(np.rad2deg(czti_phi), 90 - np.rad2deg(czti_theta), roll_rot), cmap=cmap)
    else:
        hp.mollview(data, title= '', min=np.nanmin(data), max= cmax, rot=(np.rad2deg(czti_phi), 90 - np.rad2deg(czti_theta), roll_rot), cmap=cmap)
    hp.projscatter(czti_theta, czti_phi,color = 'r' ,marker='x')
    hp.projtext(czti_theta, czti_phi,'CZTI')
    for i in range(RA.size):
        hp.projtext(RA[i],0, '%2d$^h$' %(RA[i]/15), lonlat= True, fontsize=10)
        hp.projtext(0,Dec[i], '%2d$^\circ$' %Dec[i], lonlat= True, fontsize=10)
    hp.graticule(15, 30)
    fig = plt.gcf()
    fig.suptitle(title + ' (CZTI frame)', fontsize=10)
    ax = fig.axes[0]
    cb = ax.images[0].colorbar
    clow, chigh = cb.get_clim()
    if cticks == None:
        ticks = np.linspace(clow, chigh, 5)
        if log:
            ticknames = ["{:0.1e}".format(np.exp(x)) for x in ticks]
        else:
            ticknames = ["{:0.1e}".format(x) for x in ticks]
    else:
        ticks = np.linspace(clow, chigh, len(cticks))
        ticknames = cticks
    cb.set_ticks(ticks)
    cb.set_ticklabels(ticknames)
    plotfile.savefig()
    return
if __name__ == "__main__":
    # Command-line driver (Python 2): compute CZTI flux limits over a GW
    # localisation map for a given trigger time.
    parser = argparse.ArgumentParser()
    parser.add_argument("mkffile", type = str, help = " Level 1 mkf file")
    parser.add_argument("locmap", type = str, help = "LIGO baystar file")
    parser.add_argument("trigtime", type = str, help = "Trigtime in YYYY-MM-DDTHH:MM:SS format")
    parser.add_argument("--only_map", dest='only_map', action='store_true') # If option not given, args.plotlc becomes False
    parser.add_argument("--mask_quad", nargs='+',type = int, help = 'List of bad quadrants if any', default=[] )
    parser.add_argument("--spectra", type = str, help = " Spectra type (powerlaw or band), default is Powerlaw with alpha = -1", default = 'powerlaw')
    parser.add_argument("--alpha", type = float, help = "Photon powerlaw index for powerlaw function and band function, default = -1 ", default = -1)
    parser.add_argument("--beta", type = float, help = " Beta value for Band function", default = -1)
    parser.add_argument("--E_peak", type = float, help = "Epeak value for Band function", default = 150)
    parser.add_argument("--inbase", type = str, help = "Base directroy for pixarea and angle files, default is current dir", default = './')
    parser.add_argument("--far", type = str, help = "False alarm rate, default is 0.1", default = 0.1)
    parser.add_argument("--outbase", type = str, help = "Output pdf file prefix, default is none", default = '')
    args = parser.parse_args()
    #Load mkf file and convert Trig time
    print "Loading mkf data and calculating trigger time in AstroSat seconds...\n"
    mkfdata = fits.getdata(args.mkffile, 1)
    # AstroSat mission time counts seconds from 2010-01-01.
    mission_time = Time(args.trigtime) - Time('2010-01-01 00:00:00')
    trigtime = mission_time.sec
    #Load sky localisation map
    print "Loading localisaton map...\n"
    probmap = hp.read_map(args.inbase + args.locmap)
    NSIDE = fits.open(args.inbase + args.locmap)[1].header['NSIDE']
    print "Calculating Thx, Thy values and visibility masks...\n"
    #Calculate ThetaX ThetaY values.
    theta, phi = hp.pix2ang(NSIDE, np.arange(0,hp.nside2npix(NSIDE),1))
    ra = phi
    dec = np.pi/2 - theta
    thx, thy, earth, czti_z, roll_rot= get_txty(mkfdata, trigtime, np.rad2deg(ra), np.rad2deg(dec), 10)
    #Caculate visibility mask
    vismask = visible(thx, thy)
    #CZTI pointing
    czti_ra = czti_z.fk5.ra.rad
    czti_dec = czti_z.fk5.dec.rad
    czti_theta = np.pi/2 - czti_dec
    czti_phi = czti_ra
    #Earth and Focal Plane view
    ref_map = np.zeros(hp.nside2npix(NSIDE))
    #Earth and focal plane mask
    mask = np.repeat(False, probmap.size)
    earth_ra = earth.fk5.ra.rad
    earth_dec = earth.fk5.dec.rad
    earth_dist = earth.fk5.distance.km
    earth_theta = np.pi/2 - earth_dec
    earth_phi = earth_ra
    # Angular radius of the Earth disc as seen from the satellite.
    earth_occult = np.arcsin(6378./earth_dist)
    earth_vec = hp.ang2vec(earth_theta, earth_phi)
    earthmask = hp.query_disc(NSIDE, earth_vec, earth_occult)
    ref_map[earthmask] = 1
    # Front hemisphere (within 90 degrees of the boresight).
    front_vec = hp.ang2vec(czti_theta , czti_phi)
    front = hp.query_disc(NSIDE, front_vec, np.pi/2)
    mask[front] = True
    # ref_map encodes: 0 visible, 1 Earth, 2 behind, 3 behind + Earth.
    ref_map[~mask] = ref_map[~mask] + 2
    mask[earthmask] = False
    plotfile = PdfPages(args.outbase + 'Fluxlimits.pdf')
    print "Plotting visibility plots...\n"
    #Colormap
    colors = [(1, 1, 0), (.5, 0, 0), (.1, 0, 0)]
    cmap = LinearSegmentedColormap.from_list('VisMap', colors, N=4)
    cmap.set_under("W")
    plot_mollview(ref_map, 'CZTI Visibility', czti_theta, czti_phi, roll_rot, plotfile, cmap, ['Visible', 'Earth', 'Behind', 'Behind + Earth'])
    #Colormap
    cmap = plt.cm.YlOrRd
    cmap.set_under("w")
    plot_mollview(probmap, 'GW Localisation Map', czti_theta, czti_phi, roll_rot, plotfile, cmap)
    # Probability visible to CZTI: blank out occulted / behind pixels.
    skymap = np.copy(probmap)
    skymap[~vismask] = np.nan
    skymap[~mask] = np.nan
    vis_prob = np.nansum(skymap)
    print "\n Total Visible Probability = %6.2e" % (vis_prob*100) , "\n"
    plot_mollview(skymap, "", czti_theta, czti_phi, roll_rot, plotfile, cmap, cmax = probmap.max())
    if args.only_map:
        plotfile.close()
        quit()
    # Calculate counterate from energy files.
    if not os.path.isdir("./eff_area_files"):
        os.symlink("/home/cztipoc/czti/trunk/users/sujay/eff_area_files", "./eff_area_files")
    print "Calculating countrate and fluxlimits...\n"
    K, data, angles = countrate(args.spectra, args.alpha, args.beta, args.E_peak, args.mask_quad,args.inbase)
    for tbin in ['0.1', '1', '10']:
        F = calc_fluxlimit(K, data, angles, tbin, args.spectra, args.alpha, args.beta, args.E_peak, args.mask_quad,args.inbase, args.far)
        # Interpolate the flux-limit table onto a 1-degree Thx/Thy grid.
        grid_tx, grid_ty = np.mgrid[-90:90.1:1, -90.:90.1:1]
        points = F[:,:2]
        grid_area = griddata(points, F[:,2], (grid_tx, grid_ty), method='nearest')
        pixflux = np.zeros((thx.size))
        pixflux[vismask] = grid_area[np.int16(np.round(thx[vismask] - 91)), np.int16(np.round(thy[vismask] - 91))]
        pixflux[~mask] = np.nan
        pixflux[pixflux==0] = np.nan
        #Calclate weighted flux
        fluxlim = np.nansum(pixflux * skymap)/ np.nansum(skymap)
        #Colormap
        cm = plt.cm.jet
        cm.set_under("w")
        #Plot
        # NOTE(review): `title` is built but plot_mollview is called with ""
        # -- presumably the title was meant to be passed through.
        title= "{tbin:s} s binning, effective fluence limit = {fluence:0.2e} $ergs/cm^2$, flux limit {flux:0.2e} $ergs/cm^2/sec$".format(tbin=tbin, fluence=fluxlim, flux=fluxlim/float(tbin))
        plot_mollview(pixflux, "", czti_theta, czti_phi, roll_rot, plotfile, cm, log=True)
        print "\n At {tbin:s} s binning, Effective limit = {fluence:0.2e} ergs/cm^2 = {flux:0.3e} ergs/cm^2/sec \n".format(tbin=tbin, fluence=fluxlim, flux=fluxlim/float(tbin))
    plotfile.close()
|
989,542 | 76c2ba80354736e9be915bb18125d61d93fdc99a | """
Author: Lizhou Cai
Course: CSCI 4270
File: hw7_part2.py
Purpose: Do optical flow on pairs of images
to detect motion of the camera and objects
"""
import cv2
import numpy as np
import numpy.linalg as LA
import os
import sys
def is_inlier(p1, p2, foe, tau):
    """
    Check whether the line through p1 and p2 passes within tau pixels of foe.
    :param p1: the first point of the line
    :param p2: the second point of the line
    :param foe: the focus of expansion (x, y)
    :param tau: distance tolerance in pixels
    :return: True if the point-to-line distance is below tau
    """
    # Homogeneous line through the two points: cross of homogeneous points.
    h1 = np.append(p1, 1)
    h2 = np.append(p2, 1)
    a, b, c = np.cross(h1, h2)
    distance = np.abs(a * foe[0] + b * foe[1] + c) / LA.norm((a, b))
    return distance < tau
def get_inliers(kp1, kp2, foe, tau):
    """
    Split motion-vector indices into inliers and outliers with respect
    to the focus of expansion.
    :param kp1: points before motion
    :param kp2: points after motion
    :param foe: the focus of expansion
    :param tau: distance tolerance passed to is_inlier
    :return: (inlier indices, outlier indices) as numpy arrays
    """
    inlier_idx = []
    outlier_idx = []
    for idx, (a, b) in enumerate(zip(kp1, kp2)):
        bucket = inlier_idx if is_inlier(a, b, foe, tau) else outlier_idx
        bucket.append(idx)
    return np.array(inlier_idx), np.array(outlier_idx)
def get_intersect(pair1, pair2):
    """
    Intersection of the two lines through the given point pairs.

    Works in homogeneous coordinates: the cross product of two homogeneous
    points is the line through them, and the cross product of two lines is
    their intersection.  Parallel lines yield (inf, inf), which the caller
    screens out.
    :param pair1: first pair of points (2x2)
    :param pair2: second pair of points (2x2)
    :return: (x, y) of the intersection
    """
    stacked = np.vstack((pair1, pair2))
    homo = np.hstack((stacked, np.ones((4, 1))))
    line1, line2 = np.cross(homo[0], homo[1]), np.cross(homo[2], homo[3])
    x, y, z = np.cross(line1, line2)
    return x / z, y / z
def RANSAC(kp1, kp2, iterations):
    """
    Do RANSAC to find the best foe

    Each iteration samples two motion vectors, intersects their lines to
    propose a focus of expansion, and scores it by the number of motion
    vectors passing within 5 pixels of it.
    :param kp1: keypoints before the motion
    :param kp2: keypoints after the motion
    :param iterations: the number of iterations for RANSAC
    :return: (best inlier count, best foe, inlier indices, outlier indices)
    """
    k_max = 0
    m_foe = (0, 0)
    m_inliers = []
    m_outliers = []
    for k in range(iterations):
        # random select 2 different points as sample
        sample = np.random.randint(0, len(kp1), 2)
        if sample[0] == sample[1]:
            continue
        # calculate the line through the 2 points
        p1 = kp1[sample[0]], kp2[sample[0]]
        p2 = kp1[sample[1]], kp2[sample[1]]
        # the intersection
        foe = get_intersect(p1, p2)
        if foe == (np.inf, np.inf):
            # parallel motion lines: no usable intersection
            continue
        # calculate the inliers and outliers
        inliers, outliers = get_inliers(kp1, kp2, foe, 5)
        # update the best feo
        if len(inliers) > k_max:
            k_max = len(inliers)
            m_foe = foe
            m_inliers = inliers
            m_outliers = outliers
    return k_max, m_foe, m_inliers, m_outliers
def estimate_foe(start, end):
    """
    Least-squares estimate of the focus of expansion: the point that best
    fits the intersection of all motion lines start -> end, solved via the
    normal equations.
    :param start: starting points, shape (n, 2)
    :param end: ending points, shape (n, 2)
    :return: the estimated foe as an (int, int) tuple
    """
    motion = end - start
    # One row of the linear system per motion vector.
    A = np.zeros(start.shape)
    A[:, 0] = motion[:, 1]
    A[:, 1] = -motion[:, 0]
    b = start[:, 0] * motion[:, 1] - start[:, 1] * motion[:, 0]
    solution = np.dot(np.linalg.inv(np.dot(A.T, A)), np.dot(A.T, b))
    return int(solution[0]), int(solution[1])
def draw_circles(img, points, color):
    """
    Mark each point with a radius-5 circle outline.
    :param img: the original image
    :param points: circle centres
    :param color: outline colour
    :return: the annotated image
    """
    for center in points:
        img = cv2.circle(img, (center[0], center[1]), 5, color, thickness=2)
    return img
def draw_arrows(img, p1, p2, color):
    """
    Draw an arrowed line from every start point in p1 to the matching
    end point in p2.
    :param img: the original image
    :param p1: starting points
    :param p2: ending points
    :param color: arrow colour
    :return: the annotated image
    """
    for start, end in zip(p1, p2):
        img = cv2.arrowedLine(img, tuple(start.ravel()), tuple(end.ravel()),
                              color, thickness=3)
    return img
def draw_clusters(img, p1, p2, k, label, thres, padding):
    """
    draw clusters points and motion vectors with bounding boxes
    :param img: the original image
    :param p1: the set of starting points
    :param p2: the set of ending points
    :param k: number of clusters
    :param label: label for each data point
    :param thres: threshold to get rid of small clusters
    :param padding: the padding for the bounding box
    :return: the result image
    """
    for i in range(k):
        # Random colour per cluster (BGR, 0-255).
        color = np.random.uniform(low=0, high=255, size=3)
        index = np.where(label == i)[0]
        if len(index) <= thres:
            # Too few members: treat as noise and skip.
            continue
        # plot for one cluster
        start = p1[index]
        end = p2[index]
        img = draw_circles(img, start, color)
        img = draw_circles(img, end, color)
        img = draw_arrows(img, start, end, color)
        # Axis-aligned bounding box around the end points, padded.
        min_x, min_y = np.amin(end, axis=0).astype(int) - padding
        max_x, max_y = np.amax(end, axis=0).astype(int) + padding
        img = cv2.rectangle(img, (min_x, min_y), (max_x, max_y), color, 2)
    return img
if __name__ == "__main__":
    # Driver: process images in consecutive pairs, estimate camera motion
    # via the focus of expansion, and cluster independently-moving objects.
    # Handle the command-line arguments
    if len(sys.argv) != 2:
        print("Usage: %s in_path\n" % sys.argv[0])
        sys.exit()
    in_path = sys.argv[1]
    if not os.path.exists('result/'):
        os.mkdir('result/')
    img_list = os.listdir(in_path)
    img_list = [name for name in img_list
                if 'jpg' in name.lower()
                or 'png' in name.lower()]
    img_list.sort()
    # params for ShiTomasi corner detection
    feature_params = dict(maxCorners=200,
                          qualityLevel=0.5,
                          minDistance=10,
                          blockSize=3)
    # Parameters for lucas kanade optical flow
    lk_params = dict(winSize=(10, 10),
                     maxLevel=5,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
    # read in 2 images at a time
    for i in range(0, len(img_list), 2):
        j = i + 1
        print('\nProcessing', img_list[i], img_list[j])
        im1_path = os.path.join(in_path, img_list[i])
        im2_path = os.path.join(in_path, img_list[j])
        im1_name, im1_type = img_list[i].split('.')
        im2_name, im2_type = img_list[j].split('.')
        name = im1_name.split('_')[0]
        im1 = cv2.imread(im1_path)
        im2 = cv2.imread(im2_path)
        im1_gry = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
        im2_gry = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
        kp1 = cv2.goodFeaturesToTrack(im1_gry, mask=None, **feature_params)
        # calculate optical flow
        kp2, st, err = cv2.calcOpticalFlowPyrLK(im1_gry, im2_gry, kp1, None, **lk_params)
        # Select good points
        kp1 = kp1[st == 1]
        kp2 = kp2[st == 1]
        # RANSAC
        k_max, foe, inliers, outliers = RANSAC(kp1, kp2, 50)
        foe = (int(foe[0]), int(foe[1]))
        print('Number of keypoints %d' % len(kp1))
        print('foe from RANSAC', foe)
        print('Number of inliers %d' % k_max)
        rate = k_max/len(kp1)
        print("percent of inliers {:.1%}".format(rate))
        inliers1, inliers2 = kp1[inliers], kp2[inliers]
        outliers1, outliers2 = kp1[outliers], kp2[outliers]
        im_out = im2.copy()
        # A low inlier fraction means no consistent expansion pattern,
        # i.e. the camera is (approximately) stationary.
        if rate < 0.15:
            print("Not enough inliers, camera is not moving")
        else:
            foe_est = estimate_foe(inliers1, inliers2)
            print("least-squares estimate of feo:", foe_est)
            im_out = cv2.circle(im_out, (int(foe[0]), int(foe[1])), 15, (0, 0, 255), thickness=-1)
        # draw the motion vectors and keypoints
        im_out = draw_circles(im_out, kp1, (0, 255, 255))
        im_out = draw_circles(im_out, kp2, (0, 255, 0))
        im_out = draw_arrows(im_out, inliers1, inliers2, (0, 0, 255))
        im_out = draw_arrows(im_out, outliers1, outliers2, (255, 0, 0))
        out_name = 'result/' + name + '_output1.jpg'
        cv2.imwrite(out_name, im_out)
        # do kmeans clustering on the outlires left
        all_values = outliers2
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 1.0)
        num_reinitializations = 30
        k = min(5, len(outliers2))
        initialization_method = cv2.KMEANS_PP_CENTERS
        ret, label, center = cv2.kmeans(all_values, k, None, criteria,
                                        num_reinitializations, initialization_method)
        # draw the clusters
        im_out2 = im2.copy()
        out_name = 'result/' + name + '_output2.jpg'
        center = np.uint8(center)
        # get rid of small clusters
        thres = 0.2 * len(outliers2)
        im_out2 = draw_clusters(im_out2, outliers1, outliers2, k, label, thres, 20)
        cv2.imwrite(out_name, im_out2)
|
989,543 | d2d5883006de26ccd538f5d28b33543abc80f534 | from flask_sqlalchemy import SQLAlchemy
# Shared SQLAlchemy handle; presumably bound to the Flask app elsewhere via
# db.init_app() -- confirm against the application factory.
db = SQLAlchemy()
# Model imports come after `db` exists because the models depend on it
# (circular-import guard).
from .country import Country
from .measure import Measure
|
989,544 | 43a3931b7ff9df06acfdd7344a51ac9a2959cf2f | #http://www.careercup.com/page?pid=facebook-interview-questions
def longestRepeated(string):
    """Return the longest substring of *string* with no repeated characters.

    On ties the earliest such substring wins.  Bug fixed: the original only
    updated the answer when the inner scan was cut short by a duplicate, so a
    duplicate-free run reaching the end of the string (e.g. all of "abc") was
    never considered and '' was returned.
    """
    answer = ''
    for i in range(len(string)):
        substring = string[i]
        seen = {string[i]}
        for j in range(i + 1, len(string)):
            if string[j] in seen:
                break
            seen.add(string[j])
            substring += string[j]
        # Compare after the scan, whether or not it hit a duplicate.
        if len(substring) > len(answer):
            answer = substring
    return answer
def main():
    """Demo: print the longest duplicate-free substring of a sample string."""
    sample = "abbabcdaa"
    result = longestRepeated(sample)
    print(result)
if __name__ == '__main__':
main() |
989,545 | 5fcdf61d89f6a52ad0bcee48c9e12fbe3fa43c3d | from PIL import Image
from numpy import asarray, argmax, array
LABELS = ('circle', 'line', 'arch')
def get_one_hot(label):
    """Return a one-hot list encoding *label*'s position in LABELS.

    Raises ValueError (from LABELS.index) for an unknown label.
    """
    hot = LABELS.index(label)
    return [1 if slot == hot else 0 for slot in range(len(LABELS))]
def get_label(one_hot):
    """Return the label at the position of the largest value in *one_hot*.

    numpy.argmax on a 1-D input returns a scalar, so the original
    ``argmax(one_hot)[0]`` raised IndexError ("invalid index to scalar
    variable"); index LABELS with the scalar directly instead.
    """
    return LABELS[int(argmax(one_hot))]
def __to2d__(img):
result = img.convert('L')
result = asarray(result)
x = []
for row in result:
we = []
x.append(we)
for value in row:
we.append(1 - value / 255)
return array(x).reshape(28, 28, 1)
class Figure:
    """A labelled training sample: a 28x28 image plus one-hot label."""

    def __init__(self, image, label) -> None:
        super().__init__()
        self.image = image.resize((28, 28), Image.ANTIALIAS)
        self.label = label
        # Encode the RESIZED image.  The original passed `image`, but
        # __to2d__ hard-reshapes to (28, 28, 1) and fails on any other size.
        self.encoding = __to2d__(self.image)
        self.one_hot = get_one_hot(label)
|
989,546 | 209c85bc0979f10270bd7e53867baf9cea52aa00 | """
= EN =
A famous casino is suddenly faced with a sharp decline of their revenues.
They decide to offer Texas hold'em also online. Can you help them by writing
an algorithm that can rank poker hands?
Task
Create a poker hand that has a method to compare itself to another poker hand:
compare_with(self, other_hand)
A poker hand has a constructor that accepts a string containing 5 cards:
PokerHand("KS 2H 5C JD TD")
The characteristics of the string of cards are:
Each card consists of two characters, where
The first character is the value of the card:
2, 3, 4, 5, 6, 7, 8, 9, T(en), J(ack), Q(ueen), K(ing), A(ce)
The second character represents the suit:
S(pades), H(earts), D(iamonds), C(lubs)
A space is used as card separator between cards
The result of your poker hand compare can be one of these 3 options:
[ "Win", "Tie", "Loss" ]
Notes
- Apply the Texas Hold'em rules for ranking the cards.
- Low aces are NOT valid in this kata.
- There is no ranking for the suits.
= RU =
Известное казино неожиданно столкнулось с резким падением их доходов.
Они решают предложить Техасский Холдем также онлайн. Можете ли вы помочь им,
написав алгоритм, который может ранжировать покерные руки ?
задача
Создайте покерную комбинацию, в которой есть метод для сравнения себя с
другой покерной комбинацией:
сравнить_с собой (сам, другой_ручный)
В покерной руке есть конструктор, который принимает строку из 5 карт:
PokerHand ("KS 2H 5C JD TD")
Характеристики последовательности карт:
Каждая карта состоит из двух символов, где
Первый символ - это значение карты:
2, 3, 4, 5, 6, 7, 8, 9, T (en), J (ack), Q (ueen), K (ing), A (ce)
Второй символ представляет костюм:
S (pades), H (earts), D (iamonds), C (lubs)
Пробел используется как разделитель карт между картами
Результат вашей покерной руки может быть одним из следующих 3 вариантов:
["Win", "Tie", "Loss"]
Заметка:
Примените правила Техасского Холдема для ранжирования карт.
Низкие тузы НЕ действительны в этом слове.
Там нет рейтинга для костюмов.
"""
class PokerHand(object):
# Ценность каждой карты.
card_value = {'2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8,
'9': 9, 'T': 10, 'J': 11, 'Q': 12, 'K': 13, 'A': 14
}
# Ценность комбинаций карт.
COUPLE = 15
TWO_COUPLE = 16
SET = 17
STREET = 18
FLASH = 19
FULL_HOUSE = 20
KARE = 21
STREET_FLASH = 22
ROYAL_FLASH = 23
# Возможные варианты результата игры.
LOSS, TIE, WIN = 'Loss', 'Tie', 'Win'
def __init__(self, hand):
""" hand_value[] - ценность каждой из карт игрока.
hand_color[] - цвет каждой из карт игрока.
hand_couple - словарь содержащий только дубликаты карт, имеет вид:
{ценность карты: её кол-во у игрока}
"""
self.hand = hand.strip().split()
self.hand_value = sorted([self.card_value[card[0]]
for card in self.hand])
self.hand_color = [card[1] for card in self.hand]
self.hand_couple = {card: self.hand_value.count(card)
for card in self.hand_value
if self.hand_value.count(card) > 1}
def get_high_card(self):
""" Получет ценность самой высокой карты из имеющихся."""
return self.hand_value[4]
def is_color_all(self):
""" Проверяет одинаковая ли масть у всех карт."""
if len(set(self.hand_color)) == 1:
return True
else:
return False
def find_couple(self, quantity_card):
""" Проверяет есть ли указанное кол-во одинаковых карт в словаре."""
if self.hand_couple:
for value in self.hand_couple.values():
if quantity_card == value:
return True
return False
def is_couple(self):
""" Проверяет есть ли в картах комбинация ПАРА."""
return self.find_couple(2)
def is_two_couple(self):
""" Проверяет есть ли в картах комбинация ДВЕ ПАРЫ."""
if self.hand_couple:
count = 0
for value in self.hand_couple.values():
if value == 2:
count += 1
if count == 2:
return True
return False
def is_set(self):
""" Проверяет есть ли в картах комбинация СЕТ."""
return self.find_couple(3)
def is_street(self):
""" Проверяет есть ли в картах комбинация СТРИТ."""
hand_value_str = ''.join(map(str, self.hand_value))
street_value = ''.join([str(i) for i in range(2, 15)])
if hand_value_str in street_value:
return True
else:
return False
def is_full_house(self):
""" Проверяет есть ли в картах комбинация ФУЛЛ ХАУС."""
if self.is_couple() and self.is_set():
return True
return False
def is_flash(self):
""" Проверяет есть ли в картах комбинация ФЛЭШ."""
return self.is_color_all()
def is_kare(self):
""" Проверяет есть ли в картах комбинация КАРЕ."""
return self.find_couple(4)
def is_street_flash(self):
""" Проверяет есть ли в картах комбинация СТРИТ_ФЛЭШ."""
if self.is_color_all() and self.is_street():
return True
return False
def is_royal_flash(self):
""" Проверяет есть ли в картах комбинация РОЯЛ_ФЛЭШ."""
if self.is_street_flash() and 14 in self.hand_value:
return True
return False
def equal_card_win(self, other):
""" Детальное сравнивает ценности карт игрока и оппонента
Если кол-во очков одинаково, последовательно сравнивает ценность
каждой карты из отсортированного (от большего к меньшему) набора.
"""
for card_index, card_value in enumerate(self.hand_value[::-1]):
if card_value > other.hand_value[::-1][card_index]:
return self.WIN
elif card_value < other.hand_value[::-1][card_index]:
return self.LOSS
return self.TIE
def check_hand(self):
""" Проверяет совпала ли комбинация, если нет то берем старшую карту"""
if self.is_royal_flash():
return self.ROYAL_FLASH
elif self.is_street_flash():
return self.STREET_FLASH
elif self.is_kare():
return self.KARE
elif self.is_full_house():
return self.FULL_HOUSE
elif self.is_flash():
return self.FLASH
elif self.is_street():
return self.STREET
elif self.is_set():
return self.SET
elif self.is_two_couple():
return self.TWO_COUPLE
elif self.is_couple():
return self.COUPLE
else:
return self.get_high_card()
def compare_with(self, other):
""" Сравнивает ценность карт игрока и ценность карт опонента,
если ценность равна, запускает более детальное сравнение.
"""
player_hand = self.check_hand()
enemy_hand = other.check_hand()
if player_hand > enemy_hand:
return self.WIN
elif player_hand < enemy_hand:
return self.LOSS
elif player_hand == enemy_hand:
return self.equal_card_win(other)
|
989,547 | d9d925abe0c18e60a7b244e50a60cba52504c77b | import time
from random import randrange
from threading import *
class Client(Thread):
    """Worker thread that polls "sf.txt" every 5 seconds, replacing '-1'
    markers with a random digit, until stopped or a '7' is seen."""

    def __init__(self):
        Thread.__init__(self)
        # Polled by run(); cleared by stop() to end the loop.
        self.running = True

    def run(self):
        while self.running:
            # "r+" reads the existing content; the original "w+" truncated
            # the file before it could be read, so nothing was ever found.
            # NOTE(review): unlike "w+", "r+" requires the file to exist.
            with open("sf.txt", "r+") as f:
                contents = f.read()
                if '-1' in contents:
                    # str.replace returns a new string; the original code
                    # discarded the result.
                    contents = contents.replace("-1", str(randrange(10)))
                    f.seek(0)
                    f.write(contents)
                    f.truncate()
                    print(contents)
                if '7' in contents:
                    # Preserves the original behaviour of bailing out of the
                    # thread when a '7' appears.
                    exit(1)
            time.sleep(5)

    def stop(self):
        # Ask the polling loop to finish after its current iteration.
        self.running = False
# NOTE(review): module-level side effect -- importing this file immediately
# spawns the polling thread.
b = Client()
b.start()
|
989,548 | 0163b59ed074f3f8e7b416f003fd3642f53bc66e | # coding=utf-8
'''Print the first 30 natural numbers (1 through 30) together with their
arithmetic mean.  (Exercise statement translated from Spanish.)'''
total = 0
for numero in range(1, 31):
    print(numero)
    total += numero
# The exercise also asks for the arithmetic mean; the original never
# computed it.
print(total / 30)
989,549 | c807c491ab0a931ec925ac090343d0ef7f9933cc | import os
import shutil
import sys
import tempfile
# Pick the implementation sub-package by interpreter version.
# NOTE(review): assumes major == 3; any other major falls into 'py_old'.
if sys.version_info.major == 3 and sys.version_info.minor >= 3:
    impl = 'py_33'
else:
    impl = 'py_old'
# Extend this package's search path so the chosen implementation directory
# is importable (only valid inside a package __init__).
__path__.append(os.path.join(os.path.dirname(__file__), impl))
from py_curl.downloader import Downloader
from py_curl.machinery import PostRemoteFinder
from py_curl.logger import logger
# Register the finder so imports are routed through PostRemoteFinder.
sys.path_hooks += [PostRemoteFinder]
# Names of the available event loops; the synchronous loop always exists.
LOOPS = set(['sync'])
try:
    import gevent  # NOQA
    # set.add, not list.append -- the original raised AttributeError as soon
    # as gevent was importable.
    LOOPS.add('gevent')
except ImportError:
    logger.debug('gevent loop is not availible')
try:
    import tulip  # NOQA
    LOOPS.add('tulip')
except ImportError:
    logger.debug('tulip loop is not availible')
class TempDirsGC(object):
    """Hands out temporary directories and removes them all on collection."""

    def __init__(self):
        # Every directory created via new_temp_dir(), kept for cleanup.
        self._temp_dirs = []

    def new_temp_dir(self):
        """Create and remember a fresh 'py_curl'-prefixed temp directory."""
        created = tempfile.mkdtemp(prefix='py_curl')
        self._temp_dirs.append(created)
        return created

    def __del__(self):
        # Best-effort cleanup; ignore_errors avoids raising during
        # interpreter teardown.
        for created in self._temp_dirs:
            shutil.rmtree(created, ignore_errors=True)
# Module-wide collector: temp dirs live until this module is torn down.
_TDGC = TempDirsGC()


def curl(*args, **kwargs):
    """Download module(s) via Downloader and inject them into the CALLER's
    local namespace, keyed by alias or module name."""
    # Frame hack: grab the caller's locals.
    # NOTE(review): writing to f_locals of a function frame is not guaranteed
    # to persist in CPython; this is reliable only at module scope.
    p_locals = sys._getframe(1).f_locals
    kwargs['temp_dir'] = _TDGC.new_temp_dir()
    downloader = Downloader(*args, **kwargs)
    for module_name, module_details in downloader.modules.items():
        # Make the downloaded file importable, then import it.
        sys.path += [os.path.dirname(module_details['path'])]
        module = __import__(module_name)
        p_locals[module_details.get('alias') or module.__name__] = module
|
989,550 | 77f484aa811d5c5f54faa8e80524fba903b856d6 | from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.metrics import roc_auc_score
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from gwo import gwo
from performance import printPerformance
# Load the pre-split feature matrices and label vectors (prepared elsewhere).
X_train = np.load("./data/X_train.npy")
y_train = np.load("./data/y_train.npy")
X_val = np.load("./data/X_val.npy")
y_val = np.load("./data/y_val.npy")
X_test= np.load("./data/X_test.npy")
y_test = np.load("./data/y_test.npy")
#Objective function
def objective_ensemble(trial):
    """Validation AUC-ROC of a weighted soft-vote ensemble (GB + RF + ERT)
    whose three weights are sampled from *trial*.

    NOTE(review): written against an Optuna-style `trial` API
    (suggest_float), but the optimizer below is gwo() -- confirm gwo supplies
    a compatible trial object.
    """
    clf1 = GradientBoostingClassifier(random_state=0,
                                      learning_rate=0.05,
                                      max_depth=3,
                                      min_samples_leaf=2,
                                      n_estimators=290,
                                      subsample=0.66)
    clf2 = RandomForestClassifier(n_jobs=-1,
                                  max_depth=4,
                                  max_features=0.75,
                                  min_samples_leaf=2,
                                  n_estimators=108)
    clf3 = ExtraTreesClassifier(n_jobs=-1,
                                max_depth=4,
                                max_features=0.85,
                                min_samples_leaf=1,
                                n_estimators=294)
    w1 = trial.suggest_float('weight1', 0.1, 0.9)
    w2 = trial.suggest_float('weight2', 0.1, 0.9)
    w3 = trial.suggest_float('weight3', 0.1, 0.9)
    # Normalise so the three weights sum to 1.
    norm_w1, norm_w2, norm_w3 = w1/(w1+w2+w3), w2/(w1+w2+w3), w3/(w1+w2+w3)
    # Refit all three base models each trial and blend their probabilities.
    prob1 = clf1.fit(X_train, y_train).predict_proba(X_val)[::,1]
    prob2 = clf2.fit(X_train, y_train).predict_proba(X_val)[::,1]
    prob3 = clf3.fit(X_train, y_train).predict_proba(X_val)[::,1]
    ensemble_prob = prob1*norm_w1 + prob2*norm_w2 + prob3*norm_w3
    aucroc = roc_auc_score(y_val, ensemble_prob)
    return aucroc
# Optimise the ensemble weights with grey-wolf optimisation.
# NOTE(review): dim=1 although the objective samples three weights, and the
# keyword `max=0.9` sits where the sibling of `minx` would be -- probably
# meant to be `maxx`.  Confirm against gwo's signature.
best_position = gwo(objective_ensemble, max_iter=1000, n=50, dim=1, minx=0.1, max=0.9)
# Results: best weights hard-coded from a previous optimisation run.
study_ensemble = {'weight1': 0.8714467832442298,
                  'weight2': 0.10055484383424314,
                  'weight3': 0.4926102019970062}
# NOTE(review): despite the "norm" names, these weights are NOT renormalised
# to sum to 1 here, unlike inside objective_ensemble.
tuned_norm_w1 = study_ensemble['weight1']
tuned_norm_w2 = study_ensemble['weight2']
tuned_norm_w3 = study_ensemble['weight3']
# Refit the three base models (same hyper-parameters as in the objective)
# and score on the held-out test set.
clf1 = GradientBoostingClassifier(random_state=0,
                                  learning_rate=0.05,
                                  max_depth=3,
                                  min_samples_leaf=2,
                                  n_estimators=290,
                                  subsample=0.66)
clf2 = RandomForestClassifier(n_jobs=-1,
                              max_depth=4,
                              max_features=0.75,
                              min_samples_leaf=2,
                              n_estimators=108)
clf3 = ExtraTreesClassifier(n_jobs=-1,
                            max_depth=4,
                            max_features=0.85,
                            min_samples_leaf=1,
                            n_estimators=294)
prob1 = clf1.fit(X_train, y_train).predict_proba(X_test)[::,1]
prob2 = clf2.fit(X_train, y_train).predict_proba(X_test)[::,1]
prob3 = clf3.fit(X_train, y_train).predict_proba(X_test)[::,1]
prob_ensemble = prob1*tuned_norm_w1 + prob2*tuned_norm_w2 + prob3*tuned_norm_w3
ensemble_aucroc = roc_auc_score(y_test, prob_ensemble)
clf1_aucroc = roc_auc_score(y_test, prob1)
clf2_aucroc = roc_auc_score(y_test, prob2)
clf3_aucroc = roc_auc_score(y_test, prob3)
# Full metric table per model (ensemble first) -> CSV.
df_en = printPerformance(y_test, prob_ensemble, auc_only=False)
df1 = printPerformance(y_test, prob1, auc_only=False)
df2 = printPerformance(y_test, prob2, auc_only=False)
df3 = printPerformance(y_test, prob3, auc_only=False)
df = pd.DataFrame([df_en, df1, df2, df3])
df.columns = ['AUCROC', 'AUCPR', 'ACC', 'BA', 'SN/RE', 'SP', 'PR', 'MCC', 'F1', 'CK']
df.to_csv("./outcomes/performance_full.csv", index=None)
# Per-model feature importances for the three fitted estimators.
feature_importances_gb = clf1.feature_importances_
feature_importances_rf = clf2.feature_importances_
feature_importances_ert = clf3.feature_importances_
# NOTE(review): X_train comes from np.load and is a numpy array -- it has no
# .columns attribute, so this line raises AttributeError.  The feature names
# must come from the original DataFrame/CSV instead.
attributes = list(X_train.columns)
# Sort (importance, name) pairs descending for each model.
fea_im_pair_gb = sorted(zip(feature_importances_gb, attributes), reverse=True)
fea_im_gb_fullset_values = [fea_im_pair_gb[i][0] for i in range(len(fea_im_pair_gb))]
fea_im_gb_fullset_columns = [fea_im_pair_gb[i][1] for i in range(len(fea_im_pair_gb))]
fea_im_pair_rf = sorted(zip(feature_importances_rf, attributes), reverse=True)
fea_im_rf_fullset_values = [fea_im_pair_rf[i][0] for i in range(len(fea_im_pair_rf))]
fea_im_rf_fullset_columns = [fea_im_pair_rf[i][1] for i in range(len(fea_im_pair_rf))]
fea_im_pair_ert = sorted(zip(feature_importances_ert, attributes), reverse=True)
fea_im_ert_fullset_values = [fea_im_pair_ert[i][0] for i in range(len(fea_im_pair_ert))]
fea_im_ert_fullset_columns = [fea_im_pair_ert[i][1] for i in range(len(fea_im_pair_ert))]
# One bar chart per model, stacked vertically.
fig, axs = plt.subplots(3, figsize=(15,10), tight_layout=True)
fea_im_fullset_columns = [fea_im_gb_fullset_columns, fea_im_rf_fullset_columns, fea_im_ert_fullset_columns]
fea_im_fullset_values = [fea_im_gb_fullset_values, fea_im_rf_fullset_values, fea_im_ert_fullset_values]
colors=['C0','C1','C2']
# creating the bar plot
for i in range(3):
    axs[i].bar(fea_im_fullset_columns[i], fea_im_fullset_values[i], width = 0.8, color=colors[i])
for ax in axs.flat:
    ax.tick_params(axis='x', labelrotation=45, labelsize=12)
    ax.tick_params(axis='y', labelsize=12)
    ax.set(xlabel='Features (Variables)', ylabel='Feature Importance', ylim =(0, 0.5))
    ax.xaxis.label.set_fontsize(15)
    ax.yaxis.label.set_fontsize(15)
fig.savefig("./outcomes/fea_im_fullset.pdf")
# NOTE(review): the two expressions below are discarded (notebook leftovers).
sorted(zip(feature_importances_gb, attributes), reverse=True)[:10]
sorted(zip(feature_importances_rf, attributes), reverse=True)[:10]
sorted(zip(feature_importances_ert, attributes), reverse=True)[:10] |
989,551 | 5c9617ad749a2bf15229fbcf72c5c81c2e8188fb | # -*- coding: utf-8 -*-
"""
Created on Sun May 16 09:31:53 2021
@author: Muhammad Ayman Ezzat
Youmna Magdy Abdullah
"""
from utils import get_molecular_weight, linear_spectrum, subset_spectrum, reversed_map
def theoritical_spectrum(peptide_sequence):
"""Returns the theoritical spectrum of a given amino acid sequence.
INPUT :
peptide_sequence: string. The peptide sequence to get its theoritical spectrum
OUTPUT:
.: List. The theoritical spectrum of the given peptide sequence.
"""
linear_kmers = []
cyclic_kmers = []
for i in range(len(peptide_sequence)):
for j in range(i,len(peptide_sequence)):
linear_kmers.append(peptide_sequence[i:j+1])
for i in range(2,len(peptide_sequence)):
for j in range(i-1):
cyclic_kmers.append(peptide_sequence[i:len(peptide_sequence)]+peptide_sequence[0:j+1])
kmers = linear_kmers+cyclic_kmers
return sorted(list(map(get_molecular_weight,kmers)))
def find_oneMers(spectrum):
"""Returns the one-mers that consist the sequence of the given spectrum.
INPUT :
spectrum: array-like. The spectrum required to get its one-mers.
OUTPUT:
.: List. A list of one-mers the consist the given spectrum.
"""
candidates = list('_'*len(spectrum))
for i in range(len(spectrum)):
if spectrum[i] in reversed_map:
candidates[i] = reversed_map[spectrum[i]]
return [cantdidate for cantdidate in candidates if cantdidate != '_' ]
def isConsistant(spectrum,kmer):
"""Checks whether a given kmer is consistent with a given spectrum or not.
INPUT :
spectrum: array-like. The spectrum required to check the given kmer against.
kmer: string. The given kmer required to check its consistency.
OUTPUT:
.: bool. The consistency of the kmer against the spectrum.
"""
return subset_spectrum(spectrum,linear_spectrum(kmer))
def extend(oneMers,kmer):
"""Extends a given kmer.
INPUT :
oneMers: array-like. A list of amino acids that will be used to extend the given kmer.
kmer: string. The given kmer required to extend.
OUTPUT:
.: List. A list of extended kmers.
"""
extentions = []
for oneMer in oneMers:
if(oneMers.count(oneMer) > list(kmer).count(oneMer)):
extentions.append(kmer+oneMer)
return extentions
|
989,552 | 78df0fbf913b44aaca888020475dd08cb2ad4c85 | import os
# NOTE(review): 'COMPUTERNAME' is a Windows environment variable; this raises
# KeyError on other platforms.
COMPUTER_NAME = os.environ['COMPUTERNAME']
print("Computer: ", COMPUTER_NAME)

# Resampling target: voxel edge length in millimetres.
TARGET_VOXEL_MM = 1.00
# Mean pixel value for nodules -- presumably an empirical constant; confirm.
MEAN_PIXEL_VALUE_NODULE = 41
LUNA_SUBSET_START_INDEX = 0
# Input size (pixels) for the segmenter network.
SEGMENTER_IMG_SIZE = 320

# Machine-specific absolute paths for raw inputs and processed outputs.
NDSB3_RAW_SRC_DIR = "C:/data/kaggle/dsb17/original/dsb17/stage1/"
LUNA16_RAW_SRC_DIR = "E:/data/kaggle/dsb17/original/luna/"
NDSB3_EXTRACTED_IMAGE_DIR = "E:/data/kaggle/dsb17/processed/dsb17_extracted_images/"
LUNA16_EXTRACTED_IMAGE_DIR = "E:/data/kaggle/dsb17/processed/luna16_extracted_images/"
NDSB3_NODULE_DETECTION_DIR = "E:/data/kaggle/dsb17/processed/dsb3_nodule_predictions/"
989,553 | f2bcc18042ea2747dfe8f1a3e8c236c218ece250 | from django.conf import settings
from django.db import models
from django.forms import ModelForm
from django.db.models.signals import post_delete
from django.dispatch import receiver
from back_end.settings import USE_S3, MEDIA_ROOT
import shutil
class Event(models.Model):
    """An event created by a user, with schedule, location and attendees."""
    # Audit timestamps maintained automatically by Django.
    created_at = models.DateTimeField(auto_now_add=True, editable=False)
    updated_at = models.DateTimeField(auto_now=True, editable=False)
    # Scheduled window; an open-ended event has no ended_at.
    started_at = models.DateTimeField()
    ended_at = models.DateTimeField(null=True, blank=True)
    name = models.CharField(max_length=255)
    # NOTE(review): ForeignKey without on_delete -- only valid on Django < 2.0.
    creator = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="created_events")
    # Optional geolocation of the event.
    longitude = models.DecimalField(max_digits=11, decimal_places=6, null=True)
    latitude = models.DecimalField(max_digits=11, decimal_places=6, null=True)
    # External venue identifier (provider not visible in this file -- confirm).
    venue_id = models.CharField(max_length=255, null=True, blank=True)
    # Attendees, joined through the Invite model.
    users = models.ManyToManyField(settings.AUTH_USER_MODEL, through="Invite", blank=True)

    # Python-2 style string representation (__str__ on Python 3).
    def __unicode__(self):
        return self.name

    class Meta:
        db_table = "event"
        app_label = "nox"
        # Newest events first.
        ordering = ['-started_at']
class EventForm(ModelForm):
    """ModelForm exposing the user-settable Event fields."""
    class Meta:
        model = Event
        fields = ['name', 'creator', 'started_at', 'ended_at', 'latitude', 'longitude', 'venue_id']
# Clean up the local event directory.
# This doesn't need to be done in production because S3 uses buckets, not directories.
@receiver(post_delete, sender=Event)
def event_delete_handler(sender, **kwargs):
    """Remove MEDIA_ROOT/event/<id> after an Event row is deleted (local
    storage only; no-op when S3 is in use)."""
    if USE_S3:
        return
    deleted_event = kwargs['instance']
    event_dir = "%s/event/%d" % (MEDIA_ROOT, deleted_event.id)
    shutil.rmtree(event_dir, ignore_errors=True)
989,554 | b41926c0012001dab7346c718f16f8e62713be3e | import tensorflow as tf
import cv2
import numpy as np
import glob
# All landmark training images; labels live in a parallel /label/ tree.
im_list = glob.glob("/home/aiserver/muke/dataset/landmark-data/image/*")
# Output TFRecord files for the train/test split.
tfrecord_file_train = "data/train.tfrecord"
tfrecord_file_test = "data/test.tfrecord"
# Side length (pixels) images are resized to before serialisation.
im_size = 128
def write_data(begin, end, tfrecord_file, shuffle_seed=0):
    """Serialise images [begin, end) of the shuffled im_list into a TFRecord.

    Images are resized to im_size x im_size and landmark coordinates are
    rescaled accordingly.

    shuffle_seed -- seed for the shared permutation.  Bug fixed: the original
    reshuffled im_list independently on every call, so the train and test
    calls drew overlapping images (data leakage between splits).  A
    fixed-seed permutation keeps disjoint index ranges disjoint in content.
    """
    writer = tf.python_io.TFRecordWriter(tfrecord_file)
    index = np.random.RandomState(shuffle_seed).permutation(im_list.__len__())
    for i in range(begin, end):
        im_d = im_list[index[i]]
        # Matching label file: same name under /label/ with a .txt extension.
        im_l = im_list[index[i]].replace("/image/", "/label/").replace("jpg", "txt")
        print(i, im_d)
        data = cv2.imread(im_d)
        sp = data.shape
        im_point = []
        # Scale each (x, y) landmark from the original resolution to im_size;
        # 'with' closes the label file (the original leaked the handle).
        with open(im_l) as label_file:
            for p in label_file.readlines():
                p = p.replace("\n", "").split(",")
                im_point.append(int(int(p[0]) * im_size / sp[1]))
                im_point.append(int(int(p[1]) * im_size / sp[0]))
        data = cv2.resize(data, (im_size, im_size))
        ex = tf.train.Example(
            features=tf.train.Features(
                feature={
                    "image": tf.train.Feature(
                        bytes_list=tf.train.BytesList(
                            value=[data.tobytes()])),
                    "label": tf.train.Feature(
                        int64_list=tf.train.Int64List(
                            value=im_point)),
                }
            )
        )
        writer.write(ex.SerializeToString())
    writer.close()
# First 90% of the shuffled images -> training TFRecord; the remaining 10%
# are written by the call below.
write_data(0, int(im_list.__len__() * 0.9), tfrecord_file_train)
write_data(int(im_list.__len__() * 0.9), im_list.__len__(), tfrecord_file_test) |
989,555 | 00aee5ba24ad7229e19577f8569526cb0adde7a2 | """
Return list of messages given a datetime (empty is now) : [ (title, message), ]
Load, unload and reload messages give their name
"""
import yaml
import glob
import os.path
from datetime import datetime
from collections import OrderedDict
from messageApp.messages import Messages
class MessageApp():
    """Manages named Messages loaded from YAML files and returns the
    (title, message) pairs applicable at a given time."""

    def __init__(self, messagesDir='messages', filePattern="messages_"):
        # Common prefix of every message file: <messagesDir>/<filePattern>.
        self._filePattern = os.path.join(messagesDir, filePattern)
        # name -> Messages instance, kept in load order.
        self._loadedMessages = OrderedDict()

    # return list of estcequecestbientot [ (title, message), ] at now
    def getMessages(self):
        time = datetime.now()
        return self.getMessagesAtTime(time)

    # given a datetime
    # return list of estcequecestbientot [ (title, message), ]
    def getMessagesAtTime(self, time):
        estcequecestbientot = []
        for _, messages in self._loadedMessages.items():
            message = messages.getMessage(time)
            if message:
                estcequecestbientot.append(message)
        return estcequecestbientot

    # given a (string) name or an iterable of names,
    # [re]-instanciate the corresponding Messages in _loadedMessages[]
    def loadMessage(self, messageNames):
        if type(messageNames) is str:
            messageNames = (messageNames, )
        for name in messageNames:
            messageObject = self._getMessageObject(name)
            if messageObject:
                self._loadedMessages[name] = Messages(messageObject)

    # given a (string) name or an iterable of names,
    # unload the corresponding Messages from _loadedMessages[]
    def unloadMessage(self, messageNames):
        if type(messageNames) is str:
            messageNames = (messageNames, )
        for name in messageNames:
            if (name in self._loadedMessages):
                self._loadedMessages.pop(name)

    # reload all _loadedMessages[] from disk
    def reload(self):
        for name, _ in self._loadedMessages.items():
            self.loadMessage(name)

    # return (notloadedList, loadedList)
    def listMessages(self):
        loadedList = [ name for name, _ in self._loadedMessages.items() ]
        notloadedList = list(set(self._getNameList()) - set(loadedList))
        return notloadedList, loadedList

    # given a name, return the parsed YAML message object, or None if the
    # name does not correspond to an existing file
    def _getMessageObject(self, name):
        nameList = self._getNameList()
        if name in nameList:
            filename = self._filePattern + name + '.yaml'
            # 'with' closes the handle (the original leaked it).  safe_load
            # avoids arbitrary object construction from the YAML file, and
            # yaml.load without an explicit Loader is an error on modern
            # PyYAML anyway.
            with open(filename, 'r') as f:
                messageObject = yaml.safe_load(f)
            return messageObject
        return None

    # return an updated list of names of existing message files,
    # using _filePattern and the file system
    def _getNameList(self):
        fileList = glob.glob(self._filePattern + "*.yaml")
        nameStartAt = len(self._filePattern)
        # strip the prefix and the trailing '.yaml'
        nameList = [ name[nameStartAt:-5] for name in fileList ]
        return nameList
|
989,556 | f766ae0bec3977c893b7f9412d3f1abfafa107f5 | import sys
import cv2 as cv
import numpy as np
import os
#np.seterr(over='ignore')
# проблемы с переполнением (были)
def is_pupil_center(a,b):
    """Heuristic: is detected circle centre (a, b) plausible given the last
    accepted pupil centre (c1, c2) and the last rejected centre (c3, c4)?

    The tests are written as `c + a > 2*c + tol` rather than `a - c > tol` --
    presumably to dodge unsigned underflow, since a and b arrive as
    np.uint16 values from find_pupil (see the overflow note at the top of
    the file).  TODO confirm before simplifying.
    """
    global c1, c2, c3, c4
    # x more than 12 px from the accepted centre...
    if ((c1 + a > c1 * 2 + 12) or (c1 + a < c1 * 2 - 12)):
        # ...and also more than 10 px from the previous rejected centre.
        if ((c3 + a > c3 * 2 + 10) or (c3 + a < c3 * 2 - 10)):
            return False
        else:
            return True
    # same check for y with the same tolerances
    elif ((c2 + b > c2 * 2 + 12) or (c2 + b < c2 * 2 - 12)):
        if ((c4 + b > c4 * 2 + 10) or (c4 + b < c4 * 2 - 10)):
            return False
        else:
            return True
    else:
        return True
def find_pupil(argv, direct, filename):
    """Detect the pupil in one eye image via CLAHE preprocessing and Hough
    circles, draw the candidates onto the image, and update the module-level
    centre trackers c1..c4 and the `accuracy` counter.

    Returns -1 when the image cannot be read; otherwise returns None.
    """
    ## [load]
    default_file = str(direct) + '/' + str(filename)
    filename = argv[0] if len(argv) > 0 else default_file
    # Read image
    src = cv.imread(filename, cv.IMREAD_COLOR)
    # If image is loaded not fine
    if src is None:
        print ('Error opening image!')
        print ('Usage: hough_circle.py [image_name -- default ' + default_file + '] \n')
        return -1
    ## [load]
    ## [convert]
    lab = cv.cvtColor(src, cv.COLOR_BGR2LAB)
    # -----Splitting the LAB image to different channels-------------------------
    l, a, b = cv.split(lab)
    # -----Applying CLAHE to L-channel-------------------------------------------
    # NOTE(review): despite the comment, CLAHE is applied to the b channel.
    clahe = cv.createCLAHE(clipLimit=5.0, tileGridSize=(8, 8))
    cl = clahe.apply(b)
    # -----Merge the CLAHE enhanced L-channel with the a and b channel-----------
    limg = cv.merge((l,a,cl))
    #limg = cv.merge((cl, a, b))
    # -----Converting image from LAB Color model to RGB model--------------------
    src = cv.cvtColor(limg, cv.COLOR_LAB2BGR)
    gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
    ## [convert]
    ## [reduce_noise]
    # Reduce the noise to avoid false circle detection
    gray = cv.medianBlur(gray, 15)
    # cv.imshow('gray', gray)
    ## [reduce_noise]
    #ret, th1 = cv.threshold(gray, 127, 255, cv.THRESH_BINARY)
    # Adaptive threshold: used as a fallback input for the second Hough pass.
    th2 = cv.adaptiveThreshold(gray, 255, cv.ADAPTIVE_THRESH_MEAN_C, \
        cv.THRESH_BINARY, 11, 3)
    # Otsu's thresholding
    # NOTE(review): this is THRESH_TRUNC, not Otsu, and th3 keeps the whole
    # (retval, image) tuple since it is not unpacked.
    th3 = cv.threshold(gray,127,255,cv.THRESH_TRUNC)
    # Otsu's thresholding after Gaussian filtering
    blur = cv.GaussianBlur(gray, (5, 5), 0)
    ret3, th4 = cv.threshold(blur, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)
    # cv.imshow('th2', th2)
    # cv.imshow('th3', th3)
    # cv.imshow('th4', th4)
    # cv.waitKey()
    ####### it works
    # cv.imshow('th2', th2)
    # gray = th2
    ## [houghcircles]
    rows = gray.shape[0]
    circles = cv.HoughCircles(gray, cv.HOUGH_GRADIENT, 1, rows / 8,
                              param1=50, param2=25, #50,25
                              minRadius=20, maxRadius=50) #80, 110 (15,50)
    ## [houghcircles]
    ## [draw]
    global c1, c2, c3, c4, accuracy
    if circles is not None:
        #print filename
        print('\n', default_file, " | ", end='')
        # uint16 conversion: is_pupil_center's arithmetic relies on this.
        circles = np.uint16(np.around(circles))
        circles.setflags(write=True)
        for i in circles[0, :]:
            center = (i[0], i[1])
            pupil = is_pupil_center(i[0], i[1])
            # print true or false center is detected
            print(center, " | ", pupil, " || ", end='')
            # circle center
            cv.circle(src, center, 1, (0, 255, 0), 3)
            # circle outline
            radius = i[2]
            cv.circle(src, center, radius, (0, 255, 0), 1)
            # print (center)
            # if pupil center is true put new coordinates of center
            if (pupil == True):
                c1 = i[0]
                c2 = i[1]
                c3 = c1
                c4 = c2
                accuracy += 1
            else:
                #if not, put new fake coordinates of center
                c3 = i[0]
                c4 = i[1]
    else:
        # Fallback pass on the adaptive-threshold image (" |e " marks it).
        print('\n', default_file, " |e ", end='')
        circles = cv.HoughCircles(th2, cv.HOUGH_GRADIENT, 1, 20,
                                  param1=20, param2=24, # 50,25
                                  minRadius=20, maxRadius=60) # 80, 110 (15,50)
        # NOTE(review): no None check here -- if the fallback pass also finds
        # nothing, np.around(None) raises.
        circles = np.uint16(np.around(circles))
        circles.setflags(write=True)
        for i in circles[0, :]:
            center = (i[0], i[1])
            pupil = is_pupil_center(i[0], i[1])
            # print true or false center is detected
            print(center, " | ", pupil, " || ", end='')
            # circle center
            cv.circle(src, center, 1, (0, 255, 0), 3)
            # circle outline
            radius = i[2]
            cv.circle(src, center, radius, (127, 255, 0), 3)
            # print (center)
            # if pupil center is true put new coordinates of center
            # (note: accuracy is NOT incremented on this fallback path)
            if (pupil == True):
                c1 = i[0]
                c2 = i[1]
                c3 = c1
                c4 = c2
            else:
                # if not, put new fake coordinates of center
                c3 = i[0]
                c4 = i[1]
    ## [draw]
    ## [display]
    # cv.imshow(default_file, src)
    # cv.waitKey(0)
    # cv.destroyAllWindows()
    ## [display]
    return
def main():
    """Walk the image directory, run find_pupil on every image and report
    the fraction of frames where the tracked centre was accepted."""
    global c1,c2,c3,c4, accuracy
    # the first photo has no previous reference, so its check comes out false
    # real center
    c1 = 698
    c2 = 420
    # imagine center
    c3 = c1
    c4 = c2
    accuracy = 0;
    number_of_files = 0
    # ----------------- SET THE ABSOLUTE PATH TO THE IMAGE DIRECTORY -----------
    direct = "d:/Python/Eye/img/yellow"
    for root, dirs, files in os.walk(direct):
        for file in files:
            if file.endswith(".png") or file.endswith(".jpeg") or file.endswith(".jpg"):
                # NOTE(review): files found in subdirectories are still passed
                # with `direct` (not `root`), so nested images fail to load.
                find_pupil(sys.argv[1:], direct, file)
                number_of_files += 1
                #print(os.path.join(root, file))
    # NOTE(review): divides by zero when the directory contains no images.
    print ("\n\n%d / %d is defined.\n accuracy = %.2f" % (accuracy, number_of_files, (accuracy*100/number_of_files)))
|
989,557 | 9ee101d6961ecb6751c49e5534917ef463d7c3fa | #pdf_merger.py
#from PyPDF2 import PdfFileReader, PdfFileWriter
import os
import glob
import sys
from PyPDF2 import PdfFileWriter, PdfFileReader
from pdf2image import convert_from_path, convert_from_bytes
def pdf_splitter(path):
    """Write each page of *path* as its own single-page PDF under ./temp/."""
    base_name = os.path.splitext(os.path.basename(path))[0]
    reader = PdfFileReader(path)
    for page_index in range(reader.getNumPages()):
        single_page = PdfFileWriter()
        single_page.addPage(reader.getPage(page_index))
        output_filename = '{}_page_{}.pdf'.format(base_name, page_index + 1)
        with open('./temp/' + output_filename, 'wb') as out:
            single_page.write(out)
        print('Created: {}'.format(output_filename))
def merger(output_path, input_paths):
    """Concatenate the given ./temp/ PDFs into *output_path*, echoing each
    page's extracted text to stdout along the way."""
    combined = PdfFileWriter()
    for source in input_paths:
        reader = PdfFileReader('./temp/' + source)
        for page_number in range(reader.getNumPages()):
            current_page = reader.getPage(page_number)
            print(current_page.extractText())
            combined.addPage(current_page)
    with open(output_path, 'wb') as fh:
        combined.write(fh)
def to_jpg(file_name):
    """Render each page of *file_name* as a 300-dpi JPEG under ./temp/."""
    # Windows poppler location needed by pdf2image.
    poppler_dir = r'C:\Program Files\poppler\bin'
    pages = convert_from_path(file_name, dpi=300, poppler_path=poppler_dir)
    # splitext handles any extension length (the original sliced off a fixed
    # 4 characters) and matches pdf_splitter's naming.
    stem = os.path.splitext(file_name)[0]
    for i in range(len(pages)):
        pages[i].save(f"./temp/{stem}_{i}.jpg")
if __name__ == '__main__':
    # Driver: split string.pdf into pages, then merge the requested page
    # ranges back into result files.
    os.makedirs('./temp', exist_ok=True)
    # Split the pdf file into single pages.
    #file_name = input("편집 대상 pdf 파일명을 입력하시오(확장자 포함) : ")
    file_name = "string.pdf"
    '''
    pdf_merge_list = []
    firs_page = 1
    last_page = 3
    for i in range(firs_page, last_page+1):
        merge_file_name = file_name_pre + "_page_" + str(i) + '.pdf'
        pdf_merge_list.append(merge_file_name)
    '''
    try:
        pdf_splitter(file_name)
    except FileNotFoundError as file_err:
        print("입력한 pdf 파일을 찾을 수 없습니다.")
        sys.exit()
    # When merging only specific pages: entries are either 'a~b' ranges or
    # single page numbers.
    #pdf_merge_list_raw = ['10~17', '18~42', '44~61', '62~80', '82~98']
    #pdf_merge_list_raw_total = [['2~22'], ['24~40'], ['42~69'], ['70~89'], ['90~107'], ['108~127']]
    pdf_merge_list_raw_total = [['1~13']]
    #pdf_merge_list_raw = ['82~98']
    for cnt in range(len(pdf_merge_list_raw_total)):
        pdf_merge_list_raw = pdf_merge_list_raw_total[cnt]
        pdf_merge_list_num = [ ]
        for i in range(len(pdf_merge_list_raw)):
            print('리스트 분석....', pdf_merge_list_raw[i])
            if '~' in str(pdf_merge_list_raw[i]):
                # Expand 'a~b' into the inclusive range of ints a..b.
                list_anal = pdf_merge_list_raw[i].split('~')
                for j in range(int(list_anal[0]), int(list_anal[1])+1):
                    print(j)
                    pdf_merge_list_num.append(j)
            else:
                # NOTE(review): single entries are appended as strings, so
                # the > comparison below raises TypeError on Python 3 when
                # the last entry came from this branch.
                print(pdf_merge_list_raw[i])
                pdf_merge_list_num.append(pdf_merge_list_raw[i])
        pdf_page_num = PdfFileReader(file_name).getNumPages()
        if (pdf_merge_list_num[-1] > pdf_page_num):
            print(f"{file_name} 페이지 수 : {pdf_page_num}, 입력하신 페이지 번호 초과")
            sys.exit()
        # When merging a page range instead:
        """
        firs_page = 1
        last_page = 3
        for i in range(firs_page, last_page+1):
            pdf_merge_list_num.append(i)
        """
        # Merge only the desired pages of the split pdf.
        file_name_pre = file_name.split('.')[0]
        #pdf_merge_list = glob.glob(f'{file_name_pre}_*.pdf')
        #pdf_merge_list.sort()
        pdf_merge_list = []
        for i in range(len(pdf_merge_list_num)):
            merge_file_name = file_name_pre + "_page_" + str(pdf_merge_list_num[i]) + '.pdf'
            pdf_merge_list.append(merge_file_name)
        merger(f'{file_name_pre}_merge_result{pdf_merge_list_raw_total[cnt]}.pdf', pdf_merge_list)
989,558 | 58511c39b5eda458c0c21a2bc556228d8ecbd9f0 | # Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Stackdriver Monitoring API wrapper."""
from gcloud.monitoring.client import Client
from gcloud.monitoring.connection import Connection
from gcloud.monitoring.label import LabelDescriptor
from gcloud.monitoring.label import LabelValueType
from gcloud.monitoring.metric import Metric
from gcloud.monitoring.metric import MetricDescriptor
from gcloud.monitoring.metric import MetricKind
from gcloud.monitoring.metric import ValueType
from gcloud.monitoring.query import Aligner
from gcloud.monitoring.query import Query
from gcloud.monitoring.query import Reducer
from gcloud.monitoring.resource import Resource
from gcloud.monitoring.resource import ResourceDescriptor
from gcloud.monitoring.timeseries import Point
from gcloud.monitoring.timeseries import TimeSeries
# Public API surface of the gcloud.monitoring package; everything re-exported
# above is listed here so `from gcloud.monitoring import *` stays curated.
__all__ = (
    'Client',
    'Connection',
    'LabelDescriptor', 'LabelValueType',
    'Metric', 'MetricDescriptor', 'MetricKind', 'ValueType',
    'Aligner', 'Query', 'Reducer',
    'Resource', 'ResourceDescriptor',
    'Point', 'TimeSeries',
    'SCOPE',
)
# Convenience alias: OAuth2 scope required by the Monitoring API connection.
SCOPE = Connection.SCOPE
|
989,559 | 71b4a904f91cf354113df79f66c1c6333fb44f49 | import db_util
def get_price(RFID):
    """Return the price of the item with the given RFID tag.

    Bug fix: the original query compared against the literal column text
    ``RFID`` instead of the function argument, so it could never match a tag.
    Also closes the connection, consistent with get_all_items().
    """
    # NOTE(review): string interpolation is vulnerable to SQL injection —
    # switch to a parameterized query if db_util.db_query supports one.
    q = "SELECT price FROM items WHERE rfid = '%s';" % RFID
    c = db_util.db_open()
    try:
        price = db_util.db_query(c, q)
    finally:
        db_util.db_close(c)
    return price
def get_name(RFID):
    """Return the name of the item with the given RFID tag.

    Bug fix: the original query compared against the literal column text
    ``RFID`` instead of the function argument, so it could never match a tag.
    Also closes the connection, consistent with get_all_items().
    """
    # NOTE(review): string interpolation is vulnerable to SQL injection —
    # switch to a parameterized query if db_util.db_query supports one.
    q = "SELECT name FROM items WHERE rfid = '%s';" % RFID
    c = db_util.db_open()
    try:
        name = db_util.db_query(c, q)
    finally:
        db_util.db_close(c)
    return name
def get_shelved(RFID):
    # NOTE(review): this stub is immediately shadowed by the identically named
    # function below (Python keeps only the last definition). If both lookups
    # are needed, rename them (e.g. get_shelved_by_rfid / get_shelved_by_name).
    pass
def get_shelved(name):
    # Placeholder: shelf lookup by item name — not implemented yet.
    pass
def get_all_items():
    """Fetch and return every row of the SE_DB.items table."""
    connection = db_util.db_open()
    rows = db_util.db_query(connection, "SELECT * FROM SE_DB.items;")
    db_util.db_close(connection)
    return rows
# NOTE(review): db_query likely returns a row/tuple rather than a str, so this
# concatenation probably raises TypeError — wrap the result in str() after
# confirming the db_util API. Demo/smoke-test line, runs at import.
print('The price of this item is $' + get_price('cf?ed'))
989,560 | c1b7b7db487935cbe2c26cdec203b922f16f9d93 | from heapq import heappush, heappop
def main():
    """Greedy job scheduling: N jobs, each pays reward B, A days after it is
    started; only one job may start per day. Maximize pay received within M
    days by always taking the most rewarding job still startable."""
    job_count, days = map(int, input().split())
    # slots[d] holds (negated) rewards of jobs whose latest start day is d.
    slots = [[] for _ in range(days)]
    for _ in range(job_count):
        delay, reward = map(int, input().split())
        if delay > days:
            continue
        # heapq is a min-heap, so rewards are stored negated for max behavior.
        slots[days - delay].append(-reward)
    best = []
    total = 0
    # Walk days from the last possible start backwards; each day, every job
    # that becomes startable joins the candidate pool and the best one is taken.
    for day in reversed(range(days)):
        for neg_reward in slots[day]:
            heappush(best, neg_reward)
        if best:
            total += heappop(best)
    print(-total)
main()
989,561 | 02e79d5359538fd9d0b6e152af885e835568c999 | from django.http import HttpResponse
from django.shortcuts import render
from APP2.models import Student
import random
# Create your views here.
def lala(request):
    """Serve the demo landing page."""
    return render(request, 'lala.html')
# Create a record
def add_student(request):
    """Insert one Student with a randomly numbered demo name and report it."""
    new_name = 'Lily%d' % random.randrange(100)
    student = Student(s_name=new_name)
    student.save()
    return HttpResponse('Add %s success' % student.s_name)
# Query records
def get_student(request):
    """Render the student list page; also dumps names to the console for debugging."""
    students = Student.objects.all()
    for entry in students:
        print(entry.s_name)
    context = {
        'ha': 'haha',
        'xi': 'xixi',
        'students': students,
    }
    return render(request, 'student_list.html', context=context)
# Updating and deleting records both build on querying
def update_student(request):
    """Rename the student with primary key 2 to 'Jack'."""
    record = Student.objects.get(pk=2)
    record.s_name = 'Jack'
    record.save()
    return HttpResponse('update student success')
# Delete a record
def delete_student(request):
    """Delete the student with primary key 3.

    Bug fix: the original called student.save() after student.delete();
    delete() removes the row (and clears the pk), so the subsequent save()
    re-inserted the object, undoing the deletion.
    """
    student = Student.objects.get(pk=3)
    student.delete()
    return HttpResponse('delete student success')
|
989,562 | a451ed2d6ba0a5aeaf12e2791ed1c5863e4689d6 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Initial schema for the forums app: Forums, Post and Threads tables.

    Post's thread/user foreign keys are added in separate AddField operations
    because Threads is created after Post.
    """
    dependencies = [
        # Post/Threads reference the (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Forums',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('description', models.CharField(max_length=150)),
                ('topic_count', models.BigIntegerField()),
                ('post_count', models.BigIntegerField()),
                ('position', models.IntegerField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.CharField(max_length=250)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('forum', models.ForeignKey(to='forums.Forums')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Threads',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('last_post_id', models.IntegerField(default=0)),
                ('title', models.CharField(max_length=150)),
                ('hits', models.BigIntegerField()),
                ('post_count', models.BigIntegerField()),
                ('locked', models.BooleanField(default=False)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('forum', models.ForeignKey(to='forums.Forums')),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Post -> Threads FK is added here, after Threads exists.
        migrations.AddField(
            model_name='post',
            name='thread',
            field=models.ForeignKey(to='forums.Threads'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='post',
            name='user',
            field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
            preserve_default=True,
        ),
    ]
|
989,563 | fecacacee30d381affaf672dc7eb37cb7bb2a92d | import math
from PIL import Image
import asyncio
from io import BytesIO
import os
import mercantile
import fiona
from shapely import speedups
if speedups.available:
speedups.enable()
from shapely import geometry as shp
import ujson
from .tbutils import ObjDict, TileNotFound
def num2box(z, x, y, srid='4326'):
    """Return the bounding box of XYZ tile (z, x, y) in the requested SRID
    ('4326' geographic or '3857' web-mercator)."""
    if srid == '4326':
        return mercantile.bounds(x, y, z)
    if srid == '3857':
        return mercantile.xy_bounds(x, y, z)
    raise ValueError('Invalid or unsupported SRID, please use 4326 or 3857')
def box2pix(box, world):
    """Convert a map-coordinate bounding box into (left, upper, right, lower)
    pixel bounds of the source image described by *world*."""
    minx, miny, maxx, maxy = box
    x0 = (minx - world.W) / world.xres
    y0 = (world.N - maxy) / world.yres
    x1 = x0 + (maxx - minx) / world.xres
    y1 = y0 + (maxy - miny) / world.yres
    return (round(x0), round(y0), round(x1), round(y1))
def get_world_data(imagefile, imagesize):
    """Read the ESRI world file next to *imagefile* and return an ObjDict with
    pixel resolutions (xres/yres) and the georeferenced N/E/S/W extent."""
    # TODO: implement support for skewed images...
    # World-file name: e.g. "map.png" -> "map.pgw".
    worldfile = imagefile[:-2] + imagefile[-1] + 'w'
    with open(worldfile, 'r') as fh:
        values = [float(v) for v in fh.read().split('\n')]
    xres = values[0]
    yres = -values[3]  # world files store a negative y pixel size
    west = values[4]
    north = values[5]
    east = west + xres * imagesize[0]
    south = north - yres * imagesize[1]
    return ObjDict({
        'xres': xres,
        'yres': yres,
        'N': north,
        'E': east,
        'S': south,
        'W': west,
    })
def check_if_intersect(box, world):
    """Raise TileNotFound when *box* lies entirely outside the image extent."""
    disjoint = (box[0] > world.E or box[1] > world.N
                or box[2] < world.W or box[3] < world.S)
    if disjoint:
        raise TileNotFound
def bufferize(box, buffer):
    """Expand *box* on every side by *buffer*, a fraction of its width/height."""
    pad_x = (box[2] - box[0]) * buffer
    pad_y = (box[3] - box[1]) * buffer
    return (box[0] - pad_x, box[1] - pad_y, box[2] + pad_x, box[3] + pad_y)
def crop(imagefile, box):
    """Crop *box* (map coordinates) out of a world-referenced image.

    For raw (uncompressed) images, the decoder tile list is rewritten so PIL
    decodes only the scanlines the crop needs instead of the whole file.
    """
    with Image.open(imagefile) as image:
        world = get_world_data(imagefile, image.size)
        check_if_intersect(box, world)
        bounds = box2pix(box, world)
        if image.tile[0][0] == 'raw':
            iw, ih = image.size
            offset = image.tile[0][2]
            x = bounds[0]
            y = bounds[1]
            x1 = bounds[2]
            y1 = bounds[3]
            w = x1 - x
            h = y1 - y
            # Clamp the number of rows read to what the image actually has.
            hcorr = min(h, ih-abs(y))
            # HACK: shrink the declared image size and point the raw decoder
            # at the byte offset of row max(0, y). The 4-byte row stride
            # assumes 4 channels — presumably RGBA sources; TODO confirm.
            image.size = (iw, hcorr)
            image.tile = [
                (
                    'raw',
                    (0, 0, iw, hcorr),
                    offset + 4 * iw * max(0, y),
                    ('RGBA', 0, 1),
                )
            ]
            return image.crop((x, min(0, y), x+w, min(h, y1)))
        return image.crop(bounds)
class ImageSource:
    """Tile provider that crops and resizes map tiles out of a single
    world-referenced raster image on demand."""
    def __init__(self, imagefile, executor, srid='4326',
                 frmt='PNG', tilesize=(256, 256), resample=Image.BILINEAR):
        self.file = imagefile
        self.executor = executor
        self.srid = srid
        self.format = frmt
        self.tilesize = tilesize
        self.resample = resample
    async def modified(self):
        """Return the source file's mtime (useful for cache validation)."""
        return os.path.getmtime(self.file)
    def get_tile(self, box):
        """Synchronously crop *box* from the source and scale it to tile size."""
        cropped = crop(self.file, box)
        return cropped.resize(self.tilesize, self.resample)
    async def __call__(self, z, x, y):
        """Render tile (z, x, y) off the event loop and return encoded bytes."""
        event_loop = asyncio.get_event_loop()
        bounds = list(num2box(z, x, y, self.srid))
        tile = await event_loop.run_in_executor(self.executor, self.get_tile, bounds)
        buffer = BytesIO()
        tile.save(buffer, format=self.format)
        return buffer.getvalue()
def get_simplify_tolerance(box, relative_tolerance):
    """Distance tolerance for shapely's simplify(), scaled to the box diagonal."""
    dx = box[2] - box[0]
    dy = box[3] - box[1]
    diagonal = math.sqrt(dx**2 + dy**2)
    return diagonal * relative_tolerance
class VectorSource:
    """Tile provider that clips and simplifies features from a vector file
    (anything fiona can open) into GeoJSON tiles on demand."""
    def __init__(self, vectorfile, executor,
                 srid='4326', buffer=0, relative_tolerance=.0005,
                 preserve_topology=True):
        # buffer: fractional padding around each tile so clipped edges line up
        # between neighboring tiles.
        self.format = 'geojson'
        self.file = vectorfile
        self.executor = executor
        self.srid = srid
        self.buffer = buffer
        self.relative_tolerance = relative_tolerance
        self.preserve_topology = preserve_topology
    async def modified(self):
        # Source file mtime (useful for cache validation).
        return os.path.getmtime(self.file)
    def get_tile(self, box):
        """Synchronously clip all features to the (buffered) tile box and
        simplify their geometry; returns a GeoJSON FeatureCollection dict."""
        features = []
        geobox = shp.box(
            *bufferize(box, self.buffer)
        )
        tolerance = get_simplify_tolerance(box, self.relative_tolerance)
        with fiona.open(self.file, 'r') as cake:
            for feat in cake:
                cut = shp.shape(feat['geometry']).intersection(geobox)
                if cut.is_empty:
                    continue
                # Replace the geometry in place with the clipped, simplified one.
                feat['geometry'] = shp.mapping(
                    cut.simplify(tolerance, self.preserve_topology)
                )
                features.append(feat)
        return {
            'type': 'FeatureCollection',
            'features': features,
        }
    async def __call__(self, z, x, y):
        # Build the tile off the event loop and serialize it as JSON text.
        loop = asyncio.get_event_loop()
        box = num2box(z, x, y, self.srid)
        tile = await loop.run_in_executor(self.executor, self.get_tile, box)
        return ujson.dumps(tile)
|
989,564 | a939bdbb3eb84ebdcde269c96c2befd003ed73ac | st1=list(input())
i = 0
# Swap characters pairwise. Bug fix: the original condition (i < len) read
# st1[i+1] past the end and raised IndexError for odd-length input; stopping
# before a dangling final character leaves that last char in place.
while i + 1 < len(st1):
    st1[i], st1[i + 1] = st1[i + 1], st1[i]
    i += 2
print("".join(st1))
|
989,565 | 04f7fea9821aa04e033278142f80cee92abdf076 | import functools, os, requests
from oauthlib.oauth2 import BackendApplicationClient
from requests_oauthlib import OAuth2Session
from werkzeug.security import check_password_hash, generate_password_hash
from flask import (
flash, g, render_template, request, session, url_for, jsonify, redirect, json
)
from flask import current_app as app
from . import routes
# NOTE(review): OAuth client credentials are committed in source — load them
# from configuration/environment variables instead, and rotate these values.
clientID='40ca6a9cd34648fd80be50827fe46f7d'
clientSecret='27622b8b211f4ace9d57283a0cb06f89'
#token_endpoint = app.config['FATSECRET_TOKEN_ENDPOINT']
def authorization():
    '''Fetch an OAuth2 access token via the client-credentials (backend) grant
    from the FatSecret token endpoint and cache it in the Flask session.

    Returns the token dict produced by requests-oauthlib.
    '''
    url = 'https://oauth.fatsecret.com/connect/token'
    client = BackendApplicationClient(client_id=clientID)
    client.grant_type = 'client_credentials'
    oauth = OAuth2Session(client=client, scope='basic')
    token = oauth.fetch_token(token_url=url, client_id=clientID, client_secret=clientSecret)
    # Persist for later requests/refreshes.
    session['oauth_token'] = token
    return token
def refreshtoken():
    '''Re-acquire/refresh the cached OAuth2 token via requests-oauthlib's
    auto-refresh machinery and return it as a JSON response.
    '''
    token = session['oauth_token']
    refresh_url = 'https://oauth.fatsecret.com/connect/token'
    def token_saver(token):
        # Invoked by requests-oauthlib when it transparently refreshes.
        session['oauth_token'] = token
    extra = {
        'client_id': clientID,
        'client_secret': clientSecret,
    }
    client = OAuth2Session(clientID, token=token, auto_refresh_url=refresh_url,
                           auto_refresh_kwargs=extra, token_updater=token_saver)
    # NOTE(review): this jsonify(...) result is discarded — the GET exists only
    # to trigger an auto-refresh; presumably intentional, but verify the token
    # endpoint actually returns 401 on expiry so the refresh fires.
    jsonify(client.get(refresh_url).json())
    return jsonify(session['oauth_token'])
|
989,566 | 094ad59e84cfede97045699e13de6e9cac16839f | # 2839번 (브론즈1)
# Mathematics, dynamic programming, greedy algorithm, brute force
def bags(kg):
    """Minimum number of 3kg/5kg bags summing exactly to *kg*, or -1 if
    no exact combination exists."""
    # Start with as many 5kg bags as possible and trade them away one at a
    # time until the remainder is divisible by 3.
    for fives in range(kg // 5, -1, -1):
        remainder = kg - fives * 5
        if remainder % 3 == 0:
            return fives + remainder // 3
    return -1
# Read total kilograms and print the minimal bag count (-1 when impossible).
n = int(input())
print(bags(n))
989,567 | d1f5d340a2ff887d2fa6732293f8965d2056e52f | from flask import Flask
import rocksdb
app = Flask(__name__)
# RocksDB store is opened once at import time and shared by all views.
db = rocksdb.DB("test.db", rocksdb.Options(create_if_missing=True))
# Imported last on purpose: views imports `app`/`db` from this module, so the
# bottom import avoids a circular-import failure (standard Flask pattern).
from app import views
989,568 | 79d731729650ac91be9eacda3aac0601ba20dae8 | import numpy as np
import matplotlib.pylab as plt
import math
import rls
#-----------------------------------------------------------
# Example: sinus signal
#-----------------------------------------------------------
# Fit a noisy sine with a recursive least-squares line (intercept + slope)
# and plot the tracked estimate against the true signal.
samples = 300
time = np.linspace(0.1, 5, samples)
true_signal = [math.sin(x) for x in time]
noise = np.random.normal(0, 0.1, samples)
y = true_signal + noise
# rls.rls(...) — presumably (n_samples, n_params, x, y, forgetting_factor,
# initial covariance); returns per-step parameter estimates. TODO confirm
# against the rls module's signature.
rec_ls = rls.rls(samples, 2, time, y, 0.9, 100)
# use estimated parameters to calculate output at each time step
result = []
for i in range(samples):
    par = rec_ls[i]
    t = time[i]
    result.append(par[0] + par[1] * t)
plt.plot(time, true_signal, linestyle='--', color='r',\
         label='true signal')
# plt.plot(time, y, 'ro', label='samples')
plt.plot(time, result, color='g', label='recursive ls')
plt.legend()
plt.show()
989,569 | 48f96cfb21640094e2da6ed5fdb8827af2ba735f | import json
from flask import Flask
from flask_restful import reqparse, abort, Api, Resource
import app
from app.room import RoomList, Room
from app import models
#https://flask-restful.readthedocs.io/en/latest/quickstart.html#endpoints
# REST endpoints: collection at /room, single room at /room/<room_id>.
app.api.add_resource(RoomList, '/room')
app.api.add_resource(Room, '/room/<room_id>')
if __name__ == '__main__':
    # Create schema and seed demo data before serving.
    models.init_database()
    models.create_testdata()
    # NOTE(review): debug=True with host 0.0.0.0 exposes the Werkzeug debugger
    # to the network — development only.
    app.app.run(host='0.0.0.0', debug = True)
989,570 | fc04b33da491137ba86f664463243028f3164fb7 | import peewee
from datetime import datetime, date
from .core import JSONField
# NOTE(review): production MySQL credentials are hard-coded in source — move
# them to environment variables/config and rotate this password.
host = 'data.ultragis.com'
db_fetch = peewee.MySQLDatabase(host=host, database='fetch', user='root', password='Bayesian@2018', charset='utf8')
class BaseModel(peewee.Model):
    """Base class binding all models in this module to the `fetch` database."""
    class Meta:
        database = db_fetch
# Tender-information harvest table (full history of gathered listings).
class GatherInfo(BaseModel):
    class Meta:
        db_table = 'gather_full'
        indexes = (
            # Non-unique composite indexes on (day, source) and (title, subject).
            (('day', 'source'), False), (('title', 'subject'), False),
        )
    uuid = peewee.CharField(primary_key=True, max_length=50, help_text='url的hash值')
    day = peewee.DateField(help_text='招标日期')
    source = peewee.CharField(max_length=50, help_text='招标来源')
    url = peewee.CharField(help_text='详情页请求网址')
    end = peewee.DateField(null=True, help_text='截止日期')
    title = peewee.CharField(null=True, help_text='标题')
    area = peewee.CharField(null=True, max_length=50, help_text='地区(省、市等)')
    subject = peewee.CharField(null=True, help_text='信息分类(招标、中标等)')
    industry = peewee.CharField(null=True, help_text='行业分类(机械、软件等)')
    pid = peewee.CharField(null=True, max_length=255)
    tender = peewee.CharField(null=True, max_length=255)
    budget = peewee.CharField(null=True, help_text='预算金额/中标金额')
    tels = peewee.CharField(null=True, help_text='联系电话')
    extends = peewee.TextField(null=True, help_text='扩展信息(json格式)')
    time = peewee.DateTimeField(default=datetime.now, help_text='时间戳')
# Raw page content captured for each gathered listing (keyed by the same uuid).
class ContentInfo(BaseModel):
    class Meta:
        db_table = 'content_full'
    uuid = peewee.CharField(primary_key=True, max_length=50)
    day = peewee.DateField(index=True, help_text='招标日期')
    source = peewee.CharField(index=True, max_length=50, help_text='招标来源')
    index_url = peewee.CharField(null=True, help_text='索引页网址')
    top_url = peewee.CharField(null=True, help_text='框架详情页的顶层网址')
    real_url = peewee.CharField(null=True, help_text='详情页转向之后的真实网址(redirect)')
    html = peewee.BlobField(null=True, help_text='HTML内容(压缩)')
    digest = peewee.TextField(null=True, help_text='关键字段摘要')
    contents = peewee.TextField(null=True, help_text='招标详情(正文)')
    attachments = peewee.TextField(null=True, help_text='附件信息')
    time = peewee.DateTimeField(default=datetime.now, help_text='时间戳')
# Word/tag frequency statistics derived from the tables above.
class Statistic(BaseModel):
    id = peewee.PrimaryKeyField()
    tab = peewee.CharField(max_length=50, help_text='表名')
    col = peewee.CharField(max_length=50, null=True, help_text='列名')
    tags = peewee.CharField(max_length=255, null=True, help_text='标签')
    word = peewee.CharField(max_length=255, help_text='词')
    count = peewee.IntegerField(help_text='出现次数')
    info = JSONField(max_length=2000, null=True, help_text='附加信息')
    time = peewee.DateTimeField(default=datetime.now, help_text='时间戳')
# Ensure the table exists at import time (True => don't fail if it already does).
Statistic.create_table(True)
|
989,571 | dd6bfa1b22676e947b79f561e87c61ccb16e17cf | array = [int(x) for x in input().split()]
# Classify each input triple as a right triangle until the 0 0 0 sentinel.
while array != [0, 0, 0]:
    a, b, c = sorted(array)
    # Pythagorean check with c as the largest side.
    print("right" if a ** 2 + b ** 2 == c ** 2 else "wrong")
    array = [int(x) for x in input().split()]
|
989,572 | 593d74eb46b8a882dba7ccd47878f3ef4380d184 | #!/usr/bin/env python
# This code is based on the sample at https://pypi.python.org/pypi/inotify
#
from __future__ import print_function, unicode_literals
import logging
import inotify.adapters
import sys
def usage():
    """Print command-line usage help for the watcher script."""
    print("""python watcher.py watchpath
    watchpath is a folder to watch
    Writes log messages to stdout
    Note: this script watches directories recursively so watching a big file tree will be expensive!
""")
    return
def configure_logging():
    """Initialise root logging at DEBUG level for the watcher."""
    logging.basicConfig(level=logging.DEBUG)
def main(path):
    """Watch *path* (bytes) recursively and log every inotify event until
    interrupted.

    Returns 0 on clean exit (Ctrl-C), 1 if the watch could not be set up.
    """
    try:
        i = inotify.adapters.InotifyTree(path)
    except Exception as e:
        # Most common failure: unreadable directories inside the tree.
        print("Make sure you have read permission on \"%s\", including subdirectories!" % path)
        print("Error:", e)
        return 1
    try:
        for event in i.event_gen():
            if event is not None:
                # event = (header, type_names, watch_path, filename);
                # the paths come back as bytes from this inotify version.
                (header, type_names, watch_path, filename) = event
                logging.info("NAMES=%s PATH=[%s] FILENAME=[%s]",
                             type_names, watch_path.decode('utf-8'),
                             filename.decode('utf-8'))
    except KeyboardInterrupt:
        pass
    return 0
if __name__ == '__main__':
    try:
        path = sys.argv[1]
    # Bug fix: the original bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; only a missing CLI argument should show usage.
    except IndexError:
        usage()
        exit(1)
    configure_logging()
    # InotifyTree expects a bytes path.
    rval = main(path.encode('UTF-8'))
    exit(rval)
|
989,573 | e7724b70881646cfa18298020a801b0921e429d4 | __copyright__ = "Copyright (C) 2020 University of Illinois Board of Trustees"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
import numpy.linalg as la # noqa
import pyopencl as cl
from meshmode.array_context import PyOpenCLArrayContext
from meshmode.dof_array import thaw
from meshmode.mesh import BTAG_ALL, BTAG_NONE # noqa
from grudge.eager import EagerDGDiscretization
from grudge.shortcuts import make_visualizer
from grudge.dof_desc import DISCR_TAG_BASE, DTAG_BOUNDARY
from mirgecom.integrators import rk4_step
from mirgecom.diffusion import (
diffusion_operator,
DirichletDiffusionBoundary,
NeumannDiffusionBoundary)
from mirgecom.mpi import mpi_entry_point
import pyopencl.tools as cl_tools
@mpi_entry_point
def main():
    """Solve a 2-D transient diffusion problem with a steady Gaussian heat
    source, partitioned over MPI ranks; writes per-rank VTK files every 10
    steps until t_final is reached."""
    cl_ctx = cl.create_some_context()
    queue = cl.CommandQueue(cl_ctx)
    actx = PyOpenCLArrayContext(queue,
        allocator=cl_tools.MemoryPool(cl_tools.ImmediateAllocator(queue)))
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    num_parts = comm.Get_size()
    from meshmode.distributed import MPIMeshDistributor, get_partition_by_pymetis
    mesh_dist = MPIMeshDistributor(comm)
    dim = 2
    nel_1d = 16
    if mesh_dist.is_mananger_rank():
        from meshmode.mesh.generation import generate_regular_rect_mesh
        # Unit square [-0.5, 0.5]^dim: Dirichlet on +-x, Neumann on +-y.
        mesh = generate_regular_rect_mesh(
            a=(-0.5,)*dim,
            b=(0.5,)*dim,
            nelements_per_axis=(nel_1d,)*dim,
            boundary_tag_to_face={
                "dirichlet": ["+x", "-x"],
                "neumann": ["+y", "-y"]
                }
            )
        print("%d elements" % mesh.nelements)
        # Manager rank partitions the mesh and ships pieces to the others.
        part_per_element = get_partition_by_pymetis(mesh, num_parts)
        local_mesh = mesh_dist.send_mesh_parts(mesh, part_per_element, num_parts)
        del mesh
    else:
        local_mesh = mesh_dist.receive_mesh_part()
    order = 3
    discr = EagerDGDiscretization(actx, local_mesh, order=order,
                    mpi_communicator=comm)
    if dim == 2:
        # no deep meaning here, just a fudge factor
        dt = 0.0025/(nel_1d*order**2)
    else:
        raise ValueError("don't have a stable time step guesstimate")
    source_width = 0.2
    nodes = thaw(actx, discr.nodes())
    # Homogeneous boundary data on both boundary types.
    boundaries = {
        DTAG_BOUNDARY("dirichlet"): DirichletDiffusionBoundary(0.),
        DTAG_BOUNDARY("neumann"): NeumannDiffusionBoundary(0.)
        }
    u = discr.zeros(actx)
    vis = make_visualizer(discr)
    def rhs(t, u):
        # Diffusion (alpha=1) plus a steady Gaussian source at the origin.
        return (
            diffusion_operator(
                discr, quad_tag=DISCR_TAG_BASE,
                alpha=1, boundaries=boundaries, u=u)
            + actx.np.exp(-np.dot(nodes, nodes)/source_width**2))
    rank = comm.Get_rank()
    t = 0
    t_final = 0.01
    istep = 0
    while True:
        if istep % 10 == 0:
            print(istep, t, discr.norm(u))
            vis.write_vtk_file("fld-heat-source-mpi-%03d-%04d.vtu" % (rank, istep),
                    [
                        ("u", u)
                        ])
        if t >= t_final:
            break
        # Classical RK4 step of the method-of-lines ODE du/dt = rhs(t, u).
        u = rk4_step(u, t, dt, rhs)
        t += dt
        istep += 1
if __name__ == "__main__":
main()
# vim: foldmethod=marker
|
989,574 | b6e7c739dfd7ef2540ba0a14dd436c3a3e480323 | """ Resource Module """
import os
import json
import logging
from urllib.parse import urlparse
#pylint: disable=E0401
from astarte.exception import ResourceError
from astarte.utility import TMP_REFERENCE_DIR
L = logging.getLogger(__name__)
class Parser:
    """Resource parser: locates resource files and their metadata on disk."""
    @classmethod
    def search(cls, target, _id=None):
        """Search the resource folder for *target* (a URL-style locator).

        Arguments:
            target(str): locator like 'scheme://netloc/path'.
            _id(str): optional resource id path, appended under 'id'.

        Returns:
            tuple: (filepath, name, bounds), or (None, None, None) when no
            matching metadata file exists.

        Raises:
            ResourceError: if the base directory is missing or the queried
            entry is not found in the metadata.
        """
        if _id:
            target = '%s/id' % target
        info = urlparse(target)
        base_folder = os.path.join(TMP_REFERENCE_DIR, info.netloc)
        if not os.path.exists(base_folder):
            raise ResourceError('Could not find base directory : %s' % info.netloc)
        for f in os.listdir(base_folder):
            if f.find('%s.json' % info.scheme) != -1:
                with open(os.path.join(base_folder, f), 'r') as jf:
                    data = json.load(jf)
                result = Parser.query(data, info.path)
                if not result:
                    # Bug fix: the exception was constructed but never raised,
                    # so a missing entry crashed on result['name'] below.
                    raise ResourceError('Could not find target Infomation : %s' % info.path)
                return Parser.path(base_folder, info.path, _id), result['name'], result['bounds']
        L.warning('Could not Found Resource.')
        return None, None, None
    @classmethod
    def path(cls, base_folder, path, _id=None):
        """Join *path* (and the optional *_id*) segment by segment onto
        *base_folder* and return the resulting filesystem path."""
        for segment in path.split('/'):
            base_folder = os.path.join(base_folder, segment)
        if _id is not None:
            for segment in _id.split('/'):
                base_folder = os.path.join(base_folder, segment)
        return base_folder
    @classmethod
    def query(cls, d, q):
        """Walk nested dict *d* along the '/'-separated key path *q*.

        Returns the value at the path, or None when any key is missing.
        Empty segments (from leading/double slashes) are skipped.
        """
        node = d
        for key in q.split('/'):
            if key == '':
                continue
            if key in node:
                node = node[key]
            else:
                return None
        return node
989,575 | 65ad93f02fe6964b3e8156af5648728f6b527c86 | #!/usr/bin/env python3
#
# Copyright 2021 Graviti. Licensed under MIT License.
#
"""User, Commit, Tag, Branch and Draft classes.
:class:`User` defines the basic concept of a user with an action.
:class:`Commit` defines the structure of a commit.
:class:`Tag` defines the structure of a commit tag.
:class:`Branch` defines the structure of a branch.
:class:`Draft` defines the structure of a draft.
"""
from typing import Any, Dict, Optional, Tuple, Type, TypeVar
from ..utility import EqMixin, ReprMixin, common_loads
class User(ReprMixin, EqMixin):
"""This class defines the basic concept of a user with an action.
Arguments:
name: The name of the user.
date: The date of the user action.
"""
_T = TypeVar("_T", bound="User")
_repr_attrs = ("date",)
def __init__(self, name: str, date: int) -> None:
self.name = name
self.date = date
def _repr_head(self) -> str:
return f'{self.__class__.__name__}("{self.name}")'
def _loads(self, contents: Dict[str, Any]) -> None:
self.name = contents["name"]
self.date = contents["date"]
@classmethod
def loads(cls: Type[_T], contents: Dict[str, Any]) -> _T:
"""Loads a :class:`User` instance from the given contents.
Arguments:
contents: A dict containing all the information of the commit::
{
"name": <str>
"date": <int>
}
Returns:
A :class:`User` instance containing all the information in the given contents.
"""
return common_loads(cls, contents)
def dumps(self) -> Dict[str, Any]:
"""Dumps all the user information into a dict.
Returns:
A dict containing all the information of the user::
{
"name": <str>
"date": <int>
}
"""
return {"name": self.name, "date": self.date}
class Commit(ReprMixin, EqMixin):
"""This class defines the structure of a commit.
Arguments:
commit_id: The commit id.
parent_commit_id: The parent commit id.
message: The commit message.
committer: The commit user.
"""
_T = TypeVar("_T", bound="Commit")
_repr_attrs: Tuple[str, ...] = ("parent_commit_id", "message", "committer")
_repr_maxlevel = 2
def __init__(
self,
commit_id: str,
parent_commit_id: Optional[str],
message: str,
committer: User,
) -> None:
self.commit_id = commit_id
self.parent_commit_id = parent_commit_id
self.message = message
self.committer = committer
def _repr_head(self) -> str:
return f'{self.__class__.__name__}("{self.commit_id}")'
def _loads(self, contents: Dict[str, Any]) -> None:
self.commit_id = contents["commitId"]
self.parent_commit_id = contents["parentCommitId"]
self.message = contents["message"]
self.committer = User.loads(contents["committer"])
@classmethod
def loads(cls: Type[_T], contents: Dict[str, Any]) -> _T:
"""Loads a :class:`Commit` instance for the given contents.
Arguments:
contents: A dict containing all the information of the commit::
{
"commitId": <str>
"parentCommitId": <str> or None
"message": <str>
"committer": {
"name": <str>
"date": <int>
}
}
Returns:
A :class:`Commit` instance containing all the information in the given contents.
"""
return common_loads(cls, contents)
def dumps(self) -> Dict[str, Any]:
"""Dumps all the commit information into a dict.
Returns:
A dict containing all the information of the commit::
{
"commitId": <str>
"parentCommitId": <str> or None
"message": <str>
"committer": {
"name": <str>
"date": <int>
}
}
"""
return {
"commitId": self.commit_id,
"parentCommitId": self.parent_commit_id,
"message": self.message,
"committer": self.committer.dumps(),
}
class _NamedCommit(Commit):
"""This class defines the structure of a named commit.
:class:`_NamedCommit` is the base class of :class:`Tag` and :class:`Branch`.
Arguments:
name: The name of the named commit.
commit_id: The commit id.
parent_commit_id: The parent commit id.
message: The commit message.
committer: The commit user.
"""
_T = TypeVar("_T", bound="_NamedCommit")
_repr_attrs = ("commit_id",) + Commit._repr_attrs
def __init__( # pylint: disable=too-many-arguments
self,
name: str,
commit_id: str,
parent_commit_id: Optional[str],
message: str,
committer: User,
) -> None:
super().__init__(commit_id, parent_commit_id, message, committer)
self.name = name
def _repr_head(self) -> str:
return f'{self.__class__.__name__}("{self.name}")'
def _loads(self, contents: Dict[str, Any]) -> None:
super()._loads(contents)
self.name = contents["name"]
@classmethod
def loads(cls: Type[_T], contents: Dict[str, Any]) -> _T:
"""Loads a :class:`_NamedCommit` instance for the given contents.
Arguments:
contents: A dict containing all the information of the named commit::
{
"name": <str>
"commitId": <str>
"parentCommitId": <str> or None
"message": <str>
"committer": {
"name": <str>
"date": <int>
}
}
Returns:
A :class:`_NamedCommit` instance containing all the information in the given contents.
"""
return common_loads(cls, contents)
def dumps(self) -> Dict[str, Any]:
"""Dumps all the named commit information into a dict.
Returns:
A dict containing all the information of the named commit::
{
"name": <str>
"commitId": <str>
"parentCommitId": <str> or None
"message": <str>
"committer": {
"name": <str>
"date": <int>
}
}
"""
contents = super().dumps()
contents["name"] = self.name
return contents
class Tag(_NamedCommit):
"""This class defines the structure of the tag of a commit.
Arguments:
name: The name of the tag.
commit_id: The commit id.
parent_commit_id: The parent commit id.
message: The commit message.
committer: The commit user.
"""
class Branch(_NamedCommit):
"""This class defines the structure of a branch.
Arguments:
name: The name of the branch.
commit_id: The commit id.
parent_commit_id: The parent commit id.
message: The commit message.
committer: The commit user.
"""
class Draft(ReprMixin, EqMixin):
"""This class defines the basic structure of a draft.
Arguments:
number: The number of the draft.
title: The title of the draft.
"""
_T = TypeVar("_T", bound="Draft")
_repr_attrs = ("title",)
def __init__(self, number: int, title: str) -> None:
self.number = number
self.title = title
def _repr_head(self) -> str:
return f"{self.__class__.__name__}({self.number})"
def _loads(self, contents: Dict[str, Any]) -> None:
self.number = contents["number"]
self.title = contents["title"]
@classmethod
def loads(cls: Type[_T], contents: Dict[str, Any]) -> _T:
"""Loads a :class:`Draft` instance from the given contents.
Arguments:
contents: A dict containing all the information of the draft::
{
"number": <int>
"title": <str>
}
Returns:
A :class:`Draft` instance containing all the information in the given contents.
"""
return common_loads(cls, contents)
def dumps(self) -> Dict[str, Any]:
"""Dumps all the information of the draft into a dict.
Returns:
A dict containing all the information of the draft::
{
"number": <int>
"title": <str>
}
"""
return {"number": self.number, "title": self.title}
|
989,576 | 6b9c92a288da24d8c102601756ec01bd1de4c195 | ## Sid Meier's Civilization 4
## Copyright Firaxis Games 2005
from CvPythonExtensions import *
import CvUtil
import random
from math import sqrt
import sys
import RFCUtils #Rhye
utils = RFCUtils.RFCUtils()
import Consts as con
import StoredData #Rhye
import cPickle as pickle
gc = CyGlobalContext() #Rhye
"""
NOTES ABOUT THE MAP UTILITIES
generatePlotTypes(), generateTerrainTypes(), and addFeatures() are mandatory functions for all map scripts.
FractalWorld, HintedWorld, and MultilayeredFractal classes are different ways to generatePlotTypes. (Fractal
world is Soren's baby. HintedWorld is Andy's baby. MultilayeredFractal is Bob's baby.) There is no C++ default
for the plot generation process. Each map script must handle the process on its own, typically by calling
one of these three classes (or subclassing them).
TerrainGenerator is the only primary method for generating terrain types. Call, subclass, or replace it to
set terrain types. FeatureGenerator is the primary method for adding Features to the map.
The minor functions at the end are either children of HintedWorld or, in the case of findStartingPlot, an
alternative method to the default process for placing the starting units for each civ.
- Bob Thomas September 23, 2005
"""
class FractalWorld:
    def __init__(self, fracXExp=CyFractal.FracVals.DEFAULT_FRAC_X_EXP,
                 fracYExp=CyFractal.FracVals.DEFAULT_FRAC_Y_EXP):
        """Set up fractal generators and the user-input (sea level / climate)
        tuning values for plot-type generation."""
        self.gc = CyGlobalContext()
        self.map = self.gc.getMap()
        self.iNumPlotsX = self.map.getGridWidth()
        self.iNumPlotsY = self.map.getGridHeight()
        self.mapRand = self.gc.getGame().getMapRand()
        self.iFlags = self.map.getMapFractalFlags()
        # Start with an all-ocean grid; land/hills/peaks are carved out later.
        self.plotTypes = [PlotTypes.PLOT_OCEAN] * (self.iNumPlotsX*self.iNumPlotsY)
        self.fracXExp = fracXExp
        self.fracYExp = fracYExp
        self.continentsFrac = CyFractal()
        self.hillsFrac = CyFractal()
        self.peaksFrac = CyFractal()
        # init User Input variances (XML-driven defaults, override via
        # checkForOverrideDefaultUserInputVariances in subclasses)
        self.seaLevelChange = self.gc.getSeaLevelInfo(self.map.getSeaLevel()).getSeaLevelChange()
        self.seaLevelMax = 100
        self.seaLevelMin = 0
        self.hillGroupOneRange = self.gc.getClimateInfo(self.map.getClimate()).getHillRange()
        self.hillGroupOneBase = 25
        self.hillGroupTwoRange = self.gc.getClimateInfo(self.map.getClimate()).getHillRange()
        self.hillGroupTwoBase = 75
        self.peakPercent = self.gc.getClimateInfo(self.map.getClimate()).getPeakPercent()
        self.stripRadius = 15
def checkForOverrideDefaultUserInputVariances(self):
# Subclass and override this function to customize/alter/nullify
# the XML defaults for user selections on Sea Level, Climate, etc.
return
    def initFractal(self, continent_grain = 2, rift_grain = 2, has_center_rift = True, invert_heights = False, polar = False):
        "For no rifts, use rift_grain = -1"
        # Assemble fractal flags from the map defaults plus requested options.
        iFlags = self.iFlags
        if invert_heights:
            iFlags += CyFractal.FracVals.FRAC_INVERT_HEIGHTS
        if polar:
            iFlags += CyFractal.FracVals.FRAC_POLAR
        if rift_grain >= 0:
            # Rifted continents: a secondary fractal carves ocean rifts.
            self.riftsFrac = CyFractal()
            self.riftsFrac.fracInit(self.iNumPlotsX, self.iNumPlotsY, rift_grain, self.mapRand, 0, self.fracXExp, self.fracYExp)
            if has_center_rift:
                iFlags += CyFractal.FracVals.FRAC_CENTER_RIFT
            self.continentsFrac.fracInitRifts(self.iNumPlotsX, self.iNumPlotsY, continent_grain, self.mapRand, iFlags, self.riftsFrac, self.fracXExp, self.fracYExp)
        else:
            self.continentsFrac.fracInit(self.iNumPlotsX, self.iNumPlotsY, continent_grain, self.mapRand, iFlags, self.fracXExp, self.fracYExp)
def shiftPlotTypes(self):
stripRadius = self.stripRadius
best_split_x, best_split_y = 0,0
if self.map.isWrapX():
best_split_x = self.findBestSplitX(stripRadius)
if self.map.isWrapY():
best_split_y = self.findBestSplitY(stripRadius)
self.shiftPlotTypesBy(best_split_x, best_split_y)
def shiftPlotTypesBy(self, xshift, yshift):
if xshift > 0 or yshift > 0:
iWH = self.iNumPlotsX * self.iNumPlotsY
buf = self.plotTypes[:]
for iDestY in range(self.iNumPlotsY):
for iDestX in range(self.iNumPlotsX):
iDestI = self.iNumPlotsX*iDestY + iDestX
iSourceX = iDestX + xshift
iSourceY = iDestY + yshift
iSourceX %= self.iNumPlotsX
iSourceY %= self.iNumPlotsY
iSourceI = self.iNumPlotsX*iSourceY + iSourceX
self.plotTypes[iDestI] = buf[iSourceI]
def findBestSplitY(self, stripRadius):
stripSize = 2*stripRadius
if stripSize > self.iNumPlotsX:
return 0
numPlots = self.iNumPlotsX * self.iNumPlotsY
stripCenterIndex = stripRadius
piLandWeights = self.calcWeights(stripRadius)
scores = [0]*self.iNumPlotsY
for y in range(self.iNumPlotsY):
landScore = 0
bFoundLand = False
for x in range(self.iNumPlotsX):
i = y*self.iNumPlotsX + x
assert (i >= 0 and i < numPlots)
if self.plotTypes[i] == PlotTypes.PLOT_LAND:
landScore += 1
bFoundLand = True
if bFoundLand:
landScore += 30 # the first land is worth about 10 plots of land
for i in range(stripSize):
yy = y + i - stripCenterIndex
yy %= self.iNumPlotsY
scores[yy] += landScore * piLandWeights[i]
best_split_y, lowest_score = argmin(scores)
return best_split_y
def findBestSplitX(self, stripRadius):
stripSize = 2*stripRadius
if stripSize > self.iNumPlotsX:
return 0
numPlots = self.iNumPlotsX * self.iNumPlotsY
stripCenterIndex = stripRadius
piLandWeights = self.calcWeights(stripRadius)
scores = [0]*self.iNumPlotsX
for x in range(self.iNumPlotsX):
landScore = 0
bFoundLand = False
for y in range(self.iNumPlotsY):
i = y*self.iNumPlotsX + x
assert (i >= 0 and i < numPlots)
if self.plotTypes[i] == PlotTypes.PLOT_LAND:
landScore += 1
bFoundLand = True
if bFoundLand:
landScore += 30 # the first land is worth about 10 plots of land
for i in range(stripSize):
xx = x + i - stripCenterIndex
xx %= self.iNumPlotsX
scores[xx] += landScore * piLandWeights[i]
best_split_x, lowest_score = argmin(scores)
return best_split_x
def calcWeights(self, stripRadius):
stripSize = 2*stripRadius
landWeights = [0]*stripSize
for i in range(stripSize):
distFromStart = i+1
distFromEnd = stripSize-i
distFromEdge = min(distFromStart, distFromEnd)
landWeight = distFromEdge
distFromCenter = stripRadius - distFromEdge
if distFromCenter <= 1:
landWeight *= stripRadius
if distFromCenter == 0:
landWeight *= 2
landWeights[i] = landWeight
return landWeights
def generatePlotTypes(self, water_percent=78, shift_plot_types=True, grain_amount=3):
# Check for changes to User Input variances.
self.checkForOverrideDefaultUserInputVariances()
self.hillsFrac.fracInit(self.iNumPlotsX, self.iNumPlotsY, grain_amount, self.mapRand, self.iFlags, self.fracXExp, self.fracYExp)
self.peaksFrac.fracInit(self.iNumPlotsX, self.iNumPlotsY, grain_amount+1, self.mapRand, self.iFlags, self.fracXExp, self.fracYExp)
water_percent += self.seaLevelChange
water_percent = min(water_percent, self.seaLevelMax)
water_percent = max(water_percent, self.seaLevelMin)
iWaterThreshold = self.continentsFrac.getHeightFromPercent(water_percent)
iHillsBottom1 = self.hillsFrac.getHeightFromPercent(max((self.hillGroupOneBase - self.hillGroupOneRange), 0))
iHillsTop1 = self.hillsFrac.getHeightFromPercent(min((self.hillGroupOneBase + self.hillGroupOneRange), 100))
iHillsBottom2 = self.hillsFrac.getHeightFromPercent(max((self.hillGroupTwoBase - self.hillGroupTwoRange), 0))
iHillsTop2 = self.hillsFrac.getHeightFromPercent(min((self.hillGroupTwoBase + self.hillGroupTwoRange), 100))
iPeakThreshold = self.peaksFrac.getHeightFromPercent(self.peakPercent)
for x in range(self.iNumPlotsX):
for y in range(self.iNumPlotsY):
i = y*self.iNumPlotsX + x
val = self.continentsFrac.getHeight(x,y)
if val <= iWaterThreshold:
self.plotTypes[i] = PlotTypes.PLOT_OCEAN
else:
hillVal = self.hillsFrac.getHeight(x,y)
if ((hillVal >= iHillsBottom1 and hillVal <= iHillsTop1) or (hillVal >= iHillsBottom2 and hillVal <= iHillsTop2)):
peakVal = self.peaksFrac.getHeight(x,y)
if (peakVal <= iPeakThreshold):
self.plotTypes[i] = PlotTypes.PLOT_PEAK
else:
self.plotTypes[i] = PlotTypes.PLOT_HILLS
else:
self.plotTypes[i] = PlotTypes.PLOT_LAND
if shift_plot_types:
self.shiftPlotTypes()
return self.plotTypes
# Unit offsets to the four cardinal neighbours: east, north, west, south.
cardinal_directions = (1,0), (0,1), (-1,0), (0, -1)
class HintedWorld(FractalWorld):
    """FractalWorld variant seeded from a coarse grid of layout "hints".

    The map is split into w x h blocks.  Continents are placed and grown on
    that block grid (addContinent / expandContinentBy), producing hint values
    (>= 192 marks a land block) that are fed to CyFractal.fracInitHints so the
    generated fractal roughly follows the requested layout.
    """
    def __init__(self, w=16, h=8, fracXExp=CyFractal.FracVals.DEFAULT_FRAC_X_EXP,
            fracYExp=CyFractal.FracVals.DEFAULT_FRAC_Y_EXP):
        FractalWorld.__init__(self, fracXExp, fracYExp)
        # Plot-to-block scale factors.  Python 2 "/" floors when both
        # operands are ints; blockToPlot re-applies int() anyway.
        self.plotsPerBlockX = self.iNumPlotsX/w
        self.plotsPerBlockY = self.iNumPlotsY/h
        # Non-wrapping axes get one extra hint column/row.
        if not self.iFlags & CyFractal.FracVals.FRAC_WRAP_X:
            w += 1
        if not self.iFlags & CyFractal.FracVals.FRAC_WRAP_Y:
            h += 1
        self.w, self.h = w,h # the map is divided into 'w' blocks by 'h' blocks
        self.data = [None]*(w*h) # one hint value per block; None = unset
        self.mapRand = CyGlobalContext().getGame().getMapRand()
        self.continents = []
    def normalizeBlock(self, x, y):
        # Wrap block coordinates on whichever axes the map wraps.
        map = CyMap()
        if map.isWrapX():
            x = x % self.w
        if map.isWrapY():
            y = y % self.h
        return x,y
    def setValue(self, x, y, val):
        # Store hint 'val' at block (x,y); returns False when out of bounds.
        x,y = self.normalizeBlock(x,y)
        if self.inBounds(x,y):
            self.data[self.w*y + x] = val
            return True
        else:
            return False
    def getValue(self, x, y):
        # Fetch the hint at block (x,y); None when out of bounds (or unset).
        x,y = self.normalizeBlock(x,y)
        if self.inBounds(x,y):
            return self.data[self.w*y + x]
        else:
            return None
    def blockToPlot(self, blockx, blocky):
        # Map block coords to the plot coords of that block's SW corner.
        scalex, scaley = self.plotsPerBlockX, self.plotsPerBlockY
        plotx, ploty = scalex*(blockx), scaley*(blocky)
        return (int(plotx), int(ploty))
    # nested class to describe a continent in the hinted world
    class Continent:
        def __init__(self, world, numBlocks, x, y, maxradius):
            # world:     owning HintedWorld
            # numBlocks: target block count; self.done flips True once reached
            # (x,y):     seed block coordinates
            # maxradius: cap on Manhattan distance from the seed (see isValid)
            self.world = world
            self.centerx = x
            self.centery = y
            self.targetNumBlocks = numBlocks
            self.maxradius = maxradius
            self.blocks = [(x,y)] # (x,y) coords of blocks that compose the continent
            self.rects = [] # one (x,y,w,h) rect. of plots for each (x,y) block'
            if numBlocks <= 1:
                self.done = True
            else:
                self.done = False
        def addBlock(self, x, y):
            # Append block (x,y) and its plot-space rect; mark done at target size.
            self.blocks.append((x,y))
            scalex, scaley = self.world.plotsPerBlockX, self.world.plotsPerBlockY
            rect = int(x*scalex), int(y*scaley), int(1*scalex), int(1*scaley)
            self.rects.append(rect)
            if len(self.blocks) >= self.targetNumBlocks:
                self.done = True
        def recalculateRects(self):
            # Rebuild self.rects from self.blocks (used after hints are shifted).
            scalex, scaley = self.world.plotsPerBlockX, self.world.plotsPerBlockY
            self.rects = []
            for (x,y) in self.blocks:
                rect = int(x*scalex), int(y*scaley), int(1*scalex), int(1*scaley)
                self.rects.append(rect)
        def containsPlot(self, x, y): # could add bRemoveParentRect here
            # True when plot (x,y) falls inside any of this continent's rects.
            point = (x,y)
            for rect in self.rects:
                if pointInRect(point, rect):
                    return True
            return False
        def getCenterPlot(self):
            # Plot-space centre of the seed block (may be fractional).
            scalex, scaley = self.world.plotsPerBlockX, self.world.plotsPerBlockY
            x = scalex*(self.centerx+0.5)
            y = scaley*(self.centery+0.5)
            return x,y
        def findStartingPlot(self, playerID):
            # Pick a starting plot for playerID restricted to this continent.
            validFn = lambda playerID, x, y: self.containsPlot(x,y)
            return findStartingPlot(playerID, validFn) # call global fn
    def addContinent(self, numBlocks, x=-1, y=-1, maxDist=-1, maxRadius=-1):
        # Seed a new continent of numBlocks blocks near (x,y) (random when -1).
        # Returns the new Continent, or None when no valid seed block exists.
        if (x == -1):
            x = self.mapRand.get(self.w, "Add Continent Width PYTHON")
        if (y == -1):
            y = self.mapRand.get(self.h, "Add Continent Height PYTHON")
        foundx, foundy = self.findValid(x,y, maxDist)
        if (foundx == -1 and foundy == -1):
            return None
        else:
            return self.__addContinentAt(numBlocks, foundx, foundy, maxRadius)
    def __addContinentAt(self, numBlocks, x, y, maxradius=-1):
        # Write a randomized land hint (192..255) and register the continent.
        land_value = 192 + self.mapRand.get(64, "Add Continent At PYTHON")
        self.setValue(x,y, land_value)
        cont = HintedWorld.Continent(self,numBlocks,x,y,maxradius)
        self.continents.append(cont)
        return cont
    def expandContinentBy(self, cont, numBlocks):
        # Grow 'cont' by numBlocks blocks, one at a time, each adjacent to an
        # existing block (recursing for the remainder).  When no legal
        # neighbouring block exists, marks the continent done and returns False.
        #print "expand continent by", numBlocks
        blockOrder = CvUtil.shuffle(len(cont.blocks), self.mapRand)
        for blockIndex in blockOrder:
            x,y = cont.blocks[blockIndex]
            dirOrder = CvUtil.shuffle(len(cardinal_directions), self.mapRand)
            for dirIndex in dirOrder:
                dx, dy = cardinal_directions[dirIndex]
                if self.isValid(x+dx,y+dy, cont):
                    cont.addBlock(x+dx,y+dy)
                    land_value = 208 + self.mapRand.get(48, "Expand Continent PYTHON")
                    self.setValue(x+dx, y+dy, land_value)
                    #print "\tadded block", x+dx, y+dy
                    if (numBlocks > 1):
                        return self.expandContinentBy(cont, numBlocks-1)
                    else:
                        return True
        print "\tcould not expand continent:"
        printMap(self.data, self.w, self.h, cont.centerx, cont.centery)
        cont.done = True
        return False
    def buildAllContinents(self):
        # Round-robin: grow every unfinished continent one block per pass
        # until all have reached their target size (or stalled).
        all_done = False
        while not all_done:
            all_done = True
            for cont in self.continents:
                if not cont.done:
                    self.expandContinentBy(cont, 1) #expand by 1 block
                    all_done = False
    def shiftHintsToMap(self):
        # Rotate the hint grid so wrap seams fall on mostly-water block lines.
        map = CyMap()
        wrapX = map.isWrapX()
        wrapY = map.isWrapY()
        splitx, splity = 0,0
        #self.printHints()
        if (wrapX):
            splitx = self.bestHintsSplitX()
        if (wrapY):
            splity = self.bestHintsSplitY()
        self.shiftHintsBy(splitx, splity)
        #self.printHints()
    def bestHintsSplitX(self):
        # Score each candidate column seam by the land hints (>= 192) touching
        # it on either side and return the column with the least land.
        # (Under Python 2, unset None hints compare below 192, so they count
        # as water here.)
        scores = [0]*self.w
        for x in range(self.w):
            for y in range(self.h):
                if self.getValue(x, y) >= 192: scores[x] += 1
                if self.getValue(x-1, y) >= 192: scores[x] += 1
        best_split, best_score = argmin(scores)
        return best_split
    def bestHintsSplitY(self):
        # Same as bestHintsSplitX, for row seams.
        scores = [0]*self.h
        for x in range(self.w):
            for y in range(self.h):
                if self.getValue(x, y) >= 192: scores[y] += 1
                if self.getValue(x, y-1) >= 192: scores[y] += 1
        best_split, best_score = argmin(scores)
        return best_split
    def shiftHintsBy(self, splitx, splity):
        # Translate all hints (and every continent's block list) by
        # (-splitx, -splity), wrapping via setValue/normalizeBlock.
        print "shifting hints by ", splitx, splity
        if splitx != 0 or splity != 0:
            buf = self.data[:]
            # shift the values in self.data left by best_split
            for x in range(self.w):
                for y in range(self.h):
                    i = y*self.w + x
                    self.setValue(x-splitx, y-splity, buf[i])
            # shift all continents' blocks left by best_split
            for cont in self.continents:
                cont.blocks = [self.normalizeBlock(x-splitx, y-splity) for (x,y) in cont.blocks]
                cont.recalculateRects()
    # self.data must represent a rect where w = 2*h,
    # and where both w and h are exponents of 2
    def __doInitFractal(self):
        # Feed the finished hint grid to the fractal generator.  Scans grains
        # until the fractal's hint-lattice size matches len(self.data); the
        # assert fires if the block grid cannot match any lattice.
        self.shiftHintsToMap()
        # don't call base method, this overrides it.
        size = len(self.data)
        minExp = min(self.fracXExp, self.fracYExp)
        iGrain = None
        for i in range(minExp):
            width = (1 << (self.fracXExp - minExp + i))
            height = (1 << (self.fracYExp - minExp + i))
            if not self.iFlags & CyFractal.FracVals.FRAC_WRAP_X:
                width += 1
            if not self.iFlags & CyFractal.FracVals.FRAC_WRAP_Y:
                height += 1
            if size == width*height:
                iGrain = i
        assert(iGrain != None)
        iFlags = self.map.getMapFractalFlags()
        self.continentsFrac.fracInitHints(self.iNumPlotsX, self.iNumPlotsY, iGrain, self.mapRand, iFlags, self.data, self.fracXExp, self.fracYExp)
    def isValid(self, x, y, cont=None):
        # A block is a valid growth/seed target when it is in bounds, within
        # cont's maxradius (Manhattan distance), currently unset, and not
        # adjacent to land belonging to any OTHER continent.
        if not self.inBounds(x, y):
            return False
        if cont and cont.maxradius > 0:
            if abs(x - cont.centerx) + abs(y - cont.centery) > cont.maxradius:
                return False
        val = self.getValue(x,y)
        if val != None:
            return False
        for dx in range(-1,2):
            for dy in range(-1,2):
                val = self.getValue(x+dx, y+dy)
                if val != None and val >= 192 and ((not cont) or (x+dx, y+dy) not in cont.blocks):
                    return False
        return True
    def findValid(self, x, y, dist=-1):
        # Find a valid block near (x,y): recurse inner rings first, then try
        # the ring at Chebyshev distance 'dist' in shuffled order.
        # Returns (-1, -1) when nothing valid was found.
        if (dist == -1):
            dist = max(self.w, self.h)
        if (dist > 0):
            foundx, foundy = self.findValid(x, y, dist-1)
            if (foundx != -1 and foundy != -1):
                return foundx, foundy
        plots = []
        for dx in range(-dist, dist+1):
            for dy in range(-dist, dist+1):
                if max(abs(dx), abs(dy)) == dist:
                    plots.append((x+dx, y+dy))
        plotOrder = CvUtil.shuffle(len(plots), self.mapRand)
        for plotIndex in plotOrder:
            tryx, tryy = plots[plotIndex]
            if self.isValid(tryx, tryy): return tryx, tryy
        return -1, -1
    def printHints(self, markerx=-1, markery=-1):
        # Debug helper: dump the hint grid as ASCII art.
        printMap(self.data, self.w, self.h, markerx, markery)
    def inBounds(self, x, y):
        # Bounds test after wrapping on the map's wrapping axes.
        x,y = self.normalizeBlock(x,y)
        return (0 <= x < self.w and 0 <= y < self.h)
    def generatePlotTypes(self, water_percent=-1, shift_plot_types=False):
        # Fill unset hints with low noise (0..47 reads as water height), init
        # the fractal from the hints, then defer to the superclass.  When
        # water_percent is -1, sea level is derived from the fraction of hint
        # blocks below 192 (the land-hint threshold).
        for i in range(len(self.data)):
            if self.data[i] == None:
                self.data[i] = self.mapRand.get(48, "Generate Plot Types PYTHON")
        self.__doInitFractal()
        if (water_percent == -1):
            numPlots = len(self.data)
            numWaterPlots = 0
            for val in self.data:
                if val < 192: # below the land-hint threshold counts as water
                    numWaterPlots += 1
            water_percent = int(100*numWaterPlots/numPlots)
        return FractalWorld.generatePlotTypes(self, water_percent, shift_plot_types) # call superclass
def printMap(data, w, h, markerx=-1, markery=-1):
print "-"*(w+2)
hrange = range(h)
hrange.reverse()
for y in hrange:
str = "|"
for x in range(w):
val = data[y*w + x]
if (x,y) == (markerx, markery):
str += "O"
elif val != 0:
str += "X"
else:
str += " "
str += "|"
print str
print "-"*(w+2)
'''
SIRIAN's "MULTILAYERED FRACTAL" INSTRUCTIONS
Since some map scripting concepts demanded the ability to use more than one
fractal instance to generate plot types, I set out to create a new class that
would use multiple "regional fractals" to assemble a complex map.
MultilayeredFractal duplicates the effects of FractalWorld for each layer
in turn. GeneratePlotsByRegion is the controlling function. You will need to
customize this function for each usage, but the rest of the class will stand
as written unless your needs fall outside the normal intended usage.
I've included an enormous amount of power over the layers, but this does mean
a long list of parameters that you must understand and organize for each layer.
Each layer must be passed this list of arguments:
Regional Variables Key:
iWaterPercent,
iRegionWidth, iRegionHeight,
iRegionWestX, iRegionSouthY,
iRegionGrain, iRegionHillsGrain,
iRegionPlotFlags, iRegionTerrainFlags,
iRegionFracXExp, iRegionFracYExp,
bShift, iStrip,
rift_grain, has_center_rift,
invert_heights
Most of these should be self-explanatory, but I'll discuss the rest.
-------------------------------------------------
Grain is the density of land vs sea. Higher numbers generate more and smaller land masses.
HillsGrain is the density of highlands vs flatlands.
Peaks are included in highlands and work off the same density numbers.
Flags are special variables to pass to the fractal generator.
* FRAC_POLAR will eliminate straight edges along the border of your region.
* FRAC_WRAP_X will "spread out" the fractal horizontally and cancel FRAC_POLAR's vertical component.
* FRAC_WRAP_Y will "spread out" the fractal vertically and cancel FRAC_POLAR's horizontal component.
The Polar flag causes a maximum "height value" to be returned for any coordinates
with a zero component. (0,0 or 0,15 or 71,0 - for instance.) This can cause
problems for terrain and features on maps that put land plots in the zero row
or column. This will also cause a problem for any fractal region you generate.
I've included shortcuts for typical uses, but you may need to customize the flags
for some purposes. PlotFlags and TerrainFlags give you full control.
FracXExp is the width of the source fractal.
FracYExp is the height of the source fractal.
These exponents are raised to powers of two. So a value of FracXExp = 7
means 2^7, which would be 128 units wide. FracXExp = 6 would be only 64
units wide. FracYExp works the same way.
Default values are 7 for FracXExp and 6 for FracYExp, or 128x64 matrix.
I've poked around with the fractals quite a bit. Values lower than 5 seem to
distort the fractal's definition too much, so I don't recommend them even for
use with very small regions. 6x5 proved to be the smallest that I trust. Higher
exponents will generate more defined and refined fractal outputs, but at the
cost of increased calculation times. I would not recommend using exponents
higher than 9. (Larger than 512 even in only one direction is hopeless for Civ4's
True Pathfinding processes, anyway. The game would be unplayably slow!) So I
recommend sticking with 7 as maximum exponent unless your map will be more than
32 (4x4) plot blocks (128 plots) in at least one dimension. Sticking between the
ranges of 6 and 8 for whole maps, and 5 and 7 for regions, is recommended.
Shift is a boolean flag as to whether or not to shift plots in that region.
Strip value has to do with "shifting" the plots to reduce rough edges. This
process overlaps with the Polar flags, though, so if you are using Polar flags,
shifting won't do anything for you along the edges that are Polar-shifted.
The strip size needs to scale appropriately to the size of the region being
shifted. As of this writing, I have not yet determined all the best values to
fit with certain sizes of regions. (And these are a moving target based on map
sizes!) I will try to figure this out and update these notes again before release.
rift_grain has to do with forced strips of water running in a mostly vertical
direction. They simulate the Atlantic and Pacific Oceans separating Earth's
two primary land regions. You can turn off the Atlantic rift by setting
has_center_rift to false. You can turn off all rifts by setting rift_grain
to -1. For most regional fractals, you will probably want to disable rifts.
invert_heights is not a function I have needed, but it seems clear enough. It
has to do with results returned by the fractal generator and could be used
instead of adjusting the height values, in some cases. I always adjust the
height values, though, so it has seemed like a redundant option. It's there
in case somebody wanted to use it, though.
-------------------------------------------------
GeneratePlotsInRegion is a fully automated process. If you want to layer land
plots, all you need to do is call this function over and over from the
controlling function: GeneratePlotsByRegion
Each region needs to be defined by the map scripter, then organized in the
controlling function. Pass in the necessary arguments to generatePlotsInRegion
and get back a region of land, already "layered" on to the global plot array.
The global plot array begins as all water. Each layer of fractalized plots is
applied in turn, overwriting the previous layer. Water plots in each layer are
ignored. Land plots of any type are assigned to the applicable plot. The water
"left over" at the end of the process will be whatever plots went untouched by
any of the regional layers' land plots. If regions overlap, landforms may overlap,
too. This allows both separate-distinct regional use, and layering over a single
area with as many passes as the scripter selects.
For most uses, you can use a new subclass to override GeneratePlotsByRegion
and not have to mess with the rest of the class. GeneratePlotsByRegion is the
controlling function and must be customized for each applicable map script.
- Bob Thomas July 13, 2005
'''
# This class can be called instead of FractalWorld or HintedWorld.
# MultilayeredFractal enables multiple fractals to be
# layered over a single map, to generate plot types.
# Use GeneratePlotsByRegion to organize your fractal layers.
class MultilayeredFractal:
    """Plot-type generator that layers several regional fractals onto one map.

    Each call to generatePlotsInRegion() runs a FractalWorld-style pass over a
    rectangular region and stamps its land plots onto the global
    wholeworldPlotTypes array (region water never overwrites).  Subclasses
    override generatePlotsByRegion() — the controlling function — to define
    and order the regions; the version below is a non-runnable template.
    """
    def __init__(self, fracXExp=CyFractal.FracVals.DEFAULT_FRAC_X_EXP,
                 fracYExp=CyFractal.FracVals.DEFAULT_FRAC_Y_EXP):
        self.gc = CyGlobalContext()
        self.map = self.gc.getMap()
        self.iW = self.map.getGridWidth()
        self.iH = self.map.getGridHeight()
        self.dice = self.gc.getGame().getMapRand()
        self.iFlags = self.map.getMapFractalFlags() # Defaults for that map type.
        self.iTerrainFlags = self.map.getMapFractalFlags() # Defaults for that map type.
        self.iHorzFlags = CyFractal.FracVals.FRAC_WRAP_X + CyFractal.FracVals.FRAC_POLAR # Use to prevent flat edges to north or south.
        self.iVertFlags = CyFractal.FracVals.FRAC_WRAP_Y + CyFractal.FracVals.FRAC_POLAR # Use to prevent flat edges to east or west.
        self.iRoundFlags = CyFractal.FracVals.FRAC_POLAR # Use to prevent flat edges on all sides.
        self.plotTypes = [] # Regional array
        self.wholeworldPlotTypes = [PlotTypes.PLOT_OCEAN] * (self.iW*self.iH) # Global
        self.fracXExp = fracXExp
        self.fracYExp = fracYExp
    # Note: there is no checkForOverrideDefaultUserInputVariances()
    # function for MultilayeredFractal. You should control any
    # user input variances per region, in your region definitions.
    def shiftRegionPlots(self, iRegionWidth, iRegionHeight, iStrip=15):
        # Minimizes land plots along the region's edges by shifting the coordinates.
        # NOTE(review): the second assignment discards the min(15, iStrip) cap,
        # leaving stripRadius = max(3, iStrip).  Likely intended
        # "max(3, stripRadius)" to clamp into [3, 15] — confirm before fixing;
        # callers in this file pass iStrip <= 15 and are unaffected.
        stripRadius = min(15, iStrip)
        stripRadius = max(3, iStrip)
        best_split_x, best_split_y = 0,0
        best_split_x = self.findBestRegionSplitX(iRegionWidth, iRegionHeight, stripRadius)
        best_split_y = self.findBestRegionSplitY(iRegionWidth, iRegionHeight, stripRadius)
        self.shiftRegionPlotsBy(best_split_x, best_split_y, iRegionWidth, iRegionHeight)
    def shiftRegionPlotsBy(self, xshift, yshift, iRegionWidth, iRegionHeight):
        # Translate the regional plot array by (xshift, yshift), wrapping at
        # the region's edges.
        if xshift > 0 or yshift > 0:
            iWH = iRegionWidth * iRegionHeight
            buf = self.plotTypes[:]
            for iDestY in range(iRegionHeight):
                for iDestX in range(iRegionWidth):
                    iDestI = iRegionWidth*iDestY + iDestX
                    iSourceX = iDestX + xshift
                    iSourceY = iDestY + yshift
                    iSourceX %= iRegionWidth # normalize
                    iSourceY %= iRegionHeight
                    iSourceI = iRegionWidth*iSourceY + iSourceX
                    self.plotTypes[iDestI] = buf[iSourceI]
    def findBestRegionSplitY(self, iRegionWidth, iRegionHeight, stripRadius):
        # Return the y-row of the region where a seam cuts the least land.
        # NOTE(review): this guard compares against iRegionWidth although the
        # strip wraps modulo iRegionHeight below — looks like a copy-paste
        # from findBestRegionSplitX; confirm before changing.
        stripSize = 2*stripRadius
        if stripSize > iRegionWidth:
            return 0
        numPlots = iRegionWidth * iRegionHeight
        stripCenterIndex = stripRadius
        piLandWeights = self.calcWeights(stripRadius)
        scores = [0]*iRegionHeight
        for y in range(iRegionHeight):
            # One point per land plot in the row, plus a flat bonus when the
            # row contains any land at all.
            landScore = 0
            bFoundLand = False
            for x in range(iRegionWidth):
                i = y*iRegionWidth + x
                assert (i >= 0 and i < numPlots)
                if self.plotTypes[i] == PlotTypes.PLOT_LAND:
                    landScore += 1
                    bFoundLand = True
            if bFoundLand:
                landScore += 30 # the first land is worth about 10 plots of land
            # Smear the row's score across nearby candidate seams.
            for i in range(stripSize):
                yy = y + i - stripCenterIndex
                yy %= iRegionHeight
                scores[yy] += landScore * piLandWeights[i]
        best_split_y, lowest_score = argmin(scores)
        return best_split_y
    def findBestRegionSplitX(self, iRegionWidth, iRegionHeight, stripRadius):
        # Return the x-column of the region where a seam cuts the least land.
        stripSize = 2*stripRadius
        if stripSize > iRegionWidth:
            return 0
        numPlots = iRegionWidth * iRegionHeight
        stripCenterIndex = stripRadius
        piLandWeights = self.calcWeights(stripRadius)
        scores = [0]*iRegionWidth
        for x in range(iRegionWidth):
            landScore = 0
            bFoundLand = False
            for y in range(iRegionHeight):
                i = y*iRegionWidth + x
                assert (i >= 0 and i < numPlots)
                if self.plotTypes[i] == PlotTypes.PLOT_LAND:
                    landScore += 1
                    bFoundLand = True
            if bFoundLand:
                landScore += 30 # the first land is worth about 10 plots of land
            for i in range(stripSize):
                xx = x + i - stripCenterIndex
                xx %= iRegionWidth
                scores[xx] += landScore * piLandWeights[i]
        best_split_x, lowest_score = argmin(scores)
        return best_split_x
    def calcWeights(self, stripRadius):
        # Per-offset seam weights: linear toward the strip centre, multiplied
        # by stripRadius within one step of centre, doubled at the exact centre.
        stripSize = 2*stripRadius
        landWeights = [0]*stripSize
        for i in range(stripSize):
            distFromStart = i+1
            distFromEnd = stripSize-i
            distFromEdge = min(distFromStart, distFromEnd)
            landWeight = distFromEdge
            distFromCenter = stripRadius - distFromEdge
            if distFromCenter <= 1:
                landWeight *= stripRadius
            if distFromCenter == 0:
                landWeight *= 2
            landWeights[i] = landWeight
        return landWeights
    def generatePlotsInRegion(self, iWaterPercent,
                              iRegionWidth, iRegionHeight,
                              iRegionWestX, iRegionSouthY,
                              iRegionGrain, iRegionHillsGrain,
                              iRegionPlotFlags, iRegionTerrainFlags,
                              iRegionFracXExp = -1, iRegionFracYExp = -1,
                              bShift = True, iStrip = 15,
                              rift_grain = -1, has_center_rift = False,
                              invert_heights = False):
        # This is the code to generate each fractal.
        # Determine and pass in the appropriate arguments from the controlling function.
        #
        # Init local variables
        water = iWaterPercent
        iWestX = iRegionWestX
        # Note: if you pass bad regional dimensions so that iEastX > self.iW, BOOM! So don't do that. I could close out that possibility, but better that I not, so that you get an error to warn you of erroneous regional parameters. - Sirian
        iSouthY = iRegionSouthY
        # Init the plot types array and the regional fractals
        self.plotTypes = [] # reinit the array for each pass
        self.plotTypes = [PlotTypes.PLOT_OCEAN] * (iRegionWidth*iRegionHeight)
        regionContinentsFrac = CyFractal()
        regionHillsFrac = CyFractal()
        regionPeaksFrac = CyFractal()
        regionContinentsFrac.fracInit(iRegionWidth, iRegionHeight, iRegionGrain, self.dice, iRegionPlotFlags, iRegionFracXExp, iRegionFracYExp)
        regionHillsFrac.fracInit(iRegionWidth, iRegionHeight, iRegionHillsGrain, self.dice, iRegionTerrainFlags, iRegionFracXExp, iRegionFracYExp)
        regionPeaksFrac.fracInit(iRegionWidth, iRegionHeight, iRegionHillsGrain+1, self.dice, iRegionTerrainFlags, iRegionFracXExp, iRegionFracYExp)
        # Height thresholds: water cutoff, two hill bands (climate-driven
        # range around 25% and 75%), and the peak cutoff.
        iWaterThreshold = regionContinentsFrac.getHeightFromPercent(water)
        iHillsBottom1 = regionHillsFrac.getHeightFromPercent(max((25 - self.gc.getClimateInfo(self.map.getClimate()).getHillRange()), 0))
        iHillsTop1 = regionHillsFrac.getHeightFromPercent(min((25 + self.gc.getClimateInfo(self.map.getClimate()).getHillRange()), 100))
        iHillsBottom2 = regionHillsFrac.getHeightFromPercent(max((75 - self.gc.getClimateInfo(self.map.getClimate()).getHillRange()), 0))
        iHillsTop2 = regionHillsFrac.getHeightFromPercent(min((75 + self.gc.getClimateInfo(self.map.getClimate()).getHillRange()), 100))
        iPeakThreshold = regionPeaksFrac.getHeightFromPercent(self.gc.getClimateInfo(self.map.getClimate()).getPeakPercent())
        # Loop through the region's plots
        for x in range(iRegionWidth):
            for y in range(iRegionHeight):
                i = y*iRegionWidth + x
                val = regionContinentsFrac.getHeight(x,y)
                if val <= iWaterThreshold: pass
                else:
                    hillVal = regionHillsFrac.getHeight(x,y)
                    if ((hillVal >= iHillsBottom1 and hillVal <= iHillsTop1) or (hillVal >= iHillsBottom2 and hillVal <= iHillsTop2)):
                        peakVal = regionPeaksFrac.getHeight(x,y)
                        if (peakVal <= iPeakThreshold):
                            self.plotTypes[i] = PlotTypes.PLOT_PEAK
                        else:
                            self.plotTypes[i] = PlotTypes.PLOT_HILLS
                    else:
                        self.plotTypes[i] = PlotTypes.PLOT_LAND
        if bShift:
            # Shift plots to obtain a more natural shape.
            self.shiftRegionPlots(iRegionWidth, iRegionHeight, iStrip)
        # Once the plot types for the region have been generated, they must be
        # applied to the global plot array.
        #
        # Default approach is to ignore water and layer the lands over one another.
        # If you want to layer the water, too, or some other combination, then
        # create a subclass and override this function. Customize in your override.
        #
        # Apply the region's plots to the global plot array.
        for x in range(iRegionWidth):
            wholeworldX = x + iWestX
            for y in range(iRegionHeight):
                i = y*iRegionWidth + x
                if self.plotTypes[i] == PlotTypes.PLOT_OCEAN: continue
                wholeworldY = y + iSouthY
                iWorld = wholeworldY*self.iW + wholeworldX
                self.wholeworldPlotTypes[iWorld] = self.plotTypes[i]
        # This region is done.
        return
    def generatePlotsByRegion(self):
        # Sirian's MultilayeredFractal class, controlling function.
        # You -MUST- customize this function for each use of the class.
        #
        # NOTE(review): as shipped, this template references names that are
        # never defined in this scope (regiononeNorthLat, regiononeSouthLat,
        # regiononeGrain, archGrain) and will raise NameError if called
        # unmodified — it exists only as an example to replace in a subclass.
        #
        # The rest of this function from CvMapGeneratorUtil.py is provided
        # to you as a template. You will have to build your own version for
        # use with your map scripts, according to your designs.
        #
        # The following object indexes custom grain amounts per world size.
        # Add a new column for each desired global or regional grain setting.
        # (Grains are used to control fractal results. Larger grains create
        # smaller patches of similar values. Small grains create large patches.)
        #
        # Here is an example of obtaining grain sizes to fit with map sizes.
        sizekey = self.map.getWorldSize()
        sizevalues = {
            #Rhye - RFCRAND
##		WorldSizeTypes.WORLDSIZE_DUEL:		(3,2,1,2),
##		WorldSizeTypes.WORLDSIZE_TINY:		(3,2,1,2),
##		WorldSizeTypes.WORLDSIZE_SMALL:		(3,2,1,2),
            WorldSizeTypes.WORLDSIZE_STANDARD:	(4,2,1,2),
            WorldSizeTypes.WORLDSIZE_LARGE:		(4,2,1,2),
            WorldSizeTypes.WORLDSIZE_HUGE:		(5,2,1,2)
            }
        # You can add as many grain entries as you like.
        # Seed them all from the matrix using the following type of line:
        (iGrainOne, iGrainTwo, iGrainThree, iGrainFour) = sizevalues[sizekey]
        # The example is for four grain values. You may not need that many.
        # Check scripts that use MultilayeredFractal for more examples.
        # Define the regions (necessary to any use of generatePlotsByRegion)
        # Start by initializing regional definitions.
        # All regions must be rectangular. (The fractal only feeds on these!)
        # Obtain region width and height by any method you care to design.
        # Obtain WestX and EastX, NorthY and SouthY, to define the boundaries.
        #
        # Note that Lat and Lon as used here are different from the use for
        # the generation of terrain types and features. Sorry for the ambiguity!
        #
        # Latitude and Longitude are values between 0.0 and 1.0
        # Latitude South to North is 0.0 to 1.0
        # Longitude West to East is 0.0 to 1.0
        # Plots are indexed by X,Y with 0,0 in SW corner.
        #
        # Here is an example set of definitions
        regiononeWestLon = 0.05
        regiononeEastLon = 0.35
        regiontwoWestLon = 0.45
        regiontwoEastLon = 0.95
        regiontwoNorthLat = 0.95
        regiontwoSouthLat = 0.45
        subcontinentLargeHorz = 0.2
        subcontinentLargeVert = 0.32
        subcontinentLargeNorthLat = 0.6
        subcontinentLargeSouthLat = 0.28
        subcontinentSmallDimension = 0.125
        subcontinentSmallNorthLat = 0.525
        subcontinentSmallSouthLat = 0.4
        # You can then use these longitudes and latitudes crossed with grid sizes
        # to enable one definition to fit any map size, map width, map height.
        # Define your first region here.
        NiTextOut("Generating Region One (Python Map_Script_Name) ...")
        # Set dimensions of your region. (Below is an example).
        regiononeWestX = int(self.iW * regiononeWestLon)
        regiononeEastX = int(self.iW * regiononeEastLon)
        regiononeNorthY = int(self.iH * regiononeNorthLat)
        regiononeSouthY = int(self.iH * regiononeSouthLat)
        regiononeWidth = regiononeEastX - regiononeWestX + 1
        regiononeHeight = regiononeNorthY - regiononeSouthY + 1
        regiononeWater = 70
        # With all of your parameters set, pass them in to the plot generator.
        self.generatePlotsInRegion(regiononeWater,
                                   regiononeWidth, regiononeHeight,
                                   regiononeWestX, regiononeSouthY,
                                   regiononeGrain, iGrainOne,
                                   self.iFlags, self.iTerrainFlags,
                                   -1, -1,
                                   True, 15,
                                   2, False,
                                   False
                                   )
        # Define additional regions.
        # Regions can overlap or add on to other existing regions.
        # Example of a subcontinent region appended to region one from above:
        NiTextOut("Generating subcontinent for Region One (Python Map_Script_Name) ...")
        scLargeWidth = int(subcontinentLargeHorz * self.iW)
        scLargeHeight = int(subcontinentLargeVert * self.iH)
        scRoll = self.dice.get((regiononeWidth - scLargeWidth), "Large Subcontinent Placement - Map_Script_Name PYTHON")
        scWestX = regiononeWestX + scRoll
        scEastX = scWestX + scLargeWidth
        scNorthY = int(self.iH * subcontinentLargeNorthLat)
        scSouthY = int(self.iH * subcontinentLargeSouthLat)
        # Clever use of dice rolls can inject some randomization in to definitions.
        scShape = self.dice.get(3, "Large Subcontinent Shape - Map_Script_Name PYTHON")
        if scShape == 2: # Massive subcontinent!
            scWater = 55; scGrain = 1; scRift = 2
        elif scShape == 1: # Standard subcontinent.
            scWater = 66; scGrain = 2; scRift = 2
        else: # scShape == 0, Archipelago subcontinent.
            scWater = 77; scGrain = archGrain; scRift = -1
        # Each regional fractal needs its own uniquely defined parameters.
        # With proper settings, there's almost no limit to what can be done.
        self.generatePlotsInRegion(scWater,
                                   scLargeWidth, scLargeHeight,
                                   scWestX, scSouthY,
                                   scGrain, iGrainOne,
                                   self.iRoundFlags, self.iTerrainFlags,
                                   6, 6,
                                   False, 9,
                                   scRift, False,
                                   False
                                   )
        # Once all of your fractal regions (and other regions! You do not have
        # to make every region a fractal-based region) have been processed, and
        # your plot generation is complete, return the global plot array.
        #
        # All regions have been processed. Plot Type generation completed.
        return self.wholeworldPlotTypes
'''
Regional Variables Key:
iWaterPercent,
iRegionWidth, iRegionHeight,
iRegionWestX, iRegionSouthY,
iRegionGrain, iRegionHillsGrain,
iRegionPlotFlags, iRegionTerrainFlags,
iRegionFracXExp, iRegionFracYExp,
bShift, iStrip,
rift_grain, has_center_rift,
invert_heights
'''
class TerrainGenerator:
    # Rhye-modified Civ4 terrain generator: snow/tundra are pure latitude
    # cutoffs; desert/plains come from per-plot fractal heights gated by
    # latitude bands. Lines tagged #Rhye differ from the stock generator.
    "If iDesertPercent=35, then about 35% of all land will be desert. Plains is similar. \
    Note that all percentages are approximate, as values have to be roughened to achieve a natural look."

    def __init__(self, iDesertPercent=40, iPlainsPercent=10, #iDesertPercent=32, iPlainsPercent=18, #Rhye
                 fSnowLatitude=0.7, fTundraLatitude=0.6,
                 #fGrassLatitude=0.1, fDesertBottomLatitude=0.2, #Rhye
                 fGrassLatitude=0.06, fDesertBottomLatitude=0.10, #Rhye #0.12
                 #fDesertTopLatitude=0.5, fracXExp=-1, #Rhye
                 fDesertTopLatitude=0.31, fracXExp=-1, #Rhye #0.35
                 fracYExp=-1, grain_amount=4):
        self.gc = CyGlobalContext()
        self.map = CyMap()

        # World size adjusts the fractal grain (bigger maps get a grain bump).
        grain_amount += self.gc.getWorldInfo(self.map.getWorldSize()).getTerrainGrainChange()
        self.grain_amount = grain_amount

        self.iWidth = self.map.getGridWidth()
        self.iHeight = self.map.getGridHeight()

        self.mapRand = self.gc.getGame().getMapRand()

        self.iFlags = 0  # Disallow FRAC_POLAR flag, to prevent "zero row" problems.
        if self.map.isWrapX(): self.iFlags += CyFractal.FracVals.FRAC_WRAP_X
        if self.map.isWrapY(): self.iFlags += CyFractal.FracVals.FRAC_WRAP_Y

        self.deserts=CyFractal()
        self.plains=CyFractal()
        self.variation=CyFractal()

        # Climate setting shifts the desert percentage; clamp to [0, 100].
        iDesertPercent += self.gc.getClimateInfo(self.map.getClimate()).getDesertPercentChange()
        iDesertPercent = min(iDesertPercent, 100)
        iDesertPercent = max(iDesertPercent, 0)

        self.iDesertPercent = iDesertPercent
        self.iPlainsPercent = iPlainsPercent

        # Fractal-height bands: desert occupies the top iDesertPercent of the
        # height range, plains the band immediately below it.
        self.iDesertTopPercent = 100
        self.iDesertBottomPercent = max(0,int(100-iDesertPercent))
        self.iPlainsTopPercent = 100
        self.iPlainsBottomPercent = max(0,int(100-iDesertPercent-iPlainsPercent))
        self.iMountainTopPercent = 75
        self.iMountainBottomPercent = 60

        # Latitude cutoffs, each adjusted per climate and clamped to [0, 1].
        fSnowLatitude += self.gc.getClimateInfo(self.map.getClimate()).getSnowLatitudeChange()
        fSnowLatitude = min(fSnowLatitude, 1.0)
        fSnowLatitude = max(fSnowLatitude, 0.0)
        self.fSnowLatitude = fSnowLatitude

        fTundraLatitude += self.gc.getClimateInfo(self.map.getClimate()).getTundraLatitudeChange()
        fTundraLatitude = min(fTundraLatitude, 1.0)
        fTundraLatitude = max(fTundraLatitude, 0.0)
        self.fTundraLatitude = fTundraLatitude

        fGrassLatitude += self.gc.getClimateInfo(self.map.getClimate()).getGrassLatitudeChange()
        fGrassLatitude = min(fGrassLatitude, 1.0)
        fGrassLatitude = max(fGrassLatitude, 0.0)
        self.fGrassLatitude = fGrassLatitude

        fDesertBottomLatitude += self.gc.getClimateInfo(self.map.getClimate()).getDesertBottomLatitudeChange()
        fDesertBottomLatitude = min(fDesertBottomLatitude, 1.0)
        fDesertBottomLatitude = max(fDesertBottomLatitude, 0.0)
        self.fDesertBottomLatitude = fDesertBottomLatitude

        fDesertTopLatitude += self.gc.getClimateInfo(self.map.getClimate()).getDesertTopLatitudeChange()
        fDesertTopLatitude = min(fDesertTopLatitude, 1.0)
        fDesertTopLatitude = max(fDesertTopLatitude, 0.0)
        self.fDesertTopLatitude = fDesertTopLatitude

        self.fracXExp = fracXExp
        self.fracYExp = fracYExp

        self.initFractals()

    def initFractals(self):
        # Seed the three fractals and cache the band thresholds and terrain
        # type IDs so per-plot generation does no string lookups.
        self.deserts.fracInit(self.iWidth, self.iHeight, self.grain_amount, self.mapRand, self.iFlags, self.fracXExp, self.fracYExp)
        self.iDesertTop = self.deserts.getHeightFromPercent(self.iDesertTopPercent)
        self.iDesertBottom = self.deserts.getHeightFromPercent(self.iDesertBottomPercent)

        self.plains.fracInit(self.iWidth, self.iHeight, self.grain_amount+1, self.mapRand, self.iFlags, self.fracXExp, self.fracYExp)
        self.iPlainsTop = self.plains.getHeightFromPercent(self.iPlainsTopPercent)
        self.iPlainsBottom = self.plains.getHeightFromPercent(self.iPlainsBottomPercent)

        self.variation.fracInit(self.iWidth, self.iHeight, self.grain_amount, self.mapRand, self.iFlags, self.fracXExp, self.fracYExp)

        self.terrainDesert = self.gc.getInfoTypeForString("TERRAIN_DESERT")
        self.terrainPlains = self.gc.getInfoTypeForString("TERRAIN_PLAINS")
        self.terrainIce = self.gc.getInfoTypeForString("TERRAIN_SNOW")
        self.terrainTundra = self.gc.getInfoTypeForString("TERRAIN_TUNDRA")
        self.terrainGrass = self.gc.getInfoTypeForString("TERRAIN_GRASS")

    def getLatitudeAtPlot(self, iX, iY, roll1):
        """given a point (iX,iY) such that (0,0) is in the NW,
        returns a value between 0.0 (tropical) and 1.0 (polar).
        This function can be overridden to change the latitudes; for example,
        to make an entire map have temperate terrain, or to make terrain change from east to west
        instead of from north to south"""
        # roll1 is unused here; it is accepted so Rhye overrides share a signature.
        lat = abs((self.iHeight / 2) - iY)/float(self.iHeight/2)  # 0.0 = equator, 1.0 = pole

        # Adjust latitude using self.variation fractal, to mix things up:
        lat += (128 - self.variation.getHeight(iX, iY))/(255.0 * 5.0)

        # Limit to the range [0, 1]:
        if lat < 0:
            lat = 0.0
        if lat > 1:
            lat = 1.0
        return lat

    def getWorldShapeInfo( self, iParameter ): #Rhye
        # Rhye stores world-shape options in the game's pickled script data.
        # NOTE(review): uses the module-level `gc` and `pickle`, not self.gc —
        # assumes both are available at module scope; confirm against imports.
        scriptDict = pickle.loads( gc.getGame().getScriptData() )
        return scriptDict['lWorldShapeInfo'][iParameter]

    def generateTerrain(self):
        # Build and return the flat terrain array (one entry per plot,
        # row-major with index y*width + x).
        roll1 = self.getWorldShapeInfo(0) #Rhye
        terrainData = [0]*(self.iWidth*self.iHeight)

        #Rhye
        # NOTE(review): these tweaks mutate instance thresholds, so calling
        # generateTerrain() more than once compounds them. Also, the last
        # `elif` below is unreachable: its condition is a strict subset of the
        # first branch's condition (both require no X and no Y wrap).
        if (self.map.isWrapX() == False and self.map.isWrapY() == False): #bigger central tropical area, always
            self.iDesertBottom = self.iDesertBottom +0.06
            self.iDesertTop = self.iDesertTop +0.08
            self.iPlainsBottom = self.iPlainsBottom +0.08
            self.iPlainsTop = self.iPlainsTop +0.10
        elif (self.map.isWrapX() == True and self.map.isWrapY() == True): #bigger central icy area, always
            self.fSnowLatitude = self.fSnowLatitude -0.04
            self.fTundraLatitude = self.fTundraLatitude -0.08
        elif (self.map.isWrapX() == False and self.map.isWrapY() == False and self.getWorldShapeInfo(10) == True): #slimmer icy area at borders, with innersea
            self.fSnowLatitude = self.fSnowLatitude +0.04
            self.fTundraLatitude = self.fTundraLatitude +0.08

        for x in range(self.iWidth):
            for y in range(self.iHeight):
                iI = y*self.iWidth + x
                terrain = self.generateTerrainAtPlot(x, y,roll1) #Rhye
                terrainData[iI] = terrain
        return terrainData

    def generateTerrainAtPlot(self,iX,iY, roll1): #Rhye
        # Pick the terrain type for one plot from latitude plus fractal heights.
        lat = self.getLatitudeAtPlot(iX,iY,roll1) #Rhye

        # Water plots keep whatever terrain they already carry.
        if (self.map.plot(iX, iY).isWater()):
            return self.map.plot(iX, iY).getTerrainType()

        terrainVal = self.terrainGrass

        #Rhye
        fGrassLatitudeTop = 0.50
        if (CyMap().getClimate() == 0): #temperate
            fGrassLatitudeTop = 0.53 #a bit more plains

        if lat >= self.fSnowLatitude:
            terrainVal = self.terrainIce
        elif lat >= self.fTundraLatitude:
            terrainVal = self.terrainTundra
        elif lat >= fGrassLatitudeTop: #Rhye
            terrainVal = self.terrainGrass #Rhye
        elif lat < self.fGrassLatitude:
            terrainVal = self.terrainGrass
        else:
            # Mid latitudes: fractal heights decide desert vs plains vs grass.
            desertVal = self.deserts.getHeight(iX, iY)
            plainsVal = self.plains.getHeight(iX, iY)
            if ((desertVal >= self.iDesertBottom) and (desertVal <= self.iDesertTop) and (lat >= self.fDesertBottomLatitude) and (lat < self.fDesertTopLatitude)):
                terrainVal = self.terrainDesert
            elif ((plainsVal >= self.iPlainsBottom) and (plainsVal <= self.iPlainsTop)):
                terrainVal = self.terrainPlains

        if (terrainVal == TerrainTypes.NO_TERRAIN):
            return self.map.plot(iX, iY).getTerrainType()

        return terrainVal
class FeatureGenerator:
    """Adds ice, jungle, forest and XML-probability features to the map.

    Jungle and forest placement is fractal-driven; jungle is additionally
    gated by latitude and climate (#Rhye tweaks). Ice is force-placed along
    non-wrapping map edges and randomly near the poles.
    """

    def __init__(self, iJunglePercent=82, iForestPercent=60, #self, iJunglePercent=80, iForestPercent=60, #Rhye
                 jungle_grain=5, forest_grain=6,
                 fracXExp=-1, fracYExp=-1):
##        #Rhye
##        if (CyMap().getClimate() == 1): #tropical
##            iJunglePercent += 6
        self.gc = CyGlobalContext()
        self.map = CyMap()
        self.mapRand = self.gc.getGame().getMapRand()
        self.jungles = CyFractal()
        self.forests = CyFractal()

        self.iFlags = 0  # Disallow FRAC_POLAR flag, to prevent "zero row" problems.
        if self.map.isWrapX(): self.iFlags += CyFractal.FracVals.FRAC_WRAP_X
        if self.map.isWrapY(): self.iFlags += CyFractal.FracVals.FRAC_WRAP_Y

        self.iGridW = self.map.getGridWidth()
        self.iGridH = self.map.getGridHeight()

        self.iJunglePercent = iJunglePercent
        self.iForestPercent = iForestPercent

        # World size adjusts the feature grain for both fractals.
        jungle_grain += self.gc.getWorldInfo(self.map.getWorldSize()).getFeatureGrainChange()
        forest_grain += self.gc.getWorldInfo(self.map.getWorldSize()).getFeatureGrainChange()
        self.jungle_grain = jungle_grain
        self.forest_grain = forest_grain

        self.fracXExp = fracXExp
        self.fracYExp = fracYExp

        self.__initFractals()
        self.__initFeatureTypes()

    def __initFractals(self):
        """Seed the jungle/forest fractals and cache the placement thresholds."""
        self.jungles.fracInit(self.iGridW, self.iGridH, self.jungle_grain, self.mapRand, self.iFlags, self.fracXExp, self.fracYExp)
        self.forests.fracInit(self.iGridW, self.iGridH, self.forest_grain, self.mapRand, self.iFlags, self.fracXExp, self.fracYExp)

        # Jungle occupies a band centered in the fractal height range.
        self.iJungleBottom = self.jungles.getHeightFromPercent((100 - self.iJunglePercent)/2)
        self.iJungleTop = self.jungles.getHeightFromPercent((100 + self.iJunglePercent)/2)

        # BUGFIX: this flat-map jungle bonus used to live in addFeaturesAtPlot,
        # where it was re-applied for EVERY plot, compounding by +0.10 per tile
        # over the whole map scan. Apply it exactly once here instead.
        if (self.map.isWrapX() == False and self.map.isWrapY() == False): #bigger central tropical area, always
            self.iJungleTop = self.iJungleTop + 0.10

        self.iForestLevel = self.forests.getHeightFromPercent(self.iForestPercent)

    def __initFeatureTypes(self):
        # Cache feature type IDs once; getInfoTypeForString is a string lookup.
        self.featureIce = self.gc.getInfoTypeForString("FEATURE_ICE")
        self.featureJungle = self.gc.getInfoTypeForString("FEATURE_JUNGLE")
        self.featureForest = self.gc.getInfoTypeForString("FEATURE_FOREST")
        self.featureOasis = self.gc.getInfoTypeForString("FEATURE_OASIS")

    def getWorldShapeInfo( self, iParameter ): #Rhye
        # Rhye stores world-shape options in the game's pickled script data.
        # NOTE(review): relies on module-level `gc` and `pickle`.
        scriptDict = pickle.loads( gc.getGame().getScriptData() )
        return scriptDict['lWorldShapeInfo'][iParameter]

    def addFeatures(self):
        "adds features to all plots as appropriate"
        roll1 = self.getWorldShapeInfo(0) #Rhye
        for iX in range(self.iGridW):
            for iY in range(self.iGridH):
                self.addFeaturesAtPlot(iX, iY, roll1) #Rhye

    def getLatitudeAtPlot(self, iX, iY, roll1): #Rhye
        "returns a value in the range of 0.0 (tropical) to 1.0 (polar)"
        return abs((self.iGridH/2) - iY)/float(self.iGridH/2)  # 0.0 = equator, 1.0 = pole

    def addFeaturesAtPlot(self, iX, iY, roll1): #Rhye
        "adds any appropriate features at the plot (iX, iY) where (0,0) is in the SW"
        lat = self.getLatitudeAtPlot(iX, iY, roll1) #Rhye
        pPlot = self.map.sPlot(iX, iY)

        # (The flat-map jungle-threshold bump formerly applied here moved to
        # __initFractals -- see BUGFIX note there.)

        # XML-defined random features (e.g. oasis appearance probability).
        for iI in range(self.gc.getNumFeatureInfos()):
            if pPlot.canHaveFeature(iI):
                if self.mapRand.get(10000, "Add Feature PYTHON") < self.gc.getFeatureInfo(iI).getAppearanceProbability():
                    pPlot.setFeatureType(iI, -1)

        # Ice, then jungle, then forest -- first feature placed wins.
        if (pPlot.getFeatureType() == FeatureTypes.NO_FEATURE):
            self.addIceAtPlot(pPlot, iX, iY, lat)

        if (pPlot.getFeatureType() == FeatureTypes.NO_FEATURE):
            self.addJunglesAtPlot(pPlot, iX, iY, lat)

        if (pPlot.getFeatureType() == FeatureTypes.NO_FEATURE):
            self.addForestsAtPlot(pPlot, iX, iY, lat)

    def addIceAtPlot(self, pPlot, iX, iY, lat):
        """Place ice on non-wrapping edges, or randomly near the poles."""
        if pPlot.canHaveFeature(self.featureIce):
            if (self.map.isWrapX() and not self.map.isWrapY()) and (iY == 0 or iY == self.iGridH - 1):
                pPlot.setFeatureType(self.featureIce, -1)
            elif (self.map.isWrapY() and not self.map.isWrapX()) and (iX == 0 or iX == self.iGridW - 1):
                pPlot.setFeatureType(self.featureIce, -1)
            elif (not self.map.isWrapY()):
                rand = self.mapRand.get(100, "Add Ice PYTHON")/100.0
                if rand < 8 * (lat - (1.0 - (self.gc.getClimateInfo(self.map.getClimate()).getRandIceLatitude() / 2.0))):
                    pPlot.setFeatureType(self.featureIce, -1)
                elif rand < 4 * (lat - (1.0 - self.gc.getClimateInfo(self.map.getClimate()).getRandIceLatitude())):
                    pPlot.setFeatureType(self.featureIce, -1)

    def addJunglesAtPlot(self, pPlot, iX, iY, lat):
        """Place jungle when the fractal height falls in the latitude-shrunken band."""
        if pPlot.canHaveFeature(self.featureJungle):
            iJungleHeight = self.jungles.getHeight(iX, iY)
            #Rhye - start
            # Climate shifts how quickly the jungle band narrows with latitude.
            fModifier = -1.0
            if (CyMap().getClimate() == 1): #tropical
                fModifier = -2.0
            if (CyMap().getClimate() == 3): #cold
                fModifier = +3.5
            #if self.iJungleTop >= iJungleHeight >= self.iJungleBottom + (self.iJungleTop - self.iJungleBottom)*(self.gc.getClimateInfo(self.map.getClimate()).getJungleLatitude()*lat: #Rhye
            if self.iJungleTop >= iJungleHeight >= int(self.iJungleBottom + (self.iJungleTop - self.iJungleBottom)*(self.gc.getClimateInfo(self.map.getClimate()).getJungleLatitude()+fModifier)*lat): #.getJungleLatitude()*lat: #Rhye
                pPlot.setFeatureType(self.featureJungle, -1)
            #Rhye - end

    def addForestsAtPlot(self, pPlot, iX, iY, lat):
        """Place forest wherever the forest fractal clears its threshold."""
        if pPlot.canHaveFeature(self.featureForest):
            if self.forests.getHeight(iX, iY) >= self.iForestLevel:
                pPlot.setFeatureType(self.featureForest, -1)
def getAreas():
    """Return a list of CyArea objects for every area (land and water) on the map.

    Empty area slots (area.isNone()) are skipped.
    """
    # Removed unused local `gc = CyGlobalContext()` -- it was never read.
    map = CyMap()
    areas = []
    for i in range(map.getIndexAfterLastArea()):
        area = map.getArea(i)
        if area.isNone():
            continue
        areas.append(area)
    return areas
#def findStartingPlot(playerID, validFn = None):
def findStartingPlot(playerID, lEurasia, lIsland1, lIsland2, lAfrica, lAmerica, lRolls, lRandomContinents, validFn = None):
    """Find the best starting plot for playerID by scanning every map tile.

    Repeatedly sweeps the map for the highest found-value plot that passes
    validFn. After every two failed passes the constraints relax
    (bSecondPass, then bThirdPass, then validFn is dropped entirely), and
    after 8 passes a fallback plot is returned, so the loop always terminates.

    Returns the plot number (map.plotNum) of the chosen plot.
    """
    gc = CyGlobalContext()
    map = CyMap()
    player = gc.getPlayer(playerID)

    player.AI_updateFoundValues(True)

    #iRange = player.startingPlotRange() #Rhye - startingPlotWithinRange is empty
    iPass = 0
    bSecondPass = False
    bThirdPass = False
    # BUGFIX: was `while (true):`, which relies on Civ4's non-standard
    # lowercase `true` alias; the builtin `True` works everywhere.
    while True:
        iBestValue = 0
        pBestPlot = None

        for iX in range(map.getGridWidth()):
            for iY in range(map.getGridHeight()):
                #if validFn != None and not validFn(playerID, iX, iY):
                if validFn is not None and not validFn(playerID, iX, iY, lEurasia, lIsland1, lIsland2, lAfrica, lAmerica, lRolls, lRandomContinents, bSecondPass, bThirdPass):
                    continue

                pLoopPlot = map.plot(iX, iY)

                #Rhye: never hand out a plot already assigned to another player.
                if (pLoopPlot.isStartingPlot()):
                    continue

                val = pLoopPlot.getFoundValue(playerID)

                #Rhye: bias major civs toward their historical regions.
                if (playerID < con.iNumMajorPlayers and val >= 40):
                    val *= utils.blindSpecificStartingModifiers(playerID, iX, iY, lEurasia, lIsland1, lIsland2, lAfrica, lAmerica, lRolls, lRandomContinents)
                    val /= 100

                # (Removed the vestigial `valid = True` guard; the commented
                # startingPlotWithinRange check it supported is gone -- Rhye.)
                if val > iBestValue:
                    iBestValue = val
                    pBestPlot = pLoopPlot

        if pBestPlot is not None:
            return map.plotNum(pBestPlot.getX(), pBestPlot.getY())

        print ("player", playerID, "pass", iPass, "iBestValue", iBestValue, "pBestPlot", pBestPlot, "failed")
        iPass += 1
        if (iPass >= 2):
            bSecondPass = True #contains less locks
            print(playerID, "bSecondPass")
        if (iPass >= 4):
            bThirdPass = True #contains less locks
            print(playerID, "bThirdPass")
        if (iPass >= 6):
            validFn = None #if it doesn't find anything, remove the locks
            print(playerID, "unlocked")
        if (iPass >= 8):
            print("aborted")
            return map.plotNum(2, 0)
def argmin(list):
    """Return (index, value) of the smallest element, or (None, None) if empty."""
    best_index, best = None, None
    for idx, val in enumerate(list):
        if best is None or val < best:
            best_index, best = idx, val
    return (best_index, best)
def pointInRect(point, rect):
    """True iff point (x, y) lies inside rect (x, y, width, height).

    The left/bottom edges are inclusive; the right/top edges are exclusive.
    """
    px, py = point
    rx, ry, rw, rh = rect
    return (rx <= px < rx + rw) and (ry <= py < ry + rh)
class BonusBalancer:
    """Guarantees key strategic resources near every player's starting plot.

    resourcesToBalance are force-placed around starts in normalizeAddExtras();
    resourcesToEliminate are skipped entirely by the map's normal bonus
    placement (the map script is expected to consult isSkipBonus for that).
    """

    def __init__(self):
        self.gc = CyGlobalContext()
        self.map = CyMap()

        self.resourcesToBalance = ('BONUS_ALUMINUM', 'BONUS_COAL', 'BONUS_COPPER', 'BONUS_HORSE', 'BONUS_IRON', 'BONUS_OIL', 'BONUS_URANIUM')
        self.resourcesToEliminate = ('BONUS_MARBLE', )

    def isSkipBonus(self, iBonusType):
        # True for bonuses this balancer manages itself (or bans outright).
        type_string = self.gc.getBonusInfo(iBonusType).getType()

        return ((type_string in self.resourcesToBalance) or (type_string in self.resourcesToEliminate))

    def isBonusValid(self, eBonus, pPlot, bIgnoreUniqueRange, bIgnoreOneArea, bIgnoreAdjacent):
        "Returns true if we can place a bonus here"
        iX, iY = pPlot.getX(), pPlot.getY()

        # One-area bonuses must stay on a landmass that already has them.
        if (not bIgnoreOneArea) and self.gc.getBonusInfo(eBonus).isOneArea():
            if self.map.getNumBonuses(eBonus) > 0:
                if self.map.getArea(pPlot.getArea()).getNumBonuses(eBonus) == 0:
                    return False

        # Don't allow a different bonus on any adjacent tile.
        if not bIgnoreAdjacent:
            for iI in range(DirectionTypes.NUM_DIRECTION_TYPES):
                pLoopPlot = plotDirection(iX, iY, DirectionTypes(iI))
                if not pLoopPlot.isNone():
                    if (pLoopPlot.getBonusType(-1) != -1) and (pLoopPlot.getBonusType(-1) != eBonus):
                        return False

        # Respect the bonus's XML-defined unique range (no duplicate nearby).
        if not bIgnoreUniqueRange:
            uniqueRange = self.gc.getBonusInfo(eBonus).getUniqueRange()
            for iDX in range(-uniqueRange, uniqueRange+1):
                for iDY in range(-uniqueRange, uniqueRange+1):
                    pLoopPlot = plotXY(iX, iY, iDX, iDY)
                    if not pLoopPlot.isNone() and pLoopPlot.getBonusType(-1) == eBonus:
                        return False

        return True

    def normalizeAddExtras(self):
        """Force-place each balanced resource within 5 tiles of every living
        player's start. Four passes, each ignoring one more placement
        constraint, so all resources in resourcesToBalance eventually land."""
        for i in range(self.gc.getMAX_CIV_PLAYERS()):
            if (self.gc.getPlayer(i).isAlive()):
                start_plot = self.gc.getPlayer(i).getStartingPlot() # returns a CyPlot
                startx, starty = start_plot.getX(), start_plot.getY()

                plots = [] # build a list of the plots near the starting plot
                for dx in range(-5,6):
                    for dy in range(-5,6):
                        x,y = startx+dx, starty+dy
                        pLoopPlot = self.map.plot(x,y)
                        if not pLoopPlot.isNone():
                            plots.append(pLoopPlot)

                resources_placed = []
                for pass_num in range(4):
                    # Later passes ignore progressively more constraints.
                    bIgnoreUniqueRange = pass_num >= 1
                    bIgnoreOneArea = pass_num >= 2
                    bIgnoreAdjacent = pass_num >= 3
                    for bonus in range(self.gc.getNumBonusInfos()):
                        type_string = self.gc.getBonusInfo(bonus).getType()
                        if (type_string not in resources_placed) and (type_string in self.resourcesToBalance):
                            for (pLoopPlot) in plots:
                                if (pLoopPlot.canHaveBonus(bonus, True)):
                                    if self.isBonusValid(bonus, pLoopPlot, bIgnoreUniqueRange, bIgnoreOneArea, bIgnoreAdjacent):
                                        pLoopPlot.setBonusType(bonus)
                                        resources_placed.append(type_string)
                                        #print "placed", type_string, "on pass", pass_num
                                        break # go to the next bonus
|
989,577 | 4c63dbff475821ce119c56f4d8188a4fd389dd57 | import time
import signal
import logging
import re
from datetime import datetime
from twisted.words.protocols import irc
from twisted.internet import protocol, reactor
from cardinal.plugins import PluginManager, EventManager
from cardinal.exceptions import (
CommandNotFoundError,
ConfigNotFoundError,
InternalError,
PluginError,
)
class CardinalBot(irc.IRCClient, object):
    """Cardinal, in all its glory"""

    logger = None
    """Logging object for CardinalBot"""

    factory = None
    """Should contain an instance of CardinalBotFactory"""

    user_regex = re.compile(r'^(.*?)!(.*?)@(.*?)$')
    """Regex for identifying a user's nick, ident, and vhost"""

    plugin_manager = None
    """Instance of PluginManager"""

    event_manager = None
    """Instance of EventManager"""

    storage_path = None
    """Location of storage directory"""
    # NOTE(review): this class attribute is shadowed by the read-only
    # `storage_path` property defined below; the property wins at runtime.

    uptime = None
    """Time that Cardinal connected to the network"""

    booted = None
    """Time that Cardinal was first launched"""

    # The properties below proxy connection state through to the factory,
    # which outlives individual protocol instances across reconnects.
    @property
    def network(self):
        return self.factory.network

    @network.setter
    def network(self, value):
        self.factory.network = value

    @property
    def nickname(self):
        return self.factory.nickname

    @nickname.setter
    def nickname(self, value):
        self.factory.nickname = value

    @property
    def password(self):
        """Twisted.irc.IRCClient server password setting"""
        return self.factory.server_password

    @password.setter
    def password(self, value):
        self.factory.server_password = value

    @property
    def reloads(self):
        return self.factory.reloads

    @reloads.setter
    def reloads(self, value):
        self.factory.reloads = value

    @property
    def storage_path(self):
        return self.factory.storage_path

    def __init__(self):
        """Initializes the logging"""
        self.logger = logging.getLogger(__name__)
        self.irc_logger = logging.getLogger("%s.irc" % __name__)

        # State variables for the WHO command
        self.who_lock = {}
        self.who_cache = {}
        self.who_callbacks = {}

    def signedOn(self):
        """Called once we've connected to a network"""
        self.logger.info("Signed on as %s" % self.nickname)

        # Give the factory access to the bot
        if self.factory is None:
            raise InternalError("Factory must be set on CardinalBot instance")

        # Give the factory the instance it created in case it needs to
        # interface for error handling or metadata retention.
        self.factory.cardinal = self

        # Attempt to identify with NickServ, if a password was given
        if self.factory.password:
            self.logger.info("Attempting to identify with NickServ")
            self.msg("NickServ", "IDENTIFY %s" % (self.factory.password,))

        # Creates an instance of EventManager
        self.logger.debug("Creating new EventManager instance")
        self.event_manager = EventManager(self)

        # Register events (event name, number of arguments callbacks receive)
        self.event_manager.register("irc.raw", 2)
        self.event_manager.register("irc.invite", 2)
        self.event_manager.register("irc.privmsg", 3)
        self.event_manager.register("irc.notice", 3)
        self.event_manager.register("irc.nick", 2)
        self.event_manager.register("irc.mode", 3)
        self.event_manager.register("irc.topic", 3)
        self.event_manager.register("irc.join", 2)
        self.event_manager.register("irc.part", 3)
        self.event_manager.register("irc.kick", 4)
        self.event_manager.register("irc.quit", 2)

        # Create an instance of PluginManager, giving it an instance of ourself
        # to pass to plugins, as well as a list of initial plugins to load.
        self.logger.debug("Creating new PluginManager instance")
        self.plugin_manager = PluginManager(self, self.factory.plugins)

        # Attempt to join channels
        for channel in self.factory.channels:
            self.join(channel)

        # Set the uptime as now and grab the boot time from the factory
        self.uptime = datetime.now()
        self.booted = self.factory.booted

    def joined(self, channel):
        """Called when we join a channel.

        channel -- Channel joined. Provided by Twisted.
        """
        self.logger.info("Joined %s" % channel)

    def lineReceived(self, line):
        """Called for every line received from the server."""
        self.irc_logger.info(line)

        # NOTE(review): assumes every server line contains at least two
        # space-separated tokens; a bare line would raise IndexError here.
        parts = line.split(' ')
        command = parts[1]

        # Don't fire if we haven't booted the event manager yet
        if self.event_manager:
            self.event_manager.fire("irc.raw", command, line)

        # Call Twisted handler
        super(CardinalBot, self).lineReceived(line)

    def irc_PRIVMSG(self, prefix, params):
        """Called when we receive a message in a channel or PM."""
        # Break down the user into usable groups
        user = re.match(self.user_regex, prefix)
        channel = params[0]
        message = params[1]

        self.logger.debug(
            "%s!%s@%s to %s: %s" %
            (user.group(1), user.group(2), user.group(3), channel, message)
        )

        self.event_manager.fire("irc.privmsg", user, channel, message)

        # If the channel is ourselves, this is actually a PM to us, and so
        # we'll update the channel variable to the sender's username to make
        # replying a little easier.
        if channel == self.nickname:
            channel = user.group(1)

        # Attempt to call a command. If it doesn't appear to PluginManager to
        # be a command, this will just fall through. If it matches command
        # syntax but there is no matching command, then we should catch the
        # exception.
        try:
            self.plugin_manager.call_command(user, channel, message)
        except CommandNotFoundError:
            # This is just an info, since anyone can trigger it, not really a
            # bad thing.
            self.logger.info(
                "Unable to find a matching command", exc_info=True)

    def who(self, channel, callback):
        """Lists the users in a channel.

        Keyword arguments:
          channel -- Channel to list users of.
          callback -- A callback that will receive the list of users.

        Returns:
          None. However, the callback will receive a single argument,
          which is the list of users.
        """
        if channel not in self.who_callbacks:
            self.who_callbacks[channel] = []
        self.who_callbacks[channel].append(callback)

        self.logger.info("WHO list requested for %s" % channel)

        if channel not in self.who_lock:
            self.logger.info("Making WHO request to server")
            # Set a lock to prevent trying to track responses from the server
            self.who_lock[channel] = True

            # Empty the cache to ensure no old users show up.
            # TODO: Add actual caching and user tracking.
            self.who_cache[channel] = []

            # Send the actual WHO command to the server. irc_RPL_WHOREPLY will
            # receive a response when the server sends one.
            self.sendLine("WHO %s" % channel)

    def irc_RPL_WHOREPLY(self, *nargs):
        "Receives reply from WHO command and sends to caller"
        response = nargs[1]

        # Same format as other events (nickname!ident@hostname)
        user = (
            response[5],  # nickname
            response[2],  # ident
            response[3],  # hostname
        )
        channel = response[1]

        self.who_cache[channel].append(user)

    def irc_RPL_ENDOFWHO(self, *nargs):
        "Called when WHO output is complete"
        response = nargs[1]
        channel = response[1]

        self.logger.info("Calling WHO callbacks for %s" % channel)
        for callback in self.who_callbacks[channel]:
            callback(self.who_cache[channel])

        # Release the request state so a new WHO can be issued later.
        del self.who_callbacks[channel]
        del self.who_lock[channel]

    def irc_NOTICE(self, prefix, params):
        """Called when a notice is sent to a channel or privately"""
        user = re.match(self.user_regex, prefix)
        channel = params[0]
        message = params[1]

        # Sent by network, not a real user
        if not user:
            self.logger.debug(
                "%s sent notice to %s: %s" % (prefix, channel, message)
            )
            return

        self.logger.debug(
            "%s!%s@%s sent notice to %s: %s" %
            (user.group(1), user.group(2), user.group(3), channel, message)
        )

        # Lots of NOTICE messages when connecting, and event manager may not be
        # initialized yet.
        if self.event_manager:
            self.event_manager.fire("irc.notice", user, channel, message)

    def irc_NICK(self, prefix, params):
        """Called when a user changes their nick"""
        user = re.match(self.user_regex, prefix)
        new_nick = params[0]

        self.logger.debug(
            "%s!%s@%s changed nick to %s" %
            (user.group(1), user.group(2), user.group(3), new_nick)
        )

        self.event_manager.fire("irc.nick", user, new_nick)

    def irc_TOPIC(self, prefix, params):
        """Called when a new topic is set"""
        user = re.match(self.user_regex, prefix)
        channel = params[0]
        topic = params[1]

        self.logger.debug(
            "%s!%s@%s changed topic in %s to %s" %
            (user.group(1), user.group(2), user.group(3), channel, topic)
        )

        self.event_manager.fire("irc.topic", user, channel, topic)

    def irc_MODE(self, prefix, params):
        """Called when a mode is set on a channel"""
        user = re.match(self.user_regex, prefix)
        channel = params[0]
        mode = ' '.join(params[1:])

        # Sent by network, not a real user
        if not user:
            self.logger.debug(
                "%s set mode on %s (%s)" % (prefix, channel, mode)
            )
            return

        self.logger.debug(
            "%s!%s@%s set mode on %s (%s)" %
            (user.group(1), user.group(2), user.group(3), channel, mode)
        )

        # Can get called during connection, in which case EventManager won't be
        # initialized yet
        if self.event_manager:
            self.event_manager.fire("irc.mode", user, channel, mode)

    def irc_JOIN(self, prefix, params):
        """Called when a user joins a channel"""
        user = re.match(self.user_regex, prefix)
        channel = params[0]

        self.logger.debug(
            "%s!%s@%s joined %s" %
            (user.group(1), user.group(2), user.group(3), channel)
        )

        self.event_manager.fire("irc.join", user, channel)

    def irc_PART(self, prefix, params):
        """Called when a user parts a channel"""
        user = re.match(self.user_regex, prefix)
        channel = params[0]
        if len(params) == 1:
            reason = "No Message"
        else:
            reason = params[1]

        self.logger.debug(
            "%s!%s@%s parted %s (%s)" %
            (user.group(1), user.group(2), user.group(3), channel, reason)
        )

        self.event_manager.fire("irc.part", user, channel, reason)

    def irc_KICK(self, prefix, params):
        """Called when a user is kicked from a channel"""
        user = re.match(self.user_regex, prefix)
        nick = params[1]
        channel = params[0]
        if len(params) == 2:
            reason = "No Message"
        else:
            reason = params[2]

        self.logger.debug(
            "%s!%s@%s kicked %s from %s (%s)" %
            (user.group(1), user.group(2), user.group(3),
             nick, channel, reason)
        )

        self.event_manager.fire("irc.kick", user, channel, nick, reason)

    def irc_QUIT(self, prefix, params):
        """Called when a user quits the network"""
        user = re.match(self.user_regex, prefix)
        if len(params) == 0:
            reason = "No Message"
        else:
            reason = params[0]

        self.logger.debug(
            "%s!%s@%s quit (%s)" %
            (user.group(1), user.group(2), user.group(3), reason)
        )

        self.event_manager.fire("irc.quit", user, reason)

    def irc_unknown(self, prefix, command, params):
        """Called when Twisted doesn't understand an IRC command.

        Keyword arguments:
          prefix -- User sending command. Provided by Twisted.
          command -- Command that wasn't recognized. Provided by Twisted.
          params -- Params for command. Provided by Twisted.
        """
        # A user has invited us to a channel
        if command == "INVITE":
            # Break down the user into usable groups
            user = re.match(self.user_regex, prefix)
            channel = params[1]

            self.logger.debug("%s invited us to %s" % (user.group(1), channel))

            # Fire invite event, so plugins can hook into it
            self.event_manager.fire("irc.invite", user, channel)

            # TODO: Call matching plugin events

    def config(self, plugin):
        """Returns a given loaded plugin's config.

        Keyword arguments:
          plugin -- String containing a plugin name.

        Returns:
          dict -- Dictionary containing plugin config.

        Raises:
          ConfigNotFoundError - When config can't be found for the plugin.
        """
        if self.plugin_manager is None:
            self.logger.error(
                "PluginManager has not been initialized! Can't return config "
                "for plugin: %s" % plugin
            )
            raise PluginError("PluginManager has not yet been initialized")

        try:
            config = self.plugin_manager.get_config(plugin)
        except ConfigNotFoundError:
            # Log and raise the exception again
            self.logger.exception(
                "Couldn't find config for plugin: %s" % plugin
            )
            raise

        return config

    def sendMsg(self, channel, message, length=None):
        """Wrapper command to send messages.

        Keyword arguments:
          channel -- Channel to send message to.
          message -- Message to send.
          length -- Length of message. Twisted will calculate if None given.
        """
        self.logger.info("Sending in %s: %s" % (channel, message))
        self.msg(channel, message, length)

    def send(self, message):
        """Send a raw message to the server.

        Keyword arguments:
          message -- Message to send.
        """
        self.logger.info("Sending to server: %s" % message)
        self.sendLine(message)

    def disconnect(self, message=''):
        """Wrapper command to quit Cardinal.

        Keyword arguments:
          message -- Message to insert into QUIT, if any.
        """
        self.logger.info("Disconnecting from network")

        # Unload plugins first so they can clean up, then tell the factory
        # this disconnect is intentional (suppresses auto-reconnect).
        self.plugin_manager.unload_all()
        self.factory.disconnect = True
        self.quit(message)
class CardinalBotFactory(protocol.ClientFactory):
"""The interface between Cardinal and the Twisted library"""
logger = None
"""Logger object for CardinalBotFactory"""
protocol = CardinalBot
"""Tells Twisted to look at the CardinalBot class for a client"""
disconnect = False
"""Keeps track of whether disconnect was triggered by CardinalBot"""
network = None
"""Network to connect to"""
server_password = None
"""Network password, if any"""
nickname = None
"""Nick to connect with"""
password = None
"""NickServ password, if any"""
channels = []
"""Channels to join upon connection"""
plugins = []
"""Plugins to load upon connection"""
cardinal = None
"""When CardinalBot is started, holds its instance"""
minimum_reconnection_wait = 10
"""Minimum time in seconds before reconnection attempt"""
maximum_reconnection_wait = 300
"""Maximum time in connections before reconnection attempt"""
last_reconnection_wait = None
"""Time in seconds since last reconnection attempt"""
booted = None
"""Datetime object holding time Cardinal first started up"""
reloads = 0
"""Keeps track of plugin reloads from within Cardinal"""
def __init__(self, network, server_password=None, channels=None,
nickname='Cardinal', password=None, plugins=None,
storage=None):
"""Boots the bot, triggers connection, and initializes logging.
Keyword arguments:
network -- A string containing the server to connect to.
channels -- A list of channels to connect to.
nickname -- A string with the nick to connect as.
password -- A string with NickServ password, if any.
plugins -- A list of plugins to load on boot.
"""
if plugins is None:
plugins = []
if channels is None:
channels = []
self.logger = logging.getLogger(__name__)
self.network = network.lower()
self.server_password = server_password
self.password = password
self.channels = channels
self.nickname = nickname
self.plugins = plugins
self.storage_path = storage
# Register SIGINT handler, so we can close the connection cleanly
signal.signal(signal.SIGINT, self._sigint)
self.booted = datetime.now()
def _sigint(self, signal, frame):
"""Called when a SIGINT is received.
Set disconnect to true since this was user-triggered, and make Cardinal
send a valid IRC QUIT.
"""
self.disconnect = True
if self.cardinal:
self.cardinal.quit('Received SIGINT.')
def clientConnectionLost(self, connector, reason):
"""Called when we lose connection to the server.
Keyword arguments:
connector -- Twisted IRC connector. Provided by Twisted.
reason -- Reason for disconnect. Provided by Twisted.
"""
# This flag tells us if Cardinal was told to disconnect by a user. If
# not, we'll attempt to reconnect.
if not self.disconnect:
self.logger.info(
"Connection lost (%s), reconnecting in %d seconds." %
(reason, self.minimum_reconnection_wait)
)
# Reset the last reconnection wait time since this is the first
# time we've disconnected since a successful connection and then
# wait before connecting.
self.last_reconnection_wait = self.minimum_reconnection_wait
time.sleep(self.minimum_reconnection_wait)
connector.connect()
else:
self.logger.info(
"Disconnected successfully (%s), quitting." % reason
)
reactor.stop()
def clientConnectionFailed(self, connector, reason):
    """Handle a failed connection attempt with exponential backoff.

    Keyword arguments:
      connector -- Twisted IRC connector. Provided by Twisted.
      reason -- Reason connection failed. Provided by Twisted.
    """
    if not self.last_reconnection_wait:
        # Very first attempt failed: start from the minimum wait.
        wait_time = self.minimum_reconnection_wait
    else:
        # Double the previous wait, capped at the configured maximum.
        wait_time = min(self.last_reconnection_wait * 2,
                        self.maximum_reconnection_wait)

    self.logger.info(
        "Could not connect (%s), retrying in %d seconds" %
        (reason, wait_time)
    )

    # Remember this wait for the next doubling, then pause and retry.
    self.last_reconnection_wait = wait_time
    time.sleep(wait_time)
    connector.connect()
|
989,578 | f99060e4765b4c16be77ce94ee95c56f384a1594 | t= int(input())
# Segments lit per digit on a seven-segment display (hoisted out of the
# loop: it is invariant across test cases).
matches = {'0': 6, '1': 2, '2': 5, '3': 5, '4': 4,
           '5': 5, '6': 6, '7': 3, '8': 7, '9': 6}

for _ in range(t):
    a, b = map(int, input().split())
    # Every character of str(a + b) is a decimal digit, so a direct lookup
    # is always safe; the old `if d in matches.keys()` guard was redundant.
    print(sum(matches[d] for d in str(a + b)))
|
989,579 | 200cdd1ac8abcd619da2de5a6c52c0ce7755c16d | """
多分支语句:
多分支:
if 表达式1:
表达式1成立执行的代码
elif 表达式2:
表达式2成立执行的代码
elif 表达式3:
表达式3成立执行的代码
else:
三个条件都不满足执行的代码
A[90,100]
B[80,90)
C[70,80)
D[60,69]
E < 60
跟电脑猜石头剪子布,打印输赢
1.计算机随机生成:0.石头 1.剪刀 2.布
random.randint
2.该你出了:0.石头 1.剪刀 2.布
input()
3.比较输赢
if - elif - else
1.赢了
2.平局
3.输了
"""
import random
# score = int(input("请输入你要查询的分数"))
# if score >= 90 and score <= 100:
# print("A")
# elif score >= 80 and score < 90:
# print("B")
# elif score >= 70 and score < 80:
# print("C")
# elif score >= 60 and score < 70:
# print("D")
# else:
# print("E")
# num = 生成一个随机数区间位于[0,2]
# your_num = input("")
# if 你出了石头电脑出了剪刀 or 你出了剪刀电脑出了布 or 你出了布电脑出了石头:
# print("你赢了")
# elif 你出的跟电脑出的一样:
# print("平局")
# else:
# print("你输了")
# Computer's move, drawn uniformly from {0: rock, 1: scissors, 2: paper}.
cmp_num = random.randint(0,2)
print("计算机准备完毕")
your_num = int(input("该你了 0:石头 1:剪刀 2:布\n"))
# Win cases: rock beats scissors, scissors beats paper, paper beats rock.
if (your_num == 0 and cmp_num == 1) or (your_num == 1 and cmp_num == 2) or (your_num == 2 and cmp_num == 0):
    print("你赢了")
elif your_num == cmp_num:
    # Same move on both sides: draw.
    print("平局")
else:
    print("你输了")
|
989,580 | 40d73e206fa2738c381b2ce3ad9be1c14c92ef38 | import os
import csv
fname1 = 'granite.rho_u.txt'
fname2 = 'granite.table.txt'
fnames = [fname1, fname2]
def returnReader(fname, delimiter):
    """Parse *fname* as delimiter-separated values and return every row.

    fname -- path of the file to read
    delimiter -- single-character field separator
    Returns a list of rows, each row being a list of string fields.
    """
    with open(fname, 'r') as handle:
        return list(csv.reader(handle, delimiter=delimiter))
def removeOutput(oname):
    """Delete *oname* if it exists; do nothing when it is absent.

    EAFP replacement for the previous `oname in os.listdir(os.getcwd())`
    scan, which was O(n) and only worked for files in the current working
    directory.
    """
    try:
        os.remove(oname)
    except FileNotFoundError:
        # Nothing to clean up.
        pass
# Convert each whitespace-delimited .txt table into a .csv file.
for fname in fnames:
    oname = fname.replace('.txt', '.csv')
    removeOutput(oname=oname)
    with open(oname, 'w') as outfile:
        reader = returnReader(fname=fname, delimiter=' ')
        for row in reader:
            # Skip blank lines in the source file.
            if not row:
                continue
            # Collapse one run of doubled commas, then drop a leading comma.
            # The old try/except wrote `rowstr + '\n'` on every path, so the
            # IndexError handler was dead weight and has been removed.
            rowstr = ",".join(row).replace(",,", ',')
            if rowstr.startswith(','):
                rowstr = rowstr[1:]
            outfile.write(rowstr + '\n')
outfile.close() |
989,581 | da68cc323c07bdbcd960de28dc743073cb157cc2 | import struct
# Pack big-endian ('>'): a 4-byte int (7), a 4-byte string (b'spam'),
# and a 2-byte short (8) into a 10-byte binary blob.
packed = struct.pack('>i4sh', 7, b'spam', 8)
print(packed)
|
989,582 | 786bcc26874e9b14994f7f92323915459adc17ab | from openpyxl import load_workbook
# Read column B (rows 2..459) from the workbook; data_only resolves
# formulas to their cached values.
wb = load_workbook('btc.xlsx', data_only = True)
ws = wb.active

values = []
for row in range(2, 460):
    # Direct cell access replaces the pointless `for column in "B"` loop.
    values.append(ws["B{}".format(row)].value)

# Print the first five characters of each value.
# (Was `print strings[slice(5)]` -- Python 2 print-statement syntax that
# is a SyntaxError under Python 3; a plain slice with print() works.)
for value in values:
    print(str(value)[:5])
|
989,583 | 5d391c70c1988abac130cbd2eab0f064fae0261e | from django.urls import path,include
from .views import *
from django.conf.urls.static import static
from django.conf import settings
# routers=DefaultRouter()
# routers.register('listings/',Listing_view)
# URL routes for the listings API.
urlpatterns = [
    # Detail view for a single listing, addressed by primary key.
    path('listings/<int:pk>/',Listing_detail.as_view()),
    # path('listings/<int:pk>/',Listing_detail_1.as_view()),
    # path('listings/',Listing_view),
    # Collection endpoint for all listings.
    path('listings/',Listing_view.as_view()),
    # Search endpoint over listings.
    path('listings/search',Listing_search.as_view()),
    # path('listings/searchs',Listing_searchs.as_view()),
    # Home/landing data for the listings app.
    path('listings/home',Listing_home.as_view())
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) |
989,584 | 63d0ca83d5d8a3a700413070e35a09e3997f70c4 | import os
import sys
from colorama import Fore
class Helper(object):
    """Static helpers for mapping colour names to colorama codes."""

    @staticmethod
    def get_fg_colorcode_by_identifier(identifier):
        """Return the colorama foreground code for a colour name.

        identifier -- one of: black, cyan, magenta, yellow, blue, green,
                      red, white, reset
        Raises ValueError for any other identifier.
        """
        # Dispatch table instead of the old if/elif ladder. Built at call
        # time so importing this module never touches colorama attributes.
        codes = {
            'black': Fore.BLACK,
            'cyan': Fore.CYAN,
            'magenta': Fore.MAGENTA,
            'yellow': Fore.YELLOW,
            'blue': Fore.BLUE,
            'green': Fore.GREEN,
            'red': Fore.RED,
            'white': Fore.WHITE,
            'reset': Fore.RESET,
        }
        try:
            return codes[identifier]
        except KeyError:
            raise ValueError(
                "Color identifier {} is unknown.".format(identifier)) from None
|
989,585 | 91a0058709fa8ed988e762790f87bf8ad1130f2e | """
EXERCÍCIO 075: Análise de Dados em uma Tupla
Desenvolva um programa que leia quatro valores pelo teclado e guarde-os em uma tupla. No final, mostre:
A) Quantas vezes apareceu o valor 9.
B) Em que posição foi digitado o primeiro valor 3.
C) Quais foram os números pares.
"""
def analyze(values):
    """Analyse a tuple of numbers for exercise 075.

    Returns (nines, first_three_pos, evens) where:
      nines -- how many times the value 9 appears
      first_three_pos -- 1-based position of the first 3, or None if absent
      evens -- tuple of every even number, in input order
    """
    nines = values.count(9)
    first_three_pos = values.index(3) + 1 if 3 in values else None
    evens = tuple(v for v in values if v % 2 == 0)
    return nines, first_three_pos, evens


def main():
    """Read four integers, store them in a tuple and report the analysis."""
    numeros = tuple(int(input(f'Digite o {i + 1}º valor: ')) for i in range(4))
    nines, pos3, evens = analyze(numeros)
    print(f'O valor 9 apareceu {nines} vezes.')
    if pos3 is None:
        print('O valor 3 não foi digitado.')
    else:
        print(f'O valor 3 foi digitado na {pos3}ª posição.')
    print(f'Valores pares digitados: {evens}')


if __name__ == '__main__':
    main()
|
989,586 | 9cf7c52ea5675d259eb60f7af771b2603d4b6410 | # -*- coding: utf-8 -*-
"""
Copyright(c) 2018 Gabriel Ramos Rodrigues Oliveira
"""
class No:
    """Singly linked list node holding *item* and a link to the next node."""

    def __init__(self, item=None, proximo=None):
        self.item = item        # payload
        self.proximo = proximo  # next node, or None at the tail

    def __repr__(self):
        return "No({})".format(self.item.__repr__())

    def __str__(self):
        return self.item.__str__()


class iterador:
    """Iterator over a ListaEncadeada, skipping the sentinel head node."""

    def __init__(self, lista):
        self.atual = lista.primeiro

    def __next__(self):
        if self.atual.proximo is None:
            raise StopIteration
        self.atual = self.atual.proximo
        return self.atual.item


class ListaEncadeada:
    """Singly linked list built around a sentinel head node.

    `primeiro` always points at the sentinel; `ultimo` points at the last
    real node (or at the sentinel when the list is empty).
    """

    def __init__(self):
        self.primeiro = self.ultimo = No()
        self.tamanho_lista = 0

    def __len__(self):
        return int(self.tamanho_lista)

    def __str__(self):
        return '(' + ', '.join(x.__repr__() for x in self) + ')'

    def __repr__(self):
        return '{}({})'.format(self.__class__.__name__,
                               ', '.join(x.__repr__() for x in self))

    def __iter__(self):
        return iterador(self)

    def __getitem__(self, i):
        """Return the item at 0-based index *i*.

        Raises IndexError when *i* is past the end of the list.
        """
        cont = -1
        aux = self.primeiro
        while aux is not None and cont < i:
            cont += 1
            aux = aux.proximo
        if aux is None:
            raise IndexError('list index out of range')
        return aux.item

    def __contains__(self, item):
        """Support `item in lista` (linear scan from the first real node)."""
        aux = self.primeiro.proximo
        while aux is not None and aux.item != item:
            aux = aux.proximo
        # Mirrors the original try/except semantics: not found -> False,
        # and a stored None item is never reported as contained.
        return aux is not None and aux.item is not None

    @property
    def __vazia(self):
        """True when the list holds no real nodes."""
        return self.primeiro == self.ultimo

    def inserir(self, item):
        """Append *item* at the end of the list."""
        self.ultimo.proximo = No(item, None)
        self.ultimo = self.ultimo.proximo
        self.tamanho_lista += 1

    def inserir_inicio(self, item):
        """Insert *item* right after the sentinel (front of the list)."""
        self.primeiro.proximo = No(item, self.primeiro.proximo)
        if self.__vazia:
            # List was empty: the new node is also the tail.
            self.ultimo = self.primeiro.proximo
        self.tamanho_lista += 1

    def inserir_ordenado(self, item):
        """Insert *item* keeping ascending order (list assumed sorted)."""
        if self.__vazia:
            self.inserir(item)
            return
        anterior = self.primeiro
        atual = self.primeiro.proximo
        while atual is not None and atual.item < item:
            anterior = atual
            atual = anterior.proximo
        anterior.proximo = No(item, atual)
        if atual is None:
            # Inserted at the very end: update the tail pointer.
            self.ultimo = anterior.proximo
        self.tamanho_lista += 1

    def remover_ultimo(self):
        """Remove and return the last item (None when the list is empty)."""
        if self.__vazia:
            return None
        aux = self.primeiro
        while aux.proximo != self.ultimo:
            aux = aux.proximo
        item = self.ultimo.item
        aux.proximo = None
        # BUGFIX: the previous version left self.ultimo pointing at the
        # removed node, so every subsequent inserir() appended to a
        # detached node and silently lost data.
        self.ultimo = aux
        self.tamanho_lista -= 1
        return item

    def remover_primeiro(self):
        """Remove and return the first item (None when the list is empty)."""
        if self.__vazia:
            return None
        aux = self.primeiro.proximo
        self.primeiro.proximo = aux.proximo
        item = aux.item
        if self.ultimo == aux:
            self.ultimo = self.primeiro
        # BUGFIX: was `aux.prox = None`, which set a nonexistent attribute
        # instead of clearing the removed node's link.
        aux.proximo = None
        self.tamanho_lista -= 1
        return item
if __name__ == '__main__':
    # Smoke test: append three items, print, drop the first, print again.
    l = ListaEncadeada()
    l.inserir('a')
    l.inserir('b')
    l.inserir('cavalaria')
    print(l)
    l.remover_primeiro()
    print(l)
|
989,587 | 9d9bcb66111b95b39086e4954e9d7227b54b2c76 | """
Tag: bit, string
Given a start IP address ip and a number of ips we need to cover n, return
a representation of the range as a list (of smallest possible length) of CIDR blocks.
A CIDR block is a string consisting of an IP, followed by a slash, and then
the prefix length. For example: "123.45.67.89/20". That prefix length "20"
represents the number of common prefix bits in the specified range.
Example 1: Input: ip = "255.0.0.7", n = 10
Output: ["255.0.0.7/32","255.0.0.8/29","255.0.0.16/32"]
Explanation:
The initial ip address, when converted to binary, looks like this (spaces added for clarity):
255.0.0.7 -> 11111111 00000000 00000000 00000111
The address "255.0.0.7/32" specifies all addresses with a common prefix of 32 bits to the given address,
ie. just this one address.
The address "255.0.0.8/29" specifies all addresses with a common prefix of 29 bits to the given address:
255.0.0.8 -> 11111111 00000000 00000000 00001000
Addresses with common prefix of 29 bits are:
11111111 00000000 00000000 00001000
11111111 00000000 00000000 00001001
11111111 00000000 00000000 00001010
11111111 00000000 00000000 00001011
11111111 00000000 00000000 00001100
11111111 00000000 00000000 00001101
11111111 00000000 00000000 00001110
11111111 00000000 00000000 00001111
The address "255.0.0.16/32" specifies all addresses with a common prefix of 32 bits to the given address,
ie. just 11111111 00000000 00000000 00010000.
In total, the answer specifies the range of 10 ips starting with the address 255.0.0.7 .
There were other representations, such as:
["255.0.0.7/32","255.0.0.8/30", "255.0.0.12/30", "255.0.0.16/32"],
but our answer was the shortest possible.
Also note that a representation beginning with say, "255.0.0.7/30" would be incorrect,
because it includes addresses like 255.0.0.4 = 11111111 00000000 00000000 00000100
that are outside the specified range.
Note:
- ip will be a valid IPv4 address.
- Every implied address ip + x (for x < n) will be a valid IPv4 address.
- n will be an integer in the range [1, 1000].
"""
from typing import List
class Solution:
    def ipToInt(self, ip: str) -> int:
        """Convert a dotted-quad IPv4 string into its 32-bit integer value."""
        value = 0
        for octet in ip.split('.'):
            value = (value << 8) + int(octet)
        return value

    def intToIP(self, x: int) -> str:
        """Convert a 32-bit integer back into dotted-quad notation."""
        return ".".join(str((x >> shift) & 0xFF)
                        for shift in (24, 16, 8, 0))

    def ipToCIDR(self, ip: str, n: int) -> List[str]:
        """Cover the n addresses starting at *ip* with the fewest CIDR blocks.

        Greedy strategy: at each step emit the largest block that is both
        aligned to the current address (limited by its lowest set bit) and
        no bigger than the number of addresses still needed.
        """
        addr = self.ipToInt(ip)
        blocks = []
        remaining = n
        while remaining:
            prefix = max(33 - (addr & -addr).bit_length(),
                         33 - remaining.bit_length())
            blocks.append('{}/{}'.format(self.intToIP(addr), prefix))
            span = 1 << (32 - prefix)
            addr += span
            remaining -= span
        return blocks
assert Solution().ipToCIDR("255.0.0.7", 10) == [
"255.0.0.7/32", "255.0.0.8/29", "255.0.0.16/32"]
print('Tests Passed!!')
|
989,588 | 58cbd9c5b54d5e45c5ebd6ea591be3c0a1d96ada | from .memes import *
def setup(bot):
    """discord.py extension entry point: register the Memes cog on *bot*."""
    bot.add_cog(Memes(bot))
|
989,589 | 9e8767a3f020c80ea221884d8c3f34ed53c517a9 | #!/usr/bin/python
import re
import paramiko
def look(path='/opt/jboss-as-7.1.1.Final/standalone/configuration/standalone.xml',
         pattern=r'jndi.*pool'):
    """Print and return datasource tokens from a JBoss standalone.xml.

    path -- configuration file to scan (defaults to the original hard-coded
            JBoss 7.1.1 location, so `look()` behaves as before)
    pattern -- regex selecting interesting lines
    Returns a list holding tokens 1-2 of every matching line.

    Fixes: Python 2 print statement (SyntaxError under Python 3) and the
    file handle that was never closed.
    """
    matcher = re.compile(pattern)
    results = []
    with open(path) as config:
        for line in config:
            if matcher.search(line):
                fields = line.split()[1:3]
                print(fields)
                results.append(fields)
    return results
look()
|
989,590 | a5c59de9684795e3832835784e2ac2e8dc2bdd19 | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 7 08:14:22 2017
@author: SHAHZAD
This file will select top 100 feature from benign and malware pmi value
"""
if __name__ == "__main__":
text_file = open("full_sorted_pmi_benign_out.txt", "r")
b_feature1 = text_file.readlines()
text_file.close()
text_file = open("full_sorted_pmi_malware_output.txt", "r")
m_feature1 = text_file.readlines()
text_file.close()
beningn_feature_list = []
malware_feature_list = []
count = 0
for perm in b_feature1: # selecting top 100 feature
if (count > 99):
break
b_feature = perm.split(' ')
count = count + 1
beningn_feature_list.append(b_feature[0])
count = 0
for perm1 in m_feature1: # selecting top 100 feature
if (count > 99):
break
m_feature = perm1.split(' ')
count = count + 1
malware_feature_list.append(m_feature[0])
with open('beningn_top100.txt', 'w+') as op:
for feature in beningn_feature_list:
op.write(feature + '\n')
with open('malware_top100.txt', 'w+') as op:
for feat in malware_feature_list:
op.write(feat + '\n') |
989,591 | adf8485865964cf5c44fe24b60d2b9e23c14a44f | # Solitamos y almacenamos los números
print("Introduce un numero")
n1=int(input())
print("Escribe otro numero")
n2=int(input())
# Suma de los numeros
suma = n1+n2
## Imprimimos el resultado
## Importante: Hay que convertir el suma a String
print("La suma de ambos números es: "+str(suma))
|
989,592 | e7968a23bff3fc27e9a496fd66cdcc0e40193d64 | """
Section 2
Parallelism with MultiProcessing > multiprocessing(5) : Queue, Pipe
keyword : queue, pipe, Communication between processes
"""
from multiprocessing import Process, Pipe, current_process
from multiprocessing.process import parent_process
import time
import os
# 프로세스 통신 구현 : Pipe
# 부모-자식 프로세스간 1:1 연결
# 실행함수
def worker(id, baseNum, conn):
    """Busy-count up to *baseNum* and ship the total through *conn*.

    id -- caller-chosen worker id, echoed in the log output
    baseNum -- number of increments to perform (simulated CPU work)
    conn -- writable end of a multiprocessing Pipe
    """
    process_id = os.getpid()
    process_name = current_process().name

    # Simulated workload: count one by one up to baseNum.
    sub_total = 0
    for _ in range(baseNum):
        sub_total += 1

    # Hand the result to the parent, then release this end of the pipe.
    conn.send(sub_total)
    conn.close()

    # Diagnostics.
    print(f'Process ID : {process_id}, Process name : {process_name}, ID : {id}')
    print(f'Result : {sub_total}')
def main():
    """Spawn one worker process and collect its result over a Pipe."""
    # Parent process id, for the log output.
    parent_process_id = os.getpid()
    print(f'Parent Process ID : {parent_process_id}')

    # Start time of the measured section.
    start_time = time.time()

    # One Pipe: the parent keeps one end, the worker gets the other.
    parent_conn, child_conn = Pipe()

    # Create the worker process (counts to 100,000,000).
    p = Process(name='1', target=worker, args=(1, 100000000, child_conn))
    # Start it.
    p.start()

    # Wait for it to finish.
    p.join()

    # Pure computation time.
    print('---- %s seconds ----' % (time.time() - start_time))
    print()
    print(f'Main-Processing Total count = {parent_conn.recv()}')
    print('Main-Process is done!')
if __name__ == '__main__':
main() |
989,593 | 8a08e791e832d2a687d1bdaae10a46fd0ab048d6 | # ============ Base imports ======================
from io import StringIO
from functools import partial
# ====== External package imports ================
import numpy as np
import psycopg2 as psy
# ====== Internal package imports ================
# ============== Logging ========================
import logging
from src.modules.utils.setup import setup, IndentLogger
logger = IndentLogger(logging.getLogger(''), {})
# =========== Config File Loading ================
from src.modules.utils.config_loader import get_config
conf = get_config()
# ================================================
class DatabaseIO:
"""Class which mediates all interactions with the database
"""
def __init__(self, testing=False):
"""Defines the schemas, and creates psycopg2 connection to the database
Note: the schema in this class should always match that in the database
:param testing: boolean, if True, nothing is written to the database, it just prints the commands which will be run
"""
self.write_role = conf.db.write_role
self.conn = psy.connect(
database=conf.db.db_name,
user=conf.db.user,
password=conf.db.pw,
host=conf.db.host,
port=conf.db.port,
)
self.testing = testing
#TODO: update the schemas to include annotation tables
self.schemas = {"raw":
{"cameras":
((
"id",
"site_name",
"object_id",
"cctv_id",
"location",
"kota",
"kota_en",
"kecamatan",
"kelurahan",
"versi",
"lattitude",
"longitude",
"url",
"fps",
"height",
"width"
),
(
"uuid",
"varchar(64)",
"integer",
"integer",
"varchar(64)",
"varchar(32)",
"varchar(32)",
"varchar(32)",
"varchar(32)",
"varchar(16)",
"float",
"float",
"varchar(128)",
"integer",
"integer",
"integer"
)),
"video_metadata":
((
"id",
"file_md5_chunk_7mb",
"file_name",
"camera_id",
"time_start_subtitles",
"time_end_subtitles",
"file_location",
"file_path"
),
(
"uuid",
"varchar(32)",
"varchar(256)",
"uuid",
"varchar(32)",
"varchar(32)",
"varchar(8)",
"varchar(256)"
)),
"subtitles":
((
"video_id",
"subtitle_number",
"display_time_start",
"display_time_end",
"subtitle_text"
),
(
"uuid",
"integer",
"varchar(16)",
"varchar(16)",
"varchar(32)"
)),
"packet_stats":
((
"video_id",
"pts_time",
"dts_time",
"size",
"pos",
"flags"
),
(
"uuid",
"varchar(16)",
"varchar(16)",
"varchar(16)",
"varchar(16)",
"varchar(16)"
)),
"frame_stats":
((
"video_id",
"key_frame",
"pkt_pts_time",
"pkt_dts_time",
"best_effort_timestamp_time",
"pkt_size",
"pict_type",
"coded_picture_number",
),
(
"uuid",
"varchar(16)",
"varchar(16)",
"varchar(16)",
"varchar(16)",
"integer",
"char(1)",
"integer"
))
},
"main": {
"db_failures":
((
"time",
"description"
),
(
"varchar(32)",
"json"
))
},
"results": {
"models":
((
"model_number",
"pipeline_config",
"datetime_created",
),(
"integer",
"json",
"timestamp NULL DEFAULT Now()",
)),
"box_motion":
((
"model_number",
"video_id",
"video_file_name",
"frame_number",
"mean_x",
"mean_y",
"mean_delta_x",
"mean_delta_y",
"magnitude",
"angle_from_vertical",
"box_id",
"datetime_created",
),(
"integer",
"uuid",
"varchar(256)",
"integer",
"double precision",
"double precision",
"double precision",
"double precision",
"double precision",
"double precision",
"integer",
"timestamp NULL DEFAULT Now()",
)),
"boxes":
((
"model_number",
"video_id",
"video_file_name",
"frame_number",
"xtl",
"ytl",
"xbr",
"ybr",
"objectness",
"pedestrian",
"bicycle",
"car",
"motorbike",
"bus",
"train",
"truck",
"semantic_segment_bottom_edge_mode",
"box_id",
"datetime_created",
),
(
"integer",
"uuid",
"varchar(256)",
"int4",
"float8",
"float8",
"float8",
"float8",
"float8",
"float8",
"float8",
"float8",
"float8",
"float8",
"float8",
"float8",
"varchar(32)",
"integer",
"timestamp NULL DEFAULT Now()",
)),
"frame_stats":
((
"model_number",
"video_id",
"video_file_name",
"frame_number",
"pedestrian_counts",
"bicycle_counts",
"car_counts",
"motorbike_counts",
"bus_counts",
"train_counts",
"truck_counts",
"pedestrian_sums",
"bicycle_sums",
"car_sums",
"motorbike_sums",
"bus_sums",
"train_sums",
"truck_sums",
"datetime_created",
),
(
"integer",
"uuid",
"varchar(256)",
"integer",
"double precision",
"double precision",
"double precision",
"double precision",
"double precision",
"double precision",
"double precision",
"double precision",
"double precision",
"double precision",
"double precision",
"double precision",
"double precision",
"double precision",
"timestamp NULL DEFAULT Now()",
))
}
}
class RunSql(object):
"""Decorator class which wraps sql-generating functions and runs generic sql
"""
def __init__(self, decorated):
""" Stores function to be decorated
:param decorated: function to be decorated
"""
self.decorated = decorated
def __call__(self, dbio, *args, **kwargs):
"""called when the decorated function is called
:param dbio: database connection to use for connecting
:param args: arguments to be passed to the decorated function
:param kwargs: keyword arguments to be passed to the decorated function
:return: None
"""
sql = self.decorated(dbio, *args, **kwargs)
if not dbio.testing:
logger.debug("'execute' will run\n{}".format(sql))
cur = dbio.conn.cursor()
cur.execute(sql)
cur.close()
dbio.conn.commit()
else:
logger.info("'execute' will run\n{}".format(sql))
def __get__(self, dbio, owner):
"""I'm not 100% sure about the need for this, something about being a decorator defined within a class in
order to have access to the parent class instance
"""
return partial(self.__call__, dbio)
class RunSqlSelect(object):
"""Decorator class which wraps sql-generating functions and runs a select statement and returns result of the select statement
"""
def __init__(self, decorated):
""" Stores function to be decorated
:param decorated: function to be decorated
"""
self.decorated = decorated
def __call__(self, dbio, *args, **kwargs):
"""called when the decorated function is called
:param dbio: database connection to use for connecting
:param args: arguments to be passed to the decorated function
:param kwargs: keyword arguments to be passed to the decorated function
:return: results: a tuple of tuples, where each inner tuple contains the values for a row returned by this
query; columns: tuple containing column names
"""
sql = self.decorated(dbio, *args, **kwargs)
if not dbio.testing:
logger.debug(f"running select:{sql}")
cur = dbio.conn.cursor()
cur.execute(sql)
results = cur.fetchall()
columns = [desc[0] for desc in cur.description]
cur.close()
dbio.conn.commit()
return results, columns
else:
logger.debug("will run:{sql}")
return None, None
def __get__(self, dbio, owner):
"""I'm not 100% sure about the need for this, something about being a decorator defined within a class in
order to have access to the parent class instance
"""
return partial(self.__call__, dbio)
"""Decorator class which wraps sql-generating functions and runs them using the copy expert function
Good for copying large volumnes of data to the database
"""
class CopyExpert(object):
def __init__(self, decorated):
""" Stores function to be decorated
:param decorated: function to be decorated
"""
self.decorated = decorated
def __call__(self, dbio, *args, **kwargs):
"""called when the decorated function is called
:param dbio: database connection to use for connecting
:param args: arguments to be passed to the decorated function
:param kwargs: keyword arguments to be passed to the decorated function
:return: None
"""
sql, f = self.decorated(dbio, *args, **kwargs)
if not dbio.testing:
logger.debug("'copy_expert' will run\n{}".format(sql))
cur = dbio.conn.cursor()
cur.copy_expert(sql, f)
cur.close()
dbio.conn.commit()
f.close()
else:
logger.info("'copy_expert' will run\n{}".format(sql))
f.close()
def __get__(self, dbio, owner):
"""I'm not 100% sure about the need for this, something about being a decorator defined within a class in
order to have access to the parent class instance
"""
return partial(self.__call__, dbio)
@RunSql
def create_schema(self, schema):
""" create a schema in the database
:param schema: name of schema
:return: sql statement that gets run on the database
"""
sql = f'set role {self.write_role}; ' \
+ f'CREATE SCHEMA IF NOT EXISTS {schema};'
return sql
@RunSql
def create_table(self, schema, table):
""" create a table in the database
:param schema: name of schema which will contain the table
:param table: name of table to be created
:return: sql statement that gets run on the database
"""
fields = ", ".join([" ".join(t) for t in zip(self.schemas[schema][table][0], self.schemas[schema][table][1])])
sql = f'set role {self.write_role}; ' \
+ f'CREATE TABLE IF NOT EXISTS {schema}.{table} ( {fields} );'
return sql
@RunSql
def drop_table(self, schema, table):
""" drop a table from the database
:param schema: name of schema which contains the table to be dropped
:param table: name of the table to be dropped
:return: sql statement that gets run on the database
"""
sql = f'set role {self.write_role}; ' \
+ f'DROP TABLE IF EXISTS {schema}.{table};'
return sql
@RunSql
def drop_schema(self, schema):
""" drop a schema from the database
:param schema: name of schema to be dropped
:return: sql statement that gets run on the database
"""
sql = f'set role {self.write_role}; ' \
+ f'DROP SCHEMA IF EXISTS {schema};'
return sql
@CopyExpert
def copy_file_to_table(self, schema, table, filepath):
""" copy a file from a filepath to a table in the database
Note: schema, table, and table fields come from this class, and they should match what's in the database as well
as what's in the file
:param schema: name of schema containing the relevant table
:param table: name of table which will contain the information
:return: sql statement that gets run on the database
"""
fields = ", ".join(self.schemas[schema][table][0])
sql = f'set role {self.write_role}; ' \
f'COPY {schema}.{table}( {fields} ) FROM stdin WITH DELIMITER \',\' CSV header;'
return sql, open(filepath, 'r')
@CopyExpert
def copy_np_array_to_table(self, schema, table, a):
""" copy a numpy array to a table in the database
Note: schema, table, and table fields come from this class, and they should match what's in the database as well
as what's in the file
:param schema: name of schema containing the relevant table
:param table: name of table which will contain the information
:param a: array to be copied
:return: sql statement that gets run on the database
"""
fields = ", ".join(self.schemas[schema][table][0])
sql = f'set role {self.write_role}; ' \
f'COPY {schema}.{table}( {fields} ) FROM stdin WITH DELIMITER \',\' CSV header;'
return sql, StringIO(np.array2string(a, separator=","))
@CopyExpert
def copy_string_to_table(self, schema, table, s, separator=","):
""" copy a generic string to a table in the database
Note: schema, table, and table fields come from this class, and they should match what's in the database as well
as what's in the file
:param schema: name of schema containing the relevant table
:param table: name of table which will contain the information
:param s: string to be copied
:param separator: delimiter used in this string
:return: sql statement that gets run on the database
"""
#fields = (separator + " ").join(self.schemas[schema][table][0])
fields = s.split("\n")[0].replace(separator, ",")
sql = f'set role {self.write_role}; ' \
f'COPY {schema}.{table}( {fields} ) FROM stdin WITH DELIMITER \'{separator}\' CSV header;'
return sql, StringIO(s)
@RunSql
def insert_into_table(self, schema, table, fields, values):
"""inserts a single row into a table
:param schema: name of schema contianing the table to be written to
:param table: name of table to be written to
:param fields: iterable, column names which match the database and the columns
:param values: iterable, values to be inserted for the specified column names
:return: sql statement that gets run on the database
"""
sql = f'set role {self.write_role}; ' \
f'INSERT INTO {schema}.{table} ( {", ".join(fields)} ) VALUES ( {", ".join(values)} );'
return sql
@RunSqlSelect
def get_camera_id(self, camera_name):
"""Get the id of a particular camera from the cameras table
Note: schema and tables are hardcoded, so if that changes, this should also change
:param camera_name: name of camera for which you want the id
:return: sql statement which gets run on the database
"""
sql = 'set role {}; '.format(self.write_role) \
+ f"SELECT id, site_name FROM raw.cameras WHERE site_name = '{camera_name}'"
return sql
@RunSqlSelect
def get_camera_list(self):
"""Get a list of cameras.
:return: sql statement which gets run on the database
"""
sql = 'set role {}; '.format(self.write_role) \
+ f"SELECT site_name FROM raw.cameras"
return sql
@RunSqlSelect
def _get_video_info(self, file_name):
"""get the information for a video by matching its file name to the ones in the raw.video_metadata table
Note: schema and table are hardcoded, so if that changes, this should also change
:param file_name: name to match against files in the table
:return:sql statement which gets run on the database
"""
sql = f"set role {self.write_role}; "\
+ "select * from "\
+ f"(select * from raw.video_metadata where file_name like '{file_name}') as vid "\
+ "left join "\
+ "raw.cameras as cams "\
+ "on "\
+ "(cams.id = vid.camera_id); "
return sql
def get_video_info(self, file_name):
    """Fetch video + camera metadata for *file_name* as a single dict.

    :param file_name: name matched against raw.video_metadata.file_name
    :return: dict mapping column name -> value for the first matching row,
             or None when no row matched (also None in testing mode, where
             the underlying select returns (None, None))
    """
    info, colnames = self._get_video_info(file_name)
    if info is None or len(info)==0:
        return None
    # Only the first matching row is returned.
    return dict(zip(colnames, info[0]))
@RunSqlSelect
def get_video_annotations(self, file_name):
"""Gets video annotation data from the database
Note: schema and table are hard coded, so if that changes, this should change as well
:param file_name: name of file for which to retrieve video annotations
:return: sql statement which gets run on the database
"""
sql = f"SET role {self.write_role}; " \
+ f"SELECT * FROM validation.cvat_frames_interpmotion " \
+ f"WHERE name = '{file_name}'; "
return sql
@RunSqlSelect
def get_annotated_video_list(self):
sql = f"SET role {self.write_role}; " \
+ f"SELECT DISTINCT name FROM validation.cvat_frames_interpmotion;"
return sql
@RunSqlSelect
def get_results_boxes(self, model_no, file_name):
"""Gets all boxes from the results.boxes table
Note: schema and table are hard coded, so if that changes, this should change as well
:param file_name: to match in the results boxes table
:param model_no: the model number to get results for
:return: sql statement which gets run on the database
"""
sql = f"SET role {self.write_role}; " \
+ f"SELECT * FROM results.boxes " \
+ f"WHERE video_file_name = '{file_name}'' and model_number = '{model_no}'"
return sql
@RunSqlSelect
def get_results_motion(self, model_no, file_name):
sql = f"SET role {self.write_role}; " \
+ f"WITH foo as ( " \
+ f"SELECT * FROM results.boxes " \
+ f"WHERE video_file_name = '{file_name}' and model_number = '{model_no}')," \
+ f"foo_mot as ( " \
+ f"SELECT mean_delta_x, mean_delta_y, magnitude, box_id, model_number from results.box_motion " \
+ f"WHERE video_file_name = '{file_name}' and model_number = '{model_no}') " \
+ f"SELECT foo.*, foo_mot.mean_delta_x, foo_mot.mean_delta_y, foo_mot.magnitude " \
+ f"from foo " \
+ f"LEFT JOIN foo_mot " \
+ f"ON foo.box_id=foo_mot.box_id and foo.model_number=foo_mot.model_number;"
return sql
@RunSql
def upload_semantic_segments_to_boxes(self, data):
"""script to upload semantic segment information to boxes
Note: schema and table are hard coded, so if that changes, this should change as well
:param data: tuple of strings to be written to the table
:return: sql statement which gets run on the database
"""
#data_str = 'array["' + '","'.join(data) + '"]'
data_str = "array['" + "','".join(data) + "']"
sql = f"SET role {self.write_role}; " \
+ f"update results.boxes " \
+ f"set semantic_segment_bottom_edge_mode = ({data_str})[id];"
return sql
@RunSqlSelect
def get_max_model_number(self):
return f"SET role {self.write_role}; Select max(models.model_number) from results.models;"
def create_all_schemas_and_tables(self):
"""function which creates all schemas and tables specifed by this class in self.schemas
"""
for schema, tables in self.schemas.items():
self.create_schema(schema)
for table in tables.keys():
self.create_table(schema, table)
if __name__ == "__main__":
dbio = DatabaseIO()
import pdb; pdb.set_trace()
|
989,594 | 8ec0bb424b433beceb15295b62a3b62dae44b77b | from setuptools import setup
# Package metadata for pyuptodate: a tool that checks installed Python
# modules and updates them.
setup(
    name='pyuptodate',
    version='0.1.0',
    author='Tarek Amr',
    author_email='',
    url='https://github.com/gr33ndata/pyuptodate',
    packages=['pyuptodate'],
    scripts=['pyuptodate/pyuptodate.py'],
    license='LICENSE.txt',
    description='Check all installed Python modules and update them',
    # README doubles as the long description on PyPI.
    long_description=open('README.rst').read(),
    install_requires=["setuptools"],
)
|
989,595 | dcea1e6c482057ba4fa5b0eedec45f91c7fbe570 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2017 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IEDA Slender Node Adapter
Sitemap index: http://get.iedadata.org/sitemaps/
Sitemap index has links to separate sitemap docs for each IEDA partner. The
links on the sitemap pages get dataset landing pages that have the SDO JSON-LD
scripts in the html header.
Note that there's no ability to support archival in this approach (unless one
attempts to track when a record identifier used to appear but no longer does).
IEDA receives content from 3 contributing repositories. For now, MN will only
hold objects from EarthChem.
"""
import datetime
import io
import logging
import pprint
import requests
import sys
import xml.etree.ElementTree as ET
import d1_client.mnclient_2_0
import d1_common.checksum
import d1_common.const
import d1_common.date_time
import d1_common.system_metadata
import d1_common.types.dataoneTypes_v2_0 as v2
import d1_common.types.exceptions
import d1_common.wrap.access_policy
import d1_common.xml
import d1_client
import schema_org
# Sitemap listing the IEDA (USAP) dataset pages to harvest.
IEDA_SITE_MAP = "http://get.iedadata.org/sitemaps/usap_sitemap.xml"

# Node-wide system metadata defaults applied to every harvested object.
SCIMETA_FORMAT_ID = 'http://www.isotc211.org/2005/gmd'  # ISO 19139 metadata
SCIMETA_RIGHTS_HOLDER = 'CN=urn:node:mnTestIEDA,DC=dataone,DC=org'
SCIMETA_SUBMITTER = 'CN=urn:node:mnTestIEDA,DC=dataone,DC=org'
SCIMETA_AUTHORITATIVE_MEMBER_NODE = 'urn:node:mnTestIEDA'

# Target GMN instance; the commented value is the production endpoint.
# BASE_URL = 'https://gmn.dataone.org/ieda'
BASE_URL = 'https://gmn.test.dataone.org/mn'

# Client certificate/key used to authenticate to GMN. The commented pair is
# an alternate (production?) credential set — confirm before switching.
# CERT_PEM_PATH = './client_cert.pem'
# CERT_KEY_PATH = './client_key_nopassword.pem'
CERT_PEM_PATH = './urn_node_mnTestIEDA.pem'
CERT_KEY_PATH = './urn_node_mnTestIEDA.key'

# XML namespace map for ISO 19139 documents (used by parse_doi).
NS_DICT = {
    'gmd': 'http://www.isotc211.org/2005/gmd',
    'gco': 'http://www.isotc211.org/2005/gco',
}
def main():
    """Harvest IEDA dataset records from the sitemap and sync them into GMN.

    For each resource found in the sitemap: load its schema.org entry,
    skip records already present in GMN (keyed by PID = landing URL),
    otherwise download the ISO science metadata and either start a new
    object chain for the SID or append to an existing one.
    """
    logging.basicConfig(
        format='%(asctime)s %(levelname)-8s %(message)s',
        level=logging.DEBUG,
    )
    resource_list = schema_org.load_resources_from_sitemap(IEDA_SITE_MAP)
    logging.info('Found resources: {}'.format(len(resource_list)))
    gmn_client = create_gmn_client()
    for resource_dict in resource_list:
        logging.info('-' * 80)
        entry_dict = schema_org.load_schema_org(resource_dict)
        # Merge sitemap info with the schema.org entry; entry values win on
        # key collisions.
        result_dict = {
            **resource_dict,
            **entry_dict,
        }
        logging.info(pprint.pformat(result_dict))
        # Records whose schema.org entry could not be loaded are logged and
        # skipped — harvesting continues with the next resource.
        if 'error' in result_dict:
            logging.error(
                'error="{}" url="{}"'.format(result_dict['error'], result_dict['url'])
            )
            continue
        # Example of a merged result_dict:
        # {
        #   'date_modified': '2018-01-25T15:55:08-05:00',
        #   'id': 'doi:10.7265/N5F47M23',
        #   'metadata_format': None,
        #   'metadata_url': 'http://get.iedadata.org/metadata/iso/usap/609539iso.xml',
        #   'url': 'http://get.iedadata.org/metadata/iso/609539'
        # }
        # SID is the source system's stable identifier (DOI); PID is the
        # landing URL, which identifies this particular revision in GMN.
        sid = result_dict['id']
        pid = result_dict['url']
        logging.info('schema.org. sid="{}" pid="{}"'.format(sid, pid))
        if is_in_gmn(gmn_client, pid):
            logging.info('Skipped. Already in GMN.')
            continue
        scimeta_xml_bytes = download_scimeta_xml(result_dict['metadata_url'])
        pid_sysmeta_pyxb = generate_system_metadata(scimeta_xml_bytes, pid, sid)
        # Resolving the SID returns the sysmeta of the current chain head,
        # or None if no chain exists yet for this SID.
        head_sysmeta_pyxb = get_sysmeta(gmn_client, sid)
        # logging.info(sysmeta_pyxb.toxml('utf-8'))
        if head_sysmeta_pyxb:
            head_pid = head_sysmeta_pyxb.identifier.value()
            logging.info(
                'SID already exists on GMN. Adding to chain. head_pid="{}"'
                .format(head_pid)
            )
            gmn_client.update(
                head_pid, io.BytesIO(scimeta_xml_bytes), pid, pid_sysmeta_pyxb
            )
        else:
            logging.info(
                'SID does not exist on GMN. Starting new chain. pid="{}"'.format(pid)
            )
            gmn_client.create(pid, io.BytesIO(scimeta_xml_bytes), pid_sysmeta_pyxb)
def download_scimeta_xml(scimeta_url):
    """Download the science metadata document and return its bytes.

    Raises:
      AdapterException: on any HTTP error status or connection failure.
    """
    try:
        response = requests.get(scimeta_url)
        # Fix: requests.get() does NOT raise on 4xx/5xx responses by itself;
        # the original except clause was unreachable and error pages would
        # have been stored as science metadata. raise_for_status() converts
        # error statuses into requests.HTTPError.
        response.raise_for_status()
        return response.content
    except requests.RequestException as e:
        # RequestException is the base of HTTPError, ConnectionError and
        # Timeout, so network failures are reported uniformly.
        raise AdapterException(
            'Unable to download SciMeta. error="{}"'.format(str(e))
        )
def parse_doi(iso_xml, ns_dict=None):
    """Return the DOI from an ISO 19139 XML document.

    Args:
      iso_xml: The ISO XML document as a str or bytes.
      ns_dict: Optional prefix->URI namespace map; defaults to NS_DICT.

    Raises:
      AdapterException: if the document contains no DOI element.

    Fixes over the original: the `iso_xml` argument was ignored (the
    function re-parsed a hardcoded file 'iso.xml'), and the search path
    started with '.gmd:' — ElementTree reads that as a literal tag named
    '.gmd:identificationInfo', which never matches.
    """
    if ns_dict is None:
        ns_dict = NS_DICT
    root = ET.fromstring(iso_xml)
    doi_el = root.find(
        'gmd:identificationInfo/gmd:MD_DataIdentification/gmd:citation/'
        'gmd:CI_Citation/gmd:identifier/gmd:MD_Identifier/gmd:code/'
        'gco:CharacterString', ns_dict
    )
    if doi_el is None or doi_el.text is None:
        raise AdapterException('No DOI found in ISO metadata document')
    return doi_el.text.strip()
def create_gmn_client():
    """Build a v2 Member Node client for the configured GMN instance.

    Connection settings come from the module-level BASE_URL / CERT_* constants.
    """
    client_options = {
        'base_url': BASE_URL,
        'cert_pem_path': CERT_PEM_PATH,
        'cert_key_path': CERT_KEY_PATH,
        'retries': 1,
    }
    return d1_client.mnclient_2_0.MemberNodeClient_2_0(**client_options)
def is_in_gmn(gmn_client, did):
    """Return True if an object with identifier `did` exists on the GMN.

    Existence is probed by attempting to retrieve the object's system
    metadata; a NotFound response means the object is absent.
    """
    try:
        gmn_client.getSystemMetadata(did)
        return True
    except d1_common.types.exceptions.NotFound:
        return False
def get_sysmeta(gmn_client, sid):
    """Return the system metadata for `sid`, or None if it does not exist."""
    try:
        sysmeta_pyxb = gmn_client.getSystemMetadata(sid)
    except d1_common.types.exceptions.NotFound:
        return None
    return sysmeta_pyxb
def generate_system_metadata(scimeta_bytes, pid, sid):
    """Generate a System Metadata document for a science metadata object.

    Args:
      scimeta_bytes: Bytes of the node's original metadata document.
      pid: Identifier for this particular object (the resource URL).
      sid: The source system's stable identifier; becomes the series ID.

    Returns:
      A v2.systemMetadata PyXB object. Checksum and size are derived from
      the object bytes; rights holder, submitter and node identifiers come
      from the module-level SCIMETA_* constants. The checksum doubles as an
      arbitrary version identifier to accommodate the source system's
      mutable content in DataONE's immutable content model. Access is set
      to public read.
    """
    sysmeta_pyxb = v2.systemMetadata()
    sysmeta_pyxb.seriesId = sid
    sysmeta_pyxb.formatId = SCIMETA_FORMAT_ID
    sysmeta_pyxb.size = len(scimeta_bytes)
    sysmeta_pyxb.checksum = d1_common.checksum.create_checksum_object_from_string(
        scimeta_bytes
    )
    sysmeta_pyxb.identifier = pid
    sysmeta_pyxb.dateUploaded = d1_common.date_time.utc_now()
    # Fix: use the same timezone-aware UTC timestamp source as dateUploaded.
    # The original used naive datetime.datetime.now() (local time, no tzinfo),
    # which is inconsistent and ambiguous when serialized.
    sysmeta_pyxb.dateSysMetadataModified = d1_common.date_time.utc_now()
    sysmeta_pyxb.rightsHolder = SCIMETA_RIGHTS_HOLDER
    sysmeta_pyxb.submitter = SCIMETA_SUBMITTER
    sysmeta_pyxb.authoritativeMemberNode = SCIMETA_AUTHORITATIVE_MEMBER_NODE
    sysmeta_pyxb.originMemberNode = SCIMETA_AUTHORITATIVE_MEMBER_NODE
    # Replace any existing access policy with a single public-read rule.
    sysmeta_pyxb.accessPolicy = v2.AccessPolicy()
    with d1_common.wrap.access_policy.wrap_sysmeta_pyxb(sysmeta_pyxb) as ap:
        ap.clear()
        ap.add_public_read()
    return sysmeta_pyxb
class AdapterException(Exception):
    """Raised for adapter-level failures, e.g. SciMeta download errors."""
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit status.
    sys.exit(main())
|
989,596 | 3f6c3eae50dfea8e9f1298e0cf8121e898653eb8 | from django.test import TestCase
from main_app.models import Park
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
# Resolved once at import time; the 'park' namespace must be registered.
CREATE_PARK_URL = reverse('park:create')


def create_park(**params):
    """Test helper: create a Park with the given field values.

    NOTE(review): delegates to Park.create_park — confirm the model actually
    defines this factory method (the stock Django idiom would be
    Park.objects.create(**params)).
    """
    return Park.create_park(**params)
class PublicParkAPITests(TestCase):
    """Test the Park API (public, unauthenticated access).

    Currently a placeholder: the old user-API tests below are kept as
    commented-out reference material while the Park tests are written.
    """
    # old tests
    # NOTE(review): the hook below is named `setup`; unittest only calls
    # `setUp` (capital U), so the client would never have been initialized.
    # def setup(self):
    #     self.client = APIClient()
    # def test_create_valid_user_success(self):
    #     """Test creating user with valid info is successful"""
    #     payload = {
    #         'email': 'test@test.com'
    #     }
    #     res = self.client.post(CREATE_USER_URL, payload)
    #     self.assertEqual(res.status_code, status.HTTP_201_CREATED)
    #     user = get_user_model().objects.get(**res.data)
|
989,597 | 68df0fbd2be1fec5f7c20fcb5cb9c555629976f1 | from bisect import bisect_left,bisect_right
import string
# Positional index: for each lowercase letter, a sorted list of the 1-based
# positions where it occurs in s. Kept in sync with s on every type-1 update.
dic = {c:[] for c in string.ascii_lowercase}
n = int(input())  # length of s (read but not otherwise used)
s = list(input())
q = int(input())  # number of queries
for i,c in enumerate(s,start=1):
    dic[c].append(i)
for i in range(q):
    p,l,r = map(str,input().split())
    if p =='1':
        # Query type 1: set s[l] = r (r is a single character, l is 1-based).
        j = int(l)
        if r == s[j-1]:
            # No-op when the character is unchanged; keeps the index intact.
            continue
        # Move position j from the old character's list to the new one,
        # preserving sorted order via binary search.
        c = s[j-1]
        dic[c].pop(bisect_left(dic[c],j))
        dic[r].insert(bisect_left(dic[r],j),j)
        s[j-1] = r
    else:
        # Query type 2: count distinct characters in s[l..r] (inclusive).
        # For each letter, one binary search finds its first occurrence >= l;
        # the letter is present in the range iff that occurrence is <= r.
        ans = 0
        l = int(l)
        r = int(r)
        for c in string.ascii_lowercase:
            pl = bisect_left(dic[c],l)
            if pl < len(dic[c]):
                if dic[c][pl] <= r:
                    ans += 1
        print(ans)
|
989,598 | f8991a4cc5f673d31c9fc890d67610a0b992f404 | '''
Created on Apr 17, 2013
@package: ally core http
@copyright: 2012 Sourcefabric o.p.s.
@license: http://www.gnu.org/licenses/gpl-3.0.txt
@author: Gabriel Nistor
Provides HTTP specifications for indexes.
'''
from ally.core.impl.processor.render.json import createJSONBlockForIndexed, \
createJSONBlockForContent
from ally.core.impl.processor.render.xml import createXMLBlockForIndexed, \
createXMLBlockForContent
from ally.indexing.spec.model import Action
from ally.indexing.spec.perform import feedValue
# --------------------------------------------------------------------
# Block and action names shared by the XML and JSON index definitions below.
NAME_BLOCK_REST = 'rest' # The block name for REST resources injection from reference URLs.
NAME_BLOCK_CLOB = 'clob' # The block name for character clobs injection from reference URLs.
ACTION_REFERENCE = 'reference' # The action name to get the block reference.
ACTION_CHECK_CLOB = 'check_clob' # The action name to check if the block is for a clob content.
ACTION_ERROR_STATUS = 'error_status' # The action name for error status.
ACTION_ERROR_MESSAGE = 'error_message' # The action name for error message.

# --------------------------------------------------------------------

# Provides the HTTP block definitions.
# Populated below with both XML and JSON variants of the REST and CLOB blocks;
# all four register the same error-injection and reference-capture attributes.
BLOCKS_HTTP = {}

# We provide the XML block definitions.
BLOCKS_HTTP.update(createXMLBlockForIndexed(NAME_BLOCK_REST,
    injectAttributes={ACTION_ERROR_STATUS: 'ERROR', ACTION_ERROR_MESSAGE: 'ERROR_TEXT'},
    captureAttributes={ACTION_REFERENCE: ACTION_REFERENCE}))
BLOCKS_HTTP.update(createXMLBlockForContent(NAME_BLOCK_CLOB,
    # NOTE(review): final=False — presumably the clob check does not end the
    # action chain; confirm against ally.indexing.spec.model.Action.
    Action(ACTION_CHECK_CLOB, feedValue('true'), final=False),
    injectAttributes={ACTION_ERROR_STATUS: 'ERROR', ACTION_ERROR_MESSAGE: 'ERROR_TEXT'},
    captureAttributes={ACTION_REFERENCE: ACTION_REFERENCE}))

# We provide the JSON block definitions.
BLOCKS_HTTP.update(createJSONBlockForIndexed(NAME_BLOCK_REST,
    injectAttributes={ACTION_ERROR_STATUS: 'ERROR', ACTION_ERROR_MESSAGE: 'ERROR_TEXT'},
    captureAttributes={ACTION_REFERENCE: ACTION_REFERENCE}))
BLOCKS_HTTP.update(createJSONBlockForContent(NAME_BLOCK_CLOB,
    Action(ACTION_CHECK_CLOB, feedValue('true'), final=False),
    injectAttributes={ACTION_ERROR_STATUS: 'ERROR', ACTION_ERROR_MESSAGE: 'ERROR_TEXT'},
    captureAttributes={ACTION_REFERENCE: ACTION_REFERENCE}))
|
989,599 | d5feac8cdf178e268812596bd00f438b3121bac8 | # -*- coding: UTF-8 -*-
# Copyright 2016 Red Hat, Inc.
# Part of clufter project
# Licensed under GPLv2+ (a copy included | http://gnu.org/licenses/gpl-2.0.txt)
"""stringiter-combine filter"""
__author__ = "Jan Pokorný <jpokorny @at@ Red Hat .dot. com>"
from itertools import chain
from ..filter import Filter
def stringiter_combine(flt_ctxt, in_objs):
    """Combine multiple string-iter objects into one lazy 'stringiter' stream.

    Each input object is asked (eagerly) for its 'stringiter' representation;
    the resulting iterables are then chained lazily, in order.
    """
    streams = [obj('stringiter', protect_safe=True) for obj in in_objs]
    return ('stringiter', chain.from_iterable(streams))
@Filter.deco(('string-iter', ) * 2, 'string-iter')
def stringiter_combine2(flt_ctxt, in_objs):
    """Filter arity variant: combine exactly 2 string-iter inputs."""
    return stringiter_combine(flt_ctxt, in_objs)
@Filter.deco(('string-iter', ) * 3, 'string-iter')
def stringiter_combine3(flt_ctxt, in_objs):
    """Filter arity variant: combine exactly 3 string-iter inputs."""
    return stringiter_combine(flt_ctxt, in_objs)
@Filter.deco(('string-iter', ) * 4, 'string-iter')
def stringiter_combine4(flt_ctxt, in_objs):
    """Filter arity variant: combine exactly 4 string-iter inputs."""
    return stringiter_combine(flt_ctxt, in_objs)
# ...
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.