index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
986,100 | 4c8f9c3c68e17f0d362349ffce53e1ce2c3502bd | import requests
import secrets
base_url = 'https://newsapi.org/v2/top-headlines'
params = {
"apiKey": secrets.NEWSAPI_KEY,
"country": "us",
"q": "new hampshire"
}
response = requests.get(base_url, params)
result = response.json()
#print(result)
#print(result['articles'])
for article in result['articles']:
#print(f"{article['source']['name']}")
print(f"{article['title']}") |
986,101 | ccf897450b3a69387a40ab3f9923e5335a5f94eb | def sieve_flavius(min_n, max_n) -> set:
    """
    Return set with lucky numbers.
    """
    # Start from the odd numbers: the first sieving step (delete every
    # 2nd number) is already applied by the range step of 2.
    pointer = 1
    lst = list(range(1, max_n + 1, 2))
    # Flavius Josephus / lucky-number sieve: at each round, num is the
    # survivor at `pointer`, and every num-th surviving element is removed.
    while pointer < len(lst):
        new_lst = []
        num = lst[pointer]
        for i in range(len(lst)):
            # (i + 1) is the 1-based position; keep all but every num-th.
            if (i + 1) % num != 0:
                new_lst.append(lst[i])
        lst = new_lst
        pointer += 1
    # Skip survivors below min_n.
    # NOTE(review): raises IndexError when every lucky number <= max_n is
    # below min_n — confirm callers guarantee min_n is not too large.
    ind = 0
    while lst[ind] < min_n:
        ind += 1
    return set(lst[ind:])
def ulam(min_n, max_n) -> set:
    """
    Return set with Ulam numbers in the inclusive range [min_n, max_n].

    The Ulam sequence starts 1, 2; each later term is the smallest integer
    that is the sum of two distinct earlier terms in exactly one way.
    """
    ulams = [1, 2]
    # sums[k] counts how many ways k can be written as a sum of two
    # distinct Ulam numbers found so far; indices stay below 2 * max_n.
    sums = [0] * (2 * max_n)
    newUlam = 2
    sumIndex = 1
    while newUlam < max_n:
        # Register the sums of the newest Ulam number with all smaller ones.
        for i in ulams:
            if i < newUlam:
                sums[i + newUlam] += 1
        # Next Ulam number: smallest value reachable in exactly one way.
        while sums[sumIndex] != 1:
            sumIndex += 1
        newUlam = sumIndex
        sumIndex += 1
        ulams.append(newUlam)
    # BUG FIX: the original tail-trimming loop tested ulams[ind_down] while
    # decrementing ind_up, so it either never ran or never terminated, and
    # the final slice always dropped the last element even when in range.
    # (A stray debug print(ulams) was removed as well.)
    return {u for u in ulams if min_n <= u <= max_n}
def even(min_n, max_n) -> set:
    """
    Return the set of even numbers in the inclusive range [min_n, max_n].
    """
    # BUG FIX: removed a stray debug print() that emitted a blank line on
    # every call; the computation itself is unchanged.
    return {x for x in range(min_n, max_n + 1) if x % 2 == 0}
print(even(12, 32))
# sieve_flavius_set = sieve_flavius()
# ulam_set = ulam()
# even_set = even()
|
986,102 | 2bb4195d434dd14ac1f501325f655e52c8f7a413 | '''
==============
3D quiver plot
==============
Demonstrates plotting directional arrows at points on a 3d meshgrid.
'''
# This import registers the 3D projection, but is otherwise unused.
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
import matplotlib.pyplot as plt
import numpy as np
fig = plt.figure()
# BUG FIX: Figure.gca(projection='3d') was deprecated in Matplotlib 3.4 and
# removed in 3.6; add_subplot is the supported way to create 3D axes.
ax = fig.add_subplot(projection='3d')

# Make the grid
x, y, z = np.meshgrid(np.arange(-0.8, 1, 0.2),
                      np.arange(-0.8, 1, 0.2),
                      np.arange(-0.8, 1, 0.8))

# Make the direction data for the arrows (a divergence-free vector field).
u = np.sin(np.pi * x) * np.cos(np.pi * y) * np.cos(np.pi * z)
v = -np.cos(np.pi * x) * np.sin(np.pi * y) * np.cos(np.pi * z)
w = (np.sqrt(2.0 / 3.0) * np.cos(np.pi * x) * np.cos(np.pi * y) *
     np.sin(np.pi * z))

ax.quiver(x, y, z, u, v, w, length=0.1, normalize=True)

plt.show()
|
986,103 | 375af454734cba9450428d84a303f559b87b8c50 | from PyQt5 import QtCore
from PyQt5.QtCore import Qt, QThread, pyqtSignal
from PyQt5.QtWidgets import *
import sys
import math
import threading
import time
from server import Server
class Updates(QThread):
    """Worker thread that forwards queued text messages to the GUI thread.

    Messages are appended from any thread via msger() and re-emitted on
    _signal so Qt widgets are only ever touched from the GUI thread.
    """
    is_active = False
    _signal = pyqtSignal(str)

    def __init__(self):
        super(Updates, self).__init__()
        # BUG FIX: the queue was a mutable class attribute shared by every
        # instance; make it per-instance state instead.
        self._msg_queue = []

    def __del__(self):
        self.wait()

    def run(self):
        # BUG FIX: the original bound a *local* name `is_active` and read
        # the undefined local `_msg_queue`, raising NameError on the first
        # loop test; both must go through self.
        self.is_active = True
        while self.is_active or self._msg_queue:
            if not self._msg_queue:
                time.sleep(0.1)
                continue
            msg = self._msg_queue.pop(0)
            self._signal.emit(msg)

    def msger(self, msg: str):
        """Queue a message for delivery on the GUI thread."""
        self._msg_queue.append(msg)
class Updates(QThread):
    """Worker thread that forwards queued (worker id, message) pairs to the
    GUI thread via _signal.

    NOTE(review): this shadows the identically named class defined above;
    only this second definition is ever instantiated — confirm the first
    one can be deleted.
    """
    is_active = False
    _signal = pyqtSignal(int, str)

    def __init__(self):
        super(Updates, self).__init__()
        # BUG FIX: per-instance queue instead of a shared class attribute.
        self._msg_queue = []

    def __del__(self):
        self.wait()

    def run(self):
        # BUG FIX: the original bound a *local* `is_active` and read the
        # undefined local `_msg_queue` (NameError); use self for both.
        self.is_active = True
        while self.is_active or self._msg_queue:
            if not self._msg_queue:
                time.sleep(0.1)
                continue
            sid, msg = self._msg_queue.pop(0)
            self._signal.emit(sid, msg)

    def msger(self, sid: int, msg: str):
        """Queue a (worker id, message) pair for delivery on the GUI thread."""
        self._msg_queue.append((sid, msg))
class PyQtGUI(QWidget):
    """Main window: drives the Server, shows a per-worker progress bar for
    each connected worker and the lists of pending/processed items."""

    # Overall progress bar for the whole job.
    _progress_bar = None
    # Background Updates thread relaying server messages to the GUI thread.
    _thread = None
    # True while a processing run is active.
    _is_working = False
    # True once a start directory has been selected.
    _flag_init = False
    # NOTE(review): class-level mutable list shared by all instances; only
    # tolerable because a single PyQtGUI is ever created — confirm.
    slave_workers = []

    def __init__(self):
        super().__init__()
        self._thread = Updates()
        self._thread._signal.connect(self.msg_listener)
        self._server = Server(self.update_listener, self._thread.msger)
        self.initUI()

    def closeEvent(self, event):
        """Stop the server before the window closes."""
        self._server.stop()
        event.accept()

    def msg_listener(self, slave_id, msg):
        """Handle a (worker id, message) pair emitted by the Updates thread.

        'new' registers a worker with its own progress bar; other messages
        with a non-zero id update that worker's bar; id 0 means msg is an
        item name to be moved from the "To read" list to the "Done" list.
        """
        if msg == 'new':
            new_slave = dict()
            new_slave['id'] = slave_id
            new_slave['progress'] = QProgressBar()
            new_slave['work'] = 0
            new_slave['label'] = QLabel('Worker ==> {}'.format(slave_id))
            self.slave_layout.addWidget(new_slave['label'])
            self.slave_layout.addWidget(new_slave['progress'])
            new_slave['progress'].setValue(0)
            self.slave_workers.append(new_slave)
        elif slave_id != 0:
            for slave in self.slave_workers:
                if slave['id'] == slave_id:
                    if msg == 'update':
                        # One more unit of work done by this worker.
                        slave['work'] += 1
                        slave['progress'].setValue(100 / self._server.WORK_SIZE * slave['work'])
                    elif msg == 'done':
                        slave['progress'].setValue(0)
                        slave['work'] = 0
                    elif msg == 'die':
                        # Worker disconnected: remove its widgets.
                        slave['progress'].deleteLater()
                        slave['label'].deleteLater()
                    else:
                        print("Unknown message: " + msg)
        else:
            # id 0: msg names a finished item — move it to the "Done" list.
            for i in range(self.attributes_listbox.count()):
                if self.attributes_listbox.item(i).text() == msg:
                    self.attributes_listbox_out.addItem(self.attributes_listbox.takeItem(i))
                    break

    def update_listener(self, progress: int):
        """Advance the overall progress bar; reset controls when finished."""
        self._progress_bar.setValue(math.floor(100 / self.total_progress * (progress + 1)))
        if progress + 1 == self.total_progress:
            self.reset_buttons()

    def reset_buttons(self):
        """Return the UI to its idle state after a run stops or completes."""
        self._thread.is_active = False
        self._is_working = False
        self._stop_proc_button.setEnabled(False)

    def initUI(self):
        """Build all widgets and layouts; the button callbacks are defined
        as closures so they can capture the input line edit."""

        def _select_directory():
            # Ask for the start directory and (re)fill the "To read" list.
            in_selected_arff.clear()
            check = QFileDialog.getExistingDirectory(None, 'Select directory', str(sys.path[0]))
            if not check:
                return
            self.attributes_listbox.clear()
            self.attributes_listbox_out.clear()
            in_selected_arff.setText(check)
            for att in self._server.get_image_paths(check):
                item = QListWidgetItem("%s" % (str(att)))
                self.attributes_listbox.addItem(item)
            self.total_progress:int = self.attributes_listbox.count()
            self._flag_init = True

        def _start_process():
            # Validate state, reload the item list and launch the server.
            if self._is_working:
                QMessageBox.critical(None, "Error", "Can't start a new task without finishing or stopping the previous one")
                return
            if not self._flag_init:
                QMessageBox.critical(None, "Error", "No Starting Directory selected!")
                return
            self.update_listener(0)
            self.attributes_listbox_out.clear()
            self.attributes_listbox.clear()
            for att in self._server.get_image_paths(in_selected_arff.text()):
                item = QListWidgetItem("%s" % (str(att)))
                self.attributes_listbox.addItem(item)
            self.total_progress:int = self.attributes_listbox.count()
            self._is_working = True
            self._stop_proc_button.setEnabled(True)
            self._thread.start()
            try:
                self._server.start(in_selected_arff.text())
            except Exception as err:
                # Roll back to idle state and surface the error to the user.
                _stop_process()
                QMessageBox.critical(None, "Error", str(err))

        def _stop_process():
            self.reset_buttons()
            self._server.stop()

        self._progress_bar = QProgressBar()
        self._progress_bar.setValue(0)

        grid_box_lay = QGridLayout()
        button_layout = QHBoxLayout()

        in_select_button = QPushButton('Select')
        in_select_button.clicked.connect(_select_directory)
        button_layout.addWidget(in_select_button)

        # Read-only line edit displaying the chosen start directory.
        in_selected_arff = QLineEdit()
        in_selected_arff.setReadOnly(True)
        in_selected_arff.setPlaceholderText("Press 'Select' to choose the start")
        button_layout.addWidget(in_selected_arff)

        lab1 = QLabel("To read:")
        grid_box_lay.addWidget(lab1, 1, 0, 1, 1)
        self.attributes_listbox = QListWidget()
        grid_box_lay.addWidget(self.attributes_listbox, 2, 0, 1, 1)

        lab1 = QLabel("Done:")
        grid_box_lay.addWidget(lab1, 1, 1, 1, 1)
        self.attributes_listbox_out = QListWidget()
        grid_box_lay.addWidget(self.attributes_listbox_out, 2, 1, 1, 1)

        start_proc_button = QPushButton('Start')
        start_proc_button.clicked.connect(_start_process)
        button_layout.addWidget(start_proc_button)

        self._stop_proc_button = QPushButton('Stop')
        self._stop_proc_button.setEnabled(False)
        self._stop_proc_button.clicked.connect(_stop_process)
        button_layout.addWidget(self._stop_proc_button)

        grid_box_lay.addWidget(self._progress_bar, 3, 0, 1, 2)

        # Main grid: workers column (stretch 1) | input + results (stretch 3).
        main_vbox = QGridLayout()
        main_vbox.setColumnStretch(1, 3)
        main_vbox.setColumnStretch(0, 1)

        input_gbox = QGroupBox("Input")
        input_gbox.setLayout(button_layout)
        main_vbox.addWidget(input_gbox, 0, 1)

        attributes_group = QGroupBox("Results")
        attributes_group.setLayout(grid_box_lay)
        main_vbox.addWidget(attributes_group, 1, 1)

        # Worker bars live in slave_layout; addStretch keeps them top-packed.
        self.slave_layout = QVBoxLayout()
        slave_parent = QVBoxLayout()
        slave_parent.addLayout(self.slave_layout)
        slave_parent.addStretch()
        slave_group = QGroupBox("Workers:")
        slave_group.setLayout(slave_parent)
        main_vbox.addWidget(slave_group, 0, 0, 2, 1)

        self.setLayout(main_vbox)
        self.setWindowTitle("PD5 Tesseract")
        self.setGeometry(300, 300, 1024, 640)
        self.show()
def main():
    """Create the Qt application, show the main window and run the event loop."""
    app = QApplication(sys.argv)
    window = PyQtGUI()  # keep a reference so the window isn't garbage-collected
    sys.exit(app.exec_())


if __name__ == '__main__':
    main()
|
986,104 | 2cd8d6a508aea75427a6e317f32b76c5ebb5b12e | # Generated by Django 3.1 on 2020-12-07 00:34
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the leagues app: creates League, Season and
    Circuit (auto-generated by Django; avoid hand-editing operations)."""

    initial = True

    dependencies = [
        # Depend on the swappable user model rather than a hard-coded one.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='League',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='Name your league', max_length=255)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('captains', models.ManyToManyField(related_name='captain_of', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Season',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='Name your league', max_length=255)),
                ('regular_start', models.DateTimeField(blank=True, null=True)),
                ('regular_end', models.DateTimeField(blank=True, null=True)),
                ('playoffs_start', models.DateTimeField(blank=True, null=True)),
                ('playoffs_end', models.DateTimeField(blank=True, null=True)),
                ('league', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='seasons', to='leagues.league')),
            ],
        ),
        migrations.CreateModel(
            name='Circuit',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('region', models.CharField(choices=[('W', 'West'), ('E', 'East'), ('A', 'All')], max_length=1)),
                ('tier', models.CharField(choices=[('1', 'Tier 1'), ('2', 'Tier 2'), ('3', 'Tier 3'), ('0', 'No Tier')], max_length=1)),
                ('name', models.CharField(blank=True, help_text='Optionally specify a manual name for this league', max_length=255, null=True)),
                ('season', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='circuits', to='leagues.season')),
            ],
        ),
    ]
|
986,105 | cf7665a9328afbd06c8ffec3ebe7ca4f4622e32b | from __future__ import annotations
from typing import Generic, TypeVar

_T = TypeVar('_T')

__all__ = ('Result',)


class Result(Generic[_T]):
    r"""
    A class to encapsulate the result of an operation

    This is useful for operations which can either return a value
    or an error message.

    Args:
        ok: True if ok, False if failed
        value: the value returned by the operation
        info: an error message if the operation failed

    Example
    -------

    .. code::

        from emlib.result import Result
        import re
        from fractions import Fraction

        def parsefraction(txt: str) -> Result[Fraction]:
            match = re.match(r"([0-9]+)\/([1-9][0-9]*)", txt)
            if not match:
                return Result.Fail(f"Could not parse '{txt}' as fraction")
            num = int(match.group(1))
            den = int(match.group(2))
            return Result.Ok(Fraction(num, den))

        if fraction := parsefraction("4/5"):
            print(f"Fraction ok: {fraction.value}")  # prints 'Fraction ok: 4/5'

    Typing
    ------
    To make typing analysis work better it is possible to indicate the kind of
    value wrapped by the Result class. See the return type declared in
    ``parsefraction`` in the example above
    """
    # DOC FIX: the example's print() call was missing its closing quote, and
    # the docstring is now raw so the \/ in the example regex is not an
    # invalid escape sequence (SyntaxWarning on modern Python).

    def __init__(self, ok: bool, value: _T | None = None, info: str = ''):
        self.ok: bool = ok
        self.value: _T | None = value
        self.info: str = info

    def __bool__(self) -> bool:
        # A Result is truthy exactly when the operation succeeded.
        return self.ok

    @property
    def failed(self) -> bool:
        """True if operation failed"""
        return not self.ok

    def __repr__(self):
        if self.ok:
            return f"Result(ok, value={self.value})"
        else:
            return f'Result(failed, info="{self.info}")'

    @classmethod
    def Fail(cls, info: str, value=None) -> Result:
        """Create a Result object for a failed operation."""
        if not isinstance(info, str):
            raise TypeError(f"The info parameter should be a str, got {info}")
        return cls(False, value=value, info=info)

    @classmethod
    def Ok(cls, value: _T | None = None) -> Result:
        """Create a Result object for a successful operation."""
        return cls(True, value=value)
986,106 | 3da660c5130b2a32457e0a16fe78294d5e28ccc5 | import postgresql
db = postgresql.open("pq://postgres:123456@localhost/rc3")
consultar = db.prepare("SELECT * from cep")
dados = consultar()
for row in dados:
print("CEP: {} MUNICIPIO: {}".format(row["cep"], row["municipio"])) |
986,107 | ec1b77901e40d8d7b90e0c2a4da0857fdb5e3025 | def catchpa(input):
stringInput = str(input)
stringLength = len(stringInput)
total = 0
halfwayRound = stringLength / 2
for x in range(stringLength):
# check the number halfway around
if x + halfwayRound > stringLength - 1:
if stringInput[x] == stringInput[x - halfwayRound]:
total = total + int(stringInput[x])
elif stringInput[x] == stringInput[x + halfwayRound]:
# Add to total
total = total + int(stringInput[x])
return total
test1 = catchpa(1212)
if test1 == 6:
print('test1 passed')
test2 = catchpa(1221)
if test2 == 0:
print('test2 passed')
test3 = catchpa(123425)
if test3 == 4:
print('test3 passed')
test4 = catchpa(123123)
if test4 == 12:
print('test4 passed')
test5 = catchpa(12131415)
if test5 == 4:
print('test5 passed')
result = catchpa(217523428149337669381721216749768791113624176532615223578558168936564624491683773592852448184897238699878612479122897295792966916847611435449569915839422152365689618758517558549779461471787464646752276991499252272271375574797699485697888843993798211113825367226995757594744732739397563489927146679635961897657347431694895991257714433481933835661598435935411347493925698654815783598258443944541732198579193493414421482822296895415611693416222223546513973429286784964786713393839237698564257952113236733897231819679339338327115458856539528618792315379762925178663548129431927282632695247356984233366737351589938535561488338613279592622547566478277391452835777934815267681569211384283189393618597217785562645196434358718357448592431672278895627387129536511283176246739852135258975223782591786254167221521557286159365873695152549368286685648572832264398812668719459987964884722491825388833541865739251831526638626839954496716632857753974538762627225674524359147773635228175947419466389865717936558894664198959969241229157772244994814968373431941491237353552681519417128712458635538369533498878319497888698529291478494892653258439346699993918462863192686867893725139765222825875268661481663372159614935362628515122187941392723612928115298881611987992979668933665531153536392982567888193852724711872135791855235213416511179476767853411462354414114418132425148132278218438194246199749798868716466219188652745745389517615678558456812723646461385847163335998438351673735252485475424429425831226245344944425162596169732358584691311597731673349536586732715997489429569819546994445286896288486944468188254654851228697428397114711298626321286357796583657563628636271359836176133328497563719863769671175492515662819929645739296555893138719765567848492319165138315382548123471162539498186335271851742215652797757667422626877131141143448435349588333726341821768663154415838871777592225988537351141918742777114346538548168415892299141646813644974293244631936693378274676617738335178417637111563761476
64749175267212562321567728575765844893232718971471289841171642868948852136818661741238178676857381583155547755219837116125995361896562498721571413742)
print(result)
|
986,108 | 0b31d6a4efd7287b7ae5147e348f5dc6170551ec | # %% Defines functions needed for the rest of the program
import matplotlib.pylab as plt
import numpy as np
import scipy
#import seaborn as sns #uncomment these for nicer plots
#sns.set('talk')
#Get Hartree potential from electron density
def getVH(ns, N, Rmax):
    """Return the Hartree potential on the radial grid for density ns.

    Solves the radial Poisson equation for U(r) = r*V(r) with a
    finite-difference Laplacian on N points in (0, Rmax]; the constant
    subtracted from the last right-hand-side entry appears to fold in the
    outer boundary value of U (unit total charge) — confirm against the
    derivation before reuse.
    """
    step = Rmax / N
    grid = np.linspace(step, Rmax, N)
    # Tridiagonal second-difference matrix.
    lap = np.diag(-2 * np.ones(N), 0) + np.diag(np.ones(N - 1), 1) + np.diag(np.ones(N - 1), -1)
    rhs = -4 * np.pi * step ** 2 * grid * ns
    rhs[-1] -= 1
    U = np.linalg.solve(lap, rhs)
    return U / grid
#Get wave function from hartree potential
def getpsi(V, N, Rmax, Z):
    """Solve the radial eigenproblem for the lowest state.

    Diagonalizes the finite-difference Hamiltonian for u(r) = r*R(r) with
    effective potential -Z/r + V and returns (psi, eps): the normalized 3D
    wave function and the lowest eigenvalue.
    """
    h = Rmax/N
    r = np.linspace(h,Rmax,N)
    # Diagonal: kinetic term 1/h^2 plus nuclear attraction and given potential.
    c = 1/h**2-Z/r+V
    A = np.diag(c,0)+np.diag(-np.ones(N-1)/(2*h**2),1)+np.diag(-np.ones(N-1)/(2*h**2),-1)
    (E, w) = np.linalg.eig(A)
    # Ground state = smallest eigenvalue (assumed real up to round-off;
    # TODO confirm — np.linalg.eig gives no ordering or realness guarantee).
    eps = np.min(E).real
    f = w[:,np.argmin(E)]
    # Normalize u(r) on the grid and fix the overall sign so f(0+) > 0.
    f = f/np.sqrt(np.trapz(f**2,r))*np.sign(f[0])
    # Convert u(r) back to the full wave function psi = u / (r * sqrt(4*pi)).
    psi = 1/np.sqrt(4*np.pi)*f/r
    return (psi,eps)
# Calculate energy
def getE(eps,VH,Vxc,epsxc,ns,Z):
    """Total energy: Z*eps minus the double-counting corrections for the
    Hartree and exchange-correlation terms.

    NOTE(review): integrates over the *module-level* grid `r` (not a
    parameter) — the caller must have set r consistently with ns; confirm
    before reusing this function elsewhere.
    """
    return Z*eps-Z*4*np.pi*np.trapz((VH*ns/2+Vxc*ns-epsxc*ns)*r**2,r)
def solve(ns,MaxIters,N,Rmax,Z):
    """Self-consistently iterate the Hartree potential and wave function.

    Returns (ns, E): the converged density and the per-iteration energies.
    NOTE(review): writes into the *module-level* array `E`, which must be
    pre-allocated with at least MaxIters entries by the caller — confirm.
    """
    #Solve the self consistency problem
    for i in range(MaxIters):
        VH = getVH(ns,N,Rmax)
        (psi,eps) = getpsi(VH,N,Rmax,Z)
        ns = np.abs(psi)**2
        E[i] = getE(eps,VH,0,0,ns,Z)
        # Converged once the energy change drops below 1e-5 eV (in Hartree).
        if np.abs(E[i]-E[i-1])<1e-5/27.21:
            break
    return ns, E
#%% Task 1. Solves the Ansats Hartree-Fock problem.
# Given alpha values
a = [0.297104, 1.236745, 5.749982, 38.216677]
# Init of matrices from Thijssen 4.3.2
# h matrix
h = np.zeros((4, 4))
for p in range(4):
for q in range(4):
h[p, q] = 4*np.pi/(a[p]+a[q])*(3/4*a[q] *
(1-a[q]/(a[p]+a[q]))*np.sqrt(np.pi/(a[p]+a[q]))-1)
# Q matrix (given by eq 4.17 in Thijssen)
Q = np.zeros((4, 4, 4, 4))
for p in range(4):
for q in range(4):
for r in range(4):
for s in range(4):
Q[p, r, q, s] = 2*np.pi**(5/2)/((a[p]+a[q])
* (a[r]+a[s])*np.sqrt(a[p]+a[q]+a[r]+a[s]))
# S matrix
S = np.zeros((4, 4))
for p in range(4):
for q in range(4):
S[p, q] = (np.pi/(a[p]+a[q]))**(3/2)
# Inital values
C = [1, 1, 1, 1]
#Normalizing C according to eq. 4.19 in Thijssen
def normalize(C):
return C/np.sqrt(np.matmul(C, np.matmul(S, C)))
F = np.zeros((4, 4))
#Evaluating eq. 4.21 in Thijssen
def getEg(C, h, Q):
def con(Q, C):
return np.tensordot(Q, C, axes=([0], [0]))
return 2*np.matmul(C, np.matmul(h, C))+con(con(con(con(Q, C), C), C), C)
MaxIters = 15
E = np.zeros(MaxIters)
for i in range(MaxIters):
C = normalize(C)
E[i] = getEg(C, h, Q)
print(E[i])
if np.abs(E[i]-E[i-1]) < 1e-5/27.21:
break
# Set F(C)
for p in range(4):
for q in range(4):
F[p, q] = h[p, q]+np.matmul(C, np.matmul(Q[p, :, q, :], C))
# Solve eigenvalue problem
(eps, w) = scipy.linalg.eig(F, S)
if any(eps.imag != 0):
raise Exception('complex eig')
# Get best C
C = w[:, np.argmin(eps.real)]
C = normalize(C)
print("Eg = ", getEg(C, h, Q))
print("C= ", C)
# Print the radial PDF
Rmax = 7
h = 0.006
N = int(np.round(Rmax/h))
r = np.linspace(h, Rmax, N)
plt.figure(figsize=(8, 6))
plt.xlabel(r"Distance [$a_0$]", fontsize=18)
plt.ylabel("Radial PDF", fontsize=18)
plt.plot(r, 4*np.pi*r**2 *
np.sum([C[i]*np.exp(-a[i]*r**2) for i in range(4)], axis=0)**2)
plt.savefig('task1.pdf')
task1density = 4*np.pi*r**2 * np.sum([C[i]*np.exp(-a[i]*r**2) for i in range(4)], axis=0)**2
task1r = r
#%% task 2. Solves Poisson's equation to get the Hartree potential
Rmax = 10 # atomic
Z=1
N=1000 #Number of grid points
h = Rmax/N #Stepsize
r = np.linspace(h,Rmax,N)
ns=1/np.pi*Z**3*np.exp(-2*Z*r) #Hydrogen density
VH=getVH(ns,N,Rmax) #Get Hydrogen Hartree potential
VHanalytic = 1/r-(1+1/r)*np.exp(-2*r)
#Plot
plt.figure(figsize=(8, 6))
plt.plot(r,VH, label = r'Hartree method')
plt.plot(r,VHanalytic, '--' ,label = r'Analytical')
plt.legend()
plt.xlabel(r"Distance [$a_0$]", fontsize=18)
plt.ylabel("Potential [Ha]", fontsize=18)
plt.savefig('task2.pdf')
#%% task 3. Solves the Kohn-Sham equation to get the energy of the hydrogen atom.
Rmax = 6 # atomic
Z=1
N=1000 #Number of grid points
h = Rmax/N #Stepsize
r = np.linspace(h,Rmax,N)
(psi,eps) = getpsi(0,N,Rmax,Z) #Get hydrogen wave function
print("eps = ", eps)
ns= np.abs(psi)**2
print("E = ",getE(eps,0,0,0,ns,Z)) #Get hydrogen ground state energy
#Plot
plt.figure(figsize=(8, 6))
plt.xlabel(r"Distance [$a_0$]", fontsize=18)
plt.ylabel(r"Radial PDF [$a_0^{-1}$]", fontsize=18)
plt.plot(r,4*np.pi*r**2*ns)
plt.savefig('task3.pdf')
task3density = 4*np.pi*r**2*ns
task3r = r
#%% Task 4. Calculates dependence on Rmax of the iterative Hartee-Fock method.
Z= 2
N=800 #Number of grid points
Rmax = 6 # atomic
h = 0.006 #Stepsize
r = np.linspace(h,Rmax,N)
ns=1/np.pi*Z**3*np.exp(-2*Z*r) #Guess initial density
MaxIters = 15 #Max number of iterations
E = np.zeros(MaxIters)
conIters = 10
Rmaxlist = np.linspace(4,10,conIters)
Econv = np.zeros(conIters)
#Iterates over Rmax
for i in range(conIters):
N = int(np.round(Rmaxlist[i]/h))
r = np.linspace(h,Rmaxlist[i],N)
ns=1/np.pi*Z**3*np.exp(-2*Z*r) #Guess initial density
E = solve(ns,MaxIters,N,Rmaxlist[i],Z)[1] # Solves the self consistency problem
Econv[i] = E[E!=0][-1]
print("E = ", Econv[i], "at rmax", Rmaxlist[i]," and i ",i)
#%% Plots E for Rmax values
plt.figure(figsize=(6.15, 4.6))
plt.plot(Rmaxlist,Econv)
plt.xlabel(r"$R_{max}$ [$a_0$]", fontsize=18)
plt.ylabel(r"Energy [Ha]", fontsize=18)
plt.tight_layout()
plt.savefig('task4_rmax.pdf')
#%% Task 4. Plots dependence on stepsize h of the iterative Hartee-Fock method.
Rmax = 7 # atomic
h = 0.006 #Stepsize
MaxIters = 15 #Max number of iterations
E = np.zeros(MaxIters)
conIters = 10
hlist = np.logspace(np.log10(0.04),np.log10(0.0025),conIters)
Econvh = np.zeros(conIters)
#Iterates over h
for i in range(conIters):
N = int(np.round(Rmax/hlist[i]))
r = np.linspace(hlist[i],Rmax,N)
ns=1/np.pi*Z**3*np.exp(-2*Z*r) #Guess initial density
E = solve(ns,MaxIters,N,Rmax,Z)[1] # Solves the self consistency problem
Econvh[i] = E[E!=0][-1]
print("E = ", Econvh[i], "at h", hlist[i]," and i ",i)
#%% Plots E for h values
plt.figure(figsize=(6.15, 4.6))
plt.plot(hlist[1:],Econvh[1:])
plt.xlabel(r"h [$a_0$]", fontsize=18)
plt.ylabel("Energy [Ha]", fontsize=18)
plt.xlim(0.025,0)
plt.tight_layout()
plt.savefig('task4_h.pdf')
#%% Plot the final electron density for Task 4
Rmax = 7
h = 0.006
N = int(np.round(Rmax/h))
r = np.linspace(h,Rmax,N)
ns=1/np.pi*Z**3*np.exp(-2*Z*r)
MaxIters = 15 #Max number of iterations
E = np.zeros(MaxIters)
ns, E = solve(ns,MaxIters,N,Rmax,Z)
print("E = ", E[E!=0][-1])
#Plot ns
plt.figure(figsize=(8, 6))
plt.xlabel(r"Distance [$a_0$]", fontsize=18)
plt.ylabel("Radial PDF", fontsize=18)
plt.plot(r,4*np.pi*r**2*ns)
# plt.savefig('task4.pdf')
task4density = 4*np.pi*r**2*ns
task4r = r
#%% Plot the electron densities together for tasks 1,4,5 and 6.
plt.figure(figsize=(8, 6))
plt.xlabel(r"Distance [$a_0$]", fontsize=18)
plt.ylabel(r"Radial PDF [$a_0^{-1}$]", fontsize=18)
plt.plot(task1r,task1density, label ="Ansatz Hartree-Fock")
plt.plot(task4r,task4density,'--', label ="FD Hartree-Fock")
plt.plot(task5r,task5density,'-.', label ="With exchange")
plt.plot(task6r,task6density,':', label ="With exchange-correlation")
plt.xlim([0,3])
plt.legend()
plt.savefig('wavefuncs.pdf')
#%% Task 5 Hartree-Fock with exchange correction.
Z= 2
Rmax = 7 # atomic
h = 0.006 #Stepsize
N=int(np.round(Rmax/h)) #Number of grid points
r = np.linspace(h,Rmax,N)
ns=1/np.pi*Z**3*np.exp(-2*Z*r) #Guess initial density
#Exchange potential
def getepsx(n, Z):
    """LDA exchange energy density for electron density n.

    BUG FIX: the original ignored the parameter and read the module-level
    density as Z*ns; using the argument gives identical values for the
    existing calls (which pass n = Z*ns) while removing the hidden global.
    """
    return -3/4*(3*n/np.pi)**(1/3)
def getVx(n, Z):
    """LDA exchange potential, Vx = -(3 n / pi)^(1/3).

    BUG FIX: use the density argument n instead of the module-level Z*ns
    (identical for the existing calls, which pass n = Z*ns).
    """
    return -1*(3*n/np.pi)**(1/3)
epsx= getepsx(Z*ns,Z)
#ndepsx= 1/3*epsx
Vx = getVx(Z*ns,Z)
MaxIters = 30 #Max number of iterations
E = np.zeros(MaxIters)
Test = np.zeros(MaxIters)
#Solve the self consistency problem
for i in range(MaxIters):
VH = Z*getVH(ns,N,Rmax)
(psi,eps) = getpsi(VH+Vx,N,Rmax,Z)
ns = np.abs(psi)**2
epsx= getepsx(Z*ns,Z)
Vx = getVx(Z*ns,Z)
E[i] =getE(eps,VH,Vx,epsx,ns,Z)
if np.abs(E[i]-E[i-1])<1e-5/27.21:
break
print("E = ", E[i])
task5r=r
task5density=4*np.pi*r**2*ns
#Plot
plt.figure()
plt.plot(E[:i])
plt.figure(figsize=(8, 6))
plt.xlabel(r"Distance [$a_0$]", fontsize=18)
plt.ylabel("Radial PDF", fontsize=18)
plt.plot(r,4*np.pi*r**2*ns)
plt.savefig('task5.pdf')
# %% Task 6 Hartree-Fock with exchange-correlation correction.
Z= 2
Rmax = 7 # atomic
h = 0.006 #Stepsize
N=int(np.round(Rmax/h)) #Number of grid points
r = np.linspace(h,Rmax,N)
ns=1/np.pi*Z**3*np.exp(-2*Z*r) #Guess initial density
A = 0.0311
B = -0.048
C = 0.002
D = -0.0116
gamma = -0.1423
beta1 = 1.0529
beta2 = 0.3334
def getrs(n, Z):
    """Wigner-Seitz radius for electron density n.

    BUG FIX: use the argument n instead of the module-level Z*ns (identical
    for the existing calls, which pass n = Z*ns).
    """
    return (3/(4*np.pi*n))**(1/3)
def getepsc(n,Z):
rs=getrs(n,Z)
epsc=(rs>=1)*gamma/(1+beta1*np.sqrt(rs)+beta2*rs)
epsc=epsc+(rs<1)*(A*np.log(rs)+B+C*rs*np.log(rs)+D*rs)
return epsc
def getepsx(n, Z):
    """LDA exchange energy density for electron density n.

    BUG FIX: use the density argument n instead of the module-level Z*ns
    (identical for the existing calls, which pass n = Z*ns).
    """
    return -3/4*(3*n/np.pi)**(1/3)
def getVx(n, Z):
    """LDA exchange potential, Vx = -(3 n / pi)^(1/3).

    BUG FIX: use the density argument n instead of the module-level Z*ns
    (identical for the existing calls, which pass n = Z*ns).
    """
    return -1*(3*n/np.pi)**(1/3)
def getVc(n,Z):
rs=getrs(n,Z)
Vc=(rs>=1)*getepsc(n,Z)*(1+7/6*beta1*np.sqrt(rs)+4/3*beta2*rs)/(1+beta1*np.sqrt(rs)+beta2*rs)
Vc+=(rs<1)*(A*np.log(rs)+B-A/3+2/3*C*rs*np.log(rs)+(2*D-C)*rs/3)
return Vc
epsxc = getepsx(Z*ns,Z)+getepsc(Z*ns,Z)
Vxc = getVx(Z*ns,Z) + getVc(Z*ns,Z)
MaxIters = 30 #Max number of iterations
E = np.zeros(MaxIters)
#Solve the self consistency problem
for i in range(MaxIters):
VH = 2*getVH(ns,N,Rmax)
(psi,eps) = getpsi(VH+Vxc,N,Rmax,Z)
ns = np.abs(psi)**2
#Update eps and V with new ns
epsxc = getepsx(Z*ns,Z) + getepsc(Z*ns,Z)
Vxc = getVx(Z*ns,Z) + getVc(Z*ns,Z)
E[i] =getE(eps,VH,Vxc,epsxc,ns,Z)
if np.abs(E[i]-E[i-1])<1e-5/27.21:
break
print("E = ", E[i])
task6r=r
task6density=4*np.pi*r**2*ns
#Plot
plt.plot(r,4*np.pi*r**2*ns)
plt.figure()
plt.plot(E[:i])
|
986,109 | 401fb2a79c5924994551f6a718782dfa7ac44c66 | numeropositivo = -1
potencia = -1
while (numeropositivo <= 0 or potencia <= 0):
numeropositivo = int(input("Digite una base: "))
potencia = int(input("Digite una potencia: "))
if(numeropositivo <= 0 or potencia <= 0):
print ("Error. solo numeros positivos")
acumulador = numeropositivo
while (potencia > 1):
potencia = potencia - 1
numeropositivo = numeropositivo * acumulador
print(numeropositivo)
|
986,110 | 171bf28c36f1d241e9d1d7a196080539b90aa4eb | from django.db import models
from django.forms import ModelForm
from django.contrib.auth.hashers import make_password
import datetime
# Create your models here.
class UserEntity(models.Model):
    """Account/profile record for a user entity."""

    # BUG FIX: IntegerField takes no max_length, and its default must be an
    # int (the original passed the string '0').
    entity_type_id = models.IntegerField(default=0)
    group_id = models.IntegerField(default=0)
    fname = models.CharField(max_length=30, default='')
    lname = models.CharField(max_length=30, default='')
    sex = models.CharField(max_length=1, default='')
    date_of_birth = models.CharField(max_length=10, default='')
    address_1 = models.CharField(max_length=255, default='')
    address_2 = models.CharField(max_length=30, default='')
    city = models.CharField(max_length=255, default='')
    region = models.CharField(max_length=255, default='')
    country = models.CharField(max_length=255, default='')
    zipcode = models.CharField(max_length=255, default='')
    email = models.EmailField(max_length=255, default='')
    username = models.CharField(max_length=30, default='')
    password = models.CharField(max_length=255, default='')
    phone = models.CharField(max_length=20, default='')
    image = models.CharField(max_length=255, default='')
    # BUG FIX: datetime.datetime.now() was *called* at import time, freezing
    # one timestamp as the default forever; pass the callable instead so it
    # is evaluated on each save.
    create_time = models.DateTimeField(default=datetime.datetime.now)
    update_time = models.DateTimeField(default=datetime.datetime.now)
    # BUG FIX: boolean default should be True, not the string '1'.
    is_active = models.BooleanField(default=True)
    recent_ip = models.CharField(max_length=15, default='')

    def hash_password(self, raw_password):
        """Hash raw_password with Django's password hasher and store it."""
        self.password = make_password(raw_password)
|
986,111 | 43feabb3520bcbee81a1293ed9807c2038f43750 | import dash
import dash_bootstrap_components as dbc
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output,State
from app import app
from app import server
from apps import stock_forecasting,world_gdp_analysis,home,tweet_analysis,topic_modeling
app.layout = html.Div([
dcc.Location(id='url', refresh=False),
html.Div(id='page-content'),
])
# links method
@app.callback(Output('page-content', 'children'),
Input('url', 'pathname'))
def display_page(pathname):
    """Return the layout registered for the requested URL path, or an empty
    string for unknown paths."""
    routes = {
        '/apps/stock_forecasting': stock_forecasting,
        '/apps/world_gdp_analysis': world_gdp_analysis,
        '/apps/home': home,
        '/apps/tweet_analysis': tweet_analysis,
        '/apps/topic_modeling': topic_modeling,
    }
    page = routes.get(pathname)
    return page.layout if page is not None else ''
if __name__ == '__main__':
app.run_server(debug=False) |
986,112 | ff8b3cdc92449cf3a1913c83667f4f3aa472b630 | # Generated by Django 3.1.3 on 2020-11-20 15:34
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make Results.link unique (auto-generated by Django)."""

    dependencies = [
        ('results_app', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='results',
            name='link',
            field=models.CharField(max_length=36, unique=True, verbose_name='Ссылка'),
        ),
    ]
|
986,113 | f68a30fb549c95a1492d70c52ea66909bfac9057 | # consider a problem that online game designers and internet radio providers face:
# This is important in gaming because every player can communicate, and listeners can tune in and get all the data
# It is a typical broadcast problem
# 1. The broadcast host has some information that all the listeners need to receive.
# (simplest solution) Define:
# - Broadcasting host keeps all listeners in a list and sends an individual message to each ---> uncontrolled flooding.
# How it works? (it generates many more unnecessary messages than our first strategy)
# 1. Each message starts with a ttl(time to live) value set to some number greater than or equal to the number of edges between boardcast host and its most distant listener
# 2. Each router gets a copy of the message and passes it on to all of its neighboring routers.
# 3. When the message is passed on the ttl is decreased.
# 4. Each router continues to send copies of the message to all its neighbors until the ttl value reaches 0.
# (Prim's algorithm) Define:
# - Construct a minimum weight spanning tree solution defined as T for G = (V,E), where T is a subset of E that connects all the vertices in V. The sum of the weights of the edges in T is minimized.
# How it works?
# - The broadcast host simply sends a single copy of the broadcast message into the network.
# - Each router forwards the message to any neighbor that is part of the spanning tree, excluding the neighbor that just sent it the message.
#
### Develop the Prim's alorithm ###
# It belongs to a family of algorithms called the "greedy algorithm", because at each step we will choose the cheapest next step(lowest weight in this case).
## The basic idea:
# """ While T is not yet a spanning tree
# Find an edge that is safe to add to the tree
# Add the new edge to T """
## define a safe edge:
# any edge that connects a vertex that is in the spanning tree to a vertex that is not in the spanning tree.
# This ensures that the tree will always remain a tree and therefore have no cycles.
# Similar to Dijkstra's algorithm, it also uses a priority queue to select the next vertex to add to the growing graph.
from pythonds.graphs import PriorityQueue, Graph, Vertex
def prim(G, start):
    """Grow a minimum spanning tree from `start` using Prim's algorithm.

    Each vertex's distance is the cheapest known edge connecting it to the
    growing tree; the pred links trace out the resulting spanning tree.

    BUG FIX: the original used sys.maxsize without importing sys (NameError
    on first call); float('inf') is the idiomatic "unreached" sentinel and
    needs no import.
    """
    pq = PriorityQueue()
    # Every vertex starts unreached and with no predecessor.
    for v in G:
        v.setDistance(float('inf'))
        v.setPred(None)
    start.setDistance(0)
    # Seed the priority queue with all vertices keyed by current distance.
    pq.buildHeap([(v.getDistance(), v) for v in G])
    while not pq.isEmpty():
        # Pull the vertex with the cheapest edge into the tree.
        currentVert = pq.delMin()
        for nextVert in currentVert.getConnections():
            newCost = currentVert.getWeight(nextVert)
            # Relax: only vertices still outside the tree (in pq) may update.
            if nextVert in pq and newCost < nextVert.getDistance():
                nextVert.setPred(currentVert)
                nextVert.setDistance(newCost)
                pq.decreaseKey(nextVert, newCost)
|
986,114 | cc5d404284687ae931868341da3a266027667285 | import numpy as np
from scipy.integrate import solve_ivp
import matplotlib.pyplot as plt
class SI:
    """
    SI epidemic model: susceptibles and infectives only; infectives never
    recover.
    """

    def __init__(self,
                 N: int,
                 r: int,
                 beta: float,
                 I0: int) -> None:
        """
        :param N: total population, fixed
        :param r: number of contacts per person per time
        :param beta: probability of disease transmission in a contact
        :param I0: initial infectives population
        """
        self.N = N
        self.r = r
        self.beta = beta
        self.I0 = I0

    def predict(self, t: np.ndarray) -> np.ndarray:
        """Closed-form logistic solution I(t) of the SI equations."""
        decay = np.exp(-self.r * self.beta * t)
        return self.N * self.I0 / (self.I0 + (self.N - self.I0) * decay)

    def show(self, t_begin: float, t_end: float) -> None:
        """Plot infective and susceptible fractions over [t_begin, t_end]."""
        fig, ax = plt.subplots(1, 1)
        ax.set_title('SI Model\n' +
                     r'$r=%d,\,\beta=%.6f$' % (self.r, self.beta))
        ax.set_xlabel('Time')
        ax.set_ylabel('Fraction')
        ax.set_xlim(t_begin, t_end)
        ax.set_ylim(0, 1)
        times = np.linspace(t_begin, t_end, 100)
        infected = self.predict(times)
        susceptible = self.N - infected
        ax.plot(times, infected / self.N, label='Infectives')
        ax.plot(times, susceptible / self.N, label='Susceptibles')
        plt.legend()
        plt.show()
class SIS:
    """Susceptible-Infective-Susceptible model: infectives recover without
    immunity, so they may be infected again."""

    def __init__(self,
                 N: int,
                 r: int,
                 beta: float,
                 gamma: float,
                 I0: int) -> None:
        """
        :param N: total population, fixed
        :param r: number of contacts per person per time
        :param beta: probability of disease transmission in a contact
        :param gamma: probability of recovery
        :param I0: initial infectives population
        """
        self.N = N
        self.r = r
        self.beta = beta
        self.gamma = gamma
        self.I0 = I0

    def predict(self, t: np.ndarray) -> np.ndarray:
        # Closed-form logistic solution with net growth rate r*beta - gamma
        # and carrying capacity N * (r*beta - gamma) / (r*beta).
        rate = self.r * self.beta - self.gamma
        capacity = self.N * rate / (self.r * self.beta)
        return capacity / (1 + (capacity / self.I0 - 1) * np.exp(-rate * t))

    def show(self, t_begin: float, t_end: float) -> None:
        """Plot infective and susceptible fractions over [t_begin, t_end]."""
        fig, ax = plt.subplots(1, 1)
        ax.set_title('SIS Model\n' +
                     r'$r=%d,\,\beta=%.6f,\,\gamma=%.6f$' % (self.r, self.beta, self.gamma))
        ax.set_xlabel('Time')
        ax.set_ylabel('Fraction')
        ax.set_xlim(t_begin, t_end)
        ax.set_ylim(0, 1)
        times = np.linspace(t_begin, t_end, 100)
        infectives = self.predict(times)
        susceptibles = self.N - infectives
        ax.plot(times, infectives / self.N, label='Infectives')
        ax.plot(times, susceptibles / self.N, label='Susceptibles')
        plt.legend()
        plt.show()
class SIR:
    """Susceptible-Infective-Removed model: recovered individuals gain
    immunity and leave the infection dynamics for good."""

    def __init__(self,
                 N: int,
                 r: int,
                 beta: float,
                 gamma: float,
                 I0: int,
                 R0: int) -> None:
        """
        :param N: total population, fixed
        :param r: number of contacts per person per time
        :param beta: probability of disease transmission in a contact
        :param gamma: probability of recovery
        :param I0: initial infectives population
        :param R0: initial removed population
        """
        self.N = N
        self.r = r
        self.beta = beta
        self.gamma = gamma
        self.I0 = I0
        self.R0 = R0

    def predict(self, t: np.ndarray) -> np.ndarray:
        """Integrate the SIR ODEs; returns an array of shape (3, len(t))
        whose rows are [S, I, R] evaluated at the times in *t*."""
        contact = self.r * self.beta

        def rhs(_, y):
            s, i, _removed = y
            new_infections = contact * i * s / self.N
            recoveries = self.gamma * i
            return np.array([-new_infections,
                             new_infections - recoveries,
                             recoveries])

        solution = solve_ivp(fun=rhs,
                             t_span=(0, np.max(t)),
                             y0=np.array([self.N - self.I0 - self.R0, self.I0, self.R0]),
                             method='RK45',
                             t_eval=t)
        return solution.y

    def show(self, t_begin: float, t_end: float) -> None:
        """Plot S/I/R fractions over [t_begin, t_end]."""
        fig, ax = plt.subplots(1, 1)
        ax.set_title('SIR Model\n' +
                     r'$r=%d,\,\beta=%.6f,\,\gamma=%.6f$' % (self.r, self.beta, self.gamma))
        ax.set_xlabel('Time')
        ax.set_ylabel('Fraction')
        ax.set_xlim(t_begin, t_end)
        ax.set_ylim(0, 1)
        times = np.linspace(t_begin, t_end, 100)
        trajectory = self.predict(times)
        susceptibles, infectives, removed = trajectory[0], trajectory[1], trajectory[2]
        ax.plot(times, infectives / self.N, label='Infectives')
        ax.plot(times, susceptibles / self.N, label='Susceptibles')
        ax.plot(times, removed / self.N, label='Removed')
        plt.legend()
        plt.show()
class SEIR:
    """Susceptible-Exposed-Infective-Removed model: removed individuals are
    immune, and exposed individuals are latent (cannot yet infect others)."""

    def __init__(self,
                 N: int,
                 r: int,
                 beta: float,
                 sigma: float,
                 gamma: float,
                 E0: int,
                 I0: int,
                 R0: int) -> None:
        """
        :param N: total population, fixed
        :param r: number of contacts per person per time
        :param beta: probability of disease transmission in a contact
        :param sigma: probability of exposed -> infectives
        :param gamma: probability of recovery
        :param E0: initial exposed population
        :param I0: initial infectives population
        :param R0: initial removed population
        """
        self.N = N
        self.r = r
        self.beta = beta
        self.sigma = sigma
        self.gamma = gamma
        self.E0 = E0
        self.I0 = I0
        self.R0 = R0
        self.S0 = self.N - E0 - I0 - R0

    def predict(self, t: np.ndarray) -> np.ndarray:
        """Integrate the SEIR ODEs; returns an array of shape (4, len(t))
        whose rows are [S, E, I, R] evaluated at the times in *t*."""
        contact = self.r * self.beta

        def rhs(_, y):
            s, e, i, _removed = y
            new_exposed = contact * i * s / self.N
            onsets = self.sigma * e
            recoveries = self.gamma * i
            return np.array([-new_exposed,
                             new_exposed - onsets,
                             onsets - recoveries,
                             recoveries])

        solution = solve_ivp(fun=rhs,
                             t_span=(0, np.max(t)),
                             y0=np.array([self.S0, self.E0, self.I0, self.R0]),
                             method='RK45',
                             t_eval=t)
        return solution.y

    def show(self, t_begin: float, t_end: float) -> None:
        """Plot S/E/I/R fractions over [t_begin, t_end]."""
        fig, ax = plt.subplots(1, 1)
        ax.set_title('SEIR Model\n' +
                     r'$r=%d,\,\beta=%.6f,\,\sigma=%.6f,\,\gamma=%.6f$' % (self.r, self.beta, self.sigma, self.gamma))
        ax.set_xlabel('Time')
        ax.set_ylabel('Fraction')
        ax.set_xlim(t_begin, t_end)
        ax.set_ylim(0, 1)
        times = np.linspace(t_begin, t_end, 100)
        trajectory = self.predict(times)
        susceptibles, exposed, infectives, removed = (
            trajectory[0], trajectory[1], trajectory[2], trajectory[3])
        ax.plot(times, infectives / self.N, label='Infectives')
        ax.plot(times, exposed / self.N, label='Exposed')
        ax.plot(times, susceptibles / self.N, label='Susceptibles')
        ax.plot(times, removed / self.N, label='Removed')
        plt.legend()
        plt.show()
def main():
    """Demo entry point: run one SEIR scenario and plot it.

    Earlier experiments with the other models are kept below as
    commented-out examples.
    """
    # model = SI(N=1000, r=120, beta=0.005, I0=1)
    # model = SIS(N=1000, r=100, beta=0.005, gamma=0.1, I0=1)
    # model = SIR(N=1000, r=100, beta=0.003, gamma=0.1, I0=1, R0=0)
    # model.show(t_begin=0, t_end=100)
    # model = SIR(N=1000, r=100, beta=0.003, gamma=0.1, I0=20, R0=400)
    # model.show(t_begin=0, t_end=100)
    # model = SEIR(N=1000, r=100, beta=0.003, sigma=0.3, gamma=0.1, E0=40, I0=20, R0=0)
    # model.show(t_begin=0, t_end=100)
    model = SEIR(N=10000, r=20, beta=0.03, sigma=0.1, gamma=0.1, E0=0, I0=1, R0=0)
    model.show(t_begin=0, t_end=140)


if __name__ == '__main__':
    main()
|
986,115 | 6e03f793e2c26189e2231deb0264c2428b7bfe04 | from numpy import *
from scipy.optimize import minimize
from random import random
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.pyplot import *
'''
O SQP é um modela um problema de otimização não linear para uma dada iteração x_k, em um
subproblema de Programação Quadrática (QP), uma vez resolvido esta instância de problema,
esta solução é usada para construir uma nova iteração x_k + 1. Eventualmente x_k
converge para x*.
'''
def registro (x):
    # SLSQP iteration callback: log the current iterate (x0, x1) and its
    # objective value after each iteration.
    print((x[0], x[1], f(x)))
# Plotting window for the surface below.
xMin = -5.0
xMax = 5.0
# NOTE(review): this first `bnds` is dead code — it is overwritten a few
# lines below before ever being used by `minimize`.
bnds=((xMin,xMax),(xMin,xMax))
# Objective, negated so that scipy's minimize() locates the maximum of the
# positive expression.
f = lambda x: (-1.0) * ((9.0-(x[0]-3.0)**2) * ((x[1]**3)/(27.0*sqrt(3.0))))
# Inequality constraints; SLSQP requires each h(x) >= 0 to be feasible.
h1 = lambda x : x[0]/sqrt(3) - x[1]
h2 = lambda x : 6 - (x[0]+ sqrt(3) * x[1])
h3 = lambda x : x[0]
bnds=((0,100),(0,5))
cons=(\
{'type':'ineq','fun':h1},\
{'type':'ineq','fun':h2},\
{'type':'ineq','fun':h3})
# Starting point; `registro` prints the iterate at every SLSQP iteration.
x0 = array([1.0, 1.0])
res= minimize(f,x0,method='SLSQP',bounds=bnds,constraints=cons,callback=registro,options={'disp':True})
# Solution triple (x0*, x1*, f(x*)) used for the plot annotation below.
minimo = [res.x[0],res.x[1],res.fun]
# Visualize the objective surface and mark the optimum found above.
fig = figure()
ax = fig.add_subplot(111, projection='3d')
# BUG FIX: this file does `from numpy import *`, so numpy functions are in
# scope under their bare names; the previous `np.arange` / `np.meshgrid`
# calls raised NameError because the alias `np` was never bound.
X = arange(xMin, xMax, 0.05)
Y = arange(xMin, xMax, 0.05)
X, Y = meshgrid(X, Y)
Z = f([X, Y])
superficie = ax.plot_surface(X, Y, Z, cmap=cm.hot, linewidth=1.0, antialiased=True, alpha=0.8)
# Mark and label the optimum.
ax.scatter(minimo[0], minimo[1], minimo[2], marker=["o", "^"][0], c=["r", "b"][1], s=10)
ax.text(minimo[0], minimo[1], minimo[2], s=str(minimo))
show()
|
986,116 | 4eebd7d87d4773b136f09d993396fca1889c5f48 | import numpy as np
import matplotlib.pyplot as plt
import biorbd_casadi as biorbd
from bioptim import OdeSolver, CostType, Solver, PlotType, SolutionIntegrator, Shooting
from transcriptions import HumanoidOCP, Models
def main():
    """Build, solve, verify and animate a humanoid optimal-control problem.

    Solves with IPOPT, then re-integrates the optimal controls in a single
    shooting pass and overlays both trajectories to check dynamic consistency.
    """
    n_shooting = 30
    # Alternative transcriptions kept for experimentation:
    # ode_solver = OdeSolver.RK4(n_integration_steps=5)
    # ode_solver = OdeSolver.RK4()
    # ode_solver = OdeSolver.IRK()
    ode_solver = OdeSolver.COLLOCATION()
    n_threads = 8
    # for human in Humanoid2D:
    human = Models.HUMANOID_10DOF
    # --- Solve the program --- #
    humanoid = HumanoidOCP(
        biorbd_model_path=human.value,
        n_shooting=n_shooting,
        ode_solver=ode_solver,
        n_threads=n_threads,
        nb_phases=1,
        seed=42,
    )
    add_custom_plots(humanoid.ocp)
    humanoid.ocp.add_plot_penalty(CostType.ALL)
    print("number of states: ", humanoid.ocp.v.n_all_x)
    print("number of controls: ", humanoid.ocp.v.n_all_u)
    humanoid.ocp.print(to_console=True, to_graph=False)
    solv = Solver.IPOPT(show_online_optim=True, show_options=dict(show_bounds=True))
    solv.set_maximum_iterations(1000)
    # NOTE(review): ma57 requires an HSL-enabled IPOPT build.
    solv.set_linear_solver("ma57")
    solv.set_print_level(5)
    sol = humanoid.ocp.solve(solv)
    # --- Show results --- #
    sol.graphs(show_bounds=True)
    sol.print_cost()
    # Re-integrate the solution in one continuous pass to expose drift
    # between the transcription and the true dynamics.
    out = sol.integrate(
        shooting_type=Shooting.SINGLE,
        keep_intermediate_points=False,
        merge_phases=True,
        integrator=SolutionIntegrator.SCIPY_DOP853,
    )
    plt.figure()
    # ocp in blue
    plt.plot(sol.time, sol.states["q"].T, label="ocp", marker=".", color="blue")
    # integration in red
    plt.plot(out.time, out.states["q"].T, label="integrated", marker="+", color="red")
    plt.legend()
    plt.show()
    sol.animate(n_frames=0)
def plot_com(x, nlp):
    """Evaluate the model's center-of-mass position and velocity from the
    state vector *x*, for the live 'com' plot.

    Returns the stacked CoM position and CoM velocity components; the
    leading row of each CasADi result is dropped by the ``[1:, :]`` slice.
    """
    com_func = biorbd.to_casadi_func("CoMPlot", nlp.model.model.CoM, nlp.states["q"].mx, expand=False)
    com_dot_func = biorbd.to_casadi_func(
        "Compute_CoM",
        nlp.model.model.CoMdot,
        nlp.states["q"].mx,
        nlp.states["qdot"].mx,
        expand=False,
    )
    # Map reduced optimization variables back to full model coordinates.
    q = nlp.states["q"].mapping.to_second.map(x[nlp.states["q"].index, :])
    qdot = nlp.states["qdot"].mapping.to_second.map(x[nlp.states["qdot"].index, :])
    return np.concatenate((np.array(com_func(q)[1:, :]), np.array(com_dot_func(q, qdot)[1:, :])))
def plot_qddot(x, u, nlp):
    # Joint accelerations: the qdot-derivative rows of the forward-dynamics
    # output evaluated at state x and control u.
    return np.array(nlp.dynamics_func(x, u, []))[nlp.states["qdot"].index, :]
def plot_contact_acceleration(x, u, nlp):
    """Evaluate the rigid-contact-point acceleration (contact index 0) along
    its constrained axes, whether qddot lives in the states or the controls.
    """
    # qddot may be a state (e.g. collocation with implicit dynamics) or a control.
    qddot = nlp.states["qddot"] if "qddot" in nlp.states else nlp.controls["qddot"]
    acc_x = biorbd.to_casadi_func(
        "acc_0",
        nlp.model.model.rigidContactAcceleration(nlp.states["q"].mx, nlp.states["qdot"].mx, qddot.mx, 0).to_mx(),
        nlp.states["q"].mx,
        nlp.states["qdot"].mx,
        qddot.mx,
        expand=False,
    )
    # Map reduced optimization variables back to full model coordinates.
    q = nlp.states["q"].mapping.to_second.map(x[nlp.states["q"].index, :])
    qdot = nlp.states["qdot"].mapping.to_second.map(x[nlp.states["qdot"].index, :])
    if "qddot" in nlp.states:
        qddot = nlp.states["qddot"].mapping.to_second.map(x[qddot.index, :])
    else:
        qddot = nlp.controls["qddot"].mapping.to_second.map(u[qddot.index, :])
    return np.array(acc_x(q, qdot, qddot)[list(nlp.model.rigidContactAxisIdx(0)), :])
def add_custom_plots(ocp):
    """Register extra live plots (CoM position/velocity and qddot) on every
    phase of *ocp*.

    BUG FIX: the lambdas previously captured the loop variable ``nlp`` by
    reference (late binding), so every phase's plot callback silently used
    the LAST phase's ``nlp``. Binding ``nlp=nlp`` as a default argument
    freezes the correct value for each iteration.
    """
    for i, nlp in enumerate(ocp.nlp):
        ocp.add_plot(
            "com",
            lambda t, x, u, p, nlp=nlp: plot_com(x, nlp),
            phase=i,
            legend=["CoMy", "Comz", "CoM_doty", "CoM_dotz"],
        )
    for i, nlp in enumerate(ocp.nlp):
        ocp.add_plot(
            "qddot",
            lambda t, x, u, p, nlp=nlp: plot_qddot(x, u, nlp),
            phase=i,
            legend=["qddot"],
            plot_type=PlotType.INTEGRATED,
        )


if __name__ == "__main__":
    main()
|
986,117 | 1fbdc52c71560c1eaeccddcbf7caf752567625e5 | # days = int(input("How many days:"))
# Read a decimal integer from stdin and print its binary representation in
# Python's standard "0b..." form. (Dead commented-out days/weeks/years code
# from an earlier exercise removed.)
number = int(input())
print(bin(number))
986,118 | 9dd50a9ed058363baa58fb0f35670cf2628c6479 | from urllib import request
from bs4 import BeautifulSoup
import os
def main():
    """Read words from ./txt/input.txt, fetch their Merriam-Webster
    definitions, display them, and write them to ./txt/output.txt."""
    dic = load_in().split("\n")
    print("Display all words")
    display(dic)
    ans = merriam_webster(dic)
    print("Display definitions")
    display(ans)
    write_in(ans)
def load_in():
    """Return the raw contents of ./txt/input.txt.

    Exits the program with status 1 if the file cannot be read.
    """
    try:
        # Context manager guarantees the handle is closed even if read fails
        # (the old open/read/close sequence leaked the handle on error).
        with open('./txt/input.txt', 'r') as f:
            return f.read()
    except IOError:
        print("cannot find input file")
        exit(1)
def write_in(ans):
    """Write each word and its definition to ./txt/output.txt, one per line.

    :param ans: mapping of word -> definition string
    """
    # 'with' closes the file automatically; the old explicit fp.close()
    # inside the context manager was redundant.
    with open('./txt/output.txt', 'w+') as fp:
        for word, definition in ans.items():
            fp.write(word + " " + definition + "\n")
            print("writing " + word + "'s definitions...")
    print("write done")
def merriam_webster(dic):
    """Look up each word of *dic* on merriam-webster.com.

    Returns a {word: first definition} dict; words whose page yields no
    definition are skipped (a message is printed instead).
    """
    ans = dict()
    for word in dic:
        url = "https://www.merriam-webster.com/dictionary/" + word
        print("requesting " + url + "...")
        req = request.urlopen(url)
        req = req.read()
        print("requested succeeded")
        soup = BeautifulSoup(req, 'html.parser')
        try:
            # First 'dtText' span holds the primary definition; strip the
            # leading ':' marker Merriam-Webster prefixes it with.
            dif = soup.find_all('span', class_="dtText")[0].text.split('\n')[0].strip(':')
            print("word: " + word)
            print("deffinition: " + dif)
            ans[word] = dif
        except:
            # Page layout changed, or the word has no definition entry.
            print("cannot find definitions")
        print()
    return ans
def display(dic):
    """Print each entry of *dic* between horizontal rules.

    Accepts either a dict (prints key concatenated with value) or a plain
    iterable of strings (prints each item).
    """
    print("---------------------------")
    for ele in dic:
        try:
            # Dict input: print the word concatenated with its definition.
            print(ele + dic[ele])
        except (TypeError, KeyError, IndexError):
            # BUG FIX: was a bare `except:` that also swallowed unrelated
            # errors (including KeyboardInterrupt); only indexing failures
            # for non-dict input are expected here.
            print(ele)
    print("---------------------------")


if __name__ == '__main__':
    main()
|
986,119 | 92ef595636fc08ab2da87744dfc249e6c97a3171 | # -*- coding: utf-8 -*-
import time
import scrapy
from copy import deepcopy
import json
import re
import requests
from requests.adapters import HTTPAdapter
from .s3_upload import UploatS3
from urllib.parse import urlparse
from selenium.webdriver import Chrome
# Shared HTTP session with automatic retries, used for annex downloads.
s = requests.Session()
s.mount('http://', HTTPAdapter(max_retries=5))
s.mount('https://', HTTPAdapter(max_retries=5))
class NhcSpider(scrapy.Spider):
    """Crawls policy/interpretation listings from nhc.gov.cn, uploads each
    article page and its annex files to S3, and yields one item per article.

    Non-200 listing responses fall back to a Selenium Chrome driver (the
    site sometimes serves anti-bot pages to plain HTTP clients)."""
    name = 'nhc'
    allowed_domains = ['nhc.gov.cn']
    start_urls = ['http://www.nhc.gov.cn/wjw/zcfg/list.shtml', 'http://www.nhc.gov.cn/wjw/zcjd/list.shtml']
    base_url = 'http://www.nhc.gov.cn'

    def parse(self, response):
        """Parse the first listing page: queue every article link plus the
        remaining paginated listing pages (list_2 ... list_N)."""
        print(response.status)
        if response.status != 200:
            # Fallback: render the page with a real browser.
            driver = Chrome()
            driver.get(response.request.url)
            time.sleep(2)
            href_li = driver.find_elements_by_xpath('//div[@class="list"]//li/a')
            for i in href_li:
                # NOTE(review): Selenium's get_attribute('href') typically
                # returns an absolute URL, so prefixing base_url here may
                # produce a doubled host — confirm against the live site.
                href = self.base_url + i.get_attribute('href')
                # print(href)
                yield scrapy.Request(url=href, callback=self.parse_item)
            # Total page count is embedded in the pager's JS call.
            page = re.findall(r"'page_div',(\d*),", driver.page_source)[0]
            driver.quit()
            print(page)
            for p in range(2, int(page) + 1):
                url = response.request.url.replace('list', 'list_{}'.format(p))
                # print(url)
                yield scrapy.Request(url=url, callback=self.parse_page)
        else:
            href_li = response.xpath('//div[@class="list"]//li/a/@href').extract()
            for i in href_li:
                href = self.base_url + i
                # print(href)
                yield scrapy.Request(url=href, callback=self.parse_item)
            page = re.findall(r"'page_div',(\d*),", response.text)[0]
            print(page)
            for p in range(2, int(page) + 1):
                url = response.request.url.replace('list', 'list_{}'.format(p))
                # print(url)
                yield scrapy.Request(url=url, callback=self.parse_page)

    def parse_page(self, response):
        """Parse one paginated listing page; retry on non-200 responses."""
        if response.status != 200:
            # dont_filter lets the retry bypass scrapy's duplicate filter.
            yield scrapy.Request(url=response.request.url, callback=self.parse_page, dont_filter=True)
        else:
            href_li = response.xpath('//div[@class="list"]//li/a/@href').extract()
            for i in href_li:
                href = self.base_url + i
                # print(href)
                yield scrapy.Request(url=href, callback=self.parse_item)

    def parse_item(self, response):
        """Parse one article page: upload the HTML body and every annex to
        S3, then yield {'main': {...}, 'ass': [...]} describing them."""
        if response.status != 200 or '<title>' not in response.text:
            # Incomplete/blocked page: retry, bypassing the dupe filter.
            yield scrapy.Request(url=response.request.url, callback=self.parse_item, dont_filter=True)
        else:
            item = {'main': {}, 'ass': []}
            item_main = {}
            # Last URL segment doubles as the article's unique id.
            indexid = response.request.url.split('/')[-1]
            body = response.xpath('//div[@class="list"]//text()').extract()
            body_str = ''.join(body)
            # Publication date: first yyyy-mm-dd found in the page body.
            pub_ = re.findall(r'\d{4}-\d{1,2}-\d{1,2}', body_str)
            # if not pub_:
            #     pub_ = re.findall(r'\d{4}-\d{1,2}-\d{1,2}', requests.get(response.request.url).text)
            pub_date = pub_[0]
            # try:
            title = response.xpath('//title/text()').extract_first().strip()
            # except Exception:
            #     title = ''
            # if not title:
            #     print(response.text)
            # else:
            #     title = title
            # text_li = response.xpath('//div[@id="xw_box"]//p/text()').extract()
            # if not text_li:
            #     text_li = response.xpath('//div[@id="xw_box"]//text()').extract()
            # text = ' '.join(text_li).strip()
            # Upload the raw article HTML to S3 under <pub_date>/<indexid>.
            t = title + '.html'
            s3u = UploatS3()
            text_link = s3u.uploat(response.text.encode('utf-8'), t, 'nhc', pub_date.replace('-', '/'), indexid)
            # Collect annex (attachment) names and resolve relative URLs.
            annex_name = response.xpath('//div[@id="xw_box"]//p/a/text()').extract()
            an_url = response.xpath('//div[@id="xw_box"]//p/a/@href').extract()
            annex_url = []
            for a_url in an_url:
                if a_url.startswith('http'):
                    url = a_url
                else:
                    r_url = response.request.url
                    url = '/'.join(r_url.split('/')[:-1]) + '/' + a_url
                annex_url.append(url)
            print(response.request.url)
            print(annex_url)
            if annex_name and annex_url:
                annex_dict = dict(zip(annex_name, annex_url))
                # print('annex name -> url mapping: {}'.format(annex_dict))
                for name, f_url in annex_dict.items():
                    item_ass = {}
                    # linktype 0 = HTML page, 1 = downloadable file.
                    if re.findall(r'.*html', f_url):
                        name = name + '.html'
                        item_ass['linktype'] = 0
                    else:
                        item_ass['linktype'] = 1
                        name = name + '.' + f_url.split('.')[-1]
                    name = name.replace('/', '')
                    # Download via the retrying session, then upload to S3.
                    content = s.get(f_url).content
                    s3u = UploatS3()
                    s3_url = s3u.uploat(content, name, 'nhc', pub_date.replace('-', '/'), indexid)
                    item_ass['annexname'] = name
                    item_ass['policyid'] = indexid
                    item_ass['annexurl'] = s3_url
                    item['ass'].append(item_ass)
            item_main['policyid'] = indexid
            item_main['pubtime'] = pub_date
            item_main['title'] = title
            item_main['policybodyurl'] = text_link
            item_main['isvalid'] = 1
            item_main['resourceid'] = '爬虫'
            item_main['recordid'] = '卫生健康委员会'
            item['main'] = item_main
            # print(item['main']['title'] + ' {}'.format(item['main']['pubtime']))
            # print(item)
            # print(item['ass'])
            yield item
|
986,120 | 62a8c18039dc787519dd4d5a772dfa81abe7a324 | from rest_framework.views import APIView, Response
from myapp.models import User, File, UserBrowseFile, UserKeptFile, Team, TeamMember
from myapp.views import chk_token
from myapp.serializers import TeamMemberSer, TeamSer
class CreateTeam(APIView):
    """POST: create a new team owned by the authenticated user."""

    def post(self, request):
        """Create a team named ``team_name``; the token's user is creator.

        Returns 400 when ``team_name`` is missing, otherwise 200 with the
        serialized team.
        """
        token = request.META.get('HTTP_TOKEN')
        name = request.POST.get('team_name')
        if name is None:
            return Response({
                'info': '参数不完整',
                'code': 400,
            }, status=400)
        # Removed leftover debug `print(token)` — it leaked auth tokens
        # into the server logs.
        # chk_token returns either a user id or a ready-made error Response.
        user_id = chk_token(token)
        if isinstance(user_id, Response):
            return user_id
        u = User.objects.get(pk=user_id)
        t = Team.objects.create(
            creator=u,
            name=name
        )
        return Response({
            'info': 'success',
            'code': 200,
            'data': TeamSer(t).data
        }, status=200)
class JoinTeam(APIView):
    """GET: add the authenticated user to an existing team."""

    def get(self, request):
        """Join team ``team_id``; 400 if missing, 403 if already a member."""
        token = request.META.get('HTTP_TOKEN')
        team_id = request.GET.get('team_id')
        if team_id is None:
            return Response({
                'info': '参数不完整',
                'code': 400,
            }, status=400)
        # chk_token returns either a user id or a ready-made error Response.
        user_id = chk_token(token)
        if isinstance(user_id, Response):
            return user_id
        u = User.objects.get(pk=user_id)
        t = Team.objects.get(pk=team_id)
        # Reject duplicate membership.
        if TeamMember.objects.filter(team=t, member=u):
            return Response({
                'info': '你已经加入该团队',
                'code': 403,
            }, status=403)
        tm = TeamMember.objects.create(team=t, member=u)
        return Response({
            'info': 'success',
            'code': 200,
            'data': TeamMemberSer(tm).data
        }, status=200)
class ExitTeam(APIView):
    """GET: remove the authenticated user from a team they belong to."""

    def get(self, request):
        """Leave team ``team_id``; 400 if missing, 403 if not a member."""
        token = request.META.get('HTTP_TOKEN')
        team_id = request.GET.get('team_id')
        if team_id is None:
            return Response({
                'info': '参数不完整',
                'code': 400,
            }, status=400)
        # chk_token returns either a user id or a ready-made error Response.
        user_id = chk_token(token)
        if isinstance(user_id, Response):
            return user_id
        u = User.objects.get(pk=user_id)
        t = Team.objects.get(pk=team_id)
        tm = TeamMember.objects.filter(team=t, member=u)
        if len(tm) <= 0:
            return Response({
                'info': '未加入该团队 无法退出',
                'code': 403,
            }, status=403)
        # Serialize before deleting so the response can echo the membership.
        res = TeamMemberSer(tm.get()).data
        # t_id = tm.get().team.pk
        tm.get().delete()
        return Response({
            'info': 'success',
            'code': 200,
            'data': res
        }, status=200)
|
986,121 | 3f9d3f46bbb47439615b89e1dc0e2bcf44a074c4 | from .models import *
import random
from django.shortcuts import get_object_or_404
from django.http import Http404
from itertools import chain
from django.contrib.auth import get_user_model
import numpy as np
def get_probes(outline_with_rubrics, assign, strategy='random'):
    """Create probe submissions (calibration copies of real submissions) for
    assignment *assign* and assign them to graders.

    :param outline_with_rubrics: nested outline as built by
        get_outline_with_rubrics; each probe gets one ProbeSubmissionQuestion
        (and sub-question) record per outline entry.
    :param assign: assignment whose peergrading profile configures n_probes.
    :param strategy: 'cyclic' uses instructor+TA graders round-robin;
        'select-ta' uses the designated probe TAs.
        NOTE(review): the default 'random' matches neither branch, leaving
        ``user_ids`` empty so ``idx % len(user_ids)`` raises
        ZeroDivisionError — confirm callers always pass a real strategy.
    :returns: list of {'probe_id': ...} dicts for the created probes.
    :raises Http404: when a ProbeSubmission already exists (or any other
        creation error — the bare except hides the distinction).
    """
    subs = assign.assign_submissions.all()
    User = get_user_model()
    sub_ids = set()
    for sub in subs:
        sub_ids.add(sub.sub_id)
    print(assign.assignment_peergrading_profile.all()[0].n_probes)
    print(assign.assignment_peergrading_profile.all()[0].peerdist)
    probes = []
    user_ids = []
    if strategy == 'cyclic':
        for user in chain(assign.assignment_peergrading_profile.all()[0].instructor_graders.all(), assign.assignment_peergrading_profile.all()[0].ta_graders.all()):
            user_ids.append(user.email)
    if strategy == "select-ta":
        for user in assign.assignment_peergrading_profile.all()[0].ta_for_probes.all():
            user_ids.append(user.email)
    for idx in range(assign.assignment_peergrading_profile.all()[0].n_probes):
        id1 = random.choice(tuple(sub_ids))
        # right now we are choosing random id to give probe and not removing from set
        user_id = user_ids[idx % len(user_ids)]
        sub = get_object_or_404(
            assign.assign_submissions.all(), sub_id=id1)
        sub_ids.remove(id1)
        grader = get_object_or_404(User, email=user_id)
        print(sub)
        print(grader)
        try:
            probe = ProbeSubmission.objects.create(
                parent_sub=sub, probe_grader=grader)
            probes.append({'probe_id': probe.probe_id})
            print("probe id created", probe.probe_id)
        except:
            print(
                '########################### probeSubmission object already exists#################################################')
            raise Http404
        print("should be same probe id ", probe.probe_id)
        # Mirror the outline: one probe question/comment row per question,
        # and one probe sub-question/comment row per sub-question.
        for q in outline_with_rubrics:
            cur_ques = Question.objects.get(ques_id=q['qid'])
            ques_sub = sub.submissions.all().get(question=cur_ques)
            ques = ProbeSubmissionQuestion.objects.create(
                parent_probe_sub=probe, parent_ques=ques_sub)
            ques_com = ProbeSubmissionQuestionComment.objects.create(
                parent_ques=ques)
            for sq in q['sub_questions']:
                sub_ques = SubQuestion.objects.get(sques_id=sq['sqid'])
                sub_ques = ProbeSubmissionSubquestion.objects.create(
                    parent_probe_ques=ques, parent_sub_ques=sub_ques)
                sub_ques_com = ProbeSubmissionSubquestionComment.objects.create(
                    parent_subques=sub_ques)
    return probes
def get_outline_with_rubrics(assign):
    """Build a nested plain-dict outline of an assignment.

    Each entry describes one question (id, mark bounds, rubrics) and its
    sub-questions (id, mark bounds, sub-rubrics).
    """
    outline = []
    for question in assign.questions.all():
        entry = {
            "qid": question.ques_id,
            "max_marks": question.max_marks,
            "min_marks": question.min_marks,
            "rubrics": [
                {
                    "rubric_id": rubric.rubric_id,
                    "marks": rubric.marks,
                    "description": rubric.description,
                }
                for rubric in question.g_rubrics.all()
            ],
            "sub_questions": [],
        }
        for sub_question in question.sub_questions.all():
            entry["sub_questions"].append({
                "sqid": sub_question.sques_id,
                "max_marks": sub_question.max_marks,
                "min_marks": sub_question.min_marks,
                "sub_rubrics": [
                    {
                        "sub_rubric_id": sub_rubric.sub_rubric_id,
                        "marks": sub_rubric.marks,
                        "description": sub_rubric.description,
                    }
                    for sub_rubric in sub_question.g_subrubrics.all()
                ],
            })
        outline.append(entry)
    return outline
def sanitization_check(assign, test_questions):
    """Validate a grading payload (*test_questions*) against the
    assignment's question outline stored in the database.

    Checks that every payload question matches a DB question (and vice
    versa), that sub-question mark bounds add up to their parent's bounds,
    and that awarded rubric/comment marks stay within [min, max].

    :returns: the string "ok" on success, otherwise a list of error strings.
    """
    assign_questions = assign.questions.all()
    questions = []
    error_flag = False
    errors = []
    for q in assign_questions:
        # NOTE(review): other helpers in this module read ``q.ques_id``;
        # this one reads ``q.qid`` — confirm which attribute the model
        # actually declares.
        ques = {
            "qid": q.qid,
            "max_marks": q.max_marks,
            "min_marks": q.min_marks,
            "sub_questions": []
        }
        for sq in q.sub_questions.all():
            ques["sub_questions"].append({
                "sqid": sq.sques_id,
                "max_marks": sq.max_marks,
                "min_marks": sq.min_marks,
            })
        questions.append(ques)
    # [VAL_CHECK_0] All keys must be there
    for tq in test_questions:
        tqid = tq.get('qid', None)
        tminm = tq.get('min_marks', None)
        tmaxm = tq.get('max_marks', None)
        trub = tq.get('rubrics', -1)
        tcom = tq.get('comment', -1)
        tsubq = tq.get('sub_questions', -1)
        # BUG FIX: the old truthiness test (`not (tqid and tminm and tmaxm)`)
        # rejected legitimate zero marks (e.g. min_marks == 0); compare
        # against None explicitly.
        if tqid is None or tminm is None or tmaxm is None \
                or tsubq == -1 or trub == -1 or tcom == -1:
            error_flag = True
            errors.append("Key error in question payload")
    if error_flag:
        return errors
    # [VAL_CHECK_1] All test questions should be in questions and marks should add up properly
    for test_question in test_questions:
        found = False
        # BUG FIX: sq_found used to be unbound (NameError) when a question
        # had no sub-questions, and only reflected the LAST sub-question.
        # It now starts True and stays True only if every sub-question of
        # this payload question matches a DB sub-question.
        sq_found = True
        for ques in questions:
            if ques['qid'] == test_question['qid'] and \
                    ques['max_marks'] == test_question['max_marks'] and \
                    ques['min_marks'] == test_question['min_marks']:
                if ques['sub_questions']:
                    sq_max_marks = 0
                    sq_min_marks = 0
                else:
                    # No sub-questions: the parent's own bounds stand in.
                    sq_max_marks = ques['max_marks']
                    sq_min_marks = ques['min_marks']
                for test_sq in test_question['sub_questions']:
                    one_found = False
                    sq_max_marks += test_sq['max_marks']
                    sq_min_marks += test_sq['min_marks']
                    for sub_ques in ques['sub_questions']:
                        if sub_ques['sqid'] == test_sq['sqid'] and \
                                sub_ques['min_marks'] == test_sq['min_marks'] and \
                                sub_ques['max_marks'] == test_sq['max_marks']:
                            one_found = True
                            break
                    sq_found = sq_found and one_found
                # Sub-question bounds must add up exactly to the parent's.
                if sq_max_marks == ques['max_marks'] and sq_min_marks == ques['min_marks']:
                    found = True
                    # Total awarded marks: comment adjustment + selected
                    # rubrics + all sub-question totals.
                    tq_marks = 0
                    if test_question['comment']['marks']:
                        tq_marks += test_question['comment']['marks']
                    for rb in test_question['rubrics']:
                        if rb['selected']:
                            tq_marks += rb['marks']
                    for tsq in test_question['sub_questions']:
                        tsq_marks = 0
                        if tsq['comment']['marks']:
                            tsq_marks += tsq['comment']['marks']
                        for srb in tsq['sub_rubrics']:
                            if srb['selected']:
                                tsq_marks += srb['marks']
                        if not (tsq['min_marks'] <= tsq_marks <= tsq['max_marks']):
                            error_flag = True
                        tq_marks += tsq_marks
                    if not (ques['min_marks'] <= tq_marks <= ques['max_marks']):
                        error_flag = True
                break
        if not found or not sq_found:
            error_flag = True
            errors.append(test_question['qid'] +
                          " - Payload question Either not found, or some sub-question not found, or marks error")
    # [VAL_CHECK_2] All questions should be in test questions
    for ques in questions:
        found = False
        # BUG FIX: same unbound/last-only issue as in VAL_CHECK_1.
        sq_found = True
        for test_question in test_questions:
            if ques['qid'] == test_question['qid'] and \
                    ques['max_marks'] == test_question['max_marks'] and \
                    ques['min_marks'] == test_question['min_marks']:
                found = True
                for sub_ques in ques['sub_questions']:
                    one_found = False
                    for test_sq in test_question['sub_questions']:
                        if sub_ques['sqid'] == test_sq['sqid'] and \
                                sub_ques['min_marks'] == test_sq['min_marks'] and \
                                sub_ques['max_marks'] == test_sq['max_marks']:
                            one_found = True
                            break
                    sq_found = sq_found and one_found
                break
        if not found or not sq_found:
            error_flag = True
            errors.append(ques['qid'] +
                          " - Database question either not found, or some sub-question not found, or marks error")
    if not error_flag:
        return "ok"
    else:
        return errors
def match_making(P_papers, NP_papers, P_students, NP_students, peerdist):
    """Round-robin assignment of graders to papers.

    Each student grades ``peerdist`` papers in total, split between the
    participating (P) and non-participating (NP) pools: ceil(peerdist/2)
    NP papers and floor(peerdist/2) P papers. Same-pool assignments are
    rotated by index; cross-pool assignments are dealt out round-robin.

    :returns: list of (student, paper) pairs.
    """
    n_np = len(NP_papers)
    n_p = len(P_papers)
    half_up = (peerdist + 1) // 2   # ceil(peerdist / 2)
    half_down = peerdist // 2       # floor(peerdist / 2)
    pairs = []
    # NP students grade NP papers (rotated by student index).
    for idx in range(n_np):
        for offset in range(half_up):
            pairs.append((NP_students[idx], NP_papers[(idx + offset + 1) % n_np]))
    # P students grade NP papers, dealt out in a simple round robin.
    ticket = 0
    for idx in range(n_p):
        for _ in range(half_up):
            pairs.append((P_students[idx], NP_papers[ticket % n_np]))
            ticket += 1
    # P students grade P papers (rotated by student index).
    for idx in range(n_p):
        for offset in range(half_down):
            pairs.append((P_students[idx], P_papers[(idx + offset + 1) % n_p]))
    # NP students grade P papers, round robin.
    ticket = 0
    for idx in range(n_np):
        for _ in range(half_down):
            pairs.append((NP_students[idx], P_papers[ticket % n_p]))
            ticket += 1
    return pairs
#
|
986,122 | ec3a382e2bba261bec4b90d7eb0a5beb42517a8e | import os
import secrets
from conf.settings import BASE_DIR
WORKING_DIR = os.path.join(BASE_DIR, 'temp_files')
def random_str():
    """Return a cryptographically random 32-character hex string (128 bits)."""
    return secrets.token_hex(16)
def get_random_name(extension):
    """Return a path inside WORKING_DIR with a random stem and *extension*."""
    filename = '{}.{}'.format(random_str(), extension)
    return os.path.join(WORKING_DIR, filename)
|
986,123 | 8ac31755e29f27a216333fb3005b67e117c9b82e | from data_structure import TreeNode, build_binary_tree, ds_print
class Solution:
    def flatten(self, root):
        """
        :type root: TreeNode
        :rtype: void Do not return anything, modify root in-place instead.
        """
        # Thread the tree into a right-only "linked list" in preorder:
        # each node's left subtree is spliced in front of its right subtree,
        # which in turn sits in front of the already-flattened tail.
        def _thread(node, tail):
            # Returns the head of the flattened list for `node` + `tail`.
            if node is None:
                return tail
            tail = _thread(node.right, tail)
            tail = _thread(node.left, tail)
            node.left = None
            node.right = tail
            return node
        _thread(root, None)
if __name__ == "__main__":
root = build_binary_tree((((3,), 2, (4,)), 1, (5, (6,))))
Solution().flatten(root)
ds_print(root)
|
986,124 | 1ac35417b642bcd55580447ee8ad3c639b97b8b2 | from PySide2.QtCore import QObject, Signal
from sos.core.database_manager import DatabaseManager
class DatabaseModel(QObject, DatabaseManager):
    """Qt-aware database manager: mixes DatabaseManager with QObject so
    that data changes can be broadcast to listeners via a Qt signal."""

    # Emitted whenever the underlying model data changes; views connect here.
    modelUpdated = Signal()

    def __init__(self):
        # Both bases need explicit initialization under multiple inheritance.
        QObject.__init__(self)
        DatabaseManager.__init__(self)

    def notify_admin(self):
        """Tell connected listeners that the model's data has been updated."""
        self.modelUpdated.emit()
986,125 | 25e2dacc6b6afabcef447b17c3f5c2ff24dae914 |
class LoanParameter(object):
    """Accumulates loan totals and counts per named parameter.

    Instances are interned in the class-level ``registry`` keyed by name,
    so ``create_item`` returns the same shared object for a given name.
    """

    registry = {}

    def __init__(self, name):
        self.name = name
        self.totals_loan = 0   # running sum of loan amounts
        self.count = 1         # number of loans seen so far

    @classmethod
    def create_item(cls, x):
        """Return the interned instance for *x*, creating it on first use."""
        if x not in cls.registry:
            cls.registry[x] = cls(x)
        return cls.registry[x]

    def loan_amount(self, amt):
        """Add *amt* to the running loan total."""
        self.totals_loan += amt

    def set_loan_type(self, type):
        self.type = type

    def set_number_of_loans(self):
        """Record one more loan."""
        self.count += 1

    def get_number_of_loans(self):
        return self.count

    def getLoanAmount(self):
        return self.totals_loan

    def to_tuple(self):
        """Return (name, total loan amount, loan count)."""
        return (self.name, self.totals_loan, self.count)

    def __str__(self):
        return self.name
986,126 | 4c14a10ee7c7c4faea1e6e2608f512ff52a064b8 | #!/usr/bin/env python3
import config

# Entry point: instantiate the project's Daemon and run it until stopped.
config.Daemon().run()
|
986,127 | c6de61af0880ca6dbea614d2a9a29a762e6a639e | # Generated by Django 2.1.3 on 2018-12-04 07:05
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated schema migration: renames the Blog model to BlogModel
    and drops the redundant ``Blog_`` prefix from its field names."""

    dependencies = [
        ('Blog', '0001_initial'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='Blog',
            new_name='BlogModel',
        ),
        migrations.RenameField(
            model_name='blogmodel',
            old_name='Blog_Content',
            new_name='Content',
        ),
        migrations.RenameField(
            model_name='blogmodel',
            old_name='Blog_Date',
            new_name='Date',
        ),
        migrations.RenameField(
            model_name='blogmodel',
            old_name='Blog_Image',
            new_name='Image',
        ),
        migrations.RenameField(
            model_name='blogmodel',
            old_name='Blog_Tittle',
            new_name='Tittle',
        ),
    ]
|
986,128 | 51b684f69b0c6f4da64fde9f8fc587bbdc3a65a1 | #!/usr/bin/env python
import sys
import time
import getopt
import socket
import threading
import numpy as np
from pyMonster import Client
def usage():
print """
Usage: ./run_client_monster.py [options]
Options:
-h, --help show this help message and exit
-a, --address host IP address (default is "localhost")
-p, --port TCP port (default is 50001)
"""
def prompt_usage():
    # Interactive prompt help, printed when input is not recognized.
    print """
Possible commands are:
q, CTRL+C stop server and quit
b <in_bits> broadcast bits to the server
"""
def welcome():
    # Startup banner. NOTE(review): never called from the visible __main__
    # block, which prints "Welcome!" directly instead.
    print """
Welcome to the Monster configuration client!
Use the prompt to send/receive messages.
"""
if __name__ == "__main__":
opts, args = getopt.getopt(
sys.argv[1:],
"ha:p:",
["help", "address", "port"])
host = "localhost"
device = "/dev/aerfx2_0"
port = 50003
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-a", "--address"):
host = a
elif o in ("-p", "--port"):
port = int(a)
print "Welcome!"
print "\tHost", host
print "\tPort", port
my_client = Client(host=host, port=port)
while 1:
try:
cmd = raw_input("> ")
if cmd == 'q':
print "Stopping client...",
my_client.stop()
print "done!"
break
elif len(cmd) > 1 and cmd[0] == 'b':
msg = cmd.split()[1]
my_client.send(msg)
else:
print "Unrecognized command."
prompt_usage()
except KeyboardInterrupt:
print "Interrupted!"
print "Stopping client...",
my_client.stop()
print "done!"
break
|
986,129 | 9eda6dbde478c58ac20fb4831e84784e5600b481 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
**testsQWidgetComponent.py**
**Platform:**
Windows, Linux, Mac Os X.
**Description:**
This module defines units tests for :mod:`manager.qwidgetComponent` module.
**Others:**
"""
#**********************************************************************************************************************
#*** External imports.
#**********************************************************************************************************************
import os
import sys
if sys.version_info[:2] <= (2, 6):
import unittest2 as unittest
else:
import unittest
from PyQt4.QtGui import QApplication
#**********************************************************************************************************************
#*** Internal imports.
#**********************************************************************************************************************
from manager.qwidgetComponent import QWidgetComponentFactory
#**********************************************************************************************************************
#*** Module attributes.
#**********************************************************************************************************************
__author__ = "Thomas Mansencal"
__copyright__ = "Copyright (C) 2008 - 2012 - Thomas Mansencal"
__license__ = "GPL V3.0 - http://www.gnu.org/licenses/"
__maintainer__ = "Thomas Mansencal"
__email__ = "thomas.mansencal@gmail.com"
__status__ = "Production"
__all__ = ["RESOURCES_DIRECTORY", "UI_FILE" , "APPLICATION" , "QWidgetComponentFactoryTestCase"]
RESOURCES_DIRECTORY = os.path.join(os.path.dirname(__file__), "resources")
UI_FILE = os.path.join(RESOURCES_DIRECTORY, "standard.ui")
APPLICATION = QApplication(sys.argv)
#**********************************************************************************************************************
#*** Module classes and definitions.
#**********************************************************************************************************************
class QWidgetComponentFactoryTestCase(unittest.TestCase):
    """
    This class defines :func:`manager.qwidgetComponent.QWidgetComponentFactory` factory units tests methods.
    """
    def testRequiredAttributes(self):
        """
        This method tests presence of required attributes.
        """
        for attribute in ("name",
                          "uiFile",
                          "activated",
                          "initializedUi",
                          "deactivatable"):
            self.assertIn(attribute, dir(QWidgetComponentFactory()))
    def testRequiredMethods(self):
        """
        This method tests presence of required methods.
        """
        for method in ("activate",
                       "deactivate",
                       "initializeUi",
                       "uninitializeUi"):
            self.assertIn(method, dir(QWidgetComponentFactory()))
if __name__ == "__main__":
import manager.tests.utilities
unittest.main()
|
986,130 | 13ec7a492b1b9c900a6d2662ae49cd578d44dc21 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from types import *
import pygame
import os
import cPickle
import random
import gzip
from MAP import mapgen, mazegen, generalmap
from UTIL import queue, const, colors, eztext, load_image, misc
from IMG import images
displayOpts = ['fore', 'back', 'both']
# Eztext courtesy of http://www.pygame.org/project-EzText-920-.html
class Handler():
def __init__(self, cPos):
self.cursorPos = cPos
self.currentTile = 0
self.sideImg, sideRect = load_image.load_image('sidebar.bmp')
self.npcImg = pygame.Surface((30, 30))
self.npcImg.fill(colors.red)
#self.npcImg, npcR = load_image('npc.bmp')
self.drawMode = False
self.cursorColor = colors.white
self.offset = 0
self.numImages = len(mapImages)
self.topX = 0
self.topY = 0
self.visited = []
self.BFSQueue = queue.Queue()
self.mouseAction = 'draw'
self.selecting = False
self.selectBoxPoints = None
self.placeNPC = False
    def drawBox(self, pos, color):
        """Outline one blocksize x blocksize grid cell on the module-level
        ``gridField`` surface; *pos* is the cell's top-left pixel corner."""
        (x, y) = pos
        # Corners of the cell, clockwise from the top-left.
        boxPoints = ((x, y), (x, y + blocksize),
                     (x + blocksize, y + blocksize), (x + blocksize, y))
        pygame.draw.lines(gridField, color, True, boxPoints, 1)
def switchTile(self):
self.currentTile += 1
self.currentTile = self.currentTile % self.numImages
#@tail_call_optimized
def floodFillBFS(self, pieceLocation):
if (pieceLocation is None):
return
(x, y) = pieceLocation
entryList = []
for (Cx, Cy) in const.CARDINALS:
if (myMap.getEntry(x, y) == myMap.getEntry(x + Cx, y + Cy)
and (x + Cx, y + Cy) not in self.visited
and ~self.BFSQueue.has((x + Cy, y + Cy))):
self.BFSQueue.push((x + Cx, y + Cy))
entryList += [(x + Cx, y + Cy)]
self.visited += [(x + Cx, y + Cy)]
else:
entryList += [None]
if (entryList == [None, None, None, None]):
return (x, y)
else:
return [(x, y)] + [self.floodFillBFS(self.BFSQueue.pop())] \
+ [self.floodFillBFS(self.BFSQueue.pop())] \
+ [self.floodFillBFS(self.BFSQueue.pop())] \
+ [self.floodFillBFS(self.BFSQueue.pop())]
def floodFill(self, tile, start):
(x, y) = start
x = x / blocksize
y = y / blocksize
self.visited = [(x, y)]
self.BFSQueue.reset()
floodArea = misc.flatten(self.floodFillBFS((x, y)))
floodArea = list(set(floodArea))
for entry in floodArea:
(x, y) = entry
myMap.setEntry(x, y, tile)
def getInput(self, msg):
#get file name
input = None
txtbx = eztext.Input(maxlength=300, color=(255, 0, 0), prompt=msg)
inputWindow = pygame.Surface((1200, 100))
while input is None:
# make sure the program is running at 30 fps
clock.tick(30)
# events for txtbx
events = pygame.event.get()
# process other events
for event in events:
# close it x button si pressed
if event.type == pygame.QUIT:
os.sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
input = txtbx.getValue()
# clear the screen
inputWindow.fill((25, 25, 25))
# update txtbx
txtbx.update(events)
# blit txtbx on the sceen
txtbx.draw(inputWindow)
gridField.blit(inputWindow, (100, 100))
screen.blit(gridField, (0, 0))
# refresh the display
pygame.display.flip()
return input
def fillChest(self):
menuBox = pygame.Surface((150, 250))
itemsList = range(86, 102) + [112, 113, 114, 117, 117]
for i in range(len(itemsList)):
menuBox.blit(mapImages[itemsList[i]],
(15 + ((i) % 4) * blocksize,
50 + ((i) / 4) * blocksize))
chestItems = []
while True:
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
return chestItems
if event.type == pygame.MOUSEBUTTONDOWN:
(mx, my) = event.pos
if 115 <= mx < 235 and 150 <= my < 330:
itemNum = itemsList[(mx - 115) / blocksize
+ (my - 150) / blocksize * 4]
if itemNum in range(86, 99):
chestItems.append((itemNum - const.FRUIT1, 1))
elif itemNum == const.GOLD:
chestItems.append((itemNum - const.FRUIT1,
int(self.getInput('Enter amount of gold: '))))
elif (itemNum == const.SPELLBOOK
or itemNum == const.PARCHMENT):
chestItems.append((itemNum - const.FRUIT1,
int(self.getInput('Enter spell number: '))))
elif itemNum in [112, 113, 114]:
chestItems.append((itemNum - const.FRUIT1,
int(self.getInput("Enter weapon level: ")),
[int(self.getInput("Enter plus Str: ")),
int(self.getInput("Enter plus Int: ")),
int(self.getInput("Enter plus Dex "))]))
elif itemNum in [const.SHIELD, const.BPLATE,
const.HELMET]:
chestItems.append((itemNum - const.FRUIT1,
int(self.getInput("Enter armor level: ")),
int(self.getInput("Enter resist: "))))
for item in chestItems:
menuBox.blit(mapImages[item[0] + const.FRUIT1],
(len(chestItems) * blocksize, 15))
screen.blit(menuBox, (100, 100))
pygame.display.flip()
def getFilename(self):
return self.getInput('Enter filename: ')
def saveMap(self):
filename = self.getFilename()
ball = myMap.getMapBall()
try:
save = gzip.GzipFile(os.getcwd() + '/MAP/LEVELS/' + filename, 'wb')
cPickle.dump(ball, save)
save.close()
except IOError, message:
print 'Cannot load map:', filename
return
def loadMap(self):
filename = self.getFilename()
try:
save = gzip.GzipFile(os.getcwd() + '/MAP/LEVELS/' + filename, 'rb')
ball = cPickle.load(save)
save.close()
myMap.installBall(ball)
except IOError, message:
print 'Cannot load map:', filename
return
def generateMap(self, rooms):
if rooms > 0:
MG = mapgen.Generator(myMap.DIM)
MG.generateMap(rooms)
myMap.installBall(MG.getMapBall())
else:
MG = mazegen.Generator(myMap.DIM, 1)
MG.generateMap()
myMap.installBall(MG.getMapBall())
def place(self, x, y, tile):
if self.placeNPC:
myMap.NPCs.append(((x, y), self.getInput('Enter NPC type: '),
self.getInput('Enter message: ')))
else:
if self.currentTile == const.CHEST:
myMap.addChest((x, y), self.fillChest())
level = None
elif self.currentTile == const.ITEMSDOOR:
level = int(self.getInput('Itemshop level: '))
elif self.currentTile == const.ARMRYDOOR:
level = int(self.getInput('Armory level: '))
elif self.currentTile == const.BLKSMDOOR:
level = int(self.getInput('Blacksmith level: '))
elif self.currentTile == const.MAGICDOOR:
level = int(self.getInput('Magicshop level: '))
else:
level = None
myMap.setEntry(x, y, tile, level)
def removeNPC(self, x, y):
for n in myMap.NPCs:
if n[0] == (x, y):
myMap.NPCs.remove(n)
return
def event_handler(self, event):
(x, y) = self.cursorPos
self.drawBox((x, y), colors.black)
if event.key == pygame.K_RIGHT:
if (x + blocksize < myMap.DIM * blocksize):
x += blocksize
if (x < myMap.DIM * blocksize and x == 20 * blocksize
+ self.topX * blocksize):
self.topX += 1
if event.key == pygame.K_LEFT:
if (x - blocksize >= 0):
x -= blocksize
if x > 0 and x == self.topX * blocksize:
self.topX -= 1
if event.key == pygame.K_UP:
if (y - blocksize >= 0):
y -= blocksize
if y > 0 and y == self.topY * blocksize:
self.topY -= 1
if event.key == pygame.K_DOWN:
if (y + blocksize < myMap.DIM * blocksize):
y += blocksize
if (y < myMap.DIM * blocksize
and y == 20 * blocksize + self.topY * blocksize):
self.topY += 1
if event.key == pygame.K_t:
self.switchTile()
if event.key == pygame.K_SPACE:
self.place(x / blocksize, y / blocksize, self.currentTile)
if event.key == pygame.K_ESCAPE:
os.sys.exit()
if event.key == pygame.K_d:
self.drawMode = not self.drawMode
if event.key == pygame.K_s:
self.saveMap()
if event.key == pygame.K_l:
self.loadMap()
if event.key == pygame.K_f:
self.floodFill(self.currentTile, (x, y))
if event.key == pygame.K_g:
self.generateMap(int(self.getInput('Enter number of rooms: ')))
if event.key == pygame.K_c:
myMap.changeDimensions(int(self.getInput('Enter new dimension: ')))
if event.key == pygame.K_e:
self.offset += 32
if self.offset == 128:
self.offset = 0
if event.key == pygame.K_x:
self.removeNPC(x / blocksize, y / blocksize)
if event.key == pygame.K_n:
print 'NPCs: '
print myMap.NPCs
if self.drawMode:
myMap.setEntry(x / blocksize, y / blocksize, self.currentTile)
self.cursorPos = (x, y)
def select(self, start):
startX, startY = start
endX = startX
endY = startY
self.selectBoxPoints = None
while True:
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONUP and event.button == 1:
self.selectBox = self.selectBoxPoints
return (endX, endY)
(tempX, tempY) = pygame.mouse.get_pos()
if tempX > 600:
tempX = 600
pygame.mouse.set_pos([tempX, tempY])
if tempY > 600:
tempY = 600
pygame.mouse.set_pos([tempX, tempY])
endX = tempX / blocksize + 1
endY = tempY / blocksize + 1
self.updateDisplay()
self.selectBoxPoints = ((startX * blocksize,
startY * blocksize),
(startX * blocksize,
(startY + (endY - startY))
* blocksize),
(endX * blocksize, endY * blocksize),
((startX + (endX - startX))
* blocksize, startY * blocksize))
pygame.draw.lines(gridField, colors.red, True,
self.selectBoxPoints, 1)
screen.blit(gridField, (0, 0))
pygame.display.flip()
def move(self, start):
(p1, p2, p3, p4) = self.selectBoxPoints
sX, sY = start
xDim = (p3[0] - p1[0]) / blocksize
yDim = (p3[1] - p1[1]) / blocksize
(tempX, tempY) = pygame.mouse.get_pos()
xOffset = (tempX / blocksize) - (p1[0] / blocksize)
yOffset = (tempY / blocksize) - (p1[1] / blocksize)
oldTopX = ((tempX / blocksize) - xOffset)
oldTopY = ((tempY / blocksize) - yOffset)
newTopX = None
newTopY = None
selectionImg = pygame.Surface((xDim * blocksize, yDim * blocksize))
emptyImg = pygame.Surface((xDim * blocksize, yDim * blocksize))
for i in range(xDim):
for j in range(yDim):
selectionImg.blit(mapImages[myMap.getEntry(oldTopX + i,
oldTopY + j)], (i * blocksize, j * blocksize))
emptyImg.blit(mapImages[0], (i * blocksize, j * blocksize))
while True:
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONUP and event.button == 1:
if newTopX is None or newTopY is None:
return
else:
myMap.mapMove((sX / blocksize, sY / blocksize),
(xDim, yDim), (newTopX, newTopY))
return
elif (event.type == pygame.MOUSEBUTTONDOWN
and event.button == 3):
return
elif event.type == pygame.MOUSEMOTION:
(tempX, tempY) = pygame.mouse.get_pos()
# upper left hand corner
newTopX = ((tempX / blocksize) - xOffset)
newTopY = ((tempY / blocksize) - yOffset)
oldTopX = p1[0] / blocksize
oldTopY = p1[1] / blocksize
if oldTopX == newTopX and oldTopY == newTopY:
pass
elif (0 <= newTopX * blocksize and (newTopX +
((p3[0] - p1[0]) / blocksize))
* blocksize < 1200 and
0 <= newTopX * blocksize and
(newTopY + ((p3[1] - p1[1]) / blocksize))
* blocksize < 1200):
self.selectBoxPoints = (
(newTopX * blocksize, newTopY * blocksize),
(newTopX * blocksize, (newTopY + ((p3[1] - p1[1])
/ blocksize)) * blocksize),
((newTopX + ((p3[0] - p1[0]) / blocksize))
* blocksize, (newTopY + ((p3[1] - p1[1])
/ blocksize)) * blocksize),
((newTopX + ((p3[0] - p1[0]) / blocksize))
* blocksize, newTopY * blocksize))
(p1, p2, p3, p4) = self.selectBoxPoints
self.updateDisplay()
gridField.blit(emptyImg, (sX * blocksize,
sY * blocksize))
gridField.blit(selectionImg, (newTopX * blocksize,
newTopY * blocksize))
pygame.draw.lines(gridField, colors.red, True,
self.selectBoxPoints, 1)
screen.blit(gridField, (0, 0))
pygame.display.flip()
def mouseHandler(self, e):
(mx, my) = e.pos
if (0 <= mx < gridField.get_width()
and 0 <= my < gridField.get_height()):
if e.button == 1:
if self.mouseAction == 'draw':
if self.placeNPC:
myMap.NPCs.append(((mx / blocksize, my / blocksize),
self.getInput('Enter NPC type: '),
self.getInput('Enter message: ')))
else:
if self.currentTile == const.CHEST:
myMap.addChest((mx / blocksize, my / blocksize),
self.fillChest())
level = None
elif self.currentTile == const.ITEMSDOOR:
level = int(self.getInput('Itemshop level: '))
elif self.currentTile == const.ARMRYDOOR:
level = int(self.getInput('Armory level: '))
elif self.currentTile == const.BLKSMDOOR:
level = int(self.getInput('Blacksmith level: '))
elif self.currentTile == const.MAGICDOOR:
level = int(self.getInput('Magicshop level: '))
else:
level = None
myMap.setEntry(mx / blocksize, my / blocksize,
self.currentTile, level)
self.cursorPos = ((mx / blocksize) * blocksize,
(my / blocksize) * blocksize)
elif self.mouseAction == 'select':
if self.selectBoxPoints is not None:
(p1, p2, p3, p4) = self.selectBoxPoints
if p1[0] <= mx < p3[0] and p1[1] <= my < p3[1]:
self.move((p1[0], p1[1]))
else:
self.selection = ((mx / blocksize,
my / blocksize), self.select((mx / blocksize,
my / blocksize)))
else:
self.selection = ((mx / blocksize, my / blocksize),
self.select((mx / blocksize, my / blocksize)))
elif e.button == 3:
pass
elif (gridField.get_width() + 50 <= mx < gridField.get_width() + 80
and 170 <= my < 200):
self.placeNPC = not self.placeNPC
elif (gridField.get_width() + 50 <= mx < gridField.get_width() + 170
and 200 <= my < 440):
if e.button == 1:
self.currentTile = (self.offset + (mx - gridField.get_width()
- 45) / blocksize + (my - 200) / blocksize * 4)
elif e.button == 3:
myMap.defaultBkgd = (self.offset + (mx - gridField.get_width()
- 45) / blocksize + (my - 200) / blocksize * 4)
elif (gridField.get_width() + 65 <= mx < gridField.get_width() + 95
and 500 <= my < 530):
self.offset -= 32
if self.offset < 0:
self.offset = 96
elif (gridField.get_width() + 95 <= mx < gridField.get_width() + 125
and 500 <= my < 530):
self.offset += 32
if self.offset == 128:
self.offset = 0
elif (gridField.get_width() + 50 <= mx < gridField.get_width() + 80
and 530 <= my < 560):
myMap.mapCut()
elif (gridField.get_width() + 80 <= mx < gridField.get_width() + 110
and 530 <= my < 560):
myMap.mapCopy(self.selection)
elif (gridField.get_width() + 110 <= mx < gridField.get_width() + 140
and 530 <= my < 560):
myMap.mapPaste()
elif (gridField.get_width() + 65 <= mx < gridField.get_width() + 95
and 560 <= my < 590):
self.mouseAction = 'draw'
elif (gridField.get_width() + 95 <= mx < gridField.get_width() + 125
and 560 <= my < 590):
self.mouseAction = 'select'
def mouseUpdate(self):
(mx, my) = pygame.mouse.get_pos()
if 650 <= mx < 770 and 200 <= my < 440:
boxPoints = ((mx, my), (mx, my + blocksize),
(mx + blocksize, my + blocksize),
(mx + blocksize, my))
pygame.draw.lines(screen, colors.red, True, boxPoints, 1)
def updateDisplay(self):
gridField.fill(colors.black)
for i in range(self.topX, self.topX + 40):
for j in range(self.topY, self.topY + 40):
if myMap.getEntry(i, j) in range(24, 86):
gridField.blit(mapImages[myMap.defaultBkgd],
((i - self.topX) * blocksize, (j - self.topY) * blocksize))
gridField.blit(mapImages[myMap.getEntry(i, j)],
((i - self.topX) * blocksize, (j - self.topY) * blocksize))
if (i, j) == myMap.heroStart:
gridField.blit(mapImages[const.HEROSTART],
((i - self.topX) * blocksize,
(j - self.topY) * blocksize))
if myMap.shops is not None:
for s in myMap.shops:
if myMap.shops[s][0] == 'itemshop':
(sX, sY) = s
gridField.blit(mapImages[128],
(sX * blocksize - blocksize,
sY * blocksize - (2 * blocksize)))
if myMap.shops[s][0] == 'magicshop':
(sX, sY) = s
gridField.blit(mapImages[129],
(sX * blocksize - blocksize,
sY * blocksize - (2 * blocksize)))
if myMap.shops[s][0] == 'blacksmith':
(sX, sY) = s
gridField.blit(mapImages[130],
(sX * blocksize - blocksize,
sY * blocksize - (2 * blocksize)))
if myMap.shops[s][0] == 'armory':
(sX, sY) = s
gridField.blit(mapImages[131],
(sX * blocksize - blocksize,
sY * blocksize - (2 * blocksize)))
if myMap.shops[s][0] == 'tavern':
(sX, sY) = s
gridField.blit(mapImages[132],
(sX * blocksize - blocksize,
sY * blocksize - (3 * blocksize)))
for n in myMap.NPCs:
(x, y) = n[0]
gridField.blit(self.npcImg, ((x - self.topX) * blocksize,
(y - self.topY) * blocksize))
(x, y) = self.cursorPos
x = x - self.topX * blocksize
y = y - self.topY * blocksize
if self.drawMode:
self.cursorColor = colors.yellow
else:
self.cursorColor = colors.white
if self.selectBoxPoints is not None:
pygame.draw.lines(gridField, colors.red, True,
self.selectBoxPoints, 1)
boxPoints = ((x, y), (x, y + blocksize),
(x + blocksize, y + blocksize), (x + blocksize, y))
pygame.draw.lines(gridField, self.cursorColor, True, boxPoints, 1)
self.sideImg, sideRect = load_image.load_image('sidebar.bmp')
if self.placeNPC:
self.sideImg.blit(self.npcImg, (50, 50))
else:
self.sideImg.blit(mapImages[self.currentTile], (50, 50))
self.sideImg.blit(mapImages[myMap.defaultBkgd], (50, 130))
if self.mouseAction == 'draw':
self.sideImg.blit(images.editorImages[5], (50, 80))
else:
self.sideImg.blit(images.editorImages[6], (50, 80))
self.sideImg.blit(self.npcImg, (50, 170))
for i in range(8):
for j in range(4):
self.sideImg.blit(mapImages[self.offset + j + (4 * i)],
(50 + j * blocksize, 200 + (i * blocksize)))
toolBox = pygame.Surface((90, 90))
toolBox.blit(images.editorImages[0], (15, 0))
toolBox.blit(images.editorImages[1], (45, 0))
toolBox.blit(images.editorImages[2], (0, 30))
toolBox.blit(images.editorImages[3], (30, 30))
toolBox.blit(images.editorImages[4], (60, 30))
toolBox.blit(images.editorImages[5], (15, 60))
toolBox.blit(images.editorImages[6], (45, 60))
self.sideImg.blit(toolBox, (50, 500))
(x, y) = self.cursorPos
entryBox = pygame.Surface((150, 30))
entryBox.fill(colors.black)
if pygame.font:
font = pygame.font.SysFont("arial", 20)
entry = font.render(str(myMap.getEntry((x + self.topX) / blocksize,
(y + self.topY) / blocksize)) + ' ' + 'x:' + str(x) + ' y:'
+ str(y), 1, colors.white, colors.black)
entryBox.blit(entry, (0, 0))
self.sideImg.blit(entryBox, (80, 50))
if self.drawMode:
msgBox = pygame.Surface((186, 60))
msgBox.fill(colors.grey)
if pygame.font:
font = pygame.font.SysFont("arial", 24)
msgText = font.render('draw', 1, colors.red, colors.yellow)
msgBox.blit(msgText, (10, 10))
self.sideImg.blit(msgBox, (50, 100))
#pygame.display.flip()
screen.blit(self.sideImg, (1200, 0))
# Set the height and width of the screen
size = [1400, 800]
screen = pygame.display.set_mode(size)
images.load()
mapImages = images.mapImages
pygame.init()
pygame.key.set_repeat(50, 100)
clock = pygame.time.Clock()
cursorPos = (0, 0)
myMap = generalmap.edMap()
myHandler = Handler(cursorPos)
blocksize = 30
gridField = pygame.Surface([2 * const.DIM * blocksize,
2 * const.DIM * blocksize])
os.sys.setrecursionlimit(15000)
def main():
while True:
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
myHandler.event_handler(event)
if event.type == pygame.MOUSEBUTTONDOWN:
# or event.type == pygame.MOUSEBUTTONUP:
myHandler.mouseHandler(event)
if event.type == pygame.QUIT:
os.sys.exit()
myHandler.mouseUpdate()
myHandler.updateDisplay()
screen.blit(gridField, (0, 0))
pygame.display.flip()
if __name__ == '__main__':
main()
|
986,131 | 803a88f791f56e99c0f79d4bfde827304cc5f5f5 | import os
import sys
from sqlalchemy import Column, ForeignKey, Integer, String, Table
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
from eralchemy import render_er
Base = declarative_base()
# Association table for a user's favourite characters.
# Bug fix: the character model's __tablename__ is 'characters', but the
# original foreign key referenced a non-existent 'people.id' table, which
# breaks FK resolution (and the User.characters secondary join).
fav_characters = Table('user_people', Base.metadata,
    Column('user_id', Integer, ForeignKey('users.id')),
    Column('person_id', Integer, ForeignKey('characters.id'))
)
fav_planets = Table('user_planets', Base.metadata,
Column('user_id', Integer, ForeignKey('users.id')),
Column('planet_id', Integer, ForeignKey('planets.id'))
)
fav_starships = Table('user_starships', Base.metadata,
Column('user_id', Integer, ForeignKey('users.id')),
Column('starship_id', Integer, ForeignKey('starships.id'))
)
class User(Base):
    """An application user with many-to-many favourite characters,
    planets and starships (via the association tables declared above)."""
    __tablename__ = 'users'
    # Columns for the users table.
    # Notice that each column is also a normal Python instance attribute.
    id = Column(Integer, unique=True, primary_key=True)
    username = Column(String(100), unique=True, nullable=False)
    # NOTE(review): stored as a plain String(20) -- should hold a hash,
    # never a plaintext password; confirm with the auth layer.
    password = Column(String(20), nullable=False)
    characters = relationship("Person", secondary=fav_characters)
    planets = relationship("Planet", secondary=fav_planets)
    starships = relationship("Starship", secondary=fav_starships)
class Person(Base):
    """A Star Wars character, stored in the 'characters' table."""
    __tablename__ = 'characters'
    # Columns for the characters table.
    # Notice that each column is also a normal Python instance attribute.
    id = Column(Integer, unique=True, primary_key=True)
    name = Column(String(100), nullable=False)
    height = Column(String(10))
    mass = Column(String(10))
    hair_color = Column(String(20))
    skin_color = Column(String(20))
    eye_color = Column(String(20))
    birth_year = Column(String(10))
    gender = Column(String(10))
    # NOTE(review): planets.id is an Integer primary key, yet this FK column
    # is declared String(200) -- confirm the intended column type.
    homeworld_id = Column(String(200), ForeignKey("planets.id"))
    # Many-to-one link to the home planet; expects Planet.characters to
    # declare back_populates="homeworld".
    homeworld = relationship("Planet", back_populates="characters")
class Planet(Base):
    """A Star Wars planet.

    Bug fix: ``Person.homeworld`` declares ``back_populates="characters"``,
    so this side must reciprocate with ``back_populates="homeworld"``;
    without it SQLAlchemy raises an ArgumentError ("relationship ... is not
    mutual") the first time the mappers are configured.
    """
    __tablename__ = 'planets'
    # Each column is also a normal Python instance attribute.
    id = Column(Integer, unique=True, primary_key=True)
    # NOTE(review): uselist=False models a one-to-one link; if a planet can
    # be home to several characters this should be a plain collection.
    characters = relationship("Person", uselist=False,
                              back_populates="homeworld")
    name = Column(String(100), nullable=False)
    rotation_period = Column(String(100))
    orbital_period = Column(String(100))
    diameter = Column(String(100))
    climate = Column(String(200))
    gravity = Column(String(100))
    terrain = Column(String(200))
    surface_water = Column(String(100))
    population = Column(String(100))
class Starship(Base):
    """A Star Wars starship."""
    __tablename__ = 'starships'
    # Each column is also a normal Python instance attribute.
    id = Column(Integer, unique=True, primary_key=True)
    name = Column(String(100), nullable=False)
    model = Column(String(100), nullable=False)
    manufacturer = Column(String(100))
    cost_in_credits = Column(String(200))
    length = Column(String(100))
    max_atmosphering_speed = Column(String(100))
    crew = Column(String(100))
    passengers = Column(String(100))
    cargo_capacity = Column(String(100))
    consumables = Column(String(100))
    hyperdrive_rating = Column(String(100))
    mglt = Column(String(100))
    starship_class = Column(String(200))

    def to_dict(self):
        """Return a mapping of every mapped column name to its value.

        The original returned an empty placeholder dict; serializing the
        actual columns makes the method usable by API callers.
        """
        return {column.name: getattr(self, column.name)
                for column in self.__table__.columns}
## Draw from SQLAlchemy base
render_er(Base, 'diagram.png') |
986,132 | 47f2bef63b1a952feffeabfe7fad38a1bef4885a | """
|**********************************************************************;
* Project : VYPcode compiler 2019
* Authors : Michal Horky (xhorky23), Matus Mucka (xmucka03)
|**********************************************************************;
"""
from tests.testBase import TestBaseCases
class BasicWhile(TestBaseCases.TestBase):
STDIN = ""
source_code = """
void main(void) {
int a;
a = 10;
while (0 < a) {
print(a);
int c;
a = a - 1;
}
}
"""
STDOUT = "10987654321"
|
986,133 | 2e530849050a210c5857bc2ad264cd9323533513 | from click import echo, style
from json_database.database_api import DatabaseAPI
from json_database.recognizer.DatabaseListener import DatabaseListener
from json_database.recognizer.DatabaseParser import DatabaseParser
def value_converter(value):
    """Convert a raw query token to int, float, bool, or a plain string.

    Tries int first, then float; the literals 'true'/'false' become
    booleans; anything else is returned with surrounding double quotes
    stripped.
    """
    for cast in (int, float):
        try:
            return cast(value)
        except ValueError:
            pass
    booleans = {'true': True, 'false': False}
    if value in booleans:
        return booleans[value]
    return value.strip('"')
def execute_operator(operator, value1, value2):
    """Apply a SQL-style comparison *operator* to two values.

    Supported operators: =, !=, >, <, >=, <=.  Any unrecognized
    operator compares as False.
    """
    comparisons = {
        '=': lambda a, b: a == b,
        '!=': lambda a, b: a != b,
        '>': lambda a, b: a > b,
        '<': lambda a, b: a < b,
        '>=': lambda a, b: a >= b,
        '<=': lambda a, b: a <= b,
    }
    compare = comparisons.get(operator)
    return compare(value1, value2) if compare is not None else False
class CommandParser(DatabaseListener):
def __init__(self, database_path):
self.api = DatabaseAPI(database_path)
def exitSelectStatement(self, ctx):
column_names, row_ids, table_name = [], [], ''
is_where = False
for child in ctx.children:
if isinstance(child, DatabaseParser.ColumnNameContext):
column_names.append(child.getText())
elif isinstance(child, DatabaseParser.TableNameContext):
table_name = child.getText()
if self.api.get_table(table_name) is None:
echo(style('Table with name %s does not exist.' % table_name, fg='red'), err=True)
return
elif isinstance(child, DatabaseParser.WhereContext):
is_where = True
table = self.api.get_table(table_name)
for i in range(len(table)):
if self.is_acceptable(table[i], child):
row_ids.append(i)
self.api.print_table_rows(table_name, column_names, row_ids, is_where)
def exitCreateStatement(self, ctx):
table_name = ctx.getChild(1).getText()
if self.api.get_table(table_name) is not None:
echo(style('Table with name %s already exists.' % table_name, fg='blue'))
else:
self.api.create_table(table_name)
def exitInsertStatement(self, ctx):
table_name = ctx.getChild(1).getText()
if self.api.get_table(table_name) is None:
echo(style('Table with name %s does not exist. Creating...' % table_name, fg='blue'))
self.api.create_table(table_name)
column_names, values = [], []
for child in ctx.children:
if isinstance(child, DatabaseParser.ColumnNameContext):
column_names.append(child.getText())
elif isinstance(child, DatabaseParser.ValueContext):
values.append(value_converter(child.getText()))
self.api.insert_row(table_name, column_names, values)
def exitUpdateStatement(self, ctx):
table_name = ctx.getChild(0).getText()
if self.api.get_table(table_name) is None:
echo(style('Table with name %s does not exist. Nothing to update.' % table_name, fg='red'), err=True)
return
column_names, values, row_ids = [], [], []
is_where = False
for child in ctx.children:
if isinstance(child, DatabaseParser.ColumnNameContext):
column_names.append(child.getText())
elif isinstance(child, DatabaseParser.ValueContext):
values.append(value_converter(child.getText()))
elif isinstance(child, DatabaseParser.WhereContext):
is_where = True
table = self.api.get_table(table_name)
for i in range(len(table)):
if self.is_acceptable(table[i], child):
row_ids.append(i)
self.api.update_rows(table_name, column_names, values, row_ids, is_where)
def exitDropStatement(self, ctx):
table_names = []
for child in ctx.children:
if isinstance(child, DatabaseParser.TableNameContext):
table_names.append(child.getText())
self.api.drop_tables(table_names)
def exitDeleteStatement(self, ctx):
table_name = ctx.getChild(1).getText()
if self.api.get_table(table_name) is None:
echo(style('Table with name %s does not exist. Nothing to delete.' % table_name, fg='red'), err=True)
return
row_ids = []
is_where = False
for child in ctx.children:
if isinstance(child, DatabaseParser.WhereContext):
is_where = True
table = self.api.get_table(table_name)
for i in range(len(table)):
if self.is_acceptable(table[i], child):
row_ids.append(i)
self.api.delete_table_rows(table_name, row_ids, is_where)
    def is_acceptable(self, table_row, ctx):
        """Evaluate a parsed WHERE clause subtree against one table row.

        Grammar shape (inferred from the parser contexts -- TODO confirm
        against the ANTLR grammar): a WhereContext is a WhereANDContext
        optionally followed by 'OR' and another WHERE subtree; a
        WhereANDContext is ``column op value`` optionally followed by
        'AND' and a trailing WhereANDContext.  Rows missing the
        referenced column never match.
        """
        if isinstance(ctx, DatabaseParser.WhereContext):
            # OR node: child 0 is the left conjunction; child 2, when
            # present, is the right-hand WHERE subtree.
            left = self.is_acceptable(table_row, ctx.getChild(0))
            if ctx.getChildCount() >= 3:
                right = self.is_acceptable(table_row, ctx.getChild(2))
                return left or right
            return left
        elif isinstance(ctx, DatabaseParser.WhereANDContext):
            # AND node: children are column, operator, value, then
            # optionally 'AND' plus a trailing WhereANDContext (child -1).
            column = value_converter(ctx.getChild(0).getText())
            value = value_converter(ctx.getChild(2).getText())
            operator = ctx.getChild(1).getText()
            left = column in table_row.keys() and execute_operator(operator, table_row[column], value)
            if ctx.getChildCount() >= 5:
                right = self.is_acceptable(table_row, ctx.getChild(-1))
                return left and right
            return left
|
986,134 | b4525fbf70100111b7d2a9b3d6ba28c883985d74 | ########################################################################################################################
# @author Oriol Aranda (https://github.com/oriolaranda/)
# @date Oct 2021
########################################################################################################################
import argparse
import json
from functools import partial
from os import path
import numpy as np
import nibabel as nib
import tensorflow as tf
from tqdm import tqdm
from itertools import accumulate
from operator import add
from utils import resize_image
@tf.autograph.experimental.do_not_convert
def brain_dataset(sample, source_dir, verbose=0):
    """Build a one-element tf.data.Dataset for a single (image, label) pair.

    *sample* is a (image_name, label_name) pair of byte strings: paths
    relative to *source_dir*, prefixed with './' (hence the [2:] slice).
    Yields a channels-first image of shape (4, 240, 240, 152) and a
    binarized label volume of shape (1, 240, 240, 152).
    """
    def _generator(names):
        image_name, label_name = names
        if verbose:
            print("Training on sample:", source_dir + str(image_name[2:], 'utf-8'))
        image_dir = source_dir + str(image_name[2:], 'utf-8')
        label_dir = source_dir + str(label_name[2:], 'utf-8')
        # Crop the volume depth from 155 to 152 slices.
        # NOTE(review): x is cropped on axis 2 but y on axis 1 -- confirm
        # the label volume's axis order matches this intent.
        x = np.array(nib.load(image_dir).get_fdata())[:, :, 2:-1, :]
        y = np.array(nib.load(label_dir).get_fdata())[:, 2:-1, :]
        # Binarize: original classes 1-3 collapse into a single foreground.
        y_ = np.zeros(y.shape)
        y_[(y > 0) & (y < 4)] = 1
        # Channels-first: (H, W, D, C) -> (C, H, W, D); label gains a
        # leading singleton channel axis.
        x = np.moveaxis(x, -1, 0)
        y = np.expand_dims(y_, -1)
        y = np.moveaxis(y, -1, 0)
        yield x, y
    dataset = tf.data.Dataset.from_generator(
        _generator,
        output_types=(tf.float32, tf.float32),
        output_shapes=((4, 240, 240, 152), (1, 240, 240, 152)),
        args=(sample,))
    return dataset
def sets_creator(data, datasets_p, source_dir, resize_shape):
def dataset_gen(samples):
def preproc_fn(x, y):
if resize_shape != (240, 240, 152):
assert len(resize_shape) == 3 and all(s > 0 for s in resize_shape), \
f"Resize shape is wrong! {resize_shape}?"
x, y = resize_image(x, y, resize_shape)
x = tf.image.per_image_standardization(x)
return x, y
brain_mri_dataset = partial(brain_dataset, source_dir=source_dir)
_dataset = tf.data.Dataset.from_tensor_slices(samples)
_dataset = _dataset.interleave(lambda x: brain_mri_dataset(x), num_parallel_calls=tf.data.experimental.AUTOTUNE)
_dataset = _dataset.map(preproc_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return _dataset
# nth position to split each set; accumulate probabilities to calculate each n
train_n, valid_n, test_n = (int(p * len(data)) for p in accumulate(datasets_p, add))
split_samples = data[:train_n], data[train_n:valid_n], data[valid_n:test_n]
train, valid, test = ((dataset_gen(samples), len(samples)) for samples in split_samples)
return train, valid, test
def _bytes_feature(value):
"""Returns a bytes_list from a string / bytes."""
if isinstance(value, type(tf.constant(0))): # if value ist tensor
value = value.numpy() # get value of tensor
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def serialize_sample(img, label):
""" Creates a tf.train.Example message ready to be written to a file."""
# Create a dictionary mapping the feature name to the tf.train.Example-compatible data type.
features = {
'img': _bytes_feature(tf.io.serialize_tensor(img)),
'label': _bytes_feature(tf.io.serialize_tensor(label)),
}
# Create a Features message using tf.train.Example
sample = tf.train.Example(features=tf.train.Features(feature=features))
return sample.SerializeToString()
def serialize_dataset(dataset_generator, dataset_size):
    """Return a zero-argument generator factory that yields each
    (image, label) sample of *dataset_generator* serialized as a
    tf.train.Example byte string, with a tqdm progress bar."""
    def _serialized():
        for image, label in tqdm(dataset_generator, total=dataset_size):
            yield serialize_sample(image, label)
    return _serialized
def _write_dataset(dataset, name, dataset_size, num_shards, target_dir):
    """Write *dataset* as *num_shards* TFRecord files named
    '{name}_{i}.tfrecord' inside *target_dir*.

    NOTE(review): target_dir is concatenated without a path separator --
    callers must pass a trailing '/'.  dataset_size // num_shards is only
    the tqdm progress total, not a hard limit on shard contents.
    """
    for i in range(num_shards):
        # shard() deterministically takes every num_shards-th element.
        shard_dataset = dataset.shard(num_shards=num_shards, index=i)
        serialized_shard = tf.data.Dataset.from_generator(serialize_dataset(shard_dataset, dataset_size // num_shards),
                                                          output_types=tf.string, output_shapes=())
        writer = tf.data.experimental.TFRecordWriter(target_dir + f"{name}_{i}.tfrecord")
        writer.write(serialized_shard)
        print(f"TFRecord {name}_{i} saved!")
    print(f"TFRecords for {name} written!!")
def _write_info(info, target_dir):
json_path = path.join(target_dir, 'info.json')
with open(json_path, 'w') as f:
json.dump(info, f)
print("Datasets info written!")
def set_dir(*funcs, target):
    """Bind target_dir=*target* into each callable, preserving order."""
    bound = []
    for func in funcs:
        bound.append(partial(func, target_dir=target))
    return tuple(bound)
def main(args):
    """Split the dataset described by <source_dir>/dataset.json and write TFRecords.

    args.source_dir must contain dataset.json; sharded train/valid/test TFRecord
    files plus an info.json are written into args.target_dir (when given).
    """
    source_json = path.join(args.source_dir, "dataset.json")
    # BUG FIX: was `args.source_sir`, which raised AttributeError before any check ran.
    assert path.exists(args.source_dir), f"The source dir couldn't be found! {args.source_dir}"
    assert path.exists(source_json), f"Json file in the source dir couldn't be found! {source_json}"
    assert len(args.split) == 3 and sum(args.split) == 1, f"Split arguments does not sum up to 1: {args.split}"
    with open(source_json) as f:
        dataset = json.load(f)
    data = [(d['image'], d['label']) for d in dataset['training']]
    # sets_creator returns three (generator, size) pairs; zip(*...) separates them.
    (train, valid, test), sizes = zip(*sets_creator(data, tuple(args.split), args.source_dir, tuple(args.reshape)))
    sizes = dict(zip(('train_size', 'valid_size', 'test_size'), sizes))
    shards = dict(zip(('train_shard', 'valid_shard', 'test_shard'), (16, 4, 4)))
    info = {"total_size": len(data), **sizes, **shards}
    if args.target_dir:
        assert path.exists(args.target_dir), "Target dir doesn't exist!"
        write_dataset, write_info = set_dir(_write_dataset, _write_info, target=args.target_dir)
        write_dataset(train, 'train', info['train_size'], info['train_shard'])
        write_dataset(valid, 'valid', info['valid_size'], info['valid_shard'])
        write_dataset(test, 'test', info['test_size'], info['test_shard'])
        write_info(info)
        print(f"Done!! The entire dataset has been written in TFRecord format in '{args.target_dir}'")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--source-dir", type=str, required=True,
help="Path: Source data directory. The directory must contain the dataset.json file,"
"and the two folders imagesTr, labelsTr.")
parser.add_argument("--target-dir", type=str, default=None,
help="Path: Target data directory. It must exist. It is where the TFRecord data will be "
"written to")
parser.add_argument("--split", type=tuple, default=(0.7, 0.15, 0.15),
help="Tuple: Ratios into which the dataset will be divided (all sum up to 1). "
"Train, validation and test set. Default=(0.7, 0.15, 0.15).")
parser.add_argument("--reshape", type=tuple, default=(240, 240, 152),
help="Tuple: Shape of the written data. Default (240, 240, 152) is the original shape, so no "
"resize will be applied. ")
_args, _ = parser.parse_known_args()
main(_args)
|
######################################################
### Setting additional software licenses and paths ###
######################################################
############################
## Gurobi Solver Software ##
############################
# Once Gurobi is installed, license path can be retrieved
# by entering "gurobi_cl --license" in a command/terminal prompt.
# Please copy and paste it in the following path variable
Gurobi_license_path = '/home/diego/gurobi.lic'
# Several exceptions of Gurobi software (such as its installation or license expiration) are properly managed
# if and only if "gurobi_cl" file path is assigned to the following variable.
# "gurobi_cl" file path can be retrieved by entering "which gurobi_cl" in a command/terminal prompt.
Gurobi_cl_path = '/home/diego/anaconda3/bin/gurobi_cl'
|
class Solution(object):
    def evalRPN(self, tokens):
        """
        Evaluate an arithmetic expression given in Reverse Polish Notation.

        :type tokens: List[str]
        :rtype: int

        NOTE(review): Python 2 code — integer `/` floors toward negative
        infinity, so the explicit `l*r < 0` correction below emulates
        C-style truncation toward zero for negative quotients.
        """
        stack = []
        for c in tokens:
            if c not in ["+", "-", "*", "/"]:  # any non-operator token is an integer operand
                stack.append(int(c))
            else:
                r = stack.pop()  # right operand was pushed last
                l = stack.pop()
                if c == "+":
                    stack.append(l+r)
                elif c == "-":
                    stack.append(l-r)
                elif c == "*":
                    stack.append(l*r)
                elif c == "/":
                    if l*r < 0 and l % r != 0:
                        # floor division rounded down past zero; add 1 to truncate toward zero
                        stack.append(l/r+1)
                    else:
                        stack.append(l/r)
        # With well-formed RPN exactly one value remains: the result.
        return stack.pop()
# "1" "1" "+" # 0, 1, /
s = Solution()
print s.evalRPN(["4","-2","/","2","-3","-","-"])
print s.evalRPN(["4","13","5","/","+"])
print s.evalRPN(["10","6","9","3","+","-11","*","/","*","17","+","5","+"])
print s.evalRPN(["3","-4","+"]) |
986,137 | 822f1702e3995f88d38c42330928460c0c0dd4c8 | import glob
from os import path
import wave
from m3u8 import M3U8
from src.utils import eprint
CONCAT_WAV = "concat.wav"
def create_combined_wav(folder_path: str, wav_file_path: str) -> wave.Wave_write:
sample: wave.Wave_read = wave.open(wav_file_path, 'rb')
concat_wav: wave.Wave_write = wave.open(path.join(folder_path, CONCAT_WAV), 'wb')
concat_wav.setparams(sample.getparams())
concat_wav.setnchannels(1)
sample.close()
return concat_wav
def concat_audio(folder_path, variant_playlist: M3U8):
    """Stitch the folder's wav chunks into concat.wav in playlist-segment order."""
    wav_paths = glob.glob(path.join(folder_path, "*.wav"))
    if not wav_paths:
        eprint("Folder doesn't contain any wav files")
        return
    # Map each file's bare stem (name before the first dot) to its full path.
    by_stem = {}
    for wav_path in wav_paths:
        by_stem[path.split(wav_path)[1].split('.')[0]] = wav_path
    combined = create_combined_wav(folder_path, wav_paths[0])
    for segment in variant_playlist.segments:
        stem = path.split(segment.uri)[1].split('.')[0]
        reader: wave.Wave_read = wave.open(by_stem[stem], 'rb')
        combined.writeframes(reader.readframes(reader.getnframes()))
        reader.close()
    combined.close()
986,138 | a1132ab22c14ed343349d26f24dcbfac09daf3b0 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
from turbomail.control import interface
from turbomail import Message
from_ = (('Asset Summary', 'Asset.Summary@cn.flextronics.com'))
debug_list = ['Colin.Qi', 'Sucre.Su']
email_config = {'mail.on': True,
'mail.transport': "smtp",
'mail.smtp.server': '10.201.13.88',
'mail.manager': 'demand',
'mail.message.encoding': "utf-8",
'mail.smtp.debug': False,
}
def parse_mail(mail_list):
    """Expand 'First.Last' ids into (display name, corporate address) pairs."""
    return [(mail_id.replace('.', ' '), mail_id + '@cn.flextronics.com')
            for mail_id in mail_list]
def send_mail(subject, body, author=None, **kwargs):
    """Send an HTML mail via turbomail.

    Falls back to the debug BCC list when no bcc is given, unless debug=False
    is passed. Recipients are 'First.Last' ids expanded by parse_mail.
    """
    interface.start(email_config)
    msg = Message(author or from_, parse_mail(kwargs.get('to', [])), subject)
    msg.cc = parse_mail(kwargs.get('cc', []))
    bcc = kwargs.get('bcc', [])
    if not bcc and kwargs.get('debug', True):
        bcc = debug_list
    msg.bcc = parse_mail(bcc)
    msg.plain = subject
    msg.rich = body
    for attachment in kwargs.get('attachments', []):
        msg.attach(attachment)
    msg.send()
    interface.stop()
|
986,139 | 86bc865ca7d52fc61e01688d82fab3da12f8590f | import hmac
import hashlib
import base64
import json
from typing import Optional
from fastapi import FastAPI, Cookie, Body
from fastapi.responses import Response
from fake_db import USERS
from fake_settings import SECRET_KEY, PASSWORD_SALT
app = FastAPI()
secret_key = SECRET_KEY
pswd_salt = PASSWORD_SALT
users = USERS
def salt_my_password_baby(password: str, password_salt: str = pswd_salt) -> str:
    """Return the hex SHA-256 digest of password concatenated with its salt."""
    digest = hashlib.sha256((password + password_salt).encode())
    return digest.hexdigest()
def verify_password(username: str, user_password: str) -> bool:
    """Check the supplied password against the stored salted hash (case-insensitive)."""
    candidate = salt_my_password_baby(user_password).lower()
    stored = users.get(username)['password'].lower()
    return candidate == stored
def sign_cookie(data: str) -> str:
    """Return an uppercase hex HMAC-SHA256 signature of *data* keyed by SECRET_KEY.

    BUG FIX: the return annotation said `-> None`, but the function returns
    the hex digest string; callers compare/concatenate the result.
    """
    mac = hmac.new(
        secret_key.encode(),
        msg=data.encode(),
        digestmod=hashlib.sha256
    )
    return mac.hexdigest().upper()
def get_username_from_signed_cookie(signed_str: str) -> Optional[str]:
    """Decode a 'b64name.signature' cookie; return the username only if the signature verifies."""
    try:
        encoded, signature = signed_str.split('.')
        username = base64.b64decode(encoded.encode()).decode()
    except ValueError:
        return None
    # Implicitly returns None when the signature does not match.
    if sign_cookie(username) == signature:
        return username
def index_response(index) -> Response:
    """Wrap *index* HTML in a Response, best-effort clearing any stale username cookie."""
    resp = Response(index, media_type='text/html')
    try:
        resp.delete_cookie(key='username')
    except Exception as err:  # cookie removal is best-effort only
        print(err)
    return resp
def get_index_html(path: str) -> str:
    """Read and return the template file at *path*."""
    with open(path, 'r') as template:
        return template.read()
@app.get('/')
def index_page(username: Optional[str] = Cookie(default=None)):
    """Render the greeting for a validly signed-in user, otherwise the login index."""
    index = index_response(get_index_html('templates/index.html'))
    if not username:
        return index
    valid_username = get_username_from_signed_cookie(username)
    # BUG FIX: was `if not valid_username or None:` — the `or None` was a no-op.
    if not valid_username:
        return index
    if valid_username not in users:
        # BUG FIX: the old `except KeyError:` branch evaluated `index` without
        # returning it, then crashed below on users.get(...)["balance"] (None).
        return index
    return Response(
        f'Привет: {valid_username} <br/> Твой баланс: {users.get(valid_username)["balance"]}', media_type='text/html')
@app.post("/login")
def proccess_login_page(data: dict = Body(...)):
username = data.get('username')
password = data.get('password')
user = users.get(username)
if not user or not verify_password(username, password):
return Response(
json.dumps({
'success': False,
'message': 'я вас не знаю!'
}),
media_type='application/json')
response = Response(
json.dumps({
'success': True,
'message': f'Привет: {username} <br/> Твой баланс: {user["balance"]}'
}),
media_type='application/json')
username_signed = f'{base64.b64encode(username.encode()).decode()}.{sign_cookie(username)}'
response.set_cookie(key='username', value=username_signed)
return response
|
# Squares of 1..10, built with a comprehension (same result as an append loop).
square = [n ** 2 for n in range(1, 11)]
print(square)

fav_numbers = {'eric': 17, 'ever': 4}
for name, number in fav_numbers.items():
    print(name + ' loves ' + str(number))
986,141 | 7db8faa9f7f7a4bfcc6449b9fcbd92545949cbe4 | import hashlib
import time
def createHash():
    """Yield (md5_hexdigest, i) for i in 0..9,999,999, hashing the decimal string of i."""
    for candidate in range(10000000):
        digest = hashlib.md5(str(candidate).encode('utf-8'))
        yield (digest.hexdigest(), candidate)
def checkHash(hash, cHash):
    """Return 1 (announcing the hit) when the target digest matches cHash[0], else 0."""
    if hash != cHash[0]:
        return 0
    print("找到密码:", cHash[1])
    return 1
def main():
    """Interactively brute-force an MD5 preimage over decimal strings 0..9,999,999."""
    hash=input("请输入要破解的md5密文\n")
    # NOTE(review): the entered digest is hashed *again* here, so the search
    # actually targets md5(<entered text>) rather than the entered digest
    # itself — confirm this is intended.
    md5s = hashlib.md5()
    md5s.update(hash.encode('utf-8'))
    print("开始破解")
    time1=time.time()
    for cHash in createHash():
        if(checkHash(md5s.hexdigest(),cHash)):
            # Found: report digest, preimage, attempt count and throughput.
            print("密文及其对应的密码:",cHash[0],cHash[1])
            time2=time.time()
            print("穷举了:",cHash[1]+1,"个密码\n共花费时间:",time2-time1,"\n平均速率:",(cHash[1]+1)/(time2-time1),"秒")
            break
main()
|
986,142 | d3efbbcbbcd1757d214f9917fe2b7394e457928b | #Extremely simple text adventure
#by me :)
#Sorry coause it's in spanish, hope you get what the code is about
import time
import sys
import cmd
# Page layout: [text, option-A label, option-B label, next-page name for A, next-page name for B].
# Terminal pages are ['moriste'|'ganaste', epilogue text].
pagina0 = ['Te despierta un rayo de luz proveniendo de la entrada a la cueva.', 'derecha', 'izquierda', 'pagina1', 'pagina2']
pagina1 = ['moriste','Oyes un sonido extremadamente alto y sientes algo caliente en tu cuello. Pisaste una mina.']
pagina2 = ['Encuentras la salida, pero ves un oso cerca de la entrada.', 'pelear', 'hacerte el muerto', 'pagina3', 'pagina4']
pagina3 = ['De alguna manera consigues asustar al oso y te deja en paz.', 'irse', 'quedarte donde estás', 'pagina5', 'pagina6']
pagina4 = ['Te tirasal suelo y aguantas la respiración. El oso te ignora y se va.', 'levantarse', 'quedarte donde estás', 'pagina5', 'pagina6']
pagina5 = ['Ves un barco, pero hay gente cerca. Tienen armas, pero encontraste un revolver detras de un arbol', 'acercarse', 'alejarse', 'pagina7', 'pagina8']
pagina6 = ['moriste', 'Un meteorito cae sobre tu cabeza. No te dio tiempo a asustarte.']
pagina7 = ['ganaste','Te acercas lentamente, y un meteorito cae unos diez metros de donde estas. Te levantas en la enfermería del barco.']
pagina8 = ['moriste','Corres hacia la distancia y mueres de deshidratacion unos días después.']
def salir():
    """Prompt until the player answers 'sí' or 'no'; exit the program on 'sí'."""
    answer = None
    while answer is None:
        answer = input('salir (sí o no)?').lower()
        if answer not in ["sí", "no"]:
            print("Escribe un comando correcto.")
            answer = None
    if answer == "sí":
        sys.exit()
    return answer
# NOTE(review): `paginaActual` is never initialized before use (presumably it
# should start as pagina0), the "next page" entries are page *names* ('pagina1')
# rather than the page lists themselves, and there is no outer game loop — as
# written this advances at most one step and then raises NameError.
respuesta = None
while respuesta is None:
    respuesta = input(f'salir ({paginaActual[1]} o {paginaActual[2]})?').lower()
    if respuesta != paginaActual[1] and respuesta != paginaActual[2]:
        print("Escribe un comando correcto.")
        respuesta = None
if respuesta == paginaActual[1]:
    paginaActual = paginaActual[3]
elif respuesta == paginaActual[2]:
    paginaActual = paginaActual[4]
# NOTE(review): these terminal-page branches are unreachable whenever one of
# the two answer branches above matched first.
elif paginaActual[0] == ('moriste'):
    print('Fin...')
    print('====================')
    print('')
    print('Tristemente no encontraste un final feliz...')
    print('Pero prueba de nuevo!')
    print('salir = "s"')
    salir()
elif paginaActual[0] == ('ganaste'):
    print('Fin!')
    print('====================')
    print('')
    print('Escapaste! Finalmente podrás reunirte con tu familia...')
    print('O lo que queda de ella... Parece ser que viajaste en el tiempo hacia el futuro')
    print('y tu único relativo vivo es tu nieta Antonieta. Mejor que nada!')
    salir()
986,143 | d6f3179bcccdc1b9f9f80453f06f7d240221b0be | import os
def createdb(name):
    """Create directory database/<name>; True on success, message string otherwise."""
    db_path = 'database/' + name
    try:
        if os.path.isdir(db_path):
            return "DB already exist"
        os.mkdir(db_path)
        return True
    except OSError:
        return "Os error except while create " + name + " db"
def getfrdb(name):
    """Return the path of database/<name> after probing that it is listable."""
    try:
        db_path = 'database/' + name
        # Listing probes existence/readability; the entries themselves are discarded.
        [os.path.join(db_path, entry) for entry in os.listdir(db_path)
         if os.path.isdir(os.path.join(db_path, entry))]
        return db_path
    except:
        return "Error while listing db-s"
def deldb(name):
    """Remove the (empty) database directory; True on success, message on failure."""
    try:
        os.rmdir('database/' + name)
    except OSError:
        return "Os error except while deleting db"
    return True
def createtb(dbname, name):
    """Create table directory database/<dbname>/<name>; True or a message string."""
    tb_path = "database/" + dbname + "/" + name
    try:
        if os.path.isdir(tb_path):
            return "TB already exist"
        os.mkdir(tb_path)
        return True
    except OSError:
        return "Os error except while create " + name + " table"
def getfrtb(dbname, name):
    """Return the path of database/<dbname>/<name> after probing that it is listable."""
    try:
        tb_path = 'database/' + dbname + '/' + name
        # Listing probes existence/readability; the entries themselves are discarded.
        [os.path.join(tb_path, entry) for entry in os.listdir(tb_path)
         if os.path.isdir(os.path.join(tb_path, entry))]
        return tb_path
    except:
        return "Error while listing tables"
def deltb(dbname, name):
    """Remove the (empty) table directory; True on success, message on failure."""
    try:
        os.rmdir("database/" + dbname + "/" + name)
    except:
        return "Error while deleting table"
    return True
def createln(dbname, tbname, name):
    """Create an empty line file database/<dbname>/<tbname>/<name>; True or a message."""
    ln_path = "database/" + dbname + "/" + tbname + "/" + name
    try:
        if os.path.isfile(ln_path):
            return "File already exist"
        with open(ln_path, mode="w", encoding="utf-8"):
            return True
    except:
        return "Error while creating line"
def writetoln(dbname, tbname, name, string):
    """Append a newline-prefixed *string* to the line file, creating it first if absent."""
    ln_path = "database/" + dbname + '/' + tbname + '/' + name
    try:
        if not os.path.isfile(ln_path):
            createln(dbname, tbname, name)
        with open(ln_path, mode="a", encoding="utf-8") as f:
            f.write("\n" + string)
        return True
    except:
        return "Error while creating line"
def shfromlnst(dbname, tbname, name, string):  # Search with string
    """Return True when *string* occurs in the line file's contents, False otherwise."""
    try:
        with open("database/" + dbname + '/' + tbname + '/' + name, mode="r", encoding="utf-8") as f:
            contents = f.read()
        # Debug traces kept from the original implementation.
        print(string + " str shf")
        print(contents + " item shf")
        return string in contents
    except:
        return "Error in shfromlnst"
def shfromlnit(dbname, tbname, name, integ):  # Search with int
    """Return True when the decimal rendering of *integ* occurs in the line file.

    BUG FIX: the old `integ in a` raised TypeError for int arguments
    ('in <string>' requires a string left operand), so the function always
    returned the exception object instead of a boolean. The integer is now
    converted to its string form before the containment test.
    """
    try:
        with open("database/" + dbname + '/' + tbname + '/' + name, mode="r", encoding="utf-8") as f:
            contents = f.read()
        return str(integ) in contents
    except Exception as e:
        return e
def getfrln(dbname, tbname, name):
    """Return every file path under the line directory, one per line (trailing newline)."""
    root = 'database/' + dbname + '/' + tbname + '/' + name
    found = []
    for dirpath, _dirnames, filenames in os.walk(root):
        for fname in filenames:
            found.append(os.path.join(dirpath, fname))
    return "".join(p + "\n" for p in found)
def delln(dbname, tbname, name):
    """Delete a line file; True on success, message string on failure.

    BUG FIX: every sibling helper roots its paths at "database/", but this one
    omitted the prefix, so it never found the file it was asked to delete.
    """
    try:
        os.remove("database/" + dbname + '/' + tbname + '/' + name)
        return True
    except:
        return 'Error while deleting ln'
986,144 | 8522870e2b5ab31ac5976d5892d5cf0251d3e2d2 | #!python
import os, sys, string, time, BaseHTTPServer, getopt, re, subprocess, webbrowser
from datetime import date
from datetime import time
from datetime import datetime
from operator import itemgetter
import multiprocessing
import hashlib
shellv = os.environ["SHELL"]
_BINARY_DIST = False
def resource_path(relative_path):
    """ Get absolute path to resource, works for dev and for PyInstaller """
    # BUG FIX: the module-level _BINARY_DIST flag (declared above) was being
    # shadowed by a function-local assignment and therefore never set; the
    # `global` declaration makes the PyInstaller detection actually stick.
    global _BINARY_DIST
    try:
        # PyInstaller creates a temp folder and stores path in _MEIPASS
        base_path = sys._MEIPASS
        _BINARY_DIST = True
    except Exception:
        base_path = os.path.abspath(".")
    return os.path.join(base_path, relative_path)
application_path = ""
if getattr(sys, 'frozen', False):
application_path = os.path.dirname(sys.executable)
elif __file__:
application_path = os.path.dirname(__file__)
#print application_path
#NEED INSTALL DIR
#CWD print os.path.abspath(".")
#internal DIR print sys.path[0]
CSI="\x1B["
reset=CSI+"m"
OK_GREEN = CSI+'32m'
WARNING_YELLOW = CSI+'\033[93m'
ERROR_RED = CSI+'\033[91m'
ENDC = CSI+'0m'
_METAMOSDIR = resource_path(sys.path[0])
INITIAL_UTILS = "%s%sUtilities"%(_METAMOSDIR, os.sep)
INITIAL_SRC = "%s%ssrc"%(_METAMOSDIR, os.sep)
_NUM_LINES = 10
_PROG_NAME_DICT = {}
_PUB_DICT = {}
def enum(*sequential, **named):
    """Create a C-style enum type: each name maps to an int, plus lookup dicts.

    The returned type also carries 'reverse_mapping' (value -> name) and
    'mapping' (name -> value). Python 2 only (uses dict.iteritems).
    """
    enums = dict(zip(sequential, range(len(sequential))), **named)
    reverse = dict((value, key) for key, value in enums.iteritems())
    mapping = dict((key, value) for key, value in enums.iteritems())
    enums['reverse_mapping'] = reverse
    enums['mapping'] = mapping
    return type('Enum', (), enums)
STEP_NAMES = enum("ASSEMBLE", "ANNOTATE", "SCAFFOLD")
STEP_OUTPUTS = enum(".asm.contig", ".hits", ".linearize.scaffolds.final")
INPUT_TYPE = enum("FASTQ", "FASTA", "CONTIGS", "SCAFFOLDS", "ORF_FA", "ORF_AA")
SCORE_TYPE = enum("ALL", "LAP", "ALE", "CGAL", "SNP", "FRCBAM", "ORF", "REAPR", "N50")
SCORE_WEIGHTS = dict()
_failFast = True
class AtomicCounter(object):
    """Process-safe integer counter backed by a multiprocessing RawValue and Lock."""
    def __init__(self, initval=0):
        self.val = multiprocessing.RawValue('i', initval)
        self.lock = multiprocessing.Lock()
    def increment(self):
        """Atomically add one; return the value held *before* the increment."""
        with self.lock:
            previous = self.val.value
            self.val.value = previous + 1
        return previous
# Module-wide counters shared across worker processes.
_atomicCounter = AtomicCounter(0)
_envCounter = AtomicCounter(0)
class Settings:
    """Global MetAMOS configuration (Python 2).

    NOTE(review): every attribute is class-level and __init__ assigns back to
    `Settings.*`, so this behaves as a module-wide singleton; all instances
    share state. __init__ also performs heavy side effects (file rewrites,
    os.system calls) whose order matters — do not reorder.
    """
    asmfiles = []
    runfiles = []
    kmer = "55"
    threads = 16
    rundir = ""
    taxa_level = "class"
    local_krona = False
    annotate_unmapped = False
    task_dict = []
    noblastdb = False
    doscaffolding = False
    VERBOSE = False
    OUTPUT_ONLY = False
    PREFIX = ""
    OSTYPE = ""
    OSVERSION = ""
    MACHINETYPE = ""
    METAMOSDIR = ""
    METAMOS_UTILS = ""
    METAMOS_JAVA = ""
    FASTQC = ""
    SRA = ""
    AMOS = ""
    BAMBUS2 = ""
    SOAPDENOVO = ""
    SOAPDENOVO2 = ""
    METAIDBA = ""
    CA = ""
    BLASR = ""
    NEWBLER = ""
    VELVET = ""
    VELVET_SC = ""
    METAVELVET = ""
    SPARSEASSEMBLER = ""
    EAUTILS = ""
    KMERGENIE = ""
    R = ""
    MGCAT = ""
    METAPHYLER = ""
    BOWTIE = ""
    BOWTIE2 = ""
    SAMTOOLS = ""
    METAGENEMARK = ""
    FRAGGENESCAN = ""
    PROKKA = ""
    SIGNALP = ""
    FCP = ""
    PHMMER = ""
    PHYMM = ""
    BLAST = ""
    PHYLOSIFT = ""
    DB_DIR = ""
    BLASTDB_DIR = ""
    KRONA = ""
    REPEATOIRE = ""
    LAP = ""
    ALE = ""
    FRCBAM = ""
    FREEBAYES = ""
    CGAL = ""
    REAPR = ""
    QUAST = ""
    MPI = ""
    BINARY_DIST = 0
    nopsutil = False
    nopysam = False
    def __init__(self, kmer = None, threads = None, rundir = None, taxa_level = "", localKrona = False, annotateUnmapped = False, doScaffolding = False, verbose = False, outputOnly = False, update = False):
        """Initialize (or, with update=True, re-initialize) the shared settings."""
        configureEnvironment(INITIAL_UTILS)
        # Already initialized and not an update request: leave state untouched.
        if (Settings.rundir != "" and update == False):
            return
        if (kmer == None or threads == None or rundir == None):
            print "Error settings is uninitialized and no intialization provided\n"
            raise(Exception)
        # Detect a PyInstaller bundle via the _MEIPASS marker (local flag only).
        _BINARY_DIST = False
        try:
            # PyInstaller creates a temp folder and stores path in _MEIPASS
            base_path = sys._MEIPASS
            _BINARY_DIST = True
            #print sys._MEIPASS
        except Exception:
            pass
        # Optional dependencies: disable the features relying on them if absent.
        try:
            import pysam
            if verbose:
                print "Found pysam in %s"%(pysam.__file__)
        except ImportError:
            Settings.nopysam = True
            print "Could not import pysam, disabling."
        try:
            import psutil
            if verbose:
                print "Found psutil in %s"%(psutil.__file__)
        except ImportError:
            Settings.nopsutil = True
            print "Could not import psutil, disabling."
        Settings.rundir = rundir
        Settings.kmer = kmer
        Settings.threads = threads
        Settings.rundir = rundir
        Settings.taxa_level = taxa_level
        Settings.local_krona = localKrona
        Settings.doscaffolding = doScaffolding
        Settings.annotate_unmapped = annotateUnmapped
        Settings.task_dict = []
        Settings.PREFIX = "proba"
        Settings.VERBOSE = verbose
        Settings.OUTPUT_ONLY = outputOnly
        # Defaults; getMachineType() is expected to refine these.
        Settings.OSTYPE = "Linux"
        Settings.OSVERSION = "0.0"
        Settings.MACHINETYPE = "x86_64"
        getMachineType()
        Settings.METAMOSDIR = sys.path[0]
        Settings.METAMOS_DOC = "%s%sdoc"%(Settings.METAMOSDIR, os.sep)
        Settings.METAMOS_UTILS = "%s%sUtilities"%(Settings.METAMOSDIR, os.sep)
        Settings.METAMOS_JAVA = "%s%sjava:%s"%(Settings.METAMOS_UTILS,os.sep,os.curdir)
        Settings.noblastdb = False
        _DB_PATH = "%s/DB/"%(Settings.METAMOS_UTILS)
        _BLASTDB_PATH = _DB_PATH
        if _BINARY_DIST:
            #need to change KronaTools.pm to external Taxonomy directory
            try:
                _DB_PATH = "%s/DB/"%(application_path)
                _BLASTDB_PATH = _DB_PATH + os.sep + "blastdbs"+os.sep
                if "BLASTDB" in os.environ and len(os.environ["BLASTDB"]) != 0:
                    # NOTE(review): '==' is a comparison, not assignment — the
                    # $BLASTDB override is never actually applied here.
                    _BLASTDB_PATH == os.environ["BLASTDB"]
                    if not os.path.exists(_BLASTDB_PATH):
                        print "Error: cannot find BLAST DB directory, yet path set via $BLASTDB: %s. Disabling blastdb dependent programs"%(os.environ["BLASTDB"])
                        Settings.noblastdb = True
                elif not os.path.exists(_BLASTDB_PATH):
                    print "Error: cannot find BLAST DB directory, expected it in %s. Disabling blastdb dependent programs"%(_BLASTDB_PATH)
                    Settings.noblastdb = True
            except KeyError:
                #_DB_PATH = "./DB/"
                Settings.noblastdb = True
                pass
            if not os.path.exists(_DB_PATH):
                print "Error: cannot find DB directory in %s, was it deleted? oops, it is required to run MetAMOS!"%(_DB_PATH)
                sys.exit(1)
        elif Settings.rundir != "":
            if "BLASTDB" in os.environ and len(os.environ["BLASTDB"]) != 0:
                # NOTE(review): same '==' vs '=' issue as above.
                _BLASTDB_PATH == os.environ["BLASTDB"]
                if not os.path.exists(_BLASTDB_PATH):
                    print "Error: cannot find BLAST DB directory, yet path set via $BLASTDB: %s. Disabling blastdb dependent programs"%(os.environ["BLASTDB"])
                    Settings.noblastdb = True
            elif not os.path.exists("%s%srefseq_protein.00.pin"%(_BLASTDB_PATH, os.sep)):
                print "Error: cannot find BLAST DB directory, expected it in %s. Disabling blastdb dependent programs"%(_BLASTDB_PATH)
                Settings.noblastdb = True
        Settings.DB_DIR = _DB_PATH
        Settings.BLASTDB_DIR = _BLASTDB_PATH
        Settings.BINARY_DIST = _BINARY_DIST
        # --- locations of the bundled third-party tools ---
        Settings.AMOS = "%s%sAMOS%sbin"%(Settings.METAMOSDIR, os.sep, os.sep)
        Settings.BAMBUS2 = Settings.AMOS
        Settings.SOAPDENOVO = "%s%scpp%s%s-%s"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
        Settings.SOAPDENOVO2 = "%s%scpp%s%s-%ssoap2/bin"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
        Settings.METAIDBA = "%s%scpp%s%s-%s"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
        Settings.CA = "%s%sCA%s%s-%s%sbin"%(Settings.METAMOSDIR, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE.replace("x86_64", "amd64"), os.sep)
        Settings.NEWBLER = "%s%snewbler%s%s-%s"%(Settings.METAMOSDIR, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
        Settings.VELVET = "%s%scpp%s%s-%s%svelvet"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE, os.sep)
        Settings.VELVET_SC = "%s%scpp%s%s-%s%svelvet-sc"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE, os.sep)
        Settings.METAVELVET = "%s%scpp%s%s-%s%sMetaVelvet"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE, os.sep)
        Settings.SPARSEASSEMBLER = "%s%scpp%s%s-%s%sSparseAssembler"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE, os.sep)
        Settings.EAUTILS = "%s%scpp%s%s-%s%seautils"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE, os.sep)
        Settings.KMERGENIE = "%s%scpp%s%s-%s%skmergenie"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE, os.sep)
        Settings.R = "%s%scpp%s%s-%s%sR"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE, os.sep)
        Settings.PHYMM = "%s%sperl%sphymm%s"%(Settings.METAMOS_UTILS, os.sep, os.sep, os.sep)
        Settings.METAPHYLER = "%s%scpp%s%s-%s"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
        Settings.BOWTIE = "%s%scpp%s%s-%s"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
        Settings.BOWTIE2 = "%s%scpp%s%s-%s"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
        Settings.SAMTOOLS = "%s%scpp%s%s-%s"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
        Settings.METAGENEMARK = "%s%scpp%s%s-%s"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
        Settings.FRAGGENESCAN = "%s%scpp%s%s-%s"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
        Settings.PROKKA = "%s%scpp%s%s-%s/prokka/bin"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
        # Probe whether the prokka script exists; absent during initPipeline.
        runp = True
        if 1:
            try:
                kronalibf = open("%s%scpp%s%s-%s/prokka/bin/prokka"%(Settings.METAMOS_UTILS,os.sep,os.sep, Settings.OSTYPE, Settings.MACHINETYPE))
            except IOError:
                #this is initPipeline, skip
                runp = False
        if _BINARY_DIST and runp:
            #need to change PROKKA to external db directory
            # Rewrites the prokka Perl script in place to point at our DB dir.
            kronalibf = open("%s%scpp%s%s-%s/prokka/bin/prokka"%(Settings.METAMOS_UTILS,os.sep,os.sep, Settings.OSTYPE, Settings.MACHINETYPE))
            data = kronalibf.read()
            if "my $DBDIR = \"$FindBin::RealBin/../db\";" not in data:
                kronalibf.close()
            else:
                dd = data.replace("my $DBDIR = \"$FindBin::RealBin/../db\";","my $DBDIR = \"%s/prokka/db\";"%(Settings.DB_DIR))
                kronalibf.close()
                kronalibf = open("%s%scpp%s%s-%s/prokka/bin/prokka"%(Settings.METAMOS_UTILS,os.sep,os.sep, Settings.OSTYPE, Settings.MACHINETYPE), 'w')
                kronalibf.write(dd)
                kronalibf.close()
            # also need to change phylosift to external DB
            os.system("cp %s%sphylosift%sphylosiftrc %s%sphylosift%sphylosiftrc.orig"%(Settings.METAMOSDIR, os.sep, os.sep, Settings.METAMOSDIR, os.sep, os.sep))
            testIn = open("%s%sphylosift%sphylosiftrc.orig"%(Settings.METAMOSDIR, os.sep, os.sep), 'r')
            testOut = open("%s%sphylosift%sphylosiftrc"%(Settings.METAMOSDIR, os.sep, os.sep), 'w')
            for line in testIn.xreadlines():
                if "marker_path" in line:
                    testOut.write("$marker_path=\"%s%sshare%sphylosift\";\n"%(Settings.DB_DIR, os.sep, os.sep))
                elif "ncbi_path" in line:
                    testOut.write("$ncbi_path=\"%s%sshare%sphylosift\";\n"%(Settings.DB_DIR, os.sep, os.sep))
                else:
                    testOut.write(line.strip() + "\n")
            testIn.close()
            testOut.close()
        Settings.SIGNALP = "%s%scpp%s%s-%s"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
        Settings.FCP = "%s%scpp%s%s-%s"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
        Settings.PHMMER = "%s%scpp%s%s-%s"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
        Settings.MGCAT = "%s%scpp%s%s-%s"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
        Settings.BLAST = "%s%scpp%s%s-%s"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
        Settings.PHYLOSIFT = "%s%sPhyloSift"%(Settings.METAMOSDIR, os.sep)
        Settings.KRONA = "%s%sKronaTools%sbin"%(Settings.METAMOSDIR,os.sep,os.sep)
        if _BINARY_DIST and runp:
            #need to change KronaTools.pm to external Taxonomy directory
            kronalibf = open("%s%sKronaTools%slib%sKronaTools.pm"%(Settings.METAMOSDIR,os.sep,os.sep,os.sep))
            data = kronalibf.read()
            if "my $taxonomyDir = \"$libPath/../taxonomy\";" not in data:
                kronalibf.close()
            else:
                dd = data.replace("my $taxonomyDir = \"$libPath/../taxonomy\";","my $taxonomyDir = \"%s/taxonomy\";"%(Settings.DB_DIR))
                kronalibf.close()
                kronalibf = open("%s%sKronaTools%slib%sKronaTools.pm"%(Settings.METAMOSDIR,os.sep,os.sep,os.sep),'w')
                kronalibf.write(dd)
                kronalibf.close()
                os.system("ln -s %s/taxonomy %s%sKronaTools%staxonomy"%(Settings.DB_DIR,Settings.METAMOSDIR,os.sep,os.sep))
        Settings.REPEATOIRE = "%s%scpp%s%s-%s"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
        Settings.LAP = "%s%sLAP"%(Settings.METAMOSDIR, os.sep)
        Settings.ALE = "%s%scpp%s%s-%s/ALE/src"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
        Settings.CGAL = "%s%scpp%s%s-%s/cgal"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
        Settings.REAPR = "%s%scpp%s%s-%s/REAPR"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
        Settings.FRCBAM = "%s%scpp%s%s-%s/FRCbam/bin"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
        Settings.FREEBAYES = "%s%scpp%s%s-%s/freebayes/bin"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
        Settings.QUAST = "%s%squast"%(Settings.METAMOSDIR, os.sep)
        Settings.MPI = "%s%smpiexec"%(Settings.METAMOSDIR, os.sep)
# Module-level 1-based id counters for readLib and Read instances.
libcounter = 1
readcounter = 1
class Read:
    """One read file belonging to a library (Python 2 era).

    NOTE(review): the attributes below are class-level defaults shared by all
    instances until shadowed in __init__.
    """
    format = ""
    maxlen = 150
    qformat = "Sanger"
    filtered = False
    mated = True
    path = ""
    fname = ""
    id = 0
    sid = ""
    def __init__(self,format,path,mated=True,interleaved=False):
        """Record file format/path flags and take the next module-wide read id."""
        global readcounter
        self.id = readcounter
        readcounter +=1
        self.format = format
        self.path = path
        self.fname = os.path.basename(self.path)
        self.mated = mated
        self.interleaved = interleaved
        #self.init()
        #self.validate()
class readLib:
    """A sequencing library: one or two Read files plus insert-size bounds.

    NOTE(review): readDict and pairDict are CLASS-level dicts mutated through
    `self.readDict[...] = ...` in __init__, so they are shared across ALL
    readLib instances — getPair relies on this global registry; do not make
    them per-instance without auditing callers.
    """
    format = ""
    mean = 0
    stdev = 0
    mmin = 0
    mmax = 0
    mated = True
    interleaved = False
    innie = True
    linkerType = "titanium"
    frg = ""
    f1 = ""
    f2 = ""
    f12 = ""
    reads = []
    readDict = {}
    pairDict = {}
    def __init__(self,format,mmin,mmax,f1,f2="",mated=True,interleaved=False,innie=True,linkerType="titanium"):
        """Register the read files, wire up mate pairs, then derive insert stats."""
        global libcounter
        self.id = libcounter
        self.sid = "lib"+str(libcounter)
        libcounter +=1
        self.format = format
        self.mated=mated
        self.interleaved=interleaved
        self.innie=innie
        self.linkerType=linkerType
        self.mmin = mmin
        self.mmax = mmax
        self.f1 = f1
        self.f2 = f2
        self.f1.sid = self.sid
        self.readDict[f1.id] = self.f1
        if f2 != "":
            # Mated pair: register the reverse file and the bidirectional pairing.
            self.readDict[f2.id] = self.f2
            self.pairDict[f1.id] = f2.id
            self.pairDict[f2.id] = f1.id
            self.f2.sid = self.sid
        self.reads = []
        self.reads.append(f1)
        if self.f2 != "":
            self.reads.append(f2)
        self.initLib()
        self.validateLib()
    def getPair(self,readId):
        """Return the mate Read of *readId*, or -1 when it has no pair."""
        try:
            return self.readDict[self.pairDict[readId]]
        except KeyError:
            #no pair for read
            return -1
    def initLib(self):
        """Derive mean insert size from the min/max bounds; stdev fixed at 10%."""
        self.mean = (self.mmin+self.mmax)/2.0
        self.stdev = 0.1*self.mean
        #count num reads
        #check pairs
        #if self.interleaved:
        #   f12 = self.f1.path
        #else:
        #need to shuffle em
        #   if self.f1.format == "fasta" and self.f2.format == "fasta":
        #      pass
        #   elif self.f2.format = "fastq" and self.f1.format == "fastq":
        #      pass
        #   f12 = ""
    def validateLib(self):
        # Placeholder: no validation implemented.
        pass
    def concatLib(self):
        #combine two libs of same format & w/ same insert size into one
        pass
    def toggleInterleaved(self):
        #if interleaved switch to two files, else vice versa
        pass
    def filterReads(self):
        #remove all Reads with N, etc
        pass
    def __str__(self):
        # NOTE(review): returns None; str(lib) will raise TypeError if ever used.
        pass
def getDefaultWeight(sa):
    """Default weight for a score type: one third for the likelihood scores
    (LAP/ALE/CGAL), zero for ORF, one for everything else."""
    if sa in (SCORE_TYPE.LAP, SCORE_TYPE.ALE, SCORE_TYPE.CGAL):
        return 0.333333333
    if sa == SCORE_TYPE.ORF:
        return 0
    return 1
def nearlyEqual(a, b, epsilon = 0.0001):
    """Float comparison: exact match first, an absolute bound near zero,
    otherwise relative error against the magnitude sum."""
    mag_a = abs(float(a))
    mag_b = abs(float(b))
    diff = abs(float(a) - float(b))
    if a == b:
        return True
    if a * b == 0:
        # One side is exactly zero: relative error is meaningless here,
        # so fall back to a tight absolute threshold.
        return diff < (epsilon * epsilon)
    return diff / (mag_a + mag_b) < epsilon
def initValidationScores(weights = dict()):
    """Populate SCORE_WEIGHTS: explicit entries win; an empty *weights* dict
    selects the defaults; otherwise unspecified scores get weight 0."""
    use_defaults = len(weights) == 0
    for score in SCORE_TYPE.reverse_mapping.keys():
        if score in weights:
            SCORE_WEIGHTS[score] = weights[score]
        elif use_defaults:
            SCORE_WEIGHTS[score] = getDefaultWeight(score)
        else:
            SCORE_WEIGHTS[score] = 0
def updateConfigCommands(infileName, opts):
    """Rewrite the 'command:' line of a config file with the given getopt pairs.

    -f/--force and -d/--projectdir are excluded; long options are rendered as
    --opt=value, short ones as -o value. Python 2 (xreadlines, print-free).
    """
    # build the list of commands
    commands = ""
    for o, a in opts:
        if o == "-f" or o == "--force":
            continue
        if o == "-d" or o == "--projectdir":
            continue
        if "--" in o:
            commands = "%s %s=%s"%(commands, o, a)
        else:
            commands = "%s %s %s"%(commands, o, a)
    # Rewrite via a temp file, then move it over the original.
    tempFileName = "%s.tmp"%(infileName)
    tempFile = open(tempFileName, 'w')
    infile = open(infileName, 'r')
    for line in infile.xreadlines():
        if "command:" in line:
            tempFile.write("command:\t%s\n"%(commands.strip()))
        else:
            tempFile.write(line)
    infile.close()
    tempFile.close()
    os.system("mv %s %s"%(tempFileName, infileName))
def updateLibInfo(infileName, lib):
    """Rewrite the lib<N>* lines of a pipeline config file to match `lib`.

    infileName -- path to the config file (rewritten in place)
    lib        -- library object with id, format, mated, innie,
                  interleaved, mmin/mmax/mean/stdev and f1/f2 read files

    Every existing line mentioning "lib<id>" is replaced (once, as a
    group) by a freshly generated description block; all other lines
    are copied through unchanged.

    Fixes: the interleaved line previously lacked a colon
    ("lib%dinterleaved\\t"), so readConfigInfo's "interleaved:" match
    never fired when the file was re-read.  Also closes files reliably
    and uses os.rename instead of `os.system("mv ...")`.
    """
    tempFileName = "%s.tmp"%(infileName)
    tempFile = open(tempFileName, 'w')
    infile = open(infileName, 'r')
    written = False
    try:
        for line in infile:
            if "lib%d"%(lib.id) in line:
                if written == False:
                    written = True
                    tempFile.write("lib%dformat:\t%s\n"%(lib.id, lib.format))
                    tempFile.write("lib%dmated:\t%s\n"%(lib.id, lib.mated))
                    tempFile.write("lib%dinnie:\t%s\n"%(lib.id, lib.innie))
                    # colon added so the value round-trips through readConfigInfo
                    tempFile.write("lib%dinterleaved:\t%s\n"%(lib.id, lib.interleaved))
                    if lib.mated:
                        if lib.interleaved:
                            tempFile.write("lib%df1:\t%s,%d,%d,%d,%d\n"%(lib.id, lib.f1.fname, lib.mmin, lib.mmax, lib.mean, lib.stdev))
                        else:
                            tempFile.write("lib%df1:\t%s,%d,%d,%d,%d\n"%(lib.id, lib.f1.fname, lib.mmin, lib.mmax, lib.mean, lib.stdev))
                            tempFile.write("lib%df2:\t%s,%d,%d,%d,%d\n"%(lib.id, lib.f2.fname, lib.mmin, lib.mmax, lib.mean, lib.stdev))
                    else:
                        tempFile.write("lib%dfrg:\t%s\n"%(lib.id, lib.f1.fname))
            else:
                tempFile.write(line)
    finally:
        infile.close()
        tempFile.close()
    os.rename(tempFileName, infileName)
def readConfigInfo(infile, filePrefix=""):
    """Parse a metAMOS project config file.

    infile     -- open file handle on the config file
    filePrefix -- optional path prefix prepended to every read file name

    Returns (asmcontigs, readlibs, workflow):
      asmcontigs -- list of pre-assembled contig file names
      readlibs   -- list of readLib objects, one per library
      workflow   -- inherited workflow name ("" when none declared)

    The parser is order-sensitive: format/mated/innie/... lines set
    state that is consumed when the following f1:/f2:/frg: line(s) are
    seen.  A library with only an f1 (or frg) line is flushed when the
    next "format:" line starts a new library, or at EOF.
    """
    readlibs = []
    asmcontigs = []
    workflow = ""
    libs = []
    readobjs = []
    format = ""
    mean = 0
    stdev = 0
    mmin = 0
    mmax = 0
    mated = True
    interleaved = False
    innie = True
    linkerType = "titanium"
    frg = ""
    f1 = ""
    f2 = ""
    currlibno = 0
    newlib = ""
    libadded = False
    nlib = None
    lib = None
    for line in infile.xreadlines():
        line = line.replace("\n","")
        if "#" in line:
            # comment line: skipped entirely
            continue
        elif "inherit:" in line:
            wfc = line.replace("\n", "").split(":")
            if len(wfc) < 2:
                continue
            workflow = wfc[1].strip()
        elif "asmcontigs:" in line:
            asmc = line.replace("\n","").split("asmcontigs:")
            if len(asmc) < 2 or len(asmc[1].strip()) == 0:
                continue
            contigs = asmc[1].strip().split(",")
            for contig in contigs:
                if (len(contig.strip()) > 0):
                    asmcontigs.append(contig)
        elif "format:" in line:
            # a new library starts: flush the previous single-file library
            # (f2-paired libraries were already flushed when f2: was seen)
            if f1 and not libadded:
                nread1 = Read(format,f1,mated,interleaved)
                readobjs.append(nread1)
                nread2 = ""
                nlib = readLib(format,mmin,mmax,nread1,nread2,mated,interleaved,innie,linkerType)
                readlibs.append(nlib)
            libadded = False
            format = line.replace("\n","").split(":")[-1].strip()
        elif "mated:" in line:
            mated = str2bool(line.replace("\n","").split(":")[-1].strip())
        elif "interleaved:" in line:
            interleaved = str2bool(line.replace("\n","").split(":")[-1].strip())
        elif "innie:" in line:
            innie = str2bool(line.replace("\n","").split(":")[-1].strip())
        elif "linker:" in line:
            linkerType = line.replace("\n","").split(":")[-1].strip()
        elif "f1:" in line:
            # value format: fname,mmin,mmax,mean,stdev
            data = line.split("f1:")
            f1 = "%s%s"%(filePrefix, data[1].strip().split(",")[0])
            inf = data[1].strip().split(",")
            mean = int(inf[3])
            stdev = int(inf[4])
            mmin = int(inf[1])
            mmax = int(inf[2])
            libs.append(f1)
        elif "f2:" in line:
            # second mate file completes a paired library: flush it now
            data = line.split("f2:")
            f2 = "%s%s"%(filePrefix,data[1].strip().split(",")[0])
            inf = data[1].split(",")
            mean = int(inf[3])
            stdev = int(inf[4])
            mmin = int(inf[1])
            mmax = int(inf[2])
            libs.append(f2)
            nread1 = Read(format,f1,mated,interleaved)
            readobjs.append(nread1)
            nread2 = Read(format,f2,mated,interleaved)
            readobjs.append(nread2)
            nlib = readLib(format,mmin,mmax,nread1,nread2,mated,interleaved,\
                               innie,linkerType)
            readlibs.append(nlib)
            libadded = True
        elif "frg" in line:
            # unmated fragment file
            data = line.split("frg:")
            frg = "%s%s"%(filePrefix,data[1].strip().split(",")[0])
            mated = False
            f1 = frg
            libs.append(frg)
    # EOF: flush a trailing single-file library, if any
    if f1 and not libadded:
        nread1 = Read(format,f1,mated,interleaved)
        readobjs.append(nread1)
        nread2 = ""
        nlib = readLib(format,mmin,mmax,nread1,nread2,mated,interleaved,innie,\
                           linkerType)
        readlibs.append(nlib)
    return (asmcontigs, readlibs, workflow)
def concatContig(ctgfile):
    """Concatenate every sequence in a FASTA file into one record.

    ctgfile -- path to the input FASTA file

    Writes <ctgfile>.merged containing a single record named "seq"
    whose sequence is the concatenation of all input sequences, wrapped
    at 60 characters per line.

    Fixes: removed a leftover `sys.argv` usage check (copy-paste from a
    standalone script; it printed a bogus usage message from library
    code), used the previously-unused wrap-width variable instead of a
    repeated literal, and made file closing exception-safe.
    """
    width = 60  # FASTA line-wrap width
    contig_file = open(ctgfile, 'r')
    out_file = open(ctgfile + ".merged", 'w')
    try:
        out_data = ""
        for line in contig_file:
            if ">" not in line:
                out_data += line.replace("\n", "")
        pp = 0
        out_file.write(">seq\n")
        while pp + width < len(out_data):
            out_file.write(out_data[pp:pp + width] + "\n")
            pp += width
        out_file.write(out_data[pp:] + "\n")
    finally:
        out_file.close()
        contig_file.close()
def str2bool(v):
    """Interpret a string as a boolean.

    Returns True for (case-insensitive) "yes", "true", "t" or "1";
    False for anything else.
    """
    truthy = ("yes", "true", "t", "1")
    return v.lower() in truthy
def sizeFastaFile(fileName):
if not os.path.exists(fileName):
return 0
p = subprocess.Popen("java -cp %s/java:. SizeFasta -t %s"%(Settings.METAMOS_UTILS, fileName), shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(checkStdout, checkStderr) = p.communicate()
if checkStderr != "":
print "Warning: cannot size file, return size 0\n"
return 0
else:
return int(checkStdout)
def getMD5Sum(fileName):
    """Return the hex MD5 digest of a file's contents ("" if missing).

    fileName -- path to the file to checksum

    The file is read in 128-block chunks to bound memory use.

    Fix: the iter() sentinel is now b'' -- the file is opened in binary
    mode, so read() returns bytes; the old str sentinel '' never matches
    bytes under Python 3 and produced an infinite loop there.  On
    Python 2, b'' == '' so behavior is unchanged.
    """
    if not os.path.exists(fileName):
        return ""
    md5 = hashlib.md5()
    with open(fileName, 'rb') as f:
        for chunk in iter(lambda: f.read(128 * md5.block_size), b''):
            md5.update(chunk)
    return md5.hexdigest()
def getMachineType():
p = subprocess.Popen("echo `uname`", shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(checkStdout, checkStderr) = p.communicate()
if checkStderr != "":
print "Warning: Cannot determine OS, defaulting to %s"%(Settings.OSTYPE)
else:
Settings.OSTYPE = checkStdout.strip()
p = subprocess.Popen("echo `uname -r`", shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(checkStdout, checkStderr) = p.communicate()
if checkStderr != "":
print "Warning: Cannot determine OS version, defaulting to %s"%(Settings.OSVERSION)
else:
Settings.OSVERSION = checkStdout.strip()
p = subprocess.Popen("echo `uname -m`", shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(checkStdout, checkStderr) = p.communicate()
if checkStderr != "":
print "Warning: Cannot determine system type, defaulting to %s"%(Settings.MACHINETYPE)
else:
Settings.MACHINETYPE = checkStdout.strip()
def getCommandOutput(theCommand, checkForStderr):
    """Run a shell command and return its stripped stdout.

    theCommand     -- shell command line to execute
    checkForStderr -- when true, any output on stderr makes the call
                      return "" instead of stdout
    """
    proc = subprocess.Popen(theCommand, shell=True, stdin=None,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (out, err) = proc.communicate()
    if checkForStderr and err != "":
        return ""
    return out.strip()
def getFromPath(theCommand, theName, printWarning = True):
deprecated_list = ["METAIDBA","PHYMM"]
result = getCommandOutput("which %s"%(theCommand), True)
if theName.upper() not in deprecated_list and result == "" and printWarning:
print "Warning: %s is not found, some functionality will not be available"%(theName)
return ""
else:
return os.path.dirname(result.strip())
def cmdExists(cmd):
    """Return True when the shell can run `cmd` and it exits with status 0."""
    try:
        rc = subprocess.call(cmd, shell=True,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except OSError:
        return False
    return rc == 0
def initConfig(kmer, threads, theRundir, taxaLevel, localKrona, annotateUnmapped, verbose, outputOnly, doScaffolding):
    """Initialize the global Settings singleton and locate every external tool.

    For each tool the bundled copy under the metAMOS tree is tried
    first; when absent, $PATH is searched via getFromPath (which may
    print a warning).  An MD5 of each located binary is recorded and,
    when a run directory is set, a human-readable summary is written to
    <rundir>/pipeline.conf.  Returns the Settings class.
    """
    Settings(kmer, threads, theRundir, taxaLevel, localKrona, annotateUnmapped, verbose, outputOnly, doScaffolding, True)
    getMachineType()
    # fall back to the current directory when the configured utilities path is missing
    if not os.path.exists(Settings.METAMOS_UTILS):
        Settings.METAMOSDIR = os.getcwd()
        print "Script is running from: %s"%(Settings.METAMOSDIR)
        Settings.METAMOS_UTILS = "%s%sUtilities"%(Settings.METAMOSDIR, os.sep)
        if not os.path.exists(Settings.METAMOS_UTILS):
            print "Error: cannot find metAMOS utilities. Will not run pipeline"
            sys.exit(1)
    Settings.METAMOS_JAVA = "%s%sjava:%s"%(Settings.METAMOS_UTILS, os.sep, os.curdir)
    Settings.METAMOS_DOC = "%s%sdoc"%(Settings.METAMOS_UTILS, os.sep)
    # SRA
    Settings.SRA = "%s%scpp%s%s-%s%ssra%sbin"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE, os.sep, os.sep)
    if not os.path.exists(Settings.SRA + os.sep + "srapath"):
        Settings.SRA = getFromPath("srapath", "SRA")
    sraMD5 = getMD5Sum(Settings.SRA + os.sep + "srapath")
    # FastQC
    Settings.FASTQC = "%s%sFastQC"%(Settings.METAMOSDIR, os.sep)
    if not os.path.exists(Settings.FASTQC + os.sep + "fastqc"):
        Settings.FASTQC = getFromPath("fastqc", "FastQC")
    fastqcMD5 = getMD5Sum(Settings.FASTQC + os.sep + "fastqc")
    # now check for assemblers
    # 1. AMOS
    Settings.AMOS = "%s%sAMOS%s%s-%s%sbin"%(Settings.METAMOSDIR, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE, os.sep)
    if not os.path.exists(Settings.AMOS + os.sep + "toAmos_new"):
        Settings.AMOS = getFromPath("toAmos_new", "AMOS")
        if not os.path.exists(Settings.AMOS + os.sep + "toAmos_new"):
            # AMOS is the only hard requirement: abort without it
            print "Error: cannot find AMOS in %s or %s. Please check your path and try again."%(Settings.METAMOSDIR + os.sep + "AMOS", Settings.AMOS)
            sys.exit(1)
    amosMD5 = getMD5Sum(Settings.AMOS + os.sep + "toAmos_new")
    Settings.BAMBUS2 = Settings.AMOS
    bambusMD5 = getMD5Sum(Settings.BAMBUS2 + os.sep + "OrientContigs")
    # 2. Soap
    Settings.SOAPDENOVO = "%s%scpp%s%s-%s"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
    if not os.path.exists(Settings.SOAPDENOVO + os.sep + "SOAPdenovo-63mer"):
        Settings.SOAPDENOVO = ""
    soapMD5 = getMD5Sum(Settings.SOAPDENOVO + os.sep + "SOAPdenovo-63mer")
    Settings.SOAPDENOVO2 = "%s%scpp%s%s-%s/soap2/bin"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
    if not os.path.exists(Settings.SOAPDENOVO2 + os.sep + "SOAPdenovo-63mer"):
        Settings.SOAPDENOVO2 = ""
    soapMD5 = getMD5Sum(Settings.SOAPDENOVO2 + os.sep + "SOAPdenovo-63mer")
    # 3. CA
    Settings.CA = "%s%sCA%s%s-%s%sbin"%(Settings.METAMOSDIR, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE.replace("x86_64","amd64"), os.sep)
    if not os.path.exists(Settings.CA + os.sep + "gatekeeper"):
        Settings.CA = getFromPath("gatekeeper", "Celera Assembler")
    CAMD5 = getMD5Sum(Settings.CA + os.sep + "gatekeeper")
    # BLASR goes with CA
    Settings.BLASR = "%s/../../../smrtanalysis/current/analysis/bin"%(Settings.CA)
    if not os.path.exists(Settings.BLASR + os.sep + "blasr"):
        Settings.BLASR = getFromPath("blasr", "BLASR")
    blasrMD5 = getMD5Sum(Settings.BLASR + os.sep + "blasr")
    # 4. Newbler
    Settings.NEWBLER = "%s%snewbler%s%s-%s"%(Settings.METAMOSDIR, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
    if not os.path.exists(Settings.NEWBLER + os.sep + "runProject"):
        Settings.NEWBLER = getFromPath("runProject", "Newbler")
    newblerMD5 = getMD5Sum(Settings.NEWBLER + os.sep + "runProject")
    # 5. meta-IDBA
    Settings.METAIDBA = "%s%scpp%s%s-%s"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
    if not os.path.exists(Settings.METAIDBA + os.sep + "metaidba"):
        Settings.METAIDBA = getFromPath("metaidba", "METAIDBA")
    metaidbaMD5 = getMD5Sum(Settings.METAIDBA + os.sep + "metaidba")
    # when searching for velvet, we ignore paths because there are so many variations of velvet (velvet, velvet-sc, meta-velvet that all have a velveth/g and we have no way to tell if we got the right one
    #6. velvet
    Settings.VELVET = "%s%scpp%s%s-%s%svelvet"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE, os.sep)
    if not os.path.exists(Settings.VELVET + os.sep + "velvetg"):
        Settings.VELVET = ""
    velvetMD5 = getMD5Sum(Settings.VELVET + os.sep + "velvetg")
    #7. velvet-sc
    Settings.VELVET_SC = "%s%scpp%s%s-%s%svelvet-sc"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE, os.sep)
    if not os.path.exists(Settings.VELVET_SC + os.sep + "velvetg"):
        Settings.VELVET_SC = ""
    velvetSCMD5 = getMD5Sum(Settings.VELVET_SC + os.sep + "velvetg")
    #8. metavelvet
    Settings.METAVELVET = "%s%scpp%s%s-%s%sMetaVelvet"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE, os.sep)
    if not os.path.exists(Settings.METAVELVET + os.sep + "meta-velvetg"):
        Settings.METAVELVET = getFromPath("meta-velvetg", "METAVELVET")
    metaVelvetMD5 = getMD5Sum(Settings.METAVELVET + os.sep + "meta-velvetg")
    # 8. SparseAssembler
    Settings.SPARSEASSEMBLER = "%s%scpp%s%s-%s%sSparseAssembler"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE, os.sep)
    if not os.path.exists(Settings.SPARSEASSEMBLER + os.sep + "SparseAssembler"):
        Settings.SPARSEASSEMBLER = getFromPath("SparseAssembler", "SparseAssembler")
    sparseAssemblerMD5 = getMD5Sum(Settings.SPARSEASSEMBLER + os.sep + "SparseAssembler")
    Settings.KRONA = "%s%sKronaTools%sbin"%(Settings.METAMOSDIR,os.sep,os.sep)
    if not os.path.exists(Settings.KRONA + os.sep + "ktImportTaxonomy"):
        Settings.KRONA = getFromPath("Krona", "ktImportTaxonomy")
    kronaMD5 = getMD5Sum(Settings.KRONA + os.sep + "ktImportTaxonomy")
    # now for repeatoire
    Settings.REPEATOIRE = "%s%scpp%s%s-%s"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
    if not os.path.exists(Settings.REPEATOIRE + os.sep + "repeatoire"):
        Settings.REPEATOIRE = getFromPath("repeatoire", "Repeatoire")
    else:
        Settings.REPEATOIRE += os.sep + "repeatoire"
    repeatoireMD5 = getMD5Sum(Settings.REPEATOIRE + os.sep + "repeatoire")
    # now for the mappers
    Settings.BOWTIE = "%s%scpp%s%s-%s"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
    if not os.path.exists(Settings.BOWTIE + os.sep + "bowtie"):
        Settings.BOWTIE = getFromPath("bowtie", "Bowtie")
    bowtieMD5 = getMD5Sum(Settings.BOWTIE + os.sep + "bowtie")
    Settings.BOWTIE2 = "%s%scpp%s%s-%s"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
    if not os.path.exists(Settings.BOWTIE2 + os.sep + "bowtie2"):
        Settings.BOWTIE2 = getFromPath("bowtie2", "Bowtie2")
    bowtie2MD5 = getMD5Sum(Settings.BOWTIE + os.sep + "bowtie2")
    Settings.SAMTOOLS = "%s%scpp%s%s-%s"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
    if not os.path.exists(Settings.SAMTOOLS + os.sep + "samtools"):
        Settings.SAMTOOLS = getFromPath("samtools", "samtools")
    samtoolsMD5 = getMD5Sum(Settings.SAMTOOLS + os.sep + "samtools")
    # now the gene callers
    Settings.METAGENEMARK = "%s%scpp%s%s-%s"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
    if not os.path.exists(Settings.METAGENEMARK + os.sep + "gmhmmp"):
        Settings.METAGENEMARK = getFromPath("gmhmmp", "MetaGeneMark")
    gmhmmpMD5 = getMD5Sum(Settings.METAGENEMARK + os.sep + "gmhmmp")
    Settings.PROKKA = "%s%scpp%s%s-%s/prokka/bin"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
    if not os.path.exists(Settings.PROKKA + os.sep + "prokka"):
        Settings.PROKKA = getFromPath("prokka", "Prokka")
    prokkaMD5 = getMD5Sum(Settings.PROKKA + os.sep + "prokka")
    Settings.SIGNALP = "%s%scpp%s%s-%s"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
    if not os.path.exists(Settings.SIGNALP + os.sep + "signalp"):
        Settings.SIGNALP = getFromPath("signalp", "SignalP+")
    signalpMD5 = getMD5Sum(Settings.SIGNALP + os.sep + "signalp")
    Settings.FRAGGENESCAN = "%s%scpp%s%s-%s"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
    if not os.path.exists(Settings.FRAGGENESCAN + os.sep + "FragGeneScan"):
        Settings.FRAGGENESCAN = getFromPath("FragGeneScan","FragGeneScan")
    fraggenescanMD5 = getMD5Sum(Settings.FRAGGENESCAN + os.sep + "FragGeneScan")
    # now for the annotation
    Settings.METAPHYLER = "%s%scpp%s%s-%s"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
    if not os.path.exists(Settings.METAPHYLER + os.sep + "metaphylerClassify"):
        Settings.METAPHYLER = getFromPath("metaphylerClassify", "metaphylerClassify")
    metaphylerMD5 = getMD5Sum(Settings.METAPHYLER + os.sep + "metaphylerClassify")
    Settings.FCP = "%s%scpp%s%s-%s"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
    if not os.path.exists(Settings.FCP + os.sep + "nb-classify"):
        Settings.FCP = getFromPath("nb-classify", "FCP")
    fcpMD5 = getMD5Sum(Settings.FCP + os.sep + "nb-classify")
    Settings.PHMMER = "%s%scpp%s%s-%s"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
    if not os.path.exists(Settings.PHMMER + os.sep + "phmmer"):
        Settings.PHMMER = getFromPath("phmmer", "PHmmer")
    phmmerMD5 = getMD5Sum(Settings.PHMMER + os.sep + "phmmer")
    Settings.MGCAT = "%s%scpp%s%s-%s"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
    if not os.path.exists(Settings.MGCAT + os.sep + "mgcat"):
        Settings.MGCAT = getFromPath("mgcat", "mgcat")
    mgcatMD5 = getMD5Sum(Settings.MGCAT + os.sep + "mgcat")
    Settings.PHYMM = "%s%sperl%sphymm%s"%(Settings.METAMOS_UTILS, os.sep, os.sep,os.sep)
    if not os.path.exists(Settings.PHYMM + os.sep + "scoreReads.pl"):
        Settings.PHYMM = getFromPath("phymm", "Phymm")
    phymmMD5 = getMD5Sum(Settings.PHYMM + os.sep + "scoreReads.pl")
    Settings.BLAST = "%s%scpp%s%s-%s"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
    if not os.path.exists(Settings.BLAST + os.sep + "blastall"):
        Settings.BLAST = getFromPath("blastall", "blast")
    blastMD5 = getMD5Sum(Settings.BLAST + os.sep + "blastall")
    # currently only supported on Linux 64-bit and only from one location
    Settings.PHYLOSIFT = "%s%sphylosift"%(Settings.METAMOSDIR, os.sep)
    if not os.path.exists(Settings.PHYLOSIFT + os.sep + "bin" + os.sep + "phylosift"):
        print "Warning: PhyloSift was not found, will not be available\n"
        Settings.PHYLOSIFT = ""
    phylosiftMD5 = getMD5Sum(Settings.PHYLOSIFT + os.sep + "bin" + os.sep + "phylosift")
    Settings.EAUTILS = "%s%scpp%s%s-%s%seautils"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE, os.sep)
    if not os.path.exists(Settings.EAUTILS + os.sep + "fastq-mcf"):
        Settings.EAUTILS = getFromPath("fastq-mcf", "EA-UTILS")
    eautilsMD5 = getMD5Sum(Settings.EAUTILS + os.sep + "fastq-mcf")
    Settings.KMERGENIE = "%s%scpp%s%s-%s%skmergenie"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE, os.sep)
    if not os.path.exists(Settings.KMERGENIE + os.sep + "kmergenie"):
        Settings.KMERGENIE = getFromPath("kmergenie", "KmerGenie")
    kmergenieMD5 = getMD5Sum(Settings.KMERGENIE + os.sep + "kmergenie")
    Settings.R = "%s%scpp%s%s-%s%sR"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE, os.sep)
    if not os.path.exists(Settings.R + os.sep + "R"):
        Settings.R = getFromPath("R", "R package")
    rMD5 = getMD5Sum(Settings.R + os.sep + "R")
    # now for the validators
    Settings.LAP = "%s%sLAP"%(Settings.METAMOSDIR, os.sep)
    if not os.path.exists(Settings.LAP + os.sep + "aligner" + os.sep + "calc_prob.py"):
        Settings.LAP = getFromPath("calc_prop.py", "LAP")
    lapMD5 = getMD5Sum(Settings.LAP + os.sep + "aligner" + os.sep + "calc_prob.py")
    Settings.ALE = "%s%scpp%s%s-%s/ALE/src"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
    if not os.path.exists(Settings.ALE + os.sep + "ALE"):
        Settings.ALE = getFromPath("ALE", "ALE")
    aleMD5 = getMD5Sum(Settings.ALE + os.sep + "ALE")
    Settings.CGAL = "%s%scpp%s%s-%s/cgal"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
    if not os.path.exists(Settings.CGAL + os.sep + "cgal"):
        Settings.CGAL = getFromPath("cgal", "CGAL")
    cgalMD5 = getMD5Sum(Settings.CGAL + os.sep + "cgal")
    Settings.REAPR = "%s%scpp%s%s-%s/REAPR"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
    if not os.path.exists(Settings.REAPR + os.sep + "reapr"):
        Settings.REAPR = getFromPath("reapr", "REAPR")
    reaprMD5 = getMD5Sum(Settings.REAPR + os.sep + "reapr")
    Settings.FRCBAM = "%s%scpp%s%s-%s/FRCbam/bin"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
    if not os.path.exists(Settings.FRCBAM + os.sep + "FRC"):
        Settings.FRCBAM = getFromPath("FRC", "FRCbam")
    frcMD5 = getMD5Sum(Settings.FRCBAM + os.sep + "FRC")
    Settings.FREEBAYES = "%s%scpp%s%s-%s/freebayes/bin"%(Settings.METAMOS_UTILS, os.sep, os.sep, Settings.OSTYPE, Settings.MACHINETYPE)
    if not os.path.exists(Settings.FREEBAYES + os.sep + "freebayes"):
        Settings.FREEBAYES = getFromPath("freebayes", "FreeBayes")
    freebayesMD5 = getMD5Sum(Settings.FREEBAYES + os.sep + "freebayes")
    Settings.QUAST = "%s%squast"%(Settings.METAMOSDIR, os.sep)
    if not os.path.exists(Settings.QUAST + os.sep + "quast.py"):
        Settings.QUAST = getFromPath("quast.py", "QUAST")
    quastMD5 = getMD5Sum(Settings.QUAST + os.sep + "quast.py")
    Settings.MPI = "%s%smpiexec"%(Settings.METAMOSDIR, os.sep)
    if not os.path.exists(Settings.MPI):
        Settings.MPI = getFromPath("mpiexec", "MPI", False)
        if Settings.MPI == "":
            Settings.MPI = getFromPath("openmpiexec", "OPENMPI", False)
            if Settings.MPI != "":
                Settings.MPI = "%s%s%s"%(Settings.MPI, os.sep, "openmpiexec")
        else:
            Settings.MPI = "%s%s%s"%(Settings.MPI, os.sep, "mpiexec")
        if not os.path.exists(Settings.MPI):
            print "Warning: MPI is not available, some functionality may not be available"
    mpiMD5 = getMD5Sum(Settings.MPI)
    # finally store the configuration
    if Settings.rundir != "":
        conf = open("%s/pipeline.conf"%(Settings.rundir),'w')
        if Settings.BINARY_DIST and 1:
            # frozen (PyInstaller) binary: clean up temp dirs left by previous runs
            prevtmpdirs = []
            try:
                bdf = open("%s/prevruns.tmp"%(application_path),'r')
                for line in bdf.xreadlines():
                    prevtmpdirs.append(line.replace("\n",""))
                for pdir in prevtmpdirs:
                    if os.path.exists("%s"%(pdir)):
                        os.system("rm -rf %s"%(pdir))
                bdf.close()
                bdf = open("%s/prevruns.tmp"%(application_path),'w')
                bdf.close()
            except IOError:
                #do not have permissions to write to install dir, store in tmp?
                #tf, tf_path = tempfile.mkstemp("prevruns.tmp",'w')
                bdf = open("%s/prevruns.tmp"%(application_path),'w')
                bdf.write("%s\n"%(sys._MEIPASS))
                bdf.close()
            except TypeError:
                bdf = open("%s/prevruns.tmp"%(application_path),'w')
                bdf.write("%s\n"%(sys._MEIPASS))
                bdf.close()
        conf.write("#Configuration summary\n")
        conf.write("OS:\t\t\t%s\nOS Version:\t\t%s\nMachine:\t\t%s\n"%(Settings.OSTYPE, Settings.OSVERSION, Settings.MACHINETYPE))
        conf.write("metAMOS main dir:\t%s\nmetAMOS Utilities:\t%s\nmetAMOS Java:\t\t%s\n"%(Settings.METAMOSDIR, Settings.METAMOS_UTILS, Settings.METAMOS_JAVA))
        conf.write("AMOS:\t\t\t%s\t%s\n"%(Settings.AMOS, amosMD5))
        conf.write("BAMBUS2:\t\t%s\t%s\n"%(Settings.BAMBUS2, bambusMD5))
        conf.write("SOAPDENOVO:\t\t\t%s\t%s\n"%(Settings.SOAPDENOVO, soapMD5))
        conf.write("SOAPDENOVO2:\t\t\t%s\t%s\n"%(Settings.SOAPDENOVO2, soapMD5))
        conf.write("METAIDBA:\t\t%s\t%s\n"%(Settings.METAIDBA, metaidbaMD5))
        conf.write("Celera Assembler:\t%s\t%s\n"%(Settings.CA, CAMD5))
        conf.write("NEWBLER:\t\t%s\t%s\n"%(Settings.NEWBLER, newblerMD5))
        conf.write("Velvet:\t\t\t%s\t%s\nVelvet-SC:\t\t%s\t%s\n"%(Settings.VELVET, velvetMD5, Settings.VELVET_SC, velvetSCMD5))
        conf.write("MetaVelvet:\t\t%s\t%s\n"%(Settings.METAVELVET, metaVelvetMD5))
        conf.write("SparseAssembler:\t%s\t%s\n"%(Settings.SPARSEASSEMBLER, sparseAssemblerMD5))
        conf.write("metaphylerClassify:\t\t\t%s\t%s\n"%(Settings.METAPHYLER, metaphylerMD5))
        conf.write("Bowtie:\t\t\t%s\t%s\n"%(Settings.BOWTIE, bowtieMD5))
        conf.write("Bowtie2:\t\t\t%s\t%s\n"%(Settings.BOWTIE2, bowtie2MD5))
        conf.write("samtools:\t\t\t%s\t%s\n"%(Settings.SAMTOOLS, samtoolsMD5))
        conf.write("M-GCAT:\t\t\t%s\t%s\n"%(Settings.MGCAT, mgcatMD5))
        conf.write("METAGENEMARK:\t\t\t%s\t%s\n"%(Settings.METAGENEMARK, gmhmmpMD5))
        conf.write("FRAGGENESCAN:\t\t%s\t%s\n"%(Settings.FRAGGENESCAN, fraggenescanMD5))
        conf.write("PROKKA:\t\t\t%s\t%s\n"%(Settings.PROKKA, prokkaMD5))
        conf.write("SIGNALP:\t\t\t%s\t%s\n"%(Settings.SIGNALP, signalpMD5))
        conf.write("FCP:\t\t\t%s\t%s\n"%(Settings.FCP, fcpMD5))
        conf.write("PHMMER:\t\t\t%s\t%s\n"%(Settings.PHMMER, phmmerMD5))
        conf.write("PHYMM:\t\t\t%s\t%s\n"%(Settings.PHYMM, phymmMD5))
        conf.write("BLAST:\t\t\t%s\t%s\n"%(Settings.BLAST, blastMD5))
        conf.write("PHYLOSIFT:\t\t%s\t%s\n"%(Settings.PHYLOSIFT, phylosiftMD5))
        conf.write("SRA:\t\t\t\t%s\t%s\n"%(Settings.SRA, sraMD5))
        conf.write("FASTQC:\t\t\t%s\t%s\n"%(Settings.FASTQC, fastqcMD5))
        conf.write("EAUTILS:\t\t%s\t%s\n"%(Settings.EAUTILS, eautilsMD5))
        conf.write("KMERGENIE:\t\t%s\t%s\n"%(Settings.KMERGENIE, kmergenieMD5))
        conf.write("REPEATOIRE:\t\t%s\t%s\n"%(Settings.REPEATOIRE, repeatoireMD5))
        conf.write("KRONA:\t\t\t%s\t%s\n"%(Settings.KRONA, kronaMD5))
        conf.write("LAP:\t\t\t%s\t%s\n"%(Settings.LAP, lapMD5))
        conf.write("ALE:\t\t\t%s\t%s\n"%(Settings.ALE, aleMD5))
        conf.write("CGAL:\t\t\t%s\t%s\n"%(Settings.CGAL, cgalMD5))
        conf.write("REAPR:\t\t\t%s\t%s\n"%(Settings.REAPR, reaprMD5))
        conf.write("FRCBAM:\t\t\t%s\t%s\n"%(Settings.FRCBAM, frcMD5))
        conf.write("FREEBAYES:\t\t\t%s\t%s\n"%(Settings.FREEBAYES, freebayesMD5))
        conf.write("QUAST:\t\t\t%s\t%s\n"%(Settings.QUAST, quastMD5))
        conf.close()
    return Settings
def setFailFast(fail):
    """Toggle the module-wide fail-fast flag consulted by run_process.

    When True (the default), a failing pipeline command aborts the run;
    callers temporarily set False around commands allowed to fail.
    """
    global _failFast
    _failFast = fail
def run_process(settings,command,step=""):
    """Run a shell command on behalf of pipeline step `step`.

    settings -- the Settings singleton (rundir, task_dict, VERBOSE, ...)
    command  -- shell command line, executed via bash
    step     -- pipeline step name; chooses the working directory
                (<rundir>/<step>/out when it exists), the per-step log
                file, and the .started marker.  "" runs with no step
                context and echoes output to stdout instead of a log.

    Every command is appended to <rundir>/Logs/COMMANDS.log.  On a
    nonzero exit (when fail-fast is on and the command is not one of
    the whitelisted housekeeping commands), the tail of the logs is
    printed, the step's .ok marker is removed so the step re-runs on
    restart, and the bare `raise` aborts the step.
    NOTE(review): `raise` with no active exception raises a runtime
    error rather than re-raising anything -- presumably intentional as
    a hard abort; confirm before refactoring.
    """
    outf = ""
    workingDir = ""
    if step != "":
        workingDir = "%s/%s/out"%(settings.rundir, step)
        if not os.path.exists(workingDir):
            workingDir = ""
    step = string.upper(step)
    if not os.path.exists(settings.rundir+os.sep+"Logs"):
        # create Log directory
        os.system("mkdir %s/Logs"%(settings.rundir))
        # create the log of commands
        commandf = open(settings.rundir + os.sep + "Logs" + os.sep + "COMMANDS.log", 'w')
        commandf.close()
    # open command log file for appending (it should have been created above)
    commandf = open(settings.rundir + os.sep + "Logs" + os.sep + "COMMANDS.log", 'a')
    if step not in settings.task_dict:
        # first command of this step: announce it, truncate its log,
        # and drop a .started marker for restart detection
        print "Starting Task = %s.%s"%(step.lower(), step)
        dt = datetime.now().isoformat(' ')[:-7]
        commandf.write("|%s|# [%s]\n"%(dt,step))
        outf = open(settings.rundir+os.sep+"Logs"+os.sep+step+".log",'w')
        settings.task_dict.append(step)
        # create started file
        startedf = open(settings.rundir + os.sep + "Logs" + os.sep + step.lower() + ".started", 'w')
        startedf.close()
    else:
        outf = open(settings.rundir+os.sep+"Logs"+os.sep+step+".log",'a')
    if settings.VERBOSE or settings.OUTPUT_ONLY:
        print "*** metAMOS running command: %s\n"%(command)
    if settings.OUTPUT_ONLY == False:
        stdout = ""
        stderr = ""
        if workingDir == "":
            p = subprocess.Popen(command, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE,close_fds=True,executable="/bin/bash")
        else:
            p = subprocess.Popen(command, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE,close_fds=True,executable="/bin/bash", cwd=workingDir)
        fstdout,fstderr = p.communicate()
        rc = p.returncode
        # housekeeping commands (rm/ls/mv/...) are allowed to fail silently
        if rc != 0 and _failFast and "rm " not in command and "ls " not in command and "unlink " not in command and "ln " not in command and "mkdir " not in command and "mv " not in command and "cat" not in command:
            # flush all error/output streams
            outf.flush()
            outf.write(fstdout+fstderr)
            outf.close()
            commandf.flush()
            dt = datetime.now().isoformat(' ')[:-7]
            commandf.write("|%s| "%(dt)+command+"\n")
            commandf.close()
            global _atomicCounter
            # only the first failing worker prints the diagnostics
            if _atomicCounter.increment() == 0:
                print ERROR_RED+"*****************************************************************"
                print "*************************ERROR***********************************"
                print "During %s, the following command failed with return code %d:"%(step.lower(), rc)
                print ">>",command
                print ""
                print "*************************DETAILS***********************************"
                print "Last %d commands run before the error (%s/Logs/COMMANDS.log)"%(_NUM_LINES, settings.rundir)
                p = subprocess.Popen("tail -n %d %s/Logs/COMMANDS.log"%(_NUM_LINES, settings.rundir), shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,close_fds=True, executable="/bin/bash")
                (checkStdout, checkStderr) = p.communicate()
                val = p.returncode
                print "%s"%(checkStdout)
                print "Last %d lines of output (%s/Logs/%s.log)"%(_NUM_LINES, settings.rundir, step)
                p = subprocess.Popen("tail -n %d %s/Logs/%s.log"%(_NUM_LINES, settings.rundir, step), shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,close_fds=True, executable="/bin/bash")
                (checkStdout, checkStderr) = p.communicate()
                val = p.returncode
                print "%s"%(checkStdout)
                print "Please veryify input data and restart MetAMOS. If the problem persists please contact the MetAMOS development team."
                print "*************************ERROR***********************************"
                print "*****************************************************************"+ENDC
                # also make sure this step will be re-run on restart
                os.system("rm %s%sLogs%s%s.ok"%(settings.rundir, os.sep, os.sep, step.lower()))
                #sys.exit(rc)
                raise
        if step == "":
            print fstdout,fstderr
        else:
            outf.write(fstdout+fstderr)
            outf.close()
    dt = datetime.now().isoformat(' ')[:-7]
    commandf.write("|%s| "%(dt)+command+"\n")
    commandf.close()
def recruitGenomes(settings,query,genomeDir,outDir,stepName, top=1):
print "recruiting genomes.."
setFailFast(False)
run_process(settings, "%s/mgcat -M -r %s -d %s -o %s -p %d"%(settings.MGCAT,query,genomeDir,outDir,settings.threads), stepName.title())
setFailFast(True)
gtr = []
if os.path.exists("%s/recruited_genomes.lst"%(outDir)):
rg = open("%s/recruited_genomes.lst"%(outDir),'r')
rglist = []
cnt = 0
for genome in rg.xreadlines():
genome = genome.replace("\n","")
seq,mumi = genome.split(",")
if os.path.exists(seq):
rglist.append([float(mumi),seq])
cnt +=1
print "done! recruited %d genomes!"%(cnt)
rglist.sort()
i = 0
while i < len(rglist) and i < top:
gtr.append(rglist[i][1])
i+=1
else:
print "Error: recruiting references failed"
return gtr
def getProgramCitations(settings, programName, comment="#"):
    """Look up the display name and citation for an external program.

    settings    -- Settings singleton (METAMOS_DOC locates citations.rst)
    programName -- internal program identifier
    comment     -- comment marker stripped from citation-file lines

    Returns (commonName, citation).  On a missing citations file returns
    ("", ""); for an unknown program returns (programName, "UNKNOWN").
    The citation file is parsed once and cached in the module-level
    _PROG_NAME_DICT/_PUB_DICT tables.
    """
    global _PUB_DICT
    global _PROG_NAME_DICT
    cite = ""
    if len(_PUB_DICT) == 0:
        try:
            cite = open("%s/%s"%(settings.METAMOS_DOC, "citations.rst"), 'r')
        except IOError:
            #print "no citations file! cannot print!"
            return ("","")
        # lines are tab-separated: name, common name, optional citation
        for line in cite.xreadlines():
            (line, sep, commentLine) = line.partition(comment)
            splitLine = line.strip().split("\t")
            if len(splitLine) >= 3:
                name = splitLine[0]
                commonName = splitLine[1]
                citation = splitLine[2]
            elif len(splitLine) >= 2:
                name = splitLine[0]
                commonName = splitLine[1]
                citation = "NA"
            else:
                continue
            _PROG_NAME_DICT[name] = commonName
            _PUB_DICT[name] = citation
    try:
        return (_PROG_NAME_DICT[programName], _PUB_DICT[programName])
    except KeyError:
        return(programName, "UNKNOWN")
def getProgramParams(configDir, fileName, module="", prefix="", comment="#", separator=""):
    """Collect command-line parameters for a program from spec files.

    configDir -- metAMOS config directory (lowest priority)
    fileName  -- spec file name looked up in each candidate directory
    module    -- optional [section] to read; "" reads from the top
    prefix    -- string prepended to each option name in the output
    comment   -- comment marker stripped from lines
    separator -- joiner between an option's value tokens

    Returns a single string "<prefix><opt> <value> ..." suitable for
    appending to a command line.  Backslash-continued lines are merged.
    """
    # we process parameters in the following priority:
    # first: current directory
    # second: user home directory
    # third: metAMOS directory
    # a parameter specifeid in the current directory takes priority over all others, and so on down the line
    # (the list is scanned lowest-priority first so later files overwrite)
    dirs = [configDir + os.sep + "config", os.path.expanduser('~') + os.sep + ".metAMOS", os.getcwd()]
    optDict = {}
    cmdOptions = ""
    for curDir in dirs:
        spec = ""
        curFile = curDir + os.sep + fileName
        try:
            spec = open(curFile, 'r')
        except IOError as e:
            continue
        read = False
        if module == "":
            read = True
        for line in spec.xreadlines():
            (line, sep, commentLine) = line.partition(comment)
            line = line.strip()
            if line == "[" + module + "]":
                read = True
                continue
            elif read == True and line.startswith("["):
                # next section begins: stop reading this file
                break
            if read:
                if (line != ""):
                    if (line.endswith("\\")):
                        # merge backslash-continued lines, consuming from the same iterator
                        for next in spec:
                            next = next.strip()
                            line = line.replace("\\", "") + next.replace("\\", "")
                            if not next.endswith("\\"):
                                break
                    splitLine = line.split();
                    optDict[splitLine[0]] = separator.join(splitLine[1:]).strip()
        spec.close()
    for option in optDict:
        cmdOptions += prefix + option + " " + optDict[option] + " "
    return cmdOptions
def getAvailableMemory(settings):
    """Return available physical RAM in whole GB (0 when psutil is disabled).

    NOTE(review): relies on the deprecated psutil 0.x API
    (cached_phymem, phymem_usage) and Python-2-only syntax (backtick
    repr, long); will not work with modern psutil -- confirm the pinned
    psutil version before touching this.
    """
    if settings.nopsutil:
        return 0
    import psutil
    cacheusage=0
    if 'linux' in settings.OSTYPE.lower():
        # on Linux, count the page cache as reclaimable/free
        cacheusage = psutil.cached_phymem()
    # parse the free= and percent= fields out of the namedtuple's repr
    memusage = `psutil.phymem_usage()`.split(",")
    freemem = long(memusage[2].split("free=")[-1])+long(cacheusage)
    percentfree = float(memusage[3].split("percent=")[-1].split(")")[0])
    avram = (freemem/1000000000)
    return avram
def getSelectedAssembler(settings):
if settings.rundir == "":
print "Error: attempted to get selected assembler before initialization"
raise (JobSignalledBreak)
elif not os.path.exists("%s/Validate/out/%s.asm.selected"%(settings.rundir, settings.PREFIX)):
print "Error: attempted to get selected assembler before validation"
raise (JobSignalledBreak)
else:
return getCommandOutput("cat %s/Validate/out/%s.asm.selected"%(settings.rundir, settings.PREFIX), False)
def getSelectedKmer(settings):
    """Return the kmer size recorded by the Assemble step ("" when absent)."""
    kmerFile = "%s/Assemble/out/%s.kmer"%(settings.rundir, settings.PREFIX)
    if not os.path.exists(kmerFile):
        return ""
    fp = open(kmerFile, 'r')
    value = fp.read().strip()
    fp.close()
    return value
def getEstimatedGenomeSize(settings):
    """Return the genome size estimated by the Assemble step (0 when absent)."""
    sizeFile = "%s/Assemble/out/%s.genomesize"%(settings.rundir, settings.PREFIX)
    if not os.path.exists(sizeFile):
        return 0
    fp = open(sizeFile, 'r')
    size = int(fp.read().strip())
    fp.close()
    return size
def getVersion():
    """Return the MetAMOS version plus the supported workflow names.

    The version is parsed from the first README.md line of the form
    "# MetAMOS <version> README"; "UNKNOWN" when not found.
    """
    #look for pattern like: MetAMOS [VERSION] README
    version = "UNKNOWN"
    # frozen (PyInstaller) binaries carry README.md under sys._MEIPASS
    try:
        filePath = "%s%sREADME.md"%(sys._MEIPASS, os.sep)
    except Exception:
        filePath = "%s%sREADME.md"%(sys.path[0], os.sep)
    if os.path.exists(filePath):
        readme_file = open(filePath, 'r')
        for line in readme_file:
            if "# MetAMOS" in line:
                version = line.strip().split("# MetAMOS")[1]
                version = version.strip().split("README")[0]
                break
        readme_file.close()
    import workflow
    wfs = workflow.getSupportedWorkflowNames("%s/Utilities/workflows"%(sys.path[0]), False)
    return version + " workflows: " + ",".join(wfs)
def configureEnvironment(utilPath):
    # One-time process environment setup: extend PYTHONPATH / PERL5LIB,
    # sys.path, the shell environment and the dynamic-library search path so
    # the tools bundled under utilPath can be imported and executed.
    # _envCounter (module global) guards against repeating the work.
    global _envCounter
    if _envCounter.increment() == 0:
        if "PYTHONPATH" not in os.environ:
            os.environ["PYTHONPATH"] = ""
        else:
            ppath = os.environ["PYTHONPATH"]
            #os.environ["PYTHONPATH"] = ""
        # bundled python libraries, highest priority first
        os.environ["PYTHONPATH"]+=utilPath+os.sep+"python"+os.pathsep
        os.environ["PYTHONPATH"]+=utilPath+os.sep+"ruffus"+os.pathsep
        os.environ["PYTHONPATH"] += utilPath+os.sep+"python"+os.sep+"lib"+os.pathsep
        os.environ["PYTHONPATH"] += utilPath+os.sep+"python"+os.sep+"lib"+os.sep+"python"+os.pathsep
        os.environ["PYTHONPATH"] += utilPath+os.sep+"python"+os.sep+"lib64"+os.pathsep
        os.environ["PYTHONPATH"] += utilPath+os.sep+"python"+os.sep+"lib64"+os.sep+"python"+os.pathsep
        os.environ["PYTHONPATH"] += utilPath+os.pathsep
        # phylosift's bundled perl modules take precedence over any existing PERL5LIB
        if "PERL5LIB" not in os.environ:
            os.environ["PERL5LIB"] = INITIAL_SRC+os.sep+"phylosift"+os.sep+"lib"+os.sep
        else:
            os.environ["PERL5LIB"] = INITIAL_SRC+os.sep+"phylosift"+os.sep+"lib"+os.sep + os.pathsep + os.environ["PERL5LIB"]
        # sys._MEIPASS only exists inside a PyInstaller-frozen binary
        try:
            os.environ["PYTHONPATH"] += sys._MEIPASS + os.pathsep
            os.environ["PYTHONHOME"] = sys._MEIPASS + os.pathsep
        except Exception:
            pass
        try:
            sys._MEIPASS
            #if we are here, frozen binary
        except Exception:
            #else normal mode, add site dir
            import site
            site.addsitedir(utilPath+os.sep+"python"+os.sep+"lib"+os.sep+"python")
            site.addsitedir(utilPath+os.sep+"python"+os.sep+"lib64"+os.sep+"python")
        # mirror the PYTHONPATH additions onto sys.path for this process
        sys.path.append(utilPath)
        sys.path.append(utilPath+os.sep+"python")
        sys.path.append(utilPath+os.sep+"ruffus")
        sys.path.append(utilPath+os.sep+"python"+os.sep+"lib"+os.sep+"python")
        sys.path.append(utilPath+os.sep+"python"+os.sep+"lib64"+os.sep+"python")
        try:
            sys.path.append(sys._MEIPASS)
        except Exception:
            pass
        sys.path.append("/usr/lib/python")
        #remove imports from pth file, if exists
        nf = []
        # also export PYTHONPATH for child shells; shellv/cmdExists are module
        # globals describing the user's shell
        if 'bash' in shellv or cmdExists('export'):
            os.system("export PYTHONPATH=%s:$PYTHONPATH"%(utilPath+os.sep+"python"))
            os.system("export PYTHONPATH=%s:$PYTHONPATH"%(utilPath+os.sep+"python"+os.sep+"lib"+os.sep+"python"))
        elif cmdExists('setenv'):
            os.system("setenv PYTHONPATH %s:$PYTHONPATH"%(utilPath+os.sep+"python"))
            os.system("setenv PYTHONPATH %s:$PYTHONPATH"%(utilPath+os.sep+"python"+os.sep+"lib"+os.sep+"python"))
        else:
            print "Warning: could not set PYTHONPATH. Unknown shell %s, some functionality may not work\n"%(shellv)
        # finally set LD path
        libPath = os.path.abspath(utilPath + os.sep + ".." + os.sep + "lib")
        if os.path.exists(libPath):
            oldLDPath = ""
            needToAdd = True
            if "LD_LIBRARY_PATH" in os.environ:
                oldLDPath = os.environ["LD_LIBRARY_PATH"]
                if libPath in oldLDPath:
                    needToAdd = False
            elif "DYLD_FALLBACK_LIBRARY_PATH" in os.environ:
                oldLDPath = os.environ["DYLD_FALLBACK_LIBRARY_PATH"]
                if libPath in oldLDPath:
                    needToAdd = False
            if needToAdd:
                # set both the Linux and the OS X variable unconditionally
                os.environ["DYLD_FALLBACK_LIBRARY_PATH"] = libPath + os.pathsep + oldLDPath
                os.environ["LD_LIBRARY_PATH"] = libPath + os.pathsep + oldLDPath
def translateToSRAURL(settings, name):
    # Resolve an SRA accession to a download URL via the `srapath` tool.
    # Returns "" when srapath simply echoes the input back (i.e. `name` is a
    # local path rather than an accession).
    # NOTE(review): DYLD_FALLBACK_LIBRARY_PATH is temporarily unset around the
    # srapath call -- presumably to keep bundled OS X libraries from
    # interfering with the SRA toolkit; confirm before changing.
    # NOTE(review): `Settings.SRA` (capital S) is a module-level name, not the
    # `settings` parameter -- verify this is intentional.
    oldDyLD = ""
    if "DYLD_FALLBACK_LIBRARY_PATH" in os.environ:
        oldDyLD = os.environ["DYLD_FALLBACK_LIBRARY_PATH"]
        del os.environ["DYLD_FALLBACK_LIBRARY_PATH"]
    result = getCommandOutput("%s%ssrapath %s"%(Settings.SRA, os.sep, name), True)
    if result == name or result == "./%s"%(name) or result == os.path.abspath(name):
        result = ""
    if oldDyLD != "":
        os.environ["DYLD_FALLBACK_LIBRARY_PATH"] = oldDyLD
    return result
|
986,145 | 116e9973aaf03e09104f6308d754fc9ccea6f497 | import numpy as np
import nibabel as nib
from pathlib import Path
from collections import namedtuple
from utils.dataset_structuring import acdc, general
import constants
# (affine, header) metadata bundle carried alongside each loaded NIfTI volume
Info = namedtuple('Info', ['affine', 'header'])
def get_train_val_paths(dataset_name, k_split):
    """
    Split the samples of the given dataset directory into train and val sets.

    The ACDC split is fixed (exactly as done by Baumgarter et al.); the
    mmwhs test set always uses k_split=0; every other dataset honours the
    supplied k_split factor.
    """
    dataset_dir = Path.cwd() / 'datasets' / dataset_name
    if dataset_name == constants.acdc_root_dir or dataset_name == constants.acdc_test_dir:
        return acdc.acdc_train_val_split(dataset_dir)
    if dataset_name == constants.mmwhs_test:
        return general.train_val_split(dataset_dir, k_split=0)
    return general.train_val_split(dataset_dir, k_split=k_split)
def read_img_mask_pair(image_path, dset_name=constants.acdc_root_dir,
                       seg_type=constants.multi_class_seg):
    """
    Args:
        image_path - pathlib.Path: path to image
        dset_name - string: since the original datasets have different structures (and i don't want
        to modify them, paths will have to be contructed accordingly)
        seg_type - string: multi class or whole heart
    Finds the corresponding ground truth label for each input
    Loads files
    return:
        (image ndarray as float32, mask ndarray, Info metadata of the mask)
    """
    if constants.imatfib_root_dir in dset_name:
        # imatfib: labels live in a sibling "gt/<seg-type>" directory
        mask_path = image_path.parent.parent / 'gt'
        if seg_type == constants.whole_heart_seg:
            mask_path = mask_path / 'oneregion'
        else:
            mask_path = mask_path / seg_type
        mask_path = mask_path / (image_path.stem + image_path.suffix)
    elif constants.mmwhs_root_dir in dset_name:
        # mmwhs: label "<id>.<rest>" is named "<id>mapped.<rest>"
        # (the previous ''.join(...) wrapped a plain string and was a no-op)
        parts = image_path.stem.split('.')
        name = parts[0] + 'mapped.' + parts[1]
        mask_path = image_path.parent.parent / 'ground-truth' / \
            seg_type / (name + image_path.suffix)
    else:
        # acdc: label file is "<name>_gt.nii.gz" alongside the image
        name_with_ext = image_path.parts[-1]
        only_name = name_with_ext.split('.')[0]
        gt_name_with_ext = only_name + '_gt.nii.gz'
        mask_path = Path(str(image_path).replace(name_with_ext, gt_name_with_ext))
    image, _ = read_image(image_path, type="pred")
    mask, info = read_image(mask_path)
    return image, mask, info
def read_image(image_path, type="gt"):
    """Load a NIfTI volume as a numpy array plus its affine/header metadata.

    type="pred" casts the voxels to float32 (model input); the default "gt"
    keeps the on-disk dtype (label volumes).
    NOTE: `type` shadows the builtin, but renaming it would break keyword callers.
    """
    image_info = nib.load(image_path)
    image = np.array(image_info.dataobj)
    if type == "pred":
        image = image.astype(np.float32)
    info = Info(image_info.affine, image_info.header)
    return image, info
|
986,146 | 6d24e55d11a7a734568fc1a934e028a27d5852d1 | def comb(k, start):
global card_comb
if k == 3:
card_comb.append(choose.copy())
return
for i in range(start, len(cards)):
choose.append(cards[i])
comb(k + 1, i + 1)
choose.pop()
# Pick the 3-card sum that is as large as possible without exceeding m.
# (n is read from the input line but the card list length is used instead.)
n, m = map(int, input().split())
cards = list(map(int, input().split()))
card_comb = []
choose = []
comb(0, 0)
max_value = 0
for one_list in card_comb:
    sum_value = sum(one_list)
    # keep the best total that still fits under the limit
    if max_value < sum_value <= m:
        max_value = sum_value
print(max_value)
|
986,147 | c05e1c3142d99e92478cbb30ae97b403677aa9f0 | # pyspark --executor-memory 3g --num-executors 12 --packages com.databricks:spark-avro_2.11:4.0.0
from pyspark import SparkContext
from pyspark.python.pyspark.shell import spark
from pyspark.sql import SQLContext
# NOTE(review): `spark` comes from pyspark.python.pyspark.shell (a REPL-only
# module) while a separate SparkContext is also created here -- presumably
# only one of the two is actually needed; verify before reuse.
sc = SparkContext(appName="Parquet2Avro")
sqlContext = SQLContext(sc)
# sqlContext.setConf('spark.driver.extraClassPath', '/usr/spark-2.3.0/jars/avro-1.8.2.jar')
# sqlContext.setConf('spark.executor.extraClassPath', '/usr/spark-2.3.0/jars/avro-1.8.2.jar')
# Convert the "part" table from Parquet to Avro on HDFS, then re-read the
# Avro copy to confirm the schema survived the round trip.
part = spark.read.format('parquet').load("hdfs://namenode:8020/hossein-parquet-data/part.parquet")
part.write.format("com.databricks.spark.avro").mode('overwrite') \
    .save("hdfs://namenode:8020/hossein-avro-data/part.avro")
part_avro = spark.read.format("com.databricks.spark.avro").load("hdfs://namenode:8020/hossein-avro-data/part.avro")
print(part_avro.schema)
986,148 | 1a5106db9557b40616bc75d268df4bdaed23faf1 | from django import forms
from users.models import Profile
class RegisterForm(forms.ModelForm):
    """Registration form for a user's Profile (first/last name + bio).

    NOTE(review): the ``signup(request, user)`` hook matches the
    django-allauth custom signup-form convention -- confirm the installed
    auth package before changing the signature.
    """
    class Meta:
        model = Profile
        fields = ('first_name', 'last_name', 'bio',)
        # NOTE(review): attribute written as "placeHolder"; browsers treat
        # HTML attribute names case-insensitively, so this still renders.
        widgets = {
            'first_name': forms.TextInput(attrs={'placeHolder':'First Name'}),
            'last_name': forms.TextInput(attrs={'placeHolder': 'Last Name'}),
            'bio': forms.TextInput(attrs={'placeHolder': 'Bio'}),
        }

    def signup(self, request, user):
        # copy the submitted names onto the auth user...
        user.first_name = self.cleaned_data['first_name']
        user.last_name = self.cleaned_data['last_name']
        user.save()
        # ...and duplicate them (plus bio) onto a fresh Profile row
        profile = Profile()
        profile.user = user
        profile.first_name = self.cleaned_data['first_name']
        profile.last_name = self.cleaned_data['last_name']
        profile.bio = self.cleaned_data['bio']
        profile.save()
986,149 | 6db4ffc5e2938c438ae8aff486db3e5eeeba8495 | '''
cachenone.py
'''
import heapq
import numpy as np
from scipy.stats import entropy
from sklearn.ensemble import RandomForestClassifier
import helper
class CacheNone:
    """Feature store for a partition of candidate pairs with no tuple cache:
    every feature computation re-fetches the records from the source tables."""

    def __init__(self):
        # pairs assigned to this node
        self.pairs = None  # list of (ltable_id, rtable_id)
        self.features = None  # numpy float32 array, shape (n_pairs, n_features)

    def prepare(self, table_A, table_B, feature_info, pairs):
        """Remember the candidate pairs and allocate a zeroed feature matrix."""
        self.pairs = pairs
        self.features = np.zeros((len(self.pairs), len(feature_info)), dtype=np.float32)

    def compute_features(self, required_features, feature_info, table_A, table_B):
        """Fill self.features for the given feature columns.

        For each pair, fetch both tuples and evaluate every required
        similarity feature (optionally tokenizing both attributes first).
        NaN similarities are stored as 0.
        """
        if len(required_features) == 0:
            return None
        # no cache, therefore fetch each pair, then compute required features
        for row, pair in enumerate(self.pairs):
            ltuple = table_A.loc[pair[0]]
            rtuple = table_B.loc[pair[1]]
            for f in required_features:
                fs = feature_info.iloc[f]
                lattr = getattr(fs, 'left_attribute')
                rattr = getattr(fs, 'right_attribute')
                ltok = getattr(fs, 'left_attr_tokenizer')
                rtok = getattr(fs, 'right_attr_tokenizer')
                # BUG FIX: the lookup tables live in the imported `helper`
                # module; the previous `nodes.helper.*` raised NameError.
                simfunc = helper.sim_name2func[getattr(fs, 'simfunction')]
                if ltok is None:
                    value = simfunc(ltuple[lattr], rtuple[rattr])
                else:
                    ltokfunc = helper.tok_name2func[ltok]
                    rtokfunc = helper.tok_name2func[rtok]
                    value = simfunc(ltokfunc(ltuple[lattr]), rtokfunc(rtuple[rattr]))
                if np.isnan(value):
                    value = 0
                self.features[row, f] = value

    def apply(self, rf: RandomForestClassifier, k: int, exclude_pairs: set) -> list:
        """Return the k pairs with the highest prediction entropy.

        Pairs listed in exclude_pairs are skipped; the result is a list of
        (pair, entropy) tuples.
        """
        # prediction
        proba = rf.predict_proba(self.features)
        entropies = np.transpose(entropy(np.transpose(proba), base=2))
        # select top k (idx renamed so it no longer shadows parameter k)
        candidates = [(self.pairs[idx], v) for idx, v in enumerate(entropies)
                      if self.pairs[idx] not in exclude_pairs]
        top_k = heapq.nlargest(k, candidates, key=lambda p: p[1])
        return top_k
986,150 | c9787a6254b47238c59f177ffc57b0087000b410 | import datetime
import json
from decimal import Decimal
import pendulum
def json_dumps(obj):
    """Serialize *obj* to JSON with fallbacks for non-native types:
    datetimes (incl. pendulum) become POSIX timestamps, Decimals become
    floats, and anything else is stringified."""
    def _fallback(value):
        if isinstance(value, (pendulum.Pendulum, datetime.datetime)):
            return value.timestamp()
        if isinstance(value, Decimal):
            return float(value)
        return str(value)
    return json.dumps(obj, default=_fallback)
986,151 | aae63355b5efae5c1cb53bd45c98bf39f7a22f75 | from pigeon.channels import Channel
from pigeon.utils import json_dumps, json_loads
class InvalidActionError(Exception):
    """Raised when an incoming action payload cannot be parsed or is
    missing/using unsupported fields."""
    pass
class Action(object):
    """A subscribe/unsubscribe command for a channel, parsed from JSON."""

    # maps wire-level action types to handler method names
    type_mapper = {
        'subscribe': 'on_subscribe',
        'unsubscribe': 'on_unsubscribe',
    }

    def __init__(self, action_type, channel):
        self.channel = Channel.find_or_create(channel)
        self.type = action_type
        # bind perform to the matching on_* method
        self.perform = getattr(self, self.type_mapper.get(self.type))

    @classmethod
    def from_json(cls, json):
        """Build an Action from a JSON payload; raise InvalidActionError on
        malformed JSON or a missing/unknown type or channel."""
        try:
            payload = json_loads(json)
        # BUG FIX: "except TypeError, ValueError" bound the TypeError
        # instance to the name ValueError instead of catching both types.
        except (TypeError, ValueError):
            raise InvalidActionError("Invalid Action")
        try:
            action_type = payload['type']
            channel = payload['channel']
        except KeyError:
            raise InvalidActionError("Action must contain "
                                     "a type and a channel.")
        if action_type not in cls.type_mapper:
            # BUG FIX: this is a classmethod, so `self` did not exist here.
            raise InvalidActionError("Action type must "
                                     "be one of %r" % cls.type_mapper.keys())
        return cls(action_type, channel)

    def to_json(self):
        return json_dumps({
            'type': self.type,
            'channel': self.channel,
        })

    def on_subscribe(self, handler):
        self.channel.subscribe(handler)

    def on_unsubscribe(self, handler):
        self.channel.unsubscribe(handler)
986,152 | 47e4cb0eb544ee98663ebccc1d2382c60c095e69 | # pylint: disable=missing-docstring
from dpgv4 import create_screenshot, create_thumbnail
from .util import sample_filename
def test_screenshot() -> None:
    # Golden-file check: the frame grabbed 10 seconds into the sample video
    # must be byte-identical to the stored reference screenshot.
    input_file = sample_filename("Test Image - 2141.mp4")
    expected_output_file = sample_filename("Test Image - 2141.screenshot")
    with open(input_file, "rb") as inp, open(expected_output_file, "rb") as expected:
        assert create_screenshot(inp, 10) == expected.read()
def test_thumbnail() -> None:
    # Golden-file check: thumbnail generated from the reference screenshot
    # must be byte-identical to the stored reference thumbnail.
    # this is not a good test, because a change in PIL image scaling algorithm may cause
    # pixel differences and then the test will spuriously fail
    input_file = sample_filename("Test Image - 2141.screenshot")
    expected_output_file = sample_filename("Test Image - 2141.thumbnail")
    with open(input_file, "rb") as inp, open(expected_output_file, "rb") as expected:
        assert create_thumbnail(inp.read()) == expected.read()
|
986,153 | 16ffa7e540b2a45213341d912cfecb8d1a3b2bae | import time
import selenium
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common import exceptions
from selenium.webdriver.chrome.options import *
import unidecode
# Scrape the live-search results for "content writer needed" on Twitter and
# append (name, profile link, tweet text) rows to ../twitter.csv, scrolling
# forever.  NOTE(review): the CSS-hash XPaths below are tied to a specific
# Twitter frontend build and break whenever the site is redeployed.
url="https://twitter.com/search?q=content%20writer%20needed&src=typed_query&f=live"
driver=webdriver.Chrome(executable_path='D:\drivers\chromedriver.exe')
driver.get(url)
t_out=5
try:
    element_pres = EC.presence_of_element_located((By.XPATH, '//div[@class="css-901oao r-1awozwy r-13gxpu9 r-6koalj r-18u37iz r-16y2uox r-1qd0xha r-a023e6 r-vw2c0b r-1777fci r-eljoum r-dnmrzs r-bcqeeo r-q4m81j r-qvutc0"]'))
    WebDriverWait(driver, t_out).until(element_pres)
except TimeoutException:
    print("Page not loaded")
with open('../twitter.csv', 'a') as f:
    f.write("Name,Link,Post\n")
new_height=0
time.sleep(2)
while True:
    tweets = driver.find_elements_by_xpath('//div[@class="css-1dbjc4n r-1iusvr4 r-16y2uox r-1777fci r-1mi0q7o"]')
    print(len(tweets))
    for tweet in tweets:
        handle_link=tweet.find_element_by_xpath('.//a[@class="css-4rbku5 css-18t94o4 css-1dbjc4n r-1loqt21 r-1wbh5a2 r-dnmrzs r-1ny4l3l"]')
        handle_name=tweet.find_element_by_xpath('.//a[@class="css-4rbku5 css-18t94o4 css-1dbjc4n r-1loqt21 r-1wbh5a2 r-dnmrzs r-1ny4l3l"]//div[@class="css-1dbjc4n r-1awozwy r-18u37iz r-dnmrzs"]')
        post=tweet.find_element_by_xpath('.//div[@class="css-901oao r-hkyrab r-1qd0xha r-a023e6 r-16dba41 r-ad9z0x r-bcqeeo r-bnwqim r-qvutc0"]')
        print(handle_name.text)
        print(handle_link.get_attribute("href"))
        print(post.text)
        # commas would corrupt the hand-rolled CSV row, so strip them
        post_refine=post.text.replace(","," ")
        print(post_refine)
        # only keep pure-ASCII tweets/names (unidecode is a no-op on ASCII)
        if unidecode.unidecode(post.text)==post.text and unidecode.unidecode(handle_name.text)==handle_name.text:
            with open('../twitter.csv', 'a') as f:
                f.write(handle_name.text +","+handle_link.get_attribute("href")+","+post_refine+"\n")
    # scroll one page-height down and wait for more tweets to load
    last_height = driver.execute_script("return document.body.scrollHeight")
    print("nh:",new_height)
    print("lh:",last_height)
    driver.execute_script("window.scrollTo("+str(new_height)+","+str(last_height)+");")
    time.sleep(5)
    new_height = last_height
986,154 | fddcbcd9c19926856b326190da4570d4dc553af0 | """ Close to possible approach """
# _end_ = '_end_'
#
#
# class Solution(object):
# # @param A : string
# # @param B : list of strings
# # @return an integer
# def wordBreak(self, A, B):
# if len(A) == 0:
# return 0
# if len(B) == 0:
# return 0
# trie = self.get_trie(B)
# print trie['b']['_end_']
#
# print trie
#
# root = trie
# l = []
# x = []
# for i, letter in enumerate(A):
# if letter in root:
# root = root[letter]
# l.append(letter)
# if _end_ in root:
# if i+1 < len(A) and A[i+1] in root:
# pass
# else:
# x.extend(l)
# l = []
# root = trie
#
# print ''.join(x)
# print x
# print A
#
# if ''.join(x) == A:
# return 1
# else:
# return 0
#
# @staticmethod
# def get_trie(words):
# root = {}
#
# for word in words:
# current_dict = root
# for letter in word:
# current_dict = current_dict.setdefault(letter, {})
#
# current_dict[_end_] = _end_
#
# return root
""" Solution Approach """
class Solution(object):
    # @param A : string
    # @param B : list of strings
    # @return an integer
    def wordBreak(self, A, B):
        """Return a truthy value iff A can be segmented into words from B.

        Builds a trie over B for prefix lookups and memoizes over the start
        index.  (The previous ``[[-1]*n]*n`` memo aliased every row to the
        same list and was left disabled, making the search exponential on
        adversarial inputs.)
        """
        self.B = B
        self.dp = {}  # start index -> bool, memo over suffixes of A
        self.trie = self.get_trie(B)
        return self.words(0, A)

    def words(self, index, string):
        """True iff string[index:] can be segmented into dictionary words."""
        if index == len(string):
            return True
        if index in self.dp:
            return self.dp[index]
        result = False
        for end in range(index + 1, len(string) + 1):
            # try every dictionary word that starts at `index`
            if self.search_trie(string[index:end]):
                result = self.words(end, string)
                if result:
                    break
        self.dp[index] = result
        return result

    def search_trie(self, word):
        """True iff `word` is a complete entry in the trie."""
        root = self.trie
        for letter in word:
            if letter not in root:
                return False
            root = root[letter]
        return '_end_' in root

    @staticmethod
    def get_trie(words):
        """Build a nested-dict trie; the '_end_' key marks a complete word."""
        root = {}
        for word in words:
            current_dict = root
            for letter in word:
                current_dict = current_dict.setdefault(letter, {})
            current_dict['_end_'] = '_end_'
        return root
s = 'myinterviewtrainer'
dict = ["trainer", "my", "interview"]
# s = 'baaaaabbabaaababaabbbba'
# dict = [ "aaa", "abbabbbabb", "bbaaababa", "aba", "bab", "bba", "baa", "aa", "baabaaaaa", "ababbaaaa", "aaaaaa", "b", "aaabb", "aaaaba", "babbbaaba", "b", "babbb", "bbaaaaa", "bbaaa", "baaaaaa", "aa", "aaabba", "baaabaa", "bbabbab", "abbb", "bbabbb", "aaabaaa", "a", "aaabbabbaa", "baaaaab", "baabbbab", "ba", "baab" ]
#
# s = "aaaabbbbababbaababbbabbabaaabbaaaabbababbaabababaabbbababaaababbbbbaaababababbbbbaaaabbabbabaabbababbaaaaabbaababababbbaaaabaaabaabaababbabaaabaaababababbaabbbbbaabbabbaaaaabbabbbabbbbaaaaabababbaababbabbbabbbababaabaababbbaaaaababababbaabaabaabbbbaaabbbbbbababbabbabaabbaababbbbbbabaababbbbababbabbbbbbabbbbbbaabbbbbbabaabbabaabbbaaaababaababbbabaabbbbabbbbbbbababbaabbbaaabaabaabaabbbab"
# dict = [ "bbabaaaaba", "abbaa", "bbabbaaba", "bbaabbab", "ab", "b", "abaaaababa", "aa", "babaa", "aaa", "baa", "ab", "baaabbbba", "aaaba", "a", "bbaababaab", "baaaaaaa", "aaab", "bbabbbbaaa", "ab", "aaa", "bbb", "a", "bab", "aaaaaa", "aa", "b", "ababaabbb", "bbb", "babbbbba", "bbabb", "ab", "a", "baabaabbb", "aaabab", "aba", "a", "babba", "aaaababbbb", "b", "baab", "baabbbb", "babbb", "ababaa", "babbaa", "abaaa", "babababab", "bab", "aa", "abbaa", "abb", "bbbaaaaba", "bbbabababb", "aaaa", "ba", "bbaabbbaab", "bababb", "bbbb", "baaabbaab", "bababbbaaa", "bbaab", "ab", "bbbaaa", "aaaa", "aab", "baabaabaa", "bb", "ba", "bbbb", "abbaababab", "baaaaaa", "baaabbbb", "baab" ]
# s = "babbbbaabbaabaabaabaaabaababaaaabbbbbabbaabbabbbbababaabbabbbabbbaaabaababaaaababbbbabbbbbbaaaabaaabaabbbaaaabaaabbbaabababbbaaaabbabbbabaaabbbabaaabbbaaaaaabaabbabbaabbbbbaababbbbabbabbaabbbabaababaaaabbaabbbaabaabbbbbbaabbbaaaabbaaaaaabaabbaababbbabbbbbbaabbaabbbabbbaabbbaaaabbbaaaabbbabbaababaaabbababbaabbabbabaabbbbaaaabbaababababbbbbabbbbabbaaabbaabaaaaabbaaaaaaaaaaababaaabbbaababbbbbbbabbababbaabbaaaababbbabbaaabbbbbabbbaabbaaaaabbbbbbabbbbbabbabbbabbabbababbabaabaabbabababbababaababbaababbabaabbaaaabbbaa"
# dict = [ "bbba", "aaaa", "abaa", "aba", "aabaaa", "baabbaab", "bbbabbbaaa", "abaabbbbba", "abaa", "aba", "bbabbbbabb", "aab", "baaabbbaaa", "b", "baba", "aaba", "baaba", "abb", "aaaa", "baaabbbaa", "ab" ]
s = "aababaaabaaababbbabbbaabababaaabbaabaabbabbaabbbbbbbabbbbabaaabaabaabbaaaaabbabaababbbabbbbbbaaaabbbaaaaaabaaaaaabbbbbbbabbbbbbbbaaabaaababbbaaaabaaaabaaaabbabbbabaabbabbabaaaabbabaaabbabbabbbabbabbaabbbabaabaabbbbbbbaabababbbbbbababbbaabaabbbabababbbbbaaaababbbabaaabaabbaababbbabbbbbaabbaaaaabbbbbaaaaaaaaaaaabbabbbabbaaabaaaaaabaabababaabaaaabaaabbbbbaaabbaabbababbabbbbaabaabaabaaaabbbaababbaabbbbbabaaababbabbbabbbbbabaababbbbbaabbbbabaabbabbababaaaabbbbabbbaaaabaabbbbaaaaababaaabaabbabaababbabbbababaaababbaabbbaaabaabbbaabbbbbbaaabaabbbbbabaaababaaabbbbbbaaaabababaaabbbbbbaabbaaabbbabaabbabababbabaabbaaabbaaabbaabbbbbababbaabbabbb"
dict = [ "baaaaaabba", "babbaababb", "abb", "bababaabab", "baaa", "ab", "ab", "bb", "abbaaaa", "bbababa", "bbbbbbab", "abbaaabba", "aaaabbab", "abaaab", "babab", "aabaaab", "aabaabbabb", "aa", "bb", "ab", "a", "a", "bbaaab", "aba", "ba", "bbabbaabab", "aaabbbbbb", "abbaaaabbb", "aabaabbaa", "bbba", "abbabbba", "abbbbabb", "bbaaba", "abbbbaab", "bba", "bbbbaabba", "ababbabaab", "baabba", "ababbaabb", "bbaab", "a", "bbba", "aaaa", "aaabbbabba", "bab", "baaaabaa", "ab", "aaabbaab", "bab", "aa", "ababababab", "aabbaaaba", "abbaaba", "bbaabaa" ]
print Solution().wordBreak(s, dict) |
986,155 | e240380dda3b30c4632258e4b4eb8a5dc4c14360 | from cumulusci.core.exceptions import CumulusCIException
class GithubIssuesError(CumulusCIException):
    """Base error for failures while working with GitHub issues."""
    pass
class LastReleaseTagNotFoundError(CumulusCIException):
    """Raised when the tag of the last release cannot be located."""
    pass
|
986,156 | ff55c33641148fc01c096f4fa60e9b76ac6f4047 | import numpy as np
from scipy.signal import gaussian
def add_distortions(spectra: np.ndarray, level: float = 0.1, seed: int = 42) -> np.ndarray:
    """
    Superimpose smooth random sinusoidal baseline distortions on every spectrum.

    :param spectra: (N, M) array, M-1 spectra with N wavenumbers, wavenumbers in first column
    :param level: Max height of the added distortion, relative to normalized intensity
    :param seed: Random seed
    :return: the altered spectra, shape (N, M) array
    """
    np.random.seed(seed)
    result: np.ndarray = spectra.copy()
    wavenumbers = result[:, 0]
    for col in range(1, result.shape[1]):
        curve = result[:, col].copy()
        # normalize to [0, 1] so `level` is relative to each spectrum's range
        lo, hi = curve.min(), curve.max()
        curve -= lo
        curve /= (hi - lo)
        # random smooth baseline: a fundamental sine plus 1-4 harmonics
        weight = np.random.rand() * level
        freq = 5e-5 + np.random.rand() * 1e-3
        offset = np.random.rand() * 1000
        baseline = np.sin(wavenumbers * freq + offset)
        for j in range(np.random.randint(1, 5)):
            baseline += 0.5 * np.random.rand() * np.sin(wavenumbers * freq * (j + 3) + (j + 1) * offset)
        baseline -= baseline.min()
        baseline /= baseline.max()
        # blend baseline into the spectrum, then restore the original scale
        curve = (1 - weight) * curve + weight * baseline
        curve *= (hi - lo)
        curve += lo
        result[:, col] = curve
    return result
def add_ghost_peaks(spectra: np.ndarray, maxLevel: float = 0.1, seed: int = 42) -> np.ndarray:
    """
    Add one random broad Gaussian "ghost" peak to every spectrum.

    :param spectra: (N, M) array, M-1 spectra with N wavenumbers, wavenumbers in first column
    :param maxLevel: Max peak height, relative to normalized intensity
    :param seed: Random seed
    :return: the altered spectra, shape (N, M) array
    """
    np.random.seed(seed)
    result: np.ndarray = spectra.copy()
    numPoints = result.shape[0]
    widthLo, widthHi = round(numPoints * 0.6), round(numPoints * 0.9)
    stdLo, stdHi = 20, 40
    for col in range(1, result.shape[1]):
        curve = result[:, col].copy()
        # normalize so maxLevel is relative to the spectrum's dynamic range
        lo, hi = curve.min(), curve.max()
        curve -= lo
        curve /= (hi - lo)
        # Gaussian bump of random width, std and position
        width = int(round(np.random.rand() * (widthHi - widthLo) + widthLo))
        std = np.random.rand() * (stdHi - stdLo) + stdLo
        bump = gaussian(width, std) * np.random.rand() * maxLevel
        start = int(round(np.random.rand() * (len(curve) - width)))
        curve[start:start + width] += bump
        # restore the original intensity scale
        curve *= (hi - lo)
        curve += lo
        result[:, col] = curve
    return result
def add_noise(spectra: np.ndarray, maxLevel: float = 0.1, seed: int = 42) -> np.ndarray:
    """
    Multiply all intensities by uniform random noise in [1-maxLevel/2, 1+maxLevel/2).

    :param spectra: (N, M) array, M-1 spectra with N wavenumbers, wavenumbers in first column
    :param maxLevel: max Level of noise
    :param seed: random seed
    :return: new Spectra (N, M) array
    """
    np.random.seed(seed)
    noisy = spectra.copy()
    numRows, numCols = noisy.shape
    # wavenumber column (index 0) stays untouched
    noisy[:, 1:] *= (1 - maxLevel / 2) + np.random.rand(numRows, numCols - 1) * maxLevel
    return noisy
|
986,157 | c0832b4f081c43cc37b6cf3332f666eb4ad8f9db | # -*- coding: utf-8 -*-
from sage.all import shuffle, randint, ceil, next_prime, log, cputime, mean, variance, set_random_seed, sqrt
from copy import copy
from sage.all import GF, ZZ
from sage.all import random_matrix, random_vector, vector, matrix, identity_matrix
from sage.stats.distributions.discrete_gaussian_integer import DiscreteGaussianDistributionIntegerSampler \
as DiscreteGaussian
from estimator.estimator import preprocess_params, stddevf
def gen_fhe_instance(n, q, alpha=None, h=None, m=None, seed=None):
    """
    Generate FHE-style LWE instance

    :param n: dimension
    :param q: modulus (rounded up to the next prime)
    :param alpha: noise rate (default: 8/q)
    :param h: hamming weight of the secret (default: dense ternary secret)
    :param m: number of samples (default: n)
    :param seed: optional random seed for reproducibility
    :returns: matrix ``A`` over GF(q) and vector ``c = A*s + e``
    """
    if seed is not None:
        set_random_seed(seed)
    q = next_prime(ceil(q)-1, proof=False)
    if alpha is None:
        alpha = ZZ(8)/q
    n, alpha, q = preprocess_params(n, alpha, q)
    # discrete Gaussian width derived from the noise rate
    stddev = stddevf(alpha*q)
    if m is None:
        m = n
    K = GF(q, proof=False)
    A = random_matrix(K, m, n)
    if h is None:
        # dense ternary secret with entries in {-1, 0, 1}
        s = random_vector(ZZ, n, x=-1, y=1)
    else:
        # sparse secret: exactly h non-zero entries drawn from {-1, +1}
        S = [-1, 1]
        s = [S[randint(0, 1)] for i in range(h)]
        s += [0 for _ in range(n-h)]
        shuffle(s)
        s = vector(ZZ, s)
    c = A*s
    # add independent discrete Gaussian noise to every sample
    D = DiscreteGaussian(stddev)
    for i in range(m):
        c[i] += D()
    return A, c
def dual_instance0(A):
    """
    Generate dual attack basis.

    Stacks the (ZZ-lifted) left kernel of ``A`` with q-scaled unit vectors,
    giving a basis of vectors y satisfying y*A == 0 mod q.

    :param A: LWE matrix A
    """
    q = A.base_ring().order()
    B0 = A.left_kernel().basis_matrix().change_ring(ZZ)
    m = B0.ncols()
    n = B0.nrows()
    r = m-n
    B1 = matrix(ZZ, r, n).augment(q*identity_matrix(ZZ, r))
    B = B0.stack(B1)
    return B
def dual_instance1(A, scale=1):
    """
    Generate dual attack basis for LWE normal form.

    :param A: LWE matrix A
    :param scale: multiply the right-hand n columns of the basis by this
        factor to rebalance the lattice
    """
    q = A.base_ring().order()
    n = A.ncols()
    # inverse of the first n rows of A, lifted to the integers
    B = A.matrix_from_rows(range(0, n)).inverse().change_ring(ZZ)
    L = identity_matrix(ZZ, n).augment(B)
    L = L.stack(matrix(ZZ, n, n).augment(q*identity_matrix(ZZ, n)))
    # scale the right half of every basis vector
    for i in range(0, 2*n):
        for j in range(n, 2*n):
            L[i, j] = scale*L[i, j]
    return L
def balanced_lift(e):
    """
    Lift e mod q to integer such that result is between -q/2 and q/2

    :param e: a value or vector mod q
    :returns: an integer (or integer vector, lifted component-wise)
    """
    from sage.rings.finite_rings.integer_mod import is_IntegerMod
    q = e.base_ring().order()
    if is_IntegerMod(e):
        e = ZZ(e)
        if e > q//2:
            e -= q
        return e
    else:
        # recurse over vector entries
        return vector(balanced_lift(ee) for ee in e)
def apply_short1(y, A, c, scale=1):
    """
    Compute `y*A`, `y*c` where y is a vector in the integer row span of
    ``dual_instance(A)``

    :param y: (short) vector in scaled dual lattice
    :param A: LWE matrix
    :param c: LWE vector
    :param scale: scale factor that was applied to the rhs of the lattice
    :returns: balanced lifts of ``y*A`` and ``y*c``
    """
    m = A.nrows()
    # undo the scaling and keep only the last m coordinates of y
    y = vector(ZZ, 1/ZZ(scale) * y[-m:])
    a = balanced_lift(y*A)
    e = balanced_lift(y*c)
    return a, e
def log_mean(X):
    # log2 of the mean absolute value of X
    return log(mean([abs(x) for x in X]), 2)
def log_var(X):
    # log2 of the standard deviation of X
    return log(variance(X).sqrt(), 2)
def silke(A, c, beta, h, m=None, scale=1, float_type="double"):
    """
    BKZ-reduce the (scaled) dual basis of ``A``, then repeatedly
    rerandomize + LLL to harvest many short vectors y, collecting the
    balanced lifts of ``y*c`` as samples.

    :param A: LWE matrix
    :param c: LWE vector
    :param beta: BKW block size
    :param h: hamming weight of the secret (used for the predicted sizes)
    :param m: number of samples to consider
    :param scale: scale rhs of lattice by this factor
    :param float_type: floating-point type used for the GSO computations
    :returns: list ``E`` of harvested samples, one per short vector
    """
    from fpylll import BKZ, IntegerMatrix, LLL, GSO
    from fpylll.algorithms.bkz2 import BKZReduction as BKZ2
    if m is None:
        m = A.nrows()
    # build and reduce the scaled dual basis
    L = dual_instance1(A, scale=scale)
    L = IntegerMatrix.from_matrix(L)
    L = LLL.reduction(L, flags=LLL.VERBOSE)
    M = GSO.Mat(L, float_type=float_type)
    bkz = BKZ2(M)
    t = 0.0
    param = BKZ.Param(block_size=beta,
                      strategies=BKZ.DEFAULT_STRATEGY,
                      auto_abort=True,
                      max_loops=16,
                      flags=BKZ.VERBOSE|BKZ.AUTO_ABORT|BKZ.MAX_LOOPS)
    bkz(param)
    t += bkz.stats.total_time
    # keep a copy of the best (smallest-first-vector) basis seen so far
    H = copy(L)
    import pickle
    # NOTE(review): file handle is never closed, and any existing dump with
    # the same dimension/block size is overwritten.
    pickle.dump(L, open("L-%d-%d.sobj"%(L.nrows, beta), "wb"))
    E = []       # harvested samples
    Y = set()    # distinct short vectors found
    V = set()    # distinct norms observed
    y_i = vector(ZZ, tuple(L[0]))
    Y.add(tuple(y_i))
    E.append(apply_short1(y_i, A, c, scale=scale)[1])
    # predicted sample sizes derived from the first basis vector's norm
    v = L[0].norm()
    v_ = v/sqrt(L.ncols)
    v_r = 3.2*sqrt(L.ncols - A.ncols())*v_/scale
    v_l = sqrt(h)*v_
    fmt = u"{\"t\": %5.1fs, \"log(sigma)\": %5.1f, \"log(|y|)\": %5.1f, \"log(E[sigma]):\" %5.1f}"
    print
    print fmt%(t,
               log(abs(E[-1]), 2),
               log(L[0].norm(), 2),
               log(sqrt(v_r**2 + v_l**2), 2))
    print
    for i in range(m):
        t = cputime()
        M = GSO.Mat(L, float_type=float_type)
        bkz = BKZ2(M)
        t = cputime()
        # rerandomize, then a cheap LLL to surface a fresh short vector
        bkz.randomize_block(0, L.nrows, stats=None, density=3)
        LLL.reduction(L)
        y_i = vector(ZZ, tuple(L[0]))
        l_n = L[0].norm()
        if L[0].norm() > H[0].norm():
            # randomization made things worse; fall back to the best basis
            L = copy(H)
        t = cputime(t)
        Y.add(tuple(y_i))
        V.add(y_i.norm())
        E.append(apply_short1(y_i, A, c, scale=scale)[1])
        if len(V) >= 2:
            fmt = u"{\"i\": %4d, \"t\": %5.1fs, \"log(|e_i|)\": %5.1f, \"log(|y_i|)\": %5.1f,"
            fmt += u"\"log(sigma)\": (%5.1f,%5.1f), \"log(|y|)\": (%5.1f,%5.1f), |Y|: %5d}"
            print fmt%(i+2, t, log(abs(E[-1]), 2), log(l_n, 2), log_mean(E), log_var(E), log_mean(V), log_var(V), len(Y))
    return E
|
986,158 | 91cf833cf2d5ef32ee0b43f8f2a542c41849defe | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import dados_comuns as dados
import statistics
ROOT_PATH = '/Users/regisalbuquerque/Documents/drive/regis/mestrado/resultados/comp_v12_LB_DDM_DDD__allbases/'
TAM = 30
bases = dados.bases_sinteticas
#Adiciona os métodos (DESDD + baselines)
metodos = ['V12_HOM_OnlineBagging_DDM_RetreinaTodosComBufferWarning']
for baseline in dados.baselines:
metodos.append(baseline)
def busca_dados(metodos, bases):
    """Collect the drift accuracy series of every (method, base) pair from
    the result CSVs and summarize them: the full series, TAM evenly spaced
    samples of it, and the mean of each."""
    resumo = {'metodo': [], 'base': [], 'taxas': [], 'taxas_part': [],
              'media': [], 'media_part': []}
    for base in bases:
        for metodo in metodos:
            arquivo = ROOT_PATH + metodo + '_' + base + '_pareto__exec_1_drift.csv'
            dataset_aux = pd.read_csv(arquivo)
            taxas = dataset_aux['taxa'].values
            resumo['metodo'].append(metodo)
            resumo['base'].append(base)
            resumo['taxas'].append(dataset_aux['taxa'].values)
            resumo['media'].append(dataset_aux['taxa'].mean())
            # sample TAM evenly spaced points from the series
            particao = int(len(dataset_aux['taxa'].values) / TAM)
            taxas_part = [taxas[particao * it - 1] for it in range(1, TAM + 1)]
            resumo['taxas_part'].append(taxas_part)
            resumo['media_part'].append(statistics.mean(taxas_part))
    return pd.DataFrame(data=resumo)
dt_resumo = busca_dados(metodos, bases)
print(dt_resumo)
|
986,159 | 038963cf323efeb842975098e63d94932e6fe15e | from django.forms import ModelForm, TextInput, CharField
from .models import City
class CityForm(ModelForm):
    """ModelForm exposing only a City's name, rendered as a styled text input."""
    #name = CharField(max_length=25)
    class Meta:
        model = City
        fields = ['name']
        widgets = {'name': TextInput(attrs={'class':'input', 'placeholder':'City Name'})}
986,160 | 27194ec08fb723048b28067d272337320a76f801 | def resolve():
a,b=input().split()
if a==b:
print('H')
else:
print('D')
resolve() |
986,161 | ad94dbd61f67c8874a88cf3055cffc1574c85294 | from player import HumanPlayer, AutoPilot, StillLearning
from prompts import full_turn
from random import shuffle, choice
import jsonlog
# Two-player game driver: one human vs one autopilot.  Shuffle decks, draw
# opening hands, coin-toss for turn order, then run turns until full_turn()
# reports the game is over.
p1 = HumanPlayer("Player 1")
p2 = AutoPilot("Player 2")
p1.opponent = p2
p2.opponent = p1
players = (p1, p2)
jsonlog.initiate_game(players, 1)
for player in players:
    shuffle(player.board.deck)
    for _ in range(7):
        player.board.draw()  # opening hand of 7 cards
coin_toss = choice([True, False])
first = p1 if coin_toss else p2
second = p2 if coin_toss else p1
def all_turns():
    # infinite generator of (player, turn_number); both players share the
    # same turn number before it increments
    turn_number = 1
    while True:
        yield first, turn_number
        yield second, turn_number
        turn_number += 1
keep_playing = True
for player, turn_number in all_turns():
    jsonlog.initiate_turn(player, turn_number)
    keep_playing = full_turn(player)
    if not keep_playing:
        break
986,162 | 369c2ea37f9cad2c22ee70b57600d08e5c6139df | import pandas as pd
import os
def obtain_txt_train_images_file(csv_file_path, txt_path, files_path):
    """Convert a CSV of bounding-box annotations into txt lines of the form
    "path,xmin,ymin,xmax,ymax,class".

    :param csv_file_path: CSV with columns images_name, x_min, y_min,
        x_max, y_max, paper_category
    :param txt_path: output file to write (one annotation per line)
    :param files_path: prefix prepended to each image path; images are
        assumed to live under "<first-token>_annotated_images/", where the
        first token is the part of the file name before the first "_"
    """
    if not os.path.isfile(csv_file_path):
        print('.csv file does not exists.')
        return
    print('Creating annotations in txt format.. \n')
    print('Reading csv file...')
    train = pd.read_csv(csv_file_path)
    print('Reading completed.')
    # Single pass building each line directly.  The previous chained
    # assignments (data['format'][i] = ...) rely on pandas returning a view
    # and silently stop working under copy-on-write semantics.
    lines = []
    for i in range(train.shape[0]):
        name = str(train['images_name'][i])
        subdir = name.split('_')[0] + '_annotated_images/'
        lines.append(files_path + subdir + name + ',' +
                     str(train['x_min'][i]) + ',' + str(train['y_min'][i]) + ',' +
                     str(train['x_max'][i]) + ',' + str(train['y_max'][i]) + ',' +
                     train['paper_category'][i])
    data = pd.DataFrame({'format': lines})
    data.to_csv(txt_path, header=None, index=None, sep=' ')
986,163 | 8e55700f8d59271d63f2e7fa1eadc1872d2c13f7 | from time import sleep
from time import time
import numpy as np
import time, random, threading, sys
import multiprocessing
from Worker import *
import Constants
import os
tf.reset_default_graph()
if len(sys.argv) > 1:
Constants.MODE = int(sys.argv[1])
Constants.EMBED_METHOD = int(sys.argv[2])
Constants.LAYERS = int(sys.argv[3])
Constants.LEARNING_RATE = float(sys.argv[4])
Constants.MIN_LEARNING_RT = float(sys.argv[5])
Constants.NUM_FEATURES = int(sys.argv[6])
Constants.INPUT = int(sys.argv[7])
Constants.TARGET = int(sys.argv[8])
Constants.RANGE = int(sys.argv[9])
Constants.BATCH_SIZE = int(sys.argv[10])
Constants.EMBED_SIZE = int(sys.argv[11])
Constants.LAMBDA_REGUL = float(sys.argv[12])
Constants.TH_CENT = float(sys.argv[13])
Constants.TEST_NUM = int(sys.argv[14])
IN_RANK_NAME = 'RANK'
if Constants.INPUT == Constants.VALUE:
IN_RANK_NAME = 'VALUE'
OUT_RANK_NAME = 'RANK'
if Constants.TARGET == Constants.VALUE:
OUT_RANK_NAME = 'VALUE'
METHOD_NAME = 'GCN'
if Constants.EMBED_METHOD == Constants.S2VEC:
METHOD_NAME = 'S2VEC'
RANGE_NAME = '01'
if Constants.RANGE == Constants.RANGE_11:
RANGE_NAME = '11'
# TEST and REAL_NET runs restore a previously trained model instead of
# training from scratch; REAL_NET additionally uses a single worker.
if Constants.MODE == Constants.TEST or Constants.MODE == Constants.REAL_NET:
    Constants.LOAD = True
if Constants.MODE == Constants.REAL_NET:
    Constants.NUM_WORKER = 1
# NOTE(review): NORM_NAME is not defined anywhere in this script; unless it
# leaks in via `from Worker import *`, the next statement raises NameError.
# Confirm where NORM_NAME is supposed to come from.
Constants.SUMMARY_NAME = METHOD_NAME + NORM_NAME + \
                            "_LAYER=" + str(Constants.LAYERS) + \
                            "_LR=" + "{:.0e}".format(Constants.LEARNING_RATE) + "-" + \
                            "{:.0e}".format(Constants.MIN_LEARNING_RT) + \
                            "_F=" + str(Constants.NUM_FEATURES) + \
                            "_IN=" + IN_RANK_NAME + \
                            "_OUT=" + OUT_RANK_NAME + \
                            "_RANGE=" + RANGE_NAME + \
                            "_BATCH=" + str(Constants.BATCH_SIZE) + \
                            "_EMBED=" + str(Constants.EMBED_SIZE) + \
                            "_LAMBDA=" + str(Constants.LAMBDA_REGUL) + \
                            "_TH=" + "{:.2f}".format(Constants.TH_CENT) + \
                            "_" + str(Constants.TEST_NUM)
# Per-configuration directory where this run's checkpoints are saved.
Constants.MODEL_PATH = "./Model/" + Constants.SUMMARY_NAME + '/'
print('\n\n', Constants.SUMMARY_NAME, '\n\n')
epochs = tf.Variable(0,dtype=tf.int32,name='epochs',trainable=False)
epochs_test = tf.Variable(0,dtype=tf.int32,name='epochs_test',trainable=False)
total_graphs = tf.Variable(0,dtype=tf.int32,name='total_graphs',trainable=False)
train_nodes = tf.Variable(0,dtype=tf.int32,name='train_nodes',trainable=False)
test_nodes = tf.Variable(0,dtype=tf.int32,name='test_nodes',trainable=False)
learning_rate = tf.train.polynomial_decay( Constants.LEARNING_RATE,
train_nodes,
Constants.MAX_STEPS//2,
Constants.LEARNING_RATE*0.01)
"""
Initializes tensorflow variables
"""
os.environ["CUDA_VISIBLE_DEVICES"]='0'
#config = tf.ConfigProto()
config = tf.ConfigProto(device_count={"CPU":4})
config.intra_op_parallelism_threads=4
config.inter_op_parallelism_threads=4
config.allow_soft_placement=True
config.log_device_placement=False
config.gpu_options.allow_growth = True
with tf.Session(config=config) as session:
with tf.device("/cpu:0"):
summary_writer = tf.summary.FileWriter("./Summary/"+Constants.SUMMARY_NAME)
summary = Summary(summary_writer, Constants.MODE)
master_worker = Worker('global', session, learning_rate, epochs, epochs_test, total_graphs, train_nodes, test_nodes, summary)
workers = []
for i in range(Constants.NUM_WORKER):
print (i)
workers.append(Worker(i, session, learning_rate, epochs, epochs_test, total_graphs, train_nodes, test_nodes, summary))
saver = tf.train.Saver(max_to_keep=1)
if Constants.LOAD:
print ("Loading....")
c = tf.train.get_checkpoint_state(Constants.MODEL_PATH)
saver.restore(session,c.model_checkpoint_path)
print ("Graph loaded!")
else:
session.run(tf.global_variables_initializer())
coord = tf.train.Coordinator()
"""
Initializes the worker threads
"""
worker_threads = []
for i in range(Constants.NUM_WORKER):
t = threading.Thread(target=workers[i].work, args=(coord,saver))
t.start()
sleep(0.5)
worker_threads.append(t)
coord.join(worker_threads)
|
986,164 | 6ef72d5b365d931bd71ce6e666f424310bbca9b2 |
# Import Splinter and BeautifulSoup
from splinter import Browser
from bs4 import BeautifulSoup as soup
from webdriver_manager.chrome import ChromeDriverManager
import pandas as pd
import datetime as dt
# Defining scrape all function to connect to mongo and establish communication between our code and db
# Set up Splinter Executable path
def scrape_all():
    """Scrape Mars news, featured image, facts and hemispheres into one dict."""
    #initiate headless driver for deployment
    executable_path = {'executable_path': ChromeDriverManager().install()}
    browser = Browser('chrome', **executable_path, headless = True) # setting true so we dont see scraping in action. happens behind the scene
    # setting our two variables to the two function returned by mars_news
    news_title, news_paragraph = mars_news(browser)
    # Run all scraping functions and store results in dictionary
    data = {
        "news_title": news_title,
        "news_paragraph": news_paragraph,
        "featured_image": featured_image(browser),
        "facts": mars_facts(),
        "last_modified": dt.datetime.now(),
        "hemispheres": hemispheres_data(browser)
    }
    # When we create the HTML template, we'll create paths to the dictionary's values,
    # which lets us present our data on our template.
    #stops webdriver and return data
    browser.quit()
    return data
# Function that gets the Mars news
def mars_news(browser):
    """Scrape the newest article from the Mars news site.

    Returns a (title, teaser) tuple, or (None, None) when the page layout
    no longer contains the expected elements.
    """
    browser.visit('https://redplanetscience.com')
    # Give the article list up to one second to render before parsing.
    browser.is_element_present_by_css('div.list_text', wait_time=1)
    page = soup(browser.html, 'html.parser')
    try:
        # select_one returns the first matching article container.
        article = page.select_one('div.list_text')
        title = article.find('div', class_='content_title').get_text()
        teaser = article.find('div', class_='article_teaser_body').get_text()
    except AttributeError:
        # Page structure changed: report "no news" instead of crashing.
        return None, None
    return title, teaser
# Function that gets Mars image
def featured_image(browser):
    """Return the absolute URL of the full-size featured Mars image.

    Returns None when the image element cannot be located on the page.
    """
    base_url = 'https://spaceimages-mars.com'
    browser.visit(base_url)
    # The second <button> on the page opens the full-resolution view.
    browser.find_by_tag('button')[1].click()
    # Parse the page that appears after the click.
    page = soup(browser.html, 'html.parser')
    try:
        # The src attribute holds only a site-relative path.
        relative_url = page.find('img', class_='fancybox-image').get('src')
    except AttributeError:
        # Layout changed and the image tag is gone.
        return None
    # Join the relative path with the site's base URL so the link is usable
    # outside the scraping session (and stays fresh on every run).
    return f'{base_url}/{relative_url}'
# Create Mars Facts function:
def mars_facts():
    """Scrape the Mars/Earth comparison table and return it as an HTML string.

    Returns None when the table cannot be downloaded or parsed.
    """
    try:
        # read_html returns every <table> on the page; keep only the first.
        facts_df = pd.read_html('https://galaxyfacts-mars.com')[0]
    except BaseException:
        # Network or parsing failure: callers treat None as "no facts".
        return None
    facts_df.columns = ['description', 'Mars', 'Earth']
    facts_df.set_index('description', inplace=True)
    # Bootstrap classes make the table render nicely inside the template.
    return facts_df.to_html(classes="table table-striped")
# quit once you're done to free computer memory
# browser.quit()
def hemispheres_data(browser):
    """Scrape the title and full-resolution image URL of each Mars hemisphere.

    Returns a list of dicts with keys 'img_url' and 'title'.
    """
    url = 'https://marshemispheres.com/'
    browser.visit(url)
    hemisphere_image_urls = []
    # finds all of the images and titles
    html = browser.html
    mars_hemi = soup(html, 'html.parser')
    img_title = mars_hemi.find('section', class_='block')
    hemispheres= img_title.find_all('div', class_='item')
    # 3. Write code to retrieve the image urls and titles for each hemisphere.
    for item in hemispheres:
        title = item.find('h3').text
        partial_href = item.find('a')['href']
        # [1] picks the second matching link -- presumably skipping the
        # thumbnail anchor; TODO confirm against the live page markup.
        img_link = browser.links.find_by_partial_href(partial_href)[1]
        img_link.click()
        # On the detail page, the 'Sample' link points at the full image.
        full_image_elem = browser.links.find_by_text('Sample')
        full_img_url = full_image_elem['href']
        mars_img_title = {
            "img_url": full_img_url,
            "title": title,
        }
        hemisphere_image_urls.append(mars_img_title)
        # Return to the listing page before processing the next hemisphere.
        browser.back()
    return hemisphere_image_urls
# Our Main class that will run the code:
if __name__ == "__main__":
# if running as script, print scraped data
print(scrape_all()) |
986,165 | 4c3cef5cc030cd9d5f7c0ac23e10d7a8f5f15c27 | import os
from tool.IO_Handle import IO
from tool.tool_base.base import base
class setting(base):
    """I/O configuration: source/target directories and file extensions."""
    # NOTE(review): abspath("__file__") resolves the *literal string*
    # relative to the current working directory, not this module's location.
    # Confirm whether the variable __file__ (no quotes) was intended.
    BASE_DIR = os.path.dirname(os.path.abspath("__file__"))
    # Directory the source documents are read from.
    input_path = os.path.join(BASE_DIR,"input")
    # Directory the converted documents are written to.
    output_path = os.path.join(BASE_DIR, "output")
    # Source files are markdown; output is plain text.
    input_type = 'md'
    output_type = "txt"
if __name__ == "__main__":
# for file_path in base.input_list:
# datas = IO.read_file(file_path)
# dh = dir_handle()
# files = dh.get_html_iter()
# [run_md(file).run() for file in files]
st = setting()
print(st.input_path) |
986,166 | 410ffbd7099b66b7fcf5221f23923ed01858435e | # 컴퓨터과학의 농담중에 문제 하나를 정규표현식으로 풀게되면 문제가 2개가 된다. 라는 말이있다.
gusik = """
정규표현식(regular expression) :
정규표현식은 일정한 규칙을 가진 문자열을 표현하는 방법으로, 그러한 문자열을 식별할 때 사용함.
문자열 규칙의 집합이 복잡해지면, 외계어?가 되며, 문자열을 다룰때 매우 유용하지만 읽고 해석하기에는 매우 난해하긴함.
하지만 정규표현식도 여러개로 나열한 규칙들의 모임이라, 하나하나 쪼개보면 어렵지 않음.
"""
# 정규 표현식 > 문자열 판단
"""
^ : 이 기호 뒤에 오는 문자, 문자열, 하위표현식이 문자열의 맨 앞에 오는 패턴.
(특정 ~으로 시작하는 문자열 인가?)
$ : 이 기호 앞에 오는 문자, 문자열, 하위표현식이 문자열의 맨 뒤에 오는 패턴.
(특정 ~으로 끝나는 문자열 인가?)
(정규표현식 마지막에 주로 사용, 이 기호를 쓰지 않은 것은 .* 과 동일)
| : 이 기호 양 옆에 오는 문자, 문자열, 하위표현식 중에서 양 옆에서 하나라도 포함되는 패턴.
(or 연산자, ~ 또는 ~ 가 들어가는 문자열 인가?)
(| 여러개를 같이 사용할 수 있다. ex> a|b|c|d 이럴때는 네 개중 한 개라도 문자열에 포함되는지 판단)
"""
# 정규표현식 > 범위, 특수문자
"""
0-9 : 모든 숫자(0~9)
a-z : 모든 영문 소문자(a~z)
A-Z : 모든 영문 대문자(A~Z)
\d : 모든 숫자 (== [0~9])
\D : 숫자를 제외한 모든 문자 ( == [^0~9])
\w : 영문 대소문자, 숫자, 밑줄문자 ( == [a-zA-Z0-9_])
\W : 영문 대소문자, 숫자, 밑줄문자를 제외한 모든 문자 ( == [^a-zA-Z0-9_])
\s : 모든 화이트 스페이스 ( == [ \t\n\r\f\v])(차례대로, 공백, 탭키, 개행, 캐리지리턴, 폼피드, 수직탭)
\S : 공백( )을 제외한 모든 화이트 스페이스 ( == [^ \t\n\r\f\v])
"""
# 정규 표현식 > 범위 판단
"""
[] : 이 기호(대괄호) 안에 문자, 범위가 들어가며, 대괄호 안에 있는 문자중 하나라도 포함되는 패턴.
* : 이 기호 앞에 오는 문자, 문자열, 하위표현식, 대괄호로 묶은 문자들이 0개 이상 있는 패턴.
(0개 이상이여서 대상이 문자열에 없어도 패턴을 만족)
+ : 이 기호 앞에 오는 문자, 문자열, 하위표현식, 대괄호로 묶은 문자들이 1개 이상 있는 패턴.
(* 과 비슷하지만, +는 1개 이상이라 대상이 있어야만 패턴을 만족)
? : 이 기호 앞에 오는 문자, 문자열, 하위표현식, 대괄호로 묶은 문자들이 0개 또는 1개 있는 패턴.
(이 기호 앞에오는 대상은 1개 아니면 없어야 패턴을 만족)
. : 아무 문자가(글자, 숫자, 기호, 공백) 1개만 있는 패턴.
(이 기호는 정말 아무 문자 1개를 의미 하기에, 공백 1칸도 패턴을 만족)
(\n, 개행문자를 제외한 모든 문자를 의미)
?! : 이 기호 뒤에 오는 문자, 문자열, 하위표현식이 해당 위치에 포함되지 않는(0개만 있는) 패턴.
(이 기호를 쓴 해당 위치에서만 해당, 배제했던 대상이 다른 위치에서 포함해도 패턴을 만족)
(문자열 전체에서 배제할려면 ^과 $를 앞뒤에 붙임)
{개수} : 이 기호 앞에 오는 문자, 문자열, 하위표현식, 대괄호로 묶은 문자들이 {}안의 개수 만큼 있는 패턴.
( 대상{n} : 대상이 n개 있는가?)
{ 시작개수, 끝개수 } : 대상이 시작개수 이상 끝 개수 이하의 개수만큼(시작개수~끝개수) 있는 패턴.
[^ ] : 이 기호(대괄호) 안에 문자, 범위가 들어가며, 대괄호 안에 있는 문자들을 포함하지 않는 패턴.
(일반 [] 대괄호에 not(논리부정)연산자를 결합)
(^[] 와 [^ ]은 서로 다른 패턴임, 유의)
"""
# 정규표현식 > 그룹
"""
() : 이 기호 안에 오는 것들은 그룹(=하위표현식) 으로 묶는 패턴.
(정규 표현식에서 하위 표현식이 가장 먼저 우선순위를 가짐, 괄호 연산같이)
""" |
986,167 | 93e174187f2b5da69499b86eb4df86a147ac8104 | # -*-coding:utf-8-*-
from .solver import model
def irt(src, theta_bnds=None,
        alpha_bnds=None, beta_bnds=None, in_guess_param='default',
        model_spec='2PL',
        mode='memory', is_mount=False, user_name=None):
    """Estimate IRT item and user parameters with the 2PL MMLE solver.

    Parameters
    ----------
    src : data source accepted by the solver's load_data.
    theta_bnds : ability-parameter bounds (default [-4, 4]).
    alpha_bnds : discrimination-parameter bounds (default [0.25, 2]).
    beta_bnds : difficulty-parameter bounds (default [-2, 2]).
    in_guess_param : guessing-parameter specification, or 'default'.
    model_spec : model identifier; only '2PL' is supported.
    mode, is_mount, user_name : passed through to data loading.

    Returns
    -------
    (item_param_dict, user_param_dict)

    Raises
    ------
    ValueError : if model_spec names an unsupported model.
    """
    # Mutable default arguments are shared across calls; use None sentinels
    # so every invocation gets fresh bounds lists.
    if theta_bnds is None:
        theta_bnds = [-4, 4]
    if alpha_bnds is None:
        alpha_bnds = [0.25, 2]
    if beta_bnds is None:
        beta_bnds = [-2, 2]
    if model_spec == '2PL':
        mod = model.IRT_MMLE_2PL()
    else:
        # ValueError subclasses Exception, so existing handlers still catch it.
        raise ValueError('Unknown model specification.')
    # load
    mod.load_data(src, is_mount, user_name)
    mod.load_param(theta_bnds, alpha_bnds, beta_bnds)
    mod.load_guess_param(in_guess_param)
    # solve
    mod.solve_EM()
    # post
    item_param_dict = mod.get_item_param()
    user_param_dict = mod.get_user_param()
    return item_param_dict, user_param_dict
|
986,168 | 9c385ab58c308a59a3d5c8707dafecb1b3e3dbc4 | import pygame
import locations
from scenes.effect_scene import EffectScene
class PopeScene(EffectScene):
    """Effect scene that draws the pope sprite in place of the usual enemy."""
    def __init__(self, screen, font, text):
        super().__init__(screen, font, text)
        # Load and pre-scale the sprite once at construction time.
        self.pope_sprite = pygame.transform.scale(pygame.image.load('images/pope.png'), (192, 192))
    def show_enemy_stats(self):
        # There is no enemy in this scene, so hide the stats panel.
        return False
    def show_enemy_sprite(self):
        # The pope sprite replaces the regular enemy sprite.
        return False
    def render(self):
        super(PopeScene, self).render()
        # Draw the sprite at the standard enemy position, over the base scene.
        self.screen.blit(self.pope_sprite, locations.ENEMY_SPRITE_LOCATION)
|
986,169 | 0fed66b75610646f70389872ca09f697e94a2e86 | import user_info
import policy_checker
import password_checker
import report_gen
import database_store
import passgenerator
import user_info_checker
import sys
import getpass
class UserClass:
    """Lightweight holder for a user's name, birthday and optional password."""

    def __init__(self, data):
        # data is a sequence: (first_name, last_name, birthday[, password]).
        self.first_name, self.last_name, self.birthday = data[0], data[1], data[2]
        if len(data) > 3:
            self.password = data[3]

    def set_password(self):
        """Prompt for a new password without echoing it to the terminal."""
        self.password = getpass.getpass(prompt="Please enter your next password: ")
def menu_options():
    """Display the main menu and return a validated choice ('1', '2' or '3').

    Re-prompts until the user enters one of the three valid options.
    """
    # Define the menu text once; the original duplicated it verbatim in two
    # print() calls, which invites the copies drifting apart.
    menu_text = """Please enter one of the following options:
    1) Password Strength Checker
    2) Password Generator
    3) Exit"""
    print(menu_text)
    menu_option = input(">>> ")
    # Membership test replaces the original chain of != comparisons.
    while menu_option not in ('1', '2', '3'):
        print("Please only enter one of the specified values.")
        print(menu_text)
        menu_option = input(">>> ")
    return menu_option
def menu():
    """Run the interactive loop: check or generate passwords until the user quits."""
    # counter tracks whether the user's details were already collected;
    # later iterations reuse the same UserClass instance.
    counter = 0
    finished = False
    menu_option = menu_options()
    while finished is False:
        if menu_option == '1':
            if counter == 0:
                user = UserClass(user_info.get_userinfo_password())
            else:
                # Details already on file: only prompt for the new password.
                user.set_password()
            issues = policy_checker.policy_check(user.password)
            common = password_checker.password_check(user.password)
            personal = user_info_checker.user_info_check(user.first_name, user.last_name, user.birthday, user.password)
            file_name = report_gen.report_generator(issues, personal, common, user.password)
            # report_generator appears to return a filename when a report was
            # written and True/False otherwise -- TODO confirm its contract.
            if file_name is not True and file_name is not False:
                database_store.data_store(user.first_name, user.last_name, user.birthday, user.password, file_name)
            else:
                database_store.data_store(user.first_name, user.last_name, user.birthday, user.password)
        elif menu_option == '2':
            if counter == 0:
                user = UserClass(user_info.get_userinfo())
            # Generate candidate passwords and store each one.
            pass_list = passgenerator.random_password(user.first_name, user.last_name, user.birthday)
            for item in pass_list:
                database_store.data_store(user.first_name, user.last_name, user.birthday, item)
        elif menu_option == '3':
            sys.exit()
        # Ask whether to run again; insist on a Y/N answer.
        again = input("\nWould you like to run the program again? Y or N ").upper()
        while again != "Y" and again != "N":
            print("Please only enter Y or N")
            again = input("\nWould you like to run the program again? Y or N ").upper()
        if again == "N":
            finished = True
        elif again == "Y":
            menu_option = menu_options()
        counter += 1
if __name__ == '__main__':
menu()
|
986,170 | f829e731fb85bed58be5bee9b53c5771f670e95a | from pathlib import Path
cross_domain_settings = ['laptops_to_restaurants', 'restaurants_to_laptops']
in_domain_settings = ['laptops14', 'restaurants14', 'restaurants15']
all_settings = cross_domain_settings + in_domain_settings
num_splits = 3
base = str(Path.home()) + '/private-nlp-architect/nlp_architect/models/absa_neural/data/conll/' |
986,171 | 5c90a350261802e7fd3d5cfce0bba2685dbaed93 | #!/usr/bin/env python
########################################################
#
# Python version of the Time-independent Free Energy
# reconstruction script (a.k.a. reweight) based on the
# algorithm proposed by Tiwary and Parrinello
# JPCB 2014 doi:10.1021/jp504920s.
#
# The script is meant to be used as an analysis tool for
# a Molecular Dynamics simulation where the Metadynamics
# enhanced sampling technique is used to calculate a
# system's Free Energy.
#
# L.S.
# l.sutto@ucl.ac.uk v1.0 - 23/04/2015
########################################################
import os.path
import argparse
import numpy as np
from math import log, exp, ceil
d = """
========================================================================
Time-independent Free Energy reconstruction script (a.k.a. reweight)
based on the algorithm proposed by Tiwary and Parrinello JPCB 2014
Typical usages:
1) to project your metadynamics FES on CVs you did not
bias during your metadynamics run
2) to estimate the error on your FE profiles by comparing them with
the FE profiles obtained integrating the metadynamics bias
e.g. using plumed sum_hills
Example:
reweight.py -bsf 5.0 -kt 2.5 -fpref fes2d- -nf 80 -fcol 3
-colvar COLVAR -biascol 4 -rewcol 2 3
takes as input 80 FES files: fes2d-0.dat, fes2d-1.dat, ..., fes2d-79.dat
obtained using a well-tempered metadynamics with bias factor 5
and containing the free energy in the 3rd column and the COLVAR file
containing the bias in the 4th column and outputs the FES projected
on the CVs in column 2 and 3 of COLVAR file.
Check http://www.ucl.ac.uk/chemistry/research/group_pages/prot_dynamics
for the most updated version of the script
L.S.
l.sutto@ucl.ac.uk v1.0 - 23/04/2015
=========================================================================
"""
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description=d, epilog=" ")
parser.add_argument("-bsf", type=float, help="biasfactor used in the well-tempered metadynamics, if omitted assumes a non-well-tempered metadynamics")
parser.add_argument("-kt", type=float, default="2.49", help="kT in the energy units of the FES files (default: %(default)s)")
parser.add_argument("-fpref", default="fes", help="FES filenames prefix as generated with plumed sum_hills --stride. Expects FPREF%%d.dat (default: %(default)s)")
parser.add_argument("-nf", type=int, default=100, help="number of FES input files (default: %(default)s)")
parser.add_argument("-fcol", type=int, default=2, help="free energy column in the FES input files (first column = 1) (default: %(default)s)")
parser.add_argument("-colvar", default="COLVAR", help="filename containing original CVs, reweighting CVs and metadynamics bias")
parser.add_argument("-rewcol", type=int, nargs='+', default=[ 2 ], help="column(s) in colvar file containing the CV to be reweighted (first column = 1) (default: %(default)s)")
parser.add_argument("-biascol", type=int, nargs='+', default=[ 4 ], help="column(s) in colvar file containing any energy bias (metadynamic bias, walls, external potentials..) (first column = 1) (default: %(default)s)")
parser.add_argument("-min", type=float, nargs='+', help="minimum values of the CV in colvar file, if omitted find it")
parser.add_argument("-max", type=float, nargs='+', help="maximum values of the CV in colvar file, if omitted find it")
parser.add_argument("-bin", type=int, default=50, help="number of bins for the reweighted FES (default: %(default)s)")
parser.add_argument("-outfile", default="fes_rew.dat", help="output FES filename (default: %(default)s)")
parser.add_argument("-v", "--verbose", action='store_true', help="be verbose")
parser.print_help()
########################################################
# PARSING INPUTS
########################################################
args = parser.parse_args()
# Well-Tempered Metadynamics or not
tempered = (args.bsf > 0)
# biasfactor for Well-Tempered
gamma = args.bsf
# kT in energy units (kJ or kcal)
kT = args.kt
# input FES file prefix
fesfilename = args.fpref
# number of FES file generated with sum_hills stride option
# the more the better
numdat = args.nf
# column in FES file corresponding to the Free Energy
# NB: the first column is 0
col_fe = args.fcol - 1
# name of the file containing the CVs on which to project the FES and the bias
datafile = args.colvar
# list with the columns of the CVs on which to project the FES
# NB: the first column is 0
col_rewt = [ i-1 for i in args.rewcol ]
numrewt = len(col_rewt)
# list with column numbers of your datafile containing the bias
# and any external bias/restraint/walls
# NB: the first column is 0
col_bias = [ i-1 for i in args.biascol ]
# NB: if I don't define -min or -max in the input, I will find their value scanning the COLVAR file
s_min = args.min
s_max = args.max
# grid size for the reweighted FES
ngrid = args.bin
# output FES filename
out_fes_xy = args.outfile
# print some output while running
verbose = args.verbose
########################################################
########################################################
# CHECK IF NECESSARY FILES EXIST BEFORE STARTING
########################################################
if not os.path.isfile(datafile):
print "ERROR: file %s not found, check your inputs" % datafile
exit(1)
for i in range(numdat):
fname = '%s%d.dat' % (fesfilename,i)
if not os.path.isfile(fname):
print "ERROR: file %s not found, check your inputs" % fname
exit(1)
########################################################
########################################################
# FIRST PART: calculate c(t)
# This part is independent on the number of CVs being biased
# c(t) represents an estimate of the reversible
# work performed on the system until time t
########################################################
if verbose: print "Reading FES files.."
# calculates ebetac = exp(beta c(t)), using eq. 12 in eq. 3 in the JPCB paper
ebetac = []
for i in range(numdat):
if verbose and numdat > 10 and i%(numdat/10)==0: print "%d of %d (%.0f%%) done" % (i,numdat,(i*100./numdat))
########################################
# set appropriate format for FES file names, NB: i starts from 0
fname = '%s%d.dat' % (fesfilename,i)
# fname = '%s.%d' % (fesfilename,i+1)
########################################
data = np.loadtxt(fname)
s1, s2 = 0., 0.
if tempered:
for p in data:
exponent = -p[col_fe]/kT
s1 += exp(exponent)
s2 += exp(exponent/gamma)
else:
for p in data:
s1 += exp(-p[col_fe]/kT)
s2 = len(data)
ebetac += s1 / s2,
# this would be c(t):
# coft = [ kT*log(x) for x in ebetac ]
########################################################
# SECOND PART: Boltzmann-like sampling for reweighting
########################################################
if verbose: print "Calculating CV ranges.."
# NB: loadtxt takes care of ignoring comment lines starting with '#'
colvar = np.loadtxt(datafile)
# find min and max of rew CV
numcolv = 0
# NOTE(review): calc_smin / calc_smax are only assigned inside these "if"
# blocks.  If the user supplies -min but not -max (or vice versa), one of
# them is never defined and the loop below raises NameError -- they should
# be initialised to False beforehand.  Confirm and fix.
if not s_min:
    s_min = [ 9e99 ] * numrewt
    calc_smin = True
if not s_max:
    s_max = [ -9e99 ] * numrewt
    calc_smax = True
# Scan the whole COLVAR trajectory once, counting rows and tracking the
# min/max of every reweighting CV not fixed on the command line.
for row in colvar:
    numcolv += 1
    for i in range(numrewt):
        col = col_rewt[i]
        val = row[col]
        if calc_smin:
            if val < s_min[i] : s_min[i] = val
        if calc_smax:
            if val > s_max[i] : s_max[i] = val
if verbose:
    for i in range(numrewt):
        print "CV[%d] range: %10.5f ; %10.5f" % (i,s_min[i],s_max[i])
# build the new square grid for the reweighted FES
s_grid = [[ ]] * numrewt
for i in range(numrewt):
ds = (s_max[i] - s_min[i])/(ngrid-1)
s_grid[i] = [ s_min[i] + n*ds for n in range(ngrid) ]
if verbose: print "Grid ds CV[%d]=%f" % (i,ds)
if verbose: print "Calculating reweighted FES.."
# initialize square array numrewt-dimensional
fes = np.zeros( [ ngrid ] * numrewt)
# go through the CV(t) trajectory
denom = 0.
i = 0
for row in colvar:
i += 1
# build the array of grid indeces locs corresponding to the point closest to current point
locs = [[ ]] * numrewt
for j in range(numrewt):
col = col_rewt[j]
val = row[col]
diff = np.array([ abs(gval - val) for gval in s_grid[j] ])
locs[j] = [diff.argmin()] # find position of minimum in diff array
#find closest c(t) for this point of time
indx = int(ceil(float(i)/numcolv*numdat))-1
bias = sum([row[j] for j in col_bias])
ebias = exp(bias/kT)/ebetac[indx]
fes[locs] += ebias
denom += ebias
# ignore warnings about log(0) and /0
np.seterr(all='ignore')
fes /= denom
fes = -kT*np.log(fes)
# set FES minimum to 0
fes -= np.min(fes)
########################################################
# OUTPUT RESULTS ON FILE
########################################################
if verbose: print "Saving results on %s" % out_fes_xy
# save the FES in the format: FES(x,y) (one increment of y per row)
#np.savetxt('fes_rew_matlabfmt.dat', fes, fmt='%.8e', delimiter=' ')
# print the FES in the format:
# x,y,z,FES(x,y,z) for 3D
# x,y,FES(x,y) for 2D
# x,FES(x) for 1D
with open(out_fes_xy, 'w') as f:
if numrewt==3:
for nz,z in enumerate(s_grid[2]):
for ny,y in enumerate(s_grid[1]):
for nx,x in enumerate(s_grid[0]):
f.write('%20.12f %20.12f %20.12f %20.12f\n' % (x,y,z,fes[nx][ny][nz]))
f.write('\n')
elif numrewt==2:
for ny,y in enumerate(s_grid[1]):
for nx,x in enumerate(s_grid[0]):
f.write('%20.12f %20.12f %20.12f\n' % (x,y,fes[nx][ny]))
f.write('\n')
elif numrewt==1:
for nx,x in enumerate(s_grid[0]):
f.write('%20.12f %20.12f\n' % (x,fes[nx]))
f.close()
|
986,172 | ac690c794a8449208d78500bb383220ed9369e5e | """
the string "ABC" in binary is:
2^x 7654 3210
A: 0b 0100 0001
B: 0b 0100 0010
C: 0b 0100 0011
together:
0b | 0100 0001 | 0100 0010 | 0100 0011
or without spaces
0b010000010100001001000011
to convert it to base 64 (which is 2^6) we need to split
binary representation by 6 bits instead of 8 (as above)
0b | 010000 | 010100 | 001001 | 000011
Now we need 64 characters to represent 64-base, let's choose a set
{A-Z, a-z, 0-9, +, /}
A:000000 = 0
B:000001 = 1
C:000010 = 2
D:000011 = 3
...
Q:010000 = 16
...
/:111111 = 63
total is 64 chars
last letter: 000011 is 3 in decimal, which is 4th in order (=D)
the whole encoded string is then: QUJD
If the total number bits is not divisible by 6, say "A", ord('A') = 65
binary 0b 0100 0001
converting to 6 bits, 8 not divisible by 6, neither 16, but 24 is, so
adding padding for a total of 24 bits
0b 0100 0001 | 0000 0000 | 0000 0000
now we can regroup by 6
0b 010000|010000|------|------
2^x 543210 543210 543210 543210
0b 010000|010000|------|------
the last two characters (resulting for padding) replaced with "="
2^4 = 16, so 010000 = 16 which is 17th letter which is Q, so we have
QQ==
"""
import base64
def print_strings(original, encoded, decoded):
    """Show a value next to its base64-encoded and base64-decoded forms."""
    labels = ("Original", "base64 encoded", "base64 decoded")
    for label, value in zip(labels, (original, encoded, decoded)):
        print(f"{label} {value}")
print("\nExample with perfect match")
s = b"ABC"
e = base64.standard_b64encode(s)
d = base64.standard_b64decode(e)
print_strings(s, e, d)
print("\nExample with padding")
s = b"A"
e = base64.standard_b64encode(s)
d = base64.standard_b64decode(e)
print_strings(s, e, d)
print("\nother example")
s = b"hellodiana"
e = base64.standard_b64encode(s)
print_strings(s, e, s)
|
986,173 | d0c8f4bcb658002bc57739c550a51a025e9dda61 | from flask import Flask, json, jsonify, redirect, render_template, request
import statistics
# Configure application
app = Flask(__name__)
numbers = []
@app.route("/", methods=["GET", "POST"])
def index():
if request.method == "POST":
# Get user input one number at a time
number = request.form.get("number")
# Ensure user input was submitted
if not number:
return redirect("/")
# Check that input is a number
try:
float(request.form.get("number"))
except:
print("Input is not a number")
return redirect("/")
# Add user input to list of numbers
numbers.append(float(number))
return redirect("/")
else:
# Ensure that there is at least 10 items in numbers list
if len(numbers) > 9:
# Calculate median
median = statistics.median(numbers)
else:
median = "Please add at least 10 numbers"
return render_template("index.html", numbers=numbers, median=median)
@app.route("/list", methods=["GET", "POST"])
def numbers_list():
if request.method == "POST":
# Get user input as a list
numbers = request.form.get("list")
# Ensure user input was submitted
if not numbers:
return redirect("/")
else:
numbers_list = json.loads(numbers)
# todo: Check for only numbers in list
# Ensure that there is at least 10 items in numbers list
if len(numbers_list) > 9:
# Calculate median
median = statistics.median(numbers_list)
else:
median = "Please add at least 10 numbers"
return render_template("index.html", numbers=numbers_list, median=median)
else:
return redirect("/")
@app.route('/api/median', methods=['POST'])
def calculate_median():
    """JSON API: return the median of at least 10 posted numbers, else 400.

    Expects a body like {"numbers": [1, 2, ...]}.
    """
    # .get() avoids the original's KeyError (-> HTTP 500) when the body is
    # missing the "numbers" key or is not JSON at all.
    payload = request.json or {}
    numbers = payload.get("numbers")
    # Ensure that numbers is a list of at least 10 items.
    if isinstance(numbers, list) and len(numbers) >= 10:
        try:
            return jsonify(statistics.median(numbers))
        except (TypeError, statistics.StatisticsError):
            # Non-numeric items are a client error, not a server crash.
            return "Bad request", 400
    return "Bad request", 400
if __name__ == "__main__":
app.run() |
986,174 | 2c7bead58afc936f3ef8fa35b662199e01b2a865 | # -*- coding: utf-8 -*-
from Autodesk.Revit import DB, UI
import pickle
import os
from tempfile import gettempdir
uiapp = __revit__ # noqa F821
uidoc = uiapp.ActiveUIDocument
app = uiapp.Application
doc = uidoc.Document
class CustomISelectionFilter(UI.Selection.ISelectionFilter):
    """Revit selection filter that accepts only elements of a given class."""

    def __init__(self, nom_class):
        # The element class (e.g. DB.Viewport) selection is restricted to.
        self.nom_class = nom_class

    def AllowElement(self, e):
        # An element passes the filter iff it is an instance of the target class.
        return isinstance(e, self.nom_class)
def main():
    """Ask the user to pick a viewport and persist its centre point to a temp file."""
    # NOTE(review): this local name shadows the stdlib `tempfile` module
    # (harmless here, since the module itself is not referenced).
    tempfile = os.path.join(gettempdir(), "ViewPort")
    # Restrict the pick to Viewport elements via the custom filter.
    source_vp_reference = uidoc.Selection.PickObject(
        UI.Selection.ObjectType.Element,
        CustomISelectionFilter(DB.Viewport),
        "Select Source Viewport")
    source_vp = doc.GetElement(source_vp_reference.ElementId)
    source_vp_xyz = source_vp.GetBoxCenter()
    # Store plain floats rather than the Revit XYZ object so it unpickles anywhere.
    point = (source_vp_xyz.X, source_vp_xyz.Y, source_vp_xyz.Z)
    # NOTE(review): text-mode "w" works for pickle under IronPython 2 (the
    # usual pyRevit runtime); under Python 3 this would need "wb" -- confirm
    # the target interpreter.
    with open(tempfile, "w") as fp:
        pickle.dump(point, fp)
if __name__ == "__main__":
    main()
|
986,175 | 026431be29cdbf37806b324616ceee3cb8651849 | primera = str(input('Dime una frase'))
ultima= str(input('Letra que quieres buscar'))
a = (primera.find(ultima))
z = (primera.rfind(ultima))
if(a == -1):
print('')
else:
print(a, z) |
986,176 | 19d57c6228503ff5568464df9cb3750ee85540ea |
# A very simple Flask Hello World app for you to get started with...
from flask import Flask, redirect, render_template, request, url_for
from flask_sqlalchemy import SQLAlchemy
from flask_login import login_user, LoginManager, UserMixin,logout_user,login_required,current_user
from werkzeug.security import check_password_hash, generate_password_hash
from flask_migrate import Migrate
app = Flask(__name__)
app.config["DEBUG"] = True
SQLALCHEMY_DATABASE_URI = "mysql+mysqlconnector://{username}:{password}@{hostname}/{databasename}".format(
username="dheetiInterns",
password="Dsqlpass",
hostname="dheetiInterns.mysql.pythonanywhere-services.com",
databasename="dheetiInterns$comments",
)
app.config["SQLALCHEMY_DATABASE_URI"] = SQLALCHEMY_DATABASE_URI
app.config["SQLALCHEMY_POOL_RECYCLE"] = 299
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db = SQLAlchemy(app)
migrate = Migrate(app, db)
app.secret_key = "something only you know"
login_manager = LoginManager()
login_manager.init_app(app)
class User(UserMixin, db.Model):
__tablename__ = "users"
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(128))
password_hash = db.Column(db.String(128))
EmpId = db.Column(db.Integer)
MangId = db.Column(db.Integer)
def check_password(self, password):
return check_password_hash(self.password_hash, password)
def get_id(self):
return self.username
# all_users = {
# "admin": User(username = "admin", password_hash= generate_password_hash("secret"),EmpId=1,MangId=2),
# "CEO": User(username ="CEO",password_hash= generate_password_hash("secret"),EmpId=2),
# "depH1": User(username ="DepH",password_hash= generate_password_hash("secret"),EmpId=3,MangId =2),
# "Intern": User(username ="Intern",password_hash= generate_password_hash("secret"),EmpId=4,MangId =3)
# }
@login_manager.user_loader
def load_user(user_id):
return User.query.filter_by(username=user_id).first()
class Comment(db.Model):
__tablename__ = "comments"
id = db.Column(db.Integer, primary_key=True)
content = db.Column(db.String(4096))
class Feed(db.Model):
    """One submitted feedback entry (created by the index() POST handler)."""

    __tablename__ = "feed"
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(30))
    idno = db.Column(db.String(30))
    email = db.Column(db.String(30))
    phno = db.Column(db.String(30))
    grade = db.Column(db.String(30))
    feed = db.Column(db.String(4096))
    # Bug fix: the default was the literal string "5/7/2020", which is not a
    # valid value for a DateTime column. Use the insertion time instead
    # (a callable, so it is evaluated per row, not at import time).
    posted = db.Column(db.DateTime, default=datetime.utcnow)
@app.route("/", methods=["GET", "POST"])
def index():
    """Landing page: GET renders the feedback form; POST stores a Feed row
    and redirects to the listing page."""
    if request.method == "GET":
        return render_template("main_page.html")
    if request.method == "POST":
        # Fields are taken verbatim from the form; a missing key makes
        # request.form raise, which Flask turns into a 400 response.
        name = request.form['name']
        idno = request.form['idno']
        email = request.form['email']
        phno = request.form['phone']
        grade = request.form['grade']
        feeds = request.form['feed']
        feed = Feed(name= name, idno = idno ,email = email,phno = phno, grade = grade,feed = feeds)
        db.session.add(feed)
        db.session.commit()
        return redirect('/allfeedback')
@app.route('/allfeedback', methods = ['GET'])
def feeds():
    """List feedback: authenticated users get a filtered query in the
    template context, anonymous visitors get the bare template."""
    if not current_user.is_authenticated:
        return render_template('feedbacks.html')
    visible_entries = Feed.query.filter(Feed.id > current_user.id)
    return render_template('feedbacks.html', query=visible_entries)
@app.route("/login/", methods=["GET", "POST"])
def login():
    """Render the login form on GET; on POST authenticate and redirect to
    the feedback listing."""
    if request.method == "GET":
        return render_template("login_page.html", error=False)
    account = load_user(request.form["username"])
    # Unknown username and wrong password render the same error page.
    if account is None or not account.check_password(request.form["password"]):
        return render_template("login_page.html", error=True)
    login_user(account)
    return redirect(url_for('feeds'))
@app.route("/logout/")
@login_required
def logout():
    """End the current session and return to the landing page."""
    logout_user()
    return redirect(url_for('index'))
986,177 | 62767d21486edb46505dcc74f5fe916f84a7d73f | # -*- coding: utf-8 -*-
import os
from pathlib import Path
import pytest
from brainhacker.utils.download import _url_to_local_path, _fetch_file, mne_data_path
def test_url_to_local_path(tmpdir):
    """_url_to_local_path must reject URLs without a usable path and mirror
    the URL's path components under the destination directory."""
    url = 'https://www.google.com/'
    with pytest.raises(ValueError):
        _url_to_local_path(url, tmpdir)
    # Missing ':' after the scheme -- malformed URL.
    url = 'https//www.google.com/data/folder'
    with pytest.raises(ValueError):
        _url_to_local_path(url, tmpdir)
    url = 'https://www.google.com/data/folder'
    dest = os.path.join(tmpdir, 'data', 'folder')
    assert dest == _url_to_local_path(url, tmpdir)
|
986,178 | 0c9e72f91f3336880c1765cad86a566e5d9edf65 | #
# Copyright 2020, by the California Institute of Technology. ALL RIGHTS
# RESERVED. United States Government Sponsorship acknowledged. Any commercial
# use must be negotiated with the Office of Technology Transfer at the
# California Institute of Technology.
#
"""
================
doi_validator.py
================
Contains classes and functions for validation of DOI records and the overall
DOI workflow.
"""
import re
from typing import Optional
import requests
from pds_doi_service.core.db.doi_database import DOIDataBase
from pds_doi_service.core.entities.doi import Doi
from pds_doi_service.core.entities.doi import DoiStatus
from pds_doi_service.core.entities.exceptions import DuplicatedTitleDOIException
from pds_doi_service.core.entities.exceptions import IllegalDOIActionException
from pds_doi_service.core.entities.exceptions import InvalidIdentifierException
from pds_doi_service.core.entities.exceptions import InvalidRecordException
from pds_doi_service.core.entities.exceptions import SiteURLNotExistException
from pds_doi_service.core.entities.exceptions import TitleDoesNotMatchProductTypeException
from pds_doi_service.core.entities.exceptions import UnexpectedDOIActionException
from pds_doi_service.core.entities.exceptions import UnknownNodeException
from pds_doi_service.core.util.config_parser import DOIConfigUtil
from pds_doi_service.core.util.general_util import get_logger
from pds_doi_service.core.util.node_util import NodeUtil
# Get the common logger and set the level for this file.
logger = get_logger(__name__)
MIN_LID_FIELDS = 4
MAX_LID_FIELDS = 6
"""The expected minimum and maximum fields expected within a LID"""
class DOIValidator:
    """Performs the validation checks applied to DOI records before
    reserve/update/release transactions are accepted."""

    doi_config_util = DOIConfigUtil()

    # The workflow_order dictionary contains the progression of the status of a DOI:
    # a higher rank means further along the workflow; equal ranks are peers
    # (e.g. the three terminal states all rank 5).
    workflow_order = {
        DoiStatus.Error: 0,
        DoiStatus.Unknown: 0,
        DoiStatus.Reserved: 1,
        DoiStatus.Draft: 2,
        DoiStatus.Review: 3,
        DoiStatus.Pending: 4,
        DoiStatus.Registered: 5,
        DoiStatus.Findable: 5,
        DoiStatus.Deactivated: 5,
    }
    def __init__(self, db_name=None):
        """Create a validator backed by the transaction database.

        Parameters
        ----------
        db_name : str, optional
            Path to the Sqlite database file. Falls back to the
            OTHER/db_file entry of the service configuration.
        """
        self._config = self.doi_config_util.get_config()

        # If database name is specified from user, use it.
        default_db_file = db_name if db_name else self._config.get("OTHER", "db_file")

        self._database_obj = DOIDataBase(default_db_file)
def _check_node_id(self, doi: Doi):
"""
Checks if the provided Doi object has a valid node ID assigned.
Parameters
----------
doi : Doi
The Doi object to check.
Raises
------
UnknownNodeException
If the Doi object has an unrecognized node ID assigned, or no
node ID assigned at all.
"""
try:
if not doi.node_id:
raise UnknownNodeException("Doi object does not have a node ID value assigned.")
NodeUtil.validate_node_id(doi.node_id)
except UnknownNodeException as err:
msg = (
f"Invalid Node ID for DOI record with identifier {doi.pds_identifier}.\n"
f"Reason: {str(err)}.\n"
"Please use the --node option to specify the apporpriate PDS node ID for the transaction."
)
raise UnknownNodeException(msg)
def _check_field_site_url(self, doi: Doi):
"""
If the site_url field is defined for the provided Doi object, check to
see if it is online. This check is typically only made for release
requests, which require a URL field to be set.
Parameters
----------
doi : Doi
The Doi object to check.
Raises
------
SiteURLNotExistException
If the site URL is defined for the Doi object and is not reachable.
"""
logger.debug("doi,site_url: %s,%s", doi.doi, doi.site_url)
if doi.site_url:
try:
response = requests.get(doi.site_url, timeout=10)
status_code = response.status_code
logger.debug("from_request status_code,site_url: %s,%s", status_code, doi.site_url)
# Handle cases when a connection can be made to the server but
# the status is greater than or equal to 400.
if status_code >= 400:
# Need to check its an 404, 503, 500, 403 etc.
raise requests.HTTPError(f"status_code,site_url {status_code,doi.site_url}")
else:
logger.info("Landing page URL %s is reachable", doi.site_url)
except (requests.exceptions.ConnectionError, Exception):
raise SiteURLNotExistException(
f"Landing page URL {doi.site_url} is not reachable. Request "
f"should have a valid URL assigned prior to release.\n"
f"To bypass this check, rerun the command with the --force "
f"flag provided."
)
def _check_field_title_duplicate(self, doi: Doi):
"""
Check the provided Doi object's title to see if the same title has
already been used with a different DOI record.
Parameters
----------
doi : Doi
The Doi object to check.
Raises
------
DuplicatedTitleDOIException
If the title for the provided Doi object is in use for another record.
"""
query_criterias = {"title": [doi.title]}
# Query database for rows with given title value.
columns, rows = self._database_obj.select_latest_rows(query_criterias)
# keep rows with same title BUT different identifier
rows_with_different_identifier = [row for row in rows if row[columns.index("identifier")] != doi.pds_identifier]
if rows_with_different_identifier:
identifiers = ",".join([row[columns.index("identifier")] for row in rows_with_different_identifier])
status = ",".join([row[columns.index("status")] for row in rows_with_different_identifier])
dois = ",".join([row[columns.index("doi")] for row in rows_with_different_identifier])
msg = (
f"The title '{doi.title}' has already been used for records "
f"{identifiers}, status: {status}, doi: {dois}. "
"A different title should be used.\nIf you want to bypass this "
"check, rerun the command with the --force flag provided."
)
raise DuplicatedTitleDOIException(msg)
def _check_field_title_content(self, doi: Doi):
"""
Check that the title of the provided Doi object contains the type of
PDS product (bundle, collection, document, etc...).
Parameters
----------
doi : Doi
The Doi object to check.
Raises
------
TitleDoesNotMatchProductTypeException
If the title for the provided Doi object does not contain the
type of PDS product.
"""
product_type_specific_split = doi.product_type_specific.split(" ")
# The suffix should be the last field in product_type_specific so
# if it has many tokens, check the last one.
product_type_specific_suffix = product_type_specific_split[-1]
logger.debug("product_type_specific_suffix: %s", product_type_specific_suffix)
logger.debug("doi.title: %s", doi.title)
if not product_type_specific_suffix.lower() in doi.title.lower():
msg = (
f"DOI with identifier '{doi.pds_identifier}' and title "
f"'{doi.title}' does not contains the product-specific type "
f"suffix '{product_type_specific_suffix.lower()}'. "
"Product-specific type suffix should be in the title.\n"
"If you want to bypass this check, rerun the command with the "
"--force flag provided."
)
raise TitleDoesNotMatchProductTypeException(msg)
    def _check_for_preexisting_identifier(self, doi: Doi):
        """
        For the identifier assigned to the provided Doi object, check that
        the latest transaction for the same identifier has a matching DOI value.

        Parameters
        ----------
        doi : Doi
            The Doi object to validate.

        Raises
        ------
        IllegalDOIActionException
            If the check fails.

        """
        # The database expects each field to be a list.
        query_criterias = {"ids": [doi.pds_identifier]}

        # Query database for rows with given id value.
        columns, rows = self._database_obj.select_latest_rows(query_criterias)

        for row in rows:
            existing_record = dict(zip(columns, row))

            # Reject any attempt to assign a different DOI value to an
            # identifier that already has one.
            if doi.doi != existing_record["doi"]:
                raise IllegalDOIActionException(
                    f"There is already a DOI {existing_record['doi']} associated "
                    f"with PDS identifier {doi.pds_identifier} "
                    f"(status={existing_record['status']}).\n"
                    f"You cannot modify a DOI for an existing PDS identifier."
                )
def _check_for_preexisting_doi(self, doi: Doi):
"""
For Doi objects with DOI already assigned, this check ensures the DOI
value is not already in use for a different PDS identifier.
Parameters
----------
doi : Doi
The Doi object to validate.
Raises
------
ValueError
If the provided Doi object does not have a DOI value assigned to check.
UnexpectedDOIActionException
If the check fails.
"""
if not doi.doi:
raise ValueError(f"Provided DOI object (id {doi.pds_identifier}) does not have a DOI value assigned.")
# The database expects each field to be a list.
query_criterias = {"doi": [doi.doi]}
# Query database for rows with given DOI value (should only ever be
# at most one)
columns, rows = self._database_obj.select_latest_rows(query_criterias)
for row in rows:
existing_record = dict(zip(columns, row))
if doi.pds_identifier != existing_record["identifier"]:
raise UnexpectedDOIActionException(
f"The DOI ({doi.doi}) provided for record identifier "
f"{doi.pds_identifier} is already in use for record "
f"{rows[0][columns.index('identifier')]}.\n"
f"Are you sure you want to assign the new identifier {doi.pds_identifier}?\n"
f"If so, use the --force flag to bypass this check."
)
    def _check_identifier_fields(self, doi: Doi):
        """
        Checks the fields of a Doi object used for identification for consistency
        and validity.

        Parameters
        ----------
        doi : Doi
            The parsed Doi object to validate

        Raises
        ------
        InvalidRecordException
            If any of the identifier field checks fail

        """
        # Make sure we have an identifier to key off of
        if not doi.pds_identifier:
            raise InvalidRecordException(
                "Record provided with missing PDS identifier field. "
                "Please ensure a LIDVID or similar identifier is provided for "
                "all DOI requests."
            )

        # Make sure the doi and id fields are consistent, if present
        if doi.doi and doi.id:
            # NOTE(review): assumes doi.doi contains exactly one "/" separating
            # prefix from suffix; any other shape raises ValueError here --
            # confirm upstream guarantees the DOI format.
            prefix, suffix = doi.doi.split("/")

            if suffix != doi.id:
                raise InvalidRecordException(
                    f"Record for {doi.pds_identifier} has inconsistent "
                    f"DOI ({doi.doi}) and ID ({doi.id}) fields. Please reconcile "
                    "the inconsistency and resubmit the request."
                )
    def _check_lidvid_field(self, doi: Doi):
        """
        Checks the pds_identifier field of a Doi to ensure it conforms
        to the LIDVID format.

        Parameters
        ----------
        doi : Doi
            The parsed Doi object to validate

        Raises
        ------
        InvalidIdentifierException
            If the PDS identifier field of the DOI does not conform to
            the LIDVID format. These exceptions should be able to be bypassed
            when the --force flag is provided.

        """
        vid: Optional[str]

        # Split off the optional VID portion (after the "::" separator).
        if "::" in doi.pds_identifier:
            lid, vid = doi.pds_identifier.split("::")
        else:
            lid = doi.pds_identifier
            vid = None

        lid_tokens = lid.split(":")

        try:
            # Make sure the prescribed static fields are correct
            required_prefix_elements = ["urn", "nasa", "pds"]

            if lid_tokens[:3] != required_prefix_elements:
                raise InvalidIdentifierException(f"LIDVID must start with elements {required_prefix_elements}")

            # Make sure we got the minimum number of fields, and that
            # the number of fields is consistent with the product type
            if not MIN_LID_FIELDS <= len(lid_tokens) <= MAX_LID_FIELDS:
                raise InvalidIdentifierException(
                    f"LIDVID must contain only between {MIN_LID_FIELDS} "
                    f"and {MAX_LID_FIELDS} colon-delimited fields, "
                    f"got {len(lid_tokens)} field(s)"
                )

            # Now check each field for the expected set of characters
            token_regex = re.compile(r"[a-z0-9-._]*")

            for index, token in enumerate(lid_tokens):
                if not token_regex.fullmatch(token):
                    raise InvalidIdentifierException(
                        f"LID field {index + 1} ({token}) is invalid. "
                        f"Fields must only consist of lowercase letters, digits, "
                        f"hyphens (-), underscores (_) or periods (.), per PDS SR Sec. 6D.2"
                    )

            # Make sure the VID conforms to a version number
            version_regex = re.compile(r"^\d+\.\d+$")

            if vid and not version_regex.fullmatch(vid):
                raise InvalidIdentifierException(
                    f"Parsed VID ({vid}) does not conform to a valid version identifier. "
                    "Version identifier must consist only of a major and minor version "
                    "joined with a period (ex: 1.0), per PDS SR Sec. 6D.3"
                )

            # Finally, ensure the whole identifier conforms to the length constraint
            identifier_max_length = 255

            if not len(doi.pds_identifier) <= identifier_max_length:
                raise InvalidIdentifierException(
                    f"LIDVID {doi.pds_identifier} does not conform to PDS identifier max length constraint "
                    f"({identifier_max_length}), per PDS SR Sec. 6D"
                )
        except InvalidIdentifierException as err:
            # Wrap the specific failure in a message that names the record and
            # explains how to bypass the check.
            raise InvalidIdentifierException(
                f"The record identifier {doi.pds_identifier} (DOI {doi.doi}) "
                f"does not conform to a valid LIDVID format.\n"
                f"Reason: {str(err)}\n"
                "If the identifier is not intended to be a LIDVID, use the "
                "--force option to bypass the results of this check."
            )
    def _check_field_workflow(self, doi: Doi):
        """
        Check that there is not a record in the Sqlite database with same
        identifier but a higher status than the current action (see workflow_order)

        Parameters
        ----------
        doi : Doi
            The parsed Doi object to check the status of.

        Raises
        ------
        UnexpectedDOIActionException
            If the provided Doi object has an unrecognized status assigned, or if
            the previous status for the Doi is higher in the workflow ordering than
            the current status.

        """
        if doi.status is not None and doi.status not in self.workflow_order:
            msg = (
                f"Unexpected DOI status of '{doi.status.value}' from label. "
                f"Valid values are "
                f"{[DoiStatus(key).value for key in self.workflow_order.keys()]}"
            )
            logger.error(msg)
            raise UnexpectedDOIActionException(msg)

        # The database expects each field to be a list.
        query_criterias = {"doi": [doi.doi]}

        # Query database for rows with given doi value.
        columns, rows = self._database_obj.select_latest_rows(query_criterias)

        for row in rows:
            existing_record = dict(zip(columns, row))
            doi_str = existing_record["doi"]
            # NOTE(review): prev_status comes back from the database as a
            # string; the workflow_order lookup below relies on DoiStatus
            # hashing/comparing equal to its string value -- confirm DoiStatus
            # is a str-based Enum.
            prev_status = existing_record["status"]

            # Check the rankings of the current and previous status to see if
            # we're moving backwards through the workflow. For example, a status
            # of 'Findable' (5) is higher than 'Review' (3), so a released
            # DOI record being moved back to review would trip this warning.
            if self.workflow_order[prev_status] > self.workflow_order[doi.status]:  # type: ignore
                msg = (
                    f"There is a record for identifier {doi.pds_identifier} "
                    f"(DOI: {doi_str}) with status: '{prev_status.lower()}'.\n"
                    f"Are you sure you want to restart the workflow from step "
                    f"'{doi.status}'?\nIf so, use the --force flag to bypass the "
                    f"results of this check."
                )
                raise UnexpectedDOIActionException(msg)
    def validate_reserve_request(self, doi: Doi):
        """
        Perform the suite of validation checks applicable to a reserve request
        on the provided Doi object.

        Parameters
        ----------
        doi : Doi
            The parsed Doi object to validate.

        """
        # For reserve requests, need to make sure there is not already an
        # existing DOI with the same PDS identifier
        self._check_for_preexisting_identifier(doi)

        self._check_node_id(doi)
        self._check_identifier_fields(doi)
        self._check_lidvid_field(doi)
        self._check_field_title_duplicate(doi)
        self._check_field_title_content(doi)
    def validate_update_request(self, doi: Doi):
        """
        Perform the suite of validation checks applicable to an update request
        on the provided Doi object.

        Parameters
        ----------
        doi : Doi
            The parsed Doi object to validate.

        """
        # For update requests, need to check if there are any other DOI records
        # using the same PDS identifier
        self._check_for_preexisting_doi(doi)

        self._check_node_id(doi)
        self._check_identifier_fields(doi)
        self._check_lidvid_field(doi)
        self._check_field_title_duplicate(doi)
        self._check_field_title_content(doi)

        # Also need to check if we're moving backwards through the workflow,
        # i.e. updating an already released record.
        self._check_field_workflow(doi)
    def validate_release_request(self, doi: Doi):
        """
        Perform the suite of validation checks applicable to a release request
        on the provided Doi object.

        Parameters
        ----------
        doi : Doi
            The parsed Doi object to validate.

        """
        # For release requests, need to check if there are any other DOI records
        # using the same PDS identifier (only when a DOI is already assigned).
        if doi.doi:
            self._check_for_preexisting_doi(doi)

        self._check_node_id(doi)
        self._check_identifier_fields(doi)
        self._check_lidvid_field(doi)
        self._check_field_title_duplicate(doi)
        self._check_field_title_content(doi)

        # Release requests require a valid URL assigned, so check for that here
        self._check_field_site_url(doi)
|
986,179 | e3ebcbec2bcf22959d4c4ef070ef06f2d1b3e055 | #python
#
# UVIslandPack
#
# Author: Mark Rossi (small update by Cristobal Vila for Modo 11.x)
# Version: .4
# Compatibility: Modo 11.x
#
# Purpose: To fit every UV island in the selected UV map to 0-1 range and then array them in a grid so that each island has its own
# discrete range in UV space.
#
# Use: Select the mesh layer, select the UV map, and run the script. However, if you select any polygons, then the script will only run on
# the islands that the polygon/s belong to. Moreover, the order in which you select the islands dictates the array order, provided that
# you do NOT double-click on any of the polygons to select the entire island/s. The script takes two arguments, both numbers. The first
# specifies the number of islands per row, the second specifies the amount of UV space padding between islands. The default values are
# 5 and 0.001, respectively. If you specify one argument then you must specify both.
#
# For example: @uvIslandPack.py 3 0.01
# Parse the optional arguments: islands per row and inter-island UV padding.
# As documented in the header, either both are given or neither.
args = lx.args()
# Bug fix: the old "len(args) > 1 and float(args[0]) or 5.0" and/or idiom
# silently fell back to the default whenever the parsed value was 0 (falsy).
# A conditional expression keeps the explicit value regardless.
split = float(args[0]) if len(args) > 1 else 5.0
pad = float(args[1]) if len(args) > 1 else 0.001
row = 0.0
count = 0.0
layer = lx.eval("query layerservice layer.index ? current")
# Selected polygons (if any) restrict processing; otherwise all visible polys.
polys = [p.strip("()").split(",")[1] for p in lx.evalN("query layerservice selection ? poly")]
unproc = polys or lx.evalN("query layerservice polys ? visible")
lx.eval("escape")
lx.eval("tool.set actr.auto on")
while unproc:
    # Select the first unprocessed polygon and grow the selection to its UV island.
    lx.eval("select.element %s polygon set %s" %(layer, unproc[0]))
    lx.eval("select.polygonConnect uv")
    lx.eval("uv.fit entire true") # October 2017: This changed. It previously was 'uv.fit false'
    # Scale down slightly so the island leaves padding inside its cell.
    lx.eval("tool.set TransformScale on")
    lx.eval("tool.viewType uv")
    lx.eval("tool.setAttr xfrm.transform SX %s" %(1.0 - pad))
    lx.eval("tool.setAttr xfrm.transform SY %s" %(1.0 - pad))
    lx.eval("tool.doApply")
    lx.eval("tool.set TransformScale off")
    # Move the island into its grid cell at column `count`, row `row`.
    lx.eval("tool.set TransformMove on")
    lx.eval("tool.viewType uv")
    lx.eval("tool.setAttr xfrm.transform U %s" %count)
    lx.eval("tool.setAttr xfrm.transform V %s" %row)
    lx.eval("tool.doApply")
    lx.eval("tool.set TransformMove off")
    # Remove the processed island's polygons from the work list.
    island = set(lx.evalN("query layerservice polys ? selected"))
    unproc = [p for p in unproc if p not in island]
    count += 1.0
    if count == split:
        count = 0.0
        row += 1.0
lx.eval("select.drop polygon")
# Shift everything by half the padding -- presumably to center each island
# within its cell.
lx.eval("tool.set TransformMove on")
lx.eval("tool.reset")
lx.eval("tool.viewType uv")
lx.eval("tool.setAttr xfrm.transform U %s" %(pad * .5))
lx.eval("tool.setAttr xfrm.transform V %s" %(pad * .5))
lx.eval("tool.doApply")
lx.eval("tool.set TransformMove off")
lx.eval("tool.set actr.auto off")
986,180 | b2190bbb93b35040f0a7c77cb438287387d9c0ae | # Generated by Django 2.1.11 on 2019-12-03 00:54
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drops the denormalized *_count columns
    from the producer model."""

    dependencies = [
        ('producers', '0003_auto_20181218_1934'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='producer',
            name='competition_count',
        ),
        migrations.RemoveField(
            model_name='producer',
            name='dataset_count',
        ),
        migrations.RemoveField(
            model_name='producer',
            name='organizer_count',
        ),
        migrations.RemoveField(
            model_name='producer',
            name='participant_count',
        ),
        migrations.RemoveField(
            model_name='producer',
            name='submission_count',
        ),
        migrations.RemoveField(
            model_name='producer',
            name='user_count',
        ),
    ]
|
986,181 | 7d994ed5babce160bad2d1a945577c947e914da5 | def merge_data(env):
print(env.file["train_file_list"]) |
986,182 | 24e134709ed3db9766bd87b94196681162323986 | """
Question:
We have n chips, where the position of the ith chip is position[i].
We need to move all the chips to the same position. In one step, we can change the position
of the ith chip from position[i] to:
position[i] + 2 or position[i] - 2 with cost = 0.
position[i] + 1 or position[i] - 1 with cost = 1.
Return the minimum cost needed to move all the chips to the same position.
Example:
Input: position = [1,2,3]
Output: 1
Explanation: First step: Move the chip at position 3 to position 1 with cost = 0.
Second step: Move the chip at position 2 to position 1 with cost = 1.
Total cost is 1.
Input: position = [2,2,2,3,3]
Output: 2
Explanation: We can move the two chips at position 3 to position 2.
Each move has cost = 1. The total cost = 2.
Input: position = [1,1000000000]
Output: 1
"""
#min_cost_to_move_chips_to_the_same_position_1217.py
import pytest
from typing import List
class Solution:
    def min_cost_to_move_chips(self, position: List[int]) -> int:
        """Minimum total cost to gather all chips on one position.

        Moving by 2 is free, so only parity matters: every chip ends up on
        either an even or an odd coordinate at cost 1 per parity flip. The
        answer is the size of the smaller parity group.
        """
        odd_count = sum(p % 2 for p in position)
        return min(odd_count, len(position) - odd_count)
@pytest.mark.timeout(3)
@pytest.mark.parametrize(
    "arr, ans", [([1,2,3], 1), ([2,2,2,3,3], 2), ([1,1000000000], 1)]
)
def test_min_cost_to_move_chips(arr, ans):
    """Exercise the three LeetCode examples from the module docstring."""
    sol1 = Solution()
    assert sol1.min_cost_to_move_chips(arr) == ans
# pytest daily_coding_challenge/october_2020/min_cost_to_move_chips_to_the_same_position_1217.py --maxfail=4 |
986,183 | 8d886b3951eb019c6a2bdb75fdef366931baf499 | # -*- encoding: utf-8 -*-
"""
8.6.1 k均值聚类
"""
from sklearn import datasets as dss
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
# Use a CJK-capable font so the Chinese subplot titles render, and keep the
# minus sign displayable with that font.
plt.rcParams['font.sans-serif'] = ['FangSong']
plt.rcParams['axes.unicode_minus'] = False
# Three synthetic 2-D datasets: blob clusters, concentric circles and
# interleaved half-moons.
X_blob, y_blob = dss.make_blobs(n_samples=[300,400,300], n_features=2)
X_circle, y_circle = dss.make_circles(n_samples=1000, noise=0.05, factor=0.5)
X_moon, y_moon = dss.make_moons(n_samples=1000, noise=0.05)
# Cluster each dataset with k-means++ initialization.
y_blob_pred = KMeans(init='k-means++', n_clusters=3).fit_predict(X_blob)
y_circle_pred = KMeans(init='k-means++', n_clusters=2).fit_predict(X_circle)
y_moon_pred = KMeans(init='k-means++', n_clusters=2).fit_predict(X_moon)
# Plot the three clusterings side by side (titles: blob / ring / moon clusters).
plt.subplot(131)
plt.title('团状簇')
plt.scatter(X_blob[:,0], X_blob[:,1], c=y_blob_pred)
plt.subplot(132)
plt.title('环状簇')
plt.scatter(X_circle[:,0], X_circle[:,1], c=y_circle_pred)
plt.subplot(133)
plt.title('新月簇')
plt.scatter(X_moon[:,0], X_moon[:,1], c=y_moon_pred)
plt.show()
|
986,184 | 6b5bb2ef5b4bf3d294f8b37478ce302921401e2a | from urllib.parse import urlsplit, urlunsplit, unquote, parse_qs
from pathlib import PurePosixPath
from types import SimpleNamespace
from .gpm import gpm_url_to_data
from .spoofy import get_spoofy_url_or_none
# For a given list of URLs, each URL is converted into an object containing data about the item the URL refers to.
# This is then used to query Spotify and produce either a Spotify album, artist, or track URL
async def remap_urls(urls):
    """Map each GPM URL to a Spotify URL (or None per item).

    Returns None when *urls* is empty, otherwise the list of results.
    """
    result = [await get_spoofy_url_or_none(await gpm_url_to_data(url)) for url in urls]
    # Idiom fix: truthiness check instead of "len(result) == 0"; an empty
    # list still maps to None as before.
    return result or None
# All Google Play Music URLs start with
# https://play.google.com/music
def validate_url(url):
    """Return True when *url* (a namespace from complete_urlsplit, or None)
    points at play.google.com/music/...

    Flattened from a four-deep if pyramid into one short-circuiting
    boolean expression; behavior is unchanged.
    """
    return (
        url is not None
        and url.hostname == 'play.google.com'
        and len(url.path) > 0
        and url.path[0] == 'music'
    )
# Splits all found URLs into their component parts, retaining only GPM URLs
def split_urls(urls):
    """Parse every candidate URL into its components, keeping only the
    valid GPM ones."""
    parsed = (complete_urlsplit(url) for url in urls)
    return [candidate for candidate in parsed if validate_url(candidate)]
# Returns an array of path segments, or an empty array for an empty path
def split_path(path):
    """Return the decoded path segments after the leading component, or []
    for an empty path (so '/a/b' becomes ['a', 'b'])."""
    segments = PurePosixPath(unquote(path)).parts
    return list(segments[1:]) if segments else []
# Works like urlsplit but also splits the path into its parts
# such that '/path/to/here' because an array of strings of the form ['path', 'to', 'here']
# this is helpful for reading the ID out of the GPM URLs
# This version also returns None instead of throwing an exception.
# For our usage, this causes it to return None when encountering non-URL content or a malformed URL
# and this causes it to get filtered out later, which is what we want
def complete_urlsplit(url):
    """urlsplit variant that also splits the path into segments and parses
    the query string, returning None (instead of raising) for malformed
    or non-URL input so the caller can filter it out."""
    try:
        result = urlsplit(url)
        return SimpleNamespace(**{
            'scheme': result.scheme,
            'netloc': result.netloc,
            'path': split_path(result.path),
            'query': parse_qs(result.query),
            'fragment': result.fragment,
            'username': result.username,
            'password': result.password,
            'hostname': result.hostname,
            'port': result.port
        })
    except (TypeError, ValueError):
        # Narrowed from a bare "except:" which also swallowed SystemExit and
        # KeyboardInterrupt. urlsplit and the .port accessor raise ValueError
        # for malformed URLs; TypeError covers non-string input.
        return None
986,185 | 3905899f5312fb402fd15819ae8ed246e5969787 | from . import global_values as g
from . import utilities as util
from . import entities
from . import levels
import math as m
import random as r
class Creature(entities.Entity):
    """An Entity with health, a self-generated velocity (kept separate from
    the inherited external velocity) and action-driven animations."""

    def __init__(self, name, rect, animation_system, max_health, move_speed, max_move_speed, **_kwargs):
        kwargs = {"solid":True, "collision_dict":{}, "cw":1, "ch":1}
        kwargs.update(_kwargs)
        entities.Entity.__init__(self, rect, **kwargs)
        self.name = name
        self.max_health = max_health
        self.health = self.max_health
        self.move_speed = move_speed
        # The current actions of a Creature determine its logic and animations.
        self.current_actions = frozenset(["static"])
        self.graphics = animation_system
        # Self-propelled velocity, tracked apart from inherited vx/vy so
        # external pushes can coexist with walking.
        self.self_vx = 0
        self.self_vy = 0
        self.self_vx_keep = self.vx_keep
        self.self_vy_keep = self.vy_keep
        # Below this self-speed, movement animations fall back to "static".
        self.min_animation_velocity = 0.5

    def check_sightline(self, target, collision_dict=None):
        """Check for a sightline between this creature and either another
        creature or a target point."""
        if collision_dict is None:
            collision_dict = self.collision_dict
        # Bug fix: "entiites" was a typo that raised NameError whenever an
        # Entity was passed as the target.
        if isinstance(target, entities.Entity):
            target = target.rect.midpoint
        # Bug fix: check_line_collision was an unqualified name (NameError at
        # call time); it is assumed to live in the utilities module -- TODO
        # confirm the helper's correct home.
        sightline = util.check_line_collision(self.rect.midbottom, target, collision_dict, [self])
        return sightline

    def accelerate_self(self, angle, magnitude=None):
        """Accelerate the self-velocity along *angle* (radians), capping its
        magnitude at move_speed."""
        if magnitude is None:
            magnitude = self.move_speed
        self.self_vx += m.cos(angle)*magnitude
        self.self_vy += m.sin(angle)*magnitude
        v_direction = util.get_angle(0, 0, self.self_vx, self.self_vy)
        v_mag = util.get_magnitude(self.self_vx, self.self_vy)
        # Cap the velocity if it is too high.
        if v_mag > self.move_speed:
            self.self_vx = m.cos(v_direction)*self.move_speed
            self.self_vy = m.sin(v_direction)*self.move_speed

    def accelerate_self_cardinal(self, horizontal, vertical, magnitude):
        # NOTE(review): the combined case always accelerates at +pi/4 --
        # confirm callers handle direction signs themselves.
        if horizontal:
            if vertical:
                self.accelerate_self(m.pi/4, magnitude)
            else:
                self.accelerate_self(0, magnitude)
        elif vertical:
            self.accelerate_self(m.pi/2, magnitude)

    def update_position_and_velocity(self):
        # Apply external then self velocity, then friction on both.
        self.clamp_velocity()
        self.move(self.vx, self.vy)
        self.move(self.self_vx, self.self_vy)
        self.slow_velocity()

    def change_health(self, amount):
        """Add *amount* (may be negative) to health, clamping at max_health
        and deleting the creature at 0 or below."""
        self.health += amount
        if self.health > self.max_health:
            self.health = self.max_health
        elif self.health <= 0:
            self.delete()

    def delete(self):
        entities.Entity.delete(self)

    def set_action(self):
        # Subclasses override this to pick the frame's actions.
        self.current_actions = set()

    def set_animation(self):
        self.graphics.set_animation(self.current_actions)

    def update(self):
        entities.Entity.update(self)
        self.set_action()

    def slow_velocity(self):
        entities.Entity.slow_velocity(self)
        self.self_vx *= self.self_vx_keep
        self.self_vy *= self.self_vy_keep

    def die(self):
        self.delete()

    def draw(self):
        self.set_animation()
        entities.Entity.draw(self)
class Player(Creature):
    """The player-controlled creature; derives its animation action from its
    current self-velocity and can be (re)spawned at a tagged spawn point."""

    def __init__(self, rect, animation_system, health, acceleration, **_kwargs):
        kwargs = {"cw":1, "ch":0.5, "overwrite_player":True}
        kwargs.update(_kwargs)
        # Replace any existing global player unless explicitly disabled.
        if kwargs["overwrite_player"]:
            if g.player:
                g.player.delete()
            g.player = self
        del kwargs["overwrite_player"]
        Creature.__init__(self, "player", rect, animation_system, health, acceleration, acceleration, **kwargs)

    def update(self):
        # NOTE(review): Creature.update() already calls set_action(); the
        # second call below is redundant but harmless, kept as-is.
        Creature.update(self)
        self.set_action()

    def set_action(self):
        """Derive the 8-way movement action (or "static") from self-velocity."""
        self.current_actions = set()
        diagonal_limit = 1
        if self.self_vx < -self.min_animation_velocity:
            if self.self_vy < -diagonal_limit:
                self.current_actions.add("upleft")
            elif self.self_vy > diagonal_limit:
                self.current_actions.add("downleft")
            else:
                self.current_actions.add("left")
        elif self.self_vx > self.min_animation_velocity:
            if self.self_vy < -diagonal_limit:
                self.current_actions.add("upright")
            elif self.self_vy > diagonal_limit:
                self.current_actions.add("downright")
            else:
                self.current_actions.add("right")
        elif self.self_vy < -self.min_animation_velocity:
            self.current_actions.add("up")
        elif self.self_vy > self.min_animation_velocity:
            self.current_actions.add("down")
        else:
            self.current_actions.add("static")
        self.current_actions = frozenset(self.current_actions)

    def move_to_spawn_point(self):
        """Teleport to a random "player_spawn_point" structure and re-center
        the camera."""
        # Bug fix: set("player_spawn_point") built a set of the string's
        # individual characters, so the tag lookup could never match the
        # intended tag. Use a single-element set literal instead.
        spawn_points = g.current_level.get_tagged_structures({"player_spawn_point"})
        chosen_spawn_point = r.choice(spawn_points)
        self.rect.center = chosen_spawn_point.rect.center
        self.set_from_rect()
        g.camera.center(self)

    def draw(self):
        Creature.draw(self)
class Player_Spawn_Point(levels.Structure):
    """Invisible tile-sized marker where the player may (re)spawn."""

    def __init__(self, tile):
        # Bug fix: set("player_spawn_point") built a set of individual
        # characters; the intended tag set is a single-element set literal.
        levels.Structure.__init__(self, tile, tile.rect.w, tile.rect.h, None, tags={"player_spawn_point"}, visible=False)
|
986,186 | ab160636b3728daaed472d88d996b4403968b736 | #!/usr/bin/env python
# coding: utf-8
# # Assignment - 5
# # Question 1
# # Perform Bubble sort using function in python.
# # Solution:
# In[1]:
def bubble_sort(l):
    """Sort *l* in place (ascending) with bubble sort and return it.

    Each pass bubbles the largest remaining element to the end; the sort
    terminates early as soon as a full pass makes no swaps.
    """
    for done in range(len(l)):
        dirty = False
        for k in range(len(l) - 1 - done):
            if l[k] > l[k + 1]:
                l[k], l[k + 1] = l[k + 1], l[k]
                dirty = True
        if not dirty:
            break
    return l
li = [12, 34, 5, 23, 10, 15]
res = bubble_sort(li)
print(res)
# # Question 2
# # Perform Selection sort using function in python.
# # Solution:
# In[2]:
def select_sort(l):
    """Sort *l* in place (ascending) with selection sort and return it."""
    for target in range(len(l) - 1):
        # Index of the first minimum in the unsorted tail.
        smallest = min(range(target, len(l)), key=l.__getitem__)
        l[smallest], l[target] = l[target], l[smallest]
    return l
li = [33, 54, 21, 43, 30, 15]
res = select_sort(li)
print(res)
# # Question 3
# # Perform Insertion sort using function in python.
# # Solution:
# In[3]:
def insert_sort(l):
    """Sort *l* in place (ascending) with insertion sort and return it."""
    for idx in range(1, len(l)):
        current = l[idx]
        pos = idx
        # Shift larger elements right until current's slot is found.
        while pos > 0 and l[pos - 1] > current:
            l[pos] = l[pos - 1]
            pos -= 1
        l[pos] = current
    return l
li = [21, 24, 10, 0, 7, 15, 4]
res = insert_sort(li)
print(res)
# In[ ]:
|
986,187 | 3b5066a7fba0001982977d5c389228bd2d8d67e8 |
#ImportModules
import ShareYourSystem as SYS
#Definition of an instance
MyProducer=SYS.ProducerClass().produce(
"Catchers",
['First','Second','Third','Four'],
SYS.CatcherClass,
)
#Catch with a relative path
MyProducer['<Catchers>FirstCatcher'].grasp(
'/NodePointDeriveNoder/<Catchers>SecondCatcher'
).catch(
'Relatome',
{'MyStr':"hello"}
)
#Catch with a direct catch
MyProducer['<Catchers>FirstCatcher'].grasp(
MyProducer['<Catchers>ThirdCatcher']
).catch(
'Relatome',
{'MyInt':3}
)
#Catch with a CatchDict
MyProducer['<Catchers>FirstCatcher'].grasp(
SYS.GraspDictClass(
**{
'HintVariable':'/NodePointDeriveNoder/<Catchers>FourCatcher',
'MyFloat':5.5
}
)
).catch(
'Relatome'
)
#Definition the AttestedStr
SYS._attest(
[
'MyProducer is '+SYS._str(
MyProducer,
**{
'RepresentingBaseKeyStrsListBool':False,
'RepresentingAlineaIsBool':False
}
)
]
)
#Print
|
986,188 | 2c11edbc40e6827175a12d880f070f2f2976f8df | import pygame
'''
A reimplementation of the knots and crosses game - this time using the
pygame module to create an interactive user interface, allowing players
to use the mouse instead.
'''
pygame.font.init()
white_colour = (255, 255, 255)
clock = pygame.time.Clock()
tick_rate = 60
font = pygame.font.SysFont('comicsacs', 75)
background_image = pygame.image.load('board.png')
background_image = pygame.transform.scale(background_image, (500, 500))
cross_image = pygame.image.load('cross.png')
cross_image = pygame.transform.scale(cross_image, (125, 125))
knot_image = pygame.image.load('knot.png')
knot_image = pygame.transform.scale(knot_image, (125, 125))
# size = 3
def buildBoard(size):
    """Return a size x size grid of single-space strings (empty cells).

    Each row is a distinct list, so mutating one cell never affects
    another row.
    """
    return [[" "] * size for _ in range(size)]
class Board: # a game of knots and crosses
    """3x3 noughts-and-crosses board state: the grid contents, whose turn
    it is (0 -> "O", 1 -> "X"), move count, and a finished flag."""
    def __init__(self):
        self.board = buildBoard(3)    # 3x3 grid of " " / "O" / "X"
        self.finishedGame = False     # set once a player completes a line
        self.spacesFilled = 0         # moves played so far (for draw detection)
        self.turn = 0 # 0 or 1
    '''
    def __str__(self):
        prettyRepresentation = "\t\t" + "=====================\n" \
        "\t\t" + "||" + " " + self.board[0][0] + " | " + self.board[0][1] + " | " + self.board[0][2] + " ||" + "\n" \
        "\t\t" + "||" + "_____|_____|_____" + "||" + "\n" \
        "\t\t" + "||" + " " + self.board[1][0] + " | " + self.board[1][1] + " | " + self.board[1][2] + " ||" + "\n" \
        "\t\t" + "||" + "_____|_____|_____" + "||" + "\n" \
        "\t\t" + "||" + " " + self.board[2][0] + " | " + self.board[2][1] + " | " + self.board[2][2] + " ||" + "\n" \
        "\t\t" + "||" + " | | " + "||" + "\n" \
        "\t\t" + "====================="
        return prettyRepresentation
    '''
    def checkPositionEmpty(self, row, column):
        # NOTE(review): returns True for an empty cell but falls through to
        # an implicit None otherwise; callers rely on truthiness only.
        if self.board[row][column] == " ":
            return True
    def checkBoardFull(self): # assists in determining draw
        # Board is full when the move count reaches rows * columns.
        return (self.spacesFilled == (len(self.board)*len(self.board)))
    # "x and y coordinates"
    def checkPositionEmpty_doc_anchor = None  # (removed)
    def checkWin(self, input):
        """Return True if symbol *input* ("X" or "O") owns a full column,
        row, or diagonal; also sets finishedGame on a win.
        (The parameter name shadows the builtin input(); kept for
        compatibility with existing callers.)"""
        checkThis = input
        win = False
        winCount = 0
        moveRight = 0
        # Columns: count matches down each column in turn.
        for k in range(0, len(self.board)): # run three times
            winCount = 0
            for i in range(0, len(self.board)):
                if (self.board[i][moveRight] == checkThis):
                    winCount += 1
            if winCount == 3:
                win = True
                self.finishedGame = True
                return win
            else:
                moveRight += 1
        # Rows: count matches across each row in turn.
        moveDown = 0
        for j in range(0, len(self.board)):
            winCount = 0
            for i in range(0, len(self.board)):
                if (self.board[moveDown][i] == checkThis):
                    winCount += 1
            if winCount == 3:
                win = True
                self.finishedGame = True
                return win
            else:
                moveDown += 1
        # Main diagonal (top-left to bottom-right).
        if self.board[0][0] == checkThis:
            if self.board[1][1] == checkThis:
                if self.board[2][2] == checkThis:
                    win = True
                    self.finishedGame = True
                    return win
        # Anti-diagonal (top-right to bottom-left).
        if self.board[0][2] == checkThis:
            if self.board[1][1] == checkThis:
                if self.board[2][0] == checkThis:
                    win = True
                    self.finishedGame = True
                    return win
        return win # if reached here, "win" is False
    def draw(self, row, column):
        """Place the current player's symbol at (row, column) if the cell
        is free, then hand the turn to the other player."""
        if self.checkPositionEmpty(row, column):
            if self.turn == 0:
                self.board[row][column] = "O"
                self.turn = 1
            elif self.turn == 1:
                self.board[row][column] = "X"
                self.turn = 0
            self.spacesFilled += 1
class Game:
    """Pygame front end: owns the window, the running O/X scores, and the
    main event/draw loop."""
    def __init__(self):
        self.screen_width = 500
        self.screen_height = 600  # extra 100px strip below the board for scores
        self.screen_title = "Knots and Crosses"
        self.game_screen = pygame.display.set_mode((self.screen_width, self.screen_height))
        self.game_screen.fill(white_colour)
        pygame.display.set_caption(self.screen_title)
        self.score_O = 0
        self.score_X = 0
    def game_loop(self):
        """Run one round: map clicks onto board cells, redraw the scene,
        and detect win/draw; restarts itself after a finished round."""
        board = Board()
        tie = False
        did_win = False
        is_game_over = False
        while not is_game_over:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    is_game_over = True
                elif event.type == pygame.MOUSEBUTTONDOWN:
                    # Translate the click's pixel position into one of the
                    # nine cells (hit boxes measured from the board image).
                    pos = pygame.mouse.get_pos()
                    print(pos)
                    if (0 < pos[0] < 164) and (0 < pos[1] < 141):
                        print('top left')
                        board.draw(0, 0)
                    elif (186 < pos[0] < 327) and (9 < pos[1] < 141):
                        print('top middle')
                        board.draw(0, 1)
                    elif (349 < pos[0] < 494) and (3 < pos[1] < 139):
                        print('top right')
                        board.draw(0, 2)
                    elif (1 < pos[0] < 163) and (177 < pos[1] < 317):
                        print('middle left')
                        board.draw(1, 0)
                    elif (185 < pos[0] < 329) and (178 < pos[1] < 318):
                        print('middle middle')
                        board.draw(1, 1)
                    elif (350 < pos[0] < 494) and (179 < pos[1] < 317):
                        print('middle right')
                        board.draw(1, 2)
                    elif (1 < pos[0] < 161) and (353 < pos[1] < 494):
                        print('bottom left')
                        board.draw(2, 0)
                    elif (184 < pos[0] < 329) and (354 < pos[1] < 491):
                        print('bottom middle')
                        board.draw(2, 1)
                    elif (351 < pos[0] < 494) and (355 < pos[1] < 495):
                        print('bottom right')
                        board.draw(2, 2)
                # print(event)
            # Redraw background, both scores, and every placed symbol.
            self.game_screen.fill(white_colour)
            self.game_screen.blit(background_image, (0, 0))
            points_O = font.render(str(self.score_O), True, (0, 0, 0))
            points_X = font.render(str(self.score_X), True, (0, 0, 0))
            self.game_screen.blit(points_O, (20, 520))
            self.game_screen.blit(points_X, (450, 520))
            if board.board[0][0] == "X":
                self.game_screen.blit(cross_image, (25, 10))
            elif board.board[0][0] == "O":
                self.game_screen.blit(knot_image, (25, 10))
            if board.board[0][1] == "X":
                self.game_screen.blit(cross_image, (195, 13))
            elif board.board[0][1] == "O":
                self.game_screen.blit(knot_image, (195, 13))
            if board.board[0][2] == "X":
                self.game_screen.blit(cross_image, (354, 10))
            elif board.board[0][2] == "O":
                self.game_screen.blit(knot_image, (354, 10))
            if board.board[1][0] == "X":
                self.game_screen.blit(cross_image, (25, 184))
            elif board.board[1][0] == "O":
                self.game_screen.blit(knot_image, (25, 184))
            if board.board[1][1] == "X":
                self.game_screen.blit(cross_image, (192, 184))
            elif board.board[1][1] == "O":
                self.game_screen.blit(knot_image, (192, 184))
            if board.board[1][2] == "X":
                self.game_screen.blit(cross_image, (357, 184))
            elif board.board[1][2] == "O":
                self.game_screen.blit(knot_image, (357, 184))
            if board.board[2][0] == "X":
                self.game_screen.blit(cross_image, (25, 359))
            elif board.board[2][0] == "O":
                self.game_screen.blit(knot_image, (25, 359))
            if board.board[2][1] == "X":
                self.game_screen.blit(cross_image, (192, 359))
            elif board.board[2][1] == "O":
                self.game_screen.blit(knot_image, (192, 359))
            if board.board[2][2] == "X":
                self.game_screen.blit(cross_image, (357, 359))
            elif board.board[2][2] == "O":
                self.game_screen.blit(knot_image, (357, 359))
            # End-of-round checks: show the result for ~1s (clock.tick(1))
            # before leaving the loop.
            if board.checkWin("X"):
                did_win = True
                self.score_X += 1
                text = font.render("X wins", True, (0, 0, 0))
                self.game_screen.blit(text, (160, 510))
                pygame.display.update()
                clock.tick(1)
                break
            elif board.checkWin("O"):
                did_win = True
                self.score_O += 1
                text = font.render("O wins", True, (0, 0, 0))
                self.game_screen.blit(text, (160, 510))
                pygame.display.update()
                clock.tick(1)
                break
            if board.checkBoardFull():
                tie = True
                text = font.render("Draw", True, (0, 0, 0))
                self.game_screen.blit(text, (190, 510))
                pygame.display.update()
                clock.tick(1)
                break
            pygame.display.update()
            clock.tick(tick_rate)
        # NOTE(review): restarting via recursion grows the call stack one
        # frame per finished round; a wrapping while-loop would avoid that.
        if did_win or tie:
            self.game_loop()
if __name__ == "__main__":
pygame.init()
game = Game()
game.game_loop()
pygame.quit()
quit() |
986,189 | cc513bcc3509e52440a984ab7078738692715f59 | #encrypt
#import rand in registration
def encrypt(string, j):
    """Shift every character's code point down by *j* and concatenate the
    resulting decimal numbers into one string.

    NOTE(review): the numbers are joined without a separator, so the
    output is ambiguous to decode for mixed-width code points — confirm
    the decryption side assumes fixed-width values.
    """
    return "".join(str(ord(ch) - j) for ch in string)
def regencrypt(string, rand):
    """Registration variant of the same scheme as ``encrypt``: subtract
    *rand* from each character's code point and concatenate the decimal
    values with no separator."""
    pieces = []
    for ch in string:
        pieces.append(str(ord(ch) - rand))
    return "".join(pieces)
|
986,190 | 19a999c9809051c298b4057ec323d4e0f8d862fb | # -*- coding:utf-8 -*-
import unittest
from database.core import DatabaeTemplate
import database.core
from database.factory import OracleConnectionFactory
CONNECT_URL = 'epayment/Epay789*QWE@localhost:15211/tyzf'
HOST = 'localhost'
USERNAME = 'epayment'
PASSWORD = 'Epay789*QWE'
PORT = 15215
SERVICE = 'tyzf'
class DBTest(unittest.TestCase):
def setUp(self):
connect_factory = OracleConnectionFactory(host=HOST, username=USERNAME,
password=PASSWORD, port=PORT, service=SERVICE)
self.db_template = DatabaeTemplate(connect_factory=connect_factory)
def test_connect(self):
sql = 'SELECT count(*) as count from T_CITY'
count = self.db_template.query_for_int(sql)
print count
def test_query_blob(self):
sql = """
SELECT ACCOUNT_ID as accountId, PARAM_CODE as code, PARAM_VALUE as value
FROM T_PAYMENT_ACCOUNT_PARAM
WHERE PARAM_CODE='PARENT_ACCOUNT_PID' AND ORG_ID LIKE 'Ali%' AND PARAM_VALUE IS NOT NULL
"""
results = self.db_template.query_list(sql, outputtypehandler=database.core.output_type_handler,
row_factory=database.core.makedict)
if results is not None:
for res in results:
print res
else:
print "results is None"
def test_desc(self):
sql = "DESC T_PAYMENT_ORDER"
self.db_template.execute(sql)
if __name__ == '__main__':
unittest.main()
|
986,191 | 2402dfa0dc83c2277c92a3cc3c11129e36282635 | #chapter03\prime2.py
import math
# Trial division: test m for primality by checking divisors up to sqrt(m).
m = int(input("请输入一个整数(>1):"))
k = int(math.sqrt(m))  # no divisor beyond sqrt(m) needs checking
flag = True  # assume the entered integer is prime until a divisor is found
i = 2
while (i <= k and flag == True):
    if (m % i == 0): flag = False  # evenly divisible -> not prime, stop looping
    else: i += 1
if (flag == True): print(m, "是素数!")
else: print(m, "是合数!")
input()  # keep the console window open until Enter is pressed
|
986,192 | 668b59fd627e7df23f3617e696a0bc787a067798 | """Exceptions used in pyparam"""
class PyParamException(Exception):
    """Base exception for pyparam; all package-specific errors derive from this."""
class PyParamTypeError(PyParamException):
    """Raised when a parameter type is not supported."""
class PyParamValueError(PyParamException):
    """Raised when a parameter value is improper for its declared type."""
class PyParamNameError(PyParamException):
    """Raised for any error related to parameter names."""
|
986,193 | 9b25135be1688019781c69a16d63a64f6161d721 | import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import math
from util.gen_sal_map import vid_sal_map
#from util.solver import _gaussian_distribution2d as gauss_dist2d
def KLDiv(MDN_outputs, fix_data):
    """KL divergence between the MDN-predicted saliency map and the map
    derived from ground-truth fixations.

    Args:
        MDN_outputs: tuple (pi, mu_x, mu_y, sigma, corr) as produced by the
            mixture-density network; forwarded to gen_sal_map.
        fix_data: tensor of fixation coordinates, converted to numpy and
            rendered into a map by vid_sal_map.

    Returns:
        Scalar KL divergence value.
    """
    sal_map = gen_sal_map(*MDN_outputs)
    fix_data = fix_data.data.cpu().numpy()
    fix_map = vid_sal_map(fix_data)
    KLDiv_loss = nn.KLDivLoss()
    fix_map = torch.from_numpy(fix_map)
    # Normalize by the sum so each map is a probability distribution:
    if len(sal_map.size()) == 2:
        # Single map: one global normalisation, then add a batch dim of 1.
        sal_map /= torch.sum(sal_map.contiguous().view(-1))
        fix_map /= torch.sum(fix_map.contiguous().view(-1))
        sal_map = sal_map.contiguous().view(1, *sal_map.size())
        fix_map = fix_map.contiguous().view(1, *fix_map.size())
    else:
        # Batched maps: normalise each map by its own sum.
        # Fix: divide by the per-map sums broadcast to the map shape. The
        # previous code viewed sal_map/fix_map themselves as (N, 1, 1),
        # which cannot hold N*H*W elements and would raise at runtime.
        slmap_sum = torch.sum(sal_map.contiguous().view(sal_map.size(0), -1), dim=1)
        sal_map /= slmap_sum.contiguous().view(*slmap_sum.size(), 1, 1).expand_as(sal_map)
        fxmap_sum = torch.sum(fix_map.contiguous().view(fix_map.size(0), -1), dim=1)
        fix_map /= fxmap_sum.contiguous().view(*fxmap_sum.size(), 1, 1).expand_as(fix_map)
    sal_map = Variable(sal_map.squeeze())
    fix_map = Variable(fix_map.squeeze())
    # NOTE(review): .data[0] is legacy (pre-0.4 PyTorch); on modern PyTorch
    # this would be .item() — kept to match the surrounding codebase.
    KLD = KLDiv_loss(torch.log(sal_map), fix_map).data[0]
    return KLD
def gen_sal_map(out_pi, out_mu_x, out_mu_y, out_sigma, out_corr):
    """Render a 112x112 saliency map from mixture-of-Gaussians parameters.

    Args are MDN output Variables: mixture weights (pi), component means
    (mu_x, mu_y), spreads (sigma), and correlations (corr); shapes are
    assumed (N, KMIX) or (KMIX,) for a single sample — TODO confirm.
    Runs on CUDA (.cuda() tensors throughout).
    """
    out_pi, out_mu_x, out_mu_y, out_sigma, out_corr = out_pi.data, out_mu_x.data, out_mu_y.data, out_sigma.data, out_corr.data
    # Pixel-centre grid for the 112x112 output map.
    xGrid, yGrid = np.meshgrid(np.linspace(1, 112, 112), np.linspace(1, 112, 112))
    map_locations = torch.zeros(112*112, 2).cuda()
    xGrid = xGrid.reshape(112*112).astype(np.float32)
    yGrid = yGrid.reshape(112*112).astype(np.float32)
    map_locations[:,0] = torch.from_numpy(xGrid.copy()).cuda()
    map_locations[:,1] = torch.from_numpy(yGrid.copy()).cuda()
    del xGrid, yGrid
    if len(out_pi.size()) == 2:
        N, KMIX = out_pi.size()
    else:
        N = 1
        KMIX = out_pi.size(0)
    # Normalise pixel coordinates to [0, 1] and broadcast over the batch.
    map_locations = map_locations.expand(N, *map_locations.size())/112
    out_pi_all = out_pi.expand(112*112, *out_pi.size())
    out_pi_all = out_pi_all.contiguous().view(KMIX, N, 112*112)
    sal_results = torch.zeros(1, N, 112*112).cuda()
    # Generate saliency map from different gaussians in a loop to avoid memory overuse:
    for k in range(KMIX):
        sal_results = sal_results + out_pi_all[k,:,:].contiguous().view(1, N, 112*112) *gauss_dist2d(out_mu_x[:,k].contiguous().view(N,1), out_mu_y[:,k].contiguous().view(N,1), out_sigma[:,k].contiguous().view(N,1), out_corr[:,k].contiguous().view(N,1), map_locations)
    sal_results = sal_results/KMIX
    sal_results = sal_results.squeeze()
    return sal_results
def gauss_dist2d(out_mu_x, out_mu_y, out_sigma, out_corr, fix_data):
    """Evaluate a correlated 2-D Gaussian density at each fixation location.

    Assumes fix_data has shape (nFrames, nFixs, 2) and the parameter
    tensors have shape (nFrames, KMIX) — TODO confirm against callers.
    Uses a single sigma for both axes and a correlation term out_corr.
    """
    oneDivTwoPI = 1.0 / (2.0*math.pi)
    nFrames, nFixs,_ = fix_data.size()
    KMIX = out_mu_x.size(1)
    # combine x and y mean values
    out_mu_xy = torch.cat((out_mu_x.unsqueeze(2), out_mu_y.unsqueeze(2)),2)
    # braodcast subtraction with mean and normalization to sigma
    fix_data = fix_data.expand(KMIX, *fix_data.size())
    out_mu_xy = out_mu_xy.expand(nFixs, *out_mu_xy.size())
    out_mu_xy = out_mu_xy.contiguous().view(fix_data.size())
    out_sigma = out_sigma.expand(nFixs, *out_sigma.size())
    out_sigma = out_sigma.contiguous().view(fix_data.size()[:-1])
    out_corr = out_corr.expand(nFixs, *out_corr.size())
    out_corr = out_corr.contiguous().view(fix_data.size()[:-1])
    # Quadratic form of the bivariate normal: dx^2 + dy^2 - 2*rho*dx*dy.
    result = (fix_data - out_mu_xy)
    result = result[:,:,:,0]**2 + result[:,:,:,1]**2 - 2*out_corr*result.prod(3)
    result = result * torch.reciprocal(out_sigma**2)
    result = result * -0.5 * torch.reciprocal(1-out_corr**2)
    # Normalisation constant 1/(2*pi*sigma^2*sqrt(1-rho^2)) times exp(...).
    result = oneDivTwoPI * torch.reciprocal(out_sigma**2) * torch.reciprocal(torch.sqrt(1-out_corr**2)) * torch.exp(result)
    return result
|
986,194 | 625ab6d2dc40752b3b2456136e3d1a3ef21d32e7 | '''
Code to investigate environment dependence on the
line-of-sight displacement
Author(s): ChangHoon Hahn
'''
import numpy as np
import scipy as sp
import os.path
import cosmolopy as cosmos
# --- Local ---
from dlos import Dlos
from corr_spec.corr_corrdata import CorrCorrData
class DlosPhotoz(Dlos):
def __init__(self, cat_corr, **kwargs):
""" Child class of Dlos class that describes line-of-sight displacement
using the photometric redshift of the collided galaxy.
dLOS_photoz = Dc(z_upw) - Dc(z_photoz)
Notes
-----
* Very clunky because it has to communicate with dLOS parent class
"""
super(DlosPhotoz, self).__init__(cat_corr, **kwargs)
if self.cat_corr['catalog']['name'] != 'nseries':
raise NotImplementedError()
self.dlos = None
self.dlos_photoz = None
self.file_name = self.file()
self.dlos_file = super(DlosPhotoz, self).file()
def file(self):
""" Name of dLOS + galaxy environment file
"""
dlos_filename = super(DlosPhotoz, self).file()
photoz_str = 'DLOS_photoz_'
file_name = photoz_str.join(
dlos_filename.split('DLOS_')
)
return file_name
def build(self):
""" Calculate the line-of-sight displacement using assigned
photometric redshift
"""
self.kwargs.pop('clobber', None)
# Read in mock catalog with assigned photometric redshifts
# and calculate the line-of-sight displacement between the
# upweighted galaxy and the photometric redshift of the
# collided galaxy
photoz_cat_corr = {
'catalog': self.cat_corr['catalog'].copy(),
'correction': {'name': 'photoz'}
}
dataclass = Data('data', photoz_cat_corr)
dataclass.read()
cosmo = dataclass.cosmo()
coll = np.where(dataclass.wfc == 0)
dlos_actual = (cosmos.distance.comoving_distance(dataclass.z[coll], **cosmo) - \
cosmos.distance.comoving_distance(dataclass.zupw[coll], **cosmo)) * cosmo['h']
dlos_photoz = (cosmos.distance.comoving_distance(dataclass.photoz[coll], **cosmo) - \
cosmos.distance.comoving_distance(dataclass.zupw[coll], **cosmo)) * cosmo['h']
# each value of d_NN corresponds to a dLOS value
# in dLOS file
print self.file_name
np.savetxt(self.file_name,
np.c_[dlos_actual, dlos_photoz],
fmt=['%10.5f', '%10.5f'],
header='Columns : dLOS, dLOS_photoz'
)
return None
def read(self, **kwargs):
""" Read both dLOS and dLOS_photoz values
"""
if not os.path.isfile(self.file_name):
self.build()
elif 'clobber' in self.kwargs.keys():
if self.kwargs['clobber']:
self.build()
# read dLOS file from parent class
super(DlosPhotoz, self).read()
self.dlos, self.dlos_photoz = np.loadtxt(
self.file_name,
skiprows=1,
unpack=True,
usecols=[0, 1]
)
return None
if __name__=="__main__":
cat_corr = {'catalog': {'name': 'nseries', 'n_mock': 1}, 'correction': {'name': 'photoz'}}
dlos_class = DlosPhotoz(cat_corr)
dlos_class.build()
|
986,195 | 14a0b3961a3de532130d8a875c931a6f8b503300 | #! /usr/bin/env python
top = '..'
def build(bld):
    """Waf build rule: compile test.c into the 'test_recontext' binary,
    linking against the 'recontext' library and embedding an rpath to the
    in-tree build output so the test runs without installation."""
    bld.program(
        source = 'test.c',
        target = 'test_recontext',
        use = 'recontext',
        rpath = bld.top_dir + '/build/src',
        install_path = None,
    )
|
986,196 | 59f98f0a9f61e0a9294b4c4da5f031bd1735fd81 | '''
Import selected columns of the GGSN CSV data to pandas.
python globul_ggsn_select_to_pandas.py /path/to/ggsn.h5 /path/to/ggsn.csv
Author: Axel.Tidemann@telenor.com
'''
import sys
import pandas as pd
from globul_to_pandas import to_hdf5
# Columns to keep from the GGSN CSV export.
usecols = ['IMSI', 'cell_ID', 'recordType', 'recordOpeningDate', 'recordOpeningTime']
# Keyword arguments forwarded to pandas.read_csv (via to_hdf5): fuse the
# date and time columns into a single 'timestamp' index, read everything
# as strings, and stream in 50k-row chunks.
# NOTE(review): 'coerce' and 'error_bad_lines' belong to an old pandas API
# (modern pandas uses errors='coerce' and on_bad_lines='skip') — confirm
# the pinned pandas version before upgrading.
csv_kwargs = {'parse_dates': { 'timestamp': ['recordOpeningDate', 'recordOpeningTime'] },
              'date_parser': lambda x: pd.to_datetime(x, coerce=True),
              'converters': { col: str for col in usecols },
              'index_col': 'timestamp',
              'usecols': usecols,
              'chunksize': 50000,
              'error_bad_lines': False}
to_hdf5(sys.argv[1], sys.argv[2], csv_kwargs)
|
986,197 | e76e1f0dceee1e716fc549e33da6e4ccd177bc98 | from __future__ import division, print_function
import os
import cStringIO as StringIO
from subprocess import Popen, PIPE
from pymatgen.util.io_utils import which
from pymatgen.util.string_utils import list_strings
import logging
logger = logging.getLogger(__name__)
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
__email__ = "gmatteo at gmail.com"
__status__ = "Development"
__date__ = "$Feb 21, 2013M$"
__all__ = [
"Mrgscr",
"Mrggkk",
"Mrgddb",
"Anaddb",
]
class ExecWrapper(object):
"""This class runs an executable in a subprocess."""
def __init__(self, executable=None, verbose=0):
"""
Args:
executable:
path to the executable.
verbose:
Verbosity level.
"""
if executable is None:
executable = self.name
self.executable = which(executable)
self.verbose = int(verbose)
if self.executable is None:
msg = "Cannot find executable %s is PATH\n Use export PATH=/dir_with_exec:$PATH" % executable
raise self.Error(msg)
assert os.path.basename(self.executable) == self._name
def __str__(self):
return "%s" % self.executable
@property
def name(self):
return self._name
def execute(self, cwd=None, **kwargs):
"""Execute the executable in a subprocess."""
args = [self.executable, "<", self.stdin_fname, ">", self.stdout_fname, "2>", self.stderr_fname]
self.cmd_str = " ".join(args)
p = Popen(self.cmd_str, shell=True, stdout=PIPE, stderr=PIPE, cwd=cwd)
(self.stdout_data, self.stderr_data) = p.communicate()
self.returncode = p.returncode
if self.returncode != 0:
with open(self.stdout_fname, "r") as out, open(self.stderr_fname, "r") as err:
self.stdout_data = out.read()
self.stderr_data = err.read()
if self.verbose:
print("*** stdout: ***\n", self.stdout_data)
print("*** stderr ***\n", self.stderr_data)
raise self.Error("%s returned %s\n cmd_str: %s" % (self, self.returncode, self.cmd_str))
class MrgscrError(Exception):
"""Error class for Mrgscr"""
class Mrgscr(ExecWrapper):
_name = "mrgscr"
Error = MrgscrError
def merge_qpoints(self, files_to_merge, out_prefix, cwd=None):
"""
Execute mrgscr in a subprocess to merge files_to_merge. Produce new file with prefix out_prefix
If cwd is not None, the child's current directory will be changed to cwd before it is executed.
"""
# We work with absolute paths.
files_to_merge = [os.path.abspath(s) for s in list_strings(files_to_merge)]
nfiles = len(files_to_merge)
if self.verbose:
print("Will merge %d files with output_prefix %s" % (nfiles, out_prefix))
for (i, f) in enumerate(files_to_merge):
print(" [%d] %s" % (i, f))
if nfiles == 1:
raise self.Error("merge_qpoints does not support nfiles == 1")
self.stdin_fname, self.stdout_fname, self.stderr_fname = (
"mrgscr.stdin", "mrgscr.stdout", "mrgscr.stderr")
if cwd is not None:
self.stdin_fname, self.stdout_fname, self.stderr_fname = \
map(os.path.join, 3 * [cwd], [self.stdin_fname, self.stdout_fname, self.stderr_fname])
inp = StringIO.StringIO()
inp.write(str(nfiles) + "\n") # Number of files to merge.
inp.write(out_prefix + "\n") # Prefix for the final output file:
for filename in files_to_merge:
inp.write(filename + "\n") # List with the files to merge.
inp.write("1\n") # Option for merging q-points.
inp.seek(0)
self.stdin_data = [s for s in inp]
with open(self.stdin_fname, "w") as fh:
fh.writelines(self.stdin_data)
try:
self.execute(cwd=cwd)
except self.Error:
raise
class MrggkkError(Exception):
"""Error class for Mrggkk."""
class Mrggkk(ExecWrapper):
_name = "mrggkk"
Error = MrggkkError
def merge(self, gswfk_file, dfpt_files, gkk_files, out_gkk, binascii=0, cwd=None):
"""
Merge GGK files, return the absolute path of the new database.
Args:
gswfk_file:
Ground-state WFK filename
dfpt_files:
List of 1WFK files to merge.
gkk_files:
List of GKK files to merge.
out_gkk:
Name of the output GKK file
binascii:
Integer flat. 0 --> binary output, 1 --> ascii formatted output
cwd:
Directory where the subprocess will be executed.
"""
raise NotImplementedError("This method should be tested")
out_gkk = out_gkk if cwd is None else os.path.join(os.path.abspath(cwd), out_gkk)
# We work with absolute paths.
gswfk_file = absath(gswfk_file)
dfpt_files = [os.path.abspath(s) for s in list_strings(dfpt_files)]
gkk_files = [os.path.abspath(s) for s in list_strings(gkk_files)]
if self.verbose:
print("Will merge %d 1WF files, %d GKK file in output %s" %
(len(dfpt_nfiles), (len_gkk_files), out_gkk))
for (i, f) in enumerate(dfpt_files):
print(" [%d] 1WF %s" % (i, f))
for (i, f) in enumerate(gkk_files):
print(" [%d] GKK %s" % (i, f))
self.stdin_fname, self.stdout_fname, self.stderr_fname = (
"mrggkk.stdin", "mrggkk.stdout", "mrggkk.stderr")
if cwd is not None:
self.stdin_fname, self.stdout_fname, self.stderr_fname = \
map(os.path.join, 3 * [cwd], [self.stdin_fname, self.stdout_fname, self.stderr_fname])
inp = StringIO.StringIO()
inp.write(out_gkk + "\n") # Name of the output file
inp.write(str(binascii) + "\n") # Integer flag: 0 --> binary output, 1 --> ascii formatted output
inp.write(gswfk_file + "\n") # Name of the groud state wavefunction file WF
#dims = len(dfpt_files, gkk_files, ?)
dims = " ".join([str(d) for d in dims])
inp.write(dims + "\n") # Number of 1WF, of GKK files, and number of 1WF files in all the GKK files
# Names of the 1WF files...
for fname in dfpt_files:
inp.write(fname + "\n")
# Names of the GKK files...
for fname in gkk_files:
inp.write(fname + "\n")
inp.seek(0)
self.stdin_data = [s for s in inp]
with open(self.stdin_fname, "w") as fh:
fh.writelines(self.stdin_data)
try:
self.execute(cwd=cwd)
except self.Error:
raise
return out_gkk
class MrgddbError(Exception):
"""Error class for Mrgddb."""
class Mrgddb(ExecWrapper):
_name = "mrgddb"
Error = MrgddbError
def merge(self, ddb_files, out_ddb, description, cwd=None):
"""Merge DDB file, return the absolute path of the new database."""
# We work with absolute paths.
ddb_files = [os.path.abspath(s) for s in list_strings(ddb_files)]
out_ddb = out_ddb if cwd is None else os.path.join(os.path.abspath(cwd), out_ddb)
if self.verbose:
print("Will merge %d files into output DDB %s" % (len(ddb_files), out_ddb))
for (i, f) in enumerate(ddb_files):
print(" [%d] %s" % (i, f))
# Handle the case of a single file since mrgddb uses 1 to denote GS files!
if len(ddb_files) == 1:
with open(ddb_files[0], "r") as inh, open(out_ddb, "w") as out:
for line in inh:
out.write(line)
return out_ddb
self.stdin_fname, self.stdout_fname, self.stderr_fname = (
"mrgddb.stdin", "mrgddb.stdout", "mrgddb.stderr")
if cwd is not None:
self.stdin_fname, self.stdout_fname, self.stderr_fname = \
map(os.path.join, 3 * [cwd], [self.stdin_fname, self.stdout_fname, self.stderr_fname])
inp = StringIO.StringIO()
inp.write(out_ddb + "\n") # Name of the output file.
inp.write(str(description) + "\n") # Description.
inp.write(str(len(ddb_files)) + "\n") # Number of input DDBs.
# Names of the DDB files.
for fname in ddb_files:
inp.write(fname + "\n")
inp.seek(0)
self.stdin_data = [s for s in inp]
with open(self.stdin_fname, "w") as fh:
fh.writelines(self.stdin_data)
try:
self.execute(cwd=cwd)
except self.Error:
raise
return out_ddb
class AnaddbError(Exception):
"""Error class for Anaddb."""
class Anaddb(ExecWrapper):
_name = "anaddb"
Error = AnaddbError
#def make_stdin(self):
# # Files file
# inp = StringIO.StringIO()
# inp.write(self.input_fname + "\n") # Input file.
# inp.write(self.stdout_fname + "\n") # Output file.
# inp.write(ddb_file + "\n") # DDB file
# inp.write("dummy_band2eps" + "\n")
# inp.write("dummy1" + "\n")
# inp.write("dummy2" + "\n")
# inp.write("dummy3" + "\n")
# inp.seek(0)
# self.stdin_data = [s for s in inp]
def diagonalize_1q(self, ddb_file, cwd=None):
# We work with absolute paths.
ddb_file = os.path.abspath(ddb_file)
self.stdin_fname, self.input_fname, self.stdout_fname, self.stderr_fname = (
"anaddb.stdin", "anaddb.input", "anaddb.stdout", "anaddb.stderr")
if cwd is not None:
self.stdin_fname, self.input_fname, self.stdout_fname, self.stderr_fname = \
map(os.path.join, 3 * [cwd], [self.stdin_fname, self.inp_fname, self.stdout_fname, self.stderr_fname])
# Files file
inp = StringIO.StringIO()
inp.write(self.input_fname + "\n") # Input file.
inp.write(self.stdout_fname + "\n") # Output file.
inp.write(ddb_file + "\n") # DDB file
inp.write("dummy_band2eps" + "\n")
inp.write("dummy1" + "\n")
inp.write("dummy2" + "\n")
inp.write("dummy3" + "\n")
inp.seek(0)
self.stdin_data = [s for s in inp]
with open(self.stdin_fname, "w") as fh:
fh.writelines(self.stdin_data)
# Get the q-point from the DDB file
with open(ddb_file, "r") as fh:
nfound = 0
tag = " qpt "
for line in fh:
print(line)
if line.startswith(tag):
nfound += 1
# Coordinates of the q-points.
qcoords_str = line.split()[1:4]
#qcoords_str = [ s.replace("D", "E") for s in qcoords_str]
qpoint = map(float, qcoords_str)
if nfound != 1:
raise self.Error("Found %s occurrences of tag %s in file %s" % (nfound, tag, ddb_file))
# Write simple input file for the anaddb code.
with open(self.input_fname, "w") as inp:
inp.write('# Flags\n')
inp.write(' ifcflag 1 # Interatomic force constant flag\n\n')
inp.write('# Wavevector grid number 1 (coarse grid, from DDB)\n\n')
inp.write(' brav 1 # Bravais Lattice : 1-S.C., 2-F.C., 3-B.C., 4-Hex.\n')
inp.write(' ngqpt 1 1 1 # Q-mesh\n')
inp.write(' nqshft 1 # number of q-shifts\n')
inp.write(' q1shft %f %f %f' % tuple(qpoint))
#inp.write('# Effective charges
#inp.write(' asr 1 ! Acoustic Sum Rule. 1 => imposed asymetrically
#inp.write(' chneut 1 ! Charge neutrality requirement for effective charges.
#inp.write('# Interatomic force constant info
#inp.write(' dipdip 1 ! Dipole-dipole interaction treatment
#inp.write(' ifcana 1 ! Analysis of the IFCs
#inp.write(' ifcout 20 ! Number of IFC's written in the output, per atom
#inp.write(' natifc 1 ! Number of atoms in the cell for which ifc's are analysed
#inp.write(' atifc 1 ! List of atoms
#inp.write('
#inp.write('# This line added when defaults were changed (v5.3) to keep the previous, old behaviour
#inp.write('# symdynmat 0
if self.verbose:
print("Will diagonalize DDB file : %s" % ddb_file)
try:
self.execute(cwd=cwd)
except self.Error:
raise
# Get frequencies from the output file
# TODO
#with open(self.stdout_fname, "r") as out:
#print(out.readlines())
#for line in out:
# if line: raise
#return frequencies
|
986,198 | 408f3543bda73edb679b8fea93389153da6e623a | import torch
import torch.nn as nn
from transformer1.layers import PostitionalandWordEncoding , DecoderBlock
class Decoder(nn.Module):
    """Stack of Transformer decoder blocks followed by a vocabulary projection.

    Args:
        output_vocab: target vocabulary size (also the output logit width).
        embedding_dim: model/embedding width.
        num_head: attention heads per DecoderBlock.
        num_layers: number of stacked DecoderBlocks.
    """
    def __init__(self, output_vocab, embedding_dim, num_head, num_layers):
        super(Decoder, self).__init__()
        # Attribute names (including the 'embdding_dim' spelling) are kept
        # as-is for compatibility with existing external accesses.
        self.embdding_dim = embedding_dim
        self.num_head = num_head
        self.output_vocab = output_vocab
        self.num_layers = num_layers
        # Token + positional embedding for the target sequence.
        self.postion_embedding = PostitionalandWordEncoding(self.embdding_dim , self.output_vocab)
        self.feed_forward = nn.ModuleList(
            [
                DecoderBlock(self.embdding_dim, self.num_head) for _ in range(self.num_layers)
            ]
        )
        self.fc_out = nn.Linear(self.embdding_dim , self.output_vocab)
    def forward(self, x, enc_out, src_mask, trg_mask):
        """Decode target tokens *x* against encoder output *enc_out*.

        Returns logits of shape (N, trg_len, output_vocab).
        """
        out = self.postion_embedding(x)  # (N, trg_len, embedding_dim)
        # Fix: chain the blocks — previously every block consumed the raw
        # embedding, so every block's output except the last was discarded.
        # (The leftover debug print was also removed.)
        for block in self.feed_forward:
            out = block(out, enc_out, enc_out, src_mask, trg_mask)
        return self.fc_out(out)
if __name__ == "__main__":
x = torch.randint(0, 100, size=(64, 16))
enc_out = torch.rand(size = (64 , 12 , 512))
layer = Decoder(512 , 16 , 200 , 6)
out = layer(x , enc_out , None , None)
print(out.shape)
|
986,199 | 188b0fced5305d1ddf9ce19f78a32357a14cc570 | from parsers.MainParser import Parser
from config import *
import re
import os
def create_cache(lines):
    """Append *lines* to the cache file, one line per entry.

    Fix: the previous version wrote ``'\\n'.join(lines)`` with no trailing
    newline, so a second append fused its first entry onto the last entry
    of the previous batch when the cache was read back with readlines().
    """
    if not lines:
        return  # nothing to write; avoid appending a stray blank line
    with open(CACHE, 'a') as cache_file:
        cache_file.write('\n'.join(lines) + '\n')
def get_cache():
    """Return the cached lines (newline-terminated) or [] if no cache exists.

    Fix: open the file in a ``with`` block so the handle is closed
    deterministically instead of leaking until garbage collection.
    """
    if not os.path.exists(CACHE):
        return []
    with open(CACHE, 'r') as cache_file:
        return cache_file.readlines()
def get_lines():
    """Fetch lines from URL via Parser, falling back to the on-disk cache.

    Every entry is stringified and stripped of HTML/XML tags, the cleaned
    list is written back to the cache, and then returned.
    """
    parser = Parser(URL)
    try:
        data = parser.get_data(SELECTOR)
    # Fix: was a bare ``except:`` which also swallowed SystemExit and
    # KeyboardInterrupt; the fallback is meant for fetch/parse failures only.
    except Exception:
        data = get_cache()
    # Strip anything that looks like a markup tag from every entry.
    data = [re.sub(r'(\<(/?[^>]+)>)', '', str(entry)) for entry in data]
    create_cache(data)
    return data
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.