text stringlengths 38 1.54M |
|---|
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectAI import DistributedObjectAI
class DistributedVPEventAI(DistributedObjectAI):
    """AI-side distributed object that broadcasts a one-shot 'VP destroyed' event.

    Repeat notifications are suppressed for 10 seconds after each broadcast.
    """

    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedVPEventAI')

    def __init__(self, air):
        DistributedObjectAI.__init__(self, air)
        # Debounce flag: True while a vpDestroyed broadcast is still "hot".
        self.active = False

    def setActive(self, state):
        # Record whether the event broadcast is currently suppressed.
        self.active = state

    def vpDestroyed(self):
        # Ignore repeat notifications while the previous broadcast is active.
        if not self.active:
            self.active = True
            self.sendUpdate('vpDestroyed', [])
            # Re-arm after 10 seconds by clearing the flag via a delayed task.
            taskMgr.doMethodLater(10.0, self.setActive,
                                  self.uniqueName('vpDestroyed'),
                                  extraArgs=[False])
|
# myeven(start, stop)
# 此函数用来生成从 start开始到stop结束(不包含)区间内的一系列偶数
# def myeven(start, stop):
# ....
#
# evens = list(myeven(10, 20))
# print(evens) # [10, 12, 14, 16, 18]
# for x in myeven(21, 30):
# print(x) # 22, 24, 26, 28
#
# L = [x**2 for x in myeven(3, 10)]
# print(L) # 16 36 64
def myeven(start, stop):
    """Yield the even integers in the half-open interval [start, stop).

    Equivalent to filtering ``range(start, stop)`` for even values, but jumps
    straight to the first even number >= start and then steps by 2, halving
    the number of iterations compared to testing every integer's parity.
    """
    # Advance to the first even value at or after start, then stride by 2.
    first = start if start % 2 == 0 else start + 1
    for n in range(first, stop, 2):
        yield n


evens = list(myeven(10, 20))
print(evens)  # [10, 12, 14, 16, 18]
for x in myeven(21, 30):
    print(x)  # 22, 24, 26, 28
L = [x ** 2 for x in myeven(3, 10)]
print(L)  # [16, 36, 64]
from runners.python import Submission
class JulesSubmission(Submission):
    """Knot-hash style solver (AoC 2017 day 10, part 1).

    Repeatedly reverses circular sub-spans of a 256-element list and returns
    the product of the first two entries of the final list.
    """

    def run(self, s):
        size = 256
        nums = list(range(size))
        position = 0
        # Input is a comma-separated list of reversal lengths, e.g. '3, 4, 1, 5'.
        lengths = [int(tok) for tok in s.split(',')]
        # The skip amount grows by one after each reversal.
        for skip, length in enumerate(lengths):
            scratch = nums[:]
            # Reverse the circular span [position, position + length) by
            # swapping mirrored pairs through a scratch copy.
            for offset in range(length // 2):
                a = (position + offset) % size
                b = (position + length - 1 - offset) % size
                scratch[a] = nums[b]
                scratch[b] = nums[a]
            nums = scratch
            position = (position + length + skip) % size
        return nums[0] * nums[1]
|
import os, cv2

# Root directory containing one sub-directory per identity.
# face_Dir = '/home/chenchaocun/Glint360k/output/'
face_Dir = '/Users/chenchaocun/Downloads/1122/'
face_path = os.listdir(face_Dir)
# Write one "<identity>/<image> <label>" line per image; the label is the part
# of the directory name after the first underscore (e.g. 'id_123' -> '123').
# 'with' guarantees the output file is closed even if listing or writing
# raises, unlike the original open()/close() pair.
with open('/Users/chenchaocun/Downloads/glint360k.txt', 'w+') as f:
    for name in face_path:
        for jpg in os.listdir(face_Dir + name):
            print(face_Dir + name + '/' + jpg)
            line_txt = name + '/' + jpg + ' ' + name.split('_')[1] + '\n'
            f.write(line_txt)
|
def birthdayCakeCandles(ar):
    """Count how many entries of `ar` equal its maximum value.

    Fixes vs. original: uses the Python-3 print function (the original used
    the Python-2 print statement) and returns the count as well as printing
    it, so the result is usable programmatically.  Raises ValueError on an
    empty sequence (the original raised IndexError).
    """
    tallest = max(ar)
    cnt = sum(1 for v in ar if v == tallest)
    print(cnt)
    return cnt


a = [3, 2, 1, 3]
birthdayCakeCandles(a)
|
# -*- coding: utf8 -*-
def assert_equal5(given, expected, msg=None):
    """Raise AssertionError unless given == expected.

    `msg` is accepted for call-site compatibility but is unused, matching
    the original behaviour.
    """
    if given != expected:
        raise AssertionError("%s != %s" % (given, expected))


def assert5(given):
    """Check that a value equals itself; a no-op when run with -O
    (i.e. when __debug__ is False)."""
    if __debug__:
        assert_equal5(given, given)


def test5():
    """Exercise assert5 over a small range of integers."""
    for value in range(20):
        assert5(value)
|
from google import auth
from typing import Dict
class SecretPayload:
    """Typing stub for the payload of a Secret Manager secret version."""
    data: bytes
class AccessSecretVersionResponse:
    """Typing stub for the response returned by access_secret_version."""
    payload: SecretPayload
class SecretManagerServiceClient:
    """Typing stub for the Secret Manager client (signatures only, no bodies)."""
    def __init__(
        self,
        credentials: auth.credentials.Credentials = ...
    ):...
    def access_secret_version(
        self, request: Dict[str, str]
    ) -> AccessSecretVersionResponse: ...
|
from django.core.mail import send_mail, BadHeaderError
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render, redirect
from blog.forms import ContactForm
from django.core.urlresolvers import reverse
def index(request):
    """Render the landing page; delegate POST submissions to VistaContacto.

    Bug fix: the HttpResponse built by VistaContacto was discarded, so POST
    requests made this view return None (a 500 in Django).  The delegate's
    response is now returned to the framework.
    """
    context = {}
    if request.method == 'POST':
        return VistaContacto(request)
    # Not a POST: plain page load.
    return render(request, 'index.html', context)
def VistaContacto(request):
    """Validate and persist the contact form, then redirect to the index.

    Invalid or non-POST requests fall through to re-rendering the page;
    debugging messages are printed along the way (as in the original).
    """
    if request.method != 'POST':
        print('no es unn post')
        return render(request, 'index.html')
    form = ContactForm(request.POST)
    print('si es un post')
    if form.is_valid():
        print('si es valido')
        form.save(commit=True)
        print('si se ha salvado')
        return HttpResponseRedirect(reverse('index'))
    print(form.errors)
    return render(request, 'index.html')
|
#settings.py
#默认设置
HOST="127.0.0.1"
PORT=5555
ADDR=HOST,PORT
def center(root, width=300, height=150):
    """Center a Tk window of the given size on the screen."""
    screen_w = root.winfo_screenwidth()
    screen_h = root.winfo_screenheight()
    # Top-left corner that centers the window (float division, truncated by %d).
    x = (screen_w - width) / 2
    y = (screen_h - height) / 2
    root.geometry("%dx%d+%d+%d" % (width, height, x, y))
from Blocks import *
import torch.nn.init as init
import torch.nn.functional as F
import pdb
import math
#from layers import *
def croppCenter(tensorToCrop, finalShape):
    """Center-crop the three spatial dims of a 5D tensor to finalShape.

    NOTE(review): the crop border is derived from dim 2 only but applied to
    dims 2, 3 and 4 alike — this assumes cubic spatial shapes; confirm with
    callers before generalising.
    """
    full = tensorToCrop.shape
    border = int((full[2] - finalShape[2]) / 2)
    return tensorToCrop[:,
                        :,
                        border:full[2] - border,
                        border:full[3] - border,
                        border:full[4] - border]
def convBlock(nin, nout, kernel_size=3, batchNorm=False, layer=nn.Conv3d, bias=True, dropout_rate=0.0, dilation=1):
    """PReLU + dropout + convolution, optionally preceded by BatchNorm3d.

    `layer` defaults to nn.Conv3d but any conv constructor with the same
    keyword interface can be injected.
    """
    stages = []
    if batchNorm:
        # Pre-activation batch norm over the input channels.
        stages.append(nn.BatchNorm3d(nin))
    stages.append(nn.PReLU())
    stages.append(nn.Dropout(p=dropout_rate))
    stages.append(layer(nin, nout, kernel_size=kernel_size, bias=bias, dilation=dilation))
    return nn.Sequential(*stages)
def convBatch(nin, nout, kernel_size=3, stride=1, padding=1, bias=False, layer=nn.Conv2d, dilation=1):
    """2D convolution followed by BatchNorm2d and a PReLU activation."""
    conv = layer(nin, nout, kernel_size=kernel_size, stride=stride,
                 padding=padding, bias=bias, dilation=dilation)
    return nn.Sequential(conv, nn.BatchNorm2d(nout), nn.PReLU())
class LiviaNet(nn.Module):
    """LiviaNet: nine stacked 3D conv blocks with two skip taps (after blocks
    3 and 6) concatenated into a 1x1x1-convolution classifier head."""

    def __init__(self, nClasses):
        super(LiviaNet, self).__init__()
        # Path-Top trunk: channel widths 25 -> 50 -> 75, batch-norm on every
        # block except the first.
        self.conv1_Top = convBlock(1, 25)
        self.conv2_Top = convBlock(25, 25, batchNorm=True)
        self.conv3_Top = convBlock(25, 25, batchNorm=True)
        self.conv4_Top = convBlock(25, 50, batchNorm=True)
        self.conv5_Top = convBlock(50, 50, batchNorm=True)
        self.conv6_Top = convBlock(50, 50, batchNorm=True)
        self.conv7_Top = convBlock(50, 75, batchNorm=True)
        self.conv8_Top = convBlock(75, 75, batchNorm=True)
        self.conv9_Top = convBlock(75, 75, batchNorm=True)
        # Classifier head over concatenated features (25 + 50 + 75 = 150).
        self.fully_1 = nn.Conv3d(150, 400, kernel_size=1)
        self.fully_2 = nn.Conv3d(400, 100, kernel_size=1)
        self.final = nn.Conv3d(100, nClasses, kernel_size=1)

    def forward(self, input):
        # Only the first channel of the input volume feeds the trunk.
        feat = self.conv1_Top(input[:, 0:1, :, :, :])
        taps = []
        for idx in range(2, 10):
            feat = getattr(self, 'conv%d_Top' % idx)(feat)
            if idx in (3, 6):
                # Skip taps after blocks 3 and 6.
                taps.append(feat)
        # Crop the early taps to the deepest feature map's spatial size.
        cropped = [croppCenter(t, feat.shape) for t in taps]
        y = self.fully_1(torch.cat((cropped[0], cropped[1], feat), dim=1))
        y = self.fully_2(y)
        return self.final(y)
class LiviaSemiDenseNet(nn.Module):
    """Semi-dense variant of LiviaNet: every intermediate feature map (1-8),
    center-cropped to the deepest map's size, is concatenated before the
    classifier head (4*25 + 3*50 + 2*75 = 450 input channels)."""

    def __init__(self, nClasses):
        super(LiviaSemiDenseNet, self).__init__()
        # Path-Top trunk: same layer stack as LiviaNet.
        self.conv1_Top = convBlock(1, 25)
        self.conv2_Top = convBlock(25, 25, batchNorm=True)
        self.conv3_Top = convBlock(25, 25, batchNorm=True)
        self.conv4_Top = convBlock(25, 50, batchNorm=True)
        self.conv5_Top = convBlock(50, 50, batchNorm=True)
        self.conv6_Top = convBlock(50, 50, batchNorm=True)
        self.conv7_Top = convBlock(50, 75, batchNorm=True)
        self.conv8_Top = convBlock(75, 75, batchNorm=True)
        self.conv9_Top = convBlock(75, 75, batchNorm=True)
        # Head over all nine concatenated feature maps (450 channels total).
        self.fully_1 = nn.Conv3d(450, 400, kernel_size=1)
        self.fully_2 = nn.Conv3d(400, 100, kernel_size=1)
        self.final = nn.Conv3d(100, nClasses, kernel_size=1)

    def forward(self, input):
        # Only the first channel of the input volume feeds the trunk.
        feats = [self.conv1_Top(input[:, 0:1, :, :, :])]
        for idx in range(2, 10):
            feats.append(getattr(self, 'conv%d_Top' % idx)(feats[-1]))
        deepest = feats[-1]
        # Crop every earlier feature map to the deepest map's spatial size,
        # then concatenate all nine along the channel dimension.
        stacked = [croppCenter(f, deepest.shape) for f in feats[:-1]] + [deepest]
        y = self.fully_1(torch.cat(stacked, dim=1))
        y = self.fully_2(y)
        return self.final(y)
import numpy as np
import pandas as pd
import os
import cv2
import load_data
import data_util
from keras.models import Sequential
from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Lambda, Input
import keras as k
from keras.layers.core import Activation
from keras import optimizers
from sklearn.model_selection import train_test_split
INPUT_SHAPE = ( 64, 64, 3)
path_train_txt = r'/home/cloud/Desktop/oneShot1/data/DatasetA_train_20180813/train.txt'
path_label_txt = r'/home/cloud/Desktop/oneShot1/data/DatasetA_train_20180813/label_list.txt'
image_path = r"/home/cloud/Desktop/oneShot1/data/DatasetA_train_20180813/train"
batch_size = 64
learning_rate = 0.001
X,Y, mappedCick = data_preprocessing1( path_train_txt, path_label_txt)
X_train, X_test, Y_train, Y_test = train_test_split( X, Y)
def build_model(self=None):
    """Build the CNN classifier (231 sigmoid outputs over 64x64x3 inputs).

    Fixes vs. original: the stray `self` parameter now defaults to None (the
    function is called as `build_model()`), the sigmoid is added as an
    `Activation` layer instead of the invalid keyword call
    `model.add(Activation='sigmoid')`, and the nonexistent `add_sumary()` is
    replaced by the real `model.summary()` API.
    """
    model = Sequential()
    # Normalise pixel values from [0, 255] to [-1, 1].
    model.add(Lambda(lambda x: x / 127.5 - 1.0, input_shape=INPUT_SHAPE))
    model.add(Conv2D(64, [8, 8], strides=(1, 1), padding='valid', activation='elu'))
    model.add(MaxPool2D(pool_size=(2, 2), strides=(1, 1), padding='valid'))
    model.add(Conv2D(128, [7, 7], strides=(1, 1), padding='valid'))
    model.add(MaxPool2D())
    model.add(Conv2D(128, [4, 4], strides=(1, 1), activation='elu'))
    model.add(MaxPool2D())
    model.add(Dense(4096))
    model.add(Dense(1024))
    model.add(Dense(231))
    model.add(Activation('sigmoid'))
    model.summary()
    return model
def train_model(model, batch_size, image_path, X_train, Y_train):
    """Compile the model and fit it from a batch generator.

    Fixes vs. original: `Adam` is referenced through the imported
    `optimizers` module (the bare name was undefined) and the misspelled
    `fit_generater` is now `fit_generator`.
    """
    model.compile(loss='mean_squared_error',
                  optimizer=optimizers.Adam(lr=learning_rate))
    # NOTE(review): batch_generator is expected to come from load_data /
    # data_util — confirm it is actually in scope at call time.
    model.fit_generator(batch_generator(batch_size, image_path, 231, X_train, Y_train),
                        steps_per_epoch=len(X_train) / batch_size,
                        epochs=10, verbose=2)
def main():
    """Entry point: build the network and train it on the prepared split."""
    net = build_model()
    train_model(net, batch_size, image_path, X_train, Y_train)
|
'''
1. 程式名稱:test.py
2. 程式內容:Test the Performance of Lane Detection
(1) Grayscale
(2) Use HSV to find the yellow&white mask
(3) Canny Edge Detection
(4) Hough Transform
(5) Group Left and Right Lines
(6) Fit and Draw
Adjustable parts already incorporated:
gamma correction, CLAHE
'''
from Land_Detection.land_detection import *
import argparse
def main():
    """Parse the --video argument and run lane detection on that file."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-v", "--video", required=True, help="path to input video")
    options = vars(parser.parse_args())
    test_video(options["video"])
    # Single-image mode (currently disabled):
    # parser.add_argument("-i", "--image", required=True, help="path to input image")
    # test_img(vars(parser.parse_args())["image"])
if __name__ == '__main__':
main()
|
'''
该源程序用于生成测试用例
'''
import string
import random
import sys
from checkLE import check
# All uppercase letters — used as propositional variable names.
s = string.ascii_uppercase


def random_mini_exp():
    """Return a random atomic expression: '~X' or 'X|Y' / 'X&Y'."""
    if random.randint(0, 1) == 0:
        # Unary case: negate a random variable.
        return "~" + random.choice(s)
    # Binary case — keep the original random-draw order: operand, op, operand.
    left = random.choice(s)
    op = ["|", "&"][random.randint(0, 1)]
    right = random.choice(s)
    return left + op + right


def random_exp(max_deep):
    """Recursively build a random boolean expression at most `max_deep` deep.

    With probability 3/10 — or once the depth budget is exhausted — an atomic
    sub-expression is emitted; otherwise the expression recurses as either a
    negation or a parenthesised binary expression.
    """
    if random.randint(0, 9) < 3 or max_deep == 0:
        return random_mini_exp()
    if random.randint(0, 1) == 0:
        return "~" + random_exp(max_deep - 1)
    # Keep the original evaluation order: left subtree, operator, right subtree.
    left = random_exp(max_deep - 1)
    op = ["|", "&"][random.randint(0, 1)]
    right = random_exp(max_deep - 1)
    return "(%s)%s(%s)" % (left, op, right)
if __name__ == "__main__":
count = 1000
max_deep = 6
if len(sys.argv) == 3:
count = int(sys.argv[1])
max_deep = int(sys.argv[2])
for i in range(count):
print(check(random_exp(random.randint(0, max_deep))))
|
from game import errors
from game.conf import GRAVITY
from pygame.sprite import spritecollide
class AbstractFallingState():
    # Abstract base for falling states: direct instantiation is forbidden;
    # concrete subclasses are expected to provide `frame_count`.
    def __init__(self):
        # Constructing the abstract class directly is an error.
        raise errors.AbstractClassError()
    def fall(self):
        # Advance the fall by one frame and return the integer fall speed,
        # which grows linearly with frame_count scaled by GRAVITY.
        self.frame_count += 1
        return int(round(1 + self.frame_count * GRAVITY))
|
# -*- coding: utf-8 -*-
from django.http import HttpResponse
from django.shortcuts import render_to_response
# Search form view: renders the empty form page.
def search_form(request):
    """Render the search form template."""
    return render_to_response('search_form.html')
# GET handler: echo back the submitted query parameter 'q'.
def search(request):
    """Return a plain-text message describing the 'q' GET parameter, or an
    empty-form notice when it is missing or blank."""
    request.encoding = 'utf-8'
    q = request.GET.get('q')
    if q:
        message = '您搜索的内容为: ' + q
    else:
        message = '您提交了空表单。'
    return HttpResponse(message)
|
from datetime import datetime
from airflow import DAG
from airflow.operators.dagrun_operator import DagRunOrder
from airflow.operators.python_operator import PythonOperator
from airflow.operators.multi_dagrun import TriggerMultiDagRunOperator
def generate_dag_run():
    """Yield 100 DagRunOrder objects, each carrying its index in the payload."""
    for index in range(100):
        yield DagRunOrder(payload={'index': index})
default_args = {
'owner': 'airflow',
'start_date': datetime(2015, 6, 1),
}
dag = DAG('reindex_scheduler', schedule_interval=None, default_args=default_args)
ran_dags = TriggerMultiDagRunOperator(
task_id='gen_target_dag_run',
dag=dag,
trigger_dag_id='example_target_dag',
python_callable=generate_dag_run,
)
dag = DAG(
dag_id='example_target_dag',
schedule_interval=None,
default_args={'start_date': datetime.utcnow(), 'owner': 'airflow'},
)
def run_this_func(dag_run, **kwargs):
    """Log the chunk index delivered through the triggering dag_run's conf."""
    message = "Chunk received: {}".format(dag_run.conf['index'])
    print(message)
chunk_handler = PythonOperator(
task_id='chunk_handler',
provide_context=True,
python_callable=run_this_func,
dag=dag
)
|
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @tantrumdev wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: MuadDib
import re, urllib, urlparse, json
from resources.lib.modules import cleantitle
from resources.lib.modules import client
class source:
    """Placenta scraper for streamay.ws (French-language hoster links).

    Bug fix in __search: the search POST body was built as
    ``urllib.urlencode({'k': "%s"}) % tq`` — urlencode turns "%s" into
    "%25s", which %-formatting then reads as a width-25 spec, padding the
    title with spaces instead of URL-encoding it.  The query value is now
    encoded directly.
    """

    def __init__(self):
        self.priority = 1
        self.language = ['fr']
        self.domains = ['streamay.ws']
        self.base_link = 'http://streamay.ws'
        self.search_link = '/search'

    def movie(self, imdb, title, localtitle, aliases, year):
        # Movies are tagged 'Film' in the site's search results.
        return self.__search(title, localtitle, year, 'Film')

    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        # TV shows are tagged 'Série'.
        return self.__search(tvshowtitle, localtvshowtitle, year, 'Série')

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Resolve a show page URL to the relative URL of one episode."""
        try:
            if not url:
                return
            r = client.request(urlparse.urljoin(self.base_link, url))
            r = client.parseDOM(r, 'a', attrs={'class': 'item', 'href': '[^\'"]*/saison-%s/episode-%s[^\'"]*' % (season, episode)}, ret='href')[0]
            # Strip an optional scheme/host prefix, keep only the path part.
            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return

    def sources(self, url, hostDict, hostprDict):
        """Collect hoster links from a title page's 'lecteurs' list."""
        sources = []
        try:
            if not url:
                return
            # Map bare hoster names (without TLD) back to full domains.
            hostDict = [(i.rsplit('.', 1)[0], i) for i in hostDict]
            hostDict.append(['okru', 'ok.ru'])
            locDict = [i[0] for i in hostDict]
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url)
            r = client.parseDOM(r, 'ul', attrs={'class': '[^\'"]*lecteurs nop[^\'"]*'})
            r = client.parseDOM(r, 'li')
            # Each entry carries the streamer name and the stream id.
            r = [(client.parseDOM(i, 'a', ret='data-streamer'), client.parseDOM(i, 'a', ret='data-id')) for i in r]
            r = [(i[0][0], i[1][0], re.search('([a-zA-Z]+)(?:_([a-zA-Z]+))?', i[0][0]), ) for i in r if i[0] and i[1]]
            r = [(i[0], i[1], i[2].group(1), i[2].group(2)) for i in r if i[2]]
            for streamer, id, host, info in r:
                if host not in locDict:
                    continue
                host = [x[1] for x in hostDict if x[0] == host][0]
                link = urlparse.urljoin(self.base_link, '/%s/%s/%s' % (('streamerSerie' if '/series/' in url else 'streamer'), id, streamer))
                sources.append({'source': host, 'quality': 'SD', 'url': link, 'language': 'FR', 'info': info if info else '', 'direct': False, 'debridonly': False})
            return sources
        except:
            return sources

    def resolve(self, url):
        """Turn a streamer link into the final playable URL."""
        try:
            url = json.loads(client.request(url)).get('code')
            url = url.replace('\/', '/')
            url = client.replaceHTMLCodes(url).encode('utf-8')
            # Protocol-relative URLs come back without a scheme.
            if url.startswith('/'): url = 'http:%s' % url
            return url
        except:
            return

    def __search(self, title, localtitle, year, content_type):
        """Search the site and return the relative URL of the best match."""
        try:
            t = cleantitle.get(title)
            tq = cleantitle.get(localtitle)
            # Accept the given year, its neighbours, or an unknown year ('0').
            y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
            query = urlparse.urljoin(self.base_link, self.search_link)
            # BUG FIX: encode the actual query value (see class docstring).
            post = urllib.urlencode({'k': tq})
            r = client.request(query, post=post)
            r = json.loads(r)
            r = [i.get('result') for i in r if i.get('type', '').encode('utf-8') == content_type]
            r = [(i.get('url'), i.get('originalTitle'), i.get('title'), i.get('anneeProduction', 0), i.get('dateStart', 0)) for i in r]
            # Strip HTML tags from the titles; derive the year from dateStart
            # when anneeProduction is missing.
            r = [(i[0], re.sub('<.+?>|</.+?>', '', i[1] if i[1] else ''), re.sub('<.+?>|</.+?>', '', i[2] if i[2] else ''), i[3] if i[3] else re.findall('(\d{4})', i[4])[0]) for i in r if i[3] or i[4]]
            r = sorted(r, key=lambda i: int(i[3]), reverse=True)  # with year > no year
            r = [i[0] for i in r if i[3] in y and (t.lower() == cleantitle.get(i[1].lower()) or tq.lower() == cleantitle.query(i[2].lower()))][0]
            url = re.findall('(?://.+?|)(/.+)', r)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            return url
        except:
            return
|
# Copyright 2017 Abhinav Agarwalla. All Rights Reserved.
# Contact: agarwallaabhinav@gmail.com, abhinavagarwalla@iitkgp.ac.in
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
__author__ = "Abhinav Agarwalla"
__copyright__ = "Copyright (C) 2017 Abhinav Agarwalla"
__version__ = "0.1"
import sys
from PyQt5 import QtWidgets
from interface.mainwindow import Ui_MainWindow
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_()) |
def prime(x):
    """Return True if x is a prime number.

    Fixes vs. original: values below 2 (including 0 and negatives) are now
    correctly rejected — the original returned True for 0 — and trial
    division stops at sqrt(x) instead of x/2.  The final print uses the
    Python-3 print function (the original used the Python-2 statement).
    """
    if x < 2:
        return False
    d = 2
    while d * d <= x:
        if x % d == 0:
            return False
        d += 1
    return True


print(" ".join([str(i) for i in filter(prime, range(1, 100))]))
|
import time, math
import torch
import numpy as np
class TicToc:
    """Matlab-style tic/toc timer with named tags.

    Also usable as a context manager, in which case the dedicated
    'CONTEXT' tag is timed.
    """

    def __init__(self):
        # tag -> wall-clock start / stop timestamps
        self._TIC_TIME = {}
        self._TOC_TIME = {}

    def tic(self, tag=None):
        """Start (or restart) the timer for `tag` and return the start time."""
        if tag is None:
            tag = 'default'
        start = time.time()
        self._TIC_TIME[tag] = start
        return start

    def toc(self, tag=None):
        """Stop the timer for `tag` and return the elapsed seconds.

        Prints a warning and returns None when tic() was never called for
        the tag.
        """
        if tag is None:
            tag = 'default'
        now = time.time()
        self._TOC_TIME[tag] = now
        if tag not in self._TIC_TIME:
            print("No tic() start time available for tag {}.".format(tag))
            return None
        return now - self._TIC_TIME[tag]

    # Timer as python context manager
    def __enter__(self):
        self.tic('CONTEXT')

    def __exit__(self, type, value, traceback):
        self.toc('CONTEXT')
def save_checkpoint(model, optimizer, ckp_file):
    """Serialize model and optimizer state dicts into one checkpoint file."""
    state = {
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
    }
    torch.save(state, ckp_file)
def load_checkpoint(ckp_file, model=None, optimizer=None):
    """Restore model and/or optimizer state from a checkpoint written by
    save_checkpoint; either argument may be omitted."""
    snapshot = torch.load(ckp_file)
    if model is not None:
        model.load_state_dict(snapshot['model_state_dict'])
    if optimizer is not None:
        optimizer.load_state_dict(snapshot['optimizer_state_dict'])
def batchify(arr, batch_size):
    """Split `arr` into consecutive slices of length batch_size (the last
    slice may be shorter)."""
    return [arr[start:start + batch_size]
            for start in range(0, len(arr), batch_size)]
def batch_meshes(list_trimesh):
    """Concatenate several meshes into flat vertex/face arrays.

    Face indices of each mesh are offset by the number of vertices that
    precede it, so they keep addressing the right rows of the stacked vertex
    array.  Returns (vertices, faces, lengths) where lengths[i] is
    [num_vertices_i, num_faces_i].

    Fix vs. original: the running vertex offset starts at integer 0 (was
    0.0), so the returned face-index array keeps an integer dtype instead of
    being silently promoted to float.
    """
    batch_vertices, batch_faces, batch_lengths = [], [], []
    vertex_offset = 0
    for mesh in list_trimesh:
        batch_vertices.append(mesh.vertices)
        batch_faces.append(mesh.faces + vertex_offset)
        batch_lengths.append([mesh.vertices.shape[0], mesh.faces.shape[0]])
        vertex_offset += mesh.vertices.shape[0]
    return (np.concatenate(batch_vertices, axis=0),
            np.concatenate(batch_faces, axis=0),
            np.array(batch_lengths))
from VacEnvWrapper import Environment
from VacEnvGym import VacEnvironment
import VacWindowWrapper as Window
from gym.envs.registration import register
register(
id='VacEnv-v0',
entry_point='VacEnv:VacEnvironment',
) |
from config import config
from web.bot.telegram import handle_update
from web.bot.evernote import oauth_callback, oauth_callback_full_access
urls = [
('POST', '/{}'.format(config['telegram']['token']), handle_update),
('GET', '/evernote/oauth', oauth_callback),
('GET', '/evernote/oauth/full_access', oauth_callback_full_access),
]
|
from math import pi, pow

# Prompt for the radius and report the circle's area (pi * r^2).
radius = float(input("Enter radius of the circle: "))
area_of_circle = pi * pow(radius, 2)
print("Area of the circle is: %f" % area_of_circle)
'''
Output: python3 chapter1_4_circle.py
Enter radius of the circle: 4
Area of the circle is: 50.265482
'''
|
from rest_framework import serializers
from django.contrib.auth.models import User
from items.models import Item, FavoriteItem
class RigesterSerializer(serializers.ModelSerializer):
    """Registration serializer: creates a User with a properly hashed password."""
    # write_only so the password is never echoed back in API responses.
    password = serializers.CharField(write_only=True)
    class Meta:
        model = User
        fields = ['username', 'password', 'first_name', 'last_name',]
    def create(self, validated_data):
        # User(**validated_data) stores the raw password on the instance;
        # set_password() re-stores it hashed before saving.
        new_user = User(**validated_data)
        new_user.set_password(new_user.password)
        new_user.save()
        return new_user
class UserSerializer(serializers.ModelSerializer):
    """Nested representation of a user exposing only name fields."""
    class Meta:
        model = User
        fields = [ 'first_name','last_name']
class ItemListSerializer(serializers.ModelSerializer):
    """List view of items with a detail link, the owner and a favourite count."""
    # Hyperlink to the item detail endpoint ('api-detail' expects item_id).
    detail = serializers.HyperlinkedIdentityField(view_name = 'api-detail',lookup_field = 'id',lookup_url_kwarg = 'item_id')
    added_by= UserSerializer()
    favourited = serializers.SerializerMethodField()
    class Meta:
        model = Item
        fields = ['image', 'name','detail', 'added_by', 'favourited']
    def get_favourited (self , obj):
        # Number of FavoriteItem rows pointing at this item.
        return obj.favoriteitem_set.count()
class FavoriteSerializer(serializers.ModelSerializer):
    """Minimal representation of a favourite: just the owning user."""
    class Meta:
        model = FavoriteItem
        fields = ['user']
class ItemDetailsSerializer(serializers.ModelSerializer):
    """Detail view of an item including the list of users who favourited it."""
    favourited_by = serializers.SerializerMethodField()
    class Meta:
        model = Item
        fields = ['name' , 'image', 'description' , 'favourited_by']
    def get_favourited_by(self, obj):
        # Serialise every favourite of this item via FavoriteSerializer.
        return FavoriteSerializer(obj.favoriteitem_set.all(), many=True).data
|
from flask import Flask
app = Flask(__name__)
app.config.from_object('wsgi.settings')
from wsgi import route
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
DrCoding
LSTM baseline model for the MIMIC-III ICD-9 prediction task
"""
import sys
from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as f
import numpy as np
from utils import create_embedding_from_glove
class DischargeLSTM(nn.Module):
    """Bidirectional LSTM multi-label classifier over discharge summaries."""

    def __init__(self, vocab, hidden_size, dropout_rate, embed_size, device, glove_path=None):
        """
        Bidirectional LSTM Multi-Label Classifier with optional GloVe embeddings
        :param vocab: the vocab object (provides .discharge and .icd)
        :param hidden_size: the hidden size of the LSTM
        :param dropout_rate: the dropout to apply after the LSTM
        :param embed_size: the size of the word embeddings (overridden by the
            GloVe dimension when a GloVe file is supplied)
        :param device: device the model runs on
        :param glove_path: path to the GloVe file, or None/"NONE" to train
            embeddings from scratch
        """
        super(DischargeLSTM, self).__init__()
        self.glove_path = glove_path
        self.vocab = vocab
        self.num_output_classes = len(self.vocab.icd)
        self.hidden_size = hidden_size
        self.dropout_rate = dropout_rate
        self.embed_size = embed_size
        self.device = device
        if glove_path is not None and glove_path != "NONE":
            emb_layer, num_embeddings, embedding_dim = create_embedding_from_glove(glove_path, self.vocab, device)
            self.embeddings = emb_layer
            self.embed_size = embedding_dim
        else:
            self.embed_size = embed_size
            self.embeddings = nn.Embedding(len(self.vocab.discharge), embed_size, padding_idx=self.vocab.discharge.pad_token)
        self.lstm = nn.LSTM(input_size=self.embed_size, hidden_size=hidden_size, bidirectional=True, bias=True)
        # BUG FIX: the classifier consumes the concatenated forward/backward
        # final hidden states, which have 2 * hidden_size features.  The
        # original sized this layer embed_size * 2 and crashed whenever
        # embed_size != hidden_size.
        self.linear = nn.Linear(hidden_size * 2, self.num_output_classes, bias=True)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, discharge_padded, source_lengths):
        """
        Forward pass of the LSTM with a linear layer
        :param discharge_padded: the padded discharge summaries (bs, seq length)
        :param source_lengths: the actual, original discharge summaries length (bs)
        :return: logits after applying the model (bs, num_output_classes)
        """
        embeddings = self.embeddings(discharge_padded)
        discharge_padded_packed = nn.utils.rnn.pack_padded_sequence(embeddings, source_lengths, batch_first=True, enforce_sorted=False)
        output_state, hidden_and_cell = self.lstm(discharge_padded_packed)
        final_hidden, final_cell = hidden_and_cell  # (2, batch_size, hidden_size)
        final_hidden = final_hidden.permute(1, 0, 2).contiguous()  # (batch_size, 2, hidden_size)
        final_hidden = final_hidden.view(final_hidden.shape[0], -1)  # (batch_size, 2 * hidden_size)
        final_hidden = self.dropout(final_hidden)
        lstm_out = self.linear(final_hidden)  # batch_size x num_output_classes
        return lstm_out

    @staticmethod
    def load(model_path: str):
        """
        Load the model from a file.
        @param model_path (str): path to model
        """
        params = torch.load(model_path, map_location=lambda storage, loc: storage)
        args = params['args']
        model = DischargeLSTM(vocab=params['vocab'], **args)
        model.load_state_dict(params['state_dict'])
        return model

    def save(self, path: str):
        """
        Save the model to a file.
        @param path (str): path to the model
        """
        print('save model parameters to [%s]' % path, file=sys.stderr)
        params = {
            'args': dict(hidden_size=self.hidden_size, dropout_rate=self.dropout_rate, embed_size=self.embed_size, device=self.device, glove_path=self.glove_path),
            'vocab': self.vocab,
            'state_dict': self.state_dict()
        }
        torch.save(params, path)
|
from . import views
from django.conf.urls import url
app_name = 'jobs'
urlpatterns = [
# /jobs
url(r'^$', views.JobIndexView.as_view(), name='index'),
# /jobs/<job_pk>
url(r'^(?P<job_id>[0-9]+)/$', views.JobDetailView.as_view(), name='detail'),
# /jobs/create
url(r'^create/$', views.JobCreateView.as_view(), name='job-create'),
# /jobs/<job_id>/update
url(r'^(?P<job_id>[0-9]+)/update/$', views.JobUpdateView.as_view(), name='job-update'),
# /jobs/<job_id>/delete/
url(r'^(?P<job_id>[0-9]+)/delete/$', views.JobDeleteView.as_view(), name='job-delete')
] |
from typing import Dict, Any
from src.data.common_types import AbstractRawDataProvider
from src.data.raw_data.raw_data_providers import ExtruderRawDataProvider
from src.estimator.launcher.launchers import ExperimentLauncher
from src.estimator.model.contrastive_model import ContrastiveModel
from src.estimator.model.estimator_conv_model import merge_two_dicts
from src.utils import consts
class ExtruderContrastiveExperimentLauncher(ExperimentLauncher):
    """Experiment launcher grouping contrastive-model runs on extruder data."""
    @property
    def name(self):
        # Experiment name used to label this launcher's runs.
        return "contrastive_extruder_exp"
class ConvParamsAwareExtruderContrastiveModel(ContrastiveModel):
    """Contrastive model whose conv filter counts and kernel side lengths are
    injected per instance, so a single launcher can sweep architectures."""
    @property
    def raw_data_provider(self) -> AbstractRawDataProvider:
        # Extruder dataset provider; 100 is its constructor argument —
        # NOTE(review): confirm its meaning in ExtruderRawDataProvider.
        return ExtruderRawDataProvider(100)
    @property
    def summary(self) -> str:
        # Short run descriptor: filters ("f") and kernel side lengths ("ksl").
        return self._summary_from_dict(
            {
                "f": self.filters,
                "ksl": self.kernel_side_lengths
            })
    def __init__(self, filters, kernel_side_lengths) -> None:
        super().__init__()
        self.filters = filters
        self.kernel_side_lengths = kernel_side_lengths
    @property
    def additional_model_params(self) -> Dict[str, Any]:
        # Extend the base params with this sweep's conv settings and a fixed
        # training schedule (5000 steps, batch size 300).
        return merge_two_dicts(super().additional_model_params,
                               {
                                   consts.FILTERS: self.filters,
                                   consts.KERNEL_SIDE_LENGTHS: self.kernel_side_lengths,
                                   consts.TRAIN_STEPS: 5000,
                                   consts.BATCH_SIZE: 300,
                               })
launcher = ExtruderContrastiveExperimentLauncher([
ConvParamsAwareExtruderContrastiveModel(filters=[32, 2], kernel_side_lengths=[3, 3]),
ConvParamsAwareExtruderContrastiveModel(filters=[32, 2], kernel_side_lengths=[5, 5]),
ConvParamsAwareExtruderContrastiveModel(filters=[32, 32, 2], kernel_side_lengths=[3, 3, 3]), # useless
ConvParamsAwareExtruderContrastiveModel(filters=[32, 32, 2], kernel_side_lengths=[5, 5, 5]),
ConvParamsAwareExtruderContrastiveModel(filters=[64, 64, 2], kernel_side_lengths=[5, 5, 5]),
ConvParamsAwareExtruderContrastiveModel(filters=[8, 16, 32, 64, 128, 320, 80],
kernel_side_lengths=[3, 3, 3, 3, 3, 3, 3]),
ConvParamsAwareExtruderContrastiveModel(filters=[8, 16, 32, 64, 128, 320, 80],
kernel_side_lengths=[5, 5, 5, 5, 5, 5, 5]),
ConvParamsAwareExtruderContrastiveModel(filters=[8, 16, 32, 64, 128, 320, 2],
kernel_side_lengths=[3, 3, 3, 3, 3, 3, 3]),
ConvParamsAwareExtruderContrastiveModel(filters=[8, 16, 32, 64, 128, 320, 2],
kernel_side_lengths=[5, 5, 5, 5, 5, 5, 5]), # the best one
ConvParamsAwareExtruderContrastiveModel(filters=[32, 32, 32, 32, 2], kernel_side_lengths=[3, 3, 3, 3, 3]),
ConvParamsAwareExtruderContrastiveModel(filters=[32, 32, 32, 32, 2], kernel_side_lengths=[5, 5, 5, 5, 5]),
ConvParamsAwareExtruderContrastiveModel(filters=[64, 64, 64, 64, 2], kernel_side_lengths=[3, 3, 3, 3, 3]),
ConvParamsAwareExtruderContrastiveModel(filters=[64, 64, 64, 64, 2], kernel_side_lengths=[5, 5, 5, 5, 5]),
ConvParamsAwareExtruderContrastiveModel(filters=[64, 64, 64, 64, 2], kernel_side_lengths=[7, 7, 7, 7, 7]),
ConvParamsAwareExtruderContrastiveModel(filters=[64, 128, 256, 512, 2], kernel_side_lengths=[9, 7, 5, 3, 3]),
ConvParamsAwareExtruderContrastiveModel(filters=[128, 256, 512, 1024, 2], kernel_side_lengths=[9, 7, 5, 3, 3]),
# ConvParamsAwareExtruderContrastiveModel(filters=[32, 64, 128, 256, 2], kernel_side_lengths=[3, 3, 3, 3, 3]),
# ConvParamsAwareExtruderContrastiveModel(filters=[32, 64, 128, 256, 2], kernel_side_lengths=[2, 2, 2, 2, 2]),
# ConvParamsAwareExtruderContrastiveModel(filters=[128, 256, 512, 1024, 2], kernel_side_lengths=[3, 3, 3, 3, 3]),
# ConvParamsAwareExtruderContrastiveModel(filters=[64, 128, 256, 512, 2], kernel_side_lengths=[3, 3, 3, 3, 3]),
# ConvParamsAwareExtruderContrastiveModel(filters=[512, 512, 512, 512, 2], kernel_side_lengths=[3, 3, 3, 3, 3]),
])
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# /almadata02/lperez/casa_5.1.1/casa-release-5.1.1-5.el7/bin/casa
#######################################################################
# AS 205 Self-Calibration #
#######################################################################
'''
Universidad de Chile
Facultad de Ciencias Fisicas y Matematicas
Departamento de Astronomia
Nicolas Troncoso Kurtovic
mail: nicokurtovic at gmail.com
This script was written for CASA 5.1.1 to
self-calibrate the short and long baseline AS205 datasets. This includes:
 - 2 SB from the archive.
 - 1 SB from the Large Program.
 - 1 LB from the Large Program.
We name these datasets by short/long baseline plus a chronological number.
'''
# Execute the reduction functions. This defines the helpers used below
# (get_flagchannels, avg_cont, tclean_wrapper, estimate_SNR, fit_gaussian,
# export_MS, plot_deprojected, estimate_flux_scale, rescale_flux, ...).
# NOTE: execfile is Python 2 only; this script runs inside the CASA shell.
execfile('reduction_utils.py')
# Append the au functions from other directory.
sys.path.append('/umi_01/nkurtovic/HTLup/analysis_scripts/')
import analysisUtils as au
#######################################################################
# NAMES AND PATH #
#######################################################################
# Do you want to skip the plots of images and interactive clean process?
skip_plots=True
# Name of the object
prefix = 'AS205'
# Frequency of 12CO 2-1 line
rest_freq = 2.30538e11 # Hz
# Other lines that appear in SB1 and SB2
rest_freq_13CO21 = 2.20399e11 # Hz
rest_freq_C18O21 = 2.1956e11 # Hz
#
# Paths, names and lines in the data. Fill this dictionary with listobs.
# line_spws have the spws with lines that need to be flagged.
# line_freqs have the frequencies to flag. In this case is the 12CO 2-1 line
# Build the path to each dataset
path_SB1 = '/umi_01/nkurtovic/AS205/uid___A002_X3b3400_Xcd7.ms.split.cal'
path_SB2 = '/umi_01/nkurtovic/AS205/uid___A002_X3f6a86_X5da.ms.split.cal'
path_SB3 = '/umi_01/nkurtovic/AS205/as205_p484_SB.ms'
path_LB1 = '/umi_01/nkurtovic/AS205/as205_p484_LB.ms'
data_params = {'SB1': {'vis' : path_SB1,
'name': 'SB1', \
'field': 'V866 Sco', \
'line_spws': np.array([0, 2, 3]), \
'line_freqs': np.array([rest_freq, rest_freq_13CO21, rest_freq_C18O21])
},
'SB2': {'vis' : path_SB2,
'name': 'SB2', \
'field': 'V866 Sco', \
'line_spws': np.array([0, 2, 3]), \
'line_freqs': np.array([rest_freq, rest_freq_13CO21, rest_freq_C18O21])
},
'SB3': {'vis' : path_SB3,
'name': 'SB3', \
'field': 'AS_205', \
'line_spws': np.array([0, 4, 8]), \
'line_freqs': np.array([rest_freq, rest_freq, rest_freq])
},
'LB1': {'vis' : path_LB1,
'name' : 'LB1',
'field': 'AS_205',
'line_spws': np.array([3]),
'line_freqs': np.array([rest_freq]),
}}
'''
Important stuff about the datasets:
SB1: 4 spws, the CO line is in the spw0, observed in march 2012.
SB2: 4 spws, the CO line is in the spw0, observed in may 2012.
SB3: 12 spws, CO line in 0, 4 and 8. observed in may 2017.
LB1: 4 spws, CO line in spw3, observed in september 2017.
'''
#######################################################################
# LINE FLAGGING #
#######################################################################
'''
To flag the CO line, we proceed by inspecting by eye the location in
channels and spw. Flag ~50 km/s around the 12CO 2-1 line. Each channel in
these spectral windows spans 0.635 km/s, so we need to
flag ~160 channels in total. (each channel is half that
speed to take the mean)
'''
# rest_freq_13CO21 = 2.20399e11 # Hz
# rest_freq_C18O21 = 2.1956e11 # Hz
# Visual inspection of the spws in each SB data (skipped in batch mode).
if not skip_plots:
    # Plot the amplitude of each channel, in groups of spw.
    plotms(vis=path_SB1, \
           xaxis='channel', \
           yaxis='amplitude', \
           avgtime='1e8', \
           ydatacolumn='data', \
           field=data_params['SB1']['field'], \
           avgscan=True, \
           avgbaseline=True, \
           iteraxis='spw', \
           restfreq=str(rest_freq))
    plotms(vis=path_SB2, \
           xaxis='velocity', \
           yaxis='amplitude', \
           avgtime='1e8', \
           ydatacolumn='data', \
           field=data_params['SB2']['field'], \
           avgscan=True, \
           avgbaseline=True, \
           iteraxis='spw', \
           restfreq=str(rest_freq_C18O21))
    plotms(vis=path_SB3, \
           xaxis='velocity', \
           yaxis='amplitude', \
           avgtime='1e8', \
           ydatacolumn='data', \
           field=data_params['SB3']['field'], \
           avgscan=True, \
           avgbaseline=True, \
           iteraxis='spw', \
           restfreq=str(rest_freq))
    plotms(vis=path_LB1, \
           xaxis='frequency', \
           yaxis='amplitude', \
           avgtime='1e8', \
           ydatacolumn='data', \
           field=data_params['LB1']['field'], \
           avgscan=True, \
           avgbaseline=True, \
           iteraxis='spw', \
           restfreq=str(rest_freq))
'''
The position of the line depends on the SB dataset that is observed.
In SB1, the central position is approximately at -31 km/s, but
the two disks have different radial velocities, so the mid
velocity observed should be something like 36 km/s.
In SB2, the central position is approximately at -25 km/s.
In SB3, the central position is approximately at -11 km/s.
This issue is only observable in the plotms, because get_flagchannels needs
the real velocity (~4 km/s) to flag the correct channels. We continue by
flagging the known lines.
'''
# Find the channels that will be flagged in the datasets.
# Delete previous runs (flag-state backups from earlier executions).
os.system('rm -rf '+path_SB1+'.flagversions*')
os.system('rm -rf '+path_SB2+'.flagversions*')
os.system('rm -rf '+path_SB3+'.flagversions*')
os.system('rm -rf '+path_LB1+'.flagversions*')
# Find the channel ranges covering +-25 km/s around each line listed in
# data_params[...]['line_spws'/'line_freqs'].
SB1_flagchannels = get_flagchannels(data_params['SB1'], \
                                    prefix, \
                                    velocity_range=np.array([-21., 29.]))
SB2_flagchannels = get_flagchannels(data_params['SB2'], \
                                    prefix, \
                                    velocity_range=np.array([-21., 29.]))
SB3_flagchannels = get_flagchannels(data_params['SB3'], \
                                    prefix, \
                                    velocity_range=np.array([-21., 29.]))
LB1_flagchannels = get_flagchannels(data_params['LB1'], \
                                    prefix, \
                                    velocity_range=np.array([-21., 29.]))
# Flagchannels input string for SB1: '0:2232~3492, 2:651~1855, 3:148~1348'
# Flagchannels input string for SB2: '0:1868~3128, 2:999~2204, 3:495~1695'
# Flagchannels input string for SB3: '0:1841~1998, 4:1841~1998, 8:1841~1998'
# Flagchannels input string for LB1: '3:1841~1998'
# Average continuum. This will generate the file 'prefix+_*B_initcont.ms'
# Delete previous run.
os.system('rm -rf '+prefix+'_SB1_initcont.*')
os.system('rm -rf '+prefix+'_SB2_initcont.*')
os.system('rm -rf '+prefix+'_SB3_initcont.*')
os.system('rm -rf '+prefix+'_LB1_initcont.*')
# Average the line-free channels into a continuum-only MS per dataset.
avg_cont(data_params['SB1'], prefix, flagchannels=SB1_flagchannels)
avg_cont(data_params['SB2'], prefix, flagchannels=SB2_flagchannels)
avg_cont(data_params['SB3'], prefix, flagchannels=SB3_flagchannels)
avg_cont(data_params['LB1'], prefix, flagchannels=LB1_flagchannels)
#######################################################################
# DEFINE MASK AND FIRST IMAGE #
#######################################################################
# Multiscales in pixels for LB dataset.
scales_LB = [0, 11, 33, 55] #0x, 1x, 3x, 5x
# Image the SB ms file interactively. Inspection to define the mask
# (skipped in batch mode).
if not skip_plots:
    tclean_wrapper(vis=prefix+'_SB1_initcont.ms', \
                   imagename=prefix+'_SB1_initcont', \
                   scales=[0], imsize=900, interactive=True)
    tclean_wrapper(vis=prefix+'_SB2_initcont.ms', \
                   imagename=prefix+'_SB2_initcont', \
                   scales=[0], imsize=900, interactive=True)
    tclean_wrapper(vis=prefix+'_SB3_initcont.ms', \
                   imagename=prefix+'_SB3_initcont', \
                   scales=[0], imsize=900, interactive=True)
    tclean_wrapper(vis=prefix+'_LB1_initcont.ms', \
                   imagename=prefix+'_LB1_initcont', \
                   scales=scales_LB, imsize=3000, interactive=True)
# Use this quick imaging to find centroids and positions. With fit_gaussian
# you can find the optimal positions to build the masks.
#########################
# LONG BASELINE MASKS
# AND FIRST IMAGE
#########################
'''
In the LB dataset we can observe two disks at a distance of 1.2 arcsec
between their centers. We will mask them separately, and calculate the
rms with a ring around them.
The beam is elliptical, so we are going to use uv-tapering to circularize
it, and then obtain more reliable centroid, PA and inc estimations
through the gaussian fit.
'''
# Define the center of the main disk mask in LB data. First we used
# visual inspection to define centers, and then the gaussian centroid.
center_ra_LB_main = '16h11m31.351317s'
center_dec_LB_main = '-18d38m26.24671s'
# NOTE(review): semimajor < semiminor here (and for the SB masks below);
# CASA accepts it, but the axis labels look swapped -- confirm intended.
semimajor_LB_main = '0.50arcsec'
semiminor_LB_main = '0.60arcsec'
center_LB_main = center_ra_LB_main + ' ' + center_dec_LB_main
# Define the center of the secondary disk mask in LB data
center_ra_LB_second = '16h11m31.294763s'
center_dec_LB_second = '-18d38m27.27550s'
semimajor_LB_second = '0.26arcsec'
semiminor_LB_second = '0.48arcsec'
PA_LB_second = 18.5
center_LB_second = center_ra_LB_second + ' ' + center_dec_LB_second
# Write position angle in string and in radians
str_PA_LB_second = str(PA_LB_second * (np.pi / 180))+'rad'
# First object Mask (CRTF ellipse region string)
mask_LB_main = 'ellipse[[%s, %s], [%s, %s], 0rad]' % (center_ra_LB_main, \
                                                      center_dec_LB_main, \
                                                      semimajor_LB_main, \
                                                      semiminor_LB_main)
# Second object Mask
mask_LB_second = 'ellipse[[%s, %s], [%s, %s], %s]' % (center_ra_LB_second, \
                                                      center_dec_LB_second, \
                                                      semimajor_LB_second, \
                                                      semiminor_LB_second, \
                                                      str_PA_LB_second)
# Create a residual (noise) annulus around the main disk for rms estimates.
res_mask_LB = 'annulus[[%s, %s], [%s, %s]]' % (center_ra_LB_main, \
                                               center_dec_LB_main, \
                                               '1.5arcsec', '2.5arcsec')
mask_LB = [mask_LB_main, mask_LB_second]
# Taper to circularize the elongated LB beam (see docstring above).
uvtaper_LB = ['0mas', '40mas', '90deg']
#for i in ['.image', '.residual', '.pb', '.sumwt', '.psf', '.model']:
#    os.system('rm -rf '+prefix+'_LB1_initcont'+i)
# Image non interactively
tclean_wrapper(vis=prefix+'_LB1_initcont.ms', \
               imagename=prefix+'_LB1_initcont', \
               scales=scales_LB, \
               mask=mask_LB, \
               savemodel='modelcolumn', \
               threshold='0.1mJy', \
               uvtaper=uvtaper_LB, \
               interactive=False)
estimate_SNR(prefix+'_LB1_initcont.image', disk_mask = mask_LB_main, noise_mask = res_mask_LB)
# Recorded results of the runs above:
# WITHOUT UVTAPER
#AS205_LB1_initcont.image
#Beam 0.032 arcsec x 0.020 arcsec (-85.30 deg)
#Flux inside disk mask: 360.29 mJy
#Peak intensity of source: 3.36 mJy/beam
#rms: 3.78e-02 mJy/beam
#Peak SNR: 89.00
# WITH UVTAPER
#AS205_LB1_initcont.image
#Beam 0.039 arcsec x 0.037 arcsec (32.78 deg)
#Flux inside disk mask: 359.48 mJy
#Peak intensity of source: 6.59 mJy/beam
#rms: 7.16e-02 mJy/beam
#Peak SNR: 92.08
#########################
# SHORT BASELINE MASKS
# AND FIRST IMAGES
#########################
# For short baselines we can use the same masks that were
# defined for LB. We are going to increase the size of the
# masks a little, but they will remain at the same position.
semimajor_SB_main = '0.55arcsec'
semiminor_SB_main = '0.82arcsec'
# Define the size of the secondary disk mask for SB data
# (same center as the LB secondary mask).
semimajor_SB_second = '0.3arcsec'
semiminor_SB_second = '0.52arcsec'
# First object Mask
mask_SB_main = 'ellipse[[%s, %s], [%s, %s], 0rad]' % (center_ra_LB_main, \
                                                      center_dec_LB_main, \
                                                      semimajor_SB_main, \
                                                      semiminor_SB_main)
# Second object Mask
mask_SB_second = 'ellipse[[%s, %s], [%s, %s], %s]' % (center_ra_LB_second, \
                                                      center_dec_LB_second, \
                                                      semimajor_SB_second, \
                                                      semiminor_SB_second, \
                                                      str_PA_LB_second)
# Create a residual (noise) annulus for the coarser SB beams.
res_mask_SB = 'annulus[[%s, %s], [%s, %s]]' % (center_ra_LB_main, \
                                               center_dec_LB_main, \
                                               '3.0arcsec', '5.0arcsec')
mask_SB = [mask_SB_main, mask_SB_second]
# Image non interactively
tclean_wrapper(vis=prefix+'_SB1_initcont.ms',
               imagename=prefix+'_SB1_initcont', \
               scales=[0], mask=mask_SB, savemodel='modelcolumn', \
               threshold='1.0mJy', interactive=False)
tclean_wrapper(vis=prefix+'_SB2_initcont.ms',
               imagename=prefix+'_SB2_initcont', \
               scales=[0], mask=mask_SB, savemodel='modelcolumn', \
               threshold='1.0mJy', interactive=False)
tclean_wrapper(vis=prefix+'_SB3_initcont.ms',
               imagename=prefix+'_SB3_initcont', \
               scales=[0], mask=mask_SB, savemodel='modelcolumn', \
               threshold='0.2mJy', interactive=False)
# NOTE(review): these SNR estimates use the LB masks (mask_LB_main /
# res_mask_LB) on SB images; mask_SB_main / res_mask_SB may have been
# intended -- confirm before relying on the recorded numbers.
estimate_SNR(prefix+'_SB1_initcont.image', disk_mask = mask_LB_main, noise_mask = res_mask_LB)
estimate_SNR(prefix+'_SB2_initcont.image', disk_mask = mask_LB_main, noise_mask = res_mask_LB)
estimate_SNR(prefix+'_SB3_initcont.image', disk_mask = mask_LB_main, noise_mask = res_mask_LB)
#AS205_SB1_initcont.image
#Beam 0.781 arcsec x 0.512 arcsec (-83.41 deg)
#Flux inside disk mask: 255.79 mJy
#Peak intensity of source: 283.43 mJy/beam
#rms: 4.30e+00 mJy/beam
#Peak SNR: 65.92
#AS205_SB2_initcont.image
#Beam 0.585 arcsec x 0.528 arcsec (88.99 deg)
#Flux inside disk mask: 267.69 mJy
#Peak intensity of source: 245.11 mJy/beam
#rms: 1.39e+00 mJy/beam
#Peak SNR: 176.16
#AS205_SB3_initcont.image
#Beam 0.265 arcsec x 0.225 arcsec (87.38 deg)
#Flux inside disk mask: 357.22 mJy
#Peak intensity of source: 101.62 mJy/beam
#rms: 3.46e-01 mJy/beam
#Peak SNR: 293.80
#######################################################################
# FIND AND ALIGN CENTROIDS #
#######################################################################
'''
We calculate the centroids of each execution. For SB1, SB2 and LB1 there
is only one execution, but SB3 has 3. In case of desalignement, we
will center them to the LB position, because of the high resolution
and signal to noise.
The centroid of the main disk will be used to align.
Remember that in fit_gaussian the mask must be a string, not a mask file.
'''
#########################
# SHORT BASELINE
# GAUSSIAN FIT
#########################
# Image each execution in SB3 dataset.
image_each_obs(data_params['SB3'], prefix, \
scales=[0], mask=mask_SB, threshold='0.1mJy')
split_all_obs(prefix+'_SB3_initcont.ms', prefix+'_SB3_initcont_exec')
# Find centroids
fit_gaussian(prefix+'_SB1_initcont.image', region=mask_SB_main)
# 16h11m31.354744s -18d38m26.09769s
# Peak of Gaussian component identified with imfit: J2000 16h11m31.354744s -18d38m26.09769s
# PA of Gaussian component: 111.93 deg
# inclination of Gaussian component: 19.18 deg
# Pixel coordinates of peak: x = 465.282 y = 410.810
fit_gaussian(prefix+'_SB2_initcont.image', region=mask_SB_main)
# 16h11m31.356286s -18d38m26.13514s
# Peak of Gaussian component identified with imfit: J2000 16h11m31.356286s -18d38m26.13514s
# PA of Gaussian component: 121.49 deg
# inclination of Gaussian component: 23.26 deg
# Pixel coordinates of peak: x = 464.552 y = 409.562
fit_gaussian(prefix+'_SB3_initcont.image', region=mask_SB_main)
# 16h11m31.351009s -18d38m26.25311s
#Peak of Gaussian component identified with imfit: ICRS 16h11m31.351009s -18d38m26.25311s
#16:11:31.351009 -18:38:26.25311
#Separation: radian = 8.23157e-08, degrees = 0.000005, arcsec = 0.016979
#Peak in J2000 coordinates: 16:11:31.35172, -018:38:26.239466
#PA of Gaussian component: 108.65 deg
#Inclination of Gaussian component: 19.42 deg
#Pixel coordinates of peak: x = 464.318 y = 410.841
fit_gaussian(prefix+'_SB3_initcont_exec0.image', region=mask_SB_main)
# 16h11m11.351910s -18d38m26.26593s
# Peak of Gaussian component identified with imfit: ICRS 16h11m31.351910s -18d38m26.26593s
# 16:11:31.351910 -18:38:26.26593
# Separation: radian = 8.22747e-08, degrees = 0.000005, arcsec = 0.016970
# Peak in J2000 coordinates: 16:11:31.35262, -018:38:26.252286
# PA of Gaussian component: 121.01 deg
# Inclination of Gaussian component: 18.22 deg
# Pixel coordinates of peak: x = 463.891 y = 410.413
fit_gaussian(prefix+'_SB3_initcont_exec1.image', region=mask_SB_main)
#16h11m31.352010s -18d38m26.22882s
#Peak of Gaussian component identified with imfit: ICRS 16h11m31.352010s -18d38m26.22882s
#16:11:31.352010 -18:38:26.22882
#Separation: radian = 8.22747e-08, degrees = 0.000005, arcsec = 0.016970
#Peak in J2000 coordinates: 16:11:31.35272, -018:38:26.215176
#PA of Gaussian component: 100.17 deg
#Inclination of Gaussian component: 17.24 deg
#Pixel coordinates of peak: x = 463.843 y = 411.650
fit_gaussian(prefix+'_SB3_initcont_exec2.image', region=mask_SB_main)
#16h11m31.349837s -18d38m26.25763s
#Peak of Gaussian component identified with imfit: ICRS 16h11m31.349837s -18d38m26.25763s
#16:11:31.349837 -18:38:26.25763
#Separation: radian = 8.23978e-08, degrees = 0.000005, arcsec = 0.016996
#Peak in J2000 coordinates: 16:11:31.35055, -018:38:26.243986
#PA of Gaussian component: 105.04 deg
#Inclination of Gaussian component: 21.32 deg
#Pixel coordinates of peak: x = 464.873 y = 410.690
fit_gaussian(prefix+'_LB1_initcont.image', region=mask_LB_main)
#16h11m31.351310s -18d38m26.24660s
#Peak of Gaussian component identified with imfit: ICRS 16h11m31.351310s -18d38m26.24660s
#16:11:31.351310 -18:38:26.24660
#Separation: radian = 8.22747e-08, degrees = 0.000005, arcsec = 0.016970
#Peak in J2000 coordinates: 16:11:31.35202, -018:38:26.232956
#PA of Gaussian component: 110.75 deg
#Inclination of Gaussian component: 23.78 deg
#Pixel coordinates of peak: x = 1639.604 y = 1114.500
# Secondary disk
fit_gaussian(prefix+'_LB1_initcont.image', region=mask_LB_second)
#16h11m31.294774s -18d38m27.27574s
#Peak of Gaussian component identified with imfit: ICRS 16h11m31.294774s -18d38m27.27574s
#16:11:31.294774 -18:38:27.27574
#Separation: radian = 8.2111e-08, degrees = 0.000005, arcsec = 0.016937
#Peak in J2000 coordinates: 16:11:31.29548, -018:38:27.262096
#PA of Gaussian component: 108.42 deg
#Inclination of Gaussian component: 64.79 deg
#Pixel coordinates of peak: x = 1907.452 y = 771.455
'''
Peaks in J2000
SB1 16h 11m 31.35473, -18d 38m 26.09852s
SB2 16h 11m 31.35629, -18d 38m 26.13575s
SB3 16h 11m 31.35172, -18d 38m 26.239466
SB3_0 16h 11m 31.35262, -18d 38m 26.252276
SB3_1 16h 11m 31.35272, -18d 38m 26.215186
SB3_2 16h 11m 31.35055, -18d 38m 26.243996
LB1 16h 11m 31.35202, -18d 38m 26.232956
There are differences up to 240mas, which is much greater than LB beam.
'''
# Common phase center every execution will be shifted to: the J2000 LB1
# main-disk centroid from the gaussian fits above. Aligning all datasets
# to one center lets them be concatenated and self-calibrated together.
common_dir = 'J2000 16h11m31.35202s -18d38m26.232956s'
# Names of the shifted MS files of the SB and LB datasets.
shiftname_SB1 = prefix+'_SB1_initcont_shift'
shiftname_SB2 = prefix+'_SB2_initcont_shift'
shiftname_SB30 = prefix+'_SB3_initcont_exec0_shift'
shiftname_SB31 = prefix+'_SB3_initcont_exec1_shift'
shiftname_SB32 = prefix+'_SB3_initcont_exec2_shift'
shiftname_LB1 = prefix+'_LB1_initcont_shift'
# Remove products of any previous run for every shifted dataset.
# BUG FIX: the original deleted shiftname_SB31 twice and never deleted
# shiftname_SB32, leaving stale exec2 products on disk between runs.
os.system('rm -rf %s.*' % shiftname_SB1)
os.system('rm -rf %s.*' % shiftname_SB2)
os.system('rm -rf %s.*' % shiftname_SB30)
os.system('rm -rf %s.*' % shiftname_SB31)
os.system('rm -rf %s.*' % shiftname_SB32)
os.system('rm -rf %s.*' % shiftname_LB1)
# Shift every dataset to the common phase center: fixvis recomputes the
# uvw coordinates for the dataset's own fitted peak, then fixplanets
# relabels that direction as common_dir.
# SB1
fixvis(vis=prefix+'_SB1_initcont.ms', outputvis=shiftname_SB1+'.ms', \
       field=data_params['SB1']['field'], \
       phasecenter='J2000 16h11m31.35473s -18d38m26.09852s')
fixplanets(vis=shiftname_SB1+'.ms', \
           field=data_params['SB1']['field'], direction=common_dir)
# SB2
fixvis(vis=prefix+'_SB2_initcont.ms', outputvis=shiftname_SB2+'.ms', \
       field=data_params['SB2']['field'], \
       phasecenter='J2000 16h11m31.35629s -18d38m26.13575s')
fixplanets(vis=shiftname_SB2+'.ms', \
           field=data_params['SB2']['field'], direction=common_dir)
# SB3 exec0
fixvis(vis=prefix+'_SB3_initcont_exec0.ms', outputvis=shiftname_SB30+'.ms', \
       field=data_params['SB3']['field'], \
       phasecenter='J2000 16h11m31.35262s -18d38m26.252276s')
fixplanets(vis=shiftname_SB30+'.ms', \
           field=data_params['SB3']['field'], direction=common_dir)
# SB3 exec1
fixvis(vis=prefix+'_SB3_initcont_exec1.ms', outputvis=shiftname_SB31+'.ms', \
       field=data_params['SB3']['field'], \
       phasecenter='J2000 16h11m31.35272s -18d38m26.215186s')
fixplanets(vis=shiftname_SB31+'.ms', \
           field=data_params['SB3']['field'], direction=common_dir)
# SB3 exec2
fixvis(vis=prefix+'_SB3_initcont_exec2.ms', outputvis=shiftname_SB32+'.ms', \
       field=data_params['SB3']['field'], \
       phasecenter='J2000 16h11m31.35055s -18d38m26.243996s')
fixplanets(vis=shiftname_SB32+'.ms', \
           field=data_params['SB3']['field'], direction=common_dir)
# LB1 is already at the reference position, so only re-phase to common_dir.
fixvis(vis=prefix+'_LB1_initcont.ms', outputvis=shiftname_LB1+'.ms', \
       field=data_params['LB1']['field'], \
       phasecenter=common_dir)
# New images to check that all centroids now coincide.
tclean_wrapper(vis=shiftname_SB1+'.ms', imagename=shiftname_SB1, \
               mask=mask_SB, scales=[0], threshold='1.0mJy')
tclean_wrapper(vis=shiftname_SB2+'.ms', imagename=shiftname_SB2, \
               mask=mask_SB, scales=[0], threshold='1.0mJy')
tclean_wrapper(vis=shiftname_SB30+'.ms', imagename=shiftname_SB30, \
               mask=mask_SB, scales=[0], threshold='0.2mJy')
tclean_wrapper(vis=shiftname_SB31+'.ms', imagename=shiftname_SB31, \
               mask=mask_SB, scales=[0], threshold='0.2mJy')
tclean_wrapper(vis=shiftname_SB32+'.ms', imagename=shiftname_SB32, \
               mask=mask_SB, scales=[0], threshold='0.2mJy')
tclean_wrapper(vis=shiftname_LB1+'.ms', imagename=shiftname_LB1, \
               mask=mask_LB, scales=scales_LB, threshold='0.1mJy', \
               uvtaper=uvtaper_LB)
# Results of re-fitting the main-disk centroid on the shifted images:
fit_gaussian(shiftname_SB1+'.image', region=mask_SB_main)
# 16h11m31.352050s -18d38m26.23157s
fit_gaussian(shiftname_SB2+'.image', region=mask_SB_main)
# 16h11m31.352055s -18d38m26.23183s
fit_gaussian(shiftname_SB30+'.image', region=mask_SB_main)
# 16h11m31.352025s -18d38m26.23260s
fit_gaussian(shiftname_SB31+'.image', region=mask_SB_main)
# 16h11m31.352011s -18d38m26.23303s
fit_gaussian(shiftname_SB32+'.image', region=mask_SB_main)
# 16h11m31.352028s -18d38m26.23280s
fit_gaussian(shiftname_LB1+'.image', region=mask_LB_main)
# 16h11m31.352018s -18d38m26.23307s
# The difference is ~1mas. We will proceed.
#######################################################################
# FLUX CORRECTION #
#######################################################################
# Redefine the noise annulus further out for the flux-comparison step.
res_mask_SB = 'annulus[[%s, %s], [%s, %s]]' % (center_ra_LB_main, \
                                               center_dec_LB_main, \
                                               '8.0arcsec', '11.0arcsec')
# We calculate the rms in the shifted SB3 images; the execution with the
# lowest rms will be used as the flux reference.
rms_exec0 = imstat(imagename=shiftname_SB30+'.image', \
                   region=res_mask_SB)['rms'][0]*10**3
rms_exec1 = imstat(imagename=shiftname_SB31+'.image', \
                   region=res_mask_SB)['rms'][0]*10**3
rms_exec2 = imstat(imagename=shiftname_SB32+'.image', \
                   region=res_mask_SB)['rms'][0]*10**3
print (rms_exec0, rms_exec1, rms_exec2)
# rms in mJy of each execution (exec0 is lowest -> reference):
# (0.2247990773012572, 0.34920840346455695, 0.30993271493078195)
# The rough values from gaussian fitting of the LB main disk will be
# used for deprojection of the visibilities.
PA = 111.
incl = 24.
# All shifted measurement sets, for batch export/comparison below.
datasets = [shiftname_SB1+'.ms', \
            shiftname_SB2+'.ms', \
            shiftname_SB30+'.ms', \
            shiftname_SB31+'.ms', \
            shiftname_SB32+'.ms', \
            shiftname_LB1+'.ms']
# We'll check the flux of the calibrators in the SB3 data, and then
# calibrate the other observations against those measurements.
# SB3 execution 0 had J1517-2422 as a calibrator, with flux 1.948Jy.
au.getALMAFlux('J1517-2422', frequency='230.538GHz', date='2017/05/14')
#Closest Band 3 measurement: 2.420 +- 0.060 (age=+0 days) 91.5 GHz
#Closest Band 7 measurement: 1.840 +- 0.090 (age=-1 days) 343.5 GHz
#getALMAFluxCSV(): Fitting for spectral index with 1 measurement pair of age -1 days from 2017/05/14, with age separation of 0 days
#  2017/05/15: freqs=[103.49, 91.46, 343.48], fluxes=[2.55, 2.49, 1.84]
#Median Monte-Carlo result for 230.538000 = 2.045217 +- 0.159999 (scaled MAD = 0.157413)
#Result using spectral index of -0.234794 for 230.538 GHz from 2.420 Jy at 91.460 GHz = 1.947794 +- 0.159999 Jy
# 5% difference
# SB3 execution 1 had J1733-1304 as a calibrator, with flux 1.622Jy.
au.getALMAFlux('J1733-1304', frequency='230.537GHz', date='2017/05/17')
#Closest Band 3 measurement: 3.020 +- 0.060 (age=+0 days) 103.5 GHz
#Closest Band 7 measurement: 1.190 +- 0.060 (age=+0 days) 343.5 GHz
#getALMAFluxCSV(): Fitting for spectral index with 1 measurement pair of age 0 days from 2017/05/17, with age separation of 0 days
#  2017/05/17: freqs=[103.49, 343.48], fluxes=[3.02, 1.19]
#Median Monte-Carlo result for 230.537000 = 1.621264 +- 0.130647 (scaled MAD = 0.129509)
#Result using spectral index of -0.776310 for 230.537 GHz from 3.020 Jy at 103.490 GHz = 1.621711 +- 0.130647 Jy
# 1% difference
# SB3 execution 2 had J1517-2422 as a calibrator, with flux 2.113Jy.
au.getALMAFlux('J1517-2422', frequency='230.537GHz', date='2017/05/19')
#Closest Band 3 measurement: 2.550 +- 0.060 (age=+4 days) 103.5 GHz
#Closest Band 3 measurement: 2.490 +- 0.050 (age=+4 days) 91.5 GHz
#Closest Band 7 measurement: 1.750 +- 0.060 (age=+2 days) 343.5 GHz
#getALMAFluxCSV(): Fitting for spectral index with 1 measurement pair of age 4 days from 2017/05/19, with age separation of 0 days
#  2017/05/15: freqs=[103.49, 91.46, 343.48], fluxes=[2.55, 2.49, 1.84]
#Median Monte-Carlo result for 230.537000 = 2.048019 +- 0.160344 (scaled MAD = 0.161804)
#Result using spectral index of -0.234794 for 230.537 GHz from 2.520 Jy at 97.475 GHz = 2.058844 +- 0.160344 Jy
# 10% difference
# SB3 execution 0 has the smallest difference between the calibrator
# catalog and getALMAFlux, and also the lowest rms among SB3 executions,
# so we calibrate every dataset against this observation.
if not skip_plots:
    for msfile in datasets:
        export_MS(msfile)
#Measurement set exported to AS205_SB1_initcont_shift.vis.npz
#Measurement set exported to AS205_SB2_initcont_shift.vis.npz
#Measurement set exported to AS205_SB3_initcont_exec0_shift.vis.npz
#Measurement set exported to AS205_SB3_initcont_exec1_shift.vis.npz
#Measurement set exported to AS205_SB3_initcont_exec2_shift.vis.npz
#Measurement set exported to AS205_LB1_initcont_shift.vis.npz
# Plot deprojected fluxes comparing each dataset with SB3 exec0
plot_deprojected([shiftname_SB30+'.vis.npz', \
                  shiftname_SB1+'.vis.npz'], PA=PA, incl=incl)
# There is a difference between SB1 and SB30
plot_deprojected([shiftname_SB30+'.vis.npz', \
                  shiftname_SB2+'.vis.npz'], PA=PA, incl=incl)
# There is a difference between SB2 and SB30
plot_deprojected([shiftname_SB30+'.vis.npz', \
                  shiftname_SB31+'.vis.npz'], PA=PA, incl=incl)
# There is a difference between SB31 and SB30
plot_deprojected([shiftname_SB30+'.vis.npz', \
                  shiftname_SB32+'.vis.npz'], PA=PA, incl=incl)
# There is a difference in the short lambda part of these datasets
plot_deprojected([shiftname_SB30+'.vis.npz', \
                  shiftname_LB1+'.vis.npz'], PA=PA, incl=incl)
# Almost match
# We calculate the flux ratio of every execution against SB3 exec0.
estimate_flux_scale(reference=shiftname_SB30+'.vis.npz', \
                    comparison=shiftname_SB1+'.vis.npz', \
                    incl=incl, PA=PA, uvbins=100+10*np.arange(100))
estimate_flux_scale(reference=shiftname_SB30+'.vis.npz', \
                    comparison=shiftname_SB2+'.vis.npz', \
                    incl=incl, PA=PA, uvbins=100+10*np.arange(100))
estimate_flux_scale(reference=shiftname_SB30+'.vis.npz', \
                    comparison=shiftname_SB31+'.vis.npz', \
                    incl=incl, PA=PA, uvbins=100+10*np.arange(100))
estimate_flux_scale(reference=shiftname_SB30+'.vis.npz', \
                    comparison=shiftname_SB32+'.vis.npz', \
                    incl=incl, PA=PA, uvbins=100+10*np.arange(100))
estimate_flux_scale(reference=shiftname_SB30+'.vis.npz', \
                    comparison=shiftname_LB1+'.vis.npz', \
                    incl=incl, PA=PA, uvbins=100+10*np.arange(100))
# Recorded output of estimate_flux_scale (gencal factor = sqrt of ratio):
#The ratio of the fluxes of AS205_SB1_initcont_shift.vis.npz to
# AS205_SB3_initcont_exec0_shift.vis.npz is 1.16550
#The scaling factor for gencal is 1.080 for your comparison measurement
#The error on the weighted mean ratio is 5.012e-04, although it's
# likely that the weights in the measurement sets are too off by
# some constant factor
#The ratio of the fluxes of AS205_SB2_initcont_shift.vis.npz to
# AS205_SB3_initcont_exec0_shift.vis.npz is 1.06620
#The scaling factor for gencal is 1.033 for your comparison measurement
#The error on the weighted mean ratio is 5.244e-04, although it's
# likely that the weights in the measurement sets are too off by
# some constant factor
#The ratio of the fluxes of AS205_SB3_initcont_exec1_shift.vis.npz to
# AS205_SB3_initcont_exec0_shift.vis.npz is 1.09033
#The scaling factor for gencal is 1.044 for your comparison measurement
#The error on the weighted mean ratio is 2.719e-04, although it's
# likely that the weights in the measurement sets are too off by
# some constant factor
#The ratio of the fluxes of AS205_SB3_initcont_exec2_shift.vis.npz to
# AS205_SB3_initcont_exec0_shift.vis.npz is 1.08228
#The scaling factor for gencal is 1.040 for your comparison measurement
#The error on the weighted mean ratio is 2.368e-04, although it's
# likely that the weights in the measurement sets are too off by
# some constant factor
#The ratio of the fluxes of AS205_LB1_initcont_shift.vis.npz to
# AS205_SB3_initcont_exec0_shift.vis.npz is 1.01869
#The scaling factor for gencal is 1.009 for your comparison measurement
#The error on the weighted mean ratio is 3.464e-04, although it's
# likely that the weights in the measurement sets are too off by
# some constant factor
# Check the differences with the measured flux ratios applied.
plot_deprojected([shiftname_SB30+'.vis.npz', \
                  shiftname_SB1+'.vis.npz'], \
                 PA=PA, incl=incl, fluxscale=[1., 1./1.16550])
plot_deprojected([shiftname_SB30+'.vis.npz', \
                  shiftname_SB2+'.vis.npz'], \
                 PA=PA, incl=incl, fluxscale=[1., 1./1.06620])
plot_deprojected([shiftname_SB30+'.vis.npz', \
                  shiftname_SB31+'.vis.npz'], \
                 PA=PA, incl=incl, fluxscale=[1., 1./1.09033])
plot_deprojected([shiftname_SB30+'.vis.npz', \
                  shiftname_SB32+'.vis.npz'], \
                 PA=PA, incl=incl, fluxscale=[1., 1./1.08228])
plot_deprojected([shiftname_SB30+'.vis.npz', \
                  shiftname_LB1+'.vis.npz'], \
                 PA=PA, incl=incl, fluxscale=[1., 1./1.01869])
# Correct the discrepant datasets with the gencal factors found above.
# SB3 exec0 is the flux reference and therefore is not rescaled.
os.system('rm -rf *rescaled.*')
rescale_flux(shiftname_SB1+'.ms', [1.080])
rescale_flux(shiftname_SB2+'.ms', [1.033])
rescale_flux(shiftname_SB31+'.ms', [1.044])
rescale_flux(shiftname_SB32+'.ms', [1.040])
rescale_flux(shiftname_LB1+'.ms', [1.009])
#Splitting out rescaled values into new MS: AS205_SB1_initcont_shift_rescaled.ms
#Splitting out rescaled values into new MS: AS205_SB2_initcont_shift_rescaled.ms
#Splitting out rescaled values into new MS: AS205_SB3_initcont_exec1_shift_rescaled.ms
#Splitting out rescaled values into new MS: AS205_SB3_initcont_exec2_shift_rescaled.ms
#Splitting out rescaled values into new MS: AS205_LB1_initcont_shift_rescaled.ms
# Export the rescaled visibilities for the verification plots below.
export_MS(shiftname_SB1+'_rescaled.ms')
export_MS(shiftname_SB2+'_rescaled.ms')
export_MS(shiftname_SB31+'_rescaled.ms')
export_MS(shiftname_SB32+'_rescaled.ms')
export_MS(shiftname_LB1+'_rescaled.ms')
#Measurement set exported to AS205_SB1_initcont_shift_rescaled.vis.npz
#Measurement set exported to AS205_SB2_initcont_shift_rescaled.vis.npz
#Measurement set exported to AS205_SB3_initcont_exec1_shift_rescaled.vis.npz
#Measurement set exported to AS205_SB3_initcont_exec2_shift_rescaled.vis.npz
#Measurement set exported to AS205_LB1_initcont_shift_rescaled.vis.npz
# Final shifted+rescaled dataset names (SB30 keeps its unrescaled name).
shift_scaled_SB1 = 'AS205_SB1_initcont_shift_rescaled'
shift_scaled_SB2 = 'AS205_SB2_initcont_shift_rescaled'
shift_scaled_SB30 = shiftname_SB30
shift_scaled_SB31 = 'AS205_SB3_initcont_exec1_shift_rescaled'
shift_scaled_SB32 = 'AS205_SB3_initcont_exec2_shift_rescaled'
shift_scaled_LB1 = 'AS205_LB1_initcont_shift_rescaled'
# Re-plot the rescaled visibilities against the reference to verify the
# flux correction (curves should now overlap).
# BUG FIX: the original guard was `if skip_plots:`, which ran these
# interactive plots precisely when the user asked to skip them and
# skipped them otherwise. Every other plotting block in this script uses
# `if not skip_plots:`.
if not skip_plots:
    # Re-plot to see if it worked
    plot_deprojected([shift_scaled_SB30+'.vis.npz', \
                      shift_scaled_SB1+'.vis.npz'], PA=PA, incl=incl)
    # Re-plot to see if it worked
    plot_deprojected([shift_scaled_SB30+'.vis.npz', \
                      shift_scaled_SB2+'.vis.npz'], PA=PA, incl=incl)
    # Re-plot to see if it worked
    plot_deprojected([shift_scaled_SB30+'.vis.npz', \
                      shift_scaled_SB31+'.vis.npz'], PA=PA, incl=incl)
    # Re-plot to see if it worked
    plot_deprojected([shift_scaled_SB30+'.vis.npz', \
                      shift_scaled_SB32+'.vis.npz'], PA=PA, incl=incl)
    # Re-plot to see if it worked
    plot_deprojected([shift_scaled_SB30+'.vis.npz', \
                      shift_scaled_LB1+'.vis.npz'], PA=PA, incl=incl)
# Now that our datasets are shifted and flux-calibrated, we will combine
# the SB executions into a single measurement set.
shift_scaled_SB = 'AS205_SB_initcont_shift_scaled'
os.system('rm -rf '+shift_scaled_SB+'.*')
concat(vis=[shift_scaled_SB1+'.ms', shift_scaled_SB2+'.ms', shift_scaled_SB30+'.ms', shift_scaled_SB31+'.ms', shift_scaled_SB32+'.ms'], \
       concatvis=shift_scaled_SB+'.ms', \
       dirtol='0.1arcsec', copypointing=False, freqtol='0')
#######################################################################
# SB SELF-CALIBRATION PARAMETERS #
#######################################################################
# Briggs robust weighting parameter used for all SB self-cal imaging.
robust = 0.5
# In SB the source is not well resolved. We are going to use few short
# scales. Apparently, tclean chooses to use only point-like components,
# but we leave it the freedom of choosing 1-beam components anyway.
scales_SB = [0, 9]
# We'll search for a reference antenna by inspection in plotants or
# calibration files.
#ref_SB1 = 'DV09' # Visually selected
#ref_SB2 = 'DV09' # Visually selected
#ref_SB3 = 'DA46' # Highest recommended reference antenna present in all SB3
# Look up the antenna@station strings so the refant is unambiguous in the
# concatenated MS.
get_station_numbers(shift_scaled_SB1+'.ms', 'DV09')
get_station_numbers(shift_scaled_SB2+'.ms', 'DV09')
get_station_numbers(shift_scaled_SB30+'.ms', 'DA46')
get_station_numbers(shift_scaled_SB31+'.ms', 'DA46')
get_station_numbers(shift_scaled_SB32+'.ms', 'DA46')
#Observation ID 0: DV09@A046
#Observation ID 0: DV09@A046
#Observation ID 0: DA46@A034
#Observation ID 1: DA46@A034
#Observation ID 2: DA46@A034
# Ranked reference antennas (antenna@station) passed to gaincal.
ref_SB = 'DV09@A046, DA46@A034'
# Spw selection for the concatenated SB measurement set.
SB_contspws = '0~15'
# Timeranges of each execution, used to inspect calibration tables per day.
SB1_timerange = '2012/03/27/00:00:01~2012/03/27/23:59:59'
SB2_timerange = '2012/05/04/00:00:01~2012/05/04/23:59:59'
SB30_timerange = '2017/05/14/00:00:01~2017/05/14/23:59:59'
SB31_timerange = '2017/05/17/00:00:01~2017/05/17/23:59:59'
SB32_timerange = '2017/05/19/00:00:01~2017/05/19/23:59:59'
LB1_timerange = '2017/09/29/00:00:01~2017/09/29/23:59:59'
#######################################################################
#                      PHASE SELF-CALIBRATION 0                       #
#######################################################################
# Name of the first data.
SB_p0 = prefix+'_SB_p0'
# Split the data to continue with the calibrations
os.system('rm -rf '+SB_p0+'.*')
split(vis=shift_scaled_SB+'.ms',
      outputvis=SB_p0+'.ms',
      datacolumn='data')
# Clean for selfcalibration. savemodel='modelcolumn' stores the clean
# model in the MS so the gaincal below can solve against it.
tclean_wrapper(vis=SB_p0+'.ms', \
               imagename=SB_p0, \
               mask=mask_SB, \
               scales=scales_SB, \
               robust=robust, \
               threshold='0.2mJy', \
               savemodel='modelcolumn', \
               interactive=False)
# Check the values from the clean
estimate_SNR(SB_p0+'.image', \
             disk_mask = mask_SB_main, \
             noise_mask = res_mask_SB)
#AS205_SB_p0.image
#Beam 0.268 arcsec x 0.228 arcsec (86.44 deg)
#Flux inside disk mask: 346.66 mJy
#Peak intensity of source: 97.39 mJy/beam
#rms: 1.69e-01 mJy/beam
#Peak SNR: 576.63
# RMS in mJy
rms_SB_p0 = imstat(imagename=SB_p0+'.image', region=res_mask_SB)['rms'][0]*10**3
# Gaincal for self-calibration: phase-only (calmode='p'), one solution
# per 120 s, polarizations averaged together (gaintype='T').
os.system('rm -rf '+SB_p0+'.cal')
gaincal(vis=SB_p0+'.ms',
        caltable=SB_p0+'.cal',
        gaintype='T',
        spw=SB_contspws,
        refant = ref_SB,
        solint='120s',
        calmode='p',
        minsnr=1.5,
        minblperant=4)
'''
None solutions were flagged
'''
if not skip_plots:
    # Inspect the phase solutions one execution (timerange) at a time.
    plotcal(caltable=SB_p0+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, -180, 180], timerange=SB1_timerange,
            markersize=5, fontsize=10.0, showgui=True)
    plotcal(caltable=SB_p0+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, -180, 180], timerange=SB2_timerange,
            markersize=5, fontsize=10.0, showgui=True)
    plotcal(caltable=SB_p0+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, -180, 180], timerange=SB30_timerange,
            markersize=5, fontsize=10.0, showgui=True)
    plotcal(caltable=SB_p0+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, -180, 180], timerange=SB31_timerange,
            markersize=5, fontsize=10.0, showgui=True)
    plotcal(caltable=SB_p0+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, -180, 180], timerange=SB32_timerange,
            markersize=5, fontsize=10.0, showgui=True)
# Apply calibration
applycal(vis=SB_p0+'.ms', spw=SB_contspws,
         gaintable=[SB_p0+'.cal'], interp='linearPD', calwt=True)
#######################################################################
#                      PHASE SELF-CALIBRATION 1                       #
#######################################################################
# Name of the data.
SB_p1 = prefix+'_SB_p1'
# Split the data to continue with the calibrations; 'corrected' picks
# up the solutions applied in the previous round.
os.system('rm -rf '+SB_p1+'.*')
split(vis=SB_p0+'.ms',
      outputvis=SB_p1+'.ms',
      datacolumn='corrected')
# Clean for selfcalibration. 2*rms_SB_p0>0.2mJy, so we keep
# the 0.2 as threshold
tclean_wrapper(vis=SB_p1+'.ms', \
               imagename=SB_p1, \
               mask=mask_SB, \
               scales=scales_SB, \
               robust=robust, \
               threshold='0.2mJy', \
               savemodel='modelcolumn', \
               interactive=False)
# Check the values from the clean
estimate_SNR(SB_p1+'.image', \
             disk_mask = mask_SB_main, \
             noise_mask = res_mask_SB)
#AS205_SB_p1.image
#Beam 0.270 arcsec x 0.228 arcsec (84.95 deg)
#Flux inside disk mask: 352.65 mJy
#Peak intensity of source: 103.14 mJy/beam
#rms: 5.90e-02 mJy/beam
#Peak SNR: 1749.40
# RMS in mJy
rms_SB_p1 = imstat(imagename=SB_p1+'.image', region=res_mask_SB)['rms'][0]*10**3
# Gaincal self-calibration: shorter solint (60s) than round 0 now that
# the model is better.
os.system('rm -rf '+SB_p1+'.cal')
gaincal(vis=SB_p1+'.ms',
        caltable=SB_p1+'.cal',
        gaintype='T',
        spw=SB_contspws,
        refant = ref_SB,
        solint='60s',
        calmode='p',
        minsnr=1.5,
        minblperant=4)
'''
None solutions were flagged
'''
if not skip_plots:
    # Inspect the phase solutions one execution (timerange) at a time.
    plotcal(caltable=SB_p1+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, -180, 180], timerange=SB1_timerange,
            markersize=5, fontsize=10.0, showgui=True)
    plotcal(caltable=SB_p1+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, -180, 180], timerange=SB2_timerange,
            markersize=5, fontsize=10.0, showgui=True)
    plotcal(caltable=SB_p1+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, -180, 180], timerange=SB30_timerange,
            markersize=5, fontsize=10.0, showgui=True)
    plotcal(caltable=SB_p1+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, -180, 180], timerange=SB31_timerange,
            markersize=5, fontsize=10.0, showgui=True)
    plotcal(caltable=SB_p1+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, -180, 180], timerange=SB32_timerange,
            markersize=5, fontsize=10.0, showgui=True)
# Apply calibration
applycal(vis=SB_p1+'.ms', spw=SB_contspws,
         gaintable=[SB_p1+'.cal'], interp='linearPD', calwt=True)
#######################################################################
#                        AMP SELF-CALIBRATION 0                       #
#######################################################################
# Name of the data.
SB_a0 = prefix+'_SB_a0'
# Split the data to continue with the calibrations
os.system('rm -rf '+SB_a0+'.*')
split(vis=SB_p1+'.ms',
      outputvis=SB_a0+'.ms',
      datacolumn='corrected')
# Clean for selfcalibration. The high signal to noise encourage us
# to push the limit to the 1.5rms level.
tclean_wrapper(vis=SB_a0+'.ms', \
               imagename=SB_a0, \
               mask=mask_SB, \
               scales=scales_SB, \
               robust=robust, \
               threshold=str(1.5*rms_SB_p1)+'mJy', \
               savemodel='modelcolumn', \
               interactive=False)
# Check the values from the clean
estimate_SNR(SB_a0+'.image', \
             disk_mask = mask_SB_main, \
             noise_mask = res_mask_SB)
#AS205_SB_a0.image
#Beam 0.270 arcsec x 0.228 arcsec (84.79 deg)
#Flux inside disk mask: 352.93 mJy
#Peak intensity of source: 103.18 mJy/beam
#rms: 5.83e-02 mJy/beam
#Peak SNR: 1770.71
'''
The decrease is almost 2%. It was tried another round of phase self-cal
with a gaincal of 30s, but the results got worse.
There were a decrease in SNR because of an increase in rms.
It was tried with 1*rms and 2*rms threshold, but the 30s gaincal always
decreased the snr.
#AS205_SB_a0.image
#Beam 0.270 arcsec x 0.228 arcsec (83.98 deg)
#Flux inside disk mask: 353.41 mJy
#Peak intensity of source: 104.39 mJy/beam
#rms: 5.92e-02 mJy/beam
#Peak SNR: 1762.12
We stop the phase cal here, and start the amp calibration.
'''
# RMS in mJy
rms_SB_a0 = imstat(imagename=SB_a0+'.image', region=res_mask_SB)['rms'][0]*10**3
# First amplitude+phase self-calibration (calmode='ap'); one solution
# per scan (solint='inf') with a stricter minsnr than the phase rounds.
os.system('rm -rf '+SB_a0+'.cal')
gaincal(vis=SB_a0+'.ms',
        caltable=SB_a0+'.cal',
        gaintype='T',
        spw=SB_contspws,
        refant = ref_SB,
        solint='inf',
        calmode='ap',
        minsnr=3.0,
        minblperant=4,
        solnorm=False)
'''
None solutions were flagged
'''
if not skip_plots:
    # Inspect the amp+phase solutions; narrow plotrange since amplitude
    # corrections should be close to unity.
    plotcal(caltable=SB_a0+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, -5, 5], timerange=SB1_timerange,
            markersize=5, fontsize=10.0, showgui=True)
    plotcal(caltable=SB_a0+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, -5, 5], timerange=SB2_timerange,
            markersize=5, fontsize=10.0, showgui=True)
    plotcal(caltable=SB_a0+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, -5, 5], timerange=SB30_timerange,
            markersize=5, fontsize=10.0, showgui=True)
    plotcal(caltable=SB_a0+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, -5, 5], timerange=SB31_timerange,
            markersize=5, fontsize=10.0, showgui=True)
    plotcal(caltable=SB_a0+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, -5, 5], timerange=SB32_timerange,
            markersize=5, fontsize=10.0, showgui=True)
# Apply calibration
applycal(vis=SB_a0+'.ms', spw=SB_contspws,
         gaintable=[SB_a0+'.cal'], interp='linearPD', calwt=True)
#######################################################################
#                           FINAL SB IMAGE                            #
#######################################################################
# Name of the first data.
SB_ap = prefix+'_SB_ap'
# Split the fully self-calibrated SB data into its own MS.
os.system('rm -rf '+SB_ap+'.*')
split(vis=SB_a0+'.ms',
      outputvis=SB_ap+'.ms',
      datacolumn='corrected')
# Clean for imaging, down to the 1*rms level of the previous round.
tclean_wrapper(vis=SB_ap+'.ms', \
               imagename=SB_ap, \
               mask=mask_SB, \
               scales=scales_SB, \
               robust=robust, \
               threshold=str(1.*rms_SB_a0)+'mJy', \
               savemodel='modelcolumn', \
               interactive=False)
# Check the values from the clean
estimate_SNR(SB_ap+'.image', \
             disk_mask = mask_SB_main, \
             noise_mask = res_mask_SB)
#AS205_SB_ap.image
#Beam 0.270 arcsec x 0.227 arcsec (84.89 deg)
#Flux inside disk mask: 353.81 mJy
#Peak intensity of source: 102.50 mJy/beam
#rms: 3.10e-02 mJy/beam
#Peak SNR: 3310.36
# RMS in mJy
rms_SB_ap = imstat(imagename=SB_ap+'.image', region=res_mask_SB)['rms'][0]*10**3
#######################################################################
#                          COMBINE SB AND LB                          #
#######################################################################
# Name
combined_p0 = prefix+'_combined_p0'
# Combined file. Includes the calibrated SB, and the shifted and
# rescaled execution of LB.
os.system('rm -rf '+combined_p0+'.*')
concat(vis=[SB_ap+'.ms', shift_scaled_LB1+'.ms'], concatvis=combined_p0+'.ms', dirtol='0.1arcsec', copypointing=False)
#######################################################################
#                      COMBINED PHASE SELF-CAL 0                      #
#######################################################################
# Robust level
robust = 0.5
# Combined parameters: 20 spws total after the concat.
combined_contspw = '0~19'
# Map each execution's four spws onto its first spw, matching the
# combine='spw' gaincal solutions applied with this spwmap below.
combined_spwmap = [0, 0, 0, 0, \
                   4, 4, 4, 4, \
                   8, 8, 8, 8, \
                   12, 12, 12, 12, \
                   16, 16, 16, 16]
# The beam is roughly 11 pixels.
combined_scales = [0, 11, 33, 55]
# I had to use imsize=4000 in order to get the calculation of rms,
# because of the secondary source.
# NOTE(review): the comment above mentions 4000 but the value actually
# used is 3000 — confirm which one produced the quoted statistics.
combined_imsize = 3000
# Reference antenna. By visual inspection, DA61 seems like a good candidate
# for LB1 and SB3. We don't have any antenna present in all data.
#
#get_station_numbers(combined_p0+'.ms', 'DA61')
get_station_numbers(path_LB1, 'DA61')
#Observation ID 0: DA61@A015
combined_refant = 'DA61@A015, '+ref_SB
# Clean for selfcalibration of the combined (SB+LB) data.
tclean_wrapper(vis=combined_p0+'.ms', \
               imagename=combined_p0, \
               imsize=combined_imsize, \
               mask=mask_LB, \
               scales=combined_scales, \
               robust=robust, \
               threshold='0.1mJy', \
               savemodel='modelcolumn', \
               interactive=False)
# Check the values from the clean
estimate_SNR(combined_p0+'.image', \
             disk_mask=mask_LB_main, noise_mask=res_mask_LB)
#AS205_combined_p0.image
#Beam 0.039 arcsec x 0.025 arcsec (-81.58 deg)
#Flux inside disk mask: 374.69 mJy
#Peak intensity of source: 4.89 mJy/beam
#rms: 2.90e-02 mJy/beam
#Peak SNR: 168.75
# RMS in mJy
rms_LB_p0 = imstat(imagename=combined_p0+'.image', \
                   region=res_mask_LB)['rms'][0]*10**3
# Phase self-calibration; combining spws and scans boosts SNR per
# solution for the faint long-baseline data.
os.system('rm -rf '+combined_p0+'.cal')
gaincal(vis=combined_p0+'.ms',
        caltable=combined_p0+'.cal',
        combine= 'spw, scan',
        gaintype='T',
        spw=combined_contspw,
        refant = combined_refant,
        solint='360s',
        calmode='p',
        minsnr=1.5,
        minblperant=4)
'''
1 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/22:44:08.8
3 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/22:48:01.7
3 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/22:54:51.2
1 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/22:58:15.9
4 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/23:01:38.6
2 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/23:08:28.0
4 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/23:22:10.5
1 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/23:27:23.4
2 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/23:31:56.7
'''
if not skip_plots:
    # Inspect the phase solutions one execution (timerange) at a time.
    plotcal(caltable=combined_p0+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, -180, 180], timerange=SB1_timerange,
            markersize=5, fontsize=10.0, showgui=True)
    plotcal(caltable=combined_p0+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, -180, 180], timerange=SB2_timerange,
            markersize=5, fontsize=10.0, showgui=True)
    plotcal(caltable=combined_p0+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, -180, 180], timerange=SB30_timerange,
            markersize=5, fontsize=10.0, showgui=True)
    plotcal(caltable=combined_p0+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, -180, 180], timerange=SB31_timerange,
            markersize=5, fontsize=10.0, showgui=True)
    plotcal(caltable=combined_p0+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, -180, 180], timerange=SB32_timerange,
            markersize=5, fontsize=10.0, showgui=True)
# Apply calibration. spwmap propagates the per-execution combined
# solutions back to every spw; applymode='calonly' leaves flags intact.
applycal(vis=combined_p0+'.ms',
         spw=combined_contspw,
         spwmap=combined_spwmap,
         gaintable=[combined_p0+'.cal'],
         interp='linearPD',
         calwt=True,
         applymode='calonly')
#######################################################################
#                      COMBINED PHASE SELF-CAL 1                      #
#######################################################################
# Name of the first data.
combined_p1 = prefix+'_combined_p1'
# Split the data to continue with the calibrations
os.system('rm -rf '+combined_p1+'.*')
split(vis=combined_p0+'.ms',
      outputvis=combined_p1+'.ms',
      datacolumn='corrected')
# Clean for selfcalibration
tclean_wrapper(vis=combined_p1+'.ms', \
               imagename=combined_p1, \
               imsize=combined_imsize, \
               mask=mask_LB, \
               scales=combined_scales, \
               robust=robust, \
               threshold=str(2*rms_LB_p0)+'mJy', \
               savemodel='modelcolumn', \
               interactive=False)
# Check the values from the clean
estimate_SNR(combined_p1+'.image', \
             disk_mask=mask_LB_main, noise_mask=res_mask_LB)
#AS205_combined_p1.image
#Beam 0.039 arcsec x 0.025 arcsec (-81.58 deg)
#Flux inside disk mask: 363.44 mJy
#Peak intensity of source: 5.71 mJy/beam
#rms: 1.96e-02 mJy/beam
#Peak SNR: 291.45
# RMS in mJy
rms_LB_p1 = imstat(imagename=combined_p1+'.image', \
                   region=res_mask_LB)['rms'][0]*10**3
# Phase self-calibration with a shorter solint (180s) than round 0.
os.system('rm -rf '+combined_p1+'.cal')
gaincal(vis=combined_p1+'.ms',
        caltable=combined_p1+'.cal',
        combine= 'spw, scan',
        gaintype='T',
        spw=combined_contspw,
        refant = combined_refant,
        solint='180s',
        calmode='p',
        minsnr=1.5,
        minblperant=4)
'''
4 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/22:38:30.4
1 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/22:40:30.1
2 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/22:42:27.0
4 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/22:50:45.5
1 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/22:54:42.1
1 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/23:01:19.9
2 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/23:03:00.5
1 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/23:06:57.1
3 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/23:15:14.0
1 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/23:17:13.2
1 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/23:19:10.6
1 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/23:21:10.7
1 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/23:25:48.2
3 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/23:27:28.4
1 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/23:29:28.3
4 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/23:31:24.9
2 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/23:32:37.8
'''
if not skip_plots:
    # Inspect the phase solutions one execution (timerange) at a time.
    plotcal(caltable=combined_p1+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, -180, 180], timerange=SB1_timerange,
            markersize=5, fontsize=10.0, showgui=True)
    plotcal(caltable=combined_p1+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, -180, 180], timerange=SB2_timerange,
            markersize=5, fontsize=10.0, showgui=True)
    plotcal(caltable=combined_p1+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, -180, 180], timerange=SB30_timerange,
            markersize=5, fontsize=10.0, showgui=True)
    plotcal(caltable=combined_p1+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, -180, 180], timerange=SB31_timerange,
            markersize=5, fontsize=10.0, showgui=True)
    plotcal(caltable=combined_p1+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, -180, 180], timerange=SB32_timerange,
            markersize=5, fontsize=10.0, showgui=True)
# Apply calibration
applycal(vis=combined_p1+'.ms',
         spw=combined_contspw,
         spwmap=combined_spwmap,
         gaintable=[combined_p1+'.cal'],
         interp='linearPD', calwt=True, applymode='calonly')
#######################################################################
#                      COMBINED PHASE SELF-CAL 2                      #
#######################################################################
# Name of the first data.
combined_p2 = prefix+'_combined_p2'
# Split the data to continue with the calibrations
os.system('rm -rf '+combined_p2+'.*')
split(vis=combined_p1+'.ms',
      outputvis=combined_p2+'.ms',
      datacolumn='corrected')
# Clean for selfcalibration
tclean_wrapper(vis=combined_p2+'.ms', \
               imagename=combined_p2, \
               imsize=combined_imsize, \
               mask=mask_LB, \
               scales=combined_scales, \
               robust=robust, \
               threshold=str(2*rms_LB_p1)+'mJy', \
               savemodel='modelcolumn', \
               interactive=False)
# Check the values from the clean
estimate_SNR(combined_p2+'.image', \
             disk_mask=mask_LB_main, noise_mask=res_mask_LB)
#AS205_combined_p2.image
#Beam 0.039 arcsec x 0.025 arcsec (-81.58 deg)
#Flux inside disk mask: 358.54 mJy
#Peak intensity of source: 6.13 mJy/beam
#rms: 1.80e-02 mJy/beam
#Peak SNR: 341.17
# RMS in mJy
rms_LB_p2 = imstat(imagename=combined_p2+'.image', \
                   region=res_mask_LB)['rms'][0]*10**3
# Phase self-calibration, solint shortened again (60s).
os.system('rm -rf '+combined_p2+'.cal')
gaincal(vis=combined_p2+'.ms',
        caltable=combined_p2+'.cal',
        combine= 'spw, scan',
        gaintype='T',
        spw=combined_contspw,
        refant = combined_refant,
        solint='60s',
        calmode='p',
        minsnr=1.5,
        minblperant=4)
'''
1 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/22:35:37.5
1 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/22:36:50.4
2 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/22:38:12.2
1 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/22:39:34.1
1 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/22:42:17.9
1 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/22:45:15.1
2 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/22:47:52.6
1 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/22:50:27.3
2 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/22:55:54.9
2 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/23:00:07.6
3 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/23:01:20.4
1 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/23:05:26.0
1 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/23:08:09.8
1 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/23:12:21.2
1 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/23:16:17.7
1 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/23:21:58.4
1 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/23:24:35.5
1 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/23:25:48.3
2 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/23:28:32.1
2 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/23:29:54.0
1 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/23:31:15.9
1 of 43 solutions flagged due to SNR < 1.5 in spw=16 at 2017/09/29/23:32:37.8
'''
if not skip_plots:
    # Inspect the phase solutions one execution (timerange) at a time.
    plotcal(caltable=combined_p2+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, -180, 180], timerange=SB1_timerange,
            markersize=5, fontsize=10.0, showgui=True)
    plotcal(caltable=combined_p2+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, -180, 180], timerange=SB2_timerange,
            markersize=5, fontsize=10.0, showgui=True)
    plotcal(caltable=combined_p2+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, -180, 180], timerange=SB30_timerange,
            markersize=5, fontsize=10.0, showgui=True)
    plotcal(caltable=combined_p2+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, -180, 180], timerange=SB31_timerange,
            markersize=5, fontsize=10.0, showgui=True)
    plotcal(caltable=combined_p2+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, -180, 180], timerange=SB32_timerange,
            markersize=5, fontsize=10.0, showgui=True)
# Apply calibration
applycal(vis=combined_p2+'.ms',
         spw=combined_contspw,
         spwmap=combined_spwmap,
         gaintable=[combined_p2+'.cal'],
         interp='linearPD', calwt=True, applymode='calonly')
#######################################################################
#                      COMBINED PHASE SELF-CAL 3                      #
#######################################################################
# Name of the first data.
combined_p3 = prefix+'_combined_p3'
# Split the data to continue with the calibrations
os.system('rm -rf '+combined_p3+'.*')
split(vis=combined_p2+'.ms',
      outputvis=combined_p3+'.ms',
      datacolumn='corrected')
# Clean for selfcalibration. We push it to 1.5rms level
tclean_wrapper(vis=combined_p3+'.ms', \
               imagename=combined_p3, \
               imsize=combined_imsize, \
               mask=mask_LB, \
               scales=combined_scales, \
               robust=robust, \
               threshold=str(1.5*rms_LB_p2)+'mJy', \
               savemodel='modelcolumn', \
               interactive=False)
# Check the values from the clean
estimate_SNR(combined_p3+'.image', \
             disk_mask=mask_LB_main, noise_mask=res_mask_LB)
#AS205_combined_p3.image
#Beam 0.039 arcsec x 0.025 arcsec (-81.58 deg)
#Flux inside disk mask: 356.45 mJy
#Peak intensity of source: 6.45 mJy/beam
#rms: 1.78e-02 mJy/beam
#Peak SNR: 362.7
# RMS in mJy
rms_LB_p3 = imstat(imagename=combined_p3+'.image', \
                   region=res_mask_LB)['rms'][0]*10**3
# Phase self-calibration, shortest solint of the sequence (30s).
os.system('rm -rf '+combined_p3+'.cal')
gaincal(vis=combined_p3+'.ms',
        caltable=combined_p3+'.cal',
        combine= 'spw, scan',
        gaintype='T',
        spw=combined_contspw,
        refant = combined_refant,
        solint='30s',
        calmode='p',
        minsnr=1.5,
        minblperant=4)
'''
'''
if not skip_plots:
    # Inspect the phase solutions one execution (timerange) at a time.
    plotcal(caltable=combined_p3+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, -180, 180], timerange=SB1_timerange,
            markersize=5, fontsize=10.0, showgui=True)
    plotcal(caltable=combined_p3+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, -180, 180], timerange=SB2_timerange,
            markersize=5, fontsize=10.0, showgui=True)
    plotcal(caltable=combined_p3+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, -180, 180], timerange=SB30_timerange,
            markersize=5, fontsize=10.0, showgui=True)
    plotcal(caltable=combined_p3+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, -180, 180], timerange=SB31_timerange,
            markersize=5, fontsize=10.0, showgui=True)
    plotcal(caltable=combined_p3+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, -180, 180], timerange=SB32_timerange,
            markersize=5, fontsize=10.0, showgui=True)
# Apply calibration
applycal(vis=combined_p3+'.ms',
         spw=combined_contspw,
         spwmap=combined_spwmap,
         gaintable=[combined_p3+'.cal'],
         interp='linearPD', calwt=True, applymode='calonly')
#######################################################################
#                       COMBINED AMP SELF-CAL 0                       #
#######################################################################
# Name of the first data.
combined_a0 = prefix+'_combined_a0'
# Split the data to continue with the calibrations
os.system('rm -rf '+combined_a0+'.*')
split(vis=combined_p3+'.ms',
      outputvis=combined_a0+'.ms',
      datacolumn='corrected')
# Clean for selfcalibration. we push to 1.5rms level.
tclean_wrapper(vis=combined_a0+'.ms', \
               imagename=combined_a0, \
               imsize=combined_imsize, \
               mask=mask_LB, \
               scales=combined_scales, \
               robust=robust, \
               threshold=str(1.5*rms_LB_p3)+'mJy', \
               savemodel='modelcolumn', \
               interactive=False)
# Check the values from the clean
estimate_SNR(combined_a0+'.image', \
             disk_mask=mask_LB_main, noise_mask=res_mask_LB)
#AS205_combined_a0.image
#Beam 0.039 arcsec x 0.025 arcsec (-81.58 deg)
#Flux inside disk mask: 356.56 mJy
#Peak intensity of source: 6.61 mJy/beam
#rms: 1.78e-02 mJy/beam
#Peak SNR: 371.72
# RMS in mJy
rms_LB_a0 = imstat(imagename=combined_a0+'.image', \
                   region=res_mask_LB)['rms'][0]*10**3
'''
We proceed to amp calibration
'''
# First amp+phase calibration (calmode='ap') on the combined data;
# solint='360' is in seconds (CASA default unit when none is given).
os.system('rm -rf '+combined_a0+'.cal')
gaincal(vis=combined_a0+'.ms',
        caltable=combined_a0+'.cal',
        refant=combined_refant,
        combine='spw, scan',
        solint='360',
        calmode='ap',
        gaintype='T',
        spw=combined_contspw,
        minsnr=3.0,
        minblperant=4,
        solnorm=False)
'''
7 of 43 solutions flagged due to SNR < 3 in spw=16 at 2017/09/29/22:48:01.7
4 of 43 solutions flagged due to SNR < 3 in spw=16 at 2017/09/29/22:54:51.2
1 of 43 solutions flagged due to SNR < 3 in spw=16 at 2017/09/29/22:58:16.0
7 of 43 solutions flagged due to SNR < 3 in spw=16 at 2017/09/29/23:01:38.6
6 of 43 solutions flagged due to SNR < 3 in spw=16 at 2017/09/29/23:08:28.0
3 of 43 solutions flagged due to SNR < 3 in spw=16 at 2017/09/29/23:22:10.5
2 of 43 solutions flagged due to SNR < 3 in spw=16 at 2017/09/29/23:31:56.7
'''
if not skip_plots:
    # Inspect the amp+phase solutions; plotrange [0,0,0,0] autoscales.
    plotcal(caltable=combined_a0+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, 0, 0], timerange=SB1_timerange,
            markersize=5, fontsize=10.0, showgui=True)
    plotcal(caltable=combined_a0+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, 0, 0], timerange=SB2_timerange,
            markersize=5, fontsize=10.0, showgui=True)
    plotcal(caltable=combined_a0+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, 0, 0], timerange=SB30_timerange,
            markersize=5, fontsize=10.0, showgui=True)
    plotcal(caltable=combined_a0+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, 0, 0], timerange=SB31_timerange,
            markersize=5, fontsize=10.0, showgui=True)
    plotcal(caltable=combined_a0+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, 0, 0], timerange=SB32_timerange,
            markersize=5, fontsize=10.0, showgui=True)
# Apply calibration
applycal(vis=combined_a0+'.ms',
         spw=combined_contspw,
         spwmap=combined_spwmap,
         gaintable=[combined_a0+'.cal'],
         interp='linearPD', calwt=True, applymode='calonly')
#######################################################################
#                       COMBINED AMP SELF-CAL 1                       #
#######################################################################
# Name of the first data.
combined_a1 = prefix+'_combined_a1'
# Split the data to continue with the calibrations
os.system('rm -rf '+combined_a1+'.*')
split(vis=combined_a0+'.ms',
      outputvis=combined_a1+'.ms',
      datacolumn='corrected')
# Clean for selfcalibration
tclean_wrapper(vis=combined_a1+'.ms', \
               imagename=combined_a1, \
               imsize=combined_imsize, \
               mask=mask_LB, \
               scales=combined_scales, \
               robust=robust, \
               threshold=str(1.5*rms_LB_a0)+'mJy', \
               savemodel='modelcolumn', \
               interactive=False)
# Check the values from the clean
estimate_SNR(combined_a1+'.image', \
             disk_mask=mask_LB_main, noise_mask=res_mask_LB)
#AS205_combined_a1.image
#Beam 0.039 arcsec x 0.025 arcsec (-83.55 deg)
#Flux inside disk mask: 357.24 mJy
#Peak intensity of source: 6.32 mJy/beam
#rms: 1.64e-02 mJy/beam
#Peak SNR: 384.05
# RMS in mJy
rms_LB_a1 = imstat(imagename=combined_a1+'.image', \
                   region=res_mask_LB)['rms'][0]*10**3
'''
We proceed to amp calibration
'''
# Second amp+phase calibration, solint halved to 180 s.
os.system('rm -rf '+combined_a1+'.cal')
gaincal(vis=combined_a1+'.ms',
        caltable=combined_a1+'.cal',
        refant=combined_refant,
        combine='spw, scan',
        solint='180',
        calmode='ap',
        gaintype='T',
        spw=combined_contspw,
        minsnr=3.0,
        minblperant=4,
        solnorm=False)
'''
1 of 43 solutions flagged due to SNR < 3 in spw=16 at 2017/09/29/22:36:50.3
7 of 43 solutions flagged due to SNR < 3 in spw=16 at 2017/09/29/22:38:30.4
1 of 43 solutions flagged due to SNR < 3 in spw=16 at 2017/09/29/22:40:30.1
5 of 43 solutions flagged due to SNR < 3 in spw=16 at 2017/09/29/22:42:27.0
1 of 43 solutions flagged due to SNR < 3 in spw=16 at 2017/09/29/22:44:28.0
7 of 43 solutions flagged due to SNR < 3 in spw=16 at 2017/09/29/22:50:45.5
4 of 43 solutions flagged due to SNR < 3 in spw=16 at 2017/09/29/22:54:42.1
2 of 43 solutions flagged due to SNR < 3 in spw=16 at 2017/09/29/22:56:42.4
1 of 43 solutions flagged due to SNR < 3 in spw=16 at 2017/09/29/23:01:20.2
5 of 43 solutions flagged due to SNR < 3 in spw=16 at 2017/09/29/23:03:00.5
1 of 43 solutions flagged due to SNR < 3 in spw=16 at 2017/09/29/23:06:57.1
3 of 43 solutions flagged due to SNR < 3 in spw=16 at 2017/09/29/23:08:57.4
1 of 43 solutions flagged due to SNR < 3 in spw=16 at 2017/09/29/23:13:33.7
6 of 43 solutions flagged due to SNR < 3 in spw=16 at 2017/09/29/23:15:14.0
1 of 43 solutions flagged due to SNR < 3 in spw=16 at 2017/09/29/23:17:13.4
3 of 43 solutions flagged due to SNR < 3 in spw=16 at 2017/09/29/23:19:10.6
1 of 43 solutions flagged due to SNR < 3 in spw=16 at 2017/09/29/23:21:10.7
1 of 43 solutions flagged due to SNR < 3 in spw=16 at 2017/09/29/23:25:48.4
5 of 43 solutions flagged due to SNR < 3 in spw=16 at 2017/09/29/23:27:28.4
2 of 43 solutions flagged due to SNR < 3 in spw=16 at 2017/09/29/23:29:28.3
3 of 43 solutions flagged due to SNR < 3 in spw=16 at 2017/09/29/23:31:24.9
2 of 43 solutions flagged due to SNR < 3 in spw=16 at 2017/09/29/23:32:37.8
'''
if not skip_plots:
    # Inspect the amp+phase solutions; plotrange [0,0,0,0] autoscales.
    plotcal(caltable=combined_a1+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, 0, 0], timerange=SB1_timerange,
            markersize=5, fontsize=10.0, showgui=True)
    plotcal(caltable=combined_a1+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, 0, 0], timerange=SB2_timerange,
            markersize=5, fontsize=10.0, showgui=True)
    plotcal(caltable=combined_a1+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, 0, 0], timerange=SB30_timerange,
            markersize=5, fontsize=10.0, showgui=True)
    plotcal(caltable=combined_a1+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, 0, 0], timerange=SB31_timerange,
            markersize=5, fontsize=10.0, showgui=True)
    plotcal(caltable=combined_a1+'.cal', \
            xaxis='time', yaxis='phase', subplot=221, iteration='antenna', \
            plotrange=[0, 0, 0, 0], timerange=SB32_timerange,
            markersize=5, fontsize=10.0, showgui=True)
# Apply calibration
applycal(vis=combined_a1+'.ms',
         spw=combined_contspw, spwmap=combined_spwmap,
         gaintable=[combined_a1+'.cal'],
         interp='linearPD', calwt=True, applymode='calonly')
#######################################################################
# COMBINED FINAL IMAGE #
#######################################################################
im_robust = -0.5
# Name of the first data.
combined_ap = prefix+'_combined_ap'
# Split the data to continue with the calibrations
os.system('rm -rf '+combined_ap+'_'+str(im_robust)+'.*')
split(vis=combined_a1+'.ms',
outputvis=combined_ap+'_'+str(im_robust)+'.ms',
datacolumn='corrected')
# Clean for selfcalibration
tclean_wrapper(vis=combined_ap+'.ms', \
imagename=combined_ap+'_'+str(im_robust), \
imsize=combined_imsize, \
mask=mask_LB, \
scales=combined_scales, \
robust=im_robust, \
threshold=str(2*rms_LB_a1)+'mJy', \
savemodel='modelcolumn', \
interactive=False)
# Check the values from the clean
estimate_SNR(combined_ap+'_'+str(im_robust)+'.image', \
disk_mask = mask_LB_main, \
noise_mask = res_mask_LB)
# ROBUST 0.5
#AS205_combined_ap_0.5.image
#Beam 0.038 arcsec x 0.025 arcsec (-84.63 deg)
#Flux inside disk mask: 358.01 mJy
#Peak intensity of source: 6.15 mJy/beam
#rms: 1.61e-02 mJy/beam
#Peak SNR: 381.28
# ROBUST 0.0
#AS205_combined_ap_0.0.image
#Beam 0.031 arcsec x 0.019 arcsec (89.18 deg)
#Flux inside disk mask: 357.03 mJy
#Peak intensity of source: 4.45 mJy/beam
#rms: 1.93e-02 mJy/beam
#Peak SNR: 230.53
# ROBUST -0.5
#AS205_combined_ap_-0.5.image
#Beam 0.029 arcsec x 0.016 arcsec (84.51 deg)
#Flux inside disk mask: 356.11 mJy
#Peak intensity of source: 3.60 mJy/beam
#rms: 2.64e-02 mJy/beam
#Peak SNR: 136.41
# Export the robust=0.5 image as the final FITS product.
# NOTE(review): the last imaging run above used im_robust=-0.5, so this
# presumably exports the 0.5 image produced by an earlier run — confirm
# the intended release robust value.
exportfits(imagename=combined_ap+'_0.5.image', \
           fitsimage=prefix+'_combined_selfcal_ap.fits', \
           history=False, overwrite=True)
|
# Copyright (c) 2015
#
# All rights reserved.
#
# This file is distributed under the Clear BSD license.
# The full text can be found in LICENSE in the root directory.
from boardfarm import lib
from boardfarm.tests import rootfs_boot
class RouterPingWanDev(rootfs_boot.RootFSBootTest):
    '''Router can ping device through WAN interface.'''

    def runTest(self):
        board = self.dev.board
        wan = self.dev.wan
        # A WAN device is optional in the test config; skip rather than fail.
        if not wan:
            msg = 'No WAN Device defined, skipping ping WAN test.'
            lib.common.test_msg(msg)
            self.skipTest(msg)
        # Ping the WAN gateway from the router console; require all 5
        # replies ("packets " is optional to match busybox ping output).
        board.sendline('\nping -c5 %s' % wan.gw)
        board.expect('5 (packets )?received', timeout=15)
        board.expect(board.prompt)

    def recover(self):
        # Interrupt a still-running ping so the console returns to a prompt.
        self.dev.board.sendcontrol('c')
class RouterPingInternet(rootfs_boot.RootFSBootTest):
    '''Router can ping internet address by IP.'''

    def runTest(self):
        board = self.dev.board
        # Ping a well-known public IP (Google DNS) from the router console.
        board.sendline('\nping -c2 8.8.8.8')
        board.expect('2 (packets )?received', timeout=15)
        board.expect(board.prompt)
class RouterPingInternetName(rootfs_boot.RootFSBootTest):
    '''Router can ping internet address by name.'''

    def runTest(self):
        board = self.dev.board
        # Pinging by hostname additionally exercises DNS resolution.
        board.sendline('\nping -c2 www.google.com')
        board.expect('2 (packets )?received', timeout=15)
        board.expect(board.prompt)
class LanDevPingRouter(rootfs_boot.RootFSBootTest):
    '''Device on LAN can ping router.'''

    def runTest(self):
        board = self.dev.board
        lan = self.dev.lan
        # A LAN device is optional in the test config; skip rather than fail.
        if not lan:
            msg = 'No LAN Device defined, skipping ping test from LAN.'
            lib.common.test_msg(msg)
            self.skipTest(msg)
        # Ping the router's LAN-side address from the LAN client device.
        router_ip = board.get_interface_ipaddr(board.lan_iface)
        lan.sendline('\nping -i 0.2 -c 5 %s' % router_ip)
        lan.expect('PING ')
        lan.expect('5 (packets )?received', timeout=15)
        lan.expect(lan.prompt)
class LanDevPingWanDev(rootfs_boot.RootFSBootTest):
    '''Device on LAN can ping through router.'''

    def runTest(self):
        lan = self.dev.lan
        wan = self.dev.wan
        # Both a LAN and a WAN device are required; skip if either is absent.
        if not lan:
            msg = 'No LAN Device defined, skipping ping test from LAN.'
            lib.common.test_msg(msg)
            self.skipTest(msg)
        if not wan:
            msg = 'No WAN Device defined, skipping ping WAN test.'
            lib.common.test_msg(msg)
            self.skipTest(msg)
        # Ping the WAN gateway from the LAN side, i.e. routed traffic.
        lan.sendline('\nping -i 0.2 -c 5 %s' % wan.gw)
        lan.expect('PING ')
        lan.expect('5 (packets )?received', timeout=15)
        lan.expect(lan.prompt)

    def recover(self):
        # Interrupt a still-running ping so the shell returns to a prompt.
        self.dev.lan.sendcontrol('c')
class LanDevPingInternet(rootfs_boot.RootFSBootTest):
    '''Device on LAN can ping through router to internet.'''

    def runTest(self):
        lan = self.dev.lan
        # A LAN device is optional in the test config; skip rather than fail.
        if not lan:
            msg = 'No LAN Device defined, skipping ping test from LAN.'
            lib.common.test_msg(msg)
            self.skipTest(msg)
        # End-to-end: LAN client -> router -> internet (Google DNS).
        lan.sendline('\nping -c2 8.8.8.8')
        lan.expect('2 (packets )?received', timeout=10)
        lan.expect(lan.prompt)

    def recover(self):
        # Interrupt a still-running ping so the shell returns to a prompt.
        self.dev.lan.sendcontrol('c')
|
import unittest
class TestCase(object):
    """
    Compatibility layer for unittest.TestCase

    Mixin that provides assert helpers under a stable name across Python
    versions.  The concrete test class must also provide assertEqual /
    assertTrue (e.g. by inheriting unittest.TestCase).
    """
    try:
        # Python 3: reuse the stdlib implementation (renamed there).
        assertItemsEqual = unittest.TestCase.assertCountEqual
    except AttributeError:
        # Python 2: assertCountEqual does not exist; compare sorted copies
        # (elements must be orderable).
        def assertItemsEqual(self, first, second):
            """Method missing in python2.6 and renamed in python3."""
            self.assertEqual(sorted(first), sorted(second))

    # NOTE(review): defined unconditionally, so it shadows unittest's own
    # assertLess when this mixin precedes TestCase in the MRO — confirm
    # that is intended.
    def assertLess(self, first, second):
        """Method missing in python2.6."""
        self.assertTrue(first < second)
|
#!/usr/bin/env python
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import io
import itertools
import logging
import sys
from contextlib import redirect_stdout
from typing import Mapping
from typing import MutableSequence
from typing import Optional
from typing import Sequence
from typing import Tuple
import a_sync
from marathon.exceptions import MarathonError
from mypy_extensions import TypedDict
from paasta_tools import __version__
from paasta_tools.cli.utils import get_instance_config
from paasta_tools.kubernetes_tools import is_kubernetes_available
from paasta_tools.kubernetes_tools import KubeClient
from paasta_tools.kubernetes_tools import load_kubernetes_service_config
from paasta_tools.marathon_tools import get_marathon_clients
from paasta_tools.marathon_tools import get_marathon_servers
from paasta_tools.marathon_tools import MarathonClient
from paasta_tools.marathon_tools import MarathonClients
from paasta_tools.mesos.exceptions import MasterNotAvailableException
from paasta_tools.mesos.master import MesosMaster
from paasta_tools.mesos.master import MesosState
from paasta_tools.mesos_tools import get_mesos_config_path
from paasta_tools.mesos_tools import get_mesos_leader
from paasta_tools.mesos_tools import get_mesos_master
from paasta_tools.mesos_tools import is_mesos_available
from paasta_tools.metrics import metastatus_lib
from paasta_tools.metrics.metastatus_lib import _GenericNodeGroupingFunctionT
from paasta_tools.metrics.metastatus_lib import _KeyFuncRetT
from paasta_tools.metrics.metastatus_lib import HealthCheckResult
from paasta_tools.metrics.metastatus_lib import ResourceUtilization
from paasta_tools.metrics.metastatus_lib import ResourceUtilizationDict
from paasta_tools.utils import format_table
from paasta_tools.utils import load_system_paasta_config
from paasta_tools.utils import PaastaColors
from paasta_tools.utils import print_with_indent
log = logging.getLogger("paasta_metastatus")
logging.basicConfig()
# kazoo can be really noisy - turn it down
logging.getLogger("kazoo").setLevel(logging.CRITICAL)
ServiceInstanceStats = TypedDict(
"ServiceInstanceStats", {"mem": float, "cpus": float, "disk": float, "gpus": int}
)
class FatalError(Exception):
    """Raised to abort paasta_metastatus with a specific process exit code."""

    def __init__(self, exit_code: int) -> None:
        # Exit status that main()/get_output() propagate to the shell.
        self.exit_code = exit_code
def parse_args(argv):
    """Parse the paasta_metastatus command-line options from *argv*."""
    arg_parser = argparse.ArgumentParser(description="")
    grouping_help = (
        "Group resource information of slaves grouped by attribute."
        "Note: This is only effective with -vv"
    )
    arg_parser.add_argument(
        "-g",
        "--groupings",
        nargs="+",
        default=["pool", "region"],
        help=grouping_help,
    )
    arg_parser.add_argument("-t", "--threshold", type=int, default=90)
    arg_parser.add_argument("--use-mesos-cache", action="store_true", default=False)
    arg_parser.add_argument(
        "-v",
        "--verbose",
        action="count",
        dest="verbose",
        default=0,
        help="Print out more output regarding the state of the cluster",
    )
    service_help = (
        "Show how many of a given service instance can be run on a cluster slave."
        "Note: This is only effective with -vvv and --instance must also be specified"
    )
    arg_parser.add_argument(
        "-s",
        "--service",
        dest="service_name",
        help=service_help,
    )
    instance_help = (
        "Show how many of a given service instance can be run on a cluster slave."
        "Note: This is only effective with -vvv and --service must also be specified"
    )
    arg_parser.add_argument(
        "-i",
        "--instance",
        dest="instance_name",
        help=instance_help,
    )
    return arg_parser.parse_args(argv)
def get_marathon_framework_ids(
    marathon_clients: Sequence[MarathonClient],
) -> Sequence[str]:
    """Return the Mesos framework id reported by each Marathon client."""
    framework_ids = []
    for marathon_client in marathon_clients:
        framework_ids.append(marathon_client.get_info().framework_id)
    return framework_ids
def _run_mesos_checks(
    mesos_master: MesosMaster, mesos_state: MesosState
) -> Sequence[HealthCheckResult]:
    """Run Mesos state and resource-utilization health checks.

    Combines state-based checks with utilization checks computed from a
    fresh metrics snapshot of the master.
    """
    mesos_state_status = metastatus_lib.get_mesos_state_status(mesos_state)

    # Block on the async metrics fetch; this is a synchronous CLI tool.
    metrics = a_sync.block(mesos_master.metrics_snapshot)
    mesos_metrics_status = metastatus_lib.get_mesos_resource_utilization_health(
        mesos_metrics=metrics, mesos_state=mesos_state
    )
    # Both are sequences of HealthCheckResult; '+' concatenates them.
    return mesos_state_status + mesos_metrics_status  # type: ignore
def _run_marathon_checks(
    marathon_clients: Sequence[MarathonClient],
) -> Sequence[HealthCheckResult]:
    """Run Marathon health checks; abort the program on connection failure."""
    try:
        marathon_results = metastatus_lib.get_marathon_status(marathon_clients)
        return marathon_results
    except (MarathonError, ValueError) as e:
        # Marathon being unreachable is fatal for metastatus: report it in
        # red and exit with status 2 (critical).
        print(PaastaColors.red(f"CRITICAL: Unable to contact Marathon cluster: {e}"))
        raise FatalError(2)
def all_marathon_clients(
    marathon_clients: MarathonClients,
) -> Sequence[MarathonClient]:
    """Flatten the current and previous Marathon clients into one list."""
    # list() over the chain replaces the redundant identity comprehension
    # ([c for c in ...]) the original used.
    return list(
        itertools.chain(marathon_clients.current, marathon_clients.previous)
    )
def utilization_table_by_grouping(
    groupings: Sequence[str],
    grouping_function: _GenericNodeGroupingFunctionT,
    resource_info_dict_grouped: Mapping[_KeyFuncRetT, ResourceUtilizationDict],
    threshold: float,
    service_instance_stats: Optional[ServiceInstanceStats] = None,
) -> Tuple[Sequence[MutableSequence[str]], bool]:
    """Build a utilization table: a header row plus one row per group.

    Returns (all_rows, healthy_exit): rows ready for format_table(), and
    whether *every* group passed its resource-utilization health checks
    at *threshold* percent.
    """
    static_headers = [
        "CPU (used/total)",
        "RAM (used/total)",
        "Disk (used/total)",
        "GPU (used/total)",
        "Agent count",
    ]
    # service_instance_stats could be None so check and insert a header if needed.
    if service_instance_stats:
        # Insert so agent count is still last
        static_headers.insert(-1, "Slots + Limiting Resource")

    all_rows = [[grouping.capitalize() for grouping in groupings] + static_headers]
    table_rows = []
    # BUG FIX: healthy_exit used to be rebound on every loop iteration, so
    # only the *last* group's health was returned (and an empty grouping
    # left it unbound -> NameError). Aggregate across all groups instead.
    healthy_exit = True
    for grouping_values, resource_info_dict in resource_info_dict_grouped.items():
        resource_utilizations = (
            metastatus_lib.resource_utillizations_from_resource_info(
                total=resource_info_dict["total"], free=resource_info_dict["free"]
            )
        )
        healthcheck_utilization_pairs = [
            metastatus_lib.healthcheck_result_resource_utilization_pair_for_resource_utilization(
                utilization, threshold
            )
            for utilization in resource_utilizations
        ]
        healthy_exit = healthy_exit and all(
            pair[0].healthy for pair in healthcheck_utilization_pairs
        )
        table_rows.append(
            metastatus_lib.get_table_rows_for_resource_info_dict(
                [v for g, v in grouping_values], healthcheck_utilization_pairs
            )
        )
        # Fill table rows with service-instance data if possible.
        if service_instance_stats:
            fill_table_rows_with_service_instance_stats(
                service_instance_stats, resource_utilizations, table_rows
            )
        # Always append the agent count last
        table_rows[-1].append(str(resource_info_dict["slave_count"]))
    table_rows = sorted(table_rows, key=lambda x: x[0 : len(groupings)])
    all_rows.extend(table_rows)
    return all_rows, healthy_exit
def utilization_table_by_grouping_from_mesos_state(
    groupings: Sequence[str],
    threshold: float,
    mesos_state: MesosState,
    service_instance_stats: Optional[ServiceInstanceStats] = None,
) -> Tuple[Sequence[MutableSequence[str]], bool]:
    """Build the per-group utilization table from a Mesos state snapshot.

    Thin wrapper: derives the grouping function and the grouped resource
    info from *mesos_state*, then delegates to utilization_table_by_grouping.
    """
    grouping_function = metastatus_lib.key_func_for_attribute_multi(groupings)
    resource_info_dict_grouped = metastatus_lib.get_resource_utilization_by_grouping(
        grouping_function, mesos_state
    )

    return utilization_table_by_grouping(
        groupings,
        grouping_function,
        resource_info_dict_grouped,
        threshold,
        service_instance_stats,
    )
def utilization_table_by_grouping_from_kube(
    groupings: Sequence[str],
    threshold: float,
    kube_client: KubeClient,
    service_instance_stats: Optional[ServiceInstanceStats] = None,
    namespace: str = "paasta",
) -> Tuple[Sequence[MutableSequence[str]], bool]:
    """Build the per-group utilization table from the Kubernetes API.

    Kubernetes counterpart of utilization_table_by_grouping_from_mesos_state;
    groups nodes by attribute and delegates to utilization_table_by_grouping.
    """
    grouping_function = metastatus_lib.key_func_for_attribute_multi_kube(groupings)
    resource_info_dict_grouped = (
        metastatus_lib.get_resource_utilization_by_grouping_kube(
            grouping_func=grouping_function,
            kube_client=kube_client,
            namespace=namespace,
        )
    )

    return utilization_table_by_grouping(
        groupings,
        grouping_function,
        resource_info_dict_grouped,
        threshold,
        service_instance_stats,
    )
def fill_table_rows_with_service_instance_stats(
    service_instance_stats: ServiceInstanceStats,
    resource_utilizations: Sequence[ResourceUtilization],
    table_rows: MutableSequence[MutableSequence[str]],
) -> None:
    """Append a "slots ; limiting resource" cell to the newest table row.

    The slot count is how many copies of the service instance fit into the
    currently free resources; the limiting resource is whichever resource
    bounds that count.
    """
    free_by_metric = {rsrc.metric: rsrc.free for rsrc in resource_utilizations}
    best_slots = float("inf")
    bottleneck = "Unknown"
    # service_instance_stats.keys() should be a subset of free_by_metric.
    for metric, wanted in service_instance_stats.items():
        if wanted <= 0:  # type: ignore
            # The instance needs none of this resource; it cannot limit us.
            continue
        # Resources absent from the cluster count as zero available.
        slots = free_by_metric.get(metric, 0) // wanted  # type: ignore
        if slots < best_slots:  # type: ignore
            best_slots = slots
            bottleneck = metric
    table_rows[-1].append(
        "{:6} ; {}".format(int(best_slots), bottleneck)
    )
def get_service_instance_stats(
    service: str, instance: str, cluster: str
) -> Optional[ServiceInstanceStats]:
    """Returns a Dict with stats about a given service instance.

    Args:
        service: the service name
        instance: the instance name
        cluster: the cluster name where the service instance will be searched for

    Returns:
        A Dict mapping resource name to the amount of that resource the particular service instance consumes.
        Returns None when any argument is missing or the lookup fails.
    """
    if service is None or instance is None or cluster is None:
        return None

    try:
        instance_config = get_instance_config(service, instance, cluster)
        # Get all fields that are showed in the 'paasta metastatus -vvv' command
        # GPUs may be unset/None; normalize to an int (0 = no GPUs needed).
        if instance_config.get_gpus():
            gpus = int(instance_config.get_gpus())
        else:
            gpus = 0
        service_instance_stats = ServiceInstanceStats(
            mem=instance_config.get_mem(),
            cpus=instance_config.get_cpus(),
            disk=instance_config.get_disk(),
            gpus=gpus,
        )
        return service_instance_stats
    except Exception as e:
        # Best-effort helper: log and degrade to "no stats" instead of
        # failing the whole status report.
        log.error(
            f"Failed to get stats for service {service} instance {instance}: {str(e)}"
        )
        return None
def _run_kube_checks(
    kube_client: KubeClient, namespace: str = "paasta"
) -> Sequence[HealthCheckResult]:
    """Run Kubernetes state and resource-utilization health checks."""
    kube_status = metastatus_lib.get_kube_status(kube_client, namespace)
    kube_metrics_status = metastatus_lib.get_kube_resource_utilization_health(
        kube_client=kube_client
    )
    # Both are sequences of HealthCheckResult; '+' concatenates them.
    return kube_status + kube_metrics_status  # type: ignore
def print_output(argv: Optional[Sequence[str]] = None) -> None:
    """Run all configured health checks and print the metastatus report.

    Checks Mesos/Marathon and/or Kubernetes depending on what is available
    on this cluster, printing summaries and (with -vv/-vvv) utilization
    tables. Raises FatalError(2) when a backend is unreachable or any
    group fails its utilization checks.
    """
    mesos_available = is_mesos_available()
    kube_available = is_kubernetes_available()
    args = parse_args(argv)

    system_paasta_config = load_system_paasta_config()

    # NOTE(review): this is called unconditionally, even when
    # --service/--instance were not given (both default to None) and when
    # Kubernetes is unavailable — confirm load_kubernetes_service_config
    # tolerates that.
    service_config_dict = load_kubernetes_service_config(
        service=args.service_name,
        instance=args.instance_name,
        cluster=system_paasta_config.get_cluster(),
    )

    if mesos_available:
        master_kwargs = {}
        # we don't want to be passing False to not override a possible True
        # value from system config
        if args.use_mesos_cache:
            master_kwargs["use_mesos_cache"] = True

        master = get_mesos_master(
            mesos_config_path=get_mesos_config_path(system_paasta_config),
            **master_kwargs,
        )

        marathon_servers = get_marathon_servers(system_paasta_config)
        marathon_clients = all_marathon_clients(get_marathon_clients(marathon_servers))

        try:
            mesos_state = a_sync.block(master.state)
            all_mesos_results = _run_mesos_checks(
                mesos_master=master, mesos_state=mesos_state
            )
        except MasterNotAvailableException as e:
            # if we can't connect to master at all,
            # then bomb out early
            print(PaastaColors.red("CRITICAL: %s" % "\n".join(e.args)))
            raise FatalError(2)

        marathon_results = _run_marathon_checks(marathon_clients)
    else:
        # Mesos/Marathon not configured: report them as trivially healthy.
        marathon_results = [
            metastatus_lib.HealthCheckResult(
                message="Marathon is not configured to run here", healthy=True
            )
        ]
        all_mesos_results = [
            metastatus_lib.HealthCheckResult(
                message="Mesos is not configured to run here", healthy=True
            )
        ]

    if kube_available:
        kube_client = KubeClient()
        kube_results = _run_kube_checks(
            kube_client, service_config_dict.get_namespace()
        )
    else:
        kube_results = [
            metastatus_lib.HealthCheckResult(
                message="Kubernetes is not configured to run here", healthy=True
            )
        ]

    mesos_ok = all(metastatus_lib.status_for_results(all_mesos_results))
    marathon_ok = all(metastatus_lib.status_for_results(marathon_results))
    kube_ok = all(metastatus_lib.status_for_results(kube_results))

    mesos_summary = metastatus_lib.generate_summary_for_check("Mesos", mesos_ok)
    marathon_summary = metastatus_lib.generate_summary_for_check(
        "Marathon", marathon_ok
    )
    kube_summary = metastatus_lib.generate_summary_for_check("Kubernetes", kube_ok)

    # NOTE(review): kube_ok is not included here; overall health only
    # tracks Mesos and Marathon (plus the tables' healthy_exit below).
    healthy_exit = True if all([mesos_ok, marathon_ok]) else False

    print(f"Master paasta_tools version: {__version__}")
    print("Mesos leader: %s" % get_mesos_leader())
    metastatus_lib.print_results_for_healthchecks(
        mesos_summary, mesos_ok, all_mesos_results, args.verbose
    )

    if args.verbose > 1 and mesos_available:
        print_with_indent("Resources Grouped by %s" % ", ".join(args.groupings), 2)
        all_rows, healthy_exit = utilization_table_by_grouping_from_mesos_state(
            groupings=args.groupings, threshold=args.threshold, mesos_state=mesos_state
        )
        for line in format_table(all_rows):
            print_with_indent(line, 4)

        if args.verbose >= 3:
            print_with_indent("Per Slave Utilization", 2)
            cluster = system_paasta_config.get_cluster()
            service_instance_stats = get_service_instance_stats(
                args.service, args.instance, cluster
            )
            if service_instance_stats:
                print_with_indent(
                    "Service-Instance stats:" + str(service_instance_stats), 2
                )
            # print info about slaves here. Note that we don't make modifications to
            # the healthy_exit variable here, because we don't care about a single slave
            # having high usage.
            all_rows, _ = utilization_table_by_grouping_from_mesos_state(
                groupings=args.groupings + ["hostname"],
                threshold=args.threshold,
                mesos_state=mesos_state,
                service_instance_stats=service_instance_stats,
            )
            # The last column from utilization_table_by_grouping_from_mesos_state is "Agent count", which will always be
            # 1 for per-slave resources, so delete it.
            for row in all_rows:
                row.pop()

            for line in format_table(all_rows):
                print_with_indent(line, 4)

    metastatus_lib.print_results_for_healthchecks(
        marathon_summary, marathon_ok, marathon_results, args.verbose
    )
    metastatus_lib.print_results_for_healthchecks(
        kube_summary, kube_ok, kube_results, args.verbose
    )

    if args.verbose > 1 and kube_available:
        print_with_indent("Resources Grouped by %s" % ", ".join(args.groupings), 2)
        all_rows, healthy_exit = utilization_table_by_grouping_from_kube(
            groupings=args.groupings,
            threshold=args.threshold,
            kube_client=kube_client,
            namespace=service_config_dict.get_namespace(),
        )
        for line in format_table(all_rows):
            print_with_indent(line, 4)

        if args.verbose >= 3:
            print_with_indent("Per Node Utilization", 2)
            cluster = system_paasta_config.get_cluster()
            service_instance_stats = get_service_instance_stats(
                args.service, args.instance, cluster
            )
            if service_instance_stats:
                print_with_indent(
                    "Service-Instance stats:" + str(service_instance_stats), 2
                )
            # print info about nodes here. Note that we don't make
            # modifications to the healthy_exit variable here, because we don't
            # care about a single node having high usage.
            all_rows, _ = utilization_table_by_grouping_from_kube(
                groupings=args.groupings + ["hostname"],
                threshold=args.threshold,
                kube_client=kube_client,
                service_instance_stats=service_instance_stats,
                namespace=service_config_dict.get_namespace(),
            )
            # The last column from utilization_table_by_grouping_from_kube is "Agent count", which will always be
            # 1 for per-node resources, so delete it.
            for row in all_rows:
                row.pop()

            for line in format_table(all_rows):
                print_with_indent(line, 4)

    if not healthy_exit:
        raise FatalError(2)
def get_output(argv: Optional[Sequence[str]] = None) -> Tuple[str, int]:
    """Run print_output with stdout captured; return (report_text, exit_code)."""
    captured = io.StringIO()
    exit_code = 1
    with redirect_stdout(captured):
        exit_code = 0
        try:
            print_output(argv)
        except FatalError as error:
            exit_code = error.exit_code
    return captured.getvalue(), exit_code
def main(argv: Optional[Sequence[str]] = None) -> None:
    """CLI entry point: print the report and exit with the proper status."""
    try:
        print_output(argv)
    except FatalError as error:
        sys.exit(error.exit_code)
    sys.exit(0)
if __name__ == "__main__":
main()
|
"""
This package contains functionality to simplify the display of complex matrices.
"""
import logging
log = logging.getLogger(__name__)
from .grid2extent import grid2extent
from .complex2rgb import complex2rgb
from .hsv import hsv2rgb, rgb2hsv
from .colormap import InterpolatedColorMap
|
def minimum_chars_palindrome(string: str) -> int:
    """Return the minimum number of characters to remove from *string* so
    that the remaining characters form a palindrome.

    Equals len(string) minus the length of its longest palindromic
    subsequence, computed bottom-up in O(n^2) time and space. (The
    original two-branch recursion gave the same answers but ran in
    exponential time.)
    """
    n = len(string)
    # Empty or single-character strings are already palindromes.
    if n < 2:
        return 0
    # lps[i][j] = length of the longest palindromic subsequence of string[i:j+1]
    lps = [[0] * n for _ in range(n)]
    for i in range(n - 1, -1, -1):
        lps[i][i] = 1
        for j in range(i + 1, n):
            if string[i] == string[j]:
                # Matching ends extend the best inner subsequence by 2.
                lps[i][j] = lps[i + 1][j - 1] + 2
            else:
                lps[i][j] = max(lps[i + 1][j], lps[i][j - 1])
    return n - lps[0][n - 1]
print(minimum_chars_palindrome("aebcbda"))
|
from django.contrib import admin
from .models import Question, Answer, UserAnswer
# Register your models here.
# here I am tabulating answers in line for each questions
# Got ideas from tutorials and doc
# https://docs.djangoproject.com/en/1.10/ref/contrib/admin/#django.contrib.admin.TabularInline
class AnswerInline(admin.TabularInline):
    """Tabular inline so Answers can be edited on the Question admin page."""
    model = Answer
class QuestionAdm(admin.ModelAdmin):
    """Admin for Question with its Answers editable inline."""
    inlines = [
        AnswerInline,
    ]

    # NOTE(review): ModelAdmin does not consult an inner Meta class; this
    # looks like a copy-paste from a ModelForm and is inert — confirm.
    class Meta:
        model = Question


# admin.site.register(Question, QuestionAdm)
admin.site.register(Answer)
admin.site.register(UserAnswer)
|
from scipy import stats
import numpy as np
data1 = np.array([1,2,1,1,2,1])
data2 = np.array([3,5,5,5,4,5])
# ttest
from math import sqrt
from numpy.random import seed
from numpy.random import randn
from numpy import mean
from scipy.stats import sem
from scipy.stats import t
# function for calculating the t-test for two independent samples
def independent_ttest(data1, data2, alpha):
    """Two-sample (independent) t-test computed from first principles.

    Returns (t_stat, df, cv, p): the t statistic, the degrees of freedom,
    the one-sided critical value at significance *alpha*, and the
    two-sided p-value. Assumes equal-size samples for the pooled df.
    """
    # Standard error of the difference between the two sample means.
    se_diff = sqrt(sem(data1) ** 2.0 + sem(data2) ** 2.0)
    # t statistic: difference of means over its standard error.
    t_stat = (mean(data1) - mean(data2)) / se_diff
    df = len(data1) + len(data2) - 2
    # Critical value from the t distribution's inverse CDF.
    cv = t.ppf(1.0 - alpha, df)
    # Two-sided p-value from the survival of |t|.
    p = (1.0 - t.cdf(abs(t_stat), df)) * 2.0
    return t_stat, df, cv, p
# NOTE(review): alpha=0.95 makes cv = t.ppf(0.05, df), a *negative*
# critical value; the conventional significance level here is alpha=0.05.
print (sem(data1), sem(data2))
print("ttest: ")
print(independent_ttest (data1, data2, 0.95))
# Cross-check against scipy's built-in implementation.
print(stats.ttest_ind(data1,data2))
# ttest pareado
# function for calculating the t-test for two dependent samples
def dependent_ttest(data1, data2, alpha):
    """Paired-sample t-test computed from first principles.

    Returns (t_stat, df, cv, p): the t statistic, the degrees of freedom
    (n - 1 pairs), the one-sided critical value at *alpha*, and the
    two-sided p-value.
    """
    n = len(data1)
    # Per-pair differences, indexed so a length mismatch still raises.
    diffs = [data1[i] - data2[i] for i in range(n)]
    sq_sum = sum(d * d for d in diffs)
    diff_sum = sum(diffs)
    # Bessel-corrected standard deviation of the paired differences,
    # then the standard error of the mean difference.
    sd = sqrt((sq_sum - (diff_sum ** 2 / n)) / (n - 1))
    sed = sd / sqrt(n)
    t_stat = (mean(data1) - mean(data2)) / sed
    df = n - 1
    # Critical value and two-sided p-value from the t distribution.
    cv = t.ppf(1.0 - alpha, df)
    p = (1.0 - t.cdf(abs(t_stat), df)) * 2.0
    return t_stat, df, cv, p
# NOTE(review): as above, alpha=0.95 is almost certainly meant to be 0.05.
print("ttest pareado: ")
print(dependent_ttest (data1, data2, 0.95))
# Cross-check against scipy's built-in paired test.
print(stats.ttest_rel(data1,data2))
|
from .featuregroup import FeatureGroup
from .layer import Layer
from .layergroup import LayerGroup
from .imageoverlay import imageOverlay
|
# Demo of quoting, f-strings, and str.format positional indices.
print('"""')  # prints a literal """
print("THIS IS A STRING")
print('"""')

x = "hi"
print(f"{x} Giovanni")  # -> hi Giovanni

name = 'Elizabeth II'
title = 'Queen of the United Kingdom and the other Commonwealth realms'
reign = 'the longest-lived and longest-reigning British monarch'
x = f'{name}, the {title}, is {reign}.'
print(x)

print("{1} {1} {1}".format(1, 2, 3))  # index 1 = second arg -> "2 2 2"
#testing how to make a binary tree
#create class that represents an individual node in
#a binary tree
#TODO
#1. make binary tree
#2. creat the maxHeuristic and minHeuristic
#3. find a way to pass the state and Heuristic to the binary tree (Dylan currently working on in brain.py)
#4.
import board
import copy
import math
# Board geometry is taken from the board module so the tree code stays in
# sync with the game board definition.
row = board.row
col = board.col
#character token to represent an empty space on the board
blankSpace = board.blankSpace
#character token to represent spaces occupied by player pieces
blackToken = board.blackToken
whiteToken = board.whiteToken
#initialize the board with [] to represent squares
# (a row x col grid filled with the blankSpace token)
treeBoard = [[blankSpace for i in range(col)]for j in range(row)]
#end createBoard
#put an "Heuristic" after min and max to reduce the confusion of using the functions we defined vs the ones built into python
#find the Heuristic with the highest value
#rewrite min max and inorder to search next turns to find the specific value
class Node(object):
    """A game-tree node: a board state, its heuristic value, and children."""

    def __init__(self, fKey, fState):
        self.heuristic = fKey  # heuristic score for this state
        self.state = fState    # board (2-D list) this node represents
        self.nextTurns = []    # list of child Nodes: states reachable next turn
#def createNode(key,fState):
# return Node(key, fState)
#def getNodeKey(thisNode):
# return thisNode.heuristic
def maxHeuristic(fNode):
    """Return the largest heuristic value among fNode's children.

    Returns -inf when the node has no children (same as the original).
    The original shadowed the built-in max() with a local variable and
    scanned by hand; use the built-in with a default instead.
    """
    return max((child.heuristic for child in fNode.nextTurns),
               default=(0 - math.inf))
#find the Heuristic with the lowest value
def minHeuristic(fNode):
    """Return the smallest heuristic value among fNode's children.

    Returns +inf when the node has no children (same as the original).
    The original shadowed the built-in min() with a local variable and
    scanned by hand; use the built-in with a default instead.
    """
    return min((child.heuristic for child in fNode.nextTurns),
               default=math.inf)
##### recursive function to add new node to the tree
def insert(root, key, fState):
if root == None:
return Node(key,fState)
else:
root.nextTurns.append((Node(key, fState)))
#sorted(root.nextTurns, key = getNodeKey)
#prints the binary tree in order form when tehe data was entered
#global counter to keep track of nodes (shared by printTree/printOneBranch)
nodeCounter = 0


def printTree(root):
    """Print every node of the tree level by level (root is layer 0).

    Generalises the original copy-pasted 4-level nested loops to any
    depth with a level-order walk; node visit order for trees up to
    depth 3 is unchanged, and layer labels now reflect the true depth.
    Uses and finally resets the global nodeCounter like the original.
    """
    global nodeCounter
    if root:
        level = [root]  # all nodes at the current depth
        depthCounter = 0
        while level:
            for node in level:
                print("Layer: " + str(depthCounter))
                print("Node: " + str(nodeCounter))
                print("Heuristic: " + str(node.heuristic))
                board.printBoard(node.state)
                nodeCounter += 1
            # Descend: collect every child of the current level.
            level = [child for node in level for child in node.nextTurns]
            depthCounter += 1
    # Reset the shared counter for the next print run (as the original did).
    nodeCounter = 0
#end print
#end print
def printOneBranch(root):
    """Print the root, then — walking down the *first* child at each
    level — every sibling set along that leftmost branch.

    Generalises the original fixed 3-level version to arbitrary depth;
    output matches the original for trees of depth <= 3. Increments the
    global nodeCounter without resetting it (as the original did).
    """
    global nodeCounter
    if root:
        print("Layer: " + str(0))
        print("Node: " + str(nodeCounter))
        print("Heuristic: " + str(root.heuristic))
        board.printBoard(root.state)
        nodeCounter += 1
        depthCounter = 1
        node = root
        while node.nextTurns:
            # Print all children at this depth, then follow the first one.
            for child in node.nextTurns:
                print("Layer: " + str(depthCounter))
                print("Node: " + str(nodeCounter))
                print("Heuristic: " + str(child.heuristic))
                board.printBoard(child.state)
                nodeCounter += 1
            node = node.nextTurns[0]
            depthCounter += 1
#end printone branch
#end printone branch
def writeTree(root, fileName):
    """Write the whole tree, level by level, to *fileName* (overwriting).

    Same traversal and record format as printTree, but to a file using a
    local node counter. BUG FIX: the original opened the file and never
    closed it; a context manager now guarantees the handle is released.
    The fixed 4-level loops are generalised to any depth.
    """
    nodeCounter = 0
    depthCounter = 0
    #open file (closed automatically, even on error)
    with open(fileName, "w+") as file:
        if root:
            level = [root]  # all nodes at the current depth
            while level:
                for node in level:
                    file.write("Layer: " + str(depthCounter)+"\n")
                    file.write("Node: " + str(nodeCounter)+"\n")
                    file.write("Heuristic: " + str(node.heuristic)+"\n")
                    board.writeBoard(node.state, file)
                    nodeCounter += 1
                # Descend: collect every child of the current level.
                level = [child for node in level for child in node.nextTurns]
                depthCounter += 1
#end print
#end print
#testing
"""
root = None
root = insert(root, 9 , copy.deepcopy(treeBoard))
insert(root, 6 , copy.deepcopy(treeBoard))
insert(root, 2 , copy.deepcopy(treeBoard))
board.setStartingPieces(treeBoard)
insert(root, 3 , copy.deepcopy(treeBoard))
insert(root, 20 , copy.deepcopy(treeBoard))
printTree(root)
print()
print()
"""
#print(maxHeuristic(root).heuristic)
#board.printBoard(maxHeuristic(root).state)
#print(minHeuristic(root).heuristic)
|
# HackerRank "Circular Array Rotation": read n (array size), k (right
# rotations), q (queries), then the array; answer each query by index.
n,k,q = input().strip().split(' ')
n,k,q = [int(n),int(k),int(q)]
a = [int(a_temp) for a_temp in input().strip().split(' ')]
# Rotating right by k == last (k % n) elements followed by the rest;
# k % n also handles k >= n (and k % n == 0 yields the array unchanged).
d = a[n-(k%n):n]+a[0:n-(k%n)]
for a0 in range(q):
    m = int(input().strip())
    print(d[m])
# Generated by Django 2.1.4 on 2019-01-19 13:45
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: adds nullable seller/user ShippingAddress
    # FKs to Transaction and restricts zip_code to a fixed choice list.
    # Do not hand-edit the generated operations.

    dependencies = [
        ('coreapp', '0060_auto_20190118_1651'),
    ]

    operations = [
        migrations.AddField(
            model_name='transaction',
            name='selleraddress',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='seller_address', to='coreapp.ShippingAddress'),
        ),
        migrations.AddField(
            model_name='transaction',
            name='useraddress',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='user_address', to='coreapp.ShippingAddress'),
        ),
        migrations.AlterField(
            model_name='shippingaddress',
            name='zip_code',
            field=models.CharField(choices=[('421202', '421202'), ('421201', '421201'), ('421204', '421204'), ('421203', '421203'), ('421301', '421301')], default='421202', max_length=100),
        ),
    ]
|
#!/usr/bin/python
"""
This is the code to accompany the Lesson 2 (SVM) mini-project.
Use a SVM to identify emails from the Enron corpus by their authors:
Sara has label 0
Chris has label 1
"""
import sys
from time import time
sys.path.append("../tools/")
from email_preprocess import preprocess
### features_train and features_test are the features for the training
### and testing datasets, respectively
### labels_train and labels_test are the corresponding item labels
# NOTE: this script is Python 2 (print statements); keep it under py2.
features_train, features_test, labels_train, labels_test = preprocess()

#########################################################
### your code goes here ###
#########################################################
from sklearn.svm import SVC

"""
Using a linear kernel
clf = SVC(kernel="linear")
### the time to train and test the algorithm
t0 = time()
clf.fit(features_train, labels_train)
print "training time:", round(time()-t0, 3), "s"
t1 = time()
pred = clf.predict(features_test)
print "predicting time:", round(time()-t1, 3), "s"
### find the accuracy score
from sklearn.metrics import accuracy_score
print accuracy_score(labels_test, pred)
"""

### Using an rbf kernel, C = 10000
clf = SVC(kernel="rbf", C=10000)
"""
and 1% of training data set
features_train = features_train[:len(features_train)/100]
labels_train = labels_train[:len(labels_train)/100]
"""
# Train on the full set and report wall-clock training time.
t0 = time()
clf.fit(features_train, labels_train)
print "training time:", round(time()-t0, 3), "s"
pred = clf.predict(features_test)
### find the accuracy score
from sklearn.metrics import accuracy_score
print accuracy_score(labels_test, pred)
### predict the elements, 1 means Chris, 0 means Sarah
print "predict 10th element:", clf.predict(features_test[10])
print "predict 26th element:", clf.predict(features_test[26])
print "predict 50th element:", clf.predict(features_test[50])
|
from django.core.management.base import BaseCommand, CommandError
from robot_hive import server
from sequbot_data.robot_hive.constants import HIVE_PORT
from sequbot_data import models
# Shortcut to the SocialAccount status choices used below.
STATUS = models.SocialAccount.STATUS
class Command(BaseCommand):
    """Management command that launches the robot hive server."""

    help = 'Starts robot hive server'

    def log(self, msg):
        """Write *msg* to this command's stdout stream."""
        self.stdout.write(msg)

    def add_arguments(self, parser):
        """Register the optional --port override."""
        parser.add_argument('--port')

    def handle(self, *args, **options):
        """Reset non-waiting accounts to IDLE, then start the hive server."""
        port = options['port'] or HIVE_PORT
        # Accounts stuck mid-work are returned to IDLE; accounts awaiting
        # external verification or re-auth keep their status.
        stale_accounts = models.SocialAccount.objects.exclude(
            status__in=[STATUS.EXTERNAL_VERIF, STATUS.NEEDS_AUTH])
        stale_accounts.update(status=STATUS.IDLE)
        server.start(port)
|
import os
import json
import datetime
import pandas as pd
import networkx as nx
from libcity.utils import ensure_dir
from logging import getLogger
from libcity.evaluator.abstract_evaluator import AbstractEvaluator
class MapMatchingEvaluator(AbstractEvaluator):
    """Evaluator for map-matching models.

    Metrics (computed against a ground-truth route when one is provided):
        RMF: Route Mismatch Fraction — (erroneously added length +
             erroneously missed length) / total true route length.
        AN : Accuracy by Number — |LCS| / |truth sequence|.
        AL : Accuracy by Length — length(LCS) / length(truth sequence).
    """

    def __init__(self, config):
        self.metrics = config['metrics']  # list of metric names to evaluate
        self.allowed_metrics = ['RMF', 'AN', 'AL']
        self.config = config
        self.save_modes = config.get('save_modes', ['csv', 'json'])
        self.evaluate_result = {}  # metric name -> computed value
        self._check_config()
        self._logger = getLogger()
        # rel_id -> {'distance': float, 'point1': node, 'point2': node}
        self.rel_info = {}

    def _check_config(self):
        """Validate that `metrics` is a list of supported metric names."""
        if not isinstance(self.metrics, list):
            raise TypeError('Evaluator type is not list')
        for metric in self.metrics:
            if metric not in self.allowed_metrics:
                raise ValueError('the metric {} is not allowed in TrafficStateEvaluator'.format(str(metric)))

    def collect(self, batch):
        """Ingest one batch of map-matching output.

        Args:
            batch: dict with keys
                rd_nwk: road network graph (networkx-style adjacency)
                route : ground-truth sequence of rel_ids, or None
                result: iterable of (dyna_id, rel_id) matched pairs
        """
        self.rd_nwk = batch["rd_nwk"]
        if batch["route"] is not None:
            self.truth_sequence = list(batch["route"])
        else:
            self.truth_sequence = None
        self.result = batch["result"]
        # Index every edge of the road network by its rel_id.
        for point1 in self.rd_nwk.adj:
            for point2 in self.rd_nwk.adj[point1]:
                rel = self.rd_nwk.adj[point1][point2]
                self.rel_info[rel['rel_id']] = {}
                self.rel_info[rel['rel_id']]["distance"] = rel['distance']
                self.rel_info[rel['rel_id']]['point1'] = point1
                self.rel_info[rel['rel_id']]['point2'] = point2
        self.find_completed_sequence()
        if self.truth_sequence is not None:
            # find the longest common subsequence
            self.find_lcs()

    def evaluate(self):
        """Compute the configured metrics into self.evaluate_result.

        Requires collect() to have been called with a non-None route.
        """
        if 'RMF' in self.metrics:
            d_plus = 0   # length present in the output but not in the truth
            d_sub = 0    # length present in the truth but not in the output
            d_total = 0  # total length of the true route
            for rel_id in self.truth_sequence:
                d_total += self.rel_info[rel_id]['distance']
            i = j = k = 0
            # Walk the truth sequence; every segment not in the LCS was missed.
            while i < len(self.lcs):
                while self.truth_sequence[j] != self.lcs[i]:
                    d_sub += self.rel_info[self.truth_sequence[j]]['distance']
                    j += 1
                i += 1
                j += 1
            i = 0
            while j < len(self.truth_sequence):
                d_sub += self.rel_info[self.truth_sequence[j]]['distance']
                j += 1
            # Walk the output sequence; every segment not in the LCS is extra.
            while i < len(self.lcs):
                while self.output_sequence[k] != self.lcs[i]:
                    d_plus += self.rel_info[self.output_sequence[k]]['distance']
                    k += 1
                i += 1
                k += 1
            while k < len(self.output_sequence):
                # BUGFIX: trailing extra output segments count toward d_plus
                # (erroneously added length), mirroring the loop just above;
                # the original added them to d_sub.
                d_plus += self.rel_info[self.output_sequence[k]]['distance']
                k += 1
            self.evaluate_result['RMF'] = (d_plus + d_sub) / d_total
        if 'AN' in self.metrics:
            self.evaluate_result['AN'] = len(self.lcs) / len(self.truth_sequence)
        if 'AL' in self.metrics:
            d_lcs = 0
            d_tru = 0
            for rel_id in self.lcs:
                d_lcs += self.rel_info[rel_id]['distance']
            for rel_id in self.truth_sequence:
                d_tru += self.rel_info[rel_id]['distance']
            self.evaluate_result['AL'] = d_lcs / d_tru

    def save_result(self, save_path, filename=None):
        """Persist matching output and (when truth exists) evaluation results.

        Files written under save_path (prefix defaults to a timestamped
        '<time>_<model>_<dataset>' name):
            <prefix>_result.csv : raw (dyna_id, rel_id) model output
            <prefix>_result.json: GeoJSON LineString of the completed route
            <prefix>.json / .csv: metric values (only when truth was provided)

        Args:
            save_path: destination directory (created if missing)
            filename : optional file-name prefix
        """
        ensure_dir(save_path)
        if filename is None:  # default to a timestamp-based prefix
            filename = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S') + '_' + \
                       self.config['model'] + '_' + self.config['dataset']
        dataframe = {'dyna_id': [], 'rel_id': []}
        self._logger.info('Result is saved at ' +
                          os.path.join(save_path, '{}_result.csv'.format(filename)))
        for line in self.result:
            dataframe['dyna_id'].append(str(line[0]))
            dataframe['rel_id'].append(str(line[1]))
        dataframe = pd.DataFrame(dataframe)
        dataframe.to_csv(os.path.join(save_path, '{}_result.csv'.format(filename)), index=False)
        self._logger.info('Completed sequence is saved at ' +
                          os.path.join(save_path, '{}_result.json'.format(filename)))
        # Build a GeoJSON LineString from the completed output sequence,
        # skipping duplicated points where consecutive segments share a node.
        evaluate_result = dict()
        evaluate_result['type'] = 'Feature'
        evaluate_result['properties'] = {}
        evaluate_result['geometry'] = {}
        evaluate_result['geometry']['type'] = 'LineString'
        evaluate_result['geometry']['coordinates'] = []
        lat_last = None
        lon_last = None
        for rel_id in self.output_sequence:
            lat_origin = self.rd_nwk.nodes[self.rel_info[rel_id]["point1"]]['lat']
            lon_origin = self.rd_nwk.nodes[self.rel_info[rel_id]["point1"]]['lon']
            lat_destination = self.rd_nwk.nodes[self.rel_info[rel_id]["point2"]]['lat']
            lon_destination = self.rd_nwk.nodes[self.rel_info[rel_id]["point2"]]['lon']
            if lat_last is None and lon_last is None:
                evaluate_result['geometry']['coordinates'].append([lon_origin, lat_origin])
                evaluate_result['geometry']['coordinates'].append([lon_destination, lat_destination])
                lat_last = lat_destination
                lon_last = lon_destination
            else:
                if lat_last == lat_origin and lon_last == lon_origin:
                    # Contiguous with the previous segment: only the new endpoint.
                    evaluate_result['geometry']['coordinates'].append([lon_destination, lat_destination])
                    lat_last = lat_destination
                    lon_last = lon_destination
                else:
                    evaluate_result['geometry']['coordinates'].append([lon_origin, lat_origin])
                    evaluate_result['geometry']['coordinates'].append([lon_destination, lat_destination])
                    lat_last = lat_destination
                    lon_last = lon_destination
        # BUGFIX: use a context manager — the original left the file object open.
        with open(save_path + '/' + filename + '_result.json', 'w', encoding='utf-8') as geo_file:
            json.dump(evaluate_result, geo_file, ensure_ascii=False, indent=4)
        if self.truth_sequence is not None:
            self.evaluate()
            if 'json' in self.save_modes:
                self._logger.info('Evaluate result is ' + json.dumps(self.evaluate_result))
                with open(os.path.join(save_path, '{}.json'.format(filename)), 'w') as f:
                    json.dump(self.evaluate_result, f)
                self._logger.info('Evaluate result is saved at ' +
                                  os.path.join(save_path, '{}.json'.format(filename)))
            dataframe = {}
            if 'csv' in self.save_modes:
                for metric in self.metrics:
                    dataframe[metric] = [self.evaluate_result[metric]]
                dataframe = pd.DataFrame(dataframe)
                dataframe.to_csv(os.path.join(save_path, '{}.csv'.format(filename)), index=False)
                self._logger.info('Evaluate result is saved at ' +
                                  os.path.join(save_path, '{}.csv'.format(filename)))
                self._logger.info("\n" + str(dataframe))

    def clear(self):
        """No per-batch state to reset; kept for the evaluator interface."""
        pass

    def find_lcs(self):
        """Compute the longest common subsequence of output and truth routes.

        Standard O(len1 * len2) dynamic program followed by a backtrack;
        stores the LCS (as a list of rel_ids) in self.lcs.
        """
        sequence1 = self.output_sequence
        sequence2 = self.truth_sequence
        len1 = len(sequence1)
        len2 = len(sequence2)
        # res[i][j] = LCS length of sequence2[:i] and sequence1[:j]
        res = [[0 for i in range(len1 + 1)] for j in range(len2 + 1)]
        for i in range(1, len2 + 1):
            for j in range(1, len1 + 1):
                if sequence2[i - 1] == sequence1[j - 1]:
                    res[i][j] = res[i - 1][j - 1] + 1
                else:
                    res[i][j] = max(res[i - 1][j], res[i][j - 1])
        # Backtrack from the bottom-right corner to recover one LCS.
        lcs = []
        i = len(sequence2)
        j = len(sequence1)
        while i > 0 and j > 0:
            if sequence2[i - 1] == sequence1[j - 1]:
                lcs.append(sequence2[i - 1])
                i = i - 1
                j = j - 1
            else:
                if res[i - 1][j] > res[i][j - 1]:
                    i = i - 1
                elif res[i - 1][j] < res[i][j - 1]:
                    j = j - 1
                else:
                    i = i - 1
        lcs.reverse()
        self.lcs = lcs

    def find_completed_sequence(self):
        """Expand the raw matched sequence into a road-connected route.

        Drops None matches and consecutive duplicates; whenever two successive
        matched roads are not adjacent in the network, the gap is filled with
        the distance-weighted shortest path between them.  The result is
        stored in self.output_sequence.
        """
        uncompleted_sequence = []
        for line in self.result:
            uncompleted_sequence.append(line[1])
        while uncompleted_sequence.count(None) != 0:
            uncompleted_sequence.remove(None)
        completed_sequence = []
        i = 0
        last_road = None
        last_point = None
        while i < len(uncompleted_sequence):
            if last_road is not None:
                if last_road == uncompleted_sequence[i]:
                    # Same road matched again: collapse the duplicate.
                    i += 1
                else:
                    if last_point != self.rel_info[uncompleted_sequence[i]]['point1']:
                        try:
                            # Bridge the gap with the shortest path in the network.
                            path = nx.dijkstra_path(self.rd_nwk,
                                                    source=last_point,
                                                    target=self.rel_info[uncompleted_sequence[i]]['point1'],
                                                    weight='distance')
                            j = 0
                            while j < len(path) - 1:
                                point1 = path[j]
                                point2 = path[j + 1]
                                for rel_id in self.rel_info.keys():
                                    if self.rel_info[rel_id]["point1"] == point1 and \
                                            self.rel_info[rel_id]["point2"] == point2:
                                        completed_sequence.append(rel_id)
                                        break
                                j += 1
                            completed_sequence.append(uncompleted_sequence[i])
                        except nx.NetworkXException:
                            # Narrowed from a bare except: no connecting path
                            # (or missing node) — keep the road anyway.
                            completed_sequence.append(uncompleted_sequence[i])
                    else:
                        completed_sequence.append(uncompleted_sequence[i])
                    last_road = uncompleted_sequence[i]
                    last_point = self.rel_info[uncompleted_sequence[i]]['point2']
                    i += 1
            else:
                completed_sequence.append(uncompleted_sequence[i])
                last_road = uncompleted_sequence[i]
                last_point = self.rel_info[uncompleted_sequence[i]]['point2']
                i += 1
        self.output_sequence = completed_sequence
|
# -*- coding: utf-8 -*-
import pygame
import util
import global_store as store
class Display():
    """Owns the currently shown screen and draws it every frame."""

    def __init__(self):
        self.current_display = WelcomDisplay()

    def draw(self):
        """Blit the active screen's background onto the global surface."""
        background = self.current_display.background
        store.screen.blit(background, (0, 0))
        # - Drawing of the remaining screen elements (currently disabled) -
        # for item in currentDisplay.decorList:
        #     screen.blit(item.surf, item.position)
        # screen.blit(currentDisplay.perso.surf, currentDisplay.perso.position)
class WelcomDisplay():
    """Welcome screen: a single background image scaled to the window size."""

    def __init__(self):
        raw = pygame.image.load(util.get_asset_path(store.main_background)).convert()
        self.background = pygame.transform.scale(raw, store.windows_size)
|
from .browser import BrowserHtml, BrowserResponse
from .client import HttpClient
from .http import (
HttpRequest,
HttpRequestBody,
HttpRequestHeaders,
HttpResponse,
HttpResponseBody,
HttpResponseHeaders,
)
from .page_params import PageParams
from .url import RequestUrl, ResponseUrl
|
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.stats.diagnostic import acorr_ljungbox
import pandas as pd
import matplotlib.pyplot as plt
from statsmodels.graphics.tsaplots import plot_acf
from statsmodels.graphics.tsaplots import plot_pacf
from statsmodels.tsa.stattools import adfuller as ADF
# NOTE(review): uses the legacy statsmodels ARIMA API (statsmodels.tsa.arima_model),
# which was removed in statsmodels 0.13 — this script requires an older version.
filename = 'training.xlsx'
forrecastnum = 5
data = pd.read_excel(filename, index_col=u'Date')
# Matplotlib settings for CJK labels and minus signs.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
# Plot the raw series and its autocorrelation.
data.plot()
plt.title('Time Series')
plt.savefig("timeseries.png")
plot_acf(data)
plt.savefig("1.png")
# ADF stationarity test on the raw series.
print(u'原始序列的ADF检验结果为:', ADF(data[u'Value']))
# First-order differencing to remove trend.
D_data = data.diff(periods=1).dropna()
D_data.columns = [u'Value差分']
D_data.plot()
plt.savefig("2.png")
plot_acf(D_data).show()
plot_pacf(D_data).show()
# ADF test and Ljung-Box white-noise test on the differenced series.
print(u'1阶差分序列的ADF检验结果为:', ADF(D_data[u'Value差分']))
print(u'差分序列的白噪声检验结果为:', acorr_ljungbox(D_data, lags=1))
data[u'Value'] = data[u'Value'].astype(float)
# Grid-search (p, q) up to len/10 and pick the pair minimizing BIC.
pmax = int(len(D_data) / 10)
qmax = int(len(D_data) / 10)
bic_matrix = []
for p in range(pmax + 1):
    tmp = []
    for q in range(qmax + 1):
        try:
            tmp.append(ARIMA(data, (p, 1, q)).fit().bic)
        except BaseException:
            # Some (p, q) combinations fail to converge; mark them as None.
            tmp.append(None)
    bic_matrix.append(tmp)
bic_matrix = pd.DataFrame(bic_matrix)
print(bic_matrix)
# stack() drops the Nones; idxmin returns the (p, q) of the smallest BIC.
p, q = bic_matrix.stack().idxmin()
print(u'bic最小的P值和q值为:%s、%s' % (p, q))
# Fit the chosen ARIMA(p, 1, q) model and forecast 5 steps ahead.
model = ARIMA(data, (p, 1, q)).fit()
model.summary2()
forecast = model.forecast(5)
print(forecast)
|
def lambda_handler(event, context):
    """AWS Lambda entry point: dispatch the Alexa request by its type."""
    request = event['request']
    session = event['session']
    request_type = request['type']
    if request_type == "LaunchRequest":
        return onLaunch(request, session)
    if request_type == "IntentRequest":
        return onIntent(request, session)
    if request_type == "SessionEndedRequest":
        return onSessionEnd(request, session)
def onLaunch(launchRequest, session):
    """Skill launched without a specific intent: greet the user."""
    return welcomeuser()
def onIntent(intentRequest, session):
    """Route an IntentRequest to its handler; raise on unknown intents."""
    intent = intentRequest['intent']
    intentName = intent['name']
    if intentName == "PlantFactIntent":
        return plant_fact(intent, session)
    if intentName == "AMAZON.HelpIntent":
        return welcomeuser()
    if intentName in ("AMAZON.CancelIntent", "AMAZON.StopIntent"):
        return handleSessionEndRequest()
    raise ValueError("Invalid intent")
def onSessionEnd(sessionEndedRequest, session):
    """Log the end of a session; no response is needed for this request type."""
    message = ("on_session_ended requestId=" + sessionEndedRequest['requestId']
               + ", sessionId=" + session['sessionId'])
    print(message)
def welcomeuser():
    """Build the welcome/help response that keeps the session open."""
    sessionAttributes = {}
    cardTitle = " Hello"
    speechOutput = "Hello , Welcome to Green Facts! " \
                   "You can know interesting facts about plants like by saying Tell me plant facts! How can I help?"
    # BUGFIX: the reprompt said "Hoe can I help?" — corrected the typo to "How".
    repromptText = "You can know interesting facts about plants like by saying Tell me plant facts! How can I help?"
    shouldEndSession = False
    return buildResponse(sessionAttributes, buildSpeechletResponse(cardTitle, speechOutput, repromptText, shouldEndSession))
def plant_fact(intent, session):
    """Respond with one randomly chosen plant fact and end the session."""
    import random
    fact = green[random.randint(0, len(green) - 1)]
    cardTitle = intent['name']
    sessionAttributes = {}
    speechOutput = "One of the interesting fact related to plants is " + fact
    repromptText = "You can know interesting facts about plants like by saying Tell me plant fact"
    shouldEndSession = True
    return buildResponse(sessionAttributes, buildSpeechletResponse(cardTitle, speechOutput, repromptText, shouldEndSession))
def handleSessionEndRequest():
    """Build the goodbye response for Cancel/Stop intents."""
    cardTitle = "Session Ended"
    # BUGFIX: the adjacent string literals concatenated into
    # "Talk to you laterHave a great time!" — added the missing separator.
    speechOutput = "Thank you for using green facts Alexa Skills Kit. " \
                   "Talk to you later. " \
                   "Have a great time! "
    shouldEndSession = True
    return buildResponse({}, buildSpeechletResponse(cardTitle, speechOutput, None, shouldEndSession))
def buildSpeechletResponse(title, output, repromptTxt, endSession):
    """Assemble the Alexa speechlet payload (speech, card, reprompt, end flag)."""
    speech = {'type': 'PlainText', 'text': output}
    card = {'type': 'Simple', 'title': title, 'content': output}
    reprompt = {'outputSpeech': {'type': 'PlainText', 'text': repromptTxt}}
    return {
        'outputSpeech': speech,
        'card': card,
        'reprompt': reprompt,
        'shouldEndSession': endSession,
    }
def buildResponse(sessionAttr, speechlet):
    """Wrap a speechlet into the versioned top-level Alexa response envelope."""
    envelope = {'version': '1.0'}
    envelope['sessionAttributes'] = sessionAttr
    envelope['response'] = speechlet
    return envelope
# Pool of plant facts served by PlantFactIntent (one is picked at random).
green = [ "An average size tree can provide enough wood to make 170,100 pencils!",
          "The first type of aspirin, painkiller and fever reducer came from the tree bark of a willow tree!",
          "85% of plant life is found in the ocean!",
          "Bananas contain a natural chemical which can make people feel happy!",
          "Brazil is named after a tree!",
          "The Amazon rainforest produces half the world’s oxygen supply!",
          "Cricket bats are made of a tree called Willow and baseball bats are made out of the wood of Hickory tree!",
          "Dendrochronology is the science of calculating a tree’s age by its rings!",
          "Caffeine serves the function of a pesticide in a coffee plant! ",
          "Apple is 25% air, that is why it floats on water!",
          "Peaches, Pears, apricots, quinces, strawberries, and apples are members of the rose family!",
          "Apple, potatoes and onions have the same taste, to test this eat them with your nose closed!",
          "The tears during cutting an onion are caused by sulfuric acid present in them!",
          "The tallest tree ever was an Australian eucalyptus – In 1872 it was measured at 435 feet tall!",
          "The first potatoes were cultivated in Peru about 7,000 years ago!",
          "The evaporation from a large oak or beech tree is from ten to twenty-five gallons in twenty-four hours!",
          "Strawberry is the only fruit that bears its seeds on the outside. The average strawberry has 200 seeds!",
          "Leaving the skin on potatoes while cooking is healthier as all the vitamins are in the skin!",
          "Around 2000 different types of plants are used by humans to make food!",
          "Small pockets of air inside cranberries cause them to bounce and float in water!",
          "Bamboo is the fastest-growing woody plant in the world; it can grow 35 inches in a single day!",
          "A sunflower looks like one large flower, but each head is composed of hundreds of tiny flowers called florets, which ripen to become the seeds!",
          "Cabbage has 91% water content!",
          "Banana is an Arabic word for fingers!",
          "The California redwood (coast redwood and giant sequoia) are the tallest and largest living organism in the world!"
          ]
import numpy as np
from flask import Flask, request, jsonify, render_template
import pickle
app = Flask(__name__)
# NOTE(review): pickle.load executes arbitrary code from the file — only load
# a model artifact from a trusted source. The file handle is also left open.
Linear_Regression = pickle.load(open('Linear_Regression.pkl', 'rb'))
@app.route('/')
def home():
    """Render the landing page with the prediction input form."""
    return render_template('index.html')
@app.route('/predict',methods=['POST'])
def predict():
    '''
    For rendering results on HTML GUI
    '''
    # Collect all submitted form fields as floats, in form order.
    feature = [float(x) for x in request.form.values()]
    # Reshape to a column vector: sklearn expects a 2-D (n_samples, n_features) array.
    final_feature = np.array(feature).reshape(-1,1)
    prediction = Linear_Regression.predict(final_feature)
    # NOTE(review): float() assumes the prediction has exactly one element —
    # confirm the form only ever submits a single value.
    prediction=float(prediction)
    return render_template('index.html', prediction_text='Percentage should be {}'.format(prediction))
@app.route('/predict_api',methods=['POST'])
def predict_api():
    '''
    For direct API calls through a JSON request body
    '''
    data = request.get_json(force=True)
    # Feed the JSON values (in insertion order) to the model as one sample.
    prediction = Linear_Regression.predict([np.array(list(data.values()))])
    output = prediction[0]
    return jsonify(output)
# NOTE: debug=True is for local development only; disable it in production.
if __name__ == "__main__":
    app.run(debug=True)
from flask import render_template, Blueprint, make_response, request
# Blueprint collecting the cookie-demo routes below.
main = Blueprint('main', __name__)
@main.route("/")
@main.route("/home")
def home():
    """Render the home page and set a demo userID cookie on the response."""
    resp = make_response(render_template("home.html"))
    # NOTE(review): hard-coded cookie value — presumably a placeholder for a
    # real session/user identifier.
    resp.set_cookie("userID", "12345")
    return resp
@main.route("/getCookie")
def getCookie():
    """Return the userID cookie value, or a fallback message if it is absent."""
    userID = request.cookies.get("userID")
    if userID is None:
        # BUGFIX: without this guard, 'user ID is ' + None raised a TypeError
        # (HTTP 500) whenever the cookie had not been set yet.
        return 'no user ID cookie set'
    return 'user ID is ' + userID
@main.route("/delCookie")
def delCookie():
    """Delete the userID cookie by expiring it immediately."""
    resp = make_response("deleting cookie")
    # Setting an already-expired cookie is the standard way to remove it.
    resp.set_cookie("userID", '', expires = 0)
    return resp
|
#!/usr/bin/env python
import os
import sys
from os.path import join, dirname
from dotenv import load_dotenv
if __name__ == "__main__":
    try:
        # Create .env file path and load it
        dotenv_path = join(dirname(__file__), '.env')
        load_dotenv(dotenv_path)
        os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
        # Make the sisyphus python/webapp packages importable.
        sisyphus_dir = os.environ["SISYPHUS_DIR"]
        tempdir = os.path.join(sisyphus_dir, 'python')
        if tempdir not in sys.path:
            sys.path.append(tempdir)
        tempdir = os.path.join(tempdir, 'sisyphus')
        if tempdir not in sys.path:
            sys.path.append(tempdir)
        tempdir = os.path.join(tempdir, 'webapp')
        if tempdir not in sys.path:
            sys.path.append(tempdir)
        import django
        django.setup()
        from django.core.management import execute_from_command_line
        execute_from_command_line(sys.argv)
    # BUGFIX: 'except Exception, e' is Python-2-only syntax; 'as e' is valid on
    # Python 2.6+ and Python 3 alike.
    except Exception as e:
        sys.stderr.write('bughunter error manager.py: %s\n' % e)
        raise
|
from unittest import TestCase
from sprint_2.b_stonks import max_income
class MaxIncomeTest(TestCase):
    """Unit tests for max_income (best single buy/sell profit)."""

    def test_empty(self):
        """No prices: no profit possible."""
        self.assertEqual(max_income([]), 0)

    def test_single(self):
        """One price: cannot both buy and sell."""
        self.assertEqual(max_income([1]), 0)

    def test_two(self):
        """Simple rising pair yields the difference."""
        self.assertEqual(max_income([0, 1]), 1)

    def test_skip_middle(self):
        """Best is buying first and selling last, ignoring the middle."""
        self.assertEqual(max_income([1, 2, 3]), 2)

    def test_skip_first(self):
        """Best buy is after the initial peak."""
        self.assertEqual(max_income([3, 1, 2]), 1)

    def test_skip_last(self):
        """Best sell is before the final drop."""
        self.assertEqual(max_income([1, 2, 3, 2]), 2)

    def test_skip_all(self):
        """Strictly falling prices: never trade."""
        self.assertEqual(max_income([3, 2, 1]), 0)
from gandalf_app.database.models import Project, UploadedMediaFile, UploadedDataFile, Analysis, ResultDetails, \
ResultSummary, Elaboration
from gandalf_app.api.project.dao import save, get_all, get_project_by_id, saveMediaFile, saveDataFile, deleteProject, \
get_media_by_id, removeMediaFromProject, get_data_by_id, removeDataFromProject, get_tool_by_id, saveAnalysis, \
get_analysis_by_uuid, saveResultSummary, saveElaboration, get_elaboration_by_uuid, get_result_by_id
# from gandalf_app import settings
from gandalf_app.settings import MULTIMEDIA_DIRECTORY
import hashlib
import os
import uuid
import requests
import pickle
from flask_restplus import fields, marshal
import json
from gandalf_app import settings
import uuid
def getUuid():
    """Return a freshly generated random (version 4) UUID as a string."""
    new_id = uuid.uuid4()
    return str(new_id)
def getHash(filePath):
    """Return the hex SHA-256 digest of the file at *filePath*.

    Reads the file in 64 KiB chunks so arbitrarily large files can be hashed
    without loading them into memory.
    """
    BLOCK_SIZE = 65536
    file_hash = hashlib.sha256()
    # BUGFIX: use a context manager so the file handle is always closed
    # (the original opened the file without closing it on all paths).
    with open(filePath, 'rb') as f:
        fb = f.read(BLOCK_SIZE)
        while len(fb) > 0:
            file_hash.update(fb)
            fb = f.read(BLOCK_SIZE)
    return str(file_hash.hexdigest())
def post_project(data):
    """Create and persist a new Project named after the payload's 'name' key."""
    name = data.get('name')
    project = Project(name)
    return save(project)


def get_projects():
    """Return all stored projects."""
    return get_all()


def get_project(projectId):
    """Return a single project by its primary key."""
    return get_project_by_id(projectId)
def add_media_to_project(projectId, filename, role):
    """Attach an already-uploaded media file to a project.

    The file is expected to exist under MULTIMEDIA_DIRECTORY; its SHA-256
    is recorded, and the file is linked as a probe or a reference depending
    on *role* ('PROBE' means probe; anything else is treated as reference).
    """
    project = get_project_by_id(projectId)
    uploadedMediaFile = UploadedMediaFile(filename)
    uploadedMediaFile.fileName = filename
    uploadedMediaFile.role = role
    filePath = os.path.join(MULTIMEDIA_DIRECTORY, filename)
    uploadedMediaFile.hash = getHash(filePath)
    createdMediaFile = saveMediaFile(uploadedMediaFile, projectId)
    if role == 'PROBE':
        project.probes.append(createdMediaFile)
    else:
        project.references.append(createdMediaFile)
    save(project)
    return createdMediaFile
def add_data_to_project(projectId, filename, dataType):
    """Attach an already-uploaded auxiliary data file to a project.

    Records the file's SHA-256 and appends it to the project's
    additionalData collection.
    """
    project = get_project_by_id(projectId)
    uploadedDataFile = UploadedDataFile(filename)
    uploadedDataFile.fileName = filename
    uploadedDataFile.dataType = dataType
    filePath = os.path.join(MULTIMEDIA_DIRECTORY, filename)
    uploadedDataFile.hash = getHash(filePath)
    createdDataFile = saveDataFile(uploadedDataFile, projectId)
    project.additionalData.append(createdDataFile)
    save(project)
    return createdDataFile
def delete_project(projectId):
    """Delete a project by id (delegates to the DAO)."""
    deleteProject(projectId)


def deleteMediaForProject(projectId, mediaId):
    """Detach and remove one media file from a project."""
    project = get_project_by_id(projectId)
    media = get_media_by_id(mediaId)
    removeMediaFromProject(project, media)


def deleteDataForProject(projectId, dataId):
    """Detach and remove one auxiliary data file from a project."""
    project = get_project_by_id(projectId)
    data = get_data_by_id(dataId)
    removeDataFromProject(project, data)
def startAnalysis(projectId, toolId, result_uuid, result_path, tools, elaboration_uuid):
    """Trigger one tool run for a project and record it as an Analysis.

    Calls the tool's HTTP endpoint (POST or GET per its configuration),
    then persists an Analysis linked to the project, the overall result
    and the parent elaboration.  Returns the new analysis uuid.

    NOTE(review): result_path is currently unused in this function.
    """
    project = get_project_by_id(projectId)
    tool = get_tool_by_id(toolId)
    tool_endpoint = tool.endpoint
    tool_method = tool.method
    analysis_uuid = str(uuid.uuid4())
    if tool_method == 'POST':
        requests.post(
            tool_endpoint + '/?uuid=' + str(result_uuid) + '&projectId=' + str(projectId) + '&analysis_uuid=' +
            analysis_uuid)
    else:
        requests.get(
            tool_endpoint + '/?uuid=' + str(result_uuid) + '&projectId=' + str(projectId) + '&analysis_uuid=' +
            analysis_uuid)
    # Create an analysis row in the DB.
    # An analysis is one tool elaboration using a project's probes;
    # a result is a set of analyses.
    analysis = Analysis()
    analysis.uuid = analysis_uuid
    analysis.result_uuid = result_uuid  # uuid of the result this analysis belongs to
    analysis.tools = tools
    analysis.project_id = projectId
    analysis.elaboration_uuid = elaboration_uuid
    saveAnalysis(analysis)
    # Register the analysis in the project's analysis list.
    project.analysis.append(analysis)
    save(project)
    return analysis_uuid
def create_result_scaffold(projectId, toolIds, result_uuid, result_path):
    """Create an Elaboration for a result and launch one analysis per tool.

    The elaboration shares its uuid with the overall result.  Returns the
    list of launched analysis uuids.
    """
    elaboration = Elaboration()
    elaboration.number_of_tools = len(toolIds)
    elaboration_uuid = result_uuid
    elaboration.uuid = elaboration_uuid
    elaboration.project_id = projectId
    uuid_list = []
    for i in toolIds:
        toolId = int(i)
        analysis_uuid = startAnalysis(projectId, toolId, result_uuid, result_path, len(toolIds), elaboration_uuid)
        # print("Analysis " + analysis_uuid + " has been launched")
        uuid_list.append(analysis_uuid)
        elaboration.analysis_uuid_list.append(analysis_uuid)
    created = saveElaboration(elaboration)
    print("Elaborazione " + elaboration_uuid + " status: " + created.status)
    return uuid_list
def update_elaboration(analysisUuid):
    """Record that one analysis finished and, if all tools reported,
    complete the parent elaboration and attach a ResultSummary to the project.
    """
    analysis = get_analysis_by_uuid(analysisUuid)
    print("Aggiornamento analisi " + analysisUuid + " appartenente ad elaborazione " + analysis.elaboration_uuid)
    elaboration = get_elaboration_by_uuid(analysis.elaboration_uuid)
    completed_tool_elaborations = elaboration.completed_tool_elaborations
    completed_tool_elaborations = completed_tool_elaborations + 1
    if completed_tool_elaborations == elaboration.number_of_tools:
        # Every tool has reported: mark the elaboration complete and publish
        # a summary of its analyses on the owning project.
        elaboration.status = 'COMPLETED'
        elaboration.completed_tool_elaborations = completed_tool_elaborations
        resultSummary = ResultSummary(name='Risultati di elaborazione ' + str(elaboration.uuid))
        resultSummary.results_uuid_list = elaboration.analysis_uuid_list
        resultSummary.project_id = elaboration.project_id
        resultSummary.folder_result_uuid = elaboration.uuid
        created = saveResultSummary(resultSummary)
        print("Project Id: " + str(elaboration.project_id))
        project = get_project_by_id(elaboration.project_id)
        project.results.append(created)
        save(project)
    # NOTE(review): the counter assignment above only runs on the completion
    # branch — confirm whether the increment should also be persisted on
    # intermediate calls.
    saveAnalysis(analysis)
    updated = saveElaboration(elaboration)
    print("Elaborazione " + updated.uuid + " status: " + updated.status)
def get_project_with_analysis_with_uuid(analysisUuid):
    """Return the project that owns the analysis with the given uuid."""
    analysis = get_analysis_by_uuid(analysisUuid)
    return get_project(analysis.project_id)
def get_result(projectId, resultId):
    """Assemble the JSON payload describing one ResultSummary.

    Loads every per-analysis pickle from the result folder, flattens the
    contained arrays into plain lists, and returns them together with the
    project name and a canonical resource location URL.
    """
    resultSummary = get_result_by_id(resultId)
    analysis_uuid_list = resultSummary.results_uuid_list
    print(analysis_uuid_list)
    result_uuid = resultSummary.folder_result_uuid
    dataList = []
    result_dir = MULTIMEDIA_DIRECTORY + '/' + result_uuid
    for analysis_uuid in analysis_uuid_list:
        data_path = result_dir + '/result-' + analysis_uuid + '.pkl'
        print(data_path)
        # NOTE(review): pickle.load executes arbitrary code from the file —
        # safe only because these files are produced by our own tools.
        # ('input' also shadows the builtin here; harmless in this scope.)
        with open(result_dir + '/result-' + analysis_uuid + '.pkl', 'rb') as input:
            data = pickle.load(input)
            if len(data) > 0:
                # Each element is assumed to be a numpy-like array with
                # .tolist() — TODO confirm against the tools' output format.
                [dataList.append(d.tolist()) for d in data]
    # resultDetails.data = dataList[0]
    # id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    # location = db.Column(db.String())
    # probes = db.Column(db.PickleType())
    # toolId = db.Column(db.Integer())
    # name = db.Column(db.String())
    # resultType = db.Column(db.Enum(ResultType))
    # project_id = db.Column(db.Integer, db.ForeignKey('project.id'))
    # dataType = db.Column(db.String, db.ForeignKey('project.id'))
    # probes = pickle.load(project.probes)
    project = get_project_by_id(projectId)
    # Marshal only the fields needed for the response.
    resource_fields = {'probes': fields.List(fields.String()), 'name': fields.String}
    des = json.loads(json.dumps(marshal(project, resource_fields)))
    # e.g. https://lesc.dinfo.unifi.it/gandalf/api/v1/projects/123/results/120
    location = settings.HTTP_PROTOCOL + '://' + settings.FLASK_SERVER_NAME + '/gandalf/api/v' + str(
        settings.API_VERSION) + '/projects' + str(
        projectId) + "/results/" + str(resultId)
    if des['name'] is None:
        name = "NON_DISPONIBILE"
    else:
        name = des['name']
    # Returning the raw data can be heavy (e.g. matrices with many values).
    return {
        'id': resultId,
        'location': location,
        'toolId': 1,
        'name': name,
        'resultType': 'MULTI',
        'dataType': 'matrix',
        'data': dataList
    }
|
import pygame
from scripts.game_objects.game_object import GameObject
from scripts.utilities import load_image
class HealthBarNPC(GameObject):
    """Floating health bar rendered above an NPC sprite."""

    def __init__(self, game, npc):
        super().__init__(game, load_image('resources/sprites/gui/npc_hp_bar.png'), npc.rect.x, npc.rect.y - 20, game.all_sprites)
        self.npc = npc

    def update(self):
        """Follow the NPC and redraw the green fill proportional to its HP."""
        self.x = self.npc.x
        self.y = self.npc.y - 15
        # Clear the 56x8 inner fill area before redrawing.
        pygame.draw.rect(self.image, (0, 0, 0, 0), (4, 4, 56, 8))
        # BUGFIX: the original computed 56 // max_hp * hp, which truncates
        # first — whenever max_hp > 56 the bar width was always 0. Multiply
        # before dividing so the width scales correctly with remaining HP.
        pygame.draw.rect(self.image, (0, 255, 33), (4, 4, 56 * self.npc.hp // self.npc.max_hp, 8))
|
student = ['A','B','C','D','E']
kor_score = [49,79,20,100,80]
math_score = [43,59,85,30,90]
eng_score = [49,79,48,60,100]

# One row per subject; columns line up with the `student` list.
mid_term_score = [kor_score, math_score, eng_score]

# Per-student totals: transpose with zip(*...) and sum each column.
student_score = [sum(per_subject) for per_subject in zip(*mid_term_score)]

for name, total in zip(student, student_score):
    print('{}\'s total score : {} , average score : {}'.format(name, total, int(total/len(mid_term_score))))
|
from mcpi.minecraft import Minecraft
# Connect to the running Minecraft game (default local server).
mc=Minecraft.create()
# Place a sign (block id 63) at the player's current tile position.
x,y,z=mc.player.getTilePos()
mc.setSign(x,y,z,63,0,"Welcome","My","World")
from pyowm import OWM
import datetime
import os
#from weather import Weather
# OpenWeatherMap client authenticated via environment variable.
owm = OWM(os.environ['OWM_API_KEY'])
# NOTE(review): captured once at import time — a long-running process keeps a
# stale date; consider calling datetime.datetime.now() per request instead.
now = datetime.datetime.now()
def get_weather():
    """Summarize today's rain forecast (07:00-20:00) for the commute area.

    Checks the 3-hourly forecasts for Leiden, Katwijk aan Zee and Rijnsburg
    and returns a human-readable string flagging whether rain falls during
    the morning (<10:00) or afternoon (>16:00) commute windows.
    """
    # collect Forecasters for each location
    owm_obs = [None, None, None]
    owm_obs[0] = owm.three_hours_forecast('Leiden,NL')
    owm_obs[1] = owm.three_hours_forecast('Katwijk Aan Zee,NL')
    owm_obs[2] = owm.three_hours_forecast('Rijnsburg, NL')
    # BUGFIX: use the time of the call, not the module-level `now` captured at
    # import time, so long-running processes don't query a stale date.
    day = datetime.datetime.now().strftime("%Y-%m-%d")
    hours = range(7, 21)  # check hours from 7 am to 8 pm incl.
    response = ""
    response_end = " at these hours: "
    will_rain = False
    will_rain_commute_am = False
    will_rain_commute_pm = False
    for hour in hours:
        time = str(day) + " " + str(hour) + ":00:00+00"
        # Rain at any of the three locations counts for that hour.
        if owm_obs[0].will_be_rainy_at(time) or owm_obs[1].will_be_rainy_at(time) or owm_obs[2].will_be_rainy_at(time):
            response_end = response_end + str(hour) + ":00 "
            will_rain = True
            if hour < 10:
                will_rain_commute_am = True
            if hour > 16:
                will_rain_commute_pm = True
    if will_rain and (not will_rain_commute_am and not will_rain_commute_pm):
        response = ":| Be careful, rain forecast outside commute " + response_end
    elif will_rain and (will_rain_commute_am or will_rain_commute_pm):
        response = ":( Rain forecast during"
        if will_rain_commute_am and will_rain_commute_pm:
            response = response + " morning and afternoon"
        elif will_rain_commute_am:
            response = response + " morning"
        elif will_rain_commute_pm:
            response = response + " afternoon"
        response = response + " commute, specifically " + response_end
    else:
        response = ":) No rain forecast for today"
    return response
|
# -*- coding: utf-8 -*-
__author__ = 'ivany'
def md5(str):
    """Return the hex MD5 digest of *str* (accepts bytes or text).

    Text input is encoded as UTF-8 first; hashlib's update() requires bytes,
    so the original raised TypeError for text input on Python 3.
    (The parameter keeps its original name for compatibility even though it
    shadows the builtin.)
    """
    import hashlib
    if not isinstance(str, bytes):
        str = str.encode('utf-8')
    m = hashlib.md5()
    m.update(str)
    return m.hexdigest()
def filterHtml(html):
    """Strip all HTML/XML tags from *html*, leaving only the text content."""
    import re
    tag_pattern = re.compile(r'<[^>]+>', re.S)
    return tag_pattern.sub('', html)
import math
class MathUtils:
    """Small collection of 2-D geometry helpers."""

    # BUGFIX: the methods took no `self` and had no decorator, so they worked
    # only when called on the class; @staticmethod also makes instance calls
    # valid while keeping MathUtils.dist(...) working unchanged.
    @staticmethod
    def dist(a: tuple, b: tuple):
        """Euclidean distance between points *a* and *b* given as (x, y) tuples."""
        return ((a[0]-b[0])**2+(a[1]-b[1])**2)**0.5

    @staticmethod
    def hypoth(s: int):
        """Hypotenuse length of a right isosceles triangle with legs of length *s*."""
        return math.sqrt((s**2) * 2)
from tkinter import *
import time
import sys
tk = Tk()
canvas = Canvas(tk,width=1024,heigh=768)  # width-height (note: 'heigh' is tolerated by Tk option matching)
canvas.pack()
img2 = PhotoImage(file = "F.png")  # background image, 1024x768
# label1 = Label(tk, image=img2)
# label1.grid(row=1,column=1)
# print(img2.size)
img = PhotoImage(file = "M.png")  # player character sprite
canvas.create_image(0,0,anchor=NW,image=img2)
canvas.create_image(1100-(481),768-(600),anchor=NW,image = img)
print("Coordenas Iniciales: " , 0,0)
# Current player coordinates, updated by the key handler below.
x = 619
y = 168

def movetriangle(event):
    """Move the player sprite 3 px per arrow-key press; exit on reaching x<=20."""
    global x
    global y
    if event.keysym == 'Up':
        canvas.move(2, 0, -3)  # canvas item id 2 (the player image), dx, dy
        y = y-3
    elif event.keysym == 'Down':
        canvas.move(2, 0, 3)
        y = y+3
    elif event.keysym == 'Left':
        canvas.move(2, -3, 0)
        x = x -3
    else:
        canvas.move(2, 3, 0)
        x = x +3
    print("Coordenadas Actuales:" , x ,y)
    if x <= 20:
        # Reaching the left edge counts as a goal: show a message and quit.
        master = Tk()
        w = Message(master, text="Usted ha hecho un Gooooool")
        w.pack()
        sys.exit(0)

# Bind all four arrow keys to the same handler.
canvas.bind_all('<KeyPress-Up>', movetriangle)
canvas.bind_all('<KeyPress-Down>', movetriangle)
canvas.bind_all('<KeyPress-Left>', movetriangle)
canvas.bind_all('<KeyPress-Right>', movetriangle)
tk.mainloop()
|
# -*- coding: utf-8 -*-
import tensorflow as tf
# NOTE(review): TensorFlow 1.x API (tf.Session, global_variables_initializer);
# this script does not run under TF 2.x without compat shims.
# 4-D tensor: batch of 2, each a 2x2x2 (rows x cols x depth) 3-D tensor.
t=tf.constant(
    [
        # first 2x2x2 three-dimensional tensor
        [
            [[1,12],[6,18]],
            [[9,13],[4,11]],
        ],
        # second 2x2x2 three-dimensional tensor
        [
            [[2,19],[7,17]],
            [[3,15],[8,11]]
        ]
    ],tf.float32
)
# Per-channel mean and variance over axes [0,1,2] (batch, rows, cols).
mean,variance=tf.nn.moments(t,[0,1,2])
# Batch normalization: gamma is the scale, beta the offset, per channel.
gamma=tf.Variable(tf.constant([2,5],tf.float32))
beta=tf.Variable(tf.constant([3,8],tf.float32))
# Signature: batch_normalization(x, mean, variance, offset, scale, epsilon).
r=tf.nn.batch_normalization(t,mean,variance,beta,gamma,1e-8)
session=tf.Session()
session.run(tf.global_variables_initializer())
# Print the statistics.
print('均值和方差:')
print(session.run([mean,variance]))
# Print the result of the BN op.
print('BN操作后的结果:')
print(session.run(r))
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import threading
import logging
from ambari_agent.CommandHooksOrchestrator import HooksOrchestrator
from ambari_agent.FileCache import FileCache
from ambari_agent.AmbariConfig import AmbariConfig
from ambari_agent.ClusterConfigurationCache import ClusterConfigurationCache
from ambari_agent.ClusterTopologyCache import ClusterTopologyCache
from ambari_agent.ClusterMetadataCache import ClusterMetadataCache
from ambari_agent.ClusterHostLevelParamsCache import ClusterHostLevelParamsCache
from ambari_agent.ClusterAlertDefinitionsCache import ClusterAlertDefinitionsCache
from ambari_agent.ActionQueue import ActionQueue
from ambari_agent.CommandStatusDict import CommandStatusDict
from ambari_agent.CustomServiceOrchestrator import CustomServiceOrchestrator
from ambari_agent.RecoveryManager import RecoveryManager
from ambari_agent.AlertSchedulerHandler import AlertSchedulerHandler
from ambari_agent.ConfigurationBuilder import ConfigurationBuilder
from ambari_agent.StaleAlertsMonitor import StaleAlertsMonitor
from ambari_stomp.adapter.websocket import ConnectionIsAlreadyClosed
from ambari_agent.listeners.ServerResponsesListener import ServerResponsesListener
from ambari_agent import HeartbeatThread
from ambari_agent.ComponentStatusExecutor import ComponentStatusExecutor
from ambari_agent.CommandStatusReporter import CommandStatusReporter
from ambari_agent.HostStatusReporter import HostStatusReporter
from ambari_agent.AlertStatusReporter import AlertStatusReporter
logger = logging.getLogger(__name__)
class InitializerModule:
  """
  - Instantiate some singleton classes or widely used instances along with providing their dependencies.
  - Reduce cross modules dependencies.
  - Make other components code cleaner.
  - Provide an easier way to mock some dependencies.
  """
  def __init__(self):
    # Shared shutdown signal: long-running agent threads watch this event.
    self.stop_event = threading.Event()
    self.config = AmbariConfig.get_resolved_config()
    # Declared up-front as None (populated by init()) so the object's full
    # attribute surface is visible in one place.
    self.is_registered = None
    self.metadata_cache = None
    self.topology_cache = None
    self.host_level_params_cache = None
    self.configurations_cache = None
    self.alert_definitions_cache = None
    self.configuration_builder = None
    self.stale_alerts_monitor = None
    self.server_responses_listener = None
    self.file_cache = None
    self.customServiceOrchestrator = None
    self.hooks_orchestrator = None
    self.recovery_manager = None
    self.commandStatuses = None
    self.action_queue = None
    self.alert_scheduler_handler = None
    self.init()
  def init(self):
    """
    Initialize properties
    """
    self.is_registered = False
    # Cluster-scoped caches, all persisted under config.cluster_cache_dir.
    self.metadata_cache = ClusterMetadataCache(self.config.cluster_cache_dir, self.config)
    self.topology_cache = ClusterTopologyCache(self.config.cluster_cache_dir, self.config)
    self.host_level_params_cache = ClusterHostLevelParamsCache(self.config.cluster_cache_dir)
    self.configurations_cache = ClusterConfigurationCache(self.config.cluster_cache_dir)
    self.alert_definitions_cache = ClusterAlertDefinitionsCache(self.config.cluster_cache_dir)
    # Components below receive this initializer itself and pull their
    # dependencies from it; keep them after the caches they rely on.
    self.configuration_builder = ConfigurationBuilder(self)
    self.stale_alerts_monitor = StaleAlertsMonitor(self)
    self.server_responses_listener = ServerResponsesListener(self)
    self.file_cache = FileCache(self.config)
    self.customServiceOrchestrator = CustomServiceOrchestrator(self)
    self.hooks_orchestrator = HooksOrchestrator(self)
    self.recovery_manager = RecoveryManager(self)
    self.commandStatuses = CommandStatusDict(self)
    self.init_threads()
  def init_threads(self):
    """
    Initialize thread objects
    """
    # Each of these is a thread-like worker driven elsewhere; they are only
    # constructed here, not started.
    self.component_status_executor = ComponentStatusExecutor(self)
    self.action_queue = ActionQueue(self)
    self.alert_scheduler_handler = AlertSchedulerHandler(self)
    self.command_status_reporter = CommandStatusReporter(self)
    self.host_status_reporter = HostStatusReporter(self)
    self.alert_status_reporter = AlertStatusReporter(self)
    self.heartbeat_thread = HeartbeatThread.HeartbeatThread(self)
  @property
  def connection(self):
    # The active server connection. NOTE(review): self._connection is
    # presumably assigned by the heartbeat/connection code elsewhere; until
    # then (or after a disconnect) accessing this property raises.
    try:
      return self._connection
    except AttributeError:
      """
      Can be a result of race condition:
      begin sending X -> got disconnected by HeartbeatThread -> continue sending X
      """
      raise ConnectionIsAlreadyClosed("Connection to server is not established")
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def generateTrees(self, n: int) -> List[TreeNode]:
        """Return every structurally unique BST holding the values 1..n."""
        if n == 0:
            return []

        def build(low, high):
            # All BSTs whose node values are exactly low..high; the empty
            # range is represented by a single None child.
            if low > high:
                return [None]
            trees = []
            for root_val in range(low, high + 1):
                # Every combination of a left subtree from the smaller values
                # and a right subtree from the larger ones is a distinct BST.
                for left_sub in build(low, root_val - 1):
                    for right_sub in build(root_val + 1, high):
                        root = TreeNode(root_val)
                        root.left = left_sub
                        root.right = right_sub
                        trees.append(root)
            return trees

        return build(1, n)
|
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
import scipy
from scipy import signal

# Render through the pgf backend so the figure can be embedded in LaTeX;
# the rcParams keep fonts consistent with a pdflatex document.
matplotlib.use("pgf")
plt.rcParams.update({
    "pgf.texsystem": "pdflatex",
    "font.family": "serif",
    "font.size": 6,
    "legend.fontsize": 5,
    "ytick.labelsize": 4,
    "text.usetex": True,
    "pgf.rcfonts": False
})

plt.figure(figsize=(2.65, 1.5))
data_path = "data/multi_walker/"


def load_curve(csv_name):
    """Return one run's (episodes_total, episode_reward_mean) columns as an array."""
    frame = pd.read_csv(os.path.join(data_path, csv_name))
    return frame[['episodes_total', "episode_reward_mean"]].to_numpy()


def plot_failed(label, color, linestyle):
    """Legend-only entry for a method that never learned: a line drawn far
    below the visible y-range, so only its legend swatch is shown."""
    plt.plot(np.array([0, 60000]), np.array([-1e5, -1e5]),
             label=label, linewidth=0.6, color=color, linestyle=linestyle)


plot_failed('Rainbow DQN', 'tab:blue', (0, (3, 3)))

curve = load_curve('sa_ppo.csv')
smoothed = scipy.signal.savgol_filter(curve[:, 1], int(len(curve[:, 1])/30), 5)
plt.plot(curve[:, 0], smoothed, label='PPO', linewidth=0.6, color='tab:orange', linestyle=(0, (5, 2, 1, 2)))

curve = load_curve('sa_impala.csv')
plt.plot(curve[:, 0], curve[:, 1], label='IMPALA', linewidth=0.6, color='tab:green', linestyle='solid')

plot_failed('ApeX DQN', 'tab:brown', (0, (1, 1)))

curve = load_curve('sa_a2c.csv')
# NOTE: the '--' format string is overridden by the explicit linestyle kwarg;
# kept for fidelity with the original call.
plt.plot(curve[:, 0], curve[:, 1], '--', label='A2C', linewidth=0.6, color='tab:purple', linestyle=(0, (3, 3)))

curve = load_curve('sa_apex_ddpg.csv')
plt.plot(curve[:, 0], curve[:, 1], label='ApeX DDPG', linewidth=0.6, color='tab:brown', linestyle=(0, (5, 2, 1, 2)))

curve = load_curve('sa_sac.csv')
smoothed = scipy.signal.savgol_filter(curve[:, 1], int(len(curve[:, 1])/30), 4)
plt.plot(curve[:, 0], smoothed, label='SAC', linewidth=0.6, color='tab:pink', linestyle='solid')

curve = load_curve('sa_td3.csv')
plt.plot(curve[:, 0], curve[:, 1], label='TD3', linewidth=0.6, color='tab:olive', linestyle=(0, (1, 1)))

plot_failed('DQN', 'tab:cyan', (0, (3, 3)))

curve = load_curve('sa_ddpg.csv')
smoothed = scipy.signal.savgol_filter(curve[:, 1], int(len(curve[:, 1])/30)-1, 5)
plt.plot(curve[:, 0], smoothed, label='DDPG', linewidth=0.6, color='steelblue', linestyle=(0, (5, 2, 1, 2)))

# Random-policy baseline at its measured mean reward.
plt.plot(np.array([0, 60000]), np.array([-102.05, -102.05]), label='Random', linewidth=0.6, color='red', linestyle=(0, (1, 1)))

plt.xlabel('Episode', labelpad=1)
plt.ylabel('Average Total Reward', labelpad=1)
plt.title('Multiwalker')
plt.xticks(ticks=[10000, 20000, 30000, 40000, 50000], labels=['10k', '20k', '30k', '40k', '50k'])
plt.xlim(0, 60000)
plt.yticks(ticks=[-150, -100, -50, 0], labels=['-150', '-100', '-50', '0'])
plt.ylim(-200, 50)
plt.tight_layout()
plt.legend(loc='lower center', ncol=3, labelspacing=.2, columnspacing=.25, borderpad=.25, bbox_to_anchor=(0.5, -0.75))
plt.margins(x=0)
plt.savefig("SAMultiwalkerGraph_camera.pgf", bbox_inches='tight', pad_inches=.025)
plt.savefig("SAMultiwalkerGraph_camera.png", bbox_inches='tight', pad_inches=.025, dpi=600)
import os
import pygame
import eng.interface as interface
import eng.foreground_object as foreground_object
import eng.box as box
import eng.globs as globs
import eng.settings as settings
import eng.font as font
import eng.pokemon as pokemon
from . import summary_screen
import eng.data as data
import eng.script_engine as script_engine
from . import resources
from . import common
from eng.constants.buttons import *
#contexts
CX_PAUSE = 0
CX_BAG_USEITEM = 1
CX_BAG_GIVEITEM = 2
CX_BATTLE_CHOOSENEW = 3
#party screen states
PS_NORMAL = 0
PS_SWITCH = 1
PS_WAITFORDISAPPEAR = 2
PS_WAITFORAPPEAR = 3
#party box states
PB_NORMAL = 0
PB_SWITCH = 1
PB_DISAPPEAR = 2
PB_APPEAR = 3
#mini menu options
MM_SUMMARY = 0
MM_SWITCH = 1
MM_ITEM = 2
MM_CANCEL = 3
MM_FIELDEFFECT = 4
#directory for fieldmoves
FIELDMOVES = {}
#formatting
MAINBOX_ORIGIN = (17, 50)
SIDEBOX_ORIGIN = (105, 7)
#number of frames a box takes to appear/disappear
SWITCHTIME = 10
#messages
MSG_CHOOSE = "Choose a pokemon."
MSG_MINIMENU = "Do what with this pokemon?"
MSG_MOVE = "Move to where?"
class PartyScreen(interface.Interface):
    """The pokemon party screen."""
    def __init__(self, screen, context, game):
        """
        Create the menu and the initial boxes.
        screen - the screen to blit to.
        context - the (context, caller) tuple we've been called from.
        game - the game to show about.
        """
        #init
        fn = resources.getFilename(resources.I_PARTY)
        self.partyNode = data.getTreeRoot(fn, "Party config.")
        fn = self.partyNode.getAttr("back", data.D_FILENAME)
        self.transparency = self.partyNode.getAttr("transparency", data.D_INT3LIST)
        self.init(screen, background=fn,
                  transparency=self.transparency)
        #store variables for later
        #context is a CX_* id; caller is the screen object which invoked us
        self.context, self.caller = context
        self.game = game
        self.party = game.party
        #create script engine
        self.scriptEngine = script_engine.ScriptEngine()
        #font
        fn = self.partyNode.getAttr("font", data.D_FILENAME)
        self.font = font.Font(fn)
        fn = self.partyNode.getAttr("boxfont", data.D_FILENAME)
        self.boxFont = font.Font(fn)
        #create boxes list
        self.boxes = []
        #create the main box using the first poke in the party
        b = self.createBox(0)
        self.boxes.append(b)
        #create boxes for each member of the party
        self.sideboxHeight = 0
        for i in range(1, len(self.party)):
            b = self.createBox(i)
            self.boxes.append(b)
            self.sideboxHeight = b.height+1 #so subsequent calls will know how far apart the boxes are
        #create empty boxes for any unused slots
        for i in range(len(self.party), 6):
            b = EmptyBox(self, self.partyNode)
            self.addWidget(b, (SIDEBOX_ORIGIN[0], SIDEBOX_ORIGIN[1]+(self.sideboxHeight*(i-1))))
            self.boxes.append(b)
            self.sideboxHeight = b.height+1
        #create message
        self.message = interface.Message(self, MSG_CHOOSE, resources.BOXPATH, width=((self.width*2)/3),
                                         padding=4)
        self.message.setPosition((0, self.height), interface.SW)
        self.addWidget(self.message)
        #start with the first pokemon
        self.current = 0
        self.currentBox = None
        self.giveFocus(self.boxes[self.current])
        self.miniMenu = None
        self.foregroundObject = None
        #start in normal state; init switchFrom so it always exists
        self.status = PS_NORMAL
        self.switchFrom = 0
    def createBox(self, i):
        """
        Create and add the box widget for party slot i.
        Slot 0 gets the large MainBox; the rest get SideBoxes stacked
        on the right, spaced by sideboxHeight.
        """
        if i == 0:
            b = MainBox(self, self.partyNode, self.party[0], font=self.boxFont)
            self.addWidget(b, MAINBOX_ORIGIN)
            return b
        else:
            b = SideBox(self, self.partyNode, self.party[i], font=self.boxFont)
            pos = (SIDEBOX_ORIGIN[0],
                   SIDEBOX_ORIGIN[1]+(self.sideboxHeight*(i-1)))
            self.addWidget(b, pos)
            return b
    def runScript(self, s):
        """
        Have the script engine run a script.
        s - the script to run.
        """
        #run the script, with self as caller
        self.scriptEngine.run(s, self)
    def onInputButton(self, button):
        """
        Process a button press.
        button - the button which was pressed.
        """
        #if we have a foreground object, send the button to that
        if self.foregroundObject is not None:
            self.foregroundObject.inputButton(button)
            return
        #else if we're sending input to a widget, send it on
        if self.miniMenu is not None:
            if self.miniMenu.busy:
                self.miniMenu.onInputButton(button)
                return
            else:
                self.miniMenu = None
        #otherwise process it ourselves
        #if we're in normal state:
        #A should open a minimenu
        #B should quit
        #arrows should move the focus
        if self.status == PS_NORMAL:
            if button == BT_A:
                if self.context == CX_PAUSE:
                    self.miniMenu = self.createMiniMenu(self.party[self.current])
                    self.miniMenu.setPosition((self.width-4, self.height-4), interface.SE)
                    self.addWidget(self.miniMenu)
                    self.message.text = MSG_MINIMENU
                elif self.context == CX_BAG_USEITEM:
                    #BUGFIX: self.context is the CX_* id (an int), not the
                    #(context, caller) tuple - the caller screen is self.caller
                    self.party[self.current].useItemOn(self.caller.getSelectedItem())
                    self.caller.decreaseSelectedItem()
                    self.caller.miniMenu.destroy()
                    self.busy = False
                elif self.context == CX_BAG_GIVEITEM:
                    #give the selected bag item to the pokemon, returning any
                    #previously held item to the bag
                    poke = self.party[self.current]
                    oldItem = poke.heldItem
                    poke.heldItem = self.caller.getSelectedItem()
                    self.caller.decreaseSelectedItem()
                    print("Gave %s the %s" % (poke.getName(), poke.heldItem.name))
                    if oldItem is not None:
                        self.caller.bag.add(oldItem)
                        self.caller.pocket.updateLists()
                        print("Recieved %s from %s and put in bag" % (poke.getName(), oldItem.name))
                    self.caller.miniMenu.destroy()
                    self.busy = False
            elif button == BT_B:
                self.busy = False
            elif button == BT_UP:
                if self.current > 1:
                    self.current -= 1
                    self.giveFocus(self.boxes[self.current])
            elif button == BT_DOWN:
                if self.current < len(self.party)-1:
                    self.current += 1
                    self.giveFocus(self.boxes[self.current])
            elif button == BT_LEFT:
                self.current = 0
                self.giveFocus(self.boxes[self.current])
            elif button == BT_RIGHT:
                if (self.current == 0) and (len(self.party) > 1):
                    self.current = 1
                    self.giveFocus(self.boxes[self.current])
        #if we're waiting for a switch:
        #A should start the switch
        #B should cancel the switch, going back to the minimenu
        #arrows should change the switch target
        elif self.status == PS_SWITCH:
            if button == BT_A:
                self.doSwitch()
            elif button == BT_B:
                self.boxes[self.current].status = PB_NORMAL
                self.boxes[self.switchFrom].status = PB_NORMAL
                self.status = PS_NORMAL
                self.message.text = MSG_CHOOSE
            elif button == BT_UP:
                if self.current > 1:
                    self.boxes[self.current].status = PB_NORMAL
                    self.current -= 1
                    self.boxes[self.current].status = PB_SWITCH
            elif button == BT_DOWN:
                if self.current < len(self.party)-1:
                    self.boxes[self.current].status = PB_NORMAL
                    self.current += 1
                    self.boxes[self.current].status = PB_SWITCH
            elif button == BT_LEFT:
                self.boxes[self.current].status = PB_NORMAL
                self.current = 0
                self.boxes[self.current].status = PB_SWITCH
            elif button == BT_RIGHT:
                if (self.current == 0) and (len(self.party) > 1):
                    self.boxes[self.current].status = PB_NORMAL
                    self.current = 1
                    self.boxes[self.current].status = PB_SWITCH
            #if after processing we're still waiting for the switch,
            #make sure the first poke is highlighted
            if self.status == PS_SWITCH:
                self.boxes[self.switchFrom].status = PB_SWITCH
    def createMiniMenu(self, poke):
        """
        Build the minimenu for poke: any usable field moves first, then
        Summary/Switch/Item/Cancel. Each choice maps to an (MM_* id, ...) tuple
        consumed by miniMenuChoose.
        """
        #lazily populate the field move scripts cache on first use
        if len(FIELDMOVES) == 0:
            root = data.getTreeRoot(globs.FIELDEFFECTS, "Field effects global.")
            for effectNode in root.getChildren("fieldeffect"):
                scriptNode = effectNode.getChild("script")
                FIELDMOVES[effectNode.getAttr("id", data.D_STRING)] = script_engine.scriptFromNode(scriptNode)
        #work out what choices we have
        choices = []
        for m in FIELDMOVES:
            for move in poke.moves:
                if move is not None:
                    if move.moveId == m:
                        choices.append((move.name, (MM_FIELDEFFECT, FIELDMOVES[m])))
                        break
        choices.append(("Summary", (MM_SUMMARY,)))
        choices.append(("Switch", (MM_SWITCH,)))
        choices.append(("Item", (MM_ITEM,)))
        choices.append(("Cancel", (MM_CANCEL,)))
        return interface.MiniMenu(self, choices, resources.BOXPATH, callback=self.miniMenuChoose,
                                  border=7)
    def miniMenuChoose(self, arg):
        """Callback for the minimenu; arg is the choice tuple built in createMiniMenu."""
        choice = arg[0]
        if choice == MM_SUMMARY:
            #create the screen and set it as the foreground object
            self.foregroundObject = summary_screen.SummaryScreen(self._screen, (summary_screen.CX_PARTY, self), self.game, self.current)
        elif choice == MM_SWITCH:
            self.startSwitch()
        elif choice == MM_CANCEL:
            self.miniMenu.destroy()
            self.message.text = MSG_CHOOSE
        elif choice == MM_FIELDEFFECT:
            script = arg[1]
            self.runScript(script)
        #NOTE(review): MM_ITEM currently has no handler; choosing Item does nothing.
    def onTick(self):
        """Drive the switch animation state machine each frame."""
        #when the two boxes have slid out, perform the actual party swap and
        #create fresh boxes which slide back in
        if self.status == PS_WAITFORDISAPPEAR:
            if self.boxes[self.switchFrom].counter > SWITCHTIME:
                self.party.switch(self.switchFrom, self.current)
                switchBox = self.createBox(self.switchFrom)
                currentBox = self.createBox(self.current)
                self.boxes[self.switchFrom] = switchBox
                self.boxes[self.current] = currentBox
                switchBox.appear()
                currentBox.appear()
                self.giveFocus(self.boxes[self.current])
                self.status = PS_WAITFORAPPEAR
        #if we're waiting for the boxes to appear, check whether they've finished appearing (both will do it together)
        #if so, we're done waiting and can go back to normal
        elif self.status == PS_WAITFORAPPEAR:
            if self.boxes[self.switchFrom].status == PB_NORMAL:
                self.status = PS_NORMAL
                self.message.text = MSG_CHOOSE
    def giveFocus(self, box):
        """Move the selection highlight from the current box to the given one."""
        if self.currentBox is not None:
            self.currentBox.selected = False
        box.selected = True
        self.currentBox = box
    def startSwitch(self):
        """Start the switch procedure, looking for a second pokemon."""
        #hide the minimenu and take back input control from it
        self.miniMenu.destroy()
        self.message.text = MSG_MOVE
        #store the current selection as the first pokemon to switch
        #set it's status to switch so it highlights
        self.switchFrom = self.current
        self.boxes[self.switchFrom].status = PB_SWITCH
        #set ourself to switch status
        self.status = PS_SWITCH
    def doSwitch(self):
        """Perform the switch."""
        #if they're on the original pokemon, no switch and go back to normal
        if self.switchFrom == self.current:
            self.boxes[self.current].status = PB_NORMAL
            self.status = PS_NORMAL
            #restore the prompt, consistent with the B-button cancel path
            self.message.text = MSG_CHOOSE
        #otherwise have the two relevant boxes disappear and start waiting for them to do so
        else:
            self.boxes[self.current].disappear()
            self.boxes[self.switchFrom].disappear()
            self.status = PS_WAITFORDISAPPEAR
class PartyBox(interface.Widget):
    """Base class for the individual pokemon boxes on the party screen."""
    def __init__(self, parent, partyNode, poke, **kwargs):
        """
        Store information and create the icon.
        parent - the parent widget.
        partyNode - the <party> menu node.
        poke - the pokemon this box is relevant to.
        NOTE: subclasses must set self.transparency before calling this.
        """
        self.init(parent, **kwargs)
        #keep references used by the widgets below
        self.poke = poke
        self.speciesNode = poke.speciesNode
        #animated species icon, top-left of the box
        iconNode = self.speciesNode.getChild("graphics").getChild("icon")
        iconPath = iconNode.getAttr("file", data.D_FILENAME)
        self.icon = interface.AnimatedImage(self, iconPath, 2, ticksPerFrame=10,
                                            transparency=self.transparency)
        self.addWidget(self.icon, (0,0))
        #hp bar (positioned later by the subclass)
        barPath = partyNode.getAttr("hpbar", data.D_FILENAME)
        self.hpBar = common.HpBar(self, poke.stats[pokemon.ST_HP], value=poke.currentHP,
                                  background=barPath,
                                  transparency=self.transparency,
                                  z=-1)
        #text labels: name, level and current/max hp
        self.nameLabel = interface.Label(self, poke.getName())
        self.lvlLabel = interface.Label(self, f"Lv{poke.level}")
        self.hpLabel = interface.Label(self, "%i/%i" % (poke.currentHP, poke.stats[pokemon.ST_HP]))
        #held-item marker
        itemPath = partyNode.getAttr("item", data.D_FILENAME)
        self.itemIcon = interface.Image(self, itemPath, transparency=self.transparency)
        #initial state
        self._status = PB_NORMAL
        self.counter = 0
        self._selected = False
    @property
    def selected(self):
        """Whether this box currently has the selection highlight."""
        return self._selected
    @selected.setter
    def selected(self, val):
        #the highlight overlay simply mirrors the flag
        self._selected = val
        self.selectedBox.visible = self._selected
    @property
    def status(self):
        """The PB_* state of this box."""
        return self._status
    @status.setter
    def status(self, val):
        self._status = val
        #the switch overlay is shown in every state except PB_NORMAL
        self.switchBox.visible = (val != PB_NORMAL)
    def disappear(self):
        """Begin the slide-out animation (driven by the subclass onTick)."""
        self.status = PB_DISAPPEAR
        self.counter = 0
        self.basePosition = self.position
class MainBox(PartyBox):
    """The large box for party slot 0; slides off to the left when switching."""
    def __init__(self, parent, partyNode, poke, **kwargs):
        """
        Create the main box and lay out its child widgets.
        parent - the parent widget.
        partyNode - the <party> menu node.
        poke - the pokemon shown in this box.
        """
        mainNode = partyNode.getChild("main")
        fn = mainNode.getAttr("file", data.D_FILENAME)
        #transparency must be set before PartyBox.__init__ uses it
        self.transparency = partyNode.getAttr("transparency", data.D_INT3LIST)
        PartyBox.__init__(self, parent, partyNode, poke, background=fn,
                          transparency=self.transparency,
                          **kwargs)
        self.border = 3
        self.lineBuffer = 1
        #selection highlight overlay (hidden until selected)
        fn = mainNode.getAttr("selected", data.D_FILENAME)
        self.selectedBox = interface.Image(self, fn, transparency=self.transparency)
        self.addWidget(self.selectedBox, (0,0))
        self.selectedBox.visible = False
        #switch highlight overlay (hidden until a switch is pending)
        fn = mainNode.getAttr("switch", data.D_FILENAME)
        self.switchBox = interface.Image(self, fn, transparency=self.transparency,
                                         z=-1)
        self.addWidget(self.switchBox, (0,0))
        self.switchBox.visible = False
        #name and level on the left of the icon, hp bar and hp text below
        self.addWidget(self.nameLabel, (32+self.border, self.border))
        pos = (self.nameLabel.position[0],
               self.nameLabel.position[1]+self.nameLabel.height+self.lineBuffer)
        self.addWidget(self.lvlLabel, pos)
        pos = (self.width-self.border,
               self.lvlLabel.position[1]+self.lvlLabel.height+self.lineBuffer)
        self.addWidget(self.hpBar, pos, anchor=interface.NE)
        pos = (self.width-self.border,
               self.hpBar.position[1]+self.hpBar.height+self.lineBuffer)
        self.addWidget(self.hpLabel, pos, anchor=interface.NE)
    def appear(self):
        """Begin the slide-in animation from off-screen left."""
        self.status = PB_APPEAR
        self.counter = 0
        self.basePosition = self.position
        distance = self.basePosition[0]+self.width
        self.position = (self.basePosition[0]-distance, self.basePosition[1])
    def onTick(self):
        """Advance the slide animation one frame."""
        self.counter += 1
        if self.counter >= 600:
            #keep the idle counter bounded
            self.counter = 0
        if self.status == PB_DISAPPEAR:
            #BUGFIX: measure the slide from the animation's start position,
            #matching appear() and SideBox; the old code used the current x,
            #which shrank each frame and made the slide non-linear.
            distance = self.basePosition[0]+self.width
            offset = (distance*self.counter)/SWITCHTIME
            self.position = (self.basePosition[0]-offset, self.basePosition[1])
            if self.counter > SWITCHTIME:
                self.destroy()
        elif self.status == PB_APPEAR:
            distance = self.basePosition[0]+self.width
            offset = (distance*(SWITCHTIME-self.counter))/SWITCHTIME
            self.position = (self.basePosition[0]-offset, self.basePosition[1])
            if self.counter > SWITCHTIME:
                #snap exactly back to the resting position
                self.position = self.basePosition
                self.status = PB_NORMAL
class SideBox(PartyBox):
    """A compact box for party slots 1-5; slides off to the right when switching."""
    def __init__(self, parent, partyNode, poke, **kwargs):
        """
        Create the side box and lay out its child widgets.
        parent - the parent widget.
        partyNode - the <party> menu node.
        poke - the pokemon shown in this box.
        """
        sideNode = partyNode.getChild("side")
        backPath = sideNode.getAttr("file", data.D_FILENAME)
        #transparency must be set before PartyBox.__init__ uses it
        self.transparency = partyNode.getAttr("transparency", data.D_INT3LIST)
        PartyBox.__init__(self, parent, partyNode, poke, background=backPath,
                          transparency=self.transparency,
                          **kwargs)
        self.border = 3
        self.lineBuffer = 1
        #selection highlight overlay (hidden until selected)
        selPath = sideNode.getAttr("selected", data.D_FILENAME)
        self.selectedBox = interface.Image(self, selPath, transparency=self.transparency)
        self.addWidget(self.selectedBox, (0,0))
        self.selectedBox.visible = False
        #switch highlight overlay (hidden until a switch is pending)
        swPath = sideNode.getAttr("switch", data.D_FILENAME)
        self.switchBox = interface.Image(self, swPath, transparency=self.transparency,
                                         z=-1)
        self.addWidget(self.switchBox, (0,0))
        self.switchBox.visible = False
        #name/level on the left, hp bar and hp text right-aligned
        self.addWidget(self.nameLabel, (32+self.border, self.border))
        lvlPos = (self.nameLabel.position[0],
                  self.nameLabel.position[1]+self.nameLabel.height+self.lineBuffer)
        self.addWidget(self.lvlLabel, lvlPos)
        self.addWidget(self.hpBar, (self.width-self.border, self.border),
                       anchor=interface.NE)
        hpPos = (self.width-self.border,
                 self.hpBar.position[1]+self.hpBar.height+self.lineBuffer)
        self.addWidget(self.hpLabel, hpPos, anchor=interface.NE)
    def appear(self):
        """Begin the slide-in animation from off-screen right."""
        self.status = PB_APPEAR
        self.counter = 0
        self.basePosition = self.position
        travel = self.parent.width-self.basePosition[0]
        self.position = (self.basePosition[0]+travel, self.basePosition[1])
    def onTick(self):
        """Advance the slide animation one frame."""
        self.counter += 1
        if self.counter >= 600:
            #keep the idle counter bounded
            self.counter = 0
        if self.status == PB_DISAPPEAR:
            travel = self.parent.width-self.basePosition[0]
            step = (travel*self.counter)/SWITCHTIME
            self.position = (self.basePosition[0]+step, self.basePosition[1])
            if self.counter > SWITCHTIME:
                self.destroy()
        elif self.status == PB_APPEAR:
            travel = self.parent.width-self.basePosition[0]
            step = (travel*(SWITCHTIME-self.counter))/SWITCHTIME
            self.position = (self.basePosition[0]+step, self.basePosition[1])
            if self.counter > SWITCHTIME:
                #snap exactly back to the resting position
                self.position = self.basePosition
                self.status = PB_NORMAL
class EmptyBox(interface.Widget):
    """Placeholder box drawn for an unoccupied party slot."""
    def __init__(self, parent, partyNode, **kwargs):
        """
        parent - the parent widget.
        partyNode - the <party> menu node.
        """
        emptyNode = partyNode.getChild("empty")
        self.transparency = partyNode.getAttr("transparency", data.D_INT3LIST)
        backPath = emptyNode.getAttr("file", data.D_FILENAME)
        self.init(parent, background=backPath,
                  transparency=self.transparency,
                  **kwargs)
|
def isprime(number):
    """Trial-division primality test.

    Returns:
        True when number is prime.
        (False, p) when number is composite, where p is its smallest factor
        (matching the original's composite return value).
        False for numbers below 2, which are not prime.

    The original fell off the end for primes (implicitly returning None) and
    treated 0, 1 and negatives the same way; both cases now return explicitly.
    """
    if number < 2:
        return False
    p = 2
    # A composite n always has a factor <= sqrt(n), so p*p <= n suffices.
    while p * p <= number:
        if number % p == 0:
            return False, p
        p += 1
    return True
|
from winsound import Beep  # Windows-only: Beep(frequency_hz, duration_ms)
from time import sleep

# Parallel tables: latin[i] translates to morse[i]; a space becomes '/'.
latin=['0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z',' ']
morse=['-----','.----','..---','...--','....-','.....','-....','--...','---..','----.','.-','-...','-.-.','-..','.','..-.','--.','....','..','.---','-.-','.-..','--','-.','---','.--.','--.-','.-.','...','-','..-','...-','.--','-..-','-.--','--..','/']

# Build a direct lookup table instead of the original linear search, which
# ran past the end of `latin` (IndexError) for any character not in the
# table (lowercase letters, punctuation, ...).
MORSE_BY_CHAR = dict(zip(latin, morse))

phrase = input('Entrez une phrase en majuscule : ')  # user enters an uppercase phrase

# Translate the phrase; characters with no morse equivalent are skipped.
p_morse = ''
for char in phrase:
    code = MORSE_BY_CHAR.get(char)
    if code is not None:
        p_morse += code + ' '

# Play the translation symbol by symbol, printing it as we go.
# Dots and dashes are tones of different pitch/length; spaces and slashes
# are silent pauses between letters and words.
for symbol in p_morse:
    if symbol == '.':
        sleep(0.08)
        Beep(330, 80)
    elif symbol == '-':
        sleep(3 * 0.08)
        Beep(220, 240)
    elif symbol == ' ':
        sleep(3 * 0.09)
    elif symbol == '/':
        sleep(7 * 0.08)
    print(symbol, end='')
|
import numpy as np
import pandas as pd
from nltk import word_tokenize
from sklearn.metrics.pairwise import euclidean_distances
import json
from tools.exceptions import SummarySizeTooSmall, TextTooLong
from tools.tools import sentence_tokenize
class EmbeddingsBasedSummary:
"""
This algorithm is following the idea presented in
Kobayashi, Hayato, Masaki Noguchi, and Taichi Yatsuka.
"Summarization based on embedding distributions."
Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing. 2015.
"""
max_sentences = 300
def __init__(self, text, dictionary_file=None, dictionary=None):
self.word_counter = lambda s: len([w for w in word_tokenize(s) if len(w) > 1])
assert (dictionary_file != None or dictionary != None)
if dictionary != None:
self.dictionary = dictionary
else:
self.dictionary = np.load(dictionary_file).item()
self.reversed_dictionary = dict(zip(self.dictionary.values(), self.dictionary.keys()))
self.sentences, self.words = self.split_document(text)
self.r = 0.75 # scaling factor, must be positive
# when searching argmax, indexes with this value are not selected
# becouse it is so big
self.max_distance = -10000000
with open('config.json', 'r') as f:
config = json.load(f)
self.embeddings_file = config["embeddings_file"]
self.embeddings = np.load(self.embeddings_file)
self.distances, self.distance_index_mapping = self.calculate_distances(self.words)
self.reversed_distance_index_mapping = dict(zip(self.distance_index_mapping.values(), \
self.distance_index_mapping.keys()))
def split_document(self, text, minimum_sentence_length=5):
sentences = sentence_tokenize(text)
words = word_tokenize(text, language="finnish")
words = np.array([w for w in words if len(w) > 1])
words = np.array([w.replace('.','').replace(',','') for w in words])
words = np.array([w.lower() for w in words if w.lower() in self.dictionary]) # ATTENTION! Skipping unknown words here.
words = np.unique(words) # considering unique is fine, becouse we will consider THE nearests words, so duplicates are useless
sentences_without_newlines = []
for s in sentences:
s = s.strip()
if len(s) < minimum_sentence_length:
continue
if "\n" in s:
for split in s.split("\n"):
split = split.strip()
if len(split) >= minimum_sentence_length:
sentences_without_newlines.append(split)
else:
sentences_without_newlines.append(s)
sentences = np.array(sentences_without_newlines)
return pd.DataFrame({'position': np.arange(len(sentences)), 'sentences': sentences}), words.tolist()
def nearest_neighbors(self, distances, candidate_summary_indexes):
index_mapping = dict(enumerate(candidate_summary_indexes))
candidate_document_distances = distances[:, candidate_summary_indexes]
# before selecting minimun distances, let's avoid selecting, that the nearest one is the point himself
cand_sums = candidate_document_distances[candidate_summary_indexes]
np.fill_diagonal(cand_sums, 1000) # let's put big value so that diagonal will not be chosen
candidate_document_distances[candidate_summary_indexes] = cand_sums
nearests = candidate_document_distances.argmin(axis=1)
distances = candidate_document_distances.min(axis=1)
return distances, np.vectorize(index_mapping.get)(nearests) #np.array([index_mapping[i] for i in nearests])
def nearest_neighbor_objective_function(self, candidate_summary_words):
"""
Counts the distance between candidate_summary and document (words of original document).
:param candidate_summary: list of words => current summary_methods in iterative optimisation process
:return: negative distance between candidate and sentences
"""
if candidate_summary_words.shape[0] == 0:
return self.max_distance
try:
nearests_document_word_distances, _ = self.nearest_neighbors(self.distances, candidate_summary_words.astype(int))
return -nearests_document_word_distances.sum()
except Exception:
print("Error with " + str(candidate_summary_words))
def precalcule_sentence_distances(self, sentences_left):
sentence_word_indexes = self.precalcule_sentence_indexes(sentences_left)
calcul_distance = lambda indexes : self.nearest_neighbor_objective_function(indexes)
sentence_distances = np.vectorize(calcul_distance)(sentence_word_indexes)
return sentence_distances
def precalcule_sentence_indexes(self, sentences):
assert len(sentences) > 0, "Provide at least one sentence."
filter_dictionary_words = lambda sentence: np.array([w for w in word_tokenize(sentence.lower(), language="finnish") if w in self.dictionary])
get_index = lambda word : self.distance_index_mapping[self.dictionary[word]]
get_sentence_indexes = lambda sentence: np.vectorize(get_index, otypes=[np.ndarray])(filter_dictionary_words(sentence))
return np.vectorize(get_sentence_indexes,otypes=[np.ndarray])(sentences)
def modified_greedy_algrorithm(self, summary_size):
"""
Implementation of Algorithm 1 in chapter 3
:param summary_size: the size of summary_methods to be made
:return: summary_methods
"""
N = self.sentences.shape[0]
print("precalcule")
sentence_indexes = self.precalcule_sentence_indexes(self.sentences['sentences'].values)
sentence_distances = self.precalcule_sentence_distances(self.sentences['sentences'].values)
sentence_lengths = self.sentences['lengths']
candidate_summary = np.array([], dtype=int) # C in algorithm
handled = np.array([], dtype=int) # (C \ handled) in algorithm, in other words : list of indexes not in C (C is thing of algiruthm)
candidate_summary_words = np.array([], dtype=int)
candidate_word_count = 0
print("iterate")
while(handled.shape[0] < N):
s_candidates = np.array([
self.nearest_neighbor_objective_function(
np.append(candidate_summary_words, sentence_indexes[i])
) / (sentence_lengths[i] ** self.r)
if i not in handled else 1000
for i in range(N)])
distance_min_border = s_candidates.min() - 1000 # indexes with this value cannot be maximum
s_candidates[s_candidates == 1000] = distance_min_border # this way we dont choose same twice
s_star_i = s_candidates.argmax()
s_star = self.sentences['sentences'][s_star_i]
s_star_len = sentence_lengths[s_star_i]
if candidate_word_count + s_star_len <= summary_size:
candidate_summary = np.append(candidate_summary, s_star_i)
candidate_word_count += s_star_len
candidate_summary_words = np.append(candidate_summary_words, sentence_indexes[s_star_i])
handled = np.append(handled, s_star_i)
print("post processing")
# then let's consider sentence, that is the best all alone, algorithm line 6
s_candidates = np.array([sentence_distances[i] if sentence_lengths[i] <= summary_size else self.max_distance \
for i in range(len(sentence_distances))])
s_star_i = s_candidates.argmax()
if len(candidate_summary) == 0: # summary_size smaller than any of sentences
return np.array([]),np.array([]), np.array([])
# and now choose eiher the best sentence or combination, algorithm line 7
if s_candidates.max() > self.nearest_neighbor_objective_function(candidate_summary_words):
final_summary = np.array([s_star_i])
else:
final_summary = candidate_summary
final_summary = np.sort(final_summary)
positions = final_summary + 1 # index starts from 0 but it is better show from 1
sentences = self.sentences['sentences'].iloc[final_summary]
summary_indexes = np.array(np.unique(np.hstack(sentence_indexes[final_summary])), dtype=int)
if len(summary_indexes) == 0:
return sentences,positions,np.array([])
_, nearest_neighbors = self.nearest_neighbors(self.distances, summary_indexes)
get_word = lambda i: self.reversed_dictionary[self.reversed_distance_index_mapping[i]]
nearest_words = np.vectorize(get_word)(nearest_neighbors)
return sentences, positions, nearest_words
def calculate_sentence_lengths(self):
    """Count the words of every sentence and store the counts in the
    'lengths' column of self.sentences, clamped to a minimum of 1
    (lengths are later used as divisors)."""
    lengths = self.sentences['sentences'].apply(self.word_counter)
    # clamp to >= 1 so divisions by a sentence length can never be by zero
    lengths = np.maximum(lengths, np.ones(lengths.shape[0], dtype=int))
    self.sentences['lengths'] = lengths
def summarize(self, word_count = 100,return_words=False):
    """Produce an extractive summary of at most *word_count* words.

    :param word_count: maximum total word count of the summary
    :param return_words: when True, also return the summary's words and
        their nearest-neighbor words
    :return: (sentences, positions) or, with return_words,
        (sentences, positions, word_ids, nearest_word_ids)
    :raises SummarySizeTooSmall: when every sentence exceeds word_count
    :raises TextTooLong: when the text has more sentences than the
        embeddings-based method supports
    """
    self.calculate_sentence_lengths()
    lengths = self.sentences['lengths']
    if (lengths > word_count).all():
        raise SummarySizeTooSmall("None of sentences is shorter than given length, cannot choose any sentences.")
    N = self.sentences.shape[0]
    # cap the input size for the greedy algorithm
    if N > EmbeddingsBasedSummary.max_sentences:
        raise TextTooLong(" " + str(N) + " sentences are too many for the embeddings based method (max "+str(EmbeddingsBasedSummary.max_sentences)+").")
    selected_sentences, positions, nearest_words = self.modified_greedy_algrorithm(word_count)
    if return_words:
        # NOTE(review): nearest_words holds word *strings*, so indexing
        # self.dictionary with them assumes dictionary maps word -> id
        # (consistent with calculate_distances) — confirm
        return selected_sentences, positions, [self.dictionary[w] for w in self.words], \
               [self.dictionary[nw] for nw in nearest_words]
    return selected_sentences, positions
def calculate_distances(self, words):
    """Build the pairwise euclidean distance matrix for the given words.

    :param words: iterable of words present in self.dictionary
    :return: (distance_matrix, dict mapping embedding index -> matrix row)
    """
    word_ids = np.array([self.dictionary[word] for word in words])
    if word_ids.size == 0:
        # nothing to compare: empty matrix, empty index mapping
        return np.array([]), {}
    vectors = self.embeddings[word_ids]
    pairwise = euclidean_distances(vectors, vectors)
    row_of_id = dict(zip(word_ids, np.arange(word_ids.shape[0])))
    return pairwise, row_of_id
# Generated by Django 2.0.1 on 2018-01-03 03:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: make Answer.question_title a primary-key
    foreign key to Question (answers are cascade-deleted with the question)."""
    dependencies = [
        ('quora', '0003_auto_20180102_2209'),
    ]
    operations = [
        migrations.AlterField(
            model_name='answer',
            name='question_title',
            # NOTE(review): primary_key=True on this FK implies at most one
            # Answer per Question — confirm that is intended
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='quora.Question'),
        ),
    ]
|
from django.shortcuts import render
from django.views.generic import ListView, DetailView, CreateView
from .models import Entry
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.shortcuts import redirect
from django.urls import reverse_lazy
from django.views.generic.edit import DeleteView,UpdateView
from django.contrib import messages
# Create your views here.
class HomeView(LoginRequiredMixin, ListView):
    """Paginated list of blog entries, newest first; login required."""
    model = Entry
    template_name = 'entries/index.html'
    context_object_name ="blog_entries"
    ordering = ['-entry_date']
    paginate_by = 5
class EntryView(LoginRequiredMixin, DetailView):
    """Detail page for a single Entry; login required."""
    model = Entry
    template_name = 'entries/entry_detail.html'
class CreateEntryView(LoginRequiredMixin, CreateView):
    """Entry creation form; the logged-in user becomes the author."""
    model = Entry
    template_name = 'entries/create_entry.html'
    fields = ['entry_title', 'entry_text','img']
    def form_valid(self,form):
        # stamp the current user as author before the entry is saved
        form.instance.entry_author = self.request.user
        return super().form_valid(form)
from django.urls import reverse_lazy
class YDeleteView(LoginRequiredMixin, DeleteView):
    """Delete an Entry; only its author may delete it.

    Non-authors are redirected home with an error flash message.
    """
    model = Entry
    success_url = reverse_lazy('blog-home')

    def dispatch(self, request, *args, **kwargs):
        obj = self.get_object()
        if obj.entry_author != self.request.user:
            messages.error(request, 'Document not deleted.')
            return redirect('blog-home')
        # Bug fix: the success flash previously fired on *every* request,
        # so the GET that merely renders the confirmation page already
        # showed "Document deleted.".  Flash only when the delete is
        # actually submitted (POST).
        if request.method == 'POST':
            messages.success(request, 'Document deleted.')
        return super(YDeleteView, self).dispatch(request, *args, **kwargs)
class EditPost(LoginRequiredMixin,UpdateView):
    """Edit an Entry; only its author may edit it."""
    model = Entry
    template_name = 'entries/create_entry.html'
    # NOTE(review): unlike CreateEntryView this omits 'img' — confirm intended
    fields = ['entry_title','entry_text']
    success_url = reverse_lazy('blog-home')
    def dispatch(self, request, *args, **kwargs):
        # only the author may proceed; others bounce home with an error
        obj = self.get_object()
        if obj.entry_author != self.request.user:
            messages.error(request, 'Document does not belong to you')
            return redirect('blog-home')
        return super(EditPost, self).dispatch(request, *args, **kwargs)
|
from pytest_postgresql import factories
# We do custom fixture to have fixed port for easier local connection
postgresql_custom_fixture = factories.postgresql_proc(port=5433)
# connection fixture bound to the custom process fixture above; the schema
# file is loaded into every fresh test database
postgres = factories.postgresql(
    'postgresql_custom_fixture',
    load=['database_schema.sql']
)
|
import pytest
import torch
from torch.autograd import Variable
from pytorch_unet import UNet
def test_invalid_shape():
    """Constructing UNet with an invalid input shape must raise ValueError."""
    with pytest.raises(ValueError):
        UNet(input_shape=(3, 33), layers=3, num_classes=1)
def _assert_unet_output(double_center_features):
    """Shared check: a forward pass keeps shape (batch, num_classes, H, W)."""
    net = UNet(input_shape=(3, 32), layers=3, num_classes=2,
               double_center_features=double_center_features)
    x_var = Variable(torch.randn(10, 3, 32, 32))
    output_size = net(x_var).size()
    assert output_size[0] == 10   # batch preserved
    assert output_size[1] == 2    # one channel per class
    assert output_size[2] == 32   # spatial height preserved
    assert output_size[3] == 32   # spatial width preserved

def test_unet_double_center():
    """Output shape with double_center_features=True (was duplicated code)."""
    _assert_unet_output(True)

def test_unet_no_double_center():
    """Output shape with double_center_features=False."""
    _assert_unet_output(False)
|
# -*- coding: utf-8 -*-
def takefirst(elem):
    """Sort key: the job's processing time on machine A (element index 1)."""
    time_on_a = elem[1]
    return time_on_a
def takesecond(elem):
    """Sort key: the job's processing time on machine B (element index 2)."""
    time_on_b = elem[2]
    return time_on_b
def calc_jobs_cost(jobs_a,jobs_b):
    """Print and return the makespan of running the sequence jobs_a + jobs_b
    through the two-machine flow shop (machine A, then machine B).

    Each job is a tuple (job_id, time_on_A, time_on_B).  temp_calc tracks
    the completion time on machine A; total_cost tracks completion on B,
    which can only start a job once the job has left A and B is free.

    Fix: the makespan is now *returned* as well as printed (previously the
    computed value was discarded, making the function untestable).
    """
    total_cost=0   # completion time on machine B
    temp_calc=0    # completion time on machine A
    for job in jobs_a + jobs_b:
        temp_calc += job[1]
        # B starts this job at max(A-finish-time, B-free-time)
        if temp_calc > total_cost:
            total_cost = temp_calc
        total_cost += job[2]
    print("cost:", total_cost)
    return total_cost
def john_calc(job_nums,jobs):
    """Johnson's rule for 2-machine flow-shop scheduling: partition jobs by
    which machine they are faster on, order each group, then print the
    resulting job sequence and its cost."""
    jobs_a=[]
    jobs_b=[]
    for i in range(job_nums):
        # job i is strictly faster on machine A than on machine B
        if jobs[i][1]<jobs[i][2]:
            # front group: scheduled first, shortest A-time first
            jobs_a.append(jobs[i])
        # job i is at least as fast on machine B
        else:
            # back group: scheduled last, shortest B-time last
            jobs_b.append(jobs[i])
    # front group in ascending time on A
    jobs_a.sort(key=takefirst)
    # back group in ascending time on B ...
    jobs_b.sort(key=takesecond)
    # ... then reversed so B-times descend in the printed order
    jobs_b.reverse()
    print("jobs sequence:",end='')
    # print the front group in order
    for i in range(len(jobs_a)):
        print(jobs_a[i][0],end='')
        print(",",end='')
    # print the back group in order (no trailing comma)
    for i in range(len(jobs_b)):
        print(jobs_b[i][0],end='')
        if i!=len(jobs_b)-1:
            print(",",end='')
    print("")# newline
    calc_jobs_cost(jobs_a,jobs_b)# compute and print the makespan
if __name__=="__main__":
    # job list: (job_id, processing time on machine A, processing time on machine B)
    jobs=[(1,2,5),(2,4,2),(3,3,3),(4,9,1),(5,1,7)]
    # run Johnson's algorithm on the sample jobs
    john_calc(len(jobs),jobs)
|
"""
Exploring trained generator model
"""
import os, sys
import numpy as np
import matplotlib.pyplot as plt
# flint imports
# src imports
import utils
################################################################################
### Interrogate latent space
################################################################################
def interpolate_points(p1, p2, n_steps=10, method='linear'):
    """Uniform interpolation between two points.

    Returns an array of shape (n_steps, <p1/p2 shape>).
    `method` may be 'linear' or 'spherical' (prefix-matched).
    """
    if method.startswith('line'):
        interp = linear_interp
    elif method.startswith('spher'):
        interp = slerp
    else:
        raise ValueError("Argument 'method' must be 'linear' or 'spherical'")
    return np.asarray([interp(ratio, p1, p2)
                       for ratio in np.linspace(0, 1, num=n_steps)])
def append_avg_vector(vectors):
    """Return a copy of `vectors` with their element-wise mean appended
    as one extra row."""
    mean_row = np.mean(vectors, axis=0)
    return np.vstack((vectors, mean_row[np.newaxis]))
def plot_representative_img_set(img_set, label=None, annot_avg=True):
    """Plots selected representative images - assumes average is present
    (i.e. the last image of img_set is the average; it gets an 'avg' caption).
    Returns the matplotlib figure.
    """
    fig = utils.plot_images(img_set, grid_shape=(1, len(img_set)), rescale=True, figsize=(12, 3))
    axes = fig.axes
    if annot_avg:
        # caption the last (average) panel below its axes
        axes[-1].text(0.5, 0, 'avg', transform=axes[-1].transAxes, ha='center', va='top')
    if label is not None:
        # rotated row label along the left edge of the first panel
        axes[0].text(0, 0.5, label, transform=axes[0].transAxes, ha='right', va='center', rotation=90)
    return fig
################################################################################
### Helper utils
################################################################################
def linear_interp(val, lo, hi):
    """Linearly blend `lo` and `hi`: val=0 gives lo, val=1 gives hi."""
    weight_hi = val
    weight_lo = 1 - val
    return weight_lo * lo + weight_hi * hi
def slerp(val, lo, hi):
    """Spherical linear interpolation between `lo` and `hi`.

    Falls back to linear interpolation when the vectors are (anti)parallel,
    where the spherical formula degenerates (L'Hopital / LERP).
    """
    unit_lo = lo / np.linalg.norm(lo)
    unit_hi = hi / np.linalg.norm(hi)
    cos_omega = np.clip(np.dot(unit_lo, unit_hi), -1, 1)
    omega = np.arccos(cos_omega)  # angle between lo and hi
    sin_omega = np.sin(omega)
    if sin_omega == 0:
        return linear_interp(val, lo, hi)
    coeff_lo = np.sin((1 - val) * omega) / sin_omega
    coeff_hi = np.sin(val * omega) / sin_omega
    return coeff_lo * lo + coeff_hi * hi
|
from biokit.io.fastq import FASTQ as FastQ
def set_example1():
    """A 4-base FastQ record; qualities '@ABC' are Phred 31..34 at offset 33."""
    f = FastQ()
    f.identifier = ''
    f.sequence = 'CCCC'
    f.quality = '@ABC'
    return f
def set_example2():
    """An 8-base FastQ record used by the slicing test."""
    f = FastQ()
    f.identifier = '@slicing'
    f.sequence = 'CCCCTTTT'
    f.quality = '@ABC;;;;'
    return f
def test_offset():
    """Quality integers shift with the configured ASCII offset."""
    f = set_example1()
    assert f.get_quality_integer() == [31, 32, 33, 34]
    f.offset = 64
    assert f.get_quality_integer() == [0, 1, 2, 3]
    f.offset = 33  # restore the default offset
def test_quality():
    """Quality <-> error-probability conversions round-trip."""
    f = FastQ()
    assert f.get_quality(0.000000001) == 90
    assert f.error_probability_from_quality(90) == 1e-9
    assert f.quality_from_error_probability(1e-9) == 90
def test_others():
    """Smoke test: printing and self-checking a record must not raise."""
    f = set_example1()
    #assert len(f) == 1
    print(f)
    f.check()
def test_slicing():
    """Slicing a record slices sequence and quality in lockstep."""
    f = set_example2()
    newf = f[2:6]
    assert newf.sequence == 'CCTT'
    assert newf.quality == 'BC;;'
def test_qual():
    """to_qual() renders qualities as space-separated Phred integers."""
    f = set_example1()
    f.quality = '!@;A'
    f.to_qual()
    assert f.to_qual().split("\n")[1] == '0 31 26 32'
#@attr('fixme')
#def test_read_and_plot(self):
# self.f.read(self.f._multiple_fastq_example)
# self.f.plot()
def test_clear():
    """clear() on a fresh record must not raise."""
    f = FastQ()
    f.clear()
|
from model_location import ModelLocation
class ControllerLocation():
    """Thin controller-layer factory around ModelLocation."""
    def create_location(self,
                        location_name,
                        location_genre,
                        location_code):
        """Build and return a ModelLocation from the given attributes."""
        return ModelLocation(location_name, location_genre, location_code)
|
import argparse
from cyy_naive_pytorch_lib.default_config import DefaultConfig
class ExperimentConfig(DefaultConfig):
    """DefaultConfig extended with distributed-training options."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # populated by load_args from the command line (all required)
        self.distributed_algorithm = None
        self.worker_number = None
        self.round = None
    def load_args(self, parser=None):
        """Register the distributed options, then delegate parsing to the base class."""
        if parser is None:
            parser = argparse.ArgumentParser()
        parser.add_argument("--distributed_algorithm", type=str, required=True)
        parser.add_argument("--worker_number", type=int, required=True)
        parser.add_argument("--round", type=int, required=True)
        super().load_args(parser=parser)
def get_config(parser=None) -> ExperimentConfig:
    """Build an ExperimentConfig populated from command-line arguments."""
    config = ExperimentConfig()
    config.load_args(parser=parser)
    return config
|
# Creating the superclass Candidate which has a name and a position
class Candidate:
    """A candidate for an elected position, tracking ranked-choice tallies."""
    def __init__(self, position, name):
        self.name = name
        self.position = position
        # tallies for first- through fourth-choice votes, all starting at zero
        self.votes1 = self.votes2 = self.votes3 = self.votes4 = 0
    def getVotes(self):
        """Return the tallies as a (first, second, third, fourth) tuple."""
        return self.votes1, self.votes2, self.votes3, self.votes4
# Creating the subclasses of Candidate
class GSUOfficer(Candidate):
    """Candidate for GSU officer; .index is its position in the GSUOfficers list."""
    def __init__(self,position,name):
        self.index = 0 # We will assign an index for every candidate
        super().__init__(position,name)
class BSOfficer(Candidate):
    """Candidate for BS officer."""
    def __init__(self,position,name):
        self.index = 0 # We will assign an index for every candidate
        super().__init__(position,name)
class FACHOfficer(Candidate):
    """Candidate for FACH officer."""
    def __init__(self,position,name):
        self.index = 0 # We will assign an index for every candidate
        super().__init__(position,name)
class FEHOfficer(Candidate):
    """Candidate for FEH officer."""
    def __init__(self,position,name):
        self.index = 0 # We will assign an index for every candidate
        super().__init__(position,name)
class President(Candidate):
    """Candidate for president."""
    def __init__(self,position,name):
        self.index = 0 # We will assign an index for every candidate
        super().__init__(position,name)
# Function to show the Presidents
def showPresidents():
    """Print the numbered list of presidential candidates."""
    print("----------------------")
    for i in range(len(Presidents)):
        print(i+1,"-",Presidents[i].name)
    print("----------------------")
# Function to show the GSUOfficers
def showGSUOfficers():
    """Print the numbered list of GSU officer candidates."""
    # NOTE(review): unlike showPresidents, the closing separator line is not
    # printed here (nor in the three functions below) — confirm intentional
    print("----------------------")
    for i in range(len(GSUOfficers)):
        print(i+1,"-",GSUOfficers[i].name)
# Function to show the FEHOfficers
def showFEHOfficers():
    """Print the numbered list of FEH officer candidates."""
    print("----------------------")
    for i in range(len(FEHOfficers)):
        print(i+1,"-",FEHOfficers[i].name)
# Function to show the BSOfficers
def showBSOfficers():
    """Print the numbered list of BS officer candidates."""
    print("----------------------")
    for i in range(len(BSOfficers)):
        print(i+1,"-",BSOfficers[i].name)
# Function to show the FACHOfficers
def showFACHOfficers():
    """Print the numbered list of FACH officer candidates."""
    print("----------------------")
    for i in range(len(FACHOfficers)):
        print(i+1,"-",FACHOfficers[i].name)
# Creating 5 lists for 5 different positions
GSUOfficers = []
Presidents = []
BSOfficers = []
FEHOfficers = []
FACHOfficers = []
# Read the candidates file: each line is "<name>,<position>".
# `with` guarantees the file is closed even on error; rstrip('\n') makes
# the comparison also work for a final line without a trailing newline
# (the old code compared against e.g. "BSOfficer\n" and stored the
# newline inside the candidate's position attribute).
with open("GSUCandidates.txt", "r") as my_file:
    for line in my_file:
        name, position = line.split(",")
        position = position.rstrip("\n")
        if position == "GSUOfficer":
            GSUOfficers.append(GSUOfficer(position, name))
            GSUOfficers[-1].index = len(GSUOfficers) - 1  # candidate's list index
        elif position == "president":
            Presidents.append(President(position, name))
            Presidents[-1].index = len(Presidents) - 1
        elif position == "BSOfficer":
            BSOfficers.append(BSOfficer(position, name))
            # Bug fix: the old code wrote the index onto the *Presidents*
            # list (Presidents[len(BSOfficers)-1].index = ...), corrupting
            # president data and crashing once BSOfficers outgrew Presidents.
            BSOfficers[-1].index = len(BSOfficers) - 1
        elif position == "FACHOfficer":
            FACHOfficers.append(FACHOfficer(position, name))
            FACHOfficers[-1].index = len(FACHOfficers) - 1
        elif position == "FEHOfficer":
            FEHOfficers.append(FEHOfficer(position, name))
            FEHOfficers[-1].index = len(FEHOfficers) - 1
|
import re
def strip_non_alphanumeric(input):
    """Return `input` with every character that is not a letter or digit
    removed (underscores are stripped too).

    The parameter keeps its original name `input` for backward
    compatibility with keyword callers, even though it shadows the builtin.
    Fix: the pattern is now a raw string, so the '\\W' escape no longer
    relies on Python passing unknown escapes through unchanged; re.sub
    also uses the re module's internal pattern cache, so the explicit
    per-call compile was unnecessary.
    """
    return re.sub(r'[\W_]+', '', input)
|
# content: come on !
# author: 十六
# date: 2020/8/5
# - 需求:爬取搜狗指定词条对应的搜索结果页面(简易网页采集器) UA检测
# 这里 需要加上 UA伪装 US:User-Agent(请求网页载体的身份标识)
# 一定要使用 UA伪装
import requests
if __name__ == "__main__":
    # UA spoofing: send a real browser User-Agent so the request passes the
    # site's user-agent check
    headers = {
        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) "
                      "Chrome/84.0.4147.105 Safari/537.36"}
    # base URL; the query-string parameters are supplied separately below
    url = "https://www.sogou.com/web"
    # pack the URL parameters into a dict, matching the GET request
    kw = input("请输入要搜索的名字")
    param = {
        "query": kw}
    # params= carries the URL parameters, headers= carries the spoofed UA
    response = requests.get(url=url, params=param, headers=headers)
    # raw HTML of the search-result page
    page_text = response.text
    file_name = kw + '.html'
    # NOTE(review): assumes the ./text/ directory already exists — confirm
    path = "./text/" + file_name
    with open(path, 'w', encoding="utf-8") as fp:
        fp.write(page_text)
    print(file_name, "数据完成!")
|
# -*- coding: utf-8 -*-
# @Time : 2019-07-19 16:43
# @Author : finupgroup
# @FileName: BERT.py
# @Software: PyCharm
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from keras.callbacks import TensorBoard
from keras_bert import load_trained_model_from_checkpoint, Tokenizer, get_custom_objects
import codecs
import keras
from keras.layers import *
from keras.models import Model
from keras.optimizers import Adam
import numpy as np
# tensorboard --logdir=logs
class OurTokenizer(Tokenizer):
    """Tokenizer that emits exactly one token per input character."""
    def _tokenize(self, text):
        # known characters pass through; spaces map to the untrained
        # '[unused1]' token; everything else becomes '[UNK]'
        return [
            c if c in self._token_dict
            else '[unused1]' if self._is_space(c)
            else '[UNK]'
            for c in text
        ]
def seq_padding(X, padding=0):
    """Right-pad every sequence in X with `padding` up to the length of the
    longest sequence and return the batch as a numpy array.

    Fix: `max` now gets default=0, so an empty batch returns an empty
    array instead of raising ValueError from max() on an empty list.
    """
    lengths = [len(x) for x in X]
    max_len = max(lengths, default=0)
    return np.array([
        np.concatenate([x, [padding] * (max_len - len(x))]) if len(x) < max_len else x
        for x in X
    ])
class data_generator:
    """Infinite, shuffling Keras batch generator over (text, label) pairs.

    Relies on the module-level `tokenizer`, `maxlen` and `num_classes`.
    """
    def __init__(self, data, batch_size=32):
        self.data = data
        self.batch_size = batch_size
        # number of batches per epoch, counting the last partial batch
        self.steps = len(self.data) // self.batch_size
        if len(self.data) % self.batch_size != 0:
            self.steps += 1
    def __len__(self):
        return self.steps
    def __iter__(self):
        while True:
            # reshuffle the sample order every epoch
            idxs = list(range(len(self.data)))
            np.random.shuffle(idxs)
            _X1, _X2, _Y = [], [], []
            for i in idxs:
                d = self.data[i]
                # truncate the text to the model's maximum input length
                text = d[0][:maxlen]
                x1, x2 = tokenizer.encode(first=text)
                y = d[1]
                _X1.append(x1)
                _X2.append(x2)
                _Y.append(keras.utils.to_categorical(y, num_classes=num_classes))
                # emit a batch when full, or at the end of the epoch
                if len(_X1) == self.batch_size or i == idxs[-1]:
                    _X1 = seq_padding(_X1)
                    _X2 = seq_padding(_X2)
                    _Y = seq_padding(_Y)
                    yield [_X1, _X2], _Y
                    # reset the accumulators for the next batch
                    [_X1, _X2, _Y] = [], [], []
# Data preprocessing: drop rows without a label, encode labels as integers
df = pd.read_csv('data.csv')
df = df[~df['modified_label'].isnull()]
df['modified_label'] = df['modified_label'].astype(str)
encoder = LabelEncoder()
df['modified_label_'] = encoder.fit_transform(df['modified_label'])
# label string -> integer id, following the encoder's class order
encoder_dict = {}
j = 0
for i in encoder.classes_:
    encoder_dict[i] = j
    j += 1
data = []
for x, y in zip(df['modified_content'], df['modified_label_']):
    data.append((x, y))
# split train/validation 8:2 (every 5th shuffled sample goes to validation)
random_order = list(range(len(data)))
np.random.shuffle(random_order)
train_data = [data[j] for i, j in enumerate(random_order) if i % 5 != 0]
valid_data = [data[j] for i, j in enumerate(random_order) if i % 5 == 0]
# Model parameters and pretrained BERT asset paths
num_classes = len(encoder_dict)
maxlen = 100  # maximum number of characters fed to BERT per sample
config_path = '/Users/finupgroup/Desktop/FinupCredit/资易通-云电猫/chinese_L-12_H-768_A-12/bert_config.json'
checkpoint_path = '/Users/finupgroup/Desktop/FinupCredit/资易通-云电猫/chinese_L-12_H-768_A-12/bert_model.ckpt'
dict_path = '/Users/finupgroup/Desktop/FinupCredit/资易通-云电猫/chinese_L-12_H-768_A-12/vocab.txt'
# vocabulary: token -> index, in file order
token_dict = {}
with codecs.open(dict_path, 'r', 'utf8') as reader:
    for line in reader:
        token = line.strip()
        token_dict[token] = len(token_dict)
tokenizer = OurTokenizer(token_dict)
bert_model = load_trained_model_from_checkpoint(config_path, checkpoint_path, seq_len=None)
# fine-tune all BERT layers rather than freezing them
for l in bert_model.layers:
    l.trainable = True
x1_in = Input(shape=(None,))  # token ids
x2_in = Input(shape=(None,))  # segment ids
x = bert_model([x1_in, x2_in])
# take the first position's vector (BERT's [CLS]) as the sequence representation
x = Lambda(lambda x: x[:, 0])(x)
p = Dense(num_classes, activation='softmax', name='softmax')(x)
model = Model([x1_in, x2_in], p)
model.compile(
    loss='categorical_crossentropy',
    optimizer=Adam(1e-5),  # sufficiently small learning rate for fine-tuning
    metrics=['categorical_accuracy']
)
model.summary()
train_D = data_generator(train_data)
valid_D = data_generator(valid_data)
# checkpoint the best model by validation accuracy; log to TensorBoard
model.fit_generator(
    train_D.__iter__(),
    steps_per_epoch=len(train_D),
    epochs=5,
    validation_data=valid_D.__iter__(),
    validation_steps=len(valid_D),
    callbacks=[keras.callbacks.ModelCheckpoint("weights-{epoch:04d}--{val_categorical_accuracy:.4f}.h5",
                                               monitor='val_categorical_accuracy',
                                               save_best_only=True, verbose=1),
               TensorBoard(log_dir='./logs', write_graph=True, write_images=True)]
)
# Test: reload the best checkpoint and prepare the OOT (out-of-time) sample
model = keras.models.load_model('weights-0002--0.8593.h5', custom_objects=get_custom_objects())
test = '马上就还了,这会钱还没到位,马上了'
test = test[:100]
x1, x2 = tokenizer.encode(first=test)
oot = pd.read_csv('oot5-8.csv')
oot = oot[~oot['content'].isnull()]
oot['label'] = oot['label'].astype(str)
# NOTE(review): fit_transform here *refits* the encoder on the OOT labels —
# the ids only match training when the label sets are identical; consider
# encoder.transform() instead
oot['modified_label_'] = encoder.fit_transform(oot['label'])
oot_data = []
for x, y in zip(oot['content'], oot['modified_label_']):
    oot_data.append((x, y))
def ft_api_parse_result_new(encoder_dict, result):
    """Map a single-row prediction array to (label, confidence), using the
    insertion order of encoder_dict as the class order."""
    labels = list(encoder_dict.keys())
    best_index = result.argmax()
    confidence = result[0].max()
    return labels[best_index], confidence
# Score every OOT sample, collecting text, ground truth and prediction
yuliao, true, predict_label, predict_prob = [], [], [], []
for i in range(len(oot_data)):
    test = oot_data[i][0]
    test = test[:100]  # same truncation as training
    x1, x2 = tokenizer.encode(first=test)
    label, prob = ft_api_parse_result_new(encoder_dict, model.predict([[x1], [x2]]))
    yuliao.append(test)
    true.append(oot_data[i][1])
    predict_label.append(label)
    predict_prob.append(prob)
result = pd.DataFrame({'yuliao': yuliao, 'true': true, 'predict_label': predict_label, 'predict_prob': predict_prob})
def get_true(ele):
    """Map an integer label id back to its label string via the module-level
    encoder_dict (relies on dict insertion order)."""
    return list(encoder_dict)[ele]
def get_acc(ele1, ele2):
    """Return 1 when the two labels match, otherwise 0."""
    return 1 if ele1 == ele2 else 0
result['true_label'] = result['true'].apply(lambda x: get_true(x))
result['acc'] = result.apply(lambda row: get_acc(row['predict_label'], row['true_label']), axis=1)
# per-predicted-label support and hit counts
# NOTE(review): the groupby result is neither assigned nor printed — it is
# only useful in an interactive session; confirm intended
result.groupby(['predict_label']).agg({'predict_label': 'count', 'acc': 'sum'})
|
#Karla Ivonne Serrano Arevalo
#Ejerercicio 1.b
#Diciembre 2016
import matplotlib.pyplot as plt
import numpy as np
# Sample the parabola y = 2x^2 - 8x - 11 on [-1, 5]
x=np.linspace(-1,5,100)
y=2*x**2-8*x-11
# Fix: plt.legend() was called with no labeled artists, which only emitted
# a "no handles with labels" warning and drew nothing; label the curve so
# the legend actually appears.
plt.plot(x,y,linewidth=5, color='r', label='y = 2x^2 - 8x - 11')
plt.legend()
plt.title('Laboratorio 3b ejercicio 1b')
plt.xlabel('eje x')
plt.ylabel('eje y')
plt.grid(True)
plt.show()
|
# -*- coding: utf-8 -*-
"""
Servidor de TFG - Proyecto Janet
Versión 1.0
MIT License
Copyright (c) 2019 Mauricio Abbati Loureiro - Jose Luis Moreno Varillas
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from ActionsController import Action
class ActionMoreBooks(Action):
    """Action that searches for more books, reusing the intent stored in the
    user's previous query to choose the search index."""
    def __init__(self, mongo, wms):
        Action.__init__(self, mongo, wms)
    def accion(self, intent, entities, response, uid):
        """Handle the intent: search again with the entities present this
        turn (book title and/or authors) and the stored previous intent.

        Returns the response dict enriched with a book list ('list-books'),
        a single book ('single-book'), or a plain-text message when nothing
        was found.
        """
        respuesta = response
        # the user's previous query determines the search-index keyword
        historial = self.mongo.obtener_consulta(uid)
        intentant = historial['intent']
        hayautor = False
        haylibro = False
        # detect which entities the NLU extracted this turn
        for ent in entities:
            if 'libro' in ent:
                haylibro = True
            elif 'autores' in ent:
                hayautor = True
        if haylibro and hayautor:
            respuesta['books'] = self.wms.buscarLibro(entities['libro'], entities['autores'],
                                                      entities['searchindex'], self._acortarkwconsulta(intentant))
        elif haylibro:
            respuesta['books'] = self.wms.buscarLibro(entities['libro'], None,
                                                      entities['searchindex'], self._acortarkwconsulta(intentant))
        elif hayautor:
            respuesta['books'] = self.wms.buscarLibro(None, entities['autores'],
                                                      entities['searchindex'], self._acortarkwconsulta(intentant))
        # NOTE(review): if neither a book nor an author entity was present,
        # respuesta['books'] was never set and the next line raises KeyError —
        # confirm upstream guarantees at least one of the two entities
        if not respuesta['books']:
            # no results: fall back to a plain-text apology
            del respuesta['books']
            respuesta['content-type'] = 'text'
            respuesta['response'] = 'Vaya, parece que no hay libros relacionados con esta consulta'
        else:
            if len(respuesta['books']) == 1:
                # exactly one hit: expand it into a full single-book response
                respuesta.update(self.wms.cargarInformacionLibro(respuesta['books'][0]['oclc']))
                del respuesta['books']
                respuesta['content-type'] = 'single-book'
                # store the query under the singular ("libro") intent name
                self.mongo.guardar_consulta(uid, respuesta, intentant.replace('libros', 'libro'))
                return respuesta
            elif intentant == 'consulta_libros_kw' or intentant == 'consulta_libros_titulo' or \
                    intentant == 'consulta_libros_autor' or intentant == 'consulta_libros_titulo_autor' \
                    or intentant == 'consulta_libros_kw_autor':
                # plural intents keep the whole result list
                respuesta['content-type'] = 'list-books'
            else:
                # singular intents return only the first book's details
                respuesta.update(self.wms.cargarInformacionLibro(respuesta['books'][0]['oclc']))
                del respuesta['books']
                respuesta['content-type'] = 'single-book'
        self.mongo.guardar_consulta(uid, respuesta, intentant)
        return respuesta
    def _acortarkwconsulta(self, intent):
        """Translate an intent name into its short search-index keyword
        (or None for unknown intents)."""
        if intent == 'consulta_libros_kw' or intent == 'consulta_libro_kw':
            return 'kw'
        elif intent == 'consulta_libros_titulo' or intent == 'consulta_libro_titulo':
            return 'title'
        elif intent == 'consulta_libros_autor' or intent == 'consulta_libro_autor':
            return 'author'
        elif intent == 'consulta_libros_titulo_autor' or intent == 'consulta_libro_titulo_autor':
            return 'title_author'
        elif intent == 'consulta_libros_kw_autor' or intent == 'consulta_libro_kw_autor':
            return 'kw_autor'
        else:
            return None
|
from setting import log
logging = log()
class InfoBulider:
    """Builds the URL, headers and form data for querying Lagou's job-search
    AJAX endpoint for a given position keyword and city filter."""
    def __init__(self,postion,city):
        self.__postion = postion
        self.__city = city
    def urlbulider(self):
        """Return the positionAjax endpoint URL with the city filter applied."""
        url = 'https://www.lagou.com/jobs/positionAjax.json?{}&needAd' \
              'dtionalResult=false'.format(self.__city)
        return url
    def headersbulider(self):
        """Return request headers mimicking a browser session on the
        corresponding job-list page."""
        headers = {
            'Host': 'www.lagou.com',
            'Origin': 'https://www.lagou.com',
            'Referer': 'https://www.lagou.com/jobs/list_{}?labelWords=&fromSearch=true&s'
                       'uginput='.format(self.__postion),
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like' \
                          ' Gecko) Chrome/69.0.3497.81 Safari/537.36',
            'X-Anit-Forge-Code': '0',
            'X-Anit-Forge-Token': None,
            'X-Requested-With': 'XMLHttpRequest'
        }
        return headers
    def databulider(self,page):
        """Return the POST form data for result page `page`.

        Bug fix: the payload used to be
        {'first': 'true', 'fn': {}, 'kd': '{}'.format(page, self.__postion)}
        — 'fn' held a literal empty dict (the '{}' was never formatted),
        and 'kd' (the search keyword) received the page number while the
        position keyword was a discarded extra format argument.  The
        endpoint's parameters are 'pn' = page number and 'kd' = keyword.
        """
        logging.info('data bulider working')
        data = {'first': 'true', 'pn': page, 'kd': self.__postion}
        return data
import os
import sys
if not os.path.exists('res/recommend_2.txt'):
    print('Cannot find res/recommend_2.txt')
    sys.exit()
# target users to produce recommendations for (dict used as an ordered set)
t_users = {}
user_file = "res/predict/dev.users"
with open(user_file, "r") as fp:
    for line in fp:
        viewer_id = line.strip()
        t_users[viewer_id] = 1
# user id -> list of recommended ids parsed from the inference file
inferences = {}
with open('res/recommend_2.txt', 'r') as fp:
    for line in fp:
        tokens = line.strip().split()
        inferences[tokens[0]] = tokens[1:]
with open('res/recommend.txt', 'w') as fp:
    for user in t_users:
        if user in inferences:
            recs = inferences[user]
        else:
            recs = []
        # pad every user's list to exactly 100 entries with placeholders
        for i in range(len(recs), 100):
            recs.append('@random_' + str(i+1))
        fp.write(user + ' ' + ' '.join(recs) + '\n')
|
from LinearRegression import regression as lr
import numpy as np
import tensorflow as tf
# synthetic input: 2000 samples with 3 features
x_data = np.random.randn(2000, 3)
# true weights/bias handed to the regression (presumably used to synthesize
# the targets inside the regression class — confirm)
w_real = [0.3, 0.5, 0.1]
b_real = -0.2
learningRate = 0.5
# w_real_column, w_real_row = x_data.shape
# print(w_real_column, w_real_row)
regression = lr(x_data,w_real,b_real,learningRate)
regression.train(10)  # run 10 training iterations
|
import random
import numpy as np
import itertools
class Allocation:
    """Maps task-graph nodes onto a mesh of tiles and evaluates the
    communication cost (manhattan distance x edge weight) of mappings.

    Node ids are 1-based so that 0 can mark an empty tile in the mesh array;
    comm_matrix[i][j] is the traffic between nodes i+1 and j+1.
    """
    def __init__(self, num_nodes, comm_matrix, mesh_dims, node_weights=None):
        self.num_nodes = num_nodes
        self.mesh_dims = mesh_dims
        self.comm_matrix = comm_matrix
        # every tile coordinate of the mesh, precomputed once
        self.possible_locs = self.generate_possible_coords()
        self.node_weights = node_weights
    # return the manhattan distance between coordinates
    @staticmethod
    def manhattan(c1, c2):
        m = 0
        for dim in range(len(c1)):
            val = abs(c2[dim] - c1[dim])
            m += val
        return m
    def generate_possible_coords(self):
        """Enumerate every tile coordinate of the mesh.

        NOTE(review): for a 1-D mesh this returns one-element *lists*, while
        for >=2 dims it returns tuples — confirm callers never rely on
        hashability in the 1-D case.
        """
        coords = [[i] for i in range(self.mesh_dims[0])]
        for dim in self.mesh_dims[1:]:
            new_coords = []
            for i in range(dim):
                for c in coords:
                    new_c = c.copy()
                    new_c.append(i)
                    new_coords.append(tuple(new_c))
            coords = new_coords
        return coords
    # generates a point tuple within the list of given dimensions
    def generate_random_coordinate(self):
        index = random.randint(0, len(self.possible_locs) - 1)
        return self.possible_locs[index]
    @staticmethod
    def choose_random_coordinate(poss_coords):
        """Pick a random coordinate from the supplied candidate list."""
        index = random.randint(0, len(poss_coords) - 1)
        return poss_coords[index]
    # find the total communication cost of a Mapping
    def total_comm_cost(self, allocation_dict):
        """Sum manhattan(coords) * edge_weight over all node pairs;
        allocation_dict maps node id (1-based) -> coordinate."""
        total_cost = 0
        for n1_index in range(self.num_nodes - 1):
            for n2_index in range(n1_index + 1, self.num_nodes):
                edge_weight = self.comm_matrix[n1_index][n2_index]
                if edge_weight == 0:
                    pass
                else:
                    # node ids are matrix indexes shifted by one
                    n1_id, n2_id = n1_index + 1, n2_index + 1
                    n1_coords, n2_coords = allocation_dict[n1_id], allocation_dict[n2_id]
                    m = self.manhattan(n1_coords, n2_coords)
                    pair_cost = m * edge_weight
                    total_cost += pair_cost
        return total_cost
    # find the communication of a partial mapping
    def partial_comm_cost(self, allocation_dict):
        """Like total_comm_cost, but only over the nodes actually present
        in allocation_dict (a partial mapping)."""
        total_cost = 0
        mapped_nodes = allocation_dict.keys()
        node_pairs = list(itertools.combinations(mapped_nodes, 2))
        for node_pair in node_pairs:
            n1_id, n2_id = node_pair[0], node_pair[1]
            n1_index, n2_index = n1_id - 1, n2_id - 1
            edge_weight = self.comm_matrix[n1_index][n2_index]
            if edge_weight == 0:
                pass
            else:
                n1_coords, n2_coords = allocation_dict[n1_id], allocation_dict[n2_id]
                m = self.manhattan(n1_coords, n2_coords)
                pair_cost = m * edge_weight
                total_cost += pair_cost
        return total_cost
    def random_swap(self, mapping, alloc_dict):
        """Mutate mapping/alloc_dict in place by one random move: either
        swap the nodes on two random tiles or move a node onto an empty
        tile (0 marks an empty tile).  Returns both structures."""
        # generate two random coordinates and swap the nodes at those tiles
        # could either move a node into an empty tile or swap two nodes
        t1_loc = self.generate_random_coordinate()
        t2_loc = self.generate_random_coordinate()
        while t2_loc == t1_loc:
            t2_loc = self.generate_random_coordinate()
        node1 = mapping[t1_loc]
        node2 = mapping[t2_loc]
        if node1 == 0 and node2 == 0:
            pass
        elif node1 == 0:
            # tile 1 has no node on, so move node 2 to its location
            alloc_dict[node2] = t1_loc
            mapping[t1_loc] = node2
            mapping[t2_loc] = 0
        elif node2 == 0:
            # tile 2 has no node on, so move node 1 to its location
            alloc_dict[node1] = t2_loc
            mapping[t2_loc] = node1
            mapping[t1_loc] = 0
        else:
            # swap both nodes
            alloc_dict[node2] = t1_loc
            alloc_dict[node1] = t2_loc
            mapping[t2_loc] = node1
            mapping[t1_loc] = node2
        return mapping, alloc_dict
    # mesh dims is a list of dims i.e. (2, 3), or (3, 3, 3)
    def generate_random_mapping(self):
        """Place every node on a distinct random tile; returns
        (mesh array, node id -> coordinate dict, total communication cost)."""
        mesh = np.zeros(shape=self.mesh_dims)
        # track already allocated locations
        poss_coords = self.generate_possible_coords()
        allocation_dict = {}
        # all nodes
        for node in range(self.num_nodes):
            # node ids must start at 1 in order tha 0 can represent an empty space
            node_id = node + 1
            # generate random coordinate to enter node
            location = self.choose_random_coordinate(poss_coords)
            poss_coords.remove(location)
            mesh[location] = node_id
            allocation_dict[node_id] = location
        return mesh, allocation_dict, self.total_comm_cost(allocation_dict)
    def generate_empty_mesh(self):
        """Return a zero-filled array shaped like the mesh."""
        mesh = np.zeros(shape=self.mesh_dims)
        return mesh
    def get_allocation_dict_from_mapping(self, mapping):
        """Invert a mesh array into a node id -> coordinate dict
        (tiles holding 0 are empty and skipped)."""
        alloc_dict = {}
        for loc in self.possible_locs:
            node = int(mapping[loc])
            if node != 0:
                alloc_dict[node] = loc
        return alloc_dict
|
import json
import pymorphy2
morph = pymorphy2.MorphAnalyzer()
with open("./1grams-3.txt") as f:
    content = f.readlines()
content = [x.strip() for x in content]
# accumulated term frequency per normalized (lemmatized) word form
normalised_idf = dict()
print("Parsing has started!")
for raw in content:
    tf, word = raw.split('\t')
    # Bug fix: the guard used to be `if word == {}` — a string is never
    # equal to an empty dict, so blank words were never skipped.
    if not word:
        continue
    # lemmatize and merge counts of all inflected forms
    word_normal_form = morph.parse(word)[0].normal_form
    if normalised_idf.get(word_normal_form) is None:
        normalised_idf[word_normal_form] = int(tf)
    else:
        normalised_idf[word_normal_form] = int(normalised_idf[word_normal_form]) + int(tf)
with open("./normalized_idf.json", 'w', encoding='utf-8') as fp:
    json.dump(normalised_idf, fp, ensure_ascii=False)
print("Parsing has finished!")
|
import logging
import logging.handlers
import os
# from logging_utils.telegramhandler import TelegramLoggerHandler
def generate_logger(name='root'):
    """Create (or reconfigure) a logger writing DEBUG+ messages both to
    stderr and to a daily-rotating file logs/debug_<name>.log (5 backups)."""
    logger = logging.getLogger(name)
    # drop handlers from any earlier call so they don't accumulate
    logger.handlers.clear()
    logger.setLevel(logging.DEBUG)
    streamHandler = logging.StreamHandler()
    streamHandler.setFormatter(logging.Formatter("[%(name)s] %(asctime)s - %(levelname)s:\n%(message)s\n"))
    streamHandler.setLevel(logging.DEBUG)
    trfHandler = logging.handlers.TimedRotatingFileHandler(f'{os.path.dirname(__file__)}/../logs/debug_{name}.log',
                                                           when='D', interval=1, backupCount=5)
    trfHandler.setFormatter(logging.Formatter("[%(name)s] %(asctime)s - %(levelname)s:\n%(message)s\n"))
    trfHandler.setLevel(logging.DEBUG)
    logger.addHandler(streamHandler)
    # logger.addHandler(TelegramLoggerHandler())
    logger.addHandler(trfHandler)
    # sanity check: handlers were cleared above, so more than 3 means a bug
    if len(logger.handlers) > 3:
        raise Exception('logger.handlers length exception')
    print('{name} logger setted up'.format(name=name))
    return logger
def generate_writer_logger(name='root'):
    """File-only logger writing "time | message" lines to
    logs/debug_<name>.log.

    NOTE(review): when='D', interval=60 rotates every 60 *days* — confirm
    that is intended rather than minutes.
    """
    logger = logging.getLogger(name)
    logger.handlers.clear()
    logger.setLevel(logging.DEBUG)
    trfHandler = logging.handlers.TimedRotatingFileHandler(f'{os.path.dirname(__file__)}/../logs/debug_{name}.log',
                                                           when='D', interval=60, backupCount=5)
    trfHandler.setFormatter(logging.Formatter("%(asctime)s | %(message)s"))
    trfHandler.setLevel(logging.DEBUG)
    logger.addHandler(trfHandler)
    # logger.propagate
    # raise Exception('writer logger exception')
    print('{name} logger setted up'.format(name=name))
    # logger.info('Logger setted up'.format(name=name))
    return logger
|
class Solution(object):
    def addNegabinary(self, arr1, arr2):
        """
        Add two numbers written in base -2 (most-significant digit first)
        and return the sum in the same representation.

        Uses the standard negabinary carry rule: for a column total t the
        digit is ``t & 1`` and the carry into the next column is ``-(t >> 1)``
        (carry is -1, 0 or +1). This replaces the original hand-rolled
        per-case fix-ups and the Python-2-only ``xrange``.

        :type arr1: List[int]
        :type arr2: List[int]
        :rtype: List[int]
        """
        i, j = len(arr1) - 1, len(arr2) - 1
        carry = 0
        digits = []  # least-significant digit first while we build
        while i >= 0 or j >= 0 or carry != 0:
            total = carry
            if i >= 0:
                total += arr1[i]
                i -= 1
            if j >= 0:
                total += arr2[j]
                j -= 1
            digits.append(total & 1)
            carry = -(total >> 1)
        # Strip leading zeros but keep a single 0 for the value zero.
        while len(digits) > 1 and digits[-1] == 0:
            digits.pop()
        digits.reverse()
        return digits
# Demo runs (LeetCode 1073 examples). Written as print(<single expression>),
# which is valid and prints identically under both Python 2 and Python 3,
# unlike the original Python-2-only print statements.
print(Solution().addNegabinary([1, 1, 1, 1, 1], [1, 0, 1]))
print(Solution().addNegabinary([1], [1]))
print(Solution().addNegabinary([0], [0]))
print(Solution().addNegabinary([1, 1], [1]))
print(Solution().addNegabinary([1, 1, 1, 1, 0], [1, 1, 1, 0, 0]))
|
# existing markdown inlinePatterns
# https://github.com/Python-Markdown/markdown/blob/2.6/markdown/inlinepatterns.py
from markdown.extensions import Extension
from markdown.inlinepatterns import SimpleTagPattern, Pattern
from markdown.inlinepatterns import SubstituteTagPattern
from markdown.util import etree
from markdown import util
# Inline-markup regexes for Slack-flavoured markdown. Group 2 of each
# pattern captures the text content (handleMatch reads it as m.group(3)
# because markdown's Pattern prepends its own wrapping group).
DEL_RE = r'(~)(.*?)~' # Strikeout in slack
INS_RE = r'(__)(.*?)__' # not slack ;-)
STRONG_RE = r'(\*)(.*?)\*' # Bold in slack
EMPH_RE = r'(_)(.*?)_' # Italics in slack
CODE_RE = r'(`)(.*?)`' # code in slack
PREFORMATTED_RE = r'(```)(.*?)```' # preformatted in slack
# NEWLINE_RE = r'\n' # newline in slack
USERNAME_RE = r'(<@)(.*?)>' # username tag, e.g. <@U123ABC>
CHANNEL_RE = r'(<#.+?\|)(.*?)>' # channel tag with label, e.g. <#C123|general>
CHANNEL_2_RE = r'(<#)(.*?)>' # channel tag without label, e.g. <#C123>
class MyExtension(Extension):
    """Markdown extension that maps Slack-style inline markup (~strike~,
    *bold*, _italics_, ```pre```, <@user> and <#channel> tags) onto HTML.

    The registration order below matters: markdown's inlinePatterns is an
    ordered dict and '<name'/'>name' place a pattern before/after an
    existing one, which controls matching priority.
    """
    def __init__(self, *args, **kwargs):
        # Define config options and defaults.
        # markdown config format is [default_value, help_text]; callers are
        # expected to override 'data_for_replacing_text' with a list of
        # {'data_id': ..., 'text': ...} dicts.
        self.config = {
            'data_for_replacing_text': ['it shall be a list', 'To provide data_for_replacing_text data']
        }
        # Call the parent class's __init__ method to configure options
        super(MyExtension, self).__init__(*args, **kwargs)
    def extendMarkdown(self, md, md_globals):
        """Register the Slack patterns on the given Markdown instance."""
        data_for_replacing_text = self.getConfig('data_for_replacing_text')
        del_tag = SimpleTagPattern(DEL_RE, 'del')
        md.inlinePatterns.add('del', del_tag, '>not_strong')
        ins_tag = SimpleTagPattern(INS_RE, 'ins')
        md.inlinePatterns.add('ins', ins_tag, '>del')
        # Replace the stock strong/em handlers so single *...*/_..._ work.
        strong_tag = SimpleTagPattern(STRONG_RE, 'strong')
        md.inlinePatterns['strong'] = strong_tag
        emph_tag = SimpleTagPattern(EMPH_RE, 'em')
        md.inlinePatterns.add('em', emph_tag, '>del')
        # Must run before 'backtick' so ``` is not consumed as inline code.
        preformatted_tag = SimpleTagPattern(PREFORMATTED_RE, 'pre')
        md.inlinePatterns.add('preformatted', preformatted_tag, '<backtick')
        # newline_tag = SubstituteTagPattern(NEWLINE_RE, 'br')
        # md.inlinePatterns.add('linebreak2', newline_tag, '>linebreak')
        # With replacement data, user/channel IDs are swapped for display
        # names; otherwise the raw captured text is kept.
        if isinstance(data_for_replacing_text, list):
            username_tag = SimpleTagPatternWithClassOptionsAndData(USERNAME_RE, 'span', 'username', data_for_replacing_text)
            md.inlinePatterns.add('username', username_tag, '<link')
            channel_tag = SimpleTagPatternWithClassOptionsAndData(CHANNEL_RE, 'span', 'channel', data_for_replacing_text)
            md.inlinePatterns.add('channel', channel_tag, '<username')
            channel_2_tag = SimpleTagPatternWithClassOptionsAndData(CHANNEL_2_RE, 'span', 'channel', data_for_replacing_text)
            md.inlinePatterns.add('channel_2', channel_2_tag, '>channel')
        else:
            username_tag = SimpleTagPatternWithClassOptions(USERNAME_RE, 'span', 'username')
            md.inlinePatterns.add('username', username_tag, '<link')
            channel_tag = SimpleTagPatternWithClassOptions(CHANNEL_RE, 'span', 'channel')
            md.inlinePatterns.add('channel', channel_tag, '<username')
            channel_2_tag = SimpleTagPatternWithClassOptions(CHANNEL_2_RE, 'span', 'channel')
            md.inlinePatterns.add('channel_2', channel_2_tag, '>channel')
class SimpleTagPatternWithClassOptions(Pattern):
    """
    Inline pattern that wraps the matched text (regex group 3) in a `tag`
    element carrying a fixed CSS class.
    """
    def __init__(self, pattern, tag, class_name_in_html):
        Pattern.__init__(self, pattern)
        self.tag, self.class_name_in_html = tag, class_name_in_html

    def handleMatch(self, m):
        """Build the <tag class=...> element for a regex match."""
        element = util.etree.Element(self.tag)
        element.set('class', self.class_name_in_html)
        element.text = m.group(3)
        return element
class SimpleTagPatternWithClassOptionsAndData(Pattern):
    """
    Inline pattern that wraps the match in a `tag` element with a fixed CSS
    class, replacing the captured id with display text looked up in
    `data_for_replacing_text` (a list of {'data_id': ..., 'text': ...}).
    """
    def __init__(self, pattern, tag, class_name_in_html, data_for_replacing_text):
        Pattern.__init__(self, pattern)
        self.tag = tag
        self.class_name_in_html = class_name_in_html
        self.data_for_replacing_text = data_for_replacing_text

    def handleMatch(self, m):
        """Build the element, swapping the captured id for display text."""
        element = util.etree.Element(self.tag)
        element.set('class', self.class_name_in_html)
        element.text = self.get_datum_text(self.data_for_replacing_text, m.group(3))
        return element

    def get_datum_text(self, data_for_replacing_text, data_id):
        """Return the 'text' of the entry whose 'data_id' matches, else the id itself."""
        for entry in data_for_replacing_text:
            if entry.get('data_id') == data_id:
                return entry.get('text')
        return data_id
|
from pymongo import ASCENDING, DESCENDING
from models import Vote, GistPoints, UserPoints
from mongokit import Connection
import settings
# Shared mongokit connection with the project's document models registered;
# `db` is the default database handle used by run() and test() below.
con = Connection()
con.register([Vote, GistPoints, UserPoints])
db = con[settings.DEFAULT_DATABASE_NAME]
def run():
    """Ensure the indexes used by the vote/points queries, yielding the name
    of each index as it is created, then smoke-test them via test().

    Fix: the yields after ensure_index('gist.$id') previously reported
    'user.$id' (copy-paste defect); each yield now names the index actually
    ensured.
    """
    collection = db.Vote.collection
    collection.ensure_index('user.$id')
    yield 'user.$id'
    collection.ensure_index('gist.$id')
    yield 'gist.$id'
    collection = db.GistPoints.collection
    collection.ensure_index('gist.$id')
    yield 'gist.$id'
    collection.ensure_index([('points', DESCENDING)])
    yield 'points'
    collection = db.UserPoints.collection
    collection.ensure_index('user.$id')
    yield 'user.$id'
    collection.ensure_index([('points', DESCENDING)])
    yield 'points'
    test()
def test():
    """Smoke-check that every query pattern run() indexed actually hits a
    BtreeCursor (i.e. an index) according to explain()."""
    any_obj_id = list(db.Vote.find().limit(1))[0]._id
    # (model, filter) pairs in the original check order; filter None means
    # "sort by points descending" instead of a field lookup.
    checks = [
        (db.Vote, {'user.$id': any_obj_id}),
        (db.Vote, {'gist.$id': any_obj_id}),
        (db.GistPoints, {'gist.$id': any_obj_id}),
        (db.GistPoints, None),
        (db.UserPoints, {'user.$id': any_obj_id}),
        (db.UserPoints, None),
    ]
    for model, criteria in checks:
        if criteria is None:
            cursor_name = model.find().sort('points', DESCENDING).explain()['cursor']
        else:
            cursor_name = model.find(criteria).explain()['cursor']
        assert 'BtreeCursor' in cursor_name
|
# -*- coding: utf-8 -*-
__author__ = 'PC-LiNing'
import gensim
from lda import load_data
import numpy as np
from word2vec import model_util
import redis
def degree_diff(word, centers):
    """Return the variance of the distances from `word`'s embedding to each
    class-center vector, or None when the word has no stored embedding."""
    word_vec = get_vector(word)
    if word_vec is None:
        return None
    dists = [np.linalg.norm(word_vec - center) for center in centers]
    return np.var(np.asarray(dists))
# Redis holds precomputed word embeddings as raw float32 byte strings.
r = redis.StrictRedis(host='10.2.1.7', port=6379, db=0)

def get_vector(word):
    """Fetch `word`'s embedding from redis as a float32 numpy vector, or
    None when the word is unknown.

    Fix: np.fromstring is deprecated for binary input; np.frombuffer reads
    the same bytes without a copy. The returned array is read-only, which
    is fine here since callers only do arithmetic on it.
    """
    result = r.get(word)
    if result is None:
        return None
    return np.frombuffer(result, dtype=np.float32)
# Build a tf-idf model over the corpus, then inspect one document:
# print its top-10 tf-idf terms and their degree-diff w.r.t. class centers.
corpus, dic, labels = load_data.load_corpus()
tfidf = gensim.models.TfidfModel(corpus=corpus, dictionary=dic)
corpus_tfidf = [tfidf[doc] for doc in corpus]

all_class_centers = ['建筑','机械','计算机']
centers = model_util.get_center_embeddings(all_class_centers)

paper = corpus_tfidf[100]
allwords = [dic[pair[0]] for pair in paper]
print(allwords)

# Sort ascending by weight; the last 10 entries are the highest-weighted.
doc_sorted = sorted(paper, key=lambda pair: pair[1])
keywords = doc_sorted[-10:]
print('top 10 word and tf-idf: ')
for token_id, weight in keywords:
    print(dic[token_id] + " " + str(weight))
print('#######')
print('keyword and degree-diff: ')
for token_id, weight in keywords:
    token = dic[token_id]
    print(token + " " + str(degree_diff(token, centers)))
|
import re
import clipboard
import xlrd as xl
def extract_information(file_path, start, end, ent="", append_ents=False):
    """
    Slice lines [start:end) out of a text file and return them as a list of
    strings; optionally pair each line with an entity-annotation template.
    :param file_path: text file (e.g. parsed pdf file)
    :param start: start line from which shall be sliced
    :param end: end line to slice to
    :param ent: which entity Type should be annotated
    :param append_ents: enable/disable entity annotations appendings
    :return: list of strings, or of (string, annotation-dict) tuples
    """
    # Template only — the (start, end) entity indices must still be set by hand.
    placeholder = {"entities": [(0, 0, ent)]}
    with open(file_path, "r", encoding="utf-8") as doc:
        selected = doc.readlines()[start:end]
    if append_ents:
        return [(line.rstrip(), placeholder) for line in selected]
    return [line.rstrip() for line in selected]
def merge_list_strings(*cols, ent="", ent_col=0):
    """
    Merge extracted columns (from pdf) back into their original line strings.
    When `ent` is given, locate the ent_col value inside each merged line and
    attach a spaCy-style annotation dict (only viable if that column solely
    contains the entity).
    :param cols: arbitrary number of list of strings (columns in pdf)
    :param ent: entity abbreviation which shall be annotated
    :param ent_col: column that contains entities
    :return: list of merged lines, or of (line, annotation-dict) tuples
    """
    # [a,b,c],[x,y,z],[1,2,3] -> [(a,x,1),(b,y,2),(c,z,3)]
    merged_rows = list(zip(*cols))
    # (a,x,1) -> "a x 1". Fix: the original rebuilt this whole comprehension
    # once per row inside a redundant `for t in merged_list` loop.
    out = [" ".join(row) for row in merged_rows]
    if not ent:
        return out
    # Locate the ent_col value in each merged line and attach its span.
    final = []
    for i, item in enumerate(out):
        ent_value = cols[ent_col][i]
        start = item.find(ent_value)
        final.append((item, {"entities": [(start, start + len(ent_value), ent)]}))
    return final
def extract_index_pos(file_path, search_keyword, cv_index, annot=""):
    """
    Helper for manual entity annotation of parsed data. Reads line `cv_index`
    of the file, finds `search_keyword` in it and prints/copies the index
    range as a ready-to-paste annotation tuple.
    :param file_path: path of train data txt file (one tuple per line max)
    :param search_keyword: the token whose index range shall be returned
    :param cv_index: the line of the document
    :param annot: optional entity tag to skip the interactive prompt
    :raises ValueError: when the keyword does not occur in the line
    """
    with open(file_path, "r", encoding="utf-8") as f:
        sentence = f.readlines()[cv_index]
    pat = re.compile(re.escape(search_keyword))
    # Search once and reuse the match (the original re-ran pat.search five
    # times and crashed with AttributeError on a missing keyword).
    match = pat.search(sentence)
    print(match)
    if match is None:
        raise ValueError("'%s' not found in line %d of %s" % (search_keyword, cv_index, file_path))
    # Entity type shortcuts
    if not annot:
        key = input("Ent_Tag:")
        shortcuts = {
            "1": "Skills",
            "2": "Experience",
            "3": "CurrentJob",
            "4": "Languages",
            "5": "Degrees",
            "6": "Name",
        }
        ent = shortcuts.get(key, key)  # unknown keys are used verbatim
    else:
        ent = annot
    start, end = match.span()
    snippet = "(" + str(start) + ", " + str(end) + ", \"" + ent + "\"), "
    print(snippet)
    clipboard.copy(snippet)
    print("Above is copied to clipboard")
    print("\n")
    print(sentence[start:end])
def extract_from_xls(file_path, sheet_index=0):
    """
    Convert an xls sheet laid out as (text, string, start_index, end_index,
    entity_type) into a spaCy-style training list: rows with an empty text
    cell add their annotation to the previous sample.
    :param file_path: path of the xls workbook
    :param sheet_index: which sheet to read (default: first)
    :return: list of (text, {"entities": [(start, end, ent), ...]}) tuples
    """
    workbook = xl.open_workbook(file_path)
    sheet = workbook.sheet_by_index(sheet_index)
    entities = []
    # Row 0 is the header; iterate data rows only.
    for row_idx in range(1, len(sheet.col_values(2))):
        row = sheet.row_values(row_idx)
        text = row[0]                               # sample text (may be empty)
        annotation = (int(row[2]), int(row[3]), row[4])  # (start, end, ent)
        if text != xl.empty_cell.value:
            # New sample: start its annotation list.
            entities.append((text, {"entities": [annotation]}))
        else:
            # Continuation row: extra annotation for the previous sample.
            entities[-1][1]["entities"].append(annotation)
    return entities
# Examples
# -------------------
# file = r"F:\UNI\Master WiInfo\Thesis\application\cb\bewerbungen_raw\output\train_data.txt"
#
# keyword = clipboard.paste()
# extract_index_pos(file, keyword, 0)
# file_path = r"F:\UNI\Master WiInfo\Thesis\application\cb\bewerbungen_raw\output\cv_68.txt"
# col1 = extract_information(file_path, 32, 73)
# col2 = extract_information(file_path, 73, 114)
# col3 = extract_information(file_path, 114, 155)
#
# print(*merge_list_strings(col1, col2, col3,ent="SKL", ent_col=1), sep=",\n")
|
import urllib
import requests
import os
import json
from article import Article
from facts import Facts
class Query(object):
    """Run a Google Custom Search for a query, fetch one article per news
    outlet and collect notable quotes as 'facts'.

    Fixes: Python-2-only ``xrange`` replaced with ``range`` (works on both),
    bare ``except:`` clauses narrowed, and a ZeroDivisionError guarded in
    choosing_quotes when no quotes were found.
    """

    def __init__(self, query):
        self.API_KEY = os.getenv('GOOGLE_API')
        self.SEARCH_ENGINE_ID = '015040051912301786117:ukzldfl328w'
        self.query = query.replace(' ', '+')  # URL-encode spaces for the query string
        self.facts = []
        self.articles = []
        self.article_information()

    def create_fake(self):
        """Populate facts/articles with canned data for UI testing."""
        self.facts = ['Pip is a good man', 'Byron is a person', 'Keenan is Keenan', 'Patrick smells']
        for i in range(10):
            self.articles.append({'title': str(i), 'url': 'google.com', 'sentiment': str(i * 4), 'political': str(i * 8), 'summary': 'Hello I am summary. This is summary. I will summarize you BITCH'})

    def generate_query(self, iteration):
        """Build a Custom Search API URL; iteration > 0 adds a start offset."""
        if iteration != 0:
            template = ['https://www.googleapis.com/customsearch/v1?highrange&start=', str(iteration), '&key=', self.API_KEY, '&cx=', self.SEARCH_ENGINE_ID, '&q=', self.query]
        else:
            template = ['https://www.googleapis.com/customsearch/v1?highrange&key=', self.API_KEY, '&cx=', self.SEARCH_ENGINE_ID, '&q=', self.query]
        return ''.join(template)

    def sort_urls(self, urls):
        """Bucket URLs by news outlet; unrecognized domains are dropped."""
        ret = {'cnn': [], 'guardian': [], 'huffington': [], 'nytimes': []}
        for url in urls:
            if 'cnn.com' in url:
                ret['cnn'].append(url)
            elif 'guardian.com' in url:
                ret['guardian'].append(url)
            elif 'huffingtonpost.com' in url:
                ret['huffington'].append(url)
            elif 'nytimes.com' in url:
                ret['nytimes'].append(url)
        return ret

    # TODO: Reimplement this over get_urls_tmp
    def get_urls(self):
        """Fetch 5 pages of search results and return them bucketed by outlet."""
        urls = []
        for page in range(5):
            url = self.generate_query(10 * page)
            response = json.loads(requests.get(url).content)
            if 'error' in response.keys():
                raise Exception("Error")
            for item in response['items']:
                urls.append(item['link'])
        return self.sort_urls(urls)

    def get_urls_tmp(self):
        """Offline fallback: read URLs from a backup file (two known paths)."""
        urls = []
        try:
            f = open('backup_urls.txt', 'r')
        except IOError:
            f = open('models/backup_urls.txt', 'r')
        for line in f:
            urls.append(line.strip())
        f.close()
        return self.sort_urls(urls)

    def article_information(self):
        """Fetch articles and derive facts (quotes) from them."""
        Articles = self.fetch_articles()
        self.articles = Articles
        self.choosing_quotes(Articles)

    def choosing_quotes(self, Articles):
        """Keep up to 8 quotes that are longer than the average quote."""
        total_quotes = []
        total_length = 0
        count = 0
        for item in Articles:
            for quote in item.quotes:
                total_length += len(''.join(item.quotes[quote]))
                count += 1
        # Guard: no quotes at all previously raised ZeroDivisionError.
        average = total_length / float(count) if count else 0.0
        for item in Articles:
            for quote in item.quotes:
                if len(''.join(item.quotes[quote])) > average:
                    total_quotes.append(quote)
        self.facts = total_quotes[:8]

    def fetch_articles(self):
        """Build an Article from the first URL of each outlet bucket."""
        # TODO: REIMPLEMENT get_urls & not tmp
        urls = self.get_urls_tmp()
        firsts = []
        for outlet in urls:
            try:
                firsts.append(urls[outlet][0])
            except IndexError:
                pass  # outlet had no URLs
        return [Article(link) for link in firsts]
if __name__ == '__main__':
    # Manual smoke test: print the facts gathered for a sample query.
    # Uses print(<single expression>), valid under both Python 2 and 3,
    # instead of the Python-2-only print statement.
    test = Query('ahmed mohamed')
    for fact in test.facts:
        print(fact)
    print('')
    #test.create_fake()
    #Articles = test.fetch_articles()
    #facts = Facts(Articles)
    #for i in Articles:
    #    print(i.quotes)
|
import glob
from pyMCDS_cells import pyMCDS_cells
import numpy as np
import matplotlib.pyplot as plt # if you want to plot results
# run this script from your output directory
# Count uninfected / infected / dead cells per snapshot and plot them over time.
xml_files = sorted(glob.glob('output*.xml'))
print(xml_files)

n = len(xml_files)
t = np.zeros(n)
uninfected = np.zeros(n)
infected = np.zeros(n)
dead = np.zeros(n)

for idx, fname in enumerate(xml_files):
    mcds = pyMCDS_cells(fname, '.')
    t[idx] = mcds.get_time()
    cycle = mcds.data['discrete_cells']['cycle_model'].astype(int)
    virions = mcds.data['discrete_cells']['assembled_virion']
    # cycle >= 100 marks dead cells; >= 1 assembled virion marks infection.
    alive = cycle < 100
    uninfected[idx] = len(np.where((virions < 1) & alive)[0])
    infected[idx] = len(np.where((virions >= 1) & alive)[0])
    dead[idx] = len(np.where(cycle >= 100)[0])

plt.plot(t, uninfected, label='uninfected', linewidth=2)
plt.plot(t, infected, label='infected', linewidth=2)
plt.plot(t, dead, label='dead', linewidth=2)
plt.legend(loc='center left', prop={'size': 8})
plt.show()
import os,sys,re,time,shutil
import random
import numpy as np
from tfnlp.image_summary.model import ism
from tfnlp.image_summary.image_feature_generator import image_feature_generator
from tfnlp.image_summary.summary_feature import summary_dict,counter,word_list,word_dict
from tfnlp.image_summary.train_decode_generator import multi_model_state_generator
def _get_module_path(path):
    """Resolve *path* relative to this module's directory.

    Converted from an assigned lambda to a def (PEP 8 E731) — behavior
    and call signature are unchanged.
    """
    return os.path.normpath(os.path.join(os.getcwd(), os.path.dirname(__file__), path))

# Shared batch generator used by both training and decoding below.
G = multi_model_state_generator(batch_size=128)
def train():
    """Train the decoder in epochs of 1000 steps, checkpointing and printing
    sample decodes after each epoch."""
    for epoch in range(100):
        for step in range(1000):
            ism.train_decoder(train_generator=G)
        ism.save_model()
        decode()
def decode():
    """Decode one batch and print each reference summary next to the
    generated one (generation stops at the first <NON>/<END> token)."""
    img224, img299, seq_lens, word_ids = next(G)
    generated = ism.decode_summary(img224, img299)
    rows, cols = generated.shape
    for row in range(rows):
        # Ground-truth tokens, truncated to the stored sequence length.
        reference = [word_list[word_ids[row][step]] for step in range(seq_lens[row])]
        produced = []
        for step in range(cols):
            token = word_list[generated[row][step]]
            produced.append(token)
            if token in ['<NON>', '<END>']:
                break
        print('----------------------------------------')
        print(' '.join(reference))
        print(' '.join(produced))
        print('')
        print('')
if __name__ == '__main__':
    # Default to training when no mode argument is given.
    mode = sys.argv[1] if len(sys.argv) > 1 else 'train'
    if mode == 'train':
        train()
    elif mode == 'test':
        decode()
|
import nmap
from nmap import *
def run_nmap(ip, ports):
    """Scan `ip` on `ports` with python-nmap and return a readable report.

    :param ip: target host spec accepted by nmap, e.g. '127.0.0.1'
    :param ports: port spec accepted by nmap, e.g. '22-443'
    :return: newline-joined report of hosts, states, protocols and port states

    Fixes: removed a discarded nm.command_line() call, the redundant list
    copy inside join, and the oddly named '\\n' separator variable.
    """
    print("Running nmap...")
    nm = nmap.PortScanner()
    nm.scan(ip, ports)
    lines = []
    for host in nm.all_hosts():
        lines.append('----------------------------------------------------')
        lines.append('Host : %s (%s)' % (host, nm[host].hostname()))
        lines.append('State : %s' % nm[host].state())
        for proto in nm[host].all_protocols():
            lines.append('----------')
            lines.append('Protocol : %s' % proto)
            for port in nm[host][proto].keys():
                lines.append('port : %s\tstate : %s' % (port, nm[host][proto][port]['state']))
    return "\n".join(lines)
|
from PySide.QtCore import *
from PySide.QtGui import *
class ParamItem(object):
    """Tree node for a Qt item model over a nested parameter dict.

    A dict value becomes an inner node whose children are its non-"__meta"
    keys (the matching "<key>__meta" entry is attached as that child's
    meta); any other value becomes a leaf with `data` set. Children are
    presented in sorted-key order. `model` is the owning QAbstractItemModel,
    used to build QModelIndex objects and emit dataChanged.
    """
    def __init__(self, name, parent, data, meta, model):
        self.name = name
        self.parentItem = parent
        self.items = {}          # child name -> ParamItem
        self.model = model
        if type(data) == dict:
            for k, v in data.items():
                if not k.endswith("__meta"):
                    childdata = v
                    childmeta = data.get(k + "__meta")
                    self.items[k] = ParamItem(k, self, childdata, childmeta, model)
            self.data = None     # inner nodes carry no scalar data
        else:
            self.data = data
        self.meta = meta
    def merge(self, item):
        """Update this subtree in place to match `item` (same name),
        emitting dataChanged for rows whose value or child set changed."""
        if item.name != self.name:
            raise ValueError("Error using ParamItem.merge")
        self.meta = item.meta
        if item.data is not None:
            # Incoming node is a leaf: emit if the value changed or we had children.
            emit = self.data != item.data or self.items != {}
            self.data = item.data
            self.items = {}
            if emit:
                # Column 1 holds the value; signal that cell.
                index = self.createIndex(1)
                self.model.dataChanged.emit(index, index)
        else:
            # Incoming node is an inner node: reconcile child sets.
            oldkeys = set(self.items.keys())
            newkeys = set(item.items.keys())
            add = newkeys - oldkeys
            remove = oldkeys - newkeys
            keep = newkeys & oldkeys
            for k in add:
                self.items[k] = item.items[k]
            for k in remove:
                del self.items[k]
            for k in keep:
                self.items[k].merge(item.items[k])
            if len(add) > 0 or len(remove) > 0:
                # NOTE(review): emitting dataChanged on this node for
                # added/removed children looks wrong for a tree model
                # (rowsInserted/rowsRemoved would be canonical) — the
                # author flagged it with the debug print below; confirm.
                index = self.createIndex(0)
                print "change this?"
                self.model.dataChanged.emit(index, index)
    def createIndex(self, col):
        """Return a QModelIndex for this node at `col`; invalid index for the root."""
        if self.parentItem:
            return self.model.createIndex(self.row(), col, self)
        return QModelIndex()
    def child(self, row):
        """Return the row-th child in sorted-key order."""
        return self.items[sorted(self.items.keys())[row]]
    def childCount(self):
        return len(self.items)
    def parent(self):
        return self.parentItem
    def row(self):
        """Position of this node among its siblings (None for the root)."""
        if self.parentItem:
            return sorted(self.parentItem.items.keys()).index(self.name)
    def fullpath(self):
        """Slash-separated path from the root to this node ("" for the root)."""
        if self.parentItem:
            return self.parentItem.fullpath() + "/" + self.name
        return ""
|
from django import forms
class UrlSwapForm(forms.Form):
    """Single-field form asking the user for the origin URL to swap."""
    origin_url = forms.URLField(label='Adres url', required=True)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.