text stringlengths 38 1.54M |
|---|
from tkinter import *
import tkinter.messagebox as box

# Use app name throughout
appname = 'Thorganizer'

window = Tk()
window.title(appname)

label = Label(window, text='What is running through your mind?')
label.pack(padx=200, pady=50)

frame = Frame(window)
entry = Entry(frame, width=200)

# Thought categories a note can be filed under.
listbox = Listbox(frame)
for index, category in enumerate(('ANT', 'Fact', 'Idea', 'Info', 'Realisation'), start=1):
    listbox.insert(index, category)

def dialog():
    """Show the entered thought and the selected category in a message box."""
    selection = listbox.curselection()
    # Guard against no selection: curselection() returns an empty tuple,
    # which previously made listbox.get() raise.
    category = listbox.get(selection) if selection else ''
    # Bug fix: showinfo takes (title, message); the original passed the whole
    # text as the title. Also restores the missing separator space.
    box.showinfo(appname, 'Thought: ' + entry.get() + ' Class: ' + category)

btn = Button(frame, text='Add content', command=dialog)

# Keep a module-level reference to the image so it is not garbage collected.
img = PhotoImage(file='THORG.gif')
can = Canvas(window, width=100, height=100)
can.create_image((50, 50), image=img)

entry.pack()
frame.pack(padx=20, pady=20)
listbox.pack(padx=5)
btn.pack(padx=5, pady=5)
can.pack(padx=2, pady=2)
# Bug fix: removed `box.pack(...)` — `box` is the tkinter.messagebox *module*,
# which has no pack() method, so the original crashed at startup.

window.mainloop()
|
from django.db import models
# Create your models here.
class Clicks(models.Model):
    """Single-row counter model recording how often the button was clicked."""

    # Running total of clicks; new rows start at zero.
    clicked = models.IntegerField(default=0)

    def __str__(self):
        return f"Button Clicked {self.clicked} times"
|
from math import log2
def cal(l, num):
    """Return the weighted entropy of a candidate split (for ID3-style trees).

    Args:
        l: list of [total, count_a] pairs, one per branch; the second
           class count is derived as total - count_a.
        num: total number of samples across all branches (weight denominator).

    Returns:
        sum over branches of (total/num) * H(count_a/total) where H is the
        binary entropy. Zero-probability outcomes contribute 0 (the
        lim p->0 of p*log2(p)), so callers no longer need the 1e-7
        epsilon hack — though epsilon inputs still give ~identical results.

    Unlike the original, the input lists are not mutated.
    """
    def _plog(p):
        # p * log2(p) with the 0*log2(0) == 0 convention.
        return p * log2(p) if p > 0 else 0.0

    entropy = 0.0
    for branch in l:
        total, count_a = branch[0], branch[1]
        count_b = total - count_a
        entropy -= total / num * (_plog(count_a / total) + _plog(count_b / total))
    return entropy
# Entropy of the root split, written out by hand for reference.
height = -(3 / 8 * (2 / 3 * log2(2 / 3) + 1 / 3 * log2(1 / 3))
           + 2 / 8 * 0
           + 3 / 8 * (2 / 3 * log2(2 / 3) + 1 / 3 * log2(1 / 3)))
print(height)

# Per-attribute branch counts; 1e-7 stands in for zero to avoid log2(0).
he = [[3, 2], [2, 0.0000001], [3, 2]]
bu = [[2, 1], [3, 2], [3, 2]]
lo = [[5, 3], [3, 0.0000001]]
for split in (he, bu, lo):
    print(cal(split, 8))
# with open("blalblb.txt") as f:
# read_file = f.read()
# print(read_file)
##
# print(f.read())
class OurOpen:
    """Minimal re-implementation of open() as a context manager.

    Demonstrates the __enter__/__exit__ protocol; prints on entry and exit
    so the call order is visible.
    """

    def __init__(self, file_path):
        # Path of the file to open on __enter__.
        self._file_path = file_path

    def __enter__(self):
        print('Im enter')
        self.file = open(self._file_path)
        # Bug fix: return the opened handle — the original returned the
        # undefined global name `file`, which raises NameError in Python 3.
        return self.file

    def __exit__(self, exc_type, exc_value, traceback):
        # Always close the handle, even when the with-body raised.
        self.file.close()
        print("I'm Exit")
# Demo of the custom context manager: expects a file named "blabla.txt"
# in the working directory (raises FileNotFoundError otherwise).
with OurOpen("blabla.txt") as f:
    print("Im inside with")
    print(f)
    print(f.read())
|
import re
import json
# https://api.github.com/emojis
# Build a {name: emoji character} table from GitHub's emoji API dump
# (https://api.github.com/emojis). Codepoints are parsed out of each
# image URL, whose last path segment is "<hex>[-<hex>...].png".
with open("emoji.json", "r") as handle:
    raw = json.load(handle)

URL = re.compile(r"/([^/.]+).png")

emojis = {}
for name, url in raw.items():
    found = URL.search(url)
    if not found:
        print(url)
        continue
    try:
        glyph = "".join(chr(int(part, 16)) for part in found[1].split('-'))
    except ValueError:
        # Non-hex segments (custom GitHub images like "octocat") are skipped.
        continue
    emojis[name] = glyph

with open("emoji_tbl.json", "w") as handle:
    json.dump(emojis, handle)
|
import pytest
from apitest.core.helpers import make_directoriable
def test_make_directoriable_ok_case():
    # Spaces become underscores, punctuation is dropped, result is lowercased.
    assert make_directoriable("Hello World guy!") == "hello_world_guy"


def test_make_directoriable_underline():
    # Hyphens normalise to underscores as well.
    assert make_directoriable("Hello-World-guy!") == "hello_world_guy"


def test_make_directoriable_bad_input():
    # None must be tolerated and mapped to the empty string.
    assert make_directoriable(None) == ""
|
from src.loaders.abstract_loader import AbstractVideoLoader
import cv2
from src.models import VideoMetaInfo, FrameInfo, ColorSpace, Frame, FrameBatch
class SimpleVideoLoader(AbstractVideoLoader):
    """Sequentially decodes a video with OpenCV and yields FrameBatch objects."""

    def __init__(self, video_metadata: VideoMetaInfo, *args, **kwargs):
        super().__init__(video_metadata, *args, **kwargs)

    def load(self):
        """Generator over FrameBatch objects of size ``self.batch_size``.

        Honours ``self.offset`` (start frame index), ``self.skip_frames``
        (keep only every Nth frame) and ``self.limit`` (stop after this
        frame index, yielding whatever has accumulated).
        """
        video = cv2.VideoCapture(self.video_metadata.file)
        try:
            video_start = self.offset if self.offset else 0
            video.set(cv2.CAP_PROP_POS_FRAMES, video_start)
            _, frame = video.read()
            frame_ind = video_start - 1
            info = None
            if frame is not None:
                (height, width, channels) = frame.shape
                info = FrameInfo(height, width, channels, ColorSpace.BGR)
            frames = []
            while frame is not None:
                frame_ind += 1
                if self.skip_frames > 0 and frame_ind % self.skip_frames != 0:
                    # Skipped frame: read the next one without batching it.
                    _, frame = video.read()
                    continue
                frames.append(Frame(frame_ind, frame, info))
                if self.limit and frame_ind >= self.limit:
                    # Bug fix: a generator's `return <value>` is invisible to
                    # `for` consumers — yield the final batch instead.
                    yield FrameBatch(frames, info)
                    return
                if len(frames) % self.batch_size == 0:
                    yield FrameBatch(frames, info)
                    frames = []
                _, frame = video.read()
            if frames:
                # Same fix for the trailing partial batch at end of video.
                yield FrameBatch(frames, info)
        finally:
            # Release the capture handle even if the consumer abandons us.
            video.release()
|
# Generated by Django 2.1.2 on 2018-11-14 15:31
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: changes Post.luser to a OneToOneField on UserProfile."""

    dependencies = [
        ('accounts', '0021_auto_20181114_1523'),
    ]

    operations = [
        migrations.AlterField(
            model_name='post',
            name='luser',
            # CASCADE: deleting the UserProfile also deletes its Post.
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='accounts.UserProfile'),
        ),
    ]
|
import unittest
import tempfile
import os
from maruti import sizes
class DeepfakeTest(unittest.TestCase):
    """Smoke tests for the maruti.sizes helpers."""

    def test_byte_to_mb(self):
        # 1 MiB converts to exactly 1; 1 KiB to ~0.0009765624.
        self.assertEqual(sizes.byte_to_mb(1024*1024), 1)
        self.assertAlmostEqual(sizes.byte_to_mb(1024),
                               0.0009765624, delta=1e-8)

    def test_sizes(self):
        # Only verifies the calls do not raise; no return values asserted.
        with tempfile.TemporaryDirectory() as dir:
            # dir test
            sizes.dir_size(dir)
            # NOTE(review): presumably defaults to the current working
            # directory when called with no argument — confirm dir_size()
            # actually accepts zero arguments.
            sizes.dir_size()
            # file test
            with open(os.path.join(dir, 'test_file.txt'), 'w') as f:
                f.write("It's a test")
            sizes.file_size(os.path.join(dir, 'test_file.txt'))
            # var test
            sizes.var_size(dir)
|
import random
class robot:
    """Toy robot whose mood and matching eye color change at random."""

    def __init__(self):
        """Our robot constructer."""
        # Parallel lists: moods[i] always pairs with eyecolors[i].
        self.moods = ["neutral", "angry", "happy", "'kill all humans'"]
        self.eyecolors = ["off", "red", "green", "electric crimson"]
        self.mood = ""
        self.eyecolor = ""

    def changemood(self):
        """Pick a random mood and the eye color that goes with it."""
        # Generalized: derive the bound from the list length instead of the
        # hard-coded 3, so adding a mood needs no further edits.
        r = random.randrange(len(self.moods))
        self.mood = self.moods[r]
        self.eyecolor = self.eyecolors[r]

    def printmood(self):
        """Announce the current mood and matching eye color."""
        print("My mood has changed!")
        print("I am feeling very", self.mood, "right now!")
        print("My eyes are now", self.eyecolor)
def main():
    """Demo driver: show two random mood changes on one robot."""
    kristina = robot()
    for _ in range(2):
        kristina.changemood()
        kristina.printmood()


main()
|
# a116_buggy_image.py
import turtle as trtl
# The turtle carries a descriptive name (spider) and the layout values
# below are named for what they control instead of single letters.
spider = trtl.Turtle()

# Body: a small circle drawn with a very thick pen looks filled in.
spider.pensize(40)
spider.circle(20)

num_legs = 6
leg_length = 70
# Bug fix: spread the legs over a full 360 degrees (was 380, which made
# the spacing uneven and overlapped the last leg with the first).
angle_step = 360 / num_legs

spider.pensize(5)
for leg in range(num_legs):
    spider.goto(0, 0)
    spider.setheading(angle_step * leg)
    spider.forward(leg_length)

spider.hideturtle()
wn = trtl.Screen()
wn.mainloop()
|
# Read an inclusive integer range and an exponent, then print each base
# in the range raised to that power.
valor1 = int(input("primeiro valor: "))
valor2 = int(input("Ultimo valor: "))
valor_potencia = int(input("valor potência: "))

base = valor1
while base <= valor2:
    print("i = {}^{} = {}".format(base, valor_potencia, (base ** valor_potencia)))
    base += 1
from django.contrib import admin
from .models import Media, Platform, Review
# Expose the catalogue models in the Django admin with default ModelAdmins.
admin.site.register(Platform)
admin.site.register(Media)
admin.site.register(Review)
|
"""
Simple Baseline Model for AV-Sync. Organinzed in PyTorch Lightning
Flatten both audio and video features; concat them and feed into sequential linear layers.
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import pytorch_lightning as pl
# from chord_rec.models.seq2seq.Seq2Seq import BaseSeq2Seq, AttnSeq2Seq
# from chord_rec.models.seq2seq.Encoder import BaseEncoder
# from chord_rec.models.seq2seq.Decoder import BaseDecoder, AttnDecoder
class BaseEncoder(pl.LightningModule):
    """The Encoder module of the Seq2Seq model.

    Runs the (already vectorized) input sequence through a multi-layer
    LSTM and returns the per-step outputs plus the final state.
    """

    def __init__(self, input_size, emb_size, encoder_hidden_size, decoder_hidden_size, n_layers, dropout = 0.5):
        super().__init__()
        self.input_size = input_size
        self.emb_size = emb_size
        self.encoder_hidden_size = encoder_hidden_size
        self.decoder_hidden_size = decoder_hidden_size
        self.n_layers = n_layers
        self.recurrent = nn.LSTM(emb_size, encoder_hidden_size, n_layers, dropout = dropout, batch_first=True)
        # The bridge layers below are not used by forward() at the moment
        # (the tanh bridge is disabled); kept for checkpoint compatibility.
        self.linear1 = nn.Linear(encoder_hidden_size, encoder_hidden_size)
        self.relu = nn.ReLU()
        self.linear2 = nn.Linear(encoder_hidden_size, decoder_hidden_size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, input):
        """Encode *input* of shape (batch_size, seq_len, input_size).

        Returns:
            output: LSTM outputs for every time step (fed to the decoder's
                attention).
            (hidden, cell): the final LSTM state, used to seed the decoder.
        """
        output, (hidden, cell) = self.recurrent(input)
        return output, (hidden, cell)
class AttnDecoder(pl.LightningModule):
    """Single-step LSTM decoder with a learned attention over encoder outputs.

    Each forward() call consumes one target token, attends over the encoder
    outputs, and returns vocabulary logits plus the updated LSTM state.
    """

    def __init__(self, emb_size, decoder_hidden_size, output_size, n_layers, max_length, dropout = 0.5):
        super().__init__()
        self.emb_size = emb_size
        self.decoder_hidden_size = decoder_hidden_size
        self.output_size = output_size
        # NOTE(review): self.dropout first holds the float rate, then is
        # rebound to the nn.Dropout module a few lines below — works, but
        # confusing; consider separate names.
        self.dropout = dropout
        self.max_length = max_length
        self.n_layers = n_layers
        self.embedding = nn.Embedding(self.output_size, self.emb_size)
        # Attention scores come from [embedded token ; decoder hidden state],
        # one score per source position up to max_length.
        self.attn = nn.Linear(self.decoder_hidden_size + self.emb_size, self.max_length)
        self.attn_combine = nn.Linear(self.decoder_hidden_size + self.emb_size, self.emb_size)
        self.dropout = nn.Dropout(self.dropout)
        self.recurrent = nn.LSTM(emb_size, decoder_hidden_size, n_layers, dropout = dropout, batch_first=True)
        self.out = nn.Linear(self.decoder_hidden_size, self.output_size)

    def forward(self, input, hidden, encoder_outputs):
        """One decoding step.

        Args:
            input: (batch, 1) current target token ids.
            hidden: LSTM state tuple from the encoder / previous step.
            encoder_outputs: per-step encoder outputs to attend over.

        Returns:
            output: (batch, output_size) vocabulary logits.
            hidden: updated LSTM state tuple.
            attn_weights: (batch, max_length) attention distribution.
        """
        embedded = self.embedding(input)
        embedded = self.dropout(embedded)
        attn_weights = F.softmax(
            self.attn(torch.cat((embedded.squeeze(1), hidden[0][0]), -1)), dim=-1)  # Use the first layer(direction) in hidden
        # Weighted sum of encoder outputs under the attention distribution.
        attn_applied = torch.bmm(attn_weights.unsqueeze(1),
                                 encoder_outputs)
        output = torch.cat((embedded.squeeze(1), attn_applied.squeeze(1)), 1)
        output = self.attn_combine(output).unsqueeze(1)
        output = F.relu(output)
        output, hidden = self.recurrent(output, hidden)
        output = self.out(output.squeeze(1))
        return output, hidden, attn_weights
class AttnSeq2Seq(pl.LightningModule):
    """ The Sequence to Sequence model. """

    def __init__(self, encoder, decoder):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        assert self.encoder.encoder_hidden_size == self.decoder.decoder_hidden_size, \
            "Hidden dimensions of encoder and decoder must be equal!"
        assert self.encoder.n_layers == self.decoder.n_layers, \
            "Encoder and decoder must have equal number of layers!"

    def forward(self, source, target, out_seq_len = None, teacher_forcing = True, start_idx = None):
        """ The forward pass of the Seq2Seq model.

        Args:
            source (tensor): source sequences, (batch_size, seq_len, input_size).
            target (tensor): target token ids, (batch_size, seq_len); index 0
                must hold the <sos> token.
            out_seq_len (int): the maximum length of the output sequence.
                If None, the length is determined by the input sequences.
            teacher_forcing (bool): feed gold tokens back into the decoder
                (True) or the model's own argmax predictions (False).
            start_idx: fill value (usually the <sos> id) for step 0 of the
                output logits.

        Returns:
            (batch_size, seq_len, output_size) logits; step 0 holds the
            fill value, steps 1..seq_len-1 are decoder outputs.
        """
        batch_size = source.shape[0]
        # Bug fix: seq_len was only assigned when out_seq_len was None, so
        # passing an explicit length raised NameError.
        seq_len = out_seq_len if out_seq_len is not None else source.shape[1]
        if start_idx is None:
            start_idx = 0
        outputs = torch.full((batch_size, seq_len, self.decoder.output_size), start_idx, dtype = torch.float)
        encoder_outputs, hidden = self.encoder(source)
        # First input to the decoder is the <sos> token.
        input = target[:, 0].unsqueeze(1)
        for t in range(1, seq_len):
            output, hidden, attn = self.decoder(input, hidden, encoder_outputs)
            outputs[:, t, :] = output
            if teacher_forcing:
                input = target[:, t].unsqueeze(1)
            else:
                input = output.max(1)[1].unsqueeze(1)
        return outputs
class PrintSize(nn.Module):
    """Identity layer that logs the shape of whatever passes through it.

    Useful for debugging nn.Sequential stacks: insert between layers to
    see intermediate tensor shapes without altering the data.
    """

    def __init__(self):
        super().__init__()

    def forward(self, x):
        print(x.shape)
        return x
class LitSeq2Seq(pl.LightningModule):
    """Seq2seq chord-recognition model wrapped as a LightningModule.

    Inputs are pre-vectorized note windows; targets are chord token ids
    from ``chord_vocab``. The teacher-forcing probability is annealed per
    epoch according to ``self.tf_ratios``.
    """

    def __init__(self, vec_size, max_len, chord_vocab, configs):
        super().__init__()
        self.input_size = vec_size
        # TODO: In the future if the inputs are tokens, need to specify this differently
        self.emb_size = vec_size  # For already vectorized input
        self.max_len = max_len
        self.chord_vocab = chord_vocab
        self.output_size = len(chord_vocab.stoi)
        self._init_configs(configs)
        self._init_model()
        self.save_hyperparameters("vec_size", "max_len", "configs")

    def _init_configs(self, configs):
        """Copy model/training hyper-parameters out of the configs object."""
        self.n_layers = configs.model.n_layers
        self.encoder_dropout = configs.model.encoder_dropout
        self.decoder_dropout = configs.model.decoder_dropout
        self.warm_up = configs.training.warm_up
        self.decay_run = configs.training.decay_run
        self.post_run = configs.training.post_run
        # Teacher-forcing schedule: 1.0 for warm_up epochs, linear decay
        # from 0.75 to 0 over decay_run epochs, then 0 for post_run epochs.
        self.tf_ratios = np.hstack((np.full(self.warm_up, 1), np.flip(np.linspace(0, 0.75, self.decay_run)), np.zeros(self.post_run)))
        self.dataset_name = configs.dataset.name
        self.backbone = configs.training.backbone
        self.hidden_size = configs.model.hidden_dim
        self.lr = configs.training.lr
        self.momentum = configs.training.momentum
        self.optimizer_type = configs.training.optimizer_type
        # Padding positions never contribute to the loss.
        self.criterion = nn.CrossEntropyLoss(ignore_index=self.chord_vocab.stoi["<pad>"])
        self.train_acc = pl.metrics.Accuracy()
        self.valid_acc = pl.metrics.Accuracy()
        self.test_acc = pl.metrics.Accuracy()
        self.attn = configs.model.attn

    def _init_model(self):
        """Instantiate the encoder and the (optionally attentional) decoder."""
        self.encoder = BaseEncoder(self.input_size, self.emb_size, self.hidden_size, self.hidden_size, self.n_layers, dropout=self.encoder_dropout)
        if self.attn:
            # Use Attention
            self.decoder = AttnDecoder(self.emb_size, self.hidden_size, self.output_size, self.n_layers, self.max_len, dropout=self.decoder_dropout)
        else:
            # Don't use Attention
            # NOTE(review): BaseDecoder is not defined or imported in this
            # file (its import is commented out at the top), so the
            # attn=False path fails with NameError — confirm the import.
            self.decoder = BaseDecoder(self.emb_size, self.hidden_size, self.output_size, self.n_layers, dropout=self.decoder_dropout)

    def forward(self, source, target, out_seq_len=None, teacher_forcing=True, start_idx=None):
        """Decode chord logits for *source*.

        Args:
            source: (batch, seq_len, input_size) vectorized notes.
            target: (batch, seq_len) gold chord ids; index 0 is <sos>.
            out_seq_len: optional explicit output length (defaults to the
                source sequence length).
            teacher_forcing: feed gold tokens back (True) or the model's own
                argmax predictions (False).
            start_idx: fill value (usually the <sos> id) for step 0.

        Returns:
            (batch, seq_len, output_size) logits; step 0 holds the fill value.
        """
        batch_size = source.shape[0]
        # Bug fix: seq_len was only assigned when out_seq_len was None, so
        # passing an explicit length raised NameError.
        seq_len = out_seq_len if out_seq_len is not None else source.shape[1]
        if start_idx is None:
            start_idx = 0
        outputs = torch.full((batch_size, seq_len, self.decoder.output_size), start_idx, dtype=torch.float).to(self.device)
        encoder_outputs, hidden = self.encoder(source)
        # First input to the decoder is the <sos> token.
        input = target[:, 0].unsqueeze(1)
        for t in range(1, seq_len):
            output, hidden, attn = self.decoder(input, hidden, encoder_outputs)
            outputs[:, t, :] = output
            if teacher_forcing:
                input = target[:, t].unsqueeze(1)
            else:
                input = output.max(1)[1].unsqueeze(1)
        return outputs

    def configure_optimizers(self):
        """Build the optimizer named by configs.training.optimizer_type."""
        if self.optimizer_type == "Adam":
            return torch.optim.Adam(self.parameters(), lr=self.lr)
        elif self.optimizer_type == "AdamW":
            return torch.optim.AdamW(self.parameters(), lr=self.lr)
        elif self.optimizer_type == "SGD":
            return torch.optim.SGD(self.parameters(), lr=self.lr, momentum=self.momentum)
        # Previously fell through to an implicit None; fail loudly instead.
        raise ValueError(f"Unknown optimizer type: {self.optimizer_type}")

    def training_step(self, batch, batch_idx):
        note, chord = batch
        chord = chord.long()
        # Sample teacher forcing with the probability scheduled for this epoch.
        tf = np.random.random() < self.tf_ratios[self.current_epoch - 1]
        prob = self(note, chord, teacher_forcing=tf, start_idx=self.chord_vocab.stoi["<sos>"])
        # CrossEntropyLoss expects (batch, classes, seq).
        prob = prob.permute(0, 2, 1)
        loss = self.criterion(prob, chord)
        self.log("loss", loss, on_epoch=True)
        return loss

    def validation_step(self, batch, batch_idx):
        note, chord = batch
        chord = chord.long()
        tf = False  # Don't use tf for evaluation
        prob = self(note, chord, teacher_forcing=tf, start_idx=self.chord_vocab.stoi["<sos>"])
        prob = prob.permute(0, 2, 1)
        loss = self.criterion(prob, chord)
        self.log("val_loss", loss, on_epoch=True, prog_bar=True)
        preds = prob.detach().cpu().numpy().argmax(axis=1)
        labels = chord.detach().cpu().numpy()
        # Step 0 is never decoded; force it to <sos> so the epoch-end
        # special-token mask drops it.
        preds[:, 0] = np.full(len(preds), self.chord_vocab.stoi["<sos>"])
        return self.vec_decode(preds), self.vec_decode(labels)

    def validation_epoch_end(self, validation_step_outputs):
        """Aggregate decoded predictions; log name/root/quality accuracy."""
        preds, labels = zip(*validation_step_outputs)
        preds = np.vstack(preds)
        labels = np.vstack(labels)
        ### Get chord name accuracy ###
        mask = (preds != "<sos>") & (preds != "<eos>") & (preds != "<pad>")
        masked_preds = preds[mask]
        masked_labels = labels[mask]
        chord_name_acc = np.sum(masked_preds == masked_labels) / len(masked_labels)
        ### Get root and quality acc ###
        # A chord string is "<root> <quality words...>"; split each cell once.
        root_preds = preds.copy()
        quality_preds = preds.copy()
        for r_id in range(preds.shape[0]):
            for c_id in range(preds.shape[1]):
                sp = preds[r_id, c_id].split(' ')
                root_preds[r_id, c_id] = sp[0]
                quality_preds[r_id, c_id] = ' '.join(sp[1:])
        root_labels = labels.copy()
        quality_labels = labels.copy()
        for r_id in range(labels.shape[0]):
            for c_id in range(labels.shape[1]):
                sp = labels[r_id, c_id].split(' ')
                root_labels[r_id, c_id] = sp[0]
                quality_labels[r_id, c_id] = ' '.join(sp[1:])
        mask = (root_preds != "<sos>") & (root_preds != "<eos>") & (root_preds != "<pad>")
        root_preds = root_preds[mask]
        quality_preds = quality_preds[mask]
        # Consistency: was `root_label` (singular) — one name throughout now.
        root_labels = root_labels[mask]
        quality_labels = quality_labels[mask]
        root_acc = np.sum(root_preds == root_labels) / len(root_preds)
        quality_acc = np.sum(quality_preds == quality_labels) / len(quality_preds)
        self.log("val_name_acc", chord_name_acc, on_epoch=True, prog_bar=True)
        self.log("val_root_acc", root_acc, on_epoch=True, prog_bar=True)
        self.log("val_quality_acc", quality_acc, on_epoch=True, prog_bar=True)

    def test_step(self, batch, batch_idx):
        note, chord = batch
        chord = chord.long()
        tf = False  # Don't use tf for evaluation
        prob = self(note, chord, teacher_forcing=tf, start_idx=self.chord_vocab.stoi["<sos>"])
        prob = prob.permute(0, 2, 1)
        loss = self.criterion(prob, chord)
        predictions = torch.argmax(prob, axis=1)
        self.log("test_loss", loss, on_epoch=True, prog_bar=True)
        # Bug fix: accuracy was computed against the undefined name `labels`
        # and the method returned the undefined `test_loss` (NameErrors).
        self.log("test_acc", self.test_acc(predictions, chord), on_epoch=True, prog_bar=True)
        return loss

    def decode(self, x):
        """Map one chord id back to its string token."""
        return self.chord_vocab.itos[x]

    def vec_decode(self, x):
        """Element-wise decode() for numpy arrays of chord ids."""
        return np.vectorize(self.decode)(x)
|
import requests
import json
from mutagen.mp3 import MP3
from mutagen.easyid3 import EasyID3
from mutagen.id3 import ID3, APIC
from auth import getToken
def getSpotify():
    """Fetch track metadata from Spotify and cache it in spotify_results.json.

    Reads the track title/artist from YT_results.json, queries the Spotify
    search endpoint, then the artist endpoint for genre data.

    Returns:
        True if metadata was fetched and written, False on any failure
        (HTTP error, expired token, or missing fields in the response).
    """
    # Spotify Api call for meta data
    token = getToken()
    # Close the results file promptly instead of leaking the handle.
    with open('YT_results.json') as yt_file:
        yt_result = json.load(yt_file)
    track = yt_result['title']
    artist = yt_result['artist']
    # get genre meta data info from spotify -- CAN BE IMPOROVED
    url = f"https://api.spotify.com/v1/search?q={track}%20{artist}&type=track,artist&limit=1"
    headers = {
        'Content-Type': 'application/json',
        'Authorization' : f'Bearer {token}'
    }
    r = requests.get(url, headers=headers)
    if r.status_code != 200:
        # Bug fix: the original fell through after a failed request and then
        # used the undefined name `data`, raising NameError.
        print("there's been an error")
        return False
    data = r.json()
    spotify_results = {}
    if data.get('error'):
        print("Access token has expired!")
        spotify_results['spotify'] = "False"
        return False
    try:
        spotify_results['spotify'] = "True"
        spotify_results['title'] = data['tracks']['items'][0]['name']
        spotify_results['album'] = data['tracks']['items'][0]['album']['name']
        spotify_results['year'] = str(data['tracks']['items'][0]['album']['release_date'])[0:4]
        spotify_results['image'] = data['tracks']['items'][0]['album']['images'][0]['url']
        spotify_results['track_number'] = data['tracks']['items'][0]['track_number']
        spotify_results['total_tracks'] = data['tracks']['items'][0]['album']['total_tracks']
        spotify_results['artist_id'] = data['tracks']['items'][0]['album']['artists'][0]['id']
        # get genre data using artist endpoint
        url = r"https://api.spotify.com/v1/artists/{}".format(spotify_results['artist_id'])
        headers = {
            'Content-Type': 'application/json',
            'Authorization' : 'Bearer {}'.format(token)
        }
        r = requests.get(url, headers=headers)
        genre_data = r.json()
        try:
            spotify_results['genre'] = genre_data['genres'][0]  # gets the first genre
        except IndexError:
            spotify_results['genre'] = ""
        try:
            spotify_results['artist'] = data['artists']['items'][0]['name']
        except IndexError:
            spotify_results['artist'] = data['tracks']['items'][0]['album']['artists'][0]['name']
        with open('spotify_results.json', 'w', encoding='utf-8') as f:
            json.dump(spotify_results, f, ensure_ascii=False, indent=4)
    except Exception as e:
        print("getSpotify: " + str(e))
        print("Skip... ")
        return False
    return True
def spotifyTags(file=None):
    """Write ID3 tags (text fields + cover art) from spotify_results.json.

    Args:
        file: path to the MP3 file to tag.
    """
    # Open results json file from getSP — closed promptly via `with`
    # (the original leaked the handle).
    with open('spotify_results.json') as results_file:
        spotify_tag = json.load(results_file)
    # download cover image
    img_url = spotify_tag['image']
    r = requests.get(img_url, stream=True)
    if r.status_code == 200:
        with open('cover.jpg', 'wb') as f:
            for chunk in r:
                f.write(chunk)
    audio_path = file
    picture_path = 'cover.jpg'
    # Text based tags
    audio = EasyID3(audio_path)
    audio['title'] = spotify_tag['title']
    audio['album'] = spotify_tag['album']
    audio['artist'] = spotify_tag['artist']
    audio['date'] = spotify_tag['year']
    audio['tracknumber'] = str(spotify_tag['track_number'])
    audio['genre'] = spotify_tag['genre']
    audio.save()  # save the current changes
    # Add image — read the cover inside a `with` so the handle is closed
    # (previously leaked via an inline open()).
    image_tag = MP3(audio_path, ID3=ID3)
    with open(picture_path, 'rb') as pic:
        image_tag.tags.add(APIC(mime='image/jpeg', type=3, desc=u'Cover', data=pic.read()))
    image_tag.save()
    print('[SPOTIFY-TAGS: DONE]')
# Manual test entry point: fetch metadata only (tagging needs a file path).
if __name__ == "__main__":
    getSpotify()
|
# -*- coding: utf-8 -*-
'''
from gluon.sql import DAL, Field
db1=DAL('firebird://sysdba:masterkey@127.0.0.1:3050//fdb/erp.fdb',
migrate_enabled=False,
ignore_field_case=True,
entity_quoting=False
)
'''
# Shared date validator: empty or dd/mm/yyyy.
data = IS_NULL_OR(IS_DATE(format=T("%d/%m/%Y")))

# Display maps for single-letter legacy codes.
TIPOCLIENTE = {'J':"Jurídica","F":"Física"}
CLIENTEFINAL = {'S':"Sim","N":"Não"}

# Customer-type lookup (legacy Firebird table; migrations disabled).
Tipos_Clientes = db.define_table('tiposclientes',
    Field('codtip','integer',label='Código:'),
    Field('nomtip','string',label='Nome:',length=50),
    # NOTE(review): this label duplicates the previous field's 'Nome:' —
    # presumably it should describe the end-customer flag; confirm.
    Field('clifin','string',label='Nome:',length=1),
    primarykey = ['codtip'],
    migrate = False,
    format='%(nomtip)s',
)
Tipos_Clientes.clifin.requires = IS_IN_SET(CLIENTEFINAL,zero=None)

# Salesperson lookup table.
Vendedores = db.define_table('vendedores',
    Field('codven','integer',label='Código:'),
    Field('nomven','string',label='Vendedor:',length=50),
    primarykey = ['codven'],
    migrate = False,
    format='%(nomven)s',
)

# Customers, with three address blocks: main (*cli), billing (*cob)
# and delivery (*ent).
Clientes = db.define_table('clientes',
    Field('codcli','integer',label='Código:'),
    Field('nomcli','string',label='Nome:',length=50),
    Field('nomfan','string',label='Nome Fantasia:',length=30),
    Field('fisjur','string',label='Tipo:', length=1),
    Field('emacli','string',label='Email:', length=40),
    Field('telcli','string',label='Fone:', length=40),
    Field('celcli','string',label='Celular:', length=15),
    Field('contat','string',label='Contato:', length=35),
    Field('cgccpf','string',label='Cnpj/Cpf:', length=17),
    Field('insnrg','string',label='IE/RG:', length=17),
    Field('codtip','integer',label='Tipo Cliente:'),
    Field('endcli','string',label='Endereço:',length=50),
    Field('numcli','string',label='Número:',length=10),
    Field('baicli','string',label='Bairro:',length=35),
    Field('cidcli','string',label='Cidade:',length=35),
    Field('estcli','string',label='Estado:',length=2),
    Field('cepcli','string',label='Cep:',length=9),
    Field('endcob','string',label='Endereço:',length=50),
    Field('numcob','string',label='Número:',length=10),
    Field('baicob','string',label='Bairro:',length=35),
    Field('cidcob','string',label='Cidade:',length=35),
    Field('estcob','string',label='Estado:',length=50),
    Field('cepcob','string',label='Cep:',length=9),
    Field('endent','string',label='Endereço:',length=50),
    Field('nument','string',label='Número:',length=10),
    Field('baient','string',label='Bairro:',length=35),
    Field('cident','string',label='Cidade:',length=35),
    Field('estent','string',label='Estado:',length=50),
    Field('cepent','string',label='Cep:',length=9),
    # NOTE(review): the three date labels below look rotated — datcad
    # (cadastro) is labelled 'Fundação:' and datfun (fundação) is
    # labelled 'Atualização:'. Confirm against the legacy schema before
    # changing any of them.
    Field('datcad','date',label='Fundação:'),
    Field('datalt','date',label='Cadastro:'),
    Field('datfun','date',label='Atualização:'),
    primarykey = ['codcli'],
    migrate = False
)
Clientes.fisjur.requires = IS_IN_SET(TIPOCLIENTE,zero=None)
# All three date fields share the dd/mm/yyyy-or-empty validator.
Clientes.datcad.requires = data
Clientes.datalt.requires = data
Clientes.datfun.requires = data
Clientes.codtip.requires = IS_IN_DB(db,'tiposclientes.codtip','%(nomtip)s')
# Supplier lookup table (legacy Firebird table; migrations disabled).
Fornecedores = db.define_table('fornecedores',
    Field('codfor','integer',label='Código:'),
    # NOTE(review): label 'Produto:' looks copied from another table —
    # presumably should read 'Fornecedor:'; left unchanged pending confirmation.
    Field('nomfor','string',label='Produto:',length=50),
    Field('nomfan','string',label='Fantasia:',length=50),
    # Bug fix: the field type was the typo 'string)' (stray parenthesis),
    # which is not a valid DAL type and breaks the table definition.
    Field('fisjur','string',label='Tipo:'),
    primarykey = ['codfor'],
    migrate = False,
    format='%(nomfor)s',
)
# Product group lookup table.
Grupos = db.define_table('grupos',
    Field('codgru','integer',label='Código:'),
    Field('nomgru','string',label='Produto:',length=50),
    primarykey = ['codgru'],
    migrate = False,
    format='%(nomgru)s',
)

# Products, linked to a primary supplier (forpri) and a group (codgru).
Produtos = db.define_table('produtos',
    Field('codpro','integer',label='Código:'),
    Field('nompro','string',label='Produto:',length=50),
    Field('modelo','string',label='Produto:',length=20),
    Field('unipro','string',label='Unidade:',length=2),
    Field('qntest','decimal(12,4)',label='Estoque:'),
    Field('precus','decimal(15,5)',label='Custo:'),
    Field('forpri','integer',label='Fornecedor:'),
    Field('codgru','integer',label='Grupo:'),
    Field('tabela','string',label='Tabela:',length=1),
    primarykey = ['codpro'],
    migrate = False,
    format='%(nompro)s',
)
# Foreign-key style validators against the lookup tables above.
Produtos.forpri.requires = IS_IN_DB(db,'fornecedores.codfor','%(nomfor)s')
Produtos.codgru.requires = IS_IN_DB(db,'grupos.codgru','%(nomgru)s')

# Stock location lookup table.
Local = db.define_table('local',
    Field('codloc','integer',label='Id:'),
    Field('locest','string',label='Fornecedor:',length=20),
    primarykey = ['codloc'],
    migrate = False,
    format='%(locest)s',
)
#Clientes.codcli.writable = False
#Clientes[None] = dict(codcli=22579, nomcli= 'teste2')
|
from flask import Flask, render_template

# WSGI application object; module-level so the route decorator can use it.
serious1 = Flask(__name__)
@serious1.route("/bmi/<int:weight>/<int:height>")
def bmi(weight, height):
    """Render bmi.html for *weight* (kg) and *height* (cm)."""
    bmi = weight / (height/100)**2
    # Category bands, checked from the lowest upper bound upwards; anything
    # at or above the last bound is obese.
    bands = (
        (16, 'Severely underweight'),
        (18.5, 'Underweight'),
        (25, 'Normal'),
        (30, 'Overweight'),
    )
    a = 'Obese'
    for upper_bound, label in bands:
        if bmi < upper_bound:
            a = label
            break
    # return "<p>BMI = {0}<p><p>Condition: {1}<p>".format(bmi, a)
    return render_template("bmi.html", bmi=bmi, a=a)
# Run the development server (with debug reloader) when executed directly.
if __name__ == "__main__":
    serious1.run(debug=True)
from util import *
def parse(code, references):
    """Translate simple "name[-> offset] += value" statements into Brainfuck.

    Args:
        code: ';'-separated statements, e.g. "x += 3", "x -= y",
            "x -> 2 += 5" (the arrow addresses cells after the reference).
        references: dict mapping variable names to their tape-cell indices.

    Returns:
        One line of Brainfuck per statement, each returning the head to
        cell 0 afterwards.

    NOTE(review): relies on stripAll from the star-imported `util` module.
    If the left-hand name is missing from `references`, `reference` is
    used unbound further down — confirm inputs are always well-formed.
    """
    statements = code.split(';')
    output = ''
    for statement in statements:
        # Tracks how far right the tape head has moved for this statement
        # so it can be walked back to cell 0 at the end.
        cellPointer = 0
        statement = statement.strip()
        op = '+'
        splitByPlusOrMinusEquals = stripAll(statement.split('+='))
        if len(splitByPlusOrMinusEquals) <= 1:
            # No '+=' present: re-split assuming a '-=' statement.
            op = '-'
            splitByPlusOrMinusEquals = stripAll(statement.split('-='))
        # "name -> n" addresses the n-th cell after the named reference.
        splitByArrow = stripAll(splitByPlusOrMinusEquals[0].split('->'))
        if splitByArrow[0] in references.keys():
            reference = references[splitByArrow[0]]
            reference = reference + (int(splitByArrow[1]) if len(splitByArrow) > 1 else 0)
            cellPointer += reference
            output += '>' * reference
        if len(splitByPlusOrMinusEquals) > 1:
            try:
                # Literal operand: emit one mark per unit.
                # NOTE(review): always emits '+' even when op is '-' —
                # looks like it should use `op` here; confirm intent.
                output += '+' * int(splitByPlusOrMinusEquals[1])
            except ValueError:
                # Operand is another reference: loop-transfer its cell
                # into the target cell ([- < op > ] pattern).
                reference2 = references[splitByPlusOrMinusEquals[1]]
                diff = reference2 - reference
                cellPointer += diff
                output += '>' * diff + '[-' + '<' * diff + op + '>' * diff + ']'
        #output +=
        # Walk the head back to cell 0 and terminate the statement's line.
        output += '<' * cellPointer
        output += '\n'
    return output
"""
Reverse a string
"""
def reverseString(string):
    """Return the characters of *string* in reverse order."""
    result = ""
    for ch in string:
        # Prepending each character reverses the sequence.
        result = ch + result
    return result
# Quick demo when run as a script.
if __name__ == '__main__':
    string = "hello world"
    print(reverseString(string))
|
# This is the message sender
import os
from socket import *
from fractions import gcd
from random import randrange
from collections import namedtuple
from math import log
from binascii import hexlify, unhexlify
from threading import Thread
import tkinter
# LZW Compression
def compress(uncompressed):
    """Compress a string to a list of output symbols."""
    # Seed the dictionary with one entry per single character (codes 0-255).
    table = {chr(code): code for code in range(256)}
    next_code = 256
    prefix = ""
    result = []
    for symbol in uncompressed:
        candidate = prefix + symbol
        if candidate in table:
            # Keep extending the current match.
            prefix = candidate
        else:
            # Emit the longest known prefix and register the new string.
            result.append(table[prefix])
            table[candidate] = next_code
            next_code += 1
            prefix = symbol
    # Flush the last pending match (empty only for empty input).
    if prefix:
        result.append(table[prefix])
    return result
KeyPair = namedtuple('KeyPair', 'public private')
Key = namedtuple('Key', 'exponent modulus')


def encode(msg, pubkey, verbose=False):
    """RSA-encode *msg* under *pubkey*, returning the ciphertext as bytes.

    The message is processed in fixed-size chunks (as many whole bytes as
    fit below the key modulus); each chunk is zero-padded, interpreted as
    a big-endian integer, raised to the public exponent modulo the
    modulus, and serialized as a fixed-width hex block.
    """
    chunksize = int(log(pubkey.modulus, 256))
    outchunk = chunksize + 1
    outfmt = '%%0%dx' % (outchunk * 2,)
    bmsg = msg.encode()
    pieces = []
    for start in range(0, len(bmsg), chunksize):
        block = bmsg[start:start + chunksize]
        # Zero-pad the (possibly short) final chunk to full width.
        block += b'\x00' * (chunksize - len(block))
        numeric = int(hexlify(block), 16)
        ciphered = pow(numeric, *pubkey)
        pieces.append(unhexlify((outfmt % ciphered).encode()))
    return b''.join(pieces)
def send(event=None):
    """Compress, RSA-encode and UDP-send the message from the entry field.

    Bound both to the Send button and the <Return> key (hence the unused
    *event* parameter). Uses the module-level my_msg, pubKeyReceived,
    UDPSock and address set up in the __main__ block. Typing 'exit'
    closes the socket and terminates the process.
    """
    data = my_msg.get()
    my_msg.set("")
    if not data:
        data = "Send a correct message"
    if data == "exit":
        UDPSock.close()
        os._exit(0)
    msgCompressed = compress(data)
    print(msgCompressed)
    # Serialize the LZW code list as comma-separated decimal text so it
    # can be fed to the string-based RSA encoder.
    listToString = ""
    for i, item in enumerate(msgCompressed):
        if i:
            listToString = listToString + ','
        listToString = listToString + str(item)
    msgCoded = encode(listToString, pubKeyReceived, 1)
    print(msgCoded)
    UDPSock.sendto(msgCoded, address)
if __name__ == '__main__':
    # Set the ip from the receiver
    host = "192.168.100.9"
    port = 13007
    address = (host, port)

    # Listen just one time to receive public keys
    hostTemp = ""
    portTemp = 13004
    bufTemp = 1024
    addressTemp = (hostTemp, portTemp)
    UDPSockTemp = socket(AF_INET, SOCK_DGRAM)
    UDPSockTemp.bind(addressTemp)
    # Blocks until the receiver sends its RSA public key as one
    # "exponent,modulus" UDP datagram.
    (dataKeyTemp, addressTemp) = UDPSockTemp.recvfrom(bufTemp)
    data2 = dataKeyTemp.decode('utf-8')
    exponent, modulus = data2.split(",")
    exponent = int(exponent)
    modulus = int(modulus)
    pubKeyReceived = Key(exponent, modulus)
    print("Key received")
    UDPSockTemp.close()
    # Stops listening

    # Socket used by send() for all outgoing messages.
    UDPSock = socket(AF_INET, SOCK_DGRAM)

    # Minimal Tk UI: one entry bound to <Return> plus a Send button.
    top = tkinter.Tk()
    top.title("Jorge Espinosa Lara")
    top.geometry("500x500")
    my_msg = tkinter.StringVar()  # For the messages to be sent.
    my_msg.set("Type your messages here.")
    entry_field = tkinter.Entry(top, textvariable=my_msg)
    entry_field.bind("<Return>", send)
    entry_field.pack()
    send_button = tkinter.Button(top, text="Send", command=send)
    send_button.pack()
    tkinter.mainloop()
|
# MIT License
#
# Copyright (c) 2016-2023 AnonymousDapper
#
from __future__ import annotations
import asyncio
import sys
from pathlib import Path
from typing import Literal, Optional, Union
import aiohttp
import arrow
import discord
import mystbin
import tomli
from discord.ext import commands
from cogs.utils import logger
from cogs.utils.colors import Colorize as C
from cogs.utils.sql import SQL, Emote
clogger = logger.get_console_logger("snake")

# Attempt to load uvloop for improved event loop performance
try:
    import uvloop
except ModuleNotFoundError:
    clogger.warn("Can't find uvloop, defaulting to standard policy")
else:
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
    clogger.info("Using uvloop policy")

# Debug mode is enabled by a "-d" flag anywhere on the command line.
_DEBUG = any(arg.lower() == "-d" for arg in sys.argv)


def _read_config(filename):
    """Parse a TOML file and return its contents as a dict."""
    # tomli requires the file opened in binary mode.
    with open(filename, "rb") as cfg:
        return tomli.load(cfg)


# Credentials are read once at import time; missing file raises here.
_CREDS = _read_config("credentials.toml")
logger.set_level(debug=_DEBUG)
# Owner-only maintenance cog: shutdown, cog (extension) management and
# application-command tree syncing. Comments are used instead of docstrings
# here because discord.py surfaces command docstrings as user-facing help text.
class Builtin(commands.Cog):
    def __init__(self, bot: SnakeBot):
        self.bot = bot

    @commands.command(name="quit", brief="exit bot", aliases=["×"])
    @commands.is_owner()
    async def quit_command(self, ctx: commands.Context):
        # Close external resources (mystbin, HTTP session, DB) before the bot itself.
        await self.bot.myst_client.close()
        await self.bot.aio_session.close()
        await self.bot.db.close()
        await self.bot.close()

    @commands.group(
        name="cog", brief="manage cogs", invoke_without_command=True, aliases=["±"]
    )
    @commands.is_owner()
    async def manage_cogs(self, ctx: commands.Context, name: str, action: str):
        # Bare group invocation; the real work happens in the subcommands below.
        print("cogs")

    @manage_cogs.command(name="load", brief="load cog", aliases=["^"])
    @commands.is_owner()
    async def load_cog(self, ctx: commands.Context, name: str):
        # Load cogs.<name>; "unknown" reaction when it is already loaded.
        cog_name = "cogs." + name.lower()
        if self.bot.extensions.get(cog_name) is not None:
            await self.bot.post_reaction(ctx.message, unknown=True)
        else:
            try:
                await self.bot.load_extension(cog_name)
            except Exception as e:
                await ctx.send(f"Failed to load {name}: [{type(e).__name__}]: `{e}`")
            else:
                await self.bot.post_reaction(ctx.message, success=True)

    @manage_cogs.command(name="unload", brief="unload cog", aliases=["-"])
    @commands.is_owner()
    async def unload_cog(self, ctx: commands.Context, name: str):
        # Unload cogs.<name>; "unknown" reaction when it is not loaded.
        cog_name = "cogs." + name.lower()
        if self.bot.extensions.get(cog_name) is None:
            await self.bot.post_reaction(ctx.message, unknown=True)
        else:
            try:
                await self.bot.unload_extension(cog_name)
            except Exception as e:
                await ctx.send(f"Failed to unload {name}: [{type(e).__name__}]: `{e}`")
            else:
                await self.bot.post_reaction(ctx.message, success=True)

    @manage_cogs.command(name="reload", brief="reload cog", aliases=["*"])
    @commands.is_owner()
    async def reload_cog(self, ctx: commands.Context, name: str):
        # Reload = unload followed by load, so module-level code runs again.
        cog_name = "cogs." + name.lower()
        if self.bot.extensions.get(cog_name) is None:
            await self.bot.post_reaction(ctx.message, unknown=True)
        else:
            try:
                await self.bot.unload_extension(cog_name)
                await self.bot.load_extension(cog_name)
            except Exception as e:
                await ctx.send(f"Failed to reload {name}: [{type(e).__name__}]: `{e}`")
            else:
                await self.bot.post_reaction(ctx.message, success=True)

    @manage_cogs.command(name="list", brief="list loaded cogs", aliases=["~"])
    @commands.is_owner()
    async def list_cogs(self, ctx: commands.Context, name: Optional[str] = None):
        # Without a name: list all loaded extensions. With a name: react
        # success/failure depending on whether cogs.<name> is loaded.
        if name is None:
            await ctx.send(
                f"Currently loaded cogs:\n{' '.join('`' + cog_name + '`' for cog_name in self.bot.extensions)}"
                if len(self.bot.extensions) > 0
                else "No cogs loaded"
            )
        else:
            if self.bot.extensions.get("cogs." + name) is None:
                await self.bot.post_reaction(ctx.message, failure=True)
            else:
                await self.bot.post_reaction(ctx.message, success=True)

    @commands.command(name="sync", brief="sync slash commands", aliases=["§"])
    @commands.guild_only()
    @commands.is_owner()
    async def sync_tree(
        self,
        ctx: commands.Context,
        guilds: commands.Greedy[discord.Object],
        spec: Optional[Literal["~", "*", "^"]] = None,
    ):
        # No guilds given: "~" syncs the current guild, "*" copies the global
        # commands to the current guild then syncs it, "^" clears the current
        # guild's commands, and no spec syncs the global tree.
        if not guilds:
            if spec == "~":
                synced = await self.bot.tree.sync(guild=ctx.guild)
            elif spec == "*":
                self.bot.tree.copy_global_to(guild=ctx.guild)  # type: ignore
                synced = await self.bot.tree.sync(guild=ctx.guild)
            elif spec == "^":
                self.bot.tree.clear_commands(guild=ctx.guild)
                await self.bot.tree.sync(guild=ctx.guild)
                synced = []
            else:
                synced = await self.bot.tree.sync()
            await ctx.send(
                f"Synced {len(synced)} commands {'globally' if spec is None else 'to ' + ctx.guild.name}"
            )
            return
        # Explicit guild list: sync each one, counting the ones that succeed.
        ret = 0
        for guild in guilds:
            try:
                await self.bot.tree.sync(guild=guild)
            except discord.HTTPException:
                pass
            else:
                ret += 1
        await ctx.send(f"Synced global tree to {ret}/{len(guilds)}.")
class SnakeBot(commands.Bot):
    """Main bot class: wires together config, database, HTTP clients and cogs."""

    def __init__(self, *args, **kwargs):
        self.debug = _DEBUG
        # NOTE(review): asyncio.get_event_loop() is deprecated outside a
        # running loop on newer Python versions; kept as-is for compatibility.
        self.loop = asyncio.get_event_loop()
        self.log = clogger
        self.config = _read_config("config.toml")
        self.db = SQL(db_file=Path(self.config["SQLite"]["file_path"]))
        # Load credentials
        self.token = _CREDS["Discord"]["token"]
        self.start_time = None
        self.resume_time = None
        help_cmd = commands.DefaultHelpCommand(
            command_attrs=dict(hidden=True),
        )
        # Init superclass
        super().__init__(
            *args,
            **kwargs,
            help_command=help_cmd,
            description="\nHsss!\n",
            command_prefix=self.get_prefix,  # type: ignore
            intents=discord.Intents.all(),
        )
        self.boot_time = arrow.utcnow()

    async def setup_hook(self):
        """Async initialisation: database, HTTP session, mystbin client, cogs."""
        await self.db._setup()
        self.aio_session = aiohttp.ClientSession()
        self.myst_client = mystbin.Client(session=self.aio_session)
        await self.add_cog(Builtin(self))
        # Auto-load every non-private .py module in cogs/.
        for file in Path("cogs/").iterdir():
            if (
                file.is_file()
                and file.suffix == ".py"
                and not file.stem.startswith("_")
            ):
                stem = file.stem
                try:
                    await self.load_extension(f"cogs.{stem}")
                except Exception as e:
                    self.log.warn(
                        f"Failed to load cog {C(stem).bright_red()}: [{type(e).__name__}]: {e}",
                        exc_info=True,
                    )
                else:
                    self.log.info(f"Loaded cog {C(stem).green()}")

    # Post a reaction indicating command status
    async def post_reaction(
        self, message: discord.Message, emoji: Optional[Emote] = None, **kwargs
    ):
        """React to *message* with *emoji*, or with a status emoji picked from
        the success/failure/warning/unknown keyword flags.

        Falls back to sending the emoji as a plain message when the reaction
        fails (missing permissions etc.), unless quiet=True.
        """
        reaction_emoji = ""
        if emoji is None:
            if kwargs.get("success"):
                reaction_emoji = "\N{WHITE HEAVY CHECK MARK}"
            elif kwargs.get("failure"):
                reaction_emoji = "\N{CROSS MARK}"
            elif kwargs.get("warning"):
                reaction_emoji = "\N{WARNING SIGN}\N{VARIATION SELECTOR-16}"
            elif kwargs.get("unknown"):
                reaction_emoji = "\N{BLACK QUESTION MARK ORNAMENT}"
            else:
                reaction_emoji = "\N{NO ENTRY}"
        else:
            reaction_emoji = emoji
        try:
            await message.add_reaction(reaction_emoji)
        except Exception:
            if not kwargs.get("quiet"):
                await message.channel.send(str(reaction_emoji))

    async def get_prefix(self, message: discord.Message):
        """Return the command prefixes; owners get two extra shorthand prefixes."""
        prefixes = [self.config["General"]["default_prefix"]] + self.config[
            "General"
        ].get("extra_prefixes", [])
        if await self.is_owner(message.author):
            prefixes += ["s ", "Σ "]
        return prefixes

    async def on_ready(self):
        """Record start time, log the login banner and set the presence."""
        self.start_time = arrow.utcnow()
        boot_duration = self.start_time.humanize(self.boot_time)
        self.log.info(
            f"Logged in as {C(self.user.name).yellow()}{C(' DEBUG MODE').bright_magenta() if self.debug else ''}\nLoaded {C(boot_duration).cyan()}"
        )
        act = discord.Activity(name="for ~help", type=discord.ActivityType.watching)
        await self.change_presence(activity=act)

    async def on_resume(self):
        """Record resume time and log how long the bot had been up."""
        self.resume_time = arrow.utcnow()
        boot_duration = self.resume_time.humanize(
            self.start_time,
        )
        # BUG FIX: the original string contained "\Resumed" (a literal
        # backslash + "Resumed") instead of the intended "\n" newline escape.
        self.log.info(
            f"Resumed as {C(self.user.name).yellow()}{C(' DEBUG MODE').bright_magenta() if self.debug else ''}\nResumed {C(boot_duration).cyan()}"
        )

    # Message handler to block bots
    async def on_message(self, message: discord.Message):
        if not message.author.bot:
            await self.process_commands(message)

    # Reaction added
    async def on_reaction_add(
        self, reaction: discord.Reaction, user: Union[discord.User, discord.Member]
    ):
        """Reaction shortcuts: the open-circle-arrows emoji re-runs the
        reacted-to command message; the wastebasket deletes the bot's own
        message."""
        if user != self.user and not reaction.is_custom_emoji():
            message = reaction.message
            if (
                reaction.emoji
                == "\N{CLOCKWISE RIGHTWARDS AND LEFTWARDS OPEN CIRCLE ARROWS}"
                and message.author == user
            ):
                await self.on_message(message)
            elif (
                reaction.emoji == "\N{WASTEBASKET}\N{VARIATION SELECTOR-16}"
                and message.author == self.user
            ):
                await message.delete()
def main():
    """Entry point: build the bot and run it with its configured token."""
    snake = SnakeBot()
    snake.run(snake.token)


if __name__ == "__main__":
    main()
|
def mul(values=(3, 5, 6, 7, -8)):
    """Print and return the product of *values*.

    The original hard-coded the sample list, shadowed the function name
    with a local variable, and returned nothing; this keeps the original
    zero-argument behaviour while accepting any iterable of numbers.
    """
    product = 1
    for value in values:
        product *= value
    print(product)
    return product


mul()
# Expected syntax for each row:
# - Labels come first; in theory there can be more than one.
# - The op-code comes after the labels.
# - Operands come last.
# - Defined labels must end with ":"
# -
#
# TODO: /////////////////////////////////////////////////////////////////
# Fix so that we append to the list instead of 'sometimes' returning it.
# addr_mode is appended last to the token list until we build the
# Instruction in parse_line. WHY IS IT CALLED LABEL_LIST?
# /////////////////////////////////////////////////////////////////////////
import os
numeric = '0123456789'  # decimal digit characters
alpha = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'  # upper-case ASCII letters
class Instruction():
    """A parsed assembly instruction: its tokens, addressing mode and row."""

    def __init__(self, tokens=None, addr_mode=None, row=None):
        # BUG FIX: the original default `tokens=[]` was a single shared list,
        # so every Instruction built without an explicit token list mutated
        # the same object. Use None as the sentinel and allocate per instance.
        self.tokens = [] if tokens is None else tokens
        self.addr_mode = addr_mode
        self.row = row
class Number():
    """Token holding a numeric literal (hex digit string) and its line."""

    # Class-level defaults, mirroring the sibling token classes.
    name = None
    line = None
    adr_mode = None

    def __init__(self, name="", line=-1):
        # Instance values shadow the class-level defaults above.
        self.name, self.line = name, line
class Label():
    """Token for a label name (the part before ':') and its line."""

    # Class-level defaults, mirroring the sibling token classes.
    name = None
    line = None

    def __init__(self, name="", line=-1):
        # Instance values shadow the class-level defaults above.
        self.name, self.line = name, line
class Operator():
    """Token for an op-code mnemonic and its line."""

    # Class-level defaults, mirroring the sibling token classes.
    name = None
    line = None

    def __init__(self, name="", line=-1):
        # Instance values shadow the class-level defaults above.
        self.name, self.line = name, line
# ---------------------------------
# Parse functions, one for each type.
# ---------------------------------
def parse_line(line):
    """Parse one source line: leading labels first, then the operator.

    Prints an error when no known op-code follows the labels, then dumps
    the names of the tokens collected so far (debug output).
    """
    tokens, rest = parse_label(line,label_list = [])
    rest = parse_operator(rest,tokens)
    if not rest:
        print("ERROR: OP NOT FOUND")
    for elem in tokens:
        print(elem.name)
# Parse label returns given label and the rest of the line.
# Parse label returns given label and the rest of the line.
def parse_label(line, label_list):
    """Strip leading "name:" labels off *line*, accumulating Label tokens.

    Recurses while the leading word is a non-op-code followed by ':'.
    Returns (label_list, remainder) with the first non-label word
    re-attached to the remainder.
    """
    result, rest = read(line, str.isalpha)
    # BUG FIX: guard `rest` before indexing — a line that ends right after
    # the alpha run left `rest` empty and `rest[0]` raised IndexError.
    if not isOP(result) and rest and rest[0] == ':':
        label_list.append(Label(result))
        return parse_label(rest[1:].strip(), label_list)
    else:
        return (label_list, result + " " + rest)
def parse_operator(line, label_list):
    """Consume one op-code mnemonic from *line*, appending it to *label_list*.

    Returns the remainder of the line on success, or None when the leading
    word is not a known op-code.
    """
    word, remainder = read(line, str.isalpha)
    if not isOP(word):
        return None
    label_list.append(Operator(word))
    return remainder
def parse_number(line, label_list):
    """Parse a leading numeric literal ('$' prefix = hex, otherwise decimal).

    Returns (Number, rest) on success or (None, rest) on failure. The
    Number's name holds the hex digit string without a '0x' prefix.
    """
    isDeci = True
    # Guard the empty line before peeking at line[0].
    if line and line[0] == '$':
        line = line[1:]
        isDeci = False
    result, rest = read(line, isHex)
    if not result:
        print('Not a number')
        return (None, rest)
    if isDeci:
        # BUG FIX: the original wrote hex(int(result)[2:]), which subscripts
        # the int and raises TypeError; the slice belongs on hex()'s result
        # to drop the '0x' prefix.
        result = hex(int(result))[2:]
    return (Number(result), rest)
# ---------------------------------
# Parse addr_mode, returns False or tokens if correct
# ---------------------------------
def parse_operand(line, label_list):  # returns addr_mode & remaining tokens
    """Try each addressing-mode parser in turn; return (tokens, addr_mode).

    NOTE(review): only parse_immediate is implemented — the other
    addressing-mode parsers are still commented out below, so the original
    func_list raised NameError as soon as this function ran. Extend the
    list as the parsers get implemented. Returns None when no parser
    matches (original behaviour: implicit fall-through).
    """
    func_list = [parse_immediate]
    for func in func_list:
        # BUG FIX: the original referenced an undefined `line_list`; the
        # accumulating token list is the `label_list` parameter.
        result, addr_mode = func(line, label_list)
        if result:
            return (label_list, addr_mode)
def parse_immediate(line, line_list):
    """Parse an immediate operand: '#' followed by a number.

    Appends the parsed Number to *line_list* and returns (True, 0) on
    success (0 = immediate addressing mode), (False, -1) otherwise.
    """
    # BUG FIX: the original fell off the end (returning None) when the line
    # did not start with '#', which crashed the caller's tuple unpacking.
    if not line or line[0] != '#':
        return (False, -1)
    # BUG FIX: the original passed an undefined name `lista` here.
    result, rest = parse_number(line[1:], line_list)
    if not result:
        # BUG FIX: the original condition `not result and rest` let an empty
        # remainder slip through and appended None to the token list.
        print('immediate ERROR')
        return (False, -1)
    line_list.append(result)
    return (True, 0)
#def parse_abs(line):
#def parse_zp_abs(line):
#def parse_abs_index(line):
#def parse_indirect(line):
#def parse_zp_indirect
#def parse_index_indirect(line):
#def parse_indirect_index(line):
def isOP(string):
    """Return True when *string* is a recognised op-code mnemonic."""
    # Set membership instead of a list scan; same mnemonics as before.
    return string in {
        'adc', 'and', 'asl', 'bit', 'clc', 'clv',
        'cmp', 'dex', 'eor', 'inx', 'jmp', 'jsr',
        'lda', 'lsr', 'bcs', 'beq', 'bmi', 'bne',
        'bpl', 'bvs', 'nop', 'ora', 'pha', 'pla',
        'rts', 'sbc', 'sec', 'sta', 'tax', 'txa',
        'tay', 'tya',
    }
def isHex(char):
    """Return True when *char* is a hexadecimal digit (lowercase a-f only)."""
    return char in '0123456789abcdef'
# Returns a tuple whose first element is the leading part of line for which func is true.
def read(line, func):
    """Split *line* into (prefix, rest).

    *prefix* is the longest leading run of characters for which *func* is
    true; *rest* is the remainder with surrounding whitespace stripped.
    """
    taken = 0
    for ch in line:
        if not func(ch):
            break
        taken += 1
    return (line[:taken], line[taken:].strip())
# Returns a list containing each line of _file.
# Returns a list containing each line of _file.
def read_file(_file):
    """Return every line of *_file*, whitespace-stripped and lower-cased."""
    with open(_file) as open_file:
        return [raw.strip().lower() for raw in open_file]
def main():
    """Parse every line of the sample source file "asm3", echoing each line."""
    a = read_file("asm3")
    for line in a:
        parse_line(line)
        # CONSISTENCY FIX: the rest of this file uses the function-call form
        # print(...); the original bare `print line` was Python-2-only.
        print(line)

if __name__=="__main__":
    main()
|
t = int(input("Digite uma temperatura em graus Celsius: "))
def loop_for():
    """Print a Celsius-to-Fahrenheit table for t-10 .. t+10 using a for loop.

    Relies on the module-level temperature *t*.
    """
    for c in range(t-10, t+11):
        f = c * 1.8 + 32
        # FIX: "Farenheit" -> "Fahrenheit"; %d replaces the obscure %.d form.
        print("%dº Celsius = %dº Fahrenheit" % (c, f))
def loop_while():
    """Print a Celsius-to-Fahrenheit table for t-10 .. t+10 using a while loop.

    Relies on the module-level temperature *t*.
    """
    c = t - 10
    while c <= t + 10:
        f = c * 1.8 + 32
        # FIX: "Farenheit" -> "Fahrenheit"; %d replaces the obscure %.d form.
        print("%dº Celsius = %dº Fahrenheit" % (c, f))
        c += 1
# Print the conversion table twice, once with each loop construct.
print("----------Usando WHILE:----------")
loop_while()
print("----------Usando FOR:----------")
loop_for()
|
# coding: utf-8
#!/usr/bin/env python
from torneira.controller import BaseController, render_to_extension
from twittface.models.usuario import Usuario
from torneira.core.meta import TorneiraSession
from sqlalchemy.orm.exc import NoResultFound
from tornado.web import HTTPError
import tweepy
import math
import settings
import logging
class LoginController(BaseController):
    """Twitter OAuth login flow: sign-in page, OAuth dance, and logout.

    SECURITY NOTE(review): the Twitter consumer key/secret are hard-coded
    twice below; they belong in settings / secret storage, not in source.
    """

    def index(self, request_handler):
        # Landing page with the "sign in with Twitter" link.
        return self.render_to_template("login.html")

    def oauth(self, request_handler):
        # Start the OAuth dance: fetch a request token, stash it in a secure
        # cookie and redirect the user to Twitter's authorization page.
        auth = tweepy.OAuthHandler("5dMcC3yYelEVwQykbsitcA","63g7kzmNdJX25qVuz51RMUFXCwiJ7DKaeoMn3fLmlQ", "http://twittface.local:8080/login/oauth_callback")
        redirect_url = auth.get_authorization_url()
        request_handler.set_secure_cookie(name="OAUTH_TOKEN", value=str("%s|%s" % (auth.request_token.key, auth.request_token.secret)), path="/", expires_days=1)
        request_handler.redirect(redirect_url)
        return

    def oauth_callback(self, request_handler, **kw):
        # Twitter redirects back here: exchange request token + verifier for
        # an access token, then look up (or create) the matching local user.
        auth = tweepy.OAuthHandler("5dMcC3yYelEVwQykbsitcA","63g7kzmNdJX25qVuz51RMUFXCwiJ7DKaeoMn3fLmlQ")
        request_token = request_handler.get_secure_cookie("OAUTH_TOKEN").split("|")
        auth.set_request_token(request_token[0], request_token[1])
        auth.get_access_token(kw.get('oauth_verifier'))
        api = tweepy.API(auth)
        user_twitter = api.me()
        session = TorneiraSession()
        try:
            usuario = session.query(Usuario).filter(Usuario.id_twitter==int(user_twitter.id)).one()
        except NoResultFound:
            # First login: create the local user from the Twitter profile.
            usuario = Usuario()
            usuario.id_twitter = user_twitter.id
            usuario.image_url = user_twitter.profile_image_url
            usuario.login = user_twitter.screen_name
            usuario.save()
        # Session cookie carrying the local user id; no expiry (session cookie).
        request_handler.set_secure_cookie(name="TWITTFACE_ID", value=str(usuario.id), path="/", expires_days=None)
        request_handler.redirect("/")
        return

    def logout(self, request_handler):
        # Drop every cookie (including the login cookie) and go home.
        request_handler.clear_all_cookies()
        request_handler.redirect("/")
        return
# List slicing: seq[start:stop:step] returns a new list.
lists = ['start', 1, 2, 3, 4, 5, 6, 7, 8, 9, 'end']
print(lists[:])
print(lists[0:5])
print(lists[6:8])
print(lists[:6])
print(lists[5:])
print(lists[-1])
print(lists[-6:-1])
print(lists[0:10:2]) # [starting_index : ending_index : increment]
print(lists[::-1]) # reverse showing
print("\nString Slicing:")
# The identical slice syntax works on strings (on any sequence type).
string = 'Python is a great language.'
print(string[:])
print(string[0:5])
print(string[6:8])
print(string[:6])
print(string[5:])
print(string[-1])
print(string[-6:-1])
print(string[0:10:2]) # [starting_index : ending_index : increment]
print(string[::-1]) # reverse showing
print('\nUsing Slice object')
pyString = 'Python'
# contains indices (-1, -2, -3)
# i.e. n, o and h
# slice(start, stop, step) builds a reusable slice object.
sObject = slice(-1, -4, -1)
print(pyString[sObject])
|
import nltk
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
import pymorphy2
# Shared morphological analyser for Russian words (used by diagram()).
morph = pymorphy2.MorphAnalyzer()
#print (stopwords.words("russian"))
#ru_stops = set(stopwords.words('russian'))
#ru_stops.add(".");
#ru_stops.add(",");
#ru_stops.add(")");
#ru_stops.add("(");
stops = ['%']
stops_s = ['\x02']
stops_for_n = ['(', ')', '-', '–', ':', '.']
# Unit/measure words that may follow a number and form its "legend".
legend_words = ['тыс.', 'тыс', '%', 'млн.', 'млн', 'млрд', 'млрд.', 'В', 'Гц', 'дБ', 'см', 'м', 'км']


def legend_f(list_f1, sentence, result):
    """Collect the unit words following each number position in *list_f1*.

    For every index in *list_f1*, the run of legend_words immediately after
    that position in *sentence* is joined (each word followed by a space).
    The list of these strings — one per position, possibly empty — is
    appended to *result* as a single element.
    """
    legend = []
    for pos in list_f1:
        parts = []
        for word in sentence[pos + 1:]:
            if word not in legend_words:
                break
            parts.append(word)
        legend.append("".join(w + " " for w in parts))
    result.append(legend)
def diagram(text):
    """Extract groups of numbers (candidate chart data) from Russian *text*.

    The text is tokenised into sentences and words; runs of numbers joined
    by commas/conjunctions are collected, and legend_f() attaches the unit
    words that follow each number. Returns a list of groups (number lists
    interleaved with their legend lists), terminated by an empty list.
    Text between '[' and ']' is skipped entirely.
    """
    text = "".join([ch for ch in text if ch not in stops_s])
    new_text = word_tokenize(text, language="russian")
    flag1 = 0
    arr_of_words = [[]]
    i1 = 0
    i2 = 0
    i3 = 0
    for word in new_text:  # sentence tokenisation + word tokenisation + removal of extras
        p = morph.parse(word)
        #print(p[0].tag.case)
        if {'PNCT'} in p[0].tag:
            if word == ']':
                flag1 = 0
            elif word == '[':
                flag1 = 1
            elif word == '.':
                # Sentence boundary: start a new word list.
                i1 = i1 + 1
                arr_of_words.append([])
            elif word != '[' and flag1 != 1:
                arr_of_words[i1].append(word)
        elif flag1 != 1: arr_of_words[i1].append(word)
    result = []
    for sentence in arr_of_words:
        i = 0
        j = 0
        k = 0
        flag1 = 1  # flag: a separator (comma/conjunction) was just seen
        flag2 = 0
        new_res = []
        list_f1 = []
        for word in sentence:
            p = morph.parse(word)
            # A number without a preceding separator, or explicit break
            # punctuation, closes the current group.
            if ({'NUMB'} in p[0].tag and flag1 == 0) or (word in stops_for_n):
                if len(new_res) > 1:
                    result.append(new_res)
                else:
                    # Too small to be a group: drop the collected positions.
                    while (i > 0):
                        list_f1.pop()
                        i = i - 1
                if len(new_res) > 1:
                    legend_f(list_f1, sentence, result)
                list_f1 = []
                new_res = []
                i = 0
                j = j + 1
                flag1 = 1
            if {'NUMB'} in p[0].tag and flag1 == 1:
                # Normalise the decimal separator and record the position.
                word = word.replace(',','.')
                new_res.append(word)
                i = i + 1
                list_f1.append(k)
                flag1 = 0
            if word == ',' or {'CONJ'} in p[0].tag:
                flag1 = 1
            k = k + 1
        # Flush a group left open at the end of the sentence.
        if len(new_res) > 1:
            result.append(new_res)
            legend_f(list_f1, sentence, result)
        list_f1 = []
    # for sentence in arr_of_words:
    #     for word in sentence:
    result.append([])
    # i = 0
    # j = 0
    # k = 0
    # result = [[]]
    # while j < len(short_text):
    #     p = morph.parse(short_text[j])
    #     if {'NUMB'} in p[0].tag:
    #         short_text[j] = short_text[j].replace(',','.')
    #         result[i].append(short_text[j])
    #         j = j + 1
    #         continue
    #
    #     if short_text[j] == ".":
    #         if len(result[i]) <= 1:
    #             result[i] = []
    #             i = i - 1
    #         if len(result[i]) != 0:
    #             result.append([])
    #         i = i + 1
    #         j = j + 1
    #         continue
    #     j = j + 1
    return result
|
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
def midpoints(x):
    """Average adjacent entries of *x* along every axis.

    Each dimension shrinks by one; used by plot_voxel() to turn grid-corner
    coordinates into voxel-centre coordinates.
    """
    prefix = ()
    for _axis in range(x.ndim):
        lower = x[prefix + np.index_exp[:-1]]
        upper = x[prefix + np.index_exp[1:]]
        x = (lower + upper) / 2.0
        prefix += np.index_exp[:]
    return x
def plot_voxel(data, num=30, size=15, rgb=45):
    '''
    Plot each position in *data* as a coloured voxel shape (a sphere slice).

    Input data should have the form 3*N, where N is the number of positions

    Parameters
    -------
    num: density of voxels, a larger value will give more voxels so it is smoothier
    size: size of each plotted circle
    rgb: control the color, small value shows more green, large value shows more blue

    Notes
    -------
    Axis scale and labels can be easily customized
    '''
    # define the colors, red -> green -> blue
    colormap = np.array([[3,3,3,3,2,1,0,0,0,0,0,0,0],
                         [0,1,2,3,3,3,3,3,3,3,2,1,0],
                         [0,0,0,0,0,0,0,1,2,3,3,3,3]]) / 3
    # define grid
    center = np.array([size/2,size/2,size/2])
    x,y,z = np.indices((num+1,num+1,num+1)) /num *size
    xc, yc, zc = midpoints(x), midpoints(y), midpoints(z)
    cx, cy, cz = np.zeros((num,num,num)), np.zeros((num,num,num)), np.zeros((num,num,num))
    # attach color to each point according to its distance from the centre
    stat = np.zeros(13)
    for i in range(num):
        for j in range(num):
            for k in range(num):
                point = np.array([xc[i,j,k],yc[i,j,k],zc[i,j,k]])
                dist = sum((point - center)**2)
                stufe = int(dist // (3*(size/2)**2/rgb))
                if stufe > 12:
                    stufe = 12
                stat[stufe] += 1
                cx[i,j,k], cy[i,j,k], cz[i,j,k] = colormap[0,stufe], colormap[1,stufe], colormap[2,stufe]
    # define the shape: a sphere intersected with a diagonal slab
    s1 = (xc - center[0])**2 + (yc - center[1])**2 + (zc - center[2])**2 < (size/2)**2
    s2 = xc+yc+zc < 1.55*size
    s3 = xc+yc+zc > 1.45*size
    sphere = s1*s2*s3
    # assign color
    colors = np.zeros(sphere.shape + (3,))
    colors[..., 0] = cx
    colors[..., 1] = cy
    colors[..., 2] = cz
    # Shift data according to the sphere centre. BUG FIX: work on a float
    # copy — the original subtracted in place, mutating the caller's array
    # (and any view it came from, e.g. coors[:, 0, :]) and silently
    # truncating the shift for integer input.
    data = np.asarray(data, dtype=float).copy()
    data[0] = data[0] - center[0]
    data[1] = data[1] - center[1]
    data[2] = data[2] - center[2]
    # Plot
    fig = plt.figure()
    # BUG FIX: fig.gca(projection='3d') was removed in Matplotlib 3.6;
    # request the 3D projection through add_subplot instead.
    ax = fig.add_subplot(projection='3d')
    for i in range(data.shape[1]):
        ax.voxels(x+data[0,i], y+data[1,i], z+data[2,i], sphere,
                  facecolors=colors,
                  # edgecolors=np.clip(2*colors - 0.5, 0, 1),  # brighter
                  linewidth=0.5)
    ax.set(xlabel='X', ylabel='Y', zlabel='Z')
    ax.set_xlim3d(0, 100)
    ax.set_ylim3d(0, 100)
    ax.set_zlim3d(0, 100)
    plt.show()
if __name__ == "__main__":
    # Demo: one hand-placed position, then the positions stored in coor.npy.
    coors = np.load('./coor.npy')
    plot_voxel(np.array([20,30,40]).reshape(-1,1))
    plot_voxel(coors[:,0,:])
    print('Finished')
import random
ITER_AMOUNT = 100
def mul(a, b, modulo):
    """Return (a * b) % modulo, reducing both factors first."""
    left = a % modulo
    right = b % modulo
    return left * right % modulo
def modulo_binary_exp(x, n, modulo):
    """Return (x ** n) % modulo.

    Kept for API compatibility, but delegates to the built-in three-argument
    pow(), which does C-level square-and-multiply instead of the hand-rolled
    recursive version. The n == 0 branch preserves the original corner case
    of returning 1 even when modulo == 1.
    """
    if n == 0:
        return 1
    return pow(x, n, modulo)
def gcd(a, b):
    """Greatest common divisor of *a* and *b* via iterative Euclid."""
    while b != 0:
        a, b = b, a % b
    return a
def fermat_primality_test(x):
    """One round of the Fermat primality test on *x*.

    Picks a random base a coprime to x and checks a**(x-1) == 1 (mod x).
    Returns True when x passes this round (probably prime), False when x
    is certainly composite. Fixes over the original: no random.seed() on
    every call (re-seeding per call is wasteful and unnecessary), and the
    witness is drawn from [2, x-1) instead of [0, 1e18] — the old range
    could pick a == 1, which trivially passes for every x.
    """
    from math import gcd as _gcd  # local import keeps this function self-contained
    if x < 4:
        # 2 and 3 are prime; 0, 1 and negatives are not.
        return x == 2 or x == 3
    a = random.randrange(2, x)
    while _gcd(a, x) != 1:
        a = random.randrange(2, x)
    return pow(a, x - 1, x) == 1
def get_next_prime(x):
    """Return the first integer above *x* that passes ITER_AMOUNT rounds of
    the Fermat test (i.e. is probably prime)."""
    candidate = x + 1
    while True:
        if all(fermat_primality_test(candidate) for _ in range(ITER_AMOUNT)):
            return candidate
        candidate += 1
|
from string import ascii_lowercase, ascii_uppercase
text = 'dkghokdghopAAAAAAAfghgfbnvbnretwwquouipiuopAAAAdkhotkyr0k59-kk53gki53g53k5klk%^%^%_&)%_+)_+)$_+)%^$%$)(^+_)%(&_('
# Keep only the ASCII letters (either case), preserving their order.
new_text = ''.join(
    ch for ch in text if ch in ascii_lowercase or ch in ascii_uppercase
)
print(new_text)
from rest_framework import permissions
class IsOwnerOrReadOnly(permissions.BasePermission):
    """Object-level permission: anyone may read, only the owner may write.

    Assumes the model instance has an ``owner`` attribute.
    """

    def has_object_permission(self, request, view, obj):
        # BUG FIX: the hook was misspelled ``has_object_permisson`` — DRF
        # never calls a misspelled method, so the check was silently skipped
        # and every request was allowed.
        if request.method in permissions.SAFE_METHODS:
            # Read-only requests are always allowed.
            return True
        # Write access only for the owning user.
        return obj.owner == request.user
|
import numpy as np
import gspread
import pandas as pd
from google.oauth2.service_account import Credentials
import os
import selenium
from selenium import webdriver
import time
import io
import requests
from webdriver_manager.firefox import GeckoDriverManager
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from gspread_dataframe import set_with_dataframe
from selenium.webdriver.firefox.options import Options
# Verify phone numbers stored in a Google Sheet against the numbers shown on
# each row's URL (scraped with headless Firefox), writing True/False back.
scopes = [
    'https://www.googleapis.com/auth/spreadsheets',
    'https://www.googleapis.com/auth/drive'
]
credentials = Credentials.from_service_account_file(
    'C:/Users/wrath/gsheets_testing/keys.json',
    scopes=scopes
)
gc = gspread.authorize(credentials)
sh = gc.open_by_url('https://docs.google.com/spreadsheets/d/12nZmdZar3ThXa9OwJNHhvw2wpv_8UvviX2mV-DG4raI/edit#gid=0')
df = pd.DataFrame(sh.sheet1.get_all_records())
options = Options()
options.headless = True
driver = webdriver.Firefox(options=options, executable_path=r'C:\Users\wrath\Downloads\Compressed\geckodriver-v0.29.1-win64\geckodriver.exe')
print ("Headless Firefox Initialized")
records = len(df)
# Column positions: URL in column 5, phone number in column 6, result in 0.
url_col = 5
tel_col = 6
check = 0
r = 0
url_list = {}
tel_list = {}
tel = None
while r < records:
    url_list[r] = df.iloc[r][url_col]
    tel_list[r] = df.iloc[r][tel_col]
    driver.get(url_list[r])
    try:
        # NOTE(review): find_element_by_css_selector is removed in Selenium 4;
        # use driver.find_element(By.CSS_SELECTOR, ...) when upgrading.
        contact = driver.find_element_by_css_selector("[data-tooltip = 'Copy phone number']")
        print(contact.text)
        tel = contact.text
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt; and on
        # failure `tel` keeps the previous row's value — likely a bug.
        print("Couldn't extract")
    if tel != None:
        # Strip every non-alphanumeric character before comparing.
        tel = ''.join(e for e in tel if e.isalnum())
    if df.iloc[r][tel_col]==tel:
        # NOTE(review): chained indexing (df.iloc[r][check] = ...) may write
        # to a temporary copy rather than df — verify with df.iloc[r, check].
        df.iloc[r][check]= True
        print('correct')
    else:
        df.iloc[r][check]= False
        print('incorrect')
    r = r+1
set_with_dataframe(sh.sheet1, df)
from Lista import Lista
# Build a sample linked list and exercise its basic API.
ListaEjemplo = Lista()
ListaEjemplo.insert_node("Prueba1", 12)
ListaEjemplo.insert_node("Prueba2", "Valor2")
ListaEjemplo.insert_node("Prueba3", 12)
print(ListaEjemplo.get_size())
print(ListaEjemplo.get_node(0).get_contenido())
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
1.16 筛选序列中的元素
Created on 2016年7月24日
@author: wang
'''
# NOTE: this file uses Python-2 print statements throughout.
mylist = [1,4,-5,10,-7,2,3,-1]
print [n for n in mylist if n > 0]# list comprehension
print [n for n in mylist if n < 0]
pos = (n for n in mylist if n > 0)# generator expression (lazy)
print pos
# A generator only yields its values when iterated.
for x in pos:
    print(x)
values = ['1', '2', '-3', '-', '4', 'N/A', '5']
def is_int(val):
    """Return True when *val* can be parsed as an int, False otherwise."""
    try:
        int(val)
    except ValueError:
        return False
    return True
# filter() with a predicate function handles values that would raise.
ivals = list(filter(is_int, values))
print(ivals)
mylist = [1,4,-5,10,-7,2,3,-1]
import math
print [math.sqrt(n) for n in mylist if n > 0]
# Conditional expression inside a comprehension: clamp instead of drop.
clip_neg = [n if n > 0 else 0 for n in mylist]
print clip_neg
clip_pos = [n if n < 0 else 0 for n in mylist]
print clip_pos
addresses = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']
counts = [0, 3, 10, 4, 1, 7, 6, 1]
# itertools.compress picks elements where the selector sequence is truthy.
from itertools import compress
more5 = [n > 5 for n in counts]
print more5
print list(compress(addresses, more5))
|
#!/usr/bin/env python
__author__ = 'rgolla'
import argparse
# Split "video_name image_url" lines into 80% train / 20% test files,
# grouping by video so a video's images never straddle the split.
# NOTE(review): dict.items()[:n] slicing and iteritems() are Python-2-only.
parser = argparse.ArgumentParser()
parser.add_argument( '-i', action='store', dest='input',
 help = 'Text file that contains the whitespace seperated "source_id image_url" lines for which the activity that we wish to train for is present.' )
results = parser.parse_args()
with open( results.input ) as f:
    all_lines = f.readlines()
# Map each video name to the list of its image urls.
ftr_index = {}
for ( i, image_id ) in enumerate( all_lines ):
    ( video_name, image_url ) = image_id.split()[0:2]
    if video_name in ftr_index.keys():
        ftr_index[video_name].append( image_url )
    else:
        ftr_index[video_name] = []
        ftr_index[video_name].append( image_url )
# 80/20 split over the videos (dict ordering, so effectively arbitrary).
eighty_percent = int( len( ftr_index ) * 0.8 )
train = dict( ftr_index.items()[:eighty_percent] )
test = dict( ftr_index.items()[eighty_percent:] )
# Output files are named after the input file's basename.
train_file = open( results.input.split( '.' )[0] + '_train.txt', 'w' )
test_file = open( results.input.split( '.' )[0] + '_test.txt', 'w' )
for key, value in train.iteritems():
    for each in value:
        train_file.write( key + ' ' + each + '\n' )
train_file.close()
for key, value in test.iteritems():
    for each in value:
        test_file.write( key + ' ' + each + '\n' )
test_file.close()
print len( train ), "training images"
print len( test ), "testing images"
print len( ftr_index ), "total images"
|
# '''
# Return the number of letter occorrances in a string.
#
# >>>count_letter("i", "Antidisestablishmentterianism")
#
# >>>count_letter("p", "Pneumonoultramicroscopicsilicovolcanoconiosis")
# '''
# def count_letter(char, word):
# for letter in word:
def count_letter(char, word):
    """Count occurrences of *char* in *word*; print and return the count.

    The commented-out draft below this function shows a returning version
    was intended — the original only printed, so callers could not use the
    result. The printed message is unchanged.
    """
    letter_count = 0
    for letter in word:
        if letter == char:
            letter_count += 1
    print("The letter " + char + " occurs " + str(letter_count) + " times.")
    return letter_count

# Demo run with a sample string.
char = "q"
word = "asdfkjhaewiufhilusdbfviuqqqqhsfuyiahweku"
count_letter(char, word)
#
# def count_letter(char_to_find, text):
# count = 0
# for char in text:
# if char == char_to_find
# count += 1
# return count
# |
import math
# Project Euler 99: find the line whose base**exponent is largest.
# Compare exponent * log(base) instead of computing the enormous powers.
best = 0
best_line = 0
with open('p099_base_exp.txt', 'r') as f:  # `with` fixes the leaked file handle
    for line_number, line in enumerate(f, start=1):
        base, exponent = (int(part) for part in line.split(',')[0:2])
        value = exponent * math.log(base)
        if value > best:
            best = value
            best_line = line_number
print(best_line)
|
#to run spark-submit spark.py
#Serenitylesa4411!
from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StructField
from pyspark.sql.types import DoubleType, IntegerType, StringType
# from pyspark.sql.functions import *
# from pyspark.sql.functions import mean as _mean, stddev as _stddev, col
import pyspark.sql.functions as F
# Exploratory analysis of the UK traffic-accident CSV with Spark SQL.
# NOTE(review): the bare `print "..."` statements below are Python-2-only.
scSpark = SparkSession \
    .builder \
    .appName("Uk Traffic Accidents 2012-2014") \
    .config("spark.some.config.option", "some-value") \
    .getOrCreate()
sdfData = scSpark.read.csv("accidents_2012_to_2014.csv", header=True, inferSchema = True)
# print(sdfData.schema)
# sdfData.orderBy("Longitude").show()
# sdfData.select("Longitude").show()
#Must make sure you purge whitespace or match exactly on column names
# print(sdfData.groupBy().sum('Age').collect())
# Mean / standard deviation of the Longitude column.
df_stats = sdfData.select(
    F.mean(F.col('Longitude')).alias('mean'),
    F.stddev(F.col('Longitude')).alias('std')
).collect()
mean = df_stats[0]['mean']
std = df_stats[0]['std']
print "mean of longitude = %f" %(mean)
print "standard deviation of longitude = %f" %(std)
# Mean / standard deviation of the Latitude column.
df_stats = sdfData.select(
    F.mean(F.col('Latitude')).alias('mean'),
    F.stddev(F.col('Latitude')).alias('std')
).collect()
mean = df_stats[0]['mean']
std = df_stats[0]['std']
print "mean of latitude = %f" %(mean)
print "standard deviation of latitude = %f" %(std)
# Frequency tables for several categorical columns, most frequent first.
answer = sdfData.groupBy("Day_of_Week").count().orderBy("count", ascending=False).show()
answer = sdfData.groupBy("Road_Type").count().orderBy("count", ascending=False).show()
answer = sdfData.groupBy("Time").count().orderBy("count", ascending=False).show()
answer = sdfData.groupBy("Number_of_Casualties").count().orderBy("count", ascending=False).show()
answer = sdfData.groupBy("Junction_Control").count().orderBy("count", ascending=False).show()
answer = sdfData.groupBy("Speed_limit").count().orderBy("count", ascending=False).show()
# df.where((col("foo") > 0) & (col("bar") < 0))
# answer = sdfData.where((F.col("Accident_Severity") < 2 & (F.col("Number_of_Vehicles") < 2))).count().show()
# answer = sdfData.groupBy("Accident_Severity").where((F.col("Accident_Severity") < 2 & (F.col("Number_of_Vehicles") < 2))).count().show()
# tdata.withColumn("Age", when((tdata.Age == "" & tdata.Survived == "0"), mean_age_0).otherwise(tdata.Age)).show()
# tdata.withColumn("Age", when((tdata.Age == "" & tdata.Survived == "0"), mean_age_0).otherwise(tdata.Age)).show()
|
# coding: utf-8
"""
난이도 : 1
문제 : 양의 정수 n과 m이 주어진다. (1000 10)
n원을 m명에게 정확히 분배해야 한다. 만약 10원을 3명에게 배분하면 3원씩 배분하고 1원이 남는다.
첫 줄에 얼마씩 배분할 수 있는지, 둘째 줄에 배분하고 남는돈을 1원단위로 출력.
알고리즘 : n을 m으로 나눈 몫을 첫줄에 출력하고, 나머지를 둘째 줄에 출력
"""
# Read the amount of money and the number of people, then print the
# per-person share and the remaining coins on separate lines.
money, people = map(int, input().split())
share, remainder = divmod(money, people)
print(share)
print(remainder)
"""2a: RNN"""
"""I have tried different dense model architecture but best one was this
which is 2nd dense with 32 hidden units"""
"""Dropout also helped to improve model. I kept playing with dropouts and
additional dropout layer until i get least loss"""
"""But when i rerun model it gives me different kind of test_loss values
even thoough i train the same model( between 18 and 48). that means our data is very unstable.
therefore stochastig gradient method catch different local minimum each time"""
# GRU recurrent model with a small dense head.
# NOTE(review): Sequential/layers/RMSprop/plt and the training_data /
# val_data / test_data arrays are assumed to be defined in earlier
# notebook cells — confirm before running standalone.
model = Sequential()
model.add(layers.GRU(32,
                     dropout=0.2,
                     recurrent_dropout=0.2,
                     input_shape=(None, training_data.shape[-1])))
model.add(layers.Dense(32,activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(output_size,activation='tanh'))
"""COMPILE YOUR MODEL"""
# Mean-absolute-error loss suits the regression target.
model.compile(optimizer=RMSprop(), loss='mae')
"""TRAINING YOUR MODEL"""
epoch_size = 20
batch_size = 32
history = model.fit(training_data,
                    training_labels,
                    epochs=epoch_size,
                    batch_size=batch_size,
                    validation_data = (val_data, val_labels))
"""Plotting results"""
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(loss) + 1)
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation losses')
plt.legend()
plt.show()
"""PREDICTION - TESTING DATA"""
test_loss = model.evaluate(test_data, test_labels)
print('normalized test_loss:', test_loss)
# Undo the feature normalisation to report the loss in original units.
print('unnormalized test_loss:', test_loss*std[0])
"""Save your model:"""
#model.save('RNN_1day')
|
from offsetbasedgraph import IntervalCollection, Interval, NumpyIndexedInterval
from collections import defaultdict
from offsetbasedgraph import IndexedInterval
import logging
class HaploTyper:
    """
    Simple naive haplotyper: counts how many intervals cover each graph
    node, then walks the graph greedily along the most-covered successors.
    """
    def __init__(self, graph, intervals):
        assert isinstance(intervals, IntervalCollection)
        self.graph = graph
        self.intervals = intervals
        # Per-node count of intervals passing through the node.
        self.node_counts = defaultdict(int)

    def build(self):
        """Accumulate node coverage counts; must run before
        get_maximum_interval_through_graph()."""
        logging.info("Building haplotyper")
        i = 0
        for interval in self.intervals.intervals:
            if i % 50000 == 0:
                logging.info("%d intervals processed" % i)
            i += 1
            for rp in interval.region_paths:
                self.node_counts[rp] += 1

    def get_maximum_interval_through_graph(self):
        """Greedy walk from the single start node, always taking the
        successor with the highest coverage; returns the walk as an
        IndexedInterval spanning all visited nodes."""
        logging.info("Getting first blocks")
        graph = self.graph
        start = graph.get_first_blocks()
        logging.info("First blocks found")
        assert len(start) == 1, "Only works when graph has one start node"
        nodes = []
        current_block = start[0]
        i = 0
        while True:
            if i % 1000000 == 0:
                logging.info("Processing node %d" % i)
            i += 1
            nodes.append(current_block)
            next_blocks = graph.adj_list[current_block]
            if len(next_blocks) < 1:
                # Dead end: the walk is complete.
                print("Stopping at %d" % current_block)
                break
            # Pick the successor with the highest coverage count.
            # NOTE(review): `next` shadows the builtin of the same name.
            next = None
            max_pileup_value = -1
            for potential_next in next_blocks:
                pileup_value = self.node_counts[potential_next]
                if pileup_value > max_pileup_value:
                    next = potential_next
                    max_pileup_value = pileup_value
            current_block = next
        return IndexedInterval(0, graph.node_size(nodes[-1]), nodes, graph)
|
import sys
sys.path.append("..")
import uuid
from wizhelper.WizServerUrl import WizServerUrl
##
class WizObject:
    '''wizobject: carries a unique GUID assigned at construction time.'''

    def __init__(self):
        # uuid1 builds the id from the host MAC address plus a timestamp.
        self.guid = uuid.uuid1()

    def printWizObject(self):
        """Print this object's GUID followed by the wiz server URL."""
        for item in (self.guid, WizServerUrl()):
            print(item)
|
import json
import os
from datetime import datetime
import sys
sys.path.append("../")
from causal_graphs.graph_visualization import visualize_graph
from causal_graphs.graph_export import load_graph
from causal_graphs.graph_real_world import load_graph_file
from causal_graphs.graph_definition import CausalDAG
from causal_discovery.utils import set_cluster
from experiments.utils import set_seed, get_basic_parser, test_graph
if __name__ == '__main__':
    parser = get_basic_parser()
    parser.add_argument('--graph_files', type=str, nargs='+',
                        help='Graph files to apply ENCO to. Files must be .pt, .npz, or .bif files.')
    args = parser.parse_args()
    # Basic checkpoint directory creation
    # (timestamped by default so separate runs never overwrite each other).
    current_date = datetime.now()
    if args.checkpoint_dir is None or len(args.checkpoint_dir) == 0:
        checkpoint_dir = "checkpoints/%02d_%02d_%02d__%02d_%02d_%02d/" % (
            current_date.year, current_date.month, current_date.day, current_date.hour, current_date.minute, current_date.second)
    else:
        checkpoint_dir = args.checkpoint_dir
    os.makedirs(checkpoint_dir, exist_ok=True)
    # Persist the CLI arguments next to the results for reproducibility.
    with open(os.path.join(checkpoint_dir, "args.json"), "w") as f:
        json.dump(vars(args), f, indent=4)
    set_cluster(args.cluster)
    for gindex, graph_path in enumerate(args.graph_files):
        # Seed setting for reproducibility
        set_seed(args.seed)
        # Load graph, dispatching on the file extension.
        if graph_path.endswith(".bif"):
            graph = load_graph_file(graph_path)
        elif graph_path.endswith(".pt"):
            graph = CausalDAG.load_from_file(graph_path)
        elif graph_path.endswith(".npz"):
            graph = load_graph(graph_path)
        else:
            assert False, "Unknown file extension for " + graph_path
        # Derive a short identifier from the file name (strip dir, extension,
        # and an optional "graph_" prefix).
        graph_name = graph_path.split("/")[-1].rsplit(".", 1)[0]
        if graph_name.startswith("graph_"):
            graph_name = graph_name.split("graph_")[-1]
        file_id = "%s_%s" % (str(gindex+1).zfill(3), graph_name)
        # Visualize graph (skipped for very large graphs).
        if graph.num_vars <= 100:
            figsize = max(3, graph.num_vars ** 0.7)
            visualize_graph(graph,
                            filename=os.path.join(checkpoint_dir, "graph_%s.pdf" % (file_id)),
                            figsize=(figsize, figsize),
                            layout="circular" if graph.num_vars < 40 else "graphviz")
        s = "== Testing graph \"%s\" ==" % graph_name
        print("="*len(s)+"\n"+s+"\n"+"="*len(s))
        # Start structure learning
        test_graph(graph, args, checkpoint_dir, file_id)
|
import os
import re
import requests
from tqdm import tqdm
def save_file_at_new_dir(new_dir_path,
                         new_filename,
                         new_file_content,
                         mode='w'):
    """Write new_file_content to new_dir_path/new_filename.

    The directory is created first if it does not exist; mode is passed
    straight through to open() (default 'w', text write/truncate).
    """
    os.makedirs(new_dir_path, exist_ok=True)
    target = os.path.join(new_dir_path, new_filename)
    with open(target, mode) as out:
        out.write(new_file_content)
def HTTPdownload(file_url, dir_path, file_name, Cookies, Referer):
    """Download file_url into dir_path/file_name with a tqdm progress bar.

    Cookies is a cookie dict and Referer the Referer header value; both are
    sent on the HEAD (size probe) and GET requests.
    NOTE(review): assumes the server returns a Content-Length header on HEAD —
    a KeyError is raised otherwise; confirm all target hosts provide it.
    """
    headers = {'Referer': Referer}
    # Probe the total size first so the progress bar can show a percentage.
    file_size = int(
        requests.head(file_url, cookies=Cookies,
                      headers=headers).headers["content-length"])
    res = requests.get(file_url, cookies=Cookies, headers=headers, stream=True)
    pbar = tqdm(total=file_size, unit="B", unit_scale=True)
    os.makedirs(dir_path, exist_ok=True)
    # Stream in 1 KiB chunks so large files never sit fully in memory.
    with open(os.path.join(dir_path, file_name), 'wb') as file:
        for chunk in res.iter_content(chunk_size=1024):
            file.write(chunk)
            pbar.update(len(chunk))
    pbar.close()
def str2dic(x):
    """Parse a query-string-like 'k1=v1&k2=v2' string into a dict.

    Only the first '=' of each pair separates key from value, so the value
    may itself contain '=' characters (matching the original regex
    '([^=]+)=(.+)$'). BUG FIX: pairs without an '=' (or with an empty
    value, e.g. 'a=') used to raise AttributeError; they are now skipped
    or stored with an empty value, respectively.
    """
    data = {}
    for pair in x.split('&'):
        key, sep, value = pair.partition('=')
        if sep:  # skip malformed fragments with no '=' at all
            data[key] = value
    return data
|
import os
#定义文件生成器
#生成一个空文件
def createini(ininame):
    """Create an empty <ininame>.ini file in the current directory.

    BUG FIX: the original took no parameter but referenced `ininame`,
    raising NameError on every call; the name is now a parameter. The
    redundant `os.system("cd.>...")` Windows hack was dropped — opening
    the file in 'w' mode already creates/truncates it portably.
    """
    with open('%s.ini' % ininame, 'w'):
        pass
#定义数据创建器
def createdata(ininame,hajime,owari,hitoashi):
datas = []
while hajime <= owari:
datas.append(hajime)
hajime = hajime + hitoashi
else:
strdatas = str(datas)
strdatas = strdatas[1:-1]
os.system("cd.>%s.ini" %ininame)
base = open('%s.ini' %ininame,'w')
base.write('[SaveMoney365]'+'\n\r'+'\n\r'+'Money='+'\n\r'+'0'+'\n\r'+'LIST=\n\r'+strdatas)
base.close() |
"""
demo09_cv.py 词袋模型
"""
import nltk.tokenize as tk
import sklearn.feature_extraction.text as ft
doc = 'The brown dog is running. ' \
      'The black dog is in the black room. ' \
      'Running in the room is forbidden.'
# Split doc into individual sentences
sents = tk.sent_tokenize(doc)
# Build the bag-of-words model: one row per sentence, one column per term
cv = ft.CountVectorizer()
bow = cv.fit_transform(sents)
print(bow.toarray()) |
#!/usr/bin/env python
# $Id$
##
## This file is part of pyFormex 0.7.1 Release Sat May 24 13:26:21 2008
## pyFormex is a Python implementation of Formex algebra
## Website: http://pyformex.berlios.de/
## Copyright (C) Benedict Verhegghe (benedict.verhegghe@ugent.be)
##
## This program is distributed under the GNU General Public License
## version 2 or later (see file COPYING for details)
##
"""Functions for executing pyFormex scripts."""
import globaldata as GD
import threading,os,commands,copy,re,time
import formex
import utils
######################### Exceptions #########################################
class Exit(Exception):
    """Exception raised to exit from a running script.

    Caught in playScript(): aborts only the currently running script.
    """
    pass
class ExitAll(Exception):
    """Exception raised to exit pyFormex from a script.

    Caught in playScript(), which then calls exit() on the application.
    """
    pass
class ExitSeq(Exception):
    """Exception raised to exit from a sequence of scripts."""
    pass
class TimeOut(Exception):
    """Exception raised to timeout from a dialog widget."""
    pass
############################# Globals for scripts ############################
def Globals():
    """Return the globals that are passed to the scripts on execution.
    This basically contains the globals defined in draw.py, colors.py,
    and formex.py, as well as the globals from numpy.
    It also contains the definitions put into the globaldata.PF, by
    preference using the export() function.
    During execution of the script, the global variable __name__ will be
    set to either 'draw' or 'script' depending on whether the script
    was executed in the 'draw' module (--gui option) or the 'script'
    module (--nogui option).
    """
    # Shallow copy so scripts cannot replace GD.PF itself (the contained
    # objects are still shared).
    g = copy.copy(GD.PF)
    g.update(globals())
    # GUI-only drawing/color names are only available with a running GUI.
    if GD.gui:
        from gui import colors,draw
        g.update(colors.__dict__)
        g.update(draw.__dict__)
    # formex names are merged last and therefore take precedence.
    g.update(formex.__dict__)
    return g
def export(dic):
    """Export the variables in the given dictionary.

    The entries are merged into GD.PF and thus become available to all
    subsequently executed scripts (see Globals()).
    """
    GD.PF.update(dic)
def export2(names,values):
    """Export a list of names and values.

    Equivalent to export(dict(zip(names,values))); surplus items in the
    longer of the two lists are silently ignored by zip().
    """
    export(dict(zip(names,values)))
def forget(names):
    """Remove the global variables specified in list.

    Names that are not currently defined are silently ignored.
    FIX: uses the `in` operator instead of dict.has_key(), which was
    removed in Python 3; `in` works identically on Python 2 as well.
    """
    g = GD.PF
    for name in names:
        if name in g:
            del g[name]
def rename(oldnames,newnames):
    """Rename the global variables in oldnames to newnames.

    Old names missing from the globals are silently skipped.
    FIX: uses the `in` operator instead of dict.has_key(), which was
    removed in Python 3; `in` works identically on Python 2 as well.
    """
    g = GD.PF
    for oldname,newname in zip(oldnames,newnames):
        if oldname in g:
            g[newname] = g[oldname]
            del g[oldname]
def listAll(clas=None,dic=None):
    """Return a list of the keys in dic whose values are of type clas.

    If no dict is given, GD.PF is used.
    If no class is given, ALL keys are returned. (NOTE(review): the
    original docstring claimed Formex objects are sought by default and
    that locals() is searched too; neither matched the code.)
    FIX: always returns a plain list, so the result is consistent across
    Python 2 (where dict.keys() was a list) and Python 3 (a view).
    """
    if dic is None:
        dic = GD.PF
    if clas is None:
        return list(dic)
    return [k for k, v in dic.items() if isinstance(v, clas)]
def named(name):
    """Returns the global object named name.

    GD.PF is searched first, then this module's globals; a NameError is
    raised when the name occurs in neither.
    """
    #GD.debug("name %s" % name)
    if GD.PF.has_key(name):
        #GD.debug("Found %s in GD.PF" % name)
        dic = GD.PF
    elif globals().has_key(name):
        GD.debug("Found %s in globals()" % name)
        dic = globals()
    else:
        raise NameError,"Name %s is in neither GD.PF nor globals()" % name
    return dic[name]
#################### Interacting with the user ###############################
def ask(question,choices=None,default=''):
    """Ask a question and present possible answers.

    If no choices are presented, anything will be accepted.
    Else, the question is repeated until one of the choices is selected.
    If a default is given and the value entered is empty, the default is
    substituted.
    Case is not significant, but choices are presented unchanged.
    If no choices are presented, the string typed by the user is returned.
    Else the return value is the lowest matching index of the users answer
    in the choices list. Thus, ask('Do you agree',['Y','n']) will return
    0 on either 'y' or 'Y' and 1 on either 'n' or 'N'.
    """
    if choices:
        question += " (%s) " % ', '.join(choices)
        # Lowercase the choices once; answers are lowercased before matching.
        choices = [ c.lower() for c in choices ]
    while 1:
        res = raw_input(question)   # Python 2 builtin (input() in Python 3)
        if res == '' and default:
            res = default
        if not choices:
            return res
        try:
            return choices.index(res.lower())
        except ValueError:
            pass    # not a valid choice: repeat the question
def ack(question):
    """Show a Yes/No question and return True/False depending on answer."""
    # ask() returns the index into the choices list; 0 means 'Y'.
    return ask(question,['Y','N']) == 0
def error(message):
    """Print an error message and let the user abort or continue."""
    print "pyFormex Error: "+message
    if not ack("Do you want to continue?"):
        exit()
def warning(message):
    """Print a warning message and let the user abort or continue."""
    print "pyFormex Warning: "+message
    if not ack("Do you want to continue?"):
        exit()
def showInfo(message):
    """Print an informational message for the user."""
    print "pyFormex Info: "+message
##def log(s):
## """Display a message in the terminal."""
## print s
# message is the preferred function to send text info to the user.
# The default message handler is set here.
# Best candidates are log/info
message = GD.message  # default handler: scripts call message(), not print
def system(cmdline,result='output'):
    """Run a command and return its output.

    If result == 'status', the exit status of the command is returned.
    If result == 'output', the output of the command is returned.
    If result == 'both', a tuple of status and output is returned.
    Any other value of result returns None.
    NOTE(review): the commands module is Python 2 only (removed in
    Python 3; subprocess replaces it there).
    """
    if result == 'status':
        return os.system(cmdline)
    elif result == 'output':
        return commands.getoutput(cmdline)
    elif result == 'both':
        return commands.getstatusoutput(cmdline)
########################### PLAYING SCRIPTS ##############################
scriptDisabled = False   # when True, playScript() refuses to start any script
scriptRunning = False    # lock flag: only one script may execute at a time
exitrequested = False    # set by stopatbreakpt()/GUI to abort at the next breakpt()
starttime = 0.0          # start time of the current script (see runtime())
def playScript(scr,name=None,argv=[]):
    """Play a pyformex script scr. scr should be a valid Python text.

    There is a lock to prevent multiple scripts from being executed at the
    same time.
    If a name is specified, sets the global variable GD.scriptName if and
    when the script is started.
    NOTE(review): argv=[] is a mutable default argument and is shared
    between calls that rely on the default.
    """
    global scriptDisabled,scriptRunning,exitrequested
    GD.debug('SCRIPT MODE %s,%s,%s'% (scriptDisabled,scriptRunning,exitrequested))
    # (We only allow one script executing at a time!)
    # and scripts are non-reentrant
    if scriptRunning or scriptDisabled :
        return
    scriptRunning = True
    exitrequested = False
    # Get the globals the script will execute in (see Globals()).
    g = Globals()
    if GD.gui:
        modname = 'draw'
    else:
        modname = 'script'
    g.update({'__name__':modname})
    g.update({'argv':argv})
    # Now we can execute the script using these collected globals
    exportNames = []
    GD.scriptName = name
    exitall = False
    # NOTE(review): this assigns a LOCAL starttime — the module-level
    # starttime used by runtime() is never updated here; confirm intent.
    starttime = time.clock()
    GD.debug('STARTING SCRIPT (%s)' % starttime)
    try:
        try:
            # NOTE(review): `stepmode` and (below) `drawrelease` are not
            # defined anywhere in this module — confirm they are injected
            # elsewhere, otherwise this raises NameError in GUI mode.
            if GD.gui and stepmode:
                step_script(scr,g,True)
            else:
                exec scr in g
            # Optionally auto-export all Formex objects created by the script.
            if GD.cfg['autoglobals']:
                exportNames.extend(listAll(clas=formex.Formex,dic=g))
            GD.PF.update([(k,g[k]) for k in exportNames])
        except Exit:
            pass
        except ExitAll:
            exitall = True
    finally:
        scriptRunning = False # release the lock in case of an error
        elapsed = time.clock() - starttime
        GD.debug('SCRIPT RUNTIME : %s seconds' % elapsed)
    if GD.gui:
        stepmode = False
        drawrelease() # release the lock
        GD.gui.actions['Play'].setEnabled(True)
        #GD.gui.actions['Step'].setEnabled(False)
        GD.gui.actions['Continue'].setEnabled(False)
        GD.gui.actions['Stop'].setEnabled(False)
    if exitall:
        GD.debug("Calling exit() from playscript")
        exit()
def breakpt(msg=None):
    """Breakpoint where a running script can be halted on a signal.

    If msg is given, it is written to the message board. The exitrequested
    flag is usually set from the GUI (or, in nongui mode, from another
    thread via stopatbreakpt()); when it is set, the flag is cleared and
    the script is aborted by raising Exit.
    """
    global exitrequested
    if not exitrequested:
        return
    if msg is not None:
        GD.message(msg)
    exitrequested = False  # reset so the next breakpoint does not fire again
    raise Exit
def stopatbreakpt():
    """Set the exitrequested flag, halting the script at its next breakpt()."""
    global exitrequested
    exitrequested = True
def play(fn,argv=[]):
    """Play a formex script from file fn.

    A list of arguments can be passed. They will be available
    under the name argv.
    NOTE(review): file() is a Python 2 builtin (open() in Python 3), and
    the mutable default argv=[] is shared between default-argument calls.
    """
    message("Running script (%s)" % fn)
    playScript(file(fn,'r'),fn,argv)
    message("Finished script %s" % fn)
    return argv
def exit(all=False):
    """Exit from the current script, or (with all=True) from pyFormex.

    Inside a running script this raises Exit/ExitAll, which playScript()
    catches; outside a script the interpreter is terminated directly.
    """
    # BUG FIX: sys is not imported at this file's module level
    # (only threading, os, commands, copy, re, time are), so the
    # sys.exit() branch raised NameError; import it locally.
    import sys
    if scriptRunning:
        if all:
            raise ExitAll # exit from pyformex
        else:
            raise Exit # exit from script only
    else:
        sys.exit(0) # exit from pyformex
########################### app ################################
def runApp(args):
    """Run the application without gui.

    args is a list of script paths, possibly interspersed with arguments
    for those scripts: each script should pop the arguments it needs from
    the list and return the remainder (play() returns argv).
    """
    ## GD.message = message
    while len(args) > 0:
        scr = args.pop(0)
        # Only execute files that exist and look like pyFormex scripts.
        if os.path.exists(scr) and utils.isPyFormex(scr):
            play(scr,args)
        else:
            raise RuntimeError,"No such pyFormex script found: %s" % scr
########################## print information ################################
def formatInfo(F):
    """Return formatted information about a Formex.

    Reports the shape, both bounding box corners, center and max property.
    """
    bb = F.bbox()
    return """shape = %s
bbox[lo] = %s
bbox[hi] = %s
center = %s
maxprop = %s
""" % (F.shape(),bb[0],bb[1],F.center(),F.maxprop())
def printall():
    """Print the names of all Formex instances in the global dict."""
    print "Formices currently in globals():\n%s" % listAll(clas=formex.Formex)
def printglobals():
    """Dump this module's full globals dict (debug helper)."""
    print globals()
def printglobalnames():
    """Print the sorted list of names in this module's globals."""
    a = globals().keys()
    a.sort()   # relies on Python 2, where keys() returns a list
    print a
def printconfig():
    """Print the reference and user configuration objects."""
    print "Reference Configuration: " + str(GD.refcfg)
    print "User Configuration: " + str(GD.cfg)
def printdetected():
    """Print the version plus all detected Python modules and external programs."""
    print "%s (%s)\n" % (GD.Version,GD.__revision__)
    print "Detected Python Modules:"
    for k,v in GD.version.items():
        if v:   # only report modules that were actually found
            print "%s (%s)" % ( k,v)
    print "\nDetected External Programs:"
    for k,v in GD.external.items():
        if v:
            print "%s (%s)" % ( k,v)
### Utilities
def chdir(fn):
    """Change the current working directory.

    If fn is a directory name, the current directory is set to fn.
    If fn is a file name, the current directory is set to the directory
    holding fn.
    In either case, the current directory is stored in GD.cfg['workdir']
    for persistence between pyFormex invocations.
    If fn does not exist, nothing is done.
    """
    # BUG FIX: the original tested the function object `os.path.exists`
    # (always truthy) instead of calling it on fn, so the "fn does not
    # exist" guard never worked.
    if os.path.exists(fn):
        if not os.path.isdir(fn):
            fn = os.path.dirname(fn)
        os.chdir(fn)
        GD.cfg['workdir'] = fn
        GD.message("Your current workdir is %s" % os.getcwd())
def workHere():
    """Change the current working directory to the script's location."""
    os.chdir(os.path.dirname(GD.cfg['curfile']))
def runtime():
    """Return the time elapsed since start of execution of the script."""
    # NOTE(review): playScript() assigns a *local* starttime, so this
    # module-level value may never be updated after startup — confirm.
    return time.clock() - starttime
#### End
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# SUMKAM.NET
#
# GIMP Python-Fu скрипт обработки изображений
# для каталога товаров.
#
# Считывает файл конфигурации cfgHomeFile.
# Увеличивает контраст на значение val.
from gimpfu import *
import os
import ConfigParser
def plugin_func():
    """Increase the contrast of the first open GIMP image by a configured amount.

    Reads the integer [Contrast] hard value from the user's
    gimp-plug-ins.cfg and applies it via gimp_brightness_contrast
    (brightness is left at 0).
    """
    cfgHomeFile = "~/sumkamnet/gimp-plug-ins-config/gimp-plug-ins.cfg"
    config = ConfigParser.ConfigParser()
    config.read(os.path.expanduser(cfgHomeFile))
    val = config.getint("Contrast","hard")
    # Operates on the first image in GIMP's open-image list.
    image = gimp.image_list()[0]
    drawable = pdb.gimp_image_get_active_drawable(image)
    pdb.gimp_brightness_contrast(drawable, 0, val)
    return
register(
    "python-fu-sumkamnet-contrast-hard", # name of the function being registered
    "sumkam.net. Increases contrast on preconfigured value", # plug-in blurb
    "Contrast hard", # short description of what the script does
    "Eugene Polyakov", # author
    "Eugene Polyakov (foxen@foxen.ru)", # copyright holder
    "29.12.2013", # creation date
    "contrast hard", # menu label used to launch the plug-in
    "*", # image types the plug-in can handle
    [],# input parameters passed to the plug-in
    [],# values returned by the plug-in
    plugin_func,
    menu="<Image>/SUMKAMNET/")# handler function and the menu the entry is placed in
main() |
from django.contrib import admin
from locations.models import *
@admin.register(Country)
class CountryAdmin(admin.ModelAdmin):
    """Admin for Country: single name column, searchable by name."""
    list_display = ['name']
    search_fields = ['name']
@admin.register(City)
class CityAdmin(admin.ModelAdmin):
    """Admin for City: single name column, searchable by name."""
    list_display = ['name']
    search_fields = ['name']
@admin.register(Location)
class LocationAdmin(admin.ModelAdmin):
    """Admin for Location: name column; search also spans the related
    city and country names (see search_fields below)."""
    list_display = ['name']
search_fields = ['name', 'city__name', 'country__name'] |
# TODO: Create CSV matrix files for existing data
import common.constants as cn
import common_python.constants as ccn
from common_python.testing import helpers
from common_python.classifier import feature_analyzer
import classifier.main_case_classifier as main
import numpy as np
import os
import pandas as pd
import shutil
import unittest
IGNORE_TEST = False  # set True to short-circuit the tests while debugging
IS_PLOT = False  # NOTE(review): not referenced in the visible code
DIR = os.path.dirname(os.path.abspath(__file__))
TEST_OUT_PATH = os.path.join(DIR,
    "test_main_case_classifier.csv")  # output artifact; deleted in setUp/tearDown
TEST_IN_PATH = os.path.join(cn.TRINARY_SAMPLES_DIR,
    "AM_MDM.csv")  # sample trinary input data
STATE = 1  # state value exercised by testRunState
class TestFunctions(unittest.TestCase):
    """Tests for the main_case_classifier module-level entry points."""

    def _remove(self):
        # Delete output artifacts so every test starts from a clean slate.
        for path in [TEST_OUT_PATH]:
            if os.path.isfile(path):
                os.remove(path)

    def setUp(self):
        self._remove()

    def tearDown(self):
        self._remove()

    def testRunState(self):
        """_runState returns a valid DataFrame with the expected columns."""
        if IGNORE_TEST:
            return
        df_instance = pd.read_csv(TEST_IN_PATH)
        arguments = main.Arguments(
            state=STATE, df=df_instance, num_fset=5)
        df = main._runState(arguments)
        # FIX: the original chained assignment "columns = expected_columns
        # = [...]" left a stray unused name; a single name is clearer.
        columns = [ccn.FEATURE_VECTOR,
            ccn.SIGLVL, cn.STATE, main.INSTANCE,
            ccn.FRAC, ccn.COUNT]
        self.assertTrue(helpers.isValidDataFrame(df,
            expected_columns=columns,
            nan_columns=columns))

    def testRun(self):
        """run() consumes the input CSV and writes the output CSV."""
        if IGNORE_TEST:
            return
        with open(TEST_IN_PATH, "r") as in_fd:
            with open(TEST_OUT_PATH, "w") as out_fd:
                main.run(in_fd, out_fd, num_fset=2)
        # FIX: the original asserted the same condition twice.
        self.assertTrue(os.path.isfile(TEST_OUT_PATH))
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 13 11:58:41 2021
@author: satheesh
"""
def isIn(char, aStr):
    '''
    char: a single character
    aStr: an alphabetized string
    returns: True if char is in aStr; False otherwise

    Binary search over the sorted characters of aStr.
    BUG FIX: the original used float midpoints ((low+high)/2, a TypeError
    as an index in Python 3), compared in the wrong direction, and always
    returned None.
    '''
    if not aStr:
        return False
    s = ''.join(sorted(aStr))
    low = 0
    high = len(s) - 1
    while low <= high:
        mid = (low + high) // 2
        if s[mid] == char:
            return True
        if char < s[mid]:
            high = mid - 1   # target is in the lower half
        else:
            low = mid + 1    # target is in the upper half
    return False
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
################################################
#a simple test .
class ListNode2:
    """Toy singly-linked-list node; `add` is derived as 5 * val.

    NOTE: this file is Python 2 code (print statements below).
    """
    def __init__(self, x):
        self.val = x
        self.next = None
        self.add=self.val*5   # derived field: five times the stored value
    def addcount(self):
        """Print a marker and return 4 * self.add (i.e. 20 * val)."""
        print "begin"
        t=self.add*4
        return t
# Quick smoke test of ListNode2 (Python 2 print statements).
a=ListNode2(4)
print a.add
print a.addcount()
##################################################
##leetcode : problem3 ---- longest substring without repeating characters
class Solution:
    """LeetCode problem 3: longest substring without repeating characters."""
    def __init__(self,s):
        """
        :type s: str
        :rtype: int
        """
        self.s = s
    def substring(self):
        """Return the length of the longest repeat-free substring of self.s.

        BUG FIX: the original double loop compared unrelated positions and
        did not compute the stated quantity. This is the standard sliding
        window: `start` is the left edge of the current repeat-free window
        and `last_seen` maps each character to its most recent index.
        """
        if len(self.s) <= 1:
            return len(self.s)
        best = 0
        start = 0
        last_seen = {}
        for i, ch in enumerate(self.s):
            # A repeat inside the window moves the left edge past it.
            if ch in last_seen and last_seen[ch] >= start:
                start = last_seen[ch] + 1
            last_seen[ch] = i
            best = max(best, i - start + 1)
        return best
c=raw_input("Please input a string: ") ### input and output. please pay attention to the python version,
# here we use python2.7 so we use raw_input (Python 3 would use input())
a = Solution(c)
print(a.substring())
#################################################################
# leetcode: problem4 ---- Median of two sorted arrays.
class Solution4:
    """LeetCode problem 4: median of two sorted arrays.

    Simple merge-and-sort approach, O((m+n) log(m+n)).
    """
    def findMedianSortedArray(self,nums1,nums2):
        """Return the median of the combined values of nums1 and nums2.

        BUG FIX: the even-length case now divides by 2.0, so e.g. the
        median of [1,2] and [3,4] is 2.5 (integer division truncated it
        to 2 under Python 2).
        """
        self.nums1=nums1
        self.nums2=nums2
        merged = sorted(self.nums1 + self.nums2)
        n = len(merged)
        if n % 2 == 0:
            return (merged[n // 2 - 1] + merged[n // 2]) / 2.0
        return merged[n // 2]
# Demo call (the result is computed but never printed).
num1=[1,2,5]
num2=[3,5,4]
a4=Solution4()
a4.findMedianSortedArray(num1,num2)
######################################################################
#leetcode: problem5---- Longest Palindromic Substring.
class Solution5(object):
    """LeetCode problem 5: longest palindromic substring."""
    def longestPalindrome(self, s):
        """Return the longest palindromic substring of s.

        Expand-around-center: every index is tried as the middle of an
        odd-length palindrome and as the left of an even-length one. On
        ties the earliest palindrome found is kept.
        """
        if len(s) < 2:
            return s

        def expand(lo, hi):
            # Grow outward while the endpoints match, then slice the span.
            while lo >= 0 and hi < len(s) and s[lo] == s[hi]:
                lo -= 1
                hi += 1
            return s[lo + 1:hi]

        best = s[0:1]
        for center in range(len(s) - 1):
            for candidate in (expand(center, center), expand(center, center + 1)):
                if len(candidate) > len(best):
                    best = candidate
        return best
# Demo call (the result is computed but never printed).
a=Solution5()
a.longestPalindrome("dfasdfc")
############################################
#problem 6 -----Zig Zag conversion
class Solution6:
    """LeetCode problem 6: ZigZag conversion."""
    def convert(self, s, numRows):
        """Read s in zigzag order over numRows rows and return it row by row.

        :type s: str
        :type numRows: int
        :rtype: str
        """
        if numRows == 1 or numRows >= len(s):
            return s
        # One full zigzag (down plus back up) spans 2*numRows - 2 characters,
        # so each character's row follows directly from its index.
        period = 2 * numRows - 2
        rows = [[] for _ in range(numRows)]
        for pos, ch in enumerate(s):
            offset = pos % period
            row = offset if offset < numRows else period - offset
            rows[row].append(ch)
        return ''.join(''.join(row) for row in rows)
#######################################################
## problem 7 : reverse 32-bit problem
class Solution7:
    """LeetCode problem 7: reverse the digits of a signed 32-bit integer."""
    def reverse(self, x):
        """Return x with its decimal digits reversed, keeping the sign.

        Returns 0 when the reversed value overflows the signed 32-bit
        range [-2**31, 2**31 - 1].
        BUG FIX: the original test `abs(x) > 2**31` wrongly accepted a
        reversed value of exactly 2**31, which is out of range.
        """
        sign = -1 if x < 0 else 1
        result = sign * int(str(abs(x))[::-1])
        if result < -2**31 or result > 2**31 - 1:
            return 0
        return result
|
# Add the upper directory (where the nodebox module is) to the search path.
import os, sys; sys.path.insert(0, os.path.join("..",".."))
from nodebox.graphics import *
# In the previous examples, drawing occurs directly to the canvas.
# It is also possible to draw into different layers,
# and then transform / animate the layers individually.
# The Layer class introduces a lot of useful functionality:
# - layers can receive events from the mouse,
# - layers have an origin point (e.g. "center") from which transformations originate,
# - layers have methods such as Layer.rotate() and Layer.scale(),
# - layers can enable motion tweening (i.e. smooth, automatic transititions).
# A Layer has its personal Layer.draw() method that contains drawing commands.
# In this example, we create a subclass of Layer to display a colored, draggable rectangle:
class DraggableRect(Layer):
    """A colored rectangle layer that highlights on hover and rotates or
    scales itself while being dragged."""
    def __init__(self, *args, **kwargs):
        # A Layer with an extra "clr" property.
        Layer.__init__(self, *args, **kwargs)
        self.clr = Color(0, 0.75)
    def draw(self):
        # Fill the layer's full extent with the current color.
        rect(0, 0, self.width, self.height, fill=self.clr, stroke=self.clr)
    def on_mouse_enter(self, mouse):
        # When the mouse hovers over the rectangle, highlight it.
        mouse.cursor = HAND
        self.clr.a = 0.75
    def on_mouse_leave(self, mouse):
        # Reset the mouse cursor when the mouse exits the rectangle.
        mouse.cursor = DEFAULT
        self.clr.a = 0.5
    def on_mouse_drag(self, mouse):
        # When the rectangle is dragged, transform it.
        # Its scale increases as the mouse is moved up.
        # Its angle increases as the mouse is moved left or right.
        self.scale(1 + 0.005 * mouse.dy)
        self.rotate(mouse.dx)
# The layer's origin defines the origin point for the layer's placement,
# its rotation and scale. If it is (0.5, 0.5), this means the layer will transform
# from its center (i.e. 50% width and 50% height). If you supply integers,
# the values will be interpreted as an absolute offset from the layer's bottom-left corner.
r1 = DraggableRect(x=200, y=200, width=200, height=200, origin=(0.5,0.5), name="blue1")
r1.clr = color(0.0, 0.5, 0.75, 0.5)
r2 = DraggableRect(x=250, y=250, width=200, height=200, origin=(0.5,0.5), name="blue2")
r2.clr = color(0.0, 0.5, 0.75, 0.5)
r3 = DraggableRect(x=300, y=300, width=200, height=200, origin=(0.5,0.5), name="purple1")
r3.clr = color(0.25, 0.15, 0.75, 0.5)
# We'll attach a layer as a child to layer r3.
# Child layers are very handy because they transform together with their parent.
# For example, if the parent layer rotates, all of its children rotate as well.
# However, all of the layers can still receive separate mouse and keyboard events.
# You can use this to (for example) create a flying creature that responds differently
# when the mouse touches its wings or its head - but where all the body parts stick together.
# Position the child's center at (100,100) relative from the parent's layer origin:
r4 = DraggableRect(x=100, y=100, width=100, height=100, origin=(0.5,0.5), name="purple2")
r4.clr = color(0.25, 0.15, 0.75, 0.5)
r3.append(r4)
# Even more nested child layers:
#r5 = DraggableRect(x=50, y=50, width=50, height=50, origin=(0.5,0.5), name="pink1")
#r5.clr = color(1.00, 0.15, 0.75, 0.5)
#r4.append(r5)
# The canvas is essentially a list of layers, just as an image in Photoshop is a list of layers.
# Appending a layer to the canvas ensures that it gets drawn each frame,
# that it receives mouse and keyboard events, and that its motion tweening is updated.
canvas.append(r1)
canvas.append(r2)
canvas.append(r3)
def draw(canvas):
    """Per-frame callback: just clear; the layers draw themselves."""
    # There is nothing to draw here;
    # all the drawing occurs in the separate layers.
    canvas.clear()
canvas.size = 500, 500
canvas.run(draw)   # enter the main loop; layers receive events each frame
# Note: if you have layers that do not need to receive events,
# set Layer.enabled = False; this saves some time doing expensive matrix operations. |
import argparse
import logging
import math
import sys
from ignite.metrics import Loss
from tensorboardX import SummaryWriter
import torch
from torch.nn import ModuleList
from torch.utils import data
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s | %(name)s | %(levelname)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
logger = logging.getLogger(__name__)
def create_data(examples=180, noise_ratio=0.1):
    """Build a toy 1-D regression dataset: y = cos(x) + uniform noise.

    x holds `examples` evenly spaced points over [-pi, pi], shaped
    (examples, 1); the additive noise is noise_ratio * U[0, 1).
    """
    inputs = torch.linspace(-math.pi, math.pi, examples).unsqueeze(dim=1)
    targets = torch.cos(inputs) + noise_ratio * torch.rand(inputs.size())
    return data.TensorDataset(inputs, targets)
def create_loader(examples=128, noise_ratio=0.1, batch_size=64, shuffle=True):
    """Wrap the toy cosine dataset in a two-worker DataLoader."""
    return data.DataLoader(
        dataset=create_data(examples=examples, noise_ratio=noise_ratio),
        batch_size=batch_size,
        num_workers=2,
        shuffle=shuffle,
    )
class CosineNet(torch.nn.Module):
    """MLP regressor for the toy cosine dataset.

    A stack of Linear(..,10)+ReLU hidden layers followed by a final
    Linear(10,1); MSE loss. Training and validation losses are logged to
    TensorBoard through `writer`.
    """
    def __init__(self, writer, num_hidden_layers=1):
        super(CosineNet, self).__init__()
        input_features = 1
        hidden_output_features = 10
        final_output_features = 1
        self.writer = writer
        layers = ModuleList()
        # First hidden layer maps the 1-d input; later ones are 10 -> 10.
        for i in range(num_hidden_layers):
            if i == 0:
                layers.append(torch.nn.Linear(input_features, hidden_output_features))
            else:
                layers.append(
                    torch.nn.Linear(hidden_output_features, hidden_output_features)
                )
            layers.append(torch.nn.ReLU())
        final_layer = torch.nn.Linear(hidden_output_features, final_output_features)
        layers.append(final_layer)
        self.model = torch.nn.Sequential(*layers)
        self.loss_func = torch.nn.MSELoss()
    def forward(self, input):
        """Run the Sequential stack; input is the x batch, preds the y-hat."""
        preds = self.model(input)
        return preds
    def evaluate(self, data_loader, epoch=0):
        """Compute and log the mean validation loss over data_loader."""
        loss_metric = Loss(self.loss_func)
        for i, (inputs, targets) in enumerate(data_loader):
            # NOTE(review): calling .forward() directly bypasses Module
            # __call__ hooks; self.model(inputs) is the usual form.
            preds = self.model.forward(inputs)
            loss = self.loss_func(preds, targets)
            loss_metric.update((preds, targets))
            logger.debug(f"validation: {i} has loss {loss.item()}")
        mean_epoch_loss = loss_metric.compute()
        logger.info(f"validation mean loss is {mean_epoch_loss}")
        self.writer.add_scalar("val/loss", mean_epoch_loss, epoch)
    def train_loop(self, train_loader, val_loader, epochs=1000, lr=0.01):
        """Train with Adam for `epochs` epochs, validating after each one."""
        optimizer = torch.optim.Adam(self.model.parameters(), lr=lr)
        for i in range(epochs):
            self.train_one_epoch(
                dataloader=train_loader,
                epoch=i,
                model=self.model,
                loss_func=self.loss_func,
                optimizer=optimizer,
            )
            self.evaluate(data_loader=val_loader, epoch=i)
    def train_one_epoch(self, dataloader, model, epoch, loss_func, optimizer):
        """One optimization pass over dataloader; logs the mean epoch loss."""
        loss_metric = Loss(self.loss_func)
        for i, (inputs, targets) in enumerate(dataloader):
            preds = model.forward(inputs)
            loss = loss_func(preds, targets)
            loss_metric.update((preds, targets))
            # Standard step: clear stale grads, backprop, update weights.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            logger.debug(f"epoch: {epoch}, batch: {i} has training loss {loss.item()}")
        mean_epoch_loss = loss_metric.compute()
        logger.info(f"epoch {epoch}, has mean training loss is {mean_epoch_loss}")
        self.writer.add_scalar("training/loss", mean_epoch_loss, epoch)
def parse_args():
    """Parse the CLI and run the requested command.

    NOTE(review): despite its name, this function also builds the model
    and data loaders and runs training/evaluation. Two parsers are used:
    the first consumes only argv[1] (the sub-command), the second parses
    the remaining options from argv[2:].
    """
    command_parser = argparse.ArgumentParser(description="CosineNet Modeling Interface")
    command_parser.add_argument(
        "command", help="sub-command to run", choices=("train", "evaluate")
    )
    parser = argparse.ArgumentParser(description="CosineNet Modeling Interface")
    parser.add_argument(
        "-v",
        "--verbose",
        action="store_true",
        default=False,
        help="turn on verbose mode to enable debug logging",
    )
    parser.add_argument(
        "--logdir", type=str, default=None, help="path where to save training logs"
    )
    parser.add_argument("--lr", type=float, default=0.01, help="learning rate for ADAM")
    parser.add_argument(
        "--training-noise",
        type=float,
        default=0.1,
        help="amount of noise to add to training data",
    )
    parser.add_argument(
        "--val-noise",
        type=float,
        default=0.1,
        help="amount of noise to add to val data",
    )
    parser.add_argument(
        "--n-hidden-layers",
        type=int,
        default=3,
        help="number of hidden layers for model",
    )
    parser.add_argument(
        "--n-val-examples",
        type=int,
        default=2048,
        help="number of examples in validation set",
    )
    parser.add_argument(
        "--n-train-examples",
        type=int,
        default=256,
        help="number of examples in training set",
    )
    parser.add_argument("--batch-size", type=int, default=64, help="batch size")
    parser.add_argument(
        "--n-training-epochs",
        type=int,
        default=1000,
        help="number of epochs to train for",
    )
    # argv[1] is the sub-command; everything after it goes to `parser`.
    command = command_parser.parse_args(sys.argv[1:2]).command
    args = parser.parse_args(sys.argv[2:])
    logger.info("Parsed CLI args: %s", args)
    writer = SummaryWriter(args.logdir)
    cosine_net = CosineNet(writer=writer, num_hidden_layers=args.n_hidden_layers)
    # The validation loader is needed for both sub-commands.
    val_loader = create_loader(
        examples=args.n_val_examples,
        noise_ratio=args.val_noise,
        batch_size=args.batch_size,
        shuffle=False,
    )
    if command == "train":
        train_loader = create_loader(
            examples=args.n_train_examples,
            noise_ratio=args.training_noise,
            batch_size=args.batch_size,
            shuffle=True,
        )
        cosine_net.train_loop(
            train_loader, val_loader, epochs=args.n_training_epochs, lr=args.lr
        )
    elif command == "evaluate":
        cosine_net.evaluate(val_loader)
    writer.close()
# todo tune hyperparams: num hidden layers, learning rate
|
import numpy
import math
u = [1,-1,1,-1,1,-1,1,-1,1,-1, 1]  # coefficients (lowest power first) of u(n) = 1 - n + n**2 - ... + n**10; see evalPoly
def evalPoly(f, n):
    """Evaluate the polynomial with coefficients f (lowest power first) at n."""
    return sum(coeff * n ** power for power, coeff in enumerate(f))
def getPoly(seq):
    """Fit the exact polynomial of degree len(seq)-1 through seq at n=1..len(seq).

    Returns the rounded coefficients, lowest power first.
    """
    size = len(seq)
    # Vandermonde-style matrix with rows [1, x, x**2, ...] for x = 1..size.
    matrix = numpy.array([[x ** power for power in range(size)]
                          for x in range(1, size + 1)], dtype=float)
    rhs = numpy.array(seq, dtype=float)
    return [round(coeff) for coeff in numpy.linalg.solve(matrix, rhs)]
def FIT(p):
    """Return the First Incorrect Term of polynomial p against u.

    Checks n = 1..11 and returns the first value where p disagrees with
    the target polynomial u, or 0 if they agree everywhere in that range.
    """
    for n in range(1, 12):
        guess = evalPoly(p, n)
        if guess != evalPoly(u, n):
            return guess
    return 0
ans = 0
seq = []
# Sample the target polynomial u at n = 1..10.
for i in range(1, 11):
    seq.append(evalPoly(u, i))
# Fit the exact degree-9 polynomial through all 10 samples.
p = getPoly(seq)
print(p)
# NOTE(review): Project Euler 101 sums the FITs of fits of order 1..10;
# only the 10-term fit is used here — confirm this is intended.
fit = FIT(p)
ans += fit
print(ans)
"""
a + b + c = u1
a + 2b + 3c = u2
a + 4b + 9c = u3
a + 9b + 27c = k
a + b*(i^2) + c*(i^3) + d*(i^4)
"""
|
def patternToNumber(pattern):
    """Encode a DNA string as an integer in base 4 (A=0, C=1, G=2, T=3)."""
    digits = {"A": 0, "C": 1, "G": 2, "T": 3}
    num = 0
    # Horner's rule: each base shifts the accumulator one base-4 digit left.
    for base in pattern:
        num = num * 4 + digits[base]
    return num
def numberToPattern(index, k):
    """Decode `index` into the k-character DNA string it encodes (base 4)."""
    bases = "ACGT"
    letters = []
    # Peel off base-4 digits from the least significant end.
    for _ in range(k):
        index, digit = divmod(index, 4)
        letters.append(bases[digit])
    return ''.join(reversed(letters))
def computeFrequency(text, k):
    """Return the length-4**k frequency array of all k-mers in text.

    Entry i counts occurrences of the k-mer encoded as i (see
    patternToNumber / numberToPattern).
    """
    frequentArray = [0] * pow(4, k)
    # Slide a width-k window and bump the count of each encoded k-mer.
    for start in range(len(text) - k + 1):
        frequentArray[patternToNumber(text[start:start + k])] += 1
    return frequentArray
def betterClumpFinding(genome, k, L, t):
    """Find all k-mers that form (L, t)-clumps in genome.

    A k-mer forms an (L, t)-clump if it occurs at least t times inside
    some window of length L. The frequency table is computed once for the
    first window and then updated incrementally while sliding (one
    decrement for the k-mer leaving, one increment for the one entering),
    instead of recounting every window.
    """
    # initialize
    freqDB = [0]*pow(4, k)
    clump = [0]*pow(4, k)   # clump[i] == 1 once k-mer i qualified in any window
    frequentPattern = set()
    firstPattern = ""
    freqDB = computeFrequency(genome[0:L], k)   # counts for the first window
    #print("freqDB={}".format(freqDB))
    # create the clump array for frequency greater than t times
    for i in range(0, pow(4, k)):
        if freqDB[i] >= t:
            clump[i] = 1
            #print("Found {}".format(i))
    # update frequencyDB
    # sliding and looping
    for i in range(1, len(genome)-L+1):
        firstPattern = genome[i-1:i-1+k]
        #print("i={} firstPattern[{}:{}]={}".format(i, i-1, i-1+k, genome[i-1:i-1+k]))
        # first Pattern decrease frequency by one
        index = patternToNumber(firstPattern)
        #print("i={} index = {} firstPattern[{}:{}]={}".format(i, index, i-1, i-1+k, genome[i-1:i-1+k]))
        freqDB[index] -= 1
        # NOTE(review): this post-decrement check can only confirm an entry
        # that was already flagged; it looks redundant but is harmless.
        if freqDB[index] >=t:
            clump[index] = 1
        # last Pattern increase frequency by one
        lastPattern = genome[i+L-k: i+L]
        index = patternToNumber(lastPattern)
        freqDB[index] += 1
        # update the clump array
        if freqDB[index] >= t:
            clump[index] = 1
            #print("Found {}".format(i))
    # iterate all pattern frequency greater than t times and add them into frequentPattern set
    for i in range(0, pow(4,k)):
        if clump[i] == 1:
            frequentPattern.add(numberToPattern(i,k))
            #print("clump {} == 1, add {} to frequentPattern".format(i, numberToPattern(i,k)))
    return frequentPattern
import sys   # NOTE(review): unused in this script
# Load the genome: expected as one long line at the top of the file.
fd = open("./datasets/E_coli.txt")
data = fd.readlines()
fd.close()
genome=""
genome=data[0]
k=9     # k-mer length
L=500   # window length
t=3     # minimum occurrences within one window
result = set()
result = betterClumpFinding(genome, k, L, t)
print(len(result))
|
from unittest import TestCase
import numpy as np
from com.seregy77.evnn.spea2.individual import Individual
from com.seregy77.evnn.spea2.network_parameter import LayerParameter
def init_weights():
    """Build random LayerParameter objects for a 784-50-50-10 network.

    Weights and biases are drawn uniformly from [-0.3, 0.3]; one
    LayerParameter per consecutive pair of layer sizes.
    """
    layers_config = [784, 50, 50, 10]
    limit = 0.3 * 1.0
    layer_params = []
    for fan_in, fan_out in zip(layers_config, layers_config[1:]):
        # Weights are (fan_in, fan_out); biases one per output unit.
        weights = np.random.uniform(-limit, limit, (fan_in, fan_out))
        biases = np.random.uniform(-limit, limit, (fan_out,))
        layer_params.append(LayerParameter(weights, biases))
    return layer_params
class TestIndividual(TestCase):
    """Tests for Individual crossover."""

    def test_cross(self):
        """Crossing two individuals yields two new, distinct individuals."""
        # BUG FIX: renamed from `cross` to `test_cross` — unittest only
        # discovers methods whose names start with "test", so the original
        # was never executed.
        parent1 = Individual(init_weights())
        parent2 = Individual(init_weights())
        (child1, child2) = parent1.cross(parent2)
        self.assertNotEqual(child1, child2)
        self.assertNotEqual(parent1, child1)
        self.assertNotEqual(parent2, child2)
|
# -*- encoding: utf-8 -*-
import requests
import re
import sys
import json
import time
import datetime
aid='44625717' # the video's av number
page=0 # zero-based part index: "part x" of the video is page x-1... this is that index
date_start='2019-02-25' # start and end dates of the history to fetch (inclusive)
date_end='2019-03-01'
cookie_header={
    'Cookie': # paste your logged-in cookie value here; keep it private.
               # NOTE(review): until a value is filled in, this dict literal
               # is a syntax error and the script will not run.
}
class bilibili:
    """Namespace class for Bilibili API helpers (functions are called via
    the class path, e.g. bilibili.video.get_cid, and take no self)."""
    class video:
        def get_cid(aid, page):
            """Return the cid of part `page` of video av<aid>."""
            response = requests.get('https://api.bilibili.com/x/player/pagelist?aid='+str(aid))
            pagelist = response.json()
            cid = pagelist['data'][page]['cid']
            return cid
        class danmaku:
            def get_data(date, cid, cookie_header):
                """Fetch and parse one day's danmaku history XML.

                Returns a list of dicts, one per danmaku entry.
                """
                data_out = []
                response = requests.get('https://api.bilibili.com/x/v2/dm/history?type=1&date='+str(date)+'&oid='+str(cid),headers=cookie_header)
                response.encoding = response.apparent_encoding
                xml_text = response.text
                # Each danmaku is an XML element <d p="attrs...">text</d>.
                data = re.findall('(?<=<d p=").*?(?=">)', xml_text)
                text = re.findall('(?<=">).*?(?=</d>)', xml_text)
                entry_count = 0   # NOTE(review): unused
                for i in range(len(data)):
                    data_split = data[i].split(',') # float second offset -> integer minutes and seconds below
                    second = int(float(data_split[0]))
                    minute = int(second/60)
                    second %= 60
                    mode = int(data_split[1])
                    size = int(data_split[2])
                    color = hex(int(data_split[3]))
                    submit_time = time.localtime(int(data_split[4])) # Unix timestamp -> struct_time
                    pool = int(data_split[5])
                    coded_uid = data_split[6]
                    rowID = int(data_split[7])
                    new_entry = {
                        'appear_time':{'sec':second,'min':minute},
                        'mode':mode,
                        'size:':size,   # NOTE(review): key is 'size:' with a stray colon
                        'color':color,
                        'submit_time':{
                            'year':submit_time[0],
                            'month':submit_time[1],
                            'day':submit_time[2],
                            'hour':submit_time[3],
                            'min':submit_time[4],
                            'sec':submit_time[5],
                        },
                        'pool':pool,
                        'uid':coded_uid,
                        'row':rowID,
                        'text':text[i]
                    }
                    data_out.append(new_entry)
                return data_out
            def getall(date_start, date_end, cid, cookie_header):
                """Fetch danmaku for every day in [date_start, date_end], deduplicated."""
                data_all = []
                date_pointer = date_start
                while date_pointer <= date_end:
                    data = bilibili.video.danmaku.get_data(date_pointer, cid, cookie_header)
                    # Skip entries already collected on a previous day.
                    data_all.extend([i for i in data if i not in data_all])
                    date_pointer += datetime.timedelta(days=1)
                return data_all
if __name__=='__main__':
    # Re-bind the module-level 'YYYY-MM-DD' config strings as datetime.date
    # objects so they can be stepped with timedelta in getall().
    date_start=datetime.date(*[int(i) for i in date_start.split('-')])
    date_end=datetime.date(*[int(i) for i in date_end.split('-')])
    print('Getting cid...')
    cid = str(bilibili.video.get_cid(aid, page))
    print('Getting danmaku list...')
    all_danmaku = bilibili.video.danmaku.getall(date_start,date_end,cid,cookie_header)
    print(len(all_danmaku),'in total from',str(date_start),'to',str(date_end))
    # Sort by the server-side row id -- presumably submission order; confirm.
    all_danmaku.sort(key=lambda i:i['row'])
    print('Saving data...')
    # Dump as human-readable, UTF-8 JSON named after the video and range.
    with open('av'+aid+'_'+str(date_start)+' to '+str(date_end)+'.json','w',encoding='utf-8') as jfile:
        json.dump(all_danmaku, jfile,
                ensure_ascii=False,
                sort_keys=False,
                indent=2,
                separators=(',', ': ')
                )
    print('Finished')
#!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import difflib
import logging
import os
import re
from typing import Iterable, List, Optional
from benchpress.lib.parser import TestCaseResult, TestStatus
from benchpress.suites.suite import DiscoveredTestCase, Suite
logger = logging.getLogger(__name__)
TESTS_DIR = "xfstests/tests"
RESULTS_DIR = "xfstests/results"
class XfstestsSuite(Suite):
    """Benchpress suite adapter for xfstests: runs the harness and parses its
    per-test console output into TestCaseResult objects."""
    NAME = "xfstests"
    def discover_cases(self) -> List[DiscoveredTestCase]:
        # TODO
        return [DiscoveredTestCase(name="exec", description="does the test exit(0)")]
    @staticmethod
    def get_status_from_name(status: str):
        """Map a status word to a TestStatus member.

        Tries the upper-cased name verbatim, then with an "ED" suffix (so
        "pass" resolves to PASSED); returns None when neither exists.
        """
        status = status.upper()
        try:
            return TestStatus[status]
        except KeyError:
            try:
                return TestStatus[status + "ED"]
            except KeyError:
                logger.warning(f'No such status "{status}(ED)"')
                return None
    def run(
        self, cases: Optional[List[DiscoveredTestCase]] = None
    ) -> Iterable[TestCaseResult]:
        """Run the suite; individual case names are appended to self.args."""
        if cases:
            logger.warning(
                "benchpress currently doesn't support running groups"
                " of tests in xfstests, assuming you passed a list of individual"
                " test cases to run"
            )
            self.args += [c.name for c in cases]
        return super().run(cases)
    def parse(
        self, stdout: List[str], stderr: List[str], returncode: int
    ) -> Iterable[TestCaseResult]:
        """Yield one TestCaseResult per "group/NNN ..." line of stdout.

        A bare "<n>s" status means the test passed in n seconds; "[not run]"
        maps to SKIPPED, "[expunged]" to OMITTED, anything else to FAILED.
        """
        excluded = {}
        # The exclude list is one test per line optionally followed by a
        # comment explaining why the test is excluded.
        exclude_list_re = re.compile(
            r"\s*(?P<test_name>[^\s#]+)\s*(?:#\s*(?P<reason>.*))?\s*"
        )
        try:
            with open("exclude_list", "r", errors="backslashreplace") as f:
                for line in f:
                    match = exclude_list_re.match(line)
                    if match:
                        reason = match.group("reason")
                        if reason is None:
                            reason = ""
                        excluded[match.group("test_name")] = reason
        except OSError:
            # No exclude_list file present: nothing is pre-excluded.
            pass
        test_regex = re.compile(
            r"^(?P<test_name>\w+/\d+)\s+(?:\d+s\s+\.\.\.\s+)?(?P<status>.*)"
        )
        for line in stdout:
            match = test_regex.match(line)
            if match:
                test_name = match.group("test_name")
                # Default to FATAL; overwritten by every branch below.
                case = TestCaseResult(name=test_name, status=TestStatus.FATAL)
                status = match.group("status")
                duration_match = re.fullmatch(r"(\d+(?:\.\d+)?)s", status)
                if duration_match:
                    case.status = TestStatus.PASSED
                    case.runtime = float(duration_match.group(1))
                elif status.startswith("[not run]"):
                    case.status = TestStatus.SKIPPED
                    case.details = self.not_run_details(test_name)
                elif status.startswith("[expunged]"):
                    case.status = TestStatus.OMITTED
                    case.details = self.excluded_details(excluded, test_name)
                else:
                    case.status = TestStatus.FAILED
                    case.details = self.run_details(test_name)
                yield case
    def not_run_details(self, test_name):
        """Reason string for a skipped test, read from its .notrun file."""
        try:
            notrun = os.path.join(RESULTS_DIR, test_name + ".notrun")
            with open(notrun, "r", errors="backslashreplace") as f:
                return "Not run: " + f.read().strip()
        except OSError:
            return "Not run"
    @staticmethod
    def excluded_details(excluded, test_name):
        """Reason string for an excluded test (from the exclude_list map)."""
        try:
            return "Excluded: " + excluded[test_name]
        except KeyError:
            return "Excluded"
    def run_details(self, test_name):
        """Assemble failure details: output diff, full output, then dmesg."""
        details = []
        self.append_diff(test_name, details)
        self.append_full_output(test_name, details)
        self.append_dmesg(test_name, details)
        return "".join(details)
    def append_diff(self, test_name, details):
        """Append a unified diff of expected (.out) vs. actual (.out.bad)."""
        try:
            out_path = os.path.join(TESTS_DIR, test_name + ".out")
            with open(out_path, "r", errors="backslashreplace") as f:
                out = f.readlines()
            out_bad_path = os.path.join(RESULTS_DIR, test_name + ".out.bad")
            with open(out_bad_path, "r", errors="backslashreplace") as f:
                out_bad = f.readlines()
        except OSError:
            # Either file missing -> no diff to show.
            return
        diff = difflib.unified_diff(out, out_bad, out_path, out_bad_path)
        details.extend(diff)
    def append_full_output(self, test_name, details):
        """Append the test's .full output, skipping very large files."""
        full_path = os.path.join(RESULTS_DIR, test_name + ".full")
        try:
            # There are some absurdly large full results.
            if os.path.getsize(full_path) < 100_000:
                with open(full_path, "r", errors="backslashreplace") as f:
                    if details:
                        details.append("--\n")
                    details.append(f"{full_path}:\n")
                    details.append(f.read())
        except OSError:
            pass
    def append_dmesg(self, test_name, details):
        """Append the kernel log captured for this test, if present."""
        dmesg_path = os.path.join(RESULTS_DIR, test_name + ".dmesg")
        try:
            with open(dmesg_path, "r", errors="backslashreplace") as f:
                if details:
                    details.append("--\n")
                details.append(f"{dmesg_path}:\n")
                details.append(f.read())
        except OSError:
            pass
|
import helper
from flask import Flask, request, Response
import json
# Flask application object; the route handlers below attach to it.
app = Flask(__name__)
@app.route('/')
def hello_world():
    """Root endpoint: returns a plain-text greeting."""
    greeting = 'Hello World!'
    return greeting
@app.route('/record/new', methods=['POST'])
def add_record():
    """Create a new contact record.

    Expects a JSON body:
        {"contact_name": ..., "contact_email_id": ..., "contact_number": ...}
    Returns the stored record as JSON, or a 400 JSON error body when the
    helper could not add it.
    """
    req_data = request.get_json()
    contact_name = req_data['contact_name']
    contact_email_id = req_data['contact_email_id']
    contact_number = req_data['contact_number']
    # Add record to the database
    res_data = helper.add_to_database(contact_name, contact_email_id, contact_number)
    if res_data is None:
        # BUGFIX: the old hand-built body ("{'error': ...}") was not valid
        # JSON despite the application/json mimetype; use json.dumps.
        error = {'error': 'Record not added'}
        return Response(json.dumps(error), status=400, mimetype='application/json')
    # Return response
    return Response(json.dumps(res_data), mimetype='application/json')
@app.route('/record/all')
def get_all_items():
    """Return one page of records as JSON.

    Query parameter `page_no` selects the page; it now defaults to 1 and is
    parsed by Flask (the old `int(request.args.get(...))` raised TypeError
    when the parameter was missing).
    """
    page = request.args.get('page_no', default=1, type=int)
    res_data = helper.get_all_records(page)
    # Return response
    return Response(json.dumps(res_data), mimetype='application/json')
@app.route('/record/status/by_name', methods=['GET'])
def get_item_by_name():
    """Return {'status': ...} for the contact named in the `contact_name`
    query parameter; 404 with a JSON error body when it is unknown."""
    contact_name = request.args.get('contact_name')
    # Get records from the helper
    status = helper.get_record_by_name(contact_name)
    if status is None:
        # BUGFIX: the old body concatenated the name *outside* the braces,
        # producing invalid JSON; build it with json.dumps instead.
        error = {'error': 'Item Not Found - ' + str(contact_name)}
        return Response(json.dumps(error), status=404, mimetype='application/json')
    # Return status
    res_data = {
        'status': status
    }
    return Response(json.dumps(res_data), status=200, mimetype='application/json')
@app.route('/record/status/by_mail', methods=['GET'])
def get_item_by_mail():
    """Return {'status': ...} for the contact whose e-mail is given in the
    `contact_email_id` query parameter; 404 with a JSON error otherwise."""
    contact_email_id = request.args.get('contact_email_id')
    # Get records from the helper
    status = helper.get_record_by_mail(contact_email_id)
    if status is None:
        # BUGFIX: the old body concatenated the address *outside* the
        # braces, producing invalid JSON; build it with json.dumps instead.
        error = {'error': 'Item Not Found - ' + str(contact_email_id)}
        return Response(json.dumps(error), status=404, mimetype='application/json')
    # Return status
    res_data = {
        'status': status
    }
    return Response(json.dumps(res_data), status=200, mimetype='application/json')
@app.route('/record/update', methods=['PUT'])
def update_record():
    """Update an existing record from the JSON request body.

    Body keys: record_id, new_contact_name, new_contact_mail,
    new_contact_number.  Returns the helper's result as JSON, or a 400 JSON
    error body when the update fails.
    """
    req_data = request.get_json()
    record_id = req_data['record_id']
    new_contact_name = req_data['new_contact_name']
    new_contact_mail = req_data['new_contact_mail']
    new_contact_number = req_data['new_contact_number']
    # Update record in the database
    res_data = helper.update_record(record_id, new_contact_name, new_contact_mail, new_contact_number)
    if res_data is None:
        # BUGFIX: the old '+'-concatenated body was invalid JSON and raised
        # TypeError when record_id was an int; serialize a dict instead.
        error = {'error': 'Error updating record',
                 'record_id': record_id,
                 'new_contact_name': new_contact_name}
        return Response(json.dumps(error), status=400, mimetype='application/json')
    # Return response
    return Response(json.dumps(res_data), mimetype='application/json')
@app.route('/record/remove', methods=['DELETE'])
def delete_record():
    """Delete the record whose `record_id` is given in the JSON body.

    Returns the helper's result as JSON, or a 400 JSON error body when the
    deletion fails.
    """
    req_data = request.get_json()
    record_id = req_data['record_id']
    # Delete record from the database
    res_data = helper.delete_record(record_id)
    if res_data is None:
        # BUGFIX: the old '+'-concatenated body was invalid JSON and raised
        # TypeError when record_id was an int; serialize a dict instead.
        error = {'error': 'Error deleting item', 'record_id': record_id}
        return Response(json.dumps(error), status=400, mimetype='application/json')
    # Return response
    return Response(json.dumps(res_data), mimetype='application/json')
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run()
|
# -*- coding: utf-8 -*-
"""
@author: Dhruv
This python script contains functions to perform pre-processing on the
uploaded image. Attributes of the image such as brightness and constrast are
adjusted in order to enhance the accuracy of the OCR process.
Three operations, namely grayscaling, thresholding and noise removal are performed.
"""
import cv2
# get grayscale image
# get grayscale image
def get_grayscale(image_file):
    """Convert a BGR image to a single-channel grayscale image."""
    grayscale = cv2.cvtColor(image_file, cv2.COLOR_BGR2GRAY)
    return grayscale
# noise removal
# noise removal
def remove_noise(image_file, ksize=5):
    """Median-blur the image to suppress salt-and-pepper noise.

    ksize: aperture size for cv2.medianBlur (odd integer > 1).  The default
    of 5 preserves the original hard-coded behaviour, but callers may now
    tune it.
    """
    return cv2.medianBlur(image_file, ksize)
# thresholding
# thresholding
def thresholding(image_file):
    """Binarize the image using Otsu's automatically chosen threshold.

    cv2.threshold returns (threshold_value, image); only the image is
    returned here.
    """
    threshold_result = cv2.threshold(image_file, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return threshold_result[1]
# image pre-processing
# image pre-processing
def preprocess(image_file):
    """Run the full OCR pre-processing pipeline: grayscale, then noise
    removal, then Otsu thresholding (in that order)."""
    for step in (get_grayscale, remove_noise, thresholding):
        image_file = step(image_file)
    return image_file
|
# code_user.py
# Mike Solak
# 22 Jan 2020
from cryptography.fernet import Fernet
import key_from_pass
def encode_account(acc_list: list, key: bytes):
    '''acc_list = ['username', 'password']
    Each entry is replaced in place by its Fernet-encrypted form (UTF-8
    round-tripped), and the same list object is returned.
    '''
    cipher = Fernet(key)  # key created from password
    for position, credential in enumerate(acc_list):
        token = cipher.encrypt(credential.encode('utf-8'))
        acc_list[position] = token.decode('utf-8')
    return acc_list
def encode_accounts(accs_list: list, key: bytes):
    """Encrypt every [username, password] pair in accs_list.

    encode_account mutates each inner list in place, so the same outer list
    is returned.  (Fixes shadowing of the builtin `list` as a loop variable
    and a misleading no-op rebinding of that variable in the original.)
    """
    for account in accs_list:
        encode_account(account, key)  # mutates `account` in place
    return accs_list
def decode_account(acc_list: list, key: bytes):
    '''acc_list = ['username', 'password']
    Each encrypted entry is replaced in place by its decrypted plaintext
    (UTF-8 round-tripped), and the same list object is returned.
    '''
    cipher = Fernet(key)  # key created from password
    for position, credential in enumerate(acc_list):
        plaintext = cipher.decrypt(credential.encode('utf-8'))
        acc_list[position] = plaintext.decode('utf-8')
    return acc_list
def decode_accounts(accs_list: list, key: bytes):
    """Decrypt every [username, password] pair in the nested accs_list.

    decode_account mutates each inner list in place, so the same outer list
    is returned.  (Fixes shadowing of the builtin `list` as a loop variable
    and a misleading no-op rebinding of that variable in the original.)
    """
    for account in accs_list:
        decode_account(account, key)  # mutates `account` in place
    return accs_list
if __name__ == '__main__':
    # Demo round-trip: encrypt, decrypt, and repeat once more.
    # NOTE: encode_accounts/decode_accounts mutate the nested lists in
    # place, so accs_list, encrypted_accounts, decrypted_accounts, etc. are
    # all the SAME object observed at different stages.
    key = key_from_pass.main()
    accs_list = [['yolanda', 'pass123'], ['marvin', 'gaye']]
    encrypted_accounts = encode_accounts(accs_list, key)
    print(encrypted_accounts)
    decrypted_accounts = decode_accounts(encrypted_accounts, key)
    print(decrypted_accounts)
    re_encrypted_accounts = encode_accounts(decrypted_accounts, key)
    print(re_encrypted_accounts)
    re_decrypted_accounts = decode_accounts(re_encrypted_accounts, key)
    print(re_decrypted_accounts)
|
"""Tiny demo: write one line to stderr and one to stdout, then echo argv."""
import sys

# print(..., file=...) emits the same bytes as the explicit write('...\n').
print('this is a error message', file=sys.stderr, flush=True)
print('this is a standard text')
print(sys.argv)
if len(sys.argv) > 1:
    print(sys.argv[1])
|
# Interactive shopping-cart demo: list products, deduct purchases from the
# entered salary, and print the cart on 'q'.
shopping = [
    ('Iphone',5800),
    ('mac pro',9800),
    ('bike',800),
    ('watch',10600),
    ('coffee',31),
    ('alex',120)
]
# def info():
# tem_info={}
# with open('info.txt','r',encoding='utf-8') as f:
# line = f.readline()
# info_list = line.rstrip().split('')
# tem_info[info_list[0]] = info_list[1]
# line = f.readline()
# return tem_info
shopping_list = []
user_name = input("please enter username:")
password = input("please enter password:")
# user_info = info()
# for k,v in user_info.items():
salary = input("input your salary:")
# NOTE(review): if the entered salary is not all digits, the isdigit()
# guard silently skips the whole shopping loop -- confirm whether an error
# message is wanted here.
if salary.isdigit():
    salary = int(salary)
    while True:
        #for item in shopping:
        #print (shopping.index(item),item)#
        for index,item in enumerate(shopping):  # show "index (name, price)" menu
            print(index,item)
        user_choice = input("选择什么?")
        if user_choice.isdigit():
            user_choice=int(user_choice)
            if user_choice <len(shopping) and user_choice>=0:
                p_item = shopping[user_choice]
                if p_item[1]<=salary:
                    shopping_list.append(p_item)
                    salary -=p_item[1]
                    print("Added %s into shoping cart,your current \033[31;1m%s\033[0m"%(p_item,salary))# \033[31;1m..\033[0m = red, \033[32;1m..\033[0m = green ANSI escapes
                else:
                    print("\033[41;1m你的余额只剩[%s],买不了\033[0m"% salary)
            else:
                print("product code [%s] is not exist"%user_choice)
        elif user_choice=="q":
            # Quit: dump the cart and remaining balance, then exit.
            print("------shopping list----")
            for p in shopping_list:
                print(p)
            print("you current balance:",salary)
            exit()
        #print("exit...")
#!/usr/bin/env python
# encoding: utf-8
# Daniele Trifiro`
# brethil@phy.olemiss.edu
'''
This program is used to perform PCA and cluster in the principal component
space data from `pcat.finder`
It's important to note that, even if this is written to use GMM, with a few
changes, every clustering algorithm (currently 'gaussian_mixture()')
can be implemented with few lines of code.
The clustering algorithm should output a set of labels for the input data.
Input data is a matrix with the observations on the rows, output is a list
of the same length of the number of the observations.
Each element of this list corresponds to the cluster number of which the
observation was assigned to (ranging from 0 to number of clusters-1).
If we had 5 observations, then:
labels[0] = 1
labels[1] = 0
labels[2] = 1
labels[3] = 1
labels[4] = 0
in this case the observations 0, 2 and 3 are assigned to cluster 1, and
observations 1 and 4 are assigned to cluster' type 0, with a total of
2 clusters.
A function for plotting 3-D scatterplots is supplied (three_plot), this can be
used by simply un-commenting a line (search for 3DPLOT).
'''
# When True, representative transients are rebuilt from only the first
# SMALL_COMPONENTS principal components (drops higher-order, noisier ones).
SMALL_COMPONENTS_BOOL = False
SMALL_COMPONENTS = 50
import warnings
from .utils import *
from .pca import standardize, eigensystem, PCA, load_data, matrix_whiten
import matplotlib.mlab
from matplotlib.image import NonUniformImage
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=DeprecationWarning)
import sklearn.mixture as mix
#import sklearn.mixture as mix
# mplot3d is used for the 3D plots.
#from mpl_toolkits.mplot3d import axes3d
def usage():
    '''
    Usage help.
    '''
    # NOTE: Python 2 print statements throughout this module.
    print "Usage:\tGMM.py [--time || --frequency] (--sampling, -s) sampl_freq [-m gauss_comp] [-p princ_comp]"
    print "\t\t [--low low, --high high] [--plots number] [--repr_components number]" #[--model model_file (not working)]"
    print "\t\t file1 file2 file3 ..."
    print "\n\tGMM performs PCA and clusters the data using a"
    print "\tGaussian Mixture Model."
    print "#"*frame_width
    print "\n\tOptions:"
    print "\t--time, --frequency\n\
	  Type of analysis being performed, either in time\n\
	  or in frequency.\n\
	  --time should be used for databases output from finder.py,\n\
	  and also plots the time series of the transients in the database\n\
	  \"--frequency\" should be used for PSDs\n\
	  transform databases.\n\
	  For frequency bands analysis the syntax is the following:\n\
	  \tGMM.py --frequency --low low_freq --high high_freq database.data"
    print "\t--log\n\
	  Takes base-10 logarithm of the input data. This improves clustering\n\
	  when data spans several order of magnitudes."
    print "\t--sampling sampl_freq, -s sampl_freq\n\t\t Specify a sampling frequency for the data's channel"
    #print "\t[--compare file]\n\t\t PCA is performed on the difference between observations\n\t\t\
    #  and input model (plain-text)."
    print "\t-m number, --maxclusters number\n\t\tSpecifies the maximum number\n\
	  of clusters. (Default = 10 )."
    print "\t-p number, --components\n\t\tNumber of principal components to be used when clustering\n\t\t (default is 40).\n\
	  One can guess this number by simply looking at the explained variance\n\
	  graph by using PCA.py with the --extras option (explained-variance.pdf).\n\
	  Rule of thumb: choose a number of components which accounts for about 70%\n\
	  of the total variance."
    #print "\t-S, --standardize\n\t\tStandardizes the data in standard deviation,\n\t\t\
    #  so that each variable has unit variance (default is NOT standardized)."
    print "\t--plots number\n\t\tPlots scatterplots up to the first 'number' components."
    print "\t--repr_components number\n\t\tSets the number of components with which the representative\n\
	  transients are computed."
    print "\t-r\n\t\t Interactive mode, chose wich clusters to remove from the database."
    print "\t--noplot\n\t\t Do not plot time series/PSDs, use this to save time if not\n\
	  interested in time series and/or using -r."
    """
    print "\t--model model_file\n\t\t Specify a model file to use, this model is used\n\
	  in the frequency domain analysis. To plot the differences against an 'ideal' spectrum."
    """
    print "\t--silent\n\t\tDo not print progress bars."
    #----print "\t--list\n\t\t Saves GPS start and end times for each type's spectra."
def check_options_and_args():
global MAX_CLUSTERS, max_clusters
MAX_CLUSTERS = False
max_clusters = 10
global MAX_COMPONENTS
MAX_COMPONENTS = 4
global principal_components_number
principal_components_number = 8
global marker
marker = '+'
global SILENT
SILENT = False
global REMOVE
REMOVE = False
global MODEL
MODEL = False
global MODEL_FILE
MODEL_FILE = ""
global SAMPLING
global PRINT_LIST
PRINT_LIST = False
global ANALYSIS
global PLOT
PLOT = True
global low, high
# low, high = None, None
global COMPONENTS
COMPONENTS = 4
try:
opts, args = getopt.getopt(sys.argv[1:],"hm:s:p:r", ["help", 'maxclusters=', 'list',\
'standardize', 'marker=', 'model=', "sampling=", "plots=",\
'time', 'frequency', 'low=', 'high=', 'log', 'bands', "repr_components=", "noplot", "silent"] )
except getopt.error, msg:
print msg
sys.exit(1)
if ( len(sys.argv[1:]) == 0 and not (any( flag in o for flag in [ '--help', '-h'] for o in opts)) ) :
print "No input files."
print "Use GMM.py -h for usage."
print "Example of usage:"
print "\t GMM.py --time -s 32768 file.list"
print "\t GMM.py --frequency -s 32768 file.list\n"
print "\t GMM.py [--log] matrix_database.data\n"
print "\t GMM.py --freq --low --high matrix_database.data\n"
sys.exit(1)
for o, a in opts:
if o in ( '-h', '--help' ):
usage()
sys.exit(1)
elif o in ( '-m', 'maxclusters' ):
max_clusters = int(a)+1
elif o in ( '--marker' ):
marker = a
elif o in ( '-p' ):
principal_components_number = int(a)
elif o in ( '-r' ):
REMOVE = True
elif o in ( '--model' ):
MODEL = True
MODEL_FILE = a
elif o in ( '-s', '--sampling' ):
SAMPLING = float(a)
elif o in ( '--list' ):
PRINT_LIST = True
elif o in ( '--low' ):
low = int(a)
elif o in ( '--high' ):
high = int(a)
elif o in ( '--time' ):
ANALYSIS = 'time'
elif o in ( '--frequency' ):
ANALYSIS = 'frequency'
elif o in ( '--plots' ):
MAX_COMPONENTS = int(a)+1
elif ( o == "--log"):
# Pass, "_log" will later be appended to ANALYSIS
pass
elif o in ( '--repr_components' ):
SMALL_COMPONENTS_BOOL = True
SMALL_COMPONENTS_BOOL = int(a)
elif (o == "--noplot"):
PLOT = False
elif (o == "--silent"):
SILENT = True
else:
assert False, "Unknown Option."
if not ( any( flag in o for flag in ['--time', "--frequency"] for o in opts ) ):
#print "Analysis type (time domain or frequency domain) has to be
# supplied"
#print "through the --time or --frequency flags. Quitting."
#sys.exit()
ANALYSIS = "generic"
if ( any( "--log" in o for o in opts) ):
ANALYSIS += "_log"
if any( flag in o for flag in ['--low'] for o in opts) and\
any(flag in o for flag in ["--high"] for o in opts):
ANALYSIS += "_bands"
elif not ( any( flag in o for flag in ['-s', "--sampling"] for o in opts )):
if not any( flag in o for flag in ['--low', '--high'] for o in opts ):
print "Sampling frequency has to be supplied. Quitting."
sys.exit()
elif not ( any( flag in o for flag in ['--low'] for o in opts ) and ( any( flag in o for flag in ["--high"] for o in opts ))):
print "Both --low and --high have to be supplied if performing bands analysis. Quitting."
sys.exit()
return args
def gaussian_mixture(matrix, upper_bound, SILENT=False):
    '''
    This function clusters the input matrix using the GMM algorithm (gaussian mixture model)
    The number of clusters is found by running the algorithm for n_components = 2 to upper_bound
    and chosing the model which minimized the BIC.
    Returns the labels for each observation.
    '''
    # NOTE(review): mix.GMM is the legacy scikit-learn mixture API (replaced
    # by GaussianMixture in modern releases) -- confirm installed version.
    if ( len(matrix) < upper_bound+1 ):
        print "\n\tWARNING: Not enough samples (less than the minimum %i) to run GMM." % (upper_bound)
        print "\t	 Only one cluster is returned.\n"
        return [0]*len(matrix)
    # Create progress bar
    if not SILENT:
        progress = progressBar(minValue = 0, maxValue = 4*upper_bound-1, totalWidth = 40 )
        progress(0)
        j = 0
    lowest_bic = np.infty
    bic = []
    # NOTE(review): docstring says the sweep starts at 2, but this range
    # starts at 1 -- confirm which is intended.
    n_components_range = range (1, upper_bound+1)
    cv_types = ['spherical', 'tied', 'diag', 'full']
    # Grid-search over covariance type and component count, keeping the
    # model with the lowest BIC.
    for cv_type in cv_types:
        for n in n_components_range :
            gmm = mix.GMM(n_components = n, covariance_type = cv_type)
            gmm.fit(matrix)
            bic.append( gmm.bic(matrix) )
            if bic[-1] < lowest_bic:
                lowest_bic = bic[-1]
                best_gmm = gmm
            if not SILENT:
                progress(j)
                j += 1
    if not SILENT:
        progress(j)
        j += 1
    # Re-fit the winning model and assign a cluster label per observation.
    best_gmm.fit(matrix)
    res = best_gmm.predict(matrix)
    # Print an empty line to avoid printing on the progress bar.
    if not SILENT:
        print ""
    return res
"""
def gaussian_mixture(matrix, upper_bound, weights):
'''
This function clusters the input matrix using the DPGMM algorithm (Dirichlet Process Gaussian Mixture Model)
Returns the labels for each observation.
'''
if ( len(matrix) < upper_bound ):
print "\n\tWARNING: Not enough samples (less than the minimum %i) to run GMM." % (upper_bound-1)
print "\t Only one cluster is returned.\n"
return [0]*len(matrix)
gmm = mix.DPGMM(n_components = upper_bound)
gmm.weights_ = weights
gmm.fit(matrix)
res = gmm.predict(matrix)
return res
"""
def color_clusters(score_matrix, labels):
    '''
    Group the rows of score_matrix by their cluster label.

    Returns a list with one entry per distinct label: entry k holds every
    observation (row) whose label is k.  When there is only one cluster the
    whole matrix is returned as a single group.
    '''
    n_clusters = len(np.unique(labels))
    if n_clusters == 1:
        return [score_matrix.tolist()]
    grouped = [[] for _ in range(n_clusters)]
    for row_index, observation in enumerate(score_matrix):
        grouped[labels[row_index]].append(observation)
    return grouped
def three_plot(colored_clusters, x, y, z, output):
    '''
    Plots 3-D scatterplot of colored_clusters, using components
    x, y and z, output is the output file name.
    '''
    global DPI
    # NOTE(review): DPI is declared global but never used below (savefig is
    # called without a dpi argument).
    global colors
    fig = plt.figure()
    ax = fig.add_subplot(111, projection = '3d')
    ax.set_xlabel( "Principal component score: "+str(x) )
    ax.set_ylabel( "Principal component score: "+str(y) )
    ax.set_zlabel( "Principal component score: "+str(z) )
    plotlabels = list()
    ax.set_title("{0} clusters and noise".format(len(colored_clusters)))
    for index, element in enumerate(colored_clusters):
        tmp = np.matrix(element)
        # NOTE(review): Axes3D.scatter takes `s` for marker size, not
        # `markersize` -- this call likely raises TypeError; confirm.
        ax.scatter(tmp[:,0].tolist(), tmp[:,1].tolist(),tmp[:,2].tolist(),\
                c = str(colors[index]), marker = "." , markersize = 1)
        plotlabels.append(str(index+1))
    ax.legend(plotlabels, loc = 'upper right', markerscale = 5)
    ax.grid( linestyle = '--' )
    plt.show()
    fig.savefig(output)
    print "Written:\t"+output+"."
def print_cluster_info(colored_clusters):
print "\t\tCluster\t\t\t Size"
print "\t\t" + "#"*45
# Output to file:
f = open( "Types_detail.txt", "w")
f.write("\t\tCluster\t\t Length\n\t\t"+"#"*45+"\n")
all_transients = [len(element) for element in colored_clusters]
total = float(np.sum(all_transients))
for index, element in enumerate(all_transients):
percent = (element/total)*100
output = "\t\tType {0:2d} \t\t {1:3d}\t ({2:4.1f}%)".format(index+1, element, percent)
print output
f.write(output + "\n")
def set_axes_frequency(freq_array, logy=True):
    '''
    Build a (figure, axes) pair configured for PSD-vs-frequency plots:
    log-x from 10 Hz to max(freq_array), optional log-y, full grid.
    '''
    fig = plt.figure(figsize=(12, 6), dpi=300)
    ax = fig.add_subplot(111)
    ax.set_xscale('log')
    ax.set_xlabel("Frequency [Hz]")
    ax.set_ylabel("Power Spectral Density [Counts^2/Hz]")
    for which in ('minor', 'major'):
        ax.xaxis.grid(True, which)
    if ( logy == True ):
        ax.set_yscale('log')
    for which in ('minor', 'major'):
        ax.yaxis.grid(True, which)
    ax.set_xbound(lower=10, upper=np.max(freq_array))
    ax.set_autoscalex_on(False)
    return fig, ax
def set_axes_frequency_band(freq_array, waveform_len, logy=True):
    '''
    Build a (figure, axes) pair for band-limited PSD plots; the x-range
    spans exactly min..max of freq_array.  (waveform_len is accepted for
    interface compatibility but unused here.)
    '''
    fig = plt.figure(figsize=(12, 6), dpi=300)
    ax = fig.add_subplot(111)
    ax.set_xscale('log')
    ax.set_xlabel("Frequency [Hz]")
    ax.set_ylabel("Power Spectral Density [Counts^2/Hz]")
    if ( logy == True ):
        ax.set_yscale('log')
    ax.yaxis.grid(which="both")
    ax.xaxis.grid(which="both")
    # NOTE(review): pokes a private matplotlib attribute; newer matplotlib
    # spells this ax.set_autoscalex_on(False) -- confirm before upgrading.
    ax._autoscaleXon = False
    ax.set_xbound(lower=np.min(freq_array), upper=np.max(freq_array))
    return fig, ax
def set_axes_time(time_array):
    '''
    Build a (figure, axes) pair for amplitude-vs-time plots whose x-range
    spans exactly min..max of time_array.
    '''
    fig = plt.figure(figsize=(12, 6), dpi=300)
    ax = fig.add_subplot(111)
    ax.set_xlabel(" Time [ms]")
    ax.set_ylabel("Amplitude [Counts]")
    ax.grid(which="both")
    lower_bound, upper_bound = np.min(time_array), np.max(time_array)
    ax.set_xbound(lower=lower_bound, upper=upper_bound)
    ax.set_autoscalex_on(False)
    return fig, ax
def configure_subplot_time(subplot_number):
    "Configure 'subplot_number' time-series subplots stacked in one column, sharing the x axis."
    fig = plt.figure(figsize=(12, 6*subplot_number), dpi=300)
    axes = [fig.add_subplot(subplot_number, 1, 1)]
    for idx in range(subplot_number):
        if idx != 0:
            # Each subsequent panel shares its x axis with the previous one.
            axes.append(fig.add_subplot(subplot_number, 1, idx+1, sharex=axes[idx-1]))
        axes[idx].grid(which='both')
        axes[idx].set_title("Type " + str(idx+1))
        axes[idx].set_ylabel("Amplitude [counts]")
        axes[idx].set_xlabel("Time [ms]")
    return fig, axes
def configure_subplot_freq(subplot_number, logy=True):
    "Configure 'subplot_number' PSD subplots stacked in one column (log-x, optional log-y), sharing the x axis."
    fig = plt.figure(figsize=(12, 6*subplot_number), dpi=300)
    axes = [fig.add_subplot(subplot_number, 1, 1)]
    for idx in range(subplot_number):
        if idx != 0:
            # Each subsequent panel shares its x axis with the previous one.
            axes.append(fig.add_subplot(subplot_number, 1, idx+1, sharex=axes[idx-1]))
        panel = axes[idx]
        panel.set_title("Type " + str(idx+1))
        panel.set_ylabel("Power Spectral Density [Counts^2/Hz]")
        panel.set_xlabel("Frequency [Hz]")
        panel.set_xscale('log')
        panel.xaxis.grid(True, 'minor')
        panel.xaxis.grid(True, 'major')
        if logy == True:
            panel.set_yscale('log')
            panel.yaxis.grid(True, 'minor')
            panel.yaxis.grid(True, 'major')
    return fig, axes
def calculate_types(database, clusters, score_matrix, principal_components, means, stds, labels, ANALYSIS, f_sampl, low=None, high=None):
'''
For each cluster compute an average observation from all observations in that cluster using a median.
The median is used for outlier rejection.
For each average observation, perform inverse PCA (multiplying by the transpose of the
principal components matrix and then adding the mean for each observation)
After the average types have been computed, these are plotted.
'''
DPI = 100
cluster_number = len( clusters )
"""
# The following commented code gives out weird forms for the representative transient due to the adding of means at the end
# but might be a good way to get a good estimate of the "true" transient form
if not SMALL_COMPONENTS_BOOL:
rows, columns = np.shape(score_matrix)
else:
rows, columns = np.shape(score_matrix[:,0:SMALL_COMPONENTS])
cluster_matrices = []
cluster_medians = []
# For each cluster in 'clusters' compute the median and save it in
# cluster_medians
for cluster in clusters:
if len(cluster) > 1:
cluster_matrices.append( np.array(cluster) )
# SMALL_COMPONENTS_BOOL is defined at the top of the document.
# If TRUE, only the first SMALL_COMPONENTS principal component
# scores
# are used to reconstruct
# the average observations. This should remove noise, as the higher
# order principal components
# mainly consist in noise.
if not SMALL_COMPONENTS_BOOL:
cluster_medians.append( np.median(cluster_matrices[-1], axis=0) )
else:
cluster_medians.append( np.median(cluster_matrices[-1], axis=0)[0:SMALL_COMPONENTS] )
else:
cluster_medians.append(cluster[0])
# Invert PCA: multiply by the transpose of the principal components matrix
# and add means
average_observations = []
for median in cluster_medians:
average_observations.append( np.dot( median, principal_components.transpose() ) )
average_observation_matrix = np.array(average_observations)
average_observation_matrix *= stds
average_observation_matrix += means
"""
time_domain_clusters = []
for i in range(cluster_number):
time_domain_clusters.append([])
for index, spike in enumerate(database):
time_domain_clusters[labels[index]].append(spike.waveform)
cluster_medians = []
for cluster in time_domain_clusters:
cluster = np.array(cluster)
cluster_medians.append(np.median(cluster, axis=0))
average_observation_matrix = np.array(cluster_medians)
'''
THIS PART IS TO BE REVIEWD
IS THIS NECESSARY AT ALL?
####################################################
# Mean max amplitude is calculated for
# every transient in the cluster, and used as peak amplitude
# for the representative transient.
# This should be used only when normalizing all spikes to unit amplitude
if ( "time" in ANALYSIS ):
peaks = [ list() for element in np.unique(labels) ]
for index, spike in enumerate(database):
if ( len(spike.waveform) > 0 ):
peaks[ labels[index] ].append( spike.norm )
peaks_means = [ np.mean(element) for element in peaks ]
for index, element in enumerate(datamatrix):
element *= peaks_means[index]
####################################################
'''
observations, waveform_len = np.shape(average_observation_matrix)
# Initialize axes for summary plot
if ( "frequency" in ANALYSIS ):
if ("bands" in ANALYSIS):
freq_array = np.linspace(low, high, waveform_len)
else:
freq_array = rfftfreq( 2*(waveform_len-1), d=1./f_sampl )
fig_all, ax_all = configure_subplot_freq(len(average_observation_matrix)+1)
if ("bands" in ANALYSIS):
for element in ax_all:
element.autoscale(True, "both", tight=True)
else:
for element in ax_all:
element.set_xbound( lower=10, upper=np.max(freq_array) )
element.autoscale(True, "y", tight=True)
elif ( "time" in ANALYSIS ):
time_axis = (np.array(range(waveform_len))/f_sampl)*1000.0
max_index = waveform_len//2
time_axis -= time_axis[max_index]
fig_all, ax_all = configure_subplot_time(len(average_observation_matrix))
elif ( "generic" in ANALYSIS ):
fig_all = plt.figure()
ax_all = fig_all.add_subplot(111)
ax_all.grid()
if ( 'log' in ANALYSIS ):
ax_all.set_yscale('log')
else:
assert False, "Fatal error with analysis types."
plotlabels = []
if ANALYSIS == "time":
polarities = [ {'plus': 0, 'minus': 0} for i in range(cluster_number)]
for index, spike in enumerate(database):
if (spike.polarity == 1):
polarities[labels[index]]['plus'] += 1
else:
polarities[labels[index]]['minus'] += 1
polarities_plus_percent = []
polarities_minus_percent = []
for i in range(cluster_number):
polarities_plus_percent.append( 100 * ( polarities[i]['plus']/float(len(clusters[i])) ) )
polarities_minus_percent.append( 100 * ( polarities[i]['minus']/float(len(clusters[i])) ) )
# Default line marker is a continous line, switch to
# dotted line if there are more than 7 types
marker = "-"
with warnings.catch_warnings():
warnings.filterwarnings( "ignore", category=UserWarning )
if not "generic" in ANALYSIS:
ax_all[-1].set_title("Summary")
for index, element in enumerate(average_observation_matrix):
percent = (len(clusters[index])/float(len(database)))*100.0
if ( "frequency" in ANALYSIS ):
if ( "bands" in ANALYSIS):
fig, ax = set_axes_frequency_band(freq_array, waveform_len)
ax.set_xticks(np.logspace(np.log10(low), np.log10(high), num=10))
ax.set_xticklabels([ "%.2f" % el for el in ax.get_xticks()])
else:
fig, ax = set_axes_frequency(freq_array)
# Only plot frequencies above 10 Hz:
# only choose indexes corresponding to freq_array>10
if not ( "bands" in ANALYSIS):
ax.plot( freq_array, np.power(10, element), "r-", linewidth = 0.4)
ax_all[index].plot( freq_array, np.power(10, element), "r-", linewidth = 0.4)
ax_all[index].set_title("Type {0:d}: {1:d} of {2:d} observations ({3:.1f}%)".format(index+1, len(clusters[index]), len(database), percent) )
ax_all[-1].plot( freq_array, np.power(10, element), markers_and_colors[index][1]+marker, linewidth=0.4)
ax_all[index].autoscale(enable=True, axis="y", tight=True)
ax_all[index].autoscale(enable=False, axis="x")
ax_all[index].set_xlim((10, freq_array[-1]))
ax_all[index].axis("tight")
elif ("bands" in ANALYSIS):
ax.plot(freq_array, np.power(10, element), "r-", linewidth = 0.4)
ax_all[index].plot( freq_array, np.power(10, element), "r-", linewidth = 0.4)
ax_all[index].set_title("Type {0:d}: {1:d} of {2:d} observations ({3:.1f}%)".format(index+1, len(clusters[index]), len(database), percent) )
ax_all[index].set_xticks( np.logspace(np.log10(low), np.log10(high), num=10 ))
ax_all[index].set_xticklabels([ "%.2f" % el for el in ax.get_xticks()])
ax_all[-1].plot( freq_array, np.power(10, element), markers_and_colors[index][1]+marker, linewidth=0.4)
# Change marker: there only are 7 colors
if (index > 5) and (marker == "-"):
marker = "-."
#ax.autoscale(True, "both", tight=True)
elif ( "time" in ANALYSIS ):
fig, ax = set_axes_time(time_axis)
maximum_index = np.argmax(np.abs(element))
element[maximum_index] = (element[maximum_index-1]+element[maximum_index+1])/2.0
ax.plot(time_axis, element, 'b-', linewidth = 0.4 )
ax_all[index].plot(time_axis, element, "b-", linewidth = 0.4)
ax_all[index].autoscale(True, "both", tight=True)
ax_all[index].set_title("Type {0:d}: {1:d} of {2:d} observations ({3:.1f}%) - Polarity: {4:.1f}% positive {5:.1f}% negative ".format(index+1, len(clusters[index]), len(database), percent, polarities_plus_percent[index], polarities_minus_percent[index]) )
elif ( "generic" in ANALYSIS ):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.grid()
if ( "log" in ANALYSIS ):
ax.set_yscale('log')
ax.plot(np.power(10, element), 'r-', linewidth = 0.4)
ax_all.plot(np.power(10, element), markers_and_colors[index]+'-', linewidth = 0.4)
else:
ax.plot(element, 'r-', linewidth = 0.4)
ax_all.plot(element, markers_and_colors[index]+'-', linewidth = 0.4)
output = str(cluster_number) + "-clusters_#" + str(index+1) + ".pdf"
plotlabels.append( "{0:d} ({1:.1f}%)".format(index+1, percent) )
if ("time" in ANALYSIS):
ax.set_title("Type {0:d}: {1:d} of {2:d} observations ({3:.1f}%) - Polarity: {4:.1f}% positive {5:.1f}% negative ".format(index+1, len(clusters[index]), len(database), percent, polarities_plus_percent[index], polarities_minus_percent[index]) )
else:
ax.set_title("Type {0:d}: {1:d} of {2:d} observations ({3:.1f}%)".format(index+1, len(clusters[index]), len(database), percent))
if ( "frequency" in ANALYSIS):
plt.autoscale(True, axis="y", tight=True)
fig.savefig(output, dpi = DPI)
plt.close(fig)
if ( "time" in ANALYSIS):
x_min, x_max = np.min(time_axis), np.max(time_axis)
ax_all[0].set_xbound( lower=x_min, upper=x_max )
ax_all[0].set_autoscalex_on(False)
else:
if ("frequency") in ANALYSIS and ("bands" in ANALYSIS):
x_min, x_max = np.min(freq_array), np.max(freq_array)
ax_all[0].set_xlim( (x_min, x_max) )
ax_all[0].set_autoscalex_on(False)
ax_all[0].autoscale(enable=True, axis="y", tight=True)
ax_all[-1].set_xticks( np.logspace(np.log10(low), np.log10(high), num=10) )
ax_all[-1].set_xticklabels([ "%.2f" % el for el in ax.get_xticks()])
elif ("frequency" in ANALYSIS):
#ax_all[0].set_xbound( lower=10, upper=np.max(freq_array) )
ax_all[-1].autoscale(enable=False, axis="x")
ax_all[-1].set_xlim((10, freq_array[-1]))
ax_all[-1].axis("tight")
box = ax_all[-1].get_position()
ax_all[-1].set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax_all[-1].legend( plotlabels, loc="best", bbox_to_anchor=(1.25, 1))
ax_all[-1].axis("tight")
print "\tSaving types details..."
fig_all.savefig( str(cluster_number) +" - All_types.pdf", dpi = DPI, bbox_inches='tight', pad_inches=0.2)
print "\tSaved \"All_types.pdf\"."
plt.close('all')
print_lists(database, labels, cluster_number, ANALYSIS)
# If time analysis, plot a spectrogram for each of the average observations
if ( "time" in ANALYSIS ):
if not ( os.path.exists( "spectrograms" ) ):
os.mkdir( "spectrograms" )
print "\n\tPlotting spectrograms..."
for index, element in enumerate(average_observation_matrix):
with warnings.catch_warnings():
warnings.filterwarnings( "ignore", category=UserWarning )
create_spectrogram(element, f_sampl, "Type_"+str(index+1)+"_Spectrogram", "Type " + str(index+1))
if cluster_number > 1:
print "\tSaved: Type_[{0}-{1}]_Spectrogram.pdf".format(1, len(average_observation_matrix))
else:
print "\tSaved: Type_1_Spectrogram.pdf"
plt.close('all')
def remove_clusters(database, labels):
OUTPUT = "database_new"
cluster_number = len( np.unique(labels) )
keyboard_input = raw_input( "Insert the cluster number to be removed: ('q' or 'quit' to exit)\n" )
clusters_to_remove = list()
while ( keyboard_input != ( 'q', 'quit' ) ):
if ( keyboard_input in str(range( 1, cluster_number+1) ) ):
var = int(keyboard_input)-1
clusters_to_remove.append(var)
keyboard_input = raw_input( "Any other clusters? ('q' or 'quit' to exit)\n" )
if ( keyboard_input in ( 'q', 'quit') ):
break
else:
if ( keyboard_input in ( 'q', 'quit') ):
break
else:
keyboard_input = raw_input( "Input has to be a number in the range 1 to "+str(cluster_number)+". Try again:\n" )
indexes_to_remove = list()
new_database = []
removed_transients = 0
for index, observation in enumerate(database):
if labels[index] not in clusters_to_remove:
new_database.append(observation)
else:
removed_transients += 1
print removed_transients, "transients removed."
print "Saving "+OUTPUT+".list"
f = open( OUTPUT+".list", "wb" )
pickle.dump(new_database, f)
f.close()
print "You now may re-run GMM.py on "+OUTPUT+".list."
def print_lists(database, labels, cluster_number, ANALYSIS):
    """
    Write one "Types_detail/Type_<k>.txt" file per cluster.

    For a frequency-domain analysis each line holds an observation's
    start and end (tab separated); for a time-domain analysis each line
    holds the observation's peak GPS time with six decimal digits.
    """
    if not os.path.exists("Types_detail"):
        os.mkdir("Types_detail")
    for cluster_index in range(cluster_number):
        output_name = "Type_" + str(cluster_index + 1) + ".txt"
        output_file = open("Types_detail/" + output_name, "w")
        # Observations belonging to the current cluster, in database order
        members = (obs for obs, lab in zip(database, labels) if lab == cluster_index)
        if 'frequency' in ANALYSIS:
            for observation in members:
                output_file.write(str(observation.start) + "\t" + str(observation.end) + "\n")
        elif 'time' in ANALYSIS:
            for observation in members:
                output_file.write("{0:.6f}".format(observation.peak_GPS) + "\n")
        output_file.close()
def create_spectrogram(data, sampling, output, title=""):
    """
    Create a spectrogram of 'data' and save it as
    "spectrograms/<output>.png".

    - data: 1-D time series; the transient's peak is assumed to sit at
      the vector's midpoint -- TODO confirm with callers
    - sampling: sampling frequency in Hz
    - output: file name (without extension) for the saved figure
    - title: optional title prefix; resolution info is appended to it
    """
    fig = plt.figure( figsize=(12, 6), dpi=100 )
    ax = fig.add_subplot(111)
    # FFT window is one eighth of the data length (trades frequency
    # resolution for time resolution)
    NFFT = len(data)//8
    # Compute a spectrogram using matplotlib.mlab.specgram
    pxx, freq, time = matplotlib.mlab.specgram( data, NFFT=NFFT, Fs=sampling, noverlap=NFFT//4, pad_to=int(sampling) )
    halfbin_time = (time[1] - time[0]) / 2.0
    halfbin_freq = (freq[1] - freq[0]) / 2.0
    # Center time and frequency bins
    time -= halfbin_time
    freq -= halfbin_freq
    # Change time axis to be in milliseconds and centered on the
    # transient's maximum (assumed at the vector's midpoint)
    half_time = int(len(time)/2.0)
    time = (time - time[half_time])*1000
    # Plot the spectrogram: power in dB, 50 contour levels
    cax = ax.contourf(time, freq, 10*np.log10(pxx), 50)
    # Plot the colorbar
    cbar = plt.colorbar(cax, ax=ax, orientation="vertical", shrink=0.7, fraction=0.05, pad=0.01, spacing="uniform")
    cbar.set_label("Energy (dB)")
    # Values for interpolation karg are *None*, 'none', 'nearest', 'bilinear',
    #'bicubic', 'spline16', 'spline36', 'hanning', 'hamming',
    #'hermite', 'kaiser', 'quadric', 'catrom', 'gaussian',
    #'bessel', 'mitchell', 'sinc', 'lanczos'
    #plt.autoscale()
    #title += ", Time resolution: %i ms (%i points) " % (RESOLUTION, NFFT)
    title += " , Spectrogram - {0:.1f}Hz Frequency Resolution".format(sampling/NFFT)
    ax.set_title( title )
    ax.set_xlabel( "Time [ms]" )
    ax.set_ylabel( "Frequency [Hz]" )
    ax.yaxis.grid(which="both")
    ax.xaxis.grid(which="major")
    ax.set_yscale('log')
    #ax.set_xlim( (np.min(time), np.max(time)) )
    #plt.minorticks_on()
    #ax.tick_params(axis='y', which='both', labelleft='on', left='on')
    plt.autoscale(True, tight=True, axis="both")
    # Frequencies below 10 Hz are not shown
    ax.set_ylim( (10, np.max(freq)) )
    #plt.clim(0,1)
    plt.savefig("spectrograms/" + output + ".png", bbox_inches='tight', pad_inches=0.2, dpi=100)
    plt.close('all')
def spike_time_series(database, PCA_info, components_number, labels, f_sampl, RECONSTRUCT=False, SILENT=False):
    '''
    Create and save a time series plot for each element in 'database'.
    - database is a list of Spike() istances
    - PCA_info is a tuple with: (score_matrix, principal_components, means, stds)
      where means and stds are arrays with the column means and column standard
      deviations of the original data matrix. Only used if RECONSTRUCT=True.
    - components_number is the number of principal components used when
      reconstructing the glitch (RECONSTRUCT=True).
    - labels contains the cluster each glitch belongs to (gaussian_mixture() output)
    - f_sampl sampling frequency for the given glitches' time series; if falsy,
      the x axis is simply the sample index.
    - RECONSTRUCT (bool): if True, glitches are reconstructed using the first
      'components_number' principal components (reduces noise and makes the
      "true" glitch shape clearer). The reconstruction is also stored in
      spike.reconstructed, with the component count in spike.PCs_used.
    - SILENT (bool): if True, no progress bar is shown.
    Figures are saved to "time_series/Type_<label>/<peak_GPS>.pdf".
    '''
    spikes_number = len(database)
    waveform_length = len(database[0].waveform)
    # If RECONSTRUCT is true, generate an array of 'reconstructed' using the
    # first few principal components
    if RECONSTRUCT:
        reconstructed = []
        # Unpack the tuple
        score_matrix, principal_components, means, sigmas = PCA_info
        # Zero all coefficients of components beyond 'components_number',
        # then invert the PCA transformation
        score_matrix[:, components_number:] = 0.0
        new_data = np.dot(score_matrix, principal_components.transpose())
        new_data *= sigmas
        new_data += means
        for index, spike in enumerate(database):
            # Waveforms were normalized by spike.norm before the PCA was
            # computed; multiply back to correctly invert it
            tmp = new_data[index]*database[index].norm
            reconstructed.append(tmp)
            spike.reconstructed = tmp
            spike.PCs_used = components_number
    # Create a progress bar
    if not SILENT:
        if (spikes_number > 1):
            progress = progressBar(minValue = 0, maxValue = spikes_number-1, totalWidth = 40 )
            progress(0)
    if f_sampl:
        # Time axis in milliseconds, centered on the transients' peak
        x_axis = np.array([ i/f_sampl for i in range( 0, waveform_length ) ])
        x_axis *= 1000.0
        max_index = np.argmax(np.abs(database[0].waveform))
        x_axis -= x_axis[max_index]
        x_min, x_max = np.min(x_axis), np.max(x_axis)
    # Start plotting
    for index, spike in enumerate(database):
        labels_list = []
        if RECONSTRUCT:
            # Two panels: raw + reconstructed on top, raw alone below
            fig = plt.figure(figsize=(12,12), dpi=300)
            ax = fig.add_subplot(211)
            ax1 = fig.add_subplot(212)
            ax1.grid(ls = '--')
        else:
            fig = plt.figure(figsize=(12,6), dpi=300)
            ax = fig.add_subplot(111)
        ax.set_title( "Peak at: " + str(spike.peak_GPS) )
        ax.grid( ls = '--' )
        ax.set_autoscalex_on(False)
        if f_sampl:
            ax.set_xlim( ( x_min, x_max ) )
            ax.plot( x_axis, spike.polarity*spike.waveform, "b", label="Raw")
            labels_list.append("Time series")
            ax.set_xlabel("Time [ms]")
            ax.set_ylabel("Amplitude [counts] ")
            if RECONSTRUCT:
                ax.plot( x_axis, spike.polarity*reconstructed[index], 'r', label="Reconstructed - {0} PCs".format(components_number))
                labels_list.append( "Reconstructed - {0} PCs".format(components_number) )
                ax.legend(labels_list, loc = 'best', markerscale = 2, numpoints = 1)
                ax1.plot( x_axis, spike.polarity*spike.waveform, "b", label="Raw 2")
                ax1.set_xlim( ( x_min, x_max ) )
                ax1.set_xlabel("Time [ms]")
                ax1.set_ylabel("Amplitude [counts] ")
                ax1.legend(labels_list, loc='best', markerscale = 2, numpoints =1)
        else:
            plt.xlim( ( 0, waveform_length ) )
            ax.plot(spike.waveform, label="Raw")
            if RECONSTRUCT:
                ax.plot( spike.polarity*reconstructed[index], "r", label="Reconstructed - {0} PCs".format(components_number) )
                labels_list.append( "Reconstructed - {0} PCs".format(components_number) )
                ax.legend(labels_list, loc = 'best', markerscale = 2, numpoints = 1)
                # BUG FIX: the original plotted against 'x_axis' here, but
                # x_axis is only defined when f_sampl is truthy, which would
                # raise NameError in this branch. Plot against the sample
                # index, consistent with the panel above.
                ax1.plot( spike.waveform, "b", label="Raw" )
                ax1.legend(labels_list, loc="best", markerscale=2, numpoints=1)
        fig.savefig( "time_series/Type_%i/%.3f.pdf" % (labels[index]+1, spike.peak_GPS), bbox_inches='tight', pad_inches=0.2)
        plt.close(fig)
        if not SILENT:
            if ( spikes_number > 1 ):
                progress(index+1)
        del labels_list
def plot_psds(database, PCA_info, components_number, labels, f_sampl, ANALYSIS="frequency", low=None, high=None, RECONSTRUCT=False, SILENT=False):
    '''
    Plot all the PSDs in database, putting each in a different folder
    ("PSDs/Type_<label>/") according to the labels in 'labels'.
    - f_sampl is the sampling frequency for the given PSDs.
    - If "bands" in ANALYSIS, the frequency axis spans 'low' to 'high' Hz.
    - If RECONSTRUCT is True, the PSDs reconstructed from the first
      'components_number' principal components are overplotted; the
      reconstruction is also stored in spike.reconstructed, with the
      component count in spike.PCs_used.
    - SILENT (bool): if True, no progress bar is shown.
    '''
    psd_number = len(database)
    waveform_length = len(database[0].waveform)
    # If RECONSTRUCT is true, generate an array of 'reconstructed' using the
    # first few principal components
    if RECONSTRUCT:
        reconstructed = []
        # Unpack the tuple
        score_matrix, principal_components, means, stds = PCA_info
        # Zero all coefficients of components beyond 'components_number',
        # then invert the PCA transformation
        score_matrix[:, components_number:] = 0.0
        new_data = np.dot(score_matrix, principal_components.transpose())
        new_data *= stds
        new_data += means
        for index, spike in enumerate(database):
            # Waveforms were normalized by spike.norm before the PCA was
            # computed; multiply back to correctly invert it
            tmp = new_data[index]*database[index].norm
            reconstructed.append(tmp)
            spike.reconstructed = tmp
            spike.PCs_used = components_number
    # Create a progress bar
    if not SILENT:
        if ( psd_number > 1):
            progress = progressBar(minValue = 0, maxValue = psd_number-1, totalWidth = 40 )
            progress(0)
    # Initialize the frequency axis
    if ( "bands" in ANALYSIS ):
        freq_array = np.linspace(low, high, waveform_length)
    else:
        freq_array = rfftfreq( 2*(waveform_length-1), d=1./f_sampl )
    # Start plotting
    for index, spike in enumerate(database):
        labels_list = []
        if ( "bands" in ANALYSIS):
            fig, ax = set_axes_frequency_band(freq_array, waveform_length)
        else:
            fig, ax = set_axes_frequency(freq_array)
        ax.set_title( "PSD: GPS %i to %i " % ( spike.segment_start, spike.segment_end) )
        ax.grid( ls = '--', which="both" )
        ax.set_autoscalex_on(False)
        ax.set_autoscaley_on(True)
        if RECONSTRUCT:
            # NOTE(review): both branches below are identical; the split is
            # presumably a leftover from band-specific plotting -- confirm.
            if ( "bands" in ANALYSIS):
                ax.plot( freq_array, np.power(10, reconstructed[index]), label="Reconstructed - {0} PCs".format(components_number) )
            else:
                ax.plot( freq_array, np.power(10,reconstructed[index]), label="Reconstructed - {0} PCs".format(components_number) )
            labels_list.append( "Reconstructed - {0} PCs".format(components_number) )
            ax.legend(labels_list, loc = 'best', markerscale = 2, numpoints = 1)
        ax.set_xscale('log')
        ax.set_yscale('log')
        ax.set_xlabel("Frequency [Hz]")
        ax.set_ylabel("Power Spectral Density [Counts^2/Hz)]")
        # Waveforms are stored as log10(PSD): undo the log for plotting
        if ( "bands" in ANALYSIS):
            ax.plot( freq_array, np.power(10, spike.waveform), 'r-', linewidth = 0.4 )
            ax.set_xticks( np.logspace(np.log10(low), np.log10(high), num=10))
            ax.set_xticklabels([ "%.2f" % el for el in ax.get_xticks()])
        else:
            ax.plot( freq_array, np.power(10, spike.waveform), 'r-', linewidth = 0.4 )
        fig.savefig( "PSDs/Type_%i/%i-%i.png" % (labels[index]+1, spike.segment_start, spike.segment_end), bbox_inches='tight', pad_inches=0.2)
        plt.close('all')
        del fig
        if not SILENT:
            if ( psd_number > 1 ):
                # NOTE(review): spike_time_series() calls progress(index+1)
                # here; this uses progress(index), so the bar ends one step
                # short -- confirm intended.
                progress(index)
        del labels_list
def scatterplot(score_matrix, spike_database, colored_clusters_list, labels, x, y, output, ANALYSIS):
    '''
    Plot an html image-map scatterplot of columns x vs y of the
    score matrix; each point links to the corresponding observation,
    e.g. observation x links to 'time_series/Type_y/event_gps_time.pdf',
    so each point in the scatterplot can be clicked to open the plot
    of the clicked observation.
    - score_matrix
        Score matrix obtained with PCA.PCA() (numpy array)
    - spike_database
        List of Spike() istances (list)
    - colored_clusters_list
        List of clusters, each cluster containing the scores to scatterplot
    - labels
        Labels for spike_database (list of integers, same size as spike_database)
    - x
        column to scatterplot (x axis) (integer, 1-based)
    - y
        column to scatterplot (y axis) (integer, 1-based)
    - output
        name of the output plot (string)
    - ANALYSIS
        Type of analysis being performed (string)
    '''
    DPI = 100
    images_folder = "Scatterplots_images"
    # Best-effort directory creation (may already exist)
    try:
        os.makedirs(images_folder)
    except:
        pass
    fig = plt.figure(dpi=DPI, edgecolor='k', frameon=True)#, figsize=(8.15, 6))
    ax = fig.add_subplot(111)
    ax.set_xlabel( "Principal component score: " + str(x) )
    ax.set_ylabel( "Principal component score: " + str(y) )
    ax.grid(linestyle = '--')
    plotlabels = []
    cluster_number = len( np.unique(labels) )
    ax.set_title( "%i clusters" % cluster_number )
    # Collect point coordinates ('xs', 'ys') and per-point link targets
    # ('info_list') for the image map
    xs = []
    ys = []
    info_list = []
    for index, element in enumerate( np.array(score_matrix) ):
        xs.append( element[x-1] )
        ys.append( element[y-1] )
        if ( ANALYSIS == "time"):
            info_list.append( ( labels[index]+1, spike_database[index].peak_GPS ) )
        elif ( "frequency" in ANALYSIS):
            info = "%s-%s" % (spike_database[index].segment_start, spike_database[index].segment_end )
            info_list.append( ( labels[index]+1, info) )
        elif ( "generic" in ANALYSIS ):
            # Generic analysis gets a plain scatterplot with no links
            pass
        else:
            assert False, "Analysis type '" + ANALYSIS + "' not supported. Quitting."
    # One plot call per cluster so each gets its own marker/color
    for index, element in enumerate(colored_clusters_list):
        tmp = np.array(element)
        ax.plot( tmp[:,x-1] , tmp[:,y-1], markers_and_colors[index], label = str(index), markersize = 5 )
        plotlabels.append( str(index+1) )
    # Create a legend
    ax.legend(plotlabels, bbox_to_anchor=(0, 0, 1.12, 1), loc = 'best', markerscale = 2, numpoints = 1)
    # If analysis is generic, no additional steps are needed:
    # the image-map code does not need to be run. Return.
    if ( "generic" in ANALYSIS ):
        fig.savefig(images_folder + "/" + output + ".png", dpi=fig.get_dpi())
        plt.close('all')
        return
    fig.savefig(images_folder + "/" + output + ".png", dpi=fig.get_dpi() )
    plt.close('all')
    ###
    # Finished plotting. Now creating the image map (only for 'time' and
    # 'frequency' ANALYSIS)
    ###
    # Convert the saved data coordinates into image (pixel) coordinates
    xys = zip(xs, ys)
    dpi = fig.get_dpi()
    height = fig.get_figheight() * dpi
    ixs = [0]*len(xs)
    iys = [0]*len(ys)
    i = 0
    for x1, y1 in xys:
        ixs[i], iys[i] = ax.transData.transform_point( [x1, y1] )
        i += 1
    icoords = zip(ixs, iys)
    # The minimal 'template' to generate an image map.
    tmpl = """
<html><head><title> Scatterplot Imagemap - PC scores {0}vs{1}</title></head><body>
<img src="%s.png" usemap="#points" border="0">
<map name="points">%s</map>
</body></html>""".format(x, y)
    if ( "time" in ANALYSIS):
        fmt = "<area shape='circle' coords='%f,%f,2' href='time_series/Type_%i/%0.3f.pdf' title='GPS %.3f - Type %i'>"
    else:
        fmt = "<area shape='circle' coords='%f,%f,2' href='PSDs/Type_%i/%s.png' title='%s - Type %i'>"
    # need to do height - y for the image-map (image origin is top-left)
    # NOTE(review): the comprehension below rebinds 'x' and 'y', shadowing
    # the column-index parameters; harmless here because the parameters are
    # not used afterwards, but fragile if code is added below.
    fmts = [fmt % (ix, height-iy, x, y, y, x) for (ix, iy), (x, y) in zip(icoords, info_list) ]
    with open(output + ".html", 'w') as output_file:
        print >> output_file, tmpl % (images_folder + "/" + output, "\n".join(fmts))
    #print "\tWritten: " + output + " (html and png)"
def correlation_test(database, labels, ANALYSIS):
    """
    Correlation test for the clusters.
    Takes as input a PCAT database (python list) and gaussian_mixture() labels (list)
    and tests the goodness of the clustering by correlating each observation
    against its cluster's median waveform.
    Results are saved to "Types_correlations.png" and wrapped in a clickable
    image map ("Types_correlations.html") linking each point to the plot of
    the corresponding observation.
    A confidence level can probably be also implemented through the gmm class of scikit-learn.
    """
    #TODO: check out GMM class functions sklearn.mixture.gmm, in particular gmm.predict_proba, gmm.score
    #TODO: it would be interesting having GPS time on the x axis (they're already time-ordered though, since that's the way the orinal database is built)
    # Group the observations by cluster
    cluster_number = len(np.unique(labels))
    glitch_number = float(len(labels))
    colored_database = [[] for i in range(cluster_number)]
    info_list = [[] for i in range(cluster_number)]
    for index, spike in enumerate(database):
        colored_database[labels[index]].append(spike)
        # Save additional information used for the clickable html
        if ( ANALYSIS == "time"):
            info_list[labels[index]].append( ( labels[index]+1, spike.peak_GPS ) )
        elif ( "frequency" in ANALYSIS):
            info = "%s-%s" % (spike.segment_start, spike.segment_end )
            info_list[labels[index]].append( ( labels[index]+1, info) )
        elif ( "generic" in ANALYSIS ):
            # No correlation test for generic analysis
            return
        else:
            assert False, "What are you trying to do? Only time, frequency and generic analysis are supported"
    # Compute a median for each cluster, used as a representative
    # time series for the cluster
    representatives = []
    # Compute correlations between representative and glitches
    # for each type
    cluster_correlations = []
    for index, cluster in enumerate(colored_database):
        median = np.median([spike.waveform for spike in cluster], axis=0 )
        representatives.append(median)
        # np.corrcoef returns the correlation matrix, which is symmetric (2x2).
        # On the diagonal we have the autocorrelations, off diagonal the cross
        # correlation, which is what we're interested in.
        correlations = [np.corrcoef(median, spike.waveform)[0,1] for spike in cluster]
        cluster_correlations.append(correlations)
    # Plot correlation coefficients, one subplot per cluster:
    fig = plt.figure(figsize=(12, 6*cluster_number), dpi=100)
    plt.subplots_adjust(left=0.10, right=0.95, top=0.97, bottom=0.05)
    dpi = fig.get_dpi()
    height = fig.get_figheight() * dpi
    ax = []
    xys = [ [] for i in range(cluster_number)]
    icoords = [ [] for i in range(cluster_number)]
    for index, correlations in enumerate(cluster_correlations):
        # Create subplot and add it to the ax list
        ax.append(fig.add_subplot(cluster_number, 1, index+1))
        # Create x axis (1-based observation index)
        glitch_indexes = range(1, len(correlations)+1)
        if (len(correlations) > 1):
            ax[-1].plot(glitch_indexes, correlations, 'b.')
        else:
            # Single-element cluster: draw a flat line at the lone value
            ax[-1].set_title("Type #{0}: 1 element".format(index+1))
            ax[-1].plot(range(10), np.zeros(10))
            ax[-1].plot(range(10), np.ones(10)*correlations[0], "b.")
        # Save data for imagemap:
        if (len(correlations) > 1):
            xys[index] = zip(glitch_indexes, correlations)
        else:
            xys[index] = [(1, correlations[0])]
        ax[-1].set_title("Type #{0}: {1} of {2} ({3:.2f}%)".format(index+1, len(correlations), int(glitch_number), (len(correlations)/glitch_number)*100))
        ax[-1].grid(which="both")
        ax[-1].set_xlabel("Observation")
        ax[-1].set_ylabel("Correlation")
        plt.xlim((0, len(correlations)))
        plt.ylim((-1.05,1.05))
    # Set title for the first subplot
    ax[0].set_title("Correlation Coefficients:\nType #1 {0}/{1} ({2:.2f}%)".format(len(cluster_correlations[0]), int(glitch_number), (len(cluster_correlations[0])/glitch_number)*100))
    # Get image (pixel) coordinates for each of the points plotted above
    for index, correlations in enumerate(cluster_correlations):
        # x axis is the same as above
        glitch_indexes = range(1, len(correlations)+1)
        # We have the same number of transformed coordinates as glitch_indexes
        ixs = [0]*len(glitch_indexes)
        iys = [0]*len(glitch_indexes)
        # Transform coordinates
        i = 0
        for x, y in xys[index]:
            ixs[i], iys[i] = ax[index].transData.transform_point( [x, y])
            i += 1
        icoords[index] = zip(ixs, iys)
    # Save figure
    fig.savefig("Types_correlations.png", dpi=fig.get_dpi())
    ###
    # Setup the clickable HTML file
    # The minimal 'template' to generate an image map:
    tmpl = """
<html><head><title>Correlations</title></head><body>
<img src="%s" usemap="#points" border="0">
<map name="points">%s</map>
</body></html>"""
    if "time" in ANALYSIS:
        fmt = "<area shape='circle' coords='%f,%f,3' href='time_series/Type_%i/%0.3f.pdf' title='GPS %0.2f - Type %i ' >"
    elif "frequency" in ANALYSIS:
        fmt = "<area shape='circle' coords='%f,%f,3' href='PSDs/Type_%i/%s.png' title='%s - Type %i'>"
    else:
        assert False, "Analyis not time nor frequency. What are you trying to do?"
    # need to do height - y for the image-map (image origin is top-left)
    fmts = []
    for index, infos in enumerate(info_list):
        fmts.extend([fmt % (ix, height-iy, x, y, y, x) for (ix, iy), (x, y) in zip(icoords[index], infos) ])
    plt.close(fig)
    f = open("Types_correlations.html", "w")
    print >> f, tmpl % ("Types_correlations.png", "\n".join(fmts))
    f.close()
    plt.close(fig)
    print "\n\tSaved: Types_correlations.html"
    return
def matched_filtering_test(database, labels, ANALYSIS):
"""
Matched filtering test for the clusters (this is only useful for time domain analysis)
Takes as input a PCAT database (python list) and gaussian_mixture() labels (list)
and tests for the goodness of the clustering using a correlation test with the average (median) waveforms
A confidence level can probably be also implemented through the gmm class of scikit-learn.
"""
#TODO: check out GMM class functions sklearn.mixture.gmm, in particular gmm.predict_proba, gmm.score
#TODO: it would be interesting having GPS time on the x axis (they're already time-ordered though, since that's the way the orinal database is built)
assert ANALYSIS == "time"
# Create a database
cluster_number = len(np.unique(labels))
glitch_number = float(len(labels))
colored_database = [[] for i in range(cluster_number)]
info_list = [[] for i in range(cluster_number)]
for index, spike in enumerate(database):
colored_database[labels[index]].append(spike)
# Save additional information used for the clickable html
if ( ANALYSIS == "time"):
info_list[labels[index]].append( ( labels[index]+1, spike.peak_GPS ) )
elif ( "frequency" in ANALYSIS):
info = "%s-%s" % (spike.segment_start, spike.segment_end )
info_list[labels[index]].append( ( labels[index]+1, info) )
elif ( "generic" in ANALYSIS ):
return
else:
assert False, "What are you trying to do? Only time, frequency and generic analysis are supported"
# Compute a median for each cluster, used a representative
# time series for the cluster
representatives = []
# Compute inner products between representative and glitches
# for each type
cluster_matched_filters = []
def inner_product(a, b):
""" Takes as input two numpy array and returns the inner product (\int a*b_conj)"""
# Factor of two because we're only integrating over positive frequencies
# and data is hermitian
inner_tmp = 2*np.real( a*b.conj() + b*a.conj()).sum()
# Normalize by the norm of a and b, multiplied by a
# factor two because of the way inner product is defined (line above)
# there should be a sqrt(2) for both a and b
norm_a = 2 * np.sqrt((np.abs(a)**2).sum())
norm_b = 2 * np.sqrt((np.abs(b)**2).sum())
return inner_tmp/(norm_a*norm_b)
for index, cluster in enumerate(colored_database):
median = np.median([spike.waveform for spike in cluster], axis=0 )
representatives.append(median)
# np.corrcoef returns the correlation matrix, which is symmetric (2x2).
median_transform = np.fft.rfft(median)
spike_transforms = [np.fft.rfft(spike.waveform) for spike in cluster]
# Factor of two because the ffts are one-sided and we're integrating over all frequencies
matched_filters = [inner_product(median_transform, spike_transform) for spike_transform in spike_transforms]
cluster_matched_filters.append(matched_filters)
# Plot correlation coefficients:
fig = plt.figure(figsize=(12, 6*cluster_number), dpi=100)
plt.subplots_adjust(left=0.10, right=0.95, top=0.97, bottom=0.05)
dpi = fig.get_dpi()
height = fig.get_figheight() * dpi
ax = []
xys = [ [] for i in range(cluster_number)]
icoords = [ [] for i in range(cluster_number)]
for index, match in enumerate(cluster_matched_filters):
# Create subplot and add it to the ax list
ax.append(fig.add_subplot(cluster_number, 1, index+1))
# Create x axis
glitch_indexes = range(1, len(match)+1)
if (len(match) > 1):
ax[-1].plot(glitch_indexes, match, 'b.')
else:
ax[-1].set_title("Type #{0}: 1 element".format(index+1))
ax[-1].plot(range(10), np.zeros(10))
ax[-1].plot(range(10), np.zeros(10), "b.")
# Save data for imagemap:
if (len(match) > 1):
xys[index] = zip(glitch_indexes, match)
else:
xys[index] = [(1, match[0])]
ax[-1].set_title("Type #{0}: {1} of {2} ({3:.2f}%)".format(index+1, len(match), int(glitch_number), (len(match)/glitch_number)*100))
ax[-1].grid(which="both")
ax[-1].set_xlabel("Observation")
ax[-1].set_ylabel("Matched filter")
plt.xlim((0, len(match)))
plt.ylim((-0.05,1.05))
# Set title for the first subplot
ax[0].set_title("Normalized matched filter results:\nType #1 {0}/{1} ({2:.2f}%)".format(len(cluster_matched_filters[0]), int(glitch_number), (len(cluster_matched_filters[0])/glitch_number)*100))
# Get image coordinates for each of the points plotted in the previous loop
for index, match in enumerate(cluster_matched_filters):
# x axis is the same as above
glitch_indexes = range(1, len(match)+1)
# We have the same number of transformed coordinates as glitch_indexes
ixs = [0]*len(glitch_indexes)
iys = [0]*len(glitch_indexes)
# Transform coordinates
i = 0
for x, y in xys[index]:
ixs[i], iys[i] = ax[index].transData.transform_point( [x, y])
i += 1
icoords[index] = zip(ixs, iys)
# Save figure
fig.savefig("Types_matched_filtering.png", dpi=fig.get_dpi())
###
# Setup the clickable HTML file
# The minimal 'template' to generate an image map:
tmpl = """
<html><head><title>Matched filtering</title></head><body>
<img src="%s" usemap="#points" border="0">
<map name="points">%s</map>
</body></html>"""
if "time" in ANALYSIS:
fmt = "<area shape='circle' coords='%f,%f,3' href='time_series/Type_%i/%0.3f.pdf' title='GPS %0.2f - Type %i ' >"
elif "frequency" in ANALYSIS:
fmt = "<area shape='circle' coords='%f,%f,3' href='PSDs/Type_%i/%s.png' title='%s - Type %i'>"
else:
assert False, "Analyis not time nor frequency. What are you trying to do?"
# need to do height - y for the image-map
fmts = []
for index, infos in enumerate(info_list):
fmts.extend([fmt % (ix, height-iy, x, y, y, x) for (ix, iy), (x, y) in zip(icoords[index], infos) ])
plt.close(fig)
f = open("Types_matched_filtering.html", "w")
print >> f, tmpl % ("Types_matched_filtering.png", "\n".join(fmts))
f.close()
plt.close(fig)
print "\n\tSaved: Types_matched_filtering.html"
return
####################################################
def main():
    """
    PCAT post-processing driver: load data, run PCA, cluster the scores
    with a Gaussian mixture model, then produce scatterplots, per-type
    summaries and (optionally) per-observation plots and interactive
    cluster removal.
    Relies on module-level configuration globals set elsewhere
    (ANALYSIS, SAMPLING, PLOT, REMOVE, SILENT, max_clusters,
    principal_components_number, components_number, MAX_COMPONENTS, ...).
    """
    global means, observations, samples, marker
    global STANDARDIZE, PRINT_LIST
    args = check_options_and_args()
    matrix, spike_database = load_data(args, ANALYSIS)
    observations, samples = matrix.shape
    print "Data matrix is %ix%i, %i observations of %i variables" % (observations, samples, observations, samples)
    time0 = time.time()
    # PCA create a plot of the explained variance vs. principal components
    # number.
    # The default number of plotted components is 40
    score_matrix, principal_components, means, stds, eigenvalues = PCA(matrix, components_number=40)
    print "PCA timing:\t %.2f s" % float(time.time()-time0)
    time1 = time.time()
    # gaussian_mixture is called with
    # 'score_matrix[:,:principal_components_number]'
    # as argument to cluster only using the first 'principal_components_number'
    # components, as specified by the '-p' option.
    print "Clustering using the first %i principal components..." % principal_components_number
    reduced_score_matrix = score_matrix[:,:principal_components_number]
    mat, tmp, tmp1 = matrix_whiten(reduced_score_matrix, std=False) ####%%% Changed to False by MC
    labels = gaussian_mixture(mat, upper_bound=max_clusters)
    cluster_number = len( np.unique(labels) )
    print "GMM timing:\t %.1f s" % float( time.time() - time1 )
    print "GMM algorithm found %i cluster" % cluster_number + \
        ('s' if ( cluster_number > 1 or cluster_number == 0) else '')+"."
    colored_clusters = color_clusters( score_matrix, labels )
    print_cluster_info(colored_clusters)
    # DEVELOPMENT TEST 03/25/2014
    # Testing the goodness of the clutering with a
    # chisquare test on the glitches in each cluster
    with warnings.catch_warnings():
        warnings.filterwarnings( "ignore", category=UserWarning )
        chisquare_test(spike_database, labels)
    '''
    # 3D Plots
    # Uncomment for 3D scatterplots
    x = 1
    y = 2
    z = 3
    three_plot(colored_clusters, x, y, z, "Colored_scatter-"+str(clusternumber)+
    "_clusters"+str(x)+"_vs_"+str(y)+"_vs_"+str(z)+".pdf" )
    '''
    # Scatterplot every distinct pair (x, y) of principal component scores
    output = "Colored_scatter-"+str(cluster_number)+"_clusters_"
    for x in range( 1, MAX_COMPONENTS ):
        for y in range ( 1, MAX_COMPONENTS ):
            if ( x != y ) & ( x < y ):
                scatterplot( score_matrix, spike_database, colored_clusters, labels, x, y, output+str(x)+"_vs_"+str(y), ANALYSIS )
    if ( "bands" in ANALYSIS ):
        calculate_types(spike_database, colored_clusters, score_matrix, principal_components, means, stds, labels, ANALYSIS, SAMPLING, low, high)
    else:
        calculate_types(spike_database, colored_clusters, score_matrix, principal_components, means, stds, labels, ANALYSIS, SAMPLING)
    if PLOT:
        if ( "generic" not in ANALYSIS ):
            # If time-domain analysis, create a folder containing the time
            # series for each
            # of the spikes containing a folder for each cluster
            if ( "time" in ANALYSIS ):
                print "\n\tPlotting time series..."
                if not ( os.path.exists( "time_series" ) ):
                    os.mkdir( "time_series" )
                for i in range(cluster_number):
                    if not ( os.path.exists( "time_series/Type_"+str(i+1) ) ):
                        os.mkdir( "time_series/Type_"+str(i+1) )
                # NOTE(review): the sixth positional argument below binds to
                # spike_time_series' RECONSTRUCT parameter, not SILENT --
                # 'SILENT=SILENT' was probably intended; confirm before changing.
                spike_time_series(spike_database, (score_matrix, principal_components, means, stds), components_number, labels, SAMPLING, SILENT)
            elif ( "frequency" in ANALYSIS ):
                print "\n\tPlotting PSDs..."
                # Plot a list of PSDs with the relative times:
                for i in range(cluster_number):
                    try:
                        os.makedirs( "PSDs/Type_"+str(i+1) )
                    except:
                        pass
                if ( "bands" in ANALYSIS):
                    plot_psds(spike_database, (score_matrix, principal_components, means, stds), components_number, labels, SAMPLING, ANALYSIS, low, high)
                else:
                    plot_psds(spike_database, (score_matrix, principal_components, means, stds), components_number, labels, SAMPLING, ANALYSIS)
    # Interactive cluster removal
    if REMOVE:
        print "Entering interactive mode..."
        remove_clusters(spike_database, labels)
    print "\n\t\tFinished!"
if __name__ == "__main__":
start = time.time()
main()
endtime = float(time.time()-start)
print "Total Execution: {0:.1f} s".format(endtime if endtime > 0 else endtime/60.0 )
|
import os
import pandas as pd
import geopandas as gpd
# Input/output locations come from environment configuration.
FOLDER_RAW = os.getenv('DIR_DATA_RAW')
FOLDER_PROCESSED = os.getenv('DIR_DATA_PROCESSED')
DATA_LIGHT = 'geo_export_e87e8b43-cf73-48e4-ad5c-692f56b45394.shp'

# Load the Camden street-lighting shapefile.
shp_light = gpd.read_file(filename=FOLDER_RAW + "/camden_street_lighting/" + DATA_LIGHT)

# Aggregate to key info: lamp count and mean wattage per (ward, lamp type).
df_lamp = (
    shp_light
    .groupby(by=["ward_name", "lamp_type"])
    .agg(func={"street_nam": 'count', "wattage": 'mean'})
    .reset_index()
    .rename(columns={"street_nam": "count_lamps", "wattage": "mean_wattage"})
)
df_lamp['date'] = pd.to_datetime('2020-12-04')

# Ensure uniqueness so the Neo4j import can use Cypher CREATE efficiently.
df_lamp = df_lamp.drop_duplicates(subset=["ward_name", "lamp_type"])

# Export to csv for neo4j.
df_lamp.to_csv(path_or_buf=FOLDER_PROCESSED + "/" + "df_lamp.csv",
               index=False)
|
#!/usr/bin/env python
#coding=utf-8
import urllib2
import urllib
import re
class Tool:
    """Clean scraped Tieba post HTML into plain text.

    A fixed, ordered pipeline of regex substitutions strips or converts the
    markup; order matters (e.g. <td> must be handled before the generic
    tag-removal pass).
    """
    # Drop <img ...> tags and runs of exactly 7 whitespace chars (Tieba padding).
    removeImg=re.compile(r'<img.*?>|\s{7}')
    # Drop hyperlink tags (keep the link text).
    removeAddr=re.compile(r'<a.*?>|</a>')
    # Replace line-break-ish tags with \n.
    replaceLine=re.compile(r'<tr>|<div>|</div>|</p>')
    # Replace table cell <td> with a tab.
    replaceTD=re.compile(r'<td>')
    # Replace a paragraph opening with \n plus two spaces.
    replacePara=re.compile(r'<p.*?>')
    # Replace <br> variants with \n.
    replaceBR=re.compile(r'<br>|<br/>')
    # Delete every remaining tag.
    removeOther=re.compile(r'<.*?>')
    def replace(self,target):
        """Apply all substitutions in order and return the stripped result."""
        # Fix: the paragraph replacement used to insert the literal text
        # "\n\s[2]"; per the rule's intent it should be newline + two spaces.
        pipeline = (
            (self.removeImg, ""),
            (self.removeAddr, ""),
            (self.replaceLine, "\n"),
            (self.replaceTD, "\t"),
            (self.replacePara, "\n  "),
            (self.replaceBR, "\n"),
            (self.removeOther, ""),
        )
        for pattern, repl in pipeline:
            target = re.sub(pattern, repl, target)
        return target.strip()
class BDTB:
    """Baidu Tieba thread scraper (Python 2 / urllib2).

    Downloads pages of one thread and writes the cleaned post contents,
    numbered by floor, to a text file.
    """
    # Initializer: takes only the thread base URL and whether to show
    # only the original poster's ("LZ") posts.
    def __init__(self,baseUrl,see_lz):
        self.baseURL=baseUrl
        self.seeLZ='?see_lz='+str(see_lz)
        self.tool=Tool()
        # Output file handle; opened later by setFileName().
        self.file=None
        # Running floor (post) counter, shared across pages.
        self.floor=1
        # Default output name ("Baidu Tieba") used when none is given.
        self.default_fileName=u'百度贴吧'
    def getPage(self,pageNum):
        """Fetch one page of the thread; return raw HTML, or None on failure."""
        try:
            url=self.baseURL+self.seeLZ+'&pn='+str(pageNum)
            request=urllib2.Request(url)
            response=urllib2.urlopen(request)
            #print response.read()
            return response.read()
        except urllib2.URLError,e:
            if hasattr(e,'reason'):
                # Message: "failed to connect to Baidu Tieba, reason: ..."
                print u'连接百度贴吧失败,错误原因',e.reason
                return None
    def getTitle(self):
        """Scrape the thread title from page 1; None if not found."""
        pageCode=self.getPage(1)
        pattern=re.compile('<h3 class="core_title_txt.*?>(.*?)</h3>',re.S)
        result=re.search(pattern,pageCode)
        if result:
            return result.group(1).strip()
        else:
            return None
    def getPageNum(self):
        """Scrape the total page count (as a string) from page 1; None if absent."""
        pageCode=self.getPage(1)
        pattern=re.compile('<li class="l_reply_num.*?</span>.*?<span.*?>(.*?)</span>',re.S)
        result=re.search(pattern,pageCode)
        if result:
            return result.group(1).strip()
        else:
            return None
    def setFileName(self,name):
        """Open the output .txt file, falling back to the default name."""
        if name is not None:
            self.file=open(name+'.txt',"w+")
        else:
            self.file=open(self.default_fileName+'.txt','w+')
    def write_data(self,data):
        """Append text to the output file, swallowing write errors with a log line."""
        try:
            self.file.write(data)
        except Exception:
            print 'write failed'
    def getContent(self,pageCode):
        """Extract all post bodies from one page's HTML, clean them with Tool,
        and write each under a floor-number banner."""
        pattern=re.compile('<div id="post_content_.*?>(.*?)</div>',re.S)
        items=re.findall(pattern,pageCode)
        #floor=1
        for item in items:
            # Banner reads "------ floor N ------" (第N楼).
            self.write_data('\n\n------------------------第%s楼----------------------------\n' % self.floor)
            self.write_data(self.tool.replace(item))
            self.floor+=1
# NOTE(review): the triple-quoted block below is a dead, commented-out
# duplicate of the Tool class above (with broken, self-less attribute
# references); it is kept verbatim but could be deleted.
'''
class Tool:
    #删除img标签和7位空格
    removeImg=re.compile('<img.*?>|\s{7}')
    #删除超链接
    removeAddr=re.compile('<a.*?>|</a>')
    #替换换行标签为\n
    replaceLine=re.compile('<tr>|<div>|</div>|</p>')
    #将制表<td>替换为\t
    replaceTD=re.compile('<td>')
    #把段落开头替换为\n加两个空格
    replacePara=re.compile('<p.*?>')
    #将换行符替换为\n
    replaceBR=re.compile('<br>|<br/>')
    #将其余标签删除
    removeOther=re.compile('<.*?>')
    def replace(self,target):
        target=re.sub(removeImg,"",target)
        target=re.sub(removeAddr,"",target)
        target=re.sub(replaceLine,"\n",target)
        target=re.sub(replaceTD,"\t",target)
        target=re.sub(replacePara,"\n\s[2]",target)
        target=re.sub(replaceBR,"\n",target)
        target=re.sub(removeOther,"",target)
        return target.strip()
'''
# Driver: scrape page 1 of an NBA thread (see_lz=1 → original poster only)
# into "百度贴吧-NBA.txt".
baseURL='http://tieba.baidu.com/p/3138733512'
bdtb=BDTB(baseURL,1)
bdtb.setFileName('百度贴吧-NBA')
bdtb.getContent(bdtb.getPage(1))
|
# -*- encoding: utf-8 -*-
from pygmatic_segmenter.types import Text
import re
class AbbreviationReplacer:
    """Searches for periods within abbreviations and masks them with ∯ so the
    sentence segmenter does not split there.

    `language` supplies the abbreviation lists and the language-specific
    rules referenced throughout.
    """

    def __init__(self, text, language):
        self.text = Text(text)
        self.language = language

    def replace(self):
        """Run the full masking pipeline over self.text and return the result
        with real boundaries restored after known sentence starters."""
        self.text = self.text.apply(self.language.PossessiveAbbreviationRule,
                                    self.language.KommanditgesellschaftRule,
                                    self.language.SingleLetterAbbreviationRules.All)
        self.text = self.search_for_abbreviations_in_string(self.text)
        self.text = self.replace_multi_period_abbreviations(self.text)
        self.text = self.text.apply(self.language.AmPmRules.All)
        return self.replace_abbreviation_as_sentence_boundary(self.text)

    def search_for_abbreviations_in_string(self, txt):
        """Mask the trailing period of every known abbreviation found in txt."""
        original = txt[:]
        downcased = original.lower()
        for abbrev in self.language.Abbreviation.ABBREVIATIONS:
            stripped = abbrev.strip()
            if stripped not in downcased:
                continue
            # Fix: the case-insensitive flag must lead the pattern -- a
            # trailing inline "(?i)" raises an error on Python 3.11+ (and was
            # deprecated before that, while still applying globally).
            abbrev_match = re.findall(r"(?i)(?:^|\s|\r|\n){}".format(re.escape(stripped)), original)
            if len(abbrev_match) == 0:
                continue
            # First character of the word following each abbreviation.
            next_word_start = r"(?<=" + re.escape(stripped) + " ).{1}"
            # NOTE(review): scans self.text rather than the local txt/original;
            # kept as-is, but confirm this is intentional in the port.
            character_array = re.findall(next_word_start, self.text)
            for i, am in enumerate(abbrev_match):
                txt = self.scan_for_replacements(txt, am, i, character_array)
        return txt

    def scan_for_replacements(self, txt, am, index, character_array):
        """Mask the period of one abbreviation occurrence `am`, chosen by the
        kind of abbreviation and the character that follows it."""
        character = character_array[index] if len(character_array) > index else None
        prepositive = self.language.Abbreviation.PREPOSITIVE_ABBREVIATIONS
        number_abbr = self.language.Abbreviation.NUMBER_ABBREVIATIONS
        # Fix: r"[[:upper]]" is a POSIX bracket class, which Python's re does
        # not support -- it parsed as a plain character class and never matched
        # an uppercase letter, so the upper-case guard was a no-op.
        # NOTE(review): [A-Z] covers ASCII uppercase only -- confirm whether
        # Unicode uppercase should also suppress masking.
        upper = re.search(r"[A-Z]", str(character))
        if (not upper) or am.strip().lower() in prepositive:
            if am.strip().lower() in prepositive:
                txt = self.replace_prepositive_abbr(txt, am)
            elif am.strip().lower() in number_abbr:
                txt = self.replace_pre_number_abbr(txt, am)
            else:
                txt = self.replace_period_of_abbr(txt, am)
        return txt

    def replace_abbreviation_as_sentence_boundary(self, txt):
        """Restore a real period after U.S.-style abbreviations when followed
        by a known sentence starter.

        As we are being conservative and keeping ambiguous sentence boundaries
        as one sentence instead of splitting into two, we can split at words
        that we know for certain never follow these abbreviations.
        SENTENCE_STARTERS will never cover all cases, but it covers the words
        that most often start a sentence and could never follow one of the
        abbreviations below.
        Rubular: http://rubular.com/r/PkBQ3PVBS8
        """
        for word in self.language.AbbreviationReplacer.SENTENCE_STARTERS:
            escaped = re.escape(word)
            # NOTE(review): "i.v" and "I.V" contain unescaped dots -- kept
            # verbatim from the original; confirm intended.
            regex = r"(U∯S|U\.S|U∯K|E∯U|E\.U|U∯S∯A|U\.S\.A|I|i.v|I.V)∯(?=\s{}\s)".format(escaped)
            txt = re.sub(regex, r'\1.', txt)
        return Text(txt)

    def replace_multi_period_abbreviations(self, txt):
        """Mask every period inside multi-period abbreviations (e.g. U.S.A.)."""
        mpa = re.findall(self.language.MULTI_PERIOD_ABBREVIATION_REGEX, txt)
        if len(mpa) == 0: return Text(txt)
        for match in mpa:
            txt = re.sub(re.escape(match), match.replace('.', '∯'), txt)
        return Text(txt)

    def replace_pre_number_abbr(self, txt, abbr):
        """Mask the period of a number-preceding abbreviation (e.g. "No. 5")."""
        txt = re.sub(r"(?<=\s{ab})\.(?=\s\d)|(?<=^{ab})\.(?=\s\d)".format(ab = abbr.strip()), '∯', txt)
        txt = re.sub(r"(?<=\s{ab})\.(?=\s+\()|(?<=^{ab})\.(?=\s+\()".format(ab = abbr.strip()), '∯', txt)
        return txt

    def replace_prepositive_abbr(self, txt, abbr):
        """Mask the period of a prepositive abbreviation (e.g. "Mr. Smith")."""
        txt = re.sub(r"(?<=\s{ab})\.(?=\s)|(?<=^{ab})\.(?=\s)".format(ab = abbr.strip()), '∯', txt)
        txt = re.sub(r"(?<=\s{ab})\.(?=:\d+)|(?<=^{ab})\.(?=:\d+)".format(ab = abbr.strip()), '∯', txt)
        return txt

    def replace_period_of_abbr(self, txt, abbr):
        """Mask a generic abbreviation's period when followed by punctuation,
        a lowercase word, a digit, or similar non-boundary context."""
        txt = re.sub(r"(?<=\s{ab})\.(?=((\.|\:|-|\?)|(\s([a-z]|I\s|I'm|I'll|\d|\())))|(?<=^{ab})\.(?=((\.|\:|\?)|(\s([a-z]|I\s|I'm|I'll|\d))))".format(ab = abbr.strip()), '∯', txt)
        txt = re.sub(r"(?<=\s{ab})\.(?=,)|(?<=^{ab})\.(?=,)".format(ab = abbr.strip()), '∯', txt)
        return txt

    def replace_possessive_abbreviations(self, txt):
        """Mask the period of possessive abbreviations per the language regex."""
        return re.sub(self.language.POSSESSIVE_ABBREVIATION_REGEX, '∯', txt)
# 线程池和进程池
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
def fn(name):
    """Demo workload: print `name` alongside each counter value 0..999."""
    counter = 0
    while counter < 1000:
        print(name, counter)
        counter += 1
if __name__ == '__main__':
    # Thread pool with 50 workers; fan out 100 print jobs.
    with ThreadPoolExecutor(50) as pool:
        for job_id in range(100):
            # fn receives its label via the `name` keyword ("线程" = thread).
            pool.submit(fn, name=f"线程{job_id}")
        print("done")
|
-X FMLP -Q 0 -L 3 88 300
-X FMLP -Q 0 -L 3 86 250
-X FMLP -Q 0 -L 3 73 250
-X FMLP -Q 0 -L 3 58 200
-X FMLP -Q 1 -L 1 46 175
-X FMLP -Q 1 -L 1 45 200
-X FMLP -Q 1 -L 1 33 100
-X FMLP -Q 2 -L 1 27 150
-X FMLP -Q 2 -L 1 27 100
-X FMLP -Q 2 -L 1 23 100
-X FMLP -Q 3 -L 1 18 300
-X FMLP -Q 3 -L 1 16 175
-X FMLP -Q 3 -L 1 13 100
|
import urllib.request
import time
from bs4 import BeautifulSoup
from Task4.MatrixTools import MatrixTools
from Task4.MatrixToolsOld import MatrixToolsOld
if __name__ == '__main__':
    # Time the PageRank computation over a 100-page crawl rooted at Wikipedia.
    t0 = time.time()
    crawler_tools = MatrixTools("http://en.wikipedia.org/", 100)
    # crawler_tools.makeMatrix()
    crawler_tools.countPagerank()
    print(time.time()-t0, "sec")
|
from typing import *
class Solution:
    def corpFlightBookings(self, bookings: List[List[int]], n: int) -> List[int]:
        """Return total seats reserved per flight 1..n (LeetCode 1109).

        Each booking [first, last, seats] reserves `seats` on every flight in
        the inclusive range first..last.

        The previous event-sweep version produced correct answers but only by
        accident: it relied on mismatched slice-assignment lengths resizing
        the result list. Replaced with the standard difference-array
        technique: O(n + m) time, and correct by construction.
        """
        # delta[i] holds the net change in seat count when moving to flight i.
        delta = [0] * (n + 2)
        for first, last, seats in bookings:
            delta[first] += seats
            delta[last + 1] -= seats
        # Prefix-sum the deltas to recover the running total per flight.
        answer = []
        running = 0
        for flight in range(1, n + 1):
            running += delta[flight]
            answer.append(running)
        return answer
# Smoke checks against the three LeetCode examples.
for case_bookings, case_n in (
    ([[1,2,10],[2,3,20],[2,5,25]], 5),
    ([[2,4,10],[2,3,20]], 6),
    ([[3,4,10],[3,4,20]], 7),
):
    print(Solution().corpFlightBookings(bookings=case_bookings, n=case_n))
|
""" Import Python modules """
import os
""" Import Django modules """
from django.db import models
""" Import Django settings """
from emp.settings import MEDIA_ROOT
""" Import Models """
from django.contrib.auth.models import User
""" Import from 3rd party Django modules """
from taggit.managers import TaggableManager # django-taggit taggable manager
from djangoratings.fields import RatingField # django-ratings field type
class VideoCategory(models.Model):
    """A browsable category that videos can be assigned to."""
    title = models.CharField(max_length=255)
    # NOTE(review): BooleanField without an explicit default -- newer Django
    # versions warn about this; confirm the intended default value.
    nsfw = models.BooleanField(blank=True)
    thumbnail = models.ImageField(upload_to="videos/categories/thumbs/")
    def __unicode__(self):
        return self.title
class Video(models.Model):
    """An uploaded video with its source file, conversion state and metadata."""
    title = models.CharField(max_length=255)
    uploader = models.ForeignKey(User, editable=False)
    upload_datetime = models.DateTimeField(auto_now_add=True)
    modified_datetime = models.DateTimeField(auto_now=True)
    # Duration string, expected in "H:M:S[.frac]" form (see _get_length_list).
    length = models.CharField(max_length=255, blank=True, editable=False)
    description = models.TextField()
    # NOTE(review): BooleanField/IntegerField without defaults -- confirm
    # intended defaults; newer Django versions warn about the boolean ones.
    converted = models.BooleanField(editable=False)
    views = models.IntegerField(editable=True)
    categories = models.ManyToManyField(VideoCategory)
    tags = TaggableManager(blank=True) # django-taggit handles tagging
    nsfw = models.BooleanField() # (Not Safe for Work)
    codec = models.CharField(max_length=255, editable=False)
    src_codec = models.CharField(max_length=255, editable=False)
    file_size = models.IntegerField(blank=True, null=True, editable=False)
    src_file = models.FileField(upload_to="videos/src/")
    src_filename = models.CharField(max_length=255, editable=False)
    converted_file = models.CharField(max_length=255, blank=True, null=True, editable=False)
    def __unicode__(self):
        return self.title
    """ Get list of thumbnail filenames for video """
    def _get_thumbs(self):
        """List thumbnail filenames under MEDIA_ROOT/videos/thumbs/<id>/,
        filtering out macOS .DS_Store entries. Hits the filesystem on access."""
        thumbs_path = MEDIA_ROOT + '/videos/thumbs/'+ str(self.id) +'/'
        thumbs_list = os.listdir(thumbs_path)
        if '.DS_Store' in thumbs_list:
            thumbs_list.remove('.DS_Store')
        return thumbs_list
    thumbs_list = property(_get_thumbs)
    """ Get number of favorites (via the # of user channels which have the video favorited (m2m relation)) """
    def _get_num_favorites(self):
        """Count of user channels that favorited this video (reverse m2m)."""
        favoriters = self.userchannel_set.all()
        return len(favoriters)
    num_favorites = property(_get_num_favorites)
    """ Generate a video title slug based on the video title for use in URLS """
    def _get_video_title_slug(self):
        """Lowercase the title, hyphenate spaces, strip quotes/commas."""
        title_slug = str(self.title).lower()
        title_slug = title_slug.replace(' ','-')
        title_slug = title_slug.replace('\'','')
        title_slug = title_slug.replace(',','')
        return title_slug
    title_slug = property(_get_video_title_slug)
    """ Get hours, minutes, seconds from length field, as a list of integers """
    def _get_length_list(self):
        """Parse self.length ("H:M:S[.frac]") into [hours, minutes, seconds]."""
        init_length_list = self.length.split(':')
        hours = int(init_length_list[0])
        minutes = int(init_length_list[1])
        # Drop any fractional part of the seconds field.
        seconds = int(init_length_list[2].split('.')[0])
        length_list = [hours, minutes, seconds]
        return length_list
    length_list = property(_get_length_list)
    """ Get total seconds (useful when need to order by length) """
    def _get_total_seconds(self):
        """Convert the parsed length into a single seconds total."""
        hours = self.length_list[0]
        minutes = self.length_list[1]
        seconds = self.length_list[2]
        seconds_in_hours = hours * 60 * 60
        seconds_in_minutes = minutes * 60
        total_seconds = seconds_in_hours + seconds_in_minutes + seconds
        return total_seconds
    total_seconds = property(_get_total_seconds)
""" Each video playlist is owned by a single user who created it.
    It may be added to a different user's lists of playlists, however:
    Each playlist is immutable, and as such a user may only edit (delete/add videos) a playlist
    by importing a copy of it, therefore becoming the new owner of that particular instance of the playlist """
class VideoPlaylist(models.Model):
    """An immutable, user-owned ordered collection of videos (see note above
    the class: editing requires importing a copy)."""
    title = models.CharField(max_length=255)
    videos = models.ManyToManyField(Video, blank=True)
    created_datetime = models.DateTimeField(auto_now_add=True)
    modified_datetime = models.DateTimeField(auto_now=True)
    owner = models.ForeignKey(User)
    def __unicode__(self):
        return self.title
    """ Generate a playlist title slug based on the playlist title for use in URLS """
    def _get_playlist_title_slug(self):
        """Lowercase the title, hyphenate spaces, strip quotes/commas."""
        title_slug = str(self.title).lower()
        title_slug = title_slug.replace(' ','-')
        title_slug = title_slug.replace('\'','')
        title_slug = title_slug.replace(',','')
        return title_slug
    title_slug = property(_get_playlist_title_slug)
# NOTE(review): dead, commented-out model below -- kept verbatim.
"""
class HTML5Profiles(models.Model):
    title = models.CharField(max_length=255)
    vcodec = models.CharField(max_length=255)
"""
|
from django.test import TestCase
from django.contrib.auth import get_user_model
class UserTests(TestCase):
    """Tests for creating candidate, interviewer and super users."""

    def _create_and_check_user(self, is_candidate, is_interviewer):
        """Create a user with fixed credentials and verify that the email is
        stored and the password round-trips through the hasher."""
        email = "test@gmail.com"
        username = "test"
        password = "test1234"
        user = get_user_model().objects.create_user(
            email=email,
            username=username,
            password=password,
            is_candidate=is_candidate,
            is_interviewer=is_interviewer,
        )
        self.assertEqual(user.email, email)
        self.assertTrue(user.check_password(password))

    def test_create_interviewer(self):
        """Test creating a new interviewer user."""
        self._create_and_check_user(is_candidate=False, is_interviewer=True)

    def test_create_candidate(self):
        """Test creating a new candidate user."""
        self._create_and_check_user(is_candidate=True, is_interviewer=False)

    def test_create_superuser(self):
        """Test creating a new superuser."""
        username = "test"
        password = "test1234"
        user = get_user_model().objects.create_superuser(
            username=username,
            password=password,
        )
        self.assertEqual(user.username, username)
        self.assertTrue(user.check_password(password))
|
"""
-- Theano implementation of a single de-noising autoencoder, trained
on reconstruction error
-- Pieces of this code borrow from code snippets on deeplearning.net
NOTES:
-- Imports tt_algebra module, which performs all symbolic algebra
required to generate gradient updates
-- Function compilation to execute the SGD algebra and train the network
occurs in train_AE, which returns the learned parameters as well as the
mean loss for each epoch
-- Scoring function (score_AE) takes a layer parameter dictionary (as returned
by train_AE) as well as the layer options used and data to be scored
"""
import theano
from theano.tensor.shared_randomstreams import RandomStreams
import theano.tensor as T
import numpy as np
import sys
import weight_initializers as WI
import tt_algebra as ALG
from activation_ops import ActivationConstructors as AC
from data_utils import shared_dataset, dim_checker
################
### TRAINING ###
################
def train_AE(training_data, input_layer_ops):
    """Train a single de-noising autoencoder with minibatch SGD.

    :param training_data: training matrix (rows = examples); wrapped into a
        Theano shared variable for GPU-friendly slicing.
    :param input_layer_ops: dict of layer options; must contain the keys
        'batch_size', 'n_epoch', 'layer_dim' (a (n_visible, n_hidden) pair)
        and 'weight_init'.
    :return: dict with 'parameters' ({'W', 'b'} learned values) and 'loss'
        (mean reconstruction loss per epoch).

    NOTE(review): if a required key is missing, the except branch only prints
    and the function implicitly returns None -- callers must handle that.
    """
    try:
        # parse out options
        batch_size = input_layer_ops['batch_size']
        n_epochs = input_layer_ops['n_epoch']
        n_visible, n_hidden = input_layer_ops['layer_dim']
        weight_op = input_layer_ops['weight_init']
    except KeyError:
        print "Something in the global options class was mis-specified."
        print "Please check the AEOps class or your input dictionary."
    else:
        # set random number generators (fixed seed for reproducibility)
        numpy_rng = np.random.RandomState(2 ** 30)
        theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
        # compute number of minibatches for training, validation and testing
        n_train_batches = dim_checker(training_data, 0) / batch_size
        # set training data as a theano shared variable
        training_shared = shared_dataset(training_data)
        # allocate symbolic variables for the data
        index = T.lscalar()  # index to a [mini]batch
        x = T.fmatrix('x')
        # initialize encoding weight matrix, decoding weight matrix
        # (tied weights: decoder is the transpose of the encoder)
        initial_W = WI.WeightInits(n_visible, n_hidden, numpy_rng).weight_init(weight_op)
        W = theano.shared(value=initial_W, name='W', borrow=True)
        W_prime = W.T
        # initialize bias vectors (hidden bias b, visible bias b_prime)
        b = theano.shared(value=np.zeros(n_hidden, dtype=theano.config.floatX), borrow=True)
        b_prime = theano.shared(value=np.zeros(n_visible, dtype=theano.config.floatX), borrow=True)
        # generate symbolic representations of SGD updates using mSGD
        cost, updates = ALG.AE_mSGD(x, [W, W_prime, b, b_prime], input_layer_ops, theano_rng)
        # compile a theano function that takes an index value as an argument
        # and updates the parameter values using the algebraic expressions
        # generated by mSGD
        train_dae = theano.function(
            [index],
            cost,
            updates=updates,
            givens={x: training_shared[index * batch_size: (index + 1) * batch_size]}
        )
        mean_loss = []
        # for each training epoch
        for epoch in xrange(n_epochs):
            l = []
            # compute gradient updates for each mini-batch
            for batch_index in xrange(n_train_batches):
                l.append(train_dae(batch_index)) # record loss for each batch
            # record the mean loss for the epoch
            epoch_loss = np.mean(l)
            mean_loss.append(epoch_loss)
            print "Loss for epoch " + str(epoch + 1) + ": " + str(epoch_loss)
        # once training procedure is complete, fetch weights
        out_params = {'W': W.get_value(),
                      'b': b.get_value()}
        out_values = {'parameters': out_params, 'loss': mean_loss}
        return out_values
def score_AE(unscored_x, input_layer_ops, layer_params):
    """Score data through a trained autoencoder's encoder.

    :param unscored_x: data matrix to encode.
    :param input_layer_ops: layer options dict (reads 'activation').
    :param layer_params: learned parameters dict with 'W' and 'b' (as
        returned by train_AE).
    :return: the hidden representation act(X.W + b) for unscored_x.
    """
    activation_name = input_layer_ops['activation']
    learned_W = layer_params['W']
    learned_b = layer_params['b']

    # Symbolic inputs for the compiled scoring function.
    X = T.fmatrix('X')
    W = T.fmatrix('W')
    b = T.fvector('b')

    # Hidden representation: the chosen nonlinearity over the affine map.
    hidden_expr = AC(activation_name).activation(T.dot(X, W) + b)

    # Compile once, then evaluate on the concrete arrays.
    score_f = theano.function([X, W, b], hidden_expr)
    return score_f(unscored_x, learned_W, learned_b)
|
from QTA.Signals.ClassifyBar import ClassifyBar
from QTA.Execution.SetEntryExits import SetEntryExits
import QTA.DataConfiguration.RetrieveData as RD
import datetime as dt
import math as m
################################
# Unit Tests (SetEntryExits)
################################
def test_set_entry_price_from_buy_signal():
    """TODO: implement -- verify SetEntryExits derives the entry price from a buy signal."""
    pass
def test_set_target_price_from_entry_price():
    """TODO: implement -- verify the target price is computed from the entry price."""
    pass
def test_set_loss_price_from_entry_price():
    """TODO: implement -- verify the stop-loss price is computed from the entry price."""
    pass
def test_trade_was_entered():
    """TODO: implement -- verify trade-entered state is detected."""
    pass
|
#!/usr/bin/env python
# verify types of CRISPR-Cas systems / verify using predictions
import sys
import os
import operator
import subprocess
# Flat script: classify CRISPR-Cas system types from *.plot.txt prediction files.
cmd = sys.argv[0]
resultdir, resultfile = "", ""
genome = True
# --- Minimal flag parsing (no argparse): -d folder, -f file, -g value ---
for idx in range(len(sys.argv)):
    if (sys.argv[idx] == "-d") and (len(sys.argv) > idx + 1):
        resultdir = sys.argv[idx + 1]
    elif (sys.argv[idx] == "-f") and (len(sys.argv) > idx + 1):
        resultfile = sys.argv[idx + 1]
    elif (sys.argv[idx] == "-g") and (len(sys.argv) > idx + 1):
        # NOTE(review): this stores a *string* ("True"/"False"), but genome is
        # later compared with `genome == True`, so passing any -g value
        # disables the "NO CRISPR-Cas found" branch. Looks like a bug; also
        # the usage text below advertises "-s", not "-g" -- confirm.
        genome = sys.argv[idx + 1]
# Locate bundled data (interference-module table) and the cd-hit-est binary
# relative to this script's directory.
cmddir = os.path.dirname(cmd)
if not cmddir:
    modulefile = "../local/cas-db/interference-module.txt"
    cdhitest = "../bin/cd-hit-v4.6.1-2012-08-27/cd-hit-est"
else:
    modulefile = cmddir + "/../local/cas-db/interference-module.txt"
    cdhitest = cmddir + "/../bin/cd-hit-v4.6.1-2012-08-27/cd-hit-est"
# --- Collect plot.txt inputs from -f and/or every *plot.txt under -d ---
plotfiles = []
if resultfile:
    plotfiles.append(resultfile)
if resultdir:
    for afile in os.listdir(resultdir):
        if afile.endswith("plot.txt"):
            plotfiles.append(resultdir + "/" + afile)
if len(plotfiles) < 1:
    print("Usage: " + sys.argv[0] + " <-d folder> <-f plot.txt> <-s False/True>")
    print("       if -d folder is given, all .plot.txt files under that folder will be checked\n")
    print("       use -f filename if only use a single .plot.txt file\n")
    sys.exit("Error: no plot.txt files provided/found\n")
# --- Load the "important" interference-module HMM ids and families ---
imptfam = []
impthmm = []
inf = open(modulefile, "r")
for aline in inf:
    subs = aline.split()
    if subs[0] not in impthmm:
        impthmm.append(subs[0])
    if subs[2] not in imptfam:
        # Drop the trailing character of the family field.
        imptfam.append(subs[2][:-1])
inf.close()
# --- Tally repeats, cas genes, and per-type / per-subtype gene counts ---
imptcas = 0
crispr = 0
cas9 = 0
cpf1 = 0
repeat, typeI, typeII, typeIII, typeIV, typeV = 0, 0, 0, 0, 0, 0
typelist = ["Type-I", "Type-II", "Type-III", "Type-IV", "Type-V"]
typegene = [0] * 5
typegeneimp = {}
repeatseq = []
castot = 0
subtypespt, subtypesptimp = {}, {}
for afile in plotfiles:
    #print("plotfile " + afile)
    inf = open(afile, "r")
    for aline in inf:
        # Columns 5 and 6 of a plot.txt line: feature name and type label.
        subs = aline.strip().split()
        namedes = subs[4]
        typedes = subs[5]
        if "repeat" in namedes:
            repeat += 1
            repeatseq.append(namedes[7:])
        elif (namedes != "unk") and (namedes != "antiRepeat"):
            castot += 1
            famid, famname) = namedes.split(":") if False else (namedes.split(":")[0], namedes.split(":")[1])
            if famid in impthmm:
                imptcas += 1
            if famname == "cas9":
                cas9 += 1
            elif famname == "cpf1":
                cpf1 += 1
            # Check types from Type-V down to Type-I; matching is by substring
            # (minus the leading "T"), first hit wins.
            for idx in [4, 3, 2, 1, 0]:
                if typelist[idx][1:] in typedes:
                    typegene[idx] += 1
                    if famid in impthmm:
                        if typelist[idx] in typegeneimp:
                            typegeneimp[typelist[idx]] = typegeneimp[typelist[idx]] + 1
                        else:
                            typegeneimp[typelist[idx]] = 1
                    break
            if "Subtype" in typedes:
                if typedes not in subtypespt:
                    subtypespt[typedes] = 1
                    subtypesptimp[typedes] = 0
                else:
                    subtypespt[typedes] = subtypespt[typedes] + 1
                if famid in impthmm:
                    subtypesptimp[typedes] = subtypesptimp[typedes] + 1
    inf.close()
# Rank types and subtypes by gene count, descending.
typedit = {typelist[0]:typegene[0], typelist[1]:typegene[1], typelist[2]:typegene[2], typelist[3]:typegene[3], typelist[4]:typegene[4]}
sorted_typedit = sorted(typedit.items(), key=operator.itemgetter(1), reverse=True)
sorted_subtypespt = sorted(subtypespt.items(), key=operator.itemgetter(1), reverse=True)
# --- De-duplicate repeat sequences at 90% identity with cd-hit-est ---
repeatnr = 0
if len(repeatseq) > 1:
    out = open("repeat-temp-0000.fa", "w")
    for idx in range(len(repeatseq)):
        out.write(">repeat" + str(idx+1)+"\n")
        out.write(repeatseq[idx]+"\n")
    out.close()
    cmd = cdhitest + " -c 0.9 -i repeat-temp-0000.fa -o repeat-temp-0000-nr0.9.fa >/dev/null 2>&1"
    os.system(cmd)
    # Count representative sequences (FASTA headers) in the clustered output.
    inf = open("repeat-temp-0000-nr0.9.fa", "r")
    for aline in inf:
        if aline[0] == '>':
            repeatnr += 1
    inf.close()
    os.remove("repeat-temp-0000.fa")
    os.remove("repeat-temp-0000-nr0.9.fa")
    os.remove("repeat-temp-0000-nr0.9.fa.clstr")
else:
    repeatnr = repeat
# --- Report: totals, then ranked types and subtypes (all vs "important") ---
typefound = []
typeunflt = []
if (not imptcas) and (not repeat) and (genome == True):
    print("NO CRISPR-Cas found")
elif (imptcas or repeat):
    print("CRISPR-Cas found")
    print("#repeat/cas  all  nr/important")
    print("Repeat " + str(repeat) + " " + str(repeatnr))
    print("Cas " + str(castot) + " " + str(imptcas))
    for idx in [0, 1, 2, 3, 4]:
        #if sorted_typedit[idx][1] > 0:
        # Top-ranked type is always reported; lower-ranked types only with
        # >1 important gene or corroborating cas9/cpf1 evidence.
        if (sorted_typedit[idx][1] > 0) and (idx == 0):
            if sorted_typedit[idx][0] in typegeneimp:
                print(sorted_typedit[idx][0] + " " + str(sorted_typedit[idx][1]) + " " + str(typegeneimp[sorted_typedit[idx][0]]))
            else:
                print(sorted_typedit[idx][0] + " " + str(sorted_typedit[idx][1]) + " 0")
        elif (sorted_typedit[idx][1] > 0) and (idx > 0):
            if ((sorted_typedit[idx][0] in typegeneimp) and (typegeneimp[sorted_typedit[idx][0]] > 1)) or ((cas9>0) and (sorted_typedit[idx][0] == "Type-II")) or ((cpf1>0) and (sorted_typedit[idx][0] == "Type-V")):
                if sorted_typedit[idx][0] in typegeneimp:
                    print(sorted_typedit[idx][0] + " " + str(sorted_typedit[idx][1]) + " " + str(typegeneimp[sorted_typedit[idx][0]]))
                else:
                    print(sorted_typedit[idx][0] + " " + str(sorted_typedit[idx][1]) + " 0")
    for idx in range(len(subtypespt)):
        if sorted_subtypespt[idx][1] > 0:
            if sorted_subtypespt[idx][0] in subtypesptimp:
                print(sorted_subtypespt[idx][0] + " " + str(sorted_subtypespt[idx][1]) + " " + str(subtypesptimp[sorted_subtypespt[idx][0]]))
            else:
                print(sorted_subtypespt[idx][0] + " " + str(sorted_subtypespt[idx][1]) + " 0")
|
# Generated by Django 3.0.7 on 2020-09-02 18:38
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds the products m2m (through ProductPackaging) to
    Conditioning. Do not hand-edit beyond comments."""

    dependencies = [
        ('product', '0005_migration_product'),
    ]

    operations = [
        migrations.AddField(
            model_name='conditioning',
            name='products',
            field=models.ManyToManyField(related_name='conditionnings', through='product.ProductPackaging', to='product.Product'),
        ),
    ]
|
#!/usr/bin/env python
# coding: utf-8
# ## Soal 1
# In[14]:
# Problem 1: read name, age, height and echo them back.
nama = str(input("Masukan Nama : "))
Umur = int(input("Masukan umur : ")) # 17 -- but that's a lie
Tinggi = float(input("Masukan tinggi badan : "))
print("Nama saya " + nama + " , umur saya " + f'{Umur}' + " tahun, dan tinggi saya " + f'{Tinggi}' + " cm")
# In[15]:
# Show the inferred types of the three inputs.
print(type(nama))
print(type(Umur))
print(type(Tinggi))
# ## Problem 2: circle area from radius (runs twice for two inputs)
# In[23]:
r = float(input("Masukan Jari-jari Lingkaran : "))
# 22/7 approximates pi; math.pi would be more precise.
luas_lingkaran = round((22/7)*(r*r),2)
print("Luas lingkar dengan jari :" + f'{r}' + "cm, adalah luas lingkaran :", f'{luas_lingkaran}' + "cm\u00b2")
# In[24]:
r = float(input("Masukan Jari-jari Lingkaran : "))
luas_lingkaran = round((22/7)*(r*r),2)
print("Luas lingkar dengan jari :" + f'{r}' + "cm, adalah luas lingkaran :", f'{luas_lingkaran}' + "cm\u00b2")
# ## Problem 3: pass/fail decision from theory and practical exam scores
# In[26]:
ujian_teori = float(input("Nilai Ujian Teori : "))
ujian_praktek = float(input("Nilai Ujian Praktek : "))
# NOTE(review): a score of exactly 70 satisfies both >= and <= comparisons;
# the elif ordering makes >= win. Confirm the intended boundary behavior.
if ujian_teori >= 70 and ujian_praktek >=70:
    print("Selamat anda lulus")
elif ujian_teori >= 70 and ujian_praktek <=70:
    print("Anda harus mengulangi ujian praktek")
elif ujian_teori <= 70 and ujian_praktek >=70:
    print("Anda harus mengulangi ujian teori")
else:
    print("Anda harus mengulangi ujian teori dan praktek")
# Star pyramid: range(-1, deret) makes deret+1 rows; the first row has zero stars.
# In[39]:
no = -1
deret = int(input("Masukkan Jumlah Piramida Segitiga :"))
for x in range(-1,deret):
    no = no + 1
    print(" " *(deret - x),"*" *no)
# In[ ]:
# In[ ]:
|
from os.path import isfile, dirname, join, realpath
from cached_property import cached_property
from isabl_cli import AbstractApplication
from isabl_cli import options
from myapps.utils import get_docker_command
from myapps.apps.merge.apps import Merge
class Annot(AbstractApplication):
    """isabl application: annotate merged SV calls with OncoKB via a dockerized
    pysam/requests environment, producing an annotated BED file."""
    NAME = "ANOT"
    VERSION = "0.1"
    ASSEMBLY = "hg19_2"
    SPECIES = "HUMAN"

    cli_help = "Annotation with OncoKB."
    cli_options = [options.TARGETS]
    application_description = cli_help

    # Result registry: a single BED/TSV output keyed as "svs".
    application_results = {
        "svs": {
            "frontend_type": "tsv-file",
            "description": "BED file with annotations.",
            "verbose_name": "Bed annotation",
            "external_link": None,
        }
    }

    dir_path = dirname(realpath(__file__))

    application_settings = {
        "cores": "1",
        # NOTE(review): `-it` allocates an interactive TTY, which typically
        # fails in non-interactive batch execution -- confirm this runs where
        # a TTY is available.
        "docker_pysam": "docker run -it --entrypoint '' -v /mnt/efs/myisabl:/mnt/efs/myisabl danielrbroad/pysamdocker /bin/bash ",
    }

    @cached_property
    def _apps(self):
        """Lazily-instantiated upstream applications."""
        return {
            "merge": Merge(),
        }

    @cached_property
    def analyses_dependencies(self):
        """Upstream analyses whose results this app consumes: the merged SVs."""
        return [
            {"app": self._apps["merge"], "result": "svs", "name": "merge"},
        ]

    def get_dependencies(self, targets, references, settings):
        """Resolve each dependency's result for the first target.

        Returns (analysis keys, {dependency name: result path}).
        """
        inputs = {}
        analyses = []
        for dependency in self.analyses_dependencies:
            input_name = dependency["name"]
            inputs[input_name], key = self.get_result(
                targets[0],
                application_key=dependency["app"].primary_key,
                application_name=dependency["app"].NAME,
                result_key=dependency["result"],
                targets=targets,
                references=references,
            )
            analyses.append(key)
        return analyses, inputs

    def get_experiments_from_cli_options(self, **cli_options):
        """One (single-target, no-reference) tuple per CLI target."""
        return [([i], []) for i in cli_options["targets"]]

    def validate_experiments(self, targets, references):
        """This app runs on exactly one target and no references."""
        self.validate_one_target_no_references(targets, references)

    def get_command(self, analysis, inputs, settings):
        """Build the dockerized svanno.py command writing annotation.bed,
        then chown the output dir back to ec2-user."""
        outdir = analysis.storage_url
        outbed = join(outdir, "annotation.bed")
        inp = inputs["merge"]
        return " ".join(
            map(
                str,
                [
                    settings.docker_pysam,
                    "pip install requests",
                    "python /mnt/efs/myisabl/svanno/svanno.py",
                    "-i",
                    inp,
                    "-o",
                    outbed,
                    "&& sudo chown -R ec2-user {}".format(outdir),
                ],
            )
        )

    def get_analysis_results(self, analysis):
        """Register the output BED, asserting it was actually produced."""
        results = {
            "svs": join(analysis.storage_url, "annotation.bed"),
        }
        for i in results.values():
            assert isfile(i), f"Missing result file {i}"
        return results
|
def Bs(arr, key):
    """Binary search for `key` in sorted `arr`.

    Returns a human-readable message with the 1-based position when found,
    or "key not found" otherwise.
    """
    lo, hi = 0, len(arr) - 1
    while lo <= hi:
        middle = (lo + hi) // 2
        probe = arr[middle]
        if probe == key:
            return "Element found at position : " + str(middle + 1)
        if probe < key:
            lo = middle + 1
        else:
            hi = middle - 1
    return "key not found"
# Smoke test: 1 is the first element, so "position : 1" is expected.
b = Bs([1,2,3,4,6,8,9,11,12,34,55,66,77,88,99,111,112,113,115,118],1)
print(b)
|
# -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : CoT_Medicator.py
# @Time : Created at 2020/4/20
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import torch
import torch.nn.functional as F
from models.generator import LSTMGenerator
class Cot_D(LSTMGenerator):
    """CoT mediator: an LSTM language model reused to score target tokens
    under the model's predicted distributions."""
    def __init__(self, embedding_dim, hidden_dim, vocab_size, max_seq_len, padding_idx, gpu=False):
        super(Cot_D, self).__init__(embedding_dim, hidden_dim, vocab_size, max_seq_len, padding_idx, gpu)

    def get_pred(self, input, target):
        """Return the model's score for each gold token in `target` given `input`.

        NOTE(review): assumes forward() yields per-token outputs with the
        vocab on the last axis, aligned with target.view(-1) -- confirm
        against LSTMGenerator.forward.
        """
        pred = self.forward(input, self.init_hidden(input.size(0)))
        # One-hot mask selects the predicted value of each gold token.
        target_onehot = F.one_hot(target.view(-1), self.vocab_size).float()
        pred = torch.sum(pred * target_onehot, dim=-1)
        return pred
|
from django.urls import path
from . import views
urlpatterns = [
    # App root: timetable search view.
    path('', views.SearchTimetable.as_view(), name="searchTimetable")
]
|
import logging
import os
from paths import KEYWORDS_PATH, CACHE_DIR
# Configure root logging once at import; per-module loggers inherit this format.
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s [%(levelname)s] %(threadName)s:(%(name)s) [%(funcName)s()] %(message)s')
logger = logging.getLogger(__name__)
from twi_spider import TwiSpider
if __name__ == '__main__':
    logger.setLevel(logging.DEBUG)

    # Make sure the on-disk cache directory exists before the spider starts.
    if not os.path.exists(CACHE_DIR):
        os.makedirs(CACHE_DIR)

    # One search keyword per line in the keywords file.
    with open(KEYWORDS_PATH) as keyword_file:
        search_terms = [entry.rstrip() for entry in keyword_file]

    TwiSpider(search_terms).run()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-24 17:12
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated: alters the date fields of event/project/track.

    NOTE(review): the defaults are datetimes frozen at makemigrations time
    (2016-08-24) rather than a callable like timezone.now -- typical Django
    footgun; confirm intended.
    """

    dependencies = [
        ('tracks', '0008_auto_20160824_1711'),
    ]

    operations = [
        migrations.AlterField(
            model_name='event',
            name='date',
            field=models.DateField(default=datetime.datetime(2016, 8, 24, 17, 12, 7, 543418, tzinfo=utc)),
        ),
        migrations.AlterField(
            model_name='project',
            name='date',
            field=models.DateField(default=datetime.datetime(2016, 8, 24, 17, 12, 7, 563167, tzinfo=utc)),
        ),
        migrations.AlterField(
            model_name='track',
            name='date',
            field=models.DateField(default=datetime.datetime(2016, 8, 24, 17, 12, 7, 563728, tzinfo=utc)),
        ),
    ]
|
# Build the chain of "prefix" sets of A -- {0}, {0,1}, ..., {0..9} -- stored
# as frozensets so they can themselves be members of a set.
result = None
A = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}

result = set()
accumulated = set()
for element in A:
    accumulated.add(element)
    result.add(frozenset(accumulated))

print(result)

assert frozenset((0, 1, 2, 3, 4, 5, 6, 7, 8, 9)) in result
# class with __init__
class C1:
    """Demo class: zero-argument constructor presets a fixed attribute."""
    def __init__(self):
        # Every C1 instance starts with x == 1.
        self.x = 1
# Instantiate C1 and confirm its type and preset attribute.
c1 = C1()
print(type(c1) == C1)
print(c1.x)
class C2:
    """Demo class: constructor stores its single argument as attribute x."""
    def __init__(self, x):
        self.x = x
# Instantiate C2 with an argument and confirm type and stored value.
c2 = C2(4)
print(type(c2) == C2)
print(c2.x)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for ged4py encodings handling."""
import io
import logging
import pytest
from ged4py.parser import GedcomReader, CodecError
def _check_log_rec(rec, level, msg, args):
    """Assert a captured log record has the given level, message substring,
    and interpolation args (in that check order)."""
    assert rec.levelno == level
    assert msg in rec.msg
    assert rec.args == args
def test_001_standard():
    """Test standard encodings declared in the GEDCOM CHAR record:
    ASCII, ANSEL (mapped to the 'gedcom' codec), and UTF-8 with and
    without a byte-order mark."""
    file = io.BytesIO(b"0 HEAD\n1 CHAR ASCII\n0 TRLR")
    reader = GedcomReader(file)
    assert reader._encoding == "ascii"
    file = io.BytesIO(b"0 HEAD\n1 CHAR ANSEL\n0 TRLR")
    reader = GedcomReader(file)
    assert reader._encoding == "gedcom"
    file = io.BytesIO(b"0 HEAD\n1 CHAR UTF-8\n0 TRLR")
    reader = GedcomReader(file)
    assert reader._encoding == "utf-8"
    # Same declaration, but the file starts with a UTF-8 BOM (EF BB BF).
    file = io.BytesIO(b"\xef\xbb\xbf0 HEAD\n1 CHAR UTF-8\n0 TRLR")
    reader = GedcomReader(file)
    assert reader._encoding == "utf-8"
# UTF-16 is broken, do not use
# Each tuple is (GEDCOM CHAR value, python codec it should map to, whether
# the mapping is ambiguous — ambiguous names produce an extra WARNING record).
@pytest.mark.parametrize('enc,pyenc,ambig',
                         [("IBMPC", "cp437", True),
                          ("IBM", "cp437", True),
                          ("IBM-PC", "cp437", True),
                          ("OEM", "cp437", True),
                          ("MSDOS", "cp850", True),
                          ("IBM DOS", "cp850", True),
                          ("MS-DOS", "cp850", True),
                          ("ANSI", "cp1252", True),
                          ("WINDOWS", "cp1252", True),
                          ("IBM WINDOWS", "cp1252", True),
                          ("IBM_WINDOWS", "cp1252", True),
                          ("WINDOWS-1250", "cp1250", False),
                          ("WINDOWS-1251", "cp1251", False),
                          ("CP1252", "cp1252", False),
                          ("ISO-8859-1", "iso8859-1", False),
                          ("ISO8859-1", "iso8859-1", False),
                          ("ISO8859", "iso8859-1", True),
                          ("LATIN1", "iso8859-1", True),
                          ("MACINTOSH", "mac-roman", True),
                          ])
def test_002_illegal(enc, pyenc, ambig, caplog):
    """Test for illegal encodings: non-standard CHAR values are mapped to a
    best-guess python codec; an ERROR is always logged and a second WARNING
    is logged when the name is ambiguous.
    """
    caplog.set_level(logging.WARNING)
    # %s formatting works in py27 and py3
    char = ("1 CHAR " + enc).encode()
    file = io.BytesIO(b"0 HEAD\n" + char + b"\n0 TRLR")
    reader = GedcomReader(file)
    assert reader._encoding == pyenc
    # check logging
    assert len(caplog.records) == (2 if ambig else 1)
    _check_log_rec(caplog.records[0], logging.ERROR,
                   "is not a legal character set or encoding",
                   (2, char, enc))
    if ambig:
        _check_log_rec(caplog.records[1], logging.WARNING,
                       "is ambiguous, it will be interpreted as",
                       (enc, pyenc))
def test_003_codec_exceptions():
    """Test codecs-related exceptions: an unknown codec name and a BOM that
    contradicts the declared CHAR value must both raise CodecError."""
    # unknown codec name
    file = io.BytesIO(b"0 HEAD\n1 CHAR NOTCODEC\n0 TRLR")
    with pytest.raises(CodecError):
        GedcomReader(file)
    # BOM disagrees with CHAR (UTF-8 BOM vs. declared ANSEL)
    file = io.BytesIO(b"\xef\xbb\xbf0 HEAD\n1 CHAR ANSEL\n0 TRLR")
    with pytest.raises(CodecError):
        GedcomReader(file)
|
"""
줄을 바꿔 정수(integer) 2개를 입력받아 줄을 바꿔 출력해보자.
"""
num1 = input()
num2 = input()
print(num1)
print(num2) |
#! /usr/bin/python
import sys
import os
from numpy import *
# This script takes the three matrices for the correlation calculation and
# converts them into 0/1 matrices.  The original copy-pasted the same
# read/threshold/write loop three times; it is now a single helper driven by
# a per-file threshold rule, with identical output files.


def dihedral_bit(value):
    """1 when the dihedral angle, shifted from (-180, 180] into [0, 360), is >= 140 degrees."""
    if value < 0.0:
        value += 360.0
    return 1 if value >= 140.0 else 0


def water_bit(value):
    """1 when the water distance is <= 2.0 (treated as a contact)."""
    return 1 if value <= 2.0 else 0


def dssp_bit(value):
    """1 when the DSSP measure is >= 20.0."""
    return 1 if value >= 20.0 else 0


def binarize_file(src_path, dst_path, to_bit):
    """Read whitespace-separated floats line by line from *src_path* and
    write the same layout to *dst_path* with every value mapped through
    *to_bit* (each output line is space-joined bits plus a newline)."""
    with open(src_path, 'r') as src, open(dst_path, 'w') as dst:
        for line in src:
            bits = [str(to_bit(float(word))) for word in line.split()]
            dst.write(" ".join(bits))
            dst.write('\n')


if __name__ == '__main__':
    binarize_file('dihVal18.new', 'dihVal18.bin', dihedral_bit)
    binarize_file('waterdistribution_shift', 'waterdistribution_shift.bin', water_bit)
    binarize_file('dssp4corr', 'dssp4corr.bin', dssp_bit)
# All elements of the list are pairwise distinct.
# Swap the minimum and maximum elements of the list.
def swap_min_max(nums):
    """Swap the minimum and maximum elements of *nums* in place.

    Assumes the elements are pairwise distinct (per the task statement),
    so ``index()`` finds the unique position of each extreme.
    """
    max_i = nums.index(max(nums))
    min_i = nums.index(min(nums))
    nums[max_i], nums[min_i] = nums[min_i], nums[max_i]


if __name__ == '__main__':
    numList = list(map(int, input().split()))
    swap_min_max(numList)
    print(*numList)
|
def reversDigits(num):
    """Return the decimal digits of *num* reversed, as a string.

    Fix: the original joined the reversed digits with ',' so 123 came back
    as '3,2,1' instead of '321', contradicting the "Reverse of no." output.
    """
    return str(num)[::-1]


if __name__ == "__main__":
    num = int(input("enter a number"))
    print("Reverse of no. is ", reversDigits(num))
|
"""
Find all undefined reference targets and attempt to determine
if they are code by emulation behaviorial analysis.
(This module works best very late in the analysis passes)
"""
import envi
import vivisect
import vivisect.exc as v_exc
from envi.archs.i386.opconst import *
import vivisect.impemu.monitor as viv_imp_monitor
import logging
from vivisect.const import *
logger = logging.getLogger(__name__)
class watcher(viv_imp_monitor.EmulationMonitor):
    """
    Emulation monitor that scores whether the bytes at an undefined target
    behave like real code: it tracks the mnemonic distribution, whether a
    return (or no-return call) was reached, and aborts on known-bad opcodes.
    """

    def __init__(self, vw, tryva):
        viv_imp_monitor.EmulationMonitor.__init__(self)
        self.vw = vw
        self.tryva = tryva          # candidate va being evaluated
        self.hasret = False         # saw IF_RET (or a known no-return va/API)
        self.mndist = {}            # mnemonic -> occurrence count
        self.insn_count = 0
        self.lastop = None          # last opcode seen by prehook()
        self.badcode = False        # set by logAnomaly(); disqualifies target
        self.arch = None            # lazily captured from the first opcode
        self.plat = vw.getMeta('Platform')
        self.badops = vw.arch.archGetBadOps()

    def logAnomaly(self, emu, eip, msg):
        # Any emulation anomaly disqualifies the candidate outright.
        self.badcode = True
        emu.stopEmu()

    def looksgood(self):
        """Strict check: must have hit a return, no anomalies, and no single
        mnemonic may dominate the instruction mix."""
        if not self.hasret or self.badcode:
            return False
        # TODO: Rethink this logic. Otherwise I'll breakout movfuscator again.
        # if there is 1 mnem that makes up over 50% of all instructions then flag it as invalid
        # NOTE(review): the threshold actually applied below is 67% (and only
        # once more than 4 instructions were seen), not the 50% stated above.
        for mnem, count in self.mndist.items():
            if round(float( float(count) / float(self.insn_count)), 3) >= .67 and self.insn_count > 4:
                return False
        return True

    def iscode(self):
        """Weaker check than looksgood(): the last emulated instruction must
        be a ret/branch/call and no mnemonic may exceed 60% of the mix."""
        op = self.lastop
        if not self.lastop:
            return False
        if not (op.iflags & envi.IF_RET) and not (op.iflags & envi.IF_BRANCH) and not (op.iflags & envi.IF_CALL):
            return False
        for mnem, count in self.mndist.items():
            # XXX - CONFIG OPTION
            if round(float(float(count) / float(self.insn_count)), 3) >= .60:
                return False
        return True

    def prehook(self, emu, op, eip):
        """Called before each emulated instruction: vets arch-specific bad
        opcodes, tracks returns and mnemonic counts, and stops emulation when
        the flow collides with an already-defined non-opcode location."""
        if self.arch is None:
            self.arch = op.iflags & envi.ARCH_MASK
        if self.arch == envi.ARCH_I386:
            if op.opcode == INS_OUT:
                emu.stopEmu()
                raise v_exc.BadOutInstruction(op.va)
            if op.opcode == INS_TRAP:
                # int with EAX == 1 is treated as a process-exit (no return).
                reg = emu.getRegister(envi.archs.i386.REG_EAX)
                if reg == 1:
                    emu.stopEmu()
                    self.vw.addNoReturnVa(eip)
        if self.arch == envi.ARCH_AMD64:
            if op.opcode == INS_OUT:
                emu.stopEmu()
                raise v_exc.BadOutInstruction(op.va)
        if op in self.badops:
            emu.stopEmu()
            raise v_exc.BadOpBytes(op.va)
        if op.iflags & envi.IF_RET:
            self.hasret = True
            emu.stopEmu()
        self.lastop = op
        loc = self.vw.getLocation(eip)
        if loc is not None:
            va, size, ltype, linfo = loc
            if ltype != vivisect.LOC_OP:
                emu.stopEmu()
                raise Exception("HIT LOCTYPE %d AT 0x%.8x" % (ltype, va))
        cnt = self.mndist.get(op.mnem, 0)
        self.mndist[op.mnem] = cnt + 1
        self.insn_count += 1
        if self.vw.isNoReturnVa(eip):
            self.hasret = True
            emu.stopEmu()
        # FIXME do we need a way to terminate emulation here?

    def apicall(self, emu, op, pc, api, argv):
        # if the call is to a noret API we are done
        if self.vw.isNoReturnVa(pc):
            self.hasret = True
            emu.stopEmu()
def analyze(vw):
    """
    Find all undefined reference targets (named vas and pointer-xref targets
    with no location) and attempt to determine whether they are code via
    emulation behavioral analysis; loops until a pass defines no new
    functions.  Non-code targets are made into strings/unicode/pointers.
    """
    flist = vw.getFunctions()
    tried = set()
    while True:
        docode = []     # candidates that pass the strict looksgood() check
        bcode = []      # weaker iscode() candidates (greedy mode only)
        vatodo = set([va for va, name in vw.getNames() if vw.getLocation(va) is None and va not in tried])
        vatodo = vatodo.union([tova for _, tova, _, _ in vw.getXrefs(rtype=REF_PTR) if vw.getLocation(tova) is None and tova not in tried])
        for va in vatodo:
            loc = vw.getLocation(va)
            if loc is not None:
                # A location appeared since the candidate list was built
                # (an earlier va in this pass may have defined it).
                if loc[L_LTYPE] == LOC_STRING:
                    vw.makeString(va)
                    tried.add(va)
                elif loc[L_LTYPE] == LOC_UNI:
                    vw.makeUnicode(va)
                    tried.add(va)
                continue
            if vw.isDeadData(va):
                continue
            # Skip it if we've tried it already.
            if va in tried:
                continue
            tried.add(va)
            # if it's not exectuable, check to see if it's at least readable, in which case
            # we can check for other location types
            # otherwise, try emulating it to see if it feels like code
            if not vw.isExecutable(va):
                if not vw.isReadable(va):
                    continue
                if vw.isProbablyUnicode(va):
                    vw.makeUnicode(va)
                elif vw.isProbablyString(va):
                    vw.makeString(va)
            else:
                emu = vw.getEmulator(va=va)
                wat = watcher(vw, va)
                emu.setEmulationMonitor(wat)
                try:
                    emu.runFunction(va, maxhit=1)
                except Exception:
                    continue
                if wat.looksgood():
                    docode.append(va)
                # flag to tell us to be greedy w/ finding code
                # XXX - visi is going to hate this..
                elif wat.iscode() and vw.greedycode:
                    bcode.append(va)
                else:
                    if vw.isProbablyUnicode(va):
                        vw.makeUnicode(va)
                    elif vw.isProbablyString(va):
                        vw.makeString(va)
                    else:
                        # if we get all the way down here, and it has a name, it's gotta be *something*
                        if vw.getName(va):
                            try:
                                vw.makePointer(va)
                            except Exception as e:
                                # FIX: format was '0x.8%x' (prints a literal
                                # "0x.8" then the hex); '0x%.8x' is the
                                # intended zero-padded 8-digit address.
                                logger.warning('Emucode failed to make 0x%.8x due to %s', va, str(e))
                                continue
        if len(docode) == 0:
            break
        docode.sort()
        for va in docode:
            if vw.getLocation(va) is not None:
                continue
            try:
                logger.debug('discovered new function: 0x%x', va)
                vw.makeFunction(va)
            except:
                # makeFunction can fail for many workspace reasons; keep going.
                continue
        bcode.sort()
        for va in bcode:
            if vw.getLocation(va) is not None:
                continue
            # TODO: consider elevating to functions?
            vw.makeCode(va)
    dlist = vw.getFunctions()
    newfuncs = set(dlist) - set(flist)
    for fva in newfuncs:
        vw.setVaSetRow('EmucodeFunctions', (fva,))
    vw.vprint("emucode: %d new functions defined (now total: %d)" % (len(dlist)-len(flist), len(dlist)))
|
#!/usr/bin/env python
"""
Unit tests for python Enum class
"""
__author__ = 'VMware, Inc.'
__copyright__ = 'Copyright 2016 VMware, Inc. All rights reserved. -- VMware Confidential' # pylint: disable=line-too-long
import logging
import unittest
from vmware.vapi.bindings.enum import Enum
from vmware.vapi.bindings.type import EnumType
class PowerState(Enum):
    """
    The ``PowerState`` class defines the valid power states for a virtual
    machine.  Values are declared as ``None`` placeholders and filled in by
    the ``_set_values`` call after the class body, mirroring vapi's
    generated bindings.
    """
    POWERED_OFF = None
    """
    The virtual machine is powered off.
    """
    POWERED_ON = None
    """
    The virtual machine is powered on.
    """
    SUSPENDED = None
    """
    The virtual machine is suspended.
    """
    def __init__(self, string):
        """
        :type string: :class:`str`
        :param string: String value for the :class:`State` instance.
        """
        # NOTE(review): the base __init__ is invoked with only the string
        # (no explicit self) — presumably fine because vapi's Enum is
        # str-backed and the real work happens in __new__; confirm against
        # vmware.vapi.bindings.enum.Enum.
        Enum.__init__(string)
# Instantiate the canonical values and register them on the class — done
# after the class body because each enum value is itself a PowerState
# instance.  Any other string (e.g. PowerState('RANDOM')) is "unknown".
PowerState._set_values([
    PowerState('POWERED_OFF'),
    PowerState('POWERED_ON'),
    PowerState('SUSPENDED')
])
PowerState._set_binding_type(EnumType('vm.power.state', PowerState))
class TestVapiEnum(unittest.TestCase):
    """Unit tests for the vapi Enum binding behavior."""

    def setUp(self):
        logging.basicConfig(level=logging.INFO)

    def test_get_values(self):
        # get_values() must return exactly the values registered via
        # _set_values, independent of order.
        actual_values = PowerState.get_values()
        expected_values = [PowerState.POWERED_ON, PowerState.POWERED_OFF,
                           PowerState.SUSPENDED]
        self.assertEqual(set(actual_values), set(expected_values))
        # NOTE(review): comparing enum values to plain strings — relies on
        # vapi's Enum subclassing str; confirm against the bindings.
        expected_values2 = ['POWERED_ON', 'POWERED_OFF', 'SUSPENDED']
        self.assertEqual(set(actual_values), set(expected_values2))

    def test_is_unknown_with_known_val(self):
        val = PowerState.POWERED_ON
        self.assertFalse(val.is_unknown())

    def test_is_unknown_with_unknown_val(self):
        # A value never registered via _set_values must report unknown.
        val = PowerState('RANDOM')
        self.assertTrue(val.is_unknown())

if __name__ == '__main__':
    unittest.main()
|
#! /usr/bin/env python
#coding=utf-8
# Minimal ROS service-client smoke test: calls the Rb_grepSetCommand service
# with a fixed 4-element command array and prints the response.
import rospy
from rb_msgAndSrv.srv import rb_ArrayAndBool, rb_ArrayAndBoolRequest, rb_ArrayAndBoolResponse
rospy.init_node("cli_test")
client = rospy.ServiceProxy("Rb_grepSetCommand", rb_ArrayAndBool)
# a = rb_ArrayAndBool()
# a.data =
# NOTE(review): calling with a bare list relies on the request type having a
# single array field — confirm against the rb_ArrayAndBool service definition.
print(client.call([0, 2, 1, 0]))
# Generated by Django 2.2.5 on 2019-11-06 10:11
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 2.2.5 `makemigrations`: replaces the
    # free-text ``bio`` field with five structured, optional CharFields.
    # NOTE: RemoveField drops the existing ``bio`` data irreversibly.
    dependencies = [
        ('private_profiles', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='profile',
            name='bio',
        ),
        migrations.AddField(
            model_name='profile',
            name='dislikes',
            field=models.CharField(blank=True, max_length=250),
        ),
        migrations.AddField(
            model_name='profile',
            name='github',
            field=models.CharField(blank=True, max_length=250),
        ),
        migrations.AddField(
            model_name='profile',
            name='languages',
            field=models.CharField(blank=True, max_length=250),
        ),
        migrations.AddField(
            model_name='profile',
            name='likes',
            field=models.CharField(blank=True, max_length=250),
        ),
        migrations.AddField(
            model_name='profile',
            name='skills',
            field=models.CharField(blank=True, max_length=250),
        ),
    ]
|
class PlayerCaracter:
    """Demo character class.

    While the class-level ``membership`` flag is truthy, ``__init__``
    overwrites whatever name was passed in with ``'juan'`` — intentional
    in this tutorial snippet.
    """

    membership = True  # class attribute, shared by every instance

    def __init__(self, name, age):
        self.name = name
        self.age = age
        # Members all get the same forced name.
        if PlayerCaracter.membership:
            self.name = 'juan'

    def run(self):
        print('run')

    def shout(self):
        print(f'my name is {self.name}')
player1 = PlayerCaracter( 'juan', 33 )
player2 = PlayerCaracter( 'Karen', 77 )
print( player1 )                 # default repr: <__main__.PlayerCaracter object at 0x...>
print( player2 )
print( player1.membership )      # class attribute resolved through the instance
print( player2.membership )
print( player1.shout() )         # shout() prints, then returns None — so 'None' is also printed
print( player2.shout() )
# help(player1)
# import libraries
from bs4 import BeautifulSoup
#import urllib.request
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import csv
import time
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#from sklearn.model_selection import train_test_split
#from sklearn.linear_model import LinearRegression

# specify the url
url = 'https://kawalpemilu.org/#pilpres:0'

# The path to where you have your chrome webdriver stored.
# FIX: raw string — in a normal literal '\U' begins an 8-hex-digit unicode
# escape, so the original line was a SyntaxError under Python 3.
webdriver_path = r'C:\Users\haditiyawijaya\Downloads\chromedriver_win32\chromedriver.exe'

# Add arguments telling Selenium to not actually open a window
chrome_options = Options()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--window-size=1920x1080')

# Fire up the headless browser.
# NOTE(review): executable_path= was removed in Selenium 4; if the installed
# selenium is >= 4, this needs a Service(webdriver_path) object instead.
browser = webdriver.Chrome(executable_path=webdriver_path,
                           options=chrome_options)

# Load webpage
browser.get(url)

# It can be a good idea to wait for a few seconds before trying to parse the page
# to ensure that the page has loaded completely.
time.sleep(10)

# Parse HTML, close browser
soup = BeautifulSoup(browser.page_source, 'html.parser')
# print(soup)
pretty = soup.prettify()
browser.quit()
# find results within table
# NOTE(review): assumes the page contains a <table class="table"> with
# <tr class="row"> entries; find() returns None if the markup changes.
results = soup.find('table',{'class':'table'})
rows = results.find_all('tr',{'class':'row'})
#array = []
#jokowi = []
prabowo = []
sah = []
# print(rows)
for r in rows:
    # find all columns per result
    data = r.find_all('td')
    # check that columns have data
    if len(data) == 0:
        continue
    # write columns to variables
    #wilayah = data[1].find('a').getText()
    #satu = data[2].find('span', attrs={'class':'abs'}).getText()
    dua = data[3].find('span', attrs={'class': 'abs'}).getText()
    tiga = data[4].find('span', attrs={'class': 'sah'}).getText()
    # Remove decimal point ('.' is the thousands separator in the
    # Indonesian-formatted counts, e.g. "1.234.567")
    #satu = satu.replace('.','')
    dua = dua.replace('.','')
    tiga = tiga.replace('.','')
    # Cast Data Type Integer
    #satu = int(satu )
    dua = int(dua)
    tiga = int(tiga)
    #array.append(wilayah)
    #jokowi.append(satu)
    prabowo.append(dua)
    sah.append(tiga)
# Create Dictionary: value1 = Prabowo votes, value2 = total valid votes
#my_dict = {'wilayah':array,'value1':jokowi,'value2':prabowo,'value3':sah}
my_dict = {'value1':prabowo,'value2':sah}
# Create Dataframe
df = pd.DataFrame(my_dict)
#print(df)
# slicing data: X = all columns but the last (value1), y = column 1 (value2)
X = df.iloc[:, :-1].values
y = df.iloc[:, 1].values
# Split the data into a training set and a test set (1/3 held out).
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 1/3, random_state = 0)
#Feature Scaling (kept disabled)
"""from sklearn.preprocessing import StandardScaler
scale_X = StandardScaler()
X_train = scale_X.fit_transform(X_train)
X_test = scale_X.transform(X_test)"""
# Fit simple linear regression on the training set.
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)
# Predict the test-set results (disabled)
#y_pred = regressor.predict(X_test)
# Visualize the test-set results: scatter of held-out points plus the
# fitted regression line drawn over the training inputs.
plt.scatter(X_test, y_test, color = 'red')
plt.plot(X_train, regressor.predict(X_train), color = 'blue')
plt.title('Prabowo vs Suara Sah (Test set)')
plt.xlabel('Prabowo')
plt.ylabel('Suara Sah')
plt.show()
|
import os
import csv
import cv2
import imutils
import random
import numpy as np
from pprint import pprint
from collections import Counter
from PIL import Image as Img
from PIL import ImageTk
from random import randint
from Tkinter import *
import Tkinter,tkFileDialog, tkMessageBox
# Location of the tournament-results CSV consumed by the GUI and the card
# generator below.
meta_path = "./Tournament_Logs/Meta_Information.csv"
def Card_Information_GUI(meta_path):
    """
    Show a Tkinter (Python 2) dialog letting the user pick a player and a
    tournament discovered from the meta CSV.  Returns a tuple
    ``(player, tournament, bad_exit)`` where ``bad_exit`` is True when the
    window was closed without pressing Start.
    """
    class Generator_Screen:
        # Results are stashed on the class so they survive master.destroy().
        PLAYER = " "
        TOURNAMENT = " "
        bad_exit = True
        def Exit_Program(self,event='<Button-1>'):
            self.master.destroy()
        def Start_Program(self,event='<Button-1>'):
            # Capture the dropdown selections, then close the window.
            Generator_Screen.PLAYER = self.player_var.get()
            Generator_Screen.TOURNAMENT = self.tournament_var.get()
            Generator_Screen.bad_exit = False
            self.master.destroy()
        def Retrieve_Information(self,meta_path):
            # Collect the distinct tournament names (column 3) and player
            # names (columns 5+) found in the CSV, seeded with the default
            # menu entries "None" / "All".
            player_array = ["None"]
            tournament_array = ["All"]
            with open(meta_path, 'rb') as csvfile:
                reader = csv.reader(csvfile,delimiter=',',quotechar='|')
                for row in reader:
                    if row[3] not in tournament_array:
                        tournament_array.append(row[3])
                    players = row[5:]
                    for groups in players:
                        for people in players:
                            if people.strip() not in player_array and len(people) > 0:
                                player_array.append(people.strip())
            return player_array,tournament_array
        def __init__(self,master):
            # configure master window
            self.master = master
            self.master.resizable(0,0)
            master.title('Pump It Up Card Generator')
            # creates icon in top left corner (Windows only)
            if os.name == 'nt':
                self.master.iconbitmap("./Graphics/icon.ico")
            p_arr,t_arr = self.Retrieve_Information(meta_path)
            self.player_var = StringVar(self.master)
            self.tournament_var = StringVar(self.master)
            self.player_var.set(p_arr[0])
            self.tournament_var.set(t_arr[0])
            # TODO: Create file options at top for full gui support
            # blank bar at top for "file, edit, view, help" setings
            self.File_Options = Tkinter.Frame(self.master, height=25)
            self.File_Options.grid(row=0,column=0)
            # Images for buttons and splash screen (kept as attributes so
            # Tkinter does not garbage-collect them)
            self.Main_Menu = Tkinter.PhotoImage(file="./Graphics/Generator_Logo.gif")
            self.Start = Tkinter.PhotoImage(file="./Graphics/Generate_Card.gif")
            self.Exit = Tkinter.PhotoImage(file="./Graphics/Exit_Program.gif")
            # splash screen image
            self.Selected_Song = Tkinter.Label(self.master, image=self.Main_Menu)
            self.Selected_Song.grid(row=1,column=0)
            # Contains all buttons and widgets
            self.Button_Frame = Tkinter.Frame(self.master, height=90)
            self.Button_Frame.grid(row=2,column=0)
            # important buttons
            self.Command_Options = Tkinter.Frame(self.Button_Frame, height=90)
            self.Command_Options.config(bg="WHITE")
            self.Command_Options.grid(row=0,column=2,pady=(25,25))
            player_text = Tkinter.Label(self.Command_Options, text="Player")
            tournament_text = Tkinter.Label(self.Command_Options, text="Tournament")
            player_text.configure(font=("TkDefaultFont",20))
            tournament_text.configure(font=("TkDefaultFont",20))
            player_text.grid(row=0,column=0,sticky=W+E+N+S)
            tournament_text.grid(row=0,column=1,sticky=W+E+N+S)
            player_menu = Tkinter.OptionMenu(self.Command_Options, self.player_var, *p_arr)
            player_menu.config(bg='WHITE')
            tournament_menu = Tkinter.OptionMenu(self.Command_Options, self.tournament_var, *t_arr)
            tournament_menu.config(bg='WHITE')
            player_menu.configure(font=("TkDefaultFont",20))
            tournament_menu.configure(font=("TkDefaultFont",20))
            player_menu.grid(row=1,column=0,sticky=W+E+N+S)
            tournament_menu.grid(row=1,column=1,sticky=W+E+N+S)
            # starts the card generation
            self.Start_Button = Tkinter.Button(self.Command_Options, image=self.Start, command=self.Start_Program)
            self.Start_Button.grid(row=2,column=0,sticky=W+E+N+S)
            # exits program
            self.Exit_Button = Tkinter.Button(self.Command_Options, image=self.Exit, command=self.Exit_Program)
            self.Exit_Button.grid(row=2,column=1,sticky=W+E+N+S)
            # hotkeys
            self.master.bind("<Return>", self.Start_Program)
            self.master.bind("<Escape>", self.Exit_Program)
    # starts GUI (blocks until the window is destroyed)
    Generator_Root = Tkinter.Tk()
    Generator_Window = Generator_Screen(Generator_Root)
    Generator_Root.mainloop()
    return Generator_Screen.PLAYER,Generator_Screen.TOURNAMENT,Generator_Screen.bad_exit
def Generate_Player_Card(PLAYER,TOURNAMENT,meta_path):
    """
    Build and display a stat-card image (OpenCV) for PLAYER from the match
    rows in the meta CSV, optionally filtered to one TOURNAMENT ("All" =
    lifetime record).  The card is written to ./Player_Cards/ and shown in
    a window until a key is pressed.  (Python 2 codebase.)
    """
    def return_song_information(song_array,diff_array,mode_array,player_array,tournament_array,player="All",Tournament="All"):
        # Produces the "Most Played Song" / "Most Played Level" display lines
        # for the filtered rows.
        # NOTE(review): the bare except returns [] when the filter matches no
        # rows (sorted_song[0] would raise IndexError) — best effort by design.
        try:
            song_counts = Counter()
            level_counts = Counter()
            songs = []
            levels = []
            for i in range(len(song_array)):
                if (player in [player_array[i][0].strip(),player_array[i][1].strip()] or player == "All") and Tournament in [tournament_array[i],"All"]:
                    if mode_array[i] == "Singles":
                        mode = 'S'
                    elif mode_array[i] == "Doubles":
                        mode = 'D'
                    songs.append(song_array[i])
                    song_counts[song_array[i]] += 1
                    level = "%s %s%s" % (song_array[i],mode,diff_array[i])
                    levels.append(level)
                    level_counts[level] += 1
            sorted_song = sorted(songs, key=lambda x: -song_counts[x])
            sorted_level = sorted(levels, key=lambda x: -level_counts[x])
            printstring = ["Most Played Song:", "%s. (%d plays)" % (sorted_song[0],song_counts[sorted_song[0]]) ,"Most Played Level:", "%s. (%d plays)" % (sorted_level[0],level_counts[sorted_level[0]])]
            return printstring
        except:
            return []
    def return_difficulty_information(diff_array,mode_array,player_array,tournament_array,player="All",Tournament="All"):
        # Average / highest / most-played difficulty lines for the filter;
        # same bare-except-returns-[] convention as above.
        try:
            player_diff_array = []
            player_mode_array = []
            for i in range(len(diff_array)):
                if (player in [player_array[i][0].strip(),player_array[i][1].strip()] or player == "All") and Tournament in [tournament_array[i],"All"]:
                    if mode_array[i] == "Singles":
                        mode = 'S'
                    elif mode_array[i] == "Doubles":
                        mode = 'D'
                    player_diff_array.append(diff_array[i])
                    player_mode_array.append("%s%d" %(mode,diff_array[i]))
            avg_value = sum(player_diff_array)/len(player_diff_array)
            highest_difficulty = max(player_diff_array)
            diff_counts = Counter()
            for elements in player_mode_array:
                diff_counts[elements] += 1
            sorted_list = sorted(player_mode_array, key=lambda x: -diff_counts[x])
            most_played = sorted_list[0]
            printstring = ["Average difficulty level: %d." % (avg_value), "Highest difficulty level: %d." % (highest_difficulty), "Most played difficulty: %s. (%d plays)" % (most_played,diff_counts[most_played])]
            return printstring
        except:
            return []
    def return_win_rate(winner_array,player_array,tournament_array,player="All",Tournament="All"):
        # Games played / games won / win-rate lines for the filter.
        try:
            win_counts = Counter()
            for i in range(len(winner_array)):
                if Tournament in [tournament_array[i],"All"] and player in [player_array[i][0].strip(),player_array[i][1].strip()]:
                    win_counts[winner_array[i].strip()] += 1
            player_counts = Counter()
            for i in range(len(player_array)):
                if Tournament in [tournament_array[i],"All"]:
                    for players in player_array[i]:
                        #print players
                        player_counts[players.strip()] += 1
            #print player_counts
            games_won = games_played = 0
            for indexes in win_counts:
                if indexes == player:
                    games_won = win_counts[indexes]
                    break
            for indexes in player_counts:
                if indexes == player:
                    games_played = player_counts[indexes]
                    break
            win_percent = (float(games_won)/float(games_played))*100.00
            printstring = [ "Number of games played: %d." % (games_played),"Number of games won: %d." % (games_won),
                            "Average win rate: %.2f%%. (%d wins %d losses)" % (win_percent,games_won,games_played-games_won)]
            return printstring
        except:
            return []
    # Load the raw match rows from the CSV into parallel arrays.
    song_array = []
    mode_array = []
    diff_array = []
    tournament_array = []
    winner_array = []
    player_array = []
    with open(meta_path, 'rb') as csvfile:
        reader = csv.reader(csvfile,delimiter=',',quotechar='|')
        for row in reader:
            song_array.append(row[0])
            mode_array.append(row[1])
            diff_array.append(int(row[2]))
            tournament_array.append(row[3])
            winner_array.append(row[4])
            player_array.append([players for players in row[5:] if len(players) > 0])
    # Card canvas dimensions in pixels.
    height = 780
    width = 640
    filler_string = ''
    for characters in PLAYER:
        filler_string += "="
    filler_string = filler_string [:-2]
    song_info = return_song_information(song_array,diff_array,mode_array,player_array,tournament_array,PLAYER,TOURNAMENT)
    diff_info = return_difficulty_information(diff_array,mode_array,player_array,tournament_array,PLAYER,TOURNAMENT)
    win_info = return_win_rate(winner_array,player_array,tournament_array,PLAYER,TOURNAMENT)
    # Pick one of two splash headers; the accent color is paired by index.
    splash_array = ["./Graphics/Top_Generator_1.jpg","./Graphics/Top_Generator_2.jpg"]
    splash_path = random.choice(splash_array)
    accent_array = [(78,116,16),(32,6,96)]
    accent_color = accent_array[splash_array.index(splash_path)]
    logo_path = "./Graphics/Prime2_Logo.png"
    splash_image = cv2.imread(splash_path)
    logo_image = cv2.imread(logo_path)
    splash_image = imutils.resize(splash_image,width=width)
    logo_image = imutils.resize(logo_image,width=100)
    blank_image = np.zeros((height,width,3), np.uint8)
    blank_image[0:splash_image.shape[0], 0:splash_image.shape[1]] = splash_image
    font = cv2.FONT_HERSHEY_SIMPLEX
    fontScale = 0.75
    fontColor = (255,255,255)
    lineType = 2
    # Black name box with a white border over the splash.
    cv2.rectangle(blank_image,(25,25),(25+275,splash_image.shape[0]-25),(0,0,0),-1)
    cv2.rectangle(blank_image,(25,25),(25+275,splash_image.shape[0]-25),(255,255,255),3)
    # Blit the logo, treating pure black pixels as transparent.
    for j in range(logo_image.shape[0]):
        for i in range(logo_image.shape[1]):
            if logo_image[j][i][0] != 0 or logo_image[j][i][1] != 0 or logo_image[j][i][2] != 0:
                blank_image[j][i+25][0] = logo_image[j][i][0]
                blank_image[j][i+25][1] = logo_image[j][i][1]
                blank_image[j][i+25][2] = logo_image[j][i][2]
    cv2.putText(blank_image,PLAYER, (50,100),font,2*fontScale,fontColor,lineType*2)
    y_offset = 25+splash_image.shape[0]
    #cv2.rectangle(blank_image,(0,25+y_offset),(width,65+y_offset),accent_color,-1)
    if TOURNAMENT == "All":
        tournament_info = "Lifetime Record"
        TOURNAMENT = "Lifetime_Record"
    else:
        tournament_info = "Tournament: %s" % TOURNAMENT
        # Spaces become '-' so TOURNAMENT is safe to use in the file name.
        sublist = ''
        for elements in TOURNAMENT:
            if elements != ' ':
                sublist += elements
            else:
                sublist += '-'
        TOURNAMENT = sublist
    # Section header bar, then the win-rate lines.
    cv2.rectangle(blank_image,(0,y_offset-25),(width,40+y_offset-25),accent_color,-1)
    cv2.putText(blank_image,tournament_info, (10,y_offset),font,fontScale,fontColor,lineType)
    y_offset += 40
    for elements in win_info:
        cv2.putText(blank_image,elements,(10,y_offset),font,fontScale,fontColor,lineType)
        y_offset += 40
    y_offset += 40
    # Song lines: "Most ..." headings get an accent bar; values get extra gap.
    for elements in song_info:
        if "Most" in elements:
            cv2.rectangle(blank_image,(0,y_offset-25),(width,40+y_offset-25),accent_color,-1)
            cv2.putText(blank_image,elements, (10,y_offset),font,fontScale,fontColor,lineType)
            y_offset += 40
        else:
            cv2.putText(blank_image,elements, (10,y_offset),font,fontScale,fontColor,lineType)
            y_offset += 80
    #y_offset += 40
    cv2.rectangle(blank_image,(0,y_offset-25),(width,40+y_offset-25),accent_color,-1)
    cv2.putText(blank_image,"Song Difficulties:",(10,y_offset),font,fontScale,fontColor,lineType)
    y_offset += 40
    for elements in diff_info:
        cv2.putText(blank_image,elements,(10,y_offset),font,fontScale,fontColor,lineType)
        y_offset += 40
    y_offset += 40
    # Save, then display until any key is pressed.
    cv2.imwrite("./Player_Cards/%s_%s.jpg" % (PLAYER,TOURNAMENT), blank_image)
    cv2.imshow("Frame",blank_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
# Launch the selection GUI; CONTINUE carries bad_exit (True when the window
# was closed without pressing Start), in which case no card is generated.
PLAYER, TOURNAMENT,CONTINUE = Card_Information_GUI(meta_path)
if not CONTINUE:
    Generate_Player_Card(PLAYER,TOURNAMENT,meta_path)
"""
Handles the storage of Github information on a database
"""
import dbutils
# SQLite database file used by every function in this module.
DATABASE_FILE = "github.sqlite"
# Schema DDL: release_tag is keyed on (repository, name); github_commit is
# keyed on its API url; git_compare links two compared objects to a commit url.
TAG_DDL = "CREATE TABLE release_tag " \
          "(repository TEXT, name TEXT, zipball_url TEXT, tarball_url TEXT, commit_sha TEXT ," \
          " commit_url TEXT, PRIMARY KEY (repository, name))"
COMMIT_DLL = "CREATE TABLE github_commit " \
             "(repository TEXT, sha TEXT, commit_author_name TEXT, commit_author_mail TEXT, commit_author_date TEXT, " \
             "commit_committer_name TEXT, commit_committer_mail TEXT, commit_committer_date TEXT, " \
             "commit_message TEXT, commit_tree_sha TEXT, commit_tree_url TEXT, commit_comment_count INTEGER," \
             "url TEXT PRIMARY KEY, html_url TEXT, comments_url TEXT, stats_total INTEGER, stats_additions INTEGER, " \
             "stats_deletions INTEGER)"
COMPARE_DDL = "CREATE TABLE git_compare " \
              "(repository TEXT, first_object TEXT, second_object TEXT, commit_url TEXT)"
# All tables created by create_schema() when this module runs as a script.
TABLE_LIST = [TAG_DDL,
              COMMIT_DLL,
              COMPARE_DDL]
def load_compares(compare_list):
    """Insert comparison rows into the git_compare table.

    :param compare_list: List of comparisons, as tuples.
    :return: None
    """
    sql = "INSERT INTO git_compare VALUES (?, ?, ?, ?)"
    dbutils.load_list(sql, compare_list, DATABASE_FILE)
def load_tags(tag_list):
    """Insert release-tag rows into the release_tag table.

    :param tag_list: List containing tuples with tag information.
    :return: None.
    """
    sql = "INSERT INTO release_tag VALUES (?, ?, ?, ?, ?, ?)"
    dbutils.load_list(sql, tag_list, DATABASE_FILE)
def load_commits(commit_list):
    """Insert commit rows into github_commit, replacing on url conflicts.

    :param commit_list: List containing tuples with commit information.
    :return: None
    """
    sql = ("INSERT OR REPLACE INTO github_commit VALUES "
           "(?, ? ,? ,? ,? ,? ,? ,? ,? ,? ,? ,? ,? ,? ,? ,? ,? ,?)")
    dbutils.load_list(sql, commit_list, DATABASE_FILE)
def get_tags_and_dates(repository_name):
    """Return (tag name, commit author date) pairs for a repository,
    joining each tag to its commit through the commit url.

    :param repository_name: Name of the repository
    :return: Tag names with dates.
    """
    sql = ("SELECT t.name, c.commit_author_date "
           "FROM github_commit c, release_tag t "
           "where t.commit_url = c.url and t.repository=?")
    return dbutils.execute_query(sql, (repository_name,), DATABASE_FILE)
def get_compares_by_commit(commit_url):
    """Return every git_compare row attached to the given commit URL.

    :param commit_url: Commit URL
    :return: List of compare information.
    """
    sql = "SELECT * from git_compare where commit_url=?"
    return dbutils.execute_query(sql, (commit_url,), DATABASE_FILE)
def get_commits_by_repository(repository_name):
    """Return every stored commit row for a specific repository.

    :param repository_name: Repository name.
    :return: List of commits.
    """
    sql = "SELECT * FROM github_commit WHERE repository=?"
    return dbutils.execute_query(sql, (repository_name,), DATABASE_FILE)
def get_commit_by_url(commit_url):
    """Return the commit row(s) whose API url matches commit_url.

    :param commit_url: Commit URL
    :return: List of commits
    """
    sql = "SELECT * FROM github_commit WHERE url=?"
    return dbutils.execute_query(sql, (commit_url,), DATABASE_FILE)
def get_commit_by_timerange(start, end):
    """
    Returns the list of commits in a time range.

    The substr/|| chain strips the separators out of the stored committer
    date (presumably ISO-8601 text — verify against the loader) so it can be
    compared lexicographically against the ``start``/``end`` bounds, which
    must use the same digits-only layout (YYYYMMDDHHMMSS).

    :param start: Initial date as a string
    :param end: Final date as a string
    :return: List of commits.
    """
    commit_sql = "SELECT c.* FROM github_commit c WHERE " \
                 "substr(c.commit_committer_date, 0, 5) || substr(c.commit_committer_date, 6, 2) || substr(c.commit_committer_date, 9, 2) || " \
                 "substr(c.commit_committer_date, 12, 2) || substr(c.commit_committer_date, 15, 2) || substr(c.commit_committer_date, 18, 2) " \
                 "between ? and ?"
    # FIX: removed the stray Python-2 `print "commit_sql ", commit_sql` debug
    # statement — it dumped a constant SQL string to stdout on every call and
    # is a SyntaxError under Python 3.
    return dbutils.execute_query(commit_sql, (start, end), DATABASE_FILE)
def get_repository_tags(repository_name):
    """Return every release_tag row stored for a repository.

    :param repository_name: Repository name
    :return: List of tags
    """
    sql = "SELECT * FROM release_tag where repository=?"
    return dbutils.execute_query(sql, (repository_name,), DATABASE_FILE)
if __name__ == "__main__":
dbutils.create_schema(TABLE_LIST, DATABASE_FILE)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.