text stringlengths 38 1.54M |
|---|
#!/usr/bin/env python
__author__ = "Sreenivas Bhattiprolu"
__license__ = "Feel free to copy, I appreciate if you acknowledge Python for Microscopists"
# https://youtu.be/VHIM2FKGLzc
"""
@author: Sreenivas Bhattiprolu
Sharpness Estimation for Document and Scene Images
by Jayant Kumar , Francine Chen , David Doermann
http://citeseerx.ist.psu.edu/viewdoc/download;jsessionid=33CD0038A0D2D24AE2C4F1A30B6EF1A4?doi=10.1.1.359.7002&rep=rep1&type=pdf
https://github.com/umang-singhal/pydom
pip install git+https://github.com/umang-singhal/pydom.git
#Use difference of differences in grayscale values
of a median-filtered image as an indicator of edge sharpness
"""
from dom import DOM
import cv2
#img = cv2.imread("images/image_quality_estimation/02_2sigma_blurred.tif", 1)
# Load the reference image and three progressively Gaussian-blurred
# versions of it (flag 1 = read as color/BGR).
img1 = cv2.imread("images/image_quality_estimation/02.tif", 1)
img2 = cv2.imread("images/image_quality_estimation/02_2sigma_blurred.tif", 1)
img3 = cv2.imread("images/image_quality_estimation/02_3sigma_blurred.tif", 1)
img4 = cv2.imread("images/image_quality_estimation/02_5sigma_blurred.tif", 1)

# DOM sharpness estimator (difference of differences in grayscale values
# of a median-filtered image); sharper images score higher.
iqa = DOM()

# Score every image.
score1 = iqa.get_sharpness(img1)
score2 = iqa.get_sharpness(img2)
score3 = iqa.get_sharpness(img3)
score4 = iqa.get_sharpness(img4)

# Report one line per image; blurrier inputs should score lower.
labels = ("reference image", "2 sigma blurred image",
          "3 sigma blurred image", "5 sigma blurred image")
for label, score in zip(labels, (score1, score2, score3, score4)):
    print("Sharpness for {}:".format(label), score)
# from pyspark import SparkContext
# myrange = SparkContext.range(0,1000).toDF("number")
# print (myrange)
from pyspark import SQLContext
from pyspark import SparkContext
#from pyspark import SparkSession
# sc = SparkContext()
# myrange = sc.range(1000).toDF("number")
# print (myrange)
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
# Create a local StreamingContext with two working thread and batch interval of 1 second
# Local Spark context with two worker threads; streaming batches every second.
sc = SparkContext("local[2]", "NetworkCount")
ssc = StreamingContext(sc, 1)

# DStream fed by a TCP text socket (e.g. start a source with `nc -lk 9999`).
lines = ssc.socketTextStream("localhost", 9999)

# Classic streaming word count: tokenize, pair each token with 1, sum by key.
words = lines.flatMap(lambda line: line.split(" "))
pairs = words.map(lambda token: (token, 1))
wordCounts = pairs.reduceByKey(lambda left, right: left + right)

# Print the first ten elements of every batch RDD, then run until stopped.
wordCounts.pprint()
ssc.start()
ssc.awaitTermination()
import pytest
from day_15.part_one import speak_n_say
def test_speak_n_say_one():
    """Memory game: starting [1, 3, 2], the 30,000,000th number is 2578."""
    assert speak_n_say([1, 3, 2], 30000000) == 2578
def test_speak_n_say_two():
    """Memory game: starting [2, 1, 3], the 30,000,000th number is 3544142."""
    assert speak_n_say([2, 1, 3], 30000000) == 3544142
def test_speak_n_say_three():
    """Memory game: starting [1, 2, 3], the 30,000,000th number is 261214."""
    assert speak_n_say([1, 2, 3], 30000000) == 261214
def test_speak_n_say_four():
    """Memory game: starting [2, 3, 1], the 30,000,000th number is 6895259."""
    assert speak_n_say([2, 3, 1], 30000000) == 6895259
def test_speak_n_say_five():
    """Memory game: starting [3, 2, 1], the 30,000,000th number is 18."""
    assert speak_n_say([3, 2, 1], 30000000) == 18
def test_speak_n_say_six():
    """Memory game: starting [3, 1, 2], the 30,000,000th number is 362."""
    assert speak_n_say([3, 1, 2], 30000000) == 362
# Allow running this test module directly (outside a pytest invocation).
if __name__ == '__main__':
    pytest.main()
"""
1. Создать класс TrafficLight (светофор) и определить у него один атрибут color (цвет) и метод running (запуск).
Атрибут реализовать как приватный. В рамках метода реализовать переключение светофора в режимы: красный, желтый,
зеленый. Продолжительность первого состояния (красный) составляет 7 секунд, второго (желтый) — 2 секунды, третьего
(зеленый) — на ваше усмотрение. Переключение между режимами должно осуществляться только в указанном порядке
(красный, желтый, зеленый). Проверить работу примера, создав экземпляр и вызвав описанный метод.
"""
import time
class TrafficLight:
    """Traffic light that cycles through red -> yellow -> green, in order.

    The current mode is held in the private class attribute ``_color``
    (0 = red, 1 = yellow, 2 = green).
    """

    _color = 0  # current mode index; private by convention

    # BUG FIX: the original `running` had no `self` parameter and no
    # decorator, so it only worked because callers invoked it through the
    # class object itself; an instance call would have raised TypeError.
    # Declaring it a @staticmethod keeps the existing class-level calls
    # working and makes instance calls valid too.
    @staticmethod
    def running(on_off=False, count=10):
        """Run the light for ``count`` mode switches when ``on_off`` is True.

        Red is shown for 7 seconds, yellow for 2 seconds and green for
        10 seconds, always advancing in that fixed order.

        :param on_off: when False the light stays off (no cycling happens)
        :param count: number of mode switches to perform
        """
        print(f'on_off: {on_off}, count= {count}')
        count_int = 0
        TrafficLight._color = 0  # always start from red
        if on_off:
            while count_int < count:
                if TrafficLight._color == 0:  # red
                    print("RED")
                    time.sleep(7)
                if TrafficLight._color == 1:  # yellow
                    print("YELLOW")
                    time.sleep(2)
                if TrafficLight._color == 2:  # green
                    print("GREEN")
                    time.sleep(10)
                count_int += 1
                # Advance to the next mode, wrapping back to red after green.
                TrafficLight._color = (TrafficLight._color + 1) % 3
# Demo run: five full mode switches, then the default ten.
svetofor1 = TrafficLight
svetofor1.running(True, 5)
print('*' * 15)
svetofor1.running(True)
|
import pygame
from pygame.locals import *
pygame.init()

# Colors as (R, G, B) tuples.
color = (15, 200, 100)        # normal window background
color_2 = (155, 200, 99)      # background shown while the rectangles collide
colorCirculo = (150, 56, 78)  # fill color for the circle and rectangles

# Main window, 400x600 pixels.
ventana = pygame.display.set_mode((400, 600))
pygame.display.set_caption('pygame en py ')
pygame.draw.circle(ventana, colorCirculo,(250, 100), 89 )
corriendo = True  # main-loop flag

# Fonts.
fuente = pygame.font.SysFont('Arial', 50)
texto = fuente.render('Hola fuente', 0, (155, 255, 155))

# Objects.
rectangulo = pygame.Rect(0, 0, 100, 130)
rectangulo_2 = pygame.Rect(100, 100, 50, 70)

# Auxiliary variable: the next whole second to report on stdout.
aux = 1

# Main loop: redraw, follow the mouse, report elapsed seconds, handle quit.
while corriendo:
    ventana.fill(color)
    pygame.draw.rect(ventana, colorCirculo, rectangulo_2)
    pygame.draw.rect(ventana, colorCirculo,rectangulo)
    # The first rectangle follows the mouse pointer.
    rectangulo.left, rectangulo.top = pygame.mouse.get_pos()
    ventana.blit(texto, (101, 100))

    # Seconds elapsed since pygame.init(), rounded to whole seconds.
    tiempo_aux = pygame.time.get_ticks()/1000
    tiempo_posta = (round(tiempo_aux))
    # Print each whole second exactly once.
    if aux == tiempo_posta:
        aux += 1
        print (tiempo_posta)
    contador = fuente.render('pasaron: ' + str(tiempo_posta), 0, (155, 255, 155))
    ventana.blit(contador, (90, 259))

    for evento in pygame.event.get():
        if evento.type == pygame.QUIT:
            print('sesión finalizada')
            corriendo = False

    # Flash the alternate background while the rectangles overlap.
    if rectangulo.colliderect(rectangulo_2):
        ventana.fill(color_2)
    pygame.display.update()
|
# coding: utf-8
"""
Fecha de creacion 3/18/20
@autor: mjapon
"""
import datetime
import logging
from sqlalchemy import Column, Integer, String, TIMESTAMP, Text
from fusayrepo.models.conf import Declarative
from fusayrepo.utils.jsonutil import JsonAlchemy
log = logging.getLogger(__name__)
class TParams(Declarative, JsonAlchemy):
    """ORM model for the 'tparams' application-parameters table."""
    __tablename__ = 'tparams'

    # Surrogate primary key.
    tprm_id = Column(Integer, nullable=False, primary_key=True, autoincrement=True)
    tprm_abrev = Column(String(20), nullable=False)  # short code / abbreviation
    tprm_nombre = Column(String(80), nullable=False)  # display name
    tprm_val = Column(Text, nullable=False)  # parameter value (stored as text)
    # BUG FIX: pass the callable itself, not its result. The original
    # `default=datetime.datetime.now()` evaluated once at import time, so
    # every row got the process start-up timestamp; with the callable,
    # SQLAlchemy evaluates it per row at INSERT time.
    tprm_fechacrea = Column(TIMESTAMP, default=datetime.datetime.now)
|
#encoding=utf-8
from chapt_15.DataDrivenFrameWork.testScripts.TestMail126AddContacts import *
if __name__ == '__main__':
    test126MailAddContacts()  # run the 126 Mail "add contacts" data-driven test
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 31 04:10:06 2019
@author: Stacy
controller program for testing spacy nlp features
rem: use case: convert us date to uk date
"""
# IMPORTS ------------------------------
# py files
import sys
sys.path.append('jellyfish-mods/')
sys.path.append('spacy-mods/')
sys.path.append('dblMetaphone-mods/')
import jf_distance
import jf_phoneme
import jf_match
import spacy_modules
import dm_doubleMetaphone
# py libs
import spacy
# GLOBALS -------------------------------
nlp = spacy.load('en_core_web_sm')
# LOCAL FUNCTIONS -----------------------------
def menu():
    """Print the interactive option menu for the NLP test controller."""
    menu_lines = [
        '\n',
        '---------------------- MENU ----------------------',
        'm - menu',
        'e - exit',
        '\nWX MATCHING ENGINE:',
        '1 - Tokenizer',
        '2 - Tagger',
        '3 - Entity Parser',
        '4 - NER',
        '5 - Matcher',
        '\nPHONETIC ENCODING:\t STRING COMPARISON:',
        '6 - Soundex\t\t 8 - Levenshtein Distance',
        '7 - NYSIIS\t\t 9 - Jaro-Winkler Distance',
        '7b - Double Metaphone\t 0 - Match Rating Codex',
        '----------------------------------------------------',
    ]
    for menu_line in menu_lines:
        print(menu_line)
# ---- end function ----
# MAIN ---------------------------------
# controller program for nlp functions
def main():
    """Controller loop: read menu selections and dispatch to NLP demos."""
    # Every valid selection except the exit command 'e'.
    valid_choices = ['m', '1', '2', '3', '4', '5', '6', '7', '7b', '8', '9', '0']

    # Show the menu once, then prompt until the user exits.
    menu()
    choice = input('Select a menu item: ')

    while choice != 'e':
        if choice not in valid_choices:
            print('Invalid selection.')
        else:
            if choice == 'm':
                menu()
            elif choice == '1':
                spacy_modules.tokenizer()
            elif choice == '2':
                spacy_modules.tagger()
            elif choice == '3':
                spacy_modules.parser()
            elif choice == '4':
                spacy_modules.ner()
            elif choice == '5':
                spacy_modules.matcher()
            elif choice == '6':
                jf_phoneme.soundex()
            elif choice == '7':
                jf_phoneme.nysiis()
            elif choice == '7b':
                dm_doubleMetaphone.dlbMetaphone()
            elif choice == '8':
                jf_distance.levenshtein()
            elif choice == '9':
                jf_distance.jaroWinkler()
            elif choice == '0':
                jf_match.mrc()
        # Prompt for the next selection.
        choice = input('\nSelect a menu item: ')

    print('\nExit program. Done.')


if __name__ == '__main__':
    main()
# -*- coding: utf-8 -*-
"""
/***************************************************************************
LandsatQATools
A QGIS plugin
Decode Landsat QA bands.
-------------------
begin : 2017-05-17
git sha : $Format:%H$
author : Steve Foga, SGT Inc., Contractor to USGS
EROS Center
email : steven.foga.ctr@usgs.gov
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtCore import QSettings, QTranslator, qVersion, QCoreApplication
from PyQt4.QtGui import QAction, QIcon, QColor, QFileDialog
# Initialize Qt resources from file resources.py
# import resources
# Import the code for the dialog
from decode_qa_dialog import LandsatQAToolsDialog
import lookup_dict
import os
import sys
from random import randint
import numpy as np
from osgeo import gdal, gdalconst
from qgis.core import *
class LandsatQATools:
    """QGIS Plugin Implementation.

    Decodes Landsat QA bands: reads the unique pixel values of a selected
    raster, derives a human-readable label for each value from the bit
    flags in `lookup_dict`, and renders the raster with one random color
    and its label per value.
    """

    def __init__(self, iface):
        """Constructor.

        :param iface: An interface instance that will be passed to this class
            which provides the hook by which you can manipulate the QGIS
            application at run time.
        :type iface: QgsInterface
        """
        # Save reference to the QGIS interface
        self.iface = iface
        # initialize plugin directory
        self.plugin_dir = os.path.dirname(__file__)
        # initialize locale (first two characters of the QGIS user locale)
        locale = QSettings().value('locale/userLocale')[0:2]
        locale_path = os.path.join(
            self.plugin_dir,
            'i18n',
            'LandsatQATools_{}.qm'.format(locale))
        if os.path.exists(locale_path):
            self.translator = QTranslator()
            self.translator.load(locale_path)
            if qVersion() > '4.3.3':
                QCoreApplication.installTranslator(self.translator)

        # Declare instance attributes
        self.actions = []  # QActions registered via add_action()
        self.menu = self.tr(u'&Landsat QA QGIS Tools')
        self.toolbar = self.iface.addToolBar(u'LandsatQATools')
        self.toolbar.setObjectName(u'LandsatQATools')

    # noinspection PyMethodMayBeStatic
    def tr(self, message):
        """Get the translation for a string using Qt translation API.

        We implement this ourselves since we do not inherit QObject.

        :param message: String for translation.
        :type message: str, QString
        :returns: Translated version of message.
        :rtype: QString
        """
        # noinspection PyTypeChecker,PyArgumentList,PyCallByClass
        return QCoreApplication.translate('LandsatQATools', message)

    def add_action(
            self,
            icon_path,
            text,
            callback,
            enabled_flag=True,
            add_to_menu=True,
            add_to_toolbar=True,
            status_tip=None,
            whats_this=None,
            parent=None):
        """Add a toolbar icon to the toolbar.

        :param icon_path: Path to the icon for this action. Can be a resource
            path (e.g. ':/plugins/foo/bar.png') or a normal file system path.
        :type icon_path: str
        :param text: Text that should be shown in menu items for this action.
        :type text: str
        :param callback: Function to be called when the action is triggered.
        :type callback: function
        :param enabled_flag: A flag indicating if the action should be enabled
            by default. Defaults to True.
        :type enabled_flag: bool
        :param add_to_menu: Flag indicating whether the action should also
            be added to the menu. Defaults to True.
        :type add_to_menu: bool
        :param add_to_toolbar: Flag indicating whether the action should also
            be added to the toolbar. Defaults to True.
        :type add_to_toolbar: bool
        :param status_tip: Optional text to show in a popup when mouse pointer
            hovers over the action.
        :type status_tip: str
        :param parent: Parent widget for the new action. Defaults None.
        :type parent: QWidget
        :param whats_this: Optional text to show in the status bar when the
            mouse pointer hovers over the action.
        :returns: The action that was created. Note that the action is also
            added to self.actions list.
        :rtype: QAction
        """
        # Create the dialog (after translation) and keep reference
        self.dlg = LandsatQAToolsDialog()

        icon = QIcon(icon_path)
        action = QAction(icon, text, parent)
        action.triggered.connect(callback)
        action.setEnabled(enabled_flag)

        if status_tip is not None:
            action.setStatusTip(status_tip)

        if whats_this is not None:
            action.setWhatsThis(whats_this)

        if add_to_toolbar:
            self.toolbar.addAction(action)

        if add_to_menu:
            self.iface.addPluginToRasterMenu(
                self.menu,
                action)

        self.actions.append(action)

        # Configure "Browse" button
        self.dlg.rasterBox.clear()
        self.dlg.browseButton.clicked.connect(self.select_output_file)

        return action

    def initGui(self):
        """Create the menu entries and toolbar icons inside the QGIS GUI."""
        icon_path = ':/plugins/LandsatQATools/icon.png'
        self.add_action(
            icon_path,
            text=self.tr(u'Decode QA'),
            callback=self.run,
            parent=self.iface.mainWindow())

    def unload(self):
        """Removes the plugin menu item and icon from QGIS GUI."""
        for action in self.actions:
            self.iface.removePluginRasterMenu(
                self.tr(u'&Landsat QA QGIS Tools'),
                action)
            self.iface.removeToolBarIcon(action)
        # remove the toolbar
        del self.toolbar

    def select_output_file(self):
        """
        Enables ability to browse file system for input file.

        :return: None; the chosen filename is appended to the raster combo box.
        """
        filename = QFileDialog.getOpenFileName(self.dlg, "Select input file ",
                                               "", '*')
        self.dlg.rasterBox.addItem(filename)

    def run(self):
        """Run method that performs all the real work"""
        # show the dialog
        self.dlg.show()
        # add all raster layers in current session to UI as potential inputs
        layers = QgsMapLayerRegistry.instance().mapLayers().values()
        for layer in layers:
            if layer.type() == QgsMapLayer.RasterLayer:
                self.dlg.rasterBox.addItem(layer.name(), layer)
        # Run the dialog event loop
        result = self.dlg.exec_()
        # TODO: add logic to auto-detect band and sensor using input_raster
        # See if OK was pressed
        if result:
            # get variable names from input
            input_raster = str(self.dlg.rasterBox.currentText())
            band = str(self.dlg.bandBox.currentText())
            sensor = str(self.dlg.sensorBox.currentText())
            rm_low = self.dlg.rmLowBox.isChecked()  # suppress "low" confidence labels

            # use gdal to get unique values
            ds = gdal.Open(input_raster)
            rb = ds.GetRasterBand(1)
            values = sorted(list(np.unique(np.array(rb.ReadAsArray()))))
            #ds = None

            # define lookup table
            bit_flags = lookup_dict.bit_flags
            #qa_values = lookup_dict.qa_values

            # convert input_sensor to sensor values used in qa_values
            if sensor == "Landsat 4-5, 7":
                sens = "L47"
            elif sensor == "Landsat 8":
                sens = "L8"
            else:
                sys.exit("Incorrect sensor provided. Input: {0}; Potential "
                         "options: Landsat 4-5, 7; Landsat 8"
                         .format(sensor))

            # get all possible bit values for sensor and band combination
            bit_values = sorted(bit_flags[band][sens].values())

            qa_labels = []
            for row in values:
                # bit_bool[i] is True when value `row` has every bit of
                # bit_values[i] set.
                bit_bool = []
                for bv in bit_values:
                    if len(bv) == 1:  # single bit
                        bit_bool.append(row & 1 << bv[0] > 0)
                    elif len(bv) > 1:  # 2+ bits
                        bits = []
                        for b in bv:
                            bits.append(row & 1 << b > 0)
                        if all(item == True for item in bits):
                            bit_bool.append(True)
                        else:
                            bit_bool.append(False)
                    else:
                        sys.exit("No valid bits found for target band.")
                '''
                NEW logic for getting labels using bit wise dictionary
                '''
                # create description of each value based upon all possible bits
                true_bits = [i for (i, bb) in zip(bit_values, bit_bool) if bb]

                # if double bits exist, eliminate single bit descriptions,
                # otherwise, the descriptions will duplicate themselves.
                bb_double = [len(i) > 1 for i in true_bits]
                if any(bb_double):
                    # get only the double bits
                    dbit_nest = [i for (i, db) in zip(true_bits, bb_double)
                                 if db]
                    # collapse the bits into a single list
                    dbits = [item for sublist in dbit_nest for item in sublist]
                    # remove matching single bits out of true_bits list
                    tbo = []
                    for t in true_bits:
                        tb_out = []
                        for d in dbits:
                            if t[0] != d or len(t) > 1:
                                tb_out.append(True)
                            else:
                                tb_out.append(False)
                        if all(tb_out):
                            tbo.append(t)
                    # replace true_bits with filtered list
                    true_bits = tbo

                # NOTE(review): re-defined on every loop iteration; it closes
                # over band/sens/rm_low and could be hoisted out of the loop.
                def get_label(bits):
                    """
                    Generate label for value in attribute table.

                    :param bits: <list> List of True or False for bit position
                    :return: <str> Attribute label
                    """
                    if len(bits) == 0:
                        # no bits set: band-specific "nothing flagged" label
                        if band == 'radsat_qa':
                            return 'No Saturation'
                        elif band == 'sr_cloud_qa' or band == 'sr_aerosol':
                            return 'None'
                        elif band == 'BQA':
                            return 'Not Determined'
                    # build description from all bits represented in value
                    desc = []
                    for tb in bits:
                        k = next(key for key, value in
                                 bit_flags[band][sens].items() if value == tb)
                        # if 'low' labels are disabled, do not add them here
                        if rm_low and band != 'BQA' and 'low' in k.lower():
                            continue
                        # if last check, and not radiometric sat, set to 'clear'
                        elif rm_low and band == 'BQA' and 'low' in k.lower() \
                                and tb == bits[-1] and \
                                'radiometric' not in k.lower() and \
                                not desc:
                            k = 'Clear'
                        # if BQA and bit is low radiometric sat, keep it
                        elif rm_low and band == 'BQA' and 'low' in k.lower():
                            if 'radiometric' not in k.lower():
                                continue
                        # if radsat_qa, handle differently to display cleaner
                        if band == 'radsat_qa':
                            if not desc:
                                desc = "Band {0} Data Saturation".format(tb[0])
                            else:
                                desc = "{0},{1} Data Saturation".format(
                                    desc[:desc.find('Data') - 1], tb[0])
                        # string creation for all other bands
                        else:
                            if not desc:
                                desc = "{0}".format(k)
                            else:
                                desc += ", {0}".format(k)
                    # final check to make sure something was set
                    if not desc:
                        desc = 'ERROR: bit set incorrectly'
                    return desc

                # add desc to row description
                qa_labels.append(get_label(true_bits))

            '''
            OLD logic for getting lookup values
            # use unique raster values (and sensor+band pair) to get defs
            if band == 'radsat_qa':
                qa_labels = {i:qa_values[band][i] for i in qa_values[band] if i
                             in list(values)}
            elif band == 'pixel_qa' and sens == 'L8': # terrain occl. check
                qa_labels = {}
                for i in qa_values[band]:
                    if i >= 1024:
                        qa_labels[i] = 'Terrain occlusion'
                    else:
                        qa_labels[i] = qa_values[band][sens][i]
            else:
                qa_labels = {i:qa_values[band][sens][i] for i in
                             qa_values[band][sens] if i in list(values)}
            '''

            '''
            Use gdal.RasterAttributeTable to embed qa values in raster
            '''
            # create table
            rat = gdal.RasterAttributeTable()
            # get column count (for indexing columns)
            rat_cc = rat.GetColumnCount()
            # add 'value' and 'descr' columns to table
            rat.CreateColumn("Value", gdalconst.GFT_Integer,
                             gdalconst.GFU_MinMax)
            rat.CreateColumn("Descr", gdalconst.GFT_String,
                             gdalconst.GFU_MinMax)
            # populate table with contents of 'qa_labels'
            uid = 0
            for val, lab in zip(values, qa_labels):
                # 'value' column
                rat.SetValueAsInt(uid, rat_cc, int(val))
                # 'descr' column
                rat.SetValueAsString(uid, rat_cc + 1, lab)
                uid += 1
            # set raster attribute table to raster
            rb.SetDefaultRAT(rat)
            '''
            METHOD 1: use RasterAttributeTable to display values.
            QGIS' UI does not currently support reading Attribute Tables
            embedded in raster datasets. Instead, we'll assign labels and
            random colors to the raster's color palette in the QGIS UI.
            Feature request: https://issues.qgis.org/issues/4321
            # open raster with QGIS API
            q_raster = QgsRasterLayer(input_raster,
                                      os.path.basename(input_raster))
            # make sure the raster is valid
            if not q_raster.isValid():
                sys.exit("Layer {0} not valid!".format(input_raster))
            # save changes and close raster
            ds = None
            # add raster to QGIS interface
            QgsMapLayerRegistry.instance().addMapLayer(q_raster)
            '''
            '''
            METHOD 2: re-assign colors in QGIS
            '''
            # open raster
            q_raster = QgsRasterLayer(input_raster,
                                      os.path.basename(input_raster))
            if not q_raster.isValid():
                sys.exit("Layer {0} not valid!".format(input_raster))
            # define color shader
            shader = QgsRasterShader()
            # define ramp for color shader
            c_ramp_shader = QgsColorRampShader()
            c_ramp_shader.setColorRampType(QgsColorRampShader.EXACT)
            # assign a random color to each value, and apply label
            # NOTE(review): randint is inclusive, so randint(0, 2 ** 24) can
            # return 2 ** 24, producing a 7-digit '#1000000' color string —
            # the upper bound should likely be 2 ** 24 - 1. Confirm and fix.
            c_ramp_vals = []
            for val, lab in zip(values, qa_labels):
                c_ramp_vals.append(QgsColorRampShader.
                                   ColorRampItem(
                                       float(val),
                                       QColor('#%06x' % randint(0, 2 ** 24)),
                                       lab))
            # apply new color/label combo to color ramps
            c_ramp_shader.setColorRampItemList(c_ramp_vals)
            shader.setRasterShaderFunction(c_ramp_shader)
            # apply color ramps to raster
            ps_ramp = QgsSingleBandPseudoColorRenderer(q_raster.dataProvider(),
                                                       1, shader)
            q_raster.setRenderer(ps_ramp)
            # add raster to QGIS interface
            QgsMapLayerRegistry.instance().addMapLayer(q_raster)
|
"""
Description :
Slot-action prediction model.
Finetunes bert-base-uncased model on slot-action prediction data.
Run Command:
python train_slot_act.py -in=<path of the input data> -path=<output dir> -src_file=<name of the python script>
"""
#--------------------------------------------
import torch
import torchtext.vocab as vocab
import random
import math
import time
import argparse
import os
import shutil
import pandas as pd
import numpy as np
import torch
import transformers
from transformers import BertTokenizer, BertModel, AdamW
from transformers import get_linear_schedule_with_warmup
from torch.utils.data import DataLoader, RandomSampler
from torch.utils.data import TensorDataset
import torch.nn as nn
import torch.optim as optim
from sklearn.metrics import classification_report, confusion_matrix, matthews_corrcoef
#-----------------------------------------
# Reproducibility: seed the Python and torch RNGs.
SEED = 1234
random.seed(SEED)
torch.manual_seed(SEED)
torch.backends.cudnn.deterministic = True

# Prefer GPU when available.
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(SEED)
    device = torch.device("cuda")
else:
    print('No GPU available, using the CPU instead.')
    device = torch.device("cpu")

# Command-line arguments.
parser = argparse.ArgumentParser()
parser.add_argument('-path', '--path', help='path of the working directiory', required=True)
parser.add_argument('-src_file', '--src_file', help='path of the source file', required=False, default='')
parser.add_argument('-in', '--in', help='path of the input files', required=True)
args = vars(parser.parse_args())
work_dir = args['path']
src_file = args['src_file']
in_dir = args['in']

print("Path of the working directory : {}".format(work_dir))
if not os.path.isdir(work_dir):
    print("Directory does not exist.")
    # BUG FIX: exit with a non-zero status on a fatal error (was exit(0),
    # which signals success to the calling shell).
    exit(1)
print("Path of the input directory : {}".format(in_dir))
if not os.path.isdir(in_dir):
    print("Directory does not exist.")
    exit(1)

# Optionally archive the training script alongside the outputs (best effort).
if src_file:
    try:
        shutil.copy(src_file, work_dir)
    # BUG FIX: narrowed the bare `except:` (which also swallowed SystemExit
    # and KeyboardInterrupt) to the errors shutil.copy can actually raise.
    except (OSError, shutil.Error):
        print("File {} failed to get copied to {}".format(src_file, work_dir))

#-----------------------------------------
domain_list = ['police', 'restaurant', 'hotel', 'taxi', 'attraction', 'train', 'hospital']
# Maps dialogue-act slot names to their canonical lower-case form.
slot_detail = {'Type': 'type', 'Price': 'price', 'Parking': 'parking', 'Stay': 'stay', 'Day': 'day',
               'People': 'people', 'Post': 'post', 'Addr': 'address', 'Dest': 'destination', 'Arrive': 'arrive',
               'Depart': 'departure', 'Internet': 'internet', 'Stars': 'stars', 'Phone': 'phone', 'Area': 'area',
               'Leave': 'leave', 'Time': 'time', 'Ticket': 'ticket', 'Ref': 'reference', 'Food': 'food',
               'Name': 'name', 'Department': 'department', 'Fee': 'fee', 'Id': 'id', 'Car': 'car'}
LABEL_COLUMN = "slot_act"  # target column in the TSV data files
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
#-----------------------------------------
def get_word(word):
    """Return the pre-trained GloVe vector for *word* (KeyError if absent)."""
    row = glove.stoi[word]
    return glove.vectors[row]
def prepare_data(df, max_len, batch_size, slot_dict, domain_dict):
    """Tokenize (sys, usr) utterance pairs and wrap everything in a DataLoader.

    Uses the module-level BERT `tokenizer` and `LABEL_COLUMN`. Each batch
    yields (input_ids, attention_masks, slot_ids, domain_ids, labels).
    """
    token_id_chunks = []
    mask_chunks = []
    for row_idx in df.index:
        # Encode system and user utterance pair as a single BERT input.
        encoded = tokenizer.encode_plus(
            df['sys'][row_idx].lower(), df['usr'][row_idx].lower(),
            add_special_tokens=True,
            padding='max_length',
            truncation=True,
            max_length=max_len,
            return_attention_mask=True,
            return_tensors='pt',
        )
        token_id_chunks.append(encoded['input_ids'])
        mask_chunks.append(encoded['attention_mask'])

    input_ids = torch.cat(token_id_chunks, dim=0)
    attention_masks = torch.cat(mask_chunks, dim=0)

    # Map slot / domain names to their integer ids.
    slot_ids = torch.tensor([slot_dict[s] for s in df.slot])
    domain_ids = torch.tensor([domain_dict[s] for s in df.domain])
    labels = torch.tensor(df[LABEL_COLUMN])

    dataset = TensorDataset(input_ids, attention_masks, slot_ids, domain_ids, labels)
    return DataLoader(
        dataset,
        sampler=RandomSampler(dataset),
        batch_size=batch_size,
    )
class Model(nn.Module):
    """BERT encoder plus frozen GloVe slot/domain embeddings with an
    attention head over the token representations.

    The concatenated slot+domain GloVe vectors are projected to the BERT
    hidden size and used as a query to attend over BERT token states; the
    attended token vector and the query are concatenated and classified.
    """

    def __init__(self, weights_matrix, domain_matrix, num_labels):
        # weights_matrix / domain_matrix: pre-trained GloVe rows for slots
        # and domains (frozen via Embedding.from_pretrained).
        super(Model, self).__init__()
        self.encode = BertModel.from_pretrained('bert-base-uncased', output_hidden_states = True)
        self.embedding = nn.Embedding.from_pretrained(torch.FloatTensor(weights_matrix))
        self.embedding_domain = nn.Embedding.from_pretrained(torch.FloatTensor(domain_matrix))
        self.drop_out = nn.Dropout(0.3)
        self.gelu = nn.GELU()
        # 300-d slot + 300-d domain GloVe -> BERT hidden size (768).
        self.l1 = nn.Linear(300*2, 768)
        self.l2 = nn.Linear(768*2, num_labels)
        self.smax = nn.Softmax(dim=1)

    def forward(self, input_ids, attention_masks, slot_ids, domain_ids):
        outputs = self.encode(input_ids, attention_masks)
        with torch.no_grad(): #Freezing GloVe embeddings
            slot_embeddings = self.embedding(slot_ids)
            domain_embeddings = self.embedding_domain(domain_ids)
        input2 = torch.cat((slot_embeddings, domain_embeddings), 1)
        # Second-to-last BERT hidden layer; presumably (batch, seq, 768) —
        # TODO confirm against the transformers version in use.
        input1 = outputs[2][-2]
        input2 = self.l1(input2)
        input2 = self.gelu(input2)
        input3 = torch.unsqueeze(input2, -1)
        # Attention scores over tokens; 28.0 is a fixed scale constant
        # (close to sqrt(768) ~ 27.7 — TODO confirm intent).
        a = torch.matmul(input1, input3)/28.0
        a = self.smax(torch.squeeze(a, -1))
        a = torch.unsqueeze(a, -1)
        # Weighted sum of token representations using the attention weights.
        input1 = input1.permute(0, 2, 1)
        input1 = torch.matmul(input1, a)
        input1 = torch.squeeze(input1, -1)
        # Concatenate attended token vector with the projected slot/domain query.
        output = torch.cat((input1, input2), 1)
        output = self.drop_out(output)
        output = self.l2(output)
        return output
def evaluate_metrics(dataloader, model):
    """Evaluate *model* on *dataloader* and return the average loss.

    Prints the Matthews correlation coefficient, a classification report
    and the confusion matrix. Relies on the module-level `class_weights`
    and `device`.

    :param dataloader: DataLoader yielding (input_ids, attn_mask, slot_ids,
        domain_ids, labels) batches.
    :param model: the Model instance to evaluate (switched to eval mode).
    :return: average cross-entropy loss over the batches.
    """
    total_loss = 0.0
    criterion = nn.CrossEntropyLoss(weight=class_weights.to(device))
    y_true = []
    y_pred = []
    model.eval()
    with torch.no_grad():
        for batch in dataloader:
            b_input_ids = batch[0].to(device)
            b_attn_mask = batch[1].to(device)
            b_slot_ids = batch[2].to(device)
            b_domain_ids = batch[3].to(device)
            labels = batch[4].to(device)
            outputs = model(b_input_ids, b_attn_mask, b_slot_ids, b_domain_ids)
            loss = criterion(outputs, labels)
            total_loss += loss.item()
            _, predicted = torch.max(outputs, 1)
            # (removed dead code: `c = (predicted == labels).squeeze()`
            # computed a per-item correctness mask that was never used)
            y_true.extend(labels.cpu().numpy().tolist())
            y_pred.extend(predicted.cpu().numpy().tolist())
    avg_loss = total_loss / len(dataloader)
    print("MCC : {}".format(matthews_corrcoef(y_true, y_pred)))
    print("Classification Report")
    print(classification_report(y_true, y_pred))
    print("Confusion Matrix")
    print(confusion_matrix(y_true, y_pred))
    return avg_loss
#-----------------------------------------
# Load GloVe embeddings (42B-token corpus, 300-d vectors).
#glove = vocab.GloVe(name='42B', dim=300, cache='.vector_cache')
glove = vocab.GloVe(name='42B', dim=300)
print('Loaded {} words from Glove'.format(len(glove.itos)))

# Build domain dictionary: domain name -> integer id.
domain_dict = {}
for i, k in enumerate(domain_list):
    domain_dict[k] = i
print(domain_dict)
print("domain_dict : {}".format(domain_dict))

# Build slot dictionary (canonical slot name -> id) and its inverse.
slot_dict = {}
slot_rev_dict = {}
for i, k in enumerate(slot_detail):
    slot_dict[slot_detail[k]] = i
    slot_rev_dict[i] = slot_detail[k]
print("slot_dict : {}".format(slot_dict))
print("slot_rev_dict : {}".format(slot_rev_dict))

# Loading GloVe embeddings for each slot name (one matrix row per slot id).
matrix_len = len(slot_dict)
weights_matrix = np.zeros((matrix_len, 300))
words_not_found = 0
for i in slot_rev_dict:
    try:
        weights_matrix[i] = get_word(slot_rev_dict[i])
    except KeyError:
        # Out-of-vocabulary slot name: fall back to a random vector.
        words_not_found += 1
        print("{} not found".format(slot_rev_dict[i]))
        weights_matrix[i] = np.random.normal(scale=0.6, size=(300, ))
print("#Words not found : {}".format(words_not_found))

# Loading GloVe embeddings for each domain name (same fallback scheme).
matrix_len = len(domain_list)
domain_matrix = np.zeros((matrix_len, 300))
domain_not_found = 0
for i in range(len(domain_list)):
    try:
        domain_matrix[i] = get_word(domain_list[i])
    except KeyError:
        domain_not_found += 1
        print("{} not found".format(domain_list[i]))
        domain_matrix[i] = np.random.normal(scale=0.6, size=(300, ))
print("Shape of domain matrix: {}".format(domain_matrix.shape))
print("#Domain not found : {}".format(domain_not_found))
#-----------------------------------------
# Load train / test / dev splits (tab-separated files in the input dir).
file_path = os.path.join(in_dir, 'train_slot_act.tsv')
train_df = pd.read_csv(file_path, sep='\t')
print("Shape of Training data : {}".format(train_df.shape))
file_path = os.path.join(in_dir, 'test_slot_act.tsv')
test_df = pd.read_csv(file_path, sep='\t')
print("Shape of Test data : {}".format(test_df.shape))
file_path = os.path.join(in_dir, 'dev_slot_act.tsv')
valid_df = pd.read_csv(file_path, sep='\t')
print("Shape of Valid data : {}".format(valid_df.shape))

num_labels = len(train_df[LABEL_COLUMN].unique())
print("Number of labels : {}".format(num_labels))
# Set class weights to handle imbalanced class ratios (if required);
# currently all ones, i.e. effectively unweighted.
class_weights = torch.ones(num_labels)
print("class weights : {}".format(class_weights))

MAX_LEN = 200  # maximum BERT input length in tokens
print("Max length final : {}".format(MAX_LEN))
batch_size = 32
print("Batch size : {}".format(batch_size))

print("Loading Train data")
train_dataloader = prepare_data(train_df, MAX_LEN, batch_size, slot_dict, domain_dict)
print("Loading Test data")
test_dataloader = prepare_data(test_df, MAX_LEN, batch_size, slot_dict, domain_dict)
print("Loading Validation data")
valid_dataloader = prepare_data(valid_df, MAX_LEN, batch_size, slot_dict, domain_dict)
print("Data load completed")
print("Size of Train loader : {}".format(len(train_dataloader)))
print("Size of Test loader : {}".format(len(test_dataloader)))
print("Size of Valid loader : {}".format(len(valid_dataloader)))

model = Model(weights_matrix, domain_matrix, num_labels)
model.to(device)

#-----------------------------------------
print('Starting Training ...')
clip = 2.0       # gradient-norm clipping threshold
num_epoch = 4
best_valid_loss = 9999
best_test_loss = 9999
best_train_loss = 0
best_model = 0   # epoch number of the best checkpoint so far
# Fresh instance of the same class; holds a copy of the best weights.
model_copy = type(model)(weights_matrix, domain_matrix, num_labels)

criterion = nn.CrossEntropyLoss(weight=class_weights.to(device))
optimizer = AdamW(model.parameters(),
                  lr = 2e-5,
                  eps = 1e-8
                  )
total_steps = len(train_dataloader) * num_epoch
scheduler = get_linear_schedule_with_warmup(optimizer,
                                            num_warmup_steps = 0,
                                            num_training_steps = total_steps)

for epoch in range(num_epoch):
    model.train()
    print("Epoch {} --------------------------".format(epoch+1))
    # NOTE(review): running_loss is initialized but never updated in the
    # loop below — dead variable; confirm and remove or wire it up.
    running_loss = 0.0
    for i, batch in enumerate(train_dataloader):
        b_input_ids = batch[0].to(device)
        b_attn_mask = batch[1].to(device)
        b_slot_ids = batch[2].to(device)
        b_domain_ids = batch[3].to(device)
        b_labels = batch[4].to(device)
        optimizer.zero_grad()
        outputs = model(b_input_ids, b_attn_mask, b_slot_ids, b_domain_ids)
        loss = criterion(outputs, b_labels)
        loss.backward()
        # Clip gradients to stabilize fine-tuning.
        torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
        optimizer.step()
        scheduler.step()
    # Full evaluation on all three splits after every epoch.
    print("Training Accuracy :-")
    train_loss = evaluate_metrics(train_dataloader, model)
    print("Validation Accuracy :-")
    valid_loss = evaluate_metrics(valid_dataloader, model)
    print("Test Accuracy :-")
    test_loss = evaluate_metrics(test_dataloader, model)
    print("Epoch {} : Train loss = {} : Valid loss = {} : Test loss = {}".format(epoch + 1, train_loss, valid_loss, test_loss))
    # Keep a copy of the weights whenever validation loss improves.
    if(valid_loss < best_valid_loss):
        best_valid_loss = valid_loss
        best_test_loss = test_loss
        best_train_loss = train_loss
        best_model = epoch+1
        model_copy.load_state_dict(model.state_dict())
        print("Model {} copied".format(epoch+1))

print('Finished Training ...')

# Persist the best checkpoint and re-verify it on all three splits.
PATH = os.path.join(work_dir , 'slot_action_model.pt')
torch.save(model_copy.state_dict(), PATH)
model.to('cpu')
model_copy.to(device)
print("---Best model---")
print("Epoch {} : Train loss = {} : Validation Loss = {} : Test loss = {}".format(best_model, best_train_loss, best_valid_loss, best_test_loss))
print("Training Accuracy :-")
train_loss = evaluate_metrics(train_dataloader, model_copy)
print("Validation Accuracy :-")
valid_loss = evaluate_metrics(valid_dataloader, model_copy)
print("Test Accuracy :-")
test_loss = evaluate_metrics(test_dataloader, model_copy)
print("Verifying Epoch {} : Train loss = {} : Validation Loss = {} : Test loss = {}".format(best_model, train_loss, valid_loss, test_loss))
print("done")
#-----------------------------------------
from page_objects import PageElement, PageObject
class LoginPage(PageObject):
    """Page object for the login screen.

    Each attribute is a page_objects.PageElement descriptor that locates
    the element (by name or CSS selector) on a live page instance.
    """
    # <input name="email"> -- the username / e-mail field
    username = PageElement(name='email')
    # <input name="password">
    password = PageElement(name='password')
    # submit button of the login form
    login_button = PageElement(css='input[type="submit"]')
    # "Log in with Facebook" button
    facebook_button = PageElement(css='.btn-facebook')
|
#Text-Based Tic Tac Toe (Computer Makes Moves Randomly)
#Using https://repl.it/languages/python3
#Author: Robert Beckett
from random import randint
#This function prints the board. (View section)
def PrintBoard():
  """Print the current 3x3 board (module globals a1..c3) plus a blank line."""
  print(a1, a2, a3)
  print(b1, b2, b3)
  print(c1, c2, c3)
  print("\n")
#This function checks whether either x or o has 3 in a row for a given set of 3 variables. (Model Section)
def WinTest(wtone, wttwo, wtthree):
  """End the game when the three given cells all hold the same marker.

  Prints the final board and a result message, then exits the interpreter
  with SystemExit -- the same effect as the original quit() calls.
  """
  # One loop replaces the two copy-pasted comparison blocks.
  for marker, message in ((" X ", "You win!"), (" O ", "Computer Wins!")):
    if wtone == wttwo == wtthree == marker:
      PrintBoard()
      print(message)
      # raise SystemExit directly instead of quit(): quit() is injected by
      # the `site` module and is missing when Python runs with -S.
      raise SystemExit
#This function uses WinTest to check every possible win on the board. (Model Section)
def HasSomeoneWon():
  """Run WinTest over all eight winning lines, then declare a draw on a full board."""
  board_lines = (
    (a1, a2, a3), (b1, b2, b3), (c1, c2, c3),  # rows
    (a1, b1, c1), (a2, b2, c2), (a3, b3, c3),  # columns
    (a1, b2, c3), (a3, b2, c1),                # diagonals
  )
  for first, second, third in board_lines:
    WinTest(first, second, third)
  # Draw: every cell is taken and no line above ended the game.
  if " - " not in (a1, a2, a3, b1, b2, b3, c1, c2, c3):
    print("Cat's Game!")
    PrintBoard()
    quit()
def ValidateAndPlay(turn, XorO): #Check if the cell is empty. If so play and return "False". Otherwise, return, "True." (Model Section)
  """Try to place marker XorO on the cell named by `turn` ('a1'..'c3').

  Returns False when the move was played (the named cell exists and was
  empty) and True when the move is invalid (unknown cell name or occupied
  cell) -- the same convention as the original if/elif chain.
  """
  cells = ("a1", "a2", "a3", "b1", "b2", "b3", "c1", "c2", "c3")
  # The board lives in nine module-level variables named after the cells,
  # so read and write them through the module namespace instead of nine
  # copy-pasted branches (the str('a1') calls in the original were also
  # redundant -- both operands are already strings).
  if turn in cells and globals()[turn] == " - ":
    globals()[turn] = XorO
    return False
  return True
def HumanTurn(): #input a cell from player (Control Section)
  """Prompt until the player names a playable cell, then place " X " there."""
  playing = True
  while playing == True:
    # NOTE(review): this `global` is redundant -- nothing is assigned in
    # this scope; the cells are written inside ValidateAndPlay.
    global a1, a2, a3, b1, b2, b3, c1, c2, c3
    print ('Please enter an unoccupied space using these codes:')
    print ("\n")
    print ('a1, a2, a3')
    print ('b1, b2, b3')
    print ('c1, c2, c3')
    turn = input()
    # ValidateAndPlay returns False when the move was actually made.
    if ValidateAndPlay(turn, " X ") == False:
      playing = False
#This function has a while loop that generates a random number between 1 and 9, checks if the corresponding spot on the board is empty, then plays if the spots available. If it's not available, it generates another random number and tries again. (Model Section)
def ComputerTurn():
  """Draw random cells until an empty one is found, then play " O " there.

  Same retry strategy as the original (randint(1,9) mapped onto the nine
  cells; an occupied pick triggers another draw), but the nine duplicated
  elif branches collapse into a single table lookup.  The caller only
  invokes this while at least one cell is free, so the loop terminates.
  """
  cells = ("a1", "a2", "a3", "b1", "b2", "b3", "c1", "c2", "c3")
  while True:
    cell = cells[randint(1, 9) - 1]
    # ValidateAndPlay returns False when the move was actually made.
    if ValidateAndPlay(cell, " O ") == False:
      return
# Board state lives in nine module-level variables, one per cell; these are
# also what the win/draw checks read.
def gameinit():
  """Reset all nine board cells to the empty marker " - " at game start."""
  global a1, a2, a3, b1, b2, b3, c1, c2, c3
  a1 = a2 = a3 = b1 = b2 = b3 = c1 = c2 = c3 = " - "
def main():
  """Run one full game: random first player, alternate turns until win/draw."""
  gameinit()
  print("Welcome to Tic Tac Toe!")
  #In this section, it randomly chooses the first player.
  #The computer also always assigns O's to the computer and X's to the human.
  #Eventually it'll be nice and ask if the user wants to go first
  #or play X's or O's.
  #pick 1 or 2. This is the start number.
  i = randint(1,2)
  # The bound 15 is just "more than enough" turns: a win or a full board
  # makes HasSomeoneWon() end the process long before i reaches it.
  while i < 15:
    PrintBoard()
    #Even numbers = user goes.
    if i % 2 == 0: #
      HumanTurn()
      HasSomeoneWon()
    #Otherwise= computer goes.
    else:
      ComputerTurn()
      HasSomeoneWon()
    i += 1
main()
|
# Three movie records. Keys (Vietnamese, kept verbatim because they are
# runtime data): "năm phát hành" = release year, "diễn viên" = actors,
# "Nhân vật tham gia" = participating characters.
a = {
    "năm phát hành" : 2018,
    "diễn viên" : "abc",
    "Nhân vật tham gia" : "xyz",
}
b = {
    "năm phát hành" : 2019,
    "diễn viên" : "ghik",
    "Nhân vật tham gia" : "lmn",
}
c = {
    "năm phát hành" : 2020,
    "diễn viên" : "123456",
    "Nhân vật tham gia" : "98765",
}
d = [a, b, c]
# For every movie, ask the user for the studio ("Hãng sản xuất") and the
# country ("Quốc gia") and store both on the record, then dump the list.
for x in d:
    g = input("Hãng sản xuất:")
    h = input("Quốc gia")
    x["Hãng sản xuất"] = (g)
    x["Quốc gia"] = (h)
print(d)
from ossConfig import ossConfig
import Oss
# Demo: read, set and delete a bucket's static-website configuration via the
# Oss helper module. Credentials/endpoint are placeholders.
access_key = 'XXXXXXXXX'
secret_key = 'XXXXXXXXXXXXXXXXXXX'
endpoint_url = 'http://XXXXXXXXXXXXXXXXX.com'
config = ossConfig(access_key, secret_key, endpoint_url)
bucket_name = 'test1'
# get_website_configuration
get_website_configuration = Oss.get_bucket_website(config, bucket_name)
if get_website_configuration is not None:
    print(get_website_configuration)
else:
    print("Error OR This bucket is not set website!")
# put_bucket_website
website_configuration = {
    'IndexDocument': {'Suffix': 'myindex.html'},
    'ErrorDocument': {'Key': 'myerror.html'}
}
# Fixed typo in the two status messages below: "sucess" -> "success".
if Oss.put_bucket_website(config, bucket_name, website_configuration):
    print("put bucket_website success!")
else:
    print("put bucket_website failed!")
# delete_bucket_website
if Oss.delete_bucket_website(config, bucket_name):
    print("delete bucket_website success!")
else:
    print("delete bucket_website failed!")
|
"""Viết chương trình đếm các chuỗi trong một list thỏa mãn:
+ Độ dài từ 2 trở lên
+ Ký tự đầu tiên và cuối cùng của chuỗi đó giống nhau"""
def demchuoi(list):
dem=0
for i in range(len(list)):
if len(list[i])>=2:
if list[i][0]==list[i][len(list[i])-1]: dem+=1
print(dem)
demchuoi(['waww','hâh','b','n','ji','hjio']) |
from collections import defaultdict
from typing import List, Callable, Tuple, Iterable, Dict
from data_generator.tokenize_helper import TokenizedText
from list_lib import lmap, dict_value_map
from misc_lib import group_by, get_first, get_second, get_third
def assert_token_subword_same(idx, entries, t_text):
    """Warn when the subword pieces grouped under token index `idx` do not
    re-assemble into the original token.

    Despite the name this does not raise: it only prints a diagnostic when
    the concatenated (de-'##'-prefixed, lower-cased) subwords differ from
    the token at t_text.tokens[idx].
    """
    subword_tokens: List[str] = lmap(get_second, entries)
    token = t_text.tokens[idx]
    # Check token and subword_tokens are equal once re-joined.
    token_from_sbword = "".join(subword_tokens)
    # Strip the WordPiece continuation marker before comparing.
    token_from_sbword = token_from_sbword.replace("##", "")
    if token.lower() != token_from_sbword:
        print("Token is different from the subword: ", token.lower(), token_from_sbword)
# Convert subtoken-level score into word level score, output is represented as tuple of
def get_split_score_to_pair_list_fn(merge_subtoken_scores: Callable[[Iterable[float]], float]) -> \
        Callable[[List[float], List[str], TokenizedText], List[Tuple[str, float]]]:
    """Build a function that converts per-subword scores to per-word pairs.

    `merge_subtoken_scores` reduces the list of subword scores of one word
    into a single float (e.g. max or mean).
    """
    # scores
    def split_score_to_pair_list(scores: List[float],
                                 sbtoken_list: List[str],
                                 t_text: TokenizedText) -> List[Tuple[str, float]]:
        """Return [(str(word_idx), merged_score), ...] for the given subwords."""
        # Lookup token idx
        e_list: List[Tuple[int, str, float]] = []
        for idx, (sb_token, score) in enumerate(zip(sbtoken_list, scores)):
            # sbword_mapping maps each subword position to its word index.
            token_idx = t_text.sbword_mapping[idx]
            e = (token_idx, sb_token, score)
            e_list.append(e)
        # Group by token idx
        # Transpose array
        grouped = group_by(e_list, get_first)
        doc_id_score_list: List[Tuple[str, float]] = []
        for idx, entries in grouped.items():
            # Sanity-check that the grouped subwords re-assemble the token.
            assert_token_subword_same(idx, entries, t_text)
            per_tokens_scores: List[float] = lmap(get_third, entries)
            s: float = merge_subtoken_scores(per_tokens_scores)
            doc_id_score: Tuple[str, float] = str(idx), s
            doc_id_score_list.append(doc_id_score)
        return doc_id_score_list
    return split_score_to_pair_list
# Convert subtoken-level score into word level score, output is represented as tuple of
# NOTE: the original `InputArgs = List[float], List[str], TokenizedText` built
# a plain runtime tuple of type objects, which is not usable as a type alias;
# wrap it in typing.Tuple so it can annotate the (scores, subtokens, text)
# argument triple.
InputArgs = Tuple[List[float], List[str], TokenizedText]
def merge_subtoken_level_scores(merge_subtoken_scores: Callable[[Iterable[float]], float],
                                scores: List[float],
                                t_text: TokenizedText) -> Dict[int, float]:
    """Merge subword-level scores into word(token)-level scores.

    Groups each subword score under the index of the word it belongs to --
    via t_text.sbword_mapping, the same lookup split_score_to_pair_list
    uses -- and reduces each group with `merge_subtoken_scores`.
    """
    grouped: Dict[int, List[float]] = defaultdict(list)
    for idx, (sb_token, score) in enumerate(zip(t_text.sbword_tokens, scores)):
        # BUG FIX: the original grouped by the enumerate index itself, which
        # put every subword in its own singleton group so nothing was ever
        # merged; map the subword position to its word index instead.
        token_idx = t_text.sbword_mapping[idx]
        grouped[token_idx].append(score)
    return dict_value_map(merge_subtoken_scores, grouped)
|
#WHILE LOOPS
'''
i=1
while i<=10:
print('*'*i)
i+=1
print ('Done')
'''
#GUESSING GAME
'''secret_number=9
guess_count=0
guess_limit=3
while guess_count<guess_limit:
guess=int(input('Guess: '))
guess_count+=1
if guess == secret_number:
print('You won!!!')
break
else:
print('Sorry You lost :-(')
'''
#CAR SIMULATION GAME
'''default_input='HELP'
user_input=input('>')
while user_input.upper() == default_input:
print('start - to start the car')
print('stop - to stop the car')
print('quit - to quit')
if user_input == 'start':
print ('Car started . . . Ready to Go...!')
elif user_input == 'stop':
print('Car Stopped')
'''
#Solution
'''command=''
Started = False
while True:
command=input('>').lower()
if command == 'start':
if Started == True:
print('Car is already started !!! Dont spoil the Engine')
else:
Started=True
print('The car has been started . . .')
elif command == 'stop':
if not Started:
print ('Hey the Car is already stopped')
else:
Started =False
print('car has been stopped !!')
elif command == 'help':
print('''
'''Start - To start the car
Stop - To stop the car
Quit - to quit the game'''
''')
elif command == 'quit':
break
else:
print("I Don't Unserstand that")
'''
|
from django.conf.urls import patterns, url, include
from administration import urls as auth_urls
from paparazzi import views
# URL routes for the paparazzi app. django.conf.urls.patterns() takes a view
# *prefix string* as its first positional argument; the original omitted it,
# so the first url() tuple was consumed as the prefix and the routes broke.
# (patterns() is deprecated since Django 1.8 -- a plain list of url() entries
# is the modern form.)
urlpatterns = patterns(
    '',
    url(r'^auth/', include(auth_urls)),
    url(r'^$', views.api_root),
    # /photos/
    url(r'^photos/$', views.ListCreatePhoto.as_view(), name='photos'),
)
import os
from django.shortcuts import render, redirect, reverse, HttpResponseRedirect, get_object_or_404
from .forms import SearchForm
from .models import JobAPI, JobPost
from .utils.api_runner import api_main
def index(request):
    """Render the index page listing every stored JobAPI search."""
    return render(
        request,
        'indeed_api/index.html',
        context={"searches": JobAPI.objects.all()},
    )
def search(request):
    """Create (or refresh) a saved Indeed search from the SearchForm.

    GET renders an empty form; a valid POST fills in the derived query,
    location and API-URL fields, then either refreshes the timestamp of an
    identical existing search or saves a new one and redirects to the index.
    """
    search_form = SearchForm()
    # if post - process form and build url with form data
    # otherwise, return an empty search form
    if request.method == 'POST':
        submitted_form = SearchForm(request.POST)
        if submitted_form.is_valid():
            form_data = submitted_form.save(commit=False)
            # Derived fields: assemble the query string, location and API
            # URL from the cleaned inputs before persisting.
            form_data.search_must_contain = submitted_form.cleaned_data['search_must_contain']
            form_data.search_at_least_one = submitted_form.cleaned_data['search_at_least_one']
            form_data.search_cant_contain = submitted_form.cleaned_data['search_cant_contain']
            form_data.full_query = form_data.return_query_string()
            form_data.city = submitted_form.cleaned_data['city']
            form_data.state = submitted_form.cleaned_data['state']
            form_data.location = form_data.return_location()
            form_data.url_for_api = form_data.build_url_job_search()
            # If existing form, update
            if JobAPI.objects.filter(url_for_api=form_data.url_for_api).exists():
                # Search already exists, update timestamp
                # NOTE(review): url_updated() is presumably expected to save
                # the refreshed timestamp itself -- confirm it does.
                form_data.url_updated()
            # If new search, create
            else:
                form_data.save()
            return redirect(reverse("indeed_api:index"))
    context = {
        'search_form': search_form
    }
    return render(request, template_name='indeed_api/search.html', context=context)
def results(request):
    """Show every job listing that has not been hidden."""
    # if post - filter by categories
    visible_jobs = JobPost.objects.filter(listing_hidden=False)
    return render(
        request,
        template_name='indeed_api/results.html',
        context={'jobs': visible_jobs},
    )
def hide_listing(request, listing_pk):
    """Mark the given JobPost as hidden, then return to the results page.

    Raises Http404 when no JobPost with pk=listing_pk exists.
    """
    listing = get_object_or_404(JobPost, pk=listing_pk)
    listing.listing_hidden = True
    listing.save()
    # Consistency fix: every other reverse()/template in this module uses
    # the 'indeed_api' namespace (see index()), so the original
    # 'indeed:results' would raise NoReverseMatch at runtime.
    return HttpResponseRedirect(reverse('indeed_api:results'))
def update_listings(request, listing_pk):
    """Fetch the newest listings via the API runner, then show the results.

    `listing_pk` is accepted (the URLconf supplies it) but unused here.
    """
    # get newest listings, fresh off the press
    api_main()
    # Same namespace fix as hide_listing: the module elsewhere reverses
    # 'indeed_api:...' names, not 'indeed:...'.
    return HttpResponseRedirect(reverse('indeed_api:results'))
|
import argparse
import os
import shutil
import time
import sys
import csv
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import dataloader.data_utils as data_utils
from dataloader.data_utils import get_dataloader
from metrics import AverageMeter, Result
data_sets = data_utils.data_sets
# Inputs have fixed sizes, so let cuDNN benchmark and pick fastest kernels.
cudnn.benchmark = True
parser = argparse.ArgumentParser(description='Depth Estimation')
parser.add_argument('--data', metavar='DATA', default='nyu_data',
                    help='dataset directory: (nyudepthv2/kitti/make3d)')
parser.add_argument('-s', '--num-samples', default=0, type=int, metavar='N',
                    help='number of sparse depth samples (default: 0)')
parser.add_argument('-j', '--workers', default=10, type=int, metavar='N',
                    help='number of data loading workers (default: 10)')
# Help texts below were stale ("default: 30" / "default: 8"); they now match
# the actual defaults.
parser.add_argument('--epochs', default=50, type=int, metavar='N',
                    help='number of total epochs to run (default: 50)')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=16, type=int,
                    help='mini-batch size (default: 16)')
parser.add_argument('--lr', '--learning-rate', default=0.01, type=float,
                    metavar='LR', help='initial learning rate (default 0.01)')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
                    metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
                    default=True, help='use ImageNet pre-trained weights (default: True)')
parser.add_argument('--optimizer', default='sgd', type=str, required=True, help='optimizer option')
parser.add_argument('--activation', default='relu', type=str, required=True, help='activation option')
parser.add_argument('--dataset', default='nyudepth',choices=data_sets, type=str, required=True, help='datasets option')
model_names = ['mobilenet_v2']
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# Column order for the metrics CSV written by evaluate().
fieldnames = ['mse', 'rmse', 'absrel', 'lg10', 'mae', 'delta1', 'delta2', 'delta3', 'data_time', 'gpu_time']
best_result = Result()
best_result.set_to_worst()
def evaluate(val_loader, model, epoch, write_to_file=True):
    """Run `model` over the validation set and print/record averaged metrics.

    Per-batch Result metrics are averaged by AverageMeter; when
    write_to_file is True, one row is appended to the global `test_csv`.
    """
    print(model)
    average_meter = AverageMeter()
    model.eval()
    with torch.no_grad():  # inference only: skip autograd bookkeeping
        for i, (rgb_raw, input, target, mask, h5name) in enumerate(val_loader):
            input, target = input.to(device), target.to(device)
            # torch.autograd.Variable has been a no-op wrapper since
            # PyTorch 0.4 (the style this file already uses via .to()),
            # so the tensors are fed directly.
            depth_pred = model(input)
            result = Result()
            # Select prediction channel 0; build the index tensor on the
            # prediction's own device so CPU-only runs also work (the
            # original torch.cuda.LongTensor crashed without a GPU).
            output1 = torch.index_select(
                depth_pred, 1,
                torch.tensor([0], dtype=torch.long, device=depth_pred.device))
            result.evaluate(output1, target)
            average_meter.update(result, input.size(0))
    avg = average_meter.average()
    print('\n*\n'
          'RMSE={average.rmse:.3f}\n'
          'MAE={average.mae:.3f}\n'
          'Delta1={average.delta1:.3f}\n'
          'Delta2={average.delta2:.3f}\n'
          'Delta3={average.delta3:.3f}\n'
          'REL={average.absrel:.3f}\n'
          'Lg10={average.lg10:.3f}\n'.format(
              average=avg))
    if write_to_file:
        with open(test_csv, 'a') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writerow({'mse': avg.mse, 'rmse': avg.rmse, 'absrel': avg.absrel, 'lg10': avg.lg10,
                             'mae': avg.mae, 'delta1': avg.delta1, 'delta2': avg.delta2, 'delta3': avg.delta3})
def main():
    """Entry point: build the validation dataloader and run --evaluate on a saved model."""
    global args, best_result, output_directory, train_csv, test_csv
    args = parser.parse_args()
    if not args.data.startswith('/'):
        args.data = os.path.join('../', args.data)
    output_directory = os.path.join('results',
                                    'Dataset={}.nsample={}.lr={}.bs={}.optimizer={}'.
                                    format(args.dataset, args.num_samples, args.lr, args.batch_size, args.optimizer))
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    test_csv = os.path.join(output_directory, 'test.csv')
    # Data loading
    print("=> creating data loaders ...")
    val_on, val_loader, h, w = get_dataloader(args.dataset, args.data, args.batch_size, args.num_samples, args.workers)
    val_len = len(val_loader)
    out_size = h, w
    print(out_size)
    print("test dataloader len={}".format(val_len))
    print("=> data loaders created.")
    # evaluation -- this script only supports --evaluate; bail out early
    # instead of falling through to the original `model.to(device)` line,
    # which raised NameError because no model exists in this branch.
    if not args.evaluate:
        print("Please specify the evaluation mode: --evaluate ")
        return
    best_model_filename = os.path.join(output_directory, 'mobilenetv2blconv7dw_0.597.pth.tar')
    if not os.path.isfile(best_model_filename):
        # Bail out: the original continued into evaluate() with `model`
        # undefined, raising NameError.
        print("=> no best model found at '{}'".format(best_model_filename))
        return
    print("=> loading best model '{}'".format(best_model_filename))
    checkpoint = torch.load(best_model_filename)  # map_location={'cuda:0': 'cpu'}
    args.start_epoch = checkpoint['epoch']
    best_result = checkpoint['best_result']
    model = checkpoint['model']
    print("=> loaded model (epoch {})".format(checkpoint['epoch']))
    # Make sure the model sits on the same device the inputs are moved to.
    model.to(device)
    evaluate(val_loader, model, checkpoint['epoch'], write_to_file=True)

if __name__ == '__main__':
    main()
|
import zipfile
import datetime
import os
import json
import salt.client
import django
import django.template
from django.contrib import auth
import integralstor_gridcell
from integralstor_gridcell import gluster_volumes, system_info
from integralstor_utils import config, audit, alerts, lock, db
import integral_view
from integral_view.forms import volume_management_forms, log_management_forms
from integral_view.utils import iv_logging
def view_alerts(request):
return_dict = {}
try:
alerts_list, err = alerts.get_alerts()
if err:
raise Exception(err)
return_dict['alerts_list'] = alerts_list
return django.shortcuts.render_to_response('view_alerts.html', return_dict, context_instance=django.template.context.RequestContext(request))
except Exception, e:
return_dict['base_template'] = "log_base.html"
return_dict["page_title"] = 'System alerts'
return_dict['tab'] = 'view_current_alerts_tab'
return_dict["error"] = 'Error loading system alerts'
return_dict["error_details"] = str(e)
return django.shortcuts.render_to_response("logged_in_error.html", return_dict, context_instance=django.template.context.RequestContext(request))
def view_audit_trail(request):
return_dict = {}
try:
al = None
al, err = audit.get_entries()
if err:
raise Exception(err)
if err:
raise Exception(err)
return_dict["audit_list"] = al
return django.shortcuts.render_to_response('view_audit_trail.html', return_dict, context_instance=django.template.context.RequestContext(request))
except Exception, e:
return_dict['base_template'] = "log_base.html"
return_dict["page_title"] = 'System audit trail'
return_dict['tab'] = 'view_current_audit_tab'
return_dict["error"] = 'Error loading system audit trail'
return_dict["error_details"] = str(e)
return django.shortcuts.render_to_response("logged_in_error.html", return_dict, context_instance=django.template.context.RequestContext(request))
def rotate_log(request, log_type=None):
return_dict = {}
try:
if log_type not in ["alerts", "audit_trail"]:
raise Exception("Unknown log type")
return django.shortcuts.render_to_response('logged_in_error.html', return_dict, context_instance=django.template.context.RequestContext(request))
if log_type == "alerts":
return_dict['tab'] = 'view_current_alerts_tab'
return_dict["page_title"] = 'Rotate system alerts log'
ret, err = alerts.rotate_alerts()
if err:
raise Exception(err)
return_dict["message"] = "Alerts log successfully rotated."
return django.http.HttpResponseRedirect("/view_rotated_log_list/alerts?success=true")
elif log_type == "audit_trail":
return_dict['tab'] = 'view_current_audit_tab'
return_dict["page_title"] = 'Rotate system audit trail'
ret, err = audit.rotate_audit_trail()
if err:
raise Exception(err)
return_dict["message"] = "Audit trail successfully rotated."
return django.http.HttpResponseRedirect("/view_rotated_log_list/audit_trail/?success=true")
except Exception, e:
return_dict['base_template'] = "log_base.html"
return_dict["error"] = 'Error rotating log'
return_dict["error_details"] = str(e)
return django.shortcuts.render_to_response("logged_in_error.html", return_dict, context_instance=django.template.context.RequestContext(request))
def download_vol_log(request):
""" Used to download the volume log of a particular volume whose name is in the vol_name post parameter"""
return_dict = {}
try:
gluster_lck, err = lock.get_lock('gluster_commands')
if err:
raise Exception(err)
if not gluster_lck:
raise Exception(
'This action cannot be performed as an underlying storage command is being run. Please retry this operation after a few seconds.')
return_dict['base_template'] = 'log_base.html'
return_dict["page_title"] = 'Download volume logs'
return_dict['tab'] = 'volume_log_download_tab'
return_dict["error"] = 'Error downloading volume logs'
vil, err = gluster_volumes.get_basic_volume_info_all()
if err:
raise Exception(err)
if not vil:
raise Exception('No volumes detected')
l = []
for v in vil:
l.append(v["name"])
if request.method == 'POST':
form = volume_management_forms.VolumeNameForm(
request.POST, vol_list=l)
if form.is_valid():
cd = form.cleaned_data
vol_name = cd['vol_name']
iv_logging.debug(
"Got volume log download request for %s" % vol_name)
file_name = None
v, err = gluster_volumes.get_basic_volume_info(vol_name)
if err:
raise Exception(err)
if not v:
raise Exception("Could not retrieve volume info")
brick = v["bricks"][0][0]
if not brick:
raise Exception(
"Could not retrieve volume log location - no brick")
l = brick.split(':')
if not l:
raise Exception(
"Could not retrieve volume log location - malformed brick 1")
l1 = l[1].split('/')
if not l1:
raise Exception(
"Could not retrieve volume log location - malformed brick 2")
file_name = '/var/log/glusterfs/bricks/%s-%s-%s.log' % (
l1[1], l1[2], vol_name)
display_name = 'integralstor_gridcell-%s.log' % vol_name
# Formulate the zip file name
zf_name = '/tmp/integralstor_gridcell_volume_%s_log' % vol_name
dt = datetime.datetime.now()
dt_str = dt.strftime("%d%m%Y%H%M%S")
zf_name = zf_name + dt_str + ".zip"
try:
zf = zipfile.ZipFile(zf_name, 'w')
zf.write(file_name, arcname=display_name)
zf.write('/var/log/glusterfs/cli.log', arcname='cli.log')
zf.write('/var/log/glusterfs/cmd_history.log',
arcname='cmd_history.log')
zf.write('/var/log/glusterfs/etc-glusterfs-glusterd.vol.log',
arcname='etc-glusterfs-glusterd.vol.log')
zf.write('/var/log/glusterfs/', arcname='')
zf.write('/var/log/glusterfs/', arcname='')
zf.close()
except Exception as e:
raise Exception("Error generating zip file : %s" % str(e))
response = django.http.HttpResponse()
response['Content-disposition'] = 'attachment; filename=integralstor_gridcell_volume_%s_log_%s.zip' % (
vol_name, dt_str)
response['Content-type'] = 'application/x-compressed'
try:
with open(zf_name, 'rb') as f:
byte = f.read(1)
while byte:
response.write(byte)
byte = f.read(1)
response.flush()
except Exception as e:
raise Exception(
"Error compressing remote log file : %s" % str(e))
return response
else:
form = volume_management_forms.VolumeNameForm(vol_list=l)
# either a get or an invalid form so send back form
return_dict['form'] = form
return_dict['op'] = 'download_log'
return django.shortcuts.render_to_response('download_vol_log_form.html', return_dict, context_instance=django.template.context.RequestContext(request))
except Exception, e:
s = str(e)
if "Another transaction is in progress".lower() in s.lower():
return_dict["error"] = "An underlying storage operation has locked a volume so we are unable to process this request. Please try after a couple of seconds"
else:
return_dict["error"] = "An error occurred when processing your request : %s" % s
return django.shortcuts.render_to_response("logged_in_error.html", return_dict, context_instance=django.template.context.RequestContext(request))
finally:
lock.release_lock('gluster_commands')
def download_sys_log(request):
""" Download the system log of the type specified in sys_log_type POST param for the node specified in the hostname POST parameter.
This calls the /sys_log via an http request on that node to get the info"""
return_dict = {}
try:
gluster_lck, err = lock.get_lock('gluster_commands')
if err:
raise Exception(err)
if not gluster_lck:
raise Exception(
'This action cannot be performed as an underlying storage command is being run. Please retry this operation after a few seconds.')
return_dict['base_template'] = "log_base.html"
return_dict["page_title"] = 'Download system logs'
return_dict['tab'] = 'download_system_logs_tab'
return_dict["error"] = 'Error downloading system logs'
si, err = system_info.load_system_config()
if err:
raise Exception(err)
if not si:
raise Exception('Could not load system configuration')
form = log_management_forms.SystemLogsForm(
request.POST or None, system_config=si)
if request.method == 'POST':
if form.is_valid():
cd = form.cleaned_data
sys_log_type = cd['sys_log_type']
hostname = cd["hostname"]
iv_logging.debug("Got sys log download request for type %s hostname %s" % (
sys_log_type, hostname))
#fn = {'boot':'/var/log/boot.log', 'dmesg':'/var/log/dmesg', 'message':'/var/log/messages', 'smb':'/var/log/smblog.vfs', 'winbind':'/var/log/samba/log.winbindd','ctdb':'/var/log/log.ctdb'}
#dn = {'boot':'boot.log', 'dmesg':'dmesg', 'message':'messages','smb':'samba_logs','winbind':'winbind_logs','ctdb':'ctdb_logs'}
fn = {'boot': '/var/log/boot.log', 'dmesg': '/var/log/dmesg', 'message': '/var/log/messages',
'smb': '/var/log/smblog.vfs', 'winbind': '/var/log/samba/log.winbindd'}
dn = {'boot': 'boot.log', 'dmesg': 'dmesg', 'message': 'messages',
'smb': 'samba_logs', 'winbind': 'winbind_logs'}
file_name = fn[sys_log_type]
display_name = dn[sys_log_type]
client = salt.client.LocalClient()
ret = client.cmd('%s' % (hostname), 'cp.push', [file_name])
print ret
zf_name = '%s.zip' % display_name
try:
zf = zipfile.ZipFile(zf_name, 'w')
zf.write("/var/cache/salt/master/minions/%s/files/%s" %
(hostname, file_name), arcname=display_name)
zf.close()
except Exception as e:
raise Exception(
"Error compressing remote log file : %s" % str(e))
response = django.http.HttpResponse()
response['Content-disposition'] = 'attachment; filename=%s.zip' % (
display_name)
response['Content-type'] = 'application/x-compressed'
with open(zf_name, 'rb') as f:
byte = f.read(1)
while byte:
response.write(byte)
byte = f.read(1)
response.flush()
return response
# either a get or an invalid form so send back form
return_dict['form'] = form
return django.shortcuts.render_to_response('download_sys_log_form.html', return_dict, context_instance=django.template.context.RequestContext(request))
except Exception, e:
s = str(e)
if "Another transaction is in progress".lower() in s.lower():
return_dict["error"] = "An underlying storage operation has locked a volume so we are unable to process this request. Please try after a couple of seconds"
else:
return_dict["error"] = "An error occurred when processing your request : %s" % s
return django.shortcuts.render_to_response("logged_in_error.html", return_dict, context_instance=django.template.context.RequestContext(request))
finally:
lock.release_lock('gluster_commands')
def view_rotated_log_list(request, *args):
    """List the rotated (historical) log files for the given log type.

    args[0] must be "alerts" or "audit_trail"; renders the file list so the
    user can pick one to open via view_rotated_log_file().
    """
    return_dict = {}
    try:
        log_type = ''
        if args:
            log_type = args[0]
        if log_type not in ["alerts", "audit_trail"]:
            raise Exception("Unknown log type")
        l = None
        if log_type == "alerts":
            return_dict["page_title"] = 'View rotated alerts logs'
            return_dict['tab'] = 'view_rotated_alert_log_list_tab'
            return_dict["page_header"] = "Logging"
            return_dict["page_sub_header"] = "View historical alerts log"
            l, err = alerts.get_log_file_list()
            if err:
                raise Exception(err)
        elif log_type == "audit_trail":
            return_dict["page_title"] = 'View rotated audit trail logs'
            return_dict['tab'] = 'view_rotated_audit_log_list_tab'
            return_dict["page_header"] = "Logging"
            return_dict["page_sub_header"] = "View historical audit log"
            l, err = audit.get_log_file_list()
            if err:
                raise Exception(err)
        return_dict["type"] = log_type
        return_dict["log_file_list"] = l
        return django.shortcuts.render_to_response('view_rolled_log_list.html', return_dict, context_instance=django.template.context.RequestContext(request))
    except Exception, e:
        return_dict['base_template'] = "log_base.html"
        return_dict["error"] = 'Error displaying rotated log list'
        return_dict["error_details"] = str(e)
        return django.shortcuts.render_to_response("logged_in_error.html", return_dict, context_instance=django.template.context.RequestContext(request))
def view_rotated_log_file(request, *args):
    """Render the contents of one rotated log file chosen via POST.

    args[0] is the log type ("alerts" or "audit_trail"); the file to show
    comes from the "file_name" POST field.
    """
    return_dict = {}
    try:
        log_type = ''
        if args:
            log_type = args[0]
        return_dict['tab'] = 'view_rotated_alert_log_list_tab'
        if log_type not in ["alerts", "audit_trail"]:
            raise Exception("Unknown log type")
        if request.method != "POST":
            raise Exception("Unsupported request")
        if "file_name" not in request.POST:
            raise Exception("Filename not specified")
        file_name = request.POST["file_name"]
        # Flag for the templates that this is a historical (rotated) log.
        return_dict["historical"] = True
        if log_type == "alerts":
            return_dict['tab'] = 'view_rotated_alert_log_list_tab'
            l, err = alerts.get_alerts(file_name)
            if err:
                raise Exception(err)
            return_dict["alerts_list"] = l
            return django.shortcuts.render_to_response('view_alerts.html', return_dict, context_instance=django.template.context.RequestContext(request))
        else:
            return_dict['tab'] = 'view_rotated_audit_log_list_tab'
            d, err = audit.get_lines(file_name)
            if err:
                raise Exception(err)
            return_dict["audit_list"] = d
            return django.shortcuts.render_to_response('view_audit_trail.html', return_dict, context_instance=django.template.context.RequestContext(request))
    except Exception, e:
        return_dict['base_template'] = "log_base.html"
        return_dict["page_title"] = 'View rotated log file'
        return_dict["error"] = 'Error viewing rotated log file'
        return_dict["error_details"] = str(e)
        return django.shortcuts.render_to_response("logged_in_error.html", return_dict, context_instance=django.template.context.RequestContext(request))
def refresh_alerts(request):
ret = None
return_dict = {}
try:
return_dict['base_template'] = "log_base.html"
return_dict["page_title"] = 'System alerts'
return_dict['tab'] = 'view_current_alerts_tab'
return_dict["error"] = 'Error loading system alerts'
from datetime import datetime
cmd_list = []
# this command will insert or update the row value if the row with the
# user exists.
cmd = ["INSERT OR REPLACE INTO admin_alerts (user, last_refresh_time) values (?,?);", (
request.user.username, datetime.now())]
cmd_list.append(cmd)
db_path, err = config.get_db_path()
if err:
raise Exception(err)
test, err = db.execute_iud("%s" % db_path, cmd_list)
if err:
raise Exception(err)
ret, err = alerts.new_alerts()
if err:
raise Exception(err)
if ret:
alerts_list, err = alerts.get_alerts()
if err:
raise Exception(err)
new_alerts = json.dumps([dict(alert=pn) for pn in alerts_list])
return django.http.HttpResponse(new_alerts, mimetype='application/json')
else:
clss = "btn btn-default btn-sm"
message = "View alerts"
return django.http.HttpResponse("No New Alerts")
except Exception, e:
return_dict["error_details"] = "An error occurred when processing your request : %s" % str(
e)
return django.shortcuts.render_to_response("logged_in_error.html", return_dict, context_instance=django.template.context.RequestContext(request))
def raise_alert(request):
try:
return_dict = {}
if "msg" not in request.REQUEST:
raise Exception('No alert message specified.')
msg = request.REQUEST["msg"]
ret, err = alerts.raise_alert(msg)
if err:
raise Exception(err)
return django.http.HttpResponse("Raised alert")
except Exception, e:
return_dict["error"] = "An error occurred when processing your request : %s" % str(
e)
return django.shortcuts.render_to_response("logged_in_error.html", return_dict, context_instance=django.template.context.RequestContext(request))
def internal_audit(request):
    """Record an audit entry posted by the batch requester; plain-text reply."""
    response = django.http.HttpResponse()
    # Guard clauses replace the original nested if/else pyramid.
    if request.method == "GET":
        response.write("Error!")
        return response
    if "who" not in request.POST or request.POST["who"] != "batch":
        response.write("Unknown requester")
        return response
    if ("audit_action" not in request.POST) or ("audit_str" not in request.POST):
        response.write("Insufficient information!")
    else:
        audit.audit(request.POST["audit_action"],
                    request.POST["audit_str"], request)
        response.write("Success")
    return response
def download_system_configuration(request):
""" Download the complete configuration stored in get_config_dir()"""
return_dict = {}
try:
return_dict['base_template'] = "system_base.html"
return_dict["page_title"] = 'Download system configuration'
return_dict['tab'] = 'download_config_tab'
return_dict["error"] = 'Error downloading system configuration'
if request.method == 'POST':
config_dir, err = config.get_config_dir()
if err:
raise Exception(err)
# Remove trailing '/'
if config_dir[len(config_dir) - 1] == '/':
config_dir = config_dir[:len(config_dir) - 1]
display_name = 'integralstor_config'
zf_name = '/tmp/integralstor_config.zip'
zf = zipfile.ZipFile(zf_name, 'w')
top_component = config_dir[config_dir.rfind('/') + 1:]
for dirname, subdirs, files in os.walk(config_dir):
for filename in files:
# print os.path.join(dirname, filename)
absname = os.path.abspath(os.path.join(dirname, filename))
arcname = '%s/%s' % (top_component,
absname[len(config_dir) + 1:])
# print arcname
zf.write(absname, arcname)
zf.close()
response = django.http.HttpResponse()
response['Content-disposition'] = 'attachment; filename=%s.zip' % (
display_name)
response['Content-type'] = 'application/x-compressed'
with open(zf_name, 'rb') as f:
byte = f.read(1)
while byte:
response.write(byte)
byte = f.read(1)
response.flush()
return response
# either a get or an invalid form so send back form
return django.shortcuts.render_to_response('download_system_configuration.html', return_dict, context_instance=django.template.context.RequestContext(request))
except Exception, e:
s = str(e)
if "Another transaction is in progress".lower() in s.lower():
return_dict["error"] = "An underlying storage operation has locked a volume so we are unable to process this request. Please try after a couple of seconds"
else:
return_dict["error"] = "An error occurred when processing your request : %s" % s
return django.shortcuts.render_to_response("logged_in_error.html", return_dict, context_instance=django.template.context.RequestContext(request))
'''
def edit_integral_view_log_level(request):
return_dict = {}
try:
if request.method == 'POST':
iv_logging.debug("Trying to change Integral View Log settings")
form = log_management_forms.IntegralViewLoggingForm(request.POST)
if form.is_valid():
iv_logging.debug("Trying to change Integral View Log settings - form valid")
cd = form.cleaned_data
log_level = int(cd['log_level'])
iv_logging.debug("Trying to change Integral View Log settings - log level is %d"%log_level)
try:
iv_logging.set_log_level(log_level)
except Exception, e:
raise Exception('Error setting log level : %s'%e)
iv_logging.debug("Trying to change Integral View Log settings - changed log level")
return django.http.HttpResponseRedirect("/show/integral_view_log_level?saved=1")
else:
init = {}
init['log_level'] = iv_logging.get_log_level()
form = log_management_forms.IntegralViewLoggingForm(initial=init)
return_dict['form'] = form
return django.shortcuts.render_to_response('edit_integral_view_log_level.html', return_dict, context_instance=django.template.context.RequestContext(request))
except Exception, e:
s = str(e)
if "Another transaction is in progress".lower() in s.lower():
return_dict["error"] = "An underlying storage operation has locked a volume so we are unable to process this request. Please try after a couple of seconds"
else:
return_dict["error"] = "An error occurred when processing your request : %s"%s
return django.shortcuts.render_to_response("logged_in_error.html", return_dict, context_instance=django.template.context.RequestContext(request))
def rotate_log(request, *args):
return_dict = {}
try:
log_type = ''
if args:
log_type = args[0]
return_dict['base_template'] = "system_log_base.html"
return_dict["error"] = 'Error rotating logs'
if log_type not in ["alerts", "audit_trail"]:
raise Exception("Unknown log type")
if log_type == "alerts":
return_dict['tab'] = 'system_log_alert_tab'
return_dict["page_title"] = 'Rotate system alerts log'
ret, err = alerts.rotate_alerts()
if err:
raise Exception(err)
return_dict["message"] = "Alerts log successfully rotated."
return django.http.HttpResponseRedirect("/view_rotated_log_list/alerts?success=true")
elif log_type == "audit_trail":
return_dict['tab'] = 'system_log_view_current_audit_tab'
return_dict["page_title"] = 'Rotate system audit trail'
ret, err = audit.rotate_audit_trail()
if err:
raise Exception(err)
return_dict["message"] = "Audit trail successfully rotated."
return django.http.HttpResponseRedirect("/view_rotated_log_list/audit_trail/?success=true")
except Exception, e:
return_dict["error_details"] = str(e)
return django.shortcuts.render_to_response("logged_in_error.html", return_dict, context_instance=django.template.context.RequestContext(request))
def view_rotated_log_list(request, log_type):
return_dict = {}
try:
return_dict['base_template'] = "system_log_base.html"
if log_type not in ["alerts", "audit_trail"]:
raise Exception("Unknown log type")
l = None
if log_type == "alerts":
return_dict["page_title"] = 'View rotated alerts logs'
return_dict['tab'] = 'system_log_view_older_alerts_tab'
return_dict["page_header"] = "Logging"
return_dict["page_sub_header"] = "View historical alerts log"
l, err = alerts.get_log_file_list()
if err:
raise Exception(err)
elif log_type == "audit_trail":
return_dict["page_title"] = 'View rotated audit trail logs'
return_dict['tab'] = 'system_log_view_older_audit_tab'
return_dict["page_header"] = "Logging"
return_dict["page_sub_header"] = "View historical audit log"
l, err = audit.get_log_file_list()
if err:
raise Exception(err)
return_dict["type"] = log_type
return_dict["log_file_list"] = l
return django.shortcuts.render_to_response('view_rolled_log_list.html', return_dict, context_instance = django.template.context.RequestContext(request))
except Exception, e:
return_dict["error"] = 'Error displaying rotated log list'
return_dict["error_details"] = str(e)
return django.shortcuts.render_to_response("logged_in_error.html", return_dict, context_instance=django.template.context.RequestContext(request))
def view_rotated_log_file(request, log_type):
return_dict = {}
try:
return_dict['base_template'] = "volume_log_base.html"
return_dict["page_title"] = 'View rotated log files'
if not log_type:
raise Exception('Unspecified log type')
if log_type not in ["alerts", "audit_trail"]:
raise Exception('Unrecognized log type')
if request.method != "POST":
raise Exception('Unsupported request type')
if "file_name" not in request.POST:
raise Exception('Filename not specified')
file_name = request.POST["file_name"]
if log_type == "alerts":
l, err = alerts.get_alerts(file_name)
if err:
raise Exception(err)
return_dict['tab'] = 'system_log_view_older_alerts_tab'
return_dict["alerts_list"] = l
return_dict["historical"] = True
return django.shortcuts.render_to_response('view_alerts.html', return_dict, context_instance = django.template.context.RequestContext(request))
else:
d, err = audit.get_lines(file_name)
if err:
raise Exception(err)
return_dict['tab'] = 'system_log_view_older_audit_tab'
return_dict["audit_list"] = d
return_dict["historical"] = True
return django.shortcuts.render_to_response('view_audit_trail.html', return_dict, context_instance = django.template.context.RequestContext(request))
except Exception, e:
return_dict["error"] = 'Error viewing rotated log files'
s = str(e)
if "Another transaction is in progress".lower() in s.lower():
return_dict["error"] = "An underlying storage operation has locked a volume so we are unable to process this request. Please try after a couple of seconds"
else:
return_dict["error"] = "An error occurred when processing your request : %s"%s
return django.shortcuts.render_to_response("logged_in_error.html", return_dict, context_instance=django.template.context.RequestContext(request))
'''
'''
def sys_log(request, log_type = None):
""" Invoked by a node in order to deliver the sys log to the remote node. Shd not normally be called from a browser """
if not log_type:
return None
fn = {'boot':'/var/log/boot.log', 'dmesg':'/var/log/dmesg', 'message':'/var/log/messages'}
dn = {'boot':'boot.log', 'dmesg':'dmesg', 'message':'messages'}
file_name = fn[log_type]
display_name = dn[log_type]
zf_name = '/tmp/%s'%log_type
dt = datetime.datetime.now()
dt_str = dt.strftime("%d%m%Y%H%M%S")
zf_name = zf_name + dt_str +".zip"
try:
zf = zipfile.ZipFile(zf_name, 'w')
zf.write(file_name, arcname = display_name)
zf.close()
except Exception as e:
return None
try:
response = django.http.HttpResponse()
response['Content-disposition'] = 'attachment; filename=%s%s.zip'%(log_type, dt_str)
response['Content-type'] = 'application/x-compressed'
with open(zf_name, 'rb') as f:
byte = f.read(1)
while byte:
response.write(byte)
byte = f.read(1)
response.flush()
except Exception as e:
return None
return response
url = "http://%s:8000/sys_log/%s"%(hostname, sys_log_type)
d = download.url_download(url)
if d["error"]:
return_dict["error"] = d["error"]
return django.shortcuts.render_to_response('logged_in_error.html', return_dict, context_instance = django.template.context.RequestContext(request))
fn = {'boot':'/var/log/boot.log', 'dmesg':'/var/log/dmesg', 'message':'/var/log/messages'}
dn = {'boot':'boot.log', 'dmesg':'dmesg', 'message':'messages'}
file_name = fn[sys_log_type]
display_name = dn[sys_log_type]
zf_name = '/tmp/%s'%sys_log_type
try:
response = django.http.HttpResponse()
response['Content-disposition'] = d["content-disposition"]
response['Content-type'] = 'application/x-compressed'
response.write(d["content"])
response.flush()
except Exception as e:
return_dict["error"] = "Error requesting log file: %s"%str(e)
return django.shortcuts.render_to_response('logged_in_error.html', return_dict, context_instance = django.template.context.RequestContext(request))
return response
'''
# vim: tabstop=8 softtabstop=0 expandtab ai shiftwidth=4 smarttab
|
from sales import calc_shipping, calc_tax  # import the functions directly
import sales  # import the module; functions are reached as sales.<name>

# Call via the module namespace.
sales.calc_shipping()
sales.calc_tax()  # FIX: was `sale.calc_tax()` — NameError, `sale` is undefined

# Call the directly-imported names.
calc_shipping()
calc_tax()
import json as jsonlib
import operator
import re
from abc import ABC
from enum import Enum
from functools import reduce
from http.cookies import SimpleCookie
from types import MappingProxyType
from typing import (
Any,
Callable,
ClassVar,
Dict,
List,
Optional,
Pattern as RegexPattern,
Sequence,
Set,
Tuple,
Type,
Union,
)
from unittest.mock import ANY
from urllib.parse import urljoin
import httpx
from .types import (
URL as RawURL,
CookieTypes,
HeaderTypes,
QueryParamTypes,
URLPatternTypes,
)
class Lookup(Enum):
    """Lookup strategies a Pattern can use when comparing request values."""
    EQUAL = "eq"
    REGEX = "regex"
    STARTS_WITH = "startswith"
    CONTAINS = "contains"
    IN = "in"
class Match:
    """Outcome of matching a request against a pattern.

    Truthiness reflects whether the match succeeded; ``context`` carries
    any values captured during matching (e.g. regex named groups).
    """

    def __init__(self, matches: bool, **context: Any) -> None:
        self.matches = matches
        self.context = context

    def __bool__(self) -> bool:
        return True if self.matches else False

    def __invert__(self) -> "Match":
        # Flip the result in place and return self so ~match chains nicely.
        self.matches = not self.matches
        return self

    def __repr__(self):  # pragma: nocover
        return "<Match %s>" % (self.matches,)
class Pattern(ABC):
    """Base class for request-matching patterns.

    Subclasses declare a ``key`` (their keyword name) and the ``lookups``
    they support; they implement ``clean`` to normalize the pattern value
    and ``parse`` to extract the comparable value from an httpx request.
    Patterns combine with ``&``, ``|`` and ``~``.
    """

    # Keyword name this pattern registers under (e.g. "host", "path").
    key: ClassVar[str]
    # Lookups supported by this pattern; the first entry is the default.
    lookups: ClassVar[Tuple[Lookup, ...]] = (Lookup.EQUAL,)
    # Lookup selected for this instance.
    lookup: Lookup
    # Optional base pattern matched (and stripped) before this one.
    base: Optional["Pattern"]
    # Cleaned pattern value to compare against.
    value: Any
    # Automatically register all the subclasses in this dict
    __registry: ClassVar[Dict[str, Type["Pattern"]]] = {}
    # Read-only public view of the subclass registry.
    registry = MappingProxyType(__registry)

    def __init_subclass__(cls) -> None:
        """Register each concrete subclass under its unique ``key``."""
        if not getattr(cls, "key", None) or ABC in cls.__bases__:
            return
        if cls.key in cls.__registry:
            raise TypeError(
                "Subclasses of Pattern must define a unique key. "
                f"{cls.key!r} is already defined in {cls.__registry[cls.key]!r}"
            )
        cls.__registry[cls.key] = cls

    def __init__(self, value: Any, lookup: Optional[Lookup] = None) -> None:
        """Validate the lookup (default: first supported) and clean the value."""
        if lookup and lookup not in self.lookups:
            raise NotImplementedError(
                f"{self.key!r} pattern does not support {lookup.value!r} lookup"
            )
        self.lookup = lookup or self.lookups[0]
        self.base = None
        self.value = self.clean(value)

    def __iter__(self):
        # Leaf pattern: iterating yields just itself (combinators flatten).
        yield self

    def __bool__(self):
        return True

    def __and__(self, other: "Pattern") -> "Pattern":
        # Falsy (noop) operands are dropped rather than combined.
        if not bool(other):
            return self
        elif not bool(self):
            return other
        return _And((self, other))

    def __or__(self, other: "Pattern") -> "Pattern":
        # Falsy (noop) operands are dropped rather than combined.
        if not bool(other):
            return self
        elif not bool(self):
            return other
        return _Or((self, other))

    def __invert__(self):
        if not bool(self):
            return self
        return _Invert(self)

    def __repr__(self):  # pragma: nocover
        return f"<{self.__class__.__name__} {self.lookup.value} {repr(self.value)}>"

    def __hash__(self):
        return hash((self.__class__, self.lookup, self.value))

    def __eq__(self, other: object) -> bool:
        # Equality is defined through the (class, lookup, value) hash.
        return hash(self) == hash(other)

    def clean(self, value: Any) -> Any:
        """
        Clean and return pattern value.
        """
        return value

    def parse(self, request: httpx.Request) -> Any:  # pragma: nocover
        """
        Parse and return request value to match with pattern value.
        """
        raise NotImplementedError()

    def strip_base(self, value: Any) -> Any:  # pragma: nocover
        # Hook for subclasses to remove an already-matched base prefix.
        return value

    def match(self, request: httpx.Request) -> Match:
        """Match *request* against this pattern, including any base pattern."""
        try:
            value = self.parse(request)
        except Exception:
            # A request value that cannot be parsed simply fails the match.
            return Match(False)
        # Match and strip base
        if self.base:
            base_match = self.base._match(value)
            if not base_match:
                return base_match
            value = self.strip_base(value)
        return self._match(value)

    def _match(self, value: Any) -> Match:
        # Dispatch to the lookup-specific implementation, e.g. _eq/_regex/_in.
        lookup_method = getattr(self, f"_{self.lookup.value}")
        return lookup_method(value)

    def _eq(self, value: Any) -> Match:
        return Match(value == self.value)

    def _regex(self, value: str) -> Match:
        match = self.value.search(value)
        if match is None:
            return Match(False)
        # Named groups become match context for the caller.
        return Match(True, **match.groupdict())

    def _startswith(self, value: str) -> Match:
        return Match(value.startswith(self.value))

    def _contains(self, value: Any) -> Match:  # pragma: nocover
        raise NotImplementedError()

    def _in(self, value: Any) -> Match:
        return Match(value in self.value)
class Noop(Pattern):
    """Placeholder pattern: matches everything yet counts as "no pattern"."""

    def __init__(self) -> None:
        super().__init__(None)

    def __repr__(self):
        return "<%s>" % self.__class__.__name__

    def __bool__(self) -> bool:
        # Falsy, so filtering/combining treats this pattern as absent.
        return False

    def match(self, request: httpx.Request) -> Match:
        # Inside a combined pattern, behave as an unconditional success.
        return Match(True)
class PathPattern(Pattern):
    """Pattern that can additionally target a nested location via *path*."""

    path: Optional[str]

    def __init__(
        self, value: Any, lookup: Optional[Lookup] = None, *, path: Optional[str] = None
    ) -> None:
        # Record the path before delegating, so clean() can rely on it.
        self.path = path
        super().__init__(value, lookup)
class _And(Pattern):
    """Logical AND of two patterns; contexts from both sides are merged."""

    value: Tuple[Pattern, Pattern]

    def __repr__(self):  # pragma: nocover
        left, right = self.value
        return f"{left!r} AND {right!r}"

    def __iter__(self):
        left, right = self.value
        yield from left
        yield from right

    def match(self, request: httpx.Request) -> Match:
        left, right = self.value
        first = left.match(request)
        if not first:
            # Short-circuit: the right side is not evaluated on failure.
            return first
        second = right.match(request)
        if not second:
            return second
        merged = dict(first.context)
        merged.update(second.context)
        return Match(True, **merged)
class _Or(Pattern):
    """Logical OR of two patterns; the first success wins."""

    value: Tuple[Pattern, Pattern]

    def __repr__(self):  # pragma: nocover
        left, right = self.value
        return f"{left!r} OR {right!r}"

    def __iter__(self):
        left, right = self.value
        yield from left
        yield from right

    def match(self, request: httpx.Request) -> Match:
        left, right = self.value
        result = left.match(request)
        if result:
            return result
        # Fall back to the right-hand pattern when the left one fails.
        return right.match(request)
class _Invert(Pattern):
    """Negation of a wrapped pattern."""

    value: Pattern

    def __repr__(self):  # pragma: nocover
        return "NOT %r" % (self.value,)

    def __iter__(self):
        yield from self.value

    def match(self, request: httpx.Request) -> Match:
        # Delegate to the wrapped pattern and flip its result.
        return ~self.value.match(request)
class Method(Pattern):
    """Match the HTTP method, e.g. ``"GET"``, or one of several methods."""

    key = "method"
    lookups = (Lookup.EQUAL, Lookup.IN)
    value: Union[str, Sequence[str]]

    def clean(self, value: Union[str, Sequence[str]]) -> Union[str, Sequence[str]]:
        # Normalize to upper case; a lone string stays a string, any other
        # sequence becomes a tuple of upper-cased entries.
        if isinstance(value, str):
            return value.upper()
        assert isinstance(value, Sequence)
        return tuple(item.upper() for item in value)

    def parse(self, request: httpx.Request) -> str:
        return request.method
class MultiItemsMixin:
    """Shared eq/contains logic for multi-valued mappings (headers, params)."""

    lookup: Lookup
    value: Any

    def _multi_items(
        self, value: Any, *, parse_any: bool = False
    ) -> Tuple[Tuple[str, Tuple[Any, ...]], ...]:
        # Normalize to a sorted, hashable ((key, (values, ...)), ...) structure.
        items = []
        for key in sorted(value.keys()):
            values = tuple(
                ANY if parse_any and v == str(ANY) else v
                for v in value.get_list(key)
            )
            items.append((key, values))
        return tuple(items)

    def __hash__(self):
        return hash((self.__class__, self.lookup, self._multi_items(self.value)))

    def _eq(self, value: Any) -> Match:
        # Substitute mock.ANY placeholders on the pattern side before comparing.
        expected = self._multi_items(self.value, parse_any=True)
        actual = self._multi_items(value)
        return Match(expected == actual)

    def _contains(self, value: Any) -> Match:
        if len(self.value.multi_items()) > len(value.multi_items()):
            # More expected entries than present ones: containment impossible.
            return Match(False)
        expected = self._multi_items(self.value, parse_any=True)
        actual = self._multi_items(value)
        for item in expected:
            if item not in actual:
                return Match(False)
        return Match(True)
class Headers(MultiItemsMixin, Pattern):
    """Match request headers."""

    key = "headers"
    lookups = (Lookup.CONTAINS, Lookup.EQUAL)
    value: httpx.Headers

    def clean(self, value: HeaderTypes) -> httpx.Headers:
        # Normalize any accepted header representation to httpx.Headers.
        return httpx.Headers(value)

    def parse(self, request: httpx.Request) -> httpx.Headers:
        return request.headers
class Cookies(Pattern):
    """Match request cookies as a set of ``(name, value)`` pairs."""

    key = "cookies"
    lookups = (Lookup.CONTAINS, Lookup.EQUAL)
    value: Set[Tuple[str, str]]

    def __hash__(self):
        return hash((self.__class__, self.lookup, tuple(sorted(self.value))))

    def clean(self, value: CookieTypes) -> Set[Tuple[str, str]]:
        # Accept either a mapping or an iterable of (name, value) pairs.
        if isinstance(value, dict):
            value = value.items()
        return set(value)

    def parse(self, request: httpx.Request) -> Set[Tuple[str, str]]:
        # Re-parse the raw Cookie header into (name, value) pairs.
        raw = request.headers.get("cookie")
        if not raw:
            return set()
        jar: SimpleCookie = SimpleCookie()
        jar.load(rawdata=raw)
        return {(morsel.key, morsel.value) for morsel in jar.values()}

    def _contains(self, value: Set[Tuple[str, str]]) -> Match:
        # A non-empty intersection counts as a match.
        return Match(bool(self.value & value))
class Scheme(Pattern):
    """Match the URL scheme, e.g. ``"https"``, or one of several schemes."""

    key = "scheme"
    lookups = (Lookup.EQUAL, Lookup.IN)
    value: Union[str, Sequence[str]]

    def clean(self, value: Union[str, Sequence[str]]) -> Union[str, Sequence[str]]:
        # Normalize to lower case; a lone string stays a string, any other
        # sequence becomes a tuple of lower-cased entries.
        if isinstance(value, str):
            return value.lower()
        assert isinstance(value, Sequence)
        return tuple(item.lower() for item in value)

    def parse(self, request: httpx.Request) -> str:
        return request.url.scheme
class Host(Pattern):
    """Match the request host by equality, regex or membership."""

    key = "host"
    lookups = (Lookup.EQUAL, Lookup.REGEX, Lookup.IN)
    value: Union[str, RegexPattern[str], Sequence[str]]

    def clean(
        self, value: Union[str, RegexPattern[str]]
    ) -> Union[str, RegexPattern[str]]:
        # For regex lookups, accept a plain string and compile it here.
        needs_compile = self.lookup is Lookup.REGEX and isinstance(value, str)
        return re.compile(value) if needs_compile else value

    def parse(self, request: httpx.Request) -> str:
        return request.url.host
class Port(Pattern):
    """Match the request port, falling back to the scheme's default port."""

    key = "port"
    lookups = (Lookup.EQUAL, Lookup.IN)
    value: Optional[int]

    def parse(self, request: httpx.Request) -> Optional[int]:
        # An explicit port wins; otherwise use the scheme default.
        explicit = request.url.port
        if explicit:
            return explicit
        return get_scheme_port(request.url.scheme)
class Path(Pattern):
    """Match the URL path by equality, regex, prefix or membership."""

    key = "path"
    lookups = (Lookup.EQUAL, Lookup.REGEX, Lookup.STARTS_WITH, Lookup.IN)
    value: Union[str, Sequence[str], RegexPattern[str]]

    def clean(
        self, value: Union[str, RegexPattern[str]]
    ) -> Union[str, RegexPattern[str]]:
        if self.lookup in (Lookup.EQUAL, Lookup.STARTS_WITH) and isinstance(value, str):
            # Percent encode path, i.e. revert parsed path by httpx.URL.
            # Borrowed from HTTPX's "private" quote and percent_encode utilities.
            safe = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-._~/"
            pieces = []
            for char in value:
                if char in safe:
                    pieces.append(char)
                else:
                    pieces.append(
                        "".join(f"%{byte:02x}" for byte in char.encode("utf-8")).upper()
                    )
            path = urljoin("/", "".join(pieces))  # Ensure leading slash
            return httpx.URL(path).path
        if self.lookup is Lookup.REGEX and isinstance(value, str):
            return re.compile(value)
        return value

    def parse(self, request: httpx.Request) -> str:
        return request.url.path

    def strip_base(self, value: str) -> str:
        if self.base:
            # Drop the base prefix, then re-anchor at "/" if needed.
            remainder = value[len(self.base.value):]
            value = remainder if remainder.startswith("/") else "/" + remainder
        return value
class Params(MultiItemsMixin, Pattern):
    """Match the URL query parameters."""

    key = "params"
    lookups = (Lookup.CONTAINS, Lookup.EQUAL)
    value: httpx.QueryParams

    def clean(self, value: QueryParamTypes) -> httpx.QueryParams:
        return httpx.QueryParams(value)

    def parse(self, request: httpx.Request) -> httpx.QueryParams:
        # Re-parse the raw query string from the request URL.
        return httpx.QueryParams(request.url.query)
class URL(Pattern):
    """Match the full request URL by equality, regex or prefix."""

    key = "url"
    lookups = (
        Lookup.EQUAL,
        Lookup.REGEX,
        Lookup.STARTS_WITH,
    )
    value: Union[str, RegexPattern[str]]

    def clean(self, value: URLPatternTypes) -> Union[str, RegexPattern[str]]:
        url: Union[str, RegexPattern[str]]
        if self.lookup is Lookup.EQUAL and isinstance(value, (str, tuple, httpx.URL)):
            # Normalize to a canonical string with an explicit "/" path.
            url = str(self._ensure_path(parse_url(value)))
        elif self.lookup is Lookup.REGEX and isinstance(value, str):
            url = re.compile(value)
        elif isinstance(value, (str, RegexPattern)):
            url = value
        else:
            raise ValueError(f"Invalid url: {value!r}")
        return url

    def parse(self, request: httpx.Request) -> str:
        return str(self._ensure_path(request.url))

    def _ensure_path(self, url: httpx.URL) -> httpx.URL:
        # Make an empty raw path explicit so string comparison is stable.
        if not url._uri_reference.path:
            url = url.copy_with(path="/")
        return url
class ContentMixin:
    """Mixin providing request-body extraction for content-based patterns."""

    def parse(self, request: httpx.Request) -> Any:
        # Read the full request body.
        return request.read()
class Content(ContentMixin, Pattern):
    """Match the raw request body as bytes."""

    lookups = (Lookup.EQUAL, Lookup.CONTAINS)
    key = "content"
    value: bytes

    def clean(self, value: Union[bytes, str]) -> bytes:
        # Strings are compared against the body via their encoded form.
        return value.encode() if isinstance(value, str) else value

    def _contains(self, value: Union[bytes, str]) -> Match:
        return Match(self.value in value)
class JSON(ContentMixin, PathPattern):
    """Match the request body parsed as JSON, optionally at a nested path."""

    lookups = (Lookup.EQUAL,)
    key = "json"
    value: str

    def clean(self, value: Union[str, List, Dict]) -> str:
        return self.hash(value)

    def parse(self, request: httpx.Request) -> str:
        document = jsonlib.loads(super().parse(request).decode("utf-8"))
        value = document
        if self.path:
            # Walk "a__b__0"-style paths; all-digit bits index into lists.
            for bit in self.path.split("__"):
                key = int(bit) if bit.isdigit() else bit
                try:
                    value = value[key]
                except KeyError as e:
                    raise KeyError(f"{self.path!r} not in {document!r}") from e
                except IndexError as e:
                    raise IndexError(f"{self.path!r} not in {document!r}") from e
        return self.hash(value)

    def hash(self, value: Union[str, List, Dict]) -> str:
        # Canonical encoding so structurally-equal documents compare equal.
        return jsonlib.dumps(value, sort_keys=True)
class Data(ContentMixin, Pattern):
    """Match form data in the request body."""

    lookups = (Lookup.EQUAL,)
    key = "data"
    value: bytes

    def clean(self, value: Dict) -> bytes:
        # Encode the dict exactly the way httpx would when sending it.
        probe = httpx.Request("POST", "/", data=value)
        return probe.read()
def M(*patterns: Pattern, **lookups: Any) -> Pattern:
    """Build a combined pattern from positional patterns and keyword lookups.

    Keywords use the ``<key>__<lookup>`` convention (e.g. ``path__regex``);
    the special ``url`` keyword is split into per-component base patterns
    and merged onto the result.
    """
    extras = None
    for pattern__lookup, value in lookups.items():
        # Handle url pattern
        if pattern__lookup == "url":
            extras = parse_url_patterns(value)
            continue
        # Parse pattern key and lookup
        pattern_key, __, rest = pattern__lookup.partition("__")
        path, __, lookup_name = rest.rpartition("__")
        if pattern_key not in Pattern.registry:
            raise KeyError(f"{pattern_key!r} is not a valid Pattern")
        # Get pattern class
        P = Pattern.registry[pattern_key]
        pattern: Union[Pattern, PathPattern]
        if issubclass(P, PathPattern):
            # Make path supported pattern, i.e. JSON
            try:
                lookup = Lookup(lookup_name) if lookup_name else None
            except ValueError:
                # Last "__" segment is not a lookup name: treat the whole
                # remainder as the nested path instead.
                lookup = None
                path = rest
            pattern = P(value, lookup=lookup, path=path)
        else:
            # Make regular pattern
            lookup = Lookup(lookup_name) if lookup_name else None
            pattern = P(value, lookup=lookup)
        # Skip patterns with no value, except when using equal lookup
        if not pattern.value and pattern.lookup is not Lookup.EQUAL:
            continue
        patterns += (pattern,)
    # Combine and merge patterns
    combined_pattern = combine(patterns)
    if extras:
        combined_pattern = merge_patterns(combined_pattern, **extras)
    return combined_pattern
def get_scheme_port(scheme: Optional[str]) -> Optional[int]:
    """Return the default port for ``"http"``/``"https"``, else ``None``."""
    defaults = {"http": 80, "https": 443}
    return defaults.get(scheme) if scheme else None
def combine(patterns: Sequence[Pattern], op: Callable = operator.and_) -> Pattern:
    """Fold *patterns* together with *op*, skipping falsy (noop) entries.

    Returns a ``Noop`` pattern when nothing remains to combine.
    """
    remaining = [pattern for pattern in patterns if pattern]
    if not remaining:
        return Noop()
    return reduce(op, remaining)
def parse_url(value: Union[httpx.URL, str, RawURL]) -> httpx.URL:
    """Normalize a URL given as ``httpx.URL``, string, or raw 4-tuple."""
    if not isinstance(value, tuple):
        return httpx.URL(value)
    # Handle "raw" httpcore urls. Borrowed from HTTPX prior to #2241.
    raw_scheme, raw_host, port, raw_path = value
    scheme = raw_scheme.decode("ascii")
    host = raw_host.decode("ascii")
    if host and ":" in host and host[0] != "[":
        # It's a bare IPv6 address, so it must be enclosed in "[" and "]"
        # ref: https://tools.ietf.org/html/rfc2732#section-2
        # ref: https://tools.ietf.org/html/rfc3986#section-3.2.2
        host = f"[{host}]"
    suffix = "" if port is None else f":{port}"
    path = raw_path.decode("ascii")
    return httpx.URL(f"{scheme}://{host}{suffix}{path}")
def parse_url_patterns(
    url: Optional[URLPatternTypes], exact: bool = True
) -> Dict[str, Pattern]:
    """Split a URL (or regex) into per-component base patterns.

    Returns a dict mapping pattern keys (scheme/host/port/path/params) to
    patterns. With ``exact=False``, path and params use prefix/contains
    lookups instead of equality.
    """
    bases: Dict[str, Pattern] = {}
    if not url or url == "all":
        return bases
    if isinstance(url, RegexPattern):
        # A regex matches against the whole URL; no per-component split.
        return {"url": URL(url, lookup=Lookup.REGEX)}
    url = parse_url(url)
    scheme_port = get_scheme_port(url.scheme)
    if url.scheme and url.scheme != "all":
        bases[Scheme.key] = Scheme(url.scheme)
    if url.host:
        # NOTE: Host regex patterns borrowed from HTTPX source to support proxy format
        if url.host.startswith("*."):
            # "*.example.org" -> subdomains only.
            domain = re.escape(url.host[2:])
            regex = re.compile(f"^.+\\.{domain}$")
            bases[Host.key] = Host(regex, lookup=Lookup.REGEX)
        elif url.host.startswith("*"):
            # "*example.org" -> the domain itself or any subdomain.
            domain = re.escape(url.host[1:])
            regex = re.compile(f"^(.+\\.)?{domain}$")
            bases[Host.key] = Host(regex, lookup=Lookup.REGEX)
        else:
            bases[Host.key] = Host(url.host)
    if url.port and url.port != scheme_port:
        # Only match explicitly non-default ports.
        bases[Port.key] = Port(url.port)
    if url._uri_reference.path:  # URL.path always returns "/"
        lookup = Lookup.EQUAL if exact else Lookup.STARTS_WITH
        bases[Path.key] = Path(url.path, lookup=lookup)
    if url.query:
        lookup = Lookup.EQUAL if exact else Lookup.CONTAINS
        bases[Params.key] = Params(url.query, lookup=lookup)
    return bases
def merge_patterns(pattern: Pattern, **bases: Pattern) -> Pattern:
    """Attach base (URL-derived) patterns to *pattern*'s sub-patterns.

    Each sub-pattern with a matching key gets the base set as its ``base``;
    leftover bases are AND-ed onto the combined pattern.
    """
    if not bases:
        return pattern
    # Flatten pattern
    patterns: List[Pattern] = list(filter(None, iter(pattern)))
    if patterns:
        if "host" in (_pattern.key for _pattern in patterns):
            # Pattern is "absolute", skip merging
            bases = {}
        else:
            # Traverse pattern and set related base
            for _pattern in patterns:
                base = bases.pop(_pattern.key, None)
                # Skip "exact" base + don't overwrite existing base
                if _pattern.base or base and base.lookup is Lookup.EQUAL:
                    continue
                _pattern.base = base
    if bases:
        # Combine left over base patterns with pattern
        base_pattern = combine(list(bases.values()))
        if pattern and base_pattern:
            pattern = base_pattern & pattern
        else:
            pattern = base_pattern
    return pattern
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from matplotlib import pyplot as plt
from astropy.io import fits
from photutils import CircularAperture
from astroscrappy import detect_cosmics
from astropy.convolution import convolve_fft, Tophat2DKernel
from pyfftw.interfaces.scipy_fftpack import fft2, ifft2
__all__ = ['init_centroids']
def init_centroids(first_image_path, master_flat, master_dark, target_centroid,
                   max_number_stars=10, min_flux=0.2, plots=False):
    """Detect source centroids in the first calibrated image of a series.

    Parameters
    ----------
    first_image_path : str
        Path to the FITS image to analyze.
    master_flat, master_dark : array-like
        Calibration frames, applied as ``(image - dark) / flat``.
    target_centroid : array-like
        NOTE(review): currently unused here; the code that consumed it
        appears to be commented out below this function — confirm intent.
    max_number_stars, min_flux :
        NOTE(review): currently unused (see commented-out selection code).
    plots : bool
        If True, display detected apertures over the image.

    Returns
    -------
    numpy.ndarray
        2 x N array of detected positions, sorted brightest-first.
    """
    # Calibrate: dark-subtract then flat-field the raw frame.
    first_image = (fits.getdata(first_image_path) - master_dark)/master_flat
    # Clean cosmic rays
    mask, first_image = detect_cosmics(first_image)
    # Smooth with a top-hat kernel to emphasize star-sized features.
    tophat_kernel = Tophat2DKernel(23)
    convolution = convolve_fft(first_image, tophat_kernel, fftn=fft2, ifftn=ifft2)
    convolution -= np.median(convolution)
    from astropy.stats import mad_std
    # Zero out strong negative outliers (beyond 5 robust sigmas).
    mad = mad_std(convolution)
    convolution[convolution < -5*mad] = 0.0
    from skimage.filters import threshold_otsu, threshold_yen
    from skimage.measure import label, regionprops
    # Threshold at one third of the Yen threshold to keep faint sources.
    thresh = threshold_yen(convolution)/3.0
    masked = np.ones_like(convolution)
    masked[convolution <= thresh] = 0
    label_image = label(masked)
    # plt.figure()
    # plt.imshow(label_image, origin='lower', cmap=plt.cm.viridis)
    # plt.show()
    # Measure each connected region; order centroids by decreasing brightness.
    regions = regionprops(label_image, convolution)
    centroids = [region.weighted_centroid for region in regions]
    intensities = [region.mean_intensity for region in regions]
    centroids = np.array(centroids)[np.argsort(intensities)[::-1]]
    #positions = np.vstack([sources['xcentroid'], sources['ycentroid']])
    # positions = np.array(centroids).T
    # Swap the two centroid coordinates while stacking into a 2 x N array.
    positions = np.vstack([[y for x, y in centroids], [x for x, y in centroids]])
    if plots:
        apertures = CircularAperture(positions, r=12.)
        apertures.plot(color='r', lw=2, alpha=1)
        plt.imshow(first_image, vmin=np.percentile(first_image, 0.01),
                   vmax=np.percentile(first_image, 99.9), cmap=plt.cm.viridis,
                   origin='lower')
        plt.show()
    return positions
# target_index = np.argmin(np.abs(target_centroid - positions), axis=1)[0]
# flux_threshold = sources['flux'] > min_flux * sources['flux'].data[target_index]
#
# fluxes = sources['flux'][flux_threshold]
# positions = positions[:, flux_threshold]
#
# brightest_positions = positions[:, np.argsort(fluxes)[::-1][:max_number_stars]]
# target_index = np.argmin(np.abs(target_centroid - brightest_positions),
# axis=1)[0]
#
# apertures = CircularAperture(positions, r=12.)
# brightest_apertures = CircularAperture(brightest_positions, r=12.)
# apertures.plot(color='b', lw=1, alpha=0.2)
# brightest_apertures.plot(color='r', lw=2, alpha=0.8)
#
# if plots:
# plt.imshow(first_image, vmin=np.percentile(first_image, 0.01),
# vmax=np.percentile(first_image, 99.9), cmap=plt.cm.viridis,
# origin='lower')
# plt.plot(target_centroid[0, 0], target_centroid[1, 0], 's')
#
# plt.show()
#
# # Reorder brightest positions array so that the target comes first
# indices = list(range(brightest_positions.shape[1]))
# indices.pop(target_index)
# indices = [target_index] + indices
# brightest_positions = brightest_positions[:, indices]
#
# return brightest_positions
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
import warnings
from qiskit import assemble
from qiskit.providers import BackendV1 as Backend
from qiskit.providers import Options
from qiskit.providers.models import BackendConfiguration
from qiskit.util import deprecate_arguments
import qubic_job
class QUBICDevice(Backend):
    """Qiskit backend wrapper for the 16-qubit QUBIC device."""

    def __init__(self, provider):
        # Static device capabilities reported to Qiskit.
        configuration = {
            'backend_name': 'qubic_backend',
            'backend_version': '1.0',
            'simulator': False,
            'local': True,
            'coupling_map': None,
            'description': 'Qubic device',
            'basis_gates': ['p', 'sx', 'cnot'],
            'memory': False,
            'n_qubits': 16,
            'conditional': False,
            'max_shots': 200,
            'max_experiments': 1,
            'open_pulse': False,
            'gates': [
                {
                    'name': 'TODO',
                    'parameters': [],
                    'qasm_def': 'TODO'
                }
            ]
        }
        super().__init__(
            configuration=BackendConfiguration.from_dict(configuration),
            provider=provider)

    @classmethod
    def _default_options(cls):
        # Defaults used when run() is called without explicit options.
        return Options(shots=100, ids='DefaultId')

    @deprecate_arguments({'qobj': 'circuit'})
    def run(self, circuit, **kwargs):
        """Assemble *circuit* into a Qobj, submit it as a QUBICJob, return the job.

        Supported keyword options: ``shots`` and ``job_id``; anything else
        triggers a UserWarning.
        """
        for kwarg in kwargs:
            # FIX: the original condition was `kwarg != 'shots' or
            # kwarg != 'job_id'`, which is always true, so even the
            # supported options were warned about.
            if kwarg not in ('shots', 'job_id'):
                warnings.warn(
                    "Option %s is not used by this backend" % kwarg,
                    UserWarning, stacklevel=2)
        out_shots = kwargs.get('shots', self.options.shots)
        job_id = kwargs.get('job_id', self.options.ids)
        qobj = assemble(circuit, shots=out_shots, backend=self)
        job = qubic_job.QUBICJob(self, job_id, qobj)
        job.submit()
        return job
|
# Exercise 5
# Build a dictionary for each of 5 students.
# Each student record holds: name, date of birth, course, and subjects.
# Provide a function to delete a given student from the base.
# Provide a function to insert new students into the base.
# Base data: one dict per student, collected into a list.
aluno1 = {"nome": "Anah", "dt_nasc": "02/02/2002", "curso": "biologia", "cadeiras": ["ab", "cd", "ef"]}
aluno2 = {"nome": "Boby", "dt_nasc": "03/03/2002", "curso": "biologia", "cadeiras": ["ab", "cd", "ef"]}
aluno3 = {"nome": "Carl", "dt_nasc": "04/04/2002", "curso": "biologia", "cadeiras": ["ab", "cd", "ef"]}
aluno4 = {"nome": "Dian", "dt_nasc": "05/05/2002", "curso": "biologia", "cadeiras": ["ab", "cd", "ef"]}
aluno5 = {"nome": "Earl", "dt_nasc": "06/06/2002", "curso": "biologia", "cadeiras": ["ab", "cd", "ef"]}
base = [aluno1, aluno2, aluno3, aluno4, aluno5]
# apagar aluno
def exibir(base):
    """Print each student as "ID: n | field | field | ..." where the
    ID is the 1-based position in *base*."""
    for idx, aluno in enumerate(base, start=1):
        linha = "ID: " + str(idx) + " | "
        for valor in aluno.values():
            linha = linha + str(valor) + " | "
        print(linha)
# exibir (base)
def apagar_aluno(base):
    """Ask for a 1-based student ID and remove that entry from *base*.
    Returns the (mutated) base."""
    idx = int(input("Digite o ID do aluno a ser excluído da base: "))
    del base[idx - 1]
    print("Estudante removido(a)!")
    return base
#exibir (base)
#base = apagar_aluno (base)
#exibir (base)
# Adicionar aluno
def adicionar(base):
    """Prompt for the fields of a new student, append it to *base* and
    return the (mutated) base."""
    nome = input("Digite o nome: ")
    dt_nasc = input("Digite a data de nascimento: ")
    curso = input("Digite o curso: ")
    cadeiras = input("Digite as cadeiras, separadas por vírgula: ")
    novo_aluno = {
        "nome": nome,
        "dt_nasc": dt_nasc,
        "curso": curso,
        "cadeiras": cadeiras.split(","),
    }
    base.append(novo_aluno)
    print("Aluno(a) " + novo_aluno["nome"] + " adicionado(a).")
    return base
#base = adicionar (base)
#exibir (base)
# MENU para manipular a base
# Interactive loop: list / delete / add / quit. Note that the
# "Fim" + pause lines run at the end of EVERY iteration, exactly as
# in the original layout.
sair = False
while not sair:
    print("#### Exercício 5 - ")
    print("### Escolha uma opção ###")
    print("|1| Listar")
    print("|2| Apagar")
    print("|3| Adicionar")
    print("|0| Sair")
    opcao = int(input("Digite sua opção: "))
    if opcao == 1:
        exibir(base)
    elif opcao == 2:
        base = apagar_aluno(base)
    elif opcao == 3:
        base = adicionar(base)
    elif opcao == 0:
        sair = True
    print("Fim")
    input("Pressione <Enter> para continuar...")
import streamlit as st
import numpy as np
# from handcalcs.handcalcs import handcalc
# st.write('## Create your transition matrix:')
# text_input = st.text_input('Transition Matrix',help="Separate entries by commas, rows by semicolons")
# if text_input is not None:
# A = np.matrix(text_input)
# st.code(A)
# st.write('{} by {} transition matrix:'.format(np.shape(A)[0],np.shape(A)[1]))
# np.shape(A)
# # st.latex(f"{}x^2 + {b}x + {c} = 0")
# st.latex(r"""\bold{A}""")
# st.metric renders a value with its delta indicator.
st.metric(label="Value", value=.87, delta=.01)
# BUG FIX: st.date_input requires a label as its first argument;
# calling it with no arguments raises TypeError at runtime.
st.date_input("Date")
import promote
import newspaper
from schema import Schema
from newspaper import Article
import nltk
nltk.download('punkt')
# Promote service credentials and endpoint.
# NOTE(review): API_KEY is a placeholder -- replace with a real key,
# ideally loaded from the environment rather than hard-coded.
USERNAME = "colin"
API_KEY = "your_api_key"
PROMOTE_URL = "http://promote.c.yhat.com/"
p = promote.Promote(USERNAME, API_KEY, PROMOTE_URL)
@promote.validate_json(Schema({'url': str}))
def ArticleSummarizer(data):
    """Download the article at data['url'], run newspaper's NLP
    pipeline on it and return {'summary': <text>}."""
    article = Article(url=data['url'], language='en')
    article.download()
    article.parse()
    article.nlp()
    return {"summary": article.summary}
# Sample payload used both for the local smoke test and as the
# example input shipped with the deployment.
TESTURL = "https://new.surfline.com/surf-news/santa-cruz-surf-character-catches-final-wave/10365"
TESTDATA = {"url": TESTURL}
# test the model locally
print(ArticleSummarizer(TESTDATA))
# name and deploy our model
p.deploy("ArticleSummarizer", ArticleSummarizer, TESTDATA, confirm=True, dry_run=False)
# once our model is deployed and online, we can send data and receive predictions
# p.predict("ArticleSummarizer", testdata)
# example result:
# {
# 'summary': 'Never sporting a wetsuit, but always a smile, Marty “The Mechanic”
# Schreiber was an unmistakable personality amongst the Santa Cruz
# surf community.\n“I was really blown away,” said longtime Santa Cruz
# native Ken “Skindog” Collins.\nEven in the dead of winter, he’d brave
# the brutal Santa Cruz water without a wetsuit.\nBut Marty Mechanic was one
# of ‘em.\nHe could be seen driving a truck emblazoned by the words, “Marty
# The Mechanic,” which became a fitting moniker.'
# }
|
from django.db import models
from equipment_accounting_NOF.models import BaseModelAbstract, BaseDictionaryModelAbstract
from locations.models import Locations
from technical_equipments.models import TechnicalEquipments
class SwitchPorts(BaseModelAbstract):
    # One physical port on a switch, optionally linked to the piece of
    # technical equipment plugged into it.
    port_num = models.PositiveIntegerField(verbose_name='Номер порта')
    technical_equipment = models.ForeignKey('technical_equipments.TechnicalEquipments', on_delete=models.PROTECT,
                                            null=True, verbose_name='Устройство')
    switch = models.ForeignKey('Switches', on_delete=models.PROTECT, null=True, verbose_name='Свитч')
    class Meta:
        verbose_name = 'Порт свитча'
        verbose_name_plural = 'Порты свитчей'
class SwitchCabinets(BaseDictionaryModelAbstract):
    # Dictionary entity: a switching cabinet at a given location.
    location = models.ForeignKey('locations.Locations', on_delete=models.PROTECT, null=True,
                                 verbose_name='Местоположение')
    class Meta:
        verbose_name = 'Коммутационный шкаф'
        verbose_name_plural = 'Коммутационные шкафы'
class SwitchModels(BaseDictionaryModelAbstract):
    # Dictionary entity: a switch model and its port count.
    ports_num = models.PositiveIntegerField(verbose_name='Количество портов', null=True)
    class Meta:
        verbose_name = 'Модель свитча'
        verbose_name_plural = 'Модели свитчей'
class Switches(BaseModelAbstract):
    """Network switch installed in a switching cabinet."""
    # NOTE(review): max_length=15 only fits dotted IPv4; consider
    # models.GenericIPAddressField if IPv6 is ever needed.
    # Typo fix in the admin label: 'адресс' -> 'адрес'.
    ip = models.CharField(max_length=15, verbose_name='IP адрес')
    switch_cabinet = models.ForeignKey('SwitchCabinets', on_delete=models.PROTECT, null=True,
                                       verbose_name='Коммутационный шкаф')
    model = models.ForeignKey('SwitchModels', on_delete=models.PROTECT, null=True, verbose_name='Модель свитча')
    class Meta:
        verbose_name = 'Свитч'
        verbose_name_plural = 'Свитчи'
|
#coding=utf-8
import re
def main():
    """Read an e-mail address from stdin and report whether it matches
    the 163.com address rule."""
    email = input("请输入邮箱")
    print("您输入的邮箱是:%s" % email)
    # The dot is escaped so it matches a literal '.'.
    pattern = r"[a-z0-9A-Z]{1,20}@163\.com$"
    if re.match(pattern, email):
        print("您输入的邮箱是:%s*********符合规则" % email)
    else:
        print("您输入的邮箱是:%s,**********不符合规则" % email)


if __name__ == "__main__":
    main()
|
#!/usr/bin/python
'''Implement the cipher/decipher sytems using AES in either
CBC or CTR mode.
In CTR mode, the IV (initialization vector) is incremented
one each time.
'''
__author__ = "Jin Zhang(zj@utexas.edu)"
__version__ = "$Version: 1.0 $"
__date__ = "$Date: 2014/07/14 00:12:15"
__copyright__ = "Copyright: (c) 2014 Jin Zhang"
__license__ = "Python"
from Crypto.Cipher import AES
from Crypto.Util import Counter
from textwrap import wrap
import os
def AESencryptCBC(iv,key,message):
    '''Encrypting text using AES in CBC mode.

    The IV is prepended to the ciphertext and the plaintext is padded
    PKCS#7-style (each pad byte holds the pad length, 1..block_size);
    returns the hex-encoded result.  Python 2 only (byte strings and
    str.encode('hex')).
    '''
    idx = 0
    encrypt_message = iv
    cipher = AES.new(key, AES.MODE_CBC, iv)
    # Encrypt all complete blocks first.
    while(idx + AES.block_size <= len(message)):
        pt = message[idx:idx+AES.block_size]
        ct = cipher.encrypt(pt)
        encrypt_message += ct
        idx += AES.block_size
    # Pad the final (possibly empty) partial block with 'gap' bytes,
    # each of value gap.
    gap = idx + AES.block_size - len(message)
    pt = message[idx:]+chr(gap)*gap
    ct = cipher.encrypt(pt)
    encrypt_message += ct
    return encrypt_message.encode('hex')
def AESdecryptCBC(key,ciphertext):
    '''Decrypting text using AES in CBC mode.

    Expects the IV in the first block of *ciphertext* (as produced by
    AESencryptCBC) and strips the PKCS#7-style padding before the
    plaintext is returned.  Python 2 only.
    '''
    iv = ciphertext[0:AES.block_size]
    idx = AES.block_size
    message = ''
    cipher= AES.new(key, AES.MODE_CBC, iv)
    while (idx + AES.block_size <= len(ciphertext)):
        ct = ciphertext[idx:idx+AES.block_size]
        pt = cipher.decrypt(ct)
        message += pt
        idx += AES.block_size
    # BUG FIX: the original left the padding in the returned plaintext
    # (the stripping code was commented out).  The last byte holds the
    # number of pad bytes appended by AESencryptCBC.
    if message:
        padding = ord(message[-1])
        message = message[:-padding]
    return message
def AESencryptCTR(iv,key,message):
    '''Encrypting text using AES in CTR mode.

    The IV seeds a 128-bit counter and is prepended to the ciphertext;
    returns the hex-encoded result.  CTR is a stream mode, so the
    trailing partial block needs no padding.  Python 2 only.
    '''
    encrypt_message = iv
    idx = 0
    ctr = Counter.new(128,initial_value=int(iv.encode('hex'),16))
    cipher = AES.new(key, AES.MODE_CTR, counter=ctr)
    while(idx + AES.block_size <= len(message)):
        pt = message[idx:idx+AES.block_size]
        ct = cipher.encrypt(pt)
        encrypt_message += ct
        idx += AES.block_size
    if (idx != len(message)):
        # Encrypt whatever is left of the final partial block.
        pt = message[idx:]
        ct = cipher.encrypt(pt)
        encrypt_message += ct
    return encrypt_message.encode('hex')
# decrypt using AES in CTR mode
def AESdecryptCTR(key,ciphertext):
    '''Decrypting text using AES in CTR mode.

    Expects the counter IV in the first block of *ciphertext*; mirrors
    AESencryptCTR (no padding to strip).  Python 2 only.
    '''
    iv = ciphertext[0:AES.block_size]
    idx = AES.block_size
    message = ''
    ctr = Counter.new(128,initial_value=int(iv.encode('hex'),16))
    cipher= AES.new(key, AES.MODE_CTR, counter=ctr)
    while (idx + AES.block_size <= len(ciphertext)):
        ct = ciphertext[idx:idx+AES.block_size]
        pt = cipher.decrypt(ct)
        message += pt
        idx += AES.block_size
    if (idx != len(ciphertext)):
        # Decrypt the trailing partial block.
        ct = ciphertext[idx:]
        pt = cipher.decrypt(ct)
        message += pt
    return message
if __name__ == "__main__":
    # Demo driver (Python 2): round-trips a sample sentence through
    # both modes, then decrypts homework ciphertexts from 'hw2_text'
    # (lines of "CBC: <hex>" / "CTR: <hex>", alternating key and data).
    print 'Implement encrypting/decrypting system using AES in CBC/CTR mode.'
    # NOTE(review): 'input' shadows the builtin input() here.
    input='A block cipher by itself is only suitable for the secure cryptographic transformation\
(encryption or decryption) of one fixed-length group of bits called a block'
    key = os.urandom(AES.block_size)
    iv = os.urandom(AES.block_size)
    print "="*70
    print "The testing input is: "
    for e in wrap(input):
        print e
    print "The cipher key is: ",
    print key.encode('hex')
    print "The IV is: ",
    print iv.encode('hex')
    print "="*70
    print "In CBC mode:"
    print "The encrypt text is:"
    ct = AESencryptCBC(iv,key, input)
    for e in wrap(ct,width=32):
        print e
    print "The decrypt text is:"
    for e in wrap(AESdecryptCBC(key,ct.decode('hex'))):
        print e
    print "="*70
    print "In CTR mode:"
    print "The encrypt text is:"
    ct = AESencryptCTR(iv,key, input)
    for e in wrap(ct,width=32):
        print e
    print "The decrypt text is:"
    for e in wrap(AESdecryptCTR(key,ct.decode('hex'))):
        print e
    print "="*70
    # Homework data: even entries are keys, odd entries ciphertexts.
    cbc = []
    ctr = []
    with open ("hw2_text") as f:
        for line in f:
            data = line[:-1].split()
            if data[0] == 'CBC:':
                cbc.append(data[1].decode('hex'))
            else:
                ctr.append(data[1].decode('hex'))
    print AESdecryptCBC(cbc[0],cbc[1])
    print AESdecryptCBC(cbc[2],cbc[3])
    print AESdecryptCTR(ctr[0],ctr[1])
    print AESdecryptCTR(ctr[2],ctr[3])
|
# user interface that people will use to interact with library
# import filters library into this script
from PIL import Image
# Rename this file to be "filters.py"
# Add commands to import modules here.
# Define your load_img() function here.
# Parameters: The name of the file to be opened (string)
# Returns: The image object with the opened file.
def load_img(filename):
    """Open *filename* and return the resulting PIL Image object."""
    return Image.open(filename)
# Define your show_img() function here.
# Parameters: The image object to display.
# Returns: nothing.
def show_img(image):
    """Display *image* in the platform's default image viewer."""
    image.show()
# Define your save_img() function here.
# Parameters: The image object to save, the name to save the file as (string)
# Returns: nothing.
def save_img(image, filename):
    # Persist *image* under *filename* in JPEG format.
    image.save(filename, "jpeg")
    # NOTE(review): also opens a viewer window after saving -- looks
    # like leftover debugging; confirm before removing.
    show_img(image)
# Define your obamicon() function here.
# Parameters: The image object to apply the filter to.
# Returns: A New Image object with the filter applied.
def obamicon(image):
    """Apply the Obama "Hope" poster palette to *image*.

    Each pixel is mapped to one of four colours based on its summed
    R+G+B intensity (range 0..765).  Returns the recoloured Image;
    also displays it and writes "newImage.jpg", as the original did.
    """
    darkBlue = (0,51,76)
    red = (217, 26, 33)
    lightBlue = (112, 150, 156)
    yellow = (252, 227, 166)
    image_list = list(image.getdata())
    recolored = []
    for pixel in image_list:
        intensity = pixel[0]+pixel[1]+pixel[2]
        # Thresholds partition the intensity range into 4 bands.
        if intensity < 102:
            recolored.append(darkBlue)
        elif intensity < 364:
            recolored.append(red)
        elif intensity < 546:
            recolored.append(lightBlue)
        else:
            recolored.append(yellow)
    new_image = Image.new("RGB",image.size)
    new_image.putdata(recolored)
    new_image.show()
    new_image.save("newImage.jpg","jpeg")
    # BUG FIX: the contract comment above the function promises a new
    # Image object, but the original returned None.
    return new_image
|
from datetime import date
from django.core.exceptions import ValidationError
from django.test import TestCase
from animals.models import Animal, Breed, Pet
class AnimalModelTest(TestCase):
    """Model-layer tests for Animal, Breed and Pet.

    Test data comes from the animals.json fixture.
    """
    fixtures = ['animals.json',]
    def test_animal_string(self):
        # str(Animal) should be the animal's name.
        animal_name = "Dog"
        animal = Animal.objects.get(name=animal_name)
        self.assertEqual(str(animal), animal_name)
    def test_breed_string(self):
        # str(Breed) should be the breed's name.
        breed_name = "German Shepherd"
        breed = Breed.objects.get(name=breed_name)
        self.assertEqual(str(breed), breed_name)
    def test_pet_string(self):
        # str(Pet) should be the pet's name.
        pet_name = "Fido"
        pet = Pet.objects.get(name=pet_name)
        self.assertEqual(str(pet), pet_name)
    def test_breed_name_max_length(self):
        # 50 chars is accepted; 51 must fail full_clean validation.
        char50 = "a" * 50
        animal = Animal.objects.first()
        Breed.objects.create(name=char50, animal=animal)
        breed = Breed.objects.create(name=(char50 + "a"), animal=animal)
        self.assertRaises(ValidationError, breed.full_clean)
    def test_pet_name_max_length(self):
        # 30 chars is accepted; 31 must fail full_clean validation.
        char30 = "a" * 30
        breed = Breed.objects.first()
        birthday = date.today()
        Pet.objects.create(name=char30, breed=breed, birthday=birthday)
        pet = Pet.objects.create(name=(char30 + "a"), breed=breed,
                                 birthday=birthday)
        self.assertRaises(ValidationError, pet.full_clean)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Vinícius Madureira"
__copyright__ = "Copyright 2020, Vinícius Madureira"
__license__ = "Creative Commons Zero 1.0 Universal"
__version__ = "0.01a"
__maintainer__ = "Vinícius Madureira"
__email__ = "viniciusmadureira@outlook.com"
__status__ = "Testing"
from product import Product
"""
Item class: Model for Item type objects.
Each item consists of a product (Product) and its amount (int).
"""
class Item:
    """Order line: a Product plus its amount.

    Invalid assignments through the setters reset the field to None,
    so isValid() reports whether both fields hold usable values.
    """

    def __init__(self, product: Product = None, amount: int = 0):
        self.__product = product
        self.__amount = amount

    @property
    def product(self):
        return self.__product

    @product.setter
    def product(self, product):
        # Only a valid Product instance is accepted; anything else
        # clears the field.
        self.__product = product if isinstance(product, Product) and product.is_valid() else None

    @property
    def amount(self):
        return self.__amount

    @amount.setter
    def amount(self, amount):
        # Accept anything int() can convert, as long as it is > 0.
        self.__amount = None
        try:
            amount = int(amount)
            if amount > 0:
                self.__amount = amount
        except (TypeError, ValueError):
            # BUG FIX: the bare ``except`` also swallowed
            # KeyboardInterrupt/SystemExit; only conversion errors are
            # expected here.
            pass

    def isValid(self):
        # BUG FIX: use identity comparison with None (``is not``)
        # instead of ``!=``.
        return self.__product is not None and self.__amount is not None
|
import numpy as np
import pandas as pd
import pickle
from models import static_clf, dynamic_clf
from sklearn.metrics import f1_score,precision_score,recall_score
def load_data(filepath):
    """Load and shuffle a CSV of static/dynamic token sequences.

    Returns (st_X, dy_X, ed_y, op_y, seq_lens): the X entries are
    lists of space-split tokens, the y entries label lists, and
    seq_lens a numpy array of sequence lengths.
    """
    data = pd.read_csv(filepath).sample(frac=1)
    # The shuffled index is deliberately kept (no reset_index).
    st_X = [row.split(' ') for row in data['static']]
    dy_X = [row.split(' ') for row in data['dynamic']]
    ed_y = list(data['ed'])
    op_y = list(data['op'])
    seq_lens = np.asarray(list(data['seq_lens']))
    return st_X, dy_X, ed_y, op_y, seq_lens
def static_test(X,y,test_X):
    # Fit the static-feature classifier (models.static_clf) on (X, y)
    # and return its class probabilities for test_X.
    clf = static_clf(X,y)
    static_probs = clf.predict_proba(test_X)
    print("\tStatic Probs")
    return static_probs
def dynamic_test(X,seq_lens,y,test_X,test_seq_lens,mode):
    # Load the token->id vocabulary, then train/evaluate the sequence
    # model (models.dynamic_clf) and return its test probabilities.
    with open('data/tok2id.dict','rb') as voc:
        vocab = pickle.load(voc)
    print("\tDynamic Probs")
    dynamic_probs = dynamic_clf(X,seq_lens,y,test_X,test_seq_lens,vocab,mode=mode)
    return dynamic_probs
def model_probs(stat,dyn):
    """Average the two probability matrices and return the argmax
    class per row (simple two-model ensemble)."""
    combined = np.add(stat, dyn) / 2
    return combined.argmax(axis=1)
def metrics(y_true,y_pred):
    # Print standard binary-classification metrics for the predictions.
    print('\tF1 Score:',f1_score(y_true,y_pred))
    print('\tPrecision Score:',precision_score(y_true,y_pred))
    print('\tRecall Score:',recall_score(y_true,y_pred))
    # Hand-rolled accuracy; assumes y_true/y_pred are numpy arrays.
    print('\tAccuracy Score:',sum(y_true == y_pred) / y_pred.shape[0])
def main():
    # Train and evaluate both the static and the dynamic model on the
    # two targets (ED return, opioid overdose); report metrics for
    # each model alone and for the averaged ensemble.
    st_X, dy_X, ed_y, op_y, seq_lens = load_data('data/train.csv')
    test_st_X, test_dy_X, ed_y_true, op_y_true, test_seq_lens = load_data('data/test.csv')
    # ED Return
    print('ED Return')
    stat_probs = static_test(st_X,ed_y,test_st_X)
    dyn_probs = dynamic_test(dy_X,seq_lens,ed_y,test_dy_X,test_seq_lens,mode='ed')
    y_pred = model_probs(stat_probs,dyn_probs)
    print('Static')
    metrics(ed_y_true,np.argmax(stat_probs,axis=1))
    print('Dynamic')
    metrics(ed_y_true,np.argmax(dyn_probs,axis=1))
    print('Combined')
    metrics(ed_y_true,y_pred)
    # Opioid Overdose
    print('Opioid Overdose')
    stat_probs = static_test(st_X,op_y,test_st_X)
    dyn_probs = dynamic_test(dy_X,seq_lens,op_y,test_dy_X,test_seq_lens,mode='op')
    y_pred = model_probs(stat_probs,dyn_probs)
    print('Static')
    metrics(op_y_true,np.argmax(stat_probs,axis=1))
    print('Dynamic')
    metrics(op_y_true,np.argmax(dyn_probs,axis=1))
    print('Combined')
    metrics(op_y_true,y_pred)
if __name__ == '__main__':
    main()
|
# @author Nayara Souza
# UFCG - Universidade Federal de Campina Grande
# AA - Basico
# Reads a desired order `e` and an arrival order `s` of n integers and
# counts arrivals that must be set aside before they can be placed.
# NOTE(review): `i` can run past the end of `e` (IndexError) if the
# input sequences are inconsistent -- confirm against the problem
# statement's guarantees.
n = int(input())
e = list(map(int,input().split()))
s = list(map(int,input().split()))
j = 0        # index into s (arrivals)
i = 0        # index into e (desired order)
count = 0    # arrivals parked aside
f = set()    # values already handled
while len(f) < n:
    if e[i] == s[j]:
        # Arrival matches the next expected element: consume both.
        f.add(s[j])
        j += 1
        i += 1
    elif e[i] != s[j] and e[i] not in f:
        # Mismatch with an unseen expected element: park the arrival.
        count += 1
        f.add(s[j])
        j += 1
    else:
        # Expected element was already handled; advance past it.
        i += 1
print(count)
import random
def play_game(away_team, home_team):
def play(away_team, home_team):
def calc_event(player, team_defense):
d_sample = random.sample(team_defense, 3)
d_sum = sum(player.get_defense() for player in d_sample)
i = random.randint(0,d_sum)
if(i < player.get_offense()):
return (1, player.get_full_name())
return (0, player.get_full_name())
away_events = []
home_events = []
for player in away_team.get_all_players():
event = calc_event(player, home_team.get_all_players())
away_events.append(event)
for player in home_team.get_all_players():
event = calc_event(player, away_team.get_all_players())
home_events.append(event)
return (away_events, home_events)
def calc_points(events):
points = 0
for event in events:
points += event[0]
return points
away_score = 0
home_score = 0
away_events = []
home_events = []
for i in range(3):
outcome = play(away_team, home_team)
away_events += outcome[0]
home_events += outcome[1]
away_score += calc_points(away_events)
home_score += calc_points(home_events)
while(away_score == home_score):
outcome = play(away_team, home_team)
away_events += outcome[0]
home_events += outcome[1]
away_score += calc_points(away_events)
home_score += calc_points(home_events)
return GameResult(away_team.get_team_name(), away_score, away_events, home_team.get_team_name(), home_score, home_events)
class GameResult(object):
def __init__(self, away_team_name, away_score, away_events, home_team_name, home_score, home_events):
self.away_team_name = away_team_name
self.away_score = away_score
self.away_events = away_events
self.home_team_name = home_team_name
self.home_score = home_score
self.home_events = home_events
def get_winner(self):
if(self.away_score > self.home_score):
return self.away_team_name
else:
return self.home_team_name
def get_loser(self):
if(self.away_score < self.home_score):
return self.away_team_name
else:
return self.home_team_name
def get_away_team(self):
return self.away_team_name
def get_home_team(self):
return self.home_team_name
def get_away_score(self):
return self.away_score
def get_home_score(self):
return self.home_score
def get_results(self):
return ("{}: {} - {}: {}".format(self.away_team_name, self.away_score, self.home_team_name, self.home_score))
|
from django.contrib import admin
# Register your models here.
from .models import Patient_Directory, Doctor_Directory, Assigned_Patient, Assigned_Doctor, Instance
class Assigned_DoctorAdmin(admin.ModelAdmin):
    """Admin change list for doctors, including their assigned patients."""
    # Idiom fix: removed the redundant trailing ``pass``.
    list_display = ('last_name', 'first_name', 'specialty', 'age', 'display_assigned_patient')
# Register the admin class with the associated model
admin.site.register(Assigned_Doctor, Assigned_DoctorAdmin)
class InstanceInline(admin.TabularInline):
    # Tabular inline so Instances can be edited on the patient page.
    model = Instance
@admin.register(Assigned_Patient)
class Assigned_PatientAdmin(admin.ModelAdmin):
    """Admin change list for patients, with their Instances inlined."""
    # Idiom fix: removed the redundant trailing ``pass``.
    list_display = ('name', 'diagnosis', 'summary', 'age', 'date', 'display_assigned_doctor')
    inlines = [InstanceInline]
#admin.site.register(Assigned_PatientAdmin)
# Register the Admin classes for BookInstance using the decorator
@admin.register(Instance)
class InstanceAdmin(admin.ModelAdmin):
    # Change list columns and filters for patient/doctor instances.
    list_display = ('patient', 'status', 'doctor', 'id')
    list_filter = ('status', 'date')
    # Edit form: an unnamed identity section plus a 'Status' section.
    fieldsets = (
        (None, {
            'fields': ('patient', 'doctor', 'id',)
        }),
        ('Status', {
            'fields': ('status', 'date', 'operator')
        }),
    )
admin.site.register(Doctor_Directory)
admin.site.register(Patient_Directory)
|
__all__ = ["interface", "models", "database", "db_registry"]
from registry.models import *
from registry.interface import Registry
from registry.database import DbConnection, connect
from registry.db_registry import DbRegistry, ConflictError |
# TAGS map, partition, equivalence classes, pythonic
import collections
class Solution:
    def groupAnagrams(self, strs):
        """Group the words of *strs* into lists of mutual anagrams.

        Words sharing the same multiset of letters land in the same
        bucket; bucket order follows first occurrence.

        :type strs: List[str]
        :rtype: List[List[str]]
        """
        buckets = collections.defaultdict(list)
        for word in strs:
            buckets[tuple(sorted(word))].append(word)
        return list(buckets.values())
|
from turtle import Screen
from snake import Snake
from scoreboard import Scoreboard
from food import Food
import time
# --- Screen and game-object setup ---------------------------------
screen=Screen()
screen.setup(width=600,height=600)
screen.bgcolor("black")
screen.title("Snake Game")
# tracer(0) disables auto-refresh; screen.update() drives each frame.
screen.tracer(0)
game_is_on=True
# NOTE(review): this local score duplicates the counter kept inside
# Scoreboard (increase_score); it is only passed at construction --
# confirm it is still needed.
score=0
snake=Snake()
scoreboard=Scoreboard(score)
food=Food(score)
""" actions """
# Key bindings: WASD steers the snake.
screen.listen()
screen.onkey(snake.up,"w")
screen.onkey(snake.down,"s")
screen.onkey(snake.left,"a")
screen.onkey(snake.right,"d")
# Main loop: one frame per iteration (~10 fps via the 0.1 s sleep).
while game_is_on:
    screen.update()
    snake.move()
    time.sleep(0.1)
    if snake.head.distance(food)<15:
        """ if food is less than 15px distance from snake head,snake eats food """
        food.refresh()
        scoreboard.increase_score()
        snake.extend()
        score+=1
    if snake.head.xcor() >290 or snake.head.xcor() < -290 or snake.head.ycor() > 290 or snake.head.ycor() < -290:
        """ if snake touch outer wall,game is lost """
        scoreboard.game_over()
        game_is_on=False
    for segment in snake.segments[1:]:
        """ if snake head make contacts with its body,game is lost """
        if snake.head.distance(segment)<10:
            game_is_on=False
            scoreboard.game_over()
screen.exitonclick()
|
import numpy as np
import cPickle
import matplotlib.pyplot as plt
# Open data
# Python 2 script (cPickle, print statements).
# Load the pickled trade history: rows of (price, volume, timestamp).
f = open('EthEur.p','rb')
A = cPickle.load(f)
dataShape = A.shape
f.close()
(nRows, nCols)= dataShape
print A[nRows-1,:]
nb_data=nRows
init_ind=100
initial_time = A[init_ind,2]#1438945790
final_time = A[nRows-1,2]
price=A[:,0]
volume=A[:,1]
time=A[:,2]
#vwap over time interval t
# Bucket trades into delta_t-second bins and compute the
# volume-weighted average price (VWAP) per bin.
delta_t = 60
# init_ind = 999 * (nb_data / 1000)
# initial_time, final_time = time[init_ind], time[nb_data-1]
time_interval = final_time - initial_time
nb_bins = int(time_interval // delta_t +1)
vwap = np.zeros(nb_bins)
weights = np.zeros(nb_bins)
for j in range(init_ind,nb_data):
    ind = int((time[j]-initial_time)//delta_t)
    vwap[ind] += volume[j] * price[j]
    weights[ind] += volume[j]
for k in range(len(vwap)):
    if weights[k] > 0.0001:
        vwap[k] = vwap[k] / weights[k]
    else:
        # Empty bin: carry the previous bin's VWAP forward.
        vwap[k] = vwap[k-1]
# plt.plot(range(nb_bins), vwap)
# plt.show()
plt.plot(volume[10000:11000])
plt.show()
plt.plot(vwap[10000:11000])
plt.show()
#log returns
y = np.log(vwap[1:]) - np.log(vwap[:nb_bins-1])
plt.plot(y[10000:11000])
plt.show()
#log returns squared
y2 = np.square(y)
|
#!/usr/bin/env python
# addressBook.py: a very simple email address book program
# By: Fadel Berakdar
# Date: 26 Jan 2016
from optparse import OptionParser
import re
import shelve
import configparser
# Load the shelve database location from the config file.
# NOTE(review): the absolute, user-specific path makes this script
# non-portable; consider a path relative to this file or an env var.
config = configparser.RawConfigParser()
config.read("/Users/Phoenix/Dropbox/Projects/Code/Python3/12_OptParse/AddressBook/addressBook.cfg")
shelf_location = config.get('database', 'file')
def add_email(email):
    """
    Add a valid email to the database if it is not already present.
    Raises InvalidEmail for malformed addresses.
    :param email: a valid email string
    :return: message as tuple(boolean, string)
    """
    validate_email(email)                # raise if the email is not valid
    shelf = shelve.open(shelf_location)  # persistent, dictionary-like object
    if "emails" not in shelf:            # first run: create the emails list
        shelf['emails'] = []
    emails = shelf['emails']             # work on an in-memory copy
    if email in emails:                  # already present -> no change
        message = False, 'Email "%s" already in address book' % email
    else:
        emails.append(email)
        message = True, 'Email "%s" added to address book' % email
    shelf['emails'] = emails             # write back since writeback=False
    shelf.close()
    return message
def delete_email(email):
    """
    Delete a valid email from the database if it exists.
    Raises InvalidEmail for malformed addresses.
    :param email: a valid email string
    :return: message as tuple(boolean, string)
    """
    validate_email(email)
    shelf = shelve.open(shelf_location)
    if "emails" not in shelf:
        shelf['emails'] = []
    emails = shelf['emails']
    try:
        # EAFP: attempt the removal and report if it was not present.
        emails.remove(email)
        message = True, 'Email "%s" removed from address book' % email
    except ValueError:
        message = False, 'Email "%s" was not in address book' % email
    shelf['emails'] = emails
    shelf.close()
    return message
def display_emails():
    """
    Build a printable listing of the email address book.
    :return: a tuple (boolean, text); the boolean is False when the
             book is empty.
    """
    shelf = shelve.open(shelf_location)
    # BUG FIX: a brand-new database has no 'emails' key yet; the
    # original raised KeyError here instead of reporting an empty book
    # (add_email/delete_email guard for this, display did not).
    emails = shelf.get('emails', [])
    shelf.close()
    text = "********* Emails List *********\n"
    if not emails:
        text += "Empty Database"
        return False, text
    else:
        for email in emails:
            text += email + '\n'
        return True, text
class InvalidEmail(Exception):
    """Raised when a string does not look like an email address."""
    pass


def validate_email(email):
    """
    Validate *email*, raising InvalidEmail when it is malformed.
    :param email: email string
    :return: True when the address looks valid
    """
    # BUG FIX: the original pattern put '|' inside character classes,
    # where it matches a literal pipe, and effectively required a
    # 3+ character local part -- rejecting valid addresses such as
    # 'ab@cd.ef'.  Use an anchored, conventional pattern instead.
    if not re.fullmatch(r"[\w.+-]+@(\w[\w-]*\.)+[A-Za-z]{2,}", email):
        raise InvalidEmail("Invalid email: " + email)
    return True
def main():
    # Command-line front end: -a add/delete with -e EMAIL, -d to list.
    # NOTE(review): optparse has been deprecated since Python 2.7 --
    # consider migrating to argparse.
    parser = OptionParser()
    # Options:
    parser.add_option("-a",  # short option string
                      "--action",  # long option string
                      action="store",  # what to do with the option's argument
                      dest="action",  # where to store the option's argument
                      type="string",  # the type of accepted option's argument
                      help="requires -e option. Actions: add/delete"  # help
                      )
    parser.add_option("-e",
                      "--email",
                      action="store",
                      dest="email",
                      help="email used in the -a option"
                      )
    parser.add_option('-d',
                      "--display",
                      action="store_true",  # the display attribute of options
                      dest="display",
                      help="show all emails"
                      )
    (options, args) = parser.parse_args()
    # validation: -a and -e must be given together
    if options.action and not options.email:
        parser.error("option -a requires option -e")
    elif options.email and not options.action:
        parser.error("option -e requires option -a")
    # routes requests"
    if options.action == 'add':
        try:  # check before adding new email
            add_email(options.email)
        except InvalidEmail:  # dealing with expected exception
            parser.error("option -e requires a valid email address")
    elif options.action == 'delete':
        delete_email(options.email)
    if options.display:
        # display_emails() returns (ok, text); only the text is shown
        print(display_emails()[1])
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import numpy as np
from torch.nn.init import normal, constant
import math
class MindLSTMSep(nn.Module):
    """Five *separate* single-layer LSTMs over a 5-step, 20-dim input
    sequence, one per output head (m1, m2, m12, m21, mc); each head
    sees its LSTM's last-step output concatenated with the raw
    last-step input and emits 4 logits.

    NOTE(review): the head names suggest mind/belief-state targets;
    their exact semantics are not visible from this file.
    """
    def __init__(self):
        super(MindLSTMSep, self).__init__()
        self.feature_dim = 20
        self.seq_len = 5
        # Shared per-step linear embedding applied before the LSTMs.
        self.base_model = nn.Linear(self.feature_dim, self.feature_dim)
        self.lstm_m1 = nn.LSTM(self.feature_dim, self.feature_dim,bidirectional=False,num_layers=1,batch_first=True)
        self.lstm_m2 = nn.LSTM(self.feature_dim, self.feature_dim, bidirectional=False, num_layers=1, batch_first=True)
        self.lstm_m12 = nn.LSTM(self.feature_dim, self.feature_dim, bidirectional=False, num_layers=1, batch_first=True)
        self.lstm_m21 = nn.LSTM(self.feature_dim, self.feature_dim, bidirectional=False, num_layers=1, batch_first=True)
        self.lstm_mc = nn.LSTM(self.feature_dim, self.feature_dim, bidirectional=False, num_layers=1, batch_first=True)
        # The linear layer that maps the LSTM with the 3 outputs
        # (input: LSTM output + raw input, hence 2 * feature_dim).
        self.fc1_m1 = nn.Linear(2 * self.feature_dim, 20)
        self.fc1_m2 = nn.Linear(2 * self.feature_dim, 20)
        self.fc1_m12 = nn.Linear(2 * self.feature_dim, 20)
        self.fc1_m21 = nn.Linear(2 * self.feature_dim, 20)
        self.fc1_mc = nn.Linear(2 * self.feature_dim, 20)
        self.fc2_m1 = nn.Linear(20, 4)
        self.fc2_m2 = nn.Linear(20, 4)
        self.fc2_m12 = nn.Linear(20, 4)
        self.fc2_m21 = nn.Linear(20, 4)
        self.fc2_mc = nn.Linear(20, 4)
        self.relu = torch.nn.ReLU()
        self.dropout = nn.Dropout()
    def forward(self, input):
        # Flatten (batch, seq, feat) -> (batch*seq, feat) for the
        # shared embedding, then restore the sequence shape.
        base_out = self.base_model(input.view((-1, input.size()[-1])))
        base_out = self.relu(base_out)
        base_out = base_out.view(input.size(0), self.seq_len, self.feature_dim)
        # Each branch keeps only step 4 (== seq_len - 1, the last one)
        # and concatenates it with the raw last-step input.
        lstm_out_m1, _ = self.lstm_m1(base_out)
        lstm_out_m1 = self.dropout(lstm_out_m1)
        lstm_out_m1 = lstm_out_m1[:,4,:]
        lstm_out_m1 = torch.cat([lstm_out_m1, input[:, 4, :]], dim=1)
        lstm_out_m2, _ = self.lstm_m2(base_out)
        lstm_out_m2 = self.dropout(lstm_out_m2)
        lstm_out_m2 = lstm_out_m2[:, 4, :]
        lstm_out_m2 = torch.cat([lstm_out_m2, input[:, 4, :]], dim=1)
        lstm_out_m12, _ = self.lstm_m12(base_out)
        lstm_out_m12 = self.dropout(lstm_out_m12)
        lstm_out_m12 = lstm_out_m12[:, 4, :]
        lstm_out_m12 = torch.cat([lstm_out_m12, input[:, 4, :]], dim=1)
        lstm_out_m21, _ = self.lstm_m21(base_out)
        lstm_out_m21 = self.dropout(lstm_out_m21)
        lstm_out_m21 = lstm_out_m21[:, 4, :]
        lstm_out_m21 = torch.cat([lstm_out_m21, input[:, 4, :]], dim=1)
        lstm_out_mc, _ = self.lstm_mc(base_out)
        lstm_out_mc = self.dropout(lstm_out_mc)
        lstm_out_mc = lstm_out_mc[:, 4, :]
        lstm_out_mc = torch.cat([lstm_out_mc, input[:, 4, :]], dim=1)
        # Per-head 2-layer classifiers (dropout intentionally disabled
        # here, unlike MindLSTM/MindGRU below).
        m1_out = self.fc1_m1(lstm_out_m1)
        # m1_out = self.dropout(m1_out)
        m1_out = self.relu(m1_out)
        m1_out = self.fc2_m1(m1_out)
        m2_out = self.fc1_m2(lstm_out_m2)
        # m2_out = self.dropout(m2_out)
        m2_out = self.relu(m2_out)
        m2_out = self.fc2_m2(m2_out)
        m12_out = self.fc1_m12(lstm_out_m12)
        # m12_out = self.dropout(m12_out)
        m12_out = self.relu(m12_out)
        m12_out = self.fc2_m12(m12_out)
        m21_out = self.fc1_m21(lstm_out_m21)
        # m21_out = self.dropout(m21_out)
        m21_out = self.relu(m21_out)
        m21_out = self.fc2_m21(m21_out)
        mc_out = self.fc1_mc(lstm_out_mc)
        # mc_out = self.dropout(mc_out)
        mc_out = self.relu(mc_out)
        mc_out = self.fc2_mc(mc_out)
        return m1_out, m2_out, m12_out, m21_out, mc_out
class MindLSTM(nn.Module):
    """One *shared* single-layer LSTM over a 5-step, 20-dim sequence
    feeding five per-head 2-layer classifiers (m1, m2, m12, m21, mc),
    each emitting 4 logits.  Unlike MindLSTMSep, dropout is applied
    inside the heads rather than on the LSTM output.
    """
    def __init__(self):
        super(MindLSTM, self).__init__()
        self.feature_dim = 20
        self.seq_len = 5
        # Shared per-step linear embedding before the LSTM.
        self.base_model = nn.Linear(self.feature_dim, self.feature_dim)
        self.lstm = nn.LSTM(self.feature_dim, self.feature_dim,bidirectional=False,num_layers=1,batch_first=True)
        # The linear layer that maps the LSTM with the 3 outputs
        # (input: LSTM output + raw input, hence 2 * feature_dim).
        self.fc1_m1 = nn.Linear(2 * self.feature_dim, 20)
        self.fc1_m2 = nn.Linear(2 * self.feature_dim, 20)
        self.fc1_m12 = nn.Linear(2 * self.feature_dim, 20)
        self.fc1_m21 = nn.Linear(2 * self.feature_dim, 20)
        self.fc1_mc = nn.Linear(2 * self.feature_dim, 20)
        self.fc2_m1 = nn.Linear(20, 4)
        self.fc2_m2 = nn.Linear(20, 4)
        self.fc2_m12 = nn.Linear(20, 4)
        self.fc2_m21 = nn.Linear(20, 4)
        self.fc2_mc = nn.Linear(20, 4)
        self.relu = torch.nn.ReLU()
        self.dropout = nn.Dropout()
    def forward(self, input):
        base_out = self.base_model(input.view((-1, input.size()[-1])))
        base_out = self.relu(base_out)
        base_out = base_out.view(input.size(0), self.seq_len, self.feature_dim)
        lstm_out, _ = self.lstm(base_out)
        # lstm_out = self.dropout(lstm_out)
        # Keep only step 4 (the last step) and concatenate it with the
        # raw last-step input.
        lstm_out = lstm_out[:,4,:]
        lstm_out = torch.cat([lstm_out, input[:, 4, :]], dim=1)
        m1_out = self.fc1_m1(lstm_out)
        m1_out = self.dropout(m1_out)
        m1_out = self.relu(m1_out)
        m1_out = self.fc2_m1(m1_out)
        m2_out = self.fc1_m2(lstm_out)
        m2_out = self.dropout(m2_out)
        m2_out = self.relu(m2_out)
        m2_out = self.fc2_m2(m2_out)
        m12_out = self.fc1_m12(lstm_out)
        m12_out = self.dropout(m12_out)
        m12_out = self.relu(m12_out)
        m12_out = self.fc2_m12(m12_out)
        m21_out = self.fc1_m21(lstm_out)
        m21_out = self.dropout(m21_out)
        m21_out = self.relu(m21_out)
        m21_out = self.fc2_m21(m21_out)
        mc_out = self.fc1_mc(lstm_out)
        mc_out = self.dropout(mc_out)
        mc_out = self.relu(mc_out)
        mc_out = self.fc2_mc(mc_out)
        return m1_out, m2_out, m12_out, m21_out, mc_out
class MindGRU(nn.Module):
    """GRU variant of MindLSTM: one shared single-layer GRU over a
    5-step, 20-dim sequence feeding five per-head 2-layer classifiers
    (m1, m2, m12, m21, mc), each emitting 4 logits.
    """
    def __init__(self):
        super(MindGRU, self).__init__()
        self.feature_dim = 20
        self.seq_len = 5
        # Shared per-step linear embedding before the GRU.
        self.base_model = nn.Linear(self.feature_dim, self.feature_dim)
        self.gru = nn.GRU(self.feature_dim, self.feature_dim,bidirectional=False,num_layers=1,batch_first=True)
        # The linear layer that maps the LSTM with the 3 outputs
        # (input: GRU output + raw input, hence 2 * feature_dim).
        self.fc1_m1 = nn.Linear(2 * self.feature_dim, 20)
        self.fc1_m2 = nn.Linear(2 * self.feature_dim, 20)
        self.fc1_m12 = nn.Linear(2 * self.feature_dim, 20)
        self.fc1_m21 = nn.Linear(2 * self.feature_dim, 20)
        self.fc1_mc = nn.Linear(2 * self.feature_dim, 20)
        self.fc2_m1 = nn.Linear(20, 4)
        self.fc2_m2 = nn.Linear(20, 4)
        self.fc2_m12 = nn.Linear(20, 4)
        self.fc2_m21 = nn.Linear(20, 4)
        self.fc2_mc = nn.Linear(20, 4)
        self.relu = torch.nn.ReLU()
        self.dropout = nn.Dropout()
    def forward(self, input):
        base_out = self.base_model(input.view((-1, input.size()[-1])))
        base_out = self.relu(base_out)
        base_out = base_out.view(input.size(0), self.seq_len, self.feature_dim)
        gru_out, _ = self.gru(base_out)
        # Keep only step 4 (the last step) and concatenate it with the
        # raw last-step input.
        gru_out = gru_out[:,4,:]
        gru_out = torch.cat([gru_out, input[:, 4, :]], dim=1)
        m1_out = self.fc1_m1(gru_out)
        m1_out = self.dropout(m1_out)
        m1_out = self.relu(m1_out)
        m1_out = self.fc2_m1(m1_out)
        m2_out = self.fc1_m2(gru_out)
        m2_out = self.dropout(m2_out)
        m2_out = self.relu(m2_out)
        m2_out = self.fc2_m2(m2_out)
        m12_out = self.fc1_m12(gru_out)
        m12_out = self.dropout(m12_out)
        m12_out = self.relu(m12_out)
        m12_out = self.fc2_m12(m12_out)
        m21_out = self.fc1_m21(gru_out)
        m21_out = self.dropout(m21_out)
        m21_out = self.relu(m21_out)
        m21_out = self.fc2_m21(m21_out)
        mc_out = self.fc1_mc(gru_out)
        mc_out = self.dropout(mc_out)
        mc_out = self.relu(mc_out)
        mc_out = self.fc2_mc(mc_out)
        return m1_out, m2_out, m12_out, m21_out, mc_out
class MLP(torch.nn.Module):
def __init__(self):
super(MLP, self).__init__()
self.fc1 = torch.nn.Linear(22, 20)
self.fc_mc = torch.nn.Linear(20, 4)
self.fc_m1 = torch.nn.Linear(20, 4)
self.fc_m2 = torch.nn.Linear(20, 4)
self.fc_m12 = torch.nn.Linear(20, 4)
self.fc_m21 = torch.nn.Linear(20, 4)
self.relu = torch.nn.ReLU()
self.dropout = nn.Dropout()
def forward(self, x):
embedding = self.fc1(x)
out = self.relu(embedding)
out = self.dropout(out)
out_mc = self.fc_mc(out)
out_m1 = self.fc_m1(out)
out_m2 = self.fc_m2(out)
out_m12 = self.fc_m12(out)
out_m21 = self.fc_m21(out)
return out_mc, out_m12, out_m21,out_m1, out_m2 |
#!/usr/bin/env python3
import os
import json
import base64
import lzma
import black
import dis
from flask import (
Flask,
render_template,
request,
send_from_directory,
render_template_string,
redirect,
url_for,
)
# Flask application object; the playground routes below attach to it.
app = Flask(__name__)
def compress_state(data):
    """Serialize ``data`` to JSON, LZMA-compress it, and return a
    URL-safe base64 string usable as a query parameter."""
    raw = json.dumps(data).encode("utf-8")
    return base64.urlsafe_b64encode(lzma.compress(raw)).decode("utf-8")
def decompress_state(state):
    """Inverse of compress_state: base64-decode, LZMA-decompress, and
    parse the embedded JSON document."""
    packed = base64.urlsafe_b64decode(state)
    return json.loads(lzma.decompress(packed))
def format_code(source, line_length, skip_string_normalization, py36, pyi):
    """Format ``source`` with black.

    On success returns the formatted source string; on any failure the
    exception object itself is returned (the template renders it).
    """
    try:
        mode = black.FileMode.from_configuration(
            py36=py36,
            pyi=pyi,
            skip_string_normalization=skip_string_normalization)
        return black.format_str(source, line_length=line_length, mode=mode)
    except Exception as exc:
        return exc
@app.route("/favicon.ico")
def favicon():
    """Serve the favicon out of the app's static directory."""
    static_dir = os.path.join(app.root_path, "static")
    return send_from_directory(static_dir, "favicon.ico")
@app.route("/", methods=["POST", "GET"])
def index():
    """Playground view.

    POST: pack the submitted form into a compressed ``state`` token and
    redirect to GET (post/redirect/get).  GET without ``state``:
    redirect to a default state seeded from the bundled example.  GET
    with ``state``: unpack it, disassemble the source and render.
    """

    def _state_redirect(source, line_length, skip_string_normalization,
                        py36, pyi):
        # Single place that builds the state token; the POST branch and
        # the "no state yet" branch previously duplicated this dict.
        state = compress_state({
            "sc": source,
            "ll": line_length,
            "ssn": skip_string_normalization,
            "py36": py36,
            "pyi": pyi,
        })
        return redirect(url_for(".index", state=state))

    if request.method == "POST":
        return _state_redirect(
            request.form["source"],
            int(request.form["line_length"]),
            bool(request.form.get("skip_string_normalization")),
            bool(request.form.get("py36")),
            bool(request.form.get("pyi")),
        )

    state = request.args.get("state")
    if not state:
        # First visit: seed the page with the bundled example source.
        return _state_redirect(render_template("source.py"), 60,
                               False, False, False)

    state = decompress_state(state)
    source = state.get("sc")
    try:
        # dis accepts a source string and compiles it internally.
        bytecode = dis.code_info(source) + "\n\n\n" + dis.Bytecode(
            source).dis()
    except SyntaxError as e:
        bytecode = str(e)
    data = {
        "source_code": source,
        "bytecode": bytecode,
        "options": {
            "line_length": state.get("ll"),
            "skip_string_normalization": state.get("ssn"),
            "py36": state.get("py36"),
            "pyi": state.get("pyi"),
        },
        "black_version": black.__version__,
    }
    return render_template("index.html", **data)
# Run the Flask development server when executed directly.
if __name__ == "__main__":
    app.run(debug=True)
|
# Receives the Twitter API keys, the target user id and the follower
# limit for collection from the command line.
import conf
import helpers.manipulador_de_listas as mani
import logging
import os
import socket
import sys
import tweepy
# CLI: consumer key/secret, access token/secret, user id, max followers.
# NOTE(review): 'acess_token' is misspelled but used consistently below.
consumer_key = sys.argv[1]
consumer_secret = sys.argv[2]
acess_token = sys.argv[3]
access_token_secret = sys.argv[4]
id_user = sys.argv[5]
limit_followers = int(sys.argv[6])
hostname = socket.gethostname()
# Initialize logging configuration (one append-mode log per host).
if not os.path.exists(conf.dir_logs):
    os.makedirs(conf.dir_logs)
logging.basicConfig(filename="{}/collect_users_followers.{}.log".format(conf.dir_logs, hostname),
                    filemode="a", level=logging.INFO, format="[ %(asctime)s ] [%(levelname)s] %(message)s")
# Authenticate and build a client that sleeps through rate limits.
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(acess_token, access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True, compression=True)
logging.info("Use api key - {}".format(api.auth.consumer_key))
def collect_users_followers(user_id):
    """Collect follower ids for ``user_id`` and write them to a csv.

    Output goes to ``<conf.dir_dados>/user_followers/<user_id>.csv``,
    one id per line, capped at ``limit_followers``.  The user is skipped
    when already collected or present on the no-geotagged, restricted or
    error lists.  While collecting, the id sits on the error list so an
    interrupted run leaves a trace; it is removed again on success.
    """
    dir_followers = "{}/user_followers".format(conf.dir_dados)
    if not os.path.exists(dir_followers):
        os.makedirs(dir_followers)
    output_filename = "{}/{}.csv".format(dir_followers, user_id)
    # Skip user if it was already collected
    if os.path.exists(output_filename):
        logging.info("User {} - Skipped".format(user_id))
        return
    # Skip user if it is on the no-geotagged list
    if mani.in_lista(conf.lista_nogeotagged, user_id):
        logging.info("User {} - Skipped - in nogeotagged".format(user_id))
        return
    # Skip user if it is on the restricted-profile list
    if mani.in_lista(conf.lista_restrito, user_id):
        logging.info("User {} - Skipped - in restrict list".format(user_id))
        return
    # Skip user if a previous collection attempt crashed
    if mani.in_lista(conf.lista_followers_erro, user_id):
        logging.info("User {} - Skipped - in erro list".format(user_id))
        return
    logging.info("User {} - Starting collecting followers".format(user_id))
    # Park the user on the error list; removed once collection finishes.
    mani.add_lista_lock(conf.lista_followers_erro, user_id)
    user_followers = []
    coletou = False
    while not coletou:
        try:
            # Page through followers; failures are either rate limiting
            # (retry from scratch) or a permanent error (give up).
            c = tweepy.Cursor(api.followers_ids, id=user_id)
            for page in c.pages():
                logging.info("User {} - Coletando {} followers".format(user_id, len(page)))
                user_followers.extend(page)
                # Stop paging once the configured limit is reached.
                if len(user_followers) >= limit_followers:
                    break
            if len(user_followers) == 0:
                logging.warning("User {} - Nao tem followers".format(user_id))
            coletou = True
            mani.remove_lista(conf.lista_followers_erro, user_id)
        except tweepy.TweepError as e:
            # NOTE(review): a TweepError without a response falls
            # through and retries with partial results kept — confirm
            # that is intended.
            if e.response is not None:
                if e.response.status_code is not None:
                    # Rate limited: discard partial results and retry.
                    if e.response.status_code == 429:
                        user_followers = []
                        logging.warning("User {} - Error Status: {} - Reason: {} - Error: {}".format(
                            user_id, e.response.status_code, e.response.reason, e.response.text))
                        logging.warning("User {} - Coletando novamente".format(user_id))
                    else:
                        # Restricted profile: remember it so it is not retried.
                        if e.response.status_code == 401:
                            mani.add_lista(conf.lista_restrito, user_id)
                        # Any other HTTP error: log and abort this user.
                        logging.warning("User {} - Error Status: {} - Reason: {} - Error: {}".format(
                            user_id, e.response.status_code, e.response.reason, e.response.text))
                        mani.remove_lista(conf.lista_followers_erro, user_id)
                        return
        except Exception as e:
            # Unknown failure: log and abort.  ``str(e)`` replaces the
            # original ``e.message``, which raises AttributeError on
            # Python 3 exceptions.
            logging.error("User {} - Erro Desconhecido: {}".format(user_id, str(e)))
            return
    # Write the collected follower ids, up to the configured limit.
    try:
        i = 0
        for user_follower_id in user_followers:
            mani.add_lista(output_filename, user_follower_id)
            i += 1
            if i >= limit_followers:
                break
        if len(user_followers) == 0:
            # Record an empty file so the user still counts as collected.
            mani.add_lista(output_filename, "")
    except Exception:
        logging.error("User {} - Erro ao escrever no arquivo do followers do usuario".format(user_id))
    logging.info("User {} - Finish add followers in file".format(user_id))
    del user_followers
# Entry point: collect followers for the user id given on the CLI.
collect_users_followers(id_user)
|
"""Top-level package for softshark."""
# Package metadata (author/contact/version) read by packaging tooling.
__author__ = """Soft Shell"""
__email__ = 'kumarcalif@gmail.com'
__version__ = '0.1.0'
|
#PIP
ver = '1.0.0'
#github.com/smcclennon/Toolbox
import os
import subprocess

# 'title' is a cmd.exe builtin, so it has to go through the shell.
os.system('title PIP')
# Interactive loop: read a package name and install it for the user.
while True:
    print('Please enter a package name (e.g. "requests" or "psutil")')
    package = input(str('> '))
    print('\n>>> pip install ' + package + ' --user')
    # Pass arguments as a list (no shell) so a crafted package name
    # cannot inject extra shell commands, unlike the original
    # os.system('pip install ' + package + ' --user').
    subprocess.call(['pip', 'install', package, '--user'])
    print('\n\n\n')
|
import nsq
import tornado.ioloop
import time
# Publish the current wall-clock time to 'my_topic'; finish_pub is
# invoked by nsq with the broker's response.
def pub_message():
    writer.pub('my_topic', time.strftime('%H:%M:%S'), finish_pub)
def finish_pub(conn, data):
    # Publish-completion callback; ``data`` is the broker response.
    # Parenthesized single-argument print works identically on Python 2
    # and 3 (the original ``print data`` statement was Python-2 only).
    print(data)
# Publish once a second on the Tornado IO loop until interrupted.
writer = nsq.Writer(['127.0.0.1:4150'])
tornado.ioloop.PeriodicCallback(pub_message, 1000).start()
nsq.run()
|
from flask_migrate import MigrateCommand
from flask_script import Manager
from APP import appname
# Wire the Flask app into Flask-Script and expose Flask-Migrate's
# database commands as `python manage.py db ...`.
app = appname()
manager = Manager(app)
manager.add_command('db',MigrateCommand)
if __name__ == '__main__':
    manager.run()
|
import unittest
from SwiftFormat.Syntax import *
class IdentifierTest(unittest.TestCase):
    """Parsing tests for the SwiftFormat identifier grammar."""

    def testImplicitParameter(self):
        """$0/$1/$123123 parse as implicit parameters; bare '$' fails."""
        p = identifier()
        assert p.parse(u"$0")
        assert p.parse(u"$0")[0].meta.type == SwiftTypes.IMPLICIT_PARAMETER
        assert p.parse(u"$1")
        assert p.parse(u"$123123")
        assert p.parse(u"$") is None

    def testEscapedIdentifier(self):
        """Backticked names parse; unbalanced or digit-led forms fail."""
        p = identifier()
        assert p.parse(u"`valid`")
        assert p.parse(u"`valid`")[0].meta.type == SwiftTypes.IDENTIFIER
        assert p.parse(u"`valid") is None
        # A trailing backtick still parses the leading plain identifier.
        assert p.parse(u"valid`")
        assert p.parse(u"`0valid`") is None

    def testIdentifier(self):
        """Plain names parse as IDENTIFIER; a leading digit is rejected."""
        p = identifier()
        assert p.parse(u"valid")
        assert p.parse(u"valid")[0].meta.type == SwiftTypes.IDENTIFIER
        assert p.parse(u"0valid") is None
|
#!/usr/bin/python
import math
def conv(n,k):
    """Interpret the decimal digits of ``n`` as a base-``k`` numeral.

    The least-significant decimal digit becomes the least-significant
    base-k digit.  Digits are not range-checked against ``k``, so this
    is more permissive than ``int(str(n), k)``.
    """
    reversed_digits = str(n)[::-1]
    return sum(int(d) * k ** i for i, d in enumerate(reversed_digits))
def prime(num):
    """Trial-division primality probe.

    Returns ``(True, 0)`` when no divisor in [2, sqrt(num + 1)] exists
    (note: 0 and 1 are also reported this way), otherwise
    ``(False, smallest_divisor)``.
    """
    for cand in range(2, int(math.sqrt(1 + num)) + 1):
        if num % cand == 0 and cand != num:
            return (False, cand)
    return (True, 0)
# Search odd values in [32769, 65536) whose binary representation, read
# as a numeral in every base 2..10, is composite in all of them.
N=16
J=50
result = []
t = 0
for k in range(32769, 65536):
    if k % 2 == 0:
        continue
    x = bin(k)[2:]
    dic = {}
    f = True
    # Reject x as soon as one base-j interpretation tests prime.
    for j in range(2, 11):
        dic[j] = prime(conv(x, j))
        if dic[j][0] == True:
            f = False
            break
    if f:
        # Output row: the binary string plus one divisor per base 2..10.
        result = [x]
        for j in range(2, 11):
            result.append(dic[j][1])
        t += 1
        print "%s %s %s %s %s %s %s %s %s %s " % tuple(result)
    if t == 51:
        break
# NOTE(review): this header prints AFTER the rows — confirm intended.
print "Case #1:"
|
from django.contrib.auth.models import User
from django.test import Client, TestCase
from django.urls import reverse
from .models import Employee
from .utils import createEmployee
class UserTest(TestCase):
    """Smoke tests for login and the authenticated user list."""

    def setUp(self):
        # Throwaway account used by the tests below.
        User.objects.create_user('temporary',
                                 'temporary@gmail.com',
                                 'temporary')

    def test_login_page(self):
        """The login page is reachable."""
        response = self.client.get('/login')
        self.assertEqual(response.status_code, 200)

    def test_login_page_post(self):
        """Posting valid credentials ends on a 200 page.

        The original version only printed the response and asserted
        ``True == True``, which could never fail.
        """
        response = self.client.post(reverse('login'), {
            'username': 'temporary',
            'password': 'temporary'
        },
            follow=True)
        self.assertEqual(response.status_code, 200)

    def test_users_list_page(self):
        """An authenticated client can fetch the user list."""
        self.client.login(username='temporary', password='temporary')
        response = self.client.get(reverse('user:user_list'), follow=True)
        self.assertEqual(response.status_code, 200)
|
import scanpy as sc
from utils import plot
# Plotting configuration for marker-gene visualization.
c_map = 'Purples'
base_path = '/Users/zhongyuanke/data/'
fig_size = (20, 3)
title = ['CD34', 'CD14', 'NKG7', 'CD4', 'CD19']
bar = ['silver', 'r']
# Input files: the merged dataset plus several embedding results.
file_orig = base_path+'merge_result/merge5.h5ad'
file_scxx = base_path+'result/merge5_scxx_z2.h5ad'
file_mse = base_path+'result/merge5_mse_2.h5ad'
file_dca = base_path+'result/merge5_dca_2.h5ad'
file_scan = base_path+'result/merge5_scanorama.h5ad'
batch_path = base_path+'merge_result/merge5.csv'
fig_path = base_path+'result/merge5_gene_plot_mse.png'
orig_umap = base_path+'result/merge5_umap.h5ad'
adata = sc.read_h5ad(file_orig)
genes = adata.var
adata_orig = sc.read_h5ad(orig_umap)
adata_dca = sc.read_h5ad(file_dca)
adata_mse = sc.read_h5ad(file_mse)
adata_scxx = sc.read_h5ad(file_scxx)
adata_scan = sc.read_h5ad(file_scan)
# 2-D coordinates stored in .obsm by the different methods.
x_orig = adata_orig.obsm['umap']
x_mse = adata_mse.obsm['mid']
x_dca = adata_dca.obsm['mid']
x_scvi = adata_scxx.obsm['mid']
x_scan = adata_scan.obsm['umap']
# Marker genes and per-gene expression thresholds for the plot.
gene_names = ['CD34', 'CD14', 'NKG7', 'CD4', 'CD19']
thresholds = [0.05, 0.05, 0.05, 0.01, 0.01]
# Only the MSE embedding is plotted; the others are loaded but unused.
plot.plot_mark_gene(x_mse, adata, gene_names, thresholds, 'mse', fig_size, 15, fig_path)
|
# frame => a rectangular container to group and hold widgets
from tkinter import *
window = Tk()
window.geometry("200x200")

# A sunken pink frame grouping the four WASD buttons.
frame = Frame(window, bg="pink", bd=5, relief=SUNKEN)
frame.place(x=30, y=50)

# W sits on top; A, S and D line up left-to-right underneath.
Button(frame, text="W", font=("Consolas", 15), width=3).pack(side=TOP)
for key in ("A", "S", "D"):
    Button(frame, text=key, font=("Consolas", 15), width=3).pack(side=LEFT)

window.mainloop()
import constants
from plate import Plate
class SubmissionCounter():
    """Tile where chefs hand in plated dishes for submission."""

    def __init__(self, world, x, y):
        self.__world = world
        self.x = x
        self.y = y

    def put_on_chef_held_item(self, chef):
        """Take a plate from ``chef`` and, if clean, submit it.

        Non-plate items are left in the chef's hands.  A submitted
        plate is moved off-map, marked dirty, scheduled for respawn,
        and its ingredients are removed from the world.
        """
        if not isinstance(chef.held_item, Plate):
            return
        plate = chef.held_item
        chef.held_item = None
        if plate.is_dirty:
            # A dirty plate is taken from the chef but not submitted.
            return
        self.__world.submit_plate(plate)
        plate.x = -1
        plate.y = -1
        plate.is_dirty = True
        plate.time_until_respawn = constants.PLATE_RESPAWN_TIME
        for ingredient in plate.contents:
            self.__world.ingredients.remove(ingredient)
        plate.contents.clear()

    def print_static(self):
        # Map glyph for this tile.
        print('S', end="")

    def print(self):
        self.print_static()
#!/usr/bin/python
import numpy as np
import pandas as pd
from dataReductionAndSampling import DialogueActSample
from gensim.models import KeyedVectors
from keras.callbacks import EarlyStopping
from keras.layers import Dense, Dropout
from keras.models import Sequential
from keras.optimizers import Adam
from sklearn.decomposition import FastICA
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
class DaClassifier():
    """Dialogue-act classifier.

    Utterances are embedded as averaged GloVe vectors, passed through a
    FastICA component-removal step, and classified by a small dense
    Keras network with 15 sigmoid outputs.
    """
    def __init__(self):
        # in the terminal load the glove txt file and save it
        # we = KeyedVectors.load_word2vec_format('/home/maitreyee/Development/glove/glove.6B.300d.txt')
        # we.save_word2vec_format('/home/maitreyee/Development/glove/glove.6B.300d.bin', binary=True)
        self.wrdembedding = KeyedVectors.load_word2vec_format(
            '/home/maitreyee/Development/glove/glove.6B.300d.bin',
            binary=True)
        self.classes_int_encoder = LabelEncoder()
        self.classes_encoder = OneHotEncoder()
        self.utt_we, self.acts_de = [], []
        self.classifier = Sequential(
            [Dense(units=300, activation='relu'), # input_dim=100-n_removed_cmps,
             Dropout(.5),
             Dense(units=200, activation='relu'),
             Dropout(.5),
             Dense(units=15, activation='sigmoid')])
        self.transformer = FastICA()
    # classesEncod: integer-encode then one-hot-encode the dialogue-act
    # labels; reshape(-1, 1) turns the 1-D integer labels into the
    # column vector that OneHotEncoder expects.
    # traintestgenerate: builds train/test samples for x (utterances)
    # and y (classes); the data was over/under-sampled beforehand to
    # reduce bias in the training data.
    # pca_transform: zeroes leading ICA components before
    # reconstruction (a denoising step, despite the "pca" name).
    def classesEncod(self, sampled_data):
        """One-hot encode the 'commfunct' labels of ``sampled_data``."""
        resampled_annotate = sampled_data
        acts = [str(a).lower().strip() for a in resampled_annotate.commfunct.values]
        classes_int = self.classes_int_encoder.fit_transform(acts).reshape(-1, 1)
        classes = self.classes_encoder.fit_transform(classes_int).toarray()
        return classes
    def average_words(self, tokens, noise=0.):
        """Mean GloVe vector of ``tokens`` as a (1, dim) array.

        With ``noise`` > 0, each token is replaced by a random
        vocabulary vector with probability ``noise`` (augmentation).
        Returns None when no token is in the vocabulary.
        """
        tokens = [t.strip(')( ., ?') for t in tokens]
        if noise <= 0.:
            we = [self.wrdembedding[w].reshape(1, -1) for w in tokens if
                  w in self.wrdembedding]
        else:
            we = []
            for t in tokens:
                if np.random.uniform(size=1) <= noise:
                    idx = np.random.choice(self.wrdembedding.vectors.shape[0])
                    we.append(self.wrdembedding.vectors[idx, :])
                else:
                    if t in self.wrdembedding:
                        we.append(self.wrdembedding[t])
        if len(we) > 0:
            mean_we = np.mean(we, axis=0, keepdims=True)
            return mean_we
        else:
            return None
    def pca_transform(self, data, n_removed_cmps):
        """Zero the first ``n_removed_cmps`` FastICA components of
        ``data`` and reconstruct.

        NOTE(review): despite the name this uses FastICA, not PCA.
        """
        pc_y = self.transformer.transform(data)
        pc_y[:, :n_removed_cmps] = 0
        x = self.transformer.inverse_transform(pc_y)
        return x
    def traintestgenerate(self, df_cleaned):
        """Yield a single (x_train, x_test, y_train, y_test) split.

        Embeds each utterance (with 15% word noise), stacks features
        and labels, shuffles them jointly, fits FastICA, removes the
        first component, and splits 60/40.
        """
        classesx = self.classesEncod(df_cleaned)
        utt = [str(a).lower().strip() for a in df_cleaned.utterance.values]
        for i in range(len(utt)):
            u = utt[i]
            a = classesx[i]
            tokens = str(u).strip().lower().split()
            mean_we = self.average_words(tokens, noise=.15)
            if mean_we is None:
                continue
            if mean_we.shape == (1, 1):
                continue
            self.utt_we = self.utt_we + [mean_we]
            self.acts_de = self.acts_de + [a]
        self.acts_de = np.vstack(self.acts_de)
        self.utt_we = np.vstack(self.utt_we)
        size_utt = self.utt_we.shape[1]
        # Stack x and y side by side so one shuffle keeps them aligned.
        dataset = np.hstack([self.utt_we, self.acts_de])
        np.random.shuffle(dataset)
        self.utt_we, self.acts_de = dataset[:, :size_utt], dataset[:, size_utt:]
        perc_train = .6
        defaulr_rm_cmps = 1
        self.transformer.fit(self.utt_we)
        utt_we_x = self.pca_transform(self.utt_we, defaulr_rm_cmps)
        idx_train = int(utt_we_x.shape[0] * perc_train)
        x_train, x_test = utt_we_x[:idx_train, :], utt_we_x[idx_train:, :]
        y_train, y_test = self.acts_de[:idx_train, :], self.acts_de[idx_train:, :]
        yield x_train, x_test, y_train, y_test
    # perform the classification and print the log, some metrics to perform the classification is provided here
    # the classification neural layer is defined in the __init__ function.
    def classifier_n(self, frmtraintest):
        """Compile the network and fit it on each yielded split."""
        data_toClassify = frmtraintest
        # We compile it
        self.classifier.compile(optimizer=Adam(2e-5),
                                loss='categorical_crossentropy',
                                metrics=['accuracy'])
        for xytrain in data_toClassify:
            x_train, x_test, y_train, y_test = xytrain
            # We fit the network
            h = self.classifier.fit(x=x_train,
                                    y=y_train,
                                    validation_data=[x_test, y_test],
                                    epochs=100,
                                    batch_size=10,
                                    verbose=2,
                                    callbacks=[EarlyStopping(patience=5)])
    # its for logging, and showing results
    # analyse the results of the neural network
    def confmatrx(self, xandytest):
        """Print a confusion matrix for each (train, test) split."""
        for traintest in xandytest:
            x_train, x_test, y_train, y_test = traintest
            y_true = y_test
            y_pred = self.classifier.predict(x_test)
            idx_max_true = np.argmax(y_true, axis=1)
            idx_max_pred = np.argmax(y_pred, axis=1)
            pd.options.display.max_columns = 500
            conf_mat = confusion_matrix(idx_max_true, idx_max_pred)
            np.core.arrayprint._line_width = 160
            print(conf_mat, )
    def test_on_brkdown(self):
        """Classify the breakdown corpus and dump predictions to csv.

        Labels with a score above 0.20 are kept and mapped back to
        their string names via the fitted label encoder.
        """
        test_file = []
        utt_test = []
        pd_test = pd.read_csv('./brkdown_corpora1.csv', sep='\t')
        sents = list(pd_test.utterance.str.strip().str.replace(r'[^\w\s]', '',
                                                               regex=True).dropna())
        for i in range(len(sents)):
            u = sents[i]
            tokens = str(u).lower().split()
            we = [self.wrdembedding[w] for w in tokens if w in self.wrdembedding]
            # NOTE(review): np.mean of an empty list yields nan here
            # (unlike average_words, which returns None) — confirm
            # fully-out-of-vocabulary sentences cannot occur.
            mean_we = np.mean(we, axis=0, keepdims=True).reshape(1, -1)
            if mean_we.shape == (1, 1):
                continue
            utt_test = utt_test + [mean_we]
        utt_test = np.concatenate(utt_test, axis=0)
        utt_test = np.vstack(utt_test)
        res = self.classifier.predict(utt_test)
        pred_class = [list(zip(*[(a, r[a]) for a in range(len(r)) if r[a] > .20]))
                      for r in res]
        res = [list(zip(self.classes_int_encoder.
                        inverse_transform(np.array(p[0], dtype='int')), p[1])) for p
               in pred_class
               if len(p) > 0]
        p_res = list(zip(sents, res))
        df_das = pd.DataFrame(p_res)
        df_das.to_csv('./da_classified_brkdown_corpora1.csv', sep='\t')
        print(df_das.head())
if __name__ == '__main__':
    # Pipeline: resample the cleaned corpus, build the train/test
    # split, then train the dialogue-act classifier.
    sampled_data = DialogueActSample()
    df_cleaned1 = pd.read_csv('./cleaned.csv', sep='\t')
    sampling = sampled_data.samplingFeatures(df_cleaned1)
    classifier = DaClassifier()
    traintest = classifier.traintestgenerate(sampling)
    logging_classifier = classifier.classifier_n(traintest)
    # brkdown = classifier.test_on_brkdown()
|
def prj_import(com_import, dst_user_dir, file_name):
    """Configure and execute a project-import command object
    (presumably a PowerFactory ComImport — verify).

    The original assigned ``g_target`` twice and asserted the attribute
    round-trip with ``is``; both were redundant (and the assert vanishes
    under ``python -O``), so each attribute is now set exactly once.
    """
    com_import.g_target = dst_user_dir
    com_import.g_file = file_name
    com_import.Execute()
def prj_dgs_import(com_import, dst_user_dir, file_name, name, template=None):
    """Configure and execute a DGS-import command object.

    ``template`` is applied only when given; otherwise the command's
    current project template is left untouched.
    """
    com_import.fFile = file_name
    com_import.targpath = dst_user_dir
    com_import.targname = name
    if template is not None:
        com_import.prjTemplate = template
    com_import.Execute()
def choose_imp_dir(user, IMPFOLD):
    """Return the first existing IMPFOLD folder under ``user``,
    creating an 'IntFolder' of that name when none exists."""
    existing = user.GetContents(IMPFOLD)
    if existing:
        return existing[0]
    return user.CreateObject('IntFolder', IMPFOLD)
def clear_dir(dir):
    """Delete every object currently contained in ``dir``."""
    for obj in dir.GetContents():
        obj.Delete()
def run_ldf(com_ldf):
    """Apply a fixed set of load-flow attributes and run the command.

    The meaning of each flag is defined by the target tool; the values
    and their order are unchanged from the original.
    """
    settings = (
        ('iopt_net', 0),
        # ('iopt_at', 1),  # kept disabled, as in the original
        ('iopt_pq', 0),
        ('errlf', 0.001),
        ('erreq', 0.01),
    )
    for attr_name, attr_value in settings:
        com_ldf.SetAttribute(attr_name, attr_value)
    com_ldf.Execute()
|
from django import forms
from .models import Post,Client,Vendor,Match,NumberOfPhases
from django.forms.models import inlineformset_factory
class PostForm(forms.ModelForm):
    """ModelForm exposing every field of Post."""

    class Meta:
        model = Post
        fields = '__all__'
class ClientForm(forms.ModelForm):
    """ModelForm exposing every field of Client."""

    class Meta:
        model = Client
        fields = '__all__'
class VendorForm(forms.ModelForm):
    """ModelForm exposing every field of Vendor."""

    class Meta:
        model = Vendor
        fields = '__all__'
class NumberOfPhasesForm(forms.ModelForm):
    """ModelForm exposing every field of NumberOfPhases."""

    class Meta:
        model = NumberOfPhases
        fields = '__all__'
# Inline formset editing NumberOfPhases rows attached to a Match.
# NOTE(review): 'macth' looks like a typo for 'match', but it must
# mirror the actual field name on the NumberOfPhases model — verify
# before renaming.
NumberOfPhasesInlineForm = inlineformset_factory(
    Match,
    NumberOfPhases,
    form=NumberOfPhasesForm,
    fields=['macth', 'phase', 'description', 'timeline', 'payments'],
    extra=1,
)
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework import status
from api.models import Comment
from api.serializers import CommentSerializer
class CommentListAPIView(APIView):
    """List the comments of a post, or create one as the requester."""

    permission_classes = (IsAuthenticatedOrReadOnly,)

    def get(self, request, post_id):
        """Return every comment attached to ``post_id``."""
        comments = Comment.objects.filter(post_id=post_id)
        serializer = CommentSerializer(instance=comments, many=True)
        return Response(serializer.data)

    def post(self, request, post_id):
        """Create a comment on ``post_id`` authored by the requester.

        NOTE(review): mutating ``request.data`` assumes a mutable
        mapping (e.g. parsed JSON); form-encoded QueryDicts are
        immutable — confirm the accepted content types.
        """
        request.data['post_id'] = post_id
        request.data['author_id'] = request.user.pk
        serializer = CommentSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data)
|
from django.db import models
from core.models import User
class StudentProfile(models.Model):
    # UUID primary key for the profile record (caller-supplied: no
    # default is declared here).
    student_profile_id = models.UUIDField(primary_key=True)
    # Each user has at most one student profile; deleting the user
    # cascades to the profile.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
|
from django.contrib import admin
from podminer.models import Ranking, RankingHistory, UpdateTaskResult, UserMetrics, GlobalTop100,GenreTop100,RegionTop100,CountryTop100,PowerRankingHistory
# Register your models here.
# Registration order matches the original one-call-per-model version.
for model in (
    Ranking,
    RankingHistory,
    UpdateTaskResult,
    UserMetrics,
    GlobalTop100,
    GenreTop100,
    RegionTop100,
    CountryTop100,
    PowerRankingHistory,
):
    admin.site.register(model)
#!/usr/bin/env python3
import sys
from sys import argv
import re
import copy
from Bio.PDB import *
from Bio.PDB.MMCIF2Dict import MMCIF2Dict
"""
# pachinam_obj.py
To be used from within PyMOL, right after cif files have been loaded into objects
Uses the already defined objects to pull pairs of chain identifiers and names from cif files and create separate objects - essentially does the job of split_chains but adds the defined name from the cif file.
Names are of the format:
cifID_name_chainID
Hard-codes destination of cif files to a directory which is called cif and is within the current working directory - this means you need to have the cif files in this directory which is in the working directory (since the script is being ran from within PyMOL this would be the current working directory of PyMOL)
Tries to remove some lengthy parts of names often found in big structure files (e.g. 30S ribosomal protein, ribosomal RNA etc.) for better viewing in PyMOL.
Has been tested to work up to version 2.1.0 in PyMOL
Uses biopython and python3 so have them installed.
"""
# Parse structures quietly; cif files are expected under ./cif/ in the
# current working directory (hard-coded, per the module docstring).
parser = MMCIFParser(QUIET=True)
string = "./cif/"
fileext = ".cif"
# Turn every currently loaded PyMOL object name into its cif file path.
cifs = list(cmd.get_object_list())
newcifs = [x + fileext for x in cifs]
newercifs = [string + x for x in newcifs]
filenames = newercifs
#Creates the objects with given dictionary of chainids -> names and name of the object parent
def create_objects(parent, element_dict):
    """Create one PyMOL object per chain, named parent_name_chain.

    Long boilerplate phrases ("30S ribosomal protein", ...) are
    stripped from each chain description so object names stay short.
    """
    # Substitutions applied case-insensitively, in order (order matters
    # for the overlapping "RNA POLYMERASES"/"RNA POLYMERASE" pair).
    replacements = (
        ('30S ribosomal protein ', ''),
        ('50S ribosomal protein ', ''),
        ('60S ribosomal protein ', ''),
        ('40S ribosomal protein ', ''),
        ('DNA-directed ', ''),
        ('ribosomal protein ', ''),
        ('ribosomal RNA', 'rRNA'),
        ('translation', ''),
        (' initiation ', ''),
        ('RNA POLYMERASES', 'RNApol'),
        ('RNA POLYMERASE', 'RNApol'),
        ('SUBUNIT', 'subunit'),
    )
    for chain in sorted(element_dict.keys()):
        x = element_dict[chain]
        for pattern, repl in replacements:
            x = re.sub(pattern, repl, x, flags=re.I)
        # BUG FIX: the original selection string concatenated the
        # builtin ``object`` (a type, raising TypeError); the parent
        # object's name was clearly intended.
        cmd.create(parent + "_" + x + "_" + chain,
                   "chain " + chain + " and " + parent)
#Creates a dictionary {chainids -> names} from a given cif file
def assign(filename, dict):
    """Fill ``dict`` with {strand_id: entity description} pairs read
    from the cif file ``filename`` and return it.

    The parameter keeps its original (builtin-shadowing) name ``dict``
    so callers are unaffected; it is aliased immediately to avoid using
    the shadowed builtin name below.
    """
    chain_names = dict
    #Grab header
    mmcif_dict = MMCIF2Dict(filename)
    #Grab entity names
    details = mmcif_dict['_entity.pdbx_description']
    #Grab chain ids
    strand_id = mmcif_dict['_entity_poly.pdbx_strand_id']
    # Descriptions line up index-for-index with the strand ids
    # (enumerate replaces the original manual counter).
    for idx, chain in enumerate(strand_id):
        chain_names[chain] = details[idx]
    return chain_names
for file in filenames:
    temp_chain_names={}
    assign(file, temp_chain_names)
    chain_names=copy.deepcopy(temp_chain_names)
    #Following for loop is for structures which have multiple copies of the structure
    #otherwise the hashes would get overwritten due to the repeating of objects
    # A strand id like "A,B,C" means several chains share one entity
    # description: split it so each chain gets its own entry.
    for chid in temp_chain_names.keys():
        if re.search(",", chid):
            temp = chid.split(",")
            for x in temp:
                chain_names[x]=chain_names[chid]
            del chain_names[chid]
    # Strip the path and extension to recover the cif id for naming.
    cutfile = file.split("/")[-1]
    cutfile = re.sub('\.cif', '', cutfile)
    create_objects(cutfile, chain_names)
|
import os
import json
import numpy as np
import tensorflow as tf
import experiments
from db import db
from config import Config
from argparse import ArgumentParser
from utils import logger
from utils import py_utils
from ops import data_loader
from ops import model_utils
from ops import loss_utils
from ops import eval_metrics
from ops import training
from ops import hp_opt_utils
from ops import tf_fun
from ops import gradients
from tensorboard.plugins.pr_curve import summary as pr_summary
def print_model_architecture(model_summary):
    """Print the model summary lines framed by underscore rules.

    Parenthesized single-argument prints produce identical output on
    Python 2 (expression statement) and Python 3.
    """
    rule = '_' * 20
    print(rule)
    print('Model architecture:')
    print(rule)
    for summary_line in model_summary:
        print(summary_line)
    print(rule)
def add_to_config(d, config):
    """Copy each key of ``d`` onto ``config`` as an attribute,
    unwrapping one-element lists, and return ``config``.

    ``.items()`` behaves the same as the original ``.iteritems()`` on
    Python 2 and also works on Python 3.
    """
    for key, value in d.items():
        if isinstance(value, list) and len(value) == 1:
            value = value[0]
        setattr(config, key, value)
    return config
def process_DB_exps(experiment_name, log, config):
    """Interpret and prepare hyperparams at runtime."""
    # Claim one unrun hyperparameter combination for this experiment
    # from the DB; exp_id is None when none are left.
    exp_params, exp_id = db.get_parameters(
        experiment_name=experiment_name,
        log=log,
        evaluation=config.load_and_evaluate_ckpt)
    if exp_id is None:
        err = 'No empty experiments found.' + \
            'Did you select the correct experiment name?'
        log.fatal(err)
        raise RuntimeError(err)
    # Python 2 idioms (iteritems/basestring).  Strings shaped like
    # '{a,b,c}' encode lists; unpack before attaching to the config.
    for k, v in exp_params.iteritems():
        if isinstance(v, basestring) and '{' in v and '}' in v:
            v = v.strip('{').strip('}').split(',')
        setattr(config, k, v)
    # Record the claimed DB row id unless something already set one.
    if not hasattr(config, '_id'):
        config._id = exp_id
    return config, exp_params
def get_data_pointers(dataset, base_dir, cv, log):
    """Get data file pointers.

    Returns (tfrecords path, image means, label means) for ``dataset``
    and cross-validation fold ``cv``.  Means come from a .npy file or,
    failing that, a .npz archive; either may be absent, in which case
    the corresponding return values are None.
    """
    data_pointer = os.path.join(base_dir, '%s_%s.tfrecords' % (dataset, cv))
    data_means = os.path.join(base_dir, '%s_%s_means.npy' % (dataset, cv))
    log.info(
        'Using %s tfrecords: %s' % (
            cv,
            data_pointer)
    )
    py_utils.check_path(
        data_pointer, log, '%s not found.' % data_pointer)
    mean_loc = py_utils.check_path(
        data_means, log, '%s not found for cv: %s.' % (data_means, cv))
    data_means_image, data_means_label = None, None
    if not mean_loc:
        # The .npy is missing; fall back to a .npz with the same stem.
        alt_data_pointer = data_means.replace('.npy', '.npz')
        alt_data_pointer = py_utils.check_path(
            alt_data_pointer, log, '%s not found.' % alt_data_pointer)
        # TODO: Fix this API and make it more flexible. Kill npzs in Allen?
        if not alt_data_pointer:
            # No mean for this dataset
            data_means = None
        else:
            log.info('Loading means from npz for cv: %s.' % cv)
            data_means = np.load(alt_data_pointer)
            # Accept both singular and plural key spellings.
            if 'image' in data_means.keys():
                data_means_image = data_means['image']
            if 'images' in data_means.keys():
                data_means_image = data_means['images']
            if 'label' in data_means.keys():
                data_means_label = data_means['label']
            if 'labels' in data_means.keys():
                data_means_label = data_means['labels']
            # Object arrays wrap the real payload; unwrap via .item().
            # NOTE(review): np.object is removed in NumPy 1.24+; this
            # needs np.object_ (or plain object) on upgrade.
            if data_means_image is not None and isinstance(
                data_means_image, np.object):
                data_means_image = data_means_image.item()
            if data_means_label is not None and isinstance(
                data_means_label, np.object):
                data_means_label = data_means_label.item()
    else:
        data_means_image = np.load(data_means)
    return data_pointer, data_means_image, data_means_label
def main(
        experiment_name,
        list_experiments=False,
        load_and_evaluate_ckpt=None,
        placeholder_data=None,
        grad_images=False,
        gpu_device='/gpu:0'):
    """Create a tensorflow worker to run experiments in your DB.

    Builds train/val input pipelines (tfrecords or feed-dict placeholders),
    constructs a training model and a weight-sharing validation model on
    `gpu_device`, wires up losses, metrics and tensorboard summaries, then
    hands off to the training loop (or a one-epoch evaluation loop when
    `load_and_evaluate_ckpt` is set).

    Args:
        experiment_name: Experiment key in the DB; pulled from the DB if None.
        list_experiments: If True, print initialized experiments and return.
        load_and_evaluate_ckpt: Checkpoint path; when set, training is skipped
            and evaluation runs for a single epoch with shuffling disabled.
        placeholder_data: Dict of shapes/dtypes for feed-dict input; when
            given, tfrecord queues are bypassed and the created placeholder
            tensors are stored back into this dict.
        grad_images: If True, also build input-gradient (saliency) tensors.
        gpu_device: Device string used for model construction.

    NOTE(review): this function uses Python 2 `print` statements and
    `dict.values()[0]` indexing, so it is Python-2 only.
    """
    if list_experiments:
        # Print the experiments registered in the DB, then bail out.
        exps = db.list_experiments()
        print '_' * 30
        print 'Initialized experiments:'
        print '_' * 30
        for l in exps:
            print l.values()[0]
        print '_' * 30
        if len(exps) == 0:
            print 'No experiments found.'
        else:
            print 'You can add to the DB with: '\
                'python prepare_experiments.py --experiment=%s' % \
                exps[0].values()[0]
        return
    if experiment_name is None:
        print 'No experiment specified. Pulling one out of the DB.'
        experiment_name = db.get_experiment_name()
    # Prepare to run the model
    config = Config()
    condition_label = '%s_%s' % (experiment_name, py_utils.get_dt_stamp())
    experiment_label = '%s' % (experiment_name)
    log = logger.get(os.path.join(config.log_dir, condition_label))
    assert experiment_name is not None, 'Empty experiment name.'
    experiment_dict = experiments.experiments()[experiment_name]()
    config = add_to_config(d=experiment_dict, config=config)  # Globals
    config.load_and_evaluate_ckpt = load_and_evaluate_ckpt
    if load_and_evaluate_ckpt is not None:
        # Remove the train operation and add a ckpt pointer
        from ops import evaluation
    config, exp_params = process_DB_exps(
        experiment_name=experiment_name,
        log=log,
        config=config)  # Update config w/ DB params
    dataset_module = py_utils.import_module(
        model_dir=config.dataset_info,
        dataset=config.dataset)
    dataset_module = dataset_module.data_processing()  # hardcoded class name
    # Resolve the fold keys; fall back to literal 'train' when none match.
    train_key = [k for k in dataset_module.folds.keys() if 'train' in k]
    if not len(train_key):
        train_key = 'train'
    else:
        train_key = train_key[0]
    train_data, train_means_image, train_means_label = get_data_pointers(
        dataset=config.dataset,
        base_dir=config.tf_records,
        cv=train_key,
        log=log)
    val_key = [k for k in dataset_module.folds.keys() if 'val' in k]
    # NOTE(review): when no val fold exists this falls back to the *train*
    # fold, so validation metrics would be measured on training data.
    if not len(val_key):
        val_key = 'train'
    else:
        val_key = val_key[0]
    val_data, val_means_image, val_means_label = get_data_pointers(
        dataset=config.dataset,
        base_dir=config.tf_records,
        cv=val_key,
        log=log)
    # Initialize output folders
    dir_list = {
        'checkpoints': os.path.join(
            config.checkpoints, condition_label),
        'summaries': os.path.join(
            config.summaries, condition_label),
        'condition_evaluations': os.path.join(
            config.condition_evaluations, condition_label),
        'experiment_evaluations': os.path.join(  # DEPRECIATED
            config.experiment_evaluations, experiment_label),
        'visualization': os.path.join(
            config.visualizations, condition_label),
        'weights': os.path.join(
            config.condition_evaluations, condition_label, 'weights')
    }
    [py_utils.make_dir(v) for v in dir_list.values()]
    # Prepare data loaders on the cpu
    if all(isinstance(i, list) for i in config.data_augmentations):
        if config.data_augmentations:
            config.data_augmentations = py_utils.flatten_list(
                config.data_augmentations,
                log)
    if load_and_evaluate_ckpt is not None:
        # Evaluation mode: a single pass, deterministic order.
        config.epochs = 1
        config.train_shuffle = False
        config.val_shuffle = False
    with tf.device('/cpu:0'):
        if placeholder_data:
            # Feed-dict path: build placeholders from the supplied
            # shapes/dtypes, augment them, and hand the placeholders back.
            placeholder_shape = placeholder_data['train_image_shape']
            placeholder_dtype = placeholder_data['train_image_dtype']
            original_train_images = tf.placeholder(
                dtype=placeholder_dtype,
                shape=placeholder_shape,
                name='train_images')
            placeholder_shape = placeholder_data['train_label_shape']
            placeholder_dtype = placeholder_data['train_label_dtype']
            original_train_labels = tf.placeholder(
                dtype=placeholder_dtype,
                shape=placeholder_shape,
                name='train_labels')
            placeholder_shape = placeholder_data['val_image_shape']
            placeholder_dtype = placeholder_data['val_image_dtype']
            original_val_images = tf.placeholder(
                dtype=placeholder_dtype,
                shape=placeholder_shape,
                name='val_images')
            placeholder_shape = placeholder_data['val_label_shape']
            placeholder_dtype = placeholder_data['val_label_dtype']
            original_val_labels = tf.placeholder(
                dtype=placeholder_dtype,
                shape=placeholder_shape,
                name='val_labels')
            # Apply augmentations
            (
                train_images,
                train_labels
            ) = data_loader.placeholder_image_augmentations(
                images=original_train_images,
                model_input_image_size=dataset_module.model_input_image_size,
                labels=original_train_labels,
                data_augmentations=config.data_augmentations,
                batch_size=config.batch_size)
            (
                val_images,
                val_labels
            ) = data_loader.placeholder_image_augmentations(
                images=original_val_images,
                model_input_image_size=dataset_module.model_input_image_size,
                labels=original_val_labels,
                data_augmentations=config.data_augmentations,
                batch_size=config.batch_size)
            # Store in the placeholder dict
            placeholder_data['train_images'] = original_train_images
            placeholder_data['train_labels'] = original_train_labels
            placeholder_data['val_images'] = original_val_images
            placeholder_data['val_labels'] = original_val_labels
        else:
            # tfrecord queue path.
            train_images, train_labels = data_loader.inputs(
                dataset=train_data,
                batch_size=config.batch_size,
                model_input_image_size=dataset_module.model_input_image_size,
                tf_dict=dataset_module.tf_dict,
                data_augmentations=config.data_augmentations,
                num_epochs=config.epochs,
                tf_reader_settings=dataset_module.tf_reader,
                shuffle=config.shuffle_train,
                resize_output=config.resize_output)
            if hasattr(config, 'val_augmentations'):
                val_augmentations = config.val_augmentations
            else:
                val_augmentations = config.data_augmentations
            val_images, val_labels = data_loader.inputs(
                dataset=val_data,
                batch_size=config.batch_size,
                model_input_image_size=dataset_module.model_input_image_size,
                tf_dict=dataset_module.tf_dict,
                data_augmentations=val_augmentations,
                num_epochs=config.epochs,
                tf_reader_settings=dataset_module.tf_reader,
                shuffle=config.shuffle_val,
                resize_output=config.resize_output)
        log.info('Created tfrecord dataloader tensors.')
    # Load model specification
    struct_name = config.model_struct.split(os.path.sep)[-1]
    try:
        model_dict = py_utils.import_module(
            dataset=struct_name,
            model_dir=os.path.join(
                'models',
                'structs',
                experiment_name).replace(os.path.sep, '.')
        )
    except IOError:
        # NOTE(review): only logs; model_dict stays unbound and the next
        # line will raise NameError if the import actually failed.
        print 'Could not find the model structure: %s in folder %s' % (
            struct_name,
            experiment_name)
    # Inject model_dict with hyperparameters if requested
    model_dict.layer_structure = hp_opt_utils.inject_model_with_hps(
        layer_structure=model_dict.layer_structure,
        exp_params=exp_params)
    # Prepare variables for the models
    # NOTE(review): the warning says "> 1 dimension" but the condition only
    # fires when the output size has exactly 2 dims -- confirm intent.
    if len(dataset_module.output_size) == 2:
        log.warning(
            'Found > 1 dimension for your output size.'
            'Converting to a scalar.')
        dataset_module.output_size = np.prod(
            dataset_module.output_size)
    if hasattr(model_dict, 'output_structure'):
        # Use specified output layer
        output_structure = model_dict.output_structure
    else:
        output_structure = None
    # Correct number of output neurons if needed
    # NOTE(review): if output_structure is None and dataloader_override is
    # set, output_structure[-1] raises TypeError -- confirm override implies
    # an explicit output structure.
    if config.dataloader_override and\
            'weights' in output_structure[-1].keys():
        output_neurons = output_structure[-1]['weights'][0]
        size_check = output_neurons != dataset_module.output_size
        fc_check = output_structure[-1]['layers'][0] == 'fc'
        if size_check and fc_check:
            output_structure[-1]['weights'][0] = dataset_module.output_size
            log.warning('Adjusted output neurons from %s to %s.' % (
                output_neurons,
                dataset_module.output_size))
    # Prepare model on GPU
    if not hasattr(dataset_module, 'input_normalization'):
        dataset_module.input_normalization = None
    with tf.device(gpu_device):
        with tf.variable_scope('cnn') as scope:
            # Training model
            model = model_utils.model_class(
                mean=train_means_image,
                training=True,
                output_size=dataset_module.output_size,
                input_normalization=dataset_module.input_normalization)
            train_scores, model_summary, _ = model.build(
                data=train_images,
                layer_structure=model_dict.layer_structure,
                output_structure=output_structure,
                log=log,
                tower_name='cnn')
            if grad_images:
                # Saliency: gradient of the target-class logit w.r.t. input.
                oh_dims = int(train_scores.get_shape()[-1])
                target_scores = tf.one_hot(train_labels, oh_dims) * train_scores
                train_gradients = tf.gradients(target_scores, train_images)[0]
            log.info('Built training model.')
            log.debug(
                json.dumps(model_summary, indent=4),
                verbose=0)
            print_model_architecture(model_summary)
        # Normalize labels on GPU if needed
        # NOTE(review): zscore normalizes val labels with the *train* means
        # but 'mean' mode uses val_means_label -- confirm the inconsistency
        # is intended.
        if 'normalize_labels' in exp_params.keys():
            if exp_params['normalize_labels'] == 'zscore':
                train_labels -= train_means_label['mean']
                train_labels /= train_means_label['std']
                val_labels -= train_means_label['mean']
                val_labels /= train_means_label['std']
                log.info('Z-scoring labels.')
            elif exp_params['normalize_labels'] == 'mean':
                train_labels -= train_means_label['mean']
                val_labels -= val_means_label['mean']
                log.info('Mean-centering labels.')
        # Check the shapes of labels and scores
        # NOTE(review): val_scores/val_shape are referenced here but
        # val_scores is only defined in the validation scope below --
        # entering this branch raises NameError. Also the elif condition is
        # identical to the if, so the score-expansion branch is unreachable.
        if not isinstance(train_scores, list):
            if len(
                    train_scores.get_shape()) != len(
                        train_labels.get_shape()):
                train_shape = train_scores.get_shape().as_list()
                label_shape = train_labels.get_shape().as_list()
                val_shape = val_scores.get_shape().as_list()
                val_label_shape = val_labels.get_shape().as_list()
                if len(
                    train_shape) == 2 and len(
                        label_shape) == 1 and train_shape[-1] == 1:
                    train_labels = tf.expand_dims(train_labels, axis=-1)
                    val_labels = tf.expand_dims(val_labels, axis=-1)
                elif len(
                    train_shape) == 2 and len(
                        label_shape) == 1 and train_shape[-1] == 1:
                    train_scores = tf.expand_dims(train_scores, axis=-1)
                    val_scores = tf.expand_dims(val_scores, axis=-1)
        # Prepare the loss function
        train_loss, _ = loss_utils.loss_interpreter(
            logits=train_scores,  # TODO
            labels=train_labels,
            loss_type=config.loss_function,
            weights=config.loss_weights,
            dataset_module=dataset_module)
        # Add loss tensorboard tracking
        if isinstance(train_loss, list):
            for lidx, tl in enumerate(train_loss):
                tf.summary.scalar('training_loss_%s' % lidx, tl)
            train_loss = tf.add_n(train_loss)
        else:
            tf.summary.scalar('training_loss', train_loss)
        # Add weight decay if requested
        if len(model.regularizations) > 0:
            train_loss = loss_utils.wd_loss(
                regularizations=model.regularizations,
                loss=train_loss,
                wd_penalty=config.regularization_strength)
        assert config.lr is not None, 'No learning rate.'  # TODO: Make a QC function
        # An lr > 1 is interpreted as a dataset size: build a decay schedule
        # from it and force momentum.
        if config.lr > 1:
            old_lr = config.lr
            config.lr = loss_utils.create_lr_schedule(
                train_batch=config.batch_size,
                num_training=config.lr)
            config.optimizer = 'momentum'
            log.info('Forcing momentum classifier.')
        else:
            old_lr = None
        train_op = loss_utils.optimizer_interpreter(
            loss=train_loss,
            lr=config.lr,
            optimizer=config.optimizer,
            constraints=config.optimizer_constraints,
            model=model)
        log.info('Built training loss function.')
        # Add a score for the training set
        train_accuracy = eval_metrics.metric_interpreter(
            metric=dataset_module.score_metric,  # TODO: Attach to exp cnfg
            pred=train_scores,  # TODO
            labels=train_labels)
        # Add aux scores if requested
        train_aux = {}
        if hasattr(dataset_module, 'aux_scores'):
            for m in dataset_module.aux_scores:
                train_aux[m] = eval_metrics.metric_interpreter(
                    metric=m,
                    pred=train_scores,
                    labels=train_labels)  # [0]  # TODO: Fix for multiloss
        # Prepare remaining tensorboard summaries
        if config.tensorboard_images:
            if len(train_images.get_shape()) == 4:
                tf_fun.image_summaries(train_images, tag='Training images')
            # Only image-like (spatial) label tensors get image summaries.
            if (np.asarray(
                    train_labels.get_shape().as_list()) > 1).sum() > 2:
                tf_fun.image_summaries(
                    train_labels,
                    tag='Training_targets')
                tf_fun.image_summaries(
                    train_scores,
                    tag='Training_predictions')
        if isinstance(train_accuracy, list):
            for tidx, ta in enumerate(train_accuracy):
                tf.summary.scalar('training_accuracy_%s' % tidx, ta)
        else:
            tf.summary.scalar('training_accuracy', train_accuracy)
        if config.pr_curve:
            if isinstance(train_scores, list):
                for pidx, train_score in enumerate(train_scores):
                    train_label = train_labels[:, pidx]
                    pr_summary.op(
                        tag='training_pr_%s' % pidx,
                        predictions=tf.cast(
                            tf.argmax(
                                train_score,
                                axis=-1),
                            tf.float32),
                        labels=tf.cast(train_label, tf.bool),
                        display_name='training_precision_recall_%s' % pidx)
            else:
                pr_summary.op(
                    tag='training_pr',
                    predictions=tf.cast(
                        tf.argmax(
                            train_scores,
                            axis=-1),
                        tf.float32),
                    labels=tf.cast(train_labels, tf.bool),
                    display_name='training_precision_recall')
        log.info('Added training summaries.')
        # NOTE(review): tf.AUTO_REUSE is passed positionally as
        # default_name, not reuse=; harmless because reuse_variables() is
        # called explicitly below, but confirm intent.
        with tf.variable_scope('cnn', tf.AUTO_REUSE) as scope:
            # Validation model
            scope.reuse_variables()
            val_model = model_utils.model_class(
                mean=train_means_image,  # Normalize with train data
                training=False,
                output_size=dataset_module.output_size,
                input_normalization=dataset_module.input_normalization)
            val_scores, _, _ = val_model.build(  # Ignore summary
                data=val_images,
                layer_structure=model_dict.layer_structure,
                output_structure=output_structure,
                log=log,
                tower_name='cnn')
            if grad_images:
                oh_dims = int(val_scores.get_shape()[-1])
                target_scores = tf.one_hot(val_labels, oh_dims) * val_scores
                val_gradients = tf.gradients(target_scores, val_images)[0]
            log.info('Built validation model.')
        # Check the shapes of labels and scores
        val_loss, _ = loss_utils.loss_interpreter(
            logits=val_scores,
            labels=val_labels,
            loss_type=config.loss_function,
            weights=config.loss_weights,
            dataset_module=dataset_module)
        # Add loss tensorboard tracking
        if isinstance(val_loss, list):
            for lidx, tl in enumerate(val_loss):
                tf.summary.scalar('validation_loss_%s' % lidx, tl)
            val_loss = tf.add_n(val_loss)
        else:
            tf.summary.scalar('validation_loss', val_loss)
        # Add a score for the validation set
        val_accuracy = eval_metrics.metric_interpreter(
            metric=dataset_module.score_metric,  # TODO
            pred=val_scores,
            labels=val_labels)
        # Add aux scores if requested
        val_aux = {}
        if hasattr(dataset_module, 'aux_scores'):
            for m in dataset_module.aux_scores:
                val_aux[m] = eval_metrics.metric_interpreter(
                    metric=m,
                    pred=val_scores,
                    labels=val_labels)  # [0]  # TODO: Fix for multiloss
        # Prepare tensorboard summaries
        if config.tensorboard_images:
            if len(val_images.get_shape()) == 4:
                tf_fun.image_summaries(
                    val_images,
                    tag='Validation')
            if (np.asarray(
                    val_labels.get_shape().as_list()) > 1).sum() > 2:
                tf_fun.image_summaries(
                    val_labels,
                    tag='Validation_targets')
                tf_fun.image_summaries(
                    val_scores,
                    tag='Validation_predictions')
        if isinstance(val_accuracy, list):
            for vidx, va in enumerate(val_accuracy):
                tf.summary.scalar('validation_accuracy_%s' % vidx, va)
        else:
            tf.summary.scalar('validation_accuracy', val_accuracy)
        if config.pr_curve:
            if isinstance(val_scores, list):
                for pidx, val_score in enumerate(val_scores):
                    val_label = val_labels[:, pidx]
                    pr_summary.op(
                        tag='validation_pr_%s' % pidx,
                        predictions=tf.cast(
                            tf.argmax(
                                val_score,
                                axis=-1),
                            tf.float32),
                        labels=tf.cast(val_label, tf.bool),
                        display_name='validation_precision_recall_%s' %
                        pidx)
            else:
                pr_summary.op(
                    tag='validation_pr',
                    predictions=tf.cast(
                        tf.argmax(
                            val_scores,
                            axis=-1),
                        tf.float32),
                    labels=tf.cast(val_labels, tf.bool),
                    display_name='validation_precision_recall')
        log.info('Added validation summaries.')
    # Set up summaries and saver
    if not hasattr(config, 'max_to_keep'):
        config.max_to_keep = None
    saver = tf.train.Saver(
        var_list=tf.global_variables(),
        max_to_keep=config.max_to_keep)
    summary_op = tf.summary.merge_all()
    # Initialize the graph
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    # Need to initialize both of these if supplying num_epochs to inputs
    sess.run(
        tf.group(
            tf.global_variables_initializer(),
            tf.local_variables_initializer())
    )
    summary_writer = tf.summary.FileWriter(dir_list['summaries'], sess.graph)
    # Set up exemplar threading
    if placeholder_data:
        # Feed-dict input needs no queue runners.
        coord, threads = None, None
    else:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    # Create dictionaries of important training and validation information
    train_dict = {
        'train_loss': train_loss,
        'train_images': train_images,
        'train_labels': train_labels,
        'train_op': train_op,
        'train_scores': train_scores
    }
    val_dict = {
        'val_loss': val_loss,
        'val_images': val_images,
        'val_labels': val_labels,
        'val_scores': val_scores,
    }
    if grad_images:
        train_dict['train_gradients'] = train_gradients
        val_dict['val_gradients'] = val_gradients
    if isinstance(train_accuracy, list):
        for tidx, (ta, va) in enumerate(zip(train_accuracy, val_accuracy)):
            train_dict['train_accuracy_%s' % tidx] = ta
            val_dict['val_accuracy_%s' % tidx] = va
    else:
        train_dict['train_accuracy_0'] = train_accuracy
        val_dict['val_accuracy_0'] = val_accuracy
    if load_and_evaluate_ckpt is not None:
        # Remove the train operation and add a ckpt pointer
        del train_dict['train_op']
    # NOTE(review): checks 'aux_score' (singular) but iterates 'aux_scores'
    # (plural) -- confirm which attribute the dataset modules define.
    if hasattr(dataset_module, 'aux_score'):
        # Attach auxillary scores to tensor dicts
        for m in dataset_module.aux_scores:
            train_dict['train_aux_%s' % m] = train_aux[m]
            val_dict['val_aux_%s' % m] = val_aux[m]
    # Start training loop
    if old_lr is not None:
        # Restore the raw lr value before persisting the config.
        config.lr = old_lr
    np.save(
        os.path.join(
            dir_list['condition_evaluations'], 'training_config_file'),
        config)
    log.info('Starting training')
    if load_and_evaluate_ckpt is not None:
        return evaluation.evaluation_loop(
            config=config,
            db=db,
            coord=coord,
            sess=sess,
            summary_op=summary_op,
            summary_writer=summary_writer,
            saver=saver,
            threads=threads,
            summary_dir=dir_list['summaries'],
            checkpoint_dir=dir_list['checkpoints'],
            weight_dir=dir_list['weights'],
            train_dict=train_dict,
            val_dict=val_dict,
            train_model=model,
            val_model=val_model,
            exp_params=exp_params,
            placeholder_data=placeholder_data)
    else:
        output_dict = training.training_loop(
            config=config,
            db=db,
            coord=coord,
            sess=sess,
            summary_op=summary_op,
            summary_writer=summary_writer,
            saver=saver,
            threads=threads,
            summary_dir=dir_list['summaries'],
            checkpoint_dir=dir_list['checkpoints'],
            weight_dir=dir_list['weights'],
            train_dict=train_dict,
            val_dict=val_dict,
            train_model=model,
            val_model=val_model,
            exp_params=exp_params)
    log.info('Finished training.')
    model_name = config.model_struct.replace('/', '_')
    if output_dict is not None:
        py_utils.save_npys(
            data=output_dict,
            model_name=model_name,
            output_string=dir_list['experiment_evaluations'])
if __name__ == '__main__':
    # CLI entry point: forward parsed flags straight into main().
    parser = ArgumentParser()
    parser.add_argument(
        '--experiment',
        dest='experiment_name',
        type=str,
        default=None,
        help='Name of the experiment.')
    parser.add_argument(
        '--list_experiments',
        dest='list_experiments',
        action='store_true',
        # Bug fix: help text was a copy-paste of --experiment's.
        help='List the experiments initialized in the DB and exit.')
    # TODO: Add the ability to specify multiple GPUs for parallelization.
    args = parser.parse_args()
    main(**vars(args))
|
import numpy as np
import dill
def get_fun(name):
    """Load a dill-serialized callable from the file `name`.

    The payload is double-serialized: the file contains a dill pickle whose
    value is itself a dill bytestring, hence load() then loads().

    Bug fix: the original opened the file and never closed it; a context
    manager now releases the handle deterministically.
    """
    with open(name, 'rb') as input_file:
        return dill.loads(dill.load(input_file))
def is_valid(X, k, n):
    """Check whether k behaves like a valid kernel on the n samples in X.

    Returns False as soon as k is found to be asymmetric; otherwise builds
    the full Gram matrix and accepts it when every eigenvalue is >= -0.1
    (a small tolerance for numerical noise).
    """
    gram = np.zeros((n, n))
    for row in range(n):
        # Only the upper triangle is evaluated; symmetry fills the rest.
        for col in range(row, n):
            forward = k(X[row], X[col])
            backward = k(X[col], X[row])
            if forward != backward:
                # Not symmetric -> not a kernel.
                return False
            gram[row, col] = forward
            gram[col, row] = backward
    # Positive semi-definite up to a -0.1 tolerance.
    return np.all(np.linalg.eigvals(gram) + .1 >= 0)
def main():
    """Sample random inputs and report whether each pickled kernel is valid."""
    n = 150
    # 150 random column vectors drawn uniformly from [-5, 5]^3.
    X = [np.random.uniform(low=-5, high=5, size=3).reshape(-1, 1)
         for _ in range(n)]
    for idx in range(4):
        filename = 'function' + str(idx + 1) + '.pkl'
        func = get_fun(filename)
        print(idx + 1, ' : ', is_valid(X, func, n))
    # Kernel 5 ships with its own input sampler.
    sampler_file = 'k5sampler.pkl'
    filename = 'function5.pkl'
    sampler = get_fun(sampler_file)
    func = get_fun(filename)
    Y = [sampler() for _ in range(n)]
    print(5, ' : ', is_valid(Y, func, n))


if __name__ == '__main__':
    main()
|
from sample import getReport
import argparse
def main():
    """Parse the required 'extension' argument and print the report lines."""
    parser = argparse.ArgumentParser()
    parser.add_argument('extension')
    options = parser.parse_args()
    report_lines = getReport(options.extension)
    print("\n".join(report_lines))


if __name__ == '__main__':
    main()
import re
def get_half(seq):
    """Return the repeated unit of the first adjacent tandem repeat in seq.

    Searches for a DNA substring immediately followed by an identical copy
    of itself ("unitunit") and returns the unit; None when no such repeat
    exists.
    """
    match = re.search(r'([ATCG]+)\1', seq)
    return match.group(1) if match else None
def check_tail(block, sequence, terminal):
    """Check for one extra copy of `block` starting at index `terminal`.

    Returns (1, len(block)) when the slice of `sequence` at `terminal`
    equals `block`, else (0, 0) — i.e. (repeat increment, length increment).
    """
    unit_len = len(block)
    if sequence[terminal:terminal + unit_len] == block:
        return 1, unit_len
    return 0, 0
def scan_kmer(kmer):
""" kmer is the seed to be considered each time.
It's a partial sequence or a fasta object, and
it's not necessary to be the same length as
given in multiple line fasta file.
"""
ssr_obj = re.search(r'([ATCG]+)\1', kmer)
if ssr_obj and len(ssr_obj.group()) >= 6:
half = kmer
start_pos = ssr_obj.start(0)
end_pos = ssr_obj.end(0)
time = 2
while half:
unit = half
half = get_half(half)
if half:
time = 2*time
else:
return unit, start_pos, end_pos, time
else:
return None
def calibrate_kmer(k):
    """
    correct the times of repeated unit
    by adding the possible missing single one at the end.

    Returns (unit, start, end, times) with the tail copy folded in, or None
    when scan_kmer finds no qualifying repeat.

    Bug fix: scan_kmer returns a tuple, and the original mutated it in
    place (`k_result[3] += ...`), raising TypeError on every non-None
    result.  A new tuple is built instead.
    """
    k_result = scan_kmer(k)
    if not k_result:
        return k_result
    unit, start_pos, end_pos, times = k_result
    # NOTE(review): the tail is probed at end_pos + 1, skipping one base
    # after the matched repeat -- confirm this offset is intended.
    extra_repeat, extra_len = check_tail(unit, k, end_pos + 1)
    return unit, start_pos, end_pos + extra_len, times + extra_repeat
|
import math

# Read a real number and display its integer (truncated) part.
value = float(input('Digite um número real: '))
integer_part = math.trunc(value)
print('A porção inteira de {} eh: {}'.format(value, integer_part))
|
import numpy
from pandas import read_csv
from sklearn.utils import resample
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from matplotlib import pyplot
# load dataset
data = read_csv('pima-indians-diabetes.data.csv', header=None)
values = data.values
# configure bootstrap: 1000 resamples, each half the dataset size
n_iterations = 1000
n_size = int(len(data) * 0.50)
# run bootstrap
stats = list()
for i in range(n_iterations):
    # prepare train and test sets
    # Bootstrap sample with replacement; evaluate on the out-of-bag rows.
    train = resample(values, n_samples=n_size)
    # NOTE(review): row-membership test is O(rows^2) per iteration and the
    # OOB test set could in principle be empty (accuracy_score would then
    # raise) -- confirm acceptable for this dataset size.
    test = numpy.array([x for x in values if x.tolist() not in train.tolist()])
    # fit model (last column is the class label)
    model = DecisionTreeClassifier()
    model.fit(train[:,:-1], train[:,-1])
    # evaluate model
    predictions = model.predict(test[:,:-1])
    score = accuracy_score(test[:,-1], predictions)
    print(score)
    stats.append(score)
# plot scores
pyplot.hist(stats)
pyplot.show()
# confidence intervals from the empirical score distribution
alpha = 0.95
# lower bound: (1-alpha)/2 percentile, clipped to [0, 1]
p = ((1.0-alpha)/2.0) * 100
lower = max(0.0, numpy.percentile(stats, p))
# upper bound: alpha + (1-alpha)/2 percentile
p = (alpha+((1.0-alpha)/2.0)) * 100
upper = min(1.0, numpy.percentile(stats, p))
print('%.1f confidence interval %.1f%% and %.1f%%' % (alpha*100, lower*100, upper*100))
|
import os
import sys
import platform
import numpy as np
import sklearn.linear_model as sl
import sklearn.model_selection as sm
import matplotlib.pyplot as mp
def make_data():
    """Return a toy 2-D dataset: 9 samples in 3 well-separated clusters.

    Returns (x, y) where x is a (9, 2) float array of points and y the
    (9,) integer class labels.
    """
    samples = [
        [4, 7], [3.5, 8], [3.1, 6.2],    # class 0: upper-left cluster
        [0.5, 1], [1, 2], [1.2, 1.9],    # class 1: lower-left cluster
        [6, 2], [5.7, 1.5], [5.4, 2.2],  # class 2: lower-right cluster
    ]
    labels = [0, 0, 0, 1, 1, 1, 2, 2, 2]
    return np.array(samples), np.array(labels)
def train_model(x, y):
    """Fit and return a liblinear logistic-regression classifier (C=80)."""
    # fit() returns the estimator itself, so train-and-return is one line.
    return sl.LogisticRegression(solver='liblinear', C=80).fit(x, y)
def pred_model(model, x):
    """Return the model's class predictions for the samples in x."""
    return model.predict(x)
def init_chart():
    """Configure the current pyplot figure: title, axis labels, unit/half-unit
    tick locators on both axes, and a dotted horizontal grid."""
    # Light-grey figure background (240/255 on each RGB channel).
    mp.gcf().set_facecolor(np.ones(3) * 240 / 255)
    mp.title('Logistic Classifier', fontsize=20)
    mp.xlabel('x', fontsize=14)
    mp.ylabel('y', fontsize=14)
    ax = mp.gca()
    # Major ticks every 1 unit, minor ticks every 0.5, on both axes.
    ax.xaxis.set_major_locator(mp.MultipleLocator())
    ax.xaxis.set_minor_locator(mp.MultipleLocator(0.5))
    ax.yaxis.set_major_locator(mp.MultipleLocator())
    ax.yaxis.set_minor_locator(mp.MultipleLocator(0.5))
    # Mirror tick labels on the top/right edges as well.
    mp.tick_params(which='both', top=True, right=True,
                   labelright=True, labelsize=10)
    mp.grid(axis='y', linestyle=':')
def draw_bk_chart(x,y):
    """Paint the decision-region background: x is the (xs, ys) meshgrid pair
    and y the predicted class for each grid node."""
    mp.pcolormesh(x[0],x[1], y,cmap = "brg")
    # Clamp the view to exactly the evaluated grid extent.
    mp.xlim(x[0].min(), x[0].max())
    mp.ylim(x[1].min(), x[1].max())
def draw_chart(x,y):
    """Scatter the training samples (columns of x), colored by label y."""
    mp.scatter(x[:,0],x[:,1],c = y, cmap = "RdYlBu",s = 80)
def show_chart():
    """Maximize the plot window (backend/OS specific) and show the figure."""
    mng = mp.get_current_fig_manager()
    if 'Windows' in platform.system():
        # Tk backend on Windows supports the 'zoomed' window state.
        mng.window.state('zoomed')
    else:
        # NOTE(review): assumes a Tk-based backend exposing window.maxsize —
        # other backends (Qt, GTK) would raise AttributeError; confirm the
        # deployment backend.
        mng.resize(*mng.window.maxsize())
    mp.show()
def main(argc, argv, envir):
    """Train the classifier, predict over a dense grid, and plot the result.

    Returns 0 as the process exit status.
    """
    x, y = make_data()
    model = train_model(x, y)
    # Grid bounds: data range padded by 1 unit, sampled every 0.005.
    x_min, x_max, x_step = x[:, 0].min() - 1, x[:, 0].max() + 1, 0.005
    y_min, y_max, y_step = x[:, 1].min() - 1, x[:, 1].max() + 1, 0.005
    grid_x = np.meshgrid(np.arange(x_min, x_max, x_step),
                         np.arange(y_min, y_max, y_step))
    # Flatten the grid to an (n, 2) sample matrix, predict, and reshape
    # the labels back onto the grid.
    flat_points = np.c_[grid_x[0].ravel(), grid_x[1].ravel()]
    grid_y = pred_model(model, flat_points).reshape(grid_x[0].shape)
    init_chart()
    draw_bk_chart(grid_x, grid_y)
    draw_chart(x, y)
    show_chart()
    return 0


if __name__ == "__main__":
    sys.exit(main(len(sys.argv), sys.argv, os.environ))
import string
import pango, gtk
import gnoetics, tilemodel
class TileTextView(gtk.TextView,
                   tilemodel.TileModelListener):
    """Read-only gtk.TextView that renders a TileModel as text.

    Listens for model changes and re-serializes the whole tile grid into
    its text buffer on every update.  (Python 2 / pygtk code.)
    """
    def __init__(self, model):
        gtk.TextView.__init__(self)
        tilemodel.TileModelListener.__init__(self, model)
        # Display-only widget: no editing, no visible cursor.
        self.set_editable(0)
        self.set_cursor_visible(0)
        self.__model = model
        self.__buffer = gtk.TextBuffer()
        # Populate the buffer before attaching it to the view.
        self.__tiles_to_buffer()
        self.set_buffer(self.__buffer)
    def __tiles_to_buffer(self):
        # Serialize every model line into the text buffer.  Adjacent tiles
        # are separated by a space unless either side declares "glue"
        # (e.g. punctuation that hugs the previous word).
        lines = []
        for i in range(self.__model.get_line_count()):
            line_str = ""
            prev_right_glue = 0
            for j in range(self.__model.get_line_length(i)):
                t = self.__model.get_tile(i, j)
                txt = "???"
                has_left_glue = 0
                if type(t) == gnoetics.Token:
                    txt = t.get_word()
                    has_left_glue = t.has_left_glue()
                    prev_right_glue = t.has_right_glue()
                elif t == "stanza break":
                    # Stanza breaks render as an empty slot.
                    txt = ""
                    prev_right_glue = 0
                else:
                    # Unfilled tile placeholder.
                    txt = "__"
                    prev_right_glue = 0
                if prev_right_glue or has_left_glue:
                    line_str += txt
                else:
                    line_str += " " + txt
            # Pad each rendered line with a leading/trailing space.
            lines.append(" " + line_str + " ")
        self.__buffer.set_text(string.join(lines, "\n"))
    def do_changed_line(self, line_num):
        # TileModelListener callback: any line change triggers a full
        # re-render of the buffer.
        self.__tiles_to_buffer()
|
# Read the two name parts and print them in reversed order, as a list.
firstname = input("Enter first Name : ")
lastname = input("Enter the last Name : ")
reversed_names = [lastname, firstname]
print(reversed_names)
# 8.4: Open romeo.txt and read it line by line.  Split each line into
# words, collect each distinct word once, then print the words sorted
# alphabetically.
# Fixes vs. the original: drop the redundant whole-file fh.read() +
# fh.seek(0) pass, and close the file via a context manager.
with open('romeo.txt') as fh:
    wordlist = []
    for line in fh:
        # split() already ignores the trailing newline and extra spaces.
        for word in line.split():
            if word not in wordlist:
                wordlist.append(word)
wordlist.sort()
print(wordlist)
from __future__ import annotations
from typing import NamedTuple
class Config(NamedTuple):
    """Immutable application configuration.

    The custom __repr__ masks every credential field so the config can be
    logged safely.
    """
    username: str
    channel: str
    oauth_token: str
    client_id: str
    youtube_api_key: str
    youtube_playlists: dict[str, dict[str, str]]
    airnow_api_key: str

    @property
    def oauth_token_token(self) -> str:
        """The token portion of oauth_token (everything after the first ':')."""
        return self.oauth_token.split(':', 1)[1]

    def __repr__(self) -> str:
        # Assemble from fragments; secrets render as '***'.
        fragments = (
            f'{type(self).__name__}(',
            f'username={self.username!r}, ',
            f'channel={self.channel!r}, ',
            f'oauth_token={"***"!r}, ',
            f'client_id={"***"!r}, ',
            f'youtube_api_key={"***"!r}, ',
            f'youtube_playlists={self.youtube_playlists!r}, ',
            f'airnow_api_key={"***"!r}, ',
            f')',
        )
        return ''.join(fragments)
|
from Transmitter import Transmitter
class InputMethod:
    """Base class for motor-control input sources.

    Subclasses override getInput() to refresh self.motor_speeds; the listen
    loop then transmits the current speeds after every poll.
    """
    def __init__(self):
        # Current speed for each of the three motors.
        self.motor_speeds = [0, 0, 0]
        # Init transmitter
        self.t = Transmitter()
    def doListenLoop(self):
        # Poll forever: refresh the speeds, then send them out.
        # NOTE(review): no sleep/throttle — the loop spins as fast as
        # getInput()/send() allow; confirm that is intended.
        while True:
            self.getInput()
            self.t.send(self.motor_speeds)
    def getInput(self):
        # Hook for subclasses: update self.motor_speeds in place.
        pass
|
# Build a dict of 1-based keys -> user-entered lists, then print the 5th
# element of every list.  Sizes must exceed 3 (dict) and 5 (lists).
dict_size = int(input("Nhập số phần tử của Dictionary (lớn hơn 3): "))
list_size = int(input("Nhập số phần tử của Value List (lớn hơn 5): "))
my_dict = {}
if dict_size <= 3 or list_size <= 5:
    print("Giá trị nhập không đúng")
else:
    for i in range(1, dict_size + 1):
        values = []
        print(f"Nhập phần tử cho list {i}")
        for j in range(1, list_size + 1):
            values.append(input(f"Nhập phần tử thứ {j} trong list: "))
        my_dict[i] = values
    # Print the fifth entry of each stored list.
    for key in my_dict:
        print(my_dict[key][4])
#!/usr/bin/env python3
"""
DrugCentral PostgreSql db client.
"""
import os,sys,argparse,re,time,logging
from .. import drugcentral
from ..util import yaml as util_yaml
#############################################################################
if __name__=='__main__':
    parser = argparse.ArgumentParser(description="DrugCentral PostgreSql client utility", epilog="Search via --ids as regular expressions, e.g. \"^Alzheimer\"")
    # Supported operations; dispatched via the elif chain below.
    ops = [
        "list_tables",
        "list_columns",
        "list_tables_rowCounts",
        "version",
        "get_structure",
        "get_structure_by_synonym",
        "get_structure_by_xref",
        "get_structure_xrefs",
        "get_structure_products",
        "get_structure_orangebook_products",
        "get_structure_atcs",
        "get_structure_synonyms",
        "get_product",
        "get_product_structures",
        "get_indication_structures",
        "list_products",
        "list_structures",
        "list_structures2smiles",
        "list_structures2molfile",
        "list_active_ingredients",
        "list_indications",
        "list_indication_targets",
        "list_ddis",
        "list_atcs",
        "list_xref_types",
        "search_indications",
        "search_products",
        "meta_listdbs"
    ]
    parser.add_argument("op", choices=ops, help="OPERATION (select one)")
    parser.add_argument("--i", dest="ifile", help="input ID file")
    parser.add_argument("--ids", help="input IDs (comma-separated)")
    parser.add_argument("--xref_type", help="xref ID type")
    parser.add_argument("--o", dest="ofile", help="output (TSV)")
    parser.add_argument("--dbhost")
    parser.add_argument("--dbport")
    parser.add_argument("--dbname")
    parser.add_argument("--dbusr")
    parser.add_argument("--dbpw")
    parser.add_argument("--param_file", default=os.environ['HOME']+"/.drugcentral.yaml")
    parser.add_argument("--dbschema", default="public")
    parser.add_argument("-v", "--verbose", dest="verbose", action="count", default=0)
    args = parser.parse_args()
    logging.basicConfig(format='%(levelname)s:%(message)s', level=(logging.DEBUG if args.verbose>1 else logging.INFO))
    # Bug fix: start from an empty param dict so a missing param file (with
    # no CLI overrides) fails below with a clean "Connect failed" instead of
    # a NameError on the first params[...] access.
    params = {}
    if os.path.isfile(args.param_file):
        params = util_yaml.ReadParamFile(args.param_file)
    # CLI flags override file-provided connection parameters.
    if args.dbhost: params['DBHOST'] = args.dbhost
    if args.dbport: params['DBPORT'] = args.dbport
    if args.dbname: params['DBNAME'] = args.dbname
    if args.dbusr: params['DBUSR'] = args.dbusr
    if args.dbpw: params['DBPW'] = args.dbpw
    fout = open(args.ofile, "w+") if args.ofile else sys.stdout
    t0 = time.time()
    # Collect input IDs from a file (one per line) or the --ids flag.
    ids=[]
    if args.ifile:
        # Idiom/leak fix: iterate the file and close it deterministically.
        with open(args.ifile) as fin:
            ids = [line.rstrip() for line in fin]
        logging.info('Input IDs: %d'%(len(ids)))
    elif args.ids:
        ids = re.split(r'[,\s]+', args.ids)
    try:
        dbcon = drugcentral.Utils.Connect(params['DBHOST'], params['DBPORT'], params['DBNAME'], params['DBUSR'], params['DBPW'])
    except Exception as e:
        logging.error("Connect failed.")
        parser.error("{0}".format(str(e)))
    # Dispatch the requested operation.
    if args.op=='list_tables':
        drugcentral.Utils.ListTables(dbcon, args.dbschema, fout)
    elif args.op=='list_tables_rowCounts':
        drugcentral.Utils.ListTablesRowCounts(dbcon, args.dbschema, fout)
    elif args.op=='list_columns':
        drugcentral.Utils.ListColumns(dbcon, args.dbschema, fout)
    elif args.op=='version':
        drugcentral.Utils.Version(dbcon, args.dbschema, fout)
    elif args.op=='list_structures':
        drugcentral.Utils.ListStructures(dbcon, args.dbschema, fout)
    elif args.op=='list_structures2smiles':
        drugcentral.Utils.ListStructures2Smiles(dbcon, args.dbschema, fout)
    elif args.op=='list_structures2molfile':
        drugcentral.Utils.ListStructures2Molfile(dbcon, args.dbschema, fout)
    elif args.op=='list_products':
        drugcentral.Utils.ListProducts(dbcon, args.dbschema, fout)
    elif args.op=='list_active_ingredients':
        drugcentral.Utils.ListActiveIngredients(dbcon, args.dbschema, fout)
    elif args.op=='list_indications':
        drugcentral.Utils.ListIndications(dbcon, fout)
    elif args.op=='list_indication_targets':
        drugcentral.Utils.ListIndicationTargets(dbcon, fout)
    elif args.op=='list_ddis':
        drugcentral.Utils.ListDrugdruginteractions(dbcon, fout)
    elif args.op=='list_atcs':
        drugcentral.Utils.ListAtcs(dbcon, fout)
    elif args.op=='list_xref_types':
        drugcentral.Utils.ListXrefTypes(dbcon, fout)
    elif args.op=='get_structure':
        drugcentral.Utils.GetStructure(dbcon, ids, fout)
    elif args.op=='get_structure_by_synonym':
        drugcentral.Utils.GetStructureBySynonym(dbcon, ids, fout)
    elif args.op=="get_structure_by_xref":
        drugcentral.Utils.GetStructureByXref(dbcon, args.xref_type, ids, fout)
    elif args.op=='get_structure_xrefs':
        drugcentral.Utils.GetStructureXrefs(dbcon, ids, fout)
    elif args.op=='get_structure_products':
        drugcentral.Utils.GetStructureProducts(dbcon, ids, fout)
    elif args.op=='get_structure_orangebook_products':
        drugcentral.Utils.GetStructureOBProducts(dbcon, ids, fout)
    elif args.op=='get_structure_atcs':
        drugcentral.Utils.GetStructureAtcs(dbcon, ids, fout)
    elif args.op=='get_structure_synonyms':
        drugcentral.Utils.GetStructureSynonyms(dbcon, ids, fout)
    elif args.op=='get_product_structures':
        drugcentral.Utils.GetProductStructures(dbcon, ids, fout)
    elif args.op=='search_products':
        drugcentral.Utils.SearchProducts(dbcon, ids, fout)
    elif args.op=='search_indications':
        drugcentral.Utils.SearchIndications(dbcon, ids, fout)
    elif args.op=='get_indication_structures':
        drugcentral.Utils.GetIndicationStructures(dbcon, ids, fout)
    elif args.op=='meta_listdbs':
        drugcentral.Utils.MetaListdbs(dbcon, fout)
    else:
        # Unreachable: argparse choices already restrict op to `ops`.
        parser.error(f"Invalid operation: {args.op}")
    dbcon.close()
    logging.info('Elapsed time: %s'%(time.strftime('%Hh:%Mm:%Ss', time.gmtime(time.time()-t0))))
|
from eval import *
import os
import numpy as np
from sklearn.linear_model import Perceptron
import pandas as pd
from utils import tokenize_sentences, MMR
from table import Table
from sklearn.calibration import CalibratedClassifierCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.decomposition import TruncatedSVD
from sklearn.externals import joblib
def extract_features(doc, summary=None):
    """Build a per-sentence feature frame for ``doc``.

    Features per sentence: average word length (``len``), first SVD
    component (``svd``), tf-idf similarity (``tfidf``) and BM25
    similarity (``bm25``).

    Parameters
    ----------
    doc : str
        Document text; split into sentences with ``tokenize_sentences``.
    summary : str, optional
        Reference summary. When given, each sentence is labelled 1 if it
        appears verbatim in the summary, else 0.

    Returns
    -------
    tuple
        ``(features, labels)`` where ``features`` is a DataFrame and
        ``labels`` is a one-column DataFrame when ``summary`` is given,
        otherwise the int 0 (kept for interface compatibility).
    """
    tab = Table(parse_sentences=True, language='english')
    sentences = tokenize_sentences(doc)
    tab.init(sentences)
    tfidf = tab.similarity(doc, bm25=False)
    bm25 = tab.similarity(doc, bm25=True)
    svd = TruncatedSVD(n_components=11)
    # fit only; the transformed matrix returned here is intentionally unused
    svd.fit_transform(tab.matrix)
    if summary is not None:
        sum_sentences = tokenize_sentences(summary)
    sent_tfidf = []
    sent_bm25 = []
    sent_len = []
    sent_label = []
    sent_svd = []
    # similarity() returns one-element rows; flatten them to scalars
    tfidf = [val[0] for val in tfidf]
    bm25 = [val[0] for val in bm25]
    for i in range(len(sentences)):
        sent_tfidf.append(tfidf[i])
        sent_bm25.append(bm25[i])
        # average word length of the sentence
        # NOTE(review): `tokenize` is not imported explicitly here; it is
        # presumably re-exported by `from eval import *` — confirm.
        words = tokenize(sentences[i])
        if len(words) == 0:
            avg_len = 0
        else:
            avg_len = sum([len(w) for w in words])/len(words)
        sent_len.append(avg_len)
        decomp = svd.transform(tab.tf_idf_vect([sentences[i]]))
        sent_svd.append(decomp[0][0])
        if summary is not None:
            if sentences[i] in sum_sentences:
                sent_label.append(1)
            else:
                sent_label.append(0)
    if summary is not None:
        return pd.DataFrame({'len': sent_len, 'svd': sent_svd, 'tfidf': sent_tfidf, 'bm25': sent_bm25}), pd.DataFrame({'label': sent_label})
    else:
        return pd.DataFrame({'len': sent_len, 'svd': sent_svd, 'tfidf': sent_tfidf, 'bm25': sent_bm25}), 0
def train_rfc(x, y):
    """Fit a 50-tree random forest (depth capped at 30) on (x, y)."""
    model = RandomForestClassifier(n_estimators=50, max_depth=30)
    return model.fit(x, y)
def train_calibrated_perceptron(x, y):
    """Fit an L1-penalised Perceptron wrapped in 8-fold isotonic calibration."""
    base = Perceptron(penalty='l1', tol=1e-6)
    calibrated = CalibratedClassifierCV(base, cv=8, method='isotonic')
    calibrated.fit(x, y)
    return calibrated
def train_perceptron(x, y):
    """Fit a Perceptron with default hyper-parameters."""
    model = Perceptron()
    model.fit(x, y)
    return model
def train_mlp(x, y):
    """Fit a single-hidden-layer MLP with identity activation on (x, y)."""
    model = MLPClassifier(
        hidden_layer_sizes=(100,),
        max_iter=100,
        verbose=True,
        activation='identity',
        batch_size=100,
    )
    model.fit(x, y)
    return model
def concat_dataset(x, y):
    """Stack lists of per-document feature/label frames into two single frames."""
    features = pd.concat(x, ignore_index=True)
    labels = pd.concat(y, ignore_index=True)
    return features, labels
def perceptron_summary(doc, n, clf):
    """Return the n sentences of `doc` with the highest decision scores.

    Bug fix: the original appended ``sent[sorted_idx.index(i)]``, which
    indexes the sentence list by the *rank* of sentence ``i`` instead of
    the index of the i-th highest-scoring sentence.
    """
    x, _ = extract_features(doc, None)
    scores = clf.decision_function(x)
    sent = tokenize_sentences(doc)
    # indices of sentences sorted by descending score
    sorted_idx = np.argsort(-scores).tolist()
    return [sent[idx] for idx in sorted_idx[:n]]
def classifier_summary(doc, n, clf):
    """Return the n sentences most likely to belong to the summary,
    ranked by the classifier's probability for class 1 (in summary)."""
    features, _ = extract_features(doc, None)
    proba = clf.predict_proba(features)
    # keep only the probability of class 1 for each sentence
    in_summary = [row[1] for row in proba]
    sentences = tokenize_sentences(doc)
    scored = [
        {'pos': idx, 'text': sentences[idx], 'y': in_summary[idx]}
        for idx in range(len(sentences))
    ]
    top = sorted(scored, key=lambda item: item['y'], reverse=True)[:n]
    return [item['text'] for item in top]
def main():
    """Train an MLP summarizer on data/train and evaluate it.

    Reads each training document and its `Sum-` prefixed reference summary,
    extracts per-sentence features, fits the MLP, persists it, and prints
    the evaluation of `classifier_summary`.
    """
    x = []
    y = []
    train_directory = 'data/train/flat_text/'
    train_summary_directory = 'data/train/summary/'
    print("-------- Exercise 2 ---------")
    print("<<< Evaluating summaries using Multi-Layer Perceptron and set of features as described in report >>>")
    print('(Learning from files in directory ' + train_directory + ')')
    for filename in os.listdir(train_directory):
        with open(train_directory+filename, 'r', encoding='latin1') as file:
            d = file.read()
        with open(train_summary_directory+'Sum-'+filename, 'r', encoding='latin1') as file:
            s = file.read()
        x_, y_ = extract_features(d, s)
        x.append(x_)
        y.append(y_)
    x_train, y_train = concat_dataset(x, y)
    # Fix: DataFrame.as_matrix() was deprecated and removed (pandas 1.0);
    # .values is equivalent and available in every pandas version.
    clf = train_mlp(x_train.values, y_train.values.ravel())
    joblib.dump(clf, 'mlp.clf')
    evaluate_print('Classifier', *full_evaluate(classifier_summary, 5, clf))
if __name__ == '__main__':
    main()
|
# Generated by Django 2.2.6 on 2019-11-04 08:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a nullable integer `timepoints` column to the cases Image model."""

    dependencies = [
        ("cases", "0015_auto_20191003_1323"),
    ]

    operations = [
        migrations.AddField(
            model_name="image",
            name="timepoints",
            field=models.IntegerField(null=True),
        ),
    ]
|
from thefuck.rules.cd_cs import match, get_new_command
from thefuck.types import Command
def test_match():
    """The rule should fire for a bare `cs` and for `cs <path>` typos."""
    for script in ('cs', 'cs /etc/'):
        assert match(Command(script, 'cs: command not found'))
def test_get_new_command():
    """`cs` must be rewritten to `cd`, preserving the argument."""
    command = Command('cs /etc/', 'cs: command not found')
    assert get_new_command(command) == 'cd /etc/'
|
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import networkx as nx
from subprocess import check_output
get_ipython().run_line_magic('matplotlib', 'inline')
# --- Load the Shakespeare dataset and perform first-pass EDA ---
# In[122]:
df=pd.read_csv('C:\\Users\\p860n111\\Desktop\\data science\\Hw2\\Shakespeare_data.csv')
# In[4]:
# NOTE(review): missing parentheses — this only references the bound method,
# it does not print the summary (df.describe() is called later).
df.describe
# In[5]:
# to check the values for columns that have NaN value in them
print(df.info())
# In[6]:
df.head(10)
# In[7]:
df.describe()
# In[8]:
# to see the total number of columns in the dataset.
df.columns
# In[9]:
# to find the total number of unique plays
print("Number of plays are: " + str(df['Play'].nunique()))
# In[123]:
# to replace the NaN value in the Player column to be Unknown
df['Player'].replace(np.nan, 'Unknown',inplace = True)
# In[10]:
# to check that the Player column has all i.e. 111396 non null values.
df.info()
# In[12]:
# to verify that the missing values in Player column got replaced by Unknown
df.head()
# In[13]:
# to print the count of unique plays and their names
print("The total number of plays are: " + str(df['Play'].nunique())+" and their names are as follows: " + str(df['Play'].unique()))
# In[14]:
# creating a new data frame that will contain unique Plays value as a new column named: Name of the Play
pd.DataFrame(df['Play'].unique().tolist(), columns=['Name of the Play'])
# In[15]:
# Additional Information 1: For each Play, number of lines (PlayerLine) spoken by each Player
df.groupby(['Play','Player']).count()['PlayerLine']
# In[16]:
# Now converting the above data into a frame (playWise_lines_per_player).
playWise_lines_per_player= df.groupby(['Play','Player']).count()['PlayerLine']
playWise_lines_per_player= playWise_lines_per_player.to_frame()
playWise_lines_per_player
# In[17]:
playWise_lines_per_player.describe()
# In[18]:
# Additional Information 2: To count the number of PlayerLine corresponding to each Play.
df.groupby('Play').count().sort_values(by='PlayerLine',ascending=True)['PlayerLine']
# In[19]:
# Converting the data into a dataframe (playerLinePerPlay)
playerLinePerPlay = df.groupby('Play').count().sort_values(by='PlayerLine',ascending=True)['PlayerLine']
# In[20]:
playerLinePerPlay
# In[21]:
playerLinePerPlay = playerLinePerPlay.to_frame()
# In[22]:
playerLinePerPlay
# In[23]:
# applying indexing to the above dataframe (copy the index out into a column)
playerLinePerPlay['Play'] = playerLinePerPlay.index.tolist()
# In[24]:
playerLinePerPlay
# In[25]:
# replace the Play-name index with a 0..n-1 integer index
playerLinePerPlay.index = np.arange(0,len(playerLinePerPlay))
# In[26]:
playerLinePerPlay
# In[27]:
# plotting a graph to show: PlayerLine against Name of the Play
plt.figure(figsize=(90,50))
ax= sns.barplot(x='Play',y='PlayerLine',data=playerLinePerPlay, order = playerLinePerPlay['Play'])
ax.set(xlabel='Name of the Play', ylabel='PlayerLines')
plt.show()
# In[28]:
# same data, horizontal orientation for readability
plt.figure(figsize=(15,15))
ax= sns.barplot(x='PlayerLine',y='Play',data=playerLinePerPlay, order = playerLinePerPlay['Play'])
ax.set(xlabel='PlayerLines', ylabel='Name of the Play')
plt.show()
# In[30]:
# Additional Information 3: Number of Players corresponding to each Play
playersPerPlay = df.groupby(['Play'])['Player'].nunique().sort_values(ascending= True)
# In[31]:
playersPerPlay
# In[32]:
# changing it to a dataframe
playersPerPlay=playersPerPlay.to_frame()
# In[33]:
playersPerPlay
# In[34]:
playersPerPlay['Play'] = playersPerPlay.index.tolist()
# In[35]:
playersPerPlay
# In[36]:
# now to change the index from Play to 0 - (length-1) and renaming the columns
playersPerPlay.columns = ['Number of Players','Name of the Play']
# In[37]:
playersPerPlay
# In[38]:
playersPerPlay.index= np.arange(0,len(playersPerPlay))
playersPerPlay
# In[39]:
# plotting graph
plt.figure(figsize=(15,15))
ax = sns.barplot(x='Number of Players',y='Name of the Play',data=playersPerPlay)
ax.set(xlabel='Number of Players', ylabel='Name of the Play')
plt.show()
# In[40]:
plt.figure(figsize=(100,100))
ax = sns.barplot(x='Name of the Play',y='Number of Players',data=playersPerPlay)
ax.set(xlabel='Name of the Play', ylabel='Number of Players')
plt.show()
# In[42]:
# to calculate the total words in each PlayerLine cell entry
df['new_column'] = df.PlayerLine.apply(lambda x: len(str(x).split(' ')))
# In[43]:
df
# In[44]:
# NOTE(review): this bare groupby expression is a no-op in a script context
df.groupby(['Player'])['new_column']
# In[45]:
df
# In[46]:
df
# In[50]:
df.rename(columns={'new_column': 'NoOfWordsInPlayerLine'}, inplace=True)
# In[51]:
df
# In[52]:
h=df.groupby('Player')
# In[53]:
for Player, data in h:
print("Player:",Player)
print("\n")
print("Player:",data)
# In[54]:
h
# In[54]:
g.max
# In[56]:
g.sum()
# In[57]:
# to find total number of words in each PlayerLine so that to find the most important Player.
importantPlayer = df.groupby('Player')['NoOfWordsInPlayerLine'].sum()
print (importantPlayer)
# In[58]:
# converting result into dataframe
importantPlayer= importantPlayer.to_frame()
# In[59]:
importantPlayer
# In[60]:
# copy the Player index out into a regular column
importantPlayer['Player'] = importantPlayer.index.tolist()
# In[61]:
importantPlayer
# In[62]:
importantPlayer.index = np.arange(0,len(importantPlayer))
# In[63]:
importantPlayer
# In[64]:
importantPlayer.columns =['NoOfWordsInPlayerLine','Player']
# In[65]:
importantPlayer
# In[66]:
importantPlayer.sort_values('NoOfWordsInPlayerLine')
# In[67]:
df
# In[68]:
importantPlayer.sort_values(by='NoOfWordsInPlayerLine', ascending=False)
# In[69]:
df.to_csv('Shakespeare_ds_numberOfWordsCol.csv')
# In[70]:
importantPlayer = importantPlayer.reset_index(drop=True)
# In[71]:
importantPlayer=importantPlayer.sort_values(by='NoOfWordsInPlayerLine', ascending=False)
# In[72]:
importantPlayer
# In[73]:
importantPlayer.index = np.arange(0,len(importantPlayer))
# In[74]:
importantPlayer
# In[75]:
plt.figure(figsize=(100,100))
ax= sns.barplot(x='NoOfWordsInPlayerLine',y='Player',data=importantPlayer)
ax.set(xlabel='NoOfWordsInPlayerLine', ylabel='Player')
plt.show()
# In[76]:
importantPlayer
# In[77]:
importantPlayer.to_csv('Shakespeare_ds_importantPlayer.csv')
# In[78]:
df
# In[ ]:
#df_uniqueWords_Count_In_PlayerLine=pd.DataFrame(r1,columns=['PlayerLine'])
# In[76]:
df
# In[79]:
# word-frequency count over all lines (case-sensitive)
from collections import Counter
result = Counter(" ".join(df['PlayerLine'].values.tolist()).split(" ")).items()
result
# In[79]:
# same count, case-insensitive (lowercased before splitting)
from collections import Counter
result = Counter(" ".join(df['PlayerLine'].values.tolist()).lower().split(" ")).items()
result
# In[80]:
most_Common_Word_df = pd.DataFrame([result])
# In[81]:
most_Common_Word_df
# In[82]:
most_Common_Word_df.to_csv('Shakespeare_ds_Most_common_word.csv')
# In[105]:
from pandas import ExcelWriter
writer = ExcelWriter('Shakespeare_ds_Most_common_word.xlsx')
most_Common_Word_df.to_excel(writer,'Sheet5')
writer.save()
# In[83]:
# one bar chart of lines-per-player for every play (ascending)
play_name = df['Play'].unique().tolist()
for play in play_name:
    p_line = df[df['Play']==play].groupby('Player').count().sort_values(by='PlayerLine',ascending=True)['PlayerLine']
    p_line = p_line.to_frame()
    p_line['Player'] = p_line.index.tolist()
    p_line.index = np.arange(0,len(p_line))
    p_line.columns=['Lines','Player']
    plt.figure(figsize=(10,10))
    ax= sns.barplot(x='Lines',y='Player',data=p_line)
    ax.set(xlabel='Number of Lines', ylabel='Player')
    plt.title(play,fontsize=30)
    plt.show()
# In[110]:
# same charts, descending order
play_name = df['Play'].unique().tolist()
for play in play_name:
    p_line = df[df['Play']==play].groupby('Player').count().sort_values(by='PlayerLine',ascending=False)['PlayerLine']
    p_line = p_line.to_frame()
    p_line['Player'] = p_line.index.tolist()
    p_line.index = np.arange(0,len(p_line))
    p_line.columns=['Lines','Player']
    plt.figure(figsize=(10,10))
    ax= sns.barplot(x='Lines',y='Player',data=p_line)
    ax.set(xlabel='Number of Lines', ylabel='Player')
    plt.title(play,fontsize=30)
    plt.show()
# In[114]:
# NOTE(review): graph object is created but never populated or used
g= nx.Graph()
# In[116]:
df
# In[134]:
# 80/20 split. Fix: the original dropped `train.index` but `train` was never
# defined (NameError); the sampled frame is `trainingDS`.
trainingDS=df.sample(frac=0.8,random_state=200)
testingDS=df.drop(trainingDS.index)
# In[135]:
trainingDS
# In[84]:
testingDS.describe()
# In[138]:
testingDS.info()
# In[ ]:
testingDS.info()
# In[140]:
# sklearn-based split of the full frame
from sklearn.model_selection import train_test_split
trainingSet, testSet = train_test_split(df, test_size=0.2)
# In[142]:
trainingSet.info()
# In[ ]:
testSet.info()
# In[144]:
trainingSet["Player"].value_counts().plot(kind="bar")
trainingSet["Player"].value_counts()
# In[221]:
# feature = PlayerLinenumber, target = Play
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(df.PlayerLinenumber, df.Play, test_size=0.25, random_state=0)
# In[218]:
from sklearn.linear_model import LogisticRegression
# In[219]:
logisticRegr = LogisticRegression()
# In[223]:
# Fix: the original `logisticRegr.fit(x_train), y_train)` had a stray
# parenthesis and did not parse.
# NOTE(review): x_train is a Series here; sklearn expects a 2-D feature
# array — confirm upstream reshaping.
logisticRegr.fit(x_train, y_train)
# In[96]:
df
# In[131]:
df.drop('ActSceneLine', axis=1, inplace=True)
# In[132]:
df
# In[111]:
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(df.Player, df.Play, test_size=0.25, random_state=0)
# In[157]:
# every column except Player as features; the Play column as target
X = df.loc[:, df.columns != 'Player']
y = df.loc[:, df.columns == 'Play']
# In[158]:
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
# In[141]:
X
# In[136]:
y
# In[142]:
import pandas as pd
import numpy as np
from sklearn import preprocessing
import matplotlib.pyplot as plt
plt.rc("font", size=14)
from sklearn.linear_model import LogisticRegression
# Fix: sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
import seaborn as sns
sns.set(style="white")
sns.set(style="whitegrid", color_codes=True)
# In[159]:
# reload the raw dataset, dropping every row with any NaN
df=pd.read_csv('C:\\Users\\p860n111\\Desktop\\data science\\Hw2\\Shakespeare_data.csv')
# In[160]:
df = df.dropna()
print(df.shape)
print(list(df.columns))
# In[154]:
# first column as target, the rest as features
X = df.iloc[:,1:]
y = df.iloc[:,0]
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# In[155]:
classifier = LogisticRegression(random_state=0)
classifier.fit(X_train, y_train)
# In[152]:
df.drop('PlayerLine', axis=1, inplace=True)
# In[156]:
df
# In[172]:
# reload again to restore the dropped column
df = pd.read_csv('C:\\Users\\p860n111\\Desktop\\data science\\Hw2\\Shakespeare_data.csv')
# In[173]:
df.head()
# In[174]:
df = df.dropna()
print(df.shape)
print(list(df.columns))
# In[164]:
feature_cols = ['Player', 'Play']
# In[165]:
# you want all rows, and the feature_cols' columns
# Fix: the original indexed an undefined name `train` (NameError);
# the loaded frame is `df`.
X = df.loc[:, feature_cols]
# In[168]:
# now we want to create our response vector
y = df.Player
# In[169]:
# 1. import
from sklearn.linear_model import LogisticRegression
# 2. instantiate model
logreg = LogisticRegression()
# 3. fit
logreg.fit(X, y)
# In[175]:
df.info()
# In[176]:
# NOTE(review): the repeated drops of 'Dataline'/'PlayerLine' below raise
# KeyError on the second attempt — an artifact of re-running notebook cells.
df.drop('Dataline', axis=1, inplace=True)
df.drop('PlayerLinenumber', axis=1, inplace=True)
df.drop('ActSceneLine', axis=1, inplace=True)
df.drop('Dataline', axis=1, inplace=True)
df.drop('PlayerLine', axis=1, inplace=True)
# In[177]:
df.drop('Dataline', axis=1, inplace=True)
# In[179]:
df.drop('PlayerLine', axis=1, inplace=True)
# In[180]:
df
# In[186]:
# Fix: `.ix` was removed from pandas; use positional `.iloc`.
x = df.iloc[:, 0].values
y = df.iloc[:, 1].values
# In[189]:
# Fix: the original two lines were a garbled paste
# (`train_test_split(xogReg = LogisticRegression() ... , y, test_size=...)`)
# and did not parse; reconstructed as a plain split of x/y.
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=0)
# In[191]:
LogReg = LogisticRegression()
LogReg.fit(X_train, y_train)
# In[194]:
X_train
# In[197]:
df.info()
# In[227]:
# reload and clean again for the random-forest experiment
df = pd.read_csv('C:\\Users\\p860n111\\Desktop\\data science\\Hw2\\Shakespeare_data.csv')
# In[228]:
df = df.dropna()
print(df.shape)
print(list(df.columns))
# In[224]:
# Import train_test_split function
from sklearn.model_selection import train_test_split
X=df[['PlayerLinenumber']] # Features
y=df['Player'] # Labels
# Split dataset into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1) # 90% training and 10% test
# In[225]:
#Import Random Forest Model
from sklearn.ensemble import RandomForestClassifier
#Create a Gaussian Classifier
clf=RandomForestClassifier(n_estimators=100)
#Train the model using the training sets y_pred=clf.predict(X_test)
clf.fit(X_train,y_train)
y_pred=clf.predict(X_test)
# In[226]:
#Import scikit-learn metrics module for accuracy calculation
from sklearn import metrics
# Model Accuracy, how often is the classifier correct?
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
# In[229]:
df.info()
# In[230]:
df.dtypes
# In[231]:
# keep only the object-typed columns for inspection
obj_df = df.select_dtypes(include=['object']).copy()
obj_df.head()
# In[232]:
# encode each column as a pandas category and expose its integer codes
df["Play"] = df["Play"].astype('category')
df.dtypes
# In[233]:
df["Play_cat"] = df["Play"].cat.codes
df.head()
# In[237]:
df.dtypes
# In[236]:
df["PlayerLinenumber"] = df["PlayerLinenumber"].astype('category')
df.dtypes
df["PlayerLinenumber_cat"] = df["PlayerLinenumber"].cat.codes
df.head()
# In[238]:
df["ActSceneLine"] = df["ActSceneLine"].astype('category')
df.dtypes
df["ActSceneLine_cat"] = df["ActSceneLine"].cat.codes
df.head()
# In[241]:
df.dtypes
# In[240]:
df["Player"] = df["Player"].astype('category')
df.dtypes
df["Player_cat"] = df["Player"].cat.codes
df.head()
# In[242]:
df["PlayerLine"] = df["PlayerLine"].astype('category')
df.dtypes
df["PlayerLine_cat"] = df["PlayerLine"].cat.codes
df.head()
# In[243]:
df.dtypes
# In[245]:
# keep only the numeric / encoded columns for modelling
newDF = df.filter(['Dataline','Play_cat','PlayerLinenumber_cat','ActSceneLine_cat','Player_cat','PlayerLine_cat' ], axis=1)
# In[248]:
newDF.dtypes
# In[249]:
# Fix: `.ix` was removed from pandas; `.iloc` with an explicit column list.
# Features: everything except Player_cat (column 4), which is the target.
X = newDF.iloc[:, [0, 1, 2, 3, 5]].values
y = newDF.iloc[:, 4].values
# In[250]:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .3, random_state=25)
# In[251]:
LogReg = LogisticRegression()
LogReg.fit(X_train, y_train)
# In[253]:
predictions = LogReg.predict(X_test)
# In[254]:
LogReg.predict(X_test[0:10])
# In[255]:
predictions = LogReg.predict(X_test)
# In[256]:
# Use score method to get accuracy of model
score = LogReg.score(X_test, y_test)
print(score)
# In[263]:
# Fix: `.ix` was removed from pandas; use `.iloc`.
# Reduced feature set: Play_cat and PlayerLinenumber_cat only.
X1 = newDF.iloc[:, [1, 2]].values
y1 = newDF.iloc[:, 4].values
# In[257]:
newDF.dtypes
# In[268]:
X_train1, X_test1, y_train1, y_test1 = train_test_split(X1, y1, test_size = .2, random_state=25)
# In[ ]:
LogReg = LogisticRegression()
LogReg.fit(X_train1, y_train1)
# In[ ]:
predictions = LogReg.predict(X_test1)
# In[267]:
# Use score method to get accuracy of model
score = LogReg.score(X_test1, y_test1)
print(score)
|
#!/usr/bin/env python
# coding:utf-8
'''
Created on:
@author: 张晓宇
Email: 61411916@qq.com
Version: 1.0
Description:
Help:
'''
from core import core
# Entry point: delegate everything to the project's core module when run
# as a script.
if __name__ == '__main__':
    core.run()
#!/usr/bin/env python
"""Script to check that a commit message is valid"""
# noqa
import argparse
import os.path
import re
import shutil
import sys
from collections import defaultdict
from colorama import Fore, Style
from git import Repo
def check(message):
    """Ensure the message follow some rules.

    This is based on "Conventional Commits" (https://www.conventionalcommits.org), but with
    added rules for the body

    Rules
    -----

    A commit is split in three parts:
    - A short subject in the first line
    - An empty line
    - A complete description

    The description must be written in restructured text, containing sections, inspired by
    https://www.python.org/dev/peps/pep-0012/#suggested-sections.
    At least these ones are required, in this order:
    - Abstract
    - Motivation
    - Rationale

    - 1st line: subject
      - length is max 72 chars
      - starts with a type, from a specific list
      - a scope can follow the type, surrounded by parentheses
      - after the type (or scope), a colon must be present, followed by a space
      - then a mandatory short subject
    - 2nd line: empty
      - mandatory
      - empty
    - 3rd line, start of first section of RST description

    In the description, we expect to find:
    - 1st line: "Abstract"
    - 2nd line: "======="
    - 3rd line: empty line
    - 4th line: text
    Then the next title must be "Motivation"

    Parameters
    ----------
    message : str
        The git commit message to check

    Yields
    ------
    Tuple[int, str]
        Will yield a tuple for each error, with the line number and the text of the error.
    """
    if not message:
        yield 0, "No message (message is mandatory)"
        return
    types = {
        "build",
        "ci",
        "chore",
        "docs",
        "feat",
        "fix",
        "merge",
        "perf",
        "refactor",
        "revert",
        "style",
        "tests",
    }
    lines = [line.rstrip() for line in message.splitlines()]
    line = lines.pop(0)
    if len(line) > 72:
        # fixed typo: "Line to long"
        yield 0, "Line too long (max 72 characters)"
    # everything up to the first non-letter is the type
    parts = re.split(r"[^a-zA-Z]", line, maxsplit=1)
    type_ = parts[0]
    if not type_:
        yield 0, f"Line must start with a type (must be one of {list(types)})"
    else:
        if type_.lower() not in types:
            yield 0, f"`{type_}` is not a valid type (must be one of {list(types)})"
            if type_ != type_.lower():
                yield 0, f"Type `{type_}` must be lowercased"
        else:
            if type_ != type_.lower():
                yield 0, f"Type `{type_}` must be lowercased (use {type_.lower()})"
        if len(parts) == 1 or not parts[1].strip():
            # fixed typo: "en optional"
            yield 0, f"Type `{type_}` must be followed by an optional scope and a subject (`type(scope): subject`)"
        else:
            rest = line[len(type_) :]
            if rest.startswith(" "):
                yield 0, f"No space expected after the type `{type_}` (must be a scope in parentheses or `: `)"
            rest = rest.lstrip()
            if rest.startswith("("):
                parts = rest.split(")", maxsplit=1)
                scope = parts[0][1:]
                if not scope.strip():
                    # fixed typo: missing closing parenthesis in the message
                    yield 0, "Scope is empty (if set, scope is between parentheses after the type)"
                if scope.strip() != scope:
                    yield 0, f"Scope `{scope}` must not be surrounded by spaces"
                scope = scope.strip()
                if not re.fullmatch(r"[a-zA-Z]+[\w\-.]+[a-zA-Z]+", scope):
                    yield 0, f"Invalid scope `{scope}` (must start with letter, then letters, `_`, `-`, or `.`, then letter)"
                if len(parts) == 1 or not parts[1].strip():
                    rest = ""
                else:
                    rest = parts[1]
            if not rest or not rest.strip():
                yield 0, "Description is missing (must be after type or scope)"
            else:
                parts = rest.split(":", maxsplit=1)
                if parts[0]:
                    if not parts[0].strip():
                        yield 0, "No space before `:` (type or scope is followed by `: `)"
                    else:
                        # Fix: this `else` was attached to the outer `if parts[0]:`,
                        # which made every *well-formed* subject (empty text before
                        # the colon) yield "Invalid subject separator".
                        yield 0, "Invalid subject separator (subject must be prefixed by `: `)"
                if parts[0].strip():
                    subject = parts[0]
                elif len(parts) == 1 or not parts[1].strip():
                    yield 0, "Description is missing (must be after type or scope)"
                    subject = ""
                else:
                    subject = parts[1]
                if subject:
                    if subject[0] != " ":
                        yield 0, "Description must be preceded by a space (subject must be prefixed by `: `)"
                    else:
                        subject = subject[1:]
                    if subject.strip() != subject:
                        yield 0, "Invalid spaces around subject (required only one space after `:`, and no space at the end)"
                    subject = subject.strip()
                    if len(subject) < 20:
                        yield 0, "Description too short (min 20 characters)"
    if len(lines) < 2:
        yield 1, "Description is missing (must be after a blank line following the first line)"
        return
    # per-section bookkeeping, filled while scanning the body
    sections = {
        name: {
            "found_on_line": None,
            "underline": None,
            "nb_blank_lines_before": 0,
            "nb_blank_lines_after_title": 0,
            "nb_blank_lines_after_underline": 0,
            "has_text": False,
            "order": index,
        }
        for index, name in enumerate(["Abstract", "Motivation", "Rationale"])
    }
    found_sections = []
    current_section = None
    text_before = False
    skip = 0
    for index, line in enumerate(lines):
        if skip:
            skip -= 1
            continue
        num = index + 1
        if line in sections:
            current_section = line
            sections[current_section]["found_on_line"] = num
            found_sections.append(current_section)
            # search for empty lines before title
            if index:
                index_ = index
                while index_:
                    if lines[index_ - 1]:
                        break
                    sections[current_section]["nb_blank_lines_before"] += 1
                    index_ -= 1
            try:
                # search for empty lines after title
                index_ = index
                while True:
                    if lines[index_ + 1 + skip]:
                        break
                    sections[current_section]["nb_blank_lines_after_title"] += 1
                    skip += 1
                # search for underline
                if lines[index + 1 + skip].startswith("="):
                    sections[current_section]["underline"] = lines[
                        index + 1 + skip
                    ] == "=" * len(current_section)
                if sections[current_section]["underline"] is not None:
                    skip += 1
                    # search for empty lines after underline
                    index_ = index + skip
                    while True:
                        if lines[index_ + skip]:
                            break
                        sections[current_section]["nb_blank_lines_after_underline"] += 1
                        skip += 1
            except IndexError:
                # end of message reached while scanning a section header
                pass
            continue
        if line:
            if not current_section:
                text_before = True
            else:
                sections[current_section]["has_text"] = True
    if text_before:
        # fixed typo: "preceed"
        yield 2, "No text must precede the first section"
    for name, info in sections.items():
        if not info["found_on_line"]:
            yield 2, f"Description must include the {name} section"
    for index, name in enumerate(found_sections):
        info = sections[name]
        num = info["found_on_line"]
        if info["order"] != index:
            yield num, f"Section {name} must be in position {info['order']+1}"
        if info["nb_blank_lines_before"] != 1:
            yield num - info[
                "nb_blank_lines_before"
            ], f"Section {name} must be preceded with exactly one blank line"
        if info["nb_blank_lines_after_title"]:
            yield num + 1, f"No blank lines expected after title of section {name}"
        num += info["nb_blank_lines_after_title"]
        if info["underline"] is not True:
            yield num + 1, f"Title of section {name} must be underlined with {len(name)} `=`"
        if info["underline"] is not None:
            num += 1
        if info["nb_blank_lines_after_underline"] != 1:
            yield num + 1, f"Underline of title of section {name} must be followed with exactly one blank line"
        num += info["nb_blank_lines_after_underline"]
        if not info["has_text"]:
            yield num, f"Section {name} must contain text"
    for index, line in enumerate(message.splitlines()):
        if line != line.rstrip():
            # fixed: no placeholder, so no f-string needed
            yield index, "Remove trailing space(s)"
if __name__ == "__main__":
    # CLI wrapper around check(): reads the message from a git ref or a file,
    # prints errors (optionally annotated per line), and exits non-zero on failure.
    parser = argparse.ArgumentParser(
        description="Validate a git commit message via a git reference or text.",
        add_help=False,
    )
    parser.add_argument("-h", "--help", help="Show this help and exit.", action="help")
    parser.add_argument(
        "-v", "--verbose", help="Increase output verbosity.", action="store_true"
    )
    parser.add_argument(
        "-t", "--template", help="Show git commit template.", action="store_true"
    )
    parser.add_argument(
        "--check-merge",
        help="If set, will enforce the style for a merge commit. Else merge commits are always valid.",
        action="store_true",
    )
    # message source: a git ref, the last commit, or a file/stdin (mutually exclusive)
    group = parser.add_mutually_exclusive_group(required=False)
    group.add_argument(
        "-r",
        "--ref",
        metavar="REF",
        type=str,
        nargs="?",
        help="The git reference of the commit to check.",
        default=None,
    )
    group.add_argument(
        "-l",
        "--last",
        help="Use the last commit (equivalent to -r HEAD).",
        action="store_true",
    )
    group.add_argument(
        "path",
        metavar="PATH",
        type=argparse.FileType("r", encoding="UTF-8"),
        nargs="?",
        help="Path to file containing the message to check. Use `-` for stdin.",
        default=None,
    )
    args = parser.parse_args()
    errors = None
    do_check = False
    if args.last or args.ref or args.path:
        do_check = True
        if args.last:
            args.ref = "HEAD"
        if args.ref:
            if args.verbose:
                print(
                    f"Checking from git reference: {Style.BRIGHT}{args.ref}{Style.NORMAL}\n"
                )
            repo = Repo(search_parent_directories=True)
            commit = repo.commit(args.ref)
            # a commit with more than one parent is a merge commit
            if not args.check_merge and len(commit.parents) > 1:
                do_check = False
                if args.verbose:
                    print(
                        f"{Style.BRIGHT}{Fore.GREEN}It's a merge commit, no style enforced{Fore.RESET}{Style.NORMAL}\n"
                    )
            else:
                message = commit.message
        else:
            if args.verbose:
                print(
                    f"Checking from file: {Style.BRIGHT}{args.path.name}{Style.NORMAL}\n"
                )
            with args.path as file:
                message = file.read()
            # for file input we can only detect merges from the default subject
            if not args.check_merge and message and message.startswith("Merge branch "):
                do_check = False
                if args.verbose:
                    print(
                        f"{Style.BRIGHT}{Fore.GREEN}It sounds like a merge commit, so no style enforced{Fore.RESET}{Style.NORMAL}\n"
                    )
    if do_check:
        lines = message.splitlines()
        nb_lines = len(lines)
        # map line number -> list of error texts for that line
        errors = defaultdict(list)
        nb_errors = 0
        for line, error in check(message):
            if line >= nb_lines:
                line = nb_lines - 1
            errors[line].append(error)
            nb_errors += 1
        if nb_errors:
            if args.verbose:
                print(
                    f"{Style.BRIGHT}{Fore.RED}Message is invalid. "
                    f"Found {nb_errors} error{'(s)' if nb_errors > 1 else ''} "
                    f"for {len(errors)} line{'(s)' if len(errors) > 1 else ''} "
                    f"(on {nb_lines}):{Fore.RESET}{Style.NORMAL}\n",
                    file=sys.stderr,
                )
        else:
            if args.verbose:
                print(
                    f"{Style.BRIGHT}{Fore.GREEN}Message is valid:{Fore.RESET}{Style.NORMAL}\n"
                )
        if args.verbose:
            # verbose mode: echo the message, marking each line valid/invalid
            for line_num, line in enumerate(lines):
                if line_num in errors:
                    print(
                        f"{Style.BRIGHT}{Fore.RED}✘{Fore.RESET}{Style.NORMAL} {line}",
                        file=sys.stderr,
                    )
                    for error in errors[line_num]:
                        print(
                            f"  -> {Style.BRIGHT}{Fore.RED}{error}{Fore.RESET}{Style.NORMAL}",
                            file=sys.stderr,
                        )
                else:
                    print(
                        f"{Style.BRIGHT}{Fore.GREEN}✔{Fore.RESET} {Style.NORMAL}{line}",
                        file=sys.stderr,
                    )
        else:
            for line_num, line_errors in errors.items():
                for error in line_errors:
                    print(f"{line_num}: {error}", file=sys.stderr)
    if args.template:
        if args.ref or args.path:
            print("")
        if args.verbose:
            print(f"{Style.BRIGHT}Git commit message template:{Style.NORMAL}\n")
        else:
            print("Git commit message template:\n")
        # the template lives next to this script's parent directory
        with open(
            os.path.join(os.path.dirname(__file__), "..", ".gitmessage"), "r"
        ) as file:
            shutil.copyfileobj(file, sys.stdout)
    exit(1 if errors else 0)
|
from typing import Callable, Type, TypeVar
from uuid import UUID
from eventsourcing.application import Repository
from eventsourcing.domain import AggregateEvent
E = TypeVar("E", bound=AggregateEvent)
def assert_contains_event(
    repository: Repository,
    board_id: UUID,
    type_: Type[E],
    assertions: Callable[[E], bool],
) -> None:
    """Assert that the aggregate's event stream contains a matching event.

    Scans every stored event for ``board_id`` and returns as soon as one is
    an instance of ``type_`` for which ``assertions`` returns a truthy value.

    Raises
    ------
    AssertionError
        If no stored event matches both the type and the predicate.
    """
    # Fix: removed a leftover debug print(e) that spammed test output
    # for every stored event.
    for e in repository.event_store.get(board_id):
        if isinstance(e, type_) and assertions(e):
            return
    raise AssertionError("Event not found")
|
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from kneed import KneeLocator
import numpy as np
from scipy.spatial.distance import cdist
# Elbow-method script: fit KMeans for k=1..9 on two iris columns, plot the
# mean nearest-centroid distance (SSE proxy) and mark the knee.
iris = pd.read_csv('C:/Users/jhunjhun/Downloads/iris.csv')
# keep only columns 2 and 3 (petal measurements)
iris = iris.iloc[:,[2,3]]
sse = []
K = range(1,10)
for k in K:
    # Fix: the original fitted the model twice (KMeans(...).fit(iris) followed
    # by kmeanModel.fit(iris)), doubling the work for no effect.
    kmeanModel = KMeans(n_clusters=k).fit(iris)
    # mean distance of each point to its nearest cluster centre
    sse.append(sum(np.min(cdist(iris, kmeanModel.cluster_centers_, 'euclidean'), axis=1)) / iris.shape[0])
plt.figure()
plt.plot(K,sse,'bx-')
plt.xlabel("Number of cluster")
# Fix: axis-label typo "(SEE)" -> "(SSE)"
plt.ylabel("Sum of Squared Error (SSE)")
print(K ,'\n' , sse)
kn = KneeLocator(list(K), sse, S=1.0, curve='convex', direction='decreasing')
plt.vlines(kn.knee, plt.ylim()[0], plt.ylim()[1], linestyles='dashed')
plt.show()
|
# INPUT — read the customer data and item quantities/unit prices
# (the user-facing prompts and receipt are intentionally in Spanish)
cliente=input("Ingrese el nombre del cliente:")
RUC=int(input("Ingrese el numero de RUC:"))
relojes=int(input("Ingrese la cantidad de relojes:"))
pulseras=int(input("Ingrese la cantidad de pulseras:"))
cadenas=int(input("Ingrese la cantidad de cadenas:"))
precio_de_un_reloj=float(input("Ingrese el precio de un reloj:"))
precio_de_una_pulsera=float(input("Ingrese el precio de una pulsera:"))
precio_de_una_cadena=float(input("Ingrese el precio de una cadena:"))
# PROCESSING — total = sum of quantity * unit price for each item type
total=((relojes*precio_de_un_reloj)+(pulseras*precio_de_una_pulsera)+(cadenas*precio_de_una_cadena))
# VERIFIER — boolean sanity flag printed on the receipt
# (True unless total <= bracelet count AND watches != chains, per the exercise)
precio_de_venta=not(total<=pulseras and relojes!=cadenas)
# OUTPUT — print the formatted receipt
print("#########################")
print("# Joyeria: EL MILLONARIO ")
print("#########################")
print("# cliente:", cliente)
print("# RUC:", RUC)
print("# relojes:", relojes," precio: S/.", precio_de_un_reloj)
print("# pulseras:", pulseras," precio: S/.", precio_de_una_pulsera)
print("# cadenas:", cadenas," precio: S/.", precio_de_una_cadena)
print("# total: S/.", total)
print("#########################")
print("# precio de venta:", precio_de_venta)
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Autor: Ingmar Stapel
# Date: 20141229
# Version: 1.0
# Homepage: www.raspberry-pi-car.com
import sys, tty, termios, os
from L298NHBridge import HBridge
# Module state: current drive speed and steering angle, updated by the key loop.
speedrun = 0
anglesteer = 0
# GPIO pin assignment for the L298N H-bridge
# NOTE(review): pin roles are defined by the HBridge constructor — confirm
# the ordering against L298NHBridge.py.
Motors = HBridge(19, 26, 23, 24, 13, 21, 22)
# Instructions for when the user has an interface
print("w/s: direction")
print("a/d: steering")
print("q: stops the motors")
print("p: print motor speed (L/R)")
print("x: exit")
# The catch method can determine which key has been pressed
# by the user on the keyboard.
def getch():
    """Read one raw keypress from stdin and return it as a 1-char string.

    The terminal is switched to raw mode for the read; its previous
    settings are always restored, even if the read raises.
    """
    stdin_fd = sys.stdin.fileno()
    saved_attributes = termios.tcgetattr(stdin_fd)
    try:
        tty.setraw(stdin_fd)
        key = sys.stdin.read(1)
    finally:
        termios.tcsetattr(stdin_fd, termios.TCSADRAIN, saved_attributes)
    return key
# Infinite loop
# The loop will not end until the user presses the
# exit key 'X' or the program crashes...
def printscreen():
    """Clear the terminal and redraw the key help plus current motor values.

    Reads the module-level globals ``speedrun`` and ``anglesteer``.
    """
    os.system('clear')
    for help_line in (
        "w/s: direction",
        "a/d: steering",
        "q: stops the motors",
        "x: exit",
        "========== Speed Control ==========",
    ):
        print(help_line)
    print("run motor: ", speedrun)
    print("steer motor: ", anglesteer)
# Main control loop: read one keypress per iteration and translate it into
# motor commands.  Runs until the user presses 'x'.
while True:
    # Block until the next key is pressed.
    char = getch()
    # The car will drive forward when the "w" key is pressed
    if(char == "w"):
        # accelerate the RaPi car; clamp the speed at +1 (full forward)
        speedrun = speedrun + 0.1
        if speedrun > 1:
            speedrun = 1
        Motors.setMotorRun(speedrun)
        printscreen()
    # The car will reverse when the "s" key is pressed
    elif(char == "s"):
        # slow down the RaPi car; clamp the speed at -1 (full reverse)
        speedrun = speedrun - 0.1
        if speedrun < -1:
            speedrun = -1
        Motors.setMotorRun(speedrun)
        printscreen()
    # Stop the motors and centre the steering
    elif(char == "q"):
        speedrun = 0
        # BUG FIX: the original assigned to an unused 'speedsteer' variable,
        # so the steering angle was never actually reset on stop.
        anglesteer = 0
        Motors.setMotorRun(speedrun)
        Motors.setMotorSteer(anglesteer)
        printscreen()
    # The "d" key steers one step right, clamped at +5.5
    elif(char == "d"):
        anglesteer = anglesteer + 0.5
        if anglesteer > 5.5:
            anglesteer = 5.5
        Motors.setMotorSteer(anglesteer)
        printscreen()
    # The "a" key steers one step left, clamped at -5.5
    elif(char == "a"):
        anglesteer = anglesteer - 0.5
        if anglesteer < -5.5:
            anglesteer = -5.5
        Motors.setMotorSteer(anglesteer)
        printscreen()
    # The "x" key stops the motors, releases the driver and exits the program
    elif(char == "x"):
        Motors.setMotorRun(0)
        Motors.setMotorSteer(0)
        Motors.exit()
        print("Program Ended")
        break
# End
|
'''
Created on Jun 29, 2017
@author: Xueping
'''
from sklearn.decomposition import LatentDirichletAllocation
import pandas as pd
import numpy as np
def load_Preprocess():
    """Load the HAB test CSV, bin the numeric columns into categorical
    buckets, one-hot encode the categorical features and drop the
    identifier columns.

    Returns:
        pandas.DataFrame: one-hot encoded feature matrix ready for LDA.
    """
    df = pd.read_csv("~/data/hab_test.csv",
                     dtype={'hab_test.tot_visit': np.float32,
                            'hab_test.tot_visit_ed': np.float32,
                            'hab_test.tot_visit_acute': np.float32,
                            'hab_test.age_in_2015': np.float32},
                     low_memory=False)
    # BUG FIX: fillna() is not in-place; the original call discarded its
    # result, leaving NaNs in the frame.
    df = df.fillna(0)
    # Strip the 'hab_test.' table prefix from every column name.
    df.columns = [feature.split('.')[1] for feature in df.columns]
    # Bin total visit counts: 0 -> <=5 visits, 1 -> 6..10, 2 -> >10.
    # .loc assignment avoids pandas chained-assignment silent no-ops.
    df.loc[df.tot_visit <= 5.0, 'tot_visit'] = 0
    df.loc[(df.tot_visit > 5.0) & (df.tot_visit <= 10.0), 'tot_visit'] = 1
    df.loc[df.tot_visit > 10.0, 'tot_visit'] = 2
    # Bin ages: 0 child (<=12), 1 teen (13-19), 2 young adult (20-30),
    # 3 adult (31-60), 4 senior (>60).
    df.loc[df.age_in_2015 <= 12.0, 'age_in_2015'] = 0
    df.loc[(df.age_in_2015 > 12.0) & (df.age_in_2015 <= 19.0), 'age_in_2015'] = 1
    df.loc[(df.age_in_2015 > 19.0) & (df.age_in_2015 <= 30.0), 'age_in_2015'] = 2
    df.loc[(df.age_in_2015 > 30.0) & (df.age_in_2015 <= 60.0), 'age_in_2015'] = 3
    df.loc[df.age_in_2015 > 60.0, 'age_in_2015'] = 4
    df = pd.get_dummies(data=df, columns=['tot_visit', 'age_in_2015', 'gender',
                                          'latest_hospital_type',
                                          'latest_adm_year', 'latest_adm_month'])
    df = df.drop(labels=['row_number', 'person_id', 'tot_visit_ed', 'tot_visit_acute'], axis=1)
    print(df.head(10))
    return df
def run_Model(df):
    """Fit a 5-topic online LDA on the one-hot feature matrix and print
    the top 5 feature names of every topic."""
    # Map column index -> column name so topic components can be labelled.
    tf_feature_names = {idx: name for idx, name in enumerate(df.columns)}
    no_topics = 5
    # Run LDA
    lda = LatentDirichletAllocation(n_topics=no_topics, max_iter=5,
                                    learning_method='online',
                                    learning_offset=50., random_state=0).fit(df.fillna(0))
    def display_topics(model, feature_names, no_top_words):
        # For each topic, print the names of its highest-weight features.
        for topic_idx, topic in enumerate(model.components_):
            print("Topic %d:" % (topic_idx))
            top = topic.argsort()[:-no_top_words - 1:-1]
            print(" ".join([feature_names[i] for i in top]))
    no_top_words = 5
    display_topics(lda, tf_feature_names, no_top_words)
# Script entry point: load/clean the data, then fit and report LDA topics.
if __name__ == "__main__":
    df = load_Preprocess()
    run_Model(df)
|
# -*- coding: utf-8 -*-
"""Test base."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import logging
import numpy as np
from pytest import fixture
from ..base import BaseVisual, GLSLInserter, gloo
from ..transform import (subplot_bounds, Translate, Scale, Range,
Clip, Subplot, TransformChain)
from . import mouse_click, mouse_drag, mouse_press, key_press, key_release
from phy.gui.qt import QOpenGLWindow
logger = logging.getLogger(__name__)
#------------------------------------------------------------------------------
# Fixtures
#------------------------------------------------------------------------------
@fixture
def vertex_shader_nohook():
    # GLSL vertex shader WITHOUT a transform() hook, used to verify that
    # GLSLInserter leaves hook-less shaders untouched.
    yield """
    attribute vec2 a_position;
    void main() {
        gl_Position = vec4(a_position.xy, 0, 1);
    }
    """
@fixture
def vertex_shader():
    # GLSL vertex shader WITH a transform() hook, the target for the
    # inserter's GPU-transform snippets.
    yield """
    attribute vec2 a_position;
    void main() {
        gl_Position = transform(a_position.xy);
        gl_PointSize = 2.0;
    }
    """
@fixture
def fragment_shader():
    # Trivial fragment shader: paint every fragment solid white.
    yield """
    void main() {
        gl_FragColor = vec4(1, 1, 1, 1);
    }
    """
class MyVisual(BaseVisual):
    """Trivial test visual: one white horizontal line across the canvas."""

    def __init__(self):
        super(MyVisual, self).__init__()
        # Built-in 'simple' shader, drawing line primitives.
        self.set_shader('simple')
        self.set_primitive_type('lines')

    def set_data(self):
        """Upload the two line endpoints plus a white color uniform."""
        endpoints = [[-1, 0], [1, 0]]
        self.n_vertices = len(endpoints)
        self.program['a_position'] = endpoints
        self.program['u_color'] = [1, 1, 1, 1]
#------------------------------------------------------------------------------
# Test base
#------------------------------------------------------------------------------
def test_glsl_inserter_nohook(vertex_shader_nohook, fragment_shader):
    """Shaders without a transform() hook must pass through unchanged."""
    vs_in = vertex_shader_nohook
    inserter = GLSLInserter()
    inserter.insert_vert('uniform float boo;', 'header')
    inserter.insert_frag('// In fragment shader.', 'before_transforms')
    vs_out, fs_out = inserter.insert_into_shaders(vs_in, fragment_shader)
    assert vs_out == vs_in
    assert fs_out == fragment_shader
def test_glsl_inserter_hook(vertex_shader, fragment_shader):
    """Inserted snippets must appear in shaders that use transform()."""
    inserter = GLSLInserter()
    inserter.insert_vert('uniform float boo;', 'header')
    inserter.insert_frag('// In fragment shader.', 'before_transforms')
    chain = TransformChain([Scale(.5)])
    inserter.add_gpu_transforms(chain)
    vs, fs = inserter.insert_into_shaders(vertex_shader, fragment_shader)
    assert 'uniform float boo;' in vs
    assert '// In fragment shader.' in fs
def test_mock_events(qtbot, canvas):
    # Smoke-test the mock input helpers: each call should run without error
    # against a live canvas.
    c = canvas
    pos = p0 = (50, 50)
    p1 = (100, 100)
    key = 'A'
    mouse_click(qtbot, c, pos, button='left', modifiers=())
    mouse_press(qtbot, c, pos, button='left', modifiers=())
    mouse_drag(qtbot, c, p0, p1, button='left', modifiers=())
    key_press(qtbot, c, key, modifiers=())
    key_release(qtbot, c, key, modifiers=())
def test_next_paint(qtbot, canvas):
    # Register a one-shot paint callback and show the canvas so it fires.
    @canvas.on_next_paint
    def next():
        pass
    canvas.show()
    qtbot.waitForWindowShown(canvas)
def test_visual_1(qtbot, canvas):
    # End-to-end check of the canvas visual API: add, de-duplicate,
    # hide/show, toggle, look up by key, remove, clear.
    v = MyVisual()
    canvas.add_visual(v, key='key')
    # Should be a no-op when adding the same visual twice.
    canvas.add_visual(v, key='key')
    # Must be called *after* add_visual().
    v.set_data()
    canvas.show()
    qtbot.waitForWindowShown(canvas)
    v.hide()
    canvas.update()
    qtbot.wait(5)
    v.show()
    v.toggle()
    v.toggle()
    assert canvas.get_visual('key') == v
    canvas.remove(v)
    assert canvas.get_visual('key') is None
    canvas.clear()
    # qtbot.stop()
def test_visual_2(qtbot, canvas, vertex_shader, fragment_shader):
    """Test a BaseVisual with multiple CPU and GPU transforms.
    There should be points filling the entire right upper (2, 3) subplot.
    """
    class MyVisual2(BaseVisual):
        def __init__(self):
            super(MyVisual2, self).__init__()
            self.vertex_shader = vertex_shader
            self.fragment_shader = fragment_shader
            self.set_primitive_type('points')
            # CPU-side transform chain: scale, translate, range-normalize.
            self.transforms.add(Scale((.1, .1)))
            self.transforms.add(Translate((-1, -1)))
            self.transforms.add(Range(
                (-1, -1, 1, 1), (-1.5, -1.5, 1.5, 1.5)))
            # Extra vertex-shader snippet inserted after the GPU transforms.
            s = 'gl_Position.y += (1 + 1e-8 * u_window_size.x);'
            self.inserter.insert_vert(s, 'after_transforms')
            self.inserter.add_varying('float', 'v_var', 'gl_Position.x')
        def set_data(self):
            self.n_vertices = 1000
            data = np.random.uniform(0, 20, (1000, 2))
            # Apply the CPU transforms before uploading the positions.
            pos = self.transforms.apply(data).astype(np.float32)
            self.program['a_position'] = pos
    bounds = subplot_bounds(shape=(2, 3), index=(1, 2))
    canvas.gpu_transforms.add([Subplot((2, 3), (1, 2)), Clip(bounds)])
    # We attach the visual to the canvas. By default, a BaseLayout is used.
    v = MyVisual2()
    canvas.add_visual(v)
    v.set_data()
    v = MyVisual2()
    canvas.add_visual(v)
    v.set_data()
    canvas.show()
    qtbot.waitForWindowShown(canvas)
    # qtbot.stop()
def test_canvas_lazy(qtbot, canvas):
    # In lazy mode, data uploads are queued instead of applied immediately;
    # the two program assignments in set_data() should yield 2 queued items.
    v = MyVisual()
    canvas.add_visual(v)
    canvas.set_lazy(True)
    v.set_data()
    canvas.show()
    qtbot.waitForWindowShown(canvas)
    assert len(list(canvas.iter_update_queue())) == 2
def test_visual_benchmark(qtbot, vertex_shader_nohook, fragment_shader):
    # Measure memory growth while repeatedly re-uploading a large GPU buffer.
    try:
        from memory_profiler import memory_usage
    except ImportError: # pragma: no cover
        logger.warning("Skip test depending on unavailable memory_profiler module.")
        return
    class TestCanvas(QOpenGLWindow):
        def paintGL(self):
            gloo.clear()
            program.draw('points')
    program = gloo.Program(vertex_shader_nohook, fragment_shader)
    canvas = TestCanvas()
    canvas.show()
    qtbot.waitForWindowShown(canvas)
    def f():
        # Re-upload 100k random points 100 times while repainting.
        for _ in range(100):
            program['a_position'] = (-1 + 2 * np.random.rand(100_000, 2)).astype(np.float32)
            canvas.update()
            qtbot.wait(1)
    mem = memory_usage(f)
    usage = max(mem) - min(mem)
    print(usage)
    # NOTE: this test is failing currently because of a memory leak in the the gloo module.
    # Recreating a buffer at every cluster selection causes a memory leak, once should ideally
    # use a single large buffer and reuse that, even if the buffer's content is actually smaller.
    # assert usage < 10
    canvas.close()
|
import random
import time
# Load the word list once at import time; one word per line.
with open("words.txt") as f:
    words = f.read().splitlines()
class Game:
    """State for one Codenames-style board: 25 random words, a hidden color
    per square, per-square reveal flags, remaining counts per team, click
    attributions, and the winner once the game ends."""

    def __init__(self, name):
        self.name = name
        first_is_red = True
        self.colors = self.init_colors(first_is_red)
        self.selections = [False] * 25
        self.last_touched = time.time()
        self.words = random.sample(words, 25)
        # The first-moving team gets 9 squares, the other 8.
        self.blue = 8 if first_is_red else 9
        self.red = 9 if first_is_red else 8
        self.scores = {"red": [], "blue": []}
        self.winner = None

    def __repr__(self):
        # Placeholder representation.
        return "Game repr"

    @staticmethod
    def init_colors(red_first):
        """Randomly color the board: 9 squares for the first team, 8 for the
        second, one 'death' square, the remaining 7 'neutral'."""
        remaining = set(range(25))
        first_team = random.sample(list(remaining), 9)
        remaining -= set(first_team)
        second_team = random.sample(list(remaining), 8)
        remaining -= set(second_team)
        assassin = random.choice(list(remaining))
        board = ["neutral"] * 25
        first_name, second_name = ("red", "blue") if red_first else ("blue", "red")
        for idx in first_team:
            board[idx] = first_name
        for idx in second_team:
            board[idx] = second_name
        board[assassin] = "death"
        return board

    def select(self, num, clicker):
        """Reveal square *num* on behalf of *clicker*; no-op if already revealed."""
        self.last_touched = time.time()
        if self.selections[num]:
            return
        self.selections[num] = True
        color = self.colors[num]
        if color == "death":
            # Hitting the assassin square immediately loses the game
            # for the clicker's team.
            self.winner = "blue" if clicker == "red" else "red"
        elif color in ("red", "blue"):
            left = getattr(self, color) - 1
            setattr(self, color, left)
            if left == 0:
                self.winner = color
            self.scores[color].append(clicker)

    @property
    def unattended(self):
        # unattended if it hasn't been touched in an hour
        idle_seconds = time.time() - self.last_touched
        return idle_seconds > 3600

    @property
    def payload(self):
        """JSON-serializable snapshot of the full game state for clients."""
        squares = [
            [selected, color, word]
            for selected, color, word in zip(self.selections, self.colors, self.words)
        ]
        return {
            "squares": squares,
            "red": self.red,
            "blue": self.blue,
            "winner": self.winner,
            "name": self.name,
            "scores": self.scores,
        }
|
import django.shortcuts
from .models import Author
# Create your views here.
def authors_list(request):
    """Render the index page with every Author in the database."""
    all_authors = Author.objects.all()
    context = {'authors': all_authors}
    return django.shortcuts.render(request, 'project_first_app/index.html', context=context)
def author_detail(request, slug):
    """Render the detail page for the author whose slug matches (case-insensitive)."""
    matched = Author.objects.get(slug__iexact=slug)
    context = {'author': matched}
    return django.shortcuts.render(request, 'project_first_app/author_detail.html', context=context)
|
from eso_bot.backend.helpers import command_invoked
from discord.ext.commands import Cog, command
class Admin(Cog, name="Admin commands"):
    """
    Admin commands.
    """

    def __init__(self, bot):
        self.bot = bot
        # Discord user ids allowed to run admin-only commands.
        self.admins = [391583287652515841]

    @command(name="stop")
    async def restart_command(self, ctx):
        """Log the bot out; restricted to the hard-coded admin list."""
        await command_invoked(self.bot, "stop", ctx.message.author)
        user = ctx.message.author.id
        # Guard clause: reject non-admins up front.
        if user not in self.admins:
            return await ctx.send("Only admins can use that command.")
        await ctx.send("Goodbye cruel world :(")
        return await self.bot.logout()
# discord.py extension entry point: register the Admin cog on the bot.
def setup(bot):
    bot.add_cog(Admin(bot))
|
import numpy as np
def identify_quant_cols(
    # DataFrame to inspect; default None keeps the original no-arg call working.
    parameter_data=None
):
    """Return the names of the quantitative (numeric-dtype) columns.

    Args:
        parameter_data: pandas DataFrame to inspect, or None.

    Returns:
        list: names of the numeric columns; empty list when no DataFrame
        is given (backward-compatible with the original no-op stub).
    """
    if parameter_data is None:
        return []
    return list(parameter_data.select_dtypes(include=np.number).columns)
def make_col_positive(
    #identify the smallest negative number, and add that to the entire dataset to shift everything into positive
    parameter_data, column_number
):
    """Shift a DataFrame column in place so every value is strictly positive.

    If the column's minimum is <= 0, the whole column is shifted by
    (1 - min) so the new minimum becomes exactly 1 (this matches the
    original ``abs(min) + 1`` shift for non-positive minima).  Columns that
    are already strictly positive are left untouched — the original added
    ``abs(min) + 1`` unconditionally, wrongly shifting positive columns.

    Args:
        parameter_data: pandas DataFrame, modified in place.
        column_number: integer position of the target column.
    """
    col = parameter_data.columns[column_number]
    col_min = parameter_data[col].min()
    if col_min <= 0:
        parameter_data[col] = parameter_data[col] + (1 - col_min)
def log_transform(
    # log is only useful on a positive data set. so depends on the make col positive function above
    parameter_data, column_number
):
    """Natural-log transform a DataFrame column in place.

    The column is first shifted strictly positive via make_col_positive
    (log is only defined for positive values), then replaced by its
    element-wise natural logarithm.

    Args:
        parameter_data: pandas DataFrame, modified in place.
        column_number: integer position of the target column.
    """
    make_col_positive(parameter_data, column_number)
    # np is imported at module level; the redundant function-local
    # 'import numpy as np' from the original was removed.
    col = parameter_data.columns[column_number]
    parameter_data[col] = np.log(parameter_data[col])
|
from tkinter import *
from tkintertable import TableCanvas, TableModel
from PIL import ImageTk,Image
import os
import csv
import cv2
import time
from functools import partial
from utils import decode_video, get_image
class get_labels_single():
    """Load a single key->label CSV and expose it as a dict or a list."""

    def __init__(self,labels_csv,):
        def read_csv(the_csv):
            # First CSV column is the key, second is the label name.
            with open(the_csv, mode='r') as infile:
                return {row[0]: row[1] for row in csv.reader(infile)}
        self._labels = read_csv(labels_csv)

    def labels(self):
        """Return label names in file order (populates the options menu)."""
        return [name for name in self._labels.values()]

    def label_dict(self):
        """Return the raw key->label mapping."""
        return self._labels
class get_labels_duo():
    """Load separate verb and noun CSVs and expose each as a dict or a list."""

    def __init__(self,verb_csv,noun_csv):
        def read_csv(the_csv):
            # First CSV column is the key, second is the label name.
            with open(the_csv, mode='r') as infile:
                return {row[0]: row[1] for row in csv.reader(infile)}
        self._verbs = read_csv(verb_csv)
        self._nouns = read_csv(noun_csv)

    def verbs(self):
        """Return verb names in file order (populates the options menu)."""
        return [name for name in self._verbs.values()]

    def nouns(self):
        """Return noun names in file order."""
        return [name for name in self._nouns.values()]

    def verb_dict(self):
        """Return the raw key->verb mapping."""
        return self._verbs

    def noun_dict(self):
        """Return the raw key->noun mapping."""
        return self._nouns
class label_GUI:
    """Tkinter GUI for labeling action segments in a video.

    Shows decoded video frames, playback controls, label drop-downs
    ('single' mode: one class CSV; 'duo' mode: verb+noun CSVs), and writes
    selected (start, end, label) rows to a CSV displayed in a table.
    Keyboard: arrows step frames, space marks start/end, Return submits,
    Escape cancels.
    """
    def __init__(self, root, video_path, csv_path, labels_csv, mode):
        """Build all widgets and enter the Tk main loop (blocks).

        root: Tk root window; video_path: source video file;
        csv_path: output label CSV; labels_csv: label CSV path (single
        mode) or (verb_csv, noun_csv) tuple (duo mode); mode: 'single'|'duo'.
        """
        self.video_path = video_path
        self.fps = cv2.VideoCapture(video_path).get(cv2.CAP_PROP_FPS)
        self.csv_path = csv_path
        self.mode = mode
        if self.mode == 'single':
            self.labels = get_labels_single(labels_csv)
            self.CLASSES = self.labels.labels()
            self.CLASSES_DICT = self.labels.label_dict()
        if self.mode == 'duo':
            verb_csv,noun_csv = labels_csv
            self.labels = get_labels_duo(verb_csv,noun_csv)
            self.NOUNS = self.labels.nouns()
            self.NOUNS_DICT = self.labels.noun_dict()
            self.VERBS = self.labels.verbs()
            self.VERBS_DICT = self.labels.verb_dict()
        self.window = root
        self.canvas = Canvas(self.window,width = 1850,height=1000)
        self.canvas.pack()
        self.current_state = 0 #this is either 0 (not labeling), 1 (mid label (i.e defined start)), 2 labeled waiting for confirmation
        self.console_output = []
        self.current_index = 0
        self.start_frame = None
        self.end_frame = None
        #pay attention to all keypresses
        self.window.bind("<Key>",self.key_pressed)
        #decode the video here (and check if its already been decoded)
        self.image_folder = decode_video(self.video_path)
        #==================== create all the widgets =========================:
        #label selection
        if self.mode == 'single':
            self.label = Label(self.window,text='Select Label:',font=("Courier", 15))
            self.label.place(x=1150,y=150)
            self.class_drop = StringVar(self.window)
            self.class_drop.set(self.CLASSES[0])
            self.w_classes = OptionMenu(*(self.window, self.class_drop) + tuple(self.CLASSES))
            self.w_classes.place(x=1400,y=150)
        if self.mode == 'duo':
            self.label = Label(self.window,text='Select Label:',font=("Courier", 15))
            self.label.place(x=1150,y=150)
            self.noun_drop = StringVar(self.window)
            self.noun_drop.set(self.NOUNS[0])
            self.w_noun = OptionMenu(*(self.window, self.noun_drop) + tuple(self.NOUNS))
            self.w_noun.place(x=1550,y=150)
            self.verb_drop = StringVar(self.window)
            self.verb_drop.set(self.VERBS[0])
            self.w_verb = OptionMenu(*(self.window, self.verb_drop) + tuple(self.VERBS))
            self.w_verb.place(x=1400,y=150)
        #play buttons:
        self.prev_button = Button(self.window, text="Prev", height=100,width=100,command=self.prev)
        self.next_button = Button(self.window, text="Next", height=100,width=100,command=self.nxt)
        self.pause_button = Button(self.window, text="Stop", height = 50, width=100, command=self.pause_video)
        self.play_button = Button(self.window, text="Play", height=50, width=100, command=partial(self.play_video,speed=1))
        self.play_button2 = Button(self.window, text="x2", height=50, width=100, command=partial(self.play_video,speed=2))
        self.play_button4 = Button(self.window, text="x4", height=50, width=100, command=partial(self.play_video,speed=4))
        self.play_button8 = Button(self.window, text="x8", height=50, width=100, command=partial(self.play_video,speed=8))
        self.speed = 1
        self.prev_button.place(bordermode=OUTSIDE, height=100, width=100, x=45,y=300)
        self.next_button.place(bordermode=OUTSIDE, height=100, width=100, x=1050,y=300)
        self.pause_button.place(bordermode=OUTSIDE, height= 50, width=100, x=400, y=600)
        self.play_button.place(bordermode=OUTSIDE, height=50, width=100, x=550, y=600)
        self.play_button2.place(bordermode=OUTSIDE, height=50, width=100, x=650, y=600)
        self.play_button4.place(bordermode=OUTSIDE, height=50, width=100, x=750, y=600)
        self.play_button8.place(bordermode=OUTSIDE, height=50, width=100, x=850, y=600)
        self.pause = False #boolean, if true, video is in paused (still image state) if faluse, it is playing.
        self.currently_playing = False
        #frame input textbox and button for submitting it.
        self.textBox = Text(self.window, height=1, width=10)
        self.textBox.place(x=150,y=600)
        self.buttonCommit=Button(self.window, height=1, width=10, text="Jump to frame", command=lambda: self.retrieve_input())
        self.buttonCommit.place(x=250,y=600)
        #display frame numbers
        self.frame_no = Label(self.window,text='Current Frame: {}'.format(self.current_index),font=("Courier", 15))
        self.start_no = Label(self.window,text='Start Frame: {}'.format(self.start_frame),font=("Courier", 15))
        self.end_no = Label(self.window,text='End Frame: {}'.format(self.end_frame),font=("Courier", 15))
        self.frame_no.place(x=150,y=45)
        self.start_no.place(x=1150,y=70)
        self.end_no.place(x=1500,y=70)
        #display image with current index.
        self.image = get_image(self.image_folder,self.current_index) #retun the current image from
        self.max_index = len(os.listdir(self.image_folder))-1
        self.img = ImageTk.PhotoImage(self.image.resize((896,504)))
        self.img_panel = Label(self.window,image=self.img)
        self.img_panel.image = self.img
        self.img_panel.place(x=150,y=75)
        #add a slider to navigate frames
        self.slider = Scale(self.window,from_=0,to=self.max_index,orient=HORIZONTAL)
        self.slider.set(0)
        self.slider.place(width=900,x=150, y=700)
        self.slider_button = Button(self.window, text='Jump',command=self.goto_slider)
        self.slider_button.place(x=1075,y=715)
        #console output.
        self.console_listbox = Listbox(self.window)
        for item in self.console_output:
            self.console_listbox.insert(END,item)
        self.console_listbox.place(height=200,width=600,x=1200,y=700)
        #table output
        self.label_data = self.read_csv()
        self.write('Welcome to my simple video label GUI. Please read the Github for user instructions')
        self.window.mainloop()
    def read_csv(self):
        #reads csv and displays table using tkintertable
        self.tframe = Frame(self.window)
        self.tframe.place(x=1200,y=350,width=600)
        self.table = TableCanvas(self.tframe)
        self.table.importCSV(self.csv_path)
        self.table.show()
    def write(self, message):
        #function that is used instead of regular print statement to display messages within the GUI output box
        if len(self.console_output) < 10: #only collect last 10 lines of output.
            self.console_output.append(message)
        else:
            self.console_output = self.console_output[1:]
            self.console_output.append(message)
        self.update_all()
        return self.console_output
    def prev(self):
        #move to next image
        # NOTE(review): despite the comment above, this steps BACKWARD one
        # frame (clamped at 0); the comments on prev/nxt are swapped.
        if self.current_index == 0:
            self.update_all()
        else:
            self.current_index -=1
            self.update_all()
    def nxt(self):
        #move to previous image
        # NOTE(review): despite the comment above, this steps FORWARD one
        # frame (clamped at max_index).
        if self.current_index == self.max_index:
            self.update_all()
        else:
            self.current_index +=1
            self.update_all()
    def play_video(self,speed):
        #function that runs when play buttons are pressed.
        # Schedules itself via window.after() to advance one frame per tick
        # at roughly speed * fps frames per second.
        self.speed = speed
        def play():
            delay = int((1/(self.speed*self.fps))*1000)
            if self.pause: #if currently paused and button is pressed, we want to play
                self.window.after_cancel(self.after_id)
                self.pause = False
                self.currently_playing = False
            else:
                self.currently_playing = True
                self.current_index+=1
                start = time.time()
                self.update_image()
                finish = int(1000*(time.time() - start))
                delay = max(1,(delay-finish)) #this factors in the time to retrieve and display an image into the fps calculations.
                self.after_id = self.window.after(delay,play)
        play()
    def pause_video(self):
        # Request a pause; the scheduled play() callback observes the flag.
        if self.currently_playing:
            self.pause = True
        else:
            self.pause = False
    def retrieve_input(self):
        # Jump to the frame number typed in the textbox, after validation.
        input_val = self.textBox.get("1.0",END)
        try:
            input_val = int(input_val)
        except:
            self.write('please input an intiger')
            self.window.mainloop()
            input_val = 0
        if input_val < 0 or input_val > int(self.max_index):
            self.write('please enter a value between 0 and {}'.format(self.max_index))
            self.window.mainloop()
        else:
            self.current_index = input_val
            self.write('jumped to frame {}'.format(self.current_index))
            self.window.mainloop()
    def goto_slider(self):
        self.current_index = self.slider.get() #set current index to slider value.
        self.update_all()
    def _on_mousewheel(self, event):
        if event.delta < 0: #if scroll down
            self.prev()
        if event.delta > 0: #if scrolled up
            self.nxt()
    #code for using keyboard shortcuts.
    def key_pressed(self,event):
        # State machine: 0 = idle, 1 = start frame chosen, 2 = end frame
        # chosen (awaiting Return to submit or Escape to cancel).
        if event.keysym == 'Right':
            self.nxt()
        if event.keysym == 'Left':
            self.prev()
        if event.keysym == 'space':
            if self.current_state == 0:
                self.start_frame = self.current_index
                self.write('selected a start frame, press space to select end frame or esc to cancel selection')
                self.end_frame = None
                self.current_state = 1 #change state to 1.
            elif self.current_state == 1:
                self.end_frame = self.current_index
                self.write('Selected an end frame, press space to change end frame, return to submit the label or esc to cancel selection')
                self.current_state = 2
            elif self.current_state == 2:
                self.end_frame = self.current_index
            self.update_all()
        if event.keysym == 'Return':
            if self.current_state == 2: #only care is someone is in state 2.
                self.make_label()
                self.current_state=0
                self.start_frame = None
                self.end_frame = None
            else:
                self.write('You must make a start and end frame selection before submitting the label')
            self.update_all()
        if event.keysym == 'Escape':
            #if escape is hit, delete all frame selections and return to state 0, ready for a new input sequence.
            self.start_frame = None #delete frame selection
            self.end_frame = None
            self.current_state = 0 #set current state back to 0.
            self.write('cancled frame selection')
            self.update_all()
    def make_label(self):
        # Append one label row to the output CSV and refresh the table view.
        video_name = os.path.basename(self.video_path)[:-4]
        if self.mode == 'single':
            _class = self.class_drop.get()
            with open(self.csv_path,'a',newline='') as csvfile:
                linewriter = csv.writer(csvfile,delimiter=',')
                linewriter.writerow([video_name,self.start_frame,self.end_frame,_class,self.CLASSES.index(_class)])
            self.write('added action {} between frames {} and {} to csv file'.format(_class,self.start_frame,self.end_frame))
        if self.mode == 'duo':
            verb = self.verb_drop.get()
            noun = self.noun_drop.get()
            #still need to implement checks here, e.g make sure end frame is after begining, that they are not the same frame etc...
            with open(self.csv_path,'a',newline='') as csvfile:
                linewriter = csv.writer(csvfile,delimiter=',')
                linewriter.writerow([video_name,self.start_frame,self.end_frame,verb,self.VERBS.index(verb),noun,self.NOUNS.index(noun)])
            self.write('added label to csv file, action {} {} between frames {} and {}'.format(verb,noun,self.start_frame,self.end_frame))
        self.read_csv()
    def update_image(self):
        """
        Same as update_all except only updates the image and the current frame - quicker to execute as doesn't update csv table
        """
        pil_img = get_image(self.image_folder,self.current_index)
        img = ImageTk.PhotoImage(pil_img.resize((896,504))) #size is 896x504
        self.img_panel.configure(image=img)
        self.img_panel.image = img
        self.frame_no['text'] = "Current Frame: {}".format(self.current_index)
        self.slider.set(self.current_index)
    def update_all(self):
        '''
        the main function that updates the display
        '''
        pil_img = get_image(self.image_folder,self.current_index)
        img = ImageTk.PhotoImage(pil_img.resize((896,504)))
        self.img_panel.configure(image=img)
        self.img_panel.image = img
        self.slider.set(self.current_index)
        self.table.show()
        self.frame_no['text'] = "Current Frame: {}".format(self.current_index)
        self.start_no['text'] = "Start Frame: {}".format(self.start_frame)
        self.end_no['text'] = "End Frame: {}".format(self.end_frame)
        self.console_listbox.delete(0,'end')
        for item in self.console_output:
            self.console_listbox.insert(END,item)
|
import itertools
# Read grid width/height and k, then the h grid rows (C is unused below).
w, h, k = map(int, input().split())
C = [input() for _ in range(h)]
# Symbolic identifiers for each row (i0..i{h-1}) and column (j0..j{w-1}).
I = [f'i{i}' for i in range(h)]
J = [f'j{i}' for i in range(w)]
# All subsets of rows+columns of size 0 .. w+h-1.
# NOTE(review): range(w+h) excludes the full (w+h)-element subset —
# confirm whether the complete set should also be enumerated.
combi = []
for i in range(w+h):
    combi.extend(itertools.combinations(I + J, i))
|
#
# Ghost_evade w/ standard DQN at work.
#
#
import gym
import numpy as np
import time as time
import tensorflow as tf
import tf_util_rob as U
import models as models
import build_graph_rob3 as build_graph
from replay_buffer import ReplayBuffer, PrioritizedReplayBuffer
from schedules import LinearSchedule
import matplotlib.pyplot as plt
# I had problems w/ the gym environment, so here I made my own standalone class
#import envs.frozen_lake
#import envs.ballcatch1_standalone as envstandalone
import envs.testrob3_standalone as envstandalone
def main():
    """Train a standard (non-double) DQN on the TestRob3Env ghost-evade
    environment using an epsilon-greedy policy with a linearly annealed
    exploration schedule and a uniform replay buffer."""
#    env = envstandalone.BallCatch()
    env = envstandalone.TestRob3Env()
    # --- hyperparameters ---
    max_timesteps=40000
    learning_starts=1000
    buffer_size=50000
#    buffer_size=1000
    exploration_fraction=0.2
    exploration_final_eps=0.02
    print_freq=10
    gamma=.98
    target_network_update_freq=500
    learning_alpha = 0.2
    batch_size=32
    train_freq=1
    obsShape = (8,8,1)
    deicticShape = (3,3,1)
    num_deictic_patches=36
    num_actions = 4
    episode_rewards = [0.0]
    num_cpu=16
    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps),
                                 initial_p=1.0,
                                 final_p=exploration_final_eps)
    # same as getDeictic except this one just calculates for the observation
    # input: n x n x channels
    # output: dn x dn x channels
    def getDeicticObs(obs):
        windowLen = deicticShape[0]
        deicticObs = []
        for i in range(np.shape(obs)[0] - windowLen + 1):
            for j in range(np.shape(obs)[1] - windowLen + 1):
                deicticObs.append(obs[i:i+windowLen,j:j+windowLen,:])
        return np.array(deicticObs)
    # conv model parameters: (num_outputs, kernel_size, stride)
    model = models.cnn_to_mlp(
#        convs=[(16,3,1)],
        convs=[(16,2,1)],
#        convs=[(32,3,1)],
        hiddens=[16],
#        hiddens=[64],
#        dueling=True
        dueling=False
    )
    q_func=model
#    lr=1e-3
    lr=0.001
    def make_obs_ph(name):
#        return U.BatchInput(deicticShape, name=name)
        return U.BatchInput(obsShape, name=name)
    def make_target_ph(name):
        return U.BatchInput([num_actions], name=name)
    sess = U.make_session(num_cpu)
    sess.__enter__()
    # Build the TF graph: getq computes Q-values, targetTrain fits targets.
    getq, targetTrain = build_graph.build_train_nodouble(
        make_obs_ph=make_obs_ph,
        make_target_ph=make_target_ph,
        q_func=q_func,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        grad_norm_clipping=10,
        double_q=False
    )
    # Initialize the parameters and copy them to the target network.
    U.initialize()
    replay_buffer = ReplayBuffer(buffer_size)
    obs = env.reset()
    timerStart = time.time()
    for t in range(max_timesteps):
        # Get current q-values: neural network version
        qCurr = getq(np.array([obs]))
        # select action
        qCurrNoise = qCurr + np.random.random(np.shape(qCurr))*0.01 # add small amount of noise to break ties randomly
        action = np.argmax(qCurrNoise,1)
        if np.random.rand() < exploration.value(t):
            action = np.random.randint(env.action_space.n)
        # take action
        new_obs, rew, done, _ = env.step(action)
        replay_buffer.add(obs, action, rew, new_obs, float(done))
#        # debug
#        if t > 5000:
#            print("obs:\n" + str(np.squeeze(obs)))
#            print("qCurr:\n" + str(qCurr))
#            print("action: " + str(action) + ", patch: " + str(selPatch))
#            print("close:\n" + str(obsDeictic[selPatch,:,:,0] + obsDeictic[selPatch,:,:,1]))
#            print("far:\n" + str(obsDeictic[selPatch,:,:,2] + obsDeictic[selPatch,:,:,3]))
#            action
        # sample from replay buffer and train
        if t > learning_starts and t % train_freq == 0:
            # Sample from replay buffer
            obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
            actions = np.int32(np.reshape(actions,[batch_size,]))
            # Get curr, next values: neural network version
            qNext = getq(obses_tp1)
            qCurr = getq(obses_t)
            # Get targets
            qNextmax = np.max(qNext,1)
            # Bellman target: r + gamma * max_a' Q(s', a'), zeroed at episode end.
            targets = rewards + (1-dones) * gamma * qNextmax
            # Only the taken action's Q-value is moved toward the target;
            # the other actions keep their current predictions.
            qCurrTargets = np.zeros(np.shape(qCurr))
            for i in range(num_actions):
                myActions = actions == i
                qCurrTargets[:,i] = myActions * targets + (1 - myActions) * qCurr[:,i]
            # Update values: neural network version
            td_error_out, obses_out, targets_out = targetTrain(
                    obses_t,
                    qCurrTargets
                    )
            td_error_pre = qCurr[range(batch_size),actions] - targets
#            print("td error pre-update: " + str(np.linalg.norm(td_error_pre)))
            # neural network version
            qCurr = getq(obses_t)
            td_error_post = qCurr[range(batch_size),actions] - targets
#            print("td error post-update: " + str(np.linalg.norm(td_error_post)))
        # bookkeeping for storing episode rewards
        episode_rewards[-1] += rew
        if done:
            new_obs = env.reset()
            episode_rewards.append(0.0)
        mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
        num_episodes = len(episode_rewards)
        if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
#            print("steps: " + str(t) + ", episodes: " + str(num_episodes) + ", mean 100 episode reward: " + str(mean_100ep_reward) + ", % time spent exploring: " + str(int(100 * exploration.value(t))) + ", max q at curr state: " + str(np.max(qCurr)))
            timerFinal = time.time()
            print("steps: " + str(t) + ", episodes: " + str(num_episodes) + ", mean 100 episode reward: " + str(mean_100ep_reward) + ", % time spent exploring: " + str(int(100 * exploration.value(t))) + ", time elapsed: " + str(timerFinal - timerStart))
            timerStart = timerFinal
        obs = new_obs
# Script entry point.
if __name__ == '__main__':
    main()
|
# Generated by Django 2.2 on 2019-04-21 21:24
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    """Initial schema for the board app: Racer entries and Race results."""

    # First migration for this app; no prior state to build on.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Racer',
            fields=[
                # UUID primary key instead of the default auto-increment id.
                ('id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
                ('name_r', models.CharField(max_length=200)),
                ('name_t', models.CharField(max_length=200)),
                ('desc_c', models.CharField(max_length=1000)),
                ('desc_r', models.CharField(max_length=1000)),
                # Single-letter experience code, defaults to Beginner.
                ('exp_r', models.CharField(choices=[('b', 'Beginner'), ('i', 'Intermediate'), ('e', 'Experienced'), ('p', 'Professional')], default='b', max_length=1)),
                # Two-letter class code, defaults to Fourth.
                ('class_r', models.CharField(choices=[('fo', 'Fourth'), ('th', 'Third'), ('sc', 'Second'), ('fi', 'First')], default='fo', max_length=2)),
            ],
            options={
                'ordering': ['name_t'],
            },
        ),
        migrations.CreateModel(
            name='Race',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=200)),
                # One FK per finishing position; SET_NULL keeps the race row
                # if a racer is deleted.
                ('fifth', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='racer_fifth', to='board.Racer')),
                ('first', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='racer_first', to='board.Racer')),
                # NOTE(review): field name 'fouth' looks like a typo for 'fourth'
                # (its related_name is 'racer_fourth'); renaming requires a
                # follow-up migration once this one has been applied — confirm.
                ('fouth', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='racer_fourth', to='board.Racer')),
                ('second', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='racer_second', to='board.Racer')),
                ('third', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='racer_third', to='board.Racer')),
            ],
        ),
    ]
|
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 28 09:29:06 2019
@author: Aaron
"""
import pandas as pd
# Input: one row per ballot cast, with "Voter ID" and "Candidate" columns.
file_one = "Resources/election_data.csv"
file_one_df = pd.read_csv(file_one, encoding="ISO-8859-1")

title = "Election Results\n \n------------------------"
print(title)

# Find total votes. Each row is one ballot, so the per-ID counts sum to the
# number of ballots cast. FIX: the original reported sum(total_votes) here
# but divided percentages by total_votes.count() (the number of *unique*
# voter IDs) below — those disagree whenever an ID appears more than once.
# Use one total everywhere.
total_votes = file_one_df["Voter ID"].value_counts()
total_count = sum(total_votes)
total_votes_str = (f"Total Votes: {total_count} \n------------------------\n")
print(total_votes_str)

# Group ballots by candidate.
grouped_candidates = file_one_df.groupby(["Candidate"])

# Get total votes per candidate.
vote_p_can = grouped_candidates["Voter ID"].count()

# Vote percentage per candidate, out of all ballots cast.
p_votes = 100 * vote_p_can / total_count
pd.options.display.float_format = '{:.3f}%'.format

# Summary table: percentage and raw count per candidate, most votes first.
voter_sum = pd.DataFrame({"Percent of Vote": p_votes, "Votes per Candidate": vote_p_can})
voter_sum = voter_sum.sort_values("Votes per Candidate", ascending=False)
voter_sum_str = (voter_sum.rename_axis('').rename_axis("Candidate", axis=1))
print(voter_sum_str)

# The winner is the candidate with the most votes.
winner = vote_p_can.idxmax()
winner_str = (f"\n------------------------\nWinner: {winner}\n------------------------")
print(winner_str)

# Write the same report to a text file; "with" guarantees the handle is
# closed even if a write fails (the original only closed on success).
with open("PyPolling_Analysis.txt", "w") as pypoll_text:
    for x in [title, total_votes_str, voter_sum_str, winner_str]:
        pypoll_text.write(str(x))
        pypoll_text.write("\n")
# -*- coding: utf-8 -*-
"""
Created on Apr 11 2020
@author: Felix Brinquis
Description: este programa recibe como parametro un fichero en formato TCX Garmin, lee el contenido
del mismo y lo convierte en un dataframe.
"""
# Importacion de librerias
from lxml.etree import parse as lxml_parse
from dateutil.parser import parse as dateutil_parse
import pandas as pd
import numpy as np
def LecturaTCX(ficheroTCX):
    """Parse a Garmin TCX activity file into a time-indexed DataFrame.

    Parameters
    ----------
    ficheroTCX : str
        Path to the .tcx file (Windows-style path; the activity name is
        taken from the file name, without its extension).

    Returns
    -------
    tuple
        (Name, Type, DataFrame): Name is the file name stem, Type is the
        activity's Sport attribute, and DataFrame holds distance, heart
        rate, speed and run cadence samples indexed by (naive) timestamp.
        Columns that are entirely zero are dropped; duplicate timestamps
        keep their first occurrence.
    """
    tcx = lxml_parse(ficheroTCX)
    Name = ficheroTCX.split('\\')[-1].split('.')[0]

    # The Sport attribute of the Activity element gives the activity type
    # (last Activity wins if the file contains several).
    for Activity in tcx.xpath("//TrainingCenterDatabase:Activity", namespaces = {'TrainingCenterDatabase': "http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2"}):
        Type = Activity.attrib['Sport']

    # One value per trackpoint; a dropped sensor sample leaves a list short.
    HoraISO = []
    Distancia = []
    FrecuenciaCardiaca = []
    Velocidad = []
    CadenciaBraceo = []

    # Walk every trackpoint and pull out the fields of interest.
    for Trackpoint in tcx.xpath("//TrainingCenterDatabase:Trackpoint", namespaces = {'TrainingCenterDatabase': "http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2"}):
        if Trackpoint.tag == '{http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2}Trackpoint':
            for tpt in Trackpoint:
                if tpt.tag == '{http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2}Time':
                    # Timestamps are stored naive (timezone deliberately stripped).
                    HoraISO.append(dateutil_parse(tpt.text).replace(tzinfo=None))
                if tpt.tag == '{http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2}DistanceMeters':
                    Distancia.append(float(tpt.text))
                if tpt.tag == '{http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2}HeartRateBpm':
                    for HeartRateBpm in tpt:
                        if HeartRateBpm.tag == '{http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2}Value':
                            FrecuenciaCardiaca.append(float(HeartRateBpm.text))
                if tpt.tag == '{http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2}Extensions':
                    # Speed and run cadence live in the ActivityExtension (TPX) block.
                    for Extensions in tpt:
                        if Extensions.tag == '{http://www.garmin.com/xmlschemas/ActivityExtension/v2}TPX':
                            for ns3 in Extensions:
                                if ns3.tag == '{http://www.garmin.com/xmlschemas/ActivityExtension/v2}Speed':
                                    Velocidad.append(float(ns3.text))
                                if ns3.tag == '{http://www.garmin.com/xmlschemas/ActivityExtension/v2}RunCadence':
                                    CadenciaBraceo.append(float(ns3.text))

    # Sensor-glitch patch: if a series is one sample short, pad it with NaN.
    for Variable in (Distancia, FrecuenciaCardiaca, Velocidad, CadenciaBraceo):
        if len(HoraISO) > len(Variable):
            Variable.append(np.nan)
    # No local-time reference is available in the file, so the timestamps
    # cannot be corrected to local time here.

    # Zero-fill any series whose length still differs from the time axis.
    # BUG FIX: the original did "Variable = []" which rebinds the loop
    # variable and leaves the real list untouched, so the DataFrame
    # constructor below raised on mismatched lengths. Mutating the list
    # in place (slice assignment) actually replaces its contents.
    for Variable in (Distancia, FrecuenciaCardiaca, Velocidad, CadenciaBraceo):
        if len(Variable) != len(HoraISO):
            Variable[:] = [0] * len(HoraISO)

    # Build the DataFrame with the timestamp as a sorted index.
    DataFrame = pd.DataFrame({
        'Hora':HoraISO,
        'Distancia':Distancia,
        'FrecuenciaCardiaca':FrecuenciaCardiaca,
        'Velocidad':Velocidad,
        'CadenciaBraceo':CadenciaBraceo}).set_index('Hora').sort_index()

    # Drop any column that carries no information (all zeros).
    for campo in DataFrame.columns:
        if (DataFrame[campo] == 0).all():
            DataFrame = DataFrame.drop([campo], axis=1)

    # Remove duplicated timestamps, keeping the first occurrence.
    DataFrame = DataFrame[~DataFrame.index.duplicated()]

    return Name, Type, DataFrame
import pygame
import time
import random

pygame.init()

# Window dimensions in pixels.
display_width = 800
display_height = 600

# RGB color constants.
black = (0,0,0)
white = (255,255,255)
red = (255,0,0)

# Player sprite width; used for edge-collision checks in game_loop.
car_width = 74

global_speed = 0 #speed everything falls at, increase to make things faster
# Incremented each time an enemy car scrolls off-screen; the code that would
# use it to raise global_speed is currently commented out in game_loop.
speed_counter = 0

gameDisplay = pygame.display.set_mode((display_width,display_height))
pygame.display.set_caption('RaceToMILK')
clock = pygame.time.Clock()

# Sprite assets, loaded once at start-up from the working directory.
carImg = pygame.image.load('racecar.png')
enemyCarImgYellow = pygame.image.load('racecarYellow.png')
enemyCarImgGreen = pygame.image.load('racecarGreen.png')
enemyCarImgBlue = pygame.image.load('racecarBlue.png')
billImg = pygame.image.load('bill.png')
def car(x, y):
    """Blit the player's car sprite with its top-left corner at (x, y)."""
    position = (x, y)
    gameDisplay.blit(carImg, position)
def enemyBlue(x, y):
    """Blit the blue enemy car sprite with its top-left corner at (x, y)."""
    position = (x, y)
    gameDisplay.blit(enemyCarImgBlue, position)
def enemyYellow(x, y):
    """Blit the yellow enemy car sprite with its top-left corner at (x, y)."""
    position = (x, y)
    gameDisplay.blit(enemyCarImgYellow, position)
def enemyGreen(x, y):
    """Blit the green enemy car sprite with its top-left corner at (x, y)."""
    position = (x, y)
    gameDisplay.blit(enemyCarImgGreen, position)
def bill(x, y):
    """Blit the Bill sprite with its top-left corner at (x, y)."""
    position = (x, y)
    gameDisplay.blit(billImg, position)
def things(thingx, thingy, thingw, thingh, color):
    """Draw a solid rectangular obstacle on the game display."""
    bounds = [thingx, thingy, thingw, thingh]
    pygame.draw.rect(gameDisplay, color, bounds)
def text_objects(text, font):
    """Render *text* in black with *font*; return (surface, bounding rect)."""
    rendered = font.render(text, True, black)
    bounding = rendered.get_rect()
    return rendered, bounding
def message_display(text):
    """Show *text* centered on screen for two seconds, then restart the game.

    Note: this never returns normally — it calls game_loop() recursively.
    """
    font = pygame.font.Font('freesansbold.ttf', 95)
    surface, rect = text_objects(text, font)
    rect.center = ((display_width/2),(display_height/2))
    gameDisplay.blit(surface, rect)
    pygame.display.update()
    time.sleep(2)
    game_loop()
def crash():
    """Handle a crash: show the crash message (which then restarts the game)."""
    message_display('You Crashed')
def game_loop():
    """Run one game session: spawn enemies/obstacles, poll input, redraw.

    Restarts itself recursively via crash() -> message_display(); state that
    must survive restarts (global_speed, speed_counter) lives at module level.
    """
    # Player start position: roughly centered horizontally, at the bottom.
    heroCarX = (display_width * 0.45)
    heroCarY = (display_height - 74) #car is 74x74
    global global_speed
    global speed_counter
    heroCarX_change = 0
    # Bill spawns close (~1 in 11 restarts) or far enough up that his
    # entrance is heavily delayed.
    if random.randrange(0,11) == 10:
        billY = -200
    else:
        billY = -2000
    # Bill enters from a random side and drifts diagonally across.
    bill_left_or_right = random.randrange(0,2)
    if bill_left_or_right == 0:
        billX = 0
    else:
        billX = display_width
    bill_speedY = 3 + global_speed
    bill_speedX = 3 + global_speed
    # Each enemy car starts progressively further off-screen and falls faster.
    enemyBlueX = random.randrange(0, display_width - 74) #img is 74x74
    enemyBlueY = -600
    enemyBlue_speed = 7 + global_speed
    enemyGreenX = random.randrange(0,display_width -74)
    enemyGreenY = -3100
    enemyGreen_speed = 10 + global_speed
    enemyYellowX = random.randrange(0,display_width -74)
    enemyYellowY = -6100
    enemyYellow_speed = 13 + global_speed
    # The black rectangle obstacle.
    thing_startx = random.randrange(0, display_width)
    thing_starty = -600
    thing_speed = 7
    thing_width = 100
    thing_height = 100
    gameExit = False
    hero_LR_Speed = 7 #speed of movement left or right
    while not gameExit:
        # Input handling. NOTE(review): there is no KEYUP handler, so once
        # a direction key is pressed the car keeps moving in that direction
        # until the opposite key is pressed — confirm this is intended.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT:
                    heroCarX_change = -hero_LR_Speed
                if event.key == pygame.K_RIGHT:
                    heroCarX_change = hero_LR_Speed
        heroCarX += heroCarX_change
        # Redraw the frame: background first, then every sprite.
        gameDisplay.fill(white)
        #things(thingx, thingy, thingw, thingh, color):
        things(thing_startx,thing_starty,thing_width,thing_height,black)
        thing_starty += thing_speed
        car(heroCarX,heroCarY)
        enemyBlue(enemyBlueX,enemyBlueY)
        enemyGreen(enemyGreenX,enemyGreenY)
        enemyYellow(enemyYellowX,enemyYellowY)
        bill(billX,billY)
        # Bill falls straight until partially on-screen, then drifts
        # horizontally toward the opposite side he entered from.
        billY += bill_speedY
        if bill_left_or_right == 0 and billY > -141:
            billX += bill_speedX
        if bill_left_or_right == 1 and billY > -141:
            billX -= bill_speedX
        # Respawn Bill once he leaves the bottom of the screen.
        if billY > display_height:
            bill_left_or_right = random.randrange(0,2)
            if bill_left_or_right == 0:
                billX = 0
            else:
                billX = display_width
            if random.randrange(0,11) == 10:
                billY = -200
            else:
                billY = -2000
        enemyBlueY += enemyBlue_speed
        enemyGreenY += enemyGreen_speed
        enemyYellowY += enemyYellow_speed
        # Driving off either edge of the road crashes (and restarts).
        if heroCarX > display_width - car_width or heroCarX < 0:
            crash()
        # Respawn each enemy at a new random column once it scrolls past
        # the bottom, and count the dodge.
        if enemyBlueY > display_height:
            enemyBlueY = 0 - 74
            enemyBlueX = random.randrange(0,display_width)
            speed_counter += 1
        if enemyGreenY > display_height:
            enemyGreenY = -3100
            enemyGreenX = random.randrange(0,display_width)
            speed_counter += 1
        if enemyYellowY > display_height:
            enemyYellowY = -6100
            enemyYellowX = random.randrange(0,display_width)
            speed_counter += 1
        if thing_starty > display_height:
            thing_starty = 0 - thing_height
            thing_startx = random.randrange(0,display_width)
        #bill.png is 55x142
        # NOTE(review): horizontal overlap with the obstacle only prints a
        # debug message and resets speed_counter — it does not call crash().
        # Looks like an unfinished collision handler; confirm intent.
        if heroCarY <= thing_starty + thing_height:
            if heroCarX > thing_startx and heroCarX < (thing_startx + thing_width) or (heroCarX + car_width) > thing_startx and (heroCarX + car_width) < (thing_startx + thing_width):
                print ('heroCarX crossover')
                #if speed_counter == 10:
                    #global_speed += 1
                speed_counter = 0
        pygame.display.update()
        # Cap the frame rate at 60 FPS.
        clock.tick(60)
# Script entry: run the game; when the loop exits, shut pygame down and quit.
# FIX: removed a stray "|" artifact that had been fused onto the quit() line.
game_loop()
pygame.quit()
quit()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.