text stringlengths 38 1.54M |
|---|
from django.contrib.auth.models import User
from django.db import models
from django.forms import ModelForm
from django.contrib.auth.signals import user_logged_in, user_logged_out
class MyCircle(models.Model):
    """A contact in a user's circle, linked to the owning auth User."""
    userC = models.ForeignKey(User)
    username = models.CharField(max_length=30)
    Uname = models.CharField(max_length=50)

    def __unicode__(self):
        # Bug fix: previously returned the model instance itself, which is
        # not a string; return the stored username instead.
        return self.username
class LoggedUser(models.Model):
    # Presence-tracking row: a LoggedUser record exists while the user has an
    # active session (created by login_user, removed by logout_user below).
    # `primary_key=True` makes this one row per user at most.
    user = models.ForeignKey(User, primary_key=True)

    def __unicode__(self):
        return self.user.username
def login_user(sender, request, user, **kwargs):
    """Signal handler: record *user* as currently logged in."""
    record = LoggedUser(user=user)
    record.save()
def logout_user(sender, request, user, **kwargs):
    """Signal handler: drop the LoggedUser row for *user*, if one exists."""
    try:
        record = LoggedUser.objects.get(user=user)
    except LoggedUser.DoesNotExist:
        return
    record.delete()
# Wire the handlers above to Django's auth signals so the LoggedUser table
# tracks the set of currently-authenticated users.
user_logged_in.connect(login_user)
user_logged_out.connect(logout_user)
class Chat(models.Model):
    # A single chat message sent by `user`, optionally addressed to a circle
    # member (`to_user`); `created` is stamped automatically on insert.
    created = models.DateTimeField(auto_now_add=True)
    user = models.ForeignKey(User)
    to_user = models.ForeignKey(MyCircle, null=True)
    message = models.CharField(max_length=100)

    def __unicode__(self):
        return self.message
class Notify(models.Model):
    """A short notification message addressed to a user."""
    user = models.ForeignKey(User)
    text = models.CharField(max_length=70)

    def __unicode__(self):
        # Bug fix: previously returned the model instance itself, which is
        # not a string; return the notification text instead.
        return self.text
import numpy as np
import matplotlib.pyplot as plt
def twoPtForwardDiff(x, y):
    """Approximate dy/dx with a two-point forward difference.

    Each point uses (f(x+h) - f(x)) / h; the last point falls back to a
    backward difference so the result has the same shape as y.
    """
    deriv = np.zeros(y.shape, float)
    deriv[:-1] = np.diff(y) / np.diff(x)
    # Backward difference at the right boundary.
    deriv[-1] = (y[-1] - y[-2]) / (x[-1] - x[-2])
    return deriv
def twoPtCenteredDiff(x, y):
    """Approximate dy/dx with a two-point centered difference.

    Interior points use (f(x+h) - f(x-h)) / (x[i+1] - x[i-1]); the endpoints
    fall back to one-sided differences so the result has the same shape as y.

    Fix: removed `dy = np.diff(y)` / `dx = np.diff(x)` — both were computed
    and never used (dead code).
    """
    dydx = np.zeros(y.shape, float)
    # Centered difference over the interior.
    dydx[1:-1] = (y[2:] - y[:-2]) / (x[2:] - x[:-2])
    # One-sided differences at the boundaries.
    dydx[0] = (y[1] - y[0]) / (x[1] - x[0])
    dydx[-1] = (y[-1] - y[-2]) / (x[-1] - x[-2])
    return dydx
def FourPtCenteredDiff(x, y):
    """Approximate dy/dx with a four-point (five-point stencil) centered
    difference for higher accuracy.

    Interior points use (f(x-2h) - 8f(x-h) + 8f(x+h) - f(x+2h)) / (12h).
    NOTE: the 12*(x[1]-x[0]) denominator assumes a uniformly spaced x grid.
    The two points nearest each boundary fall back to two-point differences.

    Fix: removed `dy = np.diff(y)` / `dx = np.diff(x)` — both were computed
    and never used (dead code).
    """
    dydx = np.zeros(y.shape, float)
    dydx[2:-2] = (y[0:-4] - 8*y[1:-3] + 8*y[3:-1] - y[4:]) / (12*(x[1] - x[0]))
    # Two-point differences near the boundaries.
    dydx[0] = (y[1] - y[0]) / (x[1] - x[0])
    dydx[1] = (y[2] - y[1]) / (x[2] - x[1])
    dydx[-2] = (y[-3] - y[-2]) / (x[-3] - x[-2])
    dydx[-1] = (y[-1] - y[-2]) / (x[-1] - x[-2])
    return dydx
#!/usr/bin/env python
# Python 2 script: prints INSERT statements that clone rows from a template
# date (COPYYEAR-01-01) onto DAY_COUNT consecutive dates starting Jan 2 of YEAR.
import sys
import datetime

if len(sys.argv) != 4:
    print "%s YEAR COPYYEAR NUMDAYS" % sys.argv[0]
    sys.exit(0)

YEAR = int(sys.argv[1])
COPYYEAR = int(sys.argv[2])
DAY_COUNT = int(sys.argv[3])
# NOTE(review): `description` is never referenced below — the "description"
# in the SQL is a column name, not this variable. Presumably leftover; verify.
description = """abcdefghijklmnopqrstuvwqyz.?{}-abcdefghijklmnopqrstuvwqyz.?{}abcdefghijklmnopqrstuvwqyz.?{}
abcdefghijklmnopqrstuvwqyz.?{}abcdefghijklmnopqrstuvwqyz.?{}abcdefghijklmnopqrstuvwqyz.?{}abcdefghijklmnopqrstuvwqyz.?{}"""
# 01 / 02 are octal integer literals — legal in Python 2 only.
date_iterator = datetime.date(YEAR, 01, 02)
delta = datetime.timedelta(days=1)
for i in range(DAY_COUNT):
    # The same template date is copied onto each successive target date.
    print "INSERT INTO ao_part_table select '%s', data, description from ao_part_table where eventdate = '%s-01-01';" \
        % (str(date_iterator), COPYYEAR)
    print "INSERT INTO co_part_table select '%s', data, description from ao_part_table where eventdate = '%s-01-01';" \
        % (str(date_iterator), COPYYEAR)
    print "INSERT INTO part_table select '%s', data, description from part_table where eventdate = '%s-01-01';" \
        % (str(date_iterator), COPYYEAR)
    date_iterator = date_iterator + delta
|
''' Convert prefix to infix '''
'''
Read exp from right to left
push operand to stack
if operator is encountered, then pop 2 operands from stack.
Concatenate the 2 operands with the operator in between and push it to stack
Repeat process until end of expression
'''
# IMPORTANT NOTE:
# EXPRESSION = OP1 + OPERATION + OP2
class Conversion(object):
    """Convert a prefix expression to a fully parenthesized infix expression.

    The expression is scanned right to left: operands are pushed on a stack;
    when an operator is met, two operands are popped and recombined as
    '(' + op1 + operator + op2 + ')', which is pushed back.
    """

    def __init__(self, exp):
        self.exp = exp
        self.stack = []

    def push(self, val):
        self.stack.append(val)

    def pop(self):
        if not self.isEmpty():
            return self.stack.pop()
        # Bug fix: a bare `raise` with no active exception produces
        # "RuntimeError: No active exception to re-raise"; raise a
        # meaningful error instead.
        raise IndexError("pop from empty stack")

    def isEmpty(self):
        return len(self.stack) == 0

    def isOperand(self, ch):
        # Single letters are operands; anything else is treated as an operator.
        return ch.isalpha()

    def prefix_to_infix(self):
        """Return the infix form of self.exp, or -1 if it is malformed."""
        for i in range(len(self.exp) - 1, -1, -1):
            ch = self.exp[i]
            if self.isOperand(ch):
                self.push(ch)
            else:
                # Pop up to two operands and rebuild EXPRESSION = OP1 op OP2.
                count = 0
                string = []
                while not self.isEmpty() and count < 2:
                    string.append(self.pop())
                    count += 1
                exp = ch.join(string)
                exp = '(' + exp + ')'
                self.push(exp)
        if len(self.stack) == 1:
            return self.pop()
        return -1
# Demo: print() with a single argument behaves identically on Python 2 and 3,
# unlike the original `print expr` statement, which is Python 2 only.
exp = '*+AB-CD'
c = Conversion(exp)
print(c.prefix_to_infix())
import logging
import os
import timeit
import unittest
import pandas as pd
import yaml
from mimesis.enums import Gender
from mimesis.schema import Field, Schema
from odoo.tests.common import TransactionCase
from odoo.tools.config import config
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
_logger = logging.getLogger(__name__)
_data_path = os.path.join(config.get('data_dir'), 'test_data.csv')
class TestGoogleDriveUpload(TransactionCase):
    """Integration test: generate a fake-data CSV and exercise uploads to
    Google Drive (root folder and subfolder, overwrite on/off, local delete).

    NOTE(review): the OAuth client id/secret below are credentials hard-coded
    into source control — they should be moved to configuration/secrets.
    """

    def setUp(self):
        super(TestGoogleDriveUpload, self).setUp()
        # Generate 1000 rows of fake personal data with mimesis and write
        # them to the CSV file that will be uploaded.
        _ = Field('en')
        schema_desc = (
            lambda: {
                'name': _('person.name'),
                'street': _('street_name'),
                'city': _('city'),
                'email': _('person.email'),
                'zip': _('zip_code'),
                'region': _('region'),
                'state': _('state'),
                'date_time': _('formatted_datetime'),
                'company_name': _('company')
            })
        schema = Schema(schema=schema_desc)
        result = schema.create(iterations=1000)
        pd.DataFrame.from_dict(result).to_csv(_data_path)
        # Store the Drive credentials through the settings wizard.
        self.cc = self.env['res.config.settings'].create({
            'google_drive_client_id': '321350850398-ub1vd55bmrjmh2oh96oosi0hn1cliufh.apps.googleusercontent.com',
            'google_drive_client_secret': '0jRNrUmCGlZaJQoTSicZ0OcA'
        })
        self.cc.set_values()
        # The export record under test; target_folder_id left unset so the
        # first uploads go to the Drive root.
        self.gdrive = self.env['odoo_file_export.google_drive'].create({
            'name': 'Prueba',
            'file': _data_path,
            #'target_folder_id': "1JU6WrdUUfJR66x3Ct4mgn0ReUWRGDDDd",
            'target_file_name': "pruebatestoddogoogledrive"
        })

    def off_test_upload_to_gdrive(self):
        # Presumably disabled on purpose: the "off_" prefix keeps it from
        # matching unittest's test_* discovery since it hits the live Drive
        # API — confirm before renaming to enable.
        gauth = GoogleAuth(settings_file=self.cc.gdrive_settings_file_name())
        d = GoogleDrive(gauth)
        # First delete any leftover remote files so we don't get a false positive.
        files = d.ListFile({'q': "title = 'pruebatestoddogoogledrive' and trashed=false"}).GetList()
        for f in files: f.Delete()
        # Upload to root, keep the local file, overwrite the destination.
        _logger.debug("TEST: Subir fichero al raiz sin borrado local y sobreescribir destino")
        self.gdrive.upload(remove_local_data_file=False)
        assert os.path.exists(self.gdrive.file), \
            "Upload deberia haber respetado el fichero de datos local, no lo ha encontrado"
        files = d.ListFile({'q': "title = 'pruebatestoddogoogledrive' and 'root' in parents and trashed=false"}).GetList()
        assert len(files) == 1, \
            "No coinciden los ficheros subidos. E(1) R({})".format(len(files))
        # A second upload with overwrite must not create a duplicate.
        _logger.debug("TEST: comoprobamos que sobreescribe el destrino")
        self.gdrive.upload(remove_local_data_file=False)
        files = d.ListFile({'q': "title = 'pruebatestoddogoogledrive' and 'root' in parents and trashed=false"}).GetList()
        assert len(files) == 1, \
            "No coincide los ficheros en gdrive despues de dos upload con overwrite=true. E(1) R({})".format(
                len(files)
            )
        # With overwrite disabled a second copy should appear.
        _logger.debug("TEST: subir fichero al raiz sin borrado local y sin sobreescribir destino")
        self.gdrive.upload(remove_local_data_file=False, overwrite=False)
        files = d.ListFile({'q': "title = 'pruebatestoddogoogledrive' and 'root' in parents and trashed=false"}).GetList()
        assert len(files) == 2, \
            "No coincide los ficheros gdrive. E(2) R({}). Found:{}".format(
                len(files),
                [(x['id'], x['title']) for x in files]
            )
        # Now target a freshly created non-root folder.
        _logger.debug("TEST: subir fichero a carpeta no rai<")
        folder = d.CreateFile({'title': 'pruebatestoddogoogledrive_folder','mimeType' : 'application/vnd.google-apps.folder'})
        folder.Upload()
        self.gdrive.target_folder_id = folder['id']
        self.gdrive.upload(remove_local_data_file=False)
        files = d.ListFile({'q': "title = 'pruebatestoddogoogledrive' and '{}' in parents and trashed=false".format(folder['id'])}).GetList()
        assert 1 == len(files), "No ha creado bien el fichero en el directorio. E(1). R({})".format(len(files))
        # Same folder, no overwrite: a second copy should appear.
        _logger.debug("TEST: subir fichero a carpeta no raiz sin sobrescribir destrino y sin borrado local")
        self.gdrive.upload(remove_local_data_file=False, overwrite=False)
        files = d.ListFile({'q': "title = 'pruebatestoddogoogledrive' and '{}' in parents and trashed=false".format(folder['id'])}).GetList()
        assert 2 == len(files), "No ha creado bien el fichero en el directorio. E(2). R({})".format(len(files))
        # Default upload() deletes the local source file afterwards.
        _logger.debug("TEST: borrado local")
        self.gdrive.upload()
        assert not os.path.exists(_data_path), "No ha borrado el fichero de origen"
|
import numpy as np
from dashboard.bokeh.helper import get_palette
from bokeh.models import LinearColorMapper, ColorBar
from bokeh.models import HoverTool, PrintfTickFormatter, ColumnDataSource
from bokeh.plotting import Figure
class Patch:
    """Builds the 2x2 per-amplifier (AMP) heatmap figure for the dashboard."""

    def set_amp(self, z_value):
        """Pick per-value text labels and a colorbar printf format.

        Values <= -999 are sentinel "missing" markers and are mapped to NaN.
        The label precision is chosen from the decade range of the valid
        values.

        Returns:
            (ztext, cbarformat): formatted value strings and a printf format
            string for the colorbar ticks.
        """
        dz = z_value
        # removing NaN in ranges: map -999 sentinels to NaN
        dz_valid = [x if x > -999 else np.nan for x in dz]
        dzmax, dzmin = np.nanmax(dz_valid), np.nanmin(dz_valid)
        if np.log10(dzmax) > 4 or np.log10(dzmin) < -3:
            # Very large or very small magnitudes: scientific notation.
            ztext = ['{:4.2e}'.format(i) for i in dz_valid]
            cbarformat = "%2.1e"
        elif np.log10(dzmin) > 0:
            ztext = ['{:5.2f}'.format(i) for i in dz_valid]
            cbarformat = "%4.2f"
        else:
            ztext = ['{:6.2f}'.format(i) for i in dz_valid]
            cbarformat = "%5.2f"
        return ztext, cbarformat

    def plot_amp(self, dz, refexp, name="", font_size="1.2vw", description="", wrg=None):
        """Build the 2x2 AMP heatmap Figure.

        Args:
            dz: four AMP values (-999 marks missing data).
            refexp: four reference values shown in the tooltip; the color
                encodes value - reference.
            name: plot title; "_AMP" is stripped for the tooltip label.
            font_size: CSS font size for title/axis text.
            description: x-axis label.
            wrg: (low, high) color-mapper range. Bug fix: the default was a
                mutable list (wrg=[]); None now stands in for it and is
                normalized below, preserving the old behavior.

        Returns:
            A bokeh Figure with the colored rect grid, labels and colorbar.
        """
        if wrg is None:
            wrg = []
        ztext, cbarformat = self.set_amp(dz)
        # Centers of the 2x2 grid cells.
        dx = [0, 1, 0, 1]
        dy = [1, 1, 0, 0]
        cmap_tooltip = """
            <div>
                <div>
                    <span style="font-size: 1vw; font-weight: bold; color: #303030;">AMP: </span>
                    <span style="font-size: 1vw; color: #515151;">@amp_number</span>
                </div>
                <div>
                    <span style="font-size: 1vw; font-weight: bold; color: #303030;">counts: </span>
                    <span style="font-size: 1vw; color: #515151">@ztext</span>
                </div>
                <div>
                    <span style="font-size: 1vw; font-weight: bold; color: #303030;">Reference: </span>
                    <span style="font-size: 1vw; color: #515151;">@ref</span>
                </div>
            </div>
        """.replace("counts:", name.replace("_AMP", "")+":")
        hover = HoverTool(tooltips=cmap_tooltip)
        p = Figure(title=name, tools=[hover],
                   x_range=list([-0.5, 1.5]),
                   y_range=list([-0.5, 1.5]),
                   plot_width=450, plot_height=400)
        p.xaxis.axis_label_text_font_size = font_size
        p.legend.label_text_font_size = font_size
        p.title.text_font_size = font_size
        p.xaxis.axis_label = description
        # Diverging palette centered on zero difference from the reference.
        cmap = get_palette("RdBu_r")
        mapper = LinearColorMapper(palette=cmap, low=wrg[0], high=wrg[1],
                                   nan_color="darkgrey")
        formatter = PrintfTickFormatter(format=cbarformat)
        color_bar = ColorBar(color_mapper=mapper, major_label_text_align='left',
                             major_label_text_font_size='10pt', label_standoff=2, location=(0, 0),
                             formatter=formatter, title="(Val-Ref)", title_standoff=15,
                             title_text_baseline="alphabetic")
        p.add_layout(color_bar, 'right')
        # Strip all grid/axis decoration; only the x-axis label remains.
        # (Fix: removed the no-op statement `p.axis.clear` — an attribute
        # access with no call, which had no effect.)
        p.grid.grid_line_color = None
        p.outline_line_color = None
        p.axis.minor_tick_line_color = None
        p.axis.major_label_text_font_size = '0pt'
        p.yaxis.major_label_text_font_size = '0pt'
        p.xaxis.major_tick_line_color = None
        p.xaxis.minor_tick_line_color = None
        p.yaxis.major_tick_line_color = None
        p.yaxis.minor_tick_line_color = None
        p.yaxis.visible = False
        p.xaxis.visible = True
        zvalid = [x if x > -999 else np.nan for x in dz]
        source = ColumnDataSource(
            data=dict(
                x=dx,
                y=dy,
                z=dz,
                zvalid=zvalid,
                ref=["{:.2f}".format(x) for x in refexp],
                zdiff=np.array(zvalid) - np.array(refexp),
                y_offset1=[i+0.15 for i in dy],
                y_offset2=[i-0.10 for i in dy],
                amp=['AMP %s' % i for i in range(1, 5)],
                amp_number=['%s' % i for i in range(1, 5)],
                ztext=ztext,
            )
        )
        text_props = {
            "source": source,
            "angle": 0,
            "color": "black",
            "text_color": "black",
            "text_align": "center",
            "text_baseline": "middle"}
        # Colored cells plus two text layers (AMP label and value).
        p.rect("x", "y", .98, .98, 0, source=source,
               fill_color={'field': 'zdiff', 'transform': mapper}, fill_alpha=0.9)
        p.text(x="x", y="y_offset1", text="amp",
               text_font_size="2vw", **text_props)
        p.text(x="x", y="y_offset2", text="ztext",
               text_font_style="bold", text_font_size="2.5vw", **text_props)
        return p
|
import json
from copy import copy
import numpy as np
from batch_generator.utils import get_random_paths
from train_utils.utils import read_json
class DirIterator:
    """Iterates items stored across a set of data files, optionally shuffling
    both the file order and the items within each file on every pass."""

    def __init__(self, fnames, reader=read_json, shuffle=True):
        self.fnames = fnames
        self.reader = reader
        self.shuffle = shuffle
        self.gen = None

    @staticmethod
    def from_data_folder(data_folder, file_limit=None, reader=read_json, shuffle=True):
        """Build a DirIterator over up to `file_limit` random paths under `data_folder`."""
        paths = get_random_paths(data_folder, file_limit)
        return DirIterator(paths, reader, shuffle)

    def __iter__(self):
        # Restart the underlying generator on every explicit iteration.
        self.gen = self.helper()
        return self

    def __next__(self):
        if self.gen is None:
            self.gen = self.helper()
        return next(self.gen)

    def helper(self):
        """Yield every item from every file, shuffling when configured."""
        paths = copy(self.fnames)
        if self.shuffle:
            np.random.shuffle(paths)
        for path in paths:
            records = self.reader(path)
            if self.shuffle:
                np.random.shuffle(records)
            yield from records
def dir_iterator(data_folder, file_limit, make_validation=False, val_size=1):
    """Create iterator(s) over the files in `data_folder`.

    Returns a single DirIterator, or a (train, val) pair when
    `make_validation` is True — the last `val_size` files become validation.
    """
    if not make_validation:
        return DirIterator.from_data_folder(data_folder, file_limit)
    fnames = get_random_paths(data_folder, file_limit)
    train_iter = DirIterator(fnames[:-val_size])
    val_iter = DirIterator(fnames[-val_size:])
    return train_iter, val_iter
|
# A - 添字 (index)
# https://atcoder.jp/contests/abc041/tasks/abc041_a
# Print the n-th (1-based) character of the input string.
s = input()
n = int(input())
print(s[n - 1])
|
from datetime import datetime
from django.contrib.auth.models import User
from django.shortcuts import render_to_response
from django.contrib.auth.decorators import login_required
from django.template import RequestContext
from django.http import HttpResponse
from casetracker.models import Filter, Category, Status, Priority
from django.db.models import Q
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from ashandapp.models import FilterProfile
from ashandapp.templatetags.filter_tags import case_column_plain
from patient.models import Patient
@login_required
def get_json_for_paging(request):
    # Builds a DataTables-style JSON payload ("aaData") of case rows for the
    # user's active filter, assembled by hand as a string.
    #
    # NOTE(review): issues to confirm/fix:
    #  - if no 'filter' GET param arrives, `filter_id` is never bound and the
    #    Filter lookup relies on the bare `except` to swallow the NameError;
    #  - every row/cell list ends with a trailing comma, which is not valid
    #    strict JSON — consider building lists and using json.dumps instead;
    #  - `.title()` is called on what appears to be a User instance, not a str;
    #  - `start_index`/`length` are parsed but pagination is not applied yet.
    user = request.user
    display_filter = None
    start_index = None
    length = None
    try:
        profile = FilterProfile.objects.get(user = user)
    except ObjectDoesNotExist:
        #make the new case profile
        profile = FilterProfile()
        profile.user = user
        profile.filter = Filter.objects.all()[0]
        profile.last_filter = Filter.objects.all()[0] #this is a nasty hack, as we're assuming id 1 is the reflexive one
    if len(request.GET.items()) > 0:
        # Pull filter/paging parameters from the query string.
        for item in request.GET.items():
            if item[0] == 'filter':
                filter_id = item[1]
            if item[0] == 'start':
                start_index = item[1]
            if item[0] == 'iDisplayLength':
                length = item[1]
        try:
            display_filter = Filter.objects.get(id=filter_id)
        except:
            pass
    else:
        # No parameters: fall back to the last filter the user viewed.
        display_filter = profile.last_filter
    # Record activity on the profile before rendering.
    profile.last_login = datetime.utcnow()
    profile.last_login_from = request.META['REMOTE_ADDR']
    profile.last_filter = display_filter
    profile.save()
    filter = display_filter
    # add pagination later...
    display_filter = display_filter.get_filter_queryset()
    # add conditional to only work if display and start not null
    # try:
    #     display_filter = display_filter.get_filter_queryset()[start_index:start_index + length]
    # except:
    #     display_filter = display_filter.get_filter_queryset()[start_index:]
    #build json_string with information from data
    json_string = "{ \"aaData\": ["
    #adding user
    for case in display_filter:
        # First cell links to the case's careteam.
        careteam_url = reverse('view-careteam', kwargs={"careteam_id": case.careteam_set.get().id})
        json_string += "["
        json_string += "\"<a href = '%s'>%s</a>\"," % (careteam_url, case.careteam_set.get().patient.user.title())
        # One cell per configured grid column, truncated to 45 chars.
        for col in filter.gridpreference.get_display_columns:
            table_entry = case_column_plain(case, col.name)
            if len(table_entry) > 45:
                table_entry = table_entry[0:45] + "..."
            # terribly hardcoded...quick fix to add links
            if (col.name == "description"):
                json_string += "\"<a href = 'case/%s'>%s</a>\"," % (case.id, table_entry)
            else:
                json_string += "\"%s\"," % table_entry
        json_string += "],"
    #terribly inefficient, but quick fix to allow sorting of links....
    # json_string += " ], \"aoColumns\": [ null,"
    # for col in filter.gridpreference.display_columns.all():
    #     json_string += "{\"sType\": \"html\"},"
    #closing json_string
    json_string += "] }"
    return HttpResponse(json_string)
def dashboard_case_filter(request, filter_id):
    # Renders a simple table of cases for a saved filter, optionally grouped
    # by a "groupBy" query-string column.
    #
    # NOTE(review): defects to confirm/fix:
    #  - QuerySet.order_by returns a NEW queryset; both qset.order_by(...)
    #    calls below discard the result, so no ordering is actually applied;
    #  - `group_by_col` (and possibly `model_class`) are unbound when
    #    "groupBy" is absent or names an unhandled column → NameError;
    #  - `subq` is computed per heading but never used, and `qset_dict` is
    #    never populated — the grouping loop currently has no visible effect;
    #  - `group_by` and `showfilter` are assigned but never used.
    context = {}
    showfilter = False
    filter = Filter.objects.get(id=filter_id)
    gridpref = filter.gridpreference
    group_by = None
    for key, value in request.GET.items():
        if key == "groupBy":
            group_by_col = value
    split_headings = True
    qset = filter.get_filter_queryset()
    # Date-like and description groupings sort in place (no heading split);
    # the rest map the column to the model whose rows become headings.
    if group_by_col.count('_date') > 0:
        split_headings = False
        qset.order_by(group_by_col)
    elif group_by_col == 'description':
        split_headings = False
        qset.order_by('description')
    elif group_by_col == 'assigned_to' or group_by_col == 'closed_by' or group_by_col == 'opened_by'\
        or group_by_col == 'last_edit_by' or group_by_col == 'resolved_by' or group_by_col == 'last_event_by':
        model_class = User
    elif group_by_col == 'patient':
        model_class = Patient
    elif group_by_col == 'status':
        model_class = Status
    elif group_by_col == 'priority':
        model_class = Priority
    elif group_by_col == 'category':
        model_class = Category
    qset_dict = {}
    if split_headings:
        heading_keys = model_class.objects.all().distinct()
        for key in heading_keys:
            #http://www.nomadjourney.com/2009/04/dynamic-django-queries-with-kwargs/
            #dynamic django queries baby
            #kwargs = { 'deleted_datetime__isnull': True }
            #args = ( Q( title__icontains = 'Foo' ) | Q( title__icontains = 'Bar' ) )
            #entries = Entry.objects.filter( *args, **kwargs )
            if group_by_col == 'patient':
                ptq = Q(patient=key)
                subq = qset.filter(ptq)
            else:
                kwargs = {str(group_by_col): key, }
                subq = qset.filter(**kwargs)
    else:
        pass
    context['filter'] = filter
    context['gridpref'] = gridpref
    context['filter_cases'] = qset
    template_name='casetracker/filter/filter_simpletable.html'
    return render_to_response(template_name, context, context_instance=RequestContext(request))
@login_required
def view_caselist_fbstyle(request, filter_id):
    # NOTE(review): this view appears unfinished — it reads the "sort"
    # parameter into `sorting` but never uses it, and returns None, which
    # Django rejects (views must return an HttpResponse).
    context = {}
    for key, value in request.GET.items():
        if key == "sort":
            sorting = value
|
"""The iotawatt integration."""
from datetime import timedelta
import logging
from typing import Dict, List
from httpx import AsyncClient
from iotawattpy.iotawatt import Iotawatt
import voluptuous as vol
from homeassistant.components.sensor import SensorEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_SCAN_INTERVAL
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
)
from .const import (
COORDINATOR,
DEFAULT_ICON,
DEFAULT_SCAN_INTERVAL,
DOMAIN,
IOTAWATT_API,
SIGNAL_ADD_DEVICE,
)
CONFIG_SCHEMA = vol.Schema({DOMAIN: vol.Schema({})}, extra=vol.ALLOW_EXTRA)
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["sensor"]
async def async_setup(hass: HomeAssistant, config: dict):
    """Set up the iotawatt component."""
    # Ensure a per-domain storage dict exists without clobbering existing data.
    if DOMAIN not in hass.data:
        hass.data[DOMAIN] = {}
    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Set up iotawatt from a config entry."""
    polling_interval = entry.options.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)
    session = AsyncClient()
    # Credentials are optional: fall back to unauthenticated access when the
    # entry carries no username.
    if "username" in entry.data.keys():
        api = Iotawatt(
            entry.data["name"],
            entry.data["host"],
            session,
            entry.data["username"],
            entry.data["password"],
        )
    else:
        api = Iotawatt(
            entry.data["name"],
            entry.data["host"],
            session,
        )
    coordinator = IotawattUpdater(
        hass,
        api=api,
        name="IoTaWatt",
        update_interval=polling_interval,
    )
    # The first refresh must succeed before the entry counts as ready.
    await coordinator.async_refresh()
    if not coordinator.last_update_success:
        raise ConfigEntryNotReady
    hass.data[DOMAIN][entry.entry_id] = {
        COORDINATOR: coordinator,
        IOTAWATT_API: api,
    }
    # Forward the entry to each platform (currently only "sensor").
    for component in PLATFORMS:
        _LOGGER.info(f"Setting up platform: {component}")
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(entry, component)
        )
    return True
class IotawattUpdater(DataUpdateCoordinator):
    """Class to manage fetching update data from the IoTaWatt Energy Device."""

    def __init__(self, hass: HomeAssistant, api: str, name: str, update_interval: int):
        """Initialize IotaWattUpdater object."""
        # NOTE(review): the `api` annotation says str, but async_setup_entry
        # passes an Iotawatt client instance — confirm and correct the hint.
        self.api = api
        # Sensors already announced to the sensor platform, keyed by name.
        self.sensorlist: Dict[str, List[str]] = {}
        super().__init__(
            hass=hass,
            logger=_LOGGER,
            name=name,
            update_interval=timedelta(seconds=update_interval),
        )

    async def _async_update_data(self):
        """Fetch sensors from IoTaWatt device."""
        await self.api.update()
        sensors = self.api.getSensors()
        for sensor in sensors["sensors"]:
            # Announce sensors we haven't seen before so the sensor platform
            # can create entities for them.
            if sensor not in self.sensorlist:
                to_add = {
                    "entity": sensor,
                    "mac_address": sensors["sensors"][sensor].hub_mac_address,
                    "name": sensors["sensors"][sensor].getName(),
                }
                async_dispatcher_send(self.hass, SIGNAL_ADD_DEVICE, to_add)
                self.sensorlist[sensor] = sensors["sensors"][sensor]
        return sensors
class IotaWattEntity(CoordinatorEntity, SensorEntity):
    """Defines the base IoTaWatt Energy Device entity."""

    def __init__(self, coordinator: IotawattUpdater, entity, mac_address, name):
        """Initialize the IoTaWatt Entity."""
        super().__init__(coordinator)
        # Protected attributes may be read by platform subclasses — keep names stable.
        self._entity = entity
        self._name = name
        self._icon = DEFAULT_ICON
        self._mac_address = mac_address

    @property
    def unique_id(self) -> str:
        """Return a unique, Home Assistant friendly identifier for this entity."""
        # NOTE(review): this uses the hub MAC, so two entities from the same
        # hub would collide — confirm uniqueness is ensured upstream.
        return self._mac_address

    @property
    def name(self) -> str:
        """Return the name of the entity."""
        return self._name

    @property
    def icon(self):
        """Return the icon for the entity."""
        return self._icon
from typing import List  # Fix: `List` was referenced but never imported (NameError).


class Solution:
    def sortColors(self, nums: List[int]) -> None:
        """
        Sort nums (values 0/1/2) in place using gnome sort.

        Do not return anything, modify nums in-place instead.
        (The list is also returned for convenience; existing callers may
        rely on that, so the return is kept.)
        """
        pt = 0
        size = len(nums)
        while pt < size - 1:
            if nums[pt] > nums[pt+1]:
                # Out of order: swap and step back to re-check the new pair.
                nums[pt], nums[pt+1] = nums[pt+1], nums[pt]
                pt -= 1
                if pt < 0:
                    pt = 0
            else:
                pt += 1
        return nums
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import re
import util
import random
import embeddings_util
import nltk
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
torch.manual_seed(1)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
EMBEDDING_DIM = 50
def get_doc_embedding(text):
    """Return the document embedding for `text`.

    The embedding is the mean over words of (word embedding * tf-idf);
    out-of-vocabulary words contribute zero vectors.

    Relies on module-level globals: vocab, device, tfidf_matrix, word_to_ix,
    doc_to_ix, EMBEDDING_DIM (read-only here, so no `global` statements).
    """
    text = text.lower()
    words = text.split()
    vectors = []
    for w in words:
        if w in vocab:
            weight = tfidf_matrix[doc_to_ix[text], word_to_ix[w]]
            scaled = [component * weight for component in vocab[w]]
            vectors.append(torch.tensor(scaled, dtype=torch.float, device=device))
        else:
            vectors.append(torch.zeros(EMBEDDING_DIM, device=device, dtype=torch.float))
    stacked = torch.cat(vectors).view(len(words), -1)
    return stacked.sum(dim=0) / stacked.shape[0]
def text_similarity(text1, text2):
    """Cosine similarity between the document embeddings of two texts."""
    similarity = nn.CosineSimilarity(dim=0)
    return similarity(get_doc_embedding(text1), get_doc_embedding(text2))
def get_mean_score_pair(data, score_function):
    """Average of score_function over all (lr1, lr2) pairs in data."""
    total = sum(score_function(first, second) for first, second in data)
    return total / len(data)
def get_next_lr(given_lr, lr_list, target_score, score_function):
    """Return the lr in lr_list whose score against given_lr is closest to
    target_score (or "" when lr_list is empty).

    Bug fix: the best-difference tracker was initialised to 1.0, so whenever
    every candidate had |score - target| >= 1.0 the function silently
    returned "" instead of the closest candidate. Initialise to infinity.
    """
    next_lr = ""
    score_diff = float('inf')
    for lr in lr_list:
        score = score_function(given_lr, lr)
        if abs(score - target_score) < score_diff:
            score_diff = abs(score - target_score)
            next_lr = lr
    return next_lr
def create_collection(collection_size, start_lr, lr_list, target_score, score_function):
    """Greedily grow a collection of `collection_size` items from `start_lr`,
    each step appending the lr from lr_list closest to target_score."""
    collection = [start_lr]
    current = start_lr
    while len(collection) < collection_size:
        current = get_next_lr(current, lr_list, target_score, score_function)
        collection.append(current)
    return collection
if __name__ == "__main__":
#Loading glove word embeddings
vocab = embeddings_util.load_glove()
print("Loaded glove embeddings")
#Loading collections
df = util.get_processed_data("../data/collections.csv", False)
collections = util.get_collections(df)
documents = util.get_texts_from_collections(collections)
print("Collections created")
#Processing documents to remove words of length 1
new_documents = []
for doc in documents:
new_doc = ""
for word in doc.split():
if len(word) > 1:
new_doc = word+" "
if new_doc.strip()!="":
new_documents.append(new_doc.strip().lower())
documents = new_documents
#Runnign tftdf on documents
tfidf_matrix, doc_to_ix, word_to_ix = embeddings_util.run_tfidf(documents)
print("TDIDF computation complete")
#Collecting learning resource pairs which occur consecutively in collections
lr_pairs = []
for i in range(len(documents)-1):
lr1 = documents[i]
lr2 = documents[i+1]
lr_pairs.append((lr1, lr2))
print("Created lr pairs from collections")
score_function = text_similarity
#Computing mean of such pairs. (Which is also similarity value at peak of gaussian distribution)
mean = get_mean_score_pair(lr_pairs, text_similarity)
print("Target score computed")
#Creating collections using the mean computed above
lr_list = documents
start_lr = lr_list[random.randint(0,len(lr_list))]
new_collection = create_collection(3, start_lr, lr_list, mean, text_similarity)
print("New collection: ",new_collection) |
class IRespond:
    """Empty base class for response objects such as Kline."""
    pass


class Kline(IRespond):
    """One candlestick (kline) for an instrument over [open_time, close_time]."""

    def __init__(self, open_time, close_time, open_dt, close_dt, open, close, high, low, vol, instrument):
        self.open_time = open_time
        self.close_time = close_time
        self.open_dt = open_dt
        self.close_dt = close_dt
        self.open = open
        self.close = close
        self.high = high
        self.low = low
        self.vol = vol
        self.instrument = instrument

    def __str__(self):
        # NOTE(review): `low` and `instrument` are omitted from this format —
        # confirm that is intentional before adding them.
        return "Open_time: {}, Open_dt: {}, Close_dt: {} Open: {}, Close: {}, High: {}, Vol : {}".format(
            self.open_time, self.open_dt, self.close_dt,
            self.open, self.close, self.high, self.vol)

    def to_dict(self):
        """Return the numeric OHLCV fields as a dict of floats."""
        return {
            "open": float(self.open),
            "close": float(self.close),
            "high": float(self.high),
            "low": float(self.low),
            "vol": float(self.vol),
        }
|
#!/usr/bin/env python
#
# This code was created by Richard Campbell '99 (ported to Python/PyOpenGL by John Ferguson 2000)
#
# The port was based on the PyOpenGL tutorial module: dots.py
#
# If you've found this code useful, please let me know (email John Ferguson at hakuin@voicenet.com).
#
# See original source and C based tutorial at http://nehe.gamedev.net
#
# Note:
# -----
# This code is not a good example of Python and using OO techniques. It is a simple and direct
# exposition of how to use the Open GL API in Python via the PyOpenGL package. It also uses GLUT,
# which in my opinion is a high quality library in that it makes my work simpler. Due to using
# these APIs, this code is more like a C program using function based programming (which Python
# is in fact based upon, note the use of closures and lambda) than a "good" OO program.
#
# To run this code get and install OpenGL, GLUT, PyOpenGL (see http://www.python.org), and PyNumeric.
# Installing PyNumeric means having a C compiler that is configured properly, or so I found. For
# Win32 this assumes VC++, I poked through the setup.py for Numeric, and chased through disutils code
# and noticed what seemed to be hard coded preferences for VC++ in the case of a Win32 OS. However,
# I am new to Python and know little about disutils, so I may just be not using it right.
#
# BTW, since this is Python make sure you use tabs or spaces to indent, I had numerous problems since I
# was using editors that were not sensitive to Python.
#
# Modified on May 2nd,2004 by Travis Wells to fix some GLUT issues and missing import
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
from PIL import Image
import sys
import numpy
import ast
# Some api in the chain is translating the keystrokes to this octal string
# so instead of saying: ESCAPE = 27, we use the following.
# NOTE(review): '\038' and '\039' are NOT valid octal escapes — Python parses
# them as '\03' followed by '8'/'9' (two characters); confirm the intended
# key codes for UP and RIGHT.
ESCAPE = '\033'
LEFT = '\037'
UP = '\038'
RIGHT = '\039'
DOWN = '\040'
# Number of the glut window.
window = 0
# Corner positions keyed by letter: two unit squares in the z=0 plane
# (a-d and e-h hold identical coordinates here).
vertices = {}
vertices['a'] = [-1.0, 1.0, 0.0]
vertices['b'] = [1.0, 1.0, 0.0]
vertices['c'] = [-1.0, -1.0, 0.0]
vertices['d'] = [1.0, -1.0, 0.0]
vertices['e'] = [-1.0, 1.0, 0.0]
vertices['f'] = [1.0, 1.0, 0.0]
vertices['g'] = [-1.0, -1.0, 0.0]
vertices['h'] = [1.0, -1.0, 0.0]
# NOTE(review): vNames omits 'h', so vertices2['h'] is never shifted below —
# confirm that is intentional.
vNames = ['a','b','c','d','e','f','g']
currentV = 0
currentM = 1
pickV = 0.1
textures = []       # GL texture ids filled in by LoadTextures
ready = False       # switches drawing from colored quads to textured quads
time = 0            # current texture frame index
sentido = 0         # animation direction flag
# Second vertex set: same shape shifted up 2.5 and forward 0.01.
vertices2 = {}
vertices2['a'] = [-1.0, 1.0, 0.0]
vertices2['b'] = [1.0, 1.0, 0.0]
vertices2['c'] = [-1.0, -1.0, 0.0]
vertices2['d'] = [1.0, -1.0, 0.0]
vertices2['e'] = [-1.0, 1.0, 0.0]
vertices2['f'] = [1.0, 1.0, 0.0]
vertices2['g'] = [-1.0, -1.0, 0.0]
vertices2['h'] = [1.0, -1.0, 0.0]
for i in vNames:
    vertices2[i][1]+=2.5
    vertices2[i][2]+=0.01
vertices_actual = vertices
# A general OpenGL initialization function. Sets all of the initial parameters.
def InitGL(Width, Height):    # We call this right after our OpenGL window is created.
    """One-time GL state setup plus loading of the Fractal/Julia*.bmp textures."""
    glClearColor(0.0, 0.0, 0.0, 0.0)    # This Will Clear The Background Color To Black
    glClearDepth(1.0)                   # Enables Clearing Of The Depth Buffer
    glDepthFunc(GL_LESS)                # The Type Of Depth Test To Do
    glEnable(GL_DEPTH_TEST)             # Enables Depth Testing
    glShadeModel(GL_SMOOTH)             # Enables Smooth Color Shading
    # Build the 100 animation-frame file names (xrange: Python 2 only).
    images = []
    for x in xrange(1,101):
        images.append("Fractal/Julia"+ str(x) + ".bmp")
    '''images = [
        "Fractal/Julia1.bmp",
        "Fractal/Julia2.bmp",
        "Fractal/Julia3.bmp",
    ]'''
    LoadTextures(images)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()                    # Reset The Projection Matrix
    # Calculate The Aspect Ratio Of The Window
    gluPerspective(45.0, float(Width)/float(Height), 0.1, 100.0)
    glMatrixMode(GL_MODELVIEW)
# The function called when our window is resized (which shouldn't happen if you enable fullscreen, below)
def ReSizeGLScene(Width, Height):
    """GLUT resize callback: rebuild the viewport and projection matrix."""
    if Height == 0:    # Prevent A Divide By Zero If The Window Is Too Small
        Height = 1
    glViewport(0, 0, Width, Height)    # Reset The Current Viewport And Perspective Transformation
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    gluPerspective(45.0, float(Width)/float(Height), 0.1, 100.0)
    glMatrixMode(GL_MODELVIEW)
def DrawColorQuad(v1, v2, v3, v4, color):
    """Draw one flat-colored quad, then restore the GL color to white."""
    glBegin(GL_QUADS)
    glColor3f(color[0], color[1], color[2])
    for corner in (v1, v2, v3, v4):
        glVertex3f(corner[0], corner[1], corner[2])
    glEnd()
    # Reset so later draws are not tinted by this quad's color.
    glColor3f(1.0, 1.0, 1.0)
def DrawTextureQuad(v1, v2, v3, v4, t):
    """Draw one quad textured with textures[t].

    NOTE(review): the active texture unit wraps at 8 (GL_TEXTURE0+(t%8)) but
    the texcoord calls use GL_TEXTURE0+t unwrapped — confirm behavior for
    t >= 8.
    """
    # print 'Using buffer %s with texture %s' % (buffers[t], textures[t])
    # glBindFramebuffer(GL_FRAMEBUFFER, buffers[t])
    glActiveTexture(GL_TEXTURE0+(t%8))
    glEnable(GL_TEXTURE_2D)
    glBindTexture(GL_TEXTURE_2D, textures[t])
    glBegin(GL_QUADS)                       # Start Drawing The Quad
    glMultiTexCoord2f(GL_TEXTURE0+t, 0.0, 1.0); glVertex3f(v1[0], v1[1], v1[2])
    glMultiTexCoord2f(GL_TEXTURE0+t, 1.0, 1.0); glVertex3f(v2[0], v2[1], v2[2])
    glMultiTexCoord2f(GL_TEXTURE0+t, 1.0, 0.0); glVertex3f(v3[0], v3[1], v3[2])
    glMultiTexCoord2f(GL_TEXTURE0+t, 0.0, 0.0); glVertex3f(v4[0], v4[1], v4[2])
    glEnd()                                 # Done Drawing The Quad
    # Unbind framebuffer and texture so later draws start from clean state.
    glBindFramebuffer(GL_FRAMEBUFFER, 0)
    glBindTexture(GL_TEXTURE_2D, 0)
def LoadTextures(images):
    """Load each image file as a GL texture; append the ids to global `textures`."""
    glTextures = glGenTextures(len(images))
    for path in images:
        print 'binding texture %s as %s' % (path, glTextures[len(textures)])
        image = Image.open(path)
        ix = image.size[0]
        iy = image.size[1]
        # "RGBX" pads to 4 channels; -1 flips rows into GL's bottom-up order.
        image = image.tobytes("raw", "RGBX", 0, -1)
        # len(textures) doubles as the index of the next unused texture id.
        glBindTexture(GL_TEXTURE_2D, glTextures[len(textures)])
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE, image)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        textures.append(glTextures[len(textures)])
    glBindTexture(GL_TEXTURE_2D, 0)
# The main drawing function.
def DrawGLScene():
    """Render one frame.

    While editing (``ready`` is False) the three visible faces of cube 1
    are drawn flat-colored; after the user presses space they are drawn
    textured, with ``time`` acting as a frame counter / texture index.
    """
    global vertices, vNames, currentV, currentTime, vertices2, textures, time, sentido
    # Clear The Screen And The Depth Buffer
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    glLoadIdentity()  # Reset The View
    # Move into the screen 5.0 units.
    glTranslatef(0.0, 0.0, -5.0)
    if not ready:
        # Editing mode: flat-colored faces of cube 1 (cube 2 disabled below).
        #CUBO 1
        DrawColorQuad(vertices['a'], vertices['b'], vertices['f'], vertices['e'], [0.0,1.0,0.0])
        DrawColorQuad(vertices['a'], vertices['b'], vertices['d'], vertices['c'], [1.0,0.0,0.0])
        DrawColorQuad(vertices['a'], vertices['e'], vertices['g'], vertices['c'], [0.0,0.0,1.0])
        #DrawColorQuad(vertices2['a'], vertices2['b'], vertices2['f'], vertices2['e'], [0.0,1.0,0.0])
        #DrawColorQuad(vertices2['a'], vertices2['b'], vertices2['d'], vertices2['c'], [1.0,0.0,0.0])
        #DrawColorQuad(vertices2['a'], vertices2['e'], vertices2['g'], vertices2['c'], [0.0,0.0,1.0])
    else:
        # Textured mode: the top face animates through texture index ``time``.
        DrawTextureQuad(vertices['a'], vertices['b'], vertices['f'], vertices['e'], time)
        DrawTextureQuad(vertices['a'], vertices['b'], vertices['d'], vertices['c'], 1)
        DrawTextureQuad(vertices['a'], vertices['e'], vertices['g'], vertices['c'], 2)
        # NOTE(review): ``sentido`` is never changed in the visible code, so
        # once ``time`` reaches 100 it alternates between 99 and 100 -- the
        # commented block below suggests a ping-pong animation was intended.
        if sentido == 0 and time < 100:
            time+=1
        else:
            time-=1
    '''if time == 0:
sentido = 0
if time == 100:
sentido = 1 '''
    '''glBegin(GL_QUADS) # Start drawing a 4 sided polygon
glColor3f(0.0,1.0,0.0) # Set The Color To Green
glVertex3f(vertices['a'][0],vertices['a'][1],vertices['a'][2])
glVertex3f(vertices['b'][0],vertices['b'][1],vertices['b'][2])
glVertex3f(vertices['f'][0],vertices['f'][1],vertices['f'][2])
glVertex3f(vertices['e'][0],vertices['e'][1],vertices['e'][2])
glEnd()
# Move Left 3.0 units and
#glTranslatef(-3.0, 0.0, 0.0)
# FRONT
glBegin(GL_QUADS) # Start drawing a 4 sided polygon
glColor3f(1.0,0.0,0.0) # Set The Color To Red
glVertex3f(vertices['a'][0],vertices['a'][1],vertices['a'][2])
glVertex3f(vertices['b'][0],vertices['b'][1],vertices['b'][2])
glVertex3f(vertices['d'][0],vertices['d'][1],vertices['d'][2])
glVertex3f(vertices['c'][0],vertices['c'][1],vertices['c'][2])
glEnd() # We are done with the polygon # We are done with the polygon
# Move Right 3.0 units.
#glTranslatef(6.0, 0.0, 0.0)
# LEFT face
glBegin(GL_QUADS) # Start drawing a 4 sided polygon
glColor3f(0.0,0.0,1.0) # Set The Color To Blue
glVertex3f(vertices['a'][0],vertices['a'][1],vertices['a'][2])
glVertex3f(vertices['e'][0],vertices['e'][1],vertices['e'][2])
glVertex3f(vertices['g'][0],vertices['g'][1],vertices['g'][2])
glVertex3f(vertices['c'][0],vertices['c'][1],vertices['c'][2])
glEnd() # We are done with the polygon
#SEGUNDO CUBO
# TOP
glBegin(GL_QUADS) # Start drawing a 4 sided polygon
glColor3f(0.0,1.0,0.0) # Set The Color To Green
glVertex3f(vertices2['a'][0],vertices2['a'][1],vertices2['a'][2])
glVertex3f(vertices2['b'][0],vertices2['b'][1],vertices2['b'][2])
glVertex3f(vertices2['f'][0],vertices2['f'][1],vertices2['f'][2])
glVertex3f(vertices2['e'][0],vertices2['e'][1],vertices2['e'][2])
glEnd()
# Move Left 3.0 units and
#glTranslatef(-3.0, 0.0, 0.0)
# FRONT
glBegin(GL_QUADS) # Start drawing a 4 sided polygon
glColor3f(1.0,0.0,0.0) # Set The Color To Red
glVertex3f(vertices2['a'][0],vertices2['a'][1],vertices2['a'][2])
glVertex3f(vertices2['b'][0],vertices2['b'][1],vertices2['b'][2])
glVertex3f(vertices2['d'][0],vertices2['d'][1],vertices2['d'][2])
glVertex3f(vertices2['c'][0],vertices2['c'][1],vertices2['c'][2])
glEnd() # We are done with the polygon # We are done with the polygon
# Move Right 3.0 units.
#glTranslatef(6.0, 0.0, 0.0)
# LEFT face
glBegin(GL_QUADS) # Start drawing a 4 sided polygon
glColor3f(0.0,0.0,1.0) # Set The Color To Blue
glVertex3f(vertices2['a'][0],vertices2['a'][1],vertices2['a'][2])
glVertex3f(vertices2['e'][0],vertices2['e'][1],vertices2['e'][2])
glVertex3f(vertices2['g'][0],vertices2['g'][1],vertices2['g'][2])
glVertex3f(vertices2['c'][0],vertices2['c'][1],vertices2['c'][2])
glEnd() '''
    # since this is double buffered, swap the buffers to display what just got drawn.
    glutSwapBuffers()
# The function called whenever a key is pressed. Note the use of Python tuples to pass in: (key, x, y)
def keyPressed(*args):
    """GLUT keyboard handler.

    ``args[0]`` is the pressed key.  Movement keys (a/d, s/w, q/e) shift
    the currently selected vertex along X/Y/Z by ``pickV`` units; other
    keys switch vertex/cube, save/load vertex sets, or start texture mode.
    """
    global vertices, vNames, currentV, rquadSum, vertices_actual, currentM, vertices2, pickV, ready
    # If escape is pressed, kill everything.
    if args[0] == ESCAPE:
        glutDestroyWindow(window)
        sys.exit()
    # X axis
    elif args[0] == 'a':
        vertices_actual[vNames[currentV]][0]-=pickV
    elif args[0] == 'd':
        vertices_actual[vNames[currentV]][0]+=pickV
    # Y axis
    elif args[0] == 's':
        vertices_actual[vNames[currentV]][1]-=pickV
    elif args[0] == 'w':
        vertices_actual[vNames[currentV]][1]+=pickV
    # Z axis
    elif args[0] == 'e':
        vertices_actual[vNames[currentV]][2]-=pickV
    elif args[0] == 'q':
        vertices_actual[vNames[currentV]][2]+=pickV
    # toggle movement step size between coarse and fine
    elif args[0] == 'm':
        if pickV == 0.1 :
            pickV = 0.05
        else :
            pickV = 0.1
    # cycle through the 8 vertices of the current cube
    elif args[0] == 'p':
        currentV = (currentV+1)%8
    # switch which cube is being edited
    elif args[0] == 'o':
        if currentM == 1:
            vertices_actual = vertices2
            currentM = 2
            currentV = 0
        else:
            vertices_actual = vertices
            currentM = 1
            currentV = 0
    # save cube 1 / cube 2 to disk, one "name@[x, y, z]" line per vertex.
    # Using ``with`` guarantees the files are flushed and closed.
    elif args[0] == 'g':
        with open("cubo1.txt", "w") as f:
            for k in vertices:
                f.write(k+"@"+str(vertices[k])+"\n")
    elif args[0] == 'b':
        with open("cubo2.txt", "w") as f:
            for k in vertices2:
                f.write(k+"@"+str(vertices2[k])+"\n")
    # load cube 1 back from disk
    elif args[0] == 'h':
        lines = [line.rstrip('\n') for line in open('cubo1.txt')]
        aux_vert = {}
        for i in lines:
            k,lista = i.split("@")
            # Bug fix: the slice was [1:-2], which strips the closing ']'
            # plus the final digit of the last coordinate; a value such as
            # 0.15 round-tripped as 0.1.  [1:-1] removes only the brackets.
            final = list(map(float, lista[1:-1].split(',')))
            aux_vert[k] = final
        vertices = aux_vert
        currentV = 0
    # load cube 2 back from disk
    elif args[0] == 'n':
        lines2 = [line.rstrip('\n') for line in open('cubo2.txt')]
        aux_vert2 = {}
        for i2 in lines2:
            k2,lista2 = i2.split("@")
            # Same fix as above: [1:-1], not [1:-2].
            final2 = list(map(float, lista2[1:-1].split(',')))
            aux_vert2[k2] = final2
        vertices2 = aux_vert2
        currentV = 0
    # space: switch from editing (colored) to textured rendering
    elif args[0] == ' ':
        if not ready:
            ready = True
def main():
    """Create the GLUT window, register callbacks and initialize GL state."""
    global window
    # For now we just pass glutInit one empty argument.
    glutInit(())
    # Select type of Display mode:
    #  Double buffer
    #  RGBA color
    #  Alpha components supported
    #  Depth buffer
    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_ALPHA | GLUT_DEPTH)
    # get a 640 x 480 window
    glutInitWindowSize(640, 480)
    # the window starts at the upper left corner of the screen
    glutInitWindowPosition(0, 0)
    # Retain the window id to use when closing; the global declaration above
    # keeps this assignment from being local to main.
    window = glutCreateWindow("EntE")
    # Register the drawing function with glut.
    glutDisplayFunc (DrawGLScene)
    # Uncomment this line to get full screen.
    #glutFullScreen()
    # When we are doing nothing, redraw the scene.
    glutIdleFunc(DrawGLScene)
    # Register the function called when our window is resized.
    glutReshapeFunc (ReSizeGLScene)
    # Register the function called when the keyboard is pressed.
    glutKeyboardFunc (keyPressed)
    # Initialize our window.
    InitGL(640, 480)
    # Print message to console, and kick off the main to get it rolling.
    print "Hit ESC key to quit."
if __name__ == '__main__':
    # Check that the GLU 1.2 API is available before creating any GL state.
    try:
        GLU_VERSION_1_2
    except:
        print "Need GLU 1.2 to run this demo"
        sys.exit(1)
    main()
    # Hand control to GLUT's event loop; this call never returns.
    glutMainLoop()
|
t = input()
for ti in range(t):
n = input()
stocks = map(int, raw_input().split(' '))
transactions = [0 for x in range(n)]
stocks.reverse()
max_so_far = 0
for i in range(n):
if stocks[i] > max_so_far:
transactions[i] = 0 #sell
max_so_far = stocks[i]
else:
transactions[i] = 1 #buy
stocks.reverse()
transactions.reverse()
bought_stocks = 0
profit = 0
for i in range(n):
if transactions[i] == 1:
bought_stocks += 1
profit -= stocks[i]
else:
profit += stocks[i] * bought_stocks
bought_stocks = 0
print profit
|
"""
@author Mateus Paiva Matiazzi
"""
import time
class PID:
    """Simple PID controller with clamped integral and derivative terms.

    @author Mateus Paiva Matiazzi
    """

    def __init__(self, kp=1.0, ki=0.0, kd=0.0, maxIntegral=1000.0,
                 maxDerivative=1000.0, target=0.0):
        # Gain constants
        self.kp = kp
        self.ki = ki
        self.kd = kd
        # Magnitude limits for the integral (anti-windup) and derivative terms
        self.maxIntegral = maxIntegral
        self.maxDerivative = maxDerivative
        # Accumulated derivative and integral state
        self.derivative = 0.0
        self.integral = 0.0
        # Set point and the error from the previous update
        self.target = target
        self.error = 0.0
        # Timestamp of the previous update, used to compute dt
        self.lastTime = time.time()

    def setKp(self, num):
        """Set the proportional gain."""
        self.kp = num

    def setKi(self, num):
        """Set the integral gain."""
        self.ki = num

    def setKd(self, num):
        """Set the derivative gain."""
        self.kd = num

    def setTarget(self, num):
        """Set the controller set point.

        Bug fix: this previously executed ``self.setTarget = setTarget``,
        which raised NameError (and would have shadowed the method);
        it now stores the new target value.
        """
        self.target = num

    def getConstants(self):
        """Return the (kp, ki, kd) gains as a tuple."""
        return self.kp, self.ki, self.kd

    def update(self, value):
        """Advance the controller with a new measurement.

        Args:
            value: The current measured process value.

        Returns:
            The controller output (kp*P + ki*I + kd*D) as a float.
        """
        error = self.target - value
        now = time.time()
        deltaTime = now - self.lastTime
        proportional = error
        # Left-rectangle integration using the previous error sample.
        integral = self.integral + (self.error * deltaTime)
        if deltaTime > 0.0:
            derivative = (error - self.error) / deltaTime
        else:
            # Two updates within clock resolution: avoid a ZeroDivisionError
            # and keep the previous derivative estimate.
            derivative = self.derivative
        # Clamp both terms symmetrically so a large *negative* error cannot
        # wind up past the configured magnitude limits either (the original
        # code only clamped the positive side).
        integral = max(-self.maxIntegral, min(integral, self.maxIntegral))
        derivative = max(-self.maxDerivative, min(derivative, self.maxDerivative))
        output = self.kp * proportional + self.ki * integral + self.kd * derivative
        # Persist state for the next update
        self.integral = integral
        self.derivative = derivative
        self.error = error
        self.lastTime = now
        return output

    def reset(self):
        """Clear accumulated state and restart the time base."""
        self.derivative = 0.0
        self.integral = 0.0
        self.error = 0.0
        self.lastTime = time.time()
# -*- coding: UTF-8 -*-
from user import User
from attack import Attacker
from box import Box
from equip import Equip
from item import Item
from shop import Shop
from growup import Growup
from card import Card
from req import KfReq
from log import Logging
import time
import datetime
class UserTask(object):
    """Runs the automation for one game account: attack, open boxes, smelt
    equipment, use/sell items and grow up, logging progress as it goes.
    """

    # Class-level flag allowing an external controller to stop the task.
    run = True

    def __init__(self, username, password,
                 openBoxType=None,
                 smeltEquipType=None,
                 useItemType=None,
                 sellItemType=None,
                 buyExp=True, log=None):
        self.username = username
        self.password = password
        # Avoid shared mutable default arguments: build fresh lists per call.
        # The defaults are identical to the original signature's values.
        self.openBoxType = [True, True, True, True] if openBoxType is None else openBoxType
        self.smeltEquipType = [True, True, True, False] if smeltEquipType is None else smeltEquipType
        self.useItemType = [True, True, True, True, True, True] if useItemType is None else useItemType
        self.sellItemType = [True, True, True, True, True, True] if sellItemType is None else sellItemType
        # Bug fix: this was ``if log is None or len(logfilePath)==0`` --
        # ``logfilePath`` is undefined here, so passing a logger raised
        # NameError.  Create a default logger only when none is supplied.
        if log is None:
            self.log = Logging(self.username)
        else:
            self.log = log
        self.buyExp = buyExp
        # Per-run statistics, filled in by task().
        self.statistic = {
            "username": self.username
        }

    def init(self):
        """Log the user in and build the per-user helper objects."""
        self.user = User(self.username, self.password)
        self.user.login()
        self.attacker = Attacker(self.user, log = self.log)
        self.box = Box(self.user, self.openBoxType)
        self.equip = Equip(self.user, self.smeltEquipType)
        self.item = Item(self.user, self.useItemType, self.sellItemType)
        self.shop = Shop(self.user)
        self.growup = Growup(self.user)
        self.card = Card(self.user)

    def task(self):
        """Run one full automation pass; must be called after init()."""
        stage = self.attacker.autoAttack()
        self.statistic['stage'] = stage
        self.box.autoOpenBox()
        self.equip.autoSmelt()
        self.item.autoItemCommand()
        # Shop purchases and card collection are currently disabled.
        # if self.buyExp is True:
        #     self.shop.autoBuy()
        self.growup.growup()
        # self.card.getCard()
        self.log.info('============= END USER TASK ================')
|
from django.contrib import admin
from .models import Person
# Register your models here.
class PersonAdmin(admin.ModelAdmin):
    """Django admin configuration for the Person model."""
    # Columns shown in the admin changelist.
    list_display = ('id', 'first_name', 'last_name', 'email')
    # Sidebar filter on the verification flag.
    list_filter = ('is_verify',)

admin.site.register(Person, PersonAdmin)
|
import configparser
import time
from paho34 import Paho34
import argparse
import os
import sys
# Parse the command line.  --config is now mandatory so argparse itself
# reports a clean usage error when it is missing, instead of the previous
# TypeError from os.path.isfile(None).  The description was also a
# copy-paste leftover ("Process some integers.").
parser = argparse.ArgumentParser(description='Run the Paho MQTT loop with the given config.')
parser.add_argument('--config', type=str, required=True, help='set config path')
args = parser.parse_args()
if not os.path.isfile(args.config):
    print("config file not found")
    sys.exit(1)

config = configparser.ConfigParser()
config.read(args.config)
def main():
    """Create the MQTT client and service its network loop forever.

    Note: the trailing ``return 0`` is unreachable; the loop only exits
    via an exception or a process signal.
    """
    mqtt = Paho34(config)
    while True:
        mqtt.loop()
        # Short sleep to avoid busy-spinning between loop iterations.
        time.sleep(0.2)
    return 0

if __name__ == "__main__":
    main()
|
import argparse
import gym
import numpy as np
from gym.envs.registration import register
from gym.spaces import Discrete
from gym.utils import seeding
from tensorboardX import SummaryWriter
# Deterministic FrozenLake variant: with is_slippery=False actions always
# move the agent in the commanded direction.
register(
    id="FrozenLakeNotSlippery-v0",
    entry_point="gym.envs.toy_text:FrozenLakeEnv",
    kwargs={"map_name": "4x4", "is_slippery": False},
)
# NOTE(review): project-local environment; presumably a 100-state chain with
# a local reward maximum -- confirm against qlearning/local_maxima_env.py.
register(
    id="LocalMaximaEnv-v0",
    entry_point="qlearning.local_maxima_env:Env",
    kwargs=dict(num_states=100),
)
class QLearning:
    """Tabular Q-learning with epsilon-greedy exploration.

    ``train_loop`` is a generator-based training driver: after each step
    it yields ``(q, state, done)`` and accepts an action sent back in via
    ``gen.send(a)``; when ``None`` is received it chooses the action
    itself with an epsilon-greedy policy.
    """

    def __init__(self, seed=0):
        # Own RNG so action selection is reproducible per seed.
        self.random, _ = seeding.np_random(seed)

    def argmax(self, array: np.ndarray):
        """Return an index of the maximum value, breaking ties randomly."""
        max_val = array.max(initial=-np.inf)
        max_indices = np.arange(array.size)[array == max_val]
        return self.random.choice(max_indices)

    def train_loop(
        self,
        env: gym.Env,
        alpha: float = 0.1,
        gamma: float = 0.99,
        epsilon: float = 1,
        epsilon_decay: float = 0.9,
        min_epsilon: float = 0.001,
    ):
        """Run episodes forever, updating the Q-table at episode end.

        Yields ``(q, s, d)`` each step; callers may ``send`` an action to
        override the internal policy.

        Bug fix: the epsilon-greedy branch was inverted -- it previously
        *exploited* (argmax) with probability epsilon, so training started
        fully greedy and became random as epsilon decayed.  A random
        action is now taken with probability epsilon, the greedy one
        otherwise.
        """
        assert isinstance(env.action_space, Discrete)
        assert isinstance(env.observation_space, Discrete)
        q = np.zeros((env.observation_space.n, env.action_space.n))
        while True:
            states = []
            actions = []
            rewards = []
            s = env.reset()
            t = 0
            d = False
            while not d:
                states.append(s)
                a = yield q, s, d
                if a is None:
                    # Epsilon-greedy: explore with probability epsilon.
                    a = (
                        env.action_space.sample()
                        if self.random.random() < epsilon
                        else self.argmax(q[s])
                    )
                    epsilon = max(epsilon * epsilon_decay, min_epsilon)
                s, r, d, _ = env.step(a)
                actions.append(a)
                rewards.append(r)
                t += 1
                if d:
                    # Backup each transition with the standard Q-learning
                    # target r + gamma * max_a' Q(s', a').
                    for state, action, reward, next_state, next_action in zip(
                        states, actions, rewards, states[1:], actions[1:]
                    ):
                        td_target = reward + gamma * max(q[next_state])
                        q[state, action] += alpha * (td_target - q[state, action])
                    # Terminal transition has no successor state.
                    state = states[-1]
                    action = actions[-1]
                    q[state, action] += alpha * (rewards[-1] - q[state, action])
                    yield q, s, d
                    s = env.reset()
                    states = []
                    actions = []
                    rewards = []

    def evaluate(self, env: gym.Env, q: np.ndarray, render: bool = False):
        """Roll out one greedy episode, yielding each reward received."""
        d = False
        s = env.reset()
        while not d:
            if render:
                env.render()
                input("waiting")
            s, r, d, i = env.step(self.argmax(q[s]))
            yield r
def main(env_id, iterations):
    """Train tabular Q-learning on ``env_id`` and log evaluation returns.

    Bug fixes: ``train_loop`` takes only the training env (``eval_env``
    was previously passed positionally as ``alpha``), and it yields
    3-tuples ``(q, s, d)`` (the old 4-way unpacking raised ValueError on
    the first iteration).  The per-episode return written to TensorBoard
    is now measured with a greedy rollout on ``eval_env``.
    """
    env = gym.make(env_id)
    eval_env = gym.make(env_id)
    env.seed(0)
    eval_env.seed(0)
    writer = SummaryWriter("/tmp/qlearning")
    episode = 0
    agent = QLearning()
    q = None
    for i, (q, s, d) in enumerate(agent.train_loop(env)):
        if d:
            # Greedy evaluation rollout measuring the current policy's return.
            ret = sum(agent.evaluate(eval_env, q))
            writer.add_scalar("return", ret, episode)
            episode += 1
        if i == iterations:
            break
    print(q)
    for _ in range(5):
        list(agent.evaluate(eval_env, q, render=True))
if __name__ == "__main__":
    PARSER = argparse.ArgumentParser()
    # Positional gym environment id, e.g. "FrozenLakeNotSlippery-v0".
    PARSER.add_argument("env_id")
    # Number of training steps to run before stopping.
    PARSER.add_argument("--iterations", "-i", type=int)
    main(**vars(PARSER.parse_args()))
|
"""Assorted utility functions and values.
safe_plt: A Pyplot that won't attempt to open a display if not available
iso639_3: A mapping of Norwegian language names (as used in the data) to
ISO639_3 codes.
"""
import datetime as dt
import itertools
import os
from pathlib import Path
import pickle
import random
import sys
from typing import Iterable, List, Optional, Sequence, Set, TextIO, Tuple, Union
import keras.backend as K
import matplotlib
if 'SLURM_JOB_NODELIST' in os.environ or (
os.name == 'posix' and 'DISPLAY' not in os.environ
): # noqa: E402
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
try:
import seaborn as sns
sns.set(context='paper', style='whitegrid')
except ImportError:
pass
RANDOM_SEED = 350
CMAP = 'gnuplot2_r'
safe_plt = plt
conll_cols = [
'ID',
'FORM',
'LEMMA',
'UPOS',
'XPOS',
'FEATS',
'HEAD',
'DEPREL',
'DEPS',
'MISC',
]
iso639_3 = dict(
engelsk='eng',
polsk='pol',
russisk='rus',
somali='som',
spansk='spa',
tysk='deu',
vietnamesisk='vie',
)
PROJECT_ROOT = Path(__file__).resolve().parents[1] # type: Path
DATA_DIR = PROJECT_ROOT / 'ASK'
RESULTS_DIR = PROJECT_ROOT / 'results'
MODEL_DIR = PROJECT_ROOT / 'models'
VECTOR_DIR = MODEL_DIR / 'vectors'
IMG_DIR = PROJECT_ROOT / 'thesis' / 'img'
CEFR_LABELS = ['A2', 'A2/B1', 'B1', 'B1/B2', 'B2', 'B2/C1', 'C1']
ROUND_CEFR_LABELS = CEFR_LABELS[::2]
LANG_LABELS = ['eng', 'pol', 'rus', 'som', 'spa', 'deu', 'vie']
REPRESENTATION_LAYER = 'vector_representation'
ATTENTION_LAYER = 'attention_layer'
EMB_LAYER_NAME = 'embedding_layer'
OUTPUT_NAME = 'output'
AUX_OUTPUT_NAME = 'aux_output'
# Two implementations of heatmap(): a seaborn-backed one when seaborn
# imported successfully above, and a plain-matplotlib fallback otherwise.
if 'seaborn' in sys.modules:
    def heatmap(
        values: np.ndarray,
        xticks: Sequence[str],
        yticks: Sequence[str],
        normalize: bool = False,
        ax: Optional[plt.Axes] = None,
    ) -> None:
        """Plot a 2D array as a heatmap with overlayed values.

        Args:
            values: The 2D array to plot
            xticks: The labels to place on the X axis
            yticks: The labels to place on the Y axis
            normalize: Row-normalize values; show a colorbar instead of
                per-cell annotations
            ax: An optional Axes object to plot the heatmap on
        """
        if normalize:
            # Row-normalize so each row sums to 1 (e.g. a confusion matrix).
            values = values / values.sum(axis=1, keepdims=True)
            annot = False
            cbar = True
            vmin = 0.0  # type: Optional[float]
            vmax = 1.0  # type: Optional[float]
        else:
            annot = True
            cbar = False
            vmin = None
            vmax = None
        ax = sns.heatmap(
            values,
            square=True,
            annot=annot,
            fmt='.2g',
            ax=ax,
            cbar=cbar,
            vmin=vmin,
            vmax=vmax,
            cmap=CMAP,
        )
        ax.set_xticklabels(xticks)
        ax.set_yticklabels(yticks, rotation=0)
else:
    def heatmap(
        values: np.ndarray,
        xticks: Sequence[str],
        yticks: Sequence[str],
        normalize: bool = False,
        ax: Optional[plt.Axes] = None,
    ) -> None:
        """Plot a 2D array as a heatmap with overlayed values.

        Args:
            values: The 2D array to plot
            xticks: The labels to place on the X axis
            yticks: The labels to place on the Y axis
            normalize: Row-normalize values before plotting
            ax: An optional Axes object to plot the heatmap on
        """
        if ax is None:
            ax = plt.gca()
        if normalize:
            values = values / np.sum(values, axis=1, keepdims=True)
        ax.imshow(values, cmap=CMAP)
        ax.set(
            yticks=range(len(yticks)),
            xticks=range(len(xticks)),
            yticklabels=yticks,
            xticklabels=xticks,
        )
        # Use white text on dark cells and black on light ones.
        color_cutoff = values.max() / 2
        for row, col in itertools.product(
            range(values.shape[0]), range(values.shape[1])
        ):
            val = values[row, col]
            color = 'white' if val < color_cutoff else 'black'
            if np.issubdtype(values.dtype, np.floating):
                label = '%.2f' % val
            else:
                label = str(int(val))
            ax.text(
                col,
                row,
                label,
                horizontalalignment='center',
                verticalalignment='center',
                color=color,
            )
def load_train_and_dev() -> Tuple[pd.DataFrame, pd.DataFrame]:
    """Load the train and dev splits as dataframes.

    Returns:
        Two frames with the metadata for the documents in the train and
        dev splits, in that order.
    """
    train, dev = (load_split(name) for name in ('train', 'dev'))
    return train, dev
def round_cefr_score(cefr: str) -> str:
    """Round intermediate CEFR levels up to the higher full level.

    Bug fix: the doctests referred to a nonexistent ``round_cefr`` name
    and would fail when run; they now call this function.

    >>> round_cefr_score('A2')
    'A2'
    >>> round_cefr_score('B1/B2')
    'B2'
    """
    # 'B1/B2' -> last two characters 'B2'; plain labels pass through.
    return cefr[-2:] if '/' in cefr else cefr
def load_split(split: str, round_cefr: bool = False) -> pd.DataFrame:
    """Load one data split as a dataframe.

    Args:
        split: One of {train, dev, test, train,dev, norsk}
        round_cefr: Round intermediate CEFR labels up before returning

    Returns:
        A frame with the metadata for documents in the requested split.

    Raises:
        ValueError: If ``split`` is not a recognized split name.
    """
    valid_splits = ("train", "dev", "test", "train,dev", "norsk")
    if split not in valid_splits:
        # Bug fix: the message used to claim only train/dev/test were valid,
        # although "train,dev" and "norsk" are accepted too.
        raise ValueError('Split must be one of %s' % ', '.join(valid_splits))
    filepath = DATA_DIR / "metadata.csv"
    df = pd.read_csv(filepath)
    if split == "norsk":
        # Native-Norwegian control texts; they carry no CEFR score, so the
        # dropna below must not apply to them.
        return df[df.lang.isin({"bokmål", "nynorsk"})]
    df = df.dropna(subset=['cefr'])
    if round_cefr:
        df.loc[:, 'cefr'] = df.cefr.apply(round_cefr_score)
    if split == "train,dev":
        return df[df.split != "test"]
    return df[df.split == split]
def load_test() -> pd.DataFrame:
    """Load the test split as a dataframe.

    Returns:
        A frame with the metadata for the documents in the test split.
    """
    return load_split("test")
def document_iterator(doc: TextIO) -> Iterable[str]:
    """Iterate over the first token of each line in a document.

    Args:
        doc: A file object where each line is a line of tokens
            separated by a space

    Yields:
        The first space-separated token of each line.

    NOTE(review): despite the original "tokens in a document" wording,
    only ``next(tokens_iter)`` is yielded, i.e. one token per line --
    confirm whether all tokens were intended.
    """
    for line in doc:
        tokens_iter = iter(line.split(' '))
        yield next(tokens_iter)
def conll_reader(
    file: Union[str, Path], cols: Sequence[str], tags: bool = False
) -> Iterable[List[Tuple[str, ...]]]:
    """Iterate over sentences in a CoNLL file.

    Args:
        file: The CoNLL file to read from
        cols: The columns to read (names from the module-level conll_cols)
        tags: Whether to include start and end tags around sentences.
            The tags are '<s>' and '</s>' regardless of column.

    Yields:
        Each sentence in file as a list of tuples corresponding to the
        specified cols.

    Raises:
        ValueError: If any name in ``cols`` is not a known CoNLL column.
    """
    if isinstance(file, str):
        file = Path(file)
    try:
        col_idx = [conll_cols.index(c) for c in cols]
    except ValueError:
        raise ValueError('All column names must be one of %s' % set(conll_cols))
    if tags:
        # One tag per requested column, so tag rows align with data rows.
        start_tags = tuple('<s>' for __ in cols)
        end_tags = tuple('</s>' for __ in cols)
    with file.open(encoding='utf8') as stream:
        tuple_sequence = []  # type: List[Tuple[str, ...]]
        for line in stream:
            line = line.strip()
            if line.startswith('#'):
                # CoNLL comment/metadata line.
                continue
            if not line:  # Empty line = end of sentence
                if tags:
                    yield [start_tags] + tuple_sequence + [end_tags]
                else:
                    yield tuple_sequence
                tuple_sequence = []
            else:
                fields = line.split('\t')
                tup = tuple(fields[i] for i in col_idx)
                tuple_sequence.append(tup)
        if tuple_sequence:  # Flush if there is no empty line at end of file
            yield tuple_sequence
def get_split_len(split: str) -> int:
    """Return the number of documents in the split."""
    split_sizes = {'train': 966, 'dev': 123, 'test': 123, 'norsk': 200}
    if split in split_sizes:
        return split_sizes[split]
    raise ValueError(
        "Unrecognized split '%s', should be 'train', 'dev' or 'test'" % split
    )
def get_file_name(name: str) -> str:
    """Build a unique output file name based on ``name``.

    Under SLURM the (array) job id and task id/suffix are appended;
    otherwise a UTC timestamp is used so repeated runs don't collide.
    """
    env = os.environ
    if 'SLURM_ARRAY_JOB_ID' in env:
        return '%s-%s_%s' % (
            name,
            env.get('SLURM_ARRAY_JOB_ID', None),
            env.get('SLURM_ARRAY_TASK_ID', None),
        )
    if 'SLURM_JOB_ID' in env:
        result = name + '-' + env['SLURM_JOB_ID']
        suffix = env.get('SUF', None)
        if suffix is not None:
            result += '_' + suffix
        return result
    stamp = dt.datetime.utcnow().strftime('%m-%d_%H-%M-%S')
    return '%s-%s' % (name, stamp)
def save_model(name: str, model, w2i, pos2i=None):
    """Persist a trained model and its lookup tables under MODEL_DIR.

    Args:
        name: Base name for the saved files
        model: A Keras model (anything exposing ``save(path)``)
        w2i: Word-to-index mapping pickled alongside the model
            (skipped when None)
        pos2i: Optional POS-to-index mapping, pickled likewise
    """
    if not MODEL_DIR.is_dir():
        MODEL_DIR.mkdir()
    model.save(str(MODEL_DIR / (name + '_model.h5')))
    # Use context managers so the pickle files are flushed and closed even
    # on error; previously the handles were left to the garbage collector.
    if w2i is not None:
        with (MODEL_DIR / (name + '_model_w2i.pkl')).open('wb') as fh:
            pickle.dump(w2i, fh)
    if pos2i is not None:
        with (MODEL_DIR / (name + '_model_pos2i.pkl')).open('wb') as fh:
            pickle.dump(pos2i, fh)
def get_stopwords() -> Set[str]:
    """Read and return stop words from a text file."""
    path = MODEL_DIR / 'stopwords' / 'norwegian-funcwords.txt'
    with path.open(encoding='utf8') as stream:
        return {word.strip() for word in stream}
def set_reproducible(seed_delta: int = 0) -> None:
    """Fix random seeds and disable multithreading in order to guarantee reproducible results.

    Note: uses TensorFlow 1.x APIs (ConfigProto/Session/set_random_seed).
    """
    # The below is necessary for starting Numpy generated random numbers
    # in a well-defined initial state.
    seed = RANDOM_SEED + seed_delta
    np.random.seed(seed)
    # The below is necessary for starting core Python generated random numbers
    # in a well-defined state.
    random.seed(seed)
    # Force TensorFlow to use single thread.
    # Multiple threads are a potential source of non-reproducible results.
    # For further details, see: https://stackoverflow.com/questions/42022950/
    session_conf = tf.ConfigProto(
        intra_op_parallelism_threads=1, inter_op_parallelism_threads=1
    )
    # The below tf.set_random_seed() will make random number generation
    # in the TensorFlow backend have a well-defined initial state.
    # For further details, see:
    # https://www.tensorflow.org/api_docs/python/tf/set_random_seed
    tf.set_random_seed(seed)
    sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
    K.set_session(sess)
def rescale_regression_results(predictions, highest_class):
    """Map regression outputs in [0, 1] to integer class labels.

    Values are scaled to [0, highest_class], rounded to the nearest
    integer, clipped into the valid range and cast to int.
    """
    rounded = np.floor(predictions * highest_class + 0.5)
    return np.clip(rounded, 0, highest_class).astype(int)
|
import fresh_tomatoes
import media
# Creating different Movie objects, providing the values the constructor expects
toy_story = media.Movie("Toy Story",
                        "A Story of a boy and his toy that comes to life",
                        "https://upload.wikimedia.org/wikipedia/en/1/13/Toy_Story.jpg",  # NOQA
                        "https://www.youtube.com/watch?v=KYz2wyBy3kc")
avatar = media.Movie("Avatar",
                     "A paraplegic marine dispatched to the moon Pandora on"
                     "a unique mission becomes torn between following his"
                     "orders"
                     "and protecting the world he feels is his home.",
                     "https://upload.wikimedia.org/wikipedia/en/b/b0/Avatar-Teaser-Poster.jpg",  # NOQA
                     "https://www.youtube.com/watch?v=d1_JBMrrYw8")
lagaan = media.Movie("Lagaan",
                     "The people of a small village in Victorian India "
                     "stake their future on a game of cricket against"
                     "their ruthless British rulers.",
                     "https://upload.wikimedia.org/wikipedia/en/b/b6/Lagaan.jpg",  # NOQA
                     "https://www.youtube.com/watch?v=oSIGQ0YkFxs")
kahaani = media.Movie("Kahaani 2",
                      "A woman with a mysterious past is charged with"
                      "kidnapping and murder",
                      "https://upload.wikimedia.org/wikipedia/en/thumb/3/34/Kahaani_2_film_poster.jpg/220px-Kahaani_2_film_poster.jpg",  # NOQA
                      "https://www.youtube.com/watch?v=Ez4mXaeSKuk")
paan_singh_tomar = media.Movie("Paan Singh Tomar",
                               "The story of Paan Singh Tomar, an Indian"
                               "athlete"
                               "and seven-time national steeplechase champion "
                               "who"
                               "becomes one of the most feared dacoits in "
                               "Chambal "
                               "Valley after his retirement.",
                               "https://upload.wikimedia.org/wikipedia/en/9/93/Paan_Singh_Tomar_Poster.jpg",  # NOQA
                               "https://www.youtube.com/watch?v=EH0O75KNqkg")
# List of Movie objects passed to fresh_tomatoes for display
movies = [toy_story, avatar, lagaan, kahaani, paan_singh_tomar]
# Call the function in fresh_tomatoes to render the movie list and show
# trailers in the web browser
fresh_tomatoes.open_movies_page(movies)
|
#Embedded file name: ACEStream\Core\Overlay\OverlayApps.pyo
from MetadataHandler import MetadataHandler
from threading import Lock
from threading import currentThread
from time import time
from traceback import print_exc
import sys
from ACEStream.Core.BitTornado.BT1.MessageID import *
from ACEStream.Core.BuddyCast.buddycast import BuddyCastFactory
from ACEStream.Core.ProxyService.CoordinatorMessageHandler import CoordinatorMessageHandler
from ACEStream.Core.ProxyService.HelperMessageHandler import HelperMessageHandler
from ACEStream.Core.NATFirewall.DialbackMsgHandler import DialbackMsgHandler
from ACEStream.Core.NATFirewall.NatCheckMsgHandler import NatCheckMsgHandler
from ACEStream.Core.SocialNetwork.FriendshipMsgHandler import FriendshipMsgHandler
from ACEStream.Core.SocialNetwork.RemoteQueryMsgHandler import RemoteQueryMsgHandler
from ACEStream.Core.SocialNetwork.RemoteTorrentHandler import RemoteTorrentHandler
from ACEStream.Core.SocialNetwork.SocialNetworkMsgHandler import SocialNetworkMsgHandler
from ACEStream.Core.Statistics.Crawler import Crawler
from ACEStream.Core.Statistics.DatabaseCrawler import DatabaseCrawler
from ACEStream.Core.Statistics.FriendshipCrawler import FriendshipCrawler
from ACEStream.Core.Statistics.SeedingStatsCrawler import SeedingStatsCrawler
from ACEStream.Core.Statistics.VideoPlaybackCrawler import VideoPlaybackCrawler
from ACEStream.Core.Statistics.RepexCrawler import RepexCrawler
from ACEStream.Core.Statistics.PunctureCrawler import PunctureCrawler
from ACEStream.Core.Statistics.ChannelCrawler import ChannelCrawler
from ACEStream.Core.Statistics.UserEventLogCrawler import UserEventLogCrawler
from ACEStream.Core.Utilities.utilities import show_permid_short
from ACEStream.Core.simpledefs import *
from ACEStream.Core.Subtitles.SubtitlesHandler import SubtitlesHandler
from ACEStream.Core.Subtitles.SubtitlesSupport import SubtitlesSupport
from ACEStream.Core.Subtitles.PeerHaveManager import PeersHaveManager
DEBUG = False
class OverlayApps:
__single = None
    def __init__(self):
        # Enforce the singleton: use getInstance() instead of constructing.
        if OverlayApps.__single:
            raise RuntimeError, 'OverlayApps is Singleton'
        OverlayApps.__single = self
        # Handlers for the individual overlay subsystems; filled in by
        # register() depending on the session config.
        self.coord_handler = None
        self.help_handler = None
        self.metadata_handler = None
        self.buddycast = None
        self.collect = None
        self.dialback_handler = None
        self.socnet_handler = None
        self.rquery_handler = None
        self.chquery_handler = None
        self.friendship_handler = None
        # message-id -> callable dispatch table and connection observers
        self.msg_handlers = {}
        self.connection_handlers = []
        self.text_mode = None
        # Serializes access to self.requestPolicy (see requestAllowed).
        self.requestPolicyLock = Lock()
    def getInstance(*args, **kw):
        """Return the singleton OverlayApps, creating it on first use."""
        if OverlayApps.__single is None:
            OverlayApps(*args, **kw)
        return OverlayApps.__single
    getInstance = staticmethod(getInstance)
    def register(self, overlay_bridge, session, launchmany, config, requestPolicy):
        """Wire up every overlay subsystem enabled in ``config``.

        Registers message and connection handlers for the crawler,
        download-help, metadata, subtitles, buddycast, dialback, social
        network, remote query and torrent collecting features.
        """
        self.overlay_bridge = overlay_bridge
        self.launchmany = launchmany
        self.requestPolicy = requestPolicy
        self.text_mode = config.has_key('text_mode')
        # All incoming overlay messages/connections flow through these two.
        overlay_bridge.register_recv_callback(self.handleMessage)
        overlay_bridge.register_conns_callback(self.handleConnection)
        i_am_crawler = False
        if config['crawler']:
            crawler = Crawler.get_instance(session)
            self.register_msg_handler([CRAWLER_REQUEST], crawler.handle_request)
            database_crawler = DatabaseCrawler.get_instance()
            crawler.register_message_handler(CRAWLER_DATABASE_QUERY, database_crawler.handle_crawler_request, database_crawler.handle_crawler_reply)
            seeding_stats_crawler = SeedingStatsCrawler.get_instance()
            crawler.register_message_handler(CRAWLER_SEEDINGSTATS_QUERY, seeding_stats_crawler.handle_crawler_request, seeding_stats_crawler.handle_crawler_reply)
            friendship_crawler = FriendshipCrawler.get_instance(session)
            crawler.register_message_handler(CRAWLER_FRIENDSHIP_STATS, friendship_crawler.handle_crawler_request, friendship_crawler.handle_crawler_reply)
            natcheck_handler = NatCheckMsgHandler.getInstance()
            natcheck_handler.register(launchmany)
            crawler.register_message_handler(CRAWLER_NATCHECK, natcheck_handler.gotDoNatCheckMessage, natcheck_handler.gotNatCheckReplyMessage)
            crawler.register_message_handler(CRAWLER_NATTRAVERSAL, natcheck_handler.gotUdpConnectRequest, natcheck_handler.gotUdpConnectReply)
            videoplayback_crawler = VideoPlaybackCrawler.get_instance()
            crawler.register_message_handler(CRAWLER_VIDEOPLAYBACK_EVENT_QUERY, videoplayback_crawler.handle_event_crawler_request, videoplayback_crawler.handle_event_crawler_reply)
            crawler.register_message_handler(CRAWLER_VIDEOPLAYBACK_INFO_QUERY, videoplayback_crawler.handle_info_crawler_request, videoplayback_crawler.handle_info_crawler_reply)
            repex_crawler = RepexCrawler.get_instance(session)
            crawler.register_message_handler(CRAWLER_REPEX_QUERY, repex_crawler.handle_crawler_request, repex_crawler.handle_crawler_reply)
            puncture_crawler = PunctureCrawler.get_instance()
            crawler.register_message_handler(CRAWLER_PUNCTURE_QUERY, puncture_crawler.handle_crawler_request, puncture_crawler.handle_crawler_reply)
            channel_crawler = ChannelCrawler.get_instance()
            crawler.register_message_handler(CRAWLER_CHANNEL_QUERY, channel_crawler.handle_crawler_request, channel_crawler.handle_crawler_reply)
            usereventlog_crawler = UserEventLogCrawler.get_instance()
            crawler.register_message_handler(CRAWLER_USEREVENTLOG_QUERY, usereventlog_crawler.handle_crawler_request, usereventlog_crawler.handle_crawler_reply)
            if crawler.am_crawler():
                # This peer actively initiates crawls; which ones is
                # selected via keywords on the command line.
                i_am_crawler = True
                self.register_msg_handler([CRAWLER_REPLY], crawler.handle_reply)
                self.register_connection_handler(crawler.handle_connection)
                if 'database' in sys.argv:
                    crawler.register_crawl_initiator(database_crawler.query_initiator)
                if 'videoplayback' in sys.argv:
                    crawler.register_crawl_initiator(videoplayback_crawler.query_initiator)
                if 'seedingstats' in sys.argv:
                    crawler.register_crawl_initiator(seeding_stats_crawler.query_initiator, frequency=1800)
                if 'friendship' in sys.argv:
                    crawler.register_crawl_initiator(friendship_crawler.query_initiator)
                if 'natcheck' in sys.argv:
                    crawler.register_crawl_initiator(natcheck_handler.doNatCheck, 3600)
                if 'repex' in sys.argv:
                    crawler.register_crawl_initiator(repex_crawler.query_initiator)
                if 'puncture' in sys.argv:
                    crawler.register_crawl_initiator(puncture_crawler.query_initiator)
                if 'channel' in sys.argv:
                    crawler.register_crawl_initiator(channel_crawler.query_initiator)
                if 'usereventlog' in sys.argv:
                    crawler.register_crawl_initiator(usereventlog_crawler.query_initiator)
        else:
            self.register_msg_handler([CRAWLER_REQUEST, CRAWLER_REPLY], self.handleDisabledMessage)
        self.metadata_handler = MetadataHandler.getInstance()
        if config['download_help']:
            # Cooperative download (proxy service) message handlers.
            self.coord_handler = CoordinatorMessageHandler(launchmany)
            self.register_msg_handler(HelpHelperMessages, self.coord_handler.handleMessage)
            self.help_handler = HelperMessageHandler()
            self.help_handler.register(session, self.metadata_handler, config['download_help_dir'], config.get('coopdlconfig', False))
            self.register_msg_handler(HelpCoordinatorMessages, self.help_handler.handleMessage)
        self.metadata_handler.register(overlay_bridge, self.help_handler, launchmany, config)
        self.register_msg_handler(MetadataMessages, self.metadata_handler.handleMessage)
        # NOTE(review): subtitles collecting is hard-disabled here, which
        # makes the else-branch below (and the SubtitleMessages
        # registration further down) dead code -- confirm intentional.
        config['subtitles_collecting'] = False
        if not config['subtitles_collecting']:
            self.subtitles_handler = None
        else:
            self.subtitles_handler = SubtitlesHandler.getInstance()
            self.subtitles_handler.register(self.overlay_bridge, self.launchmany.richmetadataDbHandler, self.launchmany.session)
            self.peersHaveManger = PeersHaveManager.getInstance()
            if not self.peersHaveManger.isRegistered():
                self.peersHaveManger.register(self.launchmany.richmetadataDbHandler, self.overlay_bridge)
            self.subtitle_support = SubtitlesSupport.getInstance()
            keypair = self.launchmany.session.keypair
            permid = self.launchmany.session.get_permid()
            self.subtitle_support._register(self.launchmany.richmetadataDbHandler, self.subtitles_handler, self.launchmany.channelcast_db, permid, keypair, self.peersHaveManger, self.overlay_bridge)
            self.subtitle_support.runDBConsinstencyRoutine()
        if not config['torrent_collecting']:
            self.torrent_collecting_solution = 0
        else:
            self.torrent_collecting_solution = config['buddycast_collecting_solution']
        if config['buddycast']:
            self.buddycast = BuddyCastFactory.getInstance(superpeer=config['superpeer'], log=config['overlay_log'])
            self.buddycast.register(overlay_bridge, launchmany, launchmany.rawserver_fatalerrorfunc, self.metadata_handler, self.torrent_collecting_solution, config['start_recommender'], config['buddycast_max_peers'], i_am_crawler)
            self.register_msg_handler(BuddyCastMessages, self.buddycast.handleMessage)
            self.register_connection_handler(self.buddycast.handleConnection)
        if config['dialback']:
            self.dialback_handler = DialbackMsgHandler.getInstance()
            self.dialback_handler.register(overlay_bridge, launchmany, launchmany.rawserver, config)
            self.register_msg_handler([DIALBACK_REQUEST], self.dialback_handler.olthread_handleSecOverlayMessage)
            self.register_connection_handler(self.dialback_handler.olthread_handleSecOverlayConnection)
        else:
            self.register_msg_handler([DIALBACK_REQUEST], self.handleDisabledMessage)
        if config['socnet']:
            self.socnet_handler = SocialNetworkMsgHandler.getInstance()
            self.socnet_handler.register(overlay_bridge, launchmany, config)
            self.register_msg_handler(SocialNetworkMessages, self.socnet_handler.handleMessage)
            self.register_connection_handler(self.socnet_handler.handleConnection)
            self.friendship_handler = FriendshipMsgHandler.getInstance()
            self.friendship_handler.register(overlay_bridge, launchmany.session)
            self.register_msg_handler(FriendshipMessages, self.friendship_handler.handleMessage)
            self.register_connection_handler(self.friendship_handler.handleConnection)
        if config['rquery']:
            self.rquery_handler = RemoteQueryMsgHandler.getInstance()
            self.rquery_handler.register(overlay_bridge, launchmany, config, self.buddycast, log=config['overlay_log'])
            self.register_msg_handler(RemoteQueryMessages, self.rquery_handler.handleMessage)
            self.register_connection_handler(self.rquery_handler.handleConnection)
        if config['subtitles_collecting']:
            hndl = self.subtitles_handler.getMessageHandler()
            self.register_msg_handler(SubtitleMessages, hndl)
        if config['torrent_collecting']:
            self.rtorrent_handler = RemoteTorrentHandler.getInstance()
            self.rtorrent_handler.register(overlay_bridge, self.metadata_handler, session)
            self.metadata_handler.register2(self.rtorrent_handler)
        self.register_connection_handler(self.notifier_handles_connection)
        if config['buddycast']:
            self.buddycast.register2()
def early_shutdown(self):
    """Shut down the friendship handler ahead of full teardown, if one was registered."""
    handler = self.friendship_handler
    if handler is not None:
        handler.shutdown()
def register_msg_handler(self, ids, handler):
    """Map each overlay message id in `ids` to `handler`.

    A later registration for the same id silently overwrites the earlier one.
    """
    # NOTE: `id` shadows the builtin; kept as-is to avoid touching code.
    for id in ids:
        if DEBUG:
            print >> sys.stderr, 'olapps: Message handler registered for', getMessageName(id)
        self.msg_handlers[id] = handler
def register_connection_handler(self, handler):
    """Append `handler` to the list invoked on every overlay connection event.

    Handlers are called in registration order by handleConnection().
    """
    if DEBUG:
        print >> sys.stderr, 'olapps: Connection handler registered for', handler
    self.connection_handlers.append(handler)
def handleMessage(self, permid, selversion, message):
    """Dispatch an incoming overlay message to its registered handler.

    The first byte of `message` identifies the message type. Returns the
    handler's result, or False when the request is denied by policy, no
    handler is registered, or the handler raises.
    """
    # Policy check first: drop messages the sender is not allowed to send.
    if not self.requestAllowed(permid, message[0]):
        if DEBUG:
            print >> sys.stderr, 'olapps: Message not allowed', getMessageName(message[0])
        return False
    if message[0] in self.msg_handlers:
        id_ = message[0]
    else:
        if DEBUG:
            # NOTE(review): passes a 2-byte slice where other call sites pass a
            # single byte — confirm getMessageName accepts a slice.
            print >> sys.stderr, 'olapps: No handler found for', getMessageName(message[0:2])
        return False
    if DEBUG:
        print >> sys.stderr, 'olapps: handleMessage', getMessageName(id_), 'v' + str(selversion)
    try:
        # Under DEBUG the handler is invoked once with timing instrumentation
        # and its result returned; otherwise the plain call below runs.
        if DEBUG:
            st = time()
            ret = self.msg_handlers[id_](permid, selversion, message)
            et = time()
            diff = et - st
            if diff > 0:
                print >> sys.stderr, 'olapps: ', getMessageName(id_), 'returned', ret, 'TOOK %.5f' % diff
            return ret
        return self.msg_handlers[id_](permid, selversion, message)
    except:
        # A failing handler must not take down the dispatcher; log and deny.
        print_exc()
        return False
def handleDisabledMessage(self, *_ignored):
    """Accept and silently drop a message aimed at a disabled subsystem."""
    return True
def handleConnection(self, exc, permid, selversion, locally_initiated):
    """Fan a connection open/close event out to every registered handler.

    Handlers are isolated from each other: an exception in one is logged
    and the remaining handlers still run.
    """
    if DEBUG:
        print >> sys.stderr, 'olapps: handleConnection', exc, selversion, locally_initiated, currentThread().getName()
    for handler in self.connection_handlers:
        try:
            handler(exc, permid, selversion, locally_initiated)
        except:
            print >> sys.stderr, 'olapps: Exception during connection handler calling'
            print_exc()
def requestAllowed(self, permid, messageType):
    """Return True when the current policy lets `permid` send `messageType`.

    The lock only guards reading the policy reference; the (potentially
    slower) policy evaluation itself runs outside the lock.
    """
    self.requestPolicyLock.acquire()
    try:
        rp = self.requestPolicy
    finally:
        self.requestPolicyLock.release()
    allowed = rp.allowed(permid, messageType)
    if DEBUG:
        if allowed:
            word = 'allowed'
        else:
            word = 'denied'
        print >> sys.stderr, 'olapps: Request type %s from %s was %s' % (getMessageName(messageType), show_permid_short(permid), word)
    return allowed
def setRequestPolicy(self, requestPolicy):
    """Install `requestPolicy` as the policy consulted by requestAllowed()."""
    lock = self.requestPolicyLock
    lock.acquire()
    try:
        self.requestPolicy = requestPolicy
    finally:
        lock.release()
def notifier_handles_connection(self, exc, permid, selversion, locally_initiated):
    """Broadcast a peer-connection notification for `permid` to the session's listeners."""
    channel = self.launchmany.session.uch
    channel.notify(NTFY_PEERS, NTFY_CONNECTION, permid, True)
|
import os
import time
import subprocess
from mininet.node import Host
from mininet_test.pendingresult import PendingResult
from mininet_test.runresult import RunResult
from mininet_test.errors import RunResultError
class TestMonitorHost(Host):
    """Mininet Host that reports every process it launches to a test monitor."""

    def __init__(self, *args, **kwargs):
        """Host wrapper to intercept calls which launch new
        processes.
        """
        super(TestMonitorHost, self).__init__(*args, **kwargs)
        # Monitor collecting launched processes; None until set_test_monitor().
        self.test_monitor = None

    def set_test_monitor(self, test_monitor):
        """Set the process monitor that will track launched processes."""
        self.test_monitor = test_monitor

    def popen(self, args, cwd=None, daemon=False, **kwargs):
        """Launch `args` on this host; register it with the monitor (if any)
        and return a PendingResult handle for later collection.
        """
        if cwd is None:
            cwd = os.getcwd()
        process = super(TestMonitorHost, self).popen(
            args, cwd=cwd, **kwargs)
        if self.test_monitor:
            self.test_monitor.add_process(
                process=process, args=args, cwd=cwd, daemon=daemon)
        # If we are launching a daemon we wait 0.5 sec for
        # it to launch
        if daemon:
            time.sleep(0.5)
        return PendingResult(process=process, command=args, cwd=cwd)

    def pexec(self, args, cwd, **kwargs):
        """Run `args` to completion on this host and return a RunResult.

        Raises RunResultError when the command exits with a non-zero code.
        """
        popen = super(TestMonitorHost, self).popen(
            args, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
            stderr=subprocess.PIPE, cwd=cwd, **kwargs)
        # Warning: this can fail with large numbers of fds!
        stdout, stderr = popen.communicate()
        # communicate() has already consumed the streams; wait() just
        # collects the exit status.
        returncode = popen.wait()
        result = RunResult(command=args, cwd=cwd, stdout=stdout,
                           stderr=stderr, returncode=returncode)
        if result.returncode != 0:
            raise RunResultError(runresult=result)
        return result
|
# -*- coding: utf-8 -*-
from odoo import api, fields, models, _
from ...wxwork_api.helper.common import *
from odoo.exceptions import UserError
class Users(models.Model):
    """Extend res.users with Enterprise WeChat (WeCom) identity fields."""
    _inherit = "res.users"
    _description = "Enterprise WeChat system users"
    _order = "wxwork_user_order"

    # NOTE(review): default "mail" is not among the selection values declared
    # here; presumably the base res.users model contributes it — confirm that
    # this redefinition extends rather than replaces the base selection.
    notification_type = fields.Selection(
        [("wxwork", "Handle by Enterprise WeChat")], default="mail", required=True,
    )
    # Enterprise WeChat account identifier, synchronized from WeCom.
    wxwork_id = fields.Char(string="Enterprise WeChat user ID", readonly=True,)
    # Whether this user receives Enterprise WeChat reminders.
    is_wxwork_notice = fields.Boolean("Whether to receive reminders", default=True,)
    is_wxwork_user = fields.Boolean("Is an enterprise WeChat user", readonly=True,)
    # qr_code = fields.Binary(string='Personal QR code', help='Employee personal QR code; scan to add as an external contact', readonly=True)
    wxwork_user_order = fields.Char(
        "Enterprise WeChat ranking",
        default="0",
        help="The sort value in the department, the default is 0. The number must be the same as the department. The larger the number, the higher the order.The value range is [0, 2^32)",
        readonly=True,
    )
# ----------------------------------------------------------
# Change-user-type wizard
# ----------------------------------------------------------
class ChangeTypeWizard(models.TransientModel):
    """Wizard that changes the user type of the res.users records selected in the list view."""
    _name = "wizard.change.user.type"
    _description = "Wizard to change user type(Enterprise WeChat)"

    def _default_user_ids(self):
        # Pre-populate one wizard line per user selected in the caller's view;
        # empty when launched from a context other than res.users.
        user_ids = (
            self._context.get("active_model") == "res.users"
            and self._context.get("active_ids")
            or []
        )
        # (0, 0, vals) creates a fresh wizard line per selected user.
        return [
            (
                0,
                0,
                {"user_id": user.id, "user_login": user.login, "user_name": user.name,},
            )
            for user in self.env["res.users"].browse(user_ids)
        ]

    user_ids = fields.One2many(
        "user.type.change", "wizard_id", string="user", default=_default_user_ids
    )

    def change_type_button(self):
        """Apply the type change on every line; force a client reload when the
        current user changed their own type (their access rights changed)."""
        self.ensure_one()
        self.user_ids.change_type_button()
        if self.env.user in self.mapped("user_ids.user_id"):
            return {"type": "ir.actions.client", "tag": "reload"}
        return {"type": "ir.actions.act_window_close"}
class ChangeTypeUser(models.TransientModel):
    """One wizard line: a res.users record and the user type to assign to it."""
    _name = "user.type.change"
    _description = "User, Change Type Wizard"

    # Database ids of the built-in/system users that must never have their
    # type changed (previously an id == 1 or ... or id == 5 chain).
    _PROTECTED_USER_IDS = (1, 2, 3, 4, 5)

    wizard_id = fields.Many2one(
        "wizard.change.user.type", string="Wizard", required=True, ondelete="cascade"
    )
    user_id = fields.Many2one(
        "res.users", string="User", required=True, ondelete="cascade"
    )
    user_login = fields.Char(string="Login account", readonly=True, translate=True,)
    user_name = fields.Char(string="Login name", readonly=True)
    choices = [("1", _("Internal User")), ("8", _("Portal")), ("9", _("Public"))]
    new_type = fields.Selection(
        choices, string="User Type", default="1", tracking=True, translate=True,
    )

    def change_type_button(self):
        """Replace each (non-protected) user's groups with the selected type group.

        Raises UserError when a line has no new type selected.
        """
        for line in self:
            if not line.new_type:
                raise UserError(
                    _(
                        "Before clicking the 'Change User Type' button, you must modify the new user type"
                    )
                )
            if line.user_id.id in self._PROTECTED_USER_IDS:
                continue
            # Bug fix: the (6, 0, ids) command expects a *list of group ids*;
            # the original passed the raw selection string (e.g. "1"), which
            # is not a valid id list.
            line.user_id.write({"groups_id": [(6, 0, [int(line.new_type)])]})
        # Clear the selection so the transient records are inert afterwards.
        self.write({"new_type": False})
|
#!/usr/bin/env python
"""
'Triangle'-button on controller to start the mission
'publish setpoints to '/set_point', Twist'
"""
import rospy
import numpy as np
from geometry_msgs.msg import Twist
from std_msgs.msg import Empty, Bool
import time
# Latest pose estimate as [x, y, z, 0, 0, yaw]; written by estimate_callback,
# None until the first /estimate/dead_reckoning message arrives.
est_relative_position = None
# States of the quadcopter
S_INIT = 0
S_PRE_LANDING = 1
S_LANDING = 2
S_LANDED = 3
# Human-readable names, indexed by the state constants above.
STATE_TEXT = [
    "INIT",
    "PRE LANDING",
    "LANDING",
    "LANDED"
]
global_state = S_INIT
# Way-points (x, y, z) flown in order; entry 0 is overwritten with the
# vehicle's position when the mission is initiated.
global_mission = np.array([
    [0.0, 0.0, 2.0],
    [0.0, 0.0, 2.0],
    [0.0, 0.0, 0.2]
])
# d_x = 2
# d_y = 3
# global_mission = np.array([
#     [0.0, 0.0, 1.0],
#     [d_x, 0.0, 1.0],
#     [d_x, d_y, 1.0],
#     [-d_x, d_y, 1.0],
#     [-d_x, -d_y, 1.0],
#     [0.0, -d_y, 1.0],
#     [0.0, 0.0, 1.0],
#     [0.0, 0.0, 0.9],
#     [0.0, 0.0, 0.8],
#     [0.0, 0.0, 0.7],
#     [0.0, 0.0, 0.6],
#     [0.0, 0.0, 0.5]
# ])
#############
# Callbacks #
#############
def estimate_callback(data):
    """Cache the latest pose estimate as [x, y, z, 0, 0, yaw]."""
    global est_relative_position
    est_relative_position = np.array([
        data.linear.x,
        data.linear.y,
        data.linear.z,
        0,
        0,
        data.angular.z,
    ])
def initiate_landing_callback(data):
    """Start the landing mission, anchoring way-point 0 at the current estimate.

    Robustness fix: if no pose estimate has been received yet,
    est_relative_position is still None and the original code crashed on
    `position_at_start_time[:3]`; now the request is logged and ignored.
    """
    global received_start_time
    global position_at_start_time
    global global_mission
    global global_state
    if est_relative_position is None:
        rospy.logwarn("Landing requested before any pose estimate; ignoring")
        return
    received_start_time = rospy.get_time()
    position_at_start_time = est_relative_position
    # First mission way-point becomes the position at mission start.
    global_mission[0] = position_at_start_time[:3]
    global_state = S_PRE_LANDING
#######################################
def print_state(state):
    """Log the human-readable name of the given state constant."""
    rospy.loginfo("State: %s" % STATE_TEXT[state])
def get_distance(point_a, point_b):
    """Return the Euclidean distance between two array-like points."""
    return np.linalg.norm(point_b - point_a)
def is_position_close_to_goal(curr_position, goal, margin):
    """True when each of the first three components of curr_position is
    within margin of the corresponding goal component."""
    deviation = np.abs(curr_position[:3] - goal)
    return np.all(deviation < margin)
def publish_set_point(pub_set_point, set_point):
    """Publish the first three components of set_point as a Twist message."""
    msg = Twist()
    msg.linear.x = set_point[0]
    msg.linear.y = set_point[1]
    msg.linear.z = set_point[2]
    pub_set_point.publish(msg)
def main():
    """Run the automated-landing state machine until touchdown or ROS shutdown.

    States: INIT (idle; initiate_landing_callback moves us on) ->
    PRE_LANDING (settling delay) -> LANDING (fly global_mission way-points
    with speed-limited linear interpolation) -> LANDED (zero set-point,
    /ardrone/land published, loop exits).
    """
    global global_state
    rospy.init_node('automated_landing', anonymous=True)
    rospy.Subscriber('/estimate/dead_reckoning', Twist, estimate_callback)
    rospy.Subscriber('/initiate_automated_landing', Empty, initiate_landing_callback)
    pub_set_point = rospy.Publisher("/set_point", Twist, queue_size=1)
    pub_land = rospy.Publisher("/ardrone/land", Empty, queue_size=10)
    set_point_msg = Twist()  # NOTE(review): unused; messages are built in publish_set_point
    rospy.loginfo("Starting automated landing module")
    # Tuning constants for the interpolation below.
    publish_rate = 10 # Hz
    mission_speed = 0.4 # m/s
    distance_margin = 0.01 # m
    distance_speed_reduction_margin = 1.0 # m
    # mission_speed = 0.6 # m/s
    # distance_margin = 0.01 # m
    # distance_speed_reduction_margin = 0.2 # m
    margin = np.array([distance_margin]*3)  # NOTE(review): unused in this loop
    pre_mission_time = 1 # second(s)
    rate = rospy.Rate(publish_rate) # Hz
    while not rospy.is_shutdown():
        use_cv = True  # NOTE(review): unused
        current_position = est_relative_position  # NOTE(review): unused below
        if global_state == S_INIT:
            # Idle; the state only changes via initiate_landing_callback.
            global_state = S_INIT
        elif global_state == S_PRE_LANDING:
            curr_time = rospy.get_time()
            # Wait pre_mission_time seconds after initiation, then arm the
            # interpolation bookkeeping for the first mission leg.
            if curr_time - received_start_time > pre_mission_time:
                mission_count = 0
                prev_major_set_point = global_mission[0]
                next_major_set_point = global_mission[1]
                # Starting with minor == major forces the "advance leg"
                # branch below on the first LANDING iteration.
                next_minor_set_point = next_major_set_point
                global_state = S_LANDING
        elif global_state == S_LANDING:
            # Time to change to next major setpoint
            distance_to_target = get_distance(next_minor_set_point, next_major_set_point)
            if distance_to_target < distance_margin:
                if mission_count == len(global_mission)-1:
                    # Final way-point reached: command the landing.
                    mission_count = 0
                    pub_land.publish(Empty())
                    global_state = S_LANDED
                else:
                    # Advance to the next leg and size the per-tick step so
                    # the leg is flown at mission_speed.
                    next_major_set_point = global_mission[mission_count+1]
                    translation = next_major_set_point - prev_major_set_point
                    distance = np.linalg.norm(translation)
                    step_time = distance / mission_speed
                    num_steps = step_time * publish_rate
                    step_distance = translation / num_steps
                    # Restart interpolation from the previous way-point.
                    next_minor_set_point = prev_major_set_point
                    prev_major_set_point = next_major_set_point
                    publish_set_point(pub_set_point, next_minor_set_point)
                    mission_count += 1
            else:
                # Slow down near the target, but never below 10% speed.
                if distance_to_target < distance_speed_reduction_margin:
                    speed_reduction = np.maximum(distance_to_target / distance_speed_reduction_margin, 0.1)
                else:
                    speed_reduction = 1.0
                next_minor_set_point += step_distance*speed_reduction
                publish_set_point(pub_set_point, next_minor_set_point)
        elif global_state == S_LANDED:
            publish_set_point(pub_set_point, np.zeros(3))
            rospy.loginfo("Autonomy disabled")
            break
        print_state(global_state)
        rate.sleep()
# Script entry point: run the landing state machine.
if __name__ == '__main__':
    main()
# Fill aa with the even numbers 0, 2, ..., 198, then bb with aa reversed.
# Idiom fix: the original built both lists with manual index loops; a range
# with step 2 and a reversing slice express the same result directly.
aa = list(range(0, 200, 2))
bb = aa[::-1]
print(f"bb[0]에는 {bb[0]}이, bb[99]에는 {bb[99]}값이 입력됩니다.")
from django_tablib import Field, ModelDataset
from api.export import PublicMetricDatasetMixin
from ..export import MetricDatasetMixin
from .models import YumYuck
class YumYuckDatasetMixin(object):
    """Shared django_tablib field/column definitions for YumYuck exports.

    Mixed into both the private and public dataset classes so the two
    exports stay column-for-column identical.
    """
    recorded = Field(header='recorded')
    crop = Field(header='crop')
    yum_before = Field(header='yum before')
    yuck_before = Field(header='yuck before')
    yum_after = Field(header='yum after')
    yuck_after = Field(header='yuck after')

    class Meta:
        model = YumYuck
        fields = [
            'recorded',
            'crop',
            'yum_before',
            'yuck_before',
            'yum_after',
            'yuck_after',
        ]
        # Explicit column order for the exported sheet.
        field_order = (
            'recorded',
            'crop',
            'yum_before',
            'yuck_before',
            'yum_after',
            'yuck_after',
        )
class YumYuckDataset(YumYuckDatasetMixin, MetricDatasetMixin, ModelDataset):
    """YumYuck export dataset with the standard (private) metric columns."""
    pass
class PublicYumYuckDataset(YumYuckDatasetMixin, PublicMetricDatasetMixin,
                           ModelDataset):
    """YumYuck export dataset restricted to the public metric columns."""
    pass
|
# This method takes in a string name and returns a string
# saying goodbye to that name
##### SOLUTION 1
# def goodbye(name):
# # using concatenation
# # return ("Goodbye " + name)
# # using arguments by position
# # return ('Goodbye {personName}'.format(personName=name))
# # using F-Strings - this is similar to JavaScript Template Literals
# return (f'Goodbye {name}')
# print(goodbye("Omolemo"))
##### SOLUTION 2
# def goodbye():
# # Asking the user to enter their name using the input function
# name = input("Please enter your name: ")
# return (f'Goodbye {name}')
# print(goodbye())
|
from csv import reader

# CSV file whose third column holds a 'True'/'False' flag per row.
RESULT_CSV = 'media/results/result_HFK_Export_Prosperus_V1_KWVLybf.csv'


def count_flags(path):
    """Count rows of the CSV at `path` whose third column equals 'True'.

    Returns (true_count, other_count). Rows with fewer than three columns
    count as "other".

    Bug fixes vs. the original: the csv.reader was created but never used —
    the code iterated raw lines and split on ',', which leaves the trailing
    newline on the last field, so a three-column row's flag read 'True\\n'
    and never matched 'True'.
    """
    true_count = 0
    other_count = 0
    with open(path, 'r') as read_obj:
        for row in reader(read_obj):
            if len(row) > 2 and row[2] == 'True':
                true_count += 1
            else:
                other_count += 1
    return true_count, other_count


if __name__ == '__main__':
    counter_t, counter_f = count_flags(RESULT_CSV)
    print(f'True: {counter_t}')
    print(f'False: {counter_f}')
|
#! /usr/bin/python
import sys
import signal
import commands
import os
import time
import string
import logging
import traceback
from Exceptions import *
import Test_utils
def main():
fails=0
utils = Test_utils.Test_utils(sys.argv[0],"test glite-wms-job-submit commmad")
utils.prepare(sys.argv[1:])
logging.info("Test glite-wms-job-submit command")
signal.signal(signal.SIGINT,utils.exit_interrupt)
COMMAND="glite-wms-job-submit"
utils.show_progress("Test 1")
logging.info("Test 1: Check if command %s exists",COMMAND)
utils.info ("Check if command %s exists"%(COMMAND))
utils.run_command ("which %s"%(COMMAND))
# Test --version option
utils.show_progress("Test 2")
try:
logging.info("Test 2: Check --version option")
utils.remove(utils.get_tmp_file())
utils.info ("")
utils.info ("Test --version option")
utils.run_command_continue_on_error ("%s --version >> %s"%(COMMAND,utils.get_tmp_file()))
utils.info ("Check the output command")
version=utils.run_command_continue_on_error("grep \"WMS User Interface version\" %s"%(utils.get_tmp_file()))
utils.info("We are testing %s"%(version))
utils.remove(utils.get_tmp_file())
except (RunCommandError,GeneralError,TimeOutError) , e :
fails=fails+1
utils.log_error("%s"%(utils.get_current_test()))
utils.log_error("Command: %s"%(e.expression))
utils.log_error("Message: %s"%(e.message))
utils.log_traceback("%s"%(utils.get_current_test()))
utils.log_traceback(traceback.format_exc())
# Test --autm-delegaiton (-a) option
utils.show_progress("Test 3")
try:
logging.info("Test 3: Check --autm-delegation option")
utils.remove(utils.get_tmp_file())
utils.info ("")
utils.info ("Test --autm-delegation option")
utils.run_command_continue_on_error ("%s --autm-delegation %s >> %s"%(COMMAND,utils.get_jdl_file(),utils.get_tmp_file()))
utils.info ("Check the command's output")
logging.info ("Check the command's output")
utils.run_command_continue_on_error("cat %s"%(utils.get_tmp_file()))
JOBID=utils.run_command_continue_on_error("grep ':9000' %s"%(utils.get_tmp_file()))
logging.info ("Cancel submitted job with id: %s",JOBID)
utils.run_command_continue_on_error("glite-wms-job-cancel --noint %s"%(JOBID))
utils.remove(utils.get_tmp_file())
except (RunCommandError,GeneralError,TimeOutError) , e :
fails=fails+1
utils.log_error("%s"%(utils.get_current_test()))
utils.log_error("Command: %s"%(e.expression))
utils.log_error("Message: %s"%(e.message))
utils.log_traceback("%s"%(utils.get_current_test()))
utils.log_traceback(traceback.format_exc())
# Test --delegationid option
utils.show_progress("Test 4")
try:
logging.info("Test 4: Check --delegationid option")
Delegation="DelegationTest"
logging.info("Set delegationid to %s",Delegation)
utils.run_command_continue_on_error("glite-wms-job-delegate-proxy -d %s"%(Delegation))
utils.remove(utils.get_tmp_file())
utils.info ("")
utils.info ("Test --delegationid option")
utils.run_command_continue_on_error ("%s --delegationid %s %s >> %s"%(COMMAND,Delegation,utils.get_jdl_file(),utils.get_tmp_file()))
utils.info ("Check the command's output")
logging.info("Check the command's output")
utils.run_command_continue_on_error("cat %s"%(utils.get_tmp_file()))
JOBID=utils.run_command_continue_on_error("grep ':9000' %s"%(utils.get_tmp_file()))
logging.info ("Cancel submitted job with id: %s",JOBID)
utils.run_command_continue_on_error("glite-wms-job-cancel --noint %s"%(JOBID))
utils.remove(utils.get_tmp_file())
except (RunCommandError,GeneralError,TimeOutError) , e :
fails=fails+1
utils.log_error("%s"%(utils.get_current_test()))
utils.log_error("Command: %s"%(e.expression))
utils.log_error("Message: %s"%(e.message))
utils.log_traceback("%s"%(utils.get_current_test()))
utils.log_traceback(traceback.format_exc())
# Test --config option
utils.show_progress("Test 5")
try:
logging.info("Test 5: Check --config option")
utils.remove(utils.get_tmp_file())
utils.info ("")
utils.info ("Test --config option")
utils.run_command_continue_on_error ("%s %s --config %s %s >> %s"%(COMMAND,utils.get_delegation_options(),utils.get_config_file(),utils.get_jdl_file(),utils.get_tmp_file()))
utils.info ("Check the output command")
logging.info("Check the command's output")
utils.run_command_continue_on_error("cat %s"%(utils.get_tmp_file()))
JOBID=utils.run_command_continue_on_error("grep ':9000' %s"%(utils.get_tmp_file()))
logging.info ("Cancel submitted job with id: %s",JOBID)
utils.run_command_continue_on_error("glite-wms-job-cancel --noint %s"%(JOBID))
utils.remove(utils.get_tmp_file())
except (RunCommandError,GeneralError,TimeOutError) , e :
fails=fails+1
utils.log_error("%s"%(utils.get_current_test()))
utils.log_error("Command: %s"%(e.expression))
utils.log_error("Message: %s"%(e.message))
utils.log_traceback("%s"%(utils.get_current_test()))
utils.log_traceback(traceback.format_exc())
# Test option --endpoint
utils.show_progress("Test 6")
try:
logging.info("Test 6: Check --endpoint option")
utils.remove(utils.get_tmp_file())
utils.info ("")
utils.info ("Test --endpoint option")
utils.run_command_continue_on_error ("%s %s --endpoint https://%s:7443/glite_wms_wmproxy_server %s >> %s"%(COMMAND,utils.get_delegation_options(),utils.get_WMS(),utils.get_jdl_file(),utils.get_tmp_file()))
utils.info ("Check the connected endpoint")
logging.info("Check the connected endpoint")
endpoint=utils.run_command_continue_on_error("grep \"Connecting to the service\" %s"%(utils.get_tmp_file()))
utils.info("Connected Endpoint: %s"%(endpoint))
logging.info("Connected Endpoint: %s",endpoint)
utils.info ("Check the output command")
logging.info("Check the command's output")
utils.run_command_continue_on_error("cat %s"%(utils.get_tmp_file()))
JOBID=utils.run_command_continue_on_error("grep ':9000' %s"%(utils.get_tmp_file()))
logging.info ("Cancel submitted job with id: %s",JOBID)
utils.run_command_continue_on_error("glite-wms-job-cancel --noint %s"%(JOBID))
utils.remove(utils.get_tmp_file())
except (RunCommandError,GeneralError,TimeOutError) , e :
fails=fails+1
utils.log_error("%s"%(utils.get_current_test()))
utils.log_error("Command: %s"%(e.expression))
utils.log_error("Message: %s"%(e.message))
utils.log_traceback("%s"%(utils.get_current_test()))
utils.log_traceback(traceback.format_exc())
# Test option --output
utils.show_progress("Test 7")
try:
logging.info("Test 7: Check --output option")
utils.info ("")
utils.info ("Test --output option")
utils.run_command_continue_on_error ("%s %s --output %s %s "%(COMMAND,utils.get_delegation_options(),utils.get_output_file(),utils.get_jdl_file()))
utils.info ("Check the output file")
logging.info("Check the output file")
utils.run_command_continue_on_error("cat %s"%(utils.get_output_file()))
JOBID=utils.run_command_continue_on_error("grep ':9000' %s"%(utils.get_output_file()))
logging.info ("Cancel submitted job with id: %s",JOBID)
utils.run_command_continue_on_error("glite-wms-job-cancel --noint %s"%(JOBID))
utils.remove(utils.get_output_file())
except (RunCommandError,GeneralError,TimeOutError) , e :
fails=fails+1
utils.log_error("%s"%(utils.get_current_test()))
utils.log_error("Command: %s"%(e.expression))
utils.log_error("Message: %s"%(e.message))
utils.log_traceback("%s"%(utils.get_current_test()))
utils.log_traceback(traceback.format_exc())
# Test option --logfile
utils.show_progress("Test 8")
try:
logging.info("Test 8: Check --logfile option")
utils.info ("")
utils.info ("Test --logfile option")
JOBID=utils.run_command_continue_on_error ("%s %s --nomsg --logfile %s %s "%(COMMAND,utils.get_delegation_options(),utils.get_log_file(),utils.get_jdl_file()))
utils.info ("Check the log file")
logging.info("Check the log file")
utils.run_command_continue_on_error("cat %s"%(utils.get_log_file()))
logging.info ("Cancel submitted job with id: %s",JOBID)
utils.run_command_continue_on_error("glite-wms-job-cancel --noint %s"%(JOBID))
except (RunCommandError,GeneralError,TimeOutError) , e :
fails=fails+1
utils.log_error("%s"%(utils.get_current_test()))
utils.log_error("Command: %s"%(e.expression))
utils.log_error("Message: %s"%(e.message))
utils.log_traceback("%s"%(utils.get_current_test()))
utils.log_traceback(traceback.format_exc())
# Test option --nomsg
utils.show_progress("Test 9")
try:
logging.info("Test 9: Check --nomsg option")
utils.info ("")
utils.info ("Test --nomsg option")
JOBID=utils.run_command_continue_on_error ("%s %s --nomsg --config %s %s "%(COMMAND,utils.get_delegation_options(),utils.get_config_file(),utils.get_jdl_file()))
utils.dbg ("Job Id is %s"%(JOBID))
utils.info ("Check the job status")
logging.info("Check the job status")
utils.run_command_continue_on_error ("glite-wms-job-status %s"%(JOBID))
logging.info ("Cancel submitted job with id: %s",JOBID)
utils.run_command_continue_on_error("glite-wms-job-cancel --noint %s"%(JOBID))
except (RunCommandError,GeneralError,TimeOutError) , e :
fails=fails+1
utils.log_error("%s"%(utils.get_current_test()))
utils.log_error("Command: %s"%(e.expression))
utils.log_error("Message: %s"%(e.message))
utils.log_traceback("%s"%(utils.get_current_test()))
utils.log_traceback(traceback.format_exc())
# Test all options together
utils.show_progress("Test 10")
try:
logging.info("Test 10: Check all options together")
utils.info ("")
utils.info ("Test all options together")
utils.run_command_continue_on_error ("%s %s --noint --logfile %s --output %s --endpoint https://%s:7443/glite_wms_wmproxy_server %s"%(COMMAND,utils.get_delegation_options(),utils.get_log_file(),utils.get_output_file(),utils.get_WMS(),utils.get_jdl_file()))
utils.info ("Check the output file")
logging.info("Check the output file")
utils.run_command_continue_on_error ("cat %s"%(utils.get_output_file()))
utils.info ("Check the log file")
logging.info("Check the log file")
utils.run_command_continue_on_error ("cat %s"%(utils.get_log_file()))
JOBID=utils.run_command_continue_on_error("grep ':9000' %s"%(utils.get_output_file()))
logging.info ("Cancel submitted job with id: %s",JOBID)
utils.run_command_continue_on_error("glite-wms-job-cancel --noint %s"%(JOBID))
except (RunCommandError,GeneralError,TimeOutError) , e :
fails=fails+1
utils.log_error("%s"%(utils.get_current_test()))
utils.log_error("Command: %s"%(e.expression))
utils.log_error("Message: %s"%(e.message))
utils.log_traceback("%s"%(utils.get_current_test()))
utils.log_traceback(traceback.format_exc())
utils.remove ("%s"%(utils.get_log_file()))
utils.remove ("%s"%(utils.get_output_file()))
# Test --input option
utils.show_progress("Test 11")
try:
logging.info("Test 11: Check --input option")
utils.info ("")
utils.info ("Test --input option")
utils.info ("Build CE file")
logging.info("Build CE file")
utils.run_command_continue_on_error ("glite-wms-job-list-match %s --config %s --rank --output %s %s"%(utils.get_delegation_options(),utils.get_config_file(),utils.get_output_file(),utils.get_jdl_file()))
logging.info("Execute the command: grep \"No Computing Element\" %s",utils.get_output_file())
result=commands.getstatusoutput("grep \"No Computing Element\" %s"%(utils.get_output_file()))
if result != 0 :
utils.remove(utils.get_tmp_file())
utils.run_command_continue_on_error("awk -F ' ' '/:[[:digit:]]*\// {print $2}' %s > %s"%(utils.get_output_file(),utils.get_tmp_file()))
JOBID=utils.run_command_continue_on_error ("%s %s --config %s --noint --input %s --nomsg %s"%(COMMAND,utils.get_delegation_options(),utils.get_config_file(),utils.get_tmp_file(),utils.get_jdl_file()))
CENAME=utils.get_CE (JOBID)
utils.info ("Check if it has used the right CE")
logging.info("Check if it has used the right CE")
CE=utils.run_command_continue_on_error("head -1 %s"%(utils.get_tmp_file()))
if CE != CENAME :
logging.error("Job has been submitted to wrong CE: %s instead of %s",CENAME,CE)
raise GeneralError("Check destination","Job has been submitted to wrong CE: %s (instead of %s)"%(CENAME,CE))
else:
logging.info("Check success")
utils.info ("Check success")
utils.remove(utils.get_tmp_file())
logging.info ("Cancel submitted job with id: %s",JOBID)
utils.run_command_continue_on_error("glite-wms-job-cancel --noint %s"%(JOBID))
else:
logging.warning("No matching found. TEST SKIPPED")
utils.info ("No matching found. Test skipped.")
except (RunCommandError,GeneralError,TimeOutError) , e :
fails=fails+1
utils.log_error("%s"%(utils.get_current_test()))
utils.log_error("Command: %s"%(e.expression))
utils.log_error("Message: %s"%(e.message))
utils.log_traceback("%s"%(utils.get_current_test()))
utils.log_traceback(traceback.format_exc())
utils.remove (utils.get_tmp_file())
utils.remove (utils.get_output_file())
# Test --resource option
utils.show_progress("Test 12")
try:
logging.info("Test 12: Check --resource option")
utils.info ("")
utils.info ("Test --resource option")
utils.info ("Look for a usable CE")
logging.info("Look for a usable CE")
utils.run_command_continue_on_error ("glite-wms-job-list-match --noint --config %s %s --rank --output %s %s"%(utils.get_config_file(),utils.get_delegation_options(),utils.get_output_file(),utils.get_jdl_file()))
logging.info("Execute command: grep \"No Computing Element\" %s",utils.get_output_file())
result=commands.getstatusoutput("grep \"No Computing Element\" %s"%(utils.get_output_file()))
if result != 0 :
CE_ID=utils.run_command_continue_on_error("awk -F ' ' '/:[[:digit:]]*\// {print $2}' %s | head -1"%(utils.get_output_file()))
JOBID=utils.run_command_continue_on_error ("%s %s --config %s --noint --resource %s --nomsg %s"%(COMMAND,utils.get_delegation_options(),utils.get_config_file(),CE_ID,utils.get_jdl_file()))
CENAME=utils.get_CE(JOBID)
utils.info ("Check if it has used the right CE")
if CE_ID != CENAME :
logging.error("Job has been submitted to the wrong CE: %s (instead of %s)",CENAME,CE_ID)
raise GeneralError ("Check destination ","Job has been submitted to the wrong CE: %s (instead of %s)"%(CENAME,CE_ID))
else:
logging.info("Check success")
utils.info ("Check success")
logging.info ("Cancel submitted job with id: %s",JOBID)
utils.run_command_continue_on_error("glite-wms-job-cancel --noint %s"%(JOBID))
else:
logging.warning("No matching found, TEST SKIPPED")
utils.info ("No matching found. Test skipped.")
except (RunCommandError,GeneralError,TimeOutError) , e :
fails=fails+1
utils.log_error("%s"%(utils.get_current_test()))
utils.log_error("Command: %s"%(e.expression))
utils.log_error("Message: %s"%(e.message))
utils.log_traceback("%s"%(utils.get_current_test()))
utils.log_traceback(traceback.format_exc())
utils.remove (utils.get_output_file())
# Test option --register-only
utils.show_progress("Test 13")
try:
utils.remove (utils.get_tmp_file())
logging.info("Test 13: Check --register-only option")
utils.info ("")
utils.info ("Test --register-only option")
utils.run_command_continue_on_error ("%s %s --config %s --register-only --output %s %s >> %s"%(COMMAND,utils.get_delegation_options(),utils.get_config_file(),utils.get_jobid_file(),utils.get_jdl_file(),utils.get_tmp_file()))
JOBID=utils.run_command_continue_on_error ("tail -1 %s"%(utils.get_jobid_file()))
utils.info ("Check if the output of the command is as expected")
logging.info ("Check if the output of the command is as expected")
utils.run_command_continue_on_error ("grep \"\--start %s\" %s"%(JOBID,utils.get_tmp_file()))
utils.remove (utils.get_tmp_file())
utils.info("wait 10 secs ...")
time.sleep(10)
utils.job_status(JOBID)
if utils.get_job_status().find("Submitted")==-1:
logging.warning("The job %s is not in the correct status. It's status is %s",JOBID,utils.get_job_status())
utils.info ("WARNING: The job %s is not in the correct status. It's status is %s ."%(JOBID,utils.get_job_status()))
else:
logging.info("Check success")
utils.info ("Check success")
except (RunCommandError,GeneralError,TimeOutError) , e :
fails=fails+1
utils.log_error("%s"%(utils.get_current_test()))
utils.log_error("Command: %s"%(e.expression))
utils.log_error("Message: %s"%(e.message))
utils.log_traceback("%s"%(utils.get_current_test()))
utils.log_traceback(traceback.format_exc())
# Test option --start
utils.show_progress("Test 14")
try:
logging.info("Test 14: Check --start option")
utils.info ("")
utils.info ("Test --start option")
utils.run_command_continue_on_error ("%s --start %s"%(COMMAND,JOBID))
utils.wait_until_job_finishes(JOBID)
utils.job_status(JOBID)
if utils.get_job_status().find("Done") == -1 :
logging.warning("The job %s fails. Its final status is %s",JOBID,utils.get_job_status())
utils.info ("WARNING: The job %s fails. Its final status is %s."%(JOBID,utils.get_job_status()))
else:
logging.info("Check success")
utils.info ("Check success")
except (RunCommandError,GeneralError,TimeOutError) , e :
fails=fails+1
utils.log_error("%s"%(utils.get_current_test()))
utils.log_error("Command: %s"%(e.expression))
utils.log_error("Message: %s"%(e.message))
utils.log_traceback("%s"%(utils.get_current_test()))
utils.log_traceback(traceback.format_exc())
# Test option --transfer-files
utils.show_progress("Test 15")
try:
logging.info("Test 15: Check --transfer-files and --proto options")
utils.info ("")
utils.info ("Test --transfer-files and --proto options")
utils.set_isb_jdl(utils.get_jdl_file())
JOBID=utils.run_command_continue_on_error ("%s %s --config %s --register-only --transfer-files --proto gsiftp --nomsg %s"%(COMMAND,utils.get_delegation_options(),utils.get_config_file(),utils.get_jdl_file()))
utils.job_status(JOBID)
if utils.get_job_status().find("Submitted") ==-1 :
logging.warning("The job %s is not in the correct status. Its status is %s",JOBID,utils.get_job_status())
utils.info ("WARNING: The job %s is not in the correct status. Its status is %s."%(JOBID,utils.get_job_status()))
else:
logging.info("Check success")
utils.info ("Check success")
except (RunCommandError,GeneralError,TimeOutError) , e :
fails=fails+1
utils.log_error("%s"%(utils.get_current_test()))
utils.log_error("Command: %s"%(e.expression))
utils.log_error("Message: %s"%(e.message))
utils.log_traceback("%s"%(utils.get_current_test()))
utils.log_traceback(traceback.format_exc())
# Test option --start
utils.show_progress("Test 16")
try:
logging.info("Test 16: Check --start option (after using submit command with options --register-only and transfer-files)")
utils.info ("")
utils.info ("Test --start option (after using submit command with options --register-only and transfer-files)")
utils.run_command_continue_on_error ("%s --start %s"%(COMMAND,JOBID))
# ... wait loop with job-status calls
utils.wait_until_job_finishes (JOBID)
utils.info ("Retrieve the output")
logging.info("Retrieve the output")
utils.run_command_continue_on_error ("glite-wms-job-output --noint --nosubdir --dir %s %s"%(utils.get_job_output_dir(),JOBID))
utils.info ("Check the output file")
logging.info("Check the output file")
if os.path.isfile("%s/std.out"%(utils.get_job_output_dir())) and os.path.isfile("%s/std.err"%(utils.get_job_output_dir())):
utils.run_command_continue_on_error ("grep \"example.jdl\" %s/std.out"%(utils.get_job_output_dir()))
else:
logging.error("Job output is not correct")
raise GeneralError("Check output","Job output is not correct")
except (RunCommandError,GeneralError,TimeOutError) , e :
fails=fails+1
utils.log_error("%s"%(utils.get_current_test()))
utils.log_error("Command: %s"%(e.expression))
utils.log_error("Message: %s"%(e.message))
utils.log_traceback("%s"%(utils.get_current_test()))
utils.log_traceback(traceback.format_exc())
#Test --valid option
utils.show_progress("Test 17")
try:
logging.info("Test 17: Check --valid option")
utils.info ("")
utils.info ("Test --valid option")
# we ask 60 seconds of validity
NOW=time.strftime('%s')
MYEXPIRY=time.strftime('%s', time.localtime(time.time() + 60))
utils.remove(utils.get_tmp_file())
# we need a jdl which doesn't match
utils.set_requirements("\"false\"")
# ... submit a jdl valid for max 1 minute from NOW
JOBID=utils.run_command_continue_on_error("%s %s --config %s --nomsg --valid 00:01 %s"%(COMMAND,utils.get_delegation_options(),utils.get_config_file(),utils.get_jdl_file()))
#wait for job submission
utils.info("Wait 30 secs ...")
time.sleep(30)
# Check the status
utils.remove(utils.get_tmp_file())
utils.run_command_continue_on_error ("glite-wms-job-status %s >> %s"%(JOBID,utils.get_tmp_file()))
STATUS=utils.run_command_continue_on_error ("grep -m 1 \'Current Status\' %s | awk -F: \'{print $2}\'"%(utils.get_tmp_file()))
STATUS = string.strip(STATUS)
if STATUS.find("Waiting") != -1 :
utils.run_command_continue_on_error ("grep \"BrokerHelper: no compatible resources\" %s"%(utils.get_tmp_file()))
logging.info("Job doesn;t match as expected")
utils.info ("Job doesn't match as expected")
else:
logging.error("Job is in a wrong state: %s",STATUS)
raise GeneralError ("Check job state","Job is in a wrong state: %s"%(STATUS))
# Check the jdl
utils.info ("Check the job's jdl")
logging.info("Check the job's jdl")
utils.remove(utils.get_tmp_file())
utils.run_command_continue_on_error ("glite-wms-job-info --noint -j --output %s %s"%(utils.get_tmp_file(),JOBID))
JDLEXPIRY = utils.run_command_continue_on_error ("grep ExpiryTime %s | awk -F= '{print $2}' | sed -e \"s/;//\""%(utils.get_tmp_file()))
## MYEXPIRY and JDLEXPIRY should be equal
if int(MYEXPIRY) < int(JDLEXPIRY) :
logging.error("Expiry time has not be set correctly! (%s != %s)",MYEXPIRY,JDLEXPIRY)
raise GeneralError ("Check expriry time","Expiry time has not be set correctly! (%s != %s)"%(MYEXPIRY,JDLEXPIRY))
else:
logging.info("Attribute ExpiryTime has been correctly set in jdl")
utils.info ("Attribute ExpiryTime has been correctly set in jdl")
# wait until expiration
utils.info("Wait 30 secs ...")
time.sleep (30)
utils.info ("Wait until job aborts... this will take some minutes..")
utils.wait_until_job_finishes (JOBID)
utils.remove(utils.get_tmp_file())
utils.run_command_continue_on_error ("glite-wms-job-status %s >> %s"%(JOBID,utils.get_tmp_file()))
STATUS=utils.run_command_continue_on_error ("grep -m 1 \'Current Status\' %s | awk -F: \'{print $2}\'"%(utils.get_tmp_file()))
STATUS = string.strip(STATUS)
if STATUS.find("Aborted") != -1 :
utils.run_command_continue_on_error ("grep \"request expired\" %s"%(utils.get_tmp_file()))
logging.info("Job correctly aborts")
utils.info ("Job correctly aborts")
else:
logging.error("Job is in a wrong state: %s",STATUS)
raise GeneralError("Check job state","Job is in a wrong state: %s"%(STATUS))
except (RunCommandError,GeneralError,TimeOutError) , e :
fails=fails+1
utils.log_error("%s"%(utils.get_current_test()))
utils.log_error("Command: %s"%(e.expression))
utils.log_error("Message: %s"%(e.message))
utils.log_traceback("%s"%(utils.get_current_test()))
utils.log_traceback(traceback.format_exc())
utils.remove(utils.get_tmp_file())
# Test option --to
utils.show_progress("Test 18")
try:
logging.info("Test 18: Check --to option")
utils.info ("")
utils.info ("Test --to option")
# ... make a timestamp which is 1 minute (max) from now
currenttime=time.time()
NOW = time.strftime('%H:%M',time.localtime(currenttime+60))
NOW_EPOCH=time.strftime("%Y-%m-%d %H:%M:00",time.localtime(currenttime+60))
# we need a jdl which doesn't match
utils.set_requirements ("\"false\"")
#.. submit a jdl valid for max 1 minute from NOW
JOBID=utils.run_command_continue_on_error ("%s %s --nomsg --config %s --to %s %s"%(COMMAND,utils.get_delegation_options(),utils.get_config_file(),NOW,utils.get_jdl_file()))
# wait for job submission
utils.info("Wait 30 secs ...")
time.sleep (30)
# Check the status
utils.remove(utils.get_tmp_file())
utils.run_command_continue_on_error ("glite-wms-job-status %s >> %s"%(JOBID,utils.get_tmp_file()))
STATUS=utils.run_command_continue_on_error("grep -m 1 \'Current Status\' %s | awk -F: \'{print $2}\'"%(utils.get_tmp_file()))
STATUS = string.strip(STATUS)
if STATUS.find("Waiting") != -1 :
utils.run_command_continue_on_error ("grep \"BrokerHelper: no compatible resources\" %s"%(utils.get_tmp_file()))
else:
logging.error("Job is in a wrong state: %s",STATUS)
raise GeneralError("","Job is in a wrong state: %s"%(STATUS))
utils.info ("Check the job's jdl")
logging.info ("Check the job's jdl")
utils.run_command_continue_on_error ("glite-wms-job-info --noint -j --output %s %s"%(utils.get_tmp_file(),JOBID))
JDLEXPIRY=utils.run_command_continue_on_error ("grep ExpiryTime %s | awk -F= '{print $2}' | sed -e \"s/;//\" "%(utils.get_tmp_file()))
JDLEXPIRY=string.strip(JDLEXPIRY)
MYEXPIRY=int(time.mktime(time.strptime(NOW_EPOCH, '%Y-%m-%d %H:%M:%S')))
if MYEXPIRY != int(JDLEXPIRY) :
logging.error("Expiry time has not been set correctly! (%s != %s)",MYEXPIRY,JDLEXPIRY)
raise GeneralError ("Check expiry time","Expiry time has not been set correctly! ( %s != %s )"%(MYEXPIRY,JDLEXPIRY))
else:
logging.info ("Attribute ExpiryTime has been correctly set in jdl")
utils.info ("Attribute ExpiryTime has been correctly set in jdl")
# wait until expiration
utils.info("Wait 30 secs...")
time.sleep (30)
utils.info ("Wait until job aborts... this will take some minutes..")
utils.wait_until_job_finishes (JOBID)
utils.remove(utils.get_tmp_file())
utils.run_command_continue_on_error ("glite-wms-job-status %s >> %s"%(JOBID,utils.get_tmp_file()))
STATUS=utils.run_command_continue_on_error ("grep -m 1 \'Current Status\' %s | awk -F: \'{print $2}\'"%(utils.get_tmp_file()))
STATUS = string.strip(STATUS)
if STATUS.find("Aborted") != -1 :
utils.run_command_continue_on_error ("grep \"request expired\" %s"%(utils.get_tmp_file()))
utils.info ("Job correctly aborts")
logging.info("Job correctly aborts")
else:
logging.error("Job is in a wrong state %s",STATUS)
raise GeneralError("Check job state","Job is in a wrong state: %s"%(STATUS))
except (RunCommandError,GeneralError,TimeOutError) , e :
fails=fails+1
utils.log_error("%s"%(utils.get_current_test()))
utils.log_error("Command: %s"%(e.expression))
utils.log_error("Message: %s"%(e.message))
utils.log_traceback("%s"%(utils.get_current_test()))
utils.log_traceback(traceback.format_exc())
# Test option --default-jdl
utils.show_progress("Test 19")
try:
logging.info("Test 19: Check --default-jdl option")
utils.info ("")
utils.info ("Test --default-jdl option")
utils.remove(utils.get_tmp_file())
# ... make a test default JDL file
os.system("echo \"Attribute = 'Test default jdl';\" > %s"%(utils.get_tmp_file()))
JOBID=utils.run_command_continue_on_error ("%s %s --config %s --nomsg --default-jdl %s %s"%(COMMAND,utils.get_delegation_options(),utils.get_config_file(),utils.get_tmp_file(),utils.get_jdl_file()))
utils.dbg ("Job ID is %s"%(JOBID))
utils.info ("Check the jdl")
utils.remove(utils.get_tmp_file())
utils.run_command_continue_on_error ("glite-wms-job-info -j %s >> %s"%(JOBID,utils.get_tmp_file()))
utils.run_command_continue_on_error ("grep \"Attribute = 'Test default jdl';\" %s"%(utils.get_tmp_file()))
utils.remove(utils.get_tmp_file())
logging.info ("Cancel submitted job with id: %s",JOBID)
utils.run_command_continue_on_error("glite-wms-job-cancel --noint %s"%(JOBID))
except (RunCommandError,GeneralError,TimeOutError) , e :
fails=fails+1
utils.log_error("%s"%(utils.get_current_test()))
utils.log_error("Command: %s"%(e.expression))
utils.log_error("Message: %s"%(e.message))
utils.log_traceback("%s"%(utils.get_current_test()))
utils.log_traceback(traceback.format_exc())
# Test option --json
utils.show_progress("Test 20")
try:
logging.info("Test 20: Check --json option")
utils.info ("")
utils.info ("Test --json option")
utils.remove(utils.get_tmp_file())
utils.run_command_continue_on_error ("%s %s --config %s --json %s >> %s"%(COMMAND,utils.get_delegation_options(),utils.get_config_file(),utils.get_jdl_file(),utils.get_tmp_file()))
utils.dbg ("Get job ID")
message=utils.run_command_continue_on_error("awk '{print $4}' %s"%(utils.get_tmp_file()))
message=message.split("\"")
JOBID=message[1]
utils.info("Check job status")
#utils.remove(utils.get_tmp_file())
utils.run_command_continue_on_error ("glite-wms-job-status %s >> %s"%(JOBID,utils.get_tmp_file()))
utils.info ("Check command's output")
logging.info("Check command's output")
utils.run_command_continue_on_error ("cat %s"%(utils.get_tmp_file()))
utils.remove(utils.get_tmp_file())
logging.info ("Cancel submitted job with id: %s",JOBID)
utils.run_command_continue_on_error ("glite-wms-job-cancel --noint %s"%(JOBID))
except (RunCommandError,GeneralError,TimeOutError) , e :
fails=fails+1
utils.log_error("%s"%(utils.get_current_test()))
utils.log_error("Command: %s"%(e.expression))
utils.log_error("Message: %s"%(e.message))
utils.log_traceback("%s"%(utils.get_current_test()))
utils.log_traceback(traceback.format_exc())
# Test option --collection
utils.show_progress("Test 21")
try:
logging.info("Test 21: Check --collection option")
utils.info ("")
utils.info ("Test --collection option")
utils.remove(utils.get_tmp_file())
# create 3 jdl files based on basic jdl file
logging.info("Create collection's jdl files")
os.mkdir("%s/collection_jdls"%(utils.get_tmp_dir()))
os.system("cp %s %s/collection_jdls/1.jdl"%(utils.get_jdl_file(),utils.get_tmp_dir()))
os.system("cp %s %s/collection_jdls/2.jdl"%(utils.get_jdl_file(),utils.get_tmp_dir()))
os.system("cp %s %s/collection_jdls/3.jdl"%(utils.get_jdl_file(),utils.get_tmp_dir()))
JOBID=utils.run_command_continue_on_error ("%s %s --config %s --nomsg --collection %s/collection_jdls"%(COMMAND,utils.get_delegation_options(),utils.get_config_file(),utils.get_tmp_dir()))
utils.dbg ("Job ID is %s"%(JOBID))
utils.info ("Check the collection status")
logging.info("Check the collection status")
utils.run_command_continue_on_error ("glite-wms-job-status %s >> %s"%(JOBID,utils.get_tmp_file()))
utils.info ("Check command's output")
logging.info("Check command's output")
utils.run_command_continue_on_error ("cat %s"%(utils.get_tmp_file()))
utils.remove(utils.get_tmp_file())
os.system("rm -rf %s/collection_jdls/"%(utils.get_tmp_dir()))
logging.info ("Cancel submitted job with id: %s",JOBID)
utils.run_command_continue_on_error ("glite-wms-job-cancel --noint %s"%(JOBID))
except (RunCommandError,GeneralError,TimeOutError) , e :
fails=fails+1
utils.log_error("%s"%(utils.get_current_test()))
utils.log_error("Command: %s"%(e.expression))
utils.log_error("Message: %s"%(e.message))
utils.log_traceback("%s"%(utils.get_current_test()))
utils.log_traceback(traceback.format_exc())
# Test option --nodes-resource
utils.show_progress("Test 22")
try:
logging.info("Test 22: Check --nodes-resource option")
utils.info ("")
utils.info ("Test --nodes-resource option")
utils.info ("Look for a usable CE")
utils.set_jdl("%s/list.jdl"%(utils.get_tmp_dir()))
FILE=open("%s/list.jdl"%(utils.get_tmp_dir()),"a")
FILE.write("Requirements=RegExp(\"2119/jobmanager\",other.GlueCEUniqueID);\n")
FILE.close()
output=utils.run_command_continue_on_error ("glite-wms-job-list-match --noint --config %s %s %s/list.jdl"%(utils.get_config_file(),utils.get_delegation_options(),utils.get_tmp_dir()))
if output.find("No Computing Element")!=-1:
logging.warning("No matching CE found. TEST SKIPPED")
utils.info ("No matching CE found. Test skipped.")
utils.show_critical("No matching CE found. TEST SKIPPED")
else:
for line in output.split("\n"):
if line.find(" - ")!=-1:
CE_ID=line.split(" - ")[1].strip()
break
if utils.has_external_jdl() == 0 :
utils.set_dag_jdl(utils.get_jdl_file())
JOBID=utils.run_command_continue_on_error ("%s %s --config %s --nomsg --nodes-resource %s %s"%(COMMAND,utils.get_delegation_options(),utils.get_config_file(),CE_ID,utils.get_jdl_file()))
CENAME=utils.get_dag_CE(JOBID)
utils.info ("Check if it has used the right CE")
logging.info("Check if it has used the right CE")
if CE_ID != CENAME :
logging.error("Job has been submitted to the wrong CE: %s (instead of %s)",CENAME,CE_ID)
raise GeneralError("Check destination","Job has been submitted to the wrong CE: %s (instead of %s)"%(CENAME,CE_ID))
else:
logging.info("Check success (%s)",CE_ID)
utils.info ("Check success (%s)"%(CE_ID))
logging.info ("Cancel submitted job with id: %s",JOBID)
utils.run_command_continue_on_error("glite-wms-job-cancel --noint %s"%(JOBID))
except (RunCommandError,GeneralError,TimeOutError) , e :
fails=fails+1
utils.log_error("%s"%(utils.get_current_test()))
utils.log_error("Command: %s"%(e.expression))
utils.log_error("Message: %s"%(e.message))
utils.log_traceback("%s"%(utils.get_current_test()))
utils.log_traceback(traceback.format_exc())
utils.remove (utils.get_output_file())
if fails > 0 :
utils.exit_failure("%s test(s) fail(s)"%(fails))
else:
utils.exit_success()
if __name__ == "__main__":
    main()  # entry point; main() is defined earlier in this file (outside this excerpt)
|
# Generated by Django 2.1.7 on 2019-03-20 06:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add a nullable ``RoomId`` foreign key from payment.Invoice to homedetail.Room."""
    dependencies = [
        ('homedetail', '0011_auto_20190316_2138'),
        ('payment', '0003_auto_20190319_2151'),
    ]
    operations = [
        migrations.AddField(
            model_name='invoice',
            name='RoomId',
            # SET_NULL keeps existing invoices when the referenced room is deleted.
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='homedetail.Room'),
        ),
    ]
|
# Generated by Django 3.0.7 on 2020-06-24 19:54
from django.db import migrations, models
class Migration(migrations.Migration):
    """Rewire Article.field_name as a M2M through articles.ArticleField."""
    dependencies = [
        ('articles', '0008_auto_20200624_2251'),
    ]
    operations = [
        migrations.AlterField(
            model_name='article',
            name='field_name',
            field=models.ManyToManyField(related_name='field_name', through='articles.ArticleField', to='articles.FieldScope'),
        ),
    ]
|
import threading
import time
import ex
import servo
import Motor_contorol
global command
import Camera
def e():
    """Worker loop for the srf02 ultrasonic sensor: re-runs ex.main() forever."""
    while 1:
        ex.main()
def s():
    # Thread target: run the servo routine once.
    servo.main()
def mc():
    """Thread target: run the motor-control routine.

    BUG FIX: the original body was the bare expression ``Motor_contorol``,
    which merely names the module and does nothing, so the thread exited
    immediately. Call its entry point, matching the sibling wrappers
    (``ex.main()``, ``servo.main()``).
    NOTE(review): confirm Motor_contorol actually exposes ``main()``.
    """
    Motor_contorol.main()
def c():
    # Thread target: capture one image from the camera module.
    Camera.get_image()
def command(co):
    """Continuously read console input into co[0].

    *co* is a shared one-slot list so the latest command is visible to the
    caller (used by the commented-out dispatcher thread below).
    """
    while 1:
        co[0] = input()
if __name__=="__main__":
    #th=threading.Thread(target=command,name="th",args=(command,))
    # th.start()
    # Read commands from the console and spawn one worker thread per command.
    while True:
        # NOTE(review): this rebinds the module-level name `command`, shadowing
        # the command() function above (only referenced by the commented-out thread).
        command=input()
        if command=='srf02':
            th1=threading.Thread(target=e,name="th1",args=())
            th1.start()
        elif command=='servo':
            th2=threading.Thread(target=s,name="th2",args=())
            th2.start()
        elif command=='motor_control':
            th3=threading.Thread(target=mc,name="th3",args=())
            th3.start()
        elif command=='camera':
            th4=threading.Thread(target=c,name="th4",args=())
            th4.start()
        elif command=='e':
            # NOTE(review): this sets a brand-new Event that nothing waits on --
            # it does NOT stop the worker threads; only the input loop exits.
            threading.Event().set()
            break
        print("a")  # loop heartbeat, printed after every accepted command
|
#!/usr/bin/env python
# Copyright 2021 University of Chicago
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
from jupyter_client.kernelapp import KernelApp

# Peel off our own --log-file flag; every remaining argument is forwarded
# unchanged to the Jupyter kernel application.
parser = argparse.ArgumentParser("hydra-subkernel")
parser.add_argument("--log-file", help="Output log to file")
opts, kernel_argv = parser.parse_known_args(sys.argv[1:])

if opts.log_file:
    # Redirect both standard streams into the requested log file.
    log_handle = open(opts.log_file, "w")
    sys.stdout = log_handle
    sys.stderr = log_handle

KernelApp.launch_instance(kernel_argv)
|
r"""http://codeforces.com/contest/474/problem/A
"""
import fileinput
# Shared line iterator over stdin / file arguments.
inp = fileinput.input()
# Keyboard middle rows as one flat string; shifting hands left/right maps to
# index arithmetic on this string.
kb = r'''qwertyuiopasdfghjkl;zxcvbnm,./'''
def solve():
    """Read the shift direction and the typed text; print what was intended.

    'R' means the hands were shifted right, so the intended key is one to the
    left of each typed key (offset -1), and vice versa for 'L'.
    """
    offset = {'R': -1, 'L': 1}[next(inp).strip()]
    typed = next(inp).strip()
    decoded = ''.join(kb[kb.find(ch) + offset] for ch in typed)
    print(decoded)
if __name__ == '__main__':
    solve()  # one test case per run
|
import os
import csv
import random
import re

# Read the original training data and keep (cleaned_text, label) pairs.
# Cleaning keeps only ASCII letters, digits and basic punctuation, lowercased.
# Files are closed deterministically via context managers (the original leaked
# all three handles and rebound f_out without closing/flushing the first writer).
with open("train_ori.csv", "r", encoding='utf-8') as f_train:
    freader = csv.reader(f_train, dialect='excel')
    train_header = next(freader)  # skip the header row
    out_data = [[re.sub(r"[^a-zA-Z0-9?.!,']+", " ", rowlist[-2]).lower(), rowlist[-1]]
                for rowlist in freader]
print(out_data[0])
print(out_data[1])
print(out_data[2])
random.shuffle(out_data)
split = int(len(out_data) * 0.8)
# NOTE(review): train.csv receives the *last* 20% of rows and dev.csv the first
# 80%, which looks inverted for a train/dev split -- confirm the intended ratio.
with open("train.csv", 'w', newline='', encoding='utf-8') as f_out:
    csv.writer(f_out, dialect='excel-tab').writerows(out_data[split:])
with open("dev.csv", 'w', newline='', encoding='utf-8') as f_out:
    csv.writer(f_out, dialect='excel-tab').writerows(out_data[:split])
|
#!/usr/bin/python3
"""
Michael duPont - sumvals.py
Example file using the begin(s) library
Usage: python3 sumvals.py [-h] [other args...]
Demo example: invoke makerand | cut -f1 -d- | xargs python3 sumvals.py
"""
#library
import begin
@begin.start
# Parameter descriptions are read from the annotations.
# begin.start can only be used with one function per file.
def main(*vals: 'Values to be summed',
         setabs: 'Sums absolute values'=False):
    """Sums an arbitrary number of integer arguments
    """
    numbers = (int(v) for v in vals)
    if setabs:
        numbers = (abs(n) for n in numbers)
    print(sum(numbers))
|
# open("file path", mode, buffering, encoding, errors)
# Opening modes:
#   "r"   = open for reading only
#   "w"   = open for writing; creates the file if missing, truncates existing content
#   "x"   = exclusive creation; raises an error if the file already exists
#   "a"   = append to the end of the file
#   "b"   = binary mode
#   "t"   = text mode (reading by default)
#   "w+" / "r+"   = same, but allow both reading and writing without wiping content
#   "r+b" / "w+b" = same again, in binary mode
# Buffering modes:
#   0  = buffering off (binary mode only)
#   1  = line buffering (text mode only)
#   -1 = default buffering
#   >1 = fixed buffer block size
# encoding: codec name; defaults to your platform's preferred encoding
# error attributes:
#   'strict'            = raises a ValueError on wrong encoding
#   'ignore'            = ignore errors
#   'replace'           = replaces damaged data with '?'
#   'surrogateescape'   = represents bad bytes as surrogate code points so they
#                         can be restored when re-encoding; useful for unknown encodings
#   'xmlcharrefreplace' = writing only; unsupported chars become XML char references
#   'backslashreplace'  = replaces damaged data with Python backslash escape sequences
#   'namereplace'       = writing only; unsupported chars become \N{...} escape sequences

# Read the whole file at once (context manager closes the handle for us).
with open("AdditionalFiles/test.txt", "r") as handle:
    data = handle.read()
    print(data)

# Read the first line, then the remaining lines as a list.
with open("AdditionalFiles/test.txt", "r") as handle:
    data = handle.readline()
    print(data)
    data = handle.readlines()
    print(data)
|
#programmer : Prasath Ram R
# Count how many characters of the input deviate from the repeating key "PERPER...".
new_cipher = input()
replacement = "PER"
replacementIndex = 0
count = 0
for index in range(len(new_cipher)):
    # BUG FIX: the original compared `cipher[index]`, but `cipher` is undefined
    # (NameError); the input string is stored in `new_cipher`.
    if new_cipher[index] != replacement[replacementIndex]:
        count += 1
    replacementIndex = (replacementIndex + 1) % 3
print(count)
|
# Generated by Django 3.1.7 on 2021-06-22 10:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create a standalone User table and add a Password column to clinic/doctor/patient.

    NOTE(review): passwords are plain CharFields here; confirm the application
    layer hashes them before saving -- storing plaintext passwords is unsafe.
    """
    dependencies = [
        ('hospital_app', '0004_auto_20210617_1613'),
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('User_Name', models.CharField(max_length=50)),
                ('Password', models.CharField(max_length=50)),
                ('User_Type', models.CharField(max_length=50)),
            ],
        ),
        migrations.AddField(
            model_name='clinic',
            name='Password',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
        migrations.AddField(
            model_name='doctor',
            name='Password',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
        migrations.AddField(
            model_name='patient',
            name='Password',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
    ]
|
import pygame
pygame.init()  # initialize pygame

# Screen size settings
screen_width = 480  # width
screen_hight = 640  # height (variable name keeps the original spelling)
screen = pygame.display.set_mode((screen_width, screen_hight))

# Window title
pygame.display.set_caption("GS game")

# Event loop
running = True
while running:
    for event in pygame.event.get():  # which event occurred?
        if event.type == pygame.QUIT:  # is it the quit event?
            running = False  # stop the loop

pygame.quit()  # shut pygame down
#!/usr/bin/python
# coding: utf8
import os
import json
import datetime
import time
import smtplib
import subprocess
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import Encoders
from openpyxl import Workbook
from openpyxl.styles import colors, Font, Border, Side
# Paths of the JSON "database" files and the generated Excel report.
DATABASE_FILE_NAME = '/home/lax/PycharmProjects/learn1/database4'
ADD_DATABASE_FILE_NAME = '/home/lax/PycharmProjects/learn1/add_database4'
EXCEL_TABLE_NAME = '/home/lax/PycharmProjects/learn1/new method test.xlsx'
# Gmail credentials (placeholders -- fill in before running).
SENDER_EADRESS = '***@gmail.com'
SENDER_EPASSWORD = '***'
RECEIVER_EADRESS = '***@gmail.com'
# NOTE(review): day and month are swapped relative to ISO order (%Y-%d-%m);
# the template is used for both parsing and formatting, so it is self-consistent.
DATETIME_TEMPLATE = '%Y-%d-%m %H:%M:%S'
def get_sysresinfo(command):
    """Run *command* in a shell and return its stdout split into tokens.

    Commas are rewritten to dots first so locale-formatted decimal numbers
    can later be parsed with float().
    """
    proc = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
    stdout, _ = proc.communicate()
    return stdout.replace(',', '.').split()
def get_cpu_info():
    """Return user/system/total/idle CPU percentages parsed from `mpstat` output."""
    fields = get_sysresinfo('mpstat')
    user = float(fields[21])
    system = sum(float(v) for v in fields[22:29])
    idle = float(fields[30])
    return {'cpu_user': user, 'cpu_sys': system, 'cpu_total': user + system, 'cpu_idle': idle}
def get_mem_info():
    """Return total/used/free/cached memory (MB) parsed from `free -m` output."""
    fields = get_sysresinfo('free -m')
    return {'mem_total': int(fields[7]),
            'mem_used': int(fields[8]),
            'mem_free': int(fields[9]),
            'mem_cached': int(fields[12])}
def get_hdd_info():
    """Return total/used/free disk space (MB) parsed from `df -m --total` output."""
    fields = get_sysresinfo('df -m --total')
    return {'hdd_total': int(fields[50]),
            'hdd_used': int(fields[51]),
            'hdd_free': int(fields[52])}
###############################################################################################################
class SysinfoDatabase(object):
    """JSON-file-backed store of system resource snapshots.

    Structure (one dict per timestamp, seconds since the epoch):
        {timestamp: {
            'cpu_user': 45.33,
            'cpu_sys': 17.22,
            'cpu_total': 60.11,
            ...
            'hdd_free': 5952
        }}

    NOTE(review): Python 2 code -- dict.keys() is assumed to return a list
    that supports .sort() (several places); under Python 3 this raises.
    """
    def __init__(self, db_file_name, datetime_format):
        # Load the database from disk, creating an empty one if absent.
        self.db_file_name = db_file_name
        self.datetime_format = datetime_format
        if not os.path.isfile(self.db_file_name):
            self.sysinfo_database = {}
            with open(self.db_file_name, 'w') as self.database_file:
                self.database_file.write(json.dumps(self.sysinfo_database))
        else:
            with open(self.db_file_name, 'r') as self.database_file:
                self.sysinfo_database = self.database_file.read().strip()
                self.sysinfo_database = json.loads(self.sysinfo_database)
        # Index of record keys, newest first (keys are strings after a JSON round-trip).
        self.db_index_timestamps = self.sysinfo_database.keys()
        self.db_index_timestamps.sort(reverse=True)
        # Canonical order of metric names inside each record.
        self.sysinfo_keywords = ['cpu_user', 'cpu_sys', 'cpu_total', 'cpu_idle', 'mem_total', 'mem_used',
                                 'mem_free', 'mem_cached', 'hdd_total', 'hdd_used', 'hdd_free']
    def get_last_record_hour(self):  # Returns hour of last record in database
        return datetime.datetime.fromtimestamp(float(self.db_index_timestamps[0])).hour
    def select(self, start=None, end=None, hour_periods_limit=12):  # Select from database
        """Return records between *start* (newer) and *end* (older), keyed by
        datetime objects, covering at most *hour_periods_limit* hour buckets.

        *start*/*end* are strings in self.datetime_format; None means newest /
        oldest record respectively.
        NOTE(review): timestamps are compared as *strings*; this is only correct
        while all keys have the same textual length -- confirm.
        """
        select_result = {}
        if not self.sysinfo_database:
            return select_result
        if start is None:
            start = self.db_index_timestamps[0]
        else:
            start = str(time.mktime(datetime.datetime.strptime(start, self.datetime_format).timetuple()))
        if end is None:
            end = self.db_index_timestamps[len(self.db_index_timestamps) - 1]
        else:
            end = str(time.mktime(datetime.datetime.strptime(end, self.datetime_format).timetuple()))
        if hour_periods_limit < 0:
            hour_periods_limit = 12
        current_period_timestamp = []  # sentinel: never equals an hour string on first pass
        periods_in_select_result = 0
        for index_timestamp in self.db_index_timestamps:
            if start >= index_timestamp >= end:
                index_timestamp_asobject = datetime.datetime.fromtimestamp(float(index_timestamp))
                # Count a new hour bucket whenever the Y,m,d,H prefix changes.
                if current_period_timestamp != index_timestamp_asobject.strftime('%Y,%m,%d,%H'):
                    current_period_timestamp = index_timestamp_asobject.strftime('%Y,%m,%d,%H')
                    periods_in_select_result += 1
                select_result[index_timestamp_asobject] = self.sysinfo_database[index_timestamp]
                if periods_in_select_result >= hour_periods_limit:
                    break
            else:
                # Index is sorted newest-first, so once below *end* we can stop.
                if index_timestamp < end:
                    break
        return select_result
    def average(self, input_data, groupbyhour=True):
        """Average select() output per hour bucket (or over everything if
        groupbyhour is False). Returns (averages_dict, number_of_periods)."""
        input_data_timestamps = input_data.keys()
        input_data_timestamps.sort(reverse=True)
        avg_period_result = {'cpu_user': 0, 'cpu_sys': 0, 'cpu_total': 0, 'cpu_idle': 0,
                             'mem_total': 0, 'mem_used': 0, 'mem_free': 0, 'mem_cached': 0,
                             'hdd_total': 0, 'hdd_used': 0, 'hdd_free': 0
                             }
        average_result = {}
        periods_in_result = 0
        averaged_in_period = 0
        period_timestamp = None
        for in_data_timestamp in input_data_timestamps:
            if not period_timestamp:
                period_timestamp = in_data_timestamp.strftime('%Y,%m,%d,%H')
            if groupbyhour:
                # Hour boundary crossed: finalize the accumulated bucket and reset.
                if period_timestamp != in_data_timestamp.strftime('%Y,%m,%d,%H'):
                    for key in self.sysinfo_keywords:
                        avg_period_result[key] /= averaged_in_period
                    average_result[datetime.datetime.strptime(period_timestamp, '%Y,%m,%d,%H')] = avg_period_result
                    periods_in_result += 1
                    averaged_in_period = 0
                    avg_period_result = {'cpu_user': 0, 'cpu_sys': 0, 'cpu_total': 0, 'cpu_idle': 0,
                                         'mem_total': 0, 'mem_used': 0, 'mem_free': 0, 'mem_cached': 0,
                                         'hdd_total': 0, 'hdd_used': 0, 'hdd_free': 0
                                         }
                    period_timestamp = in_data_timestamp.strftime('%Y,%m,%d,%H')
            single_db_record = input_data[in_data_timestamp]
            for key in self.sysinfo_keywords:
                avg_period_result[key] += single_db_record[key]
            averaged_in_period += 1
        # Flush the final (possibly only) bucket.
        if averaged_in_period != 0:
            for key in self.sysinfo_keywords:
                avg_period_result[key] /= averaged_in_period
            average_result[datetime.datetime.strptime(period_timestamp, '%Y,%m,%d,%H')] = avg_period_result
            periods_in_result += 1
        return average_result, periods_in_result
    def new_record(self, timestamp, data):  # Adding new record into database
        # NOTE(review): the key stored here is a float (mktime result), while
        # keys loaded back from JSON are strings -- confirm this mismatch is intended.
        self.sysinfo_database[time.mktime(timestamp.timetuple())] = data
        with open(self.db_file_name, 'w') as self.database_file:
            self.database_file.write(json.dumps(self.sysinfo_database))
        self.db_index_timestamps = self.sysinfo_database.keys()
        self.db_index_timestamps.sort(reverse=True)
    def erase(self):  # Database full erase
        self.sysinfo_database = {}
        with open(self.db_file_name, 'w') as self.database_file:
            self.database_file.write(json.dumps(self.sysinfo_database))
    def clean(self, size_limit=500):  # Cleans database from old records (default is 500 record limit)
        database_size = len(self.db_index_timestamps)
        # Drop oldest records (index is sorted newest-first) until within the limit.
        while database_size > size_limit:
            del self.sysinfo_database[self.db_index_timestamps[database_size - 1]]
            database_size -= 1
        with open(self.db_file_name, 'w') as self.database_file:
            self.database_file.write(json.dumps(self.sysinfo_database))
        self.db_index_timestamps = self.sysinfo_database.keys()
        self.db_index_timestamps.sort(reverse=True)
###################################################################################################################
def start_html_table():
    """Return the opening HTML boilerplate of the hourly stats report table."""
    return """
    <html>
        <head></head>
        <body>
            <h1>System hour usage stats</h1>
            <table border = '1'>
    """
def extend_html_table(e_html_table, data, timestamp):
    """Append one averaging-period section (header, CPU, memory, HDD rows).

    Placeholders {11}/{12} are the period's date and hour; {12} appears twice
    to render the "HH:00 - HH:59" range. Returns the extended HTML string.
    """
    e_html_table += """
        <tr>
            <td>Averaging period {11} {12}:00 - {12}:59</td>
        </tr>
        <tr>
            <td>CPU</td>
            <td>total:{0:.2f}</td>
            <td>user:{1:.2f}</td>
            <td>system:{2:.2f}</td>
            <td>idle:{3:.2f}</td>
        </tr>
        <tr>
            <td>Memory</td>
            <td>total:{4:d}</td>
            <td>used:{5:d}</td>
            <td>free:{6:d}</td>
            <td>cached:{7:d}</td>
        </tr>
        <tr>
            <td>Hard disk drive</td>
            <td>total:{8:d}</td>
            <td>used:{9:d}</td>
            <td>free:{10:d}</td>
        </tr>
    """.format(data['cpu_total'], data['cpu_user'], data['cpu_sys'],
               data['cpu_idle'], data['mem_total'], data['mem_used'],
               data['mem_free'], data['mem_cached'], data['hdd_total'],
               data['hdd_used'], data['hdd_free'], timestamp.date(),
               timestamp.hour)
    return e_html_table
def end_html_table(e_html_table):
    """Close the report table and the HTML document; return the final string."""
    return e_html_table + """
        </tr>
        </table>
        </body>
    </html>
    """
######################################################################################################################
class ExcelTable(object):
def __init__(self):
self.new_workbook = Workbook()
self.new_worksheet = self.new_workbook.active
self.black_font = Font(color=colors.BLACK)
self.blue_font = Font(color=colors.BLUE)
self.red_font = Font(color=colors.RED)
self.border_allthin = Border(left=Side(border_style='thin', color=colors.BLACK),
right=Side(border_style='thin', color=colors.BLACK),
bottom=Side(border_style='thin', color=colors.BLACK),
top=Side(border_style='thin', color=colors.BLACK))
self.border_LTthik = Border(left=Side(border_style='thick', color=colors.BLACK),
bottom=Side(border_style='thin', color=colors.BLACK),
top=Side(border_style='thick', color=colors.BLACK))
self.border_Tthik = Border(bottom=Side(border_style='thin', color=colors.BLACK),
top=Side(border_style='thick', color=colors.BLACK))
self.border_TRthik = Border(right=Side(border_style='thick', color=colors.BLACK),
bottom=Side(border_style='thin', color=colors.BLACK),
top=Side(border_style='thick', color=colors.BLACK))
self.border_Lthik = Border(left=Side(border_style='thick', color=colors.BLACK),
right=Side(border_style='thin', color=colors.BLACK),
bottom=Side(border_style='thin', color=colors.BLACK),
top=Side(border_style='thin', color=colors.BLACK))
self.border_Rthik = Border(left=Side(border_style='thin', color=colors.BLACK),
right=Side(border_style='thick', color=colors.BLACK),
bottom=Side(border_style='thin', color=colors.BLACK),
top=Side(border_style='thin', color=colors.BLACK))
self.border_LBthik = Border(left=Side(border_style='thick', color=colors.BLACK),
right=Side(border_style='thin', color=colors.BLACK),
bottom=Side(border_style='thick', color=colors.BLACK),
top=Side(border_style='thin', color=colors.BLACK))
self.border_Bthik = Border(left=Side(border_style='thin', color=colors.BLACK),
right=Side(border_style='thin', color=colors.BLACK),
bottom=Side(border_style='thick', color=colors.BLACK),
top=Side(border_style='thin', color=colors.BLACK))
self.border_BRthik = Border(left=Side(border_style='thin', color=colors.BLACK),
right=Side(border_style='thick', color=colors.BLACK),
bottom=Side(border_style='thick', color=colors.BLACK),
top=Side(border_style='thin', color=colors.BLACK))
self.active_cell = self.new_worksheet.cell(row=2, column=2)
self.active_cell.value = 'System 12 hour usage stats'
self.active_cell.font = Font(color=colors.GREEN, italic=True, size=16)
self.table_row_index = 2
self.toprow_border_style = [self.border_LTthik, self.border_Tthik, self.border_Tthik,
self.border_Tthik, self.border_TRthik]
self.middlerow_border_style = [self.border_Lthik, self.border_allthin, self.border_allthin,
self.border_allthin, self.border_Rthik]
self.botmrow_border_style = [self.border_LBthik, self.border_Bthik, self.border_Bthik,
self.border_Bthik, self.border_BRthik]
self.row1_data = []
self.row2_data = []
self.row3_data = []
self.row4_data = []
def table_data_update(self, data, timestamp):
self.row1_data = ['Averaging period %s %s:00 - %s:59' % (timestamp.date(), timestamp.hour, timestamp.hour),
'', '', '', '']
self.row2_data = ['CPU', 'total: %.2f' % (data['cpu_total']), 'user: %.2f' % (data['cpu_user']), 'system: %.2f'
% (data['cpu_sys']), 'idle: %.2f' % (data['cpu_idle'])]
self.row3_data = ['Memory', 'total: %d' % (data['mem_total']), 'used: %d'
% (data['mem_used']), 'free: %d' % (data['mem_free']), 'cached: %d' % (data['mem_cached'])]
self.row4_data = ['Hard disk drive', 'total: %d' % (data['hdd_total']), 'used: %d'
% (data['hdd_used']), 'free: %d' % (data['hdd_free']), '']
def extend_helper(self, exthe_row_data, exthe_border_style, exthe_first_col_font):
    """Write one 5-cell row (worksheet columns B..F) on the next table row.

    The first cell uses *exthe_first_col_font*; the rest use self.black_font.
    """
    self.table_row_index += 1
    for offset, (value, border) in enumerate(zip(exthe_row_data, exthe_border_style)):
        cell = self.new_worksheet.cell(row=self.table_row_index, column=offset + 2)
        self.active_cell = cell
        cell.value = value
        cell.border = border
        cell.font = exthe_first_col_font if offset == 0 else self.black_font
def extend(self, data, timestamp):
    """Append one 4-row stats block (header, CPU, memory, HDD) to the sheet.

    Row order matters: top/middle/bottom border styles form the table frame.
    """
    self.table_data_update(data, timestamp)  # Update table variables
    self.extend_helper(self.row1_data, self.toprow_border_style, self.blue_font)
    self.extend_helper(self.row2_data, self.middlerow_border_style, self.red_font)
    self.extend_helper(self.row3_data, self.middlerow_border_style, self.red_font)
    self.extend_helper(self.row4_data, self.botmrow_border_style, self.red_font)
def save(self, filename):
    """Apply the fixed row/column dimensions and write the workbook to *filename*."""
    self.new_worksheet.row_dimensions[2].height = 20
    column_widths = {'A': 5, 'B': 18, 'C': 15, 'D': 15, 'E': 15, 'F': 15}
    for column, width in column_widths.items():
        self.new_worksheet.column_dimensions[column].width = width
    self.new_workbook.save(filename)
#######################################################################################################################
def send_email(attach_table, excel_file, sender_adress, sender_pass, receiver_adress):
    """E-mail the HTML stats table, optionally attaching the Excel workbook.

    attach_table -- HTML string used as the message body part
    excel_file   -- path of the .xlsx attachment, or None for no attachment
    sender_adress/sender_pass/receiver_adress -- SMTP credentials and recipient
    """
    mail = MIMEMultipart()
    mail['Subject'] = 'Test message new select'
    mail['From'] = 'Python interpreter'
    mail['To'] = 'To Lax-T'
    em_table_part = MIMEText(attach_table, 'html')
    if excel_file is not None:
        em_excel_file = MIMEBase('application', 'octet-stream')
        with open(excel_file, 'rb') as ef:
            em_excel_file.set_payload(ef.read())
        Encoders.encode_base64(em_excel_file)
        em_excel_file.add_header('Content-Disposition', 'attachment', filename='stats.xlsx')
        mail.attach(em_excel_file)
    mail.attach(em_table_part)
    # FIX: pass the port as an int, not the string '465'.
    em_client = smtplib.SMTP_SSL('smtp.gmail.com', 465)
    try:
        em_client.ehlo()
        em_client.login(sender_adress, sender_pass)
        em_client.sendmail(sender_adress, receiver_adress, mail.as_string())
    finally:
        # FIX: always terminate the session cleanly (QUIT) even when
        # login/sendmail raises; close() skipped the SMTP QUIT entirely.
        em_client.quit()
######################################################################################################################
def load_additionad_database(adb_file_name):
    """Load the auxiliary JSON state file, creating it with defaults if absent.

    Returns a dict with keys 'last_em_send_hour' and 'emails_sent'.
    """
    if not os.path.isfile(adb_file_name):
        # First run: persist the default state, then return it.
        defaults = {'last_em_send_hour': 24,
                    'emails_sent': 0}
        with open(adb_file_name, 'w') as a_database_file:
            a_database_file.write(json.dumps(defaults))
        return defaults
    with open(adb_file_name, 'r') as a_database_file:
        return json.loads(a_database_file.read().strip())
def update_additional_database(adb_file_name, a_database):
    """Serialize *a_database* to JSON, overwriting the state file."""
    serialized = json.dumps(a_database)
    with open(adb_file_name, 'w') as handle:
        handle.write(serialized)
######################################################################################################################
if __name__ == '__main__':
    # NOTE(review): Python 2 only -- dict.items() concatenation and the
    # .keys().sort() below rely on lists, not Python 3 views.
    # Snapshot current usage and wall-clock time.
    system_usage_info = dict(get_cpu_info().items() + get_mem_info().items() + get_hdd_info().items())
    system_time = datetime.datetime.now()
    systeminfo_database = SysinfoDatabase(DATABASE_FILE_NAME, DATETIME_TEMPLATE)
    select_result = systeminfo_database.select()
    averaging_result, periods_in_avg_result = systeminfo_database.average(select_result)
    # Auxiliary state: last e-mail hour, and e-mails sent since the last
    # Excel attachment.
    additional_database = load_additionad_database(ADD_DATABASE_FILE_NAME)
    last_em_send_hour = additional_database['last_em_send_hour']
    emails_sent = additional_database['emails_sent']
    current_system_hour = system_time.hour
    html_table = start_html_table()
    if last_em_send_hour != current_system_hour+1 and periods_in_avg_result >= 1:  # check if averaging period changed and need to send email
        avg_res_timestamps = averaging_result.keys()
        avg_res_timestamps.sort(reverse=True)  # newest periods first
        for index, timestamp_key in enumerate(avg_res_timestamps):  # 5 - table size limit
            html_table = extend_html_table(html_table, averaging_result[timestamp_key], timestamp_key)
            if index >= 4:
                break
        html_table = end_html_table(html_table)
        if emails_sent >= 11:  # 11 - is to include excel table in every 12th email (every 12 hours)
            new_excel_table = ExcelTable()
            for index, timestamp_key in enumerate(avg_res_timestamps):  # 12 - table size limit
                new_excel_table.extend(averaging_result[timestamp_key], timestamp_key)
                if index >= 11:
                    break
            new_excel_table.save(EXCEL_TABLE_NAME)
            send_email(html_table, EXCEL_TABLE_NAME, SENDER_EADRESS, SENDER_EPASSWORD, RECEIVER_EADRESS)
            emails_sent = 0
        else:
            send_email(html_table, None, SENDER_EADRESS, SENDER_EPASSWORD, RECEIVER_EADRESS)
            emails_sent += 1
        additional_database['emails_sent'] = emails_sent
        additional_database['last_em_send_hour'] = current_system_hour
        update_additional_database(ADD_DATABASE_FILE_NAME, additional_database)
    # A new sample is recorded on every run, regardless of e-mailing.
    systeminfo_database.new_record(system_time, system_usage_info)
    systeminfo_database.clean()
|
#!/usr/bin/python3
#tests camera input for the presence of a QR code
#3 QR codes need to be present in the image
#if the final stitched to the concatenated QR-codes
#an output is updated to be displayed
#works with FlaskScope
from PIL import Image
from time import sleep
from pyzbar.pyzbar import decode
#import picamera
import blinkt
import os
import time
#import numpy as np
def show_color_line(r, g, b, t):
    """Light all 8 Blinkt! pixels with (r, g, b) and hold for t seconds."""
    blinkt.set_brightness(0.5)
    for pixel in range(8):
        blinkt.set_pixel(pixel, r, g, b)
    blinkt.show()
    time.sleep(t)
def show_color(r, g, b):
    """Light all 8 Blinkt! pixels with (r, g, b); caller decides when to clear."""
    blinkt.set_brightness(0.5)
    for pixel in range(8):
        blinkt.set_pixel(pixel, r, g, b)
    blinkt.show()
# current information is found here:
current_file = "/media/ramdisk/seq_current.txt"
QR2file = {}
code_list = []
# read config file - links the final sequence code to the file
config_file = "/media/ramdisk/seq_config.txt"
cf = open(config_file, 'r')
cf_lines = cf.readlines()
cf.close()
for line in cf_lines:
    # config format: "<filename>\t<code>" per line
    line = line.replace('\n', '')
    parts = line.split("\t")
    QR2file[parts[1]] = parts[0]
    # record all QR codes to check if a legible code was given
    code_list.append(parts[1])
old_id = ""
# initialise QR code container
QR_id = ""
old_QR_id = QR_id
cont = 1
# container for the final code
final_code = ''
# do infinite cycle
while cont == 1:
    # capture image - uses raspbian's raspistill for this task
    os.system('raspistill -w 640 -h 480 -t 1 -o /media/ramdisk/qr_image.png > /media/ramdisk/qr_image.out')
    # decode all QR codes in the image
    qrcodes = decode(Image.open('/media/ramdisk/qr_image.png'))
    # check if any detected code is registered for a task
    # if more than 1 valid code is detected - the last analysed one is used
    if len(qrcodes) == 3:
        final_code_list = []
        for qrcode in qrcodes:
            code = qrcode.data.decode("utf-8")
            final_code_list.append(code)
        # sort the QR codes to make sure the
        # filename is concatenated correctly
        final_code_list.sort()
        final_code = final_code_list[0] + final_code_list[1] + final_code_list[2]
        # indicate that three QR codes were detected (yellow flash)
        show_color(255, 255, 0)
        time.sleep(1)
        blinkt.clear()
        blinkt.show()
        print (final_code)
        if final_code in code_list:
            # save oldID
            old_QR_id = QR_id
            QR_id = final_code
            # valid code detected (green flash)
            show_color_line(0, 255, 0, 0)
            time.sleep(1)
            blinkt.clear()
            blinkt.show()
            # save info for the image to be displayed, only when it changed
            if old_QR_id != QR_id:
                f = open(current_file, 'w+')
                f.write("current_file\t" + QR2file[QR_id])
                f.close()
        else:
            # concatenated code is not registered (red flash)
            show_color_line(255, 0, 0, 0)
            time.sleep(1)
            blinkt.clear()
            blinkt.show()
    else:
        # wrong number of QR codes in view (red flash)
        show_color_line(255, 0, 0, 0)
        time.sleep(1)
        blinkt.clear()
        blinkt.show()
        # report invalid sequence
        f = open(current_file, 'w+')
        f.write("current_file\tseqscope/no_valid_seq.jpg")
        f.close()
        old_QR_id = ''
    # check if stop file exists - same for DisplayScope and this
    if (os.path.exists('/media/ramdisk/qr_code.stop')):
        cont = 0
    sleep(1.5)
|
from django.core.exceptions import ObjectDoesNotExist
from django.core.serializers import json
from django.shortcuts import render,render_to_response
from django.http import HttpResponse
from account.forms import RegisterForm,LoginForm
from account.models import Users
def getid(request):
    """Return the Users row for the session's 'id', or "" when not logged in."""
    try:
        ids = request.session['id']
        return Users.objects.get(id=ids)
    except (KeyError, ObjectDoesNotExist):
        # FIX: bare except replaced -- only "no session id" and "stale id"
        # are expected here; anything else should surface.
        return ""
def connect(request):
    """Render the static connect page."""
    # FIX: removed an unused RegisterForm reference the view never rendered.
    return render(request, 'connect.html')
def register(request):
    """Create a Users account from a posted RegisterForm.

    Renders register.html with "name" (a status string or the new Users row)
    and "phno" (the form to redisplay).
    """
    name = "OLA"
    if request.method == "POST":
        form = RegisterForm(request.POST)
        if form.is_valid():
            form.save()
            email = form.cleaned_data['email']
            password = form.cleaned_data['password']
            try:
                # NOTE(review): passwords appear to be compared in plain
                # text -- consider Django's auth hashing.
                name = Users.objects.get(email=email, password=password)
            except ObjectDoesNotExist:
                pass
        else:
            name = "NOT VALID"
    else:
        name = "Error"
        # FIX: on GET the form was previously bound to the empty POST dict,
        # which made it render with validation errors before the user typed
        # anything; an unbound form renders cleanly.
        form = RegisterForm()
    return render(request, 'register.html', {"name": name, "phno": form})
def login(request):
    """Authenticate by email/password and store the user id in the session."""
    # FIX: request.GET['next'] raised KeyError when 'next' was absent.
    nexturl = request.GET.get('next', '')
    if request.method == 'POST':
        logform = LoginForm(request.POST)
        if logform.is_valid():
            email = logform.cleaned_data['email']
            password = logform.cleaned_data['password']
            try:
                # NOTE(review): plain-text password comparison -- see register().
                name = Users.objects.get(email=email, password=password)
            except ObjectDoesNotExist:
                return HttpResponse("<h1>Invalid User Name or Password</h1>")
            request.session['id'] = name.id
            return render(request, 'login.html', {'name': name, 'nexturl': nexturl})
        return HttpResponse("<h1>You left a field empty</h1>")
    # FIX: a GET request previously fell through and returned None (HTTP 500).
    # NOTE(review): assumes login.html tolerates a missing 'name' -- confirm.
    return render(request, 'login.html', {'nexturl': nexturl})
def logout(request):
    """Log the user out: clear the session's login id and cart."""
    # FIX: the deletions are now independent -- previously a missing 'id'
    # raised KeyError before 'cart' could be removed.
    request.session.pop('id', None)
    request.session.pop('cart', None)
    return render(request, 'loggedout.html')
def dreamsjob(request):
    """Render the dreamsjob chart page with the sample sales/expenses table."""
    name = getid(request)
    array = [
        ['Year', 'Sales', 'Expenses'],
        ['2004', 1000, 400],
        ['2005', 1170, 460],
        ['2006', 660, 1120],
        ['2007', 1030, 540],
    ]
    # FIX: the context previously passed the literal string 'array' as "name"
    # and never passed the user or the chart data at all.
    return render_to_response('dreamsjob.html', {'name': name, 'array': array})
|
# Strip resume.tex down to the body between \begin{document} and
# \end{document}, rewriting the file in place.
# Usage: pandoc resume.md -o resume.tex --template=foo.tex; python clean.py
result = []
should_keep = False
with open('resume.tex') as src:  # FIX: file handles were never closed
    lines = src.read().splitlines()
for line in lines:
    if line.find('{document}') > -1:
        # Toggle at \begin{document} / \end{document}; drop the marker lines.
        should_keep = not should_keep
        continue
    if should_keep:
        result.append(line)
with open('resume.tex', 'w') as dst:
    dst.write("\n".join(result))
|
#!/usr/bin/env python
# encoding: utf-8
class postag(object):
    """Universal Dependencies part-of-speech tag constants.

    ref: http://universaldependencies.org/u/pos/index.html
    """
    # Open class words
    ADJ = "ADJ"
    ADV = "ADV"
    INTJ = "INTJ"
    NOUN = "NOUN"
    PROPN = "PROPN"
    VERB = "VERB"
    # Closed class words
    ADP = "ADP"
    AUX = "AUX"
    CCONJ = "CCONJ"
    DET = "DET"
    NUM = "NUM"
    PART = "PART"
    PRON = "PRON"
    SCONJ = "SCONJ"
    # Other
    PUNCT = "PUNCT"
    SYM = "SYM"
    X = "X"
class dep_v1(object):
    """Universal Dependencies v1 relation labels plus derived label groups."""
    # VERSION
    VERSION = "1.0"
    # subj relations
    nsubj = "nsubj"
    nsubjpass = "nsubjpass"
    csubj = "csubj"
    csubjpass = "csubjpass"
    # obj relations
    dobj = "dobj"
    iobj = "iobj"
    # copular
    cop = "cop"
    # auxiliary
    aux = "aux"
    auxpass = "auxpass"
    # negation
    neg = "neg"
    # non-nominal modifier
    amod = "amod"
    advmod = "advmod"
    # nominal modifers
    nmod = "nmod"
    nmod_poss = "nmod:poss"
    nmod_tmod = "nmod:tmod"
    nmod_npmod = "nmod:npmod"
    # UD v1 has no separate obl relation, so obl aliases nmod here
    # (compare dep_v2, where obl = "obl").
    obl = "nmod"
    obl_npmod = "nmod:npmod"
    # appositional modifier
    appos = "appos"
    # cooordination
    cc = "cc"
    conj = "conj"
    cc_preconj = "cc:preconj"
    # marker
    mark = "mark"
    case = "case"
    # fixed multiword expression
    mwe = "fixed"
    # parataxis
    parataxis = "parataxis"
    # punctuation
    punct = "punct"
    # clausal complement
    ccomp = "ccomp"
    xcomp = "xcomp"
    # relative clause
    advcl = "advcl"
    acl = "acl"
    aclrelcl = "acl:relcl"
    # unknown dep
    dep = "dep"
    SUBJ = {nsubj, csubj, nsubjpass, csubjpass}
    OBJ = {dobj, iobj}
    # note: obl == nmod in v1, so these sets collapse accordingly
    NMODS = {nmod, obl, nmod_npmod, nmod_tmod}
    ADJ_LIKE_MODS = {amod, appos, acl, aclrelcl}
    ARG_LIKE = {nmod, obl, nmod_npmod, nmod_tmod, nsubj, csubj, csubjpass,
                dobj, iobj}
    # trivial symbols to be stripped out
    TRIVIALS = {mark, cc, punct}
    # These dependents of a predicate root shouldn't be included in the
    # predicate phrase.
    PRED_DEPS_TO_DROP = {ccomp, csubj, advcl, acl, aclrelcl, nmod_tmod,
                         parataxis, appos, dep}
    # These dependents of an argument root shouldn't be included in the
    # argument pharse if the argument root is the gov of the predicate root.
    SPECIAL_ARG_DEPS_TO_DROP = {nsubj, dobj, iobj, csubj, csubjpass, neg,
                                aux, advcl, auxpass, ccomp, cop, mark, mwe,
                                parataxis}
    # Predicates of these rels are hard to find arguments.
    HARD_TO_FIND_ARGS = {amod, dep, conj, acl, aclrelcl, advcl}
class dep_v2(object):
    """Universal Dependencies v2 relation labels plus derived label groups.

    Same attribute names as dep_v1, but with the v2 label spellings
    (e.g. nsubj:pass, obj, obl), so the two classes are interchangeable.
    """
    # VERSION
    VERSION = "2.0"
    # subj relations
    nsubj = "nsubj"
    nsubjpass = "nsubj:pass"
    csubj = "csubj"
    csubjpass = "csubj:pass"
    # obj relations
    dobj = "obj"
    iobj = "iobj"
    # auxiliary
    aux = "aux"
    auxpass = "aux:pass"
    # negation
    neg = "neg"
    # copular
    cop = "cop"
    # non-nominal modifier
    amod = "amod"
    advmod = "advmod"
    # nominal modifers
    nmod = "nmod"
    nmod_poss = "nmod:poss"
    nmod_tmod = "nmod:tmod"
    nmod_npmod = "nmod:npmod"
    obl = "obl"
    obl_npmod = "obl:npmod"
    # appositional modifier
    appos = "appos"
    # cooordination
    cc = "cc"
    conj = "conj"
    cc_preconj = "cc:preconj"
    # marker
    mark = "mark"
    case = "case"
    # fixed multiword expression
    mwe = "fixed"
    # parataxis
    parataxis = "parataxis"
    # punctuation
    punct = "punct"
    # clausal complement
    ccomp = "ccomp"
    xcomp = "xcomp"
    # relative clause
    advcl = "advcl"
    acl = "acl"
    aclrelcl = "acl:relcl"
    # unknown dep
    dep = "dep"
    SUBJ = {nsubj, csubj, nsubjpass, csubjpass}
    OBJ = {dobj, iobj}
    NMODS = {nmod, obl, nmod_npmod, nmod_tmod}
    ADJ_LIKE_MODS = {amod, appos, acl, aclrelcl}
    ARG_LIKE = {nmod, obl, nmod_npmod, nmod_tmod, nsubj, csubj, csubjpass,
                dobj, iobj}
    # trivial symbols to be stripped out
    TRIVIALS = {mark, cc, punct}
    # These dependents of a predicate root shouldn't be included in the
    # predicate phrase.
    PRED_DEPS_TO_DROP = {ccomp, csubj, advcl, acl, aclrelcl, nmod_tmod,
                         parataxis, appos, dep}
    # These dependents of an argument root shouldn't be included in the
    # argument pharse if the argument root is the gov of the predicate root.
    SPECIAL_ARG_DEPS_TO_DROP = {nsubj, dobj, iobj, csubj, csubjpass, neg,
                                aux, advcl, auxpass, ccomp, cop, mark, mwe,
                                parataxis}
    # Predicates of these deps are hard to find arguments.
    HARD_TO_FIND_ARGS = {amod, dep, conj, acl, aclrelcl, advcl}
|
# import context
import unittest
import numpy as np
from minesweeper.internal.ms_board import ms_board
# TODO in wherever the gameplay section is, make sure 0 mines ends in one click
# and width * height mines ends in 0 clicks (without making the board visible?)
def count_mines(ms_board):
    """Return the number of mine cells (value -1) on the given board."""
    return int((ms_board.board == -1).sum())
class test_ms_board_setup(unittest.TestCase):
    """Constructor validation: board shape, argument sanity, mine placement."""
    def test_proper_size(self):
        # ms_board(rows, cols, mines) -> .board has shape (rows, cols)
        board = ms_board(5, 3, 3)
        self.assertEqual(board.board.shape, (5, 3))
    def test_size_sanity(self):
        # 1-wide boards are legal; zero or negative dimensions are not
        ms_board(5, 1, 2)
        ms_board(1, 5, 2)
        with self.assertRaises(ValueError):
            ms_board(0, 1, 1)
        with self.assertRaises(ValueError):
            ms_board(1, 0, 1)
        with self.assertRaises(ValueError):
            ms_board(-1, 5, 1)
    def test_mine_sanity(self):
        # mine count may span 0..cells, but not negative or > cells
        ms_board(5, 5, 0)
        ms_board(5, 5, 25)
        with self.assertRaises(ValueError):
            ms_board(5, 5, -1)
        with self.assertRaises(ValueError):
            ms_board(5, 6, 31)
    def test_mine_count(self):
        # exactly i mines must be placed for every requested i in 0..10
        for i in range(11):
            board = ms_board(5, 2, i)
            self.assertEqual(count_mines(board), i)
class test_ms_neighbor_search(unittest.TestCase):
    """neighbors(r, c) on a 3-row x 4-column board (rows 0-2, cols 0-3)."""
    def setUp(self):
        self.board = ms_board(3, 4, 0)

    @staticmethod
    def compare_neighborlists(list1, list2):
        '''
        Order-insensitive list comparison.
        '''
        return sorted(list1) == sorted(list2)

    def test_simple_case(self):
        central_neighbors = self.board.neighbors(1, 2)
        self.assertEqual(len(central_neighbors), 8)
        # FIX: the old expected list contained the cell itself (1, 2) and
        # out-of-range row 3 coordinates, and the comparison result was
        # silently discarded instead of asserted.
        correct_neighbors = [(0, 1), (0, 2), (0, 3),
                             (1, 1), (1, 3),
                             (2, 1), (2, 2), (2, 3)]
        self.assertTrue(self.compare_neighborlists(
            central_neighbors, correct_neighbors))

    def test_edges(self):
        # bottom left
        neighbors = self.board.neighbors(0, 0)
        correct_neighbors = [(0, 1), (1, 0), (1, 1)]
        self.assertTrue(self.compare_neighborlists(
            neighbors, correct_neighbors))
        # left
        neighbors = self.board.neighbors(1, 0)
        correct_neighbors = [(0, 0), (0, 1), (1, 1), (2, 1), (2, 0)]
        self.assertTrue(self.compare_neighborlists(
            neighbors, correct_neighbors))
        # top left
        neighbors = self.board.neighbors(2, 0)
        correct_neighbors = [(1, 0), (1, 1), (2, 1)]
        self.assertTrue(self.compare_neighborlists(
            neighbors, correct_neighbors))
        # top
        neighbors = self.board.neighbors(2, 1)
        correct_neighbors = [(2, 0), (1, 0), (1, 1), (1, 2), (2, 2)]
        self.assertTrue(self.compare_neighborlists(
            neighbors, correct_neighbors))
        # top right
        neighbors = self.board.neighbors(2, 3)
        correct_neighbors = [(2, 2), (1, 2), (1, 3)]
        self.assertTrue(self.compare_neighborlists(
            neighbors, correct_neighbors))
        # right
        neighbors = self.board.neighbors(1, 3)
        correct_neighbors = [(2, 3), (0, 3), (0, 2), (1, 2), (2, 2)]
        self.assertTrue(self.compare_neighborlists(
            neighbors, correct_neighbors))
        # bottom right
        neighbors = self.board.neighbors(0, 3)
        correct_neighbors = [(1, 3), (1, 2), (0, 2)]
        self.assertTrue(self.compare_neighborlists(
            neighbors, correct_neighbors))
        # bottom (FIX: this case was missing -- only the comment existed)
        neighbors = self.board.neighbors(0, 1)
        correct_neighbors = [(0, 0), (1, 0), (1, 1), (1, 2), (0, 2)]
        self.assertTrue(self.compare_neighborlists(
            neighbors, correct_neighbors))
class test_board_neighbor_counts(unittest.TestCase):
    """_assign_neighbors fills non-mine cells with their adjacent-mine count."""
    def setUp(self):
        self.board = ms_board(3, 3, 0)  # empty 3x3; mines are placed by hand
    def test_does_not_override_mine(self):
        self.board.board[0, 0] = -1
        self.board.board[1, 1] = -1
        self.board._assign_neighbors()
        # mine cells must stay -1 after counting
        self.assertEqual(self.board.board[0, 0], -1)
    def test_zero_neighbors(self):
        self.board._assign_neighbors()
        self.assertEqual(self.board.board[1, 1], 0)
    def test_1_neighbor(self):
        self.board.board[0, 0] = -1
        self.board._assign_neighbors()
        self.assertEqual(self.board.board[1, 1], 1)
    def test_4_neighbors(self):
        self.board.board[0, 0] = -1
        self.board.board[1, 0] = -1
        self.board.board[0, 1] = -1
        self.board.board[0, 2] = -1
        self.board._assign_neighbors()
        self.assertEqual(self.board.board[1, 1], 4)
    def test_8_neighbors(self):
        # center cell completely surrounded by mines
        self.board.board[0, 0] = -1
        self.board.board[0, 1] = -1
        self.board.board[0, 2] = -1
        self.board.board[1, 0] = -1
        self.board.board[1, 2] = -1
        self.board.board[2, 0] = -1
        self.board.board[2, 1] = -1
        self.board.board[2, 2] = -1
        self.board._assign_neighbors()
        self.assertEqual(self.board.board[1, 1], 8)
if __name__ == "__main__":
unittest.main()
|
import sys
import os
import json
#path = "/Users/micha/Google Drive/WORKSPACE/travaille/side projects/Python/2013_UWash_IntroDataScience/datasci_course_materials/assignment1"
#os.chdir(path)
def createSentimentDict(afinnfile):
    """Build {term: integer score} from an iterable of tab-delimited lines."""
    return {term: int(score)
            for term, score in (entry.split("\t") for entry in afinnfile)}
def main():
    """Derive sentiment scores for terms missing from the AFINN lexicon.

    Reads the AFINN file and a file of tweet JSON lines (hard-coded paths),
    scores each tweet, then assigns every unknown word the average score of
    the tweets it appears in and prints the word/score pairs.
    NOTE(review): Python 2 only -- the final print is a statement.
    """
    ## open the 2 files that are added via the arguments
    #sent_file = open(sys.argv[1])
    sent_file = open("AFINN-111.txt")
    #tweet_file = open(sys.argv[2])
    tweet_file = open("/Users/micha/Google Drive/WORKSPACE/travaille/side projects/Python/2013_UWash_IntroDataScience/uWash.introDS.hw1/src/output_20.txt")
    ## create dictionary
    sentiDict = createSentimentDict(sent_file)
    newDict = {}
    ## process tweet_file
    sentiTweetList = [] #adds the sentiment of each tweet to list (e.g., first element, first tweet, etc
    #tweetNum = 0
    for line in tweet_file:
        tweet = json.loads(line).get('text',"xyxyempty").split() #.keys() #[u"text"]
        sentCount = 0
        for word in tweet:
            sentCount += sentiDict.get(word,0)
            # if word not in sentiDict check if it's on new Dict
            if sentiDict.get(word) == None:
                # if word is not on newDict: add it and add sentiment score to list
                if newDict.get(word) == None:
                    newDict[word]=[]
        # assign total tweet sentiment to list
        sentiTweetList.append(sentCount)
    # go through all tweets again from the beginning
    tweet_file.seek(0)
    tweetCounter = 0
    for line in tweet_file:
        tweet = json.loads(line).get('text',"xyxyempty").split() #.keys() #[u"text"]
        # if word is newDict, add sentiment of tweet from sentiTweetList
        for word in tweet:
            if newDict.get(word) != None:
                newDict[word].append(sentiTweetList[tweetCounter])
        tweetCounter += 1
    # go through all keys in dict and calculate the average of all values
    for key in newDict:
        newDict[key] = sum(newDict[key])/float(len(newDict[key]))
    # print results key-value pairs
    for key in newDict:
        print key.encode('ascii', 'ignore'), newDict[key]
if __name__ == '__main__':
main()
|
'''1º Contar el número de productos que hay en la home.'''
import unittest
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
class CountElementsHome(unittest.TestCase):
    """Count the featured products shown on the automationpractice home page."""

    def setUp(self) -> None:
        options = Options()
        options.add_argument('--headless')
        options.add_argument('--disable-gpu')
        self.driver = webdriver.Chrome(options=options)
        self.driver.get('http://automationpractice.com/index.php')

    def tearDown(self) -> None:
        # FIX: the browser was never closed, leaking a Chrome process per run.
        self.driver.quit()

    def test_count(self):
        elements = self.driver.find_elements_by_xpath("//ul[@id ='homefeatured']/li//a/img")
        count = len(elements)
        print('Hay:', count, 'elementos')
        # FIX: the test previously asserted nothing (find_elements always
        # returns a list, so the old None check could never fail).
        self.assertGreater(count, 0)
if __name__ == '__main__':
unittest.main()
|
import powers
# write a recursive function that takes as input a nested list of strings, and returns
#a single string with each word separated by a space.
##
##def nested_build(nested_list:[str or [str]]) ->str:
##
##
## unpacked = ''
## for element in nested_list:
## if type(element) == str:
## unpacked += element + ' '
## else:
## unpacked += nested_build(element)
## return unpacked
##
##print(nested_build(['Boo', ['is', 'happy', ['today']]]))
##
# these both functions work perfectly
def nested_build(nested_list: [str or [str]]) -> str:
    """Recursively flatten a nested list of strings into one string.

    Each string contributes itself plus a trailing space, so the result
    ends with a space (matching the original behaviour).
    """
    pieces = []
    for item in nested_list:
        if type(item) == list:
            pieces.append(nested_build(item))
        else:
            pieces.append(item + " ")
    return ''.join(pieces)
print(nested_build(['Boo', ['is', ["Nani", "Kaji Karki"],'happy', ['today']]]))
def sum_numbers(numlist: [int or [int]]) -> int:
    """Sum the integers in a (possibly nested) list.

    Generalized: the original handled only one level of sublists; this
    version recurses, so arbitrarily deep nesting also works. Flat and
    one-level inputs behave exactly as before.
    """
    total = 0
    for item in numlist:
        if type(item) == list:
            total += sum_numbers(item)
        else:
            total += item
    return total
print(sum_numbers([4,5,[1,2]]))
def nested_sum(nested_list: 'nested list of integers') -> int:
    '''Recursively total the integers in a nested list of integers.'''
    return sum(item if type(item) == int else nested_sum(item)
               for item in nested_list)
print(nested_sum([3, 6, 4]))
def read_and_square() -> None:
    """Prompt for numbers and print their squares until the user types 'exit'.

    Delegates the squaring to powers.square; any failure there (bad input,
    etc.) is reported and the loop continues.
    """
    while True:
        number = input('Number: ')
        if number == 'exit':
            break
        try:
            squared = powers.square(number)
            print('{} squared = {}\n'.format(number, squared))
        except Exception:
            # FIX: a bare except also swallowed KeyboardInterrupt/SystemExit.
            print('{} cannot be squared.\n'.format(number))
read_and_square()
|
from __future__ import unicode_literals
from django.db import models
import json
from wagtail.wagtailcore.models import Page
from wagtail.wagtailcore.fields import RichTextField
from wagtail.wagtailadmin.edit_handlers import FieldPanel, MultiFieldPanel
from wagtail.wagtailimages.edit_handlers import ImageChooserPanel
from wagtail.wagtailsearch import index
class HomePage(Page):
    """Site home page: a Wagtail Page with a single rich-text body field."""
    body = RichTextField(blank=True)

    content_panels = Page.content_panels + [
        FieldPanel('body', classname="full")
    ]
class InventoryItem(Page, models.Model):
    """A sellable product page: cost/price, stock level, picture, sales data."""
    cost = models.DecimalField(decimal_places=2, max_digits=5)
    price = models.DecimalField(decimal_places=2, max_digits=5)
    item_number = models.IntegerField(blank=True, null=True)
    SKU = models.TextField(blank=True, null=True, max_length=8)
    description = models.TextField(default="Please include a description of the product")
    stocked_quantity = models.IntegerField(default=0)
    pic = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+',
    )
    content_panels = Page.content_panels + [
        FieldPanel('cost'),
        FieldPanel('price'),
        FieldPanel('description'),
        FieldPanel('item_number'),
        FieldPanel('SKU'),
        ImageChooserPanel('pic'),
    ]
    search_fields = Page.search_fields + (
        index.SearchField('description'),
    )

    def generate_json(self):
        """Return chart-ready rows: {'date', 'Stocked', 'Sold'} dicts.

        Starts with fixed placeholder rows for 2015-1..2015-8 (ints), then
        appends one row per related ItemSalesData record (values as strings,
        matching the original behaviour).
        """
        # BEGIN SEED DATA -- FIX: eight copy-pasted identical dict literals
        # collapsed into a loop; the produced rows are unchanged.
        data = [
            {'date': '2015-{0}'.format(month), 'Stocked': 10, 'Sold': 5}
            for month in range(1, 9)
        ]
        # END SEED DATA
        for month_data in self.itemsalesdata_set.all():
            data.append(
                {
                    'date': '2015-{0}'.format(month_data.month),
                    'Stocked': str(month_data.stocked),
                    'Sold': str(month_data.sold),
                }
            )
        return data
class ItemSalesData(models.Model):
    """Per-month stocked/sold figures for an InventoryItem."""
    product = models.ForeignKey(InventoryItem)
    month = models.IntegerField(default=1)    # calendar month number
    stocked = models.IntegerField(default=0)  # units stocked that month
    sold = models.IntegerField(default=0)     # units sold that month
|
import os
import multiprocessing as mp
def multicore(func, args, num_cores=os.cpu_count()):
    """
    Run *func* over each element of *args* in a process pool.

    Params:
    func -- (function) top-level (picklable) callable
    args -- (list) one argument per task
    num_cores -- (int) max worker processes running simultaneously; defaults
    to the number of virtual cores of the device
    """
    if not args:
        # FIX: Pool(processes=0) raises ValueError; nothing to do anyway.
        return
    # FIX: the context manager guarantees the pool is torn down even when
    # func raises inside map (the old close()/join() pair was skipped then).
    with mp.Pool(processes=min(len(args), num_cores)) as pool:
        pool.map(func, args)
def square(i):
    """Print the square of *i* (demo worker for multicore)."""
    result = i * i
    print(result)
# Reads from stdin: a guest count, a count n, then n multiples. Removes
# every multiple-th entry from the remaining guest list for each multiple
# in turn, then prints the survivors one per line.
import sys
input = sys.stdin.readline  # NOTE(review): shadows the builtin input()
total_invitees = int(input())
n = int(input())
list_multiples = []
for j in range(n):
    multiple = int(input())
    list_multiples.append(multiple)
# Invitees are numbered 1..total_invitees.
list_invitees = []
for integer in range(1, total_invitees + 1):
    list_invitees.append(integer)
for r in range(n):
    current_multiple = list_multiples[r]
    counter = 0
    # Mark every current_multiple-th entry of the REMAINING list with 0.
    # NOTE(review): positional, not by value -- presumably intended, since
    # the list shrinks between passes; confirm against the problem statement.
    for c in range(current_multiple - 1, total_invitees, current_multiple):
        list_invitees[c] = 0
        counter += 1
    total_invitees -= counter
    # Then delete the 0 placeholders (list.remove scans from the front).
    for q in range(counter):
        list_invitees.remove(0)
for element in list_invitees:
    print(element)
|
import unittest
import cnn_framework
from .predictor import Predictor
from .input import *
from .layers import MoleculeConv
from keras.layers.core import Dense
class Test_Input(unittest.TestCase):
    """Smoke test for read_input_file using the minimal predictor fixture."""
    def test_read_input_file(self):
        # Parsing predictor_input.py should build a model whose first two
        # layers are MoleculeConv and Dense with the expected conv dims.
        predictor_test = Predictor()
        # Locate the fixture shipped inside the cnn_framework package.
        path = os.path.join(os.path.dirname(cnn_framework.__file__),
                            'test_data',
                            'minimal_predictor',
                            'predictor_input.py'
                            )
        read_input_file(path, predictor_test)
        predictor_model = predictor_test.model
        self.assertEqual(len(predictor_model.layers), 3)
        self.assertTrue(isinstance(predictor_model.layers[0], MoleculeConv))
        self.assertTrue(isinstance(predictor_model.layers[1], Dense))
        self.assertEqual(predictor_model.layers[0].inner_dim, 38)
        self.assertEqual(predictor_model.layers[0].units, 512)
|
#. try & except demo
x = 3
y = 'bob'
try:
    print(x + y)
except TypeError:
    # FIX: catch the specific failure (int + str) rather than a bare except,
    # which would also hide unrelated errors and KeyboardInterrupt.
    print('dont add string with numbers')
    # quit()
#. put only the dangerous code in the try block; it would be the safe factor
#. too much code in try could make you hard to find where the traceback happened
#. we can use quit() in except indent to not make more traceback further
|
import time
from selenium import webdriver
# Create the browser driver object
from selenium.webdriver.common.by import By
driver = webdriver.Chrome()
# Maximize the window
driver.maximize_window()
# Open the local test page (registration page A)
driver.get("file:///D:/software/UI%E8%87%AA%E5%8A%A8%E5%8C%96%E6%B5%8B%E8%AF%95%E5%B7%A5%E5%85%B7/web%E8%87%AA%E5%8A%A8%E5%8C%96%E5%B7%A5%E5%85%B7%E9%9B%86%E5%90%88/pagetest/%E6%B3%A8%E5%86%8CA.html")
# On registration page A, click the Sina site link
driver.find_element(By.CSS_SELECTOR, "#linkto>a").click()
# Go back in browser history
time.sleep(3)
driver.back()
# Then go forward again
time.sleep(3)
driver.forward()
# And go back once more
time.sleep(3)
driver.back()
# Click the "visit Sina site" link (XPath matches the Chinese link text)
driver.find_element(By.XPATH, "//*[text()='访问 新浪 网站']").click()
time.sleep(3)
# Close the current window
driver.close()
time.sleep(3)
# Wait 3 s
time.sleep(3)
# Quit the driver
driver.quit()
# Generate sample data: 10 draws from roughly N(170, 5).
from scipy.stats import norm
x = [norm.rvs() * 5 + 170. for _ in range(10)]
# FIX: x_1 = x and x_2 = x were aliases of the same list, so sorting one
# sorted them all and the custom-vs-builtin comparison below was meaningless;
# take independent copies instead.
x_1 = list(x)
x_2 = list(x)

def sort_function(X):
    """Selection sort: sort list X ascending in place and return it."""
    N = len(X)
    for i in range(N - 1):
        # Find the index k of the smallest remaining element...
        k = i
        for j in range(i + 1, N):
            if X[k] > X[j]:
                k = j
        # ...then swap it into position i.
        X[k], X[i] = X[i], X[k]
    return X

# Data sorted by the hand-written function
print(sort_function(x_1))
# Python's built-in sort for comparison
x_2.sort()
print(x_2)
|
# Release version metadata (major.minor.patch).
release_major = 1
release_minor = 11
release_patch = 33
# The shared-object ABI revision tracks the patch level.
release_so_abi_rev = release_patch

# These are set by the distribution script
release_vc_rev = None    # presumably the VCS revision; None until a release is cut
release_datestamp = 0    # presumably a date stamp; 0 for unreleased builds
release_type = 'unreleased'
|
"""Python Cookbook 2nd ed.
Tests for ch12_r06_server
"""
import base64
import json
from unittest.mock import Mock
import Chapter_12.ch12_r06_server
import Chapter_12.ch12_r06_user
from pytest import * # type: ignore
@fixture  # type: ignore
def fixed_salt(monkeypatch):
    """Patch os.urandom inside ch12_r06_user so password salts are deterministic."""
    mocked_os = Mock(urandom=Mock(return_value=bytes(range(30))))
    monkeypatch.setattr(Chapter_12.ch12_r06_user, "os", mocked_os)
@fixture  # type: ignore
def dealer_client(monkeypatch, fixed_salt):
    """Flask test client for the dealer app with a fixed shuffle seed (42)."""
    monkeypatch.setenv("DEAL_APP_SEED", "42")
    app = Chapter_12.ch12_r06_server.dealer
    return app.test_client()
def test_openapi_spec(dealer_client):
    """The server publishes its OpenAPI spec with the expected title."""
    spec_response = dealer_client.get("/dealer/openapi.json")
    print(spec_response)
    assert spec_response.status_code == 200
    assert (
        spec_response.get_json()["info"]["title"]
        == "Python Cookbook Chapter 12, recipe 6."
    )
def test_deal_cards_sequence(dealer_client):
    """End-to-end happy path: create player -> create deck -> query -> deal.

    The final hand assertion is reproducible because DEAL_APP_SEED=42 and
    the fixed_salt fixture pin the shuffle and the player hash.
    """
    expected_player = {
        "email": "packt@example.com",
        "name": "Packt",
        "twitter": "https://twitter.com/PacktPub",
        "lucky_number": 8,
    }
    # Create the player; the password is write-only and must not echo back.
    response1 = dealer_client.post(
        path="/dealer/players",
        json={
            "email": "packt@example.com",
            "name": "Packt",
            "twitter": "https://twitter.com/PacktPub",
            "lucky_number": 8,
            "password": "OpenSesame",
        },
        headers={"Accept": "application/json"},
    )
    print(response1.data)
    assert response1.status_code == 201
    response_document = response1.get_json()
    assert response_document["player"] == expected_player
    player_url = response1.headers["Location"]
    player_id = response_document["id"]
    # HTTP Basic credentials: "<player_id>:<password>", base64-encoded.
    credentials = base64.b64encode(f"{player_id}:OpenSesame".encode("utf-8"))
    # Create a six-deck shoe.
    response2 = dealer_client.post(
        path="/dealer/decks",
        json={"decks": 6},
        headers={
            "Accept": "application/json",
            "Authorization": f"BASIC {credentials.decode('ascii')}",
        },
    )
    assert response2.status_code == 201
    deck_url = response2.headers["Location"]
    response_document = response2.get_json()
    assert response_document["status"] == "ok"
    deck_id = response_document["id"]
    # The new deck reports 6 * 52 cards.
    response3 = dealer_client.get(
        deck_url,
        headers={
            "Accept": "application/json",
            "Authorization": f"BASIC {credentials.decode('ascii')}",
        },
    )
    assert response3.status_code == 200
    response3_doc = response3.get_json()
    assert response3_doc["id"] == deck_id
    assert response3_doc["cards"] == 6 * 52
    # Deal one 5-card hand; the exact cards follow from the seeded shuffle.
    response4 = dealer_client.get(
        path=f"/dealer/decks/{deck_id}/hands",
        query_string={"cards": 5},
        headers={
            "Accept": "application/json",
            "Authorization": f"BASIC {credentials.decode('ascii')}",
        },
    )
    assert response4.status_code == 200
    assert response4.json == [
        {
            "cards": [
                {"__class__": "Card", "__init__": {"rank": 10, "suit": "♡"}},
                {"__class__": "Card", "__init__": {"rank": 1, "suit": "♠"}},
                {"__class__": "Card", "__init__": {"rank": 9, "suit": "♡"}},
                {"__class__": "Card", "__init__": {"rank": 11, "suit": "♢"}},
                {"__class__": "Card", "__init__": {"rank": 5, "suit": "♡"}},
            ],
            "hand": 0,
        }
    ]
def test_player_sequence(dealer_client):
    """Create a player, then fetch it singly and via the collection listing.

    The fixed-id assertion at the end holds because the fixed_salt fixture
    makes the generated player id deterministic.
    """
    expected_player = {
        "email": "player@example.com",
        "name": "Farrier",
        "twitter": "https://twitter.com/F_L_Stevens",
        "lucky_number": 8,
    }
    response1 = dealer_client.post(
        path="/dealer/players",
        json={
            "email": "player@example.com",
            "name": "Farrier",
            "twitter": "https://twitter.com/F_L_Stevens",
            "lucky_number": 8,
            "password": "OpenSesame",
        },
        headers={"Accept": "application/json"},
    )
    print(response1.data)
    assert response1.status_code == 201
    response_document = response1.get_json()
    assert response_document["player"] == expected_player
    player_url = response1.headers["Location"]
    player_id = response_document["id"]
    # HTTP Basic credentials: "<player_id>:<password>", base64-encoded.
    credentials = base64.b64encode(f"{player_id}:OpenSesame".encode("utf-8"))
    response2 = dealer_client.get(
        path=player_url,
        headers={
            "Accept": "application/json",
            "Authorization": f"BASIC {credentials.decode('ascii')}",
        },
    )
    assert response2.status_code == 200
    response_document = response2.get_json()
    assert response_document["player"] == expected_player
    response3 = dealer_client.get(
        path="/dealer/players",
        headers={
            "Accept": "application/json",
            "Authorization": f"BASIC {credentials.decode('ascii')}",
        },
    )
    assert response3.status_code == 200
    response_document = response3.get_json()
    # There may be more than one, depending on the order
    # the tests were run.
    assert (
        response_document["players"]["79dcaabe80c651157e6c67dcef7812b0"]
        == expected_player
    )
def test_bad_credentials(dealer_client):
    """Fetching a player with the wrong password must yield 401."""
    expected_player = {
        "email": "test_bad_credentials@example.com",
        "name": "test_bad_credentials",
        "twitter": "https://twitter.com/test_bad_credentials",
        "lucky_number": 8,
    }
    # Create a player with the correct password.
    create_response = dealer_client.post(
        path="/dealer/players",
        json={**expected_player, "password": "OpenSesame"},
        headers={"Accept": "application/json"},
    )
    print(create_response.data)
    assert create_response.status_code == 201
    document = create_response.get_json()
    player_url = create_response.headers["Location"]
    player_id = document["id"]
    # Build BASIC credentials using the WRONG password.
    credentials = base64.b64encode(f"{player_id}:Not-OpenSesame".encode("utf-8"))
    fetch_response = dealer_client.get(
        path=player_url,
        headers={
            "Accept": "application/json",
            "Authorization": f"BASIC {credentials.decode('ascii')}",
        },
    )
    assert fetch_response.status_code == 401
    assert fetch_response.get_json() == {'error': '401 Unauthorized: Invalid credentials'}
|
#!/usr/bin/python
#
# Copyright (C) 2007 Saket Sathe
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# $LastChangedBy$
# $LastChangedDate$
# $LastChangedRevision$
#
#
import threading, random
class ChooseEdges(threading.Thread):
    """ Thread for selecting a set of edges
    Recursively subdivides the adjacency-matrix region into quadrants
    (R-MAT style) according to probA..probD and collects the selected
    edges, merging them into the class-wide ``serialEdgeList`` under
    ``lck`` when the thread finishes.
    \ingroup RandomGraphs
    """
    ## Common serial edge list. Updated by each thread in a semaphoric operation
    serialEdgeList = []
    ## Lock that a thread acquires for performing a semaphoric operation
    lck = threading.Lock()
    evnt = threading.Event()
    ## Thread ID counter (class-wide, incremented per constructed thread)
    id = 0

    def __init__(self, noOfEdges, noSelfLoops, startVertX, endVertX, startVertY, endVertY, probA, probB, probC, probD):
        """ Constructs a selector thread
        @param noOfEdges   Number of edges this thread must select
        @param noSelfLoops When non-zero, edges with equal endpoints are dropped
        @param startVertX  Starting column of the adjacency matrix
        @param endVertX    Ending column of the adjacency matrix
        @param startVertY  Starting row of the adjacency matrix
        @param endVertY    Ending row of the adjacency matrix
        @param probA..probD Quadrant probabilities (expected to sum to 1)
        @see RandomGraphs::DirectedPowerLawRandomGraph
        """
        threading.Thread.__init__(self)
        self.startVertX = startVertX
        self.endVertX = endVertX
        self.startVertY = startVertY
        self.endVertY = endVertY
        self.noOfEdges = noOfEdges
        self.probA = probA
        self.probB = probB
        self.probC = probC
        self.probD = probD
        self.debug = 0
        self.noSelfLoops = noSelfLoops
        # NOTE(review): this increment is not lock-protected; safe only if
        # all threads are constructed from a single thread -- confirm.
        ChooseEdges.id += 1
        ## Thread ID
        self.id = ChooseEdges.id

    def selectVertex(self, sVertX, eVertX, sVertY, eVertY, cumulativeA, cumulativeB, cumulativeC):
        """ Selects start and end vertices recursively
        Draws a quadrant from the cumulative distribution, narrows the
        matrix region to it, and recurses until the region is at most 1x1.
        @param sVertX Starting column of the adjacency matrix
        @param eVertX Ending column of the adjacency matrix
        @param sVertY Starting row of the adjacency matrix
        @param eVertY Ending row of the adjacency matrix
        @param cumulativeA Cumulative distribution boundary for quadrant 0
        @param cumulativeB Cumulative distribution boundary for quadrant 1
        @param cumulativeC Cumulative distribution boundary for quadrant 2
        @return [sVertX, eVertX, sVertY, eVertY] of the final region
        """
        # Base case: region has collapsed to (at most) a single cell.
        if abs(sVertX - eVertX) <= 1 and abs(sVertY - eVertY) <= 1:
            return [sVertX, eVertX, sVertY, eVertY]
        # Single uniform draw selects the quadrant.  (The original also made
        # a dead random.randint(0,3) draw that was always overwritten; it has
        # been removed -- the final `else` now covers any leftover mass.)
        initialSeed = random.uniform(0, 1)
        if initialSeed < cumulativeA:
            selectedQuadrant = 0
        elif initialSeed < cumulativeB:
            selectedQuadrant = 1
        elif initialSeed < cumulativeC:
            selectedQuadrant = 2
        else:
            selectedQuadrant = 3
        # Integer division keeps the boundaries integral; `//` matches the
        # original Python 2 `/` semantics and also works on Python 3.
        midPointX = (eVertX - sVertX) // 2
        midPointY = (eVertY - sVertY) // 2
        if selectedQuadrant == 0:      # top-left quadrant
            eVertX = eVertX - midPointX
            eVertY = eVertY - midPointY
        elif selectedQuadrant == 1:    # top-right quadrant
            sVertX = sVertX + midPointX
            eVertY = eVertY - midPointY
        elif selectedQuadrant == 2:    # bottom-left quadrant
            eVertX = eVertX - midPointX
            sVertY = sVertY + midPointY
        else:                          # bottom-right quadrant
            sVertX = sVertX + midPointX
            sVertY = sVertY + midPointY
        return self.selectVertex(sVertX, eVertX, sVertY, eVertY, cumulativeA, cumulativeB, cumulativeC)

    def run(self):
        """ Start the thread
        Selects ``noOfEdges`` edges; each contributes two consecutive entries
        (start vertex, end vertex).  Self-loops are skipped when
        ``noSelfLoops`` is non-zero.  Results are merged into the shared
        ``serialEdgeList`` under the class lock.
        """
        threadEdgeList = []
        # Bind hot-loop names to locals (attribute lookups are not free).
        noOfEdges = self.noOfEdges
        startVertX = self.startVertX
        endVertX = self.endVertX
        startVertY = self.startVertY
        endVertY = self.endVertY
        selectVertex = self.selectVertex
        cumulativeA = self.probA
        cumulativeB = cumulativeA + self.probB
        cumulativeC = cumulativeB + self.probC
        threadEdgeListAppend = threadEdgeList.append
        for i in range(noOfEdges):
            a = selectVertex(startVertX, endVertX, startVertY, endVertY, cumulativeA, cumulativeB, cumulativeC)
            # The returned region is at most 1x1; take its lower corner.
            # (The original `min(a[0:1])` / `min(a[2:3])` were one-element
            # slices, i.e. exactly a[0] and a[2].)
            startVertexNumber = a[0]
            endVertexNumber = a[2]
            if self.noSelfLoops == 0 or startVertexNumber != endVertexNumber:
                threadEdgeListAppend(startVertexNumber)
                threadEdgeListAppend(endVertexNumber)
        # Merge the local result into the shared list under the class lock;
        # the context manager guarantees release even on error.
        with ChooseEdges.lck:
            ChooseEdges.serialEdgeList.extend(threadEdgeList)
|
from .helpers import is_valid_email
def email(email_str):
    """Return email_str if valid, raise an exception in other case."""
    # Guard clause: reject invalid addresses up front.
    if not is_valid_email(email_str):
        raise ValueError('{} is not a valid email'.format(email_str))
    return email_str
|
"""
Script reads LENS data for selected variables
Notes
-----
Author : Zachary Labe
Date : 28 November 2016
Usage
-----
var,lats,lons = readLENSEnsemble(directory,varq)
"""
def readLENSEnsemble(directory,varq):
    """
    Function reads LENS ensembles netCDF4 data array
    Parameters
    ----------
    directory : string
        working directory for stored LENS files
    varq : string
        variable from LENS
    Returns
    -------
    var : array [ens,year,month,lat,lon] (squeezed)
        selected variable
    lats : 1d array
        latitudes
    lons : 1d array
        longitudes
    Usage
    -----
    var,lats,lons = readLENSEnsemble(directory,varq)
    """
    print '\n>>> Using readLENS function!'
    ### Import modules
    # NOTE(review): Python 2 only code (print statements, list-returning map).
    import numpy as np
    from netCDF4 import Dataset
    # Ensemble member labels: 02-09 zero-padded, 10-35, and 101-105.
    ens = ['02','03','04','05','06','07','08','09'] + \
         map(str,np.arange(10,36,1)) + map(str,np.arange(101,106,1))
    ### Modify directory
    directory = directory + '%s/' % (varq)
    # Preallocate [ensemble, flat month index, lat, lon];
    # 161 years * 12 months, 14 latitudes (subset), 144 longitudes.
    varn = np.empty((len(ens),161*12,14,144)) # 96 for all
    for i in xrange(len(ens)):
        filename = '%s_0%s_1920-2100.nc' % (varq,ens[i])
        # Members >= 101 already carry three digits -- no extra zero pad.
        if int(ens[i]) > 100:
            filename = '%s_%s_1920-2100.nc' % (varq,ens[i])
        data = Dataset(directory + filename)
        lats = data.variables['latitude'][82:] # > 70N 85 11, 65N 82
        lons = data.variables['longitude'][:]
        # Drop the final 240 months (last 20 years) and subset latitudes.
        varn[i,:,:,:] = data.variables['%s' % varq][:-240,82:,:] # -2080
        data.close()
        print 'Completed: Read LENS Ensemble #%s - %s!' % (ens[i],varq)
    # Reshape the flat monthly axis into separate [year, month] axes.
    var = np.reshape(varn,(len(ens),varn.shape[1]/12,12,
                           lats.shape[0],lons.shape[0]))
    var = np.squeeze(np.asarray(var))
    ### Modify Units
    if varq == 'SLP':
        var = var/100. #Pa to hPa
    elif varq == 'T2M' or varq == 'T':
        var = var - 273.15 #K to C
    print '*Completed: Read %s data!' % varq
    return var,lats,lons
import jprops
def Property(key_file,properties_file):
    """Load a Java-style .properties file and return its entries.

    Relative paths are resolved against /var/www/goblin/current/etc/.
    NOTE(review): ``key_file`` is accepted but never used here -- kept
    for caller compatibility; confirm whether it can be dropped.
    """
    if not properties_file.startswith("/"):
        properties_file = "/var/www/goblin/current/etc/" + properties_file
    with open(properties_file) as fp:
        return jprops.load_properties(fp)
|
"""This module contains update handlers"""
import logging
from abc import ABC, abstractmethod
from typing import Callable, Optional, Type
from celery import group
from shaman.forms import Form
from shaman.models import Update
log = logging.getLogger(__name__)
class Handler(ABC):
    """The base class for all update handlers
    Args:
        name: The handler name (recorded on each handled update).
        callback: The callback function for the handler.
        async_callback: The Celery task for the handler.
        form_class: Form class to be used for the handler.
        suppress_form: Stored flag; not consulted within this class.
            Presumably read by the dispatching code -- TODO confirm.
    """
    def __init__(self,
                 name: str,
                 callback: Optional[Callable[[Update], None]] = None,
                 async_callback=None,
                 form_class: Optional[Type[Form]] = None,
                 suppress_form: bool = False):
        self.name = name
        self.callback = callback
        self.async_callback = async_callback
        self.form_class = form_class
        # Optional pre-built form instance; when set, handle_update reuses
        # it instead of instantiating form_class.
        self.form = None
        self.suppress_form = suppress_form
    @abstractmethod
    def match(self, update: Update) -> bool:
        """
        This method is called to determine if the handler matches the update.
        It should always be overridden.
        Args:
            update: The update.
        Returns:
            True if the handler matches the update, False otherwise.
        """
        return False
    def handle_update(self, update: Update):
        """
        Call callback/async_callback/form
        Should be called only if the handler matches the update.
        """
        # Record which handler processed the update before dispatching.
        update.handler = self.name
        update.save()
        # Dispatch priority: sync callback, then Celery task, then form.
        if self.callback:
            self.callback(update)
        elif self.async_callback:
            # Run the Celery task asynchronously with the update's id.
            group(self.async_callback.signature(update.id)).delay()
        elif self.form_class:
            if self.form:
                form = self.form
            else:
                form = self.form_class()
            form.update(update)
class DefaultHandler(Handler):
    """This handler matches any update. Use it as the default handler.
    Args:
        name: The handler name.
        callback: The callback function for the handler.
        async_callback: The Celery task for the handler.
        form_class: Form class to be used for the handler.
    """
    def match(self, update: Update) -> bool:
        # Always claims the update.  NOTE(review): presumably this relies on
        # being registered last so more specific handlers win -- confirm in
        # the dispatching code.
        return True
class CommandHandler(Handler):
    """Handler class to handle a Telegram command.
    Args:
        name: The handler name.
        callback: The callback function for the handler.
        async_callback: The Celery task for the handler.
        form_class: Form class to be used for the handler.
        command: The telegram command to handle.
    """
    def __init__(self, *args, command, **kwargs):
        self.command = command
        super().__init__(*args, **kwargs)
    def match(self, update: Update) -> bool:
        """Return True when the update's message carries this bot command."""
        message = update.message
        if not (message and message.entities):
            return False
        return any(
            entity.type == 'bot_command' and entity.text == self.command
            for entity in message.entities
        )
|
import numpy as np
from scipy import sparse
from scipy.sparse import linalg as splinalg
import sklearn.linear_model as sklin
import sklearn.decomposition
def learn(X_train, y_train, mode='lsmr', reduction=None, n_components=10, alphas=[0.1, 1., 10.], normalize=False):
    """Fit a linear model to (X_train, y_train) and return its weights.

    mode selects the estimator by name (see the ``modes`` dict below).
    reduction ('tsvd' or 'nmf') optionally reduces X_train to
    n_components features before fitting.  Returns (intercept, coef);
    when a reduction was applied, returns ((intercept, coef), transform_fn)
    where transform_fn maps new data into the reduced space.

    NOTE(review): ``alphas`` is a mutable default argument (never mutated
    here, but fragile).  ``LassoCV(alpha=alphas[0])`` below looks wrong --
    scikit-learn's LassoCV takes ``alphas`` -- confirm against the sklearn
    version in use.  This is Python 2 code (print statements).
    """
    # Each helper fits one estimator and returns (intercept_, coef_).
    # Estimators that cannot handle sparse input densify X_train first.
    # NOTE: the closures capture X_train, which may be REASSIGNED by the
    # reduction step below before any of them runs -- order matters.
    def ridge():
        model = sklin.Ridge(normalize=normalize, alpha=alphas[0])
        model.fit(X_train, y_train)
        return (model.intercept_,model.coef_)
    def ridgeCV():
        model = sklin.RidgeCV(normalize=normalize, alphas=np.array(alphas))
        model.fit(X_train, y_train)
        print "FYI alpha = %f" % model.alpha_
        return (model.intercept_,model.coef_)
    def ARDRegression():
        model = sklin.ARDRegression()
        if sparse.issparse(X_train): X_t = X_train.toarray()
        else: X_t = X_train
        model.fit(X_t, y_train)
        return (model.intercept_,model.coef_)
    def BayesianRidge():
        model = sklin.BayesianRidge()
        if sparse.issparse(X_train): X_t = X_train.toarray()
        else: X_t = X_train
        model.fit(X_t, y_train)
        return (model.intercept_,model.coef_)
    def ElasticNetCV():
        model = sklin.ElasticNetCV()
        model.fit(X_train, y_train)
        return (model.intercept_,model.coef_)
    def Lasso():
        model = sklin.Lasso(alpha=alphas[0])
        model.fit(X_train, y_train)
        return (model.intercept_,model.coef_)
    def LassoCV():
        model = sklin.LassoCV(alpha=alphas[0])
        model.fit(X_train, y_train)
        return (model.intercept_,model.coef_)
    def LarsCV():
        model = sklin.LarsCV()
        if sparse.issparse(X_train): X_t = X_train.toarray()
        else: X_t = X_train
        model.fit(X_t, y_train)
        return (model.intercept_,model.coef_)
    def LassoLarsCV():
        model = sklin.LassoLarsCV()
        if sparse.issparse(X_train): X_t = X_train.toarray()
        else: X_t = X_train
        model.fit(X_t, y_train)
        return (model.intercept_,model.coef_)
    def LassoLarsIC():
        model = sklin.LassoLarsIC()
        if sparse.issparse(X_train): X_t = X_train.toarray()
        else: X_t = X_train
        model.fit(X_t, y_train)
        return (model.intercept_,model.coef_)
    def LinearRegression():
        model = sklin.LinearRegression()
        model.fit(X_train, y_train)
        return (model.intercept_,model.coef_)
    def LogisticRegression():
        model = sklin.LogisticRegression()
        model.fit(X_train, y_train)
        return (model.intercept_,model.coef_)
    def Perceptron():
        model = sklin.Perceptron()
        model.fit(X_train, y_train)
        print model.intercept_.shape
        print model.coef_.shape
        return (model.intercept_,model.coef_)
    def SGDRegressor():
        model = sklin.SGDRegressor()
        model.fit(X_train, y_train)
        return (model.intercept_,model.coef_)
    # Optional dimensionality reduction; rebinds X_train, which the fit
    # helpers above pick up through their closures.
    decomp = None
    if reduction=='tsvd':
        print "Decomposing with TruncatedSVD into %d components" % n_components
        decomp = sklearn.decomposition.TruncatedSVD(n_components=n_components)
        X_train = decomp.fit_transform(X_train)
    elif reduction == 'nmf':
        print "Decomposing with NMF into %d components" % n_components
        decomp = sklearn.decomposition.NMF(n_components=n_components, sparseness='data')
        X_train = decomp.fit_transform(X_train)
    # Dispatch table mapping mode name -> fit callable.  The sparse solvers
    # (lsmr/lsqr) have no intercept, hence the literal 0.
    modes = {
        'lsmr': lambda: (0,splinalg.lsmr(X_train,y_train)[0]),
        'lsqr': lambda: (0,splinalg.lsqr(X_train,y_train)[0]),
        'ridge': ridge,
        'ridgeCV': ridgeCV,
        'lasso':Lasso,
        'ARDRegression':ARDRegression,
        'BayesianRidge':BayesianRidge,
        'ElasticNetCV':ElasticNetCV,
        'LassoCV':LassoCV,
        'LarsCV':LarsCV,
        'LassoLarsCV':LassoLarsCV,
        'LassoLarsIC':LassoLarsIC,
        'LinearRegression':LinearRegression,
        'LogisticRegression':LogisticRegression,
        'Perceptron':Perceptron,
        'SGDRegressor':SGDRegressor
    }
    learned_w = modes[mode]()
    # When a reduction was used, callers also need the transform to apply
    # to future data before prediction.
    if decomp != None:
        return (learned_w, lambda X: decomp.transform(X))
    else:
        return learned_w
|
from typing import List
class Solution:
    def maxProfit(self, prices: List[int]) -> int:
        """Best Time to Buy and Sell Stock III: at most two transactions.

        Two linear scans.  best_before[i] is the best single-transaction
        profit within days [0, i] (selling no later than day i);
        best_after[i] is the best single-transaction profit within days
        [i, n) (buying no earlier than day i).  The backward scan reuses
        already-computed profits for later days, which is why it must run
        back-to-front.  The answer is the best split point
        max(best_before[i] + best_after[i]); day 0 is included since
        best_before[0] == 0 pairs with the full one-transaction optimum
        in best_after[0].
        """
        if not prices:
            return 0
        n = len(prices)
        # Forward scan: best profit selling by each day.
        best_before = [0] * n
        lowest = prices[0]
        for day in range(1, n):
            best_before[day] = max(best_before[day - 1], prices[day] - lowest)
            lowest = min(lowest, prices[day])
        # Backward scan: best profit buying from each day on.  prices[day]
        # now plays the role of the buy cost, so we track the max sell price.
        best_after = [0] * n
        highest = prices[-1]
        for day in range(n - 2, -1, -1):
            best_after[day] = max(best_after[day + 1], highest - prices[day])
            highest = max(highest, prices[day])
        # Best split of the two transactions; both arrays are non-negative.
        return max(best_before[day] + best_after[day] for day in range(n))
class Solution:
    """ no comments """
    def maxProfit(self, prices: List[int]) -> int:
        """Return the max total profit from at most two buy/sell transactions.

        firstProfit[i]: best single-transaction profit within days [0, i].
        secondProfit[i]: best single-transaction profit within days [i, n).
        The answer is the best split max(firstProfit[i] + secondProfit[i]).
        O(n) time, O(n) space.
        """
        if not prices:
            return 0
        firstProfit = [0 for _ in range(len(prices))]
        cost = prices[0]
        for i in range(1, len(prices)):
            firstProfit[i] = max(firstProfit[i - 1], prices[i] - cost)
            cost = min(cost, prices[i])
        secondProfit = [0 for _ in range(len(prices))]
        # Backward scan: prices[i] is now the buy cost, so track the max
        # sell price seen so far.
        maxSellPrice = prices[-1]
        for i in range(len(prices) - 2, -1, -1):
            secondProfit[i] = max(secondProfit[i + 1], maxSellPrice - prices[i])
            maxSellPrice = max(maxSellPrice, prices[i])
        # (Leftover debug prints of the two profit arrays removed.)
        res = 0
        for i in range(len(firstProfit)):
            res = max(res, firstProfit[i] + secondProfit[i])
        return res
print(Solution().maxProfit([3,3,5,0,0,3,1,4]))
|
import numpy as np
import refltool as rt
from os import chdir
from pylab import *
############
##import current data
#this one is half-sapph-no-ito (half sapphire sample, no ITO coating)
# NOTE(review): reflm_std presumably normalizes the sample VNA traces
# against the two mirror reference traces -- confirm in refltool.
chdir("../data/expt2")
data_new=rt.reflm_std("mirror_VNATrc.001","mirror_VNATrc.002","half-sapph-no-ito_VNATrc.001","half-sapph-no-ito_VNATrc.002")
###########
##import old data (path is relative to the chdir above)
chdir("../sapph-old")
data_old=rt.reflm_std("mirror_VNATrc.001","mirror_VNATrc.002","sapphire_VNATrc.001","sapphire_VNATrc.002")
########
#inputs
#######
# Column 0 holds frequency; columns 2:3 the quantity plotted below.
freq=data_new[:,0]
plot(freq,data_old[:,2:3])
plot(freq,data_new[:,2:3])
#plot(freq,abs(rt.f2c(data_new[:,2:4]))**2)
#plot(freq,abs(rt.f2c(data_old[:,2:4]))**2)
ylim(-1.5,1.5)
show()
'''
#incident e-field phase at surface
ph_i=0.
#..gives incident e-field, normalized to E_max = 1
e_inc=np.exp(1j*ph_i)
#reflected e-field
e_refl= e_inc*rt.f2c(data[:,2:4])[:,0]
#total field at surface is real part of ( incident e-field at surface plus reflected e-field at surface)
e_tot=e_inc+e_refl
plot(freq, e_tot.real)
plot(freq, e_refl.real)
ylim(-1.5,1.5)
show()
'''
|
from behave import *
from behave.model import Table, Row
import copy
from test_project.test_utils.dbt_test_utils import DBTVAULTGenerator
use_step_matcher("parse")
dbtvault_generator = DBTVAULTGenerator()
def _ensure_model_dict(context, attr_name, model_name):
    """Ensure ``context.<attr_name>`` is a dict holding a (possibly empty)
    mapping for ``model_name``; create/replace falsy values as the original
    inline code did."""
    mapping = getattr(context, attr_name, None)
    if not mapping:
        setattr(context, attr_name, {model_name: dict()})
    elif not mapping.get(model_name, None):
        mapping[model_name] = dict()


def set_stage_metadata(context, model_name) -> dict:
    """
    Setup the context to include required staging metadata and return as a dictionary to
    support providing the variables in the command line to dbt instead
    """
    # Accumulate stage names across calls; first call just records the name.
    if hasattr(context, "processed_stage_name"):
        context.processed_stage_name = context.dbt_test_utils.process_stage_names(
            context.processed_stage_name,
            model_name)
    else:
        context.processed_stage_name = model_name
    context.include_source_columns = getattr(context, "include_source_columns", True)
    context.hashing = getattr(context, "hashing", "MD5")
    # De-duplicated setup of the per-model column mappings (was two copies
    # of the same if/else ladder).
    _ensure_model_dict(context, "hashed_columns", model_name)
    _ensure_model_dict(context, "derived_columns", model_name)
    dbt_vars = {
        "include_source_columns": context.include_source_columns,
        "hashed_columns": context.hashed_columns,
        "derived_columns": context.derived_columns,
        "hash": context.hashing
    }
    return dbt_vars
@given("the {model_name} table does not exist")
def check_exists(context, model_name):
"""Check the model exists"""
logs = context.dbt_test_utils.run_dbt_operation(macro_name="check_model_exists",
args={"model_name": model_name})
context.target_model_name = model_name
assert f"Model {model_name} does not exist." in logs
@given("the raw vault contains empty tables")
def clear_schema(context):
context.dbt_test_utils.replace_test_schema()
model_names = context.dbt_test_utils.context_table_to_dict(table=context.table,
orient="list")
context.vault_model_names = model_names
models = [name for name in DBTVAULTGenerator.flatten([v for k, v in model_names.items()]) if name]
for model_name in models:
headings_dict = dbtvault_generator.evaluate_hashdiff(copy.deepcopy(context.vault_structure_columns[model_name]))
headings = list(DBTVAULTGenerator.flatten([v for k, v in headings_dict.items() if k != "source_model"]))
row = Row(cells=[], headings=headings)
empty_table = Table(headings=headings, rows=row)
seed_file_name = context.dbt_test_utils.context_table_to_csv(table=empty_table,
model_name=model_name)
dbtvault_generator.add_seed_config(seed_name=seed_file_name,
seed_config=context.seed_config[model_name])
logs = context.dbt_test_utils.run_dbt_seed(seed_file_name=seed_file_name)
assert "Completed successfully" in logs
@step("the {model_name} {vault_structure} is empty")
@given("the {model_name} {vault_structure} is empty")
def load_empty_table(context, model_name, vault_structure):
"""Creates an empty table"""
context.target_model_name = model_name
columns = context.vault_structure_columns
if vault_structure == "stage":
headings = context.stage_columns[model_name]
else:
headings = list(DBTVAULTGenerator.flatten([val for key, val in columns[model_name].items()]))
row = Row(cells=[], headings=headings)
empty_table = Table(headings=headings, rows=row)
seed_file_name = context.dbt_test_utils.context_table_to_csv(table=empty_table,
model_name=model_name)
dbtvault_generator.add_seed_config(seed_name=seed_file_name,
seed_config=context.seed_config[model_name])
logs = context.dbt_test_utils.run_dbt_seed(seed_file_name=seed_file_name)
if not vault_structure == "stage":
metadata = {"source_model": seed_file_name, **context.vault_structure_columns[model_name]}
context.vault_structure_metadata = metadata
dbtvault_generator.raw_vault_structure(model_name, vault_structure, **metadata)
logs = context.dbt_test_utils.run_dbt_model(mode="run", model_name=model_name)
assert "Completed successfully" in logs
@step("the {model_name} {vault_structure} is already populated with data")
@given("the {model_name} {vault_structure} is already populated with data")
def load_populated_table(context, model_name, vault_structure):
"""
Create a table with data pre-populated from the context table.
"""
context.target_model_name = model_name
seed_file_name = context.dbt_test_utils.context_table_to_csv(table=context.table,
model_name=model_name)
dbtvault_generator.add_seed_config(seed_name=seed_file_name,
seed_config=context.seed_config[model_name])
context.dbt_test_utils.run_dbt_seed(seed_file_name=seed_file_name)
metadata = {"source_model": seed_file_name, **context.vault_structure_columns[model_name]}
context.vault_structure_metadata = metadata
dbtvault_generator.raw_vault_structure(model_name, vault_structure, **metadata)
logs = context.dbt_test_utils.run_dbt_model(mode="run", model_name=model_name)
assert "Completed successfully" in logs
@step("I load the {model_name} {vault_structure}")
def load_table(context, model_name, vault_structure):
metadata = {"source_model": context.processed_stage_name, **context.vault_structure_columns[model_name]}
config = dbtvault_generator.append_end_date_config(context, dict())
context.vault_structure_metadata = metadata
dbtvault_generator.raw_vault_structure(model_name=model_name,
vault_structure=vault_structure,
config=config,
**metadata)
logs = context.dbt_test_utils.run_dbt_model(mode="run", model_name=model_name)
assert "Completed successfully" in logs
@step("I use insert_by_period to load the {model_name} {vault_structure} "
"by {period} with date range: {start_date} to {stop_date}")
def load_table(context, model_name, vault_structure, period, start_date=None, stop_date=None):
metadata = {"source_model": context.processed_stage_name,
**context.vault_structure_columns[model_name]}
config = {"materialized": "vault_insert_by_period",
"timestamp_field": "LOAD_DATE",
"start_date": start_date,
"stop_date": stop_date,
"period": period}
config = dbtvault_generator.append_end_date_config(context, config)
context.vault_structure_metadata = metadata
dbtvault_generator.raw_vault_structure(model_name=model_name,
vault_structure=vault_structure,
config=config,
**metadata)
is_full_refresh = context.dbt_test_utils.check_full_refresh(context)
logs = context.dbt_test_utils.run_dbt_model(mode="run", model_name=model_name,
full_refresh=is_full_refresh)
assert "Completed successfully" in logs
@step("I use insert_by_period to load the {model_name} {vault_structure} by {period}")
def load_table(context, model_name, vault_structure, period):
metadata = {"source_model": context.processed_stage_name,
**context.vault_structure_columns[model_name]}
config = {"materialized": "vault_insert_by_period",
"timestamp_field": "LOAD_DATE",
"date_source_models": context.processed_stage_name,
"period": period}
config = dbtvault_generator.append_end_date_config(context, config)
context.vault_structure_metadata = metadata
dbtvault_generator.raw_vault_structure(model_name=model_name,
vault_structure=vault_structure,
config=config,
**metadata)
is_full_refresh = context.dbt_test_utils.check_full_refresh(context)
logs = context.dbt_test_utils.run_dbt_model(mode="run", model_name=model_name,
full_refresh=is_full_refresh)
assert "Completed successfully" in logs
@step("I load the vault")
def load_vault(context):
models = [name for name in DBTVAULTGenerator.flatten([v for k, v in context.vault_model_names.items()]) if name]
for model_name in models:
metadata = {**context.vault_structure_columns[model_name]}
context.vault_structure_metadata = metadata
vault_structure = model_name.split("_")[0]
dbtvault_generator.raw_vault_structure(model_name, vault_structure, **metadata)
is_full_refresh = context.dbt_test_utils.check_full_refresh(context)
logs = context.dbt_test_utils.run_dbt_model(mode="run", model_name=model_name,
full_refresh=is_full_refresh)
assert "Completed successfully" in logs
@given("the {raw_stage_model_name} table contains data")
def create_csv(context, raw_stage_model_name):
"""Creates a CSV file in the data folder"""
seed_file_name = context.dbt_test_utils.context_table_to_csv(table=context.table,
model_name=raw_stage_model_name)
dbtvault_generator.add_seed_config(seed_name=seed_file_name,
seed_config=context.seed_config[raw_stage_model_name])
logs = context.dbt_test_utils.run_dbt_seed(seed_file_name=seed_file_name)
context.raw_stage_models = seed_file_name
context.raw_stage_model_name = raw_stage_model_name
assert "Completed successfully" in logs
@when("the {raw_stage_model_name} is loaded")
@step("the {raw_stage_model_name} is loaded for day 1")
@step("the {raw_stage_model_name} is loaded for day 2")
@step("the {raw_stage_model_name} is loaded for day 3")
@step("the {raw_stage_model_name} is loaded for day 4")
def create_csv(context, raw_stage_model_name):
"""Creates a CSV file in the data folder
"""
context.raw_stage_model_name = raw_stage_model_name
seed_file_name = context.dbt_test_utils.context_table_to_csv(table=context.table,
model_name=raw_stage_model_name)
dbtvault_generator.add_seed_config(seed_name=seed_file_name,
seed_config=context.seed_config[raw_stage_model_name])
logs = context.dbt_test_utils.run_dbt_seed(seed_file_name=seed_file_name)
context.raw_stage_models = seed_file_name
assert "Completed successfully" in logs
@step("I create the {processed_stage_name} stage")
def stage_processing(context, processed_stage_name):
stage_metadata = set_stage_metadata(context, model_name=processed_stage_name)
args = {k: v for k, v in stage_metadata.items() if k == "hash"}
dbtvault_generator.stage(model_name=processed_stage_name,
source_model=context.raw_stage_models,
hashed_columns=context.hashed_columns[processed_stage_name],
derived_columns=context.derived_columns[processed_stage_name],
include_source_columns=context.include_source_columns)
logs = context.dbt_test_utils.run_dbt_model(mode="run", model_name=processed_stage_name,
args=args)
assert "Completed successfully" in logs
@then("the {model_name} table should contain expected data")
def expect_data(context, model_name):
expected_output_csv_name = context.dbt_test_utils.context_table_to_csv(table=context.table,
model_name=f"{model_name}_expected")
columns_to_compare = context.dbt_test_utils.context_table_to_dict(table=context.table, orient="records")[0]
compare_column_list = [k for k, v in columns_to_compare.items()]
unique_id = compare_column_list[0]
ignore_columns = context.dbt_test_utils.find_columns_to_ignore(context.table)
test_yaml = dbtvault_generator.create_test_model_schema_dict(target_model_name=model_name,
expected_output_csv=expected_output_csv_name,
unique_id=unique_id,
columns_to_compare=columns_to_compare,
ignore_columns=ignore_columns)
dbtvault_generator.append_dict_to_schema_yml(test_yaml)
dbtvault_generator.add_seed_config(seed_name=expected_output_csv_name,
include_columns=compare_column_list,
seed_config=context.seed_config[model_name])
context.dbt_test_utils.run_dbt_seed(expected_output_csv_name)
logs = context.dbt_test_utils.run_dbt_command(["dbt", "test"])
assert "1 of 1 PASS" in logs
@step("I have hashed columns in the {processed_stage_name} model")
def hashed_columns(context, processed_stage_name):
context.processed_stage_name = processed_stage_name
context.hashed_columns = {processed_stage_name: context.dbt_test_utils.context_table_to_dict(table=context.table,
orient="records")[0]}
@step("I have derived columns in the {processed_stage_name} model")
def derive_columns(context, processed_stage_name):
context.processed_stage_name = processed_stage_name
context.derived_columns = {processed_stage_name: context.dbt_test_utils.context_table_to_dict(table=context.table,
orient="records")[0]}
@step("I do not include source columns")
def source_columns(context):
context.include_source_columns = False
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2019-02-28 15:47:20
# @Author : cdl (1217096231@qq.com)
# @Link : https://github.com/cdlwhm1217096231/python3_spider
# @Version : $Id$
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
# Fix the RNG seed so the printed tensors are reproducible across runs.
torch.manual_seed(123)
"""
1.深度学习的实质是反向传播求导数,在pytorch中的autograd模块则实现了这个功能。在Tensor上的所有操作,autograd都能为它自动计算微分,避免手动计算微分。
2.从0.4版本开始,Variable正式合并入Tensor,Variable本来实现的自动微分功能,Tensor就能支持。还是可以使用
Variable(Tensor),但这个操作其实什么都没做。
3.要使得Tensor有自动求导的功能,需要将属性tensor.requires_grad=True
"""
# (Translation of the note above: autograd implements reverse-mode
# differentiation for every Tensor op.  Since PyTorch 0.4, Variable is
# merged into Tensor -- Variable(Tensor) is a no-op.  Set
# requires_grad=True on a tensor to enable automatic differentiation.)
# requires_grad=True tells PyTorch to record this tensor's op history so
# gradients can be computed automatically.
x = torch.randn(5, 5, requires_grad=True)
print("x: \n", x)
y = torch.randn(5, 5, requires_grad=True)
print("y:\n", y)
z = torch.sum(x + y)
print("z: \n", z)
# Simple case: z is a scalar, so backward() needs no arguments.
z.backward()
print("x和y的梯度是: \n", x.grad, "\n", y.grad)
# More complex case: z is now a 5x5 tensor rather than a scalar.
z = x ** 2 + y**3
print(z)
# A non-scalar output needs a same-shaped gradient tensor; ones_like(x)
# yields the usual "gradient of the element sum" result.
# NOTE(review): these gradients accumulate on top of the earlier
# z.backward() call -- x.grad/y.grad now hold the sum of both passes.
z.backward(torch.ones_like(x))
print(x.grad, '\n', y.grad)
# torch.no_grad() disables autograd even for requires_grad=True tensors;
# commonly used when evaluating on a test set.
with torch.no_grad():
    print((x + y * 2).requires_grad)
|
# coding=utf-8
from flask import request, render_template, jsonify, make_response, url_for
from peewee import create_model_tables
import simplejson as json
from werkzeug.exceptions import abort
from app import app
from models import User, WXUser, Group, Product, Purchase, WxJsapiTicket
from auth import auth
from admin import admin
from api import api
from assets import assets
from wechat import Sign, WXOAuth2, auth_required, wx_userinfo_fetched
from photos import PhotoManager, UploadNotAllowed
# Wire up the flask-peewee sub-applications and static assets.
auth.setup()
admin.setup()
api.setup()
assets.init_app(app)
# WeChat OAuth2 flow is mounted under /wx_auth.
wx_auth = WXOAuth2()
wx_auth.init_app(app, '/wx_auth')
# Photo upload/thumbnail helper.
photo_manager = PhotoManager()
photo_manager.init_app(app)
@wx_userinfo_fetched.connect_via(app)
def save_wx_userinfo(sender, userinfo):
    """Persist fetched WeChat profile fields onto the matching WXUser row.

    Connected to the wx_userinfo_fetched signal emitted by the OAuth2 flow.
    """
    app.logger.debug(u'WeChat User %s authorized us for personal info: %s' % (userinfo['openid'], userinfo))
    # Sample payload for reference:
    # WeChat User oXhUnw7OIvYKGj8ljstNJzXUZeZ0 authorized us for personal info:
    # {u'province': u'', u'openid': u'oXhUnw7OIvYKGj8ljstNJzXUZeZ0', u'headimgurl': u'', u'language': u'en',
    # u'city': u'', u'country': u'\u4e2d\u56fd', u'sex': 0, u'privilege': [], u'nickname': u'\u90c1\u9a8f'}
    wx_user, created = WXUser.get_or_create(openid=userinfo['openid'])
    # Copy only keys that exist as WXUser attributes; ignore the rest.
    for key, value in userinfo.items():
        if not hasattr(wx_user, key):
            continue
        # The privilege field is a JSON array -- flatten to a comma-joined string.
        if isinstance(value, list):
            value = u','.join(value)
        setattr(wx_user, key, value)
    wx_user.save()
@app.route('/upload_photo/', methods=['POST'])
def upload_photo():
    """Accept a photo upload, resize it, and create a default thumbnail.

    Returns JSON ``{"filename": ...}`` on success, 403 when the upload type
    is not allowed, 400 when no file was provided.
    """
    photo = request.files['photo']
    if photo:
        try:
            filename = photo_manager.save(photo, process='resize', width=1080, height=1080)
            # Generate a default thumbnail that shares the original's filename.
            photo_manager.make_thumb(filename, miniature=filename, override=True, size='400x400')
            return jsonify(filename=filename)
        except UploadNotAllowed:
            return make_response(jsonify(error='Upload Not Allowed'), 403)
    abort(400)
def wx_user_data():
    """Serialize the current WeChat user through the api plugin so the JSON
    shape stays consistent with the REST endpoints."""
    wx_user = WXUser.get(WXUser.openid == wx_auth.openid)
    resource = api._registry[WXUser]
    return json.dumps(resource.serialize_object(wx_user))
def _group_page(template):
    """Render a group page with the signed WeChat JS-SDK config.

    Shared by the group_leader/group_member routes below, whose bodies were
    previously duplicated line for line and differed only in the template.
    """
    appid = app.config['WX_APP_ID']
    wx_jsapi_ticket = WxJsapiTicket.get(WxJsapiTicket.appid == appid)
    # The JS-SDK signature must be computed over the exact page URL.
    sign = Sign(wx_jsapi_ticket.ticket, request.url).sign()
    return render_template(template, wx_user_data=wx_user_data(), appid=appid, sign=sign)
@app.route('/group_leader/')
@auth_required
def group_leader():
    """Page shown to a group leader (requires WeChat OAuth)."""
    return _group_page('group_leader.html')
@app.route('/group_member/')
@auth_required
def group_member():
    """Page shown to a group member (requires WeChat OAuth)."""
    return _group_page('group_member.html')
if __name__ == '__main__':
    import logging
    import logging.handlers
    # Echo every peewee SQL statement to stderr while developing.
    logger = logging.getLogger('peewee')
    logger.setLevel(logging.DEBUG)
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s"))
    logger.addHandler(stream_handler)
    create_model_tables(models=(User, WXUser, Group, Product, Purchase), fail_silently=True)
    # Seed demo fixtures: an admin account, a WeChat user, groups and products.
    from flask_peewee.utils import make_password
    defaults = dict(password=make_password('123456'), admin=True)
    User.get_or_create(username='admin', defaults=defaults)
    u, _ = WXUser.get_or_create(openid='oXhUnw7OIvYKGj8ljstNJzXUZeZ0')
    g, _ = Group.get_or_create(leader=u, title=u'白菜团')
    Product.get_or_create(group=g, title=u'一般小白菜', defaults=dict(content=u'可以吃', price=12.4))
    Product.get_or_create(group=g, title=u'辐射大白菜', defaults=dict(content=u'巨大', price=100))
    for group_name in (u'跑鞋团', u'啤酒团', u'桑拿团', u'烧鸡团', u'烤肉团', u'坦克团'):
        Group.get_or_create(leader=u, title=group_name)
    app.run(host='0.0.0.0', debug=True, threaded=True)
from code.data_structures.linkedlist.singly_linkedlist import LinkedList
'''
Time efficiency: O(1)
'''
def deleteMiddleNode(linkedlist, node):
    """Remove *node* from *linkedlist* in place and return the same list."""
    linkedlist.deleteMiddleNode(node)
    return linkedlist
#-*-coding:utf-8-*-
# Python 2 tutorial: higher-order functions can return functions (closures).
print "pyhon中返回函数的说明"
print "高阶函数不仅可以把函数作为参数,也可以把函数作为返回值"
'''
普通的求和函数
'''
def calc_sum(*args):
    """Eagerly sum all positional arguments and return the total."""
    return sum(args)
"""
但是当我们不想要立即求和,而是在后面的代码中,根据
需要再计算怎么办?不返回求和的结果,只是返回求和的函数
"""
def lazy_sum(*args):
    """Return a zero-argument closure that sums *args* only when called."""
    def sum():
        # Captures *args* from the enclosing scope (a closure).
        total = 0
        for value in args:
            total += value
        return total
    return sum
"""
当我们调用lazy_sum()事返回的并不是求和的结果,而是求和的函数
"""
f = lazy_sum(1, 3, 5, 7, 9)
print f
print f()
"""
调用函数f时才真正得到结果
"""
print "而且值得注意的是就算我们两次传入相同的参数,返回的函数也是不同的"
f1 = lazy_sum(1, 3, 5, 7, 9)
f2 = lazy_sum(1, 3, 5, 7, 9)
print "f1 == f2 = " ,f1 == f2
"""
闭包的概念
参考: http://www.cnblogs.com/ma6174/archive/2013/04/15/3022548.html
"""
print "闭包就是根据不同的配置信息得到不同的结果"
"""
闭包: 是词法闭包(Lexical Closure)的简写,是引用了自由变量的函数。这个被引用的自由变量讲和这个函数一同存在,即使已经离开了创造它的环境也不例外。所以另一种说法是认为闭包是由函数和与其相关的引用环境组合而成的实体
"""
def make_adder(addend):
    """Return a closure that adds the captured *addend* to its argument."""
    return lambda augend: augend + addend
# Two adders configured differently: same input, different outputs.
p = make_adder(22)
q = make_adder(44)
print p(100)
print q(100)
print "结果分别是122,144"
print "make_adder做作为一个函数,返回另外一个函数,而且这个函数携带的\
外部传来的一个参数addend,我们可以将这个参数当做函数a返回dder的一个配置信息\
,配置信息的不同p = make_adder(22), q = make_adder(44)传递的信息不同,返回的adder携带的配置信息就不同,那么当我们调用返回函数时,传入相同的参数得到的结果也是不同的,p(100)得到是 100 +22 q(100)得到的是 100 + 44"
def hellocounter(name):
    """Return a closure that greets *name* and counts how often it was called."""
    count = [0]# a one-element list: py2 has no nonlocal, so rebinding a plain name would fail
    def counter():
        count[0] += 1
        print "hello,",name,",",str(count[0]) + " access!"
    return counter
hello = hellocounter("yy")
hello()
hello()
hello()
def makebold(fn):
    """Decorator: wrap the decorated function's output in <b> tags."""
    def bold_wrapper():
        return "<b>" + fn() + "</b>"
    return bold_wrapper
def makeitalic(fn):
    """Decorator: wrap the decorated function's output in <i> tags."""
    def italic_wrapper():
        return "<i>" + fn() + "</i>"
    return italic_wrapper
# Decorator stacking: hello() -> makeitalic first, then makebold outside.
@makebold
@makeitalic
def hello():
    """Returns "hello python", which the decorators wrap in <b><i>...</i></b>."""
    return "hello python"
print hello()
"""
上面的函数意味着装饰器就是闭包的一种
"""
|
"""empty message
Revision ID: b83831b4b242
Revises: 21166afba474
Create Date: 2016-03-14 00:49:43.914692
"""
# revision identifiers, used by Alembic.
revision = 'b83831b4b242'
down_revision = '21166afba474'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
    """Create the users table (with unique email/username indexes) and drop doctor_info."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('users',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('username', sa.String(length=64), nullable=False),
    sa.Column('email', sa.String(length=64), nullable=False),
    sa.Column('password_hash', sa.String(length=128), nullable=True),
    sa.Column('gender', sa.String(length=1), nullable=False),
    sa.Column('dob', sa.Date(), nullable=False),
    sa.Column('blood_group', sa.String(length=3), nullable=False),
    sa.Column('mobile_number', sa.String(length=10), nullable=False),
    sa.Column('address', sa.String(length=200), nullable=False),
    sa.Column('city', sa.String(length=64), nullable=False),
    sa.Column('country', sa.String(length=64), nullable=False),
    sa.Column('pincode', sa.String(length=6), nullable=False),
    sa.Column('confirmed', sa.Boolean(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
    op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=True)
    op.drop_table('doctor_info')
    ### end Alembic commands ###
def downgrade():
    """Reverse of upgrade(): recreate doctor_info (MySQL latin1/InnoDB) and drop users."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('doctor_info',
    sa.Column('id', mysql.INTEGER(display_width=11), nullable=False),
    sa.Column('first_name', mysql.VARCHAR(length=64), nullable=False),
    sa.Column('middle_name', mysql.VARCHAR(length=64), nullable=True),
    sa.Column('last_name', mysql.VARCHAR(length=64), nullable=False),
    sa.Column('gender', mysql.VARCHAR(length=1), nullable=False),
    sa.Column('dob', sa.DATE(), nullable=False),
    sa.Column('blood_group', mysql.VARCHAR(length=3), nullable=False),
    sa.Column('mobile_number', mysql.VARCHAR(length=10), nullable=False),
    sa.Column('address', mysql.VARCHAR(length=200), nullable=False),
    sa.Column('city', mysql.VARCHAR(length=64), nullable=False),
    sa.Column('country', mysql.VARCHAR(length=64), nullable=False),
    sa.Column('pincode', mysql.VARCHAR(length=6), nullable=False),
    sa.PrimaryKeyConstraint('id'),
    mysql_default_charset=u'latin1',
    mysql_engine=u'InnoDB'
    )
    op.drop_index(op.f('ix_users_username'), table_name='users')
    op.drop_index(op.f('ix_users_email'), table_name='users')
    op.drop_table('users')
    ### end Alembic commands ###
|
"""Human readable quantities of some units.
This modules provides subclasses of :class:`int` or :class:`float` to
denote particular quantities such as time intervalls or amounts of
memory. The classes provide string representations in a more human
readable form and constructors that accept these string
representations as input.
"""
import re
class Time(float):
    """Human readable time intervals.
    The value is a :class:`float` that indicates the time in seconds.
    >>> t1 = Time(4500)
    >>> str(t1)
    '1.250 h'
    >>> t2 = Time("1.5 d")
    >>> t2
    129600.0
    >>> t3 = Time("36 h")
    >>> t3 == t2
    True
    >>> t4 = Time(1/200)
    >>> str(t4)
    '5.000 ms'
    >>> t5 = 0.5*Time("1 h") + 5*Time("15 min")
    >>> str(t5)
    '1.750 h'
    """
    # Unit factors, all expressed in seconds.
    second = 1
    minute = 60*second
    hour = 60*minute
    day = 24*hour
    millisecond = (1/1000)*second
    units = { 'ms':millisecond, 's':second, 'min':minute, 'h':hour, 'd':day, }
    regexp = re.compile(r'^(\d+(?:\.\d+)?)\s*(ms|s|min|h|d)$')
    def __new__(cls, value):
        """Accept a number of seconds or a string such as "36 h".

        Raises ValueError for malformed strings or negative numbers.
        """
        if isinstance(value, str):
            m = cls.regexp.match(value)
            if not m:
                raise ValueError("Invalid Time string '%s'" % value)
            # Strings cannot encode a negative value: the regexp has no sign.
            v = float(m.group(1)) * cls.units[m.group(2)]
            return super().__new__(cls, v)
        else:
            v = float(value)
            if v < 0:
                raise ValueError("Invalid time value %f" % v)
            return super().__new__(cls, v)
    def __str__(self):
        # Use the largest unit that does not render a value below 1.
        for u in ['d', 'h', 'min', 's']:
            if self >= self.units[u]:
                return "%.3f %s" % (self / self.units[u], u)
        else:
            return "%.3f ms" % (self / self.units['ms'])
    def __add__(self, other):
        """Time + Time stays a Time; adding a plain number yields a float."""
        v = super().__add__(other)
        if isinstance(other, Time):
            return Time(v)
        else:
            return v
    def __sub__(self, other):
        """Time - Time stays a Time while non-negative (a negative
        difference cannot be a Time, so it degrades to a plain float)."""
        v = super().__sub__(other)
        if isinstance(other, Time) and v >= 0:
            return Time(v)
        else:
            return v
    def __mul__(self, other):
        # Added for consistency: Time * n now mirrors n * Time below, where
        # previously only the reflected form preserved the Time type.
        return self.__rmul__(other)
    def __rmul__(self, other):
        """Scaling by a plain int/float keeps the Time type."""
        if type(other) in { float, int }:
            return Time(other*float(self))
        else:
            return super().__rmul__(other)
class MemorySize(int):
    """Human readable amounts of memory.
    The value is an :class:`int` that indicates the number of bytes.
    >>> s1 = MemorySize(2)
    >>> str(s1)
    '2 B'
    >>> s2 = MemorySize("2.0 B")
    >>> s2
    2
    >>> s2 == s1
    True
    >>> s3 = MemorySize(33117290228613)
    >>> str(s3)
    '30.12 TiB'
    >>> s4 = MemorySize("1 PiB") + 3*MemorySize("128 TiB")
    >>> str(s4)
    '1.38 PiB'
    >>> s5 = s4 - MemorySize("512 TiB")
    >>> str(s5)
    '896.00 TiB'
    >>> s5
    985162418487296
    """
    # Binary (IEC) unit factors in bytes.
    sizeB = 1
    sizeKiB = 1024*sizeB
    sizeMiB = 1024*sizeKiB
    sizeGiB = 1024*sizeMiB
    sizeTiB = 1024*sizeGiB
    sizePiB = 1024*sizeTiB
    sizeEiB = 1024*sizePiB
    units = { 'B':sizeB, 'KiB':sizeKiB, 'MiB':sizeMiB, 'GiB':sizeGiB,
              'TiB':sizeTiB, 'PiB':sizePiB, 'EiB':sizeEiB, }
    regexp = re.compile(r'^(\d+(?:\.\d+)?)\s*(B|KiB|MiB|GiB|TiB|PiB|EiB)$')
    def __new__(cls, value):
        """Accept a byte count or a string such as "30 TiB".

        Raises ValueError for malformed strings or negative numbers.
        """
        if isinstance(value, str):
            m = cls.regexp.match(value)
            if not m:
                raise ValueError("Invalid MemorySize string '%s'" % value)
            v = float(m.group(1)) * cls.units[m.group(2)]
            return super().__new__(cls, v)
        else:
            v = int(value)
            if v < 0:
                raise ValueError("Invalid size value %d" % v)
            return super().__new__(cls, v)
    def __str__(self):
        # Use the largest unit that does not render a value below 1.
        for u in ['EiB', 'PiB', 'TiB', 'GiB', 'MiB', 'KiB']:
            if self >= self.units[u]:
                return "%.2f %s" % (self / self.units[u], u)
        else:
            return "%d B" % (int(self))
    def __add__(self, other):
        """MemorySize + MemorySize stays a MemorySize; plain ints give an int."""
        v = super().__add__(other)
        if isinstance(other, MemorySize):
            return MemorySize(v)
        else:
            return v
    def __sub__(self, other):
        """MemorySize - MemorySize stays a MemorySize while non-negative."""
        v = super().__sub__(other)
        if isinstance(other, MemorySize) and v >= 0:
            return MemorySize(v)
        else:
            return v
    def __mul__(self, other):
        # Added for consistency: MemorySize * n now mirrors n * MemorySize,
        # where previously only the reflected form preserved the type.
        return self.__rmul__(other)
    def __rmul__(self, other):
        """Scaling by a plain int keeps the MemorySize type."""
        if type(other) == int:
            return MemorySize(other*int(self))
        else:
            return super().__rmul__(other)
|
"""
An operator is a callable object representing an operation that when applied to a puzzle state, produces a generator of
possible next states.
This file defines the abstract base class for operators and the operators used in the peg puzzle
"""
from typing import Generator, Tuple, Callable
from state import PuzzleState
from peg import Peg
import state
def next_states(start_state: PuzzleState, find: Tuple[Peg, ...], replace: Tuple[Peg, ...])\
        -> Generator[PuzzleState, None, None]:
    """
    Generate states by applying a replacement rule
    :param start_state: The initial state
    :param find: The sequence of pegs to find
    :param replace: The sequence of pegs to replace
    :return: Yields all possible replacement states
    Example: In "R_R_BB" find R_ and replace with _R would produce "_RR_BB" and "R__RBB"
    """
    # NOTE(review): the path list is mutated in place and handed to every
    # yielded PuzzleState, and the pop() below only runs when the generator is
    # fully exhausted — presumably PuzzleState copies the path; verify in state.py.
    start_state.path.append(start_state)
    for peg_index in range(len(start_state.state_tuple)):
        # Slice comparison locates every occurrence of `find` in the tuple.
        if start_state.state_tuple[peg_index:peg_index + len(find)] == find:
            new_state_array = list(start_state.state_tuple)
            new_state_array[peg_index:peg_index + len(find)] = replace
            yield PuzzleState(tuple(new_state_array), start_state.path)
    start_state.path.pop()
def RedSlideOperator(start_state: PuzzleState) -> Generator[PuzzleState, None, None]:
    """Slide a red peg one slot right into an adjacent empty space."""
    return next_states(start_state, (Peg.RED, Peg.NONE), (Peg.NONE, Peg.RED))
def BlueSlideOperator(start_state: PuzzleState) -> Generator[PuzzleState, None, None]:
    """Slide a blue peg one slot left into an adjacent empty space."""
    return next_states(start_state, (Peg.NONE, Peg.BLUE), (Peg.BLUE, Peg.NONE))
def RedJumpOperator(start_state: PuzzleState) -> Generator[PuzzleState, None, None]:
    """Jump a red peg rightward over a single blue peg into an empty slot.

    Previously written with `yield from` while the slide operators used
    `return`; unified on `return` since next_states already yields a
    generator and callers see the same interface either way.
    """
    return next_states(start_state, (Peg.RED, Peg.BLUE, Peg.NONE), (Peg.NONE, Peg.BLUE, Peg.RED))
def BlueJumpOperator(start_state: PuzzleState) -> Generator[PuzzleState, None, None]:
    """Jump a blue peg leftward over a single red peg into an empty slot."""
    return next_states(start_state, (Peg.NONE, Peg.RED, Peg.BLUE), (Peg.BLUE, Peg.RED, Peg.NONE))
|
from __future__ import unicode_literals
from django.contrib import admin
from models import *
# Expose Group and Card in the Django admin with default ModelAdmin options.
admin.site.register(Group)
admin.site.register(Card)
|
import os
def main():
n = int(input())
nums = list( map( int, input().split() ) )
nums.sort()
#print( nums )
# Get median element
if len( nums ) % 2 == 0:
median = ( nums[ int(len(nums) / 2) ] + nums[ int(len(nums) / 2) - 1 ] ) / 2
else:
median = nums[ int(len(nums) / 2) ]
numDict = {}
max = 1
maxElement = nums[0]
sum = 0
max = 1
for element in nums:
sum += element
# Hash to get mode
if element not in numDict.keys():
#print( "inserting , ", element )
numDict[ element ] = 1
else:
numDict[ element ] = numDict[ element ] + 1
if numDict[ element ] == max:
# Check if new element is less than old
if element < maxElement:
max = numDict[ element ]
maxElement = element
elif numDict[ element ] > max:
max = numDict[ element ]
maxElement = element
print( "{:.1f}".format( sum / len(nums) ) )
print( "{:.1f}".format( median ) )
print( maxElement )
if __name__ == "__main__":
main()
|
from functools import wraps
import contextlib
import shutil
import tempfile
from nose import SkipTest
def build_po_string(data):
    """Prepend a standard fuzzy PO-file header to *data* and return it."""
    header = (
        '#, fuzzy\n'
        'msgid ""\n'
        'msgstr ""\n'
        '"Project-Id-Version: foo\\n"\n'
        '"POT-Creation-Date: 2013-06-05 14:16-0700\\n"\n'
        '"PO-Revision-Date: 2010-04-26 18:00-0700\\n"\n'
        '"Last-Translator: Automatically generated\\n"\n'
        '"Language-Team: English\\n"\n'
        '"Language: \\n"\n'
        '"MIME-Version: 1.0\\n"\n'
        '"Content-Type: text/plain; charset=UTF-8\\n"\n'
        '"Content-Transfer-Encoding: 8bit\\n"\n'
        '"X-Generator: Translate Toolkit 1.6.0\\n"\n\n'
    )
    return header + data
@contextlib.contextmanager
def tempdir():
    """Builds a tempdir and cleans up afterwards
    Usage::
        with tempdir() as dir_:
            # blah blah blah
    The directory is removed even if the with-body raises.
    """
    dir_ = tempfile.mkdtemp()
    try:
        yield dir_
    finally:
        # Without the finally, an exception in the with-body leaked the
        # directory (rmtree was never reached).
        shutil.rmtree(dir_)
def skip_if(testfun):
    """Decorator factory: skip the wrapped test when testfun() is truthy."""
    def decorator(fun):
        @wraps(fun)
        def wrapper(*args, **kwargs):
            if testfun():
                raise SkipTest
            return fun(*args, **kwargs)
        return wrapper
    return decorator
|
from django.db import models
from apps.registro.models.Region import Region
from apps.seguridad.models import Ambito
class Jurisdiccion(models.Model):
    """A jurisdiction within a Region, mirrored by a security-scope Ambito node."""
    prefijo = models.CharField(null = True, max_length = 3)
    region = models.ForeignKey(Region)
    nombre = models.CharField(max_length = 50)
    # Maintained automatically in save(); never edited directly by users.
    ambito = models.ForeignKey(Ambito, editable = False, null =True)
    class Meta:
        app_label = 'registro'
        ordering = ['nombre']
    def __unicode__(self):
        return self.nombre
    def save(self):
        # NOTE(review): overriding save() without *args/**kwargs breaks callers
        # that pass force_insert/using — confirm against the Django version in use.
        self.updateAmbito()
        models.Model.save(self)
    def updateAmbito(self):
        # First save: create an Ambito child under the root (level=0) node;
        # later saves just keep the Ambito description in sync with nombre.
        if self.pk is None or self.ambito is None:
            self.ambito = Ambito.objects.get(level=0).createChild(self.nombre, self)
        else:
            self.ambito.descripcion = self.nombre
            self.ambito.save()
    def delete(self):
        # Refuse deletion while any dependent rows still reference this row.
        if (self.departamento_set.count() > 0
            or self.dependenciafuncional_set.count() > 0
            or self.titulo_set.count() > 0
            or self.carrerajurisdiccional_set.count() > 0
            or self.normativajurisdiccional_set.count() > 0):
            raise Exception('Entidad en uso')
        models.Model.delete(self)
|
# Lab topology for Spirent TestCenter 5.11 test runs.
chassis_511 = '192.168.65.24'
server_511 = 'localhost:8888'
# Per-server config: REST endpoint, two chassis ports, and the STC install path.
server_properties = {'windows_511': {'server': server_511,
                                     'locations': [f'{chassis_511}/1/1', f'{chassis_511}/1/2'],
                                     'install_dir': 'C:/Program Files/Spirent Communications/Spirent TestCenter 5.11'}}
# Default for options.
api = ['rest']
server = ['windows_511']
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@CreateTime: 2018-10-10T18:36:20+09:00
@Email: guozhilingty@gmail.com
@Copyright: Shiba-lab
@License: MIT
"""
import numpy as np
import torch
import torch.nn as nn
class VDSR(nn.Module):
    """
    Superresolution using Very Deep Convolutional Networks
    The network takes an interpolated low-resolution image as input and predicts image details.
    Reference:
        CVPR2016: Accurate Image Super-Resolution Using Very Deep Convolutional Networks
    """
    def __init__(self,
                 nb_channel=3,
                 base_kernel=64,
                 num_residuals=18):
        """
        nb_channel: image channels in and out
        base_kernel: feature width of every hidden conv layer
        num_residuals: number of 3x3 conv+ReLU blocks in the trunk
        """
        super(VDSR, self).__init__()
        # input conv: lift the image into base_kernel feature maps
        self.input_conv = nn.Sequential(nn.Conv2d(nb_channel, base_kernel, kernel_size=3, stride=1, padding=1, bias=False),
                                        nn.ReLU(inplace=True))
        # residual_layers: deep stack of same-size conv+ReLU blocks
        conv_blocks = []
        for _ in range(num_residuals):
            conv_blocks.append(nn.Sequential(nn.Conv2d(base_kernel, base_kernel, kernel_size=3, stride=1, padding=1, bias=False),
                                             nn.ReLU(inplace=True)))
        self.residual_layers = nn.Sequential(*conv_blocks)
        # output: project back to the image channel count
        self.output_conv = nn.Conv2d(base_kernel, nb_channel, kernel_size=3, stride=1, padding=1, bias=False)
    def forward(self, x):
        # Global residual learning: the net predicts details added to the input.
        residual = x
        x = self.input_conv(x)
        x = self.residual_layers(x)
        x = self.output_conv(x)
        x = torch.add(x, residual)
        return x
    def weight_init(self):
        """Kaiming-initialize every submodule.

        Fix: iterate the modules themselves via self.modules(); the original
        looped over `self._modules`, which yields the *names* (strings), so
        weights_init_kaiming matched nothing and no layer was initialized.
        """
        for m in self.modules():
            weights_init_kaiming(m)
def weights_init_kaiming(m):
    """Kaiming-normal init for Linear/Conv2d/ConvTranspose2d weights,
    normal(1.0, 0.02) for norm-layer weights; biases are zeroed when present.

    Fix: use the in-place `kaiming_normal_` — the un-suffixed
    `nn.init.kaiming_normal` is deprecated and removed in current PyTorch.
    Modules whose class name matches none of the patterns are left untouched.
    """
    class_name = m.__class__.__name__
    if class_name.find('Linear') != -1:
        nn.init.kaiming_normal_(m.weight)
        if m.bias is not None:
            m.bias.data.zero_()
    elif class_name.find('Conv2d') != -1:
        nn.init.kaiming_normal_(m.weight)
        if m.bias is not None:
            m.bias.data.zero_()
    elif class_name.find('ConvTranspose2d') != -1:
        nn.init.kaiming_normal_(m.weight)
        if m.bias is not None:
            m.bias.data.zero_()
    elif class_name.find('Norm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        if m.bias is not None:
            m.bias.data.zero_()
if __name__ == "__main__":
# Hyper Parameters
img_col, img_row, nb_channel = 224, 224, 3
base_kernel = 64
x = torch.FloatTensor(
np.random.random((1, nb_channel, img_col, img_row)))
model = VDSR(nb_channel, base_kernel)
gen_y = model(x)
print("VDSR->:")
print(" Network input: ", x.shape)
print(" output: ", gen_y.shape) |
print('学习:Python-dict')
# Define a dict of name -> score
d = {'Bob':95,'Lisa':96,'Luncode':100}
# Update the value stored under a key
d['Bob'] = 99
# Membership test: is the key present?
print('Lisa' in d)
# Read the value for a key
print(d['Bob'])
# Remove a key (and its value) from the dict
d.pop('Lisa')
# Show the remaining entries
print(d)
# Recap: lookups and get() with a default for missing keys
d = {'Michael': 95,'Bob': 75,'Tracy': 85}
print('d[\'Michael\'] =', d['Michael'])
print('d[\'Bob\'] =', d['Bob'])
print('d[\'Tracy\'] =', d['Tracy'])
print('d.get(\'Thomas\', -1) =', d.get('Thomas', -1))
|
import json
from terminaltables import SingleTable
import os
import io
sensors_dictionary = {}
def parse_json(received_bytes):
    """Decode one sensor JSON payload, log it to disk, and refresh the table."""
    # Bytes off the wire -> str -> dict.
    payload = json.loads(received_bytes.decode("utf-8"))
    # Fixed-order tuple of the sensor fields, so column order is stable.
    sensor_values = (payload["Art der Messung"], payload["Messwert"], payload["Einheit"],
                     payload["Raum"], payload["Sensor ID"], payload["Sensor Sub ID"],
                     payload["Sensor"], payload["Zeitstempel"])
    write_to_file(sensor_values)
    # Index the reading by (sensor id, sub id): a repeat reading from the
    # same sensor overwrites its previous row instead of adding a new one.
    sensors_dictionary[(payload["Sensor ID"], payload["Sensor Sub ID"])] = sensor_values
    draw()
def draw():
    """Clear the console and render all known sensor readings as a table."""
    table_data = [('Messung', 'Wert', 'Einheit', 'Raum', 'SensorID', 'SubID', 'Bezeichnung', 'Zeitstempel')]
    table_data.extend(sensors_dictionary.values())
    table_instance = SingleTable(table_data, "IoT Sensors")
    table_instance.justify_columns[8] = 'left'
    # 'cls' on Windows, 'clear' elsewhere.
    os.system('cls' if os.name == 'nt' else 'clear')
    print(table_instance.table, end='\r')
def write_to_file(sensor_tuple):
    """Append one sensor reading to log.txt, ';'-separated.

    Fix: the original opened the file on every call without ever closing it,
    leaking a file descriptor per reading; the context manager closes (and
    flushes) the handle deterministically.
    """
    with open('log.txt', 'a') as log_file:
        log_file.write(str(sensor_tuple) + ';')
|
# Napisz algorytm, który na wejście przyjmie listę zawierającą tylko stringi i integery.
# Zwróci listą zawierającą tylko powtarzające się wartości, nie zmieniając ich kolejności.
# Przykład: [1, 2, 3, 1, 3] 1 i 3 nie są unikalne, więc wynikiem będzie [1, 3, 1, 3].
def notunique(my_list):
    """Return the items of *my_list* that occur more than once, keeping their
    original order and multiplicity.

    Example: [1, 2, 3, 1, 3] -> [1, 3, 1, 3]

    Fix: the original called list.count() once per element (O(n^2)) and built
    an unused `unique_list`; a single Counter pass makes this O(n).
    """
    from collections import Counter  # local import keeps this snippet self-contained
    counts = Counter(my_list)
    return [item for item in my_list if counts[item] > 1]
print(notunique([1, 2, 3, 1, 3, 2, "w", "kkk", 1, 17, "kkk"]))
|
# WiFi and Bayou API credentials plus the data-ingest endpoint.
# Replace each bracketed placeholder with a real value before deploying;
# keep this file out of version control.
secrets = {
    'ssid' : '[Wifi SSID]',
    'password' : '[Wifi Password]',
    'public_key' : '[Bayou Public Key]',
    'private_key' : '[Bayou Private Key]',  # fix: placeholder was missing its closing ']'
    'base_url': 'http://192.168.1.163:5000/data/'
}
'''
Created on 6 Jul 2015
@author: leo
'''
from fractions import Fraction
class F(Fraction):
    '''
    A fraction carrying a symbolic big-M term: the value represented is
    ``fraction + m*M`` where M is arbitrarily large (simplex big-M method).
    Extends fractions.Fraction.
    '''
    def __new__(cls, n, m=Fraction(0)):
        # Fix: Fraction.__new__(cls, n, m) interprets m as a *denominator*,
        # so every two-argument construction with m == 0 raised
        # ZeroDivisionError (breaking all the arithmetic below). Forward
        # only n; the m coefficient lives in __init__.
        return super().__new__(cls, n)
    def __init__(self, n, m=Fraction(0)):
        # Finite part and coefficient of the symbolic M term.
        self.fraction = Fraction(n)
        self.m = Fraction(m)
    def __repr__(self):
        """repr(self): '<finite>' or '<finite> + (<m>*M)'"""
        return str(float(self.fraction)) if self.m == 0 else str(float(self.fraction)) + ' + (' + str(float(self.m)) + '*M)'
    def __str__(self):
        """str(self)"""
        return str(float(self.fraction)) if self.m == 0 else str(float(self.fraction)) + ' + (' + str(float(self.m)) + '*M)'
    def __eq__(self, f):
        """a == b: both the finite part and the M coefficient must match."""
        if type(f) is not type(self):
            f = F(f)
        return self.fraction == f.fraction and self.m == f.m
    def __hash__(self):
        # Defining __eq__ would otherwise set __hash__ to None (py3),
        # making F unusable in sets/dict keys.
        return hash((self.fraction, self.m))
    def __add__(self, f):
        """a + b (componentwise)"""
        if type(f) is not type(self):
            f = F(f)
        return F(self.fraction + f.fraction, self.m + f.m)
    def __sub__(self, f):
        """a - b (componentwise).

        Fix: this was misspelled ``___sub__`` (three underscores), so
        subtraction silently fell back to Fraction.__sub__ and dropped the
        M coefficient; the body also called the nonexistent
        ``Fraction.___sub__``.
        """
        if type(f) is not type(self):
            f = F(f)
        return F(self.fraction - f.fraction, self.m - f.m)
    def __mul__(self, f):
        """a * b

        NOTE(review): preserves the original semantics, which drop self.m
        when f.m == 0 and multiply the M coefficients pairwise otherwise —
        confirm this is the intended big-M arithmetic before relying on it.
        """
        if type(f) is not type(self):
            f = F(f)
        if f.m == 0:
            return F(self.fraction * f.fraction)
        else:
            return F(self.fraction * f.fraction, self.m * f.m)
    def __truediv__(self, f):
        """a / b (same componentwise shape as __mul__).

        Fix: the old __div__ called ``Fraction.__div__``, which does not
        exist on Python 3; use the / operator instead.
        """
        if type(f) is not type(self):
            f = F(f)
        if f.m == 0:
            return F(self.fraction / f.fraction)
        else:
            return F(self.fraction / f.fraction, self.m / f.m)
    __div__ = __truediv__  # keep the legacy name for any old callers
    def __lt__(self, f):
        """a < b: the M coefficient dominates; ties compare the finite part."""
        if type(f) is not type(self):
            f = F(f)
        if self.m == f.m:
            return self.fraction < f.fraction
        else:
            return self.m < f.m
    def __gt__(self, f):
        """a > b"""
        if type(f) is not type(self):
            f = F(f)
        if self.m == f.m:
            return self.fraction > f.fraction
        else:
            return self.m > f.m
    def __le__(self, f):
        """a <= b"""
        if type(f) is not type(self):
            f = F(f)
        if self.m == f.m:
            return self.fraction <= f.fraction
        else:
            return self.m <= f.m
    def __ge__(self, f):
        """a >= b"""
        if type(f) is not type(self):
            f = F(f)
        if self.m == f.m:
            return self.fraction >= f.fraction
        else:
            return self.m >= f.m
import os
import numpy as numpy
import pandas as pd
from StringIO import StringIO
class DataHandler:
    """Loads train labels/features and test features from CSV (Kaggle digit layout)."""
    def __init__(self):
        print "starting to load data"
        data = pd.read_csv(os.path.realpath('Data/train.csv'), delimiter=",").values
        # First column holds the label; the remaining columns are features.
        self.trainLabels=data[:,0]
        self.trainFeatures=data[:,1:]
        data = pd.read_csv(os.path.realpath('Data/test.csv'), delimiter=",").values
        self.test = data[:,:]
        print "finished loading data"
|
import exlotto
def output(m, n):
    """Draw one lotto combination of n numbers out of m via exlotto and
    return it sorted ascending."""
    return sorted(exlotto.lotto(m, n))
#跑一萬次出現最多的數字六個
if __name__ == '__main__':
    import operator
    from collections import Counter
    # Run 10000 draws and report how often each number appears,
    # sorted by frequency ascending (most frequent numbers last).
    lilotto = list()
    for i in range(10000):
        # Accumulate every drawn number across all rounds.
        lilotto.extend(output(48, 6))
    # Fix: Counter counts everything in one O(n) pass; the original called
    # list.count() once per distinct number (O(n * k)).
    dictlotto = dict(Counter(lilotto))
    # Sort the (number, count) pairs by count.
    sorted_x = sorted(dictlotto.items(), key=operator.itemgetter(1))
    print(sorted_x)
|
# The shopping list maintained by the menu loop below.
# Fix: renamed from `list`, which shadowed the builtin type.
shopping_list = []
def add_item(name):
    """Append *name* to the shopping list."""
    shopping_list.append(name)
def remove_item(name):
    """Remove *name* if present; return True on success, False otherwise.

    Fix: list.remove() raised ValueError (crashing the program) when the
    item was not in the list.
    """
    if name in shopping_list:
        shopping_list.remove(name)
        return True
    return False
def print_list():
    """Show the current list contents."""
    print(shopping_list)
def print_inst():
    """Print the menu options."""
    print("1. print Instruction\n2. Add Item\n3. Remove Item\n4.Quit\n5.Print List")
print_inst()
while True:
    try:
        choice = int(input("Enter Your Choice: "))
    except ValueError:
        # Fix: non-numeric input used to crash with an uncaught ValueError.
        print_inst()
        continue
    if choice == 1:
        print_inst()
    elif choice == 2:
        new_name = input("Enter a Name to add: ")
        add_item(new_name)
        print(new_name, " is added to the list")
    elif choice == 3:
        del_name = input("Enter a Item to remove: ")
        if remove_item(del_name):
            print(del_name, " is deleted")
        else:
            print(del_name, " is not in the list")
    elif choice == 4:
        break
    elif choice == 5:
        print_list()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
class TestB(unittest.TestCase):
    """Demo case: its methods run in name order (test_1, test_aa, test_cc),
    not definition order — see the captured output below."""
    def setUp(self):
        # Runs before every test method.
        print('test TestB start')
    def test_1(self):
        print('test TestB test_1')
    def test_cc(self):
        print('test TestB test_cc')
    def test_aa(self):
        print('test TestB test_aa')
    def tearDown(self):
        # Runs after every test method.
        print('test TestB end')
class TestAdd(unittest.TestCase):
    """Second demo case: classes also run alphabetically, so TestAdd runs
    before TestB (see the captured output below)."""
    def setUp(self):
        print('test TestAdd start')
    def test_bb(self):
        print('test TestAdd test_bb')
    def tearDown(self):
        print('test TestAdd end')
if __name__ == '__main__':
    # Discovers and runs both cases; order is alphabetical per the notes below.
    unittest.main()
# (C:\Users\Han\Anaconda3) C:\Users\Han\selenium_test\ch07>python caltest_unittest_test_sequence.py
# test TestAdd start
# test TestAdd test_bb
# test TestAdd end
# .test TestB start
# test TestB test_1
# test TestB end
# .test TestB start
# test TestB test_aa
# test TestB end
# .test TestB start
# test TestB test_cc
# test TestB end
# .
# ----------------------------------------------------------------------
# Ran 4 tests in 0.005s
# OK
# Order of tests in python unittest
# https://stackoverflow.com/questions/30286268/order-of-tests-in-python-unittest
# Option 1.
# One solution to this (as a workaround) was given here - which suggests writing the tests in numbered methods step1, step2, etc., then collecting and storing them via dir(self) and yielding them to one test_ method which trys each.
# Not ideal but does what you expect. Each test sequence has to be a single TestClass (or adapt the method given there to have more than one sequence generating method).
# Option 2.
# Another solution, also in the linked question, is you name your tests alphabetically+numerically sorted so that they will execute in that order.
# But in both cases, write monolithic tests, each in their own Test Class.
# P.S. I agree with all the comments that say unit testing shouldn't be done this way; but there are situations where unit test frameworks (like unittest and pytest) get used to do integration tests which need modular independent steps to be useful. Also, if QA can't influence Dev to write modular code, these kinds of things have to be done.
# https://stackoverflow.com/a/5387956/1431750 |
import os
bash_scrapy = 'scrapy crawl jobs -o test.json'
def main():
    """Invoke the `jobs` spider via the shell (command held in `bash_scrapy`)."""
    os.system(bash_scrapy)
if __name__ == '__main__':
main() |
# 클래스 기반 메소드 심화
class Car:
    """
    Car class
    Author: jinsuSang
    Date: 2021.07.17
    """
    # Class variable shared by all instances.
    # Mutating class variables (e.g. via del) from outside is discouraged;
    # treat them as read-only where possible.
    __price_per_raise = 1.0
    def __init__(self, company, details):
        self.__company = company
        self.__details = details
        self.__secret = 'secret'
    def __repr__(self):
        return 'repr: {} {}'.format(self.company, self.details)
    # On the difference between staticmethod and classmethod see
    # https://hamait.tistory.com/635
    # Class method: receives the class, so it can update the class variable.
    @classmethod
    def set_per(cls, per):
        if per < 1:
            print('greater than or equal to 1')
            return
        cls.__price_per_raise = per
        print('change per')
    @staticmethod
    def is_tesla(inst):
        # Name mangling still resolves __company here because this code
        # lives inside the Car class body.
        if inst.__company == 'tesla':
            return True
        else:
            return False
    @property
    def company(self):
        return self.__company
    @property
    def details(self):
        return self.__details
    # Instance methods
    # self: the individual instance's own state.
    def instance_details(self):
        print('instance id: {}'.format(id(self)))
        print('company = {}, price = {}'.format(self.company, self.details.get('price')))
    def get_price(self):
        return self.details.get('price') * Car.__price_per_raise
# Demo: create two cars, dump their details, raise prices globally, then
# call is_tesla both through an instance and through the class.
car1 = Car('kia', {'color': 'White', 'horsepower': 400, 'price': 4000})
car2 = Car('tesla', {'color': 'Red', 'horsepower': 600, 'price': 14000})
car1.instance_details()
car2.instance_details()
print(car1.get_price())
Car.set_per(5)
print(car1.get_price())
print(car1.is_tesla(car1))
print(Car.is_tesla(car1))
|
# -*- coding: utf-8 -*-
"""
Create Second Defect on DAGM Images
@author: josemiguelarrieta
"""
#Load libraries
import os
import cv2
os.chdir('Documents/SIVA')
from utils_dagm import load_image_dagm, load_labels_dagm,write_labels_defectA,rectangle_expanded_roi
from utils_dagm import write_labels_expROI, defect_B_rect_ROI, write_labels_defectB, ellipse_inside_rect
from utils_dagm import add_salt,add_blur,add_defect_B
from utils_dagm import save_image_defect
# Section 1: add a synthetic defect B to DAGM images that already carry defect A.
path = '/Users/josemiguelarrieta/Dropbox/11_Semestre/Jovenes_Investigadores/images/optical2/Class'
cl_number = 5 #Class number
num = 28
cv2.destroyAllWindows()
###############################################
#Add Defect B to Image with already defect A. #
###############################################
defect = 'AB' #Number of defects
for num in range (1,20):
    #Load Image and Labels [Ground Truth]
    image = load_image_dagm(path,num,cl_number)
    gt = load_labels_dagm(path,cl_number,num)
    #cv2.ellipse(image,(gt['x_position_center'],gt['y_position_center']),(gt['semi_major_ax'],gt['semi_minor_ax']),gt['rotation_angle'],0,360,(0,255,0),2) #Draw Ellipse [Ground Truth]
    cv2.imshow('image'+str(num),image)
    cv2.destroyAllWindows()
    write_labels_defectA(cl_number,num,gt) #Write labels defectA [Ground Truth]
    x1, y1, x2, y2, x,y = rectangle_expanded_roi(gt) #Dimentions of expanded rectangle
    cv2.rectangle(image,(x1,y1),(x2,y2),(0,255,0),2) #Draw Rectangle
    write_labels_expROI(cl_number,num,x1,y1,x2,y2,defect=defect) #Write dimentions expanded rectangle [ROI]
    d1, d2, d3, d4 = defect_B_rect_ROI(x1, x, y1, y2) #Second defect rectangle dimentions
    #cv2.rectangle(image,(d1,d2),(d3,d4),(0,255,0),2) #Draw Rectangle defect B
    write_labels_defectB(cl_number,num,d1,d2,d3,d4,defect=defect) #Write labels defect B [Ground Truth]
    c11, c22, A, B = ellipse_inside_rect(x1,y1,x,d4) #Dimentions of Ellipse insed rectangle for defect B.
    #cv2.ellipse(image, (c11,c22), (B/2,A/2),0,0,360,(0,255,0),1) #Draw an Ellipse Below.
    image_salted = add_salt(image,cl_number) #Added salt to Original image
    blured = add_blur(image_salted,cl_number) #Blured image salted
    image_res = add_defect_B(c11, c22, A, B, blured, image) #Image with Final Defect B
    cv2.imshow('image'+str(num),image_res)
    #save_image_defect(defect,num,cl_number,image)
    #filename_with_defect = str(num) + '_'+defect+'.png'
    #cv2.imwrite('/Users/josemiguelarrieta/Dropbox/11_Semestre/Jovenes_Investigadores/'+filename_with_defect, image)
    print num
##########################################
#Add Defect B to Image without Defect A. #
##########################################
# Section 2: same pipeline, but starting from defect-free images; note that
# here the *original* image (not image_res) is what gets saved.
defect = 'B' #Number of defects
for num in range (1,15):
    image = load_image_dagm(path,num,cl_number,defect='') #Path for images without defects
    gt = load_labels_dagm(path,cl_number,num) #Path for images with defect A
    x1, y1, x2, y2, x,y = rectangle_expanded_roi(gt) #Dimentions of expanded rectangle
    write_labels_expROI(cl_number,num,x1,y1,x2,y2,defect = defect) #Write dimentions expanded rectangle [ROI]
    d1, d2, d3, d4 = defect_B_rect_ROI(x1, x, y1, y2) #Second defect rectangle dimentions
    write_labels_defectB(cl_number,num,d1,d2,d3,d4,defect = defect) #Write labels defect B [Ground Truth]
    c11, c22, A, B = ellipse_inside_rect(x1,y1,x,d4) #Dimentions of Ellipse insed rectangle for defect B.
    image_salted = add_salt(image,cl_number) #Added salt to Original image
    blured = add_blur(image_salted,cl_number) #Blured image salted
    image_res = add_defect_B(c11, c22, A, B, blured, image) #Image with Final Defect B
    save_image_defect(defect,num,cl_number,image)
    print num
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
if __name__ == '__main__':
from vnf_metrics import VNFMetrics
else:
from nfvmaddpg.model.gan.utils.vnf_metrics import VNFMetrics
def topos2grid_image(topos, vnf_decoder):
    """Render each network topology with networkx/matplotlib and save it.

    For every non-None graph in *topos*: nodes are labelled with the VNF
    names decoded from their 'usagelist' attribute, edges are drawn solid
    when 'edgeDatarate' > 0.5 and dashed otherwise, and the figure is
    written to pics/topo_<i>.jpg before being shown.

    Args:
        topos: iterable of networkx graphs; entries may be None (skipped).
        vnf_decoder: mapping from usagelist index to VNF name.
    """
    # topos = [e if e is not None else Chem.RWMol() for e in topos]
    # Palette of 17 named colours; one is picked at random per graph below.
    subset_color = [
        "antiquewhite",
        "blanchedalmond",
        "coral",
        "dodgerblue",
        "firebrick",
        "gainsboro",
        "honeydew",
        "indigo",
        "khaki",
        "limegreen",
        "mistyrose",
        "navy",
        "orchid",
        "plum",
        "rosybrown",
        "silver",
        "teal"
    ]
    for num, topo in enumerate(topos):
        if topo is None:
            continue  # skip failed/absent topologies
        # nx.draw(topo, with_labels=True, font_weight='bold')
        labels = dict()
        for node in list(topo.nodes):
            # Decode the node's active VNFs from its usage vector.
            labels[node] = [vnf_decoder[index]
                            for index, usage in enumerate(topo.nodes[node]['usagelist']) if usage != 0]
            if labels[node] == []:
                labels[node] = ''
            else:
                labels[node] = ' '.join(labels[node])
        # Partition edges by data rate: heavy (solid) vs light (dashed).
        elarge = [(u, v)
                  for (u, v, d) in topo.edges(data=True) if d["edgeDatarate"] > 0.5]
        esmall = [(u, v) for (u, v, d) in topo.edges(
            data=True) if d["edgeDatarate"] <= 0.5]
        pos = nx.spring_layout(topo)  # positions for all nodes
        # nodes: one random colour from the palette for the whole graph
        colorindex = np.random.randint(17)
        color = [subset_color[colorindex]
                 for v, data in topo.nodes(data=True)]
        # node_size = 0.5 + np.random.random(len(topo.nodes)) / 2 * 1000
        nx.draw_networkx_nodes(
            topo, pos, node_color=color, node_size=500)
        # edges
        nx.draw_networkx_edges(topo, pos, edgelist=elarge,
                               width=3, edge_color="black")
        nx.draw_networkx_edges(
            topo, pos, edgelist=esmall, width=1.5, alpha=0.5, edge_color="black", style="dashed"
        )
        # labels
        nx.draw_networkx_labels(topo, pos, labels=labels, font_size=10,
                                font_family="sans-serif")
        # nx.draw_networkx_labels(topo, pos=pos, labels=labels)
        plt.axis("off")
        plt.savefig('pics/topo_' + str(num) + '.jpg')
        plt.show()
    # return Draw.MolsToGridImage(mols, molsPerRow=molsPerRow, subImgSize=(150, 150))
# def classification_report(data, model, session, sample=False):
# _, _, _, a, x, _, f, _, _ = data.next_validation_batch()
# n, e = session.run([model.nodes_gumbel_argmax, model.edges_gumbel_argmax] if sample else [
# model.nodes_argmax, model.edges_argmax], feed_dict={model.edges_labels: a, model.nodes_labels: x,
# model.node_features: f, model.training: False,
# model.variational: False})
# n, e = np.argmax(n, axis=-1), np.argmax(e, axis=-1)
# y_true = e.flatten()
# y_pred = a.flatten()
# target_names = [str(Chem.rdchem.BondType.values[int(e)]) for e in data.bond_decoder_m.values()]
# print('######## Classification Report ########\n')
# print(sk_classification_report(y_true, y_pred, labels=list(range(len(target_names))),
# target_names=target_names))
# print('######## Confusion Matrix ########\n')
# print(confusion_matrix(y_true, y_pred, labels=list(range(len(target_names)))))
# y_true = n.flatten()
# y_pred = x.flatten()
# target_names = [Chem.Atom(e).GetSymbol() for e in data.atom_decoder_m.values()]
# print('######## Classification Report ########\n')
# print(sk_classification_report(y_true, y_pred, labels=list(range(len(target_names))),
# target_names=target_names))
# print('\n######## Confusion Matrix ########\n')
# print(confusion_matrix(y_true, y_pred, labels=list(range(len(target_names)))))
# def reconstructions(data, model, session, batch_dim=10, sample=False):
# m0, _, _, a, x, _, f, _, _ = data.next_train_batch(batch_dim)
# n, e = session.run([model.nodes_gumbel_argmax, model.edges_gumbel_argmax] if sample else [
# model.nodes_argmax, model.edges_argmax], feed_dict={model.edges_labels: a, model.nodes_labels: x,
# model.node_features: f, model.training: False,
# model.variational: False})
# n, e = np.argmax(n, axis=-1), np.argmax(e, axis=-1)
# m1 = np.array([e if e is not None else Chem.RWMol() for e in [data.matrices2mol(n_, e_, strict=True)
# for n_, e_ in zip(n, e)]])
# mols = np.vstack((m0, m1)).T.flatten()
# return mols
def samples(data, model, session, embeddings, sample=False):
    """Decode generated topologies from latent *embeddings*.

    Runs the model's node/edge output tensors, min-max scales them along
    the last axis, and converts each (nodes, edges) pair into a topology
    via data.matrices2topo.

    NOTE(review): both branches of the `if sample` expression fetch the
    same tensors, so `sample` currently has no effect — confirm intent.

    Returns:
        list of topology objects produced by data.matrices2topo.
    """
    n, e = session.run([model.nodes_gumbel_softmax, model.edges_logits] if sample else [
        model.nodes_gumbel_softmax, model.edges_logits], feed_dict={
        model.embeddings: embeddings, model.training: False})
    # n, e = np.argmax(n, axis=-1), np.argmax(e, axis=-1)
    n, e = standardization(n), standardization(e)
    mols = [data.matrices2topo(n_, e_, strict=True) for n_, e_ in zip(n, e)]
    return mols
def all_scores(topos, data, norm=False, reconstruction=False):
    """Compute evaluation metrics for generated topologies.

    Currently only the VNF-type-count metric is computed; None entries are
    filtered out of each metric list. *norm* and *reconstruction* are
    accepted for API compatibility but are unused here.

    Returns:
        dict mapping metric name to a list of per-topology scores.
    """
    m0 = {k: list(filter(lambda e: e is not None, v)) for k, v in {
        'typenum score': VNFMetrics.vnftype_num(topos, data.vnf_decoder)}.items()}
    # m1 = {'valid score': MolecularMetrics.valid_total_score(mols) * 100,
    #       'unique score': MolecularMetrics.unique_total_score(mols) * 100,
    #       'novel score': MolecularMetrics.novel_total_score(mols, data) * 100}
    return m0
def standardization(data):
    """Min-max scale a 3-D array along its last axis.

    Despite the name, this is min-max normalisation rather than z-scoring:
    every [i, j, :] slice is mapped into roughly [0, 1]. The small epsilon
    keeps the division finite when a slice is constant.
    """
    lo = np.min(data, axis=-1)[:, :, np.newaxis]
    hi = np.max(data, axis=-1)[:, :, np.newaxis]
    return (data - lo) / (hi - lo + 3.14e-8)
def standardization2(data):
    """Min-max scale a 2-D array along its last axis.

    Same epsilon-guarded min-max normalisation as standardization(), but
    for rank-2 input (one fewer broadcast axis).
    """
    lo = np.min(data, axis=-1)[:, np.newaxis]
    hi = np.max(data, axis=-1)[:, np.newaxis]
    return (data - lo) / (hi - lo + 3.14e-8)
|
import pandas as pd
import time
import calendar
from datetime import datetime, timedelta
# Filenames (Files are in same folder as the python script)
# CSV data files for each supported city; loaded on demand by get_city().
chicago = 'chicago.csv'
new_york_city = 'new_york_city.csv'
washington = 'washington.csv'
def get_city():
    '''Asks the user for a city and returns that city's bike share data.

    Re-prompts recursively until one of the known city names is entered.

    Args:
        None.
    Returns:
        (DataFrame) The chosen city's bikeshare data loaded from its CSV
        file. (Note: returns the loaded DataFrame, not the filename.)
    '''
    city = input('\nHello! Let\'s explore some US bikeshare data!\n'
                 'Would you like to see data for Chicago, New York, or Washington?\n')
    city = city.lower()
    if city == 'chicago':
        return pd.read_csv(chicago)
    elif city == 'new york':
        return pd.read_csv(new_york_city)
    elif city == 'washington':
        return pd.read_csv(washington)
    else:
        print('Please choose a value from options')
        return get_city()
def get_time_period():
    '''Asks the user for a time period and returns the specified filter.

    Re-prompts recursively until a valid option is typed.

    Args:
        None.
    Returns:
        (str) One of 'month', 'day', 'both' or 'none'.
    '''
    choice = input('\nWould you like to filter the data by "month", "day", "both" or not at'
                   ' all? Type "none" for no time filter.\n').lower()
    if choice in ('month', 'day', 'both', 'none'):
        return choice
    print('Please choose a value from options')
    return get_time_period()
def get_month():
    '''Asks the user for a month and returns the specified month.

    Re-prompts recursively until a name from the module-level
    ``months_list`` is typed.

    Args:
        None.
    Returns:
        (str) The chosen month name, lower-cased.
    '''
    choice = input(
        '\nWhich month? January, February, March, April, May, or June?\n').lower()
    if choice in months_list:
        return choice
    print('Please choose a value from options')
    return get_month()
def get_day(month):
    '''Asks the user for a day and returns the specified day.

    Re-prompts recursively until the input is an integer that is a valid
    day of *month* in 2017 (the year the bikeshare data covers).

    Args:
        month: (int) Month number (1-12).
    Returns:
        (int) Day of month chosen by the user.
    '''
    year = 2017  # dataset covers 2017 only
    day = input('\nWhich day? Please type your response as an integer.\n')
    try:
        int_day = int(day)
    except ValueError:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt /
        # SystemExit; only a non-numeric string should trigger a re-prompt.
        print('Please enter an integer day value')
        return get_day(month)
    if 0 < int_day <= calendar.monthrange(year, month)[1]:
        return int_day
    print('Please choose a valid day value for the specified month')
    return get_day(month)
def get_weekday():
    '''Asks the user for a weekday(Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday)
    and returns the specified day.

    Args:
        None
    Returns:
        (int) Index of the chosen weekday in ``day_list`` (monday == 0).
    '''
    week_day = input(
        '\nWhich weekday? Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday?\n')
    week_day = week_day.lower()
    if week_day in day_list:
        # BUG FIX: the original indexed day_list with the raw (possibly
        # capitalised) input after checking membership of the lower-cased
        # value, so e.g. 'Monday' raised ValueError. Index the lower-cased
        # value instead.
        return day_list.index(week_day)
    print('Please choose a valid weekday from the values specified above')
    return get_weekday()
def popular_month(city_file):
    '''Finds the most popular month for trip start times.

    Args:
        city_file: (DataFrame) Bikeshare data with a datetime 'Start Time' column.
    Returns:
        (str) The most popular month name (lower-case) for start time.
    '''
    each_month_start_date_count = city_file['Start Time'].dt.month.value_counts(
    )
    # value_counts() sorts by frequency, so index[0] is the busiest month
    # number (1-based); months_list is 0-based, hence the -1.
    return months_list[each_month_start_date_count.index[0]-1]
def popular_day(city_file):
    '''Finds the most popular day of the week for trip start times.

    Args:
        city_file: (DataFrame) Bikeshare data with a datetime 'Start Time' column.
    Returns:
        (str) Name of the most frequent start weekday, taken from the
        module-level ``day_list`` (weekday 0 == monday).
    '''
    weekday_counts = city_file['Start Time'].dt.weekday.value_counts()
    return day_list[weekday_counts.index[0]]
def popular_hour(city_file):
    '''Finds the most popular hour of the day for trip start times.

    Args:
        city_file: (DataFrame) Bikeshare data with a datetime 'Start Time' column.
    Returns:
        (int) The most frequent start hour (0-23).
    '''
    hour_counts = city_file['Start Time'].dt.hour.value_counts()
    return hour_counts.index[0]
def trip_duration(city_file):
    '''Prints the total trip duration and the mean trip duration.

    The total is decomposed into calendar units by adding the summed
    seconds to an anchor datetime of year 1, month 1, day 1 — hence each
    of the year/month/day components is printed as (field - 1).

    Args:
        city_file: (DataFrame) Bikeshare data with a 'Trip Duration'
            column holding seconds.
    Returns:
        None
    '''
    total_seconds = int(city_file['Trip Duration'].sum())
    breakdown = datetime(1, 1, 1) + timedelta(seconds=total_seconds)
    average_seconds = city_file['Trip Duration'].mean()
    print("Total trip duration is : %d years" %
          (breakdown.year-1), end=',')
    print(" %d months" % (breakdown.month-1), end=',')
    print(" %d days" % (breakdown.day-1), end=',')
    print(" %d hours" % (breakdown.hour), end=',')
    print(" %d minutes" % (breakdown.minute), end=',')
    print(" %d seconds" % (breakdown.second), end='\n\n')
    print("Mean trip duration is : %.4f seconds" %
          (average_seconds))
def popular_stations(city_file):
    '''Finds the most popular start and end stations.

    Args:
        city_file: (DataFrame) Bikeshare data with 'Start Station' and
            'End Station' columns.
    Returns:
        (tuple) (most popular start station, most popular end station)
    '''
    top_start = city_file['Start Station'].value_counts().index[0]
    top_end = city_file['End Station'].value_counts().index[0]
    return top_start, top_end
def popular_trip(city_file):
    '''Prints the most popular (start station, end station) trip.

    Popularity is the number of recorded trips for the station pair.

    Args:
        city_file: (DataFrame) Bikeshare data with 'Start Station',
            'End Station' and 'Trip Duration' columns.
    Returns:
        None
    '''
    pair_counts = city_file.groupby(['Start Station', 'End Station'], as_index=False)[
        'Trip Duration'].count()
    trip = pair_counts.nlargest(1, 'Trip Duration')
    print("The most popular trip is from '{}' to '{}'".format(
        trip.iloc[0, 0], trip.iloc[0, 1]))
def users(city_file):
    '''Prints the count of each user type.

    Args:
        city_file: (DataFrame) Bikeshare data with a 'User Type' column.
    Returns:
        None
    '''
    user_type_count_df = city_file['User Type'].value_counts()
    print('The counts of each user type are given below:')
    # .items() replaces Series.iteritems(), which was removed in pandas 2.0.
    for index, row in user_type_count_df.items():
        print('{} : {}'.format(index, row))
def gender(city_file):
    '''Prints the total count of each gender, if the data includes it.

    Args:
        city_file: (DataFrame) Bikeshare data; may or may not contain a
            'Gender' column (washington.csv does not).
    Returns:
        None
    '''
    if 'Gender' in city_file.columns:
        gender_count_df = city_file['Gender'].value_counts()
        print('The count of each gender is given below:')
        # .items() replaces Series.iteritems(), which was removed in pandas 2.0.
        for index, row in gender_count_df.items():
            print('{} : {}'.format(index, row))
    else:
        print('This file doesn\'t contains gender data')
def birth_years(city_file):
    '''Prints the earliest (oldest user), most recent (youngest user),
    and most popular birth years, if the data includes them.

    Args:
        city_file: (DataFrame) Bikeshare data; may or may not contain a
            'Birth Year' column.
    Returns:
        None
    '''
    if 'Birth Year' not in city_file.columns:
        print('This file doesn\'t contains birth year data')
        return
    # Sort ascending and drop NaNs so the ends of the series are the
    # earliest and latest birth years.
    birth_year = city_file.sort_values('Birth Year', ascending=True)[
        'Birth Year'].dropna()
    oldest_year = birth_year.iloc[0]
    youngest_year = birth_year.iloc[-1]
    popular_birth_year = birth_year.value_counts().index[0]
    print("The most oldest birth year is %d, the most youngest birth year is %d and the popular birth year is %d" % (
        oldest_year, youngest_year, popular_birth_year))
def display_data(city_file):
    '''Displays five lines of data if the user specifies that they would like to.
    After displaying five lines, ask the user if they would like to see five more,
    continuing asking until they say stop.

    Args:
        city_file: (DataFrame) The (possibly filtered) bikeshare data.
    Returns:
        None
    '''
    display = input('\nWould you like to view individual trip data?'
                    'Type \'yes\' or \'no\'.\n')
    if display.lower() != 'yes':
        return
    index = 0
    # BUG FIX: the original condition (index+5 <= len) silently dropped the
    # final partial chunk whenever the row count was not a multiple of 5
    # (and showed nothing at all for fewer than 5 rows).
    while index < len(city_file):
        print(city_file[index: index+5])
        index += 5
        if index >= len(city_file):
            break  # nothing left to offer
        choice = input(
            '\nWould you like to see 5 more rows?\nType \'no\' to stop.\n')
        if choice.lower() == 'no':
            break
def statistics():
    '''Calculates and prints out the descriptive statistics about a city and time period
    specified by the user via raw input.

    Orchestration: prompts for city and time filter, filters the frame
    once, then prints each statistic with its wall-clock timing, and
    finally offers raw-row paging and a restart.

    Args:
        None
    Returns:
        None
    '''
    # Filter by city (Chicago, New York, Washington)
    city_df = get_city()
    convert_datetime(city_df)
    # Filter by time period (month, day, none)
    time_period = get_time_period()
    #########################################################
    # #
    # This logic will filter the data set on the basis #
    # of time period selected by the user i.e. 'month', #
    # 'day' or 'both'. Filtering only required once now. #
    # #
    #########################################################
    if time_period == 'month':
        m = get_month()
        selected_month = months_list.index(m) + 1  # months_list is 0-based
        city_df = city_df[city_df['Start Time'].dt.month == selected_month]
    elif time_period == 'day':
        week_day = get_weekday()
        city_df = city_df[city_df['Start Time'].dt.weekday == week_day]
    elif time_period == 'both':
        m = get_month()
        selected_month = months_list.index(m) + 1
        selected_day = get_day(selected_month)
        city_df = city_df[city_df['Start Time'].dt.month == selected_month]
        city_df = city_df[city_df['Start Time'].dt.day == selected_day]
    print('Calculating the first statistic...')
    # What is the most popular month for start time?
    # (skipped when the data is already filtered to a single month)
    if time_period == 'none':
        start_time = time.time()
        print("The most popular month for start time is {}.".format(
            popular_month(city_df)).title())
        print("That took %s seconds." % (time.time() - start_time))
        print("Calculating the next statistic...")
    # What is the most popular day of week (Monday, Tuesday, etc.) for start time?
    # (skipped when the data is already filtered to a single day)
    if time_period == 'none' or time_period == 'month':
        start_time = time.time()
        print("The most popular day for start time is {}.".format(
            popular_day(city_df)).title())
        print("That took %s seconds." % (time.time() - start_time))
        print("Calculating the next statistic...")
    start_time = time.time()
    # What is the most popular hour of day for start time?
    print("The popular hour of the day for start time is {}th hour.".format(
        popular_hour(city_df)))
    print("That took %s seconds." % (time.time() - start_time))
    print("Calculating the next statistic...")
    start_time = time.time()
    # What is the total trip duration and average trip duration?
    trip_duration(city_df)
    print("That took %s seconds." % (time.time() - start_time))
    print("Calculating the next statistic...")
    start_time = time.time()
    # What is the most popular start station and most popular end station?
    freq_start_station, freq_end_station = popular_stations(
        city_df)
    print("The most popular start station is {} and the most popular end station is {}.".format(
        freq_start_station, freq_end_station))
    print("That took %s seconds." % (time.time() - start_time))
    print("Calculating the next statistic...")
    start_time = time.time()
    # What is the most popular trip?
    popular_trip(city_df)
    print("That took %s seconds." % (time.time() - start_time))
    print("Calculating the next statistic...")
    start_time = time.time()
    # What are the counts of each user type?
    users(city_df)
    print("That took %s seconds." % (time.time() - start_time))
    print("Calculating the next statistic...")
    start_time = time.time()
    # What are the counts of gender?
    gender(city_df)
    print("That took %s seconds." % (time.time() - start_time))
    print("Calculating the next statistic...")
    start_time = time.time()
    # What are the earliest (i.e. oldest user), most recent (i.e. youngest user), and
    # most popular birth years?
    birth_years(city_df)
    print("That took %s seconds." % (time.time() - start_time))
    # Display five lines of data at a time if user specifies that they would like to
    display_data(city_df)
    # Restart?
    restart = input('\nWould you like to restart? Type \'yes\' or \'no\'.\n')
    if restart.lower() == 'yes':
        statistics()
def convert_datetime(city_file):
    '''Converts the 'Start Time' and 'End Time' columns from string to
    datetime, in place.

    Args:
        city_file: (DataFrame) Bikeshare data with string time columns.
    Returns:
        None
    '''
    for column in ('Start Time', 'End Time'):
        city_file[column] = pd.to_datetime(city_file[column])
if __name__ == "__main__":
day_list = ['monday', 'tuesday', 'wednesday',
'thursday', 'friday', 'saturday', 'sunday']
months_list = ['january', 'february', 'march', 'april', 'may', 'june']
statistics()
|
# Evenly split N units into M orders (平均拆單 N張 拆M次)
import sys
def order_split(number, count, max_split):
    """Split *number* units evenly into *count* orders.

    Uses an error-accumulator (Bresenham-style, seeded at the midpoint —
    the "in-order" variant) so that the order sizes differ by at most one
    and always sum exactly to *number*.

    input: number  total quantity to split
    input: count   number of orders
    output: list of per-order sizes, or [] when any single order would
            exceed *max_split*.
    """
    print(f"input: number:{number} count: {count}")
    sizes = []
    # acc = 0           # pre-seeded variant
    acc = count / 2     # midpoint-seeded variant (used)
    # acc = count - 1   # post-seeded variant
    slice_size = 0
    for _ in range(count):
        acc += number
        while acc >= count:
            acc -= count
            slice_size += 1
        if slice_size > max_split:
            return []  # this slice exceeds the per-order cap
        sizes.append(slice_size)
        slice_size = 0
    return sizes
if __name__ == "__main__":
ret = order_split(1000, 2, 499)
sum = 0
index = 0
for i in ret:
index += 1
print(f"{index}. num: {i} sum: {sum}")
sum += i
print(f"sum: {sum}")
sys.exit(0)
|
# Generated by Django 2.0.6 on 2019-12-10 14:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adjusts field options on the account,
    bigcourse and course models (no data migration involved)."""

    dependencies = [
        ('web', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='account',
            name='is_staff',
            # help_text (Chinese): "determines whether the user can log
            # into the admin site"
            field=models.BooleanField(default=True, help_text='决定着用户是否可登录管理后台', verbose_name='staff status'),
        ),
        migrations.AlterField(
            model_name='bigcourse',
            name='study_num',
            field=models.PositiveIntegerField(default=75),
        ),
        migrations.AlterField(
            model_name='course',
            name='study_num',
            field=models.PositiveIntegerField(default=70),
        ),
    ]
|
import json
import time

import pymysql
import redis
import schedule
from pymongo import MongoClient

from config import environments
from utils.wirte_logs import Logger
# Redis instance that receives the broadcaster task queue.
r_2 = redis.StrictRedis(host='192.168.1.180', port=30378)
# NOTE(review): environment is hard-coded to 'dev' — confirm for production.
env_dict = environments.get('dev')
client = MongoClient(env_dict.get('mongodb_host'), env_dict.get('mongodb_port'))
db = client['v3_monitoring']  # monitoring database holding the 'anchors' collection
def anchor_timing_run():
    """Scan every anchor document and queue a monitoring task for each
    active one.

    For each anchor whose 'state' is truthy, logs the push and enqueues an
    'operate_broadcaster' task (as JSON) onto the Redis list
    'operate_broadcaster:tasks'.
    """
    res = db['anchors'].find()
    for anchor_info in res:
        anchor_id = anchor_info.get('_id')
        anchor_name = anchor_info.get('name')
        state = anchor_info.get('state')
        if not state:
            continue  # skip disabled anchors
        # Log message (Chinese): "<name>'s anchor task pushed successfully!"
        Logger('logs/check_anchor.log', level='info').logger.info(f'(schedele_task.py):{anchor_name}该主播任务推送成功!')
        v = {
            'type': 'operate_broadcaster',
            'id': anchor_id
        }
        r_2.lpush('operate_broadcaster:tasks', json.dumps(v))
def live_timing_run():
    """Placeholder for the live-stream scheduled task; not implemented yet."""
    pass
# Re-scan the anchors collection every 30 minutes.
schedule.every(30).minutes.do(anchor_timing_run)
while True:
    schedule.run_pending()
    # Sleep between polls: the original tight loop busy-waited and pinned
    # a CPU core; schedule only needs second-level granularity here.
    time.sleep(1)
|
def plot_var_contours_with_distance(df, mask, var, dist=100, bins=5, wd=12, ht=5, varmin=33, varmax=35, nlevs=10,
                                    colorunit=' ', save=False, savename="Untitled.png",
                                    zbin=10, xbin=10, zmin=0, nmin=0):
    """Contour the binned mean of *var* versus distance from the grounding
    line (x) and depth (y), alongside a map of the 95% confidence interval
    of that mean.

    The masked rows of *df* are binned into `xbin`-km distance bins and
    `zbin`-m depth bins; cells with fewer than *nmin* samples are masked
    out of the mean panel.

    Args:
        df: profile DataFrame with 'DEPTH', 'DIST_GLINE' and *var* columns.
        mask: boolean row mask selecting the profiles to plot.
        var: name of the column to average.
        dist, xbin: maximum distance (km) and distance bin width.
        zbin, zmin: depth bin width and optional y-axis lower limit.
        varmin, varmax, nlevs: contour level range/count for the mean panel.
        colorunit: colourbar label for the mean panel.
        save, savename: optionally save the figure before showing it.
        nmin: minimum samples per cell for the mean to be shown.
        bins, wd, ht: unused bin count (kept for API compatibility) and
            figure width/height in inches.
    Returns:
        None (draws, optionally saves, and shows the figure).
    """
    zlowest = df.loc[mask, 'DEPTH'].min()
    depth_bins = np.arange(zlowest, 0+zbin, zbin)
    dist_bins = np.arange(0, dist+xbin, xbin)
    dist_binned_group = df.loc[mask].groupby(pd.cut(df.loc[mask].DIST_GLINE, dist_bins))
    var_mean = np.zeros((len(dist_bins), len(depth_bins)-1))
    var_count = np.zeros((len(dist_bins), len(depth_bins)-1))
    var_sd = np.zeros((len(dist_bins), len(depth_bins)-1))
    i = 0
    for groupList, xGroup in dist_binned_group:
        zGroup = xGroup.groupby(pd.cut(xGroup.DEPTH, depth_bins))
        var_mean[i] = zGroup[var].mean().values
        var_count[i] = zGroup[var].count().values
        var_sd[i] = zGroup[var].std().values
        i += 1
    # BUG FIX: the original assigned `var_mean = ma.masked_less(var_count, nmin)`,
    # which replaced the means with a masked copy of the *counts*. The intent
    # is to mask the means of under-sampled cells.
    var_mean = ma.masked_where(var_count < nmin, var_mean)
    fig, ax = plt.subplots(1, 2, figsize=(wd, ht))
    #fig.subplots_adjust(hspace=1.3)
    X, Y = np.meshgrid(dist_bins[:], depth_bins[:-1])
    levels = np.linspace(varmin, varmax, nlevs)
    CF = ax[0].contourf(X.T[:-1, :], Y.T[:-1, :], var_mean[:-1, :], levels)
    ax[0].set_ylabel('Depth (m)')
    ax[0].set_xlabel('Distance from grounding line (km)')
    if(zmin != 0):
        ax[0].set_ylim(zmin, 0)
    else:
        ax[0].set_ylim(zlowest, 0)
    cbar1 = fig.colorbar(CF, ax=ax[0])
    cbar1.set_label(colorunit)
    # 95% confidence interval of the sample mean; empty cells get a huge
    # sentinel so they land in the top contour bin.
    conf_int = 1.96*var_sd/np.sqrt(var_count)
    conf_int[np.isnan(conf_int)] = 100000.0
    levels2 = np.linspace(0, 1, 10)
    print(np.max(conf_int[~np.isnan(conf_int)]))
    CF2 = ax[1].contourf(X.T[:-1, :], Y.T[:-1, :], conf_int[:-1, :], levels2)
    ax[1].set_xlabel('Distance from grounding line (km)')
    if(zmin != 0):
        ax[1].set_ylim(zmin, 0)
    cbar2 = fig.colorbar(CF2, ax=ax[1])
    cbar2.set_label('Error in sample mean')
    if(save== True):
        plt.savefig(savename)
    plt.show()
def plot_dist_quarters_region(df, mask_region, varname='CTEMP', varunit='$\\theta^o$C', varmin=-2,
                              varmax=1.6, zmin=0, savename='Untitled', nlevs=10, dist=160):
    """Plot seasonal (DJF/MAM/JJA/SON) distance-vs-depth contour maps of
    *varname* for the profiles selected by *mask_region*.

    One figure per quarter is saved as '<savename><quarter>.png' through
    plot_var_contours_with_distance.

    Args:
        df: profile DataFrame (needs DEPTH, DIST_GLINE and *varname*).
        mask_region: boolean profile mask selecting the region.
        varname, varunit: column to plot and its colourbar label.
        varmin, varmax, nlevs: contour level range/count.
        zmin: optional y-axis lower limit passed through.
        savename: filename prefix for the four saved figures.
        dist: maximum distance from the grounding line (km).
    """
    # Calendar months belonging to each meteorological season.
    months_quarters = np.array([[12,1,2], [3,4,5], [6,7,8], [9,10,11]])
    mth_qt_names = ['DJF', 'MAM', 'JJA', 'SON']
    for i in range(4):
        mask_months = sel_months(df, months_quarters[i])
        mask = get_mask_from_prof_mask(df, (mask_region & mask_months ) )
        plot_var_contours_with_distance(df, mask=mask, var=varname, dist=dist,
                                        colorunit=varunit, save=True,
                                        savename=savename+mth_qt_names[i]+'.png',
                                        wd=14, ht=5,
                                        zmin=zmin, varmin=varmin, varmax=varmax, nlevs=nlevs)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.