text stringlengths 38 1.54M |
|---|
# Print a left-aligned triangle of 1s: row widths 1, 3, 5, ..., 2n-1.
n = int(input())
for width in range(1, 2 * n, 2):
    row = [1] * width
    print(*row)
|
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 28 11:37:04 2021
@author: Maxi
"""
# Problem 1
"""
Assume s is a string of lower case characters.
Write a program that counts up the number of vowels contained in the string s. Valid vowels are: 'a', 'e', 'i', 'o', and 'u'. For example, if s = 'azcbobobegghakl', your program should print:
"""
# Problem 1: count the vowels in the lower-case string s.
VOWELS = "aeiou"
numVowels = 0
s = 'asdsagfvjdcsa'
# Membership test replaces the chain of five equality checks; the old
# "else: numVowels += 0" branch was a no-op and is dropped.
for char in s:
    if char in VOWELS:
        numVowels += 1
print("Number of vowels: ", numVowels)
# Problem 2
"""
Assume s is a string of lower case characters.
Write a program that prints the number of times the string 'bob' occurs in s. For example, if s = 'azcbobobegghakl', then your program should print
"""
# Problem 2: count (possibly overlapping) occurrences of 'bob' in s.
s = 'azcbobobegghaklbobob'
n = 0
# A slice comparison replaces the three separate character checks; the old
# "else: n += 0" branch was a no-op and is dropped.
for i in range(len(s) - 2):
    if s[i:i + 3] == 'bob':
        n += 1
print('Number of times bob occurs is: ' + str(n))
# Problem 3
"""
Assume s is a string of lower case characters.
Write a program that prints the longest substring of s in which the letters occur in alphabetical order. For example, if s = 'azcbobobegghakl', then your program should print
Longest substring in alphabetical order is: beggh
In the case of ties, print the first substring. For example, if s = 'abcbcd', then your program should print
Longest substring in alphabetical order is: abc
"""
# Problem 3: find the first longest substring of s whose letters are in
# non-decreasing alphabetical order. `now` accumulates the current run;
# `ans` holds the best run banked so far.
s = 'azcd'
now = ""
ans = ""
for n in range(len(s)):
    if n+1 < len(s):
        if s[n] <= s[n+1]:
            # Still ascending: keep accumulating (s[n:n+1] is just s[n]).
            now = now + s[n:n+1]
        else:
            # Descent: close the current run with its last character and
            # bank it if it beats the best run seen so far.
            now = now + s[n]
            if len(ans) < len(now):
                ans = now
                now = ""
            else:
                now = ""
    elif n+1 == len(s):
        # Last character: extend the open run only if it strictly ascends,
        # then pick between the open run and the banked one and print once.
        # NOTE(review): the strict `>` here drops a final repeated letter
        # (e.g. 'abb' yields 'ab'), while earlier comparisons use `<=` --
        # confirm whether that asymmetry is intended.
        if s[n] > s[n-1]:
            now = now + s[n]
        if len(now) > len(ans):
            ans = now
            print("Longest substring in alphabetical order is:",ans)
        else:
            print("Longest substring in alphabetical order is:",ans)
from tkinter import *
from tkinter import ttk

# --- Window setup ---------------------------------------------------------
root_window = Tk()
root_window.geometry('+550+350')  # position only; size comes from the widgets
root_window.title('Temperature converter')
root_window.columnconfigure(0, weight=1)
root_window.rowconfigure(0, weight=1)
root_window.resizable(False, False)

# Tk variables backing the two temperature entry fields.
celsius_value = StringVar()
fahrenheit_value = StringVar()

# Main content frame with a 10px margin on every side.
frame = ttk.Frame(root_window, padding=(10,10,10,10))
frame.grid(column=0, row=0, sticky='EWSN')
frame.columnconfigure(0, weight=1)
# frame.rowconfigure(0, weight=1)
def clear_all_entries():
    """Empty both temperature entry fields."""
    for entry in (c_entry, f_entry):
        entry.delete(0, END)
# --- Widgets: a label + entry row for each temperature scale --------------
c_label = ttk.Label(frame, text='Celsius:', font=('Helvetica Neue', 15))
c_entry = ttk.Entry(frame, textvariable=celsius_value)
c_entry.focus()  # start with the cursor in the Celsius field
c_label.grid(column=0, row=0, sticky='W')
c_entry.grid(column=1, row=0, sticky='EW', padx=10, pady=5)
f_label_name = ttk.Label(frame, text='Fahrenheit:', font=('Helvetica Neue', 15))
f_entry = ttk.Entry(frame, textvariable=fahrenheit_value)
f_label_name.grid(column=0, row=1, sticky='W')
f_entry.grid(column=1, row=1, sticky='EW', padx=10, pady=5)
def converter(*args):
    """Convert between the two fields (button command and <Return> handler).

    Prefers Celsius -> Fahrenheit when the Celsius field holds a number;
    otherwise converts Fahrenheit -> Celsius. Invalid/empty input in both
    fields is ignored.

    BUG FIX: the original tested ``if c_entry:`` / ``if f_entry:`` -- widget
    objects are always truthy, so both conversions ran on every call and the
    second one overwrote the value the user actually typed with a rounded
    round-trip result.
    """
    try:
        celsius = float(celsius_value.get())
    except ValueError:
        celsius = None
    if celsius is not None:
        fahrenheit = celsius * 9 / 5 + 32
        fahrenheit_value.set(str(round(fahrenheit, 1)))
    else:
        try:
            fahrenheit = float(fahrenheit_value.get())
            celsius = (fahrenheit - 32) * 5 / 9
            celsius_value.set(str(round(celsius, 1)))
        except ValueError:
            pass
convert_button = ttk.Button(frame, text='Convert', command=converter)
convert_button.grid(column=0, row=2, columnspan=2, pady=5)
clear_button = ttk.Button(frame, text='Clear all', command=clear_all_entries)
clear_button.grid(column=0, row=3, columnspan=2, pady=5)
# Pressing Return anywhere in the window also triggers a conversion.
root_window.bind('<Return>', converter)
s = ttk.Style()
# NOTE(review): the 'aqua' theme exists only in macOS Tk builds; theme_use
# raises TclError elsewhere -- confirm the target platform.
s.theme_use('aqua')
root_window.mainloop()
############################################################################
# Copyright 2018 Anthony Ma & Stanford University #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
############################################################################
import re
from collections import defaultdict
from itertools import product

from vmd import *

from .contact_utils import *
__all__ = ['compute_hydrogen_bonds']

WATER_SHELL_RAD = 3.5  # Experimentally determined to be a low value that doesn't preclude any bridges
# Matches runs of digits in VMD's "measure hbonds" output, e.g. "{106 91}".
int_pattern = re.compile(r'\d+')
def compute_hydrogen_bonds(molid, frame, index_to_atom, sele1, sele2,
                           sele1_atoms, sele2_atoms, geom_criteria):
    """
    Compute hydrogen bonds involving protein for a single frame of simulation

    Parameters
    ----------
    molid: int
        Specifies which trajectory fragment in VMD to perform computations upon
    frame: int
        Specify frame index with respect to the smaller trajectory fragment
    index_to_atom: dict
        Maps VMD atom index to Atom
    sele1: string, default = None
        Compute contacts on subset of atom selection based on VMD query
    sele2: string, default = None
        If second VMD query is specified, then compute contacts between atom selection 1 and 2
    sele1_atoms: set
        Indices of atoms in selection 1
    sele2_atoms: set
        Indices of atoms in selection 2
    geom_criteria: dict
        Container for geometric criteria

    Return
    ------
    list of tuples, [(frame_idx, atom1_label, atom2_label, itype), ...]
    """
    cutoff_distance = geom_criteria['HBOND_CUTOFF_DISTANCE']
    cutoff_angle = geom_criteria['HBOND_CUTOFF_ANGLE']
    res_diff = geom_criteria['HBOND_RES_DIFF']
    # Build two Tcl-side selections: "selunion" = hbond-capable atoms of the
    # query selection(s); "shell" = solvent within WATER_SHELL_RAD of them,
    # used below for water-bridge detection.
    if sele1 == sele2:
        evaltcl("set selunion [atomselect %s \"(%s) and not (carbon or sulfur or solv)\" frame %s]" %
                (molid, sele1, frame))
        evaltcl("set shell [atomselect %s \"(solv) and within %s of (%s)\" frame %s]" %
                (molid, WATER_SHELL_RAD, sele1, frame))
    else:
        evaltcl("set selunion [atomselect %s \"((%s) or (%s)) and not (carbon or sulfur or solv)\" frame %s]" %
                (molid, sele1, sele2, frame))
        evaltcl("set shell [atomselect %s \"(solv) and within %s of ((%s) or (%s))\" frame %s]" %
                (molid, WATER_SHELL_RAD, sele1, sele2, frame))
    ligand_indices = get_selection_indices(molid, frame, "ligand")  # TODO: This can be sped up by not fetching selection indices at every frame
    solvent_atoms = get_selection_indices(molid, frame, "solv")  # TODO: This can be sped up by not fetching selection indices at every frame
    # Direct selection-selection hbonds, restricted to pairs spanning the two
    # selections when a dual selection was requested.
    sel_sel = extract_donor_acceptor(evaltcl("measure hbonds %s %s $selunion" % (cutoff_distance, cutoff_angle)))
    sel_sel = [(d, a) for (d, a) in sel_sel if filter_dual_selection(sele1_atoms, sele2_atoms, d, a)]
    # Selection-water hbonds in both donor/acceptor orders, and water-water
    # hbonds inside the shell (needed for two-water extended bridges).
    sel_solv = set([])
    sel_solv |= extract_donor_acceptor(evaltcl("measure hbonds %s %s $selunion $shell" % (cutoff_distance, cutoff_angle)))
    sel_solv |= extract_donor_acceptor(evaltcl("measure hbonds %s %s $shell $selunion" % (cutoff_distance, cutoff_angle)))
    solv_solv = extract_donor_acceptor(evaltcl("measure hbonds %s %s $shell" % (cutoff_distance, cutoff_angle)))
    evaltcl("$selunion delete")
    evaltcl("$shell delete")
    hbonds = []
    # Stratify hbonds within sele1 and sele2 by ligand/backbone/sidechain.
    for d_idx, a_idx in sel_sel:
        d_atom = index_to_atom[d_idx]
        a_atom = index_to_atom[a_idx]
        # Filter away local interactions
        if d_atom.chain == a_atom.chain and abs(d_atom.resid - a_atom.resid) < res_diff:
            continue
        # if a_atom.resname in solvent_resn or d_atom.resname in solvent_resn:
        #     continue
        d_bb = d_atom.is_bb()
        a_bb = a_atom.is_bb()
        d_lig = d_atom.index in ligand_indices  # d_atom.resname in ligand_resn
        a_lig = a_atom.index in ligand_indices  # a_atom.resname in ligand_resn
        if d_lig and a_lig:
            hb_type = "hbll"
        elif d_lig:
            if a_bb:
                hb_type = "hblb"
            else:
                hb_type = "hbls"
        elif a_lig:
            if d_bb:
                hb_type = "hblb"
            else:
                hb_type = "hbls"
        elif d_bb and a_bb:
            hb_type = "hbbb"
        elif not d_bb and not a_bb:
            hb_type = "hbss"
        else:
            hb_type = "hbsb"
        hbonds.append([frame, hb_type, d_atom.get_label(), a_atom.get_label()])
    # Build dictionary for water bridges
    water_dict = defaultdict(set)  # Associates water ids to lists of neighbors
    for d_idx, a_idx in sel_solv | solv_solv:
        d_atom = index_to_atom[d_idx]
        a_atom = index_to_atom[a_idx]
        # Filter away local interactions
        if d_atom.chain == a_atom.chain and d_atom.resid == a_atom.resid:
            continue
        d_solv = d_idx in solvent_atoms  # d_atom.resname in solvent_resn
        a_solv = a_idx in solvent_atoms  # a_atom.resname in solvent_resn
        if d_solv:
            water_dict[d_idx].add(a_idx)
        if a_solv:
            water_dict[a_idx].add(d_idx)
    # Iterate over waters with hbonds and stratify water mediated interactions
    for w_idx in water_dict:
        w_atom = index_to_atom[w_idx]
        # Iterate over all pairs of neighbors to w_atom
        for n1, n2 in product(water_dict[w_idx], repeat=2):
            n1_atom = index_to_atom[n1]
            n2_atom = index_to_atom[n2]
            n1_solv = n1 in solvent_atoms  # n1_atom.resname in solvent_resn
            n2_solv = n2 in solvent_atoms  # n2_atom.resname in solvent_resn
            # If n1 and n2 are both waters, ignore the pair
            if n1_solv and n2_solv:
                continue
            # Normalize so that n1 is always the non-water endpoint.
            if n1_solv:
                n1, n2 = n2, n1
                n1_solv, n2_solv = n2_solv, n1_solv
                n1_atom, n2_atom = n2_atom, n1_atom
            n1_lig = n1_atom.index in ligand_indices  # n1_atom.resname in ligand_resn
            # If n1 is not water but n2 is, check for extended water-bridges between n1 and neighbors of n2
            if n2_solv:
                # Disregard n2 if index is lower than w_atom to avoid double-counting wb2
                if n2 <= w_idx:
                    continue
                for n2_n in water_dict[n2]:
                    n2_n_atom = index_to_atom[n2_n]
                    # if n2_n_atom.resname not in solvent_resn:
                    if n2_n not in solvent_atoms:
                        # This neighbor of n2 is not a water, so check for extended bridge from n1
                        if not filter_dual_selection(sele1_atoms, sele2_atoms, n1, n2_n):
                            continue
                        if n1_atom.chain == n2_n_atom.chain and abs(n1_atom.resid - n2_n_atom.resid) < res_diff:
                            continue
                        n2_n_lig = n2_n_atom.index in ligand_indices  # n2_n_atom.resname in ligand_resn
                        if n1_lig or n2_n_lig:
                            hb_type = "lwb2"
                        else:
                            hb_type = "wb2"
                        hbonds.append([frame, hb_type, n1_atom.get_label(), n2_n_atom.get_label(), w_atom.get_label(), n2_atom.get_label()])
            else:
                # Both n1 and n2 are non solvent, so check for a regular water-bridge
                if not filter_dual_selection(sele1_atoms, sele2_atoms, n1, n2):
                    continue
                if n1_atom.chain == n2_atom.chain and abs(n1_atom.resid - n2_atom.resid) < res_diff:
                    continue
                n2_lig = n2_atom.index in ligand_indices  # n2_atom.resname in ligand_resn
                if n1_lig or n2_lig:
                    hb_type = "lwb"
                else:
                    hb_type = "wb"
                hbonds.append([frame, hb_type, n1_atom.get_label(), n2_atom.get_label(), w_atom.get_label()])
    return hbonds
def extract_donor_acceptor(hbond_string):
    """
    Extract donors and acceptors from a vmd hbond output

    Parameters
    ----------
    hbond_string: str
        The output of vmds `measure hbonds` command; three lists indicating indices of donors, acceptors and hydrogens
        respectively. For example: "{106 91} {91 55} {107 92}"

    Returns
    -------
    set of (int, int)
        Set of donor and acceptor indices. For example: `set([(106, 91), (91, 55)])`
    """
    indices = [int(token) for token in re.findall(r'\d+', hbond_string)]
    # The string holds three equally long lists: donors, acceptors, hydrogens.
    n_bonds = len(indices) // 3
    donors = indices[:n_bonds]
    acceptors = indices[n_bonds:2 * n_bonds]
    return set(zip(donors, acceptors))
def filter_dual_selection(sele1_atoms, sele2_atoms, idx1, idx2):
    """
    Filter interactions between selection 1 and selection 2

    Parameters
    ----------
    sele1_atoms: list
        List of atom label strings for all atoms in selection 1
    sele2_atoms: list
        List of atom label strings for all atoms in selection 2
    idx1: int
        Atom index for cation
    idx2: int
        Atom index for aromatic atom

    Returns
    -------
    bool
        True if interaction should be included
    """
    # Accept the pair when it spans the two selections in either direction.
    forward = idx1 in sele1_atoms and idx2 in sele2_atoms
    backward = idx1 in sele2_atoms and idx2 in sele1_atoms
    return forward or backward
|
import csv
import zipfile
from io import TextIOWrapper
from tempfile import TemporaryDirectory
from typing import List, Optional
from uuid import UUID
from .base import HQBase
from .interviews import InterviewsApi
from .models import AssignmentWebLink, QuestionnaireDocument
class QuestionnairesApi(HQBase):
    """ Set of functions to access information on Questionnaires. """

    _apiprefix = "/api/v1/questionnaires"

    def get_list(self, fields: Optional[List[str]] = None, questionnaire_id: Optional[str] = None,
                 version: Optional[int] = None, skip: int = 0, take: Optional[int] = None):
        """Yield questionnaires from the GraphQL endpoint.

        :param fields: node fields to fetch; defaults to the basic identity set
        :param questionnaire_id: optionally restrict to one questionnaire
        :param version: optionally restrict to one version
        :param skip: number of records to skip
        :param take: maximum number of records to yield
        """
        if not fields:
            fields = [
                "id",
                "questionnaire_id",
                "version",
                "title",
                "variable",
            ]
        # we always have workspace parameter
        q_args = {
            "workspace": self.workspace
        }
        if questionnaire_id:
            q_args["id"] = questionnaire_id
        if version:
            q_args["version"] = version
        op = self._graphql_query_operation('questionnaires', q_args)
        op.questionnaires.nodes.__fields__(*fields)
        yield from self._get_full_list(op, 'questionnaires', skip=skip, take=take)

    def statuses(self):
        """Return the server's list of possible statuses."""
        return self._make_call(method="get", path=f"{self.url}/statuses")

    def document(self, id: UUID, version: int) -> QuestionnaireDocument:
        """Fetch and parse the full questionnaire document for id/version."""
        return QuestionnaireDocument.parse_obj(
            self._make_call(method="get", path=f"{self.url}/{id}/{version}/document"))

    def interviews(self, id: UUID, version: int):
        """Return an iterator over interviews collected with this questionnaire version."""
        api = InterviewsApi(client=self._hq)
        return api.get_list(questionnaire_id=id, questionnaire_version=version)

    def update_recordaudio(self, id: UUID, version: int, enabled: bool):
        """Enable or disable audio recording for the questionnaire version."""
        _ = self._make_call(method="post",
                            path=f"{self.url}/{id}/{version}/recordAudio",
                            json={"Enabled": enabled})

    def download_web_links(self, id: UUID, version: int, path: Optional[str] = None):
        """Download links for the assignments in Web Mode.

        :param id: questionnaire id
        :param version: questionnaire version
        :param path: optionally specify the download location

        if `path` is specified, zip archive will be downloaded to the location.
        Otherwise, list of ``AssignmentWebLink`` objects will be returned
        """
        # NOTE(review): "{id}${version}" appears to be the server's composite
        # questionnaire identity format (guid$version) -- confirm against the
        # LinksExport endpoint before changing.
        common_args = {
            "method": "get",
            "path": f"{self._hq.baseurl}/{self.workspace}/api/LinksExport/Download/{id}${version}",
            "stream": True,
            "use_login_session": True,
        }
        if path:
            return self._make_call(**common_args, filepath=path)
        # No target path: fetch into a temp dir, then parse the tab-separated
        # links file out of the zip into model objects.
        with TemporaryDirectory() as tempdir:
            outfile = self._make_call(**common_args, filepath=tempdir)
            with zipfile.ZipFile(outfile, "r") as zip_ref:
                with zip_ref.open("interviews.tab") as infile:
                    data = csv.DictReader(TextIOWrapper(infile, 'utf-8'), delimiter="\t")
                    return [AssignmentWebLink.parse_obj(row) for row in data]
|
def coroutine_decorator(func):
    """Decorator that primes a coroutine: create it, advance to the first
    yield, and return it ready to receive values via send()."""
    def primed(*args, **kwargs):
        coro = func(*args, **kwargs)
        next(coro)
        return coro
    return primed
def natural_numbers(*targets):
    """Prompt for a natural number > 1 and stream 1..number-1 into targets.

    :param targets: primed coroutines; each value is sent to targets[0]
        (kind classifier) and targets[1] (running sum).

    FIXES: re-prompts with a loop instead of unbounded recursion; rejects 1
    (the original accepted it and silently sent nothing, contradicting the
    prompt); corrects the "numer" typo in the prompt.
    """
    while True:
        try:
            number = int(input("Enter any natural number > 1:"))
        except ValueError:
            print("Wrong input, enter valid integer")
            continue
        if number <= 1:
            print("Please enter a number greater than one")
            continue
        break
    for num in range(1, number):
        targets[0].send(num)
        targets[1].send(num)
@coroutine_decorator
def check_number_kind(next_target):
    """Sorts numbers into even, odd, odd-prime and even-prime.

    Receives ints via send() and pushes a description string to next_target.

    BUG FIX: the original reported ANY odd number >= 3 as prime, because it
    sent the prime message as soon as it found a single non-divisor (e.g.
    9 % 2 != 0). It now checks every candidate divisor.
    """
    while True:
        number = (yield)
        if number < 2:
            if number % 2 != 0:
                next_target.send(f"{number} is an odd number")
            else:
                next_target.send(f"{number} is an even number")
        elif number == 2:
            next_target.send(f"{number} is an even and a prime number ")
        elif number % 2 == 0:
            next_target.send(f"{number} is an even number")
        else:
            # Odd and >= 3: prime iff no odd divisor up to number - 1.
            if all(number % divisor != 0 for divisor in range(3, number, 2)):
                next_target.send(f"{number} is an odd and a prime number")
            else:
                next_target.send(f"{number} is an odd number")
@coroutine_decorator
def output_number_kind():
    """Outputs values from check_number_kind function"""
    try:
        while True:
            print((yield))
    except GeneratorExit:
        print("Done. Goodbye")
@coroutine_decorator
def sum_numbers(next_target):
    """Forward the running total of all numbers received so far."""
    running_total = 0
    while True:
        running_total += (yield)
        next_target.send(running_total)
@coroutine_decorator
def output_sum():
    """Print each running total received from sum_numbers as a dict."""
    try:
        while True:
            print({'summation': (yield)})
    except GeneratorExit:
        print("Done. Goodbye")
# Wire up the two pipelines and start prompting for input:
# numbers -> kind classifier -> printer, and numbers -> running sum -> printer.
if __name__ == "__main__":
    natural_numbers(check_number_kind(output_number_kind()),
                    sum_numbers(output_sum())
                    )
#Embedded file name: eve/client/script/ui/station/fitting\fittingLayout.py
from eve.client.script.ui.control.themeColored import SpriteThemeColored
import uicls
import carbonui.const as uiconst
import trinity
import uiprimitives
import uicontrols
class FittingLayout(uiprimitives.Container):
    """Circular base of the ship-fitting window: layered background sprites
    plus fill polygons for the calibration / powergrid / CPU status arcs."""
    __guid__ = 'uicls.FittingLayout'
    default_name = 'fittingbase'
    default_width = 640
    default_height = 640
    default_align = uiconst.CENTERLEFT
    default_state = uiconst.UI_HIDDEN

    def ApplyAttributes(self, attributes):
        uiprimitives.Container.ApplyAttributes(self, attributes)
        # Semi-transparent overlay drawn on top of the whole fitting circle.
        overlay = uiprimitives.Sprite(parent=self, name='overlay', align=uiconst.TOALL, state=uiconst.UI_DISABLED, texturePath='res:/UI/Texture/classes/Fitting/fittingbase_overlay.png', color=(1.0, 1.0, 1.0, 0.39))
        # Additively-blended fill polygons; stored on self.sr so other code
        # can update them as resource usage changes.
        self.sr.calibrationStatusPoly = uicls.Polygon(parent=self, name='calibrationStatusPoly', align=uiconst.CENTER, spriteEffect=trinity.TR2_SFX_FILL, blendMode=trinity.TR2_SBM_ADD)
        self.sr.powergridStatusPoly = uicls.Polygon(parent=self, name='powergridStatusPoly', align=uiconst.CENTER, spriteEffect=trinity.TR2_SFX_FILL, blendMode=trinity.TR2_SBM_ADD)
        self.sr.cpuStatusPoly = uicls.Polygon(parent=self, name='cpuStatusPoly', align=uiconst.CENTER, spriteEffect=trinity.TR2_SFX_FILL, blendMode=trinity.TR2_SBM_ADD)
        baseDOT = uiprimitives.Sprite(parent=self, name='baseDOT', align=uiconst.TOALL, state=uiconst.UI_DISABLED, texturePath='res:/UI/Texture/classes/Fitting/fittingbase_dotproduct.png', spriteEffect=trinity.TR2_SFX_DOT, blendMode=trinity.TR2_SBM_ADD)
        self.sr.baseColor = SpriteThemeColored(parent=self, name='baseColor', align=uiconst.TOALL, state=uiconst.UI_DISABLED, texturePath='res:/UI/Texture/classes/Fitting/fittingbase_basecircle.png', colorType=uiconst.COLORTYPE_UIBASE)
        self.sr.baseShape = uiprimitives.Sprite(parent=self, name='baseShape', align=uiconst.TOALL, state=uiconst.UI_DISABLED, texturePath='res:/UI/Texture/classes/Fitting/fittingbase.png', color=(0.0, 0.0, 0.0, 0.86))
class FittingSlotLayout(uiprimitives.Transform):
    """A single module slot on the fitting wheel: module icon, group mark,
    underlay frame and an (initially hidden) charge indicator bar."""
    __guid__ = 'uicls.FittingSlotLayout'
    default_name = 'fittingSlot'
    # NOTE(review): a 1-tuple where the sibling defaults are plain ints --
    # looks like a decompilation artifact; confirm what consumes default_left.
    default_left = (256,)
    default_width = 44
    default_height = 54
    default_align = uiconst.TOPLEFT
    default_state = uiconst.UI_NORMAL

    def ApplyAttributes(self, attributes):
        uiprimitives.Transform.ApplyAttributes(self, attributes)
        # Small mark showing which weapon/module group the slot belongs to.
        groupParent = uiprimitives.Transform(parent=self, name='groupParent', pos=(-10, 14, 16, 16), align=uiconst.CENTER, state=uiconst.UI_PICKCHILDREN)
        groupMark = uicontrols.Icon(parent=groupParent, name='groupMark', pos=(0, 0, 20, 20), align=uiconst.TOPLEFT)
        # The fitted module's icon fills the slot area.
        iconParent = uiprimitives.Transform(parent=self, name='iconParent', align=uiconst.TOPLEFT, state=uiconst.UI_DISABLED, pos=(0,
         0,
         self.width,
         self.height))
        icon = uicontrols.Icon(parent=iconParent, name='icon', pos=(8, 8, 8, 8), align=uiconst.TOALL, state=uiconst.UI_NORMAL)
        underlay = uicontrols.Icon(parent=self, name='underlay', align=uiconst.TOALL, state=uiconst.UI_DISABLED, padding=(-10, -5, -10, -5), icon='ui_81_64_1', filter=True)
        # Thin bar above the slot showing loaded charges; hidden until needed.
        chargeIndicator = uicontrols.Icon(parent=self, name='chargeIndicator', padTop=-2, height=7, align=uiconst.TOTOP, state=uiconst.UI_HIDDEN, icon='ui_81_64_2', ignoreSize=True)
        chargeIndicator.rectWidth = 44
        chargeIndicator.rectHeight = 7
        # Expose the interactive children on self.sr for external access.
        self.sr.underlay = underlay
        self.sr.chargeIndicator = chargeIndicator
        self.sr.flagIcon = icon
        self.sr.groupMark = groupMark
|
from flask import Flask, jsonify, g
from flask_login import LoginManager
import os
from flask_cors import CORS
import models
from resources.users import users
from resources.profiles import profiles
from resources.jobposts import jobposts
from resources.companies import companies
from resources.jobapplications import jobapplications
PORT = 8000
DEBUG = True

app = Flask(__name__)
# BUG FIX: the key was the literal string "os.environ.get('SECRET_KEY')"
# (quotes included), so every deployment signed sessions with the same
# hard-coded, publicly visible value. Read the environment for real, with a
# dev-only fallback so local runs still work when SECRET_KEY is unset.
app.secret_key = os.environ.get('SECRET_KEY', 'dev-only-insecure-key')

login_manager = LoginManager()
login_manager.init_app(app)
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: resolve a session user id to a User row.

    Returns None when no matching user exists, which makes Flask-Login
    treat the session as anonymous.
    """
    try:
        print("loading the following user")  # NOTE(review): debug print; consider logging
        user = models.User.get_by_id(user_id)
        return user
    except models.DoesNotExist:
        return None
## unauthorized handler, called after login_required call and
## no user is logged in
@login_manager.unauthorized_handler
def unauthorized():
    """Return a 401 JSON payload when an anonymous user hits a protected route."""
    body = jsonify(
        data={
            'error': 'User not logged in'
        },
        message="You must be logged in",
        status=401
    )
    return body, 401
## CORS implementation
# One shared origin whitelist and blueprint table instead of five
# copy-pasted CORS calls followed by five register_blueprint calls.
_ORIGINS = ['http://localhost:3000', 'https://untapped-society.herokuapp.com']
_BLUEPRINTS = [
    (users, '/api/v1/users'),
    (profiles, '/api/v1/profiles'),
    (jobposts, '/api/v1/jobposts'),
    (companies, '/api/v1/companies'),
    (jobapplications, '/api/v1/jobapplications'),
]
for _blueprint, _prefix in _BLUEPRINTS:
    CORS(_blueprint, origins=_ORIGINS, supports_credentials=True)
    app.register_blueprint(_blueprint, url_prefix=_prefix)
@app.before_request
def before_request():
    """Open a database connection for the lifetime of this request."""
    g.db = models.DATABASE
    g.db.connect()
@app.after_request
def after_request(response):
    """Close the per-request database connection and pass the response on."""
    g.db.close()
    return response
@app.route('/')
def hello():
    """Root health-check endpoint."""
    status_message = 'Server running'
    return status_message
# On Heroku the tables are initialized at import time, since the __main__
# guard below never fires there (presumably the dyno runs a WSGI server --
# confirm against the Procfile).
if 'ON_HEROKU' in os.environ:
    print('\non heroku!')
    models.initialize()

if __name__ == '__main__':
    models.initialize()
    app.run(debug=DEBUG, port=PORT)
from collections import namedtuple

# A single reading: the station that produced it, when, and the value.
Data = namedtuple('Data', ['station', 'timestamp', 'value'])
# The Postgres and Mongo values recorded for the same timestamp --
# presumably for cross-store comparison; confirm with callers.
DataComparison = namedtuple('DataComparison', ['timestamp', 'pg_value', 'mongo_value'])
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import models
class Contact_types(models.Model):
    """Reference table: kinds of customer contact info."""
    # `type` shadows the builtin, but renaming would change the DB schema.
    type = models.CharField(verbose_name='Тип контакта', max_length=30)

    def __unicode__(self):
        return self.type

    class Meta:
        verbose_name = 'Тип контакта'
        verbose_name_plural = 'Типы контактов'
class Stages(models.Model):
    """Reference table: named deal stages."""
    name = models.CharField(verbose_name='Название', max_length=127)

    def __unicode__(self):
        return self.name

    class Meta:
        verbose_name = 'Стадия сделки'
        verbose_name_plural = 'Справочник стадий сделок'
class Product_groups(models.Model):
    """Reference table: product groupings."""
    name = models.CharField(verbose_name='Название', max_length=127)

    def __unicode__(self):
        return self.name

    class Meta:
        verbose_name = 'Группа продуктов'
        verbose_name_plural = 'Группы продуктов'
class Products(models.Model):
    """A sellable product, optionally attached to a product group."""
    product_group = models.ForeignKey(Product_groups, verbose_name='Группа продукта', blank=True, null=True)
    name = models.CharField(verbose_name='Название', max_length=127)
    price = models.IntegerField(verbose_name='Стоимость')
    date_create = models.DateTimeField(auto_now_add=True, verbose_name='Дата создания')

    def __unicode__(self):
        return self.name

    class Meta:
        verbose_name = 'Продукт'
        verbose_name_plural = 'Продукты'
class Customers(models.Model):
    """A customer the CRM tracks deals and contacts for."""
    name = models.CharField(verbose_name='Имя', max_length=191)
    date_create = models.DateTimeField(auto_now_add=True, verbose_name='Дата создания')

    def __unicode__(self):
        return self.name

    class Meta:
        verbose_name = 'Покупатель'
        verbose_name_plural = 'Покупатели'
class Contacts(models.Model):
    """One piece of contact info (of some Contact_types) for a customer."""
    user = models.ForeignKey(Customers, verbose_name='Пользователь')
    type = models.ForeignKey(Contact_types, verbose_name='Тип контакта')
    contact = models.CharField(verbose_name='Контакт', max_length=191)
    date_create = models.DateTimeField(auto_now_add=True, verbose_name='Дата создания')

    def __unicode__(self):
        # Displays the owning customer's name, not the contact value itself.
        return self.user.name

    class Meta:
        verbose_name = 'Контакт'
        verbose_name_plural = 'Контакты'
class Deal_stages(models.Model):
    """A stage instance of a deal, with open/close timestamps and notes."""
    stage = models.ForeignKey(Stages, verbose_name='Стадия')
    date_open = models.DateTimeField(auto_now_add=True, verbose_name='Дата открытия')
    date_close = models.DateTimeField(verbose_name='Дата закрытия', blank=True, null=True)
    comments = models.TextField(verbose_name='Комментарий', max_length=2000, blank=True, null=True)

    def __unicode__(self):
        return self.stage.name

    class Meta:
        verbose_name = 'Стадия сделки'
        verbose_name_plural = 'Стадии сделок'
class Sources(models.Model):
    """Reference table: where a deal originated from."""
    name = models.CharField(verbose_name='Исчтоник', max_length=127)

    def __unicode__(self):
        return self.name

    class Meta:
        verbose_name = 'Источник сделки'
        verbose_name_plural = 'Источники сделок'
class Deals(models.Model):
    """A sales deal: customer, assigned manager, product, current stage,
    open/close dates and optional source."""
    # NOTE(review): ForeignKey without on_delete implies Django < 2.0.
    customer = models.ForeignKey(Customers, verbose_name='Покупатель')
    manager = models.ForeignKey(User, verbose_name='Менеджер')
    product = models.ForeignKey(Products, verbose_name='Продукт')
    deal_stage = models.ForeignKey(Deal_stages, verbose_name='Стадия сделки')
    date_open = models.DateTimeField(auto_now_add=True, verbose_name='Дата открытия')
    date_close = models.DateTimeField(verbose_name='Дата закрытия', blank=True, null=True)
    comments = models.TextField(verbose_name='Комментарий', max_length=2000, blank=True, null=True)
    source = models.ForeignKey(Sources, verbose_name='Источник сделки', blank=True, null=True)

    class Meta:
        verbose_name = 'Сделка'
        verbose_name_plural = 'Сделки'
import os
import pandas as pd
import numpy as np
import math
from scipy.stats import gmean
from fuzzywuzzy import fuzz

# Show wide frames in full when inspecting interactively.
pd.options.display.max_seq_items = 2000
pd.options.display.max_colwidth = 1000

# Scraped price quotes, read with latin-1 encoding.
x =pd.read_csv('/home/mint/my-data/WebScrapingPhase2/Data/test/_main_.csv',encoding = "latin_1")
#x = pd.read_csv('/home/mint/my-data/Web scraped CPI/Data/data_imputed_20160504.csv',encoding="latin_1")
x=x[x['item_price_num']>0]  # drop zero/negative prices
#%%
x1 = x[x["ons_item_no"]==212722]
# Strip non-ASCII characters so fuzzy name matching behaves.
def _removeNonAscii(s): return "".join(i for i in s if ord(i)<128)
x1["product_name"]=x1["product_name"].apply(lambda s: _removeNonAscii(s))
#%%
# Product+store string used as the per-series grouping key throughout.
x["idvar"]=x["product_name"]+" "+x["store"]
x = x[["ons_item_no","product_name","store","monthday","item_price_num","idvar"]]
fulldata = x
#%%
# Collection dates (YYYYMMDD integers) for each index year.
dates14 = pd.DataFrame()
dates14["date"] = [20140610,20140715, 20140812, 20140919, 20141014, 20141111, 20141209, 20150113]
dates15 = pd.DataFrame()
dates15["date"] = [20150113, 20150217, 20150317, 20150414, 20150512, 20150609, 20150714, 20150811, 20150908, 20151013, 20151110,20151215, 20160112]
dates16 = pd.DataFrame()
dates16["date"] = [20160112, 20160216]
# Per-year slices used as replacement pools by compreplace/runfuzz.
fx14=fulldata[fulldata.monthday.isin(dates14["date"])]
fx15=fulldata[fulldata.monthday.isin(dates15["date"])]
fx16=fulldata[fulldata.monthday.isin(dates16["date"])]
def compreplace(data, sda, i):
    """Fill the i-th still-missing observation in `data` using the closest
    fuzzy-matched product (token ratio >= 60) from the pool `sda`.

    NOTE(review): indentation was reconstructed from flattened source; the
    placement of the final idvar-lookup/return relative to the inner
    if/else is the most plausible reading -- verify against the original.
    NOTE(review): DataFrame.ix, DataFrame.append and take-style chaining are
    pre-0.20 pandas APIs (use .loc and pd.concat in modern pandas).
    """
    # The i-th row still flagged as missing (empty frame when none remain).
    row=data[data["Missing"]=="Yes"][i-1:i]
    if row.empty:
        data =data[["product_name","ons_item_no","idvar","store","monthday","item_price_num","Missing"]]
        return data.sort_values("monthday")
    else:
        # Candidate pool: same ONS item, same date, same store.
        sda = sda[sda["ons_item_no"]==int(row["ons_item_no"])]
        k1 = sda[sda["monthday"] .isin(row["monthday"])]
        k1 = k1[k1["store"] .isin(row["store"])]
        # Fuzzy-match candidate product names against the missing row's name.
        d = k1.apply(lambda x: fuzz.ratio(x['product_name'], row['product_name']), axis=1)
        d = d[d >= 60]
        if len(d) != 0:
            # Best-scoring candidate replaces the missing month's row.
            v = k1.ix[d.idxmax(), ['store','monthday','item_price_num','product_name','idvar','ons_item_no']].values
            v= pd.Series(v, index=['store', 'monthday','item_price_num','product_name','idvar','ons_item_no'])
            v= pd.DataFrame([{"product_name":v["product_name"],"store":v["store"],"ons_item_no":v["ons_item_no"],"idvar":v["idvar"],"monthday":v["monthday"],"item_price_num":v["item_price_num"],"Missing":"Replaced"}])
            data = data[data["monthday"] != int(v["monthday"])]
            data =data[["product_name","store","monthday","item_price_num","Missing","ons_item_no","idvar"]]
            data = data.append(v).sort_values("monthday")
        else:
            data =data[["product_name","store","monthday","item_price_num","Missing","ons_item_no","idvar"]]
            data = data.sort_values("monthday")
        # Pull the (possibly replaced) product's full series from the pool to
        # patch any remaining null prices, marking those rows "Replaced".
        row2 = data[data["monthday"]==int(row["monthday"])]["idvar"].item()
        sdb = sda[sda["idvar"]==str(row2)]
        sdb["Missing"]= "Replaced"
        data2 = pd.concat([data.loc[data['item_price_num'].isnull()],sdb])
        data3 = pd.concat([data.loc[data['item_price_num'].notnull()],data2.drop_duplicates(subset='monthday',keep='last')])
        data3 = data3.drop_duplicates(subset='monthday',keep='first')
        return data3.sort_values("monthday")
def runfuzz(data, fx2):
    """Apply compreplace in six chained passes at each missing-slot index.

    NOTE(review): every iteration of the outer loop restarts its chain from
    the ORIGINAL `data`, so the chains built for i = 1..4 are discarded and
    only the i = 5 chain is returned -- `data = the6` at the end of each
    iteration looks intended. Left unchanged to preserve current output;
    confirm before fixing.
    """
    for i in range(1,6):
        the1 = compreplace(data,fx2,i)
        the2 = compreplace(the1,fx2,i)
        the3 = compreplace(the2,fx2,i)
        the4= compreplace(the3,fx2,i)
        the5 = compreplace(the4,fx2,i)
        the6 = compreplace(the5,fx2,i)
    return the6
def LCPI(x1, fx2, dates):
    """Build one product/store price series over the collection dates,
    patching gaps first from the +/-1-day scrapes and then via fuzzy
    replacement (runfuzz). Returns None (after printing "empty") when the
    series has no observations on the collection dates.

    NOTE(review): drop_duplicates(take_last=...), DataFrame.append and
    fillna(method="pad") are pandas <= 0.17-era APIs.
    """
    # Collection dates plus the adjacent days used as fallbacks.
    dates["dateplus"]= dates["date"].apply(lambda s: int(s)+1)
    dates["datemin"]= dates["date"].apply(lambda s: int(s)-1)
    x2=x1[x1.monthday.isin(dates["date"])]
    if x2.empty:
        print("empty")
        return None
    xplus=x1[x1.monthday.isin(dates["dateplus"])]
    xmin=x1[x1.monthday.isin(dates["datemin"])]
    #add missing column saying no
    x2["Missing"] = "No"
    #mydata2["std_price_origin"] = mydata2["std_price"]
    xplus["Missing"] = "Plus replace"
    xmin["Missing"] = "Min replace"
    # Shift the fallback rows onto the official collection date, then keep
    # the first row seen per date (real observation wins over fallbacks).
    xplus["monthday"]=xplus["monthday"]-1
    xmin["monthday"]=xmin["monthday"]+1
    bplus=x2.append(xplus,ignore_index = True)
    bplus2= bplus.drop_duplicates(subset='monthday', take_last=False)
    bmin=bplus2.append(xmin,ignore_index = True)
    x3 = bmin.drop_duplicates(subset='monthday', take_last=False).sort_values("monthday")
    del dates["dateplus"]
    del dates["datemin"]
    # Outer-merge against the full date list so missing months appear as NaN.
    table = pd.merge(x3, dates, how='outer', left_on='monthday', right_on='date')
    del table["monthday"]
    table["monthday"] = table["date"]
    del table["date"]
    # Broadcast the series identity columns into the gap rows.
    table["ons_item_no"] = table["ons_item_no"][table["ons_item_no"].notnull()].iloc[0]
    table["idvar"] = table["idvar"][table["idvar"].notnull()].iloc[0]
    table["product_name"] = table["product_name"][table["product_name"].notnull()].iloc[0]
    table["idvar"] = table["idvar"][table["idvar"].notnull()].iloc[0]
    table["store"] = table["store"][table["store"].notnull()].iloc[0]
    table["Missing"] = table["Missing"].apply(lambda x: "Yes" if pd.isnull(x) else x)
    data =table.sort_values("monthday")
    # Repeated fuzzy passes, then forward-fill at most one remaining gap.
    take =runfuzz(data,fx2)
    take2 =runfuzz(take,fx2)
    take3 =runfuzz(take2,fx2)
    take4 =runfuzz(take3,fx2)
    take4=take4.fillna(method="pad",limit=1)
    print(table["idvar"])
    return take4
#%%
# Run the gap-filling per product/store series and export one CSV per year.
# NOTE(review): four column labels are assigned to frames produced from
# wider LCPI output -- confirm the expected shape before relying on these.
a14=[]
a14.append(x.groupby('idvar').apply(lambda L: LCPI(L,fx14, dates14)))
Results14 = np.concatenate(a14, axis=0) # axis = 1 would append things as new columns
results14=pd.DataFrame(Results14)
results14.columns=["i", "ons_item_number", "period", "unit"]
results14.to_csv("/home/mint/my-data/WebScrapingPhase2/Data/test/LCPI14.csv")
a15=[]
a15.append(x.groupby('idvar').apply(lambda L: LCPI(L,fx15, dates15)))
Results15 = np.concatenate(a15, axis=0) # axis = 1 would append things as new columns
results15=pd.DataFrame(Results15)
results15.columns=["i", "ons_item_number", "period", "unit"]
results15.to_csv("/home/mint/my-data/WebScrapingPhase2/Data/test/LCPI15.csv")
a16=[]
a16.append(x.groupby('idvar').apply(lambda L: LCPI(L,fx16, dates16)))
Results16 = np.concatenate(a16, axis=0) # axis = 1 would append things as new columns
results16=pd.DataFrame(Results16)
results16.columns=["i", "ons_item_number", "period", "unit"]
results16.to_csv("/home/mint/my-data/WebScrapingPhase2/Data/test/LCPI16.csv")
# Simple interest: principal * time * rate / 100.
# FIXES: float() instead of int() so fractional principals/rates (e.g. a
# 2.5% rate) are accepted -- integer input still works; "intrest" typo in
# the output label corrected.
p = float(input("enter the p value"))  # principal
t = float(input("enter the t value"))  # time in years
r = float(input("enter the r value"))  # annual rate in percent
si = (p * t * r) / 100
print("simple interest", si)
import torch
from collections import OrderedDict, Counter
from functools import partial
from dataclasses import dataclass
from typing import Tuple, List
from copy import deepcopy
from math import prod
class StatsCounter:
    """A Counter-like tally supporting per-key increments and ``+=`` merges.

    Unlike ``collections.Counter.update``, ``update`` here takes a single
    key plus an optional numeric increment.
    """

    def __init__(self):
        self._dict = {}  # key -> accumulated count

    def update(self, key, v=None):
        """Add ``v`` (default 1) to ``key``'s tally, creating it if absent.

        BUG FIX: a brand-new key was previously always initialised to 1,
        even when an explicit ``v`` was supplied, which corrupted counts
        merged in via ``__iadd__`` for keys not yet present.
        """
        increment = 1 if v is None else v
        if key not in self._dict:
            self._dict[key] = increment
        else:
            self._dict[key] += increment

    def __iadd__(self, other):
        """Merge another StatsCounter's tallies into this one."""
        if isinstance(other, StatsCounter):
            for k, v in other.items():
                self.update(k, v)
        else:
            raise TypeError("Can only add other StatsCounters to other StatsCounters")
        return self

    def __getitem__(self, key):
        return self._dict[key]

    def __str__(self):
        return self._dict.__str__()

    def __repr__(self):
        return self._dict.__repr__()

    def items(self):
        return self._dict.items()
@dataclass
class LayerDimensions:
    """Geometry recorded for one Conv2d layer during a forward pass."""
    kernel_size: Tuple[int, int]
    stride: Tuple[int, int]
    padding: Tuple[int, int]
    # Sizes captured from the forward hook; see ModelStatCollector.
    input_size: List[int]
    output_size: List[int]
class ModelStatCollector:
    """Collects per-Conv2d-layer dimension info from a model via forward hooks."""
    def __init__(self):
        self.model_stats = OrderedDict()  # layer name -> LayerDimensions
        self.hooks = []                   # handles of registered forward hooks
    def __get_next_conv_layers(self, model):
        # Yield (name, module) for every Conv2d submodule of the model.
        for name, module in model.named_modules():
            if isinstance(module, torch.nn.modules.conv.Conv2d):
                yield (name, module)
    def __extract_stats(self, name, module, input, output):
        # Forward-hook body: record the layer's geometry under its name.
        # NOTE(review): hook inputs arrive as a tuple, so input[0] is the full
        # input tensor (batch dim included); output[0] indexes into the output
        # tensor itself, dropping the batch dim. The asymmetry appears to be
        # relied on by the analyser code below — confirm before changing.
        self.model_stats[name] = LayerDimensions(
            module.kernel_size,
            module.stride,
            module.padding,
            input_size=list(input[0].size()),
            output_size=list(output[0].size()),
        )
    def __attach_collection_hooks_to_model(self, model):
        # Bind the layer name into the hook callback and register it.
        for name, conv_layer in self.__get_next_conv_layers(model):
            layer_collector = partial(self.__extract_stats, name)
            self.hooks.append(conv_layer.register_forward_hook(layer_collector))
    def __detach_stats_collection_hooks(self):
        for hook in self.hooks:
            hook.remove()
    def __reset(self):
        self.model_stats = {}
        self.hooks = []
    @classmethod
    def collect_stats_from_model(cls, model, input_batch):
        """Run one no-grad forward pass of input_batch through model and
        return an OrderedDict mapping conv layer names to LayerDimensions."""
        collector = cls()
        collector.__attach_collection_hooks_to_model(model)
        model.eval()
        with torch.no_grad():
            model(input_batch)
        collector.__detach_stats_collection_hooks()
        # Deep-copy so the returned stats outlive the collector's reset.
        collected_stats = deepcopy(collector.model_stats)
        collector.__reset()
        return collected_stats
class ModelStatAnalyser:
    """Derives summary statistics from a {layer_name: LayerDimensions} mapping."""

    @classmethod
    def get_kernel_stats(cls, model_stats):
        """Count occurrences of each kernel size, keyed by its first dim.

        (A dead per-kernel stride dict that was built but never returned
        has been removed; get_stride_stats provides that data.)
        """
        kernel_size_counter = StatsCounter()
        for layer in model_stats.values():
            kernel_size_counter.update(f"{layer.kernel_size[0]}")
        return kernel_size_counter

    @classmethod
    def operation_analysis(cls, model):
        """Print the name of every submodule in the model."""
        for name, module in model.named_modules():
            print(name)

    @classmethod
    def get_intermediate_layer_sizes(cls, model_stats):
        """Return (element_count, size_list) for every layer's input, plus the
        final layer's output. Raises IndexError on an empty mapping."""
        sizes = [
            (prod(layer.input_size), layer.input_size)
            for layer in model_stats.values()
        ]
        last_layer = list(model_stats.values())[-1]
        sizes.append((prod(last_layer.output_size), last_layer.output_size))
        return sizes

    @classmethod
    def get_intermediate_layer_size_bounds(cls, model_stats):
        """Return ((max_count, size), (min_count, size)) over all layers."""
        intermediate_layer_sizes = cls.get_intermediate_layer_sizes(model_stats)
        max_layer_size = max(intermediate_layer_sizes, key=lambda elem: elem[0])
        min_layer_size = min(intermediate_layer_sizes, key=lambda elem: elem[0])
        return (max_layer_size, min_layer_size)

    @classmethod
    def get_ub_input_size(cls, model_stats):
        """Largest kernel area (kh * kw) across all conv layers.

        Bug fix: the original called prod(layer.kernel_size[0]) — prod() over
        a scalar int raises TypeError; the whole kernel tuple was intended.
        """
        return max(prod(layer.kernel_size) for layer in model_stats.values())

    @classmethod
    def get_in_channel_stats(cls, model_stats):
        """Histogram {kernel_size: {input_channels: count}} over layers."""
        in_channel_dict = {}
        for layer in model_stats.values():
            kernel_key = f"{layer.kernel_size[0]}"
            channel_key = f"{layer.input_size[1]}"
            per_kernel = in_channel_dict.setdefault(kernel_key, {})
            per_kernel[channel_key] = per_kernel.get(channel_key, 0) + 1
        return in_channel_dict

    @classmethod
    def get_filter_stats(cls, model_stats):
        """Histogram {kernel_size: {output_channels: count}} over layers.

        NOTE(review): uses output_size[0] as the channel count, which assumes
        the batch dim was dropped when output_size was recorded — see
        ModelStatCollector.
        """
        out_channel_dict = {}
        for layer in model_stats.values():
            kernel_key = f"{layer.kernel_size[0]}"
            filter_key = f"{layer.output_size[0]}"
            per_kernel = out_channel_dict.setdefault(kernel_key, {})
            per_kernel[filter_key] = per_kernel.get(filter_key, 0) + 1
        return out_channel_dict

    @classmethod
    def get_stride_stats(cls, model_stats):
        """Per kernel size, a StatsCounter of stride occurrences."""
        stride_counter_dict = {}
        for layer in model_stats.values():
            kernel_key = f"{layer.kernel_size[0]}"
            if kernel_key not in stride_counter_dict:
                stride_counter_dict[kernel_key] = StatsCounter()
            stride_counter_dict[kernel_key].update(f"{layer.stride[0]}")
        return stride_counter_dict

    @classmethod
    def get_models_stats_dict(cls, model_dict, input_batch, ssd_input_batch=None):
        """Collect conv-layer stats for each model and bundle the analyses.

        The model named "ssd" is fed ssd_input_batch when one is provided.

        Bug fix: models were previously moved to "cuda" unconditionally,
        crashing on CPU-only machines; the device is now chosen once and
        used consistently. (An unused raw_stats_dict local was removed.)
        """
        stats_dict = {}
        device = "cuda" if torch.cuda.is_available() else "cpu"
        if device == "cuda":
            input_batch = input_batch.to(device)
            if ssd_input_batch is not None:
                ssd_input_batch = ssd_input_batch.to(device)
        for model_name, model in model_dict.items():
            model.to(device)
            if model_name == "ssd" and ssd_input_batch is not None:
                batch = ssd_input_batch
            else:
                batch = input_batch
            model_stats = ModelStatCollector.collect_stats_from_model(model, batch)
            model.to("cpu")
            stats_dict[model_name] = {
                "kernel": cls.get_kernel_stats(model_stats),
                "stride": cls.get_stride_stats(model_stats),
                "in_channel": cls.get_in_channel_stats(model_stats),
                "filters": cls.get_filter_stats(model_stats),
                "intermediate_layer_sizes": cls.get_intermediate_layer_sizes(model_stats),
                "intermediate_layer_bounds": cls.get_intermediate_layer_size_bounds(model_stats),
                "raw_stats": model_stats,
            }
        return stats_dict
class ModelStatsAggregator:
    """Combines per-model stats dicts (from ModelStatAnalyser) into
    cross-model aggregates and percentage breakdowns."""

    @staticmethod
    def _sorted_by_count(counts):
        # Return a copy of a {key: count} dict ordered by count, descending.
        return {
            k: v
            for k, v in sorted(counts.items(), key=lambda item: item[1], reverse=True)
        }

    @classmethod
    def _aggregate_per_kernel(cls, stats_dict, field):
        # Sum the {kernel: {size: count}} dicts of `field` over all models,
        # sorting each kernel's sizes by frequency.
        aggregated = {}
        for stats in stats_dict.values():
            for kernel, size_counts in stats[field].items():
                kernel_agg = aggregated.setdefault(kernel, {})
                for size, count in size_counts.items():
                    kernel_agg[size] = kernel_agg.get(size, 0) + count
        return {kernel: cls._sorted_by_count(c) for kernel, c in aggregated.items()}

    @classmethod
    def _flatten_across_kernels(cls, per_kernel):
        # Collapse {kernel: {size: count}} into {size: count}, sorted by count.
        totals = {}
        for size_counts in per_kernel.values():
            for size, count in size_counts.items():
                totals[size] = totals.get(size, 0) + count
        return cls._sorted_by_count(totals)

    @classmethod
    def get_aggregate_kernel_stats_as_percentages(cls, stats_dict):
        """Fraction of conv layers using each kernel size, across all models."""
        aggregate_kernel_stats = StatsCounter()
        for stats in stats_dict.values():
            aggregate_kernel_stats += stats["kernel"]
        total_kernels = sum(count for _, count in aggregate_kernel_stats.items())
        return {
            ksize: count / total_kernels
            for ksize, count in aggregate_kernel_stats.items()
        }

    @classmethod
    def get_aggregate_stride_stats_per_kernel(cls, stats_dict):
        """Merge the per-model stride counters, keyed by kernel size."""
        aggregate_stride_stats = {}
        for stats in stats_dict.values():
            for kernel, counter in stats["stride"].items():
                if kernel not in aggregate_stride_stats:
                    aggregate_stride_stats[kernel] = StatsCounter()
                aggregate_stride_stats[kernel] += counter
        return aggregate_stride_stats

    @classmethod
    def get_stride_stats_per_kernel_as_percentages(cls, stats_dict):
        """Per kernel size, the fraction of layers using each stride.

        Bug fix: the original relied on StatsCounter.values() and
        dict(StatsCounter); iterating items() works regardless of which
        dict-style views the counter provides.
        """
        aggregate_stride_stats = cls.get_aggregate_stride_stats_per_kernel(stats_dict)
        percentages = {}
        for ksize, stride_counter in aggregate_stride_stats.items():
            total_kernels = sum(count for _, count in stride_counter.items())
            percentages[ksize] = {
                stride: count / total_kernels
                for stride, count in stride_counter.items()
            }
        return percentages

    @classmethod
    def get_aggregate_in_channel_stats_per_kernel(cls, stats_dict):
        """Input-channel histograms summed across models, keyed by kernel size."""
        return cls._aggregate_per_kernel(stats_dict, "in_channel")

    @classmethod
    def get_aggregate_in_channel_stats(cls, stats_dict):
        """Input-channel histogram summed across models and kernel sizes."""
        return cls._flatten_across_kernels(
            cls.get_aggregate_in_channel_stats_per_kernel(stats_dict)
        )

    @classmethod
    def get_aggregate_filter_stats_per_kernel(cls, stats_dict):
        """Output-channel histograms summed across models, keyed by kernel size."""
        return cls._aggregate_per_kernel(stats_dict, "filters")

    @classmethod
    def get_aggregate_filter_stats(cls, stats_dict):
        """Output-channel histogram summed across models and kernel sizes."""
        return cls._flatten_across_kernels(
            cls.get_aggregate_filter_stats_per_kernel(stats_dict)
        )
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.util import CLIError
from knack.log import get_logger
from azext_aks_deploy.dev.common.github_api_helper import get_check_run_status_and_conclusion
from azext_aks_deploy.dev.common.prompting import prompt_not_empty
logger = get_logger(__name__)
def poll_workflow_status(repo_name, check_run_id):
    """Poll a GitHub check run until it completes, showing a spinner.

    Prints progress and raises CLIError when the workflow conclusion is
    anything other than 'success'.
    """
    import colorama
    import humanfriendly
    import time

    def _spin_until(label, done_states):
        # Poll every 0.5 s behind a spinner until the check run reaches one
        # of done_states; returns the latest (status, conclusion).
        colorama.init()
        with humanfriendly.Spinner(label=label) as spinner:
            while True:
                spinner.step()
                time.sleep(0.5)
                status, conclusion = get_check_run_status_and_conclusion(repo_name, check_run_id)
                if status in done_states:
                    break
        colorama.deinit()
        return status, conclusion

    check_run_status, check_run_conclusion = get_check_run_status_and_conclusion(repo_name, check_run_id)
    if check_run_status == 'queued':
        # When workflow status is Queued
        check_run_status, check_run_conclusion = _spin_until(
            "Workflow is in queue", ('in_progress', 'completed'))
    if check_run_status == 'in_progress':
        # When workflow status is inprogress
        check_run_status, check_run_conclusion = _spin_until(
            "Workflow is in progress", ('completed',))
    print('GitHub workflow completed.')
    if check_run_conclusion == 'success':
        print('Workflow succeeded')
    else:
        raise CLIError('Workflow status: {}'.format(check_run_conclusion))
def get_new_workflow_yaml_name():
    """Warn that main.yml already exists and prompt for a new yaml name."""
    logger.warning('A yaml file main.yml already exists in the .github/workflows folder.')
    return prompt_not_empty(
        msg='Enter a new name for workflow yml file: ',
        help_string='e.g. /new_main.yml to add in the .github/workflows folder.')
|
from netket import legacy as nk
# 1D Lattice: periodic chain of L sites
L = 20
g = nk.graph.Hypercube(length=L, n_dim=1, pbc=True)
# Hilbert space of spins on the graph, restricted to total Sz = 0
hi = nk.hilbert.Spin(s=1 / 2, N=g.n_nodes, total_sz=0)
# Ising Hamiltonian with field strength h = 1.0
ha = nk.operator.Ising(hilbert=hi, graph=g, h=1.0)
# RBM ansatz; alpha sets the hidden-unit density
alpha = 1
ma = nk.machine.JaxRbm(hi, alpha, dtype=complex)
ma.init_random_parameters(seed=1232)
# Jax Sampler (local Metropolis moves, 2 parallel chains)
sa = nk.sampler.MetropolisLocal(machine=ma, n_chains=2)
# Using Sgd with learning rate 0.05
op = nk.optimizer.Sgd(ma, 0.05)
# Stochastic Reconfiguration preconditioner
sr = nk.optimizer.SR(ma, diag_shift=0.1)
# Create the optimization driver
gs = nk.VMC(
    hamiltonian=ha,
    sampler=sa,
    optimizer=op,
    n_samples=1000,
    sr=sr,
    n_discard_per_chain=2,
)
# The first iteration is slower because of start-up jit times, so do a
# tiny warm-up run before the real optimisation
gs.run(out="test", n_iter=2)
gs.run(out="test", n_iter=300)
|
import random
import pygame
import numpy as np
from random import randint
class Sudoku(object):
    """A 9x9 Sudoku board backed by a numpy integer array (0 = empty)."""

    def __init__(self):
        self.board = np.zeros((9, 9), dtype=int)

    def get_row(self, i):
        """Return row ``i`` of the board (a numpy view, not a copy)."""
        # Dead local list-building removed; the original already returned
        # the array row directly.
        return self.board[i]

    def get_col(self, j):
        """Return column ``j`` of the board (a numpy view, not a copy)."""
        return self.board[:, j]

    def get_square(self, y, x):
        """Return the 3x3 sub-square containing cell (y, x) as a flat list."""
        y0 = (y // 3) * 3
        x0 = (x // 3) * 3
        return [
            self.board[i, j]
            for i in range(y0, y0 + 3)
            for j in range(x0, x0 + 3)
        ]

    def check_number(self, num, i, j, num_list):
        """Append ``num`` to ``num_list`` if it may legally go at (i, j).

        Returns the (mutated) list on success and False otherwise. The mixed
        return type is kept for backward compatibility: callers rely on the
        in-place append rather than the return value.
        """
        if num in self.get_row(i):
            return False
        if num in self.get_col(j):
            return False
        if num in self.get_square(i, j):
            return False
        num_list.append(num)
        return num_list

    def set_number(self, num, i, j):
        """Write ``num`` into cell (i, j) and return the stored value."""
        self.board[i, j] = num
        return self.board[i, j]
def main():
    """Fill the board cell by cell with randomly chosen legal values."""
    valid_numlist = []
    sudoku = Sudoku()
    for i in range(9):
        for j in range(9):
            # Collect every value 1-9 that is legal at (i, j) into
            # valid_numlist (check_number appends in place).
            for num in range(1, 10):
                sudoku.check_number(num, i, j, valid_numlist)
            # NOTE(review): if no value is legal here, valid_numlist is empty
            # and random.choice raises IndexError — this greedy filler has no
            # backtracking, so it can fail on unlucky runs.
            cell_value = int(random.choice(valid_numlist))
            sudoku.board[i, j] = cell_value
            valid_numlist = []
    print(sudoku.board)
if __name__ == "__main__":
    main()
# -*- coding: utf-8 -*-
__author__ = 'chuter'
def string_json(str):
    """Escape a string for embedding inside a JSON string value.

    Newlines are deliberately emitted as HTML breaks ("<br\\/>") rather than
    "\\n" — preserved from the original behaviour.
    NOTE(review): the parameter shadows the builtin ``str``; the name is kept
    so keyword callers are not broken.
    """
    # Single-character escape table replacing the original if/elif chain.
    escapes = {
        '\"': '\\\"',
        '\\': '\\\\',
        '/': '\\/',
        '\b': '\\b',
        '\f': '\\f',
        '\n': '<br\\/>',
        '\r': '\\r',
        '\t': '\\t',
    }
    return ''.join(escapes.get(char, char) for char in str)
from datetime import datetime
import json
class DateEncoder(json.JSONEncoder):
    """JSONEncoder that additionally knows how to serialise datetimes."""

    def default(self, obj):
        # datetimes become their default string form; everything else
        # falls through to the base implementation (which raises TypeError).
        if isinstance(obj, datetime):
            return str(obj)
        return json.JSONEncoder.default(self, obj)
#!/usr/bin/env python3
##
## generate-ec2-ssh-config.py
##
## EC2 - Boto 3 Docs 1.9.49 documentation
## https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html
##
import boto3
client = boto3.client('ec2')
response = client.describe_instances()

# Emit one ssh_config "Host" stanza per named, publicly addressable instance.
for reservation in response['Reservations']:
    for instance in reservation['Instances']:
        # Skip instances we cannot log into: no key pair or no public DNS.
        if 'KeyName' not in instance:
            continue
        if instance['PublicDnsName'] == "":
            continue
        # Robustness fix: untagged instances carry no 'Tags' key at all, which
        # previously raised KeyError. Tag keys are unique per instance, so a
        # dict lookup replaces the nested tag loops.
        tags = {tag['Key']: tag['Value'] for tag in instance.get('Tags', [])}
        if 'Name' not in tags:
            continue
        print('Host ' + tags['Name'])
        print('    HostName ' + instance['PublicDnsName'])
        # A 'login_name' tag overrides the default EC2 user name.
        print('    User ' + tags.get('login_name', 'ec2-user'))
        print('    IdentityFile ~/.ssh/' + instance['KeyName'] + '.pem')
        print()
|
import data
import graph
import fileManager
import random
from NNModel import NNModel as NN
# Create a fresh output directory for this run's decision-surface snapshots.
dirName = fileManager.get_random_directory()
print("New project:", dirName)
# Generate a random dataset; arguments are presumably (classes, points) —
# TODO confirm against data.Data.generate_data.
new_data = data.Data()
new_data.generate_data(2, 20)
# print(new_data.positions)
# print(new_data.types)
# Resolution of the decision-surface visualisation grid.
graph_detail = 20
def randomClass(x, y):
    # Unused baseline "classifier": ignores the point, returns random noise.
    return random.random()
# Network with 2 inputs, 1 output and three hidden layers of 32 units.
model = NN(2, 1, hidden_layers=[32, 32, 32])
new_graph = graph.DataGraph()
# Snapshot the untrained decision surface first.
new_graph.show_data(new_data.get_data()[0], new_data.get_data()[1], model.predict, x_parts=graph_detail, y_parts=graph_detail, save_file=dirName+"/Start.png")
# Train for 1000 rounds, saving an image of the surface after each one.
for i in range(1000):
    print()
    print("Run", i)
    model.teach(new_data.get_data()[0], new_data.get_data()[1])
    print("Loss -", model.get_loss_history()[i][-1])
    file_name = dirName+"/Step "+str(i+1)+" - "+"MSE {:.6f}".format(float(model.get_loss_history()[i][-1])) + ".png"
    # print(file_name)
    new_graph.show_data(new_data.get_data()[0], new_data.get_data()[1], model.predict, x_parts=graph_detail, y_parts=graph_detail, save_file=file_name)
    # model.show_prediction_accuracy(new_data.get_data()[0], new_data.get_data()[1])
# new_graph.show_data(new_data.get_data()[0], new_data.get_data()[1], model.predict, x_parts=10, y_parts=10)
model.show_prediction_accuracy(new_data.get_data()[0], new_data.get_data()[1])
# f = open(dirName + "/Loss.txt", 'w')
# f.writelines(str(s for s in model.get_loss_history()[:][-1]))
# f.close()
print("Finished", dirName)
#! /usr/bin/env python3
import os
import sys
import shutil
import yaml
import subprocess
def examineTraceFile(work_dir):
    """Verify that, when tracing is enabled in input.yaml, the trace profile
    file was actually produced.

    Returns True when tracing is disabled/unconfigured or the trace file
    exists and is non-empty; False otherwise.
    """
    input_path = os.path.join(work_dir, 'input.yaml')
    try:
        # `with` closes the handle (the original leaked it); the narrowed
        # except no longer hides unrelated errors behind "not found".
        with open(input_path, 'r') as inputyaml:
            # safe_load: the config is plain data, and yaml.load without an
            # explicit Loader is deprecated/unsafe.
            config_dict = yaml.safe_load(inputyaml)
    except (OSError, IOError):
        print ("FAIL: (ERROR) input.yaml not found! work_dir:", work_dir)
        return False
    try:
        if config_dict['compileOption']['tracingPropagation'] == True:
            ## we should have trace file
            tracefile = os.path.join(work_dir, 'llfi', 'baseline', 'llfi.stat.trace.prof.txt')
            if os.path.isfile(tracefile) and os.path.getsize(tracefile):
                return True
            else:
                return False
        else:
            ## Tracing option disabled, pass
            return True
    except (KeyError, TypeError):
        ## Tracing option absent from the config, pass
        return True
def checkLLFIDir(work_dir, target_IR, prog_input):
    """Validate the llfi output directory layout under work_dir.

    target_IR and prog_input are accepted for interface compatibility but
    are not used by the checks. Returns "PASS" or a "FAIL: ..." message
    naming the first problem found.
    """
    # Required directories (relative to work_dir), checked in the original
    # order so the first missing one is the one reported.
    required_dirs = [
        ("llfi",),
        ("llfi", "llfi_stat_output"),
        ("llfi", "baseline"),
        ("llfi", "prog_output"),
        ("llfi", "std_output"),
    ]
    for parts in required_dirs:
        if not os.path.isdir(os.path.join(work_dir, *parts)):
            return "FAIL: No ./" + "/".join(parts) + " folder found!"
    stats_dir = os.path.join(work_dir, "llfi", "llfi_stat_output")
    if len(os.listdir(stats_dir)) == 0:
        return "FAIL: No stats file found!"
    if examineTraceFile(work_dir) == False:
        # Typo fix in the message: "byt" -> "but".
        return "FAIL: Tracing was enabled but trace file not generated!"
    return "PASS"
def check_injection(*prog_list):
r = 0
suite = {}
script_dir = os.path.dirname(os.path.realpath(__file__))
testsuite_dir = os.path.join(script_dir, os.pardir)
with open(os.path.join(testsuite_dir, "test_suite.yaml")) as f:
try:
suite = yaml.load(f)
except:
print("ERROR: Unable to load yaml file: test_suite.yaml", file=sys.stderr)
return -1
work_dict = {}
for test in suite["SoftwareFaults"]:
if len(prog_list) == 0 or test in prog_list or "SoftwareFaults" in prog_list:
work_dict["./SoftwareFaults/"+test] = suite["SoftwareFaults"][test]
for test in suite["HardwareFaults"]:
if len(prog_list) == 0 or test in prog_list or "HardwareFaults" in prog_list:
work_dict["./HardwareFaults/"+test] = suite["HardwareFaults"][test]
for test in suite["BatchMode"]:
if len(prog_list) == 0 or test in prog_list or "BatchMode" in prog_list:
work_dict["./BatchMode/"+test] = suite["BatchMode"][test]
result_list = []
for test_path in work_dict:
inject_dir = os.path.abspath(os.path.join(testsuite_dir, test_path))
inject_prog = suite["PROGRAMS"][work_dict[test_path]][0]
inject_input = str(suite["INPUTS"][work_dict[test_path]])
if test_path.startswith('./BatchMode'):
# print("\tChecking on BatchMode:", test_path)
models = [m for m in os.listdir(inject_dir) if os.path.isdir(os.path.join(inject_dir, m))]
for m in models:
subdir = os.path.join(inject_dir, m)
# print("\t\tChecking on model:", m)
result = checkLLFIDir(subdir, inject_prog, inject_input)
if result != "PASS":
break
if len(models) == 0:
result = "Subdirectories for failure modes not found!"
else:
result = checkLLFIDir(inject_dir, inject_prog, inject_input)
if result != "PASS":
r += 1
record = {"name":test_path, "result":result}
result_list.append(record)
return r, result_list
if __name__ == "__main__":
    # Run the checks for the programs named on the command line (all tests
    # when none are given) and exit with the failure count as status code.
    r, result_list = check_injection(*sys.argv[1:])
    print ("=============== Result ===============")
    for record in result_list:
        print(record["name"], "\t\t", record["result"])
    sys.exit(r)
from flask_api import FlaskAPI
from debug import debug_app
from deeprole import deeprole_app
import os
import markdown
# Directory containing this module (used to locate README.md).
DIR = os.path.abspath(os.path.dirname(__file__))
app = FlaskAPI(__name__)
# Serve JSON only by default.
app.config['DEFAULT_RENDERERS'] = [
    'flask_api.renderers.JSONRenderer',
]
# Mount the sub-apps under their URL prefixes.
app.register_blueprint(debug_app, url_prefix='/debug')
app.register_blueprint(deeprole_app, url_prefix='/deeprole')
@app.route('/')
def index():
    """Serve the project README, rendered from markdown, as the landing page."""
    readme_path = os.path.join(DIR, 'README.md')
    with open(readme_path, 'r') as readme:
        rendered = markdown.markdown(readme.read())
    return "<!doctype HTML><html><body>{}</body></html>".format(rendered)
if __name__ == "__main__":
    # Run the Flask development server with the debugger/reloader enabled.
    # (Removed a redundant re-import of os, already imported at module top.)
    os.environ['FLASK_ENV'] = 'development'
    app.run(debug=True)
|
import re
# list of patterns to search for
patterns = ['term1', 'term2']

# Text to parse
text = 'This is a string with term1, but it does not have the other term.'

# Report whether each pattern occurs anywhere in the text.
for pattern in patterns:
    print('Searching for {} in \n {}'.format(pattern, text))
    match = re.search(pattern, text)
    print("\t Match found.\n" if match else "\t No match found.\n")
|
#! /usr/bin/env python
#
# Summary of BX distributions
#
import sys
import optparse
import string
import os
# options
parser = optparse.OptionParser()
parser.add_option("-d", "--dir", action="store")
(opts, args)=parser.parse_args()

dataset=opts.dir

# ROOT stupidity: force batch mode (-b) before importing ROOT
sys.argv=[]
sys.argv.append('-b')

from ROOT import *
from math import *
from style import *
from plots import *

# Bug fix: SetDefaultSumw2 was referenced without parentheses, which is a
# no-op attribute access; it must be CALLED to enable Sumw2 error tracking
# on all subsequently created histograms.
TH1.SetDefaultSumw2()

# set style
tdrStyle()
gROOT.SetStyle("tdrStyle")
gROOT.ForceStyle()

# output files: multi-page PostScript, converted to PDF at the end
ofile = dataset+"/"+dataset+"_bxSummary.ps"
opdf = dataset+"/"+dataset+"_bxSummary.pdf"

# prepare canvas for plotting; "[" opens the multi-page PS document
canvas = TCanvas("c")
canvas.SetLogy(1)
canvas.Print(ofile+"[", "Portrait")
# open files
ifile1 = TFile(dataset+"/histograms.root", "read")
ifile2 = TFile(dataset+"/BXDistributions.root", "read")

# baseline summary plots
collBaseNorm = ifile2.Get("hCollBaseRateNorm")
collMinusOneNorm = ifile2.Get("hCollMinusOneRateNorm")
upBaseNorm = ifile2.Get("hUPBaseRateNorm")
upMinusOneNorm = ifile2.Get("hUPMinusOneRateNorm")
time = ifile1.Get("fills/hfilltime")

# zero the bin errors on the divisor histograms so they do not contribute
# to the error propagation in the Divide() calls below
for bin in range(1,time.GetNbinsX()+1):
    collBaseNorm.SetBinError(bin,0)
    collMinusOneNorm.SetBinError(bin,0)
    upBaseNorm.SetBinError(bin,0)
    upMinusOneNorm.SetBinError(bin,0)
    time.SetBinError(bin,0)

def drawRate(histname, norm):
    # Fetch a rate histogram, normalise it by bunch count and fill time,
    # draw it and append the page to the output PS file; returns the
    # (modified) histogram so it can be reused in the correlation graphs.
    # Replaces ~20 identical copy-pasted Get/Divide/Divide/Draw/Print runs.
    h = ifile2.Get(histname)
    h.Divide(norm)
    h.Divide(time)
    h.Draw()
    canvas.Print(ofile)
    return h

# collision base rates
collBaseSumm = drawRate("hCollBaseRate", collBaseNorm)
collBaseHalo = drawRate("hCollBaseRateHalo", collBaseNorm)
collBaseBG = drawRate("hCollBaseRateBeamgas", collBaseNorm)
collBaseCosm = drawRate("hCollBaseRateCosmic", collBaseNorm)
collBaseNoise = drawRate("hCollBaseRateNoise", collBaseNorm)
collBaseNoID = drawRate("hCollBaseRateNoID", collBaseNorm)

# collision BX-1 rates
collMinusOneSumm = drawRate("hCollMinusOneRate", collMinusOneNorm)
collMinusOneHalo = drawRate("hCollMinusOneRateHalo", collMinusOneNorm)
collMinusOneBG = drawRate("hCollMinusOneRateBeamgas", collMinusOneNorm)
collMinusOneCosm = drawRate("hCollMinusOneRateCosmic", collMinusOneNorm)
collMinusOneNoise = drawRate("hCollMinusOneRateNoise", collMinusOneNorm)
collMinusOneNoID = drawRate("hCollMinusOneRateNoID", collMinusOneNorm)

# unpaired bunch base rates
upBaseSumm = drawRate("hUPBaseRate", upBaseNorm)
upBaseHalo = drawRate("hUPBaseRateHalo", upBaseNorm)
upBaseBG = drawRate("hUPBaseRateBeamgas", upBaseNorm)
upBaseCosm = drawRate("hUPBaseRateCosmic", upBaseNorm)
upBaseNoise = drawRate("hUPBaseRateNoise", upBaseNorm)
upBaseNoID = drawRate("hUPBaseRateNoID", upBaseNorm)

# unpaired bunch BX-1 rates
upMinusOneSumm = drawRate("hUPMinusOneRate", upMinusOneNorm)
upMinusOneHalo = drawRate("hUPMinusOneRateHalo", upMinusOneNorm)
upMinusOneBG = drawRate("hUPMinusOneRateBeamgas", upMinusOneNorm)
upMinusOneCosm = drawRate("hUPMinusOneRateCosmic", upMinusOneNorm)
upMinusOneNoise = drawRate("hUPMinusOneRateNoise", upMinusOneNorm)
upMinusOneNoID = drawRate("hUPMinusOneRateNoID", upMinusOneNorm)
def correlationGraph(h1, h2, title, xtitle, ytitle):
# get number of points
n = h1.GetNbinsX()
# create graph
g = TGraphErrors(n)
# loop over bins
for i in range(1,n+1):
# check we have the same fill in both histograms!
if (h1.GetXaxis().GetBinLabel(i) != h1.GetXaxis().GetBinLabel(i)):
print "Mismatching bin labels!", h1.GetXaxis().GetBinLabel(i), h1.GetXaxis().GetBinLabel(i)
g.SetPoint(i-1, h1.GetBinContent(i), h2.GetBinContent(i))
g.SetPointError(i-1, h1.GetBinError(i), h2.GetBinError(i))
g.SetTitle(title)
g.GetXaxis().SetTitle(xtitle)
g.GetYaxis().SetTitle(ytitle)
g.SetMarkerStyle(22)
g.Draw("AP")
canvas.Print(ofile)
print "Making correlation graphs"
canvas.SetLogy(0)
#sanity checks
correlationGraph(collBaseSumm, upBaseSumm, "All events (sanity check)", "Base_{coll} Rate", "Base_{upb} Rate")
correlationGraph(collMinusOneSumm, upMinusOneSumm, "All events", "BX-1_{coll} Rate", "BX-1_{upb} Rate")
# halo prediction
correlationGraph(collMinusOneHalo, collBaseHalo, "Halo", "BX-1_{coll} Rate", "Base_{coll} Rate")
correlationGraph(upMinusOneHalo, collBaseHalo, "Halo", "BX-1_{upb} Rate", "Base_{coll} Rate")
# beamgas prediction
correlationGraph(collMinusOneBG, collBaseBG, "Beam-gas", "BX-1_{coll} Rate", "Base_{coll} Rate")
correlationGraph(upMinusOneBG, collBaseBG, "Beam-gas", "BX-1_{upb} Rate", "Base_{coll} Rate")
# no ID check
correlationGraph(collBaseNoise, collBaseNoID, "Noise vs NoID", "Noise Base_{coll} Rate", "No ID Base_{coll} Rate")
# close file
canvas = TCanvas("c")
canvas.Print(ofile+"]")
# convert to PDF
subprocess.call(["ps2pdf", ofile, opdf])
subprocess.call(["rm", ofile])
|
#from django.conf.urls import patterns, include, url
from django.conf.urls.defaults import *
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.generic import TemplateView
from hmslincs_server.views import *
from settings import _djangopath
import os.path as op
# URL routing table (legacy Django patterns() style: string view paths and
# direct_to_template/redirect_to generic views).
urlpatterns = patterns('',
    # Login / logout.
    # Note: the name "login_url" name is set to the request by the registered hmslincs.context_procesor.login_url_with_redirect
    (r'^db/login/$', 'django.contrib.auth.views.login', {'template_name': 'db/login.html'}),
    url(r'^db/logout/$', logout_page, name='logout'),
    url(r'^db/', include('db.urls')),
    # Pathway explorer: a static index.html served from the pathway app.
    (r'^explore/pathway/$', 'django.views.static.serve',
     {'path': 'index.html',
      'document_root': op.join(_djangopath, 'pathway', 'static', 'pathway')}),
    (r'^explore/responses/$', 'django.views.generic.simple.direct_to_template',
     {'template': 'responses/index.html'}),
    # Legacy /explore/sensitivities/... URLs redirect into the
    # fallahi-sichani-2013 namespace.
    (r'^explore/(?:sensitivities|10.1038-nchembio.1337)/(?!fallahi-sichani-2013)(?P<suffix>.*)$',
     'django.views.generic.simple.redirect_to',
     {'url': '/explore/10.1038-nchembio.1337/fallahi-sichani-2013/%(suffix)s'}),
    # Static template pages for the fallahi-sichani-2013 publication
    # (full pages, iframe wrappers, and iframe content variants).
    (r'^explore/10.1038-nchembio.1337/fallahi-sichani-2013/$',
     'django.views.generic.simple.direct_to_template',
     {'template': '10_1038_nchembio_1337__fallahi_sichani_2013/index.html'}),
    (r'^explore/10.1038-nchembio.1337/fallahi-sichani-2013/tools_table\.html$',
     'django.views.generic.simple.direct_to_template',
     {'template': '10_1038_nchembio_1337__fallahi_sichani_2013/tools_table.html'}),
    (r'^explore/10.1038-nchembio.1337/fallahi-sichani-2013/intro_to_dose_response_curves\.html$',
     'django.views.generic.simple.direct_to_template',
     {'template': '10_1038_nchembio_1337__fallahi_sichani_2013/intro_to_dose_response_curves.html'}),
    (r'^explore/10.1038-nchembio.1337/fallahi-sichani-2013/dose_response_grid\.html$',
     'django.views.generic.simple.direct_to_template',
     {'template': '10_1038_nchembio_1337__fallahi_sichani_2013/dose_response_grid.html'}),
    (r'^explore/10.1038-nchembio.1337/fallahi-sichani-2013/scatterplot_browser\.html$',
     'django.views.generic.simple.direct_to_template',
     {'template': '10_1038_nchembio_1337__fallahi_sichani_2013/scatterplot_browser.html'}),
    (r'^explore/10.1038-nchembio.1337/fallahi-sichani-2013/intro_to_dose_response_curves_iframe\.html$',
     'django.views.generic.simple.direct_to_template',
     {'template': '10_1038_nchembio_1337__fallahi_sichani_2013/intro_to_dose_response_curves_iframe.html'}),
    (r'^explore/10.1038-nchembio.1337/fallahi-sichani-2013/dose_response_grid_iframe\.html$',
     'django.views.generic.simple.direct_to_template',
     {'template': '10_1038_nchembio_1337__fallahi_sichani_2013/dose_response_grid_iframe.html'}),
    (r'^explore/10.1038-nchembio.1337/fallahi-sichani-2013/scatterplot_browser_iframe\.html$',
     'django.views.generic.simple.direct_to_template',
     {'template': '10_1038_nchembio_1337__fallahi_sichani_2013/scatterplot_browser_iframe.html'}),
    (r'^explore/10.1038-nchembio.1337/fallahi-sichani-2013/intro_to_dose_response_curves_content\.html$',
     'django.views.generic.simple.direct_to_template',
     {'template': '10_1038_nchembio_1337__fallahi_sichani_2013/intro_to_dose_response_curves_content.html'}),
    (r'^explore/10.1038-nchembio.1337/fallahi-sichani-2013/dose_response_grid_content\.html$',
     'django.views.generic.simple.direct_to_template',
     {'template': '10_1038_nchembio_1337__fallahi_sichani_2013/dose_response_grid_content.html'}),
    (r'^explore/10.1038-nchembio.1337/fallahi-sichani-2013/scatterplot_browser_content\.html$',
     'django.views.generic.simple.direct_to_template',
     {'template': '10_1038_nchembio_1337__fallahi_sichani_2013/scatterplot_browser_content.html'}),
    (r'^explore/responses/scatterplot_browser\.html$',
     'django.views.generic.simple.direct_to_template',
     {'template': 'responses/scatterplot_browser.html'}),
    # Adaptive drug resistance explorer pages.
    (r'^explore/adaptive-drug-resistance/$',
     'django.views.generic.simple.direct_to_template',
     {'template': 'adaptive_drug_resistance/index.html'}),
    (r'^explore/adaptive-drug-resistance/plsr-loadings/$',
     'django.views.generic.simple.direct_to_template',
     {'template': 'adaptive_drug_resistance/plsr_loadings/index.html'}),
    (r'^explore/adaptive-drug-resistance/vips/$',
     'django.views.generic.simple.direct_to_template',
     {'template': 'adaptive_drug_resistance/vips/index.html'}),
    # breast_cancer_signaling and single_cell_dynamics are currently served by
    # apache directly from STATIC_ROOT, so no url patterns are listed here.
)
# For DEBUG mode only (development) serving of static files
urlpatterns += staticfiles_urlpatterns()
|
# uncompyle6 version 3.7.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.8.5 (default, Aug 12 2020, 00:00:00)
# [GCC 10.2.1 20200723 (Red Hat 10.2.1-1)]
# Embedded file name: c:\Jenkins\live\output\Live\win_64_static\Release\python-bundle\MIDI Remote Scripts\Roland_FA\transport.py
# Compiled at: 2020-05-05 13:23:28
from __future__ import absolute_import, print_function, unicode_literals
from ableton.v2.control_surface.control import ButtonControl
from ableton.v2.control_surface.components import TransportComponent as TransportComponentBase
class TransportComponent(TransportComponentBase):
    """Transport section for the Roland FA control surface.

    Extends Live's stock transport component with a button that jumps the
    playhead back to the start of the arrangement.
    """
    jump_to_start_button = ButtonControl()

    @jump_to_start_button.pressed
    def jump_to_start_button(self, _):
        # Rewind the arrangement playhead to time 0.
        # NOTE(review): the handler reuses the control's name; presumably the
        # @...pressed decorator returns the control so the class attribute
        # remains a ButtonControl (common in decompiled Ableton scripts) —
        # confirm against the ableton.v2 control framework.
        self.song.current_song_time = 0.0
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve
import pickle
def load_variable(filename):
    """Unpickle and return the object stored in *filename*."""
    with open(filename, 'rb') as handle:
        return pickle.load(handle)
# Diagonal y = x reference (random-classifier baseline); used only by the
# commented-out reference plot below.
x = np.arange(0.0, 1.1, 0.1)
y = x

# Each pickle file stores a (true_labels, predicted_scores) pair.
true_y, pred_y = load_variable("netgan_baseline_roc_curve.pkl")
true_y_1, pred_y_1 = load_variable("cell_baseline_roc_curve.pkl")
true_y_2, pred_y_2 = load_variable("netgan_non_baseline_roc_curve.pkl")
true_y_3, pred_y_3 = load_variable("cell_non_baseline_roc_curve.pkl")

base_fpr, base_tpr, _ = roc_curve(true_y, pred_y)
base_fpr_1, base_tpr_1, _ = roc_curve(true_y_1, pred_y_1)
non_base_fpr, non_base_tpr, _ = roc_curve(true_y_2, pred_y_2)
non_base_fpr_1, non_base_tpr_1, _ = roc_curve(true_y_3, pred_y_3)
# Perfect-classifier curve (labels scored by themselves); used only by the
# commented-out plot below.
b1, b2, _ = roc_curve(true_y, true_y)

# Left panel: ROC curves at annotation ratio 0.5.
plt.subplot(121)
plt.plot(base_fpr, base_tpr, 'tab:orange', linestyle='dashed', label='Base NetGAN')
plt.plot(base_fpr_1, base_tpr_1, 'tab:blue', linestyle='dashed', label='Base CELL')
plt.plot(non_base_fpr, non_base_tpr, 'tab:orange', label='NetGAN')
plt.plot(non_base_fpr_1, non_base_tpr_1, 'tab:blue', label='CELL')
#plt.plot(x, y, linestyle='--', label='random classification')
#plt.plot(b1, b2, label='perfect classification')
plt.title('ROC curve with AR = 0.5', fontsize=12)
plt.xlabel('False Positive Rate', fontsize=10)
plt.ylabel('True Positive Rate', fontsize=10)
plt.legend()

# Right panel: AUC-ROC versus annotation ratio (hard-coded experiment results).
plt.subplot(122)
base_AUC_NetGAN = [0.686, 0.654, 0.633, 0.604, 0.597, 0.595, 0.590, 0.574, 0.577]
base_AUC_CELL = [0.72, 0.656, 0.569, 0.591, 0.591, 0.578, 0.566, 0.594, 0.590]
non_base_AUC_NetGAN = [0.752, 0.730, 0.742, 0.754, 0.724, 0.705, 0.673, 0.610, 0.614]
non_base_AUC_CELL = [0.859, 0.856, 0.852, 0.828, 0.828, 0.810, 0.797, 0.777, 0.763]
x = [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]
plt.plot(x, base_AUC_NetGAN, 'tab:orange', linestyle='dashed', label='Base NetGAN')
plt.plot(x, base_AUC_CELL, 'tab:blue', linestyle='dashed', label='Base CELL')
plt.plot(x, non_base_AUC_NetGAN, 'tab:orange', label='NetGAN')
plt.plot(x, non_base_AUC_CELL, 'tab:blue', label='CELL')
plt.ylim(0.5, 1)
# Fix: corrected typo 'Annotatoin' in the user-facing axis label; also
# removed the unused y1_lists local.
plt.xlabel('Annotation Ratio (AR)', fontsize=10)
plt.ylabel('AUC ROC Score', fontsize=10)
plt.title('AUC ROC Score with different AR', fontsize=12)
plt.legend()
plt.show()
|
from django.shortcuts import render
from django.http import HttpResponse
def social_view(request, *args, **kwargs):
    """Render the home page template."""
    print(request)  # debug: trace the incoming request object
    context = {}
    return render(request, "home.html", context)
def contact_view(request, *args, **kwargs):
    """Render the contact page template."""
    print(request)  # debug: trace the incoming request object
    context = {}
    return render(request, "contact.html", context)
def about_view(request, *args, **kwargs):
    """Render the about page with a small demo context."""
    my_context = dict(
        my_text="This is about us.",
        address='Usa, Washington DC.',
        my_list=[1, 2, 3, "A"],
        range=range(10),
    )
    print(my_context)  # debug: show what the template will receive
    return render(request, "about.html", my_context)
def service_view(request, *args, **kwargs):
    """Render the services page template."""
    print(request)  # debug: trace the incoming request object
    context = {}
    return render(request, "service.html", context)
|
#https://leetcode.com/problems/intersection-of-two-arrays/
class Solution:
    def intersection(self, nums1: List[int], nums2: List[int]) -> List[int]:
        """Return the distinct values that appear in both input lists."""
        lookup = set(nums2)
        return [value for value in set(nums1) if value in lookup]
|
from odoo import api, fields, models, _
from datetime import date,datetime
from dateutil.relativedelta import relativedelta
import logging
_logger = logging.getLogger(__name__)
class SchoolStudentModel(models.Model):
    """A student record: personal data, contact/address details,
    enrollment state and links to course and subjects."""
    _name = "school.student"
    _rec_name = "name"

    name = fields.Char(string="Name")
    date_of_birth = fields.Date(string="Date of Birth")
    # Computed from date_of_birth; see AgeCalculator below.
    age = fields.Integer(compute="AgeCalculator", string="Age")
    gender = fields.Selection([("m", "Male"), ("f", "Female")], string="Gender")
    image = fields.Binary(string="Image")
    email = fields.Char(string="E-mail")
    phone = fields.Char(string="Phone", size=10)
    registration_number = fields.Char(string="Registration Number")
    date_registration = fields.Date(string="Date of Registration")
    street = fields.Text(string="Street")
    city = fields.Char(string="City")
    state_id = fields.Many2one("res.country.state", string="State ID")
    country_id = fields.Many2one("res.country", string="Country ID")
    zip = fields.Char(string="Zip", size=6)
    # Lifecycle state, driven by the action_* button handlers below.
    state = fields.Selection([("new", "New"), ("current", "Current"), ("passout", "Pass Out")], string="State", default="new")
    course_id = fields.Many2one(comodel_name="school.course", relation="student_course_relation", column1="studentid", column2="courseid", string="Course ID")
    subject_ids = fields.Many2many(comodel_name="school.subject", relation="student_subject_relation", column1="studentid", column2="subjectid", string="Subject IDs")
    # NOTE(review): a One2many normally pairs with a Many2one inverse on the
    # comodel, but "student_ids" on school.course is itself a One2many —
    # confirm this pairing actually loads.
    student_course = fields.One2many("school.course", inverse_name="student_ids")

    # Statusbar button handlers.
    def action_new(self):
        self.state = "new"

    def action_passout(self):
        self.state = "passout"

    def action_current(self):
        self.state = "current"

    @api.depends("date_of_birth")
    def AgeCalculator(self):
        """Compute ``age`` in completed years from ``date_of_birth``.

        Fixes: assign the computed field directly — calling ``write()``
        inside a compute method triggers an extra write cycle — and
        subtract one year when the birthday has not yet occurred this
        year (the old plain year difference overstated the age).
        """
        for rec in self:
            rec.age = 0
            if rec.date_of_birth:
                today = date.today()
                dob = rec.date_of_birth
                rec.age = today.year - dob.year - (
                    (today.month, today.day) < (dob.month, dob.day))
class SchoolTeacherModel(models.Model):
    """A teacher record: personal data, contact/address details,
    active/inactive state and course/subject assignments."""
    _name = "school.teacher"
    _rec_name = "name"

    name = fields.Char(string="Name")
    date_of_birth = fields.Date(string="Date of Birth")
    # Computed from date_of_birth; see AgeCalculator below.
    age = fields.Integer(compute="AgeCalculator", string="Age")
    gender = fields.Selection([("m", "Male"), ("f", "Female")], string="Gender")
    image = fields.Binary(string="Image")
    email = fields.Char(string="E-mail")
    phone = fields.Char(string="Phone", size=10)
    registration_number = fields.Char(string="Registration Number")
    date_registration = fields.Date(string="Date of Registration")
    street = fields.Text(string="Street")
    city = fields.Char(string="City")
    state_id = fields.Many2one("res.country.state", string="State ID")
    country_id = fields.Many2one("res.country", string="Country ID")
    zip = fields.Char(string="Zip", size=6)
    # Lifecycle state, driven by the action_* button handlers below.
    state = fields.Selection([("active", "Active"), ("inactive", "Inactive")], string="State")
    course_ids = fields.Many2many(comodel_name="school.course", relation="teacher_course_relation", column1="teacherid", column2="courseid", string="Course IDs")
    subject_ids = fields.Many2many(comodel_name="school.subject", relation="teacher_subject_relation", column1="teacherid", column2="subjectid", string="Subject IDs")
    department = fields.Selection([("photo", "Photography"), ("softdev", "Software Development"), ("webdev", "Web Development")], string="Department")
    teacher_course = fields.Many2one("school.course", string="Teacher IDs")

    # Statusbar button handlers.
    def action_active(self):
        self.state = "active"

    def action_inactive(self):
        self.state = "inactive"

    @api.depends("date_of_birth")
    def AgeCalculator(self):
        """Compute ``age`` in completed years from ``date_of_birth``.

        Fixes: assign the computed field directly instead of calling
        ``write()`` inside the compute method, and account for whether
        the birthday has already passed this year (the old year
        difference overstated the age before the birthday).
        """
        for rec in self:
            rec.age = 0
            if rec.date_of_birth:
                today = date.today()
                dob = rec.date_of_birth
                rec.age = today.year - dob.year - (
                    (today.month, today.day) < (dob.month, dob.day))
class SchoolCourseModel(models.Model):
    """A course offered by the school, with fee/duration metadata and
    links back to the students enrolled in it."""
    _name = "school.course"
    _rec_name = "name"

    name = fields.Char(string="Name")
    description = fields.Text(string="Description")
    # Duration unit is not stated here — presumably months; confirm in views.
    duration = fields.Integer(string="Duration")
    fee = fields.Integer(string="Fee")
    department = fields.Selection([("photo","Photography"),("softdev","Software Development"),("webdev","Web Development")], string="Department")
    position = fields.Char(string="Position/Designation")
    # teacher_ids = fields.One2many("school.teacher",inverse_name="teacher_course")
    # NOTE(review): the declared inverse "student_course" on school.student is
    # itself a One2many, not a Many2one — confirm this relation loads.
    student_ids = fields.One2many("school.student",inverse_name="student_course",string="Student IDs")
    student_name = fields.Char("Student's Name")
    # teacher_name = fields.Char(string="Teacher IDs")

    # Work-in-progress compute helpers kept for reference:
    # @api.depends("teacher_ids")
    # def _teachercourse_(self):
    #     for rec in self:
    #         _logger.info("===========Search-----%r------",rec.teacher_ids.course_ids)
    #         for course in rec.teacher_ids:
    #             _logger.info("===========Search-----%r------",course.course_ids)
    #             if rec.id == course.course_ids:
    #                 rec.write({
    #                     "teacher_name" : course.name,
    #                 })
    # @api.depends("student_ids")
    # def _studentcourse_(self):
    #     for rec in self:
    #         for course in rec.student_ids:
    #             _logger.info("===========Search-----%r------",course.course_id)
    #             if rec.id == course.course_id:
    #                 rec.write({
    #                     "student_name": course.name
    #                 })
class SchoolSubjectModel(models.Model):
    """A subject that can be linked to students and teachers via the
    many-to-many relations declared on those models."""
    _name = "school.subject"
    _rec_name = "name"

    name = fields.Char(string="Name")
    description = fields.Text(string="Description")
    department = fields.Selection([("photo","Photography"),("softdev","Software Development"),("webdev","Web Development")], string="Department")
class SchoolFeesModel(models.Model):
    """Fee payment record for a student; tracks outstanding dues."""
    _name = "school.fee"
    _rec_name = "student_id"

    student_id = fields.Many2one("school.student", string="Student ID")
    total_fee = fields.Float(string="Total Fees")
    # Computed: total_fee - amount_paid (see DuesCalculator).
    due_fee = fields.Float(compute="DuesCalculator", string="Due Fees")
    amount_paid = fields.Float(string="Amount Paid")
    date_paid = fields.Date(string="Date Paid")
    course_id = fields.Many2one("school.course", string="Course ID")

    @api.depends("total_fee", "amount_paid")
    def DuesCalculator(self):
        """Compute the outstanding dues.

        Fixes: assign the computed field directly instead of calling
        ``write()`` (the correct pattern inside a compute method), and
        drop the old ``if rec.total_fee and rec.amount_paid`` guard —
        with a total set and nothing paid yet (amount_paid == 0.0 is
        falsy) the dues were wrongly left at 0 instead of the full fee.
        """
        for rec in self:
            rec.due_fee = (rec.total_fee or 0.0) - (rec.amount_paid or 0.0)

    def action_register_payment(self):
        """Open the payment wizard in a modal dialog."""
        return {
            "name": ("Register Payment"),
            "view_mode": "form",
            "res_model": "school.fee.wizard",
            "type": "ir.actions.act_window",
            "target": "new",
        }
# Generated by Django 3.1.6 on 2021-03-23 19:17
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the ``profile`` field from the ``response`` model."""

    dependencies = [
        ('response', '0004_auto_20210308_1958'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='response',
            name='profile',
        ),
    ]
|
import pyautogui as pg
import time
# GUI automation loop: iterates tweet-ID files whose names count down from
# start_range to 292, adds each one as a dataset in the target app, starts
# hydration, and saves the output.
# NOTE(review): every coordinate, scroll offset and sleep below is hard-coded
# for one specific window position/screen layout — this will break anywhere
# else. The magic 310/334/359 thresholds pick how far to scroll the file
# dialog before the wanted file becomes visible.
start_range = int(input('Enter start point: '))
file_x = 470
file_y = 190
for i in range(start_range, 291, -1):
    print(i)
    # click on the Add button
    pg.click(269, 105)
    time.sleep(1.5)
    # click on 'select tweet ID file' button
    pg.click(182, 289)
    time.sleep(1.5)
    # click on the 'processed_for_ids' folder
    pg.click(165, 407)
    time.sleep(1.5)
    # get to the file: hover over the list, then scroll an amount that
    # depends on how deep file `i` sits, and compute its y position
    pg.moveTo(500, 300)
    time.sleep(1.5)
    if i < 310:
        pg.scroll(-24)
        time.sleep(1.5)
        file_y = 360 + 23 * (309 - i)
    elif i < 334:
        pg.scroll(-16)
        time.sleep(1.5)
        file_y = 150 + 23 * (333 - i)
    elif i < 359:
        pg.scroll(-8)
        time.sleep(1.5)
        file_y = 183 + 24 * (358 - i)
    else:
        file_y = 190 + 24 * (382 - i)
    # click on file (select, then double-click to open)
    pg.click(file_x, file_y, 1)
    time.sleep(1.5)
    pg.click(file_x, file_y, 2)
    time.sleep(1.5)
    # click on title
    pg.click(152, 493)
    time.sleep(1.5)
    # enter title (the file number doubles as the dataset name)
    pg.typewrite(str(i))
    time.sleep(1.5)
    # click on 'add dataset' button
    pg.click(156, 733)
    time.sleep(1.5)
    # click on 'datasets' button
    pg.click(186, 105)
    time.sleep(1.5)
    # click on start button
    pg.click(496, 314)
    time.sleep(1.5)
    # click on hydrating folder
    pg.click(165, 408)
    time.sleep(1.5)
    # click on save
    pg.click(1238, 52)
    time.sleep(1.5)
    # let hydration run for 6 min, then pause it
    time.sleep(360)
    pg.click(496, 314)
    # sleep 10 sec and start again
    time.sleep(10)
    pg.click(496, 314)
    # run another 6 min before moving to the next file
    time.sleep(360)
|
import matplotlib.pyplot as plt
import numpy as np
import csv
import sys
# Usage: script.py <csv_path> <num_models>
# The CSV holds 15 rows per model (5 columns; column 4 is a loss value);
# rows are bucketed into `data[model_index]` in file order.
data = [[] for i in range(int(sys.argv[2]))]
colors = ['red','orange','pink','green','blue','black','purple','grey']
num = -1
for index,row in enumerate(csv.reader(open(sys.argv[1],"r"))):
    # every 15 rows starts the next model's bucket
    if index%15==0:
        num =num +1
    data[num].append(row)

# Figure 1: points at (col0, col1), labelled with the rounded loss.
fig, ax = plt.subplots()
for i in range(int(sys.argv[2])):
    temp = data[i]
    temp = np.array(temp,dtype= float)
    print(temp.shape)
    loss = np.around(temp[:,4],decimals=2)
    # alpha=0 makes the scatter markers invisible — only the text
    # annotations (and the legend entry) are shown.
    ax.scatter(temp[:,0],temp[:,1], s=20, label="model"+str(i),
               alpha=0, edgecolors='none')
    for j, txt in enumerate(loss):
        ax.annotate(txt, (temp[j][0],temp[j][1]), color = colors[i])
ax.legend(bbox_to_anchor=(1,1), loc='center left')
ax.grid(True)
plt.show()

# Figure 2: points at (col2, col3).
fig, ax = plt.subplots()
for i in range(int(sys.argv[2])):
    temp = data[i]
    temp = np.array(temp,dtype= float)
    loss = np.around(temp[:,4],decimals=2)
    ax.scatter(temp[:,2],temp[:,3], s=20, label="model"+str(i),
               alpha=0, edgecolors='none')
    for j, txt in enumerate(loss):
        # NOTE(review): annotations are placed at columns 0/1 while the
        # scatter above uses columns 2/3 — looks like a copy-paste slip;
        # confirm whether (temp[j][2], temp[j][3]) was intended.
        ax.annotate(txt, (temp[j][0],temp[j][1]), color = colors[i])
ax.legend(bbox_to_anchor=(1,1), loc='center left')
ax.grid(True)
plt.show()
|
from typing import Any
from resources.exceptions.error import *
def find_key(*objective_key, search_dict: dict = None, error_message: str = "Keys not found in dict") -> Any:
    """Return the value of the first key from *objective_key* found in *search_dict*.

    Args:
        *objective_key: Candidate keys, checked in the order given.
        search_dict (dict, optional): Dictionary to search. Defaults to None.
        error_message (str, optional): Message used when MissingKeyArgument
            is raised. Defaults to "Keys not found in dict".

    Raises:
        NotEnoughArgumentsError: If no search dict or no candidate key is given.
        MissingKeyArgument: If none of the candidate keys exist in search_dict.

    Returns:
        Any: The value stored under the first matching key.
    """
    # Idiom fixes: truthiness test instead of len(...) == 0, and membership
    # on the dict itself instead of building a .keys() view.
    if search_dict is None or not objective_key:
        raise NotEnoughArgumentsError("A search dict and a objective key must be provided")
    for key in objective_key:
        if key in search_dict:
            return search_dict[key]
    raise MissingKeyArgument(error_message)
|
import math
class Umrechnung(object):
    """Converts a desired chassis motion (speed, heading, rotation) into
    per-wheel revolutions per second for a mecanum-wheel platform,
    clamping to the maximum achievable wheel speed."""

    # Mecanum wheel radius in metres.
    __radius = 0.152/2
    # Perpendicular distance of the wheels from the centre axis in metres
    # (= turning radius when rotating about the centre; not yet measured
    # precisely).
    __abstand = 0.17

    def __init__(self, uMax):
        """uMax: maximum achievable wheel revolutions per second."""
        self.__vBetrag = 0
        self.__richtung = 0
        self.__rotation = 0
        self.__uMax = uMax

    def setEingabe(self, vBetrag, richtung, rotation):
        """Convert one motion command into four wheel speeds.

        vBetrag:  speed magnitude in m/s
        richtung: travel direction as an angle to the viewing direction
                  in degrees, clockwise positive
        rotation: rotation rate about the centre in rad/s, clockwise
                  positive

        Returns [front-left, front-right, rear-left, rear-right] wheel
        speeds in revolutions per second.
        """
        self.__vBetrag = vBetrag
        self.__richtung = richtung
        self.__rotation = rotation

        # Degrees -> radians.
        angle = richtung/180*math.pi

        # Translation-only wheel speeds, derived from (w = wheel angular
        # speed, r = wheel radius):
        #   vX = (w1+w2+w3+w4) * r/4
        #   vY = (-w1+w2+w3-w4) * r/4
        # and, without rotation, w1 = w3 and w2 = w4; decomposing vBetrag
        # into vX/vY solves the system.
        u_front_left = (math.cos(angle)+math.sin(angle))*vBetrag/(Umrechnung.__radius*2*math.pi)
        u_front_right = (math.cos(angle)-math.sin(angle))*vBetrag/(Umrechnung.__radius*2*math.pi)
        u_rear_left = u_front_right
        u_rear_right = u_front_left

        # Superimpose the rotation component.
        u_spin = rotation * Umrechnung.__abstand/Umrechnung.__radius
        wheels = [u_front_left + u_spin,
                  u_front_right - u_spin,
                  u_rear_left + u_spin,
                  u_rear_right - u_spin]

        # If any wheel would exceed uMax, slow all wheels by the same
        # factor so the motion direction is preserved.
        peak = max(abs(u) for u in wheels)
        if peak > self.__uMax:
            wheels = [u*self.__uMax/peak for u in wheels]
        return wheels
|
import requests
class TestCheckCookies:
    """Homework check: the playground endpoint must set a known cookie.

    Hits the live learnqa.ru service, so this test needs network access.
    """
    def test_check(self):
        url = 'https://playground.learnqa.ru/api/homework_cookie'
        response = requests.get(url)
        # All cookies as a plain dict (kept for debugging; not asserted on).
        cookie = dict(response.cookies)
        cookie_value = response.cookies.get('HomeWork')
        # NOTE(review): expected value 'hw_value' is hard-coded — confirm it
        # matches what the service actually issues.
        assert cookie_value == 'hw_value', "Wrong value"
#!/usr/bin/env python
import threading
from std_msgs.msg import *
import rospy
import numpy
import sys
from std_srvs.srv import *
from irpos import *
import math
from transformations import *
from visualization_msgs.msg import Marker
from visualization_msgs.msg import MarkerArray
# Mutexes guarding the variables written by the virtual-receptor callbacks.
door_homog_matrix_lock = threading.Lock()
door_pos_points_lock = threading.Lock()
elements_pos_points_lock = threading.Lock()
element_homog_matrix_lock = threading.Lock()

# Buffers holding the latest data received from the virtual receptor.
door_homog_matrix = None
door_pos_points = None
elements_pos_points = None
element_homog_matrix = None

# Freshness flags: True while the matching buffer holds unread data.
door_homog_matrix_fresh = False
door_pos_points_fresh = False
elements_pos_points_fresh = False
element_homog_matrix_fresh = False

# Counts received door-homography messages (saturates at 100).
door_homog_counter = 0
# Read a single element out of a Float32MultiArray message.
def array_elem(array, row, col):
    """Return element (row, col) of *array*, or None when the message is
    missing or the computed index is out of range."""
    if array is None:
        return None
    flat_index = array.layout.dim[1].stride*row + col
    if flat_index < len(array.data):
        return array.data[flat_index]
    return None
# Copy a Float32MultiArray message into a 2-D numpy matrix.
def read_data(data):
    """Return a (rows x cols) numpy array filled from the message's flat
    data buffer, using the layout's declared dimensions."""
    rows = data.layout.dim[0].size
    cols = data.layout.dim[1].size
    m = numpy.zeros(shape=(rows, cols))
    for r in range(rows):
        for c in range(cols):
            m[r, c] = array_elem(data, r, c)
    return m
# Topic callbacks: each one stores freshly received data into its module
# global under the matching mutex and raises the freshness flag.
def callback_door_pos(data):
    """Store the latest door position points."""
    global door_pos_points
    global door_pos_points_fresh
    door_pos_points_lock.acquire()
    door_pos_points = read_data(data)
    door_pos_points_fresh = True
    door_pos_points_lock.release()

def callback_door_homog(data):
    """Store the latest door homography matrix and count the message."""
    global door_homog_matrix
    global door_homog_matrix_fresh
    global door_homog_counter
    door_homog_matrix_lock.acquire()
    door_homog_matrix = read_data(data)
    door_homog_matrix_fresh = True
    # Saturating counter of received homographies.
    if door_homog_counter<100:
        door_homog_counter = door_homog_counter+1
    door_homog_matrix_lock.release()

def callback_elements_pos(data):
    """Store the latest door-elements position points."""
    global elements_pos_points
    global elements_pos_points_fresh
    elements_pos_points_lock.acquire()
    elements_pos_points = read_data(data)
    elements_pos_points_fresh = True
    elements_pos_points_lock.release()

def callback_element_homog(data):
    """Store the latest single-element homography matrix."""
    global element_homog_matrix
    global element_homog_matrix_fresh
    element_homog_matrix_lock.acquire()
    element_homog_matrix = read_data(data)
    element_homog_matrix_fresh = True
    element_homog_matrix_lock.release()
if __name__ == '__main__':
    #rospy.init_node('receiver', anonymous=True)
    # Subscribe to the virtual-receptor topics.
    rospy.Subscriber("door_homog_matrix", Float32MultiArray, callback_door_homog)
    rospy.Subscriber("door_position", Float32MultiArray, callback_door_pos)
    rospy.Subscriber("door_elements_position", Float32MultiArray, callback_elements_pos)
    rospy.Subscriber("element_homog_matrix", Float32MultiArray, callback_element_homog)
    # RViz marker publishers for the door plane and its elements.
    publisher = rospy.Publisher('door_marker_array', Marker)
    publisher2 = rospy.Publisher('elements_marker_array', Marker)
    irpos = IRPOS("door", "Irp6ot", 7, "irp6ot_manager")
    # # Stop the perception subtasks, just in case.
    # rospy.wait_for_service('/Door/stop')
    # ts = rospy.ServiceProxy('/Door/stop', Empty)
    # ts()
    # rospy.wait_for_service('/Elements/stop')
    # ts = rospy.ServiceProxy('/Elements/stop', Empty)
    # ts()
    # # Flags for which task is active.
    # door_active = 0
    # elements_active = 0
    # print "SUBTASKS STOPPED"
    # rospy.wait_for_service('/Elements/stop')
    # ts = rospy.ServiceProxy('/Elements/stop', Empty)
    # ts()
    # rospy.wait_for_service('/Door/start')
    # ts = rospy.ServiceProxy('/Door/start', Empty)
    # ts()
    print "DOOR LOCALIZATION ACTIVE"
    while not rospy.is_shutdown():
        actual_pose = irpos.get_cartesian_pose()
        # Transform chain:
        #   p_base = T_base_d * p_d
        #   T_base_d = T_base_tl6 * T_tl6_cam * T_cam_opt * T_opt_d
        door_homog_matrix_lock.acquire()
        T_opt_d = door_homog_matrix
        door_homog_matrix_lock.release()
        # Constant optical-frame and tool-to-camera transforms.
        T_cam_opt = numpy.matrix([[-1,0,0,0],[0,-1,0,0],[0,0,1,0],[0,0,0,1]])
        T_tl6_cam = numpy.matrix([[0,-1,0,-0.0551],[1,0,0,0],[0,0,1,0.13],[0,0,0,1]])
        pose = irpos.get_cartesian_pose()
        qx = pose.orientation.x
        qy = pose.orientation.y
        qz = pose.orientation.z
        qw = pose.orientation.w
        px = pose.position.x
        py = pose.position.y
        pz = pose.position.z
        quaternion = [qx, qy, qz, qw]
        # Rotation from the quaternion, translation added into the last column.
        T_base_tl6 = quaternion_matrix(quaternion) + numpy.matrix([[0,0,0,px],[0,0,0,py],[0,0,0,pz],[0,0,0,0]])
        T_base_d = T_base_tl6 * T_tl6_cam * T_cam_opt * T_opt_d
        # NOTE(review): door_pos_points is read without taking its lock here,
        # unlike T_opt_d above — confirm this is intentional.
        door_points = door_pos_points
        base_door_points = []
        for i in range(0,door_points.shape[0]):
            # Door corner in the door frame (z = 0 plane), homogeneous.
            p = numpy.matrix([[door_points[i,0]],[door_points[i,1]],[0.0],[1.0]])
            base_door_points.append(T_base_d * p)
        # Door rendered as two green triangles in the base frame.
        marker = Marker()
        marker.header.frame_id = "/tl_base"
        marker.type = marker.TRIANGLE_LIST
        marker.action = marker.ADD
        marker.scale.x = 1.0
        marker.scale.y = 1.0
        marker.scale.z = 1.0
        marker.color.a = 1.0
        marker.color.g = 1.0
        marker.pose.orientation.w = 1.0
        p = Point()
        p.x = base_door_points[0][0]
        p.y = base_door_points[0][1]
        p.z = base_door_points[0][2]
        p1 = Point()
        p1.x = base_door_points[1][0]
        p1.y = base_door_points[1][1]
        p1.z = base_door_points[1][2]
        p2 = Point()
        p2.x = base_door_points[2][0]
        p2.y = base_door_points[2][1]
        p2.z = base_door_points[2][2]
        p3 = Point()
        p3.x = base_door_points[3][0]
        p3.y = base_door_points[3][1]
        p3.z = base_door_points[3][2]
        marker.points.append(p)
        marker.points.append(p1)
        marker.points.append(p2)
        marker.points.append(p3)
        marker.points.append(p2)
        marker.points.append(p)
        publisher.publish(marker)
        # Door elements rendered as red spheres, offset 0.3 m along the
        # door-frame z axis.
        elements_points = elements_pos_points
        base_elements_points = []
        for i in range(0,elements_points.shape[0]):
            p = numpy.matrix([[elements_points[i,0]],[elements_points[i,1]],[0.3],[1.0]])
            base_elements_points.append(T_base_d * p)
        marker2 = Marker()
        marker2.header.frame_id = "/tl_base"
        marker2.type = marker.SPHERE_LIST
        marker2.action = marker.ADD
        marker2.scale.x = 0.1
        marker2.scale.y = 0.1
        marker2.scale.z = 0.1
        marker2.color.a = 1.0
        marker2.color.r = 1.0
        marker2.pose.orientation.w = 1.0
        for i in range(0,len(base_elements_points)):
            p = Point()
            p.x = base_elements_points[i][0]
            p.y = base_elements_points[i][1]
            p.z = base_elements_points[i][2]
            marker2.points.append(p)
        # NOTE(review): the door marker is published a second time here —
        # possibly redundant with the publish above.
        publisher.publish(marker)
        publisher2.publish(marker2)
        print "**"
        print base_elements_points
        rospy.sleep(1.0)
|
#!/usr/bin/env python
# coding: utf-8
from django.core.context_processors import csrf
from django.template import Context, RequestContext
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render_to_response
from django.contrib.auth.decorators import login_required
from django.utils import timezone
from django.conf import settings as _settings
from datetime import datetime
from contest.models import Contest, ContestProblem
from submission.models import Submission
from course.models import CourseRegister
from ojutility.func import *
from course.views import isCourseRegister
from problemset.models import Problem
import json
def contestNotStart(contest):
    """True while the contest's (localised) start time is still in the future."""
    now_epoch = int(datetime.now().strftime('%s'))
    return now_epoch < getLocalTime(contest.start_time)
def canShowContest(cid, user):
    """Access gate shared by all contest pages.

    Returns True when *user* may view contest *cid*; otherwise returns the
    rendered error response from printError. Callers must compare the
    result against True explicitly — a response object is also truthy.
    """
    contest = get_object_or_404(Contest, cid = cid)
    # Before the start time, only the contest creator may look at it.
    if user != contest.creater and contestNotStart(contest):
        return printError('Contest Not Start')
    # Viewing additionally requires registration in the owning course.
    if not isCourseRegister(contest.courseid, user):
        return printError('You have no privilege to view this contest')
    return True
def showContest(request, cid):
    """Contest overview page: contest header plus its problem list."""
    permission = canShowContest(cid, request.user)
    if permission != True:
        return permission
    contest = get_object_or_404(Contest, cid=cid)
    problems = ContestProblem.objects.filter(cid=cid)
    return render_to_response(
        'contest.html',
        {'contest': contest, 'problem': problems},
        context_instance=RequestContext(request))
def showContestProblem(request, cid, pid):
    """Render a single problem's page inside a contest."""
    res = canShowContest(cid, request.user)
    if res != True: return res
    contest = get_object_or_404(Contest, cid = cid)
    problem = get_object_or_404(Problem, pid = pid)
    # 404 unless the problem actually belongs to this contest.
    isok = get_object_or_404(ContestProblem, cid = contest, pid = problem)
    context = {'contest': contest, 'problem': problem}
    # CSRF token for the submission form on the page.
    context.update(csrf(request))
    return render_to_response('contestproblem.html', context, context_instance = RequestContext(request))
def showSubmission(request, cid):
    """List contest submissions.

    Superusers see every submission in the contest; regular users see only
    their own; anonymous visitors see an empty list.
    """
    res = canShowContest(cid, request.user)
    if res != True: return res
    contest = get_object_or_404(Contest, cid = cid)
    if not request.user.is_authenticated():
        submissionList = []
    else:
        if request.user.is_superuser:
            submissionList = Submission.objects.filter(cid = cid)
        else:
            submissionList = Submission.objects.filter(cid = cid, user = request.user)
    context = {'contest': contest, 'submissionList': submissionList}
    return render_to_response('contest_submission.html', context, context_instance = RequestContext(request))
def showProblemSubmission(request, cid, pid):
    """List the current user's submissions for one contest problem."""
    permission = canShowContest(cid, request.user)
    if permission != True:
        return permission
    contest = get_object_or_404(Contest, cid=cid)
    if request.user.is_authenticated():
        submissionList = Submission.objects.filter(cid=cid, user=request.user, pid=pid)
    else:
        submissionList = []
    return render_to_response(
        'contest_submission.html',
        {'contest': contest, 'submissionList': submissionList},
        context_instance=RequestContext(request))
# Sort-key helpers for the standings; both receive a (user, value) pair
# produced by rankList.items().
def solvedCount(val):
    # val is (user, solved_count): order ACM standings by problems solved.
    return val[1]

def SCORE(val):
    # val is (user, total_score): order OI standings by accumulated score.
    return val[1]
def showOIStanding(request, cid):
    """OI-style standings: best score per (user, problem), summed per user."""
    res = canShowContest(cid, request.user)
    if res != True: return res
    contest = get_object_or_404(Contest, cid = cid)
    problem = ContestProblem.objects.filter(cid = cid)
    submissionList = Submission.objects.filter(cid = cid).order_by('user')
    statistics = {}   # (user, pid) -> best score seen
    rankList = {}     # user -> total score over all problems
    # status == 1 mean Accepted
    for submission in submissionList:
        detail = submission.judge_detail
        try:
            # SECURITY NOTE(review): eval() here executes the stored
            # judge_detail string as Python — arbitrary code execution if
            # that field is ever attacker-controlled; json.loads or
            # ast.literal_eval would be safer.
            detail = eval(json.loads(json.dumps(detail)))
            print detail['score']
        except:
            # Unparseable detail counts as a zero score.
            detail = {'score' : 0}
        score = int(detail['score'])
        user_pid = (submission.user, submission.pid)
        # Keep the best score per (user, problem).
        if statistics.has_key(user_pid):
            statistics[user_pid] = max(statistics[user_pid], score)
        else:
            statistics[user_pid] = score
    # Sum each user's best scores.
    for item in statistics.items():
        if not rankList.has_key(item[0][0]):
            rankList[item[0][0]] = 0
        rankList[item[0][0]] = rankList[item[0][0]] + item[1]
    rankList = rankList.items()
    rankList.sort(key = SCORE, reverse = True)
    context = {'statistics': statistics, 'rankList': rankList, 'problem': problem, 'contest': contest}
    context.update(csrf(request))
    return render_to_response('oistanding.html', context, context_instance = RequestContext(request))
def showACMStanding(request, cid):
    """ACM-style standings: users ranked by number of problems solved."""
    res = canShowContest(cid, request.user)
    if res != True: return res
    contest = get_object_or_404(Contest, cid = cid)
    problem = ContestProblem.objects.filter(cid = cid)
    submissionList = Submission.objects.filter(cid = cid).order_by('user')
    statistics = {}   # (user, pid) -> 1 once any submission was accepted
    rankList = {}     # user -> solved-problem count
    # status == 1 mean Accepted
    for submission in submissionList:
        status = submission.status
        # Collapse every non-accepted status to 0 so the OR below works.
        if status != 1: status = 0
        user_pid = (submission.user, submission.pid)
        if statistics.has_key(user_pid):
            statistics[user_pid] = statistics[user_pid] | status
        else:
            statistics[user_pid] = status
    # Count solved problems per user.
    for item in statistics.items():
        if not rankList.has_key(item[0][0]):
            rankList[item[0][0]] = 0
        if item[1] == 1:
            rankList[item[0][0]] = rankList[item[0][0]] + 1
    rankList = rankList.items()
    rankList.sort(key = solvedCount, reverse = True)
    context = {'statistics': statistics, 'rankList': rankList, 'problem': problem, 'contest': contest}
    context.update(csrf(request))
    return render_to_response('standing.html', context, context_instance = RequestContext(request))
|
import numpy as np
import cv2
# Motion-heatmap demo: accumulates a MOG2 foreground mask (plus a little of
# the grayscale scene) over a video and renders the running total as a JET
# colormap.
# NOTE(review): start, duration and outfile are currently unused — the
# duration check below is commented out and nothing is written to disk.
start = 1
duration = 10
fps = '30'
capture = cv2.VideoCapture('vtest.avi')
outfile = 'heatmap.avi'
# Warm-up: keep reading until a frame decodes, then seed the accumulator
# from that first grayscale frame.
while True:
    try:
        _, f = capture.read()
        f = cv2.cvtColor(f, cv2.COLOR_BGR2GRAY)
        f = cv2.GaussianBlur(f, (11, 11), 2, 2)
        cnt = 0
        res = 0.05 * f
        res = res.astype(np.float64)
        break
    except:
        # NOTE(review): bare except — if the video cannot be opened at all
        # this loop spins forever printing 's'; consider failing fast.
        print('s')
fgbg = cv2.createBackgroundSubtractorMOG2(history=1, varThreshold=100,
                                          detectShadows=True)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (13, 13))
cnt = 0
sec = 0
while True:
    # if sec == duration: break
    cnt += 1
    # Track elapsed seconds from the frame count (fps is a string on purpose
    # here; it is only ever used via int(fps)).
    if cnt % int(fps) == 0:
        print(sec)
        sec += 1
    ret, frame = capture.read()
    if not ret: break
    # Foreground mask with a slow learning rate.
    fgmask = fgbg.apply(frame, None, 0.01)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # if cnt == 30: res
    gray = cv2.GaussianBlur(gray, (11, 11), 2, 2)
    gray = gray.astype(np.float64)
    fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_CLOSE, kernel)
    fgmask = fgmask.astype(np.float64)
    # Accumulate: motion weighted 40x against the background scene.
    res += (40 * fgmask + gray) * 0.01
    # Normalise the accumulator to 0-255 and render as a heatmap.
    res_show = res / res.max()
    res_show = np.floor(res_show * 255)
    res_show = res_show.astype(np.uint8)
    res_show = cv2.applyColorMap(res_show, cv2.COLORMAP_JET)
    cv2.imshow('s', res_show)
    # Esc quits early.
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
capture.release()
cv2.destroyAllWindows()
#!/usr/bin/env python
import requests
import os
import subprocess
import argparse
# Read the OpenAI API key from the environment; abort early when missing.
api_key = os.environ.get("OPENAI_API_KEY")
if api_key is None:
    print("错误:未设置OPENAI_API_KEY环境变量。请设置变量并重试。")
    exit()
def text_to_command(text):
    """Ask the OpenAI chat API to translate *text* into shell command(s).

    Returns a list of non-empty command lines with code-fence lines
    stripped, or None when the API response holds no completion.
    """
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}",
    }
    payload = {
        "model": "gpt-3.5-turbo-0301",
        "messages": [
            {"role": "system", "content": "将下面文本转换为shell命令,不需要输出任何解释,如果需要执行多个命令,输出多行,文本:"},
            {"role": "user", "content": f"{text}"}
        ],
        "temperature": 0.2,
    }
    response = requests.post(
        "https://api.openai.com/v1/chat/completions",
        headers=headers,
        json=payload,
    )
    try:
        command = response.json()["choices"][0]["message"]["content"].strip()
    except KeyError:
        # The response did not contain the expected completion structure.
        print("错误:无法从OpenAI API响应中检索命令。")
        print(response.json())
        return None
    lines = command.split('\n')
    return [line for line in lines if line and "```" not in line]
def main():
    """CLI entry point: translate the argument text into shell commands,
    show them, and run them only after explicit user confirmation."""
    parser = argparse.ArgumentParser(description='将文本转换为shell命令')
    parser.add_argument('text', type=str, help='要转换的文本')
    args = parser.parse_args()

    commands = text_to_command(args.text)
    if commands is None:
        return

    print("要执行的命令:")
    for cmd in commands:
        print(cmd)

    answer = input("您是否要执行这些命令?(Y/n)").lower()
    if answer in ("y", ""):
        # NOTE: shell=True executes model output; acceptable only because
        # the user confirmed the exact commands above.
        subprocess.run(';'.join(commands), shell=True)
# Script entry point.
if __name__ == '__main__':
    main()
|
import datetime
import logging
import pickle
import re
import time
import urllib
from google.appengine.api import memcache
from google.appengine.ext import db
from google.appengine.ext import search
import accounts
import provider
import system
class QuoteException(Exception):
    """Base class for quote-related errors."""
    pass

class InvalidQuoteStateException(QuoteException): pass  # quote is in the wrong state for the operation
class InvalidKeyException(QuoteException): pass         # malformed or unknown datastore key
class NoPermissionException(QuoteException): pass       # caller lacks rights for the action
# Verbs recorded in the system action log for quote lifecycle events.
VERB_PUBLISHED = system.Verb('published')
VERB_DELETED = system.Verb('deleted')
VERB_UPDATED = system.Verb('updated')

@system.capture(VERB_PUBLISHED)
def onQuotePublished(action):
    """Bump the global quote counter when a quote is published."""
    system.incrementQuoteCount(timestamp=action.timestamp)

@system.capture(VERB_DELETED)
def onQuoteDeleted(action):
    """Decrement the global quote counter when a quote is deleted."""
    system.incrementQuoteCount(-1)
class Line:
    """One logical line of quote dialog plus its formatting annotations."""
    def __init__(self, line, preserve_formatting=False):
        self.original = line
        if preserve_formatting:
            # Caller wants the raw text untouched: extract no formatting.
            self.formatting = []
        else:
            # Run the registered formatters over the text; adapting None to
            # ILineParser yields the formatter registry (see line_parser).
            self.formatting = list(ILineParser(None).parse(line))

    def __repr__(self):
        # Truncate long lines for readable debug output.
        if len(self.original) > 20:
            line = self.original[:17] + '...'
        else:
            line = self.original
        return '<Line: %r formatting=%s>' % (line, self.formatting)
class ILineParser(provider.Interface):
    """Provider interface for parsing raw dialog text into Line objects."""
    def parseDialog(self, dialog, preserve_formatting): pass
    def parse(self, line): pass
class LineFormatterRegistry(type):
    """Metaclass that auto-registers every LineFormatter subclass and
    drives dialog parsing using the collected formatters."""
    NL = re.compile(r'\r?\n')
    INDENT = re.compile(r'^(\s*)')
    registry = []

    def __new__(cls, *args, **kwargs):
        # Register each class created with this metaclass.
        instance = type.__new__(cls, *args, **kwargs)
        cls.registry.append(instance)
        return instance

    @classmethod
    def parseDialog(cls, dialog, preserve_formatting):
        """Split raw dialog into Line objects.

        Lines indented deeper than the current line's starting indent are
        treated as continuations and joined onto the logical line.
        """
        dialog = dialog.strip()
        line_start_indent = 0
        cur_line = []
        for line in cls.NL.split(dialog):
            indent = len(cls.INDENT.match(line).group(1))
            if indent <= line_start_indent:
                # Not a continuation: flush the accumulated logical line.
                if cur_line:
                    yield Line(' '.join(cur_line),
                               preserve_formatting=preserve_formatting)
                    del cur_line[:]
                line_start_indent = indent
            cur_line.append(line.strip())
        if cur_line:
            yield Line(' '.join(cur_line),
                       preserve_formatting=preserve_formatting)

    @classmethod
    def parse(cls, line):
        """Yield formatter matches for *line*, removing each matched span
        from the text so later formatters see the stripped remainder."""
        for formatter in cls.registry:
            while True:
                match = formatter.match(line)
                if match:
                    yield match
                    line = line[:match.range[0]] + line[match.range[1]:]
                    # Re-run only formatters that may match repeatedly.
                    if not match.multiple:
                        break
                else:
                    break
@provider.adapter(type(None), ILineParser)
def line_parser(_):
    # Default ILineParser adapter (for context None): the formatter registry.
    return LineFormatterRegistry
class LineFormatter(object):
    """Instances of this class describe how to apply formatting to a quote.
    @type multiple: bool
    @ivar multiple: Whether the formatter could possibly match again.
    @type range: (int, int)
    @ivar range: A tuple giving the range of characters this formatting effect
        applies to. E.g., line[range[0]:range[1]].
    @type params: A dictionary of data to export to the recipient of the formatted
        line.
    """
    # Python 2 metaclass hook: every subclass is appended to the registry.
    __metaclass__ = LineFormatterRegistry
    def __init__(self, range=None, params=None, multiple=False):
        self.range = range
        self.params = params
        self.multiple = multiple
    def __repr__(self):
        #return '%s(%r, %r)' % (self.__class__.__name__, self.range, self.params)
        return '%s: %r' % (self.__class__.__name__, self.__dict__)
    @classmethod
    def match(cls, line):
        # Base formatter never matches; subclasses override this.
        return None
class TimestampFormatter(LineFormatter):
    """Matches a leading [HH:MM], (HH:MM) or HH:MM(:SS) timestamp."""
    TIME = re.compile(r'^\s*[\[(]?(?P<hour>\d?\d):(?P<minute>\d\d)(:(?P<second>\d\d))?[)\]]?\s*')
    @classmethod
    def match(cls, line):
        match = cls.TIME.match(line)
        if match:
            # groupdict(0) defaults the optional seconds group to 0.
            groups = match.groupdict(0)
            try:
                timestamp = datetime.time(int(groups['hour']), int(groups['minute']),
                                          int(groups['second']))
                return cls(range=(match.start(), match.end()),
                           params={'timestamp': timestamp},
                           )
            except ValueError:
                # Out-of-range values (e.g. hour 25) are treated as no match.
                return None
        # Implicitly returns None when the regex does not match.
class NickFormatter(LineFormatter):
    """Matches a leading IRC-style nick such as ``<nick>``, ``[nick]`` or ``nick:``."""
    NICK = re.compile(r'^\s*[\[<\(]?'
                      r'(?P<nickflag>[\s@+])?'
                      r"(?P<nick>[\w\d^`\[\]{}\\|-]+)[\]>\):]+\s?")
    # Characters stripped when normalizing a nick for label comparison.
    NORMALIZATION = re.compile('[^\w\d]')
    @classmethod
    def match(cls, line):
        match = cls.NICK.match(line)
        # Reject all-digit "nicks" (likely timestamps). NOTE(review): this
        # relies on Python 2 filter() returning a list; under Python 3 a
        # filter object is always truthy and the digit check is defeated.
        if match and filter(lambda c: not c.isdigit(), match.group('nick')):
            params = {
                'normalized_nick':
                    cls.NORMALIZATION.sub('', match.group('nick')).lower(),
            }
            params.update(match.groupdict())
            return cls(range=(match.start(), match.end()),
                       params=params,
                       )
class Quote(search.SearchableModel):
    """A quote: dialog text, parsed formatting, labels, ratings and caching.

    Entities are stored with the submitting Account as datastore parent and
    mirrored into memcache under 'quote:key:<key>' and 'quote:id:<id>:<parent>'.
    """
    # State constants. Their values should be ascending according to increasing
    # visibility.
    DELETED = 0
    DRAFT = 1
    PUBLISHED = 10
    # Rating range
    MIN_RATING = -5
    MAX_RATING = 5
    # The text data
    dialog_source = db.TextProperty(required=True)
    formatting = db.BlobProperty()  # pickled list of Line objects (see rebuild)
    preserve_formatting = db.BooleanProperty(default=False)
    note = db.TextProperty()
    labels = db.StringListProperty()
    location_labels = db.StringListProperty()
    # State bits
    draft = db.BooleanProperty(required=True, default=True)
    state = db.IntegerProperty(default=DRAFT)
    # Timestamps
    submitted = db.DateTimeProperty(required=True, auto_now_add=True)
    modified = db.DateTimeProperty()
    built = db.DateTimeProperty(default=datetime.datetime.fromtimestamp(0))
    # Migration support
    legacy_id = db.IntegerProperty()
    # Editing support
    clone_of = db.SelfReferenceProperty()
    # Rating support
    rating_total = db.IntegerProperty(default=0)
    rating_count = db.IntegerProperty(default=0)
    rating_buckets = db.BlobProperty()  # pickled {rating value: count} dict
    @classmethod
    def createDraft(cls, account, source,
                    context=None,
                    note=None,
                    submitted=None,
                    legacy_id=None,
                    ):
        """Create a new draft quote owned by *account* and bump its draft count."""
        logging.info('creating draft by %r', account)
        kwargs = {}
        if submitted:
            kwargs['submitted'] = submitted
        quote = cls(parent=account,
                    draft=True,
                    state=cls.DRAFT,
                    context=context,
                    dialog_source=source,
                    note=note,
                    legacy_id=legacy_id,
                    **kwargs
                    )
        quote.rebuild()
        def transaction():
            # Re-fetch the account inside the transaction before mutating it.
            acc = accounts.Account.get(account.key())
            acc.draft_count += 1
            acc.put()
            return quote
        return db.run_in_transaction(transaction)
    @classmethod
    def createLegacy(cls, quote_id, account, network, server, channel, source,
                     note, modified, submitted):
        """Import (or re-import) a quote from the legacy system.

        Location metadata is encoded as 'network:x'/'server:x'/'channel:x'
        labels.  Re-imports update the existing entity in place.
        """
        loc_labels = []
        def labelize(type, value):
            # Normalize a location value into a 'type:value' label.
            value = value.strip().replace(' ', '-').replace('#', '')
            if value:
                loc_labels.append('%s:%s' % (type, value))
        labelize('network', network)
        labelize('server', server)
        labelize('channel', channel)
        quote = cls.getByLegacyId(quote_id)
        if quote:
            new = False
            quote.dialog_source = source
            quote.note = note
            quote.submitted = submitted
            quote.modified = modified or submitted
            quote.labels = loc_labels
        else:
            new = True
            quote = cls(parent=account,
                        legacy_id=quote_id,
                        dialog_source=source,
                        note=note,
                        submitted=submitted,
                        modified=modified or submitted,
                        draft=False,
                        state=cls.PUBLISHED,
                        labels=loc_labels,
                        )
        quote.rebuild()
        if new:
            # Only record the publish action the first time we see this quote.
            system.record(account, VERB_PUBLISHED, quote, timestamp=submitted)
        return quote
    @classmethod
    def getByKey(cls, key):
        """Fetch a quote by datastore key, going through memcache first."""
        cache = memcache.Client()
        logging.info('checking quote:key:%s', key)
        quote = cache.get('quote:key:%s' % key)
        if not quote:
            logging.info('missed!')
            quote = cls.get(key)
            if quote:
                quote.updateCache()
        return quote
    @classmethod
    def getById(cls, id, parent):
        """Fetch a quote by numeric id + parent account, via memcache."""
        cache = memcache.Client()
        logging.info('checking quote:id:%s:%s', id, parent.key().id())
        quote = cache.get('quote:id:%s:%s' % (id, parent.key().id()))
        if not quote:
            logging.info('missed!')
            quote = cls.get_by_id(id, parent)
            if quote:
                quote.updateCache()
        return quote
    @classmethod
    def getDraft(cls, account, key):
        """Like getQuoteByKey, but additionally require DRAFT state."""
        draft = cls.getQuoteByKey(account, key)
        if draft.state != cls.DRAFT:
            raise InvalidQuoteStateException
        return draft
    @classmethod
    def getQuoteByKey(cls, account, key):
        """Fetch by key, enforcing that unpublished quotes are owner-only.

        Raises InvalidKeyException / NoPermissionException accordingly.
        """
        quote = cls.getByKey(key)
        if not quote:
            raise InvalidKeyException
        if quote.state < cls.PUBLISHED and account.key() != quote.parent_key():
            raise NoPermissionException
        return quote
    @classmethod
    def getQuoteByShortId(cls, account, id, parent):
        """Same access rules as getQuoteByKey, addressed by (id, parent)."""
        quote = cls.getById(id, parent)
        if not quote:
            raise InvalidKeyException
        if quote.state < cls.PUBLISHED and account.key() != quote.parent_key():
            raise NoPermissionException
        return quote
    @classmethod
    def getByLegacyId(cls, legacy_id):
        """Return the quote imported under *legacy_id*, or None."""
        return cls.all().filter('legacy_id =', legacy_id).get()
    @classmethod
    def getRecentQuotes(cls, reversed=False, **kwargs):
        """Published quotes ordered by submission time (newest first by default)."""
        return cls.getQuotesByTimestamp('submitted',
                                        descending=not reversed,
                                        include_drafts=False,
                                        **kwargs)
    @classmethod
    def getQuotesByBuildTime(cls, **kwargs):
        """Quotes ordered by the time their formatting was last rebuilt."""
        return cls.getQuotesByTimestamp('built', **kwargs)
    @classmethod
    def getQuotesByTimestamp(cls, property,
                             start=None,
                             offset=0,
                             limit=10,
                             descending=False,
                             include_drafts=True,
                             ancestor=None,
                             where=[],
                             params={},
                             ):
        """Page through quotes ordered by a timestamp property.

        Returns (quotes, next_start, next_offset) for cursor-style paging,
        where next_offset skips entities sharing the boundary timestamp.
        The mutable defaults for *where*/*params* are safe: both are copied
        before being modified.
        """
        logging.info('quotes by ts: property=%s, start=%s, offset=%s limit=%s, descending=%s, drafts=%s, ancestor=%s',
                     property, start, offset, limit, descending, include_drafts, ancestor)
        where = where[:]
        params = params.copy()
        op = '>='
        if descending:
            op = '<='
        if not start: start = datetime.datetime.now()
        if start is not None:
            where.append('%s %s :start' % (property, op))
            params['start'] = start
        if ancestor:
            where.append('ANCESTOR IS :ancestor')
            params['ancestor'] = ancestor
        if not include_drafts:
            where.append('state = :published')
            params['published'] = cls.PUBLISHED
        if descending:
            order = 'ORDER BY %s DESC' % property
        else:
            order = 'ORDER BY %s' % property
        logging.info('offset=%d, limit=%d', offset, limit)
        gql = ("WHERE %s %s" % (' AND '.join(where), order))
        logging.info('GQL: %s', gql)
        query = cls.gql(gql, **params)
        quotes = list(query.fetch(offset=offset, limit=limit))
        logging.info('got back %d quotes', len(quotes))
        logging.info('%s', [(i, str(quotes[i].submitted), quotes[i].submitted) for i in xrange(len(quotes))])
        if len(quotes) == limit:
            # NOTE(review): when limit < 2 the xrange below is empty and ``i``
            # is unbound at ``offset = i - 1`` (NameError); when the loop
            # completes without break, the final ``i`` is reused -- verify.
            for i in xrange(2, limit + 1):
                if quotes[-i].submitted != quotes[-1].submitted:
                    break
            start = quotes[-1].submitted
            offset = i - 1
        return quotes, start, offset
    @classmethod
    def getDraftQuotes(cls, account, offset=0, limit=10, order='-submitted'):
        """Return the account's draft quotes, newest first by default."""
        query = (cls.all()
                 .ancestor(account)
                 .filter('state =', cls.DRAFT)
                 .order(order)
                 )
        return list(query.fetch(offset=offset, limit=limit))
    @classmethod
    def search(cls, query, offset=0, limit=10):
        """Full-text search over published quotes."""
        logging.info('quote search: query=%r, offset=%r, limit=%r', query, offset, limit)
        # The search index only supports searching by one term. We'll filter any
        # additional terms as we fetch from the index.
        query_terms = query.lower().split()
        db_query = cls.all()
        db_query.search(query_terms[0])
        db_query.filter('state =', cls.PUBLISHED)
        # TODO: Don't overfetch so much, and make more efficient.
        def matchAll(quote):
            # Check the remaining terms against the entity's searchable index.
            index = quote.__dict__['_entity']['__searchable_text_index']
            for term in query_terms[1:]:
                if term not in index:
                    return False
            return True
        results = [quote for quote in db_query.fetch(offset=0, limit=1000)
                   if matchAll(quote)]
        return results[offset:offset+limit]
    def put(self):
        # NOTE(review): ``if True or ...`` makes this condition always true, so
        # state is recomputed from the draft flag on every put.
        if True or self.state is None:
            if self.draft:
                self.state = self.DRAFT
            else:
                self.state = self.PUBLISHED
        return db.Model.put(self)
    def invalidateCache(self):
        """Drop both memcache entries for this quote."""
        cache = memcache.Client()
        logging.info('invalidating quote:key:%s', self.key())
        logging.info('invalidating quote:id:%s:%s', self.key().id(), self.parent_key().id())
        cache.delete('quote:key:%s' % self.key())
        cache.delete('quote:id:%s:%s' % (self.key().id(), self.parent_key().id()))
    def updateCache(self):
        """Store this quote under both memcache keys."""
        cache = memcache.Client()
        logging.info('caching quote:key:%s', self.key())
        logging.info('caching quote:id:%s:%s', self.key().id(), self.parent_key().id())
        cache.set('quote:key:%s' % self.key(), self)
        cache.set('quote:id:%s:%s' % (self.key().id(), self.parent_key().id()), self)
    def getProperties(self):
        """Return this entity's property values, minus clone_of."""
        return dict([(prop, getattr(self, prop, None))
                     for prop in self.properties() if prop != 'clone_of'])
    def clone(self, target=None):
        """Copy this quote into a new entity, or into its linked clone *target*."""
        if target is None:
            return Quote(parent=self.parent(), **self.getProperties())
        else:
            # Both sides must point at each other before we overwrite target.
            if self.clone_of.key() != target.key():
                raise InvalidQuoteStateException
            if target.clone_of.key() != self.key():
                raise InvalidQuoteStateException
            for name, value in self.getProperties().iteritems():
                setattr(target, name, value)
            return target
    def edit(self, account):
        """Start editing a published quote: return its (possibly new) draft clone."""
        if self.state < self.PUBLISHED:
            raise InvalidQuoteStateException
        if account.key() != self.parent_key():
            raise NoPermissionException
        if self.clone_of:
            # An edit draft already exists; reuse it.
            return self.clone_of
        draft = self.clone()
        draft.clone_of = self
        draft.draft = True
        draft.state = self.DRAFT
        draft.put()
        self.clone_of = draft
        self.put()
        return draft
    def unpublish(self):
        """Record deletion, drop caches, and delete the entity."""
        system.record(self.parent(), VERB_DELETED, self)
        self.invalidateCache()
        self.delete()
    def republish(self, modified=None):
        """Re-publish an already-published quote after an edit."""
        self.publish(modified=modified, update=True)
        system.record(self.parent(), VERB_UPDATED, self)
    def publish(self, modified=None, update=False):
        """Promote a draft to PUBLISHED (or apply an edit when *update*).

        When this draft is a clone of a published quote, publishing is
        delegated to the original via republish().
        """
        logging.info('publish: modified=%s, update=%s', modified, update)
        if not update:
            if self.state != self.DRAFT:
                raise InvalidQuoteStateException
            if self.clone_of:
                self.clone_of.republish(modified=modified)
                return
        def transaction():
            logging.info('xaction: draft=%r, clone_of=%s', self.draft, self.clone_of)
            self.draft = False
            self.state = self.PUBLISHED
            if self.clone_of:
                # The edit draft is consumed by publishing.
                self.clone_of.invalidateCache()
                self.clone_of.delete()
                self.clone_of = None
            if update:
                self.modified = modified or datetime.datetime.now()
            else:
                self.submitted = modified or datetime.datetime.now()
            self.put()
            account = accounts.Account.get(self.parent_key())
            account.quote_count += 1
            account.draft_count -= 1
            account.put()
        db.run_in_transaction(transaction)
        system.record(self.parent(), VERB_PUBLISHED, self, timestamp=self.submitted)
        self.updateCache()
        return self
    def update(self,
               dialog=None,
               note=None,
               preserve_formatting=None,
               modified=None,
               publish=False,
               ):
        """Modify a draft's fields, rebuild formatting, optionally publish."""
        if self.state != self.DRAFT:
            raise InvalidQuoteStateException
        if dialog is not None:
            self.dialog_source = dialog
        if note is not None:
            self.note = note or None
        if preserve_formatting is not None:
            self.preserve_formatting = preserve_formatting
        self.rebuild()
        if publish:
            self.publish(modified=modified)
        else:
            system.record(self.parent(), VERB_UPDATED, self)
    def getDialog(self):
        return self.getFormattedDialog()
    def getFormattedDialog(self):
        """Yield {'text', 'params'} dicts for each line, applying formatters.

        Each formatter's matched span is cut from the text and its params are
        merged into the line's params dict.
        """
        lines = pickle.loads(self.formatting)
        for line in lines:
            params = {}
            text = line.original
            for formatter in line.formatting:
                text = text[:formatter.range[0]] + text[formatter.range[1]:]
                params.update(formatter.params)
            yield {'text': text, 'params': params}
    def rebuild(self):
        """Re-parse dialog_source into pickled Line objects and nick labels."""
        lines = list(LineFormatterRegistry.parseDialog(self.dialog_source,
                                                       preserve_formatting=
                                                       self.preserve_formatting))
        self.formatting = db.Blob(pickle.dumps(lines))
        nicks = set()
        for line in lines:
            if line.formatting:
                for formatter in line.formatting:
                    if 'normalized_nick' in formatter.params:
                        nicks.add(formatter.params['normalized_nick'])
        for nick in nicks:
            self.addLabel('nick:%s' % nick)
        logging.info('labels: %r', self.labels)
        self.built = datetime.datetime.now()
        self.put()
    def label(self, prefix):
        """Return all labels that start with *prefix*."""
        return [value for value in self.labels if value.startswith(prefix)]
    def clearLabels(self):
        del self.labels[:]
    def addLabel(self, label):
        """Add a normalized (stripped, lowercase) label once."""
        if self.labels is None:
            self.labels = []
        label = label.strip().lower()
        if label not in self.labels:
            logging.info('adding label: %r', label)
            self.labels.append(label)
    def formattedSubmitted(self):
        return self.formattedTimestamp(self.submitted)
    def formattedModified(self):
        return self.formattedTimestamp(self.modified)
    @staticmethod
    def formattedTimestamp(timestamp):
        # NOTE(review): %e/%T/%Z are platform-dependent strftime extensions.
        return timestamp.strftime('%B %e, %Y, at %T %Z')
    def getLabelDict(self):
        """Split labels into network/server/channel entries plus 'other'."""
        # TODO: Support different label sets
        quote_labels = {}
        other = []
        for label in self.labels:
            parts = label.split(':', 1)
            if len(parts) == 2 and parts[0] in ['network', 'server', 'channel']:
                if parts[0] not in quote_labels:
                    quote_labels[parts[0]] = parts[1]
                    continue
            other.append(label)
        quote_labels['other'] = ', '.join(other)
        quote_labels['other_list'] = other
        return quote_labels
    def getAccountRating(self, account):
        """Return *account*'s Rating entity for this quote, or None."""
        return Rating.all().ancestor(account).filter('quote =', self).get()
    def rate(self, account, value):
        """Record or change *account*'s rating, keeping totals and buckets in sync."""
        rating = self.getAccountRating(account)
        def transaction(rating=rating):
            buckets = self.getRatingBuckets()
            if rating and rating.value in buckets:
                # Move the vote out of its previous bucket.
                buckets[rating.value] -= 1
            buckets[value] = buckets.get(value, 0) + 1
            self.setRatingBuckets(buckets)
            if rating:
                self.rating_total += value - rating.value
            else:
                self.rating_count += 1
                self.rating_total += value
            self.put()
        db.run_in_transaction(transaction)
        if rating:
            rating.timestamp = datetime.datetime.now()
            rating.value = value
        else:
            rating = Rating(parent=account,
                            quote=self,
                            value=value,
                            )
        return rating.put()
    def getRatingBuckets(self):
        """Return the unpickled {value: count} histogram (empty if unset)."""
        logging.info('get rating buckets')
        if self.rating_buckets:
            logging.info('loading from pickle')
            return pickle.loads(self.rating_buckets)
        else:
            return {}
    def rebuildRatingBuckets(self):
        """Recompute the histogram from the Rating entities themselves."""
        buckets = {}
        logging.info('loading from ratings')
        query = Rating.all().filter('quote =', self.key())
        for rating in query.fetch(offset=0, limit=1000):
            buckets[rating.value] = buckets.get(rating.value, 0) + 1
        if sum(buckets.itervalues()) >= 1000:
            # Fetch a second page for extremely popular quotes.
            logging.info('quote has > 1000 ratings?')
            for rating in query.fetch(offset=1000, limit=1000):
                buckets[rating.value] = buckets.get(rating.value, 0) + 1
        self.setRatingBuckets(buckets)
        self.put()
    def setRatingBuckets(self, buckets):
        self.rating_buckets = pickle.dumps(buckets)
    def ratingBucketChart(self, size='350x75'):
        """Return a Google Chart API URL for the rating histogram."""
        try:
            logging.info('ratingBucketChart')
            keys = range(self.MIN_RATING, self.MAX_RATING + 1)
            buckets = self.getRatingBuckets()
            if not buckets:
                self.rebuildRatingBuckets()
                buckets = self.getRatingBuckets()
            logging.info('buckets: %r', buckets)
        except:
            logging.exception('failed to get buckets')
            raise
        if buckets:
            top = max(buckets.values())
        else:
            top = 1
        params = {
            'cht': 'bvs',
            'chs': size,
            'chd':
                # Scale each bucket to a 0-100 integer percentage of the max.
                't:%s' % ','.join(str(100 * buckets.get(i, 0) // top) for i in keys),
            'chco': '334433',
            'chxt': 'x',
            'chxl': '0:|%s' % '|'.join(map(str, keys)),
            'chf': 'bg,s,ccffcc',
        }
        return 'http://chart.apis.google.com/chart?%s' % urllib.urlencode(params)
class Rating(db.Model):
    """One account's rating of one quote (stored with the account as parent)."""
    quote = db.ReferenceProperty(Quote, required=True)
    value = db.IntegerProperty(required=True)
    timestamp = db.DateTimeProperty(required=True, auto_now_add=True)
|
import tensorflow as tf
from .piece import Piece
class Board:
    """3x3 tic-tac-toe board backed by a flat 9-element list of Piece values."""
    SIZE = 9        # total number of squares
    ROW_LENGTH = 3  # squares per row
    def __init__(
        self, board_positions, previous=None, next_player=Piece.CROSS
    ):
        self._array = board_positions
        self.previous = previous          # parent board in the move chain
        self._next_player = next_player   # whose turn it is on this board
    @staticmethod
    def create():
        """Return an empty board (CROSS moves first by default)."""
        return Board([Piece.BLANK for _ in range(Board.SIZE)])
    def set_piece_at_index(self, index_to_set, new_value):
        """Return a new board with *new_value* placed at *index_to_set*.

        NOTE(review): the new board is built without a next_player argument,
        so its next player always defaults to Piece.CROSS regardless of whose
        turn it actually is -- confirm this is what play() intends.
        """
        return Board(
            [
                new_value if index == index_to_set else old_value
                for index, old_value in enumerate(self._array)
            ],
            previous=self,
        )
    def play(self, at_index):
        """Place the current player's piece at *at_index*; return the new board.

        Raises RuntimeError if the square is occupied.
        NOTE(review): this inverts _next_player on *self* (the old board),
        while the returned board keeps the default next player -- it looks
        like the inverted player was meant to be set on the returned board;
        verify against callers.
        """
        if self._array[at_index] != Piece.BLANK:
            raise RuntimeError("cannot play on taken space")
        board = self.set_piece_at_index(at_index, self._next_player)
        self._next_player = self._next_player.invert()
        return board
    def to_array(self):
        return self._array
    def get_indices_of_blanks(self):
        """Indices of all empty squares (the legal moves)."""
        return [i for i, p in enumerate(self._array) if p == Piece.BLANK]
    def to_nn_input(self):
        """Encode the board for the network: 9 nought flags + 9 cross flags
        + 2 next-player flags = 20 inputs, shape (1, 20)."""
        noughts = [p == Piece.NOUGHT for p in self._array]
        crosses = [p == Piece.CROSS for p in self._array]
        players = [
            self._next_player == Piece.NOUGHT,
            self._next_player == Piece.CROSS,
        ]
        return tf.constant(noughts + crosses + players, shape=(1, 20))
    def has_winner(self):
        return bool(self.get_winning_player())
    def is_full(self):
        return all([p != Piece.BLANK for p in self._array])
    def is_game_over(self):
        # Game ends on a win or a draw (full board).
        return self.has_winner() or self.is_full()
    @staticmethod
    def _get_winner_for_run(run):
        """Return the piece filling this 3-square run, or None."""
        first_piece = run[0]
        if first_piece == Piece.BLANK:
            return None
        if all(map(lambda p: p == first_piece, run)):
            return first_piece
        return None
    def _run_indices_to_pieces(self, run):
        return [self._array[i] for i in run]
    def get_winning_player(self):
        """Return the winning Piece, or None if nobody has three in a row."""
        runs = (
            # horizontals
            [0, 1, 2],
            [3, 4, 5],
            [6, 7, 8],
            # verticals
            [0, 3, 6],
            [1, 4, 7],
            [2, 5, 8],
            # diagonals
            [0, 4, 8],
            [2, 4, 6],
        )
        for run in map(self._run_indices_to_pieces, runs):
            winning_piece = self._get_winner_for_run(run)
            if winning_piece:
                return winning_piece
        return None
|
from dataclasses import dataclass
@dataclass
class Student:
    """Simple record used to demonstrate sorting by attribute."""
    name: str
    age: int
# Demo 1: list.sort() sorts in place and returns None.
li = [Student('小明', 11), Student('小红', 12),
      Student('小青', 9)]
li.sort(key=lambda e: e.age)
print(li)
# Demo 2: sorted() leaves the original list untouched and returns a new one.
li = [Student('小明', 11), Student('小红', 12),
      Student('小青', 9)]
li2 = sorted(li, key=lambda e: e.age)
print(li)
print(li2)
|
import argparse
import os
from torch.backends import cudnn
from data_loader import get_loader
from solver import Solver
def str2bool(v):
    """Convert a command-line string into a boolean.

    Bug fix: the original tested ``v.lower() in ('true')``; ``('true')`` is
    just the string ``'true'``, so the test was a *substring* check and
    returned True for inputs such as ``''``, ``'t'`` or ``'rue'``.  Compare
    against an explicit tuple of accepted truthy spellings instead.
    """
    return v.lower() in ('true', 't', '1', 'yes', 'y')
def main(config):
    """Dispatch on config.mode: export a mobile model, train, or sample."""
    if config.mode == 'gen_mobile_model':
        # Model export needs no data loaders.
        solver = Solver(config, None, None)
        solver.gen_mobile_model()
        return
    svhn_loader, mnist_loader = get_loader(config)
    solver = Solver(config, svhn_loader, mnist_loader)
    # Let cuDNN pick the fastest convolution algorithms for fixed input sizes.
    cudnn.benchmark = True
    # create directories if not exist
    if not os.path.exists(config.model_path):
        os.makedirs(config.model_path)
    if not os.path.exists(config.sample_path):
        os.makedirs(config.sample_path)
    if config.mode == 'train':
        solver.train()
    elif config.mode == 'sample':
        solver.sample()
if __name__ == '__main__':
    # Build the CLI, parse it, and hand the parsed namespace to main().
    parser = argparse.ArgumentParser()
    # model hyper-parameters
    parser.add_argument('--image_size', type=int, default=256)
    parser.add_argument('--g_conv_dim', type=int, default=64)
    parser.add_argument('--d_conv_dim', type=int, default=64)
    # training hyper-parameters
    parser.add_argument('--train_iters', type=int, default=40000)
    parser.add_argument('--batch_size', type=int, default=1)
    parser.add_argument('--num_workers', type=int, default=2)
    parser.add_argument('--lr', type=float, default=0.0002)
    parser.add_argument('--beta1', type=float, default=0.5)   # Adam beta1
    parser.add_argument('--beta2', type=float, default=0.999) # Adam beta2
    parser.add_argument('--rec_loss_weight', type=float, default=1.0)
    parser.add_argument('--edge_loss_weight', type=float, default=1.0)
    # misc
    parser.add_argument('--mode', type=str, default='train')
    parser.add_argument('--model_path', type=str, default='output/models')
    parser.add_argument('--sample_path', type=str, default='output/samples')
    parser.add_argument('--photo_path', type=str, default='data/horse/trainA')
    parser.add_argument('--washink_path', type=str, default='data/horse/trainB')
    parser.add_argument('--log_step', type=int , default=10)
    parser.add_argument('--sample_step', type=int , default=100)
    parser.add_argument('--sample_count', type=int , default=64)
    config = parser.parse_args()
    print(config)
    main(config)
|
import os
import glob
import pandas as pd
import boto3
import logging
from botocore.exceptions import ClientError
import sys
import threading
# Paths are resolved relative to the CWD at import time.
text_path = str(os.path.abspath(os.path.join('../data/preprocessed')))
csv_path = str(os.path.abspath(os.path.join('../data/text.csv')))
# Target S3 bucket and the object name for the uploaded CSV.
bucket_name = "chang-stt-bucket"
cloud_text_file_name = "chang-amharic-text-final.csv"
def readData(path, start, end):
    """Collect the lines of ``data_<i>.txt`` for i in [start, end).

    Files are read as UTF-8 from *path*; lines (newlines preserved) from all
    files are returned as one flat list.  Fix: the original never closed the
    file handles -- a ``with`` block now guarantees closure even on errors.
    """
    allSentence = []
    for i in range(start, end):
        with open(path + '/data_%i.txt' % i, encoding="utf8") as file:
            allSentence.extend(file.readlines())
    return allSentence
class TextDataServ:
    """Builds a CSV from preprocessed text files and syncs it with S3."""
    def __init__(self, textPath: str):
        # Directory containing the data_<i>.txt source files.
        self.textPath = textPath
        pass
    def get_file_length(self):
        """Count the .txt files in textPath.

        NOTE(review): os.chdir permanently changes the process working
        directory as a side effect -- later relative paths resolve from
        textPath.
        """
        os.chdir(self.textPath)
        return len(glob.glob("*.txt"))
    def creat_csv_text(self, csv_path = str(os.path.abspath(os.path.join('../data/text.csv')))):
        """Assemble every sentence into a DataFrame and write it to *csv_path*.

        Columns: text, length (chars), file_name (data_<row index>).
        NOTE(review): the default csv_path is computed once at class
        definition time, relative to the import-time CWD -- confirm intended.
        """
        total_file = self.get_file_length()
        sents = readData(self.textPath, 0, total_file)
        text_df = pd.DataFrame()
        text_df['text'] = sents
        text_df['length'] = text_df['text'].map(lambda x: len(x))
        file_names = [ f"data_{i}" for i in range(0, len(text_df['text'].to_list())) ]
        text_df['file_name'] = file_names
        text_df.to_csv(csv_path, index=False)
        # head()/info() return/print summaries; useful only in interactive runs.
        text_df.head()
        text_df.info()
        print(len(file_names))
        return text_df
    def create_text_bucket(self, bucket_name="chang-stt-bucket", region=None):
        """Create the S3 bucket (region-aware).

        Returns False on ClientError; NOTE(review): returns None (not True)
        on success -- callers must not rely on a truthy success value.
        """
        try:
            if region is None:
                s3_client = boto3.client('s3')
                s3_client.create_bucket(Bucket=bucket_name)
            else:
                s3_client = boto3.client('s3', region_name=region)
                location = {'LocationConstraint': region}
                s3_client.create_bucket(Bucket=bucket_name,
                                        CreateBucketConfiguration=location)
        except ClientError as e:
            logging.error(e)
            return False
    def upload_text_csv(self, csv_path: str, bucket: str, upload_file_name: str):
        """Upload the CSV to S3 with a progress callback; True on success."""
        s3_client = boto3.client('s3')
        print("transfering csv file to s3 bucket")
        try:
            with open(csv_path, "rb") as f:
                response = s3_client.upload_fileobj(f, bucket, upload_file_name,
                                                    Callback=ProgressPercentage(csv_path))
            print("Finished transfering csv file to s3 bucket")
        except ClientError as e:
            logging.error(e)
            return False
        return True
    def download_text_csv(self, csv_path: str, bucket: str, file_name: str):
        """Download *file_name* from *bucket* into *csv_path*; True on success."""
        s3 = boto3.client('s3')
        try:
            with open(csv_path, 'wb') as f:
                s3.download_fileobj(bucket, file_name, f)
            print("Finished downloading csv file from s3 bucket")
            return True
        except ClientError as e:
            logging.error(e)
            return False
class ProgressPercentage(object):
    """boto3 transfer callback: prints cumulative progress for one file.

    An instance is called with the number of bytes transferred since the last
    call; it accumulates the total under a lock and rewrites one stdout line.
    """
    def __init__(self, filename):
        self._filename = filename
        self._size = float(os.path.getsize(filename))
        self._seen_so_far = 0
        # boto3 may invoke the callback from multiple transfer threads.
        self._lock = threading.Lock()
    def __call__(self, bytes_amount):
        # To simplify, assume this is hooked up to a single filename
        with self._lock:
            self._seen_so_far += bytes_amount
            # Fix: guard against zero-length files, which previously raised
            # ZeroDivisionError; report them as 100% complete.
            if self._size:
                percentage = (self._seen_so_far / self._size) * 100
            else:
                percentage = 100.0
            sys.stdout.write(
                "\r%s  %s / %s  (%.2f%%)" % (
                    self._filename, self._seen_so_far, self._size,
                    percentage))
            sys.stdout.flush()
if __name__ == "__main__":
text_serv = TextDataServ(text_path)
text_serv.creat_csv_text(csv_path)
text_serv.create_text_bucket(bucket_name)
text_serv.upload_text_csv(csv_path, bucket_name, cloud_text_file_name)
# text_serv.download_text_csv(csv_path, bucket_name, cloud_text_file_name) |
from blocks.initialization import IsotropicGaussian, Constant
import data
from model.mlp_emb import Model, Stream
# Whether the training stream uses trip prefixes ("cuts") instead of full trips.
use_cuts_for_training = True
# (feature name, vocabulary size, embedding dimension) triples; the
# commented-out entries are candidate features that are currently disabled.
dim_embeddings = [
    # ('origin_call', data.origin_call_train_size, 100),
    # ('origin_stand', data.stands_size, 100),
    # ('week_of_year', 52, 100),
    # ('day_of_week', 7, 100),
    ('qhour_of_day', 24 * 4, 10),  # quarter-hour of day: 96 buckets
    ('day_type', 3, 1),
]
# MLP input width = total width of all concatenated embeddings.
dim_input = sum(x for (_, _, x) in dim_embeddings)
dim_hidden = [10, 10]        # sizes of the hidden layers
output_mode = "destination"  # the model predicts the trip destination
dim_output = 2               # (latitude, longitude)
# Parameter initialization schemes.
embed_weights_init = IsotropicGaussian(0.01)
mlp_weights_init = IsotropicGaussian(0.01)
mlp_biases_init = IsotropicGaussian(0.001)
# Optimization hyper-parameters.
learning_rate = 0.001
momentum = 0.9
batch_size = 100
max_splits = 100
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 9 14:35:11 2020
@author: Ethan
"""
import rospy
from pololu_drv8835_rpi import motors # MAX_SPEED is 480 (hard-coded)
'''
Future work: write these in yaml file
'''
speed_desired = 0.5 # desired wheel speed in rpm
angle_desired = 0.0 # desired angle - 0
k_p_angle = 4*480.0/90.0 # propotional gain for angle control
k_i_angle = k_p_angle/4. # integral gain for angle control
k_d_angle = 1 # derivatibe gain for angle control
k_p_speed = 15 # proportional gain for speed control (60)
k_i_speed = 35 # integral gain for speed control(30)
time_old= 0.0
# NOTE(review): evaluated at module import, before rospy.init_node() runs in
# talker(); rospy normally requires an initialized node for time -- verify.
time_current = rospy.get_rostime()
def talker():
    """Set up the controller node, subscribe to sensor topics, run PID control."""
    # Fix: Float32 was referenced below but never imported anywhere in this
    # module, so each Subscriber call raised NameError.  Import it locally.
    from std_msgs.msg import Float32
    # Launch a node called "controller_node"
    rospy.init_node('controller_node', anonymous=False)
    # Subscribe to encoder data
    wheel_speed = rospy.Subscriber('/Encoder_data', Float32, update_current_wheel_speed)
    # Subscribe to imu angle data
    imu_angle = rospy.Subscriber('/IMU_angle', Float32, update_imu_angle)
    # PID control
    PID_control()
def update_current_wheel_speed(msg_in):
    # Subscriber callback: cache the latest encoder reading in a module global.
    global current_wheel_speed
    current_wheel_speed = msg_in.data
def update_imu_angle(msg_in):
    # Subscriber callback: cache the latest IMU angle in a module global.
    global current_imu_angle
    current_imu_angle = msg_in.data
def PID_control():
    """Speed PI loop that produces a desired angle (incomplete: never
    commands the motors)."""
    global current_wheel_speed
    global current_imu_angle
    # NOTE(review): time_old/time_current are assigned below without a
    # ``global`` declaration, which makes them locals of this function; the
    # first ``time_old = time_current`` therefore raises UnboundLocalError.
    speed_error_cum = 0.0
    while True:
        #### time update
        time_old = time_current # set previous time reading
        time_current = rospy.get_rostime() # set current time reading
        # NOTE(review): the difference of two rospy Times is a Duration, not
        # a float -- the arithmetic below likely needs dt.to_sec().
        dt = time_current - time_old # time step
        # P
        speed_error = speed_desired - current_wheel_speed
        # I
        speed_error_cum += speed_error * dt
        # Effort
        # not sure what speed_direction_comp is used for
        # NOTE(review): assigns a local that shadows the module-level
        # angle_desired and is never used afterwards -- verify intent.
        angle_desired = 1 * (k_p_speed * speed_error + k_i_speed * speed_error_cum)
|
from django.contrib.auth.models import User
from django.db import models
from ckeditor.fields import RichTextField
# Create your models here.
from django.urls import reverse
class ScopeManage(models.Model):
    """An opportunity post (fellowship, internship, ...) with reactions/views."""
    SCOPE_TYPES = (
        ('Fellowship', 'Fellowship'),
        ('Internship', 'Internship'),
        ('Research Assistant', 'Research Assistant'),
        ('Teacher Assistant', 'Teacher Assistant'),
        ('Trainee', 'Trainee'),
    )
    # Author of the post.
    user=models.ForeignKey(User, on_delete=models.CASCADE)
    scope_title=models.CharField(max_length=100, blank=False)
    category=models.CharField(choices=SCOPE_TYPES, max_length=30)
    # NOTE(review): auto_now=True updates this on *every* save; for a
    # creation timestamp auto_now_add=True would be expected -- verify.
    post_date=models.DateTimeField(auto_now=True, auto_now_add=False)
    end_date=models.DateField(auto_now=False,auto_now_add=False)
    details = RichTextField()
    # Users who liked / loved / viewed the post.
    reactions = models.ManyToManyField(User, related_name="scope_like", blank=True)
    love = models.ManyToManyField(User, related_name="scope_love", blank=True)
    scope_viewer = models.ManyToManyField(User, blank=True, related_name="scope_viewer")
    # Denormalized view counter.
    scope_counter = models.IntegerField(default=0)
    cover_image=models.ImageField(upload_to="scope/", blank=False)
    url=models.URLField(blank=True)
    def __str__(self):
        return self.scope_title
    def total_reactions(self):
        """Number of likes."""
        return self.reactions.count()
    def total_love(self):
        """Number of loves."""
        return self.love.count()
    def get_absolute_url(self):
        # Detail view is addressed by (category, id).
        return reverse("scopeview", kwargs={"category": self.category, "id": self.id})
    def get_absolute_url_manage(self):
        return reverse("managescope")
    def post_comment(self):
        """Number of comments attached to this post."""
        post=ScopeComment.objects.filter(post=self).count()
        return post
class ScopeComment(models.Model):
    """A (possibly threaded) user comment on a ScopeManage post."""
    post = models.ForeignKey(ScopeManage,on_delete=models.CASCADE)
    user = models.ForeignKey(User,on_delete=models.CASCADE)
    # Self-reference enables replies; top-level comments have reply=None.
    reply = models.ForeignKey('ScopeComment',on_delete=models.CASCADE,null=True,related_name="replies")
    content = models.TextField(max_length=160)
    timestamp = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        # NOTE(review): renders "<post title>-<post author>"; self.post.user
        # is the post's author, not the commenter -- confirm whether
        # self.user was intended.
        return '{}-{}'.format(self.post.scope_title,str(self.post.user))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from collections import defaultdict
import datetime
import logging
import unicodedata
# pip install python-telegram-bot
# https://github.com/python-telegram-bot/python-telegram-bot
# https://github.com/python-telegram-bot/python-telegram-bot/blob/master/examples/README.md
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
from startrek_fsm import TrekGame
# File where known chat ids are persisted, one id per line.
CONTACTLIST_FN = 'startrekbot_contacts.tsv'
# Telegram Bot API token; fill in before deploying. (Stray trailing
# semicolon removed -- non-idiomatic in Python.)
BOT_KEY = "PUT HERE API KEY"
def load_contacts(fn=None):
    """Load the set of known chat ids from *fn* (default CONTACTLIST_FN).

    Each line is stripped and added to the set.  Returns an empty set when
    the file cannot be read.  Fixes: the file handle is now closed via
    ``with`` (the original leaked it) and the bare ``except:`` is narrowed
    to I/O errors so programming mistakes are no longer swallowed.
    """
    if fn is None: fn = CONTACTLIST_FN
    contacts = set()
    try:
        with open(fn) as fh:
            for l in fh:
                contacts.add(l.strip())
    except IOError:
        return set()
    return contacts
def save_contacts(contacts, fn=None):
    """Write every contact id to *fn* (default CONTACTLIST_FN), one per line.

    Fixes: replaces the Python-2-only ``print >>fh`` statement (a syntax
    error under Python 3) with an equivalent write, and uses ``with`` so the
    handle is closed (the original never closed it).  Output is identical:
    ``str(k)`` followed by a newline for each contact.
    """
    if fn is None: fn = CONTACTLIST_FN
    with open(fn, "w") as fh:
        for k in contacts:
            fh.write(str(k) + "\n")
def update_contacts(contacts, uu, fn=None):
    """Add chat id *uu* to *contacts*, persist the set, and return it.

    Bug fix: the original called ``save_contacts(contacts)`` and silently
    ignored its *fn* argument, always writing to the default file; *fn* is
    now forwarded.
    """
    contacts.add(uu)
    save_contacts(contacts, fn)
    return contacts
def bot_error(bot, update, error):
    """Dispatcher error handler: log the update that caused the failure.

    Fixes: ``logging.warn`` is a deprecated alias of ``logging.warning``;
    also pass the values as lazy %-style arguments instead of eagerly
    interpolating the string.  The logged message is unchanged.
    """
    logging.warning('BOT\tUpdate "%s" caused error "%s"', update, error)
def send_msg(bot, contact, msg, mono=True):
    """Send *msg* to *contact* as Markdown; wrap it in a code fence when *mono*."""
    if mono:
        text = u"```\n" + msg + u"\n```"
    else:
        text = msg
    bot.sendMessage(contact, text=text, parse_mode="Markdown",
                    disable_web_page_preview=True)
def help_handler(bot, update):
    """/help command: send the game manual as a series of messages."""
    uu = update.message.chat_id
    logging.info("USER\tServe user '%s' with command '/help'" % (str(uu),) )
    send_msg(bot,uu,"As a captain of the Enterprise, you should to fly through the galaxy and hunt down a number of Klingon ships. Each game starts with a different number of Klingons, friendly starbases and stars, spread throughout the galaxy.",False)
    send_msg(bot,uu,"The galaxy map is arranged as an 8 by 8 grid of quadrants. Each quadrant is further divided into an 8 by 8 grid of sectors. The Enterprise's local surroundings can seen on a text-based map of the current quadrant's sectors.",False)
    send_msg(bot,uu,"Stars were represented with a `*`, Klingon ships as a `>!<`, star bases as an `<O>`, and the Enterprise itself with an `-O-`.",False)
    send_msg(bot,uu,"The user can also use the long-range scan, LRS, to print out an abbreviated map of the quadrants lying directly around the Enterprise, listing the number of stars, Klingons and starbases in each quadrant.",False)
    send_msg(bot,uu,"Klingon ships can be attacked with either phasers or photon torpedos. Phasers do not have to be aimed, but their power falls off with distance, requiring the player to estimate how much power to put into each shot. Also phasers can affect the Enterprise's shields.",False)
    send_msg(bot,uu,"Torpedoes do not suffer this drop in power and will destroy a Klingon ship with a single hit, but they have to be aimed using polar coordinates, so misses are possible. Movement, combat and shields all drain the energy supply of the Enterprise, which can be topped up again by flying to a starbase. In case the Enterprise is low on energy or torpedoes, the player could warp to a starbase to refuel and repair.",False)
    send_msg(bot,uu,"The game ends when the Enterprise is destroyed or all Klingons are destroyed.\n\nUse these digits to specify the direction for the movement/combat:\n\n",False)
    # Monospaced direction keypad diagram (mono=True default).
    send_msg(bot,uu," 7 8 9\n \\|/ \n 4-o-6\n /|\\ \n 1 2 3\n\n")
    send_msg(bot,uu,"Press /start to start a new game. Use /help to read this info again and /about to get a short story about original game.",False)
def about_handler(bot, update):
    """/about command: send ASCII-art banner and the history of the game."""
    uu = update.message.chat_id
    logging.info("USER\tServe user '%s' with command '/about'" % (str(uu),) )
    send_msg(bot,uu," _____________ ___ \n / __/_ __/ _ | / _ \\\n _\ \ / / / __ |/ , _/\n/___/ /_/ /_/ |_/_/|_| \n _________ ______ __\n /_ __/ _ \/ __/ //_/\n / / / , _/ _// ,< \n /_/ /_/|_/___/_/|_| \n")
    send_msg(bot,uu,"Star Trek is a text-based computer game that puts the player in command of the USS Enterprise on a mission to hunt down and destroy an invading fleet of Klingon warships.\n",False)
    send_msg(bot,uu,"Trek developed out of a brainstorming session between Mike Mayfield and several high school friends in 1971. The original Star Trek television show had only recently ended its run and was still extremely popular. ",False)
    send_msg(bot,uu,"Mayfield and his \"geek friends\" wrote down a number of ideas for a game, and during the summer holidays he then started incorporating as many of them as he could on an SDS Sigma 7, using an illicitly borrowed account at the University of California, Irvine.\n\nThe original Sigma 7 version, and its descendants, were ported or copied to a wide variety of platforms. Several years later a lot of microcomputer versions appeared and were widely available and modified.\n\nStar Trek was reviewed in The Dragon magazine #38. Reviewer Mark Herro described the game in 1980 as \"one of the most popular (if not the most popular) computer games around.\"",False)
    send_msg(bot,uu,"This telegram version was built by [altsoph](http://altsoph.com) based on [a Python port](https://github.com/psychotimmy/trek-game) of the original game by [Tim Holyoake](http://www.tenpencepiece.net/).\nThanks to Evgeny Vasin and Ivan Yamshchikov for beta-testing.",False)
    send_msg(bot,uu,"_____________ _\n\\_(=====/_=_/___.--'-`--.__\n \\ \\ `,--,-.__.---'\n .--`\\\\--'../\n '---._____.|]\n\n ...dif-tor heh smusma...\n")
    send_msg(bot,uu,"\nPress /start to start a new game. Use /help to read about controls and /about to get this info again.",False)
def start_handler(bot, update):
    """Telegram '/start' command: begin a brand-new TrekGame for this chat.

    Replaces any game the chat may already have, advances it one step and
    sends the initial screen back to the user.
    """
    fsm_objects, contacts = bot.alt_data
    chat_id = update.message.chat_id
    if chat_id not in contacts:
        contacts = update_contacts(contacts, chat_id)
    logging.info("USER\tInit user '%s'" % (str(chat_id),))
    game = TrekGame()
    fsm_objects[chat_id] = game
    game.step()
    send_msg(bot, chat_id, game.result())
    bot.alt_data = [fsm_objects, contacts]
def command_handler(bot, update):
    """Route a plain-text message to the sender's running TrekGame FSM.

    Starts a new game if the chat has none yet; otherwise feeds the text
    to the game, auto-advances out of the 'main_cmd' state, replies with
    the game output, and logs the (sanitized) input and state transition.
    """
    fsm_objects, contacts = bot.alt_data
    chat_id = update.message.chat_id
    text = update.message.text
    if chat_id not in contacts:
        contacts = update_contacts(contacts, chat_id)
    if chat_id not in fsm_objects:
        # No game in progress for this chat: behave like '/start'.
        start_handler(bot, update)
        return
    game = fsm_objects[chat_id]
    prev_state = game.get_state()
    game.step(text)
    if game.get_state() == 'main_cmd':
        game.step(clear = False)
    send_msg(bot, chat_id, game.result())
    # Replace control characters with spaces and cap length before logging.
    clipped = text[:min(len(text), 128)]
    escm = "".join(ch if unicodedata.category(ch)[0] != "C" else " " for ch in clipped)
    logging.info("USER\tUser`s '%s' command received: '%s'. State changed from '%s' to '%s'" % (str(chat_id), escm, prev_state, game.get_state()) )
    bot.alt_data = [fsm_objects, contacts]
def main():
    """Entry point: configure logging, wire up telegram handlers, run the bot."""
    fsm_objects = dict()
    contacts = load_contacts()
    # One log file per process start, timestamped to the second.
    logging.basicConfig(filename='startrekbot_%s.log' % (datetime.datetime.now().strftime("%Y%m%d%H%M%S"),),
        format='%(asctime)s\t%(levelname)s\t%(message)s', datefmt='%Y-%m-%d %H:%M:%S', level=logging.INFO)
    logging.info('STATUS\tStarted')
    logging.info('STATUS\t%d contacts found' %(len(contacts,)))
    # Create the EventHandler and pass it your bot's token.
    updater = Updater(BOT_KEY)
    # Shared mutable state hung off the bot object so handlers can reach it.
    updater.bot.alt_data = [fsm_objects,contacts]
    # updater.bot.alt_data = [fsm_states,fsm_objects,contacts]
    dp = updater.dispatcher
    # on different commands - answer in Telegram
    dp.add_handler(CommandHandler("start", start_handler))
    dp.add_handler(CommandHandler("help", help_handler))
    dp.add_handler(CommandHandler("about", about_handler))
    # on noncommand i.e message - echo the message on Telegram
    dp.add_handler(MessageHandler([Filters.text], command_handler))
    # log all errors
    dp.add_error_handler(bot_error)
    # Start the Bot
    updater.start_polling(poll_interval=0.2)
    # Run the bot until the you presses Ctrl-C or the process receives SIGINT,
    # SIGTERM or SIGABRT. This should be used most of the time, since
    # start_polling() is non-blocking and will stop the bot gracefully.
    updater.idle()
# Script entry point.
if __name__ == '__main__':
    main()
|
import makEasy
import types,json
import meTime as mT
import math
from pathlib import Path
# Register the MAC032 plasma-cutting machine and load its per-material
# cutting parameters from the machine's JSON file.
Machine=makEasy.Machine("MAC032","Impianto Taglio Plasma - BBS")
Machine = makEasy.Machine("MAC032", "Impianto Taglio Plasma - BBS") if False else Machine
makEasy.WORKSET['PlasmaCut'].Machines.append(Machine)  # register on the PlasmaCut workset
# 'with' guarantees the handle is closed even if the JSON is malformed;
# the original left the file open on a parse error.
with open(Path(makEasy.FOLDER + "/Machines/MAC032") / "MaterialParameters.json", "r") as f:
    MAC_PARAMETERS = json.load(f)
#Machine.MacProperties= {
# "XMax": 6000,
# "YMax": 2500,
# "HPrices":{"Load":23.00,
# "Tool":23.00,
# "Move":23.00,
# "Work":40.00,
# "Look":23.00,
# "DwLd":23.00}
# }
# Per-operation parameter tables.  "DEFAULT" supplies fallback values; the
# "Work" table is additionally keyed by "<material>|<thickness>" (thickness
# presumably in mm -- confirm against MaterialParameters.json).
# Fields: CTime (fixed time constant), Speed (feed rate), KW (power draw),
# KFactor (Load only) -- exact units/semantics defined by makEasy/meTime.
Machine.Parameters={
    "Load":{
        "DEFAULT":{"CTime":3,"KFactor":1.6,"KW":3.5}
    },
    "Tool":{
        "DEFAULT": {"CTime": 3,"KW": 2.5}
    },
    "Move":{
        "DEFAULT": {"CTime": 0.1,"Speed":8000,"KW": 13.5}
    },
    "Work":{
        # Cutting speed/power per material|thickness; thicker sheets cut
        # slower and draw more power.
        "DEFAULT": {"CTime":0.1,"Speed": 2000,"KW":40},
        "S235JR|2": {"Speed": 3000,"KW":21.75},
        "S235JR|3": {"Speed": 2900,"KW":22.75},
        "S235JR|4": {"Speed": 2700,"KW":22.75},
        "S235JR|5": {"Speed": 2500,"KW":22.75},
        "S235JR|6": {"Speed": 2400,"KW":25.75},
        "S235JR|8": {"Speed": 2100,"KW":28.75},
        "S235JR|10": {"Speed": 2000,"KW":33.55},
        "S235JR|12": {"Speed": 1950,"KW":39.00},
        "S235JR|15": {"Speed": 1800,"KW":42.50},
        "S235JR|20": {"Speed": 1050,"KW":43.75},
        "S235JR|25":{"Speed": 750,"KW": 46.50},
        "S275JR|5": {"Speed": 2700,"KW": 24.5},
        "S275JR|10":{"Speed": 2100,"KW": 31.50}
    },
    "Look":{
        "DEFAULT": {"CTime": 1,"KW": 12}
    },
    "DwLd":{
        "DEFAULT": {"CTime": 0.15,"KW": 3.5}
    }
}
def getParameters(self,work_parameters):
    """Resolve per-operation machine parameters for a material/thickness.

    Starts from each operation's "DEFAULT" entry and overlays any values
    found under the "<material>|<thickness>" key.

    :param work_parameters: dict with at least 'sheet_mat' and 'sheet_thk'.
    :returns: dict with 'Material', 'Thickness' and one merged parameter
        dict per operation listed in makEasy.TTimes.
    """
    material=work_parameters['sheet_mat']
    thk=work_parameters['sheet_thk']
    result={'Material':material,'Thickness':thk}
    id_mat=material+'|'+str(thk)
    for t in makEasy.TTimes:
        # BUG FIX: copy the defaults.  The original assigned the shared
        # "DEFAULT" dict by reference, so material-specific overrides
        # permanently mutated Machine.Parameters[t]["DEFAULT"] for every
        # subsequent call.
        result[t] = dict(self.Parameters[t]["DEFAULT"])
        if id_mat in self.Parameters[t]:
            result[t].update(self.Parameters[t][id_mat])
    return result
def newMacWorkTime(self,work_parameters):
    """Build a mT.WorkTime configured with hourly costs and time parameters.

    :param work_parameters: dict with 'sheet_mat', 'sheet_thk' and
        'total_area' keys.
    :returns: the configured mT.WorkTime object.
    """
    wt=mT.WorkTime()
    # Hourly cost per operation phase.
    wt.Load.HourlyCost=20.00
    wt.Tool.HourlyCost=20.00
    wt.Move.HourlyCost=30.00
    wt.Work.HourlyCost=50.00
    wt.Look.HourlyCost=20.00
    wt.Dwld.HourlyCost=20.00
    material=work_parameters['sheet_mat']
    thk=work_parameters['sheet_thk']
    max_area=6.0*2.5  # table size (6000x2500 per the commented MacProperties -- presumably m^2)
    #load
    w=work_parameters['total_area']*7.9*thk  # weight estimate; 7.9 is presumably steel density
    pa=work_parameters['total_area']/max_area  # NOTE(review): computed but never used
    # Load time grows logarithmically with estimated weight.
    wt.Load.TimeParameters=mT.TimeParameters(BlockTime=3+2*math.log(w+1))
    print('load',wt.Load.TimeParameters)
    #tool
    wt.Tool.TimeParameters=mT.TimeParameters(BlockTime=0.5)
    print('tool',wt.Tool.TimeParameters)
    #move
    wt.Move.TimeParameters=mT.TimeParameters(BlockTime=0.05,Speed=8000.0)
    print('move',wt.Move.TimeParameters)
    #work
    # Cutting parameters for this material/thickness; raises KeyError if absent.
    mp=MAC_PARAMETERS[material][str(thk)]
    #mp=lp[list(lp.keys())[0]]
    #print(mp)
    wt.Work.TimeParameters=mT.TimeParameters(BlockTime=0.16,
        Speed=mp['Speed'])
    print('work',wt.Work.TimeParameters)
    #look
    wt.Look.TimeParameters=mT.TimeParameters(BlockTime=2)
    #print('look',wt.Look.TimeParameters)
    #dwld
    wt.Dwld.TimeParameters=mT.TimeParameters(BlockTime=0.3,Speed=1/thk)## not right!!! (author's note: "non va bene")
    print('dwld',wt.Dwld.TimeParameters)
    return wt
# Bind the functions above as instance methods and register the machine.
Machine.getParameters = types.MethodType( getParameters, Machine )
Machine.newMacWorkTime = types.MethodType( newMacWorkTime, Machine )
makEasy.MACHINES[Machine.Id]= Machine
|
import numpy as np
from process_testset import *
def levenshteinDistance(s1, s2):
    """Return the Levenshtein edit distance between two strings.

    Classic single-row dynamic programming: the shorter string is kept
    as the inner dimension so only O(min(len(s1), len(s2))) memory is used.
    """
    if len(s1) > len(s2):
        s1, s2 = s2, s1
    previous = list(range(len(s1) + 1))
    for row, ch2 in enumerate(s2, start=1):
        current = [row]
        for col, ch1 in enumerate(s1, start=1):
            if ch1 == ch2:
                # Characters match: no extra edit needed.
                current.append(previous[col - 1])
            else:
                # 1 + min(substitution, deletion, insertion).
                current.append(1 + min(previous[col - 1], previous[col], current[-1]))
        previous = current
    return previous[-1]
def score(trans_file="", test="test1", exp="exp1"):
    """Compute the mean MWE translation score over a test set.

    Reads the test set with read_excel_t2(); reference translations come
    either from *trans_file* (test="test2") or from the test set itself
    (test="test1", column *exp*).  Each MWE token is matched against the
    reference sentence by normalized Levenshtein distance (capped at 1),
    and 1 - mean(distance) is collected per MWE.

    :param trans_file: path to the reference MWE translations file (test2).
    :param test: "test1" or "test2" -- selects the translation source.
    :param exp: experiment column name used when test="test1".
    :returns: mean score over all non-empty MWEs (float).
    """
    values = read_excel_t2()
    vals = []
    if test == "test2":
        with open(trans_file, "r", encoding="utf-8") as t:
            trans = t.readlines()
    elif test == "test1":
        trans = [values[num][exp] for num in values]
    print(trans[:10])
    trans = [line.lower() for line in trans]
    for sent_num in range(len(trans)):
        # Ignore punctuation, including "..." marking discontinuous MWEs.
        ref_trans = re.sub("[{}]".format(string.punctuation), " ", trans[sent_num]).split()
        for mwe in values[sent_num]["mwes"]:
            new_match = []
            # BUG FIX: the original used `mwe is not ''` (identity test on a
            # string literal); equality is the correct comparison.
            if mwe != '':
                mwe_proc = re.sub("[{}]".format(string.punctuation), " ", mwe).split()
                for item in mwe_proc:
                    try:
                        distances = [levenshteinDistance(item, w) for w in ref_trans]
                        dist_norm = min(distances) / len(item)
                        item_dist = dist_norm if dist_norm <= 1 else 1
                        new_match.append(item_dist)
                    except (ValueError, ZeroDivisionError):
                        # min() on an empty reference, or a zero-length token.
                        pass
                vals.append(1 - (np.mean(new_match)))
    return np.mean(vals)
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Handlers for customizing oauthclient endpoints."""
from functools import partial, wraps
from flask import (
current_app,
flash,
redirect,
render_template,
request,
session,
url_for,
)
from flask_security.utils import do_flash, get_message
from invenio_db import db
from invenio_i18n import gettext as _
from ..errors import (
AlreadyLinkedError,
OAuthClientAlreadyAuthorized,
OAuthClientError,
OAuthClientMustRedirectLogin,
OAuthClientMustRedirectSignup,
OAuthClientTokenNotFound,
OAuthClientTokenNotSet,
OAuthClientUnAuthorized,
OAuthClientUserNotRegistered,
OAuthClientUserRequiresConfirmation,
OAuthError,
OAuthRejectedRequestError,
)
from ..utils import create_registrationform, fill_form
from .authorized import authorized_handler, extra_signup_handler
from .base import base_disconnect_handler
from .decorators import can_extra_signup
from .token import response_token_setter, token_session_key
def _oauth_error_handler(remote, f, *args, **kwargs):
    """Function to handle exceptions.

    Invokes ``f(remote, *args, **kwargs)`` and maps each known OAuth client
    exception to a user-facing response (flash message and/or redirect).
    Unrecoverable conditions are re-raised as :class:`OAuthError` so they
    surface as server errors.
    """
    try:
        return f(remote, *args, **kwargs)
    except OAuthClientError as e:
        # Log the underlying cause, then render the generic failure page.
        current_app.logger.warning(e.message, exc_info=True)
        return oauth2_handle_error(e.remote, e.response, e.code, e.uri, e.description)
    except OAuthClientUnAuthorized:
        return current_app.login_manager.unauthorized()
    except AlreadyLinkedError:
        flash(
            _("External service is already linked to another account."),
            category="danger",
        )
        return redirect(url_for("invenio_oauthclient_settings.index"))
    except OAuthRejectedRequestError:
        flash(_("You rejected the authentication request."), category="info")
        return redirect("/")
    except OAuthClientAlreadyAuthorized:
        # Nothing to do: the account is already set up.
        return redirect("/")
    except OAuthClientTokenNotFound:
        return redirect("/")
    except OAuthClientUserNotRegistered:
        raise OAuthError("Could not create user.", remote)
    except OAuthClientTokenNotSet:
        raise OAuthError("Could not create token for user.", remote)
    except OAuthClientMustRedirectSignup as e:
        # Extra signup information is required: send the user to signup.
        return redirect(
            url_for(
                ".signup",
                remote_app=remote.name,
            )
        )
    except OAuthClientMustRedirectLogin as e:
        return redirect(
            url_for(
                ".login",
                remote_app=remote.name,
            )
        )
#
# Error handling decorators
#
def oauth_resp_remote_error_handler(f):
    """Decorator to handle exceptions for handlers taking (resp, remote)."""

    @wraps(f)
    def wrapper(resp, remote, *args, **kwargs):
        # OAuthErrors should not happen, so they are not caught here. Hence
        # they will result in a 500 Internal Server Error which is what we
        # are interested in.
        return _oauth_error_handler(remote, partial(f, resp), *args, **kwargs)

    return wrapper
def oauth_remote_error_handler(f):
    """Decorator to handle exceptions for handlers taking (remote, ...)."""

    @wraps(f)
    def wrapper(remote, *args, **kwargs):
        # OAuthErrors are deliberately left uncaught so they surface as a
        # 500 Internal Server Error, which is the behaviour we want.
        return _oauth_error_handler(remote, f, *args, **kwargs)

    return wrapper
#
# Handlers
#
@oauth_resp_remote_error_handler
def authorized_default_handler(resp, remote, *args, **kwargs):
    """Store access token in session.

    Default authorized handler.

    :param remote: The remote application.
    :param resp: The response.
    :returns: Redirect response.
    """
    response_token_setter(remote, resp)
    # Persist the token stored by the setter above.
    db.session.commit()
    return redirect(url_for("invenio_oauthclient_settings.index"))
@oauth_resp_remote_error_handler
def authorized_signup_handler(resp, remote, *args, **kwargs):
    """Handle sign-in/up functionality.

    :param remote: The remote application.
    :param resp: The response.
    :returns: Redirect response.
    """
    try:
        next_url = authorized_handler(resp, remote, *args, **kwargs)
        # Redirect to next
        if next_url:
            return redirect(next_url)
        return redirect(url_for("invenio_oauthclient_settings.index"))
    except OAuthClientUserRequiresConfirmation as exc:
        # The account exists but its e-mail is unconfirmed: inform the user
        # and send them home instead of logging them in.
        do_flash(
            _(
                f"A confirmation email has already been sent to {exc.user.email}. Please confirm your email to be able to log in."
            ),
            category="success",
        )
        return redirect("/")
@oauth_remote_error_handler
def disconnect_handler(remote, *args, **kwargs):
    """Handle unlinking of remote account.

    This default handler will just delete the remote account link. You may
    wish to extend this module to perform clean-up in the remote service
    before removing the link (e.g. removing install webhooks).

    :param remote: The remote application.
    :returns: Redirect response.
    """
    base_disconnect_handler(remote, *args, **kwargs)
    return redirect(url_for("invenio_oauthclient_settings.index"))
@oauth_remote_error_handler
@can_extra_signup
def signup_handler(remote, *args, **kwargs):
    """Handle extra signup information.

    This should be called when the account info from the remote `info` endpoint is
    not enough to register the user (e.g. e-mail missing): it will show the
    registration form, validate it on submission and register the user.

    :param remote: The remote application.
    :returns: Redirect response or the template rendered.
    """
    form = create_registrationform(request.form, oauth_remote_app=remote)
    if not form.is_submitted():
        # Show the form when the user is redirected here after `authorized`
        # (GET request), to fill in the missing information (e.g. e-mail)
        session_prefix = token_session_key(remote.name)
        account_info = session.get(session_prefix + "_account_info")
        # Pre-fill form
        fill_form(form, account_info["user"])
        return render_template(
            current_app.config["OAUTHCLIENT_SIGNUP_TEMPLATE"],
            form=form,
            remote=remote,
            app_title=current_app.config["OAUTHCLIENT_REMOTE_APPS"][remote.name].get(
                "title", ""
            ),
            app_description=current_app.config["OAUTHCLIENT_REMOTE_APPS"][
                remote.name
            ].get("description", ""),
            app_icon=current_app.config["OAUTHCLIENT_REMOTE_APPS"][remote.name].get(
                "icon", None
            ),
        )
    elif form.is_submitted() and not form.errors:
        # Form is submitted (POST request): validate the user input and register
        # the user
        try:
            next_url = extra_signup_handler(remote, form, *args, **kwargs)
        except OAuthClientUnAuthorized:
            # Redirect the user to login page
            return redirect(url_for("security.login"))
        except OAuthClientUserRequiresConfirmation as exc:
            # Redirect the user after registration (which doesn't include the
            # activation), waiting for user to confirm his email.
            do_flash(*get_message("CONFIRM_REGISTRATION", email=exc.user.email))
            return redirect("/")
        if next_url:
            return redirect(next_url)
        else:
            return redirect("/")
def oauth2_handle_error(remote, resp, error_code, error_uri, error_description):
    """Handle errors during exchange of one-time code for an access tokens.

    The detailed error arguments are intentionally not shown to the user;
    only a generic failure message is flashed before redirecting home.
    """
    flash(_("Authorization with remote service failed."))
    return redirect("/")
|
# import libraries
from gtts import gTTS
import pygame,picamera
import pytesseract
import os,time
from PIL import Image
import numpy as np
import cv2
import datetime
import pyaudio
from subprocess import call
import speech_recognition as sr
import shelve,os
#to get foldername for captured image by date (e.g. "3-14")
dt=datetime.datetime.now()
month=dt.month
day=dt.day
folderName=str(month)+'-'+str(day)
#create the folders if its new day
if folderName not in os.listdir('/home/pi/project'):
    validNewFolder=True  # NOTE(review): set but never read elsewhere in view
    currentDir = '/home/pi/project/'+str(folderName)
    os.makedirs(currentDir)
    os.makedirs(currentDir+'/input') #folder for captured images
    os.makedirs(currentDir+'/audio') #folder for audio outputs
    os.makedirs(currentDir+'/process') #folder for images after image processing
    os.makedirs(currentDir+'/textfile')#folder for textfile to write output text
    os.makedirs(currentDir+'/pdffile')#folder for pdffiles to write output text
    imageNum=0 # for naming of each image by counter
    shelfFile = shelve.open('number') #to store the count in shelve
    shelfFile['imageNum']=imageNum
    shelfFile.close()
else:
    #if folder is already there(not a new day): restore today's counter
    shelfFile = shelve.open('number')
    imageNum = shelfFile['imageNum']
    shelfFile.close()
def recognition(): #function for voice recognition part
    """Capture one spoken command and return the recognized text.

    Plays a prompt sound, records from the default microphone and sends
    the audio to Google's speech recognizer.  On recognition or network
    errors it retries (recursively) until a phrase is understood.

    :returns: the recognized command text (str).
    """
    r = sr.Recognizer() # voice for assistant
    r.energy_threshold=4000
    os.system("omxplayer /home/pi/Downloads/plucky.mp3")  # audible "listening" cue
    with sr.Microphone() as source:
        print ('listening..')
        time.sleep(1)
        audio = r.listen(source) #listen to blind people's command
        print ('processing')
        #voice recognition part
        try:
            message = (r.recognize_google(audio, language = 'en-us', show_all=False))
            #call(["espeak", message])
            print(message)
        except sr.UnknownValueError:
            print("Could not understand audio")
            # BUG FIX: the original called recognition() without returning its
            # result, so the trailing `return message` raised NameError after
            # any retry.
            return recognition()
        except sr.RequestError as e:
            print("Could not request results; {0}".format(e))
            return recognition()
    return message #return text recognized by voice recognition
def picam(imageNum):
    """Capture a page photo to <folder>/input/<imageNum>.jpg.

    :param imageNum: counter used to name the image file.
    """
    os.chdir('/home/pi/project/'+str(folderName)+'/input')#change directory to input folder
    image=str(imageNum)+'.jpg' #name for image
    camera=picamera.PiCamera()# initialize picamera
    try:
        camera.capture(image) #capture image
    finally:
        # Always release the camera: a failed capture otherwise leaves the
        # device locked until the process exits.
        camera.close()
def image_process(imageNum):
    """Deskew and binarize the captured page, then run OCR on it.

    Reads <folder>/input/<imageNum>.jpg, estimates the text skew angle
    from the thresholded foreground pixels, rotates the page upright,
    writes the binarized result to <folder>/process/<imageNum>.png and
    extracts its text with Tesseract.

    :param imageNum: counter identifying the captured image.
    :returns: the text extracted from the image (str).
    """
    os.chdir('/home/pi/project/'+str(folderName)+'/input') # change the directory to folder
    image=str(imageNum)+'.jpg'
    image = cv2.imread(image)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.bitwise_not(gray)
    # Otsu threshold: text pixels become foreground (255).
    thresh = cv2.threshold(gray, 0, 255,
        cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
    #cv2.imshow('bin',thresh)
    # Estimate the dominant skew angle from the minimum-area rectangle
    # around all foreground pixel coordinates.
    coords = np.column_stack(np.where(thresh > 0))
    angle = cv2.minAreaRect(coords)[-1]
    if angle < -45:
        angle = -(90 + angle)
    else:
        angle = -angle
    # Rotate around the image center to deskew.
    (h,w) = image.shape[:2]
    center = (w // 2, h // 2)
    M = cv2.getRotationMatrix2D(center, angle, 1.0)
    rotated = cv2.warpAffine(image, M, (w, h),
        flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)
    kernal = np.ones((1,1),np.uint8)
    img = cv2.dilate(rotated, kernal,iterations=1)
    # NOTE(review): erode is applied to `rotated`, discarding the dilate
    # result above; with a 1x1 kernel both are effectively no-ops anyway --
    # confirm the intended morphology.
    img = cv2.erode(rotated, kernal, iterations=1)
    #img = cv2.adaptiveThreshold(img,255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 31,2)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.bitwise_not(gray)
    # threshold the image, setting all foreground pixels to
    # 255 and all background pixels to 0
    thresh = cv2.threshold(gray, 0, 255,
        cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
    os.chdir('/home/pi/project/'+str(folderName)+'/process')
    cv2.imwrite(str(imageNum)+'.png',thresh)
    # OCR the processed page with Tesseract.
    im=Image.open(str(imageNum)+'.png')
    text=pytesseract.image_to_string(im, lang='eng')
    return text #return the text extracted from image
def tts(text,imageNum):
    """Convert *text* to speech, save it as <imageNum>.mp3 and start playback."""
    os.chdir('/home/pi/project/'+str(folderName)+'/audio')
    filename = str(imageNum) + '.mp3'
    # Generate the speech file via Google TTS.
    gTTS(text=text, lang='en').save(filename)
    pygame.mixer.init()
    pygame.mixer.music.load(filename)
    pygame.mixer.music.play()
def musicPlay(imageNum):
    """Replay the previously generated audio file <imageNum>.mp3."""
    audio_dir = '/home/pi/project/' + str(folderName) + '/audio'
    os.chdir(audio_dir)
    track = str(imageNum) + '.mp3'
    pygame.mixer.init()
    pygame.mixer.music.load(track)
    pygame.mixer.music.play()
def textfile(text,imageNum):
    """Write *text* to <folder>/textfile/<imageNum>.txt and convert it to PDF.

    :param text: the OCR'd text to persist.
    :param imageNum: counter used to name the output files.
    """
    text_dir = '/home/pi/project/'+str(folderName)+'/textfile/'
    os.chdir(text_dir)
    filename=str(imageNum)+'.txt'
    # 'with' closes the file even if write() fails.
    with open(filename, 'w') as out:
        out.write(text)
    # BUG FIX: the original did `textPath = os.chdir(...)`, which always
    # returns None, producing a bogus "None<num>.txt" path for the converter.
    textPath = text_dir + filename
    pdfPath='/home/pi/project/'+str(folderName)+'/pdffile/'+str(imageNum)+'.pdf'
    path=pdfPath+' '+textPath
    cmnd='python3 txt2pdf.py -o '+path
    os.system(cmnd)
def reader(imageNum):
    """Capture one page, store its text, and read it aloud.

    If the user confirms with "ok", the next page's processing is started
    on a background thread while the current page's audio plays.

    :param imageNum: counter for naming the captured image and its outputs.
    """
    picam(imageNum)
    #text = image_process(imageNum)
    # NOTE(review): OCR is bypassed with a hard-coded placeholder --
    # presumably left over from testing; confirm before shipping.
    text = 'good morning'
    textfile(text, imageNum)
    os.system('''flite -voice slt -t "place the next page and say ok
    for better performence"''')
    chumm=recognition()
    if 'ok' in chumm:
        import threading
        imageNum2=imageNum
        # NOTE(review): this increments only the local binding; the caller's
        # counter is persisted separately via the shelve file -- verify.
        imageNum+=1
        # Process the next page concurrently with playback of the current one.
        threadObj = threading.Thread(target=main, args=['start reading',imageNum])
        threadObj.start()
        tts(text,imageNum2) #thrading
    else:
        tts(text,imageNum)
        imageNum+=1
def main(message,imageNum):
    """Dispatch one recognized voice command, then listen for the next.

    Supported commands: 'reading' (capture a page and read it aloud),
    'previous'/'page' (replay the page before last), 'shutdown',
    'refresh'/'reboot', and 'pdf'/'convert' (open a day's pdf folder).
    Recurses at the end to keep listening.

    :param message: text returned by recognition().
    :param imageNum: current page counter.
    """
    if 'reading' in message:
        os.system('flite -voice slt -t "place the book or page on table and say ok"')
        message=recognition()
        if 'ok' in message:
            reader(imageNum)
            # Persist the page counter so numbering survives restarts.
            shelfFile = shelve.open('number')
            shelfFile['imageNum']=imageNum
            shelfFile.close()
            os.system('flite -voice slt -t "do you want to hear once more?"')
            message = recognition()
            if 'yes' in message:
                musicPlay(imageNum-1)
    # BUG FIX: the original tested `('previous' or 'page') in message`, which
    # only ever checked 'previous' (likewise for the conditions below).
    if 'previous' in message or 'page' in message:
        shelfFile = shelve.open('number')
        imageNum=shelfFile['imageNum']
        shelfFile.close()
        imageNum=imageNum-2
        musicPlay(imageNum)
    if 'shutdown' in message:
        os.system("sudo shutdown")
    if 'refresh' in message or 'reboot' in message:
        os.system("sudo reboot")
    if 'pdf' in message or 'convert' in message:
        import PyPDF2
        os.system('flite -voice slt -t "which date of pages do you want to convert?"')
        message=recognition()
        # BUG FIX: the original's trailing `else` paired only with the
        # 'yesterday' check, so a 'today' answer also fell through to
        # strptime('today ...') and crashed.
        if 'today' in message:
            os.chdir('/home/pi/project/'+ folderName+'/pdffile')
        elif 'yesterday' in message:
            dt=datetime.datetime.now()
            # NOTE(review): day-1 is wrong on the first day of a month.
            date=str(dt.month)+'-'+str(dt.day-1)
            os.chdir('/home/pi/project/'+date+'/pdffile')
        else:
            dt=datetime.datetime.strptime(str(message),'%B %d')
            date=str(dt.month)+'-'+str(dt.day)
            os.chdir('/home/pi/project/'+date+'/pdffile')
    # Listen for the next command and recurse.
    message=recognition()
    main(message,imageNum)
|
import sys
from os import listdir, makedirs
from os.path import join, exists, splitext
import numpy as np
from sklearn.utils import shuffle
from sklearn.externals import joblib
from sklearn import linear_model
import pickle
# NOTE(review): Python 2 syntax (print statements); this script only runs
# under Python 2 and uses the long-deprecated sklearn.externals.joblib.
# Configure
train_by_all = False  # if True, train on every sample of each class
n_trains = 200        # samples per class used for training otherwise

# Read command line arguments: argv[1] = feature dir, argv[2] = model dir
argv = sys.argv
argc = len(argv)
if argc < 2:
    print "Not enough arguments."
    quit(0)
if argc >=3:
    save_model = True
    model_save_dir = argv[2]
else:
    save_model = False

# Prepare directory
feature_dir = argv[1]
if save_model and not exists(model_save_dir):
    makedirs(model_save_dir)

# Iteration to load features: one .npy file per class.
filenames = listdir(feature_dir)
class_id = 0
class_names = []
for filename in filenames:
    filepath = join(feature_dir, filename)
    if not exists(filepath):
        continue
    # Get class label
    class_name, _ = splitext(filename)
    # Load and shuffle features
    features = shuffle(np.load(filepath))
    # Construct train set
    if train_by_all:
        n_trains = features.shape[0]
    ## Features
    # On the first iteration train_feat is undefined, so vstack raises
    # NameError and the except branch seeds the accumulator.
    try:
        train_feat = np.vstack((train_feat, features[0:n_trains,:]))
    except:
        train_feat = np.atleast_2d(features[0:n_trains,:])
    ## Labels
    train_label_vec = np.ones(n_trains) * class_id
    try:
        train_label = np.hstack((train_label, train_label_vec))
    except:
        train_label = train_label_vec
    # Construct test set from the remaining samples (last 100 if
    # training on everything).
    if train_by_all:
        n_tests = 100
    else:
        n_tests = features.shape[0] - n_trains
    ind_test = features.shape[0] - n_tests
    # Features
    try:
        test_feat = np.vstack((test_feat, features[ind_test:]))
    except:
        test_feat = np.atleast_2d(features[ind_test:])
    # Labels
    test_label_vec = np.ones(n_tests) * class_id
    try:
        test_label = np.hstack((test_label, test_label_vec))
    except:
        test_label = test_label_vec
    # Preserve class label and its id
    class_names.append(class_name)
    class_id += 1

# Shuffle train data
train_feat, train_label = shuffle(train_feat, train_label)

print "Train Classifier..."
clf = linear_model.SGDClassifier(average=50, n_iter=20)
clf.fit(train_feat, train_label)

print "Predict..."
# Accuracy = fraction of test samples whose prediction matches the label.
acc = np.sum(clf.predict(test_feat) == test_label) * 1.0 / np.size(test_label)
print "Accuracy: ", acc

if save_model:
    print "Save model"
    joblib.dump(clf, join(model_save_dir, 'scene.pkl'))
    with open(join(model_save_dir, 'class_names.txt'), 'wb') as f:
        pickle.dump(class_names, f)
"""Selenium Page Adapter."""
import os
from pkg_resources import get_distribution, DistributionNotFound
from .descriptors import ElementDescriptor, AllElements, AvailableElements
from .adapter import PageAdapter
# Standard setuptools trick: report the installed distribution's version,
# falling back to 'development' when running from a source checkout.
try:
    _dist = get_distribution('selenium_page_adapter')
    if not __file__.startswith(os.path.join(_dist.location, 'selenium_page_adapter', '')):
        # not installed, but there is another version that *is*
        raise DistributionNotFound
except DistributionNotFound:
    __version__ = 'development'
else:
    __version__ = _dist.version
|
# Linear regression on the iris 'setosa' petal measurements.
from sklearn.datasets import load_iris
from sklearn.linear_model import LinearRegression
# BUG FIX: the original commented out this import but still calls plt.*
# below, which raised NameError at runtime.
import matplotlib.pyplot as plt
import numpy as np

dane = load_iris()
print(dane.keys())
print("ilość danych z rozmiarami płatków: {}".format(dane.data.shape))
print("dane iris zawierają {} cech o nazwach: {}".format(dane.data.shape[1], dane.feature_names))
print("dane iris zawierają {} gatunków o nazwach: {}".format(dane.target.shape[0], dane.target_names))

# The 150 samples form 3 consecutive blocks of 50, one per species.
x=dane["data"][:50, 2]  # first 50 petal-length measurements (setosa)
y=dane["data"][:50, 3]  # first 50 petal-width measurements (setosa)

x = x.reshape(-1,1)  # sklearn expects a 2-D feature matrix
l_r = LinearRegression().fit(x, y)
#print(l_r.get_params())

x_pred = [[0], [2]]           # predict widths at these two petal lengths
y_pred = l_r.predict(x_pred)  # model predictions for the fitted line

plt.plot(x, y, "go", alpha=0.3)
plt.plot(x_pred, y_pred, "r-")
plt.xlabel("długość płatka setosa (cm)", fontsize=14)
plt.ylabel("szerokość płatka setosa (cm)", rotation=90, fontsize=14)
plt.axis([0, 2, 0, 1])
plt.savefig("pomiary_setosa.png", dpi=300)  # save the plot as PNG
plt.show()  # show the interactive plot
|
#!/usr/bin/python3
import requests
from wxpy import *
import json
import logging
logging.basicConfig(level=logging.INFO)
# Reduce noise from the network-layer loggers.
for m in 'requests', 'urllib3':
    logging.getLogger(m).setLevel(logging.WARNING)
# Log in to WeChat, reusing the cached session if available.
bot = Bot(cache_path=True)
# Enable puid lookup and set the path for the puid mapping data.
bot.enable_puid('wxpy_puid.pkl')
# 在群中回复用户文本消息
# @bot.register(Group, TEXT)
# def auto_reply_text_in_groups(msg):
# sender = msg.sender
# message = {'type':msg.type,'text':msg.text,'file_path':''}
# data = {'sender_puid':sender.puid,'member_puid':msg.member.puid,'message' : message}
# res = requests.post('http://localhost:3000/wechat',json = data)
# res_data = json.loads(res.text)
# if (res_data['type']=='Text'):
# sender.send(res_data['info'])
# Automatically accept incoming friend requests.
@bot.register(msg_types=FRIENDS)
def auto_accept_friends(msg):
    """Accept a friend request and forward the event to the local backend.

    Posts the event to the Node service at localhost:3000; if the reply is
    a text payload, it is sent to the newly accepted friend.
    """
    sender = bot.accept_friend(msg.card)
    message = {'type':msg.type,'text':msg.text,'file_path':''}
    data = {'sender_puid':sender.puid,'member_puid':'','message' : message}
    res = requests.post('http://localhost:3000/wechat',json = data)
    res_data = json.loads(res.text)
    if (res_data['type']=='Text'):
        sender.send(res_data['info'])
# Reply to private (one-to-one) text messages from friends.
@bot.register(Friend,TEXT)
def auto_reply_text_to_firends(msg):
    """Forward a friend's text message to the backend and act on the reply.

    The backend can answer with a plain 'Text' reply, or with an
    'add_member' action that invites the sender into a named group,
    creating the group first if it does not exist yet.
    """
    sender = msg.sender
    message = {'type':msg.type,'text':msg.text,'file_path':''}
    data = {'sender_puid':sender.puid,'member_puid':'','message' : message}
    res = requests.post('http://localhost:3000/wechat',json = data)
    res_data = json.loads(res.text)
    if res_data['type']=='Text':
        sender.send(res_data['info'])
    elif res_data['type']=='add_member':
        gs = bot.groups().search(res_data['groupname'])
        if len(gs) == 0:
            # Group does not exist yet: create it with the sender in it.
            wy = bot.friends().search('文洋')[0]
            g = bot.create_group([wy,sender], topic=res_data['groupname'])
            g.send('Welcome! 欢迎 {}加入我们'.format(sender.name))
        if len(gs) > 0:
            g = gs[0]
            if sender not in g:
                g.add_members(sender, 'Welcome!')
                g.send('Welcome! 欢迎 {}加入我们'.format(sender.name))
        if res_data['info']!='':
            sender.send(res_data['info'])

# Drop into an interactive console and keep the bot process alive.
embed()
|
from spell_checker.Radix import *
class RadixTree(Radix):
"""Tree of radices, where each path, if final, represents a word.
"""
def __contains__(self,other,tree=None):
"""Checks if a word is contained in the CharacterTree.
In other words, checks if all characters are present in the object and if the last one is considered final.
Parameters
----------
self : CharacterTree
A CharacterTree object.
other : str
A string (word) whose existance in the CharacterTree is to be checked.
tree : Radix, NoneType (default = None)
Due to the recursive nature of this method, this keyworded attribute is employed in the recursive call. If left with the default value, the object itself will be set as tree.
Returns
-------
bool
The result of the verification.
"""
if isinstance(other,str):
#Finding the radix in the Tree among all the possibilities
if tree == None:
#print("No tree was defined. Defining tree to be self...")
tree = self
#else: print("tree = {} | word: {}".format(tree.radix,other))
if other == "": return True
if len(other) > 0:
if len(tree.next_radices) > 0:
#print("There are children! Analyzing one-by-one...")
for next_radix in tree.next_radices:
#print("Current radix: {}".format(next_radix.radix))
try:
for i in range(len(next_radix.radix)):
if next_radix.radix[i] != other[i]:
break
else:
if i == len(next_radix.radix) - 1: return self.__contains__(other[i + 1:],next_radix)
except IndexError:
continue
#print("None of the words are related! Returning False.")
return False
else:
#print("There are no children! Returning False.")
return False
else:
#print("Word processed! Returning .is_final.")
return tree.is_final
else:
#print("The other parameter is not a string object! Returning False.")
return False
def __init__(self,*args):
"""Constructor of the RadixTree class.
Parameters
----------
self : RadixTree
A RadixTree object.
*args : list, str, tuple
All the remaining parameters shall be added as words of this RadixTree. If an argument is of type list or tuple, each of its elements will be added individually as well. If a string is provided as an argument, it will be normally added. Otherwise, an Exception will be raised during addition.
Returns
-------
RadixTree
A RadixTree object containing the elements provided in *args as possible words (if any).
"""
super().__init__('*')
self.is_final = True
self.loaded_words = 1
for arg in args:
if isinstance(arg,list) or isinstance(arg,tuple):
for item in arg: self.insert(item)
else: self.insert(arg)
def __repr__(self):
"""Representation of the RadixTree object.
Parameters
----------
self : RadixTree
A CharacterTree object.
Returns
-------
str
The string representation of the object.
"""
self.next_radices.sort()
return "<RadixTree object>\n{} words loaded.\nAvailable Initial Radices: {}".format(self.loaded_words,
", ".join([radix.radix for radix in self.next_radices]))
    def check(self,path):
        """Checks a text for mis-spellings.

        Parameters
        ----------
        self : RadixTree
            A RadixTree object.
        path : str (path)
            The path to the file to be spell-checked.

        Returns
        -------
        list
            A list of the unknown entries (one per flagged line).
        """
        with open(path, "r", encoding="utf-8") as file:
            misspellings = []
            # NOTE(review): each `word` here is the full token list of a line,
            # but only its first token (word[0]) is tested, and the whole list
            # is appended on a miss -- confirm whether per-token checking was
            # intended.
            for word in [line.strip("\n").split(" ") for line in file.readlines()]:
                if word[0] not in self: misspellings.append(word)
            return misspellings
    def insert(self,word,tree=None):
        """Inserts a word in the RadixTree.

        Walks the existing radices, splitting a radix in two when the new
        word shares only part of it, appending a new Radix child when no
        existing child matches, and marking the terminal node is_final.

        Parameters
        ----------
        self : RadixTree
            A RadixTree object.
        word : str
            The word string to be added. A TypeError is raised if another
            object type is provided; a ValueError if the word contains a
            digit or '*' (reserved for the root radix).
        tree : Radix, NoneType (default = None)
            Due to the recursive nature of this method, this keyworded attribute is employed in the recursive call. If left with the default value, the object itself will be set as tree.
        """
        if not isinstance(word,str): raise TypeError("Only strings can be added to CharacterTree objects.")
        for i in list(range(10)) + ["*"]:
            if str(i) in word: raise ValueError("Non-allowed characters were found in the word. Did your word contain any number or \"*\" as a character?")
        #Finding the radix in the Tree among all the possibilities
        if tree == None:
            tree = self
        if len(word) > 0:
            if len(tree.next_radices) > 0:
                # Compare the word against each child radix character by
                # character.
                for next_radix in tree.next_radices:
                    try:
                        for i in range(len(next_radix.radix)):
                            if next_radix.radix[i] != word[i]:
                                # Mismatch on the very first character: this
                                # child is unrelated, try the next one.
                                if i == 0: break
                            else:
                                if len(word[i:]) > 0:
                                    # Partial match: split the radix at i --
                                    # the common prefix stays in next_radix,
                                    # the old suffix becomes its single child,
                                    # then the word's remainder is inserted.
                                    new_next_radices = next_radix.next_radices
                                    next_radix.next_radices = [Radix(next_radix.radix[i:],next_radix,new_next_radices)]
                                    next_radix.radix = word[:i]
                                    next_radix.is_final = False
                                    for radix in new_next_radices: radix.previous_radix = next_radix.next_radices[0]
                                    del(new_next_radices)
                                    return self.insert(word[i:],next_radix)
                                else:
                                    # Word ends exactly at the split point.
                                    if next_radix.is_final == True: raise ValueError("The word {} has already been inserted!".format(word[:i]))
                                    else:
                                        next_radix.is_final = True
                                        self.loaded_words += 1
                            else:
                                # Characters match so far; on the last radix
                                # character, descend with the remainder.
                                if i == len(next_radix.radix) - 1: return self.insert(word[i + 1:],next_radix)
                    except IndexError:
                        # The word is a proper prefix of this radix: split so
                        # the word becomes the parent radix and the old suffix
                        # its child.
                        new_next_radices = next_radix.next_radices
                        next_radix.next_radices = [Radix(next_radix.radix[i:],next_radix,new_next_radices)]
                        next_radix.radix = word
                        for radix in new_next_radices: radix.previous_radix = next_radix.next_radices[0]
                        del(new_next_radices)
                        self.loaded_words += 1
                        return
                #if tree.is_final == False:
                #    tree.is_final = True
                #    self.loaded_words += 1
                # No child is related to the word: append a fresh radix.
                tree.next_radices.append(Radix(word,tree))
                self.loaded_words += 1
            else:
                # No children at all: append the word as a new radix.
                tree.next_radices.append(Radix(word,tree))
                self.loaded_words += 1
        else:
            # Word fully consumed: mark this node as a word ending.
            if tree.is_final == False:
                tree.is_final = True
                self.loaded_words += 1
def remove(self,word,tree=None):
    """Remove *word* from the RadixTree, pruning/merging now-redundant radices.

    Parameters
    ----------
    self : RadixTree
        A RadixTree object.
    word : str
        The word to be removed from the tree. A ValueError is raised if the
        word is not contained in the tree; a TypeError if it is not a string.
    tree : Radix (default = None)
        Due to the recursive nature of this method, this keyworded attribute is employed in the recursive call. If left with the default value, the object itself will be set as tree.
    """
    if isinstance(word,str):
        #Finding the radix in the Tree among all the possibilities
        if tree == None:
            #print("No tree was defined. Defining tree to be self...")
            tree = self
        #else: print("tree = {} | word: {}".format(tree.radix,word))
        if len(word) > 0:
            if len(tree.next_radices) > 0:
                #print("There are children! Analyzing one-by-one...")
                for next_radix in tree.next_radices:
                    #print("Current radix: {}".format(next_radix.radix))
                    try:
                        # Compare this child's radix against the head of the
                        # word; on a complete radix match, recurse with the
                        # remaining suffix of the word.
                        for i in range(len(next_radix.radix)):
                            if next_radix.radix[i] != word[i]: break
                            elif i == len(next_radix.radix) - 1: return self.remove(word[i + 1:],next_radix)
                    # The word is shorter than this child's radix, so the
                    # child cannot lie on the word's path: try the next one.
                    except IndexError: continue
                raise ValueError("The word {} is not contained in this RadixTree.".format(word))
            else: raise ValueError("The word {} is not contained in this RadixTree.".format(word))
        else:
            # The whole word has been consumed: `tree` is the node ending it.
            # NOTE(review): if tree.is_final is False (the word is only a
            # prefix of stored words) this silently does nothing instead of
            # raising -- confirm that is intended.
            if tree.is_final == True:
                #print("{} is in the RadixTree! Removing...".format(word))
                previous_radix, tree.is_final = tree.previous_radix, False
                self.loaded_words -= 1
                # Walk back toward the root ("*"), deleting childless
                # non-final nodes and merging single-child chains.
                while tree.is_final == False and tree.radix != "*":
                    #print("Attempting to optimize RadixTree...")
                    if len(tree.next_radices) == 0:
                        #print("len(tree.next_radices)==0. Removing {}...".format(tree.radix))
                        previous_radix.next_radices.remove(tree)
                    elif len(tree.next_radices) == 1:
                        #print("len(tree.next_radices)==1. Merging {} with {}...".format(tree.radix,tree.next_radices[0].radix))
                        # NOTE(review): the merged node is unconditionally
                        # marked final; presumably it should inherit the
                        # finality of the absorbed child -- confirm.
                        tree.radix += tree.next_radices[0].radix
                        tree.next_radices += tree.next_radices[0].next_radices
                        tree.is_final, tree.next_radices = True, tree.next_radices[1:]
                        for radix in tree.next_radices:
                            radix.previous_radix = tree
                    else: break
                    tree, previous_radix = previous_radix, tree.previous_radix
    else: raise TypeError("The other parameter is not a string object.")
from AttackerCalc import AttackerCalc
from FeaturesCalc import FeaturesCalc
from PacketFilter import PacketFilter
from MTADownloader import MTADownloader
from NeuralNetwork import NeuralNetwork
from LegitimateDownloader import LegitimateDownloader
from Agent import Agent
from CSV import CSV
from scapy.all import *
import os, signal, sys, glob, time
import gym
import gym_DRLMal
class StartTrainingHandler():
    """Orchestrate DQN training over malware and legitimate pcap traffic.

    Pcaps come from MTADownloader (malware) and LegitimateDownloader
    (legitimate); each pcap is converted into a CSV of per-window traffic
    features (FeaturesCalc) that drives the gym 'DRLMal-v0' environment.
    Pcaps already used are recorded in a text file and skipped afterwards.
    """
    def __init__(self, nb_malware_training=1, nb_legitimate_training=1, window_size=10, single_csv=False):
        # User-facing validation messages are in Italian; left untouched.
        assert window_size > 0, "Inserire un valore valido per window size"
        assert nb_malware_training > 0, "Inserire un valore valido per nb_malware_training"
        assert nb_legitimate_training > 0, "Inserire un valore valido per nb_legitimate_training"
        assert (single_csv is True) or (single_csv is False), "Inserire un valore valido per single_csv"
        # When True, all pcaps' features are merged into one CSV and the
        # agent is trained once on it; otherwise one CSV per pcap.
        self.single_csv = single_csv
        # Number of pcaps of each class to train on.
        self.nb_malware_training = nb_malware_training
        self.nb_legitimate_training = nb_legitimate_training
        # Minimum number of packets per feature window.
        self.window_size = window_size
        self.mta = MTADownloader(save_downloaded=False)
        self.lg = LegitimateDownloader()
        # Loopback traffic is always excluded from feature extraction.
        self.ip_to_ignore = ["127.0.0.1"]
        # Text file recording which pcaps were already used for training.
        self.trained_pcaps = "trained_pcaps.txt"
        self.featuresCalc = FeaturesCalc(flow_type="malware", min_window_size=self.window_size)
        # One packet filter per protocol of interest (TCP / UDP / ICMP).
        filter_1 = PacketFilter(ip_whitelist_filter=[], ip_blacklist_filter=self.ip_to_ignore, TCP=True)
        filter_2 = PacketFilter(ip_whitelist_filter=[], ip_blacklist_filter=self.ip_to_ignore, UDP=True)
        filter_3 = PacketFilter(ip_whitelist_filter=[], ip_blacklist_filter=self.ip_to_ignore, ICMP=True)
        self.filters = [filter_1, filter_2, filter_3]
        # Env, Agent and NN definition
        ENV_NAME = 'DRLMal-v0'
        self.env = gym.make(ENV_NAME)
        nn = NeuralNetwork(input_shape=(1,) + self.env.observation_space.shape, output_dim=self.env.get_action_space().n)
        model = nn.create_default_mlp_network()
        self.agent = Agent(env=self.env, model=model, num_actions=self.env.get_action_space().n, batch_size=2)
        self.agent.create_default_dqn_agent(nb_steps_warmup=5)
    def start_training(self):
        """Run the training session configured in __init__.

        Either trains once on a single merged features CSV (single_csv=True)
        or alternates malware/legitimate pcaps until the requested number of
        trainings per class is reached or the available pcaps run out.
        """
        def check_if_pcap_is_in_trained(pcap):
            # Return True if `pcap` is already listed in the trained file.
            # NOTE(review): the bare except presumably only means to cover
            # "file does not exist yet", but it also hides real I/O errors
            # and leaks the handle on failure -- consider FileNotFoundError
            # and a with-statement.
            try:
                trained_pcaps_file = open(self.trained_pcaps, "r")
                lines = trained_pcaps_file.readlines()
                trovato = False
                pcap = pcap.replace("\n", "")
                for line in lines:
                    line = line.replace("\n", "")
                    if (line == pcap):
                        trovato = True
                        break
                trained_pcaps_file.close()
                return trovato
            except:
                return False
        def add_pcap_in_trained(pcap):
            # Append `pcap` to the trained-pcaps bookkeeping file.
            trained_pcaps_file = open(self.trained_pcaps, "a+")
            trained_pcaps_file.write(pcap + "\n")
            trained_pcaps_file.close()
        def create_malware_features_csv(pcap):
            # Build a per-window features CSV for a malware pcap, keeping
            # only packets that involve the computed attacker IPs.
            flow_type = "malware"
            pcap_name = pcap.split("/")
            pcap_name = pcap_name[len(pcap_name) - 1].replace(".pcap", "")
            if (self.featuresCalc.get_flow_type() == flow_type):
                pass
            else:
                self.featuresCalc.set_flow_type(flow_type)
            array_of_pkts = []
            attacker = AttackerCalc(pcap=pcap)
            ip_to_consider = attacker.compute_attacker()
            for filter in self.filters:
                filter.set_ip_whitelist_filter(ip_to_consider)
            pkts = rdpcap(pcap)
            csv = CSV(file_name=pcap_name, folder_name="Features_Malware")
            csv.create_empty_csv()
            csv.add_row(self.featuresCalc.get_features_name())
            filter_res = []
            for pkt in pkts:
                # Keep the packet if at least one protocol filter accepts it.
                for filter in self.filters:
                    if(filter.check_packet_filter(pkt) is True):
                        filter_res.append(True)
                    else:
                        filter_res.append(False)
                if(True in filter_res):
                    array_of_pkts.append(pkt)
                # Once a full window is collected, emit one feature row.
                if (len(array_of_pkts) >= self.featuresCalc.get_min_window_size()):
                    features = self.featuresCalc.compute_features(array_of_pkts)
                    csv.add_row(features)
                    array_of_pkts.clear()
                filter_res.clear()
            csv.close_csv()
            return csv
        def create_legitimate_features_csv(pcap):
            # Same as create_malware_features_csv, but with an empty IP
            # whitelist (all hosts) and output under Features_Legitimate.
            flow_type = "legitimate"
            pcap_name = pcap.split("/")
            pcap_name = pcap_name[len(pcap_name) - 1].replace(".pcap", "")
            if (self.featuresCalc.get_flow_type() == flow_type):
                pass
            else:
                self.featuresCalc.set_flow_type(flow_type)
            array_of_pkts = []
            for filter in self.filters:
                filter.set_ip_whitelist_filter([])
            pkts = rdpcap(pcap)
            csv = CSV(file_name=pcap_name, folder_name="Features_Legitimate")
            csv.create_empty_csv()
            csv.add_row(self.featuresCalc.get_features_name())
            filter_res = []
            for pkt in pkts:
                for filter in self.filters:
                    if(filter.check_packet_filter(pkt) is True):
                        filter_res.append(True)
                    else:
                        filter_res.append(False)
                if(True in filter_res):
                    array_of_pkts.append(pkt)
                if (len(array_of_pkts) >= self.featuresCalc.get_min_window_size()):
                    features = self.featuresCalc.compute_features(array_of_pkts)
                    csv.add_row(features)
                    array_of_pkts.clear()
                filter_res.clear()
            csv.close_csv()
            return csv
        def malware_train(line):
            # Train on one malware pcap; returns True only if training ran.
            trained = False
            line = line.replace("\n", "")
            csv_path = ""
            if (check_if_pcap_is_in_trained(line) is False):
                csv = create_malware_features_csv(line)
                csv_path = csv.get_file_path()
                if (self.env.check_history(csv_path) is False):
                    print("\nStart Malware Training\n")
                    self.env.set_csv(csv_path)
                    # One episode spans all feature rows of the CSV.
                    self.agent.train_agent(steps=self.env.get_rows_number() - 1,
                                           log_interval=self.env.get_rows_number() - 1, verbose=2,
                                           nb_max_episode_steps=self.env.get_rows_number() - 1)
                    self.env.add_csv_to_history(csv_path)
                    add_pcap_in_trained(line)
                    trained = True
                else:
                    print("\n" + csv_path + " gia' utilizzato. Saltato.\n")
                    trained = False
            else:
                print("\n" + line + " gia' utilizzato. Saltato.\n")
                trained = False
            return trained
        def legitimate_train(line):
            # Train on one legitimate pcap; returns True only if training ran.
            trained = False
            line = line.replace("\n", "")
            csv_path = ""
            if(check_if_pcap_is_in_trained(line) is False):
                csv = create_legitimate_features_csv(line)
                csv_path = csv.get_file_path()
                if (self.env.check_history(csv_path) is False):
                    print("\nStart Legitimate Training\n")
                    self.env.set_csv(csv_path)
                    self.agent.train_agent(steps=self.env.get_rows_number() - 1,
                                           log_interval=self.env.get_rows_number() - 1, verbose=2,
                                           nb_max_episode_steps=self.env.get_rows_number() - 1)
                    self.env.add_csv_to_history(csv_path)
                    add_pcap_in_trained(line)
                    trained = True
                else:
                    print("\n" + csv_path + " gia' utilizzato. Saltato.\n")
                    trained = False
            else:
                print("\n" + line + " gia' utilizzato. Saltato.\n")
                trained = False
            return trained
        def compute_features_single_file():
            # Merge the features of every unused malware and legitimate pcap
            # into one CSV; returns None when no new pcap contributed rows.
            csv = CSV(file_name="features")
            csv.create_empty_csv()
            csv.add_row(self.featuresCalc.get_features_name())
            mal_lines = glob.glob(self.mta.get_folder_name() + "/" + "*.pcap")
            leg_lines = glob.glob(self.lg.get_folder_name() + "/" + "*.pcap")
            pcaps_in_csv = []
            for line in mal_lines:
                line = line.replace("\n", "")
                if (check_if_pcap_is_in_trained(line) is False):
                    flow_type = "malware"
                    if (self.featuresCalc.get_flow_type() == flow_type):
                        pass
                    else:
                        self.featuresCalc.set_flow_type(flow_type)
                    array_of_pkts = []
                    attacker = AttackerCalc(pcap=line)
                    ip_to_consider = attacker.compute_attacker()
                    for filter in self.filters:
                        filter.set_ip_whitelist_filter(ip_to_consider)
                    pkts = rdpcap(line)
                    filter_res = []
                    for pkt in pkts:
                        for filter in self.filters:
                            if (filter.check_packet_filter(pkt) is True):
                                filter_res.append(True)
                            else:
                                filter_res.append(False)
                        if (True in filter_res):
                            array_of_pkts.append(pkt)
                        if (len(array_of_pkts) >= self.featuresCalc.get_min_window_size()):
                            features = self.featuresCalc.compute_features(array_of_pkts)
                            csv.add_row(features)
                            array_of_pkts.clear()
                        filter_res.clear()
                    add_pcap_in_trained(line)
                    pcaps_in_csv.append(True)
                else:
                    print("\n" + line + " gia' utilizzato. Saltato.\n")
                    pcaps_in_csv.append(False)
            for line in leg_lines:
                line = line.replace("\n", "")
                if (check_if_pcap_is_in_trained(line) is False):
                    flow_type = "legitimate"
                    if (self.featuresCalc.get_flow_type() == flow_type):
                        pass
                    else:
                        self.featuresCalc.set_flow_type(flow_type)
                    array_of_pkts = []
                    for filter in self.filters:
                        filter.set_ip_whitelist_filter([])
                    pkts = rdpcap(line)
                    filter_res = []
                    for pkt in pkts:
                        for filter in self.filters:
                            if (filter.check_packet_filter(pkt) is True):
                                filter_res.append(True)
                            else:
                                filter_res.append(False)
                        if (True in filter_res):
                            array_of_pkts.append(pkt)
                        if (len(array_of_pkts) >= self.featuresCalc.get_min_window_size()):
                            features = self.featuresCalc.compute_features(array_of_pkts)
                            csv.add_row(features)
                            array_of_pkts.clear()
                        filter_res.clear()
                    add_pcap_in_trained(line)
                    pcaps_in_csv.append(True)
                else:
                    print("\n" + line + " gia' utilizzato. Saltato.\n")
                    pcaps_in_csv.append(False)
            if(True in pcaps_in_csv):
                return csv
            else:
                return None
        def single_train():
            # Train once on the merged features CSV (single_csv mode).
            trained = False
            csv = compute_features_single_file()
            if(csv is not None):
                csv_path = csv.get_file_path()
                if (self.env.check_history(csv_path) is False):
                    self.env.set_csv(csv_path)
                    self.agent.train_agent(steps=self.env.get_rows_number() - 1,
                                           log_interval=self.env.get_rows_number() - 1, verbose=2,
                                           nb_max_episode_steps=self.env.get_rows_number() - 1)
                    self.env.add_csv_to_history(csv_path)
                    trained = True
                else:
                    print("\n" + csv_path + " gia' utilizzato. Saltato.\n")
                    trained = False
            else:
                trained = False
            return trained
        if(self.single_csv):
            if(single_train()):
                print("\nAddestramento Completato\n")
            else:
                print("\nAddestramento Non Eseguito\n")
        else:
            # Alternate one malware and one legitimate pcap per iteration.
            # *_iter counts successful trainings, *_counter scans the pcap
            # lists; the loop ends when both classes either reached their
            # target count ("finish") or ran out of pcaps ("overflow").
            mal_lines = glob.glob(self.mta.get_folder_name() + "/" + "*.pcap")
            leg_lines = glob.glob(self.lg.get_folder_name() + "/" + "*.pcap")
            lenght_mal_lines = len(mal_lines)
            lenght_leg_lines = len(leg_lines)
            leg_iter = 0
            mal_iter = 0
            mal_counter = 0
            leg_counter = 0
            overflow_mal = False
            overflow_leg = False
            finish_mal = False
            finish_leg = False
            while (True):
                if(mal_counter < lenght_mal_lines):
                    if(mal_iter == self.nb_malware_training):
                        finish_mal = True
                    else:
                        if(malware_train(mal_lines[mal_counter]) is True):
                            mal_iter += 1
                            mal_counter +=1
                        else:
                            mal_counter += 1
                else:
                    overflow_mal = True
                if(leg_counter < lenght_leg_lines):
                    if(leg_iter == self.nb_legitimate_training):
                        finish_leg = True
                    else:
                        if(legitimate_train(leg_lines[leg_counter]) is True):
                            leg_iter += 1
                            leg_counter += 1
                        else:
                            leg_counter += 1
                else:
                    overflow_leg = True
                # Stop when both sides are done, for any mix of reasons.
                if(overflow_leg is True and overflow_mal is True):
                    break
                if(finish_mal is True and finish_leg is True):
                    break
                if (overflow_leg is True and finish_mal is True):
                    break
                if (overflow_mal is True and finish_leg is True):
                    break
            print ("\nAddestramento Completato:\n")
            print ("\nMalware Training = " + str(mal_iter) + "\n")
            print ("\nLegitimate Training = " + str(leg_iter) + "\n")
            return
|
"""
Created on Sat June 06 2020
@author: evadatinez
"""
from MyAIGuide.data.export_excel import exportParticipantDataframeToExcel
from MyAIGuide.data.complaintsData import complaintsData
import numpy as np
import os.path
import pandas as pd
from pandas._testing import assert_frame_equal
# Build an all-NaN daily dataframe starting 2015-11-19 (1550 days).
i = pd.date_range('2015-11-19', periods=1550, freq='1D')
sLength = len(i)
empty = np.full(sLength, np.nan)
_complaint_columns = [
    'complaintsAwesomeDay',
    'complaintsLoneliness',
    'complaintsPoorSleep',
    'complaintsSadness',
    'complaintsStress',
    'complaintsTired',
    'complaintsWorriedAnxious',
    'anotherNonRelevantColumn',
]
d = {column: empty for column in _complaint_columns}
test_data = pd.DataFrame(data=d, index=i)
# Fill the complaints columns from the participant's raw data.
fname = './data/raw/ParticipantData/Participant8Anonymized'
test_data = complaintsData(fname=fname, data=test_data)
# Exercise the function under test on a small column/date selection.
export_cols = ['complaintsSadness', 'anotherNonRelevantColumn']
start_date = '2019-11-27'
end_date = '2019-12-05'
excel_fname = 'data_export_test'
exported_data = exportParticipantDataframeToExcel(test_data, start_date,
                                                  end_date, export_cols,
                                                  fname=excel_fname)
# Expected result: the exported columns restricted to the requested range.
expected_data = test_data[export_cols].copy()
dateRange = pd.date_range(start=start_date, end=end_date, freq='D')
expected_data = expected_data.loc[expected_data.index.isin(dateRange)]
def test_exported_data():
    """Check the exported Excel file exists and its content is correct."""
    # Assert the file was actually written: the original call discarded the
    # boolean returned by os.path.isfile, so a missing file never failed.
    file_path = excel_fname + '.xlsx'
    assert os.path.isfile(file_path)
    # compare test and expected dataframes
    assert_frame_equal(expected_data, exported_data)
|
from sklearn.datasets.samples_generator import make_blobs
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
# Generate a toy 2-D clustering dataset: 400 points around 4 centers.
# random_state pins the RNG seed so the generated blobs are reproducible.
data, target = make_blobs(n_samples=400, centers=4, cluster_std=0.95, random_state=0)
# make_blobs plays the role of read_csv here: it hands us the features
# (data) already separated from the labels (target). In the real world we
# would only have the features; the labels ("is it Prof. Tom or not") are
# exactly what clustering tries to discover.
# print(data)
plt.scatter(data[:,0],data[:,1])  # plot the two features x1 and x2
plt.savefig('scatterplot.png')
def run_kmeans(n):
    """Cluster the module-level `data` into `n` clusters and save the plot.

    Prints the model's inertia (within-cluster sum of squared distances,
    useful for the elbow method) and writes a scatter plot colored by
    cluster, with centroids drawn as red stars, to 'scatterplot_color.png'.
    """
    # Start from a fresh figure: previously every call (and the initial
    # module-level scatter) drew onto the same axes, so repeated calls
    # accumulated stale points in the saved image.
    plt.figure()
    machine = KMeans(n_clusters=n)
    machine.fit(data)
    results = machine.predict(data)
    centroids = machine.cluster_centers_
    ssd = machine.inertia_
    print(ssd)
    # print(results)
    # print(centroids)
    plt.scatter(data[:, 0], data[:, 1], c=results)
    plt.scatter(centroids[:, 0], centroids[:, 1], c='red', marker="*", s=200)
    plt.savefig('scatterplot_color.png')
# Try different cluster counts (elbow method); only n=4 is active, which
# matches the number of centers used to generate the data.
# run_kmeans(1)
# run_kmeans(2)
# run_kmeans(3)
run_kmeans(4)
# run_kmeans(5)
# run_kmeans(6)
# run_kmeans(7)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from django.http import HttpResponse
from .models import Book
from .models import Rental
from .forms import UserForm
def index(request):
    """Show the books available for rent and the user's active rentals."""
    if (request.user.is_authenticated()):
        books = Book.objects.filter(is_rented=False)
        rentals = Rental.objects.filter(user=request.user, is_active=True)
        return render(request, 'library.html', {'books' : books, 'rentals' : rentals})
    # Anonymous users previously fell through and the view returned None,
    # which Django rejects with a 500 error; return an explicit response.
    return HttpResponse('Please log in to use the library.', status=401)
def rent_book(request, book_id):
    """Rent the book with id `book_id` for the logged-in user."""
    if (request.user.is_authenticated()):
        book_set = Book.objects.filter(id=book_id, is_rented=False)
        if book_set.count() == 0:
            return render(request, 'action.html', {'response' : 'This book does not exist or is not available'})
        book = book_set[0]
        # NOTE(review): check-then-save is racy under concurrent requests;
        # consider select_for_update() inside a transaction.
        rental = Rental()
        rental.user = request.user
        rental.book = book
        rental.is_active = True
        book.is_rented = True
        book.save()
        rental.save()
        return render(request, 'action.html', {'response' : 'You rented: ' + str(book.title)})
    # Anonymous users previously fell through and the view returned None,
    # which Django rejects with a 500 error; return an explicit response.
    return HttpResponse('Please log in to rent books.', status=401)
def return_book(request, book_id):
    """Return a book previously rented by the logged-in user."""
    if (request.user.is_authenticated()):
        book_set = Book.objects.filter(id=book_id)
        if book_set.count() == 0:
            return render(request, 'action.html', {'response' : 'This book does not exist'})
        rented_book = book_set[0]
        rental_set = Rental.objects.filter(user=request.user, book=rented_book, is_active=True)
        if rental_set.count() == 0:
            return render(request, 'action.html', {'response' : 'You have not rented this book'})
        rental = rental_set[0]
        # Close the rental and free the book.
        rented_book.is_rented = False
        rental.is_active = False
        rented_book.save()
        rental.save()
        return render(request, 'action.html', {'response' : 'Thank you for returning: ' + str(rented_book.title)})
    # Anonymous users previously fell through and the view returned None,
    # which Django rejects with a 500 error; return an explicit response.
    return HttpResponse('Please log in to return books.', status=401)
|
import numpy as np
from scipy.signal import windows
eps = np.finfo(np.float32).eps
def median_filter(x, kernel_size):
    """Sliding median filter (by absolute value) with zero padding.

    For each position, the window element whose |value| ranks in the middle
    is selected and its original signed value is emitted. `kernel_size` is
    assumed odd so the middle rank is well defined.
    """
    half = (kernel_size - 1) // 2
    zeros = np.zeros(half)
    padded = np.concatenate((zeros, x, zeros))
    out = np.zeros_like(x)
    for pos in range(len(out)):
        window = padded[pos : pos + kernel_size]
        order = np.argsort(np.abs(window))
        out[pos] = window[order[half]]
    return out
def spectrogram(x, fft_size, hop_size, zero_pad=1):
    """Compute a complex STFT-like spectrogram of the 1-D signal `x`.

    Frames of length `fft_size` are taken every `hop_size` samples
    (rectangular window), zero-padded to fft_size * zero_pad and FFT'd.
    Trailing partial frames are zero-padded like the rest. Returns an array
    of shape (n_frames, fft_size * zero_pad); empty for an empty signal.
    """
    frames = []
    for start in range(0, len(x), hop_size):
        frame = x[start : start + fft_size]
        buf = np.zeros(fft_size * zero_pad)
        buf[:len(frame)] = frame
        frames.append(np.fft.fft(buf))
    # Stack once at the end: the original grew the array with np.append per
    # frame, which copies everything each time (accidental O(n^2)).
    return np.array(frames)
def hpss(x, perc_kernel=17, harm_kernel=17, mask_power=2, fft_size=4096, hop_size=1024, zero_pad=2):
    """Simple harmonic/percussive source separation (median-filter method).

    Median-filters the magnitude spectrogram along frequency (percussive)
    and along time (harmonic), builds Wiener-style soft masks, applies them
    to the complex spectrogram, and overlap-adds back to the time domain.
    Returns (harmonic_signal, percussive_signal), each shaped like `x`.
    """
    print('Computing HPSS...')
    print('\tComputing STFTs...')
    S = spectrogram(x, fft_size, hop_size, zero_pad)
    # percussive estimate: median-filter each frame along frequency
    print('\tSeparating percussive signal...')
    P = np.copy(S)
    for i in range(S.shape[0]):
        P[i, :] = median_filter(np.abs(S[i, :]), kernel_size=perc_kernel)
    # harmonic estimate: median-filter each frequency bin along time
    print('\tSeparating harmonic signal...')
    H = np.copy(S)
    for h in range(S.shape[1]):
        H[:, h] = median_filter(np.abs(S[:, h]), kernel_size=harm_kernel)
    # Soft masks M = E^p / (H^p + P^p + eps). Vectorized elementwise ops
    # replace the original per-bin Python double loop (same arithmetic).
    print('\tCreating filter masks...')
    H_p = H ** mask_power
    P_p = P ** mask_power
    denom = H_p + P_p + eps  # eps guards against division by zero
    M_H = H_p / denom
    M_P = P_p / denom
    H_hat = np.multiply(S, M_H)
    P_hat = np.multiply(S, M_P)
    # Overlap-add the masked spectra back into time-domain signals.
    print('\tComputing time-domain signal...')
    h_sig = np.zeros_like(x)
    p_sig = np.zeros_like(x)
    # The Hann window and overlap normalization are frame-invariant: hoist.
    hann = windows.hann(fft_size) / ((fft_size // hop_size) // 2)
    for i in range(S.shape[0]):
        start_idx = int(i * hop_size)
        n_samples = min(fft_size, len(x) - start_idx)
        win = hann[:n_samples]
        h_sig[start_idx : start_idx + fft_size] += win * np.real(np.fft.ifft(H_hat[i, :])[:n_samples])
        p_sig[start_idx : start_idx + fft_size] += win * np.real(np.fft.ifft(P_hat[i, :])[:n_samples])
    return h_sig, p_sig
|
from graph import Graph
def f(x):
    """Return the square of x."""
    return x * x
# With no function argument Graph presumably plots the identity (linear)
# curve -- TODO confirm against the graph module.
g1 = Graph()
g1.plot("Linear Function")
# Plot the quadratic function f defined above.
g2 = Graph(f)
g2.plot("Quadratic Function")
#
# -*-encoding:gb2312-*-
import os
import hashlib
from twisted.internet import reactor, defer
from bitfield import Bitfield
from tools import sleep
class BTFileError (Exception) :
    """Raised for file-level failures (wrong size, piece outside the file, I/O)."""
    pass
class BTHashTestError (Exception):
    """Raised when a piece fails its SHA-1 hash check."""
    pass
class BTFile:
    """One file of the torrent, addressed by global piece index/offset.

    Translates (piece index, offset, length) requests into reads/writes on
    the byte range of this file that they intersect. Pieces touching this
    file span the half-open index range [idx0_piece, idx1_piece).
    Python 2 code (Twisted-based client).
    """
    def __init__(self, metainfo, index, saveDir):
        fileinfo = metainfo.files[index]
        piece_len = metainfo.piece_length
        self.fileInfo = fileinfo
        self.path = os.path.join(saveDir, fileinfo['path'])
        self.length = fileinfo['length']
        self.piece_len = piece_len
        # Absolute byte range of this file inside the whole torrent payload.
        self.abs_pos0, self.abs_pos1 = fileinfo['pos_range']
        self.fd = None  # file handle, opened lazily on first read/write
        # First and one-past-last piece indices intersecting this file.
        idx0, ext = divmod(self.abs_pos0, self.piece_len)
        self.idx0_piece = idx0
        idx1, ext = divmod(self.abs_pos1, self.piece_len)
        self.idx1_piece = idx1+1 if ext else idx1
        #print self.abs_pos0, self.abs_pos1, self.piece_len, self.idx0_piece, self.idx1_piece
        # Make sure the directory that will hold this file exists.
        h, t = os.path.split(self.path)
        if not os.path.exists(h):
            os.makedirs(h)
    def __str__(self):
        return u'piece=[{},{}) size={:,d} "{}" '.format(self.idx0_piece, self.idx1_piece, self.length, os.path.split(self.path)[1]).encode('gb2312')
    def __getIntersection(self, index, beg, data_len):
        # p0,p1,f0,f1 absolute position in files
        p0 = index * self.piece_len + beg
        p1 = p0 + data_len
        f0, f1 = self.abs_pos0, self.abs_pos1
        # intersect sub piece
        pf0 = max(p0, f0)
        pf1 = min(p1, f1)
        # pb,pe relative positioin in piece
        pb = pf0 - p0
        pe = pf1 - p0
        # fb,fe relative position in current file
        fb = pf0 - f0
        fe = pf1 - f0
        return (pb, pe), (fb, fe)
    def write(self, index, beg, data):
        """Write the slice of `data` that falls inside this file.

        Returns (offset-within-request, bytes-written); raises BTFileError
        when the request does not intersect this file.
        """
        (pb,pe), (fb,fe) = self.__getIntersection(index, beg, len(data))
        if pb >= pe :
            raise BTFileError("index isn't in this file")
        my_data = data[pb:pe]
        # print len(my_data)
        if self.fd is None :
            # Lazy open: reuse an existing file only if its size matches;
            # otherwise create it pre-sized to the final length.
            if os.path.exists(self.path) :
                length = os.path.getsize(self.path)
                if length != self.length:
                    raise BTFileError(u'old file size is error: {}'.format(self.path))
                fd = open(self.path, 'rb+')
            else :
                fd = open(self.path, 'wb+')
                fd.truncate(self.length)
            self.fd = fd
        self.fd.seek(fb)
        self.fd.write(my_data)
        return pb, len(my_data)
    def read(self, index, beg, data_len):
        """Read the part of the requested span that this file holds.

        Returns (offset-within-request, data); raises BTFileError when the
        request does not intersect this file or the file cannot be opened.
        """
        (pb,pe), (fb,fe) = self.__getIntersection(index, beg, data_len)
        #print pb, pe, fb, fe
        if pb >= pe :
            raise BTFileError("index isn't in this file")
        if self.fd is None:
            try:
                self.fd = open(self.path, 'rb+')
            except IOError as error:
                raise BTFileError(str(error))
        self.fd.seek(fb)
        data = self.fd.read(fe-fb)
        return pb, data
    def close(self):
        if self.fd :
            self.fd.close()
    # Container sugar: bt_file[idx] reads this file's slice of piece idx,
    # bt_file[idx] = data writes it, iteration yields (idx, slice) pairs.
    def __getitem__(self, idx):
        return self.read(idx, 0, self.piece_len)
    def __setitem__(self, idx, data):
        self.write(idx, 0, data)
    def __iter__(self) :
        for idx in xrange(self.idx0_piece, self.idx1_piece) :
            yield idx, self[idx]
    def __len__(self) :
        # Number of pieces intersecting this file.
        return self.idx1_piece - self.idx0_piece
    def __contains__(self, idx) :
        # (pb,pe), (fb,fe) = self.__getIntersection(index, 0, self.piece_len)
        # return pb < pe
        return self.idx0_piece <= idx < self.idx1_piece
class BTFiles :
    """The selected files of a torrent, exposed as one piece-indexed store.

    Piece reads/writes are fanned out to every BTFile that intersects the
    piece; adjacent partial results are merged back into contiguous
    (offset, payload) runs. Python 2 code.
    """
    def __init__(self, metainfo, saveDir, selectedFileIndex=None):
        if selectedFileIndex is None :
            # Python 2: range() returns a list, hence the in-place sort below.
            selectedFileIndex = range(len(metainfo.files))
        selectedFileIndex.sort()
        self.metainfo = metainfo
        self.saveDir = saveDir
        self.totalSize = metainfo.total_length
        self.pieceNum = metainfo.pieces_size
        self.pieceLength = metainfo.piece_length
        self.hashArray = metainfo.pieces_hash
        self.files = []
        for i in selectedFileIndex :
            self.files.append(BTFile(metainfo, i, saveDir))
    def doHashTest(self, idx, data):
        # True when `data` matches the SHA-1 recorded for piece idx.
        return hashlib.sha1(data).digest() == self.hashArray[idx]
    def getBitfield(self) :
        """Scan the files on disk and return (have, need) piece bitfields."""
        # Every piece touching a selected file is initially "needed".
        bfNeed = Bitfield(self.pieceNum)
        for f in self.files :
            for i in xrange(f.idx0_piece, f.idx1_piece) :
                bfNeed[i] = 1
        # Pieces already on disk that pass the hash test are "have".
        # Pieces split across files (len(ds) != 1) are left as "needed".
        bfHave = Bitfield(self.pieceNum)
        for i in xrange(self.pieceNum):
            try :
                ds = self[i]
                if len(ds) == 1:
                    beg, dat = ds[0]
                    if self.doHashTest(i, dat):
                        bfHave[i] = 1
                        bfNeed[i] = 0
            except BTFileError as error :
                pass
        return bfHave, bfNeed
    def write(self, idx, data) :
        """Write piece `idx` into every intersecting file.

        Returns the written runs as (offset, length) pairs with adjacent
        runs merged.
        """
        ds = [f.write(idx,0,data) for f in self.files if idx in f]
        if len(ds) <= 1 :
            return ds
        else :
            # Merge runs that are contiguous in the piece.
            _ds = ds[0:1]
            for d in ds[1:] :
                beg0, len0 = _ds[-1]
                beg1, len1 = d
                assert beg0+len0 <= beg1
                if beg0+len0==beg1:
                    _ds[-1] = beg0, len0+len1
                else:
                    _ds.append(d)
            return _ds
    def __getitem__(self, idx) :
        """Read piece `idx`; returns merged (offset, data) runs."""
        # ds = [f[idx] for f in self.files if idx in f]
        ds = []
        for f in self.files:
            if idx in f:
                try:
                    ds.append(f[idx])
                # A file may simply not exist yet; skip its contribution.
                except BTFileError as error:
                    pass
        if len(ds) <=1 :
            return ds
        else :
            # Merge runs that are contiguous in the piece.
            _ds = ds[0:1]
            for d in ds[1:] :
                beg0, dat0 = _ds[-1]
                beg1, dat1 = d
                assert beg0+len(dat0) <= beg1
                if beg0+len(dat0)==beg1:
                    _ds[-1] = beg0, dat0+dat1
                else:
                    _ds.append(d)
            return _ds
    def __setitem__(self, idx, data) :
        for f in self.files:
            if idx in f :
                f[idx] = data
    def __iter__(self):
        for idx in xrange(len(self)) :
            yield idx, self[idx]
    def __contains__(self, idx) :
        return any(idx in f for f in self.files)
    def __len__(self):
        # Total number of pieces in the torrent (not only selected ones).
        return self.pieceNum
    def __str__(self):
        return '\n'.join(str(f) for f in self.files)
class BTFileManager :
    '''
    Manager for the files to be downloaded.

    Wraps BTFiles with an in-memory piece cache (with read-ahead and LRU-ish
    eviction), a dirty-piece write-back buffer flushed from a worker thread,
    and hash verification on incoming pieces. Python 2 / Twisted code.
    '''
    # Size of one transfer slice (16 KiB), the unit peers exchange.
    slice_size = 2**14
    def __init__(self, btm):
        self.btm = btm
        self.config = btm.config
        metainfo = self.config.metainfo
        self.download_list = self.config.downloadList
        self.metainfo = metainfo
        self.piece_length = metainfo.piece_length
        self.pieceNum = metainfo.pieces_size
        self.btfiles = BTFiles(metainfo, self.config.saveDir, self.config.downloadList)
        # Which pieces are already on disk vs still needed.
        self.bitfieldHave, self.bitfieldNeed = self.btfiles.getBitfield()
        print self.config.saveDir
        print self.bitfieldNeed
        # Pieces kept permanently in memory and stored separately: mainly
        # pieces that straddle a file boundary (see write() below).
        self.buffer_reserved = {}
        # Cache capacity in pieces (~100 MB worth).
        self.buffer_max_size = 100 * 2**20 / self.piece_length
        #print self.buffer_max_size
    def start(self) :
        self.status = 'started'
        self.buffer = {}        # piece cache
        self.buffer_record = [] # access order, oldest first (for eviction)
        self.buffer_dirty = {}  # pieces waiting to be flushed to disk
        # Periodic flush and cache-eviction daemons.
        reactor.callLater(10, self.deamon_write)
        reactor.callLater(10, self.deamon_read)
    def stop(self) :
        # Flush anything still dirty synchronously, then drop the caches.
        for idx, data in self.buffer_dirty.iteritems():
            self.write(idx, data)
        self.buffer_dirty.clear()
        self.buffer.clear()
        del self.buffer_record[:]
        self.status = 'stopped'
    @defer.inlineCallbacks
    def deamon_write(self):
        # Flush dirty pieces to disk every 10 seconds while running.
        while self.status == 'started':
            self.__thread_write()
            yield sleep(10)
    def __thread_write(self):
        # Hand the current dirty set to a worker thread for writing.
        # NOTE(review): hasattr(self, '__thread_write_status') checks the
        # unmangled name while the assignments below set the name-mangled
        # attribute, so this hasattr is presumably always False and the
        # status is reset to 'stopped' on every call -- confirm and fix.
        if not hasattr(self, '__thread_write_status') :
            self.__thread_write_status = 'stopped'
        if self.__thread_write_status == 'running' :
            return
        if not self.buffer_dirty :
            return
        # Snapshot the dirty set; new writes may land while flushing.
        bfd = self.buffer_dirty.copy()
        def call_in_thread():
            # Runs in a reactor thread: do the blocking disk writes.
            print '--O-O write to disk', len(bfd), bfd.keys()
            for idx in sorted(bfd.keys()) :
                data = bfd[idx]
                self.write(idx, data)
            reactor.callFromThread(call_from_thread)
        def call_from_thread():
            # Back on the reactor thread: clear flushed entries, unless the
            # piece was overwritten again while we were flushing.
            self.__thread_write_status = 'stopped'
            for idx, data in bfd.iteritems() :
                if data is self.buffer_dirty[idx] :
                    # drop data that has been written back to disk
                    del self.buffer_dirty[idx]
        if self.__thread_write_status == 'stopped' :
            self.__thread_write_status = 'running'
            reactor.callInThread(call_in_thread)
    @defer.inlineCallbacks
    def deamon_read(self):
        # Evict oldest cached pieces every 10 seconds when over capacity
        # (plus an extra 20% so eviction is not triggered constantly).
        while self.status == 'started':
            size = len(self.buffer)
            if size > self.buffer_max_size :
                remove_count = size - self.buffer_max_size
                remove_count += self.buffer_max_size / 5
                for idx in self.buffer_record[:remove_count] :
                    del self.buffer[idx]
                del self.buffer_record[:remove_count]
            yield sleep(10)
    ############################################################
    # High-level piece operations backed by the cache
    def readPiece(self, index) :
        """Return piece `index` from cache, loading neighbours on a miss."""
        if not (0 <= index < self.pieceNum) :
            raise BTFileError('index is out of range')
        if not self.bitfieldHave[index] :
            raise BTFileError('index is not downloaded')
        if index in self.buffer :
            data = self.buffer[index]
            # Move to the tail of the access record (most recently used).
            self.buffer_record.remove(index)
            self.buffer_record.append(index)
            return data
        else:
            # Cache miss: read the piece plus its neighbours (read-ahead),
            # then retry from the cache.
            for idx in [index-1, index, index+1] :
                if 0 <= idx < self.pieceNum and idx not in self.buffer :
                    data = self.read(idx)
                    assert data
                    self.buffer[idx] = data
                    self.buffer_record.append(idx)
                    #print 'index =', idx
            data = self.readPiece(index)
            return data
    def writePiece(self, index, piece) :
        """Accept a downloaded piece after hash-checking it."""
        if not (0 <= index < self.pieceNum) :
            raise BTFileError('index is out of range')
        if not self.bitfieldNeed[index] :
            raise BTFileError('index is not need')
        if not self.btfiles.doHashTest(index, piece):
            raise BTHashTestError()
        else:
            self.bitfieldHave[index] = 1
            self.bitfieldNeed[index] = 0
            if index in self.buffer :
                self.buffer[index] = piece
            # Defer the disk write to the flush daemon.
            self.buffer_dirty[index] = piece
            # self.write(index, piece)
            return True
    ############################################################
    # Low-level piece I/O, consulting buffer_dirty / buffer_reserved
    def read(self, index):
        # Prefer pending (dirty) data, then boundary pieces kept in memory,
        # then the files on disk.
        if index in self.buffer_dirty:
            return self.buffer_dirty[index]
        elif index in self.buffer_reserved :
            return self.buffer_reserved[index]
        data_list = self.btfiles[index]
        if len(data_list) == 1 :
            assert data_list[0][0] == 0
            return data_list[0][1]
        else:
            assert False
            return data_list
    def write(self, index, data) :
        ds = self.btfiles.write(index, data)
        if len(ds) > 1 : # piece straddles a file boundary, but the adjacent
                         # file was not selected for download: the data has
                         # no complete home on disk, so keep it in memory
            # print len(data), length
            # assert False
            self.buffer_reserved[index] = data
        elif not ds :
            assert False
    def __iter__(self):
        return self.btfiles.__iter__()
if __name__ == '__main__':
    # Self-test: fill every piece with a random repeated byte via BTFiles,
    # read everything back, then keep stressing random read/writes forever
    # under the Twisted reactor.
    from MetaInfo import BTMetaInfo
    from bitfield import Bitfield
    from random import randint
    from app import BTConfig
    from BTManager import BTManager
    metainfo = BTMetaInfo('test.torrent')
    # bfm = BTFileManager(metainfo)
    # bfm.start()
    # def _read(idx):
    #     return bfm.readPiece(idx)
    # def _write(idx, data):
    #     return bfm.writePiece(idx, data)
    bfs = BTFiles(metainfo, 'C:/')
    def _read(idx):
        # Expect a single contiguous run starting at offset 0.
        res = bfs[idx]
        assert len(res) == 1 and res[0][0] == 0
        return res[0][1]
    def _write(idx, data):
        bfs[idx] = data
    pieceNum = len(bfs)
    pieceLen = bfs.pieceLength
    # Write each piece as one repeated random byte; remember the bytes.
    char = []
    for i in xrange(pieceNum) :
        c = chr(randint(0, 0xFF))
        _len = pieceLen if i != pieceNum-1 else metainfo.last_piece_length
        data = c * _len
        _write(i, data)
        char.append(c)
    # Verify every full-size piece reads back what was written.
    for i in xrange(pieceNum - 1):
        c = char[i]
        _data = c * pieceLen
        data = _read(i)
        #print i, len(data), ord(c)
        assert _data == data
    # c = char[-1]
    # _data = c * metainfo.last_piece_length
    # idx, beg, data = bfm.readPiece(bfm.pieces_size-1)
    # print bfm.pieces_size-1, len(data), ord(c)
    # assert _data == data
    def read_write():
        # One random write followed by one random verified read.
        # write
        i = randint(0, pieceNum-1)
        c = chr(randint(0, 0xFF))
        char[i] = c
        if i == pieceNum-1:
            _len = metainfo.last_piece_length
        else:
            _len = metainfo.piece_length
        _data = c * _len
        _write(i, _data)
        print '<<< ', i, len(_data)
        # read
        i = randint(0, pieceNum-1)
        c = char[i]
        if i == pieceNum-1:
            _len = metainfo.last_piece_length
        else:
            _len = metainfo.piece_length
        _data = c * _len
        data = _read(i)
        print '>>>', i, len(data), _len, ord(c), ord(data[0])
        assert _data == data
        #print 'length, ', len(bfm.buffer_dirty), len(bfm.buffer), len(bfm.buffer_reserved)
        # Reschedule itself so the stress test runs continuously under the
        # reactor (indentation reconstructed -- TODO confirm placement).
        reactor.callLater(0, read_write)
    read_write()
    reactor.run()
|
import pandas as pd

my_stat = pd.read_csv('my_stat.csv')
print(my_stat.head(10))

# subset_1: observations where V1 is strictly positive and V3 equals 'A'.
mask_1 = (my_stat['V1'] > 0) & (my_stat['V3'] == 'A')
subset_1 = my_stat.loc[mask_1]
print(subset_1)

# subset_2: observations where V2 differs from 10 or V4 is at least 1.
mask_2 = (my_stat['V2'] != 10) | (my_stat['V4'] >= 1)
subset_2 = my_stat.loc[mask_2]
print(subset_2)

# As before, the result of boolean filtering is itself a DataFrame.
import numpy as np
from pvfactors.irradiance.utils import \
perez_diffuse_luminance, breakup_df_inputs, calculate_circumsolar_shading
def test_perez_diffuse_luminance(df_perez_luminance):
    """
    Test that the calculation of luminance -- first step in using the vf model
    with Perez -- is functional
    """
    input_cols = ['surface_tilt', 'surface_azimuth', 'solar_zenith',
                  'solar_azimuth', 'dni', 'dhi']
    df_inputs = df_perez_luminance[input_cols]
    (timestamps, surface_tilt, surface_azimuth, solar_zenith, solar_azimuth,
     dni, dhi) = breakup_df_inputs(df_inputs)
    df_outputs = perez_diffuse_luminance(timestamps, surface_tilt,
                                         surface_azimuth, solar_zenith,
                                         solar_azimuth, dni, dhi)
    # Compare against the fixture values, aligned to the output column order.
    col_order = df_outputs.columns
    np.testing.assert_allclose(df_outputs.values,
                               df_perez_luminance[col_order].values,
                               atol=0, rtol=1e-8)
def test_calculate_circumsolar_shading():
    """
    Test that the disk shading function stays consistent
    """
    # 20% of the circumsolar disk diameter is covered by the obstruction.
    covered = 20.
    shading = calculate_circumsolar_shading(covered, model='uniform_disk')
    # Pin the result to the previously-validated shading percentage.
    np.testing.assert_allclose(14.2378489933, shading, atol=0, rtol=1e-8)
|
import solution
import utils
import DNA_RNA_REVC
class ORF(solution.Solution):
    """Find every distinct protein translated from any open reading frame of
    a DNA string or its reverse complement (Rosalind ORF problem)."""

    def _read(self, f):
        # Only the first FASTA record is relevant for this problem.
        records = utils.read_fasta(f, dna_only=True)
        return records[0]

    def _translate(self, rna):
        """Translate the RNA block starting with the start codon
        and without a stop codon."""
        codons = (rna[i:(i + 3)] for i in range(0, len(rna), 3))
        return ''.join(utils.PROT_CODON_TABLE[codon] for codon in codons)

    def _solve_helper(self, rna):
        # Scan one frame codon by codon: remember each start-codon position
        # and, when a stop codon appears, emit a protein per open start.
        proteins = set()
        open_starts = []
        usable = len(rna) - len(rna) % 3
        for pos in range(0, usable, 3):
            codon = rna[pos:(pos + 3)]
            if codon == utils.PROT_START_CODON:
                open_starts.append(pos)
            elif utils.PROT_CODON_TABLE[codon] == -1 and open_starts:
                proteins.update(self._translate(rna[start:pos])
                                for start in open_starts)
                open_starts = []
        return proteins

    def solve(self, data):
        # Transcribe both strands, then scan all three frames of each.
        forward = data.replace(utils.T, utils.U)
        backward = DNA_RNA_REVC.REVC().solve(data).replace(utils.T, utils.U)
        proteins = set()
        for strand in (forward, backward):
            for frame in (0, 1, 2):
                proteins.update(self._solve_helper(strand[frame:]))
        return proteins

    def _write(self, f, answer):
        f.write('\n'.join(answer))
|
from django.test import TestCase
from lists.models import Item
class HomePageTests(TestCase):
    """Smoke tests for the home page view."""

    def test_uses_home_template(self):
        # GET / must render the home template.
        response = self.client.get('/')
        self.assertTemplateUsed(response, 'home.html')

    def test_can_save_post_request(self):
        # The submitted item text is echoed back in the response body.
        response = self.client.post('/', data={'text': 'Brand new item'})
        body = response.content.decode()
        self.assertIn('Brand new item', body)
class ItemModelTest(TestCase):
    """Persistence and POST-handling tests for the Item model."""

    def test_save_retrieve_items(self):
        # Persist two items, then read them back in insertion order.
        for text in ('This is the first item', 'Item the second'):
            item = Item()
            item.text = text
            item.save()
        saved = Item.objects.all()
        self.assertEqual(saved.count(), 2)
        self.assertEqual(saved[0].text, 'This is the first item')
        self.assertEqual(saved[1].text, 'Item the second')

    def test_save_post_request(self):
        self.client.post('/', data={'text': 'Brand new item'})
        # Exactly one item must exist, carrying the submitted text.
        self.assertEqual(Item.objects.count(), 1)
        self.assertEqual(Item.objects.first().text, 'Brand new item')

    def test_redirect_after_post(self):
        response = self.client.post('/', data={'text': 'Brand new item'})
        # A successful POST redirects back to the list page.
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response['location'], '/')
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 27 15:37:40 2020
Maker : bychoi@deu.ac.kr
@author: Com
"""
# sample player file which must be made by student
from player import *
from stone import *
from random import *
class iot12345_student(player):
    """Gomoku (omok) AI player.

    Scores every empty cell by pattern matching in four directions
    (attack and defense patterns) and plays the highest-scoring move.
    In the comments below, 'o' is one of our stones, 'x' an enemy stone.
    """
    def __init__(self, clr):
        super().__init__( clr) # call constructor of super class
        # best move found so far; defaults near the board center
        self.__max_x= 9
        self.__max_y= 9
    def __del__(self): # destructor
        pass
    def ai_calculate(self,board):
        """Quantify how favorable `board` is for this player.

        Scans every cell of the 19x19 board for our stone patterns
        (attack) and enemy patterns worth blocking (defense), in the
        horizontal, vertical and both diagonal directions, and returns
        the summed pattern scores.
        """
        score = 0 # score contribution of one matched pattern
        total_score = 0 # quantified value of the whole board state
        enemy = -1
        team = 1
        stone = []
        # Copy the board into a 22x22 grid padded with 3 (off-board
        # marker); out-of-range lookups -- including Python's negative
        # index wrap-around -- then read padding instead of raising.
        for i in range(0,22):
            stone.append([])
            for j in range(0,22):
                try:
                    stone[i].append(board[i][j])
                except:
                    stone[i].append(3)
        for i in range(0, 19):
            for k in range(0, 19):
                #### horizontal ####
                if (stone[i][k - 2] == team and stone[i][k - 1] == team and stone[i][k] == team and stone[i][k + 1] == team and
                        stone[i][k + 2] == team):
                    score = 990000
                    total_score = total_score + score
                # ooooo: five in a row (win)
                if (stone[i][k - 2] == team and stone[i][k - 1] == team and stone[i][k] == team and stone[i][
                    k + 1] == team and stone[i][k + 2] == 0):
                    score = 50
                    total_score = total_score + score
                # oooo: four in a row, open end
                if (stone[i][k - 2] == team and stone[i][k - 1] == team and stone[i][k] == team and stone[i][
                    k + 1] == team and stone[i][k + 2] == enemy):
                    score = 40
                    total_score = total_score + score
                # oooox: four in a row, blocked
                if (stone[i][k - 2] == enemy and stone[i][k - 1] == team and stone[i][k] == team and stone[i][k + 1] == team and
                        stone[i][k + 2] == team):
                    score = 40
                    total_score = total_score + score
                # xoooo: four in a row, blocked
                if (stone[i][k - 2] == 0 and stone[i][k - 1] == team and stone[i][k] == team and stone[i][k + 1] == team and
                        stone[i][k + 2] == 0):
                    score = 6
                    total_score = total_score + score
                # ooo: open three
                if (stone[i][k - 2] == 0 and stone[i][k - 1] == team and stone[i][k] == team and stone[i][k + 1] == team and
                        stone[i][k + 2] == enemy):
                    score = 4
                    total_score = total_score + score
                # ooox: three, blocked
                if (stone[i][k - 2] == enemy and stone[i][k - 1] == team and stone[i][k] == team and stone[i][k + 1] == team and
                        stone[i][k + 2] == 0):
                    score = 4
                    total_score = total_score + score
                # xooo: three, blocked
                if (stone[i][k - 1] == 0 and stone[i][k] == team and stone[i][k + 1] == team and stone[i][k + 2] == 0):
                    score = 2
                    total_score = total_score + score
                # oo: open two
                if (stone[i][k - 1] == 0 and stone[i][k] == team and stone[i][k + 1] == team and stone[i][k + 2] == enemy):
                    score = 1
                    total_score = total_score + score
                # oox: two, blocked
                if (stone[i][k - 1] == enemy and stone[i][k] == team and stone[i][k + 1] == team and stone[i][k + 2] == 0):
                    score = 1
                    total_score = total_score + score
                # xoo: two, blocked
                #### vertical ####
                if (stone[i - 2][k] == team and stone[i - 1][k] == team and stone[i][k] == team and stone[i + 1][
                    k] == team and stone[i + 2][k] == team):
                    score = 990000
                    total_score = total_score + score
                # ooooo: five in a row (win)
                if (stone[i - 2][k] == team and stone[i - 1][k] == team and stone[i][k] == team and stone[i + 1][
                    k] == team and stone[i + 2][k] == 0):
                    score = 50
                    total_score = total_score + score
                # oooo: four in a row, open end
                if (stone[i - 2][k] == team and stone[i - 1][k] == team and stone[i][k] == team and stone[i + 1][
                    k] == team and stone[i + 2][k] == enemy):
                    score = 40
                    total_score = total_score + score
                # oooox: four in a row, blocked
                if (stone[i - 2][k] == enemy and stone[i - 1][k] == team and stone[i][k] == team and stone[i + 1][k] == team and
                        stone[i + 2][k] == team):
                    score = 40
                    total_score = total_score + score
                # xoooo: four in a row, blocked
                if (stone[i - 2][k] == 0 and stone[i - 1][k] == team and stone[i][k] == team and stone[i + 1][k] == team and
                        stone[i + 2][k] == 0):
                    score = 6
                    total_score = total_score + score
                # ooo: open three
                if (stone[i - 2][k] == 0 and stone[i - 1][k] == team and stone[i][k] == team and stone[i + 1][k] == team and
                        stone[i + 2][k] == enemy):
                    score = 4
                    total_score = total_score + score
                # ooox: three, blocked
                if (stone[i - 2][k] == enemy and stone[i - 1][k] == team and stone[i][k] == team and stone[i + 1][k] == team and
                        stone[i + 2][k] == 0):
                    score = 4
                    total_score = total_score + score
                # xooo: three, blocked
                if (stone[i - 1][k] == 0 and stone[i][k] == team and stone[i + 1][k] == team and stone[i + 2][k] == 0):
                    score = 2
                    total_score = total_score + score
                # oo: open two
                if (stone[i - 1][k] == 0 and stone[i][k] == team and stone[i + 1][k] == team and stone[i + 2][k] == enemy):
                    score = 1
                    total_score = total_score + score
                # oox: two, blocked
                if (stone[i - 1][k] == enemy and stone[i][k] == team and stone[i + 1][k] == team and stone[i + 2][k] == 0):
                    score = 1
                    total_score = total_score + score
                # xoo: two, blocked
                #### diagonal ####
                if (stone[i - 2][k + 2] == team and stone[i - 1][k + 1] == team and stone[i][k] == team and stone[i + 1][
                    k - 1] == team and stone[i + 2][k - 2] == team):
                    score = 990000
                    total_score = total_score + score
                # ooooo: five in a row (win)
                if (stone[i - 2][k + 2] == team and stone[i - 1][k + 1] == team and stone[i][k] == team and stone[i + 1][
                    k - 1] == team and stone[i + 2][k - 2] == 0):
                    score = 50
                    total_score = total_score + score
                # oooo: four in a row, open end
                if (stone[i - 2][k + 2] == team and stone[i - 1][k + 1] == team and stone[i][k] == team and stone[i + 1][
                    k - 1] == team and stone[i + 2][k - 2] == enemy):
                    score = 40
                    total_score = total_score + score
                # oooox: four in a row, blocked
                if (stone[i - 2][k + 2] == enemy and stone[i - 1][k + 1] == team and stone[i][k] == team and stone[i + 1][
                    k - 1] == team and stone[i + 2][k - 2] == team):
                    score = 40
                    total_score = total_score + score
                # xoooo: four in a row, blocked
                if (stone[i - 2][k + 2] == 0 and stone[i - 1][k + 1] == team and stone[i][k] == team and stone[i + 1][
                    k - 1] == team and stone[i + 2][k - 2] == 0):
                    score = 7
                    total_score = total_score + score
                # ooo: open three
                if (stone[i - 2][k + 2] == 0 and stone[i - 1][k + 1] == team and stone[i][k] == team and stone[i + 1][
                    k - 1] == team and stone[i + 2][k - 2] == enemy):
                    score = 5
                    total_score = total_score + score
                # ooox: three, blocked
                if (stone[i - 2][k + 2] == enemy and stone[i - 1][k + 1] == team and stone[i][k] == team and stone[i + 1][
                    k - 1] == team and stone[i + 2][k - 2] == 0):
                    score = 5
                    total_score = total_score + score
                # xooo: three, blocked
                if (stone[i - 1][k + 1] == 0 and stone[i][k] == team and stone[i + 1][k - 1] == team and stone[i + 2][
                    k - 2] == 0):
                    score = 3
                    total_score = total_score + score
                # oo: open two
                if (stone[i - 1][k + 1] == 0 and stone[i][k] == team and stone[i + 1][k - 1] == team and stone[i + 2][
                    k - 2] == enemy):
                    score = 1
                    total_score = total_score + score
                # oox: two, blocked
                if (stone[i - 1][k + 1] == enemy and stone[i][k] == team and stone[i + 1][k - 1] == team and stone[i + 2][
                    k - 2] == 0):
                    score = 1
                    total_score = total_score + score
                # xoo: two, blocked
                #### anti-diagonal ####
                if (stone[i - 2][k - 2] == team and stone[i - 1][k - 1] == team and stone[i][k] == team and stone[i + 1][
                    k + 1] == team and stone[i + 2][k + 2] == team):
                    score = 990000
                    total_score = total_score + score
                # ooooo: five in a row (win)
                if (stone[i - 2][k - 2] == team and stone[i - 1][k - 1] == team and stone[i][k] == team and stone[i + 1][
                    k + 1] == team and stone[i + 2][k + 2] == 0):
                    score = 50
                    total_score = total_score + score
                # oooo: four in a row, open end
                if (stone[i - 2][k - 2] == team and stone[i - 1][k - 1] == team and stone[i][k] == team and stone[i + 1][
                    k + 1] == team and stone[i + 2][k + 2] == enemy):
                    score = 40
                    total_score = total_score + score
                # oooox: four in a row, blocked
                if (stone[i - 2][k - 2] == enemy and stone[i - 1][k - 1] == team and stone[i][k] == team and stone[i + 1][
                    k + 1] == team and stone[i + 2][k + 2] == team):
                    score = 40
                    total_score = total_score + score
                # xoooo: four in a row, blocked
                if (stone[i - 2][k - 2] == 0 and stone[i - 1][k - 1] == team and stone[i][k] == team and stone[i + 1][
                    k + 1] == team and stone[i + 2][k + 2] == 0):
                    score = 7
                    total_score = total_score + score
                # ooo: open three
                if (stone[i - 2][k - 2] == 0 and stone[i - 1][k - 1] == team and stone[i][k] == team and stone[i + 1][
                    k + 1] == team and stone[i + 2][k + 2] == enemy):
                    score = 5
                    total_score = total_score + score
                # ooox: three, blocked
                if (stone[i - 2][k - 2] == enemy and stone[i - 1][k - 1] == team and stone[i][k] == team and stone[i + 1][
                    k + 1] == team and stone[i + 2][k + 2] == 0):
                    score = 5
                    total_score = total_score + score
                # xooo: three, blocked
                if (stone[i - 1][k - 1] == 0 and stone[i][k] == team and stone[i + 1][k + 1] == team and stone[i + 2][
                    k + 2] == 0):
                    score = 3
                    total_score = total_score + score
                # oo: open two
                if (stone[i - 1][k - 1] == 0 and stone[i][k] == team and stone[i + 1][k + 1] == team and stone[i + 2][
                    k + 2] == enemy):
                    score = 1
                    total_score = total_score + score
                # oox: two, blocked
                if (stone[i - 1][k - 1] == enemy and stone[i][k] == team and stone[i + 1][k + 1] == team and stone[i + 2][
                    k + 2] == 0):
                    score = 1
                    total_score = total_score + score
                # xoo: two, blocked
                #### defense!! ####
                if (stone[i][k - 4] == enemy and stone[i][k - 2] == enemy and stone[i][k-3] == enemy and stone[i][k -1] == enemy and
                        stone[i][k] == team):
                    score = 1000
                    total_score = total_score + score
                if (stone[i][k - 2] == enemy and stone[i][k - 1] == enemy and stone[i][k] == enemy and stone[i][k + 1] == enemy and
                        stone[i][k + 2] == team):
                    score = 1000
                    total_score = total_score + score
                # oooox: blocking an enemy four (horizontal)
                if (stone[i][k - 2] == team and stone[i][k - 1] == enemy and stone[i][k] == enemy and stone[i][k + 1] == enemy and
                        stone[i][k + 2] == enemy):
                    score = 1000
                    total_score = total_score + score
                # xoooo: blocking an enemy four (horizontal)
                if (stone[i - 2][k] == enemy and stone[i - 1][k] == enemy and stone[i][k] == enemy and stone[i + 1][k] == enemy and
                        stone[i + 2][k] == team):
                    score = 1000
                    total_score = total_score + score
                # oooox: blocking an enemy four (vertical)
                if (stone[i - 2][k] == team and stone[i - 1][k] == enemy and stone[i][k] == enemy and stone[i + 1][k] == enemy and
                        stone[i + 2][k] == enemy):
                    score = 1000
                    total_score = total_score + score
                # xoooo: blocking an enemy four (vertical)
                if (stone[i - 2][k - 2] == enemy and stone[i - 1][k - 1] == enemy and stone[i][k] == enemy and stone[i + 1][
                    k + 1] == enemy and stone[i + 2][k + 2] == team):
                    score = 1000
                    total_score = total_score + score
                # oooox: blocking an enemy four (anti-diagonal)
                if (stone[i - 2][k - 2] == team and stone[i - 1][k - 1] == enemy and stone[i][k] == enemy and stone[i + 1][
                    k + 1] == enemy and stone[i + 2][k + 2] == enemy):
                    score = 1000
                    total_score = total_score + score
                # xoooo: blocking an enemy four (anti-diagonal)
                if (stone[i - 2][k + 2] == enemy and stone[i - 1][k + 1] == enemy and stone[i][k] == enemy and stone[i + 1][
                    k - 1] == enemy and stone[i + 2][k - 2] == team):
                    score = 1000
                    total_score = total_score + score
                # oooox: blocking an enemy four (diagonal)
                if (stone[i - 2][k + 2] == team and stone[i - 1][k + 1] == enemy and stone[i][k] == enemy and stone[i + 1][
                    k - 1] == enemy and stone[i + 2][k - 2] == enemy):
                    score = 1000
                    total_score = total_score + score
                # xoooo: blocking an enemy four (diagonal)
                if (stone[i][k - 1] == enemy and stone[i][k] == enemy and stone[i][k + 1] == enemy and stone[i][k + 2] == team):
                    if (stone[i][k - 2] == team):
                        score = 3 # other side already blocked; low defensive value
                    else:
                        score = 30
                    total_score = total_score + score
                # ooox: blocking an enemy three (horizontal)
                if (stone[i][k - 2] == team and stone[i][k - 1] == enemy and stone[i][k] == enemy and stone[i][k + 1] == enemy):
                    if (stone[i][k + 2] == team):
                        score = 3 # other side already blocked; low defensive value
                    else:
                        score = 250
                    total_score = total_score + score
                # xooo: blocking an enemy three (horizontal)
                if (stone[i - 1][k] == enemy and stone[i][k] == enemy and stone[i + 1][k] == enemy and stone[i + 2][k] == team):
                    if (stone[i - 2][k] == team):
                        score = 3 # other side already blocked; low defensive value
                    else:
                        score = 250
                    total_score = total_score + score
                # ooox: blocking an enemy three (vertical)
                if (stone[i - 2][k] == team and stone[i - 1][k] == enemy and stone[i][k] == enemy and stone[i + 1][k] == enemy):
                    if (stone[i + 2][k] == team):
                        score = 3 # other side already blocked; low defensive value
                    else:
                        score = 250
                    total_score = total_score + score
                # xooo: blocking an enemy three (vertical)
                if (stone[i - 1][k + 1] == enemy and stone[i][k] == enemy and stone[i + 1][k - 1] == enemy and stone[i + 2][
                    k - 2] == team):
                    if (stone[i - 2][k + 2] == team):
                        score = 3 # other side already blocked; low defensive value
                    else:
                        score = 250
                    total_score = total_score + score
                # ooox: blocking an enemy three (diagonal)
                if (stone[i - 2][k + 2] == team and stone[i - 1][k + 1] == enemy and stone[i][k] == enemy and stone[i + 1][
                    k - 1] == enemy):
                    if (stone[i + 2][k - 2] == team):
                        score = 3 # other side already blocked; low defensive value
                    else:
                        score = 250
                    total_score = total_score + score
                # xooo: blocking an enemy three (diagonal)
                if (stone[i - 1][k - 1] == enemy and stone[i][k] == enemy and stone[i + 1][k + 1] == enemy and stone[i + 2][
                    k + 2] == team):
                    if (stone[i - 2][k - 2] == team):
                        score = 3 # other side already blocked; low defensive value
                    else:
                        score = 250
                    total_score = total_score + score
                # ooox: blocking an enemy three (anti-diagonal)
                if (stone[i - 2][k - 2] == team and stone[i - 1][k - 1] == enemy and stone[i][k] == enemy and stone[i + 1][
                    k + 1] == enemy):
                    if (stone[i + 2][k + 2] == team):
                        score = 3 # other side already blocked; low defensive value
                    else:
                        score = 250
                    total_score = total_score + score
                # xooo: blocking an enemy three (anti-diagonal)
        return total_score
        # NOTE(review): the string below is unreachable (after return) and is
        # kept verbatim as commented-out candidate defense rules.
        '''
        if (stone[i][k - 1] == enemy and stone[i][k] == enemy and stone[i][k + 1] == enemy and stone[i][k + 2] == 0 and stone[i][k-2] == 0): # ooo 수비
            score = 400
            total_score = total_score + score
        if (stone[i-1][k] == enemy and stone[i][k] == enemy and stone[i+1][k] == enemy and stone[i+2][k] == 0 and stone[i-2][k] == 0):
            score = 400
            total_score = total_score + score
        if (stone[i-1][k - 1] == enemy and stone[i][k] == enemy and stone[i+1][k + 1] == enemy and stone[i+2][k + 2] == 0 and stone[i-2][k-2] == 0): #대각선
            score = 400
            total_score = total_score + score
        if (stone[i-1][k +1] == enemy and stone[i][k] == enemy and stone[i+1][k -1] == enemy and stone[i+2][k -2] == 0 and stone[i-2][k+2] == 0):
            score = 400
            total_score = total_score + score
        '''
    def next(self, board,length): # override
        """Pick the next move: tentatively place our stone on every empty
        cell, score the resulting board, and play the best-scoring cell."""
        print (" **** White player : My Turns **** ")
        stn = stone(self._color)
        temp = 0 # best quantified board score seen so far
        AI = 0
        count = 0
        for i in range(0, 19):
            for k in range(0, 19):
                if(board[i][k] == -1):
                    count = count +1
                if (board[i][k] == 0):
                    board[i][k] = 1 # cell is empty (0): tentatively place our stone
                    AI = self.ai_calculate(board) # returns how favorable this move is, as a number
                    if (temp <AI): # temp holds the previous best; keep this move if it scores higher
                        temp = AI
                        self.__max_x = i
                        self.__max_y= k
                    board[i][k] = 0
        if count == 1:
            # exactly one enemy stone on the board: shift the reply one row
            self.__max_x = self.__max_x+1
        print("white:",AI)
        stn.setX(self.__max_x)
        stn.setY(self.__max_y)
        return stn
    def check33(self,stone,i,k):
        """Check the double-three (3-3) condition around (i, k).

        Counts, per direction, open-three shapes of the -1 player that
        run through (i, k); returns False when more than one direction
        matches (forbidden 3-3), True otherwise.
        NOTE(review): the patterns are only evaluated when (i, k) lies at
        least 4 cells from every board edge -- cells nearer the border
        always return True. Confirm this is intended.
        """
        count =0
        if (i - 4 >= 0 and i + 4 < 19 and k - 4 >= 0 and k + 4 < 19):
            # horizontal open-three shapes
            if ((stone[i][k - 3] == 0 and stone[i][k - 2] == -1 and stone[i][
                k - 1] == -1 and stone[
                     i][k + 1] == 0) or
                    (stone[i][k - 2] == 0 and stone[i][k - 1] == -1 and stone[i][
                        k + 1] == -1 and stone[
                         i][k + 2] == 0) or
                    (stone[i][k - 1] == 0 and stone[i][k + 1] == -1 and stone[i][
                        k + 2] == -1 and stone[
                         i][k + 3] == 0) or
                    (stone[i][k - 4] == 0 and stone[i][k - 3] == -1 and stone[i][
                        k - 2] == -1 and stone[
                         i][k - 1] == 0 and stone[i][k + 1] == 0) or
                    (stone[i][k + 4] == 0 and stone[i][k + 3] == -1 and stone[i][
                        k + 2] == -1 and stone[
                         i][k + 1] == 0 and stone[i][k - 1] == 0) or
                    (stone[i][k - 2] == 0 and stone[i][k - 1] == -1 and stone[i][
                        k + 1] == 0 and stone[
                         i][k + 2] == -1 and stone[i][k + 3] == 0) or
                    (stone[i][k + 2] == 0 and stone[i][k + 1] == -1 and stone[i][
                        k - 1] == 0 and stone[
                         i][k - 2] == -1 and stone[i][k - 3] == 0)):
                count = count +1
            # vertical open-three shapes
            if ((stone[i - 3][k] == 0 and stone[i - 2][k] == -1 and stone[i - 1][
                k] == -1 and stone[
                     i + 1][k] == 0) or
                    (stone[i - 2][k] == 0 and stone[i - 1][k] == -1 and stone[i + 1][
                        k] == -1 and stone[
                         i + 2][k] == 0) or
                    (stone[i - 1][k] == 0 and stone[i + 1][k] == -1 and stone[i + 2][
                        k] == -1 and stone[
                         i + 3][k] == 0) or
                    (stone[i - 4][k] == 0 and stone[i - 3][k] == -1 and stone[i - 2][
                        k] == -1 and stone[
                         i - 1][k] == 0 and stone[i + 1][k] == 0) or
                    (stone[i + 4][k] == 0 and stone[i + 3][k] == -1 and stone[i + 2][
                        k] == -1 and stone[
                         i + 1][k] == 0 and stone[i - 1][k] == 0) or
                    (stone[i - 2][k] == 0 and stone[i - 1][k] == -1 and stone[i + 1][
                        k] == 0 and stone[
                         i + 2][k] == -1 and stone[i + 3][k] == 0) or
                    (stone[i + 2][k] == 0 and stone[i + 1][k] == -1 and stone[i - 1][
                        k] == 0 and stone[
                         i - 2][k] == -1 and stone[i - 3][k] == 0)):
                count = count +1
            # anti-diagonal open-three shapes
            if ((stone[i - 3][k - 3] == 0 and stone[i - 2][k - 2] == -1 and stone[i - 1][
                k - 1] == -1 and stone[
                     i + 1][k + 1] == 0) or
                    (stone[i - 2][k - 2] == 0 and stone[i - 1][k - 1] == -1 and stone[
                        i + 1][k + 1] == -1 and stone[i + 2][k + 2] == 0) or
                    (stone[i - 1][k - 1] == 0 and stone[i + 1][k + 1] == -1 and stone[
                        i + 2][k + 2] == -1 and stone[i + 3][k + 3] == 0) or
                    (stone[i - 3][k - 3] == 0 and stone[i - 2][k - 2] == -1 and stone[
                        i - 1][k - 1] == 0 and stone[i + 1][k + 1] == -1 and stone[i + 2][
                         k + 2] == 0) or
                    (stone[i + 3][k + 3] == 0 and stone[i + 2][k + 2] == -1 and stone[
                        i + 1][k + 1] == 0 and stone[i - 1][k - 1] == -1 and stone[i - 2][
                         k - 2] == 0) or
                    (stone[i - 4][k - 4] == 0 and stone[i - 3][k - 3] == -1 and stone[
                        i - 2][k - 2] == -1 and stone[i - 1][k - 1] == 0 and stone[i + 1][
                         k + 1] == 0) or
                    (stone[i + 4][k + 4] == 0 and stone[i + 3][k + 3] == -1 and stone[
                        i + 2][k + 2] == -1 and stone[i + 1][k + 1] == 0 and stone[i - 1][
                         k - 1] == 0)):
                count = count +1
            # diagonal open-three shapes
            if ((stone[i - 3][k + 3] == 0 and stone[i - 2][k + 2] == -1 and stone[i - 1][
                k + 1] == -1 and stone[
                     i + 1][k - 1] == 0) or
                    (stone[i - 2][k + 2] == 0 and stone[i - 1][k + 1] == -1 and stone[
                        i + 1][k - 1] == -1 and stone[i + 2][k - 2] == 0) or
                    (stone[i - 1][k + 1] == 0 and stone[i + 1][k - 1] == -1 and stone[
                        i + 2][k - 2] == -1 and stone[i + 3][k - 3] == 0) or
                    (stone[i - 3][k + 3] == 0 and stone[i - 2][k + 2] == -1 and stone[
                        i - 1][k + 1] == 0 and stone[i + 1][k - 1] == -1 and stone[i + 2][
                         k - 2] == 0) or
                    (stone[i + 3][k - 3] == 0 and stone[i + 2][k - 2] == -1 and stone[
                        i + 1][k - 1] == 0 and stone[i - 1][k + 1] == -1 and stone[i - 2][
                         k + 2] == 0) or
                    (stone[i - 4][k + 4] == 0 and stone[i - 3][k + 3] == -1 and stone[
                        i - 2][k + 2] == -1 and stone[i - 1][k + 1] == 0 and stone[i + 1][
                         k - 1] == 0) or
                    (stone[i + 4][k - 4] == 0 and stone[i + 3][k - 3] == -1 and stone[
                        i + 2][k - 2] == -1 and stone[i + 1][k - 1] == 0 and stone[i - 1][
                         k + 1] == 0)):
                count = count +1
        if count > 1:
            return False
        return True
|
from django.contrib import admin
from .models import Course, Module, Enrollment, Coupon, Subject, ModuleFile,Payment,Zoom
# Register your models here.
# Register every course-platform model with the default admin site.
for model in (Course, Module, Enrollment, Coupon, Subject,
              ModuleFile, Payment, Zoom):
    admin.site.register(model)
|
#!/usr/bin/env python3
import argparse
import os
import re
from contextlib import contextmanager
from typing import Iterable
@contextmanager
def working_directory(dir: str):
    """Context manager: chdir into *dir*, creating it if necessary, and
    always restore the previous working directory on exit.

    Args:
        dir: target directory path. (Parameter name kept for backward
            compatibility even though it shadows the builtin.)
    """
    prev_wd = os.getcwd()
    # exist_ok avoids the check-then-create race of the previous
    # os.path.exists() + os.makedirs() sequence.
    os.makedirs(dir, exist_ok=True)
    os.chdir(dir)
    try:
        yield
    finally:
        os.chdir(prev_wd)
def parse_page_info():
    """Parse the CLI arguments naming the zim page file and its
    attachment directory."""
    parser = argparse.ArgumentParser(description="Run python snippets "
                                                 "inside a zim file")
    positional = (
        ('attachment_dir', "attachment directory of the current dir"),
        ('page_path', "dir to the page file"),
    )
    for arg_name, arg_help in positional:
        parser.add_argument(arg_name, type=str, help=arg_help)
    return parser.parse_args()
def load_page(path: str) -> str:
    """Return the full text of the page file at *path*.

    Zim stores pages as UTF-8, so decode explicitly instead of relying
    on the platform default encoding.
    """
    with open(path, 'r', encoding='utf-8') as file:
        return file.read()
def extract_snippets(text: str) -> Iterable[str]:
    """Yield the body of every python/python3 code block in a zim page.

    BUG FIX: the language attribute was tested with re.match, which only
    matched when lang="..." was the FIRST attribute of the code block;
    re.search finds it anywhere in the attribute string (e.g.
    'linenumbers="True" lang="python"'). Patterns are also compiled once
    instead of re-built per block.
    """
    block_pattern = re.compile(r"{{{code: (?P<args>.*)\s(?P<code>(?:.|\s)*?)}}}")
    lang_pattern = re.compile(r"lang=\"(?:python|python3)\"")
    for match in block_pattern.finditer(text):
        if lang_pattern.search(match.group('args')):
            yield match.group('code')
if __name__ == '__main__':
    # Load the zim page, pull out its python snippets, and run each one
    # from inside the page's attachment directory.
    args = parse_page_info()
    page_text = load_page(args.page_path)
    for code in extract_snippets(page_text):
        with working_directory(args.attachment_dir):
            # NOTE(review): exec runs arbitrary code from the page with no
            # sandboxing -- only use on trusted zim notebooks.
            exec(code, {})
from typedclass.fields.data.f_binary.core import Binary
from typedclass.fields.data.f_bool.core import Bool
from typedclass.fields.data.f_date.core import Date
from typedclass.fields.data.f_datetime.core import DateTime
from typedclass.fields.data.f_decimal.core import Decimal
from typedclass.fields.data.f_int.core import Int
from typedclass.fields.data.f_float.core import Float
from typedclass.fields.data.f_string.core import String
from typedclass.fields.data.f_string_int.core import StringInt
from typedclass.fields.data.f_string_email.core import StringEmail
from typedclass.fields.data.f_string_english.core import StringEnglish
from typedclass.fields.data.f_time.core import Time
from typedclass.fields.rawlist.core import RawList
from typedclass.fields.rawmap.core import RawMap
from typedclass.fields.special.list import List
from typedclass.fields.special.ref import Ref
from typedclass.fields.special.set import Set
from typedclass.fields.typedmap.core import TypedMap
# Public field types re-exported at the package level; grouped by kind.
__all__ = (
    # data:
    'Binary',
    'Bool',
    'Date',
    'DateTime',
    'Decimal',
    'Int',
    'Float',
    'String',
    'Time',
    # extra-data, can be removed in future:
    'StringInt',
    'StringEmail',
    'StringEnglish',
    # special:
    'List',
    'Ref',
    'Set',
    # map-like:
    'RawMap',
    'TypedMap',
    # list-like:
    'RawList',
)
|
# coding=utf-8
import sys
def solution(long_string, alphabet):
    """Return the shortest substring of long_string containing every
    character of alphabet (with multiplicity); "" when none exists.

    Ties are broken by the earliest starting position, matching the
    original brute-force behaviour.

    Replaces an O(n*m) scan (which rebuilt the alphabet list for every
    index) with a single-pass O(n) sliding window over a Counter.
    """
    from collections import Counter
    if len(long_string) < len(alphabet) or not alphabet:
        return ""
    need = Counter(alphabet)      # chars still required (may go negative)
    missing = len(alphabet)       # required chars not yet in the window
    best_start, best_len = 0, len(long_string) + 1
    left = 0
    for right, ch in enumerate(long_string):
        if need[ch] > 0:
            missing -= 1
        need[ch] -= 1
        if missing == 0:
            # Shrink from the left while the leftmost char is surplus.
            while need[long_string[left]] < 0:
                need[long_string[left]] += 1
                left += 1
            if right - left + 1 < best_len:
                best_start, best_len = left, right - left + 1
            # Drop the leftmost (required) char and keep scanning.
            need[long_string[left]] += 1
            missing += 1
            left += 1
    if best_len > len(long_string):
        return ""
    return long_string[best_start:best_start + best_len]
if __name__ == "__main__":
    # Read the search string and the target alphabet from stdin,
    # one per line, and print the shortest covering substring.
    long_string = sys.stdin.readline().strip()
    alphabet = sys.stdin.readline().strip()
    print(solution(long_string, alphabet))
"""
Tests for app news_feed
"""
from textwrap import dedent
from django.test import TestCase
from django.db.utils import IntegrityError
from django.core.exceptions import ValidationError
from news_feed.models import NewsReport
DEFAULT_DICT_NEWS = {
'name': 'Test. Test2',
'preview': 'test',
'text': dedent('''\
Here's a small example of an article.
This is multi-line text.
It respects left padding.
''').strip()
}
DEFAULT_ARRAY_PARAGRAPH_TEXT = [
'Here\'s a small example of an article.',
'This is multi-line text.',
'It respects left padding.'
]
DEFAULT_SLUG = 'test-test2'
class NewsReportTestCase(TestCase):
    """Tests for the NewsReport model."""

    def setUp(self) -> None:
        # One saved report every test can rely on.
        self.news = NewsReport(**DEFAULT_DICT_NEWS)
        self.news.save()

    def test_true_raise_exception_when_creating_same_news(self) -> None:
        # Saving a duplicate report violates the unique constraint.
        with self.assertRaises(IntegrityError):
            NewsReport(**DEFAULT_DICT_NEWS).save()

    def test_not_raise_exception_with_update_news_in_admin(self):
        fetched = NewsReport.objects.get(slug=DEFAULT_SLUG)
        assert fetched.id == self.news.id
        # Validating an already-saved object must not raise.
        fetched.clean()

    def test_true_raise_exception_with_create_news_in_admin(self):
        duplicate = NewsReport(**DEFAULT_DICT_NEWS)
        # Admin-side validation rejects a second report with the same data.
        with self.assertRaises(ValidationError):
            duplicate.clean()

    def test_true_generate_slug(self):
        assert self.news.slug == DEFAULT_SLUG

    def test_get_array_paragraph_text(self) -> None:
        assert self.news.array_paragraph_text() == DEFAULT_ARRAY_PARAGRAPH_TEXT
|
import json
import logging
from urllib import urlencode
from urllib2 import quote, urlopen, URLError
logger = logging.getLogger(__name__)
# Philadelphia Office of Property Assessment (OPA) API endpoints.
BASE_URL = 'http://api.phila.gov/opa/v1.0/'
ADDRESS_ENDPOINT = 'address/'
ACCOUNT_ENDPOINT = 'account/'
# Query-string parameters appended to every request.
params = {
    'format': 'json',
}
def get_address_data(address):
    """Look up OPA property data for a street address.

    Returns the property dict for a unique match, a matching entry from
    the response's 'properties' list otherwise, or None when nothing
    matches or the API cannot be reached.

    Raises:
        ValueError: if address is falsy.

    BUG FIX: `data` is initialised before the try block; previously a
    KeyError raised while extracting response['data'] left `data`
    unbound, so the handler crashed with a NameError.
    """
    if not address:
        raise ValueError('address must not be None')
    url = BASE_URL + ADDRESS_ENDPOINT + quote(address) + '/?' + urlencode(params)
    data = {}
    try:
        data = json.load(urlopen(url, None, 30))['data']
        return data['property']
    except KeyError:
        logger.debug(('Could not find unique property in response for %s. '
                      'Trying by account number') % address)
        # Try to find a matching property in the response's properties
        for prop in data.get('properties', []):
            if prop['full_address'].lower() == address.lower():
                return prop
        logger.debug('Could not find property by account number, either.')
        return None
    except URLError:
        logger.exception('Exception while querying OPA API with URL "%s"' %
                         url)
def get_account_data(account):
    """Fetch OPA property data by account number.

    Best-effort: any failure (bad account, network error, unexpected
    response shape) is logged and reported to the caller as None.
    """
    try:
        url = BASE_URL + ACCOUNT_ENDPOINT + account + '?' + urlencode(params)
        response = urlopen(url)
        return json.load(response)['data']['property']
    except Exception:
        logger.exception('Exception while getting OPA data for account %s'
                         % str(account))
        return None
|
import os
import time
from datetime import datetime
from shutil import which
from urllib.parse import urljoin, urlparse
import scrapy
from scrapy_selenium import SeleniumRequest
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
class NaverBlogSpider(scrapy.Spider):
    """Crawl the Naver blog home feed (pages 1-100) and store each post
    in MongoDB via the Mongodb pipeline."""
    name = 'naver_blog'
    custom_settings = {
        'ITEM_PIPELINES': {
            'crawling.pipelines.MongodbPipeline': 100
        },
        'DOWNLOADER_MIDDLEWARES': {
            'scrapy_selenium.SeleniumMiddleware': 100
        }
    }
    # Connection info consumed by the Mongodb pipeline.
    mongo_config = {
        'host': os.environ.get('MONGO_HOST', 'localhost:27017'),
        'db': 'crawling',
        'collection': 'naver_blog'
    }
    def start_requests(self):
        # Render the first listing page with Selenium and wait until the
        # post metadata element has appeared.
        yield SeleniumRequest(url=self.generate_url(1),
                              wait_time=5,
                              wait_until=EC.presence_of_element_located((By.CLASS_NAME, "info_post")),
                              callback=self.parse_page,
                              meta={'page': 1})
    def parse_page(self, response):
        """Queue the next listing page, then every article on this one."""
        article_urls = response.css('a.desc_inner::attr("href")').extract()
        page = response.meta['page']
        # Paginate only while the listing is non-empty, up to page 100.
        if len(article_urls) and page <= 100:
            yield SeleniumRequest(url=self.generate_url(page + 1),
                                  wait_time=5,
                                  wait_until=EC.presence_of_element_located((By.CLASS_NAME, "info_post")),
                                  callback=self.parse_page,
                                  meta={'page': page + 1})
        for url in article_urls:
            yield scrapy.Request(url=url, callback=self.parse_article)
    def parse_article(self, response):
        """Extract one blog post; follow the iframe wrapper when present."""
        if response.css('#mainFrame'):
            # The real post lives inside an iframe; fetch its source URL.
            path = response.css('#mainFrame::attr("src")').get()
            return scrapy.Request(url=urljoin('https://blog.naver.com', path), callback=self.parse_article)
        result = {
            '_id': response.css('meta[property="og:url"]::attr("content")').get(),
            'crawled_at': datetime.utcnow(),
            'title': response.css('meta[property="og:title"]::attr("content")').get(),
            'content': response.css('div.se-main-container').get(),
            'url': response.url
        }
        return result
    def generate_url(self, page):
        """Return the home-feed listing URL for a 1-based page number.

        BUG FIX: the query string had been HTML-entity mangled -- the
        '&curren' of '&currentPage' collapsed into the '\u00a4' character,
        producing a broken '\u00a4tPage' parameter. Restored '&currentPage'.
        """
        BASE_URL = 'https://section.blog.naver.com/BlogHome.nhn?directoryNo=0&currentPage={page}&groupId=0'
        return BASE_URL.format(page=page)
|
import numpy as np
from utils.signal_stats import detect_R_peak
import random
class HeartBeatSegmenter(object):
    """Split an ECG signal into individual heartbeats around R peaks.

    Args:
        segment_size (int): desired number of samples per heartbeat.
        take_average (bool): if True, collapse each recording's beats
            into a single averaged template beat.
    """
    def __init__(self, segment_size=110, take_average=False):
        self.segment_size = segment_size
        self.take_average = take_average

    def __call__(self, sample):
        # Locate R peaks, then work on the raw numpy signal.
        r_peaks = detect_R_peak(sample)
        signal = sample.cpu().numpy()
        half = int(self.segment_size // 2)
        collected = []
        for rec_idx in range(len(signal)):
            beats = []
            # Skip the first and last R peak: their windows could run off
            # the ends of the recording.
            for peak in r_peaks[rec_idx][1: -1]:
                # Window is asymmetric: 40% of the size before the peak,
                # 60% after (0.8 / 1.2 of the half-size).
                start = int(peak) - int(half * 0.8)
                stop = int(peak) + int(half * 1.2)
                beat = signal[rec_idx][start:stop]
                if len(beat) == self.segment_size:
                    beats.append(beat)
            if self.take_average is True:
                beats = [np.mean(beats, axis=0)]
            collected.extend(beats)
        return np.array(collected)
class PairedHeartBeatSegmenter(object):
    """Split an ECG signal into pairs of consecutive heartbeats.

    Args:
        segment_size (int): desired number of samples per beat pair.
        take_average (bool): if True, collapse each recording's pairs
            into a single averaged template pair.
    """
    def __init__(self, segment_size=230, take_average=False):
        self.segment_size = segment_size
        self.take_average = take_average

    def __call__(self, sample):
        # find out the R peaks
        r_peak = detect_R_peak(sample)
        sample = sample.cpu().numpy()
        all_segments = []
        for recording in range(len(sample)):
            segments = []
            # throw away first and last R peaks as it may lead to segments
            # of len less than desired length.
            for i in range(1, len(r_peak[recording])-1):
                curr_peak = r_peak[recording][i]
                next_peak = r_peak[recording][i+1]
                # pad the window so the pair is exactly segment_size long:
                # 40% of the padding before the first peak, 60% after the second
                pad_size = self.segment_size - (next_peak-curr_peak)
                pair_begining = int(curr_peak)-int(np.round(pad_size*0.4))
                pair_end = int(next_peak)+int(np.round(pad_size*0.6))
                new_heart_beat = sample[recording][pair_begining:pair_end]
                if len(new_heart_beat) != self.segment_size:
                    continue
                segments.append(new_heart_beat)
            if self.take_average is True:
                # BUG FIX: wrap the averaged pair in a list, as
                # HeartBeatSegmenter does. Extending with the bare 1-D
                # mean array appended its individual samples instead of
                # one averaged segment.
                segments = [np.mean(segments, axis=0)]
            all_segments.extend(segments)
        return np.array(all_segments)
class RandomPairedHeartBeatSegmenter(object):
    """Build averaged heartbeat templates from random subsets of beats.

    Args:
        segment_size (int): desired number of samples per beat window.
        nsegments (int): number of beats averaged into each template.
        nrep (int): number of random templates produced per recording.
    """
    def __init__(self, segment_size=230, nsegments=8, nrep=100):
        self.segment_size = segment_size
        self.nsegments = nsegments
        self.nrep = nrep

    def __call__(self, sample):
        # find out the R peaks
        r_peak = detect_R_peak(sample)
        sample = sample.cpu().numpy()
        all_segments = []
        half_size_int = int(self.segment_size // 2)
        for recording in range(len(sample)):
            segments = []
            # throw away first and last R peaks as it may lead to segments
            # of len less than desired length.
            for i in r_peak[recording][1: -1]:
                # window: 20% of the size before the peak, 80% after
                new_heart_beat = sample[recording][int(
                    i) - int(half_size_int * 0.4): int(i) + int(half_size_int * 1.6)]
                if len(new_heart_beat) != self.segment_size:
                    continue
                segments.append(new_heart_beat)
            for _ in range(self.nrep):
                # BUG FIX: random.sample() rejects set populations on
                # Python 3.9+ (TypeError from 3.11); sample directly from
                # the index range instead of set(range(...)).
                if len(segments) >= self.nsegments:
                    random_index = random.sample(range(len(segments)), self.nsegments)
                else:
                    random_index = range(len(segments))
                random_segments = np.array([segments[index] for index in random_index])
                average_segment = np.mean(random_segments, axis=0)
                all_segments.append(average_segment)
        return np.array(all_segments)
|
# EXERCÍCIO Nº 16- LISTA 04 - LISTAS
print('\nVendedoes e comissões')
print('#####################\n')
faixas_salariais=[(200,299),(300,399),(400,499),(500,599),(600,699),(700,799),(800,899),(900,999),(1000,'em diante')]
salarioBase=200
valorvendas=1
vendas = [0, 0, 0, 0, 0, 0, 0, 0, 0]
a=0
while valorvendas!=0:
valorvendas = float(input('Informe o valor das vendas do vendedor ou digite "0" para encerrar: '))
if valorvendas != 0:
salario = valorvendas * 0.09 + salarioBase
indice = int(salario / 100)-1
if indice >=9:
indice=9
vendas[indice-1]+=1
a=+1
if valorvendas!=0 or a!=0:
print('###############################################')
print('As vendas foram as seguintes:')
for i in range (0,9):
print('-',vendas[i],'vendas','na faixa de',faixas_salariais[i],'reais')
print('###############################################')
print('As vendas foram as seguintes:')
for i in range (0,9):
print('-',vendas[i],'vendas','na faixa de R$',(i*100+salarioBase),'a R$',(i+1)*100+199)
elif valorvendas==0 and a==0:
print('Programa encerrado')
|
from SVM import SVM
from SVM import *
from bagged_forest import build_roots, setToN
import numpy as np
import dt
import copy
def data_set_from_roots(roots, X, debug=False):
transformed = []
for x in copy.deepcopy(X):
predictions = []
for root in roots:
prediction = root.predict(x)
predictions.append(prediction)
transformed.append(np.array(predictions))
return np.array(transformed)
def run_svm_trees(X_train, Y_train, X_test, Y_test, XCV, YCV, CV_Labels):
    # Train an SVM over bagged-decision-tree features ("SVM over trees"):
    # cross-validate learning rate / trade-off, then retrain on the full
    # training set with the best hyper-parameters and report accuracies.
    # Python 2 code (print statements).
    print '******************* SVM TREES *****************'
    # Largest feature index seen anywhere; examples are sparse index lists.
    feature_count = 0
    for example in X_train:
        feature_count = max(feature_count, max(example))
    for example in X_test:
        feature_count = max(feature_count, max(example))
    X_train, Y_train = convert_collection(X_train, Y_train, feature_count)
    X_test, Y_test = convert_collection(X_test, Y_test, feature_count)
    # Convert every cross-validation fold in place.
    for i in range(0, len(XCV)):
        new_x, new_y = convert_collection(XCV[i], YCV[i], feature_count)
        XCV[i] = new_x
        YCV[i] = new_y
    learning_rates = [10**0, 10**-1, 10**-2, 10**-3, 10**-4, 10**-5]
    trade_offs = [10**-1, 10**0, 10**1, 10**2, 10**3, 10**4]
    depths = [3]
    depth = 3
    best_avg = 0.0
    best_learning_rate = 0.1
    best_trade_off = 10000
    best_depth = depths[0]
    attrs = setToN(feature_count)
    # Grid search over (learning_rate, trade_off[, depth]) with leave-one-
    # fold-out CV: train trees + SVM on the merged other folds, score on
    # the held-out fold.
    for learning_rate in learning_rates:
        for trade_off in trade_offs:
            current_avg = 0.0
            for depth in depths:
                for (X, Y, data_label, i) in zip(XCV, YCV, CV_Labels, range(0, len(XCV))):
                    X_cv, Y_cv = merged_excluding(XCV, YCV, i)
                    roots = build_roots(X_cv, Y_cv, attrs, depth)
                    transformed_X = data_set_from_roots(roots, X_cv)
                    weight_vector = compute_weights(transformed_X, Y_cv, len(roots), learning_rate=learning_rate, tradeoff=trade_off)
                    predictions = get_predictions(weight_vector, data_set_from_roots(roots, XCV[i]))
                    accuracy = score(predictions, YCV[i])
                    current_avg += accuracy
                    # Reset memoized state in the decision-tree module
                    # between folds.
                    dt.zeroed.clear()
                    print 'SVM Trees: Accuracy = %.3f, Learning rate = %.5f, Trade off = %.3f, Depth = %d on test set %s' % (accuracy, learning_rate, trade_off, depth, data_label)
                    # NOTE(review): `i == 4` hard-codes "last of 5 folds" so
                    # the comparison only fires after the average is complete,
                    # and `current_avg` is overwritten with the mean inside
                    # the branch — fragile if the fold count changes. Verify.
                    if current_avg / len(XCV) > best_avg and i == 4:
                        current_avg = current_avg / len(XCV)
                        best_avg = current_avg
                        best_learning_rate = learning_rate
                        best_trade_off = trade_off
                        best_depth = depth
    print 'Best learning rate = %.5f and trade off = %.5f and depth = %d with an average accuracy of %.3f' % (best_learning_rate, best_trade_off, best_depth, best_avg)
    # Retrain on the full training set with the selected hyper-parameters.
    roots = build_roots(X_train, Y_train, attrs, 3)
    transformed_X = data_set_from_roots(roots, X_train)
    weight_vector = compute_weights(transformed_X, Y_train, len(roots), learning_rate=best_learning_rate, tradeoff=best_trade_off)
    predictions = get_predictions(weight_vector, transformed_X)
    accuracy = score(predictions, Y_train)
    print 'SVM Trees train accuracy with best hyperparams = %.3f' % accuracy
    predictions = get_predictions(weight_vector, data_set_from_roots(roots, X_test))
    accuracy = score(predictions, Y_test)
    print 'SVM Trees test accuracy with best hyperparams = %.3f' % accuracy
|
#!/usr/bin/env python3
# Define function ...
def return_video_crop_parameters(fname, /, *, debug = False, dt = 2.0, playlist = -1, timeout = 60.0):
    """Survey a video at several points and return its crop parameters.

    Runs ffmpeg's "cropdetect" filter over short windows at several
    fractions of the duration, keeps the largest detected x/y offsets, and
    returns (outX, outY, outW, outH, "W:H:X:Y") for an ffmpeg crop filter.

    Raises an Exception if a Blu-ray is given without a playlist, if
    ffmpeg is missing, or if no sane cropped size could be derived.
    """

    # Import standard modules ...
    import shutil
    import subprocess

    # Import sub-functions ...
    from .return_media_duration import return_media_duration
    from .return_video_height import return_video_height
    from .return_video_width import return_video_width

    # Check input ...
    if fname.startswith("bluray:") and playlist < 0:
        raise Exception("a Blu-ray was specified but no playlist was supplied") from None

    # Check that "ffmpeg" is installed ...
    if shutil.which("ffmpeg") is None:
        raise Exception("\"ffmpeg\" is not installed") from None

    def _cropdetect(inputArgs):
        # Run "ffmpeg" with the "cropdetect" filter for "dt" seconds and
        # return the completed process (cropdetect logging captured on
        # stdout). "inputArgs" carries the seek/input options that differ
        # between the Blu-ray, regular and raw M-JPEG cases.
        return subprocess.run(
            [
                "ffmpeg",
                "-hide_banner",
                "-probesize", "3G",
                "-analyzeduration", "1800M",
            ] + inputArgs + [
                "-an",
                "-sn",
                "-t", f"{dt:f}",
                "-vf", "cropdetect",
                "-y",
                "-f", "null",
                "/dev/null"
            ],
            check = True,
            encoding = "utf-8",
            stderr = subprocess.STDOUT,
            stdout = subprocess.PIPE,
            timeout = timeout,
        )

    # Initialize variables ...
    dur = return_media_duration(
        fname,
           debug = debug,
        playlist = playlist,
         timeout = timeout,
    )                                                                           # [s]
    inW = return_video_width(
        fname,
           debug = debug,
        playlist = playlist,
         timeout = timeout,
    )                                                                           # [px]
    inH = return_video_height(
        fname,
           debug = debug,
        playlist = playlist,
         timeout = timeout,
    )                                                                           # [px]
    outX = 0                                                                    # [px]
    outY = 0                                                                    # [px]

    # Loop over fractions ...
    for frac in [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]:
        # Deduce start time ...
        t = frac * dur - dt / 2.0                                               # [s]

        # Check if it is a Blu-ray ...
        if fname.startswith("bluray:"):
            # Find crop parameters ...
            resp = _cropdetect(
                ["-playlist", f"{playlist:d}", "-ss", f"{t:.3f}", "-i", fname]
            )
        else:
            # Attempt to survey the file ...
            try:
                # Find crop parameters ...
                resp = _cropdetect(["-ss", f"{t:.3f}", "-i", fname])
            except subprocess.CalledProcessError:
                # Fallback and attempt to find crop parameters as a raw
                # M-JPEG stream ...
                resp = _cropdetect(
                    ["-ss", f"{t:.3f}", "-f", "mjpeg", "-i", fname]
                )

        # Loop over lines ...
        for line in resp.stdout.splitlines():
            # Skip irrelevant lines ...
            if not line.startswith("[Parsed_cropdetect"):
                continue

            # Extract the information part of the line and make a dictionary
            # of all of the key+value pairs ...
            db = {}
            info = line.strip().split("]")[-1]
            for keyvalue in info.strip().split():
                if keyvalue.count(":") == 1:
                    key, value = keyvalue.split(":")
                elif keyvalue.count("=") == 1:
                    key, value = keyvalue.split("=")
                else:
                    raise Exception(f"an unexpected string format was encountered (\"{keyvalue}\")") from None
                db[key] = value

            # Update variables (keep the most aggressive offsets seen) ...
            outX = max(outX, int(db["x"]))                                      # [px]
            outY = max(outY, int(db["y"]))                                      # [px]

    # Update variables (crop is symmetric, so the offset is removed twice) ...
    outW = inW - 2 * outX                                                       # [px]
    outH = inH - 2 * outY                                                       # [px]
    cropParams = f"{outW:d}:{outH:d}:{outX:d}:{outY:d}"

    # Check results ...
    if outW > inW or outW <= 0:
        raise Exception(f"failed to find cropped width (inW = {inW:d}, inH = {inH:d}, outX = {outX:d}, outY = {outY:d}, outW = {outW:d}, outH = {outH:d})") from None
    if outH > inH or outH <= 0:
        raise Exception(f"failed to find cropped height (inW = {inW:d}, inH = {inH:d}, outX = {outX:d}, outY = {outY:d}, outW = {outW:d}, outH = {outH:d})") from None

    # Return top-left corner, width, height and FFMPEG crop parameter string ...
    return outX, outY, outW, outH, cropParams
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import io
from unittest import mock
import boto3
import pytest
from moto import mock_s3
from airflow import AirflowException
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.providers.amazon.aws.operators.s3 import (
S3CopyObjectOperator,
S3CreateObjectOperator,
S3DeleteObjectsOperator,
)
# Shared fixtures for the S3 operator tests below.
S3_BUCKET = "test-airflow-bucket"  # bucket used by the create-object tests
S3_KEY = "test-airflow-key"  # object key used by the create-object tests
TASK_ID = "test-s3-operator"  # common Airflow task id
class TestS3CopyObjectOperator:
    """Exercise S3CopyObjectOperator with both supported argument styles:
    separate bucket/key parameters, and full s3:// URLs."""

    def setup_method(self):
        self.source_bucket = "bucket1"
        self.source_key = "path1/data.txt"
        self.dest_bucket = "bucket2"
        self.dest_key = "path2/data_copy.txt"

    def _prepare_buckets(self):
        # Create both buckets and seed the source object; return the client.
        client = boto3.client("s3")
        client.create_bucket(Bucket=self.source_bucket)
        client.create_bucket(Bucket=self.dest_bucket)
        client.upload_fileobj(Bucket=self.source_bucket, Key=self.source_key, Fileobj=io.BytesIO(b"input"))
        return client

    def _assert_copied(self, client):
        # Exactly one object must exist under the expected destination key.
        listing = client.list_objects(Bucket=self.dest_bucket, Prefix=self.dest_key)
        assert len(listing["Contents"]) == 1
        assert listing["Contents"][0]["Key"] == self.dest_key

    @mock_s3
    def test_s3_copy_object_arg_combination_1(self):
        client = self._prepare_buckets()
        # Nothing should exist at the destination before the copy runs.
        assert "Contents" not in client.list_objects(Bucket=self.dest_bucket, Prefix=self.dest_key)
        S3CopyObjectOperator(
            task_id="test_task_s3_copy_object",
            source_bucket_key=self.source_key,
            source_bucket_name=self.source_bucket,
            dest_bucket_key=self.dest_key,
            dest_bucket_name=self.dest_bucket,
        ).execute(None)
        self._assert_copied(client)

    @mock_s3
    def test_s3_copy_object_arg_combination_2(self):
        client = self._prepare_buckets()
        # Nothing should exist at the destination before the copy runs.
        assert "Contents" not in client.list_objects(Bucket=self.dest_bucket, Prefix=self.dest_key)
        S3CopyObjectOperator(
            task_id="test_task_s3_copy_object",
            source_bucket_key=f"s3://{self.source_bucket}/{self.source_key}",
            dest_bucket_key=f"s3://{self.dest_bucket}/{self.dest_key}",
        ).execute(None)
        self._assert_copied(client)
@mock_s3
class TestS3DeleteObjectsOperator:
    """Tests for S3DeleteObjectsOperator against a moto-mocked S3 backend."""

    def test_s3_delete_single_object(self):
        bucket = "testbucket"
        key = "path/data.txt"
        conn = boto3.client("s3")
        conn.create_bucket(Bucket=bucket)
        conn.upload_fileobj(Bucket=bucket, Key=key, Fileobj=io.BytesIO(b"input"))
        # The object should be detected before the DELETE action is taken
        objects_in_dest_bucket = conn.list_objects(Bucket=bucket, Prefix=key)
        assert len(objects_in_dest_bucket["Contents"]) == 1
        assert objects_in_dest_bucket["Contents"][0]["Key"] == key
        op = S3DeleteObjectsOperator(task_id="test_task_s3_delete_single_object", bucket=bucket, keys=key)
        op.execute(None)
        # There should be no object found in the bucket created earlier
        assert "Contents" not in conn.list_objects(Bucket=bucket, Prefix=key)

    def test_s3_delete_multiple_objects(self):
        bucket = "testbucket"
        key_pattern = "path/data"
        n_keys = 3
        keys = [key_pattern + str(i) for i in range(n_keys)]
        conn = boto3.client("s3")
        conn.create_bucket(Bucket=bucket)
        for k in keys:
            conn.upload_fileobj(Bucket=bucket, Key=k, Fileobj=io.BytesIO(b"input"))
        # The objects should be detected before the DELETE action is taken
        objects_in_dest_bucket = conn.list_objects(Bucket=bucket, Prefix=key_pattern)
        assert len(objects_in_dest_bucket["Contents"]) == n_keys
        assert sorted(x["Key"] for x in objects_in_dest_bucket["Contents"]) == sorted(keys)
        op = S3DeleteObjectsOperator(task_id="test_task_s3_delete_multiple_objects", bucket=bucket, keys=keys)
        op.execute(None)
        # There should be no object found in the bucket created earlier
        assert "Contents" not in conn.list_objects(Bucket=bucket, Prefix=key_pattern)

    def test_s3_delete_prefix(self):
        bucket = "testbucket"
        key_pattern = "path/data"
        n_keys = 3
        keys = [key_pattern + str(i) for i in range(n_keys)]
        conn = boto3.client("s3")
        conn.create_bucket(Bucket=bucket)
        for k in keys:
            conn.upload_fileobj(Bucket=bucket, Key=k, Fileobj=io.BytesIO(b"input"))
        # The objects should be detected before the DELETE action is taken
        objects_in_dest_bucket = conn.list_objects(Bucket=bucket, Prefix=key_pattern)
        assert len(objects_in_dest_bucket["Contents"]) == n_keys
        assert sorted(x["Key"] for x in objects_in_dest_bucket["Contents"]) == sorted(keys)
        op = S3DeleteObjectsOperator(task_id="test_task_s3_delete_prefix", bucket=bucket, prefix=key_pattern)
        op.execute(None)
        # There should be no object found in the bucket created earlier
        assert "Contents" not in conn.list_objects(Bucket=bucket, Prefix=key_pattern)

    def test_s3_delete_empty_list(self):
        bucket = "testbucket"
        key_of_test = "path/data.txt"
        keys = []
        conn = boto3.client("s3")
        conn.create_bucket(Bucket=bucket)
        conn.upload_fileobj(Bucket=bucket, Key=key_of_test, Fileobj=io.BytesIO(b"input"))
        # The object should be detected before the DELETE action is tested
        objects_in_dest_bucket = conn.list_objects(Bucket=bucket, Prefix=key_of_test)
        assert len(objects_in_dest_bucket["Contents"]) == 1
        assert objects_in_dest_bucket["Contents"][0]["Key"] == key_of_test
        op = S3DeleteObjectsOperator(task_id="test_s3_delete_empty_list", bucket=bucket, keys=keys)
        op.execute(None)
        # BUG FIX: re-query S3 after execute(); the original re-asserted the
        # response captured *before* execute(), which could never fail.
        objects_in_dest_bucket = conn.list_objects(Bucket=bucket, Prefix=key_of_test)
        # The object found in the bucket created earlier should still be there
        assert len(objects_in_dest_bucket["Contents"]) == 1
        # the object found should be consistent with dest_key specified earlier
        assert objects_in_dest_bucket["Contents"][0]["Key"] == key_of_test

    def test_s3_delete_empty_string(self):
        bucket = "testbucket"
        key_of_test = "path/data.txt"
        keys = ""
        conn = boto3.client("s3")
        conn.create_bucket(Bucket=bucket)
        conn.upload_fileobj(Bucket=bucket, Key=key_of_test, Fileobj=io.BytesIO(b"input"))
        # The object should be detected before the DELETE action is tested
        objects_in_dest_bucket = conn.list_objects(Bucket=bucket, Prefix=key_of_test)
        assert len(objects_in_dest_bucket["Contents"]) == 1
        assert objects_in_dest_bucket["Contents"][0]["Key"] == key_of_test
        op = S3DeleteObjectsOperator(task_id="test_s3_delete_empty_string", bucket=bucket, keys=keys)
        op.execute(None)
        # BUG FIX: re-query S3 after execute() (see test_s3_delete_empty_list).
        objects_in_dest_bucket = conn.list_objects(Bucket=bucket, Prefix=key_of_test)
        # The object found in the bucket created earlier should still be there
        assert len(objects_in_dest_bucket["Contents"]) == 1
        # the object found should be consistent with dest_key specified earlier
        assert objects_in_dest_bucket["Contents"][0]["Key"] == key_of_test

    @pytest.mark.parametrize(
        "keys, prefix",
        [
            pytest.param("path/data.txt", "path/data", id="single-key-and-prefix"),
            pytest.param(["path/data.txt"], "path/data", id="multiple-keys-and-prefix"),
            pytest.param(None, None, id="both-none"),
        ],
    )
    def test_validate_keys_and_prefix_in_constructor(self, keys, prefix):
        # Supplying both keys and prefix (or neither) must be rejected early.
        with pytest.raises(AirflowException, match=r"Either keys or prefix should be set\."):
            S3DeleteObjectsOperator(
                task_id="test_validate_keys_and_prefix_in_constructor",
                bucket="foo-bar-bucket",
                keys=keys,
                prefix=prefix,
            )

    @pytest.mark.parametrize(
        "keys, prefix",
        [
            pytest.param("path/data.txt", "path/data", id="single-key-and-prefix"),
            pytest.param(["path/data.txt"], "path/data", id="multiple-keys-and-prefix"),
            pytest.param(None, None, id="both-none"),
        ],
    )
    def test_validate_keys_and_prefix_in_execute(self, keys, prefix):
        bucket = "testbucket"
        key_of_test = "path/data.txt"
        conn = boto3.client("s3")
        conn.create_bucket(Bucket=bucket)
        conn.upload_fileobj(Bucket=bucket, Key=key_of_test, Fileobj=io.BytesIO(b"input"))
        # Set valid values for constructor, and change them later for emulate rendering template
        op = S3DeleteObjectsOperator(
            task_id="test_validate_keys_and_prefix_in_execute",
            bucket=bucket,
            keys="keys-exists",
            prefix=None,
        )
        op.keys = keys
        op.prefix = prefix
        # The object should be detected before the DELETE action is tested
        objects_in_dest_bucket = conn.list_objects(Bucket=bucket, Prefix=key_of_test)
        assert len(objects_in_dest_bucket["Contents"]) == 1
        assert objects_in_dest_bucket["Contents"][0]["Key"] == key_of_test
        with pytest.raises(AirflowException, match=r"Either keys or prefix should be set\."):
            op.execute(None)
        # BUG FIX: re-query S3 after execute() (see test_s3_delete_empty_list).
        objects_in_dest_bucket = conn.list_objects(Bucket=bucket, Prefix=key_of_test)
        # The object found in the bucket created earlier should still be there
        assert len(objects_in_dest_bucket["Contents"]) == 1
        # the object found should be consistent with dest_key specified earlier
        assert objects_in_dest_bucket["Contents"][0]["Key"] == key_of_test
class TestS3CreateObjectOperator:
    """Verify S3CreateObjectOperator dispatches to the right S3Hook loader
    for string vs. bytes payloads, and parses s3:// URLs for the key."""

    @mock.patch.object(S3Hook, "load_string")
    def test_execute_if_data_is_string(self, mock_load_string):
        payload = "data"
        S3CreateObjectOperator(
            task_id=TASK_ID,
            s3_bucket=S3_BUCKET,
            s3_key=S3_KEY,
            data=payload,
        ).execute(None)
        mock_load_string.assert_called_once_with(payload, S3_KEY, S3_BUCKET, False, False, None, None, None)

    @mock.patch.object(S3Hook, "load_bytes")
    def test_execute_if_data_is_bytes(self, mock_load_bytes):
        payload = b"data"
        S3CreateObjectOperator(
            task_id=TASK_ID,
            s3_bucket=S3_BUCKET,
            s3_key=S3_KEY,
            data=payload,
        ).execute(None)
        mock_load_bytes.assert_called_once_with(payload, S3_KEY, S3_BUCKET, False, False, None)

    @mock.patch.object(S3Hook, "load_string")
    def test_execute_if_s3_bucket_not_provided(self, mock_load_string):
        # Bucket is embedded in the s3:// URL instead of passed separately.
        payload = "data"
        S3CreateObjectOperator(
            task_id=TASK_ID,
            s3_key=f"s3://{S3_BUCKET}/{S3_KEY}",
            data=payload,
        ).execute(None)
        mock_load_string.assert_called_once_with(payload, S3_KEY, S3_BUCKET, False, False, None, None, None)
|
from enum import Enum
from enum import auto
import pyglet
from button import Button
from chunkmanager import ChunkManager
from scoremanager import ScoreManager
from configmanager import ConfigManager
class MineField:
    """In-game state: draws the chunked mine field plus the score HUD."""

    # Window size comes from the shared config; the HUD labels below are
    # constructed once, at class-definition (import) time, and positioned
    # relative to the top-left of the window.
    window_size = ConfigManager.config_dict.get("window_size")
    score_label = pyglet.text.Label('score: ' + str(0),
                                    font_name='Times New Roman',
                                    font_size=12,
                                    x=50, y=window_size[1] - 50,
                                    anchor_x='left', anchor_y='center')
    clearedtiles_label = pyglet.text.Label('Tiles cleared: ' + str(0),
                                           font_name='Times New Roman',
                                           font_size=12,
                                           x=50, y=window_size[1] - 75,
                                           anchor_x='left', anchor_y='center')

    @classmethod
    def draw(cls, offset, window):
        """Refresh the HUD text, draw visible chunks (generating any newly
        needed ones), then draw the labels on top."""
        cls.score_label.text = 'score: ' + str(ScoreManager.getscore())
        cls.clearedtiles_label.text = 'Tiles cleared: ' + str(ScoreManager.getclearedtiles())
        ChunkManager.screenspaceocclude_drawchunks(offset, (window.width, window.height))
        ChunkManager.updategenchunks(offset, (window.width, window.height))
        cls.score_label.draw()
        cls.clearedtiles_label.draw()
class MainMenu:
    """Main-menu state: draws the background chunks and the menu buttons."""

    window_size = ConfigManager.config_dict.get("window_size")
    # Buttons are stacked vertically, 50 px apart, centred horizontally.
    # (Intermediate names instead of repeating the arithmetic per entry.)
    _center_x = window_size[0]//2
    _top = window_size[1]
    buttons_dict = {
        "button_New": Button("New game", pos=(_center_x, _top - 150)),
        "button_Load": Button("Load game", pos=(_center_x, _top - 200)),
        "button_Save": Button("Save game", pos=(_center_x, _top - 250)),
        "button_Exit": Button("Exit game", pos=(_center_x, _top - 300)),
    }

    @classmethod
    def draw(cls, offset, window):
        """Render the background chunks, then every menu button on top."""
        ChunkManager.screenspaceocclude_drawchunks(offset, (window.width, window.height))
        for menu_button in cls.buttons_dict.values():
            menu_button.draw(offset, window)

    @classmethod
    def getbuttonclicked(cls, mouse_pos, window):
        """Return the first button colliding with mouse_pos, or None."""
        #TODO: rewrite to ask button for collision
        return next(
            (btn for btn in cls.buttons_dict.values() if btn.getcollision(mouse_pos)),
            None,
        )
# Maps state names to the state classes that drive drawing/input handling.
state_dict = {"MineField":MineField,
              "MainMenu":MainMenu}
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtCore import QCoreApplication
class MyWindow(QMainWindow):
    """Room-listing input form: one labelled text field per property
    attribute, a submit button that clears the form, and a status bar that
    echoes whatever is being typed."""

    # Row captions in display order; one QLineEdit is created per caption.
    FIELD_LABELS = ["층수", "룸형태", "크기", "주방구조", "보증금",
                    "월세", "관리비", "보증금조정", "입주시기"]

    def __init__(self):
        super().__init__()
        self.setupUI()

    def setupUI(self):
        """Build all widgets. Called once from __init__."""
        self.setGeometry(800, 400, 230, 450)

        # BUG FIX: the original rebound self.lineEdit nine times, so only
        # the *last* field was ever reachable from pushbutton() and
        # lineEditChanged(). Build the rows in a loop and keep every edit.
        self.lineEdits = []
        for row, caption in enumerate(self.FIELD_LABELS):
            y = 20 + 40 * row  # rows are 40 px apart, starting at y=20
            label = QLabel(caption, self)
            label.move(20, y)
            edit = QLineEdit("", self)
            edit.move(80, y)
            edit.textChanged.connect(self.lineEditChanged)
            self.lineEdits.append(edit)
        # Backward-compatible alias: self.lineEdit was the last edit before.
        self.lineEdit = self.lineEdits[-1]

        # 푸쉬버튼 (submit button)
        button = QPushButton('작성하기', self)
        button.move(80, 380)
        button.clicked.connect(self.pushbutton)

        # StatusBar
        self.statusBar = QStatusBar(self)
        self.setStatusBar(self.statusBar)

        # NOTE(review): this action is created but never attached to a menu
        # or toolbar, so it is currently unreachable — confirm intent.
        openFile = QAction('Open', self)
        openFile.triggered.connect(self.pushbutton)

    def pushbutton(self):
        """Clear every input field (the original cleared only the last)."""
        for edit in self.lineEdits:
            edit.clear()

    def lineEditChanged(self, text=''):
        """Echo the edited field's new text in the status bar.

        textChanged passes the new text, so show that directly; the
        original always showed the last field's text regardless of which
        edit actually changed.
        """
        self.statusBar.showMessage(text)
if __name__ == "__main__":
    # Standard Qt bootstrap: create the application, show the main window
    # and hand control to the event loop.
    app = QApplication(sys.argv)
    mywindow = MyWindow()
    mywindow.show()
    # NOTE(review): the loop's return value is discarded; consider
    # sys.exit(app.exec_()) so the exit code propagates to the shell.
    app.exec_()
import torch
import gc
import os
import logging
import torch.optim as optim
import torch.nn as nn
from datetime import datetime
"""
5 Fold CV Best Loss: 0.6709386929869652 | 0.6842632591724396 | 0.6594144776463509 | 0.6695199608802795 | 0.6728682070970535
"""
# ==============================================
# Setup
# ==============================================
# Model / Data Set Choice
from rick.experiments.models.exp2_dynamic_image_model import get_model
from rick.experiments.datasets.exp2_dynamic_image_dataset import get_training_and_validation_dataloaders
from rick.experiments.utilities.calculate_metrics import calculate_accuracy
# GPU — assumes at least one CUDA device is available; everything below
# runs on the first one.
device = torch.device("cuda:0")
# Training Settings
batch_size = 128
num_epochs = 50
folds_to_train = [0, 1, 2, 3, 4]  # which of the 5 CV folds to actually run
# Load data-loaders, i.e. [(train_dataloader, val_dataloader), ...]
data_loaders = get_training_and_validation_dataloaders(n_splits=5, batch_size=batch_size)
# File-name (this script's basename without the .py suffix) — used both as
# the logger name and as the log file name.
file_name = ''.join(os.path.basename(__file__).split('.py')[:-1])
# Logging: everything at DEBUG and above goes to "<script>.log".
logger = logging.getLogger(file_name)
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('{}.log'.format(file_name))
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
# ==============================================
# Train K-Folds
# ==============================================
# Train one model per selected CV fold; record the best validation loss seen
# for each fold. NOTE(review): only the loss value is kept — the best model
# weights are never checkpointed; confirm that is intended.
best_loss_per_fold = {}
for fold_i, (train_loader, validation_loader) in enumerate(data_loaders):
    if fold_i not in folds_to_train:
        print('Skipping Fold: {}'.format(fold_i))
        continue
    # Get Model (a fresh model per fold, moved to the GPU)
    net = get_model()
    net.to(device)
    # Initialize optimizer and loss function.
    # BCEWithLogitsLoss => the network outputs raw logits for a binary task.
    criterion = nn.BCEWithLogitsLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
    time_start = datetime.now()
    print('Starting Training on Fold: {}\n'.format(fold_i))
    best_val_loss = float('inf')
    for epoch_i in range(num_epochs):
        train_running_loss = 0.0
        train_running_acc = 0.0
        num_mini_batches = 0
        # ==============================================
        # Training Pass
        # ==============================================
        net.train()
        for i, data in enumerate(train_loader):
            x_train, y_train = data
            x_train, y_train = x_train.to(device), y_train.to(device)
            # Zero the parameter gradients.
            optimizer.zero_grad()
            # Prediction.
            y_pred = net(x_train)
            # Calculate Loss.
            loss = criterion(y_pred, y_train)
            # Step
            loss.backward()
            optimizer.step()
            # Keep track of the loss and number of batches.
            num_mini_batches += 1
            train_running_loss += loss.item()
            train_running_acc += calculate_accuracy(y_pred, y_train)
        # ==============================================
        # Validation Pass (no gradients, eval mode)
        # ==============================================
        val_running_loss = 0.0
        val_running_acc = 0.0
        num_val_mini_batches = 0
        net.eval()
        with torch.no_grad():
            for i, data in enumerate(validation_loader):
                x_val, y_val = data
                x_val, y_val = x_val.to(device), y_val.to(device)
                # Prediction.
                y_pred = net(x_val)
                # Calculate Loss.
                loss = criterion(y_pred, y_val)
                # Keep track of the loss and number of batches.
                num_val_mini_batches += 1
                val_running_loss += loss.item()
                val_running_acc += calculate_accuracy(y_pred, y_val)
        # ==============================================
        # Statistics (averages are per mini-batch, not per sample)
        # ==============================================
        time_elapsed = datetime.now() - time_start
        avg_loss = train_running_loss / num_mini_batches
        avg_acc = train_running_acc / num_mini_batches
        avg_val_loss = val_running_loss / num_val_mini_batches
        avg_val_acc = val_running_acc / num_val_mini_batches
        # Keep track of best model.
        if avg_val_loss < best_val_loss:
            best_val_loss = avg_val_loss
        # Output training status (to stdout and to the log file).
        output_msg = 'Epoch: {}/{}\n' \
                     '---------------------\n' \
                     'train loss: {:.6f}, val loss: {:.6f}\n' \
                     'train acc: {:.4f}, val acc: {:.4f}\n' \
                     'best val loss: {:.6f}, time elapsed: {}\n'. \
            format(epoch_i + 1, num_epochs,
                   avg_loss, avg_val_loss,
                   avg_acc, avg_val_acc,
                   best_val_loss, str(time_elapsed).split('.')[0])
        print(output_msg)
        logger.info(output_msg)
    best_loss_per_fold[fold_i] = best_val_loss
    # Free the fold's model before the next fold is trained.
    del net
    gc.collect()
print('Finished Training')
print("Best Loss per Fold:\n", best_loss_per_fold)
|
"""Handle the loading of configuration files."""
import yaml
def load_config(config_file='/etc/thermopi.yaml'):
    """Load the configuration file.

    Parameters
    ----------
    config_file : str
        Path to the configuration file to load.

    Returns
    -------
    dict
        Dict containing the configuration file.
    """
    with open(config_file) as config_handle:
        # yaml.full_load is the documented shortcut for
        # yaml.load(..., Loader=yaml.FullLoader).
        return yaml.full_load(config_handle)
|
import openpyxl
from datetime import datetime
from openpyxl import Workbook
# Attendance summary: scans subbixl.xlsx and reports what percentage of rows
# carry the requested mark ("P" or "A") in the column whose header matches
# the given date.
wb = openpyxl.load_workbook('subbixl.xlsx')
sheets = ['Sheet1']
xin = input("P or A")  # mark to count (presence/absence)
count = 0
yin = str(input("Date"))  # expected as day-month-year, e.g. "31-12-2021"
my = datetime.strptime(yin, "%d-%m-%Y")
for sheet in sheets:
    sh = wb[sheet] # Get a sheet from the workbook.
    max_r = sh.max_row
    max_c = sh.max_column
    # Date columns are assumed to start at column 4; row 1 is assumed to
    # hold datetime-typed headers that compare equal to `my` —
    # NOTE(review): confirm against the actual spreadsheet layout.
    for r in range(1, max_r+1):
        for c in range(4, max_c+1):
            if sh.cell(row=1, column=c).value == my:
                if sh.cell(row=r, column=c).value == xin:
                    count = count + 1
# Percentage over data rows: assumes exactly one header row (max_r-1);
# NOTE(review): an empty sheet (max_r == 1) would divide by zero.
print(str((count/(max_r-1))*100)+"%")
|
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from numcodecs.categorize import Categorize
from numcodecs.tests.common import (check_encode_decode, check_config,
check_backwards_compatibility,
check_encode_decode_array)
# Category vocabulary — deliberately non-ASCII to exercise unicode handling.
labels = ['ƒöõ', 'ßàř', 'ßāẑ', 'ƪùüx']
# Fixture arrays: 1-D, 2-D, 3-D, and a Fortran-ordered reshape.
arrays = [
    np.random.choice(labels, size=1000),
    np.random.choice(labels, size=(100, 10)),
    np.random.choice(labels, size=(10, 10, 10)),
    np.random.choice(labels, size=1000).reshape(100, 10, order='F'),
]
# The same data with object dtype (exercised via the *_array checks below).
arrays_object = [a.astype(object) for a in arrays]
def test_encode_decode():
    """Round-trip every fixture array through a Categorize codec."""
    # unicode dtype
    for fixture in arrays:
        check_encode_decode(fixture, Categorize(labels, dtype=fixture.dtype))
    # object dtype
    for fixture in arrays_object:
        check_encode_decode_array(fixture, Categorize(labels, dtype=fixture.dtype))
def test_encode():
    """Known labels encode to 1-based indices; unknown labels encode to 0
    and decode back to the empty string."""
    for dtype in 'U', object:
        arr = np.array(['ƒöõ', 'ßàř', 'ƒöõ', 'ßāẑ', 'ƪùüx'], dtype=dtype)
        # miss off quux
        codec = Categorize(labels=labels[:-1], dtype=arr.dtype, astype='u1')
        # test encoding
        encoded = codec.encode(arr)
        expected_enc = np.array([1, 2, 1, 3, 0], dtype='u1')
        assert_array_equal(expected_enc, encoded)
        assert expected_enc.dtype == encoded.dtype
        # test decoding with unexpected value
        decoded = codec.decode(encoded)
        expected_dec = arr.copy()
        expected_dec[expected_dec == 'ƪùüx'] = ''
        assert_array_equal(expected_dec, decoded)
        assert arr.dtype == decoded.dtype
def test_config():
    """The codec's config dict round-trips through the standard check."""
    check_config(Categorize(labels=labels, dtype='U4'))
def test_repr():
    """repr shows dtype/astype and truncates labels after three entries."""
    ascii_codec = Categorize(labels=['foo', 'bar', 'baz', 'qux'],
                             dtype='<U3', astype='|u1')
    assert repr(ascii_codec) == (
        "Categorize(dtype='<U3', astype='|u1', labels=['foo', 'bar', 'baz', ...])"
    )
    unicode_codec = Categorize(labels=labels, dtype='<U4', astype='|u1')
    assert repr(unicode_codec) == (
        "Categorize(dtype='<U4', astype='|u1', labels=['ƒöõ', 'ßàř', 'ßāẑ', ...])"
    )
def test_backwards_compatibility():
    """Previously stored fixtures still decode, for both supported dtypes."""
    unicode_codec = Categorize(labels=labels, dtype='<U4', astype='u1')
    check_backwards_compatibility(Categorize.codec_id, arrays, [unicode_codec],
                                  prefix='U')
    object_codec = Categorize(labels=labels, dtype=object, astype='u1')
    check_backwards_compatibility(Categorize.codec_id, arrays_object,
                                  [object_codec], prefix='O')
def test_errors():
    """Bytes dtypes and an object astype are both rejected with TypeError."""
    for bad_kwargs in ({'dtype': 'S6'}, {'dtype': 'U6', 'astype': object}):
        with pytest.raises(TypeError):
            Categorize(labels=['foo', 'bar'], **bad_kwargs)
|
# -*- coding: utf-8 -*-
import os
import json
import time
# Live Minecraft world directory that gets backed up / restored.
worldpath = './server/world'
# Root directory that holds the timestamped backups plus meta.json.
savepath = './savelib'
class mcsave(object):
    """Back up / restore the Minecraft world directory plus its metadata.

    Backups live under ``<path>/<YYYYMMDD-HHMMSS>/world`` with a
    ``meta.json`` in ``<path>`` describing the latest backup.
    """

    def __init__(self, savepath):
        # Directory holding all timestamped backups and meta.json.
        self.path = savepath

    def backup(self, meta=None, writeTime=True):
        """Copy the world into a timestamped folder and stamp ``meta``.

        Returns ``meta`` with ``meta['time']`` set to the timestamp on
        success, or to ``" "`` (single space) when the copy failed.
        """
        # BUG FIX: the original used a mutable default argument (meta={}),
        # which is shared between calls and accumulates state.
        if meta is None:
            meta = {}
        # BUG FIX: the original computed `clock` only when writeTime was
        # true, so backup(writeTime=False) crashed with NameError. The
        # timestamp is needed for the folder name, so compute it always;
        # writeTime is kept only for interface compatibility.
        clock = time.strftime('%Y%m%d',time.localtime()) + '-' + time.strftime('%H%M%S',time.localtime())
        targetpath = self.path + '/' + clock
        # NOTE(review): shell commands are built by concatenation, so paths
        # containing spaces/metacharacters break — consider shutil.copytree.
        os.system('mkdir ' + targetpath)
        result = os.system('cp -r ' + worldpath + ' ' + targetpath)
        if result == 0:
            meta['time'] = clock
        else:
            meta['time'] = " "
        return meta

    def restore(self, meta):
        """Replace the live world with the backup named by meta['time'].

        Returns the exit status of the copy command (0 on success).
        """
        targetpath = self.path + '/' + meta['time'] + '/world'
        os.system('rm -rf ' + worldpath)
        result = os.system('cp -r ' + targetpath + ' ' + './server/')
        return result

    def readMeta(self):
        """Load and return the backup metadata dict from meta.json."""
        with open(self.path + '/meta.json', 'r') as handler:
            return json.loads(handler.read())

    def writeMeta(self, meta):
        """Serialize the metadata dict to meta.json."""
        with open(self.path + '/meta.json', 'w') as handler:
            handler.write(json.dumps(meta))
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 14 13:22:55 2018
挖掘twitter tweets,找共现词,具体可见machine learning in action 235页
@author: Max Yang
"""
import twitter
from time import sleep
import re
import pandas as pd
import fpGrowth
def getLotsofTweets(searchStr):
    """Fetch up to 14 pages of English tweets matching searchStr, one page
    per day over 2018-07-01..2018-07-14, and return the list of pages."""
    # SECURITY: API credentials are hard-coded and committed to source —
    # they should be considered leaked: rotate them and load from an
    # environment variable or config file instead.
    CONSUMER_KEY='4nDxF5gYmarjAcAVCs8B0zSEA'
    CONSUMER_SECRET='at8BWgNXWEWewqFLZD7fd1KK45x2hIRvUIGQvtZK8cGhzJMbiS'
    ACCESS_TOKEN_KEY='2181272090-8S7mIynrCwEpSrEoD5UsFOIF23YSvK6Z2RpcCQs'
    ACCESS_TOKEN_SECRET='4EZ7g4ZlaxDaxQ9F4qdcBAq63aGYCSmKVc0DUdzOvyAGU'
    api=twitter.Api(consumer_key=CONSUMER_KEY,
                    consumer_secret=CONSUMER_SECRET,
                    access_token_key=ACCESS_TOKEN_KEY,
                    access_token_secret=ACCESS_TOKEN_SECRET)
    #you can get 1500 results 15 pages * 100 per page
    resultsPages=[]
    # One 'since' date per request; date_list holds 14 days.
    date_r=pd.date_range('2018-07-01','2018-07-14')
    date_list=date_r.format(formatter=lambda x: x.strftime('%Y-%m-%d'))
    for i in range(14):
        print("fetching page %d" % i)
        searchResults=api.GetSearch(searchStr, count=100,lang='en',since=date_list[i])
        resultsPages.append(searchResults)
        # Sleep between requests to stay under the API rate limit.
        sleep(6)
    return(resultsPages)
def textParse(bigString):
    """Strip URLs from bigString and tokenize it into lowercase words of
    length > 2. Returns '' (not a list) when the URL-stripped text is empty,
    matching the original contract.
    """
    # BUG FIX: the original URL pattern used "[a-z][A-Z][0-9]" — a
    # *concatenation* requiring lower+upper+digit in sequence — instead of
    # the intended alternation, so URL bodies were left in the text.
    urlsRemoved=re.sub('(http[s]?:[/][/]|www.)([a-z]|[A-Z]|[0-9]|[/.]|[~])*','',bigString)
    if urlsRemoved=='':
        return(urlsRemoved)
    else:
        # BUG FIX: splitting on r'\W*' splits on zero-width matches on
        # Python 3.7+, shredding the text into single characters (which the
        # len > 2 filter then discards entirely). Split on r'\W+' instead.
        listofTokens=re.split(r'\W+',urlsRemoved)
        return([tok.lower() for tok in listofTokens if len(tok)>2])
def mineTweets(tweetArr,minSup=5):
    """Parse every fetched tweet and mine frequently co-occurring word sets
    with FP-growth, keeping sets seen at least minSup times."""
    # tweetArr is a list of pages of twitter Status objects; the actual
    # tweet text lives on the .text attribute of each status.
    parsedList = [textParse(status.text) for page in tweetArr for status in page]
    initSet = fpGrowth.createInitSet(parsedList)
    fpTree, headerTab = fpGrowth.createTree(initSet, minSup)
    freqItemSets = []
    fpGrowth.mineTree(fpTree, headerTab, minSup, set([]), freqItemSets)
    return(freqItemSets)
#
#=BEGIN
#
# This file is part of the Bluetooth use-case verification
#
# Copyright (C) ST-Ericsson SA 2010. All rights reserved.
#
# This code is ST-Ericsson proprietary and confidential.
# Any use of the code for whatever purpose is subject to
# specific written permission of ST-Ericsson SA.
#
#=END
#
import setup_paths
import sys, os
curdir = os.path.realpath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(curdir + "/.."))
from includes.BluetoothTestCase import BluetoothTestCase
from core.session.Session import Session
from core.general.Exceptions import Error, Failure
from includes.BluetoothDialogInput import BluetoothDialogInput
from includes.BluetoothOpCodes import OpCodes
from includes.BluetoothTestComponent import BluetoothTestComponent
from string import find
import time,re
class TC_AD_Bluetooth_5_05_HSP_Audio_Transfer_HS_Inititated_All_1(BluetoothTestCase):
    """HSP audio transfer, headset-initiated (PTS case TC_AG_ACT_BV_01_I).

    UE1 pairs with the PTS tester acting as the headset; PTS places a call
    using the first Call-log entry (UE2's number), the call is answered and
    ended, and the handsfree link is then verified to disconnect cleanly.
    """
    def execute(self):
        # Logical component ids for the two devices under test.
        ComponentOne = '1'
        ComponentTwo = '2'
        ptsAddress = Session.getSetupFileParam('bt', 'addresspts')
        ptsTestCase = 'TC_AG_ACT_BV_01_I'
        # NOTE(review): "umber" is most likely a typo for "number" (kept as-is).
        UE1_SIM_umber = self.Configuration.getSectionKeyValue('UE1', 'phonenumber')
        UE2_SIM_umber = self.Configuration.getSectionKeyValue('UE2', 'phonenumber')
        self.Components[ComponentOne] = BluetoothTestComponent(self.devices[0])
        self.Components[ComponentTwo] = BluetoothTestComponent(self.devices[1])
        self.StartupExecution()
        #Initialization
        # NOTE(review): the second call overwrites deviceOneAddress/Name with
        # component two's values - probably meant deviceTwoAddress/Name; the
        # values are unused afterwards, so behavior is unaffected.
        deviceOneAddress,deviceOneName = self.InititalizeApplication(ComponentOne)
        deviceOneAddress,deviceOneName = self.InititalizeApplication(ComponentTwo)
        # Make local device discoverable
        self.SendAndWaitForEvent(ComponentOne, OpCodes.SET_DISCOVERABLE, OpCodes.SET_DISCOVERABLE, message='300', timeout=10)
        time.sleep(4)
        self.pressYes(ComponentOne)
        # Place and end call to add UE2 number to Call log. PTS will use first number from the Call log
        self.SendCommand(ComponentOne, OpCodes.MAKE_CALL, message=UE2_SIM_umber)
        time.sleep(2)
        self.SendCommand(ComponentOne, OpCodes. END_CALL, message='End Call')
        self.SendAndWaitForEvent(ComponentOne, OpCodes.CLEAR_NOTIFICATIONS, OpCodes.CLEAR_NOTIFICATIONS, message='Clear notifications', timeout=25)
        # The operator starts the PTS-side test case manually here.
        BluetoothDialogInput('Press OK, then start the test case: %s in PTS'%ptsTestCase)
        time.sleep(5)
        # Accept incoming pairing request
        self.WaitForEvent(ComponentOne, OpCodes.PAIRING_VARIANT_PIN, timeout=60)
        time.sleep(5)
        self.SendCommand(ComponentOne, OpCodes.SET_PIN, message='%s;0000'%ptsAddress)
        # UE2 answer call from UE1, then UE1 ends the call
        time.sleep(15)
        self.SendCommand(ComponentTwo, OpCodes.END_CALL, message='End Call')
        time.sleep(2)
        #Disconnect Handsfree
        self.SendAndWaitForEvent(ComponentOne, OpCodes.DISCONNECT_HEADSET, OpCodes.DISCONNECT_HEADSET, message=ptsAddress, timeout=10)
        time.sleep(2)
        #Check Handsfree disconnected
        isHandsfreeConnected = self.SendAndWaitForEvent(ComponentOne, OpCodes.GET_IS_HEADSET_CONNECTED, OpCodes.GET_IS_HEADSET_CONNECTED, message=ptsAddress, timeout=10)
        if isHandsfreeConnected == "true":
            self.log.info("Handsfree still connected")
            raise Failure('Handsfree2DP still connected')
        #Clean up
        self.RestoreApplication(ComponentOne)
        self.RestoreApplication(ComponentTwo)
        self.CloseDownExecution()
if __name__ == '__main__':
    # Stand-alone entry point: build a two-DUT Android environment, run the
    # test case once, then tear everything down and print a summary.
    from core.script.Script import Script
    from core.setup.Environment import Environment
    from plugins.android.device.AndroidDevice import AndroidDevice
    Session.init(Script(__file__))
    duts = []
    dut1 = AndroidDevice('DUT1', connection=1)
    dut2 = AndroidDevice('DUT2', connection=1)
    duts.append(dut1)
    duts.append(dut2)
    env = Environment()
    env.addEquipment(dut1)
    env.addEquipment(dut2)
    # Only run when the environment (devices, connections) came up cleanly.
    if(env.setup()):
        tc = TC_AD_Bluetooth_5_05_HSP_Audio_Transfer_HS_Inititated_All_1("TC_AD_Bluetooth_5_05_HSP_Audio_Transfer_HS_Inititated_All_1", duts)
        tc.run()
        env.tearDown()
    Session.summary()
|
#!/usr/bin/env python
"""One-shot migration: add to the ``artifacts`` table any column declared by
the ``db.LocalArtifact`` dataclass that the on-disk schema is missing."""
import sqlite3
from dataclasses import fields
from jupyterlab_chameleon import db
try:
    # sqlite3's context manager commits/rolls back the transaction on exit
    # but does NOT close the connection.
    with sqlite3.connect("/work/.chameleon/chameleon.db") as conn:
        cur = conn.cursor()
        cur.execute("PRAGMA table_info(artifacts);")
        columns = cur.fetchall()
        # PRAGMA table_info rows are (cid, name, type, ...); keep the names.
        column_names = [c[1] for c in columns]
        expected_fields = fields(db.LocalArtifact)
        expected_columns = []  # NOTE(review): assigned but never used
        # Dataclass field type -> SQLite column type.
        # NOTE(review): only str is mapped, so any non-str field raises
        # KeyError below; also, if the db module uses
        # `from __future__ import annotations`, field.type is the *string*
        # 'str' rather than the class - verify against jupyterlab_chameleon.
        type_map = {
            str: "text"
        }
        for field in expected_fields:
            if field.name not in column_names:
                print(f"Migrating artifact database, adding '{field.name}' column")
                column_type = type_map[field.type]
                cur.execute(f"ALTER TABLE artifacts ADD COLUMN {field.name} {column_type};")
        cur.close()
except sqlite3.OperationalError:
    # If database does not exist, no migration to run.
    pass
import os
from .iterators import chunked_iterable
def list_s3_prefix(s3_client, *, bucket, prefix=""):
    """
    Lists all the keys in a given S3 bucket/prefix.

    Yields each object's key, page by page.

    BUG FIX: the original wrapped the page loop in ``try/except KeyError``
    and returned on the first page lacking a "Contents" key, silently
    truncating results if an empty page appeared mid-pagination (and
    swallowing any other KeyError raised inside the loop).  Pages without
    "Contents" (i.e. empty result pages) are now simply skipped.
    """
    paginator = s3_client.get_paginator("list_objects_v2")
    for page in paginator.paginate(Bucket=bucket, Prefix=prefix):
        for s3_obj in page.get("Contents", []):
            yield s3_obj["Key"]
def copy_s3_prefix(s3_client, *, src_bucket, src_prefix, dst_bucket, dst_prefix):
    """
    Copies all the objects between two prefixes in S3.

    Each key under ``src_prefix`` is copied to the same relative path
    under ``dst_prefix``.
    """
    for source_key in list_s3_prefix(s3_client, bucket=src_bucket, prefix=src_prefix):
        relative_path = os.path.relpath(source_key, start=src_prefix)
        target_key = os.path.join(dst_prefix, relative_path)
        copy_source = {"Bucket": src_bucket, "Key": source_key}
        s3_client.copy(CopySource=copy_source, Bucket=dst_bucket, Key=target_key)
def delete_s3_prefix(*, s3_list_client, s3_delete_client, bucket, prefix=""):
    """
    Delete all the objects in a given S3 bucket/prefix.
    """
    key_stream = list_s3_prefix(s3_list_client, bucket=bucket, prefix=prefix)
    # DeleteObjects accepts at most 1000 keys per request, hence batching.
    for key_batch in chunked_iterable(key_stream, size=1000):
        # Safety check: never delete anything outside the requested prefix.
        assert all(key.startswith(prefix) for key in key_batch)
        payload = {"Objects": [{"Key": key} for key in key_batch]}
        s3_delete_client.delete_objects(Bucket=bucket, Delete=payload)
|
# -*- coding: utf-8 -*-
"""Generate and display a word cloud from a Chinese text file (data.txt)."""
import jieba
import sys
import matplotlib.pyplot as plt
from wordcloud import WordCloud
# Read the whole source text file.
# NOTE(review): the file handle is never closed; a `with` block would be safer.
text = open('data.txt',encoding='utf-8').read()
print(type(text))
# Tokenize with jieba; cut_all=False selects "precise" mode (the original
# comment mistakenly said cut_all=True).
wordlist = jieba.cut(text, cut_all = False)
# Join tokens with spaces so WordCloud can split them like Western words.
wl_space_split = " ".join(wordlist)
print(wl_space_split)
# Build the word cloud from the space-separated tokens.
my_wordcloud = WordCloud().generate(wl_space_split)
# Render the word-cloud image.
plt.imshow(my_wordcloud)
# Hide the x/y axis ticks.
plt.axis("off")
plt.show()
|
# -*- coding: utf-8 -*-
__author__ = 'Vit'
from bs4 import BeautifulSoup
from data_format.url import URL
from common.util import _iter, quotes, psp, collect_string
from interface.view_manager_interface import ViewManagerFromModelInterface
from model.site.parser import BaseSiteParser
class PlusoneSite(BaseSiteParser):
    """Site parser for plusone8.com: thumbnails, video sources, pagination, tags."""
    @staticmethod
    def can_accept_url(url: URL) -> bool:
        """Return True when this parser should handle ``url``."""
        return url.contain('plusone8.com/')
    @staticmethod
    def create_start_button(view:ViewManagerFromModelInterface):
        """Register the site's start button with its sort/category menu."""
        menu_items=dict(Popular=URL('http://plusone8.com/?filter=rate*'),
                        Latest=URL('http://plusone8.com/?filter=date*'),
                        Random=URL('http://plusone8.com/?filter=random*'),
                        MostViewed=URL('http://plusone8.com/?filter=views*'),
                        Categories=URL('http://plusone8.com/categories/'),
                        Longest=URL('http://plusone8.com/?filter=duration*'))
        view.add_start_button(picture_filename='model/site/resource/plusone8.png',
                              menu_items=menu_items,
                              url=URL("http://plusone8.com/?filter=date*", test_string='Porn'))
    def get_shrink_name(self):
        """Short identifier used to abbreviate this site's name."""
        return 'PO8'
    def parse_thumbs(self, soup: BeautifulSoup, url: URL):
        """Extract video thumbnails (link, preview image, duration, quality)."""
        container = soup.find('main',{'id':'main'})
        if container:
            for thumbnail in _iter(container.find_all('div', {'class': 'column'})):
                # psp(thumbnail.prettify())
                xref=thumbnail.find('a')
                if xref:
                    href = URL(xref.attrs['href'], base_url=url)
                    description = xref.attrs['title']
                    thumb_url = URL(thumbnail.img.attrs['src'], base_url=url)
                    duration = thumbnail.find('span', {'class': "length"})
                    # Missing duration/quality badges degrade to empty labels.
                    dur_time = '' if duration is None else collect_string(duration)
                    quality = thumbnail.find('span', {'class': "quality"})
                    qual = '' if quality is None else str(quality.string)
                    self.add_thumb(thumb_url=thumb_url, href=href, popup=description,
                                   labels=[{'text': dur_time, 'align': 'top right'},
                                           {'text': description, 'align': 'bottom center'},
                                           {'text': qual, 'align': 'top left', 'bold': True}])
    def get_pagination_container(self, soup: BeautifulSoup):
        """Return the element holding the page-number links, if present."""
        return soup.find('div', {'class': 'pagination'})
    def parse_video(self, soup: BeautifulSoup, url: URL):
        """Collect every <source> of the page's player as a playable URL."""
        video = soup.find('div',{'class':'video-player'})
        if video is not None:
            # psp(video.prettify())
            for source in _iter(video.find_all('source')):
                psp(source)
                self.add_video('DEFAULT', URL(source.attrs['src'], base_url=url))
            # The most recently added source becomes the default stream.
            self.set_default_video(-1)
    def parse_video_tags(self, soup: BeautifulSoup, url: URL):
        """Collect actor links (styled blue) and category links from a video page."""
        for actor_container in _iter(soup.find_all('div', {'id': 'video-actors'})):
            for href in _iter(actor_container.find_all('a')):
                psp(href)
                self.add_tag(str(href.attrs['title']), URL(href.attrs['href'], base_url=url), style={'color': 'blue'})
        for tag_container in _iter(soup.find_all('div', {'id': 'cat-list'})):
            for href in _iter(tag_container.find_all('a')):
                psp(href)
                self.add_tag(str(href.attrs['title']), URL(href.attrs['href'], base_url=url))
if __name__ == "__main__":
    # No stand-alone behavior; this module is used via the site-parser registry.
    pass
import tensorflow as tf
import numpy as np
class KMeans:
    """K-Means clustering via TF1 ``tf.contrib.factorization.KMeans``.

    Clusters with cosine distance, then labels each cluster by majority
    vote over the one-hot training labels so it can act as a classifier.
    """
    def __init__(self, k, n_features, n_classes, sess=tf.Session()):
        # NOTE(review): the default Session is created once at class
        # definition time and shared by every instance constructed
        # without an explicit `sess`.
        self.k = k
        self.n_features = n_features
        self.n_classes = n_classes
        self.sess = sess
        self.build_graph()
    # end constructor
    def build_graph(self):
        """Build the static TF1 graph: placeholders, K-Means ops, mean distance."""
        self.X = tf.placeholder(tf.float32, shape=[None, self.n_features])
        self.Y = tf.placeholder(tf.float32, shape=[None, self.n_classes])
        params = tf.contrib.factorization.KMeans(
            inputs=self.X,
            num_clusters=self.k,
            distance_metric='cosine',
            use_mini_batch=True)
        (_, cluster_idx, scores, _, _, self.init_op, self.train_op) = params.training_graph()
        # training_graph() wraps the assignment tensor in a tuple; unwrap it.
        self.cluster_idx = cluster_idx[0]
        self.avg_distance = tf.reduce_mean(scores)
        self.cluster_label = None # to be filled after calling fit()
    # end method
    def fit(self, X, Y, n_iter=50):
        """Run ``n_iter`` K-Means steps, then map each cluster to its majority label.

        X: (n_samples, n_features) features; Y: one-hot labels aligned with X.
        """
        self.sess.run(tf.global_variables_initializer())
        self.sess.run(self.init_op, {self.X: X})
        for i in range(n_iter):
            _, d, idx = self.sess.run([self.train_op, self.avg_distance, self.cluster_idx], {self.X: X})
            print("Step %i, Avg Distance: %f" % (i, d))
        # Accumulate one-hot label counts per cluster for the majority vote.
        counts = np.zeros(shape=(self.k, self.n_classes))
        for i, k in enumerate(idx):
            counts[k] += Y[i]
        # cluster index -> label
        labels_map = [np.argmax(c) for c in counts]
        # self.cluster_idx: (60000, ) cluster index for each sample
        self.cluster_label = tf.nn.embedding_lookup(tf.convert_to_tensor(labels_map), self.cluster_idx)
    # end method
    def predict(self, X_test):
        """Return the majority-vote class label for each row of ``X_test``."""
        return self.sess.run(self.cluster_label, {self.X:X_test})
    # end method
# end class
from dataset import Dataset
from Model import ConvEncoder, Decoder, SimpleE
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import os
from radam import RAdam
import numpy as np
import scipy.misc
from scipy.misc import imsave
import torch.autograd as autograd
import time
class Trainer:
    """Adversarial (GAN-style) trainer for knowledge-graph embeddings.

    An encoder/decoder pair acts as the generator: the encoder predicts the
    missing entity of a triple from (entity one-hot, position flag, relation
    one-hot) and the decoder reconstructs that input, giving an autoencoder
    loss.  A SimplE discriminator then scores real triples against triples
    whose head or tail was replaced by the encoder's prediction.
    """
    def __init__(self, dataset, args):
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.dataset = dataset
        self.encoder = ConvEncoder(self.dataset.num_ent(), self.dataset.num_rel(), args.emb_dim, self.device)
        self.decoder = Decoder(self.dataset.num_ent(), self.dataset.num_rel() , self.device)
        self.discriminator = SimpleE(self.dataset.num_ent(), self.dataset.num_rel(), args.emb_dim, self.device)
        self.args = args
        # NOTE(review): adversarial_loss is never used below; the generator's
        # adversarial step uses a hand-written log loss instead.
        self.adversarial_loss = nn.BCEWithLogitsLoss()
        self.reconstruction_loss = nn.BCELoss()
    def train(self):
        """Run the adversarial training loop for ``args.ne`` epochs.

        Per batch: (1) train encoder+decoder as an autoencoder on masked
        triples; (2) for ``args.discriminator_range`` rounds, build negative
        triples from the encoder's predictions, train the discriminator on
        positives vs. negatives, and give the encoder an adversarial update
        against the discriminator's score.
        """
        self.encoder.train()
        self.decoder.train()
        self.discriminator.train()
        # Echo the run configuration into the log.
        print('entity', self.dataset.num_ent(), 'relation', self.dataset.num_rel())
        print('ConvEncoder')
        print('train_simple1')
        print('epoch', self.args.ne)
        print('D_lr',self.args.D_lr)
        print('G_lr',self.args.G_lr)
        print('emb_dim',self.args.emb_dim)
        print('batch_size',self.args.batch_size)
        print('discriminator_range',self.args.discriminator_range)
        # Precompute dense one-hot rows for every entity and relation id.
        # NOTE(review): this costs O(num_ent^2) ints of memory - fine for
        # small KGs, prohibitive for large ones.
        entity_onehot = []
        relation_onehot = []
        for i in range(self.dataset.num_ent()):
            onehot = [0 for x in range(self.dataset.num_ent())]
            onehot[i] = 1
            entity_onehot.append(onehot)
        for i in range(self.dataset.num_rel()):
            onehot = [0 for x in range(self.dataset.num_rel())]
            onehot[i] = 1
            relation_onehot.append(onehot)
        #********************************admgrad*********************************************************************
        # One Adagrad optimizer per sub-network.
        optimizer_D = torch.optim.Adagrad(
            self.discriminator.parameters(),
            lr = self.args.D_lr,
            weight_decay= 0,
            initial_accumulator_value= 0.1)
        optimizer_Encoder = torch.optim.Adagrad(
            self.encoder.parameters(),
            lr = self.args.G_lr,
            weight_decay= 0,
            initial_accumulator_value= 0.1)
        optimizer_Decoder = torch.optim.Adagrad(
            self.decoder.parameters(),
            lr = self.args.G_lr,
            weight_decay= 0,
            initial_accumulator_value= 0.1)
        for epoch in range(1, self.args.ne+1):
            # start_time = time.time()
            last_batch = False
            total_d_loss = 0.0
            total_g_loss = 0.0
            while not last_batch:
                # NOTE(review): column layout inferred from the indexing
                # below (0=head, 1=relation, 2=tail, -1=label) - confirm
                # against the dataset module.
                pos_batch = self.dataset.next_pos_batch(self.args.batch_size)
                last_batch = self.dataset.was_last_batch()
                # One-hot encode the positive batch.
                h_onehot = []
                r_onehot = []
                t_onehot = []
                for i in pos_batch[:,0]:
                    one_hot = entity_onehot[i]
                    h_onehot.append(one_hot)
                for i in pos_batch[:,2]:
                    one_hot = entity_onehot[i]
                    t_onehot.append(one_hot)
                for i in pos_batch[:,1]:
                    one_hot = relation_onehot[i]
                    r_onehot.append(one_hot)
                h = torch.tensor(h_onehot).float().to(self.device)
                r = torch.tensor(r_onehot).float().to(self.device)
                t = torch.tensor(t_onehot).float().to(self.device)
                # -----------------
                #  Train Generator
                # ----------------
                optimizer_Encoder.zero_grad()
                optimizer_Decoder.zero_grad()
                # np.repeat with repeats=1 is just a defensive copy here.
                encoder_batch = np.repeat(np.copy(pos_batch), 1, axis=0)
                # Randomly mask one side of each triple: position 0 keeps the
                # head (predict tail), position 1 keeps the tail (predict
                # head); column 2 is reused to carry the relation id.
                for i in range(self.args.batch_size):
                    if np.random.random()<0.5:
                        encoder_batch[i][0] = pos_batch[i][0]
                        encoder_batch[i][1] = 0
                        encoder_batch[i][2] = pos_batch[i][1]
                    else:
                        encoder_batch[i][0] = pos_batch[i][2]
                        encoder_batch[i][1] = 1
                        encoder_batch[i][2] = pos_batch[i][1]
                encoder_h_onehot = []
                encoder_r_onehot = []
                encoder_position = []
                for i in encoder_batch[:,0]:
                    one_hot = entity_onehot[i]
                    encoder_h_onehot.append(one_hot)
                for i in encoder_batch[:,1]:
                    encoder_position.append([i])
                for i in encoder_batch[:,2]:
                    one_hot = relation_onehot[i]
                    encoder_r_onehot.append(one_hot)
                encoder_h = torch.tensor(encoder_h_onehot).float().to(self.device)
                encoder_p = torch.tensor(encoder_position).float().to(self.device)
                encoder_r = torch.tensor(encoder_r_onehot).float().to(self.device)
                # Autoencoder step: encode, decode, and reconstruct all three
                # encoder inputs.
                fake_tails =self.encoder(encoder_h, encoder_p, encoder_r)
                construction_heads, construction_postions, construction_rels = self.decoder(fake_tails)
                g_loss = self.reconstruction_loss(construction_heads, encoder_h) + self.reconstruction_loss(construction_rels, encoder_r) + self.reconstruction_loss(construction_postions, encoder_p)
                g_loss.backward()
                total_g_loss += g_loss.cpu().item()
                optimizer_Encoder.step()
                optimizer_Decoder.step()
                neg_batch = np.repeat(np.copy(pos_batch), self.args.neg_ratio, axis=0)
                for _ in range(self.args.discriminator_range):
                    # Build encoder inputs that mask one side of each triple,
                    # mirroring the masking scheme above.
                    # NOTE(review): pos_batch[i] is indexed with
                    # i in range(len(neg_batch)); this raises IndexError
                    # whenever neg_ratio > 1 - presumably neg_ratio == 1.
                    neg_entity = []
                    for i in range(len(neg_batch)):
                        if np.random.random() < 0.5:
                            temp = []
                            temp_h = pos_batch[i][0]
                            temp_p = [0]
                            temp_r = pos_batch[i][1]
                            temp.append(temp_h)
                            temp.append(temp_p)
                            temp.append(temp_r)
                            neg_entity.append(temp)
                        else:
                            temp = []
                            temp_h = pos_batch[i][2]
                            temp_p = [1]
                            temp_r = pos_batch[i][1]
                            temp.append(temp_h)
                            temp.append(temp_p)
                            temp.append(temp_r)
                            neg_entity.append(temp)
                    temp_h_one_hot = []
                    temp_r_one_hot = []
                    temp_p = []
                    for ele in neg_entity:
                        temp_h_one_hot.append(entity_onehot[ele[0]])
                        temp_r_one_hot.append(relation_onehot[ele[2]])
                        temp_p.append(ele[1])
                    temp_h_one_hot = torch.tensor(temp_h_one_hot).float().to(self.device)
                    temp_r_one_hot = torch.tensor(temp_r_one_hot).float().to(self.device)
                    temp_p = torch.tensor(temp_p).float().to(self.device)
                    # The encoder's argmax prediction supplies the corrupted
                    # entity for each negative triple.
                    neg_tails_index = np.argmax(self.encoder(temp_h_one_hot, temp_p, temp_r_one_hot).cpu().data.numpy(), axis=1)
                    for i in range(len(neg_batch)):
                        if neg_entity[i][1] == [0]:
                            neg_batch[i][2] = neg_tails_index[i]
                        elif neg_entity[i][1] == [1]:
                            neg_batch[i][0] = neg_tails_index[i]
                        else:
                            print('GG')
                    # Mark negatives with label -1 and shuffle them in with
                    # the positives.
                    neg_batch[:,-1] = -1
                    batch = np.append(pos_batch, neg_batch, axis=0)
                    np.random.shuffle(batch)
                    full_h_onehot = []
                    full_r_onehot = []
                    full_t_onehot = []
                    for i in batch[:,0]:
                        one_hot = entity_onehot[i]
                        full_h_onehot.append(one_hot)
                    for i in batch[:,2]:
                        one_hot = entity_onehot[i]
                        full_t_onehot.append(one_hot)
                    for i in batch[:,1]:
                        one_hot = relation_onehot[i]
                        full_r_onehot.append(one_hot)
                    full_h = torch.tensor(full_h_onehot).float().to(self.device)
                    full_r = torch.tensor(full_r_onehot).float().to(self.device)
                    full_t = torch.tensor(full_t_onehot).float().to(self.device)
                    labels = torch.tensor(batch[:,3]).float().to(self.device)
                    # Discriminator step: softplus margin loss plus L2
                    # regularization scaled by the batch count.
                    optimizer_D.zero_grad()
                    scores = self.discriminator(full_h, full_r, full_t)
                    d_loss = torch.sum(F.softplus(-labels * scores)) + (self.args.reg_lambda * self.discriminator.l2_loss() / self.dataset.num_batch(self.args.batch_size))
                    d_loss.backward()
                    optimizer_D.step()
                    # Weight clipping on the discriminator (WGAN-style).
                    for p in self.discriminator.parameters():
                        p.data.clamp_(-1, 1)
                    total_d_loss += d_loss.cpu().item()
                    # =================== generator training =======================
                    # Adversarial encoder update against the discriminator's
                    # score of the generated entities.
                    optimizer_Encoder.zero_grad()
                    fake_tails =self.encoder(encoder_h, encoder_p, encoder_r)
                    generator_score = self.discriminator(encoder_h, encoder_r, fake_tails)
                    # NOTE(review): log() assumes generator_score > -1e-6;
                    # a negative discriminator score would produce NaN.
                    G_loss = -0.2 * torch.mean(torch.log(generator_score + 1e-6))
                    G_loss.backward()
                    optimizer_Encoder.step()
            # finish_time = time.time()
            # with open("train_time_log.log",'a') as f:
            #     f.write(str(epoch)+" "+str(start_time)+" "+str(finish_time)+"\n")
            print("Loss in iteration " + str(epoch) + ": " + str(total_d_loss) + "(" + self.dataset.name + ")")
            print("Loss in iteration " + str(epoch) + ": " + str(total_g_loss) + "(" + self.dataset.name + ")")
            if epoch % self.args.save_each == 0:
                self.save_model(epoch)
            if epoch % 25 == 0:
                print('epoch', epoch, scores)
                print('neg_batch', neg_batch[:,2])
    def save_model(self, chkpnt):
        """Checkpoint the discriminator (only) under models/<dataset>/simple/."""
        print("Saving the model")
        directory = "models/" + self.dataset.name + "/" + 'simple' + "/"
        if not os.path.exists(directory):
            os.makedirs(directory)
        torch.save(self.discriminator, directory + str(chkpnt) + ".chkpnt")
#끝나는 시간이 빠른 순으로 정렬
import sys
n = int(sys.stdin.readline())
s = []
for i in range(n):
first, second = map(int, input().split())
s.append([first, second])
#먼저, 시작 시간을 기준으로 정렬
s = sorted(s, key=lambda a: a[0])
#끝나는 사간을 기준으로 정렬
s = sorted(s, key=lambda a: a[1])
last = 0
cnt = 0
for start, finish in s:
if start >= last:
cnt += 1
last = finish
print(cnt) |
from collections import defaultdict
from tqdm import tqdm
from advent import AdventProblem
def preprocess(lines):
    """Parse the puzzle input: one comma-separated line of integers."""
    return list(map(int, lines[0].split(',')))
def part_1(starting_nums, end=2020):
    """Play the elves' memory game (AoC 2020 day 15) up to turn ``end``.

    Each turn speaks 0 if the previous number was new, otherwise the gap
    between its two most recent occurrences; returns the last number spoken.

    PERF FIX: the original kept *every* occurrence index per number in a
    defaultdict(list) (O(end) memory, an append per turn) when only the
    previous occurrence is ever consulted; a plain dict of last-seen turns
    keeps memory at O(distinct numbers).
    """
    # Turn index at which each number was previously spoken (all starting
    # numbers except the most recent one, which is still "in play").
    last_seen = {num: turn for turn, num in enumerate(starting_nums[:-1])}
    last_num = starting_nums[-1]
    for turn in tqdm(range(len(starting_nums), end)):
        prev_turn = last_seen.get(last_num)
        last_seen[last_num] = turn - 1
        # New number -> 0; otherwise the gap between its last two mentions.
        last_num = 0 if prev_turn is None else (turn - 1) - prev_turn
    return last_num
def part_2(starting_nums):
    """Part 2 is part 1 played out to thirty million turns."""
    return part_1(starting_nums, end=30000000)
if __name__ == '__main__':
    # Wire both parts into the AdventProblem harness (day 15, file input).
    part1 = AdventProblem(15, 1, preprocess, "file")
    part1.add_solution(part_1)
    part1.run()
    part2 = AdventProblem(15, 2, preprocess, "file")
    part2.add_solution(part_2)
    part2.run()
|
# Simple interactive family-list manager.
# The menu text was duplicated four times in the original; it is now a
# single constant prompted once per loop iteration.
MENU_PROMPT = ("Enter 1 to add name \nEnter 2 to remove a name from the list \n"
               "Enter 3 to view family members\nEnter 4 to exit")

listFamily = ["Ala"]
option = input(MENU_PROMPT)
while option != "4":
    if option == "1":
        name = input("Enter a name to add : ")
        listFamily.append(name)
    elif option == "2":
        name = input("Enter a name to remove : ")
        if name in listFamily:
            listFamily.remove(name)
        else:
            print("there is no name like this")
    elif option == "3":
        print(listFamily)
    else:
        # BUG FIX: the original spun forever on any unrecognized option
        # (e.g. "5") because it only re-prompted inside the matched branches.
        print("Unknown option, please enter 1, 2, 3 or 4")
    option = input(MENU_PROMPT)
|
# Import necessary libraries
import numpy as np
# Calculation of the root mean squared error
def rmse(y_true, y_pred):
    """Root mean squared error between two equally-shaped arrays."""
    squared_errors = (y_pred - y_true) ** 2
    return np.sqrt(squared_errors.mean())
# Moving window approach for the nextday models
def moving_window_nextday(data, window_size):
    """Build next-day supervised pairs with a sliding window.

    Every slice of ``window_size + 1`` consecutive rows becomes one sample:
    the first ``window_size`` rows are the predictors, the final row is the
    next-day target.

    Returns [x_data, y_data] with shapes
    (n - window_size, window_size, features) and (n - window_size, features).
    """
    windows = np.array([data[start:start + window_size + 1, :]
                        for start in range(len(data) - window_size)])
    x_data = windows[:, :-1, :]
    y_data = windows[:, -1, :]
    return [x_data, y_data]
# Moving window approach for the intraday model
# Here the Open price is used as a predictor for th3e low, high and close prices of the same day
def moving_window_intraday(data, window_size):
    """Build intraday supervised pairs with a sliding window.

    Column 0 is the Open price; columns 1..3 are low/high/close.  For each
    window, the predictors combine the *current* day's open (shifted one
    step forward) with the previous days' remaining columns, and the target
    is the current day's low/high/close.

    Returns [x_data, y_data] with shapes
    (n - window_size, window_size, features) and (n - window_size, 3).
    """
    windows = np.array([data[start:start + window_size + 1, :]
                        for start in range(len(data) - window_size)])
    # Open price of the day being predicted, aligned with each lag step.
    open_column = windows[:, 1:, 0, None]
    # All other columns from the preceding days.
    lagged_rest = windows[:, :-1, 1:]
    x_data = np.concatenate([open_column, lagged_rest], axis=2)
    # Target: low, high and close of the final (predicted) day.
    y_data = windows[:, -1, 1:4]
    return [x_data, y_data]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.