text stringlengths 38 1.54M |
|---|
import tkinter
import tkinter.messagebox
class MyGUI:
    """Tkinter window that lets the user pick car-service options and shows
    the total price of the checked services (labels are in Polish)."""

    # (checkbox label, price in PLN) for each offered service.  This table
    # replaces the original's seven copy-pasted cb_var1..cb_var7 / cb1..cb7
    # attribute groups.
    SERVICES = [
        ('Wymiana oleju: 30 zł', 30),
        ('Smarowanie: 20 zł', 20),
        ('Sprawdzanie chłodnicy: 40 zł', 40),
        ('Sprawdzanie skrzyni biegów: 100 zł', 100),
        ('Przegląd: 35 zł', 35),
        ('Wymiana tłumika: 200 zł', 200),
        ('Wyważenie kół: 20 zł', 20),
    ]

    def __init__(self):
        self.main_window = tkinter.Tk()
        self.text_frame = tkinter.Frame(self.main_window)
        self.services_frame = tkinter.Frame(self.main_window)
        self.button_frame = tkinter.Frame(self.main_window)
        self.intro = tkinter.Label(
            self.text_frame,
            # typo fixed: 'któych' -> 'których'
            text='Wybierz usługi, z których chcesz skorzystać:')
        self.intro.pack(side='left')
        # One (IntVar, price) pair per service; each IntVar starts at 0
        # (unchecked), exactly like the original cb_varN.set(0) calls.
        self.checks = []
        for label, price in self.SERVICES:
            var = tkinter.IntVar(value=0)
            tkinter.Checkbutton(self.services_frame, text=label,
                                variable=var).pack()
            self.checks.append((var, price))
        self.show_button = tkinter.Button(
            self.button_frame, text='Oblicz koszt usług',
            command=self.show_cost)
        self.quit_button = tkinter.Button(
            self.button_frame, text='Zakończ',
            command=self.main_window.destroy)
        self.show_button.pack(side='left')
        self.quit_button.pack(side='left')
        self.text_frame.pack()
        self.services_frame.pack()
        self.button_frame.pack()
        tkinter.mainloop()

    def show_cost(self):
        """Sum the prices of all checked services and show them in a
        message box."""
        self.cost = sum(price for var, price in self.checks
                        if var.get() == 1)
        tkinter.messagebox.showinfo(
            'Koszt', 'Łączny koszt usług: ' + str(self.cost) + 'zł.')


my_gui = MyGUI()
|
# coding: utf-8
# # Project: Visualizing Race and Gender Representation In American Movies
#
# In this project you'll use data visualization techniques to analyze how the top 50 movies of 2016 performed according to the <a href = "https://en.wikipedia.org/wiki/Bechdel_test" target="_blank"> Bechdel Test </a>and other representation-based tests.
#
# **The Bechdel-Wallace Test** — often abbreviated to the "Bechdel Test" — evaluates movies based on two simple questions:
#
# 1. Does the movie have at least two named female characters?
# 2. And do those characters have at least one conversation that is not about a man?
#
# To perform your visualization, you'll use a dataset called "The Next Bechdel Test" from <a href="https://www.fivethirtyeight.com" target="_blank">fivethirtyeight.com</a>. You can read more about the dataset at the following link:
#
# <a href = "https://projects.fivethirtyeight.com/next-bechdel/" target="_blank">The Next Bechdel Test </a>
#
#
#
# In addition to results of the Bechdel test, this dataset includes the results of similar representation tests developed by members of the movie industry and evaluated by <a href = "http://fivethirtyeight.com" target="_blank">fivethirtyeight.com. </a>
#
# **The Waithe Test** (Lena Waithe)
#
# A movie passes if:
# - There's a black woman in the work
# - Who's in a position of power
# - And she's in a healthy relationship
#
# **The Ko Test** (Naomi Ko)
#
# A movie passes if:
# - There's a non-white, female-identifying person in the film
# - Who speaks in five or more scenes
# - And speaks English
#
# ## 1. SETUP
# Import matplotlib.pyplot as plt and pandas as pd.
# In[2]:
import matplotlib.pyplot as plt
import pandas as pd

# Load the fivethirtyeight "Next Bechdel Test" dataset.  Each row is a movie;
# each test column holds 1 (pass) or 0 (fail).
df = pd.read_csv('bechdelExpanded.csv')

# Inspect the data.
df.head()
df.info()

# Total score across the three tests of interest (0..3 per movie).
df['total_score'] = df.bechdel + df.waithe + df.ko
df.head()

# Sort by total score so the bar chart reads from lowest to highest, and
# reset the index so row numbers follow the new order.
df_sorted = df.sort_values('total_score').reset_index(drop=True)
df_sorted.head()

# Keep only the columns used in the visualization.
df_partial = df_sorted[['movie', 'bechdel', 'waithe', 'ko', 'total_score']]
df_partial.head()

# BUG FIX: the original called ax.set_index('movie') without assigning the
# result -- set_index() returns a new DataFrame and does not modify in
# place -- so the index was never set and the plots were labelled by row
# number instead of movie title.
ax = df_partial[['movie', 'total_score']].set_index('movie')

# Vertical bar chart of total scores per movie.
ax.plot(kind='bar', figsize=(15, 10), title='Representation In Movies',
        legend=True)

# Horizontal variant: taller figure and bigger font make the movie names
# easier to read.
ax.plot(kind='barh', figsize=(15, 15), title='Representation In Movies',
        legend=True, fontsize=12)
# ---
|
# -*- coding: UTF-8 -*-
# Copyright 2012-2020 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
from django.db import models
from django.db.models import Q
from django.conf import settings
from django.utils.text import format_lazy
from lino.api import dd, rt, _
from lino import mixins
from lino.core.roles import Explorer
from lino.utils import join_elems
from etgen.html import E
# from lino.utils.report import Report
# from lino.modlib.system.choicelists import PeriodEvents
# from lino.modlib.users.mixins import My
from lino_xl.lib.ledger.ui import ByJournal
from lino_xl.lib.ledger.choicelists import VoucherTypes
from lino_xl.lib.sales.models import InvoicesByPartner
from .roles import OrdersUser, OrdersStaff
from .choicelists import OrderStates
# cal = dd.resolve_app('cal')
# Resolve the worker model configured for the orders plugin.  When Sphinx
# autodoc imports this module without the plugin being installed,
# dd.plugins has no `orders` attribute, so fall back to a dummy model spec.
try:
    worker_model = dd.plugins.orders.worker_model
except AttributeError:
    # Happens only when Sphinx autodoc imports it and this module is
    # not installed.
    worker_model = 'foo.Bar'
class OrderDetail(dd.DetailLayout):
    """Detail window layout for an order: a General tab, a Calendar tab and
    a Miscellaneous tab (the latter labelled via the `enrolments` panel)."""
    required_roles = dd.login_required(OrdersUser)
    # start = "start_date start_time"
    # end = "end_date end_time"
    # freq = "every every_unit"
    # start end freq
    main = "general cal_tab enrolments"
    # General tab: header fields, invoicing info, description and the two
    # slave tables for enrolments and order items.
    general = dd.Panel("""
    entry_date subject printed #print_actions:15 workflow_buttons start_date
    project invoice_recipient
    description
    EnrolmentsByOrder ItemsByOrder
    """, label=_("General"))
    # Calendar tab: recurrence settings plus calendar entries and tasks
    # controlled by this order.
    cal_tab = dd.Panel("""
    start_time end_time #end_date every_unit every positions:10 max_events:8 max_date
    monday tuesday wednesday thursday friday saturday sunday
    cal.EntriesByController cal.TasksByController
    """, label=_("Calendar"))
    # first_entry_panel = dd.Panel("""
    # start_time
    # end_time #end_date
    # """, label=_("First appointment"))
    # repeat_panel = dd.Panel("""
    #
    #
    # """, label=_("Recursion"))
    enrolments_top = 'journal number id:8 user'
    # Miscellaneous tab: journal info, the client's sales invoices and a
    # free-text remark.
    enrolments = dd.Panel("""
    enrolments_top
    InvoicesByOrder
    # EnrolmentsByOrder
    remark
    """, label=_("Miscellaneous"))
# Order.detail_layout_class = OrderDetail
# Order.detail_layout_class = OrderDetail
class InvoicesByOrder(InvoicesByPartner):
    """Sales invoices of the client (the order's `project`), shown as a
    panel on the order's detail window."""
    label = _("Sales invoices (of client)")

    @classmethod
    def get_master_instance(cls, ar, model, pk):
        # InvoicesByPartner expects a Partner as master, but this panel is
        # embedded in an order detail, so `pk` identifies an Order; hand
        # back that order's client instead.
        assert model is rt.models.contacts.Partner
        return rt.models.orders.Order.objects.get(pk=pk).project
class Orders(dd.Table):
    """Base table of all orders; the concrete views below subclass this and
    narrow it down (by journal, project, recipient or state)."""
    # _order_area = None
    required_roles = dd.login_required(OrdersUser)
    model = 'orders.Order'
    detail_layout = 'orders.OrderDetail'
    insert_layout = """
    project
    journal
    entry_date
    """
    column_names = "start_date project remark workflow_buttons *"
    # Newest orders first.
    order_by = ['-start_date', '-start_time']
    auto_fit_column_widths = True
    # parameters = mixins.ObservedDateRange(
    #     worker=dd.ForeignKey(
    #         worker_model, blank=True, null=True),
    #     # user=dd.ForeignKey(
    #     #     settings.SITE.user_model,
    #     #     blank=True, null=True),
    #     show_exposed=dd.YesNo.field(
    #         _("Exposed"), blank=True,
    #         help_text=_("Whether to show rows in an exposed state"))
    # )
    #
    # params_layout = """user worker state
    # start_date end_date show_exposed"""
    # @classmethod
    # def get_actor_label(self):
    #     if self._order_area is not None:
    #         return self._order_area.text
    #     return super(Orders, self).get_actor_label()
    # @classmethod
    # def get_simple_parameters(cls):
    #     s = list(super(Orders, cls).get_simple_parameters())
    #     s.append('project')
    #     # s.append('order_state')
    #     # s.add('user')
    #     return s
    # @classmethod
    # def get_request_queryset(self, ar, **kwargs):
    #     # dd.logger.info("20160223 %s", self)
    #     qs = super(Orders, self).get_request_queryset(ar, **kwargs)
    #     if isinstance(qs, list):
    #         return qs
    #
    #     if self._order_area is not None:
    #         qs = qs.filter(order_area=self._order_area)
    #
    #     pv = ar.param_values
    #     qs = PeriodEvents.active.add_filter(qs, pv)
    #
    #     qs = self.model.add_param_filter(
    #         qs, show_exposed=pv.show_exposed)
    #
    #     # if pv.start_date:
    #     #     # dd.logger.info("20160512 start_date is %r", pv.start_date)
    #     #     qs = PeriodEvents.started.add_filter(qs, pv)
    #     #     # qs = qs.filter(start_date__gte=pv.start_date)
    #     # if pv.end_date:
    #     #     qs = PeriodEvents.ended.add_filter(qs, pv)
    #     #     # qs = qs.filter(end_date__lte=pv.end_date)
    #     # dd.logger.info("20160512 %s", qs.query)
    #     return qs
class OrdersByJournal(Orders, ByJournal):
    """Orders of a given journal (master key `journal`); the journal field
    is therefore commented out of the insert layout."""
    # required_roles = dd.login_required(OrdersUser)
    # _order_area = OrderLayouts.default
    required_roles = dd.login_required(OrdersUser)
    master_key = 'journal'
    column_names = "number project start_date remark weekdays_text workflow_buttons *"
    insert_layout = """
    project
    # journal
    entry_date
    """


# Register this table as the voucher type used for order journals.
VoucherTypes.add_item_lazy(OrdersByJournal)
class AllOrders(Orders):
    """Unfiltered list of all orders, for users with the Explorer role;
    ordered by primary key."""
    # _order_area = None
    required_roles = dd.login_required(Explorer)
    column_names = "id journal number entry_date:8 workflow_buttons user *"
    order_by = ['id']
class OrdersByProject(Orders):
    """Orders of a given client (master key `project`), oldest first; the
    project field is therefore commented out of the insert layout."""
    master_key = 'project'
    column_names = "entry_date:8 detail_link workflow_buttons user " \
        "weekdays_text:10 times_text:10 *"
    order_by = ['entry_date']
    insert_layout = """
    # project
    journal
    entry_date
    """
class OrdersByRecipient(Orders):
    """Orders of a given invoice recipient (master key
    `invoice_recipient`)."""
    master_key = 'invoice_recipient'
    column_names = "project entry_date:8 journal number workflow_buttons user " \
        "weekdays_text:10 times_text:10 *"
class WaitingOrders(Orders):
    """Orders whose `state` parameter defaults to draft, i.e. orders that
    are still waiting."""
    label = _("Waiting orders")
    column_names = "entry_date:8 project detail_link workflow_buttons user " \
        "weekdays_text:10 times_text:10 *"
    order_by = ['entry_date']

    @classmethod
    def param_defaults(self, ar, **kwargs):
        # Pre-select the draft state in the parameter panel.
        kwargs = super(WaitingOrders, self).param_defaults(ar, **kwargs)
        kwargs.update(state=OrderStates.draft)
        return kwargs
class ActiveOrders(Orders):
    """Orders whose `state` parameter defaults to active."""
    label = _("Active orders")
    column_names = "entry_date:8 project detail_link workflow_buttons user " \
        "weekdays_text:10 times_text:10 *"
    order_by = ['entry_date']

    @classmethod
    def param_defaults(self, ar, **kwargs):
        # Pre-select the active state in the parameter panel.
        kwargs = super(ActiveOrders, self).param_defaults(ar, **kwargs)
        kwargs.update(state=OrderStates.active)
        return kwargs
class UrgentOrders(Orders):
    """Orders whose `state` parameter defaults to urgent."""
    label = _("Urgent orders")
    column_names = "entry_date:8 project detail_link workflow_buttons user " \
        "weekdays_text:10 times_text:10 *"
    order_by = ['entry_date']

    @classmethod
    def param_defaults(self, ar, **kwargs):
        # Pre-select the urgent state in the parameter panel.
        kwargs = super(UrgentOrders, self).param_defaults(ar, **kwargs)
        kwargs.update(state=OrderStates.urgent)
        return kwargs
# class MyOrders(My, Orders):
# column_names = "entry_date:8 name id workflow_buttons *"
# order_by = ['entry_date']
# @classmethod
# def param_defaults(self, ar, **kw):
# kw = super(MyOrders, self).param_defaults(ar, **kw)
# # kw.update(state=OrderStates.active)
# kw.update(show_exposed=dd.YesNo.yes)
# return kw
# class EnrolmentDetail(dd.DetailLayout):
# main = """
# #request_date user start_date end_date
# order worker
# remark workflow_buttons
# confirmation_details
# """
class Enrolments(dd.Table):
    """Base table of enrolments (worker-to-order assignments); concrete
    views below narrow it by worker or by order."""
    # _order_area = None
    required_roles = dd.login_required(OrdersUser)
    # debug_permissions=20130531
    model = 'orders.Enrolment'
    stay_in_grid = True
    # order_by = ['request_date']
    column_names = 'order order__state worker *'
    # hidden_columns = 'id state'
    insert_layout = """
    order
    worker
    remark
    """
    # detail_layout = "orders.EnrolmentDetail"
    # @classmethod
    # def get_title_tags(self, ar):
    #     for t in super(Enrolments, self).get_title_tags(ar):
    #         yield t
    #
    #     if ar.param_values.state:
    #         yield str(ar.param_values.state)
    #     elif not ar.param_values.participants_only:
    #         yield str(_("Also ")) + str(EnrolmentStates.cancelled.text)
    #     if ar.param_values.order_state:
    #         yield str(
    #             settings.SITE.models.orders.Order._meta.verbose_name) \
    #             + ' ' + str(ar.param_values.order_state)
    #     if ar.param_values.author:
    #         yield str(ar.param_values.author)
class AllEnrolments(Enrolments):
    """Unfiltered list of all enrolments with worker demographics, for
    users with the Explorer role; newest first."""
    required_roles = dd.login_required(Explorer)
    order_by = ['-id']
    column_names = 'id order worker worker__birth_date worker__age worker__country worker__city worker__gender *'
class EnrolmentsByWorker(Enrolments):
    """Enrolments of a given worker (master key `worker`)."""
    params_panel_hidden = True
    required_roles = dd.login_required(OrdersUser)
    master_key = "worker"
    column_names = 'order order__project remark workflow_buttons *'
    auto_fit_column_widths = True
    insert_layout = """
    # order_area
    order
    remark
    """

    @classmethod
    def param_defaults(self, ar, **kw):
        # BUG FIX: the original called super(EnrolmentsByworker, ...) with a
        # lower-case "w" -- a name that does not exist -- so this method
        # raised NameError whenever it was invoked.
        kw = super(EnrolmentsByWorker, self).param_defaults(ar, **kw)
        kw.update(participants_only=False)
        return kw
    # @classmethod
    # def get_actor_label(cls):
    #     if cls._order_area is not None:
    #         orders = cls._order_area.text
    #     else:
    #         orders = rt.models.orders.Order._meta.verbose_name_plural
    #     return format_lazy(
    #         _("{enrolments} in {orders}"),
    #         enrolments=rt.models.orders.Enrolment._meta.verbose_name_plural,
    #         orders=orders)
class EnrolmentsByOrder(Enrolments):
    """Workers enrolled in a given order (master key `order`); shown in the
    order's detail window under the label "Workers"."""
    params_panel_hidden = True
    required_roles = dd.login_required(OrdersUser)
    # required_roles = dd.login_required(OrdersUser)
    master_key = "order"
    column_names = 'worker guest_role ' \
        'remark #workflow_buttons *'
    auto_fit_column_widths = True
    # cell_edit = False
    # display_mode = 'html'
    insert_layout = """
    worker
    guest_role
    remark
    """
    label = _("Workers")
    # @classmethod
    # def get_actor_label(self):
    #     return rt.models.orders.Enrolment._meta.verbose_name_plural
# class OrderItemDetail(dd.DetailLayout):
# main = """
# seqno product #discount
# #unit_price qty total_base total_vat total_incl
# title
# description"""
#
# window_size = (80, 20)
#
class OrderItems(dd.Table):
    """Shows all order items.

    Editable only by users with the OrdersStaff role.
    """
    model = 'orders.OrderItem'
    required_roles = dd.login_required(OrdersStaff)
    auto_fit_column_widths = True
    # detail_layout = 'orders.OrderItemDetail'
    insert_layout = """
    product qty
    remark
    """
    stay_in_grid = True
class ItemsByOrder(OrderItems):
    """Items of a given order (master key `voucher`), in sequence-number
    order; visible to ordinary orders users."""
    label = _("Needed per mission")
    master_key = 'voucher'
    order_by = ["seqno"]
    required_roles = dd.login_required(OrdersUser)
    column_names = "product qty remark *"
|
class Toolkit:
    """
    Python Toolkit for interacting with the IBM i via different transports.
    """

    def __init__(self, connection):
        # connection: transport object exposing execute(payload).
        self.connection = connection
        self.payload = []

    def add(self, o):
        """
        Add an object to the payload that will be passed to the connection.

        :param o: Object to be added
        :return: void
        """
        self.payload.append(o)

    def execute(self):
        """
        Execute the transport's payload.

        Each queued object is serialized via its ``get_payload()`` method.
        If any queued object has no ``get_payload()``, the queued list is
        assumed to already be raw json and is sent to the transport as-is.

        :return: object returned by the transport, or {} if the transport
            rejected the payload structure
        """
        payload = []
        for p in self.payload:
            # BUG FIX: the original caught AttributeError around
            # p.get_payload() and rebound `payload = self.payload`; later
            # iterations then appended serialized items to self.payload --
            # the very list being iterated -- growing it without bound and
            # corrupting the queue.  Test for the method and stop instead.
            serialize = getattr(p, "get_payload", None)
            if serialize is None:
                # p does not have get_payload; assume the queued payload is
                # raw json to be sent straight to the transport.
                payload = self.payload
                break
            payload.append(serialize())
        try:
            result = self.connection.execute(payload)
        except TypeError:
            print("The payload was not structured properly.")
            return {}
        self.payload = []
        return result
|
# Copyright 2019 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
import json
from collections import OrderedDict
from numbers import Number
from .exception import TemplateEngineException
from .tags.tag_base import TagBase
from .tag_resolver import TagResolver
from .string_resolver import StringResolver
class ElementResolver(object):
    """
    `ElementResolver` resolves a JSON element.
    """

    def __init__(self, template_loader, stats):
        """
        Construct a new ElementResolver.

        :param template_loader: TemplateLoader object for loading
            additional templates
        :type template_loader: 'TemplateLoader'
        :param stats: Stats object collecting stats
        :type stats: 'Stats'
        """
        self._string_resolver = StringResolver(self, stats)
        self._tag_resolver = TagResolver(self, template_loader)

    def resolve(self, element, binding_data_list):
        """
        Resolve one element in a JSON template. The element could be one of
        JSON data types. The resolution is recursive in a depth first manner.

        :param element: A JSON object of one of JSON types.
        :type element: JSON data type
        :param binding_data_list: Binding data list used to expand parameters
            in the template element.
        :type binding_data_list: 'list'
        :return: Resolved JSON element.
        :rtype: JSON data type
        """
        if isinstance(element, str):
            # Strings may contain parameter references; delegate.
            return self._string_resolver.resolve(element, binding_data_list)
        elif isinstance(element, Number):
            # NOTE(review): bool is a subclass of int, so booleans are also
            # matched by this Number branch and the bool branch below is
            # unreachable; harmless, since both return element unchanged.
            return element
        elif isinstance(element, bool):
            return element
        elif element is None:
            return element
        elif isinstance(element, dict):
            # Objects: resolve each key/value; keys that are themselves tags
            # must map to a list (the tag's arguments) and their resolved
            # result is merged into the object.
            new_element = OrderedDict()
            for key, value in element.items():
                if TagResolver.is_key_tag(key):
                    if not isinstance(value, list):
                        raise TemplateEngineException(
                            "Value must be a list if name is a tag: \"{}\". Found \"{}\".".
                            format(key, value))
                    # Re-shape [tag-name, *args] and resolve as a tag.
                    tag_temp = [key] + value
                    resolved_tuple = self.resolve(
                        tag_temp, binding_data_list)
                    if isinstance(resolved_tuple, dict):
                        # Merge the tag's resolved key/value pairs in.
                        new_element.update(resolved_tuple)
                    elif resolved_tuple is not TagBase.TAG_NONE:
                        raise TemplateEngineException(
                            "Invalid tag result format for JSON"
                            " object name tag: {} {} => {}.".
                            format(json.dumps(key, separators=(',', ':')),
                                   json.dumps(value, separators=(',', ':')),
                                   json.dumps(resolved_tuple, separators=(',', ':'))))
                else:
                    new_key = self.resolve(key, binding_data_list)
                    new_value = self.resolve(value, binding_data_list)
                    # Entries whose value resolves to TAG_NONE (or whose key
                    # is no longer a string) are dropped from the output.
                    if isinstance(new_key, str) and \
                            new_value is not TagBase.TAG_NONE:
                        new_element[new_key] = new_value
            return new_element
        elif isinstance(element, list):
            # A list may itself be a tag invocation; otherwise resolve each
            # item, dropping TAG_NONE results.
            if TagResolver.is_tag(element):
                return self._tag_resolver.resolve(element, binding_data_list)
            new_element = list()
            for item in element:
                new_item = self.resolve(item, binding_data_list)
                if new_item is not TagBase.TAG_NONE:
                    new_element.append(new_item)
            return new_element
        raise TemplateEngineException(
            "Unknown data type {} of {}".format(type(element), element))
|
import math


def perimetro(puntos):
    """Return the perimeter of the triangle whose vertices are *puntos*
    (a sequence of three (x, y) pairs): the sum of its side lengths."""
    (x0, y0), (x1, y1), (x2, y2) = puntos
    return (math.dist((x0, y0), (x1, y1))
            + math.dist((x1, y1), (x2, y2))
            + math.dist((x2, y2), (x0, y0)))


def area(puntos):
    """Return the area of the triangle via the shoelace formula."""
    (x0, y0), (x1, y1), (x2, y2) = puntos
    return abs(x0 * (y1 - y2) + x1 * (y2 - y0) + x2 * (y0 - y1)) / 2


def main():
    # Read the three vertices from the user.
    puntos = []
    for i in range(3):
        x = int(input("Ingrese las coordenadas x: "))
        y = int(input("Ingrese las coordenadas y: "))
        puntos.append((x, y))
    # BUG FIX: the original computed base = x2-x0 and altura = y1-y0 and
    # treated the triangle as an axis-aligned right triangle, giving wrong
    # (even negative) results for any other input; side lengths plus the
    # shoelace formula are correct for every triangle.
    print("El perímetro del triángulo es: ", perimetro(puntos))
    print("El área del triángulo es: ", area(puntos))


if __name__ == "__main__":
    main()
from machine import I2C
import struct
import time
import utime
class Clock:
    """ CLOCK is a HT1382 I2C/3-Wire Real Time Clock with a 32 kHz crystal """

    def __init__(self, i2, a = 0x68):
        # i2: an initialized I2C bus object; a: the device's 7-bit I2C
        # address (0x68 by default).
        self.i2 = i2
        self.a = a

    def set(self, tt = None):
        """ tt is (year, month, mday, hour, minute, second, weekday, yearday), as used
        by utime. """
        if tt is None:
            tt = utime.localtime()
        (year, month, mday, hour, minute, second, weekday, yearday) = tt

        def bcd(x):
            # Convert 0-99 decimal to packed BCD (e.g. 45 -> 0x45).
            return (x % 10) + 16 * (x // 10)
        # Register 7 is written to 0 first -- presumably this clears a
        # write-protect bit; confirm against the HT1382 datasheet.
        self.i2.writeto_mem(self.a, 7, bytes([0]))
        # Registers 0..6: seconds, minutes, hours, day-of-month, month,
        # weekday (stored 1-based) and two-digit year, all BCD-encoded.
        self.i2.writeto_mem(self.a, 0, bytes([
            bcd(second),
            bcd(minute),
            0x80 | bcd(hour),  # use 24-hour mode
            bcd(mday),
            bcd(month),
            1 + weekday,
            bcd(year % 100)]))

    def regrd(self, addr, fmt = "B"):
        # Read struct.calcsize(fmt) bytes starting at register `addr` and
        # unpack them according to `fmt`; always returns a tuple.
        b = self.i2.readfrom_mem(self.a, addr, struct.calcsize(fmt))
        return struct.unpack(fmt, b)

    def read(self):
        # Read the seven clock registers in one transaction.
        (ss,mm,hh,dd,MM,ww,yy) = self.regrd(0, "7B")

        def dec(x):
            # Convert packed BCD back to decimal (e.g. 0x45 -> 45).
            return (x % 16) + 10 * (x // 16)
        # Returns (year, month, mday, hour, minute, second, weekday) with a
        # 0-based weekday; the hour's top bit (the 24-hour-mode flag set in
        # set()) is masked off.
        return (
            2000 + dec(yy),
            dec(MM),
            dec(dd),
            dec(hh & 0x7f),
            dec(mm),
            dec(ss),
            dec(ww) - 1)
def main():
    # Demo: drive the RTC on I2C bus 1 at 100 kHz and print the time once a
    # second.
    i2 = I2C(1, freq = 100000)
    d = Clock(i2)
    # Set the clock to 2019-2-10 14:45:00 (the original comment said 2010,
    # which did not match the tuple actually passed below).
    d.set((2019, 2, 10, 14, 45, 0, 0, 1))
    while True:
        print('year=%4d month=%2d mday=%2d time=%02d:%02d:%02d weekday=%d' % d.read())
        time.sleep(1)
|
from flask import Flask, request

app = Flask(__name__)


@app.route("/")
def index():
    """Front page: a fixed Norwegian greeting."""
    return "Hei, verden!"


@app.route("/navn")
def navn():
    """Greet the caller by name.

    BUG FIX: the original returned the f-string "Hei, !" with no
    placeholder at all.  The name is now read from the ?navn= query
    parameter; when it is absent the response is the same "Hei, !" as
    before, so existing callers see no change.
    """
    navn = request.args.get("navn", "")
    return f"Hei, {navn}!"
|
"""
编写一个函数,以字符串作为输入,反转该字符串中的元音字母。
示例 1:
输入: "hello"
输出: "holle"
示例 2:
输入: "leetcode"
输出: "leotcede"
说明:
元音字母不包含字母"y"。
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/reverse-vowels-of-a-string
"""
class Solution:
    def reverseVowels(self, s: str) -> str:
        """Return *s* with its vowels (a/e/i/o/u, either case) in reversed
        order while every other character keeps its position."""
        # Gather the vowels left-to-right, then emit them back
        # last-to-first as the string is rebuilt.
        pool = [ch for ch in s if ch.lower() in "aeiou"]
        rebuilt = []
        for ch in s:
            rebuilt.append(pool.pop() if ch.lower() in "aeiou" else ch)
        return "".join(rebuilt)
# Quick manual check with a vowel-heavy sentence.
print(Solution().reverseVowels("Euston saw I was not Sue."))
|
#!/usr/bin/env python
import sys
import json
import BaseHTTPServer
#sys.path.append('/Users/surendrashrestha/Projects/degree_planner/degree_planner')
from degree_planner.degree_parser import *
from degree_planner.handbook import *
from degree_planner.degree_planner import *
from bottle import template, request, redirect, route, post, run, static_file, view, app
from beaker.middleware import SessionMiddleware
# Beaker session configuration: file-backed sessions stored under ./data,
# auto-saved on change, expiring after 5000 seconds.
session_opts = {
    'session.type': 'file',
    'session.cookie_expires': 5000,
    'session.data_dir': './data',
    'session.auto': True
}
# Wrap the bottle WSGI app in beaker's session middleware; this wrapped
# `app` is what run() serves at the bottom of the file.
app = SessionMiddleware(app(), session_opts)
@route('/static/<filepath:path>')
def server_static(filepath):
    # Serve files below ./static for any /static/<path> URL.
    return static_file(filepath, root='./static')
@route('/documentation', method='GET')
def index():
    # Render the static documentation template.
    return template('documentation')
# Procedure to handle the home page /
@route("/", method='GET')
@route("/", method='POST')
def index():
    """Home page: on POST (degree/major chosen) build a degree plan, stash
    it in the beaker session for the AJAX endpoints, and render it; on GET
    render the empty chooser."""
    handbook = Handbook()
    #degree_planner = Degree_Planner()
    year = '2014'
    s = request.environ.get('beaker.session')
    all_degrees = handbook.extract_all_degrees('2014')
    dp = Degree_Planner()
    if request.forms:
        degree_code = request.forms.get("degree")
        major_code = request.forms.get("major")
        #global dp
        # Rebuild the planner for the selected degree/major, session 1.
        dp = Degree_Planner(degree_code, major_code, year, 's1')
        pp = Prereq_Parser(degree_code, year)
        #s.delete()
        all_available_units = dp.get_available_units_for_entire_degree()
        sorted_years = sorted(all_available_units.keys())
        # Attach the requirement data to the planner instance.
        dp.gen_degree_req = handbook.extract_general_requirements_of_degree(degree_code, '2014')
        dp.degree_req_units = handbook.extract_degree_requirements(degree_code, year)
        print 'degree_req_units: ', dp.degree_req_units
        if major_code:
            dp.major_req_units = handbook.extract_major_requirements(major_code, year)
        else:
            dp.major_req_units = []
        print 'dp.gen_degree_req: ', dp.gen_degree_req
        # Deduplicate the planned units; "True " is an artifact value --
        # presumably leaking from the parser; verify upstream.
        planned_student_units = list(set(dp.planned_student_units))
        if "True " in planned_student_units:
            planned_student_units.remove("True ")
        dp.planned_student_units = planned_student_units
        print 'planned_student_units_json: ', dp.planned_student_units_json
        # Persist the plan and requirement data so the AJAX endpoints
        # (/populate_modal, /update_requirements) can read them.
        s['planned_student_units'] = dp.planned_student_units
        s['planned_student_units_json'] = dp.planned_student_units_json
        s['gen_degree_req'] = dp.gen_degree_req
        s['degree_req_units'] = dp.degree_req_units
        s['major_req_units'] = dp.major_req_units
        s['degree_code'] = degree_code
        s['year'] = year
        s.save()
        # Mark which requirements the currently planned units satisfy.
        updated_gen_degree_req = pp.update_general_requirements_of_degree(dp.planned_student_units, dp.gen_degree_req)
        updated_degree_req_units = pp.update_degree_major_reqs(dp.planned_student_units, dp.degree_req_units)
        updated_major_req_units = pp.update_major_reqs(dp.planned_student_units, dp.major_req_units)
        print 'degree_req_units: ', dp.degree_req_units
        print 'updated_degree_req_units: ', updated_degree_req_units
        print 'updated_gen_degree_req: ', updated_gen_degree_req
        print 'planned_student_units_json: ', dp.planned_student_units_json
        #print updated_gen_degree_req
    else:
        # GET: no selection yet.  NOTE(review): these local gen_degree_req /
        # degree_req_units / major_req_units bindings are never used -- the
        # template call below reads dp.gen_degree_req etc. off the bare
        # Degree_Planner(); confirm those attributes exist in that case.
        degree_code = major_code = all_available_units = sorted_years = gen_degree_req = degree_req_units = major_req_units = None
        updated_gen_degree_req = updated_degree_req_units = updated_major_req_units = None
    return template('index',
                    degree_year = year,
                    all_degrees=all_degrees,
                    selected_degree=degree_code,
                    selected_major=major_code,
                    all_available_units=all_available_units,
                    sorted_years=sorted_years,
                    gen_degree_req=dp.gen_degree_req,
                    degree_req_units=dp.degree_req_units,
                    major_req_units=dp.major_req_units,
                    updated_gen_degree_req =updated_gen_degree_req,
                    updated_degree_req_units=updated_degree_req_units,
                    updated_major_req_units=updated_major_req_units
                    )
@route("/populate_modal", method='POST')
def index():
    """AJAX endpoint: return the units that can be placed in one
    year/session cell, filtered by session offering, units already planned
    and prerequisites."""
    handbook = Handbook()
    print 'request_json: ', request.json
    year_session = str(request.json['year_session'])
    print "year_session:", year_session
    [year, session] = year_session.split('_')
    # TOdo : fix it to work for any year.. but not possible due to handbook
    # The handbook data stops at 2014, so later years reuse it.
    if int(year) > 2014:
        handbook_year = '2014'
    else:
        handbook_year = year
    dp = Degree_Planner()
    people_units = dp.people_units
    planet_units = dp.planet_units
    comp_units = dp.comp_units
    bus_eco_units = dp.bus_eco_units
    # TODO Uncomment these lines
    #filtered_comp_units = handbook.filter_units_by_offering(comp_units, handbook_year, session)
    #filtered_bus_eco_units = handbook.filter_units_by_offering(bus_eco_units, handbook_year, session)
    # NOTE(review): if session is neither 's1' nor 's2' the filtered_*
    # names below stay unbound and the code after this raises NameError --
    # confirm the client can only send those two values.
    if session == 's1':
        filtered_comp_units = [unit for unit in comp_units if unit in dp.units_2014_s1 ]
        filtered_bus_eco_units = [unit for unit in bus_eco_units if unit in dp.units_2014_s1 ]
    elif session == 's2':
        filtered_comp_units = [unit for unit in comp_units if unit in dp.units_2014_s2 ]
        filtered_bus_eco_units = [unit for unit in bus_eco_units if unit in dp.units_2014_s2 ]
    print 'filtered_comp_units: ', filtered_comp_units
    s = request.environ.get('beaker.session')
    s['planned_student_units_json'] = s.get('planned_student_units_json')
    # Get all the units prior to that particular session
    print 'in populate modal'
    print 'session'
    print s['planned_student_units_json']
    student_units = dp.get_all_units_prior_to_session(s['planned_student_units_json'], year, session)
    print 'student_units_prior_to_session: ', student_units
    # Units already planned in this very session must be excluded too.
    if session == 's1' and year in s['planned_student_units_json'].keys():
        student_units_in_same_session = s['planned_student_units_json'][year][0]['s1']
    elif session == 's2' and year in s['planned_student_units_json'].keys():
        student_units_in_same_session = s['planned_student_units_json'][year][1]['s2']
    else:
        student_units_in_same_session = []
    print 'student_units_in_same_session: ', student_units_in_same_session
    remaining_comp_units = list(set(filtered_comp_units) - set(student_units) - set(student_units_in_same_session))
    print 'remaining_comp_units: ', remaining_comp_units
    remaining_bus_eco_units = list(set(filtered_bus_eco_units) - set(student_units) - set(student_units_in_same_session))
    print 'units in same session: ', student_units_in_same_session
    # Todo
    # Find the prereq and get all the units which satisfy the prereq
    available_comp_units = dp.filter_units_by_prereq(student_units, remaining_comp_units, handbook_year)
    print 'available comp units: ', available_comp_units
    available_bus_eco_units = dp.filter_units_by_prereq(student_units, remaining_bus_eco_units, handbook_year)
    print 'available bus eco units: ', available_bus_eco_units
    #filtered_people_units = handbook.filter_units_by_offering(people_units, year, session)
    #filtered_planet_units = handbook.filter_units_by_offering(planet_units, year, session)
    #return {"planet_units": filtered_planet_units, "people_units": filtered_people_units}
    return { "planet_units": planet_units,
             "people_units": people_units,
             "comp_units": available_comp_units,
             "bus_eco_units": available_bus_eco_units
           }
@route("/populate_major", method='POST')
def index():
    """AJAX endpoint: return the majors available for the posted degree
    code, or None when the handbook lookup fails."""
    handbook = Handbook()
    degree_code = str(request.json['degree_code'])
    try:
        majors = handbook.extract_all_majors_of_degree(degree_code, '2014')
    # BUG FIX: the original used a bare `except:`, which also swallows
    # SystemExit / KeyboardInterrupt; catch Exception instead.
    except Exception:
        majors = None
    return {"majors": majors}
@route("/update_requirements", method='POST')
def index():
    """AJAX endpoint: add the selected unit to the session's plan (removing
    the unit previously in that cell, if any) and return the re-evaluated
    degree/major requirement structures."""
    #global dp
    s = request.environ.get('beaker.session')
    degree_code = s.get('degree_code')
    year = s.get('year')
    pp = Prereq_Parser(degree_code, year)
    selected_unit = str(request.json['selected_unit']).strip()
    current_unit = str(request.json['current_unit']).strip()
    s['planned_student_units'] = s.get('planned_student_units')
    s['planned_student_units_json'] = s.get('planned_student_units_json')
    print 'planned_student_units', s['planned_student_units']
    print 'planned_student_units_json', s['planned_student_units_json']
    print 'major_req_units: ', s['major_req_units']
    # Already planned: nothing to update.
    if selected_unit in s['planned_student_units']:
        return {}
    s['planned_student_units'].append(selected_unit)
    # Update planned_student_units_json
    year_session = str(request.json['year_session'])
    [year, session] = year_session.split('_')
    print '[year, session] : ', year_session
    # NOTE(review): dict_code is unbound when session is neither 's1' nor
    # 's2', which would raise NameError below -- confirm the client can
    # only send those two values.
    if session == 's1':
        dict_code = 0
    elif session == 's2':
        dict_code = 1
    s['planned_student_units_json'][year][dict_code][session].append(selected_unit)
    # Remove Current unit (unit which was already present in the cell before selecting a new unit)
    if current_unit:
        s['planned_student_units'].remove(current_unit)
        s['planned_student_units_json'][year][dict_code][session].remove(current_unit)
    print 'new planned_student_units', s['planned_student_units']
    print 'new planned_student_units_json', s['planned_student_units_json']
    s.save()
    #print "s['planned_student_units']: ", s['planned_student_units']
    #print "s['planned_student_units_json']: ", s['planned_student_units_json']
    #print "s['degree_req_units']: ", s['degree_req_units']
    # Re-evaluate which requirements the updated plan satisfies.
    updated_gen_degree_req = pp.update_general_requirements_of_degree(s['planned_student_units'], s['gen_degree_req'])
    updated_degree_req_units = pp.update_degree_major_reqs(s['planned_student_units'], s['degree_req_units'])
    updated_major_req_units = pp.update_major_reqs(s['planned_student_units'], s['major_req_units'])
    print 'updated_gen_degree_req: ', updated_gen_degree_req
    print 'updated_degree_req_units: ', updated_degree_req_units
    print 'updated_major_req_units: ', updated_major_req_units
    return {"updated_gen_degree_req" : updated_gen_degree_req,
            "updated_degree_req_units" :updated_degree_req_units,
            "updated_major_req_units" : updated_major_req_units
            }
if __name__ == "__main__":
    # start a server but have it reload any files that
    # are changed
    # NOTE(review): clearing allow_reuse_address presumably makes a stale
    # bind fail fast during the reloader's restart cycle -- confirm
    setattr(BaseHTTPServer.HTTPServer,'allow_reuse_address',0)
    run(app=app, host="localhost", port=8000, reloader=True)
|
import requests
import extruct
import validators
from pathlib import Path
headers= {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.3'}
def is_url_ok(url):
    """Return True when a HEAD request to *url* answers with HTTP 200."""
    try:
        response = requests.head(url)
    except Exception:
        # network errors, bad schemes, DNS failures, ... -> treat as dead
        return False
    return response.status_code == 200
def clean_data(data):
    """Normalise extruct output to a list of schema.org Recipe dicts.

    json-ld records tagged ``@type == 'Recipe'`` are copied as-is; microdata
    Recipe records are converted to the json-ld style (``@context``/``@type``
    keys added, nested ``type`` renamed to ``@type``).  Fixes vs. the
    original: records without an ``@type`` key no longer raise KeyError, the
    caller's nested dicts are no longer mutated, and the type name is taken
    as the last URL segment rather than a hard-coded index.
    """
    out = []
    for rec in data['json-ld']:
        # skip non-recipe records and records with no @type at all
        if rec.get('@type') == 'Recipe':
            out.append(rec.copy())
    for rec in data['microdata']:
        if rec['type'] in ('http://schema.org/Recipe',
                           'https://schema.org/Recipe'):
            d = rec['properties'].copy()
            # add @context and @type to match json-ld style
            if rec['type'].startswith('https:'):
                d['@context'] = 'https://schema.org'
            else:
                d['@context'] = 'http://schema.org'
            d['@type'] = 'Recipe'
            for key, value in d.items():
                if isinstance(value, dict) and 'type' in value:
                    nested = value.copy()  # don't mutate the caller's data
                    # last URL segment holds the schema.org type name
                    nested['@type'] = nested.pop('type').split('/')[-1]
                    d[key] = nested
            out.append(d)
    return out
def parse_from_url(url):
    """Fetch *url* and return its cleaned recipe metadata.

    Returns an empty dict (and prints a warning) when the URL is dead.
    Raises TypeError for non-string input.
    """
    if not isinstance(url, str):
        raise TypeError
    recipes = {}
    if is_url_ok(url):
        page = requests.get(url, headers=headers)
        extracted = extruct.extract(page.text, page.url)
        recipes = clean_data(extracted)
    else:
        print('URL may be Dead/Not Working !')
    return recipes
def parse(obj):
    """Parse recipe metadata from a URL string, file path, Path, or file-like object."""
    if isinstance(obj, str):
        if validators.url(obj):             # URL string -> fetch remotely
            return parse_from_url(obj)
        with open(obj, 'rt') as f:          # otherwise: plain file path
            data = extruct.extract(f.read())
    elif hasattr(obj, 'read'):              # file-like object
        data = extruct.extract(obj.read())
    elif isinstance(obj, Path):             # pathlib path
        with obj.open(mode='rt') as fobj:
            data = extruct.extract(fobj.read())
    else:
        raise TypeError('unexpected type encountered')
    return clean_data(data)
|
def get_transaction_info(sms):
    """Extract (transaction_type, company, amount) from a bank SMS.

    transaction_type is 'CREDIT', 'DEBIT' or 'none'; amount is the matched
    amount string from extract_amount (0 when none); company is guessed from
    any UPI handle in the message via extract_company.
    """
    credit_words = ('credit', 'credited')
    debit_words = ('debit', 'debited')
    # match case-insensitively: real messages say "Credited", "DEBITED", ...
    text = sms.lower()
    transaction_type = 'none'
    amount = 0
    if any(word in text for word in credit_words):
        transaction_type = 'CREDIT'
        amount = extract_amount(sms)
    elif any(word in text for word in debit_words):
        transaction_type = 'DEBIT'
        amount = extract_amount(sms)
    company = extract_company(sms)
    return transaction_type, company, amount
def extract_amount(sms):
    """Return the first rupee amount in *sms* as a string, or 0 if none.

    Handles both "Rs. 500" / "INR 500" and the trailing form "500 Rs".
    Bug fix: the trailing form captures into group 2, which the original
    ignored (it returned None for e.g. "500 Rs debited").
    """
    import re
    pattern = r"(?:Rs\.?|INR)\s*(\d+(?:[.,]\d+)*)|(\d+(?:[.,]\d+)*)\s*(?:Rs\.?|INR)"
    match = re.search(pattern, sms)
    if not match:
        return 0
    # exactly one of the two alternations captured the amount
    return match.group(1) or match.group(2)
def extract_company(sms):
    """Guess the company behind a UPI handle found in *sms*.

    Returns the closest fuzzy match from a fixed list of known companies,
    or 'unknown' when no UPI handle is present.
    """
    import re
    from fuzzywuzzy import process
    from fuzzywuzzy import fuzz
    known = ['paytm','zomato','swiggy','kotak','icici','hdfc','sbi','reliance','jio','bharatpe']
    upi_pattern = r"([a-zA-Z0-9\.\-]{2,256}\@[a-zA-Z][a-zA-Z]{2,64})"
    found = re.search(upi_pattern, sms)
    if not found:
        return 'unknown'
    # keep only the part before '@', stripped of digits, for fuzzy matching
    handle = found.group(1).split('@')[0]
    letters = ''.join(ch for ch in handle if not ch.isnumeric())
    return process.extract(letters, known, scorer=fuzz.partial_ratio)[0][0]
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 29 11:40:32 2018
@author: huyn
"""
try:
from queue import Queue as q
except:
from multiprocessing import Queue as q
# given n nodes, and edges, find the number of connected components
def bfsConnectedComponents(n, edges):
    """Return the connected components of an undirected graph on nodes 0..n-1.

    edges is a list of [u, v] pairs; the result is a list of node lists,
    one per component.
    """
    adjacency = {node: [] for node in range(n)}
    for a, b in edges:
        adjacency[a].append(b)
        adjacency[b].append(a)
    seen = set()
    components = []
    # repeatedly pull an unvisited node out of the adjacency map and flood
    # its component breadth-first; visited nodes are removed from the map
    while adjacency:
        start, frontier = adjacency.popitem()
        seen.add(start)
        component = [start]
        pending = []
        while frontier:
            node = frontier.pop(-1)
            if node not in seen:
                component.append(node)
                seen.add(node)
                pending.extend(m for m in adjacency.pop(node) if m not in seen)
            if not frontier:
                # current BFS layer exhausted -> move to the next layer
                frontier = pending
                pending = []
        components.append(component)
    return components
# smoke test: 10 nodes -> components {0,1,2}, {3,4,7,8}, {5,6}, {9}
edges= [[0,1],[0,2],[3,4],[6,5],[7,8],[8,4]]
n = 10
connectedBFS = bfsConnectedComponents(n,edges)
def dfsConnectedComponents(n, edges):
    """Return the connected components of an undirected graph via recursive DFS."""
    adjacency = {node: [] for node in range(n)}
    for a, b in edges:
        adjacency[a].append(b)
        adjacency[b].append(a)
    seen = set()
    components = []

    def explore(node):
        # visited nodes are popped from `adjacency`, so the guard only
        # skips once the whole map has been consumed
        if adjacency:
            components[-1].append(node)
            seen.add(node)
            for neighbor in adjacency.pop(node):
                if neighbor not in seen:
                    explore(neighbor)

    for node in range(n):
        if node not in seen:
            components.append([])
            explore(node)
    return components
# smoke test: same graph, DFS variant should find the same components
edges= [[0,1],[0,2],[3,4],[6,5],[7,8],[8,4]]
n = 10
connectedDFS = dfsConnectedComponents(n,edges)
def dfsHasCycle(n, edges):
    """Return True when the undirected graph on nodes 0..n-1 has a cycle.

    Bug fix: the original only searched from node 0, so cycles in components
    not containing node 0 were missed (and n == 0 returned None); every
    component is now checked and the result is always a bool.
    """
    neighbors = {i: [] for i in range(n)}
    for a, b in edges:
        neighbors[a].append(b)
        neighbors[b].append(a)
    visited = set()

    def dfs(node, parent):
        visited.add(node)
        for neighbor in neighbors.pop(node):
            if neighbor == parent:
                # skip the edge we arrived on (parallel edges are not
                # distinguished, matching the original behaviour)
                continue
            if neighbor in visited:
                return True          # back edge -> cycle
            if dfs(neighbor, node):
                return True
        return False

    # start a DFS in every still-unvisited component
    return any(dfs(node, -1) for node in range(n) if node not in visited)
#edges= [[0,1],[0,2],[1,2],[6,5],[7,8],[8,4]]
#n = 10
#check= dfsHasCycle(n,edges)
def validTree(n, edges):
    """Return True when (n, edges) forms a valid tree (connected and acyclic).

    Bug fixes vs. the original: adjacency lists were overwritten with ints
    instead of appended to, the inner loop variable shadowed ``n``, and
    ``len(visited) == n`` was evaluated *before* the DFS ran (short-circuit),
    so the function could never return True.  A graph is a tree exactly when
    it has n - 1 edges and is connected, which is what is checked here.
    """
    # wrong edge count already rules out a tree (cycle or disconnection);
    # this also covers n == 0, where len(edges) can never equal -1
    if len(edges) != n - 1:
        return False
    adjacency = {i: [] for i in range(n)}
    for a, b in edges:
        adjacency[a].append(b)
        adjacency[b].append(a)
    # iterative DFS from node 0: a tree must reach every node
    seen = {0}
    stack = [0]
    while stack:
        node = stack.pop()
        for neighbor in adjacency[node]:
            if neighbor not in seen:
                seen.add(neighbor)
                stack.append(neighbor)
    return len(seen) == n
# smoke test: graph with a triangle {0,1,2} plus extra components
edges= [[0,1],[0,2],[1,2],[6,5],[7,8],[8,4]]
n = 10
check= validTree(n,edges)
|
import pytest
import FredMD as fmd
def test_maxfactor():
    """With Nfactor unset, the information criterion should choose 7 factors."""
    # Model should estimate the maximum number of factors as IC chooses 7 normally
    x = fmd.FredMD(Nfactor=None, vintage=None, maxfactor=8, standard_method=2, ic_method=2)
    assert hasattr(x, 'rawseries')
    x.estimate_factors()
    assert x.factors.shape[1] == 7
def test_Nfactor():
    """An explicit Nfactor=2 must override the IC choice (maxfactor ignored)."""
    # Model should estimate 2 factors since that is what we are setting
    x = fmd.FredMD(Nfactor=2, vintage=None, maxfactor=1, standard_method=2, ic_method=2)
    x.estimate_factors()
    assert x.factors.shape[1] == 2
|
#!/usr/bin/env python
import argparse
import subprocess
import gzip
import time
import logging
import nltk
import pickle
import random
from nltk.tokenize.punkt import PunktWordTokenizer
def sequences(corpus):
    """Wrap each token of each sentence as an (observation, '') pair.

    This is the unlabelled-sequence shape expected by the NLTK HMM trainer.
    """
    return [[(token, '') for token in sentence] for sentence in corpus]
def symbols(corpus):
    """Return the deduplicated vocabulary of the corpus as a list."""
    vocabulary = set()
    for sentence in corpus:
        vocabulary |= set(sentence)
    return list(vocabulary)
def sample(hmm, length):
    """Draw a random observation sequence of *length* from *hmm* and join its symbols."""
    drawn = hmm.random_sample(random.Random(), length)
    return ' '.join(pair[0] for pair in drawn)
def save_model(filename, hmm):
    """Pickle *hmm* and write it gzip-compressed to *filename*.

    Failures are logged with a traceback and re-raised.  The bare ``except:``
    is narrowed to ``Exception`` so KeyboardInterrupt/SystemExit pass through
    unlogged.
    """
    try:
        with gzip.GzipFile(filename, 'wb') as f:
            # protocol 1 keeps the file readable by old Python versions
            f.write(pickle.dumps(hmm, 1))
    except Exception:
        logging.exception('save model failure')
        raise
def load_model(filename):
    """Load a gzip-pickled model written by save_model; return None on failure.

    Bug fix: the buffer was initialised to the text string '' and compared
    with ``data == ''`` -- on Python 3 GzipFile opened in 'rb' yields bytes,
    so the concatenation raised TypeError and EOF was never detected.  Bytes
    are used throughout now (also valid on Python 2).
    """
    try:
        with gzip.GzipFile(filename, 'rb') as f:
            buf = b''
            while True:
                data = f.read()
                if not data:          # b'' signals EOF
                    break
                buf += data
            return pickle.loads(buf)
    except Exception:
        logging.exception('load model failure')
        return None
def cleanup_words(words):
    """Drop tokens that contain punctuation or markup fragments.

    A word is kept only when none of the banned substrings (punctuation,
    brackets, 'http', ...) occurs anywhere inside it.
    """
    banned = (',', '.', '!', '@', '"', ';', "'", '?', '-', '--', ']', '[', ':', '/', '#', 'http',
              '(', ')', '*', '<', '>')
    kept = []
    for word in words:
        if all(fragment not in word for fragment in banned):
            kept.append(word)
    return kept
# Python 2 script (print statement below): train or load an HMM over a text
# corpus, then babble random sentences forever (optionally spoken via `say`).
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # exactly one of --model (load) or --corpus (train) must be given
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--model', '-m', metavar='MODEL', type=str, nargs='?')
    group.add_argument('--corpus', '-c', metavar='CORPUS', type=str, nargs='?')
    parser.add_argument('--verbose', '-v', action='store_true')
    parser.add_argument('--speak', '-s', action='store_true')
    args = parser.parse_args()
    logging_level = logging.DEBUG if args.verbose else logging.INFO
    logging.basicConfig(level=logging_level)
    if args.model:
        logging.debug('loading model...')
        hmm = load_model(args.model)
    if args.corpus:
        logging.debug('loading corpus...')
        # NOTE(review): opened 'rb' but fed to text tokenizers -- works on
        # Python 2 (str == bytes); would need decoding on Python 3
        corpus = open(args.corpus, 'rb').read()
        sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
        word_detector = PunktWordTokenizer()
        sentences = sent_detector.tokenize(corpus.strip())
        words = [cleanup_words(word_detector.tokenize(s)) for s in sentences]
        logging.debug('training model...')
        # 8 hidden states, unsupervised Baum-Welch, capped at 5 iterations
        trainer = nltk.tag.hmm.HiddenMarkovModelTrainer(states=range(8), symbols=symbols(words))
        hmm = trainer.train_unsupervised(sequences(words), max_iterations=5)
        logging.debug('saving model...')
        save_model(args.corpus + '.hmm', hmm)
    logging.debug('sampling model...')
    while(True):
        utterance = sample(hmm, random.randint(5, 15)) + '.'
        print utterance
        if args.speak:
            subprocess.call('say "{}!"'.format(utterance), shell=True)
        time.sleep(1)
|
import pytest
from unittest.mock import MagicMock
from homieclient import Device
def test_ready_no_nodes():
    """$state 'ready' with an empty node list -> device reports ready."""
    d = get_device_after_msgs('test-device', {
        '$name': 'Test Device',
        '$nodes': '',
        '$state': 'ready'
    })
    assert d.is_ready()
def test_nodelist_no_nodes():
    """An empty $nodes attribute yields an empty node collection."""
    d = get_device_after_msgs('test-device', {
        '$name': 'Test Device',
        '$nodes': '',
        '$state': 'ready'
    })
    assert len(d.nodes) == 0
def test_basic_attributes():
    """$name and $state messages are exposed as plain attributes."""
    d = get_device_after_msgs('test-device', {
        '$name': 'Test Device',
        '$nodes': '',
        '$state': 'ready'
    })
    assert d.name == 'Test Device'
    assert d.state == 'ready'
def test_not_ready_when_lost():
    """A device whose $state is 'lost' must not report ready."""
    d = get_device_after_msgs('test-device', {
        '$name': 'Test Device',
        '$nodes': '',
        '$state': 'lost'
    })
    assert not d.is_ready()
def test_not_ready_when_nodes_unknown():
    """Announced node 'sensor' never sent its attributes -> not ready."""
    d = get_device_after_msgs('test-device', {
        '$name': 'Test Device',
        '$nodes': 'sensor',
        '$state': 'ready'
    })
    assert not d.is_ready()
def test_node_init():
    """A fully-announced node becomes an attribute of the device."""
    d = get_device_after_msgs('test-device', {
        '$name': 'Test Device',
        '$nodes': 'sensor',
        '$state': 'ready',
        'sensor/$name': 'Sensor',
        'sensor/$type': 'unit-test-sensor',
        'sensor/$properties': ''
    })
    assert d.is_ready()
    assert len(d.nodes) == 1
    assert d.sensor is not None
    assert d.sensor.name == 'Sensor'
    assert d.sensor._initializing == False
def test_partial_node():
    """Without $properties the node stays uninitialised and inaccessible."""
    d = get_device_after_msgs('test-device', {
        '$name': 'Test Device',
        '$nodes': 'sensor',
        '$state': 'ready',
        'sensor/$name': 'Sensor',
        'sensor/$type': 'unit-test-sensor'
    })
    assert not d.is_ready()
    assert len(d.nodes) == 0
    with pytest.raises(AttributeError):
        d.sensor
def test_node_init_out_of_order():
    """Extra attributes arriving before $type/$properties are kept."""
    d = get_device_after_msgs('test-device', {
        '$name': 'Test Device',
        '$nodes': 'sensor',
        '$state': 'ready',
        'sensor/$name': 'Sensor',
        'sensor/$someattr': 'this is extra',
        'sensor/$type': 'unit-test-sensor',
        'sensor/$properties': ''
    })
    assert d.is_ready()
    assert len(d.nodes) == 1
    assert d.sensor is not None
    assert d.sensor.name == 'Sensor'
    assert d.sensor._initializing == False
    assert d.sensor.someattr == 'this is extra'
def test_send_msg_to_complete_node():
    """Messages after initialisation update the node's attributes."""
    d = get_device_after_msgs('test-device', {
        '$name': 'Test Device',
        '$nodes': 'sensor',
        '$state': 'ready',
        'sensor/$name': 'Sensor',
        'sensor/$type': 'unit-test-sensor',
        'sensor/$properties': ''
    })
    with pytest.raises(AttributeError):
        d.sensor.someattr
    d.on_message('sensor/$someattr', 'new-value')
    assert d.sensor.someattr == 'new-value'
def test_node_discovery_callback():
    """Completing a node fires the client's on_node_discovered callback."""
    d = get_device_after_msgs('test-device', {
        '$name': 'Test Device',
        '$nodes': 'sensor',
        '$state': 'ready',
        'sensor/$name': 'Sensor',
        'sensor/$type': 'unit-test-sensor',
        'sensor/$properties': ''
    })
    d._homie_client.on_node_discovered.assert_called_with(d.sensor)
def get_device_after_msgs(id: str, msgs: dict) -> Device:
    """Build a Device with a mocked client and replay msgs (topic -> payload)."""
    d = Device(MagicMock(), id)
    for topic, msg in msgs.items():
        d.on_message(topic, msg)
    return d
"""
给你一个字符串 s 和一个字符规律 p,请你来实现一个支持 '.' 和 '*' 的正则表达式匹配。
'.' 匹配任意单个字符
'*' 匹配零个或多个前面的那一个元素
所谓匹配,是要涵盖 整个 字符串 s的,而不是部分字符串。
说明:
s 可能为空,且只包含从 a-z 的小写字母。
p 可能为空,且只包含从 a-z 的小写字母,以及字符 . 和 *。
示例 1:
输入:
s = "aa"
p = "a"
输出: false
解释: "a" 无法匹配 "aa" 整个字符串。
示例 2:
输入:
s = "aa"
p = "a*"
输出: true
解释: 因为 '*' 代表可以匹配零个或多个前面的那一个元素, 在这里前面的元素就是 'a'。因此,字符串 "aa" 可被视为 'a' 重复了一次。
示例 3:
输入:
s = "ab"
p = ".*"
输出: true
解释: ".*" 表示可匹配零个或多个('*')任意字符('.')。
示例 4:
输入:
s = "aab"
p = "c*a*b"
输出: true
解释: 因为 '*' 表示零个或多个,这里 'c' 为 0 个, 'a' 被重复一次。因此可以匹配字符串 "aab"。
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/regular-expression-matching
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
"""
import numpy as np
class Solution:
    """Regular-expression matching supporting '.' and '*' via a 2-D DP table.

    The pattern is first tokenised into single characters and char+'*' pairs;
    dp[i][j] then means "s[:i] matches the first j tokens".
    """

    def seperatePattern(self, p: str):
        """Split pattern p into tokens: single chars or 2-char starred units ('a*')."""
        index, new_p = 0, []
        while index < len(p) - 1:
            if p[index + 1] == '*':
                new_p.append(p[index:index + 2])
                index += 2
            else:
                new_p.append(p[index])
                index += 1
        if index == len(p) - 1:
            # trailing single character not yet consumed by the loop
            new_p.append(p[index])
        return new_p

    def match_this(self, i, j):
        """dp[i-1][j-1] transition: does char i match the head of token j?"""
        return True if j[0] == '.' else i == j[0]

    def match_p_pre(self, i, j):
        """dp[i-1][j] transition: starred token j consumes one more char i."""
        return False if len(j) == 1 else i == j[0] or j[0] == '.'

    def match_s_pre(self, i, j):
        """dp[i][j-1] transition: starred token j matches zero characters."""
        return True if len(j) == 2 else False

    def isMatch(self, s: str, p: str) -> bool:
        """Return True when pattern p matches the *whole* string s."""
        p = self.seperatePattern(p)
        # bug fix: np.bool was deprecated and removed in NumPy 1.24 --
        # use the builtin bool as the dtype instead
        dp = np.zeros((len(s) + 1, len(p) + 1), dtype=bool)
        dp[0][0] = True
        for i in range(1, len(s) + 1):
            dp[i][0] = False          # non-empty s never matches an empty pattern
        for j in range(1, len(p) + 1):
            # empty s matches only a prefix made entirely of starred tokens
            dp[0][j] = dp[0][j - 1] and len(p[j - 1]) > 1
        for i in range(1, len(s) + 1):
            for j in range(1, len(p) + 1):
                up = dp[i - 1][j] and self.match_p_pre(s[i - 1], p[j - 1])
                left = dp[i][j - 1] and self.match_s_pre(s[i - 1], p[j - 1])
                up_left = dp[i - 1][j - 1] and self.match_this(s[i - 1], p[j - 1])
                dp[i][j] = up or left or up_left
        return bool(dp[len(s)][len(p)])
# 作者:yanfan - 4
# 链接:https: // leetcode - cn.com / problems / regular - expression - matching / solution / dong - tai - gui - hua - python3qiu - jie - by - yanfan - 4 /
# 来源:力扣(LeetCode)
# 著作权归作者所有。商业转载请联系作者获得授权,非商业转载请注明出处。 |
"""
Implement Merge Sort
*Reference: Algorithms in Python (Michael T. Goodrich)
"""
def merge(left, right, sorted_array):
    """Merge two sorted lists into sorted_array in place.

    At each step an element is copied from `left` when `right` is exhausted
    or left's head is strictly smaller; otherwise from `right` (this also
    covers the case where `left` is exhausted).
    """
    i = j = 0  # i indexes `left`, j indexes `right`
    while i + j < len(sorted_array):
        if j == len(right) or (i < len(left) and left[i] < right[j]):
            sorted_array[i + j] = left[i]
            i += 1
        else:
            sorted_array[i + j] = right[j]
            j += 1


def merge_sort(unsorted_array):
    """Sort `unsorted_array` in place with recursive merge sort.

    Bug fix: ``len(...) / 2`` is float division on Python 3, which made the
    slice indices raise TypeError; integer division is used instead.
    """
    array_size = len(unsorted_array)
    if array_size < 2:
        return
    # Divide
    mid = array_size // 2
    left = unsorted_array[0:mid]
    right = unsorted_array[mid:]
    # Conquer
    merge_sort(left)
    merge_sort(right)
    # Merge
    merge(left, right, unsorted_array)
if __name__ == '__main__':
    unsorted_array = [
        85, 24, 63, 45, 17, 31, 96, 50
    ]
    merge_sort(unsorted_array)
    # NOTE(review): sorted() is taken *after* the in-place sort, so this
    # assert only verifies the array ends up sorted, not that it is a
    # permutation of the original input
    expected_result = sorted(unsorted_array)
    assert expected_result == unsorted_array
|
def sequential_search(a_list, item):
    """Linear scan: return True when `item` occurs anywhere in `a_list`."""
    for value in a_list:
        if value == item:
            return True
    return False
def ordered_sequential_search(a_list, item):
    """Linear scan over a *sorted* list, stopping early past `item`'s position."""
    for value in a_list:
        if value == item:
            return True
        if value > item:
            # the list is ascending, so `item` cannot occur further right
            return False
    return False
# Demo: 7 is present; -3 fails immediately via the early-exit order check.
test_list= [0, 1, 2, 3, 7, 8, 10]
print(ordered_sequential_search(test_list, 7))
print(ordered_sequential_search(test_list, -3))
#!/usr/bin/env python
##Author: rsalem
##Purpose: Model to train car to clone behavior
import tensorflow as tf
import numpy as np
import processData
import json
import h5py
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from keras.layers import Activation, Dense, Dropout, ELU, Flatten, Input, Lambda
from keras.layers.convolutional import Conv2D, Cropping2D
from keras.models import Sequential, Model, load_model, model_from_json
from keras.optimizers import Adam
from keras.regularizers import l2
from keras.callbacks import ModelCheckpoint
import keras.backend.tensorflow_backend as KTF
import os
# CSV driving log (image paths + steering angles) and the GPU memory share to claim.
dataPath = "data/driving_log.csv"
GPU_FRACTION = 0.8
# Function to set the fraction of GPU to use
def get_session(gpu_fraction=GPU_FRACTION):
    """Create a TF1 session limited to `gpu_fraction` of GPU memory.

    Honours the OMP_NUM_THREADS environment variable for intra-op
    parallelism when it is set.
    """
    # NOTE(review): os.environ returns a str; tf.ConfigProto presumably
    # expects an int for intra_op_parallelism_threads -- confirm
    num_threads = os.environ.get('OMP_NUM_THREADS')
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)
    if num_threads:
        return tf.Session(config=tf.ConfigProto(
            gpu_options=gpu_options, intra_op_parallelism_threads=num_threads))
    else:
        return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
# Register the memory-constrained session as Keras' backend session.
KTF.set_session(get_session())
# Defining model to train. Inspired from Nvidia- End-to-end model for cars
def get_model():
    """Build and compile the steering-angle regression network.

    Input: 66x200x3 images; output: a single linear steering value.
    5 conv layers + 3 dense layers with ELU activations, l2(0.001) weight
    regularisation and dropout throughout; optimised with Adam(1e-4) on MSE.
    """
    model = Sequential([
        # Normalizimg the image to -1.0 to 1.0
        Lambda(processData.normalize_image, input_shape=(66, 200, 3)),
        # Convolutional layer 1 24@31x98 | 5x5 kernel | 2x2 stride | elu activation
        Conv2D(24, (5,5),kernel_initializer ="he_normal", activation= 'elu', padding = 'valid', strides = (2,2), kernel_regularizer= l2(0.001)),
        # Dropout with drop probability of .1 (keep probability of .9)
        Dropout(.1),
        # Convolutional layer 2 36@14x47 | 5x5 kernel | 2x2 stride | elu activation
        Conv2D(36, (5,5),kernel_initializer ="he_normal", activation= 'elu', padding = 'valid', strides = (2,2), kernel_regularizer= l2(0.001)),
        # Dropout with drop probability of .2 (keep probability of .8)
        Dropout(.2),
        # Convolutional layer 3 48@5x22 | 5x5 kernel | 2x2 stride | elu activation
        Conv2D(48, (5,5),kernel_initializer ="he_normal", activation= 'elu', padding = 'valid', strides = (2,2), kernel_regularizer= l2(0.001)),
        # Dropout with drop probability of .2 (keep probability of .8)
        Dropout(.2),
        # Convolutional layer 4 64@3x20 | 3x3 kernel | 1x1 stride | elu activation
        Conv2D(64, (3,3),kernel_initializer ="he_normal", activation= 'elu', padding = 'valid', strides = (1,1), kernel_regularizer= l2(0.001)),
        # Dropout with drop probability of .2 (keep probability of .8)
        Dropout(.2),
        # Convolutional layer 5 64@1x18 | 3x3 kernel | 1x1 stride | elu activation
        Conv2D(64, (3,3),kernel_initializer ="he_normal", activation= 'elu', padding = 'valid', strides = (1,1), kernel_regularizer= l2(0.001)),
        # Flatten
        Flatten(),
        # Dropout with drop probability of .3 (keep probability of .7)
        Dropout(.3),
        # Fully-connected layer 1 | 100 neurons | elu activation
        Dense(100, activation = 'elu' , kernel_initializer = 'he_normal', kernel_regularizer = l2(0.001)),
        # Dropout with drop probability of .5
        Dropout(.5),
        # Fully-connected layer 2 | 50 neurons | elu activation
        Dense(50, activation = 'elu' , kernel_initializer = 'he_normal', kernel_regularizer = l2(0.001)),
        # Dropout with drop probability of .5
        Dropout(.5),
        # Fully-connected layer 3 | 10 neurons | elu activation
        Dense(10, activation = 'elu' , kernel_initializer = 'he_normal', kernel_regularizer = l2(0.001)),
        # Dropout with drop probability of .5
        Dropout(.5),
        # Output
        Dense(1,activation = 'linear', kernel_initializer ='he_normal')
    ])
    model.compile(optimizer=Adam(0.0001), loss='mse')
    # model.load_weights('model.h5')
    return model
if __name__=="__main__":
    # Load (image path, steering angle) pairs, shuffle, hold out 30% for validation.
    X_train, y_train = processData.get_csv_data(dataPath)
    X_train, y_train = shuffle(X_train, y_train, random_state=14)
    X_train, X_validation, y_train, y_validation = train_test_split(X_train, y_train, test_size=0.3, random_state=14)
    model = get_model()
    model.summary()
    filepath = "model.h5"
    # Checkpoint to save best weights only
    checkpoint = ModelCheckpoint(filepath, monitor = "loss", verbose=1, save_best_only=True, mode='min')
    callbacks_list = [checkpoint]
    # Train from generators (batches produced on the fly by processData).
    model.fit_generator(processData.generate_batch(X_train, y_train), steps_per_epoch=200,
                        epochs=15,
                        verbose=1,
                        validation_data=processData.generate_batch(X_validation, y_validation),
                        validation_steps=50,
                        callbacks = callbacks_list)
    print ('Saving model weights and configuration file')
    # Architecture goes to model.json; best weights were already written to model.h5.
    with open('model.json', 'w') as outfile:
        json.dump(model.to_json(), outfile)
    # Explicitly end tensorflow session
    from keras import backend as K
    K.clear_session()
|
import diff
def diff_wordMode(text1, text2):
    """Word-level diff of two texts using diff_match_patch.

    Maps words to placeholder characters, diffs the compact streams, then
    maps the result back to words and applies semantic cleanup.
    NOTE(review): relies on a diff_match_patch fork that provides
    diff_linesToWords (the stock library only ships diff_linesToChars) --
    confirm against the local `diff` module.
    """
    dmp = diff.diff_match_patch()
    a = dmp.diff_linesToWords(text1, text2)
    lineText1 = a[0]
    lineText2 = a[1]
    lineArray = a[2]
    diffs = dmp.diff_main(lineText1, lineText2)
    dmp.diff_charsToLines(diffs, lineArray)
    dmp.diff_cleanupSemantic(diffs)
    return (diffs)
# Python 2 script body: diff two text fragments word-by-word and print the result.
# NOTE(review): the two file handles are never closed.
file1 = open("ch46_fragment.txt")
file2 = open("chRU_fragment.txt")
text1 = file1.read()
text2 = file2.read()
#text1 = "This is an example\nAnd the second line"
#text2 = "This is a bad example\nAnd the second line"
#text1 = '\n'.join(text1.split())
#text2 = '\n'.join(text2.split())
a = diff_wordMode(text1, text2)
print a
|
# This file is part of Timetracker.
#
# Timetracker is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Timetracker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Timetracker. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright (C) <2015-2016> Martin Billinger
from datetime import datetime, timedelta, time
from PyQt4 import QtGui as qtgui
from PyQt4 import QtCore as qtcore
from PyQt4.Qt import Qt
from worktimedialog import WorktimeDialog
from specialdaydialog import SpecialDayDialog
from neweventdialog import NewEventDialog
from statisticsdialog import StatisticsDialog
from database import WEEKDAYS
def sec_to_hstr(time, fmt='{h:02}:{m:02}'):
    """Format a duration (seconds or a timedelta) with `fmt`.

    `fmt` may reference h, m and s; the default renders zero-padded HH:MM.
    """
    seconds = time.total_seconds() if isinstance(time, timedelta) else time
    hours, remainder = divmod(seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    return fmt.format(h=int(hours), m=int(minutes), s=int(seconds))
class CalendarDBWidget(qtgui.QWidget):
def __init__(self, db, parent=None):
qtgui.QWidget.__init__(self, parent=parent)
self.db = db
self.specialdaydialog = SpecialDayDialog(self.db, self)
self.worktimedialog = WorktimeDialog(db)
self.neweventdialog = NewEventDialog(db, self)
self.statisticsdialog = StatisticsDialog(db)
lay_tools = qtgui.QVBoxLayout()
self.home = qtgui.QPushButton(self.style().standardIcon(qtgui.QStyle.SP_DialogHelpButton), None)
self.home.clicked.connect(self.go_home)
self.constraints = qtgui.QPushButton(self.style().standardIcon(qtgui.QStyle.SP_DialogApplyButton), None)
self.constraints.clicked.connect(self.show_worktime)
self.refresh = qtgui.QPushButton(self.style().standardIcon(qtgui.QStyle.SP_BrowserReload), None)
self.refresh.clicked.connect(self.update)
self.stats = qtgui.QPushButton(self.style().standardIcon(qtgui.QStyle.SP_ComputerIcon), None)
self.stats.clicked.connect(self.show_statistics)
lay_tools.addWidget(self.home)
lay_tools.addWidget(self.refresh)
lay_tools.addWidget(self.constraints)
lay_tools.addSpacing(10)
lay_tools.addWidget(self.stats)
lay_tools.addStretch()
lay_grid = qtgui.QGridLayout()
self.calendar = qtgui.QCalendarWidget()
self.calendar.setFirstDayOfWeek(1)
self.calendar.currentPageChanged.connect(self.cal_browse)
self.calendar.selectionChanged.connect(self.cal_select)
self.calendar.activated.connect(self.specialday)
lay_current = qtgui.QVBoxLayout()
box_current = qtgui.QGroupBox("Today")
box_current.setLayout(lay_current)
box_current.setFlat(False)
lay_tmp = qtgui.QHBoxLayout()
self.button_checkin = qtgui.QPushButton("Check IN")
self.button_checkout = qtgui.QPushButton("Check OUT")
self.button_checkin.setDisabled(True)
self.button_checkout.setDisabled(True)
self.button_checkin.setStyleSheet("background-color: pink")
self.button_checkout.setStyleSheet("background-color: pink")
self.button_checkin.clicked.connect(lambda: self.new_event(1))
self.button_checkout.clicked.connect(lambda: self.new_event(2))
lay_tmp.addWidget(self.button_checkin)
lay_tmp.addWidget(self.button_checkout)
lay_current.addLayout(lay_tmp)
self.current_state = qtgui.QLabel('--')
self.current_state.setAlignment(Qt.AlignCenter);
lay_current.addWidget(self.current_state)
lay_day = qtgui.QVBoxLayout()
self.box_day = qtgui.QGroupBox("00. 00. 0000")
self.box_day.setLayout(lay_day)
self.box_day.setFlat(False)
self.neweventbutton = qtgui.QPushButton(self.style().standardIcon(qtgui.QStyle.SP_VistaShield), None)
self.neweventbutton.clicked.connect(self.show_newevent)
lay_day.addWidget(self.neweventbutton)
self.day_events = qtgui.QListWidget()
lay_day.addWidget(self.day_events)
shortcut = qtgui.QShortcut(qtgui.QKeySequence(Qt.Key_Delete), self.day_events)
shortcut.activated.connect(self.delete_event)
self.day_hours = qtgui.QLabel('n/a')
lay_day.addWidget(self.day_hours)
lay_week = qtgui.QFormLayout()
self.box_week = qtgui.QGroupBox("Week")
self.box_week.setLayout(lay_week)
self.box_week.setFlat(False)
self.week_hours = qtgui.QLabel('n/a')
self.week_total = qtgui.QLabel('n/a')
self.week_dayh = {}
for d in WEEKDAYS:
label = qtgui.QLabel('n/a')
self.week_dayh[d] = label
lay_week.addRow(d + ':', label)
lay_week.addRow('', qtgui.QWidget())
lay_week.addRow('Total:', self.week_total)
lay_week.addRow('Desired:', self.week_hours)
lay_year = qtgui.QVBoxLayout()
self.box_year = qtgui.QGroupBox("Year")
self.box_year.setLayout(lay_year)
self.box_year.setFlat(False)
self.vacation_taken = qtgui.QLabel('Vacation...')
lay_year.addWidget(self.vacation_taken)
lay_total = qtgui.QVBoxLayout()
self.box_total = qtgui.QGroupBox("Total")
self.box_total.setLayout(lay_total)
self.box_total.setFlat(False)
self.total_start = qtgui.QDateTimeEdit()
lay_total.addWidget(self.total_start)
self.total_start.setDisplayFormat("dd.MM.yyyy")
self.total_start.setCalendarPopup(True)
self.total_start.setDate(db.get_misc('start_total', type='date'))
self.total_start.dateChanged.connect(self.change_start)
self.total_end = qtgui.QDateTimeEdit()
lay_total.addWidget(self.total_end)
self.total_end.setDisplayFormat("dd.MM.yyyy")
self.total_end.setCalendarPopup(True)
now = datetime.now()
self.total_end.setDate(now - timedelta(now.weekday() + 1))
self.total_end.dateChanged.connect(self.change_start)
self.total_balance = qtgui.QLabel('Work balance: ---')
lay_total.addWidget(self.total_balance)
lay_grid.addWidget(self.calendar, 0, 0)
lay_grid.addWidget(box_current, 0, 1)
lay_grid.addWidget(self.box_day, 1, 0)
lay_grid.addWidget(self.box_week, 1, 1)
lay_grid.addWidget(self.box_year, 2, 0)
lay_grid.addWidget(self.box_total, 2, 1)
lay_tmp = qtgui.QHBoxLayout()
lay_tmp.addLayout(lay_tools)
lay_tmp.addLayout(lay_grid)
self.setLayout(lay_tmp)
self.update()
def go_home(self):
self.calendar.setSelectedDate(datetime.now())
def show_statistics(self):
self.statisticsdialog.show()
self.statisticsdialog.raise_()
def show_worktime(self):
self.worktimedialog.show()
self.worktimedialog.raise_()
def show_newevent(self):
self.neweventdialog.set_date(self.calendar.selectedDate())
self.neweventdialog.show()
def new_event(self, code):
self.db.new_event(code)
self.update()
def delete_event(self):
if not self.day_events.hasFocus():
return
n = len(self.day_events.selectedItems())
if n == 0:
return
box = qtgui.QMessageBox()
box.setText("Really delete event(s)?")
box.setText("Do you really want to delete {} events?".format(n))
box.setStandardButtons(qtgui.QMessageBox.Yes | qtgui.QMessageBox.No)
box.setDefaultButton(qtgui.QMessageBox.No)
if box.exec() == qtgui.QMessageBox.Yes:
for ev in self.day_events.selectedItems():
self.db.delete_event(ev.data(1))
self.update()
def update(self):
self.update_calendar()
self.update_state()
self.update_day()
self.update_week()
self.update_year()
self.update_total()
def update_calendar(self):
format = qtgui.QTextCharFormat()
self.calendar.setDateTextFormat(qtcore.QDate(-4713, 1, 1), format)
format.setFontWeight(qtgui.QFont.Black)
format.setForeground(qtgui.QColor('blue'))
format.setBackground(qtgui.QColor('lightblue'))
self.calendar.setDateTextFormat(datetime.now(), format)
format = qtgui.QTextCharFormat()
for date, tid, name, tstr, _, color in self.db.specialdays():
format.setBackground(qtgui.QColor(color))
self.calendar.setDateTextFormat(date, format)
def update_state(self):
dbstate = self.db.get_state()
if dbstate is None:
last_id = 2
else:
last_event, last_id, last_description = dbstate
if last_id == 1:
self.button_checkin.setDisabled(True)
self.button_checkout.setDisabled(False)
self.button_checkin.setStyleSheet("background-color: pink")
self.button_checkout.setStyleSheet("background-color: red")
elif last_id == 2:
self.button_checkin.setDisabled(False)
self.button_checkout.setDisabled(True)
self.button_checkin.setStyleSheet("background-color: red")
self.button_checkout.setStyleSheet("background-color: pink")
else:
raise RuntimeError("Invalid event code: {} ({}) at {}".format(last_id, last_description, last_event))
if dbstate is None:
self.current_state.setText('checked out since')
else:
last_description = last_description.replace('check-', 'checked ').replace('in', 'IN').replace('out', 'OUT')
if last_event.date() == datetime.now().date():
last_event_str = last_event.strftime('%H:%M')
elif (last_event + timedelta(1, 0, 0)).date() == datetime.now().date():
last_event_str = 'yesterday, ' + last_event.strftime('%H:%M')
else:
last_event_str = last_event.strftime('%d.%m.%Y, %H:%M')
self.current_state.setText('{} since {}'.format(last_description, last_event_str))
def update_day(self):
date = self.calendar.selectedDate()
self.box_day.setTitle(date.toString())
self.day_events.clear()
times = []
for date, str in self.db.get_events(date.toPyDate()):
item = qtgui.QListWidgetItem("{} {}".format(date.strftime('%H:%M'), str), self.day_events)
item.setData(1, date)
times.append(date)
totaltime = sum([b - a for a, b in zip(times[0::2], times[1::2])], timedelta(0))
if len(times) % 2 == 1:
totaltime += datetime.now() - times[-1]
s = totaltime.total_seconds()
h, r = divmod(s, 3600)
m, s = divmod(r, 60)
self.day_hours.setText('Total work time: {:02}:{:02}'.format(int(h), int(m)))
def update_week(self):
date = self.calendar.selectedDate()
self.box_week.setTitle('Week: {}, {}'.format(*date.weekNumber()))
date = date.toPyDate()
beginning_of_week = date - timedelta(date.weekday())
now = datetime.now()
weektime = timedelta(0)
for n, daystr in enumerate(WEEKDAYS):
day = beginning_of_week + timedelta(n)
if day == now.date():
worktime = self.db.get_worktime(day, now)
else:
worktime = self.db.get_worktime(day)
weektime += worktime
self.week_dayh[daystr].setText(sec_to_hstr(worktime))
self.week_total.setText(sec_to_hstr(weektime))
self.week_hours.setText(sec_to_hstr(self.db.get_desired_weektime(beginning_of_week)))
def update_year(self):
year = self.calendar.yearShown()
date = datetime(year, 1, 1)
self.box_year.setTitle(date.strftime('%Y'))
vac_taken = self.db.get_vacation(year)
vac_total = self.db.get_constraints(datetime(year, 12, 31))['vacationdays_per_year'][0]
self.vacation_taken.setText('Vacation days taken: {} / {}'.format(vac_taken, vac_total))
def update_total(self):
date = self.total_start.date().toPyDate()
until = self.total_end.date().toPyDate()
balance, actual, target = timedelta(), timedelta(), timedelta()
while date <= until:
target += self.db.get_desired_weektime(date)
actual += self.db.get_actual_weektime(date)
date += timedelta(7)
balance = actual - target
if balance > timedelta():
self.total_balance.setText("Work balance: <font color='green'>{}</font>".format(balance))
else:
self.total_balance.setText("Work balance: <font color='red'>-{}</font>".format(-balance))
    def cal_browse(self):
        # Calendar month/year browsed (selection unchanged): only the
        # year summary can become stale.
        self.update_year()
    def cal_select(self):
        # A new date was selected: refresh every view that depends on it
        # (day event list, week summary, year summary).
        self.update_day()
        self.update_week()
        self.update_year()
    def specialday(self):
        # Open the special-day dialog for the selected date (modal), then
        # refresh the whole widget once it closes.
        date = self.calendar.selectedDate()
        self.specialdaydialog.set_date(date)
        self.specialdaydialog.exec()
        self.update()
def change_start(self):
date = self.total_start.date().toPyDate()
beginning_of_week = date - timedelta(date.weekday())
self.total_start.setDate(beginning_of_week)
self.db.set_misc('start_total', beginning_of_week)
self.update_total()
|
import mlflow
import torch

mlflow.set_experiment("Sample Model")


def train_model(hyperparam1=42, hyperparam2=9801):
    """Run a dummy MLflow 'training' run: log both hyperparameters, an
    initial loss of 100 and a final loss of 0."""
    with mlflow.start_run():
        for key, val in (("hyperparam1", hyperparam1), ("hyperparam2", hyperparam2)):
            mlflow.log_param(key, val)
        mlflow.log_metric("loss", 100)
        print(f'training model with hyperparams ({hyperparam1}, {hyperparam2})')
        print(f'GPU Available: {torch.cuda.is_available()}')
        mlflow.log_metric("loss", 0)
|
from tkinter import *
from random import randint

root = Tk()
root.title('Strong Password Generator')
root.iconbitmap('')
root.geometry("600x400")


def new_rand():
    """Generate a random password of the requested length into pw_entry."""
    pw_entry.delete(0, END)
    pw_length = int(my_entry.get())
    # printable ASCII range 33..126: letters, digits and punctuation
    my_password = ''.join(chr(randint(33, 126)) for _ in range(pw_length))
    pw_entry.insert(0, my_password)


def clipper():
    """Copy the generated password to the system clipboard."""
    root.clipboard_clear()
    root.clipboard_append(pw_entry.get())


# frame asking for the desired password length
lf = LabelFrame(root, text="How Many Characters?")
lf.pack(pady=20)

# entry box for the number of characters
my_entry = Entry(lf, font=("Helvatica", 24))
my_entry.pack(pady=20, padx=20)

# entry box that displays the generated password
pw_entry = Entry(root, text='', font=("Helvetica", 24), bg="systembuttonface")
pw_entry.pack(pady=20)

# button row
my_frame = Frame(root)
my_frame.pack(pady=20)

my_button = Button(my_frame, text="Generate Strong Password", command=new_rand)
my_button.grid(row=0, column=0, padx=20)

clip_button = Button(my_frame, text="Copy to clipboard", command=clipper)
clip_button.grid(row=0, column=1, padx=10)

root.mainloop()
|
#######################################################################################################################
#
# xyBalance
#
# We'll say that a String is xy-balanced if for all the 'x' chars in the string,
# there exists a 'y' char somewhere later in the string. So "xxy" is balanced, but "xyx" is not.
# One 'y' can balance multiple 'x's. Return true if the given string is xy-balanced.
#
#######################################################################################################################
#
# xyBalance("aaxbby") → true
# xyBalance("aaxbb") → false
# xyBalance("yaaxbb") → false
# xyBalance("yaaxbby") → true
# xyBalance("xaxxbby") → true
# xyBalance("xaxxbbyx") → false -
# xyBalance("xxbxy") → true
# xyBalance("xxbx") → false
# xyBalance("bbb") → true-
# xyBalance("bxbb") → false
# xyBalance("bxyb") → true
# xyBalance("xy") → true
# xyBalance("y") → true
# xyBalance("x") → false
# xyBalance("") → true-
# xyBalance("yxyxyxyx") → false-
# xyBalance("yxyxyxyxy") → true
# xyBalance("12xabxxydxyxyzz") → true
#
#######################################################################################################################
|
# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import requests
from parladata.models import Person, Membership, Organization
from datetime import datetime
# Parliamentary terms ("sklici"): start/end of each term plus `nep_wo`,
# the date from which unattached-MP memberships are dated.
sklici = {'6':{'start_time': datetime(day=21, month=12, year=2011),
               'end_time': datetime(day=1, month=8, year=2014),
               'nep_wo': datetime(day=2, month=8, year=2014),},
          '5':{'start_time': datetime(day=2, month=10, year=2008),
               'end_time': datetime(day=21, month=10, year=2011),
               'nep_wo': datetime(day=22, month=10, year=2011)}}
# Per-term HTML quirks of the scraped pages: inline-style strings used to
# locate member/party cells, the attrs that identify the data table, and
# the labels used for national-minority representatives ("imns").
parser_vars = {'6': {'member_class': ' width: 216px;',
                     'pg_class': ' width: 652px;',
                     'table_search': {'dir':"ltr"},
                     'imns': ['ITAL. NAR. SK.', 'MAD. NAR. SK.']},
               '5': {'member_class': ' width: 140px;',
                     'pg_class': ' width: 653px;',
                     'table_search': {'style':" width: 796px;"},
                     'imns': ['MADŽARSKA NS', 'ITALIJANSKA NS']}}
def add_membership(name, parties, sklic=6):
    """Create Membership rows linking person `name` to each party in
    `parties` for parliamentary term `sklic`.

    Unattached MPs (classification 'nepovezani poslanec' or acronym 'NP')
    get the term's `nep_wo` start date and an open end date; all others
    span the whole term.
    """
    person = find_person(name)
    for i in parties:
        print "input: ", name, i
        #organization = Organization.objects.filter(name_parser__icontains=i)
        organization = find_org(i, name, sklic)
        if not organization:
            continue
        print person, organization
        # NOTE(review): `person` is a queryset; person[0] raises IndexError
        # when find_person matched nothing — verify upstream data quality.
        if organization.classification == 'nepovezani poslanec' or organization._acronym == 'NP':
            Membership(person = person[0],
                       organization=organization,
                       start_time=sklici[str(sklic)]['nep_wo'],
                       end_time=None,
                       role='član',
                       label='cl').save()
        else:
            Membership(person = person[0],
                       organization=organization,
                       start_time=sklici[str(sklic)]['start_time'],
                       end_time=sklici[str(sklic)]['end_time'],
                       role='član',
                       label='cl').save()
def parse_memberships(url, sklic):
    """Scrape the membership table at `url` and store a Membership for
    every (MP, parties) pair found, for parliamentary term `sklic`."""
    html_doc = requests.get(url)
    soup = BeautifulSoup(html_doc.content, 'html.parser')
    i=0
    """
    poslanci = []
    stranke = []
    for i, mp in enumerate(soup.find_all('table', **parser_vars[str(sklic)]['table_search'])[0].find_all(style=parser_vars[str(sklic)]['member_class'])):
        if i > 0:
            # preprocessing here
            poslanci.append(mp.string)
    for pg in soup.find_all('table', **parser_vars[str(sklic)]['table_search'])[0].find_all(style=parser_vars[str(sklic)]['pg_class']):
        # preprocessing here
        if str(sklic) == '6':
            stranke.append(pg.string.split(', prej '))
        elif str(sklic) == '5':
            if 'na listi' in pg.string:
                spl = pg.string.split(' ')
                stranke.append([spl[0], spl[-1]])
            else:
                stranke.append([pg.string])
    data = zip(poslanci, stranke)
    """
    data = []
    table = soup.find('table', **parser_vars[str(sklic)]['table_search'])
    # first <tr> is the header row, hence the i > 0 guard
    for i, tr in enumerate(table.find_all('tr')):
        if i > 0:
            tds = tr.find_all('td')
            if len(tds) == 3:
                if str(sklic) == '6':
                    # term 6 lists party history as "CUR, prej OLD"
                    # ("prej" = formerly) -> one entry per party
                    orgs = tds[2].string.split(', prej ')
                elif str(sklic) == '5':
                    if 'na listi' in tds[2].string:
                        # takes first and last token; [:-1] strips a
                        # trailing character (presumably ')') — verify
                        spl = tds[2].string.split(' ')
                        orgs = [spl[0], spl[-1][:-1]]
                    else:
                        orgs = [tds[2].string]
                data.append((tds[1].string, orgs))
    for line in data:
        add_membership(*line, sklic=sklic)
def find_org(name_parser, person_name, sklic):
    """Resolve a scraped party acronym to an Organization, or None.

    Strips academic titles from person_name, normalises special acronyms
    (PNeP -> NP; NeP -> a per-person 'NeP - <reversed initials>' tag;
    national-minority labels -> NS), then requires an exact alias match
    inside the comma-separated Organization.name_parser field.
    """
    # NOTE(review): the title check uses `in` but slices from the front,
    # so it assumes 'dr. '/'mag. ' only ever appear as a prefix — verify.
    if 'dr. ' in person_name.lower():
        person_name = person_name[4:]
    elif 'mag. ' in person_name.lower():
        person_name = person_name[5:]
    if name_parser == 'PNeP':
        name_parser = 'NP'
    elif name_parser == 'NeP':
        # unattached MP: build a person-specific tag from reversed initials
        name_parser = 'NeP - ' + ''.join(reversed([name[0] for name in person_name.split(' ')]))
        print 'NEP ', name_parser
    elif name_parser in parser_vars[str(sklic)]['imns']:
        print name_parser
        name_parser = 'NS'
    organizations = Organization.objects.filter(name_parser__icontains=name_parser)
    for organization in organizations:
        # icontains is fuzzy; require an exact alias match
        if name_parser in organization.name_parser.split(','):
            return organization
    print '______________________________', person_name
    return None
def find_person(name_parser):
    """Look up Person rows matching the scraped name (academic titles
    stripped). Returns a queryset; prints a warning when ambiguous.

    NOTE(review): like find_org, the title check uses `in` but slices
    from the front — assumes the title is a prefix; verify.
    """
    if 'dr. ' in name_parser.lower():
        name_parser = name_parser[4:]
    elif 'mag. ' in name_parser.lower():
        name_parser = name_parser[5:]
    person = Person.objects.filter(name_parser__icontains=name_parser.strip())
    if len(person)>1:
        print "TUKI NEKI NI OK"
    return person
# Entry point: scrape term-5 memberships from the portal page below.
parse_memberships("https://www.dz-rs.si/wps/portal/Home/ODrzavnemZboru/Dogodki/20obletnicaDrzavnegazbora/bee07e17-39db-4caa-935b-92e8f781828b/!ut/p/z1/tVRLU8IwEP4reOBYs0lf6bGA1gKlI22F5uL0EbBqE8QKyq83Mo7jYxAch1wymf0eu8luEENTxES2quZZU0mR3atzyqzrzmhs-gMPQ9ihBPxxEJk9Y4R9z0KTLWDgh5HTwS54Y0MB8MCJh8EQgwKwL-Ez3FPhwL_wRy4BwN_5oYPPwU90z9CDGCAgh_Fhx3Jhy__InzqOBW7Uv0yuuqBDz3zn_wJgf6r_a_4ePdB_V_0J2ce_QgyxQjSL5gal5aZVyuKp5qJ5bINslctNthK8bm1yuXxqQynnsryr2kDgtCXze96IqsjakHMONse2pjtlrhlFlmmObuaaQzid2RRTQvM3n0VRlSg9CD3Z1zjs92dLFd_eyacGmqwqvkaJkMta9Wn0x_QufjhQ6lrKIR4a59ElUTf0T4dv8jCw3wqIeyQMOzp4xnHlrePKm8eVt_8p3983tepbI8ugG8yVbNbcaJWYSTQ9SFtRq9uHB-aqsZOi4c8Nmh517hZ1sl011V-0tL9ar-NZ3c3pp21CH92TV0q17Hw!/dz/d5/L2dBISEvZ0FBIS9nQSEh/", 5)
# -*- coding: utf-8 -*-
from .. import utils
class BubbleSort():
    """Bubble sort over a copy of the wrapped array.

    Swapping is delegated to utils.array_swap_items; the original input
    held in self.array is never mutated.
    """

    def __init__(self, array):
        self.array = array

    def sort(self):
        """Return a sorted copy of self.array (ascending)."""
        # BUG FIX (idiom): local was named `sorted`, shadowing the builtin.
        result = self.array.copy()
        swapped = True
        while swapped:
            swapped = False
            # repeatedly bubble out-of-order neighbours until a full pass
            # makes no swap
            for i in range(1, len(result)):
                if result[i] < result[i-1]:
                    result = utils.array_swap_items(result, i-1, i)
                    swapped = True
        return result
|
import logging
import threading
import sys
from libs import Leap
from core.listeners import LeapListener
class LeapMotionListenerThread(threading.Thread):
    """Thread that attaches a LeapListener to a Leap controller and keeps
    it alive until a line is read from stdin."""

    def __init__(self):
        threading.Thread.__init__(self)
        # set this thread as a daemon to terminate when the
        # main program terminates.
        # self.daemon = True

    def run(self):
        """Attach the listener, block on stdin, then always detach."""
        # BUG FIX: pre-bind so the finally block cannot hit an unbound
        # name when construction raises before assignment.
        listener = None
        controller = None
        try:
            listener = LeapListener()
            controller = Leap.Controller()
            controller.set_policy_flags(Leap.Controller.POLICY_BACKGROUND_FRAMES)
            controller.add_listener(listener)
            # block here; the listener runs via Leap callbacks meanwhile
            sys.stdin.readline()
        except Exception as e:
            logging.error(e)
        finally:
            if controller is not None and listener is not None:
                controller.remove_listener(listener)
|
import torch
import torch.nn as nn
import torchvision.models as models
from torchvision import transforms
from torch.autograd import Variable
from PIL import Image
import numpy as np
class Model(nn.Module):
    """Image-embedding model: concatenates pooled ResNeXt-101 and
    VGG19-BN features for a batch of image files and returns them as a
    numpy array. Requires CUDA.
    """

    def __init__(self, params=None):
        super(Model, self).__init__()
        # pretrained backbones in eval mode; classifier heads are dropped
        # below by keeping children()[:-1]
        self.model1 = models.resnext101_32x8d(pretrained=True).cuda().eval()
        self.model2 = models.vgg19_bn(pretrained=True).cuda().eval()
        modules = list(self.model1.children())[:-1]
        self.model1 = nn.Sequential(*modules)
        modules2 = list(self.model2.children())[:-1]
        self.model2 = nn.Sequential(*modules2)
        # standard ImageNet preprocessing
        self.image_loader = transforms.Compose([
            transforms.Resize(224),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])])

    def forward(self, images=None):
        """Embed a list of image file paths.

        Args:
            images: list of image file paths. BUG FIX: the default was a
                mutable `[]`; None is used as the sentinel instead.

        Returns:
            numpy array of concatenated VGG19-BN + ResNeXt features.
        """
        if images is None:
            images = []
        list_images = [self.image_loader(Image.open(file_name)).cuda() for file_name in images]
        input_data = torch.stack(list_images, dim=0)
        # flatten VGG features to (N, C, 1, 1) so both backbones can be
        # concatenated along the channel dimension
        return torch.cat((self.model2(input_data).reshape(len(images), -1, 1, 1), self.model1(input_data)), 1).squeeze().cpu().data.numpy()
# Demo: embed two (identical) images and print the result's shape and
# per-image feature sums. Requires CUDA and the hard-coded paths below.
model = Model()
list_of_image_locations = ["/shared/saurabh.m/101_ObjectCategories/airplanes/image_0002.jpg", "/shared/saurabh.m/101_ObjectCategories/airplanes/image_0002.jpg"]
image_embeddings = model(list_of_image_locations)
# list_of_image_embeddings = torch.unbind(image_embeddings, dim =0)
# print(list_of_image_embeddings)
print(image_embeddings.shape)
print(np.sum(image_embeddings, axis=1))
class student:
    """A student record (name + roll number) that owns a laptop instance."""

    class laptop:
        """Hard-coded default laptop configuration."""

        def __init__(self):
            self.brand = 'hp'
            self.cpu = 'i5'
            self.ram = 8

        def show(self):
            """Print brand, CPU and RAM, one per line."""
            print(self.brand)
            print(self.cpu)
            print(self.ram)

    def __init__(self, name, roll):
        self.name = name
        self.roll = roll
        # composition: every student gets a default laptop
        self.lap = self.laptop()

    def show(self):
        """Print the student's details followed by the laptop's specs."""
        print(self.name, self.roll)
        self.lap.show()
s1=student("navin",65)
s2=student("pola",78)
s1.show()
|
# Tally fuel-pump selections (1/2/3) read from stdin until "4" is entered,
# then print the totals.
tabela_combustivel = {"1": 0, "2": 0, "3": 0}
comando = 0
while comando != "4":
    comando = input()
    if comando in tabela_combustivel:
        tabela_combustivel[comando] += 1
print("MUITO OBRIGADO\nAlcool: {}\nGasolina: {}\nDiesel: {}".format(tabela_combustivel["1"], tabela_combustivel["2"],
                                                                    tabela_combustivel["3"]))
|
#!/usr/bin/env python
# encoding: utf-8
from django.urls import path
from . import views
# URL namespace, e.g. reverse('comment:comment_view', ...)
app_name = 'comment'
urlpatterns = [
    # /<song_id>.html -> views.comment_view(request, song_id=<song_id>)
    path('<int:song_id>.html', views.comment_view, name='comment_view'),
]
|
import numpy as np
import cv2
import urllib.request

# Camera intrinsics and distortion coefficients — loop-invariant, so they
# are built once here instead of being rebuilt for every frame.
K = np.array(([[1.25790446e+03,0.00000000e+00,6.78148700e+02],[0.00000000e+00,1.25628991e+03,3.90337199e+02],[0.00000000e+00,0.00000000e+00,1.00000000e+00]]))
d = np.array(([ -4.32680150e-01,-4.78137707e-01,1.52852660e-03,-2.97190159e-03,2.33311192e+00]))

# Connect to the camera's MJPEG stream.
stream = urllib.request.urlopen('http://192.168.0.90/mjpg/video.mjpg')
buffer = b''  # renamed from `bytes`, which shadowed the builtin
while True:
    try:
        buffer += stream.read(16384)
        # JPEG frames are delimited by SOI (0xffd8) and EOI (0xffd9)
        a = buffer.find(b'\xff\xd8')
        b = buffer.find(b'\xff\xd9')
        if a != -1 and b != -1:
            jpg = buffer[a:b+2]
            buffer = buffer[b+2:]
            # BUG FIX: np.fromstring is deprecated; frombuffer is the
            # zero-copy replacement for binary data.
            frame = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
            img = frame
            h, w = img.shape[:2]
            # undistort with an optimal new camera matrix
            newcamera, roi = cv2.getOptimalNewCameraMatrix(K, d, (w, h), 0)
            newimg = cv2.undistort(img, K, d, None, newcamera)
            # Displays the final product
            cv2.imshow('frame', newimg)
    except Exception:
        # best-effort: drop malformed/incomplete frames rather than crash.
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        pass
    # Hit esc to kill
    if cv2.waitKey(1) == 27:
        exit(0)
## open the file 'mbox-short.txt', find the lines containing
## "X-DSPAM-Confidence:" and slice the numeric value from the end.
## convert the values to floats and give back an average
PREFIX = "X-DSPAM-Confidence:"
fname = input('Enter file name: ')
count = 0
tot = 0.0
# BUG FIX: `with` guarantees the file is closed (original never closed it)
with open(fname) as fh:
    for line in fh:
        if not line.startswith(PREFIX):
            continue
        count = count + 1
        # BUG FIX: the fixed slice line[21:] cut one character too many
        # and dropped the first digit of the value (wrong for values >= 1);
        # slice from the end of the header name instead.
        num = float(line[len(PREFIX):])
        tot = tot + num
# average computed once after the loop (was recomputed per line);
# guard against no matching lines to avoid ZeroDivisionError
fin = tot / count if count else 0
print("Average spam confidence:", fin)
|
import math, copy, sys
import numpy as np
class Map(object):
    """2-D grid map over world coordinates.

    data_ holds the per-cell values; resolution_ is the world size of one
    cell; offset_x_/offset_y_ place the grid origin in world coordinates.
    """

    def __init__(self, size_x, size_y, offset_x, offset_y, resolution, data):
        self.size_x_ = size_x
        self.size_y_ = size_y
        self.offset_x_ = offset_x
        self.offset_y_ = offset_y
        self.resolution_ = resolution
        self.data_ = data

    def world2map(self, x, y):
        """Convert world coordinates to cell indices.

        Returns (map_x, map_y, ok); (0, 0, False) when out of bounds.
        BUG FIX: indices are cast to int — np.round returns floats, which
        cannot be used to index a numpy array on modern numpy.
        """
        map_x = int(np.round((x - self.offset_x_) / self.resolution_))
        map_y = int(np.round((y - self.offset_y_) / self.resolution_))
        if map_x < 0 or map_y < 0 or map_x >= self.size_x_ or map_y >= self.size_y_:
            return 0, 0, False
        return map_x, map_y, True

    def map2world(self, map_x, map_y):
        """Convert cell indices to the world coordinates of the cell center."""
        x = (map_x + 0.5) * self.resolution_ + self.offset_x_
        y = (map_y + 0.5) * self.resolution_ + self.offset_y_
        return x, y
class Laser(object):
    """Simulated 2-D laser range finder that ray-casts through a Map.

    Each beam returns a distance in world units (capped at max_range_)
    with additive zero-mean Gaussian noise of variance noise_variance_.
    """

    def __init__(self, max_range, min_angle, resolution_angle, no_of_beams, noise_variance, map_ptr):
        self.max_range_ = max_range
        self.min_angle_ = min_angle
        self.resolution_angle_ = resolution_angle
        self.no_of_beams_ = no_of_beams
        self.noise_variance_ = noise_variance
        self.map_ptr_ = map_ptr
        self.ranges_ = np.zeros(no_of_beams)

    def scan(self, x, y, theta):
        """Cast all beams from pose (x, y, theta); returns the ranges array."""
        current_angle = self.updateAngle(theta, self.min_angle_)
        for r in range(self.ranges_.shape[0]):
            self.ranges_[r] = self.rayCast(x, y, current_angle)
            current_angle = self.updateAngle(current_angle, self.resolution_angle_)
        return self.ranges_

    def rayCast(self, x, y, theta):
        """Trace a single ray from world position (x, y) along heading theta.

        Uses incremental grid traversal (tMax/tDelta stepping): repeatedly
        advance into the neighbouring cell whose border the ray crosses
        first. Returns 0 if the start pose is off-map, max_range_ if no
        cell is hit within range, otherwise the noisy hit distance.
        """
        start_x, start_y, is_ok = self.map_ptr_.world2map(x, y)
        if not is_ok: return 0
        # Initialization
        direction = np.zeros(2, dtype="float64")
        direction[0] = np.cos(theta)
        direction[1] = np.sin(theta)
        origin = np.zeros(2, dtype="float64")
        origin[0] = x
        origin[1] = y
        current = np.zeros(2, dtype="float64")
        current[0] = start_x
        current[1] = start_y
        # voxelBorder starts at the low corner of the start cell
        voxelBorder = np.zeros(2, dtype="float64")
        voxelBorder[0], voxelBorder[1] = self.map_ptr_.map2world(current[0], current[1])
        voxelBorder[0] -= 0.5 * self.map_ptr_.resolution_
        voxelBorder[1] -= 0.5 * self.map_ptr_.resolution_
        step = np.zeros(2, dtype="float64")
        tMax = np.zeros(2, dtype="float64")
        tDelta = np.zeros(2, dtype="float64")
        # Compute per-axis step direction and border-crossing parameters
        for i in range(2):
            if direction[i] > 0.0: step[i] = 1
            elif direction[i] < 0.0: step[i] = -1
            else: step[i] = 0
            if step[i] != 0:
                if step[i] == 1:
                    # stepping up: the first border crossed is the high one
                    voxelBorder[i] += float(step[i] * self.map_ptr_.resolution_ * 1.0)
                tMax[i] = (voxelBorder[i] - origin[i]) / direction[i]
                tDelta[i] = self.map_ptr_.resolution_ / abs(direction[i])
            else:
                # ray parallel to this axis: its borders are never crossed
                tMax[i] = sys.float_info.max
                tDelta[i] = sys.float_info.max
        # Incremental phase
        while True:
            # advance along the axis whose border is crossed first
            if tMax[0] < tMax[1]: dim = 0
            else: dim = 1
            current[dim] += step[dim]
            tMax[dim] += tDelta[dim]
            if np.sqrt((current[0] - start_x)*(current[0] - start_x) + (current[1] - start_y)*(current[1] - start_y)) * self.map_ptr_.resolution_ > self.max_range_:
                return self.max_range_
            else:
                if current[0] < 0 or current[1] < 0 or current[0] >= self.map_ptr_.size_x_ or current[1] >= self.map_ptr_.size_y_:
                    flag = False
                else:
                    # BUG FIX: cast indices to int — `current` is a float64
                    # array and float indexing raises on modern numpy
                    # (assumes data_ is a 2-D numpy array — TODO confirm).
                    value = self.map_ptr_.data_[int(current[0]), int(current[1])]
                    flag = True
                if flag:
                    if value < 0.55:
                        # cell counts as a hit; presumably data_ stores a
                        # free-space probability — verify against map source
                        mu = 0
                        sigma = np.sqrt(self.noise_variance_)
                        noise = sigma * np.random.randn() + mu
                        return np.sqrt((current[0] - start_x)*(current[0] - start_x) + (current[1] - start_y)*(current[1] - start_y)) * self.map_ptr_.resolution_ + noise
                    else:
                        return self.max_range_

    def updateAngle(self, angle, increment):
        """Add increment to angle, wrapping once when the result exceeds
        2*pi (negative angles are not wrapped)."""
        angle += increment
        if angle > math.pi*2:
            angle -= math.pi*2
        return angle
# Author: Mayuri Gujja
# Functional test on a greenkart ( e-commerce application) that performs the following tasks
# 1. Searches for a keyword
# 2. Adds veggies to the cart
# 3. Proceeds to checkout
# 4. Applies promos
# 5. Does all validations in these two pages
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
# Functional flow: search, add all results to cart, checkout, apply promo,
# and validate names/discount/total along the way.
driver = webdriver.Chrome(executable_path="C:\\Users\\mayurigu\\Downloads\\chromedriver_win32 (7)\\chromedriver.exe")
# Global implicit wait
driver.implicitly_wait(5)
# Navigate to the greenkart application
driver.get("https://rahulshettyacademy.com/seleniumPractise/#/")
# Search with "ber" keyword
driver.find_element_by_css_selector("input.search-keyword").send_keys("ber")
ExpectedVeggies = ['Cucumber - 1 Kg', 'Raspberry - 1/4 Kg', 'Strawberry - 1/4 Kg']
time.sleep(4)
# Count the number of fruits/veggies returned
countNUM = len(driver.find_elements_by_css_selector("div.products"))
# Lists to validate the veggie names before and after checkout
VeggiesFirstPage = []
VeggiesLastPage = []
# Add every search result to the cart, collecting its name on the way
addToCartButtons = driver.find_elements_by_xpath("//div[@class='product-action']/button")
for btn in addToCartButtons:
    # walk up to the product card and grab its <h4> title
    vegname = btn.find_element_by_xpath("parent::div/parent::div/h4").text
    VeggiesFirstPage.append(vegname)
    btn.click()
assert ExpectedVeggies == VeggiesFirstPage
# Open the cart and proceed to checkout
driver.find_element_by_css_selector("a.cart-icon").click()
driver.find_element_by_xpath("//button[text()='PROCEED TO CHECKOUT']").click()
wait = WebDriverWait(driver, 10)
wait.until(expected_conditions.presence_of_element_located((By.CLASS_NAME, "promoCode")))
vegTable = driver.find_elements_by_xpath("//p[@class='product-name']")
for veg in vegTable:
    VeggiesLastPage.append(veg.text)
print(VeggiesLastPage)
print(VeggiesFirstPage)
assert VeggiesFirstPage == VeggiesLastPage
discountBeforePromo = driver.find_element_by_class_name("discountAmt").text
driver.find_element_by_class_name("promoCode").send_keys("rahulshettyacademy")
wait.until(expected_conditions.presence_of_element_located((By.CLASS_NAME, "promoBtn")))
driver.find_element_by_class_name("promoBtn").click()
wait.until(expected_conditions.presence_of_element_located((By.CLASS_NAME, "promoInfo")))
CodeText = driver.find_element_by_class_name("promoInfo").text
assert "Code applied" in CodeText
discountAfterPromo = driver.find_element_by_class_name("discountAmt").text
# BUG FIX: was int(discountBeforePromo), which raises ValueError on a
# decimal string like "37.5"; compare both sides as floats.
assert float(discountAfterPromo) < float(discountBeforePromo)
# Sum the per-item amounts and compare against the displayed total
# (renamed from `sum`, which shadowed the builtin)
total = 0
Amounts = driver.find_elements_by_xpath("//tr/td[5]/p")
for Amount in Amounts:
    total = total + int(Amount.text)
print(total)
TotalAmount = driver.find_element_by_class_name("totAmt").text
assert total == int(TotalAmount)
|
from django.urls import path, include
from file.views import *
from api.views import *
# File/notes API routes. NOTE(review): both `file.views` and `api.views`
# are star-imported above; each view name must be unique across the two
# modules or the later import silently wins — verify.
urlpatterns = [
    path('filedownload', FileDownload.as_view()),
    path('upfile', UpFile.as_view()),
    path('getfilebytime', GetFileByTime.as_view()),
    path('insertcoffer', InsertCoffer.as_view()),
    path('createnote',CreateNote.as_view()),
    path('getlist', GetList.as_view()),
    path('restore', Restore.as_view()),
    path('delnote', delNote.as_view()),
    path('updatenote', UpdateNote.as_view()),
    path('getnote', GetNote.as_view()),
    path('deletefile', DeleteFile.as_view()),
    path('gototrash', GotoTrash.as_view()),
    path('huifu', HuiFu.as_view()),
    path('getfile', GetFile.as_view()),
]
|
import numpy as np
import copy
import numpy.random as npr
npr.seed(0)
class crp_hawkes(object):
    """Sampler for a Chinese-restaurant-process (CRP) community model in
    which event streams between entity pairs follow Hawkes processes.

    Each community pair (p, q) has unconstrained parameters
    (Hawkes_b, Hawkes_eta, Hawkes_zeta) clipped to [self.min, self.max];
    before entering the likelihood they are squashed through logit()
    (actually a sigmoid) and multiplied by the global scalings
    self.b / self.eta / self.zeta.
    """
    def __init__(self, b_prior_mu, b_prior_sigma, zeta_prior_mu, zeta_prior_sigma, eta_prior_mu, eta_prior_sigma,
                 observation_list, max_b, particle_num = 100, c_max=20,
                 d_num = 0, u_id = [], nu = 1, xi = 1, b = 1, zeta = 1, eta = 1, T = 0):
        """Initialise priors, CRP bookkeeping, and a random initial
        community assignment for all d_num entities.

        NOTE(review): observation_list[i][j] appears to hold the event
        times from entity i to entity j — confirm with the caller.
        NOTE(review): u_id = [] is a mutable default argument; it is only
        stored, but instances created without u_id share one list.
        """
        # auxilary
        self.u_id = u_id
        self.undefine_para_value = -100
        self.c_max = c_max
        self.min = -10
        self.max = 10
        #hyperparamter
        self.b_prior_mu = b_prior_mu
        self.b_prior_sigma = b_prior_sigma
        self.zeta_prior_mu = zeta_prior_mu
        self.zeta_prior_sigma = zeta_prior_sigma
        self.eta_prior_mu = eta_prior_mu
        self.eta_prior_sigma = eta_prior_sigma
        self.b_max = max_b
        self.T = T
        # parameter for chinese restaurant process
        self.nu = nu # fragmentation parameter
        self.xi = xi # coagulation parameter
        # hawkes scaling parameter
        self.b = b
        self.eta = eta
        self.zeta = zeta
        # hawkes parameter for community pair(b,zeta,eta)
        self.Hawkes_b = np.zeros((self.c_max,self.c_max))+self.undefine_para_value
        self.Hawkes_eta = np.zeros((self.c_max,self.c_max))+self.undefine_para_value
        self.Hawkes_zeta = np.zeros((self.c_max,self.c_max))+self.undefine_para_value
        # FCPNode information
        self.entity_num = np.zeros(c_max,dtype=int)
        self.entity_list = [[] for i in range(c_max)]
        self.c_time = np.zeros((self.c_max,2)) # record beginning and ending time
        self.c_time[:,0] = -1 # use c_time[:,0] to check the community is alive
        self.c_time[:,1] = self.T # set initializtion of ending time being self.T for convenience
        # entity information
        self.d_num = d_num # entity number
        self.observation_list = observation_list # observation_list
        self.path_entity = [[] for i in range(self.d_num)] # community path for each entity
        #initialization
        # initialize c_time (community at time 0)
        initial_c_num = 1
        # each entity joins one of the communities opened so far uniformly;
        # drawing the newest index opens the next community
        for i in range(self.d_num):
            c_index = npr.randint(initial_c_num, size = 1)[0]
            self.path_entity[i]=c_index
            if c_index == initial_c_num-1:
                self.c_time[c_index,0] = 0
                initial_c_num = initial_c_num+1
        # initialize ----------entity_list,entity_num
        for i in range(self.d_num):
            c_index = self.path_entity[i]
            self.entity_list[c_index].append(i)
            self.entity_num[c_index] = self.entity_num[c_index]+1
        # initialize ----------- hawkes parameter
        index = np.where(self.c_time[:,0]==0)[0]
        # draw unconstrained parameters from their Gaussian priors, clipped
        # to [self.min, self.max].
        # NOTE(review): b is written at [p,q] while eta/zeta are written at
        # [q,p]; since p and q range over the same index set every cell is
        # filled either way, but confirm the asymmetry is intentional.
        for p in index:
            for q in index:
                v = npr.normal(loc = self.b_prior_mu, scale = self.b_prior_sigma)
                self.Hawkes_b[p,q] = np.clip(v, self.min, self.max)
                v = npr.normal(loc = self.eta_prior_mu, scale = self.eta_prior_sigma)
                self.Hawkes_eta[q,p] = np.clip(v, self.min, self.max)
                v = npr.normal(loc = self.zeta_prior_mu, scale = self.zeta_prior_sigma)
                self.Hawkes_zeta[q,p] = np.clip(v, self.min, self.max)
    def preprocess_i(self, i): #
        """Detach entity i from its community; retire the community (and
        reset its Hawkes parameters) if it becomes empty."""
        c = self.path_entity[i]
        # print(c,'c in preprocess')
        self.entity_list[c].remove(i)
        self.entity_num[c] = self.entity_num[c]-1
        if self.entity_num[c] == 0:
            self.c_time[c,0] = -1
            self.remove_hawkes_parameter(c)
    def remove_hawkes_parameter(self,community_index):
        """Reset a retired community's Hawkes parameter rows/columns back
        to the 'undefined' sentinel value."""
        self.Hawkes_b[community_index,:] = self.undefine_para_value
        self.Hawkes_b[:,community_index] = self.undefine_para_value
        self.Hawkes_eta[community_index,:] = self.undefine_para_value
        self.Hawkes_eta[:,community_index] = self.undefine_para_value
        self.Hawkes_zeta[community_index,:] = self.undefine_para_value
        self.Hawkes_zeta[:,community_index] = self.undefine_para_value
    def sample_z(self):
        """Gibbs-resample every entity's community assignment.

        For each entity i: detach it, build the CRP prior over live
        communities plus one new community, add the Hawkes log-likelihood
        of i's incoming and outgoing event streams under each choice,
        normalise, and sample. A newly opened community gets the
        pre-drawn parameters against all live communities.
        """
        for i in range(self.d_num):
            # print(i, 'entity index in sample z')
            self.preprocess_i(i)
            c_live = np.where(self.c_time[:,0]==0)[0]
            # CRP prior: existing communities proportional to size; the
            # last slot is the probability of opening a new community
            prior = np.zeros(len(c_live)+1)
            for c in range(len(c_live)):
                prior[c] = self.entity_num[c_live[c]]/(self.d_num-1+self.nu/self.xi)
            prior[-1] = self.nu/self.xi/(self.d_num-1+self.nu/self.xi)
            # pre-draw parameters for a hypothetical new community:
            # new_* for (new -> existing), new_*_ for (existing -> new)
            v = npr.normal(loc = self.b_prior_mu, scale = self.b_prior_sigma,size = len(c_live))
            new_b = np.clip(v, self.min, self.max)
            v = npr.normal(loc = self.eta_prior_mu, scale = self.eta_prior_sigma, size = len(c_live))
            new_eta = np.clip(v, self.min, self.max)
            v = npr.normal(loc = self.zeta_prior_mu, scale = self.zeta_prior_sigma, size = len(c_live))
            new_zeta = np.clip(v, self.min, self.max)
            v = npr.normal(loc = self.b_prior_mu, scale = self.b_prior_sigma, size = len(c_live))
            new_b_ = np.clip(v, self.min, self.max)
            v = npr.normal(loc = self.eta_prior_mu, scale = self.eta_prior_sigma, size = len(c_live))
            new_eta_ = np.clip(v, self.min, self.max)
            v = npr.normal(loc = self.zeta_prior_mu, scale = self.zeta_prior_sigma, size = len(c_live))
            new_zeta_ = np.clip(v, self.min, self.max)
            like = np.zeros(len(c_live)+1)
            # likelihood of assigning i to each live community: sum over
            # both directions (i->j and j->i) for every other entity j
            for n in range(len(c_live)):
                for j in range(self.d_num):
                    if i!=j:
                        x_ij = self.observation_list[i][j]
                        x_ji = self.observation_list[j][i]
                        b = self.Hawkes_b[c_live[n],self.path_entity[j]]
                        eta = self.Hawkes_eta[c_live[n],self.path_entity[j]]
                        zeta = self.Hawkes_zeta[c_live[n],self.path_entity[j]]
                        # squash unconstrained params and apply global scalings
                        b_ = self.b * logit(b)
                        eta_ = self.eta * logit(eta)
                        zeta_ = self.zeta * logit(zeta)
                        # print('1 sample z')
                        like[n] = like[n] + self.cal_hawkes_likelihood_(b_, eta_, zeta_, x_ij, x_ji)
                        b = self.Hawkes_b[self.path_entity[j],c_live[n]]
                        eta = self.Hawkes_eta[self.path_entity[j],c_live[n]]
                        zeta = self.Hawkes_zeta[self.path_entity[j],c_live[n]]
                        b_ = self.b * logit(b)
                        eta_ = self.eta * logit(eta)
                        zeta_ = self.zeta * logit(zeta)
                        # print('2 sample z')
                        like[n] = like[n] + self.cal_hawkes_likelihood_(b_, eta_, zeta_, x_ji, x_ij)
            # likelihood of opening a new community, using the pre-drawn
            # parameters against each j's current community
            for j in range(self.d_num):
                if i!=j:
                    x_ij = self.observation_list[i][j]
                    x_ji = self.observation_list[j][i]
                    cluster_j = self.path_entity[j]
                    cluster_index = np.where(c_live == cluster_j)[0]
                    j_b = new_b[cluster_index]
                    j_eta = new_eta[cluster_index]
                    j_zeta = new_zeta[cluster_index]
                    j_b_ = new_b_[cluster_index]
                    j_eta_ = new_eta_[cluster_index]
                    j_zeta_ = new_zeta_[cluster_index]
                    b_ = self.b * logit(j_b)
                    eta_ = self.eta * logit(j_eta)
                    zeta_ = self.zeta * logit(j_zeta)
                    # print('3 sample z')
                    like[-1] = like[-1] + self.cal_hawkes_likelihood_(b_, eta_, zeta_, x_ij, x_ji)
                    b_ = self.b * logit(j_b_)
                    eta_ = self.eta * logit(j_eta_)
                    zeta_ = self.zeta * logit(j_zeta_)
                    # print('4 sample z')
                    like[-1] = like[-1] + self.cal_hawkes_likelihood_(b_, eta_, zeta_, x_ji, x_ij)
            # posterior in log space, normalised to probabilities in place
            post = np.log(prior)+like
            normalize_weight(post)
            index = np.argmax(npr.multinomial(1, post))
            if index != len(post)-1:
                # joined an existing community
                c = c_live[index]
                self.path_entity[i] = c
                self.entity_list[c].append(i)
                self.entity_num[c] = self.entity_num[c]+1
            else:
                # open a new community in the first retired slot
                c_null = np.where(self.c_time[:,0]<0)[0]
                # NOTE(review): comparing a numpy index array to [] —
                # prefer len(c_null) > 0; elementwise comparison semantics
                # here depend on the numpy version. Verify.
                if c_null != []:
                    c_new = c_null[0]
                    self.path_entity[i] = c_new
                    self.entity_list[c_new].append(i)
                    self.entity_num[c_new] = self.entity_num[c_new]+1
                    self.c_time[c_new,0] = 0
                    # install the pre-drawn parameters for both directions
                    self.Hawkes_b[c_new,c_live] = new_b[:]
                    self.Hawkes_eta[c_new,c_live] = new_eta[:]
                    self.Hawkes_zeta[c_new,c_live] = new_zeta[:]
                    self.Hawkes_b[c_live,c_new] = new_b_[:]
                    self.Hawkes_eta[c_live,c_new] = new_eta_[:]
                    self.Hawkes_zeta[c_live,c_new] = new_zeta_[:]
                    # self-interaction parameters drawn fresh
                    v = npr.normal(loc = self.b_prior_mu, scale = self.b_prior_sigma, size = 1)
                    new_b = np.clip(v, self.min, self.max)
                    v = npr.normal(loc = self.eta_prior_mu, scale = self.eta_prior_sigma, size = 1)
                    new_eta = np.clip(v, self.min, self.max)
                    v = npr.normal(loc = self.zeta_prior_mu, scale = self.zeta_prior_sigma, size = 1)
                    new_zeta = np.clip(v, self.min, self.max)
                    self.Hawkes_b[c_new,c_new] = new_b
                    self.Hawkes_eta[c_new,c_new] = new_eta
                    self.Hawkes_zeta[c_new,c_new] = new_zeta
                else:
                    # no free community slot left (c_max exhausted)
                    print('wrong!!!')
                    exit()
    def cal_hawkes_likelihood_(self, b, eta, zeta, t_base, t_trigger):
        """Hawkes log-likelihood of events t_base on [0, self.T] with
        baseline b, excitation eta and decay zeta, triggered by events
        t_trigger (exponential kernel)."""
        # calculate the likelihood
        # print(b, eta, zeta, t_base, t_trigger)
        s = 0
        for i in range(len(t_base)):
            # intensity at t_base[i]: baseline plus decayed excitation
            # from every earlier trigger event
            s_ = b
            for j in range(len(t_trigger)):
                if t_base[i]>t_trigger[j]:
                    # print(eta[j]*np.exp(-zeta[j]*(t_base[i]-t_trigger[j])),'eta[j]*np.exp(-zeta[j]*(t_base[i]-t_trigger[j]))')
                    s_ = s_ + eta*np.exp(-zeta*(t_base[i]-t_trigger[j]))
            s = s + np.log(s_)
        # compensator: integral of the intensity over [0, T]
        s = s - b*self.T
        for j in range(len(t_trigger)):
            # print(- eta[j]*(1 - np.exp( -zeta[j]*(self.T-t_trigger[j])))/zeta[j],'- eta[j]*(1 - np.exp( -zeta[j]*(self.T-t_trigger[j])))/zeta[j]')
            s = s - eta*(1 - np.exp( -zeta*(self.T-t_trigger[j])))/zeta
        # print((s),'s in cal')
        return s
    def sample_scaling(self, para_select):
        # para_select 0:b 1:eta 2:zeta
        """Metropolis update of one global scaling factor from an
        independent uniform proposal; returns the log-likelihood of the
        kept state.

        NOTE(review): the proposal ranges (eta ~ U(0, 1/self.zeta),
        zeta ~ U(0, 1/self.eta)) look like a stability constraint but
        could not be verified from this file; also no proposal-density
        correction is applied — confirm it cancels for this scheme.
        """
        loglike = 0
        loglike_ = 0
        new_b = 0
        new_eta = 0
        new_zeta = 0
        if para_select == 0:
            new_b = npr.uniform(low = 0, high = self.b_max)
        if para_select == 1:
            new_eta = npr.uniform(low = 0, high = 1/self.zeta)
        if para_select == 2:
            new_zeta = npr.uniform(low = 0, high = 1/self.eta)
        # full data log-likelihood under the current and proposed scaling
        for i in range(self.d_num):
            for j in range(self.d_num):
                if i!=j:
                    x_ij = self.observation_list[i][j]
                    x_ji = self.observation_list[j][i]
                    b = self.Hawkes_b[self.path_entity[i],self.path_entity[j]]
                    eta = self.Hawkes_eta[self.path_entity[i],self.path_entity[j]]
                    zeta = self.Hawkes_zeta[self.path_entity[i],self.path_entity[j]]
                    b_ = self.b * logit(b)
                    eta_ = self.eta * logit(eta)
                    zeta_ = self.zeta * logit(zeta)
                    loglike = loglike + self.cal_hawkes_likelihood_(b_, eta_, zeta_, x_ij, x_ji)
                    # swap in the proposed scaling for the selected parameter
                    if para_select == 0:
                        b_ = new_b * logit(b)
                    if para_select == 1:
                        eta_ = new_eta * logit(eta)
                    if para_select == 2:
                        zeta_ = new_zeta * logit(zeta)
                    loglike_ = loglike_ + self.cal_hawkes_likelihood_(b_, eta_, zeta_, x_ij, x_ji)
        # accept/reject on the log-likelihood ratio
        u = loglike_ - loglike
        if u>0:
            if para_select == 0:
                self.b = new_b
            if para_select == 1:
                self.eta = new_eta
            if para_select == 2:
                self.zeta = new_zeta
            return loglike_
        else:
            u = np.exp(u)
            if npr.uniform(low=0.0, high=1.0) < u:
                if para_select == 0:
                    self.b = new_b
                if para_select == 1:
                    self.eta = new_eta
                if para_select == 2:
                    self.zeta = new_zeta
                return loglike_
            else:
                return loglike
    def sample_hawkes(self, para_select, c_send, c_receive):
        # not consider bound yet!!!
        # detemine the community location
        """Metropolis-Hastings update of one Hawkes parameter for the
        (c_send, c_receive) community pair, using a unit-variance Gaussian
        random-walk proposal clipped to [self.min, self.max]; returns the
        log-likelihood of the kept state.

        NOTE(review): q and q_ are both evaluated around mu = the current
        value, so q is always 0 and q_ enters MH with a plus sign — for a
        symmetric random walk the proposal terms should cancel entirely;
        confirm the intended correction.
        """
        new_b = 0
        new_eta = 0
        new_zeta = 0
        mu = 0
        sigma = 1
        if para_select == 0:
            new_b = npr.normal(self.Hawkes_b[c_send,c_receive], 1)
            mu = self.Hawkes_b[c_send,c_receive]
        if para_select == 1:
            new_eta = npr.normal(self.Hawkes_eta[c_send,c_receive], 1)
            mu = self.Hawkes_eta[c_send,c_receive]
        if para_select == 2:
            new_zeta = npr.normal(self.Hawkes_zeta[c_send,c_receive], 1)
            mu = self.Hawkes_zeta[c_send,c_receive]
        new_b = np.clip(new_b,self.min,self.max)
        new_eta = np.clip(new_eta,self.min,self.max)
        new_zeta = np.clip(new_zeta,self.min,self.max)
        loglike = 0
        loglike_ = 0
        # likelihood over all entity pairs between the two communities
        for i in self.entity_list[c_send]:
            for j in self.entity_list[c_receive]:
                if i!=j:
                    x_ij = self.observation_list[i][j]
                    x_ji = self.observation_list[j][i]
                    b = self.Hawkes_b[self.path_entity[i],self.path_entity[j]]
                    eta = self.Hawkes_eta[self.path_entity[i],self.path_entity[j]]
                    zeta = self.Hawkes_zeta[self.path_entity[i],self.path_entity[j]]
                    b_ = self.b * logit(b)
                    eta_ = self.eta * logit(eta)
                    zeta_ = self.zeta * logit(zeta)
                    s = self.cal_hawkes_likelihood_(b_, eta_, zeta_, x_ij, x_ji)
                    # print(s,'s')
                    loglike = loglike + s
                    # same likelihood with the proposed value swapped in
                    if para_select == 0:
                        b_ = self.b * logit(new_b)
                    else:
                        if para_select == 1:
                            eta_ = self.eta * logit(new_eta)
                        if para_select == 2:
                            zeta_ = self.zeta * logit(new_zeta)
                    s = self.cal_hawkes_likelihood_(b_, eta_, zeta_, x_ij, x_ji)
                    loglike_ = loglike_ + s
        ll = 0
        if para_select == 0:
            q_ = -(new_b-mu)**2/2
            q = -(self.Hawkes_b[c_send,c_receive]-mu)**2/2
            new_sample,ll = MH(loglike_, loglike, q_, q, new_b, self.Hawkes_b[c_send,c_receive])
            self.Hawkes_b[c_send,c_receive] = new_sample
        if para_select == 1:
            q_ = -(new_eta-mu)**2/2
            q = -(self.Hawkes_eta[c_send,c_receive]-mu)**2/2
            new_sample,ll = MH(loglike_, loglike, q_, q, new_eta, self.Hawkes_eta[c_send,c_receive])
            self.Hawkes_eta[c_send,c_receive] = new_sample
        if para_select == 2:
            q_ = -(new_zeta-mu)**2/2
            q = -(self.Hawkes_zeta[c_send,c_receive]-mu)**2/2
            new_sample,ll = MH(loglike_, loglike, q_, q, new_zeta, self.Hawkes_zeta[c_send,c_receive])
            self.Hawkes_zeta[c_send,c_receive] = new_sample
        # print(ll,'ll in sample_hawkes')
        return ll
def logit(x):
    """Logistic (sigmoid) squashing function.

    NOTE: despite the name this is the logistic function
    1 / (1 + exp(-x)), not its inverse (the actual logit).
    """
    denom = 1 + np.exp(-x)
    return 1 / denom
def MH(f_, f, q_, q, x_, x):
    """One Metropolis-Hastings accept/reject step.

    f_/f are log-likelihoods and q_/q log proposal densities of the
    candidate x_ and the current sample x. Returns the winning sample
    together with its log-likelihood. A uniform draw is always consumed,
    matching the original RNG stream.
    """
    u = npr.uniform(low=0.0, high=1.0)
    log_ratio = f_ + q_ - f - q
    # short-circuit: accept outright on positive log-ratio, else compare exp(ratio) to u
    if log_ratio > 0 or np.exp(log_ratio) > u:
        return x_, f_
    return x, f
def normalize_weight(weight):
    """Normalize an array of log-weights in place into probabilities.

    Subtracting the maximum log-weight first avoids overflow in exp().
    Mutates `weight` (a float ndarray) and returns None.
    """
    weight -= np.amax(weight)
    np.exp(weight, out=weight)
    weight /= np.sum(weight)
|
from random import shuffle, randint, choice
from collections import deque
import card_data
import logging
from time import strftime, gmtime
class Game():
    """Full match state for two players plus the FIFO action queue that
    drives resolution (Python 2 code: uses print statements)."""

    def __init__(self, hero1, hero2, deck1, deck2):
        # weirdly cyclic dependency with player, game and deck
        self.player1 = Player(hero=hero1, deck=None)
        self.player2 = Player(hero=hero2, deck=None)
        self.player1.deck = card_data.get_deck(deck1, self.player1)
        self.player2.deck = card_data.get_deck(deck2, self.player2)
        # consider changing this terminology to current_player and
        # current_enemy
        self.player = self.player1
        self.enemy = self.player2
        self.turn = 0
        self.effect_pool = []        # triggered effects; pruned by trigger_effects()
        self.action_queue = deque()  # pending (function, args) pairs; drained by resolve()
        self.minion_pool = {}        # minion_id -> Minion, populated by Minion.__init__
        self.minion_counter = 1000  # dummy value
        self.logger = get_logger()
        self.aux_vals = deque()      # pre-queued choices, consumed before random/interactive ones

    def choice(self, lst, random=False):
        """Pick an element of lst: queued aux value first, else random, else stdin prompt."""
        rtn = None
        if self.aux_vals:
            rtn = self.aux_vals.popleft()
        elif random:
            rtn = lst[randint(0, len(lst) - 1)]
        else:
            # TODO(adamvenis): add a pretty prompt here
            rtn = lst[input()]
        # NOTE(review): '%d' assumes the chosen element is an integer — confirm
        self.logger.info('AUX %d' % rtn)
        return rtn

    def get_aux(self, size, random=False):
        """Pick an auxiliary integer: queued value first, else random in [0, size]."""
        rtn = None
        if self.aux_vals:
            rtn = self.aux_vals.popleft()
        elif random:
            # NOTE(review): randint is inclusive on both ends, so `size` itself can be returned — confirm intended
            rtn = randint(0, size)
        else:
            # NOTE(review): `target` is not defined in this chunk — presumably an interactive target prompt; verify
            rtn = target(self)
        self.logger.info('AUX %d' % rtn)
        return rtn

    def resolve(self):
        """Drain the action queue, firing triggered effects before each action."""
        while self.action_queue:
            display(self)
            action = self.action_queue.popleft() #TODO(adamvenis): fix resolution order
            print 'ACTION:', action[0].__name__, list(action[1][1:])
            # [1:] 'game' gets cut out, as it's always the first parameter
            trigger_effects(self, [action[0].__name__] + list(action[1][1:]))
            action[0](*action[1]) # tuple with arguments in second slot
class Player():
    """Per-player state: hero, deck, zones (hand/board/secrets) and resources."""

    def __init__(self, hero, deck):
        self.hero = hero
        self.deck = deck
        self.hand = []
        self.board = []            # slot 0 holds the hero minion (see is_hero)
        self.secrets = []
        self.crystals = 0          # total mana crystals
        self.current_crystals = 0  # unspent mana this turn
        self.armor = 0
        self.weapon = None
        self.auras = set([])
        self.spellpower = 0
        self.fatigue = 0
        self.can_hp = True         # hero power still available this turn
        self.overload = 0
        self.combo = 0

    def __str__(self):
        # BUG FIX: __str__ must *return* a string; the old version printed
        # 'PLAYER' and returned None, so str(player) raised TypeError.
        return 'PLAYER'  # TODO: this is stupid
class Minion():
    """A battlefield minion; heroes are minions stored at board slot 0.

    On construction the minion registers itself in game.minion_pool under a
    fresh minion_id taken from game.minion_counter.
    """

    def __init__(self, game, card):
        self.name = card.name
        # 'neutral_*' hold the printed card values; 'current_*' include buffs
        self.neutral_attack = card.attack
        self.current_attack = card.attack
        self.neutral_health = card.health
        self.max_health = card.health
        self.current_health = card.health
        self.mechanics = card.mechanics
        self.race = card.race
        self.attacks_left = 0
        self.minion_id = game.minion_counter
        self.owner = card.owner
        game.minion_pool[self.minion_id] = self
        game.minion_counter += 1

    def attack(self, game):
        """Effective attack: base value, plus weapon (heroes only), then auras."""
        rtn = self.current_attack
        # board slot 0 means this minion is a hero
        if self.owner.board.index(self) == 0 and self.owner.weapon:
            # BUG FIX: previously added the bound method `weapon.attack` to an
            # int (TypeError); call it — display() uses weapon.attack(game) too.
            rtn += self.owner.weapon.attack(game)
        rtn = apply_auras(game, self.owner, self, 'attack', rtn)
        return rtn

    def health(self, game):
        """Effective health after applying the owner's auras."""
        rtn = self.current_health
        rtn = apply_auras(game, self.owner, self, 'health', rtn)
        return rtn

    def transform_into(self, new_minion):
        """Copy new_minion's stats and mechanics onto this minion, keeping
        minion_id and owner.

        NOTE(review): `race` is not copied — confirm that is intentional.
        """
        self.name = new_minion.name
        self.neutral_attack = new_minion.neutral_attack
        self.current_attack = new_minion.current_attack
        self.neutral_health = new_minion.neutral_health
        self.max_health = new_minion.max_health
        self.current_health = new_minion.current_health
        self.mechanics = new_minion.mechanics
        self.attacks_left = 0

    def __repr__(self):
        return self.name
class Weapon():
    """A hero weapon: an attack value plus a limited number of uses."""

    def __init__(self, attack, durability):
        self.current_attack = attack
        self.durability = durability

    def attack(self, game):
        """Effective attack value; takes `game` to make room for auras (aka for spiteful smith)."""
        return self.current_attack
class Aura():
    """A persistent stat modifier attached to a player.

    `modifier` is a callable (game, object, stat, value) -> adjusted value,
    applied by apply_auras() whenever a stat is read.
    """
    def __init__(self, id, modifier):
        self.id = id              # NOTE(review): shadows the builtin `id`
        self.modifier = modifier
        self.aux_vars = {}        # scratch state available to the modifier
def apply_auras(game, player, object, stat, value):
    """Fold every aura modifier of `player` over `value` for the given stat."""
    result = value
    for active_aura in player.auras:
        result = active_aura.modifier(game, object, stat, result)
    return result
def validate_attack(game, player_ind, enemy_ind):
    """Return True if attacking game.enemy.board[enemy_ind] with
    game.player.board[player_ind] is legal; otherwise print the reason and
    return False. Index 0 on each board is the hero (Python 2 code).
    """
    if player_ind not in range(len(game.player.board)):
        print 'wrong index for ally minion. Must supply a number from 0 to %s' % str(len(game.player.board))
        return False
    elif enemy_ind not in range(len(game.enemy.board)):
        print 'wrong index for enemy minion. Must supply a number from 0 to %s' % str(len(game.enemy.board))
        return False
    ally_minion = game.player.board[player_ind]
    enemy_minion = game.enemy.board[enemy_ind]
    if ally_minion.attacks_left <= 0:
        print 'this minion cannot attack'
        return False
    elif ally_minion.attack(game) <= 0:
        print 'this minion has no attack'
        return False
    elif 'Frozen' in ally_minion.mechanics or 'Thawing' in ally_minion.mechanics:
        print 'This minion is frozen, and cannot attack'
        return False
    elif 'Stealth' in enemy_minion.mechanics:
        print 'cannot attack a minion with stealth'
        return False
    # Taunt scan uses board[1:], i.e. it excludes the enemy hero at slot 0
    elif 'Taunt' not in enemy_minion.mechanics and any('Taunt' in minion.mechanics for minion in game.enemy.board[1:]):
        print 'must target a minion with taunt'
        return False
    return True
def func_to_name(s):
    """Convert a function-style name to display form: 'fire_ball' -> 'Fire Ball'."""
    words = s.split('_')
    # word[:1] instead of word[0]: an empty segment (e.g. from 'a__b' or a
    # leading underscore) no longer raises IndexError.
    return ' '.join(word[:1].upper() + word[1:] for word in words)
def name_to_func(s):
    """Inverse of func_to_name: 'Fire Ball' -> 'fire_ball'."""
    return s.replace(' ', '_').lower()
def is_int(s):
    """Return True if `s` parses as a base-10 integer (EAFP)."""
    try:
        int(s)
    except ValueError:
        return False
    return True
def is_hero(minion):
    """True when the minion sits at slot 0 of its owner's board — the hero slot."""
    board = minion.owner.board
    return board.index(minion) == 0
def display(game):
    """Print the whole game state: enemy hero/hand, both boards drawn as
    ASCII boxes (name on top, attack/health inside), then our hand and hero.
    Python 2 code (print statements, integer division)."""
    print game.minion_pool
    # Each board string: [top-border pieces, name row, attack/health pieces];
    # a copy of the border row is appended later as the bottom border.
    player1_board_string = [[' ' * 9], 'P1 Board: %s' %
        ' '.join(map(lambda x:'|' + x.name + '|', game.player1.board[1:])), [' ' * 9]]
    player2_board_string = [[' ' * 9], 'P2 Board: %s' %
        ' '.join(map(lambda x:'|' + x.name + '|', game.player2.board[1:])), [' ' * 9]]
    for minion in game.player1.board[1:]:
        player1_board_string[0].append('-' * (len(minion.name) + 2))
        # pad between attack and health so the stat row is as wide as the name
        player1_board_string[2].append('|' + str(minion.attack(game))
            + ' ' * (len(minion.name) - len(str(minion.attack(game))) - len(str(minion.health(game))))
            + str(minion.health(game)) + '|')
    player1_board_string[0] = ' '.join(player1_board_string[0])
    player1_board_string[2] = ' '.join(player1_board_string[2])
    player1_board_string.append(player1_board_string[0])
    for minion in game.player2.board[1:]:
        player2_board_string[0].append('-' * (len(minion.name) + 2))
        player2_board_string[2].append('|' + str(minion.attack(game))
            + ' ' * (len(minion.name) - len(str(minion.attack(game))) - len(str(minion.health(game))))
            + str(minion.health(game)) + '|')
    player2_board_string[0] = ' '.join(player2_board_string[0])
    player2_board_string[2] = ' '.join(player2_board_string[2])
    player2_board_string.append(player2_board_string[0])
    print '-' * 79
    # NOTE(review): `board[0].attack == 0` compares a bound method to 0, so it
    # is always False and the Attack suffix is always shown — confirm intended.
    print 'Player2 Hero: %s, Crystals: %s/%s, Life: %s%s%s%s' % (
        game.player2.hero, game.player2.current_crystals, game.player2.crystals,
        game.player2.board[0].health(game),
        '' if game.player2.armor == 0 else ', Armor : ' + str(game.player2.armor),
        '' if game.player2.weapon == None else ', Weapon : ' +
        str(game.player2.weapon.attack(game)) + '/' + str(game.player2.weapon.durability),
        '' if game.player2.board[0].attack == 0 else ', Attack : ' + str(game.player2.board[0].attack(game)))
    print 'Player2 Hand: %s' % ' | '.join(map(lambda x: x.name, game.player2.hand))
    # wrap the board rows at the 79-column terminal width
    for i in range(len(player2_board_string[0]) / 79 + 1):
        for j in player2_board_string:
            print j[i * 79:(i + 1) * 79]
    for i in range(len(player1_board_string[0]) / 79 + 1):
        for j in player1_board_string:
            print j[i * 79:(i + 1) * 79]
    print 'Player1 Hand: %s' % ' | '.join(map(lambda x: x.name, game.player1.hand))
    print 'Player1 Hero: %s, Crystals: %s/%s, Life: %s%s%s%s' % (
        game.player1.hero, game.player1.current_crystals, game.player1.crystals,
        game.player1.board[0].health(game),
        '' if game.player1.armor == 0 else ', Armor : ' + str(game.player1.armor),
        '' if game.player1.weapon == None else ', Weapon : ' +
        str(game.player1.weapon.attack(game)) + '/' + str(game.player1.weapon.durability),
        '' if game.player1.board[0].attack == 0 else ', Attack : ' + str(game.player1.board[0].attack(game)))
def trigger_effects(game, trigger):
    """Fire every pooled effect for `trigger`; effects returning True are
    spent and removed from the pool.

    Uses a list comprehension instead of filter(): identical under Python 2,
    and keeps effect_pool a real (re-iterable) list under Python 3, where
    filter() would return a one-shot iterator.
    """
    game.effect_pool = [e for e in game.effect_pool if not e(game, trigger)]
def opponent(game, player):
    """Return the other player relative to `player` in this game."""
    return game.enemy if player == game.player else game.player
def lazy(original_class):
    """Class decorator: defer original_class.__init__ until the first execute().

    The wrapped constructor only stashes its arguments; the real __init__
    runs lazily, immediately before the first execute(game) call.
    Mutates and returns original_class.
    """
    real_init = original_class.__init__
    real_execute = original_class.execute

    def deferred_init(self, *args):
        self.is_init = False
        self.init_args = args

    def deferred_execute(self, game):
        if not self.is_init:
            self.is_init = True
            real_init(self, *self.init_args)
        real_execute(self, game)

    original_class.__init__ = deferred_init
    original_class.execute = deferred_execute
    return original_class
def get_logger():
    """Configure the root logger to append to a UTC-timestamped '.hsrep' file
    at INFO level and return it.

    NOTE(review): this grabs the *root* logger and adds a new FileHandler on
    every call — calling it more than once per process duplicates every log
    record across all open .hsrep files. Confirm single-call usage.
    """
    logger = logging.getLogger()
    time = strftime("%Y-%m-%d-%H-%M-%S", gmtime())
    log_file_handler = logging.FileHandler('%s.hsrep' % time)
    logger.addHandler(log_file_handler)
    logger.setLevel(logging.INFO)
    return logger
|
import io
from .messages import Message
from .midifiles_meta import tempo2bpm, bpm2tempo
from .midifiles import MidiTrack, MetaMessage, MidiFile
# A minimal single-track song used by the save/load round-trip tests below.
MESSAGES = [
    Message('program_change', channel=0, program=12, time=0),
    Message('note_on', channel=0, note=64, velocity=64, time=32),
    Message('note_off', channel=0, note=64, velocity=127, time=128),
    MetaMessage('end_of_track', time=0),
]
# The exact standard-MIDI-file encoding of MESSAGES: an 'MThd' header
# (format 1, 1 track, 0x01e0 = 480 ticks per beat) followed by one 'MTrk' chunk.
EXPECTED_BYTES = (b'MThd\x00\x00\x00\x06\x00\x01\x00\x01\x01\xe0'
                  b'MTrk\x00\x00\x00\x10\x00\xc0\x0c \x90@@\x81\x00\x80@\x7f\x00\xff/\x00')
def compare_tracks(track1, track2):
    """Return True if tracks are equal, otherwise False.

    Message equality ignores the time attribute, so each message is paired
    with its time before the lists are compared (length mismatches compare
    unequal automatically).
    """
    def keyed(track):
        return [(msg, msg.time) for msg in track]
    return keyed(track1) == keyed(track2)
def test_tempo2bpm_bpm2tempo():
    """tempo2bpm and bpm2tempo must be exact inverses on known pairs."""
    known_pairs = [
        (20, 3000000),
        (60, 1000000),
        (120, 500000),
        (240, 250000),
    ]
    for bpm, tempo in known_pairs:
        assert tempo2bpm(tempo) == bpm
        assert bpm2tempo(bpm) == tempo
def test_track_slice():
    """Slicing a MidiTrack must yield a MidiTrack, not a plain list."""
    sliced = MidiTrack()[::]
    assert isinstance(sliced, MidiTrack)
def test_track_name():
    """A track's name comes from the first track_name meta message it contains."""
    first = MetaMessage('track_name', name='name1')
    second = MetaMessage('track_name', name='name2')
    assert MidiTrack([first, second]).name == first.name
def test_save_to_bytes():
    """Saving the known MESSAGES track must produce the reference byte stream."""
    midi_file = MidiFile()
    midi_file.tracks.append(MidiTrack(MESSAGES))
    buffer = io.BytesIO()
    midi_file.save(file=buffer)
    assert buffer.getvalue() == EXPECTED_BYTES
def test_load_from_bytes():
    """Loading the reference byte stream must reproduce the MESSAGES track."""
    loaded = MidiFile(file=io.BytesIO(EXPECTED_BYTES))
    assert compare_tracks(loaded.tracks[0], MESSAGES)
|
def happy_numbers(n):
    """Decide whether the number whose decimal digits are in `n` is happy.

    A number is happy when repeatedly replacing it by the sum of the squares
    of its digits eventually reaches 1; reaching a previously seen sum means
    it never will. Prints 'Happy number' or 'Not A happy number' (the
    original interface) and returns None.

    BUG FIX: the old version never reset its running sum between iterations
    and recursed through into_array() while also continuing its own loop,
    double-counting squares and printing multiple verdicts / looping forever.
    This version is self-contained and iterative.
    """
    seen = []
    digits = list(n)
    while True:
        total = sum(d ** 2 for d in digits)
        if total == 1:
            print('Happy number')
            break
        elif total in seen:
            print('Not A happy number')
            break
        else:
            seen.append(total)
            digits = [int(ch) for ch in str(total)]
def into_array(n):
    """Split the integer `n` into its decimal digits and run the happy check."""
    digit_list = [int(ch) for ch in str(n)]
    return happy_numbers(digit_list)
def main():
    """Read an integer from stdin and run the happy-number check on it."""
    x = int(input())
    return into_array(x)
'''
def get_digits(number):
digits = []
while number:
digits.append(number % 10)
number //= 10
digits.reverse()
return digits
def is_happy_number(number):
previous_numbers = []
while True:
digits = get_digits(number)
sum_of_squared_digits = sum(list(map(lambda x: x **2, digits)))
if sum_of_squared_digits == 1:
return True
elif sum_of_squared_digits in previous_numbers:
return False
else:
number = sum_of_squared_digits
previous_numbers.append(number)
def print_happy_number(number):
happy_numbers = []
count = 0
while count < 8:
if is_happy_number(number):
happy_numbers.append(number)
count += 1
number += 1
return happy_numbers
print(print_happy_number(int(input())))
''' |
import socket
import struct
import hashlib
import json
import blng.LogHandler as LogHandler
"""
This class provides a nice interface to read data from a multicast
socket. The payload is a dictionary in json format padded to exactly
1200 bytes.
"""
class Multicast:
    """Send/receive fixed-size multicast datagrams.

    Each payload is a dict serialized as JSON and space-padded to exactly
    1200 bytes. A '_checksum' field — SHA-1 over the message (with the
    checksum field blanked) plus a shared salt — lets receivers detect
    corruption.
    """
    DISABLE_CHECKSUM = True  # when True, messages are delivered even if verification would fail
    CHECKSUM = "ABFJDSGF"    # shared salt appended before hashing
    MCAST_GROUP = "239.232.168.250"
    MCAST_PORT = 5000

    def __init__(self, mcast_port=0, log_component=''):
        self.log = LogHandler.LogHandler(log_component + 'Multicast')
        self.sendSocket = None
        if mcast_port:
            # shadow the class default with an instance-specific port
            self.MCAST_PORT = mcast_port

    def _open_mcast_write_socket(self):
        """Open a UDP socket used to broadcast messages."""
        self.log.info('Opening multicast send socket')
        self.sendSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
        self.sendSocket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 3)

    def _verify_checksum(self, controlMessage, port='Unknown'):
        """Return True if controlMessage's '_checksum' matches a recomputed one.

        Mutates controlMessage: '_checksum' is reset to the blank placeholder
        the sender used when computing the digest.
        """
        received_checksum = controlMessage['_checksum']
        controlMessage['_checksum'] = " "
        pre_verify = "%s%s" % (controlMessage, self.CHECKSUM)
        recalculated_checksum = hashlib.sha1(pre_verify.encode('utf-8')).hexdigest()
        if not recalculated_checksum == received_checksum:
            self.log.err("Checksum mismatch for data on port %s: %s != %s" % (
                port, received_checksum, recalculated_checksum))
        return recalculated_checksum == received_checksum

    def _calculate_and_set_checksum(self, controlMessage):
        """Stamp controlMessage with its SHA-1 checksum and return it (mutated)."""
        # generate the checksum with a blank string first so sender and
        # receiver hash the message with an identical '_checksum' placeholder
        controlMessage['_checksum'] = " "
        checksum = "%s%s" % (controlMessage, self.CHECKSUM)
        # then update the message with the actual checksum
        controlMessage['_checksum'] = hashlib.sha1(checksum.encode('utf-8')).hexdigest()
        return controlMessage

    def send_mcast_message(self, msg, port, app='unknown-app'):
        """Checksum `msg` (a dict; mutated in place), pad it to 1200 bytes and
        send it to the multicast group.

        NOTE(review): the `port` parameter is unused — the instance/class
        MCAST_PORT is used instead; confirm intent.
        """
        if not self.sendSocket:
            self._open_mcast_write_socket()
        controlMessage = msg
        controlMessage['_operation'] = app
        controlMessage = self._calculate_and_set_checksum(controlMessage)
        msg = json.dumps(controlMessage)
        msg = "%s%s" % (msg, " " * (1200 - len(msg)))
        self.sendSocket.sendto(msg.encode('UTF-8'), (self.MCAST_GROUP, self.MCAST_PORT))

    def open_socket(self, callback, port):
        """Join the multicast group, listen for 1200-byte datagrams on `port`
        and fire callback(message) for each decodable one. Blocks forever.
        """
        self.log.info('Opening Multicast Receive Socket %s' % (port))
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, 4)
        sock.bind(('', port))
        mreq = struct.pack("4sl", socket.inet_aton(self.MCAST_GROUP), socket.INADDR_ANY)
        sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
        while True:
            (data, addr) = sock.recvfrom(1200)
            try:
                cm = json.loads(data)
                # BUG FIX: '_checksum' used to be blanked *before* calling
                # _verify_checksum, so verification could never succeed;
                # _verify_checksum handles the placeholder substitution itself.
                if self.DISABLE_CHECKSUM or self._verify_checksum(cm, port):
                    callback(cm)
            except (ValueError, KeyError):
                # BUG FIX: was `except ImportError`, which json.loads never
                # raises. Malformed JSON raises ValueError (JSONDecodeError);
                # a payload missing '_checksum' raises KeyError.
                self.log.debug("Error decoding input message\n%s" % (data))
|
#!/usr/bin/python
import os
from sys import argv as args

# Launch 10 background copies of run_experiment.py sharing one hyperparameter
# set (Python 2 script). Positional CLI args override the defaults in order:
#   argv: [epochs] [minibatch size] [learning rate] [hidden layer size]
# and a '.' in any slot keeps that slot's default.

# Default settings
EPOCHS = 5
MB_SIZE = 16
ETA = .9
HIDDEN_LAYER = 30

if len(args) > 1:
    if args[1] != '.': EPOCHS = int(args[1])
if len(args) > 2:
    if args[2] != '.': MB_SIZE = int(args[2])
if len(args) > 3:
    if args[3] != '.': ETA = float(args[3])
if len(args) > 4:
    if args[4] != '.': HIDDEN_LAYER = int(args[4])

# start from an empty results file
os.system("rm results.csv")
os.system("touch results.csv")
for i in range(10):
    print "Running test number %s..." % i
    # nohup + '&' runs all 10 experiments in parallel, each logging to test<i>.out
    os.system("nohup ./run_experiment.py %s %s %s %s > test%s.out &" %
              (EPOCHS, MB_SIZE, ETA, HIDDEN_LAYER, i))
os.system("jobs")
|
# Copyright (c) Yuta Saito, Yusuke Narita, and ZOZO Technologies, Inc. All rights reserved.
# Licensed under the Apache 2.0 License.
"""Off-Policy Estimators."""
from abc import ABCMeta
from abc import abstractmethod
from dataclasses import dataclass
from typing import Dict
from typing import Optional
import numpy as np
from sklearn.utils import check_scalar
from ..utils import check_array
from ..utils import check_ope_inputs
from ..utils import estimate_confidence_interval_by_bootstrap
from .helper import estimate_bias_in_ope
from .helper import estimate_high_probability_upper_bound_bias
@dataclass
class BaseOffPolicyEstimator(metaclass=ABCMeta):
    """Base class for OPE estimators.

    Concrete estimators implement the three-method contract below:
    per-round reward estimation, a point estimate of the policy value,
    and a bootstrap confidence interval.
    """

    @abstractmethod
    def _estimate_round_rewards(self) -> np.ndarray:
        """Estimate round-wise (or sample-wise) rewards."""
        raise NotImplementedError

    @abstractmethod
    def estimate_policy_value(self) -> float:
        """Estimate the policy value of evaluation policy."""
        raise NotImplementedError

    @abstractmethod
    def estimate_interval(self) -> Dict[str, float]:
        """Estimate confidence interval of policy value by nonparametric bootstrap procedure."""
        raise NotImplementedError
@dataclass
class ReplayMethod(BaseOffPolicyEstimator):
    """Replay Method (RM).

    Note
    ----
    RM estimates the policy value of a (deterministic) evaluation policy
    :math:`\\pi_e` as the average reward over the rounds in which the
    evaluation policy chooses the same action as the logged behavior policy:

    .. math::
        \\hat{V}_{\\mathrm{RM}} (\\pi_e; \\mathcal{D}) :=
        \\frac{\\mathbb{E}_{\\mathcal{D}}[\\mathbb{I} \\{ \\pi_e (x_t) = a_t \\} r_t ]}{\\mathbb{E}_{\\mathcal{D}}[\\mathbb{I} \\{ \\pi_e (x_t) = a_t \\}]},

    where :math:`\\mathcal{D}=\\{(x_t,a_t,r_t)\\}_{t=1}^{T}` is logged bandit
    feedback collected by a behavior policy :math:`\\pi_b` and
    :math:`\\mathbb{E}_{\\mathcal{D}}[\\cdot]` is the empirical average over
    its :math:`T` rounds.

    Parameters
    ----------
    estimator_name: str, default='rm'.
        Name of the estimator.

    References
    ----------
    Lihong Li, Wei Chu, John Langford, and Xuanhui Wang.
    "Unbiased Offline Evaluation of Contextual-bandit-based News Article Recommendation Algorithms.", 2011.
    """

    estimator_name: str = "rm"

    def _estimate_round_rewards(
        self,
        reward: np.ndarray,
        action: np.ndarray,
        action_dist: np.ndarray,
        position: Optional[np.ndarray] = None,
        **kwargs,
    ) -> np.ndarray:
        """Estimate round-wise (or sample-wise) rewards.

        Parameters
        ----------
        reward: array-like, shape (n_rounds,)
            Reward observed in each round, i.e., :math:`r_t`.
        action: array-like, shape (n_rounds,)
            Action sampled by the behavior policy in each round, i.e., :math:`a_t`.
        action_dist: array-like, shape (n_rounds, n_actions, len_list)
            Action choice probabilities of the evaluation policy
            (must be deterministic), i.e., :math:`\\pi_e(a_t|x_t)`.
        position: array-like, shape (n_rounds,), default=None
            Position of the recommendation interface where the action was
            presented. When None, every round is treated as position 0.

        Returns
        -------
        estimated_rewards: array-like, shape (n_rounds,)
            Rewards of each round estimated by the Replay Method.
        """
        if position is None:
            position = np.zeros(action_dist.shape[0], dtype=int)
        action_match = np.array(
            action_dist[np.arange(action.shape[0]), action, position] == 1
        )
        # float zeros (previously bool via zeros_like on a bool mask) so an
        # all-miss batch still yields a numeric reward array
        estimated_rewards = np.zeros_like(action_match, dtype=float)
        if action_match.sum() > 0.0:
            estimated_rewards = action_match * reward / action_match.mean()
        return estimated_rewards

    def estimate_policy_value(
        self,
        reward: np.ndarray,
        action: np.ndarray,
        action_dist: np.ndarray,
        position: Optional[np.ndarray] = None,
        **kwargs,
    ) -> float:
        """Estimate the policy value of the evaluation policy.

        Parameters
        ----------
        reward: array-like, shape (n_rounds,)
            Reward observed in each round, i.e., :math:`r_t`.
        action: array-like, shape (n_rounds,)
            Action sampled by the behavior policy in each round, i.e., :math:`a_t`.
        action_dist: array-like, shape (n_rounds, n_actions, len_list)
            Action choice probabilities of the evaluation policy
            (must be deterministic), i.e., :math:`\\pi_e(a_t|x_t)`.
        position: array-like, shape (n_rounds,), default=None
            Position of the recommendation interface where the action was
            presented. When None, every round is treated as position 0.

        Returns
        -------
        V_hat: float
            Estimated policy value (performance) of a given evaluation policy.
        """
        check_array(array=reward, name="reward", expected_dim=1)
        check_array(array=action, name="action", expected_dim=1)
        check_ope_inputs(
            action_dist=action_dist, position=position, action=action, reward=reward
        )
        if position is None:
            position = np.zeros(action_dist.shape[0], dtype=int)
        return self._estimate_round_rewards(
            reward=reward,
            action=action,
            position=position,
            action_dist=action_dist,
        ).mean()

    def estimate_interval(
        self,
        reward: np.ndarray,
        action: np.ndarray,
        action_dist: np.ndarray,
        position: Optional[np.ndarray] = None,
        alpha: float = 0.05,
        n_bootstrap_samples: int = 10000,  # fixed: was 100, contradicting the
        # documented default and the 10000 used by the sibling estimators
        random_state: Optional[int] = None,
        **kwargs,
    ) -> Dict[str, float]:
        """Estimate a confidence interval of the policy value by nonparametric bootstrap.

        Parameters
        ----------
        reward: array-like, shape (n_rounds,)
            Reward observed in each round, i.e., :math:`r_t`.
        action: array-like, shape (n_rounds,)
            Action sampled by the behavior policy in each round, i.e., :math:`a_t`.
        action_dist: array-like, shape (n_rounds, n_actions, len_list)
            Action choice probabilities of the evaluation policy
            (must be deterministic), i.e., :math:`\\pi_e(a_t|x_t)`.
        position: array-like, shape (n_rounds,), default=None
            Position of the recommendation interface where the action was
            presented. When None, every round is treated as position 0.
        alpha: float, default=0.05
            Significance level.
        n_bootstrap_samples: int, default=10000
            Number of resampling performed in the bootstrap procedure.
        random_state: int, default=None
            Controls the random seed in bootstrap sampling.

        Returns
        -------
        estimated_confidence_interval: Dict[str, float]
            Dictionary storing the estimated mean and upper-lower confidence bounds.
        """
        check_array(array=reward, name="reward", expected_dim=1)
        check_array(array=action, name="action", expected_dim=1)
        check_ope_inputs(
            action_dist=action_dist, position=position, action=action, reward=reward
        )
        if position is None:
            position = np.zeros(action_dist.shape[0], dtype=int)
        estimated_round_rewards = self._estimate_round_rewards(
            reward=reward,
            action=action,
            position=position,
            action_dist=action_dist,
        )
        return estimate_confidence_interval_by_bootstrap(
            samples=estimated_round_rewards,
            alpha=alpha,
            n_bootstrap_samples=n_bootstrap_samples,
            random_state=random_state,
        )
@dataclass
class InverseProbabilityWeighting(BaseOffPolicyEstimator):
    """Inverse Probability Weighting (IPW) Estimator.

    Note
    ----
    IPW estimates the policy value of evaluation policy :math:`\\pi_e` by

    .. math::
        \\hat{V}_{\\mathrm{IPW}} (\\pi_e; \\mathcal{D}) := \\mathbb{E}_{\\mathcal{D}} [ w(x_t,a_t) r_t],

    where :math:`\\mathcal{D}=\\{(x_t,a_t,r_t)\\}_{t=1}^{T}` is logged bandit
    feedback collected by a behavior policy :math:`\\pi_b`,
    :math:`w(x,a):=\\pi_e (a|x)/\\pi_b (a|x)` is the importance weight, and
    :math:`\\mathbb{E}_{\\mathcal{D}}[\\cdot]` is the empirical average.
    When weight-clipping is applied, a large importance weight is clipped as
    :math:`\\hat{w}(x,a) := \\min \\{ \\lambda, w(x,a) \\}` where
    :math:`\\lambda (>0)` is the maximum allowed importance weight.

    IPW re-weights the rewards by the ratio of the evaluation and behavior
    policies. With a known behavior policy it is unbiased and consistent, but
    its variance can be large when the two policies differ substantially.

    Parameters
    ----------
    lambda_: float, default=np.inf
        A maximum possible value of the importance weight.
        When a positive finite value is given, importance weights larger than
        `lambda_` will be clipped.
    estimator_name: str, default='ipw'.
        Name of the estimator.

    References
    ----------
    Alex Strehl, John Langford, Lihong Li, and Sham M Kakade.
    "Learning from Logged Implicit Exploration Data"., 2010.
    Miroslav Dudík, Dumitru Erhan, John Langford, and Lihong Li.
    "Doubly Robust Policy Evaluation and Optimization.", 2014.
    Yi Su, Maria Dimakopoulou, Akshay Krishnamurthy, and Miroslav Dudik.
    "Doubly Robust Off-Policy Evaluation with Shrinkage.", 2020.
    """

    lambda_: float = np.inf
    estimator_name: str = "ipw"

    def __post_init__(self) -> None:
        """Validate the clipping hyperparameter."""
        check_scalar(
            self.lambda_,
            name="lambda_",
            target_type=(int, float),
            min_val=0.0,
        )
        # NaN compares unequal to itself
        if self.lambda_ != self.lambda_:
            raise ValueError("lambda_ must not be nan")

    def _estimate_round_rewards(
        self,
        reward: np.ndarray,
        action: np.ndarray,
        pscore: np.ndarray,
        action_dist: np.ndarray,
        position: Optional[np.ndarray] = None,
        **kwargs,
    ) -> np.ndarray:
        """Estimate round-wise (or sample-wise) rewards.

        Parameters
        ----------
        reward: array-like or Tensor, shape (n_rounds,)
            Reward observed in each round, i.e., :math:`r_t`.
        action: array-like or Tensor, shape (n_rounds,)
            Action sampled by the behavior policy in each round, i.e., :math:`a_t`.
        pscore: array-like or Tensor, shape (n_rounds,)
            Action choice probabilities of the behavior policy
            (propensity scores), i.e., :math:`\\pi_b(a_t|x_t)`.
        action_dist: array-like or Tensor, shape (n_rounds, n_actions, len_list)
            Action choice probabilities of the evaluation policy
            (can be deterministic), i.e., :math:`\\pi_e(a_t|x_t)`.
        position: array-like or Tensor, shape (n_rounds,), default=None
            Position of the recommendation interface where the action was
            presented. When None, every round is treated as position 0.

        Returns
        -------
        estimated_rewards: array-like or Tensor, shape (n_rounds,)
            Rewards of each round estimated by IPW.
        """
        if position is None:
            position = np.zeros(action_dist.shape[0], dtype=int)
        iw = action_dist[np.arange(action.shape[0]), action, position] / pscore
        # weight clipping (only for ndarrays; Tensors pass through unclipped)
        if isinstance(iw, np.ndarray):
            iw = np.minimum(iw, self.lambda_)
        return reward * iw

    def estimate_policy_value(
        self,
        reward: np.ndarray,
        action: np.ndarray,
        pscore: np.ndarray,
        action_dist: np.ndarray,
        position: Optional[np.ndarray] = None,
        **kwargs,
    ) -> np.ndarray:
        """Estimate the policy value of the evaluation policy.

        Parameters
        ----------
        reward: array-like, shape (n_rounds,)
            Reward observed in each round, i.e., :math:`r_t`.
        action: array-like, shape (n_rounds,)
            Action sampled by the behavior policy in each round, i.e., :math:`a_t`.
        pscore: array-like, shape (n_rounds,)
            Action choice probabilities of the behavior policy
            (propensity scores), i.e., :math:`\\pi_b(a_t|x_t)`.
        action_dist: array-like, shape (n_rounds, n_actions, len_list)
            Action choice probabilities of the evaluation policy
            (can be deterministic), i.e., :math:`\\pi_e(a_t|x_t)`.
        position: array-like, shape (n_rounds,), default=None
            Position of the recommendation interface where the action was
            presented. When None, every round is treated as position 0.

        Returns
        -------
        V_hat: float
            Estimated policy value (performance) of a given evaluation policy.
        """
        check_array(array=reward, name="reward", expected_dim=1)
        check_array(array=action, name="action", expected_dim=1)
        check_array(array=pscore, name="pscore", expected_dim=1)
        check_ope_inputs(
            action_dist=action_dist,
            position=position,
            action=action,
            reward=reward,
            pscore=pscore,
        )
        if position is None:
            position = np.zeros(action_dist.shape[0], dtype=int)
        return self._estimate_round_rewards(
            reward=reward,
            action=action,
            position=position,
            pscore=pscore,
            action_dist=action_dist,
        ).mean()

    def estimate_interval(
        self,
        reward: np.ndarray,
        action: np.ndarray,
        pscore: np.ndarray,
        action_dist: np.ndarray,
        position: Optional[np.ndarray] = None,
        alpha: float = 0.05,
        n_bootstrap_samples: int = 10000,
        random_state: Optional[int] = None,
        **kwargs,
    ) -> Dict[str, float]:
        """Estimate a confidence interval of the policy value by nonparametric bootstrap.

        Parameters
        ----------
        reward: array-like, shape (n_rounds,)
            Reward observed in each round, i.e., :math:`r_t`.
        action: array-like, shape (n_rounds,)
            Action sampled by the behavior policy in each round, i.e., :math:`a_t`.
        pscore: array-like, shape (n_rounds,)
            Action choice probabilities of the behavior policy
            (propensity scores), i.e., :math:`\\pi_b(a_t|x_t)`.
        action_dist: array-like, shape (n_rounds, n_actions, len_list)
            Action choice probabilities of the evaluation policy
            (can be deterministic), i.e., :math:`\\pi_e(a_t|x_t)`.
        position: array-like, shape (n_rounds,), default=None
            Position of the recommendation interface where the action was
            presented. When None, every round is treated as position 0.
        alpha: float, default=0.05
            Significance level.
        n_bootstrap_samples: int, default=10000
            Number of resampling performed in the bootstrap procedure.
        random_state: int, default=None
            Controls the random seed in bootstrap sampling.

        Returns
        -------
        estimated_confidence_interval: Dict[str, float]
            Dictionary storing the estimated mean and upper-lower confidence bounds.
        """
        check_array(array=reward, name="reward", expected_dim=1)
        check_array(array=action, name="action", expected_dim=1)
        check_array(array=pscore, name="pscore", expected_dim=1)
        check_ope_inputs(
            action_dist=action_dist,
            position=position,
            action=action,
            reward=reward,
            pscore=pscore,
        )
        if position is None:
            position = np.zeros(action_dist.shape[0], dtype=int)
        estimated_round_rewards = self._estimate_round_rewards(
            reward=reward,
            action=action,
            position=position,
            pscore=pscore,
            action_dist=action_dist,
        )
        return estimate_confidence_interval_by_bootstrap(
            samples=estimated_round_rewards,
            alpha=alpha,
            n_bootstrap_samples=n_bootstrap_samples,
            random_state=random_state,
        )

    def _estimate_mse_score(
        self,
        reward: np.ndarray,
        action: np.ndarray,
        pscore: np.ndarray,
        action_dist: np.ndarray,
        position: Optional[np.ndarray] = None,
        use_bias_upper_bound: bool = True,
        delta: float = 0.05,
        **kwargs,
    ) -> float:
        """Estimate the MSE score of the clipping hyperparameter for tuning.

        Parameters
        ----------
        reward: array-like, shape (n_rounds,)
            Reward observed in each round, i.e., :math:`r_t`.
        action: array-like, shape (n_rounds,)
            Action sampled by the behavior policy in each round, i.e., :math:`a_t`.
        pscore: array-like, shape (n_rounds,)
            Action choice probabilities of the behavior policy
            (propensity scores), i.e., :math:`\\pi_b(a_t|x_t)`.
        action_dist: array-like, shape (n_rounds, n_actions, len_list)
            Action choice probabilities of the evaluation policy
            (can be deterministic), i.e., :math:`\\pi_e(a_t|x_t)`.
        position: array-like, shape (n_rounds,), default=None
            Position of the recommendation interface where the action was presented.
        use_bias_upper_bound: bool, default=True
            Whether to use the bias upper bound in hyperparameter tuning.
            If False, the direct bias estimator is used to estimate the MSE.
        delta: float, default=0.05
            Confidence delta for the high-probability upper bound based on
            the Bernstein's inequality.

        Returns
        -------
        estimated_mse_score: float
            Estimated MSE score (sample variance plus squared (upper bound of)
            bias) of the clipping hyperparameter `lambda_`, following the
            automatic tuning procedure of Section 5 of Su et al.(2020).
        """
        n_rounds = reward.shape[0]
        # BUG FIX: guard position=None like every other method; previously a
        # None position crashed the fancy indexing below.
        if position is None:
            position = np.zeros(n_rounds, dtype=int)
        # estimate the sample variance of IPW with clipping
        sample_variance = np.var(
            self._estimate_round_rewards(
                reward=reward,
                action=action,
                pscore=pscore,
                action_dist=action_dist,
                position=position,
            )
        )
        sample_variance /= n_rounds
        # estimate the (high probability) upper bound of the bias of IPW with clipping
        iw = action_dist[np.arange(n_rounds), action, position] / pscore
        if use_bias_upper_bound:
            bias_term = estimate_high_probability_upper_bound_bias(
                reward=reward, iw=iw, iw_hat=np.minimum(iw, self.lambda_), delta=delta
            )
        else:
            bias_term = estimate_bias_in_ope(
                reward=reward,
                iw=iw,
                iw_hat=np.minimum(iw, self.lambda_),
            )
        estimated_mse_score = sample_variance + (bias_term ** 2)
        return estimated_mse_score
@dataclass
class SelfNormalizedInverseProbabilityWeighting(InverseProbabilityWeighting):
    """Self-Normalized Inverse Probability Weighting (SNIPW) Estimator.

    Note
    -------
    Self-Normalized Inverse Probability Weighting (SNIPW) estimates the policy value of evaluation policy :math:`\\pi_e` by

    .. math::

        \\hat{V}_{\\mathrm{SNIPW}} (\\pi_e; \\mathcal{D}) :=
        \\frac{\\mathbb{E}_{\\mathcal{D}} [w(x_t,a_t) r_t]}{ \\mathbb{E}_{\\mathcal{D}} [w(x_t,a_t)]},

    where :math:`\\mathcal{D}=\\{(x_t,a_t,r_t)\\}_{t=1}^{T}` is logged bandit feedback data with :math:`T` rounds collected by
    a behavior policy :math:`\\pi_b`. :math:`w(x,a):=\\pi_e (a|x)/\\pi_b (a|x)` is the importance weight given :math:`x` and :math:`a`.
    :math:`\\mathbb{E}_{\\mathcal{D}}[\\cdot]` is the empirical average over :math:`T` observations in :math:`\\mathcal{D}`.

    SNIPW re-weights the observed rewards by the self-normalized importance weight.
    This estimator is not unbiased even when the behavior policy is known.
    However, it is still consistent for the true policy value and increases the stability in some senses.
    See the references for the detailed discussions.

    Parameters
    ----------
    estimator_name: str, default='snipw'.
        Name of the estimator.

    References
    ----------
    Adith Swaminathan and Thorsten Joachims.
    "The Self-normalized Estimator for Counterfactual Learning.", 2015.

    Nathan Kallus and Masatoshi Uehara.
    "Intrinsically Efficient, Stable, and Bounded Off-Policy Evaluation for Reinforcement Learning.", 2019.

    """

    estimator_name: str = "snipw"

    def _estimate_round_rewards(
        self,
        reward: np.ndarray,
        action: np.ndarray,
        pscore: np.ndarray,
        action_dist: np.ndarray,
        position: Optional[np.ndarray] = None,
        **kwargs,
    ) -> np.ndarray:
        """Estimate round-wise (or sample-wise) rewards.

        Parameters
        ----------
        reward: array-like or Tensor, shape (n_rounds,)
            Reward observed in each round of the logged bandit feedback, i.e., :math:`r_t`.

        action: array-like or Tensor, shape (n_rounds,)
            Action sampled by behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.

        pscore: array-like or Tensor, shape (n_rounds,)
            Action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\pi_b(a_t|x_t)`.

        action_dist: array-like or Tensor, shape (n_rounds, n_actions, len_list)
            Action choice probabilities of evaluation policy (can be deterministic), i.e., :math:`\\pi_e(a_t|x_t)`.

        position: array-like or Tensor, shape (n_rounds,), default=None
            Position of recommendation interface where action was presented in each round of the given logged bandit data.

        Returns
        ----------
        estimated_rewards: array-like or Tensor, shape (n_rounds,)
            Rewards of each round estimated by the SNIPW estimator.

        """
        # treat all observations as presented at position 0 when no position info is given
        if position is None:
            position = np.zeros(action_dist.shape[0], dtype=int)
        round_indices = np.arange(action.shape[0])
        # vanilla importance weight w(x_t, a_t) = pi_e(a_t|x_t) / pi_b(a_t|x_t)
        importance_weight = action_dist[round_indices, action, position] / pscore
        # self-normalization: divide by the empirical mean of the importance weights
        return reward * importance_weight / importance_weight.mean()
@dataclass
class DirectMethod(BaseOffPolicyEstimator):
    """Direct Method (DM).

    Note
    -------
    DM first learns a supervised machine learning model, such as ridge regression and gradient boosting,
    to estimate the mean reward function (:math:`q(x,a) = \\mathbb{E}[r|x,a]`).
    It then uses it to estimate the policy value as follows.

    .. math::

        \\hat{V}_{\\mathrm{DM}} (\\pi_e; \\mathcal{D}, \\hat{q})
        &:= \\mathbb{E}_{\\mathcal{D}} \\left[ \\sum_{a \\in \\mathcal{A}} \\hat{q} (x_t,a) \\pi_e(a|x_t) \\right], \\\\
        & = \\mathbb{E}_{\\mathcal{D}}[\\hat{q} (x_t,\\pi_e)],

    where :math:`\\mathcal{D}=\\{(x_t,a_t,r_t)\\}_{t=1}^{T}` is logged bandit feedback data with :math:`T` rounds collected by
    a behavior policy :math:`\\pi_b`. :math:`\\mathbb{E}_{\\mathcal{D}}[\\cdot]` is the empirical average over :math:`T` observations in :math:`\\mathcal{D}`.
    :math:`\\hat{q} (x,a)` is an estimated expected reward given :math:`x` and :math:`a`.
    :math:`\\hat{q} (x_t,\\pi):= \\mathbb{E}_{a \\sim \\pi(a|x)}[\\hat{q}(x,a)]` is the expectation of the estimated reward function over :math:`\\pi`.
    To estimate the mean reward function, please use `obp.ope.regression_model.RegressionModel`, which supports several fitting methods specific to OPE.

    If the regression model (:math:`\\hat{q}`) is a good approximation to the true mean reward function,
    this estimator accurately estimates the policy value of the evaluation policy.
    If the regression function fails to approximate the mean reward function well,
    however, the final estimator is no longer consistent.

    Parameters
    ----------
    estimator_name: str, default='dm'.
        Name of the estimator.

    References
    ----------
    Alina Beygelzimer and John Langford.
    "The offset tree for learning with partial labels.", 2009.

    Miroslav Dudík, Dumitru Erhan, John Langford, and Lihong Li.
    "Doubly Robust Policy Evaluation and Optimization.", 2014.

    """

    estimator_name: str = "dm"

    def _estimate_round_rewards(
        self,
        action_dist: np.ndarray,
        estimated_rewards_by_reg_model: np.ndarray,
        position: Optional[np.ndarray] = None,
        **kwargs,
    ) -> np.ndarray:
        """Estimate round-wise (or sample-wise) rewards.

        Parameters
        ----------
        action_dist: array-like or Tensor, shape (n_rounds, n_actions, len_list)
            Action choice probabilities of evaluation policy (can be deterministic), i.e., :math:`\\pi_e(a_t|x_t)`.

        estimated_rewards_by_reg_model: array-like or Tensor, shape (n_rounds, n_actions, len_list)
            Expected rewards given context, action, and position estimated by regression model, i.e., :math:`\\hat{q}(x_t,a_t)`.

        position: array-like or Tensor, shape (n_rounds,), default=None
            Position of recommendation interface where action was presented in each round of the given logged bandit data.
            When None is given, the effect of position on the reward will be ignored.
            (If only one action is chosen and there is no position, then you can just ignore this argument.)

        Returns
        ----------
        estimated_rewards: array-like or Tensor, shape (n_rounds,)
            Rewards of each round estimated by the DM estimator.

        """
        # treat all observations as presented at position 0 when no position info is given
        if position is None:
            position = np.zeros(action_dist.shape[0], dtype=int)
        n_rounds = position.shape[0]
        q_hat_at_position = estimated_rewards_by_reg_model[
            np.arange(n_rounds), :, position
        ]
        pi_e_at_position = action_dist[np.arange(n_rounds), :, position]
        if isinstance(action_dist, np.ndarray):
            # \\hat{q}(x_t, \\pi_e) = \\sum_a \\hat{q}(x_t, a) \\pi_e(a|x_t) for each round
            return np.average(
                q_hat_at_position,
                weights=pi_e_at_position,
                axis=1,
            )
        else:
            # the message previously referenced `action`; this branch rejects a
            # non-ndarray `action_dist`
            raise ValueError("action_dist must be ndarray")

    def estimate_policy_value(
        self,
        action_dist: np.ndarray,
        estimated_rewards_by_reg_model: np.ndarray,
        position: Optional[np.ndarray] = None,
        **kwargs,
    ) -> float:
        """Estimate the policy value of evaluation policy.

        Parameters
        ----------
        action_dist: array-like, shape (n_rounds, n_actions, len_list)
            Action choice probabilities of evaluation policy (can be deterministic), i.e., :math:`\\pi_e(a_t|x_t)`.

        estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)
            Expected rewards given context, action, and position estimated by regression model, i.e., :math:`\\hat{q}(x_t,a_t)`.

        position: array-like, shape (n_rounds,), default=None
            Position of recommendation interface where action was presented in each round of the given logged bandit data.
            When None is given, the effect of position on the reward will be ignored.
            (If only one action is chosen and there is no position, then you can just ignore this argument.)

        Returns
        ----------
        V_hat: float
            Estimated policy value (performance) of a given evaluation policy.

        """
        check_array(
            array=estimated_rewards_by_reg_model,
            name="estimated_rewards_by_reg_model",
            expected_dim=3,
        )
        check_ope_inputs(
            action_dist=action_dist,
            estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
            position=position,
        )
        if position is None:
            position = np.zeros(action_dist.shape[0], dtype=int)
        return self._estimate_round_rewards(
            position=position,
            estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
            action_dist=action_dist,
        ).mean()

    def estimate_interval(
        self,
        action_dist: np.ndarray,
        estimated_rewards_by_reg_model: np.ndarray,
        position: Optional[np.ndarray] = None,
        alpha: float = 0.05,
        n_bootstrap_samples: int = 10000,
        random_state: Optional[int] = None,
        **kwargs,
    ) -> Dict[str, float]:
        """Estimate confidence interval of policy value by nonparametric bootstrap procedure.

        Parameters
        ----------
        action_dist: array-like, shape (n_rounds, n_actions, len_list)
            Action choice probabilities of evaluation policy (can be deterministic), i.e., :math:`\\pi_e(a_t|x_t)`.

        estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)
            Expected rewards given context, action, and position estimated by regression model, i.e., :math:`\\hat{q}(x_t,a_t)`.

        position: array-like, shape (n_rounds,), default=None
            Position of recommendation interface where action was presented in each round of the given logged bandit data.
            When None is given, the effect of position on the reward will be ignored.
            (If only one action is chosen and there is no position, then you can just ignore this argument.)

        alpha: float, default=0.05
            Significance level.

        n_bootstrap_samples: int, default=10000
            Number of resampling performed in the bootstrap procedure.

        random_state: int, default=None
            Controls the random seed in bootstrap sampling.

        Returns
        ----------
        estimated_confidence_interval: Dict[str, float]
            Dictionary storing the estimated mean and upper-lower confidence bounds.

        """
        check_array(
            array=estimated_rewards_by_reg_model,
            name="estimated_rewards_by_reg_model",
            expected_dim=3,
        )
        check_ope_inputs(
            action_dist=action_dist,
            estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
            position=position,
        )
        if position is None:
            position = np.zeros(action_dist.shape[0], dtype=int)
        estimated_round_rewards = self._estimate_round_rewards(
            position=position,
            estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
            action_dist=action_dist,
        )
        return estimate_confidence_interval_by_bootstrap(
            samples=estimated_round_rewards,
            alpha=alpha,
            n_bootstrap_samples=n_bootstrap_samples,
            random_state=random_state,
        )
@dataclass
class DoublyRobust(BaseOffPolicyEstimator):
    """Doubly Robust (DR) Estimator.

    Note
    -------
    Similar to DM, DR first learns a supervised machine learning model, such as ridge regression and gradient boosting,
    to estimate the mean reward function (:math:`q(x,a) = \\mathbb{E}[r|x,a]`).
    It then uses it to estimate the policy value as follows.

    .. math::

        \\hat{V}_{\\mathrm{DR}} (\\pi_e; \\mathcal{D}, \\hat{q})
        := \\mathbb{E}_{\\mathcal{D}}[\\hat{q}(x_t,\\pi_e) + w(x_t,a_t) (r_t - \\hat{q}(x_t,a_t))],

    where :math:`\\mathcal{D}=\\{(x_t,a_t,r_t)\\}_{t=1}^{T}` is logged bandit feedback data with :math:`T` rounds collected by
    a behavior policy :math:`\\pi_b`.
    :math:`w(x,a):=\\pi_e (a|x)/\\pi_b (a|x)` is the importance weight given :math:`x` and :math:`a`.
    :math:`\\mathbb{E}_{\\mathcal{D}}[\\cdot]` is the empirical average over :math:`T` observations in :math:`\\mathcal{D}`.
    :math:`\\hat{q} (x,a)` is an estimated expected reward given :math:`x` and :math:`a`.
    :math:`\\hat{q} (x_t,\\pi):= \\mathbb{E}_{a \\sim \\pi(a|x)}[\\hat{q}(x,a)]` is the expectation of the estimated reward function over :math:`\\pi`.

    When the weight-clipping is applied, a large importance weight is clipped as :math:`\\hat{w}(x,a) := \\min \\{ \\lambda, w(x,a) \\}`
    where :math:`\\lambda (>0)` is a hyperparameter that decides a maximum allowed importance weight.

    To estimate the mean reward function, please use `obp.ope.regression_model.RegressionModel`,
    which supports several fitting methods specific to OPE such as *more robust doubly robust*.

    DR mimics IPW to use a weighted version of rewards, but DR also uses the estimated mean reward
    function (the regression model) as a control variate to decrease the variance.
    It preserves the consistency of IPW if either the importance weight or
    the mean reward estimator is accurate (a property called double robustness).
    Moreover, DR is semiparametric efficient when the mean reward estimator is correctly specified.

    Parameters
    ----------
    lambda_: float, default=np.inf
        A maximum possible value of the importance weight.
        When a positive finite value is given, importance weights larger than `lambda_` will be clipped.
        DoublyRobust with a finite positive `lambda_` corresponds to Doubly Robust with Pessimistic Shrinkage of Su et al.(2020) or CAB-DR of Su et al.(2019).

    estimator_name: str, default='dr'.
        Name of the estimator.

    References
    ----------
    Miroslav Dudík, Dumitru Erhan, John Langford, and Lihong Li.
    "Doubly Robust Policy Evaluation and Optimization.", 2014.

    Mehrdad Farajtabar, Yinlam Chow, and Mohammad Ghavamzadeh.
    "More Robust Doubly Robust Off-policy Evaluation.", 2018.

    Yi Su, Lequn Wang, Michele Santacatterina, and Thorsten Joachims.
    "CAB: Continuous Adaptive Blending Estimator for Policy Evaluation and Learning", 2019.

    Yi Su, Maria Dimakopoulou, Akshay Krishnamurthy, and Miroslav Dudík.
    "Doubly robust off-policy evaluation with shrinkage.", 2020.

    """

    lambda_: float = np.inf
    estimator_name: str = "dr"

    def __post_init__(self) -> None:
        """Initialize Class."""
        check_scalar(
            self.lambda_,
            name="lambda_",
            target_type=(int, float),
            min_val=0.0,
        )
        # `x != x` is True only for NaN, which passes check_scalar's range test
        if self.lambda_ != self.lambda_:
            raise ValueError("lambda_ must not be nan")

    def _estimate_round_rewards(
        self,
        reward: np.ndarray,
        action: np.ndarray,
        pscore: np.ndarray,
        action_dist: np.ndarray,
        estimated_rewards_by_reg_model: np.ndarray,
        position: Optional[np.ndarray] = None,
        **kwargs,
    ) -> np.ndarray:
        """Estimate round-wise (or sample-wise) rewards.

        Parameters
        ----------
        reward: array-like or Tensor, shape (n_rounds,)
            Reward observed in each round of the logged bandit feedback, i.e., :math:`r_t`.

        action: array-like or Tensor, shape (n_rounds,)
            Action sampled by behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.

        pscore: array-like or Tensor, shape (n_rounds,)
            Action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\pi_b(a_t|x_t)`.

        action_dist: array-like or Tensor, shape (n_rounds, n_actions, len_list)
            Action choice probabilities of evaluation policy (can be deterministic), i.e., :math:`\\pi_e(a_t|x_t)`.

        estimated_rewards_by_reg_model: array-like or Tensor, shape (n_rounds, n_actions, len_list)
            Expected rewards given context, action, and position estimated by regression model, i.e., :math:`\\hat{q}(x_t,a_t)`.

        position: array-like or Tensor, shape (n_rounds,), default=None
            Position of recommendation interface where action was presented in each round of the given logged bandit data.
            When None is given, the effect of position on the reward will be ignored.
            (If only one action is chosen and there is no position, then you can just ignore this argument.)

        Returns
        ----------
        estimated_rewards: array-like or Tensor, shape (n_rounds,)
            Rewards of each round estimated by the DR estimator.

        """
        if position is None:
            position = np.zeros(action_dist.shape[0], dtype=int)
        n_rounds = action.shape[0]
        iw = action_dist[np.arange(n_rounds), action, position] / pscore
        # weight clipping
        if isinstance(iw, np.ndarray):
            iw = np.minimum(iw, self.lambda_)
        q_hat_at_position = estimated_rewards_by_reg_model[
            np.arange(n_rounds), :, position
        ]
        q_hat_factual = estimated_rewards_by_reg_model[
            np.arange(n_rounds), action, position
        ]
        pi_e_at_position = action_dist[np.arange(n_rounds), :, position]
        if isinstance(reward, np.ndarray):
            # baseline (DM) term: \\hat{q}(x_t, \\pi_e)
            estimated_rewards = np.average(
                q_hat_at_position,
                weights=pi_e_at_position,
                axis=1,
            )
        else:
            # this branch rejects a non-ndarray `reward`; the old message
            # incorrectly claimed a dimensionality problem
            raise ValueError("reward must be ndarray")
        # correction term: w(x_t, a_t) (r_t - \\hat{q}(x_t, a_t))
        estimated_rewards += iw * (reward - q_hat_factual)
        return estimated_rewards

    def estimate_policy_value(
        self,
        reward: np.ndarray,
        action: np.ndarray,
        pscore: np.ndarray,
        action_dist: np.ndarray,
        estimated_rewards_by_reg_model: np.ndarray,
        position: Optional[np.ndarray] = None,
    ) -> float:
        """Estimate the policy value of evaluation policy.

        Parameters
        ----------
        reward: array-like, shape (n_rounds,)
            Reward observed in each round of the logged bandit feedback, i.e., :math:`r_t`.

        action: array-like, shape (n_rounds,)
            Action sampled by behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.

        pscore: array-like, shape (n_rounds,)
            Action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\pi_b(a_t|x_t)`.

        action_dist: array-like, shape (n_rounds, n_actions, len_list)
            Action choice probabilities of evaluation policy (can be deterministic), i.e., :math:`\\pi_e(a_t|x_t)`.

        estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)
            Expected rewards given context, action, and position estimated by regression model, i.e., :math:`\\hat{q}(x_t,a_t)`.

        position: array-like, shape (n_rounds,), default=None
            Position of recommendation interface where action was presented in each round of the given logged bandit data.
            When None is given, the effect of position on the reward will be ignored.
            (If only one action is chosen and there is no position, then you can just ignore this argument.)

        Returns
        ----------
        V_hat: float
            Policy value estimated by the DR estimator.

        """
        check_array(
            array=estimated_rewards_by_reg_model,
            name="estimated_rewards_by_reg_model",
            expected_dim=3,
        )
        check_array(array=reward, name="reward", expected_dim=1)
        check_array(array=action, name="action", expected_dim=1)
        check_array(array=pscore, name="pscore", expected_dim=1)
        check_ope_inputs(
            action_dist=action_dist,
            position=position,
            action=action,
            reward=reward,
            pscore=pscore,
            estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
        )
        if position is None:
            position = np.zeros(action_dist.shape[0], dtype=int)
        return self._estimate_round_rewards(
            reward=reward,
            action=action,
            position=position,
            pscore=pscore,
            action_dist=action_dist,
            estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
        ).mean()

    def estimate_interval(
        self,
        reward: np.ndarray,
        action: np.ndarray,
        pscore: np.ndarray,
        action_dist: np.ndarray,
        estimated_rewards_by_reg_model: np.ndarray,
        position: Optional[np.ndarray] = None,
        alpha: float = 0.05,
        n_bootstrap_samples: int = 10000,
        random_state: Optional[int] = None,
        **kwargs,
    ) -> Dict[str, float]:
        """Estimate confidence interval of policy value by nonparametric bootstrap procedure.

        Parameters
        ----------
        reward: array-like, shape (n_rounds,)
            Reward observed in each round of the logged bandit feedback, i.e., :math:`r_t`.

        action: array-like, shape (n_rounds,)
            Action sampled by behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.

        pscore: array-like, shape (n_rounds,)
            Action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\pi_b(a_t|x_t)`.

        action_dist: array-like, shape (n_rounds, n_actions, len_list)
            Action choice probabilities of evaluation policy (can be deterministic), i.e., :math:`\\pi_e(a_t|x_t)`.

        estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)
            Expected rewards given context, action, and position estimated by regression model, i.e., :math:`\\hat{q}(x_t,a_t)`.

        position: array-like, shape (n_rounds,), default=None
            Position of recommendation interface where action was presented in each round of the given logged bandit data.
            When None is given, the effect of position on the reward will be ignored.
            (If only one action is chosen and there is no position, then you can just ignore this argument.)

        alpha: float, default=0.05
            Significance level.

        n_bootstrap_samples: int, default=10000
            Number of resampling performed in the bootstrap procedure.

        random_state: int, default=None
            Controls the random seed in bootstrap sampling.

        Returns
        ----------
        estimated_confidence_interval: Dict[str, float]
            Dictionary storing the estimated mean and upper-lower confidence bounds.

        """
        check_array(
            array=estimated_rewards_by_reg_model,
            name="estimated_rewards_by_reg_model",
            expected_dim=3,
        )
        check_array(array=reward, name="reward", expected_dim=1)
        check_array(array=action, name="action", expected_dim=1)
        check_array(array=pscore, name="pscore", expected_dim=1)
        check_ope_inputs(
            action_dist=action_dist,
            position=position,
            action=action,
            reward=reward,
            pscore=pscore,
            estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
        )
        if position is None:
            position = np.zeros(action_dist.shape[0], dtype=int)
        estimated_round_rewards = self._estimate_round_rewards(
            reward=reward,
            action=action,
            position=position,
            pscore=pscore,
            action_dist=action_dist,
            estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
        )
        return estimate_confidence_interval_by_bootstrap(
            samples=estimated_round_rewards,
            alpha=alpha,
            n_bootstrap_samples=n_bootstrap_samples,
            random_state=random_state,
        )

    def _estimate_mse_score(
        self,
        reward: np.ndarray,
        action: np.ndarray,
        pscore: np.ndarray,
        action_dist: np.ndarray,
        estimated_rewards_by_reg_model: np.ndarray,
        position: Optional[np.ndarray] = None,
        use_bias_upper_bound: bool = True,
        delta: float = 0.05,
    ) -> float:
        """Estimate the MSE score of a given clipping hyperparameter to conduct hyperparameter tuning.

        Parameters
        ----------
        reward: array-like, shape (n_rounds,)
            Reward observed in each round of the logged bandit feedback, i.e., :math:`r_t`.

        action: array-like, shape (n_rounds,)
            Action sampled by behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.

        pscore: array-like, shape (n_rounds,)
            Action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\pi_b(a_t|x_t)`.

        action_dist: array-like, shape (n_rounds, n_actions, len_list)
            Action choice probabilities of evaluation policy (can be deterministic), i.e., :math:`\\pi_e(a_t|x_t)`.

        estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)
            Expected rewards given context, action, and position estimated by regression model, i.e., :math:`\\hat{q}(x_t,a_t)`.

        position: array-like, shape (n_rounds,), default=None
            Position of recommendation interface where action was presented in each round of the given logged bandit data.
            When None is given, the effect of position on the reward will be ignored.
            (If only one action is chosen and there is no position, then you can just ignore this argument.)

        use_bias_upper_bound: bool, default=True
            Whether to use bias upper bound in hyperparameter tuning.
            If False, direct bias estimator is used to estimate the MSE.

        delta: float, default=0.05
            A confidence delta to construct a high probability upper bound based on the Bernstein's inequality.

        Returns
        ----------
        estimated_mse_score: float
            Estimated MSE score of a given clipping hyperparameter `lambda_`.
            MSE score is the sum of (high probability) upper bound of bias and the sample variance.
            This is estimated using the automatic hyperparameter tuning procedure
            based on Section 5 of Su et al.(2020).

        """
        n_rounds = reward.shape[0]
        # default the positions before indexing below; previously a `position` of
        # None reached the fancy-indexing expression and inserted a spurious axis
        if position is None:
            position = np.zeros(n_rounds, dtype=int)
        # estimate the sample variance of DR with clipping
        sample_variance = np.var(
            self._estimate_round_rewards(
                reward=reward,
                action=action,
                pscore=pscore,
                action_dist=action_dist,
                estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
                position=position,
            )
        )
        sample_variance /= n_rounds

        # estimate the (high probability) upper bound of the bias of DR with clipping
        iw = action_dist[np.arange(n_rounds), action, position] / pscore
        if use_bias_upper_bound:
            bias_term = estimate_high_probability_upper_bound_bias(
                reward=reward,
                iw=iw,
                iw_hat=np.minimum(iw, self.lambda_),
                q_hat=estimated_rewards_by_reg_model[
                    np.arange(n_rounds), action, position
                ],
                delta=delta,
            )
        else:
            bias_term = estimate_bias_in_ope(
                reward=reward,
                iw=iw,
                iw_hat=np.minimum(iw, self.lambda_),
                q_hat=estimated_rewards_by_reg_model[
                    np.arange(n_rounds), action, position
                ],
            )
        estimated_mse_score = sample_variance + (bias_term ** 2)
        return estimated_mse_score
@dataclass
class SelfNormalizedDoublyRobust(DoublyRobust):
    """Self-Normalized Doubly Robust (SNDR) Estimator.

    Note
    -------
    Self-Normalized Doubly Robust estimates the policy value of evaluation policy :math:`\\pi_e` by

    .. math::

        \\hat{V}_{\\mathrm{SNDR}} (\\pi_e; \\mathcal{D}, \\hat{q}) :=
        \\mathbb{E}_{\\mathcal{D}} \\left[\\hat{q}(x_t,\\pi_e) + \\frac{w(x_t,a_t) (r_t - \\hat{q}(x_t,a_t))}{\\mathbb{E}_{\\mathcal{D}}[ w(x_t,a_t) ]} \\right],

    where :math:`\\mathcal{D}=\\{(x_t,a_t,r_t)\\}_{t=1}^{T}` is logged bandit feedback data with :math:`T` rounds collected by
    a behavior policy :math:`\\pi_b`. :math:`w(x,a):=\\pi_e (a|x)/\\pi_b (a|x)` is the importance weight given :math:`x` and :math:`a`.
    :math:`\\mathbb{E}_{\\mathcal{D}}[\\cdot]` is the empirical average over :math:`T` observations in :math:`\\mathcal{D}`.
    :math:`\\hat{q} (x,a)` is an estimated expected reward given :math:`x` and :math:`a`.
    :math:`\\hat{q} (x_t,\\pi):= \\mathbb{E}_{a \\sim \\pi(a|x)}[\\hat{q}(x,a)]` is the expectation of the estimated reward function over :math:`\\pi`.
    To estimate the mean reward function, please use `obp.ope.regression_model.RegressionModel`.

    Similar to Self-Normalized Inverse Probability Weighting, SNDR estimator applies the self-normalized importance weighting technique to
    increase the stability of the original Doubly Robust estimator.

    Parameters
    ----------
    estimator_name: str, default='sndr'.
        Name of the estimator.

    References
    ----------
    Miroslav Dudík, Dumitru Erhan, John Langford, and Lihong Li.
    "Doubly Robust Policy Evaluation and Optimization.", 2014.

    Nathan Kallus and Masatoshi Uehara.
    "Intrinsically Efficient, Stable, and Bounded Off-Policy Evaluation for Reinforcement Learning.", 2019.

    """

    estimator_name: str = "sndr"

    def _estimate_round_rewards(
        self,
        reward: np.ndarray,
        action: np.ndarray,
        pscore: np.ndarray,
        action_dist: np.ndarray,
        estimated_rewards_by_reg_model: np.ndarray,
        position: Optional[np.ndarray] = None,
        **kwargs,
    ) -> np.ndarray:
        """Estimate round-wise (or sample-wise) rewards.

        Parameters
        ----------
        reward: array-like or Tensor, shape (n_rounds,)
            Reward observed in each round of the logged bandit feedback, i.e., :math:`r_t`.

        action: array-like or Tensor, shape (n_rounds,)
            Action sampled by behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.

        pscore: array-like or Tensor, shape (n_rounds,)
            Action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\pi_b(a_t|x_t)`.

        action_dist: array-like or Tensor, shape (n_rounds, n_actions, len_list)
            Action choice probabilities of evaluation policy (can be deterministic), i.e., :math:`\\pi_e(a_t|x_t)`.

        estimated_rewards_by_reg_model: array-like or Tensor, shape (n_rounds, n_actions, len_list)
            Expected rewards given context, action, and position estimated by regression model, i.e., :math:`\\hat{q}(x_t,a_t)`.

        position: array-like or Tensor, shape (n_rounds,), default=None
            Position of recommendation interface where action was presented in each round of the given logged bandit data.
            When None is given, the effect of position on the reward will be ignored.
            (If only one action is chosen and there is no position, then you can just ignore this argument.)

        Returns
        ----------
        estimated_rewards: array-like or Tensor, shape (n_rounds,)
            Rewards of each round estimated by the SNDR estimator.

        """
        # default positions to 0 as the parent class does; previously a `position`
        # of None reached the fancy-indexing expression and inserted a spurious axis
        if position is None:
            position = np.zeros(action_dist.shape[0], dtype=int)
        n_rounds = action.shape[0]
        iw = action_dist[np.arange(n_rounds), action, position] / pscore
        q_hat_at_position = estimated_rewards_by_reg_model[
            np.arange(n_rounds), :, position
        ]
        pi_e_at_position = action_dist[np.arange(n_rounds), :, position]
        if isinstance(reward, np.ndarray):
            # baseline (DM) term: \\hat{q}(x_t, \\pi_e)
            estimated_rewards = np.average(
                q_hat_at_position,
                weights=pi_e_at_position,
                axis=1,
            )
        else:
            raise ValueError("reward must be ndarray")
        q_hat_factual = estimated_rewards_by_reg_model[
            np.arange(n_rounds), action, position
        ]
        # self-normalized correction term
        estimated_rewards += iw * (reward - q_hat_factual) / iw.mean()
        return estimated_rewards
@dataclass
class SwitchDoublyRobust(DoublyRobust):
"""Switch Doubly Robust (Switch-DR) Estimator.
Note
-------
Switch-DR aims to reduce the variance of the DR estimator by using direct method when the importance weight is large.
This estimator estimates the policy value of evaluation policy :math:`\\pi_e` by
.. math::
\\hat{V}_{\\mathrm{SwitchDR}} (\\pi_e; \\mathcal{D}, \\hat{q}, \\lambda)
:= \\mathbb{E}_{\\mathcal{D}} [\\hat{q}(x_t,\\pi_e) + w(x_t,a_t) (r_t - \\hat{q}(x_t,a_t)) \\mathbb{I} \\{ w(x_t,a_t) \\le \\lambda \\}],
where :math:`\\mathcal{D}=\\{(x_t,a_t,r_t)\\}_{t=1}^{T}` is logged bandit feedback data with :math:`T` rounds collected by
a behavior policy :math:`\\pi_b`. :math:`w(x,a):=\\pi_e (a|x)/\\pi_b (a|x)` is the importance weight given :math:`x` and :math:`a`.
:math:`\\mathbb{E}_{\\mathcal{D}}[\\cdot]` is the empirical average over :math:`T` observations in :math:`\\mathcal{D}`.
:math:`\\lambda (\\ge 0)` is a switching hyperparameter, which decides the threshold for the importance weight.
:math:`\\hat{q} (x,a)` is an estimated expected reward given :math:`x` and :math:`a`.
:math:`\\hat{q} (x_t,\\pi):= \\mathbb{E}_{a \\sim \\pi(a|x)}[\\hat{q}(x,a)]` is the expectation of the estimated reward function over :math:`\\pi`.
To estimate the mean reward function, please use `obp.ope.regression_model.RegressionModel`.
Parameters
----------
lambda_: float, default=np.inf
Switching hyperparameter. When importance weight is larger than this parameter, DM is applied, otherwise DR is used.
This hyperparameter should be larger than or equal to 0., otherwise it is meaningless.
estimator_name: str, default='switch-dr'.
Name of the estimator.
References
----------
Miroslav Dudík, Dumitru Erhan, John Langford, and Lihong Li.
"Doubly Robust Policy Evaluation and Optimization.", 2014.
Yu-Xiang Wang, Alekh Agarwal, and Miroslav Dudík.
"Optimal and Adaptive Off-policy Evaluation in Contextual Bandits", 2016.
Yi Su, Maria Dimakopoulou, Akshay Krishnamurthy, and Miroslav Dudik.
"Doubly Robust Off-Policy Evaluation with Shrinkage.", 2020.
"""
lambda_: float = np.inf
estimator_name: str = "switch-dr"
def __post_init__(self) -> None:
"""Initialize Class."""
check_scalar(
self.lambda_,
name="lambda_",
target_type=(int, float),
min_val=0.0,
)
if self.lambda_ != self.lambda_:
raise ValueError("lambda_ must not be nan")
def _estimate_round_rewards(
self,
reward: np.ndarray,
action: np.ndarray,
pscore: np.ndarray,
action_dist: np.ndarray,
estimated_rewards_by_reg_model: np.ndarray,
position: Optional[np.ndarray] = None,
**kwargs,
) -> np.ndarray:
"""Estimate round-wise (or sample-wise) rewards.
Parameters
----------
reward: array-like, shape (n_rounds,)
Reward observed in each round of the logged bandit feedback, i.e., :math:`r_t`.
action: array-like, shape (n_rounds,)
Action sampled by behavior policy in each round of the logged bandit feedback, i.e., :math:`a_t`.
pscore: array-like, shape (n_rounds,)
Action choice probabilities of behavior policy (propensity scores), i.e., :math:`\\pi_b(a_t|x_t)`.
action_dist: array-like, shape (n_rounds, n_actions, len_list)
Action choice probabilities of evaluation policy (can be deterministic), i.e., :math:`\\pi_e(a_t|x_t)`.
estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)
Expected rewards given context, action, and position estimated by regression model, i.e., :math:`\\hat{q}(x_t,a_t)`.
position: array-like, shape (n_rounds,), default=None
Position of recommendation interface where action was presented in each round of the given logged bandit data.
When None is given, the effect of position on the reward will be ignored.
(If only one action is chosen and there is no posion, then you can just ignore this argument.)
Returns
----------
estimated_rewards: array-like, shape (n_rounds,)
Rewards of each round estimated by the Switch-DR estimator.
"""
n_rounds = action.shape[0]
iw = action_dist[np.arange(n_rounds), action, position] / pscore
switch_indicator = np.array(iw <= self.lambda_, dtype=int)
q_hat_at_position = estimated_rewards_by_reg_model[
np.arange(n_rounds), :, position
]
q_hat_factual = estimated_rewards_by_reg_model[
np.arange(n_rounds), action, position
]
pi_e_at_position = action_dist[np.arange(n_rounds), :, position]
estimated_rewards = np.average(
q_hat_at_position,
weights=pi_e_at_position,
axis=1,
)
estimated_rewards += switch_indicator * iw * (reward - q_hat_factual)
return estimated_rewards
def _estimate_mse_score(
    self,
    reward: np.ndarray,
    action: np.ndarray,
    pscore: np.ndarray,
    action_dist: np.ndarray,
    estimated_rewards_by_reg_model: np.ndarray,
    position: Optional[np.ndarray] = None,
    use_bias_upper_bound: bool = False,
    delta: float = 0.05,
) -> float:
    """Estimate the MSE score of a given switching hyperparameter for tuning.

    The score is the sample variance of the round-wise Switch-DR rewards plus
    the squared (high-probability upper bound of the) bias, following the
    automatic tuning procedure of Section 5 of Su et al.(2020).

    Parameters
    ----------
    reward: array-like, shape (n_rounds,)
        Reward observed in each round, i.e., :math:`r_t`.
    action: array-like, shape (n_rounds,)
        Action sampled by the behavior policy, i.e., :math:`a_t`.
    pscore: array-like, shape (n_rounds,)
        Propensity scores of the behavior policy, i.e., :math:`\\pi_b(a_t|x_t)`.
    action_dist: array-like, shape (n_rounds, n_actions, len_list)
        Action choice probabilities of the evaluation policy.
    estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)
        Expected rewards estimated by the regression model, i.e., :math:`\\hat{q}(x_t,a_t)`.
    position: array-like, shape (n_rounds,), default=None
        Position of the recommendation interface where the action was presented.
    use_bias_upper_bound: bool, default=True
        Whether to use the bias upper bound (Bernstein-based) instead of the
        direct bias estimator.
    delta: float, default=0.05
        Confidence delta for the high-probability bias upper bound.

    Returns
    ----------
    estimated_mse_score: float
        Estimated MSE score of the current `lambda_`.
    """
    n_rounds = reward.shape[0]
    # sample variance of the round-wise Switch-DR rewards (Eq.(8) of Wang et al.(2017))
    round_rewards = self._estimate_round_rewards(
        reward=reward,
        action=action,
        pscore=pscore,
        action_dist=action_dist,
        estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
        position=position,
    )
    sample_variance = np.var(round_rewards) / n_rounds
    # (high probability) upper bound of the bias of Switch-DR
    iw = action_dist[np.arange(n_rounds), action, position] / pscore
    # the switch estimator zeroes out weights above the threshold lambda_
    iw_hat = iw * np.array(iw <= self.lambda_, dtype=int)
    q_hat_factual = estimated_rewards_by_reg_model[
        np.arange(n_rounds), action, position
    ]
    if use_bias_upper_bound:
        bias_term = estimate_high_probability_upper_bound_bias(
            reward=reward,
            iw=iw,
            iw_hat=iw_hat,
            q_hat=q_hat_factual,
            delta=delta,
        )
    else:
        bias_term = estimate_bias_in_ope(
            reward=reward,
            iw=iw,
            iw_hat=iw_hat,
            q_hat=q_hat_factual,
        )
    return sample_variance + (bias_term ** 2)
@dataclass
class DoublyRobustWithShrinkage(DoublyRobust):
    """Doubly Robust with optimistic shrinkage (DRos) Estimator.

    Note
    ------
    DR with (optimistic) shrinkage replaces the importance weight in the original
    DR estimator with a new weight mapping found by directly optimizing sharp
    bounds on the resulting MSE.

    .. math::

        \\hat{V}_{\\mathrm{DRos}} (\\pi_e; \\mathcal{D}, \\hat{q}, \\lambda)
        := \\mathbb{E}_{\\mathcal{D}} [\\hat{q}(x_t,\\pi_e) + w_o(x_t,a_t;\\lambda) (r_t - \\hat{q}(x_t,a_t))],

    where :math:`\\mathcal{D}=\\{(x_t,a_t,r_t)\\}_{t=1}^{T}` is logged bandit feedback
    collected by a behavior policy :math:`\\pi_b`,
    :math:`w(x,a):=\\pi_e (a|x)/\\pi_b (a|x)` is the importance weight, and
    :math:`\\hat{q}` is the reward estimate from `obp.ope.regression_model.RegressionModel`.
    The shrunk weight is

    .. math::

        w_{o} (x_t,a_t;\\lambda) := \\frac{\\lambda}{w^2(x_t,a_t) + \\lambda} w(x_t,a_t).

    When :math:`\\lambda=0`, :math:`w_{o}` is identically 0 and DRos reduces to the
    DM estimator; as :math:`\\lambda \\rightarrow \\infty` it approaches the plain
    importance weight and DRos becomes the standard DR estimator.

    Parameters
    ----------
    lambda_: float
        Shrinkage hyperparameter. Must be >= 0 (and not NaN).

    estimator_name: str, default='dr-os'.
        Name of the estimator.

    References
    ----------
    Miroslav Dudík, Dumitru Erhan, John Langford, and Lihong Li.
    "Doubly Robust Policy Evaluation and Optimization.", 2014.

    Yi Su, Maria Dimakopoulou, Akshay Krishnamurthy, and Miroslav Dudik.
    "Doubly Robust Off-Policy Evaluation with Shrinkage.", 2020.
    """

    lambda_: float = 0.0
    estimator_name: str = "dr-os"

    def __post_init__(self) -> None:
        """Validate the shrinkage hyperparameter."""
        check_scalar(
            self.lambda_,
            name="lambda_",
            target_type=(int, float),
            min_val=0.0,
        )
        # NaN is the only value unequal to itself; check_scalar does not
        # reject it, so do so explicitly.
        if self.lambda_ != self.lambda_:
            raise ValueError("lambda_ must not be nan")

    def _estimate_round_rewards(
        self,
        reward: np.ndarray,
        action: np.ndarray,
        pscore: np.ndarray,
        action_dist: np.ndarray,
        estimated_rewards_by_reg_model: np.ndarray,
        position: Optional[np.ndarray] = None,
        **kwargs,
    ) -> np.ndarray:
        """Estimate round-wise (or sample-wise) rewards.

        Parameters
        ----------
        reward: array-like, shape (n_rounds,)
            Reward observed in each round, i.e., :math:`r_t`.

        action: array-like, shape (n_rounds,)
            Action sampled by the behavior policy, i.e., :math:`a_t`.

        pscore: array-like, shape (n_rounds,)
            Propensity scores of the behavior policy, i.e., :math:`\\pi_b(a_t|x_t)`.

        action_dist: array-like, shape (n_rounds, n_actions, len_list)
            Action choice probabilities of the evaluation policy.

        estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)
            Expected rewards estimated by the regression model, i.e., :math:`\\hat{q}(x_t,a_t)`.

        position: array-like, shape (n_rounds,), default=None
            Position of the recommendation interface where the action was
            presented. When None, the effect of position is ignored.

        Returns
        ----------
        estimated_rewards: array-like, shape (n_rounds,)
            Rewards of each round estimated by the DRos estimator.
        """
        n_rounds = action.shape[0]
        iw = action_dist[np.arange(n_rounds), action, position] / pscore
        # optimistic shrinkage: w_o = lambda * w / (w^2 + lambda);
        # lambda = inf leaves the weight untouched (plain DR).
        if self.lambda_ < np.inf:
            iw_hat = (self.lambda_ * iw) / (iw ** 2 + self.lambda_)
        else:
            iw_hat = iw
        q_hat_at_position = estimated_rewards_by_reg_model[
            np.arange(n_rounds), :, position
        ]
        q_hat_factual = estimated_rewards_by_reg_model[
            np.arange(n_rounds), action, position
        ]
        pi_e_at_position = action_dist[np.arange(n_rounds), :, position]
        if isinstance(reward, np.ndarray):
            # direct-method baseline: E_{a ~ pi_e}[q_hat(x, a)]
            estimated_rewards = np.average(
                q_hat_at_position,
                weights=pi_e_at_position,
                axis=1,
            )
        else:
            raise ValueError("reward must be 1D array")
        # doubly-robust correction with the shrunk weight
        estimated_rewards += iw_hat * (reward - q_hat_factual)
        return estimated_rewards

    def _estimate_mse_score(
        self,
        reward: np.ndarray,
        action: np.ndarray,
        pscore: np.ndarray,
        action_dist: np.ndarray,
        estimated_rewards_by_reg_model: np.ndarray,
        position: Optional[np.ndarray] = None,
        use_bias_upper_bound: bool = False,
        delta: float = 0.05,
    ) -> float:
        """Estimate the MSE score of a given shrinkage hyperparameter.

        The score is the sample variance of the round-wise DRos rewards plus
        the squared (high-probability upper bound of the) bias, following the
        automatic tuning procedure of Section 5 of Su et al.(2020).

        Parameters
        ----------
        reward: array-like, shape (n_rounds,)
            Reward observed in each round, i.e., :math:`r_t`.

        action: array-like, shape (n_rounds,)
            Action sampled by the behavior policy, i.e., :math:`a_t`.

        pscore: array-like, shape (n_rounds,)
            Propensity scores of the behavior policy, i.e., :math:`\\pi_b(a_t|x_t)`.

        action_dist: array-like, shape (n_rounds, n_actions, len_list)
            Action choice probabilities of the evaluation policy.

        estimated_rewards_by_reg_model: array-like, shape (n_rounds, n_actions, len_list)
            Expected rewards estimated by the regression model, i.e., :math:`\\hat{q}(x_t,a_t)`.

        position: array-like, shape (n_rounds,), default=None
            Position of the recommendation interface where the action was presented.

        use_bias_upper_bound: bool, default=True
            Whether to use the bias upper bound (Bernstein-based) instead of
            the direct bias estimator.

        delta: float, default=0.05
            Confidence delta for the high-probability bias upper bound.

        Returns
        ----------
        estimated_mse_score: float
            Estimated MSE score of the current `lambda_`.
        """
        n_rounds = reward.shape[0]
        # estimate the sample variance of DRos
        sample_variance = np.var(
            self._estimate_round_rewards(
                reward=reward,
                action=action,
                pscore=pscore,
                action_dist=action_dist,
                estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
                position=position,
            )
        )
        sample_variance /= n_rounds
        # estimate the (high probability) upper bound of the bias of DRos
        iw = action_dist[np.arange(n_rounds), action, position] / pscore
        if self.lambda_ < np.inf:
            iw_hat = (self.lambda_ * iw) / (iw ** 2 + self.lambda_)
        else:
            iw_hat = iw
        if use_bias_upper_bound:
            bias_term = estimate_high_probability_upper_bound_bias(
                reward=reward,
                iw=iw,
                iw_hat=iw_hat,
                q_hat=estimated_rewards_by_reg_model[
                    np.arange(n_rounds), action, position
                ],
                # BUG FIX: pass the caller-supplied delta instead of the
                # hard-coded 0.05 (consistent with the Switch-DR estimator).
                delta=delta,
            )
        else:
            bias_term = estimate_bias_in_ope(
                reward=reward,
                iw=iw,
                iw_hat=iw_hat,
                q_hat=estimated_rewards_by_reg_model[
                    np.arange(n_rounds), action, position
                ],
            )
        estimated_mse_score = sample_variance + (bias_term ** 2)
        return estimated_mse_score
|
'''
Created on 25 Oct 2016
This is a prototype piece of software that can be run on the command line that attempts to solve the issue of
deleting different elastic search contexts. It requires elastic search 2.3.0 especially the reindex API.
1. Check database to retrieve current contexts
2. Work out current (source) index alias for user defined data
3. Use 3 to work out destination index (flip between two choices based on 2).
4. If 3 exists delete it.
5. Reindex src index to dest index using just contexts from 1.
6. Alter alias to point to dest.
@author: Oliver Burren
'''
import json
import re
import urllib.error
import urllib.request

import psycopg2
def index_exists(eurl, idx):
    """Return True if index *idx* exists on the Elasticsearch node at *eurl*.

    Uses a HEAD request; a 404 response means the index is absent.  Any other
    HTTP error is treated as "exists" (matching the original behavior).
    """
    # BUG FIX: the original built the URL from the global ``elastic_url``
    # instead of the ``eurl`` parameter, making the parameter dead.
    dreq = urllib.request.Request(eurl + '/' + idx, method='HEAD')
    try:
        # Close the response so the connection is not leaked.
        urllib.request.urlopen(dreq).close()
    except urllib.error.HTTPError as err:
        if err.code == 404:
            # BUG FIX: typo "disease" -> "exist" in the diagnostic message.
            print("Index does not exist")
            return False
    return True
def delete_index(eurl, idx):
    """Delete index *idx* from the node at *eurl* if it exists.

    Returns False only when the delete itself answered 404; True otherwise
    (including when the index was already absent).
    """
    if index_exists(eurl, idx):
        # BUG FIX: the original deleted the *global* ``new_index_name`` via the
        # global ``elastic_url``, ignoring both parameters.
        dreq = urllib.request.Request(eurl + '/' + idx, method='DELETE')
        try:
            # Close the response so the connection is not leaked.
            urllib.request.urlopen(dreq).close()
        except urllib.error.HTTPError as err:
            if err.code == 404:
                print("Could not delete")
                return False
    return True
def create_index(eurl, idx):
    """(Re)create index *idx* at *eurl* with default shard/replica settings.

    Destructive: an existing index of the same name is deleted first.
    """
    # check if index already exists if it does delete
    if index_exists(eurl, idx):
        delete_index(eurl, idx)
    data = {
        "settings": {
            "index": {
                "number_of_shards": 5,
                # BUG FIX: the original key "number of replicas" (with spaces)
                # is not a valid Elasticsearch setting name and was ignored.
                "number_of_replicas": 1
            }
        }
    }
    data = json.dumps(data)
    data = data.encode('utf8')
    req = urllib.request.Request(eurl + '/' + idx, data, method='PUT')
    with urllib.request.urlopen(req) as response:
        the_page = response.read()
        print(the_page)
def reindex(eurl, src_index, dest_index, ctypes):
    """Reindex the documents of types *ctypes* from *src_index* to *dest_index*.

    Uses the Elasticsearch `_reindex` API (POST, since a body is supplied).
    Returns False on a 404 response; True otherwise.  Non-404 HTTP errors
    propagate to the caller.
    """
    url = eurl + '/_reindex'
    values = {
        'source': {'index': src_index, 'type': ctypes},
        'dest': {'index': dest_index}
    }
    data = json.dumps(values).encode('utf8')
    req = urllib.request.Request(url, data)
    try:
        # Close the response so the connection is not leaked.
        urllib.request.urlopen(req).close()
    except urllib.error.HTTPError as err:
        if err.code == 404:
            return False
    return True
def switch_alias(eurl, act, old, new):
    """Atomically repoint alias *act* from index *old* to index *new*.

    Returns False on a 404 response; True otherwise.
    """
    actions = [
        {'remove': {'index': old, 'alias': act}},
        {'add': {'index': new, 'alias': act}},
    ]
    payload = json.dumps({"actions": actions}).encode('utf8')
    req = urllib.request.Request(eurl + '/_aliases', payload)
    try:
        urllib.request.urlopen(req)
    except urllib.error.HTTPError as err:
        if err.code == 404:
            return False
    return True
def get_current_alias(eurl, idx):
    """Return the name of the first index behind alias *idx* at *eurl*.

    Returns False when the alias does not exist (HTTP 404).
    NOTE: *eurl* is expected to end with '/' (as in __main__ below), since the
    URL is built as ``eurl + idx``.
    """
    url = eurl + idx + '/_alias'
    req = urllib.request.Request(url)
    try:
        # Context manager closes the response so the connection is not leaked.
        with urllib.request.urlopen(req) as response:
            obj = json.loads(response.read().decode('utf8'))
    except urllib.error.HTTPError as err:
        if err.code == 404:
            print("Not found")
            return False
    return list(obj)[0]
if __name__ == '__main__':
    elastic_url = 'http://localhost:9200/'
    index_name_alias = 'cp:hg19_userdata_bed'
    # Work out which physical index currently backs the alias and flip to the
    # other one (blue/green style switch between _v1 and _v2).
    old_index_name = get_current_alias(elastic_url, index_name_alias)
    if old_index_name == 'cp:hg19_userdata_bed_v1':
        new_index_name = 'cp:hg19_userdata_bed_v2'
    else:
        new_index_name = 'cp:hg19_userdata_bed_v1'
    # (A redundant second get_current_alias call was removed here; its result
    # was unused.)
    # warning this will blow away current index if it already exists
    create_index(elastic_url, new_index_name)
    # connect to database; hard coded for now but eventually these can come
    # from the conf files.  Ensure the connection is closed when done.
    conn = psycopg2.connect("dbname=chicp_authdb user=webuser")
    try:
        cur = conn.cursor()
        cur.execute("select model from django_content_type where model like 'cp_stats_ud-ud%'")
        cidx = cur.fetchall()
        cur.close()
    finally:
        conn.close()
    print("Length is " + str(len(cidx)))
    if len(cidx) == 0:
        # random content_type that will never exist - effectively causes
        # reindex to create a blank index
        cidx = ['asdfdgsqwdfghghhjk']
    else:
        # strip the "cp_stats_ud-ud-..._idx_type" wrapper to get bare context names
        cidx = [re.sub("cp_stats_ud-ud-(.*)_idx_type", "\\1", i[0]) for i in cidx]
    # reindex only the live contexts into the new index, then repoint the alias
    reindex(elastic_url, old_index_name, new_index_name, cidx)
    switch_alias(elastic_url, index_name_alias, old_index_name, new_index_name)
|
# Generated by Django 3.1 on 2020-09-01 01:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: introduces the ``Order`` table for the shop app.

    Do not edit field definitions by hand; Django's migration state depends on
    them matching the recorded model history.
    """

    dependencies = [
        ('shop', '0006_contact'),
    ]

    operations = [
        migrations.CreateModel(
            name='Order',
            fields=[
                # Explicit auto-increment primary key.
                ('order_id', models.AutoField(primary_key=True, serialize=False)),
                # Serialized cart contents stored as a JSON string in a CharField.
                ('itemJSon', models.CharField(max_length=50000)),
                ('name', models.CharField(max_length=200)),
            ],
        ),
    ]
|
# Generated by Django 2.2.13 on 2020-10-19 08:40
import datetime
import django.core.validators
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated migration for the backManage app.

    Adds file-upload fields (with extension validators and Chinese verbose
    names) to ``work`` and re-declares ``add_time`` defaults on several models.

    NOTE(review): the ``add_time`` defaults are datetimes frozen at
    makemigrations time — the usual artifact of a model default evaluated
    eagerly (e.g. ``default=datetime.now()``).  Harmless inside a migration,
    but the model probably wants ``auto_now_add=True``; confirm in models.py.
    """

    dependencies = [
        ('backManage', '0005_auto_20201019_1415'),
    ]

    operations = [
        migrations.AddField(
            model_name='work',
            name='paper_commit',
            field=models.FileField(null=True, upload_to='commit', validators=[django.core.validators.FileExtensionValidator(['pdf'], message='承诺书必须为pdf格式')], verbose_name='承诺书pdf版'),
        ),
        migrations.AddField(
            model_name='work',
            name='paper_game_data',
            field=models.FileField(null=True, upload_to='game_data', validators=[django.core.validators.FileExtensionValidator(['zip'], message='原始数据必须为zip格式')], verbose_name='原始数据zip版'),
        ),
        migrations.AddField(
            model_name='work',
            name='paper_sign_up',
            field=models.FileField(null=True, upload_to='sign_up', validators=[django.core.validators.FileExtensionValidator(['doc', 'docx'], message='报名表必须为doc/docx格式')], verbose_name='报名表word版'),
        ),
        migrations.AlterField(
            model_name='college',
            name='add_time',
            field=models.DateTimeField(default=datetime.datetime(2020, 10, 19, 8, 40, 18, 472502, tzinfo=utc), verbose_name='添加时间'),
        ),
        migrations.AlterField(
            model_name='instructor',
            name='add_time',
            field=models.DateTimeField(default=datetime.datetime(2020, 10, 19, 8, 40, 18, 474650, tzinfo=utc), verbose_name='添加时间'),
        ),
        migrations.AlterField(
            model_name='judge',
            name='add_time',
            field=models.DateTimeField(default=datetime.datetime(2020, 10, 19, 8, 40, 18, 481161, tzinfo=utc), verbose_name='添加时间'),
        ),
        migrations.AlterField(
            model_name='member',
            name='add_time',
            field=models.DateTimeField(default=datetime.datetime(2020, 10, 19, 8, 40, 18, 475735, tzinfo=utc), verbose_name='添加时间'),
        ),
        migrations.AlterField(
            model_name='team',
            name='add_time',
            field=models.DateTimeField(default=datetime.datetime(2020, 10, 19, 8, 40, 18, 477382, tzinfo=utc), verbose_name='添加时间'),
        ),
        migrations.AlterField(
            model_name='work',
            name='add_time',
            field=models.DateTimeField(default=datetime.datetime(2020, 10, 19, 8, 40, 18, 479530, tzinfo=utc)),
        ),
        migrations.AlterField(
            model_name='work',
            name='paper_cc',
            field=models.FileField(null=True, upload_to='cc', validators=[django.core.validators.FileExtensionValidator(['pdf'], message='查重报告必须为pdf格式')], verbose_name='查重报告pdf版'),
        ),
    ]
|
#!/usr/bin/env python
from copy import deepcopy
from unittest import TestCase, main
from wiki_nodes.http.cache import WikiCache
from wiki_nodes.testing import wiki_cache
class CacheTest(TestCase):
    """Unit tests for WikiCache: deep-copy semantics, image storage, and resets."""

    def test_deepcopy_cache(self):
        # A deep copy must be a distinct object but keep ttl and directories.
        with wiki_cache('', 12345, base_dir=':memory:') as cache:
            clone = deepcopy(cache)
            self.assertFalse(cache is clone)
            self.assertEqual(12345, clone.ttl)
            self.assertEqual(cache.base_dir, clone.base_dir)
            self.assertEqual(cache.img_dir, clone.img_dir)

    def test_store_get_image(self):
        with wiki_cache('', base_dir=':memory:') as cache:
            cache.store_image('foo', b'abc')
            # Storing under an empty name must not create a file (only one
            # entry in img_dir below) and must not be retrievable.
            cache.store_image('', b'def')
            self.assertEqual(1, len(list(cache.img_dir.iterdir())))
            self.assertEqual(b'abc', cache.get_image('foo'))
            with self.assertRaises(KeyError):
                cache.get_image('bar')
            with self.assertRaises(KeyError):
                cache.get_image('')

    def test_store_get_misc(self):
        cache = WikiCache('', base_dir=':memory:', img_dir=':memory:')
        cache.store_misc('foo', {'bar': 'baz'})
        # get_misc returns (keys still needed, entries found).
        needed, found = cache.get_misc('foo', ['bar', 'baz'])
        self.assertEqual(['baz'], needed)
        self.assertEqual({'bar': 'baz'}, found)

    def test_reset_hard(self):
        with wiki_cache('', img_dir=':memory:') as cache:
            # Fixture: a directory named like a db, a non-db file, and a db file.
            dir_path = cache.base_dir.joinpath('bar.db')
            dir_path.mkdir()
            other_path = cache.base_dir.joinpath('foo.bar')
            db_path = cache.base_dir.joinpath('foo.db')
            other_path.touch()
            db_path.touch()
            # A plain reset leaves everything on disk untouched.
            cache.reset_caches()
            self.assertTrue(dir_path.exists())
            self.assertTrue(other_path.exists())
            self.assertTrue(db_path.exists())
            # A hard reset removes only regular *.db files.
            cache.reset_caches(True)
            self.assertTrue(dir_path.exists())
            self.assertTrue(other_path.exists())
            self.assertFalse(db_path.exists())
if __name__ == '__main__':
    # Run the suite directly; exit=False keeps the interpreter alive after tests.
    main(exit=False, verbosity=2)
|
import os
import unittest
from shutil import rmtree
import numpy as np
import z5py
class TestUtil(unittest.TestCase):
    """Tests for z5py.util helpers: copy_dataset, blocking, and chunk utilities."""

    # Scratch directory and default dataset geometry shared by all tests.
    tmp_dir = './tmp_dir'
    shape = (100, 100, 100)
    chunks = (10, 10, 10)

    def setUp(self):
        if not os.path.exists(self.tmp_dir):
            os.mkdir(self.tmp_dir)

    def tearDown(self):
        # Best-effort cleanup; ignore failures (e.g. directory already removed).
        try:
            rmtree(self.tmp_dir)
        except OSError:
            pass

    def test_copy_dataset_with_roi(self):
        from z5py.util import copy_dataset
        in_path = os.path.join(self.tmp_dir, 'in.n5')
        out_path = os.path.join(self.tmp_dir, 'out.n5')
        in_file = z5py.File(in_path, use_zarr_format=False)
        out_file = z5py.File(out_path, use_zarr_format=False)
        # create input dataset
        ds_in = in_file.create_dataset('data', dtype='float32',
                                       shape=self.shape, chunks=self.chunks,
                                       compression='gzip')
        # write test data
        data = np.arange(ds_in.size).reshape(ds_in.shape).astype(ds_in.dtype)
        ds_in[:] = data
        # define roi
        roi = np.s_[5:45, 9:83, 10:60]
        out_of_roi_mask = np.ones(self.shape, dtype='bool')
        out_of_roi_mask[roi] = False
        roi_shape = tuple(rr.stop - rr.start for rr in roi)
        # copy dataset with roi: fit_to_roi=False keeps the full output shape,
        # so everything outside the roi must remain zero
        copy_dataset(in_path, out_path, 'data', 'data_roi', n_threads=8,
                     roi=roi, fit_to_roi=False)
        ds_out = out_file['data_roi']
        data_out = ds_out[:]
        self.assertEqual(data_out.shape, data.shape)
        self.assertTrue(np.allclose(data_out[roi], data[roi]))
        self.assertTrue(np.allclose(data_out[out_of_roi_mask], 0))
        # copy dataset with roi and fit_to_roi: the output is cropped to the roi
        copy_dataset(in_path, out_path, 'data', 'data_roi_fit', n_threads=8,
                     roi=roi, fit_to_roi=True)
        ds_out = out_file['data_roi_fit']
        data_out = ds_out[:]
        self.assertEqual(data_out.shape, roi_shape)
        self.assertTrue(np.allclose(data_out, data[roi]))

    def test_copy_dataset_default(self):
        from z5py.util import copy_dataset
        in_path = os.path.join(self.tmp_dir, 'in.n5')
        out_path = os.path.join(self.tmp_dir, 'out.n5')
        # create input file
        in_file = z5py.File(in_path, use_zarr_format=False)
        ds_in = in_file.create_dataset('data', dtype='float32',
                                       shape=self.shape, chunks=self.chunks,
                                       compression='gzip')
        # write test data
        data = np.arange(ds_in.size).reshape(ds_in.shape).astype(ds_in.dtype)
        ds_in[:] = data
        # copy_dataset for different out blocks
        out_file = z5py.File(out_path, use_zarr_format=False)
        new_chunks = (20, 20, 20)
        # NOTE we can only choose out blocks that align with the chunks
        # because otherwise we run into issues due to not thread safe blocking
        for out_blocks in (None, (40, 40, 40), (60, 60, 60)):
            ds_str = 'none' if out_blocks is None else '_'.join(map(str, out_blocks))
            ds_name = 'data_%s' % ds_str
            copy_dataset(in_path, out_path, 'data', ds_name,
                         chunks=new_chunks,
                         block_shape=out_blocks,
                         n_threads=8)
            # make sure that new data agrees
            ds_out = out_file[ds_name]
            data_out = ds_out[:]
            self.assertEqual(data_out.shape, data.shape)
            self.assertEqual(ds_out.chunks, new_chunks)
            self.assertTrue(np.allclose(data, data_out))

    def test_copy_dataset_custom(self):
        from z5py.util import copy_dataset
        in_path = os.path.join(self.tmp_dir, 'in.n5')
        out_path = os.path.join(self.tmp_dir, 'out.n5')
        # create input file
        in_file = z5py.File(in_path, use_zarr_format=False)
        ds_in = in_file.create_dataset('data', dtype='float32',
                                       shape=self.shape, chunks=self.chunks,
                                       compression='gzip')
        # write test data
        data = np.arange(ds_in.size).reshape(ds_in.shape).astype(ds_in.dtype)
        ds_in[:] = data
        # copy_dataset over combinations of compression and output dtype
        new_chunks = (20, 20, 20)
        for compression in ('raw', 'gzip'):
            for dtype in ('float64', 'int32', 'uint32'):
                ds_name = 'ds_%s_%s' % (compression, dtype)
                copy_dataset(in_path, out_path,
                             'data', ds_name,
                             chunks=new_chunks,
                             n_threads=8,
                             compression=compression,
                             dtype=dtype)
                # make sure that new data agrees
                out_file = z5py.File(out_path, use_zarr_format=False)
                ds_out = out_file[ds_name]
                data_out = ds_out[:]
                self.assertEqual(data_out.shape, data.shape)
                self.assertEqual(ds_out.chunks, new_chunks)
                self.assertTrue(np.allclose(data, data_out))

    # TODO finish blocking tests
    def simple_blocking(self, shape, block_shape):
        # NOTE(review): incomplete reference implementation — it never returns
        # ``blocking``, and the ndim > 1 branch appends ``tuple + int`` which
        # would raise TypeError.  Only used by the disabled ``_test_blocking``
        # below; finish before enabling that test.
        blocking = []
        ndim = len(shape)
        for x in range(0, shape[0], block_shape[0]):
            blocking.append((x, min(x + block_shape[0], shape[0])))
            if ndim > 1:
                block = blocking.pop()
                for y in range(0, shape[1], block_shape[1]):
                    blocking.append(block + min(y + block_shape[1], shape[1]))

    def _test_blocking(self):
        # Disabled (leading underscore) until simple_blocking is finished.
        from z5py.util import blocking
        n_reps = 10
        for dim in range(1, 6):
            for _ in range(n_reps):
                # NOTE(review): randint(0, ...) may yield 0 for shape or
                # block_shape, which would make range() step by 0 — TODO guard.
                shape = tuple(np.random.randint(0, 1000) for ii in range(dim))
                block_shape = tuple(min(np.random.randint(0, 100), sh)
                                    for ii, sh in zip(range(dim), shape))
                blocking1 = [(block.start, block.stop)
                             for block in blocking(shape, block_shape)]
                blocking2 = self.simple_blocking(shape, block_shape)
                sorted(blocking1)
                sorted(blocking2)
                self.assertEqual(blocking1, blocking2)

    def test_remove_trivial_chunks(self):
        from z5py.util import remove_trivial_chunks
        path = './tmp_dir/data.n5'
        f = z5py.File(path)
        shape = (100, 100)
        chunks = (10, 10)
        # Two constant ("trivial") chunks and one non-constant chunk.
        a = np.zeros(shape)
        a[10:20, 10:20] = 1
        a[20:30, 20:30] = 2
        a[50:60, 50:60] = np.arange(100).reshape(chunks)
        ds = f.create_dataset('data', dtype='float64',
                              shape=shape, chunks=chunks)
        ds[:] = a
        # Default: all constant chunks are removed (read back as zeros).
        remove_trivial_chunks(ds, n_threads=4)
        b = ds[:]
        self.assertTrue(np.allclose(b[10:20, 10:20], 0))
        self.assertTrue(np.allclose(b[20:30, 20:30], 0))
        self.assertTrue(np.allclose(b[50:60, 50:60],
                                    np.arange(100).reshape(chunks)))
        ds[:] = a
        # With remove_specific_value=1 only the all-ones chunk is removed.
        remove_trivial_chunks(ds, n_threads=4, remove_specific_value=1)
        c = ds[:]
        self.assertTrue(np.allclose(c[10:20, 10:20], 0))
        self.assertTrue(np.allclose(c[20:30, 20:30], 2))
        self.assertTrue(np.allclose(c[50:60, 50:60],
                                    np.arange(100).reshape(chunks)))

    def test_unique(self):
        from z5py.util import unique
        path = './tmp_dir/data.n5'
        f = z5py.File(path)
        shape = (100, 100)
        chunks = (10, 10)
        ds = f.create_dataset('data', dtype='int32',
                              shape=shape, chunks=chunks)
        data = np.random.randint(0, 100, size=shape).astype('int32')
        ds[:] = data
        # Compare against numpy's reference result.
        exp_uniques, exp_counts = np.unique(data, return_counts=True)
        uniques = unique(ds, n_threads=4)
        self.assertTrue(np.allclose(uniques, exp_uniques))
        uniques, counts = unique(ds, n_threads=4, return_counts=True)
        self.assertTrue(np.allclose(uniques, exp_uniques))
        self.assertTrue(np.allclose(counts, exp_counts))

    def test_remove_dataset(self):
        from z5py.util import remove_dataset
        path = './tmp_dir/data.n5'
        f = z5py.File(path)
        shape = (100, 100)
        chunks = (10, 10)
        ds = f.create_dataset('data', dtype='float64',
                              data=np.ones(shape), chunks=chunks)
        # Removal must delete the dataset directory on disk.
        remove_dataset(ds, 4)
        self.assertFalse(os.path.exists(os.path.join(path, 'data')))
if __name__ == '__main__':
    # Run the suite directly.
    unittest.main()
|
# Generated by Django 2.0.4 on 2018-04-23 12:36
from django.contrib.gis.geos import Point
from django.db import migrations
def forwards_func(apps, schema_editor):
    """Apply: populate the hard-coded region center points."""
    change_region_center(apps, schema_editor, forward=True)
def reverse_func(apps, schema_editor):
    """Unapply: clear the region center points again."""
    change_region_center(apps, schema_editor, forward=False)
class Migration(migrations.Migration):
    """Data migration: set center points on selected FederalSubjectWithPolygon rows."""

    dependencies = [
        ('regions', '0008_federalsubjectwithpolygon'),
    ]

    operations = [
        # forwards_func sets the centers; reverse_func clears them.
        migrations.RunPython(forwards_func, reverse_func),
    ]
def change_region_center(apps, schema_editor, forward=True):
    """Set (forward) or clear (reverse) the center point of the listed regions."""
    FederalSubjectWithPolygon = apps.get_model("regions", "FederalSubjectWithPolygon")
    # region_id -> center point; see data/0002/federal_subject.jsonl
    data_list = {
        # Arkhangelsk Oblast
        8: Point(40.86914062499997, 64.2445947679819)
    }
    for region_id, center in data_list.items():
        subject = FederalSubjectWithPolygon.objects.get(id=region_id)
        if forward:
            subject.center = center
        else:
            subject.center = None
        # Only touch the center column.
        subject.save(update_fields=['center'])
|
import csv
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
# Tally ice-hockey gold medals for the three countries of interest.
canada_gold_hockey = 0
usa_gold_hockey = 0
norway_gold_hockey = 0

with open('OlympicsWinter.csv') as csvfile:
    reader = csv.reader(csvfile)
    next(reader, None)  # skip the header row
    for row in reader:
        # Column layout: row[2]=sport, row[4]=country code, row[7]=medal colour.
        if row[7] == "Gold" and row[2] == "Ice Hockey":
            if row[4] == "CAN":
                canada_gold_hockey += 1
            elif row[4] == "USA":
                usa_gold_hockey += 1
            elif row[4] == "NOR":
                norway_gold_hockey += 1

# Bar chart of the three totals.
labels = ['Canada', 'USA', 'Norway']
x = np.arange(len(labels))
medals = [canada_gold_hockey, usa_gold_hockey, norway_gold_hockey]
plt.bar(x, medals, align='center', alpha=0.8)
plt.xticks(x, labels)
plt.ylabel('Total Gold Medals in Hockey')
plt.title('Top 3 Countries Total Gold Medals in Hockey')
plt.show()
|
import os
import json
# Convert the raw author tuples in aff.json1 into named records in Author1.json.
author_map = {}
author_list = []
baseurl = 'http://dl.acm.org/'

with open('aff.json1', 'r') as infile:
    data_dict = json.load(infile)

# Each entry: [0]=relative link, [3]=last, [4]=first, [5]=middle, [6]=full name.
author_list = [
    {
        'link': baseurl + data[0],
        'FName': data[4],
        'MName': data[5],
        'LName': data[3],
        'FULL Name': data[6],
    }
    for data in data_dict['authors']
]

output_dict = {'authors': author_list}
with open('Author1.json', 'w') as outfile:
    json.dump(output_dict, outfile)
from django.conf.urls import url
import views
# URL routes for the "thewall" app.
urlpatterns = [
    # Landing / index page (unnamed).
    url(r'^$', views.index),
    url(r'^login/$', views.login, name='thewall-login'),
    url(r'^register/$', views.register, name='thewall-register'),
    url(r'^dashboard/$', views.dashboard, name='thewall-dashboard'),
]
|
# Module-level counter of arithmetic failures recorded by the helpers below.
error = 0


def crear_error():
    """Reset the global error counter to zero."""
    # BUG FIX: without ``global`` the original assigned a local variable that
    # was immediately discarded, so the counter was never actually reset.
    global error
    error = 0
    return
def add(a, b):
    """Add two fractions given as (numerator, denominator) tuples.

    The result is not reduced.  On failure the global ``error`` counter is
    incremented and ``(None, None)`` is returned.
    """
    # BUG FIX: ``error += 1`` in the handler needs a global declaration,
    # otherwise it raised UnboundLocalError instead of counting the error.
    global error
    try:
        if a[1] == b[1]:
            # Common denominator: just add the numerators.
            x = a[0] + b[0]
            y = a[1]
        else:
            # Cross-multiply.
            x = a[0] * b[1] + a[1] * b[0]
            y = a[1] * b[1]
        return (x, y)
    except Exception:
        error += 1
        return (None, None)
def mul(a, b):
    """Multiply two fractions ((num, den) tuples); result is not reduced.

    On failure the global ``error`` counter is incremented and ``(None, None)``
    is returned.
    """
    global error  # BUG FIX: required for ``error += 1`` in the handler
    try:
        x = a[0] * b[0]
        y = a[1] * b[1]
        return (x, y)
    except Exception:
        error += 1
        return (None, None)
def sub(a, b):
    """Subtract fraction ``b`` from ``a`` ((num, den) tuples) by cross-multiplying.

    The result is not reduced.  On failure the global ``error`` counter is
    incremented and ``(None, None)`` is returned.
    """
    global error  # BUG FIX: required for ``error += 1`` in the handler
    try:
        x = a[0] * b[1] - a[1] * b[0]
        y = a[1] * b[1]
        return (x, y)
    except Exception:
        error += 1
        return (None, None)
def div(a, b):
    """Divide fraction ``a`` by fraction ``b`` ((num, den) tuples).

    Bare int operands are promoted to tuple form first.  On failure the global
    ``error`` counter is incremented and ``(None, None)`` is returned.
    """
    global error  # BUG FIX: required for ``error += 1`` in the handler
    try:
        if type(a) == int:
            a = (a, a)
        if type(b) == int:
            # BUG FIX: the original overwrote ``a`` here instead of ``b``,
            # which then crashed on ``b[1]`` below.
            b = (b, b)
        # NOTE(review): ``(n, n)`` represents n/n == 1, which looks wrong for
        # an integer operand — ``(n, 1)`` may have been intended.  Preserved
        # as-is; confirm the intended semantics before changing the math.
        x = a[0] * b[1]
        y = a[1] * b[0]
        return (x, y)
    except Exception:
        error += 1
        return (None, None)
def create(m, n):
    """Build a fraction tuple (m, n); empty or zero denominators yield (None, None)."""
    invalid = n == "" or n == 0
    return (None, None) if invalid else (m, n)
def inp():
    """Read numerator and denominator from stdin and build a fraction tuple.

    On any failure (bad input, EOF) the global ``error`` counter is incremented
    and ``(None, None)`` is returned.
    """
    global error  # BUG FIX: required for ``error += 1`` in the handler
    try:
        return create(float(input()), float(input()))
    except Exception:
        error += 1
        return (None, None)
def prt(a):
    """Print fraction ``a`` as a numerator-over-denominator block (ints only)."""
    print("=" * 21)
    print(int(a[0]))
    print('--')
    print(int(a[1]))
    return
def _reduce(a):
try:
gcf = 1
for i in range(int(max(a[0], a[1])), 0, -1):
if(a[0]%i==0 and a[1]%i==0):
gcf = i
break
return(a[0]/gcf, a[1]/gcf)
except:
error+=1
return(None, None)
def eq(a, b):
    """Return True when the two fractions are equal after reduction."""
    return _reduce(a) == _reduce(b)
def it(a, b):
    """Return True when reduced ``a`` compares less than reduced ``b``."""
    return _reduce(a) < _reduce(b)
|
import cv2
import os
import sys
from string import Template
# Python 2 script: detect faces in images and print their bounding boxes.
# first argument is the haarcascades path
face_cascade_path = sys.argv[1]
face_cascade = cv2.CascadeClassifier(os.path.expanduser(face_cascade_path))

# Detection tuning parameters.
scale_factor = 1.1
min_neighbors = 3
min_size = (30, 30)
flags = cv2.cv.CV_HAAR_SCALE_IMAGE

# Remaining arguments are the image files to scan.
for filename in sys.argv[2:]:
    image_path = os.path.expanduser(filename)
    image = cv2.imread(image_path)
    faces = face_cascade.detectMultiScale(image, scaleFactor=scale_factor,
                                          minNeighbors=min_neighbors,
                                          minSize=min_size, flags=flags)
    for (x, y, w, h) in faces:
        # BUG FIX: the original concatenated ints onto a str
        # (``filename + ':' + x + ...``), which raises TypeError;
        # format the numbers explicitly instead.
        print('%s:%d,%d,%d,%d' % (filename, x, y, w, h))
|
# Copyright (c) 2021 Qualcomm Technologies, Inc.
# All rights reserved.
import torch
import trimesh
def compute_normals_edges_from_mesh(data):
    """Attach per-vertex normals and an edge index to *data*, computed via trimesh.

    Builds a trimesh.Trimesh from data.pos / data.face, then stores the mesh's
    vertex normals on ``data.normal`` and its edges on ``data.edge_index``.
    """
    tri = trimesh.Trimesh(
        vertices=data.pos.numpy(),
        faces=data.face.numpy().T,
        process=False,  # keep vertex order/identity untouched
    )
    data.normal = torch.tensor(
        tri.vertex_normals.copy(), dtype=data.pos.dtype, device=data.pos.device
    )
    data.edge_index = torch.tensor(
        tri.edges.T.copy(), dtype=torch.long, device=data.pos.device
    )
    return data
|
import numpy as np
from qtpy.QtCore import QPointF, Slot
from pymodaq.daq_utils import daq_utils as utils
from pymodaq.daq_utils.managers.roi_manager import ROIManager
from pymodaq.daq_utils.plotting.items.crosshair import Crosshair
from pymodaq.daq_utils.plotting.items.image import UniformImageItem
class Filter:
    """Base class for switchable data filters that forward results to a slot."""

    def __init__(self):
        # Inactive by default; no destination slot registered yet.
        self._is_active = False
        self._slot_to_send_data = None

    def register_activation_signal(self, activation_signal):
        """Toggle this filter whenever *activation_signal* emits a bool."""
        activation_signal.connect(lambda state: self.set_active(state))

    def register_target_slot(self, slot):
        """Register the callable that receives filtered data."""
        self._slot_to_send_data = slot

    @Slot(bool)
    def set_active(self, activate=True):
        self._is_active = activate

    def filter_data(self, data: utils.DataFromPlugins):
        """Run the filter if active and forward any non-None result to the slot."""
        if not self._is_active:
            return
        filtered_data = self._filter_data(data)
        if filtered_data is None or self._slot_to_send_data is None:
            return
        self._slot_to_send_data(filtered_data)

    def _filter_data(self, data: utils.DataFromPlugins):
        # Subclasses implement the actual extraction.
        raise NotImplementedError
class FilterFromCrosshair(Filter):
def __init__(self, crosshair: Crosshair, graph_items, image_keys):
    """
    Extract data along a crosshair using coordinates and data displayed in
    graph_items such as imageItems.

    Parameters
    ----------
    crosshair : (Crosshair) the crosshair item whose position is sampled
    graph_items : (dict) mapping of image identifier -> displayed graphics item
    image_keys : (list) list of string identifiers to link datas to their
        graph_items. This means that in _filter_data, datas['data'][key] is
        plotted on graph_items[key] for key in image_keys
    """
    super().__init__()
    self._graph_items = graph_items
    self._image_keys = image_keys
    self.crosshair = crosshair
    # Last known crosshair position; refreshed on every filter pass.
    self._x, self._y = 0., 0.
def _filter_data(self, datas: utils.DataFromPlugins):
data_dict = dict([])
if datas is not None:
self._x, self._y = self.crosshair.get_positions()
data_type = datas['distribution']
for data_index in range(len(self._image_keys)):
if data_index < len(datas['data']):
data = datas['data'][data_index]
image_type = self._image_keys[data_index]
if data_type == 'uniform':
data_dict[image_type] = self.get_data_from_uniform(image_type, data)
elif data_type == 'spread':
data_dict[image_type] = self.get_data_from_spread(image_type, data)
return data_dict
def get_data_from_uniform(self, data_key, data):
hor_axis, ver_axis = \
np.linspace(0, self._graph_items[data_key].width() - 1, self._graph_items[data_key].width()),\
np.linspace(0, self._graph_items[data_key].height() - 1, self._graph_items[data_key].height())
indx, indy = self.mapfromview(self._x, self._y, data_key)
data_H_index = slice(None, None, 1)
data_V_index = slice(None, None, 1)
H_indexes = (utils.rint(indy), data_H_index)
V_indexes = (data_V_index, utils.rint(indx))
out_of_bounds = False
if 0 <= H_indexes[0] < len(ver_axis):
hor_data = data[H_indexes]
else:
out_of_bounds = True
hor_data = np.zeros(hor_axis.shape)
if 0 <= V_indexes[1] < len(hor_axis):
ver_data = data[V_indexes]
else:
out_of_bounds = True
ver_data = np.zeros(ver_axis.shape)
if out_of_bounds:
ind_data = 0.
else:
ind_data = data[utils.rint(indy), utils.rint(indx)]
return LineoutData(hor_axis=hor_axis, ver_axis=ver_axis, hor_data=hor_data, ver_data=ver_data,
int_data=ind_data)
def get_data_from_spread(self, data_key, data):
data_H_index = slice(None, None, 1)
data_V_index = slice(None, None, 1)
posx, posy = self.mapfromview(self._x, self._y, data_key)
points, data = self._graph_items[data_key].get_points_at(axis='y', val=posy)
x_sorted_indexes = np.argsort(points[:, 0])
hor_axis = points[x_sorted_indexes, 0][data_H_index]
hor_data = data[x_sorted_indexes][data_H_index]
points, data = self._graph_items[data_key].get_points_at(axis='x', val=posx)
y_sorted_indexes = np.argsort(points[:, 1])
ver_axis = points[y_sorted_indexes, 1][data_V_index]
ver_data = data[y_sorted_indexes][data_V_index]
return LineoutData(hor_axis=hor_axis, ver_axis=ver_axis, hor_data=hor_data, ver_data=ver_data,
int_data=self._graph_items[data_key].get_val_at((posx, posy)))
def mapfromview(self, x, y, item_key='red'):
"""
get item coordinates from view coordinates
Parameters
----------
x: (float) x coordinate in the view reference frame
y: (float) y coordinate in the view refernece frame
Returns
-------
x: (float) coordinate in the item reference frame
y: (float) coordinate in the item reference frame
"""
point = self._graph_items[item_key].mapFromView(QPointF(x, y))
return point.x(), point.y()
class FilterFromRois(Filter):
    def __init__(self, roi_manager: ROIManager, graph_item: UniformImageItem, image_keys):
        """
        Extract lineouts from the regions of interest managed by roi_manager.

        Parameters
        ----------
        roi_manager
        graph_item
        image_keys : (list) list of string identifier to link datas to their graph_items. This means that in
            _filter_data, datas['data'][key] is plotted on graph_items[key] for key in image_keys
        """
        super().__init__()
        self._roi_settings = roi_manager.settings
        self._image_keys = image_keys
        self._graph_item = graph_item
        # (row, col) axes passed to ROI.getArrayRegion when slicing the image.
        self.axes = (0, 1)
        self._ROIs = roi_manager.ROIs

    def _filter_data(self, datas: utils.DataFromPlugins) -> dict:
        # Build {roi_key: LineoutData}; each ROI picks its source channel via
        # its 'use_channel' setting.
        data_dict = dict([])
        if datas is not None:
            for roi_key, roi in self._ROIs.items():
                image_key = self._roi_settings.child('ROIs', roi_key, 'use_channel').value()
                image_index = self._image_keys.index(image_key)
                data_type = datas['distribution']
                data = datas['data'][image_index]
                data_dict[roi_key] = self.get_xydata_from_roi(data_type, roi, data)
        return data_dict

    def get_xydata_from_roi(self, data_type, roi, data):
        # Returns a LineoutData for this ROI (implicitly None when data is None).
        if data is not None:
            if data_type == 'spread':
                # Spread data: (N, 3) scattered (x, y, value) points inside the ROI.
                xvals, yvals, data = self.get_xydata_spread(data, roi)
                ind_xaxis = np.argsort(xvals)
                ind_yaxis = np.argsort(yvals)
                xvals = xvals[ind_xaxis]
                yvals = yvals[ind_yaxis]
                data_H = data[ind_xaxis]
                data_V = data[ind_yaxis]
                int_data = np.array([np.mean(data)])
            else:
                # Uniform data: 2D crop; lineouts are the per-axis means.
                xvals, yvals, data = self.get_xydata(data, roi)
                data_H = np.mean(data, axis=0)
                data_V = np.mean(data, axis=1)
                int_data = np.array([np.mean(data)])
            return LineoutData(hor_axis=xvals, ver_axis=yvals, hor_data=data_H, ver_data=data_V, int_data=int_data)

    def get_xydata(self, data, roi):
        # Crop the image to the ROI and build linear x/y axes spanning the
        # mapped ROI coordinate range.
        data, coords = self.data_from_roi(data, roi)
        if data is not None:
            xvals = np.linspace(np.min(np.min(coords[1, :, :])), np.max(np.max(coords[1, :, :])),
                                data.shape[1])
            yvals = np.linspace(np.min(np.min(coords[0, :, :])), np.max(np.max(coords[0, :, :])),
                                data.shape[0])
        else:
            xvals = yvals = data = np.array([])
        return xvals, yvals, data

    def data_from_roi(self, data, roi):
        # pyqtgraph ROI crop; returnMappedCoords also yields the coordinate grid.
        data, coords = roi.getArrayRegion(data, self._graph_item, self.axes, returnMappedCoords=True)
        return data, coords

    def get_xydata_spread(self, data, roi):
        # Keep only the scattered points whose (x, y) lies inside the ROI shape.
        xvals = []
        yvals = []
        data_out = []
        for ind in range(data.shape[0]):
            # invoke the QPainterPath of the ROI (from the shape method); the
            # path is in ROI-local coordinates, hence the pos() offset.
            if roi.shape().contains(QPointF(data[ind, 0] - roi.pos().x(),
                                            data[ind, 1] - roi.pos().y())):
                xvals.append(data[ind, 0])
                yvals.append(data[ind, 1])
                data_out.append(data[ind, 2])
        data_out = np.array(data_out)
        xvals = np.array(xvals)
        yvals = np.array(yvals)
        return xvals, yvals, data_out
class LineoutData:
    """Value object bundling the horizontal and vertical lineouts (plus an
    integrated scalar) extracted from a 2D data set.

    Parameters
    ----------
    hor_axis, hor_data : np.ndarray — horizontal lineout axis/values (same length)
    ver_axis, ver_data : np.ndarray — vertical lineout axis/values (same length)
    int_data : array-like or None — integrated value; defaults to the sum of
        ver_data when not provided.

    Raises
    ------
    ValueError
        When an axis and its data differ in length.
    """

    def __init__(self, hor_axis=np.array([]), ver_axis=np.array([]), hor_data=np.array([]), ver_data=np.array([]),
                 int_data=None):
        super().__init__()
        # The array defaults are shared module-level objects, but they are
        # never mutated here, so the mutable-default pitfall does not apply.
        if len(hor_axis) != len(hor_data):
            raise ValueError('Horizontal lineout data and axis must have the same size')
        if len(ver_axis) != len(ver_data):
            # Bug fix: this message previously said 'Horizontal', making the
            # two failure modes indistinguishable to callers reading the error.
            raise ValueError('Vertical lineout data and axis must have the same size')
        self.hor_axis = hor_axis
        self.ver_axis = ver_axis
        self.hor_data = hor_data
        self.ver_data = ver_data
        if int_data is None:
            self.int_data = np.array([np.sum(self.ver_data)])
        else:
            self.int_data = int_data
from pyspark.context import SparkContext
from pyspark.sql.session import SparkSession
# Local single-JVM Spark context; SparkSession wraps it for the SQL API.
sc = SparkContext('local')
spark = SparkSession(sc)
'''lines = sc.textFile('mapreduce/words.txt')
lines.count()
lines.first()
'''
# Load the file lazily as an RDD of lines, then force two actions.
lines = sc.textFile('mapreduce/words.txt')
lines.count()   # number of lines (result discarded)
lines.first()   # first line (result discarded)
from django.contrib.auth.models import User, Group
from rest_framework import serializers
from appmovie.models import Movie, MovieRaiting
class MovieSerializer(serializers.Serializer):
    """Read-only movie representation (plain Serializer: no create/update)."""
    title = serializers.CharField()
    duration_q = serializers.IntegerField()
    director = serializers.CharField()
    actor = serializers.CharField()

    def get_movie_rate(self, obj):
        # Return the first rating's 'rate' for the movie pk, or '' when none.
        # NOTE(review): QuerySet.first() returns a model instance (or None),
        # so subscripting it with ['rate'] will raise unless .values() is used
        # upstream — and no SerializerMethodField wires this method in. Confirm.
        rates = MovieRaiting.objects.filter(movie__pk=obj)
        if rates.exists():
            return rates.first()['rate']
        return ''
class MovieRaitingSerializer(serializers.ModelSerializer):
    """Read serializer for ratings: hyperlinks for id/movie, user as string."""
    id = serializers.HyperlinkedIdentityField(view_name='appmovie:drf-movierate-detail')
    user = serializers.StringRelatedField()
    movie = serializers.HyperlinkedIdentityField(read_only=True, view_name='appmovie:drf-movierate-detail', lookup_field='pk')

    class Meta:
        model = MovieRaiting
        fields = ('movie', 'user', 'vote', 'id')
class MovieRaitingSerializerCreate(serializers.ModelSerializer):
    """Create serializer: accepts movie, user, vote and comment."""
    class Meta:
        model = MovieRaiting
        fields = ('movie', 'user', 'vote', 'comment')
class MovieRaitingSerializerUpdate(serializers.ModelSerializer):
    """Update serializer: user is intentionally not editable."""
    class Meta:
        model = MovieRaiting
        fields = ('movie', 'vote', 'comment')
class MovieRaitingSerializerDelete(serializers.ModelSerializer):
    """Delete serializer: only the primary key is exposed."""
    class Meta:
        model = MovieRaiting
        # Bug fix: this read `field = ('id')` — `field` is not a Meta option
        # DRF recognizes, and ('id') is a plain string, not a tuple. DRF
        # requires `fields` (a list/tuple of names), otherwise it raises an
        # assertion error at serializer construction time.
        fields = ('id',)
class MovieRaitingSerializerAll(serializers.ModelSerializer):
    """Full serializer: exposes every model field."""
    class Meta:
        model = MovieRaiting
        fields = '__all__'
|
import os.path
def absolute_href(in_href, in_source_file_path, root_path):
    """Convert an href found in a source HTML file into a root-relative URL.

    Parameters
    ----------
    in_href : str — the raw href attribute (relative path, anchor, absolute
        path, or external URL).
    in_source_file_path : str — absolute filesystem path of the HTML file that
        contains the link.
    root_path : str — filesystem path that maps to the web root "/".

    Returns
    -------
    str — the href rewritten relative to root_path, always using "/" as the
    separator. Absolute ("/...") and external ("scheme:...") hrefs are
    returned unchanged.
    """
    # The arithmetic below assumes in_source_file_path is already absolute.
    # (assert is stripped under -O; callers are trusted here.)
    assert (os.path.abspath(in_source_file_path) == os.path.normpath(in_source_file_path))
    # Link is already root-relative.
    if in_href.startswith("/"):
        return in_href
    # Don't touch external links, i.e. http:, https:, ftp:, mailto:
    if ":" in in_href:
        return in_href
    # Anchor within the same document: point back at this file plus anchor.
    if in_href.startswith("#"):
        rel_path = os.path.relpath(in_source_file_path, root_path)
        # Bug fix: this branch previously skipped the "\" -> "/" normalization
        # done below, yielding backslashed URLs on Windows.
        return ("/" + rel_path + in_href).replace("\\", "/")
    # Example:
    # in_source_file_path = C:/root/f1/a.html,
    # which contains a link <a href="../f2/b.html#anch">,
    # so in_href = "../f2/b.html#anch" and desired root is "C:/root".
    in_source_file_dir, _ = os.path.split(in_source_file_path)
    # partition() (instead of split("#")) keeps everything after the FIRST '#'
    # intact even if the fragment itself contains '#'.
    # href_path = "../f2/b.html", anchor = "#anch" ("" when no fragment).
    href_path, sep, href_anchor = in_href.partition("#")
    anchor = sep + href_anchor
    # href_dir = "../f2", href_file = "b.html"
    href_dir, href_file = os.path.split(href_path)
    # abs_dir = normpath("C:/root/f1/../f2") = "C:/root/f2"
    abs_dir = os.path.normpath(os.path.join(in_source_file_dir, href_dir))
    # abs_path = "C:/root/f2/b.html#anch"
    abs_path = os.path.join(abs_dir, href_file) + anchor
    # rel_path = "/f2/b.html#anch"
    rel_path = "/" + os.path.relpath(abs_path, root_path)
    # Filesystem paths on Windows use "\" as separator; web URLs must use "/".
    return rel_path.replace("\\", "/")
#
# print absolute_href("../f2/b.html#anch","C:/root/f1/a.html", "C:/root") |
#!/usr/bin/env python3
import sys
import os
import random
class FileBuffer:
    """Batch line writer: collects lines and writes them to the wrapped file
    10000 at a time, with a final flush when the object is destroyed."""

    def __init__(self, f):
        self.f = f
        self.buf = []

    def _drain(self):
        # Write every pending line to the file, then clear the buffer.
        for pending in self.buf:
            print(pending, file=self.f)
        self.buf = []

    def __call__(self, line):
        self.buf.append(line)
        if len(self.buf) >= 10000:
            self._drain()

    def __del__(self):
        # Best-effort final flush; relies on self.f still being open.
        self._drain()
if __name__ == '__main__':
    # Shard stdin into `n` files under the target directory, routing each line
    # by the hash of its first tab-separated field so that all records sharing
    # a key land in the same shard.
    tgt = sys.argv[1]
    n = 100
    # Bug fix: replaced os.system('mkdir %s -p' % tgt) — the shell call was
    # non-portable and injectable via the argv path.
    os.makedirs(tgt, exist_ok=True)
    files = [open(os.path.join(tgt, "%d" % i), 'w') for i in range(n)]
    files = [FileBuffer(f) for f in files]
    for i, line in enumerate(sys.stdin):
        if i % 10000 == 0:
            # Lightweight progress indicator, overwritten in place.
            print(i / 10000, file=sys.stdout, end='\r')
        line = line.rstrip()
        k = line.partition('\t')[0]
        # Renamed from `i`: the original clobbered the enumerate counter with
        # the shard index (harmless but confusing — enumerate rebinds it).
        shard = hash(k) % n
        files[shard](line)
|
from rest_framework import serializers
from music.models import Claims
from music.models import Messages
from music.models import ChatSession
class ClaimsSerializer(serializers.ModelSerializer):
    """Serializes a Claims record, including location (lat/lon) and image."""
    class Meta:
        model = Claims
        fields = ("name", "goal", "iam", "lookfor", "lat", "lon", "esttime", "wholikes", "image")
class MessagesSerializer(serializers.ModelSerializer):
    """Serializes one chat message together with its session reference."""
    class Meta:
        model = Messages
        fields = ("message", "sender", "receiver", "timestamp", "chatsession")
class ChatSessionSerializer(serializers.ModelSerializer):
    """Serializes a chat session between two users."""
    class Meta:
        model = ChatSession
        fields = ("chatsession", "esttime", "user1", "user2")
|
from typing import List
import bisect
class Solution:
    def maxSumSubmatrix(self, matrix: List[List[int]], k: int) -> int:
        """Largest sum of any axis-aligned rectangle in `matrix` not exceeding k.

        Strategy: collapse each pair of column boundaries into a 1-D array of
        per-row sums, then solve the 1-D "max subarray sum <= k" problem with
        a sorted list of prefix sums and bisect.

        Note: `matrix` is modified in place (rows become prefix sums).

        https://leetcode.com/problems/max-sum-of-rectangle-no-larger-than-k/discuss/445540/Python-bisect-solution-(960ms-beat-71.25)
        """
        def best_capped_sum(values: List[int]):
            # Max subarray sum <= k for a 1-D array, via sorted prefix sums:
            # for each running sum S, the best earlier prefix is the smallest
            # one >= S - k.
            best = float('-inf')
            running = 0
            prefixes = [float('inf')]  # sentinel so bisect always finds a slot
            for v in values:
                bisect.insort(prefixes, running)
                running += v
                pos = bisect.bisect_left(prefixes, running - k)
                best = max(best, running - prefixes[pos])
            return best

        n_rows, n_cols = len(matrix), len(matrix[0])
        # Turn every row into its running prefix sums, in place.
        for r in range(n_rows):
            for c in range(n_cols - 1):
                matrix[r][c + 1] += matrix[r][c]
        answer = float('-inf')
        # For each pair of column boundaries, reduce to the 1-D problem.
        for left in range(n_cols):
            for right in range(left, n_cols):
                row_sums = [matrix[r][right] - (matrix[r][left - 1] if left > 0 else 0)
                            for r in range(n_rows)]
                answer = max(answer, best_capped_sum(row_sums))
        return answer
|
import numpy as np
import scipy.signal as signal
import matplotlib.pyplot as plt
### Constants #####
Max_puls = 196  # max plausible heart rate [bpm] — https://www.ntnu.no/cerg/hfmax 16.03.21
Min_puls = 35   # min plausible heart rate [bpm]
N_fft =40248    # NOTE(review): unused below — confirm whether it was meant as the FFT length
window = 51     # NOTE(review): unused below
fps = 40        # sampling rate of the recorded signal [frames/s]
#######
raw_data = np.loadtxt("trans_2_ex/jonas_puls2_1.txt")
#raw_data = np.loadtxt("extracted/jonas4.txt")
#raw_data = np.loadtxt("rob_1_ex/jonas_puls7_2.txt") #
# Split the three columns (presumably red/green/blue traces — see plots below)
# into rows of `data`.
data=[]
for i in range(3):
    data.append(raw_data[:,i])
data = signal.detrend(data)
# Band-limit to the plausible pulse band [Min_puls, Max_puls]/60 Hz: a 10th
# order Butterworth low-pass followed by a high-pass, both at fs=40.
b, a = signal.butter(10, Max_puls/60, 'lp', analog=False, output="ba", fs=40)  # filter away any noise outside the desired range
data = signal.lfilter(b, a, data)
b, a = signal.butter(10, Min_puls/60, 'hp', analog=False, output="ba", fs=40)  # filter away any noise outside the desired range
data = signal.lfilter(b, a, data)
red = data[0]
green = data[1]
blue = data[2]
plt.plot(red,color = "r", label = "Red")
plt.plot(green,color = "g",label = "Green")
plt.plot(blue, color = "b", label = "Blue")
plt.legend()
plt.show()
# Hamming window to reduce spectral leakage before the FFT.
data= data*signal.windows.hamming(len(raw_data))
fft_channel = blue
# Zero-padded FFT (6x the signal length) for finer frequency resolution.
data_fft = np.fft.fft(fft_channel,len(fft_channel)*6)
data_log = 10*np.log10(np.abs(data_fft)**2)  # power spectrum in dB
freq = np.fft.fftfreq(n=round(len(data_log)), d=1/40)
# Partition the spectrum into out-of-band (noise) and in-band (pulse) bins,
# iterating backwards so indices stay valid across np.delete calls.
freq_noise = []
data_noise =[]
data_signal =[]
for i in range(len(freq)-1,0,-1):
    if freq[i] <= Min_puls/60 or freq[i] >= Max_puls/60:
        freq_noise.append(freq[i])
        data_noise.append(data_fft[i])
        freq = np.delete(freq,i)
        data_log = np.delete(data_log,i)
        # NOTE(review): this rebinds data_signal from the ORIGINAL data_fft on
        # every pass, so only the last deletion survives; accumulating with
        # np.delete(data_signal, i) on a copy of data_fft looks intended — confirm.
        data_signal =np.delete(data_fft,i)
sorted = np.argsort(data_log)  # NOTE: shadows the builtin sorted()
print("######## The 10 frequency with the highest power:########")
for i in range(10):
    ind = sorted[-i-1]
    print("frequency[HZ]:",freq[ind],"Power[dB]:",data_log[ind],)
print("######################## \n")
plt.plot(freq,data_log)
plt.xlim(Min_puls/60,Max_puls/60)
plt.ylim(0,60)
plt.show()
# Pulse estimate: strongest remaining in-band frequency, converted to bpm.
f_D_index = np.argmax(data_log)
f_D = np.abs(freq[f_D_index])*60
print("Pulse is",f_D,"bpm")
print("######################## \n")
avg_signal = 10**(np.max(data_log)/10)
avg_noise = np.average(data_noise)
# NOTE(review): this mixes a power-domain signal estimate with an average of
# complex FFT amplitudes and uses np.log (natural log), not log10 — the SNR
# formula deserves a second look.
SNR=10*np.log(np.abs(avg_signal/avg_noise)**2)
print("SNR:",SNR)
|
'''
Created on Nov 12, 2013
@author: rriehle
'''
#import numpy as np
#import pandas as pd
import datetime as dt
import logging
import pandas as pd
from querystring import GenerateQueryString
from tradeseries import TradeSeries
# from tradeset import TradeSet
def consolidate_tradeset(myts):
    '''Return a consolidated, sorted, single Pandas DataFrame from all of the
    Pandas DataFrames in the TradeSet.

    myts : iterable of pandas.DataFrame, each carrying a 'time' column.
    '''
    # Bug fix: the original concatenated the module-global `mytradeset`
    # instead of the `myts` argument, silently ignoring its parameter (and
    # breaking any caller that passes a different set).
    dfaggregate = pd.concat(myts)
    # sort_values replaces the long-removed DataFrame.sort_index(by=...) API;
    # same semantics: order rows by the 'time' column.
    dfsorted = dfaggregate.sort_values(by='time')
    return dfsorted
if __name__ == '__main__':

    def pylogger():
        # Configure the root logger to send DEBUG+ records to the local
        # syslog socket and return it.
        from logging.handlers import SysLogHandler
        logger = logging.getLogger()
        logger.setLevel(logging.DEBUG)
        syslog = SysLogHandler(address="/dev/log")
        formatter = logging.Formatter('%(module)s[%(process)d]: %(levelname)s %(message)s')
        syslog.setFormatter(formatter)
        logger.addHandler(syslog)
        return(logger)

    logger = pylogger()
    logger.info("anius has started")

    # NOTE: Python 2 script (bare print statements below).
    for myquerystring in GenerateQueryString():
        print myquerystring
        mytradeseries = TradeSeries(myquerystring)
        logging.debug("Class of mytradeseries is %s", mytradeseries.__class__)
        logger.info("Length of DataFrame mytradeseries.df is %i", len(mytradeseries.df))
        # mytr = mytradeseries.capture_traderun(100,200)
        # logging.debug("Type of mytr is %s", mytr.__class__)
        mytradeset = set()
        # Collect trade runs until one without a length appears (treated as
        # the end-of-runs sentinel).
        for mytraderun in mytradeseries.capture_traderun():
            try:
                print("len(mytraderun) is ", len(mytraderun))
                mytradeset.add(mytraderun)
            except:
                # NOTE(review): bare except — ANY failure, not just a missing
                # __len__, silently ends the collection loop.
                print("len(mytraderun) is undefined, so we're done!")
                break
        # mytraderun01 = mytradeseries.capture_traderun(100, 200)
        # mytraderun02 = mytradeseries.capture_traderun(800, 900)
        # mytradeset = set()
        # mytradeset.add(mytraderun01)
        # mytradeset.add(mytraderun02)
        finaltraderun = consolidate_tradeset(mytradeset)
        print(finaltraderun['lasttrd'].describe())
        # finaltraderun.lasttrd.plot(use_index=False)
        # Rows where all three trend indicators agree on "up" (value 2).
        dfuuu = mytradeseries.df[ (mytradeseries.df['trend']==2) & (mytradeseries.df['shorttrend']==2) & (mytradeseries.df['longtrend']==2) ]
        criterion = dfuuu['vclose'].map(lambda x: x <= 4.04764039964)
        dfuuu[criterion]  # NOTE(review): result is discarded — confirm intent
# dfuuu = df[ (df['trend']==2) & (df['shorttrend']==2) & (df['longtrend']==2) ]
# dfuu0 = df[ (df['trend']==2) & (df['shorttrend']==2) & (df['longtrend']==0) ]
# dfuud = df[ (df['trend']==2) & (df['shorttrend']==2) & (df['longtrend']==-2) ]
# dfu0u = df[ (df['trend']==2) & (df['shorttrend']==0) & (df['longtrend']==2) ]
# dfu00 = df[ (df['trend']==2) & (df['shorttrend']==0) & (df['longtrend']==0) ]
# dfu0d = df[ (df['trend']==2) & (df['shorttrend']==0) & (df['longtrend']==-2) ]
# dfudu = df[ (df['trend']==2) & (df['shorttrend']==-2) & (df['longtrend']==2) ]
# dfud0 = df[ (df['trend']==2) & (df['shorttrend']==-2) & (df['longtrend']==0) ]
# dfudd = df[ (df['trend']==2) & (df['shorttrend']==-2) & (df['longtrend']==-2) ]
# df0uu = df[ (df['trend']==0) & (df['shorttrend']==2) & (df['longtrend']==2) ]
# df0u0 = df[ (df['trend']==0) & (df['shorttrend']==2) & (df['longtrend']==0) ]
# df0ud = df[ (df['trend']==0) & (df['shorttrend']==2) & (df['longtrend']==-2) ]
# df00u = df[ (df['trend']==0) & (df['shorttrend']==0) & (df['longtrend']==2) ]
# df000 = df[ (df['trend']==0) & (df['shorttrend']==0) & (df['longtrend']==0) ]
# df00d = df[ (df['trend']==0) & (df['shorttrend']==0) & (df['longtrend']==-2) ]
# df0du = df[ (df['trend']==0) & (df['shorttrend']==-2) & (df['longtrend']==2) ]
# df0d0 = df[ (df['trend']==0) & (df['shorttrend']==-2) & (df['longtrend']==0) ]
# df0dd = df[ (df['trend']==0) & (df['shorttrend']==-2) & (df['longtrend']==-2) ]
# dfduu = df[ (df['trend']==-2) & (df['shorttrend']==2) & (df['longtrend']==2) ]
# dfdu0 = df[ (df['trend']==-2) & (df['shorttrend']==2) & (df['longtrend']==0) ]
# dfdud = df[ (df['trend']==-2) & (df['shorttrend']==2) & (df['longtrend']==-2) ]
# dfd0u = df[ (df['trend']==-2) & (df['shorttrend']==0) & (df['longtrend']==2) ]
# dfd00 = df[ (df['trend']==-2) & (df['shorttrend']==0) & (df['longtrend']==0) ]
# dfd0d = df[ (df['trend']==-2) & (df['shorttrend']==0) & (df['longtrend']==-2) ]
# dfddu = df[ (df['trend']==-2) & (df['shorttrend']==-2) & (df['longtrend']==2) ]
# dfdd0 = df[ (df['trend']==-2) & (df['shorttrend']==-2) & (df['longtrend']==0) ]
# dfddd = df[ (df['trend']==-2) & (df['shorttrend']==-2) & (df['longtrend']==-2) ]
# Note that including the screen for the vclose at this level of the program, before iterative processing, is incorrect,
# because it will lead to an artificially high rate of flicker of initial entry conditions and therefore skew results.
# dfuuu = df[ (df['trend']==2) & (df['shorttrend']==2) & (df['longtrend']==2) & (df['vclose']<=-4.04764039964) ]
# dfuu0 = df[ (df['trend']==2) & (df['shorttrend']==2) & (df['longtrend']==0) & (df['vclose']<=-4.76417749748) ]
# dfuud = df[ (df['trend']==2) & (df['shorttrend']==2) & (df['longtrend']==-2) & (df['vclose']<=-4.13861925445) ]
# dfu0u = df[ (df['trend']==2) & (df['shorttrend']==0) & (df['longtrend']==2) & (df['vclose']<=-4.91537064605) ]
# dfu00 = df[ (df['trend']==2) & (df['shorttrend']==0) & (df['longtrend']==0) & (df['vclose']<=-5.44565420654) ]
# dfu0d = df[ (df['trend']==2) & (df['shorttrend']==0) & (df['longtrend']==-2) & (df['vclose']<=-4.95587748430) ]
# dfudu = df[ (df['trend']==2) & (df['shorttrend']==-2) & (df['longtrend']==2) & (df['vclose']<=-4.80429611982) ]
# dfud0 = df[ (df['trend']==2) & (df['shorttrend']==-2) & (df['longtrend']==0) & (df['vclose']<=-4.48430662326) ]
# dfudd = df[ (df['trend']==2) & (df['shorttrend']==-2) & (df['longtrend']==-2) & (df['vclose']<=-5.08143646781) ]
# df0uu = df[ (df['trend']==0) & (df['shorttrend']==2) & (df['longtrend']==2) & (df['vclose']<=-6.54130796415) ]
# df0u0 = df[ (df['trend']==0) & (df['shorttrend']==2) & (df['longtrend']==0) & (df['vclose']<=-6.98370504851) ]
# df0ud = df[ (df['trend']==0) & (df['shorttrend']==2) & (df['longtrend']==-2) & (df['vclose']<=-7.12133634292) ]
# df00u = df[ (df['trend']==0) & (df['shorttrend']==0) & (df['longtrend']==2) & (df['vclose']<=-7.04157317948) ]
# df000 = df[ (df['trend']==0) & (df['shorttrend']==0) & (df['longtrend']==0) & (df['vclose']<=-7.60161035793) ]
# df00d = df[ (df['trend']==0) & (df['shorttrend']==0) & (df['longtrend']==-2) & (df['vclose']<=-8.16827818869) ]
# df0du = df[ (df['trend']==0) & (df['shorttrend']==-2) & (df['longtrend']==2) & (df['vclose']<=-6.48816341905) ]
# df0d0 = df[ (df['trend']==0) & (df['shorttrend']==-2) & (df['longtrend']==0) & (df['vclose']<=-6.82932266486) ]
# df0dd = df[ (df['trend']==0) & (df['shorttrend']==-2) & (df['longtrend']==-2) & (df['vclose']<=-8.07630172755) ]
# dfduu = df[ (df['trend']==-2) & (df['shorttrend']==2) & (df['longtrend']==2) & (df['vclose']<=-6.72414808193) ]
# dfdu0 = df[ (df['trend']==-2) & (df['shorttrend']==2) & (df['longtrend']==0) & (df['vclose']<=-8.22475973216) ]
# dfdud = df[ (df['trend']==-2) & (df['shorttrend']==2) & (df['longtrend']==-2) & (df['vclose']<=-9.44844736198) ]
# dfd0u = df[ (df['trend']==-2) & (df['shorttrend']==0) & (df['longtrend']==2) & (df['vclose']<=-8.90679755796) ]
# dfd00 = df[ (df['trend']==-2) & (df['shorttrend']==0) & (df['longtrend']==0) & (df['vclose']<=-9.55531697976) ]
# dfd0d = df[ (df['trend']==-2) & (df['shorttrend']==0) & (df['longtrend']==-2) & (df['vclose']<=-9.28591422926) ]
# dfddu = df[ (df['trend']==-2) & (df['shorttrend']==-2) & (df['longtrend']==2) & (df['vclose']<=-10.2452175762) ]
# dfdd0 = df[ (df['trend']==-2) & (df['shorttrend']==-2) & (df['longtrend']==0) & (df['vclose']<=-9.46847620508) ]
# dfddd = df[ (df['trend']==-2) & (df['shorttrend']==-2) & (df['longtrend']==-2) & (df['vclose']<=-11.0170724181) ]
# if len(dfuuu) <> 0:
# print("dfuuu", len(dfuuu))
# dfuuu.lasttrd.plot()
# if len(dfuu0) <> 0:
# print("dfuu0", len(dfuu0))
# dfuu0.lasttrd.plot()
# if len(dfuud) <> 0:
# print("dfuud", len(dfuud))
# dfuud.lasttrd.plot()
# if len(dfu0u) <> 0:
# print("dfu0u", len(dfu0u))
# dfu0u.lasttrd.plot()
# if len(dfu00) <> 0:
# print("dfu00", len(dfu00))
# dfu00.lasttrd.plot()
# if len(dfu0d) <> 0:
# print("dfu00", len(dfu00))
# dfu0d.lasttrd.plot()
# if len(dfudu) <> 0:
# print("dfudu", len(dfudu))
# dfudu.lasttrd.plot()
# if len(dfud0) <> 0:
# print("dfud0", len(dfud0))
# dfud0.lasttrd.plot()
# if len(dfudd) <> 0:
# print("dfudd", len(dfudd))
# dfudd.lasttrd.plot()
# if len(df0uu) <> 0:
# print("df0uu", len(df0uu))
# df0uu.lasttrd.plot()
# if len(df0u0) <> 0:
# print("df0u0", len(df0u0))
# dfu0u.lasttrd.plot()
# if len(df0ud) <> 0:
# print("df0ud", len(df0ud))
# df0ud.lasttrd.plot()
# if len(df00u) <> 0:
# print("df00u", len(df00u))
# df00u.lasttrd.plot()
# if len(df000) <> 0:
# print("df000", len(df000))
# df000.lasttrd.plot()
# if len(df00d) <> 0:
# print("df00d", len(df00d))
# df00d.lasttrd.plot()
# if len(df0du) <> 0:
# print("df0du", len(df0du))
# df0du.lasttrd.plot()
# if len(df0d0) <> 0:
# print("df0d0", len(df0d0))
# df0d0.lasttrd.plot()
# if len(df0dd) <> 0:
# print("df0dd", len(df0dd))
# df0dd.lasttrd.plot()
# if len(dfduu) <> 0:
# print("dfduu", len(dfduu))
# dfduu.lasttrd.plot()
# if len(dfdu0) <> 0:
# print("dfdu0", len(dfdu0))
# dfdu0.lasttrd.plot()
# if len(dfdud) <> 0:
# print("dfdud", len(dfdud))
# dfdud.lasttrd.plot()
# if len(dfd0u) <> 0:
# print("dfd0u", len(dfd0u))
# dfd0u.lasttrd.plot()
# if len(dfd00) <> 0:
# print("dfd00", len(dfd00))
# dfd00.lasttrd.plot()
# if len(dfd0d) <> 0:
# print("dfd0d", len(dfd0d))
# dfd0d.lasttrd.plot()
# if len(dfddu) <> 0:
# print("dfddu", len(dfddu))
# dfddu.lasttrd.plot()
# if len(dfdd0) <> 0:
# print("dfdd0", len(dfdd0))
# dfdd0.lasttrd.plot()
# if len(dfddd) <> 0:
# print("dfddd", len(dfddd))
# dfddd.lasttrd.plot()
# dfuuu.lasttrd.plot(use_index=False)
# dfuu0.lasttrd.plot(use_index=False)
# dfuud.lasttrd.plot(use_index=False)
# dfu0u.lasttrd.plot(use_index=False)
# dfu00.lasttrd.plot(use_index=False)
# dfu0d.lasttrd.plot(use_index=False)
# dfudu.lasttrd.plot(use_index=False)
# dfud0.lasttrd.plot(use_index=False)
# dfudd.lasttrd.plot(use_index=False)
# df0uu.lasttrd.plot(use_index=False)
# df0u0.lasttrd.plot(use_index=False)
# df0ud.lasttrd.plot(use_index=False)
# df00u.lasttrd.plot(use_index=False)
# df000.lasttrd.plot(use_index=False)
# df00d.lasttrd.plot(use_index=False)
# df0du.lasttrd.plot(use_index=False)
# df0d0.lasttrd.plot(use_index=False)
# df0dd.lasttrd.plot(use_index=False)
# dfduu.lasttrd.plot(use_index=False)
# dfdu0.lasttrd.plot(use_index=False)
# dfdud.lasttrd.plot(use_index=False)
# dfd0u.lasttrd.plot(use_index=False)
# dfd00.lasttrd.plot(use_index=False)
# dfd0d.lasttrd.plot(use_index=False)
# dfddu.lasttrd.plot(use_index=False)
# dfdd0.lasttrd.plot(use_index=False)
# dfddd.lasttrd.plot(use_index=False)
# dfuuu.vclose.plot(use_index=False)
# dfuu0.vclose.plot(use_index=False)
# dfuud.vclose.plot(use_index=False)
# dfu0u.vclose.plot(use_index=False)
# dfu00.vclose.plot(use_index=False)
# dfu0d.vclose.plot(use_index=False)
# dfudu.vclose.plot(use_index=False)
# dfud0.vclose.plot(use_index=False)
# dfudd.vclose.plot(use_index=False)
# df0uu.vclose.plot(use_index=False)
# df0u0.vclose.plot(use_index=False)
# df0ud.vclose.plot(use_index=False)
# df00u.vclose.plot(use_index=False)
# df000.vclose.plot(use_index=False)
# df00d.vclose.plot(use_index=False)
# df0du.vclose.plot(use_index=False)
# df0d0.vclose.plot(use_index=False)
# df0dd.vclose.plot(use_index=False)
# dfduu.vclose.plot(use_index=False)
# dfdu0.vclose.plot(use_index=False)
# dfdud.vclose.plot(use_index=False)
# dfd0u.vclose.plot(use_index=False)
# dfd00.vclose.plot(use_index=False)
# dfd0d.vclose.plot(use_index=False)
# dfddu.vclose.plot(use_index=False)
# dfdd0.vclose.plot(use_index=False)
# dfddd.vclose.plot(use_index=False)
logger.info("anius is finished")
|
# encoding: utf-8
from config import PARAMS_TYPE
# 把function的__doc__字符串转换为字典
def trans_str_to_dict(do_str):
    """Parse a function __doc__ string into a dict.

    Recognized line markers: ":description", ":param name: type: explain"
    and ":return:". Param entries whose type is not in PARAMS_TYPE are
    dropped; explanations land under "param_explain". An empty/None input
    yields {"param_explain": {}}.
    """
    result = {"param_explain": {}}
    if not do_str:
        return result
    for line in do_str.split('\n'):
        if ":description" in line:
            result["description"] = line.split(":description")[1].strip()
        elif ":param" in line:
            body = line.split(":param")[1]
            if not body.strip():
                continue
            pieces = body.split(':')
            # pieces = [name, type, explanation?]; keep only known types.
            if len(pieces) >= 2 and pieces[1].strip().lower() in PARAMS_TYPE:
                name = pieces[0].strip()
                result[name] = pieces[1].strip()
                if len(pieces) >= 3:
                    result["param_explain"][name] = pieces[2].strip()
        elif ":return:" in line:
            result["return"] = line.split(":return:")[1].strip()
    return result
# 字典b是字典a中的一个value,把字典b中的一个键值对移动到字典a
def dict_move_key(dict_a, dict_b, key):
    """Move one key/value pair from dict_b into dict_a (dict_b is a nested
    value of dict_a in the caller). No-op when key is absent; returns dict_a."""
    try:
        dict_a[key] = dict_b.pop(key)
    except KeyError:
        pass
    return dict_a
# 重组接口信息为get_all_api_tem中的数据格式
def compose_api_info(key, api_dict):
    """Build the api-info record for one endpoint from its __doc__ string:
    name, description, return note, per-param explanations, and the remaining
    param/type pairs under "params" (format used by get_all_api_tem)."""
    doc_dict = trans_str_to_dict(api_dict[key].__doc__)
    info = {"name": key}
    # Lift the well-known keys to the top level; what remains in doc_dict is
    # the param-name -> type mapping.
    for moved in ("description", "return", "param_explain"):
        info = dict_move_key(info, doc_dict, moved)
    info["params"] = doc_dict
    return info
'''
4.3 列表数值练习
'''
'''
4-3 数列20
'''
# Exercise 4-3: print the numbers 1 through 20.
for number in range(1,21):
    print(number)
'''
4-4 一百万
'''
# Exercise 4-4: build and print a list of one million numbers.
number02 = [n for n in range(1,1000001)]
print(number02)
'''
4-5 计算1~1000000的总和
'''
# Exercise 4-5: min, max and sum of 1..1000000.
number = list(range(1,1000001))
print(min(number))
print(max(number))
print(sum(number))
'''
4-6 奇数
'''
# Exercise 4-6: odd numbers 1..19 via a step-2 range.
ji = list(range(1,21,2))
for jishu in ji:
    print(jishu)
'''
4-7 3的倍数
'''
# Exercise 4-7: multiples of 3 up to 30.
chu3 = list(range(3,31,3))
for yin in chu3:
    print(yin)
'''
4-8 立方
'''
# plain-loop version
lifang = []
for value in range(1,11):
    lifang.append(value**3)  # build the list of cubes
for i in lifang:
    print(i)  # print each value
# list-comprehension version
lifang = [value**3 for value in range(1,11)]
for yin in lifang:
    print(yin)
'''
4-9 立方解析
'''
# Exercise 4-9: cubes of 1..10 as a comprehension, printed as one list.
lifang = [value**3 for value in range(1,11)]
print(lifang)
#!/usr/bin/env python3
import sys # imports go at the top of the file
# Mutable inventory shared (as a module global) by every feature function below.
fruits = ["Apples", "Pears", "Oranges", "Peaches"]
# Menu re-shown before every prompt; join keeps one option per line.
prompt = "\n".join(("Welcome to the fruit stand!",
                    "Please choose from below options:",
                    "1 - View all fruits",
                    "2 - Add a fruit",
                    "3 - View a specific fruit",
                    "4 - Remove a fruit",
                    "5 - Mystery Display",
                    "6 - Execute Fruit List Reversal",
                    "7 - Exit",
                    ">>> "))
def view_fruits():
    """List the fruits — all of them, or only names starting with 'P'.

    Any answer other than yes/no variants silently shows nothing.
    """
    item = input("Would you like to display fruits that\n"
                 "only begin with the letter 'P'? Enter 'Yes'.\n"
                 "Otherwise Enter 'No' to view ALL Fruits.")
    answer = item.lower()
    if answer in ("n", "no"):
        print("\n".join(fruits))
    elif answer in ("y", "yes"):
        for name in [f for f in fruits if f[0].lower() == "p"]:
            print(name)
def view_a_fruit():
    """Show a single fruit selected by its zero-based ("Pythonic") index.

    Out-of-range indices fall through (the returned prompt is ignored by the
    caller). Non-numeric input still raises ValueError, as before.
    """
    item = input("Choose a fruit by its Pythonic number: ")
    item = int(item)
    if -1 < item < len(fruits):
        # Bug fix: the prompt asks for a zero-based index, but the original
        # printed fruits[item - 1] — off by one, and index 0 wrapped around
        # to show the LAST fruit.
        print(item, fruits[item])
    else:
        return prompt
def add_fruit():
    """Prompt for a fruit name and insert it at the chosen end of the list."""
    new_fruit = input("Name of the fruit to add?").title()
    # The trailing backslash continues the string literal across source lines.
    new_location = input("Enter '1' to add fruit to the beginning\
 of the list.\n" "Enter '2' to add fruit to the end of the list").title()
    # Anything other than '2' (including bad input) defaults to the front.
    if new_location == "2":
        fruits.append(new_fruit)
    else:
        fruits.insert(0, new_fruit)
def remove_fruit():
    """Prompt for a fruit name and delete its first occurrence, if present."""
    purge_fruit = input("Name of the fruit to remove?").title()
    if purge_fruit in fruits:
        fruits.remove(purge_fruit)
    else:
        print("This fruit does not exist!")
def mystery_display():
    """Walk the list asking about each fruit; disliked ones are removed.

    The index only advances on a "y" answer — after a removal the next fruit
    has shifted into the current slot, so the position is re-asked.
    """
    fruit = 0
    while fruit < len(fruits):
        item = input("Do you like {}? (y/n): ".format(fruits[fruit].lower()))
        print(fruit)
        # Inner loop re-prompts until a recognizable y/n answer arrives.
        while True:
            if item.lower() == "y":
                fruit = fruit+1
                break
            if item.lower() == "n":
                print('Removing {}'.format(fruits[fruit].lower()))
                # remove() drops the FIRST occurrence of the value — with
                # duplicate names that may not be the slot being asked about.
                fruits.remove(fruits[fruit])
                print(fruit)
                print(len(fruits))
                break
            else:
                item = input("Please answer 'yes' or 'no' only.\n")
    print(fruits)
def reverse_fruit():
    """Print each visited fruit name reversed, deleting the last list element
    on every pass.

    NOTE(review): this mutates `fruits` while iterating it (`del fruits[-1]`
    inside the for loop), so iteration stops early and roughly half the list
    is skipped and discarded. If the intent was only to display reversed
    names, the `del` looks like a bug — confirm before relying on it.
    """
    for fruit in fruits:
        transposed_fruits = fruit[::-1]
        print(transposed_fruits)
        del fruits[-1]
    print(fruits)
def exit_program():
    """Print a goodbye message and terminate the script via SystemExit."""
    print("Bye!")
    # exit the interactive script
    sys.exit()
def main():
    """Menu loop: prompt for a selection and dispatch to a feature function."""
    while True:
        # continuously collect user selection
        response = input(prompt)
        # now redirect to feature functions based on the user selection
        if response == "1":
            view_fruits()
        elif response == "2":
            add_fruit()
        elif response == "3":
            view_a_fruit()
        elif response == "4":
            remove_fruit()
        elif response == "5":
            mystery_display()
        elif response == "6":
            reverse_fruit()
        elif response == "7" or response.lower() == "exit":
            # Bug fix: the original condition was
            #   `response.lower == "7" or "Exit"`
            # — `response.lower` (an unbound method object) never equals "7",
            # and `or "Exit"` is always truthy, so EVERY unmatched input fell
            # into this branch and exited; the else below was unreachable.
            exit_program()
        else:
            print("Not a valid option!")
if __name__ == "__main__":
    # don't forget this block to guard against your code
    # running automatically if this module is imported
    main()
|
# Just a Guess Game
import random
print('Hello. What is Your name?')
name = input()
secretNumber = random.randint(1,100)
print('Well, ' + name + ', I am thinking of a number between 1 to 100')
# Up to 9 guesses (range(1, 10)); the loop breaks early on a correct guess.
for guessTaken in range(1,10):
    print('Take a Guess.')
    guess = int(input())
    if guess < secretNumber:
        print('Your Guess is too low')
    elif guess > secretNumber:
        print('Your Guess is too high')
    else:
        break  # correct — guessTaken holds the number of tries used
# After the loop, `guess` holds either the winning guess or the final attempt.
if guess == secretNumber:
    print('Good job, ' + name + ', Congratulations. you only took ' + str(guessTaken) + ' times to guess it!')
else:
    print('Nope. The Number I was thinking of was ' + str(secretNumber))
# Did you have fun making it?
|
from flask import render_template, request
from jobs_flask import app
from sqlalchemy import create_engine
from sqlalchemy_utils import database_exists, create_database
import random
import re
import ast
import pandas as pd
import psycopg2
from utilities import remember_viewed_jobs
# Database connection parameters for the local indeed_db Postgres instance.
user = 'ubuntu'
host = 'localhost'
dbname = 'indeed_db'
# NOTE(review): this URI renders as 'postgres://ubuntulocalhost/indeed_db' —
# a '@' between user and host ('postgres://%s@%s/%s') looks intended; the
# engine appears unused below (queries go through the psycopg2 `con`), confirm.
db = create_engine('postgres://%s%s/%s'%(user,host,dbname))
con = None
con = psycopg2.connect(database = dbname, user = user)
# Module-level state shared across requests (NOTE(review): not request-safe).
jobs_seen = []
jobkeys_picked = []
@app.route('/')
@app.route('/index')
def jobsearch_index():
    """Landing page: render the job-search input form."""
    #jobs_seen = []
    return render_template("input.html")
@app.route('/employers')
def find_candidate_type():
    """Employer page: echo the submitted job description into the template
    (job-title prediction is still a TODO — see comment below)."""
    #pull 'job title' from input field and store it
    job_description = request.args.get('description')
    # PREDICT JOBTITLE FOR THIS TEXT
    return render_template("employers.html", job_description = job_description)
@app.route('/input')
def jobsearch_input():
    """Explicit /input route: same search form as the index page."""
    #jobs_seen = []
    return render_template("input.html")
@app.route('/output', methods=['POST', 'GET'])
def jobsearch_output():
    """Show two job postings: random matches on GET, similar jobs on POST.

    GET  -- user submitted the search form; pick 2 random jobs matching the
            requested title and state.
    POST -- user clicked a job; look up its precomputed top-50 similar jobs
            and pick 2 the user has not seen yet.

    NOTE(review): both SQL queries are built with %-interpolation of raw
    request values -- SQL injection risk; should use parameterized queries.
    NOTE(review): Python 2 code (print statements, bare excepts).
    """
    jobs_seen = []
    if request.method == 'GET':
        # empty list of jobs seen and jobs picked
        jobs_seen = []
        jobkeys_picked = []
        #pull 'job title' from input field and store it
        search_jobtitle = request.args.get('jobtitle')
        #pull 'state' from input field and store it
        search_loc = request.args.get('state')
        #print search_loc
        #just select the jobs from the indeed database for the state and jobtitle that the user inputs
        query = "SELECT jobkey, jobtitle, url, company, description FROM indeed_jobs_top50 WHERE LOWER(jobtitle) LIKE lower('%s') AND LOWER(state) LIKE lower('%s') ORDER BY RANDOM() LIMIT 2" % (search_jobtitle, search_loc)
    if request.method == 'POST':
        # pull 'jobs_seen' from button and store it
        jobs_seen = request.form['jobs_seen']
        # jobs_seen arrives as the string repr of a list; parse it back.
        jobs_seen = ast.literal_eval(jobs_seen)
        #pull 'jobkey_choice' from button and store it
        jobkey_choice = request.form['jobkey_choice']
        try:
            jobkeys_picked.append(jobkey_choice)
        except:
            jobkeys_picked = [jobkey_choice]
        print jobkey_choice
        #get text for jobkey_choice
        try:
            query_choice = "SELECT tf_lsi15_top50 FROM indeed_jobs_top50 WHERE jobkey = '%s'" % jobkey_choice
            query_result = pd.read_sql_query(query_choice,con)
            print "Got top-50 for selected job"
            # Extract the jobkeys from the stored "(key, score)" tuple text.
            top_50_similar = re.findall("\(\'([a-zA-Z0-9]+)\'\,", query_result.iloc[0]['tf_lsi15_top50'] )#.replace("{", "").replace("}","")
            print "top 5:", top_50_similar[:5]
            #print "Jobs seen:", len(jobs_seen)
            try:
                # Drop jobs the user has already been shown.
                top_50_unseen = [job for job in top_50_similar if job not in jobs_seen]
            except:
                top_50_unseen = top_50_similar
            print "Length", len(top_50_unseen)
            print "Removed jobs in history from top 50"
            #then pick the 2 most similar that are not in jobs_seen
            jobkeys_similar = random.sample(top_50_unseen, 2)
            print "Similar jobkeys to pick:", jobkeys_similar[0], jobkeys_similar[1]
            #select these two jobs from the database
            query = "SELECT jobkey, jobtitle, url, company, description FROM indeed_jobs_top50 WHERE jobkey = '%s' OR jobkey = '%s' LIMIT 2" % (jobkeys_similar[0], jobkeys_similar[1])
        except:
            print "One of the queries didn't work"
            return render_template("input.html") #, jobs_seen = jobs_seen)
    try:
        #print query
        query_results=pd.read_sql_query(query,con)
        print "Query successful"
        jobs = []
        for i in range(0,2): #query_results.shape[0]):
            try:
                # Record this job in the viewing history carried via the form.
                jobs_seen.append({'jobkey': query_results.iloc[i]['jobkey'], 'url': query_results.iloc[i]['url'], 'jobtitle': query_results.iloc[i]['jobtitle'], 'company': query_results.iloc[i]['company']})
            except:
                print "Error with appending jobs seen"
                # jobs_seen = [{'jobkey': query_results.iloc[i]['jobkey'], 'url': query_results.iloc[i]['url'], 'jobtitle': query_results.iloc[i]['jobtitle'], 'company': query_results.iloc[i]['company']}]
            print "Appended jobs seen", len(jobs_seen)
            jobs.append(dict(jobkey=query_results.iloc[i]['jobkey'], jobtitle=query_results.iloc[i]['jobtitle'], url=query_results.iloc[i]['url'], company=query_results.iloc[i]['company'], description=query_results.iloc[i]['description']))
        # Remove duplicates from jobs_seen list
        #jobs_seen = list(set(jobs_seen))
        # Serialize the history (spaces stripped) so it round-trips through the form.
        jobs_seen_str = str(jobs_seen).replace(" ", "")
        return render_template("output.html", jobs = jobs, jobs_seen = jobs_seen, jobs_seen_str = jobs_seen_str)
    except:
        print "Query unsuccesful"
        return render_template("input.html")
# Imports ###########################################################
import logging
from django.conf import settings
from django.http import HttpResponse
from django.views.generic.base import View
from utils.json import JSONResponseMixin
# Logging ###########################################################
logger = logging.getLogger(__name__)
# Exceptions ########################################################
class ErrorResponse(Exception):
    """
    Raised from an APIView method to trigger returning a formatted JSON error response
    """

    @property
    def description(self):
        """Human-readable message assembled from the exception arguments."""
        parts = self.args
        return u'; '.join(parts)
# Views #############################################################
class APIView(JSONResponseMixin, View):
    """
    Provides common handling of API view classes:

    - Converts response to JSON
    - Error handling & formatting
    - Optional progressive (streamed) responses via progressive_<verb>() methods
    """
    def dispatch(self, request, *args, **kwargs):
        """
        Dispatch the request, converting exceptions into formatted error responses.

        ErrorResponse is the deliberate "return this error" channel; any other
        exception is re-raised in DEBUG, otherwise logged and masked behind a
        generic message.
        """
        try:
            return self.dispatch_progressive(request, *args, **kwargs)
        except ErrorResponse as e:
            return self.handle_error(request, e)
        except Exception as e:
            if settings.DEBUG:
                raise
            else:
                # TODO LOCAL
                e.description = u'Sorry! An error has occurred.'
                logger.exception(e)
                return self.handle_error(request, e)
    def dispatch_progressive(self, request, *args, **kwargs):
        """
        Uses special self.progressive_<httpverb>() methods when defined, otherwise
        use the normal self.<httpverb>() method

        When defined, the progressive version is expected to return a generator, which
        will be used to generate *both* the progressive response and the continuous
        one. It should yield values progressively as the response context is being
        generated.
        """
        if request.method.lower() not in self.http_method_names:
            return self.http_method_not_allowed(request, *args, **kwargs)
        # BUG FIX: getattr() without a default raises AttributeError for views
        # that define no progressive_<verb>() method; that exception was then
        # swallowed by dispatch()'s generic handler, so the normal dispatch path
        # below was unreachable. Defaulting to None restores the intended
        # "if progressive_handler:" branching.
        progressive_handler = getattr(self, 'progressive_{0}'.format(request.method.lower()), None)
        # NOTE(review): request.REQUEST only exists on old Django versions
        # (removed in 1.9) -- confirm against the project's Django version.
        progressive_requested = request.REQUEST.get('progressive')
        if progressive_handler:
            if progressive_requested:
                return self.handle_progressive(progressive_handler, request)
            else:
                return self.handle_noprogressive_emulate(progressive_handler, request)
        else:
            if progressive_requested:
                return self.http_method_not_allowed(request, *args, **kwargs)
            else:
                return super(APIView, self).dispatch(request, *args, **kwargs)
    def handle_noprogressive_emulate(self, progressive_handler, request):
        """
        Emulated non-progressive response - iterates over a progressive generator,
        only returning its last answer

        Since raising an exception from an iterator stops it, the handler is responsible
        for catching exceptions & formatting them as one of the responses
        """
        response = None
        for progressive_response in progressive_handler(request, progressive=False):
            response = progressive_response
        return response
    def handle_progressive(self, progressive_handler, request):
        """
        Progressive answer - iterates over a progressive generator, sending responses
        to the browser progressively as they are yielded

        Since raising an exception from an iterator stops it, the handler is responsible
        for catching exceptions & formatting them as one of the responses
        """
        response = HttpResponse(progressive_handler(request, progressive=True),
                                content_type='application/json')
        response['X-Progressive-Response-Separator'] = settings.PROGRESSIVE_RESPONSE_SEPARATOR
        return response
|
# -*- coding: utf-8 -*-
import re
import datetime
import random
from flask import render_template, flash, redirect, session, url_for, request, g, jsonify, make_response
from flask.ext.babel import gettext
from app import app, db, babel
from config import COMMUNICATIONS, LANGUAGES, CURRENCIES, IsDebug, IsDeepDebug, default_communication, valid_action_types, n_a, \
getCountryID, getRegionID, getUserID, \
parseServiceId, getRegion, getCountry, getClient, isAuthorized, setReference, errorlog, demo, \
print_exception, print_action, print_to, \
UTC_EASY_TIMESTAMP
from wizard import set_platform, getPage, IsMobile, IsAndroid, IsiOS, IsiPad, IsIE, IsMSIE, IsSeaMonkey, \
IsChrome, IsFirefox, IsSafari, IsOpera
from exchange import data_encode, send, receive, getURLInfo, getXml, getDOMItemValue, getDOMErrors, \
cleanHtml, getRequestXML
from dbase import register, getLogVisibleHeaders, getLogImages, getLogTotal, getLogPage, getLogItem, \
getStatisticsVisibleHeaders, getStatictics, removeLogItem
from version import get_version
##
def getTime():
    """Return the current local time formatted with the project's UTC_EASY_TIMESTAMP pattern."""
    now = datetime.datetime.now()
    return now.strftime(UTC_EASY_TIMESTAMP)
def _validate(key, value):
if key == 'locale':
return value and value in LANGUAGES and value or 'rus'
return value
@babel.localeselector
def get_locale():
    """Select the flask-babel UI language from the ?locale= query argument.

    Returns the first element of the LANGUAGES entry for the validated key,
    falling back to DEFAUL_LANGUAGE otherwise.
    """
    key = _validate('locale', request.args.get('locale'))
    # FIX: dict.has_key() was removed in Python 3 and `in` is the idiomatic
    # membership test in Python 2 as well; behavior is unchanged.
    # NOTE(review): DEFAUL_LANGUAGE is not imported anywhere in this module --
    # likely a typo for a DEFAULT_LANGUAGE config constant; confirm.
    return key in LANGUAGES and LANGUAGES.get(key)[0] or DEFAUL_LANGUAGE
def before(f):
    """Decorator for page views: detect the client platform first and serve a
    "no IE" page to Microsoft IE or requests without a usable user agent.

    The wrapped view keeps the (mode, **kw) signature.
    """
    from functools import wraps

    @wraps(f)  # FIX: preserve the wrapped view's __name__/__doc__ metadata
    def wrapper(mode, **kw):
        set_platform(agent=request.user_agent)
        if not (request and request.user_agent) or IsMSIE():
            return make_response(render_template('noie.html'))
        return f(mode, **kw)
    return wrapper
@app.route('/', methods = ['GET', 'POST'])
@app.route('/index', methods = ['GET', 'POST'])
def _ext():
    """External entry point: collect client identifiers from the POSTed form
    and render the index page in external communication mode."""
    form = request.form
    document = form.get('documentID') or ''
    country = form.get('countryID') or ''
    region = form.get('regionID') or ''
    if region:
        # Region arrives as a composite service id and must be parsed first.
        region = parseServiceId(region)
    user = form.get('userName')
    return index_with_logger(COMMUNICATIONS['external'], document, region, country, user)
@app.route('/index.html', methods = ['GET'])
def _int():
    """Internal entry point: render the index page in internal communication mode."""
    mode = COMMUNICATIONS['internal']
    return index_with_logger(mode)
def index_with_logger(mode, document=None, region=None, country=None, user=None):
    """Call index(), logging any exception via print_exception() before re-raising."""
    try:
        return index(mode, document=document, region=region, country=country, user=user)
    except:
        print_exception()
        raise
def _make_platform(wizard, locale, user, debug=None):
    """Build the template keyword dict describing the client platform.

    Derives browser/OS/version from the request user agent, computes the CSS
    bundle suffix and navigation links, and packs everything (plus a one-line
    diagnostic 'platform' string) into the dict returned for rendering.
    """
    agent = request.user_agent
    browser = agent.browser
    os = agent.platform
    root = '%s/' % request.script_root
    # Referer/close URLs: first non-empty of several possible form fields.
    referer = request.form.get('helperHttpReferer') or request.form.get('refererURI') or request.form.get('helperURI') or ''
    close = request.form.get('closeURI') or request.form.get('refererURI') or request.form.get('helperURI') or ''
    links = {
        'calculate' : '/?wizard=%s&locale=%s' % (wizard, locale),
        'cancel' : '/?wizard=%s&locale=%s' % (wizard, locale),
        'referer' : referer,
        'close' : close,
    }
    if IsIE():
        # IE: keep only the major version, clamped to at least '6'
        # (string comparison -- assumes single-digit-or-greater version text).
        version = agent.version.split('.')[0]
        if version < '6':
            version = '6'
    elif IsSeaMonkey():
        version = ''
    else:
        version = agent.version
    authorized = isAuthorized(user)
    # CSS bundle infix chosen by browser family ('.' means the default bundle).
    css = '%s' % ( \
        IsMobile() and (IsChrome() and '.android.' or '.mobile.') or
        IsIE(10) and '.ie10.' or
        IsOpera() and '.opera.' or
        IsFirefox() and '.firefox.' or
        '.'
    )
    # NOTE: the leading `1 or` short-circuits, so is_default is always 1;
    # the device/browser condition after it is currently dead code.
    is_default = 1 or os in ('ipad', 'android',) and browser in ('safari', 'chrome',) and 1 or 0
    is_frame = not IsMobile() and 1 or 0
    # One-line diagnostic summary used in debug prints and templates.
    platform = '[os:%s, browser:%s (%s), css:%s, %s %s%s%s%s]' % ( \
        os,
        browser,
        version,
        css,
        locale,
        authorized and ' authorized' or '',
        is_default and ' default' or ' flex',
        is_frame and ' frame' or '',
        debug and ' debug' or '',
    )
    kw = { \
        'agent' : agent.string,
        'browser' : browser,
        'os' : os,
        'root' : root,
        'referer' : referer,
        'links' : links,
        'version' : version,
        'authorized' : authorized,
        'css' : css,
        'is_frame' : is_frame,
        'platform' : platform,
        'style' : { 'default' : is_default },
        'screen' : request.form.get('screen') or '',
        'scale' : request.form.get('scale') or '',
    }
    return kw
def _make_keywords():
    """Return the tuple of localized "'key':'value'" JS-dictionary fragments.

    Each element is a string of the form "'Key':'translated text'"; the
    templates join them into a client-side lookup table. Values come from
    gettext() so they follow the active locale.
    """
    return ( \
        # --------------
        # Error Messages
        # --------------
        "'Execution error':'%s'" % gettext('Execution error'),
        "'Missing parameters':'%s'" % gettext('Any parameters should be selected before the culculating.'),
        "'Session is expired':'%s'" % ( \
            gettext('Session is expired notification')+
            '<br>'+
            gettext('You will be redirected to the passage of the authorization procedure.')
        ),
        "'System is not available':'%s'" % ( \
            gettext('System is not available now.')+
            '<br>'+
            gettext('You will be redirected to the passage of the authorization procedure.')+
            '<br><br>'+
            gettext('We apologize for any inconvenience.')
        ),
        # -------
        # Buttons
        # -------
        "'Calculate':'%s'" % gettext('Calculate'),
        "'Cancel':'%s'" % gettext('Cancel'),
        "'Confirm':'%s'" % gettext('Confirm'),
        "'Reject':'%s'" % gettext('Decline'),
        "'OK':'%s'" % gettext('OK'),
        # ----
        # Help
        # ----
        "'All':'%s'" % gettext('All'),
        "'Base article':'%s'" % gettext('Base article'),
        "'Calculating autoupdate':'%s'" % gettext('Automatically update the calculations'),
        "'Calculate the cost':'%s'" % gettext('Calculate the cost'),
        "'Calculator':'%s'" % gettext('Calculator'),
        "'Cancel search':'%s'" % gettext('Cancel search'),
        "'Caption':'%s'" % gettext('Caption'),
        "'Close':'%s'" % gettext('Close'),
        "'Close page':'%s'" % gettext('Close page'),
        "'Code':'%s'" % gettext('Code'),
        "'Commands':'%s'" % gettext('Commands'),
        "'Complete name':'%s'" % gettext("Complete name"),
        "'Collapse form groups':'%s'" % gettext('Collapse form groups'),
        "'Custom':'%s'" % gettext('Custom'),
        "'Custom code':'%s'" % gettext('Custom code'),
        "'Data not found!':'%s'" % gettext('Data not found!'),
        "'Done':'%s'" % gettext('Done'),
        "'Go back':'%s'" % gettext('Go back'),
        "'Help':'%s'" % gettext('Help'),
        "'Help information':'%s'" % gettext('Help information'),
        "'Helper keypress guide':'%s'" % gettext('Helper keypress guide'),
        "'Expand form groups':'%s'" % gettext('Expand form groups'),
        "'Forward search':'%s'" % gettext('Forward search'),
        "'Full screen':'%s'" % gettext('Full screen'),
        "'Icon Toolbar':'%s'" % gettext('Icon Toolbar'),
        "'Load selected order':'%s'" % gettext('Load selected order'),
        "'Make an order':'%s'" % gettext('Make an order'),
        "'Monitor price changes':'%s'" % gettext('Monitor price changes'),
        "'No data...':'%s'" % gettext('No data...'),
        "'Open Calculate form':'%s'" % gettext('Open Calculate form'),
        "'Open Log page':'%s'" % gettext('Open Log page'),
        "'Open Order form':'%s'" % gettext('Open Order form'),
        "'Open Price statistics':'%s'" % gettext('Open Price statistics'),
        "'Open Save form':'%s'" % gettext('Open Save form'),
        "'Open Search panel':'%s'" % gettext('Open Search panel'),
        "'Options':'%s'" % gettext('Options'),
        "'Order confirmation form':'%s'" % gettext('Order confirmation form'),
        "'Order log':'%s'" % gettext('Order log'),
        "'Order type':'%s'" % gettext('Order type'),
        "'Price article':'%s'" % gettext('Price article'),
        "'Save confirmation form':'%s'" % gettext('Save confirmation form'),
        "'Search':'%s'" % gettext('Search'),
        "'Search context':'%s...'" % gettext('Search context'),
        "'Search Icon buttons':'%s'" % gettext('Search Icon buttons'),
        "'Standard':'%s'" % gettext('Standard'),
        "'System information':'%s'" % gettext('System information'),
        "'Total':'%s'" % gettext('Total'),
        # --------------------
        # Flags & Simple Items
        # --------------------
        "'error':'%s'" % gettext('error'),
        "'yes':'%s'" % gettext('Yes'),
        "'no':'%s'" % gettext('No'),
        "'n_a':'%s'" % n_a,
        "'true':'%s'" % 'true',
        "'false':'%s'" % 'false',
        # ------------------------
        # Miscellaneous Dictionary
        # ------------------------
        "'confirmation request':'%s'" % gettext("Type the following operation result:"),
        "'cost form':'%s'" % gettext('Cost notification form'),
        "'cost':'%s'" % gettext('The cost'),
        "'client':'%s'" % gettext('Client'),
        "'':'%s'" % gettext(''),
        "'estimated cost':'%s'" % gettext("The cost is estimated."),
        "'not calculated':'%s'" % gettext('not calculated'),
        "'not found':'%s'" % gettext('not found'),
        "'no client':'%s'" % gettext('unauthorized client'),
        "'order form':'%s'" % gettext('Order notification form'),
        "'order confirmation':'%s'" % gettext('Are you really going to create an order?'),
        "'ordered success':'%s'" % gettext('Successfully created.'),
        "'ordered fail':'%s'" % gettext("The order was not created by the error."),
        "'please confirm':'%s.'" % gettext('Please, confirm'),
        "'product version':'%s'" % gettext('Product Version'),
        "'save form':'%s'" % gettext('Save notification form'),
        "'save confirmation':'%s'" % gettext('Are you really going to save changes of the order?'),
        "'saved success':'%s'" % gettext('Successfully saved.'),
        "'saved fail':'%s'" % gettext("The order was not saved by the error."),
        "'shortcut version':'%s'" % get_version('shortcut'),
    )
def _make_exchange(mode, locale, country, region, user, document=None):
    """Build the exchange dict describing the client/service session.

    Collects session/security tokens (encoded), user and location IDs, and
    service parameters from the POSTed form, substituting `mode` as the
    default communication type and `n_a` for missing IDs.
    """
    return { \
        'communication' : request.form.get('helperCommunicationType', mode) or default_communication,
        # Tokens are passed through data_encode() before being exposed to templates.
        'sessionID' : data_encode(request.form.get('sessionID') or ''),
        'securityID' : data_encode(request.form.get('securityID') or ''),
        'userID' : getUserID(request.form.get('userID')),
        'userTypeID' : request.form.get('userTypeID', ''),
        'userLogin' : request.form.get('userLogin', ''),
        'userPswd' : request.form.get('userPswd', ''),
        'httpHost' : request.form.get('httpHost', ''),
        'webResource' : request.form.get('webResource', ''),
        'documentID' : document or n_a,
        'countryID' : country or n_a,
        'regionID' : region or n_a,
        'WebServiceURL' : request.form.get('WebServiceURL', ''),
        'currencyID' : request.form.get('currencyID', ''),
        'priceTypeID' : request.form.get('priceTypeID', ''),
        # Human-readable names: prefer the form values, fall back to lookups.
        'countryName' : request.form.get('countryName') or getCountry(country),
        'regionName' : request.form.get('regionName') or getRegion(region),
        'clientName' : getClient(user, locale),
    }
def _make_page(wizard, locale, init_only=False):
    """Build the wizard page object, or flash a note and return None when no wizard is given."""
    if not wizard:
        flash('Note! The argument "wizard" should be presented.')
        return None
    translations = { \
        'Yes' : gettext('Yes'),
        'Count' : gettext('Count'),
        'Value' : '...',
    }
    return getPage(wizard, locale, agent=request.user_agent, init_only=init_only, keywords=translations)
@before
def index(mode, **kw):
    """Render the main helper/configurator page.

    Gathers request/session parameters, builds the exchange dict, the wizard
    page and platform info, stores session keys, and delegates rendering to
    respond(). `mode` is the communication type (external/internal).
    NOTE(review): Python 2 print statements in the debug block.
    """
    host = request.form.get('host') or request.form.get('helperURI') or request.host_url
    debug = request.args.get('debug') == '1' and True or False
    demo(request.args.get('demo'), request=request)
    setReference(host, debug)
    # Default wizard when none is requested.
    wizard = request.args.get('wizard', 'BillGateDUS120') #'StreetGateDUS210'
    locale = _validate('locale', request.args.get('locale'))
    country = kw.get('country')
    region = kw.get('region')
    user = kw.get('user')
    document = kw.get('document')
    exchange = _make_exchange(mode, locale, country, region, user, document)
    page = _make_page(wizard, locale, init_only=False)
    # NOTE: kw is rebound here -- the original keyword args are consumed above.
    kw = _make_platform(wizard, locale, user, debug)
    kw['extra'] = { \
        'selected_action' : request.form.get('selected_action') or 'null',
        'selected_item' : request.form.get('selected_item') or 'null',
    }
    forms = ('index', 'log',)
    keywords = _make_keywords()
    # Persist the negotiated context for subsequent /log and /loader requests.
    session['communication'] = exchange['communication']
    session['session'] = exchange['sessionID']
    session['security'] = exchange['securityID']
    session['browser'] = kw['browser']
    session['locale'] = locale
    session['wizard'] = wizard
    session['name'] = page and page.get_title(False) or ''
    kw.update({ \
        'title' : gettext('Helper Configurator Main Page'),
        'host' : host,
        'wizard' : wizard,
        'locale' : locale,
        'exchange' : exchange,
        'keywords' : keywords,
        'forms' : forms,
        'page' : page,
    })
    if IsDebug:
        print '$$$'
        print '%s: %s %s %s %s session:[%s]' % (getTime(), host, wizard, locale, exchange['clientName'], exchange['sessionID'])
        print '%s: %s %s communication:[%s]' % (getTime(), kw['browser'], kw['agent'], exchange['communication'])
        print '%s: %s demo:%s' % (getTime(), kw['platform'], demo())
    return respond(template='index.html', debug=debug, **kw)
@app.route('/log', methods = ['GET', 'POST'])
def log_with_logger():
    """Route for the order-log page; logs exceptions before re-raising.

    Direct hits with no referrer are bounced back to the external index.
    """
    if not request.referrer:
        return redirect(url_for('_ext', _method='GET', **request.args))
    try:
        return log(0)
    except:
        print_exception()
        raise
@before
def log(mode, **kw):
    """Render the order-log page.

    Rebuilds the exchange/platform context from the form and session (unlike
    index(), which reads mostly from query args) and renders log.html.
    The `mode` argument is immediately overwritten from the form/session.
    """
    host = request.form.get('host') or request.form.get('helperURI') or request.host_url
    debug = request.args.get('debug') == '1' and True or False
    demo(request.args.get('demo'), request=request)
    setReference(host, debug)
    mode = request.form.get('helperCommunicationType', session.get('communication'))
    wizard = request.args.get('wizard', request.form.get('wizardID', session.get('wizard')))
    locale = _validate('locale', request.form.get('currentLocale', session.get('locale')))
    country = request.form.get('countryID', n_a)
    region = request.form.get('regionID', n_a)
    user = request.form.get('userID', n_a)
    exchange = _make_exchange(mode, locale, country, region, user=request.form.get('clientName', n_a))
    exchange['wizard'] = wizard
    page = _make_page(wizard, locale, init_only=True)
    # NOTE: kw is rebound -- incoming keyword args are not used past this point.
    kw = _make_platform(wizard, locale, user, debug)
    # Strip the trailing 'index'/'log' segment to get the close-URL.
    # NOTE(review): the inline (?i) flag at a non-start position is an error on
    # Python >= 3.11 -- fine on the Python 2 this module targets; confirm.
    kw['links']['close'] = re.sub(r'index|log(?i)', '', request.referrer or request.url)
    kw['extra'] = { \
        'log_headers' : getLogVisibleHeaders(),
        'statistics_headers' : getStatisticsVisibleHeaders(),
        'images' : getLogImages(),
    }
    forms = ('load', 'search',)
    keywords = _make_keywords()
    kw.update({ \
        'title' : gettext('Helper Configurator Order Log Page'),
        'host' : host,
        'wizard' : wizard,
        'locale' : locale,
        'exchange' : exchange,
        'keywords' : keywords,
        'forms' : forms,
        'page' : page,
    })
    return respond('log.html', debug, **kw)
def respond(template='', debug=None, **kw):
    """Render `template` with the full context assembled by index()/log().

    Expects kw to contain the platform dict from _make_platform() plus the
    title/host/wizard/locale/exchange/keywords/forms/page entries added by
    the callers; missing optional entries default to empty values.
    """
    locale = kw.get('locale', '')
    host = kw.get('host', '')
    exchange = kw.get('exchange', {})
    keywords = kw.get('keywords', {})
    page = kw.get('page', None)
    response = make_response(render_template(template, \
        # ------------------
        # Communication Mode
        # ------------------
        authorized = kw['authorized'],
        external = exchange['communication'] == COMMUNICATIONS['external'],
        internal = exchange['communication'] == COMMUNICATIONS['internal'],
        exchange = exchange,
        language = get_locale(),
        # -------------
        # Client System
        # -------------
        agent = kw['agent'],
        platform = kw['platform'],
        locale = locale,
        host = host,
        os = kw['os'],
        browser = IsIE() and 'ie' or kw['browser'],
        version = request.args.get('_v', '') or kw['version'] or '',
        css = kw['css'],
        screen = kw['screen'],
        scale = kw['scale'],
        style = kw['style'],
        is_frame = kw['is_frame'],
        is_demo = demo(),
        # -----------------
        # Page & Properties
        # -----------------
        title = kw.get('title'),
        name = page and page.get_title() or gettext('Helper Configurator Main Page'),
        article = page and page.get_article() or '',
        image = page and page.get_image() or '',
        root = kw['root'],
        # Loader URL carries the locale and optional debug flag through to AJAX calls.
        loader = '/loader?locale=%s%s' % (locale, debug and '&debug=1' or ''),
        referer = kw['referer'],
        base = page and page.get_base() or '',
        uri = page and page.get_uri() or {},
        forms = kw['forms'],
        document = '',
        price = gettext(n_a),
        links = kw['links'],
        keywords = keywords,
        page = page,
        # -------------
        # Extra entries
        # -------------
        extra = kw.get('extra', None),
    ))
    #response.headers['content-length'] = str(len(response.data)+419)
    return response
@app.route('/loader', methods = ['GET', 'POST'])
def loader_with_logger():
    """AJAX dispatcher route: delegate to loader(), logging exceptions before re-raising.

    Codes of action:
      100 - confirmation for an order
      203 - base calculation
      204 - custom calculation
      205 - save of state
      206 - finalize (internal)
      207 - order
      208 - close of session
      209 - cancel (Flash only)
      210 - continue (Flash only)
      301 - load log page
      302 - load selected log item content
      303 - load and refresh saved calculation
      304 - custom calculation (log+db)
      305 - price statistics
      307 - order (log+db)
      308 - remove log item
    """
    try:
        return loader()
    except:
        print_exception()
        raise
def loader():
    """Handle one AJAX action code and return its JSON response.

    Routes the numeric `action` (see loader_with_logger's table) to the DB
    layer (3xx codes), the external service, or the internal service,
    depending on the communication type. Returns '' for missing/invalid
    input, otherwise a jsonify()'d response dict.
    NOTE(review): Python 2 print statements throughout the debug blocks.
    """
    if not request.form.get('wizardID'):
        return ''
    host = request.form.get('host')
    debug = request.args.get('debug') == '1' and True or False
    demo(request.form.get('demo'), request=request)
    setReference(host, debug)
    if IsDebug:
        start = datetime.datetime.now()
        print '--> Started at %s' % start.strftime('%H:%M:%S:%f')
    wizard = request.form.get('wizardID') or session.get('wizard')
    action = request.form.get('action') or '204'
    check = request.form.get('check') or ''
    communication = request.form.get('helperCommunicationType') or default_communication
    locale = request.form.get('currentUsedLocalization') or session.get('locale')
    exchange_error = 0
    if IsDebug:
        print '--> action: [%s]' % action
        print '--> communication: [%s]' % communication
        print '--> demo: [%s]' % demo()
        print '--> host: [%s]' % host
        print '--> wizard: [%s]' % wizard
    response = {}
    if not (action and action in valid_action_types):
        return ''
    # Authorization / demo-mode confirmation checks gate the order actions.
    IsChecked = True
    if not isAuthorized(request.form.get('userID')):
        exchange_error = -4
        exchange_message = gettext('Sorry, you are not authorized.')
        IsChecked = False
    if IsChecked and action in ('207','307') and demo() and not _check(wizard, check):
        exchange_error = -5
        exchange_message = gettext('Sorry, the confirmation code is invalid.')+'<br>'+gettext('Please, try more.')
        IsChecked = False
    # NOTE(review): if both checks pass but no branch below fills `response`,
    # the fallback dict references exchange_message, which is only assigned in
    # the two failure branches above -- potential NameError; confirm.
    if action in ('301','302','303','304','305','307','308',):
        if IsChecked:
            # -------------------------------------------------------------------------------------
            # Get Data from DB (XML:data, DOM:dom, parsed XML-Items:items or Log-page content:data)
            # -------------------------------------------------------------------------------------
            response = respond_log(action)
            if not (response and response.get('data')):
                pass
            elif action in ('303','304','307',):
                requested_action = str(int(action)-100)
                # --------------------------------------------------
                # Get XML (update tags for a new request to Service)
                # --------------------------------------------------
                check, data = getRequestXML(requested_action, request, session, data=response.get('items'), dom=response.get('dom'),
                                            title=response.get('title'), page='log',
                                            url=host)
                # ----------------------------------------
                # Send request to Service and get response
                # ----------------------------------------
                response = respond_external(requested_action, data=data, id=response.get('id'))
                response['action'] = action
                # -------------------------------------------------------------
                # Get Data Dictionary (!!!) from response to send to Controller
                # -------------------------------------------------------------
                if action == '303':
                    dom, data = receive(action, request, session, data=response.get('data'))
                    response['data'] = data
                    response['dom'] = None
                    response['items'] = None
            else:
                # --------------------------------------
                # Remove DOM from response to Controller
                # --------------------------------------
                response['dom'] = None
                response['items'] = None
    elif communication == COMMUNICATIONS['external'] or not communication:
        if IsDebug:
            print '--> check: [%s] = %s' % (check, IsChecked)
        if action == '100':
            # -------------------
            # Validate the action
            # -------------------
            if not demo():
                response = {'x1' : None, 'x2' : None, 'op' : None}
            else:
                response = _set_checker(wizard)
        elif action in ('203','204','205',):
            # ---------------------------------------------------------------
            # Send External request to Service and get response (calculation)
            # ---------------------------------------------------------------
            response = respond_external(action)
        elif action in ('207',) and IsChecked:
            # ---------------------------------------------------------
            # Send External request to Service and get response (order)
            # ---------------------------------------------------------
            response = respond_external(action)
            if response:
                response['data'] = ''
    elif communication == COMMUNICATIONS['internal']:
        if action == '100':
            # -------------------
            # Validate the action
            # -------------------
            response = {'x1' : None, 'x2' : None, 'op' : None}
        elif action in ('203','204','205','206',):
            # -------------------------------------------------
            # Send Internal request to Service and get response
            # -------------------------------------------------
            response = respond_internal(action)
    if not response:
        response = { \
            'action' : action,
            'op' : '',
            # -------------------------------
            # Not authorized or check invalid
            # -------------------------------
            'exchange_error' : exchange_error,
            'exchange_message' : exchange_message,
            'error_code' : '',
            'error_description' : '',
            'errors' : '',
            # -----------------
            # Results (no data)
            # -----------------
            'price' : '',
            'data' : '',
        }
    if IsDebug:
        finish = datetime.datetime.now()
        t = finish - start
        print '--> Finished at %s' % finish.strftime('%H:%M:%S:%f')
        print '--> Spent time: %s sec' % ((t.seconds*1000000+t.microseconds)/1000000)
    return jsonify(response)
def _random(min, max):
return ('00'+str(random.randint(min, max)))[-2:]
def _set_checker(wizard):
    """Create a demo-mode arithmetic challenge and store it in the session.

    Picks two distinct zero-padded numbers in [1, 20] and a '+' or '-'
    operator, stores "x1:x2:op" under 'checker:<wizard>' in the session, and
    returns the parts for the client to display. _check() verifies the answer.
    """
    _min = 1
    _max = 20
    x1 = _random(_min, _max)
    x2 = -1
    # Re-draw until x2 differs from x1 (the -1 sentinel forces one iteration).
    while x2 == -1 or x2 == x1:
        x2 = _random(_min, _max)
    # Order so x1 >= x2; values are two-char zero-padded strings, so
    # lexicographic max/min matches numeric order for this range.
    x1, x2 = (max(x1, x2), min(x1, x2))
    op = random.randint(0, 1) and '+' or '-'
    checker_id = 'checker:%s' % wizard
    checker_value = '%s:%s:%s' % (x1, x2, op)
    if IsDebug:
        print '--> set session checker: [%s, %s]' % (checker_id, checker_value)
    session[checker_id] = checker_value
    return { \
        'x1' : x1,
        'x2' : x2,
        'op' : op,
    }
def _check(wizard, value):
    """Verify the user's answer to the session challenge set by _set_checker().

    Returns True only when `value` parses as an int equal to x1+x2 or x1-x2
    (per the stored operator); any parse failure or missing session entry
    yields False.
    """
    if len(value) == 0:
        return False
    checker_id = 'checker:%s' % wizard
    checker_value = session.get(checker_id)
    if IsDebug:
        print '--> get session checker: [%s, %s]' % (checker_id, checker_value)
    try:
        # checker_value is "x1:x2:op"; a missing entry (None) raises here too.
        x1, x2, op = checker_value.split(':')
        x1 = int(x1)
        x2 = int(x2)
        result = int(value)
    except:
        return False
    if op == '+':
        return x1+x2 == result and True or False
    if op == '-':
        return x1-x2 == result and True or False
    return False
def _demo_price(action):
return 0, '', float(1000)*random.random(), 'USD'
def _demo_order(action):
return action == '207' and ('ABC' + str(int(float(1000000)*random.random())), getTime(), ) or ('', '',)
def _order(dom):
    """Extract the created order's number and date from the service response DOM.

    Returns {'number': ..., 'date': ...} read from the documentNumber and
    documentDate items.
    """
    number = getDOMItemValue(dom, 'documentNumber')
    date = getDOMItemValue(dom, 'documentDate')
    if IsDebug:
        print '--> Number: [%s]' % number
        print '--> Date: [%s]' % date
    return { 'number' : number, 'date' : date, }
def respond_internal(action, **kw):
    """Handle an internal-communication exchange step and build the response dict.

    op='get' generates the request XML for the client-side JavaScript;
    op='set' receives the service's XML, extracts price/error details (or
    fabricates them in demo mode) and formats the price string.

    NOTE(review): `price` is only assigned in the op='set' branch and `errors`
    only in 'get'/'set', yet both appear in the returned dict -- the empty-op
    and 'get' paths would raise NameError. Presumably only 'set' reaches the
    full return in practice; confirm.
    """
    exchange_error = 0
    exchange_message = ''
    error_code = ''
    error_description = ''
    response = {}
    dom = None
    data = ''
    currency = ''
    order_number = ''
    order_date = ''
    print_action(action, 'Respond.Internal')
    op = request.form.get('op') or kw.get('op')
    if IsDebug:
        print '--> op: [%s]' % op
    if not op:
        pass
    elif op == 'get':
        # --------------------------------------------------------------
        # Generate and Send XML to Service (from WEB-form to JavaScript)
        # --------------------------------------------------------------
        data = getXml(action, request, session)
        errors = []
    elif op == 'set':
        # -----------------------------------------------
        # Receive XML from Service (loaded by JavaScript)
        # -----------------------------------------------
        dom, data = receive(action, request, session, **kw)
        if demo():
            exchange_error, exchange_message, total, currency = _demo_price(action)
            order_number, order_date = _demo_order(action)
        else:
            total = float(getDOMItemValue(dom, 'total'))
            currency = CURRENCIES.get(getDOMItemValue(dom, 'currency'), gettext('undefined'))
            error_code = getDOMItemValue(dom, 'errorCode').strip()
            error_description = getDOMItemValue(dom, 'errorDescription')
        errors = getDOMErrors(dom) or ''
        if IsDebug:
            print '--> Total: %s %s' % (total, currency)
        total = '%.2f' % total or ''
        price = '%s %s' % (total, currency) or ''
    return { \
        'action' : action,
        'op' : op,
        # --------------
        # Service Errors
        # --------------
        'exchange_error' : exchange_error,
        'exchange_message' : exchange_message,
        'error_code' : error_code,
        'error_description' : error_description,
        'errors' : errors,
        # ---
        # IDs
        # ---
        'countryID' : getDOMItemValue(dom, 'countryID') or '',
        'regionID' : getDOMItemValue(dom, 'regionID') or '',
        'userID' : getUserID(getDOMItemValue(dom, 'userID')),
        # --------------
        # Client Details
        # --------------
        'country_name' : getDOMItemValue(dom, 'countryName') or getCountry(getDOMItemValue(dom, 'countryID')),
        'region_name' : getDOMItemValue(dom, 'regionName') or getRegion(getDOMItemValue(dom, 'regionID')),
        'client_name' : getClient(getDOMItemValue(dom, 'userName')),
        # ------------------------------
        # Results (Price & XML-Response)
        # ------------------------------
        'price' : price,
        'data' : data,
    }
def respond_external(action, **kw):
    """Exchange `action` with the external pricing service and build the
    response dict for rendering.

    Sends the current request to the remote service, extracts price/order
    information from the returned DOM, registers valid responses in the DB,
    and returns a flat dict of everything the caller needs.

    NOTE(review): relies on module-level names (`request`, `session`,
    `send`, `register`, `demo`, the getDOM* helpers, debug flags) that are
    defined elsewhere in this module — verify against the full file.
    """
    # Accumulators; most stay empty strings when the exchange fails.
    exchange_error = 0
    exchange_message = ''
    error_code = ''
    error_description = ''
    errors = ''
    response = {}
    dom = None
    data = ''
    total = ''
    price = ''
    currency = ''
    document = ''
    order_number = ''
    order_date = ''
    print_action(action, 'Respond.External')
    locale = request.form.get('currentUsedLocalization') or ''
    wizard = request.form.get('wizardID') or ''
    try:
        # -------------------------------------
        # Get DOM and XML response from Service
        # -------------------------------------
        exchange_error, exchange_message, dom, data = send(action, request, session, **kw)
        if demo():
            # Demo mode: fabricate price and order info locally.
            exchange_error, exchange_message, total, currency = _demo_price(action)
            order_number, order_date = _demo_order(action)
        elif exchange_error:
            total = 0.0
        elif dom is not None:
            # Pull price, currency and error details out of the response DOM.
            total = float(getDOMItemValue(dom, 'total') or '0')
            currency = CURRENCIES.get(getDOMItemValue(dom, 'currency'), gettext('undefined'))
            error_code = getDOMItemValue(dom, 'errorCode').strip()
            error_description = getDOMItemValue(dom, 'errorDescription')
            if action == '207':
                # Action 207 carries order registration data.
                order = _order(dom)
                order_number = order.get('number', '')
                order_date = order.get('date', '')
        elif data:
            # No DOM but raw data: fall back to the submitted form price.
            # NOTE(review): 1.288726 looks like a hard-coded currency/markup
            # factor — confirm its origin before touching it.
            x = request.form.get('price')
            total = x and float(x.split()[0])*1.288726 or 0
    except:
        msg = '--> Send error!'
        print_to(errorlog, [msg, data], request=request)
        # ----------------------
        # Service Exchange Error
        # ----------------------
        if IsDeepDebug:
            print msg
        raise
    #print_to(errorlog, ['>>> Data:', data])
    IsValid = data and True or False
    # Suppress messages/descriptions that merely duplicate their codes.
    if exchange_message and exchange_message == exchange_error:
        exchange_message = ''
    if error_description and error_description == error_code:
        error_description = ''
    # -----------------
    # Response is valid
    # -----------------
    if IsValid:
        errors = getDOMErrors(dom) or ''
        if IsDeepDebug:
            print errors
        if currency in ('undefined', n_a, '') or not total:
            # No usable price: invalidate the response. Action 203
            # tolerates an empty/zero error code without a user message.
            if not exchange_message:
                if action == '203' and error_code in ('', '0',):
                    pass
                else:
                    exchange_message = gettext('Calculation is not performed.')
            IsValid = False
    total = IsValid and '%.2f' % total or ''
    price = IsValid and '%s %s' % (total, currency) or ''
    if IsDebug:
        print '--> Total: %s' % price
    document = order_number and ('# %s %s %s' % (order_number, gettext('at'), order_date)) or ''
    # -------------------------------------------
    # Make parameters and Register response in DB
    # -------------------------------------------
    attrs = { \
        'locale' : locale,
        'selected_item' : kw.get('id') or request.form.get('selected_item'),
        'title' : request.form.get('title') or '',
        'document' : document or action,
        'total' : total,
        'currency' : currency,
        'countryID' : getCountryID(getDOMItemValue(dom, 'countryID'), locale),
        'regionID' : getRegionID(getDOMItemValue(dom, 'regionID'), locale),
        'userID' : getUserID(getDOMItemValue(dom, 'userID'), locale),
        'userName' : getClient(getDOMItemValue(dom, 'userName'), locale),
        'wizardID' : wizard,
        'wizardName' : request.form.get('wizardName') or '',
        'custom_code' : request.form.get('custom_code') or '',
        'option_update' : request.form.get('option_update') or '',
        'option_cost' : request.form.get('option_cost') or '',
        'data' : data, #getDOMTagStrippedValue(dom, 'parameters'),
    }
    #print_to(errorlog, ['>>> Total:', total])
    if IsValid and action in ('203','204','205','207',): # and dom
        response = register(action, dom, attrs)
        if IsDeepDebug:
            print '>>> DB Response:%s' % response
    # For action 205 the order reference is the registered custom code.
    order = action == '205' and response.get('custom_code') or document
    return { \
        'action' : action,
        'op' : '',
        # --------------
        # Service Errors
        # --------------
        'exchange_error' : exchange_error,
        'exchange_message' : exchange_message,
        'error_code' : error_code,
        'error_description' : error_description,
        'errors' : errors,
        # ---
        # IDs
        # ---
        'countryID' : getDOMItemValue(dom, 'countryID') or '',
        'regionID' : getDOMItemValue(dom, 'regionID') or '',
        'userID' : attrs['userID'],
        # --------------
        # Client Details
        # --------------
        'country_name' : getDOMItemValue(dom, 'countryName') or getCountry(getDOMItemValue(dom, 'countryID'), locale),
        'region_name' : getDOMItemValue(dom, 'regionName') or getRegion(getDOMItemValue(dom, 'regionID'), locale),
        'client_name' : attrs['userName'],
        # ----------
        # Order Info
        # ----------
        'document_number' : order_number,
        'document_date' : order_date,
        'order' : order,
        # -------
        # DB Data
        # -------
        'total_log_rows' : getLogTotal({'userID' : attrs['userID'], 'wizardID' : wizard}),
        'custom_code' : response.get('custom_code', ''),
        'next_custom_code' : response.get('next_custom_code', ''),
        'option_update' : response.get('option_update', ''),
        'option_cost' : response.get('option_cost', ''),
        'title' : response.get('title', ''),
        # ------------------------------
        # Results (Price & XML-Response)
        # ------------------------------
        'price' : price,
        'data' : data,
    }
def respond_log(action):
    """Serve the DB log (3xx action family) and build the response dict.

    Actions: 301 = log page, 305 = statistics, 308 = remove item,
    302/303/304/307 = single log item (302 returns a parsed dictionary,
    the others the raw XML plus parsed items).
    """
    exchange_error = 0
    exchange_message = ''
    page = None
    current_page, pages, per_page, has_prev, has_next, total, id = (0, 0, 0, False, False, 0, None)
    iter_pages = []
    order = None
    data = None
    dom = None
    items = None
    title = ''
    print_action(action, 'Respond.Log')
    locale = request.form.get('currentUsedLocalization') or ''
    try:
        # ----------------
        # Get Data from DB
        # ----------------
        if not action:
            pass
        elif action == '301':
            exchange_error, exchange_message, page, data = getLogPage(action, request, session)
        elif action == '305':
            exchange_error, exchange_message, id, total, data = getStatictics(action, request, session)
        elif action == '308':
            exchange_error, exchange_message, page, data = removeLogItem(action, request, session)
        else: #if action in ('302','303','304','307',):
            exchange_error, exchange_message, id, order = getLogItem(action, request, session)
    except:
        msg = '--> Database error!'
        # -------------
        # DB Log failed
        # --------------
        print_to(errorlog, msg, request=request)
        raise
    if not (action and action in valid_action_types):
        pass
    elif action in ('301','308',):
        # --------------------
        # Get Log-page content
        # --------------------
        if page:
            current_page, pages, per_page, has_prev, has_next, total = page.get_page_params()
            iter_pages = page.iter_pages()
        if IsDebug:
            print '--> LogPage[%s]: items:[%s] %s-%s-%s-%s iter:%s' % ( \
                current_page, len(data), pages, per_page, has_prev, has_next, iter_pages)
    elif action in ('302',):
        # ------------------------------
        # DOM and XML-Items (Dictionary)
        # ------------------------------
        dom, data = receive(action, request, session, data=order.data) #, products_sorted=True
        if IsDebug:
            print '--> LogPageItem[%s]: Data %s, Products %s, Parameters %s' % ( \
                id, len(order.data), len(data['products']), len(data['parameters']))
    elif action in ('303','304','307',):
        # ---------
        # Clean XML
        # ---------
        data = order.data #_data(coding='encode')
        # ------------------------------
        # DOM and XML-Items (Dictionary)
        # ------------------------------
        dom, items = receive(action, request, session, data=data)
        title = order.title
        if IsDebug:
            print '--> Loaded LogPageItem[%s]: Data %s, Products %s, Parameters %s' % ( \
                id, len(order.data), len(items['products']), len(items['parameters']))
    elif action in ('305',):
        if IsDebug:
            print '--> LogStatictics[%s]: Data %s' % ( \
                id, data and len(data), )
    if exchange_error:
        if IsDebug:
            print '--> ExchangeError[%s]: %s' % (exchange_error, exchange_message)
    return { \
        'action' : action,
        'op' : '',
        # --------------
        # Service Errors
        # --------------
        'exchange_error' : exchange_error,
        'exchange_message' : exchange_message,
        # -----------------------------
        # Results (Log page parameters)
        # -----------------------------
        'id' : id,
        # NOTE(review): data stays None when no action matched above —
        # len(None) would raise; presumably valid callers always hit a
        # branch that sets it. Confirm.
        'rows_on_page' : len(data),
        'total' : total,
        'page' : current_page,
        'pages' : pages,
        'per_page' : per_page,
        'has_prev' : has_prev,
        'has_next' : has_next,
        'iter_pages' : iter_pages,
        # --------------------------
        # Results (Log page content)
        # --------------------------
        'custom_code' : order and order.code or '',
        'title' : title,
        'price' : '',
        'data' : data,
        'dom' : dom,
        'items' : items,
    }
|
import math
import random
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from pytorch_msssim import ms_ssim, ssim
def quantize_per_tensor(t, bit=8, axis=-1):
    """Uniformly quantize tensor ``t`` to ``bit`` bits.

    Min/max statistics are taken over the non-zero entries only, either
    globally (``axis == -1``) or per-slice along dim 0 or dim 1 (2-D and
    4-D tensors supported for the per-axis cases).

    Returns:
        (quant_t, new_t): integer quantization levels and the
        dequantized (reconstructed) tensor.
    """
    if axis == -1:
        nonzero = t != 0
        t_min = t[nonzero].min()
        t_max = t[nonzero].max()
        scale = (t_max - t_min) / 2 ** bit
    elif axis == 0:
        bounds = []
        for row in t:
            mask = row != 0
            bounds.append([row[mask].min(), row[mask].max()] if mask.sum() else [0, 0])
        bounds = torch.tensor(bounds).to(t.device)
        scale = (bounds[:, 1] - bounds[:, 0]) / 2 ** bit
        if t.dim() == 4:
            scale = scale[:, None, None, None]
            t_min = bounds[:, 0, None, None, None]
        elif t.dim() == 2:
            scale = scale[:, None]
            t_min = bounds[:, 0, None]
    elif axis == 1:
        bounds = []
        for j in range(t.size(1)):
            col = t[:, j]
            mask = col != 0
            bounds.append([col[mask].min(), col[mask].max()] if mask.sum() else [0, 0])
        bounds = torch.tensor(bounds).to(t.device)
        scale = (bounds[:, 1] - bounds[:, 0]) / 2 ** bit
        if t.dim() == 4:
            scale = scale[None, :, None, None]
            t_min = bounds[None, :, 0, None, None]
        elif t.dim() == 2:
            scale = scale[None, :]
            t_min = bounds[None, :, 0]
    # Tiny epsilon guards against division by zero for constant slices.
    quant_t = ((t - t_min) / (scale + 1e-19)).round()
    new_t = t_min + scale * quant_t
    return quant_t, new_t
def all_gather(tensors):
    """
    All-gather every tensor in *tensors* from all processes across machines.

    Args:
        tensors (list): tensors to all-gather across every process.

    Returns:
        list: one tensor per input, the per-process pieces concatenated
        along dim 0.
    """
    world_size = dist.get_world_size()
    gathered = []
    for tensor in tensors:
        slots = [torch.ones_like(tensor) for _ in range(world_size)]
        dist.all_gather(slots, tensor, async_op=False)
        gathered.append(torch.cat(slots, dim=0))
    return gathered
def all_reduce(tensors, average=True):
    """
    All-reduce (sum) every tensor in *tensors* in place across processes.

    Args:
        tensors (list): tensors to reduce across every process.
        average (bool): when True, divide each reduced tensor by the
            world size so the result is the mean instead of the sum.

    Returns:
        list: the same (mutated) tensors.
    """
    for tensor in tensors:
        dist.all_reduce(tensor, async_op=False)
    if average:
        inv_world = 1.0 / dist.get_world_size()
        for tensor in tensors:
            tensor.mul_(inv_world)
    return tensors
class PositionalEncoding(nn.Module):
    """Sinusoidal frequency embedding for scalar positions.

    ``pe_embed`` is either 'none' (positions pass through as a (N, 1)
    column) or a '<base>_<levels>' string, e.g. '1.25_6': level i maps
    pos -> (sin(pos * base**i * pi), cos(pos * base**i * pi)), giving an
    output of shape (N, 2 * levels).
    """

    def __init__(self, pe_embed):
        super(PositionalEncoding, self).__init__()
        self.pe_embed = pe_embed.lower()
        if self.pe_embed == 'none':
            self.embed_length = 1
        else:
            base_str, levels_str = pe_embed.split('_')
            self.lbase = float(base_str)
            self.levels = int(float(levels_str))
            self.embed_length = 2 * self.levels

    def forward(self, pos):
        if self.pe_embed == 'none':
            return pos[:, None]
        pe_list = []
        for i in range(self.levels):
            value = pos * (self.lbase ** i) * math.pi
            pe_list.append(torch.sin(value))
            pe_list.append(torch.cos(value))
        return torch.stack(pe_list, 1)
def psnr2(img1, img2):
    """Element-wise PSNR between two images with values in [0, 1].

    Computes -10 * log10((img1 - img2)**2) per element and clamps to
    [0, 50] (identical pixels would otherwise give +inf).

    Note: no mean reduction is applied — the result has the same shape
    as the inputs. Fix: removed the unused ``PIXEL_MAX`` local and the
    misleading ``mse`` name (the value was per-element squared error,
    never averaged).
    """
    sq_err = (img1 - img2) ** 2
    psnr = -10 * torch.log10(sq_err)
    return torch.clamp(psnr, min=0, max=50)
def loss_fn(pred, target, args):
    """Training loss selected by ``args.loss_type``.

    Supports the plain 'L1' / 'L2' / 'SSIM' losses and the 'FusionN'
    family, each a fixed weighted combination ``wa * A + wb * B`` of two
    base losses (MSE, L1, 1-SSIM, 1-MS-SSIM). The target is detached so
    gradients only flow through ``pred``.

    Raises:
        ValueError: if ``args.loss_type`` is not recognised (the
            original fell through and crashed later with a NameError).
    """
    target = target.detach()

    # Base losses, evaluated lazily so only the selected ones run.
    def l1():
        return torch.mean(torch.abs(pred - target))

    def l2():
        return F.mse_loss(pred, target)

    def ssim_loss():
        return 1 - ssim(pred, target, data_range=1, size_average=True)

    def msssim_loss():
        return 1 - ms_ssim(pred, target, data_range=1, size_average=True)

    simple = {'L1': l1, 'L2': l2, 'SSIM': ssim_loss}
    # Fusion table: loss = wa * first() + wb * second()
    # (weights copied verbatim from the original branch chain).
    fusion = {
        'Fusion1':  (0.3, l2, 0.7, ssim_loss),
        'Fusion2':  (0.3, l1, 0.7, ssim_loss),
        'Fusion3':  (0.5, l2, 0.5, ssim_loss),
        'Fusion4':  (0.5, l1, 0.5, ssim_loss),
        'Fusion5':  (0.7, l2, 0.3, ssim_loss),
        'Fusion6':  (0.7, l1, 0.3, ssim_loss),
        'Fusion7':  (0.7, l2, 0.3, l1),
        'Fusion8':  (0.5, l2, 0.5, l1),
        'Fusion9':  (0.9, l1, 0.1, ssim_loss),
        'Fusion10': (0.7, l1, 0.3, msssim_loss),
        'Fusion11': (0.9, l1, 0.1, msssim_loss),
        'Fusion12': (0.8, l1, 0.2, msssim_loss),
    }
    loss_type = args.loss_type
    if loss_type in simple:
        return simple[loss_type]()
    if loss_type in fusion:
        wa, first, wb, second = fusion[loss_type]
        return wa * first() + wb * second()
    raise ValueError('Unknown loss_type: %r' % loss_type)
def psnr_fn(output_list, target_list):
    """Per-stage PSNR for lists of predictions and targets.

    Each pair contributes one scalar PSNR (mean MSE over the whole
    tensor), broadcast down the batch dimension.

    Returns:
        Tensor of shape (batchsize, num_stage).
    """
    columns = []
    for out, tgt in zip(output_list, target_list):
        mse = F.mse_loss(out.detach(), tgt.detach(), reduction='mean')
        stage_psnr = -10 * torch.log10(mse)
        columns.append(stage_psnr.view(1, 1).expand(out.size(0), -1))
    return torch.cat(columns, dim=1)
def msssim_fn(output_list, target_list):
    """Per-stage MS-SSIM for lists of predictions and targets.

    Stages whose height (dim -2) is below 160 get a 0 placeholder
    instead of a score — presumably because ms_ssim needs sufficient
    spatial extent for its downsampling pyramid (TODO confirm).

    Returns:
        Tensor of shape (batchsize, num_stage), the per-stage scores
        broadcast down the batch of the last output.
    """
    per_stage = []
    for out, tgt in zip(output_list, target_list):
        if out.size(-2) >= 160:
            score = ms_ssim(out.float().detach(), tgt.detach(), data_range=1, size_average=True)
        else:
            score = torch.tensor(0).to(out.device)
        per_stage.append(score.view(1))
    stacked = torch.cat(per_stage, dim=0)
    return stacked.view(1, -1).expand(output_list[-1].size(0), -1)
def RoundTensor(x, num=2, group_str=False):
    """Format a tensor's values rounded to ``num`` decimals.

    With ``group_str`` each row of ``x`` becomes one comma-joined group
    and rows are joined with '/'; otherwise the flattened values are
    joined with commas into a single string.
    """
    def fmt(values):
        return ','.join(str(round(v, num)) for v in values)

    if group_str:
        return '/'.join(fmt(x[i].tolist()) for i in range(x.size(0)))
    return fmt(x.flatten().tolist())
def adjust_lr(optimizer, cur_epoch, cur_iter, data_size, args):
    """Set the learning rate of every param group and return it.

    Fractional epoch progress is ``cur_epoch + cur_iter / data_size``.
    Schedules: 'cosine' (after warmup), 'step' (x0.1 per passed
    milestone in ``args.lr_steps``), 'const' and 'plateau' (both flat
    here; plateau is presumably driven elsewhere). During the first
    ``args.warmup`` epochs a linear 0.1 -> 1.0 ramp overrides the
    schedule.
    """
    epoch_progress = cur_epoch + float(cur_iter) / data_size
    if args.lr_type == 'cosine':
        span = args.epochs - args.warmup
        lr_mult = 0.5 * (math.cos(math.pi * (epoch_progress - args.warmup) / span) + 1.0)
    elif args.lr_type == 'step':
        lr_mult = 0.1 ** (epoch_progress >= np.array(args.lr_steps)).sum()
    elif args.lr_type in ('const', 'plateau'):
        lr_mult = 1
    else:
        raise NotImplementedError
    if epoch_progress < args.warmup:
        lr_mult = 0.1 + 0.9 * epoch_progress / args.warmup
    new_lr = args.lr * lr_mult
    for param_group in optimizer.param_groups:
        param_group['lr'] = new_lr
    return new_lr
def worker_init_fn(worker_id):
    """
    Re-seed numpy and random in each DataLoader worker from the torch
    seed so data augmentation stays reproducible across workers.
    """
    seed = torch.initial_seed() % 2 ** 32
    np.random.seed(seed)
    random.seed(seed)
class PositionalEncodingTrans(nn.Module):
    """Transformer-style sin/cos positional table, looked up by a
    normalized position.

    The classic (max_len, d_model) table is precomputed and registered
    as a buffer; ``forward`` rounds ``pos * max_len`` to pick rows.

    NOTE(review): pos == 1.0 rounds to index ``max_len``, one past the
    table end — callers presumably keep pos < 1.0; confirm.
    """

    def __init__(self, d_model, max_len):
        super().__init__()
        self.max_len = max_len
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(position * div_term)
        table[:, 1::2] = torch.cos(position * div_term)
        self.register_buffer('pe', table)

    def forward(self, pos):
        index = torch.round(pos * self.max_len).long()
        return self.pe[index]
|
# ---------------------------------------------------------------------
# Interactive console triage for patients with COVID-19-compatible
# symptoms. All prompts and messages are user-facing Spanish text.
# ---------------------------------------------------------------------
print('=' * 80)
print('Programa para diagnostico de pacientes con sintomas compatibles con COVID-19. ')
print('=' * 80)
edad = int(input('Ingrese la edad del paciente: '))
temperaturaCorporal = int(input('ingrese la temperatura del paciente: '))
print('Presione la tecla "s" si el paciente tiene neumonia evidenciada o "n" en caso contrario')
neumonia = input()
if neumonia == 's':
    # Evident pneumonia is immediately a suspected case.
    print()
    print('Es un caso sospechoso. El paciente debera quedar internado...')
elif neumonia == 'n':
    if temperaturaCorporal > 37:
        # Fever: collect the symptom checklist.
        print('Responda las siguientes preguntas presionando la tecla "s" para SI o "n" para NO')
        tos = input('Tiene el paciente tos?: ')
        if tos == "s":
            tos = True
        elif tos == 'n':
            tos = False
        else:
            # NOTE(review): on invalid input the variable keeps the raw
            # (truthy) string and later counts as a present symptom —
            # confirm whether that is intended.
            print('Ha presionado un valor incorrecto..')
        odinofagia = input('tiene el paciente odinofagia?: ')
        if odinofagia == 's':
            odinofagia = True
        elif odinofagia == 'n':
            odinofagia = False
        else:
            print('Ha presionado un valor incorrecto..')
        dificultadRespiratoria = input('Dificultad respiratoria ?: ')
        if dificultadRespiratoria == 's':
            dificultadRespiratoria = True
        elif dificultadRespiratoria == 'n':
            dificultadRespiratoria = False
        else:
            print('Ha presionado un valor incorrecto..')
        # NOTE(review): the messages below say "dos o mas sintomas" but
        # this branch triggers on any single symptom — confirm.
        if tos or odinofagia or dificultadRespiratoria:
            # Epidemiological context questions.
            personalSalud = input('Es el paciente personal de salud?: ')
            if personalSalud == 's':
                personalSalud = True
            elif personalSalud == 'n':
                personalSalud = False
            else:
                print('Ha presionado un valor incorrecto..')
            contactoCovid = input('Estuvo el paciente en contacto con casos positivos de COVID-19?: ')
            if contactoCovid == 's':
                contactoCovid = True
            elif contactoCovid == 'n':
                contactoCovid = False
            else:
                print('Ha presionado un valor incorrecto..')
            viajoExterior = input('Ha viajado al exterior en los ultimos 14 dias?: ')
            if viajoExterior == 's':
                viajoExterior = True
            elif viajoExterior == 'n':
                viajoExterior = False
            else:
                print('Ha presionado un valor incorrecto..')
            transmisionesLocales = input('Ha estado el paciente en zonas locales de transmision confimadas?: ')
            if transmisionesLocales == 's':
                transmisionesLocales = True
            elif transmisionesLocales == 'n':
                transmisionesLocales = False
            else:
                print('Ha presionado un valor incorrecto..')
            # Report the first matching epidemiological criterion.
            if personalSalud:
                print()
                print('Es un caso sospechoso por ser personal de salud con dos o mas sintomas asociados al COVID-19.')
            elif contactoCovid:
                print()
                print('Es un caso sospechoso por presentar dos o mas sintomas y haber tenido contacto con casos confimados.')
            elif viajoExterior:
                print()
                print('Es un caso sospechoso por presentar dos o mas sintomas y haber viajado al exterior en los ultimos 14 dias')
            elif transmisionesLocales:
                print()
                print('Es un caso sospechoso autoctono por presentar dos o mas sintomas y haber estado en zonas de transmision local.')
            else:
                print()
                print('Es un caso sospechoso por presentar dos o mas sintomas asociados al COVID-19.')
        else:
            print()
            print('El paciente no presenta suficientes sintomas para considerarlo de riesgo. sin embargo tiene fiebre por lo que debera ser monitoreado a diario.')
    else:
        # No fever: only age-based risk advice.
        if edad >= 60:
            print()
            print('El paciente no es un caso sospechoso pero debe cuidarse dado que es grupo de riesgo.')
        else:
            print()
            print('El paciente no es un caso sospechoso porque no presenta ninguno de los sintomas asociados.')
else:
    print('Ha presionado un valor incorrecto..')
import matplotlib.pyplot as plt
#
## Figure
#
### Plot timelines for ALL panel and ground data, with one line in one panel
#
def FIG_all_timelines(gpta, adta, output, field_data, fignum):
    """Plot ground (gpta) and panel (adta) event timelines on one shared
    axis and save the figure as a PNG.

    Args:
        gpta, adta: DataFrames with 'Time' and 'ones' columns
            ('ones' is presumably a constant marker column — TODO confirm).
        output: directory/prefix the PNG filename is appended to.
        field_data: sequence whose first four entries identify the data
            set; they are joined into the output filename.
        fignum: figure number embedded in the filename.

    Fix: removed the unused ``fig_title`` local (it was built but never
    applied to the figure or filename).
    """
    fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(8.5, 3.5))
    plt.tight_layout(pad=1.5, w_pad=0.0, h_pad=0.0)
    # Ground data as orange vertical ticks, panel data as crosses.
    gpta.plot(x='Time', y='ones', kind='scatter', legend=False, ax=axes, color='orange', marker='|', s=50, linewidth=0.8)
    adta.plot(x='Time', y='ones', kind='scatter', legend=False, ax=axes, marker='x', sharey=True, s=50, linewidth=0.8)
    axes.tick_params(axis='both', which='major', labelsize=9)
    axes.tick_params(axis='both', which='minor', labelsize=9)
    axes.set_xlabel("Time (seconds)")
    axes.set_ylabel("")
    axes.set_yticks([])
    filename = '_'.join(list(field_data[:4]) + ['Fig' + str(fignum), 'AllTimeLineData.png'])
    plt.savefig(output + filename, dpi=300)
|
#-*coding:utf-8-*-
from flaskone import Flask,json,jsonify
from flaskone import redirect
from flaskone import url_for
app = Flask(__name__)


@app.route("/")
def index():
    # Root route: returns a plain-text landing body.
    return "index"
# @app.route("/json")
# def demo4():
# temp_dict={
# "name":"laowang",
# "age":18
# }
# return jsonify(temp_dict)
# Redirect demo: send the client to an external URL.
@app.route("/redirect")
def demo5():
    return redirect("http://ntlias-stu.boxuegu.com")
@app.route("/demo6")
def demo6():
return redirect(url_for("index"))
@app.route("/code")
def demo7():
return "自定义状态码666"
if __name__ == '__main__':
    # Development server with the debugger/reloader enabled.
    app.run(debug=True)
|
import rospy
import numpy as np
from nav_msgs.msg import Path
from geometry_msgs.msg import PoseStamped
from geometry_msgs.msg import Point
from interactive_markers.interactive_marker_server import *
from visualization_msgs.msg import *
import math
from tkinter import messagebox
from tkinter import filedialog
import tkinter as tk
import tkinter.font as tkFont
from scipy.spatial.transform import Rotation as R
server = None
poses = None
frame_id = None
def transform_matrix(pose):
    """Build a 4x4 homogeneous transform from a pose array.

    Args:
        pose: sequence [x, y, z, qx, qy, qz, qw, ...] — translation
            followed by an xyzw quaternion; extra trailing entries
            (such as the marker height) are ignored.

    Returns:
        np.ndarray of shape (4, 4).

    Fix: ``Rotation.as_dcm()`` was removed in SciPy 1.6 (renamed to
    ``as_matrix()``); support both so the tool runs on old and new
    SciPy. Also uses ``np.vstack`` instead of the deprecated
    ``np.row_stack`` alias.
    """
    rotation = R.from_quat([pose[3], pose[4], pose[5], pose[6]])
    if hasattr(rotation, 'as_matrix'):
        rot = rotation.as_matrix()
    else:
        rot = rotation.as_dcm()
    translation = np.array((pose[0], pose[1], pose[2])).reshape((3, 1))
    T = np.concatenate((rot, translation), axis=1)
    return np.vstack((T, [0, 0, 0, 1]))
def processFeedback(feedback):
    """RViz interactive-marker callback: store the marker's new pose and
    republish orientation arrows for every live marker.

    Marker names are stringified indices into the global ``poses``
    array (rows of [x, y, z, qx, qy, qz, qw, height]).
    """
    global poses
    p = feedback.pose.position
    o = feedback.pose.orientation
    rospy.loginfo(feedback.marker_name + " is now at " + str(p.x) + ", " + str(p.y) + ", " + str(p.z) + ", " + str(o.x) + ", " + str(o.y) +", " + str(o.z) + ", " + str(o.w))
    index = int(feedback.marker_name);
    # Preserve the marker's height; only the pose changed.
    h = poses[index][7]
    pose = np.array([p.x, p.y, p.z, o.x, o.y, o.z, o.w, h])
    poses[index] = pose;
    n = np.size(poses, 0)
    arrows = MarkerArray()
    for i in range(0, n):
        pose_cp = poses[i].copy()
        # A NaN x-coordinate marks an erased marker — skip it.
        if np.isnan(pose_cp[0]) == False:
            this_pose = makeArrow(pose_cp, i, frame_id)
            arrows.markers.append(this_pose)
    # NOTE(review): orientation_pub is presumably a rospy Publisher
    # created elsewhere in this file — confirm.
    orientation_pub.publish(arrows)
def makeArrow(pose, i, frame_id):
    """Build an ARROW Marker for pose ``i``, pointing along the normal
    of the triangle that visualizes the marker.

    ``pose`` is [x, y, z, qx, qy, qz, qw, height]; the arrow runs from
    the pose origin to a point 0.2 units along the triangle's normal.
    """
    this_pose = Marker()
    this_pose.id = i
    this_pose.header.frame_id = frame_id
    this_pose.header.stamp = rospy.Time.now()
    this_pose.action = Marker.ADD
    this_pose.type = Marker.ARROW
    # Shaft/head sizes scale with the marker height (pose[7]).
    this_pose.scale.x = pose[7]/2*0.05
    this_pose.scale.y = pose[7]/2*0.1
    this_pose.scale.z = pose[7]/2*0.1
    # Opaque green.
    this_pose.color.r = 0;
    this_pose.color.g = 1;
    this_pose.color.b = 0;
    this_pose.color.a = 1;
    p = Point()
    p.x = pose[0]
    p.y = pose[1]
    p.z = pose[2]
    T = transform_matrix(pose)
    # Two triangle vertices in the marker's local frame, lifted to world.
    p1 = np.array([0, 0, pose[7]/3*2, 1]).reshape((4,1))
    p2 = np.array([+pose[7]/np.sqrt(3), 0, -pose[7]/3, 1]).reshape((4,1))
    p1 = np.dot(T, p1)
    p2 = np.dot(T, p2)
    T = transform_matrix(pose)
    pc = np.array([pose[0], pose[1], pose[2]])
    # Normal of the triangle plane from two edge vectors.
    normal = np.cross(p1[0:3, 0] - pc, p2[0:3, 0] - pc);
    normal = normal / np.linalg.norm(normal)
    pv = pc + 0.2 * normal
    pv_ = Point()
    pv_.x = pv[0]
    pv_.y = pv[1]
    pv_.z = pv[2]
    # Arrow is defined by its two endpoints: origin -> offset point.
    this_pose.points.append(p)
    this_pose.points.append(pv_)
    return this_pose
def makeBoxControl(pose, name_id, frame_id):
    """Assemble the InteractiveMarker for one labeled pose.

    Controls added: a planar translation handle (rendered as a flat
    semi-transparent cylinder), an axis translation handle, one
    rotation handle per axis, and an always-visible static visual made
    of a triangle outline, a cyan sphere on its left vertex and a text
    label showing ``name_id``.
    """
    int_marker = InteractiveMarker()
    int_marker.header.frame_id = frame_id
    int_marker.name = name_id
    int_marker.description = ""
    int_marker.pose.position.x = pose[0];
    int_marker.pose.position.y = pose[1];
    int_marker.pose.position.z = pose[2];
    int_marker.pose.orientation.x = pose[3];
    int_marker.pose.orientation.y = pose[4];
    int_marker.pose.orientation.z = pose[5];
    int_marker.pose.orientation.w = pose[6];
    triangle_marker = Marker()
    left_marker = Marker()
    text_marker = Marker()
    # Planar translation control ("move_xy").
    control = InteractiveMarkerControl()
    control.orientation.w = 1;
    control.orientation.x = 0;
    control.orientation.y = 1;
    control.orientation.z = 0;
    control.name = "move_xy";
    control.interaction_mode = InteractiveMarkerControl.MOVE_PLANE;
    # Flat translucent cylinder visualizing the move plane.
    color_marker = Marker();
    color_marker.type = Marker.CYLINDER;
    color_marker.color.r = 1.0;
    color_marker.color.g = 1.0;
    color_marker.color.b = 1.0;
    color_marker.color.a = 0.5;
    color_marker.scale.x = 0.5;
    color_marker.scale.y = 0.5;
    color_marker.scale.z = 0.005;
    control.markers.append(color_marker)
    int_marker.controls.append(control);
    # Axis translation control ("move_z").
    z_control = InteractiveMarkerControl()
    z_control.orientation.w = 1;
    z_control.orientation.x = 0;
    z_control.orientation.y = 1;
    z_control.orientation.z = 0;
    z_control.name = "move_z";
    z_control.interaction_mode = InteractiveMarkerControl.MOVE_AXIS;
    int_marker.controls.append(z_control);
    # Rotation rings, one per control-frame axis.
    rotx_control = InteractiveMarkerControl()
    rotx_control.always_visible = True;
    rotx_control.interaction_mode = InteractiveMarkerControl.ROTATE_AXIS;
    rotx_control.orientation.x = 1;
    rotx_control.orientation.y = 0;
    rotx_control.orientation.z = 0;
    rotx_control.orientation.w = 1;
    rotx_control.name = "rot_x";
    int_marker.controls.append(rotx_control);
    roty_control = InteractiveMarkerControl()
    roty_control.always_visible = True;
    roty_control.interaction_mode = InteractiveMarkerControl.ROTATE_AXIS;
    roty_control.orientation.x = 0;
    roty_control.orientation.y = 1;
    roty_control.orientation.z = 0;
    roty_control.orientation.w = 1;
    roty_control.name = "rot_y";
    int_marker.controls.append(roty_control);
    rotz_control = InteractiveMarkerControl()
    rotz_control.always_visible = True;
    rotz_control.interaction_mode = InteractiveMarkerControl.ROTATE_AXIS;
    rotz_control.orientation.x = 0;
    rotz_control.orientation.y = 0;
    rotz_control.orientation.z = 1;
    rotz_control.orientation.w = 1;
    rotz_control.name = "rot_z";
    int_marker.controls.append(rotz_control);
    # Triangle outline: apex up, two base corners, closed back to the
    # apex; vertex positions derive from the marker height pose[7].
    triangle_marker.type = Marker.LINE_STRIP
    triangle_marker.scale.x = 0.02;
    triangle_marker.color.r = 1.0;
    triangle_marker.color.g = 1.0;
    triangle_marker.color.b = 1.0;
    triangle_marker.color.a = 1.0;
    p1 = Point()
    p1.x = 0
    p1.y = 0
    p1.z = pose[7]/3*2;
    p2 = Point()
    p2.x = +pose[7]/np.sqrt(3)
    p2.y = 0
    p2.z = -pose[7]/3
    p3 = Point()
    p3.x = -pose[7]/np.sqrt(3)
    p3.y = 0
    p3.z = -pose[7]/3
    triangle_marker.points = []
    triangle_marker.points.append(p1)
    triangle_marker.points.append(p2)
    triangle_marker.points.append(p3)
    triangle_marker.points.append(p1)
    # Small cyan sphere marking the triangle's left vertex.
    left_marker.type = Marker.SPHERE;
    left_marker.scale.x = 0.05;
    left_marker.scale.y = 0.05;
    left_marker.scale.z = 0.05;
    left_marker.color.r = 0.0;
    left_marker.color.g = 1.0;
    left_marker.color.b = 1.0;
    left_marker.color.a = 1.0;
    left_marker.pose.position.x = p3.x;
    left_marker.pose.position.y = p3.y;
    left_marker.pose.position.z = p3.z;
    # Floating text label with the marker id, offset above the origin.
    text_marker.type = Marker.TEXT_VIEW_FACING;
    text_marker.scale.z = 0.45;
    text_marker.text = name_id;
    text_marker.color.r = 1.0;
    text_marker.color.g = 1.0;
    text_marker.color.b = 1.0;
    text_marker.color.a = 1.0;
    text_marker.pose.position.x = 0;
    text_marker.pose.position.y = 0;
    text_marker.pose.position.z = 1;
    text_marker.pose.orientation.x = 0.0;
    text_marker.pose.orientation.y = 0.0;
    text_marker.pose.orientation.z = 0.0;
    text_marker.pose.orientation.w = 1.0;
    # Always-visible, non-interactive visual grouping.
    triangle_control = InteractiveMarkerControl()
    triangle_control.always_visible = True
    triangle_control.markers.append(triangle_marker)
    triangle_control.markers.append(left_marker)
    triangle_control.markers.append(text_marker)
    int_marker.controls.append(triangle_control)
    return int_marker
class Labeler():
    def __init__(self):
        """Run the labeling GUI.

        First shows a modal prompt asking for the TF frame id (falls
        back to 'map'), then builds and runs the main Labeler window:
        a row of marker-management buttons, a listbox of marker rows
        with both scrollbars, and refresh/save actions. Blocks inside
        Tk mainloops until the windows are closed.
        """
        # --- frame-id prompt ------------------------------------------
        self.ask_window = tk.Tk()
        self.ask_window.title('What is your coordinate frame?')
        self.ask_fontStyle = tkFont.Font(root = self.ask_window, family="Arial", size = 15)
        self.ask_l_frame = tk.Label(self.ask_window, text = 'Frame ID: ', font = self.ask_fontStyle)
        self.ask_e_frame = tk.Entry(self.ask_window, font = self.ask_fontStyle)
        self.ask_l_frame.grid(row = 0, column = 0, padx = 10, pady = 10, sticky = tk.E + tk.W + tk.N + tk.S)
        self.ask_e_frame.grid(row = 0, column = 1, padx = 10, pady = 10, sticky = tk.E + tk.W + tk.N + tk.S)
        self.ask_button_enter = tk.Button(self.ask_window, text = 'Enter', width = 15, height = 2, command = self.set_frame, font = self.ask_fontStyle)
        self.ask_button_enter.grid(row = 1, column = 0, padx = 10, pady = 10, columnspan = 2, sticky = tk.E + tk.W + tk.N + tk.S)
        self.ask_window.mainloop()
        global frame_id
        # Fall back to 'map' when the user entered nothing.
        if frame_id == None or frame_id == '':
            frame_id = 'map'
        # --- main window: action button row ---------------------------
        self.window = tk.Tk()
        self.window.title('Labeler')
        self.fontStyle = tkFont.Font(root = self.window, family="Arial", size = 15)
        self.choice = tk.Frame(self.window)
        self.choice.grid(row = 0, column = 0, sticky = tk.E + tk.W + tk.N + tk.S)
        self.choice.grid_propagate(True)
        self.button_new = tk.Button(self.choice, text = 'New Marker', width = 15, height = 2, command = self.new_marker, font = self.fontStyle)
        self.button_erase = tk.Button(self.choice, text = 'Erase Marker', width = 15, height = 2, command = self.erase_marker, font = self.fontStyle)
        self.button_update = tk.Button(self.choice, text = 'Update Marker', width = 15, height = 2, command = self.update_marker, font = self.fontStyle)
        self.button_exit = tk.Button(self.choice, text = 'Exit', width = 15, height = 2, command = self.window.quit, font = self.fontStyle)
        self.button_new.grid(row = 0, column = 0, padx = 10, pady = 10, sticky = tk.E + tk.W + tk.N + tk.S)
        self.button_erase.grid(row = 0, column = 1, padx = 10, pady = 10, sticky = tk.E + tk.W + tk.N + tk.S)
        self.button_update.grid(row = 0, column = 2, padx = 10, pady = 10, sticky = tk.E + tk.W + tk.N + tk.S)
        self.button_exit.grid(row = 0, column = 3, padx = 10, pady = 10, sticky = tk.E + tk.W + tk.N + tk.S)
        self.choice.grid_columnconfigure(0, weight = 1)
        self.choice.grid_columnconfigure(1, weight = 1)
        self.choice.grid_columnconfigure(2, weight = 1)
        self.choice.grid_columnconfigure(3, weight = 1)
        self.window.grid_columnconfigure(0, weight = 1)
        # --- marker listbox with x/y scrollbars -----------------------
        self.marker_info = tk.Frame(self.window)
        self.lb = tk.Listbox(self.marker_info, font = self.fontStyle)
        self.sby = tk.Scrollbar(self.marker_info, width = 20)
        self.sbx = tk.Scrollbar(self.marker_info, orient = tk.HORIZONTAL, width = 20)
        self.lb.configure(xscrollcommand = self.sbx.set, yscrollcommand = self.sby.set)
        self.sby['command'] = self.lb.yview
        self.sbx['command'] = self.lb.xview
        self.marker_info.grid(row = 1, column = 0, sticky = tk.E + tk.W + tk.N + tk.S)
        self.sby.grid(row = 0, column = 1, sticky = tk.E + tk.W + tk.N + tk.S)
        self.sbx.grid(row = 1, column = 0, sticky = tk.E + tk.W + tk.N + tk.S)
        self.lb.grid(row = 0, column = 0, sticky = tk.E + tk.W + tk.N + tk.S)
        self.marker_info.grid_columnconfigure(0, weight = 1)
        # --- output row: refresh + save -------------------------------
        self.output = tk.Frame(self.window)
        # NOTE(review): 'refrersh' looks like a typo, but the handler
        # (defined later in this class, outside this view) must carry
        # the same name — confirm before renaming either side.
        self.button_refresh = tk.Button(self.output, text = 'Refresh', width = 15, height = 2, command = self.refrersh, font = self.fontStyle)
        self.button_save = tk.Button(self.output, text = 'Save', width = 15, height = 2, command = self.save_file, font = self.fontStyle)
        self.output.grid(row = 2, column = 0, sticky = tk.E + tk.W + tk.N + tk.S)
        self.button_refresh.grid(row = 0, column = 0, padx = 10, pady = 10, sticky = tk.E + tk.W + tk.N + tk.S)
        self.button_save.grid(row = 0, column = 1, padx = 10, pady = 10, sticky = tk.E + tk.W + tk.N + tk.S)
        self.window.mainloop()
    def set_frame(self):
        """Store the entered frame id in the global and close the prompt."""
        global frame_id
        frame_id = self.ask_e_frame.get()
        self.ask_window.destroy()
    def new_marker(self):
        """Open a dialog that creates N interactive markers of a given height."""
        def new_marker_():
            # Create the requested markers, append their rows to the
            # listbox, and publish them to the interactive marker server.
            global poses
            n = int(e_num.get())
            h = float(e_height.get())
            for i in range(0, n):
                # New markers are stacked along Y, one height apart.
                pose = [0, i*h, 0., 0., 0., 0., 1., h]
                if np.size(poses, 0) == 0:
                    poses = np.array([pose])
                    # First marker ever: also insert the listbox header row.
                    self.lb.insert('end', 'id, x, y, z, ox, oy, oz, ow, height, tx, ty, tz')
                else:
                    poses = np.row_stack((poses, pose))
                name_id = str(np.size(poses, 0) - 1)
                global frame_id
                int_marker = makeBoxControl(pose, name_id, frame_id);
                server.insert(int_marker, processFeedback);
                # pose = poses[i].copy()
                # Triangle vertices in world coordinates for the row text.
                T = transform_matrix(pose)
                p1 = np.array([0, 0, pose[7]/3*2, 1]).reshape((4,1))
                p2 = np.array([+pose[7]/np.sqrt(3), 0, -pose[7]/3, 1]).reshape((4,1))
                p3 = np.array([-pose[7]/np.sqrt(3), 0, -pose[7]/3, 1]).reshape((4,1))
                p1 = np.dot(T, p1)
                p2 = np.dot(T, p2)
                p3 = np.dot(T, p3)
                # pose[0] = p[0]
                # pose[1] = p[1]
                # pose[2] = p[2]
                r = str(np.size(poses, 0) - 1) + ', ' + str(pose[0]) + ', ' + str(pose[1]) + ', ' + str(pose[2]) + ', '\
                    + str(pose[3]) + ', ' + str(pose[4]) + ', ' + str(pose[5]) + ', ' + str(pose[6]) + ', '\
                    + str(pose[7]) + '|'\
                    + str(p1[0][0]) + ', ' + str(p1[1][0]) + ',' + str(p1[2][0]) + ', '\
                    + str(p2[0][0]) + ', ' + str(p2[1][0]) + ',' + str(p2[2][0]) + ', '\
                    + str(p3[0][0]) + ', ' + str(p3[1][0]) + ',' + str(p3[2][0])
                self.lb.insert('end', r)
            server.applyChanges();
            window_new_marker.destroy()
        # --- dialog layout: count + height entries, New/Quit buttons ---
        window_new_marker = tk.Toplevel(self.window)
        window_new_marker.title('New Marker')
        l_num = tk.Label(window_new_marker, text = 'Number(s): ', font = self.fontStyle)
        l_height = tk.Label(window_new_marker, text = 'Height(m): ', font = self.fontStyle)
        l_num.grid(row = 0, column = 0, padx = 10, pady = 10, sticky = tk.E + tk.W + tk.N + tk.S)
        l_height.grid(row = 1, column = 0, padx = 10, pady = 10, sticky = tk.E + tk.W + tk.N + tk.S)
        num = tk.StringVar()
        num.set('1')
        height = tk.StringVar()
        height.set('1.2')
        e_num = tk.Entry(window_new_marker, textvariable = num, font = self.fontStyle)
        e_height = tk.Entry(window_new_marker, textvariable = height, font = self.fontStyle)
        e_num.grid(row = 0, column = 1, padx = 10, pady = 10, sticky = tk.E + tk.W + tk.N + tk.S)
        e_height.grid(row = 1, column = 1, padx = 10, pady = 10, sticky = tk.E + tk.W + tk.N + tk.S)
        button_new = tk.Button(window_new_marker, text = 'New', width = 15, height = 2, command = new_marker_, font = self.fontStyle)
        button_quit = tk.Button(window_new_marker, text = 'Quit', width = 15, height = 2, command = window_new_marker.destroy, font = self.fontStyle)
        button_new.grid(row = 4, column = 0, padx = 10, pady = 10, sticky = tk.E + tk.W + tk.N + tk.S)
        button_quit.grid(row = 4, column = 1, padx = 10, pady = 10, sticky = tk.E + tk.W + tk.N + tk.S)
        window_new_marker.grid_columnconfigure(0, weight = 1)
        window_new_marker.grid_columnconfigure(1, weight = 1)
def erase_marker(self):
    """Open a dialog that erases an existing interactive marker by id.

    The marker is removed from the interactive-marker server and its row in
    the global ``poses`` array is set to NaN so other views treat it as
    deleted (rows are never physically removed, so ids stay stable).
    """
    def erase_marker_():
        # 'Enter' button callback: validate the id, then erase the marker.
        str_id = e_id.get()
        int_id = int(str_id)
        if int_id < np.size(poses, 0) and int_id >= 0:
            server.erase(str_id);
            for i in range(0, 8):
                # NaN-out all 8 pose fields so the row reads as deleted.
                poses[int_id][i]= np.nan;
            server.applyChanges();
            window_erase_marker.destroy()
        else:
            tk.messagebox.showerror(message='The marker with id = ' + str_id + ' has never been generated.')
    # Build the dialog: one labelled id entry plus Enter/Quit buttons.
    window_erase_marker = tk.Toplevel(self.window)
    window_erase_marker.title('Erase Marker')
    l_id = tk.Label(window_erase_marker, text = 'Erased ID: ', font = self.fontStyle)
    e_id = tk.Entry(window_erase_marker, font = self.fontStyle)
    l_id.grid(row = 0, column = 0, padx = 10, pady = 10, sticky = tk.E + tk.W + tk.N + tk.S)
    e_id.grid(row = 0, column = 1, padx = 10, pady = 10, sticky = tk.E + tk.W + tk.N + tk.S)
    button_erase = tk.Button(window_erase_marker, text = 'Enter', width = 15, height = 2, command = erase_marker_, font = self.fontStyle)
    button_quit = tk.Button(window_erase_marker, text = 'Quit', width = 15, height = 2, command = window_erase_marker.destroy, font = self.fontStyle)
    button_erase.grid(row = 1, column = 0, padx = 10, pady = 10, sticky = tk.E + tk.W + tk.N + tk.S)
    button_quit.grid(row = 1, column = 1, padx = 10, pady = 10, sticky = tk.E + tk.W + tk.N + tk.S)
def update_marker(self):
    """Dialog to edit an existing marker's position (tx/ty/tz) and height.

    The value fields start read-only; the focus-out validator on the ID
    entry fills them from the global ``poses`` table and unlocks them.
    """
    def show():
        # focusout validator on e_id: populate the fields with the current
        # pose of the requested marker, or blank them for an unknown id.
        int_id = int(e_id.get())
        if int_id < np.size(poses, 0) and int_id >= 0:
            var_x.set(str(poses[int_id][0]))
            var_y.set(str(poses[int_id][1]))
            var_z.set(str(poses[int_id][2]))
            var_height.set(str(poses[int_id][7]))
        else:
            var_x.set('')
            var_y.set('')
            var_z.set('')
            var_height.set('')
        # Unlock the fields for editing once an id has been entered.
        e_x.configure(state = 'normal')
        e_y.configure(state = 'normal')
        e_z.configure(state = 'normal')
        e_height.configure(state = 'normal')
        return True
    def update_marker_():
        # 'Update' button callback: write the edited values back into
        # ``poses`` and re-insert the marker on the server.
        str_id = e_id.get()
        int_id = int(str_id)
        if int_id < np.size(poses, 0) and int_id >= 0:
            if np.isnan(poses[int_id][0]) == True:
                # Marker was erased: restore an identity orientation
                # quaternion before reviving it.
                poses[int_id][3] = 0
                poses[int_id][4] = 0
                poses[int_id][5] = 0
                poses[int_id][6] = 1
            poses[int_id][0] = float(e_x.get())
            poses[int_id][1] = float(e_y.get())
            poses[int_id][2] = float(e_z.get())
            poses[int_id][7] = float(e_height.get())
            pose = poses[int_id]
            global frame_id
            int_marker = makeBoxControl(pose, str_id, frame_id);
            # Replace the marker on the server: erase + insert + apply.
            server.erase(str_id)
            server.insert(int_marker, processFeedback);
            server.applyChanges();
            window_update_marker.destroy()
        else:
            tk.messagebox.showerror(message='The marker with id = ' + str_id + ' has never been generated.')
    # Build the dialog layout.
    window_update_marker = tk.Toplevel(self.window)
    window_update_marker.title('Update Marker')
    l_instruction = tk.Label(window_update_marker, text = 'Update the position of the control object or the size of the cube.', font = self.fontStyle)
    l_id = tk.Label(window_update_marker, text = 'Updated ID: ', font = self.fontStyle)
    l_x = tk.Label(window_update_marker, text = 'tx(m): ', font = self.fontStyle)
    l_y = tk.Label(window_update_marker, text = 'ty(m): ', font = self.fontStyle)
    l_z = tk.Label(window_update_marker, text = 'tz(m): ', font = self.fontStyle)
    l_h = tk.Label(window_update_marker, text = 'Height(m): ', font = self.fontStyle)
    l_instruction.grid(row = 0, column = 0, columnspan = 2, padx = 10, pady = 10, sticky = tk.E + tk.W + tk.N + tk.S)
    l_id.grid(row = 1, column = 0, padx = 10, pady = 10, sticky = tk.E + tk.W + tk.N + tk.S)
    l_x.grid(row = 2, column = 0, padx = 10, pady = 10, sticky = tk.E + tk.W + tk.N + tk.S)
    l_y.grid(row = 3, column = 0, padx = 10, pady = 10, sticky = tk.E + tk.W + tk.N + tk.S)
    l_z.grid(row = 4, column = 0, padx = 10, pady = 10, sticky = tk.E + tk.W + tk.N + tk.S)
    l_h.grid(row = 7, column = 0, padx = 10, pady = 10, sticky = tk.E + tk.W + tk.N + tk.S)
    var_x = tk.StringVar()
    var_y = tk.StringVar()
    var_z = tk.StringVar()
    var_height = tk.StringVar()
    # validate='focusout' triggers show() when the id entry loses focus.
    e_id = tk.Entry(window_update_marker, validate = 'focusout', validatecommand = show, font = self.fontStyle)
    e_x = tk.Entry(window_update_marker, textvariable = var_x, font = self.fontStyle, state='readonly')
    e_y = tk.Entry(window_update_marker, textvariable = var_y, font = self.fontStyle, state='readonly')
    e_z = tk.Entry(window_update_marker, textvariable = var_z, font = self.fontStyle, state='readonly')
    e_height = tk.Entry(window_update_marker, textvariable = var_height, font = self.fontStyle, state='readonly')
    e_id.grid(row = 1, column = 1, padx = 10, pady = 10, sticky = tk.E + tk.W + tk.N + tk.S)
    e_x.grid(row = 2, column = 1, padx = 10, pady = 10, sticky = tk.E + tk.W + tk.N + tk.S)
    e_y.grid(row = 3, column = 1, padx = 10, pady = 10, sticky = tk.E + tk.W + tk.N + tk.S)
    e_z.grid(row = 4, column = 1, padx = 10, pady = 10, sticky = tk.E + tk.W + tk.N + tk.S)
    e_height.grid(row = 7, column = 1, padx = 10, pady = 10, sticky = tk.E + tk.W + tk.N + tk.S)
    button_update = tk.Button(window_update_marker, text = 'Update', width = 15, height = 2, command = update_marker_, font = self.fontStyle)
    button_quit = tk.Button(window_update_marker, text = 'Quit', width = 15, height = 2, command = window_update_marker.destroy, font = self.fontStyle)
    button_update.grid(row = 8, column = 0, padx = 10, pady = 10, sticky = tk.E + tk.W + tk.N + tk.S)
    button_quit.grid(row = 8, column = 1, padx = 10, pady = 10, sticky = tk.E + tk.W + tk.N + tk.S)
def refrersh(self):
    """Rebuild the listbox from the global ``poses`` table.

    (The method name keeps the original 'refrersh' spelling — renaming it
    would break existing callers/bindings.)

    Each live marker row lists its pose followed, after a '|', by the
    world-frame coordinates of the three triangle corner points derived
    from the marker height pose[7]. Erased (NaN) rows show the pose only.
    """
    global poses
    n = np.size(poses, 0)
    self.lb.delete(0, tk.END)
    self.lb.insert('end', 'id, x, y, z, ox, oy, oz, ow, height, tx, ty, tz')
    for i in range(0, n):
        pose = poses[i].copy()
        if np.isnan(pose[0]) == False:
            # Triangle corner points in the marker's local frame, scaled by
            # the marker height, then transformed into the world frame.
            T = transform_matrix(pose)
            p1 = np.array([0, 0, pose[7]/3*2, 1]).reshape((4,1))
            p2 = np.array([+pose[7]/np.sqrt(3), 0, -pose[7]/3, 1]).reshape((4,1))
            p3 = np.array([-pose[7]/np.sqrt(3), 0, -pose[7]/3, 1]).reshape((4,1))
            p1 = np.dot(T, p1)
            p2 = np.dot(T, p2)
            p3 = np.dot(T, p3)
            r = str(i) + ', ' + str(pose[0]) + ', ' + str(pose[1]) + ', ' + str(pose[2]) + ', '\
                + str(pose[3]) + ', ' + str(pose[4]) + ', ' + str(pose[5]) + ', ' + str(pose[6]) + ', '\
                + str(pose[7]) + '|'\
                + str(p1[0][0]) + ', ' + str(p1[1][0]) + ',' + str(p1[2][0]) + ', '\
                + str(p2[0][0]) + ', ' + str(p2[1][0]) + ',' + str(p2[2][0]) + ', '\
                + str(p3[0][0]) + ', ' + str(p3[1][0]) + ',' + str(p3[2][0])
        else:
            # Erased marker: show the NaN pose row without triangle points.
            r = str(i) + ', ' + str(pose[0]) + ', ' + str(pose[1]) + ', ' + str(pose[2]) + ', '\
                + str(pose[3]) + ', ' + str(pose[4]) + ', ' + str(pose[5]) + ', ' + str(pose[6]) + ', '\
                + str(pose[7])
        self.lb.insert('end', r)
def save_file(self):
    """Export all live marker poses and derived triangle points to a CSV.

    Opens a save dialog; writes one row per marker whose pose is valid.
    Rows whose x is NaN (erased markers) are skipped, matching the
    on-screen listing. p1/p2/p3 are the marker-triangle corner points
    transformed into the world frame.
    """
    f = filedialog.asksaveasfile(mode = 'w', defaultextension = '.csv')
    if f is None:
        # User cancelled the save dialog.
        return
    # BUG FIX: the file handle was never closed; the context manager
    # guarantees the buffer is flushed and the handle released.
    with f:
        f.write('id, x, y, z, tx, ty, tz, rx, ry, rz, lx, ly, lz\n')
        for i in range(0, np.size(poses, 0)):
            pose = poses[i].copy()
            if np.isnan(pose[0]) == False:
                T = transform_matrix(pose)
                # Triangle corners in the marker's local frame, scaled by
                # the marker height pose[7].
                p1 = np.array([0, 0, pose[7]/3*2, 1]).reshape((4,1))
                p2 = np.array([+pose[7]/np.sqrt(3), 0, -pose[7]/3, 1]).reshape((4,1))
                p3 = np.array([-pose[7]/np.sqrt(3), 0, -pose[7]/3, 1]).reshape((4,1))
                p1 = np.dot(T, p1)
                p2 = np.dot(T, p2)
                p3 = np.dot(T, p3)
                r = str(i) + ', ' + str(pose[0]) + ', ' + str(pose[1]) + ', ' + str(pose[2]) + ', '\
                    + str(p1[0][0]) + ', ' + str(p1[1][0]) + ',' + str(p1[2][0]) + ', '\
                    + str(p2[0][0]) + ', ' + str(p2[1][0]) + ',' + str(p2[2][0]) + ', '\
                    + str(p3[0][0]) + ', ' + str(p3[1][0]) + ',' + str(p3[2][0]) + '\n'
                f.write(r)
if __name__=="__main__":
    # Start the ROS node that hosts the interactive-marker server and GUI.
    rospy.init_node("simple_marker")
    # rospy.Subscriber("/move_base_simple/goal", PoseStamped, navCallback)
    orientation_pub = rospy.Publisher('cube_poses', MarkerArray, queue_size = 100)
    server = InteractiveMarkerServer("simple_marker")
    poses = np.array([])  # global pose table; rows appended as markers are created
    # NOTE(review): Labeler() presumably blocks inside the tkinter mainloop,
    # which is why rospy.spin() below is commented out — confirm.
    Labeler()
    # rospy.spin()
    # window.mainloop()
|
from django.http import HttpResponse
from django.shortcuts import render
def temp(Re):
    """Render home.html, echoing the raw request object for inspection."""
    return render(Re,"home.html",{"Req":str(Re)})
def cont(Re):
    """Count the words in the submitted text and render cont.html.

    Reads the 'fulltext' GET parameter, counts case-insensitive word
    frequencies, and passes (count, word) pairs sorted by descending
    frequency to the template.
    """
    # Local import keeps the module's import section unchanged.
    from collections import Counter
    orgwords = Re.GET["fulltext"]
    wordlist = orgwords.lower().split()
    num = len(wordlist)
    # PERF: was wordlist.count(w) per word (O(n^2)); Counter is one pass.
    numword = Counter(wordlist)
    # (count, word) pairs, highest count first; ordering matches the
    # original sort()+reverse() since words (and hence pairs) are unique.
    worditem = sorted(((v, k) for k, v in numword.items()), reverse=True)
    return render(Re, "cont.html",{"fulltext":orgwords,"num":num,"numword":worditem})
def hh(Re):
    """Return the static 'Welcome Visitor' page as an inline-HTML response."""
    return HttpResponse("""
<html>
<body bgcolor="white" text="black" link="blue" vlink="green" lang="en">
<p><a href="http://127.0.0.1:8000/">Home</a>
<a href="http://127.0.0.1:8000/about/"> About</a>
<a href="http://127.0.0.1:8000/temp/">Count Word</a></p>
<hr>
<h1>Welcome Visitor</h1>
<blockquote>
Do you know what is this?<br>
This is my website.<br>
Now <br>
<i>Go FUCK Yourself</i></blockquote>
<p>Thank you for visiting. please come back and enjoy yourself again.</p>""")
def homepage(Re):
    """Return the site home page as an inline-HTML response.

    NOTE(review): navigation links hardcode http://127.0.0.1:8000 — they
    will break on any other host; consider relative URLs.
    """
    return HttpResponse("""
<html>
<body bgcolor="white" text="black" link="blue" vlink="green" lang="en">
<p><a href="http://127.0.0.1:8000/">Home</a>
<a href="http://127.0.0.1:8000/hh/"> Welcome visitor</a>
<a href="http://127.0.0.1:8000/about/"> About</a>
<a href="http://127.0.0.1:8000/temp/">Count Word</a></p>
<hr>
<h1>Welcome</h1>
<p>Here we have a great advice for you to keep going<br>
please visit the <b><h3>"welcome visitor"</h3></b> page for the advise<br>
Thanks.</p>
""")
def about(Re):
    """Return the About page as an inline-HTML response.

    BUG FIX: the root element was '<html land="fa">' — a misspelled
    attribute that browsers ignore; corrected to lang="fa".
    NOTE(review): the '</b>' after the h3 has no matching '<b>' — left
    as-is pending a proper markup cleanup.
    """
    return HttpResponse("""
<html lang="fa">
<body bgcolor="white" text="black" link="blue" vlink="green" lang="en">
<p><a href="http://127.0.0.1:8000/">Home</a>
<a href="http://127.0.0.1:8000/temp/">Count Word</a></p>
<hr>
<h1>About</h1>
<p>This is my first site<br>
<h3>من حتی می تونم فارسی هم توش بنویسم!!!</h3></b>
</body>
""")
class Solution(object):
    """LeetCode 70 'Climbing Stairs': count distinct ways to reach step n."""

    def climbStairs(self, n):
        """Return the number of distinct ways to climb n steps (1 or 2 at a time).

        BUG FIXES vs. original:
        - removed the stray `f =` line, which was a syntax error;
        - replaced the exponential double recursion with an O(n) iterative
          Fibonacci-style recurrence so calls like climbStairs(100) finish.
        """
        if n < 2:
            return 1
        prev, curr = 1, 1  # ways(n-2), ways(n-1)
        for _ in range(2, n + 1):
            prev, curr = curr, prev + curr
        return curr


sol = Solution()
n = 3  # kept from the original script, although unused below
# Python 3 print function (original used a Python 2 print statement).
print(sol.climbStairs(100))
|
# Prompt the user for a message and a repeat count.
msg = input('Please write the message here: ')
n = input('How many times would you like to repeat?: ')
# Conversion: input() returns str; raises ValueError on non-numeric input.
n = int(n)
# Print the message n times, one per line.
for i in range(n):
    print(msg)
|
# coding=utf-8
from __future__ import unicode_literals
import os
from django.forms import widgets
from django.utils.safestring import mark_safe
from django.core.urlresolvers import reverse
from django.conf import settings
HTML = (
'<div class="s3direct" data-url="{policy_url}">'
' <div class="link-controls">'
' <a class="link" target="_blank" href="{file_url}">{file_name}</a>'
' <a class="remove" href="#remove">Очистить</a>'
' </div>'
' <div class="progress-controls">'
' <div class="progress progress-striped">'
' <div class="progress-bar progress-bar-success" role="progressbar" aria-valuenow="0" aria-valuemin="0" aria-valuemax="100">'
' </div>'
' <div class="info"></div>'
' </div>'
' <span class="abort btn btn-danger btn-sm">Отмена</span>'
' </div>'
' <div class="form-controls">'
' <input type="hidden" value="{file_url}" id="{element_id}" name="{name}" />'
' <input type="file" class="fileinput" />'
' </div>'
'</div>'
)
class S3DirectEditor(widgets.TextInput):
    """Form widget that uploads a file directly to S3 from the browser.

    Renders the module-level HTML template with the upload policy URL and
    the current file URL; the hidden input returns the final S3 URL in the
    form POST.
    """
    class Media:
        # Static assets injected via Django's form-media framework.
        js = (
            's3direct/js/jquery-1.10.2.min.js',
            's3direct/js/jquery.iframe-transport.js',
            's3direct/js/jquery.ui.widget.js',
            's3direct/js/jquery.fileupload.js',
            's3direct/js/s3direct.js',
        )
        css = {
            'all': (
                's3direct/css/bootstrap-progress.min.css',
                's3direct/css/styles.css',
            )
        }
    def __init__(self, *args, **kwargs):
        # Optional 'upload_to' kwarg selects the S3 key prefix for uploads.
        self.upload_to = kwargs.pop('upload_to', '')
        super(S3DirectEditor, self).__init__(*args, **kwargs)
    def render(self, name, value, attrs=None):
        """Render the widget HTML for the given field name and value."""
        # NOTE(review): single-argument build_attrs matches Django < 1.11;
        # newer Django expects build_attrs(base_attrs, extra_attrs) —
        # confirm the project's Django version.
        final_attrs = self.build_attrs(attrs)
        element_id = final_attrs.get('id')
        kwargs = {'upload_to': self.upload_to}
        policy_url = reverse('s3direct', kwargs=kwargs)
        file_url = value if value else ''
        if hasattr(file_url, 'name'):
            # FieldFile-like values: use the stored file name/path.
            file_url = file_url.name
        file_name = os.path.basename(file_url)
        output = HTML.format(policy_url=policy_url,
                             file_url=file_url,
                             file_name=file_name,
                             element_id=element_id,
                             name=name)
        return mark_safe(output)
|
import unittest
from .. import TEST_DTYPES
from pytorch_metric_learning.utils import loss_and_miner_utils as lmu
import torch
class TestLossAndMinerUtils(unittest.TestCase):
    """Unit tests for pytorch_metric_learning.utils.loss_and_miner_utils."""
    @classmethod
    def setUpClass(self):
        # BUG FIX: the device was hardcoded to 'cuda', which made the whole
        # suite crash on machines without a GPU; fall back to CPU.
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    def test_logsumexp(self):
        for dtype in TEST_DTYPES:
            # float16 is much less precise; relax the comparison tolerance.
            rtol = 1e-2 if dtype == torch.float16 else 1e-5
            mat = torch.tensor([[-1, 0, 1, 10, 50],
                                [-300, -200, -100, -50, -20],
                                [-300, -200, 0, 200, 300],
                                [100, 200, 300, 400, 500],
                                [0,0,0,0,0]], dtype=dtype, requires_grad=True).to(self.device)
            result = lmu.logsumexp(mat, keep_mask=None, add_one=False, dim=1)
            torch.mean(result).backward(retain_graph=True)
            correct_result = torch.logsumexp(mat, dim=1, keepdim=True)
            self.assertTrue(torch.allclose(result, correct_result, rtol=rtol))
            # add_one=True is equivalent to appending a column of zeros.
            result = lmu.logsumexp(mat, keep_mask=None, add_one=True, dim=1)
            torch.mean(result).backward(retain_graph=True)
            correct_result = torch.logsumexp(torch.cat([mat, torch.zeros(mat.size(0),dtype=dtype).to(self.device).unsqueeze(1)], dim=1), dim=1, keepdim=True)
            self.assertTrue(torch.allclose(result, correct_result, rtol=rtol))
            keep_mask = torch.tensor([[1, 1, 0, 0, 0],
                                      [1, 1, 1, 1, 1],
                                      [0, 1, 1, 1, 0],
                                      [0, 0, 0, 0, 0],
                                      [0, 1, 1, 0, 0]], dtype=torch.bool).to(self.device)
            result = lmu.logsumexp(mat, keep_mask=keep_mask, add_one=False, dim=1)
            torch.mean(result).backward()
            # Expected: logsumexp over only the kept entries per row; a
            # fully-masked row (row 3) should come out as 0.
            row0_input = torch.tensor([-1, 0], dtype=dtype).to(self.device)
            row1_input = torch.tensor([-300, -200, -100, -50, -20], dtype=dtype).to(self.device)
            row2_input = torch.tensor([-200, 0, 200], dtype=dtype).to(self.device)
            row4_input = torch.tensor([0, 0], dtype=dtype).to(self.device)
            row0 = torch.logsumexp(row0_input, dim=0).unsqueeze(0)
            row1 = torch.logsumexp(row1_input, dim=0).unsqueeze(0)
            row2 = torch.logsumexp(row2_input, dim=0).unsqueeze(0)
            row3 = torch.tensor([0.], dtype=dtype).to(self.device)
            row4 = torch.logsumexp(row4_input, dim=0).unsqueeze(0)
            correct_result = torch.stack([row0, row1, row2, row3, row4], dim=0)
            self.assertTrue(torch.allclose(result, correct_result, rtol=rtol))
    def test_get_all_pairs_triplets_indices(self):
        original_x = torch.arange(10)
        for i in range(1, 11):
            # i copies of each of the 10 labels.
            x = original_x.repeat(i)
            correct_num_pos = len(x)*(i-1)
            correct_num_neg = len(x)*(len(x)-i)
            a1, p, a2, n = lmu.get_all_pairs_indices(x)
            self.assertTrue(len(a1) == len(p) == correct_num_pos)
            self.assertTrue(len(a2) == len(n) == correct_num_neg)
            correct_num_triplets = len(x)*(i-1)*(len(x)-i)
            a, p, n = lmu.get_all_triplets_indices(x)
            self.assertTrue(len(a) == len(p) == len(n) == correct_num_triplets)
    def test_convert_to_triplets(self):
        a1 = torch.LongTensor([0,1,2,3])
        p = torch.LongTensor([4,4,4,4])
        a2 = torch.LongTensor([4,5,6,7])
        n = torch.LongTensor([5,5,6,6])
        # With all-distinct labels no positive pair shares an anchor with a
        # negative pair, so no triplets can be formed.
        triplets = lmu.convert_to_triplets((a1,p,a2,n), labels=torch.arange(7))
        self.assertTrue(all(len(x)==0 for x in triplets))
        a2 = torch.LongTensor([0,4,5,6])
        triplets = lmu.convert_to_triplets((a1,p,a2,n), labels=torch.arange(7))
        self.assertTrue(triplets==[torch.LongTensor([0]),torch.LongTensor([4]), torch.LongTensor([5])])
    def test_convert_to_weights(self):
        a = torch.LongTensor([0,1,2,3]).to(self.device)
        p = torch.LongTensor([4,4,4,4]).to(self.device)
        n = torch.LongTensor([5,5,6,6]).to(self.device)
        for dtype in TEST_DTYPES:
            weights = lmu.convert_to_weights((a,p,n), labels=torch.arange(7).to(self.device), dtype=dtype)
            correct_weights = torch.tensor([0.25,0.25,0.25,0.25,1,0.5,0.5], dtype=dtype).to(self.device)
            self.assertTrue(torch.all(weights==correct_weights))
        # Empty index tensors: every sample should get weight 1.
        a = torch.LongTensor([]).to(self.device)
        p = torch.LongTensor([]).to(self.device)
        n = torch.LongTensor([]).to(self.device)
        for dtype in TEST_DTYPES:
            weights = lmu.convert_to_weights((a,p,n), labels=torch.arange(7).to(self.device), dtype=dtype)
            correct_weights = torch.tensor([1,1,1,1,1,1,1], dtype=dtype).to(self.device)
            self.assertTrue(torch.all(weights==correct_weights))
if __name__ == "__main__":
unittest.main() |
import sys

# Join all command-line arguments into a single search key; the trailing
# space matches the fixed-width field compared below.
a = " ".join(sys.argv[1:]) + " "
for line in sys.stdin.readlines():
    # Collapse runs of whitespace so column positions are predictable.
    name = " ".join(line.split())
    # NOTE(review): compares a fixed 9-character slice [12:21] of the
    # normalized line against the joined arguments — assumes a fixed-width
    # record layout; confirm against the actual input format.
    if name[12:21] == a:
        # BUG FIX: was the Python 2 statement `print name`.
        print(name)
|
# All messages are (subject, body) templates; the bodies contain {name} and
# {url} placeholders filled in by the mailer via str.format.
# NOTE(review): the bodies contain user-facing typos ("recieving",
# "upccoming", "succesfully") and stray sentence breaks from the in-string
# line continuations — these are runtime strings, so fixing them changes the
# emails users receive; fix with product sign-off.
from constants import CONF_NAME
account_verification = ('Please verify your account','Dear {name},\
Thank you for activating your account, we look forward to recieving.\
your presentations. To complete the process please activate your account\
by clicking on the following link <a href="{url}">{url}</a>')
password_reset = ('Password Reset','Dear {name},\
Please click on the following link to reset your password <a href="{url}">{url}</a>')
new_account = ('New Account','Dear {name},\
Your account is ready for activation for the upccoming %s , please follow this link to activate your account <a href="{url}">{url}</a>' % CONF_NAME)
recieved_presentation = ('Presentation recieved', 'Dear {name},\
Congratulations your presentation has uploaded succesfully, to view your submission and confirm the upload please click <a href="{url}">{url}</a>' )
|
from googleplaces import GooglePlaces, types
def hospitalfind(my_input):
    """Return a formatted text listing of up to 5 hospitals near *my_input*.

    Args:
        my_input: location accepted by GooglePlaces.nearby_search (e.g. a
            place name or lat/lng dict).

    Returns:
        A single string: one block per hospital (name, address, phone, map
        URL, website, rating), separated by underscore rules.
    """
    # SECURITY NOTE: hardcoded API key checked into source — move it to an
    # environment variable / config and rotate the exposed key.
    YOUR_API_KEY = 'AIzaSyDuy19nMwHBvLvgkg9upGZkex9jqriWkQ0'
    google_places = GooglePlaces(YOUR_API_KEY)
    query_result = google_places.nearby_search(
        location=my_input, keyword='hospital',
        radius=2000, types=[types.TYPE_HOSPITAL])
    # PERF: build the output as a list and join once, instead of repeated
    # string concatenation in the loop.
    parts = [" "]
    for count, place in enumerate(query_result.places, start=1):
        place.get_details()
        parts.append(
            "\n Name :" + str(place.name).upper()
            + "\n Address:" + str(place.formatted_address)
            + "\n Phone Number :" + str(place.international_phone_number).upper()
            + "\n Map Url :" + str(place.url)
            + "\n Web Link :" + str(place.website)
            + "\n Ratings:" + str(place.rating)
            + "\n" + ("_" * 50) + "\n"
        )
        if count == 5:  # cap the listing at 5 places (as before)
            break
    return "".join(parts)
#returns nearby Pharmacy
# dependency : python-google-places 1.4.1
|
# -*- coding: utf-8 -*-
# @TIME : 2021/3/28 16:11
# @AUTHOR : Xu Bai
# @FILE : __init__.py.py
# @DESCRIPTION :
from .alexnet import AlexNet
from .resnet34 import ResNet34
from .squeezenet import SqueezeNet
# With these imports in place, the main script can simply write
# `from models import AlexNet` (etc.) instead of importing each submodule.
# from torchvision.models import InceptinV3
# from torchvision.models import alexnet as AlexNet
|
from flask import Flask, session, request
from flask_restplus import fields, Resource, Api, reqparse
import re
import datetime
import os
from Helper import *
from TopicModel import *
app = Flask(__name__)
# NOTE(review): placeholder secret key — load from config/env in production.
app.config['SECRET_KEY'] = 'secret key'
api = Api(app)
helper = Helper()
# Topic model loaded once at startup from pickled model + dictionary files.
tm = TopicModel('model/model.p', 'model/dictionary.p')
# Request payload schemas used for Swagger docs / request validation.
sentiment_req_fields = api.model('SentimentReqFields', {
    'text': fields.String(required=True, description='The user conversations'),
    'uid': fields.String(required=True, description='The user ID')
})
unhappy_req_fields = api.model('UnhappyReqFields', {
    'max': fields.Float(default=0.0, description='The maximum sentiment value')
})
from django.db import models
from django.db.models import permalink
# Create your models here.
class Blogpost(models.Model):
    """A single blog entry, addressed by its unique slug."""
    title = models.CharField(max_length=100, unique=True)
    # NOTE(review): unique=True on author means each author name can appear
    # on at most one post — looks unintentional; confirm before migrating.
    author = models.CharField(max_length=100,unique=True)
    slug = models.SlugField(max_length=100,unique=True)
    body = models.TextField()
    posted = models.DateField(db_index=True,auto_now_add=True)
    def __unicode__(self):
        # Python 2 style string representation (Django 1.x era).
        return '%s' % self.title
    @permalink
    def get_absolute_url(self):
        # NOTE(review): @permalink was removed in Django >= 2.1 — replace
        # with django.urls.reverse() when upgrading.
        return ('view_blog_post',None,{'slug':self.slug})
|
"""
Problem 5
Smallest multiple
"""
from utility.decorators import timeit, printit
from utility import math_f
def convert(l):
    """Return a dict mapping each element of *l* to its occurrence count.

    Args:
        l: an iterable of hashable items (here: a list of prime factors,
           possibly with repetition).

    Returns:
        dict of item -> count (a plain dict, as before).
    """
    # Local import keeps the module's import section unchanged; Counter is
    # the stdlib replacement for the original hand-rolled counting loop.
    from collections import Counter
    return dict(Counter(l))
def redu(d):
    """Multiply out a prime-factorisation map: product of base ** exponent."""
    product = 1
    for base, exponent in d.items():
        product *= base ** exponent
    return product
@printit
@timeit
def run(n):
    """Return the smallest number evenly divisible by all of 1..n (Euler #5).

    For every prime <= n, find the highest power of that prime occurring in
    the factorisation of any number in 2..n; the product of those prime
    powers is the LCM of 1..n.
    """
    primes = math_f.sieve_of_eratosthenes(n)
    freq = {i: 0 for i in primes}
    # BUG FIX: the upper bound was range(2, n), which skipped n itself and
    # produced a wrong LCM whenever n is prime (n's own factor was never
    # counted). Including n is a no-op for composite n such as 20.
    for i in range(2, n + 1):
        x = math_f.get_prime_divisors(i)
        y = convert(x)
        for key in y:
            if freq[key] < y[key]:
                freq[key] = y[key]
    return redu(freq)
if __name__ == "__main__":
n = 20
run(n)
|
from tqdm import tqdm
import torch
import config
def train_fn(model, dataloader, optimizer):
    """Run one training epoch and return the mean loss over all batches."""
    model.train()
    running_loss = 0
    progress = tqdm(dataloader, total=len(dataloader))
    for batch in progress:
        # Move every tensor in the batch onto the configured device.
        batch = {name: tensor.to(config.DEVICE) for name, tensor in batch.items()}
        optimizer.zero_grad()
        _, loss = model(**batch)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    return running_loss / len(dataloader)
def eval_fn(model, dataloader):
    """Run evaluation; return (mean loss, list of per-batch predictions)."""
    model.eval()
    running_loss = 0
    all_preds = []
    progress = tqdm(dataloader, total=len(dataloader))
    for batch in progress:
        # Move every tensor in the batch onto the configured device.
        batch = {name: tensor.to(config.DEVICE) for name, tensor in batch.items()}
        with torch.no_grad():
            batch_preds, loss = model(**batch)
            running_loss += loss.item()
            all_preds.append(batch_preds)
    return running_loss / len(dataloader), all_preds
# from PIL import Image
import glob
import cv2
import os
import sys
input_folder = sys.argv[1]  # first commandline argument sets the original images folder
for input_mask in sys.argv[2:]:  # rest of command line input is list of mask images
    # Output folder named after the mask file (extension stripped).
    mask_name = input_mask.split(".")[0]
    out_dir = os.path.join(input_folder, mask_name)
    os.makedirs(out_dir, exist_ok=True)
    # PERF/BUG FIX: the mask was re-read from disk for every image; it is
    # invariant per mask file, so load it once here.
    mask = cv2.imread(input_mask)
    # PORTABILITY FIX: paths were built with hardcoded backslashes ("\\" and
    # "\*.jpg"), which only worked on Windows; os.path.join works everywhere.
    for filename in glob.glob(os.path.join(input_folder, "*.jpg")):  # assuming jpg
        image = cv2.imread(filename)
        # Mask input image with binary mask
        result = cv2.bitwise_and(image, mask)
        # Color background white i.e. white mask
        # result[mask == 0] = 255 # Optional
        newFile = os.path.join(out_dir, os.path.basename(filename))
        print(newFile)
        cv2.imwrite(newFile, result)
|
import numpy as np
import torch
from torch.utils.data.dataset import Dataset
from torchvision import transforms
from PIL import Image
from PIL import ImageOps
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
class CaltechBirds(Dataset):
    """Caltech-UCSD 200 Birds dataset.

    Reads image paths from ./lists/train.txt or ./lists/test.txt; each
    listed path starts with an 'NNN.Species_name/' prefix from which the
    zero-based class id and species name are parsed. __getitem__ yields
    (transformed image tensor, class id).
    """
    def __init__(self, data_split):
        """
        Args:
            data_split (string): desired data split, 'train' or 'test'
        """
        self.train_image_list_path = './lists/train.txt'
        self.test_image_list_path = './lists/test.txt'
        self.data_split = data_split
        # Image transformation: resize to the 224x224 input size and
        # normalize with the standard ImageNet channel statistics.
        self.transform = transforms.Compose([transforms.Resize((224,224)),
                                             transforms.ToTensor(),
                                             transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
        # Image resize with envelope mode.
        # NOTE(review): this helper is defined but never used anywhere in the
        # class — kept for reference; confirm whether it can be removed.
        def imageResizeAndPadding(im, desired_size):
            old_size = im.size  # old_size[0] is in (width, height) format
            ratio = float(desired_size)/max(old_size)
            new_size = tuple([int(x*ratio) for x in old_size])
            im = im.resize(new_size, Image.ANTIALIAS)
            delta_w = desired_size - new_size[0]
            delta_h = desired_size - new_size[1]
            padding = (delta_w//2, delta_h//2, delta_w-(delta_w//2), delta_h-(delta_h//2))
            return ImageOps.expand(im, padding)
        # Read train/test image lists.
        # BUG FIX: the list files were opened and never closed; use context
        # managers so the handles are released.
        with open(self.train_image_list_path) as fh:
            self.train_images = fh.readlines()
        with open(self.test_image_list_path) as fh:
            self.test_images = fh.readlines()
        # Select the requested split.
        if self.data_split == 'train':
            self.image_list = self.train_images
        if self.data_split == 'test':
            self.image_list = self.test_images
        # Build the per-sample data list: [image path, class id, species name].
        self.data_list = list()
        for i in range(len(self.image_list)):
            rel_path = self.image_list[i].split("\n")[0]
            # BUG FIX: was self.image_list[0] — every sample got the first
            # image's species. Parse the 'NNN.Name/...' prefix of the
            # sample's own path.
            species = rel_path.split('/')[0]
            id, name = int(species.split('.')[0])-1, species.split('.')[1]
            self.data_list.append(['./images/' + rel_path, id, name])
    def __len__(self):
        return len(self.data_list)
    def __getitem__(self, index):
        # BUG FIX: the original returned an undefined name `sample`
        # (NameError on every access). Load the image, apply the transform,
        # and return it with its class id.
        path, class_id, _name = self.data_list[index]
        image = Image.open(path).convert('RGB')
        sample = (self.transform(image), class_id)
        return sample
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""=================================================
@Project -> File :PycharmProjects -> user
@IDE :PyCharm
@Author :Mr. toiler
@Date :1/10/2020 3:15 PM
@Desc :
=================================================="""
from web.dao.db import DB
from web.dto.user import UserDto
class UserDao(object):
    """Data-access object for the USER table (SQLite via the DB wrapper).

    Provides table creation plus lookup by login name / id and inserts.
    The SQL below contains Chinese inline comments describing STATUS
    (0 = active, 1 = deleted) and the DEPART/ORG encoding.
    """
    create_sql = """CREATE TABLE USER (
ID INTEGER PRIMARY KEY,
LOGIN_NAME VARCHAR(40) NOT NULL,
NAME VARCHAR(40) NOT NULL,
MOBILE VARCHAR(40),
STATUS INT, --0 正常 1 删除
DEPART VARCHAR(256), -- 文字 金科/武汉事业群/架构
ORG VARCHAR(40), -- 代号 ( 00001, 金科/武汉事业群; 00001, 金科/武汉事业群/架构;
MEMO TEXT(500))"""
    insert_sql = "insert into user(id,login_name,name,mobile,status,depart,org,memo) values(?,?,?,?,?,?,?,?)"
    query_sql = "select id,login_name,name,mobile,status,depart,org,memo from user"
    def __init__(self, _db: DB):
        # Keep a handle on the shared DB wrapper; all queries go through it.
        self._db = _db
    def create_table(self):
        """Create the USER table and its LOGIN_NAME index."""
        self._db.create_table(UserDao.create_sql)
        self._db.execute("create index USER_IND_LOGIN_NAME on USER(LOGIN_NAME)")
    def get_user_by_login_name(self, login_name: str):
        """Return the UserDto with the given login name, or None."""
        sql = UserDao.query_sql + " where LOGIN_NAME = ?"
        paras = (login_name,)
        row = self._db.get_one(sql, paras)
        if row:
            return UserDto(pid=row[0], login_name=row[1], name=row[2], mobile=row[3], status=row[4],
                           depart=row[5], org=row[6], memo=row[7])
        return None
    def get_user_by_id(self, pid):
        """Return the UserDto with the given primary key, or None."""
        sql = UserDao.query_sql + " where id = ?"
        paras = (pid,)
        row = self._db.get_one(sql, paras)
        if row:
            return UserDto(pid=row[0], login_name=row[1], name=row[2], mobile=row[3], status=row[4],
                           depart=row[5], org=row[6], memo=row[7])
        return None
    def insert_user(self, login_name=None, name=None, mobile=None, depart=None, org=None, memo=None,
                    u=None, is_commit=False):
        """Insert one USER row.

        (Translated from the original Chinese docstring:) when ``u`` is a
        UserDto, or ``login_name`` is a full parameter tuple, the id from
        the supplied data is used; when the fields are passed individually,
        id is None and the database auto-assigns it. STATUS is always
        inserted as 0 (active).
        :param login_name: login name, or a full 8-tuple of column values
        :param name:
        :param mobile:
        :param depart:
        :param org:
        :param memo:
        :param u: optional UserDto carrying all fields
        :param is_commit: commit immediately when True
        :return:
        """
        sql = UserDao.insert_sql
        if u and isinstance(u, UserDto):
            para = (u.id, u.login_name, u.name, u.mobile, 0, u.depart, u.org, u.memo)
        elif isinstance(login_name, tuple):
            # Caller supplied the whole parameter tuple directly.
            para = login_name
        else:
            para = (None, login_name, name, mobile, 0, depart, org, memo)
        self._db.insert_one(sql, para, is_commit)
|
from django.db import models
from django.urls import reverse
# Create your models here.
class TodoItem(models.Model):
    """One todo entry with a short title and free-form content."""
    title = models.CharField(max_length=50, null=True)
    content = models.TextField(null=False)
    def get_detail_url(self):
        """Return the URL of this item's detail view (the 'detail' route)."""
        return reverse('detail', kwargs={'my_item':self.id})
|
# FastAPI
from fastapi import APIRouter, HTTPException, Request, Depends, status, BackgroundTasks
# SQLAlchemy
from sqlalchemy.orm import Session
# Types
from typing import List, Optional
# Custom Modules
from .. import schemas, crud
from ..dependencies import get_db, get_current_user
from ..background_functions.email_notifications import send_new_follower_notification_email
from ..core import security
from ..core.config import settings
from ..core.websocket.connection_manager import ws_manager
# FastAPI router object
router = APIRouter(prefix="/follows", tags=['follows'])
@router.get("/{userId}", response_model=List[schemas.FollowsResponse])
def get_follows(userId: int, db: Session = Depends(get_db)):
    """
    The GET method for this endpoint requires a userId and will send
    back information about all users the userId follows.
    Returns:
        This endpoint will always return an array of objects.
    Errors:
        An error will be returned if the userId does not exist.
    """
    user = crud.get_user_by_id(db, userId)
    if not user:
        # 404 rather than an empty list, so callers can distinguish
        # "no such user" from "follows nobody".
        raise HTTPException(status.HTTP_404_NOT_FOUND,
                            detail="User does not exist")
    follows = crud.get_all_users_following(db, userId)
    # Flatten the ORM relationship rows into the public response schema.
    return [
        schemas.FollowsResponse(
            userId=following.follows_user.id,
            email=following.follows_user.email,
            username=following.follows_user.username,
            bio=following.follows_user.bio,
            birthdate=following.follows_user.birthdate
        ) for following in follows
    ]
@router.get("/count/{userId}", response_model=schemas.CountBase)
def get_follows_count_for_user(
    userId: int,
    db: Session = Depends(get_db)
):
    """Return how many users *userId* is following."""
    following_total = crud.get_following_for_user(db, user_id=userId)
    return schemas.CountBase(count=following_total)
@router.post("", response_model=schemas.EmptyResponse)
async def create_follow_record_for_user(
    request_body: schemas.FollowsCreateRequestBody,
    bg_tasks: BackgroundTasks,
    db: Session = Depends(get_db),
    current_user: schemas.User = Depends(get_current_user)
):
    """
    The POST method for this endpoint will create a follow relationship between two users:
    current_user requests to follow a new user.
    """
    crud.create_follow_relationship(
        db, current_user.id, request_body.followUserId)
    #
    # Broadcast WS message so user components can update
    #
    message = schemas.WSMessage[schemas.WSFollowsUpdateBody](
        action=schemas.WSMessageAction.NewFollower,
        body=schemas.WSFollowsUpdateBody(
            userId=current_user.id,
            followUserId=request_body.followUserId
        )
    )
    if not ws_manager.user_is_online(request_body.followUserId):
        # Followed user is offline and will miss the WS event, so queue a
        # notification email in the background instead.
        new_follower = crud.get_user_by_id(db, request_body.followUserId)
        bg_tasks.add_task(send_new_follower_notification_email,
                          new_follower, current_user)
    await ws_manager.broadcast(message, current_user.id)
    return schemas.EmptyResponse()
@router.delete('', response_model=schemas.EmptyResponse)
async def delete_follow_relationship(
    request_body: schemas.FollowsDeleteRequestBody,
    db: Session = Depends(get_db),
    current_user: schemas.User = Depends(get_current_user)
):
    """Remove the current user's follow of request_body.followUserId."""
    # NOTE(review): delete_successful is never checked — deleting a
    # non-existent relationship silently succeeds (and the WS event is
    # still broadcast); confirm that is intended.
    delete_successful = crud.delete_follow_relationship(
        db, current_user.id, request_body.followUserId)
    #
    # Broadcast WS message so user components can update
    #
    message = schemas.WSMessage[schemas.WSFollowsUpdateBody](
        action=schemas.WSMessageAction.LostFollower,
        body=schemas.WSFollowsUpdateBody(
            userId=current_user.id,
            followUserId=request_body.followUserId
        )
    )
    await ws_manager.broadcast(message, current_user.id)
    return schemas.EmptyResponse()
|
#! /usr/bin/env python
# vim:sw=4 ts=4 et:
#
# Copyright (c) 2015, 2016 Torchbox Ltd.
# 2015-04-02 ft: created
# 2016-05-10 ft: modified for TS
#
from flask import Flask, request, make_response
app = Flask(__name__)
import os
from kyotocabinet import DB
import settings
def text_response(text, code=200):
    """Build a plain-text UTF-8 HTTP response with the given status code."""
    resp = make_response(text, code)
    resp.headers['Content-Type'] = 'text/plain;charset=UTF-8'
    return resp
@app.route("/")
def main():
    """Health-check endpoint: confirms the service is up."""
    return text_response("Ready.\n")
@app.route("/purge/<domain>/<int:genid>", methods=[ 'POST' ])
def purge(domain, genid):
    """Record a new generation id for *domain* in the Kyoto Cabinet database.

    Only callers whose remote address is listed in settings.ALLOW may purge.
    NOTE(review): 501 (Not Implemented) is returned for database failures —
    500 looks more appropriate, but clients may match on the current code;
    confirm before changing.
    """
    if request.remote_addr not in settings.ALLOW:
        return text_response("Not permitted.\n", 403)
    db = DB()
    if not db.open(settings.GENID_DATABASE, DB.OWRITER | DB.OCREATE):
        return text_response("Failed to purge: cannot open database.\n", 501)
    set_ok = db.set(domain, genid)
    db.close()
    if not set_ok:
        return text_response("Failed to purge: cannot set genid.\n", 501)
    else:
        return text_response("Purged <%s>\n" % (domain,))
if __name__ == "__main__":
app.run(debug = settings.DEBUG)
|
import unittest
# import pdb; pdb.set_trace()
def digits(x):
    """Convert a non-negative integer into the list of its decimal digits.

    Args:
        x: the integer to decompose.

    Returns:
        The digits of ``x`` in least-significant-first order. Note that
        ``digits(0)`` returns ``[]``.

    DOC FIX: the original docstring described ``x`` as "the number of
    digits we want" and showed an impossible doctest result
    (``[4,58,6,3,7,8]``); both corrected below.

    >>> digits(4586378)
    [8, 7, 3, 6, 8, 5, 4]
    """
    digs = []
    while x != 0:
        x, mod = divmod(x, 10)
        digs.append(mod)
    return digs
def is_palindrome(x):
    """Determine whether the decimal digits of *x* form a palindrome.

    Args:
        x: the number to check for palindromicity.

    Returns:
        True if the digits of ``x`` read the same forwards and backwards,
        False otherwise.

    >>> is_palindrome(1234)
    False
    >>> is_palindrome(2468642)
    True
    """
    # Decompose x into its digits (least-significant first), then compare
    # the sequence with its own reverse.
    ds = []
    remaining = x
    while remaining != 0:
        remaining, digit = divmod(remaining, 10)
        ds.append(digit)
    return ds == ds[::-1]
class Tests(unittest.TestCase):
    """Tests for the is_palindrome function."""
    def test_negative(self):
        "Check that it returns False correctly."
        self.assertFalse(is_palindrome(1234))
    def test_positive(self):
        "Check that it returns True correctly."
        self.assertTrue(is_palindrome(1234321))
    def test_single_digit(self):
        "Check that it works for single-digit numbers."
        for i in range(10):
            self.assertTrue(is_palindrome(i))
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
""" FCDR harmonisation modules
Project: H2020 FIDUCEO
Author: Arta Dilo \NPL MM
Reviewer: Peter Harris \NPL MM, Sam Hunt \NPL ECO
Date created: 12-12-2016
Last update: 02-05-2017
Version: 12.0
Perform harmonisation of a satellite series using matchup records of pairs of
series sensors and reference sensor. Harmonisation runs an ODR regression on
matchups and is based on two models:
- measurement model of a series sensor; series are AVHRR, HIRS and MW
- adjustment model for a pair of measurements, i.e. matchup pixels
from two (sensor) instruments of a series; the adjustment accounts for the
spectral differences between two sensors, and the matchup process.
Harmonisation returns calibration coefficients for each sensor in the series and
their covariance matrix, it propagates coefficients uncertainty to the measured
quantity (radiance for the considered series), i.e. evaluates the harmonisation
uncertainty to the series FCDR. """
from numpy import zeros, ones, savetxt
from random import sample
from os.path import join as pjoin
from optparse import OptionParser
from datetime import datetime as dt
import readHD as rhd
import harFun as har
import unpFun as upf
import visFun as vis
from plotHaR import plotSDfit as pltErrs
# Set GLOBAL variables
# NOTE(review): the Windows path below uses unescaped backslashes; "\P",
# "\F", "\D" happen not to be escape sequences so the string survives as-is,
# but a raw string r"..." would be safer — confirm before changing.
datadir = "D:\Projects\FIDUCEO\Data" # main data folder in laptop
#datadir = "/home/ad6/Data" # main data folder in eoserver
#datadir = "/group_workspaces/cems2/fiduceo/Data/Matchup_Simulated/Data" # in CEMS
mcrdir = pjoin(datadir, 'Results') # folder for MC trials results
#pltdir = pjoin(datadir, 'Graphs') # folder for png images of graphs
hvars = 12 # number of columns in the H data matrices of a series
""" Perform multiple-pair regression with ODR """
def multipH(filelist, series, dstype):
    """Fit calibration coefficients for a sensor series with multi-pair ODR.

    Builds the ODR inputs (initial beta from the series' pre-harmonisation
    coefficients, fixed-parameter/fixed-variable masks, data columns), reads
    the matchup data for every sensor pair in ``filelist`` and runs the
    series-wide ODR regression, printing diagnostics along the way.

    Parameters:
        filelist -- list of netCDF matchup file names (one per sensor pair)
        series   -- series description object (e.g. an upf.avhrr instance)
        dstype   -- dataset type: 'r' for real data, otherwise simulated

    Returns:
        (sodr, Hd, Hr, Hs, mutime) -- ODR output object, harmonisation data
        matrix, random-uncertainty matrix, systematic-uncertainty matrix and
        matchup times, as produced by rhd.rHData / har.seriesODR.
    """
    p = series.nocoefs        # number of calibration parameters
    m = series.novars         # number of measured variables
    nos = series.nosensors    # number of sensors in the series
    slist = series.sslab      # list of sensors in the series
    inCoef = series.preHcoef  # input coefficients to simulations

    # Create array of initial beta values for the ODR fit.
    hsCoef = zeros((nos,p))
    # Seed with the same values as the input coefficients in inCoef.
    for sno in range(nos):
        sl = slist[sno]
        hsCoef[sno,:] = inCoef[sl][0:p]
    b0 = hsCoef.flatten('A') # flatten to the 1-D beta0 layout ODR expects
    print '\n\nInitial beta values for ODR'
    print b0

    if series.notime: # work with the not-time-dependent dataset
        folder = pjoin(datadir, 'newSim_notime') # data folder
        # Columns for X variables in the H matrices (drop col 5 and cols >= 10).
        cols = [x for x in range(hvars) if (x!=5 and x<10)]
        ## create ifixb arrays; fix a coeffs for reference sensor
        #parfix = zeros(nos*p, dtype=int)
        #parfix[0:p] = 1
        #fixb = parfix.tolist() # ifixb ODR parameter
        #print '\n\nifixb array for sensors', slist
        #print fixb
        # No parameters or variables are held fixed in this branch.
        fixb = None
        fixx = None
    else: # work with data in the main/time-dependent data folder
        folder = pjoin(datadir, 'newSim') # data folder
        # Columns for X variables in the H matrices (first 11 columns).
        cols = [x for x in range(hvars) if x < 11]
        # Create ifixb mask: free all coefficients but a3 for fitted sensors;
        # the reference sensor (index 0) stays fully fixed (mask 0).
        parfix = zeros(nos*p, dtype=int)
        for sidx in range(1,nos):
            parfix[p*sidx:p*sidx+p-1] = 1
        fixb = parfix.tolist() # ifixb ODR parameter
        print '\n\nifixb array for sensors', slist
        print fixb
        # Create ifixx mask: hold orbit temperature To fixed for both sensors.
        varfix = ones(m*2+1, dtype=int)
        varfix[m] = 0 # fix To for 1st sensor
        varfix[2*m] = 0 # fix To for 2nd sensor
        fixx = varfix.tolist() # ifixx ODR parameter
        print '\nifixx array', fixx

    #if Hd.shape[1] != hvars:
    #    sys.exit('Incorrect shape of harmonisation matrices')

    # Real datasets currently live in a different folder.
    if dstype == 'r':
        folder = pjoin(datadir, 'Harm_RealData') # real data folder

    # Read matchup data from the list of netCDF files.
    Im,Hd,Hr,Hs,sp,mutime,corL,Is,sl1,mxsl1,sl2,mxsl2,CsU1,CictU1,CsU2,CictU2 = rhd.rHData(folder, filelist)
    series.setIm(Im) # set the series index matrix

    # Perform ODR on all sensors from the list.
    print '\nRunning ODR for multiple pairs\n'
    sodr = har.seriesODR(Hd[:,cols],Hd[:,11],Hr[:,cols],Hr[:,11],b0,sp,series,fixb,fixx)
    print '\nODR output for sensors', slist, '\n'
    sodr.pprint() # print summary of odr results
    print '\nCost function in final iteration (Sum of squares):', sodr.sum_square
    print '\nSum of squares of epsilon error (K):', sodr.sum_square_eps
    print '\nSum of squares of delat error (H variables):', sodr.sum_square_delta
    #print '\n odr iwork array'
    #print sodr.iwork

    # Diagnostics: input vs estimated ranges for K and Lref.
    print '\nIndex matrix of sensors in', filelist
    print Im
    print '\n\nRange of input K values [', min(Hd[:,11]), max(Hd[:,11]), ']'
    print 'Range of estimated K values (ODR y) [', min(sodr.y), max(sodr.y), ']'
    print 'Range of estimated K error (ODR epsilon) [', min(sodr.eps), max(sodr.eps), ']'
    print 'Range of input Lref values [', min(Hd[:,0]), max(Hd[:,0]), ']'
    print 'Range of estimated Lref values (from ODR xplus) [', min(sodr.xplus[0,:]), max(sodr.xplus[0,:]), ']'
    print 'Range of estimated Lref error (from ODR delta) [', min(sodr.delta[0,:]), max(sodr.delta[0,:]), ']'
    print '\nFirst row of H data matrix'
    print Hd[0,:]
    print '\nLast row of H data matrix'
    print Hd[-1,:]

    return sodr, Hd, Hr, Hs, mutime
# Plot harmonisation results for series sensors
def plotSSH(sodr, Hd, series, nobj):
    """Plot harmonisation results for the fitted sensors of a series.

    Shows a correlation heatmap of the fitted calibration coefficients and,
    per fitted sensor, a radiance-bias plot with a 4*sigma uncertainty band
    evaluated from the ODR coefficient covariance.

    Parameters:
        sodr   -- ODR output object returned by multipH
        Hd     -- harmonisation data matrix
        series -- series description object (e.g. an upf.avhrr instance)
        nobj   -- number of matchup records to sample for plotting

    Returns:
        mpcor -- correlation matrix of the fitted coefficients

    NOTE(review): this function reads the module-level names ``filelist``
    and ``avhrrNx`` that are only bound in the __main__ block below, so it
    cannot be imported and called from elsewhere as-is — confirm intent.
    """
    nos = series.nosensors    # number of sensors in the series
    p = series.nocoefs        # number of calibration parameters
    m = series.novars         # number of measured variables
    slist = series.sslab      # list of sensors in the series
    inCoef = series.preHcoef  # input coefficients to simulations
    Im = series.im            # index matrix for series matchups

    mpbeta = sodr.beta # calibration coeffs of fitted sensors
    mpcov = sodr.cov_beta # coefficients covariance
    mpcor = vis.cov2cor(mpcov) # coeffs' correlation matrix
    cor_ttl = 'Correlation of harmonisation coefficients for pairs\n'+', '.join(filelist)
    #cor_lbl = ['a0', 'a1', 'a2', 'a3'] * nos
    vis.plot_corr_heatmap(mpcor, title=cor_ttl, labels=['a0'])
    print '\nCorrelation of harmonisation coefficients for pairs '+', '.join(filelist) +'\n'
    print mpcor

    """ Extract coefficients and covariance of each sensor,
    compute and plot radiance with 4*sigma uncertainty """
    for sno in range(1,nos): # loop through fitted sensors (skip reference at 0)
        sl = slist[sno] # sensor label
        slab = int(sl[1:3]) # two-digit label in Im matrix
        sMidx, eMidx = rhd.sliceHidx(Im, slab) # 1st and last record index
        print '\nFirst and last record for sensor', slab, '[', sMidx, eMidx,']'
        selMU = sample(xrange(sMidx, eMidx), nobj) # random matchups for plotting

        inC = inCoef[sl] # input coeffs to simulations
        print 'Input coefficients for sensor', slab, ':', inC
        inL = avhrrNx.measEq(Hd[selMU, m+1:2*m+1], inC) # radiance from input coeffs
        calC = mpbeta[sno*p:(sno+1)*p] # calib. coeffs for sensor slab
        print 'Fitted coefficients for sensor', slab, ':', calC
        calL = avhrrNx.measEq(Hd[selMU, m+1:2*m+1], calC) # calibrated radiance

        covCC = mpcov[sno*p:(sno+1)*p,sno*p:(sno+1)*p] # coeffs covariance from odr
        print 'Covariance of coefficients for sensor', slab
        print covCC
        # radiance uncertainty from harmonisation
        cLU = avhrrNx.harUnc(Hd[selMU, m+1:2*m+1],calC,covCC)

        # graph of radiance bias with 4*sigma error bars
        plot_ttl = sl + ' Radiance bias and ' + r'$4*\sigma$'+ ' uncertainty from multiple-pairs ODR covariance'
        vis.LbiasU(inL, calL, cLU, 4, plot_ttl)

    return mpcor
if __name__ == "__main__":
    # Parse positional arguments: time-flag data-type series-label.
    usage = "usage: %prog time-flag data-type series-label"
    parser = OptionParser(usage=usage)
    (options, args) = parser.parse_args()
    if len(args) < 3:
        parser.error("Insufficient number of arguments")

    # 1st argument: boolean, defines which dataset to work with.
    notime = args[0]
    # NOTE(review): command-line args are always strings, so this isinstance
    # check can never be True and the string-parsing branch always runs.
    if not isinstance(notime, bool): # if input is not True/False
        notime = str(args[0]).lower()
        if notime in ("yes", "true", "t", "1"):
            notime = True # work with not-time dependent dataset
        else:
            notime = False # work with time dependent dataset
    dtype = args[1] # 2nd argument: type of dataset, r for real, s for simulated
    sslab = args[2] # 3rd argument: series label e.g. avhrr; currently not used
    # TODO: add 4th argument the list of netCDFs with harmonisation data
    #filelist = args[3] # input from a text file ?
    #filelist = ["m02_n15.nc","n15_n14.nc","n14_n12.nc","n12_n11.nc"] #,"n11_n10.nc","n10_n09.nc"]
    #filelist = ["m02_n19.nc","m02_n17.nc","m02_n15.nc","n19_n15.nc","n17_n15.nc"]
    filelist = ["m02_n19.nc","m02_n15.nc","n19_n15.nc","n15_n14.nc"]

    # Time the execution of the harmonisation run.
    st = dt.now() # start time of script run

    # Create instance of series class; currently assumed 'avhrr' only.
    # TODO: change for different series label
    avhrrNx = upf.avhrr(datadir, filelist, notime)
    # Perform regression on multiple pairs.
    sodr, Hd, Hr, Hs, mutime = multipH(filelist, avhrrNx, dtype)

    # Compile the output filename prefix from the run flags.
    fn = 'sh_'
    if dtype =='r':
        fn += 'rd_'
    if notime:
        fn += 'notd_'
    else:
        fn += 'td_'
    # Store calibration coefficients for sensors in the series.
    fnb = fn+'beta.txt' # filename for beta coefficients
    fnb = pjoin(mcrdir, fnb) # path & filename
    savetxt(fnb, sodr.beta, delimiter=',')
    # Store the coefficients' covariance matrix.
    fnc = fn+'bcov.txt' # filename for covariance matrix
    fnc = pjoin(mcrdir, fnc) # path & filename
    savetxt(fnc, sodr.cov_beta, delimiter=',')

    et = dt.now() # end of harmonisation run
    exect = (et-st).total_seconds()
    print '\nTime taken for fitting pairs', filelist
    print (exect/60.), 'minutes\n'

    # Plot results of harmonisation.
    cCorr = plotSSH(sodr, Hd, avhrrNx, 250)

    # Plot weighted residuals.
    noMU = Hd.shape[0] # number of matchups in multiple pairs
    nobj = 10000 # number of matchup records to plot
    pltErrs(avhrrNx, noMU, nobj, '', mutime, Hr, sodr, weight=1)
|
#!/usr/bin/python
import os
import sys
import time
import datetime
import RPi.GPIO
import RPiI2C
import socket
import struct
# DS1307 Constants (control-register bits; see the DS1307 datasheet).
DS1307_CTRL_OUT = 0x80          # OUT bit: SQW/OUT pin level when square wave is off
DS1307_CTRL_SQWE = 0x10         # SQWE bit: enable the square-wave output
DS1307_CTRL_RATE_0 = 0x00       # RS0 rate-select bit
DS1307_CTRL_RATE_1 = 0x01       # RS1 rate-select bit
DS1307_CTRL_RATE_1HZ = 0x00     # square-wave rate select: 1 Hz
DS1307_CTRL_RATE_4KHZ = 0x01    # square-wave rate select: ~4 kHz
DS1307_CTRL_RATE_8KHZ = 0x02    # square-wave rate select: ~8 kHz
DS1307_CTRL_RATE_32KHZ = 0x03   # square-wave rate select: ~32 kHz
DS1307_CTRL_BYTE = (DS1307_CTRL_OUT | DS1307_CTRL_SQWE | DS1307_CTRL_RATE_1HZ)

# Day-of-week names; the DS1307 day register is 1..7 with 1 = Sunday,
# so index 0 is an unused placeholder.
DOW = [ "", "Sunday", "Monday", "Tuesday", \
        "Wednesday", "Thursday", "Friday", "Saturday" ]

# I2C Command Data. To read a specific register, write the address, then read data.
# WRITE: [READ_COUNT, [DS1307_ADDRESS + 0], REG_ADDRESS, DATA, DATA, ...]
# READ:  [READ_COUNT, [DS1307_ADDRESS + 1]]
DS1307_WRITE_ALL = [0, [0xD0, 0x00]]
DS1307_READ_ALL = [64, [0xD1]]
DS1307_WRITE_TIME = [0, [0xD0, 0x00]]
DS1307_READ_TIME = [3, [0xD1]]
DS1307_WRITE_DATE = [0, [0xD0, 0x03]]
DS1307_READ_DATE = [4, [0xD1]]
DS1307_WRITE_CTRL = [0, [0xD0, 0x07]]
DS1307_READ_CTRL = [1, [0xD1]]
DS1307_WRITE_MSG = [0, [0xD0, 0x08]]
DS1307_READ_MSG = [56, [0xD1]]

# Report the Python interpreter version at startup.
print("Python version")
print (sys.version)
print("Version info.")
print (sys.version_info)

# Initialise GPIO (BCM pin numbering) and the I2C driver.
RPi.GPIO.setwarnings(False)
RPi.GPIO.setmode(RPi.GPIO.BCM)
RPiI2C.I2C_Init()
def get_ntp_time(host = "pool.ntp.org"):
    """Query an NTP server and return the current Unix time in seconds.

    Parameters:
        host -- NTP server hostname (default: "pool.ntp.org").

    Returns:
        int Unix timestamp (seconds since 1970-01-01), or None on any
        failure (no network, timeout, malformed reply) -- the caller
        treats None as "no internet connection".
    """
    port = 123
    buf = 1024
    address = (host, port)
    # SNTP client request: first byte 0x1b (LI=0, VN=3, Mode=3), rest zeros.
    msg = '\x1b' + 47 * '\0'
    # NTP timestamps count from 1900-01-01; Unix time counts from 1970-01-01.
    TIME1970 = 2208988800  # 1970-01-01 00:00:00 in NTP seconds
    client = None
    try:
        # connect to server
        client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        client.settimeout(3)
        client.sendto(msg.encode('utf-8'), address)
        msg, address = client.recvfrom(buf)
        # Word 10 of the reply is the transmit-timestamp seconds field.
        t = struct.unpack("!12I", msg)[10]
        t -= TIME1970
        # return time.ctime(t).replace(" "," ")
        return t
    except Exception:
        return None
    finally:
        # Fix: the socket was previously never closed, leaking a file
        # descriptor on every call (success or failure).
        if client is not None:
            client.close()
def update_system_time_from_rtc():
    """Read date and time from the DS1307 and set the Linux system clock.

    Reads the date registers (day/month/year) and time registers
    (hour/minute/second) over I2C, formats them into a `sudo date -s`
    shell command and runs it, then forces the Asia/Ho_Chi_Minh timezone.
    The registers are BCD, so printing each byte with {:02X} yields the
    correct decimal digits directly.
    """
    SetText = ""
    # Point the register pointer at the date registers, then read 4 bytes:
    # day-of-week, day, month, year (year is two-digit, hence the '20' prefix).
    RPiI2C.I2C_SendReceiveData(DS1307_WRITE_DATE[1])
    Result = RPiI2C.I2C_SendReceiveData(DS1307_READ_DATE[1], DS1307_READ_DATE[0])
    SetText += "sudo date -s '20{:02X}-{:02X}-{:02X} ".format(Result[3], Result[2], Result[1])
    # Point at the time registers, then read 3 bytes: seconds, minutes, hours.
    RPiI2C.I2C_SendReceiveData(DS1307_WRITE_TIME[1])
    Result = RPiI2C.I2C_SendReceiveData(DS1307_READ_TIME[1], DS1307_READ_TIME[0])
    SetText += "{:02X}:{:02X}:{:02X}'".format(Result[2], Result[1], Result[0])
    os.system(SetText)
    # Change timezone
    TimeZone = "sudo timedatectl set-timezone Asia/Ho_Chi_Minh"
    os.system(TimeZone)
def update_time_to_rtc(current_time):
    """Write the given struct_time into the DS1307 date and time registers.

    Parameters:
        current_time -- time.struct_time (e.g. from time.gmtime) to store.

    The DS1307 stores all fields in packed BCD. Its day-of-week register is
    1..7 with 1 = Sunday, while struct_time.tm_wday is 0..6 with 0 = Monday,
    hence the +2 / wrap-around conversion below.
    """
    def _bcd(value):
        # Pack a 0-99 integer into the DS1307's BCD register format.
        # (Replaces the repeated `(x % 10) + (int(x / 10) << 4)` pattern;
        # floor division is the idiomatic integer form.)
        return (value % 10) + ((value // 10) << 4)

    # Set the DS1307 with the current system date.
    SetData = list(DS1307_WRITE_DATE[1])
    Day = current_time.tm_wday + 2  # tm_wday Monday=0 -> DS1307 Monday=2
    if Day > 7:
        Day = 1                     # Sunday wraps around to 1
    SetData.append(Day)
    SetData.append(_bcd(current_time.tm_mday))
    SetData.append(_bcd(current_time.tm_mon))
    SetData.append(_bcd(current_time.tm_year - 2000))  # two-digit year
    RPiI2C.I2C_SendReceiveData(SetData)

    # Set the DS1307 with the current system time.
    SetData = list(DS1307_WRITE_TIME[1])
    SetData.append(_bcd(current_time.tm_sec))
    SetData.append(_bcd(current_time.tm_min))
    SetData.append(_bcd(current_time.tm_hour))
    RPiI2C.I2C_SendReceiveData(SetData)
def main():
    """Main loop: keep the system clock and the DS1307 RTC in sync.

    Every `update_system_time_from_rtc_period` seconds it tries (until it
    succeeds once) to set the system clock from the RTC; every
    `update_internet_time_to_rtc_period` seconds it pushes NTP-derived
    time into the RTC. Loops forever, ticking once per second.
    """
    update_system_time_from_rtc_flag = False   # True once system time was set from the RTC
    update_system_time_from_rtc_tick_sec = 0
    update_system_time_from_rtc_period = 5     # seconds between RTC->system attempts
    update_internet_time_to_rtc_tick_sec = 0
    update_internet_time_to_rtc_period = 10    # seconds between NTP->RTC attempts
    rtc_detect_flag = False
    while True:
        # RTC detection: probe the DS1307 write address (0xD0) on the bus.
        if RPiI2C.I2C_Check(0xD0) == 0:
            rtc_detect_flag = True
        else:
            rtc_detect_flag = False

        # Periodically copy RTC time into the system clock, until it succeeds once.
        if update_system_time_from_rtc_tick_sec >= update_system_time_from_rtc_period:
            update_system_time_from_rtc_tick_sec = 0
            if update_system_time_from_rtc_flag == False :
                try:
                    if rtc_detect_flag is True:
                        update_system_time_from_rtc()
                        update_system_time_from_rtc_flag = True
                        print("Get time from RTC")
                    else:
                        print("RTC not detected")
                except Exception as e:
                    # Keep the flag False so the next period retries.
                    update_system_time_from_rtc_flag = False
                    print(str(e))

        # Periodically refresh the RTC from internet (NTP) time.
        if update_internet_time_to_rtc_tick_sec >= update_internet_time_to_rtc_period:
            update_internet_time_to_rtc_tick_sec = 0
            tm = get_ntp_time()
            if tm is not None:
                try:
                    if rtc_detect_flag is True:
                        update_time_to_rtc(time.gmtime(tm))
                        # Slow down once the RTC has been set successfully.
                        update_internet_time_to_rtc_period = 15#300
                        print("RTC time updated")
                    else:
                        print("RTC not detected")
                except Exception as e:
                    # On failure, fall back to the faster retry period.
                    update_internet_time_to_rtc_period = 10
                    print(str(e))
            else:
                print("No internet connection")

        update_internet_time_to_rtc_tick_sec += 1
        update_system_time_from_rtc_tick_sec += 1
        time.sleep(1)
if __name__ == '__main__':
    # Run the RTC/NTP synchronisation loop when executed as a script.
    main()
|
import time
import busio
import board
class ESPNEW:
    """UART-attached ESP dongle driver (CircuitPython).

    Sends MicroPython commands of the form ``dongle.<func>(...)`` over a
    serial link (board.TX/RX) to an ESP module and parses its framed
    replies. All commands and arguments are bytes objects.

    NOTE(review): the byte framing, 15-byte chunking and 0.1 s sleeps
    below match a specific device-side protocol -- do not change them
    without testing against the hardware.
    """

    def __init__(self, baud=115200):
        # Open the UART to the ESP, import the dongle module on it, and
        # drain any boot/echo noise from the receive buffer.
        self.s=busio.UART(board.TX, board.RX, baudrate=baud)
        time.sleep(0.1)
        self.reset()
        self.s.read(self.s.in_waiting)
        time.sleep(0.1)

    def buildJSON(self, func, data):
        """Frame a call as ``${"f":"<func>","d":["a","b",]}&``.

        NOTE(review): every element is followed by a comma, so a trailing
        comma remains before ']' -- not strict JSON. The device-side
        parser may rely on this exact framing; confirm before changing.
        """
        toSend=b'${"f":"'+func+b'","d":['
        for d in data:
            toSend =toSend+b'"'+d +b'",'
        toSend=toSend+b']}&'
        print(toSend)
        return toSend

    def storeapi(self,api, URL):
        # Persist the API key and base URL on the dongle.
        toSend=b'dongle.storeAPISecret("'+api+b'","'+ URL + b'")\r\n'
        self.send(toSend)
        ret=self.clean_return()
        return(ret)

    def storewifi(self,ssid,password):
        # Persist WiFi credentials on the dongle.
        toSend=b'dongle.storeWIFISecret("'+ssid+b'","'+ password + b'")\r\n'
        self.send(toSend)
        ret=self.clean_return()
        return(ret)

    def getTW(self):
        # NOTE(review): bug -- send() requires a `body` argument, so this
        # call raises TypeError as written. Presumably a command such as
        # b'dongle.getTW()\r\n' was intended; confirm against the dongle
        # firmware before fixing.
        self.send()
        return(self.s.read())

    def putTW(self, thing, prop, value):
        # Write one property value to a ThingWorx thing via the dongle.
        toSend=b'dongle.PutTW("'+thing+b'","'+ prop +b'","'+ value +b'")\r\n'
        self.send(toSend)
        ret=self.clean_return()
        return(ret)

    def setwifi(self):
        # Apply the stored WiFi credentials on the dongle.
        toSend=b'dongle.setWiFi()\r\n'
        self.send(toSend)
        ret=self.clean_return()
        return(ret)

    def setTW(self):
        # Apply the stored ThingWorx settings on the dongle.
        toSend=b'dongle.setTW()\r\n'
        self.send(toSend)
        ret=self.clean_return()
        return(ret)

    def callTW(self):
        # Invoke a hard-coded test service ("Test1"/"TestService").
        toSend=b'dongle.CallTW("Test1","TestService")\r\n'
        self.send(toSend)
        ret=self.clean_return()
        return(ret)

    def getMAC(self):
        # Ask the dongle for its MAC address.
        toSend=b'dongle.getMAC()\r\n'
        self.send(toSend)
        ret=self.clean_return()
        return(ret)

    def reset(self):
        # (Re)import the dongle module on the ESP; return whatever raw
        # bytes are pending on the UART afterwards.
        toSend=b'import dongle\r\n'
        self.send(toSend)
        return(self.s.read(self.s.in_waiting))

    def read(self):
        # Raw read of whatever is pending on the UART.
        return(self.s.read())

    def send(self,body):
        """Write `body` to the UART in 15-byte chunks.

        The chunking plus 0.1 s pauses keeps the ESP's small UART receive
        buffer from overflowing at 115200 baud.
        """
        while (len(body)>15):
            self.s.write(body[:15])
            body=body[15:]
            time.sleep(0.1)
        self.s.write(body)
        time.sleep(0.1)
        #ret=self.clean_return()
        #return(ret)

    def clean_return(self):
        """Collect a reply and strip the framing around its payload.

        Busy-waits for the first bytes, then keeps reading for up to ~2 s
        or until the '!!' terminator appears, and returns the text sliced
        between the '@' and '!' markers.

        NOTE(review): several hazards here, flagged but left as-is:
        - the initial busy-wait loop blocks forever if the device never
          answers (no timeout);
        - `ret` holds bytes from UART.read(), so `'!!' in ret` (str in
          bytes) would raise TypeError on CPython -- confirm CircuitPython
          behaviour;
        - `.index()` raises ValueError when the '@'/'!' markers are absent;
        - the `ret is None` branch is dead code: slicing a str can never
          yield None.
        """
        while not (self.s.in_waiting) :
            pass
        time.sleep(0.1)
        ret=self.s.read()
        now= time.monotonic()
        start=time.monotonic()
        while ((now-start)<2 and not ('!!' in ret)):
            now= time.monotonic()
            if(self.s.in_waiting) :
                ret = ret + self.s.read()
            time.sleep(0.1)
        ret=str(ret)
        print(ret)
        ret = ret[ret.index("@") + 6 : ret.index("!") - 4]
        if (ret is None):
            return (0)
        else :
            return(ret)
        #if(self.s.in_waiting):
        #    raw_ret=self.s.read()
        #    print(raw_ret)
        #print(str(raw_ret).find(">>>"))
        #i=0
        # while str(raw_ret).find(">>>")<0:
        #     if(i>15):
        #         break
        #     raw_ret+=self.s.read(400)
        #     i=i+1
        # print(str(raw_ret)[str(raw_ret).find("\r\n"):str(raw_ret).find("\r\n>>>")])
        # return(str(raw_ret)[str(raw_ret).find("\r\n"):str(raw_ret).find("\r\n>>>")])
        # time.sleep(0.1)
#Key = "pp92f871d8-c3bd-4fd7-8fab-ee50e1fbd0e5"
#urlBase = "https://-2008281301ci.portal.ptc.io:8443/Thingworx/" |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.