seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
20422548222 | import argparse
import collections
import getpass
import hashlib
import json
import os
import pickle
import requests
import time
import uuid
import urllib.parse
from datetime import datetime, timedelta
from email_validator import validate_email, EmailNotValidError
from pandas import DataFrame, to_datetime
from pytz import timezone
from . import endpoints
class webull :
def __init__(self, region_code=None) :
self._session = requests.session()
self._headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:99.0) Gecko/20100101 Firefox/99.0',
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-US,en;q=0.5',
'Content-Type': 'application/json',
'platform': 'web',
'hl': 'en',
'os': 'web',
'osv': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:99.0) Gecko/20100101 Firefox/99.0',
'app': 'global',
'appid': 'webull-webapp',
'ver': '3.39.18',
'lzone': 'dc_core_r001',
'ph': 'MacOS Firefox',
'locale': 'eng',
# 'reqid': req_id,
'device-type': 'Web',
'did': self._get_did()
}
#endpoints
self._urls = endpoints.urls()
#sessions
self._account_id = ''
self._trade_token = ''
self._access_token = ''
self._refresh_token = ''
self._token_expire = ''
self._uuid = ''
#miscellaenous
self._did = self._get_did()
self._region_code = region_code or 6
self.zone_var = 'dc_core_r001'
self.timeout = 15
def _get_did(self, path=''):
'''
Makes a unique device id from a random uuid (uuid.uuid4).
if the pickle file doesn't exist, this func will generate a random 32 character hex string
uuid and save it in a pickle file for future use. if the file already exists it will
load the pickle file to reuse the did. Having a unique did appears to be very important
for the MQTT web socket protocol
path: path to did.bin. For example _get_did('cache') will search for cache/did.bin instead.
:return: hex string of a 32 digit uuid
'''
filename = 'did.bin'
if path:
filename = os.path.join(path, filename)
if os.path.exists(filename):
did = pickle.load(open(filename,'rb'))
else:
did = uuid.uuid4().hex
pickle.dump(did, open(filename, 'wb'))
return did
def _set_did(self, did, path=''):
'''
If your starting to use this package after webull's new image verification for login, you'll
need to login from a browser to get your did file in order to login through this api. You can
find your did file by using this link:
https://github.com/tedchou12/webull/wiki/Workaround-for-Login
and then headers tab instead of response head, and finally look for the did value from the
request headers.
Then, you can run this program to save your did into did.bin so that it can be accessed in the
future without the did explicitly being in your code.
path: path to did.bin. For example _get_did('cache') will search for cache/did.bin instead.
'''
filename = 'did.bin'
if path:
filename = os.path.join(path, filename)
pickle.dump(did, open(filename, 'wb'))
return True
def build_req_headers(self, include_trade_token=False, include_time=False, include_zone_var=True):
'''
Build default set of header params
'''
headers = self._headers
req_id = str(uuid.uuid4().hex)
headers['reqid'] = req_id
headers['did'] = self._did
headers['access_token'] = self._access_token
if include_trade_token :
headers['t_token'] = self._trade_token
if include_time :
headers['t_time'] = str(round(time.time() * 1000))
if include_zone_var :
headers['lzone'] = self.zone_var
return headers
def login(self, username='', password='', device_name='', mfa='', question_id='', question_answer='', save_token=False, token_path=None):
'''
Login with email or phone number
phone numbers must be a str in the following form
US '+1-XXXXXXX'
CH '+86-XXXXXXXXXXX'
'''
if not username or not password:
raise ValueError('username or password is empty')
# with webull md5 hash salted
password = ('wl_app-a&b@!423^' + password).encode('utf-8')
md5_hash = hashlib.md5(password)
account_type = self.get_account_type(username)
if device_name == '' :
device_name = 'default_string'
data = {
'account': username,
'accountType': str(account_type),
'deviceId': self._did,
'deviceName': device_name,
'grade': 1,
'pwd': md5_hash.hexdigest(),
'regionId': self._region_code
}
if mfa != '' :
data['extInfo'] = {
'codeAccountType': account_type,
'verificationCode': mfa
}
headers = self.build_req_headers()
else :
headers = self._headers
if question_id != '' and question_answer != '' :
data['accessQuestions'] = '[{"questionId":"' + str(question_id) + '", "answer":"' + str(question_answer) + '"}]'
response = requests.post(self._urls.login(), json=data, headers=headers, timeout=self.timeout)
result = response.json()
if 'accessToken' in result :
self._access_token = result['accessToken']
self._refresh_token = result['refreshToken']
self._token_expire = result['tokenExpireTime']
self._uuid = result['uuid']
self._account_id = self.get_account_id()
if save_token:
self._save_token(result, token_path)
return result
def get_mfa(self, username='') :
account_type = self.get_account_type(username)
data = {'account': str(username),
'accountType': str(account_type),
'codeType': int(5)}
response = requests.post(self._urls.get_mfa(), json=data, headers=self._headers, timeout=self.timeout)
# data = response.json()
if response.status_code == 200 :
return True
else :
return False
def check_mfa(self, username='', mfa='') :
account_type = self.get_account_type(username)
data = {'account': str(username),
'accountType': str(account_type),
'code': str(mfa),
'codeType': int(5)}
response = requests.post(self._urls.check_mfa(), json=data, headers=self._headers, timeout=self.timeout)
data = response.json()
return data
def get_security(self, username='') :
account_type = self.get_account_type(username)
username = urllib.parse.quote(username)
# seems like webull has a bug/stability issue here:
time = datetime.now().timestamp() * 1000
response = requests.get(self._urls.get_security(username, account_type, self._region_code, 'PRODUCT_LOGIN', time, 0), headers=self._headers, timeout=self.timeout)
data = response.json()
if len(data) == 0 :
response = requests.get(self._urls.get_security(username, account_type, self._region_code, 'PRODUCT_LOGIN', time, 1), headers=self._headers, timeout=self.timeout)
data = response.json()
return data
def next_security(self, username='') :
account_type = self.get_account_type(username)
username = urllib.parse.quote(username)
# seems like webull has a bug/stability issue here:
time = datetime.now().timestamp() * 1000
response = requests.get(self._urls.next_security(username, account_type, self._region_code, 'PRODUCT_LOGIN', time, 0), headers=self._headers, timeout=self.timeout)
data = response.json()
if len(data) == 0 :
response = requests.get(self._urls.next_security(username, account_type, self._region_code, 'PRODUCT_LOGIN', time, 1), headers=self._headers, timeout=self.timeout)
data = response.json()
return data
def check_security(self, username='', question_id='', question_answer='') :
account_type = self.get_account_type(username)
data = {'account': str(username),
'accountType': str(account_type),
'answerList': [{'questionId': str(question_id), 'answer': str(question_answer)}],
'event': 'PRODUCT_LOGIN'}
response = requests.post(self._urls.check_security(), json=data, headers=self._headers, timeout=self.timeout)
data = response.json()
return data
    def login_prompt(self):
        '''
        Interactive login: prompt for username, password and the 6-digit
        trade PIN on the console, log in, then unlock trading.

        (The original docstring said "End login session", which was a
        copy-paste error from logout.)
        :return: True if the trade token was obtained, else False
        '''
        uname = input('Enter Webull Username:')
        pwd = getpass.getpass('Enter Webull Password:')
        self.trade_pin = getpass.getpass('Enter 6 digit Webull Trade PIN:')
        self.login(uname, pwd)
        return self.get_trade_token(self.trade_pin)
def logout(self):
'''
End login session
'''
headers = self.build_req_headers()
response = requests.get(self._urls.logout(), headers=headers, timeout=self.timeout)
return response.status_code
    def api_login(self, access_token='', refresh_token='', token_expire='', uuid='', mfa=''):
        '''
        Restore a previous session from saved tokens instead of logging in
        with credentials.

        NOTE(review): the mfa parameter is accepted but never used in this
        body; presumably kept for backward compatibility — confirm.
        '''
        self._access_token = access_token
        self._refresh_token = refresh_token
        self._token_expire = token_expire
        self._uuid = uuid
        # fetching the account id also exercises the restored token
        self._account_id = self.get_account_id()
    def refresh_login(self, save_token=False, token_path=None):
        '''
        Refresh login token.

        Exchanges the stored refresh token for a new access/refresh pair and,
        on success, updates the session state and account id.
        save_token: persist the refreshed tokens via _save_token
        :return: response json from the refresh endpoint
        '''
        headers = self.build_req_headers()
        data = {'refreshToken': self._refresh_token}
        response = requests.post(self._urls.refresh_login(self._refresh_token), json=data, headers=headers, timeout=self.timeout)
        result = response.json()
        # only adopt the new tokens when all three fields are present and non-empty
        if 'accessToken' in result and result['accessToken'] != '' and result['refreshToken'] != '' and result['tokenExpireTime'] != '':
            self._access_token = result['accessToken']
            self._refresh_token = result['refreshToken']
            self._token_expire = result['tokenExpireTime']
            self._account_id = self.get_account_id()
            if save_token:
                # carry the session uuid over — presumably the refresh
                # response lacks it; confirm against the API
                result['uuid'] = self._uuid
                self._save_token(result, token_path)
        return result
def _save_token(self, token=None, path=None):
'''
save login token to webull_credentials.json
'''
filename = 'webull_credentials.json'
if path:
filename = os.path.join(path, filename)
with open(filename, 'wb') as f:
pickle.dump(token, f)
return True
return False
def get_detail(self):
'''
get some contact details of your account name, email/phone, region, avatar...etc
'''
headers = self.build_req_headers()
response = requests.get(self._urls.user(), headers=headers, timeout=self.timeout)
result = response.json()
return result
def get_account_id(self, id=0):
'''
get account id
call account id before trade actions
'''
headers = self.build_req_headers()
response = requests.get(self._urls.account_id(), headers=headers, timeout=self.timeout)
result = response.json()
if result['success'] and len(result['data']) > 0 :
self.zone_var = str(result['data'][int(id)]['rzone'])
self._account_id = str(result['data'][int(id)]['secAccountId'])
return self._account_id
else:
return None
def get_account(self):
'''
get important details of account, positions, portfolio stance...etc
'''
headers = self.build_req_headers()
response = requests.get(self._urls.account(self._account_id), headers=headers, timeout=self.timeout)
result = response.json()
return result
def get_positions(self):
'''
output standing positions of stocks
'''
data = self.get_account()
return data['positions']
def get_portfolio(self):
'''
output numbers of portfolio
'''
data = self.get_account()
output = {}
for item in data['accountMembers']:
output[item['key']] = item['value']
return output
def get_activities(self, index=1, size=500) :
'''
Activities including transfers, trades and dividends
'''
headers = self.build_req_headers(include_trade_token=True, include_time=True)
data = {'pageIndex': index,
'pageSize': size}
response = requests.post(self._urls.account_activities(self._account_id), json=data, headers=headers, timeout=self.timeout)
return response.json()
def get_current_orders(self) :
'''
Get open/standing orders
'''
data = self.get_account()
return data['openOrders']
def get_history_orders(self, status='All', count=20):
'''
Historical orders, can be cancelled or filled
status = Cancelled / Filled / Working / Partially Filled / Pending / Failed / All
'''
headers = self.build_req_headers(include_trade_token=True, include_time=True)
response = requests.get(self._urls.orders(self._account_id, count) + str(status), headers=headers, timeout=self.timeout)
return response.json()
def get_trade_token(self, password=''):
'''
Trading related
authorize trade, must be done before trade action
'''
headers = self.build_req_headers()
# with webull md5 hash salted
password = ('wl_app-a&b@!423^' + password).encode('utf-8')
md5_hash = hashlib.md5(password)
data = {'pwd': md5_hash.hexdigest()}
response = requests.post(self._urls.trade_token(), json=data, headers=headers, timeout=self.timeout)
result = response.json()
if 'tradeToken' in result :
self._trade_token = result['tradeToken']
return True
else:
return False
'''
Lookup ticker_id
Ticker issue, will attempt to find an exact match, if none is found, match the first one
'''
def get_ticker(self, stock=''):
headers = self.build_req_headers()
ticker_id = 0
if stock and isinstance(stock, str):
response = requests.get(self._urls.stock_id(stock, self._region_code), headers=headers, timeout=self.timeout)
result = response.json()
if result.get('data') :
for item in result['data'] : # implies multiple tickers, but only assigns last one?
if 'symbol' in item and item['symbol'] == stock :
ticker_id = item['tickerId']
break
elif 'disSymbol' in item and item['disSymbol'] == stock :
ticker_id = item['tickerId']
break
if ticker_id == 0 :
ticker_id = result['data'][0]['tickerId']
else:
raise ValueError('TickerId could not be found for stock {}'.format(stock))
else:
raise ValueError('Stock symbol is required')
return ticker_id
'''
Get stock public info
get price quote
tId: ticker ID str
'''
def get_ticker_info(self, stock=None, tId=None) :
headers = self.build_req_headers()
if not stock and not tId:
raise ValueError('Must provide a stock symbol or a stock id')
if stock :
try:
tId = str(self.get_ticker(stock))
except ValueError as _e:
raise ValueError("Could not find ticker for stock {}".format(stock))
response = requests.get(self._urls.stock_detail(tId), headers=headers, timeout=self.timeout)
result = response.json()
return result
'''
Get all tickers from a region
region id: https://github.com/tedchou12/webull/wiki/What-is-the-region_id%3F
'''
def get_all_tickers(self, region_code=None) :
headers = self.build_req_headers()
if not region_code :
region_code = self._region_code
response = requests.get(self._urls.get_all_tickers(region_code, region_code), headers=headers, timeout=self.timeout)
result = response.json()
return result
'''
Actions related to stock
'''
def get_quote(self, stock=None, tId=None):
'''
get price quote
tId: ticker ID str
'''
headers = self.build_req_headers()
if not stock and not tId:
raise ValueError('Must provide a stock symbol or a stock id')
if stock:
try:
tId = str(self.get_ticker(stock))
except ValueError as _e:
raise ValueError("Could not find ticker for stock {}".format(stock))
response = requests.get(self._urls.quotes(tId), headers=headers, timeout=self.timeout)
result = response.json()
return result
    def place_order(self, stock=None, tId=None, price=0, action='BUY', orderType='LMT', enforce='GTC', quant=0, outsideRegularTradingHour=True, stpPrice=None, trial_value=0, trial_type='DOLLAR'):
        '''
        Place an order.

        price: float (LMT / STP LMT Only)
        action: BUY / SELL / SHORT
        ordertype : LMT / MKT / STP / STP LMT / STP TRAIL
        timeinforce: GTC / DAY / IOC
        outsideRegularTradingHour: True / False
        stpPrice: float (STP / STP LMT Only)
        trial_value: float (STP TRIAL Only)
        trial_type: DOLLAR / PERCENTAGE (STP TRIAL Only)
        :return: response json from the order endpoint
        '''
        # resolve the ticker id: an explicit tId wins over a symbol lookup
        if not tId is None:
            pass
        elif not stock is None:
            tId = self.get_ticker(stock)
        else:
            raise ValueError('Must provide a stock symbol or a stock id')
        headers = self.build_req_headers(include_trade_token=True, include_time=True)
        data = {
            'action': action,
            'comboType': 'NORMAL',
            'orderType': orderType,
            'outsideRegularTradingHour': outsideRegularTradingHour,
            'quantity': int(quant),
            'serialId': str(uuid.uuid4()),  # unique per submission; presumably de-duplicates retries — confirm
            'tickerId': tId,
            'timeInForce': enforce
        }
        # Market orders do not support extended hours trading.
        if orderType == 'MKT' :
            data['outsideRegularTradingHour'] = False
        elif orderType == 'LMT':
            data['lmtPrice'] = float(price)
        elif orderType == 'STP' :
            data['auxPrice'] = float(stpPrice)
        elif orderType == 'STP LMT' :
            data['lmtPrice'] = float(price)
            data['auxPrice'] = float(stpPrice)
        elif orderType == 'STP TRAIL' :
            data['trailingStopStep'] = float(trial_value)
            data['trailingType'] = str(trial_type)
        response = requests.post(self._urls.place_orders(self._account_id), json=data, headers=headers, timeout=self.timeout)
        return response.json()
    def modify_order(self, order=None, order_id=0, stock=None, tId=None, price=0, action=None, orderType=None, enforce=None, quant=0, outsideRegularTradingHour=None):
        '''
        Modify an order.

        order: order dict as returned by get_current_orders; any field not
        overridden by the keyword arguments below is taken from it
        order_id: order_id
        action: BUY / SELL
        ordertype : LMT / MKT / STP / STP LMT / STP TRAIL
        timeinforce: GTC / DAY / IOC
        outsideRegularTradingHour: True / False

        NOTE(review): when only order_id is supplied (order is None), the
        fallbacks below (order['action'] etc.) raise TypeError unless every
        overriding argument is provided — confirm intended usage.
        '''
        if not order and not order_id:
            raise ValueError('Must provide an order or order_id')
        headers = self.build_req_headers(include_trade_token=True, include_time=True)
        # for each field: explicit argument wins, else fall back to the order dict
        modifiedAction = action or order['action']
        modifiedLmtPrice = float(price or order['lmtPrice'])
        modifiedOrderType = orderType or order['orderType']
        # explicit bool check so outsideRegularTradingHour=False is honored
        # (a plain `or` would discard a False override)
        modifiedOutsideRegularTradingHour = outsideRegularTradingHour if type(outsideRegularTradingHour) == bool else order['outsideRegularTradingHour']
        modifiedEnforce = enforce or order['timeInForce']
        modifiedQuant = int(quant or order['quantity'])
        if not tId is None:
            pass
        elif not stock is None:
            tId = self.get_ticker(stock)
        else :
            tId = order['ticker']['tickerId']
        order_id = order_id or order['orderId']
        data = {
            'action': modifiedAction,
            'lmtPrice': modifiedLmtPrice,
            'orderType': modifiedOrderType,
            'quantity': modifiedQuant,
            'comboType': 'NORMAL',
            'outsideRegularTradingHour': modifiedOutsideRegularTradingHour,
            'serialId': str(uuid.uuid4()),
            'orderId': order_id,
            'tickerId': tId,
            'timeInForce': modifiedEnforce
        }
        #Market orders do not support extended hours trading.
        if data['orderType'] == 'MKT':
            data['outsideRegularTradingHour'] = False
        response = requests.post(self._urls.modify_order(self._account_id, order_id), json=data, headers=headers, timeout=self.timeout)
        return response.json()
def cancel_order(self, order_id=''):
'''
Cancel an order
'''
headers = self.build_req_headers(include_trade_token=True, include_time=True)
data = {}
response = requests.post(self._urls.cancel_order(self._account_id) + str(order_id) + '/' + str(uuid.uuid4()), json=data, headers=headers, timeout=self.timeout)
result = response.json()
return result['success']
    def place_order_otoco(self, stock='', price='', stop_loss_price='', limit_profit_price='', time_in_force='DAY', quant=0) :
        '''
        OTOCO: One-triggers-a-one-cancels-the-others, aka Bracket Ordering.
        Submit a buy order; its fill will trigger sell order placement. If one
        sell fills, it will cancel the other sell.

        price: limit price of the entry BUY leg
        stop_loss_price: stop price of the STOP_LOSS sell leg
        limit_profit_price: limit price of the STOP_PROFIT sell leg
        :return: response json when the pre-check passes, otherwise False
        '''
        headers = self.build_req_headers(include_trade_token=False, include_time=True)
        # first payload goes to the "check" endpoint for validation only
        data1 = {
            'newOrders': [
                {'orderType': 'LMT', 'timeInForce': time_in_force, 'quantity': int(quant),
                 'outsideRegularTradingHour': False, 'action': 'BUY', 'tickerId': self.get_ticker(stock),
                 'lmtPrice': float(price), 'comboType': 'MASTER'},
                {'orderType': 'STP', 'timeInForce': time_in_force, 'quantity': int(quant),
                 'outsideRegularTradingHour': False, 'action': 'SELL', 'tickerId': self.get_ticker(stock),
                 'auxPrice': float(stop_loss_price), 'comboType': 'STOP_LOSS'},
                {'orderType': 'LMT', 'timeInForce': time_in_force, 'quantity': int(quant),
                 'outsideRegularTradingHour': False, 'action': 'SELL', 'tickerId': self.get_ticker(stock),
                 'lmtPrice': float(limit_profit_price), 'comboType': 'STOP_PROFIT'}
            ]
        }
        response1 = requests.post(self._urls.check_otoco_orders(self._account_id), json=data1, headers=headers, timeout=self.timeout)
        result1 = response1.json()
        if result1['forward'] :
            # check passed: resubmit the same legs, now with serialIds, to place
            data2 = {'newOrders': [
                {'orderType': 'LMT', 'timeInForce': time_in_force, 'quantity': int(quant),
                 'outsideRegularTradingHour': False, 'action': 'BUY', 'tickerId': self.get_ticker(stock),
                 'lmtPrice': float(price), 'comboType': 'MASTER', 'serialId': str(uuid.uuid4())},
                {'orderType': 'STP', 'timeInForce': time_in_force, 'quantity': int(quant),
                 'outsideRegularTradingHour': False, 'action': 'SELL', 'tickerId': self.get_ticker(stock),
                 'auxPrice': float(stop_loss_price), 'comboType': 'STOP_LOSS', 'serialId': str(uuid.uuid4())},
                {'orderType': 'LMT', 'timeInForce': time_in_force, 'quantity': int(quant),
                 'outsideRegularTradingHour': False, 'action': 'SELL', 'tickerId': self.get_ticker(stock),
                 'lmtPrice': float(limit_profit_price), 'comboType': 'STOP_PROFIT', 'serialId': str(uuid.uuid4())}],
                'serialId': str(uuid.uuid4())
            }
            response2 = requests.post(self._urls.place_otoco_orders(self._account_id), json=data2, headers=headers, timeout=self.timeout)
            # print('Resp 2: {}'.format(response2))
            return response2.json()
        else:
            # check failed: print the first error code/message and bail out
            print(result1['checkResultList'][0]['code'])
            print(result1['checkResultList'][0]['msg'])
            return False
    def modify_order_otoco(self, order_id1='', order_id2='', order_id3='', stock='', price='', stop_loss_price='', limit_profit_price='', time_in_force='DAY', quant=0) :
        '''
        OTOCO: One-triggers-a-one-cancels-the-others, aka Bracket Ordering.
        Modify an existing bracket: order_id1 is the MASTER buy leg,
        order_id2 the STOP_LOSS sell leg, order_id3 the STOP_PROFIT sell leg.

        :return: response json from the modify endpoint
        '''
        headers = self.build_req_headers(include_trade_token=False, include_time=True)
        data = {'modifyOrders': [
            {'orderType': 'LMT', 'timeInForce': time_in_force, 'quantity': int(quant), 'orderId': str(order_id1),
             'outsideRegularTradingHour': False, 'action': 'BUY', 'tickerId': self.get_ticker(stock),
             'lmtPrice': float(price), 'comboType': 'MASTER', 'serialId': str(uuid.uuid4())},
            {'orderType': 'STP', 'timeInForce': time_in_force, 'quantity': int(quant), 'orderId': str(order_id2),
             'outsideRegularTradingHour': False, 'action': 'SELL', 'tickerId': self.get_ticker(stock),
             'auxPrice': float(stop_loss_price), 'comboType': 'STOP_LOSS', 'serialId': str(uuid.uuid4())},
            {'orderType': 'LMT', 'timeInForce': time_in_force, 'quantity': int(quant), 'orderId': str(order_id3),
             'outsideRegularTradingHour': False, 'action': 'SELL', 'tickerId': self.get_ticker(stock),
             'lmtPrice': float(limit_profit_price), 'comboType': 'STOP_PROFIT', 'serialId': str(uuid.uuid4())}],
            'serialId': str(uuid.uuid4())
        }
        response = requests.post(self._urls.modify_otoco_orders(self._account_id), json=data, headers=headers, timeout=self.timeout)
        # print('Resp: {}'.format(response))
        return response.json()
def cancel_order_otoco(self, combo_id=''):
'''
Retract an otoco order. Cancelling the MASTER order_id cancels the sub orders.
'''
headers = self.build_req_headers(include_trade_token=True, include_time=True)
# data = { 'serialId': str(uuid.uuid4()), 'cancelOrders': [str(order_id)]}
data = {}
response = requests.post(self._urls.cancel_otoco_orders(self._account_id, combo_id), json=data, headers=headers, timeout=self.timeout)
return response.json()
'''
Actions related to cryptos
'''
    def place_order_crypto(self, stock=None, tId=None, price=0, action='BUY', orderType='LMT', enforce='DAY', entrust_type='QTY', quant=0, outsideRegularTradingHour=False) :
        '''
        Place a crypto order.

        price: Limit order entry price
        quant: dollar amount to buy/sell when entrust_type is CASH, else the
        decimal or fractional amount of shares to buy
        action: BUY / SELL
        entrust_type: CASH / QTY
        ordertype : LMT / MKT
        timeinforce: DAY
        outsideRegularTradingHour: True / False
        :return: response json from the order endpoint
        '''
        # resolve the ticker id: an explicit tId wins over a symbol lookup
        if not tId is None:
            pass
        elif not stock is None:
            tId = self.get_ticker(stock)
        else:
            raise ValueError('Must provide a stock symbol or a stock id')
        headers = self.build_req_headers(include_trade_token=True, include_time=True)
        # note: price/quantity are sent as strings for the crypto endpoint,
        # unlike the float/int fields of the stock order payload
        data = {
            'action': action,
            'assetType': 'crypto',
            'comboType': 'NORMAL',
            'entrustType': entrust_type,
            'lmtPrice': str(price),
            'orderType': orderType,
            'outsideRegularTradingHour': outsideRegularTradingHour,
            'quantity': str(quant),
            'serialId': str(uuid.uuid4()),
            'tickerId': tId,
            'timeInForce': enforce
        }
        response = requests.post(self._urls.place_orders(self._account_id), json=data, headers=headers, timeout=self.timeout)
        return response.json()
'''
Actions related to options
'''
def get_option_quote(self, stock=None, tId=None, optionId=None):
'''
get option quote
'''
if not stock and not tId:
raise ValueError('Must provide a stock symbol or a stock id')
if stock:
try:
tId = str(self.get_ticker(stock))
except ValueError as _e:
raise ValueError("Could not find ticker for stock {}".format(stock))
headers = self.build_req_headers()
params = {'tickerId': tId, 'derivativeIds': optionId}
return requests.get(self._urls.option_quotes(), params=params, headers=headers, timeout=self.timeout).json()
def get_options_expiration_dates(self, stock=None, count=-1):
'''
returns a list of options expiration dates
'''
headers = self.build_req_headers()
data = {'count': count,
'direction': 'all',
'tickerId': self.get_ticker(stock)}
res = requests.post(self._urls.options_exp_dat_new(), json=data, headers=headers, timeout=self.timeout).json()
r_data = []
for entry in res['expireDateList'] :
r_data.append(entry['from'])
# return requests.get(self._urls.options_exp_date(self.get_ticker(stock)), params=data, headers=headers, timeout=self.timeout).json()['expireDateList']
return r_data
    def get_options(self, stock=None, count=-1, includeWeekly=1, direction='all', expireDate=None, queryAll=0):
        '''
        get options and returns a dict of options contracts
        params:
            stock: symbol
            count: -1
            includeWeekly: 0 or 1 (deprecated)
            direction: all, call, put
            expireDate: contract expire date
            queryAll: 0 (deprecated)
        :return: list of {'strikePrice', 'call', 'put'} dicts sorted by strike
        '''
        headers = self.build_req_headers()
        # get next closet expiredate if none is provided
        if not expireDate:
            dates = self.get_options_expiration_dates(stock)
            # ensure we don't provide an option that has < 1 day to expire
            # NOTE(review): assumes each entry carries 'days' and 'date'
            # keys — confirm against the expireDateList 'from' schema
            for d in dates:
                if d['days'] > 0:
                    expireDate = d['date']
                    break
        data = {'count': count,
                'direction': direction,
                'tickerId': self.get_ticker(stock)}
        res = requests.post(self._urls.options_exp_dat_new(), json=data, headers=headers, timeout=self.timeout).json()
        # pick the contract list for the requested expiry
        t_data = []
        for entry in res['expireDateList'] :
            if str(entry['from']['date']) == expireDate :
                t_data = entry['data']
        # group contracts by strike price, then by direction (call/put)
        r_data = {}
        for entry in t_data :
            if entry['strikePrice'] not in r_data :
                r_data[entry['strikePrice']] = {}
            r_data[entry['strikePrice']][entry['direction']] = entry
        r_data = dict(sorted(r_data.items()))
        # flatten into a list of {'strikePrice', 'call'?, 'put'?} records
        rr_data = []
        for s_price in r_data :
            rr_entry = {'strikePrice': s_price}
            if 'call' in r_data[s_price] :
                rr_entry['call'] = r_data[s_price]['call']
            if 'put' in r_data[s_price] :
                rr_entry['put'] = r_data[s_price]['put']
            rr_data.append(rr_entry)
        return rr_data
        #deprecated 22/05/01
        # params = {'count': count,
        #           'includeWeekly': includeWeekly,
        #           'direction': direction,
        #           'expireDate': expireDate,
        #           'unSymbol': stock,
        #           'queryAll': queryAll}
        #
        # data = requests.get(self._urls.options(self.get_ticker(stock)), params=params, headers=headers, timeout=self.timeout).json()
        #
        # return data['data']
def get_options_by_strike_and_expire_date(self, stock=None, expireDate=None, strike=None, direction='all'):
'''
get a list of options contracts by expire date and strike price
strike: string
'''
opts = self.get_options(stock=stock, expireDate=expireDate, direction=direction)
return [c for c in opts if c['strikePrice'] == strike]
    def place_order_option(self, optionId=None, lmtPrice=None, stpPrice=None, action=None, orderType='LMT', enforce='DAY', quant=0):
        '''
        create buy / sell order
        stock: string
        lmtPrice: float
        stpPrice: float
        action: string BUY / SELL
        optionId: string
        orderType: MKT / LMT / STP / STP LMT
        enforce: GTC / DAY
        quant: int
        :raises Exception: on any non-200 response
        :return: response json from the option order endpoint
        '''
        headers = self.build_req_headers(include_trade_token=True, include_time=True)
        data = {
            'orderType': orderType,
            'serialId': str(uuid.uuid4()),
            'timeInForce': enforce,
            'orders': [{'quantity': int(quant), 'action': action, 'tickerId': int(optionId), 'tickerType': 'OPTION'}],
        }
        # attach price fields matching the order type; a price of 0/None is
        # treated as "not provided" and the field is omitted
        if orderType == 'LMT' and lmtPrice :
            data['lmtPrice'] = float(lmtPrice)
        elif orderType == 'STP' and stpPrice :
            data['auxPrice'] = float(stpPrice)
        elif orderType == 'STP LMT' and lmtPrice and stpPrice :
            data['lmtPrice'] = float(lmtPrice)
            data['auxPrice'] = float(stpPrice)
        response = requests.post(self._urls.place_option_orders(self._account_id), json=data, headers=headers, timeout=self.timeout)
        if response.status_code != 200:
            raise Exception('place_option_order failed', response.status_code, response.reason)
        return response.json()
    def modify_order_option(self, order=None, lmtPrice=None, stpPrice=None, enforce=None, quant=0):
        '''
        Modify an existing option order.

        order: dict from get_current_orders
        stpPrice: float
        lmtPrice: float
        enforce: GTC / DAY
        quant: int
        :raises Exception: on any non-200 response
        :return: True on success
        '''
        headers = self.build_req_headers(include_trade_token=True, include_time=True)
        # explicit arguments win; otherwise reuse the values from the order dict
        data = {
            'comboId': order['comboId'],
            'orderType': order['orderType'],
            'timeInForce': enforce or order['timeInForce'],
            'serialId': str(uuid.uuid4()),
            'orders': [{'quantity': quant or order['totalQuantity'],
                        'action': order['action'],
                        'tickerId': order['ticker']['tickerId'],
                        'tickerType': 'OPTION',
                        'orderId': order['orderId']}]
        }
        # attach the price field(s) required by the existing order's type
        if order['orderType'] == 'LMT' and (lmtPrice or order.get('lmtPrice')):
            data['lmtPrice'] = lmtPrice or order['lmtPrice']
        elif order['orderType'] == 'STP' and (stpPrice or order.get('auxPrice')):
            data['auxPrice'] = stpPrice or order['auxPrice']
        elif order['orderType'] == 'STP LMT' and (stpPrice or order.get('auxPrice')) and (lmtPrice or order.get('lmtPrice')):
            data['auxPrice'] = stpPrice or order['auxPrice']
            data['lmtPrice'] = lmtPrice or order['lmtPrice']
        response = requests.post(self._urls.replace_option_orders(self._account_id), json=data, headers=headers, timeout=self.timeout)
        if response.status_code != 200:
            raise Exception('replace_option_order failed', response.status_code, response.reason)
        return True
def cancel_all_orders(self):
'''
Cancels all open (aka 'working') orders
'''
open_orders = self.get_current_orders()
for order in open_orders:
self.cancel_order(order['orderId'])
def get_tradable(self, stock='') :
'''
get if stock is tradable
'''
headers = self.build_req_headers()
response = requests.get(self._urls.is_tradable(self.get_ticker(stock)), headers=headers, timeout=self.timeout)
return response.json()
def alerts_list(self) :
'''
Get alerts
'''
headers = self.build_req_headers()
response = requests.get(self._urls.list_alerts(), headers=headers, timeout=self.timeout)
result = response.json()
if 'data' in result:
return result.get('data', [])
else:
return None
    def alerts_remove(self, alert=None, priceAlert=True, smartAlert=True):
        '''
        remove alert
        alert is retrieved from alert_list

        priceAlert: also remove the price-alert portion ('tickerWarning')
        smartAlert: also remove the smart-alert portion ('eventWarning')
        NOTE(review): the alert dict passed in is mutated in place (remove
        flags set, rules switched off) and posted back — presumably the API
        expects the echoed payload; confirm before changing.
        :raises Exception: on any non-200 response
        :return: True on success
        '''
        headers = self.build_req_headers()
        if alert.get('tickerWarning') and priceAlert:
            alert['tickerWarning']['remove'] = True
            alert['warningInput'] = alert['tickerWarning']
        if alert.get('eventWarning') and smartAlert:
            alert['eventWarning']['remove'] = True
            for rule in alert['eventWarning']['rules']:
                rule['active'] = 'off'
            alert['eventWarningInput'] = alert['eventWarning']
        response = requests.post(self._urls.remove_alert(), json=alert, headers=headers, timeout=self.timeout)
        if response.status_code != 200:
            raise Exception('alerts_remove failed', response.status_code, response.reason)
        return True
def alerts_add(self, stock=None, frequency=1, interval=1, priceRules=[], smartRules=[]):
'''
add price/percent/volume alert
frequency: 1 is once a day, 2 is once a minute
interval: 1 is once, 0 is repeating
priceRules: list of dicts with below attributes per alert
field: price , percent , volume
type: price (above/below), percent (above/below), volume (vol in thousands)
value: price, percent, volume amount
remark: comment
rules example:
priceRules = [{'field': 'price', 'type': 'above', 'value': '900.00', 'remark': 'above'}, {'field': 'price', 'type': 'below',
'value': '900.00', 'remark': 'below'}]
smartRules = [{'type':'earnPre','active':'on'},{'type':'fastUp','active':'on'},{'type':'fastDown','active':'on'},
{'type':'week52Up','active':'on'},{'type':'week52Down','active':'on'},{'type':'day5Down','active':'on'}]
'''
headers = self.build_req_headers()
rule_keys = ['value', 'field', 'remark', 'type', 'active']
for line, rule in enumerate(priceRules, start=1):
for key in rule:
if key not in rule_keys:
raise Exception('malformed price alert priceRules found.')
rule['alertRuleKey'] = line
rule['active'] = 'on'
alert_keys = ['earnPre', 'fastUp', 'fastDown', 'week52Up', 'week52Down', 'day5Up', 'day10Up', 'day20Up', 'day5Down', 'day10Down', 'day20Down']
for rule in smartRules:
if rule['type'] not in alert_keys:
raise Exception('malformed smart alert smartRules found.')
try:
stock_data = self.get_tradable(stock)['data'][0]
data = {
'regionId': stock_data['regionId'],
'tickerType': stock_data['type'],
'tickerId': stock_data['tickerId'],
'tickerSymbol': stock,
'disSymbol': stock,
'tinyName': stock_data['name'],
'tickerName': stock_data['name'],
'exchangeCode': stock_data['exchangeCode'],
'showCode': stock_data['disExchangeCode'],
'disExchangeCode': stock_data['disExchangeCode'],
'eventWarningInput': {
'tickerId': stock_data['tickerId'],
'rules': smartRules,
'remove': False,
'del': False
},
'warningInput': {
'warningFrequency': frequency,
'warningInterval': interval,
'rules': priceRules,
'tickerId': stock_data['tickerId']
}
}
except Exception as e:
print(f'failed to build alerts_add payload data. error: {e}')
response = requests.post(self._urls.add_alert(), json=data, headers=headers, timeout=self.timeout)
if response.status_code != 200:
raise Exception('alerts_add failed', response.status_code, response.reason)
return True
def active_gainer_loser(self, direction='gainer', rank_type='afterMarket', count=50) :
'''
gets gainer / loser / active stocks sorted by change
direction: gainer / loser / active
rank_type: preMarket / afterMarket / 5min / 1d / 5d / 1m / 3m / 52w (gainer/loser)
volume / turnoverRatio / range (active)
'''
headers = self.build_req_headers()
response = requests.get(self._urls.active_gainers_losers(direction, self._region_code, rank_type, count), headers=headers, timeout=self.timeout)
result = response.json()
return result
def run_screener(self, region=None, price_lte=None, price_gte=None, pct_chg_gte=None, pct_chg_lte=None, sort=None,
sort_dir=None, vol_lte=None, vol_gte=None):
'''
Notice the fact that endpoints are reversed on lte and gte, but this function makes it work correctly
Also screeners are not sent by name, just the parameters are sent
example: run_screener( price_lte=.10, price_gte=5, pct_chg_lte=.035, pct_chg_gte=.51)
just a start, add more as you need it
'''
jdict = collections.defaultdict(dict)
jdict['fetch'] = 200
jdict['rules'] = collections.defaultdict(str)
jdict['sort'] = collections.defaultdict(str)
jdict['attach'] = {'hkexPrivilege': 'true'} #unknown meaning, was in network trace
jdict['rules']['wlas.screener.rule.region'] = 'securities.region.name.6'
if not price_lte is None and not price_gte is None:
# lte and gte are backwards
jdict['rules']['wlas.screener.rule.price'] = 'gte=' + str(price_lte) + '<e=' + str(price_gte)
if not vol_lte is None and not vol_gte is None:
# lte and gte are backwards
jdict['rules']['wlas.screener.rule.volume'] = 'gte=' + str(vol_lte) + '<e=' + str(vol_gte)
if not pct_chg_lte is None and not pct_chg_gte is None:
# lte and gte are backwards
jdict['rules']['wlas.screener.rule.changeRatio'] = 'gte=' + str(pct_chg_lte) + '<e=' + str(pct_chg_gte)
if sort is None:
jdict['sort']['rule'] = 'wlas.screener.rule.price'
if sort_dir is None:
jdict['sort']['desc'] = 'true'
# jdict = self._ddict2dict(jdict)
response = requests.post(self._urls.screener(), json=jdict, timeout=self.timeout)
result = response.json()
return result
def get_analysis(self, stock=None):
'''
get analysis info and returns a dict of analysis ratings
'''
headers = self.build_req_headers()
return requests.get(self._urls.analysis(self.get_ticker(stock)), headers=headers, timeout=self.timeout).json()
def get_capital_flow(self, stock=None, tId=None, show_hist=True):
'''
get capital flow
:param stock:
:param tId:
:param show_hist:
:return: list of capital flow
'''
headers = self.build_req_headers()
if not tId is None:
pass
elif not stock is None:
tId = self.get_ticker(stock)
else:
raise ValueError('Must provide a stock symbol or a stock id')
return requests.get(self._urls.analysis_capital_flow(tId, show_hist), headers=headers, timeout=self.timeout).json()
def get_etf_holding(self, stock=None, tId=None, has_num=0, count=50):
'''
get ETF holdings by stock
:param stock:
:param tId:
:param has_num:
:param count:
:return: list of ETF holdings
'''
headers = self.build_req_headers()
if not tId is None:
pass
elif not stock is None:
tId = self.get_ticker(stock)
else:
raise ValueError('Must provide a stock symbol or a stock id')
return requests.get(self._urls.analysis_etf_holding(tId, has_num, count), headers=headers, timeout=self.timeout).json()
def get_institutional_holding(self, stock=None, tId=None):
'''
get institutional holdings
:param stock:
:param tId:
:return: list of institutional holdings
'''
headers = self.build_req_headers()
if not tId is None:
pass
elif not stock is None:
tId = self.get_ticker(stock)
else:
raise ValueError('Must provide a stock symbol or a stock id')
return requests.get(self._urls.analysis_institutional_holding(tId), headers=headers, timeout=self.timeout).json()
def get_short_interest(self, stock=None, tId=None):
'''
get short interest
:param stock:
:param tId:
:return: list of short interest
'''
headers = self.build_req_headers()
if not tId is None:
pass
elif not stock is None:
tId = self.get_ticker(stock)
else:
raise ValueError('Must provide a stock symbol or a stock id')
return requests.get(self._urls.analysis_shortinterest(tId), headers=headers, timeout=self.timeout).json()
def get_financials(self, stock=None):
'''
get financials info and returns a dict of financial info
'''
headers = self.build_req_headers()
return requests.get(self._urls.fundamentals(self.get_ticker(stock)), headers=headers, timeout=self.timeout).json()
def get_news(self, stock=None, tId=None, Id=0, items=20):
'''
get news and returns a list of articles
params:
Id: 0 is latest news article
items: number of articles to return
'''
headers = self.build_req_headers()
if not tId is None:
pass
elif not stock is None:
tId = self.get_ticker(stock)
else:
raise ValueError('Must provide a stock symbol or a stock id')
return requests.get(self._urls.news(tId, Id, items), headers=headers, timeout=self.timeout).json()
def get_bars(self, stock=None, tId=None, interval='m1', count=1, extendTrading=0, timeStamp=None):
'''
get bars returns a pandas dataframe
params:
interval: m1, m5, m15, m30, h1, h2, h4, d1, w1
count: number of bars to return
extendTrading: change to 1 for pre-market and afterhours bars
timeStamp: If epoc timestamp is provided, return bar count up to timestamp. If not set default to current time.
'''
headers = self.build_req_headers()
if not tId is None:
pass
elif not stock is None:
tId = self.get_ticker(stock)
else:
raise ValueError('Must provide a stock symbol or a stock id')
if timeStamp is None:
# if not set, default to current time
timeStamp = int(time.time())
params = {'extendTrading': extendTrading}
df = DataFrame(columns=['open', 'high', 'low', 'close', 'volume', 'vwap'])
df.index.name = 'timestamp'
response = requests.get(
self._urls.bars(tId, interval, count, timeStamp),
params=params,
headers=headers,
timeout=self.timeout,
)
result = response.json()
time_zone = timezone(result[0]['timeZone'])
for row in result[0]['data']:
row = row.split(',')
row = ['0' if value == 'null' else value for value in row]
data = {
'open': float(row[1]),
'high': float(row[3]),
'low': float(row[4]),
'close': float(row[2]),
'volume': float(row[6]),
'vwap': float(row[7])
}
#convert to a panda datetime64 which has extra features like floor and resample
df.loc[to_datetime(datetime.fromtimestamp(int(row[0])).astimezone(time_zone))] = data
return df.iloc[::-1]
def get_bars_crypto(self, stock=None, tId=None, interval='m1', count=1, extendTrading=0, timeStamp=None):
'''
get bars returns a pandas dataframe
params:
interval: m1, m5, m15, m30, h1, h2, h4, d1, w1
count: number of bars to return
extendTrading: change to 1 for pre-market and afterhours bars
timeStamp: If epoc timestamp is provided, return bar count up to timestamp. If not set default to current time.
'''
headers = self.build_req_headers()
if not tId is None:
pass
elif not stock is None:
tId = self.get_ticker(stock)
else:
raise ValueError('Must provide a stock symbol or a stock id')
params = {'type': interval, 'count': count, 'extendTrading': extendTrading, 'timestamp': timeStamp}
df = DataFrame(columns=['open', 'high', 'low', 'close', 'volume', 'vwap'])
df.index.name = 'timestamp'
response = requests.get(self._urls.bars_crypto(tId), params=params, headers=headers, timeout=self.timeout)
result = response.json()
time_zone = timezone(result[0]['timeZone'])
for row in result[0]['data']:
row = row.split(',')
row = ['0' if value == 'null' else value for value in row]
data = {
'open': float(row[1]),
'high': float(row[3]),
'low': float(row[4]),
'close': float(row[2]),
'volume': float(row[6]),
'vwap': float(row[7])
}
#convert to a panda datetime64 which has extra features like floor and resample
df.loc[to_datetime(datetime.fromtimestamp(int(row[0])).astimezone(time_zone))] = data
return df.iloc[::-1]
def get_options_bars(self, derivativeId=None, interval='1m', count=1, direction=1, timeStamp=None):
'''
get bars returns a pandas dataframe
params:
derivativeId: to be obtained from option chain, eg option_chain[0]['call']['tickerId']
interval: 1m, 5m, 30m, 60m, 1d
count: number of bars to return
direction: 1 ignores {count} parameter & returns all bars on and after timestamp
setting any other value will ignore timestamp & return latest {count} bars
timeStamp: If epoc timestamp is provided, return bar count up to timestamp. If not set default to current time.
'''
headers = self.build_req_headers()
if derivativeId is None:
raise ValueError('Must provide a derivative ID')
params = {'type': interval, 'count': count, 'direction': direction, 'timestamp': timeStamp}
df = DataFrame(columns=['open', 'high', 'low', 'close', 'volume', 'vwap'])
df.index.name = 'timestamp'
response = requests.get(self._urls.options_bars(derivativeId), params=params, headers=headers, timeout=self.timeout)
result = response.json()
time_zone = timezone(result[0]['timeZone'])
for row in result[0]['data'] :
row = row.split(',')
row = ['0' if value == 'null' else value for value in row]
data = {
'open': float(row[1]),
'high': float(row[3]),
'low': float(row[4]),
'close': float(row[2]),
'volume': float(row[6]),
'vwap': float(row[7])
}
#convert to a panda datetime64 which has extra features like floor and resample
df.loc[to_datetime(datetime.fromtimestamp(int(row[0])).astimezone(time_zone))] = data
return df.iloc[::-1]
def get_chart_data(self, stock=None, tId=None, ma=5, timestamp=None):
bars = self.get_bars(stock=stock, tId=tId, interval='d1', count=1200, timestamp=timestamp)
ma_data = bars['close'].rolling(ma).mean()
return ma_data.dropna()
    def get_calendar(self, stock=None, tId=None):
        '''
        There doesn't seem to be a way to get the times the market is open outside of the charts.
        So, best way to tell if the market is open is to pass in a popular stock like AAPL then
        and see the open and close hours as would be marked on the chart
        and see if the last trade date is the same day as today's date
        :param stock:
        :param tId:
        :return: dict of 'market open', 'market close', 'last trade date'
                 (all None / trading day False when no trade has happened today);
                 returns None when no 'T' session entry is found in the response
        '''
        headers = self.build_req_headers()
        if not tId is None:
            pass
        elif not stock is None:
            tId = self.get_ticker(stock)
        else:
            raise ValueError('Must provide a stock symbol or a stock id')
        params = {'type': 'm1', 'count': 1, 'extendTrading': 0}
        # NOTE(review): bars() is called with a single argument here but with four
        # in get_bars — confirm endpoints.urls().bars supports both call shapes.
        response = requests.get(self._urls.bars(tId), params=params, headers=headers, timeout=self.timeout)
        result = response.json()
        time_zone = timezone(result[0]['timeZone'])
        # first comma-separated field of the latest bar is its epoch timestamp
        last_trade_date = datetime.fromtimestamp(int(result[0]['data'][0].split(',')[0])).astimezone(time_zone)
        today = datetime.today().astimezone() #use no time zone to have it pull in local time zone
        if last_trade_date.date() < today.date():
            # don't know what today's open and close times are, since no trade for today yet
            return {'market open': None, 'market close': None, 'trading day': False}
        # presumably type 'T' marks the regular trading session — TODO confirm
        for d in result[0]['dates']:
            if d['type'] == 'T':
                # 'start'/'end' are HH:MM strings; graft them onto today's date
                market_open = today.replace(
                    hour=int(d['start'].split(':')[0]),
                    minute=int(d['start'].split(':')[1]),
                    second=0)
                # zero out sub-second precision
                market_open -= timedelta(microseconds=market_open.microsecond)
                market_open = market_open.astimezone(time_zone) #set to market timezone
                market_close = today.replace(
                    hour=int(d['end'].split(':')[0]),
                    minute=int(d['end'].split(':')[1]),
                    second=0)
                market_close -= timedelta(microseconds=market_close.microsecond)
                market_close = market_close.astimezone(time_zone) #set to market timezone
                #this implies that we have waited a few minutes from the open before trading
                return {'market open': market_open , 'market close':market_close, 'trading day':True}
        #otherwise
        return None
def get_dividends(self):
''' Return account's incoming dividend info '''
headers = self.build_req_headers()
data = {}
response = requests.post(self._urls.dividends(self._account_id), json=data, headers=headers, timeout=self.timeout)
return response.json()
def get_five_min_ranking(self, extendTrading=0):
'''
get 5 minute trend ranking
'''
rank = []
headers = self.build_req_headers()
params = {'regionId': self._region_code, 'userRegionId': self._region_code, 'platform': 'pc', 'limitCards': 'latestActivityPc'}
response = requests.get(self._urls.rankings(), params=params, headers=headers, timeout=self.timeout)
result = response.json()[0].get('data')
if extendTrading:
for data in result:
if data['id'] == 'latestActivityPc.faList':
rank = data['data']
else:
for data in result:
if data['id'] == 'latestActivityPc.5minutes':
rank = data['data']
return rank
def get_watchlists(self, as_list_symbols=False) :
"""
get user watchlists
"""
headers = self.build_req_headers()
params = {'version': 0}
response = requests.get(self._urls.portfolio_lists(), params=params, headers=headers, timeout=self.timeout)
if not as_list_symbols :
return response.json()['portfolioList']
else:
list_ticker = response.json()['portfolioList'][0].get('tickerList')
return list(map(lambda x: x.get('symbol'), list_ticker))
def get_account_type(self, username='') :
try:
validate_email(username)
account_type = 2 # email
except EmailNotValidError as _e:
account_type = 1 # phone
return account_type
def is_logged_in(self):
'''
Check if login session is active
'''
try:
self.get_account_id()
except KeyError:
return False
else:
return True
def get_press_releases(self, stock=None, tId=None, typeIds=None, num=50):
'''
gets press releases, useful for getting past earning reports
typeIds: None (all) or comma-separated string of the following: '101' (financials) / '104' (insiders)
it's possible they add more announcment types in the future, so check the 'announcementTypes'
field on the response to verify you have the typeId you want
'''
if not tId is None:
pass
elif not stock is None:
tId = self.get_ticker(stock)
else:
raise ValueError('Must provide a stock symbol or a stock id')
headers = self.build_req_headers()
response = requests.get(self._urls.press_releases(tId, typeIds, num), headers=headers, timeout=self.timeout)
result = response.json()
return result
def get_calendar_events(self, event, start_date=None, page=1, num=50):
'''
gets calendar events
event: 'earnings' / 'dividend' / 'splits'
start_date: in `YYYY-MM-DD` format, today if None
'''
if start_date is None:
start_date = datetime.today().strftime('%Y-%m-%d')
headers = self.build_req_headers()
response = requests.get(self._urls.calendar_events(event, self._region_code, start_date, page, num), headers=headers, timeout=self.timeout)
result = response.json()
return result
''' Paper support '''
class paper_webull(webull):
    """
    Client variant that targets Webull's paper-trading (simulated) API.
    Inherits session/authentication handling from ``webull`` and overrides the
    account and order operations with their paper-trading endpoints.
    """
    def __init__(self):
        super().__init__()

    def get_account(self):
        ''' Get important details of paper account '''
        headers = self.build_req_headers()
        response = requests.get(self._urls.paper_account(self._account_id), headers=headers, timeout=self.timeout)
        return response.json()

    def get_account_id(self):
        ''' Get paper account id: call this before paper account actions'''
        headers = self.build_req_headers()
        response = requests.get(self._urls.paper_account_id(), headers=headers, timeout=self.timeout)
        result = response.json()
        if result is not None and len(result) > 0 and 'id' in result[0]:
            # renamed from `id` to avoid shadowing the builtin
            account_id = result[0]['id']
            self._account_id = account_id
            return account_id
        else:
            return None

    def get_current_orders(self):
        ''' Open paper trading orders '''
        return self.get_account()['openOrders']

    def get_history_orders(self, status='Cancelled', count=20):
        ''' Historical paper orders filtered by status (e.g. 'Cancelled'). '''
        headers = self.build_req_headers(include_trade_token=True, include_time=True)
        response = requests.get(self._urls.paper_orders(self._account_id, count) + str(status), headers=headers, timeout=self.timeout)
        return response.json()

    def get_positions(self):
        ''' Current positions in paper trading account. '''
        return self.get_account()['positions']

    def place_order(self, stock=None, tId=None, price=0, action='BUY', orderType='LMT', enforce='GTC', quant=0, outsideRegularTradingHour=True):
        ''' Place a paper account order; returns the parsed JSON response. '''
        if not tId is None:
            pass
        elif not stock is None:
            tId = self.get_ticker(stock)
        else:
            raise ValueError('Must provide a stock symbol or a stock id')
        headers = self.build_req_headers(include_trade_token=True, include_time=True)
        data = {
            'action': action, # BUY or SELL
            'lmtPrice': float(price),
            'orderType': orderType, # 'LMT','MKT'
            'outsideRegularTradingHour': outsideRegularTradingHour,
            'quantity': int(quant),
            'serialId': str(uuid.uuid4()),
            'tickerId': tId,
            'timeInForce': enforce # GTC or DAY
        }
        #Market orders do not support extended hours trading.
        if orderType == 'MKT':
            data['outsideRegularTradingHour'] = False
        response = requests.post(self._urls.paper_place_order(self._account_id, tId), json=data, headers=headers, timeout=self.timeout)
        return response.json()

    def modify_order(self, order, price=0, action='BUY', orderType='LMT', enforce='GTC', quant=0, outsideRegularTradingHour=True):
        ''' Modify a paper account order; returns True on success. '''
        headers = self.build_req_headers()
        data = {
            'action': action, # BUY or SELL
            'lmtPrice': float(price),
            'orderType': orderType, # 'LMT','MKT' (this comment previously sat on comboType)
            'comboType': 'NORMAL',
            'outsideRegularTradingHour': outsideRegularTradingHour,
            'serialId': str(uuid.uuid4()),
            'tickerId': order['ticker']['tickerId'],
            'timeInForce': enforce # GTC or DAY
        }
        # keep the original quantity unless a different non-zero one is requested
        if quant == 0 or quant == order['totalQuantity']:
            data['quantity'] = order['totalQuantity']
        else:
            data['quantity'] = int(quant)
        response = requests.post(self._urls.paper_modify_order(self._account_id, order['orderId']), json=data, headers=headers, timeout=self.timeout)
        if response:
            return True
        else:
            print("Modify didn't succeed. {} {}".format(response, response.json()))
            return False

    def cancel_order(self, order_id):
        ''' Cancel a paper account order; returns True when the request succeeded. '''
        headers = self.build_req_headers()
        response = requests.post(self._urls.paper_cancel_order(self._account_id, order_id), headers=headers, timeout=self.timeout)
        return bool(response)

    def get_social_posts(self, topic, num=100):
        ''' Fetch social-feed posts for a topic. '''
        headers = self.build_req_headers()
        response = requests.get(self._urls.social_posts(topic, num), headers=headers, timeout=self.timeout)
        return response.json()

    def get_social_home(self, topic, num=100):
        ''' Fetch the social home feed for a topic. '''
        headers = self.build_req_headers()
        response = requests.get(self._urls.social_home(topic, num), headers=headers, timeout=self.timeout)
        return response.json()
if __name__ == '__main__':
    # Minimal CLI entry point: choose the live or paper-trading client.
    parser = argparse.ArgumentParser(description='Interface with Webull. Paper trading is not the default.')
    parser.add_argument('-p', '--use-paper', help='Use paper account instead.', action='store_true')
    args = parser.parse_args()
    wb = paper_webull() if args.use_paper else webull()
| tedchou12/webull | webull/webull.py | webull.py | py | 63,799 | python | en | code | 576 | github-code | 36 |
37775727462 | # https://leetcode-cn.com/problems/reverse-string/
# 编写一个函数,其作用是将输入的字符串反转过来。输入字符串以字符数组 char[] 的形式给出。
#
# 不要给另外的数组分配额外的空间,你必须原地修改输入数组、使用 O(1) 的额外空间解决这一问题。
#
# 你可以假设数组中的所有字符都是 ASCII 码表中的可打印字符。
#
# 示例 1:
#
# 输入:["h","e","l","l","o"]
# 输出:["o","l","l","e","h"]
# 示例 2:
#
# 输入:["H","a","n","n","a","h"]
# 输出:["h","a","n","n","a","H"]
from typing import List
class Solution:
# 直接交换,交换次数为数组长度除以2的整数商
def reverseString(self, s: List[str]) -> None:
"""
Do not return anything, modify s in-place instead.
"""
if not s:
return
times = len(s) // 2
for i in range(times):
s[i], s[-(i + 1)] = s[-(i + 1)], s[i]
# 简化方案
def reverseString2(self, s: List[str]) -> None:
s[:] = s[::-1]
# 双指针
def reverseString3(self, s: List[str]) -> None:
"""
Do not return anything, modify s in-place instead.
"""
l, r = 0, len(s) - 1
while l < r:
s[l], s[r] = s[r], s[l]
l += 1
r -= 1
# python 有个 reverse 函数,可以直接翻转列表
| cookie-rabbit/LeetCode_practice | 专题/入门/字符串/344 反转字符串/1.py | 1.py | py | 1,403 | python | en | code | 1 | github-code | 36 |
19214969558 | from twitter_sentiment import avg_sentiment, get_tweets
from time import sleep
import pickle
import re
import numpy as np
# open the mappings
mappingFile = open('mappings', 'rb')
mappings:dict = pickle.load(mappingFile)
mappingFile.close()
def find_avg_sentiment(row):
"""
Find average sentiment of an item in dataset
"""
print("Finding sentiment of " + row['model'])
hashtags = row['hashtags'].split("|")
#get tweets
tweets = get_tweets(hashtags)
# print(tweets)
#get avg_sentiment
avgs = avg_sentiment(tweets)
# sleep
sleep(5)
return avgs
def map_to_value(column_name, index):
"""
Map id to real value of a column
"""
global mappings
if column_name in mappings.keys():
return mappings[column_name][index]
return index
def feature_list():
return [
'abs', 'compression_ratio', 'coupe_type',
'cylinder_bore', 'doors', 'fuel_tank_volume', 'fuel_type',
'kerb_weight', 'number_of_cylinders',
'number_of_gears','number_of_valves_per_cylinder', 'piston_stroke',
'position_of_cylinders', 'power', 'seats', 'torque', 'wheelbase'
]
def beautify_text(text):
"""
Beautify the sluggified string
"""
return re.sub(r'[\_\-]+',' ', text).capitalize()
def extract_cols(Y):
vals = Y.values if hasattr(Y, 'values') else Y
return np.transpose(vals) | ArjunAtlast/main-project | FINAL/helpers.py | helpers.py | py | 1,411 | python | en | code | 0 | github-code | 36 |
4500680225 | import abc
import os
import xml.etree.ElementTree as ET
from abc import ABC
from enum import Enum
from typing import List
from xbrl import XbrlParseException, LinkbaseNotFoundException
from xbrl.cache import HttpCache
from xbrl.helper.uri_helper import resolve_uri
LINK_NS: str = "{http://www.xbrl.org/2003/linkbase}"
XLINK_NS: str = "{http://www.w3.org/1999/xlink}"
XBRLDT_NS: str = "{http://xbrl.org/2005/xbrldt}"
XML_NS: str = "{http://www.w3.org/XML/1998/namespace}"
class LinkbaseType(Enum):
""" Enum of linkbase types, that this parser can parse """
DEFINITION = 0x001
CALCULATION = 0x002
PRESENTATION = 0x003
LABEL = 0x004
@staticmethod
def get_type_from_role(role: str) -> int or None:
"""
Takes a xlink:role (i.e http://www.xbrl.org/2003/role/definitionLinkbaseRef) and returns the corresponding
LinkbaseType
@param role:
@return: LinkbaseType or None if the role is unknown
"""
return {
'http://www.xbrl.org/2003/role/definitionLinkbaseRef': LinkbaseType.DEFINITION,
'http://www.xbrl.org/2003/role/calculationLinkbaseRef': LinkbaseType.CALCULATION,
'http://www.xbrl.org/2003/role/presentationLinkbaseRef': LinkbaseType.PRESENTATION,
'http://www.xbrl.org/2003/role/labelLinkbaseRef': LinkbaseType.LABEL,
}.get(role, None)
@staticmethod
def guess_linkbase_role(href: str) -> int or None:
"""
Guesses the linkbase role based on the name of the linkbase
@param href:
@return:
"""
return LinkbaseType.DEFINITION if '_def' in href \
else LinkbaseType.CALCULATION if '_cal' in href \
else LinkbaseType.PRESENTATION if '_pre' in href \
else LinkbaseType.LABEL if '_lab' in href \
else None
class AbstractArcElement(ABC):
"""
Represents an abstract Arc
An Arc links two Locators together and assigns a relation ship between those two items.
Arcs are used in all linkbases (definition, calculation, presentation and label)
From the Xbrl Specification 2.0:
Standard Arc Element:
An element derived from xl:arc that is defined in this specification, Specifically,
one of: link:presentationArc, link:calculationArc, link:labelArc, link:referenceArc, or link:definitionArc.
i.e:
<link:definitionArc order="30"
xlink:arcrole="http://xbrl.org/int/dim/arcrole/domain-member"
xlink:from="loc_AssetsAbstract"
xlink:to="loc_CashAndCashEquivalentsAtCarryingValue" xlink:type="arc"/>
This arc describes the relationship between Assets and Cash and Cash Equivalents. Cash is a sub-domain from Assets.
"""
def __init__(self, from_locator, arcrole: str, order: int) -> None:
"""
@param from_locator: Locator Object from that the arc is pointing from
to_locator: is missing here, because not all arc's point to another locator. A label arc for example points
to multiple link:label's
@type from_locator: Locator
@param arcrole: Role of the arc
@param order: Order attribute of the arc. Only makes sense in combination with the arc role.
i.e arcrole parent-child together with the order attribute defines a hierarchical relationship between elements
(XBRL for Interactive Data, 2009, p.59)
"""
self.from_locator = from_locator
self.arcrole: str = arcrole
self.order: int = order
@abc.abstractmethod
def to_dict(self):
""" Returns a dictionary representation of the arc """
pass
class RelationArc(AbstractArcElement, ABC):
"""
A Relation arc is an abstract implementation of an AbstractArc Element that has the to_locator attribute
"""
def __init__(self, from_locator, to_locator, arcrole: str, order: int) -> None:
super().__init__(from_locator, arcrole, order)
self.to_locator: Locator = to_locator
class DefinitionArc(RelationArc):
""" Represents a definition arc (link:definitionArc) """
def __init__(self, from_locator, to_locator, arcrole: str, order: int, closed: bool = None,
context_element: str = None) -> None:
"""
@type from_locator: Locator
@type to_locator: Locator
@param arcrole: Can be one of the following: (XBRL for Interactive Data, 2009, p.140)
- http://xbrl.org/int/dim/arcrole/all:
connects a measure to a hypercube implying use of dimensions attached to this hypercube and their
specified breakdowns
Elements:
- closed: boolean,
- contextElement: (segment/scenario),
- targetRole: anyURI
- http://xbrl.org/int/dim/arcrole/notAll
connects a measure to a hypercube prohibiting use of dimensions attached to this hypercube and their
specified breakdowns
Elements:
- closed: boolean,
- contextElement: (segment/scenario),
- targetRole: anyURI
- http://xbrl.org/int/dim/arcrole/hypercube-dimension
connects a hypercube and a dimension item
Elements:
- targetRole: anyURI
- http://xbrl.org/int/dim/arcrole/dimension-domain
connects a dimension item to its top level members in every variation of a breakdown
Elements:
- usage: boolean,
- targetRole: anyURI
- http://xbrl.org/int/dim/arcrole/domain-member
defines hierarchical relations for measures and domain members; in case of measures implies inheritance
of dimensional characteristics from upper-level concepts
Elements:
- usage:boolean,
- targetRole: anyURI
- http://xbrl.org/int/dim/arcrole/dimension-default
links dimension item to its default member (usually total of the full breakdown)
Elements:
None
"""
super().__init__(from_locator, to_locator, arcrole, order)
self.closed: bool or None = closed
self.context_element: bool or None = context_element
def __str__(self) -> str:
return "Linking to {} as {}".format(str(self.to_locator.name), self.arcrole.split('/')[-1])
def to_dict(self) -> dict:
""" Returns a dictionary representation of the arc """
return {"arcrole": self.arcrole, "order": self.order, "closed": self.closed,
"contextElement": self.context_element,
"locator": self.to_locator.to_dict()}
class CalculationArc(RelationArc):
""" Represents a calculation arc (link:calculationArc) """
def __init__(self, from_locator, to_locator, order: int, weight: float) -> None:
"""
@type from_locator: Locator
@type to_locator: Locator
@param weight: Defines the sign and multiplication factor for two connected concepts
(XBRL for Interactive Data, 2009, p.61)
"""
# A Calculation arc only has the summation-item arc role
super().__init__(from_locator, to_locator, "http://www.xbrl.org/2003/arcrole/summation-item", order)
self.weight: float = weight
def to_dict(self):
""" Returns a dictionary representation of the arc """
return {"arcrole": self.arcrole, "order": self.order, "weight": self.weight,
"locator": self.to_locator.to_dict()}
def __str__(self) -> str:
return "{} {}".format(self.arcrole.split('/')[-1], self.to_locator.concept_id)
class PresentationArc(RelationArc):
""" Represents a presentation arc (link:presentationArc) """
def __init__(self, from_locator, to_locator, order: int, priority: int, preferred_label: str = None) -> None:
"""
@type from_locator: Locator
@type to_locator: Locator
@param preferred_label: indicates the most appropriate kind of label to use when presenting the arc's child Concept
(XBRL Specification 2.1, 5.2.4.2.1)
"""
# A Presentation arc only has the parent-child arc role
super().__init__(from_locator, to_locator, "http://www.xbrl.org/2003/arcrole/parent-child", order)
self.priority = priority
self.preferred_label: str = preferred_label
def to_dict(self):
""" Returns a dictionary representation of the arc """
return {"arcrole": self.arcrole, "order": self.order,
"preferredLabel": self.preferred_label, "locator": self.to_locator.to_dict()}
def __str__(self) -> str:
return "{} {}".format(self.arcrole.split('/')[-1], self.to_locator.concept_id)
class Label:
"""
Class representing a label (link:label)
This class is only used by LabelArcs in label Linkbases
Example for label in label linkbase:
<link:label id="lab_Assets_label_en-US" xlink:label="lab_Assets" xlink:role="http://www.xbrl.org/2003/role/label"
xlink:type="resource" xml:lang="en-US">Assets</link:label>
"""
def __init__(self, label: str, label_type: str, language: str, text: str) -> None:
"""
@param label: the xlink:label of the label (locators will be referencing the label over the xlink:label attribute)
@param label_type: the role of the label, possible values (XBRL for Interactive Data, 2009, p.61):
- http://www.xbrl.org/2003/role/label:
Standard label for a concept
- http://www.xbrl.org/2003/role/terseLabel:
Short label for a concept, often omitting text that should be inferable when the concept is reported
in the context of other related concepts
- http://www.xbrl.org/2003/role/verboseLabel:
Extended label for a concept, making sure not to omit text that is required to enable the label to be
understood on a standalone basis
- http://www.xbrl.org/2003/role/totalLabel:
The label for a concept for use in presenting values associated with the concept when it is being
reported as the total of a set of other values
- http://www.xbrl.org/2003/role/periodStartLabel & http://www.xbrl.org/2003/role/periodEndLabel:
The label for a concept with periodType="instant" for use in presenting values associated with the
concept when it is being reported as a start (end) of period value
- http://www.xbrl.org/2003/role/documentation:
Documentation of a concept, providing an explanation of its meaning and its appropriate usage and any
other documentation deemed necessary
"""
# the label of the link:label element (see Locator label) i.e: lab_Assets
self.label: str = label
self.language = language
# the label itself i.e: "Defined Benefit Plan Disclosure [Line Items]"
self.text: str = text.strip() if text is not None else text
# the role of the label i.e: http://www.xbrl.org/2003/role/terseLabel
self.label_type: str = label_type
def __str__(self) -> str:
return self.text
class LabelArc(AbstractArcElement):
    """
    Represents a label arc (link:labelArc).

    Like the relational arcs, its xml representation has an xlink:to attribute.
    The difference: a relational arc's xlink:to points to exactly one other locator (1:1),
    whereas a label arc's xlink:to points to multiple link:label elements.
    """

    def __init__(self, from_locator, order: int, labels: List[Label]) -> None:
        """
        @type from_locator: Locator
        @param order: order attribute of the arc
        @param labels: array of Label objects the arc points to
        @type labels: Label[]
        """
        # A label arc can only have the concept-label arc role
        super().__init__(from_locator, "http://www.xbrl.org/2003/arcrole/concept-label", order)
        self.labels = labels

    def __str__(self) -> str:
        return "LabelArc with {} labels".format(len(self.labels))

    def to_dict(self) -> dict:
        """
        Returns a dictionary representation of the label arc,
        keyed by label role with the label text as value
        (a later label with the same role overwrites an earlier one).
        """
        return {lbl.label_type: lbl.text for lbl in self.labels}
class Locator:
    """
    Represents a Locator. The Locator points from the Linkbase back to the Concept that is
    defined in the schema file.
    i.e: <link:loc xlink:href="../elts/us-gaap-2019-01-31.xsd#us-gaap_Goodwill" xlink:label="loc_Goodwill"
        xlink:type="locator"/>
    """

    def __init__(self, href: str, name: str):
        """
        @param href: the link the locator points to, IN ABSOLUTE FORMAT (starting with http...)
        @param name: the name (xlink:label) of the locator
        """
        # link to the concept the locator points to (i.e: ../elts/us-gaap-2019-01-31.xsd#us-gaap_Goodwill)
        self.href: str = href
        # label of the Locator (i.e: loc_Goodwill)
        self.name: str = name
        # id of the concept, taken from the fragment part of the href (i.e: us-gaap_Goodwill)
        self.concept_id: str = href.split('#')[1]
        # locators connected to this one via an arc where this locator was the "to" side;
        # only used for finding the root locators (those without parents)
        self.parents: List[Locator] = []
        # all arcs that reference this locator in their "from" attribute
        self.children: List[AbstractArcElement] = []

    def __str__(self) -> str:
        return "{} with {} children".format(self.name, len(self.children))

    def to_dict(self) -> dict:
        """
        Returns a dictionary representation of the Locator.
        Ignores the parents array; the recursive hierarchy is built from the children.
        @return:
        """
        return {
            "name": self.name,
            "href": self.href,
            "concept_id": self.concept_id,
            "children": [child.to_dict() for child in self.children],
        }

    def to_simple_dict(self) -> dict:
        """
        Same as to_dict() but without the ArcElements: returns just the hierarchy, dropping
        the information about the type of relationship between parent and children.
        @return:
        """
        nested = [child.to_locator.to_simple_dict() for child in self.children]
        return {"concept_id": self.concept_id, "children": nested}
class ExtendedLink:
    """
    Generic class for definitionLink, labelLink, referenceLink and calculationLink elements.

    From the Xbrl Specification 2.0:
    Standard Extended Link Element: an element derived from xl:link that is defined in this
    specification. Specifically, one of: link:presentationLink, link:calculationLink,
    link:labelLink, link:referenceLink, or link:definitionLink.
    """

    def __init__(self, role: str, elr_id: str or None, root_locators: List[Locator]) -> None:
        """
        @param role: role of the extended link element
        @param elr_id: link to the extended link role as defined in the schema file
            (i.e aapl-20180929.xsd#ConsolidatedStatementsOfComprehensiveIncome).
            Is None for label linkbases!
        @param root_locators: array of all root locators (locators that have no parents)
        """
        self.role: str = role
        self.elr_id: str or None = elr_id
        self.root_locators: List[Locator] = root_locators

    def to_dict(self) -> dict:
        """
        Returns a dictionary representation of the ExtendedLinkElement.
        @return:
        """
        locator_dicts = [locator.to_dict() for locator in self.root_locators]
        return {"role": self.role, "elr_id": self.elr_id, "root_locators": locator_dicts}

    def to_simple_dict(self) -> dict:
        """
        Same as to_dict() but without the ArcElements: returns just the hierarchy, dropping
        the information about the type of relationship between parent and children.
        @return:
        """
        return {"role": self.role,
                "children": [locator.to_simple_dict() for locator in self.root_locators]}

    def __str__(self) -> str:
        return self.elr_id
class Linkbase:
    """
    Represents a complete (non-generic) Linkbase.
    """

    def __init__(self, extended_links: List[ExtendedLink], linkbase_type: LinkbaseType, linkbase_uri: None or str = None) -> None:
        """
        :param extended_links: all standard extended links defined in the linkbase
        :type extended_links: [ExtendedDefinitionLink] or [ExtendedCalculationLink] or [ExtendedPresentationLink] or [ExtendedLabelArc]
        :param linkbase_type: type of the linkbase
        :param linkbase_uri: either the path or the url of the linkbase
            (depends on where the parser loaded it from)
        """
        self.extended_links: List[ExtendedLink] = extended_links
        self.type = linkbase_type
        self.linkbase_uri = linkbase_uri

    def to_dict(self) -> dict:
        """
        Converts the Linkbase object into a dictionary representing the hierarchy of the locators.
        """
        elements = [link.to_dict() for link in self.extended_links]
        return {"standardExtendedLinkElements": elements}

    def to_simple_dict(self) -> dict:
        """
        Same as to_dict() but without the ArcElements: returns just the hierarchy, dropping
        the information about the type of relationship between parent and children.
        """
        elements = [link.to_simple_dict() for link in self.extended_links]
        return {"standardExtendedLinkElements": elements}
def parse_linkbase_url(linkbase_url: str, linkbase_type: LinkbaseType, cache: HttpCache) -> Linkbase:
    """
    Parses a remotely hosted linkbase given its url.
    :param linkbase_url: full link to the linkbase
    :param linkbase_type: type of the linkbase (calculation-, label-, presentation-, ...)
    :param cache: :class:`xbrl.cache.HttpCache` instance
    :return: parsed :class:`xbrl.linkbase.Linkbase` object
    """
    if not linkbase_url.startswith('http'):
        raise XbrlParseException(
            'This function only parses remotely saved linkbases. Please use parse_linkbase to parse local linkbases')
    # download (or fetch from the cache) the linkbase, then parse the local copy
    linkbase_path: str = cache.cache_file(linkbase_url)
    return parse_linkbase(linkbase_path, linkbase_type, linkbase_url)
def parse_linkbase(linkbase_path: str, linkbase_type: LinkbaseType, linkbase_url: str or None = None) -> Linkbase:
    """
    Parses a linkbase and returns a Linkbase object containing all
    locators, arcs and links of the linkbase in a hierarchical order (a Tree).
    A Linkbase usually does not import any additional files,
    thus no cache instance is needed.
    :param linkbase_path: path to the linkbase
    :param linkbase_type: Type of the linkbase
    :param linkbase_url: if the locators of the linkbase contain relative references to concepts
        (i.e.: './../schema.xsd#Assets') the url has to be set so that the parser can connect
        the locator with the concept from the taxonomy
    :return: parsed :class:`xbrl.linkbase.Linkbase` object
    :raises XbrlParseException: if linkbase_path is a url instead of a local path
    :raises LinkbaseNotFoundException: if no file exists at linkbase_path
    """
    if linkbase_path.startswith('http'): raise XbrlParseException(
        'This function only parses locally saved linkbases. Please use parse_linkbase_url to parse remote linkbases')
    if not os.path.exists(linkbase_path):
        raise LinkbaseNotFoundException(f"Could not find linkbase at {linkbase_path}")
    root: ET.Element = ET.parse(linkbase_path).getroot()
    # store the role refs in a dictionary, with the role uri as key.
    # Role Refs are xlink's that connect the extended Links to the ELR defined in the schema
    role_refs: dict = {}
    for role_ref in root.findall(LINK_NS + 'roleRef'):
        role_refs[role_ref.attrib['roleURI']] = role_ref.attrib[XLINK_NS + 'href']
    # Loop over all definition/calculation/presentation/label links.
    # Each extended link contains the locators and the arcs.
    extended_links: List[ExtendedLink] = []
    # figure out if we want to search for definitionLink, calculationLink, presentationLink or labelLink
    # and for which type of arcs we are searching: definitionArc, calculationArc, presentationArc or labelArc
    extended_link_tag: str
    arc_type: str
    if linkbase_type == LinkbaseType.DEFINITION:
        extended_link_tag = "definitionLink"
        arc_type = "definitionArc"
    elif linkbase_type == LinkbaseType.CALCULATION:
        extended_link_tag = "calculationLink"
        arc_type = "calculationArc"
    elif linkbase_type == LinkbaseType.PRESENTATION:
        extended_link_tag = "presentationLink"
        arc_type = "presentationArc"
    else:
        extended_link_tag = "labelLink"
        arc_type = "labelArc"
    # loop over all extended links. Extended links can be: link:definitionLink, link:calculationLink e.t.c
    # Note that label linkbases only have one extended link
    for extended_link in root.findall(LINK_NS + extended_link_tag):
        extended_link_role: str = extended_link.attrib[XLINK_NS + 'role']
        # find all locators (link:loc) and arcs (i.e link:definitionArc or link:calculationArc)
        locators = extended_link.findall(LINK_NS + 'loc')
        arc_elements = extended_link.findall(LINK_NS + arc_type)
        # store the locators in a dictionary. The label attribute is the key. This way we can access them in O(1)
        locator_map = {}
        for loc in locators:
            loc_label: str = loc.attrib[XLINK_NS + 'label']
            # check if the locator href is absolute
            locator_href = loc.attrib[XLINK_NS + 'href']
            if not locator_href.startswith('http'):
                # resolve the path
                # todo, try to get the URL here, instead of the path!!!
                locator_href = resolve_uri(linkbase_url if linkbase_url else linkbase_path, locator_href)
            locator_map[loc_label] = Locator(locator_href, loc_label)
        # Performance: extract the labels in advance. The label name (xlink:label) is the key and the value is
        # an array of all labels that have this name. This can be multiple labels (label, terseLabel, documentation...)
        label_map = {}
        if linkbase_type == LinkbaseType.LABEL:
            for label_element in extended_link.findall(LINK_NS + 'label'):
                label_name: str = label_element.attrib[XLINK_NS + 'label']
                label_role: str = label_element.attrib[XLINK_NS + 'role']
                label_lang: str = label_element.attrib[XML_NS + 'lang']
                label_obj = Label(label_name, label_role, label_lang, label_element.text)
                if label_name in label_map:
                    label_map[label_name].append(label_obj)
                else:
                    label_map[label_name] = [label_obj]
        for arc_element in arc_elements:
            # if the use of the element referenced by the arc is prohibited, just ignore it
            if 'use' in arc_element.attrib and arc_element.attrib['use'] == 'prohibited': continue
            # extract the attributes of the arc. The arc always connects two locators through the from and to
            # attributes; additionally it defines the relationship between these two locators (arcrole)
            arc_from: str = arc_element.attrib[XLINK_NS + 'from']
            arc_to: str = arc_element.attrib[XLINK_NS + 'to']
            arc_role: str = arc_element.attrib[XLINK_NS + 'arcrole']
            arc_order: int = arc_element.attrib['order'] if 'order' in arc_element.attrib else None
            # the following attributes are linkbase specific, so we have to check if they exist!
            # Needed for (sometimes) definitionArc.
            # Bugfix: the guard previously checked for the "weight" attribute (copy-paste error) and
            # used bool() on the raw string — bool("false") is True. Check the "closed" attribute
            # itself and parse the xml boolean explicitly instead.
            arc_closed: bool = arc_element.attrib[XBRLDT_NS + "closed"].lower() in ('true', '1') \
                if (XBRLDT_NS + "closed") in arc_element.attrib else None
            arc_context_element: str = arc_element.attrib[XBRLDT_NS + "contextElement"] if \
                (XBRLDT_NS + "contextElement") in arc_element.attrib else None
            # Needed for calculationArc
            arc_weight: float = float(arc_element.attrib["weight"]) if "weight" in arc_element.attrib else None
            # Needed for presentationArc
            arc_priority: int = int(arc_element.attrib["priority"]) if "priority" in arc_element.attrib else None
            arc_preferred_label: str = arc_element.attrib[
                "preferredLabel"] if "preferredLabel" in arc_element.attrib else None
            # Create the arc object based on the current linkbase type
            arc_object: AbstractArcElement
            if linkbase_type == LinkbaseType.DEFINITION:
                arc_object = DefinitionArc(
                    locator_map[arc_from], locator_map[arc_to], arc_role, arc_order, arc_closed,
                    arc_context_element)
            elif linkbase_type == LinkbaseType.CALCULATION:
                arc_object = CalculationArc(locator_map[arc_from], locator_map[arc_to], arc_order, arc_weight)
            elif linkbase_type == LinkbaseType.PRESENTATION:
                arc_object = PresentationArc(locator_map[arc_from], locator_map[arc_to], arc_order, arc_priority,
                                             arc_preferred_label)
            else:
                # find all labels that are referenced by this arc.
                # These were preprocessed previously, so we can just take them
                arc_object = LabelArc(locator_map[arc_from], arc_order, label_map[arc_to])
            # Build the hierarchy for the Locators.
            if linkbase_type != LinkbaseType.LABEL:
                # This does not work for label linkbases, since link:labelArcs only link to link:labels
                # and not to other locators!!
                locator_map[arc_to].parents.append(locator_map[arc_from])
            locator_map[arc_from].children.append(arc_object)
        # find the top elements of the tree (all elements that have no parents)
        root_locators = []
        for locator in locator_map.values():
            if len(locator.parents) == 0:
                root_locators.append(locator)
        # only add the extended link to the linkbase if the link references a role
        # (some filers have empty links in which we are not interested:
        # <definitionLink xlink:type="extended" xlink:role="http://www.xbrl.org/2003/role/link"/>)
        if extended_link_role in role_refs:
            extended_links.append(
                ExtendedLink(extended_link_role, role_refs[extended_link_role], root_locators))
        elif linkbase_type == LinkbaseType.LABEL:
            extended_links.append(ExtendedLink(extended_link_role, None, root_locators))
    return Linkbase(extended_links, linkbase_type, linkbase_url if linkbase_url else linkbase_path)
| manusimidt/py-xbrl | xbrl/linkbase.py | linkbase.py | py | 27,220 | python | en | code | 78 | github-code | 36 |
174132737 | from datetime import datetime, timezone, timedelta
from django.db.models import Q, Sum
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from django.conf import settings
from django.template.loader import render_to_string
from elasticsearch.helpers import bulk
from api.indexes import ES_PAGE_NAME
from api.esconnection import ES_CLIENT
from api.models import Country, Appeal, Event, FieldReport, ActionsTaken
from api.logger import logger
from notifications.models import RecordType, SubscriptionType, Subscription, SurgeAlert
from notifications.hello import get_hello
from notifications.notification import send_notification
from deployments.models import PersonnelDeployment, ERU
from main.frontend import frontend_url
import html
# Timing configuration. The command is expected to run every 5 minutes (cron).
time_interval = timedelta(minutes = 5)
time_interva2 = timedelta( days = 1) # to check that the change was not between time_interval and time_interva2, so that the user does not receive an email more frequently than once a day.
time_interva7 = timedelta( days = 7) # lookback window for digest mode
basetime = int(20314) # weekday - hour - min for digest timing (a 5-minute window once a week)
daily_retro = int(654) # hour - min for daily retrospective email timing (a 5-minute window a day) | Should not contain a leading 0!
max_length = 280 # after this length (at the first space) we cut the sent content
events_sent_to = {} # to document sent events before re-sending them via specific following
# Maps record types to the email template used for that notification.
# 99 and 98 are ad-hoc keys (generic and operation update) with no RecordType constant.
template_types = {
    99: 'design/generic_notification.html',
    RecordType.FIELD_REPORT: 'design/field_report.html',
    RecordType.APPEAL: 'design/new_operation.html',
    98: 'design/operation_update.html', # TODO: Either Operation Update needs a number or it should be constructed from other types (ask someone)
    RecordType.WEEKLY_DIGEST: 'design/weekly_digest.html',
}
class Command(BaseCommand):
help = 'Index and send notifications about new/changed records'
# Digest mode duration is 5 minutes once a week
def is_digest_mode(self):
today = datetime.utcnow().replace(tzinfo=timezone.utc)
weekdayhourmin = int(today.strftime('%w%H%M'))
return basetime <= weekdayhourmin and weekdayhourmin < basetime + 5
def is_retro_mode(self):
today = datetime.utcnow().replace(tzinfo=timezone.utc)
hourmin = int(today.strftime('%H%M'))
return daily_retro <= hourmin and hourmin < daily_retro + 5
    def get_time_threshold(self):
        # Start of the standard 5-minute lookback window (tz-aware UTC)
        return datetime.utcnow().replace(tzinfo=timezone.utc) - time_interval
    def get_time_threshold2(self):
        # One day back (tz-aware UTC) — used to throttle repeat notifications to once a day
        return datetime.utcnow().replace(tzinfo=timezone.utc) - time_interva2
    def get_time_threshold_digest(self):
        # One week back (tz-aware UTC) — lookback window for the weekly digest
        return datetime.utcnow().replace(tzinfo=timezone.utc) - time_interva7
def gather_country_and_region(self, records):
# Appeals only, since these have a single country/region
countries = []
regions = []
for record in records:
if record.country is not None:
countries.append('c%s' % record.country.id)
if record.country.region is not None:
regions.append('r%s' % record.country.region.id)
countries = list(set(countries))
regions = list(set(regions))
return countries, regions
def gather_countries_and_regions(self, records):
# Applies to emergencies and field reports, which have a
# many-to-many relationship to countries and regions
countries = []
for record in records:
if record.countries is not None:
countries += [country.id for country in record.countries.all()]
countries = list(set(countries))
qs = Country.objects.filter(pk__in=countries)
regions = ['r%s' % country.region.id for country in qs if country.region is not None]
countries = ['c%s' % id for id in countries]
return countries, regions
    def gather_subscribers(self, records, rtype, stype):
        """Return the e-mail addresses of all active users to notify about ``records``.

        In digest mode only WEEKLY_DIGEST subscribers are returned. Otherwise the
        direct record-type subscribers are unioned with users subscribed via
        disaster-type ('d<id>'), country ('c<id>') or region ('r<id>') lookups.
        """
        # Correction for the new notification types: map the incoming record type onto the
        # subscription record type users actually subscribe to, forcing stype to NEW.
        if rtype == RecordType.EVENT or rtype == RecordType.FIELD_REPORT:
            rtype_of_subscr = RecordType.NEW_EMERGENCIES
            stype = SubscriptionType.NEW
        elif rtype == RecordType.APPEAL:
            rtype_of_subscr = RecordType.NEW_OPERATIONS
            stype = SubscriptionType.NEW
        else:
            rtype_of_subscr = rtype
        # Gather the email addresses of users who should be notified
        if self.is_digest_mode():
            subscribers = User.objects.filter(subscription__rtype=RecordType.WEEKLY_DIGEST, \
                is_active=True).values('email')
            # In digest mode we do not care about other circumstances, just get every subscriber's email.
            emails = [subscriber['email'] for subscriber in subscribers]
            return emails
        else:
            # Start with any users subscribed directly to this record type.
            subscribers = User.objects.filter(subscription__rtype=rtype_of_subscr, \
                subscription__stype=stype, is_active=True).values('email')
            # For FOLLOWED_EVENTs and DEPLOYMENTs we do not collect other generic (d*, country, region) subscriptions, just one. This part is not called.
            if rtype_of_subscr != RecordType.FOLLOWED_EVENT and \
                    rtype_of_subscr != RecordType.SURGE_ALERT and \
                    rtype_of_subscr != RecordType.SURGE_DEPLOYMENT_MESSAGES:
                # Build the lookup keys ('d*', 'c*', 'r*') covered by the given records.
                dtypes = list(set(['d%s' % record.dtype.id for record in records if record.dtype is not None]))
                if (rtype_of_subscr == RecordType.NEW_OPERATIONS):
                    # Appeals carry a single country/region each
                    countries, regions = self.gather_country_and_region(records)
                else:
                    countries, regions = self.gather_countries_and_regions(records)
                lookups = dtypes + countries + regions
                if len(lookups):
                    # Union with the lookup-based subscribers; distinct() avoids duplicate rows
                    subscribers = (subscribers | User.objects.filter(subscription__lookup_id__in=lookups, is_active=True).values('email')).distinct()
            emails = [subscriber['email'] for subscriber in subscribers]
            return emails
def get_template(self, rtype=99):
#older: return 'email/generic_notification.html'
#old: return 'design/generic_notification.html'
return template_types[rtype]
# Get the front-end url of the resource
def get_resource_uri (self, record, rtype):
# Determine the front-end URL
resource_uri = frontend_url
if rtype == RecordType.SURGE_ALERT or rtype == RecordType.FIELD_REPORT: # Pointing to event instead of field report %s/%s/%s - Munu asked - ¤
belonging_event = record.event.id if record.event is not None else 999 # Very rare
resource_uri = '%s/emergencies/%s#overview' % (frontend_url, belonging_event)
elif rtype == RecordType.SURGE_DEPLOYMENT_MESSAGES:
resource_uri = '%s/%s' % (frontend_url, 'deployments') # can be further sophisticated
elif rtype == RecordType.APPEAL and (
record.event is not None and not record.needs_confirmation):
# Appeals with confirmed emergencies link to that emergency
resource_uri = '%s/emergencies/%s#overview' % (frontend_url, record.event.id)
elif rtype != RecordType.APPEAL:
# One-by-one followed or globally subscribed emergencies
resource_uri = '%s/%s/%s' % (
frontend_url,
'emergencies' if rtype == RecordType.EVENT or rtype == RecordType.FOLLOWED_EVENT else 'reports', # this else never occurs, see ¤
record.id
)
return resource_uri
def get_admin_uri (self, record, rtype):
admin_page = {
RecordType.FIELD_REPORT: 'api/fieldreport',
RecordType.APPEAL: 'api/appeal',
RecordType.EVENT: 'api/event',
RecordType.FOLLOWED_EVENT: 'api/event',
RecordType.SURGE_DEPLOYMENT_MESSAGES: 'deployments/personneldeployment',
RecordType.SURGE_ALERT: 'notifications/surgealert',
}[rtype]
return 'https://%s/admin/%s/%s/change' % (
settings.BASE_URL,
admin_page,
record.id,
)
def get_record_title(self, record, rtype):
if rtype == RecordType.FIELD_REPORT:
sendMe = record.summary
if record.countries.all():
country = record.countries.all()[0].name
if country not in sendMe:
sendMe = sendMe + ' (' + country + ')'
return sendMe
elif rtype == RecordType.SURGE_ALERT:
return record.operation + ' (' + record.atype.name + ', ' + record.category.name.lower() +')'
elif rtype == RecordType.SURGE_DEPLOYMENT_MESSAGES:
return '%s, %s' % (record.country_deployed_to, record.region_deployed_to)
else:
return record.name
def get_record_content(self, record, rtype):
if rtype == RecordType.FIELD_REPORT:
sendMe = record.description
elif rtype == RecordType.APPEAL:
sendMe = record.sector
if record.code:
sendMe += ', ' + record.code
elif rtype == RecordType.EVENT or rtype == RecordType.FOLLOWED_EVENT:
sendMe = record.summary
elif rtype == RecordType.SURGE_ALERT:
sendMe = record.message
elif rtype == RecordType.SURGE_DEPLOYMENT_MESSAGES:
sendMe = record.comments
else:
sendMe = '?'
return html.unescape(sendMe) # For contents we allow HTML markup. = autoescape off in generic_notification.html template.
def get_record_display(self, rtype, count):
display = {
RecordType.FIELD_REPORT: 'field report',
RecordType.APPEAL: 'operation',
RecordType.EVENT: 'event',
RecordType.FOLLOWED_EVENT: 'event',
RecordType.SURGE_DEPLOYMENT_MESSAGES: 'surge deployment',
RecordType.SURGE_ALERT: 'surge alert',
}[rtype]
if (count > 1):
display += 's'
return display
def get_weekly_digest_data(self, field):
today = datetime.utcnow().replace(tzinfo=timezone.utc)
if field == 'dref':
return Appeal.objects.filter(end_date__gt=today, atype=0).count()
elif field == 'ea':
return Appeal.objects.filter(end_date__gt=today, atype=1).count()
elif field == 'fund':
amount_req = (
Appeal.objects
.filter(Q(end_date__gt=today, atype=1) | Q(end_date__gt=today, atype=2))
.aggregate(Sum('amount_requested'))['amount_requested__sum'] or 0
)
amount_fund = (
Appeal.objects
.filter(Q(end_date__gt=today, atype=1) | Q(end_date__gt=today, atype=2))
.aggregate(Sum('amount_funded'))['amount_funded__sum'] or 0
)
percent = round(amount_fund / amount_req, 3) * 100
return percent
elif field == 'budget':
amount = Appeal.objects.filter(end_date__gt=today).aggregate(Sum('amount_requested'))['amount_requested__sum'] or 0
rounded_amount = round(amount / 1000000, 2)
return rounded_amount
elif field == 'pop':
people = Appeal.objects.filter(end_date__gt=today).aggregate(Sum('num_beneficiaries'))['num_beneficiaries__sum'] or 0
rounded_people = round(people / 1000000, 2)
return rounded_people
def get_weekly_digest_latest_ops(self):
dig_time = self.get_time_threshold_digest()
ops = Appeal.objects.filter(created_at__gte=dig_time).order_by('-created_at')
ret_ops = []
for op in ops:
op_to_add = {
'op_event_id': op.event_id,
'op_country': Country.objects.values_list('name', flat=True).get(id=op.country_id) if op.country_id else '',
'op_name': op.name,
'op_created_at': op.created_at,
'op_funding': op.amount_requested,
}
ret_ops.append(op_to_add)
return ret_ops
def get_weekly_digest_highlights(self):
dig_time = self.get_time_threshold_digest()
events = Event.objects.filter(is_featured=True, updated_at__gte=dig_time).order_by('-updated_at')
ret_highlights = []
for ev in events:
amount_requested = Appeal.objects.filter(event_id=ev.id).aggregate(Sum('amount_requested'))['amount_requested__sum'] or 0
amount_funded = Appeal.objects.filter(event_id=ev.id).aggregate(Sum('amount_funded'))['amount_funded__sum'] or 0
data_to_add = {
'hl_id': ev.id,
'hl_name': ev.name,
'hl_last_update': ev.updated_at,
'hl_people': Appeal.objects.filter(event_id=ev.id).aggregate(Sum('num_beneficiaries'))['num_beneficiaries__sum'] or 0,
'hl_funding': amount_requested,
'hl_deployed_eru': ERU.objects.filter(event_id=ev.id).aggregate(Sum('units'))['units__sum'] or 0,
'hl_deployed_sp': PersonnelDeployment.objects.filter(event_deployed_to_id=ev.id).count(),
'hl_coverage': round(amount_funded / amount_requested, 1) if amount_requested != 0 else 0,
}
ret_highlights.append(data_to_add)
return ret_highlights
def get_actions_taken(self, frid):
ret_actions_taken = {
'NTLS': [],
'PNS': [],
'FDRN': [],
}
actions_taken = ActionsTaken.objects.filter(field_report_id=frid)
for at in actions_taken:
action_to_add = {
'action_summary': at.summary,
'actions': [],
}
if at.actions.all():
for act in at.actions.all():
action_to_add['actions'].append(act)
if at.organization == 'NTLS':
ret_actions_taken['NTLS'].append(action_to_add)
elif at.organization == 'PNS':
ret_actions_taken['PNS'].append(action_to_add)
elif at.organization == 'FDRN':
ret_actions_taken['FDRN'].append(action_to_add)
return ret_actions_taken
def get_weekly_latest_frs(self):
dig_time = self.get_time_threshold_digest()
ret_fr_list = []
fr_list = list(FieldReport.objects.filter(created_at__gte=dig_time).order_by('-created_at'))
for fr in fr_list:
fr_data = {
'id': fr.id,
'country': fr.countries.all()[0].name if fr.countries else None,
'summary': fr.summary,
'created_at': fr.created_at,
}
ret_fr_list.append(fr_data)
return ret_fr_list
# Based on the notification type this constructs the different type of objects needed for the different templates
    def construct_template_record(self, rtype, record):
        """Build the per-record context object for the e-mail template of this record type.

        For WEEKLY_DIGEST ``record`` is ignored (pass None) and the digest figures are
        computed from the database instead.
        """
        if rtype != RecordType.WEEKLY_DIGEST:
            shortened = self.get_record_content(record, rtype)
            if len(shortened) > max_length:
                # cut at max_length but keep the word that straddles the boundary
                shortened = shortened[:max_length] + \
                    shortened[max_length:].split(' ', 1)[0] + '...' # look for the first space
        # TODO: Operation Update and Announcement types are missing
        if rtype == RecordType.FIELD_REPORT:
            rec_obj = {
                'resource_uri': self.get_resource_uri(record, rtype),
                'admin_uri': self.get_admin_uri(record, rtype),
                'title': self.get_record_title(record, rtype),
                'description': shortened,
                # each figure sums the report's own, governmental and other-source counts
                'key_figures': {
                    'affected': (record.num_affected or 0) + (record.gov_num_affected or 0) + (record.other_num_affected or 0),
                    'injured': (record.num_injured or 0) + (record.gov_num_injured or 0) + (record.other_num_injured or 0),
                    'dead': (record.num_dead or 0) + (record.gov_num_dead or 0) + (record.other_num_dead or 0),
                    'missing': (record.num_missing or 0) + (record.gov_num_missing or 0) + (record.other_num_missing or 0),
                    'displaced': (record.num_displaced or 0) + (record.gov_num_displaced or 0) + (record.other_num_displaced or 0),
                    'assisted': (record.num_assisted or 0) + (record.gov_num_assisted or 0) + (record.other_num_assisted or 0),
                    'local_staff': record.num_localstaff or 0,
                    'volunteers': record.num_volunteers or 0,
                    'expat_delegates': record.num_expats_delegates or 0,
                },
                'actions_taken': self.get_actions_taken(record.id),
                'actions_others': record.actions_others,
                'gov_assistance': 'Yes' if record.request_assistance else 'No',
                'ns_assistance': 'Yes' if record.ns_request_assistance else 'No',
            }
        elif rtype == RecordType.APPEAL:
            # Maybe we need these in the future
            # localstaff = FieldReport.objects.filter(event_id=record.event_id).values_list('num_localstaff', flat=True)
            # volunteers = FieldReport.objects.filter(event_id=record.event_id).values_list('num_volunteers', flat=True)
            # expats = FieldReport.objects.filter(event_id=record.event_id).values_list('num_expats_delegates', flat=True)
            rec_obj = {
                'resource_uri': self.get_resource_uri(record, rtype),
                'admin_uri': self.get_admin_uri(record, rtype),
                'title': self.get_record_title(record, rtype),
                'situation_overview': Event.objects.values_list('summary', flat=True).get(id=record.event_id) if record.event_id != None else '',
                'key_figures': {
                    'people_targeted': record.num_beneficiaries or 0,
                    'funding_req': record.amount_requested or 0,
                    'appeal_code': record.code,
                    'start_date': record.start_date,
                    'end_date': record.end_date,
                    # 'local_staff': localstaff[0] if localstaff else 0,
                    # 'volunteers': volunteers[0] if volunteers else 0,
                    # 'expat_delegates': expats[0] if expats else 0,
                },
                'field_reports': list(FieldReport.objects.filter(event_id=record.event_id)) if record.event_id != None else None,
            }
        elif rtype == RecordType.WEEKLY_DIGEST:
            dig_time = self.get_time_threshold_digest()
            rec_obj = {
                'active_dref': self.get_weekly_digest_data('dref'),
                'active_ea': self.get_weekly_digest_data('ea'),
                'funding_coverage': self.get_weekly_digest_data('fund'),
                'budget': self.get_weekly_digest_data('budget'),
                'population': self.get_weekly_digest_data('pop'),
                'highlighted_ops': self.get_weekly_digest_highlights(),
                'latest_ops': self.get_weekly_digest_latest_ops(),
                'latest_deployments': list(SurgeAlert.objects.filter(created_at__gte=dig_time).order_by('-created_at')),
                'latest_field_reports': self.get_weekly_latest_frs(),
            }
        else: # The default (old) template
            rec_obj = {
                'resource_uri': self.get_resource_uri(record, rtype),
                'admin_uri': self.get_admin_uri(record, rtype),
                'title': self.get_record_title(record, rtype),
                'content': shortened,
            }
        return rec_obj
    def notify(self, records, rtype, stype, uid=None):
        """Render and send a notification e-mail about ``records``.

        When ``uid`` is None this is a batch notification to all matching subscribers;
        otherwise it is a personal notification to the single user with that id
        (one-by-one followed records).
        """
        record_count = 0
        if records:
            record_count = records.count()
        # Weekly digests are sent even with zero records; everything else needs at least one
        if not record_count and rtype != RecordType.WEEKLY_DIGEST:
            return
        # Decide if it is a personal notification or batch
        if uid is None:
            emails = self.gather_subscribers(records, rtype, stype)
            if not len(emails):
                return
        else:
            usr = User.objects.filter(pk=uid, is_active=True)
            if not len(usr):
                return
            else:
                emails = list(usr.values_list('email', flat=True)) # Only one email in this case
        # TODO: maybe this needs to be adjusted based on the new functionality (at first only handling Weekly Digest)
        # Only serialize the first 10 records
        record_entries = []
        if rtype == RecordType.WEEKLY_DIGEST:
            record_entries.append(self.construct_template_record(rtype, None))
        else:
            entries = list(records) if record_count <= 10 else list(records[:10])
            for record in entries:
                record_entries.append(self.construct_template_record(rtype, record))
        if uid is not None:
            is_staff = usr.values_list('is_staff', flat=True)[0]
        if rtype == RecordType.WEEKLY_DIGEST:
            record_type = 'weekly digest'
        else:
            record_type = self.get_record_display(rtype, record_count)
        # Build the e-mail subject line
        if uid is None:
            adj = 'new' if stype == SubscriptionType.NEW else 'modified'
            #subject = '%s %s %s in IFRC GO' % (
            if rtype == RecordType.WEEKLY_DIGEST:
                subject = '%s %s' % (
                    adj,
                    record_type,
                )
            else:
                subject = '%s %s %s' % (
                    record_count,
                    adj,
                    record_type,
                )
        else:
            #subject = '%s followed %s modified in IFRC GO' % (
            subject = '%s followed %s modified' % (
                record_count,
                record_type,
            )
        if self.is_retro_mode():
            subject += ' [daily followup]'
        # Pick the template: specific ones for field reports, appeals and digests,
        # the generic one for everything else
        template_path = self.get_template()
        if rtype == RecordType.FIELD_REPORT or rtype == RecordType.APPEAL or rtype == RecordType.WEEKLY_DIGEST:
            template_path = self.get_template(rtype)
        html = render_to_string(template_path, {
            'hello': get_hello(),
            'count': record_count,
            'records': record_entries,
            'is_staff': True if uid is None else is_staff, # TODO: fork the sending to "is_staff / not ~" groups
            'subject': subject,
        })
        recipients = emails
        if uid is None:
            if record_count == 1:
                subject += ': ' + record_entries[0]['title'] # On purpose after rendering – the subject changes only, not email body
            # For new (email-documented :10) events we store data to events_sent_to{ event_id: recipients }
            if stype == SubscriptionType.EDIT: # Recently we do not allow EDIT substription
                for e in list(records.values('id'))[:10]:
                    i = e['id']
                    if i not in events_sent_to:
                        events_sent_to[i] = []
                    email_list_to_add = list(set(events_sent_to[i] + recipients))
                    if email_list_to_add:
                        events_sent_to[i] = list(filter(None, email_list_to_add)) # filter to skip empty elements
            plural = '' if len(emails) == 1 else 's' # record_type has its possible plural thanks to get_record_display()
            logger.info('Notifying %s subscriber%s about %s %s %s' % (len(emails), plural, record_count, adj, record_type))
            send_notification(subject, recipients, html)
        else:
            if len(recipients):
                # check if email is not in events_sent_to{event_id: recipients}
                if not emails:
                    logger.info('Silent about the one-by-one subscribed %s – user %s has not set email address' % (record_type, uid))
                # Recently we do not allow EDIT (modif.) subscription, so it is irrelevant recently (do not check the 1+ events in loop) :
                elif (records[0].id not in events_sent_to) or (emails[0] not in events_sent_to[records[0].id]):
                    logger.info('Notifying %s subscriber about %s one-by-one subscribed %s' % (len(emails), record_count, record_type))
                    send_notification(subject, recipients, html)
                else:
                    logger.info('Silent about a one-by-one subscribed %s – user already notified via generic subscription' % (record_type))
def index_new_records(self, records):
self.bulk([self.convert_for_bulk(record, create=True) for record in list(records)])
def index_updated_records(self, records):
self.bulk([self.convert_for_bulk(record, create=False) for record in list(records)])
def convert_for_bulk(self, record, create):
data = record.indexing()
metadata = {
'_op_type': 'create' if create else 'update',
'_index': ES_PAGE_NAME,
'_type': 'page',
'_id': record.es_id()
}
if (create):
metadata.update(**data)
else:
metadata['doc'] = data
return metadata
    def bulk(self, actions):
        """Send *actions* to elasticsearch via the bulk helper, logging failures.

        Per-item errors reported by the helper, and wholesale failures
        (connection problems etc.), are logged and deliberately swallowed so
        a broken index never aborts the surrounding notification run.
        """
        try:
            # NOTE(review): `bulk` here resolves to the module-level
            # elasticsearch bulk helper (presumably imported at the top of
            # this file), not to this method - confirm when refactoring.
            created, errors = bulk(client=ES_CLIENT , actions=actions)
            if len(errors):
                logger.error('Produced the following errors:')
                logger.error('[%s]' % ', '.join(map(str, errors)))
        except Exception as e:
            # Truncate potentially huge elasticsearch error payloads.
            logger.error('Could not index records')
            logger.error('%s...' % str(e)[:512])
# Remove items in a queryset where updated_at == created_at.
# This leaves us with only ones that have been modified.
def filter_just_created(self, queryset):
if queryset.first() is None:
return []
if hasattr(queryset.first(), 'modified_at') and queryset.first().modified_at is not None:
return [record for record in queryset if (
record.modified_at.replace(microsecond=0) == record.created_at.replace(microsecond=0))]
else:
return [record for record in queryset if (
record.updated_at.replace(microsecond=0) == record.created_at.replace(microsecond=0))]
    def handle(self, *args, **options):
        """Entry point: send subscription notifications and sync Elasticsearch.

        Builds time-window Q conditions, then either (retro mode) re-checks
        followed events that changed twice in the last day, or (normal run)
        notifies subscribers about new/updated records per type and indexes
        them. Digest mode widens the window and merges everything into one
        weekly mail.
        """
        if self.is_digest_mode():
            t = self.get_time_threshold_digest() # in digest mode (1ce a week, for new_entities only) we use a bigger interval
        else:
            t = self.get_time_threshold()
        t2 = self.get_time_threshold2()
        cond1 = Q(created_at__gte=t)
        condU = Q(updated_at__gte=t)
        condR = Q(real_data_update__gte=t) # instead of modified at
        cond2 = ~Q(previous_update__gte=t2) # we negate (~) this, so we want: no previous_update in the last day. So: send once a day!
        condF = Q(auto_generated_source='New field report') # We exclude those events that were generated from field reports, to avoid 2x notif.
        # In this section we check if there was 2 FOLLOWED_EVENT modifications in the last 24 hours (for which there was no duplicated email sent, but now will be one).
        if self.is_retro_mode():
            condU = Q(updated_at__gte=t2)
            cond2 = Q(previous_update__gte=t2) # not negated. We collect those, who had 2 changes in the last 1 day.
            followed_eventparams = Subscription.objects.filter(event_id__isnull=False)
            users_of_followed_events = followed_eventparams.values_list('user_id', flat=True).distinct()
            for usr in users_of_followed_events: # looping in user_ids of specific FOLLOWED_EVENT subscriptions (8)
                eventlist = followed_eventparams.filter(user_id=usr).values_list('event_id', flat=True).distinct()
                cond3 = Q(pk__in=eventlist) # getting their events as a condition
                followed_events = Event.objects.filter(condU & cond2 & cond3)
                if len(followed_events): # usr - unique (we loop one-by-one), followed_events - more
                    self.notify(followed_events, RecordType.FOLLOWED_EVENT, SubscriptionType.NEW, usr)
        else:
            # Normal run: collect the new/updated querysets per record type.
            new_reports = FieldReport.objects.filter(cond1)
            updated_reports = FieldReport.objects.filter(condU & cond2)
            new_appeals = Appeal.objects.filter(cond1)
            updated_appeals = Appeal.objects.filter(condR & cond2)
            new_events = Event.objects.filter(cond1).exclude(condF)
            updated_events = Event.objects.filter(condU & cond2)
            new_surgealerts = SurgeAlert.objects.filter(cond1)
            new_pers_deployments = PersonnelDeployment.objects.filter(cond1) # CHECK: Best instantiation of Deployment Messages? Frontend appearance?!?
            # No need for indexing for personnel deployments
            # Approaching End of Mission ? new_approanching_end = PersonnelDeployment.objects.filter(end-date is close?)
            # No need for indexing for Approaching End of Mission
            # PER Due Dates ? new_per_due_date_warnings = User.objects.filter(PER admins of countries/regions, for whom the setting/per_due_date is in 1 week)
            # No need for indexing for PER Due Dates
            followed_eventparams = Subscription.objects.filter(event_id__isnull=False)
            ## followed_events = Event.objects.filter(updated_at__gte=t, pk__in=[x.event_id for x in followed_eventparams])
            # Merge Weekly Digest into one mail instead of separate ones
            if self.is_digest_mode():
                self.notify(None, RecordType.WEEKLY_DIGEST, SubscriptionType.NEW)
            else:
                self.notify(new_reports, RecordType.FIELD_REPORT, SubscriptionType.NEW)
                #self.notify(updated_reports, RecordType.FIELD_REPORT, SubscriptionType.EDIT)
                self.notify(new_appeals, RecordType.APPEAL, SubscriptionType.NEW)
                #self.notify(updated_appeals, RecordType.APPEAL, SubscriptionType.EDIT)
                self.notify(new_events, RecordType.EVENT, SubscriptionType.NEW)
                #self.notify(updated_events, RecordType.EVENT, SubscriptionType.EDIT)
                self.notify(new_surgealerts, RecordType.SURGE_ALERT, SubscriptionType.NEW)
                self.notify(new_pers_deployments, RecordType.SURGE_DEPLOYMENT_MESSAGES, SubscriptionType.NEW)
            # One mail per user who follows specific events (FOLLOWED_EVENT).
            users_of_followed_events = followed_eventparams.values_list('user_id', flat=True).distinct()
            for usr in users_of_followed_events: # looping in user_ids of specific FOLLOWED_EVENT subscriptions (8)
                eventlist = followed_eventparams.filter(user_id=usr).values_list('event_id', flat=True).distinct()
                cond3 = Q(pk__in=eventlist) # getting their events as a condition
                followed_events = Event.objects.filter(condU & cond2 & cond3)
                if len(followed_events): # usr - unique (we loop one-by-one), followed_events - more
                    self.notify(followed_events, RecordType.FOLLOWED_EVENT, SubscriptionType.NEW, usr)
            # Sync Elasticsearch: updates first (skipping just-created rows),
            # then create-ops for the genuinely new records.
            logger.info('Indexing %s updated field reports' % updated_reports.count())
            self.index_updated_records(self.filter_just_created(updated_reports))
            logger.info('Indexing %s updated appeals' % updated_appeals.count())
            self.index_updated_records(self.filter_just_created(updated_appeals))
            logger.info('Indexing %s updated events' % updated_events.count())
            self.index_updated_records(self.filter_just_created(updated_events))
            logger.info('Indexing %s new field reports' % new_reports.count())
            self.index_new_records(new_reports)
            logger.info('Indexing %s new appeals' % new_appeals.count())
            self.index_new_records(new_appeals)
            logger.info('Indexing %s new events' % new_events.count())
            self.index_new_records(new_events)
| batpad/go-api | api/management/commands/index_and_notify.py | index_and_notify.py | py | 31,984 | python | en | code | 0 | github-code | 36 |
37738312088 | # -*- coding: utf-8 -*-
from collections import defaultdict
import struct
from sqlalchemy.sql.expression import text
from ambry.orm.dataset import Dataset
from ambry.library.search_backends.base import BaseDatasetIndex, BasePartitionIndex,\
BaseIdentifierIndex, BaseSearchBackend, IdentifierSearchResult,\
DatasetSearchResult, PartitionSearchResult, SearchTermParser
from ambry.util import get_logger
import logging
logger = get_logger(__name__, propagate=False)
#logger.setLevel(logging.DEBUG)
class SQLiteSearchBackend(BaseSearchBackend):
    """ Search backend implemented on top of SQLite FTS3 virtual tables. """
    def _get_dataset_index(self):
        """ Returns dataset index. """
        # returns initialized dataset index
        return DatasetSQLiteIndex(backend=self)
    def _get_partition_index(self):
        """ Returns partition index. """
        return PartitionSQLiteIndex(backend=self)
    def _get_identifier_index(self):
        """ Returns identifier index. """
        return IdentifierSQLiteIndex(backend=self)
    def _and_join(self, terms):
        """ AND join of the terms.
        Args:
            terms (list):
        Examples:
            self._and_join(['term1', 'term2'])
        Returns:
            str
        """
        # Joining with a single space is the AND: in SQLite FTS3 MATCH
        # syntax adjacent terms are implicitly AND-ed.
        # NOTE(review): _or_join is not defined in this class, so it is
        # presumably inherited from BaseSearchBackend - confirm.
        if len(terms) > 1:
            return ' '.join([self._or_join(t) for t in terms])
        else:
            return self._or_join(terms[0])
class DatasetSQLiteIndex(BaseDatasetIndex):
    """ SQLite FTS3 backed search index over dataset documents. """

    def __init__(self, backend=None):
        assert backend is not None, 'backend argument can not be None.'
        # Name the base class explicitly: super(self.__class__, ...) recurses
        # forever if this class is ever subclassed.
        super(DatasetSQLiteIndex, self).__init__(backend=backend)
        logger.debug('Creating dataset FTS table.')
        query = """\
            CREATE VIRTUAL TABLE IF NOT EXISTS dataset_index USING fts3(
                vid VARCHAR(256) NOT NULL,
                title TEXT,
                keywords TEXT,
                doc TEXT
            );
        """
        self.backend.library.database.connection.execute(query)

    def _make_query_from_terms(self, terms):
        """ Creates a query for dataset from decomposed search terms.
        Args:
            terms (dict or unicode or string):
        Returns:
            tuple of (str, dict): First element is str with FTS query, second is parameters of the query.
        """
        match_query = ''
        expanded_terms = self._expand_terms(terms)
        if expanded_terms['doc']:
            match_query = self.backend._or_join(expanded_terms['doc'])
        if expanded_terms['keywords']:
            if match_query:
                # Bug fix: _and_join() takes a single list argument; it was
                # previously called with two positional arguments (and the
                # result appended with +=), raising TypeError whenever both
                # doc terms and keywords were present. This mirrors the
                # working call in PartitionSQLiteIndex._make_query_from_terms.
                match_query = self.backend._and_join(
                    [match_query, self.backend._join_keywords(expanded_terms['keywords'])])
            else:
                match_query = self.backend._join_keywords(expanded_terms['keywords'])
        query = text("""
            SELECT vid, rank(matchinfo(dataset_index)) AS score
            FROM dataset_index
            WHERE dataset_index MATCH :match_query
            ORDER BY score DESC;
        """)
        query_params = {
            'match_query': match_query}
        return query, query_params

    def search(self, search_phrase, limit=None):
        """ Finds datasets by search phrase.
        Args:
            search_phrase (str or unicode):
            limit (int, optional): how many results to return. None means without limit.
        Returns:
            list of DatasetSearchResult instances.
        """
        # SQLite FTS can't find terms with `-`, therefore all hyphens were replaced with underscore
        # before save. Now to get appropriate result we need to replace all hyphens in the search phrase.
        # See http://stackoverflow.com/questions/3865733/how-do-i-escape-the-character-in-sqlite-fts3-queries
        search_phrase = search_phrase.replace('-', '_')
        query, query_params = self._make_query_from_terms(search_phrase)
        self._parsed_query = (query, query_params)
        connection = self.backend.library.database.connection
        # Operate on the raw connection: the custom `rank` SQL function must
        # be registered on the underlying sqlite3 handle.
        connection.connection.create_function('rank', 1, _make_rank_func((1., .1, 0, 0)))
        logger.debug('Searching datasets using `{}` query.'.format(query))
        results = connection.execute(query,
                                     **query_params).fetchall() # Query on the Sqlite proxy to the raw connection
        # defaultdict lets the partition pass below create entries for
        # datasets that only matched through one of their partitions.
        datasets = defaultdict(DatasetSearchResult)
        for result in results:
            vid, score = result
            datasets[vid] = DatasetSearchResult()
            datasets[vid].vid = vid
            datasets[vid].b_score = score
        logger.debug('Extending datasets with partitions.')
        for partition in self.backend.partition_index.search(search_phrase):
            datasets[partition.dataset_vid].p_score += partition.score
            datasets[partition.dataset_vid].partitions.add(partition)
        return list(datasets.values())

    def list_documents(self, limit=None):
        """ Generates vids of all indexed datasets.
        Args:
            limit (int, optional): If not empty, the maximum number of results to return
        Generates:
            str: vid of the dataset.
        """
        limit_str = ''
        if limit:
            try:
                limit_str = 'LIMIT {}'.format(int(limit))
            except (TypeError, ValueError):
                # Non-numeric limit: silently ignore and return everything.
                pass
        query = ('SELECT vid FROM dataset_index ' + limit_str)
        for row in self.backend.library.database.connection.execute(query).fetchall():
            yield row['vid']

    def _as_document(self, dataset):
        """ Converts dataset to document indexed by to FTS index.
        Args:
            dataset (orm.Dataset): dataset to convert.
        Returns:
            dict with structure matches to BaseDatasetIndex._schema.
        """
        assert isinstance(dataset, Dataset)
        doc = super(DatasetSQLiteIndex, self)._as_document(dataset)
        # SQLite FTS can't find terms with `-`, replace it with underscore here and while searching.
        # See http://stackoverflow.com/questions/3865733/how-do-i-escape-the-character-in-sqlite-fts3-queries
        doc['keywords'] = doc['keywords'].replace('-', '_')
        doc['doc'] = doc['doc'].replace('-', '_')
        doc['title'] = doc['title'].replace('-', '_')
        return doc

    def _index_document(self, document, force=False):
        """ Adds document to the index. """
        query = text("""
            INSERT INTO dataset_index(vid, title, keywords, doc)
            VALUES(:vid, :title, :keywords, :doc);
        """)
        self.backend.library.database.connection.execute(query, **document)

    def reset(self):
        """ Drops index table. """
        query = """
            DROP TABLE dataset_index;
        """
        self.backend.library.database.connection.execute(query)

    def _delete(self, vid=None):
        """ Deletes given dataset from index.
        Args:
            vid (str): dataset vid.
        """
        query = text("""
            DELETE FROM dataset_index
            WHERE vid = :vid;
        """)
        self.backend.library.database.connection.execute(query, vid=vid)

    def is_indexed(self, dataset):
        """ Returns True if dataset is already indexed. Otherwise returns False. """
        query = text("""
            SELECT vid
            FROM dataset_index
            WHERE vid = :vid;
        """)
        result = self.backend.library.database.connection.execute(query, vid=dataset.vid)
        return bool(result.fetchall())

    def all(self):
        """ Returns list with all indexed datasets. """
        datasets = []
        query = text("""
            SELECT vid
            FROM dataset_index;""")
        for result in self.backend.library.database.connection.execute(query):
            res = DatasetSearchResult()
            res.vid = result[0]
            # No query to rank against, so every dataset gets a flat score.
            res.b_score = 1
            datasets.append(res)
        return datasets
class IdentifierSQLiteIndex(BaseIdentifierIndex):
    """ SQLite FTS3 backed search index over identifier documents. """
    def __init__(self, backend=None):
        assert backend is not None, 'backend argument can not be None.'
        # NOTE(review): super(self.__class__, ...) recurses forever if this
        # class is subclassed; an explicit class name would be safer.
        super(self.__class__, self).__init__(backend=backend)
        logger.debug('Creating identifier FTS table.')
        query = """\
            CREATE VIRTUAL TABLE IF NOT EXISTS identifier_index USING fts3(
                identifier VARCHAR(256) NOT NULL,
                type VARCHAR(256) NOT NULL,
                name TEXT
            );
        """
        self.backend.library.database.connection.execute(query)
    def search(self, search_phrase, limit=None):
        """ Finds identifiers by search phrase.
        Args:
            search_phrase (str or unicode):
            limit (int, optional): how many results to return. None means without limit.
        Yields:
            IdentifierSearchResult instances (score is always 0 here; the
            query does not rank its matches).
        """
        # NOTE(review): the leading `*` in `*{}*` is presumably intended as a
        # wildcard, but SQLite FTS only supports the trailing-`*` prefix
        # token - confirm the intended matching behaviour.
        query_parts = [
            'SELECT identifier, type, name, 0',
            'FROM identifier_index',
            'WHERE name MATCH :part']
        query_params = {
            'part': '*{}*'.format(search_phrase)}
        query_parts.append('ORDER BY name')
        if limit:
            query_parts.append('LIMIT :limit')
            query_params['limit'] = limit
        query_parts.append(';')
        query = text('\n'.join(query_parts))
        results = self.backend.library.database.connection.execute(query, **query_params).fetchall()
        for result in results:
            vid, type, name, score = result
            yield IdentifierSearchResult(
                score=score, vid=vid,
                type=type, name=name)
    def list_documents(self, limit=None):
        """ Generates vids of all indexed identifiers.
        Args:
            limit (int, optional): If not empty, the maximum number of results to return
        Generates:
            str: vid of the document.
        """
        limit_str = ''
        if limit:
            try:
                limit_str = 'LIMIT {}'.format(int(limit))
            except (TypeError, ValueError):
                # Non-numeric limit: silently ignore and return everything.
                pass
        query = ('SELECT identifier FROM identifier_index ' + limit_str)
        for row in self.backend.library.database.connection.execute(query).fetchall():
            yield row['identifier']
    def _index_document(self, identifier, force=False):
        """ Adds identifier document to the index. """
        query = text("""
            INSERT INTO identifier_index(identifier, type, name)
            VALUES(:identifier, :type, :name);
        """)
        self.backend.library.database.connection.execute(query, **identifier)
    def reset(self):
        """ Drops index table. """
        query = """
            DROP TABLE identifier_index;
        """
        self.backend.library.database.connection.execute(query)
    def _delete(self, identifier=None):
        """ Deletes given identifier from index.
        Args:
            identifier (str): identifier of the document to delete.
        """
        query = text("""
            DELETE FROM identifier_index
            WHERE identifier = :identifier;
        """)
        self.backend.library.database.connection.execute(query, identifier=identifier)
    def is_indexed(self, identifier):
        """ Returns True if identifier is already indexed. Otherwise returns False. """
        query = text("""
            SELECT identifier
            FROM identifier_index
            WHERE identifier = :identifier;
        """)
        result = self.backend.library.database.connection.execute(query, identifier=identifier['identifier'])
        return bool(result.fetchall())
    def all(self):
        """ Returns list with all indexed identifiers. """
        identifiers = []
        query = text("""
            SELECT identifier, type, name
            FROM identifier_index;""")
        for result in self.backend.library.database.connection.execute(query):
            vid, type_, name = result
            # Flat score: there is no query to rank against.
            res = IdentifierSearchResult(
                score=1, vid=vid, type=type_, name=name)
            identifiers.append(res)
        return identifiers
class PartitionSQLiteIndex(BasePartitionIndex):
    """ SQLite FTS3 backed search index over partition documents. """

    def __init__(self, backend=None):
        assert backend is not None, 'backend argument can not be None.'
        # Name the base class explicitly: super(self.__class__, ...) recurses
        # forever if this class is ever subclassed.
        super(PartitionSQLiteIndex, self).__init__(backend=backend)
        logger.debug('Creating partition FTS table.')
        query = """\
            CREATE VIRTUAL TABLE IF NOT EXISTS partition_index USING fts3(
                vid VARCHAR(256) NOT NULL,
                dataset_vid VARCHAR(256) NOT NULL,
                from_year INTEGER,
                to_year INTEGER,
                title TEXT,
                keywords TEXT,
                doc TEXT
            );
        """
        self.backend.library.database.connection.execute(query)

    def search(self, search_phrase, limit=None):
        """ Finds partitions by search phrase.
        Args:
            search_phrase (str or unicode):
            limit (int, optional): how many results to generate. None means without limit.
        Generates:
            PartitionSearchResult instances.
        """
        # SQLite FTS can't find terms with `-`, therefore all hyphens replaced with underscore before save.
        # Now to make proper query we need to replace all hyphens in the search phrase.
        # See http://stackoverflow.com/questions/3865733/how-do-i-escape-the-character-in-sqlite-fts3-queries
        search_phrase = search_phrase.replace('-', '_')
        terms = SearchTermParser().parse(search_phrase)
        from_year = terms.pop('from', None)
        to_year = terms.pop('to', None)
        query, query_params = self._make_query_from_terms(terms)
        self._parsed_query = (query, query_params)
        connection = self.backend.library.database.connection
        # Register the custom `rank` SQL function on the raw sqlite3 handle.
        connection.connection.create_function('rank', 1, _make_rank_func((1., .1, 0, 0)))
        # SQLite FTS implementation does not allow to create indexes on FTS tables.
        # see https://sqlite.org/fts3.html 1.5. Summary, p 1:
        # ... it is not possible to create indices ...
        #
        # So, filter years range here.
        # (Params passed as keyword args for consistency with the other
        # index classes; previously the dict was passed positionally.)
        results = connection.execute(query, **query_params).fetchall()
        for result in results:
            vid, dataset_vid, score, db_from_year, db_to_year = result
            # Keep only partitions whose stored coverage contains the
            # requested years. Robustness fix: partitions stored without a
            # year (NULL) used to raise TypeError on the int/None comparison
            # under Python 3; they are now kept instead.
            if from_year and db_from_year is not None and from_year < db_from_year:
                continue
            if to_year and db_to_year is not None and to_year > db_to_year:
                continue
            yield PartitionSearchResult(
                vid=vid, dataset_vid=dataset_vid, score=score)

    def list_documents(self, limit=None):
        """ Generates vids of all indexed partitions.
        Args:
            limit (int, optional): If not empty, the maximum number of results to return
        Generates:
            str: vid of the document.
        """
        limit_str = ''
        if limit:
            try:
                limit_str = 'LIMIT {}'.format(int(limit))
            except (TypeError, ValueError):
                # Non-numeric limit: silently ignore and return everything.
                pass
        query = ('SELECT vid FROM partition_index ' + limit_str)
        for row in self.backend.library.database.connection.execute(query).fetchall():
            yield row['vid']

    def _as_document(self, partition):
        """ Converts partition to document indexed by to FTS index.
        Args:
            partition (orm.Partition): partition to convert.
        Returns:
            dict with structure matches to BasePartitionIndex._schema.
        """
        doc = super(PartitionSQLiteIndex, self)._as_document(partition)
        # SQLite FTS can't find terms with `-`, replace it with underscore here and while searching.
        # See http://stackoverflow.com/questions/3865733/how-do-i-escape-the-character-in-sqlite-fts3-queries
        doc['keywords'] = doc['keywords'].replace('-', '_')
        doc['doc'] = doc['doc'].replace('-', '_')
        doc['title'] = doc['title'].replace('-', '_')
        # pass time_coverage to the _index_document.
        doc['time_coverage'] = partition.time_coverage
        return doc

    def _make_query_from_terms(self, terms):
        """ Creates a query for partition from decomposed search terms.
        Args:
            terms (dict or unicode or string):
        Returns:
            tuple of (str, dict): First element is str with FTS query, second is parameters of the query.
        """
        match_query = ''
        expanded_terms = self._expand_terms(terms)
        if expanded_terms['doc']:
            match_query = self.backend._and_join(expanded_terms['doc'])
        if expanded_terms['keywords']:
            if match_query:
                match_query = self.backend._and_join(
                    [match_query, self.backend._join_keywords(expanded_terms['keywords'])])
            else:
                match_query = self.backend._join_keywords(expanded_terms['keywords'])
        if match_query:
            query = text("""
                SELECT vid, dataset_vid, rank(matchinfo(partition_index)) AS score, from_year, to_year
                FROM partition_index
                WHERE partition_index MATCH :match_query
                ORDER BY score DESC;
            """)
            query_params = {
                'match_query': match_query}
        else:
            # Bug fix: the `AS score` alias was previously attached to
            # `to_year` instead of the rank expression. Rows are consumed
            # positionally by search(), so the column order here must stay
            # identical to the MATCH branch above.
            query = text("""
                SELECT vid, dataset_vid, rank(matchinfo(partition_index)) AS score, from_year, to_year
                FROM partition_index""")
            query_params = {}
        return query, query_params

    def _index_document(self, document, force=False):
        """ Adds parition document to the index. """
        from ambry.util import int_maybe
        # time_coverage is a list of years; store its endpoints (or NULL).
        time_coverage = document.pop('time_coverage', [])
        from_year = None
        to_year = None
        if time_coverage:
            from_year = int_maybe(time_coverage[0])
            to_year = int_maybe(time_coverage[-1])
        query = text("""
            INSERT INTO partition_index(vid, dataset_vid, title, keywords, doc, from_year, to_year)
            VALUES(:vid, :dataset_vid, :title, :keywords, :doc, :from_year, :to_year); """)
        self.backend.library.database.connection.execute(
            query, from_year=from_year, to_year=to_year, **document)

    def reset(self):
        """ Drops index table. """
        query = """
            DROP TABLE partition_index;
        """
        self.backend.library.database.connection.execute(query)

    def _delete(self, vid=None):
        """ Deletes partition with given vid from index.
        Args:
            vid (str): vid of the partition document to delete.
        """
        query = text("""
            DELETE FROM partition_index
            WHERE vid = :vid;
        """)
        self.backend.library.database.connection.execute(query, vid=vid)

    def is_indexed(self, partition):
        """ Returns True if partition is already indexed. Otherwise returns False. """
        query = text("""
            SELECT vid
            FROM partition_index
            WHERE vid = :vid;
        """)
        result = self.backend.library.database.connection.execute(query, vid=partition.vid)
        return bool(result.fetchall())

    def all(self):
        """ Returns list with vids of all indexed partitions. """
        partitions = []
        query = text("""
            SELECT dataset_vid, vid
            FROM partition_index;""")
        for result in self.backend.library.database.connection.execute(query):
            dataset_vid, vid = result
            # Flat score: there is no query to rank against.
            partitions.append(PartitionSearchResult(dataset_vid=dataset_vid, vid=vid, score=1))
        return partitions
def _make_rank_func(weights):
def rank(matchinfo):
# matchinfo is defined as returning 32-bit unsigned integers
# in machine byte order
# http://www.sqlite.org/fts3.html#matchinfo
# and struct defaults to machine byte order
matchinfo = struct.unpack('I' * int(len(matchinfo) / 4), matchinfo)
it = iter(matchinfo[2:])
return sum(x[0] * w / x[1]
for x, w in zip(list(zip(it, it, it)), weights)
if x[1])
return rank
| CivicSpleen/ambry | ambry/library/search_backends/sqlite_backend.py | sqlite_backend.py | py | 20,150 | python | en | code | 5 | github-code | 36 |
42822681247 | from math import dist, inf
from typing import Optional
from random import random, choice
from aasd.vehicle import Vehicle, VehicleType
class Environment:
    """Rectangular 2-D world that tracks every vehicle in the simulation."""

    def __init__(self, width: int = 1280, height: int = 720, object_size: int = 10, chance_to_crash: float = 0.001):
        self.width = width
        self.height = height
        # Rendered size of a vehicle; movement is clamped by it in move_vehicles().
        self.obj_size: int = object_size
        self.vehicles: list[Vehicle] = []
        # Probability knob exposed to callers that trigger make_random_accident().
        self.chance_to_crash: float = chance_to_crash

    def register_vehicle(self, vehicle: Vehicle):
        """Add *vehicle* to the simulation."""
        self.vehicles.append(vehicle)

    def unregister_vehicle(self, vehicle: Vehicle):
        """Remove *vehicle* from the simulation (ValueError if absent)."""
        self.vehicles.remove(vehicle)

    def get_emergency_vehicles(self) -> list[Vehicle]:
        """Return every registered vehicle of type Emergency."""
        return [
            vehicle
            for vehicle in self.vehicles
            if vehicle.type is VehicleType.Emergency
        ]

    def get_nearby_vehicles(self, caller: Vehicle, radius: float):
        """Return all other vehicles within *radius* of *caller*."""
        return [
            vehicle
            for vehicle in self.vehicles
            if is_nearby(vehicle, caller, radius) and vehicle is not caller
        ]

    def get_random_coordinates(self) -> tuple[float, float]:
        """Return a random (x, y) offset 5 units from the origin, clamped to the world size."""
        x = random() * self.width + 5
        y = random() * self.height + 5
        if x > self.width:
            x = float(self.width)
        if y > self.height:
            y = float(self.height)
        return x, y

    def move_vehicles(self):
        """Advance every vehicle one step, keeping it inside the drawable area."""
        for vehicle in self.vehicles:
            vehicle.move(self.width - self.obj_size, self.height - self.obj_size)

    def make_random_accident(self) -> str:
        """Mark a random vehicle as crashed and return its id."""
        vehicle = choice(self.vehicles)
        vehicle.type = VehicleType.Crashed
        return vehicle.id

    def get_vehicle(self, vehicle_id: str) -> Optional[Vehicle]:
        """Return the vehicle with *vehicle_id*, or None when unknown."""
        for vehicle in self.vehicles:
            if vehicle.id == vehicle_id:
                return vehicle
        return None

    def get_closest_emergency_vehicle(self, x: float, y: float):
        """Return the emergency vehicle closest to (x, y), or None when there is none."""
        # Consistency: reuse get_emergency_vehicles() instead of duplicating
        # the type filter inline.
        closest_ev = None
        lowest_distance = inf
        for ev in self.get_emergency_vehicles():
            distance = dist(ev.get_coordinates(), (x, y))
            if distance < lowest_distance:
                lowest_distance = distance
                closest_ev = ev
        return closest_ev

    def are_vehicles_nearby(self, id1: str, id2: str, radius: float) -> bool:
        """Return True when both ids resolve to vehicles within *radius* of each other."""
        return is_nearby(self.get_vehicle(id1), self.get_vehicle(id2), radius)
def is_nearby(vehicle1: Optional[Vehicle], vehicle2: Optional[Vehicle], radius: float) -> bool:
    """Return True when both vehicles exist and lie within *radius* of each other."""
    if vehicle1 is not None and vehicle2 is not None:
        separation = dist(vehicle1.get_coordinates(), vehicle2.get_coordinates())
        return separation <= radius
    return False
| Pruxon/AASD | aasd/environment.py | environment.py | py | 2,733 | python | en | code | 0 | github-code | 36 |
36173205593 | import random
import json
import znc
class slapanswer(znc.Module):
    """ZNC network module that replies to channel /me "slap" actions with a
    pacifist quote, configurable via module commands."""

    description = 'Answer slaps'
    module_types = [znc.CModInfo.NetworkModule]

    def OnLoad(self, args, message):
        """Load persisted answers from ZNC's registry (self.nv) or seed defaults."""
        self.default_answers = [
            '"Be kind whenever possible. It is always possible." - Dalai Lama',
            '"Where ignorance is our master, there is no possibility of real'
            ' peace." - Dalai Lama',
            '"We can never obtain peace in the outer world until we make peace'
            ' with ourselves." - Dalai Lama',
            '"An eye for an eye will only make the whole world blind."'
            ' - Mahatma Gandhi',
            '"The best fighter is never angry" - Lao Tzu',
            '"Peace cannot be achieved through violence, it can only be'
            ' attained through understanding." - Ralph Waldo Emerson',
            '"Silence is sometimes the best answer" - Dalai Lama',
        ]
        if 'answers' in self.nv:
            self.ANSWERS = json.loads(self.nv['answers'])
        else:
            # Bug fix: use a copy. Aliasing default_answers meant that
            # command_add() also mutated the defaults, so a later reset
            # could not restore them.
            self.ANSWERS = list(self.default_answers)
            self.save_answers()
        return True

    def OnModCommand(self, cmd):
        """Dispatch a module command: help, add, remove, reset or list."""
        split = cmd.split()
        if not split:
            # Robustness: an empty command used to raise IndexError below.
            return
        command = str(split[0]).lower()
        args = [a.lower() for a in split[1:]]
        if command == 'help':
            self.command_help()
        elif command == 'add':
            self.command_add(args)
        elif command == 'remove':
            self.command_remove(args)
        elif command == 'reset':
            self.command_reset()
        elif command == 'list':
            self.command_list()

    def save_answers(self):
        """Persist the current answers as JSON in ZNC's registry."""
        self.nv['answers'] = json.dumps(self.ANSWERS)

    def command_help(self):
        """Print the command reference to the module buffer."""
        self.PutModule('\n'.join([
            'add <msg> | add a msg (replace nick with {nick})',
            'remove <id> | remove msg with id <id> (get id\'s with "list")',
            'reset | reset msgs to default',
            'list | get a list with msgs'
        ]))
        return True

    def command_add(self, args):
        """Append a new answer (single line; '{nick}' is substituted on use)."""
        msg = ' '.join(args)
        if '\n' in msg:
            self.PutModule('ERROR: Line-Breaks are not allowed in answers!')
            return True
        self.ANSWERS.append(msg)
        self.save_answers()
        self.PutModule('Successfully added answer!')
        return True

    def command_remove(self, args):
        """Remove the answer with the numeric id in args[0] (see 'list')."""
        try:
            answer_id = int(args[0])
        except (IndexError, ValueError):
            # Bug fix: the error path used to fall through and reference the
            # unbound `answer_id` (UnboundLocalError); a missing argument
            # (IndexError) was not handled at all. Report and bail out.
            self.PutModule('ERROR: Invalid ID!')
            return True
        if answer_id < len(self.ANSWERS) and answer_id >= 0:
            del self.ANSWERS[answer_id]
            self.save_answers()
            self.PutModule('Successfully removed answer!')
        else:
            self.PutModule(
                'ERROR: Invalid ID! Try "list" for a list of id\'s!'
            )
        return True

    def command_reset(self):
        """Restore the built-in default answers."""
        # Bug fix: copy instead of aliasing (see OnLoad).
        self.ANSWERS = list(self.default_answers)
        self.save_answers()
        self.PutModule('Successfully reset answers!')
        return True

    def command_list(self):
        """Print every answer with its id."""
        for index, value in enumerate(self.ANSWERS):
            self.PutModule('{} | {}'.format(index, value))
        return True

    def OnChanAction(self, invoker, channel, message):
        """React to /me actions that slap our own nick."""
        own_nick = self.GetNetwork().GetIRCNick().GetNick()
        own_host = self.GetNetwork().GetIRCNick().GetHostMask()
        nick = invoker.GetNick()
        channel = channel.GetName()
        msg = str(message)
        if 'slap' in msg and own_nick in msg:
            self.answer_slap(channel, nick, own_host)
        return znc.CONTINUE

    def answer_slap(self, channel, nick, own_host):
        """Send a random answer to *channel* and echo it back to the user."""
        msg = random.choice(self.ANSWERS)
        if '{nick}' in msg:
            msg = msg.format(nick=nick)
        msg = 'PRIVMSG {channel} :{msg}'.format(channel=channel, msg=msg)
        self.GetNetwork().PutIRC(msg)
        # Echo to the attached client so the user sees their own reply.
        self.GetNetwork().PutUser(':{own_host} {msg}'.format(
            own_host=own_host, msg=msg))
| Thor77/SlapAnswer | slapanswer.py | slapanswer.py | py | 3,958 | python | en | code | 4 | github-code | 36 |
32480112663 | import sklearn
import os
import numpy as np
import matplotlib.pyplot as plt
import timeit
current_dir = os.getcwd()
# Mid-file import kept as-is: pulls in the heavy MNIST loader only when the
# script actually runs.
from tensorflow.keras.datasets import mnist
(X_train, Y_train), (X_test, Y_test) = mnist.load_data(path=current_dir + '/mnist.npz')
# FLATTING TRAIN DATA
# Flatten each 28x28 image into a 784-dimensional row vector.
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
# ONE HOT ENCODING
# Turn integer labels 0..9 into one-hot rows via an identity-matrix lookup.
nb_classes = 10
targets1 = np.array(Y_train).reshape(-1)
Y_train = np.eye(nb_classes)[targets1]
targets2 = np.array(Y_test).reshape(-1)
Y_test = np.eye(nb_classes)[targets2]
# DATASET SHAPE
print('MNIST Dataset Shape:')
print('X_train: ' + str(X_train.shape))
print('Y_train: ' + str(Y_train.shape))
print('X_test: ' + str(X_test.shape))
print('Y_test: ' + str(Y_test.shape))
# SOFTMAX ACTIVATION FUNCTION
def activation_function(z):
    """Row-wise softmax of logits *z* (shape (n_samples, n_classes)).

    Numerical-stability fix: the per-row maximum is subtracted before
    exponentiating so large logits no longer overflow ``exp`` into NaN;
    the result is unchanged because softmax is invariant to adding a
    constant within a row.

    Returns y hat: an array of the same shape whose rows are probabilities
    summing to 1.
    """
    shifted = z - np.max(z, axis=1, keepdims=True)
    exp_z = np.exp(shifted)
    return exp_z / np.sum(exp_z, axis=1, keepdims=True)
def loss_function(y, y_hat):
    """Total cross-entropy between one-hot targets *y* and predictions *y_hat*.

    Bug fix: the previous implementation summed ``y.T @ log(y_hat)``, which
    also accumulates the off-diagonal class pairings (column i of *y*
    against column j of ``log(y_hat)`` for i != j) and therefore does not
    equal the cross-entropy. The elementwise product keeps only each
    sample's predicted probability for its true class.
    """
    L = -1 * np.sum(y * np.log(y_hat))
    return L
def gradient_descent(y, y_hat, x):
    """Gradient of the log-likelihood w.r.t. the weights, scaled by ``y.size``.

    Shapes: *y* and *y_hat* are (batch, n_classes), *x* is (batch,
    n_features); the result is (n_classes, n_features). The caller applies
    it as an ascent step (``W = W + learningRate * grad``).
    """
    error = y - y_hat
    grad = (x.T @ error).T / y.size
    return grad
# INIT PARAMS
W = np.zeros((10, 784))
# NOTE(review): `b` is initialised but never used below - no bias term is
# trained; `y_hat` is overwritten before its first real use.
b = np.zeros(10)
y_hat = np.zeros(10)
learningRate = 0.001
# TRAIN
# 50 epochs of mini-batch gradient ascent on the log-likelihood:
# 100 batches of 600 samples each cover the 60000 training rows.
start = timeit.default_timer()
for i in range(50):
    for j in range(100):
        X_train_batch = X_train[(j * 600 + 0):(j + 1) * 600, ]
        Y_train_batch = Y_train[(j * 600 + 0):(j + 1) * 600, ]
        Z = np.dot(X_train_batch, W.transpose())
        y_hat = activation_function(Z)
        step = gradient_descent(Y_train_batch, y_hat, X_train_batch)
        W = W + learningRate * step
stop = timeit.default_timer()
# EVALUATE
# Predicted class = argmax of the softmax row; compare against true labels.
Z = np.dot(X_test, W.transpose())
y_hat = activation_function(Z)
y_pred = y_hat.argmax(axis=1)
true_labels = Y_test.argmax(axis=1)
correct = 0
total = 10000
for i in range(total):
    if y_pred[i] == true_labels[i]: # compare predicted vs true label of sample i
        correct += 1
accuracy = (correct / total)
# (The "ACCURANCY" typo is inside a runtime string, so it is left as-is.)
print("ACCURANCY OF SOFTMAX TRAINING IS: ", accuracy)
print('Time: ', stop - start)
| solmvz/MNIST-LogisticRegression | HW2_softmax.py | HW2_softmax.py | py | 2,198 | python | en | code | 4 | github-code | 36 |
73694485864 | import requests
from bs4 import BeautifulSoup
from bs4.element import ResultSet
import json
from telprefix.path import JSON_DATA_PATH
def getHTMLText(result: ResultSet | None) -> str:
    """Return the stripped text of a bs4 result, or None when *result* is None."""
    if result is None:
        return result
    return result.text.strip()
# Article URL
# Source: https://www.pinhome.id
# Blog post listing Indonesian phone-number prefixes, scraped by TelPrefixScrap.
URL = "https://www.pinhome.id/blog/kode-nomor-prefix/"
class TelPrefixScrap():
    """Scrapes Indonesian phone-number prefix tables from the article at URL
    into ``self.data``: {prefix: {provider, jenis, keterangan}} (Indonesian:
    jenis = kind/type, keterangan = remarks)."""
    def __init__(self) -> None:
        self.data = {}
        # NOTE(review): self.URL is stored but request() uses the module-level
        # URL constant directly - confirm which one is intended.
        self.URL = URL
    def request(self) -> BeautifulSoup:
        """Fetch the article and return its parsed HTML."""
        req = requests.get(URL)
        reqParse = BeautifulSoup(req.text, "html.parser")
        return reqParse
    def parse(self):
        """Walk every <table>, skipping each header row, and fill self.data.

        Assumes tables have either 4 columns (prefix, jenis, keterangan,
        provider) or 3 columns (prefix, keterangan, provider), judging by the
        header row's cell count - TODO confirm against the live page.
        """
        reqParse = self.request()
        tables = reqParse.find_all("table")
        currentTable = 0
        for index, table in enumerate(tables):
            tableRow = table.find_all("tr")
            for row in tableRow[1:]:
                # Extract table rows data
                prefix = row.find_all("td")[0]
                tableRowJudul = tableRow[0]
                if( len(tableRowJudul.find_all("td")) == 4 ):
                    jenis = row.find_all("td")[1]
                    keterangan = row.find_all("td")[2]
                    provider = row.find_all("td")[3]
                else:
                    jenis = None
                    keterangan = row.find_all("td")[1]
                    provider = row.find_all("td")[2]
                # Get & strip text
                prefix = getHTMLText(prefix)
                provider = getHTMLText(provider)
                jenis = getHTMLText(jenis)
                keterangan = getHTMLText(keterangan)
                # Tidy up
                # An empty provider cell inherits the provider of the row
                # stored last (rowspan-style tables).
                # NOTE(review): `index == currentTable` is always true here -
                # both counters advance together - so the None branch looks
                # unreachable; confirm before simplifying.
                if provider == "":
                    if index == currentTable:
                        provider = self.data[list(self.data.keys())[-1]]["provider"]
                    else:
                        provider = None
                # "atau" = "or", "dan" = "and": split multi-valued cells.
                if jenis is not None:
                    if "atau" in jenis:
                        jenis = jenis.split(" atau ")
                    elif "dan" in jenis:
                        jenis = jenis.split(" dan ")
                self.data[prefix] = {
                    "provider": provider,
                    "jenis": jenis,
                    "keterangan": keterangan
                }
            currentTable += 1
        return self.data
    def save(self):
        """Write the scraped data to JSON_DATA_PATH as pretty-printed JSON."""
        with open(JSON_DATA_PATH, "w") as file:
            json.dump(self.data, file, indent=4)
    def scrap(self):
        """Parse, persist, and return the scraped prefix data."""
        parse = self.parse()
        save = self.save()
        return parse
| manoedinata/telprefix | telprefix/scrap.py | scrap.py | py | 2,569 | python | en | code | 0 | github-code | 36 |
32673542433 | import os
import urllib.request
# Pretrained AlexNet weights and the training pickle fetched by get_data().
WEIGHTS_URL = 'https://d17h27t6h515a5.cloudfront.net/topher/2016/October/580d880c_bvlc-alexnet/bvlc-alexnet.npy'
TRAINING_URL = 'https://d17h27t6h515a5.cloudfront.net/topher/2016/October/580a829f_train/train.p'
def download(url, filename):
    """Download *url* to the local *filename*, printing progress.

    Returns the local filename for convenience.
    """
    print(f'Downloading {filename}...')
    urllib.request.urlretrieve(url, filename)
    size = os.path.getsize(filename)  # direct size query instead of the os.stat round-trip
    print(f'Successfully downloaded {filename} ({size} bytes)')
    return filename
def get_data():
    """Fetch the training pickle and AlexNet weights unless they already exist locally."""
    wanted = (
        (TRAINING_URL, 'train.p'),
        (WEIGHTS_URL, 'bvlc-alexnet.npy'),
    )
    for url, target in wanted:
        if not os.path.isfile(target):
            download(url, target)
#
# SAKARYA ÜNİVERSİTESİ BİLGİSAYAR VE BİLİŞİM BİLİMLERİ FAKÜLTESİ
# BİLGİSAYAR MÜHENDİSLİĞİ BÖLÜMÜ
# BİLGİSAYAR MÜHENDİSLİĞİ TASARIMI - 2. ÖĞRETİM P GRUBU
# EDA NUR KARAMUK - G181210061 & ELİF RUMEYSA AYDIN - G181210031
#
#
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QFileDialog
from PyQt5.QtGui import QImage, QPixmap
import cv2, imutils
import sqlite3 as sql
import os
# Bootstrap the database by shelling out to helper scripts (blocking calls);
# assumes Connection.py / CreateTable.py live in the current working directory.
os.system('python Connection.py')
os.system('python CreateTable.py')
from PlateRecognitionAlgorithm import plateRecognize
from PlateRecords import Ui_SecondWindow
class Ui_MainWindow(object):
    """Main window of the plate-recognition app: image picker on the left, results on the right."""
    def setupUi(self, MainWindow):
        """Build all widgets, apply styling, and wire button signals to handlers."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(1065, 594)
        MainWindow.setAutoFillBackground(False)
        MainWindow.setStyleSheet("background-color: rgb(226, 226, 226);")
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap("Icon/car.png"), QtGui.QIcon.Selected, QtGui.QIcon.On)
        MainWindow.setWindowIcon(icon)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # Right-hand group box: recognised plate image, text and DB actions.
        self.plate_result = QtWidgets.QGroupBox(self.centralwidget)
        self.plate_result.setGeometry(QtCore.QRect(550, 10, 481, 551))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.plate_result.setFont(font)
        self.plate_result.setStyleSheet("background-color: rgb(235, 235, 235);")
        self.plate_result.setObjectName("plate_result")
        self.label_PlateResult = QtWidgets.QLabel(self.plate_result)
        self.label_PlateResult.setGeometry(QtCore.QRect(10, 40, 461, 351))
        self.label_PlateResult.setStyleSheet("background-color: rgb(200, 200, 200);")
        self.label_PlateResult.setText("")
        self.label_PlateResult.setAlignment(QtCore.Qt.AlignCenter)
        self.label_PlateResult.setObjectName("label_PlateResult")
        self.textPlateResult = QtWidgets.QTextEdit(self.plate_result)
        self.textPlateResult.setGeometry(QtCore.QRect(10, 400, 271, 61))
        font = QtGui.QFont()
        font.setPointSize(16)
        self.textPlateResult.setFont(font)
        self.textPlateResult.setStyleSheet("background-color: rgb(255, 255, 255);")
        self.textPlateResult.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.textPlateResult.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.textPlateResult.setReadOnly(True)
        self.textPlateResult.setObjectName("textPlateResult")
        self.textEdit = QtWidgets.QTextEdit(self.plate_result)
        self.textEdit.setGeometry(QtCore.QRect(10, 480, 271, 51))
        font = QtGui.QFont()
        font.setPointSize(10)
        self.textEdit.setFont(font)
        self.textEdit.setReadOnly(True)
        self.textEdit.setObjectName("textEdit")
        self.btnCheckPlateNumber = QtWidgets.QPushButton(self.plate_result)
        self.btnCheckPlateNumber.setGeometry(QtCore.QRect(290, 400, 181, 61))
        font = QtGui.QFont()
        font.setPointSize(11)
        self.btnCheckPlateNumber.setFont(font)
        self.btnCheckPlateNumber.setStyleSheet("background-color: rgb(79, 79, 79);\n"
"color: rgb(255, 255, 255);")
        self.btnCheckPlateNumber.setIconSize(QtCore.QSize(30, 30))
        self.btnCheckPlateNumber.setObjectName("btnCheckPlateNumber")
        self.btnShowPlateRecords = QtWidgets.QPushButton(self.plate_result)
        self.btnShowPlateRecords.setGeometry(QtCore.QRect(290, 480, 181, 51))
        font = QtGui.QFont()
        font.setPointSize(11)
        self.btnShowPlateRecords.setFont(font)
        self.btnShowPlateRecords.setStyleSheet("background-color: rgb(79, 79, 79);\n"
"color: rgb(255, 255, 255);")
        self.btnShowPlateRecords.setIconSize(QtCore.QSize(30, 30))
        self.btnShowPlateRecords.setObjectName("btnShowPlateRecords")
        # Left-hand group box: source vehicle image and its action buttons.
        self.vehicle_plate = QtWidgets.QGroupBox(self.centralwidget)
        self.vehicle_plate.setGeometry(QtCore.QRect(30, 10, 481, 551))
        font = QtGui.QFont()
        font.setPointSize(12)
        self.vehicle_plate.setFont(font)
        self.vehicle_plate.setStyleSheet("background-color: rgb(235, 235, 235);")
        self.vehicle_plate.setObjectName("vehicle_plate")
        self.startPlateRecognition = QtWidgets.QPushButton(self.vehicle_plate)
        self.startPlateRecognition.setGeometry(QtCore.QRect(250, 430, 211, 51))
        font = QtGui.QFont()
        font.setPointSize(11)
        self.startPlateRecognition.setFont(font)
        self.startPlateRecognition.setStyleSheet("background-color: rgb(79, 79, 79);\n"
"color: rgb(255, 255, 255);")
        self.startPlateRecognition.setIconSize(QtCore.QSize(30, 30))
        self.startPlateRecognition.setObjectName("startPlateRecognition")
        self.labelVehicle = QtWidgets.QLabel(self.vehicle_plate)
        self.labelVehicle.setGeometry(QtCore.QRect(10, 30, 461, 351))
        self.labelVehicle.setStyleSheet("background-color: rgb(200, 200, 200);")
        self.labelVehicle.setText("")
        self.labelVehicle.setAlignment(QtCore.Qt.AlignCenter)
        self.labelVehicle.setObjectName("labelVehicle")
        self.openImageFile = QtWidgets.QPushButton(self.vehicle_plate)
        self.openImageFile.setGeometry(QtCore.QRect(20, 430, 211, 51))
        font = QtGui.QFont()
        font.setPointSize(11)
        self.openImageFile.setFont(font)
        self.openImageFile.setStyleSheet("background-color: rgb(79, 79, 79);\n"
"color: rgb(255, 255, 255);")
        self.openImageFile.setIconSize(QtCore.QSize(30, 30))
        self.openImageFile.setObjectName("openImageFile")
        self.vehicle_plate.raise_()
        self.plate_result.raise_()
        MainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        # Wire the buttons to their handlers.
        self.openImageFile.clicked.connect(self.loadImage)
        self.startPlateRecognition.clicked.connect(self.showPlateRecognition)
        self.btnShowPlateRecords.clicked.connect(self.openWindow)
        self.btnCheckPlateNumber.clicked.connect(self.checkPlateNumber)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    # Class-level default; loadImage later rebinds a module-global of the same name.
    imagePath = ""
    def loadImage(self):
        """Let the user pick an image from disk and display it in the left panel."""
        self.filename = QFileDialog.getOpenFileName(filter="Image (*.png *.xmp *.jpg *.jpeg *.webp)")[0]
        self.image = cv2.imread(self.filename)
        self.setPhoto(self.image)
        # NOTE(review): despite its name, 'imagePath' stores the image array (not a
        # path) and is a module-level global shadowing the class attribute — confirm intent.
        global imagePath
        imagePath = self.image
    def setPhoto(self, image):
        """Resize *image*, convert BGR->RGB, and show it on the vehicle label."""
        self.tmp = image
        image = imutils.resize(image, width=500)
        frame = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = QImage(frame, frame.shape[1], frame.shape[0], frame.strides[0], QImage.Format_RGB888)
        self.labelVehicle.setPixmap(QtGui.QPixmap.fromImage(image))
    # Runs when the "start plate recognition" button is clicked.
    def showPlateRecognition(self):
        self.writePlate(imagePath)
        self.showPlateImage(imagePath)
    # Shows the cropped plate image returned by the recognition algorithm on the result label.
    def showPlateImage(self, image):
        self.tmp = image
        txt, krp = plateRecognize(image)
        krp = imutils.resize(krp, width=350)
        frame = cv2.cvtColor(krp, cv2.COLOR_BGR2RGB)
        krp = QImage(frame, frame.shape[1], frame.shape[0], frame.strides[0], QImage.Format_RGB888)
        self.label_PlateResult.setPixmap(QtGui.QPixmap.fromImage(krp))
    # Writes the recognised plate text into the result text box.
    def writePlate(self, image):
        txt, krp = plateRecognize(image)
        self.textPlateResult.setText(txt)
    def openWindow(self):
        """Open the plate-records window, passing along the recognised plate text."""
        self.window = QtWidgets.QMainWindow()
        self.plateResult = self.textPlateResult.toPlainText()
        self.ui = Ui_SecondWindow(self.plateResult)
        self.ui.setupUi(self.window)
        self.window.show()
    def checkPlateNumber(self):
        """Query the SQLite DB for the recognised plate and report whether it is registered."""
        plateNumber = self.textPlateResult.toPlainText()
        self.conn = sql.connect("Database/PlateRecognition.db")
        self.c = self.conn.cursor()
        # Parameterised query avoids SQL injection via the recognised text.
        self.c.execute("SELECT * FROM PlateNumberInformations WHERE plate_number = ?",(plateNumber,))
        data = self.c.fetchall()
        if len(data) == 0:
            self.textEdit.setText("Araç kayıtlı değil.")
        else:
            self.textEdit.setText("Araç kayıtlı.")
        self.conn.commit()
        self.c.close()
        self.conn.close()
    def retranslateUi(self, MainWindow):
        """Set all user-visible (Turkish) strings."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "Plaka Tanıma Sistemi"))
        self.plate_result.setTitle(_translate("MainWindow", "Plaka Sonucu"))
        self.btnCheckPlateNumber.setText(_translate("MainWindow", "Plakayı Kontrol Et"))
        self.btnShowPlateRecords.setText(_translate("MainWindow", "Plaka Kayıtlarını Göster"))
        self.vehicle_plate.setTitle(_translate("MainWindow", "Araç/Plaka Görseli"))
        self.startPlateRecognition.setText(_translate("MainWindow", "Plaka Okuma Başlat"))
        self.openImageFile.setText(_translate("MainWindow", "Resim Dosyası Seç"))
if __name__ == "__main__":
    # Standard Qt bootstrap: build the app, attach the generated UI, run the event loop.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
| EdaNurKaramuk/PlakaTanimaSistemi | CarPlateRecognitionSystem.py | CarPlateRecognitionSystem.py | py | 9,785 | python | en | code | 7 | github-code | 36 |
from django.shortcuts import render
from.models import friends
# Create your views here.
def showindex(request):
    """Render the index page; if 'update_id' is supplied, show that entry instead.

    NOTE(review): friends.objects.filter(entry=id).update() is called with no
    field arguments, so no columns are changed — presumably an unfinished
    "edit" feature; confirm the intended behaviour.
    """
    id=request.GET.get("update_id")
    if id==None:
        res=friends.objects.all()
        return render(request,"index.html",{"res":res})
    else:
        id1=friends.objects.filter(entry=id).update()
        print(id1)
        return render(request,"index.html",{"id":id})
def displaydetails(request):
    """Create a shared-expense entry from the POSTed form and re-render the index.

    The amount is split evenly across the selected members; the per-head share
    is stored alongside the comma-joined member list.
    """
    entry = request.POST.get("eno")
    date = request.POST.get("date")
    amount = request.POST.get("amt")
    members = request.POST.getlist("t1")
    member_list = ", ".join(members)
    # Even split; assumes the submitted amount is a whole number — TODO confirm.
    share = int(amount) / len(members)
    friends(entry, date, amount, member_list, share).save()
    res = friends.objects.all()
    return render(request, "index.html", {"res": res})
def deletedetails(request):
    """Delete the entry whose id arrives in the POSTed 'delete_id' and re-render the index."""
    entry_id = request.POST.get("delete_id")
    friends.objects.filter(entry=entry_id).delete()
    remaining = friends.objects.all()
    return render(request, "index.html", {"res": remaining})
class Solution:
    def longestOnes(self, nums: List[int], k: int) -> int:
        """Longest run of 1s achievable by flipping at most k zeros (sliding window)."""
        left = 0
        zeros = 0
        best = 0
        for right, value in enumerate(nums):
            if value == 0:
                zeros += 1
            # Shrink the window from the left until it holds at most k zeros.
            while zeros > k:
                if nums[left] == 0:
                    zeros -= 1
                left += 1
            best = max(best, right - left + 1)
        return best
| abeni505/Comp-programming | Max-Consecutive-Ones-III.py | Max-Consecutive-Ones-III.py | py | 412 | python | en | code | 2 | github-code | 36 |
import pygame as pg

# Window and palette constants.
WIDTH = 700
HEIGHT = 500

WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)

# Current state of the four arrow keys.
up_key = False
down_key = False
left_key = False
right_key = False

# Car geometry and physics state.
CAR_WIDTH = 30
CAR_HEIGHT = 30
car_x = WIDTH / 2
car_y = HEIGHT / 2
car_x_vel = 0
car_y_vel = 0
max_vel = 10      # per-axis speed clamp
friction = 0.9    # velocity decay when no key is held
power = 4         # acceleration per frame while a key is held


def _step_velocity(vel, neg_pressed, pos_pressed):
    """Advance one axis velocity: accelerate toward the held key, else apply
    friction; clamp to +/-max_vel.  Shared by the x and y axes (the original
    duplicated this logic per axis)."""
    if neg_pressed and not pos_pressed:
        vel -= power
    elif pos_pressed and not neg_pressed:
        vel += power
    else:
        vel = vel * friction
    if vel > max_vel:
        vel = max_vel
    elif vel < -max_vel:
        vel = -max_vel
    return vel


pg.init()
pg.display.set_caption('Platformer 1')
screen = pg.display.set_mode([WIDTH, HEIGHT])

running = True
while running:
    # Track key presses/releases and the window close button.
    for e in pg.event.get():
        if e.type == pg.QUIT:
            running = False
        elif e.type == pg.KEYDOWN:
            if e.key == pg.K_LEFT:
                left_key = True
            elif e.key == pg.K_RIGHT:
                right_key = True
            elif e.key == pg.K_DOWN:
                down_key = True
            elif e.key == pg.K_UP:
                up_key = True
        elif e.type == pg.KEYUP:
            if e.key == pg.K_LEFT:
                left_key = False
            elif e.key == pg.K_RIGHT:
                right_key = False
            elif e.key == pg.K_DOWN:
                down_key = False
            elif e.key == pg.K_UP:
                up_key = False

    # Update velocities, then integrate position.
    car_x_vel = _step_velocity(car_x_vel, left_key, right_key)
    car_y_vel = _step_velocity(car_y_vel, up_key, down_key)
    car_x += car_x_vel
    car_y += car_y_vel

    # Wrap the car around the screen edges on both axes.
    if car_x + (CAR_WIDTH / 2) < 0:
        car_x = WIDTH - car_x
    elif car_x - (CAR_WIDTH / 2) > WIDTH:
        car_x = car_x - WIDTH - CAR_WIDTH
    if car_y + (CAR_HEIGHT / 2) < 0:
        car_y = HEIGHT - car_y
    elif car_y - (CAR_HEIGHT / 2) > HEIGHT:
        car_y = car_y - HEIGHT - CAR_HEIGHT

    # Render the car.
    screen.fill(WHITE)
    pg.draw.rect(screen, BLUE, (int(car_x - (CAR_WIDTH / 2)), int(car_y - (CAR_HEIGHT / 2)), CAR_WIDTH, CAR_HEIGHT))
    pg.display.update()

pg.quit()
| oscarsangwin/pygame-platformer | car01.py | car01.py | py | 2,614 | python | en | code | 0 | github-code | 36 |
import audio_edit_utils
import vid_utils
# "C:\Program Files\VideoLAN\VLC\vlc.exe" --start-time=35 "C:\Users\Brandon\AppData\Roaming\I2P\i2psnark\Rick.and.Morty.S04E03.720p.WEBRip.x264-TBS[rarbg]\rick.and.morty.s04e03.720p.webrip.x264-tbs.mkv"
# "C:\Program Files\VideoLAN\VLC\vlc.exe" --start-time=150 "C:\\Users\\Brandon\\Documents\\Personal_Projects\\my_movie_tools_big_data\\transfer_dir\\Atonement_(720).m4v"
# import speech_recognition as sr
# from os import path
# from pydub import AudioSegment
# if not isinstance(actual_result, dict) or len(actual_result.get("alternative", [])) == 0: raise UnknownValueError()
# speech_recognition.UnknownValueError
import time
# Input video and intermediate audio artefacts (alternative inputs left commented).
# VID_FILE_PATH = "C:\\Users\\Brandon\\Documents\\Personal_Projects\\my_movie_tools_big_data\\dolbycanyon_outside.mkv"
VID_FILE_PATH = "C:\\Users\\Brandon\\Documents\\Personal_Projects\\my_movie_tools_big_data\\transfer_dir\\Atonement_(720).m4v"
# VID_FILE_PATH = "C:\\Users\\Brandon\\Documents\\Personal_Projects\\my_movie_tools_big_data\\Screen.mkv"
FULL_AUDIO_PATH = '..\\my_movie_tools_big_data\\full_audio.wav'
CLIPED_AUDIO_PATH = '..\\my_movie_tools_big_data\\clipped_audio.wav'
AUDIO_CLIP_TIME = 10 # number of seconds to divide audio into to check for speech
def get_first_speech_time(full_audio_path, start_time):
    """Return the offset in seconds after *start_time* of the first speech in a .wav file.

    The audio is scanned in AUDIO_CLIP_TIME-second windows; each window is
    clipped to CLIPED_AUDIO_PATH and transcribed.  Returns False when no
    window after *start_time* contains recognisable speech.
    """
    total_duration = audio_edit_utils.get_audio_duration(full_audio_path)
    window_start = start_time
    scanning = True
    while scanning:
        # Determine this window's end, capping at the end of the audio.
        window_end = window_start + AUDIO_CLIP_TIME
        if window_end > total_duration:
            window_end = total_duration
            scanning = False
        # Clip out the window and try to transcribe it.
        audio_edit_utils.clip_audio(full_audio_path, CLIPED_AUDIO_PATH, window_start, window_end)
        if audio_edit_utils.transcribe_audio(CLIPED_AUDIO_PATH) != False:
            return window_start
        window_start = window_end
    return False
# Extract the soundtrack from the video, then report when speech first occurs.
audio_edit_utils.get_audio_from_video(VID_FILE_PATH, FULL_AUDIO_PATH)
first_speech_time = get_first_speech_time(FULL_AUDIO_PATH, 0)
print(first_speech_time)
#
#
#
# # # convert mp3 file to wav
# sound = AudioSegment.from_mp3("C:\\Users\\Brandon\\Documents\\Personal_Projects\\my_movie_tools_big_data\\ste-021-dilan-herkunft.mp3")
# sound.export("transcript.wav", format="wav")
#
#
# # transcribe audio file
# AUDIO_FILE = "transcript.wav"
# # AUDIO_FILE = "C:\\Users\\Brandon\\Documents\\Personal_Projects\\my_movie_tools_big_data\\Screen_audio.wav"
# # AUDIO_FILE = "C:\\Users\\Brandon\\Documents\\Personal_Projects\\my_movie_tools_big_data\\youre-so-funny-1.wav"
# # AUDIO_FILE = "C:\\Users\\Brandon\\Documents\\Personal_Projects\\my_movie_tools_big_data\\file_example_WAV_1MG.wav"
#
# s = time.time()
#
#
# # use the audio file as the audio source
# r = sr.Recognizer()
# with sr.AudioFile(AUDIO_FILE) as source:
# audio = r.record(source) # read the entire audio file
#
# print("Transcription: " + r.recognize_google(audio))
#
# print(time.time() - s)
#
# # ffmpeg -i "C:\Users\Brandon\Documents\Personal_Projects\my_movie_tools_big_data\Screen_audio.wav" -ss 10 -to 15 -c copy "C:\Users\Brandon\Documents\Personal_Projects\my_movie_tools_big_data\Screen_audio_10-15.wav"
# #
# #
# #
# # ffmpeg -i "C:\Users\Brandon\Documents\Personal_Projects\my_movie_tools_big_data\Screen_trimmed.mp4" -acodec pcm_s16le -ac 2 "C:\Users\Brandon\Documents\Personal_Projects\my_movie_tools_big_data\Screen_trimmed.wav"
# # # ffmpeg -i "C:\Users\Brandon\Documents\Personal_Projects\my_movie_tools_big_data\Screen.mkv" -acodec pcm_s16le -ac 2 "C:\Users\Brandon\Documents\Personal_Projects\my_movie_tools_big_data\Screen_audio.wav"
# # ffmpeg -i "C:\Users\Brandon\Documents\Personal_Projects\my_movie_tools_big_data\Screen_trimmed.wav" 2>&1 | grep Duration | sed 's/Duration: \(.*\), start/\1/g'
# # ffprobe -i "C:\Users\Brandon\Documents\Personal_Projects\my_movie_tools_big_data\Screen_trimmed.wav" -show_entries format=duration -v quiet -of csv="p=0"
| Brandon-Valley/my_movie_tools | find_first_speach.py | find_first_speach.py | py | 4,625 | python | en | code | 0 | github-code | 36 |
# importing pycairo
import cairo
# Creating an SVG surface: "geek95.svg" is the output file, 700x700 its dimensions.
# The with-block finalises (flushes) the SVG when it exits.
with cairo.SVGSurface("geek95.svg", 700, 700) as surface:
    # creating a cairo context object for SVG surface
    # using Context method
    context = cairo.Context(surface)
    # move the context to x,y position
    context.move_to(50, 200)
    # Drawing Curve (cubic Bezier: two control points, then the end point)
    context.curve_to(150, 75, 225, 50, 350, 150)
    # setting color of the context (red, RGB in 0..1)
    context.set_source_rgb(1, 0, 0)
    # setting width of the context
    context.set_line_width(4)
    # stroke out the color and width property
    context.stroke()
    # printing message when file is saved
    print("File Saved")
# ---------------------------------------------------------------------------------------
import cv2
import numpy as np
def gammaCorrection(src, gamma):
    """Gamma-correct *src* by mapping every 8-bit level through a lookup table."""
    exponent = 1 / gamma
    lut = np.array([((level / 255) ** exponent) * 255 for level in range(256)], np.uint8)
    return cv2.LUT(src, lut)
# Demo: brighten the image with gamma 2.2 and show both versions side by side.
img = cv2.imread('image.jpg')
gammaImg = gammaCorrection(img, 2.2)
cv2.imshow('Original image', img)
cv2.imshow('Gamma corrected image', gammaImg)
cv2.waitKey(0)
cv2.destroyAllWindows()
# -----------------------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
from skimage import color, data, restoration
# Blur the astronaut test image with a 5x5 box PSF, add noise, then restore it
# with self-tuned (unsupervised) Wiener deconvolution and show data vs. result.
rng = np.random.default_rng()
astro = color.rgb2gray(data.astronaut())
from scipy.signal import convolve2d as conv2
psf = np.ones((5, 5)) / 25
astro = conv2(astro, psf, 'same')
astro += 0.1 * astro.std() * rng.standard_normal(astro.shape)
deconvolved, _ = restoration.unsupervised_wiener(astro, psf)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(8, 5),
                       sharex=True, sharey=True)
plt.gray()
ax[0].imshow(astro, vmin=deconvolved.min(), vmax=deconvolved.max())
ax[0].axis('off')
ax[0].set_title('Data')
ax[1].imshow(deconvolved)
ax[1].axis('off')
ax[1].set_title('Self tuned restoration')
fig.tight_layout()
plt.show()
# ----------------------------------------------------------------------------------
import cv2
import numpy as np
# Read source image.
im_src = cv2.imread('image1.png')
# Four corners of the book in source image (hard-coded pixel coordinates).
pts_src = np.array([[141, 131], [480, 159], [493, 630], [64, 601]])
# Read destination image.
im_dst = cv2.imread('image.jpg')
# Four corners of the book in destination image.
pts_dst = np.array([[318, 256], [534, 372], [316, 670], [73, 473]])
# Calculate Homography mapping the source corners onto the destination corners.
h, status = cv2.findHomography(pts_src, pts_dst)
# Warp source image to destination based on homography
im_out = cv2.warpPerspective(im_src, h, (im_dst.shape[1], im_dst.shape[0]))
# Display images
cv2.imshow("Source Image", im_src)
cv2.imshow("Destination Image", im_dst)
cv2.imshow("Warped Source Image", im_out)
cv2.waitKey(0)
#---------------------------------------------------------------------------
import cv2
import numpy as np
image = cv2.imread('image.jpg')
# Apply identity kernel: convolving with it leaves the image unchanged.
kernel1 = np.array([[0, 0, 0],
                    [0, 1, 0],
                    [0, 0, 0]])
# filter2D() function can be used to apply kernel to an image.
# Where ddepth is the desired depth of final image. ddepth is -1 if...
# ... depth is same as original or source image.
identity = cv2.filter2D(src=image, ddepth=-1, kernel=kernel1)
# We should get the same image
cv2.imshow('Original', image)
cv2.imshow('Identity', identity)
cv2.waitKey()
cv2.imwrite('identity.jpg', identity)
cv2.destroyAllWindows()
# ------------------------------------------------------------
import cv2
import numpy as np
image = cv2.imread('image.jpg')
# Apply blurring kernel: a normalised 5x5 box filter (mean of the neighbourhood).
kernel2 = np.ones((5, 5), np.float32) / 25
img = cv2.filter2D(src=image, ddepth=-1, kernel=kernel2)
cv2.imshow('Original', image)
cv2.imshow('Kernel Blur', img)
cv2.waitKey()
cv2.imwrite('blur_kernel.jpg', img)
cv2.destroyAllWindows()
| Malgetany/ch2-part2 | main.py | main.py | py | 3,947 | python | en | code | 0 | github-code | 36 |
#!/usr/bin/env python
# coding=utf-8
import torch
import torchvision.models as models
#resnet169 = models.densenet169(pretrained=True).cuda()
# Load pretrained Inception v3 on the GPU and export it to ONNX.
inception_v3 = models.inception_v3(pretrained=True).cuda()
# Dummy batch fixes the exported graph's input shape (1x3x224x224) on the same device.
dummy_input = torch.randn(1, 3, 224, 224, device='cuda')
input_names = ['data']
output_names = ['outputs']
torch.onnx.export(inception_v3, dummy_input, f='inception_v3.onnx', verbose=True, input_names=input_names,
                  output_names=output_names, opset_version=10) # generate onnx model of 244M
| YixinSong-e/onnx-tvm | torchmodel/torch_model.py | torch_model.py | py | 528 | python | en | code | 0 | github-code | 36 |
import json
from falcon.status_codes import HTTP_404, HTTP_400
from marshmallow import ValidationError
from utils.HTTPError import HTTPError
class Serializer:
    """
    This middleware gives us a possibility to validate data from request body.
    It also allows to set a separate schema (validator) for every HTTP method.
    At the end, it sets serializer data in context so we can read the data in API
    endpoint (if needed). If the data is not correct, API returns HTTP 400 error
    with validation message returned by marshmallow.
    """
    def process_resource(self, req, _resp, resource, _params):
        # NOTE(review): json.load raises on an empty or non-JSON body (e.g. GET
        # requests), which would surface as a 500 — confirm upstream guards.
        body = json.load(req.bounded_stream)
        try:
            # Schemas are looked up per lower-cased HTTP method on the resource.
            serializer = resource.serializers[req.method.lower()]
        except (AttributeError, IndexError, KeyError):
            # No serializers mapping, or no schema for this method -> 404.
            raise HTTPError(status=HTTP_404)
        else:
            # Validated/deserialized payload is exposed to the endpoint via context.
            try:
                req.context["serializer"] = serializer().load(data=body)
            except ValidationError as err:
                raise HTTPError(error="Bad Request", status=HTTP_400, field_errors=err.messages)
| NomanGul/kanda-fullstack-test | server/middlewares/serializer.py | serializer.py | py | 1,084 | python | en | code | 3 | github-code | 36 |
import fileIO
import view
def menu(data):
    """Interactive loop over *data* (a list of rows): show, add, delete, save, quit.

    Mutates *data* in place so the caller's list stays current.
    """
    while True:
        answer = view.show_menu()
        if answer == 1:
            view.show_data(data)
        elif answer == 2:
            str_data = input('Enter your data delimited by tab> ')
            data.append(str_data.split('\t'))
        elif answer == 3:
            str_del = input('Ente ID> ')
            # Rebuild in place: the original deleted by index while iterating the
            # same list, which skips rows and can raise IndexError once it shrinks.
            data[:] = [row for row in data if row[0] != str_del]
        elif answer == 4:
            fileIO.write_data('data.csv', data)
        elif answer == 5:
            print('bye')
            break
        else:
            print('Wrong number')
"""
Merge, combine and mosaic
"""
def rsts_to_mosaic(inRasterS, o, api="grass", fformat='.tif', method=None):
    """
    Create a mosaic from several rasters.

    inRasterS : list of raster paths/names (or, for api='rasterio', optionally a
                folder scanned for files with extension *fformat*)
    o         : output raster
    api       : backend — 'pygrass', 'grass' (CLI) or 'rasterio'
    method    : merge strategy for rasterio ('first', 'last', 'min', 'max';
                defaults to 'first'); ignored by the GRASS backends
    """
    if api == 'pygrass':
        """
        The GRASS program r.patch allows the user to build a new raster map the size
        and resolution of the current region by assigning known data values from
        input raster maps to the cells in this region. This is done by filling in
        "no data" cells, those that do not yet contain data, contain NULL data, or,
        optionally contain 0 data, with the data from the first input map.
        Once this is done the remaining holes are filled in by the next input map,
        and so on. This program is useful for making a composite raster map layer
        from two or more adjacent map layers, for filling in "holes" in a raster map
        layer's data (e.g., in digital elevation data), or for updating an older map
        layer with more recent data. The current geographic region definition and
        mask settings are respected.
        The first name listed in the string input=name,name,name, ... is the name of
        the first map whose data values will be used to fill in "no data" cells in
        the current region. The second through last input name maps will be used,
        in order, to supply data values for for the remaining "no data" cells.
        """
        from grass.pygrass.modules import Module
        m = Module(
            "r.patch", input=inRasterS, output=o,
            overwrite=True, run_=False, quiet=True
        )
        m()
    elif api == 'grass':
        # Same r.patch operation, but via the GRASS command line.
        from glass.pys import execmd
        rcmd = execmd((
            f"r.patch input={','.join(inRasterS)} output={o} "
            "--overwrite --quiet"
        ))
    elif api == 'rasterio':
        import rasterio
        from rasterio.merge import merge
        from glass.prop.df import drv_name
        from glass.prop.prj import get_epsg, epsg_to_wkt
        # A non-list input is treated as a folder to scan for rasters.
        if type(inRasterS) != list:
            from glass.pys.oss import lst_ff
            rsts = lst_ff(inRasterS, file_format=fformat)
        else: rsts = inRasterS
        methods = ['first', 'last', 'min', 'max']
        method = 'first' if not method or \
            method not in methods else method
        srcs = [rasterio.open(r) for r in rsts]
        mosaic, out_trans = merge(srcs, method=method)
        # Base the output metadata on the first source, updated for the mosaic extent.
        out_meta = srcs[0].meta.copy()
        out_meta.update({
            "driver" : drv_name(o),
            "height" : mosaic.shape[1],
            "width" : mosaic.shape[2],
            "transform" : out_trans,
            "count" : 1,
            "crs" : epsg_to_wkt(get_epsg(rsts[0])),
            "compress" : 'lzw'
        })
        with rasterio.open(o, "w", **out_meta) as dest:
            dest.write(mosaic)
    else:
        raise ValueError(f'api {api} is not available')
    return o
def rseries(lst, out, meth, as_cmd=None):
    """
    r.series - Makes each output cell value a function of the values
    assigned to the corresponding cells in the input raster map layers.

    lst    : list of input raster names
    out    : output raster name
    meth   : aggregation method; one of:
             average, count, median, mode, minimum, min_raster, maximum,
             max_raster, stddev, range, sum, variance, diversity,
             slope, offset, detcoeff, tvalue, quart1, quart3, perc90,
             quantile, skewness, kurtosis
    as_cmd : when truthy, shell out to the GRASS CLI instead of pygrass

    Raises ValueError if lst is not a list.
    """
    # isinstance also accepts list subclasses, unlike the exact type() check.
    if not isinstance(lst, list):
        raise ValueError("lst must be a list of rasters")
    if not as_cmd:
        from grass.pygrass.modules import Module
        serie = Module(
            'r.series', input=lst, output=out, method=meth,
            overwrite=True, quiet=True, run_=False
        )
        serie()
    else:
        from glass.pys import execmd
        ilst = ",".join(lst)
        execmd((
            f"r.series input={ilst} output={out} "
            f"method={meth} "
            "--overwrite --quiet"
        ))
    return out
def fullgrass_rseries(ifolder, refrst, method, orst):
    """
    Run r.series over every GeoTIFF in *ifolder* inside a fresh GRASS session.

    ifolder : folder holding the input .tif rasters (also used as the GRASS workdir)
    refrst  : reference raster defining the location's spatial reference
    method  : r.series aggregation method (see rseries)
    orst    : path of the exported result raster
    """
    import os
    from glass.wenv.grs import run_grass
    from glass.pys.tm import now_as_str
    from glass.pys.oss import lst_ff, fprop
    # Timestamped location name keeps concurrent runs from colliding.
    loc = now_as_str()
    gbase = run_grass(ifolder, location=loc, srs=refrst)
    import grass.script.setup as gsetup
    gsetup.init(gbase, ifolder, loc, "PERMANENT")
    from glass.it.rst import rst_to_grs, grs_to_rst
    # Import every .tif into the GRASS location, named after its filename.
    rsts = [rst_to_grs(
        r, fprop(r, 'fn')
    ) for r in lst_ff(ifolder, file_format='.tif')]
    prst = rseries(rsts, fprop(orst, 'fn'), method, as_cmd=True)
    # Export the aggregated raster back to a regular file.
    grs_to_rst(prst, orst)
    return orst
def bnds_to_mosaic(bands, outdata, ref_raster, loc=None):
    """
    Satellite image To mosaic

    Mosaics each band's rasters (pixel-wise maximum) inside a GRASS session
    whose region is defined by *ref_raster*.

    bands = {
        'bnd_2' : [path_to_file, path_to_file],
        'bnd_3' : [path_to_file, path_to_file],
        'bnd_4' : [path_to_file, path_to_file],
    }

    Returns the same mapping with each value replaced by the exported mosaic path.
    """
    """
    Start GRASS GIS Session
    """
    import os
    from glass.pys.oss import fprop
    from glass.prop.prj import rst_epsg
    from glass.wenv.grs import run_grass
    # Get EPSG from refRaster
    epsg = rst_epsg(ref_raster, returnIsProj=None)
    LOC = loc if loc else 'gr_loc'
    grass_base = run_grass(
        outdata, grassBIN='grass78',
        location=LOC, srs=epsg
    )
    import grass.script.setup as gsetup
    gsetup.init(grass_base, outdata, LOC, 'PERMANENT')
    # ************************************************************************ #
    # GRASS MODULES #
    # ************************************************************************ #
    from glass.it.rst import rst_to_grs, grs_to_rst
    from glass.wenv.grs import rst_to_region
    # ************************************************************************ #
    # SET GRASS GIS LOCATION EXTENT #
    # ************************************************************************ #
    extRst = rst_to_grs(ref_raster, 'extent_raster')
    rst_to_region(extRst)
    # ************************************************************************ #
    # SEND DATA TO GRASS GIS #
    # ************************************************************************ #
    grs_bnds = {}
    for bnd in bands:
        l= []
        for b in bands[bnd]:
            bb = rst_to_grs(b, fprop(b, 'fn'))
            l.append(bb)
        grs_bnds[bnd] = l
    # ************************************************************************ #
    # PATCH bands and export #
    # ************************************************************************ #
    for bnd in grs_bnds:
        # Per-band mosaic keeps the maximum value where rasters overlap.
        mosaic_band = rseries(grs_bnds[bnd], bnd, 'maximum')
        grs_bnds[bnd] = grs_to_rst(mosaic_band, os.path.join(
            outdata, mosaic_band + '.tif'
        ), as_cmd=True)
    return grs_bnds
| jasp382/glass | glass/rst/mos.py | mos.py | py | 6,840 | python | en | code | 2 | github-code | 36 |
import logging
from edera.exceptions import ExcusableError
from edera.exceptions import ExcusableWorkflowExecutionError
from edera.exceptions import WorkflowExecutionError
from edera.queue import Queue
from edera.routine import deferrable
from edera.routine import routine
from edera.workflow.executor import WorkflowExecutor
class BasicWorkflowExecutor(WorkflowExecutor):
    """
    A basic workflow executor.
    Expects tasks to be ranked in advance.
    Runs tasks in the current thread one by one, handles exceptions, and performs logging.
    This executor is interruptible.
    See also:
        $TaskRanker
    """
    @routine
    def execute(self, workflow):
        queue = Queue(workflow)
        stopped_tasks = []
        failed_tasks = []
        while queue:
            task = queue.pick()
            # Phony tasks carry no work of their own — just acknowledge them.
            if task.phony:
                queue.accept()
                continue
            try:
                logging.getLogger(__name__).debug("Picked task %r", task)
                # Skip tasks whose target already reports completion.
                if task.target is not None:
                    completed = yield deferrable(task.target.check).defer()
                    if completed:
                        queue.accept()
                        continue
                logging.getLogger(__name__).info("Running task %r", task)
                yield deferrable(task.execute).defer()
            except ExcusableError as error:
                # Excusable failures are tolerated but their dependents are discarded.
                logging.getLogger(__name__).info("Task %r stopped: %s", task, error)
                stopped_tasks.append(task)
                queue.discard()
            except Exception:
                logging.getLogger(__name__).exception("Task %r failed:", task)
                failed_tasks.append(task)
                queue.discard()
            else:
                logging.getLogger(__name__).info("Task %r completed", task)
                queue.accept()
        # Hard failures take precedence when reporting the outcome.
        if failed_tasks:
            raise WorkflowExecutionError(failed_tasks)
        if stopped_tasks:
            raise ExcusableWorkflowExecutionError(stopped_tasks)
| thoughteer/edera | edera/workflow/executors/basic.py | basic.py | py | 2,027 | python | en | code | 3 | github-code | 36 |
#!/usr/bin/env python
"""
author: Jun Ding
date: 2020-07-06
function: plot the expression of input gene
copy and modification of this code is allowed for academic purposes.
Please don NOT remove this author statement under any condition.
"""
import sys,os,pdb,argparse
import anndata
import scanpy as sc
def plotGene(exFn, gene):
    """Load a prerun .h5ad result and show a UMAP coloured by *gene* expression."""
    prerun = anndata.read_h5ad(exFn)
    if gene not in prerun.var.index:
        print("Error! please check your input gene ID, it must be the same as in your expression file")
        print("Also, the missing gene could be caused by the dispersion based gene filtering by the prerun program")
        return
    sc.pl.umap(prerun, color=[gene])
def main():
    """CLI entry point: parse the arguments and plot the requested gene."""
    cli = argparse.ArgumentParser(description="scdiff2 plotGene")
    cli.add_argument('-i','--input',required=True,help='input h5ad prerun result')
    cli.add_argument('-g','--gene',required=True, help='gene name you want to explore, must be the same ID as in your original input expression file')
    opts = cli.parse_args()
    plotGene(opts.input, opts.gene)
if __name__=="__main__":
    main()  # run only when executed as a script
| phoenixding/scdiff2 | utils/plotGene.py | plotGene.py | py | 1,080 | python | en | code | 5 | github-code | 36 |
# -*- coding: utf-8 -*-
import numpy as np
import math
import matplotlib.pyplot as plt
def option_pricing(s0, k, t, sigma, r, cp, american=False, n = 100):
    """Price a European or American option on a Jarrow-Rudd binomial tree.

    s0       : initial stock price
    k        : strike price
    t        : time to maturity (years)
    sigma    : volatility
    r        : risk-free rate
    cp       : 1 for a call, -1 for a put
    american : True -> American exercise, False -> European
    n        : number of binomial steps

    Call payoff at T: CT = max(ST-K, 0); put payoff: PT = max(K-ST, 0).
    Side effect: scatter-plots the intermediate node values.
    Returns the option value at the root node.
    """
    # Jarrow-Rudd parameterisation of the tree; other schemes (CRR, Tian, ...)
    # compute u/d/q differently.  b is the money-market growth per step.
    delta_t = t/n
    # (under Jarrow-Rudd the real-world up-probability would be p = 0.5;
    # the risk-neutral measure q is used below instead)
    u = math.exp((r-0.5*math.pow(sigma,2))*delta_t+sigma*math.sqrt(delta_t))
    d = math.exp((r-0.5*math.pow(sigma,2))*delta_t-sigma*math.sqrt(delta_t))
    b = math.exp(r*delta_t)
    q = (b - d)/(u-d) #q = p* - risk-neutral measure
    st = np.zeros((n+1, n+1))
    option_value = np.zeros((n+1, n+1))
    st[0, 0] = s0
    am_price = []
    eu_price = []
    # Forward pass: build the stock-price lattice.
    for i in range(1, n+1):
        st[i, 0] = st[i-1, 0]*u
        for j in range(1, i+1):
            st[i, j] = st[i-1, j-1]*d
    # Backward recursion from the terminal payoffs.
    for j in range(n+1):
        option_value[n, j] = max(0, cp*(st[n, j]-k)) # terminal payoff
    for i in range(n-1, -1, -1):
        for j in range(i+1): #European option
            option_value[i, j] = (q*option_value[i+1, j]+(1-q)*option_value[i+1, j+1])/b
            if american: #American option: allow early exercise at each node
                option_value[i, j] = max(option_value[i, j], cp*(st[i, j]-k))
                am_price.append(option_value[i, j]) # collected only for plotting
            else:
                eu_price.append(option_value[i, j]) # collected only for plotting
    # Plot the collected node values.
    tam = np.linspace(0, 1, len(am_price))
    plt.plot(tam, am_price, 'bo')
    teu = np.linspace(0, 1, len(eu_price))
    plt.plot(teu, eu_price, 'ro')
    plt.show()
    return option_value[0,0]
# Example run: European put (cp=-1), 10 tree steps.  Note that plt.show()
# inside option_pricing blocks until the plot window is closed.
V = option_pricing(100, 80, 1, 0.8, 0.01, -1, False, 10)
#V = option_pricing(100, 80, 1, 0.8, 0.01, -1, True, 100)
print(V)
# NOTE: a stray "f = open('minfil.txt')" used to live here.  The handle was
# never used and never closed, and it crashed the script at import time when
# minfil.txt did not exist yet.  Each helper below opens and closes its own
# file, so the line was removed.
def write_to_file(data):
    """Overwrite minfil.txt with *data*.

    Mode 'w' truncates the file, so any previous content is replaced.
    """
    # 'with' guarantees the handle is closed even if write() raises.
    with open('minfil.txt', 'w') as f:
        f.write(data)
def read_from_file(filename):
    """Print the entire content of *filename* and return it.

    Returning the text is backward compatible (the original returned None,
    which no caller used) and makes the helper reusable and testable.
    """
    # 'with' guarantees the handle is closed even if read() raises.
    with open(filename) as f:
        innhold = f.read()
    print(innhold)
    return innhold
def main():
    """Interactive loop: read or write minfil.txt until the user types 'done'."""
    while True:
        todo = input('Do you want to read or write? ')
        if todo == 'done':
            break
        if todo == 'write':
            data = input('What do you want to write to file? ')
            write_to_file(data)
            print(f'{data} was written to file.')
        elif todo == 'read':
            read_from_file('minfil.txt')
    print('You are done.')
# Starts the interactive loop immediately (no __main__ guard in this script).
main()
| jorul/ITGK | ITGK øvinger/Øving 9 uke 45/4 Generelt om filbehandling/a filbehandling.py | a filbehandling.py | py | 668 | python | en | code | 0 | github-code | 36 |
def list_sort(num_list):
    """Quicksort: recursively sort around the first element as pivot."""
    if len(num_list) <= 1:
        return num_list
    pivot = num_list[0]
    smaller = [value for value in num_list if value < pivot]
    equal = [value for value in num_list if value == pivot]
    larger = [value for value in num_list if value > pivot]
    return list_sort(smaller) + equal + list_sort(larger)
def index_search(num_list, number, start, end):
    """Recursive binary search over the sorted *num_list*.

    Returns a human-readable (Russian) message: either the index of the
    element that is >= *number* plus the index of the element below it,
    or a message that *number* lies outside the sequence entirely.
    """
    # Empty search window: *number* is smaller than every element or larger
    # than every element of the sorted list.
    if start > end:
        return f'Искомое число отсутствует в последовательности.\nИндекс числа, которое {"больше" if number < num_list[0] else "меньше"} искомого: {0 if number < num_list[0] else len(num_list) - 1}'
    middle = (start + end) // 2
    # Exact hit, or *number* falls strictly between elements middle-1 and middle.
    if num_list[middle] == number or (num_list[middle] > number and num_list[middle - 1] < number):
        return f'Индекс числа, которое больше или равно искомому: {middle}\nИндекс числа, которое меньше искомого: {middle - 1 if middle != 0 else "отсутствует в последовательности"}'
    # Recurse into the half that can contain *number*.
    if number < num_list[middle]:
        return index_search(num_list, number, start, middle - 1)
    else:
        return index_search(num_list, number, middle + 1, end)
# Interactive driver: keep prompting until both the sequence and the target
# number parse as integers, then sort the sequence and run the binary search.
while True:
    try:
        list_of_numbers = list(map(int, input('Введите последовательность целых чисел через пробел:\n').split()))
        user_number = int(input('Введите искомое число: '))
    except ValueError:
        print('Необходимо вводить только целые числа! Попробуйте еще раз.')
        continue
    else:
        break
sorted_list = list_sort(list_of_numbers)
print(f'Последовательность чисел упорядочена по возрастанию:\n{sorted_list}')
print(index_search(sorted_list, user_number, 0, len(sorted_list) - 1))
| IgorRush/practice_17.9 | sorting.py | sorting.py | py | 1,891 | python | ru | code | 0 | github-code | 36 |
15972612873 | #!/usr/bin/env python3
__doc__ = """Process a dump from the 'Charge Activity Report by Employee
- Project Detail Information' report from Webwise. We only need the
table view because we simply want to extract the fields. For this to
work, we _must_ have the table headers. Those are used as the keys in
the YAML formatting.
"""
import argparse
import datetime
import os
import re
import yaml
_reference_format = """ - title: {Project Title}
project: {Project}
contract: {Contract No}
sponsor: {Sponsor}
PD:
project: {PD of Project}
subtask: {PD of Subtask}
role: '[Program manager, P.D./P.I., Co-P.I./P.D. Task leader]'
budget: '[Did Candidate have budgetary authority?]'
subtask: {Subtask Title}
amount-funded:
task: {Budget of Subtask}
project: {Funded Amount includes Fee}
number-supervised: '[15 (3 PRE, 1 SRE, 1 REII, 1 RE1, 9 students)]'
performance:
project:
- year: {Contract Start Date.year}
month: {Contract Start Date.month}
day: {Contract Start Date.day}
- year: {Contract End Date.year}
month: {Contract End Date.month}
day: {Contract End Date.day}
candidate:
- year: {Employee First Month Worked on Project.year}
month: {Employee First Month Worked on Project.month}
day: {Employee First Month Worked on Project.day}
- year: {Employee Last Month Worked on Project.year}
month: {Employee Last Month Worked on Project.month}
day: {Employee Last Month Worked on Project.day}
hours-worked: {Total Hours Worked}
contributions: '[Briefly describe you contributions in 2--3 sentences.]'
"""
# These were part of an attempt to update a reference YAML with new
# information from the table, but I think that's going to take too much
# effort. Maybe we'll do that, but not now.
# _empty_row = {
# "title" : "",
# "project" : "",
# "contract" : "",
# "sponsor" : "",
# "PD-project" : "",
# "PD-subtask" : "",
# "role" : "'[Program manager, P.D./P.I., Co-P.I./P.D. Task leader]'",
# "budget" : "'[Did Candidate have budgetary authority?]'",
# "subtask" : "",
# "amount-funded-task" : "",
# "amount-funded-project" : "",
# "number-supervised" : "'[15 (3 PRE, 1 SRE, 1 REII, 1 RE1, 9 students)]'",
# "contract-start" : None,
# "contract-end" : None,
# "candidate-start" : None,
# "candidate-end" : None,
# "hour-worked" : "",
# "contributions" : "'[Briefly describe you contributions in 2--3 sentences.]'",
# }
# _from_to_keys = (
# ("Project Title", "title"),
# ("Project", "project"),
# ("Contract No", "contract"),
# ("Sponsor", "sponsor"),
# ("PD of Project", "pd-project"),
# ("PD of Subtask", "pd-subtask"),
# ("Subtask Title", "subtask"),
# ("Budget of Subtask", "amount-funded-task"),
# ("Funded Amount includes Fee", "amount-funded-project"),
# ("Contract Start Date", "contract-start"),
# ("Contract End Date", "contract-end"),
# ("Employee First Month Worked on Project", "candidate-start"),
# ("Employee Last Month Worked on Project", "candidate-end"),
# ("Total Hours Worked", "hour-worked"),
# )
# This is the worked out regular expression for copying the vita
# view over. All of the information is in the table and it's easier
# to parse that. But I don't want to loose the careful work I did
# to figure this out.
# pattern = re.compile(r"\s*\d+\s*" \
# + r"Project\s*Title\s*(?P<title>[-&\w\s]+)" \
# + r"Contract\s*No(?:[.]|umber)\s*(?P<contract>[\w-]*)\s*" \
# + r"Sponsor\s*(?P<sponsor>[-&\w/\s]+)\s*" \
# + r"P[.]\s*I[.]\s*(?P<pi>[\w,\s]+)" \
# + r"Candidate['’]s\s+Role\s*(?P<role>[\w\s-]*)" \
# + r"Budgetary\s*Authority[?]\s*(?P<budget>\w*)\s*" \
# + r"Subtask\s*Title[?]?\s*(?P<subtask>[-&\w\s]*)" \
# + r"Amount\s*Funded\s*for\s*Task:?\s*(?P<task_amount>\$[\d,.]+)?\s*" \
# + r"Amount\s*Funded\s*for\s*Project:?\s*(?P<project_amount>\$[\d,.]+)?\s*" \
# + r"Number\s*and\s*Rank\s*of\s*Persons\s*Supervised:?\s*(?P<supervised>[\w\s]*)" \
# + r"Period\s*of\s*Performance\s*\(Project\):?\s*(?P<project_performance>[-/\d\s]*)" \
# + r"Period\s*of\s*Performance\s*\(Candidate\):?\s*(?P<candidate_performance>[-/\d\s]*)" \
# + r"Contributions:?\s*(?P<contributions>\w|\s)*"
# )
# We define two entries as the same if they have the same entries
# same_entry = lambda l, r: all(l[k] == r[k] for k in ("title",
# "subtask",
# "contract"))
if __name__ == "__main__":
prog, _ = os.path.splitext(".".join(__file__.split(os.sep)[-3:]))
parser = argparse.ArgumentParser(prog=prog, description=__doc__)
parser.add_argument("-o", "--output", default="-",
type=argparse.FileType("w"),
help="Output file")
parser.add_argument("table", type=argparse.FileType("r"),
help="Input table view")
args = parser.parse_args()
keys = [k.strip() for k in args.table.readline().split("\t")]
# Sanitize the bad lines that start with a tab. This is most likely
# due to the poor formatting or bad copy/paste.
lines = []
for line in args.table.readlines():
if line.startswith("\t") and len(lines) > 0:
lines[-1] = lines[-1][:-len(os.linesep)] + line
else:
lines.append(line)
func = lambda k, x: datetime.datetime.strptime(x, "%m/%d/%Y") \
if k in keys[-4:] else x
args.output.write("projects:\n")
for line in lines:
row = {k:func(k, e.strip())
for k, e in zip(keys, line.split("\t"))}
args.output.write(_reference_format.format(**row))
| kprussing/resume | projects-import.py | projects-import.py | py | 6,217 | python | en | code | 0 | github-code | 36 |
74816953384 | from django.urls import include, path
from rest_framework import routers
from . import views
# DefaultRouter auto-generates the URL routes (and an API root view) for the
# views produced by each registered ViewSet.
router = routers.DefaultRouter()
router.register('categories', views.CategoryViewSet)
router.register('rooms', views.RoomViewSet)
router.register('plants', views.PlantViewSet)
# All router-generated URLs are mounted at the root of this app's URLconf.
urlpatterns = [
    path('', include(router.urls)),
]
| BParaszczak/plant_manager | plants/urls.py | urls.py | py | 413 | python | en | code | 0 | github-code | 36 |
17602594229 | from functools import wraps
import json
import os
import requests
import boto3
from sanic import Sanic, response
from sanic.exceptions import NotFound
from sanic.log import LOGGING_CONFIG_DEFAULTS
from sanic_cors import CORS
from sanic_limiter import Limiter, get_remote_address, RateLimitExceeded
from botocore.exceptions import ClientError
from sanic_prometheus import monitor
from erebor.errors import (error_response, UNAUTHORIZED,
INVALID_API_KEY,
RATE_LIMIT_EXCEEDED, ROUTE_NOT_FOUND)
from erebor.logs import logging_config
from erebor.sql import USER_ID_SQL
app = Sanic(log_config=logging_config
if not os.getenv('erebor_test') else LOGGING_CONFIG_DEFAULTS)
CORS(app, automatic_options=True)
limiter = Limiter(app,
global_limits=['50 per minute'],
key_func=get_remote_address)
def authorized():
    """Decorator factory: require a valid ``session_id`` cookie.

    On success the wrapped handler receives ``request['db']`` and
    ``request['session']`` (user ids, channel, session id).  Otherwise an
    UNAUTHORIZED error response is returned without calling the handler.
    """
    def decorator(f):
        @wraps(f)
        async def decorated_function(request, *args, **kwargs):
            db = request.app.pg
            cookie = request.cookies.get('session_id')
            if cookie:
                user_ids = await db.fetchrow(USER_ID_SQL, cookie)
                if user_ids is not None:
                    request['db'] = request.app.pg
                    request['session'] = {'user_id': user_ids['user_id'],
                                          'user_uid': user_ids['user_uid'],
                                          'channel': user_ids['channel'],
                                          'session_id': cookie}
                    res = await f(request, *args, **kwargs)
                    return res
                else:
                    # NOTE(review): the return value of error_response() is
                    # discarded here, so an unknown session falls through to
                    # the generic UNAUTHORIZED response below -- confirm this
                    # is intended rather than `return error_response(...)`.
                    error_response([INVALID_API_KEY])
            return error_response([UNAUTHORIZED])
        return decorated_function
    return decorator
@app.exception(RateLimitExceeded)
def handle_429(request, exception):
    # Translate sanic-limiter's exception into the API's error envelope.
    return error_response([RATE_LIMIT_EXCEEDED])
@app.exception(NotFound)
def handle_404(request, exception):
    # Unknown routes get the API's error envelope instead of sanic's HTML 404.
    return error_response([ROUTE_NOT_FOUND])
# REMOVE
@app.route('/jsonrpc', methods=['POST'])
@authorized()
async def json_rpc_bridge(request):
    """Forward the request body verbatim to a bitcoind-style JSON-RPC node.

    SECURITY NOTE(review): the RPC credentials and host are hard-coded in
    the URL below, and the upstream response is proxied back unfiltered.
    Per the "# REMOVE" marker above, this endpoint appears slated for
    deletion -- confirm and remove rather than shipping it.
    """
    url = "http://hoard:bombadil@shenron.hoardinvest.com:8332"
    headers = {'content-type': 'application/json'}
    payload = request.json
    rpc_response = requests.post(
        url, data=json.dumps(payload), headers=headers)
    return response.json(rpc_response.json())
def load_aws_secret(secret_name):
    """Fetch *secret_name* from AWS Secrets Manager and decode it as JSON.

    Returns the parsed secret dict, or None when the secret could not be
    retrieved or carries no string payload.
    """
    secret = None
    endpoint_url = "https://secretsmanager.us-east-2.amazonaws.com"
    region_name = "us-east-2"
    session = boto3.session.Session()
    client = session.client(
        service_name='secretsmanager',
        region_name=region_name,
        endpoint_url=endpoint_url
    )
    try:
        get_secret_value_response = client.get_secret_value(
            SecretId=secret_name
        )
    except ClientError as e:
        # Report the failure; `secret` stays None.
        if e.response['Error']['Code'] == 'ResourceNotFoundException':
            print("The requested secret " + secret_name + " was not found")
        elif e.response['Error']['Code'] == 'InvalidRequestException':
            print("The request was invalid due to:", e)
        elif e.response['Error']['Code'] == 'InvalidParameterException':
            print("The request had invalid params:", e)
    else:
        # BUG FIX: this extraction used to sit in the final `else:` of the
        # if/elif chain INSIDE the except handler.  On success the secret
        # was therefore never extracted (json.loads(None) -> TypeError),
        # and on an unrecognised ClientError the unbound response name
        # raised NameError.  It belongs on the try's success path.
        if 'SecretString' in get_secret_value_response:
            secret = get_secret_value_response['SecretString']
    if secret is None:
        return None
    return json.loads(secret)
if __name__ == '__main__':
    # Database credentials come only from AWS Secrets Manager; the secret
    # name is read from the environment.  os.environ[...] raises KeyError
    # when the variable is absent, so the else-branch below only fires
    # when the variable is set but empty.
    secret_name = os.environ['EREBOR_DB_AWS_SECRET']
    if secret_name:
        from erebor.db import db_bp
        secret = load_aws_secret(secret_name)
        app.db = dict(database=secret['dbname'],
                      user=secret['username'],
                      password=secret['password'],
                      host=secret['host'],
                      port=secret['port'])
        app.blueprint(db_bp)
    else:
        raise Exception("Missing database credentials")
    # Import and register the API blueprints at startup time.
    from erebor.api.users import users_bp
    from erebor.api.transactions import transactions_bp
    from erebor.api.support import support_bp
    from erebor.api.misc import misc_bp
    from erebor.api.prices import prices_bp
    app.blueprint(users_bp)
    app.blueprint(transactions_bp)
    app.blueprint(support_bp)
    app.blueprint(misc_bp)
    app.blueprint(prices_bp)
    # Expose Prometheus metrics, then serve.  Request access logging is
    # switched off when EREBOR_ENV == 'PROD'.
    monitor(app).expose_endpoint()
    app.run(host='0.0.0.0',
            port=8000,
            access_log=False if os.environ.get('EREBOR_ENV') == 'PROD'
            else True)
| MichaelHDesigns/erebor | erebor/erebor.py | erebor.py | py | 4,559 | python | en | code | 0 | github-code | 36 |
71117028264 | import pprint
import numpy as np
import matplotlib.pyplot as plt
import math
# mass
m = 5
# spring constant
k = 10
# damping coefficient
c = 1
# velocity history
vn_list = []
# position history
xn_list = []
# control input
un = 0
# total simulated time
t = 10
# integration step size
h = 0.001
# number of samples
n = int((t/h) + 1)
# spread of the system noise (stdv_x is the standard deviation,
# Sigma_x the variance)
stdv_x = 15
Sigma_x = stdv_x **2
T_s = np.linspace(0,t,n)
# Forward-Euler state transition for the state [velocity; position] of the
# mass-spring-damper:  v' = (1 - (c/m)h) v - (k/m)h x,  x' = x + h v.
mat_1 = np.matrix([[1-(c/m*h) , (-k/m)*h],[h,1]])
x = np.matrix([[0],[0]])
mat_3 = np.matrix([[h/m],[0]])
for i in range(n):
    if i > t*10:
        # After the first t*10 (= 100) steps, drive the system with a unit
        # force plus Gaussian noise of standard deviation stdv_x.
        rand = np.random.normal(0,stdv_x,(1,1))
        un = 1 + rand[0,0]
    x = (mat_1 * x) + (mat_3)*un
    xn_list.append(x[1,0])
    vn_list.append(x[0,0])
# plt.plot(T_s,xn_list,label = "true location")
# plt.grid()
# plt.xlabel('Time')
# plt.ylabel('location')
# plt.legend()
# plt.show()
| itolab2022/Altitude_Control | Kalman_spring/spring.py | spring.py | py | 881 | python | ja | code | 0 | github-code | 36 |
37217205291 | #!/usr/bin/env python
# coding: utf-8
# In[2]:
#1. Write a Python Program to Find the Factorial of a Number?
def fact(n):
    """Return n! for a non-negative integer n.

    Iterative instead of recursive, so large n cannot hit the recursion
    limit; negative input now raises ValueError instead of recursing
    until RecursionError.
    """
    if n < 0:
        raise ValueError("factorial is not defined for negative numbers")
    result = 1
    for i in range(2, n + 1):
        result *= i
    return result

fact(5)
# In[7]:
#2. Write a Python Program to Display the multiplication Table?
number = int(input ("Enter the number of which the user wants to print the multiplication table: "))
count = 1
# we are using while loop for iterating the multiplication 10 times
print ("The Multiplication Table of: ", number)
while count <= 10:
    # NOTE(review): "number = number *1" multiplies by one -- a no-op that
    # never changes number; the table row is produced by the print below.
    number = number *1
    print (number, 'x', count, '=', number * count)
    count += 1
# In[17]:
#3. Write a Python Program to Print the Fibonacci sequence?
# Memo table of Fibonacci numbers, grown on demand; fib_array[i] == F(i).
fib_array = [0, 1]

def fibonacci(n):
    """Return the n-th Fibonacci number (F(0)=0, F(1)=1).

    The previous double recursion was exponential in n and appended every
    intermediate result to fib_array, filling it with out-of-order
    duplicates.  The memoised loop below runs in O(n) and keeps fib_array
    a clean, ordered prefix of the sequence.
    """
    if n < 0:
        return "Incorrect input"
    while len(fib_array) <= n:
        fib_array.append(fib_array[-1] + fib_array[-2])
    return fib_array[n]

fibonacci(9)
print(fib_array)
# In[24]:
#4. Write a Python Program to Check Armstrong Number?
def Armstrong(n, o):
    """Report whether n is an Armstrong number using power o.

    n is an Armstrong number when the sum of its digits, each raised to
    the o-th power, equals n itself.  Prints the verdict as before and
    additionally returns it as a bool, so the check is reusable in code.
    (Also renames the accumulator: the old name shadowed builtin sum().)
    """
    total = 0
    temp = n
    while temp > 0:
        digit = temp % 10
        total += digit ** o
        temp = temp // 10
    if n == total:
        print(n, "is an Armstrong number")
        return True
    print(n, "is not an Armstrong number")
    return False
num = int(input("Enter Number: "))
order = len(str(num))
Armstrong(num,order)
# In[32]:
#5. Write a Python Program to Find Armstrong Number in an Interval?
#1*1 + 6*6 = 37 so it is not an Armstrong Number.
x=int(input("lower limit: "))
y=int(input("upper limit: "))
print("Armstrong Numbers are: ")
for Number in range(x,y):
digits=0
temp = Number
while temp > 0: # no of digits
digits = digits+1
temp = temp//10
sum = 0
temp = Number
while temp > 0: # calculate armstrong number
last_digit = temp % 10
sum = sum +(last_digit**digits)
temp = temp//10
if Number == sum:
print(Number)
# In[2]:
#6. Write a Python Program to Find the Sum of Natural Numbers?
# Read n and print the sum 1 + 2 + ... + n by counting n down to zero.
# (The closed form n*(n+1)//2 would avoid the loop.)
n = int(input())
if n <0:
    print("Enter a Natural Number")
elif n == 0:
    print("0")
else:
    sumn = 0
    while n > 0:
        sumn += n
        n -= 1
    print(sumn)
# In[ ]:
| 16anshul/basic-programmin-in-python | basic 4.py | basic 4.py | py | 2,344 | python | en | code | 0 | github-code | 36 |
35015230469 | import importlib
from sklearn.cluster import SpectralClustering
import clusters_optimizer_base as co
importlib.reload(co)
class SpectralClusterOptimizer(co.ClustersOptimizerBase):
    """Clusters data with scikit-learn's SpectralClustering.

    Reads self.num_clusters and self.seed, which are assumed to be set by
    ClustersOptimizerBase -- confirm against the base class definition.
    """
    def optimize(self, data):
        # 'discretize' label assignment is deterministic given random_state;
        # n_init=100 restarts the final clustering stage 100 times.
        obj = SpectralClustering(
            n_clusters=self.num_clusters,
            assign_labels='discretize',
            random_state=self.seed,
            n_init=100,
        )
        obj.fit(data)
        return obj.labels_
| morganstanley/MSML | papers/Clustering_via_Dual_Divergence_Maximization/spectral_clusters_optimizer.py | spectral_clusters_optimizer.py | py | 494 | python | en | code | 12 | github-code | 36 |
10501857645 | import os
from aiohttp import Fingerprint
import cv2
from matplotlib import pyplot as plt
from Matcher import *
from random import *
# Authentication class will serve as an authenticator for one person
class Authentication:
    """One-probe fingerprint authenticator.

    Matches a single probe image against a small, randomly sampled gallery
    of enrolled ("Real") images and grants access when the best match
    score clears the threshold.
    """
    def __init__(self, probe_img, data_path, folder, threshold=0.6):
        self.probe_img = probe_img  # probe image file name
        self.genuine_scores = []  # scores of accepted (genuine) attempts
        self.impostor_scores = []  # scores of rejected (impostor) attempts
        self.authentication_database = []  # randomly sampled gallery of enrolled images
        self.data_path = data_path  # e.g. "Dataset"
        self.folder = folder  # e.g. "Probe"
        self.match_dictionary = {}  # image file name -> match score against the probe
        self.threshold = threshold  # accept/reject cut-off on score/100

    def create_random_authentication_database(self) -> None:
        """Sample 5 random images from <data_path>/Real into the gallery.

        Each draw is independent, so duplicates are possible.
        """
        real_img_path = os.path.join(self.data_path, "Real")
        real_img_file_names = os.listdir(real_img_path)
        for _ in range(0, 5):
            random_idx_for_real_img = randint(0, len(real_img_file_names) - 1)
            self.authentication_database.append(real_img_file_names[random_idx_for_real_img])

    def create_match_dict(self) -> None:
        """Score the probe against every gallery image.

        Fills self.match_dictionary with {image file name: SIFT/FLANN
        match score}.  (The original also built an unused listing of the
        Real folder and an unused sorted copy of the dictionary; both
        dead computations were removed.)
        """
        probe_img_info_list = self.probe_img.split("_")
        # Probe files live in a per-subject folder named by the prefix
        # before the first underscore -- confirm against the dataset layout.
        probe_img_folder = probe_img_info_list[0]
        database_path = os.path.join(self.data_path, 'Real')
        probe_path = os.path.join(self.data_path, self.folder, probe_img_folder, self.probe_img)
        probe_image = cv2.imread(probe_path)
        for image_description in self.authentication_database:
            authentication_database_image_path = os.path.join(database_path, image_description)
            real_image = cv2.imread(authentication_database_image_path)
            M = Matcher(real_image, probe_image)
            self.match_dictionary[image_description] = M.get_sift_flann_match_score()

    def get_prediction(self) -> int:
        """Accept (1) or reject (0) the probe based on its best match.

        The best score (divided by 100) is compared against the threshold
        and recorded in genuine_scores or impostor_scores respectively.
        """
        sorted_match_dict = sorted(self.match_dictionary.items(), key=lambda x: x[1])
        best_score = sorted_match_dict[-1][1] / 100
        if best_score > self.threshold:
            print('Access Granted')
            self.genuine_scores.append(best_score)
            return 1
        else:
            self.impostor_scores.append(best_score)
            # print('Access Denied')
            return 0
| Dorukozar/Fingerprint-Matcher-and-Evaluation | Authentication.py | Authentication.py | py | 3,865 | python | en | code | 0 | github-code | 36 |
24730881807 | from sklearn import tree
import numpy as np
# Toy training set: two 2-D samples per class (class 1 lower-left,
# class 2 upper-right).
X = np.array([[-1,-1],[-2,-1],[1,1],[2,1]])
y = np.array([1,1,2,2])
#
# X = [[0, 0], [1, 1]]
# Y = [0, 1]
# Fit an untuned CART decision tree on the four samples.
clf = tree.DecisionTreeClassifier()
# clf = clf.fit(X, y)
clf = clf.fit(X, y)
print(clf)
# Classify one point from each region; expected to follow the training
# halves (class 1, then class 2) -- the exact split is up to the tree.
print(clf.predict([[-0.8,-1]]))
print(clf.predict([[5,6]]))
12366342382 | """
Created on 28 Feb 2013
@author: jmht
"""
import os
import sys
from ample.util import ample_util
def mrbump_cmd(name, mtz, mr_sequence, keyword_file):
    """Build the shell command line that runs MrBUMP.

    The executable lives under $CCP4/bin; on Windows it carries the
    platform script extension.
    """
    exe_name = "mrbump"
    if sys.platform.startswith("win"):
        exe_name += ample_util.SCRIPT_EXT
    mrbump = os.path.join(os.environ["CCP4"], "bin", exe_name)
    cmd = [
        mrbump,
        "KEYIN", "{0}".format(keyword_file),
        "HKLIN", "{0}".format(mtz),
        "SEQIN", "{0}".format(mr_sequence),
        "HKLOUT", "{0}.mtz".format(name),
        "XYZOUT", "{0}.pdb".format(name),
    ]
    return " ".join(cmd)
def keyword_dict(ensemble_pdb, name, amoptd, extra_options={}):
    """Pull the MrBUMP-relevant keywords out of the main AMPLE dictionary.

    Entries from extra_options are then added/overridden, and finally the
    ensemble's name and pdb path are recorded.  (extra_options is only
    read, never mutated, so the mutable default is harmless.)
    """
    keywords = [
        'arpwarp_cycles',
        'buccaneer_cycles',
        'debug',
        'existing_mr_solution',
        'F',
        'FREE',
        'mr_keys',
        'mr_sg_all',
        'mrbump_programs',
        'native_pdb',
        'nmasu',
        'phaser_kill',
        'phaser_rms',
        'shelx_cycles',
        'shelxe_exe',
        'shelxe_rebuild_arpwarp',
        'shelxe_rebuild_buccaneer',
        'SIGF',
        'refine_rebuild_arpwarp',
        'refine_rebuild_buccaneer',
        'use_shelxe',
    ]
    # dict.items() behaves the same on Python 2 and 3, so one comprehension
    # replaces the old per-version iteritems()/items() branches.
    key_dict = {k: v for k, v in amoptd.items() if k in keywords}
    # Per-ensemble overrides and additions win over the global options.
    key_dict.update(extra_options)
    # Finally record this ensemble's identity.
    key_dict['name'] = name
    key_dict['ensemble_pdb'] = ensemble_pdb
    return key_dict
def mrbump_keyword_file(odict, fixed_iden=0.6):
    """Render the MrBUMP keyword file contents from the options in *odict*.

    fixed_iden is the IDEN value written with FIXED_XYZIN when an existing
    MR solution is supplied (default 0.6).  Returns the keyword text with
    one directive per line, terminated by END.
    """
    localfile = 'LOCALFILE {0} CHAIN ALL RMS {1}'.format(odict['ensemble_pdb'], odict['phaser_rms'])
    if 'ncopies' in odict and odict['ncopies'] > 0:
        localfile += ' COPIES {0}'.format(odict['ncopies'])
    lines = [
        'LABIN SIGF={0} F={1} FreeR_flag={2}'.format(odict['SIGF'], odict['F'], odict['FREE']),
        'JOBID {0}_mrbump'.format(odict['name']),
        'MRPROGRAM {0}'.format(" ".join(odict['mrbump_programs'])),
        localfile,
        # No model searches or updates: the model is the supplied local file.
        'SCOPSEARCH False',
        'PQSSEARCH False',
        'SSMSEARCH False',
        'DOFASTA False',
        'DOPHMMER False',
        'DOHHPRED False',
        'FAST False',
        'MDLD False',
        'MDLC False',
        'MDLM False',
        'MDLP False',
        'MDLS False',
        'MDLU True',
        'UPDATE False',
        'BUCC {0}'.format(odict['refine_rebuild_buccaneer']),
        'BCYCLES {0}'.format(odict['buccaneer_cycles']),
        'ARPWARP {0}'.format(odict['refine_rebuild_arpwarp']),
        'ACYCLES {0}'.format(odict['arpwarp_cycles']),
        'SHELXE {0}'.format(odict['use_shelxe']),
        'SHLXEXE {0}'.format(odict['shelxe_exe']),
        'SCYCLES {0}'.format(odict['shelx_cycles']),
        'FIXSG True',
        'PJOBS 1',
        'CHECK False',
        'LITE True',
        'PICKLE False',
        'TRYALL True',
        'USEACORN False',
        'USEENSEM False',
        'CLEAN False',
        'DEBUG {0}'.format(odict['debug']),
    ]
    # Rebuild the SHELXE trace with whichever rebuilder(s) were selected.
    if odict['shelxe_rebuild_arpwarp'] or odict['shelxe_rebuild_buccaneer']:
        lines.append('SXREBUILD True')
        if odict['shelxe_rebuild_buccaneer']:
            lines.append('SXRBUCC True')
        if odict['shelxe_rebuild_arpwarp']:
            lines.append('SXRARPW True')
    if odict['nmasu'] > 0:
        lines.append('NMASU {0}'.format(odict['nmasu']))
    if odict['existing_mr_solution']:
        lines.append('FIXED_XYZIN {0} IDEN {1}'.format(odict['existing_mr_solution'], fixed_iden))
    if odict['native_pdb']:
        lines.append('PDBNATIVE {0}'.format(odict['native_pdb']))
    if odict['phaser_kill'] > 0:
        lines.append('PKEY KILL TIME {0}'.format(odict['phaser_kill']))
    if odict['mr_sg_all']:
        lines.append('PKEY SGALTERNATIVE SELECT ALL')
    # Extra keywords: every entry of mr_keys is a list [KEYWORD, VALUE0, ...].
    if odict['mr_keys']:
        for key_parts in odict['mr_keys']:
            lines.append(" ".join(key_parts))
    lines.append('END')
    return "\n".join(lines) + "\n"
| rigdenlab/ample | ample/util/mrbump_cmd.py | mrbump_cmd.py | py | 4,898 | python | en | code | 6 | github-code | 36 |
35132638275 | import numpy as np
import gin.tf
@gin.configurable(whitelist=["use_entities_order"])
class ExistingEdgesFilter(object):
    """Filters candidate edges that already exist in the graph.

    When one position of an edge is substituted with every possible entity
    id, candidates that are real graph edges are dropped so they do not
    compete with the target edge.
    """
    def __init__(self, entities_count, graph_edges, use_entities_order=True):
        # Candidates substitute entity ids 0 .. entities_count-1.
        self.entities_count = entities_count
        # Set of edge-id tuples for O(1) membership tests.
        self.set_of_graph_edges = set(graph_edges)
        # Chooses the filtering branch below (gin-configurable).
        self.use_entities_order = use_entities_order
    def get_values_corresponding_to_existing_edges(self, edge_ids, mask_index, values):
        """Return (filtered values, target index) after dropping known edges.

        edge_ids: 1-D int array describing one edge; the entry at
        mask_index is the one being substituted (assumes values has one
        entry per entity id -- TODO confirm with callers).
        """
        # Id of the true entity at the masked position -- its candidate row
        # is the original edge itself.
        output_index = edge_ids[mask_index]
        # One candidate edge per entity: copy edge_ids entities_count times
        # and substitute every possible id at the masked position.
        candidate_edges = np.tile(edge_ids, (self.entities_count, 1))
        candidate_edges[:, mask_index] = np.arange(self.entities_count, dtype=np.int32)
        if self.use_entities_order:
            # Keep non-edges plus the true edge, preserving entity-id order,
            # and locate the true edge's position in the filtered array.
            edges_to_keep_indexes = [
                index for index, edge_ids in enumerate(candidate_edges)
                if tuple(edge_ids) not in self.set_of_graph_edges or index == output_index
            ]
            filtered_edges = candidate_edges[edges_to_keep_indexes]
            target_index = np.where((filtered_edges == edge_ids).all(axis=1))[0][0]
            return values[edges_to_keep_indexes], target_index
        # Otherwise: keep only non-edges (true edge excluded), append the
        # true edge's value last, then swap it to a random position.
        edges_to_keep_indexes = [
            index for index, edge_ids in enumerate(candidate_edges)
            if tuple(edge_ids) not in self.set_of_graph_edges and index != output_index
        ]
        filtered_values = np.concatenate((values[edges_to_keep_indexes], [values[output_index]]))
        target_index = np.random.randint(len(filtered_values))
        filtered_values[-1], filtered_values[target_index] = filtered_values[target_index], filtered_values[-1]
        return filtered_values, target_index
| Dawidsoni/relation-embeddings | src/optimization/existing_edges_filter.py | existing_edges_filter.py | py | 1,625 | python | en | code | 0 | github-code | 36 |
import socket
# Create a TCP socket object
skfd=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
# Bind to the loopback address on port 7777
skfd.bind(('127.0.0.1',7777))
# Turn it into a listening socket (backlog of 10 pending connections)
skfd.listen(10)
# NOTE(review): L is never used anywhere below.
L=[]
i=0
while True:
    print('waiting for connect...')
    # Block until a client connects
    sk1,adr1=skfd.accept()
    print('i get address:',adr1)
    while True:
        # Receive a message from the client; empty bytes means the peer closed
        data=sk1.recv(1024)
        if not data:
            break
        print('i get message:',data.decode())
        # Send a reply to the client (send() returns the number of bytes sent)
        data1=sk1.send('i wanna be a bytes'.encode())
        print('i send message:',data1)
        print('已经进行了', i + 1, '次收发工作')
        i += 1
    # Close the per-client socket (TCP four-way teardown with this client)
    sk1.close()
    # skfd.close() # would close the listening socket itself
6562172415 | from django.shortcuts import render,redirect
from .models import *
from django.contrib.auth import login, authenticate, logout
from django.contrib import messages
from django.db.models import Q
# Create your views here.
def login_page(request):
    # Render the login form; credential checking happens in init_login.
    return render(request, "index.html")
def init_login(request):
    """Authenticate the posted credentials and start a session.

    Redirects to the search page on success, or back to the login page
    (with an error message) on failure.
    """
    username = request.POST.get("username", None)
    password = request.POST.get("password", None)
    user = authenticate(request, username = username, password = password)
    if user is not None:
        login(request, user)
        return redirect("searchapp:search")
    else:
        messages.error(request, "Invalid Login Credentials")
        return redirect("searchapp:login")
def search_bar(request):
    """Filter students by any combination of name, school and graduation year.

    Builds the queryset filter dynamically instead of enumerating all
    2**3 - 1 field combinations in an if/elif chain (the previous code),
    which also makes adding a fourth criterion a one-line change.
    Behaviour is unchanged: blank/missing criteria are ignored, and with
    no criteria at all an empty string is passed to the template.
    """
    fullname = request.POST.get("fullname", None)
    school = request.POST.get("school", None)
    grad_year = request.POST.get("grad_year", None)

    # Collect one Q object per non-empty criterion.
    criteria = []
    if fullname:
        criteria.append(Q(fullname=fullname))
    if school:
        criteria.append(Q(school__name=school))
    if grad_year:
        criteria.append(Q(year_of_grad=grad_year))

    if criteria:
        # AND all the criteria together, exactly as the old explicit
        # combinations did.
        combined = criteria[0]
        for extra in criteria[1:]:
            combined &= extra
        student = Student.objects.filter(combined)
    else:
        student = ""
    context = {"student": student}
    return render(request, "searchbar.html", context)
def logout_view(request):
    # End the session and send the user back to the sign-in page.
    logout(request)
    return redirect("searchapp:sign-in")
| Ennyola/Search-System | searchSystem/searchApp/views.py | views.py | py | 2,161 | python | en | code | 0 | github-code | 36 |
23682916986 | import re
import pandas as pd
from bs4 import BeautifulSoup
df = pd.DataFrame.from_csv("realtor.csv", sep="|", encoding="ISO-8859-1")
print(df.head)
print ("done")
dftemp = df
for i, (idx, ser) in enumerate(dftemp.iterrows()):
html = ser["metaHTML"]
bs = BeautifulSoup(html)
for li in bs.find_all("li"):
temp = li.get("data-label").split('-')
colname = temp[len(temp)-1]
if (colname not in df.columns):
df[colname] = None
val = li.find("span").text
df[colname][idx] = val
colname = "broker"
html = ser["broker"]
bs = BeautifulSoup(html)
if (colname not in df.columns):
df[colname] = None
df[colname][idx] = re.sub("Brokered by", '', bs.text)
html = ser["geo"]
bs = BeautifulSoup(html)
elems = bs.find_all("meta")
for e in elems:
colname = e.get("itemprop")
if (colname not in df.columns):
df[colname] = None
val = e.get("content")
df[colname][idx] = val
print (i)
del df["metaHTML"]
del df["geo"]
print(df.head)
df.to_csv("realtor2.csv", sep="|", quotechar='"',index=False )
print ("done")
| jhmuller/real_estate | realtor2.py | realtor2.py | py | 1,198 | python | en | code | 0 | github-code | 36 |
74611006504 | # coding: utf-8
from __future__ import print_function
import json
from math import log10
import numpy as np
def fit(x, y):
    """Least-squares line through (x, y); returns the fitted line as a callable."""
    mx, my = np.mean(x), np.mean(y)
    dx = x - mx
    slope = np.sum(dx * (y - my)) / np.sum(dx ** 2)
    intercept = my - slope * mx

    def line(x1):
        return slope * x1 + intercept

    return line
def fit_thd(mol_data, lvl=0.):
    """Interpolate 'thd' at *lvl* via a local linear fit of points within +/-1 of lvl."""
    levels = np.array(mol_data['lvl'])
    thds = np.array(mol_data['thd'])
    window = (levels > lvl - 1) & (levels < lvl + 1)
    return fit(levels[window], thds[window])(lvl)
def fit_mol(mol_data):
    """Invert lvl(thd): linear fit of 'lvl' against 'thd' on (-32, -28), evaluated at -30.46."""
    thds = np.array(mol_data['thd'])
    levels = np.array(mol_data['lvl'])
    window = (thds > -32) & (thds < -28)
    return fit(thds[window], levels[window])(-30.46)
data = json.load(open("test.json"))
out = open('datasheet.dat', 'w')
for b, m in data:
print(
b,
20 * log10(b / 0.5),
m['reflevel'],
fit_thd(m['mol_data']),
m['s01'],
m['s63'],
m['s10'],
m['s16'],
fit_mol(m['mol_data']),
m['sol10'],
m['sol16'],
m['noise'],
file=out)
out.close()
| andreas-schmidt/tapetool | json2ds.py | json2ds.py | py | 1,133 | python | en | code | 0 | github-code | 36 |
43160456317 | import numpy as np
import matplotlib.pyplot as plt
def sigmoid(x):
    """Logistic function 1 / (1 + e^-x), applied element-wise by numpy."""
    z = np.exp(-x)
    return 1 / (1 + z)
# 01. Basic sigmoid curve over [-5, 5).
x = np.arange(-5., 5., 0.1)
y = sigmoid(x)
plt.figure(0)
plt.plot(x, y, 'g')
plt.plot([0,0],[1.,0.], ':')  # dotted vertical guide at x = 0
plt.title('sigmoid func')
# plt.show()

# 02. sigmoid(a*x): the larger `a` is, the closer the curve gets to a step function.
y1 = sigmoid(0.5 * x)
y2 = sigmoid(x)
y3 = sigmoid(2 * x)
plt.figure(1)
plt.plot(x, y1, 'r')
plt.plot(x, y2, 'g')
plt.plot(x, y3, 'b')
plt.title('sigmoid func')
# plt.show()

# 03. sigmoid(x + b): the larger `b` is, the higher the curve sits at any fixed x
# (the curve shifts left).
y1 = sigmoid(x)
y2 = sigmoid(x + 1)
y3 = sigmoid(x + 2)
plt.figure(2)
plt.plot(x, y1, 'r')
plt.plot(x, y2, 'g')
plt.plot(x, y3, 'b')
plt.title('sigmoid func')
plt.show() | minssoj/Learning_Pytorch | day2/01.sigmoidFunctionEX.py | 01.sigmoidFunctionEX.py | py | 741 | python | en | code | 0 | github-code | 36 |
32442603242 | import sqlite3
# Module-wide SQLite connection and cursor shared by every helper below.
conn = sqlite3.connect('bancodedados.db')
cursor = conn.cursor()
# General state: name of the currently authenticated user (set by fazerlogin()).
usuario_logado = ""
#cria tabelas
def modularTable():#Victor
    """First-run setup menu: create database tables one by one.

    Recurses after each creation so the operator can add several tables;
    option 9 finishes setup and hands control to firstAccess().
    """
    clear()
    escolha = int(input('\nBem vindo ao sistema Meditech\nPrimeiramente adicione os modulos com que deseja trabalhar\n\n1 - funcionarios\n2 - Veiculos\n3 - Agendamentos\n4 - Equipamentos\n5 - Paciente\n6 - login\n7 - anamnese\n8 - leito\n9 - finalizar.\n\nQuais sao as tabelas de dados que deseja utilizar?'))
    criadores = {
        1: tabela_funcionarios,
        2: tabela_veiculos,
        3: tabela_agendamentos,
        4: tabela_equipamento,
        5: tabela_paciente,
        7: tabela_anamnese,
        8: tabela_leito,
    }
    if escolha == 9:
        # Setup finished; route to the login flow.
        firstAccess()
    elif escolha == 6:
        # The login table also gets a default admin account.
        tabela_login()
        cadastro_login('admin', '123', 'gerente')
        modularTable()
    elif escolha in criadores:
        criadores[escolha]()
        modularTable()
    else:
        print('Opcao invalida')
        modularTable()
def firstAccess():
    """Route to login when any table already exists, otherwise to first-run setup."""
    cursor.execute("SELECT name FROM sqlite_master WHERE type='table' ORDER BY name")
    tabelas = cursor.fetchall()
    if tabelas:
        fazerlogin()
    else:
        modularTable()
def fazerlogin():
    """Prompt for credentials, look them up in `login`, and open the menu for
    the matching role; re-prompts on failure.

    Fix: the original executed the same SELECT twice (once to count, once to
    iterate); the rows are now fetched a single time and reused.
    """
    clear()
    print('\nBem vindo ao sistema de gerencimento Meditech:\n')
    login = input('Digite o seu login:\n')
    senha = input('Digite sua senha:\n')
    # NOTE(review): credentials are stored and compared in plaintext —
    # consider hashing before this ships.
    cursor.execute('SELECT * FROM login WHERE nome_usuario = ? and senha = ?', (login, senha))
    linhas = cursor.fetchall()
    if linhas:
        for linha in linhas:
            global usuario_logado
            usuario_logado = linha[1]
            area = linha[3]
            if area == 'medico':
                menu_medico()
            if area == 'engenheiro biomedico':
                menu_engbio()
            if area == 'atendente':
                menu_atendente()
            if area == 'gerente':
                menu_manager()
    else:
        input('\nLogin ou senha incorretos, pressione qualquer tecla')
        fazerlogin()
def tabela_funcionarios():#marianne
    """Create the `funcionarios` (employees) table."""
    cursor.execute('CREATE TABLE funcionarios(nome TEXT NOT NULL, profissao TEXT NOT NULL, matricula VARCHAR(25) NOT NULL, id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT);')
def tabela_veiculos():#marianne
    """Create the `veiculos` (vehicles) table."""
    cursor.execute('CREATE TABLE veiculos(id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, placa VARCHAR(8) NOT NULL, status TEXT NOT NULL, motorista TEXT NOT NULL, paramedico TEXT NOT NULL, paciente TEXT NOT NULL);')
def tabela_agendamentos():#marianne
    """Create the `agendamentos` (appointments) table."""
    cursor.execute('CREATE TABLE agendamentos(id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, id_paciente INTEGER NOT NULL, id_medico INTEGER NOT NULL, data VARCHAR(10) NOT NULL, horario VARCHAR(5));')
def tabela_equipamento():# Luiz Eduardo
    """Create the `equipamento` (equipment) table."""
    cursor.execute('CREATE TABLE equipamento(id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, nome TEXT NOT NULL, funcao TEXT NOT NULL, preco INTEGER, status TEXT NOT NULL, data DATE NOT NULL);')
def tabela_paciente():# Luiz Eduardo
    """Create the `paciente` (patient) table."""
    cursor.execute('CREATE TABLE paciente(id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, nome TEXT NOT NULL, idade INTEGER, sexo TEXT NOT NULL, peso INTEGER);')
def tabela_leito(): #luiz henrique
    """Create the `dadosleito` (hospital-bed) table."""
    cursor.execute('CREATE TABLE dadosleito( id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, nome TEXT NOT NULL, num_leito INTEGER NOT NULL)')
def tabela_login(): #luiz henrique
    """Create the `login` (credentials) table."""
    cursor.execute('CREATE TABLE login( id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, nome_usuario TEXT NOT NULL, senha VARCHAR(10) NOT NULL, area TEXT NOT NULL)')
def tabela_anamnese(): #luiz henrique
    """Create the `anamnese` (medical-history) table."""
    cursor.execute('CREATE TABLE anamnese (id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, id_paciente INTEGER NOT NULL, o_que_sente TEXT NOT NULL, onde_doi TEXT NOT NULL, quando_comecou INTEGER NOT NULL)')
def clear():#marianne
    """Crude screen clear: push previous output away with 100 blank lines."""
    print("\n" * 100)
# funçoes
def lista_equipamento():
    """Print every row of the `equipamento` table to stdout."""
    verifica = cursor.execute('SELECT * FROM equipamento')
    for linha in verifica.fetchall():
        print(linha)
def insere_equipamento(nome, funcao, preco, status, data): # Luiz Eduardo
    """Insert one equipment row and commit."""
    cursor.execute('INSERT INTO equipamento(nome,funcao,preco,status,data)VALUES(?,?,?,?,?)',
                   (nome, funcao, preco, status, data))
    conn.commit()
def remove_equipamento(id_equipamento): # Luiz Eduardo
    """Delete the equipment row whose primary key equals id_equipamento."""
    # Fix: bind the id as a one-element tuple.  The original passed the bare
    # value, which sqlite3 rejects (int) or iterates character-by-character
    # (multi-character string).
    cursor.execute('DELETE FROM equipamento WHERE id =?', (id_equipamento,))
    conn.commit()
def alterar_equipamento(novo_nome, nova_funcao, novo_preco, novo_status, nova_data, id_equipamento): # Luiz Eduardo
    """Overwrite every field of the equipment row identified by id_equipamento.

    Note the parameter order: the row id comes LAST.
    """
    cursor.execute('UPDATE equipamento SET nome = ?, funcao= ?, preco = ?,status = ?, data = ? WHERE id = ?',
                   (novo_nome, nova_funcao, novo_preco, novo_status, nova_data, id_equipamento))
    conn.commit()
def insere_funcionarios (nome, profissao, matricula):#marianne
    """Insert one employee row and commit."""
    cursor.execute('INSERT INTO funcionarios(nome, profissao, matricula) VALUES (?,?,?)', (nome, profissao, matricula))
    conn.commit()
def remove_funcionarios (id_funcionario):#marianne
    """Delete the employee row whose primary key equals id_funcionario."""
    # Fix: one-element tuple.  `(id_funcionario)` is just the bare value, so
    # sqlite3 mis-binds any id longer than one character.
    cursor.execute("DELETE FROM funcionarios WHERE id = ?", (id_funcionario,))
    conn.commit()
def altera_funcionarios(alteracao_campo, alteracao, id_funcionario):#marianne
    """Update one column of an employee row; alteracao_campo selects the column."""
    comandos = {
        'nome': "UPDATE funcionarios SET nome = ? WHERE id = ?",
        'profissao': "UPDATE funcionarios SET profissao = ? WHERE id = ?",
        'matricula': "UPDATE funcionarios SET matricula = ? WHERE id = ?",
    }
    sql = comandos.get(alteracao_campo)
    if sql is not None:
        cursor.execute(sql, (alteracao, id_funcionario))
    conn.commit()
def insere_veiculos(placa, status, motorista, paramedico, paciente):#marianne
    """Insert one vehicle row and commit."""
    cursor.execute("INSERT INTO veiculos(placa, status, motorista, paramedico, paciente) VALUES (?,?,?,?,?)", (placa, status, motorista, paramedico, paciente))
    conn.commit()
def remove_veiculos(id_veiculo):#marianne
    """Delete the vehicle row whose primary key equals id_veiculo."""
    # Fix: one-element tuple instead of the bare value (see remove helpers).
    cursor.execute("DELETE FROM veiculos WHERE id = ?", (id_veiculo,))
    conn.commit()
def altera_veiculos(alteracao_campo, alteracao, id_veiculo):#marianne
    """Update one column of a vehicle row; alteracao_campo selects the column."""
    comandos = {
        'placa': "UPDATE veiculos SET placa = ? WHERE id = ?",
        'status': "UPDATE veiculos SET status = ? WHERE id = ?",
        'motorista': "UPDATE veiculos SET motorista = ? WHERE id = ?",
        'paramedico': "UPDATE veiculos SET paramedico = ? WHERE id = ?",
        'paciente': "UPDATE veiculos SET paciente = ? WHERE id = ?",
    }
    sql = comandos.get(alteracao_campo)
    if sql is not None:
        cursor.execute(sql, (alteracao, id_veiculo))
    conn.commit()
def insere_agendamentos(id_paciente, id_medico, data, horario):#marianne
    """Insert one appointment row and commit."""
    cursor.execute("INSERT INTO agendamentos(id_paciente, id_medico, data, horario) VALUES (?,?,?,?)", (id_paciente, id_medico, data, horario))
    conn.commit()
def remove_agendamentos(id_paciente):#marianne
    """Delete the appointment row whose primary key equals id_paciente."""
    # Fix: one-element tuple instead of the bare value (see remove helpers).
    cursor.execute("DELETE FROM agendamentos WHERE id = ?", (id_paciente,))
    conn.commit()
def altera_agendamentos(alteracao_campo, alteracao, id_agendamentos):#marianne
    """Update one column of an appointment row; alteracao_campo selects the column."""
    comandos = {
        'id_paciente': "UPDATE agendamentos SET id_paciente = ? WHERE id = ?",
        'id_medico': "UPDATE agendamentos SET id_medico = ? WHERE id = ?",
        'data': "UPDATE agendamentos SET data = ? WHERE id = ?",
        'horario': "UPDATE agendamentos SET horario = ? WHERE id = ?",
    }
    sql = comandos.get(alteracao_campo)
    if sql is not None:
        cursor.execute(sql, (alteracao, id_agendamentos))
    conn.commit()
def cadastro_login(nome_usuario, senha, area):
    """Insert one credential row (username, password, role) and commit."""
    # NOTE(review): the password is stored in plaintext — confirm acceptable.
    cursor.execute('INSERT INTO login(nome_usuario, senha, area) VALUES (?, ?, ?)', (nome_usuario, senha, area))
    conn.commit()
def remove_login(id_usuario):
    """List all login rows, then delete the one whose id equals id_usuario."""
    mostrar = cursor.execute('SELECT * FROM login')
    for linha in mostrar.fetchall():
        print(linha)
    # Fix: one-element tuple instead of the bare value (see remove helpers).
    cursor.execute('DELETE FROM login WHERE id = ?', (id_usuario,))
    conn.commit()
def altera_login():
    """Interactively change a username and then a password.

    NOTE(review): rows are matched by the OLD value, not by id — every account
    sharing the same username/password would be updated; verify this is intended.
    """
    login = input('Digite seu login:\n')
    novo_login = input('Digite o novo login:\n')
    cursor.execute('UPDATE login SET nome_usuario = ? WHERE nome_usuario = ?', (novo_login, login))
    senha = input('Digite a senha:')
    nova_senha = input('Digite a nova senha:\n')
    cursor.execute('UPDATE login SET senha = ? WHERE senha = ?', (nova_senha, senha))
    conn.commit()
def insere_paciente(nome, idade, sexo, peso):# Luiz Eduardo
    """Insert one patient row and commit."""
    cursor.execute('INSERT INTO paciente(nome,idade,sexo,peso)VALUES(?,?,?,?)',(nome,idade,sexo,peso))
    conn.commit()
def remove_paciente(id_paciente):# Luiz Eduardo
    """Delete the patient row whose primary key equals id_paciente."""
    # Fix: one-element tuple instead of the bare value (see remove helpers).
    cursor.execute('DELETE FROM paciente WHERE id=?', (id_paciente,))
    conn.commit()
def cadastra_leito(nome, numero): #luiz h
    """Assign a patient name to a bed number and commit."""
    cursor.execute('INSERT INTO dadosleito(nome, num_leito) VALUES (?, ?)', (nome, numero))
    conn.commit()
def remove_leito(id_paciente): #luiz h
    """Delete the bed row whose primary key equals id_paciente."""
    # Fix: one-element tuple instead of the bare value (see remove helpers).
    cursor.execute("DELETE FROM dadosleito WHERE id = ?", (id_paciente,))
    conn.commit()
def insere_anamnese(id_paciente, onde_doi, o_que_sente, quando_comecou): #luiz h
    """Insert one anamnesis (medical-history interview) row and commit."""
    cursor.execute('INSERT INTO anamnese(id_paciente, onde_doi, o_que_sente, quando_comecou) VALUES (?,?,?,?)',
                   (id_paciente, onde_doi, o_que_sente, quando_comecou))
    conn.commit()
#menus
def menu_atendente():#marianne
    """Receptionist menu: manage appointments and register patients.

    Fix: the original used independent `if` statements with the final `else`
    bound only to `opcao == 6`, so whenever a recursive call returned, control
    fell through into later branches and the "invalid option" handler.  An
    if/elif/else chain guarantees exactly one branch runs per choice.
    """
    clear()
    print('\nBem vindo '+usuario_logado+'!\n1- Agendar consulta.\n2- Cancelar agendamento.\n3- Alterar agendamento.\n4- Ver agendamentos.\n5- Cadastrar paciente. \n6-Sair.')
    opcao = int(input('Digite a opcao desejada: '))
    if opcao == 1:
        # Schedule a new appointment.
        clear()
        id_paciente = input("Digite o ID do paciente: ")
        id_medico = input("Digite o ID do medico: ")
        data = input("Digite a data da consulta: ")
        horario = input("Digite o horario da consulta: ")
        insere_agendamentos(id_paciente, id_medico, data, horario)
        menu_atendente()
    elif opcao == 2:
        # Cancel an appointment.
        clear()
        print('Consultas cadastradas (ID, ID paciente, ID medico, data, horario): ')
        mostrar = cursor.execute('SELECT * FROM agendamentos')
        for linha in mostrar.fetchall():
            print(linha)
        id_paciente_r = input("Digite o ID do paciente que deseja remover: ")
        remove_agendamentos(id_paciente_r)
        menu_atendente()
    elif opcao == 3:
        # Change one field of an appointment.
        clear()
        print('Consultas cadastradas (ID, ID paciente, ID medico, data, horario): ')
        mostrar = cursor.execute('SELECT * FROM agendamentos')
        for linha in mostrar.fetchall():
            print(linha)
        id_agendamentos = input('\nID agendamento: ')
        alteracao_campo = input('Digite o campo de alteracao (id_paciente, id_medico, data, horario): ')
        alteracao = input('Digite a alteracao: ')
        altera_agendamentos(alteracao_campo, alteracao, id_agendamentos)
        menu_atendente()
    elif opcao == 4:
        # List appointments, resolving patient and doctor names.
        clear()
        print('Consultas cadastradas (ID, ID paciente, ID medico, data, horario): ')
        mostrar = cursor.execute('SELECT * FROM agendamentos')
        for linha in mostrar.fetchall():
            paciente_id = linha[1]
            medico_id = linha[2]
            paciente_nome = ''
            medico_nome = ''
            medico_profissao = ''
            novo_mostrar = cursor.execute('SELECT * FROM paciente WHERE id = ? OR id = ?', (paciente_id, paciente_id))
            for nova_linha in novo_mostrar.fetchall():
                paciente_nome = nova_linha[1]
            novo_mostrar = cursor.execute('SELECT * FROM funcionarios WHERE id = ? OR id = ?', (medico_id, medico_id))
            for nova_linha in novo_mostrar.fetchall():
                medico_nome = nova_linha[0]
                medico_profissao = nova_linha[1]
            print(linha[3], 'as', linha[4], paciente_nome, 'tem um consulta agendada com', medico_nome, '(', medico_profissao, ')')
        input('Pressione qualquer tecla para continuar')
        menu_atendente()
    elif opcao == 5:
        # Register a patient.
        clear()
        nome = input('Digite o nome do paciente: ')
        idade = input('Digite a idade do paciente: ')
        sexo = input('Digite o sexo do paciente: ')
        peso = input('Digite o peso do paciente: ')
        insere_paciente(nome, idade, sexo, peso)
        menu_atendente()
    elif opcao == 6:
        fazerlogin()
    else:
        clear(), print("Invalido, entre com outro valor\n"), menu_atendente()
def menu_manager():
    """Manager menu: CRUD for employees, vehicles and login accounts.

    Each branch ends with `return 0` after asking (via voltar_manager)
    whether to reopen the menu.
    """
    clear()
    print('\nBem vindo '+usuario_logado+'!\n1- Cadastrar funcionario.\n2- Remover funcionario.\n3- Alterar funcionario.\n4- Ver funcionarios cadastrados.\n5- Cadastrar veiculo.\n6- Remover veiculo.\n7- Alterar veiculo.\n8- Ver veiculos cadastrados.\n9- Cadastrar login.\n10- Remover login.\n11- Alterar login\n12- Listar logins\n13- Sair!')
    opc = int(input('Digite a opcao desejada: '))
    # Options 1-4: employee management.
    if opc == 1:
        nome = input('Digite o nome do funcionario: ')
        profissao = input('Digite a profissao do funcionario: ')
        matricula = input('Digite a matricula do funcionario: ')
        insere_funcionarios(nome, profissao, matricula)
        voltar_manager()
        return 0
    if opc == 2:
        print('Funcionarios cadastrados (nome, profissao, matricula, ID): ')
        mostrar = cursor.execute('SELECT * FROM funcionarios')
        for linha in mostrar.fetchall():
            print(linha)
        id_funcionario_r = input("\nDigite o ID do funcionario que deseja remover: ")
        remove_funcionarios(id_funcionario_r)
        voltar_manager()
        return 0
    if opc == 3:
        print('Funcionarios cadastrados (nome, profissao, matricula, ID): ')
        mostrar = cursor.execute('SELECT * FROM funcionarios')
        for linha in mostrar.fetchall():
            print(linha)
        id_funcionario = int(input('\nID do funcionario que deseja alterar: '))
        alteracao_campo = input('Digite o campo de alteracao (nome, profissao, matricula): ')
        alteracao = input('Digite a alteracao: ')
        altera_funcionarios(alteracao_campo, alteracao, id_funcionario)
        voltar_manager()
        return 0
    if opc == 4:
        print('Funcionarios cadastrados (nome, profissao, matricula, ID): ')
        mostrar = cursor.execute('SELECT * FROM funcionarios')
        for linha in mostrar.fetchall():
            print(linha)
        voltar_manager()
        return 0
    # Options 5-8: vehicle management.
    if opc == 5:
        placa = input('Digite a placa do veiculo: ')
        status = input('Digite o status do veiculo: ')
        motorista = input('Digite o motorista do veiculo: ')
        paramedico = input('Digite o paramedico que esta no veiculo: ')
        paciente = input('Digite o paciente que será atendido: ')
        insere_veiculos(placa, status, motorista, paramedico, paciente)
        voltar_manager()
        return 0
    if opc == 6:
        print('Veiculos cadastrados (ID, placa, status, motorista, paramedico, paciente): ')
        mostrar = cursor.execute('SELECT * FROM veiculos')
        for linha in mostrar.fetchall():
            print(linha)
        id_veiculo_r = input("\nDigite o ID do veiculo que deseja remover: ")
        remove_veiculos(id_veiculo_r)
        voltar_manager()
        return 0
    if opc == 7:
        print('Veiculos cadastrados (ID, placa, status, motorista, paramedico, paciente): ')
        mostrar = cursor.execute('SELECT * FROM veiculos')
        for linha in mostrar.fetchall():
            print(linha)
        id_veiculo = input('\nID do veiculo que deseja alterar: ')
        alteracao_campo = input('Digite o campo de alteracao (placa, status, motorista, paramedico, paciente): ')
        alteracao = input('Digite a alteracao: ')
        altera_veiculos(alteracao_campo, alteracao, id_veiculo)
        voltar_manager()
        return 0
    if opc == 8:
        print('Veiculos cadastrados (ID, placa, status, motorista, paramedico, paciente): ')
        mostrar = cursor.execute('SELECT * FROM veiculos')
        for linha in mostrar.fetchall():
            print(linha)
        voltar_manager()
        return 0
    # Options 9-12: login-account management.
    if opc == 9:
        nome_usuario = input('\nDigite o login a ser cadastrado:')
        senha = input('\nDigite sua senha:')
        area = input('\nDigite sua profissao:')
        cadastro_login(nome_usuario, senha, area)
        voltar_manager()
        return 0
    if opc == 10:
        mostrar = cursor.execute('SELECT * FROM login')
        for linha in mostrar.fetchall():
            print(linha)
        id_usuario = input('digite o id do usuario a ser removido: ou "cancelar" para voltar\n')
        if(id_usuario != "cancelar"):
            remove_login(id_usuario)
        voltar_manager()
        return 0
    if opc == 11:
        altera_login()
        voltar_manager()
        return 0
    if opc == 12:
        mostrar = cursor.execute('SELECT * FROM login')
        for linha in mostrar.fetchall():
            print(linha)
        voltar_manager()
        return 0
    if opc == 13:
        fazerlogin()
    else:
        clear(), print("Invalido, entre com outro valor\n"), menu_manager()
def voltar_manager():# Luiz Eduardo
    """Ask whether to return to the manager menu; answering 'sim' reopens it."""
    resposta = input('\nDeseja voltar(sim ou nao)?:')
    if resposta != 'sim':
        return 0
    clear()
    menu_manager()
def menu_medico():
    """Doctor menu: record an anamnesis, manage beds, or change credentials."""
    clear()
    opcao = int(input('\nBem vindo '+usuario_logado+'\nDigite\n1-Para fazer anamnese\n2-Para cadastrar ou remover um leito \n3-Para mudar senha ou login\n4-Para sair\n'))
    if opcao == 1:
        # Record a medical-history interview for an existing patient.
        mostrar = cursor.execute('SELECT * FROM paciente')
        for linha in mostrar.fetchall():
            print(linha)
        id_paciente = input('\nDigite o ID do paciente:\n')
        onde_doi = input('\nDigite o local da dor:\n')
        o_que_sente = input('\nDigite o que o paciente sente:\n')
        quando_comecou = input('\nDigite a data de quando começou:\n')
        insere_anamnese(id_paciente, onde_doi, o_que_sente, quando_comecou)
        voltar_medico()
        return 0
    if opcao == 2:
        # Bed management: register or free a bed.
        op = int(input('\n1-Cadastrar\n2-Remover\n:'))
        if op == 1:
            nome = input('Digite o nome do paciente:')
            numero = input('Digite o numero do leito:')
            cadastra_leito(nome, numero)
            voltar_medico()
            return 0
        if op == 2:
            mostrar = cursor.execute('SELECT * FROM dadosleito')
            for linha in mostrar.fetchall():
                print(linha)
            id_paciente = input('id do leito a ser removido:')
            remove_leito(id_paciente)
            voltar_medico()
            return 0
        # NOTE(review): any other `op` value falls through and returns None
        # without re-prompting — confirm this is intended.
    if opcao == 3:
        altera_login()
        voltar_medico()
        return 0
    if opcao == 4:
        fazerlogin()
    else:
        print('Numero invalido, digite novamente!\n')
def voltar_medico():# Luiz Eduardo
    """Ask whether to return to the doctor menu; answering 'sim' reopens it."""
    resposta = input('\nDeseja voltar(sim ou nao)?:')
    if resposta != 'sim':
        return 0
    clear()
    menu_medico()
def menu_engbio():# Luiz Eduardo
    """Biomedical-engineer menu: calibration, equipment CRUD, listing.

    Fix: the update path now calls alterar_equipamento with the arguments in
    the order its signature declares — (nome, funcao, preco, status, data, id).
    The original passed the id first, shifting every value into the wrong
    column.
    """
    clear()
    print("Bem vindo "+usuario_logado+"!\n\n1-Calibragem de equipamentos.\n2-Cadastrar/Remover equipamento.\n3-Listar/Alterar equipamentos.\n4-Sair!")
    opcao = int(input('Digite o numero da opcao desejada=>'))
    if opcao == 1:
        print("\nQual equipamento deseja calibrar ?")
        voltar_engbio()
        return 0
    if opcao == 2:
        print("1-Cadastrar\n2-Remover")
        cr = int(input('Digite o numero da opcao desejada=>'))
        if cr == 1:
            nome = input('Nome:')
            funcao = input('Funcao:')
            preco = input('Preço:')
            status = input('Status:')
            data = input('Data de insersao:')
            insere_equipamento(nome, funcao, preco, status, data)
            print('Cadastrado com sucesso !')
            voltar_engbio()
            return 0
        if cr == 2:
            lista_equipamento()
            id_equipamento = input('id=')
            remove_equipamento(id_equipamento)
            print("\nEquipamento removido com sucesso!")
            voltar_engbio()
            return 0
        else:
            voltar_engbio()
            return 0
    if opcao == 3:
        lista_equipamento()
        resp = input('\nDeseja alterar(sim ou nao)?')
        if resp == 'sim':
            id_equipamento = input('Id do equipamento:')
            novo_nome = input('Digite o nome:')
            nova_funcao = input('Digite a funcao:')
            novo_preco = input('Digite o preco:')
            novo_status = input('Digite o novo status do equipamento:')
            nova_data = input('Digite a data atual:')
            # Argument order matches the function signature: id goes LAST.
            alterar_equipamento(novo_nome, nova_funcao, novo_preco, novo_status, nova_data, id_equipamento)
            print('Alterado com sucesso !')
            voltar_engbio()
            return 0
        else:
            voltar_engbio()
            return 0
    if opcao == 4:
        fazerlogin()
    else:
        clear()
        menu_engbio()
def voltar_engbio():# Luiz Eduardo
    """Ask whether to return to the engineer menu; answering 'sim' reopens it."""
    resposta = input('\nDeseja voltar(sim ou nao)?:')
    if resposta != 'sim':
        return 0
    clear()
    menu_engbio()
firstAccess()  # Program entry point: choose between first-run setup and login.
| victorhnogueira/esof_sistema_gerencimento_hospitalar | setup.py | setup.py | py | 21,575 | python | pt | code | 1 | github-code | 36 |
26361613449 | from bme590_assignment02.ECG_Class import ECG_Class
from flask import Flask, jsonify, request
import numpy as np
app = Flask(__name__)
# Total requests served since startup; incremented by every endpoint below.
count_requests = 0  # Global variable
@app.route('/heart_rate/summary', methods=['POST'])
def get_data_for_summary():
    """Summary endpoint: validate posted ECG data and return instantaneous
    heart rate plus tachy/brady annotations as JSON."""
    global count_requests
    count_requests += 1
    payload = request.json                     # retrieve external data
    parsed = check_and_parse_summary(payload)  # validate, map to internal format
    summary = calc_summary(parsed)             # process the data
    return jsonify(summary)                    # respond to client
def check_and_parse_summary(dictionary):
    """Validate the posted summary payload and map it to the internal format.

    The time series is accepted under any of the keys 'time'/'t'/'T'/'Time';
    the voltages under 'voltage'/'v'/'V'/'Voltage'.

    Fixes relative to the original:
    - missing-key fallbacks used `except ValueError`, which can never catch the
      `KeyError` a dict lookup raises — replaced with membership tests;
    - `np.array([d1])` built a (1, n) array, so every length check saw 1 —
      now builds 1-D arrays, consistent with check_and_parse_average;
    - `len(dat[0] < 27)` took the length of a boolean array — now `len(...) < 27`.

    :param dictionary: (dict) user data (time and voltage lists)
    :return: dat: (tuple) 1-D numpy arrays (time, voltage)
    """
    def _lookup(keys):
        # Return the first present key's value, or None.
        for key in keys:
            if key in dictionary:
                return dictionary[key]
        return None

    d1 = _lookup(('time', 't', 'T', 'Time'))
    if d1 is None:
        return send_error('Dictionary does not contain valid ''time'' data', 400)
    d2 = _lookup(('voltage', 'v', 'V', 'Voltage'))
    if d2 is None:
        return send_error('Dictionary does not contain valid ''voltage'' data', 400)
    dat = (np.array(d1), np.array(d2))
    # The downstream filter needs at least 27 samples.
    if len(dat[0]) < 27:
        return send_error('The data needs to have at least 27 points to be properly filtered', 400)
    if len(dat[0]) != len(dat[1]):
        return send_error('Time and voltage arrays must have same number of elements', 400)
    # Reject an entirely negative signal.
    if np.all(dat[1] < 0):
        return send_error('Data is entirely negative', 400)
    return dat
def calc_summary(dat):
    """Compute instantaneous heart rate and tachy/brady annotations.

    :param dat: (tuple) user data (time and voltage)
    :return: dict with time, instantaneous HR, and annotation lists
    """
    ecg = ECG_Class(dat)
    instantaneous = ecg.instHR
    return {
        'time': dat[0],
        'instantaneous_heart_rate': instantaneous.tolist(),
        'tachycardia_annotations': ecg.tachy('inst'),
        'bradycardia_annotations': ecg.brady('inst'),
    }
@app.route('/heart_rate/average', methods=['POST'])
def get_data_for_average():
    """Average endpoint: validate posted ECG data and return the average heart
    rate plus tachy/brady annotations as JSON."""
    global count_requests
    count_requests += 1
    payload = request.json                       # retrieve external data
    dat, ap = check_and_parse_average(payload)   # validate, map to internal format
    summary = calc_average_summary(dat, ap)      # process the data
    return jsonify(summary)                      # respond to client
def check_and_parse_average(dictionary):
    """Validate the posted averaging payload and map it to the internal format.

    Fixes relative to the original:
    - missing-key fallbacks used `except ValueError`, which can never catch the
      `KeyError` a dict lookup raises — replaced with membership tests;
    - the caller unpacks `dat, ap = check_and_parse_average(req)`, but the
      original returned only `dat`, silently splitting the (time, voltage)
      tuple — the averaging period is now returned as well.

    :param dictionary: (dict) user data (time, voltage, averaging_period)
    :return: (dat, ap): dat is a tuple of 1-D numpy arrays (time, voltage),
        ap is the averaging period in seconds
    """
    def _lookup(keys):
        # Return the first present key's value, or None.
        for key in keys:
            if key in dictionary:
                return dictionary[key]
        return None

    d1 = _lookup(('time', 't', 'T', 'Time'))
    if d1 is None:
        return send_error('Dictionary does not contain valid ''time'' data', 400)
    d2 = _lookup(('voltage', 'v', 'V', 'Voltage'))
    if d2 is None:
        return send_error('Dictionary does not contain valid ''voltage'' data', 400)
    if 'averaging_period' in dictionary:
        ap = dictionary['averaging_period']
    else:
        return send_error('Dictionary does not contain valid ''averaging_period'' data', 400)
    dat = (np.array(d1), np.array(d2))
    if len(dat[0]) != len(dat[1]):
        return send_error('Time and voltage arrays must have same number of elements', 400)
    # Need at least one full averaging window of data.
    if dat[0][-1] < ap:
        return send_error('Not enough data for averaging', 400)
    # Reject an entirely negative signal.
    if np.all(dat[1] < 0):
        return send_error('Data is entirely negative', 400)
    return dat, ap
def calc_average_summary(dat, avg_secs):
    """Compute average heart rate and annotations over avg_secs windows.

    :param dat: (tuple) user data (time and voltage)
    :param avg_secs: (int) number of seconds to average over (bin size)
    :return: dict with time interval, averaging period, average HR, annotations
    """
    ecg = ECG_Class(dat, avg_secs)
    return {'time_interval': dat[0],
            'averaging_period': avg_secs,
            'average_heart_rate': ecg.avg(),
            'tachycardia_annotations': ecg.tachy('avg'),
            'bradycardia_annotations': ecg.brady('avg')
            }
@app.route('/heart_rate/requests', methods=['GET'])
def requests():
    """Return the number of requests served since the last server reboot."""
    global count_requests
    count_requests += 1
    return jsonify(count_requests)
def send_error(message, code):  # Suyash error function
    """Build a JSON error body with the given message and HTTP status code."""
    return jsonify({"error": message}), code
| juliaross20/cloud_ecg | api_codes.py | api_codes.py | py | 6,928 | python | en | code | 0 | github-code | 36 |
def mergeLinkedLists(headOne, headTwo):
    """Merge two sorted singly linked lists in place and return the new head.

    Fix: the original dereferenced `tail.next` before `tail` was ever set,
    crashing whenever one input list was empty from the start (and returned
    None for the non-empty list); empty inputs are now handled.

    On equal values the node from the second list is taken first, matching the
    original's strict `<` comparison.
    """
    head = tail = None
    # Walk both lists while each still has nodes, splicing the smaller head.
    while headOne and headTwo:
        if headOne.value < headTwo.value:
            nxt, headOne = headOne, headOne.next
        else:
            nxt, headTwo = headTwo, headTwo.next
        if head is None:
            head = tail = nxt
        else:
            tail.next = nxt
            tail = nxt
    # At most one list still has nodes; append it wholesale.
    rest = headOne or headTwo
    if head is None:
        return rest
    tail.next = rest
    return head
| blhwong/algos_py | algo_exp/merge_linked_list/main.py | main.py | py | 630 | python | en | code | 0 | github-code | 36 |
37635045680 | # There are n cars on an infinitely long road. The cars are numbered from 0 to n - 1 from left to right and each car is present at a unique point.
# You are given a 0-indexed string directions of length n. directions[i] can be either 'L', 'R', or 'S' denoting whether the ith car is moving towards the left, towards the right, or staying at its current point respectively. Each moving car has the same speed.
# The number of collisions can be calculated as follows:
# When two cars moving in opposite directions collide with each other, the number of collisions increases by 2.
# When a moving car collides with a stationary car, the number of collisions increases by 1.
# After a collision, the cars involved can no longer move and will stay at the point where they collided. Other than that, cars cannot change their state or direction of motion.
# Return the total number of collisions that will happen on the road.
# Example 1:
# Input: directions = "RLRSLL"
# Output: 5
# Explanation:
# The collisions that will happen on the road are:
# - Cars 0 and 1 will collide with each other. Since they are moving in opposite directions, the number of collisions becomes 0 + 2 = 2.
# - Cars 2 and 3 will collide with each other. Since car 3 is stationary, the number of collisions becomes 2 + 1 = 3.
# - Cars 3 and 4 will collide with each other. Since car 3 is stationary, the number of collisions becomes 3 + 1 = 4.
# - Cars 4 and 5 will collide with each other. After car 4 collides with car 3, it will stay at the point of collision and get hit by car 5. The number of collisions becomes 4 + 1 = 5.
# Thus, the total number of collisions that will happen on the road is 5.
# Example 2:
# Input: directions = "LLRR"
# Output: 0
# Explanation:
# No cars will collide with each other. Thus, the total number of collisions that will happen on the road is 0.
class Solution:
    def countCollisions(self, directions: str) -> int:
        """Count the total collisions.

        Cars driving left at the front and right at the back escape without
        ever colliding; every remaining car that is moving ('L' or 'R' in the
        middle section) eventually crashes exactly once and contributes one
        collision unit, which reproduces the stack simulation's tally.
        """
        core = directions.lstrip('L').rstrip('R')
        return len(core) - core.count('S')
return ans | sunnyyeti/Leetcode-solutions | 2211 Cout Collisions on a Road.py | 2211 Cout Collisions on a Road.py | py | 2,787 | python | en | code | 0 | github-code | 36 |
7615554968 | # -*- coding: utf-8 -*-
import codecs
import sys
import re
import h5py
import numpy as np
import tflearn
from tflearn.data_utils import to_categorical, pad_sequences
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.embedding_ops import embedding
from tflearn.layers.recurrent import bidirectional_rnn, BasicLSTMCell, GRUCell
from tflearn.layers.recurrent import lstm
from tflearn.layers.estimator import regression
from tflearn.optimizers import *
from multiprocessing import cpu_count, freeze_support
from multiprocessing.pool import Pool
from make_data import make_data, make_data_divided, norm_many
from util import read_text_lines, refine_line
def bi_LSTM():
    """Build the bidirectional-LSTM sequence-labelling graph (tflearn).

    Input is a sequence of up to 440 token ids from a 20000-entry vocabulary;
    output is a 2-class softmax per sample.
    """
    # Network building
    net = input_data(shape=[None, 440])
    net = embedding(net, input_dim=20000, output_dim=128)
    net = dropout(net, 0.9)
    net = bidirectional_rnn(net,
                            BasicLSTMCell(128, forget_bias=1.),
                            BasicLSTMCell(128, forget_bias=1.))
    net = dropout(net, 0.7)
    net = fully_connected(net, 2, activation='softmax')
    net = regression(net,
                     optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=0.001)
    return net
def train(trainX, trainY, model_file):
    """Train the bi-LSTM on the full dataset and save weights to model_file."""
    print('# Data preprocessing')
    # Pad/truncate every sequence to 440 tokens and one-hot the labels.
    trainX = pad_sequences(trainX, maxlen=440, value=0.)
    trainY = to_categorical(trainY, nb_classes=2)
    print('build network')
    net = bi_LSTM()
    print('# Training')
    '''
    tensorboard_verbose:
            0: Loss, Accuracy (Best Speed)
            1: Loss, Accuracy + Gradients
            2: Loss, Accuracy, Gradients, Weights
            3: Loss, Accuracy, Gradients, Weights, Activations, Sparsity (Best Visualization)
    '''
    model = tflearn.DNN(net, clip_gradients=0., tensorboard_verbose=0,
                        checkpoint_path='./chkpoint_mdm001/',
                        best_checkpoint_path='./best_chkpoint_mdm001/',
                        best_val_accuracy=0.9)
    print('tfl.DNN end.')
    model.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=128,
              n_epoch=4, run_id='bilstm_170519b')
    print('model.fit end.')
    # Save model
    model.save(model_file)
    print('model save end.')
class Trainer():
    """Incremental trainer: builds the network once, then fits chunk by chunk."""
    def __init__(self):
        print('train_diviced')
        print('# Network building')
        self.net = bi_LSTM()
        # Persistent DNN wrapper reused across train() calls so that weights
        # accumulate over successive data chunks.
        self.model = tflearn.DNN(self.net, clip_gradients=0., tensorboard_verbose=0,
                                 checkpoint_path='./chkpoint_mdm001/',
                                 best_checkpoint_path='./best_chkpoint_mdm001/',
                                 best_val_accuracy=0.9)
        print('tfl.DNN end.')
        self.i = 0  # number of completed fit() chunks (for logging)
    def train(self, trainX, trainY):
        """Fit one chunk of data for a single epoch."""
        print('# Data preprocessing')
        trainX = pad_sequences(trainX, maxlen=440, value=0.)
        trainY = to_categorical(trainY, nb_classes=2)
        print('data preproc end.')
        self.model.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=128,
                       n_epoch=1, run_id='bilstm_170524mdm001')
        print('model.fit #{} end'.format(self.i))
        self.i += 1
    def save(self, model_file):
        """Persist the current weights to model_file."""
        self.model.save(model_file)
        print('model save end.')
def interference(testX, testY, model_file):
    """Run inference over the whole test set and dump predicted labels to
    test_result.txt."""
    print('interference')
    print('# Data preprocessing')
    testX = pad_sequences(testX, maxlen=440, value=0.)
    testY = to_categorical(testY, nb_classes=2)
    print('# Network building')
    net = bi_LSTM()
    print('# Load model')
    model = tflearn.DNN(net)
    model.load(model_file)
    if not model:
        print('model not loaded')
        sys.exit(1)
    else:
        print('model load.')
    print('# Predict')
    pred = model.predict(testX)
    # Pick the argmax class per sample and compact to uint8.
    new_y = np.argmax(pred, axis=1)
    result = new_y.astype(np.uint8)
    print('predict end.')
    # NOTE(review): str() of a large numpy array abbreviates the middle with
    # '...'; verify the full prediction vector is meant to be written out.
    result = str(result)
    print('pred to str.')
    with codecs.open('test_result.txt', 'w', encoding='utf-8') as wfh:
        wfh.write(result)
    print('end.')
class Tagger():
    """Loads a trained bi-LSTM once and answers repeated inference calls."""
    def __init__(self, model_file):
        print('interference_divided')
        print('# Network building')
        self.net = bi_LSTM()
        print('# Load model')
        self.model = tflearn.DNN(self.net)
        self.model.load(model_file)
        if not self.model:
            print('model not loaded')
            sys.exit(1)
        else:
            print('model load.')
    def interference(self, testX):
        """Return a generator of per-sample predicted class ids (0 or 1)."""
        print('# Data preprocessing')
        testX = pad_sequences(testX, maxlen=440, value=0.)
        print('# Predict')
        pred = self.model.predict(testX)
        new_y = np.argmax(pred, axis=1)
        result = (int(y) for y in new_y.astype(np.uint8))
        return result
def run_train(train_file):
    """Build the full training set from train_file and train in one go."""
    print('train')
    worker_pool = Pool(processes=cpu_count())
    features, labels = make_data(worker_pool, train_file)
    print('make train data end.')
    features = norm_many(worker_pool, features)
    print('norm_data end.')
    train(features, labels, 'model_MDM001.tfl')
def run_train_divided(train_file):
    """Train chunk-by-chunk for four passes over the file, then save."""
    print('train')
    worker_pool = Pool(processes=cpu_count())
    trainer = Trainer()
    for epoch_idx in range(4):
        for features, labels in make_data_divided(worker_pool, train_file):
            print('epoch: {}'.format(epoch_idx))
            trainer.train(features, labels)
    trainer.save('model_MDM001.tfl')
def run_test():
    """Evaluate model.tfl on the fixed TED test file in one pass."""
    print('test')
    worker_pool = Pool(processes=cpu_count())
    features, labels = make_data(worker_pool, 'ted_7_ErasePunc_FullKorean__test.txt')
    print('make test data end.')
    features = norm_many(worker_pool, features)
    print('norm_data end.')
    interference(features, labels, 'model.tfl')
def run_test_divided(test_file):
    """Yield one concatenated label string ('0'/'1' per character) per
    data chunk of test_file."""
    print('test')
    worker_pool = Pool(processes=cpu_count())
    tagger = Tagger('model.tfl')
    for features, _ in make_data_divided(worker_pool, test_file):
        # The predictions arrive as one flat stream with no sentence
        # boundaries; the caller splits them back up by the character
        # count of each sentence and reconstructs the original text.
        flags = (str(flag) for flag in tagger.interference(features))
        yield ''.join(flags)
def main():
    """CLI entry point: dispatch on sys.argv[1] to train, test, or build
    an HDF5 dataset ('make')."""
    if len(sys.argv) < 2:
        print('usage: bi_lstm.py (train|test|make)')
        sys.exit(1)
    if sys.argv[1] == 'train':
        train_file = 'MDM001_FullKorean__train.txt'
        #run_train(train_file)
        run_train_divided(train_file)
    elif sys.argv[1] == 'test':
        test_file = 'ted_7_ErasePunc_FullKorean__test.txt'
        # Re-read and normalize the raw text so predicted spacing labels
        # can be re-applied character by character.
        lines = read_text_lines(test_file)
        lines = (refine_line(line) for line in lines)
        lines = [re.sub(r'[\ \n\r]+', '', line).strip() for line in lines]
        i = 0  # index of the next unconsumed source line across chunks
        with codecs.open('ted_test_result.txt', 'w', encoding='utf-8') as wfh:
            for Y in run_test_divided(test_file):
                # (translated) Merge while checking the lengths of Y and
                # lines; alternatively, since Y arrives in units of 10000
                # processed lines, read 10000 lines at a time and compare.
                y_pos = 0
                buf = []
                while True:
                    # (translated note below) Advance through the lines
                    # only as far as this Y chunk covers.
                    '''
                    Y가 있는 만큼만 line을 진행시켜서 해보기
                    '''
                    line = lines[i]
                    result = ''
                    # Slice this line's labels out of the flat chunk.
                    line_y = Y[y_pos:y_pos+len(line)]
                    for ch, y in zip(line, line_y):
                        # Label '1' means "insert a space before this char".
                        if y == '1':
                            result += ' ' + ch
                        else:
                            result += ch
                    buf.append(result.strip())
                    y_pos += len(line)
                    i += 1
                    if y_pos >= len(Y):
                        break
                wfh.write('\n'.join(buf) + '\n')
    elif sys.argv[1] == 'make':
        make_file = 'MDM001_FullKorean__train.txt'
        lines = read_text_lines(make_file)
        lines = (refine_line(line) for line in lines)
        lines = [re.sub(r'[\ \n\r]+', '', line).strip() for line in lines]
        i = 0
        pool = Pool(processes=cpu_count())
        X = []
        Y = []
        # Accumulate all chunks into two big arrays before saving.
        for x, y in make_data_divided(pool, make_file):
            x = norm_many(pool, x)
            x = pad_sequences(x, maxlen=440, value=0.)
            if len(X) > 0:
                X = np.concatenate((X, x), axis=0)
            else:
                X = x
            print('{}) x'.format(i), end=', ')
            y = to_categorical(y, nb_classes=2)
            if len(Y) > 0:
                Y = np.concatenate((Y, y), axis=0)
            else:
                Y = y
            print('y')
            i += 1
        # (translated) TODO: change the file name and dataset names
        #h5f = h5py.File('ted_train.h5', 'w')
        #h5f.create_dataset('ted7_X', data=X)
        #h5f.create_dataset('ted7_Y', data=Y)
        h5f = h5py.File('ted_MDM001.h5', 'w')
        h5f.create_dataset('MDM001_X', data=X)
        h5f.create_dataset('MDM001_Y', data=Y)
        h5f.close()
    else:
        print('usage: bi_lstm.py (train|test|make)')
if __name__ == '__main__':
    print('hello')
    # Guard the debug print: the unconditional `print(sys.argv[1])` raised
    # IndexError when the script was run with no CLI argument, before
    # main() could print its usage message.
    if len(sys.argv) > 1:
        print(sys.argv[1])
    #input()
    # Required on Windows when multiprocessing workers are spawned.
    freeze_support()
    main()
| kimwansu/autospacing_tf | bi_lstm.py | bi_lstm.py | py | 9,163 | python | en | code | 0 | github-code | 36 |
9503023051 | import os
import os.path as osp
import time
import yaml
import warnings
import torch
import torch.optim as optim
from utils import get_world_size, get_rank
from builder import build_train_dataloader, build_val_dataloader,build_model
from utils import Logger,CosineDecayLR
from torch import distributed as dist
from torch.nn.utils import clip_grad_norm_
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = True
class IterRunner():
    """Two-phase training runner for a backbone+head face-recognition model.

    Phase 1 ("freeze"): the backbone's parameters are frozen and only the
    head trains. Phase 2 ("norm"): everything trains. Both phases share the
    same dataloader; logging, validation and checkpointing are rank-0 /
    distributed aware. Everything is driven by a nested `config` dict.
    """

    def __init__(self, config):
        self.config = config
        self.rank = get_rank()
        self.world_size = get_world_size()
        self.iter = 0  # global iteration counter across both phases

        # init dataloader
        self.train_dataloader,self.sampler = build_train_dataloader(self.config['train']['data'])
        self.val_dataloader = build_val_dataloader(self.config['val'])

        # init model: the head must know the backbone's output dimension.
        feat_dim = config['model']['backbone']['net']['out_channel']
        self.config['model']['head']['net']['feat_dim'] = feat_dim
        self.model = build_model(config['model'])

        # init project: one timestamped directory per run.
        timestamp = time.strftime("%Y%m%d_%H%M%S", time.localtime())
        self.project_dir = osp.join(config['common']['save_log_dir'],timestamp)
        os.makedirs(self.project_dir,exist_ok=True)
        if self.rank == 0:
            print('')
            print('The training log and models are saved to ' + self.project_dir)
            print('')

        # save cfg: dump the (possibly mutated) config next to the run.
        save_cfg_path = osp.join(self.project_dir,config['common']['save_cfg_name'])
        with open(save_cfg_path, 'w') as f:
            yaml.dump(config, f, sort_keys=False, default_flow_style=None)

        # save log
        save_log_dir = osp.join(self.project_dir, 'log')
        os.makedirs(save_log_dir, exist_ok=True)
        self.train_log = Logger(name='train', path="{}/{}_train.log".format(save_log_dir,time.strftime("%Y-%m-%d-%H-%M-%S",time.localtime())))
        self.val_log = Logger(name='val', path="{}/{}_val.log".format(save_log_dir,time.strftime("%Y-%m-%d-%H-%M-%S",time.localtime())))

        #save weight
        self.save_weights_dir = osp.join(self.project_dir,'weights')
        os.makedirs(self.save_weights_dir, exist_ok=True)

        # init common and train arguments
        self.freeze_epoch = self.config['train']['freeze']['epoch']
        self.norm_epoch = self.config['train']['norm']['epoch']
        self.test_first = self.config['common']['test_first']
        self.screen_intvl = self.config['common']['screen_intvl']
        self.val_intvl = self.config['common']['val_intvl']
        self.save_iters = self.config['common']['save_iters']
        self.freeze_iter_step = self.config['train']['freeze']['optim']['iter_step']
        self.norm_iter_step = self.config['train']['norm']['optim']['iter_step']
        self.scheduler_type = None
        # Best validation metrics seen so far; used by val() to decide
        # whether to checkpoint. NOTE(review): tpr_5e_3 is initialized but
        # never updated or read anywhere in this class.
        self.tpr_1e_3 = 0
        self.tpr_5e_3 = 0
        self.acc = 0

        # make sure the max_save_iter less than all_iter
        all_iter = (self.freeze_epoch+self.norm_epoch)*len(self.train_dataloader)
        if self.rank == 0:
            if len(self.save_iters) == 0:
                warnings.warn('`save_iters` is not set. if you want to save model in specified location,or not only end of each epoch.please check it!')
            else:
                if all_iter < max(self.save_iters):
                    raise KeyError(f'all_iter is {all_iter},but got max_save_iter {max(self.save_iters)},max_save_iter must be less than it')
        # NOTE(review): this early return at the very end of __init__ is a
        # no-op — there is no code after it. Confirm whether rank-specific
        # initialization was intended here.
        if self.rank != 0:
            return

    def set_optimizer_scheduler(self,config,freeze=False):
        """Create a fresh SGD optimizer and LR scheduler for each module
        ('backbone', 'head'), freezing or unfreezing the backbone first.

        NOTE(review): the backbone (un)freeze loop runs once per module,
        so it is repeated redundantly; harmless but worth confirming.
        """
        for module in self.model:
            if freeze:
                for param in self.model['backbone']['net'].parameters():
                    param.requires_grad = False
            else:
                for param in self.model['backbone']['net'].parameters():
                    param.requires_grad = True
            self.model[module]['optimizer'] = optim.SGD(self.model[module]['net'].parameters(),
                                                        lr=config['optim']['lr_init'],
                                                        momentum=config['optim']['momentum'],
                                                        weight_decay=config['optim']['weight_decay'])
            if config['scheduler']['type'] == 'CosineDecayLR':
                self.scheduler_type = 'CosineDecayLR'
                self.model[module]['scheduler'] = CosineDecayLR(
                    self.model[module]['optimizer'],
                    T_max=config['epoch']*len(self.train_dataloader),
                    lr_init=config['optim']['lr_init'],
                    lr_min=config['scheduler']['lr_end'],
                    warmup=config['scheduler']['warm_up_epoch']*len(self.train_dataloader)
                )
            if config['scheduler']['type'] == 'MultiStepLR':
                self.scheduler_type = 'MultiStepLR'
                self.model[module]['scheduler'] = optim.lr_scheduler.MultiStepLR(
                    self.model[module]['optimizer'],
                    config['scheduler']['milestones'],
                    config['scheduler']['gamma'],
                    -1
                )

    def set_model(self, test_mode):
        """Switch every module to eval() (test_mode=True) or train()."""
        for module in self.model:
            if test_mode:
                self.model[module]['net'].eval()
            else:
                self.model[module]['net'].train()

    def update_model(self,i,freeze=False):
        """Step optimizers/schedulers every `iter_step` batches (gradient
        accumulation). CosineDecayLR is stepped with an explicit iteration
        index relative to the current phase's start."""
        for module in self.model:
            if freeze:
                if i % self.freeze_iter_step == 0:
                    self.model[module]['optimizer'].step()
                    self.model[module]['optimizer'].zero_grad()
                    if self.scheduler_type == 'CosineDecayLR':
                        self.model[module]['scheduler'].step(self.iter)
                    else:
                        self.model[module]['scheduler'].step()
            else:
                if i % self.norm_iter_step == 0:
                    self.model[module]['optimizer'].step()
                    self.model[module]['optimizer'].zero_grad()
                    if self.scheduler_type == 'CosineDecayLR':
                        # Offset by the freeze phase so the cosine schedule
                        # restarts from the norm phase's first iteration.
                        self.model[module]['scheduler'].step(self.iter-self.freeze_epoch*len(self.train_dataloader))
                    else:
                        self.model[module]['scheduler'].step()

    def save_model(self):
        """Save each module's state_dict as <module>_<iter+1>.pth."""
        for module in self.model:
            model_name = '{}_{}.pth'.format(str(module), str(self.iter+1))
            model_path = osp.join(self.save_weights_dir, model_name)
            torch.save(self.model[module]['net'].state_dict(), model_path)

    @torch.no_grad()
    def val(self):
        """Extract (flip-augmented) features for every validation set,
        all-reduce them across ranks, evaluate, and checkpoint on rank 0
        when TPR@FPR=1e-3 or ACC improves."""
        # switch to test mode
        self.set_model(test_mode=True)
        for val_loader in self.val_dataloader:
            # meta info
            dataset = val_loader.dataset
            # create a placeholder `feats`,
            # compute _feats in different GPUs and collect
            dim = self.config['model']['backbone']['net']['out_channel']
            with torch.no_grad():
                feats = torch.zeros(
                    [len(dataset), dim], dtype=torch.float32).to(self.rank)
                for data, indices in val_loader:
                    data = data.to(self.rank)
                    _feats = self.model['backbone']['net'](data)
                    # Add features of the horizontally flipped image.
                    data = torch.flip(data, [3])
                    _feats += self.model['backbone']['net'](data)
                    feats[indices, :] = _feats
                # Each rank filled a disjoint slice; SUM merges them all.
                dist.all_reduce(feats, op=dist.ReduceOp.SUM)
            results = dataset.evaluate(feats.cpu())
            if self.rank == 0:
                results = dict(results)
                self.val_log.logger.info("Processing Val Iter:{} [{} : {}]".format(self.iter+1, dataset.name, results))
                # if model have acc better in the test data,save the model
                if results['TPR@FPR=1e-3'] >= self.tpr_1e_3 or results['ACC'] >= self.acc:
                    self.save_model()
                    self.tpr_1e_3 = results['TPR@FPR=1e-3']
                    self.acc = results['ACC']

    def train(self):
        """Run the freeze phase, then the norm phase. Each phase loops
        epochs x batches: forward, backward, gradient clipping, (possibly
        accumulated) optimizer step, running-average logging, and periodic
        validation/checkpointing."""
        if self.test_first:
            self.val()
        self.set_optimizer_scheduler(self.config['train']['freeze'],freeze=True)
        for epoch in range(self.freeze_epoch):
            # Running averages over the epoch for logging.
            Loss,Mag_mean,Mag_std,bkb_grad,head_grad = 0,0,0,0,0
            if self.sampler != None:
                self.sampler.set_epoch(epoch)
            self.set_model(test_mode=False)
            for i,(images,labels) in enumerate(self.train_dataloader):
                images, labels = images.to(self.rank), labels.to(self.rank)

                # forward
                self.set_model(test_mode=False)
                feats = self.model['backbone']['net'](images)
                loss = self.model['head']['net'](feats, labels)

                # backward
                loss.backward()
                b_norm = self.model['backbone']['clip_grad_norm']
                h_norm = self.model['head']['clip_grad_norm']
                if b_norm < 0. or h_norm < 0.:
                    raise ValueError(
                        'the clip_grad_norm should be positive. ({:3.4f}, {:3.4f})'.format(b_norm, h_norm))
                b_grad = clip_grad_norm_(
                    self.model['backbone']['net'].parameters(),
                    max_norm=b_norm, norm_type=2)
                h_grad = clip_grad_norm_(
                    self.model['head']['net'].parameters(),
                    max_norm=h_norm, norm_type=2)

                # update model
                self.iter = epoch*len(self.train_dataloader)+i
                self.update_model(i,freeze=True)

                # Feature-magnitude statistics plus incremental means.
                magnitude = torch.norm(feats, 2, 1)
                Loss = (Loss * i + loss.item()) / (i + 1)
                Mag_mean = (Mag_mean * i + magnitude.mean().item()) / (i + 1)
                Mag_std = (Mag_std * i + magnitude.std().item()) / (i + 1)
                bkb_grad = (bkb_grad * i + b_grad) / (i + 1)
                head_grad = (head_grad * i + h_grad) / (i + 1)
                if (i + 1) % self.screen_intvl == 0 or (i + 1) == len(self.train_dataloader):
                    if self.rank == 0:
                        # logging and update meters
                        self.train_log.logger.info("Processing Freeze Training Epoch:[{} | {}] Batch:[{} | {}] Lr:{:.6f} Loss:{:.4f} Mag_mean:{:.4f} Mag_std:{:.4f} bkb_grad:{:.4f} head_grad:{:.4f}"
                                                   .format(epoch+1,self.freeze_epoch+self.norm_epoch,i+1,len(self.train_dataloader),self.model['backbone']['optimizer'].param_groups[0]['lr'],Loss, Mag_mean, Mag_std, bkb_grad, head_grad))
                # if (i + 1) % self.val_intvl == 0 or (i + 1) == len(self.train_dataloader) or (self.iter + 1) in self.save_iters:
                #     self.val()
                if ((self.iter + 1) in self.save_iters or (i + 1) == len(self.train_dataloader)) and self.rank == 0:
                    self.save_model()

        # Norm phase: unfreeze the backbone and rebuild optimizers.
        self.set_optimizer_scheduler(self.config['train']['norm'], freeze=False)
        for epoch in range(self.norm_epoch):
            Loss,Mag_mean,Mag_std,bkb_grad,head_grad = 0,0,0,0,0
            if self.sampler != None:
                self.sampler.set_epoch(epoch)
            self.set_model(test_mode=False)
            for i,(images,labels) in enumerate(self.train_dataloader):
                images, labels = images.to(self.rank), labels.to(self.rank)

                # forward
                self.set_model(test_mode=False)
                feats = self.model['backbone']['net'](images)
                loss = self.model['head']['net'](feats, labels)

                # backward
                loss.backward()
                b_norm = self.model['backbone']['clip_grad_norm']
                h_norm = self.model['head']['clip_grad_norm']
                if b_norm < 0. or h_norm < 0.:
                    raise ValueError(
                        'the clip_grad_norm should be positive. ({:3.4f}, {:3.4f})'.format(b_norm, h_norm))
                b_grad = clip_grad_norm_(
                    self.model['backbone']['net'].parameters(),
                    max_norm=b_norm, norm_type=2)
                h_grad = clip_grad_norm_(
                    self.model['head']['net'].parameters(),
                    max_norm=h_norm, norm_type=2)

                # update model (global iter continues after the freeze phase)
                self.iter = (self.freeze_epoch+epoch)*len(self.train_dataloader)+i
                self.update_model(i,freeze=False)

                magnitude = torch.norm(feats, 2, 1)
                Loss = (Loss * i + loss.item()) / (i + 1)
                Mag_mean = (Mag_mean * i + magnitude.mean().item()) / (i + 1)
                Mag_std = (Mag_std * i + magnitude.std().item()) / (i + 1)
                bkb_grad = (bkb_grad * i + b_grad) / (i + 1)
                head_grad = (head_grad * i + h_grad) / (i + 1)
                if (i + 1) % self.screen_intvl == 0 or (i + 1) == len(self.train_dataloader):
                    if self.rank == 0:
                        # logging and update meters
                        self.train_log.logger.info("Processing Norm Training Epoch:[{} | {}] Batch:[{} | {}] Lr:{:.6f} Loss:{:.4f} Mag_mean:{:.4f} Mag_std:{:.4f} bkb_grad:{:.4f} head_grad:{:.4f}"
                                                   .format(epoch+self.freeze_epoch+1, self.freeze_epoch + self.norm_epoch, i+1,len(self.train_dataloader),self.model['backbone']['optimizer'].param_groups[0]['lr'],Loss, Mag_mean, Mag_std, bkb_grad, head_grad))
                # do test
                if (i + 1) % self.val_intvl == 0 or (i + 1) == len(self.train_dataloader) or (self.iter + 1) in self.save_iters:
                    self.val()
                # do save
                if ((self.iter + 1) in self.save_iters or (i + 1) == len(self.train_dataloader)) and self.rank == 0:
                    self.save_model()
| CxyZyr/face-recognition | runner.py | runner.py | py | 13,847 | python | en | code | 0 | github-code | 36 |
# Integration interval [a, b]
a = 0
b = 3
# Number of rectangles between a and b
n = 5
# Width of each rectangle
d = (b-a)/ (n *1.0)
# Initialize the accumulator for the integral
I = 0
# Define the integrand
def f(x):
    """Integrand to integrate: the quadratic x^2 - 2x + 4."""
    return x**2 - 2*x + 4
#range (0,n) = range(n)
# Reminder: the range function
# range(3) = [0,1,2]
# range(1,6,2) = [1,3,5]
for i in range(n):
    # Left endpoint of the rectangle
    xi = a + i*d
    # Right endpoint of the rectangle
    xii = a + (1+i)*d
    # Height of the function at xi and xii
    fi = f(xi)
    fii = f(xii)
    # Average height of the rectangle (trapezoid rule)
    h = (fi + fii) / (2 * 1.0)
    # Area of the rectangle
    A = d*h
    # Accumulate the Riemann sum
    I += A
    # I = I + A
print (I)
| IvonFis/Python-UAM | Ejercicios/Integral.py | Integral.py | py | 734 | python | es | code | 2 | github-code | 36 |
13395835031 | """
@author: gjorando
"""
import os
import importlib
from pypandoc import convert_file
from setuptools import setup, find_packages
def read(*tree):
    """
    Read a UTF-8 text file located relative to this setup.py's directory.
    The path components are joined onto the directory of this file.
    """
    target = os.path.join(os.path.dirname(__file__), *tree)
    with open(target, encoding='utf-8') as handle:
        return handle.read()
def version(main_package):
    """
    Read the version number from the __version__ variable in the main
    package's __init__ module. Raises RuntimeError if it is missing.
    """
    init_name = "{}.__init__".format(main_package)
    module = importlib.import_module(init_name)
    if not hasattr(module, "__version__"):
        raise RuntimeError("No version string found in {}.".format(init_name))
    return module.__version__
def requirements(*tree):
    """
    Read the requirements list from a requirements.txt file, dropping
    empty lines.
    """
    lines = read(*tree).split("\n")
    return [line for line in lines if line != ""]
def long_description(*tree):
    """
    setup.py only supports .rst files for the package description. As a
    result, we need to convert README.md on the fly.

    The converted text is written next to the source file with a .rst
    extension and also returned for use as `long_description`.
    """
    tree_join = os.path.join(os.path.dirname(__file__), *tree)
    rst_readme = convert_file(tree_join, 'rst')
    rst_path = "{}.rst".format(os.path.splitext(tree_join)[0])
    # Write with an explicit encoding: pandoc output may contain non-ASCII,
    # and the platform default (e.g. cp1252 on Windows) can raise
    # UnicodeEncodeError. read() above already assumes utf-8, so be
    # consistent here.
    with open(rst_path, "w", encoding="utf-8") as rst_file:
        rst_file.write(rst_readme)
    return rst_readme
# Package metadata: version, dependencies and description are all read
# from files at build time via the helpers above.
setup(
    name="neurartist",
    version=version("neurartist"),
    author="Guillaume Jorandon",
    description="Ready-to-use artistic deep learning algorithms",
    long_description=long_description("README.md"),
    url="https://github.com/gjorando/style-transfer",
    packages=find_packages(exclude=["tests"]),
    install_requires=requirements("requirements.txt"),
    entry_points={
        'console_scripts': ['neurartist=neurartist.cli:main']
    },
    python_requires='>=3',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Environment :: Console',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
        'Programming Language :: Python :: 3 :: Only',
        'Topic :: Artistic Software',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
    ]
)
| gjorando/style-transfer | setup.py | setup.py | py | 2,277 | python | en | code | 2 | github-code | 36 |
71696562344 | # @keras-rl
'''
Script for custom or modified noise processes
'''
from __future__ import division
import numpy as np
#makes an instance of a noise process and returns it
#defined by configuration nc
#size is the number of parameters the noise is applied to
#so far just one-dimensional vector (only action noise)
def getNoise(nc, size):
    """Build the noise process named by nc['key'] ('GWN', 'OU' or 'OUAR').

    Optional config keys (mu, sigma, sigma_min, n_steps_annealing, theta,
    dt) fall back to the same defaults as before; 'theta' is required for
    the OU variants. Previously each branch re-parsed the shared keys with
    duplicated code and the dispatch dict was only used for the assert.
    """
    dictionary = {
        'GWN': GaussianWhiteNoiseProcess,
        'OU': OrnsteinUhlenbeckProcess,
        'OUAR': OUAnnealReset,
        #'AOU' : AlternatingOU
    }
    assert nc['key'] in dictionary, "noise process does not exist"

    # Parameters shared by every annealed-Gaussian process, parsed once.
    kwargs = {
        'mu': nc.get('mu', 0.),
        'sigma': nc.get('sigma', 1.),
        'sigma_min': nc.get('sigma_min', None),
        'n_steps_annealing': nc.get('n_steps_annealing', 1000),
        'size': size,
    }
    if nc['key'] == 'GWN':
        return GaussianWhiteNoiseProcess(**kwargs)

    # Both OU variants additionally need theta (required) and dt.
    assert 'theta' in nc
    kwargs['theta'] = nc['theta']
    kwargs['dt'] = nc.get('dt', 1e-2)
    if nc['key'] == 'OU':
        #x0 = np.random.normal(mu,sigma,size) if 'x0' in nc else None
        return OrnsteinUhlenbeckProcess(x0=None, **kwargs)
    return OUAnnealReset(**kwargs)
#### From keras-rl: ####
#the following 4 classes
#https://github.com/keras-rl/keras-rl/blob/1e915aa1943086e3c75c6aaf51b84c6b649c2600/rl/random.py
class RandomProcess(object):
    """Base interface for exploration-noise processes."""
    def reset_states(self):
        # Default: stateless processes have nothing to reset.
        pass
class AnnealedGaussianProcess(RandomProcess):
    """Gaussian process whose sigma is linearly annealed from `sigma`
    down to `sigma_min` over `n_steps_annealing` steps (no annealing when
    sigma_min is None)."""

    def __init__(self, mu, sigma, sigma_min, n_steps_annealing):
        self.mu = mu
        self.sigma = sigma
        self.n_steps = 0
        if sigma_min is None:
            # No annealing: zero slope, sigma stays constant.
            self.m = 0.
            self.c = sigma
            self.sigma_min = sigma
        else:
            # Linear decay: sigma(t) = m*t + c, clipped below at sigma_min.
            self.m = -float(sigma - sigma_min) / float(n_steps_annealing)
            self.c = sigma
            self.sigma_min = sigma_min

    @property
    def current_sigma(self):
        """Annealed sigma at the current step count."""
        return max(self.sigma_min, self.m * float(self.n_steps) + self.c)
class GaussianWhiteNoiseProcess(AnnealedGaussianProcess):
    """IID Gaussian noise with an annealed standard deviation."""

    def __init__(self, mu=0., sigma=1., sigma_min=None, n_steps_annealing=1000, size=1):
        super(GaussianWhiteNoiseProcess, self).__init__(
            mu=mu, sigma=sigma, sigma_min=sigma_min,
            n_steps_annealing=n_steps_annealing)
        self.size = size

    def sample(self):
        """Draw one noise vector and advance the annealing step counter."""
        drawn = np.random.normal(self.mu, self.current_sigma, self.size)
        self.n_steps += 1
        return drawn
# Based on http://math.stackexchange.com/questions/1287634/implementing-ornstein-uhlenbeck-in-matlab
class OrnsteinUhlenbeckProcess(AnnealedGaussianProcess):
    """Temporally correlated (mean-reverting) noise with annealed sigma."""

    def __init__(self, theta, mu=0., sigma=1., dt=1e-2, x0=None, size=1, sigma_min=None, n_steps_annealing=1000):
        super(OrnsteinUhlenbeckProcess, self).__init__(
            mu=mu, sigma=sigma, sigma_min=sigma_min,
            n_steps_annealing=n_steps_annealing)
        self.theta = theta
        self.mu = mu
        self.dt = dt
        self.x0 = x0
        self.size = size
        self.reset_states()

    def sample(self):
        """One Euler-Maruyama step of the OU SDE."""
        drift = self.theta * (self.mu - self.x_prev) * self.dt
        diffusion = self.current_sigma * np.sqrt(self.dt) * np.random.normal(size=self.size)
        x = self.x_prev + drift + diffusion
        self.x_prev = x
        self.n_steps += 1
        return x

    def reset_states(self):
        """Restart the process at x0, or at zeros when x0 is unset."""
        if self.x0 is None:
            self.x_prev = np.zeros(self.size)
        else:
            self.x_prev = self.x0
#### Own Noise Processes ####

#improves the keras-rl OU implementation by making the reset dependent on the standard deviation
class OUAnnealReset(OrnsteinUhlenbeckProcess):
    """OU process whose reset draws the start state from N(mu, current_sigma),
    so episode resets shrink together with the annealed sigma."""
    def __init__(self,**kwargs):
        super(OUAnnealReset,self).__init__(**kwargs)
    def reset_states(self):
        # Unlike the base class (zeros / fixed x0), sample the start state
        # using the currently annealed sigma.
        self.x_prev = np.random.normal(self.mu,self.current_sigma,self.size)
#### Experimentals for fun ####

#Ornstein Uhlenbeck which resets the annealing sigma to the initial value
class AlternatingOU(OUAnnealReset):
    """Experimental OU variant: sigma annealing restarts periodically, and
    an optional warm-up emits minimal-sigma noise for the first n_begin
    samples."""

    def __init__(self, n_res, n_steps_annealing, n_begin=0, **kwargs):
        self.n_res = n_res
        self.n_ann = n_steps_annealing
        self.n_begin = n_begin  # step count when to begin with noise
        super(AlternatingOU, self).__init__(n_steps_annealing=n_steps_annealing, **kwargs)

    @property
    def current_sigma(self):
        """Annealed sigma computed on the step counter modulo one
        anneal-plus-rest cycle."""
        cycle_pos = self.n_steps % (self.n_ann + self.n_res)
        return max(self.sigma_min, self.m * float(cycle_pos) + self.c)

    def sample(self):
        if self.n_begin > 0:
            # Warm-up phase: emit minimal-sigma white noise only.
            self.n_begin -= 1
            return np.random.normal(self.mu, self.sigma_min, self.size)
        return super(AlternatingOU, self).sample()
#OU modification which sets the ongoing output of the noise process to zero
#but does not interrupt the process itself
class PausingOU(OrnsteinUhlenbeckProcess):
    """OU process that alternates `noiseLength` normal steps with
    `noisePause` steps whose *output* is scaled by `alpha` (zero when
    alpha == 0); the underlying OU state keeps evolving throughout."""
    def __init__(self, noiseLength, noisePause, alpha, **kwargs):
        self.alpha = alpha
        self.noiseLength = noiseLength
        self.noisePause = noisePause
        # Countdown over one full cycle; the "paused" phase is active while
        # self.np <= noisePause. NOTE(review): the attribute name `np`
        # collides visually with the module-level numpy alias — confirm no
        # external code reads it before renaming.
        self.np = noisePause + noiseLength
        super(PausingOU, self).__init__(**kwargs)
    def sample(self):
        # The OU state always advances, even during the damped phase.
        x = self.x_prev + self.theta * (self.mu - self.x_prev) * self.dt + self.current_sigma * np.sqrt(self.dt) * np.random.normal(size=self.size)
        self.x_prev = x
        self.n_steps += 1
        if self.np <= self.noisePause:
            # Damped ("paused") phase: scale the emitted noise by alpha.
            x = x * self.alpha
            self.np -= 1
            if self.np <= 0:
                # Cycle finished: restart the countdown.
                self.np = self.noiseLength + self.noisePause
        return x
#testing
if __name__ == '__main__':
    # Smoke test: build the first configured noise process for a
    # 5-dimensional action vector, sample once, and reset it.
    from config import noiseConfig as nc
    noise = getNoise(nc[0],5)
    print(noise.theta)
    print(noise.sample())
    noise.reset_states()
    print(noise.x_prev)
20715650622 |
def solve(program):
    """Run the handheld-console program until an instruction repeats or
    execution falls off the end.

    `program` is a list of (instruction, argument) pairs. Returns
    (accumulator, pointer) at the moment a previously executed
    instruction would run again, or when the pointer leaves the program.
    """
    accumulator = 0
    pointer = 0
    seen = set()
    while pointer < len(program) and pointer not in seen:
        seen.add(pointer)
        op, arg = program[pointer]
        if op == 'jmp':
            pointer += arg
        else:
            if op == 'acc':
                accumulator += arg
            pointer += 1
    return accumulator, pointer
def solve_part1(program):
    """Accumulator value at the moment the program first loops."""
    accumulator, _pointer = solve(program)
    return accumulator
def solve_part2(program):
    """Flip each jmp<->nop once and return the accumulator of the run
    that terminates normally, or None if no flip terminates."""
    for idx, (op, arg) in enumerate(program):
        if op == 'acc':
            continue
        flipped = 'jmp' if op == 'nop' else 'nop'
        program[idx] = (flipped, arg)
        accumulator, pointer = solve(program)
        program[idx] = (op, arg)  # restore before trying the next candidate
        if pointer == len(program):
            return accumulator
    return None
# Parse the puzzle input: one "<op> <signed int>" instruction per line.
with open('input', 'r') as f:
    program = [line.strip().split( ) for line in f.readlines()]
program = [(line[0], int(line[1])) for line in program]

result = solve_part1(program)
print(f"The accumulator contains the value {result} (part1).")
result = solve_part2(program)
print(f"The accumulator contains the value {result} (part2).")
| jonassjoh/AdventOfCode | 2020/8/day8.py | day8.py | py | 1,252 | python | en | code | 0 | github-code | 36 |
16009855981 | #!/usr/bin/python3
import numpy as np
from matplotlib import pyplot as plt
# Parse comma-separated integer pairs (one pair per line) and plot them.
with open("HailStoneNum.txt", "r") as fh:
    pairs = [line.split(",") for line in fh]
lx = [int(pair[0]) for pair in pairs]
ly = [int(pair[1]) for pair in pairs]

x = np.array(lx)
y = np.array(ly)
plt.plot(x, y)
plt.savefig("HailStone.jpg")
| Ukuer/rasp-pi | DSA/HailStone/HailStoneCount.py | HailStoneCount.py | py | 322 | python | en | code | 0 | github-code | 36 |
16198615974 | # Climate App
# Now that you have completed your initial analysis, design a Flask api based on the queries that you have just developed.
# - Use FLASK to create your routes.
#################################################
# Import Flask & jsonify & the kitchen sink...
#################################################
import datetime as dt
import numpy as np
import pandas as pd
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
#################################################
# Database Setup
#################################################
engine = create_engine("sqlite:///hawaii.sqlite")

# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)

# Save reference to the tables
Station = Base.classes.station
Measurement = Base.classes.measurement

# Create our session (link) from Python to the DB
# NOTE(review): a single module-level session is shared by all requests.
session = Session(engine)

#################################################
# Flask Setup
#################################################
app = Flask(__name__)
#################################################
# Flask Routes
#################################################
@app.route("/")
def welcome():
    """Landing page: list all available api routes as an HTML snippet."""
    pieces = [
        "<h1>HW 11 Surf Is Up!<h1/>",
        "<br/>",
        "<h2>Available APIs<h2/>",
        "<li><a href ='/api/v1.0/precipitation'>Precipitation</a></li>",
        "<li><a href ='/api/v1.0/stations'>Stations</a></li>",
        "<li><a href ='/api/v1.0/tobs'>Temps observed</a></li>",
        "<li><a href = '/api/v1.0/start_end'>Calculated Temps</a></li>",
    ]
    return "".join(pieces)
#################################################
# /api/v1.0/precipitation
#################################################
@app.route("/api/v1.0/precipitation")
def precipitation():
    """Daily precipitation totals after 2016-08-22, as JSON."""
    cutoff = dt.date(2016, 8, 22)
    # select date, sum(prcp) from measurement
    # where date > cutoff group by date
    totals = (
        session.query(Measurement.date, func.sum(Measurement.prcp))
        .filter(Measurement.date > cutoff)
        .group_by(Measurement.date)
        .all()
    )
    return jsonify([totals])
#################################################
# /api/v1.0/stations
#################################################
@app.route("/api/v1.0/stations")
def stations():
    """All weather stations with name, id and elevation, as JSON."""
    rows = session.query(Station.name, Station.station, Station.elevation).all()
    station_list = [
        {'elevation': elevation, 'station': station_id, 'name': name}
        for name, station_id, elevation in rows
    ]
    return jsonify(station_list)
#################################################
# /api/v1.0/tobs
# - Return a json list of Temperature Observations (tobs) for the previous year
#################################################
@app.route("/api/v1.0/tobs")
def temp_obs():
    """Temperature observations after 2016-08-22, as JSON."""
    cutoff = dt.date(2016, 8, 22)
    observations = (
        session.query(Station.name, Measurement.date, Measurement.tobs)
        .filter(Measurement.date > cutoff)
        .all()
    )
    tobs_list = [
        {"Station": name, "Date": date, "Temperature": int(tobs)}
        for name, date, tobs in observations
    ]
    return jsonify(tobs_list)
#################################################
#
# - /api/v1.0/<start> and /api/v1.0/<start>/<end>
# - Return a json list of the minimum temperature, the average temperature, and the max temperature
# for a given start or start-end range.
# - When given the start only, calculate TMIN, TAVG, and TMAX for all dates greater than and equal
# to the start date.
# - When given the start and the end date, calculate the TMIN, TAVG, and TMAX for dates between the
# start and end date inclusive.
#
# Hints
# - You will need to join the station and measurement tables for some of the analysis queries.
# - Use Flask jsonify to convert your api data into a valid json response object.
#################################################
@app.route("/api/v1.0/start_end")
def calc_temps():
    """Min/avg/max temperature between a hard-coded start and end date,
    rendered as a small HTML fragment."""
    start_date = dt.date(2017, 7, 1)
    end_date = dt.date(2017, 7, 11)

    rows = session.query(Measurement.tobs).filter(
        Measurement.date >= start_date, Measurement.date <= end_date).all()
    temperatures = [row[0] for row in rows]

    temp_min = min(temperatures)
    temp_max = max(temperatures)
    temp_avg = np.mean(temperatures)

    date_results = ('Start date: ' + str(start_date) + '</br>'
                    + 'End date: ' + str(end_date) + '</br>')
    minmax_results = ('Min temp: ' + str(temp_min) + '</br>'
                      + 'Avg temp: ' + str(temp_avg) + '</br>'
                      + 'Max temp: ' + str(temp_max))
    return date_results + minmax_results
#################################################
# Define Main behavior
#################################################
if __name__ == '__main__':
    # Run the Flask development server (debug mode reloads on changes).
    app.run(debug=True)
| JREwan/python-challenge | Homework11_SurfsUp/app.py | app.py | py | 5,386 | python | en | code | 0 | github-code | 36 |
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 2 00:48:47 2021

@author: baris

Reads store coordinates and demands from example.xlsx, builds a
demand-weighted distance matrix, picks the two stores with the lowest
total weighted distance as facility candidates, assigns every store to
the nearer candidate, and writes the full result matrix to result.xlsx.
"""
import pandas as pd
import math
import numpy as np
import xlsxwriter

xlxs_file = pd.read_excel("example.xlsx")

# All columns have separeted into a list on their own.
parsed_store = xlxs_file["store"].tolist()
parsed_x = xlxs_file["x"].tolist()
parsed_y = xlxs_file["y"].tolist()
parsed_demand = xlxs_file["demand"].tolist()

n = len(parsed_store)
# First n rows: demand-weighted distance matrix. The extra 5 rows hold
# summary data: row -5 column totals, row -4 sorted totals, row -3
# 1-based ranking, row -2 per-store assignment, row -1 counts/sums.
result = np.zeros((n + 5, n))

for i in range(n):
    for j in range(n):
        distance = math.sqrt((parsed_x[i] - parsed_x[j])**2 + (parsed_y[i] - parsed_y[j])**2)
        result[i][j] = distance * parsed_demand[i]

# Row -5: total weighted distance if the facility sat at each store.
# (Loop kept sequential so float rounding matches the previous output;
# the accumulator is renamed from `sum`, which shadowed the builtin.)
for i in range(n):
    column_total = .0
    for j in range(n):
        column_total += result[j][i]
    result[-5][i] = column_total

result[-4] = sorted(result[-5])
result[-3] = np.argsort(result[-5]) + 1

# 0-based indices of the two stores with the smallest totals.
min1, min2 = int(result[-3][0] - 1), int(result[-3][1] - 1)

count1 = 0
count2 = 0
weighted1 = .0
weighted2 = .0
# Row -2: assign each store to whichever winner is cheaper to reach.
for i in range(n):
    tmp1 = result[i][min1]
    tmp2 = result[i][min2]
    if tmp1 < tmp2:
        result[-2][i] = min1 + 1
        count1 += 1
        weighted1 += tmp1
    else:
        result[-2][i] = min2 + 1
        count2 += 1
        weighted2 += tmp2

# Row -1: store counts and accumulated weighted distances per winner.
result[-1][0] = count1
result[-1][1] = count2
result[-1][2] = weighted1
result[-1][3] = weighted2

workbook = xlsxwriter.Workbook('result.xlsx')
worksheet = workbook.add_worksheet()

row = 0
column = 0
for module in result:
    worksheet.write_row(row, column, module)
    row += 1

workbook.close()
| barissoyer/FunProjects | X-Yl_Location based/xy_locations.py | xy_locations.py | py | 1,579 | python | en | code | 0 | github-code | 36 |
20496563572 | from typing import List
from instructor import patch
from pydantic import BaseModel, Field
import openai
patch()  # instructor.patch(): lets openai.ChatCompletion.create accept response_model (used in ask_ai below)
class Property(BaseModel):
    """A single key/value attribute extracted for an entity."""
    key: str
    value: str
    # Value with references/relative phrasing resolved to an absolute form.
    resolved_absolute_value: str
class Entity(BaseModel):
    """One resolved entity extracted from the document: provenance quotes,
    typed properties, and dependency links to other entities by id."""
    id: int = Field(
        ...,
        description="Unique identifier for the entity, used for deduplication, design a scheme allows multiple entities",
    )
    subquote_string: List[str] = Field(
        ...,
        description="Correctly resolved value of the entity, if the entity is a reference to another entity, this should be the id of the referenced entity, include a few more words before and after the value to allow for some context to be used in the resolution",
    )
    entity_title: str
    properties: List[Property] = Field(
        ..., description="List of properties of the entity"
    )
    dependencies: List[int] = Field(
        ...,
        description="List of entity ids that this entity depends or relies on to resolve it",
    )
class DocumentExtraction(BaseModel):
    """Top-level extraction result: the list of resolved entities."""
    entities: List[Entity] = Field(
        ...,
        description="Body of the answer, each fact should be its seperate object with a body and a list of sources",
    )
def ask_ai(content) -> DocumentExtraction:
    """Extract and resolve entities from *content* via GPT-4.

    Relies on instructor's patched ChatCompletion.create, which validates the
    model's reply against the DocumentExtraction schema before returning it.
    """
    chat_messages = [
        {
            "role": "system",
            "content": "You are a perfect entity resolution system that extracts facts from the document. Extract and resolve a list of entities from the following document:",
        },
        {
            "role": "user",
            "content": content,
        },
    ]
    return openai.ChatCompletion.create(  # type: ignore
        model="gpt-4",
        response_model=DocumentExtraction,
        messages=chat_messages,
    )
content = """
Sample Legal Contract
Agreement Contract
This Agreement is made and entered into on 2020-01-01 by and between Company A ("the Client") and Company B ("the Service Provider").
Article 1: Scope of Work
The Service Provider will deliver the software product to the Client 30 days after the agreement date.
Article 2: Payment Terms
The total payment for the service is $50,000.
An initial payment of $10,000 will be made within 7 days of the the signed date.
The final payment will be due 45 days after [SignDate].
Article 3: Confidentiality
The parties agree not to disclose any confidential information received from the other party for 3 months after the final payment date.
Article 4: Termination
The contract can be terminated with a 30-day notice, unless there are outstanding obligations that must be fulfilled after the [DeliveryDate].
"""
model = ask_ai(content)
print(model.model_dump_json(indent=2))
"""
{
"entities": [
{
"id": 1,
"subquote_string": [
"This Agreement is made and entered into on 2020-01-01 by and between Company A (\"the Client\") and Company B (\"the Service Provider\")."
],
"entity_title": "Agreement between Company A and Company B",
"properties": [
{
"key": "Date",
"value": "2020-01-01",
"resolved_absolute_value": "2020-01-01"
},
{
"key": "Party 1",
"value": "Company A",
"resolved_absolute_value": "Company A"
},
{
"key": "Party 2",
"value": "Company B",
"resolved_absolute_value": "Company B"
}
],
"dependencies": []
},
{
"id": 2,
"subquote_string": [
"The Service Provider will deliver the software product to the Client 30 days after the agreement date."
],
"entity_title": "Scope of Work",
"properties": [
{
"key": "Delivery Date",
"value": "30 days after the agreement date",
"resolved_absolute_value": "2020-01-31"
}
],
"dependencies": [
1
]
},
{
"id": 3,
"subquote_string": [
"The total payment for the service is $50,000.",
"An initial payment of $10,000 will be made within 7 days of the the signed date.",
"The final payment will be due 45 days after [SignDate]."
],
"entity_title": "Payment Terms",
"properties": [
{
"key": "Total Payment",
"value": "$50,000",
"resolved_absolute_value": "50000"
},
{
"key": "Initial Payment",
"value": "$10,000",
"resolved_absolute_value": "10000"
},
{
"key": "Final Payment Due Date",
"value": "45 days after [SignDate]",
"resolved_absolute_value": "2020-02-15"
}
],
"dependencies": [
1
]
},
{
"id": 4,
"subquote_string": [
"The parties agree not to disclose any confidential information received from the other party for 3 months after the final payment date."
],
"entity_title": "Confidentiality Terms",
"properties": [
{
"key": "Confidentiality Duration",
"value": "3 months after the final payment date",
"resolved_absolute_value": "2020-05-15"
}
],
"dependencies": [
3
]
},
{
"id": 5,
"subquote_string": [
"The contract can be terminated with a 30-day notice, unless there are outstanding obligations that must be fulfilled after the [DeliveryDate]."
],
"entity_title": "Termination",
"properties": [
{
"key": "Termination Notice",
"value": "30-day",
"resolved_absolute_value": "30 days"
}
],
"dependencies": [
2
]
}
]
}
"""
| realsrisri/jxnl-instructor | examples/reference-citation/run.py | run.py | py | 5,705 | python | en | code | null | github-code | 36 |
13460588720 | class Dice():
    def __init__(self, x, y):
        # Random starting face (1-6) drawn at screen position (x, y).
        self.side = int(random(1, 7))
        self.x = x
        self.y = y
        self.status = 'stopped'  # either 'stopped' or 'rolling'
        self.last_rolled = millis()
        self.keep = False
def roll(self):
if self.status != 'rolling' and not self.keep:
self.status = 'rolling'
self.last_rolled = millis()
def translate(self, dst_x, dst_y):
dx = int((dst_x - self.x) / 4)
dy = int((dst_y - self.y) / 4)
if dx == 0:
self.x = dst_x
else:
self.x += dx
if dy == 0:
self.y = dst_y
else:
self.y += dy
def display(self):
if self.status == 'rolling':
if millis() - self.last_rolled > random(1000, 3000):
self.status = 'stopped'
self.side = int(random(1, 7))
self.x += int(random(-10, 10))
self.y += int(random(-10, 10))
self.x = max(50, self.x)
self.y = max(50, self.y)
if self.keep:
strokeWeight(5)
stroke(204, 102, 0)
else:
noStroke()
for i in range(1, 10, 2):
fill(0, 100 - 5 * (i + 1))
rectMode(CENTER)
rect(self.x + i, self.y + i, 100, 100)
fill('#ffffff')
rectMode(CENTER)
rect(self.x, self.y, 100, 100)
noStroke()
fill('#333333')
if self.side == 1 or self.side == 3 or self.side == 5:
ellipse(self.x, self.y, 20, 20)
if self.side == 4 or self.side == 5 or self.side == 6:
ellipse(self.x - 30, self.y - 30, 20, 20)
if self.side == 6:
ellipse(self.x - 30, self.y, 20, 20)
if self.side == 2 or self.side == 3 or self.side == 4 or self.side == 5 or self.side == 6:
ellipse(self.x - 30, self.y + 30, 20, 20)
if self.side == 2 or self.side == 3 or self.side == 4 or self.side == 5 or self.side == 6:
ellipse(self.x + 30, self.y - 30, 20, 20)
if self.side == 6:
ellipse(self.x + 30, self.y, 20, 20)
if self.side == 4 or self.side == 5 or self.side == 6:
ellipse(self.x + 30, self.y + 30, 20, 20)
class Strategy():
    """Scores the current set of dice against every Yacht category.

    The `strategies` dict (category name -> state dict) is shared with the
    rest of the sketch; calculate() writes potential scores into it.
    """

    def __init__(self, strategies):
        self.strategies = strategies

    def set_dices(self, dices):
        self.dices = dices

    def calculate(self):
        """Refresh the potential score of every not-yet-banked category."""
        self.sides = [die.side for die in self.dices]
        self.unique = set(self.sides)
        # Upper section (1s..6s) plus the 35-point bonus.
        # NOTE(review): the bonus test sums only the still-open upper
        # categories' *potential* scores -- confirm this matches the rule.
        upper_total = 0
        for face in range(1, 7):
            slot = self.strategies['%ds' % face]
            if slot['done']:
                continue
            slot['score'] = self.sum_of_single(face)
            upper_total += slot['score']
        if upper_total >= 63:
            self.strategies['Bonus']['score'] = 35
        # Lower section combinations.
        if not self.strategies['Choice']['done']:
            self.strategies['Choice']['score'] = sum(self.sides)
        if not self.strategies['3-of-a-kind']['done']:
            self.strategies['3-of-a-kind']['score'] = self.of_a_kind(3)
        if not self.strategies['4-of-a-kind']['done']:
            self.strategies['4-of-a-kind']['score'] = self.of_a_kind(4)
        if not self.strategies['Full House']['done']:
            self.strategies['Full House']['score'] = self.full_house()
        if not self.strategies['S. Straight']['done']:
            self.strategies['S. Straight']['score'] = self.small_straight()
        if not self.strategies['L. Straight']['done']:
            self.strategies['L. Straight']['score'] = self.large_straight()
        if not self.strategies['Yacht']['done']:
            self.strategies['Yacht']['score'] = self.of_a_kind(5)
        # Grand total = sum of the banked categories only.
        self.strategies['Total']['score'] = 0
        for entry in self.strategies.values():
            if entry['done']:
                self.strategies['Total']['score'] += entry['score']
        return self.strategies

    def count(self, number):
        """Number of dice currently showing *number*."""
        return self.sides.count(number)

    def highest_repeated(self, min_repeats):
        """Highest face appearing at least *min_repeats* times, else 0."""
        candidates = [face for face in self.unique if self.count(face) >= min_repeats]
        return max(candidates) if candidates else 0

    def of_a_kind(self, n):
        """Score for n-of-a-kind; Yacht (n == 5) is a flat 50 points."""
        face = self.highest_repeated(n)
        if face == 0:
            return 0
        if n == 5:
            return 50
        leftovers = sum(side for side in self.sides if side != face)
        return face * n + leftovers

    def sum_of_single(self, number):
        """Upper-section score: sum of all dice showing *number*."""
        return number * self.count(number)

    def full_house(self):
        """25 points for a 3+2 split (checked in either orientation)."""
        for min_rep in (3, 2):
            face = self.highest_repeated(min_rep)
            if face > 0:
                others = [side for side in self.sides if side != face]
                if len(others) == 5 - min_rep and len(set(others)) == 1:
                    return 25
        return 0

    def small_straight(self):
        """30 points if any run of four consecutive faces is present."""
        for run in ({1, 2, 3, 4}, {2, 3, 4, 5}, {3, 4, 5, 6}):
            if run <= self.unique:
                return 30
        return 0

    def large_straight(self):
        """40 points if any run of five consecutive faces is present."""
        for run in ({1, 2, 3, 4, 5}, {2, 3, 4, 5, 6}):
            if run <= self.unique:
                return 40
        return 0
class GameManager():
    """Tracks game-wide state: round/roll counters and the UI status machine.

    Reads and mutates the module-level `dices` and `strategies` objects.
    """

    def __init__(self):
        self.set_status('normal')
        self.n_rolling_dices = 0
        self.n_rolls = 0
        self.n_keeps = 0
        self.score = 0
        self.n_rounds = 1
        self.highest_score = 0

    def reset(self):
        """Start the next round; after round 13 restart the whole game."""
        self.set_status('normal')
        self.n_rolling_dices = 0
        self.n_rolls = 0
        self.n_keeps = 0
        self.score = 0
        for die in dices:
            die.keep = False
        if self.n_rounds == 13:
            self.reset2()
        else:
            self.n_rounds += 1

    def reset2(self):
        """Full-game reset: record the finished game's total, clear the board."""
        self.n_rounds = 1
        self.highest_score = strategies['Total']['score']
        for name in strategies:
            strategies[name]['done'] = False

    def set_status(self, status):
        # status is one of: normal, rolling, sorting, keeping, calculating.
        self.status = status
        self.last_status_changed = millis()

    def roll_dices(self):
        """Roll all non-kept dice, or go straight to scoring if all are kept."""
        if self.n_keeps >= 5:
            self.set_status('calculating')
            return
        self.set_status('rolling')
        self.n_rolls += 1
        for die in dices:
            die.roll()

    def print_board(self, strategies):
        """Draw the category list with per-row highlight colours."""
        textSize(32)
        textAlign(LEFT)
        for name, entry in strategies.items():
            if entry['selected']:
                fill(0, 255, 255)  # cyan: currently selected
            elif entry['done']:
                fill(150)          # grey: already banked
            else:
                fill(240)          # white: available
            x, y = entry['position']
            text('%s' % (name,), x, y)
            text('%2d' % (entry['score'],), x + 300, y)

    def print_status(self):
        """Draw the status prompt plus round/roll/high-score counters."""
        prompts = {
            'normal': 'Roll dices',
            'rolling': '',
            'sorting': '',
            'keeping': 'Select dices',
            'calculating': 'Choose category'
        }
        fill(240)
        textSize(32)
        textAlign(CENTER)
        text(prompts[self.status], 500, 100)
        textAlign(RIGHT)
        text('Round %d/13' % (self.n_rounds), 980, 40)
        text('%d/3' % (self.n_rolls), 980, 80)
        textAlign(LEFT)
        text('Highest %d' % (self.highest_score), 10, 40)
# main
strategies_order = ['1s', '2s', '3s', '4s', '5s', '6s', 'Bonus', 'Choice', '3-of-a-kind', '4-of-a-kind', 'Full House', 'S. Straight', 'L. Straight', 'Yacht', 'Total']
# Score-board state for every category, built directly from strategies_order.
# (The previous version first created a name -> 0 dict whose integer values
# were all immediately overwritten below, i.e. dead placeholders.)
strategies = {}
for i, strategy_name in enumerate(strategies_order):
    strategies[strategy_name] = {
        'position': [300, 400 + i * 40],  # where this row is drawn
        'score': 0,                       # current potential/banked score
        'selected': False,                # highlighted by the player
        'done': False                     # category already banked
    }
gm = GameManager()
strategy = Strategy(strategies)
# The five dice and their resting positions (a horizontal row the dice
# drift back to while being sorted).
dices = []
init_pos = [
    (200, 200),
    (350, 200),
    (500, 200),
    (650, 200),
    (800, 200)
]
for pos in init_pos:
    dice = Dice(pos[0], pos[1])
    dices.append(dice)
def setup():
    # Processing entry point: configure the canvas and frame rate once.
    frameRate(30)
    size(1000, 1000)
def draw():
    # Processing main loop: redraws the whole frame ~30 times per second and
    # advances the game-state machine (rolling -> sorting -> keeping/scoring).
    global init_pos, strategy
    background('#777777')
    # Re-count rolling and kept dice from scratch every frame.
    gm.n_rolling_dices = 0
    gm.n_keeps = 0
    for i, dice in enumerate(dices):
        dice.display()
        if dice.status == 'rolling':
            gm.n_rolling_dices += 1
        if dice.keep:
            gm.n_keeps += 1
    if gm.status == 'rolling' and gm.n_rolling_dices == 0:
        # All dice have settled: slide them into sorted order.
        gm.set_status('sorting')
    elif gm.status == 'sorting':
        dices.sort(key=lambda x: x.side, reverse=False)
        for dice, pos in zip(dices, init_pos):
            dice.translate(pos[0], pos[1])
        # Give the slide animation one second before moving on.
        if millis() - gm.last_status_changed > 1000:
            if gm.n_rolls >= 3 or gm.n_keeps >= 5:
                gm.set_status('calculating')
            else:
                gm.set_status('keeping')
    strategy.set_dices(dices)
    strategy.calculate()
    gm.print_board(strategies)
    gm.print_status()
def mouseReleased():
    # Click dispatch: dice area toggles "keep", score board toggles category
    # selection, and the background confirms a selection or rolls the dice.
    # dices area
    for i, pos in enumerate(init_pos):
        if pos[0] - 50 < mouseX < pos[0] + 50 and pos[1] - 50 < mouseY < pos[1] + 50:
            if gm.status == 'keeping':
                dices[i].keep = not dices[i].keep
            break
    # score board area
    if 300 < mouseX < 640 and strategies[strategies_order[0]]['position'][1] - 30 < mouseY < strategies[strategies_order[-1]]['position'][1]:
        if gm.status == 'keeping' or gm.status == 'calculating':
            for k, v in strategies.items():
                if k == 'Bonus' or k == 'Total':
                    # Computed rows; never player-selectable.
                    continue
                pos = v['position']
                if 300 < mouseX < 640 and pos[1] - 30 < mouseY < pos[1] + 10 and strategies[k]['done'] == False:
                    strategies[k]['selected'] = not strategies[k]['selected']
                else:
                    # Clicking one row deselects every other row.
                    strategies[k]['selected'] = False
    # background area
    elif not (init_pos[0][0] - 50 < mouseX < init_pos[-1][0] + 50 and init_pos[0][1] - 50 < mouseY < init_pos[-1][1] + 50):
        selected = None
        for k, v in strategies.items():
            if v['selected']:
                selected = k
        if selected is not None:
            # Bank the selected category and start the next round.
            if gm.status == 'keeping' or gm.status == 'calculating':
                strategies[selected]['done'] = True
                strategies[selected]['selected'] = False
                selected = None
                gm.reset()
        else:
            if gm.status == 'keeping' or gm.status == 'normal':
                gm.roll_dices()
            elif gm.status == 'calculating':
                # NOTE(review): `selected` is always None on this path, so
                # this branch can never execute -- confirm whether it is
                # dead code or a lost bug fix.
                if selected is not None:
                    strategies[selected]['done'] = True
                    strategies[selected]['selected'] = False
                    selected = None
                    gm.reset()
| kairess/yacht-dice | yacht/yacht.pyde | yacht.pyde | pyde | 11,675 | python | en | code | 4 | github-code | 36 |
32115859766 | from __future__ import annotations
from typing import Iterable, Iterator, List, Literal, Optional, Type
import frictionless as fl
import marshmallow as mm
from dimcat import DimcatConfig, get_class
from dimcat.data.base import Data
from dimcat.data.packages.base import Package, PackageSpecs
from dimcat.data.resources.base import Resource
from dimcat.data.resources.dc import FeatureSpecs
from dimcat.dc_exceptions import (
DuplicatePackageNameError,
EmptyCatalogError,
EmptyPackageError,
NoMatchingResourceFoundError,
PackageNotFoundError,
ResourceNotFoundError,
)
from dimcat.utils import treat_basepath_argument
from frictionless import FrictionlessException
from typing_extensions import Self
class DimcatCatalog(Data):
    """Has the purpose of collecting and managing a set of :obj:`Package` objects.
    Analogous to a :obj:`frictionless.Catalog`, but without intermediate :obj:`frictionless.Dataset` objects.
    Nevertheless, a DimcatCatalog can be stored as and created from a Catalog descriptor (ToDo).
    """
    class PickleSchema(Data.PickleSchema):
        # Marshmallow schema used when (de)serializing the catalog; packages
        # are serialized through their own nested Package schema.
        packages = mm.fields.List(
            mm.fields.Nested(Package.Schema),
            required=False,
            allow_none=True,
            metadata=dict(description="The packages in the catalog."),
        )
    class Schema(PickleSchema, Data.Schema):
        pass
    def __init__(
        self,
        basepath: Optional[str] = None,
        packages: Optional[PackageSpecs | List[PackageSpecs]] = None,
    ) -> None:
        """Creates a DimcatCatalog which is essentially a list of :obj:`Package` objects.
        Args:
            basepath: The basepath for all packages in the catalog.
        """
        # _packages must exist before super().__init__ because the basepath
        # setter iterates over it.
        self._packages: List[Package] = []
        super().__init__(basepath=basepath)
        if packages is not None:
            self.packages = packages
    def __getitem__(self, item: str) -> Package:
        # Dict-style access by package name; lookup failures surface as KeyError.
        try:
            return self.get_package(item)
        except Exception as e:
            raise KeyError(str(e)) from e
    def __iter__(self) -> Iterator[Package]:
        yield from self._packages
    def __len__(self) -> int:
        return len(self._packages)
    @property
    def basepath(self) -> Optional[str]:
        """If specified, the basepath for all packages added to the catalog."""
        return self._basepath
    @basepath.setter
    def basepath(self, basepath: str) -> None:
        # Only propagate the new basepath to the packages for a fresh catalog;
        # existing packages keep the basepath they were added with.
        new_catalog = self._basepath is None
        self._set_basepath(basepath, set_packages=new_catalog)
    @property
    def package_names(self) -> List[str]:
        # Names of all loaded packages, in insertion order.
        return [package.package_name for package in self._packages]
    @property
    def packages(self) -> List[Package]:
        return self._packages
    @packages.setter
    def packages(self, packages: PackageSpecs | List[PackageSpecs]) -> None:
        # Bulk initialization; only allowed while the catalog is still empty.
        if len(self._packages) > 0:
            raise ValueError("Cannot set packages if packages are already present.")
        if isinstance(packages, (Package, fl.Package, str)):
            packages = [packages]
        for package in packages:
            try:
                self.add_package(package)
            except FrictionlessException as e:
                # Best-effort: a package that fails to load is logged, not fatal.
                self.logger.error(f"Adding the package {package!r} failed with\n{e!r}")
    def add_package(
        self,
        package: PackageSpecs,
        basepath: Optional[str] = None,
        copy: bool = False,
    ):
        """Adds a :obj:`Package` to the catalog."""
        # Accept a frictionless package, a descriptor path, or a Package object.
        if isinstance(package, fl.Package):
            dc_package = Package.from_descriptor(package)
        elif isinstance(package, str):
            dc_package = Package.from_descriptor_path(package)
        elif isinstance(package, Package):
            if copy:
                dc_package = package.copy()
            else:
                dc_package = package
        else:
            msg = f"{self.name}.add_package() takes a package, not {type(package)!r}."
            raise TypeError(msg)
        if dc_package.package_name in self.package_names:
            raise DuplicatePackageNameError(dc_package.package_name)
        if basepath is not None:
            dc_package.basepath = basepath
        self._packages.append(dc_package)
    def add_resource(
        self,
        resource: Resource,
        package_name: Optional[str] = None,
    ):
        """Adds a resource to the catalog. If package_name is given, adds the resource to the package with that name."""
        # create=True: the target package is created on the fly if missing.
        package = self.get_package_by_name(package_name, create=True)
        package.add_resource(resource=resource)
    def check_feature_availability(self, feature: FeatureSpecs) -> bool:
        """Checks whether the given feature is potentially available."""
        # NOTE(review): optimistic stub -- always claims availability.
        return True
    def copy(self) -> Self:
        # Shallow copy: the new catalog shares the Package objects.
        new_object = self.__class__(basepath=self.basepath)
        new_object.packages = self.packages
        return new_object
    def extend(self, catalog: Iterable[Package]) -> None:
        """Adds all packages from another catalog to this one."""
        for package in catalog:
            if package.package_name not in self.package_names:
                self.add_package(package.copy())
                continue
            # Same-named package already present: merge its resources instead.
            self_package = self.get_package_by_name(package.package_name)
            self_package.extend(package)
    def extend_package(self, package: Package) -> None:
        """Adds all resources from the given package to the existing one with the same name."""
        catalog_package = self.get_package_by_name(package.package_name, create=True)
        catalog_package.extend(package)
    def get_package(self, name: Optional[str] = None) -> Package:
        """If a name is given, calls :meth:`get_package_by_name`, otherwise returns the last loaded package.
        Raises:
            RuntimeError if no package has been loaded.
        """
        if name is not None:
            return self.get_package_by_name(name=name)
        if len(self._packages) == 0:
            raise EmptyCatalogError
        return self._packages[-1]
    def get_package_by_name(self, name: str, create: bool = False) -> Package:
        """
        Raises:
            fl.FrictionlessException if none of the loaded packages has the given name.
        """
        for package in self._packages:
            if package.package_name == name:
                return package
        if create:
            # The freshly created package becomes the last one, so the
            # no-argument get_package() below returns it.
            self.make_new_package(
                package_name=name,
                basepath=self.basepath,
            )
            self.logger.info(f"Automatically added new empty package {name!r}")
            return self.get_package()
        raise PackageNotFoundError(name)
    def get_resource_by_config(self, config: DimcatConfig) -> Resource:
        """Returns the first resource that matches the given config.
        Raises:
            EmptyCatalogError: If the package is empty.
            NoMatchingResourceFoundError: If no resource matching the specs is found in the "features" package.
        """
        if len(self._packages) == 0:
            raise EmptyCatalogError
        for package in self._packages:
            try:
                return package.get_resource_by_config(config)
            except (EmptyPackageError, ResourceNotFoundError):
                # Keep searching the remaining packages.
                pass
        raise NoMatchingResourceFoundError(config)
    def get_resource_by_name(self, name: str) -> Resource:
        """Returns the Resource with the given name.
        Raises:
            EmptyCatalogError: If the package is empty.
            ResourceNotFoundError: If the resource with the given name is not found.
        """
        if len(self._packages) == 0:
            raise EmptyCatalogError
        for package in self._packages:
            try:
                return package.get_resource_by_name(name=name)
            except (EmptyPackageError, ResourceNotFoundError):
                pass
        # NOTE(review): self.catalog_name is not defined in this class --
        # presumably provided by Data; verify, otherwise this line raises
        # AttributeError instead of ResourceNotFoundError.
        raise ResourceNotFoundError(name, self.catalog_name)
    def get_resources_by_regex(self, regex: str) -> List[Resource]:
        """Returns the Resource objects whose names contain the given regex."""
        result = []
        for package in self._packages:
            result.extend(package.get_resources_by_regex(regex=regex))
        return result
    def get_resources_by_type(
        self,
        resource_type: Type[Resource] | str,
    ) -> List[Resource]:
        """Returns the Resource objects of the given type."""
        # A string is resolved to the corresponding Resource subclass first.
        if isinstance(resource_type, str):
            resource_type = get_class(resource_type)
        results = []
        for package in self._packages:
            results.extend(package.get_resources_by_type(resource_type=resource_type))
        return results
    def has_package(self, name: str) -> bool:
        """Returns True if a package with the given name is loaded, False otherwise."""
        for package in self._packages:
            if package.package_name == name:
                return True
        return False
    def iter_resources(self):
        """Iterates over all resources in all packages."""
        for package in self:
            for resource in package:
                yield resource
    def make_new_package(
        self,
        package: Optional[PackageSpecs] = None,
        package_name: Optional[str] = None,
        basepath: Optional[str] = None,
        auto_validate: bool = False,
    ):
        """Adds a package to the catalog. Parameters are the same as for :class:`Package`."""
        if package is None or isinstance(package, (fl.Package, str)):
            package = Package(
                package_name=package_name,
                basepath=basepath,
                auto_validate=auto_validate,
            )
        elif not isinstance(package, Package):
            msg = f"{self.name} takes a Package, not {type(package)!r}."
            raise ValueError(msg)
        self.add_package(package, basepath=basepath)
    def replace_package(self, package: Package) -> None:
        """Replaces the package with the same name as the given package with the given package."""
        if not isinstance(package, Package):
            msg = (
                f"{self.name}.replace_package() takes a Package, not {type(package)!r}."
            )
            raise TypeError(msg)
        for i, p in enumerate(self._packages):
            if p.package_name == package.package_name:
                self.logger.info(
                    f"Replacing package {p.package_name!r} ({p.n_resources} resources) with "
                    f"package {package.package_name!r} ({package.n_resources} resources)"
                )
                self._packages[i] = package
                return
        # No package of that name yet: fall back to a plain add.
        self.add_package(package)
    def _set_basepath(
        self,
        basepath: str | Literal[None],
        set_packages: bool = True,
    ) -> None:
        """Sets the basepath for all packages in the catalog (if set_packages=True)."""
        self._basepath = treat_basepath_argument(basepath, self.logger)
        if not set_packages:
            return
        for package in self._packages:
            package.basepath = self.basepath
    def summary_dict(self, include_type: bool = True) -> dict:
        """Returns a summary of the dataset."""
        if include_type:
            summary = {
                p.package_name: [f"{r.resource_name!r} ({r.dtype})" for r in p]
                for p in self._packages
            }
        else:
            summary = {p.package_name: p.resource_names for p in self._packages}
        return dict(basepath=self.basepath, packages=summary)
| DCMLab/dimcat | src/dimcat/data/catalogs/base.py | base.py | py | 11,590 | python | en | code | 8 | github-code | 36 |
21536939801 | import cv2
import numpy as np
import os
import random
import torch
from tqdm import tqdm
def draw(prediction, dependency):
    """Render a 9x9 sudoku board for *prediction* as a 256x256 BGR image.

    prediction holds per-cell digit codes (0 = empty; codes above 9 select
    alternate colour bands via (code - 1) // 9); cells whose index appears
    in *dependency* get a pale-red background.
    """
    canvas = np.full((256, 256, 3), 220, dtype=np.uint8)
    for idx, code in enumerate(prediction):
        cell_x = 10 + idx % 9 * 26
        cell_y = 10 + idx // 9 * 26
        if code == 0 or code > 9:
            # Background for empty / model-filled cells.
            bg = (255, 180, 180) if idx in dependency else (255, 255, 255)
            cv2.rectangle(canvas, (cell_x, cell_y), (cell_x + 26, cell_y + 26), bg, -1)
        if code > 0:
            digit = str((code - 1) % 9 + 1)
            ink = [(0, 0, 0), (0, 0, 255), (0, 120, 0)][(code - 1) // 9]
            cv2.putText(canvas, digit, (cell_x + 8, cell_y + 20), cv2.FONT_HERSHEY_SIMPLEX, 0.7, ink, 2, cv2.LINE_AA)
    # Grid lines; every third line is thicker to delimit the 3x3 boxes.
    for line_idx in range(10):
        thickness = 2 if line_idx % 3 == 0 else 1
        cv2.line(canvas, (10 + 26 * line_idx, 10), (10 + 26 * line_idx, 245), (0, 0, 0), thickness)
        cv2.line(canvas, (10, 10 + 26 * line_idx), (245, 10 + 26 * line_idx), (0, 0, 0), thickness)
    return canvas
if __name__=='__main__':
    import datasets
    path='output/bart_base_sudoku_bs64'
    os.makedirs(os.path.join(path,'pic4'),exist_ok=True)
    # Ground-truth puzzle/solution pair for one fixed test example (index 96339).
    gt=datasets.load_dataset(path='csv',
                             data_files={
                                 k: os.path.join('data/sudoku',f'sudoku_{k}.csv') for k in ['test']})["test"][96339]
    src = np.int64(list(gt['quizzes']))
    tgt = np.int64(list(gt['solutions']))
    # Shift originally-empty cells by +18 so draw() colours them via its
    # (code - 1) // 9 colour-band selection.
    tgt[src == 0] += 18
    preds=[]
    atts=[]
    # Collect predictions (and, beyond step 0, attention rows) per cascade step.
    for casstep in range(5):
        pred=torch.load(os.path.join(path,f'cas_{casstep}/cas_test_generation.pk'))[96339]
        pd = pred - 3
        # Cells that were empty in the puzzle and match the shifted target get
        # bumped into the next colour band (correct predictions).
        pd[(src == 0) & (pd + 9 == tgt)] += 9
        preds.append(pd)
        if casstep==0:
            atts.append(None)
        else:
            atts.append(torch.load(os.path.join(path,f'cas_{casstep}/cas_test_generation.pk.96339.att')))
    for i in range(1,5):
        os.makedirs(os.path.join(path, 'pic4',str(i)), exist_ok=True)
        # Cells that this cascade step newly got right.
        mask=np.where((preds[i-1]!=preds[i])&(preds[i]==tgt))[0]
        for x in mask:
            row_att=atts[i][x]
            # Ignore attention on given clues and on the cell itself.
            row_att[src != 0]=0
            row_att[x]=0
            # The five most-attended remaining cells are drawn as dependencies.
            dependency=np.argsort(row_att)[-5:]
            img_pd = draw(preds[i-1],dependency)
            cv2.imwrite(os.path.join(path, 'pic4', str(i), '%d-%d.png'%(x//9,x%9)), img_pd)
| RalphHan/CASR | empirical/sudoku2.py | sudoku2.py | py | 2,295 | python | en | code | 1 | github-code | 36 |
35396368561 | #!/usr/bin/env python3
# coding=utf-8
import xml.dom.minidom as xmldom
import os
class Appconfig(object):
    """Value object for one <AppConfig> element of the configuration XML."""
    # Class-level string defaults, kept so existing code that reads these
    # attributes off the class (or off an instance built without __init__)
    # keeps working. (The commented-out AppType field was dead code; removed.)
    AppCode = ""
    Icon = ""
    Version = ""
    PathType = ""
    Path = ""
    Arguments = ""
    AppStartupType = ""

    def __init__(self, appCode, icon, version, pathType, path, arguments, appStartupType):
        # Straight copies of the XML attribute values (all strings).
        self.AppCode = appCode
        self.Icon = icon
        self.Version = version
        self.PathType = pathType
        self.Path = path
        self.Arguments = arguments
        self.AppStartupType = appStartupType
class AppConfigure(object):
    """Loads Appconfig entries from an XML configuration file."""
    # Class attribute kept for backward compatibility with code that reads
    # AppConfigure.Appconfigs, but each instance now gets its own list:
    # previously this single class-level list was shared (and mutated) by
    # every instance.
    Appconfigs = []

    def __init__(self):
        super().__init__()
        # BUG FIX: own per-instance list instead of clearing the shared
        # class-level one.
        self.Appconfigs = []

    def loadConf(self, filepath):
        """Parse *filepath* and repopulate self.Appconfigs.

        Silently leaves the list empty if *filepath* is not an existing file.
        Only <AppConfig> elements carrying an AppCode attribute are loaded.
        """
        self.Appconfigs.clear()
        if not os.path.isfile(filepath):
            return
        domTree = xmldom.parse(filepath)
        rootNode = domTree.documentElement
        apps = rootNode.getElementsByTagName("AppConfig")
        for app in apps:
            if app.hasAttribute("AppCode"):
                appCode = app.getAttribute("AppCode")
                icon = app.getAttribute("Icon")
                version = app.getAttribute("Version")
                pathType = app.getAttribute("PathType")
                path = app.getAttribute("Path")
                arguments = app.getAttribute("Arguments")
                appStartupType = app.getAttribute("AppStartupType")
                self.Appconfigs.append(Appconfig(appCode, icon, version, pathType, path, arguments, appStartupType))
| LeeZhang1979/UniTools | conf/AppConfigure.py | AppConfigure.py | py | 1,538 | python | en | code | 0 | github-code | 36 |
8982223007 | # Packages
import numpy as np
import os
# Path to txt files
path_to_d1 = ""
########
# Data #
########
# Preallocation
# data is nested as data[user][class][coordinate] -> list of sequences.
data = {}
# Loading .txt files
for i in range(1,1001):
    with open(path_to_d1 + 'Domain01/{0}.txt'.format(i)) as f:
        lines = f.readlines()
    # Retrieve domain, class and user ids from the three header lines
    # (each formatted as "<label>: <value>").
    dom = lines[0].split(" ")[-1][:1]
    cla = lines[1].split(" ")[-1].split("\n")[0]
    use = lines[2].split(" ")[-1].split("\n")[0]
    # Check for right domain
    if dom != "1":
        print("Found data for domain {0} in file {1}".format(dom,i))
    # Check if the order of data insertion is the same for all files
    if lines[4].split('\n')[0].split(',') != ["<x>","<y>","<z>","<t>"]:
        print("Different order found in data insertion at index {0}".format(i))
    # Retrieve x, y, z and t data (one comma-separated sample per line).
    lines = lines[5:]
    x = []
    y = []
    z = []
    t = []
    for line in lines:
        line = line.split('\n')[0].split(',')
        x.append(float(line[0]))
        y.append(float(line[1]))
        z.append(float(line[2]))
        t.append(float(line[3]))
    # Insert data in the main dico
    if use not in data.keys():
        data[use] = {}
        data[use][cla] = {'x':[x], 'y':[y], 'z':[z], 't':[t]}
    else:
        if cla not in data[use].keys():
            data[use][cla] = {'x':[x], 'y':[y], 'z':[z], 't':[t]}
        else:
            data[use][cla]['x'].append(x)
            data[use][cla]['y'].append(y)
            data[use][cla]['z'].append(z)
            data[use][cla]['t'].append(t)
#######
# DTW #
#######
# Dynamic time warping
def dtw(user='1',k=1,dist='dtwi',data=data):
    """
    INPUTS: user, k, dist, data
        user --> str corresponding to the user left out for cross-validation
        k --> int corresponding to the number of K nearest neighbours asked to provide
            for each sequence
        dist --> str = 'euclidian' OR 'dtwi' OR 'dtwd' according to the type of distance to use
        data --> a dict corresponding to the data to use in the model (involving the
            user to leave out). Order of the keys: user > class > coordinates
    OUTPUTS: list_out --> one [indices, distances] pair per test sample
    """
    ###########################
    # Complementary functions #
    ###########################
    def comp_dtwi(c_test, c_train):
        """Return the DTW distance between two 1-D series (lists of numbers)."""
        # Cumulative-cost matrix with an infinite border so the warping path
        # can only start at cell (1, 1).
        matrix = np.zeros((len(c_test) + 1, len(c_train) + 1))
        matrix[0, 0] = 0
        for i in range(1, len(c_test) + 1):
            matrix[i, 0] = np.inf
        for j in range(1, len(c_train) + 1):
            matrix[0, j] = np.inf
        for i in range(1, len(c_test) + 1):
            for j in range(1, len(c_train) + 1):
                matrix[i, j] = (c_test[i - 1] - c_train[j - 1]) ** 2 + np.min(
                    [matrix[i - 1, j - 1], matrix[i - 1, j], matrix[i, j - 1]])
        # Walk the greedy backward path and accumulate the visited cells.
        # NOTE(review): summing cumulative costs along the path is not the
        # textbook DTW distance (which would be matrix[-1, -1]); kept as-is
        # to preserve the original metric.
        i = len(c_test)
        j = len(c_train)
        path = []
        while i > 1 or j > 1:
            path.append(matrix[i, j])
            # `step` avoids shadowing the builtin `next`.
            step = np.min([matrix[i - 1, j - 1], matrix[i - 1, j], matrix[i, j - 1]])
            if step == matrix[i - 1, j - 1]:
                i -= 1
                j -= 1
            elif step == matrix[i, j - 1]:
                j -= 1
            elif step == matrix[i - 1, j]:
                i -= 1
        # Add last value
        path.append(matrix[1, 1])
        return np.sqrt(sum(path))
    def comp_dtwd(c_test, c_train):
        """Return the dependent multi-dimensional DTW distance.

        c_test and c_train are lists of per-dimension sequences, e.g.
        [xs, ys, zs]; all dimensions share a single warping path.
        """
        matrix = np.zeros((len(c_test[0]) + 1, len(c_train[0]) + 1))
        matrix[0, 0] = 0
        for i in range(1, len(c_test[0]) + 1):
            matrix[i, 0] = np.inf
        for j in range(1, len(c_train[0]) + 1):
            matrix[0, j] = np.inf
        for i in range(1, len(c_test[0]) + 1):
            for j in range(1, len(c_train[0]) + 1):
                # d(i,j) is the squared euclidian distance summed over all dimensions.
                distij = []
                for m in range(len(c_test)):
                    distij.append((c_test[m][i - 1] - c_train[m][j - 1]) ** 2)
                matrix[i, j] = sum(distij) + np.min(
                    [matrix[i - 1, j - 1], matrix[i - 1, j], matrix[i, j - 1]])
        # BUG FIX: the backtracking previously started at i = len(c_test) /
        # j = len(c_train) -- the number of DIMENSIONS (3), not the sequence
        # lengths the matrix was built with -- and ended by appending
        # matrix[i, i]. It now mirrors comp_dtwi exactly.
        i = len(c_test[0])
        j = len(c_train[0])
        path = []
        while i > 1 or j > 1:
            path.append(matrix[i, j])
            step = np.min([matrix[i - 1, j - 1], matrix[i - 1, j], matrix[i, j - 1]])
            if step == matrix[i - 1, j - 1]:
                i -= 1
                j -= 1
            elif step == matrix[i, j - 1]:
                j -= 1
            elif step == matrix[i - 1, j]:
                i -= 1
        # Add last value
        path.append(matrix[1, 1])
        return np.sqrt(sum(path))
    def samples_dist(type, c_test, c_train):
        """Dispatch to the 'euclidian', 'dtwi' or 'dtwd' distance.

        c_test and c_train are [x, y, z] lists of coordinate sequences.
        """
        if type == 'euclidian':
            # By definition, euclidian distance cannot account for differing
            # lengths or time steps; compare over the overlapping prefix.
            dist = 0
            shortest = min(len(c_test[0]), len(c_train[0]))
            for i in range(shortest):
                dist += np.sqrt((c_test[0][i] - c_train[0][i]) ** 2 +
                                (c_test[1][i] - c_train[1][i]) ** 2 +
                                (c_test[2][i] - c_train[2][i]) ** 2)
            return dist
        elif type == 'dtwi':
            # Independent DTW: sum of per-dimension DTW distances.
            dtwi = []
            for coord in range(len(c_test)):
                dtwi.append(comp_dtwi(c_test[coord], c_train[coord]))
            return sum(dtwi)
        elif type == 'dtwd':
            return comp_dtwd(c_test, c_train)
    #############
    # Main part #
    #############
    users_train = []
    for u in data.keys():
        if u != user:
            users_train.append(u)
    # Hard-coded total used only for the progress display.
    it_tot = 100
    it = 1
    # Preallocation of the output
    list_out = []
    # Iterate through the testing classes
    for key in data[user].keys():
        # Iterate through the testing samples
        for samp in range(len(data[user][key]['x'])):
            x_test = data[user][key]['x'][samp]
            y_test = data[user][key]['y'][samp]
            z_test = data[user][key]['z'][samp]
            # Preallocate the k nearest neighbours with sentinel values.
            knn_d = []
            indices = {}
            for v in range(k):
                knn_d.append(1e10)
                indices[str(v)] = [-1, -1, -1]  # [user, class, samp]
            knn_dist = np.array(knn_d)
            # Iterate through the users of the training set
            for user_tr in users_train:
                # Iterate through the training classes
                # NOTE(review): iterates the *test* user's class list; assumes
                # every user has the same classes.
                for key_tr in data[user].keys():
                    # Iterate through the training samples
                    for sample_idx in range(len(data[user_tr][key_tr]['x'])):
                        # Compare training and testing samples forward and
                        # backward in time; only the better direction is
                        # compared to the current knn.
                        dtime = []
                        compo = []
                        for tim in range(2):
                            # BUG FIX: the backward copies are built with
                            # slicing ([::-1]) instead of list.reverse(),
                            # which mutated the stored training sequences in
                            # place and corrupted every later comparison.
                            if tim == 0:
                                x_train = data[user_tr][key_tr]['x'][sample_idx]
                                y_train = data[user_tr][key_tr]['y'][sample_idx]
                                z_train = data[user_tr][key_tr]['z'][sample_idx]
                                direction = "forward"
                            else:
                                x_train = data[user_tr][key_tr]['x'][sample_idx][::-1]
                                y_train = data[user_tr][key_tr]['y'][sample_idx][::-1]
                                z_train = data[user_tr][key_tr]['z'][sample_idx][::-1]
                                direction = "backward"
                            # Compute the distance between the testing and the training samples
                            dtime.append(samples_dist(dist, [x_test, y_test, z_test],
                                                      [x_train, y_train, z_train]))
                            compo.append([user_tr, key_tr, str(sample_idx), direction])
                        # Determine best output between forward and backward in time
                        ind = np.where(np.array(dtime) == min(np.array(dtime)))[0][0]
                        d = dtime[ind]
                        comp = compo[ind]
                        # Compare to the current knn
                        if d < max(knn_dist):
                            # If smaller distance, replace max value in knn
                            ind2 = np.where(knn_dist == max(knn_dist))[0][0]
                            knn_dist[ind2] = d
                            indices[str(ind2)] = comp
            # Print the current progress
            print("Iteration {0}/{1} done.".format(it, it_tot))
            it += 1
            # Store the sample's outputs in the output list
            list_out.append([indices, knn_dist.tolist()])
    return list_out
#############
# Computing #
#############
# Output path
path_out = "path/out/"
# Leave-one-user-out run for every distance type; each result is written as
# a Python-literal list to its own text file.
for dist_type in ["euclidian", "dtwd", "dtwi"]:
    for u in range(1,11):
        user = str(u)
        result = dtw(user=user, k=3, dist=dist_type, data=data)
        # Writing the output
        with open(path_out + "res_user={0}_type={1}.txt".format(user,dist_type), "w") as output:
            output.write("[")
            for i in range(len(result)):
                if i != len(result)-1:
                    output.write(str(result[i])+",\n")
                else:
                    output.write(str(result[i]))
            output.write("]")
| gheroufosse/Dynamic_Time_Warping | dtw.py | dtw.py | py | 9,861 | python | en | code | 0 | github-code | 36 |
37634308620 | # Given two binary search trees root1 and root2.
# Return a list containing all the integers from both trees sorted in ascending order.
# Example 1:
# Input: root1 = [2,1,4], root2 = [1,0,3]
# Output: [0,1,1,2,3,4]
# Example 2:
# Input: root1 = [0,-10,10], root2 = [5,1,7,0,2]
# Output: [-10,0,0,1,2,5,7,10]
# Example 3:
# Input: root1 = [], root2 = [5,1,7,0,2]
# Output: [0,1,2,5,7]
# Example 4:
# Input: root1 = [0,-10,10], root2 = []
# Output: [-10,0,10]
# Example 5:
# Input: root1 = [1,null,8], root2 = [8,1]
# Output: [1,1,8,8]
# Constraints:
# Each tree has at most 5000 nodes.
# Each node's value is between [-10^5, 10^5].
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def getAllElements(self, root1: TreeNode, root2: TreeNode) -> List[int]:
        """Return every value from two BSTs as one ascending list.

        In-order traversal of a BST yields its values sorted; the two
        sorted lists are then combined with a standard two-pointer merge.
        """
        def collect(node, out):
            # Iterative in-order traversal (left, node, right).
            stack = []
            while stack or node:
                while node:
                    stack.append(node)
                    node = node.left
                node = stack.pop()
                out.append(node.val)
                node = node.right

        a, b = [], []
        collect(root1, a)
        collect(root2, b)

        merged = []
        i = j = 0
        while i < len(a) and j < len(b):
            if a[i] < b[j]:
                merged.append(a[i])
                i += 1
            else:
                # on ties the second tree's value goes first, as before
                merged.append(b[j])
                j += 1
        merged.extend(a[i:])
        merged.extend(b[j:])
        return merged
42154272648 | # 예산
# Baekjoon 2512 (budget): binary-search the largest per-request cap such
# that the total allocation stays within the budget.
n_requests = int(input())  # count of requests; the values line is read next
requests = list(map(int, input().split()))
requests.sort()
budget = int(input())
lo, hi = 0, requests[-1]
while lo <= hi:
    cap = (lo + hi) // 2
    # each request gets min(request, cap)
    spent = sum(min(r, cap) for r in requests)
    if spent <= budget:
        lo = cap + 1
    else:
        hi = cap - 1
print(hi)
| FeelingXD/algorithm | beakjoon/2512.py | 2512.py | py | 318 | python | en | code | 2 | github-code | 36 |
27868546226 | """Module for I/O related data parsing"""
__author__ = "Copyright (c) 2016, Mac Xu <shinyxxn@hotmail.com>"
__copyright__ = "Licensed under GPLv2 or later."
import datetime
import pprint
import re
from app.modules.lepd.LepDClient import LepDClient
class IOProfiler:
    """Collects disk I/O statistics (iostat, df, iotop) from a remote host
    through a LepDClient connection."""
    def __init__(self, server, config='release'):
        # config == 'debug' makes get_capacity() include the raw command output
        self.server = server
        self.client = LepDClient(self.server)
        self.config = config
    def get_status(self):
        """Run iostat remotely and parse per-device read/write rates and
        utilisation; overall 'ratio' is the busiest disk's utilisation."""
        start_time = datetime.datetime.now()
        result = self.client.getIostatResult()
        if not result:
            return {}
        end_time = datetime.datetime.now()
        raw_results = result[:]
        headerline = result.pop(0)
        duration = "%.1f" % ((end_time - start_time).total_seconds())
        io_status = {
            'lepdDuration': duration,
            'disks': {},
            'diskCount': 0,
            'ratio': 0
        }
        for line in result:
            if (line.strip() == ""):
                continue
            line_values = line.split()
            device_name = line_values[0]
            io_status['diskCount'] += 1
            io_status['disks'][device_name] = {}
            # columns 5/6 are read/write kB/s; the last column is the util %
            # (assumes standard iostat -x column layout — TODO confirm)
            io_status['disks'][device_name]['rkbs'] = line_values[5]
            io_status['disks'][device_name]['wkbs'] = line_values[6]
            io_status['disks'][device_name]['ratio'] = line_values[-1]
            this_disk_ratio = self.client.toDecimal(line_values[-1])
            if this_disk_ratio > io_status['ratio']:
                io_status['ratio'] = this_disk_ratio
        end_time_2 = datetime.datetime.now()
        duration = "%.1f" % ((end_time_2 - end_time).total_seconds())
        io_status['lepvParsingDuration'] = duration
        response_data = {
            'data': io_status,
            'rawResult': raw_results
        }
        return response_data
    def get_capacity(self):
        """Run `df` remotely and report total/used disk capacity.

        NOTE(review): the flat diskData['size'/'used'/'free'] keys are
        overwritten on every /dev/ line, so `capacity` reflects only the
        LAST disk — confirm this is intended for multi-disk hosts.
        """
        responseLines = self.client.getResponse("GetCmdDf")
        if (len(responseLines) == 0):
            return {}
        responseData = {}
        if (self.config == 'debug'):
            responseData['rawResult'] = responseLines[:]
        diskData = {}
        for resultLine in responseLines:
            if (not resultLine.startswith('/dev/')):
                continue
            lineValues = resultLine.split()
            diskName = lineValues[0][5:]
            diskData[diskName] = {}
            diskData[diskName]['size'] = lineValues[1]
            diskData[diskName]['used'] = lineValues[2]
            diskData[diskName]['free'] = lineValues[3]
            diskData['size'] = lineValues[1]
            diskData['used'] = lineValues[2]
            diskData['free'] = lineValues[3]
        capacity = {}
        capacity['diskTotal'] = diskData['size']
        capacity['diskUsed'] = diskData['used']
        responseData['data'] = capacity
        return responseData
    def get_io_top(self, ioTopLines = None):
        """Parse iotop output into per-process records keyed by row order.

        Lines up to and including the 'TID PRIO USER ...' header are
        discarded; malformed rows are skipped (error printed).
        """
        if (ioTopLines == None):
            ioTopLines = self.client.getResponse('GetCmdIotop')
        ioTopResults = {}
        ioTopResults['data'] = {}
        ioTopResults['rawResult'] = ioTopLines[:]
        # print(len(ioTopLines))
        if (len(ioTopLines) < 2):
            return ioTopResults
        dataLineStartingIndex = 0
        for line in ioTopLines:
            if (re.match(r'\W*TID\W+PRIO\W+USER\W+DISK READ\W+DISK WRITE\W+SWAPIN\W+IO\W+COMMAND\W*', line.strip(), re.M|re.I)):
                break
            else:
                dataLineStartingIndex += 1
        # drop everything through the header line
        while(dataLineStartingIndex >= 0):
            ioTopLines.pop(0)
            dataLineStartingIndex -= 1
        # for line in ioTopLines:
        #     print(line)
        # print('--------------------')
        orderIndex = 0
        for line in ioTopLines:
            # print(line)
            if (line.strip() == ''):
                continue
            try:
                # find the 'M/s" or 'B/s', they are for disk read and write
                matches = re.findall('\s*\d+\.\d{2}\s*[G|M|K|B]\/s\s+', line)
                diskRead = matches[0].strip()
                diskWrite = matches[1].strip()
                # find the "0.00 %" occurrences, they are for swapin and io
                matches = re.findall('\s*\d+\.\d{2}\s*\%\s+', line)
                swapin = matches[0].strip()
                io = matches[1].strip()
                lineValues = line.split()
                pid = lineValues[0].strip()
                prio = lineValues[1].strip()
                user = lineValues[2].strip()
                # the command is whatever follows the last '%' column
                lastPercentIndex = line.rfind('%')
                command = line[lastPercentIndex+1:]
                ioTopItem = {}
                ioTopItem['TID'] = pid
                ioTopItem['PRIO'] = prio
                ioTopItem['USER'] = user
                ioTopItem['READ'] = diskRead
                ioTopItem['WRITE'] = diskWrite
                ioTopItem['SWAPIN'] = swapin
                ioTopItem['IO'] = io
                ioTopItem['COMMAND'] = command
            except Exception as err:
                print(err, "------- GetCmdIotop")
                continue
            # use an incremental int as key, so we keey the order of the items.
            ioTopResults['data'][orderIndex] = ioTopItem
            orderIndex += 1
        return ioTopResults
# Ad-hoc manual smoke test against a live host; runs only when executed
# directly, not on import.
if( __name__ =='__main__' ):
    profiler = IOProfiler('www.rmlink.cn')
    profiler.config = 'debug'
    pp = pprint.PrettyPrinter(indent=2)
    # monitor = IOMonitor('www.rmlink.cn')
    # pp.pprint(profiler.get_io_top())
    profiler.get_io_top()
    # pp.pprint(profiler.getIoPPData())
    # to make a io change on server:  sudo dd if=/dev/sda of=/dev/null &
| linuxep/lepv | app/modules/profilers/io/IOProfiler.py | IOProfiler.py | py | 5,872 | python | en | code | 20 | github-code | 36 |
41714169812 | #hyperparameters
# Shared model/training hyperparameters for the LSTM and CNN variants.
feature_dim = 300          # input feature dimensionality
regularizer = 0.01         # regularization strength
activation='relu'
optimizer = 'adam'
padding = 'valid'
dropout_rate = 0.01
nb_epochs = 10             # maximum training epochs
batch_size = 256
stop_epochs = 5            # early-stopping patience (presumably — confirm in training code)
nb_neurons_dense = 100     # units in the dense layer
#we use this only for lstm
nb_neurons_lstm = 150
# only used for cnn
nb_filter = 100
filter1_length = 3
filter2_length = 2
pool1_length = 5
pool2_length = 3
| marinaangelovska/complementary_products_suggestions | complementary_products_suggestions/config.py | config.py | py | 359 | python | en | code | 12 | github-code | 36 |
835315908 | # assignment 1
# Name-Peyush Jindal
# SID=21103092
# Question 1
# Read three integers and print their average.
number_1 = int(input("Enter first number:"))
number_2 = int(input("Enter second number:"))  # fixed: prompt wrongly said "first"
number_3 = int(input("Enter third number:"))   # fixed: prompt wrongly said "first"
# average
average = (number_1 + number_2 + number_3) / 3
print(average)
# Question 2
# all values are in US dollars $
# Compute a 20% tax on income after the standard and dependent deductions.
gross_income = float(input("Please enter gross income:"))
standard_deduction = 10000
dependents = int(input("Enter the number of dependents:"))
# For each dependent, a taxpayer is allowed an additional $3,000 deduction.
per_dependent = 3000
taxable_income = gross_income - standard_deduction - per_dependent * dependents
# tax rate=20%
tax = taxable_income * 20 / 100
print("Tax:", tax)
# Question 3
# Collect one student record and print it as a list.
name = input("Please enter name:")
sid = int(input("Enter Student ID:"))
# Gender values: 'F' female, 'M' male, 'U' unknown
gender = input("Enter gender:")
course_name = input("Enter your course name:")
cgpa = float(input("Enter CGPA:"))
student = [sid, name, gender, course_name, cgpa]
print(student)
# Question 4
# Read five students' marks and print them sorted ascending.
my_list = [int(input(f"Enter student {i} marks:")) for i in range(1, 6)]
my_list.sort()
print(my_list)
# Question 5
# part a: drop the element at index 3 (all values here are unique,
# so removing by index equals removing by value)
color = ['Red', 'Green', 'White', 'Black', 'Pink', 'Yellow']
color.pop(3)
print("Part a Answer:", color)
# part b: replace the two elements at indices 3-4 with a single one
color = ['Red', 'Green', 'White', 'Black', 'Pink', 'Yellow']
color[3:5] = ['Purple']
print("Part b Answer:", color)
37396630977 | from regression_tests import *
class Test(Test):
    """Regression test: main()'s address range must agree between the
    decompiled C output and the disassembly output."""
    settings = TestSettings(
        input='hello.exe'
    )
    def test_main_addresses(self):
        # both outputs must report identical boundaries for main()
        assert self.out_c.contains('Address range: 0x407740 - 0x40775e')
        assert self.out_dsm.contains('function: main at 0x407740 -- 0x40775e')
| avast/retdec-regression-tests | bugs/same-fnc-end-addr-in-c-and-dsm/test.py | test.py | py | 299 | python | en | code | 11 | github-code | 36 |
43794443747 | # Pobierz od użytkownika 10 liczb, wyświetl tylko te, które są nieparzyste.
# Wprowadzanie n numerów na listę
# Read 10 integers from the user, then display only the ODD ones.
lista = []
print("Wprowadż 10 numerów")
for number in range(0, 10):
    new_number = int(input(f'{number+1} ->'))
    lista.append(new_number)
print("podane liczby to: ", lista)
# Fix: the task statement asks for odd numbers ("nieparzyste"), but the
# original filtered even ones (i % 2 == 0).
lista_nieparzysta = []
for i in lista:
    if i % 2 != 0:
        lista_nieparzysta.append(i)
print("Liczby nieparzyste na podanej liście to: ", lista_nieparzysta)
| TomekJJ/PythonCourse2022 | 04 Kolekcje/Homework/12 Listy - zad 2.py | 12 Listy - zad 2.py | py | 519 | python | pl | code | 0 | github-code | 36 |
6751661466 | # -*- coding: utf-8 -*-
from PyQt5.QtWidgets import QWidget, QTreeWidgetItem, QMenu
from PyQt5.QtCore import pyqtSlot, QPoint
from selfcheck.controllers.selfcheckcontroller import SelfCheckController
from selfcheck.modules.editselfcheckitemmodule import EditSelfCheckItemModule
from selfcheck.views.selfcheckitemlist import Ui_Form
import user
class SelfCheckItemListModule(QWidget, Ui_Form):
    """List page of self-check items grouped by kind, with a context menu
    for add / edit / delete through EditSelfCheckItemModule dialogs."""
    def __init__(self, parent=None):
        super(SelfCheckItemListModule, self).__init__(parent)
        self.setupUi(self)
        self.SC = SelfCheckController()
        self.current_kind = ''
        # column 0 carries the database autoid; kept for lookups, not shown
        self.treeWidget_items.hideColumn(0)
        self.get_item_kind()
    def get_item_kind(self):
        """Reload the distinct kinds into the combo box, preserving the
        current selection when possible."""
        temp_kind = self.current_kind
        self.comboBox_kind.clear()
        res = self.SC.get_data(0, True, *VALUES_TUPLE_KIND).distinct()
        if len(res):
            self.comboBox_kind.addItems(res)
        if temp_kind != '':
            self.comboBox_kind.setCurrentText(temp_kind)
    def get_detail(self):
        """Refresh the tree with the items of the currently selected kind,
        ordered by seqid."""
        self.treeWidget_items.clear()
        condition = {'kind': self.current_kind}
        res = self.SC.get_data(0, False, *VALUES_TUPLE_ITEM, **condition)
        if not len(res):
            return
        for item in res.order_by('seqid'):
            qtreeitem = QTreeWidgetItem(self.treeWidget_items)
            qtreeitem.setText(0, str(item['autoid']))
            qtreeitem.setText(1, str(item['seqid']))
            qtreeitem.setText(2, item['itemname'])
            qtreeitem.setText(3, item['basic'])
        for i in range(1, 4):
            self.treeWidget_items.resizeColumnToContents(i)
    @pyqtSlot(str)
    def on_comboBox_kind_currentTextChanged(self, p_str):
        # kind selection changed -> reload the list
        self.current_kind = p_str
        self.get_detail()
    @pyqtSlot(QPoint)
    def on_treeWidget_items_customContextMenuRequested(self, pos):
        """Context menu with add ("增加") / edit ("修改") / delete ("删除")."""
        global_pos = self.treeWidget_items.mapToGlobal(pos)
        current_item = self.treeWidget_items.currentItem()
        menu = QMenu()
        action_1 = menu.addAction("增加")
        action_2 = menu.addAction("修改")
        action_3 = menu.addAction("删除")
        action = menu.exec(global_pos)
        if action == action_1:
            # add: open an empty editor; refresh on accept
            detail = EditSelfCheckItemModule(parent=self)
            detail.accepted.connect(self.get_item_kind)
            detail.accepted.connect(self.get_detail)
            detail.show()
        elif action == action_2:
            # edit: open the editor for the selected item's autoid
            if current_item is None:
                return
            id = int(current_item.text(0))
            detail = EditSelfCheckItemModule(id, self)
            detail.accepted.connect(self.get_item_kind)
            detail.accepted.connect(self.get_detail)
            detail.show()
        elif action == action_3:
            # delete: remove by autoid and refresh both views
            if current_item is None:
                return
            id = int(current_item.text(0))
            condition = {'autoid': id}
            self.SC.delete_data(0, **condition)
            self.get_item_kind()
            self.get_detail()
    @pyqtSlot(QTreeWidgetItem, int)
    def on_treeWidget_items_itemDoubleClicked(self, qtreeitem, p_int):
        # double-click behaves like "edit"
        id = int(qtreeitem.text(0))
        detail = EditSelfCheckItemModule(id, self)
        detail.accepted.connect(self.get_item_kind)
        detail.accepted.connect(self.get_detail)
        detail.show()
# Field tuples forwarded to SelfCheckController.get_data()
VALUES_TUPLE_KIND = ('kind', )
VALUES_TUPLE_ITEM = ('autoid', 'seqid', 'itemname', 'basic')
| zxcvbnmz0x/gmpsystem | selfcheck/modules/selfcheckitemlistmodule.py | selfcheckitemlistmodule.py | py | 3,415 | python | en | code | 0 | github-code | 36 |
36830593270 | #!/usr/bin/python3
# 涉及对象的定义过程,不能交互式执行,需要放入.py代码文件中执行。
# 导入LCD数字,滑块,部件,Box布局,Q程序,网格布局
from PySide2.QtWidgets import QLCDNumber, QSlider, QWidget, QVBoxLayout, QApplication, QGridLayout
# 导入Qt库
from PySide2.QtCore import Qt
class MyLCDNumber(QWidget):  # compound LCD display widget
    """A QLCDNumber readout driven by a horizontal slider (range 0-99)."""
    def __init__(self, parent=None):  # no parent widget by default
        super().__init__(parent)
        self.lcd_number = QLCDNumber() # the LCD readout
        self.slider = QSlider(Qt.Horizontal)# horizontal slider
        self.layout = QVBoxLayout()  # stack the two vertically (LCD on top)
        self.layout.addWidget(self.lcd_number)  # add the LCD first
        self.layout.addWidget(self.slider)  # slider underneath
        self.setLayout(self.layout)
        self.setFixedSize(120, 100)  # fixed overall widget size
        self.lcd_number.setDigitCount(2)  # show at most two digits
        self.slider.setRange(0, 99)  # adjustable value range
        self.slider.valueChanged.connect(self.lcd_number.display)  # slider drives the LCD value
# Demo: a 2x2 grid of four independent LCD/slider widgets.
app = QApplication()  # create the Qt application instance
window = QWidget()  # top-level window
layout = QGridLayout()  # grid layout for the four widgets
mylcdnumber01 = MyLCDNumber()  # four independent instances
mylcdnumber02 = MyLCDNumber()
mylcdnumber03 = MyLCDNumber()
mylcdnumber04 = MyLCDNumber()
layout.addWidget(mylcdnumber01, 1, 1)  # place each at its (row, col)
layout.addWidget(mylcdnumber02, 1, 2)
layout.addWidget(mylcdnumber03, 2, 1)
layout.addWidget(mylcdnumber04, 2, 2)
window.setLayout(layout)  # apply the grid to the window
window.show()  # show the window
app.exec_()  # enter the Qt event loop
| oca-john/Python3-xi | Pyside2/1.pyside2.4.widget.def.py | 1.pyside2.4.widget.def.py | py | 2,141 | python | zh | code | 0 | github-code | 36 |
24537790919 | from pathlib import Path
# AoC 2017 day 17 (spinlock): part 1 = value after 2017 once 2017 values
# are inserted; part 2 = value after 0 (always at index 0) after 50M steps.
N, S = int(Path("day17.txt").read_text()), 50000000
l, pos, after2017, afterzero = [0], 0, 0, 0
for v in range(1, S+1):
    # position advances modulo the *virtual* buffer size v, so the list
    # contents are only needed up to v == 2017
    pos = (pos + N) % v + 1
    if v == 2017: after2017 = l[pos]
    elif v > 2017:
        if pos == 1:
            afterzero = v
        # perf fix: the buffer is never read past v == 2017, so skip the
        # O(n) insert for ALL later iterations (the original only skipped
        # it when pos == 1, making the 50M loop quadratic)
        continue
    l.insert(pos, v)
print(after2017, afterzero)
| AlexBlandin/Advent-of-Code | 2017/day17.py | day17.py | py | 322 | python | en | code | 0 | github-code | 36 |
def DeBruijnKmer(kmers):
    """Build the de Bruijn graph of a k-mer collection.

    Returns a dict mapping each (k-1)-prefix to the list of (k-1)-suffixes
    of the k-mers that start with it, in input order.
    """
    graph = dict()
    for kmer in kmers:
        # edge: prefix -> suffix for every k-mer
        graph.setdefault(kmer[:-1], []).append(kmer[1:])
    return graph
def GenerateCycle(graph, start):
    """Walk unexplored edges from `start` until stuck.

    `graph` (node -> list of successors) is mutated in place: each
    traversed edge is removed, and a node is dropped once its edge list
    is exhausted. Returns [route, remaining_graph].
    """
    def take_edge(node):
        # Pop the first outgoing edge of `node`; delete the node entry
        # entirely when it was the last edge.
        nxt = graph[node][0]
        if len(graph[node]) == 1:
            del graph[node]
        else:
            del graph[node][0]
        return nxt

    route = [start, take_edge(start)]
    while route[-1] in graph:
        route.append(take_edge(route[-1]))
    return [route, graph]
def EulerianCycle(graph):
    """Hierholzer's algorithm: build an Eulerian cycle by repeatedly
    splicing sub-cycles until every edge of `graph` is used.

    `graph` maps node -> list of successors and is consumed (mutated).
    """
    edges_remained=graph
    startnode=list(edges_remained)[0]
    [route,edges_remained]=GenerateCycle(edges_remained,startnode)
    while edges_remained:
        # drop the duplicated closing node, rotate the route so it starts
        # at a node that still has unused edges, then extend from there
        del route[-1]
        length=len(route)
        temp=[]
        for i in range(length):
            if route[i] in edges_remained:
                for j in range(length):
                    temp.append(route[(i+j)%length])
                route=temp
                break
        [newroute,edges_remained]=GenerateCycle(edges_remained,route[0])
        route.extend(newroute)
    return route
def GenomePath(reads):
    """Reconstruct the string spelled by a path of overlapping k-mers.

    Consecutive reads overlap by k-1 characters, so the genome is the
    first read plus the last character of every following read.
    """
    # The original computed k = len(reads[0]) but never used it, and built
    # the string with repeated concatenation; join is linear and clearer.
    return reads[0] + "".join(read[-1] for read in reads[1:])
# Read k and the k-mer list from the Rosalind dataset file.
reads=[]
with open('dataset_203_7.txt','r') as f:
    content=f.read().split()
    k=content[0]
    for i in range(1,len(content)):
        reads.append(content[i])
# Build the de Bruijn graph on strings, then renumber nodes as integers
# while tallying in/out degrees for the Eulerian-path endpoints.
graphstring=DeBruijnKmer(reads)
graphnumber=dict()
nodelist=list(graphstring)
# NOTE(review): sized for exactly one node that is not a key (the path
# sink); more would overflow these arrays — holds for a genome path.
nodenumber=len(nodelist)+1
indegree=[0]*nodenumber
outdegree=[0]*nodenumber
for key in graphstring:
    startnode=nodelist.index(key)
    for value in graphstring[key]:
        if value not in nodelist:
            nodelist.append(value)
        endnode=nodelist.index(value)
        if startnode in graphnumber:
            graphnumber[startnode].append(endnode)
        else:
            graphnumber[startnode]=[endnode]
        outdegree[startnode]+=1
        indegree[endnode]+=1
# The unbalanced nodes are the path endpoints: extra out-edge = path start
# ('fakeend' here), extra in-edge = path end ('fakestart').
for i in range(nodenumber):
    if outdegree[i]>indegree[i]:
        fakeend=i
    if outdegree[i]<indegree[i]:
        fakestart=i
# Close the path into a cycle with an artificial edge fakestart -> fakeend.
if fakestart in graphnumber:
    graphnumber[fakestart].append(fakeend)
else:
    graphnumber[fakestart]=[fakeend]
temp=EulerianCycle(graphnumber)
del temp[-1]
# Rotate the cycle so it starts right after the artificial edge.
answer=[]
length=len(temp)
if temp[0]!=fakeend or temp[-1]!=fakestart:
    for i in range(1,length):
        if temp[i]==fakeend and temp[i-1]==fakestart:
            for j in range(length):
                answer.append(temp[(i+j)%length])
# Map numeric node ids back to (k-1)-mers and spell out the genome.
kmersinorder=[]
for item in answer:
    kmersinorder.append(nodelist[item])
text=GenomePath(kmersinorder)
print(text)
with open('answer.txt','w') as f:
    f.write(text)
| XueningHe/Rosalind_Genome_Sequencing | synthesis.py | synthesis.py | py | 3,075 | python | en | code | 0 | github-code | 36 |
23269856572 | # import os
import sys
import csv
from matplotlib.patches import Ellipse
import matplotlib.transforms as transforms
# import pandas as pd
# from pandas.plotting import lag_plot
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
import random
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import *
# from scipy.stats.stats import pearsonr
from statsmodels.tsa.arima_process import arma_generate_sample
# from sklearn.metrics import mean_squared_error as MSE
import math
# -- -- #
import distribution_displays
# -- -- #
import functools
import inspect
from typing import Optional
# -- -- #
import Simulation_fit
# -- -- #
from ellipse import confidence_ellipse, get_correlated_dataset
from wave_g import createWave, saveWave, reafWave
class Example(QWidget):
label: list[Optional[list[QLabel]]]
le: list[Optional[list[QLineEdit]]]
intline: list[Optional[list[QLineEdit]]]
pb: list[Optional[list[QPushButton]]]
combobox: list[Optional[QComboBox]]
stack: list[Optional[QWidget]]
sp: list[Optional[QSpinBox]]
group: list[Optional[list[QGroupBox]]]
    def __init__(self):
        """Initialise per-page widget holders (index = stacked-page number)
        and build the UI."""
        super().__init__()
        # -- -- #
        self.n_pages = 13
        # -- -- #
        # widget containers filled in by the stackUI* builders; list slots
        # are indexed by page number
        self.tree = None
        self.tree_dict = None
        self.stackedWidget = None
        self.stack = [None] * self.n_pages
        self.le = [None] * self.n_pages
        self.var_dict = None
        self.groupbox = None
        self.link21 = None
        self.intline = [None] * self.n_pages
        self.sp = [None] * self.n_pages
        self.pb = [None] * self.n_pages
        self.icon = None
        self.label = [None] * self.n_pages
        self.filename = None
        self.group = [None] * self.n_pages
        self.combobox = [None] * self.n_pages
        # -- -- #
        self.initUI()
'This is the main window,left is tree menu and right part are the stacked windows.'
    def initUI(self):
        """Build the main window: a navigation tree on the left and a
        QStackedWidget with the 13 tool pages on the right."""
        self.setFixedSize(700, 450)
        self.setWindowTitle('Tool of SMNt')
        self.setStyleSheet("background-color:'silver'")
        hbox = QHBoxLayout(self)
        left = QFrame(self)
        left.setFixedSize(235, 450)
        right = QFrame(self)
        splitter1 = QSplitter(Qt.Horizontal)
        splitter1.setSizes([35, ])
        splitter1.addWidget(left)
        splitter1.addWidget(right)
        hbox.addWidget(splitter1)
        self.setLayout(hbox)
        self.tree = QTreeWidget(left)
        # apply tree settings pairwise: list_00[i](*list_01[i])
        list_00 = [self.tree.setMinimumSize, self.tree.setStyleSheet, self.tree.setAutoScroll,
                   self.tree.setEditTriggers, self.tree.setTextElideMode, self.tree.setRootIsDecorated,
                   self.tree.setUniformRowHeights, self.tree.setItemsExpandable, self.tree.setAnimated,
                   self.tree.setHeaderHidden, self.tree.setExpandsOnDoubleClick, self.tree.setObjectName]
        list_01 = [[35, 450], ["background-color:'silver';border:outset;color:seagreen;font:bold;font-size:15px"],
                   [True], [QAbstractItemView.DoubleClicked | QAbstractItemView.EditKeyPressed], [Qt.ElideMiddle],
                   [True], [False], [True], [False], [True], [True], ["tree"]]
        for i, j in zip(list_00, list_01):
            i(*j)
        # build the navigation tree: list_03 names the parent key (None =
        # top level), list_05 the visible caption
        self.tree_dict = {}
        list_02 = ["root", "root1", "root2", "root3", "child11", "child12", "child13", "child14", "child21", "child22",
                   "child25", "child31", "child32", "child33"]
        list_03 = [None, None, None, None, "root1", "root1", "root1", "root1", "root2",
                   "root2", "root2", "root3", "root3", "root3"]
        list_04 = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        list_05 = ['HomePage', 'Probability', 'Correlation Function', 'Estimation Function', 'Coin Flipping',
                   'Dice Throw', 'Distribution Simulation', 'Central Limit Theorem', 'Correlation Coefficient',
                   'Correlation Function', 'AR Model', 'Maximum likelihood', 'confidence ellipse', "waveforms"]
        for i, j, k, l in zip(list_02, list_03, list_04, list_05):
            if j is None:
                self.tree_dict[i] = QTreeWidgetItem(self.tree)
            else:
                self.tree_dict[i] = QTreeWidgetItem(self.tree_dict[j])
            self.tree_dict[i].setText(k, l)
        self.tree.addTopLevelItem(self.tree_dict["root"])
        self.stackedWidget = QStackedWidget(right)
        self.stack = [QWidget() for i in range(13)]
        # each stackUIx builder populates self.stack[x]
        # (stackUI5..stackUI12 are defined further down in this file)
        for func in [self.stackUI0, self.stackUI1, self.stackUI2, self.stackUI3, self.stackUI4, self.stackUI5,
                     self.stackUI6, self.stackUI7, self.stackUI8, self.stackUI9, self.stackUI10, self.stackUI11,
                     self.stackUI12]:
            func()
        for i in range(len(self.stack)):
            self.stackedWidget.addWidget(self.stack[i])
        self.tree.clicked.connect(self.Display)
'Change the stacked windows.'
def Display(self):
item = self.tree.currentItem()
switcher = {
"HomePage": 0,
"Coin Flipping": 1,
"Dice Throw": 2,
"Distribution Simulation": 3,
"Central Limit Theorem": 4,
"Correlation Coefficient": 5,
"Correlation Function": 6,
"AR Model": 9,
"Maximum likelihood": 10,
"confidence ellipse": 11,
"waveforms": 12
}
i = switcher.get(item.text(0), None)
if i is not None:
self.stackedWidget.setCurrentIndex(i)
'----------------Homepage------------------------------'
    def stackUI0(self):
        """Build page 0: the static home/title page."""
        layout = QVBoxLayout(self.stack[0])
        self.label[0] = [QLabel()]
        self.label[0][0].setText("Statistische Methoden \nder Nachrichtentechnik\nVer.1.0")
        self.label[0][0].setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding))
        self.label[0][0].setAlignment(Qt.AlignCenter)
        self.label[0][0].setFont(QFont("Sanserif", 15, QFont.Bold))
        layout.addWidget(self.label[0][0])
'---------------------Coin----------------------------'
    def stackUI1(self):
        """Build page 1: the coin-flip simulator (times + probability inputs,
        Execute / Clear / Help buttons)."""
        vlayout = QVBoxLayout(self.stack[1])
        gridlayout = QGridLayout()
        grid = QWidget()
        grid.setLayout(gridlayout)
        vlayout.addWidget(grid)
        self.le[1] = [QLineEdit() for i in range(2)]
        self.var_dict = {}
        # buttons: key, caption, click handler
        list_00 = ["pb1_2", "pb1_1", "help"]
        list_01 = ["Execute", "Clear", "Help"]
        list_02 = [self.coin, self.clear11, self.msg1]
        for i, j, k in zip(list_00, list_01, list_02):
            self.var_dict[i] = QPushButton(j)
            self.var_dict[i].clicked.connect(k)
        for i, j in zip(["label11", "label12"], ["Times:", "Probability:"]):
            self.var_dict[i] = QLabel()
            self.var_dict[i].setText(j)
        # grid placement: widget, row (list_04), column (list_05)
        list_03 = [self.var_dict["help"], self.var_dict["label11"], self.var_dict["label12"], *self.le[1],
                   self.var_dict["pb1_2"], self.var_dict["pb1_1"]]
        list_04 = [1, 2, 3, 2, 3, 4, 5]
        list_05 = [2, 0, 0, 1, 1, 2, 2]
        for i in zip(list_03, list_04, list_05):
            gridlayout.addWidget(*i)
def msg1(self):
QMessageBox.about(self, "Help", "This function is a simulator of coin flipping.\n"
"Input the number of flipping times and the probability of head.\n"
"The simulator will generate a graphic of probability change polyline.\n"
"Can generate multiple images simultaneously.")
def coin(self):
try:
times = []
frequency = []
n_heads = 0
n_instances = 0
number = int(self.le[1][0].text())
probability = float(self.le[1][1].text())
'when number of trials is smaller than 100,we need a solid sampling interval'
for flip_num in range(0, number):
if random.random() <= probability:
n_heads += 1
n_instances += 1
frequency.append(n_heads / n_instances)
times.append(n_instances)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(times, frequency, color='b', label='(actual) relative occurrence')
ax.set_xlabel('Number of Trials')
ax.set_ylabel('Frequency')
ax.set_ylim([0, 1])
ax.set_xlim([1, number])
'red line is the probability of head'
ax.plot([-0, number], [probability, probability], label='Theoretical Probability', color='r',
linewidth='1.5')
ax.legend(loc=0)
plt.title('{} times Flip with Probability p(head)= {} '.format(number, probability))
plt.grid(axis='y')
plt.show()
except:
self.Error()
def clear11(self, index=1):
for i in self.le[index]:
i.clear()
'---------------------Dice----------------------------'
    def stackUI2(self):
        """Build page 2: the dice-throw simulator with one icon button per
        Platonic solid (clicking fills in the face count)."""
        vlayout = QVBoxLayout(self.stack[2])
        gridlayout = QGridLayout()
        gridlayout2 = QGridLayout()
        grid = QWidget()
        grid2 = QWidget()
        grid.setLayout(gridlayout)
        grid2.setLayout(gridlayout2)
        self.groupbox = QGroupBox('5 Platonic Solids', self)
        self.groupbox.setLayout(gridlayout)
        self.link21 = QLabel()
        self.link21.setOpenExternalLinks(True)
        self.link21.setText(u'<a href="https://en.wikipedia.org/wiki/Platonic_solid" style="color:#0000ff;">'
                            u'<b>Wikipedia</b></a>')
        self.link21.setStyleSheet('font-size: 11px')
        self.intline[2] = [QLineEdit() for i in range(3)]
        self.pb[2] = []
        self.icon = {}
        # per solid: icon key, image file, face count, tooltip
        list_00 = ["1", "2", "3", "4", "5"]
        list_01 = ["4.png", "6.png", "8.png", "12.png", "20.png"]
        list_02 = ["4", "6", "8", "12", "20"]
        list_03 = ["Tetrahedron 4 faces", "Cube 6 faces", "Octahedron 8 faces", "Dodecahedron 12 faces",
                   "Icosahedron 20 faces"]
        for j, k, l, m in zip(list_00, list_01, list_02, list_03):
            self.pb[2].append(QPushButton())
            self.icon[j] = QIcon()
            self.icon[j].addPixmap(QPixmap(k), QIcon.Normal, QIcon.Off)
            self.pb[2][-1].setIcon(self.icon[j])
            self.pb[2][-1].setIconSize(QSize(50, 50))
            # clicking a solid writes its face count into the 'Faces' box
            self.pb[2][-1].clicked.connect(functools.partial(self.intline[2][1].setText, l))
            self.pb[2][-1].setToolTip(m)
        for j, k in zip(["Help", "Execute", "Clear"], [self.msg2, self.dice_simulation, self.clear21]):
            self.pb[2].append(QPushButton(j))
            self.pb[2][-1].clicked.connect(k)
        self.label[2] = [QLabel(text) for text in ["Number of dice:", "Faces of dice:", "Throw times:"]]
        # placement: widget, row (list_08), column (list_09)
        list_07 = [*self.pb[2][:5], self.link21, *self.pb[2][5:], *self.intline[2], *self.label[2]]
        list_08 = [2, 2, 2, 2, 2, 3, 1, 5, 6, 2, 3, 4, 2, 3, 4]
        list_09 = [0, 1, 2, 3, 4, 4, 2, 2, 2, 1, 1, 1, 0, 0, 0]
        for i in zip(list_07[:6], list_08[:6], list_09[:6]):
            gridlayout.addWidget(*i)
        for i in zip(list_07[6:], list_08[6:], list_09[6:]):
            gridlayout2.addWidget(*i)
        vlayout.addWidget(self.groupbox)
        vlayout.addWidget(grid2)
def clear21(self, index=2):
for i in self.intline[index]:
i.clear()
def msg2(self):
QMessageBox.about(self, "Help", "This function is a simulator of dice throw .\n"
"The simulator will generate a graphic of probability distribution.\n"
"Can generate multiple images simultaneously.")
def dice_simulation(self):
try:
number, face, time = [int(i.text()) for i in self.intline[2]]
list_dice = np.arange(1, face + 1)
# result = [sum(random.choices(list_dice, k=10)) for i in range(time)]
result = [sum(random.choices(list_dice, k=number)) for i in range(time)]
fig = plt.figure()
ax = fig.add_subplot(111)
'the formula of bins makes the histogram looks more comfortable'
# histo = ax.hist(result, color='RoyalBlue', bins=np.arange((face-1) * number + 4)-0.5, label='occurrence')
histo = ax.hist(result, color='RoyalBlue', bins=np.arange(face * number + 2) - 0.5, label='occurrence')
ax.set_xlim([number - 1, number * face + 1])
ax.set_xlabel('Dice Points')
ax.set_ylabel('total occurence')
ax.legend(loc=1)
plt.title('{} times Toss {} dice each has {} faces'.format(time, number, face))
plt.grid()
plt.show()
except:
self.Error()
'-----------------Distribution-------------------------'
    def stackUI3(self):
        """Build page 3: the generic distribution simulator — a combo box of
        distributions, a formula-image label, and two parameter inputs."""
        layout = QVBoxLayout()
        glayout = QGridLayout()
        gbox = QWidget()
        gbox.setLayout(glayout)
        self.combobox[3] = QComboBox()
        font31 = QFont()
        font31.setPointSize(16)
        self.combobox[3].setFont(font31)
        list_00 = ["Please select...", "Beta_Distribution", "Binomial_Distribution", "Cauchy_Distribution",
                   "Chi2_Distribution", "Expon_Distribution", "F_Distribution", "Gamma_Distribution",
                   "Geometric_Distribution", "Laplace_Distribution", "Logistic_Distribution", "Lomax_Distribution",
                   "Lognorm_Distribution", "Negative_Binomial_Distribution", "Normal_Distribution",
                   "Poisson_Distribution", "Rayleigh_Distribution", "T_Distribution", "Weibull_Distribution",
                   "Zipf_Distribution"]
        for i in list_00:
            self.combobox[3].addItem(i)
        # selection updates the formula preview (31); Execute runs it (32)
        self.combobox[3].currentIndexChanged.connect(self.Select_onChange31)
        self.le[3] = [QLineEdit() for i in range(2)]
        # label[3][0]: formula image, [1]: parameter legend, [2]/[3]: captions
        self.label[3] = [QLabel()] + [QLabel(text) for text in [" ", "1. parameter:", "2. parameter:"]]
        self.label[3][1].setFont(QFont('Sanserif', 15))
        self.label[3][1].setStyleSheet("font:bold")
        self.pb[3] = []
        for i, j in zip(['Execute', 'Clear', 'Help'], [self.Select_onChange32, self.clear31, self.msg3]):
            self.pb[3].append(QPushButton(i))
            self.pb[3][-1].clicked.connect(j)
        for i in [self.combobox[3], *self.label[3][:2], gbox]:
            layout.addWidget(i)
        # grid placement: widget, row (list_01), column (list_02)
        list_00 = [*self.label[3][2:], *self.le[3], *self.pb[3]]
        list_01 = [1, 2, 1, 2, 2, 3, 1]
        list_02 = [0, 0, 1, 1, 2, 2, 2]
        for i in zip(list_00, list_01, list_02):
            glayout.addWidget(*i)
        self.stack[3].setLayout(layout)
def clear31(self, index=3):
for i in self.le[index]:
i.clear()
def msg3(self):
QMessageBox.about(self, "Help", "This function is a simulator of general distributions.\n"
"The simulator will generate a graphic of probability distribution.\n"
"Inputboxes support multiple sets of parameters like '0.5,0.7,1.0'")
    def Select_onChange31(self):
        """Update the formula-image preview and the parameter legend for the
        currently selected distribution (image file, max label size, legend)."""
        switcher = {
            "Binomial_Distribution": ["binomial.svg", [200, 60], 'n={}\n' 'p={}'],
            "Normal_Distribution": ["normal.svg", [200, 60], 'μ={}\n' 'σ²={}'],
            "Poisson_Distribution": ["poisson.svg", [250, 70], 'λ={}'],
            "Rayleigh_Distribution": ["rayleigh.svg", [250, 60], 'σ={}'],
            "Beta_Distribution": ["Beta.svg", [200, 60], 'α={}\n' 'β={}'],
            "F_Distribution": ["f.svg", [450, 350], 'd1={}\n' 'd2={}'],
            "Gamma_Distribution": ["gamma2.svg", [300, 50], 'k={} θ={}'],
            "Geometric_Distribution": ["geometric.svg", [290, 60], 'p={}'],
            "Lognorm_Distribution": ["lognorm.svg", [250, 60], 'μ={}\n' 'σ={}'],
            "Chi2_Distribution": ["chi2.svg", [300, 140], 'df={}'],
            "Cauchy_Distribution": ["cauchy.svg", [350, 80], 'x0={}\n' 'γ={}'],
            "Laplace_Distribution": ["laplace.svg", [200, 60], 'μ={}\n' 'λ={}'],
            "T_Distribution": ["t.svg", [300, 90], 'v={}'],
            "Expon_Distribution": ["exponential.svg", [200, 60], 'λ={}'],
            "Weibull_Distribution": ["weibull.svg", [350, 80], 'λ={}\n' 'a={}'],
            "Negative_Binomial_Distribution": ["negativ.svg", [300, 60], 'n={}\n' 'p={}'],
            "Lomax_Distribution": ["lomax.svg", [250, 60], 'λ={}\n' 'α={}'],
            "Logistic_Distribution": ["logistic.svg", [300, 170], 'μ={}\n' 's={}']
        }
        if self.combobox[3].currentText() == 'Please select...':
            # placeholder entry: blank both labels
            self.label[3][0].setText(' ')
            self.label[3][1].setText(' ')
        elif self.combobox[3].currentText() == 'Zipf_Distribution':
            # Zipf has no formula image available
            self.label[3][0].setText('No pic')
            self.label[3][0].setScaledContents(True)
            self.label[3][0].setMaximumSize(200, 60)
            self.label[3][1].setText('a={}')
        else:
            i = switcher.get(self.combobox[3].currentText(), None)
            self.label[3][0].setPixmap(QPixmap(i[0]))
            self.label[3][0].setScaledContents(True)
            self.label[3][0].setMaximumSize(*i[1])
            self.label[3][1].setText(i[2])
def _decorator(self, func):
def inner(*args, **kwargs):
try:
n_args = len(inspect.getfullargspec(func).args)
sp = [self.le[3][i].text().split(',') for i in range(n_args)]
if len(sp) > 1 and not all(len(sp[0]) == len(x) for x in sp[1:]):
QMessageBox.about(self, "Warning", "The length of the two rows is not the same.")
else:
func(*sp, *args, **kwargs)
except:
self.Error()
return inner
    def Select_onChange32(self):
        """Execute the plot function of the selected distribution; each one
        is wrapped by _decorator to parse the parameter input boxes."""
        switcher = {
            "Binomial_Distribution": self._decorator(distribution_displays.Binomial_Distribution),
            "Normal_Distribution": self._decorator(distribution_displays.Normal_Distribution),
            "Poisson_Distribution": self._decorator(distribution_displays.Poisson_Distribution),
            "Rayleigh_Distribution": self._decorator(distribution_displays.Rayleigh_Distribution),
            "Beta_Distribution": self._decorator(distribution_displays.Beta_Distribution),
            "F_Distribution": self._decorator(distribution_displays.F_Distribution),
            "Gamma_Distribution": self._decorator(distribution_displays.Gamma_Distribution),
            "Geometric_Distribution": self._decorator(distribution_displays.Geometric_Distribution),
            "Lognorm_Distribution": self._decorator(distribution_displays.Lognorm_Distribution),
            # "Uniform_Distribution": self._decorator(distribution_displays.Uniform_Distribution),
            "Chi2_Distribution": self._decorator(distribution_displays.Chi2_Distribution),
            "Cauchy_Distribution": self._decorator(distribution_displays.Cauchy_Distribution),
            "Laplace_Distribution": self._decorator(distribution_displays.Laplace_Distribution),
            "T_Distribution": self._decorator(distribution_displays.T_Distribution),
            "Expon_Distribution": self._decorator(distribution_displays.Expon_Distribution),
            "Weibull_Distribution": self._decorator(distribution_displays.Weibull_Distribution),
            "Zipf_Distribution": self._decorator(distribution_displays.Zipf_Distribution),
            "Negative_Binomial_Distribution": self._decorator(distribution_displays.Negative_Binomial_Distribution),
            "Lomax_Distribution": self._decorator(distribution_displays.Lomax_Distribution),
            "Logistic_Distribution": self._decorator(distribution_displays.Logistic_Distribution)
        }
        # unknown selection (e.g. the placeholder) is a no-op
        switcher.get(self.combobox[3].currentText(), lambda: None)()
def Error(self):
QMessageBox.about(self, 'Warning', 'Error happened!\n'
'please check parameters!')
'----------------------Central Limit Theorem----------------------------'
    def stackUI4(self):
        """Build the Central Limit Theorem page (stack index 4)."""
        layout = QVBoxLayout()
        layout1 = QGridLayout()
        hbox1 = QWidget()
        hbox1.setLayout(layout1)
        self.pb[4] = [QPushButton(text) for text in ['Execute', 'Help', 'Clear']]
        self.le[4] = [QLineEdit() for i in range(2)]
        self.label[4] = [QLabel(text) for text in ['Number of dice:', 'Number of times:', 'Central limit theorem']]
        # label[4][2] is the page banner; style and center it.
        self.label[4][2].setStyleSheet(
            "background-color:'RoyalBlue';border:outset;color:'yellow';font:bold;font-size:20px"
        )
        self.label[4][2].setAlignment(Qt.AlignCenter)
        self.label[4][2].setGeometry(20, 30, 600, 300)
        # Widgets paired with their (row, column) grid positions below.
        list_00 = [*self.label[4][:2], *self.le[4], *self.pb[4]]
        list_01 = [2, 3, 2, 3, 4, 1, 5]
        list_02 = [0, 0, 1, 1, 2, 2, 2]
        for i in zip(list_00, list_01, list_02):
            layout1.addWidget(*i)
        layout.addWidget(self.label[4][2])
        layout.addWidget(hbox1)
        # Execute / Help / Clear handlers, in the same order as self.pb[4].
        for i, j in zip(self.pb[4], [self.CentralLimintTheorem, self.msg4, self.clear41]):
            i.clicked.connect(j)
        self.stack[4].setLayout(layout)
def clear41(self, index=4):
for i in self.le[index]:
i.clear()
def msg4(self):
QMessageBox.about(self, 'Help', 'the more dice and the more loop times,makes the histgram more like approach'
' the Normal Distribution')
def CentralLimintTheorem(self):
try:
number = int(self.le[4][0].text())
times = int(self.le[4][1].text())
mu = 3.5 * number
sigma = (35 / 12) * number
x = np.arange(1 * number - 1, 6 * number + 1, 0.1)
y = norm.pdf(x, loc=mu, scale=math.sqrt(sigma))
samples_sum = []
for i in range(times):
sample = np.random.randint(1, 7, size=number)
sum = np.sum(sample)
samples_sum.append(sum)
fig = plt.figure(tight_layout=True)
ax = fig.add_subplot(111)
ax.hist(samples_sum, bins=np.arange(4 + 5 * number) - 0.5, density=True, color='RoyalBlue', alpha=0.9,
label='relative occurrence')
ax.set_xlabel('Dice Points')
ax.set_ylabel('relative occurence')
ax.set_xlim([number - 2, number * 6 + 2])
ax.legend(loc=2)
ax2 = ax.twinx()
ax2.plot(x, y, label='Theoretical Value', color='r')
ax2.legend(loc=1)
ax2.set_ylabel('Probability')
ax2.set_ylim(ymin=0)
plt.title('{} dice {} throws'.format(number, times))
ax2.set_ylim(ax.get_ylim())
plt.grid()
plt.show()
except:
self.Error()
'---------------------------Correlation example-------------------------------------------------'
    def stackUI5(self):
        """Build the correlation-examples page (stack index 5)."""
        layout = QVBoxLayout()
        layout_i = [QGridLayout() for i in range(2)]
        hlayout = QHBoxLayout()
        hbox = QWidget()
        hbox.setLayout(hlayout)
        self.label[5] = [QLabel(text) for text in ['Title precision:', 'x.xx']]
        # Spinbox selecting how many decimals plot titles show.
        self.sp[5] = QSpinBox()
        self.sp[5].setValue(2)
        self.sp[5].setMinimum(0)
        self.sp[5].setSingleStep(1)
        # Live "x.xx…" preview of the chosen precision.
        self.sp[5].valueChanged.connect(functools.partial(lambda x: x.label[5][1].setText("x." + x.sp[5].value() * "x"),
                                                          self))
        for i in [*self.label[5], self.sp[5]]:
            hlayout.addWidget(i)
        group = [QGroupBox(text, self) for text in ['Example', 'Input Data']]
        for i, j in zip(group, layout_i):
            i.setLayout(j)
        for i in [hbox, *group]:
            layout.addWidget(i)
        self.pb[5] = [QPushButton(text) for text in
                      ['Weak correlation', 'Strong correlation', 'Uncorrelated', 'AddFile']]
        list_00 = [0, 0, 0, 1]
        list_01 = [1, 1, 1, 1]
        list_02 = [0, 1, 2, 0]
        list_03 = [self.Weak_correlation, self.Strong_correlation, self.Uncorrelated, self.Openfile_coe]
        # NOTE(review): self.Weak_correlation is redefined later in the class
        # (confidence-ellipse page), so this button actually invokes that later
        # definition — confirm which behaviour is intended.
        # Unpacking: i = group index, j = button, k = [row, col], l = handler.
        for i, j, *k, l in zip(list_00, self.pb[5], list_01, list_02, list_03):
            layout_i[i].addWidget(j, *k)
            j.clicked.connect(l)
        self.stack[5].setLayout(layout)
def Valuechange51(self): # In my opinion 3 levels are enough
self.label[5][1].setText("x." + self.sp[5].value() * "x")
def base_correlation(self, func):
try:
x = np.arange(1, 101)
y = func(x)
coefxy = np.corrcoef(x, y)
pxy = coefxy[0, 1]
res = linregress(x, y)
y1 = res.intercept + np.multiply(res.slope, x)
plt.plot(x, y, marker='o', linestyle='None')
plt.plot(x, y1, c='r', label='fitted line')
plt.xlabel('x')
plt.ylabel('y')
plt.title(f"r = {pxy:.{self.sp[5].value()}f} Fitted line: y = {res.intercept:.{self.sp[5].value()}f}"
f" + {res.slope:.{self.sp[5].value()}f} * x")
plt.grid()
plt.legend()
plt.show()
except:
self.Error()
    def Weak_correlation(self):
        # Demo: very noisy linear data, so |r| comes out small.
        # NOTE(review): this method is redefined later in the class (the
        # confidence-ellipse page), so that later definition shadows this one
        # at runtime; the stackUI5 button bound to self.Weak_correlation will
        # call the later version. Consider renaming one of them.
        self.base_correlation(lambda x: np.random.randn(100) * 350 + np.random.randint(-10, 10, 1) * x)
def Strong_correlation(self):
self.base_correlation(lambda x: np.random.randn(100) * 50 + np.random.randint(-10, 10, 1) * x)
def Uncorrelated(self):
x0 = np.linspace(-1, 1, 200) # Draw a circle in polar coordinates
y0 = np.sqrt(1 - x0 ** 2)
list1 = []
list2 = []
for i in range(200):
a = (-1) ** random.randint(0, 1)
b = random.random() * 0.1
list1.append(a)
list2.append(b)
y1 = np.multiply(y0, list1)
list3 = np.array(list2)
y2 = y1 + list3
coefxy = np.corrcoef(x0, y2)
pxy = coefxy[0, 1]
plt.plot(x0, y2, marker='o', linestyle='None')
plt.xlabel('x')
plt.ylabel('y')
plt.title(f"r = {pxy:.{self.sp[5].value()}f} No fitted line")
plt.grid()
plt.show()
def Openfile_coe(self):
try:
self.filename = QFileDialog.getOpenFileName(self, 'ChooseFile')[0]
self.Openfile_coe2()
except Exception as r:
self.Error()
def loader(self):
with open(self.filename, 'r') as f:
f_csv = csv.reader(f)
csv_list = []
for row in f_csv:
csv_list.append(row)
x = csv_list[0]
x = list(map(int, x))
y = csv_list[1]
y = list(map(int, y))
return x, y
def Openfile_coe2(self):
x, y = self.loader()
cov = np.cov(*self.loader())
result1 = cov[0, 0] * cov[1, 1]
result2 = cov[0, 1] * cov[1, 0]
if result1 != 0 and result2 != 0: # Check whether the data is independent
coefxy = np.corrcoef(x, y)
pxy = coefxy[0, 1]
res = linregress(x, y)
y1 = res.intercept + np.multiply(res.slope, x)
plt.plot(x, y, marker='o', linestyle='None')
plt.xlabel('x')
plt.ylabel('y')
plt.plot(x, y1, c='r', label='fitted line')
plt.title(f"r = {pxy:.{self.sp[5].value()}f} Fitted line: y = {res.intercept:.{self.sp[5].value()}f}"
f" + {res.slope:.{self.sp[5].value()}f} * x")
plt.legend()
plt.grid()
plt.show()
else:
plt.plot(x, y, marker='o', linestyle='None')
plt.xlabel('x')
plt.ylabel('y')
plt.title(f"r = {0:.{self.sp[5].value()}f} No fitted line")
plt.grid()
plt.show()
'===================CCF========================='
def stackUI6(self):
layout = QVBoxLayout()
layout_i = [QGridLayout() for i in range(2)]
self.group[6] = [QGroupBox(text, self) for text in ['Example', 'Input Data']]
for i, j in zip(self.group[6], layout_i):
i.setLayout(j)
layout.addWidget(i)
self.pb[6] = [QPushButton(i) for i in ["CCF", "ACF", "ACF_Rxx", "AddFile1_CCF", "AddFile2_ACF",
"AddFile3_Linear_Regression"]]
list_00 = [1, 1, 1, 1, 1, 1]
list_01 = [0, 1, 2, 0, 1, 2]
list_02 = [self.ccf, self.acf, self.acf_Rxx, self.add_file_01, self.add_file_02, self.add_file_03]
for i, *j, k in zip(self.pb[6][:3], list_00[:3], list_01[:3], list_02[:3]):
layout_i[0].addWidget(i, *j)
layout_i[0].addWidget(i)
i.clicked.connect(k)
for i, *j, k in zip(self.pb[6][3:], list_00[3:], list_01[3:], list_02[3:]):
layout_i[1].addWidget(i, *j)
layout_i[1].addWidget(i)
i.clicked.connect(k)
self.stack[6].setLayout(layout)
def acf_Rxx(self):
# x = np.array([50,47,60,88,20,19,12,57,49,33,42,10,99,22,58,67,90,56,33,74,23,62,90,29,74,10,29,74,57,15])
x1 = np.arange(1, 50, 0.01)
x2 = np.cos(x1)
x3 = x2[0]
n = len(x3)
o = np.arange(1 - n, n)
var = np.var(x3, ddof=1)
mx = np.mean(x3)
autocorrelation = np.correlate(x3, x3, 'full')
plt.plot(o, autocorrelation, marker='o', linestyle='None')
plt.plot([1 - n, n], [var + mx * 2, var + mx * 2], c='g', linestyle='--')
plt.plot([1 - n, n], [mx * 2, mx * 2], c='r', linestyle='--')
plt.xlabel('Time')
plt.ylabel('R')
plt.title('Autocorrelation function')
plt.grid()
plt.show()
def ccf(self):
x = np.array([50, 47, 60, 88, 20, 19, 12, 57, 49, 33, 42, 10, 99, 22, 58, 67, 90, 56, 33, 74, 23, 62, 90, 29,
74, 10, 29, 74, 57, 15])
y = np.array([20, 70, 66, 40, 53, 22, 14, 68, 43, 89, 54, 55, 3, 78, 56, 4, 9, 41, 14, 24, 68, 64, 87, 45, 33,
67, 55, 22, 86, 45])
n = len(x)
o = np.arange(1 - n, n)
crosscorrelation = np.correlate(x, y, 'full')
plt.plot(o, crosscorrelation, marker='o', linestyle='None')
plt.xlabel('Time')
plt.ylabel('R')
plt.title('Cross correlation function')
plt.grid()
plt.show()
def acf(self):
x = np.array([50, 47, 60, 88, 20, 19])
n = len(x)
o = np.arange(1 - n, n)
autocorrelation = np.correlate(x, x, 'full')
plt.plot(o, autocorrelation, marker='o', linestyle='None')
plt.xlabel('Time')
plt.ylabel('R')
plt.title('Autocorrelation function')
plt.grid()
plt.show()
'------------------------------------------------------'
'-------------------------------------------------------'
    def stackUI7(self):
        """Placeholder page (stack index 7): no widgets are built yet."""
        pass
def get_data(self, func, title):
try:
x, y = self.loader()
n = len(x)
o = np.arange(1 - n, n)
correlation = func(x, y)
plt.plot(o, correlation, marker='o')
plt.xlabel('Time')
plt.ylabel('R')
plt.title(title)
plt.show()
plt.grid()
except:
self.Error()
def add_file_01(self):
try:
self.filename = QFileDialog.getOpenFileName(self, 'ChooseFile')[0]
self.self.get_data(lambda x, y: np.correlate(x, y, 'full'), 'Cross correlation function')
except:
self.Error()
def add_file_02(self):
try:
self.filename = QFileDialog.getOpenFileName(self, 'ChooseFile')[0]
self.self.get_data(lambda x, y: np.correlate(x, x, 'full'), 'Autocorrelation function')
except:
self.Error()
def add_file_03(self):
try:
self.filename = QFileDialog.getOpenFileName(self, 'ChooseFile')[0]
self.get_data3()
except:
self.Error()
def get_data3(self):
try:
x, y = self.loader()
res = linregress(x, y)
y1 = res.intercept + np.multiply(res.slope, x)
plt.plot(x, y, 'o', label='original data')
plt.plot(x, y1, 'r', label='fitted line')
plt.legend()
plt.show()
except:
self.Error()
'------------------AR-----------------------------'
'-------------------------AR Model----------------------------------'
    def stackUI8(self): # This part is the application of ARMA Model in the economic field, which is inconsistent
        # with the teaching purpose of this lecture
        """Disabled page: AR-model demo on an economics dataset, kept for reference."""
        # layout = QVBoxLayout()
        #
        # self.pb81 = QPushButton('Dataset')
        # self.pb82 = QPushButton('coefficient')
        # self.pb84 = QPushButton('Prediction')
        # self.pb81.clicked.connect(self.Dataset)
        # self.pb82.clicked.connect(self.coefficient)
        # self.pb84.clicked.connect(self.Prediction)
        #
        # self.df = pd.read_csv(
        #     'https://raw.githubusercontent.com/jbrownlee/Datasets/master/daily-min-temperatures.csv',
        #     index_col=0, parse_dates=True)
        #
        # layout.addWidget(self.pb81)
        # layout.addWidget(self.pb82)
        # layout.addWidget(self.pb84)
        # self.stack8.setLayout(layout)
        pass
    def Dataset(self):
        """Disabled: would plot the daily-min-temperatures dataset (see stackUI8)."""
        pass
        # self.df.plot()
        # plt.show()
    def coefficient(self):
        """Disabled: would show the lag-1 Pearson correlation of the dataset."""
        pass
        # a = self.df.Temp
        # b = self.df.Temp.shift(1)
        # coefficient = pearsonr(a[1:], b[1:])
        # lag_plot(self.df)
        # plt.title('Correlation coefficient:r = {:.2f}'.format(coefficient[0]))
        # plt.show()
    def ACF_PACF(self):
        """Disabled: would plot the dataset's ACF and PACF stacked vertically."""
        pass
        # fig, axes = plt.subplots(2, 1)
        # plot_acf(self.df['Temp'], ax=axes[0])
        # plot_pacf(self.df['Temp'], ax=axes[1])
        #
        # plt.tight_layout()
        # plt.show()
    def Prediction(self):
        """Disabled: would fit an AR model and forecast the last 7 observations."""
        pass
        # x = self.df.values
        # train, test = x[:-7], x[-7:]
        # model_fit = AR(train).fit()
        # params = model_fit.params
        # p = model_fit.k_ar
        # # p = 1
        # history = train[-p:]
        # history = np.hstack(history).tolist()
        # test = np.hstack(test).tolist()
        #
        # predictions = []
        # for t in range(len(test)):
        #     lag = history[-p:]
        #     yhat = params[0]
        #     for i in range(p):
        #         yhat += params[i + 1] * lag[p - 1 - i]
        #     predictions.append(yhat)
        #     obs = test[t]
        #     history.append(obs)
        # print(np.mean((np.array(test) - np.array(predictions)) ** 2)) # mean_squared_error (MSE)
        # plt.plot(test, color='b',label='Reality')
        # plt.plot(predictions, color='r',label='Prediction')
        # plt.legend()
        # plt.show()
    def stackUI9(self):
        """Build the AR(1) sample/autocovariance page (stack index 9)."""
        layout = QVBoxLayout()
        layout_i = [QGridLayout() for i in range(2)]
        self.group[9] = [QGroupBox(text, self) for text in ['Sample generate', 'Graphic']]
        for i, j in zip(self.group[9], layout_i):
            i.setLayout(j)
            layout.addWidget(i)
        self.label[9] = [QLabel(text) for text in ["b1:", "σw^2"]]
        self.le[9] = [QLineEdit() for i in range(2)]
        self.pb[9] = []
        for i, j in zip(["Execute", "Rxx", "rxx"], [self.sample_generate91, self.Rxx91, self.rxx91]):
            self.pb[9].append(QPushButton(i))
            self.pb[9][-1].clicked.connect(j)
        # Widgets paired with (row, column) positions; first five widgets go to
        # the 'Sample generate' grid, the remaining buttons to 'Graphic'.
        list_00 = [*self.label[9], *self.le[9], *self.pb[9]]
        list_01 = [2, 3, 2, 3, 4, 1, 2]
        list_02 = [0, 0, 1, 1, 2, 1, 1]
        for i in zip(list_00[:5], list_01[:5], list_02[:5]):
            layout_i[0].addWidget(*i)
        for i in zip(list_00[5:], list_01[5:], list_02[5:]):
            layout_i[1].addWidget(*i)
        self.stack[9].setLayout(layout)
def sample_generate91(self):
try:
b1 = float(self.le[9][0].text())
ar_coefs = [1]
ar_coefs.append(b1)
ma_coefs = [1, 0]
sigma_s = float(self.le[9][1].text())
max_lag = 15
y = arma_generate_sample(ar_coefs, ma_coefs, nsample=100)
x = np.arange(100)
plt.plot(x, y)
plt.title('Time Series b1 = {} σw^2 = {}'.format(b1, sigma_s))
plt.show()
except:
self.Error()
def Rxx91(self):
try:
b1 = float(self.le[9][0].text())
sigma_w_2 = float(self.le[9][1].text())
sigma_x_2 = sigma_w_2 / (1 - b1 ** 2)
Rxx = []
for m in range(-10, 11, 1):
y = b1 ** abs(m) * sigma_x_2
Rxx.append(y)
x = np.arange(-10, 11)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x, Rxx, 'bo')
ax.set_ylim([0, None])
plt.title('Rxx b1 = {} σw^2 = {}'.format(b1, sigma_w_2))
plt.grid()
plt.show()
except:
self.Error()
# coefficient
def rxx91(self):
try:
b1 = float(self.le[9][0].text())
sigma_w_2 = float(self.le[9][1].text())
sigma_x_2 = sigma_w_2 / (1 - b1 ** 2)
Rxx = []
for m in range(0, 11, 1):
y = b1 ** abs(m) * sigma_x_2
Rxx.append(y)
rxx = []
for i in Rxx:
i /= sigma_x_2
rxx.append(i)
x = np.arange(0, 11)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x, rxx, 'bo')
ax.set_ylim([0, None])
plt.title('rxx b1 = {} σw^2 = {}'.format(b1, sigma_w_2))
plt.grid()
plt.show()
except:
self.Error() # coeff
'-------------------------Estimation Model----------------------------------'
    def stackUI10(self):
        """Build the parameter-estimation page (stack index 10).

        combobox[10] selects the distribution to *simulate*, combobox[9] the
        model to *fit* to that sample.
        """
        layout = QVBoxLayout()
        glayout = QGridLayout()
        gbox = QWidget()
        gbox.setLayout(glayout)
        self.combobox[9] = QComboBox()
        self.combobox[10] = QComboBox()
        font31 = QFont()
        font31.setPointSize(16)
        self.combobox[9].setFont(font31)
        self.combobox[10].setFont(font31)
        list_00 = ["Please select Distribution", "Beta_Distribution", "Cauchy_Distribution",
                   "F_Distribution", "Gamma_Distribution", "Laplace_Distribution", "Logistic_Distribution",
                   "Lomax_Distribution", "Lognorm_Distribution", "Normal_Distribution", "Rayleigh_Distribution"]
        list_001 = ["Please fit Distribution", "Beta_Distribution", "Binomial_Distribution", "Cauchy_Distribution",
                    "F_Distribution", "Gamma_Distribution", "Laplace_Distribution", "Logistic_Distribution",
                    "Lomax_Distribution", "Lognorm_Distribution", "Normal_Distribution", "Rayleigh_Distribution"]
        for i in list_00:
            self.combobox[10].addItem(i)
        for i in list_001:
            self.combobox[9].addItem(i)
        self.combobox[10].currentIndexChanged.connect(self.Select_onChange101)
        self.le[10] = [QLineEdit() for i in range(2)]
        # label[10][0] shows the formula image, label[10][1] the parameter names.
        self.label[10] = [QLabel()] + [QLabel(text) for text in [" ", "1. parameter:", "2. parameter:"]]
        self.label[10][1].setFont(QFont('Sanserif', 15))
        self.label[10][1].setStyleSheet("font:bold")
        self.pb[10] = []
        for i, j in zip(['Execute', 'Clear', 'Help', 'Fit'],
                        [self.Simulation, self.clear101, self.msg10, self.fit]):
            self.pb[10].append(QPushButton(i))
            self.pb[10][-1].clicked.connect(j)
        for i in [self.combobox[10], self.combobox[9], *self.label[10][:2], gbox]:
            layout.addWidget(i)
        # Widgets paired with their (row, column) grid positions below.
        list_00 = [*self.label[10][2:], *self.le[10], *self.pb[10]]
        list_01 = [1, 2, 1, 2, 2, 4, 1, 3]
        list_02 = [0, 0, 1, 1, 2, 2, 2, 2]
        for i in zip(list_00, list_01, list_02):
            glayout.addWidget(*i)
        self.stack[10].setLayout(layout)
def clear101(self, index=10):
for i in self.le[index]:
i.clear()
    def Select_onChange101(self):
        """Update the formula image and parameter labels for the chosen distribution.

        The switcher maps a distribution name to (svg file, max label size,
        parameter-name template shown next to the input boxes).
        """
        switcher = {
            "Binomial_Distribution": ["binomial.svg", [200, 60], 'n={}\n' 'p={}'],
            "Normal_Distribution": ["normal.svg", [200, 60], 'μ={}\n' 'σ²={}'],
            "Poisson_Distribution": ["poisson.svg", [250, 70], 'λ={}'],
            "Rayleigh_Distribution": ["rayleigh.svg", [250, 60], 'σ={}'],
            "Beta_Distribution": ["Beta.svg", [200, 60], 'α={}\n' 'β={}'],
            "F_Distribution": ["f.svg", [450, 350], 'd1={}\n' 'd2={}'],
            "Gamma_Distribution": ["gamma2.svg", [300, 50], 'k={} θ={}'],
            "Geometric_Distribution": ["geometric.svg", [290, 60], 'p={}'],
            "Lognorm_Distribution": ["lognorm.svg", [250, 60], 'μ={}\n' 'σ={}'],
            "Chi2_Distribution": ["chi2.svg", [300, 140], 'df={}'],
            "Cauchy_Distribution": ["cauchy.svg", [350, 80], 'x0={}\n' 'γ={}'],
            "Laplace_Distribution": ["laplace.svg", [200, 60], 'μ={}\n' 'λ={}'],
            "T_Distribution": ["t.svg", [300, 90], 'v={}'],
            "Expon_Distribution": ["exponential.svg", [200, 60], 'λ={}'],
            "Weibull_Distribution": ["weibull.svg", [350, 80], 'λ={}\n' 'a={}'],
            "Negative_Binomial_Distribution": ["negativ.svg", [300, 60], 'n={}\n' 'p={}'],
            "Lomax_Distribution": ["lomax.svg", [250, 60], 'λ={}\n' 'α={}'],
            "Logistic_Distribution": ["logistic.svg", [300, 170], 'μ={}\n' 's={}']
        }
        if self.combobox[10].currentText() == 'Please select Distribution':
            # Placeholder entry: blank out both labels.
            self.label[10][0].setText(' ')
            self.label[10][1].setText(' ')
        elif self.combobox[10].currentText() == 'Zipf_Distribution':
            # Zipf has no formula image shipped with the app.
            self.label[10][0].setText('No pic')
            self.label[10][0].setScaledContents(True)
            self.label[10][0].setMaximumSize(200, 60)
            self.label[10][1].setText('a={}')
        else:
            i = switcher.get(self.combobox[10].currentText(), None)
            self.label[10][0].setPixmap(QPixmap(i[0]))
            self.label[10][0].setScaledContents(True)
            self.label[10][0].setMaximumSize(*i[1])
            self.label[10][1].setText(i[2])
def msg10(self):
QMessageBox.about(self, "Help", "The simulator generates a probability distribution\n"
"which is then fitted to estimate its parameters.\n"
"Inputboxes support a sets of parameters like '4'")
def Simulation(self):
try:
a = 0
b = 0
a = float(self.le[10][0].text())
b = float(self.le[10][1].text())
switcher = {
# "Binomial_Distribution": Simulation_fit.getParament(a, b).binomial_P,
"Normal_Distribution": Simulation_fit.getParament(a, b).normal_P,
"Rayleigh_Distribution": Simulation_fit.getParament(a, b).rayleigh_P,
"Beta_Distribution": Simulation_fit.getParament(a, b).beta_P,
"F_Distribution": Simulation_fit.getParament(a, b).f_P,
"Gamma_Distribution": Simulation_fit.getParament(a, b).gamma_P,
"Lognorm_Distribution": Simulation_fit.getParament(a, b).lognorm_P,
"Cauchy_Distribution": Simulation_fit.getParament(a, b).cauchy_P,
"Laplace_Distribution": Simulation_fit.getParament(a, b).laplace_P,
"Lomax_Distribution": Simulation_fit.getParament(a, b).lomax_P,
"Logistic_Distribution": Simulation_fit.getParament(a, b).logistic_P
}
X = switcher.get(self.combobox[10].currentText(), lambda: None)()
fig = plt.figure()
Simulation_fit.fit_Funktion(X).Sim()
except:
self.Error()
def fit(self):
try:
a = 0
b = 0
a = float(self.le[10][0].text())
# print(a)
b = float(self.le[10][1].text())
# print(b)
switcher = {
# "Binomial_Distribution": Simulation_fit.getParament(a, b).binomial_P,
"Normal_Distribution": Simulation_fit.getParament(a, b).normal_P,
"Rayleigh_Distribution": Simulation_fit.getParament(a, b).rayleigh_P,
"Beta_Distribution": Simulation_fit.getParament(a, b).beta_P,
"F_Distribution": Simulation_fit.getParament(a, b).f_P,
"Gamma_Distribution": Simulation_fit.getParament(a, b).gamma_P,
"Lognorm_Distribution": Simulation_fit.getParament(a, b).lognorm_P,
"Cauchy_Distribution": Simulation_fit.getParament(a, b).cauchy_P,
"Laplace_Distribution": Simulation_fit.getParament(a, b).laplace_P,
"Lomax_Distribution": Simulation_fit.getParament(a, b).lomax_P,
"Logistic_Distribution": Simulation_fit.getParament(a, b).logistic_P
}
X = switcher.get(self.combobox[10].currentText(), lambda: None)()
fig = plt.figure()
switcher_new = {
"Binomial_Distribution": Simulation_fit.getParament(a, b).binomial_P,
"Normal_Distribution": Simulation_fit.fit_Funktion(X).normal_Fit,
"Rayleigh_Distribution": Simulation_fit.fit_Funktion(X).rayleigh_Fit,
"Beta_Distribution": Simulation_fit.fit_Funktion(X).beat_Fit,
"F_Distribution": Simulation_fit.fit_Funktion(X).f_Fit,
"Gamma_Distribution": Simulation_fit.fit_Funktion(X).gamma_Fit,
"Lognorm_Distribution": Simulation_fit.fit_Funktion(X).lognorm_Fit,
"Cauchy_Distribution": Simulation_fit.fit_Funktion(X).cauchy_Fit,
"Laplace_Distribution": Simulation_fit.fit_Funktion(X).laplace_Fit,
"Lomax_Distribution": Simulation_fit.fit_Funktion(X).lomax_Fit,
"Logistic_Distribution": Simulation_fit.fit_Funktion(X).logistic_Fit
}
switcher_new.get(self.combobox[9].currentText(), lambda: None)()
except:
self.Error()
'-------------------------confidence ellipse----------------------------------'
    def stackUI11(self):
        """Build the confidence-ellipse page (stack index 11).

        self.le[11] holds four inputs: quantity, mu, scale and std (the latter
        three accept comma-separated value pairs/lists).
        """
        vlayout = QVBoxLayout(self.stack[11])
        gridlayout = QGridLayout()
        grid = QWidget()
        grid.setLayout(gridlayout)
        vlayout.addWidget(grid)
        self.le[11] = [QLineEdit() for i in range(4)]
        list_00 = ["pb1_1", "pb1_2", "pb1_3", "pb1_4", "help"]
        list_01 = ["positive correlation", "negative correlation", "Weak correlation", "Clear", "Help"]
        list_02 = [self.positive_correlation, self.negative_correlation, self.Weak_correlation,
                   self.clear111, self.msg11]
        for i, j, k in zip(list_00, list_01, list_02):
            self.var_dict[i] = QPushButton(j)
            self.var_dict[i].clicked.connect(k)
        for i, j in zip(["label11", "label12", "label13", "label14"], ["quantity:", "mu:", "scale", "std"]):
            self.var_dict[i] = QLabel()
            self.var_dict[i].setText(j)
        # Widgets paired with their (row, column) grid positions below.
        list_03 = [self.var_dict["help"], self.var_dict["label11"], self.var_dict["label12"],
                   self.var_dict["label13"], self.var_dict["label14"], *self.le[11],
                   self.var_dict["pb1_1"], self.var_dict["pb1_2"], self.var_dict["pb1_3"], self.var_dict["pb1_4"]]
        list_04 = [1, 2, 3, 4, 5, 2, 3, 4, 5, 2, 3, 4, 5]
        list_05 = [2, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2]
        for i in zip(list_03, list_04, list_05):
            gridlayout.addWidget(*i)
def positive_correlation(self):
fig, ax_nstd = plt.subplots()
dependency_nstd = [[0.85, 0.35],
[0.15, -0.65]]
sp = [self.le[11][i].text().split(',') for i in range(4)]
try:
n = int(self.le[11][0].text())
mu = list(map(int, sp[1]))
scale = list(map(int, sp[2]))
std = list(map(int, sp[3]))
ax_nstd.axvline(c='grey', lw=1)
x, y = get_correlated_dataset(n, dependency_nstd, mu, scale)
ax_nstd.scatter(x, y, s=0.5)
for i in range(len(std)):
colors = ['r', 'g', 'b']
confidence_ellipse(x, y, ax_nstd, n_std=std[i],
label=r'$%s\sigma$' % std[i], edgecolor=colors[i])
ax_nstd.scatter(mu[0], mu[1], c='red', s=3)
ax_nstd.set_title('Positive Correlation:Different standard deviations')
ax_nstd.legend()
plt.show()
except:
self.Error()
def negative_correlation(self):
fig, ax_nstd = plt.subplots()
dependency_nstd = [[0.9, -0.4],
[0.1, -0.6]]
sp = [self.le[11][i].text().split(',') for i in range(4)]
try:
n = int(self.le[11][0].text())
mu = list(map(int, sp[1]))
scale = list(map(int, sp[2]))
std = list(map(int, sp[3]))
ax_nstd.axvline(c='grey', lw=1)
x, y = get_correlated_dataset(n, dependency_nstd, mu, scale)
ax_nstd.scatter(x, y, s=0.5)
for i in range(len(std)):
colors = ['r', 'g', 'b']
confidence_ellipse(x, y, ax_nstd, n_std=std[i],
label=r'$%s\sigma$' % std[i], edgecolor=colors[i])
ax_nstd.scatter(mu[0], mu[1], c='red', s=3)
ax_nstd.set_title('Negative Correlation:Different standard deviations')
ax_nstd.legend()
plt.show()
except:
self.Error()
def Weak_correlation(self):
fig, ax_nstd = plt.subplots()
dependency_nstd = [[1, 0],
[0, 1]]
sp = [self.le[11][i].text().split(',') for i in range(4)]
try:
n = int(self.le[11][0].text())
mu = list(map(int, sp[1]))
scale = list(map(int, sp[2]))
std = list(map(int, sp[3]))
ax_nstd.axvline(c='grey', lw=1)
x, y = get_correlated_dataset(n, dependency_nstd, mu, scale)
ax_nstd.scatter(x, y, s=0.5)
for i in range(len(std)):
colors = ['r', 'g', 'b']
confidence_ellipse(x, y, ax_nstd, n_std=std[i],
label=r'$%s\sigma$' % std[i], edgecolor=colors[i])
ax_nstd.scatter(mu[0], mu[1], c='red', s=3)
ax_nstd.set_title('Weak Correlation:Different standard deviations')
ax_nstd.legend()
plt.show()
except:
self.Error()
def clear111(self, index=11):
for i in self.le[index]:
i.clear()
def msg11(self):
QMessageBox.about(self, "Help", "The simulator generates confidence ellipses\n"
"quantity support a sets of parameters like '500'\n"
"Other inputboxes support multiple sets of parameters like '8,5'")
'-------------------------Waveforms----------------------------------'
    def stackUI12(self):
        """Build the waveform-generation page (stack index 12)."""
        layout = QVBoxLayout(self.stack[12])
        gridlayout = QGridLayout()
        grid = QWidget()
        grid.setLayout(gridlayout)
        self.le[12] = [QLineEdit() for i in range(3)]
        self.combobox[12] = QComboBox()
        font31 = QFont()
        font31.setPointSize(12)
        self.combobox[12].setFont(font31)
        list_00 = ["Please select mode", "square125", "square25", "square50", "square75", "triangle", "noise"]
        for i in list_00:
            self.combobox[12].addItem(i)
        layout.addWidget(self.combobox[12])
        list_00 = ["pb1_1", "pb1_2", "pb1_3", "pb1_4", "help"]
        list_01 = ["Execute", "Save", "Read", "Clear", "Help"]
        # NOTE(review): `reafWave` looks like a typo for a module-level wave
        # reader (cf. createWave/saveWave) — confirm the name exists. Also the
        # Clear button is wired to clear111 (page 11's boxes) and Help to
        # msg11 (the ellipse help), while this page uses self.le[12]; both
        # bindings look copy-pasted from stackUI11.
        list_02 = [self.wave_generation, self.save_wave, reafWave,
                   self.clear111, self.msg11]
        for i, j, k in zip(list_00, list_01, list_02):
            self.var_dict[i] = QPushButton(j)
            self.var_dict[i].clicked.connect(k)
        for i, j in zip(["label11", "label12", "label13"], ["Sample-rate:", "Frequency:", "Time-lengh"]):
            self.var_dict[i] = QLabel()
            self.var_dict[i].setText(j)
        # Widgets paired with their (row, column) grid positions below.
        list_03 = [self.var_dict["help"], self.var_dict["label11"], self.var_dict["label12"],
                   self.var_dict["label13"], *self.le[12],
                   self.var_dict["pb1_1"], self.var_dict["pb1_2"], self.var_dict["pb1_3"], self.var_dict["pb1_4"]]
        list_04 = [1, 3, 4, 5, 3, 4, 5, 2, 3, 4, 5]
        list_05 = [2, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
        for i in zip(list_03, list_04, list_05):
            gridlayout.addWidget(*i)
def wave_generation(self):
try:
fig = plt.figure()
sample_rate = int(self.le[12][0].text())
fa = int(self.le[12][1].text())
t_length = float(self.le[12][2].text())
mode = str(self.combobox[12].currentText())
y, t = createWave(sample_rate=sample_rate, fa=fa, t_length=t_length, mode=mode)
plt.plot(t, y)
plt.title("%s" % mode)
plt.show()
except:
self.Error()
def save_wave(self):
try:
fig = plt.figure()
sample_rate = int(self.le[12][0].text())
fa = int(self.le[12][1].text())
t_length = float(self.le[12][2].text())
mode = str(self.combobox[12].currentText())
y, t = createWave(sample_rate=sample_rate, fa=fa, t_length=t_length, mode=mode)
saveWave(y=y, sample_rate=sample_rate, path=r'wave.wav')
except:
self.Error()
if __name__ == "__main__":
    # Standard Qt bootstrap: create the application, show the main window and
    # hand control to the event loop until the window closes.
    app = QApplication(sys.argv)
    ex = Example()
    ex.show()
    sys.exit(app.exec_())
| lukascao/GUI_vorlesung | GUI_example.py | GUI_example.py | py | 54,545 | python | en | code | 0 | github-code | 36 |
37307336429 |
def test_cobaya():
    """Run cobaya's 'evaluate' sampler against the cosmoprimo theory wrapper
    for each supported engine, with extra MG parameters for isitgr.
    """
    from cosmoprimo.fiducial import DESI
    cosmo = DESI()
    for engine in ['class', 'camb', 'isitgr']:
        params = {'Omega_m': {'prior': {'min': 0.1, 'max': 1.},
                              'ref': {'dist': 'norm', 'loc': 0.3, 'scale': 0.01},
                              # raw string: '\O' in a plain literal is an
                              # invalid escape (SyntaxWarning on modern Python)
                              'latex': r'\Omega_{m}'},
                  'omega_b': cosmo['omega_b'],
                  'H0': cosmo['H0'],
                  'A_s': cosmo['A_s'],
                  'n_s': cosmo['n_s'],
                  'tau_reio': cosmo['tau_reio']}
        extra_args = {'m_ncdm': cosmo['m_ncdm'], 'N_ur': cosmo['N_ur']}
        if engine == 'isitgr':
            # isitgr needs the modified-gravity parameterization spelled out.
            params['Q1'] = 1.5
            params['Q2'] = 0.5
            extra_args.update(parameterization='QD', binning='hybrid', z_div=1, z_TGR=2, z_tw=0.05,
                              k_c=0.01, Q3=1.5, Q4=0.5, D1=1, D2=1, D3=1, D4=1)
        info = {'params': params, 'debug': True,
                'likelihood': {'sn.pantheon': None, 'H0.riess2020': None, 'bao.sdss_dr12_consensus_final': None, 'planck_2018_highl_plik.TTTEEE': None},
                'theory': {'cosmoprimo.bindings.cobaya.cosmoprimo': {'engine': engine, 'stop_at_error': True, 'extra_args': extra_args}}}
        info_sampler = {'evaluate': {}}
        from cobaya.model import get_model
        from cobaya.sampler import get_sampler
        model = get_model(info)
        get_sampler(info_sampler, model=model).run()
def test_cosmosis():
    """Run cosmosis once per engine, rewriting the template ini's engine line."""
    import os
    import cosmoprimo
    # cosmosis modules locate cosmoprimo through this environment variable.
    os.environ['COSMOPRIMO_DIR'] = os.path.dirname(os.path.dirname(cosmoprimo.__file__))
    print(os.environ['COSMOPRIMO_DIR'])
    from cosmosis.main import run_cosmosis
    for engine in ['class', 'camb', 'isitgr']:
        with open('cosmosis_config.ini', 'r') as template:
            config = template.read().replace('engine = class', 'engine = {}'.format(engine))
        tmp_fn = 'tmp.ini'
        with open(tmp_fn, 'w') as out:
            out.write(config)
        run_cosmosis(tmp_fn)
        os.remove(tmp_fn)
if __name__ == '__main__':
test_cobaya()
test_cosmosis() | cosmodesi/cosmoprimo | cosmoprimo/tests/test_bindings.py | test_bindings.py | py | 2,073 | python | en | code | 12 | github-code | 36 |
29238296763 | N, K = map(int, input().split(" "))
# BOJ 2252 (topological sort): order items 1..N given K "A before B" pairs.
# N and K are read on the line above; Kahn's algorithm prints one valid order.
from collections import deque

graph = [[] for _ in range(N + 1)]
degree = [0] * (N + 1)          # number of unmet prerequisites per node
for _ in range(K):
    A, B = map(int, input().split(" "))
    graph[A].append(B)
    degree[B] += 1

# Start from every node with no incoming edge.
queue = deque(i for i in range(1, N + 1) if degree[i] == 0)
while queue:
    X = queue.popleft()          # deque gives O(1) pops; list.pop(0) was O(n)
    for nxt in graph[X]:
        degree[nxt] -= 1
        if degree[nxt] == 0:
            queue.append(nxt)
    print(X, end=' ')
| SketchAlgorithm/19_Choi-JinWoo | 2252.py | 2252.py | py | 486 | python | en | code | 0 | github-code | 36 |
36445673009 | """remove subscriber
Revision ID: f71f10afe911
Revises: 514826a76b2b
Create Date: 2020-03-15 02:09:24.586462
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f71f10afe911'
down_revision = '514826a76b2b'
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: drop the now-unused ``subscribers`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('subscribers')
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: recreate ``subscribers``.

    Restores an integer primary key ``id`` plus a unique ``subscriber_id``
    string column, matching the pre-migration schema.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('subscribers',
                    sa.Column('id', sa.INTEGER(),
                              autoincrement=True, nullable=False),
                    sa.Column('subscriber_id', sa.VARCHAR(),
                              autoincrement=False, nullable=False),
                    sa.PrimaryKeyConstraint('id', name='subscribers_pkey'),
                    sa.UniqueConstraint(
                        'subscriber_id', name='subscribers_subscriber_id_key')
                    )
    # ### end Alembic commands ###
| mhelmetag/mammoth | alembic/versions/f71f10afe911_remove_subscriber.py | f71f10afe911_remove_subscriber.py | py | 1,071 | python | en | code | 1 | github-code | 36 |
72294810025 | import streamlit as st
import pandas as pd
import numpy as np
# Wedding budget planner for the region of Southern France
st.title("Wedding Budget Planner for the Region of Southern France")
# Filter to allow the user to narrow down their options
st.subheader("Filter")
number_of_guests = st.slider("Number of guests", 0, 500)
type_of_accommodation = st.selectbox("Type of accommodation", ["Hotel", "Villa", "Castle"])
type_of_catering = st.selectbox("Type of catering", ["Sit-down dinner", "Buffet", "Family-style"])
type_of_entertainment = st.selectbox("Type of entertainment", ["Live band", "DJ", "Karaoke"])
type_of_decor = st.selectbox("Type of decor", ["Simple", "Elegant", "Extravagant"])
# Selector of the number of days for the wedding
st.subheader("Number of days")
number_of_days = st.slider("Number of days for the wedding", 0, 30)
# Maximum and minimum budget
st.subheader("Budget")
maximum_budget = st.number_input("Maximum budget", 0, 100000)
# The minimum is capped by the chosen maximum so the range stays consistent.
minimum_budget = st.number_input("Minimum budget", 0, maximum_budget)
# OpenAI API connection
st.subheader("OpenAI API")
openAI_connect = st.checkbox("Connect to OpenAI API for budget advice")
if openAI_connect:
    st.text("Connecting to OpenAI API...")
    # Connect to OpenAI API
    # Retrieve budget advice
    st.text("Retrieving budget advice...")
    # NOTE(review): stub only — no real API call is made; the advice is hard-coded.
    st.text("Budget advice: Spend wisely and get the most value for your money.") # dummy advice
st.success("Done!") # when the user is done creating the budget plan
st.button("Save budget plan") # save the budget plan
| karlotimmerman/budget_heroku | hello.py | hello.py | py | 1,545 | python | en | code | 0 | github-code | 36 |
4179808886 | '''
constants used throughout project
'''
import numpy as np
from astropy.cosmology import FlatLambdaCDM
# When True, downstream scripts recompute results instead of using cached output.
RERUN_ANALYSIS = False
## set cosmology to Planck 2018 Paper I Table 6
cosmo = FlatLambdaCDM(H0=67.32, Om0=0.3158, Ob0=0.03324)
boss_h = 0.676 ## h that BOSS uses.
h = 0.6732 ## planck 2018 h
eta_star = cosmo.comoving_distance(1059.94).value ## z_drag from Planck 2018 cosmology paper Table 2, all Planck alone
rs = 147.09 ## try rs=r_drag from Planck 2018 same table as z_drag above
lstar = np.pi*eta_star/rs ## acoustic-scale multipole: l* = pi * eta* / r_s
dklss = np.pi/19. ##width of last scattering -- see Bo & David's paper.
| kpardo/mg_bao | mg_bao/constants.py | constants.py | py | 593 | python | en | code | 1 | github-code | 36 |
14525591993 | from TDAs import grafo, ciudades
import csv
def LeerPJ(ruta):
    """Parse a city/route text file and build the travel graph.

    File layout: a vertex count, one "name,x,y" line per city, then an
    edge count and one "origin,destination,time" line per route.

    Returns (graph, cities) where `cities` maps city name -> Ciudad.
    """
    city_graph = grafo.Grafo()
    by_name = {}
    with open(ruta) as fh:
        # First section: the cities (graph vertices).
        for _ in range(int(fh.readline())):
            name, x, y = fh.readline().rstrip().split(",")
            # NOTE(review): Ciudad receives (name, y, x) — argument order
            # looks swapped; confirm against the Ciudad constructor.
            city = ciudades.Ciudad(name, y, x)
            by_name[name] = city
            city_graph.agregar_vertice(city)
        # Second section: the routes (weighted edges).
        for _ in range(int(fh.readline())):
            src, dst, minutes = fh.readline().rstrip().split(",")
            city_graph.agregar_arista(by_name[src], by_name[dst], int(minutes))
    return city_graph, by_name
def leer_recomendaciones(dicc_ciudades, grafo_original, ruta_csv):
    """Build a directed recommendation graph over the same set of cities.

    Copies every vertex of `grafo_original`, then adds one directed edge per
    CSV row (origin_name, destination_name), resolving names via `dicc_ciudades`.
    """
    recommendations = grafo.Grafo(es_dirigido=True)
    for city in grafo_original.obtener_vertices():
        recommendations.agregar_vertice(city)
    with open(ruta_csv) as fh:
        for row in csv.reader(fh, delimiter=','):
            recommendations.agregar_arista(dicc_ciudades[row[0]], dicc_ciudades[row[1]])
    return recommendations
| juandelaHD/Planificador-de-Viaje---Qatar | lectura_archivos.py | lectura_archivos.py | py | 1,317 | python | es | code | 0 | github-code | 36 |
2723494159 | #!/usr/bin/python3
"""tracking the iss using
api.open-notify.org/astros.json | Alta3 Research"""
# notice we no longer need to import urllib.request or json
import requests
## Define URL
MAJORTOM = 'http://api.open-notify.org/astros.json'
def main():
    """Fetch the current astronaut roster from Open Notify and print it."""
    # Single GET request; .json() decodes the body into lists/dicts for us
    # (other verbs exist too: requests.post/put/delete/head).
    groundctrl = requests.get(MAJORTOM)
    crew_data = groundctrl.json()
    # Show the decoded payload, then break it down field by field.
    print("\n\nConverted Python data")
    print(crew_data)
    print('\n\nPeople in Space: ', crew_data['number'])
    people = crew_data['people']
    print(people)
    # Inner quotes must differ from the f-string's own quotes,
    # hence the single quotes around the dict keys below.
    for astronaut in crew_data["people"]:
        print(f"{astronaut['name']} is on the {astronaut['craft']}")

if __name__ == "__main__":
    main()
| chadkellum/mycode | iss/requests-ride_iss.py | requests-ride_iss.py | py | 1,295 | python | en | code | 0 | github-code | 36 |
32179513329 | import sys
from PyQt5 import QtWidgets
def Pencere():
    """Show a small demo window with OK/Cancel buttons pinned to the bottom-right."""
    app = QtWidgets.QApplication(sys.argv)

    ok_button = QtWidgets.QPushButton("Tamam")
    cancel_button = QtWidgets.QPushButton("İptal")

    # Stretch first so the buttons are pushed to the right edge.
    button_row = QtWidgets.QHBoxLayout()
    button_row.addStretch()
    button_row.addWidget(ok_button)
    button_row.addWidget(cancel_button)

    # Stretch first so the button row is pushed to the bottom edge.
    root = QtWidgets.QVBoxLayout()
    root.addStretch()
    root.addLayout(button_row)

    window = QtWidgets.QWidget()
    window.setWindowTitle("PyQt5 Ders 4")
    window.setLayout(root)
    window.setGeometry(100, 100, 500, 500)
    window.show()
    sys.exit(app.exec_())

Pencere()
| mustafamuratcoskun/Sifirdan-Ileri-Seviyeye-Python-Programlama | PyQt5 - Arayüz Geliştirme/Videolarda Kullanılan Kodlar/horizontal ve vertical layout.py | horizontal ve vertical layout.py | py | 643 | python | en | code | 1,816 | github-code | 36 |
7537206122 | from django.test import TestCase, tag
from djangoplicity.newsletters.models import NewsletterType, Newsletter
from webb.tests import utils
@tag('newsletters')
class TestNewsletters(TestCase):
    """Exercise admin newsletter generation plus the public list/detail views."""

    fixtures = [
        'test/common',
        'test/media',
        'test/announcements',
        'test/releases',
        'test/highlights',
        'test/newsletters'
    ]

    def setUp(self):
        # A staff client is needed for the admin generation endpoint.
        self.client = utils.get_staff_client()
        self.newsletter_types = NewsletterType.objects.all()
        self.newsletter = Newsletter.objects.filter(published=True, send__isnull=False).first()

    def test_newsletter_generation(self):
        # Every configured type should generate over a very wide date window.
        for nl_type in self.newsletter_types:
            payload = {
                'type': nl_type.pk,
                'start_date_0': '01/01/2000',
                'start_date_1': '00:00:00',
                'end_date_0': '31/12/2220',
                'end_date_1': '23:59:59',
                '_generate': 'Generate'
            }
            response = self.client.post(
                '/admin/newsletters/newsletter/new/',
                payload,
                follow=True
            )
            # Success redirects to the change page of the new newsletter.
            utils.check_redirection_to(self, response, r'/admin/newsletters/newsletter/[0-9]+/change/')

    def test_newsletter_list(self):
        url = '/newsletters/{}/'.format(self.newsletter.type.slug)
        # A search with no hits shows the empty-state message...
        no_hits = self.client.get('{}{}'.format(url, '?search=this+does+not+exists'))
        self.assertContains(no_hits, 'No entries were found')
        # ...while the plain listing shows the newsletter type's name.
        listing = self.client.get(url)
        self.assertContains(listing, self.newsletter.type.name)

    def test_newsletter_detail(self):
        response = self.client.get('/newsletters/{}/html/{}/'.format(self.newsletter.type.slug, self.newsletter.pk))
        self.assertContains(response, self.newsletter.subject)
| esawebb/esawebb | webb/tests/newsletters.py | newsletters.py | py | 1,838 | python | en | code | 0 | github-code | 36 |
39069951023 | # https://leetcode.com/problems/sqrtx/
class Solution:
    # Iterative Binary Search
    # Time: O(logn), Space: O(1)
    def mySqrt(self, x: int) -> int:
        """Return floor(sqrt(x)) for a non-negative integer x."""
        if x <= 1:
            return x
        left, right = 2, x
        while left <= right:
            mid = (left + right) // 2
            if mid * mid == x:
                return mid
            elif mid * mid < x:
                left = mid + 1
            else:
                right = mid - 1
        # When the loop ends, `right` is the largest value with right*right <= x.
        return right

    # Newton-Raphson method in exact integer arithmetic (Heron's method).
    # Time: O(log log x) iterations, Space: O(1)
    # FIX: the previous version ran a fixed 20 float iterations starting from
    # 1.0, which has not converged yet for large x (e.g. x = 10**18 needs
    # roughly 30 iterations just to reach the right magnitude) and float64
    # loses exactness past 2**52.  The integer iteration below is exact and
    # provably terminates at floor(sqrt(x)).
    def newtonRaphson(self, x: int) -> int:
        """Return floor(sqrt(x)) for a non-negative integer x."""
        if x <= 1:
            return x
        ans = x
        while ans * ans > x:
            ans = (ans + x // ans) // 2
        return ans
| grenkoff/leetcode | solutions/0069. Sqrt(x)/Sqrt(x).py | Sqrt(x).py | py | 898 | python | en | code | 0 | github-code | 36 |
17885393929 | from django.shortcuts import render
from django.views import View
from django.http.response import JsonResponse
from django.template.loader import render_to_string
from .models import Topic
from .forms import TopicForm
class BbsView(View):
    """Simple bulletin board: GET renders the topic list, POST adds a topic via AJAX."""

    def get(self, request, *args, **kwargs):
        # Full-page render of every topic.
        return render(request, "posting/index.html", {"topics": Topic.objects.all()})

    def post(self, request, *args, **kwargs):
        # JSON protocol: {"error": bool, "content": rendered list fragment on success}.
        payload = {"error": True}
        form = TopicForm(request.POST)
        if not form.is_valid():
            print("Validation Error")
            return JsonResponse(payload)
        form.save()
        payload["error"] = False
        # Re-render the topic list fragment so the client can swap it in place.
        fragment_context = {"topics": Topic.objects.all()}
        payload["content"] = render_to_string("posting/content.html", fragment_context, request)
        return JsonResponse(payload)
index = BbsView.as_view() | inatai/super_tsp | posting/views.py | views.py | py | 1,026 | python | en | code | 0 | github-code | 36 |
32473831452 | from config import bot, chat_id
from plugins.error import Error
import requests
from bs4 import BeautifulSoup
import time
from telebot import types
from plugins.error import in_chat
#________________________________________________________________________________________________________________
#Website screenshots
#________________________________________________________________________________________________________________
@bot.message_handler(commands=['url'])
@in_chat()
def screen(m):
    """Handle "/url <address>": reply with a screenshot of the given website.

    m.text[5:] strips the "/url " prefix to get the raw address.
    """
    # Remove the user's command message from the chat.
    bot.delete_message(m.chat.id, m.message_id)
    HEADERS = {"User-Agent" : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36"}
    # Inline keyboard with a single "delete this message" button.
    keyboard = types.InlineKeyboardMarkup()
    keyboard_delete = types.InlineKeyboardButton(text = "❌", callback_data = "delete")
    keyboard.add(keyboard_delete)
    try:
        res = requests.get(m.text[5:], headers = HEADERS) # crude adult-content guard: fetch the page ourselves first
        bool_ = ("Порн" in res.text or "Porn" in res.text or "porn" in res.text or "порн" in res.text)
        if bool_ == 1:
            # NSFW hit: post a warning sticker, then remove it after ~15 s.
            bot.send_sticker(m.chat.id, "CAACAgQAAxkBAAIaSF93cwIsw1oPRGtOdZHTF8_UsBTDAAJYAAO6erwZr3-jVb-xFsgbBA")
            time.sleep (15.5)
            bot.delete_message(m.chat.id, m.message_id + 1)
        else:
            # Screenshot rendered by the external mini.s-shot.ru service.
            bot.send_photo(m.chat.id, photo="https://mini.s-shot.ru/1366x768/JPEG/1366/Z100/?" + m.text[5:], reply_markup = keyboard)
    except Exception as e:
        print ("❌ ОШИБКА ❌")
        # NOTE(review): str + Exception raises TypeError — this line itself
        # throws inside the handler; should be "screenshot.py " + str(e).
        print ("screenshot.py " + e)
        Error(m, bot).error() | evilcatsystem/telegram-bot | plugins/screenshot.py | screenshot.py | py | 1624 | python | en | code | 1 | github-code | 36
28068784452 | # 2021-05-20
# Source: https://programmers.co.kr/learn/courses/30/lessons/17683
# Problem: "The song I just heard" (bangeum geu gok)
# Default sample input consumed by the print() call at the bottom of the script.
m = "ABCDEFG"
musicinfos = ["11:50,12:14,HELLO,CDEFGAB", "13:00,13:05,WORLD,ABCDEF"]
# Other official sample inputs, kept for manual testing:
# m='CC#BCC#BCC#BCC#B'
# musicinfos=["03:00,03:30,FOO,CC#B", "04:00,04:08,BAR,CC#BCC#BCC#B"]
# m='ABC'
# musicinfos=["12:00,12:14,HELLO,C#DEFGAB", "13:00,13:05,WORLD,ABCDEF"]
def solution(m, musicinfos):
    """Return the title of the logged song whose actually-played melody
    contains the remembered fragment `m`.

    Each musicinfo is "HH:MM,HH:MM,title,melody".  Ties are broken by the
    longest play time, then by registration order.  Returns '(None)' when
    nothing matches.
    """
    def normalize(melody):
        # Collapse each sharp note into one lowercase character so substring
        # search cannot confuse e.g. 'C' with 'C#'.
        return (melody.replace('C#', 'c').replace('D#', 'd')
                      .replace('F#', 'f').replace('A#', 'a')
                      .replace('G#', 'g'))

    m = normalize(m)
    matches = []
    for idx, info in enumerate(musicinfos):
        start, end, title, melody = info.split(',')
        sh, sm = map(int, start.split(':'))
        eh, em = map(int, end.split(':'))
        play_time = (eh * 60 + em) - (sh * 60 + sm)
        melody = normalize(melody)
        # FIX: build exactly the string that was on air — the melody repeated
        # and truncated to the play time.  The previous version searched the
        # melody repeated up to 1440 characters regardless of play time,
        # which could match notes that were never actually played.
        q, r = divmod(play_time, len(melody))
        played = melody * q + melody[:r]
        if m in played:
            matches.append((play_time, idx, title))
    if not matches:
        return '(None)'
    # Longest play time first; registration order breaks ties.
    matches.sort(key=lambda t: (-t[0], t[1]))
    return matches[0][2]
print(solution(m, musicinfos)) | hwanginbeom/algorithm_study | 2.algorithm_test/21.05.16/21.05.20_방금그곡_kyounglin.py | 21.05.20_방금그곡_kyounglin.py | py | 1,838 | python | en | code | 3 | github-code | 36 |
1941672558 | #TGP 2018-01-12
#Snippet for adding images to cards
import sys
import os
import re
import math
import json
import subprocess as sub
import sublime
import sublime_plugin
class UmbertoAddImage(sublime_plugin.TextCommand):
    """Sublime Text command: pick a file from the project's images/ folder
    and insert a LaTeX figure environment referencing it at the cursor."""

    # LaTeX snippet inserted for the chosen image; the placeholders are the
    # file name and its extension-less name (used as the figure label).
    # FIX: backslashes are escaped explicitly — the previous r''-plus-string
    # concatenation contained invalid escape sequences ('\c', '\l', '\e')
    # that only worked by accident and warn on modern Python.
    FIGURE_TEMPLATE = (
        "\\begin{figure}\n"
        "\\caption{Caption}\n"
        "\\centering\n"
        "\\includegraphics[width=0.7\\textwidth]{%s}\n"
        "\\label{fig:%s}\n"
        "\\end{figure}"
    )

    def run(self, edit):
        # Location of the current project, as recorded in the plugin settings.
        settings = sublime.load_settings('Umberto.sublime-settings')
        proj_dir = settings.get('current_project_directory')
        img_dir = proj_dir + "/images"
        self.img_list = self.get_img_list(img_dir)
        self.view.window().show_quick_panel(self.img_list, self.on_done)

    def get_img_list(self, img_dir):
        """Return the file names available in the project's image directory."""
        return list(os.listdir(img_dir))

    def on_done(self, jj):
        """Quick-panel callback; jj is the selected index (always an int).

        jj == -1 means the user cancelled; the old math.isnan() check could
        never fire for an int and was dropped.
        """
        if jj < 0:
            return
        img_name = self.img_list[jj]
        # FIX: os.path.splitext handles names without an extension too — the
        # old regex returned None and crashed on e.g. 'README' or '.hidden'.
        trunc_img_name = os.path.splitext(img_name)[0]
        print(trunc_img_name)
        self.view.run_command('insert', {'characters': self.FIGURE_TEMPLATE % (img_name, trunc_img_name)})
| tgparton/Umberto | add_image.py | add_image.py | py | 1,570 | python | en | code | 0 | github-code | 36 |
21671571550 | import os
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
# Make cuDNN convolutions reproducible run-to-run (disables the
# autotuner's nondeterministic algorithm selection, at some speed cost).
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
import numpy as np
class Basset(nn.Module):
    """
    This model is also known to do well in transcription factor binding.
    Shallower than factorized Basset, but its larger convolutions may be
    able to pick up longer motifs.
    """
    def __init__(self, dropout, num_classes):
        super(Basset, self).__init__()
        # The seed is set right before layer construction, so the order of
        # the module definitions below fixes the initial weights — do not
        # reorder them.
        torch.manual_seed(3278)
        self.dropout = dropout
        self.conv1 = nn.Conv2d(4, 300, (19, 1), stride=(1, 1), padding=(9, 0))
        self.conv2 = nn.Conv2d(300, 200, (11, 1), stride=(1, 1), padding=(5, 0))
        self.conv3 = nn.Conv2d(200, 200, (7, 1), stride=(1, 1), padding=(4, 0))
        self.bn1 = nn.BatchNorm2d(300)
        self.bn2 = nn.BatchNorm2d(200)
        self.bn3 = nn.BatchNorm2d(200)
        self.maxpool1 = nn.MaxPool2d((3, 1))
        self.maxpool2 = nn.MaxPool2d((4, 1))
        self.maxpool3 = nn.MaxPool2d((4, 1))
        self.fc1 = nn.Linear(4200, 1000)
        self.bn4 = nn.BatchNorm1d(1000)
        self.fc2 = nn.Linear(1000, 1000)
        self.bn5 = nn.BatchNorm1d(1000)
        self.fc3 = nn.Linear(1000, num_classes)

    def forward(self, s):
        """Input: (batch, 1000, 4) one-hot sequences.  Returns the sigmoid
        class scores and the 1000-d penultimate features."""
        x = s.permute(0, 2, 1).contiguous().view(-1, 4, 1000, 1)
        # Three conv -> batchnorm -> ReLU -> maxpool stages: length 1000 -> 333 -> 83 -> 21.
        stages = (
            (self.conv1, self.bn1, self.maxpool1),
            (self.conv2, self.bn2, self.maxpool2),
            (self.conv3, self.bn3, self.maxpool3),
        )
        for conv, bn, pool in stages:
            x = pool(F.relu(bn(conv(x))))
        x = x.view(-1, 4200)  # 200 channels * 21 positions
        x = F.dropout(F.relu(self.bn4(self.fc1(x))), p=self.dropout, training=self.training)
        x = F.dropout(F.relu(self.bn5(self.fc2(x))), p=self.dropout, training=self.training)
        intermediate_out = x
        return torch.sigmoid(self.fc3(x)), intermediate_out
class FactorizedBasset(nn.Module):
    """
    This model is known to do well in predicting transcription factor binding. This means it may be good
    at predicting sequence localization as well, if its architecture lends itself well to predicting sequence
    motifs in general.

    Replaces Basset's large convolutions with stacks of smaller (factorized)
    ones.  NOTE: the seed is set right before the layer_* helpers run, so the
    construction order below fixes the initial weights — do not reorder.
    """
    def __init__(self, dropout, num_classes=1):
        super(FactorizedBasset, self).__init__()
        torch.manual_seed(3278)
        self.dropout = dropout
        self.num_cell_types = num_classes
        # The helpers both register the sub-modules on self and return a
        # Sequential wrapping them.
        self.layer1 = self.layer_one()
        self.layer2 = self.layer_two()
        self.layer3 = self.layer_three()
        self.maxpool1 = nn.MaxPool2d((3, 1))
        self.maxpool2 = nn.MaxPool2d((4, 1))
        self.maxpool3 = nn.MaxPool2d((4, 1))
        self.fc1 = nn.Linear(4200, 1000)
        self.bn4 = nn.BatchNorm1d(1000)
        self.fc2 = nn.Linear(1000, 1000)
        self.bn5 = nn.BatchNorm1d(1000)
        # self.fc3 = nn.Linear(1000, self.num_cell_types)
        self.fc3 = nn.Linear(1000, num_classes)
    def layer_one(self):
        """Factorized stand-in for Basset's 19-wide conv: 4 -> 300 channels."""
        self.conv1a = nn.Conv2d(4, 48, (3, 1), stride=(1, 1), padding=(1, 0))
        self.conv1b = nn.Conv2d(48, 64, (3, 1), stride=(1, 1), padding=(1, 0))
        self.conv1c = nn.Conv2d(64, 100, (3, 1), stride=(1, 1), padding=(1, 0))
        self.conv1d = nn.Conv2d(100, 150, (7, 1), stride=(1, 1), padding=(3, 0))
        self.conv1e = nn.Conv2d(150, 300, (7, 1), stride=(1, 1), padding=(3, 0))
        self.bn1a = nn.BatchNorm2d(48)
        self.bn1b = nn.BatchNorm2d(64)
        self.bn1c = nn.BatchNorm2d(100)
        self.bn1d = nn.BatchNorm2d(150)
        self.bn1e = nn.BatchNorm2d(300)
        tmp = nn.Sequential(self.conv1a, self.bn1a, nn.ReLU(inplace=True),
                            self.conv1b, self.bn1b, nn.ReLU(inplace=True),
                            self.conv1c, self.bn1c, nn.ReLU(inplace=True),
                            self.conv1d, self.bn1d, nn.ReLU(inplace=True),
                            self.conv1e, self.bn1e, nn.ReLU(inplace=True))
        return tmp
    def layer_two(self):
        """Factorized stand-in for Basset's 11-wide conv: 300 -> 200 channels."""
        self.conv2a = nn.Conv2d(300, 200, (7,1), stride = (1,1), padding = (3,0))
        self.conv2b = nn.Conv2d(200, 200, (3,1), stride = (1,1), padding = (1, 0))
        self.conv2c = nn.Conv2d(200, 200, (3, 1), stride =(1,1), padding = (1,0))
        self.bn2a = nn.BatchNorm2d(200)
        self.bn2b = nn.BatchNorm2d(200)
        self.bn2c = nn.BatchNorm2d(200)
        tmp = nn.Sequential(self.conv2a,self.bn2a, nn.ReLU(inplace= True),
                            self.conv2b,self.bn2b, nn.ReLU(inplace=True),
                            self.conv2c, self.bn2c, nn.ReLU(inplace=True))
        return tmp
    def layer_three(self):
        """Single 7-wide conv stage (padding 4 widens length by 2 so the
        flattened size works out to 4200)."""
        self.conv3 = nn.Conv2d(200, 200, (7,1), stride =(1,1), padding = (4,0))
        self.bn3 = nn.BatchNorm2d(200)
        return nn.Sequential(self.conv3, self.bn3, nn.ReLU(inplace=True))
    def forward(self, s):
        """Expect input batch_size x 1000 x 4; returns (sigmoid scores,
        flattened 4200-d conv features)."""
        s = s.permute(0, 2, 1).contiguous() # batch_size x 4 x 1000
        s = s.view(-1, 4, 1000, 1) # batch_size x 4 x 1000 x 1 [4 channels]
        s = self.maxpool1(self.layer1(s)) # batch_size x 300 x 333 x 1
        s = self.maxpool2(self.layer2(s)) # batch_size x 200 x 83 x 1
        s = self.maxpool3(self.layer3(s)) # batch_size x 200 x 21 x 1
        s = s.view(-1, 4200)
        conv_out = s
        s = F.dropout(F.relu(self.bn4(self.fc1(s))), p=self.dropout, training=self.training) # batch_size x 1000
        s = F.dropout(F.relu(self.bn5(self.fc2(s))), p=self.dropout, training=self.training) # batch_size x 1000
        s = self.fc3(s)
        s = torch.sigmoid(s)
        return s, conv_out
# Construct the larger model once so shape/argument errors surface early.
if __name__ == "__main__":
    # Easy sanity check that nothing is blatantly wrong
    x = FactorizedBasset(dropout=0.2, num_classes=8)
| wukevin/rnagps | rnagps/models/basset_family.py | basset_family.py | py | 6,034 | python | en | code | 8 | github-code | 36 |
41974155562 | #@ File (label = "Input directory", style = "directory") srcFile
#@ String (label = "File extension", value=".dv") ext
#@ Integer (label = "cell contour channel", value=2) contour
#@ Integer (label = "GFP channel", value = 3) countchannel
#@ Integer (label = "mCherry channel", value = 4) linechannel
#@ Integer (label = "Magnification", value = 40) magnification
#@ Float (label = "Minimum distance (um)", value = 1) minimumdist
#@ Float (label = "Maximum distance (um)", value = 2.5) maximumdist
#@ Integer (label = "Minimum cell size", value = 200) min_size
#@ Integer (label = "Maximum cell size", value = 999999999) max_size
"""
count_cells_foci.py
created by: Erick Martins Ratamero
date: 03/07/18
last updated: 20/08/18
From an open image in Fiji, generate cells ROIs from
thresholding a channel, then count the foci inside each
ROI for GPF channel and generate distances between foci in
the same ROI at mCherry channel. Finally, it saves the results
as a csv file.
"""
import os
from java.io import File
from ij import IJ, ImageStack, ImagePlus
from ij.plugin.frame import RoiManager
import math
from ij import WindowManager
from ij.measure import ResultsTable
from loci.plugins import BF
from ij.io import FileSaver
from ij.process import ImageStatistics as IS
# Running per-folder tallies: total cell count, cells whose nearest mCherry
# focus pair falls in [minimumdist, maximumdist], and histograms of cells by
# GFP foci count (keys 0-3 plus "more").
countcells = 0
count_range = 0
cellsperfoci = {0:0, 1:0, 2:0, 3:0, "more":0}
cellsperfoci_range = {0:0, 1:0, 2:0, 3:0, "more":0}
# calculate total number of channels based on
# parameter inputs (base of 2 — presumably contour plus one other; confirm)
totchannels = 2
if (linechannel > 0):
    totchannels = totchannels +1
if (countchannel > 0):
    totchannels = totchannels +1
# figure out which one is the relevant channel for cell shapes
# by elimination.  NOTE: range() returning a list supporting .remove() only
# works under Jython/Python 2 (the ImageJ script interpreter).
channels = range(totchannels)
if (linechannel > 0):
    channels.remove(linechannel-1)
if (countchannel > 0):
    channels.remove(countchannel-1)
channels.remove(contour-1)
cellchannel=channels[0]
# camera pixel size for the DVs - relevant for pixel size
# given magnification
physicalPixel = 6.45
# Return the shared ImageJ ROI Manager, creating it on first use;
# new=True additionally clears any ROIs it currently holds.
def get_roi_manager(new=False):
    manager = RoiManager.getInstance() or RoiManager()
    if new:
        manager.runCommand("Reset")
    return manager
srcDir = srcFile.getAbsolutePath()
# build the list of file names in the input directory (recursive walk;
# filenames sorted within each folder so processing order is stable)
folders = []
filenamelist = []
for root, directories, filenames in os.walk(srcDir):
    folders.append(root)
    filenames.sort()
    filenamelist.append(filenames)
# Cross-folder accumulators for the final all_folders_summary.csv.
totalcellsperfoci = []
totalcellsperfoci_range = []
totalcountcells = []
totalcount_range = []
# Rebinds the throwaway os.walk loop variable: now the list of folders that
# actually contained images.
directories = []
# Main driver: one pass per folder; per-folder counters reset each time.
# skip irrelevant filenames, do stuff for relevant ones
for counter in range(len(folders)):
    cellsperfoci = {0:0, 1:0, 2:0, 3:0, "more":0}
    cellsperfoci_range = {0:0, 1:0, 2:0, 3:0, "more":0}
    countcells = 0
    count_range = 0
    srcDir = folders[counter]
    for filename in filenamelist[counter]:
        if not filename.endswith(ext):
            continue
        # generate full file path for opening
        print(os.path.join(srcDir, filename))
        path = os.path.join(srcDir, filename)
        # use the Bioformats importer to open image
        IJ.run("Bio-Formats Importer", "open=" + path + " autoscale color_mode=Default view=Hyperstack stack_order=XYCZT");
        image = IJ.getImage()
        directory = srcDir
        # calculate pixel size given camera pixel and magnification
        size_x = physicalPixel/magnification
        size_y = physicalPixel/magnification
        # get stack from current image
        stack = image.getStack()
        # create empty stacks for split channels
        countfoci_stack = ImageStack(image.width, image.height)
        linefoci_stack = ImageStack(image.width, image.height)
        # initialise variables for calculating in-focus slice
        maxstddev = 0
        infocus = 0
        # now we go through the original image and retrieve slices to
        # create substacks with split channels.  Slices are interleaved by
        # channel, hence the i*totchannels - (totchannels - channel) indexing.
        for i in range(1, image.getNSlices()+1):
            if (countchannel > 0):
                myslice = stack.getProcessor(i*totchannels -(totchannels-countchannel))
                countfoci_stack.addSlice(str(i), myslice)
            if (linechannel > 0):
                myslice = stack.getProcessor(i*totchannels -(totchannels-linechannel))
                linefoci_stack.addSlice(str(i), myslice)
            # we also calculate the standard deviation on each cell channel slice
            # and update the maximum value of that (highest contrast = in focus)
            print("getting slice", i*totchannels -(totchannels-1 - cellchannel))
            myslice = stack.getProcessor(i*totchannels -(totchannels-1 - cellchannel))
            stats = IS.getStatistics(myslice)
            if stats.stdDev > maxstddev:
                maxstddev = stats.stdDev
                infocus = i
            #print(i,infocus,maxstddev)
        # we set the relevant z-slice to be the maximum std dev one and get
        # that "stack" (it's a single slice)
        zslice = infocus
        print("zslice:", zslice, zslice*totchannels -(totchannels - 1 -cellchannel))
        cellsproc = stack.getProcessor(zslice*totchannels -(totchannels - 1 -cellchannel))
        # we create a new image from that z-slice and display it
        im_slice = ImagePlus("stack", cellsproc)
        im_slice.show()
        fs = FileSaver(im_slice)
        filepath = directory + "/" + filename + "_slice.tif"
        fs.saveAsTiff(filepath)
image.close()
# from now on, "image" refers to in-focus z-slice with cells
image = IJ.getImage()
# then, we create a binary image using Default thresholding
IJ.run(im_slice,"Auto Threshold", "method=Default ignore_black ignore_white white");
IJ.run("Make Binary");
# it underestimates areas a bit, so we dilate them once and then
# separate contiguous cells using Watershed
IJ.run("Dilate");
IJ.run( "Watershed");
# we establish a minimum size of 200 for something to be considered
# a cell, add cells to ROI and create an outline image
IJ.run(im_slice,"Analyze Particles...", "size="+str(min_size)+"-"+str(max_size)+" show=Outlines clear add");
# getting and saving outline image as a tif
image = IJ.getImage()
fs = FileSaver(image)
filepath = directory + "/" + filename + ".tif"
fs.saveAsTiff(filepath)
# now we can save the outlines and save the cell channel image.
image.changes = False
image.close()
image = IJ.getImage()
image.changes = False
image.close()
# now, we get the ROIs generated - these should be the cells
rm = get_roi_manager()
rois = rm.getRoisAsArray()
        # this segment only runs if a GFP channel is present
        if (countchannel > 0):
            # generate image with GFP channel
            ImagePlus("stack", countfoci_stack).show()
            image = IJ.getImage()
            # run max projection to get all foci, then close original image
            IJ.run("Z Project...", "projection=[Max Intensity]");
            image.close()
            image = IJ.getImage()
            # find maxima corresponding to foci.
            # NOTE(review): stale comment — the code uses noise=750 here,
            # not 50 (50 is used for the mCherry channel below).
            IJ.run("Find Maxima...", "noise=750 output=List");
            image.close()
            # get the results table with maxima and add a "cell" column to it
            rt = ResultsTable.getResultsTable()
            rt.addValue("cell", 0)
            countcells = countcells + len(rois)
            print("countcells: "+str(countcells))
            # ROI i is assigned cell number i+1; cell 0 means "outside all ROIs".
            cell = 1
            for roi in rois:
                # this is looping over cells...
                for count in range(rt.size()):
                    # ... and for each cell this is looping over foci
                    # we get XY coordinates of the foci
                    x = int(rt.getValue("X",count))
                    y = int(rt.getValue("Y", count))
                    # if that cell contains these coordinates, add the cell
                    # number as "cell" value for that foci
                    if roi.contains(x,y):
                        rt.setValue("cell", count, cell)
                cell = cell + 1
            # save this results table
            rt.save(directory+"/"+filename+"_GFP.csv" )
            print("saving at ",directory+"/"+filename+"_GFP.csv")
            # create summary results table, with "cell" and "foci_count" columns
            consol = ResultsTable()
            consol.incrementCounter()
            consol.addValue("cell", 0)
            consol.addValue("foci_count", 0)
            rowcount = 1
            # loop over all cells, add cell number to the "cell" column
            for count in range(cell):
                consol.setValue("cell",count,count)
            # loop over all foci
            for count in range(rt.size()):
                # get in which cell that foci is and increase the
                # counter on the summary results table
                currcell = int(rt.getValue("cell",count))
                consol.setValue("foci_count", currcell, int(consol.getValue("foci_count", currcell))+1)
            # histogram cells by foci count; row 0 (outside-ROI foci) excluded
            for count in range(1,cell):
                foci = consol.getValue("foci_count",count)
                if foci<=3:
                    cellsperfoci[foci] = cellsperfoci[foci] + 1
                else:
                    cellsperfoci["more"] = cellsperfoci["more"] +1
            print(cellsperfoci)
            # close the results window
            IJ.selectWindow("Results");
            IJ.run("Close");
        # this segment only runs if a mCherry channel is present.
        # NOTE(review): it reads `consol`, which is only created in the GFP
        # section above — running with countchannel <= 0 and linechannel > 0
        # would raise a NameError at the consol.addValue call below.
        if (linechannel > 0):
            # generate image with mCherry channel
            ImagePlus("stack", linefoci_stack).show()
            image = IJ.getImage()
            # run max projection to get all foci, then close original image
            IJ.run("Z Project...", "projection=[Max Intensity]");
            image.close()
            image = IJ.getImage()
            # find maxima corresponding to foci - noise=50 has worked well
            # empirically
            IJ.run("Find Maxima...", "noise=50 output=List");
            image.close()
            # get the results table with maxima and add a "cell" column to it
            rt = ResultsTable.getResultsTable()
            rt.addValue("cell", 0)
            cell = 1
            for roi in rois:
                # this is looping over cells...
                for count in range(rt.size()):
                    # ... and for each cell this is looping over foci
                    # we get XY coordinates of the foci
                    x = int(rt.getValue("X",count))
                    y = int(rt.getValue("Y", count))
                    # if that cell contains these coordinates, add the cell
                    # number as "cell" value for that foci
                    if roi.contains(x,y):
                        rt.setValue("cell", count, cell)
                cell = cell + 1
            # add columns "dist_to" and "focus" to results table
            rt.addValue("dist_to", 0)
            rt.addValue("focus", 0)
            # add column "is_in_range" to the summary table
            consol.addValue("is_in_range", 0)
            # O(n^2) all-pairs pass: for each focus, find its nearest
            # neighbour within the same cell (distances in um via size_x/y).
            for count in range(rt.size()):
                # loop over the mCherry foci
                # we want to get the minimum distance between foci in the same
                # cell, so we start with a very large value
                mindist = 99999999
                minval = -1
                # cell1 is the cell where the current focus is
                cell1 = rt.getValue("cell",count)
                # for each focus, we loop over all foci to compare that with
                for count2 in range(rt.size()):
                    # cell2 is the cell where the focus being compared to
                    # "main one" is
                    cell2 = rt.getValue("cell",count2)
                    # if we're comparing two different foci in the same cell:
                    if (count != count2 and cell1 == cell2):
                        # get their XY coordinates, calculate 2d distance
                        x1 = rt.getValue("X",count)*size_x
                        x2 = rt.getValue("X",count2)*size_x
                        y1 = rt.getValue("Y",count)*size_y
                        y2 = rt.getValue("Y",count2)*size_y
                        dist = math.sqrt( (x1 - x2)**2 + (y1 - y2)**2 )
                        # if that's the smallest distance from that "main"
                        # focus to another focus in the same cell, replace it
                        if (dist < mindist):
                            mindist = dist
                            minval = count2
                # if the minimum distance from that focus to another one in
                # the same cell is in the relevant range, set "is_in_range" to 1
                if (mindist > minimumdist) and (mindist < maximumdist):
                    consol.setValue("is_in_range", int(cell1), 1)
                # in the mCherry results table, set minimum distance
                # and the focus to which that minimum distance is
                rt.setValue("dist_to", count, mindist)
                rt.setValue("focus", count, minval+1)
            # save the mCherry results table
            rt.save(directory+"/"+filename+"_mcherry.csv" )
            # do the same foci counting procedure as for GFP
            rowcount = 1
            #for count in range(cell):
                #consol.setValue("cell",count,count)
                #consol.setValue("foci_count",count,0)
            #for count in range(rt.size()):
                #currcell = int(rt.getValue("cell",count))
                #print(currcell, "old value", consol.getValue("foci_count", currcell), "new value", int(consol.getValue("foci_count", currcell))+1)
                #consol.setValue("foci_count", currcell, int(consol.getValue("foci_count", currcell))+1)
            IJ.selectWindow("Results");
            IJ.run("Close");
            # tally cells that are in range, split by their GFP foci count
            for count in range(consol.size()):
                inrange = consol.getValue("is_in_range",count)
                if (inrange == 1):
                    count_range = count_range + 1
                    foci = consol.getValue("foci_count",count)
                    if foci<=3:
                        cellsperfoci_range[foci] = cellsperfoci_range[foci] + 1
                    else:
                        cellsperfoci_range["more"] = cellsperfoci_range["more"] +1
            # save the summary results table
            consol.save(directory+"/"+filename+"_summary.csv" )
        # reset the ROI Manager, close it and go to next file (if there is one)
        rm.runCommand("Reset")
        rm.close()
    # Per-folder report, written only if the folder held at least one image.
    # NOTE(review): the percentage lines divide by countcells/count_range —
    # a folder with zero detected (or zero in-range) cells raises
    # ZeroDivisionError here.
    if any(x.endswith(ext) for x in filenamelist[counter]):
        fp = open(srcDir + "/total_summary.csv", "w")
        fp.write("total cells, "+str(countcells)+"\n\n")
        if (linechannel > 0):
            fp.write("cells in range, "+str(count_range)+"\n")
            fp.write("cells with foci and in range, "+str(cellsperfoci_range[1]+cellsperfoci_range[2]+cellsperfoci_range[3])+"\n\n")
            for i in range(4):
                fp.write("cells with "+str(i)+" foci and in range, "+str(cellsperfoci_range[i])+"\n")
            fp.write("cells with more than 3 foci and in range, "+ str(cellsperfoci_range["more"])+"\n\n")
            for i in range(4):
                fp.write("percentage of in range cells with "+str(i)+" foci, "+str(float(cellsperfoci_range[i])/count_range)+"\n")
            fp.write("percentage of in range cells with more than 3 foci, "+str(float(cellsperfoci_range["more"])/count_range)+"\n\n")
        for i in range(4):
            fp.write("cells with "+str(i)+" foci, "+str(cellsperfoci[i])+"\n")
        fp.write("cells with more than 3 foci, "+ str(cellsperfoci["more"])+"\n\n")
        for i in range(4):
            fp.write("percentage of cells with "+str(i)+" foci, "+str(float(cellsperfoci[i])/countcells)+"\n")
        fp.write("percentage of cells with more than 3 foci, "+str(float(cellsperfoci["more"])/countcells)+"\n\n")
        fp.close()
        # stash this folder's results for the cross-folder summary below
        directories.append(srcDir)
        totalcountcells.append(countcells)
        totalcount_range.append(count_range)
        totalcellsperfoci.append(cellsperfoci)
        totalcellsperfoci_range.append(cellsperfoci_range)
# Cross-folder report: one column per processed folder, rows mirror the
# per-folder total_summary.csv layout.
srcDir = srcFile.getAbsolutePath()
fp = open(srcDir + "/all_folders_summary.csv", "w")
fp.write(",")
for i in range(len(directories)):
    fp.write(directories[i]+ ",")
fp.write("\n")
fp.write("total cells, ")
for i in range(len(directories)):
    fp.write(str(totalcountcells[i])+ ",")
fp.write("\n\n")
if (linechannel > 0):
    fp.write("cells in range, ")
    for i in range(len(directories)):
        fp.write(str(totalcount_range[i])+ ",")
    fp.write("\n\n")
    fp.write("cells with foci and in range, ")
    for i in range(len(directories)):
        fp.write(str(totalcellsperfoci_range[i][1]+totalcellsperfoci_range[i][2]+totalcellsperfoci_range[i][3])+ ",")
    fp.write("\n\n")
    for j in range(4):
        fp.write("cells with "+str(j)+" foci and in range, ")
        for i in range(len(directories)):
            fp.write(str(totalcellsperfoci_range[i][j])+ ",")
        fp.write("\n")
    fp.write("cells with more than 3 foci and in range,")
    for i in range(len(directories)):
        fp.write(str(totalcellsperfoci_range[i]["more"])+ ",")
    fp.write("\n\n")
    # NOTE(review): same ZeroDivisionError risk as the per-folder report
    # when a folder's totalcount_range entry is 0.
    for j in range(4):
        fp.write("percentage of in range cells with "+str(j)+" foci, ")
        for i in range(len(directories)):
            fp.write(str(float(totalcellsperfoci_range[i][j])/totalcount_range[i])+ ",")
        fp.write("\n")
    fp.write("percentage of in range cells with more than 3 foci,")
    for i in range(len(directories)):
        fp.write(str(float(totalcellsperfoci_range[i]["more"])/totalcount_range[i])+ ",")
    fp.write("\n\n")
for j in range(4):
    fp.write("cells with "+str(j)+" foci, ")
    for i in range(len(directories)):
        fp.write(str(totalcellsperfoci[i][j])+ ",")
    fp.write("\n")
fp.write("cells with more than 3 foci, ")
for i in range(len(directories)):
    fp.write(str(totalcellsperfoci[i]["more"])+ ",")
fp.write("\n\n")
for j in range(4):
    fp.write("percentage of cells with "+str(j)+" foci, ")
    for i in range(len(directories)):
        fp.write(str(float(totalcellsperfoci[i][j])/totalcountcells[i])+ ",")
    fp.write("\n")
fp.write("percentage of cells with more than 3 foci,")
for i in range(len(directories)):
    fp.write(str(float(totalcellsperfoci[i]["more"])/totalcountcells[i])+ ",")
fp.write("\n\n")
fp.close() | erickmartins/ImageJ_Macros | katy_foci/count_cells_foci.py | count_cells_foci.py | py | 16,326 | python | en | code | 0 | github-code | 36 |
17417433393 | from rest_framework.serializers import ModelSerializer
from tintoreria.empleados.models import Empleado
class EmpleadoSerializer(ModelSerializer):
    """Serializer for Empleado that preserves a client-supplied 'id'."""
    def to_internal_value(self, data):
        # Run normal validation, then copy the raw 'id' back into the
        # validated data — presumably because the parent strips the
        # read-only pk field; confirm against the view that consumes it.
        obj = super(EmpleadoSerializer, self).to_internal_value(data)
        instance_id = data.get('id', None)
        if instance_id:
            obj['id'] = instance_id
        return obj
    class Meta:
        model = Empleado
        fields = ('id',
            'nombre',
            'paterno',
            'materno',
            'puesto',
            'status') | marco2v0/Tintoreria | site/tintoreria/empleados/serializers.py | serializers.py | py | 587 | python | es | code | 0 | github-code | 36
28797419371 | import yfinance as yf
from matplotlib import pyplot as plt
def load_ticker(symbol, start="2020-03-01", end="2020-12-02"):
    """Download daily price history for *symbol* as a pandas DataFrame.

    Args:
        symbol: ticker symbol understood by Yahoo Finance (e.g. "NVDA").
        start, end: ISO date strings bounding the history window. Defaults
            keep the previously hard-coded range, so existing callers are
            unaffected.

    Returns:
        DataFrame with a reset integer index and the Open/High/Close/Low
        columns cast to float64.
    """
    ticker = yf.Ticker(symbol)
    hist = ticker.history(start=start, end=end)
    hist = hist.reset_index()
    for column in ('Open', 'High', 'Close', 'Low'):
        hist[column] = hist[column].astype('float64')
    return hist
def main():
    """Interactive menu: plot opening prices of two tickers on one axis.

    Loops until the user picks option 4. Each valid choice downloads both
    histories, overlays them on a shared axis and shows the figure.
    """
    # Menu choice -> the pair of ticker symbols to compare (replaces three
    # copy-pasted branches that differed only in the symbols).
    pairs = {
        "1": ("NVDA", "INTC"),
        "2": ("INTC", "AMD"),
        "3": ("AMD", "NVDA"),
    }
    while True:
        print("Please choose one of the following choices: ")
        print("1. Display graph for NVDA and INTC")
        print("2. Display graph for INTC and AMD")
        print("3. Display graph for AMD and NVDA")
        print("4. Exit.")
        resp = input(">>> ")
        if resp == "4":
            break
        if resp in pairs:
            first, second = pairs[resp]
            h1 = load_ticker(first)
            h2 = load_ticker(second)
            ax = h1[['Open']].plot(title=first + " vs " + second)
            h2[['Open']].plot(ax=ax)
            plt.legend(["Open " + first, "Open " + second])
        # Matches the original: show() runs every iteration, even for an
        # unrecognised choice.
        plt.show()
main()
| Eric-Wonbin-Sang/CS110Manager | 2020F_final_project_submissions/mcdonaldjillian/CSfinalproject.py | CSfinalproject.py | py | 1,422 | python | en | code | 0 | github-code | 36 |
2892146403 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import phonenumber_field.modelfields
class Migration(migrations.Migration):
    # Initial schema migration: creates the UserProfile table. Generated by
    # Django's makemigrations — edit with care, the migration history depends
    # on it staying consistent.
    dependencies = [
        # Requires the auth-app migration that enforces contenttypes.
        ('auth', '0006_require_contenttypes_0002'),
    ]
    operations = [
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                # Profile-extension pattern: the OneToOne link to the user
                # model doubles as this table's primary key.
                ('user', models.OneToOneField(primary_key=True, to=settings.AUTH_USER_MODEL, serialize=False)),
                ('date_of_birth', models.DateField(verbose_name='date of birth', blank=True, null=True)),
                ('phone_number', phonenumber_field.modelfields.PhoneNumberField(verbose_name='phone number', blank=True, max_length=128)),
                # Single-letter gender code, defaulting to 'U' (unknown).
                ('gender', models.CharField(choices=[('U', 'unknown'), ('M', 'male'), ('F', 'female')], default='U', verbose_name='gender', max_length=1)),
                ('image', models.ImageField(upload_to='', verbose_name='image', blank=True, null=True)),
            ],
        ),
    ]
| abarto/learn_drf_with_images | learn_drf_with_images/user_profiles/migrations/0001_initial.py | 0001_initial.py | py | 1,052 | python | en | code | 21 | github-code | 36 |
34211305302 | from flask import Flask, send_from_directory
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
from .models import *
db = SQLAlchemy()
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(BASE_DIR, 'data.db')
def model_exists(model_class):
    # True if the model's table already exists in its bound database.
    # NOTE(review): relies on model_class.__bind_key__ being defined on the
    # model, and on Table.exists(engine), which was removed in SQLAlchemy 1.4
    # — confirm the pinned SQLAlchemy version (newer releases would need
    # sqlalchemy.inspect(engine).has_table(...)).
    engine = db.get_engine(bind=model_class.__bind_key__)
    return model_class.metadata.tables[model_class.__tablename__].exists(engine)
def create_app(config=None):
    """Application factory: build and wire the Flask app.

    Serves the compiled React frontend from ./build, enables CORS, points
    SQLAlchemy at the local SQLite file and registers the auth/api/pages
    blueprints.

    NOTE(review): the `config` parameter is currently unused — the config
    objects below are commented out; confirm the intended configuration path.
    """
    app = Flask(__name__, static_url_path="", static_folder="build")
    CORS(app)
    # app.config.from_object('config.ProductionConfig')
    # app.config.from_object('config.DevelopmentConfig')
    app.config['SQLALCHEMY_DATABASE_URI'] = SQLALCHEMY_DATABASE_URI
    db.init_app(app)
    # Serve React App
    @app.route("/", defaults={"path": ""})
    @app.route("/<path:path>")
    def serve(path):
        # Hand real static files to the client; fall back to index.html so
        # client-side routing keeps working for unknown paths.
        if path != "" and os.path.exists(app.static_folder + "/" + path):
            return send_from_directory(app.static_folder, path)
        else:
            return send_from_directory(app.static_folder, "index.html")
    # if not model_exists(User):
    #     User.__table__.create(db.session.bind)
    from .auth import auth
    app.register_blueprint(auth)
    from .api import api
    app.register_blueprint(api)
    from .pages import page
    app.register_blueprint(page)
    # admin = User(name='admin', password='123456', admin=True)
    # db.session.add(admin)
    # db.session.commit()
    # app.run(use_reloader=True, port=5000, threaded=True)
    return app
if __name__ == "__main__":
app = create_app()
app.run(use_reloader=True, port=5000, threaded=True)
| wickes1/fullstack-react-flask-overview-backend | app/__init__.py | __init__.py | py | 1,680 | python | en | code | 0 | github-code | 36 |
5179170859 | #coding:utf-8
from django.shortcuts import render_to_response, get_object_or_404
from activity.dao import activityDao
from django.template.context import RequestContext
from collection.dao import collectionDao, select_collection_byReq,\
update_rightTime_byReq, update_wrongTime_byReq
from django.http.response import HttpResponse
import json
from subject.models import Collection, Exercise
from django.views.decorators.csrf import csrf_exempt
from django.utils import simplejson
from exercise.dao import get_tips_byId
def into_collection(req):
    """Render the mistake-collection page, logging the visit as an activity.

    Falls back to the login page when no ``userid`` cookie is present.
    """
    # `in` instead of the Python-2-only dict.has_key().
    if 'userid' in req.COOKIES:
        userid = req.COOKIES['userid']
        # Activity text: "entered the mistake collection" (file targets
        # Python 2, hence str.decode to unicode).
        content = ('进入错题集').decode('utf-8')
        ADao = activityDao({"userid":userid})
        ADao.add_a_activity(content)
        return render_to_response('collection.html',RequestContext(req))
    return render_to_response('login.html',RequestContext(req))
def get_collection(req):
    """Return one page of the user's mistake collection as JSON.

    Query param ``p`` selects the page; ``p == 0`` means "first page" and
    additionally includes the total count under ``numT``. An empty JSON
    object is returned when the user is not logged in.
    """
    if 'userid' in req.COOKIES:  # was dict.has_key() (Python-2 only)
        p = int(req.GET.get('p'))
        cur = p
        rs = {}
        dao = collectionDao({'userid':req.COOKIES['userid']})
        if p==0:
            cur = 1
            cn = dao.select_Ccollection_byUs()
            rs['numT'] = cn
        ts = dao.select_collection_byUs(cur)
        rs['col'] = ts
        return HttpResponse(json.dumps(rs),content_type="application/json")
    return HttpResponse(json.dumps({}),content_type="application/json")
@csrf_exempt
def delete_collection(req,p1):
    # Delete collection entry p1, but only after it has been answered
    # correctly at least once; otherwise return an explanatory tip.
    # NOTE(review): the entry is looked up twice (once for the righttime
    # check, once for the delete) — consider a single fetch.
    if select_collection_byReq({'id':p1}).righttime > 0:
        col = get_object_or_404(Collection,id=p1)
        col.delete()
        return HttpResponse()
    # Tip text: "can only delete once the correct count is > 0".
    return HttpResponse(json.dumps({'tips':'唯有正确次数>0才能删除'}),content_type="application/json")
def into_a_collection(req):
    """Render the single-mistake practice page, or the login page when the
    ``userid`` cookie is missing."""
    if 'userid' in req.COOKIES:  # was dict.has_key() (Python-2 only)
        return render_to_response('a_collection.html',RequestContext(req))
    return render_to_response('login.html',RequestContext(req))
#获取一条错题
def get_a_collection(req,param):
    """Fetch the ``param``-th mistake (1-based) for the logged-in user as JSON."""
    if 'userid' in req.COOKIES:  # was dict.has_key() (Python-2 only)
        rsp = collectionDao({'userid':req.COOKIES['userid']}).select_a_collection_byUs(int(param)-1)
        return HttpResponse(json.dumps(rsp), content_type="application/json")
    return HttpResponse(json.dumps({}), content_type="application/json")
'''
验证错题答案:1.获取登录信息
2.获取json
3.判断答案:根据题目id、answer get——》存在:根据collection.id增加正确次数,返回下一错题详情
不存在:根据collection.id增加错误次数,返回tips
'''
@csrf_exempt
def check_answer(req):
    """Check a submitted answer for a collected mistake.

    Expects a JSON body with ``title`` (the exercise: id + answer), ``id``
    (the collection row) and ``num`` (1-based index of the next mistake to
    serve). A correct answer bumps the right-count and returns the next
    mistake; a wrong one bumps the wrong-count and returns the exercise tips.
    """
    if req.method=='POST' and 'userid' in req.COOKIES:  # was dict.has_key()
        jsonReq = simplejson.loads(req.body)
        title = jsonReq['title']
        id = jsonReq['id']
        # A row matching both exercise id and answer exists only when the
        # submitted answer is correct.
        isTitle = Exercise.objects.filter(id = title['id'],answer = title['answer'])
        CDao = collectionDao({'userid':req.COOKIES['userid']})
        if isTitle:
            update_rightTime_byReq({'id':id})
            rsp = CDao.select_a_collection_byUs(jsonReq['num']-1)
            return HttpResponse(json.dumps(rsp), content_type="application/json")
        else:
            update_wrongTime_byReq({'id':id})
            return HttpResponse(json.dumps({'tips':get_tips_byId(title['id']),'wrongTime':select_collection_byReq({'id':id}).wrongtime}), content_type="application/json")
    # Tip text: "access error, please log in again".
    return HttpResponse(json.dumps({'tips':'访问错误,请重新登录'}), content_type="application/json")
| WarmerHu/subject | collection/views.py | views.py | py | 3,516 | python | en | code | 0 | github-code | 36 |
25047209667 | from rest_framework import status
from rest_framework.generics import get_object_or_404
from rest_framework.views import APIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from .models import Profile, Subject, Lesson, Screenshot
from .permissions import EditingForLecturerOnly
from .serializers import ProfileSerializer, SubjectSerializer, LessonSerializer, ScreenshotSerializer
class UserAPI(APIView):
    """Read-only listing of user profiles, optionally filtered by group."""

    def get(self, request):
        queryset = Profile.objects.all()
        requested_group = request.query_params.get('group', None)
        if requested_group:
            queryset = queryset.filter(user__groups__name=requested_group)
        payload = ProfileSerializer(queryset, many=True).data
        return Response({'users': payload}, status.HTTP_200_OK)
class SubjectAPI(APIView):
    """CRUD endpoints for Subject objects."""

    def get(self, _):
        data = SubjectSerializer(Subject.objects.all(), many=True).data
        return Response({'subjects': data}, status.HTTP_200_OK)

    def post(self, request):
        serializer = SubjectSerializer(data=request.data)
        # raise_exception=True makes is_valid() raise instead of returning False.
        serializer.is_valid(raise_exception=True)
        created = serializer.save()
        return Response(
            {'success': "Предмет '%s' успешно добавлен." % created.name},
            status.HTTP_201_CREATED)

    def put(self, request, subject_id):
        target = get_object_or_404(Subject.objects.all(), pk=subject_id)
        serializer = SubjectSerializer(instance=target, data=request.data, partial=True)
        serializer.is_valid(raise_exception=True)
        target = serializer.save()
        return Response(
            {'success': "Предмет '%s' был успешно отредактирован." % target.name},
            status.HTTP_200_OK)

    def delete(self, _, subject_id):
        subject = get_object_or_404(Subject.objects.all(), pk=subject_id)
        message = ("Учебный предмет '%s', а также все учебные предметы, "
                   "относящиеся к нему, были успешно удалены.") % subject.name
        subject.delete()
        return Response({'success': message}, status.HTTP_200_OK)
class ScreenshotAPI(APIView):
    """CRUD endpoints for Screenshot objects."""

    def get(self, _):
        data = ScreenshotSerializer(Screenshot.objects.all(), many=True).data
        return Response({'screenshots': data}, status.HTTP_200_OK)

    def post(self, request):
        serializer = ScreenshotSerializer(data=request.data)
        # raise_exception=True makes is_valid() raise instead of returning False.
        serializer.is_valid(raise_exception=True)
        created = serializer.save()
        return Response(
            {'success': "Скриншот '%s' успешно добавлен." % created.name},
            status.HTTP_201_CREATED)

    def put(self, request, screenshot_id):
        target = get_object_or_404(Screenshot.objects.all(), pk=screenshot_id)
        serializer = ScreenshotSerializer(instance=target, data=request.data, partial=True)
        serializer.is_valid(raise_exception=True)
        target = serializer.save()
        return Response(
            {'success': "Скриншот '%s' был успешно отредактирован." % target.name},
            status.HTTP_200_OK)

    def delete(self, _, screenshot_id):
        screenshot = get_object_or_404(Screenshot.objects.all(), pk=screenshot_id)
        message = "Скриншот '%s' был успешно удален." % screenshot.name
        screenshot.delete()
        return Response({'success': message}, status.HTTP_200_OK)
class LessonAPI(APIView):
    """CRUD endpoints for Lesson objects."""

    def get(self, _):
        data = LessonSerializer(Lesson.objects.all(), many=True).data
        return Response({'lessons': data}, status.HTTP_200_OK)

    def post(self, request):
        serializer = LessonSerializer(data=request.data)
        # raise_exception=True makes is_valid() raise instead of returning False.
        serializer.is_valid(raise_exception=True)
        created = serializer.save()
        return Response(
            {'success': "Учебное занятие '%s' успешно добавлен." % created.name},
            status.HTTP_201_CREATED)

    def put(self, request, lesson_id):
        target = get_object_or_404(Lesson.objects.all(), pk=lesson_id)
        serializer = LessonSerializer(instance=target, data=request.data, partial=True)
        serializer.is_valid(raise_exception=True)
        target = serializer.save()
        return Response(
            {'success': "Учебное занятие '%s' было успешно отредактировано." % target.name},
            status.HTTP_200_OK)

    def delete(self, _, lesson_id):
        lesson = get_object_or_404(Lesson.objects.all(), pk=lesson_id)
        message = "Учебное занятие '%s' успешно удалено." % lesson.name
        lesson.delete()
        return Response({'success': message}, status.HTTP_200_OK)
| vnkrtv/screenshots-loader | backend/app/api/views.py | views.py | py | 5,190 | python | en | code | 0 | github-code | 36 |
36426081739 | from PIL import Image
import math
def invert(img):
    """Return a color-inverted (negative) copy of *img*.

    Computes 255 - value for each of the R, G, B channels, exactly like the
    previous per-pixel Python loop, but via Image.point so the lookup runs
    in C instead of a width*height getpixel/putpixel loop.
    """
    rgb_img = img.convert('RGB')
    return rgb_img.point(lambda value: 255 - value)
# via https://qiita.com/zaburo/items/0b9db87d0a52191b164b
def blur(img):
    """Return a 3x3 box-blurred copy of *img*.

    Each output pixel is the truncated mean of the 3x3 neighbourhood;
    out-of-bounds neighbours are substituted with the centre pixel, as in
    the original implementation.

    Fix: the original boundary guards used ``x - 1 > 0`` / ``y - 1 > 0``,
    which wrongly excluded the valid index 0 — pixels in column/row 1 never
    sampled their column/row-0 neighbours. The bounds checks below use the
    correct ``>= 0`` semantics.
    """
    rgb_img = img.convert('RGB')
    width, height = rgb_img.size
    img2 = Image.new('RGB', (width, height))
    for y in range(height):
        for x in range(width):
            center = rgb_img.getpixel((x, y))
            total = [0, 0, 0]
            for dy in (-1, 0, 1):
                for dx in (-1, 0, 1):
                    nx, ny = x + dx, y + dy
                    if 0 <= nx < width and 0 <= ny < height:
                        pixel = rgb_img.getpixel((nx, ny))
                    else:
                        # Out of bounds: fall back to the centre pixel.
                        pixel = center
                    for c in range(3):
                        total[c] += pixel[c]
            img2.putpixel((x, y), (total[0] // 9, total[1] // 9, total[2] // 9))
    return img2
def brightness(r, g, b, brightnessValue=None):
    """Return the clamped average brightness of an RGB triple.

    Args:
        r, g, b: channel values (expected 0-255).
        brightnessValue: optional offset added to the average before clamping.

    Returns:
        int in [0, 255] (truncated mean, matching the original behavior).
    """
    mono = int((r + g + b) / 3)
    if brightnessValue is not None:
        mono += brightnessValue
    # Clamp to the valid 8-bit range (resolves the FIXME if/elif chain).
    return max(0, min(255, mono))
def atkinson(src_img, brightnessValue=None):
    """Convert *src_img* to black-and-white via Atkinson-style dithering.

    Each pixel's darkness error relative to the 128 threshold is divided by
    8 and diffused to six forward positions (x+1, x+2, the three pixels
    below, and two rows down). `brightnessValue` is an optional offset fed
    into brightness() before the correction curve.

    NOTE(review): neighbour offsets are computed on the flat row-major
    index, so at row ends the +1/+2 offsets wrap onto the start of the next
    row — confirm this edge behavior is acceptable.
    """
    src_rgb_img = src_img.convert('RGB')
    width, height = src_img.size
    result_img = Image.new('RGB', (width, height))
    # Flat, row-major running-error buffer, one slot per pixel.
    gray_array_length = width * height
    gray_array = [0] * gray_array_length
    for y in range(height):
        for x in range(width):
            r, g, b = src_rgb_img.getpixel((x, y))
            bright_temp = brightness(r, g, b, brightnessValue)
            # brightness correction curve
            bright_temp = int(math.sqrt(255.0) * math.sqrt(bright_temp))
            if bright_temp > 255:
                bright_temp = 255
            elif bright_temp < 0:
                bright_temp = 0
            darkness = int(255 - bright_temp)
            index = y * width + x
            # Fold in error diffused here by earlier pixels.
            darkness += gray_array[index]
            if darkness >= 128:
                result_img.putpixel((x, y), (0, 0, 0))
                # TODO: specify dark_color with atkinson's argument
                darkness -= 128
            else:
                result_img.putpixel((x, y), (255, 255, 255))
            # One eighth of the residual error goes to each neighbour.
            darkn8 = int(round(float(darkness) / 8.0))
            # Atkinson dithering algorithm
            if index + 1 < gray_array_length:
                gray_array[index + 1] += darkn8
            if index + 2 < gray_array_length:
                gray_array[index + 2] += darkn8
            if index + width - 1 < gray_array_length:
                gray_array[index + width - 1] += darkn8
            if index + width < gray_array_length:
                gray_array[index + width] += darkn8
            if index + width + 1 < gray_array_length:
                gray_array[index + width + 1] += darkn8
            if index + width * 2 < gray_array_length:
                gray_array[index + width * 2] += darkn8
    return result_img
def main():
    # Demo driver: open the test image ("test:Lenna" looks like a Pythonista
    # built-in pseudo-path — confirm availability outside Pythonista) and
    # show the Atkinson-dithered result. The inverted/blurred variants are
    # kept commented out for quick experimentation.
    img = Image.open('test:Lenna')
    # img.show()
    # inverted_img = invert(img)
    # inverted_img.show()
    # blured_img = blur(img)
    # blured_img.show()
    atkinson_img = atkinson(img)
    atkinson_img.show()
if __name__ == '__main__':
main()
| koyachi/sketches | 2021-02-11-pythonista-image/image_processor.py | image_processor.py | py | 3,591 | python | en | code | 2 | github-code | 36 |
72432170665 | import numpy as np
import cv2
STAGE_FIRST_FRAME = 0
STAGE_SECOND_FRAME = 1
STAGE_DEFAULT_FRAME = 2
kMinNumFeature = 1500
orb = cv2.ORB_create()
lk_params = dict(winSize = (21, 21),
#maxLevel = 3,
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 30, 0.01))
############## Edit this portion ###############
#Add SIFT
def featureTracking(image_ref, image_cur, px_ref):
    """Detect SIFT keypoints on the reference and current frames.

    NOTE(review): despite the name, no tracking happens in the active code
    path — the LK optical-flow version is commented out and `px_ref` is
    unused. The return values are lists of cv2.KeyPoint objects rather than
    the float32 coordinate arrays the LK version produced; callers such as
    processFrame pass these straight to cv2.findEssentialMat, which expects
    point arrays — confirm this is intended.
    """
    # kp2, st, err = cv2.calcOpticalFlowPyrLK(image_ref, image_cur, px_ref, None, **lk_params) #shape: [k,2] [k,1] [k,1]
    # st = st.reshape(st.shape[0])
    #initialize SIFT object
    sift = cv2.xfeatures2d.SIFT_create()
    #detect keypoints
    kp1, _= sift.detectAndCompute(image_ref, None)
    kp2, _= sift.detectAndCompute(image_cur, None)
    '''
    kp1 = px_ref[st == 1]
    kp2 = kp2[st == 1]
    '''
    return kp1, kp2
''' SIFT
import cv2 as cv
#load image
image = cv.imread("lena.jpg")
#convert to grayscale image
gray_scale = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
#initialize SIFT object
sift = cv.xfeatures2d.SIFT_create()
#detect keypoints
keypoints, _= sift.detectAndCompute(image, None)
'''
#################
class PinholeCamera:
    """Pinhole camera intrinsics container.

    Stores the image size, focal lengths (fx, fy), principal point (cx, cy)
    and the radial/tangential distortion coefficients [k1, k2, p1, p2, k3].
    `distortion` is True when k1 is non-negligible.
    """

    def __init__(self, width, height, fx, fy, cx, cy,
                 k1=0.0, k2=0.0, p1=0.0, p2=0.0, k3=0.0):
        self.width, self.height = width, height
        self.fx, self.fy = fx, fy
        self.cx, self.cy = cx, cy
        self.distortion = (abs(k1) > 0.0000001)
        self.d = [k1, k2, p1, p2, k3]
class VisualOdometry:
    """Monocular visual odometry for a KITTI-style image sequence.

    Matches ORB features between consecutive frames, recovers the relative
    pose from the essential matrix and integrates it into the running pose
    (cur_R, cur_t). Ground-truth annotations supply only the absolute scale
    of each translation step.

    Fixes vs. the original: the identical duplicate definition of `update`
    (which shadowed the first) has been removed, and the removed NumPy alias
    `np.int` is replaced by the equivalent builtin `int`.
    """

    def __init__(self, cam, annotations):
        """cam: PinholeCamera intrinsics; annotations: path to the KITTI
        ground-truth pose file (one flattened 3x4 pose matrix per line)."""
        self.frame_stage = 0
        self.cam = cam
        self.new_frame = None
        self.last_frame = None
        self.cur_R = None  # accumulated rotation
        self.cur_t = None  # accumulated translation
        self.px_ref = None
        self.px_cur = None
        self.keyp1 = None    # keypoint coordinates of the previous frame
        self.disptr1 = None  # ORB descriptors of the previous frame
        self.keyp2 = None
        self.disptr2 = None
        self.focal = cam.fx
        self.pp = (cam.cx, cam.cy)
        self.trueX, self.trueY, self.trueZ = 0, 0, 0
        self.detector = cv2.FastFeatureDetector_create(threshold=25, nonmaxSuppression=True)
        with open(annotations) as f:
            self.annotations = f.readlines()

    def getAbsoluteScale(self, frame_id):  # specialized for KITTI odometry dataset
        """Euclidean distance between the ground-truth positions of frames
        frame_id-1 and frame_id; also caches the current true position."""
        ss = self.annotations[frame_id - 1].strip().split()
        x_prev = float(ss[3])
        y_prev = float(ss[7])
        z_prev = float(ss[11])
        ss = self.annotations[frame_id].strip().split()
        x = float(ss[3])
        y = float(ss[7])
        z = float(ss[11])
        self.trueX, self.trueY, self.trueZ = x, y, z
        return np.sqrt((x - x_prev)*(x - x_prev) + (y - y_prev)*(y - y_prev) + (z - z_prev)*(z - z_prev))

    def processFirstFrame(self):
        """Detect ORB features on the very first frame and advance the stage."""
        keyp1, disptr1 = orb.detectAndCompute(self.new_frame, None)
        self.keyp1 = np.array([x.pt for x in keyp1], dtype=np.float32)
        self.disptr1 = disptr1
        self.frame_stage = STAGE_SECOND_FRAME

    def processSecondFrame(self):
        """Match ORB features against the first frame and bootstrap the pose."""
        keyp2, disptr2 = orb.detectAndCompute(self.new_frame, None)
        self.keyp2 = np.array([x.pt for x in keyp2], dtype=np.float32)
        # Brute-force Hamming matching; crossCheck keeps mutual best matches.
        bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
        matches = bf.match(self.disptr1, disptr2)
        # Keep the 20 strongest (lowest-distance) matches.
        matches = sorted(matches, key=lambda x: x.distance)
        matches = matches[0:20]
        # `int` instead of the removed NumPy alias `np.int` (same dtype).
        queryIdx = np.array([x.queryIdx for x in matches], dtype=int)
        trainIdx = np.array([x.trainIdx for x in matches], dtype=int)
        self.keyp1 = self.keyp1[queryIdx]
        self.keyp2 = self.keyp2[trainIdx]
        E, mask = cv2.findEssentialMat(self.keyp2, self.keyp1, focal=self.focal, pp=self.pp, method=cv2.RANSAC, prob=0.999, threshold=1.0)
        _, self.cur_R, self.cur_t, mask = cv2.recoverPose(E, self.keyp2, self.keyp1, focal=self.focal, pp=self.pp)
        self.frame_stage = STAGE_DEFAULT_FRAME
        self.keyp1 = self.keyp2

    def processFrame(self, frame_id):
        """Track features from the previous frame and integrate the new pose.

        NOTE(review): with the SIFT-based featureTracking above, px_ref and
        px_cur are lists of cv2.KeyPoint, which lack .shape — confirm which
        featureTracking variant is meant to be active.
        """
        self.px_ref, self.px_cur = featureTracking(self.last_frame, self.new_frame, self.px_ref)
        E, mask = cv2.findEssentialMat(self.px_cur, self.px_ref, focal=self.focal, pp=self.pp, method=cv2.RANSAC, prob=0.999, threshold=1.0)
        _, R, t, mask = cv2.recoverPose(E, self.px_cur, self.px_ref, focal=self.focal, pp=self.pp)
        absolute_scale = self.getAbsoluteScale(frame_id)
        # Only integrate when the vehicle actually moved; tiny steps are noise.
        if absolute_scale > 0.1:
            self.cur_t = self.cur_t + absolute_scale * self.cur_R.dot(t)
            self.cur_R = R.dot(self.cur_R)
        # Re-detect when the tracked feature count drops below the minimum.
        if self.px_ref.shape[0] < kMinNumFeature:
            self.px_cur = self.detector.detect(self.new_frame)
            self.px_cur = np.array([x.pt for x in self.px_cur], dtype=np.float32)
        self.px_ref = self.px_cur

    def update(self, img, frame_id):
        """Feed the next grayscale frame; dispatches on the pipeline stage.

        (The original defined this method twice with identical bodies; the
        redundant duplicate has been removed.)
        """
        assert img.ndim == 2 and img.shape[0] == self.cam.height and img.shape[1] == self.cam.width, "Frame: provided image has not the same size as the camera model or image is not grayscale"
        self.new_frame = img
        if self.frame_stage == STAGE_DEFAULT_FRAME:
            self.processFrame(frame_id)
        elif self.frame_stage == STAGE_SECOND_FRAME:
            self.processSecondFrame()
        elif self.frame_stage == STAGE_FIRST_FRAME:
            self.processFirstFrame()
        self.last_frame = self.new_frame
| aswinsbabu/visual-odometry | test_folder/odometry/sift_odometry.py | sift_odometry.py | py | 5,990 | python | en | code | 1 | github-code | 36 |
27320663172 | import lyricsgenius as lg
import csv
import re
def clean(lyrics):
    """Strip non-word characters from every token after the first.

    Tokens come from splitting on single spaces and newlines; the first
    token (typically the title/header Genius prepends to lyrics) is
    dropped. Each kept token is prefixed with a space, matching the
    original output exactly (including the leading space).
    """
    tokens = re.split(" |\n", lyrics)
    return "".join(" " + re.sub(r'\W+', '', token) for token in tokens[1:])
# WARNING(security): hard-coded Genius API client token committed to the
# repository — rotate it and load it from an environment variable or a
# credentials file instead.
client_access_token = "T_JxSIu8YFUEi3rDYG_9dOuajcyPXDJV09CtpAg4QFReKrKi_ijn-CSoCRQHflNd"
# Build {artist name -> genre} from the tab-separated artists.csv.
with open("artists.csv", 'r', encoding='utf-8') as f:
    csv_reader = csv.reader(f, delimiter='\t')
    header = next(csv_reader)
    artistCol = header.index('artist')
    genreCol = header.index('genre')
    artists = dict()
    for row in csv_reader:
        artists[row[artistCol]] = row[genreCol]
api = lg.Genius(client_access_token, timeout=20)
data = []
# Scrape up to 3 songs per artist and clean their lyrics.
for artist in artists.keys():
    a = api.search_artist(artist, max_songs=3)
    # while True:
    #     try:
    #         a = api.search_artist(artist, max_songs=3)
    #         break
    #     except:
    #         pass
    for song in a.songs:
        print(song.lyrics)
        data.append([artist, artists[artist], song.title, clean(song.lyrics)])
    # NOTE(review): this break stops after the FIRST artist only — looks
    # like a debugging leftover; confirm before removing.
    break
# Write the scraped rows as tab-separated data.csv.
with open("data.csv", 'w', encoding='utf-8', newline='') as f:
    writer = csv.writer(f, delimiter='\t')
    writer.writerows(data)
| Ykelli/NLP | scraper.py | scraper.py | py | 1,245 | python | en | code | 0 | github-code | 36 |
25317597548 | #!/user/bin/python
import configparser
import requests
import json
import time
#read config file for API key
config = configparser.ConfigParser()
config.sections()
config.read('../TwitterScrape/credentials.ini')
api = config.get("keys", 'urlapi')
#Set headers and data for api usage
headers = {
'Content-Type': 'application/json',
'API-Key': api,
}
data = '{"url":"http://bestravelways.com/P1C0uUXVxpq.jsv?byuIqrLNbdSJ=PszfbaUhtspk18d9brJ032bju01farr0116612056ozcw2fio", "public": "on"}'
#sumbits scan and decodes the details#
scan = requests.post('https://urlscan.io/api/v1/scan/', headers=headers, data=data)
scandetails = scan.content.decode('utf-8')
#parse the returned json details
scanjson = json.loads(scandetails)
#test details
#print(scanjson["uuid"])
uuid = scanjson["uuid"]
#print(uuid)
base_url = "https://urlscan.io/api/v1/result/" + str(uuid)
time.sleep(60)
response = requests.get(base_url)
print(response)
responsedetails = response.content.decode('utf-8')
print(responsedetails)
| monkeytail2002/TwitterURLChecker | Test Scripts/testrequest.py | testrequest.py | py | 1,010 | python | en | code | 0 | github-code | 36 |
35132573715 | import itertools
from abc import ABCMeta
import numpy as np
import tensorflow as tf
import gin.tf
from datasets.raw_dataset import RawDataset
from datasets import dataset_utils
from layers.embeddings_layers import ObjectType
class SamplingDataset(RawDataset, metaclass=ABCMeta):
    # Abstract marker base for datasets that sample (generate) examples
    # rather than reading them verbatim; currently adds no behavior.
    pass
@gin.configurable(blacklist=['sample_weights_model', 'sample_weights_loss_object'])
class SamplingEdgeDataset(RawDataset):
    """Edge dataset pairing each positive graph edge with sampled negatives.

    Negatives are built by corrupting either the head or the tail of a
    positive edge with a random entity, rejection-sampled against known
    edges. Optionally a `sample_weights_model` scores a pool of candidate
    negatives and one is drawn with probability proportional to its loss
    (self-adversarial negative sampling).
    """
    # Upper bound on rejection-sampling attempts per positive edge.
    MAX_ITERATIONS = 1000
    def __init__(self, negatives_per_positive=1, sample_weights_model=None, sample_weights_loss_object=None,
                 sample_weights_count=100, **kwargs):
        """negatives_per_positive: negatives emitted per positive edge;
        sample_weights_count: candidate-pool size when a weights model is set."""
        super(SamplingEdgeDataset, self).__init__(**kwargs)
        self.negatives_per_positive = negatives_per_positive
        self.sample_weights_model = sample_weights_model
        self.sample_weights_loss_object = sample_weights_loss_object
        self.sample_weights_count = sample_weights_count
    def _get_positive_samples_dataset(self):
        """Dataset of the real graph edges run through the processing pipeline."""
        raw_dataset = tf.data.Dataset.from_tensor_slices(self.graph_edges)
        raw_dataset = raw_dataset.map(
            lambda x: {"object_ids": x, "object_types": list(dataset_utils.EDGE_OBJECT_TYPES)}
        )
        return self._get_processed_dataset(raw_dataset)
    def _generate_negative_samples(self, negatives_per_positive):
        """Yield corrupted-edge dicts, `negatives_per_positive` per positive.

        NOTE(review): entity_head/entity_tail are reassigned inside the
        while loop, so successive candidates corrupt the previously
        corrupted edge rather than always the original one — confirm this
        chaining is intended.
        """
        random_binary_variable_iterator = dataset_utils.get_int_random_variables_iterator(low=0, high=2)
        random_entity_index_iterator = dataset_utils.get_int_random_variables_iterator(low=0, high=self.entities_count)
        for entity_head, relation, entity_tail in self.graph_edges:
            # Coin flip: corrupt the head or the tail for this positive edge.
            is_head_to_be_swapped = next(random_binary_variable_iterator)
            produced_edges = []
            iterations_count = 0
            while len(produced_edges) < negatives_per_positive and iterations_count < self.MAX_ITERATIONS:
                if is_head_to_be_swapped:
                    entity_head = self.ids_of_entities[next(random_entity_index_iterator)]
                else:
                    entity_tail = self.ids_of_entities[next(random_entity_index_iterator)]
                produced_edge = (entity_head, relation, entity_tail)
                # Reject corruptions that are real edges or duplicates.
                if produced_edge not in self.set_of_graph_edges and produced_edge not in produced_edges:
                    produced_edges.append(produced_edge)
                iterations_count += 1
            # Positives whose sampling hit the iteration cap are dropped.
            if iterations_count < self.MAX_ITERATIONS:
                for produced_edge in produced_edges:
                    yield {
                        "object_ids": produced_edge,
                        "object_types": list(dataset_utils.EDGE_OBJECT_TYPES),
                        "head_swapped": is_head_to_be_swapped,
                    }
    def _reorder_negative_samples(self, batched_samples):
        """Turn a dict of [*, negatives, ...] tensors into a list of
        per-negative input dicts."""
        reordered_samples = []
        for key, values in batched_samples.items():
            for index, negative_inputs in enumerate(tf.unstack(values, axis=1)):
                if len(reordered_samples) <= index:
                    reordered_samples.append({})
                reordered_samples[index][key] = negative_inputs
        return reordered_samples
    def _get_negative_samples_dataset(self):
        """Dataset of negative-sample groups, one group per positive edge."""
        if self.negatives_per_positive > 1 and self.sample_weights_model is not None:
            raise ValueError("`negatives_per_positive > 1` while `sample_weights_model` is not supported")
        # With a weights model, generate a larger candidate pool to pick from.
        negatives_per_positive = (
            self.negatives_per_positive if self.sample_weights_model is None else self.sample_weights_count
        )
        raw_dataset = tf.data.Dataset.from_generator(
            lambda: self._generate_negative_samples(negatives_per_positive),
            output_signature={"object_ids": tf.TensorSpec(shape=(3, ), dtype=tf.int32),
                              "object_types": tf.TensorSpec(shape=(3,), dtype=tf.int32),
                              "head_swapped": tf.TensorSpec(shape=(), dtype=tf.bool)},
        )
        raw_dataset = raw_dataset.batch(negatives_per_positive, drop_remainder=True)
        return self._get_processed_dataset(raw_dataset).map(self._reorder_negative_samples)
    def _pick_samples_using_model(self, positive_inputs, array_of_negative_inputs):
        """Score each candidate negative against the positive and draw one
        per row with probability proportional to its pairwise loss."""
        positive_outputs = self.sample_weights_model(positive_inputs, training=False)
        array_of_raw_losses = []
        for negative_inputs in array_of_negative_inputs:
            negative_outputs = self.sample_weights_model(negative_inputs, training=False)
            array_of_raw_losses.append(self.sample_weights_loss_object.get_losses_of_pairs(
                positive_outputs, negative_outputs
            ))
        losses = tf.transpose(tf.stack(array_of_raw_losses, axis=0))
        probs = losses / tf.expand_dims(tf.reduce_sum(losses, axis=1), axis=1)
        indexes_of_chosen_samples = tf.reshape(tf.random.categorical(tf.math.log(probs), num_samples=1), (-1, ))
        negative_samples_keys = list(array_of_negative_inputs[0].keys())
        chosen_negative_inputs = {}
        for key in negative_samples_keys:
            stacked_inputs = tf.stack([inputs[key] for inputs in array_of_negative_inputs], axis=1)
            chosen_negative_inputs[key] = tf.gather(stacked_inputs, indexes_of_chosen_samples, axis=1, batch_dims=1)
        return positive_inputs, (chosen_negative_inputs, )
    @property
    def samples(self):
        """Zipped (positive, negatives) dataset, weight-sampled when configured."""
        positive_samples = self._get_positive_samples_dataset()
        negative_samples = self._get_negative_samples_dataset()
        samples = tf.data.Dataset.zip((positive_samples, negative_samples))
        # Either both weight-sampling collaborators are set, or neither is.
        if (self.sample_weights_model is None) != (self.sample_weights_loss_object is None):
            raise ValueError("Expected sample_weights_model and sample_weights_loss_object to be set.")
        if self.sample_weights_model is not None:
            samples = samples.map(self._pick_samples_using_model)
        return samples
@gin.configurable
class SamplingNeighboursDataset(SamplingEdgeDataset):
    """Edge dataset that appends sampled graph-neighbourhood context.

    To every (head, relation, tail) sample it adds up to
    `neighbours_per_sample` outgoing edges of the head and incoming edges of
    the tail as (entity, relation) id pairs; missing neighbours are padded
    with the special-token object type.
    """
    # Object-type pair emitted for every sampled neighbour edge.
    NEIGHBOUR_OBJECT_TYPES = (ObjectType.ENTITY.value, ObjectType.RELATION.value)
    def __init__(self, neighbours_per_sample, **kwargs):
        """neighbours_per_sample: neighbour edges sampled per side (out/in)."""
        super(SamplingNeighboursDataset, self).__init__(**kwargs)
        self.neighbours_per_sample = neighbours_per_sample
    def _produce_object_ids_with_types(self, edges):
        """Eager (py_function) helper: for each edge, sample neighbour edges
        and build the padded object-id and object-type arrays."""
        object_ids, object_types = [], []
        for head_id, relation_id, tail_id in edges.numpy():
            # The edge itself is banned so it cannot appear as its own context.
            sampled_output_edges, missing_output_edges_count = dataset_utils.sample_edges(
                self.known_entity_output_edges[head_id],
                banned_edges=[(tail_id, relation_id)],
                neighbours_per_sample=self.neighbours_per_sample,
            )
            sampled_input_edges, missing_input_edges_count = dataset_utils.sample_edges(
                self.known_entity_input_edges[tail_id],
                banned_edges=[(head_id, relation_id)],
                neighbours_per_sample=self.neighbours_per_sample,
            )
            object_ids.append([head_id, relation_id, tail_id] + sampled_output_edges + sampled_input_edges)
            # Pad short neighbour lists with special-token types (2 slots each).
            outputs_types = list(np.concatenate((
                np.tile(self.NEIGHBOUR_OBJECT_TYPES, reps=self.neighbours_per_sample - missing_output_edges_count),
                np.tile(ObjectType.SPECIAL_TOKEN.value, reps=2 * missing_output_edges_count),
            )))
            inputs_types = list(np.concatenate((
                np.tile(self.NEIGHBOUR_OBJECT_TYPES, reps=self.neighbours_per_sample - missing_input_edges_count),
                np.tile(ObjectType.SPECIAL_TOKEN.value, reps=2 * missing_input_edges_count),
            )))
            object_types.append(list(dataset_utils.EDGE_OBJECT_TYPES) + outputs_types + inputs_types)
        return np.array(object_ids), np.array(object_types)
    def _produce_positions(self, samples_count):
        """Positional ids: 0-2 for the edge, (3,4) repeated for outgoing
        neighbours, (5,6) repeated for incoming ones, tiled per sample."""
        outputs_positions = list(itertools.chain(*[(3, 4) for _ in range(self.neighbours_per_sample)]))
        inputs_positions = list(itertools.chain(*[(5, 6) for _ in range(self.neighbours_per_sample)]))
        positions = [0, 1, 2] + outputs_positions + inputs_positions
        return tf.tile(tf.expand_dims(positions, axis=0), multiples=[samples_count, 1])
    def _include_neighbours_in_edges(self, edges):
        """Replace the edge-only fields with neighbour-augmented versions,
        keeping any other keys untouched."""
        object_ids, object_types = tf.py_function(
            self._produce_object_ids_with_types, inp=[edges["object_ids"]], Tout=(tf.int32, tf.int32)
        )
        updated_edges = {
            "object_ids": object_ids,
            "object_types": object_types,
            "positions": self._produce_positions(samples_count=tf.shape(edges["object_ids"])[0]),
        }
        for key, values in edges.items():
            if key in updated_edges:
                continue
            updated_edges[key] = values
        return updated_edges
    def _map_batched_samples(self, positive_edges, array_of_negative_edges):
        """Augment the positive and every negative group with neighbours."""
        positive_edges = self._include_neighbours_in_edges(positive_edges)
        array_of_negative_edges = tuple([
            self._include_neighbours_in_edges(edges) for edges in array_of_negative_edges
        ])
        return positive_edges, array_of_negative_edges
    @property
    def samples(self):
        """Parent's (positive, negatives) samples with neighbour context added."""
        edge_samples = super(SamplingNeighboursDataset, self).samples
        return edge_samples.map(self._map_batched_samples)
| Dawidsoni/relation-embeddings | src/datasets/sampling_datasets.py | sampling_datasets.py | py | 9,287 | python | en | code | 0 | github-code | 36 |
41037129028 | import matplotlib.pyplot as plt
import cv2
import os
import random
# Quick visual check of the test set: load every image from each category in
# grayscale, resize it and display it one at a time.
BASE_PATH = "testImages"
CATEGORIES = ["flybuss", "neptuntaxi", "trondertaxi"]
IMG_SIZE = 60  # presumably matches the model's input resolution — confirm
for category in CATEGORIES:
    path = os.path.join(BASE_PATH, category)
    for img in os.listdir(path):
        img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
        new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
        plt.imshow(new_array, cmap="gray")
        plt.show()
| JoakimAa/Bachelor2021 | ML/Cnn/viewtest.py | viewtest.py | py | 474 | python | en | code | 0 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.