seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
9367032792 | import time
import argparse
import numpy as np
import torch
from deeprobust.graph.defense import GCN, ProGNN
from deeprobust.graph.data import Dataset, PrePtbDataset
from deeprobust.graph.utils import preprocess, encode_onehot, get_train_val_test

# Training settings
# These mirror the Pro-GNN training hyper-parameters; this script only uses
# the dataset/seed options, the rest keep the CLI compatible with train runs.
parser = argparse.ArgumentParser()
parser.add_argument('--debug', action='store_true',
                    default=False, help='debug mode')
parser.add_argument('--only_gcn', action='store_true',
                    default=False, help='test the performance of gcn without other components')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='Disables CUDA training.')
parser.add_argument('--seed', type=int, default=15, help='Random seed.')
parser.add_argument('--lr', type=float, default=0.01,
                    help='Initial learning rate.')
parser.add_argument('--weight_decay', type=float, default=5e-4,
                    help='Weight decay (L2 loss on parameters).')
parser.add_argument('--hidden', type=int, default=16,
                    help='Number of hidden units.')
parser.add_argument('--dropout', type=float, default=0.5,
                    help='Dropout rate (1 - keep probability).')
parser.add_argument('--dataset', type=str, default='cora',
                    choices=['cora', 'cora_ml', 'citeseer', 'polblogs', 'pubmed'], help='dataset')
parser.add_argument('--attack', type=str, default='meta',
                    choices=['no', 'meta', 'random', 'nettack'])
parser.add_argument('--ptb_rate', type=float, default=0.05, help="noise ptb_rate")
parser.add_argument('--epochs', type=int, default=400, help='Number of epochs to train.')
parser.add_argument('--alpha', type=float, default=5e-4, help='weight of l1 norm')
parser.add_argument('--beta', type=float, default=1.5, help='weight of nuclear norm')
parser.add_argument('--gamma', type=float, default=1, help='weight of l2 norm')
parser.add_argument('--lambda_', type=float, default=0, help='weight of feature smoothing')
parser.add_argument('--phi', type=float, default=0, help='weight of symmetric loss')
parser.add_argument('--inner_steps', type=int, default=2, help='steps for inner optimization')
parser.add_argument('--outer_steps', type=int, default=1, help='steps for outer optimization')
parser.add_argument('--lr_adj', type=float, default=0.01, help='lr for training adj')
parser.add_argument('--symmetric', action='store_true', default=False,
                    help='whether use symmetric matrix')
args = parser.parse_args()

args.cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if args.cuda else "cpu")
if args.cuda:
    torch.cuda.manual_seed(args.seed)
# A perturbation rate of zero means there is no attack on the graph.
if args.ptb_rate == 0:
    args.attack = "no"
print(args)

# Here the random seed is to split the train/val/test data, we need to set
# the random seed to be the same as that when you generate the perturbed graph
data = Dataset(root='/tmp/', name=args.dataset, setting='nettack', seed=15)
adj, features, labels = data.adj, data.features, data.labels
idx_train, idx_val, idx_test = data.idx_train, data.idx_val, data.idx_test

if args.dataset == 'pubmed':
    # just for matching the results in the paper, see details in
    # https://github.com/ChandlerBang/Pro-GNN/issues/2
    print("just for matching the results in the paper," + \
        "see details in https://github.com/ChandlerBang/Pro-GNN/issues/2")
    # NOTE(review): hard ipdb breakpoint left in; the script stops here on the
    # pubmed branch — confirm it is still wanted before running unattended.
    import ipdb
    ipdb.set_trace()
    idx_train, idx_val, idx_test = get_train_val_test(adj.shape[0],
        val_size=0.1, test_size=0.8, stratify=encode_onehot(labels), seed=15)

# Persist the split indices so later experiments can reuse the exact split.
import json
splits = {'idx_train': idx_train.tolist(),
          'idx_val': idx_val.tolist(),
          'idx_test': idx_test.tolist()}
with open(f'splits/{args.dataset}_prognn_splits.json', 'w') as f:
    json.dump(splits, f)
| ChandlerBang/Pro-GNN | save_splits.py | save_splits.py | py | 3,762 | python | en | code | 249 | github-code | 36 |
43400299413 | """OTE-API OntoKB Plugin
A plugin for OTE-API.
Authored by Alessandro Calvio, UNIBO, 2022
Created from cookiecutter-oteapi-plugin, SINTEF, 2022
"""
# Package metadata, read by packaging tooling and available for introspection.
__version__ = "0.0.1"
__author__ = "Alessandro Calvio"
__author_email__ = "alessandro.calvio2@unibo.it"
| xAlessandroC/oteapi-ontokb-plugin | oteapi_ontokb_plugin/__init__.py | __init__.py | py | 255 | python | en | code | 0 | github-code | 36 |
20015295669 | """
Skriv ett program som ersätter alla förekomster av ett givet ord i en fil med ett annat ord.
Programmet ska läsa in filnamn, ord att ersätta samt ord att använda istället från användaren via kommandoraden.
Notera att den ursprungliga filen ska skrivas över.
Exempel på körning:
Ange ett filnamn: kort_saga.txt
Ord att byta ut: gång
Ersätt med: VÄG
Ledtråd: Använd sträng-funktionen replace.
"""
fileName = input('Ange ett filnamn: ')
oldWord = input('Ord att byta ut: ')
newWord = input('Ersätt med: ')
# Read the whole file once; str.replace substitutes every occurrence in a
# single pass, so the per-line loop (with its unused enumerate index) is
# unnecessary.  The file is then overwritten in place, as required.
with open(fileName, encoding='utf-8') as file:
    fileContent = file.read()
with open(fileName, 'w', encoding='utf-8') as file:
    file.write(fileContent.replace(oldWord, newWord))
| yararajjoub/pythonModulo | Modul7/Labb_7/replacing.py | replacing.py | py | 840 | python | sv | code | 0 | github-code | 36 |
32788623389 | import wx
import re
import Wammu
import Wammu.Events
import Wammu.Utils
import Wammu.Paths
from Wammu.Locales import StrConv, ugettext as _
import wx.lib.mixins.listctrl
COLUMN_INFO = {
'info':
(
(
_('Name'),
_('Value')
),
(
'Name',
'Value'
),
),
'contact':
(
(
_('Location'),
_('Memory'),
_('Name'),
_('Number')
),
(
'Location',
'MemoryType',
'Name',
'Number'
),
),
'call':
(
(
_('Location'),
_('Type'),
_('Name'),
_('Number'),
_('Date')
),
(
'Location',
'MemoryType',
'Name',
'Number',
'Date'
),
),
'message':
(
(
_('Location'),
_('Status'),
_('Number'),
_('Date'),
_('Text')
),
(
'Location',
'State',
'Number',
'DateTime',
'Text'
),
),
'todo':
(
(
_('Location'),
_('Completed'),
_('Priority'),
_('Text'),
_('Date')
),
(
'Location',
'Completed',
'Priority',
'Text',
'Date'
),
),
'calendar':
(
(
_('Location'),
_('Type'),
_('Start'),
_('End'),
_('Text'),
_('Alarm'),
_('Recurrence')
),
(
'Location',
'Type',
'Start',
'End',
'Text',
'Alarm',
'Recurrence'
),
)
}
class FilterException(Exception):
    """Raised when a user supplied filtering expression is malformed."""
class Browser(wx.ListCtrl, wx.lib.mixins.listctrl.ListCtrlAutoWidthMixin):
'''
Generic class for browsing values.
'''
def __init__(self, parent, win, cfg):
    '''
    Create the browser list control.

    :param parent: parent wx window.
    :param win: main window that receives the events this list posts.
    :param cfg: configuration storage used to persist sort settings.
    '''
    wx.ListCtrl.__init__(
        self,
        parent,
        -1,
        style=wx.LC_REPORT | wx.LC_VIRTUAL | wx.LC_HRULES | wx.LC_VRULES
    )
    self.win = win
    self.cfg = cfg
    self.itemno = -1
    self.type = ''
    # Currently displayed (possibly filtered) values and the unfiltered set.
    self.values = []
    self.allvalues = []
    self.sortkey = ''
    self.sortorder = 1
    self.columns = []
    self.keys = []
    self.popup_index = -1
    # Four row attributes: plain/shaded background, normal or italic font
    # (italic is used by OnGetItemAttr for rows that are not synced).
    color = wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DLIGHT)
    self.attr1 = wx.ListItemAttr()
    self.attr2 = wx.ListItemAttr()
    self.attr2.SetBackgroundColour(color)
    self.attr3 = wx.ListItemAttr()
    fnt = self.attr3.GetFont()
    fnt.SetStyle(wx.FONTSTYLE_ITALIC)
    self.attr3.SetFont(fnt)
    self.attr4 = wx.ListItemAttr()
    self.attr4.SetBackgroundColour(color)
    self.attr4.SetFont(fnt)
    # Sort direction arrows shown in the column headers.
    image_list = wx.ImageList(16, 16)
    down_bitmap = wx.Bitmap(Wammu.Paths.MiscPath('downarrow'))
    up_bitmap = wx.Bitmap(Wammu.Paths.MiscPath('uparrow'))
    self.downarrow = image_list.Add(down_bitmap)
    self.uparrow = image_list.Add(up_bitmap)
    self.AssignImageList(image_list, wx.IMAGE_LIST_SMALL)
    wx.lib.mixins.listctrl.ListCtrlAutoWidthMixin.__init__(self)
    # Create IDs for popup menu
    self.popup_id_send = wx.NewId()
    self.popup_id_edit = wx.NewId()
    self.popup_id_message = wx.NewId()
    self.popup_id_contact = wx.NewId()
    self.popup_id_call = wx.NewId()
    self.popup_id_delete = wx.NewId()
    self.popup_id_delete_selection = wx.NewId()
    self.popup_id_duplicate = wx.NewId()
    self.popup_id_reply = wx.NewId()
    self.popup_id_backup_one = wx.NewId()
    self.popup_id_backup_selection = wx.NewId()
    self.popup_id_backup_all = wx.NewId()
    self.BindEvents()
def BindEvents(self):
    '''
    Bind various event handlers to events we need.
    '''
    # List control events: selection, activation, keyboard input, header
    # clicks (sorting) and the right click context menu.
    self.Bind(
        wx.EVT_LIST_ITEM_SELECTED,
        self.OnItemSelected,
        self
    )
    self.Bind(
        wx.EVT_LIST_ITEM_ACTIVATED,
        self.OnItemActivated,
        self
    )
    self.Bind(
        wx.EVT_LIST_KEY_DOWN,
        self.OnKey,
        self
    )
    self.Bind(
        wx.EVT_LIST_COL_CLICK,
        self.OnColClick,
        self
    )
    self.Bind(
        wx.EVT_LIST_ITEM_RIGHT_CLICK,
        self.OnRightClick,
        self
    )
    # Context menu entries, one handler per popup menu id.
    self.Bind(
        wx.EVT_MENU,
        self.OnPopupSend,
        id=self.popup_id_send
    )
    self.Bind(
        wx.EVT_MENU,
        self.OnPopupEdit,
        id=self.popup_id_edit
    )
    self.Bind(
        wx.EVT_MENU,
        self.OnPopupMessage,
        id=self.popup_id_message
    )
    self.Bind(
        wx.EVT_MENU,
        self.OnPopupContact,
        id=self.popup_id_contact
    )
    self.Bind(
        wx.EVT_MENU,
        self.OnPopupCall,
        id=self.popup_id_call
    )
    self.Bind(
        wx.EVT_MENU,
        self.OnPopupDelete,
        id=self.popup_id_delete
    )
    self.Bind(
        wx.EVT_MENU,
        self.OnPopupDeleteSel,
        id=self.popup_id_delete_selection
    )
    self.Bind(
        wx.EVT_MENU,
        self.OnPopupDuplicate,
        id=self.popup_id_duplicate
    )
    self.Bind(
        wx.EVT_MENU,
        self.OnPopupReply,
        id=self.popup_id_reply
    )
    self.Bind(
        wx.EVT_MENU,
        self.OnPopupBackupOne,
        id=self.popup_id_backup_one
    )
    self.Bind(
        wx.EVT_MENU,
        self.OnPopupBackupSel,
        id=self.popup_id_backup_selection
    )
    self.Bind(
        wx.EVT_MENU,
        self.OnPopupBackupAll,
        id=self.popup_id_backup_all
    )
def ShowHeaders(self):
    '''
    Updates which headers and keys should be show and displays them.
    '''
    self.columns = COLUMN_INFO[self.type][0]
    self.keys = COLUMN_INFO[self.type][1]
    cnt = len(self.columns)
    for i in range(cnt):
        self.InsertColumn(i, self.columns[i])
    # resize columns to fit content
    # FIXME: this should be acquired better!
    spc = 10
    maxval = [0] * cnt
    # Start from the width of the header labels themselves.
    for i in range(cnt):
        size = self.GetTextExtent(StrConv(self.columns[i]))[0]
        # 16 bellow is for sort arrrow
        if size + 16 > maxval[i]:
            maxval[i] = size + 16
    # Widen each column to the widest cell value it has to display.
    for current in self.values:
        for i in range(cnt):
            size = self.GetTextExtent(StrConv(current[self.keys[i]]))
            if size[0] > maxval[i]:
                maxval[i] = size[0]
    for i in range(cnt - 1):
        self.SetColumnWidth(i, maxval[i] + spc)
    # The last column stretches to fill remaining space (auto-width mixin).
    self.resizeLastColumn(maxval[cnt - 1] + spc)
def Filter(self, text, filter_type):
    '''
    Filters content of browser by various expressions (type of expression
    is defined by filter_type).

    :param text: filtering expression; an empty string clears the filter.
    :param filter_type: 0 = substring, 1 = regular expression,
        2 = shell-style wildcard.
    :raises FilterException: when a regular expression fails to compile.
    '''
    if text == '':
        self.values = self.allvalues
    else:
        # Numeric queries additionally match on numeric fields.
        num = None
        if text.isdigit():
            num = int(text)
        if filter_type == 0:
            match = re.compile('.*%s.*' % re.escape(text), re.I)
        elif filter_type == 1:
            try:
                match = re.compile(text, re.I)
            except re.error:
                # Only catch regexp compilation errors here; a bare
                # except would also hide unrelated programming errors.
                raise FilterException('Failed to compile regexp')
        elif filter_type == 2:
            # Translate shell-style wildcards into a regular expression.
            text = text.replace('*', '__SEARCH_ALL__')
            text = text.replace('?', '__SEARCH_ONE__')
            text = re.escape(text)
            text = text.replace('\\_\\_SEARCH\\_ALL\\_\\_', '.*')
            text = text.replace('\\_\\_SEARCH\\_ONE\\_\\_', '.')
            match = re.compile('.*%s.*' % text, re.I)
        else:
            raise Exception('Unsupported filter type %s!' % filter_type)
        self.values = [
            item for item in self.allvalues
            if Wammu.Utils.MatchesText(item, match, num)
        ]
    self.SetItemCount(len(self.values))
    self.RefreshView()
    self.ShowRow(0)
def Sorter(self, item1, item2):
    '''
    Compare function for internal list of values.

    Python 2 style cmp() comparator, used via self.values.sort(self.Sorter);
    sortorder is +1 for ascending, -1 for descending.
    '''
    # Composite string locations such as '1,2' compare by their first number.
    # NOTE(review): item1 is split on ',' but item2 on ', ' — this looks
    # inconsistent; confirm which separator the data actually uses.
    if self.sortkey == 'Location' and isinstance(item1[self.sortkey], str):
        return self.sortorder * cmp(
            int(item1[self.sortkey].split(',')[0]),
            int(item2[self.sortkey].split(', ')[0]))
    # Missing values always sort towards one end regardless of direction.
    elif item1[self.sortkey] is None:
        return -self.sortorder
    elif item2[self.sortkey] is None:
        return self.sortorder
    return self.sortorder * cmp(item1[self.sortkey], item2[self.sortkey])
def ShowLocation(self, loc, second=None):
    '''
    Select the row stored at the given location.

    An optional second (attribute, value) pair can narrow down the
    search.  Nothing happens when no matching row exists.
    '''
    row = Wammu.Utils.SearchLocation(self.values, loc, second)
    if row != -1:
        self.ShowRow(row)
def ShowRow(self, index):
    '''
    Activates id-th row.
    '''
    if (self.GetItemCount() > index and index >= 0 and
            self.GetCountPerPage() > 0):
        self.itemno = index
        # Drop any previous selection so exactly one row ends up selected.
        while self.GetFirstSelected() != -1:
            self.SetItemState(
                self.GetFirstSelected(), 0, wx.LIST_STATE_SELECTED
            )
        self.SetItemState(
            index,
            wx.LIST_STATE_FOCUSED | wx.LIST_STATE_SELECTED,
            wx.LIST_STATE_FOCUSED | wx.LIST_STATE_SELECTED
        )
        self.EnsureVisible(index)
    else:
        # No such row: tell the parent window to clear the detail view.
        evt = Wammu.Events.ShowEvent(data=None)
        wx.PostEvent(self.win, evt)
def Change(self, newtype, values):
    '''
    Change type of browser component.
    '''
    # Persist sort settings of the listing type we are leaving.
    if self.type != '':
        self.cfg.Write(
            '/BrowserSortKey/%s' % self.type, self.sortkey
        )
        self.cfg.WriteInt(
            '/BrowserSortOrder/%s' % self.type, self.sortorder
        )
    self.type = newtype
    self.values = values
    self.allvalues = values
    self.sortkey = ''
    self.sortorder = 1
    self.ClearAll()
    self.SetItemCount(len(values))
    self.ShowHeaders()
    # restore sort order
    found = False
    readsort = self.cfg.Read('/BrowserSortKey/%s' % self.type)
    readorder = self.cfg.ReadInt('/BrowserSortOrder/%s' % self.type)
    for i in range(len(self.keys)):
        if self.keys[i] == readsort:
            # Pre-setting sortkey makes Resort flip to descending order.
            if readorder == -1:
                self.sortkey = readsort
            self.Resort(i)
            found = True
    if not found:
        self.Resort(0)
def Resort(self, col):
    '''
    Changes sort order of listing.
    '''
    # remember show item
    try:
        item = self.values[self.itemno]
    except IndexError:
        item = None
    # find keys and order
    # Re-sorting the active column flips direction, otherwise ascending.
    nextsort = self.keys[col]
    if nextsort == self.sortkey:
        self.sortorder = -1 * self.sortorder
    else:
        self.sortorder = 1
        self.sortkey = nextsort
    # do the real sort
    self.values.sort(self.Sorter)
    # set image
    for i in range(self.GetColumnCount()):
        self.ClearColumnImage(i)
    if self.sortorder == 1:
        image = self.downarrow
    else:
        image = self.uparrow
    self.SetColumnImage(col, image)
    self.RefreshView()
    # Keep the previously selected item visible after reordering.
    if item is not None:
        self.ShowRow(self.values.index(item))
def RefreshView(self):
    '''
    Redraw the rows that are currently visible on screen.
    '''
    if self.GetItemCount() == 0:
        return
    first = max(self.GetTopItem(), 0)
    total = self.GetItemCount()
    per_page = self.GetCountPerPage()
    # A negative per-page count means the widget cannot tell; redraw all.
    if per_page < 0:
        per_page = total
    self.RefreshItems(first, min(total - 1, first + per_page))
def OnKey(self, evt):
    '''
    Key handler which catches delete key for deletion of current item and
    R/r key for message reply.
    '''
    if evt.GetKeyCode() == wx.WXK_DELETE:
        self.DoSelectedDelete()
    # Was the magic list [114, 82]; spell out the intended keys instead.
    elif evt.GetKeyCode() in (ord('r'), ord('R')):
        self.DoReply()
def DoSelectedDelete(self):
    '''
    Collect every selected row and ask the parent to delete them.
    '''
    selected = []
    row = self.GetFirstSelected()
    while row != -1:
        selected.append(self.values[row])
        row = self.GetNextSelected(row)
    self.DoDelete(selected)
def DoDelete(self, lst):
    '''
    Send delete event to parent.

    :param lst: list of value dictionaries to delete.
    '''
    evt = Wammu.Events.DeleteEvent(lst=lst)
    wx.PostEvent(self.win, evt)

def DoBackup(self, lst):
    '''
    Send backup event to parent.

    :param lst: list of value dictionaries to back up.
    '''
    evt = Wammu.Events.BackupEvent(lst=lst)
    wx.PostEvent(self.win, evt)

def DoReply(self):
    '''
    Send reply event to parent for the currently focused message.
    '''
    evt = Wammu.Events.ReplyEvent(data=self.values[self.GetFocusedItem()])
    wx.PostEvent(self.win, evt)
def OnRightClick(self, evt):
    '''
    Handle right click - show context menu with correct options for
    current type of listing.
    '''
    # The info listing is read only and offers no context menu.
    if self.type == 'info':
        return
    self.popup_index = evt.m_itemIndex
    # make a menu
    menu = wx.Menu()
    # add some items
    # Message specific actions depend on the message state.
    if self.popup_index != -1 and self.type == 'message':
        if self.values[evt.m_itemIndex]['State'] == 'Sent':
            menu.Append(self.popup_id_send, _('Resend'))
        if self.values[evt.m_itemIndex]['State'] == 'UnSent':
            menu.Append(self.popup_id_send, _('Send'))
        if self.values[evt.m_itemIndex]['State'] in ('Read', 'UnRead'):
            menu.Append(self.popup_id_reply, _('Reply'))
        if self.values[evt.m_itemIndex]['Number'] != '':
            menu.Append(self.popup_id_call, _('Call'))
        menu.AppendSeparator()
    # Contact and call listings can message or dial the number.
    if self.popup_index != -1 and self.type in ['contact', 'call']:
        menu.Append(self.popup_id_message, _('Send message'))
        menu.Append(self.popup_id_call, _('Call'))
        if self.popup_index != -1 and self.type in ['call']:
            menu.Append(self.popup_id_contact, _('Store as new contact'))
        menu.AppendSeparator()
    # Call entries cannot be edited; calls and messages cannot be duplicated.
    if self.popup_index != -1 and self.type not in ['call', 'message']:
        menu.Append(self.popup_id_edit, _('Edit'))
    if self.popup_index != -1 and self.type not in ['call']:
        menu.Append(self.popup_id_duplicate, _('Duplicate'))
        menu.AppendSeparator()
    # Delete/backup of the clicked item needs a valid index; selection
    # and whole-listing variants are always available.
    if self.popup_index != -1:
        menu.Append(self.popup_id_delete, _('Delete current'))
    menu.Append(self.popup_id_delete_selection, _('Delete selected'))
    menu.AppendSeparator()
    if self.popup_index != -1:
        menu.Append(self.popup_id_backup_one, _('Backup current'))
    menu.Append(self.popup_id_backup_selection, _('Backup selected'))
    menu.Append(self.popup_id_backup_all, _('Backup all'))
    # Popup the menu. If an item is selected then its handler
    # will be called before PopupMenu returns.
    self.PopupMenu(menu, evt.GetPoint())
# --- Context menu handlers: forward the clicked item to the main window ---

def OnPopupDuplicate(self, event):
    # Ask parent to create a copy of the clicked item.
    evt = Wammu.Events.DuplicateEvent(data=self.values[self.popup_index])
    wx.PostEvent(self.win, evt)

def OnPopupReply(self, event):
    evt = Wammu.Events.ReplyEvent(data=self.values[self.popup_index])
    wx.PostEvent(self.win, evt)

def OnPopupSend(self, event):
    evt = Wammu.Events.SendEvent(data=self.values[self.popup_index])
    wx.PostEvent(self.win, evt)

def OnPopupCall(self, event):
    evt = Wammu.Events.CallEvent(data=self.values[self.popup_index])
    wx.PostEvent(self.win, evt)

def OnPopupMessage(self, event):
    evt = Wammu.Events.MessageEvent(data=self.values[self.popup_index])
    wx.PostEvent(self.win, evt)

def OnPopupContact(self, event):
    # Reset location so the entry is stored as a brand new contact in
    # phone memory ('ME') instead of overwriting an existing one.
    data = self.values[self.popup_index]
    data['Location'] = 0
    data['MemoryType'] = 'ME'
    evt = Wammu.Events.EditEvent(data=data)
    wx.PostEvent(self.win, evt)

def OnPopupEdit(self, event):
    evt = Wammu.Events.EditEvent(data=self.values[self.popup_index])
    wx.PostEvent(self.win, evt)

def OnPopupDelete(self, event):
    self.DoDelete([self.values[self.popup_index]])

def OnPopupDeleteSel(self, event):
    self.DoSelectedDelete()

def OnPopupBackupOne(self, event):
    self.DoBackup([self.values[self.popup_index]])

def OnPopupBackupSel(self, event):
    # Back up every currently selected row.
    item_list = []
    index = self.GetFirstSelected()
    while index != -1:
        item_list.append(self.values[index])
        index = self.GetNextSelected(index)
    self.DoBackup(item_list)

def OnPopupBackupAll(self, event):
    self.DoBackup(self.values)
def OnColClick(self, evt):
    # Clicking a column header re-sorts the listing by that column.
    self.Resort(evt.GetColumn())

def OnItemSelected(self, event):
    # Remember the selection and let the parent show its details.
    self.itemno = event.m_itemIndex
    evt = Wammu.Events.ShowEvent(data=self.values[event.m_itemIndex])
    wx.PostEvent(self.win, evt)

def OnItemActivated(self, event):
    # Double click / Enter opens the item for editing.
    evt = Wammu.Events.EditEvent(data=self.values[event.m_itemIndex])
    wx.PostEvent(self.win, evt)

def getColumnText(self, index, col):
    # Convenience accessor for the text displayed at (row, column).
    item = self.GetItem(index, col)
    return item.GetText()
def OnGetItemText(self, item, col):
    '''
    Get item text.

    Called by wx for virtual list controls to obtain cell text.
    '''
    # NOTE(review): returns None for out-of-range rows, while wx usually
    # expects a string here — confirm this is intentional.
    if item >= len(self.values):
        return None
    return StrConv(self.values[item][self.keys[col]])
def OnGetItemAttr(self, item):
    '''
    Return display attributes for a row: synced rows use the plain
    attributes, unsynced ones the italic pair, and odd/even rows
    alternate between the two background shades.
    '''
    odd_row = bool(item % 2)
    if self.values[item]['Synced']:
        return self.attr1 if odd_row else self.attr2
    return self.attr3 if odd_row else self.attr4
| gammu/wammu | Wammu/Browser.py | Browser.py | py | 19,382 | python | en | code | 63 | github-code | 36 |
43535999214 | """
Very simple Flask web site, with one page
displaying a course schedule.
"""
import flask
from flask import render_template
from flask import request
from flask import url_for
from flask import jsonify # For AJAX transactions
import json
import logging
# Date handling
import arrow # Replacement for datetime, based on moment.js
import datetime # But we still need time
from dateutil import tz # For interpreting local times
# Our own module
# import acp_limits
###
# Globals
###
app = flask.Flask(__name__)
import CONFIG
import uuid
app.secret_key = str(uuid.uuid4())
app.debug=CONFIG.DEBUG
app.logger.setLevel(logging.DEBUG)
###
# Pages
###
@app.route("/")
@app.route("/index")
@app.route("/calc")
def index():
    """Serve the main brevet calculator page."""
    app.logger.debug("Main page entry")
    return render_template('calc.html')
@app.errorhandler(404)
def page_not_found(error):
    """Render a friendly 404 page and remember where to link back to."""
    app.logger.debug("Page not found")
    flask.session['linkback'] = flask.url_for("calc")
    return flask.render_template('page_not_found.html'), 404
############### Work around for loading extra js plugins ##############
# Each route below serves a JS asset from the templates folder — a
# workaround for assets that were not placed under Flask's static dir.
@app.route("/_moment")
def moment():
    """Serve moment.js."""
    app.logger.debug("Moment.js Page")
    return flask.render_template('moment.js')

@app.route("/_collapse")
def collapse():
    """Serve collapse.js."""
    app.logger.debug("Collapse.js Page")
    return flask.render_template('collapse.js')

@app.route("/_transitions")
def transitions():
    """Serve transition.js."""
    app.logger.debug("Transition.js Page")
    return flask.render_template('transition.js')

@app.route("/_bootdate")
def bootdate():
    """Serve the bootstrap datetimepicker script."""
    app.logger.debug("Bootstrap Datepicker.js Page")
    return flask.render_template('bootstrap-datetimepicker.min.js')

@app.route("/_boot")
def boot():
    """Serve bootstrap.min.js."""
    app.logger.debug("Bootstrap min.js Page")
    return flask.render_template('bootstrap.min.js')
######################################################
###############
#
# AJAX request handlers
# These return JSON, rather than rendering pages.
#
###############
@app.route("/_calc_close_times")
def calc_times():
    """
    Calculates close times from miles, using rules described at
    http://www.rusa.org/octime_alg.html.

    Expects one URL-encoded argument ``miles``; returns JSON with the
    closing time in hours under the ``result`` key.
    """
    app.logger.debug("Got a JSON request")
    miles = request.args.get('miles', 0, type=int)
    if 0 <= miles <= 600:
        return jsonify(result=miles/15)
    elif 600 < miles <= 999:
        return jsonify(result=miles/11.428)
    elif 999 < miles <= 1299:
        return jsonify(result=miles/13.333)
    # Previously any distance outside the table fell through and returned
    # None (an HTTP 500); reject such requests explicitly instead.
    flask.abort(400)
@app.route("/_calc_open_times")
def calc_open_times():
    """
    Calculates open times from miles, using rules described at
    http://www.rusa.org/octime_alg.html.

    Expects one URL-encoded argument ``miles``; returns JSON with the
    opening time in hours under the ``result`` key.
    """
    app.logger.debug("Got a JSON request")
    miles = request.args.get('miles', 0, type=int)
    # The first two branches used jsonify(hours=...) while the rest used
    # jsonify(result=...); normalized to ``result`` to match calc_times so
    # the client reads one consistent key.
    if 0 <= miles <= 200:
        return jsonify(result=miles//34)
    elif 200 < miles <= 400:
        return jsonify(result=miles//32)
    elif 400 < miles <= 600:
        return jsonify(result=miles//30)
    elif 600 < miles <= 1000:
        return jsonify(result=miles//28)
    elif 1000 < miles <= 1300:
        return jsonify(result=miles//26)
    # Out-of-range distances used to fall through and return None (HTTP 500).
    flask.abort(400)
#################
#
# Functions used within the templates
#
#################
@app.template_filter( 'fmtdate' )
def format_arrow_date(date):
    """Template filter: format a date as e.g. 'Mon 01/02/2016'."""
    try:
        normal = arrow.get(date)
        return normal.format("ddd MM/DD/YYYY")
    except Exception:
        # Narrowed from a bare except, which would also swallow
        # KeyboardInterrupt/SystemExit.
        return "(bad date)"
@app.template_filter( 'fmttime' )
def format_arrow_time(time):
    """Template filter: format a time as 'hh:mm'."""
    try:
        # BUG fix: this used arrow.get(date), referencing an undefined
        # name, so every call raised NameError and fell into the except.
        normal = arrow.get(time)
        return normal.format("hh:mm")
    except Exception:
        return "(bad time)"
#############
if __name__ == "__main__":
    import uuid
    # Fresh random secret key per run; sessions do not survive restarts.
    app.secret_key = str(uuid.uuid4())
    app.debug=CONFIG.DEBUG
    app.logger.setLevel(logging.DEBUG)
    app.run(port=CONFIG.PORT)
| RedMustard/proj3-ajax | app.py | app.py | py | 3,951 | python | en | code | null | github-code | 36 |
72694605863 | import requests
from datetime import datetime
from helpers.db import ExchangeRateDb
class RetrieveHourlyCryptoToUSDData:
    """Writes an hourly exchange-rate sample into the local SQLite store."""

    def __init__(self):
        # Path to the SQLite database holding per-currency rate tables.
        self.db_path = 'helpers/cryptocurrency_exchange_rate.db'
        self.db = ExchangeRateDb(self.db_path)
        # NOTE(review): currency starts as None; a caller apparently must
        # set it before insert_data_to_table, otherwise the table name
        # becomes 'None_exchange_rate' — confirm intended usage.
        self.currency = None

    def insert_data_to_table(self, exchange_rate):
        """Insert one (hour-truncated timestamp, rate) row for self.currency."""
        updated_time = datetime.now().strftime('%Y-%m-%d %H:00:00')
        # NOTE(review): values are interpolated directly into the SQL text;
        # if exchange_rate or currency can come from untrusted input this
        # is an SQL injection risk — prefer parameterized queries if
        # ExchangeRateDb supports them.
        query = f'''
        insert into {self.currency}_exchange_rate (date, '{self.currency}')
        values ('{updated_time}', '{exchange_rate}')
        '''
        self.db.execute(query=query)
| madeleinema-cee/walletwatch_python_backend | update/generic_retrieve_exchange_rate_class.py | generic_retrieve_exchange_rate_class.py | py | 635 | python | en | code | 0 | github-code | 36 |
32352103733 | from service_app.logger import get_logger
from scrapers_app.constants import *
from lxml import html
import requests
import copy
import re
logger = get_logger(__name__)
# attribute == element (each `elements` key names one piece of page data)
class ZaraItemInfoScraper:
    """Scrapes name, sizes, colors and price from a Zara item page."""

    # Keys naming each piece of information looked up on the page.
    NAME = "name"
    SIZES_ON_SITE = "sizes_on_site"
    COLORS_ON_SITE = "colors_on_site"
    PRICE = "current_price"

    # Candidate XPaths for each piece of information (first match wins).
    elements = {
        NAME: ['//*[@class = "product-detail-info__name"]'],
        SIZES_ON_SITE: ['//*[@class = "product-detail-size-info__main-label"]'],
        COLORS_ON_SITE: ['//*[@class = "product-detail-color-selector__color-area"]/span'],
        PRICE: ['//*[@class = "price__amount-current"]']
    }

    def __init__(self, item):
        self.item = item
        self.html_tree = self.get_page_html_tree()
        # Information found on the page, keyed by element name.
        self.found_elements = {}
        # XPath that produced each found element.
        self.found_elements_xpaths = {}

    @logger.log_scraper_method
    def find_elements_on_page(self, elements=None):
        """Populate found_elements/found_elements_xpaths for the given names."""
        if elements is None:
            elements = self.elements.keys()
        for element_name in elements:
            for element_xpath in self.elements[element_name]:
                found_elements = self.html_tree.xpath(element_xpath)
                if len(found_elements) != 0:
                    self.found_elements.update({element_name: found_elements})
                    self.found_elements_xpaths.update({element_name: element_xpath})

    @logger.log_scraper_method
    def init_item(self, elements=None):
        """Copy the scraped values onto the item for the requested elements."""
        if elements is None:
            elements = [self.NAME, self.SIZES_ON_SITE, self.COLORS_ON_SITE, self.PRICE]
        if self.NAME in elements:
            self.item.name = self.get_name()
        if self.SIZES_ON_SITE in elements:
            self.item.sizes_on_site = self.get_sizes()
        if self.COLORS_ON_SITE in elements:
            self.item.colors_on_site = self.get_colors()
        if self.PRICE in elements:
            self.item.current_price = self.get_price()

    def get_page_html_tree(self):
        """Download the item page and parse it into an lxml HTML tree."""
        response = requests.get(self.item.url, headers=HEADERS)
        logger.debug(f"{response}: {self.item.url}")
        return html.fromstring(response.text)

    def get_name(self):
        return self.found_elements[self.NAME][0].text

    def get_sizes(self):
        return [x.text for x in self.found_elements[self.SIZES_ON_SITE]] if self.item.has_sizes else []

    def get_colors(self):
        return [x.text for x in self.found_elements[self.COLORS_ON_SITE]] if self.item.has_colors else []

    def get_price(self):
        # Strip currency symbols/separators, keeping the digits only.
        return int("".join(re.findall(r"\d+", self.found_elements[self.PRICE][0].text)))

    @property
    def not_found_elements(self):
        """Names of required elements that were not located on the page."""
        # list() already copies the keys; the extra copy.copy() was redundant.
        needed_elements = list(self.elements.keys())
        if not self.item.has_sizes:
            needed_elements.remove(self.SIZES_ON_SITE)
        if not self.item.has_colors:
            needed_elements.remove(self.COLORS_ON_SITE)
        return [x for x in needed_elements if x not in self.found_elements]

    @property
    def found_all_elements(self):
        return len(self.not_found_elements) == 0
| Radislav123/discount_waiter | scrapers_app/scrapers/zara_item_info_scraper.py | zara_item_info_scraper.py | py | 3,298 | python | en | code | 0 | github-code | 36 |
11194234055 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Use this to execute the differential kinematics
controller in our kinecontrol paper.
'''
from __future__ import print_function
import Sofa
import math
import sys, os
import time
import logging
import datetime
import numpy as np
from utils import *
from config import *
from matplotlib import pyplot as plt
import matplotlib.gridspec as gridspec
logger = logging.getLogger(__name__)
# https://www.sofa-framework.org/community/forum/topic/get-the-position-value-from-a-mechanicalobject-point-in-python/
def moveRestPos(rest_pos, pose):
    '''
    Translate every point in rest_pos by the (dx, dy, dz) offset in pose
    and return the result as a single space separated string — the format
    SOFA expects for a rest position vector.

    :param rest_pos: sequence of (x, y, z) point sequences.
    :param pose: (dx, dy, dz) translation offset.
    '''
    dx, dy, dz = pose
    coords = []
    for point in rest_pos:
        coords.extend((str(point[0] + dx), str(point[1] + dy), str(point[2] + dz)))
    # The original accumulated the string via repeated concatenation
    # (quadratic); a list plus join is linear and yields identical output.
    return '  ' + ' '.join(coords) if coords else ' '
def rotateRestPos(rest_pos, rx, centerPosY, centerPosZ):
    '''
    Rotate every point by rx radians about the X axis around the center
    (centerPosY, centerPosZ) and return a space separated position string.

    :param rest_pos: sequence of (x, y, z) point sequences.
    :param rx: rotation angle in radians.
    :param centerPosY: Y coordinate of the rotation center.
    :param centerPosZ: Z coordinate of the rotation center.
    '''
    # Hoist the trigonometry out of the loop; it is loop invariant and was
    # recomputed four times per point.  range() replaces the Python 2 only
    # xrange() so the function works on both interpreters.
    cos_rx = math.cos(rx)
    sin_rx = math.sin(rx)
    str_out = ' '
    for i in range(0, len(rest_pos)):
        newRestPosY = (rest_pos[i][1] - centerPosY) * cos_rx - (rest_pos[i][2] - centerPosZ) * sin_rx + centerPosY
        newRestPosZ = (rest_pos[i][1] - centerPosY) * sin_rx + (rest_pos[i][2] - centerPosZ) * cos_rx + centerPosZ
        str_out = str_out + ' ' + str(rest_pos[i][0])
        str_out = str_out + ' ' + str(newRestPosY)
        str_out = str_out + ' ' + str(newRestPosZ)
    return str_out
class controller(Sofa.PythonScriptController):
    '''
    SOFA Python script controller for the dome model.

    For examples, see:
    + Keyboard Control:
        - https://github.com/lakehanne/sofa/blob/master/examples/Tutorials/StepByStep/Dentistry_Python/keyboardControl.py
    + Parallel and SSH Launcher:
        - https://github.com/lakehanne/sofa/blob/master/tools/sofa-launcher/launcher.py
    + OneParticle:
        - https://github.com/lakehanne/sofa/blob/master/tools/sofa-launcher/example.py
    '''
    def initGraph(self, root):
        # Parameters come from config.py (imported via *).
        self.move_dist = move_dist  #(0, .40, 0)
        self.growth_rate = growth_rate  #.5 #was .05
        self.max_pressure = max_pressure  #100 # was 15
        self.root = root
        dome_all_dofs = self.get_dome_dofs(self.root)
        self.dh_dofs = dome_all_dofs.dh_dofs
        self.cav_dofs = dome_all_dofs.cav_dofs
        self.cover_dofs = dome_all_dofs.cover_dofs

    # domes' mechanical states
    def get_dome_dofs(self, node):
        'here node is root'
        domehead = node.getChild('DomeHead')
        dh_dofs = domehead.getObject('dh_dofs')
        cav_node = domehead.getChild('DomeCavity')
        cav_dofs = cav_node.getObject('dome_cav_dofs')
        cover_node = domehead.getChild('DomeCover')
        cover_dofs = cover_node.getObject('dome_cover_dofs')
        # NOTE(review): cover_collis_dofs is fetched but not returned in
        # the Bundle below — confirm whether it is still needed.
        cover_collis_node = domehead.getChild('DomeCoverCollis')
        cover_collis_dofs = cover_collis_node.getObject('dome_cover_collis_dofs')
        return Bundle(dict(dh_dofs=dh_dofs,
                           cav_dofs=cav_dofs,
                           cover_dofs=cover_dofs
                           ))

    def bwdInitGraph(self, node):
        # find the position at the end of the shape (which has the biggest x coordinate)
        dh_dofs = self.get_dome_dofs(self.root).dh_dofs.position
        max_x, max_y, max_z = 0, 0, 0
        max_idx_x, max_idx_y, max_idx_z = 0, 0, 0
        for i in range(len(dh_dofs)):
            if dh_dofs[i][0] > max_x:
                max_idx_x = i
                max_x = dh_dofs[i][0]
            if dh_dofs[i][1] > max_y:
                max_idx_y = i
                max_y = dh_dofs[i][1]
            if dh_dofs[i][2] > max_z:
                max_idx_z = i
                max_z = dh_dofs[i][2]
        self.max_vals = Bundle(dict(max_x=max_x, max_y=max_y, max_z=max_z))
        print('dh trans [x,y,z] {}, {}, {}'.format(max_x, max_y, max_z))
        return 0

    def run_traj_plotter(self):
        # NOTE(review): traj_plotter / is_chart_updated / data are not set
        # anywhere in this file; presumably initialized elsewhere — confirm.
        if self.is_chart_updated:
            self.traj_plotter.update(self.data)
            # time.sleep(.11)
            self.is_chart_updated = False
        return 0

    def deform_positive(self, dofs):
        print('dome head dofs: ', dofs.position)

    def onBeginAnimationStep(self, deltaTime):
        deltaTime += deltaTime
        # obtain associated dofs and cavity dofs
        # NOTE(review): deltaTime never changes inside this loop, so for any
        # doubled deltaTime < 2 this spins forever — confirm intent.
        while(deltaTime < 2):
            self.deform_positive(self.dh_dofs)
        return 0;

    def onEndAnimationStep(self, deltaTime):
        sys.stdout.flush()
        #access the 'position' state vector
        self.bwdInitGraph(self.root)
        return 0;

    def onKeyPressed(self, c):
        self.dt = self.root.findData('dt').value
        incr = self.dt*1000.0
        self.dh_dofs = self.get_dome_dofs(self.root).dh_dofs
        # self.dh_dofs = dome_all_dofs.dh_dofs
        if (ord(c)==19):  # UP Key
            print("expanding ...")
            test = moveRestPos(self.dh_dofs.position, (300.0, 300.0, 300.0))
            self.dh_dofs.findData('position').value = test
        if (ord(c)==21):  # DOWN Key
            print("retracting ...")
            test = moveRestPos(self.dh_dofs.position, (-300.0, -300.0, -300.0))
            self.dh_dofs.findData('position').value = test
        self.bwdInitGraph(self.root)

    def onLoaded(self, node):
        return 0;

    def reset(self):
        ## Please feel free to add an example for a simple usage in /home/lex/catkin_ws/src/superchicko/sofa/python/xml_2_scn.py
        return 0;

    def onMouseButtonMiddle(self, mouseX,mouseY,isPressed):
        # usage e.g.
        if isPressed :
            print("Control+Middle mouse button pressed at position "+str(mouseX)+", "+str(mouseY))
        return 0;

    def onScriptEvent(self, senderNode, eventName,data):
        ## Please feel free to add an example for a simple usage in /home/lex/catkin_ws/src/superchicko/sofa/python/xml_2_scn.py
        return 0;

    def onMouseButtonRight(self, mouseX,mouseY,isPressed):
        ## usage e.g.
        if isPressed :
            print("Control+Right mouse button pressed at position "+str(mouseX)+", "+str(mouseY))
        return 0;

    def onMouseButtonLeft(self, mouseX,mouseY,isPressed):
        ## usage e.g.
        if isPressed :
            print("Control+Left mouse button pressed at position "+str(mouseX)+", "+str(mouseY))
        return 0;
| robotsorcerer/superchicko | sofa/python/kinecontrol/single_controller.py | single_controller.py | py | 5,485 | python | en | code | 0 | github-code | 36 |
18878055040 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
try:
from imp import reload
except ImportError:
from importlib import reload
from django.core.urlresolvers import resolve, reverse
from django.http import Http404
from django.test import override_settings
from django.utils.translation import override
import parler.appsettings
from ..models import Category
from ..views import FaqByCategoryView, FaqAnswerView
from .test_base import AldrynFaqTest
class TestFaqByCategoryView(AldrynFaqTest):
    """View tests for the FAQ category list/detail views."""

    reload_parler_appsettings = True

    def test_view_context(self):
        """Tests that the FaqByCategoryView produces the correct context."""
        category_1 = self.reload(self.category1, "en")
        category_1_url = category_1.get_absolute_url()
        question_1 = self.reload(self.question1, "en")
        request = self.get_page_request(
            page=self.page,
            user=self.user,
            path=category_1_url,
        )
        url_kwargs = resolve(category_1_url).kwargs
        try:
            response = FaqByCategoryView.as_view()(request, **url_kwargs)
        except Http404:
            self.fail('Could not find category')
        self.assertEqualItems(
            response.context_data['object_list'],
            [question_1, ],
        )

    def test_view_context_fallback(self):
        """
        Tests that the FaqByCategoryView produces the correct context
        when requesting a category in an untranslated language.
        """
        category_2 = self.reload(self.category2, "en")
        category_2_url = category_2.get_absolute_url()
        question_2 = self.reload(self.question2, "en")
        request = self.get_page_request(
            page=self.page,
            user=self.user,
            path=category_2_url,
        )
        url_kwargs = resolve(category_2_url).kwargs
        with override_settings(**self.enabled_parler_fallback_settings):
            # parler caches its settings at import time, so reload after
            # overriding them.
            reload(parler.appsettings)
            try:
                response = FaqByCategoryView.as_view()(request, **url_kwargs)
            except Http404:
                self.fail('Could not find category')
            self.assertEqualItems(
                response.context_data['object_list'],
                [question_2, ],
            )

    def test_view_old_format_redirect(self):
        """
        Tests that the FaqByCategoryView redirects user
        when accessed with old category url format
        """
        category_1 = self.reload(self.category1, "en")
        category_1_url_new = category_1.get_absolute_url()
        kwargs = {"category_slug": category_1.slug}
        with override('en'):
            category_1_url_name = '{ns}:faq-category'.format(
                ns=self.app_config.namespace
            )
            category_1_url_old = reverse(category_1_url_name, kwargs=kwargs)
        request = self.get_page_request(
            page=self.page,
            user=self.user,
            path=category_1_url_old,
        )
        response = FaqByCategoryView.as_view()(request, **kwargs)
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual.
        self.assertEqual(response.status_code, 301)
        self.assertEqual(response.url, category_1_url_new)

    def test_list_view(self):
        """Test category list view to contain a proper set of categories"""
        def _do_test_list_view(language_code):
            with override(language_code):
                categories = Category.objects.language(
                    language_code).active_translations(language_code).filter(
                    appconfig=self.app_config)
                url = reverse('{ns}:faq-category-list'.format(
                    ns=self.app_config.namespace))
                response = self.client.get(url, follow=True)
                for category in categories:
                    self.assertContains(response, category.name)

        for language_code in ('en', 'de'):
            _do_test_list_view(language_code)
        with override_settings(**self.settings_en):
            reload(parler.appsettings)
            _do_test_list_view('en')
class TestFaqAnswerView(AldrynFaqTest):
    """View tests for the single-answer (question detail) view."""

    reload_parler_appsettings = True

    def test_view_context(self):
        """Tests that the FaqAnswerView produces the correct context."""
        question_1 = self.reload(self.question1, "en")
        question_1_url = question_1.get_absolute_url("en")
        url_kwargs = resolve(question_1_url).kwargs
        request = self.get_page_request(
            page=self.page,
            user=self.user,
            path=question_1_url,
        )
        response = FaqAnswerView.as_view()(request, **url_kwargs)
        self.assertEqual(
            response.context_data['object'],
            question_1,
        )

    def test_view_context_fallback(self):
        """
        Tests that the FaqAnswerView produces the correct context
        when requesting a question in an untranslated language.
        """
        # NOTE(review): this reloads question1 (not question2) into a
        # variable named question_2 -- looks like a copy-paste slip, but
        # the assertion is self-consistent, so behavior is left unchanged.
        question_2 = self.reload(self.question1, "en")
        question_2_url = question_2.get_absolute_url("en")
        url_kwargs = resolve(question_2_url).kwargs
        request = self.get_page_request(
            page=self.page,
            user=self.user,
            path=question_2_url,
        )
        with override_settings(**self.enabled_parler_fallback_settings):
            reload(parler.appsettings)
            response = FaqAnswerView.as_view()(request, **url_kwargs)
            self.assertEqual(
                response.context_data['object'],
                question_2,
            )

    def test_view_old_format_redirect(self):
        """
        Tests that the FaqAnswerView redirects user
        when accessed with old category url format
        """
        category_1 = self.reload(self.category1, "en")
        question_1 = self.reload(self.question1, "en")
        question_1_url_new = question_1.get_absolute_url()
        kwargs = {
            "category_slug": category_1.slug,
            "pk": question_1.pk
        }
        with override('en'):
            url_name = '{ns}:faq-answer'.format(ns=self.app_config.namespace)
            question_1_url_old = reverse(url_name, kwargs=kwargs)
        request = self.get_page_request(
            page=self.page,
            user=self.user,
            path=question_1_url_old,
        )
        response = FaqAnswerView.as_view()(request, **kwargs)
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual.
        self.assertEqual(response.status_code, 301)
        self.assertEqual(response.url, question_1_url_new)

    def test_answer_match_category(self):
        """
        Tests that the question id given in url
        belongs to the given category, if not then 404 is raised.
        """
        category_1 = self.reload(self.category1, "de")
        question_2 = self.reload(self.question2, "de")
        kwargs = {
            "category_pk": category_1.pk,
            "category_slug": category_1.slug,
            "pk": question_2.pk
        }
        with override('de'):
            url_name = '{ns}:faq-answer'.format(ns=self.app_config.namespace)
            question_2_invalid_url = reverse(url_name, kwargs=kwargs)
        request = self.get_page_request(
            page=self.page,
            user=self.user,
            path=question_2_invalid_url,
        )
        with self.assertRaises(Http404):
            FaqAnswerView.as_view()(request, **kwargs)
| aldryn/aldryn-faq | aldryn_faq/tests/test_views.py | test_views.py | py | 7,363 | python | en | code | 5 | github-code | 36 |
19279623611 | import tensorflow as tf
import numpy as np
import random
from agents.AbstractAgent import AbstractAgent
from minigames.utils import state_of_marine, move_to_position
from utils.select_algorithm import choose_algorithm
from utils.replay_buffer import UniformBuffer
class Agent(AbstractAgent):
    """Deep-Q agent for a PySC2 mini-game (marine / beacon navigation).

    Builds a main/target network pair via ``choose_algorithm`` and stores
    transitions in a uniform replay buffer. The screen observation from
    the environment is converted to an (H, W, C)-shaped tensor and used
    as the network input.
    """

    def __init__(self, env, action_dim, screen_size, method, gamma=0.99, epsilon=1.0, lr=1e-4, loss='mse', batch_size=32,
                 epsilon_decrease=0.001, epsilon_min=0.05, update_target=2000, num_episodes=5000, max_memory=100000):
        super(Agent, self).__init__(screen_size)
        # Reset once just to read the screen shape and size the network input.
        obs = env.reset()
        screen = np.array(obs.observation['feature_screen'])
        # NOTE(review): np.reshape reorders memory, not axes -- if the intent
        # was channels-first -> channels-last, a transpose may have been
        # intended here; verify against the network definitions.
        screen = np.reshape(screen, (screen.shape[1], screen.shape[2], screen.shape[0]))
        screen = tf.convert_to_tensor(screen, dtype=tf.float64)
        self.input_dim = screen.shape
        self.action_dim = action_dim
        # Hyperparameters.
        self.gamma = gamma
        self.epsilon = epsilon
        self.lr = lr
        self.loss = loss
        self.batch_size = batch_size
        self.epsilon_decrease = epsilon_decrease
        self.epsilon_min = epsilon_min
        self.update_target = update_target
        self.num_episodes = num_episodes
        self.memory_size = max_memory
        self.cur_frame = 0
        # Main and target networks, plus optimizer/loss, chosen by `method`.
        self.main_nn, self.target_nn, \
        self.optimizer, self.loss_fn = choose_algorithm(method, self.input_dim, self.action_dim,
                                                        self.lr, self.loss)
        # Buffer where the agent's experiences are stored.
        self.buffer = UniformBuffer(self.memory_size)

    def step(self, state, pos_marine):
        """Pick an action for ``state`` and build the matching move order.

        Returns the chosen action index together with the MOVE_SCREEN call
        that moves the marine by the offset this action maps to.
        """
        action = self.select_epsilon_greedy_action(state)
        # Each action index maps to a fixed displacement on the screen.
        destination = move_to_position(action, self.screen_size)
        return action, self._MOVE_SCREEN("now", self._xy_offset(pos_marine, destination[0], destination[1]))

    def state_marine(self, obs):
        """Return (state tensor, marine position, marine-beacon distance)."""
        # Representation of the beacon and the marine.
        beacon = self.get_beacon(obs)
        marine = self.get_marine(obs)
        dist = np.hypot((beacon.x - marine.x), (beacon.y - marine.y))
        screen = np.array(obs.observation['feature_screen'])
        # Same reshape caveat as in __init__ (reshape vs transpose).
        screen = np.reshape(screen, (screen.shape[1], screen.shape[2], screen.shape[0]))
        state = tf.convert_to_tensor(screen, dtype=tf.float64)
        pos_marine = self.get_unit_pos(marine)
        return state, pos_marine, dist

    def select_army(self, obs):
        """Return the SELECT_ARMY call on the episode's first observation.

        Implicitly returns None on later steps.
        """
        # The first action selects the army.
        if obs.first():
            return self._SELECT_ARMY

    def select_epsilon_greedy_action(self, state, aux_epsilon=1.0):
        """Take a random action with prob. epsilon; otherwise the greedy one."""
        result = tf.random.uniform((1,))
        if result < self.epsilon and result < aux_epsilon:
            return random.choice(range(self.action_dim)) #env.action_space.sample() # Random action.
        else:
            # Add a leading batch dimension before querying the network.
            state = np.reshape(state, (1, tf.shape(state)[0].numpy(), tf.shape(state)[1].numpy(), tf.shape(state)[2].numpy()))
            return tf.argmax(self.main_nn.predict(state)[0]).numpy() # Greedy action.

    def train_step(self, states, actions, rewards, next_states, dones):
        """Run one training iteration on a batch of transitions.

        Bootstraps the TD target from the target network and updates the
        main network on the Q-values of the taken actions only.
        """
        next_qs = self.target_nn.predict(next_states, batch_size=self.batch_size)
        max_next_qs = tf.reduce_max(next_qs, axis=-1)
        # TD target: r + gamma * max_a' Q_target(s', a'), masked at terminals.
        target = rewards + (1. - dones) * self.gamma * max_next_qs
        with tf.GradientTape() as tape:
            qs = self.main_nn(states)
            # Keep only the Q-value of the action actually taken.
            action_masks = tf.one_hot(actions, self.action_dim)
            masked_qs = tf.reduce_sum(action_masks * qs, axis=-1)
            loss = self.loss_fn(target, masked_qs)
        grads = tape.gradient(loss, self.main_nn.trainable_variables)
        self.optimizer.apply_gradients(zip(grads, self.main_nn.trainable_variables))
        return loss

    def decrease_epsilon(self):
        """Linearly decay epsilon, clamped at ``epsilon_min``."""
        if self.epsilon > self.epsilon_min:
            self.epsilon -= self.epsilon_decrease
        else:
            self.epsilon = self.epsilon_min

    def copy_weights(self, Copy_from, Copy_to):
        """Copy the trainable weights of ``Copy_from`` into ``Copy_to``."""
        variables2 = Copy_from.trainable_variables
        variables1 = Copy_to.trainable_variables
        for v1, v2 in zip(variables1, variables2):
            v1.assign(v2.numpy())

    def save_model(self, filename):
        # NOTE(review): ``self.learner`` is never assigned in this class;
        # these two methods look copied from another agent -- verify before
        # calling them.
        self.learner.save_q_table(filename + '/model.pkl')

    def load_model(self, filename):
        self.learner.load_model(filename + '/model.pkl')
| ericPrimelles/RLProject | agents/Agent.py | Agent.py | py | 4,824 | python | en | code | 0 | github-code | 36 |
15185050967 | """Extraction."""
import json
import logging
import time
from pathlib import Path
from typing import Any, List
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup, SoupStrainer
from requests.exceptions import HTTPError
# Base directory for all extracted artifacts (inside the Airflow worker).
DATA_DIR = "/opt/airflow/data"
# The IMDB dataset tables this pipeline tracks.
IMDB_TABLES = ["title.basics", "title.ratings"]
def _extract_nyt_reviews(url: str, key: str, left_boundary: str, right_boundary: str) -> bool:
    """Extract NYT movie reviews from movie review API.

    Fetch movie reviews in a time frame starting at left_boundary and ending
    at right_boundary. The server only allows for 10 requests per minute so,
    there will be a timeout of one minute in case a 429 status code is
    encountered. The result is dumped as json to ./data.

    Args:
        url: URL for the NYT movie review API.
        key: Key for the NYT movie review API.
        left_boundary: Start date, format must be %Y-%m-%d.
        right_boundary: End date, format must be %Y-%m-%d.

    Returns:
        Boolean indicating if reviews were dumped.
    """
    movies: List[dict] = []
    has_more = True
    offset = 0
    while has_more:
        try:
            response = requests.get(
                url=url + "/reviews/search.json",
                params={
                    "api-key": key,
                    "opening-date": f"{left_boundary}:{right_boundary}",
                    "offset": str(offset),
                },
            )
            response.raise_for_status()
            response_parsed = response.json()
            # Check if response has more results
            has_more = response_parsed["has_more"]
            offset += 20
            results = response_parsed["results"]
            if results is not None:
                movies += results
        except HTTPError as err:
            if err.response.status_code == 429:
                # Rate limited: wait a minute, then retry the same offset.
                time.sleep(60)
            else:
                # Previously any other HTTP error was logged and retried
                # forever; log it and stop paginating instead.
                logging.error(err)
                has_more = False
    file_name = "nyt-review.json"
    if movies:
        logging.info(f"Fetched {len(movies)} movie reviews. Writing to {file_name}.")
        # Reuse file_name for the write path (it was previously only used
        # in the log message, with the path duplicated by hand).
        with open(f"{DATA_DIR}/nyt/{file_name}", "w") as f:
            json.dump(movies, f, indent=4)
    else:
        logging.info("No reviews available.")
    return bool(movies)
def _get_download_links(url: str) -> List[str]:
    """Get download links from url.

    Parse the site and extract all hrefs that point to zipped files.

    Args:
        url: The URL for the site to parse.

    Returns:
        A list of urls.
    """
    links = []
    response = requests.get(url)
    for link in BeautifulSoup(response.content, parse_only=SoupStrainer("a"), features="lxml"):
        # hasattr(link, "href") does not test HTML attributes on a bs4 Tag
        # (it triggers a child-tag lookup instead), and link["href"] raises
        # KeyError for anchors without an href. Tag.get handles both cases.
        href = link.get("href") if hasattr(link, "get") else None
        if href and href.endswith("gz"):
            links.append(href)
    return links
def _extract_imdb_datasets(url: str) -> List[str]:
    """Extract datasets from IMDB.

    Fetch the title.basics and title.ratings datasets from IMDB and dump new
    rows as csv.gz to ./data.

    Args:
        url: URL to get download links via _get_download_links.

    Returns:
        List of dumped table names.
    """
    urls = _get_download_links(url)
    # Keep only the links for the tables this pipeline tracks.
    urls = [url for url in urls if any(keep_url in url for keep_url in IMDB_TABLES)]
    # NOTE(review): zip assumes the filtered links appear in the same order
    # as IMDB_TABLES -- verify against the download page.
    tbl_urls = {tbl: url for tbl, url in zip(IMDB_TABLES, urls)}
    dumped_tbls: List[str] = []
    for tbl, url in tbl_urls.items():
        df = pd.read_table(url, header=0, compression="gzip")
        ids_file = f"{DATA_DIR}/imdb/ids/ids.{tbl}.csv"
        if Path(ids_file).exists():
            # Drop rows whose tconst was already seen in a previous run.
            existing_ids = pd.read_csv(ids_file, header=None).squeeze("columns")
            df = df.loc[~df.tconst.isin(existing_ids)]
        # Append new ids
        df.tconst.to_csv(ids_file, header=False, index=False, mode="a")
        # '\\N' encodes missing values
        df = df.where(df != "\\N", other=np.nan)
        n_rows = df.shape[0]
        file_name = f"imdb/tables/{tbl}.csv.gz"
        if n_rows > 0:
            logging.info(f"Fetched {n_rows} new rows for {tbl}. Writing to {file_name}.")
            dumped_tbls.append(tbl)
            df.to_csv(f"{DATA_DIR}/{file_name}", index=False)
        else:
            logging.info(f"No new rows for {tbl}.")
    return dumped_tbls
def _branch_nyt_tests(**context: Any) -> str:
"""Branch for testing.
Skip the data tests if there are no new reviews available.
Args:
context: Airflow context.
Returns:
ID of task to run.
"""
has_results = context["task_instance"].xcom_pull(
task_ids="extract_nyt_reviews", key="return_value"
)
return "run_tests_raw_nyt_reviews" if has_results else "skip_tests_raw_nyt_reviews"
def _branch_nyt_copy(**context: Any) -> str:
"""Branch for copying.
Skip the copy if there are no new reviews available.
Args:
context: Airflow context.
Returns:
ID of task to run.
"""
has_results = context["task_instance"].xcom_pull(
task_ids="extract_nyt_reviews", key="return_value"
)
return "copy_raw_nyt_table" if has_results else "skip_copy_raw_nyt_table"
def _branch_imdb_tests(**context: Any) -> List[str]:
    """Branch for testing IMDB datasets.

    Skip the data tests if there are no new records available.

    Args:
        context: Airflow context.

    Returns:
        IDs of tasks to run.
    """
    dumped = context["task_instance"].xcom_pull(
        task_ids="extract_imdb_datasets", key="return_value"
    )
    # One branch decision per tracked table, preserving IMDB_TABLES order.
    return [
        ("run_tests_raw_imdb_" if tbl in dumped else "skip_tests_raw_imdb_")
        + tbl.replace("title.", "")
        for tbl in IMDB_TABLES
    ]
def _branch_imdb_copy(**context: Any) -> List[str]:
    """Branch for copying IMDB datasets.

    Skip the copy if there are no new records available.

    Args:
        context: Airflow context.

    Returns:
        IDs of tasks to run.
    """
    dumped = context["task_instance"].xcom_pull(
        task_ids="extract_imdb_datasets", key="return_value"
    )
    # One branch decision per tracked table, preserving IMDB_TABLES order.
    return [
        ("copy_raw_imdb_" if tbl in dumped else "skip_copy_raw_imdb_")
        + tbl.replace("title.", "")
        + "_table"
        for tbl in IMDB_TABLES
    ]
| albutz/de-movies | dags/extract.py | extract.py | py | 6,534 | python | en | code | 0 | github-code | 36 |
70040826024 | from django.db import models
class IterFieldsValuesModel(models.Model):
    """Abstract base model whose instances iterate as (field name, value) pairs."""

    def __iter__(self):
        # Walk the model's concrete fields, pairing each field's name with
        # the instance's current value for it.
        for field in self._meta.fields:
            yield (field.name, getattr(self, field.name))

    class Meta:
        abstract = True
| luke9642/Poll | questionnaire/models/iter_fields_values_model.py | iter_fields_values_model.py | py | 289 | python | en | code | 0 | github-code | 36 |
5667898056 | from flask import Flask, request, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
from flask_restful import Resource, Api
import sqlite3 as lite
# Flask application plus the REST-ful API wrapper.
app = Flask(__name__)
api = Api(app)
# SQLite database stored next to the app; modification tracking disabled to
# avoid the Flask-SQLAlchemy overhead warning.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///covid.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
ma = Marshmallow(app)
class User(db.Model):
    """ORM model for one temperature-screening record."""

    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(32), unique=True)
    IC = db.Column(db.String(32))
    temperature = db.Column(db.String(32))
    location = db.Column(db.String(32))

    def __init__(self, username, IC, temperature, location):
        self.username = username
        self.IC = IC
        self.temperature = temperature
        self.location = location


# Create the tables only after the model class is fully defined: the
# previous create_all() call inside the class body executed before
# SQLAlchemy had registered the User model, so its table was never created.
db.create_all()


class UserSchema(ma.Schema):
    """Marshmallow schema describing the serialized User fields."""

    class Meta:
        fields = ('id', 'username', 'IC', 'temperature', 'location')


user_schema = UserSchema()
users_schema = UserSchema(many=True)
class UserManager(Resource):
    """CRUD endpoints for User records, mounted at /api/users."""

    @staticmethod
    def get():
        """Return all users as a JSON list."""
        users = User.query.all()
        return jsonify(users_schema.dump(users))

    @staticmethod
    def post():
        """Create a new user from the JSON request body."""
        username = request.json['username']
        IC = request.json['IC']
        temperature = request.json['temperature']
        location = request.json['location']
        user = User(username, IC, temperature, location)
        # SQLAlchemy sessions have no update(); new objects must be
        # add()-ed before commit (the previous db.session.update call would
        # raise AttributeError at runtime).
        db.session.add(user)
        db.session.commit()
        return jsonify({
            'Message': f'User, {username}, with IC {IC}, {temperature} Celsius inserted at {location}.'
        })

    @staticmethod
    def put():
        """Update an existing user identified by the ?id= query parameter."""
        try: id = request.args['id']
        except Exception as _: id = None
        if not id:
            return jsonify({ 'Message': 'Must provide the user ID' })
        user = User.query.get(id)
        username = request.json['username']
        IC = request.json['IC']
        temperature = request.json['temperature']
        location = request.json['location']
        user.username = username
        user.IC = IC
        user.temperature = temperature
        user.location = location
        db.session.commit()
        return jsonify({
            'Message': f'User, {username}, with IC {IC}, {temperature} Celsius altered at {location}.'
        })

    @staticmethod
    def delete():
        """Delete the user identified by the ?id= query parameter."""
        try: id = request.args['id']
        except Exception as _: id = None
        if not id:
            return jsonify({ 'Message': 'Must provide the user ID' })
        user = User.query.get(id)
        db.session.delete(user)
        db.session.commit()
        return jsonify({
            'Message': f'User {str(id)} deleted.'
        })
# Expose the resource and run the development server when executed directly.
api.add_resource(UserManager, '/api/users')

if __name__ == '__main__':
    app.run(debug=True)
35917171486 | import ipywidgets as widgets
# Measured in g/cm^3
MATERIAL_DENSITIES = {
    "Aluminium": 2.7,
    "Egetræ": 0.85,
    "Granit": 2.650,
    "Vand": 1.00,
    "Uran": 18.70,
    "Magnesium": 1.74,
    "Messing": 8.40,
    "Candy floss": 0.059,
}

# Dropdown listing each material with its density in the label.
MaterialDropDown = widgets.Dropdown(
    options=[(f"{name} ({value} g/cm^3)", value) for (name, value) in MATERIAL_DENSITIES.items()],
    description='Materiale: ',
)

# Slider for the angle in degrees.
AngleSlider = widgets.FloatSlider(
    value = 30,
    min = 0,
    max = 90,
    step = 0.1,
    description= "Vinkel",
    continuous_update=False,
    orientation='horizontal',
    readout=True,
    readout_format='.1f',
)

# Slider for the length scale. The previous label "Vinkel" (angle) was a
# copy-paste slip from AngleSlider.
LengthScaleSlider = widgets.FloatSlider(
    value = 10,
    min = 10,
    max = 100,
    step = 0.1,
    description= "Længdeskala",
    continuous_update=False,
    orientation='horizontal',
    readout=True,
    readout_format='.1f',
)
5426664308 | """
Mini text-based role-playing game for practising OOP programming concepts.
Aim of this game to get 3 friends, most of the time you have to fight with them to become friendly.
Possible commands to move the player from room to room are north, south, west, east. Other commands talk, fight, check, backpack.
"""
# import Room, Items and Characters for the game
from mini_rpg_room import Room
from mini_rpg_items import Items
from mini_rpg_characters import Characters
from mini_rpg_characters import Enemy
from mini_rpg_characters import Friend
# Set the basic properties of rooms
kitchen = Room("Kitchen")
kitchen.set_description(
    "Nice, cosy warm place with a fireplace. Freshly baked bread is smelling"
)
dininghall = Room("Dining Hall")
dininghall.set_description(
    "Small room with a table and 12 seats. It is really crowded."
)
ballroom = Room("Ballroom")
ballroom.set_description("Huge room with mirrors and windows.")
garden = Room("Garden")
# Typo fix in the player-facing description ("a lof of" -> "a lot of").
garden.set_description("Huge garden with a lot of flowers")

# Wire the rooms together; every link has a matching reverse link.
kitchen.link_room(dininghall, "south")
dininghall.link_room(kitchen, "north")
dininghall.link_room(ballroom, "west")
ballroom.link_room(dininghall, "east")
garden.link_room(ballroom, "south")
ballroom.link_room(garden, "north")

# Add characters to the game
dave = Characters("Dave", "A chef")
kitchen.set_character(dave)
dave.set_conversation("Hello, are you hungry?")

fred = Enemy("Fred", "Angry and hungry Cat")
fred.set_conversation("Mrrr, Mheeew")
# Use the setter for consistency with the other enemy (bred) below, instead
# of assigning the attribute directly.
fred.set_weakness("cheese")
dininghall.set_character(fred)

# Typo fix in the player-facing description ("frigtening" -> "frightening").
bred = Enemy("Bred", "Enormous, frightening dog")
bred.set_conversation("Wuffff")
bred.set_weakness("sausage")
ballroom.set_character(bred)

jim = Friend("Jim", "6 years old boy")
jim.set_interest("apple pie")
garden.set_character(jim)

# Add items to the game
apple_pie = Items("apple pie")
ballroom.set_item(apple_pie)
cheese = Items("cheese")
kitchen.set_item(cheese)
sausage = Items("sausage")
dininghall.set_item(sausage)
flower = Items("Flower")
garden.set_item(flower)
# Set the starting point of the game and add possibility to act
current_room = kitchen
backpack = []
friend = 0
# Main game loop: runs until the player has made 3 friends; the while/else
# prints the victory message once the loop condition becomes false.
while friend < 3:
    print("Number of friends: " + str(friend))
    print("\n")
    current_room.get_details()
    inhabitant = current_room.get_character()
    if inhabitant is not None:
        inhabitant.describe()
        # Friends found in a room count immediately, no fight needed.
        if isinstance(inhabitant, Friend) == True:
            friend += 1
            print("You have a new friend")
    local_item = current_room.get_item()
    if local_item is not None:
        local_item.describe()
    command = input("> ")
    if command in ["north", "south", "east", "west"]:
        current_room = current_room.move(command)
    elif command == "talk":
        inhabitant.talk()
    elif command == "check":
        # NOTE(review): this guards on current_room.item instead of the
        # local_item fetched above -- assumes Room exposes the same object
        # through both; verify in mini_rpg_room.
        if current_room.item is not None:
            backpack.append(local_item.name)
            current_room.set_item(None)
            print(backpack)
        else:
            print("Nothing left")
    elif command == "backpack":
        print(backpack)
    elif command == "fight":
        print(backpack)
        if backpack == []:
            print("You are not able to fight")
        else:
            fight_with = input("What would you like to use?>")
            inhabitant.fight(fight_with)
            # Hitting the enemy's weakness turns it friendly.
            if fight_with == inhabitant.weakness:
                friend += 1
                inhabitant.description = "Friendly and nice"
    else:
        print(
            "Please write valid command: north, south, east, west, talk, check, fight, backpack"
        )
else:
    print("You won the game")
| Maja0108/mini_rpg | mini_rpg_main.py | mini_rpg_main.py | py | 3,823 | python | en | code | 0 | github-code | 36 |
5547518029 | """
Tests for voting 10/07/2021.
"""
import pytest
from scripts.vote_2021_10_07 import (start_vote)
from utils.config import ldo_token_address, lido_dao_acl_address, lido_dao_token_manager_address
# Address that receives the LDO allocation for the token purchase contract.
PURCHASE_CONTRACT_PAYOUT_ADDRESS = '0x689E03565e36B034EcCf12d182c3DC38b2Bb7D33'

# All 'amount' values are LDO in base units (10**18 per token).
payout_curve_rewards = {
    'amount': 3_550_000 * (10 ** 18),
    'address': '0x753D5167C31fBEB5b49624314d74A957Eb271709',
}

payout_balancer_rewards = {
    'amount': 300_000 * (10 ** 18),
    'address': '0x1dD909cDdF3dbe61aC08112dC0Fdf2Ab949f79D8',
}

payout_purchase_contract = {
    # 462,962.9629629634 LDO * 10**18, written as an exact integer literal
    # (a float expression would lose precision). Previously this was a
    # *string*, which could never compare equal to the integer balance
    # delta asserted in test_common.
    'amount': 462962962962963400000000,
    'address': PURCHASE_CONTRACT_PAYOUT_ADDRESS,
}

grant_role_purchase_contract = {
    'address': PURCHASE_CONTRACT_PAYOUT_ADDRESS,
    'permission_name': 'ASSIGN_ROLE'
}

payout_finance_multisig = {
    'amount': 28_500 * (10 ** 18),  # TODO: Check current rate on 1inch before run
    'address': '0x48F300bD3C52c7dA6aAbDE4B683dEB27d38B9ABb',
    'reference': 'Finance multisig transfer to pay a bug bounty'
}
def curve_balance(ldo) -> int:
    """LDO balance currently held by the Curve rewards distributor."""
    distributor = payout_curve_rewards['address']
    return ldo.balanceOf(distributor)
def balancer_balance(ldo) -> int:
    """LDO balance currently held by the Balancer rewards distributor."""
    distributor = payout_balancer_rewards['address']
    return ldo.balanceOf(distributor)
def purchase_contract_balance(ldo) -> int:
    """LDO balance currently held by the token purchase contract."""
    holder = payout_purchase_contract['address']
    return ldo.balanceOf(holder)
def finance_multisig_balance(ldo) -> int:
    """LDO balance currently held by the finance multisig contract."""
    holder = payout_finance_multisig['address']
    return ldo.balanceOf(holder)
def has_assign_role_permission(acl, token_manager, who) -> int:
    """Whether ``who`` holds ASSIGN_ROLE on the TokenManager contract."""
    role = token_manager.ASSIGN_ROLE()
    return acl.hasPermission(who, token_manager, role)
# Module-scoped fixtures: one contract handle shared by every test here.
@pytest.fixture(scope='module')
def ldo(interface):
    """Returns contract of LDO token."""
    return interface.ERC20(ldo_token_address)


@pytest.fixture(scope='module')
def acl(interface):
    """Returns ACL contract"""
    return interface.ACL(lido_dao_acl_address)


@pytest.fixture(scope='module')
def token_manager(interface):
    """Returns TokenManager contract"""
    return interface.TokenManager(lido_dao_token_manager_address)
def test_common(
        acl, token_manager, ldo_holder,
        helpers, accounts, dao_voting, ldo
):
    """Perform testing for the whole voting."""
    # Snapshot balances / permissions before the vote is executed.
    curve_balance_before = curve_balance(ldo)
    balancer_balance_before = balancer_balance(ldo)
    purchase_contract_balance_before = purchase_contract_balance(ldo)
    finance_multisig_balance_before = finance_multisig_balance(ldo)
    assert not has_assign_role_permission(acl, token_manager, grant_role_purchase_contract['address'])
    vote_id, _ = start_vote({
        'from': ldo_holder
    }, silent=True)
    helpers.execute_vote(
        vote_id=vote_id, accounts=accounts, dao_voting=dao_voting
    )
    curve_balance_after = curve_balance(ldo)
    balancer_balance_after = balancer_balance(ldo)
    purchase_contract_balance_after = purchase_contract_balance(ldo)
    finance_multisig_balance_after = finance_multisig_balance(ldo)
    curve_inc = curve_balance_after - curve_balance_before
    balancer_inc = balancer_balance_after - balancer_balance_before
    purchase_contract_balance_inc = purchase_contract_balance_after - purchase_contract_balance_before
    finance_multisig_balance_inc = finance_multisig_balance_after - finance_multisig_balance_before
    assert curve_inc == payout_curve_rewards['amount'], 'Failed on Curve'
    assert balancer_inc == payout_balancer_rewards['amount'], 'Failed on Balancer'
    assert purchase_contract_balance_inc == payout_purchase_contract['amount'], 'Failed on purchase contract'
    assert has_assign_role_permission(acl, token_manager,
                                      grant_role_purchase_contract['address']), 'Failed on grant ASSIGN_ROLE'
    # Fixed copy-pasted assertion message (previously repeated
    # 'Failed on purchase contract').
    assert finance_multisig_balance_inc == payout_finance_multisig['amount'], 'Failed on finance multisig'
| lidofinance/scripts | archive/tests/xtest_2021_10_07.py | xtest_2021_10_07.py | py | 4,121 | python | en | code | 14 | github-code | 36 |
12486652790 | """
Basics OOP Principles
Check your solution: https://judge.softuni.bg/Contests/Practice/Index/1590#1
SUPyF Exam 24.03.2019 - 02. Command Center
Problem:
Input / Constraints
We are going to receive a list of integers from console.
After that we will start receive some of the following commands in format:
• swap {index1} {index2}
• enumerate_list
• max
• min
• get_divisible by {number}
*If you receive command 'swap' you should check if the indexes are valid. A valid index is index which is 0 or higher
and is less than list length.
- If one of the indexes is not valid just print the list without changing it
- If both indexes are valid swap the two elements on these indexes
*If you receive ‘enumerate_list’ you should enumerate the list and print it in the following format:
[(0, {list[0]}), (1, list[1]), (2, list[2]), (3, list[3])]
Where {list[n]} is the element corresponding to the given index (starting from zero)
*If you receive 'max', print the max number in the list
*If you receive 'min', print the min number in the list
*If you receive ‘get_divisible by’ you must print every element in the list which residue after division with {number}
is 0 in format:
[el1, el2, ….]
It is guaranteed - the {number} never will be 0, so you do not need to check it.
Output
When you receive a command which says 'end', you should print the count of commands you have performed.
Note that invalid commands may appear. In this case do not print anything and do not count these commands as performed.
Examples:
Input:
1 3 2 4 5
swap 1 15
enumerate_list
max
get_divisible by 13
get_divisible by 2
swap 1 4
enumerate_listtt
end
Output:
[1, 3, 2, 4, 5]
[(0, 1), (1, 3), (2, 2), (3, 4), (4, 5)]
5
[]
[2, 4]
[1, 5, 2, 4, 3]
6
Input:
15 -1 3 0 19 -15 24
swap 0 1
swap 4 6
enumerate_list
swap 6 1
swap 7 -1
get divisible by -15
get_divisible by 15
get_divisibleee by 15
end
Output:
[-1, 15, 3, 0, 19, -15, 24]
[-1, 15, 3, 0, 24, -15, 19]
[(0, -1), (1, 15), (2, 3), (3, 0), (4, 24), (5, -15), (6, 19)]
[-1, 19, 3, 0, 24, -15, 15]
[-1, 19, 3, 0, 24, -15, 15]
[0, -15, 15]
6
"""
# Command Center: read a list of ints, then process commands until "end".
nums = [int(item) for item in input().split(" ")]
valid_operations = 0
while True:
    command = input()
    if command == "end":
        break
    a = command.split(" ")
    if a[0] == "swap" and len(a) == 3:
        # A valid index is >= 0 and strictly less than the list length; the
        # original first-index check used <= len(nums), which allowed an
        # out-of-range index and crashed with IndexError.
        if 0 <= int(a[1]) < len(nums) and 0 <= int(a[2]) < len(nums):
            nums[int(a[1])], nums[int(a[2])] = nums[int(a[2])], nums[int(a[1])]
        # Per the spec the list is printed (and the command counted) whether
        # or not the indexes were valid.
        print(nums)
        valid_operations += 1
    elif command == "enumerate_list":
        print(list(enumerate(nums, 0)))
        valid_operations += 1
    elif command == "max":
        print(max(nums))
        valid_operations += 1
    elif command == "min":
        print(min(nums))
        valid_operations += 1
    # Check the token count first so a bare "get_divisible" (an invalid
    # command) cannot raise IndexError on a[1].
    elif len(a) == 3 and a[0] == "get_divisible" and a[1] == "by":
        print([num for num in nums if num % int(a[2]) == 0])
        valid_operations += 1
    # Any other command is invalid: print nothing, count nothing.
print(valid_operations)
| SimeonTsvetanov/Coding-Lessons | SoftUni Lessons/Python Development/Python Fundamentals June 2019/Problems and Files/14. PAST EXAMS/02. Python Fundamentals Exam - 24 March2019/02. Command Center.py | 02. Command Center.py | py | 3,572 | python | en | code | 9 | github-code | 36 |
36121100233 | import logging
import os
from time import time
from abc import abstractmethod, ABC
from pathlib import Path
from typing import Any, Iterator, Optional, Union, List, Dict, Set
from forte.common.configuration import Config
from forte.common.exception import ProcessExecutionException
from forte.common.resources import Resources
from forte.data.base_pack import PackType
from forte.data.data_pack import DataPack
from forte.data.multi_pack import MultiPack
from forte.data.types import ReplaceOperationsType
from forte.pipeline_component import PipelineComponent
from forte.utils.utils import get_full_module_name
__all__ = [
"BaseReader",
"PackReader",
"MultiPackReader",
]
logger = logging.getLogger(__name__)
class BaseReader(PipelineComponent[PackType], ABC):
    r"""The basic data reader class. To be inherited by all data readers.

    Args:
        from_cache: Decide whether to read from cache
            if cache file exists. By default (``False``), the reader will
            only read from the original file and use the cache file path
            for caching, it will not read from the ``cache_directory``.
            If ``True``, the reader will try to read a datapack from the
            caching file.
        cache_directory: The base directory to place the
            path of the caching files. Each collection is contained in one
            cached file, under this directory. The cached location for each
            collection is computed by
            :meth:`~forte.data.base_reader.BaseReader._cache_key_function`.

            .. note::
                A collection is the data returned by
                :meth:`~forte.data.base_reader.BaseReader._collect`.
        append_to_cache: Decide whether to append write
            if cache file already exists. By default (``False``), we
            will overwrite the existing caching file. If ``True``, we will
            cache the datapack append to end of the caching file.
        cache_in_memory: If ``True``, parsed packs are additionally kept in
            an in-memory list after the first full iteration, so subsequent
            iterations are served from memory instead of re-parsing.
    """

    def __init__(
        self,
        from_cache: bool = False,
        cache_directory: Optional[str] = None,
        append_to_cache: bool = False,
        cache_in_memory: bool = False,
    ):
        super().__init__()
        self.from_cache = from_cache
        self._cache_directory = cache_directory
        self.component_name = get_full_module_name(self)
        self.append_to_cache = append_to_cache
        self._cache_in_memory = cache_in_memory
        # True once `_data_packs` holds a complete pass over the source.
        self._cache_ready: bool = False
        self._data_packs: List[PackType] = []
        # Needed for time profiling of the reader.
        self._enable_profiling: bool = False
        self._start_time: float = 0.0
        self.time_profile: float = 0.0

    def initialize(self, resources: Resources, configs: Config):
        super().initialize(resources, configs)
        # Clear the in-memory cache so a re-initialized reader starts fresh.
        self._cache_ready = False
        del self._data_packs[:]

    @classmethod
    def default_configs(cls):
        r"""Returns a `dict` of configurations of the reader with default
        values. Used to replace the missing values of input `configs`
        during pipeline construction.

        Here:

        - zip_pack (bool): whether to zip the results. The default value is
          False.
        - serialize_method: The method used to serialize the data. Current
          available options are `json`, `jsonpickle` and `pickle`. Default is
          `json`.
        """
        return {"zip_pack": False, "serialize_method": "json"}

    @staticmethod
    def pack_type():
        # Subclasses declare the concrete pack class they produce
        # (see PackReader / MultiPackReader below).
        raise NotImplementedError

    @abstractmethod
    def _collect(self, *args: Any, **kwargs: Any) -> Iterator[Any]:
        r"""Returns an iterator of data objects, and each individual object
        should contain sufficient information needed to construct or locate
        a data pack in cache.

        For example: `data_source` can be a ``kwarg`` which is the path to a
        file that a reader can take to read and parse a file.

        Args:
            args: Specify the data source.
            kwargs: Specify the data source.

        Returns: Iterator of collections that are sufficient to create one
        pack.
        """
        raise NotImplementedError

    def parse_pack(self, collection: Any) -> Iterator[PackType]:
        r"""Calls :meth:`_parse_pack` to create packs from the collection.
        This internally sets up the component meta data. Users should
        implement the :meth:`_parse_pack` method.
        """
        if collection is None:
            raise ProcessExecutionException(
                "Got None collection, cannot parse as data pack."
            )
        for p in self._parse_pack(collection):
            p.add_all_remaining_entries(self.name)
            yield p

    @abstractmethod
    def _parse_pack(self, collection: Any) -> Iterator[PackType]:
        r"""Returns an iterator of Packs parsed from a collection. Readers
        should implement this class to populate the class input.

        Args:
            collection: Object that can be parsed into a Pack.

        Returns: Iterator of Packs.
        """
        raise NotImplementedError

    def _cache_key_function(self, collection: Any) -> Optional[str]:
        # pylint: disable=unused-argument
        r"""Computes the cache key based on the type of data.

        Args:
            collection: Any object that provides information to identify the
                name and location of the cache file
        """
        # Default: no cache key; readers that support caching override this.
        return None

    def text_replace_operation(self, text: str) -> ReplaceOperationsType:
        # pylint: disable=unused-argument
        r"""Given the possibly noisy text, compute and return the
        replacement operations in the form of a list of (span, str)
        pairs, where the content in the span will be replaced by the
        corresponding str.

        Args:
            text: The original data text to be cleaned.

        Returns (List[Tuple[Tuple[int, int], str]]):
            the replacement operations.
        """
        # Default: no replacement performed.
        return []

    def _get_cache_location(self, collection: Any) -> str:
        r"""Gets the full path to the cache file for a collection, i.e.
        ``<cache_directory>/<cache_key>``.

        Args:
            collection: information to compute cache key.

        Returns (str): file path to the cache file for a Pack.
        """
        # pylint: disable=assignment-from-none
        file_path = self._cache_key_function(collection)
        if file_path is None:
            raise ProcessExecutionException(
                "Cache key is None. You probably set `from_cache` to true "
                "but fail to implement the _cache_key_function"
            )
        return os.path.join(str(self._cache_directory), file_path)

    def _lazy_iter(self, *args, **kwargs):
        # Yield packs either from the on-disk cache or by parsing the raw
        # collections, optionally writing parsed packs back to the cache.
        for collection in self._collect(*args, **kwargs):
            if self.from_cache:
                for pack in self.read_from_cache(
                    self._get_cache_location(collection)
                ):
                    pack.add_all_remaining_entries()
                    yield pack
            else:
                not_first = False
                for pack in self.parse_pack(collection):
                    # Write to the cache if _cache_directory is specified.
                    # The first pack overwrites the file; later packs of the
                    # same collection are appended.
                    if self._cache_directory is not None:
                        self.cache_data(collection, pack, not_first)
                    if not isinstance(pack, self.pack_type()):
                        raise ValueError(
                            f"No Pack object read from the given "
                            f"collection {collection}, returned {type(pack)}."
                        )
                    not_first = True
                    pack.add_all_remaining_entries()
                    yield pack

    def set_profiling(self, enable_profiling: bool = True):
        r"""Set profiling option.

        Args:
            enable_profiling: A boolean of whether to enable profiling
                for the reader or not (the default is True).
        """
        self._enable_profiling = enable_profiling

    def timer_yield(self, pack: PackType):
        r"""Wrapper generator for time profiling. Insert timers around
        'yield' to support time profiling for reader.

        Args:
            pack: DataPack passed from self.iter()
        """
        # Aggregate time spent producing this pack (excludes consumer time).
        if self._enable_profiling:
            self.time_profile += time() - self._start_time
        yield pack
        # Restart the timer once the consumer hands control back.
        if self._enable_profiling:
            self._start_time = time()

    def iter(self, *args, **kwargs) -> Iterator[PackType]:
        # pylint: disable=protected-access
        r"""An iterator over the entire dataset, yielding all the Packs
        read from the data source(s). If not reading from cache, this
        calls :meth:`_collect` via :meth:`_lazy_iter`.

        Args:
            args: One or more input data sources, for example, most
                DataPack readers accept `data_source` as file/folder path.
            kwargs: Iterator of DataPacks.
        """
        # Start timer
        if self._enable_profiling:
            self._start_time = time()

        if self._cache_in_memory and self._cache_ready:
            # Serve packs from the in-memory cache built on a prior pass.
            for pack in self._data_packs:
                if self._check_type_consistency:
                    if hasattr(pack._meta, "record"):
                        self.record(pack._meta.record)
                yield from self.timer_yield(pack)
        else:
            # Read via parsing the dataset (and optionally fill the
            # in-memory cache as we go).
            for pack in self._lazy_iter(*args, **kwargs):
                if self._check_type_consistency:
                    if hasattr(pack._meta, "record"):
                        self.record(pack._meta.record)
                if self._cache_in_memory:
                    self._data_packs.append(pack)
                yield from self.timer_yield(pack)
        # Only after a complete pass is the in-memory cache usable.
        self._cache_ready = True

    def record(self, record_meta: Dict[str, Set[str]]):
        r"""Modify the pack meta record field of the reader's output. The
        key of the record should be the entry type and values should
        be attributes of the entry type. All the information would be used
        for consistency checking purpose if the pipeline is initialized with
        `enforce_consistency=True`.

        Args:
            record_meta: the field in the datapack for type record that need
                to fill in for consistency checking.
        """
        pass

    def cache_data(self, collection: Any, pack: PackType, append: bool):
        r"""Writes ``pack`` to the cache file computed for ``collection``.

        After you call this method, the dataset reader will use its
        ``cache_directory`` to store a cache of
        :class:`~forte.data.base_pack.BasePack` read
        from every document passed to read, serialized as one
        string-formatted :class:`~forte.data.base_pack.BasePack`. If the
        cache file for a given ``file_path`` exists, we read the
        :class:`~forte.data.base_pack.BasePack` from the cache. If the cache
        file does not exist, we will `create` it on our first pass through
        the data.

        Args:
            collection: The collection is a piece of data from the
                :meth:`_collect` function, to be read to produce DataPack(s).
                During caching, a cache key is computed based on the data in
                this collection.
            pack: The data pack to be cached.
            append: Whether to allow appending to the cache.
        """
        if not self._cache_directory:
            raise ValueError("Can not cache without a cache_directory!")

        # BUG FIX: `_get_cache_location` already prefixes `_cache_directory`,
        # so joining the directory again (as the original code did) produced
        # `<dir>/<dir>/<key>` while `read_from_cache` (via `_lazy_iter`)
        # looked in `<dir>/<key>` — cached packs were never found again.
        cache_filename = self._get_cache_location(collection)
        os.makedirs(os.path.dirname(cache_filename) or ".", exist_ok=True)

        logger.info("Caching pack to %s", cache_filename)
        mode = "a" if append else "w"
        with open(cache_filename, mode, encoding="utf-8") as cache:
            cache.write(pack.to_string() + "\n")

    def read_from_cache(
        self, cache_filename: Union[Path, str]
    ) -> Iterator[PackType]:
        r"""Reads one or more Packs from ``cache_filename``, and yields
        Pack(s) from the cache file.

        Args:
            cache_filename: Path to the cache file.

        Returns:
            List of cached data packs.
        """
        logger.info("reading from cache file %s", cache_filename)
        with open(cache_filename, "r", encoding="utf-8") as cache_file:
            for line in cache_file:
                # NOTE(review): deserialization always goes through
                # `DataPack.from_string`; the isinstance check below rejects
                # mismatched pack types — confirm MultiPack caching works.
                pack = DataPack.from_string(line.strip())
                if not isinstance(pack, self.pack_type()):
                    raise TypeError(
                        f"Pack deserialized from {cache_filename} "
                        f"is {type(pack)}, but expect {self.pack_type()}"
                    )
                yield pack

    def finish(self, resource: Resources):
        # No teardown needed by default; subclasses may override.
        pass

    def set_text(self, pack: DataPack, text: str):
        r"""Assign the text value to the
        :class:`~forte.data.data_pack.DataPack`. This function will
        pass the ``text_replace_operation`` to the
        :class:`~forte.data.data_pack.DataPack` to conduct
        the pre-processing step.

        Args:
            pack: The :class:`~forte.data.data_pack.DataPack` to assign
                value for.
            text: The original text to be recorded in this dataset.
        """
        pack.set_text(text, replace_func=self.text_replace_operation)
class PackReader(BaseReader[DataPack], ABC):
    r"""A Pack Reader reads data into :class:`~forte.data.data_pack.DataPack`."""
    @staticmethod
    def pack_type():
        # Concrete pack class produced by this reader family.
        return DataPack
class MultiPackReader(BaseReader[MultiPack], ABC):
    r"""The basic :class:`~forte.data.multi_pack.MultiPack` data reader class.
    To be inherited by all
    data readers which return :class:`~forte.data.multi_pack.MultiPack`.
    """
    @staticmethod
    def pack_type():
        # Concrete pack class produced by this reader family.
        return MultiPack
| asyml/forte | forte/data/base_reader.py | base_reader.py | py | 14,338 | python | en | code | 230 | github-code | 36 |
30502981495 | #########################################################################
# Iterative server - webserver3b.py #
# #
# Tested with Python 2.7.9 & Python 3.4 on Ubuntu 14.04 & Mac OS X #
# #
# - Server sleeps for 60 seconds after sending a response to a client #
#########################################################################
import socket
import time
import os
import sys # fileno
SERVER_ADDRESS = (HOST, PORT) = '', 8888
REQUEST_QUEUE_SIZE = 5
def handle_request(client_connection):
    """Read one request from *client_connection*, print it, send back a
    minimal HTTP response, then sleep to simulate a slow iterative server.

    Bug fix: a valid HTTP/1.1 response requires an empty line (CRLF CRLF)
    between the status line and the body; the original response omitted it,
    so strict clients treated the body as malformed headers. The stale
    "60 seconds" comment was also wrong — the sleep is 10 seconds.
    """
    request = client_connection.recv(1024)
    print(request.decode())
    http_response = b"HTTP/1.1 200 OK\r\n\r\nHello, World22!\n"
    client_connection.sendall(http_response)
    time.sleep(10)  # sleep and block the single process for 10 seconds
a = 0
def serve_forever():
    """Accept and serve HTTP connections one at a time (iterative server).

    Increments the module-level counter ``a`` per connection and calls
    ``exit()`` — terminating the whole process — after more than 3 requests.
    Note the accepted connection is only closed on iterations that do not
    exit.
    """
    listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allow quick restarts on the same port (avoid TIME_WAIT bind errors).
    listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listen_socket.bind(SERVER_ADDRESS)
    listen_socket.listen(REQUEST_QUEUE_SIZE)
    print("backlog {backlog}...".format(backlog=REQUEST_QUEUE_SIZE))
    print('Serving HTTP on port {port} :{fileno}...'.format(port=PORT, fileno=listen_socket.fileno()))
    while True:
        # Blocks until a client connects; requests are handled serially.
        client_connection, client_address = listen_socket.accept()
        global a
        a = a + 1
        print("a...is: %d" % a)
        handle_request(client_connection)
        if a > 3:
            # Deliberate shutdown after the 4th request (demo behavior).
            print("go out ....%s" % time.ctime())
            exit();
        client_connection.close()
if __name__ == '__main__':
pid = os.getpid()
print("pid is:%d\n" % pid )
serve_forever() | feng1o/python_1 | socket/server-con/webserver3b.py | webserver3b.py | py | 1,820 | python | en | code | 1 | github-code | 36 |
42522310312 | #WebCrawler é uma ferramenta de captura de informações em sites, cadastrando e salvando o que acha que seja mais relevante por meio de palavras chave
#Importa operadores matematicos
import operator
#Biblioteca de manipulação de estruturas do python
from collections import Counter
from bs4 import BeautifulSoup
import requests
def start(url):
    """Fetch *url*, collect all words inside ``div.entry_content`` elements
    (lower-cased, whitespace-split) and forward them to clean_wordList.

    NOTE(review): the CSS class 'entry_content' (underscore) looks unusual —
    many themes use 'entry-content'; verify against the target site.
    """
    wordList = []
    source_code = requests.get(url).text
    # Request the page's HTML
    soup = BeautifulSoup(source_code, 'html.parser')
    # Find every matching div and extract its text content
    for each_text in soup.findAll('div', {'class': 'entry_content'}):
        content = each_text.text
        words = content.lower().split()
        for each_word in words:
            wordList.append(each_word)
    clean_wordList(wordList)
# Strip unwanted symbols from every word before counting frequencies.
def clean_wordList(wordList):
    """Remove punctuation/symbol characters from each word and forward the
    cleaned, non-empty words to create_dictionary."""
    symbols = '!@#$%^&*()_-+={[]}|\;:"<>?/,.'
    clean_list = []
    for raw_word in wordList:
        stripped = raw_word
        # Delete every symbol occurrence (replace with nothing).
        for symbol in symbols:
            stripped = stripped.replace(symbol, '')
        if stripped:
            clean_list.append(stripped)
    create_dictionary(clean_list)
# Walk the list and report the most frequent words.
def create_dictionary(clean_list):
    """Count word frequencies, print them sorted ascending by count, print
    and return the ten most common ``(word, count)`` pairs.

    Bug fix: the original executed ``word_count[word] += 1`` in the *else*
    branch as well, raising KeyError on every word's first occurrence.
    Returning ``top`` is new but backward compatible (callers ignored the
    previous implicit None).
    """
    word_count = {}
    for word in clean_list:
        word_count[word] = word_count.get(word, 0) + 1
    for key, value in sorted(word_count.items(), key=operator.itemgetter(1)):
        print("%s: %s" % (key, value))
    c = Counter(word_count)
    top = c.most_common(10)
    print(top)
    return top
if __name__ == "__main__":
start('https://www.geeksforgeeks.org/python-programming-language/?ref=leftbar') | CaioNM/S.I.comPython | Aula 04/Web Crawler.py | Web Crawler.py | py | 1,835 | python | pt | code | 0 | github-code | 36 |
37442560200 | """estres forms"""
from django import forms
from estres.models import *
class EstresForm(forms.ModelForm):
    """ModelForm for EstresModel: scenario count, risk horizon, cutoff date
    and the four input .xlsx workbooks used by the stress-test run."""
    class Meta:
        # Binds the form to the EstresModel stress-test record.
        model = EstresModel
        fields = [
            "numero_escenarios",
            "horizonte_riesgo",
            "fechacorte",
            "info_mercado",
            "inc_financiero",
            "inc_biometricos",
            "inc_frecsev",
        ]
        # Spanish UI labels shown next to each field.
        labels = {
            "numero_escenarios":"Número de escenarios (entero de 1000 a 10000)",
            "horizonte_riesgo":"Horizonte de riesgo",
            "fechacorte":"Fecha de corte",
            "info_mercado":"INFO_MERCADO.xlsx",
            "inc_financiero":"INC_FINANCIERO.xlsx",
            "inc_biometricos":"INC_BIOMETRICOS.xlsx",
            "inc_frecsev":"INC_FRECUENCIA_SEVERIDAD.xlsx",
        }
        # File inputs restricted to .xlsx; date field uses a datepicker widget.
        widgets = {
            "numero_escenarios":forms.TextInput(),
            "horizonte_riesgo":forms.Select(),
            "fechacorte":forms.DateInput(format=('%Y-%m-%d'), attrs={'class':'datepicker'}),
            "info_mercado":forms.FileInput(attrs={"accept":".xlsx"}),
            "inc_financiero":forms.FileInput(attrs={"accept":".xlsx"}),
            "inc_biometricos":forms.FileInput(attrs={"accept":".xlsx"}),
            "inc_frecsev":forms.FileInput(attrs={"accept":".xlsx"}),
        }
| adandh/Bedu_RiesgosSeguros | estres/forms.py | forms.py | py | 1,467 | python | en | code | 0 | github-code | 36 |
75287915945 |
from PyQt5.QtWidgets import QLabel,QLineEdit,QPushButton,QMessageBox,QWidget,QApplication,QMainWindow,QTextEdit
from PyQt5.QtGui import QFont
import sys,time
getter=str()
getter2=str()
class FirstWindow(QWidget):
    """Login window: checks hard-coded login/password pairs and opens the
    matching chat window (Admin -> Chat, User -> Chat1)."""

    def __init__(self):
        super().__init__()
        self.setGeometry(200, 100, 500, 500)
        self.setWindowTitle("Сhinese telegram")
        self.start()

    def font(self, obj):
        # Helper: apply the shared 30pt Times font to a widget.
        obj.setFont(QFont("Times", 30))

    def start(self):
        """Build the login form widgets (labels, line edits, buttons)."""
        yozuv1 = QLabel("Login:", self)
        yozuv1.setFont(QFont("Times", 25))
        yozuv1.move(50, 60)
        self.log = QLineEdit(self)
        self.log.setFont(QFont("Times", 25))
        self.log.move(150, 60)
        self.log.setPlaceholderText("login kiriting....")
        yozuv2 = QLabel("Parol:", self)
        yozuv2.setFont(QFont("Times", 25))
        yozuv2.move(50, 150)
        self.par = QLineEdit(self)
        self.par.setFont(QFont("Times", 25))
        self.par.move(150, 150)
        self.par.setPlaceholderText("parol kiriting....")
        ok = QPushButton("OK", self)
        ok.setFont(QFont("Times", 50))
        ok.move(200, 250)
        ok.clicked.connect(self.run)
        btn = QPushButton("exit", self)
        btn.clicked.connect(self.hide)

    def hide(self):
        # BUG FIX: the original body was `self.hide()`, which re-entered this
        # override and recursed until RecursionError instead of hiding the
        # window.  Delegate to the QWidget implementation.
        super().hide()

    def run(self):
        """Validate the entered credentials and open the matching chat.

        NOTE(review): credentials are hard-coded in source — acceptable for
        a lesson project only, never for real authentication.
        """
        log_par = [['Admin', '12345'], ['User', '54321']]
        if log_par[0][0] == self.log.text() and log_par[0][1] == self.par.text():
            self.Chat = Chat()
            self.Chat.show()
        if log_par[1][0] == self.log.text() and log_par[1][1] == self.par.text():
            self.Chat1 = Chat1()
            self.Chat1.show()
class Chat(QMainWindow):
    """Admin's chat window: one text box to compose, one showing messages
    received from the User side (module-level `getter2`)."""
    def __init__(self):
        super().__init__()
        self.initUI()
    def initUI(self):
        # Window chrome: a Back button, fixed 500x500 size, chat widgets.
        self.btn=QPushButton("Back",self)
        self.btn.clicked.connect(self.run1)
        self.resize(500, 500)
        self.send()
        self.setWindowTitle("Admin's Chat")
        self.show()
    def run1(self):
        # Back button handler: hide this window (QMainWindow.hide).
        self.hide()
    def send(self):
        """Build the compose box (wn1), the inbox box (wn2) and the Send
        button. wn2 is pre-filled with the User's last message."""
        self.wn1=QTextEdit(self)
        self.wn1.setGeometry(10,40,480,150)
        self.wn1.setFont(QFont("Times",20))
        self.wn1.setPlaceholderText("Text kiriting...")
        self.wn2=QTextEdit(self)
        self.wn2.setGeometry(10,200,480,150)
        self.wn2.setFont(QFont("Times",20))
        self.wn2.setPlaceholderText("Sizga kelgan xabarlar... \n")
        self.wn2.setText(getter2)
        self.send1=QPushButton("Send💬",self)
        self.send1.setFont(QFont("Times",15))
        self.send1.move(200,400)
        self.send1.clicked.connect(self.sender)
    def sender(self):
        # Publish the composed text via the module-level `getter` and hide
        # the Send button (one message per window instance).
        global getter
        getter=self.wn1.toPlainText()
        self.send1.hide()
class Chat1(QMainWindow):
    """User's chat window: mirror of Chat with the message globals swapped
    (reads `getter` from Admin, writes replies into `getter2`)."""
    def __init__(self):
        super().__init__()
        self.initUI()
    def initUI(self):
        # Window chrome: a Back button, fixed 500x500 size, chat widgets.
        self.btn=QPushButton("Back",self)
        self.btn.clicked.connect(self.run1)
        self.resize(500, 500)
        self.send()
        self.setWindowTitle("User's Chat")
        self.show()
    def run1(self):
        # Back button handler: hide this window (QMainWindow.hide).
        self.hide()
    def send(self):
        """Build the compose box (wn1), the inbox box (wn2) and the Send
        button. wn2 is pre-filled with the Admin's last message."""
        self.wn1=QTextEdit(self)
        self.wn1.setGeometry(10,40,480,150)
        self.wn1.setFont(QFont("Times",20))
        self.wn1.setPlaceholderText("Text kiriting...")
        self.wn2=QTextEdit(self)
        self.wn2.setGeometry(10,200,480,150)
        self.wn2.setFont(QFont("Times",20))
        self.wn2.setPlaceholderText("Sizga kelgan xabarlar... \n")
        self.wn2.setText(getter)
        self.send1=QPushButton("Send💬",self)
        self.send1.setFont(QFont("Times",15))
        self.send1.move(200,400)
        self.send1.clicked.connect(self.sender)
    def sender(self):
        # Publish the composed text via the module-level `getter2` and hide
        # the Send button (one message per window instance).
        global getter2
        getter2=self.wn1.toPlainText()
        self.send1.hide()
app=QApplication(sys.argv)
oyna=FirstWindow()
oyna.show()
sys.exit(app.exec_())
| Golibbek0414/PYTHON | imtihonga.py/class1.py | class1.py | py | 3,837 | python | en | code | 0 | github-code | 36 |
75143366504 | import cv2
from model import FacialKeypointModel
import numpy as np
import pandas as pd
from matplotlib.patches import Circle
facec = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
model = FacialKeypointModel("KeyPointDetector.json", "weights.hdf5")
font = cv2.FONT_HERSHEY_SIMPLEX
class VideoCamera(object):
    """Wraps an OpenCV video source and yields JPEG frames annotated with
    face bounding boxes and predicted facial keypoints."""
    def __init__(self):
        # NOTE(review): hard-coded absolute path to the demo video — replace
        # with a parameter/config for portability.
        self.video = cv2.VideoCapture('/Users/apple/Workspace/Deeplearning/FacialKeypoints/videos/presidential_debate.mp4')
        # Keypoint column names come from the training CSV (last column is
        # the image data, hence [:-1]).
        facialpoints_df = pd.read_csv('KeyFacialPoints.csv')
        self.columns = facialpoints_df.columns[:-1]
        #For video from webcam
        # self.video = cv2.VideoCapture(0)
    def __del__(self):
        # Release the capture device when the wrapper is garbage-collected.
        self.video.release()
    # returns camera frames along with bounding boxes and predictions
    def get_frame(self):
        """Read one frame, detect faces (Haar cascade), predict 15 keypoints
        per face on a 96x96 grayscale crop, draw markers, return JPEG bytes."""
        _, fr = self.video.read()
        gray_fr = cv2.cvtColor(fr, cv2.COLOR_BGR2GRAY)
        faces = facec.detectMultiScale(gray_fr, 1.3, 5)
        for (x, y, w, h) in faces:
            fc = gray_fr[y:y+h, x:x+w]
            # Model expects a (1, 96, 96, 1) grayscale batch.
            roi = cv2.resize(fc, (96, 96))
            df_predict = pd.DataFrame(model.predict_keypoints(roi[np.newaxis, :, :, np.newaxis]), columns = self.columns)
            cv2.rectangle(fr,(x,y),(x+w,y+h),(255,0,0),2)
            # Scale predicted 96x96 coordinates back to the face crop size.
            xScale = fc.shape[0]/96
            yScale = fc.shape[1]/96
            # Keypoints are interleaved (x0, y0, x1, y1, ...): step by 2.
            for j in range(1,31,2):
                fr = cv2.drawMarker(fr,
                                    (int(x+df_predict.loc[0][j-1] * xScale), int(y+df_predict.loc[0][j]* yScale )),
                                    (0, 0, 255),
                                    markerType=cv2.MARKER_CROSS,
                                    markerSize=10,
                                    thickness=2,
                                    line_type=cv2.LINE_AA)
            # fr = cv2.circle(fr, (df_predict.loc[0][j-1], df_predict.loc[0][j]), radius=5, color=(0, 0, 255), thickness=-1)
        _, jpeg = cv2.imencode('.jpg', fr)
        return jpeg.tobytes()
| kartikprabhu20/FacialKeypoints | .ipynb_checkpoints/camera-checkpoint.py | camera-checkpoint.py | py | 1,990 | python | en | code | 0 | github-code | 36 |
21806692940 | import socket
import string
import os
ADDR = ([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")][0])
MEDIA_SERVER_ADDRESS = (ADDR,21567)
TRACKER_ADDRESS = (ADDR,21568)
#MEDIA_SERVER_ADDRESS = ('192.168.0.16', 21567) # (serv_ip, serv_port)
#TRACKER_ADDRESS = ('192.168.0.13', 21568) # (tracker_ip, tracker_port)
#TRACKER_ADDRESS = ('192.168.122.128', 21568) # (tracker_ip, tracker_port)
PEER_PORT = 21569 # peer port
MAX_SERV_CONNECTION = 1000 # max No. of server connection
MAX_TRAC_CONNECTION = 2*MAX_SERV_CONNECTION # max No. of tracker connection
MAX_PEER_CONNECTION = 15 # max No. of peer connection
SERVER = 1 # used to check if IS_SERVER
CLIENT = 0 # used to check if IS_SERVER
BUFSIZ = 1024 # Instruction buffer size
VERBOSE = 1 # print out or not
DEBUG = 1 # debug or not
PROCESS_TIME = 1 # process time for a thread
NUM_RETURNED_PEERS = 4*MAX_PEER_CONNECTION # number of returned peers by the tracker
INTERVAL_TRACKER_COMMUNICATION = 10 # time interval between when peers send 'alive' info to tracker
OBTAIN_NEIGHBOR_PERIOD = 30 # time interval between new neighbor info requests
TRY_INTERVAL = 10 # try intervals for errors
INTERVAL_RATEALLOC = 10 # interval for rate allocation
##################FILE INFO###################
SERVER_PATH = 'cache' # path under root dir for server to cache video
CLIENT_PATH = 'streaming' # path under root dir for client to cache streaming video
FILE_SUFFIX = '.pph' # suffix of stored filename
META_SUFFIX = '.pphmeta' # meta data for the files
STORAGE_CAP = 1*1024*1E6 # 1GB of storage cap
ST_ALLOC_TH = 3 # storage allocation threshold
BUFFER_LENGTH = 10 # buffer length in seconds
BITS_PER_CHUNK = 20*1E6 # bits per chunk of file
BYTES_PIECE_HEADER = 0
#BYTES_PIECE_HEADER = 1 + 1 + LDPC_LENGTH/8 # header size per piece: chunk ID + piece ID + LDPC coefficients (in bits/8 bytes)
LDPC_LENGTH = 1000 # LDPC length
CODING_MUL = 1 # multiplier to get the total number of unique packets
PIECE_PER_CHUNK = 20 # number of pieces per chunk
PIECE_PER_SEG = 5 # number of pieces per segment
PIECE_IDS = ((string.lowercase+
string.uppercase)[0:PIECE_PER_CHUNK*CODING_MUL])
# piece IDs (in string)
BUFFER_EMERG_LEFT = BUFFER_LENGTH/5 # time left for emergency download if buffer not filled
BUFFER_TOO_MUCH = BUFFER_LENGTH*1.5 # time to stop download because of too much buffer content
BUFFER_CHECK_INTERVAL = 0.2 # interval time to check buffer length; usually every segment
SEG_PER_CHUNK = PIECE_PER_CHUNK/PIECE_PER_SEG # number of segments per chunk
BYTES_PER_CHUNK = BITS_PER_CHUNK/8 # bytes per chunk of file
BYTES_PER_SEG = BYTES_PER_CHUNK/SEG_PER_CHUNK # bytes per segment (5 pieces)
BYTES_PER_PIECE = BYTES_PER_SEG/PIECE_PER_SEG # bytes per segment (5 pieces)
######## START : RE CAL BY ADDING HEADER ########
BYTES_PER_PIECE = int(BYTES_PER_PIECE + BYTES_PIECE_HEADER)
BYTES_PER_SEG = int(BYTES_PER_PIECE*PIECE_PER_SEG)
BYTES_PER_CHUNK = int(BYTES_PER_SEG*SEG_PER_CHUNK)
BITS_PER_CHUNK = int(BYTES_PER_CHUNK * 8)
##################### END #######################
def intersect(a, b):
    """Return the (unordered) list of elements present in both *a* and *b*."""
    common = set(a)
    common &= set(b)
    return list(common)
def union(a, b):
    """Return the (unordered) list of distinct elements from *a* or *b*."""
    merged = set(a)
    merged.update(b)
    return list(merged)
def lminus(a, b):
    """Return the (unordered) list of elements of *a* that are not in *b*."""
    return list(set(a).difference(b))
def PacketsDecode(Content, IDs):
    """Reassemble received pieces: sort *Content* by the parallel *IDs* list
    and concatenate both sides.

    Args:
        Content: list of data strings, one per piece.
        IDs: list of piece-ID strings, parallel to Content.

    Returns:
        (ids, data) tuple of the concatenated sorted IDs and the data
        concatenated in ID order.

    Robustness fix: the original crashed with TypeError (``zip(*[])``
    unpacking) when both lists were empty; empty input now yields
    ``("", "")``.
    """
    if not IDs:
        return "", ""
    # Sorting (id, content) pairs orders pieces by ID; ties (duplicate IDs)
    # fall back to comparing content, matching the original list.sort().
    pairs = sorted(zip(IDs, Content))
    sorted_ids, sorted_content = zip(*pairs)
    return "".join(sorted_ids), "".join(sorted_content)
def getFolderSize(folder):
    """Recursively compute the total size in bytes of *folder*, counting
    the directory entries themselves as well as every regular file.

    Entries that are neither regular files nor directories contribute 0.
    """
    def entry_size(path):
        # Size of one directory entry: file size, recursive dir size, or 0.
        if os.path.isfile(path):
            return os.path.getsize(path)
        if os.path.isdir(path):
            return getFolderSize(path)
        return 0

    children = (os.path.join(folder, name) for name in os.listdir(folder))
    return os.path.getsize(folder) + sum(entry_size(p) for p in children)
| chrishzhao/VideoShare | python/src/Configure.py | Configure.py | py | 4,823 | python | en | code | 6 | github-code | 36 |
28192387095 | # -*- coding: utf-8 -*-
from odoo import models, fields, _, api
class PosPromotionTotalPriceBuyOtherProduct(models.Model):
    """Odoo model: one promotion rule line — discount a product when the
    order total reaches a threshold (labels are in Vietnamese)."""
    _name = 'pos.promotion.total.price.buy.other.product'
    promotion_id = fields.Many2one('pos.promotion', string='Promotion')  # parent promotion
    product_id = fields.Many2one('product.product', string='Sản phẩm áp dụng')  # product the rule applies to
    qty = fields.Float(string='Số lượng')  # quantity
    price_unit = fields.Float(string='Giảm giá')  # unit discount amount
    total_price = fields.Float(string='Tổng giá trị đơn hàng')  # order-total threshold
    discount = fields.Float(digits=(18, 2), string=_('Chiết khấu'), default=0.0,
                            help=_('Percent to discount. This value between 0 - 100'), required=True)
    @api.constrains('discount')
    def _constraint_discount(self):
        # NOTE(review): a constrains handler usually raises ValidationError;
        # here it silently clamps instead — confirm this is intended.
        for r in self:
            r._discount_factory()
    @api.onchange('discount')
    def _onchange_discount(self):
        # Clamp the value live in the UI as the user edits it.
        for r in self:
            r._discount_factory()
    def _discount_factory(self):
        # Clamp discount into the valid percentage range [0, 100].
        MIN, MAX = 0, 100
        if self.discount < MIN:
            self.discount = MIN
        elif self.discount > MAX:
            self.discount = MAX
| zyn1030z/promotion | ev_pos_promotion_total_price_buy_other_product/models/pos_promotion_total_price.py | pos_promotion_total_price.py | py | 1,138 | python | en | code | 1 | github-code | 36 |
8891981416 | from os import access
from tkinter import image_names
import cv2
import dropbox
import time
import random
start_time=time.time()
def take_snapshot():
    """Capture one frame from the default webcam, save it as a randomly
    named PNG in the working directory and return the file name.

    Bug fix: the original returned from inside its loop *before* releasing
    the capture device, so ``release()``/``destroyAllWindows()`` (and the
    "Snapshot taken" print) were unreachable and the camera handle leaked.
    """
    number = random.randint(0, 100)
    videoCaptureObject = cv2.VideoCapture(0)
    img_name = "img" + str(number) + ".png"
    try:
        ret, frame = videoCaptureObject.read()
        cv2.imwrite(img_name, frame)
    finally:
        # Always free the camera, even if the read/write fails.
        videoCaptureObject.release()
        cv2.destroyAllWindows()
    print("Snapshot taken")
    return img_name
def upload_files(image_name):
    """Upload *image_name* to the /ary/ folder of the Dropbox account,
    overwriting any existing file of the same name.

    Bug fix: the Dropbox Python SDK method is ``files_upload``; the original
    called the non-existent ``file_upload`` and raised AttributeError.

    SECURITY: the access token is hard-coded in source — move it to an
    environment variable or secrets store before real use.
    """
    access_token = "sl.BFNXVhLcVxzWNu4AWyBuMBw9q07HWCeS1ifaeAhkH5wibVA8iwoLjtu8wVC-BWZ_dTMYCduOJ1NHUYhq1GMEDxuPeoUiGHtmbkwJuStlCNJnL8wtjFjws_HXPC7eISi4P6PNqoqvzzE"
    file_from = image_name
    file_to = "/ary/" + image_name
    dbx = dropbox.Dropbox(access_token)
    with open(file_from, "rb") as f:
        dbx.files_upload(f.read(), file_to, mode=dropbox.files.WriteMode.overwrite)
    print("file uploaded")
def main():
    """Capture and upload a webcam snapshot every 5 seconds, forever.

    Bug fix: the original compared against the module-level ``start_time``,
    which was never updated after startup (``take_snapshot`` only rebound a
    *local* of the same name), so once 5 seconds had elapsed it captured
    continuously on every loop iteration.
    """
    last_capture = time.time()
    while True:
        if time.time() - last_capture >= 5:
            name = take_snapshot()
            upload_files(name)
            last_capture = time.time()
main()
| ARYAN0021/PythonProjectsFinal | SecurityWebCam.py | SecurityWebCam.py | py | 1,223 | python | en | code | 0 | github-code | 36 |
10642138456 | from SPARQLToSQL.parser_helper import *
from SPARQLToSQL.translation_helper import *
from SPARQLToSQL.sql_helper import *
from SPARQLToSQL.mapping import *
# Simple SPARQL query translator
def construct_SQL_object(tp):
    """Translate one SPARQL triple pattern *tp* into a SQLQuery:
    projections via genPRSQL, FROM table via alpha, WHERE via genCondSQL."""
    pr_list = genPRSQL(tp)
    fromClause = SQLTable(alpha(tp))
    conditions = genCondSQL(tp)
    return SQLQuery(pr_list, fromClause, conditions)
# named same as in Chebotko's paper
def genPRSQL(tp):
    """Build the SELECT projection list for triple pattern *tp*: the subject
    always, predicate/object only when their terms differ from the subject.

    NOTE(review): the object is only compared against the subject — when
    ``tp.pp == tp.op`` (and both differ from ``tp.sp``) two projections with
    the same name are emitted; confirm against Chebotko's genPRSQL, which
    also requires ``op != pp``.
    """
    pr_list = []
    pr_list.append(SQLProject(beta(tp, SUBJECT), name(tp.sp)))
    if (tp.sp != tp.pp):
        pr_list.append(SQLProject(beta(tp, PREDICATE), name(tp.pp)))
    if (tp.sp != tp.op):
        pr_list.append(SQLProject(beta(tp, OBJECT), name(tp.op)))
    return pr_list
# named same as in Chebotko's paper
def genCondSQL(tp):
    """Build the WHERE conditions for triple pattern *tp*: equality with the
    constant for each non-variable position, plus column-equality for every
    pair of positions that share the same term (join within the pattern)."""
    condition_list = []
    if (is_variable(tp.sp) == False):
        condition_list.append(SQLCondition(beta(tp, SUBJECT), OP_EQUALS, tp.sp))
    if (is_variable(tp.pp) == False):
        condition_list.append(SQLCondition(beta(tp, PREDICATE), OP_EQUALS, tp.pp))
    if (is_variable(tp.op) == False):
        condition_list.append(SQLCondition(beta(tp, OBJECT), OP_EQUALS, tp.op))
    if (tp.sp == tp.op):
        condition_list.append(SQLCondition(beta(tp, SUBJECT), OP_EQUALS, beta(tp, OBJECT)))
    if (tp.sp == tp.pp):
        condition_list.append(SQLCondition(beta(tp, SUBJECT), OP_EQUALS, beta(tp, PREDICATE)))
    if (tp.pp == tp.op):
        condition_list.append(SQLCondition(beta(tp, PREDICATE), OP_EQUALS, beta(tp, OBJECT)))
    return condition_list
# /Simple SPARQL query translator
# INNER/LEFT JOIN translator
def merge_SQL_objects(sql_obj1, sql_obj2, join_type, trans_mode, is_union=False):
    """Combine two translated SQL objects into a SQLJoinQuery, wrapping each
    as an aliased derived table (q1/q2).

    In trans_mode 3, two plain SQLQuery objects joined with INNER JOIN take
    the multi-table fast path via pr_merge_SQL_objects instead.
    For unions (is_union=True) no join conditions are generated.

    NOTE(review): `&` between the isinstance results is a bitwise AND (no
    short-circuit); it works for bools but `and` is presumably intended.
    """
    if trans_mode == 3:
        if is_union == False:
            if join_type == SQL_INNER_JOIN:
                if isinstance(sql_obj1, SQLQuery) & isinstance(sql_obj2, SQLQuery):
                    return pr_merge_SQL_objects(sql_obj1, sql_obj2)
    # Wrap both operands as aliased sub-queries of the new join.
    table1 = SQLTable(sql_obj1.build_sql(), "q1", True)
    table2 = SQLTable(sql_obj2.build_sql(), "q2", True)
    sql_obj1.set_alias("q1")
    sql_obj2.set_alias("q2")
    # pr list
    if is_union:
        pr_list = build_union_obj_pr(sql_obj1, sql_obj2)
    else:
        pr_list = build_join_pr(sql_obj1, sql_obj2, join_type, trans_mode)
    # conditions
    conditions = None
    if is_union == False:
        conditions = build_join_conditions(sql_obj1, sql_obj2, trans_mode)
    return SQLJoinQuery(pr_list, table1, join_type, table2, conditions)
# Builds projection list of the sql join object which is currently being built
def build_join_pr(sql_obj1, sql_obj2, join_type, trans_mode):
    """Merge the projection lists of two join operands: shared variables are
    projected once (COALESCEd across q1/q2 when a left outer join may leave
    q1's side NULL), all remaining projections are copied through."""
    pr_list = []
    common_fields = find_common_pr(sql_obj1, sql_obj2)
    for common_pr in common_fields:
        # in trans-s, if q1 has no left outer join then project only q1.field without coalesce
        if (trans_mode == 1 or join_type_of_sql_obj(sql_obj1, SQL_LEFT_OUTER_JOIN)):
            common_pr.set_coalesce(
                "Coalesce({0}.{1}, {2}.{1})".format(sql_obj1.alias, common_pr.get_attr_name(), sql_obj2.alias),
                common_pr.get_attr_name())
        else:
            common_pr.set_coalesce("{0}.{1}".format(sql_obj1.alias, common_pr.get_attr_name()),
                                   common_pr.get_attr_name())
        pr_list.append(common_pr)
    # Copy over projections unique to either operand (fresh SQLProject
    # objects so the operands' lists are not aliased by the result).
    for project in sql_obj1.pr_list:
        if project not in pr_list:
            if in_pr_list(project, common_fields) is False:
                new_pr = SQLProject(project.field, project.alias)
                pr_list.append(new_pr)
    for project in sql_obj2.pr_list:
        if project not in pr_list:
            if in_pr_list(project, common_fields) is False:
                new_pr = SQLProject(project.field, project.alias)
                pr_list.append(new_pr)
    return pr_list
def build_union_obj_pr(sql_obj1, sql_obj2):
    """Merge the projection lists for a union object: projections unique to
    either operand first, then the shared ones qualified with the first
    operand's alias."""
    pr_list = []
    common_fields = find_common_pr(sql_obj1, sql_obj2)
    for project in sql_obj1.pr_list:
        if (project not in pr_list):
            if in_pr_list(project, common_fields) is False:
                pr_list.append(project)
    for project in sql_obj2.pr_list:
        if (project not in pr_list):
            if in_pr_list(project, common_fields) is False:
                pr_list.append(project)
    for common_pr in common_fields:
        # Shared variables resolve against the first operand's alias.
        common_pr.set_table_alias(sql_obj1.alias)
        pr_list.append(common_pr)
    return pr_list
def build_join_conditions(sql_obj1, sql_obj2, trans_mode):
    """Generate one equality join condition per variable shared by the two
    operands, each side qualified with its operand's table alias."""
    condList = []
    common_fields = find_common_cond_attrs(sql_obj1, sql_obj2)
    for project in common_fields:
        # TODO might need copier to avoid internal link of objects
        project1 = SQLProject(project.field, project.alias)
        project1.set_table_alias(sql_obj1.alias)
        project2 = SQLProject(project.field, project.alias)
        project2.set_table_alias(sql_obj2.alias)
        # Last flag: presumably NULL-tolerant comparison for every mode
        # except trans_mode 1 — confirm against SQLJoinCondition.
        condList.append(SQLJoinCondition(project1, OP_EQUALS, project2, (trans_mode != 1)))
    return condList
# /INNER JOIN translator
# UNION translator
def unite_SQL_objects(sql_obj1, sql_obj2, trans_mode):
    """Build a SQLUnionQuery from two translated objects.

    trans_mode 1 emulates the union with two symmetric left-outer-join
    merges; other modes simply wrap each operand as an outer query.
    """
    if trans_mode == 1:
        union_part1 = merge_SQL_objects(sql_obj1, sql_obj2, SQL_LEFT_OUTER_JOIN, 1, True)
        union_part2 = merge_SQL_objects(sql_obj2, sql_obj1, SQL_LEFT_OUTER_JOIN, 1, True)
    else:
        union_part1 = create_outer_query(sql_obj1)
        union_part2 = create_outer_query(sql_obj2)
    return SQLUnionQuery(union_part1, union_part2)
# /UNION translator
# SQL object functions
def assign_pr_list(project_list, sql_obj):
    """
    assigns projection list given in original SPARQL query text
    :param project_list: projection list given in original SPARQL text
    :param sql_obj: SQL object translated by function trans
    :return: SPARQL query object : Classes.RelationalClasses.SQLQuery
    """
    # Keep only the projections whose alias appears in the SELECT clause of
    # the original SPARQL query.
    pr_list = []
    for alias in project_list:
        pr_checked = alias_in_pr_list(alias, sql_obj.pr_list)
        if pr_checked != False:
            pr_list.append(pr_checked)
    sql_obj.pr_list = pr_list
    if isinstance(sql_obj, SQLUnionQuery):
        # For unions, restrict each branch to the projections it actually
        # provides so both sides stay valid.
        new_pr_list = []
        for pr in pr_list:
            pr_checked = alias_in_pr_list(pr.alias, sql_obj.query1.pr_list)
            if (pr_checked):
                new_pr_list.append(pr)
        sql_obj.query1.pr_list = new_pr_list
        new_pr_list = []
        for pr in pr_list:
            pr_checked = alias_in_pr_list(pr.alias, sql_obj.query2.pr_list)
            if (pr_checked):
                new_pr_list.append(pr)
        sql_obj.query2.pr_list = new_pr_list
    return sql_obj
def add_order_expr(order_clause, sql_obj):
    """Translate a SPARQL ORDER BY clause (if any) and attach the resulting
    SQLOrderField list to *sql_obj*; returns the (mutated) object."""
    if order_clause.orderCondition():
        sql_order_conds = translate_order_cond(order_clause.orderCondition(), [])
        sql_obj.set_order_fields(sql_order_conds)
    return sql_obj
def translate_order_cond(order_condition, order_list):
    """Recursively translate SPARQL ORDER BY condition parse nodes into
    SQLOrderField objects appended to *order_list*.

    A bare variable maps directly; a bracketted expression (e.g. ASC(?x) /
    DESC(?x)) is drilled through the single-child chain of the expression
    grammar down to the underlying variable, passing the ASC/DESC terminal
    along. Anything else is silently ignored.
    """
    if isinstance(order_condition, list):
        # A list of conditions: translate each in turn.
        for order_cond in order_condition:
            order_list = translate_order_cond(order_cond, order_list)
        return order_list
    else:
        if order_condition.var():
            # Plain variable: order by its mapped column.
            order_list.append(SQLOrderField(get_var(order_condition.var()).context))
            return order_list
        elif order_condition.brackettedExpression():
            # Walk the expression grammar's single-child chain to the var.
            if order_condition.brackettedExpression().expression():
                expr = order_condition.brackettedExpression().expression()
                if expr.conditionalOrExpression():
                    if expr.conditionalOrExpression().conditionalAndExpression():
                        expr = expr.conditionalOrExpression().conditionalAndExpression()[0]
                        if expr.valueLogical():
                            if expr.valueLogical()[0].relationalExpression():
                                if expr.valueLogical()[0].relationalExpression().numericExpression():
                                    expr = expr.valueLogical()[0].relationalExpression().numericExpression()[0]
                                    if expr.additiveExpression():
                                        if expr.additiveExpression().multiplicativeExpression():
                                            if expr.additiveExpression().multiplicativeExpression()[0].unaryExpression():
                                                expr = expr.additiveExpression().multiplicativeExpression()[0].unaryExpression()[0]
                                                if expr.primaryExpression():
                                                    if expr.primaryExpression().var():
                                                        # Keep the ASC/DESC terminal from the outer condition.
                                                        order_list.append(SQLOrderField(get_var(expr.primaryExpression().var()).context, getTerminalNode(order_condition) ))
    return order_list
def pr_list_to_str_list(pr_list):
    """
    Converts a projection list to a plain list of alias strings, which is
    convenient to serialize to JSON.
    :param pr_list: list of SQLProject-like objects exposing an ``alias`` attribute
    :return: list of alias strings
    """
    # Fix: the old docstring documented a nonexistent ``projectList`` parameter;
    # the manual append loop is replaced by a comprehension.
    return [pr.alias for pr in pr_list]
def apply_db_field_to_filter_var(sql_cond, sql_obj):
    """Resolve the filter variable of a condition -- and of each of its chained
    logical expressions -- to the matching database field taken from the SQL
    object's projection list."""
    projections = sql_obj.pr_list
    sql_cond.field1 = find_attr_for_filter(projections, sql_cond.field1)
    for expr in sql_cond.logical_exprs:
        expr.field1 = find_attr_for_filter(projections, expr.field1)
    return sql_cond
def add_condition_to_union_obj(union_obj, sql_cond):
    """Intended to route a WHERE condition into the matching branch(es) of a
    UNION query.  The routing below is still disabled, so the condition is
    currently dropped and the union object is returned unchanged."""
    # TODO: SOLVE IT
    # if in_pr_list(sql_cond.field1, union_obj.query1.pr_list):
    #     union_obj.query1.add_where_condition(sql_cond)
    # if in_pr_list(sql_cond.field1, union_obj.query2.pr_list):
    #     union_obj.query2.add_where_condition(sql_cond)
    return union_obj
# builds sql object from another sql. dedicated for simplified union translator when schemas are same
def create_outer_query(sql_obj):
    """Wrap sql_obj as a derived table aliased 'u' and select its projection
    list from it (used by the simplified UNION translator when both branches
    share the same schema)."""
    # Third SQLTable argument presumably flags a subquery-backed table -- TODO confirm.
    table = SQLTable(sql_obj.build_sql(), 'u', True)
    sql_outer_query = SQLQuery(sql_obj.pr_list, table, [])
    return sql_outer_query
# /SQL object functions
# Extension functions
def pr_merge_SQL_objects(sql_query_obj_1, sql_query_obj_2):
    """
    Builds a multi-table (comma-join) query instead of nested inner joins.

    When exactly one side is already a hyper (multi-table) query, the plain
    query is folded into it; merging two hyper queries is unsupported and
    yields None.
    :param sql_query_obj_1: SQLQuery
    :param sql_query_obj_2: SQLQuery
    :return: SQLQuery with multiple tables, or None when both sides are hyper
    """
    if sql_query_obj_1.is_hyper_query() is True and sql_query_obj_2.is_hyper_query() is False:
        return ext_join_SQL_object_to_SQL_hyper(sql_query_obj_1, sql_query_obj_2)
    if sql_query_obj_2.is_hyper_query() is True and sql_query_obj_1.is_hyper_query() is False:
        return ext_join_SQL_object_to_SQL_hyper(sql_query_obj_2, sql_query_obj_1)
    if sql_query_obj_2.is_hyper_query() is True and sql_query_obj_1.is_hyper_query() is True:
        return None
    # Both sides are single-table: alias them t1/t2 and combine into one query.
    sql_query_obj_1.table.set_alias("t1")
    sql_query_obj_2.table.set_alias("t2")
    tables = [sql_query_obj_1.table, sql_query_obj_2.table]
    # pr list: shared attributes first, then the remainder of each side
    pr_list = ext_build_join_pr(sql_query_obj_1, sql_query_obj_2)
    # conditions: equality joins, their NOT NULL guards, and both WHERE lists
    join_conditions = ext_find_join_conditions(sql_query_obj_1, sql_query_obj_2)
    null_conditions = ext_build_not_null_conditions(sql_query_obj_1, sql_query_obj_2, join_conditions)
    where_conditions = ext_build_where_conditions(sql_query_obj_1, sql_query_obj_2)
    sql_obj = SQLQuery(pr_list, tables, where_conditions, join_conditions)
    sql_obj.set_null_conditions(null_conditions)
    return sql_obj
def ext_join_SQL_object_to_SQL_hyper(sql_query_hyper, sql_query_obj):
    """
    Adds the table and conditions of a single-table query to an existing
    multi-table (hyper) SQL query.
    :param sql_query_hyper: SQL Query with multiple tables (mutated in place)
    :param sql_query_obj: SQL Query with one table
    :return: SQLQuery with multiple tables (the mutated sql_query_hyper)
    """
    new_table_alias = generate_table_alias(sql_query_hyper)
    sql_query_obj.table.set_alias(new_table_alias)
    sql_query_hyper.tables.append(sql_query_obj.table)
    # pr list: attributes the incoming query adds beyond the hyper's own
    diff_pr_list = find_diff_attrs(sql_query_hyper, sql_query_obj)
    # conditions: join on shared attributes; guard the joined side with NOT NULL
    join_conditions = ext_find_join_conditions(sql_query_hyper, sql_query_obj)
    for cond in join_conditions:
        sql_query_hyper.add_join_condition(cond)
        sql_query_hyper.add_null_condition(SQLCondition(cond.field2, OP_SQL_NOT, SQL_NULL))
    for cond in sql_query_obj.where_conditions:
        # Plain string fields are wrapped so they can carry the new alias.
        if isinstance(cond.field1, str):
            cond.field1 = SQLProject(cond.field1)
        cond.field1.set_table_alias(new_table_alias)
        sql_query_hyper.add_where_condition(cond)
    # pr list
    for project in diff_pr_list:
        project.set_table_alias(new_table_alias)
        sql_query_hyper.pr_list.append(project)
    return sql_query_hyper
def ext_find_join_conditions(sql_query_obj_1, sql_query_obj_2):
    """Build the equality conditions that would have connected the two queries
    through an inner join: one condition per attribute shared by both WHERE
    clauses, each side qualified with its own table alias."""
    def aliased_project(query, field_name):
        # Hyper (multi-table) queries expose their alias via the first table.
        proj = SQLProject(field_name)
        if query.is_hyper_query() is True:
            proj.set_table_alias(query.tables[0].alias)
        else:
            proj.set_table_alias(query.table.alias)
        return proj

    conditions = []
    for project in find_common_cond_attrs(sql_query_obj_1, sql_query_obj_2):
        left = aliased_project(sql_query_obj_1, project.field)
        right = aliased_project(sql_query_obj_2, project.field)
        conditions.append(SQLCondition(left, OP_EQUALS, right))
    return conditions
# used only when joining two sql queries each of which has only one table
def ext_build_not_null_conditions(sql_query_obj_1, sql_query_obj_2, condList):
    """
    Build IS NOT NULL guards for both sides of every join condition.

    They could be added while the join conditions themselves are built, but
    for the UX it is better to append them after the main connections.
    :param condList: join conditions produced by ext_find_join_conditions
    :return: list of SQLCondition NOT NULL guards (two per join condition)
    """
    # Fix: removed the unused local ``cond_count``.
    nullCondList = []
    for cond in condList:
        nullCondList.append(SQLCondition(cond.field1, OP_SQL_NOT, SQL_NULL))
        nullCondList.append(SQLCondition(cond.field2, OP_SQL_NOT, SQL_NULL))
    return nullCondList
def ext_build_where_conditions(sql_query_obj_1, sql_query_obj_2):
    """
    Re-qualify the WHERE conditions of both single-table queries with their
    table aliases and merge them into one list for the joined query.

    Fix: the two original loops did the same job inconsistently -- the first
    passed the alias to the SQLProject constructor AND called
    set_table_alias() again with the same value.  Both sides now use one
    identical code path (wrap plain fields, then attach the alias once).
    :return: merged list of re-qualified SQLCondition objects
    """
    condList = []
    for query in (sql_query_obj_1, sql_query_obj_2):
        for cond in query.where_conditions:
            if isinstance(cond.field1, SQLProject) is False:
                cond.field1 = SQLProject(str(cond.field1))
                cond.field1.set_table_alias(query.table.alias)
            condList.append(cond)
    return condList
def ext_build_join_pr(sql_obj1, sql_obj2):
    """Build the projection list of the joined query: the attributes common to
    both sides come first (qualified with sql_obj1's alias, so each shared
    field appears once), followed by the remaining attributes of each side
    qualified with their own alias."""
    pr_list = []
    common_fields = find_common_pr(sql_obj1, sql_obj2)
    for common_pr in common_fields:
        common_pr.set_table_alias(sql_obj1.table.alias)
        pr_list.append(common_pr)
    for project in sql_obj1.pr_list:
        if project not in pr_list:
            # Skip fields already emitted through the common list.
            if in_pr_list(project, common_fields) is False:
                project.set_table_alias(sql_obj1.table.alias)
                pr_list.append(project)
    for project in sql_obj2.pr_list:
        if project not in pr_list:
            if in_pr_list(project, common_fields) is False:
                project.set_table_alias(sql_obj2.table.alias)
                pr_list.append(project)
    return pr_list
# /Extension functions | munkhbayar17/sparql-to-sql | SPARQLToSQL/sql_functions.py | sql_functions.py | py | 13,806 | python | en | code | 1 | github-code | 36 |
74050271464 | import os
from parlai.core.build_data import DownloadableFile
import parlai.core.build_data as build_data
import parlai.utils.logging as logging
# Gendered word lists from the gn_glove project, fetched as plain (unzipped)
# text files and verified against their SHA-256 checksums.
RESOURCES = [
    DownloadableFile(
        'https://raw.githubusercontent.com/uclanlp/gn_glove/master/wordlist/male_word_file.txt',
        'male_word_file.txt',
        'd431679ce3ef4134647e22cb5fd89e8dbee3f04636f1c7cbae5f28a369acf60f',
        zipped=False,
    ),
    DownloadableFile(
        'https://raw.githubusercontent.com/uclanlp/gn_glove/master/wordlist/female_word_file.txt',
        'female_word_file.txt',
        '5f0803f056de3fbc459589bce26272d3c5453112a3a625fb8ee99c0fbbed5b35',
        zipped=False,
    ),
]
def build(datapath):
    """Download the gendered word lists into <datapath>/genderation_bias,
    doing nothing when the current version has already been built."""
    version = 'v1.0'
    dpath = os.path.join(datapath, 'genderation_bias')
    if build_data.built(dpath, version):
        return
    logging.info('[building data: ' + dpath + ']')
    # A stale build (any other version) must be wiped before re-downloading.
    if build_data.built(dpath):
        build_data.remove_dir(dpath)
    build_data.make_dir(dpath)
    # Download the data.
    for resource in RESOURCES:
        resource.download_file(dpath)
    # Stamp the directory so future calls short-circuit.
    build_data.mark_done(dpath, version)
| facebookresearch/ParlAI | parlai/tasks/genderation_bias/build.py | build.py | py | 1,261 | python | en | code | 10,365 | github-code | 36 |
10211953658 | from collections import defaultdict
import time
import os
# This class represents a directed graph using adjacency list representation
class Graph:
    """Directed graph that writes its strongly connected components (SCCs) to
    "Result1.txt" using Kosaraju's algorithm.

    Fix: the original second DFS pass ran on the ORIGINAL graph instead of its
    transpose, so it did not compute SCCs at all (Kosaraju's algorithm requires
    the second pass on the reversed graph).  DFSUtil gained an optional,
    backward-compatible ``adj`` parameter so the transpose can be traversed.
    """
    def __init__(self, vertices):
        self.V = vertices
        self.graph = defaultdict(list)
        # NOTE: the handle is only closed by printSCCs(); a Graph that never
        # calls printSCCs() leaks this append-mode handle.
        self.file = open("Result1.txt", "a")
    # function to add an edge to graph
    def addEdge(self, u, v):
        self.graph[u].append(v)
    # A function used by DFS
    def DFSUtil(self, v, visited, adj=None):
        # Depth-first search over ``adj`` (defaults to self.graph for backward
        # compatibility), writing each visited vertex to the result file.
        if adj is None:
            adj = self.graph
        visited[v] = True
        self.file.write(str(v) + " ")
        # Recur for all the vertices adjacent to this vertex
        for i in adj[v]:
            if visited[i] == False:
                self.DFSUtil(i, visited, adj)
    def fillOrder(self, v, visited, stack):
        # First pass: push vertices onto ``stack`` by increasing finish time.
        visited[v] = True
        for i in self.graph[v]:
            if visited[i] == False:
                self.fillOrder(i, visited, stack)
        stack.append(v)
    def getTranspose(self):
        # Adjacency list of the reversed graph (every edge u->v becomes v->u).
        transpose = defaultdict(list)
        for u in self.graph:
            for v in self.graph[u]:
                transpose[v].append(u)
        return transpose
    # The main function that finds and prints all strongly
    # connected components
    def printSCCs(self):
        stack = []
        # Mark all the vertices as not visited (for the first DFS)
        visited = [False] * (self.V)
        # Fill vertices in stack according to their finishing times
        for i in range(self.V):
            if visited[i] == False:
                self.fillOrder(i, visited, stack)
        # Second pass MUST run on the transposed graph (Kosaraju).
        transpose = self.getTranspose()
        visited = [False] * (self.V)
        # Process all vertices in order defined by the stack
        while stack:
            i = stack.pop()
            if visited[i] == False:
                self.DFSUtil(i, visited, transpose)
        self.file.write("\n")
        self.file.close()
#os.system('cls')
# Driver: read the SNAP-style edge list, split its 5,105,043 edges across ten
# Graph instances, and print the strongly connected components of the first
# three of them.
file = open("task1.txt")
headers = 4
# Skip the header lines at the top of the data file.
for index in range(0, headers):
    file.readline()

NUM_GRAPHS = 10
EDGES_PER_GRAPH = 510504
# Exclusive upper edge-count limit per graph; the final limit is the total
# number of edges in the file.
limits = [EDGES_PER_GRAPH * (i + 1) for i in range(NUM_GRAPHS - 1)] + [5105043]

graphs = [Graph(5105044) for _ in range(NUM_GRAPHS)]
g, g1, g2, g3, g4, g5, g6, g7, g8, g9 = graphs

count = 0
for value in file.readlines():
    value = value.replace('\n', '')
    temp = value.split('\t')
    FromNodeId = temp[0]
    ToNodeId = temp[1]
    # Fix: the original used a chain of INDEPENDENT "if count < ..." blocks,
    # so a single line was added to every remaining graph and `count` was
    # incremented up to ten times per line.  Dispatch each edge to exactly
    # one graph (elif semantics) instead.
    for graph, limit in zip(graphs, limits):
        if count < limit:
            graph.addEdge(int(FromNodeId), int(ToNodeId))
            count += 1
            break

print("Loading ", end=' ')
for i in range(3):
    time.sleep(0.8)
    print(" . ", end=' ')
os.system('cls')
print("Number of Iterations are ", count)
print("Strongly connected components in graph are given below")
g.printSCCs()
g1.printSCCs()
g2.printSCCs()
"""
g3.printSCCs()
g4.printSCCs()
g5.printSCCs()
g6.printSCCs()
g7.printSCCs()
g8.printSCCs()
g9.printSCCs()
"""
# Skip the lines already shown and print only the tail of the result file.
# Fix: the original called fin.read() 753 times with a 0.5 s sleep each --
# every call after the first returned an empty string (~6 wasted minutes).
fin = open("Result1.txt", 'r')
for index in range(0, 15251652):
    fin.readline()
print(fin.read())
fin.close()
print("End of the procedure .........")
40533943010 | import io
import os.path
import pickle
import json
import time
from threading import Thread
from googleapiclient.discovery import build
from googleapiclient.http import MediaIoBaseDownload
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import _G
# Google Drive file downloader
class Downloader:
    """Downloads one Drive file (identified by id) on a background thread
    while exposing an integer progress percentage for polling."""
    def __init__(self, id):
        self.id = id
        self.filename = ""
        self.downloader = None
        self.progress = 0  # 0-100, updated by the worker thread
        self.thread = None
        self.flag_complete = False
    # setup before the download
    def setup(self):
        # Fetch the file metadata (for its name) and build the media request.
        self.header = _G.GDriveService.files().get(fileId=self.id).execute()
        self.request = _G.GDriveService.files().get_media(fileId=self.id)
    # start download
    def start(self):
        # Strip characters the local filesystem cannot accept from the name.
        self.filename = self.header['name'].replace(_G.ForbiddenFileChar, '')
        stream = io.FileIO(self.filename, 'wb')
        self.downloader = MediaIoBaseDownload(stream, self.request)
        # _chunksize is private API -- MediaIoBaseDownload offers no public
        # setter after construction; TODO confirm on client-library upgrades.
        self.downloader._chunksize = _G.DownloaderChunkSize
        self.thread = Thread(target=self.download)
        self.thread.start()
    # async download the file
    def download(self):
        # Worker loop: pull chunks until Drive reports completion.
        while self.flag_complete is False:
            stat, self.flag_complete = self.downloader.next_chunk()
            self.progress = int(stat.progress() * 100)
    def is_completed(self):
        return self.flag_complete
def get_auth_creds():
    """Return cached Google OAuth credentials, or None when no cache exists.

    Also ensures the ``secret/`` directory is present for later cache writes.
    """
    if not os.path.exists('secret/'):
        os.mkdir('secret')
    if not os.path.exists(_G.GDriveCredCache):
        return None
    with open(_G.GDriveCredCache, 'rb') as cache_file:
        return pickle.load(cache_file)
def load_creds_json():
    """Load the OAuth client configuration, preferring the credentials file
    and falling back to the CLIPPER_GDRIVE_CREDS environment variable."""
    raw = _G.loadfile_with_resuce(_G.GDriveCredFilename, 'r') or os.environ['CLIPPER_GDRIVE_CREDS']
    return json.loads(raw)
def start_auth_session(creds):
    """Ensure valid Google credentials (refreshing or running the local OAuth
    flow as needed), persist them to the cache, and store the authorized
    Drive v3 service object on the ``_G`` globals module."""
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            # Expired but refreshable: renew silently.
            creds.refresh(Request())
        else:
            # No usable credentials: run the interactive browser flow.
            flow = InstalledAppFlow.from_client_config(load_creds_json(), _G.GDriveScopes)
            creds = flow.run_local_server(port=0)
        # Cache the (new or refreshed) credentials for the next run.
        with open(_G.GDriveCredCache, 'wb') as file:
            pickle.dump(creds, file)
    _G.GDriveService = build('drive', 'v3', credentials=creds)
def download_data_async(id):
    """Start a background download of the given Drive file id and return the
    running Downloader so callers can poll its progress."""
    downloader = Downloader(id)
    downloader.setup()
    downloader.start()
    return downloader
def init():
    """Authenticate against Google Drive, reusing cached credentials when available."""
    start_auth_session(get_auth_creds())
40961303379 | # coding: utf-8
import argparse
import importlib.util
import inspect
import os
from datetime import datetime
problemas = []
def problema(mensaje, *args):
    """Format *mensaje* with *args* and record it in the global problem list."""
    texto = mensaje.format(*args)
    problemas.append(texto)
def validar_tiempo(inicio, fin, tope, mensaje):
    """Record *mensaje* as a problem when the inicio->fin span exceeds *tope* seconds."""
    transcurrido = (fin - inicio).total_seconds()
    if transcurrido > tope:
        problema(mensaje)
def probar_codigo(interactivo=False, saltear_errores=False, resultado_verboso=False, grupo=None):
    """Import the submitted ``entrega1`` module, validate its resolver()
    signature, and run it over a battery of search problems, recording every
    issue through problema().

    :param interactivo: ask before running each test case
    :param saltear_errores: record exceptions as problems instead of raising
    :param resultado_verboso: also print the goal state and full path
    :param grupo: optional subdirectory holding the group's entrega1.py
    """
    # Dependency check: everything below needs SimpleAI's SearchNode.
    try:
        from simpleai.search.models import SearchNode
    except ImportError:
        problema('No se pudo importar SimpleAI. Se encuentra instalado?')
        return

    # Try to import the submission (optionally from the group's directory).
    print('Importando la entrega...')
    try:
        inicio = datetime.now()
        if grupo:
            spec = importlib.util.spec_from_file_location("{}.entrega1".format(grupo),
                                                          "{}/entrega1.py".format(grupo))
            entrega1 = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(entrega1)
        else:
            import entrega1
        fin = datetime.now()
    except ImportError:
        problema('No se pudo encontrar el código python. Probablemente el nombre del archivo .py '
                 'no es correcto, o no está en la raiz del repositorio.')
        return
    # Importing must be near-instant; a slow import means the search runs at import time.
    validar_tiempo(inicio, fin, 3,
                   'El import de la entrega demora demasiado tiempo, probablemente están '
                   'haciendo búsqueda en el import. Hagan lo del if __name__ ... que se '
                   'recomienda en la consigna.')

    # Extract and validate the resolver function's signature.
    print('Extrayendo la función resolver...')
    resolver = getattr(entrega1, 'resolver', None)
    if resolver is None:
        problema('El módulo python no define la función resolver.')
        return
    # Fix: inspect.getargspec was removed in Python 3.11; getfullargspec
    # exposes the same .args / .defaults attributes.
    firma_resolver = inspect.getfullargspec(resolver)
    args = firma_resolver.args
    defaults = firma_resolver.defaults or []
    if args[:len(args) - len(defaults)] != ['metodo_busqueda', 'franceses', 'piratas']:
        problema('La función resolver no recibe los parámetros definidos en la entrega.')
        return

    # Validate resolver behaviour and the problem model in general.
    print('Probando la resolución de problemas...')
    franceses_consigna = (
        (0, 2),
        (0, 3),
        (1, 2),
        (1, 3),
        (2, 1),
        (2, 2),
        (2, 3),
        (3, 0),
        (3, 1),
        (3, 2),
        (4, 0),
        (4, 1),
        (5, 0),
    )
    piratas_consigna = (
        (4, 4),
        (4, 5),
        (5, 4),
    )
    # Each entry: metodo_busqueda, franceses, piratas, limite_largo_camino, limite_tiempo
    pruebas = (
        # no frenchmen, 1 pirate
        ('breadth_first', [], [(4, 4)], 14, 2),
        ('depth_first', [], [(4, 4)], 100, 10),
        ('greedy', [], [(4, 4)], 25, 2),
        ('astar', [], [(4, 4)], 14, 2),
        # 3 frenchmen, 2 pirates
        ('breadth_first', [(1, 0), (2, 1), (3, 0)], [(3, 4), (4, 4)], 19, 10),
        ('depth_first', [(1, 0), (2, 1), (3, 0)], [(3, 4), (4, 4)], 200, 30),
        ('greedy', [(1, 0), (2, 1), (3, 0)], [(3, 4), (4, 4)], 50, 10),
        ('astar', [(1, 0), (2, 1), (3, 0)], [(3, 4), (4, 4)], 19, 10),
        # the case from the assignment statement
        ('breadth_first', franceses_consigna, piratas_consigna, 33, 30),
        ('depth_first', franceses_consigna, piratas_consigna, 500, 60),
        ('greedy', franceses_consigna, piratas_consigna, 60, 30),
        ('astar', franceses_consigna, piratas_consigna, 33, 30),
    )
    for numero_prueba, prueba in enumerate(pruebas):
        metodo_busqueda, franceses, piratas, limite_largo_camino, limite_tiempo = prueba
        print('  Prueba', numero_prueba, ':', metodo_busqueda, 'franceses:', franceses, 'piratas:',
              piratas)
        if not interactivo or input('ejecutar? (Y/n)').strip() in ('y', ''):
            try:
                inicio = datetime.now()
                resultado = resolver(metodo_busqueda=metodo_busqueda,
                                     franceses=franceses,
                                     piratas=piratas)
                fin = datetime.now()
                if isinstance(resultado, SearchNode):
                    print('    largo camino:', len(resultado.path()))
                    print('    estado:', resultado.state)
                    print('    acciones:', [accion for accion, estado in resultado.path()])
                    if resultado_verboso:
                        print('    meta:', repr(resultado.state))
                        print('    camino:', repr(resultado.path()))
                else:
                    print('    resultado:', str(resultado))
                print('    duración:', (fin - inicio).total_seconds())
                if resultado is None:
                    problema('El resultado devuelto por la función resolver en la prueba {} fue '
                             'None, cuando el problema tiene que encontrar solución y se espera '
                             'que retorne el nodo resultante. Puede que la función resolver no '
                             'esté devolviendo el nodo resultante, o que el problema no esté '
                             'encontrando solución como debería.',
                             numero_prueba)
                elif isinstance(resultado, SearchNode):
                    # Fix: restored the missing space between "camino" and "esperable".
                    if limite_largo_camino and len(resultado.path()) > limite_largo_camino:
                        problema('El resultado devuelto en la prueba {} excede el largo de camino '
                                 'esperable ({}) para ese problema y método de búsqueda. Es '
                                 'posible que algo no esté bien.',
                                 numero_prueba, limite_largo_camino)
                else:
                    problema('El resultado devuelto por la función resolver en la prueba {} no es '
                             'un nodo de búsqueda.',
                             numero_prueba)
                if limite_tiempo is not None:
                    validar_tiempo(inicio, fin, limite_tiempo,
                                   'La prueba {} demoró demasiado tiempo (más de {} segundos), '
                                   'probablemente algo no está demasiado '
                                   'bien.'.format(numero_prueba, limite_tiempo))
            except Exception as err:
                if saltear_errores:
                    problema('Error al ejecutar prueba {} ({})', numero_prueba, str(err))
                else:
                    raise
def probar_estadisticas(grupo=None):
    """Parse the entrega1.txt statistics file and check that all four cases
    are present, well formed ("caso:int,int,int,int") and not repeated,
    recording every issue through problema()."""
    # Open the statistics file (optionally inside the group's directory).
    print('Abriendo estadísticas...')
    nombre_archivo = 'entrega1.txt'
    if grupo:
        nombre_archivo = os.path.join(grupo, nombre_archivo)
    if not os.path.exists(nombre_archivo):
        problema('No se pudo encontrar el archivo de estadísticas. Probablemente el nombre del '
                 'archivo no es correcto, o no está en la raiz del repositorio.')
        return
    with open(nombre_archivo) as archivo_stats:
        lineas_stats = archivo_stats.readlines()

    # Validate contents: cases 1..4 must each appear exactly once.
    casos = list(range(1, 5))
    casos_pendientes = casos[:]
    for linea in lineas_stats:
        linea = linea.strip()
        if linea:
            try:
                caso, valores = linea.split(':')
                caso = int(caso)
                valores = list(map(int, valores.split(',')))
                if len(valores) != 4:
                    raise ValueError()
                if caso not in casos:
                    problema('Caso desconocido en archivo de estadísticas: {}', caso)
                elif caso not in casos_pendientes:
                    problema('Caso repetido en archivo de estadísticas: {}', caso)
                else:
                    print('    Encontrado caso', caso)
                    print('      Valores:', valores)
                    casos_pendientes.remove(caso)
            # Fix: the original bare ``except:`` swallowed every exception
            # (even KeyboardInterrupt); only malformed lines raise ValueError.
            except ValueError:
                problema('La siguiente linea de estadísticas no respeta el formato definido: {}',
                         linea)
    if casos_pendientes:
        problema('No se incluyeron las estadísticas de los siguientes casos: {}',
                 repr(casos_pendientes))
def imprimir_resultados():
    """Print the collected problem list, when there is anything to report."""
    if problemas:
        print('Problemas que es necesario corregir' + ':')
        for item in problemas:
            print('*', item)
def probar(interactivo=False, saltear_errores=False, resultado_verboso=False, grupo=None):
    """Run the whole automatic check: code probes, statistics file, summary."""
    separador = '#' * 80
    print(separador)
    if grupo:
        print("Probando grupo", grupo)
    probar_codigo(interactivo, saltear_errores, resultado_verboso, grupo)
    print()
    probar_estadisticas(grupo)
    print()
    print('Pruebas automáticas terminadas!')
    print()
    imprimir_resultados()
    print(separador)
# Command-line entry point: the flags mirror the keyword arguments of probar().
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', action='store_true', help='Interactivo')
    parser.add_argument('-s', action='store_true', help='Saltear errores')
    parser.add_argument('-v', action='store_true', help='Resultado verboso')
    parser.add_argument('--path', help='Path a la entrega')
    args = parser.parse_args()
    probar(args.i, args.s, args.v, args.path)
| ucse-ia/ucse_ia | 2019/probar_entrega1.py | probar_entrega1.py | py | 9,466 | python | es | code | 5 | github-code | 36 |
5838427982 | from .msg_queue import MsgQueue
class MsgQueueMgr(object):
    """Lazily creates and caches one MsgQueue per topic."""

    def __init__(self, queue_capacity):
        # Capacity handed to every queue created by get().
        self.__queue_capacity = queue_capacity
        self.__dict = dict()

    def clear(self):
        """Empty every managed queue and forget all topics."""
        for queue in self.__dict.values():
            queue.clear()
        self.__dict.clear()

    def get(self, topic):
        """Return the queue for *topic*, creating it on first access.

        Fix: uses an explicit ``is None`` check -- the old truthiness test
        (``if not queue``) silently replaced an existing-but-empty queue with
        a fresh one, orphaning references held by callers.
        """
        queue = self.__dict.get(topic)
        if queue is None:
            queue = MsgQueue(self.__queue_capacity)
            self.__dict[topic] = queue
        return queue

    def delete(self, topic):
        """Drop *topic*'s queue (clearing it first); no-op for unknown topics.

        Fix: the old ``if not queue: return`` guard also skipped deletion for
        empty-but-present queues.
        """
        queue = self.__dict.pop(topic, None)
        if queue is not None:
            queue.clear()
| maxwell-dev/maxwell-client-python | maxwell/client/msg_queue_mgr.py | msg_queue_mgr.py | py | 665 | python | en | code | 1 | github-code | 36 |
74123444263 | #!/usr/bin/env python
"""Test `crtm_api` module."""
from crtm_poll import crtm_api
from aiohttp import ClientSession
from aioresponses import aioresponses
import os
import pytest
class TestAPI:
    """Unit tests for crtm_poll.crtm_api.stop_times (fetch logging + async fetch)."""
    def test_can_log_fetch(self, tmpdir):
        # With a real file target, fetch_log must append the values as a CSV row.
        fetch_path = 'fetch_log'
        file = tmpdir.join(fetch_path)
        crtm_api.stop_times.fetch_log(file, 'a', 'b', 'c')
        assert file.readlines()[1] == 'a,b,c'
    def test_can_not_log_fetch(self, tmpdir):
        # With a None target, fetch_log must not create any file.
        fetch_path = 'fetch_log'
        file = tmpdir.join(fetch_path)
        crtm_api.stop_times.fetch_log(None, 'a', 'b', 'c')
        assert not os.path.isfile(file)
    @pytest.mark.asyncio
    async def test_can_fetch_ok_stop(self):
        # Mock the CRTM widget endpoint and check fetch() returns its body.
        with aioresponses() as m:
            m.get('https://www.crtm.es/widgets/api/GetStopsTimes.php?'
                  'codStop=8_17491&orderBy=2&stopTimesByIti=3&type=1',
                  status=200, body='test')
            session = ClientSession()
            fetch_conf = {
                'log': None,
                'timeout': 10,
                'max_connections': 1}
            resp_text = await crtm_api.stop_times.fetch('8_17491', session,
                                                        fetch_conf)
            assert 'test' in resp_text
            await session.close()
| cgupm/crtm_poll | tests/test_stop_times.py | test_stop_times.py | py | 1,317 | python | en | code | 0 | github-code | 36 |
11875960821 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 17 13:24:02 2019
@author: stark
"""
import threading
from queue import Queue
from spider import Spider
from domain import *
from utility import *
PROJECT_NAME = 'dragzon'
HOMEPAGE = 'https://dragzon.com'
DOMAIN_NAME = get_domain_name(HOMEPAGE)
QUEUE_FILE = PROJECT_NAME + '/queue.txt'
CRAWLED_FILE = PROJECT_NAME + '/crawled.txt'
NUMBER_OF_THREADS = 8
queue = Queue()
Spider(PROJECT_NAME, HOMEPAGE, DOMAIN_NAME)
def crawl():
    """Kick off a crawl pass when the queue file still holds links."""
    pending = fileToSet(QUEUE_FILE)
    pending_count = len(pending)
    if pending_count > 0:
        print(str(pending_count) + ' links in the queue')
        create_jobs()
def create_workers():
    """Spawn the fixed pool of daemon crawler threads."""
    for _ in range(NUMBER_OF_THREADS):
        worker = threading.Thread(target=work, daemon=True)
        worker.start()
def work():
    # Worker loop: runs forever in a daemon thread, pulling URLs off the
    # shared queue, crawling each one, and marking the job done so that
    # queue.join() in create_jobs() can unblock.
    while True:
        url = queue.get()
        Spider.crawlPage(threading.current_thread().name, url)
        queue.task_done()
# Each queued link is a new job
def create_jobs():
    """Feed every queued link to the workers, wait for completion, re-crawl."""
    links = fileToSet(QUEUE_FILE)
    for link in links:
        queue.put(link)
    queue.join()  # block until the workers have processed every job
    crawl()
# Bootstrap: start the worker pool, then seed it from the queue file.
create_workers()
crawl()
31462474999 | # Python built-in modules and packages
from dataclasses import dataclass
from typing import Dict, List, Union, Tuple
# --- Useful type hints ---
TableName = str  # database table / packet type name ("gps", "tbr", "tag")
DatafieldName = str  # column name inside a table
PacketType = str
PacketData = Union[int, str, float]
Packet = Dict[DatafieldName, PacketData]
Message = Dict[str, List[Packet]]
RowDB = List[Tuple[DatafieldName, PacketData]]
DbFormat = Dict[str, List[DatafieldName]]
# Database table formats: column layout (in insertion order) for each
# packet type's table.  convert_msg_to_database_format() keeps only the
# datafields listed here.
dbFormats: DbFormat = {
    "gps": [
        "timestamp",
        "date",
        "hour",
        "tbr_serial_id",
        "SLIM_status",
        "longitude",
        "latitude",
        "pdop",
        "FIX",
        "num_sat_tracked",
        "comment",
    ],
    "tbr": [
        "timestamp",
        "date",
        "hour",
        "tbr_serial_id",
        "temperature",
        "temperature_data_raw",
        "noise_avg",
        "noise_peak",
        "frequency",
        "comment",
    ],
    "tag": [
        "timestamp",
        "date",
        "hour",
        "tbr_serial_id",
        "comm_protocol",
        "frequency",
        "tag_id",
        "tag_data",
        "tag_data_raw",
        "tag_data_2",  # DS256
        "tag_data_raw_2",  # DS256
        "snr",
        "millisecond",
        "comment",
    ],
}
@dataclass
class DatabasePacket:
    """One row ready for SQL insertion: target table, column names and values,
    plus pre-rendered SQL fragments for building the INSERT statement."""
    table: TableName
    columns: Tuple[str]
    values: Tuple[PacketData]
    numOfValues: int = 0
    sql_columns: str = ""  # e.g. "(col_a, col_b)"
    sql_values: str = ""   # e.g. "(?, ?)" -- placeholders for safe insertion

    def __post_init__(self):
        self.numOfValues = len(self.values)
        self._handle_sql_columns()
        self._handle_sql_values()

    def _handle_sql_columns(self):
        """
        Renders columns=('col_name_1', ..., 'col_name_n')
        as self.sql_columns='(col_name_1, ..., col_name_n)'.

        Fix: the old hand-rolled iterator/while/StopIteration loop is replaced
        by str.join, which also handles an empty column tuple gracefully
        (the old code raised an uncaught StopIteration).
        """
        self.sql_columns = "({})".format(", ".join(self.columns))

    def _handle_sql_values(self):
        """
        Uses numOfValues to set self.sql_values='(?, ?, ..., ?)' for safer
        (parameterized) SQL insertion.
        """
        self.sql_values = "({})".format(", ".join(["?"] * self.numOfValues))
def convert_msg_to_database_format(msg: Message, msgID: int) -> List[DatabasePacket]:
    """Split a decoded message into per-table DatabasePackets, tagging every
    packet with the message id as its first column.

    NOTE(review): the annotation says ``Message`` (a dict alias), but the body
    reads ``msg.payload`` -- the argument is presumably a message *object*
    carrying a ``payload`` list of packet dicts; confirm against the caller.
    """
    dbmsg: List[DatabasePacket] = []
    for i, packet in enumerate(msg.payload):
        type: TableName = packet["packetType"]
        dbformat: List[DatafieldName] = dbFormats[type]
        columns, values = [], []  # type: List[str], List[PacketData]
        # Keep only the datafields this table knows about, in table order.
        for datafield in dbformat:
            if datafield in packet:
                columns.append(datafield)
                values.append(packet[datafield])
        # add message_id to packet
        columns.insert(0, "message_id")
        values.insert(0, msgID)
        dbPacket = DatabasePacket(type, tuple(columns), tuple(values))
        dbmsg.append(dbPacket)
    return dbmsg
| PerKjelsvik/iof | src/backend/dbmanager/msgconversion.py | msgconversion.py | py | 3,037 | python | en | code | 1 | github-code | 36 |
21186201558 | from datetime import datetime
import pytest
from model_bakery import baker
from mytodo.models import Tarefa
from mytodo.services import tarefa_service
def test_should_get_tarefa_as_pending(db):
    # A freshly created (not-done) Tarefa must report status 'pending'.
    my_tarefa = baker.make(Tarefa, description='Create an ansible deploy script', due_to=datetime.now())
    assert my_tarefa.status == 'pending'
def test_should_get_tarefa_as_done(db):
    # mark_as_done() on a pending Tarefa must flip its status to 'done'.
    my_tarefa = baker.make(Tarefa, description='Create an ansible deploy script', due_to=datetime.now())
    tarefa_updated = tarefa_service.mark_as_done(my_tarefa.id)
    assert tarefa_updated.status == 'done'
def test_should_raise_an_erro_for_invalid_tarefa_id(db):
    # A nonexistent id must raise RuntimeError with the exact Spanish message.
    invalid_tarefa = 0
    with pytest.raises(RuntimeError) as error:
        tarefa = tarefa_service.mark_as_done(invalid_tarefa)
    assert str(error.value) == f"Tarefa ID: {invalid_tarefa} invalida"
def test_should_mark_as_undone(db):
    # Calling mark_as_done() on an already-done Tarefa apparently toggles it
    # back to pending -- TODO confirm the service really implements a toggle.
    my_tarefa = baker.make(
        Tarefa,
        description='Create an ansible deploy script',
        due_to=datetime.now(),
        done=True)
    tarefa_updated = tarefa_service.mark_as_done(my_tarefa.id)
    assert tarefa_updated.status == 'pending'
| JonathansManoel/todolist | mytodo/tests/test_tarefa_status.py | test_tarefa_status.py | py | 1,160 | python | en | code | 0 | github-code | 36 |
12027951137 | from solid import (
part,
sphere,
cube,
translate,
hull,
)
from solid.utils import right
from utils import render_to_openscad
def main():
    """Assemble five convex-hull demonstrations, laid out side by side along
    the X axis, and return the combined CSG object."""
    # Example 1: hull of two cubes stacked along Y.
    demo1 = part()
    demo1.add(translate((0, 0, 0))(cube((5, 5, 5), center=False)))
    demo1.add(translate((0, 10, 0))(cube((5, 5, 5), center=False)))
    shape1 = hull()(demo1)

    # Example 2: hull of two diagonally offset cubes.
    demo2 = part()
    demo2.add(translate((0, 0, 0))(cube((5, 5, 5), center=False)))
    demo2.add(translate((10, 10, 0))(cube((5, 5, 5), center=False)))
    shape2 = right(10)(hull()(demo2))

    # Example 3: crystal-like hull of cubes offset along all three axes.
    demo3 = part()
    demo3.add(translate((0, 0, 0))(cube((5, 5, 5), center=False)))
    demo3.add(translate((10, 10, 10))(cube((5, 5, 5), center=False)))
    shape3 = right(25)(hull()(demo3))

    # Example 4: hot-air balloon -- a big sphere hulled with a tiny one below.
    demo4 = part()
    demo4.add(sphere(d=20))
    demo4.add(translate((0, 0, -20))(sphere(d=3)))
    shape4 = right(50)(hull()(demo4))

    # Example 5: box with rounded corners, hulled from eight corner spheres.
    demo5 = part()
    for x in range(-10, 20, 20):
        for y in range(-10, 20, 20):
            for z in range(-10, 20, 20):
                demo5.add(translate((x, y, z))(sphere(d=10)))
    shape5 = right(80)(hull()(demo5))

    return shape1 + shape2 + shape3 + shape4 + shape5
if __name__ == '__main__':
    # Render the combined scene to OpenSCAD source without launching the viewer.
    render_to_openscad(main(), no_run=True)
| cr8ivecodesmith/py3dp_book | solid_/hull_.py | hull_.py | py | 1,566 | python | en | code | 4 | github-code | 36 |
#!/usr/bin/python3
# Simple UDP chat receiver: waits for datagrams on 127.0.0.1:4444, prints each
# message with its sender address, and lets the user type a reply.  Either
# side sending/typing "exit" ends the session.
#
# Fixes: the original mixed Python 2 (raw_input) with Python 3 print() calls
# and so ran correctly under neither; the length guard checked > 15 while its
# own error message says 150 (recvfrom also caps at 150 bytes); and an
# over-long reply was re-prompted only once and then silently never sent.
import socket
import sys

rec_ip = "127.0.0.1"
rec_port = 4444

s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind((rec_ip, rec_port))

while True:
    data = s.recvfrom(150)  # payload capped at 150 bytes
    message = data[0].decode()
    if message == "exit":
        break
    print("Message from sender:", message)
    print("IP and Port Number:", data[1])
    reply = input("Reply:")
    # Re-prompt until the reply fits in the 150-byte datagram limit.
    while len(reply) > 150:
        print("Error:Length recieved of data must be smaller then 150")
        reply = input("Reply:")
    if reply == "exit":
        break
    s.sendto(reply.encode(), data[1])
s.close()
| ananyabisht07/Python | rec1.py | rec1.py | py | 533 | python | en | code | 0 | github-code | 36 |
# For each whitespace-separated word on the input line, print the difference
# between the character codes of its largest and smallest characters,
# separated by single spaces.
words = input().split()
for word in words:
    spread = abs(ord(max(word)) - ord(min(word)))
    print(spread, end=" ")
37826834676 | from Tree_zzh import tree
import queue
class Solution(object):
    """LeetCode 129 -- sum every root-to-leaf number of a binary tree, solved
    three ways: recursive DFS, iterative DFS with a stack, BFS with a queue."""

    def sumNumbers(self, root):
        """Recursive solution.

        :type root: TreeNode
        :rtype: int
        """
        self.res = 0
        self.dfs(root, 0)
        return self.res

    def dfs(self, root, val):
        # `val` is the number formed by the path above `root`.
        if not root:
            return
        path_value = val * 10 + root.val
        if root.left is None and root.right is None:
            self.res += path_value  # leaf: the path number is complete
        self.dfs(root.left, path_value)
        self.dfs(root.right, path_value)

    def sumNumbers2(self, root):
        """Iterative depth-first traversal with an explicit stack."""
        if not root:
            return 0
        total = 0
        pending = [(root, root.val)]
        while pending:
            node, value = pending.pop()
            if not node.left and not node.right:
                total += value
            # Push right before left so the left subtree is visited first.
            if node.right:
                pending.append((node.right, value * 10 + node.right.val))
            if node.left:
                pending.append((node.left, value * 10 + node.left.val))
        return total

    def sumNumbers3(self, root):
        """Breadth-first traversal with a FIFO queue."""
        if not root:
            return 0
        total = 0
        pending = queue.Queue()
        pending.put((root, root.val))
        while not pending.empty():
            node, value = pending.get()
            if not node.left and not node.right:
                total += value
            if node.left:
                pending.put((node.left, value * 10 + node.left.val))
            if node.right:
                pending.put((node.right, value * 10 + node.right.val))
        return total
# Demo: build a sample tree and show that all three approaches agree.
root = tree.get_one()
solution = Solution()
print(solution.sumNumbers(root))
print(solution.sumNumbers2(root))
print(solution.sumNumbers3(root))
74261408425 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
import time
import random
from twisted.internet import task
from twisted.internet.protocol import DatagramProtocol
from demo.main import reactor
from demo.proto import ProtocolParser
tasks = {}
finished_tasks = {}
class ClientProtocol(DatagramProtocol):
    """UDP client protocol that periodically sends task requests to the
    dispatcher and records round-trip times in the module-level ``tasks`` /
    ``finished_tasks`` dicts."""
    def __init__(self, config):
        self.config = config
    def callback(self):
        # Build a unique task id (name + timestamp + random suffix), remember
        # the send time, and fire the request at the dispatcher.
        task_id = '%s_%i_%i' % (self.config['name'], int(time.time()), random.randint(10, 1000))
        proto = ProtocolParser(self.config['name'], {'task_client_request': True}, task_id)
        tasks[proto.task_id] = time.time()
        print('Send client request %s' % proto.task_id)
        self.transport.write(proto.serialize())
    def startProtocol(self):
        # Pin the transport to the dispatcher so write() needs no address.
        self.transport.connect(self.config['dispatcher_address'], self.config['dispatcher_port'])
    def stopProtocol(self):
        # Re-register on an ephemeral port when the transport stops.
        reactor.listenUDP(0, self)
    def datagramReceived(self, datagram, addr):
        proto = ProtocolParser.deserialize(datagram)
        print('Incoming request from %s %s' % addr)
        if proto.task_id is None or proto.task_client_response is False:
            print('Wrong packet')
            return None
        if tasks.get(proto.task_id):
            # Known task: record its round-trip time and mark it answered.
            finished_tasks[proto.task_id] = time.time() - tasks[proto.task_id]
            tasks[proto.task_id] = True
            print('Incoming response %s. Work time is %.2f sec' % (proto.task_id, finished_tasks[proto.task_id]))
        else:
            print('Wrong task %s' % proto.task_id)
def initial(conf):
    """Start the client: merge ``conf`` over the defaults and schedule a
    task request every ``task_send_timeout`` seconds."""
    # Prepare config
    config = {
        'name': 'client_%i_%i' % (int(time.time()), random.randint(10, 1000)),
        'dispatcher_address': '127.0.0.1',
        'dispatcher_port': 8000,
        'task_send_timeout': 60
    }
    config.update(conf)
    # Watchdog beat
    watchdog = ClientProtocol(config)
    # Port 0 lets the OS pick an ephemeral local UDP port.
    reactor.listenUDP(0, watchdog)
    loop = task.LoopingCall(watchdog.callback)
    loop.start(config['task_send_timeout'])
def report():
    """Print summary statistics for the tasks sent in this session."""
    times = finished_tasks.values()
    average = sum(times) / len(times) if times else 0
    # Answered entries in ``tasks`` are overwritten with True by
    # datagramReceived; anything else still holds the start timestamp.
    not_answered_tasks = len([i for i in tasks.values() if i is not True])
    print('Sent tasks: %d' % len(tasks))
    print('Finished tasks: %d' % len(finished_tasks))
    print('Not answered tasks: %d' % not_answered_tasks)
    print('Minimal task time: %.2f' % (min(times) if times else 0))
    print('Average task time: %.2f' % average)
    print('Maximal task time: %.2f' % (max(times) if times else 0))
| frad00r4/demo_project | demo/modules/client.py | client.py | py | 2,553 | python | en | code | 0 | github-code | 36 |
11336978974 | import ujson
import time
from stepist.flow.libs.simple_queue import SimpleQueue
from stepist.flow.workers.worker_engine import BaseWorkerEngine
from stepist.flow.workers.adapters import utils
class SimpleQueueAdapter(BaseWorkerEngine):
    """Worker engine that stores jobs in Redis lists (one list per step)
    through stepist's SimpleQueue helper."""
    def __init__(self, redis_connection, data_pickler=ujson, verbose=True,
                 jobs_limit=None, jobs_limit_wait_timeout=10):
        # jobs_limit (if set) caps queued jobs per step; producers poll every
        # jobs_limit_wait_timeout seconds until the queue drains below it.
        self.redis_connection = redis_connection
        self.jobs_limit = jobs_limit
        self.jobs_limit_wait_timeout = jobs_limit_wait_timeout
        self.verbose = verbose
        self.queue = SimpleQueue(data_pickler,
                                 self.redis_connection)
    def add_job(self, step, data, **kwargs):
        # Enqueue one job; blocks while the step's queue is at the cap.
        q_name = self.get_queue_name(step)
        if self.jobs_limit:
            while self.jobs_count(step) >= self.jobs_limit:
                print("Jobs limit exceeded, waiting %s seconds"
                      % self.jobs_limit_wait_timeout)
                time.sleep(self.jobs_limit_wait_timeout)
        self.queue.add_job(q_name, data.get_dict())
    def add_jobs(self, step, jobs_data, **kwargs):
        # Bulk variant of add_job; the cap check happens once, up front.
        if self.jobs_limit:
            while self.jobs_count(step) >= self.jobs_limit:
                print("Jobs limit exceeded, waiting %s seconds"
                      % self.jobs_limit_wait_timeout)
                time.sleep(self.jobs_limit_wait_timeout)
        jobs_data_dict = [data.get_dict() for data in jobs_data]
        self.queue.add_jobs(self.get_queue_name(step), jobs_data_dict)
    def receive_job(self, step, wait_timeout=3):
        # Pop one job for the step, blocking up to wait_timeout seconds.
        key, data = self.queue.reserve_jobs([self.get_queue_name(step)],
                                            wait_timeout=wait_timeout)
        return data
    def process(self, *steps, die_when_empty=False, die_on_error=True):
        # Run a consume loop over all given steps' queues.
        self.queue.process({self.get_queue_name(step): step for step in steps},
                           die_when_empty=die_when_empty,
                           die_on_error=die_on_error,
                           verbose=self.verbose)
    def flush_queue(self, step):
        # Drop every queued job for the step.
        queue_name = self.get_queue_name(step)
        self.queue.flush_jobs(queue_name)
    def jobs_count(self, *steps):
        # Total queued jobs across the given steps.
        # NOTE(review): this uses step.get_queue_name() while the rest of the
        # class uses the static get_queue_name(step) ("stepist::" prefix) --
        # confirm both produce the same Redis key.
        sum_by_steps = 0
        for step in steps:
            q_key = step.get_queue_name()
            sum_by_steps += self.queue.redis_db.llen(q_key)
        return sum_by_steps
    def register_worker(self, handler):
        # No registration needed for the plain Redis-list backend.
        pass
    def monitor_steps(self, steps, monitoring_for_sec):
        # Sample the Redis MONITOR stream for the given duration and count
        # lpush (enqueue) / lpop (dequeue) commands per step.
        push = dict()
        pop = dict()
        pool = self.redis_connection.connection_pool
        monitor = utils.RedisMonitor(pool)
        commands = monitor.monitor(monitoring_for_sec)
        for command in commands:
            command = command.lower()
            for step in steps:
                key = step.get_queue_name()
                step_key = step.step_key()
                if key in command and 'lpush' in command:
                    push[step_key] = push.get(step_key, 0) + 1
                if key in command and 'lpop' in command:
                    pop[step_key] = pop.get(step_key, 0) + 1
        return push, pop
    @staticmethod
    def get_queue_name(step):
        # Redis key under which a step's job list lives.
        return "stepist::%s" % step.step_key()
| electronick1/stepist | stepist/flow/workers/adapters/simple_queue.py | simple_queue.py | py | 3,251 | python | en | code | 27 | github-code | 36 |
18974168832 | from __future__ import print_function
import os
import sys
import pandas as pd
# Known clonotype-table formats: name -> (count column, CDR3-AA column,
# sample column or None when the sample is taken from the file name).
FORMATS = {
    'mixcr': ('cloneCount', 'aaSeqCDR3', None),
    'changeo_with_sample': ('DUPCOUNT', 'CLONE_CDR3_AA', 'SAMPLE'),
    'changeo': ('DUPCOUNT', 'CLONE_CDR3_AA', None),
    'vdjtools': ('count', 'cdr3aa', None),
    'mitcr': ('Read_count', 'CDR3_amino_acid_sequence', None),
    'immunoseq': ('count (templates/reads)', 'aminoAcid', None),
    'immuneDB_cloneoverlap': ('copies', 'clone_id', 'sample')
}

# Ordered (format, marker-columns) pairs used for auto-detection: the first
# entry whose columns all appear in the header line wins, so order matters.
FMT_COLS = (
    ("mixcr", ["clonalSequenceQuality", "minQualFR1", "allDAlignments"]),
    # "changeo_with_sample" needs to be checked before "changeo"
    ("changeo_with_sample", ["SEQUENCE_ID", "JUNCTION_LENGTH", "CLONE_CDR3_AA", "SAMPLE"]),
    ("changeo", ["SEQUENCE_ID", "JUNCTION_LENGTH", "CLONE_CDR3_AA"]),
    ("vdjtools", ["freq", "cdr3nt", "cdr3aa"]),
    ("immunoseq", ["aminoAcid", "frequencyCount", "cdr3Length"]),
    ("mitcr", ["Read count", "CDR3 amino acid sequence", "V segments"]),
    ("immuneDB_cloneoverlap", ["copies", "clone_id", "sample"])
)


class BaseParser:
    """Auto-detecting accessor for clonotype tables in several formats.

    Detects the file format from the header line, then exposes the count,
    CDR3 and sample fields uniformly via getCount/getSequence/getSample.
    """
    def __init__(self, filename, fmt=None, fmt_cols=None):
        '''
        Sample format parser.

        :param filename: path of the table whose format is auto-detected.
        :param fmt: optional extra entries merged over FORMATS
                    (dict of name -> (count_col, cdr3_col, sample_col)).
        :param fmt_cols: optional extra (name, marker_columns) pairs tried
                         BEFORE the built-in FMT_COLS during detection.
        '''
        # BUG FIX: copy FORMATS so per-instance customisation never mutates
        # the module-level dict shared by every other parser instance.
        self.FORMATS = dict(FORMATS)
        if fmt is not None:
            self.FORMATS.update(fmt)
        # BUG FIX: FMT_COLS is a tuple, so the old ``self.FMT_COLS.update(...)``
        # raised AttributeError whenever fmt_cols was supplied.  User entries
        # are prepended so they take detection priority.
        if fmt_cols is None:
            self.FMT_COLS = FMT_COLS
        else:
            self.FMT_COLS = tuple(fmt_cols) + FMT_COLS
        fmt = self._detect_format(filename)
        self._filename = filename
        self.cnt_field, self.cdr3_field, self.sample = self._get_field_names(fmt)

    def _detect_format(self, filename):
        """
        Does a simple auto-detection of file format based on column names.

        Raises Exception when no known format matches the header line.
        """
        with open(filename, 'rt') as fh:
            first_line = next(fh)
        for fmt, column_names in self.FMT_COLS:
            if all([column_name in first_line for column_name in column_names]):
                print("%s looks like a %s file" % (filename, fmt))
                return fmt
        raise Exception("Unable to detect format of %s" % filename)

    def _get_field_names(self, fmt):
        # (count column, CDR3 column, sample column or None) for the format.
        return self.FORMATS[fmt]

    def getSample(self, row):
        """Sample name for *row*; falls back to the file's base name when the
        format carries no sample column."""
        if self.sample is None:
            return os.path.splitext(os.path.basename(self._filename))[0]
        return getattr(row, self.sample)

    def getSequence(self, row):
        """CDR3 amino-acid sequence of *row*."""
        return getattr(row, self.cdr3_field)

    def getCount(self, row):
        """Clone count / copy number of *row*."""
        return getattr(row, self.cnt_field)
| NCBI-Hackathons/PyClonal | pyclonal/parser.py | parser.py | py | 2,592 | python | en | code | 5 | github-code | 36 |
1312451969 | """changing viewer layer names
Revision ID: 99ebe4492cee
Revises: b0b51fd07bfa
Create Date: 2023-06-12 15:34:29.609937
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '99ebe4492cee'
down_revision = 'b0b51fd07bfa'
branch_labels = None
depends_on = None
def upgrade():
    """Apply: add ``viewer_layer_name`` to both tables and drop the old
    ``ngl_layer_name`` column.

    NOTE: existing ``ngl_layer_name`` values are dropped, not copied into
    the new column.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('datastack', sa.Column('viewer_layer_name', sa.String(length=100), nullable=True))
    op.add_column('image_source', sa.Column('viewer_layer_name', sa.String(length=100), nullable=True))
    op.drop_column('image_source', 'ngl_layer_name')
    # ### end Alembic commands ###
def downgrade():
    """Revert: restore ``ngl_layer_name`` on image_source and drop the
    ``viewer_layer_name`` columns (values are not migrated back)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('image_source', sa.Column('ngl_layer_name', sa.VARCHAR(length=100), autoincrement=False, nullable=True))
    op.drop_column('image_source', 'viewer_layer_name')
    op.drop_column('datastack', 'viewer_layer_name')
    # ### end Alembic commands ###
| seung-lab/AnnotationFrameworkInfoService | migrations/versions/99ebe4492cee_changing_viewer_layer_names.py | 99ebe4492cee_changing_viewer_layer_names.py | py | 1,039 | python | en | code | 1 | github-code | 36 |
# Demo of the while loop (comments translated from German)
"""
n = 0
while (n < 5):
    print(n)
    n = n + 1
"""
"""
Ausgabe:
0
1
2
3
4
"""
# Read an upper bound n; the commented-out loop below would print 0..n.
n = int(input("Bitte geben Sie eine Zahl ein: "))
i = 0
"""
while (i <= n):
    print(i)
    i = i +
"""
# Print all odd numbers smaller than n
while (i < n):
    if (i % 2 != 0):
        print(i)
    i = i + 1 # i + 2 (starting at 1) would change the step size and let us drop the if statement
28247026448 | import datetime
import time
from playsound import playsound
# Poll the system clock until it matches the requested alarm time.
def set_alarm(alarm_time):
    """Block until the wall clock reads ``alarm_time`` ("%H:%M:%S"), then
    play the alarm sound once.

    Assumes the caller has normalised the input to exact "%H:%M:%S" form.
    """
    while True:
        current_time = datetime.datetime.now().strftime("%H:%M:%S")
        if current_time == alarm_time:
            print("It's an alarm time")
            print("Play the alarm")
            playsound(r"C:\Users\madhu\Downloads\Alarm.mp3.mp3")
            break
        # NOTE(review): a 1 s sleep can drift past the target second and miss
        # the alarm entirely -- consider a >= comparison on parsed times.
        time.sleep(1)
time_formats = ["%H:%M:%S", "%H:%M"]
print("Assigned time formats:")
for i, time_format in enumerate(time_formats, start=1):
print(str(i) + " " + time_format)
choice = int(input("Enter your choice:"))
selected_format = time_formats[choice - 1]
alarm_time = input("Enter the alarm time(" + selected_format + "):")
if selected_format == "%H:%M:%S":
alarm_time = datetime.datetime.strptime(alarm_time, "%H:%M:%S").strftime("%H:%M:%S")
if selected_format == "%H:%M":
alarm_time = datetime.datetime.strptime(alarm_time, "%H:%M").strftime("%H:%M:%S")
# function call
set_alarm(alarm_time)
| Madhusudhan178/Alarm_clock-py | alarm_clock.py | alarm_clock.py | py | 1,033 | python | en | code | 0 | github-code | 36 |
24975211817 | import os
from setuptools import setup, find_packages
from version import get_git_version
def files(folder, exclude=()):
    """Recursively collect ``(directory, filename)`` pairs under *folder*.

    A file is skipped when its path, written as ``"<dir>/<name>"``, starts
    with any prefix in *exclude*.

    Note: the default was changed from a mutable ``[]`` to ``()`` (same
    behaviour, no shared-mutable-default pitfall), and the inner loop
    variable no longer shadows this function's own name.
    """
    found = []
    for root, _dirs, names in os.walk(folder):
        for name in names:
            path = "%s/%s" % (root, name)
            if not any(path.startswith(prefix) for prefix in exclude):
                found.append((root, name))
    return found
def flatten_all_files(*dirs, **kwargs):
    """Build setuptools ``data_files`` entries for everything under ``dirs``.

    Keyword args:
        exclude: iterable of path prefixes to skip (forwarded to ``files``).
        root: leading path fragment stripped from each destination directory.

    Returns a list of ``(destination_dir, [source_path])`` pairs.
    """
    exclude = kwargs.get('exclude', [])
    root = kwargs.get('root', '')
    all_files = []
    for d in dirs:
        for f in files(d, exclude):
            froot, fnm = f
            # Every found directory must contain ``root`` so it can be stripped.
            prefix_start = froot.find(root)
            assert prefix_start > -1, 'Impossible base root provided. Found files do not match: %s' % root
            all_files.append(("%s" % (froot[prefix_start + len(root):]), ["%s/%s" % (froot, fnm)]))
    return all_files
#
data_files = flatten_all_files('build/Onc/production/',
exclude=('build/Onc/production/lib/ext-4.2', 'build/Onc/production/.sass-cache'),
root='build/Onc/production/'
)
data_files.append(('', ['favicon.png', 'beep.wav', 'portal.html']))
setup(
name="opennode.oms.onc",
version=get_git_version(),
description="""OpenNode Console application""",
author="OpenNode Developers",
author_email="info@opennodecloud.com",
packages=find_packages(),
data_files=data_files,
namespace_packages=['opennode'],
zip_safe=False, # we need to serve real files
entry_points={'oms.plugins': ['onc = opennode.onc.main:OncPlugin']},
install_requires=[
"setuptools", # Redundant but removes a warning
"opennode.oms.core",
"opennode.oms.knot",
],
license='GPLv2',
)
| opennode/opennode-console | setup.py | setup.py | py | 1,732 | python | en | code | 13 | github-code | 36 |
17233286907 | import argparse
import torch
import torch.nn as nn
import numpy as np
import os
import pickle
from data_loader import get_loader
from build_vocab import Vocabulary
from model import EncoderCNN, DecoderRNN
from torch.nn.utils.rnn import pack_padded_sequence
from torchvision import transforms
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def main():
    """Train a COCO image-captioning model (CNN encoder -> LSTM decoder).

    Paths, batch size (128), embedding/hidden sizes (256/512), learning
    rate (0.001) and epoch count (5) are hard-coded below; checkpoints are
    written to models/ every 1000 steps.
    """
    # Create model directory
    if not os.path.exists('models/'):
        os.makedirs('models/')

    # Image preprocessing, normalization for the pretrained resnet
    transform = transforms.Compose([
        transforms.RandomCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406),
                             (0.229, 0.224, 0.225))])

    # Load vocabulary wrapper
    with open('data/vocab.pkl', 'rb') as f:
        vocab = pickle.load(f)

    # Build data loader
    data_loader = get_loader('data/resized2014', 'data/annotations/captions_train2014.json', vocab,
                             transform, 128,
                             shuffle=True, num_workers=2)

    # Build the models
    encoder = EncoderCNN(256).to(device)
    decoder = DecoderRNN(256, 512, len(vocab), 1).to(device)

    # Loss and optimizer
    criterion = nn.CrossEntropyLoss()
    # Only the decoder plus the encoder's linear/bn layers are given to the
    # optimizer, so the rest of the encoder is not updated.
    params = list(decoder.parameters()) + list(encoder.linear.parameters()) + list(encoder.bn.parameters())
    optimizer = torch.optim.Adam(params, lr=0.001)

    # Train the models
    total_step = len(data_loader)
    for epoch in range(5):
        for i, (images, captions, lengths) in enumerate(data_loader):

            # Set mini-batch dataset
            images = images.to(device)
            captions = captions.to(device)
            # Pack the targets to line up with the decoder's packed output.
            targets = pack_padded_sequence(captions, lengths, batch_first=True)[0]

            # Forward, backward and optimize
            features = encoder(images)
            outputs = decoder(features, captions, lengths)
            loss = criterion(outputs, targets)
            decoder.zero_grad()
            encoder.zero_grad()
            loss.backward()
            optimizer.step()

            # Print log info
            if i % 10 == 0:
                print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, Perplexity: {:5.4f}'
                      .format(epoch, 5, i, total_step, loss.item(), np.exp(loss.item())))

            # Save the model checkpoints
            if (i+1) % 1000 == 0:
                torch.save(decoder, os.path.join(
                    'models/', 'decoder-{}-{}.pkl'.format(epoch+1, i+1)))
                torch.save(encoder, os.path.join(
                    'models/', 'encoder-{}-{}.pkl'.format(epoch+1, i+1)))
if __name__ == '__main__':
main()
| vshantam/ImageCaptioning | train.py | train.py | py | 2,842 | python | en | code | 4 | github-code | 36 |
28856814409 | from __future__ import print_function, absolute_import
import sys
import PyQt4.QtGui as QtGui
import PyQt4.QtScript as QtScript
from PyQt4.QtCore import SIGNAL
app = QtGui.QApplication(sys.argv)
from qtreactor import pyqt4reactor
pyqt4reactor.install()
class DoNothing(object):
    """Button-click handler that launches Twisted's trial runner."""
    def __init__(self):
        # NOTE(review): ``count`` is never incremented and ``running`` is
        # never set True, so every click re-runs trial -- confirm intent.
        self.count = 0
        self.running = False
    def button_click(self):
        if not self.running:
            # Imported lazily so trial is only pulled in on first click.
            from twisted.scripts import trial
            trial.run()
def run():
    """Build a QtScript-driven button that starts a Twisted trial run,
    then enter the Qt event loop (driven by qtreactor)."""
    t = DoNothing()
    engine = QtScript.QScriptEngine()
    button = QtGui.QPushButton()
    # Wrap the widget so the script engine can address it as ``button``.
    button = engine.newQObject(button)
    engine.globalObject().setProperty("button", button)
    app.connect(button, SIGNAL("clicked()"), t.button_click)
    engine.evaluate("button.text = 'Do Twisted Gui Trial'")
    engine.evaluate("button.styleSheet = 'font-style: italic'")
    engine.evaluate("button.show()")
    app.exec_()
print('fell off the bottom?...')
| ghtdak/qtreactor | qtreactor/gtrial.py | gtrial.py | py | 969 | python | en | code | 50 | github-code | 36 |
20163912786 | from django.db import models
from django.contrib.auth.models import User
# Crear Modelos
class Mueble(models.Model):
    """A piece of furniture for sale (name, model, description, price,
    image, on-offer flag)."""
    nombre=models.CharField(max_length=40)
    modelo=models.CharField(max_length=150)
    descripcion=models.CharField(max_length=250)
    # NOTE(review): default='' on a FloatField looks unintended -- confirm.
    precio=models.FloatField(default='')
    # NOTE(review): upload_to mixes Windows backslashes; Django expects
    # forward slashes -- verify uploads land where intended.
    imagen=models.ImageField(null=True, blank=True, upload_to="static\core\images\\media")
    # NOTE(review): default='False' is a truthy STRING, not the bool False.
    oferta=models.BooleanField(default='False')
    def __str__(self):
        return f"{self.id} - {self.nombre}"
class Avatar(models.Model):
#vinculo con usuario
user=models.ForeignKey(User, on_delete=models.CASCADE)
#subcarpeta avatares de media
imagen=models.ImageField(upload_to='avatares',null=True, blank=True)
def __str__(self):
return f"{self.user} - {self.imagen}"
class Comentario(models.Model):
comentario = models.ForeignKey(Mueble, related_name='comentarios', on_delete=models.CASCADE, null=True)
nombre = models.CharField(max_length=40)
mensaje = models.TextField(null=True, blank=True)
fechaComentario = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ['-fechaComentario']
def __str__(self):
return '%s - %s' % (self.nombre, self.comentario)
class Carrito(models.Model):
    """Session-backed shopping cart keyed by product id.

    NOTE(review): this subclasses models.Model but declares no fields and
    overrides __init__ with a (request,) signature -- it is used purely as
    a session helper; confirm the Model base class is intentional.
    """
    def __init__(self, request):
        # Lazily create the "carrito" dict inside the user's session.
        self.request = request
        self.session = request.session
        carrito = self.session.get("carrito")
        if not carrito:
            self.session["carrito"] = {}
            self.carrito = self.session["carrito"]
        else:
            self.carrito = carrito
    def agregar(self, producto):
        # Add one unit of ``producto``; create the line item on first add.
        id = str(producto.id)
        if id not in self.carrito.keys():
            self.carrito[id]={
                "producto_id": producto.id,
                "nombre": producto.nombre,
                "precio": float(producto.precio),
                "acumulado": float(producto.precio),
                "imagen":producto.imagen.url,
                "cantidad": 1,
            }
        else:
            self.carrito[id]["cantidad"] += 1
            # Refresh unit price and running total on every add.
            self.carrito[id]["precio"] = producto.precio
            self.carrito[id]["acumulado"] += producto.precio
        self.guardar_carrito()
    def guardar_carrito(self):
        # Persist the cart back into the session and mark it dirty.
        self.session["carrito"] = self.carrito
        self.session.modified = True
    def eliminar(self, producto):
        # Remove the product's line item entirely.
        id = str(producto.id)
        if id in self.carrito:
            del self.carrito[id]
            self.guardar_carrito()
    def restar(self, producto):
        # Subtract one unit; drop the line item once the count reaches zero.
        id = str(producto.id)
        if id in self.carrito.keys():
            self.carrito[id]["cantidad"] -= 1
            self.carrito[id]["precio"] = producto.precio
            self.carrito[id]["acumulado"] -= producto.precio
            if self.carrito[id]["cantidad"] <= 0: self.eliminar(producto)
            self.guardar_carrito()
    def limpiar(self):
        # Empty the cart.
        self.session["carrito"] = {}
        self.session.modified = True
| gonzalezmirko/Proyecto-Final-Coder | Proyecto/core/models.py | models.py | py | 2,980 | python | es | code | 0 | github-code | 36 |
# HackerRank "Zipped!": read X rows of N scores, print each column's mean.
from statistics import mean
n, x = (map(int, input().split()))
ar = []
for i in range(x):
    ar.append(list(map(float, input().split())))
# zip(*ar) transposes the rows so each tuple holds one column's values.
s = zip(*ar)
for i in s:
    print (mean(i))
| Avani18/Hackerrank-Python | 11. Built-Ins/Zipped.py | Zipped.py | py | 188 | python | en | code | 0 | github-code | 36 |
19368162206 | #! /usr/bin/python2.7
import rospy
import datetime
import geometry_msgs.msg
CMD_VEL_TOPIC = "/cmd_vel"
def main():
    """Drive straight ahead at ``~drive_speed`` m/s for ``~drive_time``
    seconds (both private ROS params), then publish a stop command."""
    rospy.init_node("drive_forward_node")
    drive_speed = rospy.get_param("~drive_speed")
    drive_time = rospy.get_param("~drive_time")
    twist_publisher = rospy.Publisher(CMD_VEL_TOPIC, geometry_msgs.msg.Twist)
    rospy.loginfo(
        "Initializing drive forward node with velocity {drive_speed} m/s and drive time {drive_time} seconds".format(
            drive_speed=drive_speed,
            drive_time=drive_time
        )
    )

    # Constant forward twist; all other velocity components default to zero.
    drive_forward_message = geometry_msgs.msg.Twist()
    drive_forward_message.linear.x = drive_speed
    stop_message = geometry_msgs.msg.Twist()
    stop_message.linear.x = 0

    drive_start_time = datetime.datetime.now()
    # Re-publish at 10 Hz so the base keeps receiving fresh commands.
    publish_rate = rospy.Rate(10)
    while not rospy.is_shutdown():
        current_drive_time_seconds = (datetime.datetime.now() - drive_start_time).total_seconds()
        if current_drive_time_seconds > drive_time:
            twist_publisher.publish(stop_message)
            rospy.loginfo("Finished driving. Stopping.")
            break

        twist_publisher.publish(drive_forward_message)
        publish_rate.sleep()

    if not rospy.is_shutdown():
        rospy.loginfo("Terminating drive forward node")
main()
| slensgra/robotic-perception-systems-assignment-1 | src/drive_forward_node.py | drive_forward_node.py | py | 1,349 | python | en | code | 0 | github-code | 36 |
18355189069 | from flask import Flask, render_template, request, redirect, session, flash, url_for
from mysqlconnection import connectToMySQL # import the function that will return an instance of a connection
import re # the regex module
from flask_bcrypt import Bcrypt
import sys;
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
SCHEMA_NAME = "favoritebooks"
app = Flask(__name__)
app.secret_key ='asdfeeffefa' 'keep it secret, keep it safe' # set a secret key for security purposes
bcrypt = Bcrypt(app) # we are creating an object called bcrypt,
# which is made by invoking the function Bcrypt with our app as an argument
#603-
#There routes: / /register /login /books - /addbook /doFavorite/<bookID> /books/<bookID> - /updateDescription<bookID> /unfavorite/<bookID> /logout
# / is the root route and renders the registration/login page
# /register seems to be needed to catch the forms sent from the register side of the reglogPW.html page (typically the index page)
# redirects to /books if success, or back to / if fail
# /login seems to be needed to catch the forms sent from the login side of the reglogPW.html page (typically the index page)
# redirects to /books if success, or back to / if fail
# /books is about rendering the allBooks.html page....the success page
# /addbook is aobut adding a book to books table
# /doFavorite/<bookID> receives form from allBooks.html and oneBook.html, performs a favorting action in favoites table database and redirects to /books
# /books/<bookID> is about rendering the oneBook.html page....
# /updateDescription<bookID>
# /unfavorite>/<bookID> receives form from oneBook.html, performs action in database and redirects to /books
# /logout would do just that: log the user out, and send some place safe and useful...like the root route which then renders the reglogPW.html
#
#there are three html pages that look different only by flash messages and personalization such as using users name and populated data fields
#reglogFav.html or index.html is used for registration and login WH users_new
#allBooks.html or books_index.html is used for displaying All books whether favorited or not This is a success page WH: call it books_index
#oneBook.html or book_show displays single book WH: books_show
@app.route('/')
def index():
pass
# This will render the login and registration page
# return render_template("")
if "form" not in session: #this is about populating and retrieving registration field data. Ex: "fname_submitted" is value in first name field
dataNameEmail= {
"fname_submitted": "",
"lname_submitted": "",
"email_submitted": ""
}
session['form'] = dataNameEmail
print("-"*80)
print(dataNameEmail)
print(session['form'])
return render_template("reglogFav.html") # This is the registration login form
@app.route('/register', methods=['POST'])  # register form target: /books on success, back to / on failure
def register():
    """Validate the registration form, create the user, and log them in.

    All failures are collected (flashed under "register") before the
    redirect so the user sees every problem at once.

    BUG FIX: the email, password-length and password-confirmation checks
    used to flash a message WITHOUT setting ``valid = False``, so users
    with invalid emails or bad passwords were still registered.  The
    password message also said "7" while the check requires 8 characters.
    """
    # Re-stash the submitted name/email so the form can be repopulated.
    dataNameEmail = {
        "fname_submitted": request.form['fname_submitted'],
        "lname_submitted": request.form['lname_submitted'],
        "email_submitted": request.form["email_submitted"]
    }
    session['form'] = dataNameEmail
    valid = True
    if (len(request.form['fname_submitted']) < 2 or not request.form['fname_submitted'].isalpha()):
        valid = False
        flash("Name must be all alpha and at least 2 characters.", "register")
    if (len(request.form['lname_submitted']) < 2 or not request.form['lname_submitted'].isalpha()):
        valid = False
        flash("Last name must be all alpha and at least 2 characters.", "register")
    if not EMAIL_REGEX.match(request.form["email_submitted"]):  # test whether the field matches the pattern
        valid = False
        flash("Invalid email address!", "register")
    if (len(request.form['pw_submitted']) < 8):
        valid = False
        flash("Password must be at least 8 characters.", "register")
    if (request.form['pw_submitted'] != request.form['pwconf_submitted']):
        valid = False
        flash("Confirmation password did not match.", "register")
    # Reject addresses that already belong to a registered user.
    retrievedEmail = request.form['email_submitted']
    mysql = connectToMySQL(SCHEMA_NAME)
    query = "select * from users where email=%(em)s;"
    data = {
        "em": retrievedEmail
    }
    matching_users = mysql.query_db(query, data)
    if len(matching_users) > 0:
        valid = False
        flash("Entered email already exists. Mabybe you are already registered. Else use another email address.", "register")
    if (not valid):
        return redirect("/")
    # All checks passed: hash the password and create the user record.
    pw_hash = bcrypt.generate_password_hash(request.form['pw_submitted'])
    mysql = connectToMySQL(SCHEMA_NAME)
    query = "INSERT INTO users (fname, lname, email, pw_hash, created_at, updated_at) VALUES(%(fname_bydata)s, %(lname_bydata)s, %(email_bydata)s, %(pw_has_bydata)s, NOW(), NOW());"
    data = {
        "fname_bydata": request.form["fname_submitted"],
        "lname_bydata": request.form["lname_submitted"],
        "email_bydata": request.form["email_submitted"],
        "pw_has_bydata": pw_hash
    }
    new_user_id = mysql.query_db(query, data)
    # Log the brand-new user in.
    session['user_id'] = new_user_id
    session['session_fname'] = request.form["fname_submitted"]
    flash("You have been successfully registered", "success")
    # Drop the stashed form values now that registration succeeded.
    session.pop('form')
    return redirect('/books')
# end /register route
@app.route('/login', methods=['POST']) # this route shows up when the user clicks login and will send user to /books, or if bad, then redirect to /
def login():
print(request.form)
#print(request.form[0])
login_email=request.form['log_email_submitted']
login_pw=request.form['log_pw_submitted']
if login_pw=="": # validation prevents blank password, but if it were bcrypt returns an error, so at least for testing time...
login_pw="1" #Just a non-blank character as defensive code
hashed_login_pw=bcrypt.generate_password_hash(login_pw)
print("h"*80)
print("email, pw, hashed pw " + login_email + login_pw, hashed_login_pw)
# Check whether the email provided is associated with a user in the database
# If it is, check whether the password matches what's saved in the database
# input is good and we will write to database and show success page
mysql = connectToMySQL(SCHEMA_NAME)
query = "select email, pw_hash, id, fname from users where email=%(em)s;" #shortened from select * to select email
print("query is ", query)
data = {
"em": login_email
}
result = mysql.query_db(query, data)
print("query returns result of match between email submitted in login and user table: ")
print(result)
if len(result)<1: # result is a list of dictionaries with each dictionary being a record from database. length = 0 means list has no elements, so no match
print("evil hacker with fake email")
flash("You could not be logged in", "login")
return redirect ("/")
print("r"*70)
print(type(result[0]))
matched_hashed_pw=result[0]['pw_hash']
print(type(matched_hashed_pw))
print("matching hashed PW: " + str(result[0]['pw_hash']))
print("variable matched_hashed_pw: " + str(matched_hashed_pw))
if bcrypt.check_password_hash(matched_hashed_pw,login_pw):
print("we got a match")
session['user_id'] = result[0]['id']
print("so cookie id is: " + str(session['user_id']))
flash("You were logged in", "login")
session['session_fname'] = result[0]['fname'] #storing first name of logged in user for handy retrieval from html pages
return redirect('/books')
else:
print("evil hacker")
flash("You could not be logged in", "login")
return redirect ("/")
# End /login route
@app.route('/books')
def books():
#like a success route this route is the result of a redirect...so it renders a template, but also the user can use 5000/books as a bookmark as the entry place which will only work if they are still logged in
# check cookie to see if logged in
# no cookie, then not logged in
if 'user_id' not in session:
print("key 'user_id' does NOT exist")
flash("You must be logged in to enter this website", "login")
return redirect ("/")
else:
print("!"*80)
print('key user_id exists!')
mysql = connectToMySQL(SCHEMA_NAME)
query = """select books.id AS 'books_ID', books.title AS 'books_title', books.added_by_id AS 'book_added_by_id', favorites.fav_by_id AS 'who_faved_book',
users.fname AS 'fname who added', users.lname AS 'lname who added'
from books
join users ON books.added_by_id=users.id
join favorites on favorites.book_id=books.id;"""
print("query is ", query)
allBooks =mysql.query_db(query)
print(allBooks)
print("+"*76)
return render_template("allBooks.html", allBooks=allBooks )
# end /books route
@app.route('/addbook', methods=['POST'])
def addbook():
#adds the book to books table
#submitter is automatically added to favorites table for this book
print("p"*80)
print(" this should be the book title: " + request.form['sentbooktitle'])
print("this should be the book description: " + request.form['sentbookdescription'])
valid=True
if (len(request.form['sentbooktitle']))<1:
valid=False
flash("Title must not be blank", "addfavbook")
if (len(request.form['sentbookdescription']))<5:
valid=False
flash("Please include at least 5 characters in description", "addfavbook")
if(not valid):
print("test")
return redirect('/books')
query = """insert into books (added_by_id, title, description ) VALUES (%(currentloggedidbydata)s, %(booktitlebydata)s, %(bookdescriptionbydata)s);"""
data = {
"currentloggedidbydata": session['user_id'],
"booktitlebydata": request.form['sentbooktitle'],
"bookdescriptionbydata": request.form['sentbookdescription']
}
print("q"*80)
print(query)
print(data)
mysql = connectToMySQL(SCHEMA_NAME)
new_book_id = mysql.query_db(query, data)
########
query = """insert into favorites (book_id, fav_by_id) VALUES (%(newbookbydata)s, %(favedbyidbydata)s);"""
data = {
"newbookbydata": new_book_id,
"favedbyidbydata": session['user_id']
}
print("9"*80)
print(query)
print(data)
mysql = connectToMySQL(SCHEMA_NAME)
new_fav_id = mysql.query_db(query, data)
return redirect('/books')
# End /doFavorite route
@app.route('/doFavorite/<bookID>', methods=['GET'])
def doFavorite(bookID):
print("book to be favorited: " , bookID)
print("logged in user: ", session['user_id'])
query = """insert into favorites (book_id, fav_by_id) VALUES (%(newbookbydata)s, %(favedbyidbydata)s);"""
data = {
"newbookbydata": bookID,
"favedbyidbydata": session['user_id']
}
print("9"*80)
print(query)
print(data)
mysql = connectToMySQL(SCHEMA_NAME)
new_fav_id = mysql.query_db(query, data)
return redirect('/books')
# End /doFavorite route
@app.route('/books/<bookID>', methods=['POST'])
def booksOne(bookID):
    # BUG FIX: the rule declares <bookID>, so Flask passes it to the view;
    # the old zero-arg signature raised TypeError on every request, and the
    # bare name ``oneBook.html`` raised NameError (it must be a string).
    # TODO: look the book up by bookID and pass it to the template.
    return render_template("oneBook.html")
# End /doFavorite route
#return render_template("oneBook.html", )
# end /books route
@app.route('/updateDescription<bookID>', methods=['POST'])
def updateDesc(bookID):
    # BUG FIX: accept the <bookID> URL parameter -- Flask passes it to the
    # view, so the old zero-arg signature raised TypeError on every request.
    # The description update itself is still an unimplemented stub.
    pass
@app.route('/unfavorite/<bookID>', methods=['POST'])
def unFav(bookID):
    # BUG FIX: accept the <bookID> URL parameter -- without it Flask raised
    # TypeError whenever the route was hit.  Unfavorite logic is still TBD.
    pass
#return redirect ("/books")
# End /doFavorite route
#return redirect ("/books")
# End /doFavorite route
@app.route('/logout', methods=['GET']) # this route is the result of the user clicking anchor tag labeled "logout"
def logout():
#Clear cookies and do flash message displaying on reglogPW.html saying "You have been logged out"
print("are we in logout?")
#Clear cookies and do flash message saying "You have been logged out"
session.pop('user_id')
session.pop('session_fname')
flash("You have been logged out", "logout")
return redirect ("/")
# end logout route
if __name__ == "__main__":
app.run(debug=True) | full-time-april-irvine/kent_hervey | flask/flask_mysql/FavoriteBooksFlask/fav_books.py | fav_books.py | py | 14,005 | python | en | code | 0 | github-code | 36 |
34892117243 | from django.contrib import admin
from .models import Product, Order, OrderProduct
class ProductAdmin(admin.ModelAdmin):
list_display = ('name', 'price', 'quantity', 'product_image')
list_filter = ('price', 'quantity')
search_fields = ('name', 'price')
class OrderAdmin(admin.ModelAdmin):
list_display = ('date_ordered', 'name', 'email', 'phone', 'address')
list_filter = ('date_ordered', 'products', 'name')
search_fields = ('name', 'email', 'phone', 'address', 'products')
class OrderProductAdmin(admin.ModelAdmin):
list_display = ('order', 'product', 'quantity')
list_filter = ('order', 'product')
search_fields = ('order', 'product')
admin.site.register(Product, ProductAdmin)
admin.site.register(Order, OrderAdmin)
admin.site.register(OrderProduct, OrderProductAdmin)
| rustamovilyos/raw_materials | app/admin.py | admin.py | py | 817 | python | en | code | 1 | github-code | 36 |
13483897568 | import bpy
from bpy import context
import sys
import os
from os.path import exists
from os.path import splitext
fileArgument = sys.argv[-1]
print("\r\n")
print("Looking for FBX file " + fileArgument + " in working directory:")
print(os.getcwd())
filename = splitext(fileArgument)[0]
if exists(filename + ".fbx"):
print("FBX file name " + filename + " was found.\r\n")
else:
sys.exit("FBX file named " + fileArgument + " was not found.\r\n")
try:
os.mkdir(filename)
except OSError as error:
print(error)
bpy.ops.wm.save_as_mainfile(filepath=filename + ".blend")
# Delete all default objects
bpy.ops.object.select_all(action='SELECT')
bpy.ops.object.delete()
bpy.ops.wm.save_mainfile()
# Import fbx object.
print("Importing FBX object.")
bpy.ops.import_scene.fbx(filepath = str(filename + ".fbx"))
bpy.ops.wm.save_mainfile()
def ApplyMaterial(material_name, obj):
mat = bpy.data.materials.get(material_name)
if mat is not None:
# assign material
if obj.data.materials:
# assign to 1st material slot
obj.data.materials[0] = mat
else:
obj.data.materials.append(mat)
def matcher(x):
return '.' not in x.name
bpy.ops.wm.save_mainfile()
print("Separating unique materials...")
uniqueMaterials = filter(matcher, bpy.data.materials)
for material in uniqueMaterials:
bpy.ops.object.select_all(action='DESELECT')
mat_name = material.name
col = bpy.data.collections.new(mat_name)
bpy.context.scene.collection.children.link(col)
print("Linking " + mat_name + " collection")
for object in bpy.data.objects:
# Select only mesh objects
if object.type == "MESH":
bpy.context.view_layer.objects.active = object
# Gets the first material for that object
objectMaterial = None
if 0 < len(object.data.materials):
objectMaterial = object.data.materials[0]
# if the object's material starts with the name of the current unique material
# apply the unique material to that object.
if objectMaterial is not None and objectMaterial.name.startswith(mat_name):
bpy.context.view_layer.objects.active = object
col.objects.link(object)
ApplyMaterial(mat_name, object)
m_col = bpy.data.collections.get("Collection")
bpy.context.scene.collection.children.unlink(m_col)
print("Unique materials separated.")
# ctx = bpy.context.copy()
# ctx['selected_objects'] = col.objects
# col_filename = filename + "/" + mat_name + ".blend"
# bpy.data.libraries.write(col_filename, set(ctx['selected_objects']), fake_user=True)
# hull_col = bpy.data.collections.new(mat_name + "_hulls")
# bpy.context.scene.collection.children.link(hull_col)
# for object in col.objects:
# if object.type == "MESH":
# print("Creating hull: " + object.name + "_hull")
# hull = convex_hull(object.name + "_hull", object)
# if hull is not None:
# hull_col.objects.link(hull)
#
# print("Completed hull: " + object.name + "_hull")
# bpy.ops.wm.save_mainfile()
bpy.ops.wm.save_mainfile()
| lmsorenson/PyGeometry | create_scene_from_fbx.py | create_scene_from_fbx.py | py | 3,235 | python | en | code | 4 | github-code | 36 |
11892197760 | class Solution:
def majorityElement(self, nums: List[int]) -> int:
# Approach 1 - Hash Map
# if len(nums) == 1:
# return nums[0]
# hash_map={}
# for i in range(len(nums)):
# if nums[i] in hash_map:
# hash_map[nums[i]]+=1
# if hash_map[nums[i]] > len(nums)/2:
# return nums[i]
# else:
# hash_map[nums[i]] = 1
#return max(hash_map, key=hash_map.get)
# Approach 2 - Boyer-Moore Voting Algorithm
majority_element = None
counter = 0
for num in nums:
if counter ==0:
majority_element = num
if majority_element == num:
counter += 1
else:
counter -=1
return majority_element
| bandiatindra/DataStructures-and-Algorithms | Additional Algorithms/LC 169. Majority Element.py | LC 169. Majority Element.py | py | 884 | python | en | code | 3 | github-code | 36 |
41308544644 | import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gdk, GLib, GObject
from random import randint
from threading import Thread
from time import sleep
import i3
def do_nothing():
return
def do_nothing(a, b):
return
# needed so one can jump to a specific window on button click.
# there are probably way better ways to do this...
class WindowButton():
def __init__(self, a_id, a_window):
self.id = a_id
self.button = Gtk.Button()
self.window = a_window
def clicked(self, widget):
i3.focus(id=self.id)
self.window._close_window()
# the class that handles the Window
class mywindow:
def __init__(self):
#needed because the whole window is running in a seperate thread from the loop that reads the fifo
Gdk.threads_init()
GObject.threads_init()
Thread(target=self._init_helper).start()
# the real __init__ that gets started as a new Thread
def _init_helper(self):
self.win = Gtk.Window()
# important for my i3 config. it gets set to fullscreen by that
self.win.set_role("i3-overview")
self.win.connect("delete-event", do_nothing)
self.open = False
#initial setup for the window components
self.populate_window()
Gtk.main()
def populate_window(self):
#top-level boxes stacking horizontally
self.mbox = Gtk.Box(spacing=6, orientation=1)
self.tree_grid = Gtk.Grid()
self.tree_grid.override_background_color(0,Gdk.RGBA(0,0,0,1))
self.mbox.pack_start(self.tree_grid, True, True, 0)
self.win.add(self.mbox)
# this adds a big fat exit button to the bottom of the window
#bbox = Gtk.Box(spacing=6, )
#exit_but = Gtk.Button(label="exit")
#exit_but.connect("clicked", self.exit_button_click)
#bbox.pack_start(exit_but, True, True, 0)
#self.mbox.pack_end(bbox, True, True, 0)
#this creates the tree of labels/buttons
self._create_tree()
def _create_tree(self):
#clean the tree-box from all children
for child in self.tree_grid.get_children():
self.tree_grid.remove(child)
#get the current tree layout
tree = i3.get_tree()
# the top level of the trees are the displays
num_displays = len(tree["nodes"]) - 1 # ignore the __i3 thingy
display_counter = 0
for display in tree["nodes"]:
if "__i3" in display["name"]: # ignores the __i3 thingy. i think it contains the i3bar
continue
# every display gets his own label on the top
disp_label = Gtk.Label(label=display["name"])
disp_label.override_background_color(0, Gdk.RGBA(0.8,0,0,1))
disp_label.override_color(0, Gdk.RGBA(1,1,1,1))
display_grid = Gtk.Grid() #every display gets its own grid, so we can present them tidely
display_grid.override_background_color(0, Gdk.RGBA(0,0,0,1))
spacer = Gtk.Label(label="Hah")
spacer.override_background_color(0, Gdk.RGBA(0,0,0,1)) # needed because grids dont support spacing
spacer.override_color(0, Gdk.RGBA(0,0,0,1))
row = 0
if display_counter > num_displays / 2 - 1:
row = 1
line = display_counter % (num_displays / 2)
self.tree_grid.attach(disp_label, line, row*3, 1 , 1)
self.tree_grid.attach(display_grid, line, row*3+1, 1 , 1)
self.tree_grid.attach(spacer, line, row*3 + 2, 1 , 1)
for cont in display["nodes"]:
if "content" == cont["name"]: #each display has content and top/bottom docker. we only want the content
ws_counter = 0
num_ws = len(cont["nodes"])
for workspace in cont["nodes"]:
if len(workspace["nodes"]) == 0:
continue
# every workspace gets his own label on the top
label = Gtk.Label()
label.set_label(workspace["name"])
label.override_color(0,Gdk.RGBA(1,1,1,1))
label.override_background_color(0,Gdk.RGBA(0,0.1,0.6,0.6))
grid = Gtk.Grid()
next_level_box = Gtk.Box(spacing=0, ) # here is the place where the containers/windows get added
grid.attach(label,0,0,1,1)
grid.attach(next_level_box,0,1,1,1);
spacerh = Gtk.Label(label="Hah") # needed because grids dont support spacing
spacerv = Gtk.Label(label="Hah") # needed because grids dont support spacing
spacerh.override_background_color(0, Gdk.RGBA(0,0,0,1))
spacerv.override_background_color(0, Gdk.RGBA(0,0,0,1))
spacerh.override_color(0, Gdk.RGBA(0,0,0,1))
spacerv.override_color(0, Gdk.RGBA(0,0,0,1))
# partion the workspaces into three rows (and in my case maximum 3 lines)
row = 0
if ws_counter > num_ws / 3 - 1:
row = 1
if ws_counter > (num_ws*2) / 3 - 1:
row = 2
line = ws_counter % (num_ws / 3)
display_grid.attach(grid, line*2, row*2, 1 , 1)
display_grid.attach(spacerh, line*2, row*2 + 1, 1 , 1)
display_grid.attach(spacerv, line*2 + 1, row*2, 1 , 1)
self._rec_tree_func(workspace, next_level_box, 0)
ws_counter += 1
display_counter += 1
def _rec_tree_func(self, root, parent_box, level):
#decide wether the leave is a container or a window
for leave in root["nodes"] :
if len(leave["nodes"]) == 0:
label = str(leave["name"]).split("-")[-1] # only display the text after the last dash. in most cases the programs name
button = WindowButton(leave["window"], self)
button.button.set_label(label)
button.button.connect("clicked", button.clicked) #jumps to the window and closes the overview
parent_box.pack_start(button.button, True, True, 0)
button.button.override_background_color(0,Gdk.RGBA(0,0,0,1))
button.button.override_color(0,Gdk.RGBA(1,1,1,1))
else:
# generating some nice grey tones for the labels for better differentiation
label = Gtk.Label()
label.override_color(0,Gdk.RGBA(1,1,1,1))
r = 0.7 - 0.1*level
label.override_background_color(0,Gdk.RGBA(r,r,r,1))
if leave["name"]: #sometimes the containers do not have names. defaulting to "container"
label.set_label(leave["name"])
else:
label.set_label("container")
grid = Gtk.Grid()
next_level_box = Gtk.Box(spacing=0, ) # here is the place for the next level of recursion
grid.attach(label,0,0,1,1)
grid.attach(next_level_box,0,1,1,1);
parent_box.pack_start(grid, True, True, 0)
self._rec_tree_func(leave, next_level_box, level + 1) # start next level of recursion only if we didnt operate on a window
# wouldnt make much of a difference but ya know
# not needed anymore. leaving it still
def exit_button_click(self, button):
self._close_window()
#open window from within the thread
def _open_window(self):
self._create_tree()
self.win.show_all()
self.open = True
#open window from outside the thread
def open_window(self):
Gdk.threads_enter()
self._open_window()
Gdk.threads_leave()
#closing window from within the thread
def _close_window(self):
self.win.hide()
self.open = False
#closing window from outside the thread
def close_window(self):
Gdk.threads_enter()
self._close_window()
Gdk.threads_leave()
#toggel the window from within the Thread
def _toggle_window(self):
if(self.open):
self._close_window()
else:
self._open_window()
#toggel the window from outside the Thread
def toggle_window(self):
if(self.open):
self.close_window()
else:
self.open_window()
#exit the Gtk loop
def exit(self):
Gtk.main_quit()
| KillingSpark/i3-Overview | mywindow.py | mywindow.py | py | 8,918 | python | en | code | 0 | github-code | 36 |
30553351838 | '''
statistics.minutes delayed.weather: number of minutes delayed (per month) caused by significant meteorological
conditions that, in the judgment of the carrier, delays or prevents the operation of a flight.
'''
'''
In de opdracht willen ze dat we list_of_airports twee keer gaan gebruiken...
Dit kan natuurlijk niet....
Maar de 'tweede' list_of_airports is nu vliegveld_code =)
'''
import json
from pprint import pprint
list_of_airports = []
with open('airports.json', 'r') as infile:
list_of_airports = json.load(infile) #Laadt alle data uit airports.json in list_of_airports
infile.close() #Sluit de file, want wij hebben m niet meer nodig
#pprint(list_of_airports)
vliegveld_code = [] #Nieuwe lijst, omdat de opdracht is slecht gemaakt en anders moeten we list_of_airports overschrijven :3
for vliegveld in list_of_airports:
airport_code = vliegveld['airport']['code'] #JSON.airport.code
min_delayed_weather = vliegveld['statistics']['minutes delayed']['weather'] #JSON.statistics.minutes delayed.weather
vliegveld_code.append((airport_code, min_delayed_weather)) #Voegt code en minuten vertraging als gevolg van Weather toe als TUPLE aan vliegveld
pprint(vliegveld_code)
# ssort airports by min_delayed_weather
#print(sorted(vliegveld_code)) #sorteerd op key, maar moet op value =(
vliegveld_code_sorted = sorted(vliegveld_code, key=lambda vliegveld_code: vliegveld_code[1], reverse=True) #Sorted op 2e plaats in tuple(Weather delay) =) en omgekeerd, want standaard is klein ---> groot :(
pprint(vliegveld_code_sorted) | puppy1004/School-Python | School/Oefentoets2/opgave_2.py | opgave_2.py | py | 1,623 | python | nl | code | 0 | github-code | 36 |
3829993290 | from waflib.Logs import pprint
def options(opt):
opt.load('python')
def configure(conf):
conf.load('python')
if not conf.env.ENABLE_CROSS:
conf.check_python_version((2, 6, 0))
conf.check_python_headers(features='pyext') # Extension-only, no embedded
try:
conf.check_python_module('curses')
conf.env['PYTHON_CURSES'] = True
except conf.errors.ConfigurationError:
pprint("YELLOW", "WARNING: ntpmon will not be built/installed since "
"python curses module was not found")
try:
conf.check_python_module('argparse')
conf.env['PYTHON_ARGPARSE'] = True
except conf.errors.ConfigurationError:
pprint("YELLOW", "WARNING: ntploggps, ntplogtemp, and ntpviz will not "
"be built/installed since python argparse module was not found")
try:
conf.check_python_module('gps', condition="ver >= num(3, 18)")
conf.env['PYTHON_GPS'] = True
except conf.errors.ConfigurationError:
pprint("YELLOW", "WARNING: ntploggps will not be built/installed since "
"python gps module >= 3.18 was not found")
def build(ctx):
srcnode = ctx.srcnode.make_node('pylib')
bldnode = ctx.bldnode.make_node('pylib')
target1 = bldnode.make_node('control.py')
target2 = bldnode.make_node('magic.py')
sources = []
if ctx.env['ntpc'] == 'ext':
sources = srcnode.ant_glob("*.py", excl='ntpc.py')
elif ctx.env['ntpc'] == 'ffi':
sources = srcnode.ant_glob('*.py')
builds = [x.get_bld() for x in sources]
# The rm's here were added to fix a reported (but underdocumented) problem
# where the results of pythonize-header were not updated when needed,
# though there is as yet no explanation for why this had occurred, and the
# alleged failure only occurred when changing the code between 'configure'
# and 'build', which is not a legal action, anyway.
# These rm's were causing unnecessary reruns of pythonize-header,
# including during 'install'. They are now disabled but retained as a
# comment.
## Remove generated files to ensure they are properly updated
#ctx.exec_command("rm -f %s" % target1.abspath())
#ctx.exec_command("rm -f %s" % target2.abspath())
if ctx.env['ntpc'] is None:
return
# Make sure Python sees .py as well as .pyc/.pyo
ctx(
features="subst",
source=sources,
target=builds,
)
ctx(
before=['pyc', 'pyo'],
cwd=srcnode,
rule='${PYTHON} ${SRC} >${TGT}',
source=["../wafhelpers/pythonize-header", "../include/ntp_control.h"],
target=target1,
)
ctx(
before=['pyc', 'pyo'],
cwd=srcnode,
rule='${PYTHON} ${SRC} >${TGT}',
source=["../wafhelpers/pythonize-header", "../include/ntp.h"],
target=target2,
)
# Force early creation of generated files
ctx.add_group()
ctx(
features='py',
source=builds+[target1, target2],
install_from=bldnode,
install_path='${PYTHONARCHDIR}/ntp'
)
# pep241 lay an egg
egg = ['ntp-%s.egg-info' % ctx.env.NTPSEC_VERSION]
ctx(
features="subst",
source=['ntp-in.egg-info'],
target=egg
)
ctx.install_files(ctx.env.PYTHONARCHDIR, egg)
| ntpsec/ntpsec | pylib/wscript | wscript | 3,341 | python | en | code | 225 | github-code | 36 | |
36978432823 | class Node():
def __init__(self, data):
self.data = data
self.next_node = None
class LinkedList():
def __init__(self):
self.head = None
# adds new node to the end of list
def add(self, data):
node = Node(data)
if (self.head == None):
# list is empty
self.head = node
else:
current = self.head
while(current.next_node != None):
current = current.next_node
current.next_node = node
# deletes node from end of list
def delete_at_end(self):
# if list empty, return False
if (self.head == None):
return False
current = self.head
previous = None
# loop to get to last node
while(current.next_node != None):
previous = current
current = current.next_node
# set next node ref to None, removes last element
if (previous == None):
# only one list item, delete it by removing reference
self.head = None
else:
# delete last item in the list
previous.next_node = None
return True
# deletes node from start (head) of list
def delete_head(self):
# if list empty, return False
if (self.head == None):
return False
current = self.head
# need to delete the head node
temp_node = current.next_node
current.next_node = None
self.head = temp_node
return True
# deletes specifc element that matches provided data
def delete(self, data):
if (self.head == None):
return False
current = self.head
if(current.data == data):
# need to delete the head node
temp_node = current.next_node
current.next_node = None
self.head = temp_node
else:
# need to keep looping to find the data
while (current.next_node != None):
previous = current
current = current.next_node
if (current.data == data):
temp_node = current.next_node
previous.next_node = temp_node
current.next_node = None
return True
return False
# insert element before index n, with provided data
def insert_before(self, n, data):
node = Node(data)
if(n == 0):
# insert before the head
temp_node = self.head
self.head = node
self.head.next_node = temp_node
else:
current = self.head
previous = None
for i in range(0, n):
previous = current
current = current.next_node
if (current == None):
previous.next_node = node
return True
temp_node = previous.next_node
previous.next_node = node
node.next_node = temp_node
def get(self, n):
if(self.head):
if (n == 0):
return self.head.data
else:
current = self.head.next_node
i = 1
while(current.next_node != None and i < n):
current = current.next_node
i += 1
if (i == n):
return current.data
else:
return None
else:
# empty list
return None
# prints entire list
def print_list(self):
print_arr = []
if(self.head != None):
current = self.head
print_arr.append(current.data)
while(current.next_node != None):
current = current.next_node
print_arr.append(current.data)
print(print_arr)
else:
print("Empty...") | scott-ammon/python-stack-queue-ll | linked_list.py | linked_list.py | py | 3,319 | python | en | code | 0 | github-code | 36 |
73103994024 | import datetime
import jwt
from api import app, db
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy import text as sa_text
from sqlalchemy_utils.types.password import PasswordType
class User(db.Model):
id = db.Column(UUID(as_uuid=True), primary_key=True, server_default=sa_text("uuid_generate_v4()"))
name = db.Column(db.String(120), unique=True)
email = db.Column(db.String(120), unique=True, nullable=False)
password = db.Column(PasswordType(
schemes=[
'pbkdf2_sha512',
'md5_crypt'
],
deprecated=['md5_crypt']
))
def to_dict(self):
return {
"id": self.id,
"name": self.name,
"email": self.email
}
@staticmethod
def encode_auth_token(user_id):
try:
payload = {
'exp': datetime.datetime.utcnow() + datetime.timedelta(days=10),
'iat': datetime.datetime.utcnow(),
'sub': user_id
}
return jwt.encode(
payload,
app.config.get('SECRET_KEY'),
algorithm='HS256'
)
except Exception as e:
return e
@staticmethod
def decode_auth_token(auth_token):
try:
payload = jwt.decode(auth_token, app.config.get('SECRET_KEY'))
return payload
except jwt.ExpiredSignatureError:
return 'Signature expired. Please log in again.'
except jwt.InvalidTokenError:
return 'Invalid token. Please log in again.'
| Basile-Lequeux/PerfectTripApi | api/models/User.py | User.py | py | 1,587 | python | en | code | 0 | github-code | 36 |
17618492062 | import csv
from messages_sender.model.Contact import Contact
def read_contacts(file_path: str):
file = open(file_path)
csvreader = csv.reader(file)
next(csvreader)
contacts = []
for row in csvreader:
contact = Contact(id=int(row[0]), name=row[1], cell_phone=row[2], email=row[3])
contacts.append(contact)
file.close()
return contacts
| andre-luiz-pires-silva/pytest | messages_sender/services/contacts_reader.py | contacts_reader.py | py | 383 | python | en | code | 0 | github-code | 36 |
7994328417 | import pymongo
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
products_db = myclient["products"]
branches_db = myclient["branches"]
order_management_db = myclient["order_management"]
# branches = {
# 1: {"name":"Katipunan","phonenumber":"09179990000"},
# 2: {"name":"Tomas Morato","phonenumber":"09179990001"},
# 3: {"name":"Eastwood","phonenumber":"09179990002"},
# 4: {"name":"Tiendesitas","phonenumber":"09179990003"},
# 5: {"name":"Arcovia","phonenumber":"09179990004"},
# }
def get_product(code):
products_coll = products_db["products"]
product = products_coll.find_one({"code":code},{"_id":0})
return product
def get_products():
product_list = []
products_coll = products_db["products"]
for p in products_coll.find({},{"_id":0}):
product_list.append(p)
return product_list
def get_branch(code):
branches_coll = branches_db["branches"]
branches = branches_coll.find_one({"code":code})
return branches
def get_branches():
branch_list = []
branch_coll = branches_db["branches"]
for m in branch_coll.find({}):
branch_list.append(m)
return branch_list
def get_user(username):
customers_coll = order_management_db['customers']
user=customers_coll.find_one({"username":username})
return user
def create_order(order):
orders_coll = order_management_db['orders']
orders_coll.insert(order)
| mjimlee/Flask-ecommerce | digitalcafe/database.py | database.py | py | 1,431 | python | en | code | 0 | github-code | 36 |
73451722984 | '''
Lets implement CutAndPaste augmentation
This augmentations can be added as an augmentation in the DataGenerators, but for the sake of keeping this project
simple I am doing this separately and then performing other-augmentations.
This can be considered as the first augmentation of Albumentation augmentations.
ref : https://arxiv.org/pdf/2012.07177.pdf
LETSS DO IT !! CutAndPaste is no longer plagiarizing
'''
import cv2
import argparse
import base64
import json
import numpy as np
import random
import pandas as pd
from tqdm import tqdm
import os
import os.path as osp
from labelme import utils
class CopyAndPaste:
def __init__(self, input_dir, background_dir):
self.input_dir = input_dir
self.json_mask_dir = osp.join(osp.dirname(self.input_dir), 'json_mask')
self.mask_dir = osp.join(osp.dirname(self.input_dir), 'mask')
self.background_dir = background_dir
# default can be changed anytime
self.augmentation_copies = 10
self.w_J_test_size = 3
self.wo_J_test_size = 2
self.img_sz = 256
def augment_images(self):
# creating a random-test set for no leakage from training
test_samples = []
w_Js = [w_J for w_J in os.listdir(self.json_mask_dir) if 'no' not in w_J]
wo_Js = [wo_J for wo_J in os.listdir(self.json_mask_dir) if 'no' in wo_J]
test_samples += list(np.random.choice(w_Js, size=self.w_J_test_size, replace=False))
test_samples += list(np.random.choice(wo_Js, size=self.wo_J_test_size, replace=False))
imgs = []
grps = []
for img_f in tqdm(os.listdir(self.input_dir)):
if 'CAP' not in img_f:
if img_f in os.listdir(self.mask_dir):
imgs.append(img_f.replace('.json', ''))
if img_f not in test_samples:
grps.append('train')
img, mask = self.get_img_n_mask(img_f)
imgs, grps = self.create_augmentations(img, mask, imgs, grps,
img_name=img_f.replace('.png', ''))
else:
grps.append('test')
df = pd.DataFrame()
df['images'] = imgs
df['group'] = grps
df.to_csv(osp.join(osp.dirname(self.input_dir), 'log_meta.csv'), index=False)
def get_img_n_mask(self, img_f):
img_ = cv2.imread(osp.join(self.input_dir, img_f), cv2.COLOR_BGR2RGB)
mask_ = cv2.imread(osp.join(self.mask_dir, img_f), cv2.IMREAD_GRAYSCALE)
return img_, mask_
def create_augmentations(self, img, mask, img_list, group_list, img_name):
# first lets select 10-images at random from the background
background_imgs = list(np.random.choice(os.listdir(self.background_dir),
size=self.augmentation_copies,
replace=False))
for idx, background_img in enumerate(background_imgs):
'''
There are two ways of doing we can add J in the same location as it is in the original image
but that noob-level lets resize the images before pasting them on top of background and then
un-masking the pizels which are not labeled as J.
'''
bg_img = cv2.imread(osp.join(self.background_dir, background_img), cv2.COLOR_BGR2RGB)
bg_img = cv2.resize(bg_img, (self.img_sz, self.img_sz))
if len(bg_img.shape) < 3:
bg_img = np.repeat(bg_img[..., np.newaxis], 3, axis=2)
# lets resize the og-image anywhere in between 180-256 (256 final desired size)
random_sz = np.random.randint(180, 256)
re_img = cv2.resize(img, (random_sz, random_sz))
re_mask = cv2.resize(mask.astype('uint8'), (random_sz, random_sz))[..., np.newaxis]
# now lets find a patch in the background-image
x_init = np.random.randint(0, self.img_sz - random_sz)
y_init = np.random.randint(0, self.img_sz - random_sz)
bg_mask_img = np.zeros((self.img_sz, self.img_sz, 1))
ix_, iy_, _ = np.where(re_mask != 0)
bg_patch = bg_img[x_init:(x_init+random_sz), y_init:(y_init+random_sz), :]
bg_patch[ix_, iy_, :] = re_img[ix_, iy_, :]
bg_img[x_init:(x_init + random_sz), y_init:(y_init + random_sz), :] = bg_patch
if 'no' not in img_name:
bg_mask_img[x_init:(x_init + random_sz), y_init:(y_init + random_sz), :] = re_mask
# saving the mask
cv2.imwrite(osp.join(self.mask_dir, f'CAP_{img_name}_{idx}.png'), bg_mask_img)
# saving the image
cv2.imwrite(osp.join(self.input_dir, f'CAP_{img_name}_{idx}.png'), bg_img)
img_list.append(f'CAP_{img_name}_{idx}')
group_list.append('train')
return img_list, group_list
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("input_dir")
parser.add_argument("background_dir")
args = parser.parse_args()
input_dir = args.input_dir
background_dir = args.background_dir
# initialize and create masks for where J exists
augment = CopyAndPaste(input_dir, background_dir)
# Create masks
augment.augment_images() | Anshul22Verma/TP_projects | CopyAndPaste/copy_and_paste_augmentation.py | copy_and_paste_augmentation.py | py | 5,384 | python | en | code | 0 | github-code | 36 |
1985519465 | import kivy
from kivy.app import App
from kivy.lang import Builder
from kivy.properties import NumericProperty, ObjectProperty, StringProperty
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivy.uix.recycleview import RecycleView
from kivy.uix.screenmanager import Screen, ScreenManager, WipeTransition
import bank
import calc
# Database modules.
import database
import user
from bank import init_bank
from database import main
kivy.require("1.11.1")

from os import listdir

# Directory holding the .kv layout files; each one is loaded into the global
# Builder so every screen's rules exist before the app is instantiated.
kv_path = "./kv/"
for kv in listdir(kv_path):
    # Only load real .kv layouts -- a stray file in the directory (editor
    # backup, hidden file, ...) would otherwise crash Builder.load_file.
    if kv.endswith(".kv"):
        Builder.load_file(kv_path + kv)
class LoginScreen(Screen):
    """First screen shown to the user (1).

    Offers the login form plus a shortcut to the sign-up screen.

    Args:
        Screen (Screen): Base kivy screen class.
    """

    username = StringProperty()
    password = StringProperty()

    def login(self):
        """Validate the typed credentials and, on success, enter the menu."""
        valid = database.db.validate_user(self.username, self.password)
        if not valid:
            popup_msg(
                msg=(
                    "Ups! It seems like you haven't created any "
                    "account yet\nTry to create a new one first!"
                ),
                status=False,
            )
            return
        # Build the session objects for this user and boot the bank logic.
        user.init_user(self.username, self.password)
        init_bank(user.user)
        sm.transition.direction = "up"
        sm.current = "menu"

    def sign_up(self):
        """Slide right into the sign-up screen."""
        sm.transition.direction = "right"
        sm.current = "sign_up"
class SignupScreen(Screen):
    """Screen where a brand-new account is created.

    After a successful sign-up the user is taken straight to the menu
    screen (via the popup's dismiss callback).

    Args:
        Screen (Screen): Base kivy screen class.
    """

    username = StringProperty()
    password = StringProperty()

    def add_new_user(self):
        """Persist the new credentials and start a session for them."""
        created = database.db.create_new_user(self.username, self.password)
        if not created:
            popup_msg(
                msg=(
                    "Ups! We've caught a bug!\nPlease send an issue with"
                    " an extend description of this annoying bug!"
                ),
                status=False,
            )
            return
        popup_msg(
            func=self.go_to_menu, msg="User created successfully!", status=True
        )
        # Signing up also logs the user in: create the user and run the bank.
        user.init_user(self.username, self.password)
        init_bank(user.user)

    def go_to_menu(self, *args):
        """Popup-dismiss callback: slide up into the menu screen."""
        sm.transition.direction = "up"
        sm.current = "menu"
class MenuScreen(Screen):
    """Second screen (2): hosts the menu of available banking options.

    No Python-side behavior here; the option buttons presumably come from
    the ``RV`` recycle view wired up in the matching .kv file -- TODO confirm.

    Args:
        Screen (Screen): Base kivy screen class.
    """

    pass
class TransactionScreen(Screen):
    """Third screen (3): send cash to another user of the bank.

    Args:
        Screen (Screen): Base kivy screen class.
    """

    user_id = ObjectProperty(None)
    cash = ObjectProperty(None)
    object = StringProperty()

    def make_transaction(self):
        """Parse the form fields and run the transfer through the bank."""
        try:
            target = int(self.user_id.text)
            amount = float(self.cash.text)
            bank.bank.cash_transaction(target, amount)
            popup_msg(msg="Transaccion completada!", status=True)
        except Exception as error:
            # Both parse errors and bank-side failures land here.
            print(error)
            popup_msg(msg=str(error))
class StatusScreen(Screen):
    """Screen that displays the current (logged-in) user's figures only.

    Args:
        Screen (Screen): Base kivy screen class.
    """

    deposit_count = ObjectProperty(rebind=True)
    loan_count = ObjectProperty(rebind=True)
    deposit_total = ObjectProperty(None)
    loan_total = ObjectProperty(None)
    euros = ObjectProperty(None)
    dollars = ObjectProperty(None)
    object = ObjectProperty(None)

    def show_data(self):
        """Pull the user's figures from the bank and fill the labels."""
        widgets = (
            self.deposit_count,
            self.loan_count,
            self.deposit_total,
            self.loan_total,
            self.euros,
            self.dollars,
            self.object,
        )
        values = bank.bank.load_data_user()
        try:
            for widget, value in zip(widgets, values):
                # Floats are trimmed to 6 significant digits for display.
                widget.text = f"{value:.6}" if isinstance(value, float) else str(value)
        except Exception as error:
            popup_msg(msg=str(error))
class ConverterScreen(Screen):
    """Currency-conversion screen.

    Two spinners pick the source and target currencies; the typed amount is
    converted between them via the helpers in the ``calc`` module.
    """

    input_amount = NumericProperty()
    lbl_convert = ObjectProperty(None)

    # Currency names exactly as they appear in the spinner widgets.
    _DOP = "Dominican pesos"
    _USD = "Dollars"
    _EUR = "Euros"

    def __init__(self, **kw):
        super().__init__(**kw)
        # No spinner has been touched yet.
        self.spinner_value_from = None
        self.spinner_value_to = None

    def set_spinner_value_from(self, spinner):
        """Remember the source currency chosen in the 'from' spinner."""
        self.spinner_value_from = spinner.text

    def set_spinner_value_to(self, spinner):
        """Remember the target currency chosen in the 'to' spinner."""
        self.spinner_value_to = spinner.text

    def get_match_currency(self):
        """Return the ``calc`` converter matching the selected pair, or None.

        Side effects mirror the original UI flow: an unrecognized source
        currency pops a generic error dialog, and converting a currency to
        itself writes the rounded input straight into the result label.
        """
        table = {
            (self._USD, self._EUR): calc.dollars_to_euros,
            (self._USD, self._DOP): calc.dollars_to_dop,
            (self._EUR, self._USD): calc.euros_to_dollars,
            (self._EUR, self._DOP): calc.euros_to_dop,
            (self._DOP, self._USD): calc.dop_to_dollars,
            (self._DOP, self._EUR): calc.dop_to_euros,
        }
        if self.spinner_value_from not in (self._USD, self._EUR, self._DOP):
            popup_msg()
        if self.spinner_value_from == self.spinner_value_to:
            self.lbl_convert.text = str(round(self.input_amount, 2))
            return None
        return table.get((self.spinner_value_from, self.spinner_value_to))

    def do_convertion(self):
        """Run the conversion once both spinners have a value."""
        both_chosen = (
            self.spinner_value_from is not None
            and self.spinner_value_to is not None
        )
        if both_chosen:
            converter = self.get_match_currency()
            if converter:
                self.lbl_convert.text = str(converter(self.input_amount))
        else:
            # Nothing selected yet -- show a neutral zero.
            self.lbl_convert.text = "0.0"
        print(f"## From: {self.spinner_value_from} To: {self.spinner_value_to}")
class SaveObjectScreen(Screen):
    """Placeholder screen for the "Guardar un objeto" menu option.

    No Python-side behavior yet; any layout presumably lives in the
    matching .kv file -- TODO confirm.
    """

    pass
# The screen's manager; to change between different screens
class Manager(ScreenManager):
    """Screen manager used to switch between the app's screens.

    No custom behavior; presumably referenced from the .kv layout files --
    the module-level ``sm`` instance below is what the Python code uses.
    """

    pass
class RV(RecycleView):
    """Recycle view that holds the menu's buttons.

    Args:
        RecycleView (RecycleView): The RecycleView to be used.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # (caption, callback) pairs for every menu entry, in display order.
        entries = (
            ("Realizar un deposito", MyLayout.show_deposit),
            ("Tomar un prestamo", MyLayout.show_loan),
            ("Transacciones", MyLayout.show_transaction),
            ("Consulta de estado", MyLayout.show_status),
            ("Pago de prestamo", MyLayout.show_payment),
            ("Cambio de divisas", MyLayout.show_converter),
            ("Guardar un objeto", MyLayout.show_save_object),
        )
        self.data = [
            {"text": caption, "on_press": callback}
            for caption, callback in entries
        ]
class MyLayout(BoxLayout):
    """Content layout used inside the action popups (deposit/loan/payment).

    Args:
        BoxLayout (BoxLayout): The layout to be used.
    """

    message = ObjectProperty(None)
    amount = StringProperty()
    button = ObjectProperty(None)

    def __init__(self, msg: str, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.message.text = msg

    # ---- internal helpers -------------------------------------------------

    @staticmethod
    def _prompt(msg, title, button_caption, action_name):
        """Open a popup with a fresh MyLayout and wire its button.

        *action_name* is the name of the layout's own bound method that the
        button press should trigger.
        """
        layout = MyLayout(msg)
        popup_msg(content=layout, title=title)
        layout.button.text = button_caption
        layout.button.bind(on_press=getattr(layout, action_name))

    def _run_bank_op(self, op_name, success_msg):
        """Parse the typed amount and run the named bank operation.

        The ``bank.bank`` lookup happens inside the try so that an
        uninitialized bank (or a bad amount) surfaces as an error popup.
        """
        try:
            getattr(bank.bank, op_name)(float(self.amount))
            popup_msg(msg=success_msg, status=True)
        except Exception as error:
            popup_msg(msg=str(error))

    # ---- deposit ----------------------------------------------------------

    @staticmethod
    def show_deposit():
        MyLayout._prompt(
            "Enter the amount to be saved.", "Make deposit",
            "Save deposit!", "do_deposit",
        )

    def do_deposit(self, *args):
        self._run_bank_op("make_deposit", "Deposito realizado con exito!")

    # ---- loan -------------------------------------------------------------

    @staticmethod
    def show_loan():
        MyLayout._prompt(
            "Enter the needed cash.", "Make a loan",
            "Receive the loan!", "make_loan",
        )

    def make_loan(self, *args):
        self._run_bank_op("make_loan", "Prestamo recibido!")

    # ---- simple screen switches -------------------------------------------

    @staticmethod
    def show_transaction():
        sm.current = "transaction"

    @staticmethod
    def show_status():
        # Refresh the figures before the screen becomes visible.
        sm.get_screen("status").show_data()
        sm.current = "status"

    # ---- loan payment -----------------------------------------------------

    @staticmethod
    def show_payment():
        MyLayout._prompt(
            f"Debes {bank.bank.get_total_loan:.6}", "Payment",
            "Pay loan!", "make_payment",
        )

    def make_payment(self, *args):
        self._run_bank_op("pay_loan", "Payment done!")

    @staticmethod
    def show_converter():
        sm.current = "converter"

    @staticmethod
    def show_save_object():
        sm.current = "save_object"
# Create the screen manager.
# Module-level so the screen classes and MyLayout's static handlers can
# switch screens via `sm.current = ...`.
sm = ScreenManager(transition=WipeTransition())
# Used to run the program. This class must be one method (build) and return it.
class BankManagementApp(App):
    """Kivy application entry point; wires every screen into the manager."""

    def build(self):
        """Instantiate each screen, register it on the shared manager and
        return the manager as the root widget."""
        for screen in (
            LoginScreen(name="login"),
            SignupScreen(name="sign_up"),
            MenuScreen(name="menu"),
            TransactionScreen(name="transaction"),
            StatusScreen(name="status"),
            ConverterScreen(name="converter"),
            SaveObjectScreen(name="save_object"),
        ):
            sm.add_widget(screen)
        return sm
def popup_msg(
    func=lambda *args: None,
    msg: str = "Ups! A bug caught!",
    status: bool = False,
    content=None,
    title: str = None,
    *args,
    **kwargs,
):
    """Open a popup configured from the given optional arguments.

    Args:
        func: Callable bound to the popup's on_dismiss event. Defaults to a no-op.
        msg: Message shown when no custom *content* layout is supplied.
        status: Picks the default title — True -> "Done!", False -> "Error!".
        content: Optional layout used as the popup body; a Label showing
            *msg* is used when omitted.
        title: Explicit popup title; when given it overrides the
            status-based one.
    """
    # Explicit title wins; otherwise derive it from the status flag.
    popup_title = title if title is not None else ("Done!" if status else "Error!")
    # Predefined label, used only when the caller passes no content layout.
    lbl = Label(
        text=msg,
        italic=True,
        font_size=20,
        halign="justify",
    )
    popup = Popup(
        title=popup_title,
        content=content if content is not None else lbl,
        title_size=20,
        size_hint=(0.8, 0.65),
        title_align="center",
        title_color=(1, 0, 0, 0.8),
        on_dismiss=func,
    )
    popup.open()
# Run the app.
if __name__ == "__main__":
    # NOTE(review): `main` is not defined anywhere in this chunk — confirm it
    # is defined/imported elsewhere in the module, otherwise this raises
    # NameError before the app ever starts.
    main()
    app = BankManagementApp()
    app.run()
| djose1164/bank-management-system | src/main.py | main.py | py | 11,784 | python | en | code | 3 | github-code | 36 |
31518478012 | import huobi_eth_client
import poloniex_client
import email_client
class Transfer_ETH():
    """Moves ETH between Huobi and Poloniex exchange accounts.

    Both methods are ordered sequences of network calls (address lookup,
    manual verification aids, withdrawal) that print a running trace to
    stdout and raise BaseException on any verification/transfer failure.
    """
    def transfer_eth_from_huobi_to_poloniex(self, amount=''):
        """Withdraw ETH from Huobi to the recorded Poloniex deposit address.

        Raises BaseException when the deposit address does not match the
        local record, when the withdraw-address id cannot be found, or when
        Huobi reports an error. Returns the withdraw id on success.
        """
        # INVALID: the method of withdraw eth needs trade pw which is not yet supported by the server
        print('Start Transferring ETH from Huobi to Poloniex...')
        # get deposit address from poloniex and make sure it is the same as the recorded one
        print('Getting Poloniex deposit address:')
        poloniexDepositAddress = poloniex_client.Poloniex_Client().client.returnDepositAddresses()['ETH']
        print('Deposit address returned from Poloniex: %s' % poloniexDepositAddress)
        # NOTE(review): POLONIEX_ETH_DEPOSIT_ADDRESS is not defined in this
        # chunk — presumably a module-level constant; confirm.
        print('Deposit address in local record: %s' % POLONIEX_ETH_DEPOSIT_ADDRESS)
        isAddressSame = poloniexDepositAddress == POLONIEX_ETH_DEPOSIT_ADDRESS
        if isAddressSame:
            print('Deposit address verification result: PASS!')
        else:
            print('Deposit address verification result: FAILED! Please check Poloniex deposit address manually and update in local record!')
            raise BaseException('Deposit address not the same as record')
        # transfer amount from Huobi to Poloniex
        print('Transferring ETH from Huobi to Poloniex:')
        # get address id in eth reserved withdraw address
        addresses = huobi_eth_client.Huobi_ETH_Client().get_eth_withdraw_addresses()
        # print(addresses)
        address_id = ''
        for address in addresses:
            # Huobi returns addresses without the 0x prefix.
            if ('0x' + address['address']) == POLONIEX_ETH_DEPOSIT_ADDRESS:
                address_id = address['id']
                break
        if not address_id:
            raise BaseException('Address id not found!')
        # transfer using withdraw address id
        # NOTE(review): the amount is hard-coded to '0.01' and the `amount`
        # parameter is ignored here — looks like leftover test code; confirm
        # before relying on the parameter.
        withdraw_id = huobi_eth_client.Huobi_ETH_Client().withdraw_eth_create(address_id, amount='0.01')
        status = huobi_eth_client.Huobi_ETH_Client().withdraw_eth_place(withdraw_id)
        if status['status']=='ok':
            print('Transfer status: OK! Status: %s' % status)
        else:
            print('Transfer status: ERROR! Status: %s' % status)
            raise BaseException('Transfer error! Status: %s' % status)
        # return status
        withdraw_id = status['data']
        # NOTE(review): message says "BTC" but this method transfers ETH.
        print('Transfer SUCCESS! Amount: %s BTC, Withdraw_ID: %s' % (amount, withdraw_id))
        return withdraw_id
    def transfer_eth_from_poloniex_to_huobi(self, amount=''):
        """Withdraw *amount* ETH from Poloniex to the recorded Huobi address.

        Sends a best-effort caution email first, because Huobi's deposit
        address cannot be verified via the API. Raises BaseException when
        Poloniex does not confirm the withdrawal; returns True on success.
        """
        print('Start Transferring ETH from Poloniex to Huobi...')
        # get deposit address from huobi and make sure it is the same as the recorded one
        print('Getting Huobi deposit address:')
        print('Deposit address in local record: %s' % HUOBI_ETH_DEPOSIT_ADDRESS)
        print('Since the address cannot be verified, please go to the website to verify the address!')
        # send email about the transfer address
        try:
            print('Sending caution email:')
            email_client.Email_client().notify_me_by_email(title='Caution about ETH deposit address in Huobi.', content='BTC is being transferred from Poloniex to Huobi.\n Amount: %s \nBTC deposit address of Huobi cannot be verified. Please check manually.' % amount)
            print('Email sent successfully!')
        except:
            # Best-effort: a failed email must not block the transfer.
            print('Email sent FAILED.')
        # transfer amount from Poloniex to Huobi
        print('Transferring ETH from Poloniex to Huobi:')
        status = poloniex_client.Poloniex_Client().client.withdraw(currency='ETH', amount=amount, address=HUOBI_ETH_DEPOSIT_ADDRESS)
        print('Returned response: \"%s\"' % status['response'])
        # The chained %-formatting is left-associative: first it yields
        # 'Expected response: "Withdrew %.8f ETH."', then fills in the amount.
        print('Expected response: \"%s\"' % 'Withdrew %.8f ETH.' % float(amount))
        if status['response'] == 'Withdrew %.8f ETH.' % float(amount):
            print('Transfer status: OK! Status: %s' % status)
        else:
            print('Transfer status: ERROR! Status: %s' % status)
            raise BaseException('Transfer error! Status: %s' % status)
        # return status
        print('Transfer SUCCESS! Amount: %s ETH' % amount)
        return True
if __name__ == '__main__':
    # Manual test invocations, kept commented out because each one would
    # move real funds between exchanges.
    # print(Transfer_ETH().transfer_eth_from_huobi_to_poloniex(amount='0.01'))
    # withdraw_id = Transfer_BTC().transfer_btc_from_huobi_to_poloniex('0.01')
    # print(huobi_main_client.Huobi_Main_Client().cancel_withdraw(withdraw_id))
    # print(Transfer_BTC().transfer_btc_from_poloniex_to_huobi('0.01'))
    # print(Transfer_ETH().transfer_eth_from_poloniex_to_huobi('0.006'))
    pass
| szhu3210/Arbitrage-trader | legacy/transfer_eth.py | transfer_eth.py | py | 4,497 | python | en | code | 5 | github-code | 36 |
6667647383 | from flask import request, jsonify, abort, Blueprint
import requests
import json
from app import models
from .authRoutines import *
likeRoutes = Blueprint('likesBp', __name__)
# {authToken: xxxx, like: 0, betId: xxxx}
# {authToken: xxxx, like: 1, betId: xxxx}
@likeRoutes.route('/like/update', methods=['POST'])
def like_update():
    """Create or remove a Like linking the authenticated user to a bet.

    Expects a JSON body: {"authToken": ..., "like": 0|1, "betId": ...}.
    like == 1 ensures the Like row exists; any other value removes it.
    Returns a JSON {result, success|error} payload: 200 on success,
    400 on any validation failure.
    """
    authClass = authBackend()
    if request.method != 'POST':
        return jsonify({'result': True, 'Fail': 'Use POST'}), 400
    payload = json.loads(request.data.decode())
    token = payload['authToken']
    email = authClass.decode_jwt(token)
    # BUG FIX: the token was previously validated only AFTER querying the
    # database with email=False; reject bad tokens before touching the DB.
    if email is False:
        return jsonify({'result': False, 'error': 'Failed Token'}), 400
    user = db.session.query(models.User).filter_by(email=email).first()
    # Robustness: a valid token for a deleted/unknown user must not crash
    # later on user.id.
    if user is None:
        return jsonify({'result': False, 'error': 'User Doesn\'t Exist'}), 400
    bet = db.session.query(models.Bet).filter_by(id=payload['betId']).first()
    if bet is None:
        return jsonify({'result': False, 'error': 'Bet Doesn\'t Exist'}), 400
    like = db.session.query(models.Likes).filter_by(user_id=user.id, bet_id=bet.id).first()
    if payload['like'] == 1:
        # Idempotent create: only insert when no Like row exists yet.
        if like is None:
            like = models.Likes(bet.id, user.id)
            like.save()
            return jsonify({'result': True, 'success': 'Like Created'}), 200
        return jsonify({'result': True, 'success': 'Like Already in DB'}), 200
    # Idempotent delete for any non-1 like value.
    if like is not None:
        like.delete()
        return jsonify({'result': True, 'success': 'Like Removed'}), 200
    return jsonify({'result': True, 'success': 'Like Did not Exist'}), 200
| ThreeOhSeven/Backend | app/likesBp.py | likesBp.py | py | 1,828 | python | en | code | 0 | github-code | 36 |
27977157667 | import os
from django.core.exceptions import ValidationError
def validate_recording_file_extension(value):
    """Django validator: reject files whose extension is not an allowed audio type."""
    # os.path.splitext returns (path+filename, extension); index 1 is the extension.
    _, extension = os.path.splitext(value.name)
    # Add/Remove from the valid music file extensions as you see fit.
    allowed = ('.mp3', '.ogg', '.wave')
    if extension.lower() not in allowed:
        raise ValidationError(u'Unsupported file extension')
34954108287 |
import logging
import apache_beam as beam
from etl_operations.models.remittances import RemittanceSchema
from etl_operations.transforms.left_join import LeftJoin
def filter_remittance(remittance):
    """Return True when *remittance* passes RemittanceSchema validation."""
    errors = RemittanceSchema().validate(remittance)
    is_valid = errors == {}
    logging.info(f'{errors} - {is_valid} - {remittance}')
    return is_valid
class TransformRemittance(beam.DoFn):
    """Beam DoFn that normalizes a remittance record and re-serializes it."""

    def process(self, remittance):
        """Cast selected fields to strings (None when falsy) and yield the
        record serialized through RemittanceSchema."""
        schema = RemittanceSchema()
        parsed_id = str(remittance['id']) if remittance['id'] else None
        parsed_created = str(remittance['created_via_app_version']) if remittance['created_via_app_version'] else None
        # BUG FIX: this previously read remittance['id'] (copy/paste error),
        # which overwrote receipt_times with the record id.
        parsed_receipt_times = str(remittance['receipt_times']) if remittance['receipt_times'] else None
        remittance.update({
            'id': parsed_id,
            'created_via_app_version': parsed_created,
            'receipt_times': parsed_receipt_times,
        })
        yield schema.dump(remittance)
class MergeRemittancesUsers(beam.PTransform):
    """Composite transform joining remittances against the auth-users
    collection, once for each user-typed foreign key."""

    def expand(self, p):
        remittances, auth_users = p
        # Each step left-joins one foreign-key field against the auth table.
        joined = ((remittances, auth_users)
                  | 'MergeRemittancesCreatedUsers' >> LeftJoin(
                      'created_by_user',
                      'id',
                      'created_by_user_'
                  ))
        joined = ((joined, auth_users)
                  | 'MergeRemittancesCustomers' >> LeftJoin('customer', 'id', 'customer_'))
        joined = ((joined, auth_users)
                  | 'MergeRemittancesTellers' >> LeftJoin('teller', 'id', 'teller_'))
        return ((joined, auth_users)
                | 'MergeRemittancesUserProcessors' >> LeftJoin(
                    'user_processor',
                    'id',
                    'user_processor_'
                ))
| luisarboleda17/etls_valiu | etl_operations/etl_operations/transforms/remittances.py | remittances.py | py | 2,088 | python | en | code | 1 | github-code | 36 |
30349073082 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds Paper.time_final (float, default 0).
    # NOTE(review): max_length is not a FloatField option and is ignored by
    # Django — harmless leftover; confirm against the model definition.
    dependencies = [
        ('paper', '0010_auto_20160131_0616'),
    ]
    operations = [
        migrations.AddField(
            model_name='paper',
            name='time_final',
            field=models.FloatField(max_length=255, default=0),
        ),
    ]
| sychen1121/paper_label | website/paper/migrations/0011_paper_time_final.py | 0011_paper_time_final.py | py | 414 | python | en | code | 0 | github-code | 36 |
def count(word, character):
    """Return how many letters of *word* equal *character* exactly."""
    # Compare letter-by-letter; a multi-character `character` never matches,
    # preserving the original loop's semantics (unlike str.count).
    return sum(1 for letter in word if letter == character)
# Simple CLI driver: prompt for a word and a letter, then report the count.
inpword = input('Please enter a word: ')
inpletter = input('Please enter the letter you would like to count: ')
total = count(inpword, inpletter)
print(total)
30864450241 | import torch
from torch.utils.data import Dataset
import numpy as np
from pathlib import Path
from synthesizer.utils.text import text_to_sequence
class SynthesizerDataset(Dataset):
    """Dataset pairing cleaned text with mel spectrograms and speaker embeds.

    Reads a pipe-delimited metadata file and keeps only rows whose 5th
    column is a non-zero int. Columns used: [1]=mel filename, [2]=embed
    filename, [4]=keep flag, [5]=raw text.
    """

    def __init__(self, metadata_fpath: Path, mel_dir: Path, embed_dir: Path, hparams):
        print("Using inputs from:\n\t%s\n\t%s\n\t%s" % (metadata_fpath, mel_dir, embed_dir))
        with metadata_fpath.open("r") as metadata_file:
            metadata = [line.split("|") for line in metadata_file]
        mel_fnames = [x[1] for x in metadata if int(x[4])]
        mel_fpaths = [mel_dir.joinpath(fname) for fname in mel_fnames]
        embed_fnames = [x[2] for x in metadata if int(x[4])]
        embed_fpaths = [embed_dir.joinpath(fname) for fname in embed_fnames]
        # Parallel (mel_path, embed_path) pairs and their cleaned-up texts.
        self.samples_fpaths = list(zip(mel_fpaths, embed_fpaths))
        self.samples_texts = [x[5].strip() for x in metadata if int(x[4])]
        self.metadata = metadata
        self.hparams = hparams
        print("Found %d samples" % len(self.samples_fpaths))

    def __getitem__(self, index):
        # Sometimes index may be a list of 2 (not sure why this happens)
        # If that is the case, return a single item corresponding to first element in index
        # BUG FIX: `index is list` compared the value against the `list` type
        # object and was therefore always False; isinstance() performs the
        # intended check.
        if isinstance(index, list):
            index = index[0]
        mel_path, embed_path = self.samples_fpaths[index]
        # Transpose the stored mel — assumes mels are saved time-major on
        # disk; confirm against the preprocessing step.
        mel = np.load(mel_path).T.astype(np.float32)
        # Load the speaker embedding.
        embed = np.load(embed_path)
        # Get the text, clean it, and encode it as an int id sequence.
        text = text_to_sequence(self.samples_texts[index], self.hparams.tts_cleaner_names)
        # Convert the list returned by text_to_sequence to a numpy array.
        text = np.asarray(text).astype(np.int32)
        return text, mel.astype(np.float32), embed.astype(np.float32), index

    def __len__(self):
        return len(self.samples_fpaths)
def collate_synthesizer(batch, r, hparams):
    """Collate (text, mel, embed, index) samples into padded batch tensors.

    Texts are zero-padded to the longest sequence in the batch. Mels are
    padded along the time axis to (longest + 1) rounded up to a multiple of
    the reduction factor *r*, using the model's silence value. Returns
    (chars, mel, embeds, indices); the first three are torch tensors.
    """
    # Pad every encoded text to the longest one in the batch.
    longest_text = max(len(sample[0]) for sample in batch)
    chars = np.stack([pad1d(sample[0], longest_text) for sample in batch])

    # Mel time axis: longest + 1, rounded up so r divides it evenly.
    longest_spec = max(sample[1].shape[-1] for sample in batch) + 1
    if longest_spec % r != 0:
        longest_spec += r - (longest_spec % r)

    # WaveRNN mel spectrograms are normalized to [0, 1], so zero padding adds
    # silence; SV2TTS symmetric mels use -max_abs_value as silence instead.
    if hparams.symmetric_mels:
        silence = -1 * hparams.max_abs_value
    else:
        silence = 0
    mel = np.stack([pad2d(sample[1], longest_spec, pad_value=silence) for sample in batch])

    # Speaker-transfer embeddings and the dataset indices (the indices feed
    # vocoder preprocessing and stay a plain Python list).
    embeds = np.array([sample[2] for sample in batch])
    indices = [sample[3] for sample in batch]

    return torch.tensor(chars).long(), torch.tensor(mel), torch.tensor(embeds), indices
def pad1d(x, max_len, pad_value=0):
    """Right-pad a 1-D sequence with *pad_value* up to *max_len* entries."""
    arr = np.asarray(x)
    filler = np.full(max_len - len(arr), pad_value, dtype=arr.dtype)
    return np.concatenate([arr, filler])
def pad2d(x, max_len, pad_value=0):
    """Pad a 2-D array along its last axis with *pad_value* up to *max_len* columns."""
    arr = np.asarray(x)
    filler = np.full((arr.shape[0], max_len - arr.shape[-1]), pad_value, dtype=arr.dtype)
    return np.concatenate([arr, filler], axis=-1)
| IronIron2121/not_i | modules/synthesizer/synthesizer_dataset.py | synthesizer_dataset.py | py | 4,028 | python | en | code | 0 | github-code | 36 |
7537125263 | from __future__ import print_function
import six
from six.moves.html_parser import HTMLParser
from collections import defaultdict
from itertools import count
HTMLP = HTMLParser()
class SugarEntry:
    """Define an entry (record) of a SugarCRM module.

    Field values are fetched lazily from the server on first access and
    cached in ``_fields``; assignments are tracked in ``_dirty_fields``
    until :meth:`save` pushes them back to the server.
    """

    # Per-(module, id) counter giving every distinct entry a stable hash.
    # The hasattr() dance supports Python 2 (.next) and Python 3 (.__next__).
    _hashes = defaultdict(count(1).next if hasattr(count(1), 'next') else count(1).__next__)

    def __init__(self, module, fmap = None):
        """Represents a new or an existing entry.

        Keyword arguments:
        module -- SugarModule object the entry belongs to
        fmap -- optional initial mapping 'field_name' => value
        """
        # Keep a reference to the parent module.
        self._module = module
        # Keep a mapping 'field_name' => value for every valid field retrieved.
        self._fields = {}
        self._dirty_fields = []
        # Allow initial fields in constructor.
        if fmap is not None:
            self._fields.update(fmap)
        # Make sure that the 'id' field is always defined.
        if 'id' not in list(self._fields.keys()):
            self._fields['id'] = ''

    def __hash__(self):
        return self._hashes['%s-%s' % (self._module._name, self['id'])]

    def __unicode__(self):
        return "<SugarCRM %s entry '%s'>" % \
            (self._module._name.rstrip('s'), self['name'])

    def __str__(self):
        # BUG FIX: this previously returned str(self).encode('utf-8'), which
        # recursed into __str__ forever on Python 3. Build the text once and
        # encode only when it is not already a native str.
        text = self.__unicode__()
        return text if isinstance(text, str) else text.encode('utf-8')

    def __contains__(self, key):
        return key in self._module._fields

    def _retrieve(self, fieldlist, force = False):
        """Fetch the given fields from the server into the local cache.

        Unless *force* is set, fields already cached are skipped entirely.
        Fields missing on the server are cached as ''.
        """
        qstring = "%s.id = '%s'" % (self._module._table, self['id'])
        if not force:
            fieldlist = set(fieldlist) - set(self._fields.keys())
        if not fieldlist:
            return
        res = self._module._connection.get_entry_list(self._module._name,
                                                      qstring, '', 0,
                                                      list(fieldlist), 1, 0)
        if not res['entry_list'] or not res['entry_list'][0]['name_value_list']:
            for field in fieldlist:
                self[field] = ''
            return
        for prop, obj in list(res['entry_list'][0]['name_value_list'].items()):
            if obj['value']:
                # Values come back HTML-escaped from the API layer.
                self[prop] = HTMLP.unescape(obj['value'])
            else:
                self[prop] = ''

    def __getitem__(self, field_name):
        """Return the value of the field 'field_name' of this SugarEntry.

        Keyword arguments:
        field_name -- name of the field to be retrieved. Supports a tuple
                      of fields, in which case the return is a tuple.
        """
        if isinstance(field_name, tuple):
            self._retrieve(field_name)
            return tuple(self[n] for n in field_name)
        if field_name not in self._module._fields:
            raise AttributeError("Invalid field '%s'" % field_name)
        if field_name not in self._fields:
            self._retrieve([field_name])
        return self._fields[field_name]

    def __setitem__(self, field_name, value):
        """Set the value of a field of this SugarEntry.

        Keyword arguments:
        field_name -- name of the field to be updated
        value -- new value for the field
        """
        if field_name in self._module._fields:
            self._fields[field_name] = value
            if field_name not in self._dirty_fields:
                self._dirty_fields.append(field_name)
        else:
            raise AttributeError("Invalid field '%s'" % field_name)

    def save(self):
        """Save this entry in the SugarCRM server.

        If the 'id' field is blank, it creates a new entry and sets the
        'id' value.
        """
        # If 'id' wasn't blank, it's added to the list of dirty fields; this
        # way the entry will be updated in the SugarCRM connection.
        if self['id'] != '':
            self._dirty_fields.append('id')
        # nvl is the name_value_list, which has the list of attributes.
        nvl = []
        for field in set(self._dirty_fields):
            # Define an individual name_value record.
            nv = dict(name = field, value = self[field])
            nvl.append(nv)
        # Use the API's set_entry to update the entry in SugarCRM.
        result = self._module._connection.set_entry(self._module._name, nvl)
        try:
            self._fields['id'] = result['id']
        except (KeyError, TypeError):
            # Narrowed from a bare `except:`; an unexpected response shape is
            # reported but does not abort the save.
            print(result)
        self._dirty_fields = []
        return True

    def relate(self, *related, **kwargs):
        """
        Relate this SugarEntry with other Sugar Entries.

        Positional Arguments:
          related -- Secondary SugarEntry Object(s) to relate to this entry.

        Keyword arguments:
          relateby -> iterable of relationship names. Should match the
                      length of *secondary. Defaults to secondary
                      module table names (appropriate for most
                      predefined relationships).
        """
        self._module._connection.relate(self, *related, **kwargs)

    def get_related(self, module, fields = None, relateby = None, links_to_fields = None):
        """Return the related entries in another module.

        Keyword arguments:
        module -- related SugarModule object (or module name string)
        fields -- fields to fetch for the related records (default: ['id'])
        relateby -- custom relationship name (defaults to module.lower())
        links_to_fields -- Allows retrieval of related fields from additional
            related modules for retrieved records.
        """
        if fields is None:
            fields = ['id']
        if links_to_fields is None:
            links_to_fields = []
        connection = self._module._connection
        # Accomodate retrieval of modules by name.
        if isinstance(module, six.string_types):
            module = connection[module]
        result = connection.get_relationships(self._module._name,
                                              self['id'],
                                              relateby or module._name.lower(),
                                              '', # Where clause placeholder.
                                              fields,
                                              links_to_fields)
        entries = []
        for idx, elem in enumerate(result['entry_list']):
            entry = SugarEntry(module)
            for name, field in list(elem['name_value_list'].items()):
                val = field['value']
                # BUG FIX: used the Python-2-only name `basestring`, which
                # raises NameError on Python 3; six.string_types matches the
                # check used below.
                entry._fields[name] = HTMLP.unescape(val) if isinstance(val, six.string_types) else val
            entry.related_beans = defaultdict(list)
            linked = result['relationship_list'][idx]
            for relmod in linked:
                for record in relmod['records']:
                    relentry = {}
                    for fname, fmap in record.items():
                        rfield = fmap['value']
                        # BUG FIX: the fallback was `val` (leaked from the
                        # outer loop); non-string values now keep rfield.
                        relentry[fname] = HTMLP.unescape(rfield) if isinstance(rfield, six.string_types) else rfield
                    entry.related_beans[relmod['name']].append(relentry)
            entries.append(entry)
        return entries
| gddc/python_webservices_library | sugarcrm/sugarentry.py | sugarentry.py | py | 7,119 | python | en | code | 46 | github-code | 36 |
6829356831 | import re
import time
from os import environ
from datetime import datetime
from mkdocs.config import config_options
from mkdocs.plugins import BasePlugin
from .gitinfo import GitInfo
class GitShowHistoryLogPlugin(BasePlugin):
    """MkDocs plugin that replaces a {{ git_show_history_log }} marker with
    a markdown table of the page's most recent git commits."""

    config_scheme = (
        ('max_number_of_commits', config_options.Type(int, default=5)),
    )

    def __init__(self):
        self.enabled = True
        self.from_git = GitInfo()

    def on_page_markdown(self, markdown, page, config, files):
        """Expand the history-log marker in *markdown* for *page*.

        Returns the markdown unchanged when the plugin is disabled; when the
        marker is absent the substitution is a no-op.
        """
        if not self.enabled:
            return markdown

        commits = self.from_git.get_commits_for_file(
            page.file.abs_src_path, self.config['max_number_of_commits'])

        # Build the whole table first, then substitute once. The original
        # mixed str.find() (exact spacing only) with a whitespace-tolerant
        # regex, so markers like {{git_show_history_log}} produced a bogus
        # insert position (-1 + header length) and corrupted the page.
        rows = []
        for commit in commits:
            author = str(commit.author)
            date = time.strftime('%Y-%m-%d, %H:%M:%S', time.gmtime(commit.committed_date))
            msg = commit.message.partition('\n')[0]
            tag = str(self.from_git.get_tag_for_commit(commit))
            rows.append("| " + tag + " | " + author + " | " + date + " | " + msg + " |\n")

        table = ("| Version | Author | When | Message |\n"
                 "|---------|--------|------|---------|\n" + "".join(rows))

        # A function replacement avoids backslash/group expansion when commit
        # messages contain regex-special sequences.
        return re.sub(r"\{\{(\s)*git_show_history_log(\s)*\}\}",
                      lambda _m: table,
                      markdown,
                      flags=re.IGNORECASE)
| pawelsikora/mkdocs-git-show-history-log-plugin | mkdocs_git_show_history_log_plugin/plugin.py | plugin.py | py | 1,784 | python | en | code | 2 | github-code | 36 |
4062839778 | import turtle
def main():
    """Draw a crescent moon by overlapping two dots."""
    pen = turtle.Turtle()
    pen.hideturtle()
    drawDot(pen, 0, 0, 200, "orange")    # Full moon.
    drawDot(pen, -100, 0, 200, "white")  # Overlapping white dot takes a bite out.
def drawDot(t, x, y, diameter, colorP):
    ## Draw dot with center (x, y) having color colorP.
    # Lift the pen first so moving to (x, y) leaves no trail.
    t.up()
    t.goto(x, y)
    t.dot(diameter, colorP)
main()  # Draw the moon as soon as the module runs.
| guoweifeng216/python | python_design/pythonprogram_design/Ch6/6-3-E13.py | 6-3-E13.py | py | 399 | python | en | code | 0 | github-code | 36 |
42152798528 | #! Lianjia_Sold/sync2es.py
# synchronize data in MongoDB to ElasticSearch with updating item
from pymongo import MongoClient
from datetime import datetime
from uuid import uuid1
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
# import json
class MongoSyncEs(object):
    """One-shot sync job: copy today's Lianjia 'sold' documents from MongoDB
    into an Elasticsearch index, normalizing a few fields on the way.

    Connection endpoints and credentials are hard-coded in __init__.
    """
    def __init__(self):
        # Elasticsearch / MongoDB endpoints and target index settings.
        self.es_node = '198.181.46.127:9200'
        self.es_index = 'crawler.sold.v2'
        self.es_type = 'info'
        self.mongo_uri = 'mongodb://mongo:mongo2018@140.143.237.148:27020/?replicaSet=rs-27020'
        self.mongo_db = 'scrapy-lianjia_sold'
        # Number of documents synced in this run.
        self.count = 0
        # Only documents whose initial_time contains today's date are synced.
        self.query = datetime.now().date().strftime('%Y-%m-%d')
    def connection_mongo(self):
        """Return a handle to the source MongoDB database."""
        conn = MongoClient(self.mongo_uri)
        db = conn[self.mongo_db]
        return db
    def connection_es(self):
        """Return an Elasticsearch client, creating the target index if absent."""
        es = Elasticsearch([self.es_node])
        if not es.indices.exists(index=self.es_index): es.indices.create(index=self.es_index)
        return es
    def mongo_data_process(self, data):
        # format data collected from mongo: replace 'NA'/'未知' (unknown)
        # placeholders with zeros and strip trailing unit characters so ES
        # receives numeric fields.
        if data['浏览'] == 'NA':
            data['浏览'] = 0
        if data['挂牌价格'] == 'NA':
            data['挂牌价格'] = 0
        if data['基本属性']['建成年代'] == '未知':
            data['基本属性']['建成年代'] = 0
        else:
            data['基本属性']['建成年代'] = int(data['基本属性']['建成年代'])
        if data['基本属性']['建筑面积']:
            # [:-1] strips the trailing unit character — presumably ㎡; confirm.
            data['基本属性']['建筑面积'] = float(data['基本属性']['建筑面积'][:-1])
        else:
            data['基本属性']['建筑面积'] = float(0)
        if not data['基本属性']['产权年限'] == '未知':
            data['基本属性']['产权年限'] = int(data['基本属性']['产权年限'][:-1])
        else:
            data['基本属性']['产权年限'] = 0
        if not data['小区概况']['年代'] == '未知':
            data['小区概况']['年代'] = int(data['小区概况']['年代'][:-1])
        else:
            data['小区概况']['年代'] = 0
        if data['小区概况']['楼栋总数']:
            data['小区概况']['楼栋总数'] = int(data['小区概况']['楼栋总数'][:-1])
        if data['成交时间']:
            # Normalize date separators (2020.01.02 -> 2020-01-02).
            data['成交时间'] = data['成交时间'].replace('.', '-')
        return data
    def es_data_create(self, data):
        """Yield a single bulk-API 'create' action for the processed document."""
        doc = {
            '_op_type': 'create',
            '_index': self.es_index,
            '_type': self.es_type,
            '_id': uuid1(),
            '_source': self.mongo_data_process(data)
        }
        yield doc
    def es_pipeline_datetime(self, es):
        """Register (idempotently) the ingest pipeline that parses
        initial_time into @timestamp, and return its id."""
        id = 1
        es.ingest.put_pipeline(
            id=id,
            body={
                "description": "crawler.lianjia",
                "processors": [
                    {
                        "date": {
                            "field": "initial_time",
                            "target_field": "@timestamp",
                            "formats": ["Y-M-d H:m:s"],
                            "timezone": "Asia/Shanghai"
                        }
                    }
                ]
            }
        )
        return id
    def start(self):
        """Stream today's documents from both city collections into ES,
        logging each synced URL to sync_data.txt."""
        db = self.connection_mongo()
        es = self.connection_es()
        with open('sync_data.txt', 'a') as f:
            f.write('+++{}\n'.format(datetime.now()))
            for collection in ['sh-sold', 'su-sold']:
                # Projection drops _id and several nested fields ES does not need.
                cursor = db[collection].find({'initial_time':{'$regex':self.query}},
                    {'_id':0,'基本属性.套内面积':0,'基本属性.供暖方式':0,'小区概况.hid':0,
                     '小区概况.rid':0,'小区概况.其他户型':0,'小区概况.在售链接':0,'小区概况.成交链接':0,
                     '小区概况.小区详情':0,'小区概况.出租链接':0})
                for data in cursor:
                    # NOTE(review): the pipeline is re-registered once per
                    # document here; registering it once before the loop
                    # would be cheaper — confirm and hoist if so.
                    bulk(es, self.es_data_create(data), pipeline=self.es_pipeline_datetime(es))
                    self.count += 1
                    f.write(data.get('房源链接')+'\n')
            f.write('+++total data: {}\n'.format(self.count))
# Module-level execution: run a full sync of today's documents on import/run.
task = MongoSyncEs()
task.start()
| feelingu1314/lianjia | lianjia_sold/lianjia_sold/sync2es.py | sync2es.py | py | 4,288 | python | en | code | 0 | github-code | 36 |
13834428775 | #!/usr/bin/env python3
import json
import os
from sys import argv
import clap
import bearton
# Building UI
# Parse argv with clap and load the UI description matching this script's
# filename (ui/<name>.json).
args = clap.formater.Formater(argv[1:])
args.format()
_file = os.path.splitext(os.path.split(__file__)[-1])[0]
uipath = os.path.join(bearton.util.getuipath(), '{0}.json'.format(_file))
builder = clap.builder.Builder(uipath, argv=list(args))
builder.build()
ui = builder.get()
ui.check()
ui.parse()
# Setting constants for later use
TARGET = os.path.abspath(ui.get('-t') if '--target' in ui else '.')
SITE_PATH = bearton.util.getrepopath(TARGET)
SCHEMES_PATH = (ui.get('-S') if '--schemes' in ui else bearton.util.getschemespath(cwd=SITE_PATH))
# Creating widely used objects
msgr = bearton.util.Messenger(verbosity=int('--verbose' in ui), debugging=('--debug' in ui), quiet=('--quiet' in ui))
db = bearton.db.db(path=SITE_PATH).load()
config = bearton.config.Configuration(path=SITE_PATH).load()
# Mode dispatch: str(ui) is the sub-command name.
if str(ui) == 'init':
    # `init`: create or update a Bearton repository in SITE_PATH.
    path = os.path.abspath(SITE_PATH)
    if '--clean' in ui:
        msgr.debug('cleaning')
        bearton.init.rm(path, msgr)
    if '--no-write' not in ui:
        if '--update' in ui:
            bearton.init.update(target=path, msgr=msgr)
        else:
            bearton.init.new(target=path, schemes=SCHEMES_PATH, msgr=msgr)
        db.load()
        config.load()
        msgr.message('{0} Bearton local in {1}'.format(('updated' if '--update' in ui else 'initialized'), path), 1)
    if '--clean' in ui and '--no-write' in ui: #equivalent to removal
        db.unload()
        config.unload()
elif str(ui) == 'rm':
    # `rm`: remove the repository, if one exists in the target directory.
    target = (ui.get('-t') if '--target' in ui else '.')
    target = os.path.abspath(target)
    if '.bearton' in os.listdir(target):
        bearton.init.rm(target, msgr)
        msgr.debug('removed Bearton repository from {0}'.format(target))
    else:
        msgr.debug('no Bearton repository found in {0}'.format(target))
    exit() # to prevent config and db from being stored
elif bearton.util.inrepo(path=TARGET) and str(ui) == 'sync':
    # `sync`: refresh scheme copies stored inside the repository.
    # NOTE(review): these three prints look like leftover debugging output.
    print('ui mode:', ui.mode)
    print('ui arguments:', ui.arguments)
    print('ui parsed:', ui.parsed)
    if '--schemes' in ui:
        msgr.message(SCHEMES_PATH, 0)
    current_schemes_path = os.path.join(SITE_PATH, '.bearton', 'schemes')
    current_schemes = os.listdir(current_schemes_path)
    available_schemes = os.listdir(SCHEMES_PATH)
    # --all syncs every available scheme; otherwise only the ones already
    # present in the repository are refreshed.
    to_update = (available_schemes if '--all' in ui else [s for s in available_schemes if s in current_schemes])
    msgr.debug('current schemes: {0}'.format(', '.join(current_schemes)))
    msgr.debug('available schemes: {0}'.format(', '.join(available_schemes)))
    msgr.message('schemes to update: {0}'.format(', '.join(to_update)), 0)
    bearton.init.syncschemes(target=current_schemes_path, schemes=SCHEMES_PATH, wanted=to_update, msgr=msgr)
elif str(ui) == '':
    # No sub-command: handle global --version / --help flags.
    if '--version' in ui: msgr.message(('bearton version {0}' if '--verbose' in ui else '{0}').format(bearton.__version__), 0)
    if '--help' in ui:
        print('\n'.join(clap.helper.Helper(ui).help()))
else:
    # Unknown mode outside a repository: re-run the repo check with
    # panic=True just to surface its error message.
    # NOTE(review): `finally: pass` is a no-op — probably a leftover.
    try: bearton.util.inrepo(path=TARGET, panic=True)
    except bearton.exceptions.BeartonError as e: msgr.message('fatal: {0}'.format(e))
    finally: pass
# Storing widely used objects state
config.store().unload()
db.store().unload()
| marekjm/bearton | ui/bearton-init.py | bearton-init.py | py | 3,312 | python | en | code | 0 | github-code | 36 |
# cports build template for perl-json (Perl's JSON encoder/decoder module).
pkgname = "perl-json"
pkgver = "4.10"
pkgrel = 1
build_style = "perl_module"
hostmakedepends = ["gmake", "perl"]
makedepends = ["perl"]
checkdepends = ["perl-test-pod"]
depends = ["perl"]
pkgdesc = "JSON encoder/decoder"
maintainer = "q66 <q66@chimera-linux.org>"
license = "Artistic-1.0-Perl OR GPL-1.0-or-later"
url = "https://metacpan.org/release/JSON"
source = f"$(CPAN_SITE)/JSON/JSON-{pkgver}.tar.gz"
sha256 = "df8b5143d9a7de99c47b55f1a170bd1f69f711935c186a6dc0ab56dd05758e35"
| chimera-linux/cports | main/perl-json/template.py | template.py | py | 483 | python | en | code | 119 | github-code | 36 |
5209753399 | import os
import bs4
import requests
import re
import time
def main():
    """Crawl the Project Gutenberg author index pages for letters a-c.

    Each page fetch/scrape is best-effort: a failure is reported and the
    next letter is tried; successful fetches are followed by a polite delay.
    """
    # from alphabet "a" to "c"
    alphabetical_list = "abc"
    for char in alphabetical_list:
        try:
            url = "https://www.gutenberg.org/browse/authors/{}".format(char)
            site = pull_site(url)
            authors = scrape_author(site)
            print(authors)
        except Exception as exc:
            # Previously a bare `except:` silently swallowed every error
            # (including KeyboardInterrupt); keep the best-effort behaviour
            # but report what went wrong and let system exits propagate.
            print("Failed to process '{}': {}".format(char, exc))
            continue
        time.sleep(2)
def pull_site(url):
    """GET *url* and return the response, raising on HTTP error statuses."""
    response = requests.get(url)
    response.raise_for_status()
    return response
def scrape_author(site):
    """Extract author-page paths from a Gutenberg author-index response.

    Returns, for every anchor whose href starts with '/browse/authors/',
    the part of the path after that prefix.
    """
    soup = bs4.BeautifulSoup(site.text, 'html.parser')
    authors = []
    for a in soup.find_all('a', href=True):
        # BUG FIX: the original pattern had no capture group, so
        # match.group(1) raised IndexError on the first matching link.
        link_to_text = re.search(r'^/browse/authors/(.*)$', a['href'])
        if link_to_text:
            authors.append(link_to_text.group(1))
    return authors
def scrape_bookid(site):
    """Collect the numeric ebook ids linked from a Gutenberg listing page."""
    soup = bs4.BeautifulSoup(site.text, 'html.parser')
    book_ids = []
    for anchor in soup.find_all('a', href=True):
        # Book links look like /ebooks/14269; keep just the numeric id.
        match = re.search(r'^/ebooks/(\d+)$', anchor['href'])
        if match:
            book_ids.append(match.group(1))
    return book_ids
def download_books(book_id):
    """Fetch the plain-text content of a Gutenberg book by its id."""
    # e.g. http://www.gutenberg.org/cache/epub/14269/pg14269.txt
    url = f"https://www.gutenberg.org/cache/epub/{book_id}/pg{book_id}.txt"
    resp = requests.get(url)
    resp.raise_for_status()
    return resp.text
def save(book_id, book_data, book_folder="/Users/admin/Desktop/books"):
    """Write *book_data* to <book_folder>/book<book_id>.txt.

    The destination directory is created if missing. *book_folder* is now a
    parameter (defaulting to the previously hard-coded path) so the function
    is reusable and testable.
    """
    # makedirs(exist_ok=True) avoids the check-then-create race of
    # os.path.exists() followed by os.mkdir().
    os.makedirs(book_folder, exist_ok=True)
    book_name = "book{}.txt".format(book_id)
    full_path = os.path.join(book_folder, book_name)
    with open(full_path, 'w', encoding='utf-8') as fout:
        fout.write(book_data)
# Entry point: crawl the author listings when run as a script.
if __name__ == '__main__':
    main()
| zabuchan/web_scraping | show_gutenberg_authors.py | show_gutenberg_authors.py | py | 1,662 | python | en | code | 0 | github-code | 36 |
40861536656 | import numpy as np
from dataclasses import dataclass
import random
from image_multi_thresholding.base import _between_class_var, _image_probabilities
"""
Find thresholds of the gray levels using shuffled frog-leaping algorithm with between
class variance as fitness function.
"""
def _is_valid_frog(frog, L):
return (len(set(frog)) == len(frog) and frog[0] != 0 and frog[-1] != L-1)
@dataclass()
class SFLOptions:
    """Options to be passed to the threshold_sfl function.

    The total population size is number_memeplex * number_frog.
    """
    number_memeplex: int = 4
    """Number of memeplexes."""
    number_frog: int = 10
    """Number of frogs in each memeplex."""
    number_evolution: int = 10
    """Total of replication in memeplex evolution."""
def threshold_sfl(
        img: np.ndarray,
        k: int,
        iter: int = 100,
        options: SFLOptions = SFLOptions()):
    """Find thresholds of the gray levels using shuffled frog-leaping algorithm.
    Uses between class variance as a fitness function. SFLOptions has default recommended
    values for this algorithm, but you can change them by creating a new instance of it
    with your preferred values.
    **Arguments**:
    img: A 2D numpy.ndarray containing the pixel values of the image.
    k: Number of thresholds to find.
    iter: Number of iterations for the algorithm.
    options: If set, overrides the default options for the algorithm.
    **Typical usage example**:
    img = base.load_image('/route/to/image.png')
    options = SFLOptions(
        number_memeplex = 42
    )
    thresholds = threshold_sfl(
        img = img,
        k = 10,
        options = options
    )
    """
    # NOTE(review): the default `options=SFLOptions()` is a shared mutable
    # default instance — callers should not mutate it.
    prob = _image_probabilities(img)
    L = len(prob)  # number of gray levels
    pop_size = options.number_memeplex * options.number_frog
    # Random initial population of k thresholds per frog.
    # NOTE(review): randint(0, L) is inclusive of L, one past the last
    # valid gray level L-1 — presumably intended randint(0, L-1); confirm.
    frogs = np.array([[random.randint(0, L)
                       for _ in range(k)] for _ in range(pop_size)])
    frogs.sort()
    # Fitness of each frog; invalid frogs score 0 so they are never "best".
    bcv = np.array([_between_class_var(prob, frog)
                    if _is_valid_frog(frog, L) else 0 for frog in frogs])
    best_bcv = max(bcv)
    best_global_frog = frogs[np.argmax(bcv)]
    counter = 0
    while counter < iter:
        # Rank frogs by descending fitness, then deal them into memeplexes.
        sort_indeces = np.flip(np.argsort(bcv))
        sorted_frogs = np.array([frogs[i] for i in sort_indeces])
        all_frogs = []
        for m in range(options.number_memeplex):
            memeplex_frogs = np.array(
                [sorted_frogs[n+m*options.number_frog] for n in range(options.number_frog)], dtype=np.int16)
            evolution = 0
            while evolution < options.number_evolution:
                bcv_memeplex = np.array([_between_class_var(prob, frog)
                                         if _is_valid_frog(frog, L) else 0 for frog in memeplex_frogs])
                best_frog = memeplex_frogs[np.argmax(bcv_memeplex)]
                worst_frog = memeplex_frogs[np.argmin(bcv_memeplex)]
                worst_position = np.argmin(bcv_memeplex)
                # Move the worst frog toward the memeplex's best frog.
                new_worst_frog = np.sort(np.array(
                    worst_frog + random.random()*(best_frog - worst_frog), dtype=np.int16))
                if _is_valid_frog(new_worst_frog, L):
                    # Accept the jump only when it improves fitness.
                    if _between_class_var(prob, new_worst_frog) > _between_class_var(prob, worst_frog):
                        memeplex_frogs[worst_position] = new_worst_frog
                else:
                    # Fall back: jump toward the global best instead.
                    new_worst_frog = np.sort(
                        worst_frog + random.random()*(best_global_frog - worst_frog)).astype(np.int16)
                    if _is_valid_frog(new_worst_frog, L) and _between_class_var(prob, new_worst_frog) > _between_class_var(prob, worst_frog):
                        memeplex_frogs[worst_position] = new_worst_frog
                    else:
                        # Last resort: replace the worst frog with a random one.
                        memeplex_frogs[worst_position] = np.array(
                            [random.random() * (L-1) for _ in range(k)])
                evolution = evolution + 1
            # Shuffle the evolved memeplex back into the global population.
            if len(all_frogs) == 0:
                all_frogs = memeplex_frogs
            else:
                all_frogs = np.concatenate((all_frogs, memeplex_frogs))
        bcv = np.array([_between_class_var(prob, frog)
                        if _is_valid_frog(frog, L) else 0 for frog in all_frogs])
        best_bcv = max(bcv)
        best_global_frog = all_frogs[np.argmax(bcv)]
        counter += 1
    return sorted(best_global_frog.tolist())
# --- source boundary: image-multithresholding / src/image_multi_thresholding/threshold/sfl.py ---
import dns.exception
import dns.name
import dns.resolver
public_enum_domain = dns.name.from_text('e164.arpa.')
def from_e164(text, origin=public_enum_domain):
    """Convert an E.164 number in textual form into a Name object whose
    value is the ENUM domain name for that number.
    @param text: an E.164 number in textual form.
    @type text: str
    @param origin: The domain in which the number should be constructed.
    The default is e164.arpa.
    @type: dns.name.Name object or None
    @rtype: dns.name.Name object
    """
    # Keep only the digits, reverse them, and dot-join under the origin.
    digits = [c for c in text if c.isdigit()]
    return dns.name.from_text('.'.join(reversed(digits)), origin=origin)
def to_e164(name, origin=public_enum_domain, want_plus_prefix=True):
    """Convert an ENUM domain name into an E.164 number.
    @param name: the ENUM domain name.
    @type name: dns.name.Name object.
    @param origin: A domain containing the ENUM domain name. The
    name is relativized to this domain before being converted to text.
    @type: dns.name.Name object or None
    @param want_plus_prefix: if True, add a '+' to the beginning of the
    returned number.
    @rtype: str
    """
    # Idiom fix: `origin is not None` replaces the awkward `not origin is None`.
    if origin is not None:
        name = name.relativize(origin)
    # Each ENUM label must be exactly one digit.
    dlabels = [d for d in name.labels if (d.isdigit() and len(d) == 1)]
    if len(dlabels) != len(name.labels):
        raise dns.exception.SyntaxError('non-digit labels in ENUM domain name')
    # Labels are stored most-specific first, so reverse to get the number.
    dlabels.reverse()
    text = ''.join(dlabels)
    if want_plus_prefix:
        text = '+' + text
    return text
def query(number, domains, resolver=None):
    """Look for NAPTR RRs for the specified number in the specified domains.
    e.g. lookup('16505551212', ['e164.dnspython.org.', 'e164.arpa.'])

    Tries each domain in order and returns the first successful NAPTR
    answer; raises dns.resolver.NXDOMAIN if none of the domains match.
    """
    if resolver is None:
        resolver = dns.resolver.get_default_resolver()
    for domain in domains:
        # NOTE(review): `unicode` only exists on Python 2 — this module is
        # legacy py2 code and this line would raise NameError on Python 3.
        if isinstance(domain, (str, unicode)):
            domain = dns.name.from_text(domain)
        # Refers to this module via the package attribute (dns.e164) rather
        # than the local from_e164 — relies on the package being imported.
        qname = dns.e164.from_e164(number, domain)
        try:
            return resolver.query(qname, 'NAPTR')
        except dns.resolver.NXDOMAIN:
            # Not found in this domain; fall through to the next one.
            pass
    raise dns.resolver.NXDOMAIN
# --- source boundary: RMerl/asuswrt-merlin / lib/dnspython/dns/e164.py ---
from data_process import empty_extractor
import random
import numpy as np
import os
DATA_DIR = 'D:\home\zeewei\projects\\77GRadarML\classification_train_data'
PROCESSED_DATA_DIR = 'D:\home\zeewei\projects\\77GRadarML\classification_train_data'
PLAYGROUND_TRAIN_DATA_INPUT = os.path.join(DATA_DIR, 'pg_train_data.npy')
PLAYGROUND_TRAIN_DATA_LABEL = os.path.join(DATA_DIR, 'pg_train_label.npy')
PLAYGROUND_TEST_DATA_INPUT = os.path.join(DATA_DIR, 'pg_test_data.npy')
PLAYGROUND_TEST_DATA_LABEL = os.path.join(DATA_DIR, 'pg_test_label.npy')
def load_playground_data():
    """Build (or load cached) train/test splits for the playground classifier.

    Returns (train_data, train_label, test_data, test_label) where label 1
    marks car echoes and label 0 marks empty-scene echoes. If all four
    cached .npy files already exist they are loaded directly; otherwise the
    splits are rebuilt from the raw radar captures and cached to disk.
    """
    # Fast path: reuse previously generated splits.
    if os.path.exists(PLAYGROUND_TRAIN_DATA_INPUT) and os.path.exists(PLAYGROUND_TRAIN_DATA_LABEL) and os.path.exists(
            PLAYGROUND_TEST_DATA_INPUT) and os.path.exists(PLAYGROUND_TEST_DATA_LABEL):
        train_data = np.load(PLAYGROUND_TRAIN_DATA_INPUT)
        train_label = np.load(PLAYGROUND_TRAIN_DATA_LABEL)
        test_data = np.load(PLAYGROUND_TEST_DATA_INPUT)
        test_label = np.load(PLAYGROUND_TEST_DATA_LABEL)
        return train_data, train_label, test_data, test_label
    empty_origin_data_dir = "D:\home\zeewei\\20190320\empty"
    save_data_name = "pg_empty_goal_data.npy"
    empty_ex = empty_extractor.FeatureExtractor(origin_data_dir=empty_origin_data_dir,
                                                input_data_file_name=save_data_name)
    empty_data = empty_ex.load_empty_data()  # empty-scene (playground) samples
    random.shuffle(empty_data)
    car_data = np.load("D:\home\zeewei\projects\\77GRadar\processed_data\\all_two_lines_data.npy")
    random.shuffle(car_data)
    # Balance the classes: keep as many car samples as empty samples.
    car_data = car_data[0:len(empty_data)]
    car = []
    for item in car_data:
        # Each stored record's first element is the feature vector.
        car.append(item[0])
    car_data = car
    # 2/3 of each class goes to training, the remaining 1/3 to testing.
    car_train_data_len = int(2 * len(car_data) / 3)
    empty_train_data_len = int(2 * len(empty_data) / 3)
    train_data = car_data[0:car_train_data_len]
    empty_train_data = empty_data[0:empty_train_data_len]
    train_label = [1 for i in range(len(train_data))]
    for item in empty_train_data:
        train_data.append(item)
        train_label.append(0)
    test_data = car_data[car_train_data_len:len(car_data)]
    test_label = [1 for i in range(len(test_data))]
    # Persist per-class validation sets as well.
    # NOTE(review): np.save appends ".npy" — these files end up as
    # "*.numpy.npy"; presumably the ".numpy" suffix is unintended.
    np.save(os.path.join(PROCESSED_DATA_DIR, "pg_car_val_data.numpy"), test_data)
    np.save(os.path.join(PROCESSED_DATA_DIR, "pg_car_val_label.numpy"), test_label)
    empty_test_data = empty_data[empty_train_data_len:len(empty_data)]
    empty_test_label = [0 for i in range(len(empty_test_data))]
    np.save(os.path.join(PROCESSED_DATA_DIR, "pg_empty_val_data.numpy"), empty_test_data)
    np.save(os.path.join(PROCESSED_DATA_DIR, "pg_empty_val_label.numpy"), empty_test_label)
    for item in empty_test_data:
        test_data.append(item)
        test_label.append(0)
    # Cache the combined splits for the fast path above.
    np.save(PLAYGROUND_TRAIN_DATA_INPUT, train_data)
    np.save(PLAYGROUND_TRAIN_DATA_LABEL, train_label)
    np.save(PLAYGROUND_TEST_DATA_INPUT, test_data)
    np.save(PLAYGROUND_TEST_DATA_LABEL, test_label)
    return train_data, train_label, test_data, test_label
# --- source boundary: wzce/77GRadarML / data_process/empty_radar_data.py ---
from entities.basic import Roomspace
from entities.boxes import Box
from src.text import *
class MapRoom(Roomspace):
    """An overworld scene: no items, but locations and directions can
    still be uncovered."""

    def __init__(self, name):
        Roomspace.__init__(self, name)
        # Tag so the rest of the engine can tell overworld scenes apart.
        self.entity_type = 'overworld'

    def list_exits(self):
        """Print every location inside this map square, then each exit.

        Differs from the inherited method: there are no exits *from* the
        overworld, but a single map location can hold several entrances.
        """
        print(f"Locations in {proper(self.name)}:")
        # Enterable sub-locations first.
        for submap in self.submaps.values():
            print(f"{proper(intr(submap.name))}")
        # Then the same-space exits with their directions.
        for direction, destination in self.exits.items():
            print(f"{ex(direction.upper())}: {proper(destination.name)}")
class Room(Roomspace, Box):
    # This is a classic scene, with all features (exits, doors, inventory).
    def __init__(self, name):
        Roomspace.__init__(self, name)
        self.list_desc = f"inside {self.name}"
        self.closed_dirs = {}
        # Keyed by direction: holds either the room beyond an open way or
        # the door object, whenever the direction can be closed.
        self.inventory = {}
    def describe(self):
        # Differs from other describe methods: includes the room's
        # inventory as well as its exits.
        # NOTE(review): self.desc / list_items come from the base classes.
        for line in self.desc: print(line)
        for line in self.list_items(): print(line)
    def open(self, dire):
        # Open the given direction if it is closed.
        # This swaps the door object for the room beyond it; it does not
        # mutate the door itself.
        if self.exits[dire].entity_type != 'door':
            # No door in the way: nothing to open.
            print("The way is already open.")
            return True
        elif not self.exits[dire].locked:
            # Door present and unlocked: swap it for the room beyond.
            print(f"You open {self.exits[dire].name}.")
            self.exits[dire], self.closed_dirs[dire] = swap_items(
                self.exits[dire],
                self.closed_dirs[dire])
            return True
        else:
            # Door present but locked: the player must unlock it first.
            print(f"You need to {intr('unlock')} {self.exits[dire].name} first.")
    def close(self, dire):
        # Close the given direction if it can be closed.
        if self.exits[dire].entity_type == 'door':
            # The exit already shows the door, i.e. it is already closed.
            print(f"{self.exits[dire].name.capitalize()} is already closed")
            return True
        else:
            # Swap the open room back for its door.
            print(f"You close {self.closed_dirs[dire].name}.")
            self.exits[dire], self.closed_dirs[dire] = swap_items(
                self.exits[dire],
                self.closed_dirs[dire])
            return True
class Door(Roomspace):
    """A door between two rooms.

    Room.open/close swap a closed door for the room beyond it purely for
    exit listing; players should never actually end up inside a door.
    Fixes: stray dataset-metadata text fused to the final line (which broke
    the syntax) was removed, and empty input no longer raises IndexError.
    """
    def __init__(self, name, locked):
        Roomspace.__init__(self, name)
        self.locked = locked  # True while a key is still required
        self.desc = ["How the hell did you end up here?"]
        self.quick_desc = [
            "You're inside a door.",
            "Either you cheated or there's a bug."]
        # Filled during inventory initialization with the matching key item(s).
        self.keys = []
        self.entity_type = 'door'
    def unlock_door(self, actor, dire):
        """Unlock this door with any matching key in *actor*'s inventory,
        then offer to open the direction *dire* in the actor's room."""
        if self.locked:
            found = False
            for key in self.keys:
                if key in actor.inventory.keys():
                    self.locked = False
                    print(f"Unlocked {self.name}.")
                    found = True
            if not found:
                print(f"You need the key to unlock {self.name} first!")
        else:
            print(f"{self.name.capitalize()} is already unlocked.")
            return False  # Stop method if already unlocked.
        print(f"Would you like to open {self.name}?")
        choice = input("(y/n)> ").lower()
        # Guard against empty input before indexing the first character.
        if choice.startswith('y'): actor.current_room.open(dire)
        elif choice.startswith('n'): return True
        else:
            print(f"I didn't understand that, {self.name} is still closed.")
from flask import Flask
from flask import render_template
from flask import request
from urllib.parse import quote
from urllib.request import urlopen
import json
app = Flask(__name__)
OPEN_WEATHER_URL = "http://api.openweathermap.org/data/2.5/weather?q={0}&units=metric&APPID={1}"
OPEN_WEATHER_KEY = '36c794142f7b54ffe6765a3276168f2d'
OPEN_NEWS_URL = "http://newsapi.org/v2/everything?q={0}&from=2021-01-01&sortBy=publishedAt&apiKey={1}"
OPEN_NEWS_KEY = 'c52994d166e04d2483ab223e8abc68b7'
OPEN_COVID_URL = "http://newsapi.org/v2/everything?q=tesla&from=2021-01-01&sortBy=publishedAt&apiKey=c52994d166e04d2483ab223e8abc68b7"
@app.route('/')
def index():
    """Home page: current weather for ?city= (default bangkok) plus five
    recent articles from the fixed OPEN_COVID_URL news feed."""
    city = request.args.get('city')
    if not city:
        city = 'bangkok'
    weather = get_weather(city,OPEN_WEATHER_KEY)
    # Fetch and parse the news feed (blocking HTTP call).
    url1 = OPEN_COVID_URL
    data1 = urlopen(url1).read()
    parsed1 = json.loads(data1)
    articles = parsed1['articles']
    desc = []
    news = []
    img = []
    link = []
    # Take articles 1..5 (skips index 0).
    # NOTE(review): assumes the feed returns at least 6 articles —
    # fewer would raise IndexError; confirm against the API contract.
    for i in range(1,6):
        myarticles = articles[i]
        news.append(myarticles['title'])
        desc.append(myarticles['content'])
        img.append(myarticles['urlToImage'])
        link.append(myarticles['url'])
    # zip is consumed once by the template.
    mylist = zip(news, desc, img, link)
    return render_template('index.html', weather= weather, context= mylist)
def get_weather(city,API_KEY):
    """Fetch current weather for *city* from OpenWeatherMap.

    Returns a dict of display fields, or None when the API response has
    no 'weather' section (e.g. unknown city).
    """
    query = quote(city)
    # BUG FIX: the URL previously interpolated the raw `city`, leaving the
    # computed `quote(city)` unused — spaces/non-ASCII city names broke
    # the request. Use the percent-encoded form.
    url = OPEN_WEATHER_URL.format(query, API_KEY)
    data = urlopen(url).read()
    parsed = json.loads(data)
    weather = None
    if parsed.get('weather'):
        description = parsed['weather'][0]['description']
        temperature = parsed['main']['temp']
        city = parsed['name']
        pressure = parsed['main']['pressure']
        humidity = parsed['main']['humidity']
        wind = parsed['wind']['speed']
        icon = parsed['weather'][0]['icon']
        country = parsed['sys']['country']
        weather = {'description': description,
                   'temperature': temperature,
                   'city': city,
                   'country': country,
                   'pressure': pressure,
                   'humidity': humidity,
                   'wind': wind,
                   'icon': icon
                   }
    return weather
@app.route('/news')
def news():
    """News page: render articles for the ?news= topic (default covid-19)."""
    topic = request.args.get('news')
    if not topic:
        topic = 'covid-19'
    articles = get_news(topic, OPEN_NEWS_KEY)
    return render_template('news.html', context=articles)
def get_news(news,NEWS_KEY):
    """Fetch articles about *news* from newsapi.org.

    Returns a zip of (title, content, url) triples for every article.
    """
    query_news = quote(news)
    # BUG FIX: the URL previously interpolated the raw `news` topic,
    # leaving `quote(news)` unused — multi-word topics broke the request.
    url_news = OPEN_NEWS_URL.format(query_news,NEWS_KEY)
    data_news = urlopen(url_news).read()
    parsed_news = json.loads(data_news)
    articles_news = parsed_news['articles']
    desc = []
    news = []
    link = []
    for i in range(len(articles_news)):
        myarticles_news = articles_news[i]
        news.append(myarticles_news['title'])
        desc.append(myarticles_news['content'])
        link.append(myarticles_news['url'])
    # zip is consumed once by the caller's template.
    mylist = zip(news,desc,link)
    return mylist
@app.route('/about')
def about():
    """Serve the static About page."""
    return render_template('about.html')
# --- source boundary: pacharasiri/news-app-61102010154 / app.py ---
from pynamodb.models import Model
from pynamodb.attributes import (
UnicodeAttribute,
UnicodeSetAttribute,
NumberAttribute,
BooleanAttribute,
MapAttribute,
UTCDateTimeAttribute
)
from datetime import datetime
class WatchingList(Model):
    """
    PynamoDB model for the 'watching-list' DynamoDB table.

    References:
        https://pynamodb.readthedocs.io/en/latest/index.html
    Notes:
        Be aware that the models defined here should be consistent with
        dynamo stack in cdk.
    """
    class Meta:
        # Physical table name and AWS region; must match the CDK stack.
        table_name = 'watching-list'
        region = 'us-east-1'
    # Partition (hash) key: the stock ticker symbol.
    ticker_symbol = UnicodeAttribute(hash_key=True)
# --- source boundary: tanlin2013/stockbot / .aws/stack/lib/dynamo/table.py ---
import sys
sys.setrecursionlimit(100000000)
def getParents(a):
    """Return the representative (root) of a's set, compressing the path."""
    # Walk up to the root first.
    root = a
    while parents[root] != root:
        root = parents[root]
    # Path compression: point every visited node straight at the root.
    while parents[a] != root:
        parents[a], a = root, parents[a]
    return root
def union(a, b):
    """Merge the sets containing a and b (smaller root index becomes parent)."""
    root_a = getParents(a)
    root_b = getParents(b)
    if root_a == root_b:
        return
    # Keep the convention: the lower-numbered root absorbs the other.
    if root_a < root_b:
        parents[root_b] = root_a
    else:
        parents[root_a] = root_b
def findUnion(a, b):
    """Print YES when a and b share a representative, otherwise NO."""
    print("YES" if getParents(a) == getParents(b) else "NO")
# Read the number of nodes (n) and operations (m).
n, m = map(int, sys.stdin.readline().split())
# parents[i] == i marks a root; nodes are numbered 0..n inclusive.
parents = list(range(n+1))
for _ in range(m):
    q, a, b = map(int, sys.stdin.readline().split())
    if q == 0:
        # Operation 0: merge the two sets.
        union(a, b)
    else:
        # Operation 1: same-set query. Delegates to findUnion instead of
        # duplicating its body inline (also drops the stray dataset
        # metadata that was fused to the original last line).
        findUnion(a, b)
from itertools import count
from geopy.distance import geodesic
from datetime import timedelta, datetime
import json
import sys
import random
time_format = '%Y-%m-%d %H:%M:%S'
default_start = datetime.strptime('2020-01-01 00:00:00', time_format)
default_end = datetime.strptime('2020-06-30 23:59:59', time_format)
myicaos = list()
avail_icaos = list()
class Flight(object):
    """One flight of a given aircraft from its current location to a
    destination airport."""
    _ids = count(0)  # class-wide counter providing unique flight ids
    def __init__(self, aircraft, destination, dep_time, category): # max_velocity ?
        self.id = next(self._ids) # flight id, unique identifier
        self.aircraft_id = aircraft.id # aircraft id
        self.callsign = aircraft.callsign # flight callsign
        self.icao = aircraft.icao # flight icao
        self.aircraft_cat=category # aircraft type
        self.dep_airport = aircraft.location # departure airport icao address
        self.arr_airport = destination # arrival airport icao address
        self.dep_time = dep_time # departure time
        self.distance = float(self.dep_airport.distance(self.arr_airport)) # distance between departure and arrival airports
        if aircraft.location==destination:
            # Same-airport hop: fixed 30-minute duration.
            # BUG FIX: `timedelta(hours=0.5).days/24` always evaluated to 0
            # because .days truncates to whole days; use total hours instead.
            self.duration = timedelta(hours=0.5).total_seconds() / 3600
        else:
            self.duration = float(self.distance / aircraft.avg_speed) # flight duration in hours
        self.arr_time = time_add([self.dep_time, self.duration])
    def __str__(self):
        """Multi-line human-readable summary of the flight."""
        tostr = "Flight n°"+str(self.id)+"\n"
        tostr += "Aircraft ID: "+str(self.aircraft_id)+"\n"
        tostr += "Callsign: "+str(self.callsign)+"\n"
        tostr += "ICAO: "+str(self.icao)+"\n"
        tostr += "From: "+str(self.dep_airport.icao)+"\n"
        tostr += "To: "+str(self.arr_airport.icao)+"\n"
        tostr += "Distance: %.2f km\n" % self.distance
        tostr += "Departure: "+str(self.dep_time)+"\n"
        tostr += "Arrival: "+str(self.arr_time)+"\n"
        tostr += "Duration: %.2f h\n" % self.duration
        return tostr
    def aircraft_string(self):
        """Compact aligned summary used by Aircraft.print_flights."""
        string = "  Callsign:"+' '*(14-len(self.callsign))+self.callsign+' '*5+"ICAO: "+self.icao+"\n"
        string += "  Departure:"+' '*(13-len(self.dep_airport.icao))+self.dep_airport.icao+' '*5+str(self.dep_time)+"\n"
        string += "  Arrival:"+' '*(15-len(self.arr_airport.icao))+self.arr_airport.icao+' '*5+str(self.arr_time)+"\n"
        return string
class Aircraft(object):
    """A simulated aircraft: identity, current location, and a timeline of
    flights and ground stays."""
    _ids = count(0)  # class-wide counter providing unique aircraft ids
    def __init__(self, callsign, icao, location, birth=default_start, \
            avg_speed=660, next_update=None, cat=0): # max_velocity
        self.id = next(self._ids) # global aircraft id, unmutable
        self.callsign = callsign # callsign currently assigned to aircraft
        self.icao = icao # icao currently assigned to aircraft
        self.location = location # current aircraft location (airport or 'flying')
        self.avg_speed = avg_speed # average speed in km/h of the aircraft (constant for now)
        self.cat=cat # aircraft category
        self.history = list() # history of flights and groundings
        self.flights = list() # Flight objects, in chronological order
        self.birth=birth # time the aircraft entered the simulation
        self.next_update=next_update # scheduled time of next identity update
        self.initial_icao=icao # icao at birth (before any rotation)
        self.initial_callsign=callsign # callsign at birth
        # Register the aircraft as present at its starting airport.
        location.aircraft_arrival(self, self.birth)
    def __str__(self):
        """Multi-line human-readable summary of the aircraft."""
        tostr = "Aircraft n°"+str(self.id)+"\n"
        tostr += "Current callsign: "+str(self.callsign)+"\n"
        tostr += "Current ICAO: "+str(self.icao)+"\n"
        tostr += "Current location: "+str(self.location.icao)+"\n"
        tostr += "Average Speed: "+str(self.avg_speed)+"\n"
        tostr += "Number of flights: "+str(len(self.flights))+"\n"
        tostr += "Category: "+str(self.cat)+"\n"
        return tostr
    def new_flight(self, destination, dep_time):
        """Create a flight from the current location to *destination* at
        *dep_time*, recording the preceding ground stay and updating both
        airports. Returns the new Flight."""
        # create a new flight for the given aircraft from its current location
        f = Flight(aircraft=self, destination=destination, dep_time=dep_time, category=self.cat)
        # append past ground period and the new flight to history
        if len(self.flights)==0:
            from_time=self.birth
        else:
            from_time=self.flights[-1].arr_time
        self.history.append(AircraftHistoryRecord(status="ground", from_time=from_time, until_time=dep_time, airport=self.location))
        self.history.append(AircraftHistoryRecord(status="flying", flight=f))
        self.flights.append(f)
        # update aircraft location and notify both airports
        self.location.aircraft_departure(self, dep_time)
        self.location = destination
        self.location.aircraft_arrival(self, f.arr_time)
        return f
    def end_sim(self, time):
        """Close the aircraft's timeline at *time*: record the final ground
        stay and deregister it from its current airport."""
        if len(self.flights)==0:
            from_time=self.birth
        else:
            from_time=self.flights[-1].arr_time
        self.history.append(AircraftHistoryRecord(status="ground", from_time=from_time, until_time=time, airport=self.location))
        self.location.aircraft_departure(self, time)
    def on_ground(self, time):
        """Return True if the aircraft is on the ground at *time*.

        Times outside every recorded interval default to True."""
        for h in self.history:
            if h.from_time < time < h.until_time:
                return h.status=="ground"
        return True
    def print_flights(self):
        """Print a compact listing of all flights of this aircraft."""
        string = 'Aircraft n°'+str(self.id)+'\n'
        string += str(len(self.flights))+' flights\n'
        for f in self.flights:
            string += f.aircraft_string()+'\n'
        print(string[:-1])
    def icao_at(self, time):
        """Return the ICAO address in use at *time*: None with no flights,
        the initial icao before the first departure, otherwise the icao of
        the last flight departing at or before *time*."""
        if len(self.flights)==0:
            return None
        tmp=None
        for f in self.flights:
            if f.dep_time > time:
                break
            tmp=f
        if tmp is None:
            return self.initial_icao
        return tmp.icao
def new_aircraft(airports, birth=default_start, callsign='DCM', icao=None, avg_speed=660):
    """Create an Aircraft at a random airport, completing its callsign and
    ICAO address when needed."""
    home = airports[random.randint(0, len(airports)-1)]
    # A bare 3-letter operator prefix gets a random flight number appended.
    if len(callsign) == 3:
        callsign = callsign + str(random.randint(1, 9999))
    if icao is None:
        icao = get_icao()
    return Aircraft(callsign=callsign, icao=icao, location=home,
                    birth=birth, avg_speed=avg_speed)
class AircraftHistoryRecord(object):
    """One entry of an aircraft's timeline: a flight leg or a ground stay."""
    def __init__(self, status, flight=None, from_time=None, until_time=None, airport=None):
        self.status = status
        if status == 'flying':
            # Flight legs take their time window straight from the Flight.
            self.flight = flight
            self.from_time = flight.dep_time
            self.until_time = flight.arr_time
        elif status == 'ground':
            self.from_time = from_time
            self.until_time = until_time
            # NOTE: attribute name 'aiport' (sic) kept for compatibility.
            self.aiport = airport
class Airport(object):
    """An airport with its geolocation, size category, and the record of
    aircraft that are or were present."""
    def __init__(self, icao, lat, lon, alt, cat, name):
        self.icao=icao # unique identifier of an airport
        self.name=name # airport full name
        self.lat=lat # airport latitude
        self.lon=lon # airport longitude
        self.alt=alt # airport altitude
        self.cat=cat # airport category (small/medium/large)
        self.aircraft_history=list() # history of aircrafts that stayed at airport
        self.current_aircraft=dict() # aircraft currently present, by aircraft id
    def __str__(self):
        """Multi-line human-readable summary of the airport."""
        tostr = "Airport: "+str(self.icao)+"\n"
        tostr += "Fullname: "+str(self.name)+"\n"
        tostr += "Lat/Lon/Alt: %.4f/%.4f/%.0f\n" % (self.lat, self.lon, self.alt)#+str(self.lat)+"/"+str(self.lon)+"/"+str(self.alt)+"\n"
        tostr += "Category: "+str(self.cat)+"\n"
        return tostr
    def distance(self, other):
        # geodesic (great-circle) distance to another airport, in km
        return geodesic((self.lat, self.lon), (other.lat, other.lon)).km
    def aircraft_arrival(self, aircraft, time):
        # Register the aircraft as present, remembering its identity on arrival.
        self.current_aircraft[aircraft.id]=(time, aircraft.callsign, aircraft.icao)
    def aircraft_departure(self, aircraft, time):
        # Aircraft leaving: move its stay from the current set to history.
        self.aircraft_history.append( \
            AirportHistoryElement(aircraft, self.current_aircraft[aircraft.id], time))
        del self.current_aircraft[aircraft.id]
    def aircraft_at(self, time):
        """Return the aircraft recorded as present at *time* (from history
        only; still-present aircraft are not included)."""
        ac_list=list()
        for h in self.aircraft_history:
            if h.arrival_time <= time < h.departure_time:
                ac_list.append(h.aircraft)
        if len(ac_list)==0:
            # Diagnostic: an empty airport is unexpected in this simulation.
            print('strange: no aircraft at '+self.icao+' at '+str(time))
        return ac_list
    def print_aircraft_at(self, time):
        """Print the ids of all aircraft present at *time*, sorted by id."""
        string = "All aircraft in "+self.icao+" at "+str(time)+'\n'
        for a in sorted(self.aircraft_at(time),key=lambda aircraft: aircraft.id):
            string += '  n°'+str(a.id)+'\n'
        print(string[:-1])
class AirportHistoryElement(object):
    """Record of one aircraft's stay at an airport, capturing identity at
    arrival and at departure (callsign/icao may rotate in between)."""
    def __init__(self, aircraft, record, departure_time):
        arrival_time, arr_callsign, arr_icao = record
        self.aircraft = aircraft
        self.arrival_time = arrival_time
        self.departure_time = departure_time
        self.arr_callsign = arr_callsign
        self.arr_icao = arr_icao
        # Identity read at departure time, from the live aircraft object.
        self.dep_callsign = aircraft.callsign
        self.dep_icao = aircraft.icao
    def __str__(self):
        parts = (str(self.aircraft.id),
                 str(self.arrival_time) + '-' + str(self.departure_time),
                 self.arr_callsign + '-' + self.dep_callsign,
                 self.arr_icao + '-' + self.dep_icao)
        return ' '.join(parts)
class Airports(object):
    """A thin list-like container of Airport objects with JSON persistence."""
    def emtpy(self):
        # (sic) method name kept for compatibility: reset the container.
        self.elements = list()
    def __init__(self):
        self.emtpy()
    def __str__(self):
        return ''.join(str(a) for a in self.elements)
    def first(self, n):
        """Keep only the first n airports; returns self for chaining."""
        self.elements = self.elements[:n]
        return self
    def append(self, el):
        self.elements.append(el)
    def remove(self, el):
        self.elements.remove(el)
    def random(self):
        """Return a uniformly random airport."""
        return self.elements[random.randint(0, len(self.elements) - 1)]
    def get(self, icao):
        """Linear lookup by ICAO code; None when absent."""
        for airport in self.elements:
            if airport.icao == icao:
                return airport
        return None
    def to_file(self, filename):
        """Serialize all airports to *filename* as a JSON array."""
        entry_template = ('  {\n'
                          '    "icao" : "%s",\n'
                          '    "name" : "%s",\n'
                          '    "lat" : "%s",\n'
                          '    "lon" : "%s",\n'
                          '    "alt" : "%s",\n'
                          '    "cat" : "%s"\n'
                          '  },\n')
        text = '[\n' + ''.join(
            entry_template % (a.icao, a.name, a.lat, a.lon, a.alt, a.cat)
            for a in self.elements)
        # Strip the trailing ",\n" and close the array.
        text = text[:-2] + '\n]'
        with open(filename, "w") as fout:
            fout.write(text)
    def from_file(self, filename):
        """Replace the contents with airports parsed from a JSON file."""
        with open(filename, "r") as fin:
            payload = fin.read()
        records = json.loads(payload)
        self.elements = list()
        for d in records:
            self.append(Airport(d['icao'], float(d['lat']), float(d['lon']),
                                float(d['alt']), d['cat'], d['name']))
# Airport size classes; 'all' is a catch-all used elsewhere, not a real size.
categories = ['large', 'medium', 'small', 'all']
def from_opensky(airport):
    """Build an Airport from an OpenSky-format airport row.

    Field 7 holds the size keyword; it is mapped to a one-letter category
    ('L'/'M'/'S'), defaulting to 'oops' when nothing matches.
    """
    typ = next((size[0].upper() for size in categories[:-1] if size in airport[7]),
               'oops')
    return Airport(airport[2], airport[3], airport[4], airport[6], typ, airport[0])
def airports_from_file(category):
    """Load the airports of the given category from the bundled data/ folder."""
    airports = Airports()
    airports.from_file('data/' + category + '_airports.json')
    return airports
def time_add(times):
    """Sum a sequence of time-like values into a single datetime.

    Each element may be a datetime, a `time_format` string (parsed into a
    datetime), or a number of hours (int/float, converted to a timedelta).
    The first element must resolve to a datetime. Microseconds are
    truncated from the result. Unlike the original implementation, the
    caller's list is no longer mutated in place.
    """
    new_time = None
    for i, value in enumerate(times):
        if isinstance(value, str):
            value = datetime.strptime(value, time_format)
        elif isinstance(value, (int, float)):
            value = timedelta(hours=value)
        if i == 0:
            new_time = value
        else:
            new_time += value
    # Drop sub-second precision to keep timestamps tidy.
    new_time = new_time - timedelta(microseconds=new_time.microsecond)
    return new_time
def load_icaos(n=0):
    """Populate the module-level ICAO pools and return the available pool.

    With n == 0 the addresses are read from data/icaos.txt; otherwise the
    integers 0..n-1 are used as synthetic addresses. The file/range is
    only loaded once (guarded by `myicaos` being empty).
    """
    # load PIA icaos from file
    if len(myicaos)==0:
        if n==0:
            f = open('data/icaos.txt', 'r')
            myicaos.extend(f.read().split('\n')[:-1])
            f.close()
        else:
            myicaos.extend(list(range(0,n)))
    # NOTE(review): calling this twice extends avail_icaos with duplicates
    # because this line is outside the guard — presumably unintended.
    avail_icaos.extend(myicaos)
    return avail_icaos
def get_icao(old=None):
    """Draw a random unused ICAO address from the module-level pool.

    If *old* is given it is returned to the pool first (identity rotation).
    Lazily initializes the pool with 100000 synthetic addresses.
    """
    # returns a random unused ICAO address
    if len(myicaos)==0:
        load_icaos(n=100000)
    # put back the old icao to the set of available addresses
    if old is not None:
        avail_icaos.append(old)
    # Draw one address and mark it as in use.
    icao = avail_icaos[random.randint(0,len(avail_icaos)-1)]
    avail_icaos.remove(icao)
    return icao
def date(string):
    """Parse a `time_format` timestamp string into a datetime object."""
    parsed = datetime.strptime(string, time_format)
    return parsed
# --- source boundary: guillaumemichel/aircraft-privacy-simulator / structures.py ---
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
import pandas as pd
# Definición de funciones de activación y su derivada
def sigmoid(x):
    """Logistic activation: 1 / (1 + e^-x), elementwise for arrays."""
    denom = 1 + np.exp(-x)
    return 1 / denom
def sigmoid_derivative(x):
    """Derivative of the logistic function expressed in terms of its
    output x: sigmoid'(z) = x * (1 - x) where x = sigmoid(z)."""
    return (1 - x) * x
# Clase para la red neuronal
class NeuralNetwork:
    """A minimal fully-connected network: one hidden sigmoid layer and a
    sigmoid output layer, trained with plain gradient descent."""
    def __init__(self, input_size, hidden_size, output_size):
        # Layer sizes and uniform-random weight initialization in [0, 1).
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.weights_input_hidden = np.random.rand(self.input_size, self.hidden_size)
        self.weights_hidden_output = np.random.rand(self.hidden_size, self.output_size)
    def feedforward(self, X):
        """Forward pass; stores intermediate activations on self."""
        # Hidden layer
        self.hidden_input = np.dot(X, self.weights_input_hidden)
        self.hidden_output = sigmoid(self.hidden_input)
        # Output layer
        self.output = sigmoid(np.dot(self.hidden_output, self.weights_hidden_output))
    def backpropagation(self, X, y, learning_rate):
        """One gradient-descent update using the activations cached by
        feedforward (must be called after feedforward)."""
        # Output error
        error = y - self.output
        # Gradient at the output layer
        delta_output = error * sigmoid_derivative(self.output)
        # Update output-layer weights
        self.weights_hidden_output += np.dot(self.hidden_output.T, delta_output) * learning_rate
        # Gradient at the hidden layer
        # NOTE(review): this backpropagates through the *already updated*
        # output weights (they were modified two lines above) — textbook
        # backprop would use the pre-update weights; confirm if intended.
        error_hidden = delta_output.dot(self.weights_hidden_output.T)
        delta_hidden = error_hidden * sigmoid_derivative(self.hidden_output)
        # Update hidden-layer weights
        self.weights_input_hidden += X.T.dot(delta_hidden) * learning_rate
    def train(self, X, y, learning_rate, epochs):
        """Run full-batch training for the given number of epochs."""
        for _ in range(epochs):
            self.feedforward(X)
            self.backpropagation(X, y, learning_rate)
    def predict(self, X):
        """Forward X through the network and return the output activations."""
        self.feedforward(X)
        return self.output
# Función para Leave-k-Out
def leave_k_out(X, y, k):
    """Estimate accuracy by holding out samples one at a time.

    Trains a fresh NeuralNetwork without sample i and tests on sample i,
    for every i; returns the fraction classified correctly.
    NOTE(review): despite the name, *k* is never used — this is always
    leave-ONE-out. It also reads input_size/hidden_size/output_size/
    learning_rate/epochs from module globals defined later in the script.
    """
    errors = []
    for i in range(len(X)):
        # Hold out sample i; train on everything else.
        X_val = X[i]
        y_val = y[i]
        X_train = np.delete(X, i, axis=0)
        y_train = np.delete(y, i, axis=0)
        model = NeuralNetwork(input_size, hidden_size, output_size)
        model.train(X_train, y_train, learning_rate, epochs)
        y_pred = model.predict(X_val)
        # Class = index of the strongest output (one-hot targets).
        y_pred_class = np.argmax(y_pred)
        y_true_class = np.argmax(y_val)
        if y_pred_class != y_true_class:
            errors.append(1)
    return 1 - (sum(errors) / len(X))
# Load and prepare the data: features are all but the last 3 columns,
# targets are the 3 one-hot class columns.
data = np.genfromtxt('irisbin.csv', delimiter=',')
X = data[:, :-3]
y = data[:, -3:]
# Hyperparameters
input_size = X.shape[1]
hidden_size = 8
output_size = 3
learning_rate = 0.01
epochs = 100
k_out = 5
# Leave-k-out error estimate (note: implementation is leave-one-out).
k_out_accuracy = leave_k_out(X, y, k_out)
print(f'Error Leave-{k_out}-Out: {1 - k_out_accuracy:.2f}')
# Track which samples are classified correctly vs incorrectly.
correctly_classified_points = []
incorrectly_classified_points = []
# Run Leave-One-Out: retrain without sample i, then test on sample i.
for i in range(len(X)):
    X_val = X[i]
    y_val = y[i]
    X_train = np.delete(X, i, axis=0)
    y_train = np.delete(y, i, axis=0)
    model = NeuralNetwork(input_size, hidden_size, output_size)
    model.train(X_train, y_train, learning_rate, epochs)
    y_pred = model.predict(X_val)
    y_pred_class = np.argmax(y_pred)
    y_true_class = np.argmax(y_val)
    if y_pred_class == y_true_class:
        correctly_classified_points.append(X_val)
    else:
        incorrectly_classified_points.append(X_val)
correctly_classified_points = np.array(correctly_classified_points)
incorrectly_classified_points = np.array(incorrectly_classified_points)
# Apply PCA to project the features down to 2D for plotting.
pca = PCA(n_components=2)
X_2d = pca.fit_transform(X)
# Build a DataFrame labelling each projected point.
# NOTE(review): `x in correctly_classified_points` tests membership of a
# row vector in a 2D ndarray — numpy's `in` semantics here are loose
# (matches on any equality), so labels may be unreliable; confirm.
df = pd.DataFrame({'X': X_2d[:, 0], 'Y': X_2d[:, 1], 'Label': ['Correcto' if x in correctly_classified_points else 'Incorrecto' for x in X]})
df['Label'] = pd.Categorical(df['Label'])
# Scatter-plot the points, green = correct, red = incorrect.
plt.figure(figsize=(8, 6))
colors = {'Correcto': 'g', 'Incorrecto': 'r'}
plt.scatter(df['X'], df['Y'], c=df['Label'].apply(lambda x: colors[x]), marker='o')
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.title('Visualización de Resultados Leave-One-Out en 2D')
plt.legend(['Correcto', 'Incorrecto'])
plt.show()
# --- source boundary: Kenayman/Perceptron-simple / Ejercicio4.py ---
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 21 21:35:24 2021
@author: kanikshasharma
"""
import random
def randomSolution(pdp):
    """Return a random tour: a permutation of the city indices of pdp."""
    remaining = list(range(len(pdp)))
    solution = []
    while remaining:
        # Same draw as the original (randint then remove), via pop(index).
        solution.append(remaining.pop(random.randint(0, len(remaining) - 1)))
    return solution
def routeLength(pdp, solution):
    """Total length of the cyclic tour `solution` over distance matrix pdp
    (includes the closing edge from the last city back to the first)."""
    return sum(pdp[solution[i - 1]][solution[i]] for i in range(len(solution)))
def getNextnodes(solution):
    """All neighbors of `solution`: every tour obtained by swapping one
    pair of positions."""
    nextnodes = []
    size = len(solution)
    for i in range(size):
        for j in range(i + 1, size):
            neighbor = solution.copy()
            neighbor[i], neighbor[j] = neighbor[j], neighbor[i]
            nextnodes.append(neighbor)
    return nextnodes
def getBestNextnode(pdp, nextnodes):
    """Return (neighbor, length) for the shortest tour among `nextnodes`;
    the first one wins ties."""
    bestNextnode = nextnodes[0]
    bestRouteLength = routeLength(pdp, bestNextnode)
    for candidate in nextnodes[1:]:
        candidateLength = routeLength(pdp, candidate)
        if candidateLength < bestRouteLength:
            bestRouteLength = candidateLength
            bestNextnode = candidate
    return bestNextnode, bestRouteLength
def hillClimbing(pdp):
    """Greedy local search for a short tour: start from a random tour and
    follow strictly improving pair-swaps until a local optimum."""
    currentSolution = randomSolution(pdp)
    currentRouteLength = routeLength(pdp, currentSolution)
    while True:
        neighbor, neighborLength = getBestNextnode(pdp, getNextnodes(currentSolution))
        if neighborLength >= currentRouteLength:
            break  # local optimum: no neighbor is strictly better
        currentSolution, currentRouteLength = neighbor, neighborLength
    return currentSolution, currentRouteLength
def main():
pdp = [
[0, 200, 300, 100],
[200, 0, 100, 300],
[300, 100, 0, 200],
[100, 300, 200, 0]
]
print(hillClimbing(pdp))
if __name__ == "__main__":
main() | kanikshas4/hill-climbing-project | hill climbing ai project.py | hill climbing ai project.py | py | 2,061 | python | en | code | 0 | github-code | 36 |
36420895419 | #!/usr/bin/env python
# coding: utf-8
# # KSHEMA S
#
# TCS iON INTERNSHIP
# RIO-125:HR Salary Dashboard - Train the Dataset and Predict Salary
# # Problem statement
# This project aims to sanitize the data, analysis and predict if an employee's salary is higher or lower than $50K/year depends on certain attributes using different ML classification algorithms.
# # Importing necessary libraries and dataset to the Python environment
# In[1]:
# Working with data
import numpy as np
import pandas as pd
# For Visualizations
import matplotlib.pyplot as plt
from matplotlib import rcParams
import seaborn as sns
get_ipython().run_line_magic('matplotlib', 'inline')
# Ignore warnings
import warnings
warnings.filterwarnings('ignore')
# In[2]:
# Loading the HR dataset
# In[3]:
ds=pd.read_csv(r"C:\Users\Anish\Downloads\salarydata.csv")
# In[4]:
ds
# The dataset is shown here
# In[5]:
ds.describe()
# Dataset description
#
# Age: Age of person
#
# Workclass: Belongs to which working class like Private/government/self employed etc
#
# Education: Person's maximum qualification
#
# Education-Number: Numbered qualification
#
# Salary: Traget coloumn
#
#
# In[6]:
# Shape of the dataset
print(ds.shape)
# # DATA cleaning
#
# In[7]:
# Checking for null values in each coloumn
# In[8]:
print(ds.isna().sum())
# There is no null value in any of the coloumns
# In[9]:
# Check the datatypes of the data
ds.info()
# In[10]:
ds.nunique()
# In[11]:
ds['age'].unique()
# In[12]:
ds['workclass'].unique()
# In[13]:
ds['workclass'] = ds['workclass'].replace('?', np.nan)
# In[14]:
ds['workclass'].unique()
# In[15]:
ds.apply(lambda col: col.unique())
# In[16]:
for col in ds:
print(f'{col}: {ds[col].unique()}')
# The unique values in each coloumn have been displayed
# In[17]:
ds['occupation'].unique()
# In[18]:
ds['occupation'] = ds['occupation'].replace('?', np.nan)
ds['native-country'] = ds['native-country'].replace('?', np.nan)
# In[19]:
print(ds.isna().sum())
# It is clear that workclass,occupation and native country contains null values
# In[20]:
ds['workclass'] = ds['workclass'].fillna(ds['workclass'].mode()[0])
ds['occupation'] = ds['occupation'].fillna(ds['occupation'].mode()[0])
ds['native-country'] = ds['native-country'].fillna(ds['native-country'].mode()[0])
# In[21]:
print(ds.isna().sum())
# The null values are replaced with mode of the data
# # Exploratory Data Analysis
# Univariate Analysis
# In[22]:
freqgraph = ds.select_dtypes(include = ['int'])
freqgraph.hist(figsize =(20,15))
plt.show()
# In[23]:
ds['relationship'].value_counts().plot.pie(autopct='%.0f%%')
plt.title("relationship")
plt.show()
# The employees with relationship shown majority are husbands followed by not in a family and own child
# In[24]:
sns.countplot(x= ds['salary'], palette="dark")
#different types of credit accounts of a customer, shows the ability to handle multiple credits
plt.title("Salary scale")
plt.figure(figsize=(5,5))
plt.show()
# People are more who getting a salary of <=50K
# In[25]:
sns.countplot(x= ds['education'], palette="dark")
locs, labels = plt.xticks()
plt.setp(labels, rotation=90)
#different types of credit accounts of a customer, shows the ability to handle multiple credits
plt.title("Education Qualification")
plt.figure(figsize=(10,10))
plt.show()
# More people have eductaional qualification as HS grad
# # Bivariate analysis (w.r.t. target coloumn salary)
# In[26]:
# Annual_Income vs credit score
sns.barplot(x=ds['age'], y=ds['salary'])
plt.title('Age vs Salary')
plt.show()
# In[27]:
sns.boxplot(y=ds['salary'], x=ds['education-num'])
plt.title('education-num vs salary')
plt.show()
# In[28]:
sns.catplot(x= 'sex', col= 'salary', data = ds, kind = 'count', col_wrap = 3)
plt.show()
# # Outlier detection and removal using boxplot
# In[29]:
num_col = ds.select_dtypes(include=np.number).columns.tolist()
plt.figure(figsize=(20,30))
for i, variable in enumerate(num_col):
plt.subplot(5,4,i+1)
plt.boxplot(ds[variable],whis=1.5)
plt.tight_layout()
plt.title(variable)
# In[30]:
# Identify the outliers and remove
for i in num_col:
Q1=ds[i].quantile(0.25) # 25th quantile
Q3=ds[i].quantile(0.75) # 75th quantile
IQR = Q3-Q1
Lower_Whisker = Q1 - 1.5*IQR
Upper_Whisker = Q3 + 1.5*IQR
ds[i] = np.clip(ds[i], Lower_Whisker, Upper_Whisker)
# In[31]:
# PLot the numerical columns
plt.figure(figsize=(20,30))
for i, variable in enumerate(num_col):
plt.subplot(5,4,i+1)
plt.boxplot(ds[variable],whis=1.5)
plt.tight_layout()
plt.title(variable)
# In[32]:
ds[['age','salary']].head(24)
# # Label Encoding
# In[33]:
from sklearn import preprocessing
label= preprocessing.LabelEncoder()
ds['workclass']=label.fit_transform(ds['workclass'])
ds['education']=label.fit_transform(ds['education'])
ds['occupation']=label.fit_transform(ds['occupation'])
ds['sex']=label.fit_transform(ds['sex'])
ds['race']=label.fit_transform(ds['race'])
ds['native-country']=label.fit_transform(ds['native-country'])
ds['marital-status']=label.fit_transform(ds['marital-status'])
ds['relationship']=label.fit_transform(ds['relationship'])
# In[34]:
ds
# In[35]:
for i in ['workclass', 'education','marital-status','occupation']:
ds[i]=label.fit_transform(ds[i])
le_name_mapping =dict((zip(label.classes_, label.transform(label.classes_))))
print(le_name_mapping)
# # Standardization
# In[36]:
scale_col = ['age', 'education-num', 'capital-gain',
'capital-loss', 'hours-per-week']
from sklearn.preprocessing import StandardScaler
std = StandardScaler()
ds[scale_col]= std.fit_transform(ds[scale_col])
# In[37]:
ds
# In[38]:
ds.describe()
# In[39]:
ds.drop(['capital-gain','capital-loss','education-num'], axis = 1,inplace = True)
ds.head()
# Feature engineering
#
#
# While analyzing the dataset,
# As we can see in 'descriptive statistics - Numerical columns',
# 'capital-gain'and 'capital-loss' columns have 75% data as 0.00
# - So, we can drop 'capital-gain'& 'capital-loss' columns.
# The column,education-num is the numerical version of the column education, so we also drop it.
# # Split dataset into test and train
# In[40]:
from sklearn.model_selection import train_test_split
# In[41]:
X = ds.drop('salary', axis=1)
y= ds['salary']
# In[42]:
X_train, X_test, y_train, y_test = train_test_split(X,y ,test_size=0.25, random_state=42, stratify=y)
# In[43]:
ds['salary'].value_counts()
# In[44]:
ds['marital-status'].value_counts()
# # Modelling
#
# In[45]:
# split data into test and train
from sklearn.model_selection import train_test_split
# In[46]:
X_train, X_test, y_train, y_test = train_test_split(X,y ,test_size=0.25, random_state=42, stratify=y)
# In[47]:
print("Length of y train",len(y_train))
print("Length of y test",len(y_test))
# # 1) Logistic Regression
# In logistic regression, the model predicts the probability that an instance belongs to a particular class. This probability is represented by a value between 0 and 1, where 0 indicates that the instance definitely does not belong to the class and 1 indicates that it definitely does.To make these predictions, logistic regression uses a logistic function, which takes in a linear combination of the input features and maps it to a value between 0 and 1.
# In[48]:
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score,precision_score,recall_score,classification_report
# In[49]:
from sklearn.linear_model import LogisticRegression
lr=LogisticRegression(max_iter=2000)
lr.fit(X_train,y_train)
pred_lr=lr.predict(X_test)
con_lr=confusion_matrix(y_test,pred_lr)
print("The confusion matrix of logistic regression is \n",con_lr)
ac_lr=accuracy_score(y_test,pred_lr)
print('Accuracy:',ac_lr*100)
# In[50]:
print(classification_report(y_test,pred_lr))
# *Precision is the fraction of predicted positive instances that are actually positive, and is calculated as TP / (TP + FP). It gives you an idea of the proportion of positive predictions that are correct. High precision means that the model is good at not labeling negative instances as positive.
#
# *Recall is the fraction of actual positive instances that were predicted to be positive, and is calculated as TP / (TP + FN). It gives you an idea of the proportion of positive instances that the model was able to identify. High recall means that the model is good at finding all the positive instances.
#
# *The F1 score is the harmonic mean of precision and recall, and is calculated as 2 * (precision * recall) / (precision + recall). It is a balanced metric that takes into account both precision and recall.
# Support is the number of instances in each class.
#
# *Accuracy is the fraction of correct predictions made by the model, and is calculated as (TP + TN) / (TP + TN + FP + FN). It gives you an idea of the overall accuracy of the model.
# In[51]:
y_test
# In[52]:
pred_lr[:100]
# # 2) K Nearest Negihbour Classifier
# In[53]:
from sklearn.neighbors import KNeighborsClassifier
acc_values=[]
neighbors=np.arange(70,90)
for k in neighbors:
knn=KNeighborsClassifier(n_neighbors=k, metric='minkowski')
knn.fit(X_train, y_train)
pred_knn=knn.predict(X_test)
acc=accuracy_score(y_test, pred_knn)
acc_values.append(acc)
# In[54]:
plt.plot(neighbors,acc_values,'o-')
plt.xlabel('k value')
plt.ylabel('accuracy')
# In[55]:
print(classification_report(y_test, pred_knn))
# In[56]:
pred_knn[:20]
# In[57]:
con_lr=confusion_matrix(y_test,pred_knn)
print("The confusion matrix of knn is \n",con_lr)
ac_knn=accuracy_score(y_test,pred_knn)
print('Accuracy:',ac_knn*100)
# # 3)Decision Tree classifier
# In[58]:
from sklearn.tree import DecisionTreeClassifier
dtr=DecisionTreeClassifier()
dtr.fit(X_train,y_train)
dtr.fit(X_train,y_train)
pred_dt=dtr.predict(X_test)
con_dtr=confusion_matrix(y_test,pred_dt)
print("The confusion matrix of decision tree is \n",con_dtr)
ac_dt=accuracy_score(y_test,pred_dt)
print('Accuracy:',ac_dt*100)
# In[59]:
print(classification_report(y_test, pred_dt))
# # 4)Support Vector Machine
# In[60]:
from sklearn.svm import SVC
svc=SVC()
svc.fit(X_train,y_train)
pred_svc=svc.predict(X_test)
con_svc=confusion_matrix(y_test,pred_svc)
print("The confusion matrix of decision tree is \n",con_svc)
ac_svc=accuracy_score(y_test,pred_svc)
print('Accuracy:',ac_svc*100)
# In[61]:
print(classification_report(y_test, pred_svc))
# In[62]:
pred_svc[:50]
# # 5)Random Forest Classifier
# In[63]:
from sklearn.ensemble import RandomForestClassifier
rf=RandomForestClassifier()
rf.fit(X_train,y_train)
pred_RFC=rf.predict(X_test)
con_rf=confusion_matrix(y_test,pred_RFC)
print("The confusion matrix of random forest is \n",con_rf)
ac_rf=accuracy_score(y_test,pred_RFC)
print('Accuracy:',ac_rf*100)
# In[64]:
print(classification_report(y_test, pred_RFC))
# # 6) GradientBoostingClassifier
#
#
# In[65]:
from sklearn.ensemble import GradientBoostingClassifier
gb = GradientBoostingClassifier()
gb.fit(X_train,y_train)
pred_gb = gb.predict(X_test)
print('Classification_report is')
print(classification_report(y_test,pred_gb))
# In[66]:
# # 7) Naive_bayes Classifier
# In[67]:
# In[68]:
# # Comparisaon of accuracies of different models
# In[69]:
# In[70]:
# In[71]:
#
# Gradient Booster gives best accuracy compared to other supervised learning algorithms.
# For salary prediction,gradient booster is selected.
# In[72]:
ds
# In[73]:
# save the model
import pickle
filename = 'model.pkl'
pickle.dump(gb, open(filename, 'wb'))
# In[74]:
load_model = pickle.load(open(filename,'rb'))
# In[75]:
load_model.predict([[.03,4,11,4,3,5,4,0,0.1,34]])
# In[76]:
load_model.predict([[33,4,11,4,3,0,4,1,30,34]])
# In[77]:
load_model.predict([[.99,11,4,2,3,5,4,0,-0.19,38]])
# In[78]:
load_model.predict([[50,3,11,6,4,4,4,0,32,9]])
# In[ ]:
# In[ ]:
| Kshema85/TCS-iON--KSHEMA-HR-Salary-prediction | model1.py | model1.py | py | 12,261 | python | en | code | 0 | github-code | 36 |
856254777 | #!/usr/bin/env python
from pyhesity import *
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--vip', type=str, default='helios.cohesity.com')
parser.add_argument('-u', '--username', type=str, default='helios')
parser.add_argument('-d', '--domain', type=str, default='local')
parser.add_argument('-c', '--clustername', type=str, default=None)
parser.add_argument('-mcm', '--mcm', action='store_true')
parser.add_argument('-i', '--useApiKey', action='store_true')
parser.add_argument('-pwd', '--password', type=str, default=None)
parser.add_argument('-m', '--mfacode', type=str, default=None)
parser.add_argument('-e', '--emailmfacode', action='store_true')
parser.add_argument('-ip', '--ip', action='append', type=str)
parser.add_argument('-l', '--iplist', type=str, default=None)
parser.add_argument('-a', '--addentry', action='store_true')
parser.add_argument('-r', '--removeentry', action='store_true')
parser.add_argument('-p', '--profile', type=str, choices=['Management', 'SNMP', 'S3', 'Data Protection', 'Replication', 'SSH', 'SMB', 'NFS', ''], default='')
args = parser.parse_args()
vip = args.vip
username = args.username
domain = args.domain
clustername = args.clustername
mcm = args.mcm
useApiKey = args.useApiKey
password = args.password
mfacode = args.mfacode
emailmfacode = args.emailmfacode
ip = args.ip
iplist = args.iplist
addentry = args.addentry
removeentry = args.removeentry
profile = args.profile
if profile == '':
print('no profile specified')
exit(1)
# authenticate
if mcm:
apiauth(vip=vip, username=username, domain=domain, password=password, useApiKey=useApiKey, helios=True)
else:
if emailmfacode:
apiauth(vip=vip, username=username, domain=domain, password=password, useApiKey=useApiKey, emailMfaCode=True)
else:
apiauth(vip=vip, username=username, domain=domain, password=password, useApiKey=useApiKey, mfaCode=mfacode)
# if connected to helios or mcm, select to access cluster
if mcm or vip.lower() == 'helios.cohesity.com':
if clustername is not None:
heliosCluster(clustername)
else:
print('-clustername is required when connecting to Helios or MCM')
exit(1)
if apiconnected() is False:
print('authentication failed')
exit(1)
# gather list function
def gatherList(param=None, filename=None, name='items', required=True):
items = []
if param is not None:
for item in param:
items.append(item)
if filename is not None:
f = open(filename, 'r')
items += [s.strip() for s in f.readlines() if s.strip() != '']
f.close()
if required is True and len(items) == 0:
print('no %s specified' % name)
exit(1)
return items
# get list of ip/cidr to process
entries = gatherList(ip, iplist, name='entries', required=False)
if addentry is True:
action = 'add'
elif removeentry is True:
action = 'remove'
else:
action = 'list'
if action != 'list' and len(entries) == 0:
print('No entries specified')
exit(1)
# get existing firewall rules
rules = api('get', '/nexus/v1/firewall/list')
for cidr in entries:
if '/' not in cidr:
cidr = '%s/32' % cidr
for attachment in rules['entry']['attachments']:
if attachment['profile'] == profile:
if action != 'list':
if attachment['subnets'] is not None:
attachment['subnets'] = [s for s in attachment['subnets'] if s != cidr]
if action == 'add':
if attachment['subnets'] is None:
attachment['subnets'] = []
attachment['subnets'].append(cidr)
print(' %s: adding %s' % (profile, cidr))
else:
print(' %s: removing %s' % (profile, cidr))
rules['updateAttachment'] = True
if action != 'list':
result = api('put', '/nexus/v1/firewall/update', rules)
if 'error' in result:
exit(1)
print('\n%s allow list:' % profile)
for attachment in rules['entry']['attachments']:
if attachment['profile'] == profile:
if attachment['subnets'] is None or len(attachment['subnets']) == 0:
print(' All IP Addresses(*)')
else:
for cidr in attachment['subnets']:
print(' %s' % cidr)
print('')
| bseltz-cohesity/scripts | python/firewallTool/firewallTool.py | firewallTool.py | py | 4,351 | python | en | code | 85 | github-code | 36 |
23966932572 | #
# Process WRF solution file
#
# written by Eliot Quon (eliot.quon@nrel.gov)
#
from __future__ import print_function
import sys, os
import numpy as np
#from netCDF4 import Dataset
from netCDF4 import Dataset, MFDataset
try:
import xarray
except ImportError:
have_xarray = False
else:
print('xarray reader available')
have_xarray = True
g = 9.81
default_aggdim = 'time'
class WRFSolution(object):
"""Object to hold a single WRF solution snapshot"""
def __init__(self,*args,**kwargs):
verbose = kwargs.get('verbose',True)
aggdim = kwargs.get('aggdim',default_aggdim)
self.use_xarray = kwargs.get('use_xarray',have_xarray)
if self.use_xarray:
desc = 'with xarray'
else:
desc = 'with netcdf'
Nfiles = len(args)
self.filelist = []
for fpath in [ fpath for fpath in args if os.path.isfile(fpath) ]:
try:
Dataset(fpath)
except (IOError,OSError): # NetCDF: Unknown file format
pass
else:
self.filelist.append(fpath)
if self.use_xarray:
nc = xarray.open_mfdataset(self.filelist, concat_dim=aggdim)
self.Nt, self.Nz, self.Ny, self.Nx = nc.variables['U'].shape
self.Nx -= 1 # U is staggered in x
else:
nc = MFDataset(self.filelist, aggdim=aggdim)
self.Nt = len(nc.dimensions['time'])
self.Nx = len(nc.dimensions['west_east'])
self.Ny = len(nc.dimensions['south_north'])
self.Nz = len(nc.dimensions['bottom_top'])
self.varlist = list(nc.variables)
self._read_vars(nc)
def _read_vars(self,nc):
# unstaggered
self.T = nc.variables['T'][:] + 300.0
# staggered in x
U = nc.variables['U'][:]
self.U = 0.5*(U[:,:,:,:-1] + U[:,:,:,1:])
# staggered in y
V = nc.variables['V'][:]
self.V = 0.5*(V[:,:,:-1,:] + V[:,:,1:,:])
# staggered in z
W = nc.variables['W'][:]
PH = nc.variables['PH'][:]
PHB = nc.variables['PHB'][:]
self.W = 0.5*(W[:,:-1,:,:] + W[:,1:,:,:])
# calculate z == (ph + phb)/g
self.z = 0.5*( PH[:,:-1,:,:] + PH[:,1:,:,:] +
PHB[:,:-1,:,:] + PHB[:,1:,:,:] ) / g
# calculate height AGL
if 'HGT' in self.varlist:
# TODO: test this
hgt = nc.variables['HGT'][:]
for i in range(self.Nx):
for j in range(self.Ny):
self.z[:,i,j,:] -= hgt[i,j]
# xarray doesn't read in the mfdataset until we call .values
if self.use_xarray:
self.z = self.z.values
self.U = self.U.values
self.V = self.V.values
self.W = self.W.values
self.T = self.T.values
def sample_profile(self,itime=slice(0,None),i=None,j=None,overwrite=False):
"""Extracts velocity and temperature profile at a specified
location (defaults to center of domain).
If overwrite is True, reduce the dimensions of the stored z, U,
V, and T variables; otherwise, return the profiles.
"""
if i is None:
i = int(self.Nx / 2)
if j is None:
j = int(self.Ny / 2)
zprofile = self.z[itime,:,j,i]
Uprofile = self.U[itime,:,j,i]
Vprofile = self.V[itime,:,j,i]
Wprofile = self.W[itime,:,j,i]
Tprofile = self.T[itime,:,j,i]
if overwrite:
self.z = zprofile
self.U = Uprofile
self.V = Vprofile
self.W = Wprofile
self.T = Tprofile
else:
return dict(
z=zprofile,
U=Uprofile,
V=Vprofile,
W=Wprofile,
T=Tprofile
)
def approx_z(self):
self.zmean = self.z.mean(axis=(0,2,3))
self.zstdev = self.z.std(axis=(0,2,3))
return self.zmean
def planar_average(self):
"""Horizontally average velocity and temperature fields
Note: upwind fetch may skew the spatial average!
"""
self.zmean = np.mean(self.z, axis=(0,2,3))
self.Umean = np.mean(self.u, axis=(0,2,3))
self.Vmean = np.mean(self.v, axis=(0,2,3))
self.Wmean = np.mean(self.w, axis=(0,2,3))
self.Tmean = np.mean(self.T, axis=(0,2,3))
| NWTC/datatools | WRF/solution.py | solution.py | py | 4,452 | python | en | code | 2 | github-code | 36 |
29111172736 | #1. 정렬이 있는 순차 검색 [중복 허용 ]
# 정렬이 되어있는 배열의 순차검색 정의
def seqSearch( ary , fdata ) :
poslist = []
size = len(ary)
for i in range( size ) :
if ary[i] == fdata :
# pos = i
poslist.append( i )
elif ary[i] > fdata :
break
return poslist
# 전역변수
dataAry = [ 188 , 50 , 168 , 50 , 105 , 120 , 177 , 50 ]
# 정렬하기
dataAry.sort() #오름차순
# 입력받기
finddata = int( input(" 검색할 데이터 : ") )
position = seqSearch( dataAry , finddata )
if position == [] :
print( finddata , "가 없습니다.")
else:
print( finddata ,"는 " , position,'위치에 있음') | itdanja/week_python_202206 | 7일차/예제4_순차검색.py | 예제4_순차검색.py | py | 709 | python | ko | code | 0 | github-code | 36 |
23873430764 | import hashlib
import plistlib
import dictionary
from file import File
from macho import MachO
from math import exp, log
from symbol import Symbol
from signature import Signature
from universal import Universal
from ctypescrypto import cms, oid
from abnormality import Abnormality
from certificate import Certificate
from entitlement import Entitlement
from requirement import Requirement
from codedirectory import CodeDirectory
from loadcommander import LoadCommander
from functionimport import FunctionImport
from utilities import get_file_name, get_int, get_ll, little, readstring
class Parser(object):
# Constructor
def __init__(self, path=None):
# Fields
self.abnormalities = []
self.path = path
self.file = File(name=get_file_name(self.path))
self.f = open(path, 'rb')
# Functions
def add_abnormality(self, abnormality):
self.abnormalities.append(abnormality)
def identify_file(self, offset):
prev = self.f.tell()
self.f.seek(offset)
magic = get_int(self.f)
self.f.seek(prev)
if magic not in dictionary.machos:
return magic
return dictionary.machos[magic]
def get_file_size(self):
prev = self.f.tell()
self.f.seek(0)
size = len(self.f.read())
self.f.seek(prev)
return size
def get_file_hashes(self):
self.f.seek(0)
b = self.f.read()
md5 = hashlib.md5(b).hexdigest()
sha1 = hashlib.sha1(b).hexdigest()
sha256 = hashlib.sha256(b).hexdigest()
return {'md5': md5, 'sha1': sha1, 'sha256': sha256}
def get_cert_name_data(self, name, o):
try:
return name[o]
except KeyError:
return 'n/a'
def list_macho_flags(self, flags):
l = []
j = 0
while j < 28:
if (0x1 & (flags >> j)) == 0x1:
l.append(dictionary.flags[2 ** j])
j = j + 1
return l
def parse_syms(self, macho):
prev = self.f.tell()
true_offset = macho.offset + macho.symtab.offset
if macho.is_64_bit():
symbol_size = 60
else:
symbol_size = 56
if (true_offset < macho.offset + macho.size and
true_offset < self.file.size):
self.f.seek(true_offset)
for i in range(macho.symtab.nsyms):
if ((self.f.tell() + symbol_size > macho.offset +
macho.size) or (self.f.tell() + symbol_size >
self.file.size)):
data = {
'offset': self.f.tell(),
'mach-o_size': macho.size,
'mach-o_offset': macho.offset,
'file_size': self.file.size
}
a = Abnormality(title='REMAINING SYMBOLS OUT OF BOUNDS',
data=data)
self.add_abnormality(a)
self.f.seek(prev)
return
else:
index = get_int(self.f)
sym_type = int(self.f.read(1).encode('hex'), 16)
sect = int(self.f.read(1).encode('hex'), 16)
desc = int(self.f.read(2).encode('hex'), 16)
value = None
if macho.is_64_bit():
if macho.is_little():
value = little(get_ll(self.f), 'Q')
else:
value = get_ll(self.f)
else:
if macho.is_little():
value = little(get_int(self.f), 'I')
else:
value = get_int(self.f)
if macho.is_little():
index = little(index, 'I')
if sym_type >= 32:
if sym_type in dictionary.stabs:
stab = dictionary.stabs[sym_type]
else:
offset = self.f.tell() - symbol_size
data = {
'offset': offset,
'index': index,
'sym_type': sym_type,
'sect': sect,
'desc': desc,
'value': value
}
a = Abnormality(title='UNKNOWN STAB', data=data)
self.add_abnormality(a)
continue
sym = Symbol(index=index, stab=stab, sect=sect,
value=value)
macho.symtab.add_sym(sym)
else:
pext = sym_type & 0x10
if sym_type & 0x0e in dictionary.n_types:
n_type = dictionary.n_types[sym_type & 0x0e]
else:
offset = self.f.tell() - symbol_size
data = {
'offset': offset,
'index': index,
'pext': pext,
'n_type': sym_type & 0x0e,
'sect': sect,
'desc': desc,
'value': value
}
a = Abnormality(title='UNKNOWN N_TYPE', data=data)
self.add_abnormality(a)
ext = sym_type & 0x01
if macho.is_little():
dylib = desc & 0x0f
ref = (desc >> 8) & 0xff
else:
dylib = (desc >> 8) & 0xff
ref = desc & 0x0f
sym = Symbol(index=index, pext=pext, sym_type=n_type,
ext=ext, sect=sect, dylib=dylib, ref=ref,
value=value)
macho.symtab.add_sym(sym)
else:
data = {
'offset': true_offset,
'mach-o_size': macho.size,
'mach-o_offset': macho.offset,
'file_size': self.file.size
}
a = Abnormality(title='SYMBOL TABLE OUT OF BOUNDS', data=data)
self.add_abnormality(a)
self.f.seek(prev)
def parse_imports_and_strings(self, macho):
prev = self.f.tell()
true_offset = macho.offset + macho.strtab.offset
if macho.has_flag('TWOLEVEL'):
for i in macho.symtab.gen_syms():
if i.is_imp():
self.f.seek(true_offset + i.index)
if ((self.f.tell() > (true_offset +
macho.strtab.size)) or
(self.f.tell() > self.file.size)):
data = {
'offset': self.f.tell(),
'strtab_offset': true_offset,
'strtab_size': macho.strtab.size,
'file_size': self.file.size
}
a = Abnormality(title='BAD STRING INDEX', data=data)
self.add_abnormality(a)
continue
func = readstring(self.f)
if i.dylib == 0:
dylib = 'SELF_LIBRARY'
elif i.dylib <= len(macho.dylibs):
dylib = macho.dylibs[i.dylib - 1]
elif i.dylib == 254:
dylib = 'DYNAMIC_LOOKUP'
elif i.dylib == 255:
dylib = 'EXECUTABLE'
else:
data = {
'dylib': i.dylib,
'dylib_len': len(macho.dylibs)
}
a = Abnormality(title='DYLIB OUT OF RANGE', data=data)
self.add_abnormality(a)
dylib = str(i.dylib) + ' (OUT OF RANGE)'
imp = FunctionImport(func=func, dylib=dylib)
macho.add_import(imp)
else:
self.f.seek(true_offset + i.index)
if ((self.f.tell() > (true_offset +
macho.strtab.size)) or
(self.f.tell() > self.file.size)):
data = {
'offset': self.f.tell(),
'strtab_offset': true_offset,
'strtab_size': macho.strtab.size,
'file_size': self.file.size
}
a = Abnormality(title='BAD STRING INDEX', data=data)
self.add_abnormality(a)
continue
string = readstring(self.f)
if string != '':
macho.strtab.add_string(string)
else:
for i in macho.symtab.gen_syms():
if i.is_imp():
self.f.seek(true_offset + i.index)
if self.f.tell() > (true_offset +
macho.strtab.size):
data = {
'offset': self.f.tell(),
'strtab_offset': true_offset,
'strtab_size': macho.strtab.size
}
a = Abnormality(title='BAD STRING INDEX', data=data)
self.add_abnormality(a)
continue
func = readstring(self.f)
imp = FunctionImport(func=func)
macho.add_import(imp)
else:
self.f.seek(true_offset + i.index)
string = readstring(self.f)
if string != '':
macho.strtab.add_string(string)
self.f.seek(prev)
def parse_certs(self, signature, offset):
prev = self.f.tell()
true_offset = signature.offset + offset
self.f.seek(true_offset)
magic = get_int(self.f)
if magic != dictionary.signatures['BLOBWRAPPER']:
data = {
'offset': true_offset,
'magic': hex(magic),
'expected': hex(dictionary.signatures['BLOBWRAPPER'])
}
a = Abnormality(title='BAD MAGIC - BLOBWRAPPER', data=data)
self.add_abnormality(a)
self.f.seek(prev)
return
size = get_int(self.f) - 8
if size > 0:
signed_data = cms.CMS(self.f.read(size), format='DER')
for cert in signed_data.certs:
serial = cert.serial
subject = {
'country': self.get_cert_name_data(cert.subject,
oid.Oid('C')),
'org': self.get_cert_name_data(cert.subject, oid.Oid('O')),
'org_unit': self.get_cert_name_data(cert.subject,
oid.Oid('OU')),
'common_name': self.get_cert_name_data(cert.subject,
oid.Oid('CN'))
}
issuer = {
'country': self.get_cert_name_data(cert.issuer, oid.Oid('C')),
'org': self.get_cert_name_data(cert.issuer, oid.Oid('O')),
'org_unit': self.get_cert_name_data(cert.issuer,
oid.Oid('OU')),
'common_name': self.get_cert_name_data(cert.issuer,
oid.Oid('CN'))
}
ca = cert.check_ca()
cert = Certificate(serial=serial, subject=subject,
issuer=issuer, ca=ca)
signature.add_cert(cert)
else:
data = {
'offset': true_offset,
'size': size
}
a = Abnormality(title='NON-POSITIVE CMS SIZE', data=data)
self.add_abnormality(a)
self.f.seek(prev)
def parse_codedirectory(self, signature, offset):
prev = self.f.tell()
true_offset = signature.offset + offset
self.f.seek(true_offset)
magic = get_int(self.f)
if magic != dictionary.signatures['CODEDIRECTORY']:
data = {
'offset': true_offset,
'magic': hex(magic),
'expected': hex(dictionary.signatures['CODEDIRECTORY'])
}
a = Abnormality(title='BAD MAGIC - CODEDIRECTORY', data=data)
self.add_abnormality(a)
self.f.seek(prev)
return
# Skip size
self.f.read(4)
version = get_int(self.f)
# Not sure how to parse flags yet...
flags = get_int(self.f)
hash_offset = get_int(self.f)
ident_offset = get_int(self.f)
n_special_slots = get_int(self.f)
n_code_slots = get_int(self.f)
code_limit = get_int(self.f)
hash_size = int(self.f.read(1).encode('hex'), 16)
hash_type = dictionary.hashes[int(self.f.read(1).encode('hex'), 16)]
if version >= 0x20200:
platform = int(self.f.read(1).encode('hex'), 16)
else:
# Skip spare1
self.f.read(1)
page_size = int(round(exp(int(self.f.read(1).encode('hex'),
16) * log(2))))
# Skip spare2
self.f.read(4)
if version >= 0x20100:
scatter_offset = get_int(self.f)
if version >= 0x20200:
team_id_offset = get_int(self.f)
self.f.seek(true_offset + team_id_offset)
team_id = readstring(self.f)
self.f.seek(true_offset + ident_offset)
identity = readstring(self.f)
codedirectory = CodeDirectory(version=version, flags=flags,
hash_offset=hash_offset,
n_special_slots=n_special_slots,
n_code_slots=n_code_slots,
code_limit=code_limit,
hash_size=hash_size, hash_type=hash_type,
page_size=page_size, identity=identity)
if version >= 0x20100:
codedirectory.scatter_offset = scatter_offset
if version >= 0x20200:
codedirectory.platform = platform
codedirectory.team_id_offset = team_id_offset
codedirectory.team_id = team_id
self.f.seek(true_offset + hash_offset - n_special_slots * hash_size)
count = n_special_slots + n_code_slots
while count > 0:
hash = self.f.read(hash_size).encode('hex')
codedirectory.add_hash(hash)
count -= 1
signature.codedirectory = codedirectory
self.f.seek(prev)
# Mimicking OID parser implementation from:
# http://opensource.apple.com/source/Security/Security-57337.20.44/OSX/libsecurity_cdsa_utilities/lib/cssmdata.cpp
def get_oid(self, db, p):
q = 0
while True:
q = q * 128 + (db[p] & ~0x80)
if p < len(db) and db[p] & 0x80:
p += 1
else:
p += 1
break
return q, p
def to_oid(self, length):
if length == 0:
return ''
data_bytes = [int(self.f.read(1).encode('hex'),
16) for i in range(length)]
p = 0
# first byte is composite (q1, q2)
oid1, p = self.get_oid(data_bytes, p)
q1 = min(oid1 / 40, 2)
data = str(q1) + '.' + str(oid1 - q1 * 40)
while p < len(data_bytes):
d, p = self.get_oid(data_bytes, p)
data += '.' + str(d)
self.f.read(-length & 3)
return data
def parse_entitlement(self, signature, offset):
prev = self.f.tell()
true_offset = signature.offset + offset
self.f.seek(true_offset)
magic = get_int(self.f)
if magic != dictionary.signatures['ENTITLEMENT']:
data = {
'offset': true_offset,
'magic': hex(magic),
'expected': hex(dictionary.signatures['ENTITLEMENT'])
}
a = Abnormality(title='BAD MAGIC - ENTITLEMENT', data=data)
self.add_abnormality(a)
self.f.seek(prev)
return
size = get_int(self.f) - 8
plist = plistlib.readPlistFromString(self.f.read(size))
entitlement = Entitlement(size=size, plist=plist)
signature.add_entitlement(entitlement)
self.f.seek(prev)
def parse_data(self):
length = get_int(self.f)
data = self.f.read(length)
# Skip padding
self.f.read(-length & 3)
return data
def parse_match(self):
match_type = get_int(self.f)
if match_type in dictionary.matches:
match_type = dictionary.matches[match_type]
if match_type == 'matchExists':
return ' /* exists */'
elif match_type == 'matchEqual':
return ' = "' + str(self.parse_data()) + '"'
elif match_type == 'matchContains':
return ' ~ "' + str(self.parse_data()) + '"'
elif match_type == 'matchBeginsWith':
return ' = "' + str(self.parse_data()) + '*"'
elif match_type == 'matchEndsWith':
return ' = "*' + str(self.parse_data()) + '"'
elif match_type == 'matchLessThan':
return ' < ' + str(int(self.parse_data().encode('hex'), 16))
elif match_type == 'matchGreaterThan':
return ' > ' + str(int(self.parse_data().encode('hex'), 16))
elif match_type == 'matchLessEqual':
return ' <= ' + str(int(self.parse_data().encode('hex'), 16))
elif match_type == 'matchGreaterEqual':
return ' >= ' + str(int(self.parse_data().encode('hex'), 16))
else:
return ' UNKNOWN MATCH TYPE (' + str(match_type) + ')'
    def parse_expression(self, in_or):
        """Recursively render one code-signing requirement expression.

        `in_or` tells nested And/Or nodes whether to parenthesize.  The
        left-to-right evaluation order of the parse_data()/get_int() calls
        inside each concatenation is load-bearing: each call advances the
        file position, so the reads happen in blob order.
        """
        # Zero out flags in high byte
        operator = dictionary.operators[get_int(self.f) & 0xfff]
        expression = ''
        if operator == 'False':
            expression += 'never'
        elif operator == 'True':
            expression += 'always'
        elif operator == 'Ident':
            expression += 'identity "' + str(self.parse_data()) + '"'
        elif operator == 'AppleAnchor':
            expression += 'anchor apple'
        elif operator == 'AppleGenericAnchor':
            expression += 'anchor apple generic'
        elif operator == 'AnchorHash':
            cert_slot = get_int(self.f)
            if cert_slot in dictionary.cert_slots:
                cert_slot = dictionary.cert_slots[cert_slot]
            else:
                cert_slot = str(cert_slot)
            # NOTE(review): .encode('hex') is Python 2 only (here and below).
            expression += ('certificate ' + cert_slot + ' = ' +
                           str(self.parse_data().encode('hex')))
        elif operator == 'InfoKeyValue':
            expression += ('info[' + str(self.parse_data()) + '] = "' +
                           str(self.parse_data()) + '"')
        elif operator == 'And':
            if in_or:
                expression += ('(' + self.parse_expression(False) + ' and ' +
                               self.parse_expression(False) + ')')
            else:
                expression += (self.parse_expression(False) + ' and ' +
                               self.parse_expression(False))
        elif operator == 'Or':
            if in_or:
                expression += ('(' + self.parse_expression(True) + ' or ' +
                               self.parse_expression(True) + ')')
            else:
                expression += (self.parse_expression(True) + ' or ' +
                               self.parse_expression(True))
        elif operator == 'Not':
            expression += '! ' + self.parse_expression(False)
        elif operator == 'CDHash':
            expression += 'cdhash ' + str(self.parse_data().encode('hex'))
        elif operator == 'InfoKeyField':
            expression += ('info[' + str(self.parse_data()) + ']' +
                           self.parse_match())
        elif operator == 'EntitlementField':
            expression += ('entitlement[' + str(self.parse_data()) +
                           ']' + self.parse_match())
        elif operator == 'CertField':
            cert_slot = get_int(self.f)
            if cert_slot in dictionary.cert_slots:
                cert_slot = dictionary.cert_slots[cert_slot]
            else:
                cert_slot = str(cert_slot)
            expression += ('certificate ' + cert_slot + '[' +
                           str(self.parse_data()) + ']' + self.parse_match())
        elif operator == 'CertGeneric':
            cert_slot = get_int(self.f)
            if cert_slot in dictionary.cert_slots:
                cert_slot = dictionary.cert_slots[cert_slot]
            else:
                cert_slot = str(cert_slot)
            # Field is identified by a DER OID rather than a data blob.
            length = get_int(self.f)
            expression += ('certificate ' + cert_slot + '[field.' +
                           self.to_oid(length) + ']' + self.parse_match())
        elif operator == 'CertPolicy':
            cert_slot = get_int(self.f)
            if cert_slot in dictionary.cert_slots:
                cert_slot = dictionary.cert_slots[cert_slot]
            else:
                cert_slot = str(cert_slot)
            expression += ('certificate ' + cert_slot + '[policy.' +
                           str(self.parse_data()) + ']' + self.parse_match())
        elif operator == 'TrustedCert':
            cert_slot = get_int(self.f)
            if cert_slot in dictionary.cert_slots:
                cert_slot = dictionary.cert_slots[cert_slot]
            else:
                cert_slot = str(cert_slot)
            expression += 'certificate ' + cert_slot + ' trusted'
        elif operator == 'TrustedCerts':
            expression += 'anchor trusted'
        elif operator == 'NamedAnchor':
            expression += 'anchor apple ' + str(self.parse_data())
        elif operator == 'NamedCode':
            expression += '(' + str(self.parse_data()) + ')'
        elif operator == 'Platform':
            expression += 'platform = ' + str(get_int(self.f))
        # NOTE(review): `unicode` exists only in Python 2; this normalizes the
        # result to a unicode string, replacing undecodable bytes.
        if isinstance(expression, unicode):
            return expression
        else:
            return unicode(expression, errors='replace')
def parse_requirement(self, requirement, offset):
prev = self.f.tell()
true_offset = offset + requirement.offset
self.f.seek(true_offset)
magic = get_int(self.f)
if magic != dictionary.signatures['REQUIREMENT']:
data = {
'offset': true_offset,
'magic': hex(magic),
'expected': hex(dictionary.signatures['REQUIREMENT'])
}
a = Abnormality(title='BAD MAGIC - REQUIREMENT', data=data)
self.add_abnormality(a)
self.f.seek(prev)
return
# Skip size and kind
self.f.read(8)
requirement.expression = self.parse_expression(False)
self.f.seek(prev)
def parse_requirements(self, signature, offset):
prev = self.f.tell()
true_offset = signature.offset + offset
self.f.seek(true_offset)
magic = get_int(self.f)
if magic != dictionary.signatures['REQUIREMENTS']:
data = {
'offset': true_offset,
'magic': hex(magic),
'expected': hex(dictionary.signatures['REQUIREMENTS'])
}
a = Abnormality(title='BAD MAGIC - REQUIREMENTS', data=data)
self.add_abnormality(a)
self.f.seek(prev)
return
# Skip size
self.f.read(4)
count = get_int(self.f)
while count > 0:
req_type = dictionary.requirements[get_int(self.f)]
offset = get_int(self.f)
requirement = Requirement(req_type=req_type, offset=offset)
self.parse_requirement(requirement, true_offset)
signature.add_requirement(requirement)
count -= 1
self.f.seek(prev)
def parse_sig(self, macho):
if not macho.has_lc('CODE_SIGNATURE'):
return
prev = self.f.tell()
true_offset = (macho.offset +
macho.get_lc('CODE_SIGNATURE').data['offset'])
if true_offset >= self.file.size:
data = {
'offset': true_offset,
'file_size': self.file.size
}
a = Abnormality(title='CODE_SIGNATURE OUT OF BOUNDS', data=data)
self.add_abnormality(a)
return
self.f.seek(true_offset)
magic = get_int(self.f)
if magic != dictionary.signatures['EMBEDDED_SIGNATURE']:
data = {
'offset': true_offset,
'magic': hex(magic),
'expected': hex(dictionary.signatures['EMBEDDED_SIGNATURE'])
}
a = Abnormality(title='BAD MAGIC - EMBEDDED_SIGNATURE', data=data)
self.add_abnormality(a)
self.f.seek(prev)
return
size = get_int(self.f)
count = get_int(self.f)
signature = Signature(offset=true_offset, size=size, count=count)
while count > 0:
index_type = get_int(self.f)
try:
index_type = dictionary.indeces[index_type]
except:
data = {
'offset': self.f.tell() - 4,
'index_type': index_type
}
a = Abnormality(title='INVALID CODE_SIGNATURE INDEX_TYPE',
data=data)
self.add_abnormality(a)
offset = get_int(self.f)
if index_type == 'SignatureSlot':
self.parse_certs(signature, offset)
elif index_type == 'CodeDirectorySlot':
self.parse_codedirectory(signature, offset)
elif index_type == 'EntitlementSlot':
self.parse_entitlement(signature, offset)
elif index_type == 'RequirementsSlot':
self.parse_requirements(signature, offset)
count -= 1
macho.signature = signature
self.f.seek(prev)
    def parse_macho(self, macho):
        """Parse one mach-o header and everything it references.

        Decodes cputype/subtype/filetype/flags (byte-swapping for
        little-endian slices), then hands off to LoadCommander for load
        commands and to the symbol/string/signature parsers.
        """
        self.f.seek(macho.offset)
        # skip magic
        self.f.read(4)
        cputype = get_int(self.f)
        subtype = get_int(self.f)
        filetype = get_int(self.f)
        nlcs = get_int(self.f)
        slcs = get_int(self.f)
        flags = get_int(self.f)
        if macho.is_64_bit():
            # skip padding
            self.f.read(4)
        if macho.is_little():
            # Header fields were read big-endian; swap each 32-bit value.
            cputype = little(cputype, 'I')
            subtype = little(subtype, 'I')
            filetype = little(filetype, 'I')
            nlcs = little(nlcs, 'I')
            slcs = little(slcs, 'I')
            flags = little(flags, 'I')
        # NOTE(review): the three bare excepts below swallow any error, not
        # just a failed dictionary lookup -- presumably KeyError/IndexError
        # is intended; confirm before narrowing.
        try:
            cpu = dictionary.cputypes[cputype][-2]
        except:
            # Unknown CPU: keep the raw numeric value and record it.
            cpu = cputype
            data = {
                'offset': macho.offset + 4,
                'cputype': cputype
            }
            a = Abnormality(title='UNKNOWN CPUTYPE', data=data)
            self.add_abnormality(a)
        try:
            subtype = dictionary.cputypes[cputype][subtype]
        except:
            data = {
                'offset': macho.offset + 8,
                'cputype': cputype,
                'subtype': subtype
            }
            a = Abnormality(title='UNKNOWN SUBTYPE', data=data)
            self.add_abnormality(a)
        try:
            filetype = dictionary.filetypes[filetype]
        except:
            data = {
                'offset': macho.offset + 12,
                'filetype': filetype
            }
            a = Abnormality(title='UNKNOWN FILETYPE', data=data)
            self.add_abnormality(a)
        flags = self.list_macho_flags(flags)
        macho.cputype = cpu
        macho.subtype = subtype
        macho.filetype = filetype
        macho.nlcs = nlcs
        macho.slcs = slcs
        macho.flags = flags
        lc = LoadCommander(f=self.f, macho=macho, file_size=self.file.size)
        lc.parse_lcs()
        self.abnormalities += lc.abnormalities
        # Need to investigate whether the presence of a
        # symbol/string table is expected and whether the
        # abscence is indicative of shenanigans.
        if macho.has_lc('SYMTAB'):
            self.parse_syms(macho)
            self.parse_imports_and_strings(macho)
        if macho.has_lc('CODE_SIGNATURE'):
            self.parse_sig(macho)
        # For a thin (non-fat) binary this mach-o IS the file content;
        # archive members are attached to their Universal container instead.
        if not macho.is_archive():
            self.file.content = macho
def parse_universal(self):
self.f.seek(0)
# skip magic
self.f.read(4)
nmachos = get_int(self.f)
u = Universal(nmachos=nmachos)
u_size = self.file.size
for i in range(u.nmachos):
# skip cputype, subtype
self.f.read(8)
offset = get_int(self.f)
size = get_int(self.f)
# Abnormality OUT_OF_BOUNDS check
if offset + size > u_size:
data = {
'offset': offset,
'size': size,
'file_size': u_size
}
a = Abnormality(title='MACH-O OUT OF BOUNDS', data=data)
self.add_abnormality(a)
continue
# skip align
self.f.read(4)
identity = self.identify_file(offset)
# Abnormality BAD_MAGIC check
if identity not in dictionary.machos.values():
data = {
'offset': offset,
'magic': identity,
}
a = Abnormality(title='BAD MAGIC - MACH-O')
self.add_abnormality(a)
continue
u.add_macho(MachO(archive=True, offset=offset, arch=identity[0],
endi=identity[1], size=size))
for i in u.gen_machos():
self.parse_macho(i)
self.file.content = u
def parse_file(self):
size = self.get_file_size()
hashes = self.get_file_hashes()
self.file.size = size
self.file.hashes = hashes
identity = self.identify_file(0)
if identity == 'universal':
self.parse_universal()
else:
self.parse_macho(MachO(archive=False, offset=0, arch=identity[0],
endi=identity[1], size=self.get_file_size()))
| own2pwn/macholibre | src/macholibre/parser.py | parser.py | py | 31,156 | python | en | code | null | github-code | 36 |
36494224748 | import os
import json
import platform
from os import path
from time import sleep
import winsound
from win10toast import ToastNotifier
toaster = ToastNotifier()  # single shared Windows toast notifier
# assets
APP_ICO = path.join("assets","app.ico")
COFFEE_ICO = path.join("assets","coffee.ico")
TAUNT_WAV= path.join("assets","taunt.wav")
# Module-level annotation only -- JSDATA is never assigned or read here.
JSDATA:dict
def load_json():
    """Return the settings stored in appdata.json as a dict."""
    with open("appdata.json") as settings_file:
        contents = settings_file.read()
    return json.loads(contents)
def update_json(data:dict):
    """Overwrite appdata.json with *data*, pretty-printed (indent=2)."""
    serialized = json.dumps(data, indent=2)
    with open("appdata.json", "w") as settings_file:
        settings_file.write(serialized)
#notifier
def _notify(msg, icon=COFFEE_ICO, title=None, Soundfile=TAUNT_WAV):
    """Show a Windows toast with *msg*, then optionally play *Soundfile*.

    A falsy *title* falls back to "Notification"; a falsy *Soundfile*
    suppresses the sound.
    """
    toaster.show_toast(
        title=title or "Notification",
        msg=msg,
        icon_path=icon,
        threaded=True,
    )
    if Soundfile:
        winsound.PlaySound(Soundfile, flags=winsound.SND_FILENAME)
def sed_alert():
    """Repeatedly notify the user at the configured interval.

    Re-reads appdata.json each cycle so toggling 'sedentary_alert' off
    stops the loop.  The original implementation recursed after every
    notification and would eventually hit Python's recursion limit; this
    version loops instead, with identical per-cycle behavior.
    """
    while True:
        settings = load_json()
        if not settings['sedentary_alert']:
            return
        # NOTE(review): interval * 6 yields seconds; if "interval" is meant
        # to be minutes this should probably be * 60 -- confirm.
        sleep(settings["interval"] * 6)
        _notify(msg="Blink Your eyes")
| roshansai24081/Sedentary-alert | _app.py | _app.py | py | 1,172 | python | en | code | 0 | github-code | 36 |
14126041012 | from re import split
from itertools import zip_longest
from more_itertools import windowed
from pyperclip import copy as ctrl_C
Lboth = []
# Parse both the sample and real inputs into (gridLR, gridUD, insts) triples.
for filename in ["input/in22_test.txt", "input/in22_real.txt"]:
    with open(filename,"r") as infile:
        gridstr,inststr = infile.read().split('\n\n')
    # gridLR: per row, the (column, char) pairs that are not blank padding.
    gridLR = [list(filter(lambda p: p[1] != ' ', enumerate(row))) for row in gridstr.split('\n')]
    # gridUD: the same, but per column (rows transposed, space-padded).
    gridUD = [list(filter(lambda p: p[1] != ' ', enumerate(row))) for row in zip_longest(*gridstr.split('\n'), fillvalue = ' ')]
    # insts: (turn, steps) pairs, turn = +1 for 'R', -1 for 'L'.
    insts = [(1 if s[0] == 'R' else -1, int(s[1:])) for s in split('(?=R|L)',inststr)[1:]] #start facing up, not right
    L = gridLR, gridUD, insts
    Lboth.append(L)
Ltest, Lreal = Lboth
def create_dict(gridLR, gridUD):
move = {} #pos: [pos to R, pos to D, pos to L, pos to U]
for i,row in enumerate(gridLR):
rowdict = {(i,j): [(i,jR) if cR == '.' else (i,j), None, (i,jL) if cL == '.' else (i,j), None] for (jL,cL),(j,c),(jR,cR) in windowed(row + row[:2],3) if c == '.'}
move |= rowdict
for j,col in enumerate(gridUD):
for (iU,cU),(i,c),(iD,cD) in windowed(col + col[:2],3):
if c == '.':
move[(i,j)][1] = (iD,j) if cD == '.' else (i,j)
move[(i,j)][3] = (iU,j) if cU == '.' else (i,j)
return move
def day22_part1(gridLR, gridUD, insts):
    """Walk the wrapping grid per the instructions; return the password."""
    move = create_dict(gridLR, gridUD)
    # Headings: 0=right, 1=down, 2=left, 3=up.  Start "up" so the first
    # 'R' instruction turns to facing right.
    heading = 3
    pos = (0, gridLR[0][0][0])
    for turn, steps in insts:
        heading = (heading + turn) % 4
        for _ in range(steps):
            pos = move[pos][heading]
    return 1000 * (pos[0] + 1) + 4 * (pos[1] + 1) + heading
def create_dict_cube(gridLR, gridUD):
    """Build the part-2 movement table for the hard-coded cube net.

    Maps each open (i, j) to [right, down, left, up] entries of the form
    ((i2, j2), new_heading).  Within-face moves keep the heading; the
    per-edge loops below stitch the seven seams of this specific 50x50
    net, rotating the heading as the walker crosses a fold.
    NOTE(review): only valid for the net sketched below (the real puzzle
    input); the sample input folds differently.
    """
    #cube shape:
    # UR
    # F
    # LD
    # B
    #
    # R turned 90
    # L turned 90
    # B turned 90
    free = {(i,j): c == '.' for i,row in enumerate(gridLR) for j,c in row}
    Mf = 50
    move = {p: [None, None, None, None] for p, B in free.items() if B}
    # Ordinary neighbors: same face (or across a flat seam), heading kept.
    # A '#' neighbor blocks the move (stay in place); a missing neighbor
    # (off the net) is left as None and filled by the seam loops below.
    for i,j in move.keys():
        Rp = (i,j+1)
        if Rp in free:
            if free[Rp]:
                R = (Rp, 0)
            else:
                R = ((i,j), 0)
        else:
            R = None
        Dp = (i+1,j)
        if Dp in free:
            if free[Dp]:
                D = (Dp, 1)
            else:
                D = ((i,j), 1)
        else:
            D = None
        Lp = (i,j-1)
        if Lp in free:
            if free[Lp]:
                L = (Lp, 2)
            else:
                L = ((i,j), 2)
        else:
            L = None
        Up = (i-1,j)
        if Up in free:
            if free[Up]:
                U = (Up, 3)
            else:
                U = ((i,j), 3)
        else:
            U = None
        move[(i,j)] = [R, D, L, U]
    # Each seam loop pairs boundary cells p and q; a '#' on either side
    # turns the crossing into a blocked move (stay, heading unchanged).
    # U left -> left L
    for i in range(0,Mf):
        p = (i, Mf)
        q = (3*Mf - 1 - i, 0)
        if free[p] and free[q]:
            move[p][2] = (q, 0)
            move[q][2] = (p, 0)
        elif free[p]:
            move[p][2] = (p, 2)
        elif free[q]:
            move[q][2] = (q, 2)
    # U up -> left B
    for j in range(Mf,2*Mf):
        p = (0, j)
        q = (j + 2*Mf, 0)
        if free[p] and free[q]:
            move[p][3] = (q, 0)
            move[q][2] = (p, 1)
        elif free[p]:
            move[p][3] = (p, 3)
        elif free[q]:
            move[q][2] = (q, 2)
    # R up -> bottom B
    for j in range(2*Mf,3*Mf):
        p = (0, j)
        q = (4*Mf - 1, j - 2*Mf)
        if free[p] and free[q]:
            move[p][3] = (q, 3)
            move[q][1] = (p, 1)
        elif free[p]:
            move[p][3] = (p, 3)
        elif free[q]:
            move[q][1] = (q, 1)
    # R right -> right D
    for i in range(0,Mf):
        p = (i, 3*Mf - 1)
        q = (3*Mf - 1 - i, 2*Mf - 1)
        if free[p] and free[q]:
            move[p][0] = (q, 2)
            move[q][0] = (p, 2)
        elif free[p]:
            move[p][0] = (p, 0)
        elif free[q]:
            move[q][0] = (q, 0)
    # R down -> right F
    for j in range(2*Mf,3*Mf):
        p = (Mf - 1, j)
        q = (j - Mf, 2*Mf - 1)
        if free[p] and free[q]:
            move[p][1] = (q, 2)
            move[q][0] = (p, 3)
        elif free[p]:
            move[p][1] = (p, 1)
        elif free[q]:
            move[q][0] = (q, 0)
    # F left -> top L
    for i in range(Mf,2*Mf):
        p = (i, Mf)
        q = (2*Mf, i - Mf)
        if free[p] and free[q]:
            move[p][2] = (q, 1)
            move[q][3] = (p, 0)
        elif free[p]:
            move[p][2] = (p, 2)
        elif free[q]:
            move[q][3] = (q, 3)
    # D down -> right B
    for j in range(Mf,2*Mf):
        p = (3*Mf - 1, j)
        q = (j + 2*Mf, Mf - 1)
        if free[p] and free[q]:
            move[p][1] = (q, 2)
            move[q][0] = (p, 3)
        elif free[p]:
            move[p][1] = (p, 1)
        elif free[q]:
            move[q][0] = (q, 0)
    return move
def day22_part2(gridLR, gridUD, insts):
    """Walk the cube-folded grid; return the password (row/col/facing)."""
    move = create_dict_cube(gridLR, gridUD)
    # Same heading convention as part 1; moves now also rotate the heading
    # when a cube seam is crossed, so each step updates both.
    heading = 3
    pos = (0, gridLR[0][0][0])
    for turn, steps in insts:
        heading = (heading + turn) % 4
        for _ in range(steps):
            pos, heading = move[pos][heading]
    return 1000 * (pos[0] + 1) + 4 * (pos[1] + 1) + heading
result_test_1 = day22_part1(*Ltest)
result_real_1 = day22_part1(*Lreal)
print(result_real_1)
print(result_test_1)
# Copy the answer to the clipboard; pyperclip can fail on headless systems.
try:
    ctrl_C(result_real_1)
except:
    print("cannot copy result")
# Part 2's fold table is hard-coded for the real input's cube net, so it
# is only run against the real data.
result_real_2 = day22_part2(*Lreal)
if result_real_2 is not None:
    print()
    print(result_real_2)
    try:
        ctrl_C(result_real_2)
    except:
        print("cannot copy result")
def frequences(str):
    """Count occurrences of each space-separated word in *str*.

    Words are maximal runs of non-space characters (only ' ' delimits;
    other whitespace is part of a word, matching the original behavior).
    Returns a {word: count} dict; empty input gives {}.
    """
    # NOTE: the parameter name shadows the builtin `str`; kept for
    # backward compatibility with keyword callers.
    counts = {}
    for word in str.split(' '):
        if word:  # skip empty fragments from leading/trailing/double spaces
            counts[word] = counts.get(word, 0) + 1
    return counts
def plus_frequents(str):
    """Return the most frequent word(s) of *str*, each followed by a space.

    Ties are all included, in first-occurrence order.  Empty input yields
    the empty string.
    """
    counts = frequences(str)
    best = 0
    winners = ''
    for word, count in counts.items():
        if count > best:
            # New maximum: discard previous winners.
            best = count
            winners = word + ' '
        elif count == best:
            winners += word + ' '
    return winners
# Demo run: prints every word tied for the highest count ("do jjj " here).
print(plus_frequents('do do mazen mazen mi fa jjj do jjj a jjj'))
| Mazen2378/.config | aymen/serie3/ex1.py | ex1.py | py | 620 | python | en | code | 0 | github-code | 36 |
335981878 | import os, wx, atexit
class StickyNotes():
    """Model for the note texts, persisted to sn_save.txt.

    Notes are stored as a single '///%'-separated string on disk.
    """

    def __init__(self):
        # save_exists is set but never consulted anywhere in this class.
        self.save_exists = False
        self.list = [""]        # note texts loaded from disk (one per frame)
        self.list_to_save = []  # note texts collected as frames close

    def run(self):
        """Load saved notes and open one StickyFrame per note."""
        self.check_file()
        for line in self.list:
            frame = StickyFrame(None, 'Sticky Note', line)

    def check_file(self):
        """Ensure sn_save.txt exists; load notes from it if non-empty."""
        if not os.path.exists("sn_save.txt"):
            # Create an empty save file.  Context managers close the handles
            # that the original code leaked via file()/open().read().
            with open("sn_save.txt", "w+"):
                pass
        if os.stat("sn_save.txt").st_size != 0:
            with open("sn_save.txt") as save_file:
                self.list = save_file.read().split("///%")

    def exit_save(self):
        """Write the collected note texts back to sn_save.txt."""
        to_save = "///%".join(self.list_to_save)
        with open('sn_save.txt', 'w') as file_:
            file_.write(to_save)
class StickyFrame(wx.Frame):
    """A single sticky-note window: a multi-line text box plus a File menu."""

    def __init__(self, parent, title, words=""):
        wx.Frame.__init__(self, parent, title=title, size=(400,300))
        self.control = wx.TextCtrl(self, style=wx.TE_MULTILINE, value=words)
        self.Show(True)
        filemenu= wx.Menu()
        # NOTE(review): wx.ID_ABOUT is (re)used for the "New Note" item.
        menuAbout = filemenu.Append(wx.ID_ABOUT,"&New Note","Opens a New Note")
        menuExit = filemenu.Append(wx.ID_EXIT,"Save and E&xit"," Terminate the program")
        menuBar = wx.MenuBar()
        menuBar.Append(filemenu,"&File") # Adding the "filemenu" to the MenuBar
        self.SetMenuBar(menuBar) # Adding the MenuBar to the Frame content.
        self.Bind(wx.EVT_MENU, self.OnExit, menuExit)
        self.Bind(wx.EVT_MENU, self.OnAbout, menuAbout)
        self.Show(True)  # NOTE(review): Show(True) was already called above.

    def OnExit(self,e):
        # Stash this note's text on the module-level StickyNotes instance `s`
        # so that s.exit_save() persists it at interpreter exit.
        s.list_to_save.append(self.control.Value)
        self.Close(True) # Close the frame.

    def OnAbout(self,e):
        # Despite the handler name, this opens a fresh empty note window.
        frame = StickyFrame(None, 'Sticky Note')
app = wx.App(False)  # wx app must exist before any frames are created
s = StickyNotes()    # module global: StickyFrame.OnExit appends to it
s.run()
app.MainLoop()       # blocks until all note windows are closed
atexit.register(s.exit_save)  # persist collected notes at interpreter exit
| Ghrehh/stickynotes | stickynotes.py | stickynotes.py | py | 1,750 | python | en | code | 0 | github-code | 36 |
5571273421 | from django.contrib import admin
from django.contrib.admin.options import TabularInline
from apps.info_section_app.models import SimilarLike, SimilarDislike, SimilarTitle, \
Favorite, RelatedTitle
class SimilarLikeAdminInLine(TabularInline):
    # Inline editor for SimilarLike rows on the SimilarTitle admin page.
    # NOTE(review): spelled "InLine" while the dislike class uses "Inline";
    # inconsistent, but renaming would touch its reference below.
    extra = 1
    model = SimilarLike
class SimilarDislikeAdminInline(TabularInline):
    # Inline editor for SimilarDislike rows on the SimilarTitle admin page.
    extra = 1
    model = SimilarDislike
@admin.register(SimilarTitle)
class RestaurantModelAdmin(admin.ModelAdmin):
    # Admin for SimilarTitle with like/dislike inline editors.
    # NOTE(review): class name says "Restaurant" but it registers
    # SimilarTitle -- likely copied from another project; verify before
    # renaming.
    inlines = (SimilarDislikeAdminInline, SimilarLikeAdminInLine)
# Plain registrations using the default ModelAdmin.
admin.site.register(Favorite)
admin.site.register(RelatedTitle)
| urmatovnaa/Manga-universe | apps/info_section_app/admin.py | admin.py | py | 592 | python | en | code | 0 | github-code | 36 |
34696107762 | #!/usr/bin/env python3
# coding : utf-8
# @author : Francis.zz
# @date : 2023-07-28 15:30
# @desc : 使用网页的session key访问chat gpt
from revChatGPT.V1 import Chatbot
import json
"""
使用 `pip install --upgrade revChatGPT` 安装依赖包
使用文档说明:https://github.com/CoolPlayLin/ChatGPT-Wiki/blob/master/docs/ChatGPT/V1.md
1. 可以使用用户名密码、session_token或者access_token 3种方式访问,但是不能同时存在
config参数:
{
"email" - "OpenAI 账户邮箱",
"password" - "OpenAI 账户密码",
"session_token" - "<session_token>"
"access_token" - "<access_token>"
"proxy" - "<proxy_url_string>",
"paid" - True/False #是不是plus帐户
}
2. 用户名密码方式不支持谷歌和微软账号注册的
3. https://chat.openai.com/api/auth/session 获取access_token。
在chat.openai.com的 cookie 中找到__Secure-next-auth.session-token。access_token和session-token使用一个就行了
"""
# Build the bot from a local auth-config JSON (session/access token, proxy).
# NOTE(review): the file handle returned by open() is never closed.
chatbot = Chatbot(config=json.load(open("D:\\qiyu-work\\chatgpt_auth.json")))
def start_chat():
    """Interactive REPL: forward each prompt to ChatGPT, print the reply."""
    print('Welcome to ChatGPT CLI')
    while True:
        prompt = input('> ')
        reply = ""
        # ask() streams partial messages; keep only the final complete one.
        for chunk in chatbot.ask(prompt):
            reply = chunk["message"]
        print(reply)
if __name__ == "__main__":
    # Run the interactive REPL only when executed as a script.
    start_chat()
| zzfengxia/python3-learn | gpt/chatgpt_conversion.py | chatgpt_conversion.py | py | 1,355 | python | zh | code | 0 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.