| body (string, 26-98.2k chars) | body_hash (int64, -9,222,864,604,528,158,000 to 9,221,803,474B) | docstring (string, 1-16.8k chars) | path (string, 5-230 chars) | name (string, 1-96 chars) | repository_name (string, 7-89 chars) | lang (string, 1 class: python) | body_without_docstring (string, 20-98.2k chars) |
|---|---|---|---|---|---|---|---|
@classmethod
def trade_message_from_db(cls, record: RowProxy, metadata: Optional[Dict]=None):
'\n *used for backtesting\n Convert a row of trade data into standard OrderBookMessage format\n :param record: a row of trade data from the database\n :return: BinarzOrderBookMessage\n '
return BinarzOrderBookMessage(message_type=OrderBookMessageType.TRADE, content=record.json, timestamp=record.timestamp)
| -5,456,147,756,478,211,000
|
*used for backtesting
Convert a row of trade data into standard OrderBookMessage format
:param record: a row of trade data from the database
:return: BinarzOrderBookMessage
|
hummingbot/connector/exchange/binarz/binarz_order_book.py
|
trade_message_from_db
|
amirhosein-fasihi/hummingbot
|
python
|
@classmethod
def trade_message_from_db(cls, record: RowProxy, metadata: Optional[Dict]=None):
'\n *used for backtesting\n Convert a row of trade data into standard OrderBookMessage format\n :param record: a row of trade data from the database\n :return: BinarzOrderBookMessage\n '
return BinarzOrderBookMessage(message_type=OrderBookMessageType.TRADE, content=record.json, timestamp=record.timestamp)
|
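A minimal usage sketch for this row's classmethod, assuming the hummingbot connector is importable; the `Row` namedtuple below is a stand-in for the SQLAlchemy `RowProxy` the real query yields and is not part of the library:

from collections import namedtuple

# Stand-in for the SQLAlchemy RowProxy rows returned by the trade-history
# query; only the two attributes the classmethod reads are modelled.
Row = namedtuple('Row', ['json', 'timestamp'])

record = Row(json={'trade_type': 'BUY', 'price': '101.5', 'amount': '0.2'},
             timestamp=1609459200.0)

# In real use the connector class would be imported and called like this:
# from hummingbot.connector.exchange.binarz.binarz_order_book import BinarzOrderBook
# message = BinarzOrderBook.trade_message_from_db(record)
# assert message.timestamp == record.timestamp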
def index():
'\n Module Home Page\n - provide the list of currently-Active Problems\n '
redirect(URL(f='problem'))
module_name = settings.modules[module].name_nice
table = s3db.delphi_group
groups = db((table.active == True)).select()
result = []
for group in groups:
actions = []
duser = s3db.delphi_DelphiUser(group)
if duser.authorised:
actions.append((('group/%d/update' % group.id), T('Edit')))
actions.append((('new_problem/create/?group=%s&next=%s' % (group.id, URL(f='group_summary', args=group.id))), 'Add New Problem'))
actions.append((('group_summary/%s/#request' % group.id), T('Review Requests')))
else:
actions.append((('group_summary/%s/#request' % group.id), ('Role: %s%s' % (duser.status, (((duser.membership and duser.membership.req) and '*') or '')))))
table = s3db.delphi_problem
query = ((table.group_id == group.id) & (table.active == True))
latest_problems = db(query).select(orderby=(~ table.modified_on))
result.append((group, latest_problems, actions))
response.title = module_name
return dict(groups_problems=result, name=T('Active Problems'), module_name=module_name)
| 1,710,652,710,227,107,600
|
Module Home Page
- provide the list of currently-Active Problems
|
controllers/delphi.py
|
index
|
Code4SierraLeone/shdms
|
python
|
def index():
'\n Module Home Page\n - provide the list of currently-Active Problems\n '
redirect(URL(f='problem'))
module_name = settings.modules[module].name_nice
table = s3db.delphi_group
groups = db((table.active == True)).select()
result = []
for group in groups:
actions = []
duser = s3db.delphi_DelphiUser(group)
if duser.authorised:
actions.append((('group/%d/update' % group.id), T('Edit')))
actions.append((('new_problem/create/?group=%s&next=%s' % (group.id, URL(f='group_summary', args=group.id))), 'Add New Problem'))
actions.append((('group_summary/%s/#request' % group.id), T('Review Requests')))
else:
actions.append((('group_summary/%s/#request' % group.id), ('Role: %s%s' % (duser.status, (((duser.membership and duser.membership.req) and '*') or '')))))
table = s3db.delphi_problem
query = ((table.group_id == group.id) & (table.active == True))
latest_problems = db(query).select(orderby=(~ table.modified_on))
result.append((group, latest_problems, actions))
response.title = module_name
return dict(groups_problems=result, name=T('Active Problems'), module_name=module_name)
|
def group_rheader(r, tabs=[]):
' Group rheader '
if (r.representation == 'html'):
if (r.record is None):
return None
tabs = [(T('Basic Details'), None), (T('Problems'), 'problem')]
group = r.record
duser = s3db.delphi_DelphiUser(group.id)
if duser.authorised:
tabs.append((T('Membership'), 'membership'))
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH(('%s: ' % T('Group'))), group.name), TR(TH(('%s: ' % T('Description'))), group.description), TR(TH(('%s: ' % T('Active'))), group.active)), rheader_tabs)
return rheader
| 2,477,566,475,082,201,000
|
Group rheader
|
controllers/delphi.py
|
group_rheader
|
Code4SierraLeone/shdms
|
python
|
def group_rheader(r, tabs=[]):
' '
if (r.representation == 'html'):
if (r.record is None):
return None
tabs = [(T('Basic Details'), None), (T('Problems'), 'problem')]
group = r.record
duser = s3db.delphi_DelphiUser(group.id)
if duser.authorised:
tabs.append((T('Membership'), 'membership'))
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH(('%s: ' % T('Group'))), group.name), TR(TH(('%s: ' % T('Description'))), group.description), TR(TH(('%s: ' % T('Active'))), group.active)), rheader_tabs)
return rheader
|
def group():
' Problem Group REST Controller '
if (not s3_has_role('DelphiAdmin')):
s3db.configure('delphi_group', deletable=False)
def prep(r):
if r.interactive:
if r.component:
tablename = r.component.tablename
list_fields = s3db.get_config(tablename, 'list_fields')
try:
list_fields.remove('group_id')
except:
pass
s3db.configure(tablename, deletable=s3_has_role('DelphiAdmin'), list_fields=list_fields)
return True
s3.prep = prep
rheader = group_rheader
return s3_rest_controller(rheader=rheader, native=True)
| -3,236,655,830,655,958,000
|
Problem Group REST Controller
|
controllers/delphi.py
|
group
|
Code4SierraLeone/shdms
|
python
|
def group():
' '
if (not s3_has_role('DelphiAdmin')):
s3db.configure('delphi_group', deletable=False)
def prep(r):
if r.interactive:
if r.component:
tablename = r.component.tablename
list_fields = s3db.get_config(tablename, 'list_fields')
try:
list_fields.remove('group_id')
except:
pass
s3db.configure(tablename, deletable=s3_has_role('DelphiAdmin'), list_fields=list_fields)
return True
s3.prep = prep
rheader = group_rheader
return s3_rest_controller(rheader=rheader, native=True)
|
def problem_rheader(r, tabs=[]):
' Problem rheader '
if (r.representation == 'html'):
if (r.record is None):
return None
problem = r.record
tabs = [(T('Problems'), 'problems'), (T('Solutions'), 'solution'), (T('Discuss'), 'discuss'), (T('Vote'), 'vote'), (T('Scale of Results'), 'results')]
duser = s3db.delphi_DelphiUser(problem.group_id)
if duser.authorised:
tabs.append((T('Edit'), None))
rheader_tabs = s3_rheader_tabs(r, tabs)
rtable = TABLE(TR(TH(('%s: ' % T('Problem'))), problem.name, TH(('%s: ' % T('Active'))), problem.active), TR(TH(('%s: ' % T('Description'))), problem.description), TR(TH(('%s: ' % T('Criteria'))), problem.criteria))
if (r.component and (r.component_name == 'solution') and r.component_id):
stable = s3db.delphi_solution
query = (stable.id == r.component_id)
solution = db(query).select(stable.name, stable.description, limitby=(0, 1)).first()
rtable.append(DIV(TR(TH(('%s: ' % T('Solution'))), solution.name), TR(TH(('%s: ' % T('Description'))), solution.description)))
rheader = DIV(rtable, rheader_tabs)
return rheader
| 6,364,680,731,863,635,000
|
Problem rheader
|
controllers/delphi.py
|
problem_rheader
|
Code4SierraLeone/shdms
|
python
|
def problem_rheader(r, tabs=[]):
' '
if (r.representation == 'html'):
if (r.record is None):
return None
problem = r.record
tabs = [(T('Problems'), 'problems'), (T('Solutions'), 'solution'), (T('Discuss'), 'discuss'), (T('Vote'), 'vote'), (T('Scale of Results'), 'results')]
duser = s3db.delphi_DelphiUser(problem.group_id)
if duser.authorised:
tabs.append((T('Edit'), None))
rheader_tabs = s3_rheader_tabs(r, tabs)
rtable = TABLE(TR(TH(('%s: ' % T('Problem'))), problem.name, TH(('%s: ' % T('Active'))), problem.active), TR(TH(('%s: ' % T('Description'))), problem.description), TR(TH(('%s: ' % T('Criteria'))), problem.criteria))
if (r.component and (r.component_name == 'solution') and r.component_id):
stable = s3db.delphi_solution
query = (stable.id == r.component_id)
solution = db(query).select(stable.name, stable.description, limitby=(0, 1)).first()
rtable.append(DIV(TR(TH(('%s: ' % T('Solution'))), solution.name), TR(TH(('%s: ' % T('Description'))), solution.description)))
rheader = DIV(rtable, rheader_tabs)
return rheader
|
def problem():
' Problem REST Controller '
tablename = ('%s_%s' % (module, resourcename))
table = s3db[tablename]
set_method = s3db.set_method
set_method(module, resourcename, method='problems', action=problems)
set_method(module, resourcename, method='discuss', action=discuss)
set_method(module, resourcename, component_name='solution', method='discuss', action=discuss)
set_method(module, resourcename, method='vote', action=vote)
set_method(module, resourcename, method='results', action=results)
s3.filter = (table.active == True)
if (not s3_has_role('DelphiAdmin')):
s3db.configure(tablename, deletable=False)
def prep(r):
if r.interactive:
if r.record:
duser = s3db.delphi_DelphiUser(r.record.group_id)
if duser.authorised:
s3db.configure(tablename, deletable=True)
if (r.component_name == 'solution'):
r.component.table.modified_on.label = T('Last Updated')
s3db.configure(r.component.tablename, deletable=duser.authorised)
return True
s3.prep = prep
def postp(r, output):
if r.interactive:
if (not r.component):
s3.actions = [dict(label=str(T('Solutions')), _class='action-btn', url=URL(args=['[id]', 'solution'])), dict(label=str(T('Vote')), _class='action-btn', url=URL(args=['[id]', 'vote']))]
elif (r.component_name == 'solution'):
s3.actions = [dict(label=str(T('Discuss')), _class='action-btn', url=URL(args=[r.id, 'solution', '[id]', 'discuss']))]
return output
s3.postp = postp
rheader = problem_rheader
return s3_rest_controller(rheader=rheader)
| -7,876,399,767,762,272,000
|
Problem REST Controller
|
controllers/delphi.py
|
problem
|
Code4SierraLeone/shdms
|
python
|
def problem():
' '
tablename = ('%s_%s' % (module, resourcename))
table = s3db[tablename]
set_method = s3db.set_method
set_method(module, resourcename, method='problems', action=problems)
set_method(module, resourcename, method='discuss', action=discuss)
set_method(module, resourcename, component_name='solution', method='discuss', action=discuss)
set_method(module, resourcename, method='vote', action=vote)
set_method(module, resourcename, method='results', action=results)
s3.filter = (table.active == True)
if (not s3_has_role('DelphiAdmin')):
s3db.configure(tablename, deletable=False)
def prep(r):
if r.interactive:
if r.record:
duser = s3db.delphi_DelphiUser(r.record.group_id)
if duser.authorised:
s3db.configure(tablename, deletable=True)
if (r.component_name == 'solution'):
r.component.table.modified_on.label = T('Last Updated')
s3db.configure(r.component.tablename, deletable=duser.authorised)
return True
s3.prep = prep
def postp(r, output):
if r.interactive:
if (not r.component):
s3.actions = [dict(label=str(T('Solutions')), _class='action-btn', url=URL(args=['[id]', 'solution'])), dict(label=str(T('Vote')), _class='action-btn', url=URL(args=['[id]', 'vote']))]
elif (r.component_name == 'solution'):
s3.actions = [dict(label=str(T('Discuss')), _class='action-btn', url=URL(args=[r.id, 'solution', '[id]', 'discuss']))]
return output
s3.postp = postp
rheader = problem_rheader
return s3_rest_controller(rheader=rheader)
|
def problems(r, **attr):
'\n Redirect to the list of Problems for the Group\n - used for a Tab\n '
try:
group_id = r.record.group_id
except:
raise HTTP(400)
else:
redirect(URL(f='group', args=[group_id, 'problem']))
| -3,807,189,094,633,276,400
|
Redirect to the list of Problems for the Group
- used for a Tab
|
controllers/delphi.py
|
problems
|
Code4SierraLeone/shdms
|
python
|
def problems(r, **attr):
'\n Redirect to the list of Problems for the Group\n - used for a Tab\n '
try:
group_id = r.record.group_id
except:
raise HTTP(400)
else:
redirect(URL(f='group', args=[group_id, 'problem']))
|
def solution():
'\n Used for Imports\n '
return s3_rest_controller()
| -6,854,097,583,476,038,000
|
Used for Imports
|
controllers/delphi.py
|
solution
|
Code4SierraLeone/shdms
|
python
|
def solution():
'\n \n '
return s3_rest_controller()
|
def vote(r, **attr):
'\n Custom Method to allow Voting on Solutions to a Problem\n '
problem = r.record
duser = s3db.delphi_DelphiUser(problem.group_id)
rheader = problem_rheader(r)
stable = s3db.delphi_solution
query = (stable.problem_id == problem.id)
rows = db(query).select(stable.id, stable.name)
options = Storage()
for row in rows:
options[row.id] = row.name
if duser.user_id:
vtable = s3db.delphi_vote
query = ((vtable.problem_id == problem.id) & (vtable.created_by == auth.user.id))
votes = db(query).select(vtable.solution_id, orderby=vtable.rank)
else:
votes = []
rankings = OrderedDict()
for v in votes:
rankings[v.solution_id] = options[v.solution_id]
options.pop(v.solution_id)
s3.stylesheets.append('S3/delphi.css')
js = ''.join(('var problem_id=', str(problem.id), '\ni18n.delphi_failed="', str(T('Failed!')), '"\ni18n.delphi_saving="', str(T('Saving...')), '"\ni18n.delphi_saved="', str(T('Saved.')), '"\ni18n.delphi_vote="', str(T('Save Vote')), '"'))
s3.js_global.append(js)
s3.scripts.append(URL(c='static', f='scripts', args=['S3', 's3.delphi.js']))
response.view = 'delphi/vote.html'
return dict(rheader=rheader, duser=duser, votes=votes, options=options, rankings=rankings)
| 2,324,606,337,618,013,000
|
Custom Method to allow Voting on Solutions to a Problem
|
controllers/delphi.py
|
vote
|
Code4SierraLeone/shdms
|
python
|
def vote(r, **attr):
'\n \n '
problem = r.record
duser = s3db.delphi_DelphiUser(problem.group_id)
rheader = problem_rheader(r)
stable = s3db.delphi_solution
query = (stable.problem_id == problem.id)
rows = db(query).select(stable.id, stable.name)
options = Storage()
for row in rows:
options[row.id] = row.name
if duser.user_id:
vtable = s3db.delphi_vote
query = ((vtable.problem_id == problem.id) & (vtable.created_by == auth.user.id))
votes = db(query).select(vtable.solution_id, orderby=vtable.rank)
else:
votes = []
rankings = OrderedDict()
for v in votes:
rankings[v.solution_id] = options[v.solution_id]
options.pop(v.solution_id)
s3.stylesheets.append('S3/delphi.css')
js = ''.join(('var problem_id=', str(problem.id), '\ni18n.delphi_failed="', str(T('Failed!')), '"\ni18n.delphi_saving="', str(T('Saving...')), '"\ni18n.delphi_saved="', str(T('Saved.')), '"\ni18n.delphi_vote="', str(T('Save Vote')), '"'))
s3.js_global.append(js)
s3.scripts.append(URL(c='static', f='scripts', args=['S3', 's3.delphi.js']))
response.view = 'delphi/vote.html'
return dict(rheader=rheader, duser=duser, votes=votes, options=options, rankings=rankings)
|
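vote() above partitions the problem's solutions into the voter's existing ranking and the still-unranked options. The same partition on plain dicts, as an illustrative sketch (web2py's Storage and DAL rows are replaced by ordinary Python types):

from collections import OrderedDict

options = {10: 'Solution A', 11: 'Solution B', 12: 'Solution C'}
votes = [{'solution_id': 11, 'rank': 1}]  # the user's stored votes, ordered by rank

rankings = OrderedDict()
for v in votes:
    # Move each already-ranked solution out of the open options, keeping rank order.
    rankings[v['solution_id']] = options.pop(v['solution_id'])

assert list(rankings) == [11]
assert sorted(options) == [10, 12]  # what the template still offers for ranking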
def save_vote():
'\n Function accessed by AJAX from vote() to save the results of a Vote\n '
try:
problem_id = request.args[0]
except:
raise HTTP(400)
ptable = s3db.delphi_problem
query = (ptable.id == problem_id)
problem = db(query).select(ptable.group_id, limitby=(0, 1)).first()
if (not problem):
raise HTTP(404)
duser = s3db.delphi_DelphiUser(problem.group_id)
if (not duser.can_vote):
auth.permission.fail()
try:
rankings = request.post_vars.keys()[0].split(',')
except IndexError:
status = current.xml.json_message(False, 400, 'No Options Ranked')
raise HTTP(400, body=status)
stable = s3db.delphi_solution
query = (stable.problem_id == problem_id)
solutions = db(query).select(stable.id)
options = []
for row in solutions:
options.append(row.id)
for ranked in rankings:
if (int(ranked) not in options):
status = current.xml.json_message(False, 400, "Option isn't valid!")
raise HTTP(400, body=status)
votes = []
count = 1
for ranked in rankings:
votes.append(Storage(solution_id=int(ranked), rank=count))
count += 1
vtable = s3db.delphi_vote
query = ((vtable.problem_id == problem_id) & (vtable.created_by == auth.user.id))
old_votes = db(query).select(vtable.solution_id, vtable.rank)
if old_votes:
ranks = {}
old_ranks = {}
used = []
for solution in solutions:
s1 = solution.id
ranks[s1] = 0
old_ranks[s1] = 0
for vote in votes:
if (vote.solution_id == s1):
ranks[s1] = vote.rank
continue
for vote in old_votes:
if (vote.solution_id == s1):
old_ranks[s1] = vote.rank
continue
for sol_2 in solutions:
changed = False
s2 = sol_2.id
if (s2 == s1):
continue
if ((s2, s1) in used):
continue
ranks[s2] = 0
old_ranks[s2] = 0
for vote in votes:
if (vote.solution_id == s2):
ranks[s2] = vote.rank
continue
for vote in old_votes:
if (vote.solution_id == s2):
old_ranks[s2] = vote.rank
continue
if ((ranks[s1] > ranks[s2]) and (old_ranks[s1] < old_ranks[s2])):
changed = True
elif ((ranks[s1] < ranks[s2]) and (old_ranks[s1] > old_ranks[s2])):
changed = True
elif ((ranks[s1] == ranks[s2]) and (old_ranks[s1] != old_ranks[s2])):
changed = True
elif ((ranks[s1] != ranks[s2]) and (old_ranks[s1] == old_ranks[s2])):
changed = True
if changed:
db(stable.id.belongs((s1, s2))).update(changes=(stable.changes + 1))
used.append((s1, s2))
db(query).delete()
count = 1
for ranked in rankings:
vtable.insert(problem_id=problem_id, solution_id=ranked, rank=count)
count += 1
status = current.xml.json_message(True, 200, 'Vote saved')
return status
| -8,974,777,343,455,827,000
|
Function accessed by AJAX from vote() to save the results of a Vote
|
controllers/delphi.py
|
save_vote
|
Code4SierraLeone/shdms
|
python
|
def save_vote():
'\n \n '
try:
problem_id = request.args[0]
except:
raise HTTP(400)
ptable = s3db.delphi_problem
query = (ptable.id == problem_id)
problem = db(query).select(ptable.group_id, limitby=(0, 1)).first()
if (not problem):
raise HTTP(404)
duser = s3db.delphi_DelphiUser(problem.group_id)
if (not duser.can_vote):
auth.permission.fail()
try:
rankings = request.post_vars.keys()[0].split(',')
except IndexError:
status = current.xml.json_message(False, 400, 'No Options Ranked')
raise HTTP(400, body=status)
stable = s3db.delphi_solution
query = (stable.problem_id == problem_id)
solutions = db(query).select(stable.id)
options = []
for row in solutions:
options.append(row.id)
for ranked in rankings:
if (int(ranked) not in options):
status = current.xml.json_message(False, 400, "Option isn't valid!")
raise HTTP(400, body=status)
votes = []
count = 1
for ranked in rankings:
votes.append(Storage(solution_id=int(ranked), rank=count))
count += 1
vtable = s3db.delphi_vote
query = ((vtable.problem_id == problem_id) & (vtable.created_by == auth.user.id))
old_votes = db(query).select(vtable.solution_id, vtable.rank)
if old_votes:
ranks = {}
old_ranks = {}
used = []
for solution in solutions:
s1 = solution.id
ranks[s1] = 0
old_ranks[s1] = 0
for vote in votes:
if (vote.solution_id == s1):
ranks[s1] = vote.rank
continue
for vote in old_votes:
if (vote.solution_id == s1):
old_ranks[s1] = vote.rank
continue
for sol_2 in solutions:
changed = False
s2 = sol_2.id
if (s2 == s1):
continue
if ((s2, s1) in used):
continue
ranks[s2] = 0
old_ranks[s2] = 0
for vote in votes:
if (vote.solution_id == s2):
ranks[s2] = vote.rank
continue
for vote in old_votes:
if (vote.solution_id == s2):
old_ranks[s2] = vote.rank
continue
if ((ranks[s1] > ranks[s2]) and (old_ranks[s1] < old_ranks[s2])):
changed = True
elif ((ranks[s1] < ranks[s2]) and (old_ranks[s1] > old_ranks[s2])):
changed = True
elif ((ranks[s1] == ranks[s2]) and (old_ranks[s1] != old_ranks[s2])):
changed = True
elif ((ranks[s1] != ranks[s2]) and (old_ranks[s1] == old_ranks[s2])):
changed = True
if changed:
db(stable.id.belongs((s1, s2))).update(changes=(stable.changes + 1))
used.append((s1, s2))
db(query).delete()
count = 1
for ranked in rankings:
vtable.insert(problem_id=problem_id, solution_id=ranked, rank=count)
count += 1
status = current.xml.json_message(True, 200, 'Vote saved')
return status
|
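The nested comparison loops in save_vote() implement a single rule: a solution pair counts as changed when the outcome of comparing the two ranks (greater, less, or tied) differs between the new vote and the old one. A standalone restatement of that rule, with illustrative names on plain dicts rather than DAL rows:

def pair_changed(new_ranks, old_ranks, s1, s2):
    # Unranked solutions default to rank 0, mirroring the controller.
    a, b = new_ranks.get(s1, 0), new_ranks.get(s2, 0)
    oa, ob = old_ranks.get(s1, 0), old_ranks.get(s2, 0)
    # The four if/elif branches above reduce to: did the comparison outcome flip?
    return (a > b, a < b) != (oa > ob, oa < ob)

assert pair_changed({1: 1, 2: 2}, {1: 2, 2: 1}, 1, 2)      # order reversed
assert not pair_changed({1: 1, 2: 2}, {1: 1, 2: 2}, 1, 2)  # nothing moved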
def _getUnitNormalDeviation(zscore):
'\n Utility function used by Scale of Results\n\n Looks up the Unit Normal Deviation based on the Z-Score (Proportion/Probability)\n http://en.wikipedia.org/wiki/Standard_normal_table\n\n @ToDo: Move to S3Statistics module\n '
UNIT_NORMAL = ((0.0, 0.0, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09), (0.0, 0.5, 0.504, 0.508, 0.512, 0.516, 0.5199, 0.5239, 0.5279, 0.5319, 0.5359), (0.1, 0.5398, 0.5438, 0.5478, 0.5517, 0.5557, 0.5596, 0.5636, 0.5675, 0.5714, 0.5753), (0.2, 0.5793, 0.5832, 0.5871, 0.591, 0.5948, 0.5987, 0.6026, 0.6064, 0.6103, 0.6141), (0.3, 0.6179, 0.6217, 0.6255, 0.6293, 0.6331, 0.6368, 0.6406, 0.6443, 0.648, 0.6517), (0.4, 0.6554, 0.6591, 0.6628, 0.6664, 0.67, 0.6736, 0.6772, 0.6808, 0.6844, 0.6879), (0.5, 0.6915, 0.695, 0.6985, 0.7019, 0.7054, 0.7088, 0.7123, 0.7157, 0.719, 0.7224), (0.6, 0.7257, 0.7291, 0.7324, 0.7357, 0.7389, 0.7422, 0.7454, 0.7486, 0.7517, 0.7549), (0.7, 0.758, 0.7611, 0.7642, 0.7673, 0.7703, 0.7734, 0.7764, 0.7794, 0.7823, 0.7852), (0.8, 0.7881, 0.791, 0.7939, 0.7967, 0.7995, 0.8023, 0.8051, 0.8078, 0.8106, 0.8133), (0.9, 0.8159, 0.8186, 0.8212, 0.8238, 0.8264, 0.8289, 0.8315, 0.834, 0.8365, 0.8389), (1.0, 0.8415, 0.8438, 0.8461, 0.8485, 0.8508, 0.8531, 0.8554, 0.8577, 0.8509, 0.8621), (1.1, 0.8643, 0.8665, 0.8686, 0.8708, 0.8729, 0.8749, 0.877, 0.879, 0.881, 0.883), (1.2, 0.8849, 0.8869, 0.8888, 0.8907, 0.8925, 0.8944, 0.8962, 0.898, 0.8997, 0.90147), (1.3, 0.9032, 0.9049, 0.90658, 0.90824, 0.90988, 0.91149, 0.91309, 0.91466, 0.91621, 0.91774), (1.4, 0.91924, 0.92073, 0.9222, 0.92364, 0.92507, 0.92647, 0.92785, 0.92922, 0.93056, 0.93189), (1.5, 0.93319, 0.93448, 0.93574, 0.93699, 0.93822, 0.93943, 0.94062, 0.94179, 0.94295, 0.94408), (1.6, 0.9452, 0.9463, 0.94738, 0.94845, 0.9495, 0.95053, 0.95154, 0.95254, 0.95352, 0.95449), (1.7, 0.95543, 0.95637, 0.95728, 0.95818, 0.95907, 0.95994, 0.9608, 0.96164, 0.96246, 0.96327), (1.8, 0.96407, 0.96485, 0.96562, 0.96638, 0.96712, 0.96784, 0.97856, 0.96926, 0.96995, 0.97062), (1.9, 0.97128, 0.97193, 0.97257, 0.9732, 0.97381, 0.97441, 0.975, 0.97558, 0.97615, 0.9767), (2.0, 0.97725, 0.97778, 0.97831, 0.97882, 0.97932, 0.97982, 0.9803, 0.98077, 0.98124, 0.98169), (2.1, 0.98214, 0.98257, 0.983, 0.98341, 0.98382, 0.98422, 0.98461, 0.985, 0.98537, 0.98574), (2.2, 0.9861, 0.98645, 0.98679, 0.98713, 0.98745, 0.98778, 0.98809, 0.9884, 0.9887, 0.98899), (2.3, 0.98928, 0.98956, 0.98983, 0.990097, 0.990358, 0.990613, 0.990863, 0.991106, 0.991344, 0.991576), (2.4, 0.991802, 0.992024, 0.99224, 0.992451, 0.992656, 0.992857, 0.993053, 0.993244, 0.993431, 0.993613), (2.5, 0.99379, 0.993963, 0.994132, 0.994297, 0.994457, 0.994614, 0.994766, 0.994915, 0.99506, 0.995201), (2.6, 0.995339, 0.995473, 0.995604, 0.995731, 0.995855, 0.995975, 0.996093, 0.996207, 0.996319, 0.996427), (2.7, 0.996533, 0.996636, 0.996736, 0.996833, 0.996928, 0.99702, 0.99711, 0.997197, 0.997282, 0.997365), (2.8, 0.997445, 0.997523, 0.997599, 0.997673, 0.997744, 0.997814, 0.997882, 0.997948, 0.998012, 0.998074), (2.9, 0.998134, 0.998193, 0.99825, 0.998305, 0.998359, 0.998411, 0.99846, 0.998511, 0.998559, 0.998605), (3.0, 0.99865, 0.998694, 0.998736, 0.998777, 0.998817, 0.998856, 0.998893, 0.99893, 0.998965, 0.998999), (3.1, 0.9990324, 0.9990646, 0.9990957, 0.999126, 0.9991553, 0.9991836, 0.9992112, 0.9992378, 0.9992636, 0.9992886), (3.2, 0.9993129, 0.9993363, 0.999359, 0.999381, 0.9994024, 0.999423, 0.9994429, 0.9994623, 0.999481, 0.9994991), (3.3, 0.9995166, 0.9995335, 0.9995499, 0.9995658, 0.9995811, 0.9995959, 0.9996103, 0.9996242, 0.9996376, 0.9996505), (3.4, 0.9996631, 0.9996752, 0.9996869, 0.9996982, 0.9997091, 0.9997197, 0.9997299, 0.9997398, 0.9997493, 0.9997585), (3.5, 0.9997674, 0.9997759, 0.9997842, 0.9997922, 0.9997999, 0.9998074, 0.9998146, 0.9998215, 0.9998282, 
0.9998347), (3.6, 0.9998409, 0.9998469, 0.9998527, 0.9998583, 0.9998637, 0.9998689, 0.9998739, 0.9998787, 0.9998834, 0.9998879), (3.7, 0.9998922, 0.9998964, 0.99990039, 0.99990426, 0.99990799, 0.99991158, 0.99991504, 0.99991838, 0.99992159, 0.99992468), (3.8, 0.99992765, 0.99993052, 0.99993327, 0.99993593, 0.99993848, 0.99994094, 0.99994331, 0.99994558, 0.99994777, 0.99994988), (3.9, 0.9999519, 0.99995385, 0.99995573, 0.99995753, 0.99995926, 0.99996092, 0.99996253, 0.99996406, 0.99996554, 0.99996696), (4.0, 0.99996833, 0.99996964, 0.9999709, 0.99997211, 0.99997327, 0.99997439, 0.99997546, 0.99997649, 0.99997748, 0.99997843), (4.1, 0.99997934, 0.99998022, 0.99998106, 0.99998186, 0.99998263, 0.99998338, 0.99998409, 0.99998477, 0.99998542, 0.99998605), (4.2, 0.99998665, 0.99998723, 0.99998778, 0.99998832, 0.99998882, 0.99998931, 0.99998978, 0.999990226, 0.999990655, 0.999991066), (4.3, 0.99999146, 0.999991837, 0.999992199, 0.999992545, 0.999992876, 0.999993193, 0.999993497, 0.999993788, 0.999994066, 0.999994332), (4.4, 0.999994587, 0.999994831, 0.999995065, 0.999995288, 0.999995502, 0.999995706, 0.999995902, 0.999996089, 0.999996268, 0.999996439), (4.5, 0.999996602, 0.999996759, 0.999996908, 0.999997051, 0.999997187, 0.999997318, 0.999997442, 0.999997561, 0.999997675, 0.999997784), (4.6, 0.999997888, 0.999997987, 0.999998081, 0.999998172, 0.999998258, 0.99999834, 0.999998419, 0.999998494, 0.999998566, 0.999998634), (4.7, 0.999998699, 0.999998761, 0.999998821, 0.999998877, 0.999998931, 0.999998983, 0.999999032, 0.9999990789, 0.9999991235, 0.9999991661), (4.8, 0.9999992067, 0.9999992453, 0.9999992822, 0.9999993173, 0.9999993508, 0.9999993827, 0.9999994131, 0.999999442, 0.9999994696, 0.9999994958), (4.9, 0.9999995208, 0.9999995446, 0.9999995673, 0.9999995889, 0.9999996094, 0.9999996289, 0.9999996475, 0.9999996652, 0.9999996821, 0.9999996981))
unitDeviation = 0.0
for j in range(1, 50):
if (zscore == UNIT_NORMAL[j][1]):
unitDeviation = UNIT_NORMAL[j][0]
elif ((UNIT_NORMAL[j][1] < zscore) and (zscore < UNIT_NORMAL[(j + 1)][1])):
for i in range(2, 10):
if ((UNIT_NORMAL[j][(i - 1)] < zscore) and (zscore <= UNIT_NORMAL[j][i])):
unitDeviation = (UNIT_NORMAL[j][0] + UNIT_NORMAL[0][i])
if (zscore > UNIT_NORMAL[j][10]):
unitDeviation = UNIT_NORMAL[(j + 1)][0]
if (zscore > UNIT_NORMAL[50][10]):
unitDeviation = 5.0
return unitDeviation
| -5,715,348,252,954,635,000
|
Utility function used by Scale of Results
Looks up the Unit Normal Deviation based on the Z-Score (Proportion/Probability)
http://en.wikipedia.org/wiki/Standard_normal_table
@ToDo: Move to S3Statistics module
|
controllers/delphi.py
|
_getUnitNormalDeviation
|
Code4SierraLeone/shdms
|
python
|
def _getUnitNormalDeviation(zscore):
'\n Utility function used by Scale of Results\n\n Looks up the Unit Normal Deviation based on the Z-Score (Proportion/Probability)\n http://en.wikipedia.org/wiki/Standard_normal_table\n\n @ToDo: Move to S3Statistics module\n '
UNIT_NORMAL = ((0.0, 0.0, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09), (0.0, 0.5, 0.504, 0.508, 0.512, 0.516, 0.5199, 0.5239, 0.5279, 0.5319, 0.5359), (0.1, 0.5398, 0.5438, 0.5478, 0.5517, 0.5557, 0.5596, 0.5636, 0.5675, 0.5714, 0.5753), (0.2, 0.5793, 0.5832, 0.5871, 0.591, 0.5948, 0.5987, 0.6026, 0.6064, 0.6103, 0.6141), (0.3, 0.6179, 0.6217, 0.6255, 0.6293, 0.6331, 0.6368, 0.6406, 0.6443, 0.648, 0.6517), (0.4, 0.6554, 0.6591, 0.6628, 0.6664, 0.67, 0.6736, 0.6772, 0.6808, 0.6844, 0.6879), (0.5, 0.6915, 0.695, 0.6985, 0.7019, 0.7054, 0.7088, 0.7123, 0.7157, 0.719, 0.7224), (0.6, 0.7257, 0.7291, 0.7324, 0.7357, 0.7389, 0.7422, 0.7454, 0.7486, 0.7517, 0.7549), (0.7, 0.758, 0.7611, 0.7642, 0.7673, 0.7703, 0.7734, 0.7764, 0.7794, 0.7823, 0.7852), (0.8, 0.7881, 0.791, 0.7939, 0.7967, 0.7995, 0.8023, 0.8051, 0.8078, 0.8106, 0.8133), (0.9, 0.8159, 0.8186, 0.8212, 0.8238, 0.8264, 0.8289, 0.8315, 0.834, 0.8365, 0.8389), (1.0, 0.8415, 0.8438, 0.8461, 0.8485, 0.8508, 0.8531, 0.8554, 0.8577, 0.8509, 0.8621), (1.1, 0.8643, 0.8665, 0.8686, 0.8708, 0.8729, 0.8749, 0.877, 0.879, 0.881, 0.883), (1.2, 0.8849, 0.8869, 0.8888, 0.8907, 0.8925, 0.8944, 0.8962, 0.898, 0.8997, 0.90147), (1.3, 0.9032, 0.9049, 0.90658, 0.90824, 0.90988, 0.91149, 0.91309, 0.91466, 0.91621, 0.91774), (1.4, 0.91924, 0.92073, 0.9222, 0.92364, 0.92507, 0.92647, 0.92785, 0.92922, 0.93056, 0.93189), (1.5, 0.93319, 0.93448, 0.93574, 0.93699, 0.93822, 0.93943, 0.94062, 0.94179, 0.94295, 0.94408), (1.6, 0.9452, 0.9463, 0.94738, 0.94845, 0.9495, 0.95053, 0.95154, 0.95254, 0.95352, 0.95449), (1.7, 0.95543, 0.95637, 0.95728, 0.95818, 0.95907, 0.95994, 0.9608, 0.96164, 0.96246, 0.96327), (1.8, 0.96407, 0.96485, 0.96562, 0.96638, 0.96712, 0.96784, 0.97856, 0.96926, 0.96995, 0.97062), (1.9, 0.97128, 0.97193, 0.97257, 0.9732, 0.97381, 0.97441, 0.975, 0.97558, 0.97615, 0.9767), (2.0, 0.97725, 0.97778, 0.97831, 0.97882, 0.97932, 0.97982, 0.9803, 0.98077, 0.98124, 0.98169), (2.1, 0.98214, 0.98257, 0.983, 0.98341, 0.98382, 0.98422, 0.98461, 0.985, 0.98537, 0.98574), (2.2, 0.9861, 0.98645, 0.98679, 0.98713, 0.98745, 0.98778, 0.98809, 0.9884, 0.9887, 0.98899), (2.3, 0.98928, 0.98956, 0.98983, 0.990097, 0.990358, 0.990613, 0.990863, 0.991106, 0.991344, 0.991576), (2.4, 0.991802, 0.992024, 0.99224, 0.992451, 0.992656, 0.992857, 0.993053, 0.993244, 0.993431, 0.993613), (2.5, 0.99379, 0.993963, 0.994132, 0.994297, 0.994457, 0.994614, 0.994766, 0.994915, 0.99506, 0.995201), (2.6, 0.995339, 0.995473, 0.995604, 0.995731, 0.995855, 0.995975, 0.996093, 0.996207, 0.996319, 0.996427), (2.7, 0.996533, 0.996636, 0.996736, 0.996833, 0.996928, 0.99702, 0.99711, 0.997197, 0.997282, 0.997365), (2.8, 0.997445, 0.997523, 0.997599, 0.997673, 0.997744, 0.997814, 0.997882, 0.997948, 0.998012, 0.998074), (2.9, 0.998134, 0.998193, 0.99825, 0.998305, 0.998359, 0.998411, 0.99846, 0.998511, 0.998559, 0.998605), (3.0, 0.99865, 0.998694, 0.998736, 0.998777, 0.998817, 0.998856, 0.998893, 0.99893, 0.998965, 0.998999), (3.1, 0.9990324, 0.9990646, 0.9990957, 0.999126, 0.9991553, 0.9991836, 0.9992112, 0.9992378, 0.9992636, 0.9992886), (3.2, 0.9993129, 0.9993363, 0.999359, 0.999381, 0.9994024, 0.999423, 0.9994429, 0.9994623, 0.999481, 0.9994991), (3.3, 0.9995166, 0.9995335, 0.9995499, 0.9995658, 0.9995811, 0.9995959, 0.9996103, 0.9996242, 0.9996376, 0.9996505), (3.4, 0.9996631, 0.9996752, 0.9996869, 0.9996982, 0.9997091, 0.9997197, 0.9997299, 0.9997398, 0.9997493, 0.9997585), (3.5, 0.9997674, 0.9997759, 0.9997842, 0.9997922, 0.9997999, 0.9998074, 0.9998146, 0.9998215, 0.9998282, 
0.9998347), (3.6, 0.9998409, 0.9998469, 0.9998527, 0.9998583, 0.9998637, 0.9998689, 0.9998739, 0.9998787, 0.9998834, 0.9998879), (3.7, 0.9998922, 0.9998964, 0.99990039, 0.99990426, 0.99990799, 0.99991158, 0.99991504, 0.99991838, 0.99992159, 0.99992468), (3.8, 0.99992765, 0.99993052, 0.99993327, 0.99993593, 0.99993848, 0.99994094, 0.99994331, 0.99994558, 0.99994777, 0.99994988), (3.9, 0.9999519, 0.99995385, 0.99995573, 0.99995753, 0.99995926, 0.99996092, 0.99996253, 0.99996406, 0.99996554, 0.99996696), (4.0, 0.99996833, 0.99996964, 0.9999709, 0.99997211, 0.99997327, 0.99997439, 0.99997546, 0.99997649, 0.99997748, 0.99997843), (4.1, 0.99997934, 0.99998022, 0.99998106, 0.99998186, 0.99998263, 0.99998338, 0.99998409, 0.99998477, 0.99998542, 0.99998605), (4.2, 0.99998665, 0.99998723, 0.99998778, 0.99998832, 0.99998882, 0.99998931, 0.99998978, 0.999990226, 0.999990655, 0.999991066), (4.3, 0.99999146, 0.999991837, 0.999992199, 0.999992545, 0.999992876, 0.999993193, 0.999993497, 0.999993788, 0.999994066, 0.999994332), (4.4, 0.999994587, 0.999994831, 0.999995065, 0.999995288, 0.999995502, 0.999995706, 0.999995902, 0.999996089, 0.999996268, 0.999996439), (4.5, 0.999996602, 0.999996759, 0.999996908, 0.999997051, 0.999997187, 0.999997318, 0.999997442, 0.999997561, 0.999997675, 0.999997784), (4.6, 0.999997888, 0.999997987, 0.999998081, 0.999998172, 0.999998258, 0.99999834, 0.999998419, 0.999998494, 0.999998566, 0.999998634), (4.7, 0.999998699, 0.999998761, 0.999998821, 0.999998877, 0.999998931, 0.999998983, 0.999999032, 0.9999990789, 0.9999991235, 0.9999991661), (4.8, 0.9999992067, 0.9999992453, 0.9999992822, 0.9999993173, 0.9999993508, 0.9999993827, 0.9999994131, 0.999999442, 0.9999994696, 0.9999994958), (4.9, 0.9999995208, 0.9999995446, 0.9999995673, 0.9999995889, 0.9999996094, 0.9999996289, 0.9999996475, 0.9999996652, 0.9999996821, 0.9999996981))
unitDeviation = 0.0
for j in range(1, 50):
if (zscore == UNIT_NORMAL[j][1]):
unitDeviation = UNIT_NORMAL[j][0]
elif ((UNIT_NORMAL[j][1] < zscore) and (zscore < UNIT_NORMAL[(j + 1)][1])):
for i in range(2, 10):
if ((UNIT_NORMAL[j][(i - 1)] < zscore) and (zscore <= UNIT_NORMAL[j][i])):
unitDeviation = (UNIT_NORMAL[j][0] + UNIT_NORMAL[0][i])
if (zscore > UNIT_NORMAL[j][10]):
unitDeviation = UNIT_NORMAL[(j + 1)][0]
if (zscore > UNIT_NORMAL[50][10]):
unitDeviation = 5.0
return unitDeviation
|
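The UNIT_NORMAL table walk approximates the inverse CDF (probit) of the standard normal distribution to two decimal places; note that the parameter named zscore actually receives a proportion. On Python 3.8+ the standard library computes the same quantity directly; a minimal sketch, not part of the web2py controller:

from statistics import NormalDist

def unit_normal_deviation(p):
    # Continuous analogue of the table lookup above: the probit function.
    if not 0.0 < p < 1.0:
        raise ValueError('proportion must lie strictly between 0 and 1')
    return NormalDist().inv_cdf(p)

print(round(unit_normal_deviation(0.975), 2))  # 1.96
print(round(unit_normal_deviation(0.5), 2))    # 0.0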
def online_variance(data):
'\n A numerically stable algorithm for calculating variance\n http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#On-line_algorithm\n '
n = 0
mean = 0
M2 = 0
for x in data:
n = (n + 1)
delta = (x - mean)
mean = (mean + (delta / n))
M2 = (M2 + (delta * (x - mean)))
variance_n = (M2 / n)
variance = (M2 / (n - 1))
return (variance, variance_n)
| -8,880,010,815,469,903,000
|
A numerically stable algorithm for calculating variance
http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#On-line_algorithm
|
controllers/delphi.py
|
online_variance
|
Code4SierraLeone/shdms
|
python
|
def online_variance(data):
'\n A numerically stable algorithm for calculating variance\n http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#On-line_algorithm\n '
n = 0
mean = 0
M2 = 0
for x in data:
n = (n + 1)
delta = (x - mean)
mean = (mean + (delta / n))
M2 = (M2 + (delta * (x - mean)))
variance_n = (M2 / n)
variance = (M2 / (n - 1))
return (variance, variance_n)
|
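A quick sanity check of the Welford recurrence against the standard library, assuming Python 3 and the online_variance() defined above in scope; note the function divides by n - 1, so it needs at least two data points:

from statistics import variance, pvariance

data = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]
sample_var, population_var = online_variance(data)
assert abs(sample_var - variance(data)) < 1e-12       # M2 / (n - 1)
assert abs(population_var - pvariance(data)) < 1e-12  # M2 / n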
def results(r, **attr):
'\n Custom Method to show the Scale of Results\n '
def NBSP():
return XML('&nbsp;')
rheader = problem_rheader(r)
response.view = 'delphi/results.html'
empty = dict(rheader=rheader, num_voted=0, chart='', table_color='', grids='', summary='')
problem = r.record
if problem:
vtable = s3db.delphi_vote
query = (vtable.problem_id == problem.id)
votes = db(query).select(vtable.solution_id, vtable.rank, vtable.created_by)
else:
votes = None
if (not votes):
return empty
stable = s3db.delphi_solution
query = (stable.problem_id == problem.id)
solutions = db(query).select(stable.id, stable.name, stable.problem_id, stable.changes)
if (not solutions):
return empty
arrayF = {}
arrayP = {}
arrayX = {}
arrayP2 = {}
arrayU = {}
for solution in solutions:
s1 = solution.id
for sol_2 in solutions:
s2 = sol_2.id
if (s1 == s2):
arrayF[(s1, s2)] = None
arrayP[(s1, s2)] = None
arrayX[(s1, s2)] = None
arrayP2[(s1, s2)] = None
arrayU[(s1, s2)] = None
continue
arrayF[(s1, s2)] = 0
arrayP[(s1, s2)] = 0.5
arrayX[(s1, s2)] = 0
arrayP2[(s1, s2)] = 0.5
arrayU[(s1, s2)] = 0.5
voters = []
for vote in votes:
voter = vote.created_by
if (voter not in voters):
voters.append(voter)
num_voted = len(voters)
for voter in voters:
ranks = {}
for vote in votes:
if (vote.created_by != voter):
continue
ranks[vote.rank] = vote.solution_id
for rank_1 in range(1, len(ranks)):
for rank_2 in range((rank_1 + 1), (len(ranks) + 1)):
arrayF[(ranks[rank_1], ranks[rank_2])] += 1
grids = DIV()
header = TR(TD())
rows = TBODY()
for solution in solutions:
header.append(TH(solution.name))
s1 = solution.id
row = TR(TH(solution.name))
for sol_2 in solutions:
s2 = sol_2.id
value = arrayF[(s2, s1)]
if (value is None):
row.append(TD('-'))
else:
row.append(TD(value))
rows.append(row)
output = TABLE(THEAD(header), rows, _class='delphi_wide')
output = DIV(H4(T("Array F: # times that solution in column is preferred over it's partner in row")), output)
grids.append(output)
grids.append(NBSP())
for solution in solutions:
s1 = solution.id
for sol_2 in solutions:
s2 = sol_2.id
if (s1 == s2):
continue
total = float((arrayF[(s1, s2)] + arrayF[(s2, s1)]))
if total:
proportion = (arrayF[(s2, s1)] / total)
else:
proportion = 0.5
arrayP[(s2, s1)] = proportion
if (proportion == 0.0):
arrayX[(s2, s1)] = _getUnitNormalDeviation(0.01)
elif (proportion == 1.0):
arrayX[(s2, s1)] = _getUnitNormalDeviation(0.99)
else:
arrayX[(s2, s1)] = _getUnitNormalDeviation(proportion)
novotes = (num_voted - total)
if (proportion == 0.5):
pass
elif (proportion > 0.5):
proportion = ((arrayF[(s2, s1)] - novotes) / num_voted)
else:
proportion = ((arrayF[(s2, s1)] + novotes) / num_voted)
arrayP2[(s2, s1)] = proportion
if (proportion == 0.0):
arrayU[(s2, s1)] = _getUnitNormalDeviation(0.01)
elif (proportion == 1.0):
arrayU[(s2, s1)] = _getUnitNormalDeviation(0.99)
else:
arrayU[(s2, s1)] = _getUnitNormalDeviation(proportion)
header = TR(TD())
rows = TBODY()
for solution in solutions:
header.append(TH(solution.name))
s1 = solution.id
row = TR(TH(solution.name))
for sol_2 in solutions:
s2 = sol_2.id
value = arrayP[(s2, s1)]
if (value is None):
row.append(TD('-'))
else:
row.append(TD(value))
rows.append(row)
output = TABLE(THEAD(header), rows, _class='delphi_wide')
output = DIV(H4(T("Array P: proportion of times that solution in column is preferred over it's partner in row, assuming that pairs not ranked start at the level of indifference (0.5)")), output)
grids.append(output)
grids.append(NBSP())
header = TR(TD())
rows = TBODY()
footer = TR(TH('Total'))
footer2 = TR(TH('Scale'))
totals = {}
counts = {}
for solution in solutions:
s1 = solution.id
totals[s1] = 0
counts[s1] = 0
for solution in solutions:
header.append(TH(solution.name))
s1 = solution.id
row = TR(TH(solution.name))
for sol_2 in solutions:
s2 = sol_2.id
value = arrayX[(s2, s1)]
if (value is None):
row.append(TD('-'))
else:
row.append(TD(value))
if (value is not None):
totals[s2] += value
counts[s2] += 1
rows.append(row)
for solution in solutions:
s1 = solution.id
footer.append(TH(totals[s1]))
if counts[s1]:
solution.scale = (totals[s1] / counts[s1])
footer2.append(TH(solution.scale))
else:
solution.scale = 0
footer2.append(TH())
output = TABLE(THEAD(header), rows, footer, footer2, _class='delphi_wide')
output = DIV(H4(T('Array X: unit normal deviate')), output)
grids.append(output)
grids.append(NBSP())
header = TR(TD())
rows = TBODY()
for solution in solutions:
header.append(TH(solution.name))
s1 = solution.id
row = TR(TH(solution.name))
for sol_2 in solutions:
s2 = sol_2.id
value = arrayP2[(s2, s1)]
if (value is None):
row.append(TD('-'))
else:
row.append(TD(value))
rows.append(row)
output = TABLE(THEAD(header), rows, _class='delphi_wide')
output = DIV(H4(T("Array P2: proportion of times that solution in column is preferred over it's partner in row, assuming that non-votes move towards indifference")), output)
grids.append(output)
grids.append(NBSP())
header = TR(TD())
rows = TBODY()
footer = TR(TH('Total'))
footer2 = TR(TH('Scale'))
totals = {}
counts = {}
for solution in solutions:
s1 = solution.id
totals[s1] = 0
counts[s1] = 0
for solution in solutions:
header.append(TH(solution.name))
s1 = solution.id
row = TR(TH(solution.name))
for sol_2 in solutions:
s2 = sol_2.id
value = arrayU[(s2, s1)]
if (value is None):
row.append(TD('-'))
else:
row.append(TD(value))
if (value is not None):
totals[s2] += value
counts[s2] += 1
rows.append(row)
for solution in solutions:
s1 = solution.id
footer.append(TH(totals[s1]))
if counts[s1]:
solution.uncertainty = (totals[s1] / counts[s1])
footer2.append(TH(solution.uncertainty))
else:
solution.uncertainty = 0
footer2.append(TH())
output = TABLE(THEAD(header), rows, footer, footer2, _class='delphi_wide')
output = DIV(H4(T('Array U: unit normal deviate of the uncertainty value (assuming that all unvoted items return the probability towards indifference)')), output)
grids.append(output)
def scale(solution):
return float(solution.scale)
solutions = solutions.sort(scale, reverse=True)
n = len(solutions)
image = ''
if image:
from s3chart import S3Chart
chart = S3Chart(9, 6)
fig = chart.fig
ax = fig.add_axes([0.35, 0.1, 0.6, 0.8])
problem = r.record
ax.set_title(problem.name)
labels = []
scales = []
uncertainties = []
for solution in solutions:
labels.append(solution.name)
scales.append(solution.scale)
uncertainties.append(solution.uncertainty)
from numpy import arange
ind = arange(n)
width = 0.35
ax.set_yticks((ind + width))
ax.set_yticklabels(labels)
labels = ax.get_yticklabels()
for label in labels:
label.set_size(8)
ax.set_xlabel('Scale')
ax.xaxis.grid(True)
rects1 = ax.barh(ind, scales, width, linewidth=0)
rects2 = ax.barh((ind + width), uncertainties, width, linewidth=0, color='red')
ax.legend((rects1[0], rects2[0]), ('Scale', 'Uncertainty'))
image = chart.draw()
classes = 5
q = []
qappend = q.append
for i in range((classes - 1)):
qappend(((1.0 / classes) * (i + 1)))
values = [float(solution.scale) for solution in solutions]
breaks = s3db.stats_quantile(values, q)
breaks = list(breaks)
values_min = min(values)
values_max = max(values)
breaks.insert(0, values_min)
breaks.append(values_max)
colours = ['edf8fb', 'b2e2e2', '66c2a4', '2ca25f', '006d2c']
for solution in solutions:
for i in range(classes):
value = solution.scale
if ((value >= breaks[i]) and (value <= breaks[(i + 1)])):
solution.color = colours[i]
break
thead = THEAD(TR(TH(T('Solution Item'), _rowspan='2'), TH(T('Scale'), _rowspan='2'), TH(T('Uncertainty'), _rowspan='2'), TH(T('Activity Level'), _colspan='3')), TR(TH(T('Voted on')), TH(T('Times Changed')), TH(T('Comments'))))
tbody = TBODY()
for solution in solutions:
rows = True
tbody.append(TR(TD(solution.name), TD(solution.scale, _class='taright'), TD(solution.uncertainty, _class='taright'), TD(solution.votes(), _class='tacenter'), TD(solution.changes, _class='tacenter'), TD(solution.comments(), _class='tacenter'), _style=('background:#%s' % solution.color)))
summary = TABLE(thead, tbody, _class='delphi_wide')
s3.stylesheets.append('S3/delphi.css')
return dict(rheader=rheader, num_voted=num_voted, chart=image, summary=summary, grids=grids)
| 7,627,450,511,142,660,000
|
Custom Method to show the Scale of Results
|
controllers/delphi.py
|
results
|
Code4SierraLeone/shdms
|
python
|
def results(r, **attr):
'\n \n '
def NBSP():
return XML('&nbsp;')
rheader = problem_rheader(r)
response.view = 'delphi/results.html'
empty = dict(rheader=rheader, num_voted=0, chart='', table_color='', grids='', summary='')
problem = r.record
if problem:
vtable = s3db.delphi_vote
query = (vtable.problem_id == problem.id)
votes = db(query).select(vtable.solution_id, vtable.rank, vtable.created_by)
else:
votes = None
if (not votes):
return empty
stable = s3db.delphi_solution
query = (stable.problem_id == problem.id)
solutions = db(query).select(stable.id, stable.name, stable.problem_id, stable.changes)
if (not solutions):
return empty
arrayF = {}
arrayP = {}
arrayX = {}
arrayP2 = {}
arrayU = {}
for solution in solutions:
s1 = solution.id
for sol_2 in solutions:
s2 = sol_2.id
if (s1 == s2):
arrayF[(s1, s2)] = None
arrayP[(s1, s2)] = None
arrayX[(s1, s2)] = None
arrayP2[(s1, s2)] = None
arrayU[(s1, s2)] = None
continue
arrayF[(s1, s2)] = 0
arrayP[(s1, s2)] = 0.5
arrayX[(s1, s2)] = 0
arrayP2[(s1, s2)] = 0.5
arrayU[(s1, s2)] = 0.5
voters = []
for vote in votes:
voter = vote.created_by
if (voter not in voters):
voters.append(voter)
num_voted = len(voters)
for voter in voters:
ranks = {}
for vote in votes:
if (vote.created_by != voter):
continue
ranks[vote.rank] = vote.solution_id
for rank_1 in range(1, len(ranks)):
for rank_2 in range((rank_1 + 1), (len(ranks) + 1)):
arrayF[(ranks[rank_1], ranks[rank_2])] += 1
grids = DIV()
header = TR(TD())
rows = TBODY()
for solution in solutions:
header.append(TH(solution.name))
s1 = solution.id
row = TR(TH(solution.name))
for sol_2 in solutions:
s2 = sol_2.id
value = arrayF[(s2, s1)]
if (value is None):
row.append(TD('-'))
else:
row.append(TD(value))
rows.append(row)
output = TABLE(THEAD(header), rows, _class='delphi_wide')
output = DIV(H4(T("Array F: # times that solution in column is preferred over it's partner in row")), output)
grids.append(output)
grids.append(NBSP())
for solution in solutions:
s1 = solution.id
for sol_2 in solutions:
s2 = sol_2.id
if (s1 == s2):
continue
total = float((arrayF[(s1, s2)] + arrayF[(s2, s1)]))
if total:
proportion = (arrayF[(s2, s1)] / total)
else:
proportion = 0.5
arrayP[(s2, s1)] = proportion
if (proportion == 0.0):
arrayX[(s2, s1)] = _getUnitNormalDeviation(0.01)
elif (proportion == 1.0):
arrayX[(s2, s1)] = _getUnitNormalDeviation(0.99)
else:
arrayX[(s2, s1)] = _getUnitNormalDeviation(proportion)
novotes = (num_voted - total)
if (proportion == 0.5):
pass
elif (proportion > 0.5):
proportion = ((arrayF[(s2, s1)] - novotes) / num_voted)
else:
proportion = ((arrayF[(s2, s1)] + novotes) / num_voted)
arrayP2[(s2, s1)] = proportion
if (proportion == 0.0):
arrayU[(s2, s1)] = _getUnitNormalDeviation(0.01)
elif (proportion == 1.0):
arrayU[(s2, s1)] = _getUnitNormalDeviation(0.99)
else:
arrayU[(s2, s1)] = _getUnitNormalDeviation(proportion)
header = TR(TD())
rows = TBODY()
for solution in solutions:
header.append(TH(solution.name))
s1 = solution.id
row = TR(TH(solution.name))
for sol_2 in solutions:
s2 = sol_2.id
value = arrayP[(s2, s1)]
if (value is None):
row.append(TD('-'))
else:
row.append(TD(value))
rows.append(row)
output = TABLE(THEAD(header), rows, _class='delphi_wide')
output = DIV(H4(T("Array P: proportion of times that solution in column is preferred over it's partner in row, assuming that pairs not ranked start at the level of indifference (0.5)")), output)
grids.append(output)
grids.append(NBSP())
header = TR(TD())
rows = TBODY()
footer = TR(TH('Total'))
footer2 = TR(TH('Scale'))
totals = {}
counts = {}
for solution in solutions:
s1 = solution.id
totals[s1] = 0
counts[s1] = 0
for solution in solutions:
header.append(TH(solution.name))
s1 = solution.id
row = TR(TH(solution.name))
for sol_2 in solutions:
s2 = sol_2.id
value = arrayX[(s2, s1)]
if (value is None):
row.append(TD('-'))
else:
row.append(TD(value))
if (value is not None):
totals[s2] += value
counts[s2] += 1
rows.append(row)
for solution in solutions:
s1 = solution.id
footer.append(TH(totals[s1]))
if counts[s1]:
solution.scale = (totals[s1] / counts[s1])
footer2.append(TH(solution.scale))
else:
solution.scale = 0
footer2.append(TH())
output = TABLE(THEAD(header), rows, footer, footer2, _class='delphi_wide')
output = DIV(H4(T('Array X: unit normal deviate')), output)
grids.append(output)
grids.append(NBSP())
header = TR(TD())
rows = TBODY()
for solution in solutions:
header.append(TH(solution.name))
s1 = solution.id
row = TR(TH(solution.name))
for sol_2 in solutions:
s2 = sol_2.id
value = arrayP2[(s2, s1)]
if (value is None):
row.append(TD('-'))
else:
row.append(TD(value))
rows.append(row)
output = TABLE(THEAD(header), rows, _class='delphi_wide')
output = DIV(H4(T("Array P2: proportion of times that solution in column is preferred over it's partner in row, assuming that non-votes move towards indifference")), output)
grids.append(output)
grids.append(NBSP())
header = TR(TD())
rows = TBODY()
footer = TR(TH('Total'))
footer2 = TR(TH('Scale'))
totals = {}
counts = {}
for solution in solutions:
s1 = solution.id
totals[s1] = 0
counts[s1] = 0
for solution in solutions:
header.append(TH(solution.name))
s1 = solution.id
row = TR(TH(solution.name))
for sol_2 in solutions:
s2 = sol_2.id
value = arrayU[(s2, s1)]
if (value is None):
row.append(TD('-'))
else:
row.append(TD(value))
if (value is not None):
totals[s2] += value
counts[s2] += 1
rows.append(row)
for solution in solutions:
s1 = solution.id
footer.append(TH(totals[s1]))
if counts[s1]:
solution.uncertainty = (totals[s1] / counts[s1])
footer2.append(TH(solution.uncertainty))
else:
solution.uncertainty = 0
footer2.append(TH())
output = TABLE(THEAD(header), rows, footer, footer2, _class='delphi_wide')
output = DIV(H4(T('Array U: unit normal deviate of the uncertainty value (assuming that all unvoted items return the probability towards indifference)')), output)
grids.append(output)
def scale(solution):
return float(solution.scale)
solutions = solutions.sort(scale, reverse=True)
n = len(solutions)
image = ''
if image:
from s3chart import S3Chart
chart = S3Chart(9, 6)
fig = chart.fig
ax = fig.add_axes([0.35, 0.1, 0.6, 0.8])
problem = r.record
ax.set_title(problem.name)
labels = []
scales = []
uncertainties = []
for solution in solutions:
labels.append(solution.name)
scales.append(solution.scale)
uncertainties.append(solution.uncertainty)
from numpy import arange
ind = arange(n)
width = 0.35
ax.set_yticks((ind + width))
ax.set_yticklabels(labels)
labels = ax.get_yticklabels()
for label in labels:
label.set_size(8)
ax.set_xlabel('Scale')
ax.xaxis.grid(True)
rects1 = ax.barh(ind, scales, width, linewidth=0)
rects2 = ax.barh((ind + width), uncertainties, width, linewidth=0, color='red')
ax.legend((rects1[0], rects2[0]), ('Scale', 'Uncertainty'))
image = chart.draw()
classes = 5
q = []
qappend = q.append
for i in range((classes - 1)):
qappend(((1.0 / classes) * (i + 1)))
values = [float(solution.scale) for solution in solutions]
breaks = s3db.stats_quantile(values, q)
breaks = list(breaks)
values_min = min(values)
values_max = max(values)
breaks.insert(0, values_min)
breaks.append(values_max)
colours = ['edf8fb', 'b2e2e2', '66c2a4', '2ca25f', '006d2c']
for solution in solutions:
for i in range(classes):
value = solution.scale
if ((value >= breaks[i]) and (value <= breaks[(i + 1)])):
solution.color = colours[i]
break
thead = THEAD(TR(TH(T('Solution Item'), _rowspan='2'), TH(T('Scale'), _rowspan='2'), TH(T('Uncertainty'), _rowspan='2'), TH(T('Activity Level'), _colspan='3')), TR(TH(T('Voted on')), TH(T('Times Changed')), TH(T('Comments'))))
tbody = TBODY()
for solution in solutions:
rows = True
tbody.append(TR(TD(solution.name), TD(solution.scale, _class='taright'), TD(solution.uncertainty, _class='taright'), TD(solution.votes(), _class='tacenter'), TD(solution.changes, _class='tacenter'), TD(solution.comments(), _class='tacenter'), _style=('background:#%s' % solution.color)))
summary = TABLE(thead, tbody, _class='delphi_wide')
s3.stylesheets.append('S3/delphi.css')
return dict(rheader=rheader, num_voted=num_voted, chart=image, summary=summary, grids=grids)
|
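Stripped of the HTML grid building, the statistics in results() are Thurstone-style paired-comparison scaling: tally pairwise wins (array F), convert to proportions (array P), map through the inverse normal CDF (array X), and average each column into a scale value. A condensed standalone sketch under those assumptions, using Python 3.8+ NormalDist in place of the table lookup; it is not the controller's code:

from statistics import NormalDist

def thurstone_scale(wins, items):
    # wins[(a, b)] = number of voters who preferred solution a over b
    inv = NormalDist().inv_cdf
    scale = {}
    for a in items:
        zs = []
        for b in items:
            if a == b:
                continue
            total = wins[(a, b)] + wins[(b, a)]
            p = wins[(a, b)] / total if total else 0.5  # indifference when unvoted
            p = min(max(p, 0.01), 0.99)  # clamp extremes, as the controller does
            zs.append(inv(p))
        scale[a] = sum(zs) / len(zs)
    return scale

print(thurstone_scale({(1, 2): 9, (2, 1): 1}, [1, 2]))  # solution 1 scores higher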
def discuss(r, **attr):
' Custom Method to manage the discussion of a Problem or Solution '
if r.component:
resourcename = 'solution'
id = r.component_id
else:
resourcename = 'problem'
id = r.id
rheader = problem_rheader(r)
ckeditor = URL(c='static', f='ckeditor', args='ckeditor.js')
s3.scripts.append(ckeditor)
adapter = URL(c='static', f='ckeditor', args=['adapters', 'jquery.js'])
s3.scripts.append(adapter)
js = ''.join(('i18n.reply="', str(T('Reply')), '"\nvar img_path=S3.Ap.concat(\'/static/img/jCollapsible/\')\nvar ck_config={toolbar:[[\'Bold\',\'Italic\',\'-\',\'NumberedList\',\'BulletedList\',\'-\',\'Link\',\'Unlink\',\'-\',\'Smiley\',\'-\',\'Source\',\'Maximize\']],toolbarCanCollapse:false,removePlugins:\'elementspath\'}\nfunction comment_reply(id){\n $(\'#delphi_comment_solution_id__row\').hide()\n $(\'#delphi_comment_solution_id__row1\').hide()\n $(\'#comment-title\').html(i18n.reply)\n var ed = $(\'#delphi_comment_body\').ckeditorGet()\n ed.destroy()\n $(\'#delphi_comment_body\').ckeditor(ck_config)\n $(\'#comment-form\').insertAfter($(\'#comment-\'+id))\n $(\'#delphi_comment_parent\').val(id)\n var solution_id=$(\'#comment-\'+id).attr(\'solution_id\')\n if(undefined!=solution_id){\n $(\'#delphi_comment_solution_id\').val(solution_id)\n }\n}'))
s3.js_global.append(js)
response.view = 'delphi/discuss.html'
return dict(rheader=rheader, resourcename=resourcename, id=id)
| 8,840,206,016,214,611,000
|
Custom Method to manage the discussion of a Problem or Solution
|
controllers/delphi.py
|
discuss
|
Code4SierraLeone/shdms
|
python
|
def discuss(r, **attr):
' '
if r.component:
resourcename = 'solution'
id = r.component_id
else:
resourcename = 'problem'
id = r.id
rheader = problem_rheader(r)
ckeditor = URL(c='static', f='ckeditor', args='ckeditor.js')
s3.scripts.append(ckeditor)
adapter = URL(c='static', f='ckeditor', args=['adapters', 'jquery.js'])
s3.scripts.append(adapter)
js = ''.join(('i18n.reply="', str(T('Reply')), '"\nvar img_path=S3.Ap.concat(\'/static/img/jCollapsible/\')\nvar ck_config={toolbar:[[\'Bold\',\'Italic\',\'-\',\'NumberedList\',\'BulletedList\',\'-\',\'Link\',\'Unlink\',\'-\',\'Smiley\',\'-\',\'Source\',\'Maximize\']],toolbarCanCollapse:false,removePlugins:\'elementspath\'}\nfunction comment_reply(id){\n $(\'#delphi_comment_solution_id__row\').hide()\n $(\'#delphi_comment_solution_id__row1\').hide()\n $(\'#comment-title\').html(i18n.reply)\n var ed = $(\'#delphi_comment_body\').ckeditorGet()\n ed.destroy()\n $(\'#delphi_comment_body\').ckeditor(ck_config)\n $(\'#comment-form\').insertAfter($(\'#comment-\'+id))\n $(\'#delphi_comment_parent\').val(id)\n var solution_id=$(\'#comment-\'+id).attr(\'solution_id\')\n if(undefined!=solution_id){\n $(\'#delphi_comment_solution_id\').val(solution_id)\n }\n}'))
s3.js_global.append(js)
response.view = 'delphi/discuss.html'
return dict(rheader=rheader, resourcename=resourcename, id=id)
|
def comment_parse(comment, comments, solution_id=None):
'\n Parse a Comment\n\n @param: comment - a gluon.sql.Row: the current comment\n @param: comments - a gluon.sql.Rows: full list of comments\n @param: solution_id - a reference ID: optional solution commented on\n '
author = B(T('Anonymous'))
if comment.created_by:
utable = s3db.auth_user
ptable = s3db.pr_person
ltable = s3db.pr_person_user
query = (utable.id == comment.created_by)
left = [ltable.on((ltable.user_id == utable.id)), ptable.on((ptable.pe_id == ltable.pe_id))]
row = db(query).select(utable.email, ptable.first_name, ptable.middle_name, ptable.last_name, left=left, limitby=(0, 1)).first()
if row:
person = row.pr_person
user = row[utable._tablename]
username = s3_fullname(person)
email = user.email.strip().lower()
import hashlib
hash = hashlib.md5(email).hexdigest()
url = ('http://www.gravatar.com/%s' % hash)
author = B(A(username, _href=url, _target='top'))
if ((not solution_id) and comment.solution_id):
solution = ('re: %s' % s3db.delphi_solution_represent(comment.solution_id))
header = DIV(author, ' ', solution)
solution_id = comment.solution_id
else:
header = author
thread = LI(DIV(s3base.s3_avatar_represent(comment.created_by), DIV(DIV(header, _class='comment-header'), DIV(XML(comment.body)), _class='comment-text'), DIV(DIV(comment.created_on, _class='comment-date'), DIV(A(T('Reply'), _class='action-btn'), _onclick=('comment_reply(%i);' % comment.id), _class='comment-reply'), _class='fright'), _id=('comment-%i' % comment.id), _solution_id=solution_id, _class='comment-box'))
children = UL(_class='children')
id = comment.id
count = 0
for comment in comments:
if (comment.parent == id):
count = 1
child = comment_parse(comment, comments, solution_id=solution_id)
children.append(child)
if (count == 1):
thread.append(children)
return thread
| -1,991,871,730,518,547,000
|
Parse a Comment
@param: comment - a gluon.sql.Row: the current comment
@param: comments - a gluon.sql.Rows: full list of comments
@param: solution_id - a reference ID: optional solution commented on
|
controllers/delphi.py
|
comment_parse
|
Code4SierraLeone/shdms
|
python
|
def comment_parse(comment, comments, solution_id=None):
'\n Parse a Comment\n\n @param: comment - a gluon.sql.Row: the current comment\n @param: comments - a gluon.sql.Rows: full list of comments\n @param: solution_id - a reference ID: optional solution commented on\n '
author = B(T('Anonymous'))
if comment.created_by:
utable = s3db.auth_user
ptable = s3db.pr_person
ltable = s3db.pr_person_user
query = (utable.id == comment.created_by)
left = [ltable.on((ltable.user_id == utable.id)), ptable.on((ptable.pe_id == ltable.pe_id))]
row = db(query).select(utable.email, ptable.first_name, ptable.middle_name, ptable.last_name, left=left, limitby=(0, 1)).first()
if row:
person = row.pr_person
user = row[utable._tablename]
username = s3_fullname(person)
email = user.email.strip().lower()
import hashlib
hash = hashlib.md5(email).hexdigest()
url = ('http://www.gravatar.com/%s' % hash)
author = B(A(username, _href=url, _target='top'))
if ((not solution_id) and comment.solution_id):
solution = ('re: %s' % s3db.delphi_solution_represent(comment.solution_id))
header = DIV(author, ' ', solution)
solution_id = comment.solution_id
else:
header = author
thread = LI(DIV(s3base.s3_avatar_represent(comment.created_by), DIV(DIV(header, _class='comment-header'), DIV(XML(comment.body)), _class='comment-text'), DIV(DIV(comment.created_on, _class='comment-date'), DIV(A(T('Reply'), _class='action-btn'), _onclick=('comment_reply(%i);' % comment.id), _class='comment-reply'), _class='fright'), _id=('comment-%i' % comment.id), _solution_id=solution_id, _class='comment-box'))
children = UL(_class='children')
id = comment.id
count = 0
for comment in comments:
if (comment.parent == id):
count = 1
child = comment_parse(comment, comments, solution_id=solution_id)
children.append(child)
if (count == 1):
thread.append(children)
return thread
|
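comment_parse() threads the discussion by re-scanning the full comment list for children of the current node and recursing, an O(n^2) walk. The same shape on plain dicts (illustrative stand-ins for the gluon Rows):

def build_thread(comment, comments):
    # One node per comment; children are found by matching parent pointers.
    node = {'id': comment['id'], 'body': comment['body'], 'children': []}
    for c in comments:
        if c.get('parent') == comment['id']:
            node['children'].append(build_thread(c, comments))
    return node

flat = [
    {'id': 1, 'parent': None, 'body': 'root'},
    {'id': 2, 'parent': 1, 'body': 'reply'},
    {'id': 3, 'parent': 2, 'body': 'nested reply'},
]
roots = [build_thread(c, flat) for c in flat if not c['parent']]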
def comments():
' Function accessed by AJAX from discuss() to handle Comments '
try:
resourcename = request.args[0]
except:
raise HTTP(400)
try:
id = request.args[1]
except:
raise HTTP(400)
if (resourcename == 'problem'):
problem_id = id
solution_id = None
elif (resourcename == 'solution'):
stable = s3db.delphi_solution
query = (stable.id == id)
solution = db(query).select(stable.problem_id, limitby=(0, 1)).first()
if solution:
problem_id = solution.problem_id
solution_id = id
else:
raise HTTP(400)
else:
raise HTTP(400)
table = s3db.delphi_comment
field = table.problem_id
field.default = problem_id
field.writable = field.readable = False
sfield = table.solution_id
if solution_id:
sfield.default = solution_id
sfield.writable = sfield.readable = False
else:
sfield.label = T('Related to Solution (optional)')
sfield.requires = IS_EMPTY_OR(IS_ONE_OF(db, 'delphi_solution.id', s3.delphi_solution_represent, filterby='problem_id', filter_opts=(problem_id,)))
from gluon.tools import Crud
form = Crud(db).create(table, formname=('delphi_%s/%s' % (resourcename, id)))
if solution_id:
comments = db((sfield == solution_id)).select(table.id, table.parent, table.body, table.created_by, table.created_on)
else:
comments = db((field == problem_id)).select(table.id, table.parent, table.solution_id, table.body, table.created_by, table.created_on)
output = UL(_id='comments')
for comment in comments:
if (not comment.parent):
thread = comment_parse(comment, comments, solution_id=solution_id)
output.append(thread)
script = "$('#comments').collapsible({xoffset:'-5',yoffset:'50',imagehide:img_path+'arrow-down.png',imageshow:img_path+'arrow-right.png',defaulthide:false})\n$('#delphi_comment_parent__row1').hide()\n$('#delphi_comment_parent__row').hide()\n$('#delphi_comment_body').ckeditor(ck_config)\n$('#submit_record__row input').click(function(){$('#comment-form').hide();$('#delphi_comment_body').ckeditorGet().destroy();return true;})"
output = DIV(output, DIV(H4(T('New Post'), _id='comment-title'), form, _id='comment-form', _class='clear'), SCRIPT(script))
return XML(output)
| -7,195,253,218,591,263,000
|
Function accessed by AJAX from discuss() to handle Comments
|
controllers/delphi.py
|
comments
|
Code4SierraLeone/shdms
|
python
|
def comments():
' '
try:
resourcename = request.args[0]
except:
raise HTTP(400)
try:
id = request.args[1]
except:
raise HTTP(400)
if (resourcename == 'problem'):
problem_id = id
solution_id = None
elif (resourcename == 'solution'):
stable = s3db.delphi_solution
query = (stable.id == id)
solution = db(query).select(stable.problem_id, limitby=(0, 1)).first()
if solution:
problem_id = solution.problem_id
solution_id = id
else:
raise HTTP(400)
else:
raise HTTP(400)
table = s3db.delphi_comment
field = table.problem_id
field.default = problem_id
field.writable = field.readable = False
sfield = table.solution_id
if solution_id:
sfield.default = solution_id
sfield.writable = sfield.readable = False
else:
sfield.label = T('Related to Solution (optional)')
sfield.requires = IS_EMPTY_OR(IS_ONE_OF(db, 'delphi_solution.id', s3.delphi_solution_represent, filterby='problem_id', filter_opts=(problem_id,)))
from gluon.tools import Crud
form = Crud(db).create(table, formname=('delphi_%s/%s' % (resourcename, id)))
if solution_id:
comments = db((sfield == solution_id)).select(table.id, table.parent, table.body, table.created_by, table.created_on)
else:
comments = db((field == problem_id)).select(table.id, table.parent, table.solution_id, table.body, table.created_by, table.created_on)
output = UL(_id='comments')
for comment in comments:
if (not comment.parent):
thread = comment_parse(comment, comments, solution_id=solution_id)
output.append(thread)
script = "$('#comments').collapsible({xoffset:'-5',yoffset:'50',imagehide:img_path+'arrow-down.png',imageshow:img_path+'arrow-right.png',defaulthide:false})\n$('#delphi_comment_parent__row1').hide()\n$('#delphi_comment_parent__row').hide()\n$('#delphi_comment_body').ckeditor(ck_config)\n$('#submit_record__row input').click(function(){$('#comment-form').hide();$('#delphi_comment_body').ckeditorGet().destroy();return true;})"
output = DIV(output, DIV(H4(T('New Post'), _id='comment-title'), form, _id='comment-form', _class='clear'), SCRIPT(script))
return XML(output)
|
def get_type(x):
' Return the most specific type in the type hierarchy that applies to x\n and a boolean indicating whether x is an AST. If the type cannot be\n determined, return TYPE_OBJECT as the type. '
if isinstance(x, (int, long)):
return (TYPE_INT, False)
elif isinstance(x, float):
return (TYPE_FLOAT, False)
elif isinstance(x, basestring):
if (len(x) == 1):
return (TYPE_CHAR, False)
try:
float(x)
except ValueError:
return (TYPE_STRING, False)
else:
return (TYPE_NUMERIC_STRING, False)
elif isinstance(x, Color):
return (TYPE_COLOR, False)
elif isinstance(x, Media):
return (TYPE_MEDIA, False)
elif isinstance(x, Vector):
return (TYPE_VECTOR, False)
elif hasattr(x, 'return_type'):
return (x.return_type, False)
elif isinstance(x, ast.Num):
return (get_type(x.n)[0], True)
elif isinstance(x, ast.Str):
return (get_type(x.s)[0], True)
elif isinstance(x, ast.Name):
try:
value = eval(x.id)
except NameError:
return (TYPE_OBJECT, True)
else:
return (get_type(value)[0], True)
elif isinstance(x, ast.Subscript):
if (x.value == BOX_AST):
return (TYPE_BOX, True)
elif isinstance(x, ast.Call):
if isinstance(x.func, ast.Name):
if (x.func.id == 'float'):
return (TYPE_FLOAT, True)
elif (x.func.id in ('int', 'ord')):
return (TYPE_INT, True)
elif (x.func.id == 'chr'):
return (TYPE_CHAR, True)
elif (x.func.id in ('repr', 'str', 'unicode')):
return (TYPE_STRING, True)
elif (x.func.id == 'Color'):
return (TYPE_COLOR, True)
elif (x.func.id == 'Media'):
return (TYPE_MEDIA, True)
elif isinstance(x, ast.UnaryOp):
if issubclass(x.op, ast.Not):
return (TYPE_BOOL, True)
else:
return get_type(x.operand)
if isinstance(x, (ast.BoolOp, ast.Compare)):
return (TYPE_BOOL, True)
elif isinstance(x, ast.BinOp):
type_left = get_type(x.left)[0]
type_right = get_type(x.right)[0]
if ((type_left == TYPE_STRING) or (type_right == TYPE_STRING)):
return (TYPE_STRING, True)
if (type_left == type_right == TYPE_INT):
return (TYPE_INT, True)
else:
return (TYPE_FLOAT, True)
return (TYPE_OBJECT, isinstance(x, ast.AST))
| -453,047,719,814,319,000
|
Return the most specific type in the type hierarchy that applies to x
and a boolean indicating whether x is an AST. If the type cannot be
determined, return TYPE_OBJECT as the type.
|
TurtleArt/tatype.py
|
get_type
|
sugar-activities/4742-activity
|
python
|
def get_type(x):
' Return the most specific type in the type hierarchy that applies to x\n and a boolean indicating whether x is an AST. If the type cannot be\n determined, return TYPE_OBJECT as the type. '
if isinstance(x, (int, long)):
return (TYPE_INT, False)
elif isinstance(x, float):
return (TYPE_FLOAT, False)
elif isinstance(x, basestring):
if (len(x) == 1):
return (TYPE_CHAR, False)
try:
float(x)
except ValueError:
return (TYPE_STRING, False)
else:
return (TYPE_NUMERIC_STRING, False)
elif isinstance(x, Color):
return (TYPE_COLOR, False)
elif isinstance(x, Media):
return (TYPE_MEDIA, False)
elif isinstance(x, Vector):
return (TYPE_VECTOR, False)
elif hasattr(x, 'return_type'):
return (x.return_type, False)
elif isinstance(x, ast.Num):
return (get_type(x.n)[0], True)
elif isinstance(x, ast.Str):
return (get_type(x.s)[0], True)
elif isinstance(x, ast.Name):
try:
value = eval(x.id)
except NameError:
return (TYPE_OBJECT, True)
else:
return (get_type(value)[0], True)
elif isinstance(x, ast.Subscript):
if (x.value == BOX_AST):
return (TYPE_BOX, True)
elif isinstance(x, ast.Call):
if isinstance(x.func, ast.Name):
if (x.func.id == 'float'):
return (TYPE_FLOAT, True)
elif (x.func.id in ('int', 'ord')):
return (TYPE_INT, True)
elif (x.func.id == 'chr'):
return (TYPE_CHAR, True)
elif (x.func.id in ('repr', 'str', 'unicode')):
return (TYPE_STRING, True)
elif (x.func.id == 'Color'):
return (TYPE_COLOR, True)
elif (x.func.id == 'Media'):
return (TYPE_MEDIA, True)
elif isinstance(x, ast.UnaryOp):
if issubclass(x.op, ast.Not):
return (TYPE_BOOL, True)
else:
return get_type(x.operand)
if isinstance(x, (ast.BoolOp, ast.Compare)):
return (TYPE_BOOL, True)
elif isinstance(x, ast.BinOp):
type_left = get_type(x.left)[0]
type_right = get_type(x.right)[0]
if ((type_left == TYPE_STRING) or (type_right == TYPE_STRING)):
return (TYPE_STRING, True)
if (type_left == type_right == TYPE_INT):
return (TYPE_INT, True)
else:
return (TYPE_FLOAT, True)
return (TYPE_OBJECT, isinstance(x, ast.AST))
|
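Illustration (not part of the original record): get_type returns a (type, is_ast) pair, and its string branch distinguishes single characters, numeric strings, and plain strings. A standalone sketch of just that branch, ported to Python 3 (the original is Python 2: basestring, long), with stand-in type names.

def classify_string(s):
    # Mirrors the basestring branch of get_type above.
    if len(s) == 1:
        return 'TYPE_CHAR'
    try:
        float(s)
    except ValueError:
        return 'TYPE_STRING'
    return 'TYPE_NUMERIC_STRING'

assert classify_string('a') == 'TYPE_CHAR'
assert classify_string('3.14') == 'TYPE_NUMERIC_STRING'
assert classify_string('hello') == 'TYPE_STRING'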
def get_converter(old_type, new_type):
' If there is a converter old_type -> new_type, return it. Else return\n None. If a chain of converters is necessary, return it as a tuple or\n list (starting with the innermost, first-to-apply converter). '
if (new_type == TYPE_OBJECT):
return identity
if (old_type == new_type):
return identity
converters_from_old = TYPE_CONVERTERS.get(old_type)
if (converters_from_old is None):
return None
converter = converters_from_old.get(new_type)
if (converter is not None):
return converter
else:
backtrace = converters_from_old.copy()
new_backtrace = backtrace.copy()
break_all = False
while True:
newest_backtrace = {}
for t in new_backtrace:
for new_t in TYPE_CONVERTERS.get(t, {}):
if (new_t not in backtrace):
newest_backtrace[new_t] = t
backtrace[new_t] = t
if (new_t == new_type):
break_all = True
break
if break_all:
break
if (break_all or (not newest_backtrace)):
break
new_backtrace = newest_backtrace
if (new_type in backtrace):
converter_chain = []
t = new_type
while ((t in backtrace) and isinstance(backtrace[t], Type)):
converter_chain.insert(0, TYPE_CONVERTERS[backtrace[t]][t])
t = backtrace[t]
converter_chain.insert(0, TYPE_CONVERTERS[old_type][t])
return converter_chain
return None
| -7,958,143,292,103,566,000
|
If there is a converter old_type -> new_type, return it. Else return
None. If a chain of converters is necessary, return it as a tuple or
list (starting with the innermost, first-to-apply converter).
|
TurtleArt/tatype.py
|
get_converter
|
sugar-activities/4742-activity
|
python
|
def get_converter(old_type, new_type):
' If there is a converter old_type -> new_type, return it. Else return\n None. If a chain of converters is necessary, return it as a tuple or\n list (starting with the innermost, first-to-apply converter). '
if (new_type == TYPE_OBJECT):
return identity
if (old_type == new_type):
return identity
converters_from_old = TYPE_CONVERTERS.get(old_type)
if (converters_from_old is None):
return None
converter = converters_from_old.get(new_type)
if (converter is not None):
return converter
else:
backtrace = converters_from_old.copy()
new_backtrace = backtrace.copy()
break_all = False
while True:
newest_backtrace = {}
for t in new_backtrace:
for new_t in TYPE_CONVERTERS.get(t, {}):
if (new_t not in backtrace):
newest_backtrace[new_t] = t
backtrace[new_t] = t
if (new_t == new_type):
break_all = True
break
if break_all:
break
if (break_all or (not newest_backtrace)):
break
new_backtrace = newest_backtrace
if (new_type in backtrace):
converter_chain = []
t = new_type
while ((t in backtrace) and isinstance(backtrace[t], Type)):
converter_chain.insert(0, TYPE_CONVERTERS[backtrace[t]][t])
t = backtrace[t]
converter_chain.insert(0, TYPE_CONVERTERS[old_type][t])
return converter_chain
return None
|
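Illustration (not part of the original record): when no direct converter exists, get_converter searches the converter table breadth-first and returns the chain with the first-to-apply converter first. A toy standalone version of that search, with a hypothetical converter table.

from collections import deque

CONVERTERS = {
    'char': {'int': ord},
    'int': {'float': float},
    'float': {'str': str},
}

def find_chain(old, new):
    # BFS from old; the predecessor map lets us reconstruct the path.
    prev, queue = {old: None}, deque([old])
    while queue:
        t = queue.popleft()
        if t == new:
            chain, cur = [], t
            while prev[cur] is not None:
                chain.insert(0, CONVERTERS[prev[cur]][cur])
                cur = prev[cur]
            return chain
        for nxt in CONVERTERS.get(t, {}):
            if nxt not in prev:
                prev[nxt] = t
                queue.append(nxt)
    return None

chain = find_chain('char', 'str')   # [ord, float, str], innermost first
value = 'a'
for conv in chain:
    value = conv(value)
print(value)  # '97.0'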
def convert(x, new_type, old_type=None, converter=None):
' Convert x to the new type if possible.\n old_type -- the type of x. If not given, it is computed. '
if (not isinstance(new_type, Type)):
raise ValueError(('%s is not a type in the type hierarchy' % repr(new_type)))
if (new_type == TYPE_OBJECT):
return x
if (not isinstance(old_type, Type)):
(old_type, is_an_ast) = get_type(x)
else:
is_an_ast = isinstance(x, ast.AST)
if (old_type == new_type):
return x
if (is_an_ast and (old_type == TYPE_BOX)):
new_type_ast = ast.Name(id=new_type.constant_name)
return get_call_ast('convert', [x, new_type_ast], return_type=new_type)
if (converter is None):
converter = get_converter(old_type, new_type)
if (converter is None):
raise TATypeError(bad_value=x, bad_type=old_type, req_type=new_type, message='found no converter for this type combination')
def _apply_converter(converter, y):
try:
if is_an_ast:
if (converter == identity):
return y
elif is_instancemethod(converter):
func = ast.Attribute(value=y, attr=converter.im_func.__name__, ctx=ast.Load)
return get_call_ast(func)
else:
func_name = converter.__name__
return get_call_ast(func_name, [y])
else:
return converter(y)
except BaseException:
raise TATypeError(bad_value=x, bad_type=old_type, req_type=new_type, message='error during conversion')
if isinstance(converter, (list, tuple)):
result = x
for conv in converter:
result = _apply_converter(conv, result)
return result
elif (converter is not None):
return _apply_converter(converter, x)
| -7,584,842,374,124,464,000
|
Convert x to the new type if possible.
old_type -- the type of x. If not given, it is computed.
|
TurtleArt/tatype.py
|
convert
|
sugar-activities/4742-activity
|
python
|
def convert(x, new_type, old_type=None, converter=None):
' Convert x to the new type if possible.\n old_type -- the type of x. If not given, it is computed. '
if (not isinstance(new_type, Type)):
raise ValueError(('%s is not a type in the type hierarchy' % repr(new_type)))
if (new_type == TYPE_OBJECT):
return x
if (not isinstance(old_type, Type)):
(old_type, is_an_ast) = get_type(x)
else:
is_an_ast = isinstance(x, ast.AST)
if (old_type == new_type):
return x
if (is_an_ast and (old_type == TYPE_BOX)):
new_type_ast = ast.Name(id=new_type.constant_name)
return get_call_ast('convert', [x, new_type_ast], return_type=new_type)
if (converter is None):
converter = get_converter(old_type, new_type)
if (converter is None):
raise TATypeError(bad_value=x, bad_type=old_type, req_type=new_type, message='found no converter for this type combination')
def _apply_converter(converter, y):
try:
if is_an_ast:
if (converter == identity):
return y
elif is_instancemethod(converter):
func = ast.Attribute(value=y, attr=converter.im_func.__name__, ctx=ast.Load)
return get_call_ast(func)
else:
func_name = converter.__name__
return get_call_ast(func_name, [y])
else:
return converter(y)
except BaseException:
raise TATypeError(bad_value=x, bad_type=old_type, req_type=new_type, message='error during conversion')
if isinstance(converter, (list, tuple)):
result = x
for conv in converter:
result = _apply_converter(conv, result)
return result
elif (converter is not None):
return _apply_converter(converter, x)
|
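Illustration (not part of the original record): for concrete (non-AST) values, convert applies a single converter, or each converter of a chain in order. A minimal sketch of that path with a hypothetical chain.

def apply_chain(x, converters):
    # The first-to-apply converter comes first, as in convert above.
    result = x
    for conv in converters:
        result = conv(result)
    return result

print(apply_chain('42', [float, int]))  # '42' -> 42.0 -> 42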
def get_call_ast(func_name, args=None, kwargs=None, return_type=None):
' Return an AST representing the call to a function with the name\n func_name, passing it the arguments args (given as a list) and the\n keyword arguments kwargs (given as a dictionary).\n func_name -- either the name of a callable as a string, or an AST\n representing a callable expression\n return_type -- if this is not None, return a TypedCall object with this\n return type instead '
if (args is None):
args = []
keywords = []
if (kwargs is not None):
for (key, value) in kwargs.iteritems():
keywords.append(ast.keyword(arg=key, value=value))
if isinstance(func_name, ast.AST):
func_ast = func_name
else:
func_ast = ast.Name(id=func_name, ctx=ast.Load)
if (return_type is None):
return ast.Call(func=func_ast, args=args, keywords=keywords, starargs=None, kwargs=None)
else:
return TypedCall(func=func_ast, args=args, keywords=keywords, return_type=return_type)
| 7,071,092,608,266,613,000
|
Return an AST representing the call to a function with the name
func_name, passing it the arguments args (given as a list) and the
keyword arguments kwargs (given as a dictionary).
func_name -- either the name of a callable as a string, or an AST
representing a callable expression
return_type -- if this is not None, return a TypedCall object with this
return type instead
|
TurtleArt/tatype.py
|
get_call_ast
|
sugar-activities/4742-activity
|
python
|
def get_call_ast(func_name, args=None, kwargs=None, return_type=None):
' Return an AST representing the call to a function with the name\n func_name, passing it the arguments args (given as a list) and the\n keyword arguments kwargs (given as a dictionary).\n func_name -- either the name of a callable as a string, or an AST\n representing a callable expression\n return_type -- if this is not None, return a TypedCall object with this\n return type instead '
if (args is None):
args = []
keywords = []
if (kwargs is not None):
for (key, value) in kwargs.iteritems():
keywords.append(ast.keyword(arg=key, value=value))
if isinstance(func_name, ast.AST):
func_ast = func_name
else:
func_ast = ast.Name(id=func_name, ctx=ast.Load)
if (return_type is None):
return ast.Call(func=func_ast, args=args, keywords=keywords, starargs=None, kwargs=None)
else:
return TypedCall(func=func_ast, args=args, keywords=keywords, return_type=return_type)
|
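Illustration (not part of the original record): with return_type=None the helper builds a plain ast.Call. A standalone Python 3 equivalent (the original targets Python 2's AST, hence the starargs/kwargs fields and the bare ast.Load class), compiled and evaluated to show the node is well-formed.

import ast

call = ast.Call(
    func=ast.Name(id='max', ctx=ast.Load()),
    args=[ast.Constant(3), ast.Constant(7)],
    keywords=[],
)
tree = ast.fix_missing_locations(ast.Expression(body=call))
print(eval(compile(tree, '<ast>', 'eval')))  # 7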
def __init__(self, constant_name, value):
" constant_name -- the name of the constant that points to this Type\n object\n value -- an arbitrary integer that is different from the values of\n all other Types. The order of the integers doesn't matter. "
self.constant_name = constant_name
self.value = value
| -7,065,363,459,867,630,000
|
constant_name -- the name of the constant that points to this Type
object
value -- an arbitrary integer that is different from the values of
all other Types. The order of the integers doesn't matter.
|
TurtleArt/tatype.py
|
__init__
|
sugar-activities/4742-activity
|
python
|
def __init__(self, constant_name, value):
" constant_name -- the name of the constant that points to this Type\n object\n value -- an arbitrary integer that is different from the values of\n all other Types. The order of the integers doesn't matter. "
self.constant_name = constant_name
self.value = value
|
def __init__(self, bad_value, bad_type=None, req_type=None, message=''):
' bad_value -- the mis-typed value that caused the error\n bad_type -- the type of the bad_value\n req_type -- the type that the value was expected to have\n message -- short statement about the cause of the error. It is\n not shown to the user, but may appear in debugging output. '
self.bad_value = bad_value
self.bad_type = bad_type
self.req_type = req_type
self.message = message
| -8,337,545,677,568,987,000
|
bad_value -- the mis-typed value that caused the error
bad_type -- the type of the bad_value
req_type -- the type that the value was expected to have
message -- short statement about the cause of the error. It is
not shown to the user, but may appear in debugging output.
|
TurtleArt/tatype.py
|
__init__
|
sugar-activities/4742-activity
|
python
|
def __init__(self, bad_value, bad_type=None, req_type=None, message=''):
' bad_value -- the mis-typed value that caused the error\n bad_type -- the type of the bad_value\n req_type -- the type that the value was expected to have\n message -- short statement about the cause of the error. It is\n not shown to the user, but may appear in debugging output. '
self.bad_value = bad_value
self.bad_type = bad_type
self.req_type = req_type
self.message = message
|
def poke_mode_only(cls):
"\n Class Decorator for child classes of BaseSensorOperator to indicate\n that instances of this class are only safe to use poke mode.\n\n Will decorate all methods in the class to assert they did not change\n the mode from 'poke'.\n\n :param cls: BaseSensor class to enforce methods only use 'poke' mode.\n :type cls: type\n "
def decorate(cls_type):
def mode_getter(_):
return 'poke'
def mode_setter(_, value):
if (value != 'poke'):
raise ValueError(f"cannot set mode to 'poke'.")
if (not issubclass(cls_type, BaseSensorOperator)):
raise ValueError(f'poke_mode_only decorator should only be applied to subclasses of BaseSensorOperator, got:{cls_type}.')
cls_type.mode = property(mode_getter, mode_setter)
return cls_type
return decorate(cls)
| 1,229,829,944,032,250,400
|
Class Decorator for child classes of BaseSensorOperator to indicate
that instances of this class are only safe to use poke mode.
Replaces the class's mode attribute with a property that always
returns 'poke' and rejects any other value.
:param cls: BaseSensor class to enforce methods only use 'poke' mode.
:type cls: type
|
airflow/sensors/base_sensor_operator.py
|
poke_mode_only
|
CoverGenius/airflow
|
python
|
def poke_mode_only(cls):
"\n Class Decorator for child classes of BaseSensorOperator to indicate\n that instances of this class are only safe to use poke mode.\n\n Will decorate all methods in the class to assert they did not change\n the mode from 'poke'.\n\n :param cls: BaseSensor class to enforce methods only use 'poke' mode.\n :type cls: type\n "
def decorate(cls_type):
def mode_getter(_):
return 'poke'
def mode_setter(_, value):
if (value != 'poke'):
raise ValueError(f"cannot set mode to 'poke'.")
if (not issubclass(cls_type, BaseSensorOperator)):
raise ValueError(f'poke_mode_only decorator should only be applied to subclasses of BaseSensorOperator, got:{cls_type}.')
cls_type.mode = property(mode_getter, mode_setter)
return cls_type
return decorate(cls)
|
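Illustration (not part of the original record): a hedged usage sketch, assuming an Airflow environment where poke_mode_only and BaseSensorOperator are importable from the module above; the sensor class and task id are hypothetical.

from airflow.sensors.base_sensor_operator import BaseSensorOperator, poke_mode_only

@poke_mode_only
class AlwaysDoneSensor(BaseSensorOperator):  # hypothetical sensor
    def poke(self, context):
        return True

sensor = AlwaysDoneSensor(task_id='wait')    # hypothetical task id
print(sensor.mode)                           # always 'poke'
try:
    sensor.mode = 'reschedule'               # rejected by the property
except ValueError as err:
    print(err)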
def poke(self, context: Dict) -> bool:
'\n Function that the sensors defined while deriving this class should\n override.\n '
raise AirflowException('Override me.')
| -3,133,657,140,022,421,500
|
Function that the sensors defined while deriving this class should
override.
|
airflow/sensors/base_sensor_operator.py
|
poke
|
CoverGenius/airflow
|
python
|
def poke(self, context: Dict) -> bool:
'\n Function that the sensors defined while deriving this class should\n override.\n '
raise AirflowException('Override me.')
|
def _get_next_poke_interval(self, started_at, try_number):
'\n Uses logic similar to the exponential backoff retry delay for operators.\n '
if self.exponential_backoff:
min_backoff = int((self.poke_interval * (2 ** (try_number - 2))))
current_time = timezone.utcnow()
run_hash = int(hashlib.sha1('{}#{}#{}#{}'.format(self.dag_id, self.task_id, started_at, try_number).encode('utf-8')).hexdigest(), 16)
modded_hash = (min_backoff + (run_hash % min_backoff))
delay_backoff_in_seconds = min(modded_hash, (timedelta.max.total_seconds() - 1))
new_interval = min((self.timeout - int((current_time - started_at).total_seconds())), delay_backoff_in_seconds)
self.log.info('new %s interval is %s', self.mode, new_interval)
return new_interval
else:
return self.poke_interval
| 3,211,719,983,406,018,000
|
Uses logic similar to the exponential backoff retry delay for operators.
|
airflow/sensors/base_sensor_operator.py
|
_get_next_poke_interval
|
CoverGenius/airflow
|
python
|
def _get_next_poke_interval(self, started_at, try_number):
'\n \n '
if self.exponential_backoff:
min_backoff = int((self.poke_interval * (2 ** (try_number - 2))))
current_time = timezone.utcnow()
run_hash = int(hashlib.sha1('{}#{}#{}#{}'.format(self.dag_id, self.task_id, started_at, try_number).encode('utf-8')).hexdigest(), 16)
modded_hash = (min_backoff + (run_hash % min_backoff))
delay_backoff_in_seconds = min(modded_hash, (timedelta.max.total_seconds() - 1))
new_interval = min((self.timeout - int((current_time - started_at).total_seconds())), delay_backoff_in_seconds)
self.log.info('new %s interval is %s', self.mode, new_interval)
return new_interval
else:
return self.poke_interval
|
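Illustration (not part of the original record): with exponential_backoff enabled, the base interval doubles per try (poke_interval * 2 ** (try_number - 2)) and a deterministic SHA-1 hash adds jitter in [min_backoff, 2 * min_backoff), capped by the time left before timeout. A standalone numeric sketch with hypothetical dag/task identifiers.

import hashlib

poke_interval, timeout, elapsed = 60, 3600, 120
for try_number in (2, 3, 4):
    min_backoff = int(poke_interval * 2 ** (try_number - 2))
    run_hash = int(hashlib.sha1(
        'my_dag#my_task#2021-01-01T00:00:00#{}'.format(try_number).encode('utf-8')
    ).hexdigest(), 16)
    jittered = min_backoff + (run_hash % min_backoff)  # [min_backoff, 2*min_backoff)
    print(try_number, min(timeout - elapsed, jittered))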
@property
def reschedule(self):
'Define mode rescheduled sensors.'
return (self.mode == 'reschedule')
| -7,624,722,879,257,717,000
|
Define mode rescheduled sensors.
|
airflow/sensors/base_sensor_operator.py
|
reschedule
|
CoverGenius/airflow
|
python
|
@property
def reschedule(self):
return (self.mode == 'reschedule')
|
@property
def deps(self):
'\n Adds one additional dependency for all sensor operators that\n checks if a sensor task instance can be rescheduled.\n '
if self.reschedule:
return (BaseOperator.deps.fget(self) | {ReadyToRescheduleDep()})
return BaseOperator.deps.fget(self)
| -7,659,577,540,815,607,000
|
Adds one additional dependency for all sensor operators that
checks if a sensor task instance can be rescheduled.
|
airflow/sensors/base_sensor_operator.py
|
deps
|
CoverGenius/airflow
|
python
|
@property
def deps(self):
'\n Adds one additional dependency for all sensor operators that\n checks if a sensor task instance can be rescheduled.\n '
if self.reschedule:
return (BaseOperator.deps.fget(self) | {ReadyToRescheduleDep()})
return BaseOperator.deps.fget(self)
|
def add_source(self, name, similar=None):
"Adds a new source of absorption to the sightline.\n\n The purpose of a source is to hold multiple line models\n together, sometiimes with similar parameters\n\n Args:\n name (str): The name of the absorption source\n similar (dict): A dict of parameters that change with the source,\n not the specific line, default: None, example: similar={'b': 3}\n\n "
self.source_names.append(name)
if ((name == 'Telluric') and (similar is not None)):
par = Parameters()
for key in similar:
par.add(((name + '_') + key), value=similar[key], min=0, max=30)
self.telluric_pars = par
self.all_pars = (self.all_pars + par)
| -5,067,733,584,439,722,000
|
Adds a new source of absorption to the sightline.
The purpose of a source is to hold multiple line models
together, sometimes with similar parameters
Args:
name (str): The name of the absorption source
similar (dict): A dict of parameters that change with the source,
not the specific line, default: None, example: similar={'b': 3}
|
edibles/sightline.py
|
add_source
|
jancami/edibles
|
python
|
def add_source(self, name, similar=None):
"Adds a new source of absorption to the sightline.\n\n The purpose of a source is to hold multiple line models\n together, sometiimes with similar parameters\n\n Args:\n name (str): The name of the absorption source\n similar (dict): A dict of parameters that change with the source,\n not the specific line, default: None, example: similar={'b': 3}\n\n "
self.source_names.append(name)
if ((name == 'Telluric') and (similar is not None)):
par = Parameters()
for key in similar:
par.add(((name + '_') + key), value=similar[key], min=0, max=30)
self.telluric_pars = par
self.all_pars = (self.all_pars + par)
|
def add_line(self, name, source=None, pars=None, guess_data=None):
'Adds a new line to a given absorption source.\n If no source is given, a new one will be created.\n\n Args:\n name (str): The name of the line\n source (str): the name of the source this line will belong to\n pars (dict): user input parameters\n guess_data (1darray): flux data to guess with\n\n '
assert (source is not None), 'Source must not be None'
if (source not in self.source_names):
print()
print("Could not find source '{}' in source_names.".format(source))
print("Creating source '{}'".format(source))
self.add_source(source)
new_line = VoigtModel(prefix=(((source + '_') + name) + '_'))
if (guess_data is not None):
new_pars = new_line.guess(guess_data, x=self.wave)
else:
new_pars = new_line.guess(self.flux, x=self.wave)
if (pars is not None):
for par in pars:
par_name = ((((source + '_') + name) + '_') + par)
new_pars[par_name].set(value=pars[par])
if (source == 'Telluric'):
b_name = (source + '_b')
new_pars[(((source + '_') + name) + '_b')].set(expr=b_name)
new_pars[(((source + '_') + name) + '_lam_0')].set(min=self.Spectrum.xmin, max=self.Spectrum.xmax)
self.old_complete_model = self.complete_model
self.complete_model = (self.complete_model * new_line)
self.old_all_pars = self.all_pars
self.all_pars = (self.all_pars + new_pars)
self.old_cont_model = self.cont_model
self.old_cont_pars = self.cont_model_pars
if (source == 'Telluric'):
try:
self.old_telluric_model = self.telluric_model
self.telluric_model = (self.telluric_model * new_line)
except AttributeError:
self.old_telluric_model = new_line
self.telluric_model = new_line
try:
self.old_telluric_pars = self.telluric_pars
self.telluric_pars = (self.telluric_pars + new_pars)
except AttributeError:
print('Something bad is probably happening')
self.old_telluric_pars = new_pars
self.telluric_pars = new_pars
else:
try:
self.old_nontelluric_model = self.nontelluric_model
self.nontelluric_model = (self.nontelluric_model * new_line)
except AttributeError:
self.old_nontelluric_model = new_line
self.nontelluric_model = new_line
try:
self.old_nontelluric_pars = self.nontelluric_pars
self.nontelluric_pars = (self.nontelluric_pars + new_pars)
except AttributeError:
self.old_nontelluric_pars = new_pars
self.nontelluric_pars = new_pars
lambda_name = (((source + '_') + name) + '_lam_0')
index = bisect.bisect(self.peaks, new_pars[lambda_name])
self.peaks.insert(index, new_pars[lambda_name])
self.most_recent = ((source + '_') + name)
self.n_lines += 1
| -8,732,481,608,611,323,000
|
Adds a new line to a given absorption source.
If no source is given, a new one will be created.
Args:
name (str): The name of the line
source (str): the name of the source this line will belong to
pars (dict): user input parameters
guess_data (1darray): flux data to guess with
|
edibles/sightline.py
|
add_line
|
jancami/edibles
|
python
|
def add_line(self, name, source=None, pars=None, guess_data=None):
'Adds a new line to a given absorption source.\n If no source is given, a new one will be created.\n\n Args:\n name (str): The name of the line\n source (str): the name of the source this line will belong to\n pars (dict): user input parameters\n guess_data (1darray): flux data to guess with\n\n '
assert (source is not None), 'Source must not be None'
if (source not in self.source_names):
print()
print("Could not find source '{}' in source_names.".format(source))
print("Creating source '{}'".format(source))
self.add_source(source)
new_line = VoigtModel(prefix=(((source + '_') + name) + '_'))
if (guess_data is not None):
new_pars = new_line.guess(guess_data, x=self.wave)
else:
new_pars = new_line.guess(self.flux, x=self.wave)
if (pars is not None):
for par in pars:
par_name = ((((source + '_') + name) + '_') + par)
new_pars[par_name].set(value=pars[par])
if (source == 'Telluric'):
b_name = (source + '_b')
new_pars[(((source + '_') + name) + '_b')].set(expr=b_name)
new_pars[(((source + '_') + name) + '_lam_0')].set(min=self.Spectrum.xmin, max=self.Spectrum.xmax)
self.old_complete_model = self.complete_model
self.complete_model = (self.complete_model * new_line)
self.old_all_pars = self.all_pars
self.all_pars = (self.all_pars + new_pars)
self.old_cont_model = self.cont_model
self.old_cont_pars = self.cont_model_pars
if (source == 'Telluric'):
try:
self.old_telluric_model = self.telluric_model
self.telluric_model = (self.telluric_model * new_line)
except AttributeError:
self.old_telluric_model = new_line
self.telluric_model = new_line
try:
self.old_telluric_pars = self.telluric_pars
self.telluric_pars = (self.telluric_pars + new_pars)
except AttributeError:
print('Something bad is probably happening')
self.old_telluric_pars = new_pars
self.telluric_pars = new_pars
else:
try:
self.old_nontelluric_model = self.nontelluric_model
self.nontelluric_model = (self.nontelluric_model * new_line)
except AttributeError:
self.old_nontelluric_model = new_line
self.nontelluric_model = new_line
try:
self.old_nontelluric_pars = self.nontelluric_pars
self.nontelluric_pars = (self.nontelluric_pars + new_pars)
except AttributeError:
self.old_nontelluric_pars = new_pars
self.nontelluric_pars = new_pars
lambda_name = (((source + '_') + name) + '_lam_0')
index = bisect.bisect(self.peaks, new_pars[lambda_name])
self.peaks.insert(index, new_pars[lambda_name])
self.most_recent = ((source + '_') + name)
self.n_lines += 1
|
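Illustration (not part of the original record): add_line names every parameter '<source>_<name>_<param>' and, for Telluric lines, ties each line's b to the single shared Telluric_b through an lmfit expression constraint. A minimal lmfit sketch of that pattern.

from lmfit import Parameters

pars = Parameters()
pars.add('Telluric_b', value=3, min=0, max=30)   # shared source parameter
pars.add('Telluric_line1_b', expr='Telluric_b')  # line parameter follows it
pars.update_constraints()
print(pars['Telluric_line1_b'].value)            # 3, tracks Telluric_b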
def fit(self, data=None, old=False, x=None, report=False, plot=False, weights=None, method='leastsq', **kwargs):
'Fits a model to the sightline data given by the EdiblesSpectrum object.\n\n Args:\n data (1darray): Flux data to fit\n old (bool): default False: If true, fits the older, second-most recent model and parameters\n x (1darray): Wavelength data to fit\n report (bool): default False: If true, prints the report from the fit.\n plot (bool): default False: If true, plots the data and the fit model.\n weights (1darray): Weights for the fit, default: None\n method (str): The method of fitting. default: leastsq\n\n '
if (data is None):
data = self.flux
if (x is None):
x = self.wave
if (old is True):
model = self.old_complete_model
params = self.old_all_pars
else:
model = self.complete_model
params = self.all_pars
self.result = model.fit(data=data, params=params, x=x, weights=weights, method=method, **kwargs)
if report:
print(self.result.fit_report())
self.result.params.pretty_print()
if plot:
self.result.plot_fit()
plt.show()
self.all_pars = self.result.params
if (old is False):
try:
tell_pars = Parameters()
for par_name in self.telluric_pars:
tell_pars.add(self.all_pars[par_name])
assert (len(self.telluric_pars) == len(tell_pars))
self.telluric_pars = tell_pars
except AttributeError:
pass
try:
non_tell_pars = Parameters()
for par_name in self.nontelluric_pars:
non_tell_pars.add(self.all_pars[par_name])
assert (len(self.nontelluric_pars) == len(non_tell_pars))
self.nontelluric_pars = non_tell_pars
except AttributeError:
pass
try:
cont_pars = Parameters()
for par_name in self.cont_model_pars:
cont_pars.add(self.all_pars[par_name])
assert (len(self.cont_model_pars) == len(cont_pars))
self.cont_model_pars = cont_pars
except AttributeError:
pass
| 3,280,400,313,545,762,000
|
Fits a model to the sightline data given by the EdiblesSpectrum object.
Args:
data (1darray): Flux data to fit
old (bool): default False: If true, fits the older, second-most recent model and parameters
x (1darray): Wavelength data to fit
report (bool): default False: If true, prints the report from the fit.
plot (bool): default False: If true, plots the data and the fit model.
weights (1darray): Weights for the fit, default: None
method (str): The method of fitting. default: leastsq
|
edibles/sightline.py
|
fit
|
jancami/edibles
|
python
|
def fit(self, data=None, old=False, x=None, report=False, plot=False, weights=None, method='leastsq', **kwargs):
'Fits a model to the sightline data given by the EdiblesSpectrum object.\n\n Args:\n data (1darray): Flux data to fit\n old (bool): default False: If true, fits the older, second-most recent model and parameters\n x (1darray): Wavelength data to fit\n report (bool): default False: If true, prints the report from the fit.\n plot (bool): default False: If true, plots the data and the fit model.\n weights (1darray): Weights for the fit, default: None\n method (str): The method of fitting. default: leastsq\n\n '
if (data is None):
data = self.flux
if (x is None):
x = self.wave
if (old is True):
model = self.old_complete_model
params = self.old_all_pars
else:
model = self.complete_model
params = self.all_pars
self.result = model.fit(data=data, params=params, x=x, weights=weights, method=method, **kwargs)
if report:
print(self.result.fit_report())
self.result.params.pretty_print()
if plot:
self.result.plot_fit()
plt.show()
self.all_pars = self.result.params
if (old is False):
try:
tell_pars = Parameters()
for par_name in self.telluric_pars:
tell_pars.add(self.all_pars[par_name])
assert (len(self.telluric_pars) == len(tell_pars))
self.telluric_pars = tell_pars
except AttributeError:
pass
try:
non_tell_pars = Parameters()
for par_name in self.nontelluric_pars:
non_tell_pars.add(self.all_pars[par_name])
assert (len(self.nontelluric_pars) == len(non_tell_pars))
self.nontelluric_pars = non_tell_pars
except AttributeError:
pass
try:
cont_pars = Parameters()
for par_name in self.cont_model_pars:
cont_pars.add(self.all_pars[par_name])
assert (len(self.cont_model_pars) == len(cont_pars))
self.cont_model_pars = cont_pars
except AttributeError:
pass
|
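Illustration (not part of the original record): a hedged end-to-end sketch of the Sightline workflow above. The Sightline/EdiblesSpectrum constructors, the import paths, and the FITS filename are assumptions; none of them are shown in this excerpt.

from edibles.utils.edibles_spectrum import EdiblesSpectrum  # assumed import path
from edibles.sightline import Sightline                     # assumed import path

sp = EdiblesSpectrum('HD170740_w860_redl_20140915_O12.fits')  # hypothetical file
sightline = Sightline(sp)                                     # assumed constructor
sightline.add_source('Telluric', similar={'b': 3})
sightline.add_line('line1', source='Telluric', pars={'lam_0': 7664.8})
sightline.fit(report=True, plot=False)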
def freeze(self, pars=None, prefix=None, freeze_cont=True, unfreeze=False):
"Freezes the current params, so you can still add to the\n model but the 'old' parameters will not change\n\n Args:\n prefix (str): Prefix of parameters to freeze, default: None, example: 'Telluric'\n freeze_cont (bool): Freeze the continuum or not, default: True\n unfreeze (bool): unfreezes all parameters except x values of\n spline anchors, default=False\n\n "
if (pars is None):
pars = self.all_pars
if (unfreeze is False):
if prefix:
for par in pars:
if (prefix in par):
pars[par].set(vary=False)
else:
for par in pars:
pars[par].set(vary=False)
if (not freeze_cont):
for par in pars:
if ('y_' in par):
pars[par].set(vary=True)
if (unfreeze is True):
for par in pars:
if ('y_' in par):
pars[par].set(vary=True)
if (('Telluric' in par) and (par[(- 2):] != '_b')):
pars[par].set(vary=True)
pars['Telluric_b'].set(vary=True)
if (('Nontelluric' in par) and (par[(- 2):] != '_d')):
pars[par].set(vary=True)
| -3,010,078,550,005,135,000
|
Freezes the current params, so you can still add to the
model but the 'old' parameters will not change
Args:
prefix (str): Prefix of parameters to freeze, default: None, example: 'Telluric'
freeze_cont (bool): Freeze the continuum or not, default: True
unfreeze (bool): unfreezes all parameters except x values of
spline anchors, default=False
|
edibles/sightline.py
|
freeze
|
jancami/edibles
|
python
|
def freeze(self, pars=None, prefix=None, freeze_cont=True, unfreeze=False):
"Freezes the current params, so you can still add to the\n model but the 'old' parameters will not change\n\n Args:\n prefix (str): Prefix of parameters to freeze, default: None, example: 'Telluric'\n freeze_cont (bool): Freeze the continuum or not, default: True\n unfreeze (bool): unfreezes all parameters except x values of\n spline anchors, default=False\n\n "
if (pars is None):
pars = self.all_pars
if (unfreeze is False):
if prefix:
for par in pars:
if (prefix in par):
pars[par].set(vary=False)
else:
for par in pars:
pars[par].set(vary=False)
if (not freeze_cont):
for par in pars:
if ('y_' in par):
pars[par].set(vary=True)
if (unfreeze is True):
for par in pars:
if ('y_' in par):
pars[par].set(vary=True)
if (('Telluric' in par) and (par[(- 2):] != '_b')):
pars[par].set(vary=True)
pars['Telluric_b'].set(vary=True)
if (('Nontelluric' in par) and (par[(- 2):] != '_d')):
pars[par].set(vary=True)
|
def separate(self, data, x, old=False, plot=True):
'Separate the sources that were added to Sightline.\n\n Args:\n data (1darray): Flux data to use for separation\n x (1darray): Wavelength array to use\n old (bool): If true, uses the older, second-most recent model and parameters\n plot (bool): If true, plots the separated spectrum\n\n '
assert (len(self.telluric_pars) > 0)
assert (len(self.nontelluric_pars) > 0)
if (old is True):
model = self.old_complete_model
params = self.old_all_pars
telluric_model = self.old_telluric_model
telluric_params = self.old_telluric_pars
nontelluric_model = self.old_nontelluric_model
nontelluric_params = self.old_nontelluric_pars
cont_model = self.old_cont_model
cont_params = self.old_cont_pars
else:
model = self.complete_model
params = self.all_pars
telluric_model = self.telluric_model
telluric_params = self.telluric_pars
nontelluric_model = self.nontelluric_model
nontelluric_params = self.nontelluric_pars
cont_model = self.cont_model
cont_params = self.cont_model_pars
if (len(self.source_names) == 2):
complete_out = model.eval(data=data, params=params, x=x)
telluric_out = telluric_model.eval(data=data, params=telluric_params, x=x)
nontelluric_out = nontelluric_model.eval(data=data, params=nontelluric_params, x=x)
cont_out = cont_model.eval(data=data, params=cont_params, x=x)
if plot:
plt.plot(x, data, label='Data', color='k')
plt.plot(x, complete_out, label='Final model', color='r')
plt.plot(x, (data - complete_out), label='Residual', color='g')
plt.plot(x, (telluric_out * cont_out), label='Telluric model')
plt.plot(x, (nontelluric_out * cont_out), label='Non-telluric model')
plt.xlabel('Wavelength ($\\AA$)', fontsize=14)
plt.ylabel('Flux', fontsize=14)
plt.legend()
plt.show()
return (complete_out, telluric_out, nontelluric_out, cont_out)
| -8,686,016,170,199,200,000
|
Separate the sources that were added to Sightline.
Args:
data (1darray): Flux data to use for separation
x (1darray): Wavelength array to use
old (bool): If true, uses the older, second-most recent model and parameters
plot (bool): If true, plots the separated spectrum
|
edibles/sightline.py
|
separate
|
jancami/edibles
|
python
|
def separate(self, data, x, old=False, plot=True):
'Separate the sources that were added to Sightline.\n\n Args:\n data (1darray): Flux data to use for separation\n x (1darray): Wavelength array to use\n old (bool): If true, uses the older, second-most recent model and parameters\n plot (bool): If true, plots the separated spectrum\n\n '
assert (len(self.telluric_pars) > 0)
assert (len(self.nontelluric_pars) > 0)
if (old is True):
model = self.old_complete_model
params = self.old_all_pars
telluric_model = self.old_telluric_model
telluric_params = self.old_telluric_pars
nontelluric_model = self.old_nontelluric_model
nontelluric_params = self.old_nontelluric_pars
cont_model = self.old_cont_model
cont_params = self.old_cont_pars
else:
model = self.complete_model
params = self.all_pars
telluric_model = self.telluric_model
telluric_params = self.telluric_pars
nontelluric_model = self.nontelluric_model
nontelluric_params = self.nontelluric_pars
cont_model = self.cont_model
cont_params = self.cont_model_pars
if (len(self.source_names) == 2):
complete_out = model.eval(data=data, params=params, x=x)
telluric_out = telluric_model.eval(data=data, params=telluric_params, x=x)
nontelluric_out = nontelluric_model.eval(data=data, params=nontelluric_params, x=x)
cont_out = cont_model.eval(data=data, params=cont_params, x=x)
if plot:
plt.plot(x, data, label='Data', color='k')
plt.plot(x, complete_out, label='Final model', color='r')
plt.plot(x, (data - complete_out), label='Residual', color='g')
plt.plot(x, (telluric_out * cont_out), label='Telluric model')
plt.plot(x, (nontelluric_out * cont_out), label='Non-telluric model')
plt.xlabel('Wavelength ($\\AA$)', fontsize=14)
plt.ylabel('Flux', fontsize=14)
plt.legend()
plt.show()
return (complete_out, telluric_out, nontelluric_out, cont_out)
|
def Battery_Func(t, SV, SV_dot, sw):
'================================================================='
'==========================INITIALIZE============================='
'================================================================='
print(t)
nSV = len(SV)
res = np.zeros([nSV])
offset_vec = sep.offsets
" anode = an.obj['electrode']\n anode_s = an.obj['surf']\n elyte = an.obj['elyte']\n cathode = cat.obj['electrode']\n cathode_s = cat.obj['surf']"
nsp_an = anode.n_species
nsp_cat = cathode.n_species
F = ct.faraday
R = ct.gas_constant
T = Inputs.T
'================================================================='
'============================ANODE================================'
'================================================================='
offset = an.offsets
ptr = an.ptr
j = 0
N_io_m = 0
i_io_m = 0
i_el_m = an.i_ext
X_an_1 = SV[(offset[j] + ptr['X_ed'][(- 1)])]
rho_k_elyte_1 = SV[(offset[j] + ptr['rho_k_elyte'])]
phi_elec_an_1 = SV[(offset[j] + ptr['Phi_ed'])]
phi_elec_elyte_1 = (phi_elec_an_1 - SV[(offset[j] + ptr['Phi_dl'])])
anode.X = [X_an_1, (1 - X_an_1)]
anode.electric_potential = phi_elec_an_1
conductor.electric_potential = phi_elec_an_1
elyte.Y = (rho_k_elyte_1 / np.sum(rho_k_elyte_1))
elyte.electric_potential = phi_elec_elyte_1
sdot_1 = anode_s.net_production_rates
j = 1
offset = int(offset_vec[j])
X_an_2 = SV[(offset + an.ptr['X_ed'][(- 1)])]
rho_k_elyte_2 = SV[(offset + an.ptr['rho_k_elyte'])]
phi_elec_an_2 = SV[(offset + an.ptr['Phi_ed'])]
phi_elec_elyte_2 = (phi_elec_an_2 - SV[(offset + an.ptr['Phi_dl'])])
anode.X = [X_an_2, (1 - X_an_2)]
conductor.electric_potential = phi_elec_an_2
anode.electric_potential = phi_elec_an_2
elyte.Y = (rho_k_elyte_2 / np.sum(rho_k_elyte_2))
elyte.electric_potential = phi_elec_elyte_2
sdot_2 = anode_s.net_production_rates
j = 0
offset = int(offset_vec[j])
i_el_p = ((an.sigma_eff_ed * (phi_elec_an_1 - phi_elec_an_2)) * an.dyInv)
N_io_p = ((((- an.u_Li_elyte) * elyte.density_mole) * (((R * T) * (rho_k_elyte_2 - rho_k_elyte_1)) + (F * (phi_elec_elyte_2 - phi_elec_elyte_1)))) * an.dyInv)
i_io_p = (np.dot(N_io_p, Inputs.z_k_elyte) * F)
i_Far_1 = (((sdot_1[an.ptr['iFar']] * F) * an.A_surf) / an.dyInv)
X_Li = (1 - SV[(offset + an.ptr['X_ed'])])
DiffFlux = np.zeros([(an.nshells + 1)])
DiffFlux[1:(- 1)] = ((an.D_Li_ed * (X_Li[0:(- 1)] - X_Li[1:])) / an.dr)
DiffFlux[(- 1)] = (sdot_1[0] / anode.density_mole)
k_m = (np.arange(0, an.nshells) / an.nshells)
k_p = (np.arange(1, (an.nshells + 1)) / an.nshells)
'Calculate the change in X_C6 in the particle interior.\n Note that the DiffFlux is the diffusion of lithium\n toward the particle surface, and that diffusion of Li\n into the shell decreases the amount of C6. The fluxes\n must be scaled by the shell interface surface area\n relative to the total particle surface area'
res[(offset + an.ptr['X_ed'])] = (SV_dot[(offset + an.ptr['X_ed'])] - (((((DiffFlux[1:] * (k_p ** 2)) - (DiffFlux[0:(- 1)] * (k_m ** 2))) * an.A_surf) / an.eps_ed) / an.V_shell))
'Change in electrolyte_composition'
res[(offset + an.ptr['rho_k_elyte'])] = (SV_dot[(offset + an.ptr['rho_k_elyte'])] - (((((N_io_m - N_io_p) * an.dyInv) + (sdot_1[nsp_an] * an.A_surf)) / elyte.density_mole) / an.eps_elyte))
'Double-layer voltage'
res[(offset + an.ptr['Phi_dl'])] = (SV_dot[(offset + an.ptr['Phi_dl'])] - (((((i_Far_1 + i_el_m) - i_el_p) * an.dyInv) / an.C_dl) / an.A_surf))
'Algebraic equation for ANODE electric potential boundary condition'
res[(offset + an.ptr['Phi_ed'])] = SV[(offset + an.ptr['Phi_ed'])]
'============================ANODE================================'
'INTERIOR NODES'
for j in np.arange(2, an.npoints):
N_io_m = N_io_p
i_io_m = i_io_p
i_el_m = i_el_p
X_an_1 = X_an_2
rho_k_elyte_1 = rho_k_elyte_2
phi_elec_an_1 = phi_elec_an_2
phi_elec_elyte_1 = phi_elec_elyte_2
sdot_1 = sdot_2
offset = int(an.offsets[j])
X_an_2 = SV[(offset + an.ptr['X_ed'][(- 1)])]
rho_k_elyte_2 = SV[(offset + an.ptr['rho_k_elyte'])]
phi_elec_an_2 = SV[(offset + an.ptr['Phi_ed'])]
phi_elec_elyte_2 = (phi_elec_an_2 - SV[(offset + an.ptr['Phi_dl'])])
anode.X = [X_an_2, (1 - X_an_2)]
anode.electric_potential = phi_elec_an_2
conductor.electric_potential = phi_elec_an_2
elyte.Y = (rho_k_elyte_2 / np.sum(rho_k_elyte_2))
elyte.electric_potential = phi_elec_elyte_2
sdot_2 = anode_s.net_production_rates
offset = int(an.offsets[(j - 1)])
i_el_p = ((an.sigma_eff_ed * (phi_elec_an_1 - phi_elec_an_2)) * an.dyInv)
N_io_p = ((((- an.u_Li_elyte) * elyte.density_mole) * (((R * T) * (rho_k_elyte_2 - rho_k_elyte_1)) + (F * (phi_elec_elyte_2 - phi_elec_elyte_1)))) * an.dyInv)
i_io_p = (np.dot(N_io_p, Inputs.z_k_elyte) * F)
i_Far_1 = (((sdot_1[an.ptr['iFar']] * F) * an.A_surf) / an.dyInv)
X_Li = (1 - SV[(offset + an.ptr['X_ed'])])
DiffFlux = np.zeros([(an.nshells + 1)])
DiffFlux[1:(- 1)] = ((an.D_Li_ed * (X_Li[0:(- 1)] - X_Li[1:])) / an.dr)
DiffFlux[(- 1)] = (sdot_1[0] / anode.density_mole)
'Calculate the change in X_C6 in the particle interior.'
res[(offset + an.ptr['X_ed'])] = (SV_dot[(offset + an.ptr['X_ed'])] - (((((DiffFlux[1:] * (k_p ** 2)) - (DiffFlux[0:(- 1)] * (k_m ** 2))) * an.A_surf) / an.eps_ed) / an.V_shell))
'Change in electrolyte_composition'
res[(offset + an.ptr['rho_k_elyte'])] = (SV_dot[(offset + an.ptr['rho_k_elyte'])] - (((((N_io_m - N_io_p) * an.dyInv) + (sdot_1[nsp_an] * an.A_surf)) / elyte.density_mole) / an.eps_elyte))
'Double-layer voltage'
res[(offset + an.ptr['Phi_dl'])] = (SV_dot[(offset + an.ptr['Phi_dl'])] - (((((i_Far_1 + i_el_m) - i_el_p) * an.dyInv) / an.C_dl) / an.A_surf))
'Algebraic equation for ANODE electric potential boundary condition'
res[(offset + an.ptr['Phi_ed'])] = (((i_el_m - i_el_p) + i_io_m) - i_io_p)
'============================ANODE================================'
'Separator boundary'
N_io_m = N_io_p
i_io_m = i_io_p
i_el_m = i_el_p
X_an_1 = X_an_2
rho_k_elyte_1 = rho_k_elyte_2
phi_elec_an_1 = phi_elec_an_2
phi_elec_elyte_1 = phi_elec_elyte_2
sdot_1 = sdot_2
i_el_p = 0
j = (an.npoints - 1)
offset = int(an.offsets[j])
i_Far_1 = (((sdot_1[an.ptr['iFar']] * F) * an.A_surf) / an.dyInv)
i_io_p = an.i_ext
N_io_p = np.zeros_like(N_io_p)
N_io_p[2] = (i_io_p / F)
X_Li = (1 - SV[(offset + an.ptr['X_ed'])])
DiffFlux = np.zeros([(an.nshells + 1)])
DiffFlux[1:(- 1)] = ((an.D_Li_ed * (X_Li[0:(- 1)] - X_Li[1:])) / an.dr)
DiffFlux[(- 1)] = (sdot_1[0] / anode.density_mole)
'Calculate the change in X_C6 in the particle interior.'
res[(offset + an.ptr['X_ed'])] = (SV_dot[(offset + an.ptr['X_ed'])] - (((((DiffFlux[1:] * (k_p ** 2)) - (DiffFlux[0:(- 1)] * (k_m ** 2))) * an.A_surf) / an.eps_ed) / an.V_shell))
'Change in electrolyte_composition'
res[(offset + an.ptr['rho_k_elyte'])] = (SV_dot[(offset + an.ptr['rho_k_elyte'])] - (((((N_io_m - N_io_p) * an.dyInv) + (sdot_1[nsp_an] * an.A_surf)) / elyte.density_mole) / an.eps_elyte))
'Double-layer voltage'
res[(offset + an.ptr['Phi_dl'])] = (SV_dot[(offset + an.ptr['Phi_dl'])] - (((((i_Far_1 + i_el_m) - i_el_p) * an.dyInv) / an.C_dl) / an.A_surf))
'Algebraic equation for ANODE electric potential boundary condition'
res[(offset + an.ptr['Phi_ed'])] = (((i_el_m - i_el_p) + i_io_m) - i_io_p)
'================================================================='
'==========================SEPARATOR=============================='
'================================================================='
'================================================================='
'===========================CATHODE==============================='
'================================================================='
'=========================CATHODE============================='
'current collector boundary'
return res
| 3,328,596,747,109,749,000
|
=================================================================
|
li_ion_battery_p2d_functions.py
|
Battery_Func
|
coresresearch/p2d_li_ion_battery
|
python
|
def Battery_Func(t, SV, SV_dot, sw):
'==========================INITIALIZE============================='
print(t)
nSV = len(SV)
res = np.zeros([nSV])
offset_vec = sep.offsets
" anode = an.obj['electrode']\n anode_s = an.obj['surf']\n elyte = an.obj['elyte']\n cathode = cat.obj['electrode']\n cathode_s = cat.obj['surf']"
nsp_an = anode.n_species
nsp_cat = cathode.n_species
F = ct.faraday
R = ct.gas_constant
T = Inputs.T
'============================ANODE================================'
offset = an.offsets
ptr = an.ptr
j = 0
N_io_m = 0
i_io_m = 0
i_el_m = an.i_ext
X_an_1 = SV[(offset[j] + ptr['X_ed'][(- 1)])]
rho_k_elyte_1 = SV[(offset[j] + ptr['rho_k_elyte'])]
phi_elec_an_1 = SV[(offset[j] + ptr['Phi_ed'])]
phi_elec_elyte_1 = (phi_elec_an_1 - SV[(offset[j] + ptr['Phi_dl'])])
anode.X = [X_an_1, (1 - X_an_1)]
anode.electric_potential = phi_elec_an_1
conductor.electric_potential = phi_elec_an_1
elyte.Y = (rho_k_elyte_1 / np.sum(rho_k_elyte_1))
elyte.electric_potential = phi_elec_elyte_1
sdot_1 = anode_s.net_production_rates
j = 1
offset = int(offset_vec[j])
X_an_2 = SV[(offset + an.ptr['X_ed'][(- 1)])]
rho_k_elyte_2 = SV[(offset + an.ptr['rho_k_elyte'])]
phi_elec_an_2 = SV[(offset + an.ptr['Phi_ed'])]
phi_elec_elyte_2 = (phi_elec_an_2 - SV[(offset + an.ptr['Phi_dl'])])
anode.X = [X_an_2, (1 - X_an_2)]
conductor.electric_potential = phi_elec_an_2
anode.electric_potential = phi_elec_an_2
elyte.Y = (rho_k_elyte_2 / np.sum(rho_k_elyte_2))
elyte.electric_potential = phi_elec_elyte_2
sdot_2 = anode_s.net_production_rates
j = 0
offset = int(offset_vec[j])
i_el_p = ((an.sigma_eff_ed * (phi_elec_an_1 - phi_elec_an_2)) * an.dyInv)
N_io_p = ((((- an.u_Li_elyte) * elyte.density_mole) * (((R * T) * (rho_k_elyte_2 - rho_k_elyte_1)) + (F * (phi_elec_elyte_2 - phi_elec_elyte_1)))) * an.dyInv)
i_io_p = (np.dot(N_io_p, Inputs.z_k_elyte) * F)
i_Far_1 = (((sdot_1[an.ptr['iFar']] * F) * an.A_surf) / an.dyInv)
X_Li = (1 - SV[(offset + an.ptr['X_ed'])])
DiffFlux = np.zeros([(an.nshells + 1)])
DiffFlux[1:(- 1)] = ((an.D_Li_ed * (X_Li[0:(- 1)] - X_Li[1:])) / an.dr)
DiffFlux[(- 1)] = (sdot_1[0] / anode.density_mole)
k_m = (np.arange(0, an.nshells) / an.nshells)
k_p = (np.arange(1, (an.nshells + 1)) / an.nshells)
'Calculate the change in X_C6 in the particle interior.\n Note that the DiffFlux is the diffusion of lithium\n toward the particle surface, and that diffusion of Li\n into the shell decreases the amount of C6. The fluxes\n must be scaled by the shell interface surface area\n relative to the total particle surface area'
res[(offset + an.ptr['X_ed'])] = (SV_dot[(offset + an.ptr['X_ed'])] - (((((DiffFlux[1:] * (k_p ** 2)) - (DiffFlux[0:(- 1)] * (k_m ** 2))) * an.A_surf) / an.eps_ed) / an.V_shell))
'Change in electrolyte_composition'
res[(offset + an.ptr['rho_k_elyte'])] = (SV_dot[(offset + an.ptr['rho_k_elyte'])] - (((((N_io_m - N_io_p) * an.dyInv) + (sdot_1[nsp_an] * an.A_surf)) / elyte.density_mole) / an.eps_elyte))
'Double-layer voltage'
res[(offset + an.ptr['Phi_dl'])] = (SV_dot[(offset + an.ptr['Phi_dl'])] - (((((i_Far_1 + i_el_m) - i_el_p) * an.dyInv) / an.C_dl) / an.A_surf))
'Algebraic equation for ANODE electric potential boundary condition'
res[(offset + an.ptr['Phi_ed'])] = SV[(offset + an.ptr['Phi_ed'])]
'============================ANODE================================'
'INTERIOR NODES'
for j in np.arange(2, an.npoints):
N_io_m = N_io_p
i_io_m = i_io_p
i_el_m = i_el_p
X_an_1 = X_an_2
rho_k_elyte_1 = rho_k_elyte_2
phi_elec_an_1 = phi_elec_an_2
phi_elec_elyte_1 = phi_elec_elyte_2
sdot_1 = sdot_2
offset = int(an.offsets[j])
X_an_2 = SV[(offset + an.ptr['X_ed'][(- 1)])]
rho_k_elyte_2 = SV[(offset + an.ptr['rho_k_elyte'])]
phi_elec_an_2 = SV[(offset + an.ptr['Phi_ed'])]
phi_elec_elyte_2 = (phi_elec_an_2 - SV[(offset + an.ptr['Phi_dl'])])
anode.X = [X_an_2, (1 - X_an_2)]
anode.electric_potential = phi_elec_an_2
conductor.electric_potential = phi_elec_an_2
elyte.Y = (rho_k_elyte_2 / np.sum(rho_k_elyte_2))
elyte.electric_potential = phi_elec_elyte_2
sdot_2 = anode_s.net_production_rates
offset = int(an.offsets[(j - 1)])
i_el_p = ((an.sigma_eff_ed * (phi_elec_an_1 - phi_elec_an_2)) * an.dyInv)
N_io_p = ((((- an.u_Li_elyte) * elyte.density_mole) * (((R * T) * (rho_k_elyte_2 - rho_k_elyte_1)) + (F * (phi_elec_elyte_2 - phi_elec_elyte_1)))) * an.dyInv)
i_io_p = (np.dot(N_io_p, Inputs.z_k_elyte) * F)
i_Far_1 = (((sdot_1[an.ptr['iFar']] * F) * an.A_surf) / an.dyInv)
X_Li = (1 - SV[(offset + an.ptr['X_ed'])])
DiffFlux = np.zeros([(an.nshells + 1)])
DiffFlux[1:(- 1)] = ((an.D_Li_ed * (X_Li[0:(- 1)] - X_Li[1:])) / an.dr)
DiffFlux[(- 1)] = (sdot_1[0] / anode.density_mole)
'Calculate the change in X_C6 in the particle interior.'
res[(offset + an.ptr['X_ed'])] = (SV_dot[(offset + an.ptr['X_ed'])] - (((((DiffFlux[1:] * (k_p ** 2)) - (DiffFlux[0:(- 1)] * (k_m ** 2))) * an.A_surf) / an.eps_ed) / an.V_shell))
'Change in electrolyte_composition'
res[(offset + an.ptr['rho_k_elyte'])] = (SV_dot[(offset + an.ptr['rho_k_elyte'])] - (((((N_io_m - N_io_p) * an.dyInv) + (sdot_1[nsp_an] * an.A_surf)) / elyte.density_mole) / an.eps_elyte))
'Double-layer voltage'
res[(offset + an.ptr['Phi_dl'])] = (SV_dot[(offset + an.ptr['Phi_dl'])] - (((((i_Far_1 + i_el_m) - i_el_p) * an.dyInv) / an.C_dl) / an.A_surf))
'Algebraic equation for ANODE electric potential boundary condition'
res[(offset + an.ptr['Phi_ed'])] = (((i_el_m - i_el_p) + i_io_m) - i_io_p)
'============================ANODE================================'
'Separator boundary'
N_io_m = N_io_p
i_io_m = i_io_p
i_el_m = i_el_p
X_an_1 = X_an_2
rho_k_elyte_1 = rho_k_elyte_2
phi_elec_an_1 = phi_elec_an_2
phi_elec_elyte_1 = phi_elec_elyte_2
sdot_1 = sdot_2
i_el_p = 0
j = (an.npoints - 1)
offset = int(an.offsets[j])
i_Far_1 = (((sdot_1[an.ptr['iFar']] * F) * an.A_surf) / an.dyInv)
i_io_p = an.i_ext
N_io_p = np.zeros_like(N_io_p)
N_io_p[2] = (i_io_p / F)
X_Li = (1 - SV[(offset + an.ptr['X_ed'])])
DiffFlux = np.zeros([(an.nshells + 1)])
DiffFlux[1:(- 1)] = ((an.D_Li_ed * (X_Li[0:(- 1)] - X_Li[1:])) / an.dr)
DiffFlux[(- 1)] = (sdot_1[0] / anode.density_mole)
'Calculate the change in X_C6 in the particle interior.'
res[(offset + an.ptr['X_ed'])] = (SV_dot[(offset + an.ptr['X_ed'])] - (((((DiffFlux[1:] * (k_p ** 2)) - (DiffFlux[0:(- 1)] * (k_m ** 2))) * an.A_surf) / an.eps_ed) / an.V_shell))
'Change in electrolyte_composition'
res[(offset + an.ptr['rho_k_elyte'])] = (SV_dot[(offset + an.ptr['rho_k_elyte'])] - (((((N_io_m - N_io_p) * an.dyInv) + (sdot_1[nsp_an] * an.A_surf)) / elyte.density_mole) / an.eps_elyte))
'Double-layer voltage'
res[(offset + an.ptr['Phi_dl'])] = (SV_dot[(offset + an.ptr['Phi_dl'])] - (((((i_Far_1 + i_el_m) - i_el_p) * an.dyInv) / an.C_dl) / an.A_surf))
'Algebraic equation for ANODE electric potential boundary condition'
res[(offset + an.ptr['Phi_ed'])] = (((i_el_m - i_el_p) + i_io_m) - i_io_p)
'==========================SEPARATOR=============================='
'===========================CATHODE==============================='
'=========================CATHODE============================='
'current collector boundary'
return res
|
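Illustration (not part of the original record): the particle diffusion residuals above weight each shell-interface flux by its normalized radius squared, (r/R)**2, before differencing, since flux crosses a shell interface through an area proportional to r**2. A standalone sketch of that weighting with illustrative numbers.

import numpy as np

nshells = 5
k_m = np.arange(0, nshells) / nshells          # inner interface radii, r/R
k_p = np.arange(1, nshells + 1) / nshells      # outer interface radii, r/R
DiffFlux = np.linspace(0.0, 1.0, nshells + 1)  # hypothetical interface fluxes
dX = DiffFlux[1:] * k_p**2 - DiffFlux[:-1] * k_m**2
print(dX)  # net area-weighted flux into each shell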
def _update_learning_rate(self):
' Learning rate scheduling per step '
self.current_step += 1
lr = (self.init_lr * self._get_lr_scale())
for param_group in self._optimizer.param_groups:
param_group['lr'] = lr
| 5,636,199,922,058,562,000
|
Learning rate scheduling per step
|
FastSpeech2/model/optimizer.py
|
_update_learning_rate
|
ARBML/klaam
|
python
|
def _update_learning_rate(self):
' '
self.current_step += 1
lr = (self.init_lr * self._get_lr_scale())
for param_group in self._optimizer.param_groups:
param_group['lr'] = lr
|
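_get_lr_scale is not included in this record. In Transformer-style TTS trainers such as FastSpeech2 it is conventionally the Noam warmup schedule; a hedged sketch under that assumption, where d_model and n_warmup_steps are presumed attributes of the optimizer wrapper:
def _get_lr_scale(self):
    # Noam schedule: linear warmup for n_warmup_steps, then inverse
    # square-root decay, scaled by d_model**-0.5. Assumed, not verified
    # against the repo.
    return (self.d_model ** -0.5) * min(
        self.current_step ** -0.5,
        self.current_step * (self.n_warmup_steps ** -1.5),
    )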
def validate_one(func_name):
'\n Validate the docstring for the given func_name\n\n Parameters\n ----------\n func_name : function\n Function whose docstring will be evaluated\n\n Returns\n -------\n int\n The number of errors found in the `func_name` docstring\n '
func_obj = _load_obj(func_name)
doc = Docstring(func_name, func_obj)
sys.stderr.write(_output_header('Docstring ({})'.format(func_name)))
sys.stderr.write('{}\n'.format(doc.clean_doc))
errs = []
wrns = []
if (doc.start_blank_lines != 1):
errs.append('Docstring text (summary) should start in the line immediately after the opening quotes (not in the same line, or leaving a blank line in between)')
if (doc.end_blank_lines != 1):
errs.append('Closing quotes should be placed in the line after the last text in the docstring (do not close the quotes in the same line as the text, or leave a blank line between the last text and the quotes)')
if doc.double_blank_lines:
errs.append('Use only one blank line to separate sections or paragraphs')
if (not doc.summary):
errs.append('No summary found (a short summary in a single line should be present at the beginning of the docstring)')
else:
if (not doc.summary[0].isupper()):
errs.append('Summary does not start with a capital letter')
if (doc.summary[(- 1)] != '.'):
errs.append('Summary does not end with a period')
if (doc.is_function_or_method and (doc.summary.split(' ')[0][(- 1)] == 's')):
errs.append('Summary must start with infinitive verb, not third person (e.g. use "Generate" instead of "Generates")')
if (doc.num_summary_lines > 1):
errs.append('Summary should fit in a single line.')
if (not doc.extended_summary):
wrns.append('No extended summary found')
param_errs = doc.parameter_mismatches
for param in doc.doc_parameters:
if (not param.startswith('*')):
if (not doc.parameter_type(param)):
param_errs.append('Parameter "{}" has no type'.format(param))
elif (doc.parameter_type(param)[(- 1)] == '.'):
param_errs.append('Parameter "{}" type should not finish with "."'.format(param))
if (not doc.parameter_desc(param)):
param_errs.append('Parameter "{}" has no description'.format(param))
else:
if (not doc.parameter_desc(param)[0].isupper()):
param_errs.append('Parameter "{}" description should start with a capital letter'.format(param))
if (doc.parameter_desc(param)[(- 1)] != '.'):
param_errs.append('Parameter "{}" description should finish with "."'.format(param))
if param_errs:
errs.append('Errors in parameters section')
for param_err in param_errs:
errs.append('\t{}'.format(param_err))
if doc.is_function_or_method:
if ((not doc.returns) and ('return' in doc.method_source)):
errs.append('No Returns section found')
if ((not doc.yields) and ('yield' in doc.method_source)):
errs.append('No Yields section found')
mentioned_errs = doc.mentioned_private_classes
if mentioned_errs:
errs.append('Private classes ({}) should not be mentioned in public docstring.'.format(mentioned_errs))
if (not doc.see_also):
wrns.append('See Also section not found')
else:
for (rel_name, rel_desc) in doc.see_also.items():
if (not rel_desc):
errs.append('Missing description for See Also "{}" reference'.format(rel_name))
for line in doc.raw_doc.splitlines():
if re.match('^ *\t', line):
errs.append('Tabs found at the start of line "{}", please use whitespace only'.format(line.lstrip()))
examples_errs = ''
if (not doc.examples):
wrns.append('No examples section found')
else:
examples_errs = doc.examples_errors
if examples_errs:
errs.append('Examples do not pass tests')
sys.stderr.write(_output_header('Validation'))
if errs:
sys.stderr.write('Errors found:\n')
for err in errs:
sys.stderr.write('\t{}\n'.format(err))
if wrns:
sys.stderr.write('Warnings found:\n')
for wrn in wrns:
sys.stderr.write('\t{}\n'.format(wrn))
if (not errs):
sys.stderr.write('Docstring for "{}" correct. :)\n'.format(func_name))
if examples_errs:
sys.stderr.write(_output_header('Doctests'))
sys.stderr.write(examples_errs)
return len(errs)
| 1,290,854,573,790,395,100
|
Validate the docstring for the given func_name
Parameters
----------
func_name : function
Function whose docstring will be evaluated
Returns
-------
int
The number of errors found in the `func_name` docstring
|
scripts/validate_docstrings.py
|
validate_one
|
Anjali2019/pandas
|
python
|
def validate_one(func_name):
'\n Validate the docstring for the given func_name\n\n Parameters\n ----------\n func_name : function\n Function whose docstring will be evaluated\n\n Returns\n -------\n int\n The number of errors found in the `func_name` docstring\n '
func_obj = _load_obj(func_name)
doc = Docstring(func_name, func_obj)
sys.stderr.write(_output_header('Docstring ({})'.format(func_name)))
sys.stderr.write('{}\n'.format(doc.clean_doc))
errs = []
wrns = []
if (doc.start_blank_lines != 1):
errs.append('Docstring text (summary) should start in the line immediately after the opening quotes (not in the same line, or leaving a blank line in between)')
if (doc.end_blank_lines != 1):
errs.append('Closing quotes should be placed in the line after the last text in the docstring (do not close the quotes in the same line as the text, or leave a blank line between the last text and the quotes)')
if doc.double_blank_lines:
errs.append('Use only one blank line to separate sections or paragraphs')
if (not doc.summary):
errs.append('No summary found (a short summary in a single line should be present at the beginning of the docstring)')
else:
if (not doc.summary[0].isupper()):
errs.append('Summary does not start with a capital letter')
if (doc.summary[(- 1)] != '.'):
errs.append('Summary does not end with a period')
if (doc.is_function_or_method and (doc.summary.split(' ')[0][(- 1)] == 's')):
errs.append('Summary must start with infinitive verb, not third person (e.g. use "Generate" instead of "Generates")')
if (doc.num_summary_lines > 1):
errs.append('Summary should fit in a single line.')
if (not doc.extended_summary):
wrns.append('No extended summary found')
param_errs = doc.parameter_mismatches
for param in doc.doc_parameters:
if (not param.startswith('*')):
if (not doc.parameter_type(param)):
param_errs.append('Parameter "{}" has no type'.format(param))
elif (doc.parameter_type(param)[(- 1)] == '.'):
param_errs.append('Parameter "{}" type should not finish with "."'.format(param))
if (not doc.parameter_desc(param)):
param_errs.append('Parameter "{}" has no description'.format(param))
else:
if (not doc.parameter_desc(param)[0].isupper()):
param_errs.append('Parameter "{}" description should start with a capital letter'.format(param))
if (doc.parameter_desc(param)[(- 1)] != '.'):
param_errs.append('Parameter "{}" description should finish with "."'.format(param))
if param_errs:
errs.append('Errors in parameters section')
for param_err in param_errs:
errs.append('\t{}'.format(param_err))
if doc.is_function_or_method:
if ((not doc.returns) and ('return' in doc.method_source)):
errs.append('No Returns section found')
if ((not doc.yields) and ('yield' in doc.method_source)):
errs.append('No Yields section found')
mentioned_errs = doc.mentioned_private_classes
if mentioned_errs:
errs.append('Private classes ({}) should not be mentioned in public docstring.'.format(mentioned_errs))
if (not doc.see_also):
wrns.append('See Also section not found')
else:
for (rel_name, rel_desc) in doc.see_also.items():
if (not rel_desc):
errs.append('Missing description for See Also "{}" reference'.format(rel_name))
for line in doc.raw_doc.splitlines():
if re.match('^ *\t', line):
errs.append('Tabs found at the start of line "{}", please use whitespace only'.format(line.lstrip()))
examples_errs = ''
if (not doc.examples):
wrns.append('No examples section found')
else:
examples_errs = doc.examples_errors
if examples_errs:
errs.append('Examples do not pass tests')
sys.stderr.write(_output_header('Validation'))
if errs:
sys.stderr.write('Errors found:\n')
for err in errs:
sys.stderr.write('\t{}\n'.format(err))
if wrns:
sys.stderr.write('Warnings found:\n')
for wrn in wrns:
sys.stderr.write('\t{}\n'.format(wrn))
if (not errs):
sys.stderr.write('Docstring for "{}" correct. :)\n'.format(func_name))
if examples_errs:
sys.stderr.write(_output_header('Doctests'))
sys.stderr.write(examples_errs)
return len(errs)
|
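For reference, a docstring that passes every check above with zero errors and no warnings would look like the following; the function itself is purely illustrative:
def add_one(num):
    """
    Add one to the given number.

    This trivial function exists only to illustrate the conventions
    that validate_one enforces.

    Parameters
    ----------
    num : int
        Number to increment.

    Returns
    -------
    int
        The incremented number.

    See Also
    --------
    int : Built-in integer type.

    Examples
    --------
    >>> add_one(1)
    2
    """
    return num + 1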
def print_list_views(ctx):
'Read list view by title example'
list_object = ctx.web.lists.get_by_title(list_title)
views = list_object.views
ctx.load(views)
ctx.execute_query()
for view in views:
cur_view_title = view.properties['Title']
cur_view = views.get_by_title(cur_view_title)
ctx.load(cur_view)
ctx.execute_query()
print('View title: {0}'.format(cur_view.properties['Title']))
| -8,704,608,048,394,303,000
|
Read list view by title example
|
examples/sharepoint/view_operations.py
|
print_list_views
|
Aisbergg/Office365-REST-Python-Client
|
python
|
def print_list_views(ctx):
list_object = ctx.web.lists.get_by_title(list_title)
views = list_object.views
ctx.load(views)
ctx.execute_query()
for view in views:
cur_view_title = view.properties['Title']
cur_view = views.get_by_title(cur_view_title)
ctx.load(cur_view)
ctx.execute_query()
print('View title: {0}'.format(cur_view.properties['Title']))
|
def print_view_items(ctx):
'Example demonstrates how to retrieve View items'
list_object = ctx.web.lists.get_by_title(list_title)
view = list_object.views.get_by_title(view_title)
ctx.load(view, ['ViewQuery'])
ctx.execute_query()
view_fields = view.view_fields
ctx.load(view_fields)
ctx.execute_query()
qry = CamlQuery()
qry.ViewXml = '<View><Where>{0}</Where></View>'.format(view.properties['ViewQuery'])
items = list_object.get_items(qry)
ctx.load(items)
ctx.execute_query()
for item in items:
print('Item title: {0}'.format(item.properties['Title']))
| -7,885,829,279,766,085,000
|
Example demonstrates how to retrieve View items
|
examples/sharepoint/view_operations.py
|
print_view_items
|
Aisbergg/Office365-REST-Python-Client
|
python
|
def print_view_items(ctx):
list_object = ctx.web.lists.get_by_title(list_title)
view = list_object.views.get_by_title(view_title)
ctx.load(view, ['ViewQuery'])
ctx.execute_query()
view_fields = view.view_fields
ctx.load(view_fields)
ctx.execute_query()
qry = CamlQuery()
qry.ViewXml = '<View><Where>{0}</Where></View>'.format(view.properties['ViewQuery'])
items = list_object.get_items(qry)
ctx.load(items)
ctx.execute_query()
for item in items:
print('Item title: {0}'.format(item.properties['Title']))
|
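Both view examples assume module-level list_title/view_title values and an already-authenticated ctx. A hedged setup sketch in the style this library's examples use; the site URL and credentials are placeholders, and the import paths follow Office365-REST-Python-Client:
from office365.runtime.auth.authentication_context import AuthenticationContext
from office365.sharepoint.client_context import ClientContext

site_url = 'https://contoso.sharepoint.com/sites/dev'  # placeholder
ctx_auth = AuthenticationContext(url=site_url)
if ctx_auth.acquire_token_for_user(username='user@contoso.com', password='...'):
    ctx = ClientContext(site_url, ctx_auth)
    print_list_views(ctx)
    print_view_items(ctx)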
def test_dead_default():
'\n\t\tYou may now omit a transition, or even an entire state, from the map. This\n\t\taffects every usage of `fsm.map`.\n\t'
blockquote = fsm(alphabet={'/', '*', anything_else}, states={0, 1, 2, 3, 4, 5}, initial=0, finals={4}, map={0: {'/': 1}, 1: {'*': 2}, 2: {'/': 2, anything_else: 2, '*': 3}, 3: {'/': 4, anything_else: 2, '*': 3}})
assert blockquote.accepts(['/', '*', 'whatever', '*', '/'])
assert (not blockquote.accepts(['*', '*', 'whatever', '*', '/']))
str(blockquote)
(blockquote | blockquote)
(blockquote & blockquote)
(blockquote ^ blockquote)
reversed(blockquote)
assert (not blockquote.everythingbut().accepts(['/', '*', 'whatever', '*', '/']))
assert blockquote.everythingbut().accepts(['*'])
assert blockquote.islive(3)
assert blockquote.islive(4)
assert (not blockquote.islive(5))
gen = blockquote.strings()
assert (next(gen) == ['/', '*', '*', '/'])
| -8,301,654,867,052,381,000
|
You may now omit a transition, or even an entire state, from the map. This
affects every usage of `fsm.map`.
|
greenery/fsm_test.py
|
test_dead_default
|
doni69/greenery
|
python
|
def test_dead_default():
'\n\t\tYou may now omit a transition, or even an entire state, from the map. This\n\t\taffects every usage of `fsm.map`.\n\t'
blockquote = fsm(alphabet={'/', '*', anything_else}, states={0, 1, 2, 3, 4, 5}, initial=0, finals={4}, map={0: {'/': 1}, 1: {'*': 2}, 2: {'/': 2, anything_else: 2, '*': 3}, 3: {'/': 4, anything_else: 2, '*': 3}})
assert blockquote.accepts(['/', '*', 'whatever', '*', '/'])
assert (not blockquote.accepts(['*', '*', 'whatever', '*', '/']))
str(blockquote)
(blockquote | blockquote)
(blockquote & blockquote)
(blockquote ^ blockquote)
reversed(blockquote)
assert (not blockquote.everythingbut().accepts(['/', '*', 'whatever', '*', '/']))
assert blockquote.everythingbut().accepts(['*'])
assert blockquote.islive(3)
assert blockquote.islive(4)
assert (not blockquote.islive(5))
gen = blockquote.strings()
assert (next(gen) == ['/', '*', '*', '/'])
|
def _construct_simple(coeffs, opt):
'Handle simple domains, e.g.: ZZ, QQ, RR and algebraic domains. '
rationals = floats = complexes = algebraics = False
float_numbers = []
if (opt.extension is True):
is_algebraic = (lambda coeff: (coeff.is_number and coeff.is_algebraic))
else:
is_algebraic = (lambda coeff: False)
for coeff in coeffs:
if coeff.is_Rational:
if (not coeff.is_Integer):
rationals = True
elif coeff.is_Float:
if algebraics:
return False
else:
floats = True
float_numbers.append(coeff)
else:
is_complex = pure_complex(coeff)
if is_complex:
complexes = True
(x, y) = is_complex
if (x.is_Rational and y.is_Rational):
if (not (x.is_Integer and y.is_Integer)):
rationals = True
continue
else:
floats = True
if x.is_Float:
float_numbers.append(x)
if y.is_Float:
float_numbers.append(y)
if is_algebraic(coeff):
if floats:
return False
algebraics = True
else:
return None
max_prec = (max((c._prec for c in float_numbers)) if float_numbers else 53)
if algebraics:
(domain, result) = _construct_algebraic(coeffs, opt)
else:
if (floats and complexes):
domain = ComplexField(prec=max_prec)
elif floats:
domain = RealField(prec=max_prec)
elif (rationals or opt.field):
domain = (QQ_I if complexes else QQ)
else:
domain = (ZZ_I if complexes else ZZ)
result = [domain.from_sympy(coeff) for coeff in coeffs]
return (domain, result)
| 2,315,593,106,988,361,700
|
Handle simple domains, e.g.: ZZ, QQ, RR and algebraic domains.
|
sympy/polys/constructor.py
|
_construct_simple
|
ABKor752/sympy
|
python
|
def _construct_simple(coeffs, opt):
' '
rationals = floats = complexes = algebraics = False
float_numbers = []
if (opt.extension is True):
is_algebraic = (lambda coeff: (coeff.is_number and coeff.is_algebraic))
else:
is_algebraic = (lambda coeff: False)
for coeff in coeffs:
if coeff.is_Rational:
if (not coeff.is_Integer):
rationals = True
elif coeff.is_Float:
if algebraics:
return False
else:
floats = True
float_numbers.append(coeff)
else:
is_complex = pure_complex(coeff)
if is_complex:
complexes = True
(x, y) = is_complex
if (x.is_Rational and y.is_Rational):
if (not (x.is_Integer and y.is_Integer)):
rationals = True
continue
else:
floats = True
if x.is_Float:
float_numbers.append(x)
if y.is_Float:
float_numbers.append(y)
if is_algebraic(coeff):
if floats:
return False
algebraics = True
else:
return None
max_prec = (max((c._prec for c in float_numbers)) if float_numbers else 53)
if algebraics:
(domain, result) = _construct_algebraic(coeffs, opt)
else:
if (floats and complexes):
domain = ComplexField(prec=max_prec)
elif floats:
domain = RealField(prec=max_prec)
elif (rationals or opt.field):
domain = (QQ_I if complexes else QQ)
else:
domain = (ZZ_I if complexes else ZZ)
result = [domain.from_sympy(coeff) for coeff in coeffs]
return (domain, result)
|
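The behavior of _construct_simple is easiest to observe through the public construct_domain entry point defined later in this file: all-integer coefficients stay in ZZ, a single rational upgrades everything to QQ, and any Float selects a RealField whose precision tracks the widest float seen:
from sympy import Float, Rational, construct_domain

construct_domain([1, 2, 3])             # (ZZ, ...) integers only
construct_domain([1, Rational(1, 2)])   # (QQ, ...) one rational upgrades all
construct_domain([Float(0.5)])          # (RR, ...) floats pick RealField(53)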
def _construct_algebraic(coeffs, opt):
'We know that coefficients are algebraic so construct the extension. '
from sympy.polys.numberfields import primitive_element
(result, exts) = ([], set())
for coeff in coeffs:
if coeff.is_Rational:
coeff = (None, 0, QQ.from_sympy(coeff))
else:
a = coeff.as_coeff_add()[0]
coeff -= a
b = coeff.as_coeff_mul()[0]
coeff /= b
exts.add(coeff)
a = QQ.from_sympy(a)
b = QQ.from_sympy(b)
coeff = (coeff, b, a)
result.append(coeff)
exts = list(exts)
(g, span, H) = primitive_element(exts, ex=True, polys=True)
root = sum([(s * ext) for (s, ext) in zip(span, exts)])
(domain, g) = (QQ.algebraic_field((g, root)), g.rep.rep)
for (i, (coeff, a, b)) in enumerate(result):
if (coeff is not None):
coeff = ((a * domain.dtype.from_list(H[exts.index(coeff)], g, QQ)) + b)
else:
coeff = domain.dtype.from_list([b], g, QQ)
result[i] = coeff
return (domain, result)
| 2,948,707,779,347,088,000
|
We know that coefficients are algebraic so construct the extension.
|
sympy/polys/constructor.py
|
_construct_algebraic
|
ABKor752/sympy
|
python
|
def _construct_algebraic(coeffs, opt):
' '
from sympy.polys.numberfields import primitive_element
(result, exts) = ([], set())
for coeff in coeffs:
if coeff.is_Rational:
coeff = (None, 0, QQ.from_sympy(coeff))
else:
a = coeff.as_coeff_add()[0]
coeff -= a
b = coeff.as_coeff_mul()[0]
coeff /= b
exts.add(coeff)
a = QQ.from_sympy(a)
b = QQ.from_sympy(b)
coeff = (coeff, b, a)
result.append(coeff)
exts = list(exts)
(g, span, H) = primitive_element(exts, ex=True, polys=True)
root = sum([(s * ext) for (s, ext) in zip(span, exts)])
(domain, g) = (QQ.algebraic_field((g, root)), g.rep.rep)
for (i, (coeff, a, b)) in enumerate(result):
if (coeff is not None):
coeff = ((a * domain.dtype.from_list(H[exts.index(coeff)], g, QQ)) + b)
else:
coeff = domain.dtype.from_list([b], g, QQ)
result[i] = coeff
return (domain, result)
|
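When the caller passes extension=True, algebraic coefficients reach _construct_algebraic, which folds all the extensions into a single primitive element; for example:
from sympy import sqrt, construct_domain

# Without extension=True this would fall through to the EX domain.
dom, coeffs = construct_domain([sqrt(2), 1], extension=True)
print(dom)  # QQ<sqrt(2)>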
def _construct_composite(coeffs, opt):
'Handle composite domains, e.g.: ZZ[X], QQ[X], ZZ(X), QQ(X). '
(numers, denoms) = ([], [])
for coeff in coeffs:
(numer, denom) = coeff.as_numer_denom()
numers.append(numer)
denoms.append(denom)
(polys, gens) = parallel_dict_from_basic((numers + denoms))
if (not gens):
return None
if (opt.composite is None):
if any(((gen.is_number and gen.is_algebraic) for gen in gens)):
return None
all_symbols = set()
for gen in gens:
symbols = gen.free_symbols
if (all_symbols & symbols):
return None
else:
all_symbols |= symbols
n = len(gens)
k = (len(polys) // 2)
numers = polys[:k]
denoms = polys[k:]
if opt.field:
fractions = True
else:
(fractions, zeros) = (False, ((0,) * n))
for denom in denoms:
if ((len(denom) > 1) or (zeros not in denom)):
fractions = True
break
coeffs = set()
if (not fractions):
for (numer, denom) in zip(numers, denoms):
denom = denom[zeros]
for (monom, coeff) in numer.items():
coeff /= denom
coeffs.add(coeff)
numer[monom] = coeff
else:
for (numer, denom) in zip(numers, denoms):
coeffs.update(list(numer.values()))
coeffs.update(list(denom.values()))
rationals = floats = complexes = False
float_numbers = []
for coeff in coeffs:
if coeff.is_Rational:
if (not coeff.is_Integer):
rationals = True
elif coeff.is_Float:
floats = True
float_numbers.append(coeff)
else:
is_complex = pure_complex(coeff)
if (is_complex is not None):
complexes = True
(x, y) = is_complex
if (x.is_Rational and y.is_Rational):
if (not (x.is_Integer and y.is_Integer)):
rationals = True
else:
floats = True
if x.is_Float:
float_numbers.append(x)
if y.is_Float:
float_numbers.append(y)
max_prec = (max((c._prec for c in float_numbers)) if float_numbers else 53)
if (floats and complexes):
ground = ComplexField(prec=max_prec)
elif floats:
ground = RealField(prec=max_prec)
elif complexes:
if rationals:
ground = QQ_I
else:
ground = ZZ_I
elif rationals:
ground = QQ
else:
ground = ZZ
result = []
if (not fractions):
domain = ground.poly_ring(*gens)
for numer in numers:
for (monom, coeff) in numer.items():
numer[monom] = ground.from_sympy(coeff)
result.append(domain(numer))
else:
domain = ground.frac_field(*gens)
for (numer, denom) in zip(numers, denoms):
for (monom, coeff) in numer.items():
numer[monom] = ground.from_sympy(coeff)
for (monom, coeff) in denom.items():
denom[monom] = ground.from_sympy(coeff)
result.append(domain((numer, denom)))
return (domain, result)
| -2,506,318,975,181,357,000
|
Handle composite domains, e.g.: ZZ[X], QQ[X], ZZ(X), QQ(X).
|
sympy/polys/constructor.py
|
_construct_composite
|
ABKor752/sympy
|
python
|
def _construct_composite(coeffs, opt):
' '
(numers, denoms) = ([], [])
for coeff in coeffs:
(numer, denom) = coeff.as_numer_denom()
numers.append(numer)
denoms.append(denom)
(polys, gens) = parallel_dict_from_basic((numers + denoms))
if (not gens):
return None
if (opt.composite is None):
if any(((gen.is_number and gen.is_algebraic) for gen in gens)):
return None
all_symbols = set()
for gen in gens:
symbols = gen.free_symbols
if (all_symbols & symbols):
return None
else:
all_symbols |= symbols
n = len(gens)
k = (len(polys) // 2)
numers = polys[:k]
denoms = polys[k:]
if opt.field:
fractions = True
else:
(fractions, zeros) = (False, ((0,) * n))
for denom in denoms:
if ((len(denom) > 1) or (zeros not in denom)):
fractions = True
break
coeffs = set()
if (not fractions):
for (numer, denom) in zip(numers, denoms):
denom = denom[zeros]
for (monom, coeff) in numer.items():
coeff /= denom
coeffs.add(coeff)
numer[monom] = coeff
else:
for (numer, denom) in zip(numers, denoms):
coeffs.update(list(numer.values()))
coeffs.update(list(denom.values()))
rationals = floats = complexes = False
float_numbers = []
for coeff in coeffs:
if coeff.is_Rational:
if (not coeff.is_Integer):
rationals = True
elif coeff.is_Float:
floats = True
float_numbers.append(coeff)
else:
is_complex = pure_complex(coeff)
if (is_complex is not None):
complexes = True
(x, y) = is_complex
if (x.is_Rational and y.is_Rational):
if (not (x.is_Integer and y.is_Integer)):
rationals = True
else:
floats = True
if x.is_Float:
float_numbers.append(x)
if y.is_Float:
float_numbers.append(y)
max_prec = (max((c._prec for c in float_numbers)) if float_numbers else 53)
if (floats and complexes):
ground = ComplexField(prec=max_prec)
elif floats:
ground = RealField(prec=max_prec)
elif complexes:
if rationals:
ground = QQ_I
else:
ground = ZZ_I
elif rationals:
ground = QQ
else:
ground = ZZ
result = []
if (not fractions):
domain = ground.poly_ring(*gens)
for numer in numers:
for (monom, coeff) in numer.items():
numer[monom] = ground.from_sympy(coeff)
result.append(domain(numer))
else:
domain = ground.frac_field(*gens)
for (numer, denom) in zip(numers, denoms):
for (monom, coeff) in numer.items():
numer[monom] = ground.from_sympy(coeff)
for (monom, coeff) in denom.items():
denom[monom] = ground.from_sympy(coeff)
result.append(domain((numer, denom)))
return (domain, result)
|
def _construct_expression(coeffs, opt):
'The last resort case, i.e. use the expression domain. '
(domain, result) = (EX, [])
for coeff in coeffs:
result.append(domain.from_sympy(coeff))
return (domain, result)
| 8,277,807,598,530,472,000
|
The last resort case, i.e. use the expression domain.
|
sympy/polys/constructor.py
|
_construct_expression
|
ABKor752/sympy
|
python
|
def _construct_expression(coeffs, opt):
' '
(domain, result) = (EX, [])
for coeff in coeffs:
result.append(domain.from_sympy(coeff))
return (domain, result)
|
@public
def construct_domain(obj, **args):
'Construct a minimal domain for the list of coefficients. '
opt = build_options(args)
if hasattr(obj, '__iter__'):
if isinstance(obj, dict):
if (not obj):
(monoms, coeffs) = ([], [])
else:
(monoms, coeffs) = list(zip(*list(obj.items())))
else:
coeffs = obj
else:
coeffs = [obj]
coeffs = list(map(sympify, coeffs))
result = _construct_simple(coeffs, opt)
if (result is not None):
if (result is not False):
(domain, coeffs) = result
else:
(domain, coeffs) = _construct_expression(coeffs, opt)
else:
if (opt.composite is False):
result = None
else:
result = _construct_composite(coeffs, opt)
if (result is not None):
(domain, coeffs) = result
else:
(domain, coeffs) = _construct_expression(coeffs, opt)
if hasattr(obj, '__iter__'):
if isinstance(obj, dict):
return (domain, dict(list(zip(monoms, coeffs))))
else:
return (domain, coeffs)
else:
return (domain, coeffs[0])
| 5,775,620,295,657,424,000
|
Construct a minimal domain for the list of coefficients.
|
sympy/polys/constructor.py
|
construct_domain
|
ABKor752/sympy
|
python
|
@public
def construct_domain(obj, **args):
' '
opt = build_options(args)
if hasattr(obj, '__iter__'):
if isinstance(obj, dict):
if (not obj):
(monoms, coeffs) = ([], [])
else:
(monoms, coeffs) = list(zip(*list(obj.items())))
else:
coeffs = obj
else:
coeffs = [obj]
coeffs = list(map(sympify, coeffs))
result = _construct_simple(coeffs, opt)
if (result is not None):
if (result is not False):
(domain, coeffs) = result
else:
(domain, coeffs) = _construct_expression(coeffs, opt)
else:
if (opt.composite is False):
result = None
else:
result = _construct_composite(coeffs, opt)
if (result is not None):
(domain, coeffs) = result
else:
(domain, coeffs) = _construct_expression(coeffs, opt)
if hasattr(obj, '__iter__'):
if isinstance(obj, dict):
return (domain, dict(list(zip(monoms, coeffs))))
else:
return (domain, coeffs)
else:
return (domain, coeffs[0])
|
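Taken together, the dispatch order is: _construct_simple for pure numbers, _construct_composite for polynomial and rational-function coefficients, and _construct_expression as the last resort. A few illustrative calls:
from sympy import symbols, construct_domain

x = symbols('x')
construct_domain([x, 2*x + 1])           # (ZZ[x], ...)  polynomial ring
construct_domain([1/x])                  # (ZZ(x), ...)  fraction field
construct_domain([x], composite=False)   # (EX, ...)     expression fallback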
def __init__(self, images_dir, clustering_dir, classfication_dir, output_dir=None):
'\n this function initializes the network class\n :param images_dir: \n :param clustering_dir: \n :param classfication_dir: \n '
self.images_dir = images_dir
self.clustering_dir = clustering_dir
self.classification_dir = classfication_dir
self.model = None
self.output_dir = output_dir
self.model_file_name = 'finalModel.h5'
| 3,227,855,410,215,280,600
|
this function initializes the network class
:param images_dir:
:param clustering_dir:
:param classfication_dir:
|
src/models/final_model.py
|
__init__
|
dsp-uga/rope
|
python
|
def __init__(self, images_dir, clustering_dir, classfication_dir, output_dir=None):
'\n this function initializes the network class\n :param images_dir: \n :param clustering_dir: \n :param classfication_dir: \n '
self.images_dir = images_dir
self.clustering_dir = clustering_dir
self.classification_dir = classfication_dir
self.model = None
self.output_dir = output_dir
self.model_file_name = 'finalModel.h5'
|
def load_model(self):
'\n this function loads model from file\n '
if os.path.isfile(self.model_file_name):
self.model = keras.models.load_model(self.model_file_name)
| 1,793,141,196,649,369,000
|
this function loads model from file
|
src/models/final_model.py
|
load_model
|
dsp-uga/rope
|
python
|
def load_model(self):
'\n \n '
if os.path.isfile(self.model_file_name):
self.model = keras.models.load_model(self.model_file_name)
|
def train(self):
'\n this function trains the final network\n :return: \n '
self.load_model()
if (self.model is None):
self.model = self.UNet(input_shape=(64, 64, 3))
print(self.model.summary())
for k in range(0, 4):
for i in range(0, 14):
print(i)
X_train = np.load(os.path.join(self.images_dir, (('X_' + str(i)) + '.npy')))
X1_train = np.load(os.path.join(self.clustering_dir, (('train_X_' + str(i)) + '.npy')))
X2_train = np.load(os.path.join(self.classification_dir, (('train_X_' + str(i)) + '.npy')))
X_train = X_train.astype('float32')
X1_train = X1_train.astype('float32')
X2_train = X2_train.astype('float32')
y_train = np.load(os.path.join(self.images_dir, (('y_' + str(i)) + '.npy')))
y_train = keras.utils.to_categorical(y_train, 15000)
self.model.fit([X_train, X1_train, X2_train], [y_train], batch_size=64, nb_epoch=1, shuffle=True)
self.model.save('final_net_dsp.h5')
| 6,425,991,241,974,136,000
|
this function trains the final network
:return:
|
src/models/final_model.py
|
train
|
dsp-uga/rope
|
python
|
def train(self):
'\n this function trains the final network\n :return: \n '
self.load_model()
if (self.model is None):
self.model = self.UNet(input_shape=(64, 64, 3))
print(self.model.summary())
for k in range(0, 4):
for i in range(0, 14):
print(i)
X_train = np.load(os.path.join(self.images_dir, (('X_' + str(i)) + '.npy')))
X1_train = np.load(os.path.join(self.clustering_dir, (('train_X_' + str(i)) + '.npy')))
X2_train = np.load(os.path.join(self.classification_dir, (('train_X_' + str(i)) + '.npy')))
X_train = X_train.astype('float32')
X1_train = X1_train.astype('float32')
X2_train = X2_train.astype('float32')
y_train = np.load(os.path.join(self.images_dir, (('y_' + str(i)) + '.npy')))
y_train = keras.utils.to_categorical(y_train, 15000)
self.model.fit([X_train, X1_train, X2_train], [y_train], batch_size=64, nb_epoch=1, shuffle=True)
self.model.save('final_net_dsp.h5')
|
def predict(self):
'\n this function runs the prediction over the sets\n :return: \n '
if (self.model is None):
self.load_model()
if (self.model is None):
return None
i = 0
X_train = np.load(os.path.join(self.images_dir, (('X_' + str(i)) + '.npy')))
X1_train = np.load(os.path.join(self.clustering_dir, (('train_X_' + str(i)) + '.npy')))
X2_train = np.load(os.path.join(self.classification_dir, (('train_X_' + str(i)) + '.npy')))
predicted = self.model.predict([X_train, X1_train, X2_train], batch_size=20)
return predicted
| -5,065,718,216,160,854,000
|
this function runs the prediction over the sets
:return:
|
src/models/final_model.py
|
predict
|
dsp-uga/rope
|
python
|
def predict(self):
'\n this function runs the prediction over the sets\n :return: \n '
if (self.model is None):
self.load_model()
if (self.model is None):
return None
i = 0
X_train = np.load(os.path.join(self.images_dir, (('X_' + str(i)) + '.npy')))
X1_train = np.load(os.path.join(self.clustering_dir, (('train_X_' + str(i)) + '.npy')))
X2_train = np.load(os.path.join(self.classification_dir, (('train_X_' + str(i)) + '.npy')))
predicted = self.model.predict([X_train, X1_train, X2_train], batch_size=20)
return predicted
|
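A hedged end-to-end use of the class these methods belong to. The class name FinalModel is an assumption (the records do not show it), the directory layout is illustrative, and note that nb_epoch in train is the Keras 1.x spelling of what Keras 2 renamed epochs:
# Hypothetical driver; class name and paths are assumptions.
model = FinalModel(images_dir='data/images',
                   clustering_dir='data/clustering',
                   classfication_dir='data/classification',
                   output_dir='results')
model.train()                  # iterates over shards, saves final_net_dsp.h5
predictions = model.predict()  # per-sample probabilities over 15000 classes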
def plot_et(self, LAXIS, xbl, xbr, ybu, ybd, ilg):
'Plot mean total energy stratification in the model'
grd1 = self.xzn0
plt1 = self.fht_et
plt.figure(figsize=(7, 6))
plt.gca().yaxis.get_major_formatter().set_powerlimits((0, 0))
to_plot = [plt1]
self.set_plt_axis(LAXIS, xbl, xbr, ybu, ybd, to_plot)
plt.title('total energy')
plt.plot(grd1, plt1, color='brown', label='$\\widetilde{\\varepsilon}_t$')
if (self.ig == 1):
setxlabel = 'x (cm)'
setylabel = '$\\widetilde{\\varepsilon}_t$ (erg g$^{-1}$)'
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
elif (self.ig == 2):
setxlabel = 'r (cm)'
setylabel = '$\\widetilde{\\varepsilon}_t$ (erg g$^{-1}$)'
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
plt.legend(loc=ilg, prop={'size': 18})
plt.show(block=False)
if (self.fext == 'png'):
plt.savefig((('RESULTS/' + self.data_prefix) + 'mean_et.png'))
elif (self.fext == 'eps'):
plt.savefig((('RESULTS/' + self.data_prefix) + 'mean_et.eps'))
| -8,758,915,946,996,778,000
|
Plot mean total energy stratification in the model
|
CANUTO1997/LuminosityEquation.py
|
plot_et
|
mmicromegas/ransX
|
python
|
def plot_et(self, LAXIS, xbl, xbr, ybu, ybd, ilg):
grd1 = self.xzn0
plt1 = self.fht_et
plt.figure(figsize=(7, 6))
plt.gca().yaxis.get_major_formatter().set_powerlimits((0, 0))
to_plot = [plt1]
self.set_plt_axis(LAXIS, xbl, xbr, ybu, ybd, to_plot)
plt.title('total energy')
plt.plot(grd1, plt1, color='brown', label='$\\widetilde{\\varepsilon}_t$')
if (self.ig == 1):
setxlabel = 'x (cm)'
setylabel = '$\\widetilde{\\varepsilon}_t$ (erg g$^{-1}$)'
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
elif (self.ig == 2):
setxlabel = 'r (cm)'
setylabel = '$\\widetilde{\\varepsilon}_t$ (erg g$^{-1}$)'
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
plt.legend(loc=ilg, prop={'size': 18})
plt.show(block=False)
if (self.fext == 'png'):
plt.savefig((('RESULTS/' + self.data_prefix) + 'mean_et.png'))
elif (self.fext == 'eps'):
plt.savefig((('RESULTS/' + self.data_prefix) + 'mean_et.eps'))
|
def plot_luminosity_equation_exact(self, LAXIS, xbl, xbr, ybu, ybd, ilg):
'Plot luminosity equation in the model'
grd1 = self.xzn0
rhs0 = self.minus_cp_rho_dTdt
rhs1 = self.plus_delta_dPdt
rhs2 = self.minus_dd_div_eiui
rhs3 = self.plus_tke_diss
res = self.minus_resLumExactEquation
plt.figure(figsize=(7, 6))
plt.gca().yaxis.get_major_formatter().set_powerlimits((0, 0))
to_plot = [rhs0, rhs1, res]
self.set_plt_axis(LAXIS, xbl, xbr, ybu, ybd, to_plot)
self.bconv = 400000000.0
self.tconv = 1200000000.0
xlimitrange = np.where(((grd1 > self.bconv) & (grd1 < self.tconv)))
xlimitbottom = np.where((grd1 < self.bconv))
xlimittop = np.where((grd1 > self.tconv))
plt.title('standard luminosity equation exact')
if (self.ig == 1):
plt.plot(grd1[xlimitrange], rhs0[xlimitrange], color='#FF8C00', label='$-c_P \\overline{\\rho \\partial_t T}$')
plt.plot(grd1[xlimitrange], rhs1[xlimitrange], color='y', label='$+\\delta \\overline{\\partial_t P}$')
plt.plot(grd1[xlimitrange], rhs2[xlimitrange], color='r', label='$-\\overline{\\rho \\nabla \\cdot \\epsilon_I {\\bf u}}$')
plt.plot(grd1[xlimitrange], rhs3[xlimitrange], color='g', label='$+\\varepsilon_K$')
plt.plot(grd1, res, color='k', linestyle='--', label='res $\\sim N$')
zeros = np.zeros(self.nx)
plt.plot(grd1, zeros, color='k', linewidth=0.6, label='zero')
elif (self.ig == 2):
plt.plot(grd1[xlimitrange], rhs0[xlimitrange], color='#FF8C00', label='$-c_P \\rho dT/dt$')
plt.plot(grd1[xlimitrange], rhs1[xlimitrange], color='y', label='$+\\delta dP/dt$')
plt.plot(grd1, res, color='k', linestyle='--', label='res $\\sim N$')
zeros = np.zeros(self.nx)
plt.plot(grd1, zeros, color='k', linewidth=0.6, label='zero')
plt.axvline(self.bconv, linestyle='--', linewidth=0.7, color='k')
plt.axvline(self.tconv, linestyle='--', linewidth=0.7, color='k')
if (self.ig == 1):
setxlabel = 'x (cm)'
setylabel = 'erg g$^{-1}$ s$^{-1}$'
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
elif (self.ig == 2):
setxlabel = 'r (cm)'
setylabel = 'erg g$^{-1}$ s$^{-1}$'
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
plt.legend(loc=ilg, prop={'size': 10}, ncol=2)
plt.show(block=False)
if (self.fext == 'png'):
plt.savefig((('RESULTS/' + self.data_prefix) + 'standard_luminosity_exact_eq.png'))
elif (self.fext == 'eps'):
plt.savefig((('RESULTS/' + self.data_prefix) + 'standard_luminosity_exact_eq.eps'))
| -119,366,724,537,761,120
|
Plot luminosity equation in the model
|
CANUTO1997/LuminosityEquation.py
|
plot_luminosity_equation_exact
|
mmicromegas/ransX
|
python
|
def plot_luminosity_equation_exact(self, LAXIS, xbl, xbr, ybu, ybd, ilg):
grd1 = self.xzn0
rhs0 = self.minus_cp_rho_dTdt
rhs1 = self.plus_delta_dPdt
rhs2 = self.minus_dd_div_eiui
rhs3 = self.plus_tke_diss
res = self.minus_resLumExactEquation
plt.figure(figsize=(7, 6))
plt.gca().yaxis.get_major_formatter().set_powerlimits((0, 0))
to_plot = [rhs0, rhs1, res]
self.set_plt_axis(LAXIS, xbl, xbr, ybu, ybd, to_plot)
self.bconv = 400000000.0
self.tconv = 1200000000.0
xlimitrange = np.where(((grd1 > self.bconv) & (grd1 < self.tconv)))
xlimitbottom = np.where((grd1 < self.bconv))
xlimittop = np.where((grd1 > self.tconv))
plt.title('standard luminosity equation exact')
if (self.ig == 1):
plt.plot(grd1[xlimitrange], rhs0[xlimitrange], color='#FF8C00', label='$-c_P \\overline{\\rho \\partial_t T}$')
plt.plot(grd1[xlimitrange], rhs1[xlimitrange], color='y', label='$+\\delta \\overline{\\partial_t P}$')
plt.plot(grd1[xlimitrange], rhs2[xlimitrange], color='r', label='$-\\overline{\\rho \\nabla \\cdot \\epsilon_I {\\bf u}}$')
plt.plot(grd1[xlimitrange], rhs3[xlimitrange], color='g', label='$+\\varepsilon_K$')
plt.plot(grd1, res, color='k', linestyle='--', label='res $\\sim N$')
zeros = np.zeros(self.nx)
plt.plot(grd1, zeros, color='k', linewidth=0.6, label='zero')
elif (self.ig == 2):
plt.plot(grd1[xlimitrange], rhs0[xlimitrange], color='#FF8C00', label='$-c_P \\rho dT/dt$')
plt.plot(grd1[xlimitrange], rhs1[xlimitrange], color='y', label='$+\\delta dP/dt$')
plt.plot(grd1, res, color='k', linestyle='--', label='res $\\sim N$')
zeros = np.zeros(self.nx)
plt.plot(grd1, zeros, color='k', linewidth=0.6, label='zero')
plt.axvline(self.bconv, linestyle='--', linewidth=0.7, color='k')
plt.axvline(self.tconv, linestyle='--', linewidth=0.7, color='k')
if (self.ig == 1):
setxlabel = 'x (cm)'
setylabel = 'erg g$^{-1}$ s$^{-1}$'
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
elif (self.ig == 2):
setxlabel = 'r (cm)'
setylabel = 'erg g$^{-1}$ s$^{-1}$'
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
plt.legend(loc=ilg, prop={'size': 10}, ncol=2)
plt.show(block=False)
if (self.fext == 'png'):
plt.savefig((('RESULTS/' + self.data_prefix) + 'standard_luminosity_exact_eq.png'))
elif (self.fext == 'eps'):
plt.savefig((('RESULTS/' + self.data_prefix) + 'standard_luminosity_exact_eq.eps'))
|
def setup_vertex_buffer(gl, data, shader, shader_variable):
'Setup a vertex buffer with `data` vertices as `shader_variable` on shader'
vbo = QOpenGLBuffer(QOpenGLBuffer.VertexBuffer)
vbo.create()
with bind(vbo):
vertices = np.array(data, np.float32)
(count, dim_vertex) = vertices.shape
vbo.allocate(vertices.flatten(), vertices.nbytes)
attr_loc = shader.attributeLocation(shader_variable)
shader.enableAttributeArray(attr_loc)
shader.setAttributeBuffer(attr_loc, gl.GL_FLOAT, 0, dim_vertex)
return vbo
| 3,822,176,870,625,612,000
|
Setup a vertex buffer with `data` vertices as `shader_variable` on shader
|
caimageviewer/gl_util.py
|
setup_vertex_buffer
|
klauer/caproto-image-viewer
|
python
|
def setup_vertex_buffer(gl, data, shader, shader_variable):
vbo = QOpenGLBuffer(QOpenGLBuffer.VertexBuffer)
vbo.create()
with bind(vbo):
vertices = np.array(data, np.float32)
(count, dim_vertex) = vertices.shape
vbo.allocate(vertices.flatten(), vertices.nbytes)
attr_loc = shader.attributeLocation(shader_variable)
shader.enableAttributeArray(attr_loc)
shader.setAttributeBuffer(attr_loc, gl.GL_FLOAT, 0, dim_vertex)
return vbo
|
def update_vertex_buffer(vbo, data):
'Update a vertex buffer with `data` vertices'
vertices = np.asarray(data, np.float32)
(count, dim_vertex) = vertices.shape
with bind(vbo):
vbo.allocate(vertices.flatten(), vertices.nbytes)
| 4,712,907,263,101,164,000
|
Update a vertex buffer with `data` vertices
|
caimageviewer/gl_util.py
|
update_vertex_buffer
|
klauer/caproto-image-viewer
|
python
|
def update_vertex_buffer(vbo, data):
vertices = np.asarray(data, np.float32)
(count, dim_vertex) = vertices.shape
with bind(vbo):
vbo.allocate(vertices.flatten(), vertices.nbytes)
|
def copy_data_to_pbo(pbo, data, *, mapped_array=None):
'Allocate or update data stored in a pixel buffer object'
(width, height) = data.shape
with bind(pbo):
if (pbo.isCreated() and (mapped_array is not None)):
mapped_array[:] = data.reshape((width, height))
return mapped_array
full_size = data.nbytes
pointer_type = np.ctypeslib.ndpointer(dtype=data.dtype, shape=(width, height), ndim=data.ndim)
pbo.create()
with bind(pbo):
pbo.allocate(data, full_size)
ptr = pbo.map(QOpenGLBuffer.WriteOnly)
assert (ptr is not None), 'Failed to map pixel buffer array'
pointer_type = np.ctypeslib.ndpointer(dtype=data.dtype, shape=(width, height), ndim=data.ndim)
mapped_array = np.ctypeslib.as_array(pointer_type(int(ptr)))
pbo.unmap()
mapped_array[:] = data.reshape((width, height))
return mapped_array
| 5,597,396,672,997,456,000
|
Allocate or update data stored in a pixel buffer object
|
caimageviewer/gl_util.py
|
copy_data_to_pbo
|
klauer/caproto-image-viewer
|
python
|
def copy_data_to_pbo(pbo, data, *, mapped_array=None):
(width, height) = data.shape
with bind(pbo):
if (pbo.isCreated() and (mapped_array is not None)):
mapped_array[:] = data.reshape((width, height))
return mapped_array
full_size = data.nbytes
pointer_type = np.ctypeslib.ndpointer(dtype=data.dtype, shape=(width, height), ndim=data.ndim)
pbo.create()
with bind(pbo):
pbo.allocate(data, full_size)
ptr = pbo.map(QOpenGLBuffer.WriteOnly)
assert (ptr is not None), 'Failed to map pixel buffer array'
pointer_type = np.ctypeslib.ndpointer(dtype=data.dtype, shape=(width, height), ndim=data.ndim)
mapped_array = np.ctypeslib.as_array(pointer_type(int(ptr)))
pbo.unmap()
mapped_array[:] = data.reshape((width, height))
return mapped_array
|
def update_pbo_texture(gl, pbo, texture, *, array_data, texture_format, source_format, source_type):
'Update a texture associated with a PBO'
(width, height) = array_data.shape[:2]
if (source_format == gl.GL_RGB):
height //= 3
with bind(pbo, texture):
gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 1)
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR)
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR)
gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, texture_format, width, height, 0, source_format, source_type, None)
| 3,789,928,796,293,781,500
|
Update a texture associated with a PBO
|
caimageviewer/gl_util.py
|
update_pbo_texture
|
klauer/caproto-image-viewer
|
python
|
def update_pbo_texture(gl, pbo, texture, *, array_data, texture_format, source_format, source_type):
(width, height) = array_data.shape[:2]
if (source_format == gl.GL_RGB):
height //= 3
with bind(pbo, texture):
gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 1)
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR)
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR)
gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, texture_format, width, height, 0, source_format, source_type, None)
|
@contextmanager
def bind(*objs, args=None):
'Bind all objs (optionally with positional arguments); releases at cleanup'
if (args is None):
args = (None for obj in objs)
for (obj, arg) in zip(objs, args):
if (arg is not None):
obj.bind(arg)
else:
obj.bind()
(yield)
for obj in objs[::(- 1)]:
obj.release()
| 1,272,922,605,012,459,800
|
Bind all objs (optionally with positional arguments); releases at cleanup
|
caimageviewer/gl_util.py
|
bind
|
klauer/caproto-image-viewer
|
python
|
@contextmanager
def bind(*objs, args=None):
if (args is None):
args = (None for obj in objs)
for (obj, arg) in zip(objs, args):
if (arg is not None):
obj.bind(arg)
else:
obj.bind()
(yield)
for obj in objs[::(- 1)]:
obj.release()
|
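The helpers in this file compose through bind; a sketch of a typical frame update using only the functions shown above, where the gl module, shader, and buffer/texture objects are assumed to come from an initialized QOpenGL context:
# Assumed to run inside an active OpenGL context.
vbo = setup_vertex_buffer(gl, [[0, 0], [1, 0], [0, 1]], shader, 'position')
mapped = copy_data_to_pbo(pbo, frame)                       # allocate + map once
mapped = copy_data_to_pbo(pbo, frame, mapped_array=mapped)  # later frames: copy only
update_pbo_texture(gl, pbo, texture, array_data=frame,
                   texture_format=gl.GL_RED, source_format=gl.GL_RED,
                   source_type=gl.GL_UNSIGNED_BYTE)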
def _generate_code_verifier():
"\n Generates a 'code_verifier' as described in https://tools.ietf.org/html/rfc7636#section-4.1\n Adapted from https://github.com/openstack/deb-python-oauth2client/blob/master/oauth2client/_pkce.py.\n :return str:\n "
code_verifier = _base64.urlsafe_b64encode(_os.urandom(_code_verifier_length)).decode(_utf_8)
code_verifier = _re.sub('[^a-zA-Z0-9_\\-.~]+', '', code_verifier)
if (len(code_verifier) < 43):
raise ValueError('Verifier too short. number of bytes must be > 30.')
elif (len(code_verifier) > 128):
raise ValueError('Verifier too long. number of bytes must be < 97.')
return code_verifier
| -140,427,470,149,594,020
|
Generates a 'code_verifier' as described in https://tools.ietf.org/html/rfc7636#section-4.1
Adapted from https://github.com/openstack/deb-python-oauth2client/blob/master/oauth2client/_pkce.py.
:return str:
|
flytekit/clis/auth/auth.py
|
_generate_code_verifier
|
AdrianoKF/flytekit
|
python
|
def _generate_code_verifier():
"\n Generates a 'code_verifier' as described in https://tools.ietf.org/html/rfc7636#section-4.1\n Adapted from https://github.com/openstack/deb-python-oauth2client/blob/master/oauth2client/_pkce.py.\n :return str:\n "
code_verifier = _base64.urlsafe_b64encode(_os.urandom(_code_verifier_length)).decode(_utf_8)
code_verifier = _re.sub('[^a-zA-Z0-9_\\-.~]+', '', code_verifier)
if (len(code_verifier) < 43):
raise ValueError('Verifier too short. number of bytes must be > 30.')
elif (len(code_verifier) > 128):
raise ValueError('Verifier too long. number of bytes must be < 97.')
return code_verifier
|
def _create_code_challenge(code_verifier):
'\n Adapted from https://github.com/openstack/deb-python-oauth2client/blob/master/oauth2client/_pkce.py.\n :param str code_verifier: represents a code verifier generated by generate_code_verifier()\n :return str: urlsafe base64-encoded sha256 hash digest\n '
code_challenge = _hashlib.sha256(code_verifier.encode(_utf_8)).digest()
code_challenge = _base64.urlsafe_b64encode(code_challenge).decode(_utf_8)
code_challenge = code_challenge.replace('=', '')
return code_challenge
| 4,351,893,662,429,492,700
|
Adapted from https://github.com/openstack/deb-python-oauth2client/blob/master/oauth2client/_pkce.py.
:param str code_verifier: represents a code verifier generated by generate_code_verifier()
:return str: urlsafe base64-encoded sha256 hash digest
|
flytekit/clis/auth/auth.py
|
_create_code_challenge
|
AdrianoKF/flytekit
|
python
|
def _create_code_challenge(code_verifier):
'\n Adapted from https://github.com/openstack/deb-python-oauth2client/blob/master/oauth2client/_pkce.py.\n :param str code_verifier: represents a code verifier generated by generate_code_verifier()\n :return str: urlsafe base64-encoded sha256 hash digest\n '
code_challenge = _hashlib.sha256(code_verifier.encode(_utf_8)).digest()
code_challenge = _base64.urlsafe_b64encode(code_challenge).decode(_utf_8)
code_challenge = code_challenge.replace('=', '')
return code_challenge
|
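The two helpers implement RFC 7636 end to end; stripped of the module-private aliases (_base64, _re, and so on), the same flow in plain standard-library code:
import base64
import hashlib
import os
import re

# code_verifier: high-entropy random string over the unreserved character set;
# 64 random bytes keeps the result inside the 43-128 character window.
verifier = base64.urlsafe_b64encode(os.urandom(64)).decode('utf-8')
verifier = re.sub('[^a-zA-Z0-9_\\-.~]+', '', verifier)

# code_challenge: urlsafe base64 of SHA-256(verifier) with '=' padding removed.
challenge = base64.urlsafe_b64encode(
    hashlib.sha256(verifier.encode('utf-8')).digest()
).decode('utf-8').replace('=', '')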
def _initialize_credentials(self, auth_token_resp):
'\n The auth_token_resp body is of the form:\n {\n "access_token": "foo",\n "refresh_token": "bar",\n "token_type": "Bearer"\n }\n '
response_body = auth_token_resp.json()
if ('access_token' not in response_body):
raise ValueError('Expected "access_token" in response from oauth server')
if ('refresh_token' in response_body):
self._refresh_token = response_body['refresh_token']
access_token = response_body['access_token']
refresh_token = response_body['refresh_token']
_keyring.set_password(_keyring_service_name, _keyring_access_token_storage_key, access_token)
_keyring.set_password(_keyring_service_name, _keyring_refresh_token_storage_key, refresh_token)
self._credentials = Credentials(access_token=access_token)
| 3,593,339,050,330,529,000
|
The auth_token_resp body is of the form:
{
"access_token": "foo",
"refresh_token": "bar",
"token_type": "Bearer"
}
|
flytekit/clis/auth/auth.py
|
_initialize_credentials
|
AdrianoKF/flytekit
|
python
|
def _initialize_credentials(self, auth_token_resp):
'\n The auth_token_resp body is of the form:\n {\n "access_token": "foo",\n "refresh_token": "bar",\n "token_type": "Bearer"\n }\n '
response_body = auth_token_resp.json()
if ('access_token' not in response_body):
raise ValueError('Expected "access_token" in response from oauth server')
if ('refresh_token' in response_body):
self._refresh_token = response_body['refresh_token']
access_token = response_body['access_token']
refresh_token = response_body['refresh_token']
_keyring.set_password(_keyring_service_name, _keyring_access_token_storage_key, access_token)
_keyring.set_password(_keyring_service_name, _keyring_refresh_token_storage_key, refresh_token)
self._credentials = Credentials(access_token=access_token)
|
@property
def credentials(self):
'\n :return flytekit.clis.auth.auth.Credentials:\n '
return self._credentials
| -7,460,607,824,305,833,000
|
:return flytekit.clis.auth.auth.Credentials:
|
flytekit/clis/auth/auth.py
|
credentials
|
AdrianoKF/flytekit
|
python
|
@property
def credentials(self):
'\n \n '
return self._credentials
|
@property
def expired(self):
'\n :return bool:\n '
return self._expired
| 5,468,941,397,680,082,000
|
:return bool:
|
flytekit/clis/auth/auth.py
|
expired
|
AdrianoKF/flytekit
|
python
|
@property
def expired(self):
'\n \n '
return self._expired
|
def get(self, request, group_id, format=None):
' List all group members\n\n Permission checking:\n 1. only admin can perform this action.\n '
if (not request.user.admin_permissions.can_manage_group()):
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied.')
group_id = int(group_id)
group = ccnet_api.get_group(group_id)
if (not group):
error_msg = ('Group %d not found.' % group_id)
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
try:
avatar_size = int(request.GET.get('avatar_size', AVATAR_DEFAULT_SIZE))
except ValueError:
avatar_size = AVATAR_DEFAULT_SIZE
try:
members = ccnet_api.get_group_members(group_id)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
group_members_info = []
for m in members:
member_info = get_group_member_info(request, group_id, m.user_name, avatar_size)
group_members_info.append(member_info)
group_members = {'group_id': group_id, 'group_name': group.group_name, 'members': group_members_info}
return Response(group_members)
| 716,541,368,521,144,400
|
List all group members
Permission checking:
1. only admin can perform this action.
|
seahub/api2/endpoints/admin/group_members.py
|
get
|
DMKun/seahub
|
python
|
def get(self, request, group_id, format=None):
' List all group members\n\n Permission checking:\n 1. only admin can perform this action.\n '
if (not request.user.admin_permissions.can_manage_group()):
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied.')
group_id = int(group_id)
group = ccnet_api.get_group(group_id)
if (not group):
error_msg = ('Group %d not found.' % group_id)
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
try:
avatar_size = int(request.GET.get('avatar_size', AVATAR_DEFAULT_SIZE))
except ValueError:
avatar_size = AVATAR_DEFAULT_SIZE
try:
members = ccnet_api.get_group_members(group_id)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
group_members_info = []
for m in members:
member_info = get_group_member_info(request, group_id, m.user_name, avatar_size)
group_members_info.append(member_info)
group_members = {'group_id': group_id, 'group_name': group.group_name, 'members': group_members_info}
return Response(group_members)
|
def post(self, request, group_id):
'\n Bulk add group members.\n\n Permission checking:\n 1. only admin can perform this action.\n '
if (not request.user.admin_permissions.can_manage_group()):
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied.')
group_id = int(group_id)
group = ccnet_api.get_group(group_id)
if (not group):
error_msg = ('Group %d not found.' % group_id)
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
emails = request.POST.getlist('email', '')
if (not emails):
error_msg = 'Email invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
result = {}
result['failed'] = []
result['success'] = []
emails_need_add = []
for email in emails:
try:
User.objects.get(email=email)
except User.DoesNotExist:
result['failed'].append({'email': email, 'error_msg': ('User %s not found.' % email)})
continue
if is_group_member(group_id, email, in_structure=False):
result['failed'].append({'email': email, 'error_msg': ('User %s is already a group member.' % email2nickname(email))})
continue
emails_need_add.append(email)
for email in emails_need_add:
try:
ccnet_api.group_add_member(group_id, group.creator_name, email)
member_info = get_group_member_info(request, group_id, email)
result['success'].append(member_info)
except Exception as e:
logger.error(e)
result['failed'].append({'email': email, 'error_msg': 'Internal Server Error'})
add_user_to_group.send(sender=None, group_staff=request.user.username, group_id=group_id, added_user=email)
return Response(result)
| 4,027,174,524,807,285,000
|
Bulk add group members.
Permission checking:
1. only admin can perform this action.
|
seahub/api2/endpoints/admin/group_members.py
|
post
|
DMKun/seahub
|
python
|
def post(self, request, group_id):
'\n Bulk add group members.\n\n Permission checking:\n 1. only admin can perform this action.\n '
if (not request.user.admin_permissions.can_manage_group()):
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied.')
group_id = int(group_id)
group = ccnet_api.get_group(group_id)
if (not group):
error_msg = ('Group %d not found.' % group_id)
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
emails = request.POST.getlist('email', '')
if (not emails):
error_msg = 'Email invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
result = {}
result['failed'] = []
result['success'] = []
emails_need_add = []
for email in emails:
try:
User.objects.get(email=email)
except User.DoesNotExist:
result['failed'].append({'email': email, 'error_msg': ('User %s not found.' % email)})
continue
if is_group_member(group_id, email, in_structure=False):
result['failed'].append({'email': email, 'error_msg': ('User %s is already a group member.' % email2nickname(email))})
continue
emails_need_add.append(email)
for email in emails_need_add:
try:
ccnet_api.group_add_member(group_id, group.creator_name, email)
member_info = get_group_member_info(request, group_id, email)
result['success'].append(member_info)
except Exception as e:
logger.error(e)
result['failed'].append({'email': email, 'error_msg': 'Internal Server Error'})
add_user_to_group.send(sender=None, group_staff=request.user.username, group_id=group_id, added_user=email)
return Response(result)
|
def put(self, request, group_id, email, format=None):
' update role of a group member\n\n Permission checking:\n 1. only admin can perform this action.\n '
if (not request.user.admin_permissions.can_manage_group()):
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied.')
group_id = int(group_id)
group = ccnet_api.get_group(group_id)
if (not group):
error_msg = ('Group %d not found.' % group_id)
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
try:
User.objects.get(email=email)
except User.DoesNotExist:
error_msg = ('User %s not found.' % email)
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
try:
if (not is_group_member(group_id, email)):
error_msg = ('Email %s invalid.' % email)
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
is_admin = request.data.get('is_admin', '')
try:
if (is_admin.lower() == 'true'):
ccnet_api.group_set_admin(group_id, email)
elif (is_admin.lower() == 'false'):
ccnet_api.group_unset_admin(group_id, email)
else:
error_msg = 'is_admin invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
member_info = get_group_member_info(request, group_id, email)
return Response(member_info)
| 7,298,239,316,915,039,000
|
update role of a group member
Permission checking:
1. only admin can perform this action.
|
seahub/api2/endpoints/admin/group_members.py
|
put
|
DMKun/seahub
|
python
|
def put(self, request, group_id, email, format=None):
' update role of a group member\n\n Permission checking:\n 1. only admin can perform this action.\n '
if (not request.user.admin_permissions.can_manage_group()):
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied.')
group_id = int(group_id)
group = ccnet_api.get_group(group_id)
if (not group):
error_msg = ('Group %d not found.' % group_id)
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
try:
User.objects.get(email=email)
except User.DoesNotExist:
error_msg = ('User %s not found.' % email)
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
try:
if (not is_group_member(group_id, email)):
error_msg = ('Email %s invalid.' % email)
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
is_admin = request.data.get('is_admin', '')
try:
if (is_admin.lower() == 'true'):
ccnet_api.group_set_admin(group_id, email)
elif (is_admin.lower() == 'false'):
ccnet_api.group_unset_admin(group_id, email)
else:
error_msg = 'is_admin invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
member_info = get_group_member_info(request, group_id, email)
return Response(member_info)
|
def delete(self, request, group_id, email, format=None):
' Delete an user from group\n\n Permission checking:\n 1. only admin can perform this action.\n '
if (not request.user.admin_permissions.can_manage_group()):
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied.')
group_id = int(group_id)
group = ccnet_api.get_group(group_id)
if (not group):
error_msg = ('Group %d not found.' % group_id)
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
try:
if (not is_group_member(group_id, email)):
return Response({'success': True})
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
if (group.creator_name == email):
error_msg = ('%s is group owner, can not be removed.' % email)
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
try:
ccnet_api.group_remove_member(group_id, group.creator_name, email)
seafile_api.remove_group_repos_by_owner(group_id, email)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
return Response({'success': True})
| -1,522,323,534,279,099,600
|
Delete an user from group
Permission checking:
1. only admin can perform this action.
|
seahub/api2/endpoints/admin/group_members.py
|
delete
|
DMKun/seahub
|
python
|
def delete(self, request, group_id, email, format=None):
' Delete an user from group\n\n Permission checking:\n 1. only admin can perform this action.\n '
if (not request.user.admin_permissions.can_manage_group()):
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied.')
group_id = int(group_id)
group = ccnet_api.get_group(group_id)
if (not group):
error_msg = ('Group %d not found.' % group_id)
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
try:
if (not is_group_member(group_id, email)):
return Response({'success': True})
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
if (group.creator_name == email):
error_msg = ('%s is group owner, can not be removed.' % email)
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
try:
ccnet_api.group_remove_member(group_id, group.creator_name, email)
seafile_api.remove_group_repos_by_owner(group_id, email)
except Exception as e:
logger.error(e)
error_msg = 'Internal Server Error'
return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)
return Response({'success': True})
|
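Taken together, the put and delete handlers above form a small admin API for group membership. A minimal client sketch, assuming a seahub deployment at http://localhost:8000, token auth, and a URL pattern like api/v2.1/admin/groups/<group_id>/members/<email>/ (the exact route is not shown in this dump):

import requests

BASE = 'http://localhost:8000'   # assumed deployment URL
HEADERS = {'Authorization': 'Token <admin-token>'}  # assumed auth scheme
url = BASE + '/api/v2.1/admin/groups/42/members/user@example.com/'  # assumed route

# Promote the member to group admin (handled by put above).
resp = requests.put(url, headers=HEADERS, data={'is_admin': 'true'})
print(resp.status_code, resp.json())

# Remove the member from the group (handled by delete above).
resp = requests.delete(url, headers=HEADERS)
print(resp.status_code, resp.json())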
def _get_databases(self, context):
'Returns the initial databases for this instance.'
databases = None
if context.get('databases'):
dbs = context['databases']
databases = [{'name': d.strip()} for d in dbs.split(',')]
return databases
| 6,399,540,008,223,094,000
|
Returns the initial databases for this instance.
|
openstack_dashboard/contrib/trove/content/databases/workflows/create_instance.py
|
_get_databases
|
ChinaMassClouds/horizon
|
python
|
def _get_databases(self, context):
databases = None
if context.get('databases'):
dbs = context['databases']
databases = [{'name': d.strip()} for d in dbs.split(',')]
return databases
|
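_get_databases just turns a comma-separated form field into a list of name dicts; a quick illustration of that contract (the context value below is made up):

context = {'databases': ' sales, users ,logs'}
databases = [{'name': d.strip()} for d in context['databases'].split(',')]
print(databases)  # [{'name': 'sales'}, {'name': 'users'}, {'name': 'logs'}]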
def test_init(self, event_loop):
'Test that creating an instance of Purge calls init of FoglampProcess and creates loggers'
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, '__init__') as mock_process:
with patch.object(logger, 'setup') as log:
with patch.object(mockAuditLogger, '__init__', return_value=None):
p = Purge(loop=event_loop)
assert isinstance(p, Purge)
assert isinstance(p._audit, AuditLogger)
log.assert_called_once_with('Data Purge')
mock_process.assert_called_once_with()
| 5,284,015,934,107,695,000
|
Test that creating an instance of Purge calls init of FoglampProcess and creates loggers
|
tests/unit/python/foglamp/tasks/purge/test_purge.py
|
test_init
|
ThyagOSI/FogLAMP
|
python
|
def test_init(self, event_loop):
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, '__init__') as mock_process:
with patch.object(logger, 'setup') as log:
with patch.object(mockAuditLogger, '__init__', return_value=None):
p = Purge(loop=event_loop)
assert isinstance(p, Purge)
assert isinstance(p._audit, AuditLogger)
log.assert_called_once_with('Data Purge')
mock_process.assert_called_once_with()
|
def test_write_statistics(self, event_loop):
'Test that write_statistics calls update statistics with defined keys and value increments'
@asyncio.coroutine
def mock_s_update():
return ''
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, '__init__'):
with patch.object(Statistics, 'update', return_value=mock_s_update()) as mock_stats_update:
with patch.object(mockAuditLogger, '__init__', return_value=None):
p = Purge(loop=event_loop)
p._storage = mockStorageClient
p.write_statistics(1, 2)
mock_stats_update.assert_has_calls([call('PURGED', 1), call('UNSNPURGED', 2)])
| 1,872,808,031,474,282,500
|
Test that write_statistics calls update statistics with defined keys and value increments
|
tests/unit/python/foglamp/tasks/purge/test_purge.py
|
test_write_statistics
|
ThyagOSI/FogLAMP
|
python
|
def test_write_statistics(self, event_loop):
@asyncio.coroutine
def mock_s_update():
return ''
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, '__init__'):
with patch.object(Statistics, 'update', return_value=mock_s_update()) as mock_stats_update:
with patch.object(mockAuditLogger, '__init__', return_value=None):
p = Purge(loop=event_loop)
p._storage = mockStorageClient
p.write_statistics(1, 2)
mock_stats_update.assert_has_calls([call('PURGED', 1), call('UNSNPURGED', 2)])
|
def test_set_configuration(self, event_loop):
"Test that purge's set_configuration returns configuration item with key 'PURGE_READ' "
@asyncio.coroutine
def mock_cm_return():
return ''
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, '__init__'):
with patch.object(mockAuditLogger, '__init__', return_value=None):
p = Purge(loop=event_loop)
p._storage = MagicMock(spec=StorageClient)
mock_cm = ConfigurationManager(p._storage)
with patch.object(mock_cm, 'create_category', return_value=mock_cm_return()) as mock_create_cat:
with patch.object(mock_cm, 'get_category_all_items', return_value=mock_cm_return()) as mock_get_cat:
p.set_configuration()
mock_get_cat.assert_called_once_with('PURGE_READ')
(args, kwargs) = mock_create_cat.call_args
assert (len(args) == 3)
assert (args[0] == 'PURGE_READ')
| -5,957,796,145,405,066,000
|
Test that purge's set_configuration returns configuration item with key 'PURGE_READ'
|
tests/unit/python/foglamp/tasks/purge/test_purge.py
|
test_set_configuration
|
ThyagOSI/FogLAMP
|
python
|
def test_set_configuration(self, event_loop):
" "
@asyncio.coroutine
def mock_cm_return():
return ''
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, '__init__'):
with patch.object(mockAuditLogger, '__init__', return_value=None):
p = Purge(loop=event_loop)
p._storage = MagicMock(spec=StorageClient)
mock_cm = ConfigurationManager(p._storage)
with patch.object(mock_cm, 'create_category', return_value=mock_cm_return()) as mock_create_cat:
with patch.object(mock_cm, 'get_category_all_items', return_value=mock_cm_return()) as mock_get_cat:
p.set_configuration()
mock_get_cat.assert_called_once_with('PURGE_READ')
(args, kwargs) = mock_create_cat.call_args
assert (len(args) == 3)
assert (args[0] == 'PURGE_READ')
|
@pytest.mark.parametrize('conf, expected_return, expected_calls', [(config['purgeAgeSize'], (2, 4), {'sent_id': 0, 'size': '20', 'flag': 'purge'}), (config['purgeAge'], (1, 2), {'sent_id': 0, 'age': '72', 'flag': 'purge'}), (config['purgeSize'], (1, 2), {'sent_id': 0, 'size': '100', 'flag': 'purge'}), (config['retainAgeSize'], (2, 4), {'sent_id': 0, 'size': '20', 'flag': 'retain'}), (config['retainAge'], (1, 2), {'sent_id': 0, 'age': '72', 'flag': 'retain'}), (config['retainSize'], (1, 2), {'sent_id': 0, 'size': '100', 'flag': 'retain'})])
def test_purge_data(self, event_loop, conf, expected_return, expected_calls):
"Test that purge_data calls Storage's purge with defined configuration"
@asyncio.coroutine
def mock_audit_info():
return ''
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, '__init__'):
with patch.object(mockAuditLogger, '__init__', return_value=None):
p = Purge(loop=event_loop)
p._logger = logger
p._logger.info = MagicMock()
p._logger.error = MagicMock()
p._storage = MagicMock(spec=StorageClient)
p._readings_storage = MagicMock(spec=ReadingsStorageClient)
audit = p._audit
with patch.object(p._readings_storage, 'purge', side_effect=self.store_purge) as mock_storage_purge:
with patch.object(audit, 'information', return_value=mock_audit_info()) as audit_info:
assert (expected_return == p.purge_data(conf))
assert audit_info.called
(args, kwargs) = mock_storage_purge.call_args
assert (kwargs == expected_calls)
| -469,366,175,127,310,850
|
Test that purge_data calls Storage's purge with defined configuration
|
tests/unit/python/foglamp/tasks/purge/test_purge.py
|
test_purge_data
|
ThyagOSI/FogLAMP
|
python
|
@pytest.mark.parametrize('conf, expected_return, expected_calls', [(config['purgeAgeSize'], (2, 4), {'sent_id': 0, 'size': '20', 'flag': 'purge'}), (config['purgeAge'], (1, 2), {'sent_id': 0, 'age': '72', 'flag': 'purge'}), (config['purgeSize'], (1, 2), {'sent_id': 0, 'size': '100', 'flag': 'purge'}), (config['retainAgeSize'], (2, 4), {'sent_id': 0, 'size': '20', 'flag': 'retain'}), (config['retainAge'], (1, 2), {'sent_id': 0, 'age': '72', 'flag': 'retain'}), (config['retainSize'], (1, 2), {'sent_id': 0, 'size': '100', 'flag': 'retain'})])
def test_purge_data(self, event_loop, conf, expected_return, expected_calls):
@asyncio.coroutine
def mock_audit_info():
return ''
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, '__init__'):
with patch.object(mockAuditLogger, '__init__', return_value=None):
p = Purge(loop=event_loop)
p._logger = logger
p._logger.info = MagicMock()
p._logger.error = MagicMock()
p._storage = MagicMock(spec=StorageClient)
p._readings_storage = MagicMock(spec=ReadingsStorageClient)
audit = p._audit
with patch.object(p._readings_storage, 'purge', side_effect=self.store_purge) as mock_storage_purge:
with patch.object(audit, 'information', return_value=mock_audit_info()) as audit_info:
assert (expected_return == p.purge_data(conf))
assert audit_info.called
(args, kwargs) = mock_storage_purge.call_args
assert (kwargs == expected_calls)
|
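The purge tests patch the storage client's purge with side_effect=self.store_purge, a fixture that is not captured in this dump. A hypothetical stand-in that would satisfy the shapes asserted above (the result keys mirror a typical FogLAMP purge response, but are an assumption here):

def store_purge(**kwargs):
    # Hypothetical fake: the real self.store_purge is not in this dump.
    if kwargs.get('age') == '-1' or kwargs.get('size') == '-1':
        return {'message': 'faked error'}   # drives the error-response test
    return {'removed': 1, 'unsentPurged': 2,
            'unsentRetained': 0, 'readings': 10}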
@pytest.mark.parametrize('conf, expected_return', [({'retainUnsent': {'value': 'False'}, 'age': {'value': '0'}, 'size': {'value': '0'}}, (0, 0)), ({'retainUnsent': {'value': 'True'}, 'age': {'value': '0'}, 'size': {'value': '0'}}, (0, 0))])
def test_purge_data_no_data_purged(self, event_loop, conf, expected_return):
'Test that purge_data logs message when no data was purged'
@asyncio.coroutine
def mock_audit_info():
return ''
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, '__init__'):
with patch.object(mockAuditLogger, '__init__', return_value=None):
p = Purge(loop=event_loop)
p._logger = logger
p._logger.info = MagicMock()
p._logger.error = MagicMock()
p._storage = MagicMock(spec=StorageClient)
p._readings_storage = MagicMock(spec=ReadingsStorageClient)
audit = p._audit
with patch.object(p._readings_storage, 'purge', side_effect=self.store_purge):
with patch.object(audit, 'information', return_value=mock_audit_info()):
assert (expected_return == p.purge_data(conf))
p._logger.info.assert_called_once_with('No rows purged')
| 8,077,367,409,494,260,000
|
Test that purge_data logs message when no data was purged
|
tests/unit/python/foglamp/tasks/purge/test_purge.py
|
test_purge_data_no_data_purged
|
ThyagOSI/FogLAMP
|
python
|
@pytest.mark.parametrize('conf, expected_return', [({'retainUnsent': {'value': 'False'}, 'age': {'value': '0'}, 'size': {'value': '0'}}, (0, 0)), ({'retainUnsent': {'value': 'True'}, 'age': {'value': '0'}, 'size': {'value': '0'}}, (0, 0))])
def test_purge_data_no_data_purged(self, event_loop, conf, expected_return):
@asyncio.coroutine
def mock_audit_info():
return ''
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, '__init__'):
with patch.object(mockAuditLogger, '__init__', return_value=None):
p = Purge(loop=event_loop)
p._logger = logger
p._logger.info = MagicMock()
p._logger.error = MagicMock()
p._storage = MagicMock(spec=StorageClient)
p._readings_storage = MagicMock(spec=ReadingsStorageClient)
audit = p._audit
with patch.object(p._readings_storage, 'purge', side_effect=self.store_purge):
with patch.object(audit, 'information', return_value=mock_audit_info()):
assert (expected_return == p.purge_data(conf))
p._logger.info.assert_called_once_with('No rows purged')
|
@pytest.mark.parametrize('conf, expected_return', [({'retainUnsent': {'value': 'True'}, 'age': {'value': '-1'}, 'size': {'value': '-1'}}, (0, 0))])
def test_purge_error_storage_response(self, event_loop, conf, expected_return):
'Test that purge_data logs error when storage purge returns an error response'
@asyncio.coroutine
def mock_audit_info():
return ''
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, '__init__'):
with patch.object(mockAuditLogger, '__init__', return_value=None):
p = Purge(loop=event_loop)
p._logger = logger
p._logger.info = MagicMock()
p._logger.error = MagicMock()
p._storage = MagicMock(spec=StorageClient)
p._readings_storage = MagicMock(spec=ReadingsStorageClient)
audit = p._audit
with patch.object(p._readings_storage, 'purge', side_effect=self.store_purge):
with patch.object(audit, 'information', return_value=mock_audit_info()):
assert (expected_return == p.purge_data(conf))
| -4,718,137,762,322,573,000
|
Test that purge_data logs error when storage purge returns an error response
|
tests/unit/python/foglamp/tasks/purge/test_purge.py
|
test_purge_error_storage_response
|
ThyagOSI/FogLAMP
|
python
|
@pytest.mark.parametrize('conf, expected_return', [({'retainUnsent': {'value': 'True'}, 'age': {'value': '-1'}, 'size': {'value': '-1'}}, (0, 0))])
def test_purge_error_storage_response(self, event_loop, conf, expected_return):
@asyncio.coroutine
def mock_audit_info():
return ''
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, '__init__'):
with patch.object(mockAuditLogger, '__init__', return_value=None):
p = Purge(loop=event_loop)
p._logger = logger
p._logger.info = MagicMock()
p._logger.error = MagicMock()
p._storage = MagicMock(spec=StorageClient)
p._readings_storage = MagicMock(spec=ReadingsStorageClient)
audit = p._audit
with patch.object(p._readings_storage, 'purge', side_effect=self.store_purge):
with patch.object(audit, 'information', return_value=mock_audit_info()):
assert (expected_return == p.purge_data(conf))
|
@pytest.mark.parametrize('conf, expected_error_key', [({'retainUnsent': {'value': 'True'}, 'age': {'value': 'bla'}, 'size': {'value': '0'}}, 'age'), ({'retainUnsent': {'value': 'True'}, 'age': {'value': '0'}, 'size': {'value': 'bla'}}, 'size')])
def test_purge_data_invalid_conf(self, event_loop, conf, expected_error_key):
'Test that purge_data raises exception when called with invalid configuration'
@asyncio.coroutine
def mock_audit_info():
return ''
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, '__init__'):
with patch.object(mockAuditLogger, '__init__', return_value=None):
p = Purge(loop=event_loop)
p._logger = logger
p._logger.info = MagicMock()
p._logger.error = MagicMock()
p._storage = MagicMock(spec=StorageClient)
p._readings_storage = MagicMock(spec=ReadingsStorageClient)
audit = p._audit
with patch.object(p._readings_storage, 'purge', side_effect=self.store_purge):
with patch.object(audit, 'information', return_value=mock_audit_info()):
p.purge_data(conf)
p._logger.error.assert_called_with('Configuration item {} bla should be integer!'.format(expected_error_key))
| -9,136,429,344,613,736,000
|
Test that purge_data raises exception when called with invalid configuration
|
tests/unit/python/foglamp/tasks/purge/test_purge.py
|
test_purge_data_invalid_conf
|
ThyagOSI/FogLAMP
|
python
|
@pytest.mark.parametrize('conf, expected_error_key', [({'retainUnsent': {'value': 'True'}, 'age': {'value': 'bla'}, 'size': {'value': '0'}}, 'age'), ({'retainUnsent': {'value': 'True'}, 'age': {'value': '0'}, 'size': {'value': 'bla'}}, 'size')])
def test_purge_data_invalid_conf(self, event_loop, conf, expected_error_key):
@asyncio.coroutine
def mock_audit_info():
return ''
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, '__init__'):
with patch.object(mockAuditLogger, '__init__', return_value=None):
p = Purge(loop=event_loop)
p._logger = logger
p._logger.info = MagicMock()
p._logger.error = MagicMock()
p._storage = MagicMock(spec=StorageClient)
p._readings_storage = MagicMock(spec=ReadingsStorageClient)
audit = p._audit
with patch.object(p._readings_storage, 'purge', side_effect=self.store_purge):
with patch.object(audit, 'information', return_value=mock_audit_info()):
p.purge_data(conf)
p._logger.error.assert_called_with('Configuration item {} bla should be integer!'.format(expected_error_key))
|
def test_run(self, event_loop):
'Test that run calls all units of purge process'
@asyncio.coroutine
def mock_audit_info():
return ''
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, '__init__'):
with patch.object(mockAuditLogger, '__init__', return_value=None):
p = Purge(loop=event_loop)
config = 'Some config'
p._logger.exception = MagicMock()
with patch.object(p, 'set_configuration', return_value=config) as mock_set_config:
with patch.object(p, 'purge_data', return_value=(1, 2)) as mock_purge_data:
with patch.object(p, 'write_statistics') as mock_write_stats:
p.run()
mock_set_config.assert_called_once_with()
mock_purge_data.assert_called_once_with(config)
mock_write_stats.assert_called_once_with(1, 2)
| 346,495,409,171,678,200
|
Test that run calls all units of purge process
|
tests/unit/python/foglamp/tasks/purge/test_purge.py
|
test_run
|
ThyagOSI/FogLAMP
|
python
|
def test_run(self, event_loop):
@asyncio.coroutine
def mock_audit_info():
return ''
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, '__init__'):
with patch.object(mockAuditLogger, '__init__', return_value=None):
p = Purge(loop=event_loop)
config = 'Some config'
p._logger.exception = MagicMock()
with patch.object(p, 'set_configuration', return_value=config) as mock_set_config:
with patch.object(p, 'purge_data', return_value=(1, 2)) as mock_purge_data:
with patch.object(p, 'write_statistics') as mock_write_stats:
p.run()
mock_set_config.assert_called_once_with()
mock_purge_data.assert_called_once_with(config)
mock_write_stats.assert_called_once_with(1, 2)
|
def test_run_exception(self, event_loop):
'Test that run calls all units of purge process and checks the exception handling'
@asyncio.coroutine
def mock_audit_info():
return ''
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, '__init__'):
with patch.object(mockAuditLogger, '__init__', return_value=None):
p = Purge(loop=event_loop)
config = 'Some config'
p._logger.exception = MagicMock()
with patch.object(p, 'set_configuration', return_value=config):
with patch.object(p, 'purge_data', return_value=Exception()):
with patch.object(p, 'write_statistics'):
p.run()
p._logger.exception.assert_called_once_with("'Exception' object is not iterable")
| -5,734,047,374,759,279,000
|
Test that run calls all units of purge process and checks the exception handling
|
tests/unit/python/foglamp/tasks/purge/test_purge.py
|
test_run_exception
|
ThyagOSI/FogLAMP
|
python
|
def test_run_exception(self, event_loop):
@asyncio.coroutine
def mock_audit_info():
return ''
mockStorageClient = MagicMock(spec=StorageClient)
mockAuditLogger = AuditLogger(mockStorageClient)
with patch.object(FoglampProcess, '__init__'):
with patch.object(mockAuditLogger, '__init__', return_value=None):
p = Purge(loop=event_loop)
config = 'Some config'
p._logger.exception = MagicMock()
with patch.object(p, 'set_configuration', return_value=config):
with patch.object(p, 'purge_data', return_value=Exception()):
with patch.object(p, 'write_statistics'):
p.run()
p._logger.exception.assert_called_once_with("'Exception' object is not iterable")
|
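The asserted message comes from tuple-unpacking: run presumably does purged, unsent = self.purge_data(config), and unpacking the mocked Exception() return value raises exactly that TypeError. A one-line illustration:

try:
    purged, unsent = Exception()  # mimics the mocked purge_data return value
except TypeError as exc:
    # Python 3.5/3.6 (FogLAMP's era) phrases this "'Exception' object is not
    # iterable"; Python 3.7+ says "cannot unpack non-iterable Exception object".
    print(exc)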
def updates_configured():
'Returns True if update checking has been configured.'
return (prepare_updater() is not None)
| 1,418,974,148,192,326,000
|
Returns True if update checking has been configured.
|
core/update.py
|
updates_configured
|
McArcady/python-lnp
|
python
|
def updates_configured():
return (prepare_updater() is not None)
|
def check_update():
'Checks for updates using the URL specified in PyLNP.json.'
if (not updates_configured()):
return
if (not lnp.userconfig.has_value('updateDays')):
interval = lnp.config.get_value('updates/defaultInterval', (- 1))
if ((interval != (- 1)) and lnp.ui.on_request_update_permission(interval)):
next_update(interval)
else:
next_update((- 1))
if (lnp.userconfig.get_value('updateDays', (- 1)) == (- 1)):
return
if (lnp.userconfig.get_number('nextUpdate') < time.time()):
t = threading.Thread(target=perform_update_check)
t.daemon = True
t.start()
| -8,196,296,363,672,645,000
|
Checks for updates using the URL specified in PyLNP.json.
|
core/update.py
|
check_update
|
McArcady/python-lnp
|
python
|
def check_update():
if (not updates_configured()):
return
if (not lnp.userconfig.has_value('updateDays')):
interval = lnp.config.get_value('updates/defaultInterval', (- 1))
if ((interval != (- 1)) and lnp.ui.on_request_update_permission(interval)):
next_update(interval)
else:
next_update((- 1))
if (lnp.userconfig.get_value('updateDays', (- 1)) == (- 1)):
return
if (lnp.userconfig.get_number('nextUpdate') < time.time()):
t = threading.Thread(target=perform_update_check)
t.daemon = True
t.start()
|
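check_update only spawns the background thread once the stored nextUpdate timestamp has passed; that timestamp is written by next_update further down, which converts a day count to seconds. For instance:

import time

days = 3
next_check = time.time() + days * 24 * 60 * 60  # 3 days = 259200 seconds
print('next update check at', time.ctime(next_check))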
def perform_update_check():
'Performs the actual update check. Runs in a thread.'
prepare_updater()
if lnp.updater.update_needed():
lnp.new_version = lnp.updater.get_version()
lnp.ui.on_update_available()
| -2,845,390,626,586,245,600
|
Performs the actual update check. Runs in a thread.
|
core/update.py
|
perform_update_check
|
McArcady/python-lnp
|
python
|
def perform_update_check():
prepare_updater()
if lnp.updater.update_needed():
lnp.new_version = lnp.updater.get_version()
lnp.ui.on_update_available()
|
def prepare_updater():
'Returns an Updater object for the configured updater.'
if lnp.updater:
return lnp.updater
updaters = {'regex': RegexUpdater, 'json': JSONUpdater, 'dffd': DFFDUpdater}
updater_id = lnp.config.get('updates/updateMethod', None)
if (updater_id is None):
log.w('Update method not configured in PyLNP.json! Will attempt to auto-detect. Please set this value correctly, auto-detection will go away eventually!')
if lnp.config.get_string('updates/dffdID'):
updater_id = 'dffd'
log.w('Updater detected: dffd')
elif lnp.config.get_string('updates/versionRegex'):
updater_id = 'regex'
log.w('Updater detected: regex')
elif lnp.config.get_string('updates/versionJsonPath'):
updater_id = 'json'
log.w('Updater detected: json')
else:
log.w('Could not detect update method, updates will not work')
return None
elif ((updater_id == '') or (not lnp.config.get('updates'))):
return None
if (updater_id not in updaters):
log.e(('Unknown update method: ' + updater_id))
return None
lnp.updater = updaters[updater_id]()
return lnp.updater
| -3,500,684,316,671,097,300
|
Returns an Updater object for the configured updater.
|
core/update.py
|
prepare_updater
|
McArcady/python-lnp
|
python
|
def prepare_updater():
if lnp.updater:
return lnp.updater
updaters = {'regex': RegexUpdater, 'json': JSONUpdater, 'dffd': DFFDUpdater}
updater_id = lnp.config.get('updates/updateMethod', None)
if (updater_id is None):
log.w('Update method not configured in PyLNP.json! Will attempt to auto-detect. Please set this value correctly, auto-detection will go away eventually!')
if lnp.config.get_string('updates/dffdID'):
updater_id = 'dffd'
log.w('Updater detected: dffd')
elif lnp.config.get_string('updates/versionRegex'):
updater_id = 'regex'
log.w('Updater detected: regex')
elif lnp.config.get_string('updates/versionJsonPath'):
updater_id = 'json'
log.w('Updater detected: json')
else:
log.w('Could not detect update method, updates will not work')
return None
elif ((updater_id == '') or (not lnp.config.get('updates'))):
return None
if (updater_id not in updaters):
log.e(('Unknown update method: ' + updater_id))
return None
lnp.updater = updaters[updater_id]()
return lnp.updater
|
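prepare_updater dispatches on updates/updateMethod and falls back to sniffing legacy keys. Configuration fragments that would select each updater, written as Python dicts purely for illustration (the real settings live in PyLNP.json; key names follow the get_string calls above):

dffd_cfg = {'updates': {'updateMethod': 'dffd', 'dffdID': '1234'}}
regex_cfg = {'updates': {'updateMethod': 'regex', 'versionRegex': 'Version: (.+)'}}
json_cfg = {'updates': {'updateMethod': 'json', 'versionJsonPath': 'latest/version'}}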
def next_update(days):
'Sets the next update check to occur in <days> days.'
lnp.userconfig['nextUpdate'] = (time.time() + (((days * 24) * 60) * 60))
lnp.userconfig['updateDays'] = days
lnp.save_config()
| 6,134,476,219,030,161,000
|
Sets the next update check to occur in <days> days.
|
core/update.py
|
next_update
|
McArcady/python-lnp
|
python
|
def next_update(days):
lnp.userconfig['nextUpdate'] = (time.time() + (((days * 24) * 60) * 60))
lnp.userconfig['updateDays'] = days
lnp.save_config()
|
def start_update():
'Launches a webbrowser to the specified update URL.'
launcher.open_url(lnp.updater.get_download_url())
| 3,976,281,430,918,038,000
|
Launches a webbrowser to the specified update URL.
|
core/update.py
|
start_update
|
McArcady/python-lnp
|
python
|
def start_update():
launcher.open_url(lnp.updater.get_download_url())
|
def download_df_baseline(immediate=False):
'Download the current version of DF from Bay12 Games to serve as a\n baseline, in LNP/Baselines/'
filename = lnp.df_info.get_archive_name()
url = ('http://www.bay12games.com/dwarves/' + filename)
target = os.path.join(paths.get('baselines'), filename)
queue_name = ('immediate' if immediate else 'baselines')
download.download(queue_name, url, target)
| 2,693,516,806,954,899,000
|
Download the current version of DF from Bay12 Games to serve as a
baseline, in LNP/Baselines/
|
core/update.py
|
download_df_baseline
|
McArcady/python-lnp
|
python
|
def download_df_baseline(immediate=False):
'Download the current version of DF from Bay12 Games to serve as a\n baseline, in LNP/Baselines/'
filename = lnp.df_info.get_archive_name()
url = ('http://www.bay12games.com/dwarves/' + filename)
target = os.path.join(paths.get('baselines'), filename)
queue_name = ('immediate' if immediate else 'baselines')
download.download(queue_name, url, target)
|
def direct_download_pack():
'Directly download a new version of the pack to the current BASEDIR'
url = lnp.updater.get_direct_url()
fname = lnp.updater.get_direct_filename()
target = os.path.join(lnp.BASEDIR, fname)
download.download('updates', url, target, end_callback=extract_new_pack)
| -514,763,240,004,540,100
|
Directly download a new version of the pack to the current BASEDIR
|
core/update.py
|
direct_download_pack
|
McArcady/python-lnp
|
python
|
def direct_download_pack():
url = lnp.updater.get_direct_url()
fname = lnp.updater.get_direct_filename()
target = os.path.join(lnp.BASEDIR, fname)
download.download('updates', url, target, end_callback=extract_new_pack)
|
def extract_new_pack(_, fname, bool_val):
'Extract a downloaded new pack to a sibling dir of the current pack.'
exts = ('.zip', '.bz2', '.gz', '.7z', '.xz')
if ((not bool_val) or (not any((fname.endswith(ext) for ext in exts)))):
return None
archive = os.path.join(lnp.BASEDIR, os.path.basename(fname))
return extract_archive(archive, os.path.join(lnp.BASEDIR, '..'))
| 929,262,011,020,408,300
|
Extract a downloaded new pack to a sibling dir of the current pack.
|
core/update.py
|
extract_new_pack
|
McArcady/python-lnp
|
python
|
def extract_new_pack(_, fname, bool_val):
exts = ('.zip', '.bz2', '.gz', '.7z', '.xz')
if ((not bool_val) or (not any((fname.endswith(ext) for ext in exts)))):
return None
archive = os.path.join(lnp.BASEDIR, os.path.basename(fname))
return extract_archive(archive, os.path.join(lnp.BASEDIR, '..'))
|
def extract_archive(fname, target):
'Extract the archive fname to dir target, avoiding explosions.'
if zipfile.is_zipfile(fname):
zf = zipfile.ZipFile(fname)
namelist = zf.namelist()
topdir = namelist[0].split(os.path.sep)[0]
if (not all((f.startswith(topdir) for f in namelist))):
target = os.path.join(target, os.path.basename(fname).split('.')[0])
zf.extractall(target)
os.remove(fname)
return True
if tarfile.is_tarfile(fname):
tf = tarfile.open(fname)
namelist = tf.getnames()  # getmembers() returns TarInfo objects; the split/startswith checks below need name strings
topdir = namelist[0].split(os.path.sep)[0]
if (not all((f.startswith(topdir) for f in namelist))):
target = os.path.join(target, fname.split('.')[0])
tf.extractall(target)
os.remove(fname)
return True
return False
| -4,466,180,850,622,522,400
|
Extract the archive fname to dir target, avoiding explosions.
|
core/update.py
|
extract_archive
|
McArcady/python-lnp
|
python
|
def extract_archive(fname, target):
if zipfile.is_zipfile(fname):
zf = zipfile.ZipFile(fname)
namelist = zf.namelist()
topdir = namelist[0].split(os.path.sep)[0]
if (not all((f.startswith(topdir) for f in namelist))):
target = os.path.join(target, os.path.basename(fname).split('.')[0])
zf.extractall(target)
os.remove(fname)
return True
if tarfile.is_tarfile(fname):
tf = tarfile.open(fname)
namelist = tf.getnames()  # getmembers() returns TarInfo objects; the split/startswith checks below need name strings
topdir = namelist[0].split(os.path.sep)[0]
if (not all((f.startswith(topdir) for f in namelist))):
target = os.path.join(target, fname.split('.')[0])
tf.extractall(target)
os.remove(fname)
return True
return False
|
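The top-directory check is what 'avoiding explosions' means: if the archive is a tarbomb/zipbomb with no single top-level folder, extraction is redirected into a directory named after the file. A usage sketch with an assumed filename:

ok = extract_archive('update.zip', '..')   # 'update.zip' is illustrative
if not ok:
    print('not a zip or tar archive')      # unsupported formats fall through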
def update_needed(self):
'Checks if an update is necessary.'
self.text = download.download_str(self.get_check_url())
if (self.text is None):
log.e('Error checking for updates, could not download text')
curr_version = lnp.config.get_string('updates/packVersion')
if (not curr_version):
log.e('Current pack version is not set, cannot check for updates')
return False
return (self.get_version() != curr_version)
| -603,070,208,608,805,800
|
Checks if an update is necessary.
|
core/update.py
|
update_needed
|
McArcady/python-lnp
|
python
|
def update_needed(self):
self.text = download.download_str(self.get_check_url())
if (self.text is None):
log.e('Error checking for updates, could not download text')
curr_version = lnp.config.get_string('updates/packVersion')
if (not curr_version):
log.e('Current pack version is not set, cannot check for updates')
return False
return (self.get_version() != curr_version)
|
def get_check_url(self):
'Returns the URL used to check for updates.'
return lnp.config.get_string('updates/checkURL')
| -2,481,073,543,452,999,000
|
Returns the URL used to check for updates.
|
core/update.py
|
get_check_url
|
McArcady/python-lnp
|
python
|
def get_check_url(self):
return lnp.config.get_string('updates/checkURL')
|
def get_version(self):
'Returns the version listed at the update URL. Must be overridden by\n subclasses.'
pass
| 6,166,192,657,965,314,000
|
Returns the version listed at the update URL. Must be overridden by
subclasses.
|
core/update.py
|
get_version
|
McArcady/python-lnp
|
python
|
def get_version(self):
'Returns the version listed at the update URL. Must be overridden by\n subclasses.'
pass
|
def get_download_url(self):
'Returns a URL from which the user can download the update.'
return lnp.config.get_string('updates/downloadURL')
| 6,181,411,547,175,209,000
|
Returns a URL from which the user can download the update.
|
core/update.py
|
get_download_url
|
McArcady/python-lnp
|
python
|
def get_download_url(self):
return lnp.config.get_string('updates/downloadURL')
|
def get_direct_url(self):
'Returns a URL pointing directly to the update, for download by the\n program.'
return lnp.config.get_string('updates/directURL')
| 4,640,743,652,360,267,000
|
Returns a URL pointing directly to the update, for download by the
program.
|
core/update.py
|
get_direct_url
|
McArcady/python-lnp
|
python
|
def get_direct_url(self):
'Returns a URL pointing directly to the update, for download by the\n program.'
return lnp.config.get_string('updates/directURL')
|
def get_direct_filename(self):
'Returns the filename that should be used for direct downloads.'
directFilename = lnp.config.get_string('updates/directFilename')
if directFilename:
return directFilename
url_fragments = urlparse(self.get_direct_url())
return os.path.basename(unquote(url_fragments.path))
| 471,715,325,318,947,500
|
Returns the filename that should be used for direct downloads.
|
core/update.py
|
get_direct_filename
|
McArcady/python-lnp
|
python
|
def get_direct_filename(self):
directFilename = lnp.config.get_string('updates/directFilename')
if directFilename:
return directFilename
url_fragments = urlparse(self.get_direct_url())
return os.path.basename(unquote(url_fragments.path))
|
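When directFilename is not configured, the name is recovered from the URL path; note the unquote, which decodes percent-escapes before the basename is taken:

import os
from urllib.parse import urlparse, unquote  # Python 3 import path, assumed here

url = 'http://example.com/downloads/My%20Pack.zip'
print(os.path.basename(unquote(urlparse(url).path)))  # -> My Pack.zip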
def compile_model(network):
'\n :param network dict: dictionary with network parameters\n :return: compiled model\n '
model = lgb.LGBMRegressor(num_leaves=network.get('num_leaves', 31), learning_rate=network.get('learning_rate', 0.1), n_estimators=network.get('n_estimators', 20), max_bin=network.get('max_bin', 1000), colsample_bytree=network.get('colsample_bytree', 0.5), subsample_for_bin=network.get('subsample_for_bin', 200000), boosting_type=network.get('boosting_type', 'gbdt'), num_iterations=network.get('num_iterations', 100), extra_trees=network.get('extra_trees', False), reg_sqrt=network.get('reg_sqrt', False), bagging_freq=network.get('bagging_freq', 1), bagging_fraction=network.get('bagging_fraction', 0.1))
return model
| -3,924,933,480,687,173,600
|
:param network dict: dictionary with network parameters
:return: compiled model
|
gb_rf_evolution/gb_train.py
|
compile_model
|
EvanBagis/gb_rf_evolution
|
python
|
def compile_model(network):
'\n :param network dict: dictionary with network parameters\n :return: compiled model\n '
model = lgb.LGBMRegressor(num_leaves=network.get('num_leaves', 31), learning_rate=network.get('learning_rate', 0.1), n_estimators=network.get('n_estimators', 20), max_bin=network.get('max_bin', 1000), colsample_bytree=network.get('colsample_bytree', 0.5), subsample_for_bin=network.get('subsample_for_bin', 200000), boosting_type=network.get('boosting_type', 'gbdt'), num_iterations=network.get('num_iterations', 100), extra_trees=network.get('extra_trees', False), reg_sqrt=network.get('reg_sqrt', False), bagging_freq=network.get('bagging_freq', 1), bagging_fraction=network.get('bagging_fraction', 0.1))
return model
|
def train_and_score(network, x_train, y_train, x_test, y_test):
'\n\n :param network dict: dictionary with network parameters\n :param x_train array: numpy array with features for training\n :param y_train array: numpy array with labels for training\n :param x_test array: numpy array with features for test\n :param y_test array: numpy array with labels for test\n :return float: score\n '
model = compile_model(network)
model.fit(x_train, y_train)
y_pred = model.predict(np.array(x_test))
true = y_test
pred = y_pred
print(' R2 = ', r2_score(true, pred))
return (r2_score(true, pred), model)
| 1,104,790,927,388,677,900
|
:param network dict: dictionary with network parameters
:param x_train array: numpy array with features for training
:param y_train array: numpy array with labels for training
:param x_test array: numpy array with features for test
:param y_test array: numpy array with labels for test
:return float: score
|
gb_rf_evolution/gb_train.py
|
train_and_score
|
EvanBagis/gb_rf_evolution
|
python
|
def train_and_score(network, x_train, y_train, x_test, y_test):
'\n\n :param network dict: dictionary with network parameters\n :param x_train array: numpy array with features for training\n :param y_train array: numpy array with labels for training\n :param x_test array: numpy array with features for test\n :param y_test array: numpy array with labels for test\n :return float: score\n '
model = compile_model(network)
model.fit(x_train, y_train)
y_pred = model.predict(np.array(x_test))
true = y_test
pred = y_pred
print(' R2 = ', r2_score(true, pred))
return (r2_score(true, pred), model)
|
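A minimal end-to-end sketch of the two functions above on synthetic data, assuming lightgbm, numpy and scikit-learn are installed (the network dict values are arbitrary):

import numpy as np
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(0)
X = rng.normal(size=(500, 5))
y = X @ np.array([1.0, -2.0, 0.5, 0.0, 3.0]) + rng.normal(scale=0.1, size=500)
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

network = {'num_leaves': 31, 'learning_rate': 0.1, 'n_estimators': 50}
score, model = train_and_score(network, x_train, y_train, x_test, y_test)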
@cached_property
def additional_properties_type():
'\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n '
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type)
| 1,702,168,743,392,494,600
|
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
|
code/python/QuotesAPIforDigitalPortals/v2/fds/sdk/QuotesAPIforDigitalPortals/model/prices_trading_schedule_event_list_data.py
|
additional_properties_type
|
factset/enterprise-sdk
|
python
|
@cached_property
def additional_properties_type():
'\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n '
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type)
|
@cached_property
def openapi_types():
'\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n\n Returns\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n '
lazy_import()
return {'id': (str,), 'filter': (PricesTradingScheduleEventListDataFilter,)}
| 6,639,963,327,444,114,000
|
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
|
code/python/QuotesAPIforDigitalPortals/v2/fds/sdk/QuotesAPIforDigitalPortals/model/prices_trading_schedule_event_list_data.py
|
openapi_types
|
factset/enterprise-sdk
|
python
|
@cached_property
def openapi_types():
'\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n\n Returns\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n '
lazy_import()
return {'id': (str,), 'filter': (PricesTradingScheduleEventListDataFilter,)}
|
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, id, *args, **kwargs):
'PricesTradingScheduleEventListData - a model defined in OpenAPI\n\n Args:\n id (str): Identifier of the notation.\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n filter (PricesTradingScheduleEventListDataFilter): [optional] # noqa: E501\n '
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
self.id = id
for (var_name, var_value) in kwargs.items():
if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
continue
setattr(self, var_name, var_value)
return self
| 3,884,444,163,335,825,000
|
PricesTradingScheduleEventListData - a model defined in OpenAPI
Args:
id (str): Identifier of the notation.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
filter (PricesTradingScheduleEventListDataFilter): [optional] # noqa: E501
|
code/python/QuotesAPIforDigitalPortals/v2/fds/sdk/QuotesAPIforDigitalPortals/model/prices_trading_schedule_event_list_data.py
|
_from_openapi_data
|
factset/enterprise-sdk
|
python
|
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, id, *args, **kwargs):
'PricesTradingScheduleEventListData - a model defined in OpenAPI\n\n Args:\n id (str): Identifier of the notation.\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n filter (PricesTradingScheduleEventListDataFilter): [optional] # noqa: E501\n '
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
self.id = id
for (var_name, var_value) in kwargs.items():
if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
continue
setattr(self, var_name, var_value)
return self
|
@convert_js_args_to_python_args
def __init__(self, id, *args, **kwargs):
'PricesTradingScheduleEventListData - a model defined in OpenAPI\n\n Args:\n id (str): Identifier of the notation.\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n filter (PricesTradingScheduleEventListDataFilter): [optional] # noqa: E501\n '
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
self.id = id
for (var_name, var_value) in kwargs.items():
if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
continue
setattr(self, var_name, var_value)
if (var_name in self.read_only_vars):
raise ApiAttributeError(f'`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate class with read only attributes.')
| 3,673,162,969,030,290,000
|
PricesTradingScheduleEventListData - a model defined in OpenAPI
Args:
id (str): Identifier of the notation.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
filter (PricesTradingScheduleEventListDataFilter): [optional] # noqa: E501
|
code/python/QuotesAPIforDigitalPortals/v2/fds/sdk/QuotesAPIforDigitalPortals/model/prices_trading_schedule_event_list_data.py
|
__init__
|
factset/enterprise-sdk
|
python
|
@convert_js_args_to_python_args
def __init__(self, id, *args, **kwargs):
'PricesTradingScheduleEventListData - a model defined in OpenAPI\n\n Args:\n id (str): Identifier of the notation.\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n filter (PricesTradingScheduleEventListDataFilter): [optional] # noqa: E501\n '
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
self.id = id
for (var_name, var_value) in kwargs.items():
if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
continue
setattr(self, var_name, var_value)
if (var_name in self.read_only_vars):
raise ApiAttributeError(f'`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate class with read only attributes.')
|
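Instantiating the generated model then only requires the mandatory id; a sketch, with the import path mirrored from the dataset's path column (assumed, not verified):

from fds.sdk.QuotesAPIforDigitalPortals.model.prices_trading_schedule_event_list_data import PricesTradingScheduleEventListData

data = PricesTradingScheduleEventListData(id='notation-id-123')
print(data.id)  # optional attributes such as filter may be passed as kwargs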
def __init__(self, error):
'Instantiate super class with passed message.'
self.message = 'No domains were passed!'
super().__init__(self.message)
| -5,821,761,965,115,697,000
|
Instantiate super class with passed message.
|
src/findcdn/findcdn_err.py
|
__init__
|
Pascal-0x90/findCDN
|
python
|
def __init__(self, error):
self.message = 'No domains were passed!'
super().__init__(self.message)
|
def __init__(self, outFile):
'Instantiate super class with a message built from the passed-in filename.'
self.message = (('A file with the name ' + outFile) + ' already exists!')
super().__init__(self.message)
| -103,428,230,241,982,430
|
Instantiate super class with a message built from the passed-in filename.
|
src/findcdn/findcdn_err.py
|
__init__
|
Pascal-0x90/findCDN
|
python
|
def __init__(self, outFile):
self.message = (('A file with the name ' + outFile) + ' already exists!')
super().__init__(self.message)
|
def __init__(self, item):
'Instantiate super class with a message built from the passed-in item.'
self.message = (item + ' is not a valid domain in findcdn.main()')
super().__init__(self.message)
| -3,419,750,793,731,725,300
|
Instantiate super class with a message built from the passed-in item.
|
src/findcdn/findcdn_err.py
|
__init__
|
Pascal-0x90/findCDN
|
python
|
def __init__(self, item):
self.message = (item + ' is not a valid domain in findcdn.main()')
super().__init__(self.message)
|
def __init__(self, error):
'Instantiate super class with passed message using passed in error.'
self.message = ('The following error occurred in findcdn while file writing:\n' + repr(error))
super().__init__(self.message)
| -4,781,970,469,078,838,000
|
Instantiate super class with passed message using passed in error.
|
src/findcdn/findcdn_err.py
|
__init__
|
Pascal-0x90/findCDN
|
python
|
def __init__(self, error):
self.message = ('The following error occurred in findcdn while file writing:\n' + repr(error))
super().__init__(self.message)
|
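The four constructors above belong to exception classes whose names are not captured in this dump; a plausible reconstruction of the first one, with a hypothetical class name:

class NoDomains(Exception):
    # Hypothetical class name; only the __init__ body is attested above.
    def __init__(self, error):
        self.message = 'No domains were passed!'
        super().__init__(self.message)

try:
    raise NoDomains('empty input list')
except NoDomains as exc:
    print(exc)  # No domains were passed!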
def match_target_amplitude(aChunk, target_dBFS):
' Normalize given audio chunk '
change_in_dBFS = (target_dBFS - aChunk.dBFS)
return aChunk.apply_gain(change_in_dBFS)
| 5,784,923,878,507,612,000
|
Normalize given audio chunk
|
src/subtitle.py
|
match_target_amplitude
|
whilemind/subtitle
|
python
|
def match_target_amplitude(aChunk, target_dBFS):
change_in_dBFS = (target_dBFS - aChunk.dBFS)
return aChunk.apply_gain(change_in_dBFS)
|
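With pydub, the helper above normalizes a chunk by applying exactly the gain needed to reach a target loudness, e.g. bringing silence-split chunks to -20 dBFS (the file name is illustrative):

from pydub import AudioSegment
from pydub.silence import split_on_silence

audio = AudioSegment.from_file('speech.wav')
chunks = split_on_silence(audio, min_silence_len=500,
                          silence_thresh=audio.dBFS - 14)
normalized = [match_target_amplitude(c, -20.0) for c in chunks]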