text stringlengths 0 1.05M | meta dict |
|---|---|
"""Add selection answers
Revision ID: 150_add_selection_answers
Revises: 140_service_id_null_for_drafts
Create Date: 2015-06-19 16:22:21.510509
"""
# revision identifiers, used by Alembic.
revision = '150_add_selection_answers'
down_revision = '140_service_id_null_for_drafts'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Create the selection_answers table.

    One row per (supplier, framework) pair; question_answers stores the
    raw answers as a PostgreSQL JSON value.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('selection_answers',
        sa.Column('supplier_id', sa.Integer(), nullable=False),
        sa.Column('framework_id', sa.Integer(), nullable=False),
        sa.Column('question_answers', postgresql.JSON(), nullable=True),
        sa.ForeignKeyConstraint(['framework_id'], ['frameworks.id'], ),
        sa.ForeignKeyConstraint(['supplier_id'], ['suppliers.supplier_id'], ),
        # Composite key: at most one answers row per supplier per framework.
        sa.PrimaryKeyConstraint('supplier_id', 'framework_id')
    )
    ### end Alembic commands ###
def downgrade():
    """Drop the selection_answers table created by upgrade()."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('selection_answers')
    ### end Alembic commands ###
| {
"repo_name": "RichardKnop/digitalmarketplace-api",
"path": "migrations/versions/150_add_selection_answers.py",
"copies": "3",
"size": "1078",
"license": "mit",
"hash": 5338329219157455000,
"line_mean": 31.6666666667,
"line_max": 74,
"alpha_frac": 0.7012987013,
"autogenerated": false,
"ratio": 3.70446735395189,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0108928696940576,
"num_lines": 33
} |
"""Add Semester Enum to Co-Op Table
Revision ID: 2c3193839c9d
Revises: 6ae578b76143
Create Date: 2017-05-24 00:18:22.645256
"""
# revision identifiers, used by Alembic.
revision = '2c3193839c9d'
down_revision = '6ae578b76143'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Replace current_coops.active (boolean) with a 'semester' enum column."""
    ### commands auto generated by Alembic - please adjust! ###
    # Create the named PostgreSQL ENUM type explicitly; the add_column below
    # then references the same type by name.
    semester = postgresql.ENUM('Fall', 'Spring', 'Neither', name='co_op_enum')
    semester.create(op.get_bind())
    op.add_column('current_coops', sa.Column('semester', sa.Enum('Fall', 'Spring', 'Neither', name='co_op_enum'), server_default='Neither', nullable=False))
    op.drop_column('current_coops', 'active')
    ### end Alembic commands ###
def downgrade():
    """Restore the boolean 'active' column and remove 'semester'."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('current_coops', sa.Column('active', sa.BOOLEAN(), server_default=sa.sql.expression.true(), autoincrement=False, nullable=False))
    op.drop_column('current_coops', 'semester')
    # Bug fix: upgrade() creates the PostgreSQL type 'co_op_enum', but the
    # original downgrade left it behind, so re-applying upgrade() failed with
    # "type co_op_enum already exists".  Drop it now that no column uses it.
    postgresql.ENUM(name='co_op_enum').drop(op.get_bind(), checkfirst=False)
    ### end Alembic commands ###
| {
"repo_name": "ComputerScienceHouse/conditional",
"path": "migrations/versions/2c3193839c9d_.py",
"copies": "2",
"size": "1063",
"license": "mit",
"hash": -6841091685544432000,
"line_mean": 33.2903225806,
"line_max": 156,
"alpha_frac": 0.6999059266,
"autogenerated": false,
"ratio": 3.2018072289156625,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4901713155515663,
"avg_score": null,
"num_lines": null
} |
"Add sequences from a scanned database to the profile tree."
import logging
import os
import tempfile
from os.path import basename, isdir, isfile, join
from Bio import SeqIO
from .tasks import ext, noext, sh
from ._share import get_hmm_evalues, parse_scanned_hits
def cmd_add(args):
    """Add sequences from a scanned database to the profile tree.

    Scans args.sequence_db against the combined HMM database derived from
    args.basedir, then appends accepted hit sequences to each profile's
    FASTA file in place.
    """
    # Validation
    assert isdir(args.basedir), "Not a directory: %s" % args.basedir
    hmmdb = args.basedir.rstrip('/') + '_all.hmm'
    assert isfile(hmmdb), "Didn't find " + hmmdb
    assert isfile(args.sequence_db)
    assert args.evalue
    assert 0 < args.min_length <= 1, \
        "Minimum hit length (-L, --min-length) must be between 0 and 1."
    # Traverse the directory tree -- if there's an unbuilt HMM that we'll need,
    # best to identify it now and raise an error
    fastas_and_hmms = list(walk_profiles(args.basedir))
    # Scan & classify the target database, just like in cmd_scan
    dbhits = get_hmm_data(hmmdb, args.sequence_db, args.evalue)
    assert dbhits
    index = SeqIO.index(args.sequence_db, 'fasta')
    assert index
    for fasta, hmm in fastas_and_hmms:
        logging.info("Processing %s ...", noext(fasta))
        # HMMscan the corresponding FASTA file; record the max (worst) e-value
        seq_evals = get_hmm_evalues(hmm, fasta)
        max_eval = max(seq_evals.values())
        # Select the hits matching the current profile
        hits = list(filter_profile(dbhits, basename(noext(fasta))))
        # if not hits:
        #     logging.info("No hits for profile %s", hmm)
        #     continue
        # Filter hits by this profile's maximum (worst) e-value
        hits = list(filter_evalue(hits, max_eval))
        if not hits:
            # logging.info("No hits for profile %s with e-value <= %s",
            #              hmm, max_eval)
            continue
        # Align the hit sequences (retrieved from the target DB)
        aln = get_hmm_alignment(hmm, hits, index)
        # Calculate minimum sequence length based on HMM profile length
        hmm_len = get_hmm_len(hmm)
        min_len_aa = int(hmm_len * args.min_length)
        # Fetch the aligned sequence regions as SeqRecords
        hit_seqs = []
        for record in aln:
            # Read the domain hit start/end position from Stockholm record ID
            acc, _offset = record.id.split('/')
            start, end = map(int, _offset.split('-'))
            # Filter hits by minimum length
            hit_len = end - start + 1  # HMMer uses 1-based numbering
            if hit_len < min_len_aa:
                continue
            # Remove the "/123-456" suffix from aligned record keys
            record.id = acc
            # Remove gaps from the hit sequence
            # NOTE(review): writes the private Seq._data attribute directly;
            # assumes a Biopython version where _data is a plain str -- confirm.
            record.seq._data = str(record.seq).\
                replace('-', '').replace('.', '').upper()
            hit_seqs.append(record)
        # ENH: extend hits that are close to the edge of the profile (e.g. 5aa) to
        # fill sequence to the edges (no gaps)
        # e.g. if hmmstart = 4, add another 3aa sliced from the full sequence
        # to the aligned bit -- if only 2aa available, then take that much
        # - can I tell from the alignment? (# terminal gaps)
        # Add the accepted hits to the FASTA profile
        logging.info("Adding %d hits to profile %s", len(hit_seqs), fasta)
        with open(fasta, 'a') as fafile:
            SeqIO.write(hit_seqs, fafile, 'fasta')
def walk_profiles(topdir):
    """Iterate through paired FASTA and HMM profiles in the directory tree.

    Yields (fasta_path, hmm_path) for every .fasta file found under
    `topdir`.  Raises ValueError if a .fasta file has no corresponding,
    already-built .hmm file next to it.
    """
    for dirpath, _subdirs, fnames in os.walk(topdir):
        fasta_fnames = [join(dirpath, fn)
                        for fn in fnames if fn.endswith('.fasta')]
        for fasta in fasta_fnames:
            hmm_fname = ext(fasta, 'hmm')
            if not isfile(hmm_fname):
                # Typo fix in the error message: "Unbuild" -> "Unbuilt".
                raise ValueError("Unbuilt .hmm for %s" % fasta)
            yield fasta, hmm_fname
def get_hmm_data(hmmfname, seqfname, evalue):
    """Search a sequence database with an HMM profile.
    Return a mapping of hit sequence names to the best profile and e-value.
    """
    with tempfile.NamedTemporaryFile(suffix='.tbl') as tbl:
        # Run hmmsearch with the same cutoff for sequence (-E) and domain
        # (--domE) e-values; the tabular summary goes to the temp file.
        sh('hmmsearch --noali --notextw '
           '-E %s --domE %s --tblout %s %s %s > /dev/null'
           % (evalue, evalue, tbl.name, hmmfname, seqfname))
        tbl.seek(0)
        # Parse the table; get the domain score for each sequence
        hits = parse_scanned_hits(tbl)
    return hits
def get_hmm_alignment(hmmfname, hits, index):
    """Align the hit sequences against the given HMM profile.

    Looks up the full-length hit sequences in `index`, writes them to a
    temporary FASTA file next to the HMM, runs hmmsearch with -A to emit a
    Stockholm alignment, and returns the parsed records as a list.
    """
    fullseqs = [index[seqname] for seqname, _e in hits]
    assert fullseqs
    # Temporary FASTA holding the full-length hit sequences.
    tmp_seqfname = hmmfname + '.fa'
    with open(tmp_seqfname, 'w+') as handle:
        SeqIO.write(fullseqs, handle, 'fasta')
        handle.flush()
    with tempfile.NamedTemporaryFile(suffix='.stk') as alnfile:
        sh(('hmmsearch --noali -A %s %s %s > /dev/null')
           % (alnfile.name, hmmfname, tmp_seqfname))
        alnfile.seek(0)
        alignment = list(SeqIO.parse(alnfile, 'stockholm'))
    os.remove(tmp_seqfname)
    return alignment
def get_hmm_len(hmmfname):
    """Read the HMM profile length from an HMM file.

    Parses the 'LENG' header line of an HMMer profile and returns its
    integer value.

    Raises ValueError if no LENG line is present.  (The original silently
    returned None in that case, which made the caller fail later with an
    unrelated TypeError.)
    """
    with open(hmmfname) as hmmfile:
        for line in hmmfile:
            if line.startswith('LENG'):
                key, length = line.split()
                assert key == 'LENG'
                return int(length)
    raise ValueError("No LENG line found in %s" % hmmfname)
def filter_profile(hits, profile_name):
    """Select the hits matching the given profile name.

    Hits are a dict of: {sequence name: (best profile, e-value)}
    Generates tuples of: (sequence name, e-value)
    """
    # .items() instead of the Python-2-only .iteritems() so the module also
    # runs under Python 3; iteration behavior under Python 2 is unchanged.
    for seqname, (profname, evalue) in hits.items():
        if profname == profile_name:
            yield (seqname, evalue)
def filter_evalue(hits, max_evalue):
    """Yield only the hits whose e-value is at or below ``max_evalue``.

    ``hits`` is an iterable of (sequence name, e-value) tuples; matching
    tuples are passed through unchanged, lazily.
    """
    return (hit for hit in hits if hit[1] <= max_evalue)
| {
"repo_name": "fivejjs/fammer",
"path": "fammerlib/add.py",
"copies": "2",
"size": "5884",
"license": "bsd-2-clause",
"hash": 5978083433644923000,
"line_mean": 35.775,
"line_max": 82,
"alpha_frac": 0.6050305914,
"autogenerated": false,
"ratio": 3.627620221948212,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5232650813348212,
"avg_score": null,
"num_lines": null
} |
'''Adds error column flux to treated lcs
Also save all (treated and untreated) as space-separated values
Search pairs of tables and match them
'''
import numpy as np
import os
import sys
import glob
import matplotlib.pyplot as plt
def VerboseID(kic_int):
    """Format integer KIC identifiers as zero-padded 'kplr' strings.

    E.g. 12345 -> 'kplr000012345'.  Replaces the original 5..9 digit
    if/elif ladder with str.zfill.  IDs outside the 5-9 digit range raise
    ValueError instead of printing an error and calling exit(0), which
    reported success to the shell.
    """
    kic_str = []
    for kic in kic_int:
        digits = str(kic)
        if not 5 <= len(digits) <= 9:
            raise ValueError('KIC id must have 5 to 9 digits: %r' % kic)
        kic_str.append('kplr' + digits.zfill(9))
    return kic_str
def Pairs(path1, path2, kic_str):
    """Collect matching light-curve files for each KIC identifier.

    Returns a two-element list: [treated paths found under ``path1``,
    raw paths found under ``path2``].  A file matches a KIC string when
    that string occurs anywhere in the file name.
    """
    treated, raw = [], []
    for kic in kic_str:
        for candidate in glob.glob(os.path.join(path1, 'treat_*')):
            if kic in candidate:
                treated.append(candidate)
        for candidate in glob.glob(os.path.join(path2, 'd13.kcp*')):
            if kic in candidate:
                raw.append(candidate)
    return [treated, raw]
def NearestPos(arr1, value2):
    """Return the index of the element of ``arr1`` closest to ``value2``."""
    distances = np.abs(arr1 - value2)
    return distances.argmin()
#function matches elements from both lists, and create updated data
def Match2(path_out,tabs2pair):
    """Merge treated light curves with flux errors and save both as .tsv.

    For each paired file, appends to the treated (time, flux) samples the
    flux error of the nearest-in-time row of the error table, then writes
    the combined table and the raw error table as space-separated files
    under path_out.  Python 2 only (print statement, xrange).
    """
    for j in range(0,len(tabs2pair[0][:])):
        #treated cols: time|flux .dat
        trt = np.loadtxt(tabs2pair[0][j],delimiter=' ')
        aux_fn1 = tabs2pair[0][j][ tabs2pair[0][j].find('kplr'):tabs2pair[0][j].find('.dat') ]
        #with errors cols: time|flux|flux_err .csv
        werr = np.loadtxt(tabs2pair[1][j],delimiter=',')
        aux_fn2 = tabs2pair[1][j][ tabs2pair[1][j].find('kplr'):tabs2pair[1][j].find('.csv') ]
        print '\n\tworking on: {0}'.format(aux_fn1)
        time,flux,flux_err = np.empty([0]),np.empty([0]),np.empty([0])
        for p in xrange(0,trt.shape[0]):
            time = np.append(time,[trt[p,0]],axis=0)
            flux = np.append(flux,[trt[p,1]],axis=0)
            # error taken from the row of werr whose time is nearest to the
            # treated sample's time
            flux_err = np.append(flux_err, [ werr[NearestPos( werr[:,0],trt[p,0] ),2] ] )
        '''After rotate array is ok, but cols must be inserted last-to-first to appear
        firs- to-last
        '''
        out1 = path_out+'kcp_trt_'+aux_fn1+'.tsv'
        nrot = 3
        np.savetxt(out1,np.rot90(np.vstack([flux_err,flux,time]),nrot),delimiter=' ')
        out2 = path_out+'kcp_raw_'+aux_fn2+'.tsv'
        np.savetxt(out2,werr,delimiter=' ')
    return True
if __name__=='__main__':
    # Hard-coded input/output locations, relative to the working directory.
    path_treat = 's01tos04_treat'
    path_werr = 'kcp_lcs'
    path_tables = 'LC2work/'
    #generate list of paths, to match lists
    # kcp.txt holds the integer KIC ids to process, one per row.
    list2 = Pairs(path_treat,path_werr,VerboseID(np.loadtxt('kcp.txt',dtype='int')))
    #match tables
    transf = Match2(path_tables,list2)
    if transf:
        print 'All worked fine'
else:
    print '\n\tcalled from another script\n'
| {
"repo_name": "paztronomer/kepler_tools",
"path": "addCol.py",
"copies": "1",
"size": "3019",
"license": "mit",
"hash": -2247128159968791300,
"line_mean": 30.4479166667,
"line_max": 94,
"alpha_frac": 0.586949321,
"autogenerated": false,
"ratio": 2.8031569173630455,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3890106238363045,
"avg_score": null,
"num_lines": null
} |
"""add service whitelist table
Revision ID: 0055_service_whitelist
Revises: 0054_perform_drop_status_column
Create Date: 2016-09-20 12:12:30.838095
"""
# revision identifiers, used by Alembic.
revision = '0055_service_whitelist'
down_revision = '0054_perform_drop_status_column'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Create the service_whitelist table, indexed by service_id."""
    op.create_table('service_whitelist',
        sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column('service_id', postgresql.UUID(as_uuid=True), nullable=False),
        # NOTE(review): on PostgreSQL this sa.Enum also creates a named
        # 'recipient_type' type -- confirm it is cleaned up on downgrade.
        sa.Column('recipient_type', sa.Enum('mobile', 'email', name='recipient_type'), nullable=False),
        sa.Column('recipient', sa.String(length=255), nullable=True),
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.ForeignKeyConstraint(['service_id'], ['services.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_service_whitelist_service_id'), 'service_whitelist', ['service_id'], unique=False)
def downgrade():
    """Drop the service_whitelist table and its enum type."""
    op.drop_table('service_whitelist')
    # Bug fix: upgrade() creates the PostgreSQL ENUM type 'recipient_type'
    # via sa.Enum, but dropping the table does not drop the type, so a
    # downgrade followed by a fresh upgrade failed with "type already
    # exists".  Drop the orphaned type explicitly.
    sa.Enum(name='recipient_type').drop(op.get_bind(), checkfirst=False)
| {
"repo_name": "alphagov/notifications-api",
"path": "migrations/versions/0055_service_whitelist.py",
"copies": "1",
"size": "1101",
"license": "mit",
"hash": 2621796810134006000,
"line_mean": 34.5161290323,
"line_max": 111,
"alpha_frac": 0.6993642144,
"autogenerated": false,
"ratio": 3.540192926045016,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4739557140445016,
"avg_score": null,
"num_lines": null
} |
"""add_session_table
Revision ID: 8ce3d6d070da
Revises: 885280875a1c
Create Date: 2016-10-27 20:04:26.845536
"""
# revision identifiers, used by Alembic.
revision = '8ce3d6d070da'
down_revision = '885280875a1c'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade routine, upgrade_<engine_name>()."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade routine, downgrade_<engine_name>()."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_data_broker():
    """Create the session_map table on the data_broker database."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('session_map',
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('session_id', sa.Integer(), nullable=False),
        sa.Column('uid', sa.Text(), nullable=True),
        sa.Column('data', sa.Text(), nullable=True),
        sa.Column('expiration', sa.Integer(), nullable=True),
        sa.PrimaryKeyConstraint('session_id')
    )
    ### end Alembic commands ###
def downgrade_data_broker():
    """Drop the session_map table created by upgrade_data_broker()."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('session_map')
    ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/8ce3d6d070da_add_session_table.py",
"copies": "1",
"size": "1155",
"license": "cc0-1.0",
"hash": -4408698554833553400,
"line_mean": 23.0625,
"line_max": 63,
"alpha_frac": 0.6718614719,
"autogenerated": false,
"ratio": 3.338150289017341,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9369680731487657,
"avg_score": 0.028066205885936595,
"num_lines": 48
} |
"""Add session types table
Revision ID: 9c4418d7a6aa
Revises: 2af245be72a6
Create Date: 2017-12-14 10:59:47.872426
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '9c4418d7a6aa'
down_revision = '2af245be72a6'
branch_labels = None
depends_on = None
def upgrade():
    """Introduce session_types and replace sessions.is_poster with a type link."""
    # Create session type table
    op.create_table('session_types',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('event_id', sa.Integer(), nullable=False, index=True),
        sa.Column('name', sa.String(), nullable=False),
        sa.Column('is_poster', sa.Boolean(), nullable=False),
        sa.ForeignKeyConstraint(['event_id'], ['events.events.id']),
        sa.PrimaryKeyConstraint('id'),
        schema='events')
    # Type names are unique per event, case-insensitively (lower(name)).
    op.create_index('ix_uq_session_types_event_id_name_lower', 'session_types', ['event_id', sa.text('lower(name)')],
                    unique=True, schema='events')
    # Add session type to sessions
    op.add_column('sessions', sa.Column('type_id', sa.Integer(), nullable=True), schema='events')
    op.create_index(None, 'sessions', ['type_id'], unique=False, schema='events')
    op.create_foreign_key(None, 'sessions', 'session_types', ['type_id'], ['id'], source_schema='events',
                          referent_schema='events')
    # Migrate poster sessions to sessions with poster session type:
    # one 'Poster session' type per event that has poster sessions, then
    # point those sessions at it.
    op.execute('''
        INSERT INTO events.session_types (event_id, name, is_poster)
        SELECT event_id, 'Poster session', true
        FROM events.sessions
        WHERE is_poster
        GROUP BY event_id;
        UPDATE events.sessions s
        SET type_id = st.id
        FROM events.session_types st
        WHERE st.event_id = s.event_id AND s.is_poster;
    ''')
    op.drop_column('sessions', 'is_poster', schema='events')
def downgrade():
    """Restore sessions.is_poster from session_types and drop the table."""
    # Migrate poster session types to poster sessions
    op.add_column('sessions', sa.Column('is_poster', sa.Boolean(), nullable=False, server_default='false'),
                  schema='events')
    op.execute('''
        UPDATE events.sessions s
        SET is_poster = true
        FROM events.session_types st
        WHERE st.id = s.type_id AND st.is_poster;
    ''')
    # Delete session type from sessions
    op.drop_column('sessions', 'type_id', schema='events')
    # Delete session types table
    op.drop_table('session_types', schema='events')
| {
"repo_name": "mvidalgarcia/indico",
"path": "indico/migrations/versions/20171214_1059_9c4418d7a6aa_add_session_types_table.py",
"copies": "2",
"size": "2473",
"license": "mit",
"hash": 4762055433127767000,
"line_mean": 35.3676470588,
"line_max": 117,
"alpha_frac": 0.6190861302,
"autogenerated": false,
"ratio": 3.718796992481203,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5337883122681204,
"avg_score": null,
"num_lines": null
} |
"""add set owned
Revision ID: f06e0da1bdf4
Revises: 980fdee06558
Create Date: 2016-11-17 20:32:18.950479
"""
# revision identifiers, used by Alembic.
revision = 'f06e0da1bdf4'
down_revision = '980fdee06558'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Create the set_owned table linking users to the sets they own."""
    op.create_table('set_owned',
        # NOTE(review): gen_random_uuid() requires the pgcrypto extension
        # (or PostgreSQL 13+) -- confirm it is available on this database.
        sa.Column('uuid', postgresql.UUID(as_uuid=True), server_default=sa.text('gen_random_uuid()'), nullable=False),
        sa.Column('created', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
        sa.Column('updated', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
        sa.Column('deleted', sa.DateTime(), nullable=True),
        sa.Column('user', postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column('set', postgresql.UUID(as_uuid=True), nullable=False),
        sa.ForeignKeyConstraint(['set'], ['set.uuid'], name=op.f('fk_set_owned_set_set')),
        sa.ForeignKeyConstraint(['user'], ['user.uuid'], name=op.f('fk_set_owned_user_user')),
        sa.PrimaryKeyConstraint('uuid', name=op.f('pk_set_owned'))
    )
def downgrade():
    """Drop the set_owned table created by upgrade()."""
    op.drop_table('set_owned')
| {
"repo_name": "EliRibble/dominus",
"path": "alembic/versions/f06e0da1bdf4_add_set_owned.py",
"copies": "1",
"size": "1212",
"license": "mit",
"hash": 6166883206905483000,
"line_mean": 34.6470588235,
"line_max": 118,
"alpha_frac": 0.6749174917,
"autogenerated": false,
"ratio": 3.2406417112299466,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4415559202929947,
"avg_score": null,
"num_lines": null
} |
"""Adds fenced code block support
Copied from
https://pythonhosted.org/Markdown/extensions/fenced_code_blocks.html
Original code Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/).
All changes Copyright 2008-2014 The Python Markdown Project
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from markdown.extensions import Extension
from markdown.preprocessors import Preprocessor
import re
from pygments import highlight
from pygments.formatters.html import HtmlFormatter
from pygments.lexers import get_lexer_by_name
from pygments.util import ClassNotFound
from django.utils.html import escape
from django.utils.deconstruct import deconstructible
@deconstructible
class FencedCodeExtension(Extension):
    """Adds fenced code blocks

    E.g.:

    ```python
    class FencedCodeExtension(Extension):
        pass
    ```
    """

    def extendMarkdown(self, md, md_globals):
        """Add FencedBlockPreprocessor to the Markdown instance

        :param md: Current markdown instance.
        :param md_globals: Global markdown vars.
        """
        md.registerExtension(self)
        # '>normalize_whitespace' places the preprocessor after whitespace
        # normalization so fences are matched on normalized lines.
        md.preprocessors.add('fenced_code_block',
                             FencedBlockPreprocessor(md),
                             ">normalize_whitespace")
class Formatter(HtmlFormatter):
    """
    Formats a highlighted code block so that it is compatible with
    the `Light` framework layout.
    """

    def _wrap_pre(self, inner):
        # Emit an ordered list inside <pre>, one <li> per emitted line,
        # so the surrounding stylesheet can number the code lines.
        yield 0, '<pre><ol>'
        for ttype, value in inner:
            yield ttype, '<li>%s</li>' % value
        yield 0, '</ol></pre>'
class FencedBlockPreprocessor(Preprocessor):
    """Main fenced code block renderer"""

    # Matches ```lang\n ... \n``` fences anchored at line starts; the code
    # body is captured non-greedily up to the closing fence.
    block_re = re.compile(
        r'(?P<fence>^(?:`{3}))[ ]*'
        r'(?P<lang>[a-zA-Z0-9_+-]*)?[ ]*\n'
        r'(?P<code>.*?)(?<=\n)'
        r'(?P=fence)[ ]*$', re.MULTILINE | re.DOTALL | re.VERBOSE)

    def run(self, lines):
        """Match and store Fenced Code Blocks in the HtmlStash

        :param lines: Lines of code.
        """
        text = "\n".join(lines)
        # Repeatedly replace the first remaining fence with a stash
        # placeholder until no fences are left.
        while 1:
            m = self.block_re.search(text)
            if m:
                lang = ''
                if m.group('lang'):
                    lang = m.group('lang')
                try:
                    # Pygments-highlight when the language is recognized;
                    # Formatter wraps the output in <pre><ol>...</ol></pre>.
                    lexer = get_lexer_by_name(lang, stripall=True)
                    formatter = Formatter()
                    code = highlight(m.group('code'), lexer, formatter)
                except ClassNotFound:
                    # Unknown language: escape the raw code instead.
                    code = escape(m.group('code'))
                    code = code.split('\n')
                    # Drop trailing blank lines before wrapping.
                    while code and not code[-1].strip():
                        code.pop()
                    if lang:
                        # A language was requested: still number the lines.
                        code = ''.join(map(lambda x: '<li>%s</li>' % x, code))
                        code = '<ol>%s</ol>' % code
                    else:
                        code = '\n'.join(code)
                    code = '<pre>%s</pre>' % code
                placeholder = self.markdown.htmlStash.store(code, safe=True)
                text = '%s\n%s\n%s' % (text[:m.start()],
                                       placeholder,
                                       text[m.end():])
            else:
                break
        return text.split("\n")
| {
"repo_name": "AmatanHead/collective-blog",
"path": "s_markdown/extensions/fenced_code.py",
"copies": "1",
"size": "3379",
"license": "mit",
"hash": 8520660955084263000,
"line_mean": 28.6403508772,
"line_max": 78,
"alpha_frac": 0.5398046759,
"autogenerated": false,
"ratio": 4.161330049261084,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5201134725161084,
"avg_score": null,
"num_lines": null
} |
""" Adds functionality to make LaTeX figures from the raw TikZ code produced by
core.StreamerFlowchart.
"""
# LaTeX template for a single flowchart figure; braces are doubled because
# the string is rendered with str.format (fields: tikzoptions, tikz, name).
FIGURE = r'''\begin{{figure}}
\centering
\begin{{tikzpicture}}[{tikzoptions}]
{tikz}
\end{{tikzpicture}}
\caption{{Flowchart of {name}}}
\end{{figure}}'''

# Standalone document wrapper for one or more rendered FIGURE blocks
# (field: figures); inputs colors/tikzstyles from sibling .tex files.
DOCUMENT = r'''\documentclass{{scrartcl}}
\usepackage{{fontspec}}
\usepackage{{tikz}}
\usetikzlibrary{{shapes, arrows, positioning}}
\usepackage{{xcolor}}
\input{{colors}}
\input{{tikzstyles}}
\begin{{document}}
{figures}
\end{{document}}
'''
def indent(spaces, multilinestring):
    """Indent every line after the first of ``multilinestring`` by
    ``spaces`` spaces.

    The first line is left untouched (only the line separators gain the
    indentation), which keeps the result aligned when substituted into an
    already-indented template slot.
    """
    separator = '\n' + ' ' * spaces
    return separator.join(multilinestring.split('\n'))
def make_figure(sf, tikzoptions='node distance=.75cm and 2.75cm'):
    """ Generates a LaTeX figure from a given hltflow.core.StreamerFlowchart.
    Additionally, tikzoptions can be supplied manually.
    """
    from .core import StreamerFlowchart
    # isinstance instead of the original `type(sf) is StreamerFlowchart`, so
    # subclasses are accepted as well; the exception type seen by callers
    # (AssertionError) is unchanged, and a message is added for debugging.
    assert isinstance(sf, StreamerFlowchart), \
        'expected a StreamerFlowchart, got %s' % type(sf).__name__
    return FIGURE.format(tikz=indent(4, sf.tikz), name=sf.name,
                         tikzoptions=tikzoptions)
def make_document(figures):
    """Render a complete LaTeX document embedding the given figures.

    ``figures`` is an iterable of figure strings, e.g. as produced by
    make_figure; they are separated by blank lines in the output body.
    """
    body = '\n\n'.join(figures)
    return DOCUMENT.format(figures=body)
| {
"repo_name": "kdungs/lhcb-hltflow",
"path": "hltflow/latex.py",
"copies": "1",
"size": "1423",
"license": "mit",
"hash": -4206713809387773400,
"line_mean": 25.3518518519,
"line_max": 79,
"alpha_frac": 0.6865776528,
"autogenerated": false,
"ratio": 3.420673076923077,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9607250729723077,
"avg_score": 0,
"num_lines": 54
} |
"""Add shard column to query
Revision ID: a76be8b92780
Revises: c107fa0468ff
Create Date: 2017-08-25 11:39:24.754460
"""
# revision identifiers, used by Alembic.
revision = 'a76be8b92780'
down_revision = 'c107fa0468ff'
from alembic import op
import sqlalchemy as sa
import doorman.database
def upgrade():
    """Normalize platform values to 'linux' on query and pack, then add
    the query.shard column."""
    ### commands auto generated by Alembic - please adjust! ###
    query_tbl = sa.sql.table('query', sa.sql.column('platform', sa.String))
    pack_tbl = sa.sql.table('pack', sa.sql.column('platform', sa.String))
    op.execute(
        query_tbl.update() \
        .where(
            sa.or_(
                query_tbl.c.platform == op.inline_literal('redhat,centos'),
                query_tbl.c.platform == op.inline_literal('ubuntu'),
            )
        ).values({'platform': op.inline_literal('linux')})
    )
    # Bug fix: the UPDATE on 'pack' previously filtered on
    # query_tbl.c.platform, i.e. on the wrong table; filter on pack's own
    # platform column instead.
    op.execute(
        pack_tbl.update() \
        .where(
            sa.or_(
                pack_tbl.c.platform == op.inline_literal('redhat,centos'),
                pack_tbl.c.platform == op.inline_literal('ubuntu'),
            )
        ).values({'platform': op.inline_literal('linux')})
    )
    op.add_column('query', sa.Column('shard', sa.Integer(), nullable=True))
    ### end Alembic commands ###
def downgrade():
    """Remove the shard column added by upgrade().

    Note: the platform-value rewrite done in upgrade() is not reversed.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('query', 'shard')
    ### end Alembic commands ###
| {
"repo_name": "mwielgoszewski/doorman",
"path": "migrations/versions/a76be8b92780_add_shard_column_to_query.py",
"copies": "1",
"size": "1436",
"license": "mit",
"hash": -399355631787466430,
"line_mean": 29.5531914894,
"line_max": 77,
"alpha_frac": 0.5814763231,
"autogenerated": false,
"ratio": 3.6446700507614214,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4726146373861422,
"avg_score": null,
"num_lines": null
} |
"""Add Shared Notes
Revision ID: ffe4c66b5772
Revises: 5d3c326dd901
Create Date: 2016-08-30 08:40:35.993215
"""
# revision identifiers, used by Alembic.
revision = 'ffe4c66b5772'
down_revision = '5d3c326dd901'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the shared_notes table and add users.last_login_date."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('shared_notes',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('created_date', sa.DateTime(), nullable=True),
        sa.Column('updated_date', sa.DateTime(), nullable=True),
        sa.Column('author_id', sa.Integer(), nullable=True),
        sa.Column('note_id', sa.Integer(), nullable=True),
        # presumably 254 = maximum practical email address length -- verify
        sa.Column('recipient_email', sa.String(length=254), nullable=True),
        sa.ForeignKeyConstraint(['author_id'], ['users.id'], ),
        sa.ForeignKeyConstraint(['note_id'], ['notes.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_shared_notes_created_date'), 'shared_notes', ['created_date'], unique=False)
    op.create_index(op.f('ix_shared_notes_updated_date'), 'shared_notes', ['updated_date'], unique=False)
    # Unrelated to sharing: also track each user's last login time.
    op.add_column('users', sa.Column('last_login_date', sa.DateTime(), nullable=True))
    ### end Alembic commands ###
def downgrade():
    """Drop users.last_login_date and the shared_notes table (indexes first)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('users', 'last_login_date')
    op.drop_index(op.f('ix_shared_notes_updated_date'), table_name='shared_notes')
    op.drop_index(op.f('ix_shared_notes_created_date'), table_name='shared_notes')
    op.drop_table('shared_notes')
    ### end Alembic commands ###
| {
"repo_name": "levlaz/braindump",
"path": "migrations/versions/ffe4c66b5772_add_shared_notes.py",
"copies": "1",
"size": "1588",
"license": "mit",
"hash": -6007470733241688000,
"line_mean": 36.8095238095,
"line_max": 105,
"alpha_frac": 0.6750629723,
"autogenerated": false,
"ratio": 3.2474437627811863,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44225067350811864,
"avg_score": null,
"num_lines": null
} |
"""Add shopping categories
Revision ID: 430dc4ed6dd6
Revises: 3a1dddd0c0f8
Create Date: 2014-12-24 09:55:10.993854
"""
# revision identifiers, used by Alembic.
revision = '430dc4ed6dd6'
down_revision = '3a1dddd0c0f8'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table
def upgrade():
    """Seed the shopping_category table with the default categories."""
    # Lightweight table construct for bulk_insert; no ORM model needed here.
    shopping_categories = table(
        'shopping_category',
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('name', sa.Unicode(75), nullable=False),
        sa.Column('daily_limit', sa.Integer, nullable=True)
    )
    # Explicit ids 1-6 so later migrations/code can reference categories
    # by a stable id.
    op.bulk_insert(
        shopping_categories,
        [
            {'id': 1, 'name': 'Clothing',
             'daily_limit': 5},
            {'id': 2, 'name': 'Household',
             'daily_limit': 5},
            {'id': 3, 'name': 'Shoes',
             'daily_limit': 5},
            {'id': 4, 'name': 'Baby',
             'daily_limit': 5},
            {'id': 5, 'name': 'Coats',
             'daily_limit': 5},
            {'id': 6, 'name': 'Other',
             'daily_limit': 5}
        ])
def downgrade():
    """Delete the categories seeded by upgrade().

    The original downgrade was a no-op, which left the seeded rows behind
    and made the migration non-reversible.  Remove the rows by the fixed
    ids that upgrade() inserted.
    """
    shopping_categories = table(
        'shopping_category',
        sa.Column('id', sa.Integer, primary_key=True)
    )
    op.execute(
        shopping_categories.delete().where(
            shopping_categories.c.id.in_([1, 2, 3, 4, 5, 6])
        )
    )
| {
"repo_name": "jlutz777/FreeStore",
"path": "alembic/versions/430dc4ed6dd6_add_shopping_categories.py",
"copies": "1",
"size": "1108",
"license": "mit",
"hash": 7437124817223749000,
"line_mean": 23.6222222222,
"line_max": 59,
"alpha_frac": 0.5252707581,
"autogenerated": false,
"ratio": 3.307462686567164,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9319905893796134,
"avg_score": 0.002565510174205826,
"num_lines": 45
} |
"""Add short value
Revision ID: 8d12872e0b77
Revises: 76d21e039dfd
Create Date: 2017-09-15 13:30:39.443982
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = "8d12872e0b77"
down_revision = "76d21e039dfd"
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade routine, upgrade_<engine_name>()."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade routine, downgrade_<engine_name>()."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_metrics():
    """No schema changes for the metrics database in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No schema changes to revert for the metrics database."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def upgrade_rdr():
    """Add code/code_history.short_value and widen four participant_summary
    columns from VARCHAR(80) to VARCHAR(255)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("code", sa.Column("short_value", sa.String(length=50), nullable=True))
    op.add_column("code_history", sa.Column("short_value", sa.String(length=50), nullable=True))
    op.alter_column(
        "participant_summary",
        "city",
        existing_type=mysql.VARCHAR(length=80),
        type_=sa.String(length=255),
        existing_nullable=True,
    )
    op.alter_column(
        "participant_summary",
        "first_name",
        existing_type=mysql.VARCHAR(length=80),
        type_=sa.String(length=255),
        existing_nullable=False,
    )
    op.alter_column(
        "participant_summary",
        "last_name",
        existing_type=mysql.VARCHAR(length=80),
        type_=sa.String(length=255),
        existing_nullable=False,
    )
    op.alter_column(
        "participant_summary",
        "middle_name",
        existing_type=mysql.VARCHAR(length=80),
        type_=sa.String(length=255),
        existing_nullable=True,
    )
    # ### end Alembic commands ###
def downgrade_rdr():
    """Reverse upgrade_rdr(): shrink the widened columns back to VARCHAR(80)
    and drop the short_value columns.

    Note: shrinking to VARCHAR(80) truncates/fails for values longer than 80
    characters that were stored while the column was VARCHAR(255).
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column(
        "participant_summary",
        "middle_name",
        existing_type=sa.String(length=255),
        type_=mysql.VARCHAR(length=80),
        existing_nullable=True,
    )
    op.alter_column(
        "participant_summary",
        "last_name",
        existing_type=sa.String(length=255),
        type_=mysql.VARCHAR(length=80),
        existing_nullable=False,
    )
    op.alter_column(
        "participant_summary",
        "first_name",
        existing_type=sa.String(length=255),
        type_=mysql.VARCHAR(length=80),
        existing_nullable=False,
    )
    op.alter_column(
        "participant_summary",
        "city",
        existing_type=sa.String(length=255),
        type_=mysql.VARCHAR(length=80),
        existing_nullable=True,
    )
    op.drop_column("code_history", "short_value")
    op.drop_column("code", "short_value")
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/8d12872e0b77_add_short_value.py",
"copies": "1",
"size": "2848",
"license": "bsd-3-clause",
"hash": 7623959289049182000,
"line_mean": 25.8679245283,
"line_max": 96,
"alpha_frac": 0.6155196629,
"autogenerated": false,
"ratio": 3.618805590851334,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47343252537513336,
"avg_score": null,
"num_lines": null
} |
"""Add signature token column to User
Revision ID: a2472148d2c5
Revises: f6fba869a27c
Create Date: 2019-10-21 13:50:50.046577
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'a2472148d2c5'
down_revision = 'f6fba869a27c'
branch_labels = None
depends_on = None
def upgrade():
    """Add a non-null users.signing_secret UUID column.

    Existing rows are back-filled via a one-off random-UUID server default,
    which is then removed so future inserts must supply the value.
    """
    op.add_column(
        'users',
        sa.Column(
            'signing_secret',
            postgresql.UUID(),
            nullable=False,
            # give every user a 'signing_secret' column with a random UUID
            # thanks to the author of https://stackoverflow.com/a/21327318
            server_default=sa.text('''
                uuid_in(overlay(overlay(md5(random()::text || ':' || clock_timestamp()::text) placing '4' from 13)
                placing to_hex(floor(random()*(11-8+1) + 8)::int)::text from 17)::cstring)''')
        ),
        schema='users'
    )
    # Drop the default once existing rows are populated.
    op.alter_column('users', 'signing_secret', server_default=None, schema='users')
def downgrade():
    """Remove the 'signing_secret' column added by this revision."""
    op.drop_column('users', 'signing_secret', schema='users')
| {
"repo_name": "indico/indico",
"path": "indico/migrations/versions/20191021_1350_a2472148d2c5_add_signature_token_column_to_user.py",
"copies": "1",
"size": "1126",
"license": "mit",
"hash": 8872799590402610000,
"line_mean": 27.8717948718,
"line_max": 114,
"alpha_frac": 0.6287744227,
"autogenerated": false,
"ratio": 3.401812688821752,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4530587111521752,
"avg_score": null,
"num_lines": null
} |
"""Add sites
Revision ID: d5cb0d93b970
Revises: 0cc3557c43d0
Create Date: 2017-05-09 11:37:47.244859
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "d5cb0d93b970"
down_revision = "0cc3557c43d0"
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function (metrics or rdr)."""
    handler = globals()['upgrade_%s' % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function (metrics or rdr)."""
    handler = globals()['downgrade_%s' % engine_name]
    handler()
def upgrade_metrics():
    """No schema changes for the metrics database in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No schema changes to revert for the metrics database."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def upgrade_rdr():
    """Create the 'site' table in the RDR database."""
    columns = [
        sa.Column("site_id", sa.Integer(), nullable=False),
        sa.Column("site_name", sa.String(length=255), nullable=False),
        sa.Column("google_group", sa.String(length=255), nullable=False),
        sa.Column("consortium_name", sa.String(length=255), nullable=False),
        sa.Column("mayolink_client_number", sa.Integer(), nullable=True),
        sa.Column("hpo_id", sa.Integer(), nullable=False),
    ]
    constraints = [
        sa.ForeignKeyConstraint(["hpo_id"], ["hpo.hpo_id"]),
        sa.PrimaryKeyConstraint("site_id"),
        sa.UniqueConstraint("google_group"),
    ]
    op.create_table("site", *(columns + constraints))
def downgrade_rdr():
    """Drop the 'site' table from the RDR database."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table("site")
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/d5cb0d93b970_add_sites.py",
"copies": "1",
"size": "1592",
"license": "bsd-3-clause",
"hash": 3011603045257330000,
"line_mean": 26.4482758621,
"line_max": 76,
"alpha_frac": 0.6407035176,
"autogenerated": false,
"ratio": 3.445887445887446,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4586590963487446,
"avg_score": null,
"num_lines": null
} |
"""Add size fields to task
Revision ID: 54dff15ea3f9
Revises: 8a4558f255a4
Create Date: 2019-03-18 10:40:13.391823
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '54dff15ea3f9'
# Single literal; previously split over two lines with implicit
# string concatenation and a line continuation.
down_revision = '69aa591d226b'
branch_labels = None
depends_on = None
def upgrade():
    """Add group/size columns to task, a type enum to workflow, and a
    cssClass column to operation."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('task',
                  sa.Column('group_id', sa.String(length=250), nullable=True))
    # Both size columns are NOT NULL with a server-side default of 0 so
    # existing rows stay valid.
    for column_name in ('height', 'width'):
        op.add_column(
            'task',
            sa.Column(column_name, sa.Integer(), nullable=False,
                      server_default=sa.schema.DefaultClause("0")))
    op.create_foreign_key('fk_task_group_id', 'task', 'task', ['group_id'],
                          ['id'])
    workflow_type = sa.Enum(
        'WORKFLOW', 'SYSTEM_TEMPLATE', 'SUB_FLOW', 'USER_TEMPLATE',
        name='WorkflowTypeEnumType')
    op.add_column('workflow', sa.Column('type', workflow_type, nullable=False))
    op.add_column('operation', sa.Column('cssClass', sa.String(length=250),
                                         nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert the columns, enum and foreign key added by this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('workflow', 'type')
    # The foreign key must be dropped before the column it references.
    op.drop_constraint('fk_task_group_id', 'task', type_='foreignkey')
    op.drop_column('task', 'width')
    op.drop_column('task', 'height')
    op.drop_column('task', 'group_id')
    op.drop_column('operation', 'cssClass')
    # ### end Alembic commands ###
    # Removed a dead trailing `pass` statement that followed the body.
| {
"repo_name": "eubr-bigsea/tahiti",
"path": "migrations/versions/54dff15ea3f9_add_size_fields_to_task.py",
"copies": "1",
"size": "1629",
"license": "apache-2.0",
"hash": -8828260686017227000,
"line_mean": 33.6595744681,
"line_max": 78,
"alpha_frac": 0.6163290362,
"autogenerated": false,
"ratio": 3.4367088607594938,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45530378969594937,
"avg_score": null,
"num_lines": null
} |
"""Adds JSON serialization to SQLalchemy ORM objects.
Use the `add_json_converter` function to attach the default JSON Converter to
the SQLAlchemy declarative baseclass for automatich record to dict conversion.
"""
import datetime
import json
import re
JSON_SAFE_TYPES = set([type(None), list, dict, int, float, basestring])
class Converter(object):
    """Converts ORM-mapped records into JSON-ready dictionaries.

    Attribute names are renamed with ``key_converter`` (default:
    ``to_camelcase``) and values are converted with per-type converters,
    falling back to ``str()`` for unknown types when ``str_fallback`` is
    enabled.
    """

    def __init__(self, **settings):
        """Initializes the converter.

        Keyword arguments:
            key_converter: callable applied to every attribute name
                (defaults to ``to_camelcase``).
            str_fallback: when true, values of unknown type are converted
                with ``str()`` (defaults to True).
            type_converters: optional {type: callable} mapping used to
                convert values of that exact type.
        """
        self.convert_key = settings.get('key_converter', to_camelcase)
        self.str_fallback = settings.get('str_fallback', True)
        self.type_converters = {}
        if 'type_converters' in settings:
            self.type_converters.update(settings['type_converters'])

    def __call__(self, record):
        """Returns a dictionary of the mapped ORM attributes.

        Attribute types are converted to plain Python types which can be
        dumped by JSON serializers.
        """
        result = {}
        for attr in vars(record):
            if attr.startswith('_sa_'):
                continue  # Do not include SQLAlchemy internal attributes
            result[self.convert_key(attr)] = self.convert_value(getattr(record, attr))
        return result

    def convert_value(self, value):
        """Converts and returns the value using the appropriate type converter.

        If no suitable type converter is found and the value is not of a known
        safe type, a default conversion to string is performed.
        """
        val_type = type(value)
        if val_type in self.type_converters:
            return self.type_converters[val_type](value)
        # BUG FIX: isinstance() requires a type or *tuple* of types;
        # JSON_SAFE_TYPES was historically a set, which raised TypeError.
        # Normalizing to a tuple works with either definition.
        if self.str_fallback and not isinstance(value, tuple(JSON_SAFE_TYPES)):
            return str(value)
        return value

    def add_type_converter(self, type_, converter):
        """Adds or replaces an existing converter for the given type."""
        self.type_converters[type_] = converter
def add_json_converter(declarative_base, pyramid=False, converter=None):
    """Adds converters to JSON-ready dictionary and JSON string.

    Attaches ``to_dict`` and ``to_json`` methods to *declarative_base*;
    when *pyramid* is true, also attaches a ``__json__`` renderer hook.
    Returns the (modified) base class.
    """
    active = DEFAULT_CONVERTER if converter is None else converter

    def to_dict(self):
        return active(self)

    def to_json(self):
        return json.dumps(self.to_dict())

    declarative_base.to_dict = to_dict
    declarative_base.to_json = to_json
    if pyramid:
        declarative_base.__json__ = lambda self, _request: self.to_dict()
    return declarative_base
def add_type_converter(type_, converter):
    """Adds or replaces a converter to the default converter.

    Module-level convenience wrapper around
    DEFAULT_CONVERTER.add_type_converter.
    """
    DEFAULT_CONVERTER.add_type_converter(type_, converter)
def default_converter():
    """Returns a Converter preloaded with date/datetime ISO-format support."""
    iso_converters = {
        datetime.date: datetime.date.isoformat,
        datetime.datetime: datetime.datetime.isoformat,
    }
    return Converter(type_converters=iso_converters)
DEFAULT_CONVERTER = default_converter()
def to_camelcase(varname):
    """Converts a name from lower_with_under naming to camelCase.

    The assumption is made that the given variable name is in all lowercase
    and uses single underscores to separate words.
    """
    return re.sub('_([a-z])', lambda match: match.group(1).upper(), varname)
| {
"repo_name": "edelooff/sqla-orm-to-json",
"path": "orm_json/__init__.py",
"copies": "1",
"size": "2920",
"license": "mit",
"hash": -7776028278864968000,
"line_mean": 32.5632183908,
"line_max": 80,
"alpha_frac": 0.7133561644,
"autogenerated": false,
"ratio": 4.016506189821183,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5229862354221183,
"avg_score": null,
"num_lines": null
} |
"""Add sketch attribute model
Revision ID: fc7bc5c66c63
Revises: c380f6dff0bd
Create Date: 2020-10-23 15:44:16.011715
"""
# revision identifiers, used by Alembic.
revision = 'fc7bc5c66c63'
down_revision = 'c380f6dff0bd'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the sketch attribute tables: attribute and attributevalue."""
    # ### commands auto generated by Alembic - please adjust! ###
    def audit_columns():
        # Fresh Column objects on every call: SQLAlchemy columns cannot be
        # shared between two tables.
        return [
            sa.Column('id', sa.Integer(), nullable=False),
            sa.Column('created_at', sa.DateTime(), nullable=True),
            sa.Column('updated_at', sa.DateTime(), nullable=True),
            sa.Column('user_id', sa.Integer(), nullable=True),
        ]

    attribute_items = audit_columns() + [
        sa.Column('sketch_id', sa.Integer(), nullable=True),
        sa.Column('name', sa.UnicodeText(), nullable=True),
        sa.Column('ontology', sa.UnicodeText(), nullable=True),
        sa.ForeignKeyConstraint(['sketch_id'], ['sketch.id'], ),
        sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
        sa.PrimaryKeyConstraint('id'),
    ]
    op.create_table('attribute', *attribute_items)

    attributevalue_items = audit_columns() + [
        sa.Column('attribute_id', sa.Integer(), nullable=True),
        sa.Column('value', sa.UnicodeText(), nullable=True),
        sa.ForeignKeyConstraint(['attribute_id'], ['attribute.id'], ),
        sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
        sa.PrimaryKeyConstraint('id'),
    ]
    op.create_table('attributevalue', *attributevalue_items)
    # ### end Alembic commands ###
def downgrade():
    """Remove the sketch attribute tables."""
    # ### commands auto generated by Alembic - please adjust! ###
    # BUG FIX: drop the child table first. attributevalue carries a foreign
    # key to attribute, so dropping attribute first violates the dependency
    # on databases that enforce referential integrity.
    op.drop_table('attributevalue')
    op.drop_table('attribute')
    # ### end Alembic commands ###
| {
"repo_name": "google/timesketch",
"path": "timesketch/migrations/versions/fc7bc5c66c63_.py",
"copies": "1",
"size": "1707",
"license": "apache-2.0",
"hash": -1060096485748537600,
"line_mean": 33.8367346939,
"line_max": 66,
"alpha_frac": 0.6596367897,
"autogenerated": false,
"ratio": 3.4346076458752517,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45942444355752515,
"avg_score": null,
"num_lines": null
} |
"""Add Skipped enum value
Revision ID: 8b536bc5d716
Revises: 40c86deefd01
Create Date: 2020-04-27 12:12:47.075110
"""
# revision identifiers, used by Alembic.
revision = '8b536bc5d716'
down_revision = '40c86deefd01'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy_utils.types import TSVectorType
from sqlalchemy_searchable import make_searchable
import sqlalchemy_utils
# Patch in knowledge of the citext type, so it reflects properly.
from sqlalchemy.dialects.postgresql.base import ischema_names
import citext
import queue
import datetime
from sqlalchemy.dialects.postgresql import ENUM
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.dialects.postgresql import TSVECTOR
ischema_names['citext'] = citext.CIText
def upgrade():
    """Add the 'skipped' value to the dlstate_enum PostgreSQL enum type."""
    ### commands auto generated by Alembic - please adjust! ###
    # PostgreSQL (pre-12) disallows ALTER TYPE ... ADD VALUE inside a
    # transaction block, so end Alembic's implicit transaction first.
    op.execute("COMMIT")
    op.execute("ALTER TYPE dlstate_enum ADD VALUE 'skipped';")
    ### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
### end Alembic commands ###
pass | {
"repo_name": "fake-name/ReadableWebProxy",
"path": "alembic/versions/2020-04-27_8b536bc5d716_add_skipped_enum_value.py",
"copies": "1",
"size": "1165",
"license": "bsd-3-clause",
"hash": 8127617468636277000,
"line_mean": 24.9111111111,
"line_max": 65,
"alpha_frac": 0.764806867,
"autogenerated": false,
"ratio": 3.675078864353312,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9855501960844266,
"avg_score": 0.0168767541018092,
"num_lines": 45
} |
# Adds locations to the database from the text dump.
#
# Usage: python add-xy-locations.py | psql pokedex
import re
def make_identifier(name):
    """Make a string safe to use as an identifier.

    Valid characters are lowercase alphanumerics and "-". This function may
    raise ValueError if it can't come up with a suitable identifier.

    This function is useful for scripts which add things with names.
    """
    ident = name.decode('utf-8') if isinstance(name, bytes) else name
    ident = ident.lower().replace(u'+', u' plus ')
    # Runs of spaces, underscores and en dashes become single hyphens;
    # common punctuation and the accented e are normalized away.
    ident = re.sub(u'[ _–]+', u'-', ident)
    ident = re.sub(u"['./;’(),:]", u'', ident)
    ident = ident.replace(u'é', u'e')
    # Kalos routes are qualified with the region prefix.
    if ident.startswith('route-'):
        ident = 'kalos-' + ident
    if not ident.replace(u"-", u"").isalnum():
        raise ValueError(ident)
    return ident
# Open the English location-name dump plus every translation of the same
# text file; each file has the same line count, read in lockstep below.
en = open('rips/text/en/72')
foreign = []
foreign.append(('ja', open('rips/text/ja-kana/72')))
for lang in 'ja-kanji', 'en', 'fr', 'it', 'de', 'es', 'ko':
    f = open('rips/text/'+lang+'/72')
    foreign.append((lang, f))
print("BEGIN;")
# Clear any previously imported Kalos (region 6) locations first.
print("UPDATE pokemon_evolution SET location_id = NULL WHERE location_id in (SELECT id FROM locations WHERE region_id = 6);")
print("DELETE FROM location_game_indices WHERE generation_id = 6;")
print("DELETE FROM location_names WHERE location_id IN (SELECT id FROM locations WHERE region_id = 6);")
print("DELETE FROM locations WHERE region_id=6;")
print("SELECT setval('locations_id_seq', max(id)) FROM locations;")
for i, name in enumerate(en):
    # Advance every translation file in lockstep with the English one.
    # BUG FIX: the loop variable was named `iter`, shadowing the builtin.
    foreign_names = [(lang, next(handle).strip()) for lang, handle in foreign]
    if i == 0:
        continue
    if name == '\n':
        continue
    try:
        ident = make_identifier(name.strip())
    except ValueError:
        continue  # unusable name (e.g. placeholder); skip the row
    print("\echo '%s'" % ident)
    if ident not in ('mystery-zone', 'faraway-place'):
        print("""INSERT INTO locations (identifier, region_id) VALUES ('%s', %s) RETURNING id;""" % (ident, 6))
    for lang, name in foreign_names:
        print("""INSERT INTO location_names (location_id, local_language_id, name) SELECT loc.id, lang.id, '%s' FROM locations loc, languages lang WHERE loc.identifier = '%s' AND (loc.region_id is NULL OR loc.region_id = 6) AND lang.identifier = '%s';""" % (name, ident, lang))
    print("""INSERT INTO location_game_indices (location_id, generation_id, game_index) SELECT id, %s, %s FROM locations WHERE identifier='%s' AND (region_id is NULL OR region_id = 6);""" % (6, i, ident))
# Wire up evolution locations that depend on the rows inserted above.
for pokemon_id, location_identifier in (462, 'kalos-route-13'), (470, 'kalos-route-20'), (471, 'frost-cavern'), (476, 'kalos-route-13'):
    print("UPDATE pokemon_evolution SET location_id = (SELECT id FROM locations WHERE identifier = '%s') WHERE location_id is NULL AND evolved_species_id = %d;" % (location_identifier, pokemon_id))
print("COMMIT;")
| {
"repo_name": "DaMouse404/pokedex",
"path": "scripts/add-xy-locations.py",
"copies": "5",
"size": "2989",
"license": "mit",
"hash": 7143274909119560000,
"line_mean": 43.5373134328,
"line_max": 281,
"alpha_frac": 0.6544906166,
"autogenerated": false,
"ratio": 3.523022432113341,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.667751304871334,
"avg_score": null,
"num_lines": null
} |
"""Adds locking capabilities to payout table, creates new BonusPayout table, and DonationPercent table.
Revision ID: f84430b16b
Revises: 53d03041be56
Create Date: 2014-03-21 16:32:08.622226
"""
# revision identifiers, used by Alembic.
revision = 'f84430b16b'
down_revision = '53d03041be56'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create donation_percent and bonus_payout; add lock/donation columns
    to payout."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        'donation_percent',
        sa.Column('user', sa.String(), nullable=False),
        sa.Column('perc', sa.Integer(), nullable=True),
        sa.PrimaryKeyConstraint('user'))
    op.create_table(
        'bonus_payout',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('user', sa.String(), nullable=True),
        sa.Column('amount', sa.BigInteger(), nullable=True),
        sa.Column('locked', sa.Boolean(), nullable=True),
        sa.Column('description', sa.String(), nullable=True),
        sa.Column('transaction_id', sa.String(), nullable=True),
        sa.ForeignKeyConstraint(['transaction_id'], ['transaction.txid'], ),
        sa.PrimaryKeyConstraint('id'))
    # Server defaults keep existing payout rows valid.
    payout_additions = (
        sa.Column('locked', sa.Boolean(), nullable=True, server_default="FALSE"),
        sa.Column('perc', sa.Float(), nullable=True, server_default="0"),
        sa.Column('perc_applied', sa.BigInteger(), nullable=True, server_default="0"),
    )
    for new_column in payout_additions:
        op.add_column(u'payout', new_column)
    ### end Alembic commands ###
def downgrade():
    """Drop the payout columns and the bonus_payout/donation_percent tables."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column(u'payout', 'perc_applied')
    op.drop_column(u'payout', 'perc')
    op.drop_column(u'payout', 'locked')
    op.drop_table('bonus_payout')
    op.drop_table('donation_percent')
    ### end Alembic commands ###
| {
"repo_name": "simplecrypto/simplecoin",
"path": "migrations/versions/f84430b16b_.py",
"copies": "2",
"size": "1740",
"license": "mit",
"hash": -4455709355528389000,
"line_mean": 36.0212765957,
"line_max": 107,
"alpha_frac": 0.6775862069,
"autogenerated": false,
"ratio": 3.3461538461538463,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5023740053053847,
"avg_score": null,
"num_lines": null
} |
"""Adds LRU cache management and automatic data timeout to Python's Shelf.
Classes:
LRUShelf: A shelf with LRU cache management.
TimeoutShelf: A shelf with automatic data timeout features.
LRUTimeoutShelf: A shelf with LRU cache management and data timeout.
Functions:
open: Open a database file as a persistent dictionary.
"""
from collections import deque
from shelve import Shelf
import sys
from time import time
is_py3 = sys.version_info[0] > 2
DEFAULT_MAXSIZE = 300
DEFAULT_TIMEOUT = 300 # 5 minutes
class _LRUMixin(object):
    """Adds LRU cache management to containers, e.g. :class:`~shelve.Shelf`.

    This mixin will keep a container under a given size by discarding the
    least recently used items when the container overflows.

    .. NOTE::
        The queue that keeps track of which keys are the least recently used
        is not stored in the container itself. This means that even if the
        container is persistent, the LRU queue will not persist with the data.

    For this mixin to work well, all dict methods that involve setting a key,
    getting a value, or deleting a key need to be routed through this class'
    :meth:`__setitem__`, :meth:`__getitem__`, and :meth:`__delitem__`. The
    built-in dict class won't do this by default, so it is better to inherit
    from UserDict if you want to make a custom dictionary. If you subclass
    dict, you might want to also inherit from
    :class:`~collections.abc.MutableMapping` so the _LRUMixin will work
    properly. Otherwise, you will need to manually code methods such as
    ``update()``, ``copy()``, ``keys()``, ``values()``, etc. So, it's best to
    stick with :class:`~collections.abc.MutableMapping` or
    :class:`~collections.UserDict` if possible.
    """

    def __init__(self, *args, **kwargs):
        """Initialize LRU size management for a container.

        Keyword arguments:
            maxsize: The maximum size the container should be. Defaults to
                module-level DEFAULT_MAXSIZE.
        """
        self.maxsize = kwargs.get('maxsize', DEFAULT_MAXSIZE)
        if 'maxsize' in kwargs:
            del kwargs['maxsize']
        if self.maxsize is None:
            # Explicitly passing maxsize=None is rejected; pass 0 to disable
            # the size limit instead.
            raise TypeError("maxsize must be a non-negative integer")
        super(_LRUMixin, self).__init__(*args, **kwargs)
        self._queue = deque()  # create a queue of keys
        for key in list(self.keys()):  # populate queue with existing keys
            self._remove_add_key(key)

    def _remove_add_key(self, key):
        """Move a key to the end of the linked list and discard old entries."""
        if not hasattr(self, '_queue'):
            return  # haven't initialized yet, so don't bother
        if key in self._queue:
            self._queue.remove(key)
        self._queue.append(key)
        if self.maxsize == 0:
            return  # maxsize 0 disables the size limit: never evict
        while len(self._queue) > self.maxsize:
            # Evict the least recently used key; __delitem__ also removes
            # it from the front of the queue.
            del self[self._queue[0]]

    def __getitem__(self, key):
        # A successful read counts as a "use" for LRU ordering.
        value = super(_LRUMixin, self).__getitem__(key)
        self._remove_add_key(key)
        return value

    def __setitem__(self, key, value):
        super(_LRUMixin, self).__setitem__(key, value)
        self._remove_add_key(key)

    def __delitem__(self, key):
        super(_LRUMixin, self).__delitem__(key)
        if hasattr(self, '_queue'):
            self._queue.remove(key)
class _TimeoutMixin(object):
    """A mixin that adds automatic data timeout to mapping containers.

    If you try to access an expired key, a KeyError will be raised, just like
    when you try to access a non-existent key.

    For this mixin to work well, all dict methods that involve setting a key,
    getting a value, deleting a key, iterating over the container, or getting
    the length or formal representation need to be routed through this class'
    :meth:`__setitem__`, :meth:`__getitem__`, :meth:`__delitem__`,
    :meth:`__iter__`, :meth:`__len__`, and :meth:`__repr__`. The built-in dict
    class won't do this by default, so it is better to inherit from
    :class:`~collections.UserDict` if you want to make a custom dictionary. If
    you subclass dict, you might want to also inherit from
    :class:`~collections.abc.MutableMapping` so the _TimeoutMixin will work
    properly. Otherwise, you will need to manually code methods such as
    ``update()``, ``copy()``, ``keys()``, ``values()``, etc. So, it's
    best to stick with :class:`~collections.abc.MutableMapping` or
    :class:`~collections.UserDict` if possible.

    Attributes:
        timeout: The default timeout value in seconds.
            A zero means that keys won't timeout by default.
        _index: The timeout index mapping (maps keys to timeout values).
        _INDEX: The key name used for the timeout index.
    """

    #: The timeout index key name. This key is considered protected and access
    #: to it is blocked.
    _INDEX = 'f1dd04ff3d4d9adfabd43a3f9fda9b4b78302b21'

    def __init__(self, *args, **kwargs):
        """Initialize the timeout features of the mapping container.

        After calling the base class' __init__() method, the timeout index
        is read from the container or created if it doesn't exist. Then, any
        existing expired values are deleted.

        Keyword arguments:
            timeout: The default timeout value in seconds to use. If
                not present, the module-level constant timeout value
                is used.
        """
        self.timeout = kwargs.get('timeout', DEFAULT_TIMEOUT)
        if 'timeout' in kwargs:
            del kwargs['timeout']
        if self.timeout is None:
            # Explicitly passing timeout=None is rejected; pass 0 for
            # "never expire" instead.
            raise TypeError("timeout must be a non-negative integer")
        super(_TimeoutMixin, self).__init__(*args, **kwargs)
        try:
            self._index = super(_TimeoutMixin, self).__getitem__(self._INDEX)
        except KeyError:
            # First use of this container: create and store a fresh index.
            self._index = {}
            super(_TimeoutMixin, self).__setitem__(self._INDEX, self._index)
        else:
            for key in self:
                pass  # Force keys to expire using __iter__().

    def _is_expired(self, key):
        """Check if a key is expired. If so, delete the key."""
        if not hasattr(self, '_index'):
            return False  # haven't initialized yet, so don't bother
        try:
            timeout = self._index[key]
        except KeyError:
            # Key exists in the container but not in the index (e.g. written
            # before this mixin was in use); register it now.
            if self.timeout:
                self._index[key] = int(time() + self.timeout)
            else:
                self._index[key] = None
            return False
        if timeout is None or timeout >= time():
            return False  # no expiry recorded, or not yet due
        del self[key]  # key expired, so delete it from container
        return True

    def __getitem__(self, key):
        if key == self._INDEX:
            raise KeyError("cannot access protected key '%s'" % self._INDEX)
        try:
            if not self._is_expired(key):
                return super(_TimeoutMixin, self).__getitem__(key)
        except KeyError:
            pass
        # Either expired or genuinely missing: report the caller's key.
        raise KeyError(key)

    def set(self, key, func, *args, **kwargs):
        """Return key's value if it exists, otherwise call given function.

        :param key: The key to lookup/set.
        :param func: A function to use if the key doesn't exist.

        All other arguments and keyword arguments are passed to *func*.
        """
        if key in self:
            return self[key]
        self[key] = value = func(*args, **kwargs)
        return value

    def settimeout(self, key, value, timeout):
        """Set a key with a timeout value (in seconds).

        :meth:`settimeout` is used to override the shelf's timeout value.

        :param timeout: The timeout value in seconds for the given key.
            ``0`` means that the key will never expire.
        :type timeout: integer
        """
        self[key] = value
        if not hasattr(self, '_index'):
            return  # don't update index if __init__ hasn't completed
        self._index[key] = int(time() + timeout) if timeout else None

    def __setitem__(self, key, value):
        if key == self._INDEX:
            raise TypeError("reserved key name '%s'" % self._INDEX)
        super(_TimeoutMixin, self).__setitem__(key, value)
        if not hasattr(self, '_index'):
            return  # don't update index if __init__ hasn't completed
        self._index[key] = int(time() + self.timeout) if self.timeout else None

    def __delitem__(self, key):
        if key == self._INDEX:
            raise KeyError("cannot delete protected key '%s'" % self._INDEX)
        super(_TimeoutMixin, self).__delitem__(key)
        if not hasattr(self, '_index'):
            return  # don't update index if __init__ hasn't completed
        del self._index[key]

    def __iter__(self):
        # Iterating also lazily deletes expired keys via _is_expired().
        for key in super(_TimeoutMixin, self).__iter__():
            if key == self._INDEX:
                continue
            if not self._is_expired(key):
                yield key

    def __contains__(self, key):
        """Hide the timeout index from __contains__."""
        if key == self._INDEX:
            return False
        return super(_TimeoutMixin, self).__contains__(key)

    def __len__(self):
        """Hide the timeout index from the object's length."""
        return super(_TimeoutMixin, self).__len__() - 1

    def __repr__(self):
        """Remove the timeout index from the object representation."""
        for key in self:  # delete expired data via __iter__()
            pass
        super(_TimeoutMixin, self).__delitem__(self._INDEX)  # hide the index
        _repr = super(_TimeoutMixin, self).__repr__()
        super(_TimeoutMixin, self).__setitem__(self._INDEX, self._index)
        return _repr

    def sync(self):
        """Sync the timeout index entry with the shelf."""
        # NOTE(review): relies on shelve.Shelf attributes (.writeback,
        # .cache, .dict) -- confirm before mixing into other containers.
        if self.writeback and self.cache:
            super(_TimeoutMixin, self).__delitem__(self._INDEX)
            super(_TimeoutMixin, self).sync()
            self.writeback = False
            super(_TimeoutMixin, self).__setitem__(self._INDEX, self._index)
            self.writeback = True
        if hasattr(self.dict, 'sync'):
            self.dict.sync()

    def __del__(self):
        """Sync timeout index when object is deleted."""
        super(_TimeoutMixin, self).__setitem__(self._INDEX, self._index)
        # NOTE(review): assumes a parent class defines __del__ (presumably
        # shelve.Shelf on the targeted Python versions) -- confirm.
        super(_TimeoutMixin, self).__del__()

    def __exit__(self, *exc_info):
        """Sync timeout index on exit."""
        self.sync()
        super(_TimeoutMixin, self).__exit__(*exc_info)
class _NewOldMixin(object):
    """Makes certain dict methods follow MRO to the container."""

    def __init__(self, *args, **kwargs):
        # 'old_class' names the concrete container class (e.g. Shelf) whose
        # implementations are invoked directly below, bypassing the MRO.
        self._class = kwargs.pop('old_class')
        self._class.__init__(self, *args, **kwargs)

    def __getitem__(self, key):
        return self._class.__getitem__(self, key)

    def __setitem__(self, key, value):
        return self._class.__setitem__(self, key, value)

    def __delitem__(self, key):
        return self._class.__delitem__(self, key)

    def __iter__(self):
        return self._class.__iter__(self)

    def __len__(self):
        return self._class.__len__(self)
class LRUShelf(_LRUMixin, _NewOldMixin, Shelf):
    """A :class:`~shelve.Shelf` with LRU cache management.

    .. NOTE::
        The *keyencoding* keyword argument is only used in Python 3.
    """

    def __init__(self, *args, **kwargs):
        """Initialize LRU cache management.

        :param maxsize: The maximum size the container is allowed to grow to.
            ``0`` means that no size limit is enforced.
        :type maxsize: integer
        """
        # old_class=Shelf routes raw item access directly to Shelf via
        # _NewOldMixin, so the mixins control the LRU bookkeeping.
        super(LRUShelf, self).__init__(*args, old_class=Shelf, **kwargs)
class TimeoutShelf(_TimeoutMixin, _NewOldMixin, Shelf):
    """A :class:`~shelve.Shelf` with automatic data timeout.

    .. NOTE::
        The *keyencoding* keyword argument is only used in Python 3.
    """

    def __init__(self, *args, **kwargs):
        """Initialize the data timeout index.

        :param timeout: The default timeout value for data (in seconds). ``0``
            means that the data never expires.
        :type timeout: integer
        """
        super(TimeoutShelf, self).__init__(*args, old_class=Shelf, **kwargs)

    # Python 2 only: Shelf.keys() returns a list directly from the backing
    # dict, so the timeout index must be filtered out by hand.
    if not is_py3:
        def keys(self):
            """Override :meth:`~shelve.Shelf.keys` to hide timeout index.

            This also removes expired keys.
            """
            _keys = self.dict.keys()
            if self._INDEX in _keys:
                _keys.remove(self._INDEX)
            keys = []
            for key in _keys:
                if not self._is_expired(key):
                    keys.append(key)
            return keys
class LRUTimeoutShelf(_LRUMixin, TimeoutShelf):
    """A :class:`~shelve.Shelf` with LRU cache management and data timeout.

    .. NOTE::
        The *keyencoding* keyword argument is only used in Python 3.
    """

    def __init__(self, *args, **kwargs):
        """Initialize LRU cache management and data timeout index.

        :param maxsize: The maximum size the container is allowed to grow to.
            ``0`` means that no size limit is enforced.
        :type maxsize: integer
        :param timeout: The default timeout value for data (in seconds). ``0``
            means that the data never expires.
        :type timeout: integer
        """
        # TimeoutShelf.__init__ supplies old_class=Shelf; the mixins consume
        # their own keyword arguments along the MRO.
        super(LRUTimeoutShelf, self).__init__(*args, **kwargs)
def open(filename, flag='c', protocol=None, writeback=False,
         maxsize=DEFAULT_MAXSIZE, timeout=DEFAULT_TIMEOUT):
    """Open a database file as a persistent dictionary.

    The underlying file is opened with :func:`dbm.open`, so performance
    depends on which :mod:`dbm` modules are installed. The returned class
    depends on *maxsize* and *timeout*: a :data:`None` value for either
    disables LRU cache management or automatic data timeout respectively.

    :param filename: The base filename passed to :func:`dbm.open`.
    :param flag: The flag to pass to :func:`dbm.open`.
    :param protocol: The pickle protocol to pass to :func:`pickle.dump`.
    :param writeback: Whether to write back all accessed entries on
        :meth:`Shelf.sync <shelve.Shelf.sync>` and
        :meth:`Shelf.close <shelve.Shelf.close>`.
    :type writeback: bool
    :param maxsize: Maximum container size; ``0`` means unlimited and
        :data:`None` disables LRU management.
    :type maxsize: integer or :data:`None`
    :param timeout: Default data timeout in seconds; ``0`` means never
        expire and :data:`None` disables timeout features.
    :type timeout: integer or :data:`None`
    :return: A shelf
    :rtype: :class:`~shelve.Shelf`, :class:`LRUShelf`, :class:`TimeoutShelf`,
        or :class:`LRUTimeoutShelf`
    """
    import dbm
    backing = dbm.open(filename, flag)
    lru_enabled = maxsize is not None
    timeout_enabled = timeout is not None
    if lru_enabled and timeout_enabled:
        return LRUTimeoutShelf(backing, protocol, writeback,
                               timeout=timeout, maxsize=maxsize)
    if lru_enabled:
        return LRUShelf(backing, protocol, writeback, maxsize=maxsize)
    if timeout_enabled:
        return TimeoutShelf(backing, protocol, writeback, timeout=timeout)
    return Shelf(backing, protocol, writeback)
| {
"repo_name": "tsroten/ticktock",
"path": "ticktock.py",
"copies": "1",
"size": "15620",
"license": "mit",
"hash": 923405044000628700,
"line_mean": 36.6385542169,
"line_max": 79,
"alpha_frac": 0.6101152369,
"autogenerated": false,
"ratio": 4.1843021698365925,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 415
} |
"""Add slugs to events.
Revision ID: 177a65486a0
Revises: 32dc1be3b22
Create Date: 2015-04-27 04:40:15.594924
"""
# revision identifiers, used by Alembic.
revision = '177a65486a0'
down_revision = '32dc1be3b22'
from alembic import op
from slugify import slugify
import sqlalchemy as sa
from sqlalchemy import Integer, String
from sqlalchemy.sql import table, column
# Lightweight table definition used for the data migration below; avoids
# importing the application's models into the migration.
events_table = table(
    'events',
    column('id', Integer),
    column('name', String),
    column('slug', String),
)
def upgrade():
    """Add a unique 'slug' column to events, backfilled from each event name,
    then make it non-nullable."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('events', sa.Column('slug', sa.String(length=75), nullable=True))
    op.create_unique_constraint('events_slug_key', 'events', ['slug'])
    ### end Alembic commands ###

    # Backfill slugs for existing rows before tightening the constraint.
    conn = op.get_bind()
    events = conn.execute(events_table.select())
    for event in events:
        if not event.slug:
            op.execute(
                events_table.update().where(
                    events_table.c.id == event.id
                ).values(
                    slug=slugify(event.name)
                )
            )
    # BUG FIX: op.alter_column() takes the column *name* as its second
    # argument, not a Column object; the previous call passed
    # sa.Column('slug', ...) where the string 'slug' belongs.
    op.alter_column('events', 'slug', existing_type=sa.String(length=75),
                    nullable=False)
def downgrade():
    """Remove the events slug column and its unique constraint."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint('events_slug_key', 'events', type_='unique')
    op.drop_column('events', 'slug')
    ### end Alembic commands ###
| {
"repo_name": "djds23/pygotham-1",
"path": "migrations/versions/177a65486a0_.py",
"copies": "3",
"size": "1418",
"license": "bsd-3-clause",
"hash": 3136896212389005000,
"line_mean": 26.2692307692,
"line_max": 86,
"alpha_frac": 0.6248236953,
"autogenerated": false,
"ratio": 3.598984771573604,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5723808466873603,
"avg_score": null,
"num_lines": null
} |
"""Adds metric counter"""
from baseCmd import *
from baseResponse import *
class createCounterCmd(baseCmd):
    """Adds metric counter (asynchronous command).

    Required parameters: name (counter name), source (counter source) and
    value (e.g. oid in case of snmp); all typed as strings.
    """
    typeInfo = {}

    def __init__(self):
        self.isAsync = "true"
        # Every parameter starts unset and is registered as a string type.
        for attr in ('name', 'source', 'value'):
            setattr(self, attr, None)
            self.typeInfo[attr] = 'string'
        self.required = ["name", "source", "value", ]
class createCounterResponse (baseResponse):
    """Response object for the createCounter API call."""
    typeInfo = {}

    def __init__(self):
        # id: id of the counter; name/source/value mirror the request
        # fields; zoneid: zone the counter belongs to.  All are strings.
        for field in ("id", "name", "source", "value", "zoneid"):
            setattr(self, field, None)
            self.typeInfo[field] = 'string'
| {
"repo_name": "MissionCriticalCloud/marvin",
"path": "marvin/cloudstackAPI/createCounter.py",
"copies": "1",
"size": "1275",
"license": "apache-2.0",
"hash": -3723135960603883000,
"line_mean": 27.3333333333,
"line_max": 63,
"alpha_frac": 0.5364705882,
"autogenerated": false,
"ratio": 4.0476190476190474,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9972978524707936,
"avg_score": 0.022222222222222223,
"num_lines": 45
} |
"""Adds missing 'niceToHaveRequirements' to old published Brief data blobs.
Revision ID: 990
Revises: 980
Create Date: 2017-09-05 17:08:57.947569
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '990'
down_revision = '980'
# Minimal mapping of the briefs table -- just the columns this data
# migration reads and writes (id, JSON data blob, publication timestamp).
briefs_table = sa.Table(
    'briefs',
    sa.MetaData(),
    sa.Column('id', sa.Integer, primary_key=True),
    sa.Column('data', sa.JSON, nullable=True),
    sa.Column('published_at', sa.DateTime, nullable=True)
)
def upgrade():
    """Backfill 'niceToHaveRequirements' (as an empty list) on every
    published brief whose data blob is missing the key."""
    conn = op.get_bind()
    # SELECT id, data FROM briefs WHERE briefs.published_at IS NOT null
    select_published = briefs_table.select(
        briefs_table.c.published_at != sa.null()
    ).with_only_columns(
        briefs_table.c.id,
        briefs_table.c.data
    )
    for brief_id, brief_data in conn.execute(select_published).fetchall():
        if 'niceToHaveRequirements' in brief_data:
            continue
        brief_data['niceToHaveRequirements'] = []
        # UPDATE briefs SET data = brief_data WHERE id = brief_id;
        conn.execute(
            briefs_table.update().where(
                briefs_table.c.id == brief_id
            ).values(data=brief_data)
        )
def downgrade():
    # Irreversible data migration: we cannot tell which briefs originally
    # lacked the key, so downgrade is intentionally a no-op.
    pass
| {
"repo_name": "alphagov/digitalmarketplace-api",
"path": "migrations/versions/990_add_missing_nice_to_have_requirements_field.py",
"copies": "1",
"size": "1210",
"license": "mit",
"hash": 4541472595939958000,
"line_mean": 25.8888888889,
"line_max": 102,
"alpha_frac": 0.652892562,
"autogenerated": false,
"ratio": 3.201058201058201,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9352872014406637,
"avg_score": 0.00021574973031283708,
"num_lines": 45
} |
"""Add SnapshotImage
Revision ID: 30c86cc72856
Revises: 17d61d9621cb
Create Date: 2014-08-13 16:24:07.482438
"""
# revision identifiers, used by Alembic.
revision = '30c86cc72856'
down_revision = '17d61d9621cb'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the snapshot_image table and make snapshot.build_id unique."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        'snapshot_image',
        sa.Column('id', sa.GUID(), nullable=False),
        sa.Column('snapshot_id', sa.GUID(), nullable=False),
        sa.Column('plan_id', sa.GUID(), nullable=False),
        sa.Column('job_id', sa.GUID(), nullable=True, unique=True),
        sa.Column('date_created', sa.DateTime(), nullable=False),
        sa.ForeignKeyConstraint(['job_id'], ['job.id'], ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['plan_id'], ['plan.id'], ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['snapshot_id'], ['snapshot.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('snapshot_id', 'plan_id', name='unq_snapshotimage_plan')
    )
    # BUG FIX: give the constraint an explicit name.  The auto-generated
    # `None` name left by Alembic cannot be referenced by the matching
    # drop_constraint() in downgrade() (which raises when given None).
    op.create_unique_constraint('uq_snapshot_build_id', 'snapshot', ['build_id'])
def downgrade():
    """Drop the snapshot.build_id unique constraint and the snapshot_image table."""
    # BUG FIX: drop_constraint() requires the constraint *name*; the original
    # passed None, which raises at runtime.  The name matches the explicit
    # one given to the constraint in upgrade().
    op.drop_constraint('uq_snapshot_build_id', 'snapshot', type_='unique')
    op.drop_table('snapshot_image')
| {
"repo_name": "dropbox/changes",
"path": "migrations/versions/30c86cc72856_add_snapshotimage.py",
"copies": "3",
"size": "1228",
"license": "apache-2.0",
"hash": -3579563337268420000,
"line_mean": 31.3157894737,
"line_max": 86,
"alpha_frac": 0.654723127,
"autogenerated": false,
"ratio": 3.449438202247191,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5604161329247191,
"avg_score": null,
"num_lines": null
} |
"""add snap_status table
Revision ID: fb372bb87c37
Revises: 64c7a405e4c8
Create Date: 2019-03-19 22:32:13.745440+00:00
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'fb372bb87c37'
down_revision = '64c7a405e4c8'
branch_labels = None
depends_on = None
def upgrade():
    """Create the snap_status monitoring table, keyed on (time, hostname)."""
    status_columns = [
        sa.Column('time', sa.BigInteger(), nullable=False),
        sa.Column('hostname', sa.String(), nullable=False),
        sa.Column('node', sa.Integer(), nullable=True),
        sa.Column('snap_loc_num', sa.Integer(), nullable=True),
        sa.Column('serial_number', sa.String(), nullable=True),
        sa.Column('psu_alert', sa.Boolean(), nullable=True),
        sa.Column('pps_count', sa.BigInteger(), nullable=True),
        sa.Column('fpga_temp', sa.Float(), nullable=True),
        sa.Column('uptime_cycles', sa.BigInteger(), nullable=True),
        sa.Column('last_programmed_time', sa.BigInteger(), nullable=True),
    ]
    op.create_table(
        'snap_status',
        *status_columns,
        sa.PrimaryKeyConstraint('time', 'hostname')
    )
def downgrade():
    # Reverse of upgrade(): remove the snap_status table entirely.
    op.drop_table('snap_status')
| {
"repo_name": "HERA-Team/Monitor_and_Control",
"path": "alembic/versions/fb372bb87c37_add_snap_status_table.py",
"copies": "2",
"size": "1294",
"license": "bsd-2-clause",
"hash": 8414598632374373000,
"line_mean": 34.9444444444,
"line_max": 86,
"alpha_frac": 0.5927357032,
"autogenerated": false,
"ratio": 3.729106628242075,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5321842331442075,
"avg_score": null,
"num_lines": null
} |
# Adds new customers to the customer database
from faker import Faker
import openpyxl
def add_customer(n):
    """Create ``n`` dummy customers with Faker.

    Args:
        n: Number of customer records to generate.

    Returns:
        list. List of dictionaries, each with keys 'first_name',
        'last_name', 'phone', 'email' and 'date'.
    """
    f = Faker()
    # Idiom fix: build the list with a comprehension; the old 1-based loop
    # counter and the redundant ``num_cust`` alias added nothing.
    return [
        {
            'first_name': f.first_name(),
            'last_name': f.last_name(),
            'phone': f.phone_number(),
            'email': f.email(),
            'date': f.date_time_this_year(),
        }
        for _ in range(n)
    ]
def update_worksheet(customer_list):
    """Add customer(s) to the database workbook.

    Args:
        customer_list: list of dicts as produced by add_customer().
    """
    wb = openpyxl.load_workbook('customer_data-edited-dates.xlsx')
    sheet = wb.active
    # Column order in the sheet; one write per (row, field) pair replaces
    # the original five copy-pasted assignment statements.
    fields = ('first_name', 'last_name', 'phone', 'email', 'date')
    # Rows start at 2 to account for the header row and 1-based indexing.
    for row, customer in enumerate(customer_list, start=2):
        for col, field in enumerate(fields, start=1):
            sheet.cell(row=row, column=col).value = customer[field]
    wb.save('customer_data-edited-dates.xlsx')
# Script entry point: generate 100 fake customers and write them to the sheet.
if __name__ == "__main__":
    update_worksheet(add_customer(100))
| {
"repo_name": "terrameijar/excel-extractor",
"path": "add_customer.py",
"copies": "1",
"size": "1596",
"license": "mit",
"hash": -5432519560929418000,
"line_mean": 30.2941176471,
"line_max": 78,
"alpha_frac": 0.5958646617,
"autogenerated": false,
"ratio": 3.6108597285067874,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9703093308144333,
"avg_score": 0.0007262164124909223,
"num_lines": 51
} |
"""add soft delete feature table
Revision ID: af4279aa1f64
Revises: fa2cd0ecc30f
Create Date: 2019-05-13 11:47:29.388502
"""
# revision identifiers, used by Alembic.
revision = 'af4279aa1f64'
down_revision = 'fa2cd0ecc30f'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Create the deleted_features soft-delete log and detach the
    toggles -> features foreign key."""
    op.create_table(
        'deleted_features',
        sa.Column('name', sa.String(length=50), nullable=False),
        sa.Column('deleted_on', postgresql.TIMESTAMP(), nullable=True),
        sa.Column('deleted_by', sa.String(), nullable=True),
        sa.ForeignKeyConstraint(['deleted_by'], ['employees.username'], ),
        sa.PrimaryKeyConstraint('name'),
        sa.UniqueConstraint('name')
    )
    # Features may now be soft-deleted, so toggles can no longer hold a hard
    # reference into the features table.
    op.drop_constraint('toggles_feature_fkey', 'toggles', type_='foreignkey')
def downgrade():
    """Restore the toggles -> features FK, then drop the soft-delete table."""
    op.create_foreign_key('toggles_feature_fkey', 'toggles', 'features', ['feature'], ['name'])
    op.drop_table('deleted_features')
| {
"repo_name": "CanopyTax/toggle-meister",
"path": "migrations/versions/af4279aa1f64_add_soft_delete_feature_table.py",
"copies": "1",
"size": "1256",
"license": "apache-2.0",
"hash": 4289506201621261000,
"line_mean": 32.0526315789,
"line_max": 95,
"alpha_frac": 0.6281847134,
"autogenerated": false,
"ratio": 3.7604790419161676,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9869951068087917,
"avg_score": 0.0037425374456503168,
"num_lines": 38
} |
# Configure a Broyden quasi-Newton solver over the coupled hyperloop model.
solver = self.add('solver',BroydenSolver())
solver.itmax = 50 #maximum solver iterations
solver.tol = .001 #convergence tolerance

# Free variables the solver may adjust (bounds are effectively unbounded).
solver.add_parameter('compress.W_in',low=-1e15,high=1e15)
solver.add_parameter('compress.c2_PR_des', low=-1e15, high=1e15)
# A single parameter can drive several linked inputs at once.
solver.add_parameter(['compress.Ts_tube','flow_limit.Ts_tube',
                      'tube_wall_temp.temp_boundary'], low=-1e-15, high=1e15)
solver.add_parameter(['flow_limit.radius_tube', 'pod.radius_tube_inner']
                     , low=-1e15, high=1e15)

# Residual equations driven to zero (the .01 factors scale the residuals;
# NOTE(review): presumably for solver conditioning -- confirm).
solver.add_constraint('.01*(compress.W_in-flow_limit.W_excess) = 0')
solver.add_constraint('compress.Ps_bearing_residual=0')
solver.add_constraint('tube_wall_temp.ss_temp_residual=0')
solver.add_constraint('.01*(pod.area_compressor_bypass-compress.area_c1_out)=0')

# Top-level driver runs the solver and records converged cases to CSV.
driver = self.driver
driver.workflow.add('solver')
driver.recorders = [CSVCaseRecorder(filename="hyperloop.csv")]#record only converged
driver.printvars = ['Mach_bypass', 'Mach_pod_max', 'Mach_c1_in',
                    'c1_PR_des', 'pod.radius_inlet_back_outer',
                    'pod.inlet.radius_back_inner', 'flow_limit.radius_tube',
                    'compress.W_in', 'compress.c2_PR_des', 'pod.net_force',
                    'compress.F_net','compress.pwr_req','pod.energy','mission.time',
                    'compress.speed_max', 'tube_wall_temp.temp_boundary']

# Components evaluated on each solver iteration.
solver.workflow.add(['compress','mission','pod','flow_limit','tube_wall_temp'])
"repo_name": "kennethdecker/MagnePlane",
"path": "paper/code/example3.py",
"copies": "13",
"size": "1417",
"license": "apache-2.0",
"hash": 9023902364324520000,
"line_mean": 47.8965517241,
"line_max": 84,
"alpha_frac": 0.6908962597,
"autogenerated": false,
"ratio": 2.8511066398390343,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""add some probably good indexes (???)
Revision ID: 6d298e6406f2
Revises: 8fac6e10bdb3
Create Date: 2017-08-14 20:27:49.103672
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6d298e6406f2'
down_revision = '8fac6e10bdb3'
branch_labels = None
depends_on = None
def upgrade():
    """Add single-column indexes for common lookup columns."""
    # Same indexes, same order as the autogenerated version.
    for table_name, col in (
        ('accounts', 'last_delete'),
        ('accounts', 'last_fetch'),
        ('accounts', 'last_refresh'),
        ('oauth_tokens', 'account_id'),
        ('posts', 'author_id'),
        ('sessions', 'account_id'),
    ):
        op.create_index(op.f('ix_%s_%s' % (table_name, col)),
                        table_name, [col], unique=False)
def downgrade():
    """Drop the indexes created in upgrade(), in reverse order."""
    for table_name, col in (
        ('sessions', 'account_id'),
        ('posts', 'author_id'),
        ('oauth_tokens', 'account_id'),
        ('accounts', 'last_refresh'),
        ('accounts', 'last_fetch'),
        ('accounts', 'last_delete'),
    ):
        op.drop_index(op.f('ix_%s_%s' % (table_name, col)), table_name=table_name)
| {
"repo_name": "codl/forget",
"path": "migrations/versions/6d298e6406f2_add_some_probably_good_indexes.py",
"copies": "1",
"size": "1368",
"license": "isc",
"hash": -1816445155139426300,
"line_mean": 39.2352941176,
"line_max": 101,
"alpha_frac": 0.692251462,
"autogenerated": false,
"ratio": 2.9044585987261144,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4096710060726114,
"avg_score": null,
"num_lines": null
} |
"""Add some stock stuff
Revision ID: afc57bd04606
Revises: 580385482580
Create Date: 2017-02-25 18:59:11.096406
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'afc57bd04606'
down_revision = '580385482580'
branch_labels = None
depends_on = None
def upgrade():
    """Add the guild stock-announcement column, make stock channels unique,
    and add crash-tracking columns to user__stock.

    The user__stock columns are added via raw SQL so server-side defaults
    populate existing rows.
    """
    op.add_column('guild', sa.Column('stock_announcement_id', sa.BigInteger(), nullable=True))
    # BUG FIX: name the constraint explicitly.  The autogenerated `None`
    # name cannot be referenced by downgrade()'s drop_constraint().
    op.create_unique_constraint('uq_stock_channel_id', 'stock', ['channel_id'])
    conn = op.get_bind()
    conn.execute("""BEGIN;
    ALTER TABLE user__stock ADD COLUMN crashed BOOLEAN NOT NULL DEFAULT FALSE;
    ALTER TABLE user__stock ADD COLUMN crashed_at FLOAT NOT NULL DEFAULT 0.0;
    COMMIT;
    """)
# ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): drop the crash columns, the unique constraint,
    and the announcement column."""
    op.drop_column('user__stock', 'crashed_at')
    op.drop_column('user__stock', 'crashed')
    # BUG FIX: drop_constraint(None, ...) raises at runtime; use the explicit
    # name given to the constraint in upgrade().
    op.drop_constraint('uq_stock_channel_id', 'stock', type_='unique')
    op.drop_column('guild', 'stock_announcement_id')
| {
"repo_name": "MJB47/Jokusoramame",
"path": "migrations/versions/afc57bd04606_add_some_stock_stuff.py",
"copies": "1",
"size": "1289",
"license": "mit",
"hash": -1147986206885681400,
"line_mean": 31.225,
"line_max": 94,
"alpha_frac": 0.6826997673,
"autogenerated": false,
"ratio": 3.330749354005168,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4513449121305168,
"avg_score": null,
"num_lines": null
} |
""" Adds organization name to the project search tsv
Revision ID: 4b2b7cde821f
Revises: 15593ff6a15f
Create Date: 2015-11-30 17:21:56.928359
"""
# revision identifiers, used by Alembic.
revision = '4b2b7cde821f'
down_revision = '15593ff6a15f'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Recreate the project search trigger so the tsvector body also
    indexes the organization name (at weight 'A')."""
    # Drop-then-create: CREATE FUNCTION (without OR REPLACE) requires the
    # old trigger and function to be gone first.
    droptrigger = "DROP TRIGGER IF EXISTS tsvupdate_projects_trigger ON project"
    droptriggerfunc = "DROP FUNCTION IF EXISTS project_search_trigger()"
    createtriggerfunc = '''
    CREATE FUNCTION project_search_trigger() RETURNS trigger AS $$
    begin
      new.tsv_body :=
         setweight(to_tsvector('pg_catalog.english', coalesce(new.status,'')), 'A') ||
         setweight(to_tsvector('pg_catalog.english', coalesce(new.tags,'')), 'A') ||
         setweight(to_tsvector('pg_catalog.english', coalesce(new.name,'')), 'B') ||
         setweight(to_tsvector('pg_catalog.english', coalesce(new.description,'')), 'B') ||
         setweight(to_tsvector('pg_catalog.english', coalesce(new.languages,'')), 'A') ||
         setweight(to_tsvector('pg_catalog.english', coalesce(new.organization_name,'')), 'A');
      return new;
    end
    $$ LANGUAGE plpgsql;
    '''
    createtrigger = "CREATE TRIGGER tsvupdate_projects_trigger BEFORE INSERT OR UPDATE ON project FOR EACH ROW EXECUTE PROCEDURE project_search_trigger();"
    op.execute(droptrigger)
    op.execute(droptriggerfunc)
    op.execute(createtriggerfunc)
    op.execute(createtrigger)
def downgrade():
    """Restore the previous trigger function (without organization_name)."""
    droptrigger = "DROP TRIGGER IF EXISTS tsvupdate_projects_trigger ON project"
    droptriggerfunc = "DROP FUNCTION IF EXISTS project_search_trigger()"
    # Identical to the upgrade() function body minus the organization_name term.
    createtriggerfunc = '''
    CREATE FUNCTION project_search_trigger() RETURNS trigger AS $$
    begin
      new.tsv_body :=
         setweight(to_tsvector('pg_catalog.english', coalesce(new.status,'')), 'A') ||
         setweight(to_tsvector('pg_catalog.english', coalesce(new.tags,'')), 'A') ||
         setweight(to_tsvector('pg_catalog.english', coalesce(new.name,'')), 'B') ||
         setweight(to_tsvector('pg_catalog.english', coalesce(new.description,'')), 'B') ||
         setweight(to_tsvector('pg_catalog.english', coalesce(new.languages,'')), 'A');
      return new;
    end
    $$ LANGUAGE plpgsql;
    '''
    createtrigger = "CREATE TRIGGER tsvupdate_projects_trigger BEFORE INSERT OR UPDATE ON project FOR EACH ROW EXECUTE PROCEDURE project_search_trigger();"
    op.execute(droptrigger)
    op.execute(droptriggerfunc)
    op.execute(createtriggerfunc)
    op.execute(createtrigger)
| {
"repo_name": "smalley/cfapi",
"path": "migrations/versions/4b2b7cde821f_add_org_name_to_search.py",
"copies": "2",
"size": "2641",
"license": "mit",
"hash": -8318985161012931000,
"line_mean": 43.0166666667,
"line_max": 155,
"alpha_frac": 0.6580840591,
"autogenerated": false,
"ratio": 3.4933862433862433,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005012389440316781,
"num_lines": 60
} |
"""Add soundcloud upload try logging table
Revision ID: 46085f5c8e36
Revises: 1e3be3ff186d
Create Date: 2017-02-10 11:20:12.308503
"""
# revision identifiers, used by Alembic.
revision = '46085f5c8e36'
down_revision = '1e3be3ff186d'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create soundcloud_track_retry, which logs each upload retry attempt
    against its soundcloud_track row."""
    op.create_table(
        'soundcloud_track_retry',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('created_at', sa.DateTime(timezone=True), nullable=False),
        sa.Column('updated_at', sa.DateTime(timezone=True), nullable=False),
        sa.Column('soundcloud_track_id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(
            ['soundcloud_track_id'], ['soundcloud_track.id'],
            name=op.f('fk_soundcloud_track_retry_soundcloud_track_id_soundcloud_track')),
        sa.PrimaryKeyConstraint('id', name=op.f('pk_soundcloud_track_retry'))
    )
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # Reverse of upgrade(): drop the retry-log table.
    op.drop_table('soundcloud_track_retry')
    ### end Alembic commands ###
| {
"repo_name": "Code4SA/pmg-cms-2",
"path": "migrations/versions/46085f5c8e36_add_soundcloud_upload_try_logging_table.py",
"copies": "1",
"size": "1105",
"license": "apache-2.0",
"hash": 1951043937141210000,
"line_mean": 32.4848484848,
"line_max": 155,
"alpha_frac": 0.7049773756,
"autogenerated": false,
"ratio": 3.308383233532934,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9473202124248011,
"avg_score": 0.00803169697698447,
"num_lines": 33
} |
"""Add Source
Revision ID: 2c7cbd9b7e54
Revises: 380d20771802
Create Date: 2013-12-17 13:53:19.836264
"""
# revision identifiers, used by Alembic.
revision = '2c7cbd9b7e54'
down_revision = '380d20771802'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the source table and link build/buildfamily rows to it."""
    op.create_table(
        'source',
        sa.Column('id', sa.GUID(), nullable=False),
        sa.Column('repository_id', sa.GUID(), nullable=False),
        sa.Column('patch_id', sa.GUID(), nullable=True),
        sa.Column('revision_sha', sa.String(length=40), nullable=True),
        sa.Column('date_created', sa.DateTime(), nullable=True),
        sa.ForeignKeyConstraint(['patch_id'], ['patch.id']),
        sa.ForeignKeyConstraint(['repository_id'], ['repository.id']),
        sa.PrimaryKeyConstraint('id')
    )
    # New FK columns are nullable so existing rows remain valid.
    op.add_column('build', sa.Column('source_id', sa.GUID(), nullable=True))
    op.add_column('buildfamily', sa.Column('source_id', sa.GUID(), nullable=True))
    op.create_index('idx_build_source_id', 'build', ['source_id'])
    op.create_index('idx_buildfamily_source_id', 'buildfamily', ['source_id'])
def downgrade():
    """Reverse upgrade(): drop indexes and columns first, then the table."""
    op.drop_index('idx_build_source_id', 'build')
    op.drop_index('idx_buildfamily_source_id', 'buildfamily')
    op.drop_column('buildfamily', 'source_id')
    op.drop_column('build', 'source_id')
    op.drop_table('source')
| {
"repo_name": "dropbox/changes",
"path": "migrations/versions/2c7cbd9b7e54_add_source.py",
"copies": "4",
"size": "1349",
"license": "apache-2.0",
"hash": -4569530090052676600,
"line_mean": 32.725,
"line_max": 82,
"alpha_frac": 0.6538176427,
"autogenerated": false,
"ratio": 3.1741176470588237,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5827935289758824,
"avg_score": null,
"num_lines": null
} |
# Add spaces to syntax in lines (for easier splitting by white-space).
# NOTE(review): dict insertion order likely matters here -- multi-word
# replacements ("DOUBLE PRECISION", "END DO") must be applied before the
# single-character ones that would otherwise break them apart. Confirm
# against simplify_fortran_file before reordering.
FORT_TEXT_REPLACEMENTS = {
    "DOUBLE PRECISION": "REAL ( KIND ( 0.0D0 ) )",
    "END DO": "ENDDO",
    "END IF": "ENDIF",
    ",":" , ",
    "(":" ( ",
    ")":" ) ",
    ":":" : ",
    "+":" + ",
    "-":" - ",
    "*":" * ",
    "/":" / ",
    "=":" = ",
}
# Some spaces that were created above were misplaced, so this fixes them
# (e.g. "=" and ">" of a pointer assignment get split into "= >").
FORT_TEXT_FIXES = {
    "= >":"=>",
}
# Keep all comment lines and all lines from a file that start with one of
# the following tokens (the file is converted to all upper case first).
ACCEPTABLE_LINE_STARTS = {
    'ABSTRACT', 'CHARACTER', 'COMPLEX', 'END',  # 'EXTERNAL',
    'FUNCTION', 'IMPLICIT', 'INTEGER', 'INTERFACE', 'LOGICAL',
    'MODULE', 'OPTIONAL', 'PRIVATE', 'PROCEDURE', 'PUBLIC',
    'PURE', 'REAL', 'RECURSIVE', 'SUBROUTINE', 'TYPE', 'USE',
}

# Leading keywords that are stripped from lines before further parsing.
LINE_STARTS_TO_REMOVE = {'PURE', 'RECURSIVE'}

# Immediately exclude a file from automatic compilation if any of its
# lines starts with one of the following.
IMMEDIATELY_EXCLUDE = {"PROGRAM"}
# Footer appended to parser warnings, asking users to file an issue.
# Idiom fix: the original used f-strings with no placeholders (flake8 F541);
# plain string literals produce the identical value.
please_report_to = "\nIf this is syntactically correct Fortran, please report this\n"+\
                   "as an issue with the relevant Fortran code at\n\n"+\
                   "  https://github.com/tchlux/fmodpy\n"
from .util import after_dot, before_dot, class_name, \
legal_module_name, simplify_fortran_file, pop_group, \
wrap_long_lines
# --------------------------------------------------------------------
def parse_interface(list_of_lines, comments, parent):
    """Parse an Interface block; returns a (possibly empty) list of instances."""
    from .interface import Interface
    return parse_code(Interface, list_of_lines, comments, parent)
def parse_module(list_of_lines, comments, parent):
    """Parse a Module block; returns a (possibly empty) list of instances."""
    from .module import Module
    return parse_code(Module, list_of_lines, comments, parent)
def parse_subroutine(list_of_lines, comments, parent):
    """Parse a Subroutine block; returns a (possibly empty) list of instances."""
    from .subroutine import Subroutine
    return parse_code(Subroutine, list_of_lines, comments, parent)
def parse_function(list_of_lines, comments, parent):
    """Parse a Function block; returns a (possibly empty) list of instances."""
    from .function import Function
    return parse_code(Function, list_of_lines, comments, parent)
def parse_type(list_of_lines, comments, parent):
    """Parse a TYPE declaration block; returns a (possibly empty) list of instances."""
    from .type import TypeDeclaration
    return parse_code(TypeDeclaration, list_of_lines, comments, parent)
# Parse a PUBLIC line, identifying names that are declared public.
def parse_public(list_of_lines, comments, parent, keyword="PUBLIC"):
    """Consume a leading `keyword` (PUBLIC/PRIVATE) line from `list_of_lines`.

    Returns the list of names declared on that line.  A bare keyword line
    instead sets `parent.status` and returns [].  If the next line does not
    start with the keyword it is left untouched and [] is returned.
    """
    # Skip if there are no lines to process.
    if (len(list_of_lines) == 0): return []
    line = list_of_lines[0].strip().split()
    # Remove any comments on the line that may exist.
    if ("!" in line): line = line[:line.index("!")]
    # Skip (and consume) empty lines.
    if (len(line) == 0):
        list_of_lines.pop(0)
        return []
    # No keyword line found -> nothing to do.
    if (line[0] != keyword): return []
    # If there is only the keyword, then it is a scope-wide status.
    if (len(line) == 1):
        parent.status = keyword
        list_of_lines.pop(0)
        return []
    # Strip the optional "::" separator (tokenized as ":", ":").
    if (''.join(line[1:3]) == "::"):
        line = line[3:]
    else:
        # BUG FIX: without "::" (e.g. "PUBLIC FOO , BAR") the keyword itself
        # was previously left in the returned name list.
        line = line[1:]
    # Drop the comma separators between names.
    line = [token for token in line if (token != ",")]
    # Remove this line from the list (it has been fully parsed); only
    # variable and function names remain.
    list_of_lines.pop(0)
    return line
def parse_private(list_of_lines, comments, parent):
    """Parse a PRIVATE line; identical to parse_public with a different keyword."""
    return parse_public(list_of_lines, comments, parent, keyword="PRIVATE")
# Parse a USE line; returns the (cleaned) line as a single-element list.
def parse_use(list_of_lines, comments, parent):
    """Consume a leading USE statement, if present, and return it.

    Returns [line] (with ": :" spacing collapsed back to "::") when the
    next line is a USE statement, otherwise [] with nothing consumed.
    """
    # Guard clauses: nothing to process, blank line, or not a USE statement.
    if (len(list_of_lines) == 0):
        return []
    tokens = list_of_lines[0].strip().split()
    if (len(tokens) == 0):
        return []
    if (tokens[0] != "USE"):
        return []
    # Consume the line, undoing the ": :" spacing added during tokenization.
    return [list_of_lines.pop(0).replace(": :", "::")]
# Parse a line with IMPLICIT NONE; returns the line in a single-element list.
def parse_implicit(list_of_lines, comments, parent):
    """Consume a leading "IMPLICIT NONE" statement, if present.

    Returns [line] when consumed, [] when the next line is not an IMPLICIT
    statement (or is blank), and raises ParseError for a malformed
    IMPLICIT line.
    """
    # Skip if there are no lines to process.
    if (len(list_of_lines) == 0): return []
    line = list_of_lines[0].strip().split()
    # Skip empty lines.
    if (len(line) == 0): return []
    # Not an IMPLICIT statement at all -> nothing consumed.
    if (line[0] != "IMPLICIT"): return []
    # Consume and return a well-formed "IMPLICIT NONE".
    if (len(line) > 1) and (line[1] == "NONE"):
        return [list_of_lines.pop(0)]
    # BUG FIX: a bare "IMPLICIT" previously fell through every branch and
    # implicitly returned None (callers expect a list).  Treat it like any
    # other unsupported IMPLICIT form and raise.
    from fmodpy.exceptions import ParseError
    raise(ParseError("Found phrase 'IMPLICIT' but it was not followed by 'NONE'."))
# Only initialize a "code" object if the line is not empty and the
# type is matched (after removing any prefixes).
def parse_code(code, list_of_lines, comments, parent):
    """Generic parse driver: try to construct one `code`-class object
    (Interface, Module, Subroutine, Function, TypeDeclaration) from the
    front of `list_of_lines`.

    Returns a single-element list holding the parsed instance when the next
    line begins a `code.type` block that consumed at least one line,
    otherwise an empty list.
    """
    # Skip if there are no lines to process.
    if (len(list_of_lines) == 0): return []
    line = list_of_lines[0].strip().split()
    # Skip empty lines.
    if (len(line) == 0): return []
    # Remove a prefix if it is at the front of the line (e.g. PURE/RECURSIVE).
    for p in code.prefixes:
        if (line[0] == p):
            line = line[1:]
            break
    # Check for an empty line (this shouldn't happen).
    # NOTE(review): `p` is only bound if the prefix loop ran, but `line` can
    # only become empty after a prefix was removed, so the reference below
    # is safe -- subtle, verify if code.prefixes semantics ever change.
    if (len(line) == 0):
        import warnings
        text = f"\nAn enexpected thing just happened when parsing.\n"+\
            f"After removing {class_name(code)} prefix '{p}', the line was empty.\n"
        warnings.warn(text+please_report_to)
    # Check for a match, if it matches complete instance initialization.
    elif (line[0] == code.type):
        # The code class's constructor consumes its own lines from the list.
        parsed_code = code(list_of_lines, comments, parent)
        if (parsed_code.lines > 0): return [parsed_code]
        else: return []
    # No objects were found, return empty list of instances.
    return []
# Given a list of strings that represent lines of a file, determine if
# it is a recognizable declaration. If so, define Argument object(s),
# return the list of Argument(s).
def parse_argument(list_of_lines, comments, parent):
    """Parse one argument-declaration line (REAL/INTEGER/LOGICAL/COMPLEX/TYPE).

    On a match, the line is consumed from `list_of_lines` and one Argument
    object per declared name is returned (with `name`, `parent`, and --
    when present -- `dimension` set).  Returns [] when the next line is not
    a recognized declaration.  Raises NotImplementedError on declarations
    this parser cannot handle yet.
    """
    # Robustness fix: guard against an empty line list, matching the
    # behavior of all the sibling parse_* functions.
    if (len(list_of_lines) == 0): return []
    line = list_of_lines[0].strip().split()
    if (len(line) == 0): return []
    # Try out all possible declarations.
    # (Idiom fix: removed the dead `success` flag -- it was assigned but
    # never read; the for/else already handles the no-match case.)
    from .real import Real
    from .integer import Integer
    from .logical import Logical
    from .complex import Complex
    from .type import TypeArgument
    for arg_type in [Real, Integer, Logical, Complex, TypeArgument]:
        if (line[0] == arg_type.type):
            break
    else: return []
    # If an argument type was identified, then finish parsing.
    double_colon = [i for i in range(len(line)-1)
                    if line[i] == line[i+1] == ":"]
    # If there is no "::", then variable names will immediately follow type(kind).
    # TODO: This might be a parsing error, or a FortranLanguageError.
    if (len(double_colon) > 1): raise(NotImplementedError)
    elif (len(double_colon) == 1):
        # There is a double colon in this line; split into the type
        # description (base) and the declared names (tail).
        double_colon = double_colon[0]
        base = line[:double_colon]
        tail = line[double_colon+2:]
    else:
        # If there is a KIND, then include that in the base.
        if ((len(line) > 2) and (line[1]) == '('):
            # Check to see if there is a parenthetical group after
            # the argument type (this would be for a KIND).
            kind, tail = pop_group(line[1:], open_with="(", close_with=")")
            base = [line[0]] + ["("] + kind + [")"]
        else:
            base = line[:1]
            tail = line[1:]
    # Check to make sure there are variable names after the TYPE.
    if (len(tail) == 0): raise(NotImplementedError)
    # Now we are guaranteed to use this line to define an argument.
    list_of_lines.pop(0)
    # Get all names from the tail (with their dimensions, if given).
    names = [tail.pop(0)]
    while (len(tail) > 0):
        # First, check to see if there is a parenthetical group after
        # the previous argument name (this would be for a size, like
        # ARG(10,2), but it could have expressions inside as well).
        group, tail = pop_group(tail, open_with="(", close_with=")")
        if (len(group) > 0): names[-1] += " ( "+" ".join(group)+" )"
        # If there is a "," it indicates more arguments follow.
        elif (tail[0] == ","): tail.pop(0)
        # If this is a PARAMETER assignment, there could be an "= value"
        # after the name. There will only be other arguments after
        # this one if there is a "," in the line. If no "," then stop.
        elif (tail[0] == "="):
            if (("," in tail) and ("=" in tail[1:])):
                tail = tail[tail.index(",")+1:]
            else: break
        # Finally, if it is not a comma, group, or value, it must be an argument name.
        else: names.append(tail.pop(0))
    # Cycle the names, converting all into proper Argument objects.
    args = []
    for n in names:
        # Each argument gets its own copy of the base type description.
        args.append( arg_type(base[:]) )
        args[-1].parent = parent
        # Split the dimension out of the name, if it is there.
        if "(" in n:
            begin_dim = n.index("(")
            n, group = n[:begin_dim].strip(), n[begin_dim:].strip().split()
            group, _ = pop_group(group)
            # Rebuild the comma-separated dimension expressions, tracking
            # parenthesis depth so nested commas are not split on.
            dimension = [""]
            num_open = 0
            while (len(group) > 0):
                next_value = group.pop(0)
                if (next_value == ",") and (num_open == 0):
                    dimension.append("")
                else: dimension[-1] += next_value
                if (next_value == "("): num_open += 1
                if (next_value == ")"): num_open -= 1
            # Overwrite the dimension.
            args[-1].dimension = dimension
        # Overwrite the name.
        args[-1].name = n
    # Return the final list of declared arguments.
    return args
| {
"repo_name": "tchlux/fmodpy",
"path": "fmodpy/parsing/__init__.py",
"copies": "1",
"size": "10832",
"license": "mit",
"hash": -6512208863947161000,
"line_mean": 42.1553784861,
"line_max": 91,
"alpha_frac": 0.5937038405,
"autogenerated": false,
"ratio": 3.742916378714582,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4836620219214582,
"avg_score": null,
"num_lines": null
} |
# Add SpamBayes as an option to the autotagger. We like SpamBayes.
#
# We feed the classifier the same terms as go into the search engine,
# which should allow us to actually introspect a bit into the behavior
# of the classifier.
from spambayes.classifier import Classifier
import mailpile.plugins.autotag
from mailpile.i18n import gettext as _
from mailpile.i18n import ngettext as _n
def _classifier(autotagger):
    # Lazily create (and cache on the autotagger) a shared SpamBayes
    # classifier instance; reused by both the tagger and the trainer below.
    if not hasattr(autotagger, 'spambayes'):
        autotagger.spambayes = Classifier()
    return autotagger.spambayes
# NOTE(review): this subclasses autotag.Trainer even though it implements
# the tagger interface -- looks suspicious; confirm against
# mailpile.plugins.autotag before changing.
class SpamBayesTagger(mailpile.plugins.autotag.Trainer):
    """Tagger backed by SpamBayes' chi-squared spam probability."""

    def should_tag(self, atagger, at_config, msg, keywords):
        score, evidence = _classifier(atagger).chi2_spamprob(keywords,
                                                             evidence=True)
        # Above the upper band: tag.  Inside the band: undecided (None).
        # Below the threshold: do not tag.
        if score >= 1 - at_config.threshold:
            verdict = True
        elif score > at_config.threshold:
            verdict = None
        else:
            verdict = False
        return (verdict, score)
class SpamBayesTrainer(mailpile.plugins.autotag.Trainer):
    """Trainer that feeds message keywords into the SpamBayes classifier."""

    def learn(self, atagger, at_config, msg, keywords, should_tag):
        _classifier(atagger).learn(keywords, should_tag)

    def reset(self, atagger, at_config):
        # Throw away all learned state by replacing the classifier.
        atagger.spambayes = Classifier()
# Register this backend under the 'spambayes' key so the autotag plugin can
# select it by name from configuration.
mailpile.plugins.autotag.TAGGERS['spambayes'] = SpamBayesTagger
mailpile.plugins.autotag.TRAINERS['spambayes'] = SpamBayesTrainer
| {
"repo_name": "jparyani/Mailpile",
"path": "mailpile/plugins/autotag_sb.py",
"copies": "3",
"size": "1411",
"license": "apache-2.0",
"hash": -8560783546862550000,
"line_mean": 32.5952380952,
"line_max": 75,
"alpha_frac": 0.6817859674,
"autogenerated": false,
"ratio": 3.391826923076923,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5573612890476923,
"avg_score": null,
"num_lines": null
} |
"""add spanish columns
Revision ID: ebaea6f9f6a9
Revises: 32df5fa890bf
Create Date: 2018-06-27 13:58:57.650447
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "ebaea6f9f6a9"
down_revision = "32df5fa890bf"
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function (upgrade_<engine>)."""
    globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function, e.g. downgrade_rdr()."""
    handler = globals()["downgrade_{0}".format(engine_name)]
    handler()
def upgrade_rdr():
    """Add the Spanish-language site columns (notes_es, schedule_instructions_es)."""
    # ### commands auto generated by Alembic - please adjust! ###
    for column_name, max_length in (("notes_es", 1024),
                                    ("schedule_instructions_es", 2048)):
        op.add_column("site", sa.Column(column_name, sa.String(length=max_length), nullable=True))
    # ### end Alembic commands ###
def downgrade_rdr():
    """Remove the Spanish-language site columns (reverse of upgrade_rdr)."""
    # ### commands auto generated by Alembic - please adjust! ###
    for column_name in ("schedule_instructions_es", "notes_es"):
        op.drop_column("site", column_name)
    # ### end Alembic commands ###
def upgrade_metrics():
    """No schema changes for the metrics database in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No schema changes for the metrics database in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/ebaea6f9f6a9_add_spanish_columns.py",
"copies": "1",
"size": "1254",
"license": "bsd-3-clause",
"hash": 3232245676160092700,
"line_mean": 24.5918367347,
"line_max": 103,
"alpha_frac": 0.6650717703,
"autogenerated": false,
"ratio": 3.389189189189189,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4554260959489189,
"avg_score": null,
"num_lines": null
} |
"""Add specified shots."""
from optparse import make_option
import itertools
from django.core.management.base import BaseCommand
from h1ds.models import Shot, Device
def parse_shot_args(args):
    """Parse shot-number arguments into a de-duplicated, descending list.

    Each argument is either a single shot number ("42") or an inclusive
    range ("10-15").  Returns a sorted (descending) list of unique ints.

    Raises ValueError if an argument is not an integer or "int-int" range.
    """
    individual_shots = []
    shot_ranges = []
    for arg in args:
        if "-" in arg:
            # BUG FIX: the original appended map(int, ...) and later indexed
            # it with [0]/[1]; map objects are not subscriptable on Python 3.
            shot_ranges.append([int(part) for part in arg.split('-')])
        else:
            individual_shots.append(int(arg))
    collected_shots = [range(i[0], i[1]+1) for i in shot_ranges]
    collected_shots.append(individual_shots)
    return sorted(set(itertools.chain(*collected_shots)), reverse=True)
class Command(BaseCommand):
    """Management command that registers one or more shots with a device."""
    args = '<shot_number shot_number ...>'
    help = 'Add specified shots.'
    option_list = BaseCommand.option_list + (
        make_option('-d', '--device',
                    dest='device',
                    help='Device to add shot to (use slug string)'
                    ),
    )

    def handle(self, *args, **options):
        """Create Shot rows for every requested shot number."""
        device_slug = options['device']
        if device_slug:
            device = Device.objects.get(slug=device_slug)
        else:
            # No --device given: fall back to the default device.
            device = Device.objects.get(is_default=True)
        for number in parse_shot_args(args):
            _, created = Shot.objects.get_or_create(number=number, device=device)
            if created:
                self.stdout.write('Successfully added shot %d' % number)
            else:
                self.stdout.write('Shot %d exists, ignoring.' % number)
| {
"repo_name": "h1ds/h1ds",
"path": "h1ds/h1ds/management/commands/addshots.py",
"copies": "1",
"size": "1450",
"license": "mit",
"hash": 8328735223455631000,
"line_mean": 34.3658536585,
"line_max": 89,
"alpha_frac": 0.5951724138,
"autogenerated": false,
"ratio": 3.7859007832898173,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4881073197089817,
"avg_score": null,
"num_lines": null
} |
"""Adds random forces to the base of Minitaur during the simulation steps."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, inspect
# Make the repository root importable when this file is run directly:
# walk four directory levels up from this file and prepend it to sys.path.
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
parentdir = os.path.dirname(os.path.dirname(parentdir))
os.sys.path.insert(0, parentdir)
import math
import numpy as np
from pybullet_envs.minitaur.envs import env_randomizer_base
# Default schedule: no force before step 100; a new force is sampled every
# 200 steps and applied for 10 consecutive steps.
_PERTURBATION_START_STEP = 100
_PERTURBATION_INTERVAL_STEPS = 200
_PERTURBATION_DURATION_STEPS = 10
# NOTE(review): the UPPER bounds below are smaller than the LOWER bounds
# (120 < 240 and 300 < 500).  np.random.uniform accepts swapped endpoints,
# so sampling still spans the interval, but the names look transposed --
# confirm against the intended force ranges.
_HORIZONTAL_FORCE_UPPER_BOUND = 120
_HORIZONTAL_FORCE_LOWER_BOUND = 240
_VERTICAL_FORCE_UPPER_BOUND = 300
_VERTICAL_FORCE_LOWER_BOUND = 500
class MinitaurPushRandomizer(env_randomizer_base.EnvRandomizerBase):
  """Applies a random impulse to the base of Minitaur."""

  def __init__(
      self,
      perturbation_start_step=_PERTURBATION_START_STEP,
      perturbation_interval_steps=_PERTURBATION_INTERVAL_STEPS,
      perturbation_duration_steps=_PERTURBATION_DURATION_STEPS,
      horizontal_force_bound=None,
      vertical_force_bound=None,
  ):
    """Initializes the randomizer.

    Args:
      perturbation_start_step: No perturbation force before the env has advanced
        this amount of steps.
      perturbation_interval_steps: The step interval between applying
        perturbation forces.
      perturbation_duration_steps: The duration of the perturbation force.
      horizontal_force_bound: The lower and upper bound of the applied force
        magnitude when projected in the horizontal plane.
      vertical_force_bound: The z component (abs value) bound of the applied
        perturbation force.
    """
    self._perturbation_start_step = perturbation_start_step
    self._perturbation_interval_steps = perturbation_interval_steps
    self._perturbation_duration_steps = perturbation_duration_steps
    # When no bounds are given, fall back to the module-level defaults
    # (note: the module constants pass [LOWER, UPPER] with LOWER > UPPER; see
    # the NOTE(review) beside the constants).
    self._horizontal_force_bound = (horizontal_force_bound if horizontal_force_bound else
                                    [_HORIZONTAL_FORCE_LOWER_BOUND, _HORIZONTAL_FORCE_UPPER_BOUND])
    self._vertical_force_bound = (vertical_force_bound if vertical_force_bound else
                                  [_VERTICAL_FORCE_LOWER_BOUND, _VERTICAL_FORCE_UPPER_BOUND])

  def randomize_env(self, env):
    """Randomizes the simulation environment.

    Intentionally a no-op: this randomizer only acts per-step.

    Args:
      env: The Minitaur gym environment to be randomized.
    """
    pass

  def randomize_step(self, env):
    """Randomizes simulation steps.

    Will be called at every timestep. May add random forces/torques to Minitaur.

    Args:
      env: The Minitaur gym environment to be randomized.
    """
    base_link_ids = env.minitaur.chassis_link_ids
    # At the start of each interval, sample a new target link and force vector.
    if env.env_step_counter % self._perturbation_interval_steps == 0:
      self._applied_link_id = base_link_ids[np.random.randint(0, len(base_link_ids))]
      horizontal_force_magnitude = np.random.uniform(self._horizontal_force_bound[0],
                                                     self._horizontal_force_bound[1])
      # Random direction in the horizontal plane; z component always pushes down.
      theta = np.random.uniform(0, 2 * math.pi)
      vertical_force_magnitude = np.random.uniform(self._vertical_force_bound[0],
                                                   self._vertical_force_bound[1])
      self._applied_force = horizontal_force_magnitude * np.array(
          [math.cos(theta), math.sin(theta), 0]) + np.array([0, 0, -vertical_force_magnitude])
    # Apply the sampled force for the first `duration` steps of each interval,
    # but only after the start step has been reached.
    if (env.env_step_counter % self._perturbation_interval_steps <
        self._perturbation_duration_steps) and (env.env_step_counter >=
                                                self._perturbation_start_step):
      env.pybullet_client.applyExternalForce(objectUniqueId=env.minitaur.quadruped,
                                             linkIndex=self._applied_link_id,
                                             forceObj=self._applied_force,
                                             posObj=[0.0, 0.0, 0.0],
                                             flags=env.pybullet_client.LINK_FRAME)
| {
"repo_name": "MadManRises/Madgine",
"path": "shared/bullet3-2.89/examples/pybullet/gym/pybullet_envs/minitaur/envs/env_randomizers/minitaur_push_randomizer.py",
"copies": "4",
"size": "4087",
"license": "mit",
"hash": 8416899236588307000,
"line_mean": 43.4239130435,
"line_max": 99,
"alpha_frac": 0.6584291656,
"autogenerated": false,
"ratio": 3.7842592592592594,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006557228139888075,
"num_lines": 92
} |
""" Adds registry entries on developer's machine. Emulating what the installer will do. """
import sys
import os
import json
from _winreg import *
def register_tool(tool):
reg_path = "SOFTWARE\\META\\AnalysisTools\\%s\\" % (tool['name'])
reg_key = "InstallLocation"
reg_value = "C:\\sadasfa\\afasf"
with CreateKeyEx(HKEY_LOCAL_MACHINE, reg_path, 0, KEY_ALL_ACCESS) as reg:
SetValueEx(reg, 'InstallLocation', 0, REG_SZ, tool['InstallLocation'])
SetValueEx(reg, 'Version', 0, REG_SZ, tool['version'])
SetValueEx(reg, 'OutputDirectory', 0, REG_SZ, tool['outputDirectory'])
SetValueEx(reg, 'RunCommand', 0, REG_SZ, tool['runCommand'])
SetValueEx(reg, 'RequiredInterpreter', 0, REG_SZ, tool['requiredInterpreter'])
print ' {0} is registered with parameters {1}'.format(tool['name'], tool)
def get_tool(tool_dir):
    """Read analysis_tool.manifest.json in *tool_dir* and return tool dicts.

    Each returned dict carries the manifest entry's keys plus 'name' (the
    manifest key) and 'InstallLocation' (the absolute tool directory).
    """
    manifest_path = os.path.join(tool_dir, 'analysis_tool.manifest.json')
    with open(manifest_path, 'r') as f_p:
        manifest = json.load(f_p)
    tools = []
    for tool_name, params in manifest.items():
        tool = {'InstallLocation': os.path.abspath(tool_dir)}
        tool.update(params)
        tool['name'] = tool_name
        tools.append(tool)
    return tools
def main():
# get all analysis tools
analysis_tools_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..\\..\\analysis_tools'))
print 'Registering analysis tools from {0}'.format(analysis_tools_dir)
directories = [os.path.join(analysis_tools_dir, name) for name in os.listdir(analysis_tools_dir) if os.path.isdir(os.path.join(analysis_tools_dir, name)) and not name == '.svn']
tools = []
for tool_dir in directories:
tools += get_tool(tool_dir)
for tool in tools:
register_tool(tool)
print 'Analysis tool registration is done.'
if __name__ == '__main__':
sys.exit(main()) | {
"repo_name": "pombredanne/metamorphosys-desktop",
"path": "metamorphosys/META/meta/CyPhyML/add_registry_entries.py",
"copies": "1",
"size": "1969",
"license": "mit",
"hash": 6770759434090367000,
"line_mean": 33.1964285714,
"line_max": 181,
"alpha_frac": 0.6140172676,
"autogenerated": false,
"ratio": 3.547747747747748,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9643376964700379,
"avg_score": 0.003677610129473988,
"num_lines": 56
} |
# Adds Remote to the Window menu of Motionbuilder. This is used to launch the q_telnet_client subprocess that
# connects to the arduino remote and routes input back to Motionbuilder. It uses subprocess to prevent blocking
# the Motionbuilder interface.
# Also adds Reload Remote to the Window menu. This is allows us to update the MoBoReceiver code without
# restarting Motionbuilder.
import os
import subprocess
import pyfbsdk
import Remote
import iphelper
import Remote.moboreceiver
# Module state shared by eventMenu: the active MoBoReceiver instance and the
# IP address the user last selected in the IPHelper dialog.
receiver = None
ip = None
def eventMenu(control, event):
    """Window-menu handler.

    'Remote' asks for an IP via IPHelper, launches the q_telnet_client
    subprocess (so the Motionbuilder UI is not blocked) and creates the
    MoBoReceiver on first use.  'Reload Remote' re-imports the receiver
    module and rebuilds the receiver with the last-selected IP.
    """
    name = event.Name
    if name == "Remote":
        global receiver, ip
        h = iphelper.IPHelper()
        if h.exec_():
            ip = h.ipAddress
            path = os.path.split(__file__)[0]
            # BUG FIX: Popen requires a flat sequence of strings; the original
            # nested a list inside the argument list, which raises TypeError.
            subprocess.Popen([
                'cmd.exe', '/k', 'python.exe',
                os.path.join(path, 'q_telnet_client.py'),
                str(ip),
            ])
            if receiver is None:
                receiver = Remote.moboreceiver.MoBoReceiver(ip, 23)
    elif name == 'Reload Remote':
        reload(Remote.moboreceiver)
        receiver = Remote.moboreceiver.MoBoReceiver(ip, 23)
# Hook our handler into Motionbuilder's Window menu and append the two
# entries (preceded by a separator).
mgr = pyfbsdk.FBMenuManager()
pythonTools = mgr.GetMenu('Window')
pythonTools.OnMenuActivate.Add(eventMenu)
mgr.InsertLast('Window', '')  # Separator
mgr.InsertLast('Window', 'Remote')
mgr.InsertLast('Window', 'Reload Remote')
| {
"repo_name": "MHendricks/Motionbuilder-Remote",
"path": "__init__.py",
"copies": "1",
"size": "1257",
"license": "mit",
"hash": 6895680986248828000,
"line_mean": 32.972972973,
"line_max": 127,
"alpha_frac": 0.7350835322,
"autogenerated": false,
"ratio": 3.316622691292876,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45517062234928757,
"avg_score": null,
"num_lines": null
} |
"""Adds S3 Image Store"""
from baseCmd import *
from baseResponse import *
class addImageStoreS3Cmd (baseCmd):
    """Synchronous command adding an S3-backed image store."""
    typeInfo = {}

    def __init__(self):
        self.isAsync = "false"
        # (attribute name, API type) pairs; the first four are mandatory.
        parameters = (
            ('accesskey', 'string'),           # S3 access key (required)
            ('bucket', 'string'),              # name of the storage bucket (required)
            ('endpoint', 'string'),            # S3 endpoint (required)
            ('secretkey', 'string'),           # S3 secret key (required)
            ('connectiontimeout', 'integer'),  # connection timeout (milliseconds)
            ('connectionttl', 'integer'),      # connection TTL (milliseconds)
            ('maxerrorretry', 'integer'),      # maximum number of retries on error
            ('s3signer', 'string'),            # S3SignerType or AWSS3V4SignerType
            ('sockettimeout', 'integer'),      # socket timeout (milliseconds)
            ('usehttps', 'boolean'),           # use HTTPS instead of HTTP
            ('usetcpkeepalive', 'boolean'),    # whether TCP keep-alive is used
        )
        for attribute, api_type in parameters:
            setattr(self, attribute, None)
            self.typeInfo[attribute] = api_type
        self.required = ["accesskey", "bucket", "endpoint", "secretkey", ]
class addImageStoreS3Response (baseResponse):
    """Response fields describing the created S3 image store."""
    typeInfo = {}

    def __init__(self):
        # (attribute name, API type) pairs for the image store description.
        fields = (
            ('id', 'string'),            # ID of the image store
            ('details', 'set'),          # details of the image store
            ('name', 'string'),          # name of the image store
            ('protocol', 'string'),      # protocol of the image store
            ('providername', 'string'),  # provider name of the image store
            ('scope', 'scopetype'),      # scope of the image store
            ('url', 'string'),           # url of the image store
            ('zoneid', 'string'),        # Zone ID of the image store
            ('zonename', 'string'),      # Zone name of the image store
        )
        for attribute, api_type in fields:
            setattr(self, attribute, None)
            self.typeInfo[attribute] = api_type
| {
"repo_name": "MissionCriticalCloud/marvin",
"path": "marvin/cloudstackAPI/addImageStoreS3.py",
"copies": "1",
"size": "2840",
"license": "apache-2.0",
"hash": 559806965150627200,
"line_mean": 33.6341463415,
"line_max": 79,
"alpha_frac": 0.5690140845,
"autogenerated": false,
"ratio": 4.080459770114943,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5149473854614942,
"avg_score": null,
"num_lines": null
} |
"""Adds schools to the database
Revision ID: d2a3b14d6a89
Revises: ce7d019512b1
Create Date: 2016-12-13 18:24:45.336983
"""
# revision identifiers, used by Alembic.
revision = 'd2a3b14d6a89'      # this migration
down_revision = 'ce7d019512b1' # parent revision in the migration chain

from alembic import op
import sqlalchemy as sa
from sqlalchemy.orm import sessionmaker

from ifc.models import Fraternity, School

# Session factory used to run ORM-level data fixes inside this migration.
Session = sessionmaker()
def upgrade():
    """Create the schools table and attach every fraternity to a school.

    Combined schema + data migration: creates `schools`, inserts WPI as the
    initial school, links all existing fraternities to it, then makes the new
    school_id column NOT NULL.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('schools',
                    sa.Column('id', sa.Integer(), nullable=False),
                    sa.Column('title', sa.String(length=120), nullable=False),
                    sa.Column('short_title', sa.String(length=10), nullable=True),
                    sa.PrimaryKeyConstraint('id'),
                    sa.UniqueConstraint('title')
                    )
    # create the initial school for the existing data
    bind = op.get_bind()
    session = Session(bind=bind)
    new_school = School(title="Worcester Polytechnic Institute",
                        short_title="WPI")
    session.add(new_school)
    # school_id starts nullable so existing rows can be backfilled first.
    op.add_column(u'fraternities', sa.Column('school_id', sa.Integer()))
    op.drop_constraint(u'fraternities_title_key', 'fraternities', type_='unique')
    # NOTE(review): the foreign key is created unnamed (None); the matching
    # downgrade also drops it by name=None.
    op.create_foreign_key(None, 'fraternities', 'schools', ['school_id'], ['id'])
    # update the current fraternities to belong to the only school
    for frat in session.query(Fraternity):
        frat.school = new_school
    session.commit()
    # Now that every row has a school, enforce NOT NULL.
    op.alter_column('fraternities', 'school_id',
                    existing_type=sa.Integer(),
                    nullable=False)
    # ### end Alembic commands ###
def downgrade():
    """Reverse the schools migration: detach fraternities and drop the table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint(None, 'fraternities', type_='foreignkey')
    op.create_unique_constraint(u'fraternities_title_key', 'fraternities', ['title'])
    op.drop_column(u'fraternities', 'school_id')
    op.drop_table('schools')
    # ### end Alembic commands ###
| {
"repo_name": "Rdbaker/WPI-IFC",
"path": "migrations/versions/d2a3b14d6a89_.py",
"copies": "1",
"size": "1922",
"license": "bsd-3-clause",
"hash": -1754934289529976600,
"line_mean": 30.5081967213,
"line_max": 85,
"alpha_frac": 0.6680541103,
"autogenerated": false,
"ratio": 3.2466216216216215,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4414675731921621,
"avg_score": null,
"num_lines": null
} |
"""Adds secondary storage."""
from baseCmd import *
from baseResponse import *
class addSecondaryStorageCmd (baseCmd):
    """Synchronous command adding secondary storage."""
    typeInfo = {}

    def __init__(self):
        self.isAsync = "false"
        # (attribute name, API type) pairs; only url is mandatory.
        parameters = (
            ('url', 'string'),   # the URL for the secondary storage (required)
            ('zoneid', 'uuid'),  # the Zone ID for the secondary storage
        )
        for attribute, api_type in parameters:
            setattr(self, attribute, None)
            self.typeInfo[attribute] = api_type
        self.required = ["url", ]
class addSecondaryStorageResponse (baseResponse):
    """Response fields describing the created image store."""
    typeInfo = {}

    def __init__(self):
        # (attribute name, API type) pairs for the image store description.
        fields = (
            ('id', 'string'),            # ID of the image store
            ('details', 'set'),          # details of the image store
            ('name', 'string'),          # name of the image store
            ('protocol', 'string'),      # protocol of the image store
            ('providername', 'string'),  # provider name of the image store
            ('scope', 'scopetype'),      # scope of the image store
            ('url', 'string'),           # url of the image store
            ('zoneid', 'string'),        # Zone ID of the image store
            ('zonename', 'string'),      # Zone name of the image store
        )
        for attribute, api_type in fields:
            setattr(self, attribute, None)
            self.typeInfo[attribute] = api_type
| {
"repo_name": "MissionCriticalCloud/marvin",
"path": "marvin/cloudstackAPI/addSecondaryStorage.py",
"copies": "1",
"size": "1612",
"license": "apache-2.0",
"hash": -115277121848466240,
"line_mean": 30,
"line_max": 51,
"alpha_frac": 0.5595533499,
"autogenerated": false,
"ratio": 4.070707070707071,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5130260420607071,
"avg_score": null,
"num_lines": null
} |
"""Adds share tracking tables for three different time slices.
Revision ID: b1e627b9a0
Revises: 2b5117cc3df6
Create Date: 2014-03-05 21:48:22.753744
"""
# revision identifiers, used by Alembic.
revision = 'b1e627b9a0'        # this migration
down_revision = '2b5117cc3df6' # parent revision in the migration chain
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Add five-minute and one-hour share tables; rework one_minute_share.

    one_minute_share is renamed minute->time and shares->value, gains a
    worker column, and its primary key becomes (user, time, worker) to match
    the two new tables.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('five_minute_share',
                    sa.Column('user', sa.String(), nullable=False),
                    sa.Column('time', sa.DateTime(), nullable=False),
                    sa.Column('worker', sa.String(), nullable=False),
                    sa.Column('value', sa.Integer(), nullable=True),
                    sa.PrimaryKeyConstraint('user', 'time', 'worker')
                    )
    op.create_table('one_hour_share',
                    sa.Column('user', sa.String(), nullable=False),
                    sa.Column('time', sa.DateTime(), nullable=False),
                    sa.Column('worker', sa.String(), nullable=False),
                    sa.Column('value', sa.Integer(), nullable=True),
                    sa.PrimaryKeyConstraint('user', 'time', 'worker')
                    )
    op.alter_column(u'one_minute_share', u'minute', new_column_name='time')
    op.alter_column(u'one_minute_share', u'shares', new_column_name='value')
    # Existing rows get an empty-string worker via the server default.
    op.add_column(u'one_minute_share', sa.Column('worker', sa.String(), nullable=False, default="", server_default=""))
    # The old primary key must be dropped before the wider one is created.
    op.drop_constraint('one_minute_share_pkey', 'one_minute_share')
    op.create_primary_key('one_minute_share_pkey', 'one_minute_share',
                          ['user', 'time', 'worker'])
    ### end Alembic commands ###
def downgrade():
    """Drop the new share tables and remove the reworked columns.

    NOTE(review): this adds fresh 'minute'/'shares' columns and drops
    'time'/'value' instead of renaming back, so existing data is lost, and
    the original single-column primary key is not restored -- confirm this
    is the intended (lossy) downgrade.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column(u'one_minute_share', sa.Column('minute', postgresql.TIMESTAMP(), nullable=False))
    op.add_column(u'one_minute_share', sa.Column('shares', sa.INTEGER(), nullable=True))
    op.drop_column(u'one_minute_share', 'worker')
    op.drop_column(u'one_minute_share', 'value')
    op.drop_column(u'one_minute_share', 'time')
    op.drop_table('one_hour_share')
    op.drop_table('five_minute_share')
    ### end Alembic commands ###
| {
"repo_name": "lae/simplemona",
"path": "migrations/versions/b1e627b9a0_.py",
"copies": "2",
"size": "2074",
"license": "mit",
"hash": 1921019201511098400,
"line_mean": 39.6666666667,
"line_max": 119,
"alpha_frac": 0.668756027,
"autogenerated": false,
"ratio": 3.2816455696202533,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9902970100955429,
"avg_score": 0.009486299132964981,
"num_lines": 51
} |
"""Adds some color to the tweets."""
import re
from blessings import Terminal
class Term(object):
    """Static class caching terminal color codes and highlight regexes."""

    @staticmethod
    def colors():
        """Return (building lazily on first call) the color table for this terminal."""
        if not hasattr(Term, "_colors"):
            Term._colors = {}
            term = Terminal()
            if term.color:
                # 256-color terminals get brighter codes; basic terminals
                # fall back to the 8-color palette.
                rich = term.number_of_colors >= 256
                Term._colors["text"] = term.normal
                Term._colors["name"] = term.color(35 if rich else 4)
                Term._colors["url"] = term.color(45 if rich else 6)
                Term._colors["hashtag"] = term.color(227 if rich else 3)
        return Term._colors

    @staticmethod
    def patterns():
        """Return (building lazily on first call) the compiled search patterns."""
        if not hasattr(Term, "_patterns"):
            Term._patterns = {}
            # Only bother compiling when the terminal supports color at all.
            if Term.colors():
                Term._patterns["url"] = re.compile(
                    r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|'
                    r'(?:%[0-9a-fA-F][0-9a-fA-F]))+'
                )
                Term._patterns["name"] = re.compile(r'(^|[^@\w])@(\w{1,15})\b')
                Term._patterns["hashtag"] = re.compile(r'(^|[ ])#\w+')
        return Term._patterns
def highlight_tweet(tweet):
    """Highlights the tweet with console colors if supported."""
    if not Term.colors():
        # Colorless terminal: return the text untouched.
        return tweet
    for category in ("name", "hashtag", "url"):
        tweet = _re_hl(tweet, category)
    return tweet
def _re_hl(tweet, re_name):
    """Highlights the tweet with the color and pattern of name.

    Wraps every match of patterns[re_name] in the corresponding color code
    followed by the normal-text code, and returns the rebuilt string.
    """
    words = []
    colors = Term.colors()
    patterns = Term.patterns()
    last_match = 0
    for match in re.finditer(patterns[re_name], tweet):
        span = match.span()
        # The name/hashtag patterns match one leading boundary character
        # (e.g. the space before "@user"); `bump` skips past it so the color
        # starts on the symbol itself.  URL matches have no such prefix.
        # NOTE(review): `int(span[0] != 0) and re_name != "url"` evaluates to
        # 0 or a bool, later used in arithmetic where True counts as 1.
        bump = int(span[0] != 0) and re_name != "url"
        words.append(tweet[last_match:span[0] + bump])
        word = "{0}{1}{2}".format(
            colors[re_name],
            tweet[span[0] + bump:span[1]],
            colors["text"],
        )
        words.append(word)
        last_match = span[1]
    # Trailing text after the final match.
    words.append(tweet[last_match:])
    return "".join(words)
| {
"repo_name": "a-tal/pyweet",
"path": "pyweet/colors.py",
"copies": "1",
"size": "2316",
"license": "bsd-3-clause",
"hash": -4254257922012327400,
"line_mean": 29.88,
"line_max": 79,
"alpha_frac": 0.4982728843,
"autogenerated": false,
"ratio": 3.670364500792393,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4668637385092393,
"avg_score": null,
"num_lines": null
} |
""" Adds specialized pack patterns to avoid unroutable situations during packing
Given an input architecture, this utility will find all the directs that need
to have a specialized pack pattern and adds it to them.
To find the correct direct that needs to be updated there are two different ways:
1. If the direct belongs to the top level pb_type, the direct can be
checked against a regular expression specified at the beginning of
this file or with a string contained in the direct name.
2. If the direct belongs to an intermediate/leaf pb_type, the port name
and belonging operational `mode` are checked to select the correct
direct that needs update.
Currently IOPADs need specialized pack patterns to enable VTR to create molecules
between the various sites of the tile (e.g. ISERDES, IDELAY, IOB33 and OSERDES).
"""
import lxml.etree as ET
import argparse
import re
import itertools
# Regular Expressions to select the directs that need additional pack patterns.
# Being multiple IOB and IOPAD types, the name of the direct changes according
# to different types, hence a regex is needed.
IOPAD_OLOGIC_OQ_REGEX = re.compile("OLOGICE3.OQ_to_IOB33[MS]?.O")
IOPAD_OLOGIC_TQ_REGEX = re.compile("OLOGICE3.TQ_to_IOB33[MS]?.T")
IOPAD_ILOGIC_REGEX = re.compile("IOB33[MS]?.I_to_ILOGICE3.D")
# =============================================================================
def get_top_pb_type(element):
    """Return the top-level pb_type ancestor of *element*.

    Returns None when *element* itself sits directly under
    <complexblocklist>, or when no <complexblocklist> ancestor exists.
    """
    parent = element.getparent()
    # element is itself a direct child of <complexblocklist>.
    if parent is not None and parent.tag == "complexblocklist":
        return None
    # Walk upwards until the node just below <complexblocklist> is found.
    node = element
    parent = node.getparent()
    while parent is not None:
        if parent.tag == "complexblocklist":
            # Only pb_type elements may sit directly under <complexblocklist>.
            assert node.tag == "pb_type", node.tag
            return node
        node, parent = parent, parent.getparent()
    return None
def add_pack_pattern(element, pack_pattern_prefix, for_input=None):
    """Attach a <pack_pattern> child to *element* (a direct or mux).

    The pattern is named "<prefix>_<top pb_type name>".  When *for_input* is
    given it must appear in the element's input attribute and becomes the
    pattern's in_port; otherwise the whole input attribute is used.
    """
    top_pb_type = get_top_pb_type(element)
    pack_pattern_name = "{}_{}".format(
        pack_pattern_prefix, top_pb_type.attrib['name']
    )
    if for_input is None:
        in_port = element.attrib['input']
    else:
        assert for_input in element.attrib['input']
        in_port = for_input
    out_port = element.attrib['output']
    # Refuse to attach the same pattern twice.
    for existing in element.findall("pack_pattern"):
        if existing.attrib.get("name", None) == pack_pattern_name:
            assert False, (
                element.attrib["name"],
                pack_pattern_name,
            )
    ET.SubElement(
        element,
        'pack_pattern',
        attrib={
            'name': pack_pattern_name,
            'in_port': in_port,
            'out_port': out_port
        }
    )
# =============================================================================
def maybe_add_pack_pattern(element, pack_pattern_prefix, list_to_check):
    """
    Adds a pack pattern to the element ("direct" or "mux") that spans the
    connection only if both of its endpoints are found in the list.
    The pack pattern is prefixed with the pack_pattern_prefix and with a name
    of the topmost pb_type in the hierarchy.
    The list_to_check must contain tuples specifying connection endpoints in
    the same way as in the arch.xml ie. "<pb_name>.<port_name>".
    """
    # Only consider directs/muxes under an <interconnect> inside a mode/pb_type.
    interconnect = element.getparent()
    if interconnect.tag != "interconnect":
        return
    if interconnect.getparent().tag not in ('mode', 'pb_type'):
        return
    out = element.attrib['output']
    if element.tag == "direct":
        # A direct connection: single input endpoint to check.
        if (element.attrib['input'], out) in list_to_check:
            add_pack_pattern(element, pack_pattern_prefix)
    elif element.tag == "mux":
        # A mux: try each input endpoint in turn.
        for candidate in element.attrib['input'].split():
            if (candidate, out) in list_to_check:
                add_pack_pattern(element, pack_pattern_prefix, candidate)
    else:
        # Shouldn't happen
        assert False, element.tag
# =============================================================================
def main():
    """Read arch.xml, add the specialized IOPAD pack patterns, print the result.

    Scans every <direct> and <mux> and, based on name regexes (top-level
    directs) or endpoint lists (intermediate/leaf directs), attaches the pack
    patterns needed to keep OBUFT/ODDR/OSERDES/IDDR/ISERDES molecules packable.
    """
    parser = argparse.ArgumentParser(
        description="Adds needed pack patterns to the architecture file."
    )
    parser.add_argument('--in_arch', required=True, help="Input arch.xml")
    args = parser.parse_args()
    arch_xml = ET.ElementTree()
    xml_parser = ET.XMLParser(remove_blank_text=True)
    root_element = arch_xml.parse(args.in_arch, xml_parser)
    # Iterate all directs first, then all muxes.
    gen = itertools.chain(
        root_element.iter('direct'), root_element.iter('mux')
    )
    for direct in gen:
        if 'name' not in direct.attrib:
            continue
        top_parent = get_top_pb_type(direct)
        if top_parent is not None:
            top_name = top_parent.attrib["name"]
        else:
            # Top-level directs have no pb_type ancestor; matched by regex.
            top_name = ""
        dir_name = direct.attrib['name']
        #
        # OBUFT
        #
        # Adding OBUFT.TQ via T_INV helper primitive
        if IOPAD_OLOGIC_TQ_REGEX.match(dir_name):
            add_pack_pattern(direct, 'T_INV_to_OBUFT')
        maybe_add_pack_pattern(
            direct, 'T_INV_to_OBUFT', [
                ('T_INV.TO', 'OLOGIC_TFF.TQ'),
                ('OLOGIC_TFF.TQ', 'OLOGICE3.TQ'),
                ('IOB33M.T', 'IOB33_MODES.T'),
                ('IOB33S.T', 'IOB33_MODES.T'),
                ('IOB33.T', 'IOB33_MODES.T'),
                ('IOB33_MODES.T', 'OBUFT_VPR.T'),
                ('OBUFT_VPR.O', 'outpad.outpad'),
            ]
        )
        #
        # ODDR
        #
        # Adding ODDR.OQ via OBUF/OBUFT pack patterns
        if IOPAD_OLOGIC_OQ_REGEX.match(dir_name):
            add_pack_pattern(direct, 'ODDR_to_OBUFT')
        maybe_add_pack_pattern(
            direct, 'ODDR_to_OBUFT', [
                ('ODDR_OQ.Q', 'OLOGIC_OFF.OQ'),
                ('OLOGIC_OFF.OQ', 'OLOGICE3.OQ'),
                ('IOB33M.O', 'IOB33_MODES.O'),
                ('IOB33S.O', 'IOB33_MODES.O'),
                ('IOB33.O', 'IOB33_MODES.O'),
                ('IOB33_MODES.O', 'OBUFT_VPR.I'),
                ('OBUFT_VPR.O', 'outpad.outpad'),
            ]
        )
        # Adding ODDR.OQ via OBUF/OBUFT + TQ via T_INV pack patterns
        if IOPAD_OLOGIC_OQ_REGEX.match(dir_name):
            add_pack_pattern(direct, 'ODDR_to_T_INV_to_OBUFT')
        if IOPAD_OLOGIC_TQ_REGEX.match(dir_name):
            add_pack_pattern(direct, 'ODDR_to_T_INV_to_OBUFT')
        maybe_add_pack_pattern(
            direct, 'ODDR_to_T_INV_to_OBUFT', [
                ('ODDR_OQ.Q', 'OLOGIC_OFF.OQ'),
                ('OLOGIC_OFF.OQ', 'OLOGICE3.OQ'),
                ('IOB33M.O', 'IOB33_MODES.O'),
                ('IOB33S.O', 'IOB33_MODES.O'),
                ('IOB33.O', 'IOB33_MODES.O'),
                ('IOB33_MODES.O', 'OBUFT_VPR.I'),
                ('OBUFT_VPR.O', 'outpad.outpad'),
                ('T_INV.TO', 'OLOGIC_TFF.TQ'),
                ('OLOGIC_TFF.TQ', 'OLOGICE3.TQ'),
                ('IOB33M.T', 'IOB33_MODES.T'),
                ('IOB33S.T', 'IOB33_MODES.T'),
                ('IOB33.T', 'IOB33_MODES.T'),
                ('IOB33_MODES.T', 'OBUFT_VPR.T'),
            ]
        )
        # Adding ODDR.TQ via OBUFT pack patterns
        if IOPAD_OLOGIC_TQ_REGEX.match(dir_name):
            add_pack_pattern(direct, 'TDDR_to_OBUFT')
        maybe_add_pack_pattern(
            direct, 'TDDR_to_OBUFT', [
                ('ODDR_TQ.Q', 'OLOGIC_TFF.TQ'),
                ('OLOGIC_TFF.TQ', 'OLOGICE3.TQ'),
                ('IOB33M.T', 'IOB33_MODES.T'),
                ('IOB33S.T', 'IOB33_MODES.T'),
                ('IOB33.T', 'IOB33_MODES.T'),
                ('IOB33_MODES.T', 'OBUFT_VPR.T'),
                ('OBUFT_VPR.O', 'outpad.outpad'),
            ]
        )
        # TODO: "TDDR" via IOBUF, OBUFTDS, IOBUFDS
        # TODO: ODDR+"TDDR" via OBUFT, IOBUF, OBUFTDS, IOBUFDS
        #
        # OSERDES
        #
        # Adding OSERDES via NO_OBUF pack patterns
        if IOPAD_OLOGIC_OQ_REGEX.match(dir_name):
            add_pack_pattern(direct, 'OSERDES_to_NO_OBUF')
        maybe_add_pack_pattern(
            direct, 'OSERDES_to_NO_OBUF', [
                ('OSERDESE2.OQ', 'OLOGICE3.OQ'),
                ('IOB33M.O', 'IOB33_MODES.O'),
                ('IOB33S.O', 'IOB33_MODES.O'),
                ('IOB33.O', 'IOB33_MODES.O'),
                ("IOB33_MODES.O", "outpad.outpad"),
            ]
        )
        # Adding OSERDES via OBUF/OBUFT pack patterns
        if IOPAD_OLOGIC_OQ_REGEX.match(dir_name):
            add_pack_pattern(direct, 'OSERDES_to_OBUF')
        maybe_add_pack_pattern(
            direct, 'OSERDES_to_OBUF', [
                ('OSERDESE2.OQ', 'OLOGICE3.OQ'),
                ('IOB33M.O', 'IOB33_MODES.O'),
                ('IOB33S.O', 'IOB33_MODES.O'),
                ('IOB33.O', 'IOB33_MODES.O'),
                ('IOB33_MODES.O', 'OBUFT_VPR.I'),
                ('OBUFT_VPR.O', 'outpad.outpad'),
            ]
        )
        # Adding OSERDES via OBUF/OBUFT + TQ via T_INV pack patterns
        if IOPAD_OLOGIC_OQ_REGEX.match(dir_name):
            add_pack_pattern(direct, 'OSERDES_to_T_INV_to_OBUF')
        if IOPAD_OLOGIC_TQ_REGEX.match(dir_name):
            add_pack_pattern(direct, 'OSERDES_to_T_INV_to_OBUF')
        maybe_add_pack_pattern(
            direct, 'OSERDES_to_T_INV_to_OBUF', [
                ('OSERDESE2.OQ', 'OLOGICE3.OQ'),
                ('IOB33M.O', 'IOB33_MODES.O'),
                ('IOB33S.O', 'IOB33_MODES.O'),
                ('IOB33.O', 'IOB33_MODES.O'),
                ('IOB33_MODES.O', 'OBUFT_VPR.I'),
                ('OBUFT_VPR.O', 'outpad.outpad'),
                ('T_INV.TO', 'OLOGICE3.TQ'),
                ('IOB33M.T', 'IOB33_MODES.T'),
                ('IOB33S.T', 'IOB33_MODES.T'),
                ('IOB33.T', 'IOB33_MODES.T'),
                ('IOB33_MODES.T', 'OBUFT_VPR.T'),
            ]
        )
        # Adding OSERDES via IOBUF pack patterns
        if IOPAD_OLOGIC_OQ_REGEX.match(dir_name):
            add_pack_pattern(direct, 'OSERDES_to_IOBUF')
        maybe_add_pack_pattern(
            direct, 'OSERDES_to_IOBUF', [
                ('OSERDESE2.OQ', 'OLOGICE3.OQ'),
                ('IOB33M.O', 'IOB33_MODES.O'),
                ('IOB33S.O', 'IOB33_MODES.O'),
                ('IOB33.O', 'IOB33_MODES.O'),
                ('IOB33_MODES.O', 'IOBUF_VPR.I'),
                ('IOBUF_VPR.IOPAD_$out', 'outpad.outpad'),
                ('inpad.inpad', 'IOBUF_VPR.IOPAD_$inp'),
            ]
        )
        # Differential buffers only exist on the master IOPAD (IOPAD_M) tiles.
        # Adding OSERDES via differential OBUFDS/OBUFTDS pack patterns
        if "IOPAD_M" in top_name:
            if IOPAD_OLOGIC_OQ_REGEX.match(dir_name):
                add_pack_pattern(direct, 'OSERDES_to_OBUFDS')
            maybe_add_pack_pattern(
                direct, 'OSERDES_to_OBUFDS', [
                    ('OSERDESE2.OQ', 'OLOGICE3.OQ'),
                    ('IOB33M.O', 'IOB33_MODES.O'),
                    ('IOB33_MODES.O', 'OBUFTDS_M_VPR.I'),
                    ('OBUFTDS_M_VPR.O', 'outpad.outpad'),
                ]
            )
        # Adding OSERDES via differential OBUFDS/OBUFTDS + TQ via T_INV
        # pack patterns
        if "IOPAD_M" in top_name:
            if IOPAD_OLOGIC_OQ_REGEX.match(dir_name):
                add_pack_pattern(direct, 'OSERDES_to_T_INV_to_OBUFDS')
            if IOPAD_OLOGIC_TQ_REGEX.match(dir_name):
                add_pack_pattern(direct, 'OSERDES_to_T_INV_to_OBUFDS')
            maybe_add_pack_pattern(
                direct, 'OSERDES_to_T_INV_to_OBUFDS', [
                    ('OSERDESE2.OQ', 'OLOGICE3.OQ'),
                    ('IOB33M.O', 'IOB33_MODES.O'),
                    ('IOB33_MODES.O', 'OBUFTDS_M_VPR.I'),
                    ('OBUFTDS_M_VPR.O', 'outpad.outpad'),
                    ('T_INV.TO', 'OLOGICE3.TQ'),
                    ('IOB33M.T', 'IOB33_MODES.T'),
                    ('IOB33_MODES.T', 'OBUFTDS_M_VPR.T'),
                ]
            )
        # Adding OSERDES via differential IOBUFDS pack patterns
        if "IOPAD_M" in top_name:
            if IOPAD_OLOGIC_OQ_REGEX.match(dir_name):
                add_pack_pattern(direct, 'OSERDES_to_IOBUFDS')
            maybe_add_pack_pattern(
                direct, 'OSERDES_to_IOBUFDS', [
                    ('OSERDESE2.OQ', 'OLOGICE3.OQ'),
                    ('IOB33M.O', 'IOB33_MODES.O'),
                    ('IOB33_MODES.O', 'IOBUFDS_M_VPR.I'),
                    ('IOBUFDS_M_VPR.IOPAD_$out', 'outpad.outpad'),
                    ('inpad.inpad', 'IOBUFDS_M_VPR.IOPAD_$inp'),
                ]
            )
        #
        # IDDR
        #
        # Each input path exists in two flavors: straight from the pad, or
        # routed through the IDELAY primitive (DDLY).
        for use_idelay in [False, True]:
            if use_idelay:
                name = "IDDR_to_IDELAY"
                connections = [('ILOGICE3.DDLY', 'IFF.D')]
            else:
                name = "IDDR"
                connections = [('ILOGICE3.D', 'IFF.D')]
            # Adding IDDR via IBUF pack patterns
            if not use_idelay and IOPAD_ILOGIC_REGEX.match(dir_name):
                add_pack_pattern(direct, name + '_to_IBUF')
            if use_idelay and ('I_to_IDELAYE2' in dir_name
                               or 'DATAOUT_to_ILOGICE3' in dir_name):
                add_pack_pattern(direct, name + '_to_IBUF')
            maybe_add_pack_pattern(
                direct, name + '_to_IBUF', [
                    ('inpad.inpad', 'IBUF_VPR.I'),
                    ('IBUF_VPR.O', 'IOB33_MODES.I'),
                    ('IOB33_MODES.I', 'IOB33.I'),
                    ('IOB33_MODES.I', 'IOB33M.I'),
                    ('IOB33_MODES.I', 'IOB33S.I'),
                ] + connections
            )
            # TODO: IDDR via IOBUF, IDDR via IOBUFDS
        #
        # ISERDES
        #
        for use_idelay in [False, True]:
            if use_idelay:
                name = "ISERDESE2_to_IDELAY"
                connections = [('ILOGICE3.DDLY', 'ISERDESE2_IDELAY.DDLY')]
            else:
                name = "ISERDESE2"
                connections = [('ILOGICE3.D', 'ISERDESE2_NO_IDELAY.D')]
            # Adding ISERDES via NO_IBUF pack patterns
            if not use_idelay and IOPAD_ILOGIC_REGEX.match(dir_name):
                add_pack_pattern(direct, name + '_to_NO_IBUF')
            if use_idelay and ('I_to_IDELAYE2' in dir_name
                               or 'DATAOUT_to_ILOGICE3' in dir_name):
                add_pack_pattern(direct, name + '_to_NO_IBUF')
            maybe_add_pack_pattern(
                direct, name + '_to_NO_IBUF', [
                    ("inpad.inpad", "IOB33_MODES.I"),
                    ('IOB33_MODES.I', 'IOB33.I'),
                    ('IOB33_MODES.I', 'IOB33M.I'),
                    ('IOB33_MODES.I', 'IOB33S.I'),
                ] + connections
            )
            # Adding ISERDES via IBUF pack patterns
            if not use_idelay and IOPAD_ILOGIC_REGEX.match(dir_name):
                add_pack_pattern(direct, name + '_to_IBUF')
            if use_idelay and ('I_to_IDELAYE2' in dir_name
                               or 'DATAOUT_to_ILOGICE3' in dir_name):
                add_pack_pattern(direct, name + '_to_IBUF')
            maybe_add_pack_pattern(
                direct, name + '_to_IBUF', [
                    ('inpad.inpad', 'IBUF_VPR.I'),
                    ('IBUF_VPR.O', 'IOB33_MODES.I'),
                    ('IOB33_MODES.I', 'IOB33.I'),
                    ('IOB33_MODES.I', 'IOB33M.I'),
                    ('IOB33_MODES.I', 'IOB33S.I'),
                ] + connections
            )
            # Adding ISERDES via IOBUF pack patterns
            if not use_idelay and IOPAD_ILOGIC_REGEX.match(dir_name):
                add_pack_pattern(direct, name + '_to_IOBUF')
            if use_idelay and ('I_to_IDELAYE2' in dir_name
                               or 'DATAOUT_to_ILOGICE3' in dir_name):
                add_pack_pattern(direct, name + '_to_IOBUF')
            maybe_add_pack_pattern(
                direct, name + '_to_IOBUF', [
                    ('IOBUF_VPR.IOPAD_$out', 'outpad.outpad'),
                    ('inpad.inpad', 'IOBUF_VPR.IOPAD_$inp'),
                    ('IOBUF_VPR.O', 'IOB33_MODES.I'),
                    ('IOB33_MODES.I', 'IOB33.I'),
                    ('IOB33_MODES.I', 'IOB33M.I'),
                    ('IOB33_MODES.I', 'IOB33S.I'),
                ] + connections
            )
            # Adding ISERDES via differential IOBUFDS pack patterns
            if "IOPAD_M" in top_name:
                if not use_idelay and IOPAD_ILOGIC_REGEX.match(dir_name):
                    add_pack_pattern(direct, name + '_to_IOBUFDS')
                if use_idelay and ('I_to_IDELAYE2' in dir_name
                                   or 'DATAOUT_to_ILOGICE3' in dir_name):
                    add_pack_pattern(direct, name + '_to_IOBUFDS')
                maybe_add_pack_pattern(
                    direct, name + '_to_IOBUFDS', [
                        ('IOBUFDS_M_VPR.IOPAD_$out', 'outpad.outpad'),
                        ('inpad.inpad', 'IOBUFDS_M_VPR.IOPAD_$inp'),
                        ('IOBUFDS_M_VPR.O', 'IOB33_MODES.I'),
                        ('IOB33_MODES.I', 'IOB33M.I'),
                    ] + connections
                )
        #
        # IDELAY only
        #
        # TODO: Need to change sth in the arch.
    # Emit the modified architecture XML to stdout.
    print(ET.tostring(arch_xml, pretty_print=True).decode('utf-8'))
# Script entry point: rewrite the architecture XML to stdout.
if __name__ == "__main__":
    main()
| {
"repo_name": "SymbiFlow/symbiflow-arch-defs",
"path": "xc/common/utils/add_pack_patterns.py",
"copies": "1",
"size": "17843",
"license": "isc",
"hash": 6266654578773550000,
"line_mean": 34.8293172691,
"line_max": 81,
"alpha_frac": 0.5029983747,
"autogenerated": false,
"ratio": 3.2289178429243575,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9231423430456414,
"avg_score": 0.00009855743358861522,
"num_lines": 498
} |
"""Adds SRPD and four datasets
Revision ID: c5e276c0d67f
Revises: 77ad8047becf
Create Date: 2017-01-20 03:58:12.333638
"""
# revision identifiers, used by Alembic.
revision = 'c5e276c0d67f'
down_revision = '77ad8047becf'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create four San Rafael PD (SRPD) incident tables and a visibility flag.

    Each table carries a ``department_id`` foreign key back to ``departments``
    and an ``opaque_id`` used to de-duplicate externally supplied records.
    """
    # Use-of-force incidents reported by SRPD.
    op.create_table(
        'use_of_force_incidents_srpd',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('department_id', sa.Integer(), nullable=False),
        sa.Column('opaque_id', sa.String(length=255), nullable=False),
        sa.Column('case_number', sa.String(length=128), nullable=True),
        sa.Column('file_number', sa.String(length=128), nullable=True),
        sa.Column('occurred_date', sa.DateTime(), nullable=True),
        sa.Column('team', sa.String(length=255), nullable=True),
        sa.Column('assignment', sa.String(length=255), nullable=True),
        sa.Column('service_type', sa.String(length=255), nullable=True),
        sa.Column('use_of_force_reason', sa.String(length=255), nullable=True),
        sa.Column('aggravating_factors', sa.String(length=255), nullable=True),
        sa.Column('arrest_made', sa.Boolean(), nullable=True),
        sa.Column('resident_injured', sa.Boolean(), nullable=True),
        sa.Column('resident_hospitalized', sa.Boolean(), nullable=True),
        sa.Column('officer_injured', sa.Boolean(), nullable=True),
        sa.Column('officer_hospitalized', sa.Boolean(), nullable=True),
        sa.Column('officer_force_type', sa.String(length=255), nullable=True),
        sa.ForeignKeyConstraint(['department_id'], ['departments.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    # Officer-involved shootings reported by SRPD.
    op.create_table(
        'officer_involved_shootings_srpd',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('department_id', sa.Integer(), nullable=False),
        sa.Column('opaque_id', sa.String(length=255), nullable=False),
        sa.Column('case_number', sa.String(length=128), nullable=True),
        sa.Column('occurred_date', sa.DateTime(), nullable=True),
        sa.Column('team', sa.String(length=255), nullable=True),
        sa.Column('assignment', sa.String(length=255), nullable=True),
        sa.Column('resident_race', sa.String(length=255), nullable=True),
        sa.Column('resident_sex', sa.String(length=255), nullable=True),
        sa.Column('resident_age', sa.String(length=255), nullable=True),
        sa.Column('service_type', sa.String(length=255), nullable=True),
        sa.Column('officer_weapon_used', sa.String(length=255), nullable=True),
        sa.Column('intentional', sa.Boolean(), nullable=True),
        sa.Column('resident_condition', sa.String(length=255), nullable=True),
        sa.Column('officer_condition', sa.String(length=255), nullable=True),
        sa.Column('disposition', sa.String(length=255), nullable=True),
        sa.ForeignKeyConstraint(['department_id'], ['departments.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    # Citizen complaints filed against SRPD.
    op.create_table(
        'citizen_complaints_srpd',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('department_id', sa.Integer(), nullable=False),
        sa.Column('opaque_id', sa.String(length=255), nullable=False),
        sa.Column('case_number', sa.String(length=128), nullable=True),
        sa.Column('file_number', sa.String(length=128), nullable=True),
        sa.Column('occurred_date', sa.DateTime(), nullable=True),
        sa.Column('team', sa.String(length=255), nullable=True),
        sa.Column('assignment', sa.String(length=255), nullable=True),
        sa.Column('bureau', sa.String(length=255), nullable=True),
        sa.Column('division', sa.String(length=255), nullable=True),
        sa.Column('resident_race', sa.String(length=255), nullable=True),
        sa.Column('resident_sex', sa.String(length=255), nullable=True),
        sa.Column('resident_age', sa.String(length=255), nullable=True),
        sa.Column('allegation', sa.String(length=255), nullable=True),
        sa.Column('disposition', sa.String(length=255), nullable=True),
        sa.Column('service_type', sa.String(length=255), nullable=True),
        sa.Column('source', sa.String(length=255), nullable=True),
        sa.ForeignKeyConstraint(['department_id'], ['departments.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    # Vehicle pursuits reported by SRPD.
    op.create_table(
        'pursuits_srpd',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('department_id', sa.Integer(), nullable=False),
        sa.Column('opaque_id', sa.String(length=255), nullable=False),
        sa.Column('case_number', sa.String(length=128), nullable=True),
        sa.Column('pursuit_number', sa.String(length=128), nullable=True),
        sa.Column('occurred_date', sa.DateTime(), nullable=True),
        sa.Column('team', sa.String(length=255), nullable=True),
        sa.Column('assignment', sa.String(length=255), nullable=True),
        sa.Column('aborted', sa.Boolean(), nullable=True),
        sa.Column('accident', sa.Boolean(), nullable=True),
        sa.Column('arrest_made', sa.Boolean(), nullable=True),
        sa.Column('distance', sa.String(length=255), nullable=True),
        sa.Column('reason', sa.String(length=255), nullable=True),
        sa.Column('vehicle_type', sa.String(length=255), nullable=True),
        sa.Column('max_speed', sa.String(length=255), nullable=True),
        sa.Column('resident_race', sa.String(length=255), nullable=True),
        sa.Column('resident_sex', sa.String(length=255), nullable=True),
        sa.Column('resident_age', sa.String(length=255), nullable=True),
        sa.Column('copter_available', sa.Boolean(), nullable=True),
        sa.Column('copter_used', sa.Boolean(), nullable=True),
        sa.Column('dui_arrest', sa.Boolean(), nullable=True),
        sa.Column('stop_device_used', sa.Boolean(), nullable=True),
        sa.Column('stop_device', sa.String(length=255), nullable=True),
        sa.Column('follow_policy', sa.Boolean(), nullable=True),
        sa.Column('weather_condition', sa.String(length=255), nullable=True),
        sa.Column('location_began', sa.String(length=255), nullable=True),
        sa.Column('location_ended', sa.String(length=255), nullable=True),
        sa.Column('in_car_cam_available', sa.Boolean(), nullable=True),
        sa.Column('in_car_cam_used', sa.Boolean(), nullable=True),
        sa.Column('total_time_minutes', sa.String(length=255), nullable=True),
        sa.Column('influencing_factor', sa.String(length=255), nullable=True),
        sa.Column('aborted_by', sa.String(length=255), nullable=True),
        sa.Column('concluded_by', sa.String(length=255), nullable=True),
        sa.Column('damage_type', sa.String(length=255), nullable=True),
        sa.Column('injury_type', sa.String(length=255), nullable=True),
        sa.Column('initiated_by_agency', sa.String(length=255), nullable=True),
        sa.Column('concluded_by_agency', sa.String(length=255), nullable=True),
        sa.Column('liability_claim', sa.Boolean(), nullable=True),
        sa.Column('associated_officer_count', sa.String(length=255), nullable=True),
        sa.Column('violation', sa.String(length=255), nullable=True),
        sa.Column('justified', sa.Boolean(), nullable=True),
        sa.Column('officer_condition', sa.String(length=255), nullable=True),
        sa.ForeignKeyConstraint(['department_id'], ['departments.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    # Per-department visibility flag; server_default keeps existing rows valid.
    op.add_column('departments', sa.Column('is_public_pursuits', sa.Boolean(), server_default=sa.true(), nullable=False))
def downgrade():
    """Reverse this revision: drop the pursuits flag and all four SRPD tables."""
    # Remove the department flag first, then the tables in reverse creation order.
    op.drop_column('departments', 'is_public_pursuits')
    for table_name in (
        'pursuits_srpd',
        'citizen_complaints_srpd',
        'officer_involved_shootings_srpd',
        'use_of_force_incidents_srpd',
    ):
        op.drop_table(table_name)
| {
"repo_name": "codeforamerica/comport",
"path": "migrations/versions/c5e276c0d67f_.py",
"copies": "1",
"size": "7821",
"license": "bsd-3-clause",
"hash": 8748911696314503000,
"line_mean": 55.6739130435,
"line_max": 121,
"alpha_frac": 0.6523462473,
"autogenerated": false,
"ratio": 3.410815525512429,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9559353571554093,
"avg_score": 0.000761640251667103,
"num_lines": 138
} |
""" Adds Status for ppagent tracking. Changes payout foreign key to point to blockhash instead of height.
Revision ID: 2db48f0c89c7
Revises: b1e627b9a0
Create Date: 2014-03-08 18:12:27.925170
"""
# revision identifiers, used by Alembic.
revision = '2db48f0c89c7'
down_revision = 'b1e627b9a0'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add ppagent worker status tracking; re-key payouts to the block hash."""
    ### commands auto generated by Alembic - please adjust! ###
    # One row per (user, worker) pair holding the latest reported status.
    op.create_table('status',
    sa.Column('user', sa.String(), nullable=False),
    sa.Column('worker', sa.String(), nullable=False),
    sa.Column('status', sa.String(), nullable=True),
    sa.PrimaryKeyConstraint('user', 'worker')
    )
    # Rebuild payout from scratch: the old table referenced block height, the
    # new one references block.hash, so block's primary key must move to hash
    # before the new foreign key can be created.
    op.drop_table('payout')
    op.drop_constraint('block_pkey', 'block')
    op.create_primary_key('block_pkey', 'block', ['hash'])
    op.create_table('payout',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('blockhash', sa.String(), nullable=True),
    sa.Column('user', sa.String(), nullable=True),
    sa.Column('shares', sa.BigInteger(), nullable=True),
    sa.Column('amount', sa.BigInteger(), nullable=True),
    sa.Column('transaction_id', sa.String(), nullable=True),
    sa.ForeignKeyConstraint(['blockhash'], ['block.hash'], ),
    sa.ForeignKeyConstraint(['transaction_id'], ['transaction.txid'], ),
    sa.PrimaryKeyConstraint('id'),
    # At most one payout per user per block, and never a zero/negative amount.
    sa.UniqueConstraint('user', 'blockhash'),
    sa.CheckConstraint('amount>0', 'min_payout_amount')
    )
    ### end Alembic commands ###
def downgrade():
    """Reverse this revision by removing both tables it created."""
    # Note: this does not restore the original height-keyed payout table.
    for table_name in ('payout', 'status'):
        op.drop_table(table_name)
| {
"repo_name": "lae/simplemona",
"path": "migrations/versions/2db48f0c89c7_.py",
"copies": "2",
"size": "1916",
"license": "mit",
"hash": -153211591428671940,
"line_mean": 38.9166666667,
"line_max": 105,
"alpha_frac": 0.5735908142,
"autogenerated": false,
"ratio": 4.0336842105263155,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5607275024726316,
"avg_score": null,
"num_lines": null
} |
"""Adds summary tags/fields to a merged VCF file.
Collaborates with two summary "callers" to add INFO and FORMAT tags to each
variant record based on the presence of previously translated tags.
"""
from __future__ import print_function, absolute_import, division
import os
import jacquard.utils.logger as logger
import jacquard.utils.utils as utils
import jacquard.utils.summarize_rollup_transform as summarize_caller
import jacquard.utils.summarize_zscore_transform as zscore_caller
import jacquard.utils.vcf as vcf
def _write_metaheaders(caller,
                       vcf_reader,
                       file_writer,
                       execution_context=None,
                       new_meta_headers=None):
    """Write sorted metaheaders followed by the column header to file_writer.

    Combines the reader's existing metaheaders with the optional execution
    context, the caller's tag metaheaders, and any extra header line.
    """
    headers = list(vcf_reader.metaheaders)
    if execution_context:
        headers.extend(execution_context)
    headers.extend(caller.get_metaheaders())
    if new_meta_headers:
        headers.append(new_meta_headers)
    output_lines = utils.sort_metaheaders(headers)
    output_lines.append(vcf_reader.column_header)
    file_writer.write("\n".join(output_lines) + "\n")
def _write_to_tmp_file(caller, vcf_reader, tmp_writer):
    """Copy vcf_reader to tmp_writer, adding the caller's summary tags.

    Writes the (augmented) metaheaders first, then every tagged record.
    Both the reader and writer are always closed, even if tagging fails.
    """
    vcf_reader.open()
    tmp_writer.open()
    try:
        _write_metaheaders(caller, vcf_reader, tmp_writer)
        logger.info("Adding summary tags for [{}]", vcf_reader.file_name)
        _add_tags(caller, vcf_reader, tmp_writer)
    finally:
        vcf_reader.close()
        tmp_writer.close()
def _write_zscores(caller,
                   metaheaders,
                   vcf_reader,
                   file_writer):
    """Write zscore-annotated records from vcf_reader to file_writer.

    The output headers are the supplied metaheaders plus the reader's and
    the caller's, sorted, followed by the column header. Each record line
    is rewritten by the caller's add_tags before being emitted.
    """
    #TODO: (jebene) make zscores and tmp file follow the same pattern when writing
    try:
        file_writer.open()
        headers = list(metaheaders)
        headers.extend(vcf_reader.metaheaders)
        headers.extend(caller.metaheaders)
        sorted_metaheaders = utils.sort_metaheaders(headers)
        sorted_metaheaders.append(vcf_reader.column_header)
        file_writer.write("\n".join(sorted_metaheaders) +"\n")
        vcf_reader.open()
        # Unlike _add_tags, the caller returns the rewritten line here.
        for vcf_record in vcf_reader.vcf_records():
            line = caller.add_tags(vcf_record)
            file_writer.write(line)
    finally:
        vcf_reader.close()
        file_writer.close()
def _add_tags(caller, vcf_reader, file_writer):
for vcf_record in vcf_reader.vcf_records():
caller.add_tags(vcf_record)
file_writer.write(vcf_record.text())
def add_subparser(subparser):
    """Register the 'summarize' command and its arguments on subparser."""
    # pylint: disable=line-too-long
    parser = subparser.add_parser(
        "summarize",
        formatter_class=utils._JacquardHelpFormatter,
        usage=[""],
        description=('\n\n'
                     'Arguments in the [] are DEFAULT\n'
                     '\n'),
        help="Accepts a Jacquard-merged VCF file and creates a new file, adding summary fields.")
    positional_args = (
        ("input", "Jacquard-merged VCF file (or any VCF with Jacquard tags; e.g. JQ_SOM_MT)"),
        ("output", "VCF file"),
    )
    for arg_name, help_text in positional_args:
        parser.add_argument(arg_name, help=help_text)
    parser.add_argument("--force", action='store_true',
                        help="Overwrite contents of output directory")
    parser.add_argument("--log_file", help="Log file destination")
    parser.add_argument("-v", "--verbose", action='store_true')
def report_prediction(args):
    """Return the set of output file basenames this command will produce."""
    return {os.path.basename(args.output)}
def get_required_input_output_types():
    """Both the input and the output of 'summarize' are single files."""
    return "file", "file"
#TODO (cgates): Validate should actually validate
def validate_args(dummy):
    """Placeholder validation hook; currently accepts any arguments."""
    return None
def execute(args, execution_context):
    """Run the summarize command.

    Pass 1 copies the merged input VCF to a temporary file while adding
    summary tags; pass 2 re-reads that file, appends zscore annotations,
    and writes the final output. The temporary file is then removed.
    """
    input_path = os.path.abspath(args.input)
    output_path = os.path.abspath(args.output)
    summarizer = summarize_caller.SummarizeCaller()

    # Pass 1: summary tags into <output>.tmp.
    reader = vcf.VcfReader(vcf.FileReader(input_path))
    tmp_path = output_path + ".tmp"
    _write_to_tmp_file(summarizer, reader, vcf.FileWriter(tmp_path))

    # Pass 2: zscores computed over the tagged temp file.
    tagged_reader = vcf.VcfReader(vcf.FileReader(tmp_path))
    logger.info("Calculating zscores")
    zscorer = zscore_caller.ZScoreCaller(tagged_reader)
    combined_headers = execution_context + summarizer.get_metaheaders()
    _write_zscores(zscorer, combined_headers, tagged_reader,
                   vcf.FileWriter(output_path))

    os.remove(tmp_path)
| {
"repo_name": "umich-brcf-bioinf/Jacquard",
"path": "jacquard/summarize.py",
"copies": "1",
"size": "4407",
"license": "apache-2.0",
"hash": 1450445814603431400,
"line_mean": 34.8292682927,
"line_max": 123,
"alpha_frac": 0.6412525528,
"autogenerated": false,
"ratio": 3.687866108786611,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9816368380187379,
"avg_score": 0.002550056279846296,
"num_lines": 123
} |
"""Adds support for generic thermostat units."""
import asyncio
import logging
import math
import voluptuous as vol
from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateEntity
from homeassistant.components.climate.const import (
ATTR_PRESET_MODE,
CURRENT_HVAC_COOL,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
CURRENT_HVAC_OFF,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_NONE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_TEMPERATURE,
CONF_NAME,
CONF_UNIQUE_ID,
EVENT_HOMEASSISTANT_START,
PRECISION_HALVES,
PRECISION_TENTHS,
PRECISION_WHOLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import DOMAIN as HA_DOMAIN, CoreState, callback
from homeassistant.exceptions import ConditionError
from homeassistant.helpers import condition
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import (
async_track_state_change_event,
async_track_time_interval,
)
from homeassistant.helpers.reload import async_setup_reload_service
from homeassistant.helpers.restore_state import RestoreEntity
from . import DOMAIN, PLATFORMS
_LOGGER = logging.getLogger(__name__)

# Hysteresis (in the configured unit) applied around the target temperature.
DEFAULT_TOLERANCE = 0.3
DEFAULT_NAME = "Generic Thermostat"

# Configuration keys accepted by PLATFORM_SCHEMA below.
CONF_HEATER = "heater"
CONF_SENSOR = "target_sensor"
CONF_MIN_TEMP = "min_temp"
CONF_MAX_TEMP = "max_temp"
CONF_TARGET_TEMP = "target_temp"
CONF_AC_MODE = "ac_mode"
CONF_MIN_DUR = "min_cycle_duration"
CONF_COLD_TOLERANCE = "cold_tolerance"
CONF_HOT_TOLERANCE = "hot_tolerance"
CONF_KEEP_ALIVE = "keep_alive"
CONF_INITIAL_HVAC_MODE = "initial_hvac_mode"
CONF_AWAY_TEMP = "away_temp"
CONF_PRECISION = "precision"

# Base feature set; SUPPORT_PRESET_MODE is OR-ed in when away_temp is set.
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_HEATER): cv.entity_id,
        vol.Required(CONF_SENSOR): cv.entity_id,
        vol.Optional(CONF_AC_MODE): cv.boolean,
        vol.Optional(CONF_MAX_TEMP): vol.Coerce(float),
        vol.Optional(CONF_MIN_DUR): cv.positive_time_period,
        vol.Optional(CONF_MIN_TEMP): vol.Coerce(float),
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Optional(CONF_COLD_TOLERANCE, default=DEFAULT_TOLERANCE): vol.Coerce(float),
        vol.Optional(CONF_HOT_TOLERANCE, default=DEFAULT_TOLERANCE): vol.Coerce(float),
        vol.Optional(CONF_TARGET_TEMP): vol.Coerce(float),
        vol.Optional(CONF_KEEP_ALIVE): cv.positive_time_period,
        vol.Optional(CONF_INITIAL_HVAC_MODE): vol.In(
            [HVAC_MODE_COOL, HVAC_MODE_HEAT, HVAC_MODE_OFF]
        ),
        vol.Optional(CONF_AWAY_TEMP): vol.Coerce(float),
        vol.Optional(CONF_PRECISION): vol.In(
            [PRECISION_TENTHS, PRECISION_HALVES, PRECISION_WHOLE]
        ),
        vol.Optional(CONF_UNIQUE_ID): cv.string,
    }
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the generic thermostat platform."""
    await async_setup_reload_service(hass, DOMAIN, PLATFORMS)

    # Arguments are listed in the positional order GenericThermostat expects.
    thermostat = GenericThermostat(
        config.get(CONF_NAME),
        config.get(CONF_HEATER),
        config.get(CONF_SENSOR),
        config.get(CONF_MIN_TEMP),
        config.get(CONF_MAX_TEMP),
        config.get(CONF_TARGET_TEMP),
        config.get(CONF_AC_MODE),
        config.get(CONF_MIN_DUR),
        config.get(CONF_COLD_TOLERANCE),
        config.get(CONF_HOT_TOLERANCE),
        config.get(CONF_KEEP_ALIVE),
        config.get(CONF_INITIAL_HVAC_MODE),
        config.get(CONF_AWAY_TEMP),
        config.get(CONF_PRECISION),
        hass.config.units.temperature_unit,
        config.get(CONF_UNIQUE_ID),
    )
    async_add_entities([thermostat])
class GenericThermostat(ClimateEntity, RestoreEntity):
    """Representation of a Generic Thermostat device."""

    def __init__(
        self,
        name,
        heater_entity_id,
        sensor_entity_id,
        min_temp,
        max_temp,
        target_temp,
        ac_mode,
        min_cycle_duration,
        cold_tolerance,
        hot_tolerance,
        keep_alive,
        initial_hvac_mode,
        away_temp,
        precision,
        unit,
        unique_id,
    ):
        """Initialize the thermostat."""
        self._name = name
        self.heater_entity_id = heater_entity_id
        self.sensor_entity_id = sensor_entity_id
        self.ac_mode = ac_mode
        self.min_cycle_duration = min_cycle_duration
        self._cold_tolerance = cold_tolerance
        self._hot_tolerance = hot_tolerance
        self._keep_alive = keep_alive
        self._hvac_mode = initial_hvac_mode
        # Remembered when switching to away preset so it can be restored later.
        self._saved_target_temp = target_temp or away_temp
        self._temp_precision = precision
        if self.ac_mode:
            self._hvac_list = [HVAC_MODE_COOL, HVAC_MODE_OFF]
        else:
            self._hvac_list = [HVAC_MODE_HEAT, HVAC_MODE_OFF]
        self._active = False
        self._cur_temp = None
        self._temp_lock = asyncio.Lock()
        self._min_temp = min_temp
        self._max_temp = max_temp
        self._target_temp = target_temp
        self._unit = unit
        self._unique_id = unique_id
        self._support_flags = SUPPORT_FLAGS
        if away_temp:
            self._support_flags = SUPPORT_FLAGS | SUPPORT_PRESET_MODE
        self._away_temp = away_temp
        self._is_away = False

    async def async_added_to_hass(self):
        """Run when entity about to be added."""
        await super().async_added_to_hass()

        # Add listener
        self.async_on_remove(
            async_track_state_change_event(
                self.hass, [self.sensor_entity_id], self._async_sensor_changed
            )
        )
        self.async_on_remove(
            async_track_state_change_event(
                self.hass, [self.heater_entity_id], self._async_switch_changed
            )
        )

        if self._keep_alive:
            self.async_on_remove(
                async_track_time_interval(
                    self.hass, self._async_control_heating, self._keep_alive
                )
            )

        @callback
        def _async_startup(*_):
            """Init on startup."""
            sensor_state = self.hass.states.get(self.sensor_entity_id)
            if sensor_state and sensor_state.state not in (
                STATE_UNAVAILABLE,
                STATE_UNKNOWN,
            ):
                self._async_update_temp(sensor_state)
                self.async_write_ha_state()

        if self.hass.state == CoreState.running:
            _async_startup()
        else:
            self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, _async_startup)

        # Check If we have an old state
        old_state = await self.async_get_last_state()
        if old_state is not None:
            # If we have no initial temperature, restore
            if self._target_temp is None:
                # If we have a previously saved temperature
                if old_state.attributes.get(ATTR_TEMPERATURE) is None:
                    if self.ac_mode:
                        self._target_temp = self.max_temp
                    else:
                        self._target_temp = self.min_temp
                    _LOGGER.warning(
                        "Undefined target temperature, falling back to %s",
                        self._target_temp,
                    )
                else:
                    self._target_temp = float(old_state.attributes[ATTR_TEMPERATURE])
            if old_state.attributes.get(ATTR_PRESET_MODE) == PRESET_AWAY:
                self._is_away = True
            if not self._hvac_mode and old_state.state:
                self._hvac_mode = old_state.state

        else:
            # No previous state, try and restore defaults
            if self._target_temp is None:
                if self.ac_mode:
                    self._target_temp = self.max_temp
                else:
                    self._target_temp = self.min_temp
                _LOGGER.warning(
                    "No previously saved temperature, setting to %s", self._target_temp
                )

        # Set default state to off
        if not self._hvac_mode:
            self._hvac_mode = HVAC_MODE_OFF

        # Prevent the device from keep running if HVAC_MODE_OFF
        if self._hvac_mode == HVAC_MODE_OFF and self._is_device_active:
            await self._async_heater_turn_off()
            _LOGGER.warning(
                "The climate mode is OFF, but the switch device is ON. Turning off device %s",
                self.heater_entity_id,
            )

    @property
    def should_poll(self):
        """Return the polling state."""
        return False

    @property
    def name(self):
        """Return the name of the thermostat."""
        return self._name

    @property
    def unique_id(self):
        """Return the unique id of this thermostat."""
        return self._unique_id

    @property
    def precision(self):
        """Return the precision of the system."""
        if self._temp_precision is not None:
            return self._temp_precision
        return super().precision

    @property
    def target_temperature_step(self):
        """Return the supported step of target temperature."""
        # Since this integration does not yet have a step size parameter
        # we have to re-use the precision as the step size for now.
        return self.precision

    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return self._unit

    @property
    def current_temperature(self):
        """Return the sensor temperature."""
        return self._cur_temp

    @property
    def hvac_mode(self):
        """Return current operation."""
        return self._hvac_mode

    @property
    def hvac_action(self):
        """Return the current running hvac operation if supported.

        Need to be one of CURRENT_HVAC_*.
        """
        if self._hvac_mode == HVAC_MODE_OFF:
            return CURRENT_HVAC_OFF
        if not self._is_device_active:
            return CURRENT_HVAC_IDLE
        if self.ac_mode:
            return CURRENT_HVAC_COOL
        return CURRENT_HVAC_HEAT

    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return self._target_temp

    @property
    def hvac_modes(self):
        """List of available operation modes."""
        return self._hvac_list

    @property
    def preset_mode(self):
        """Return the current preset mode, e.g., home, away, temp."""
        return PRESET_AWAY if self._is_away else PRESET_NONE

    @property
    def preset_modes(self):
        """Return a list of available preset modes."""
        # Always return a list: callers iterate this value, so returning the
        # bare PRESET_NONE string would iterate it character by character.
        return [PRESET_NONE, PRESET_AWAY] if self._away_temp else [PRESET_NONE]

    async def async_set_hvac_mode(self, hvac_mode):
        """Set hvac mode."""
        if hvac_mode == HVAC_MODE_HEAT:
            self._hvac_mode = HVAC_MODE_HEAT
            await self._async_control_heating(force=True)
        elif hvac_mode == HVAC_MODE_COOL:
            self._hvac_mode = HVAC_MODE_COOL
            await self._async_control_heating(force=True)
        elif hvac_mode == HVAC_MODE_OFF:
            self._hvac_mode = HVAC_MODE_OFF
            if self._is_device_active:
                await self._async_heater_turn_off()
        else:
            _LOGGER.error("Unrecognized hvac mode: %s", hvac_mode)
            return
        # Ensure we update the current operation after changing the mode
        self.async_write_ha_state()

    async def async_set_temperature(self, **kwargs):
        """Set new target temperature."""
        temperature = kwargs.get(ATTR_TEMPERATURE)
        if temperature is None:
            return
        self._target_temp = temperature
        await self._async_control_heating(force=True)
        self.async_write_ha_state()

    @property
    def min_temp(self):
        """Return the minimum temperature."""
        if self._min_temp is not None:
            return self._min_temp

        # get default temp from super class
        return super().min_temp

    @property
    def max_temp(self):
        """Return the maximum temperature."""
        if self._max_temp is not None:
            return self._max_temp

        # Get default temp from super class
        return super().max_temp

    async def _async_sensor_changed(self, event):
        """Handle temperature changes."""
        new_state = event.data.get("new_state")
        if new_state is None or new_state.state in (STATE_UNAVAILABLE, STATE_UNKNOWN):
            return

        self._async_update_temp(new_state)
        await self._async_control_heating()
        self.async_write_ha_state()

    @callback
    def _async_switch_changed(self, event):
        """Handle heater switch state changes."""
        new_state = event.data.get("new_state")
        if new_state is None:
            return
        self.async_write_ha_state()

    @callback
    def _async_update_temp(self, state):
        """Update thermostat with latest state from sensor."""
        try:
            cur_temp = float(state.state)
            if math.isnan(cur_temp) or math.isinf(cur_temp):
                raise ValueError(f"Sensor has illegal state {state.state}")
            self._cur_temp = cur_temp
        except ValueError as ex:
            _LOGGER.error("Unable to update from sensor: %s", ex)

    async def _async_control_heating(self, time=None, force=False):
        """Check if we need to turn heating on or off."""
        async with self._temp_lock:
            if not self._active and None not in (
                self._cur_temp,
                self._target_temp,
                self._is_device_active,
            ):
                self._active = True
                _LOGGER.info(
                    "Obtained current and target temperature. "
                    "Generic thermostat active. %s, %s",
                    self._cur_temp,
                    self._target_temp,
                )

            if not self._active or self._hvac_mode == HVAC_MODE_OFF:
                return

            # If the `force` argument is True, we
            # ignore `min_cycle_duration`.
            # If the `time` argument is not none, we were invoked for
            # keep-alive purposes, and `min_cycle_duration` is irrelevant.
            if not force and time is None and self.min_cycle_duration:
                if self._is_device_active:
                    current_state = STATE_ON
                else:
                    current_state = HVAC_MODE_OFF
                try:
                    long_enough = condition.state(
                        self.hass,
                        self.heater_entity_id,
                        current_state,
                        self.min_cycle_duration,
                    )
                except ConditionError:
                    long_enough = False

                if not long_enough:
                    return

            too_cold = self._target_temp >= self._cur_temp + self._cold_tolerance
            too_hot = self._cur_temp >= self._target_temp + self._hot_tolerance
            if self._is_device_active:
                if (self.ac_mode and too_cold) or (not self.ac_mode and too_hot):
                    _LOGGER.info("Turning off heater %s", self.heater_entity_id)
                    await self._async_heater_turn_off()
                elif time is not None:
                    # The time argument is passed only in keep-alive case
                    _LOGGER.info(
                        "Keep-alive - Turning on heater heater %s",
                        self.heater_entity_id,
                    )
                    await self._async_heater_turn_on()
            else:
                if (self.ac_mode and too_hot) or (not self.ac_mode and too_cold):
                    _LOGGER.info("Turning on heater %s", self.heater_entity_id)
                    await self._async_heater_turn_on()
                elif time is not None:
                    # The time argument is passed only in keep-alive case
                    _LOGGER.info(
                        "Keep-alive - Turning off heater %s", self.heater_entity_id
                    )
                    await self._async_heater_turn_off()

    @property
    def _is_device_active(self):
        """If the toggleable device is currently active."""
        # None (not False) when the heater entity does not exist yet, so that
        # _async_control_heating can tell "unknown" apart from "off".
        if not self.hass.states.get(self.heater_entity_id):
            return None

        return self.hass.states.is_state(self.heater_entity_id, STATE_ON)

    @property
    def supported_features(self):
        """Return the list of supported features."""
        return self._support_flags

    async def _async_heater_turn_on(self):
        """Turn heater toggleable device on."""
        data = {ATTR_ENTITY_ID: self.heater_entity_id}
        await self.hass.services.async_call(
            HA_DOMAIN, SERVICE_TURN_ON, data, context=self._context
        )

    async def _async_heater_turn_off(self):
        """Turn heater toggleable device off."""
        data = {ATTR_ENTITY_ID: self.heater_entity_id}
        await self.hass.services.async_call(
            HA_DOMAIN, SERVICE_TURN_OFF, data, context=self._context
        )

    async def async_set_preset_mode(self, preset_mode: str):
        """Set new preset mode."""
        if preset_mode == PRESET_AWAY and not self._is_away:
            self._is_away = True
            self._saved_target_temp = self._target_temp
            self._target_temp = self._away_temp
            await self._async_control_heating(force=True)
        elif preset_mode == PRESET_NONE and self._is_away:
            self._is_away = False
            self._target_temp = self._saved_target_temp
            await self._async_control_heating(force=True)

        self.async_write_ha_state()
| {
"repo_name": "w1ll1am23/home-assistant",
"path": "homeassistant/components/generic_thermostat/climate.py",
"copies": "3",
"size": "18623",
"license": "apache-2.0",
"hash": 6833568006033393000,
"line_mean": 33.8745318352,
"line_max": 96,
"alpha_frac": 0.5756859797,
"autogenerated": false,
"ratio": 3.9009216589861753,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003039071532448823,
"num_lines": 534
} |
"""Adds support for generic thermostat units."""
import asyncio
import logging
import voluptuous as vol
from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateDevice
from homeassistant.components.climate.const import (
ATTR_PRESET_MODE,
CURRENT_HVAC_COOL,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
CURRENT_HVAC_OFF,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_NONE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_TEMPERATURE,
CONF_NAME,
EVENT_HOMEASSISTANT_START,
PRECISION_HALVES,
PRECISION_TENTHS,
PRECISION_WHOLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
STATE_UNKNOWN,
)
from homeassistant.core import DOMAIN as HA_DOMAIN, callback
from homeassistant.helpers import condition
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import (
async_track_state_change,
async_track_time_interval,
)
from homeassistant.helpers.restore_state import RestoreEntity
_LOGGER = logging.getLogger(__name__)

# Hysteresis (in the configured unit) applied around the target temperature.
DEFAULT_TOLERANCE = 0.3
DEFAULT_NAME = "Generic Thermostat"

# Configuration keys accepted by PLATFORM_SCHEMA below.
CONF_HEATER = "heater"
CONF_SENSOR = "target_sensor"
CONF_MIN_TEMP = "min_temp"
CONF_MAX_TEMP = "max_temp"
CONF_TARGET_TEMP = "target_temp"
CONF_AC_MODE = "ac_mode"
CONF_MIN_DUR = "min_cycle_duration"
CONF_COLD_TOLERANCE = "cold_tolerance"
CONF_HOT_TOLERANCE = "hot_tolerance"
CONF_KEEP_ALIVE = "keep_alive"
CONF_INITIAL_HVAC_MODE = "initial_hvac_mode"
CONF_AWAY_TEMP = "away_temp"
CONF_PRECISION = "precision"

# Base feature set; SUPPORT_PRESET_MODE is OR-ed in when away_temp is set.
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_HEATER): cv.entity_id,
        vol.Required(CONF_SENSOR): cv.entity_id,
        vol.Optional(CONF_AC_MODE): cv.boolean,
        vol.Optional(CONF_MAX_TEMP): vol.Coerce(float),
        vol.Optional(CONF_MIN_DUR): vol.All(cv.time_period, cv.positive_timedelta),
        vol.Optional(CONF_MIN_TEMP): vol.Coerce(float),
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Optional(CONF_COLD_TOLERANCE, default=DEFAULT_TOLERANCE): vol.Coerce(float),
        vol.Optional(CONF_HOT_TOLERANCE, default=DEFAULT_TOLERANCE): vol.Coerce(float),
        vol.Optional(CONF_TARGET_TEMP): vol.Coerce(float),
        vol.Optional(CONF_KEEP_ALIVE): vol.All(cv.time_period, cv.positive_timedelta),
        vol.Optional(CONF_INITIAL_HVAC_MODE): vol.In(
            [HVAC_MODE_COOL, HVAC_MODE_HEAT, HVAC_MODE_OFF]
        ),
        vol.Optional(CONF_AWAY_TEMP): vol.Coerce(float),
        vol.Optional(CONF_PRECISION): vol.In(
            [PRECISION_TENTHS, PRECISION_HALVES, PRECISION_WHOLE]
        ),
    }
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the generic thermostat platform."""
    # Pull every option out of the validated PLATFORM_SCHEMA config; optional
    # keys without defaults come back as None and are handled by the entity.
    name = config.get(CONF_NAME)
    heater_entity_id = config.get(CONF_HEATER)
    sensor_entity_id = config.get(CONF_SENSOR)
    min_temp = config.get(CONF_MIN_TEMP)
    max_temp = config.get(CONF_MAX_TEMP)
    target_temp = config.get(CONF_TARGET_TEMP)
    ac_mode = config.get(CONF_AC_MODE)
    min_cycle_duration = config.get(CONF_MIN_DUR)
    cold_tolerance = config.get(CONF_COLD_TOLERANCE)
    hot_tolerance = config.get(CONF_HOT_TOLERANCE)
    keep_alive = config.get(CONF_KEEP_ALIVE)
    initial_hvac_mode = config.get(CONF_INITIAL_HVAC_MODE)
    away_temp = config.get(CONF_AWAY_TEMP)
    precision = config.get(CONF_PRECISION)
    # Display/measurement unit comes from the Home Assistant core config.
    unit = hass.config.units.temperature_unit

    async_add_entities(
        [
            GenericThermostat(
                name,
                heater_entity_id,
                sensor_entity_id,
                min_temp,
                max_temp,
                target_temp,
                ac_mode,
                min_cycle_duration,
                cold_tolerance,
                hot_tolerance,
                keep_alive,
                initial_hvac_mode,
                away_temp,
                precision,
                unit,
            )
        ]
    )
class GenericThermostat(ClimateDevice, RestoreEntity):
    """Representation of a Generic Thermostat device."""
    def __init__(
        self,
        name,
        heater_entity_id,
        sensor_entity_id,
        min_temp,
        max_temp,
        target_temp,
        ac_mode,
        min_cycle_duration,
        cold_tolerance,
        hot_tolerance,
        keep_alive,
        initial_hvac_mode,
        away_temp,
        precision,
        unit,
    ):
        """Initialize the thermostat."""
        self._name = name
        self.heater_entity_id = heater_entity_id
        self.sensor_entity_id = sensor_entity_id
        self.ac_mode = ac_mode
        self.min_cycle_duration = min_cycle_duration
        self._cold_tolerance = cold_tolerance
        self._hot_tolerance = hot_tolerance
        self._keep_alive = keep_alive
        self._hvac_mode = initial_hvac_mode
        # Temperature restored when the away preset is cleared; falls back to
        # away_temp when no explicit target was configured.
        self._saved_target_temp = target_temp or away_temp
        self._temp_precision = precision
        # In AC mode the controlled switch cools; otherwise it heats.
        if self.ac_mode:
            self._hvac_list = [HVAC_MODE_COOL, HVAC_MODE_OFF]
        else:
            self._hvac_list = [HVAC_MODE_HEAT, HVAC_MODE_OFF]
        # Becomes True once both current and target temperature are known.
        self._active = False
        self._cur_temp = None
        # Serializes control decisions so sensor updates, service calls and
        # keep-alive callbacks cannot toggle the heater concurrently.
        self._temp_lock = asyncio.Lock()
        self._min_temp = min_temp
        self._max_temp = max_temp
        self._target_temp = target_temp
        self._unit = unit
        self._support_flags = SUPPORT_FLAGS
        # The away preset is only advertised when an away temperature is set.
        if away_temp:
            self._support_flags = SUPPORT_FLAGS | SUPPORT_PRESET_MODE
        self._away_temp = away_temp
        self._is_away = False
    async def async_added_to_hass(self):
        """Run when entity about to be added."""
        await super().async_added_to_hass()
        # Add listener
        async_track_state_change(
            self.hass, self.sensor_entity_id, self._async_sensor_changed
        )
        async_track_state_change(
            self.hass, self.heater_entity_id, self._async_switch_changed
        )
        if self._keep_alive:
            # Periodically re-assert the heater state even without a
            # temperature change.
            async_track_time_interval(
                self.hass, self._async_control_heating, self._keep_alive
            )
        @callback
        def _async_startup(event):
            """Init on startup."""
            # Seed the current temperature from the sensor once HA has started.
            sensor_state = self.hass.states.get(self.sensor_entity_id)
            if sensor_state and sensor_state.state != STATE_UNKNOWN:
                self._async_update_temp(sensor_state)
        self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, _async_startup)
        # Check If we have an old state
        old_state = await self.async_get_last_state()
        if old_state is not None:
            # If we have no initial temperature, restore
            if self._target_temp is None:
                # If we have a previously saved temperature
                if old_state.attributes.get(ATTR_TEMPERATURE) is None:
                    # Nothing saved either: fall back to the safest extreme.
                    if self.ac_mode:
                        self._target_temp = self.max_temp
                    else:
                        self._target_temp = self.min_temp
                    _LOGGER.warning(
                        "Undefined target temperature, falling back to %s",
                        self._target_temp,
                    )
                else:
                    self._target_temp = float(old_state.attributes[ATTR_TEMPERATURE])
            if old_state.attributes.get(ATTR_PRESET_MODE) == PRESET_AWAY:
                self._is_away = True
            if not self._hvac_mode and old_state.state:
                self._hvac_mode = old_state.state
        else:
            # No previous state, try and restore defaults
            if self._target_temp is None:
                if self.ac_mode:
                    self._target_temp = self.max_temp
                else:
                    self._target_temp = self.min_temp
            # NOTE(review): this warning sits outside the `if` above, so it
            # also fires when a target_temp was configured — confirm intended.
            _LOGGER.warning(
                "No previously saved temperature, setting to %s", self._target_temp
            )
        # Set default state to off
        if not self._hvac_mode:
            self._hvac_mode = HVAC_MODE_OFF
    @property
    def should_poll(self):
        """Return the polling state."""
        # State is pushed via the tracked listeners registered above.
        return False
    @property
    def name(self):
        """Return the name of the thermostat."""
        return self._name
    @property
    def precision(self):
        """Return the precision of the system."""
        if self._temp_precision is not None:
            return self._temp_precision
        return super().precision
    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return self._unit
    @property
    def current_temperature(self):
        """Return the sensor temperature."""
        return self._cur_temp
    @property
    def hvac_mode(self):
        """Return current operation."""
        return self._hvac_mode
    @property
    def hvac_action(self):
        """Return the current running hvac operation if supported.
        Need to be one of CURRENT_HVAC_*.
        """
        if self._hvac_mode == HVAC_MODE_OFF:
            return CURRENT_HVAC_OFF
        if not self._is_device_active:
            return CURRENT_HVAC_IDLE
        if self.ac_mode:
            return CURRENT_HVAC_COOL
        return CURRENT_HVAC_HEAT
    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return self._target_temp
    @property
    def hvac_modes(self):
        """List of available operation modes."""
        return self._hvac_list
    @property
    def preset_mode(self):
        """Return the current preset mode, e.g., home, away, temp."""
        return PRESET_AWAY if self._is_away else PRESET_NONE
    @property
    def preset_modes(self):
        """Return a list of available preset modes or PRESET_NONE if _away_temp is undefined."""
        # NOTE(review): returns the bare PRESET_NONE string (not a list) when
        # no away temperature is configured.
        return [PRESET_NONE, PRESET_AWAY] if self._away_temp else PRESET_NONE
    async def async_set_hvac_mode(self, hvac_mode):
        """Set hvac mode."""
        if hvac_mode == HVAC_MODE_HEAT:
            self._hvac_mode = HVAC_MODE_HEAT
            await self._async_control_heating(force=True)
        elif hvac_mode == HVAC_MODE_COOL:
            self._hvac_mode = HVAC_MODE_COOL
            await self._async_control_heating(force=True)
        elif hvac_mode == HVAC_MODE_OFF:
            self._hvac_mode = HVAC_MODE_OFF
            if self._is_device_active:
                await self._async_heater_turn_off()
        else:
            _LOGGER.error("Unrecognized hvac mode: %s", hvac_mode)
            return
        # Ensure we update the current operation after changing the mode
        self.schedule_update_ha_state()
    async def async_set_temperature(self, **kwargs):
        """Set new target temperature."""
        temperature = kwargs.get(ATTR_TEMPERATURE)
        if temperature is None:
            return
        self._target_temp = temperature
        # Re-evaluate immediately, bypassing min_cycle_duration.
        await self._async_control_heating(force=True)
        await self.async_update_ha_state()
    @property
    def min_temp(self):
        """Return the minimum temperature."""
        # NOTE(review): truthiness test — a configured minimum of 0 falls
        # through to the ClimateDevice default.
        if self._min_temp:
            return self._min_temp
        # get default temp from super class
        return super().min_temp
    @property
    def max_temp(self):
        """Return the maximum temperature."""
        # NOTE(review): truthiness test, as for min_temp above.
        if self._max_temp:
            return self._max_temp
        # Get default temp from super class
        return super().max_temp
    async def _async_sensor_changed(self, entity_id, old_state, new_state):
        """Handle temperature changes."""
        if new_state is None:
            return
        self._async_update_temp(new_state)
        await self._async_control_heating()
        await self.async_update_ha_state()
    @callback
    def _async_switch_changed(self, entity_id, old_state, new_state):
        """Handle heater switch state changes."""
        if new_state is None:
            return
        self.async_schedule_update_ha_state()
    @callback
    def _async_update_temp(self, state):
        """Update thermostat with latest state from sensor."""
        try:
            self._cur_temp = float(state.state)
        except ValueError as ex:
            # Sensor reported a non-numeric state; keep the previous reading.
            _LOGGER.error("Unable to update from sensor: %s", ex)
    async def _async_control_heating(self, time=None, force=False):
        """Check if we need to turn heating on or off."""
        async with self._temp_lock:
            if not self._active and None not in (self._cur_temp, self._target_temp):
                self._active = True
                _LOGGER.info(
                    "Obtained current and target temperature. "
                    "Generic thermostat active. %s, %s",
                    self._cur_temp,
                    self._target_temp,
                )
            if not self._active or self._hvac_mode == HVAC_MODE_OFF:
                return
            if not force and time is None:
                # If the `force` argument is True, we
                # ignore `min_cycle_duration`.
                # If the `time` argument is not none, we were invoked for
                # keep-alive purposes, and `min_cycle_duration` is irrelevant.
                if self.min_cycle_duration:
                    if self._is_device_active:
                        current_state = STATE_ON
                    else:
                        current_state = HVAC_MODE_OFF
                    # Require the switch to have held its state for at least
                    # min_cycle_duration before toggling again.
                    long_enough = condition.state(
                        self.hass,
                        self.heater_entity_id,
                        current_state,
                        self.min_cycle_duration,
                    )
                    if not long_enough:
                        return
            # Drift past the tolerance band around the target triggers action.
            too_cold = self._target_temp >= self._cur_temp + self._cold_tolerance
            too_hot = self._cur_temp >= self._target_temp + self._hot_tolerance
            if self._is_device_active:
                if (self.ac_mode and too_cold) or (not self.ac_mode and too_hot):
                    _LOGGER.info("Turning off heater %s", self.heater_entity_id)
                    await self._async_heater_turn_off()
                elif time is not None:
                    # The time argument is passed only in keep-alive case
                    _LOGGER.info(
                        "Keep-alive - Turning on heater heater %s",
                        self.heater_entity_id,
                    )
                    await self._async_heater_turn_on()
            else:
                if (self.ac_mode and too_hot) or (not self.ac_mode and too_cold):
                    _LOGGER.info("Turning on heater %s", self.heater_entity_id)
                    await self._async_heater_turn_on()
                elif time is not None:
                    # The time argument is passed only in keep-alive case
                    _LOGGER.info(
                        "Keep-alive - Turning off heater %s", self.heater_entity_id
                    )
                    await self._async_heater_turn_off()
    @property
    def _is_device_active(self):
        """If the toggleable device is currently active."""
        return self.hass.states.is_state(self.heater_entity_id, STATE_ON)
    @property
    def supported_features(self):
        """Return the list of supported features."""
        return self._support_flags
    async def _async_heater_turn_on(self):
        """Turn heater toggleable device on."""
        data = {ATTR_ENTITY_ID: self.heater_entity_id}
        await self.hass.services.async_call(HA_DOMAIN, SERVICE_TURN_ON, data)
    async def _async_heater_turn_off(self):
        """Turn heater toggleable device off."""
        data = {ATTR_ENTITY_ID: self.heater_entity_id}
        await self.hass.services.async_call(HA_DOMAIN, SERVICE_TURN_OFF, data)
    async def async_set_preset_mode(self, preset_mode: str):
        """Set new preset mode."""
        if preset_mode == PRESET_AWAY and not self._is_away:
            # Remember the current target so PRESET_NONE can restore it.
            self._is_away = True
            self._saved_target_temp = self._target_temp
            self._target_temp = self._away_temp
            await self._async_control_heating(force=True)
        elif preset_mode == PRESET_NONE and self._is_away:
            self._is_away = False
            self._target_temp = self._saved_target_temp
            await self._async_control_heating(force=True)
        await self.async_update_ha_state()
| {
"repo_name": "Teagan42/home-assistant",
"path": "homeassistant/components/generic_thermostat/climate.py",
"copies": "2",
"size": "16475",
"license": "apache-2.0",
"hash": 8342902311525437000,
"line_mean": 34.278372591,
"line_max": 96,
"alpha_frac": 0.5779666161,
"autogenerated": false,
"ratio": 3.8556049613854433,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5433571577485443,
"avg_score": null,
"num_lines": null
} |
"""Adds support for generic thermostat units."""
import asyncio
import logging
import voluptuous as vol
from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateEntity
from homeassistant.components.climate.const import (
ATTR_PRESET_MODE,
CURRENT_HVAC_COOL,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
CURRENT_HVAC_OFF,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_NONE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_TEMPERATURE,
CONF_NAME,
CONF_UNIQUE_ID,
EVENT_HOMEASSISTANT_START,
PRECISION_HALVES,
PRECISION_TENTHS,
PRECISION_WHOLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import DOMAIN as HA_DOMAIN, CoreState, callback
from homeassistant.exceptions import ConditionError
from homeassistant.helpers import condition
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import (
async_track_state_change_event,
async_track_time_interval,
)
from homeassistant.helpers.reload import async_setup_reload_service
from homeassistant.helpers.restore_state import RestoreEntity
from . import DOMAIN, PLATFORMS
_LOGGER = logging.getLogger(__name__)
# Degrees of drift allowed on either side of the target before toggling.
DEFAULT_TOLERANCE = 0.3
DEFAULT_NAME = "Generic Thermostat"
# YAML configuration keys for this platform.
CONF_HEATER = "heater"
CONF_SENSOR = "target_sensor"
CONF_MIN_TEMP = "min_temp"
CONF_MAX_TEMP = "max_temp"
CONF_TARGET_TEMP = "target_temp"
CONF_AC_MODE = "ac_mode"
CONF_MIN_DUR = "min_cycle_duration"
CONF_COLD_TOLERANCE = "cold_tolerance"
CONF_HOT_TOLERANCE = "hot_tolerance"
CONF_KEEP_ALIVE = "keep_alive"
CONF_INITIAL_HVAC_MODE = "initial_hvac_mode"
CONF_AWAY_TEMP = "away_temp"
CONF_PRECISION = "precision"
# Target-temperature support is always on; preset support is added per
# entity when an away temperature is configured (see GenericThermostat).
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE
# Platform configuration schema: only the heater switch and the temperature
# sensor are required, everything else has a default or is optional.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_HEATER): cv.entity_id,
        vol.Required(CONF_SENSOR): cv.entity_id,
        vol.Optional(CONF_AC_MODE): cv.boolean,
        vol.Optional(CONF_MAX_TEMP): vol.Coerce(float),
        vol.Optional(CONF_MIN_DUR): cv.positive_time_period,
        vol.Optional(CONF_MIN_TEMP): vol.Coerce(float),
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Optional(CONF_COLD_TOLERANCE, default=DEFAULT_TOLERANCE): vol.Coerce(float),
        vol.Optional(CONF_HOT_TOLERANCE, default=DEFAULT_TOLERANCE): vol.Coerce(float),
        vol.Optional(CONF_TARGET_TEMP): vol.Coerce(float),
        vol.Optional(CONF_KEEP_ALIVE): cv.positive_time_period,
        vol.Optional(CONF_INITIAL_HVAC_MODE): vol.In(
            [HVAC_MODE_COOL, HVAC_MODE_HEAT, HVAC_MODE_OFF]
        ),
        vol.Optional(CONF_AWAY_TEMP): vol.Coerce(float),
        vol.Optional(CONF_PRECISION): vol.In(
            [PRECISION_TENTHS, PRECISION_HALVES, PRECISION_WHOLE]
        ),
        vol.Optional(CONF_UNIQUE_ID): cv.string,
    }
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the generic thermostat platform.

    Registers the reload service for this integration and creates one
    GenericThermostat entity from the validated YAML configuration.
    """
    await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
    # Every option was validated by PLATFORM_SCHEMA; optional keys without a
    # default come back as None and are handled by the entity itself.
    thermostat = GenericThermostat(
        config.get(CONF_NAME),
        config.get(CONF_HEATER),
        config.get(CONF_SENSOR),
        config.get(CONF_MIN_TEMP),
        config.get(CONF_MAX_TEMP),
        config.get(CONF_TARGET_TEMP),
        config.get(CONF_AC_MODE),
        config.get(CONF_MIN_DUR),
        config.get(CONF_COLD_TOLERANCE),
        config.get(CONF_HOT_TOLERANCE),
        config.get(CONF_KEEP_ALIVE),
        config.get(CONF_INITIAL_HVAC_MODE),
        config.get(CONF_AWAY_TEMP),
        config.get(CONF_PRECISION),
        hass.config.units.temperature_unit,
        config.get(CONF_UNIQUE_ID),
    )
    async_add_entities([thermostat])
class GenericThermostat(ClimateEntity, RestoreEntity):
    """Representation of a Generic Thermostat device."""
    def __init__(
        self,
        name,
        heater_entity_id,
        sensor_entity_id,
        min_temp,
        max_temp,
        target_temp,
        ac_mode,
        min_cycle_duration,
        cold_tolerance,
        hot_tolerance,
        keep_alive,
        initial_hvac_mode,
        away_temp,
        precision,
        unit,
        unique_id,
    ):
        """Initialize the thermostat."""
        self._name = name
        self.heater_entity_id = heater_entity_id
        self.sensor_entity_id = sensor_entity_id
        self.ac_mode = ac_mode
        self.min_cycle_duration = min_cycle_duration
        self._cold_tolerance = cold_tolerance
        self._hot_tolerance = hot_tolerance
        self._keep_alive = keep_alive
        self._hvac_mode = initial_hvac_mode
        # Temperature restored when the away preset is cleared; falls back to
        # away_temp when no explicit target was configured.
        self._saved_target_temp = target_temp or away_temp
        self._temp_precision = precision
        # In AC mode the controlled switch cools; otherwise it heats.
        if self.ac_mode:
            self._hvac_list = [HVAC_MODE_COOL, HVAC_MODE_OFF]
        else:
            self._hvac_list = [HVAC_MODE_HEAT, HVAC_MODE_OFF]
        # Becomes True once both current and target temperature are known.
        self._active = False
        self._cur_temp = None
        # Serializes control decisions so sensor updates, service calls and
        # keep-alive callbacks cannot toggle the heater concurrently.
        self._temp_lock = asyncio.Lock()
        self._min_temp = min_temp
        self._max_temp = max_temp
        self._target_temp = target_temp
        self._unit = unit
        self._unique_id = unique_id
        self._support_flags = SUPPORT_FLAGS
        # The away preset is only advertised when an away temperature is set.
        if away_temp:
            self._support_flags = SUPPORT_FLAGS | SUPPORT_PRESET_MODE
        self._away_temp = away_temp
        self._is_away = False
    async def async_added_to_hass(self):
        """Run when entity about to be added."""
        await super().async_added_to_hass()
        # Add listener
        # All listeners are wrapped in async_on_remove so they are detached
        # automatically when the entity is removed.
        self.async_on_remove(
            async_track_state_change_event(
                self.hass, [self.sensor_entity_id], self._async_sensor_changed
            )
        )
        self.async_on_remove(
            async_track_state_change_event(
                self.hass, [self.heater_entity_id], self._async_switch_changed
            )
        )
        if self._keep_alive:
            # Periodically re-assert the heater state even without a
            # temperature change.
            self.async_on_remove(
                async_track_time_interval(
                    self.hass, self._async_control_heating, self._keep_alive
                )
            )
        @callback
        def _async_startup(*_):
            """Init on startup."""
            # Seed the current temperature from the sensor if it already has
            # a usable state.
            sensor_state = self.hass.states.get(self.sensor_entity_id)
            if sensor_state and sensor_state.state not in (
                STATE_UNAVAILABLE,
                STATE_UNKNOWN,
            ):
                self._async_update_temp(sensor_state)
                self.async_write_ha_state()
        # Run immediately when HA is already up, otherwise defer to startup.
        if self.hass.state == CoreState.running:
            _async_startup()
        else:
            self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, _async_startup)
        # Check If we have an old state
        old_state = await self.async_get_last_state()
        if old_state is not None:
            # If we have no initial temperature, restore
            if self._target_temp is None:
                # If we have a previously saved temperature
                if old_state.attributes.get(ATTR_TEMPERATURE) is None:
                    # Nothing saved either: fall back to the safest extreme.
                    if self.ac_mode:
                        self._target_temp = self.max_temp
                    else:
                        self._target_temp = self.min_temp
                    _LOGGER.warning(
                        "Undefined target temperature, falling back to %s",
                        self._target_temp,
                    )
                else:
                    self._target_temp = float(old_state.attributes[ATTR_TEMPERATURE])
            if old_state.attributes.get(ATTR_PRESET_MODE) == PRESET_AWAY:
                self._is_away = True
            if not self._hvac_mode and old_state.state:
                self._hvac_mode = old_state.state
        else:
            # No previous state, try and restore defaults
            if self._target_temp is None:
                if self.ac_mode:
                    self._target_temp = self.max_temp
                else:
                    self._target_temp = self.min_temp
            # NOTE(review): this warning sits outside the `if` above, so it
            # also fires when a target_temp was configured — confirm intended.
            _LOGGER.warning(
                "No previously saved temperature, setting to %s", self._target_temp
            )
        # Set default state to off
        if not self._hvac_mode:
            self._hvac_mode = HVAC_MODE_OFF
    @property
    def should_poll(self):
        """Return the polling state."""
        # State is pushed via the tracked event listeners registered above.
        return False
    @property
    def name(self):
        """Return the name of the thermostat."""
        return self._name
    @property
    def unique_id(self):
        """Return the unique id of this thermostat."""
        return self._unique_id
    @property
    def precision(self):
        """Return the precision of the system."""
        if self._temp_precision is not None:
            return self._temp_precision
        return super().precision
    @property
    def target_temperature_step(self):
        """Return the supported step of target temperature."""
        # Since this integration does not yet have a step size parameter
        # we have to re-use the precision as the step size for now.
        return self.precision
    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return self._unit
    @property
    def current_temperature(self):
        """Return the sensor temperature."""
        return self._cur_temp
    @property
    def hvac_mode(self):
        """Return current operation."""
        return self._hvac_mode
    @property
    def hvac_action(self):
        """Return the current running hvac operation if supported.
        Need to be one of CURRENT_HVAC_*.
        """
        if self._hvac_mode == HVAC_MODE_OFF:
            return CURRENT_HVAC_OFF
        if not self._is_device_active:
            return CURRENT_HVAC_IDLE
        if self.ac_mode:
            return CURRENT_HVAC_COOL
        return CURRENT_HVAC_HEAT
    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return self._target_temp
    @property
    def hvac_modes(self):
        """List of available operation modes."""
        return self._hvac_list
    @property
    def preset_mode(self):
        """Return the current preset mode, e.g., home, away, temp."""
        return PRESET_AWAY if self._is_away else PRESET_NONE
    @property
    def preset_modes(self):
        """Return a list of available preset modes or PRESET_NONE if _away_temp is undefined."""
        # NOTE(review): returns the bare PRESET_NONE string (not a list) when
        # no away temperature is configured.
        return [PRESET_NONE, PRESET_AWAY] if self._away_temp else PRESET_NONE
    async def async_set_hvac_mode(self, hvac_mode):
        """Set hvac mode."""
        if hvac_mode == HVAC_MODE_HEAT:
            self._hvac_mode = HVAC_MODE_HEAT
            await self._async_control_heating(force=True)
        elif hvac_mode == HVAC_MODE_COOL:
            self._hvac_mode = HVAC_MODE_COOL
            await self._async_control_heating(force=True)
        elif hvac_mode == HVAC_MODE_OFF:
            self._hvac_mode = HVAC_MODE_OFF
            if self._is_device_active:
                await self._async_heater_turn_off()
        else:
            _LOGGER.error("Unrecognized hvac mode: %s", hvac_mode)
            return
        # Ensure we update the current operation after changing the mode
        self.async_write_ha_state()
    async def async_set_temperature(self, **kwargs):
        """Set new target temperature."""
        temperature = kwargs.get(ATTR_TEMPERATURE)
        if temperature is None:
            return
        self._target_temp = temperature
        # Re-evaluate immediately, bypassing min_cycle_duration.
        await self._async_control_heating(force=True)
        self.async_write_ha_state()
    @property
    def min_temp(self):
        """Return the minimum temperature."""
        if self._min_temp is not None:
            return self._min_temp
        # get default temp from super class
        return super().min_temp
    @property
    def max_temp(self):
        """Return the maximum temperature."""
        if self._max_temp is not None:
            return self._max_temp
        # Get default temp from super class
        return super().max_temp
    async def _async_sensor_changed(self, event):
        """Handle temperature changes."""
        new_state = event.data.get("new_state")
        # Ignore removals and unusable sensor states.
        if new_state is None or new_state.state in (STATE_UNAVAILABLE, STATE_UNKNOWN):
            return
        self._async_update_temp(new_state)
        await self._async_control_heating()
        self.async_write_ha_state()
    @callback
    def _async_switch_changed(self, event):
        """Handle heater switch state changes."""
        new_state = event.data.get("new_state")
        if new_state is None:
            return
        self.async_write_ha_state()
    @callback
    def _async_update_temp(self, state):
        """Update thermostat with latest state from sensor."""
        try:
            self._cur_temp = float(state.state)
        except ValueError as ex:
            # Sensor reported a non-numeric state; keep the previous reading.
            _LOGGER.error("Unable to update from sensor: %s", ex)
    async def _async_control_heating(self, time=None, force=False):
        """Check if we need to turn heating on or off."""
        async with self._temp_lock:
            if not self._active and None not in (self._cur_temp, self._target_temp):
                self._active = True
                _LOGGER.info(
                    "Obtained current and target temperature. "
                    "Generic thermostat active. %s, %s",
                    self._cur_temp,
                    self._target_temp,
                )
            if not self._active or self._hvac_mode == HVAC_MODE_OFF:
                return
            if not force and time is None:
                # If the `force` argument is True, we
                # ignore `min_cycle_duration`.
                # If the `time` argument is not none, we were invoked for
                # keep-alive purposes, and `min_cycle_duration` is irrelevant.
                if self.min_cycle_duration:
                    if self._is_device_active:
                        current_state = STATE_ON
                    else:
                        current_state = HVAC_MODE_OFF
                    # Require the switch to have held its state for at least
                    # min_cycle_duration before toggling again; a
                    # ConditionError (e.g. unavailable entity) counts as
                    # "not long enough".
                    try:
                        long_enough = condition.state(
                            self.hass,
                            self.heater_entity_id,
                            current_state,
                            self.min_cycle_duration,
                        )
                    except ConditionError:
                        long_enough = False
                    if not long_enough:
                        return
            # Drift past the tolerance band around the target triggers action.
            too_cold = self._target_temp >= self._cur_temp + self._cold_tolerance
            too_hot = self._cur_temp >= self._target_temp + self._hot_tolerance
            if self._is_device_active:
                if (self.ac_mode and too_cold) or (not self.ac_mode and too_hot):
                    _LOGGER.info("Turning off heater %s", self.heater_entity_id)
                    await self._async_heater_turn_off()
                elif time is not None:
                    # The time argument is passed only in keep-alive case
                    _LOGGER.info(
                        "Keep-alive - Turning on heater heater %s",
                        self.heater_entity_id,
                    )
                    await self._async_heater_turn_on()
            else:
                if (self.ac_mode and too_hot) or (not self.ac_mode and too_cold):
                    _LOGGER.info("Turning on heater %s", self.heater_entity_id)
                    await self._async_heater_turn_on()
                elif time is not None:
                    # The time argument is passed only in keep-alive case
                    _LOGGER.info(
                        "Keep-alive - Turning off heater %s", self.heater_entity_id
                    )
                    await self._async_heater_turn_off()
    @property
    def _is_device_active(self):
        """If the toggleable device is currently active."""
        return self.hass.states.is_state(self.heater_entity_id, STATE_ON)
    @property
    def supported_features(self):
        """Return the list of supported features."""
        return self._support_flags
    async def _async_heater_turn_on(self):
        """Turn heater toggleable device on."""
        data = {ATTR_ENTITY_ID: self.heater_entity_id}
        # Propagate this entity's context so the switch flip is attributed
        # to the thermostat.
        await self.hass.services.async_call(
            HA_DOMAIN, SERVICE_TURN_ON, data, context=self._context
        )
    async def _async_heater_turn_off(self):
        """Turn heater toggleable device off."""
        data = {ATTR_ENTITY_ID: self.heater_entity_id}
        await self.hass.services.async_call(
            HA_DOMAIN, SERVICE_TURN_OFF, data, context=self._context
        )
    async def async_set_preset_mode(self, preset_mode: str):
        """Set new preset mode."""
        if preset_mode == PRESET_AWAY and not self._is_away:
            # Remember the current target so PRESET_NONE can restore it.
            self._is_away = True
            self._saved_target_temp = self._target_temp
            self._target_temp = self._away_temp
            await self._async_control_heating(force=True)
        elif preset_mode == PRESET_NONE and self._is_away:
            self._is_away = False
            self._target_temp = self._saved_target_temp
            await self._async_control_heating(force=True)
        self.async_write_ha_state()
| {
"repo_name": "partofthething/home-assistant",
"path": "homeassistant/components/generic_thermostat/climate.py",
"copies": "1",
"size": "18000",
"license": "mit",
"hash": 504039053055987800,
"line_mean": 33.8837209302,
"line_max": 96,
"alpha_frac": 0.5743888889,
"autogenerated": false,
"ratio": 3.911342894393742,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9984147240561905,
"avg_score": 0.0003169085463675318,
"num_lines": 516
} |
"""Adds support for rotation policies.
Creates a default rotation policy (30 days) with the name
'default' ensures that all existing certificates use the default
policy.
Revision ID: a02a678ddc25
Revises: 8ae67285ff14
Create Date: 2017-07-12 11:45:49.257927
"""
# revision identifiers, used by Alembic.
revision = "a02a678ddc25"
down_revision = "8ae67285ff14"
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import text
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Create the lookup table that holds named rotation policies.
    op.create_table(
        "rotation_policies",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("name", sa.String(), nullable=True),
        sa.Column("days", sa.Integer(), nullable=True),
        sa.PrimaryKeyConstraint("id"),
    )
    # Link each certificate to a policy; nullable so existing rows are valid
    # until the backfill below runs.
    op.add_column(
        "certificates", sa.Column("rotation_policy_id", sa.Integer(), nullable=True)
    )
    op.create_foreign_key(
        None, "certificates", "rotation_policies", ["rotation_policy_id"], ["id"]
    )
    conn = op.get_bind()
    # Seed the 30-day 'default' policy.
    stmt = text("insert into rotation_policies (days, name) values (:days, :name)")
    stmt = stmt.bindparams(days=30, name="default")
    conn.execute(stmt)
    # Fetch the id the database assigned to the default policy...
    stmt = text("select id from rotation_policies where name=:name")
    stmt = stmt.bindparams(name="default")
    rotation_policy_id = conn.execute(stmt).fetchone()[0]
    # ...and backfill every existing certificate with it.
    stmt = text("update certificates set rotation_policy_id=:rotation_policy_id")
    stmt = stmt.bindparams(rotation_policy_id=rotation_policy_id)
    conn.execute(stmt)
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Detach certificates from rotation policies before dropping the table.
    # NOTE(review): the FK was created with name=None; dropping by None only
    # works on backends where Alembic can resolve the unnamed constraint.
    op.drop_constraint(None, "certificates", type_="foreignkey")
    op.drop_column("certificates", "rotation_policy_id")
    # NOTE(review): this index drop/re-create has no counterpart in
    # upgrade(); it looks auto-generated — confirm it is intentional.
    op.drop_index(
        "certificate_replacement_associations_ix",
        table_name="certificate_replacement_associations",
    )
    op.create_index(
        "certificate_replacement_associations_ix",
        "certificate_replacement_associations",
        ["replaced_certificate_id", "certificate_id"],
        unique=True,
    )
    op.drop_table("rotation_policies")
    # ### end Alembic commands ###
| {
"repo_name": "Netflix/lemur",
"path": "lemur/migrations/versions/a02a678ddc25_.py",
"copies": "1",
"size": "2193",
"license": "apache-2.0",
"hash": 4040232984219374600,
"line_mean": 31.7313432836,
"line_max": 84,
"alpha_frac": 0.6703146375,
"autogenerated": false,
"ratio": 3.729591836734694,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9887633739964113,
"avg_score": 0.002454546854116205,
"num_lines": 67
} |
"""Adds Swift."""
from baseCmd import *
from baseResponse import *
class addSwiftCmd (baseCmd):
    """Request parameters for the addSwift API call (registers Swift storage)."""

    # Class-level registry of parameter types. __init__ mutates it in place,
    # so every instance shares this mapping (original behavior preserved).
    typeInfo = {}

    def __init__(self):
        self.isAsync = "false"
        # the URL for swift (required)
        self.url = None
        # the account for swift
        self.account = None
        # key for the user for swift
        self.key = None
        # the username for swift
        self.username = None
        self.typeInfo.update(
            url='string', account='string', key='string', username='string'
        )
        self.required = ["url"]
class addSwiftResponse (baseResponse):
    """Response fields returned by the addSwift API call."""

    # Class-level registry of field types; __init__ mutates it in place,
    # so every instance shares this mapping (original behavior preserved).
    typeInfo = {}

    def __init__(self):
        # Every field starts unset; the marshaller fills them from the reply.
        # (attribute name, declared API type) in the API's documented order.
        fields = (
            ("id", "string"),            # the ID of the image store
            ("details", "set"),          # the details of the image store
            ("name", "string"),          # the name of the image store
            ("protocol", "string"),      # the protocol of the image store
            ("providername", "string"),  # the provider name of the image store
            ("scope", "scopetype"),      # the scope of the image store
            ("url", "string"),           # the url of the image store
            ("zoneid", "string"),        # the Zone ID of the image store
            ("zonename", "string"),      # the Zone name of the image store
        )
        for attr, api_type in fields:
            setattr(self, attr, None)
            self.typeInfo[attr] = api_type
| {
"repo_name": "MissionCriticalCloud/marvin",
"path": "marvin/cloudstackAPI/addSwift.py",
"copies": "1",
"size": "1766",
"license": "apache-2.0",
"hash": 2172777111909247000,
"line_mean": 29.4482758621,
"line_max": 50,
"alpha_frac": 0.543601359,
"autogenerated": false,
"ratio": 4.00453514739229,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.504813650639229,
"avg_score": null,
"num_lines": null
} |
"""Adds syntax for creating habracut"""
from __future__ import absolute_import
from __future__ import unicode_literals
from markdown.extensions import Extension
from markdown.preprocessors import Preprocessor
import re
from django.utils.deconstruct import deconstructible
from django.utils.html import escape
@deconstructible
class CutExtension(Extension):
    """Markdown extension that recognizes `----cut----` (habracut) markers."""

    def __init__(self, *args, **kwargs):
        """Adds habracut

        E.g. `----cut----`
        """
        self.anchor = kwargs.pop('anchor', '')
        super(CutExtension, self).__init__(*args, **kwargs)

    def extendMarkdown(self, md, md_globals):
        """Add CutPreprocessor to the Markdown instance

        :param md: Current markdown instance.
        :param md_globals: Global markdown vars.
        """
        md.registerExtension(self)
        # Run after fenced code blocks when that preprocessor is present so
        # cut markers inside fenced blocks are left alone; otherwise hook in
        # right after whitespace normalization.
        if 'fenced_code_block' in md.preprocessors:
            position = ">fenced_code_block"
        else:
            position = ">normalize_whitespace"
        md.preprocessors.add('cut',
                             CutPreprocessor(md, anchor=self.anchor),
                             position)
class CutPreprocessor(Preprocessor):
    def __init__(self, *args, **kwargs):
        """Preprocessor replacing a cut marker with a raw HTML comment."""
        self.anchor = kwargs.pop('anchor', '')
        super(CutPreprocessor, self).__init__(*args, **kwargs)
    # Matches e.g. "----cut----", "---- cut here ----" or
    # "----cut {{ caption }}----": 4+ dashes on each side, optional "here",
    # optional {{ caption }} captured as a named group.
    block_re = re.compile(
        r'-{4,}[ ]*'
        r'cut[ ]*(here)?[ ]*'
        r'(\{\{(?P<caption>[^\}]+)\}\})?'
        r'[ ]*-{4,}', re.MULTILINE | re.DOTALL | re.VERBOSE | re.IGNORECASE)
    def run(self, lines):
        """Match cut tags and store them in the htmlStash

        :param lines: Lines of code.
        """
        text = '\n'.join(lines)
        # Only the first marker in the document is replaced.
        m = self.block_re.search(text)
        if m is not None:
            if 'caption' in m.groupdict() and m.groupdict()['caption'] is not None:
                # Preserve the (escaped) caption inside the HTML comment.
                html = '<!-- cut here {{ %s }} -->' % escape(m.groupdict()['caption'])
            else:
                html = '<!-- cut here -->'
            if self.anchor:
                # Optional named anchor so the post-cut content is linkable.
                html += '<a name="%s"></a>' % escape(self.anchor)
            # Stash the raw HTML so later processors do not escape it.
            placeholder = self.markdown.htmlStash.store(html,
                                                        safe=True)
            text = '%s\n%s\n%s' % (text[:m.start()],
                                   placeholder,
                                   text[m.end():])
        return text.split('\n')
| {
"repo_name": "AmatanHead/collective-blog",
"path": "s_markdown/extensions/cut.py",
"copies": "1",
"size": "2551",
"license": "mit",
"hash": 22005731126125308,
"line_mean": 30.1097560976,
"line_max": 86,
"alpha_frac": 0.518620149,
"autogenerated": false,
"ratio": 4.1683006535947715,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5186920802594771,
"avg_score": null,
"num_lines": null
} |
# Adds table of Gaia photometry/astrometry to a DESI MWS mock file
import sys
import os
import numpy as np
import datetime
from astropy.io import fits
from astropy.table import Table, Column
import astropy.units as u
############################################################
def make_gaia_observables(desi_table,dust='galaxia'):
"""
"""
import pygaia
import pygaia.errors.astrometric as gaia_astro
import pygaia.errors.spectroscopic as gaia_spectro
import pygaia.errors.photometric as gaia_photo
np.random.seed(2019)
gaia_table = Table()
# Intrinsic heliocentric distance in parsecs
d_parsecs = desi_table['d_helio']*1000.0
# Heliocentric RV in km/s
v_helio_kms = desi_table['v_helio']
# Intrinsic photometry (using values without extinction)
g = desi_table['SDSSg_true_nodust']
r = desi_table['SDSSr_true_nodust']
i = desi_table['SDSSi_true_nodust']
# Lupton conversions
magV_nodust = g - 0.5784*(g-r)-0.0038
magI_nodust = r - 1.2444*(r-i)-0.3820
gaia_table.add_column(Column(magV_nodust,name='V_Lupton_nodust',unit=1.0,
description='J-C V-band magnitude converted from SDSS g, gmr following Lupton 2005'))
gaia_table.add_column(Column(magI_nodust,name='I_Lupton_nodust',unit=1.0,
description='J-C I-band magnitude converted from SDSS r, rmi following Lupton 2005'))
# Add extinction back (currently in a poor way!). If we care in detail we
# should know exactly which V and I pygaia wants!
if dust == 'galaxia':
# Galaxia dust equations use E(B-V) and coefficients Am/E(B-V).
#
# E_schelgel is seletive absorption to star in fiducial bands (V and B in this case).
#
# Am = E_schlegel * A_over_EBV[band]
# mag_true = mag_nodust + Am
#
# Hence:
# mag_nodust = mag_true - E_schelgel*A_over_EBV[band]
# Extinction coefficients for SDSS from galaxia documentation
# These are Am/E(B-V) for Rv = 3.1 according to the Finkbeiner 99 law.
# These give the coefficients to use with E(B-V)_Schelgel get reddenings
# consistent with S&F 2011 and Schlafly 2010.
ext_coeffs_ctio = {'V':3.240,
'I':1.962}
magV = magV_nodust + desi_table['ABV']*ext_coeffs_ctio['V']
magI = magI_nodust + desi_table['ABV']*ext_coeffs_ctio['I']
elif dust == 'galfast':
# GalFast dust equations use Ar and coefficients Am/Ar.
# Am0 is total absorption to star in fiducial band (r, in this case).
#
# Am = Am0 * reddening[band]
# mag_true = mag_nodust + Am
#
# Hence:
# mag_nodust = mag_true - Am0*reddening[band]
Ar_coeffs_ctio = {'V':1.1800,
'I':0.5066}
magV = magV_nodust + desi_table['Ar']*Ar_coeffs_ctio['V']
magI = magI_nodust + desi_table['Ar']*Ar_coeffs_ctio['I']
gaia_table.add_column(Column(magV,name='V_Lupton',unit=1.0,
description='J-C V-band magnitude converted from SDSS g, gmr following Lupton 2005, with extinction'))
gaia_table.add_column(Column(magI,name='I_Lupton',unit=1.0,
description='J-C I-band magnitude converted from SDSS r, rmi following Lupton 2005, with extinction'))
# Gaia routines need this colour as observed (something to do with PSF size)
VminI = magV - magI
from pygaia.photometry.transformations import gminvFromVmini, vminGrvsFromVmini
# Calculate the value of (G-V) from (V-I)
# Should this be the true or extincted V-I?
GmV = gminvFromVmini(VminI)
GmVrvs = vminGrvsFromVmini(VminI)
magG = GmV + magV
magGrvs = GmVrvs + magV
gaia_table.add_column(Column(magV,name='G_gaia',unit=1.0,
description='Gaia G apparent magnitude (pygaia)'))
gaia_table.add_column(Column(magI,name='G_gaia_rvs',unit=1.0,
description='Gaia G apparent magnitude for RVS (pygaia)'))
# Sky coordinates and intrinsic PMs
ra = desi_table['RA'] # Degrees
dec = desi_table['DEC'] # Degrees
pm_ra = desi_table['pm_RA'] # mas/yr
pm_dec = desi_table['pm_DEC'] # mas/yr
import pygaia.astrometry.coordinates as gaia_coordinates
matrix_equ_to_ecl = gaia_coordinates.Transformations.ICRS2ECL
equ_to_ecl = gaia_coordinates.CoordinateTransformation(matrix_equ_to_ecl)
# Note input in radians and output in radians. This is only used for input
# into the proper motion error routine.
ecl_lambda, ecl_beta = equ_to_ecl.transformSkyCoordinates(ra*np.pi/180.0,dec*np.pi/180.0)
# The error in mu_alpha* and the error in mu_delta, in that order, in
# micro-arcsecond/year. The * on mu_alpha* indicates
# mu_alpha*=mu_alpha*cos(delta). These are in MICRO arcsec/yr so convert
# to MILLI arcsec/yr.
mu_alpha_star_err, mu_delta_err = gaia_astro.properMotionError(magG,VminI,ecl_beta)
mu_alpha_star_err = mu_alpha_star_err/1000.0
mu_delta_err = mu_delta_err/1000.0
gaia_table.add_column(Column(mu_alpha_star_err,name='pm_RA_gaia_error',unit=1.0,
description='Gaia error on proper motion in RA (mu_alpha_star; pygaia) [mas/yr]'))
gaia_table.add_column(Column(mu_delta_err,name='pm_DEC_gaia_error',unit=1.0,
description='Gaia error on proper motion in DEC (mu_delta; pygaia) [mas/yr]'))
# Error-convolved proper motions. Question here whether pm_ra from Galfast
# is mu_alpha or mu_alpha_star. Give Mario the benefit of the doubt and
# assume it is mu_alpha_star.
GALFAST_PMRA_IS_MU_ALPHA_STAR = True
if GALFAST_PMRA_IS_MU_ALPHA_STAR:
RA_FIX_FACTOR = 1.0
else:
RA_FIX_FACTOR = np.cos(dec*np.pi/180.0)
gaia_mu_alpha_star = np.random.normal(pm_ra*RA_FIX_FACTOR, mu_alpha_star_err)
gaia_mu_delta = np.random.normal(pm_dec,mu_delta_err)
gaia_table.add_column(Column(gaia_mu_alpha_star,name='pm_RA_star_gaia',unit=1.0,
description='Proper motion in RA convolved with Gaia error (mu_alpha_star; pygaia) [mas/yr]'))
gaia_table.add_column(Column(gaia_mu_delta,name='pm_DEC_gaia',unit=1.0,
description='Proper motion in DEC convolved with Gaia error (mu_delta; pygaia) [mas/yr]'))
# True parallax in **milli-arcsec**
true_parallax_arcsec = 1.0/d_parsecs
true_parallax_milliarcsec = true_parallax_arcsec*1e3
# Pygaia error on parallax is returned in micro-arcsec
# Convert to **milli-arcsec**
MICRO_TO_MILLI = 1.0/1e3
gaia_parallax_err_milliarcsec = gaia_astro.parallaxError(magG,VminI,ecl_beta)*MICRO_TO_MILLI
# Error convolved parallax
gaia_parallax_milliarcsec = np.random.normal(true_parallax_milliarcsec,gaia_parallax_err_milliarcsec)
gaia_table.add_column(Column(gaia_parallax_err_milliarcsec,name='parallax_gaia_error',unit=1.0,
description='Gaia error on parallax (pygaia) [mas]'))
gaia_table.add_column(Column(gaia_parallax_milliarcsec,name='parallax_gaia',unit=1.0,
description='Parallax convolved with Gaia error (pygaia) [mas]'))
# Error convolved RV
for spectype in ['G0V','F0V','K1III', 'K1IIIMP']:
gaia_rv_error_kms = gaia_spectro.vradErrorSkyAvg(magV,spectype)
gaia_rv_kms = np.random.normal(v_helio_kms,gaia_rv_error_kms)
gaia_table.add_column(Column(gaia_rv_error_kms,name='v_helio_gaia_error_%s'%(spectype),unit=1.0,
description='Gaia error on heliocentric radial velocity assuming star is type %s (pygaia) [km/s]'%(spectype)))
gaia_table.add_column(Column(gaia_rv_kms,name='v_helio_gaia_%s'%(spectype),unit=1.0,
description='Heliocentric radial velocity convolved with Gaia error assuming star is type %s (pygaia) [km/s]'%(spectype)))
return gaia_table
| {
"repo_name": "apcooper/bright_analysis",
"path": "py/bright_analysis/gaia/gaia.py",
"copies": "1",
"size": "8201",
"license": "bsd-3-clause",
"hash": -2896884896647984000,
"line_mean": 44.8156424581,
"line_max": 159,
"alpha_frac": 0.6246799171,
"autogenerated": false,
"ratio": 3.01064610866373,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.413532602576373,
"avg_score": null,
"num_lines": null
} |
"""add_staging_tables
Revision ID: 50faca665f9c
Revises: 7833b2378161
Create Date: 2016-08-01 13:52:53.038526
"""
# revision identifiers, used by Alembic.
revision = '50faca665f9c'
down_revision = '7833b2378161'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Dispatch to the engine-specific ``upgrade_<engine_name>()`` in this module."""
    handler = globals()['upgrade_%s' % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific ``downgrade_<engine_name>()`` in this module."""
    handler = globals()['downgrade_%s' % engine_name]
    handler()
def upgrade_data_broker():
    """Create the data-broker staging and lookup tables with their indexes.

    Adds the per-submission staging tables (appropriation, award_financial,
    award_financial_assistance, object_class_program_activity) and the
    reference/lookup tables (cgac, object_class, program_activity, sf_133,
    tas_lookup). Reversed by ``downgrade_data_broker``.
    """
    ### commands auto generated by Alembic - please adjust! ###
    # Staged appropriation rows; one table row per submitted record
    # (row_number), tied to a submission and a job.
    op.create_table('appropriation',
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('appropriation_id', sa.Integer(), nullable=False),
    sa.Column('submission_id', sa.Integer(), nullable=False),
    sa.Column('job_id', sa.Integer(), nullable=False),
    sa.Column('row_number', sa.Integer(), nullable=False),
    sa.Column('adjustments_to_unobligated_cpe', sa.Numeric(), nullable=True),
    sa.Column('agency_identifier', sa.Text(), nullable=True),
    sa.Column('allocation_transfer_agency', sa.Text(), nullable=True),
    sa.Column('availability_type_code', sa.Text(), nullable=True),
    sa.Column('beginning_period_of_availa', sa.Text(), nullable=True),
    sa.Column('borrowing_authority_amount_cpe', sa.Numeric(), nullable=True),
    sa.Column('budget_authority_appropria_cpe', sa.Numeric(), nullable=True),
    sa.Column('budget_authority_available_cpe', sa.Numeric(), nullable=True),
    sa.Column('budget_authority_unobligat_fyb', sa.Numeric(), nullable=True),
    sa.Column('contract_authority_amount_cpe', sa.Numeric(), nullable=True),
    sa.Column('deobligations_recoveries_r_cpe', sa.Numeric(), nullable=True),
    sa.Column('ending_period_of_availabil', sa.Text(), nullable=True),
    sa.Column('gross_outlay_amount_by_tas_cpe', sa.Numeric(), nullable=True),
    sa.Column('main_account_code', sa.Text(), nullable=True),
    sa.Column('obligations_incurred_total_cpe', sa.Numeric(), nullable=True),
    sa.Column('other_budgetary_resources_cpe', sa.Numeric(), nullable=True),
    sa.Column('spending_authority_from_of_cpe', sa.Numeric(), nullable=True),
    sa.Column('status_of_budgetary_resour_cpe', sa.Numeric(), nullable=True),
    sa.Column('sub_account_code', sa.Text(), nullable=True),
    sa.Column('unobligated_balance_cpe', sa.Numeric(), nullable=True),
    sa.Column('tas', sa.Text(), nullable=False),
    sa.Column('is_first_quarter', sa.Boolean(), server_default='False', nullable=False),
    sa.PrimaryKeyConstraint('appropriation_id')
    )
    op.create_index(op.f('ix_appropriation_job_id'), 'appropriation', ['job_id'], unique=False)
    op.create_index(op.f('ix_appropriation_submission_id'), 'appropriation', ['submission_id'], unique=False)
    op.create_index(op.f('ix_appropriation_tas'), 'appropriation', ['tas'], unique=False)
    # Staged award financial rows (award-level obligations/outlays and
    # USSGL account amounts).
    op.create_table('award_financial',
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('award_financial_id', sa.Integer(), nullable=False),
    sa.Column('submission_id', sa.Integer(), nullable=False),
    sa.Column('job_id', sa.Integer(), nullable=False),
    sa.Column('row_number', sa.Integer(), nullable=False),
    sa.Column('agency_identifier', sa.Text(), nullable=True),
    sa.Column('allocation_transfer_agency', sa.Text(), nullable=True),
    sa.Column('availability_type_code', sa.Text(), nullable=True),
    sa.Column('beginning_period_of_availa', sa.Text(), nullable=True),
    sa.Column('by_direct_reimbursable_fun', sa.Text(), nullable=True),
    sa.Column('deobligations_recov_by_awa_cpe', sa.Numeric(), nullable=True),
    sa.Column('ending_period_of_availabil', sa.Text(), nullable=True),
    sa.Column('fain', sa.Text(), nullable=True),
    sa.Column('gross_outlay_amount_by_awa_cpe', sa.Numeric(), nullable=True),
    sa.Column('gross_outlay_amount_by_awa_fyb', sa.Numeric(), nullable=True),
    sa.Column('gross_outlays_delivered_or_cpe', sa.Numeric(), nullable=True),
    sa.Column('gross_outlays_delivered_or_fyb', sa.Numeric(), nullable=True),
    sa.Column('gross_outlays_undelivered_cpe', sa.Numeric(), nullable=True),
    sa.Column('gross_outlays_undelivered_fyb', sa.Numeric(), nullable=True),
    sa.Column('main_account_code', sa.Text(), nullable=True),
    sa.Column('object_class', sa.Text(), nullable=True),
    sa.Column('obligations_delivered_orde_cpe', sa.Numeric(), nullable=True),
    sa.Column('obligations_delivered_orde_fyb', sa.Numeric(), nullable=True),
    sa.Column('obligations_incurred_byawa_cpe', sa.Numeric(), nullable=True),
    sa.Column('obligations_undelivered_or_cpe', sa.Numeric(), nullable=True),
    sa.Column('obligations_undelivered_or_fyb', sa.Numeric(), nullable=True),
    sa.Column('parent_award_id', sa.Text(), nullable=True),
    sa.Column('piid', sa.Text(), nullable=True),
    sa.Column('program_activity_code', sa.Text(), nullable=True),
    sa.Column('program_activity_name', sa.Text(), nullable=True),
    sa.Column('sub_account_code', sa.Text(), nullable=True),
    sa.Column('transaction_obligated_amou', sa.Numeric(), nullable=True),
    sa.Column('uri', sa.Text(), nullable=True),
    sa.Column('ussgl480100_undelivered_or_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl480100_undelivered_or_fyb', sa.Numeric(), nullable=True),
    sa.Column('ussgl480200_undelivered_or_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl480200_undelivered_or_fyb', sa.Numeric(), nullable=True),
    sa.Column('ussgl483100_undelivered_or_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl483200_undelivered_or_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl487100_downward_adjus_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl487200_downward_adjus_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl488100_upward_adjustm_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl488200_upward_adjustm_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl490100_delivered_orde_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl490100_delivered_orde_fyb', sa.Numeric(), nullable=True),
    sa.Column('ussgl490200_delivered_orde_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl490800_authority_outl_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl490800_authority_outl_fyb', sa.Numeric(), nullable=True),
    sa.Column('ussgl493100_delivered_orde_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl497100_downward_adjus_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl497200_downward_adjus_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl498100_upward_adjustm_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl498200_upward_adjustm_cpe', sa.Numeric(), nullable=True),
    sa.Column('tas', sa.Text(), nullable=False),
    sa.Column('is_first_quarter', sa.Boolean(), server_default='False', nullable=False),
    sa.PrimaryKeyConstraint('award_financial_id')
    )
    op.create_index(op.f('ix_award_financial_fain'), 'award_financial', ['fain'], unique=False)
    op.create_index(op.f('ix_award_financial_job_id'), 'award_financial', ['job_id'], unique=False)
    op.create_index(op.f('ix_award_financial_piid'), 'award_financial', ['piid'], unique=False)
    op.create_index(op.f('ix_award_financial_submission_id'), 'award_financial', ['submission_id'], unique=False)
    # Composite index on TAS/object class/program activity for joins with
    # object_class_program_activity (see 'ix_oc_pa_tas_oc_pa' below).
    op.create_index('ix_award_financial_tas_oc_pa', 'award_financial', ['tas', 'object_class', 'program_activity_code'], unique=False)
    op.create_index(op.f('ix_award_financial_uri'), 'award_financial', ['uri'], unique=False)
    # Staged financial-assistance award rows (recipient, place of
    # performance, CFDA, funding amounts, etc.).
    op.create_table('award_financial_assistance',
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('award_financial_assistance_id', sa.Integer(), nullable=False),
    sa.Column('submission_id', sa.Integer(), nullable=False),
    sa.Column('job_id', sa.Integer(), nullable=False),
    sa.Column('row_number', sa.Integer(), nullable=False),
    sa.Column('action_date', sa.Text(), nullable=True),
    sa.Column('action_type', sa.Text(), nullable=True),
    sa.Column('assistance_type', sa.Text(), nullable=True),
    sa.Column('award_description', sa.Text(), nullable=True),
    sa.Column('awardee_or_recipient_legal', sa.Text(), nullable=True),
    sa.Column('awardee_or_recipient_uniqu', sa.Text(), nullable=True),
    sa.Column('awarding_agency_code', sa.Text(), nullable=True),
    sa.Column('awarding_agency_name', sa.Text(), nullable=True),
    sa.Column('awarding_office_code', sa.Text(), nullable=True),
    sa.Column('awarding_office_name', sa.Text(), nullable=True),
    sa.Column('awarding_sub_tier_agency_c', sa.Text(), nullable=True),
    sa.Column('awarding_sub_tier_agency_n', sa.Text(), nullable=True),
    sa.Column('award_modification_amendme', sa.Text(), nullable=True),
    sa.Column('business_funds_indicator', sa.Text(), nullable=True),
    sa.Column('business_types', sa.Text(), nullable=True),
    sa.Column('cfda_number', sa.Text(), nullable=True),
    sa.Column('cfda_title', sa.Text(), nullable=True),
    sa.Column('correction_late_delete_ind', sa.Text(), nullable=True),
    sa.Column('face_value_loan_guarantee', sa.Numeric(), nullable=True),
    sa.Column('fain', sa.Text(), nullable=True),
    sa.Column('federal_action_obligation', sa.Numeric(), nullable=True),
    sa.Column('fiscal_year_and_quarter_co', sa.Text(), nullable=True),
    sa.Column('funding_agency_code', sa.Text(), nullable=True),
    sa.Column('funding_agency_name', sa.Text(), nullable=True),
    sa.Column('funding_office_name', sa.Text(), nullable=True),
    sa.Column('funding_office_code', sa.Text(), nullable=True),
    sa.Column('funding_sub_tier_agency_co', sa.Text(), nullable=True),
    sa.Column('funding_sub_tier_agency_na', sa.Text(), nullable=True),
    sa.Column('legal_entity_address_line1', sa.Text(), nullable=True),
    sa.Column('legal_entity_address_line2', sa.Text(), nullable=True),
    sa.Column('legal_entity_address_line3', sa.Text(), nullable=True),
    sa.Column('legal_entity_city_code', sa.Text(), nullable=True),
    sa.Column('legal_entity_city_name', sa.Text(), nullable=True),
    sa.Column('legal_entity_congressional', sa.Text(), nullable=True),
    sa.Column('legal_entity_country_code', sa.Text(), nullable=True),
    sa.Column('legal_entity_county_code', sa.Text(), nullable=True),
    sa.Column('legal_entity_county_name', sa.Text(), nullable=True),
    sa.Column('legal_entity_foreign_city', sa.Text(), nullable=True),
    sa.Column('legal_entity_foreign_posta', sa.Text(), nullable=True),
    sa.Column('legal_entity_foreign_provi', sa.Text(), nullable=True),
    sa.Column('legal_entity_state_code', sa.Text(), nullable=True),
    sa.Column('legal_entity_state_name', sa.Text(), nullable=True),
    sa.Column('legal_entity_zip5', sa.Text(), nullable=True),
    sa.Column('legal_entity_zip_last4', sa.Text(), nullable=True),
    sa.Column('non_federal_funding_amount', sa.Numeric(), nullable=True),
    sa.Column('original_loan_subsidy_cost', sa.Numeric(), nullable=True),
    sa.Column('period_of_performance_curr', sa.Text(), nullable=True),
    sa.Column('period_of_performance_star', sa.Text(), nullable=True),
    sa.Column('place_of_performance_city', sa.Text(), nullable=True),
    sa.Column('place_of_performance_code', sa.Text(), nullable=True),
    sa.Column('place_of_performance_congr', sa.Text(), nullable=True),
    sa.Column('place_of_perform_country_c', sa.Text(), nullable=True),
    sa.Column('place_of_perform_county_na', sa.Text(), nullable=True),
    sa.Column('place_of_performance_forei', sa.Text(), nullable=True),
    sa.Column('place_of_perform_state_nam', sa.Text(), nullable=True),
    sa.Column('place_of_performance_zip4a', sa.Text(), nullable=True),
    sa.Column('record_type', sa.Integer(), nullable=True),
    sa.Column('sai_number', sa.Text(), nullable=True),
    sa.Column('total_funding_amount', sa.Numeric(), nullable=True),
    sa.Column('uri', sa.Text(), nullable=True),
    sa.Column('is_first_quarter', sa.Boolean(), server_default='False', nullable=False),
    sa.PrimaryKeyConstraint('award_financial_assistance_id')
    )
    op.create_index(op.f('ix_award_financial_assistance_fain'), 'award_financial_assistance', ['fain'], unique=False)
    op.create_index(op.f('ix_award_financial_assistance_job_id'), 'award_financial_assistance', ['job_id'], unique=False)
    op.create_index(op.f('ix_award_financial_assistance_submission_id'), 'award_financial_assistance', ['submission_id'], unique=False)
    op.create_index(op.f('ix_award_financial_assistance_uri'), 'award_financial_assistance', ['uri'], unique=False)
    # Lookup: agency names keyed by unique CGAC code.
    op.create_table('cgac',
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('cgac_id', sa.Integer(), nullable=False),
    sa.Column('cgac_code', sa.Text(), nullable=False),
    sa.Column('agency_name', sa.Text(), nullable=True),
    sa.PrimaryKeyConstraint('cgac_id')
    )
    op.create_index(op.f('ix_cgac_cgac_code'), 'cgac', ['cgac_code'], unique=True)
    # Lookup: object class names keyed by unique object class code.
    op.create_table('object_class',
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('object_class_id', sa.Integer(), nullable=False),
    sa.Column('object_class_code', sa.Text(), nullable=False),
    sa.Column('object_class_name', sa.Text(), nullable=True),
    sa.PrimaryKeyConstraint('object_class_id')
    )
    op.create_index(op.f('ix_object_class_object_class_code'), 'object_class', ['object_class_code'], unique=True)
    # Staged object class / program activity rows (same USSGL amount
    # columns as award_financial, aggregated at OC/PA level).
    op.create_table('object_class_program_activity',
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('object_class_program_activity_id', sa.Integer(), nullable=False),
    sa.Column('submission_id', sa.Integer(), nullable=False),
    sa.Column('job_id', sa.Integer(), nullable=False),
    sa.Column('row_number', sa.Integer(), nullable=False),
    sa.Column('agency_identifier', sa.Text(), nullable=True),
    sa.Column('allocation_transfer_agency', sa.Text(), nullable=True),
    sa.Column('availability_type_code', sa.Text(), nullable=True),
    sa.Column('beginning_period_of_availa', sa.Text(), nullable=True),
    sa.Column('by_direct_reimbursable_fun', sa.Text(), nullable=True),
    sa.Column('deobligations_recov_by_pro_cpe', sa.Numeric(), nullable=True),
    sa.Column('ending_period_of_availabil', sa.Text(), nullable=True),
    sa.Column('gross_outlay_amount_by_pro_cpe', sa.Numeric(), nullable=True),
    sa.Column('gross_outlay_amount_by_pro_fyb', sa.Numeric(), nullable=True),
    sa.Column('gross_outlays_delivered_or_cpe', sa.Numeric(), nullable=True),
    sa.Column('gross_outlays_delivered_or_fyb', sa.Numeric(), nullable=True),
    sa.Column('gross_outlays_undelivered_cpe', sa.Numeric(), nullable=True),
    sa.Column('gross_outlays_undelivered_fyb', sa.Numeric(), nullable=True),
    sa.Column('main_account_code', sa.Text(), nullable=True),
    sa.Column('object_class', sa.Text(), nullable=True),
    sa.Column('obligations_delivered_orde_cpe', sa.Numeric(), nullable=True),
    sa.Column('obligations_delivered_orde_fyb', sa.Numeric(), nullable=True),
    sa.Column('obligations_incurred_by_pr_cpe', sa.Numeric(), nullable=True),
    sa.Column('obligations_undelivered_or_cpe', sa.Numeric(), nullable=True),
    sa.Column('obligations_undelivered_or_fyb', sa.Numeric(), nullable=True),
    sa.Column('program_activity_code', sa.Text(), nullable=True),
    sa.Column('program_activity_name', sa.Text(), nullable=True),
    sa.Column('sub_account_code', sa.Text(), nullable=True),
    sa.Column('ussgl480100_undelivered_or_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl480100_undelivered_or_fyb', sa.Numeric(), nullable=True),
    sa.Column('ussgl480200_undelivered_or_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl480200_undelivered_or_fyb', sa.Numeric(), nullable=True),
    sa.Column('ussgl483100_undelivered_or_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl483200_undelivered_or_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl487100_downward_adjus_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl487200_downward_adjus_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl488100_upward_adjustm_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl488200_upward_adjustm_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl490100_delivered_orde_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl490100_delivered_orde_fyb', sa.Numeric(), nullable=True),
    sa.Column('ussgl490200_delivered_orde_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl490800_authority_outl_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl490800_authority_outl_fyb', sa.Numeric(), nullable=True),
    sa.Column('ussgl493100_delivered_orde_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl497100_downward_adjus_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl497200_downward_adjus_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl498100_upward_adjustm_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl498200_upward_adjustm_cpe', sa.Numeric(), nullable=True),
    sa.Column('tas', sa.Text(), nullable=False),
    sa.Column('is_first_quarter', sa.Boolean(), server_default='False', nullable=False),
    sa.PrimaryKeyConstraint('object_class_program_activity_id')
    )
    op.create_index(op.f('ix_object_class_program_activity_job_id'), 'object_class_program_activity', ['job_id'], unique=False)
    op.create_index(op.f('ix_object_class_program_activity_submission_id'), 'object_class_program_activity', ['submission_id'], unique=False)
    op.create_index('ix_oc_pa_tas_oc_pa', 'object_class_program_activity', ['tas', 'object_class', 'program_activity_code'], unique=False)
    # Lookup: program activities; the combination of budget year, agency,
    # account and program activity code/name is unique.
    op.create_table('program_activity',
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('program_activity_id', sa.Integer(), nullable=False),
    sa.Column('budget_year', sa.Text(), nullable=False),
    sa.Column('agency_id', sa.Text(), nullable=False),
    sa.Column('allocation_transfer_id', sa.Text(), nullable=True),
    sa.Column('account_number', sa.Text(), nullable=False),
    sa.Column('program_activity_code', sa.Text(), nullable=False),
    sa.Column('program_activity_name', sa.Text(), nullable=False),
    sa.PrimaryKeyConstraint('program_activity_id')
    )
    op.create_index('ix_pa_tas_pa', 'program_activity', ['budget_year', 'agency_id', 'allocation_transfer_id', 'account_number', 'program_activity_code', 'program_activity_name'], unique=True)
    # SF-133 report amounts; one amount per (tas, line), enforced unique.
    op.create_table('sf_133',
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('sf133_id', sa.Integer(), nullable=False),
    sa.Column('agency_identifier', sa.Text(), nullable=False),
    sa.Column('allocation_transfer_agency', sa.Text(), nullable=True),
    sa.Column('availability_type_code', sa.Text(), nullable=True),
    sa.Column('beginning_period_of_availa', sa.Text(), nullable=True),
    sa.Column('ending_period_of_availabil', sa.Text(), nullable=True),
    sa.Column('main_account_code', sa.Text(), nullable=False),
    sa.Column('sub_account_code', sa.Text(), nullable=False),
    sa.Column('tas', sa.Text(), nullable=False),
    sa.Column('fiscal_year', sa.Text(), nullable=True),
    sa.Column('period', sa.Text(), nullable=True),
    sa.Column('line', sa.Integer(), nullable=False),
    sa.Column('amount', sa.Numeric(), server_default='0', nullable=False),
    sa.PrimaryKeyConstraint('sf133_id')
    )
    op.create_index('ix_sf_133_tas', 'sf_133', ['tas', 'line'], unique=True)
    # Lookup: valid TAS component combinations, unique across all
    # component columns, each also indexed individually.
    op.create_table('tas_lookup',
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('tas_id', sa.Integer(), nullable=False),
    sa.Column('allocation_transfer_agency', sa.Text(), nullable=True),
    sa.Column('agency_identifier', sa.Text(), nullable=True),
    sa.Column('beginning_period_of_availability', sa.Text(), nullable=True),
    sa.Column('ending_period_of_availability', sa.Text(), nullable=True),
    sa.Column('availability_type_code', sa.Text(), nullable=True),
    sa.Column('main_account_code', sa.Text(), nullable=True),
    sa.Column('sub_account_code', sa.Text(), nullable=True),
    sa.PrimaryKeyConstraint('tas_id')
    )
    op.create_index('ix_tas', 'tas_lookup', ['allocation_transfer_agency', 'agency_identifier', 'beginning_period_of_availability', 'ending_period_of_availability', 'availability_type_code', 'main_account_code', 'sub_account_code'], unique=True)
    op.create_index(op.f('ix_tas_lookup_agency_identifier'), 'tas_lookup', ['agency_identifier'], unique=False)
    op.create_index(op.f('ix_tas_lookup_allocation_transfer_agency'), 'tas_lookup', ['allocation_transfer_agency'], unique=False)
    op.create_index(op.f('ix_tas_lookup_availability_type_code'), 'tas_lookup', ['availability_type_code'], unique=False)
    op.create_index(op.f('ix_tas_lookup_beginning_period_of_availability'), 'tas_lookup', ['beginning_period_of_availability'], unique=False)
    op.create_index(op.f('ix_tas_lookup_ending_period_of_availability'), 'tas_lookup', ['ending_period_of_availability'], unique=False)
    op.create_index(op.f('ix_tas_lookup_main_account_code'), 'tas_lookup', ['main_account_code'], unique=False)
    op.create_index(op.f('ix_tas_lookup_sub_account_code'), 'tas_lookup', ['sub_account_code'], unique=False)
    ### end Alembic commands ###
def downgrade_data_broker():
    """Drop every index and table created by ``upgrade_data_broker``.

    Objects are dropped in reverse creation order; each table's indexes
    are removed before the table itself.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_tas_lookup_sub_account_code'), table_name='tas_lookup')
    op.drop_index(op.f('ix_tas_lookup_main_account_code'), table_name='tas_lookup')
    op.drop_index(op.f('ix_tas_lookup_ending_period_of_availability'), table_name='tas_lookup')
    op.drop_index(op.f('ix_tas_lookup_beginning_period_of_availability'), table_name='tas_lookup')
    op.drop_index(op.f('ix_tas_lookup_availability_type_code'), table_name='tas_lookup')
    op.drop_index(op.f('ix_tas_lookup_allocation_transfer_agency'), table_name='tas_lookup')
    op.drop_index(op.f('ix_tas_lookup_agency_identifier'), table_name='tas_lookup')
    op.drop_index('ix_tas', table_name='tas_lookup')
    op.drop_table('tas_lookup')
    op.drop_index('ix_sf_133_tas', table_name='sf_133')
    op.drop_table('sf_133')
    op.drop_index('ix_pa_tas_pa', table_name='program_activity')
    op.drop_table('program_activity')
    op.drop_index('ix_oc_pa_tas_oc_pa', table_name='object_class_program_activity')
    op.drop_index(op.f('ix_object_class_program_activity_submission_id'), table_name='object_class_program_activity')
    op.drop_index(op.f('ix_object_class_program_activity_job_id'), table_name='object_class_program_activity')
    op.drop_table('object_class_program_activity')
    op.drop_index(op.f('ix_object_class_object_class_code'), table_name='object_class')
    op.drop_table('object_class')
    op.drop_index(op.f('ix_cgac_cgac_code'), table_name='cgac')
    op.drop_table('cgac')
    op.drop_index(op.f('ix_award_financial_assistance_uri'), table_name='award_financial_assistance')
    op.drop_index(op.f('ix_award_financial_assistance_submission_id'), table_name='award_financial_assistance')
    op.drop_index(op.f('ix_award_financial_assistance_job_id'), table_name='award_financial_assistance')
    op.drop_index(op.f('ix_award_financial_assistance_fain'), table_name='award_financial_assistance')
    op.drop_table('award_financial_assistance')
    op.drop_index(op.f('ix_award_financial_uri'), table_name='award_financial')
    op.drop_index('ix_award_financial_tas_oc_pa', table_name='award_financial')
    op.drop_index(op.f('ix_award_financial_submission_id'), table_name='award_financial')
    op.drop_index(op.f('ix_award_financial_piid'), table_name='award_financial')
    op.drop_index(op.f('ix_award_financial_job_id'), table_name='award_financial')
    op.drop_index(op.f('ix_award_financial_fain'), table_name='award_financial')
    op.drop_table('award_financial')
    op.drop_index(op.f('ix_appropriation_tas'), table_name='appropriation')
    op.drop_index(op.f('ix_appropriation_submission_id'), table_name='appropriation')
    op.drop_index(op.f('ix_appropriation_job_id'), table_name='appropriation')
    op.drop_table('appropriation')
    ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/50faca665f9c_add_staging_tables.py",
"copies": "2",
"size": "24606",
"license": "cc0-1.0",
"hash": 6568207678755086000,
"line_mean": 64.4414893617,
"line_max": 245,
"alpha_frac": 0.6921482565,
"autogenerated": false,
"ratio": 3.1083880747852453,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48005363312852456,
"avg_score": null,
"num_lines": null
} |
"""Add standard theano-related flags to an argparse.ArgumentParser."""
import argparse
import sys
import theano
from .. import log, log_dict
class NLPArgumentParser(argparse.ArgumentParser):
    """ArgumentParser pre-loaded with common NLP experiment flags.

    Each ``add_*`` method registers one standard flag so individual
    scripts do not have to repeat type/default/help boilerplate.
    """

    def add_flag_helper(self, long_name, short_name, *args, **kwargs):
        """Register ``--<long_name>`` (plus optional ``-<short_name>``).

        When a help message is present, the default value (if any) is
        appended to it, and a trailing period is added if missing.
        """
        if 'help' in kwargs:
            message = kwargs['help']
            if 'default' in kwargs:
                # Show the default value in --help output.
                message = '%s (default=%s)' % (message, str(kwargs['default']))
            if not message.endswith('.'):
                message = message + '.'
            kwargs['help'] = message
        flags = ['--%s' % long_name]
        if short_name:
            flags.append('-%s' % short_name)
        self.add_argument(*(flags + list(args)), **kwargs)

    # ---- Model hyperparameters ----

    def add_hidden_size(self, short_name=None):
        """Flag for hidden-vector dimensionality."""
        self.add_flag_helper('hidden-size', short_name, type=int, help='Dimension of hidden vectors')

    def add_emb_size(self, short_name=None):
        """Flag for word-embedding dimensionality."""
        self.add_flag_helper('emb-size', short_name, type=int, help='Dimension of word vectors')

    def add_weight_scale(self, short_name=None, default=1e-1):
        """Flag for the weight-initialization scale."""
        self.add_flag_helper('weight-scale', short_name, type=float, default=default, help='Weight scale for initialization')

    def add_l2_reg(self, short_name=None, default=0.0):
        """Flag for the L2 regularization constant."""
        self.add_flag_helper('l2-reg', short_name, type=float, default=default, help='L2 Regularization constant.')

    def add_unk_cutoff(self, short_name=None):
        """Flag for the rare-word UNK cutoff."""
        self.add_flag_helper('unk-cutoff', short_name, type=int, default=0, help='Treat input words with <= this many occurrences as UNK')

    # ---- Training hyperparameters ----

    def add_num_epochs(self, short_name=None):
        """Flag for epoch schedule; a comma-separated list becomes a list of ints."""
        self.add_flag_helper(
            'num-epochs', short_name, default=[], type=lambda s: [int(x) for x in s.split(',')],
            help=('Number of epochs to train. If comma-separated list, will run for some epochs, halve learning rate, etc.'))

    def add_learning_rate(self, short_name=None, default=0.1):
        """Flag for the initial learning rate."""
        self.add_flag_helper('learning-rate', short_name, type=float, default=default, help='Initial learning rate.')

    def add_clip_thresh(self, short_name=None):
        """Flag for the gradient-clipping threshold."""
        self.add_flag_helper('clip-thresh', short_name, type=float, default=1.0, help='Total-norm threshold to clip gradients.')

    def add_batch_size(self, short_name=None):
        """Flag for the maximum batch size."""
        self.add_flag_helper('batch-size', short_name, type=int, default=1, help='Maximum batch size')

    # ---- Decoding hyperparameters ----

    def add_beam_size(self, short_name=None):
        """Flag for beam-search width (0 means greedy)."""
        self.add_flag_helper('beam-size', short_name, type=int, default=0, help='Use beam search with given beam size, or greedy if 0')

    # ---- Data ----

    def add_train_file(self, short_name=None):
        """Flag for the training-data path."""
        self.add_flag_helper('train-file', short_name, help='Path to training data')

    def add_dev_file(self, short_name=None):
        """Flag for the dev-data path."""
        self.add_flag_helper('dev-file', short_name, help='Path to dev data')

    def add_test_file(self, short_name=None):
        """Flag for the test-data path."""
        self.add_flag_helper('test-file', short_name, help='Path to test data')

    def add_dev_frac(self, short_name=None):
        """Flag for the fraction of train data held out as dev."""
        self.add_flag_helper('dev-frac', short_name, type=float, default=0.0, help='Take this fraction of train data as dev data')

    # ---- Random seeds ----

    def add_dev_seed(self, short_name=None):
        """Flag for the train/dev split RNG seed."""
        self.add_flag_helper('dev-seed', short_name, type=int, default=0, help='RNG seed for the train/dev splits')

    def add_model_seed(self, short_name=None):
        """Flag for the model RNG seed."""
        self.add_flag_helper('model-seed', short_name, type=int, default=0, help="RNG seed for the model")

    # ---- Saving and loading ----

    def add_save_file(self, short_name=None):
        """Flag for the model save path."""
        self.add_flag_helper('save-file', short_name, help='Path for saving model')

    def add_load_file(self, short_name=None):
        """Flag for the model load path."""
        self.add_flag_helper('load-file', short_name, help='Path for loading model')

    # ---- Output ----

    def add_stats_file(self, short_name=None):
        """Flag for the stats JSON output path."""
        self.add_flag_helper('stats-file', short_name, help='File to write stats JSON')

    def add_html_file(self, short_name=None):
        """Flag for the HTML output path."""
        self.add_flag_helper('html-file', short_name, help='File to write output HTML')

    def add_theano_flags(self):
        """Flags controlling Theano compilation and profiling modes."""
        self.add_flag_helper('theano-fast-compile', None, action='store_true', help='Run Theano in fast compile mode')
        self.add_flag_helper('theano-profile', None, action='store_true', help='Turn on profiling in Theano')

    def parse_args(self):
        """Parse arguments, log them, and configure Theano.

        Prints usage and exits when invoked with no command-line args.
        """
        if len(sys.argv) == 1:
            self.print_help()
            sys.exit(1)
        opts = super(NLPArgumentParser, self).parse_args()
        log_dict(vars(opts), 'Command-line Arguments')
        configure_theano(opts)
        return opts
def configure_theano(opts):
    """Apply Theano runtime settings based on the parsed options.

    Fast-compile mode disables graph optimization and keeps tracebacks
    useful for debugging; otherwise the optimized FAST_RUN/cvm setup is
    used.  Profiling is an independent toggle.
    """
    if opts.theano_fast_compile:
        theano.config.mode = 'FAST_COMPILE'
        theano.config.optimizer = 'None'
        theano.config.traceback.limit = 20
    else:
        theano.config.mode = 'FAST_RUN'
        theano.config.linker = 'cvm'
    if opts.theano_profile:
        theano.config.profile = True
| {
"repo_name": "robinjia/nectar",
"path": "nectar/theanoutil/args.py",
"copies": "1",
"size": "5420",
"license": "mit",
"hash": -7707927492177636000,
"line_mean": 44.1666666667,
"line_max": 121,
"alpha_frac": 0.6507380074,
"autogenerated": false,
"ratio": 3.510362694300518,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46611007017005185,
"avg_score": null,
"num_lines": null
} |
"""add starred association table
Revision ID: 37b62c1b2866
Revises: aee4d743e57
Create Date: 2015-06-06 23:10:45.415730
"""
# revision identifiers, used by Alembic.
revision = '37b62c1b2866'
down_revision = 'aee4d743e57'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the user <-> contract 'starred' association table."""
    ### commands auto generated by Alembic - please adjust! ###
    # FKs use ON DELETE SET NULL so association rows survive parent deletes.
    op.create_table('contract_starred_association',
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.Column('contract_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['contract_id'], ['contract.id'], ondelete='SET NULL'),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ondelete='SET NULL')
    )
    # Index both FK columns for lookups in either direction.
    op.create_index(op.f('ix_contract_starred_association_contract_id'), 'contract_starred_association', ['contract_id'], unique=False)
    op.create_index(op.f('ix_contract_starred_association_user_id'), 'contract_starred_association', ['user_id'], unique=False)
    ### end Alembic commands ###
def downgrade():
    """Drop the 'starred' association table and its indexes."""
    ### commands auto generated by Alembic - please adjust! ###
    # Indexes must be dropped before the table that owns them.
    op.drop_index(op.f('ix_contract_starred_association_user_id'), table_name='contract_starred_association')
    op.drop_index(op.f('ix_contract_starred_association_contract_id'), table_name='contract_starred_association')
    op.drop_table('contract_starred_association')
    ### end Alembic commands ###
| {
"repo_name": "CityofPittsburgh/pittsburgh-purchasing-suite",
"path": "migrations/versions/37b62c1b2866_add_starred_association_table.py",
"copies": "3",
"size": "1372",
"license": "bsd-3-clause",
"hash": 6725971736624940000,
"line_mean": 38.2,
"line_max": 135,
"alpha_frac": 0.7091836735,
"autogenerated": false,
"ratio": 3.354523227383863,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5563706900883864,
"avg_score": null,
"num_lines": null
} |
"""add starred
Revision ID: 4a802b741d2f
Revises: 312cd5a9f878
Create Date: 2015-02-12 18:10:19.187733
"""
# revision identifiers, used by Alembic.
revision = '4a802b741d2f'
down_revision = '312cd5a9f878'
import warnings
from alembic import op
import sqlalchemy as sa
from gertty.dbsupport import sqlite_alter_columns
def upgrade():
    """Add boolean 'starred'/'pending_starred' columns to the change table."""
    # NOTE(review): warnings are suppressed here, presumably to silence
    # SQLAlchemy/SQLite noise during the ALTERs — confirm before removing.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        # Add as nullable first so existing rows are accepted...
        op.add_column('change', sa.Column('starred', sa.Boolean()))
        op.add_column('change', sa.Column('pending_starred', sa.Boolean()))
        # ...backfill every row with False...
        connection = op.get_bind()
        change = sa.sql.table('change',
                              sa.sql.column('starred', sa.Boolean()),
                              sa.sql.column('pending_starred', sa.Boolean()))
        connection.execute(change.update().values({'starred':False,
                                                   'pending_starred':False}))
        # ...then tighten to NOT NULL and add indexes (SQLite needs the
        # table-rebuild helper for column alterations).
        sqlite_alter_columns('change', [
            sa.Column('starred', sa.Boolean(), index=True, nullable=False),
            sa.Column('pending_starred', sa.Boolean(), index=True, nullable=False),
            ])
def downgrade():
    # Intentionally a no-op: the starred columns are left in place.
    pass
| {
"repo_name": "aspiers/gertty",
"path": "gertty/alembic/versions/4a802b741d2f_add_starred.py",
"copies": "1",
"size": "1136",
"license": "apache-2.0",
"hash": -5813366072593175000,
"line_mean": 26.7073170732,
"line_max": 79,
"alpha_frac": 0.625,
"autogenerated": false,
"ratio": 3.561128526645768,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4686128526645768,
"avg_score": null,
"num_lines": null
} |
"""Adds tasks to task queue"""
import pickle
from google.appengine.api import taskqueue
def add_feeds_update_task():
    """Enqueue the task that updates all feeds."""
    taskqueue.add(url='/task/update_feeds')
def add_feed_build_tasks(params_list):
    """Enqueue one feed-build task per parameter dict in params_list."""
    queue = taskqueue.Queue()
    build_tasks = [taskqueue.Task(url='/task/build_feed',
                                  payload=pack_payload(params))
                   for params in params_list]
    _add_multi(queue, build_tasks)
def add_torrent_tasks(params_list):
    """Enqueue one torrent-entry task per parameter dict in params_list."""
    queue = taskqueue.Queue()
    torrent_tasks = [taskqueue.Task(url='/task/torrent',
                                    payload=pack_payload(params))
                     for params in params_list]
    _add_multi(queue, torrent_tasks)
def add_map_rebuild_task():
    """Enqueue the task that rebuilds the category map."""
    taskqueue.add(url='/task/buildmap')
def pack_payload(value):
    """Serialize a value with pickle for use as a task payload."""
    return pickle.dumps(value)
def unpack_payload(payload):
    """Deserialize a task payload produced by pack_payload.

    NOTE: pickle.loads must only ever be fed payloads this app enqueued
    itself — never untrusted data.
    """
    return pickle.loads(payload)
def _add_multi(queue, tasks, *args, **kwargs):
    """Enqueue tasks in batches no larger than the task-queue API permits."""
    for batch in chunks(tasks, taskqueue.MAX_TASKS_PER_ADD):
        queue.add(batch, *args, **kwargs)
def chunks(seq, n):
    """Yield successive n-sized slices of seq (last one may be shorter)."""
    start = 0
    while start < len(seq):
        yield seq[start:start + n]
        start += n
| {
"repo_name": "notapresent/rutracker_rss",
"path": "taskmaster.py",
"copies": "1",
"size": "1374",
"license": "apache-2.0",
"hash": -8985403653443226000,
"line_mean": 27.0408163265,
"line_max": 98,
"alpha_frac": 0.6688500728,
"autogenerated": false,
"ratio": 3.4522613065326633,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46211113793326636,
"avg_score": null,
"num_lines": null
} |
"""Add state, city, and county name to PublishedAwardFinancialAssistance table
Revision ID: 0bf2ed508f33
Revises: 2c2b9b1ff0e5
Create Date: 2017-07-21 13:05:06.714431
"""
# revision identifiers, used by Alembic.
revision = '0bf2ed508f33'
down_revision = '2c2b9b1ff0e5'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function (e.g. data_broker)."""
    globals()['upgrade_%s' % engine_name]()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function."""
    globals()['downgrade_%s' % engine_name]()
def upgrade_data_broker():
    """Add the three place-of-performance name columns (state/county/city)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('published_award_financial_assistance', sa.Column('place_of_perform_county_na', sa.Text(), nullable=True))
    op.add_column('published_award_financial_assistance', sa.Column('place_of_perform_state_nam', sa.Text(), nullable=True))
    op.add_column('published_award_financial_assistance', sa.Column('place_of_performance_city', sa.Text(), nullable=True))
    ### end Alembic commands ###
def downgrade_data_broker():
    """Remove the place-of-performance name columns added by upgrade."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('published_award_financial_assistance', 'place_of_performance_city')
    op.drop_column('published_award_financial_assistance', 'place_of_perform_state_nam')
    op.drop_column('published_award_financial_assistance', 'place_of_perform_county_na')
    ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/0bf2ed508f33_add_state_city_and_county_name_to_.py",
"copies": "1",
"size": "1405",
"license": "cc0-1.0",
"hash": 6555400982647146000,
"line_mean": 30.9318181818,
"line_max": 124,
"alpha_frac": 0.7209964413,
"autogenerated": false,
"ratio": 3.178733031674208,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43997294729742076,
"avg_score": null,
"num_lines": null
} |
"""Add status field to project and remove is_ready field.
Revision ID: 32da2a33a6cc
Revises: 41dda8349f27
Create Date: 2013-10-08 22:41:31.545627
"""
# revision identifiers, used by Alembic.
revision = '32da2a33a6cc'
down_revision = '41dda8349f27'
from alembic import op
import sqlalchemy as sa
# Enum of project states, plus a lightweight table construct used for the
# data-migration UPDATE statements in upgrade()/downgrade() below.
status_type = sa.Enum(u'locked', u'notready', u'ready', name=u'proj_status')
project = sa.sql.table('project', sa.Column(u'is_ready', sa.Boolean),
                       sa.Column(u'status', status_type))
def upgrade():
    """Replace the boolean is_ready column with a status enum column."""
    ### commands auto generated by Alembic - please adjust! ###
    # Create the enum type itself before any column references it.
    status_type.create(op.get_bind(), checkfirst=False)
    op.add_column('project', sa.Column('status', status_type,
                                       server_default=u'notready',
                                       nullable=False))
    # Carry over the old flag: is_ready == True becomes status == 'ready'.
    op.execute(project.update().where(project.c.is_ready)
               .values(status=u'ready'))
    op.drop_column('project', u'is_ready')
    ### end Alembic commands ###
def downgrade():
    """Restore the boolean is_ready column from the status enum."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('project', sa.Column(u'is_ready', sa.BOOLEAN(),
                                       nullable=False, server_default=u'0'))
    # status == 'ready' maps back to is_ready == True; other states to False.
    op.execute(project.update().where(project.c.status == 'ready')
               .values(is_ready=True))
    op.drop_column('project', 'status')
    # Drop the enum type last, after no column references it anymore.
    status_type.drop(op.get_bind(), checkfirst=False)
    ### end Alembic commands ###
| {
"repo_name": "ucsb-cs/submit",
"path": "submit/migrations/versions/32da2a33a6cc_add_status_field_to_.py",
"copies": "1",
"size": "1462",
"license": "bsd-2-clause",
"hash": 5122181228924012000,
"line_mean": 33.8095238095,
"line_max": 76,
"alpha_frac": 0.6155950752,
"autogenerated": false,
"ratio": 3.44811320754717,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.456370828274717,
"avg_score": null,
"num_lines": null
} |
# Adds Text-based access to trees
from .cut import Cut
class TextBranch(str):
    """A tree branch name that builds expression strings via operators.

    Comparison operators return ``Cut`` selections; arithmetic operators
    return new ``TextBranch`` expression strings; ``>>`` builds a
    ``TTree::Draw``-style histogram target string.
    """
    def __lt__(self, other):
        return Cut('({0}<({1}))'.format(self, other))
    def __gt__(self, other):
        return Cut('({0}>({1}))'.format(self, other))
    def __le__(self, other):
        return Cut('({0}<=({1}))'.format(self, other))
    def __ge__(self, other):
        return Cut('({0}>=({1}))'.format(self, other))
    def __eq__(self, other):
        return Cut('({0}==({1}))'.format(self, other))
    # BUG FIX: this method was previously named __neq__, which Python never
    # invokes — the != operator therefore fell back to plain string
    # inequality instead of producing a Cut.
    def __ne__(self, other):
        return Cut('({0}!=({1}))'.format(self, other))
    __neq__ = __ne__  # backward-compatible alias for the old (misspelled) name
    # Overriding __eq__ would otherwise set __hash__ to None on Python 3,
    # making instances unhashable; keep the plain string hash.
    __hash__ = str.__hash__
    def __sub__(self, other):
        return self.__class__('({0}-{1})'.format(self, other))
    def __rsub__(self, other):
        return self.__class__('({1}-{0})'.format(self, other))
    def __add__(self, other):
        return self.__class__('({0}+{1})'.format(self, other))
    def __radd__(self, other):
        return self.__class__('({1}+{0})'.format(self, other))
    def __mul__(self, other):
        return self.__class__('({0}*{1})'.format(self, other))
    def __rmul__(self, other):
        return self.__class__('({1}*{0})'.format(self, other))
    def __div__(self, other):  # Python 2 division
        return self.__class__('({0}/{1})'.format(self, other))
    def __rdiv__(self, other):
        return self.__class__('({1}/{0})'.format(self, other))
    def __truediv__(self, other):  # Python 3 division
        return self.__class__('({0}/{1})'.format(self, other))
    def __rtruediv__(self, other):
        return self.__class__('({1}/{0})'.format(self, other))
    def __abs__(self):
        return self.__class__('abs({0})'.format(self))
    def __rshift__(self, tup):
        """Build a draw target: ``b >> (nbins, lo, hi)`` or ``b >> (name, nbins, lo, hi)``."""
        if len(tup) == 3:
            return '{0}>>({1[0]},{1[1]},{1[2]})'.format(self, tup)
        elif len(tup) == 4:
            return '{0}>>{1[0]}({1[1]},{1[2]},{1[3]})'.format(self, tup)
        else:
            raise RuntimeError("Must shift a len 3 or 4 tuple")
class TextTree(object):
    """Expose a tree's branches as TextBranch attributes by name."""
    def __init__(self, tree):
        self._tree = tree
        # Cache the branch names and mirror each one as an attribute.
        self._branch_names = [branch.GetName()
                              for branch in tree.GetListOfBranches()]
        for branch_name in self._branch_names:
            setattr(self, branch_name, TextBranch(branch_name))
    def __iter__(self):
        """Iterate over the branch names."""
        return iter(self._branch_names)
| {
"repo_name": "ndawe/rootpy",
"path": "rootpy/tree/texttree.py",
"copies": "2",
"size": "2218",
"license": "bsd-3-clause",
"hash": -1837007510015727600,
"line_mean": 38.6071428571,
"line_max": 76,
"alpha_frac": 0.5144274121,
"autogenerated": false,
"ratio": 3.285925925925926,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48003533380259267,
"avg_score": null,
"num_lines": null
} |
"""Adds the ability to Controllers to checkpoint upon receipt of a
signal, by default SIGTERM. There can be only one instance of Handler.
Your class must subclass from both Handler.Handler and
Controller.Controller, furthermore Handler must be the leftmost of the
superclasses. E.g.,
class MyController(Handler.Handler, Controller.Controller):
pass
"""
import sys
import signal
import weakref
class Handler(object):
    """Mixin that checkpoints a Controller when a termination signal arrives.

    Per the module docstring, subclasses must inherit from both Handler and
    Controller with Handler leftmost, so the isCheckpointStep/checkpoint
    overrides here run before Controller's via the MRO.  Only one live
    instance is allowed at a time.
    """
    # The commented out bits were from an attempt to relax the
    # singleton requirement.
    instance = None#weakref.WeakValueDictionary()
    #i = 0
    def __new__(cls, *args, **kwargs):
        # Enforce a single live instance, tracked by weak reference so the
        # class attribute does not keep the controller alive on its own.
        if Handler.instance is None or Handler.instance() is None:
            self = object.__new__(cls, *args, **kwargs)
            self._signalled = False
            Handler.instance = weakref.ref(self)
        else:
            raise RuntimeError('This is a singleton class that inherits from Handler!')
        #cls.instances[cls.i] = self
        #cls.i += 1
        return self
    @classmethod
    def handle(cls, signum, frame):
        """Signal handler: flag the live controller to checkpoint and die."""
        #for con in cls.instances.values():
        if cls.instance is not None:
            con = cls.instance()
            if con is not None:
                # easiest would be to call con.checkpoint() but we can't
                # just stop part way through a timestep, need to let this
                # one finish, THEN checkpoint (& die). Instead, set
                # con._signalled to True and alter isCPStep to deal.
                con._signalled = True
                con.log('Received signal to checkpoint and die')
        return
    def isCheckpointStep(self):
        # A pending signal forces a checkpoint at the next step boundary;
        # otherwise defer to the Controller's regular schedule.
        if self._signalled:
            return True
        return super(Handler, self).isCheckpointStep()
    def checkpoint(self):
        """Checkpoint via the superclass, then exit if a signal was pending."""
        signalled = self._signalled
        # We really don't want to be saving the state with this flag set:
        # as soon as it runs on resume it will checkpoint & die!
        self._signalled = False
        super(Handler, self).checkpoint()
        if signalled:
            self.log('Terminating due to signal... bye!')
            sys.exit(0)
            pass
        return
    pass
signal.signal(signal.SIGTERM, Handler.handle)
| {
"repo_name": "rupertnash/subgrid",
"path": "python/dqTools/Handler.py",
"copies": "2",
"size": "2220",
"license": "mit",
"hash": 968760433830084100,
"line_mean": 30.2676056338,
"line_max": 87,
"alpha_frac": 0.6189189189,
"autogenerated": false,
"ratio": 4.396039603960396,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6014958522860396,
"avg_score": null,
"num_lines": null
} |
"""Adds the custom filter 'urlize_quoted_links'
This is identical to the built-in filter 'urlize' with the exception that
single and double quotes are permitted as leading or trailing punctuation.
"""
# Almost all of this code is copied verbatim from django.utils.html
# LEADING_PUNCTUATION and TRAILING_PUNCTUATION have been modified
import re
import string
from django.utils.safestring import SafeData, mark_safe
from django.utils.encoding import force_unicode
from django.utils.http import urlquote
from django.utils.html import escape
from django import template
# Configuration for urlize() function.
LEADING_PUNCTUATION = ['(', '<', '<', '"', "'"]
TRAILING_PUNCTUATION = ['.', ',', ')', '>', '\n', '>', '"', "'"]
# List of possible strings used for bullets in bulleted lists.
DOTS = ['·', '*', '\xe2\x80\xa2', '•', '•', '•']
# Pre-compiled patterns used by urlize_quoted_links (copied from
# django.utils.html, with the modified punctuation lists above).
unencoded_ampersands_re = re.compile(r'&(?!(\w+|#\d+);)')
# Splits on whitespace while keeping the separators, so the text can be
# reassembled with ''.join() after individual words are linkified.
word_split_re = re.compile(r'(\s+)')
# Captures leading punctuation, the word core, and trailing punctuation.
punctuation_re = re.compile('^(?P<lead>(?:%s)*)(?P<middle>.*?)(?P<trail>(?:%s)*)$' % \
    ('|'.join([re.escape(x) for x in LEADING_PUNCTUATION]),
    '|'.join([re.escape(x) for x in TRAILING_PUNCTUATION])))
simple_email_re = re.compile(r'^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$')
link_target_attribute_re = re.compile(r'(<a [^>]*?)target=[^\s>]+')
html_gunk_re = re.compile(r'(?:<br clear="all">|<i><\/i>|<b><\/b>|<em><\/em>|<strong><\/strong>|<\/?smallcaps>|<\/?uppercase>)', re.IGNORECASE)
hard_coded_bullets_re = re.compile(r'((?:<p>(?:%s).*?[a-zA-Z].*?</p>\s*)+)' % '|'.join([re.escape(x) for x in DOTS]), re.DOTALL)
trailing_empty_content_re = re.compile(r'(?:<p>(?: |\s|<br \/>)*?</p>\s*)+\Z')
def urlize_quoted_links(text, trim_url_limit=None, nofollow=True, autoescape=True):
    """
    Converts any URLs in text into clickable links.

    Works on http://, https://, www. links and links ending in .org, .net or
    .com. Links can have trailing punctuation (periods, commas, close-parens)
    and leading punctuation (opening parens) and it'll still do the right
    thing.

    If trim_url_limit is not None, the URLs in link text longer than this limit
    will truncated to trim_url_limit-3 characters and appended with an elipsis.

    If nofollow is True, the URLs in link text will get a rel="nofollow"
    attribute.

    If autoescape is True, the link text and URLs will get autoescaped.
    """
    trim_url = lambda x, limit=trim_url_limit: limit is not None and (len(x) > limit and ('%s...' % x[:max(0, limit - 3)])) or x
    safe_input = isinstance(text, SafeData)
    words = word_split_re.split(force_unicode(text))
    for i, word in enumerate(words):
        match = None
        if '.' in word or '@' in word or ':' in word:
            match = punctuation_re.match(word)
        if match:
            lead, middle, trail = match.groups()
            # BUG FIX: nofollow_attr is recomputed for every word.  It used
            # to be set once before the loop and cleared permanently by the
            # first mailto: match, so every subsequent http(s) link silently
            # lost its rel="nofollow" attribute.
            nofollow_attr = nofollow and ' rel="nofollow"' or ''
            # Make URL we want to point to.
            url = None
            if middle.startswith('http://') or middle.startswith('https://'):
                url = middle
            elif middle.startswith('www.') or ('@' not in middle and \
                    middle and middle[0] in string.ascii_letters + string.digits and \
                    (middle.endswith('.org') or middle.endswith('.net') or middle.endswith('.com'))):
                url = 'http://%s' % middle
            elif '@' in middle and not ':' in middle and simple_email_re.match(middle):
                url = 'mailto:%s' % middle
                nofollow_attr = ''  # rel="nofollow" is meaningless on mailto links
            # Make link.
            if url:
                trimmed = trim_url(middle)
                if autoescape and not safe_input:
                    lead, trail = escape(lead), escape(trail)
                    url, trimmed = escape(url), escape(trimmed)
                middle = '<a href="%s"%s>%s</a>' % (url, nofollow_attr, trimmed)
                words[i] = mark_safe('%s%s%s' % (lead, middle, trail))
            else:
                if safe_input:
                    words[i] = mark_safe(word)
                elif autoescape:
                    words[i] = escape(word)
        elif safe_input:
            words[i] = mark_safe(word)
        elif autoescape:
            words[i] = escape(word)
    return u''.join(words)
#urlize_quoted_links.needs_autoescape = True
# is_safe tells Django the filter output needs no further autoescaping.
urlize_quoted_links.is_safe = True

# Register urlize_quoted_links as a custom filter
# http://docs.djangoproject.com/en/dev/howto/custom-template-tags/
register = template.Library()
register.filter(urlize_quoted_links)
| {
"repo_name": "seanfisk/buzzword-bingo-server",
"path": "djangorestframework/templatetags/urlize_quoted_links.py",
"copies": "1",
"size": "4544",
"license": "bsd-3-clause",
"hash": -3262581655606986000,
"line_mean": 44.44,
"line_max": 143,
"alpha_frac": 0.5985915493,
"autogenerated": false,
"ratio": 3.391044776119403,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9476126644274079,
"avg_score": 0.002701936229064763,
"num_lines": 100
} |
"""Adds the meal table and the location_meal join table
Revision ID: 9628d0303d10
Revises: dc30e7bcdd08
Create Date: 2017-05-23 21:04:53.966486
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '9628d0303d10'
down_revision = 'dc30e7bcdd08'
branch_labels = None
depends_on = None
def upgrade():
    """Create the 'meal' table and the location<->meal join table."""
    # ### commands auto generated by Alembic - please adjust! ###
    # A meal has a UUID PK, a schedule time, a price, and an optional
    # direct FK to a hosting location.
    op.create_table(
        'meal',
        sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column('scheduled_for', sa.DateTime(), nullable=False),
        sa.Column('location_id', postgresql.UUID(as_uuid=True), nullable=True),
        sa.Column('price', sa.Float(), nullable=False),
        sa.ForeignKeyConstraint(['location_id'], ['location.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    # Many-to-many association between locations and meals.
    op.create_table(
        'location_meal',
        sa.Column('location_guid',
                  postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column('meal_guid', postgresql.UUID(as_uuid=True), nullable=False),
        sa.ForeignKeyConstraint(['location_guid'], ['location.id'], ),
        sa.ForeignKeyConstraint(['meal_guid'], ['meal.id'], ),
        sa.PrimaryKeyConstraint('location_guid', 'meal_guid')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the meal tables created by upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Drop the join table first so its FK to 'meal' is gone before the
    # 'meal' table itself is removed.
    op.drop_table('location_meal')
    op.drop_table('meal')
    # ### end Alembic commands ###
| {
"repo_name": "Rdbaker/Mealbound",
"path": "migrations/versions/9628d0303d10_.py",
"copies": "1",
"size": "1530",
"license": "bsd-3-clause",
"hash": 101359903756084400,
"line_mean": 32.2608695652,
"line_max": 79,
"alpha_frac": 0.6490196078,
"autogenerated": false,
"ratio": 3.5416666666666665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4690686274466666,
"avg_score": null,
"num_lines": null
} |
#Adds the missing redshift column to our data
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import ascii
import sys, os, string
import pandas as pd
# The file for all of our data
fulldataout = '/Users/blorenz/COSMOS/COSMOSData/all_data_UVISTA.txt'
# Read in all of our data
ourdata = ascii.read(fulldataout).to_pandas()
# Search each row for the temp number, find the redshift associated with that
# number and append it to a list.
# NOTE(review): the hard-coded column offsets (1, 7, 13, 19, 25) step by 6,
# presumably one group of columns per template 23-27 — confirm against the
# catalog layout before changing.
z_cc = []
for i in range(0,len(ourdata)):
    temp = ourdata.iloc[i].temp
    if temp == 23:
        z_cc.append(ourdata.iloc[i,1])
    elif temp == 24:
        z_cc.append(ourdata.iloc[i,7])
    elif temp == 25:
        z_cc.append(ourdata.iloc[i,13])
    elif temp == 26:
        z_cc.append(ourdata.iloc[i,19])
    elif temp == 27:
        z_cc.append(ourdata.iloc[i,25])
    else:
        # Unrecognized template: no redshift available for this row.
        z_cc.append(None)
#
# Object IDs are taken from characters 4-9 of the image filename.
OBJID = []
for i in range(0,len(ourdata)):
    OBJID.append(ourdata.iloc[i].ImageName[4:10])
# Turn the list into a df and join it with the data
z_ccdf = pd.DataFrame({'z_cc':z_cc})
ourdata = ourdata.join(z_ccdf)
OBJIDdf = pd.DataFrame({'OBJID':OBJID}).astype(str)
ourdata = ourdata.join(OBJIDdf)
# Write alongside the input, with 'data_UVISTA' -> 'data_UVISTA_z' in the name.
ourdata.to_csv(fulldataout.replace('data_UVISTA','data_UVISTA_z'),index=False)
| {
"repo_name": "brianlorenz/COSMOS_IMACS_Redshifts",
"path": "Data_Conversion/AddZ.py",
"copies": "1",
"size": "1253",
"license": "mit",
"hash": -5951308408443340000,
"line_mean": 26.8444444444,
"line_max": 107,
"alpha_frac": 0.6727853152,
"autogenerated": false,
"ratio": 2.8284424379232505,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40012277531232504,
"avg_score": null,
"num_lines": null
} |
# Adds the missing redshift uncertainties column to our data
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import ascii
import sys
import os
import string
import pandas as pd
# The file for all of our data
fulldataout = '/Users/galaxies-air/COSMOS/COSMOSData/all_c_hasinger.txt'
# Read in all of our data
ourdata = ascii.read(fulldataout).to_pandas()
# Search each row for the temp number, find the redshift associated with that number and append it to a list
template_int = ourdata.temp.astype(int)
dzhis = []
dzlos = []
for i in range(0, len(ourdata)):
# Make sure the template is nonzero for the strange cases where it is
current_temp = template_int.iloc[i]
if current_temp > 0:
dzhis.append(ourdata[f'dzhi{current_temp}'].iloc[i])
dzlos.append(ourdata[f'dzlo{current_temp}'].iloc[i])
else:
dzhis.append(None)
dzlos.append(None)
#
OBJID = []
for i in range(0, len(ourdata)):
OBJID.append(ourdata.iloc[i].ImageName[4:10])
ourdata['dzhi'] = dzhis
ourdata['dzlo'] = dzlos
ourdata.to_csv(fulldataout.replace(
'all_c_hasinger', 'all_c_hasinger_dz'), index=False)
| {
"repo_name": "brianlorenz/COSMOS_IMACS_Redshifts",
"path": "Data_Conversion/Add_Zerrs.py",
"copies": "1",
"size": "1155",
"license": "mit",
"hash": 8822674581073050000,
"line_mean": 26.5,
"line_max": 108,
"alpha_frac": 0.7021645022,
"autogenerated": false,
"ratio": 2.9768041237113403,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9177876445920077,
"avg_score": 0.00021843599825251202,
"num_lines": 42
} |
"""Adds the review table
Revision ID: 9f2495bd66de
Revises: 3e4b230c5582
Create Date: 2017-07-09 11:02:10.683542
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '9f2495bd66de'
down_revision = '3e4b230c5582'
branch_labels = None
depends_on = None
def upgrade():
    """Create the 'review' table linking a user's rating/text to a meal."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('review',
    sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
    sa.Column('created_at', sa.DateTime(timezone=True), nullable=False),
    sa.Column('rating', sa.Float(), nullable=False),
    sa.Column('description', sa.Text(), nullable=False),
    sa.Column('meal_id', postgresql.UUID(as_uuid=True), nullable=False),
    sa.Column('user_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['meal_id'], ['meal.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the 'review' table created by upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('review')
    # ### end Alembic commands ###
| {
"repo_name": "Rdbaker/Mealbound",
"path": "migrations/versions/9f2495bd66de_.py",
"copies": "1",
"size": "1178",
"license": "bsd-3-clause",
"hash": -6139113137148429000,
"line_mean": 30,
"line_max": 72,
"alpha_frac": 0.6791171477,
"autogenerated": false,
"ratio": 3.3657142857142857,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4544831433414286,
"avg_score": null,
"num_lines": null
} |
"""Adds the shared emote association table.
Revision ID: a7f3bef62fdb
Revises: 4af4c837801f
Create Date: 2016-08-27 23:42:16.954777
"""
# revision identifiers, used by Alembic.
revision = 'a7f3bef62fdb'
down_revision = '4af4c837801f'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the guild<->emote sharing association table and its indexes."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('shared_emotes_link',
    sa.Column('guild_id', sa.BigInteger(), nullable=False),
    sa.Column('emote_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['emote_id'], ['emote.id'], ),
    sa.ForeignKeyConstraint(['guild_id'], ['guild.id'], )
    )
    # Index both FK columns for lookups by guild or by emote.
    op.create_index(op.f('ix_shared_emotes_link_emote_id'), 'shared_emotes_link', ['emote_id'], unique=False)
    op.create_index(op.f('ix_shared_emotes_link_guild_id'), 'shared_emotes_link', ['guild_id'], unique=False)
    ### end Alembic commands ###
def downgrade():
    """Drop the shared-emotes association table and its indexes."""
    ### commands auto generated by Alembic - please adjust! ###
    # Indexes must go before the table that owns them.
    op.drop_index(op.f('ix_shared_emotes_link_guild_id'), table_name='shared_emotes_link')
    op.drop_index(op.f('ix_shared_emotes_link_emote_id'), table_name='shared_emotes_link')
    op.drop_table('shared_emotes_link')
    ### end Alembic commands ###
| {
"repo_name": "abalabahaha/emotes-website",
"path": "migrations/versions/a7f3bef62fdb_.py",
"copies": "2",
"size": "1239",
"license": "mit",
"hash": 8829643811870673000,
"line_mean": 34.4,
"line_max": 109,
"alpha_frac": 0.6820016142,
"autogenerated": false,
"ratio": 2.95,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9591207058989909,
"avg_score": 0.00815891104201828,
"num_lines": 35
} |
""" Adds the 'toggle-disassembly' command to switch you into a disassembly only mode """
import lldb
class DisassemblyMode:
    """Backs the 'toggle-disassembly' command: flips LLDB between a
    disassembly-only stop display and the user's normal settings."""
    def __init__(self, debugger, unused):
        self.dbg = debugger
        self.interp = debugger.GetCommandInterpreter()
        # Snapshot the current settings so the first toggle can restore them.
        self.store_state()
        self.mode_off = True  # True while the normal (source) mode is active
    def store_state(self):
        """Save the four stop-display settings we are about to override."""
        self.dis_count = self.get_string_value("stop-disassembly-count")
        self.dis_display = self.get_string_value("stop-disassembly-display")
        self.before_count = self.get_string_value("stop-line-count-before")
        self.after_count = self.get_string_value("stop-line-count-after")
    def get_string_value(self, setting):
        """Return a setting's current value parsed from 'settings show' output."""
        result = lldb.SBCommandReturnObject()
        self.interp.HandleCommand("settings show " + setting, result)
        # Output looks like 'name = value\n'; keep everything after ' = '.
        value = result.GetOutput().split(" = ")[1].rstrip("\n")
        return value
    def set_value(self, setting, value):
        """Set an LLDB setting via the command interpreter."""
        result = lldb.SBCommandReturnObject()
        self.interp.HandleCommand("settings set " + setting + " " + value, result)
    def __call__(self, debugger, command, exe_ctx, result):
        # Toggle: entering disassembly mode re-snapshots the user's settings
        # so later changes they made are restored faithfully on exit.
        if self.mode_off:
            self.mode_off = False
            self.store_state()
            self.set_value("stop-disassembly-display","always")
            self.set_value("stop-disassembly-count", "8")
            self.set_value("stop-line-count-before", "0")
            self.set_value("stop-line-count-after", "0")
            result.AppendMessage("Disassembly mode on.")
        else:
            self.mode_off = True
            self.set_value("stop-disassembly-display",self.dis_display)
            self.set_value("stop-disassembly-count", self.dis_count)
            self.set_value("stop-line-count-before", self.before_count)
            self.set_value("stop-line-count-after", self.after_count)
            result.AppendMessage("Disassembly mode off.")
    def get_short_help(self):
        return "Toggles between a disassembly only mode and normal source mode\n"
def __lldb_init_module(debugger, unused):
    # Called by LLDB when this script is imported; installs the command.
    debugger.HandleCommand("command script add -c disassembly_mode.DisassemblyMode toggle-disassembly")
| {
"repo_name": "llvm-mirror/lldb",
"path": "examples/python/disassembly_mode.py",
"copies": "15",
"size": "2170",
"license": "apache-2.0",
"hash": 5691543506909668000,
"line_mean": 44.2083333333,
"line_max": 103,
"alpha_frac": 0.6331797235,
"autogenerated": false,
"ratio": 3.807017543859649,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.014067284610316241,
"num_lines": 48
} |
"""Adds the users <-> location tables FK
Revision ID: ab25240c9bf9
Revises: f9a432f6a0b2
Create Date: 2017-05-18 21:21:59.969585
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'ab25240c9bf9'
down_revision = 'f9a432f6a0b2'
branch_labels = None
depends_on = None
def upgrade():
    """Add mutual users <-> location foreign keys."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): both FKs are created with name=None, so the backend
    # assigns the name; downgrade's drop_constraint(None, ...) depends on
    # that working for the target database — verify before relying on it.
    op.add_column('location', sa.Column('host_id', sa.Integer()))
    op.create_foreign_key(None, 'location', 'users', ['host_id'], ['id'])
    op.add_column('users', sa.Column('location_id', postgresql.UUID(as_uuid=True)))
    op.create_foreign_key(None, 'users', 'location', ['location_id'], ['id'])
    # ### end Alembic commands ###
def downgrade():
    """Remove the users <-> location foreign keys and columns."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Constraints are dropped before the columns they reference.
    op.drop_constraint(None, 'users', type_='foreignkey')
    op.drop_column('users', 'location_id')
    op.drop_constraint(None, 'location', type_='foreignkey')
    op.drop_column('location', 'host_id')
    # ### end Alembic commands ###
| {
"repo_name": "Rdbaker/Mealbound",
"path": "migrations/versions/ab25240c9bf9_.py",
"copies": "1",
"size": "1107",
"license": "bsd-3-clause",
"hash": 8028957808770612000,
"line_mean": 31.5588235294,
"line_max": 83,
"alpha_frac": 0.674796748,
"autogenerated": false,
"ratio": 3.2558823529411764,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9428928400661064,
"avg_score": 0.00035014005602240897,
"num_lines": 34
} |
"""Adds this path to the PYTHONPATH so normal import usage can occur for external packages."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
# import platform
import sys
# import warnings
THIS_FILE = __file__
"""This file, ala ``/path/to/pkg/libs_external/__init__.py``"""
THIS_PATH = os.path.abspath(os.path.dirname(THIS_FILE))
"""The path from this file, ala ``/path/to/pkg/libs_external``"""
# NON PLATFORM SPECIFIC EXTERNAL LIBRARIES
ANY_DIR = "any"
"""Non-platform specific directory for this system."""
ANY_PATH = os.path.join(THIS_PATH, ANY_DIR)
"""The non-platform specific library path, ala ``/path/to/pkg/libs_external/any``"""
'''# PLATFORM SPECIFIC EXTERNAL LIBRARIES
PLATFORM_MAP = {
"darwin": "osx",
"windows": "win",
"linux": "linux",
}
"""Mapping of platform.system().lower() to platform specific library directories."""
THIS_PLATFORM = platform.system().lower()
"""Platform for this system."""
PLATFORM_DIR = PLATFORM_MAP.get(THIS_PLATFORM, "")
"""Platform specific directory for this system."""
PLATFORM_PATH = os.path.join(THIS_PATH, PLATFORM_DIR)
"""The platform specific library path, ala ``/path/to/pkg/libs_external/osx``"""
if not PLATFORM_DIR:
w = "No platform specific binary packages provided in this tool for platform {}"
w = w.format(THIS_PLATFORM)
warnings.warn(w)
'''
PATHS = [
ANY_PATH,
# PLATFORM_PATH,
]
def add_path(p, first=True):
    """Register *p* on sys.path (front when *first*, else back), skipping duplicates."""
    if p in sys.path:
        return
    position = 0 if first else len(sys.path)
    sys.path.insert(position, p)
def add_paths():
    """Register every configured library path at the front of sys.path."""
    for entry in PATHS:
        add_path(entry, True)


add_paths()
| {
"repo_name": "tanium/pytan",
"path": "lib/libs_external/__init__.py",
"copies": "1",
"size": "1896",
"license": "mit",
"hash": -5244252518086211000,
"line_mean": 25.4782608696,
"line_max": 94,
"alpha_frac": 0.6424050633,
"autogenerated": false,
"ratio": 3.3857142857142857,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45281193490142857,
"avg_score": null,
"num_lines": null
} |
"""Adds thresholds and events for tracking email send rates.
Revision ID: 53d03041be56
Revises: 19df16914a19
Create Date: 2014-03-17 22:09:03.802725
"""
# revision identifiers, used by Alembic.
revision = '53d03041be56'
down_revision = '19df16914a19'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Create per-(user, worker) alert thresholds and an e-mail event log.

    Per the module docstring, these tables track e-mail send rates.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('threshold',
    sa.Column('user', sa.String(), nullable=False),
    sa.Column('worker', sa.String(), nullable=False),
    sa.Column('temp_thresh', sa.Integer(), nullable=True),
    sa.Column('offline_thresh', sa.Integer(), nullable=True),
    sa.Column('hashrate_thresh', sa.Integer(), nullable=True),
    sa.Column('hashrate_err', sa.Boolean(), nullable=True),
    sa.Column('temp_err', sa.Boolean(), nullable=True),
    sa.Column('offline_err', sa.Boolean(), nullable=True),
    sa.Column('green_notif', sa.Boolean(), nullable=True),
    sa.Column('emails', postgresql.ARRAY(sa.String()), nullable=True),
    sa.PrimaryKeyConstraint('user', 'worker')
    )
    # Event log keyed by (time, user, worker, address), so the same
    # notification cannot be recorded twice for the same instant.
    op.create_table('event',
    sa.Column('time', sa.DateTime(), nullable=False),
    sa.Column('user', sa.String(), nullable=False),
    sa.Column('worker', sa.String(), nullable=False),
    sa.Column('address', sa.String(), nullable=False),
    sa.PrimaryKeyConstraint('time', 'user', 'worker', 'address')
    )
    ### end Alembic commands ###
def downgrade():
    """Drop the alerting tables created by upgrade()."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('event')
    op.drop_table('threshold')
    ### end Alembic commands ###
| {
"repo_name": "lae/simplemona",
"path": "migrations/versions/53d03041be56_.py",
"copies": "2",
"size": "1915",
"license": "mit",
"hash": 8757987246825652000,
"line_mean": 40.6304347826,
"line_max": 86,
"alpha_frac": 0.5738903394,
"autogenerated": false,
"ratio": 4.190371991247265,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5764262330647265,
"avg_score": null,
"num_lines": null
} |
"""Adds TimeExercisesHistory and TimeExercisesTaxonomy Tables
Revision ID: 7791f2c862d2
Revises: 56630a78dca0
Create Date: 2017-07-30 20:40:17.174425
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7791f2c862d2'
down_revision = '56630a78dca0'
branch_labels = None
depends_on = None
def upgrade():
    """Create the taxonomy (exercise names) and history tables for timed exercises."""
    op.create_table(
        'time_exercises_taxonomy',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(), nullable=False),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name')
    )
    op.create_table(
        'time_exercises_history',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('user_id', sa.Integer(), nullable=False),
        sa.Column('exercise_id', sa.Integer(), nullable=True),
        sa.Column('distance', sa.Float(), nullable=True),
        sa.Column('duration', sa.Float(), nullable=True),
        sa.Column('exercise_date', sa.DateTime(), nullable=True),
        # BUG FIX: exercise_id previously referenced time_exercises_history.id
        # (a self-reference); it must point at the taxonomy table created
        # above, which is what names the exercises.
        # NOTE(review): if this migration was already applied anywhere, ship
        # the FK correction as a NEW migration instead of editing this one.
        sa.ForeignKeyConstraint(['exercise_id'], ['time_exercises_taxonomy.id'], ),
        sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
def downgrade():
    """Drop history first (it references taxonomy), then taxonomy."""
    op.drop_table('time_exercises_history')
    op.drop_table('time_exercises_taxonomy')
| {
"repo_name": "pbraunstein/trackercise",
"path": "migrations/versions/7791f2c862d2_.py",
"copies": "1",
"size": "1318",
"license": "mit",
"hash": -5577462867663133000,
"line_mean": 30.380952381,
"line_max": 82,
"alpha_frac": 0.655538695,
"autogenerated": false,
"ratio": 3.3451776649746194,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4500716359974619,
"avg_score": null,
"num_lines": null
} |
"""Add stock related tables
Revision ID: 580385482580
Revises: 9c8ce0562634
Create Date: 2017-02-18 02:11:26.185087
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '580385482580'
down_revision = '9c8ce0562634'
branch_labels = None
depends_on = None
def upgrade():
    """Create the stock table and the per-user holdings association table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('stock',
    sa.Column('guild_id', sa.BigInteger(), nullable=True),
    sa.Column('channel_id', sa.BigInteger(), nullable=False),
    sa.Column('price', sa.Float(), nullable=False),
    sa.Column('amount', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['guild_id'], ['guild.id'], ),
    sa.PrimaryKeyConstraint('channel_id'),
    sa.UniqueConstraint('channel_id')
    )
    # Association table: (user_id, stock_id, amount) — appears to record how
    # many shares of each stock a user holds.
    op.create_table('user__stock',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('user_id', sa.BigInteger(), nullable=True),
    sa.Column('stock_id', sa.BigInteger(), nullable=True),
    sa.Column('amount', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['stock_id'], ['stock.channel_id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the association table before the stock table it references."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('user__stock')
    op.drop_table('stock')
    # ### end Alembic commands ###
| {
"repo_name": "MJB47/Jokusoramame",
"path": "migrations/versions/580385482580_add_stock_related_tables.py",
"copies": "1",
"size": "1431",
"license": "mit",
"hash": -8987087467605449000,
"line_mean": 30.1086956522,
"line_max": 66,
"alpha_frac": 0.6596785465,
"autogenerated": false,
"ratio": 3.4481927710843374,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4607871317584337,
"avg_score": null,
"num_lines": null
} |
"""Adds total income to all public figures."""
import six
# String to prefix log messages with:
LOG_PREFIX = '[post_process_income] '
# Names of the income columns in the `assetdeclarations` table
# (see the SELECT in add_incomes below).
INCOME = 'income'
COMPENSATIONS = 'compensations'
OTHER_INCOME = 'other_income'
ALL_INCOMES = (INCOME, COMPENSATIONS, OTHER_INCOME)
# Fixed SKK -> EUR conversion rate used to normalise pre-euro amounts.
SK_TO_EUR = 30.126
CURRENCY_SK = "Sk"
def parse_money_part(money_part):
    """Parse a money string such as "1 234 Sk" into ``(1234, "Sk")``.

    Returns None when there is no space-separated currency token, which
    means the input is not a money value at all.
    """
    head, sep, currency = money_part.rpartition(" ")
    if not sep:
        # no space -> no currency token -> not a money value
        return None
    # Strip grouping spaces (regular and non-breaking) from the numerals.
    digits = head.replace(" ", "").replace("\xa0", "")
    return int(digits), currency
def parse_income_col(val, col):
    """Return the (value, currency) pairs found in one income column.

    Non-string cells yield None. For the `other_income` column only the
    first comma-separated part may contain an income.
    """
    if not isinstance(val, six.string_types):
        return None
    parts = val.split(",")
    if col == OTHER_INCOME:
        # the remaining parts do not contain incomes for this column
        parts = parts[:1]
    # Discard the parenthesised tail of each part before parsing.
    candidates = (parse_money_part(part.split("(")[0].strip()) for part in parts)
    return [pair for pair in candidates if pair is not None]
def parse_income_row(row):
    """Sum all incomes across the three income columns of *row*, in EUR."""
    total_income = 0
    currencies = []
    for i, col in enumerate(ALL_INCOMES):
        parsed = parse_income_col(row[i], col)
        if parsed is None:
            continue
        for amount, currency in parsed:
            total_income += amount
            currencies.append(currency)
    # Every money value within one row must use a single currency.
    assert len(set(currencies)) <= 1, "Too many currencies appearing in the row."
    if currencies and currencies[0] == CURRENCY_SK:
        # Convert pre-euro Slovak koruna amounts to euros.
        total_income = int(total_income / SK_TO_EUR)
    return total_income
def add_incomes(db):
    """Parse and add incomes to assets.

    Reads the raw income text columns from `assetdeclarations`, parses each
    row into a single EUR total, and rebuilds the `incomes` table from
    scratch with the results.
    """
    query = """
        SELECT id, {}, {}, {}
        FROM assetdeclarations
        """.format(*ALL_INCOMES)
    incomes = []
    # Server-side cursor: streams rows instead of loading the table at once.
    with db.get_server_side_cursor(query) as cur:
        for row in cur:
            eid = row[0]
            income = parse_income_row(row[1:])
            incomes.append((eid, income))
    print('%sAccumulated %d incomes' % (LOG_PREFIX, len(incomes)))
    # Rebuild the incomes table from scratch on every run.
    query = "DROP TABLE IF EXISTS incomes"
    db.execute(query)
    query = """
        CREATE TABLE incomes(
        id serial PRIMARY KEY,
        asset_declaration_id int REFERENCES assetdeclarations(Id) NOT NULL,
        income int NOT NULL
        );"""
    db.execute(query)
    with db.cursor() as cur:
        q = """
            INSERT INTO incomes(asset_declaration_id, income)
            VALUES (%s, %s);
            """
        cur.executemany(q, incomes)
| {
"repo_name": "verejnedigital/verejne.digital",
"path": "data/prod_generation/post_process_income.py",
"copies": "1",
"size": "2624",
"license": "apache-2.0",
"hash": -7419266183270995000,
"line_mean": 27.5217391304,
"line_max": 82,
"alpha_frac": 0.6082317073,
"autogenerated": false,
"ratio": 3.5221476510067116,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46303793583067115,
"avg_score": null,
"num_lines": null
} |
from PIL import Image
import numpy as np
import csv
filename = raw_input("Enter filename with extension.\n")
temp = Image.open(filename)
maxsize = (640, 640)
temp = temp.resize(maxsize, Image.ANTIALIAS)#shrink image
temp = np.asarray(temp)#array to extract features
foodName = raw_input("Enter the name of the food.\n")#pairs with name
foodName = [foodName]
print "Array is named 'temp'"
average = [0,0,0]
numOfValues = 0
for i in temp:
for j in i:#for each set of RGB values
if ((j[0] + j[1] + j[2]) > 50) or ((j[0] + j[1] + j[2]) < 700):
#ignore transparent pixels and white pixels
average[0] += j[0]
average[1] += j[1]
average[2] += j[2]
numOfValues += 1
for i in xrange(0,3):
average[i] = average[i] / numOfValues
total = float((average[0] + average[1] + average[2]))
for i in xrange(0,3):
average[i] = average[i] / total#make the averages a ratio
print "RGB average is: " + str(average)
with open("RGBValue.csv", "a") as fp:
wr = csv.writer(fp, dialect='excel')
wr.writerow(average)
with open('foodNames.csv' , 'a') as fn:
wr = csv.writer(fn, dialect='excel')
wr.writerow(foodName)
####################################
########## 3D Graphing #############
####################################
showGraph = raw_input('Show graph? [y/n]\n')
status = True
while(status):
showGraph.lower()#case insensitive
if showGraph == 'y':
showGraph = True
status = False
elif showGraph == 'n':
showGraph = False
status = False
else:
showGraph = raw_input('Invalid input, show graph? [y/n]\n')
while(showGraph):
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection = '3d')
with open('RGBValue.csv','rb') as f:
reader = csv.reader(f)
RGBValue = list(reader)
with open('foodNames.csv','rb') as f:
reader = csv.reader(f)
foodNameTemp = list(reader)
foodName = []
for i in foodNameTemp:
foodName += i
pairList = zip(foodName,RGBValue)#pair foods with their values
colorMap = {}#assign each food its own color
for i in pairList:
if (i[0] not in colorMap):
colorMap[i[0]] = [[np.random.rand()],[np.random.rand()],[np.random.rand()]]#set a random color to each food
for i in pairList:
x = float(i[1][0])
y = float(i[1][1])
z = float(i[1][2])
ax.scatter(x, y, z, c = colorMap[i[0]], label = i[0])#plot on 3d graph
ax.set_xlabel('Red')#set axis labels
ax.set_ylabel('Green')
ax.set_zlabel('Blue')
#position legend
plt.legend(loc='upper left', numpoints=1, ncol=3, fontsize=8, bbox_to_anchor=(0, 0))
plt.show()#display graph
showGraph = False
| {
"repo_name": "SNAPPETITE/backend",
"path": "python programs/sampleGroup.py",
"copies": "1",
"size": "2776",
"license": "mit",
"hash": -853068046010255100,
"line_mean": 22.5663716814,
"line_max": 110,
"alpha_frac": 0.6123919308,
"autogenerated": false,
"ratio": 2.815415821501014,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8759137735021639,
"avg_score": 0.03373400345587485,
"num_lines": 113
} |
"""Adds tracking for temperature, rejected shares, and hashrates each in their own set of timeslice tables.
Revision ID: 19df16914a19
Revises: 58441c58e37e
Create Date: 2014-03-15 12:50:30.998479
"""
# revision identifiers, used by Alembic.
revision = '19df16914a19'
down_revision = '58441c58e37e'
from alembic import op
import sqlalchemy as sa
def _create_timeslice_table(name, per_device):
    """Create one timeslice table keyed by (user[, device], time, worker)."""
    cols = [sa.Column('user', sa.String(), nullable=False)]
    key = ['user']
    if per_device:
        cols.append(sa.Column('device', sa.Integer(), nullable=False))
        key.append('device')
    cols.append(sa.Column('time', sa.DateTime(), nullable=False))
    cols.append(sa.Column('worker', sa.String(), nullable=False))
    cols.append(sa.Column('value', sa.Integer(), nullable=True))
    key += ['time', 'worker']
    cols.append(sa.PrimaryKeyConstraint(*key))
    op.create_table(name, *cols)


def upgrade():
    """Create timeslice tables for hashrate, reject and temperature tracking.

    Hashrate and temperature are tracked per device; reject counts are not.
    The nine near-identical autogenerated create_table calls are collapsed
    into one helper; tables are created in the original order with the
    original columns and primary keys.
    """
    _create_timeslice_table('one_hour_hashrate', per_device=True)
    _create_timeslice_table('five_minute_reject', per_device=False)
    _create_timeslice_table('five_minute_hashrate', per_device=True)
    _create_timeslice_table('one_minute_reject', per_device=False)
    _create_timeslice_table('one_hour_reject', per_device=False)
    _create_timeslice_table('one_minute_hashrate', per_device=True)
    _create_timeslice_table('one_hour_temperature', per_device=True)
    _create_timeslice_table('five_minute_temperature', per_device=True)
    _create_timeslice_table('one_minute_temperature', per_device=True)
def downgrade():
    """Drop every timeslice table, in the same order the autogen version used."""
    tables = (
        'one_minute_temperature',
        'five_minute_temperature',
        'one_hour_temperature',
        'one_minute_hashrate',
        'one_hour_reject',
        'one_minute_reject',
        'five_minute_hashrate',
        'five_minute_reject',
        'one_hour_hashrate',
    )
    for name in tables:
        op.drop_table(name)
| {
"repo_name": "simplecrypto/simplecoin",
"path": "migrations/versions/19df16914a19_.py",
"copies": "2",
"size": "4182",
"license": "mit",
"hash": 7215091619742694000,
"line_mean": 40,
"line_max": 107,
"alpha_frac": 0.656384505,
"autogenerated": false,
"ratio": 3.4908180300500833,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5147202535050084,
"avg_score": null,
"num_lines": null
} |
"""Adds Traffic Monitor Host for Direct Network Usage"""
from baseCmd import *
from baseResponse import *
class addTrafficMonitorCmd (baseCmd):
    """Request: adds a Traffic Monitor Host for direct network usage."""
    typeInfo = {}

    def __init__(self):
        self.isAsync = "false"
        # (parameter, API type) pairs; every attribute starts out unset.
        #   url          - URL of the traffic monitor Host (required)
        #   zoneid       - zone in which to add the external firewall appliance (required)
        #   excludezones - traffic going into the listed zones will not be metered
        #   includezones - traffic going into the listed zones will be metered
        for name, api_type in (
                ("url", "string"),
                ("zoneid", "uuid"),
                ("excludezones", "string"),
                ("includezones", "string")):
            setattr(self, name, None)
            self.typeInfo[name] = api_type
        self.required = ["url", "zoneid", ]
class addTrafficMonitorResponse (baseResponse):
    """Response fields returned by the addTrafficMonitor API call."""
    typeInfo = {}

    def __init__(self):
        # Every field is typed 'string' and starts out unset:
        #   id         - the ID of the external firewall
        #   ipaddress  - the management IP address of the external firewall
        #   numretries - times to retry requests to the external firewall
        #   timeout    - timeout (in seconds) for requests to the external firewall
        #   zoneid     - the zone ID of the external firewall
        for name in ("id", "ipaddress", "numretries", "timeout", "zoneid"):
            setattr(self, name, None)
            self.typeInfo[name] = 'string'
| {
"repo_name": "MissionCriticalCloud/marvin",
"path": "marvin/cloudstackAPI/addTrafficMonitor.py",
"copies": "1",
"size": "1624",
"license": "apache-2.0",
"hash": -6121904261098334000,
"line_mean": 33.5531914894,
"line_max": 76,
"alpha_frac": 0.5997536946,
"autogenerated": false,
"ratio": 4.284960422163588,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5384714116763588,
"avg_score": null,
"num_lines": null
} |
"""Adds traffic type to a physical network"""
from baseCmd import *
from baseResponse import *
class addTrafficTypeCmd (baseCmd):
    """Request: adds a traffic type to a physical network."""
    typeInfo = {}

    def __init__(self):
        self.isAsync = "true"
        # (parameter, API type) pairs; every attribute starts out unset.
        #   physicalnetworkid - the Physical Network ID (required)
        #   traffictype       - the trafficType to add to the physical network (required)
        #   isolationmethod   - 'vlan' or 'vxlan' (defaults to 'vlan') when the
        #                       network has multiple isolation types and traffic
        #                       type is public
        #   kvmnetworklabel   - network name label of the device on a KVM host
        #   vlan              - VLAN id used for Management traffic by the host
        #   xennetworklabel   - network name label of the device on a XenServer host
        for name, api_type in (
                ("physicalnetworkid", "uuid"),
                ("traffictype", "string"),
                ("isolationmethod", "string"),
                ("kvmnetworklabel", "string"),
                ("vlan", "string"),
                ("xennetworklabel", "string")):
            setattr(self, name, None)
            self.typeInfo[name] = api_type
        self.required = ["physicalnetworkid", "traffictype", ]
class addTrafficTypeResponse (baseResponse):
    """Response fields returned by the addTrafficType API call."""
    typeInfo = {}

    def __init__(self):
        # Every field is typed 'string' and starts out unset:
        #   id                - id of the network provider
        #   kvmnetworklabel   - network name label of the device on a KVM host
        #   physicalnetworkid - the physical network this belongs to
        #   traffictype       - the trafficType added to the physical network
        #   xennetworklabel   - network name label of the device on a XenServer host
        for name in (
                "id", "kvmnetworklabel", "physicalnetworkid",
                "traffictype", "xennetworklabel"):
            setattr(self, name, None)
            self.typeInfo[name] = 'string'
| {
"repo_name": "MissionCriticalCloud/marvin",
"path": "marvin/cloudstackAPI/addTrafficType.py",
"copies": "1",
"size": "2251",
"license": "apache-2.0",
"hash": -5831602195409945000,
"line_mean": 41.4716981132,
"line_max": 189,
"alpha_frac": 0.6414926699,
"autogenerated": false,
"ratio": 4.362403100775194,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5503895770675193,
"avg_score": null,
"num_lines": null
} |
"""Add string-indexer (scikit_learn).
Revision ID: dbb12fc54827
Revises: fb9ab7489253
Create Date: 2019-12-13 11:15:51.313657
"""
from alembic import op
import sqlalchemy as sa
from alembic import context
from alembic import op
from sqlalchemy import String, Integer, Text
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import table, column, text
import json
# revision identifiers, used by Alembic.
revision = 'dbb12fc54827'
down_revision = 'fb9ab7489253'
branch_labels = None
depends_on = None
def _insert_operation_platform():
    """Register operation 40 (string-indexer) on platform 4 (scikit-learn)."""
    tb = table(
        'operation_platform',
        column('operation_id', Integer),
        column('platform_id', Integer), )
    op.bulk_insert(tb, [{'operation_id': 40, 'platform_id': 4}])
def _insert_operation_operation_form():
    """Attach form 41 (appearance) to operation 40."""
    tb = table(
        'operation_operation_form',
        column('operation_id', Integer),
        column('operation_form_id', Integer))
    op.bulk_insert(tb, [{'operation_id': 40, 'operation_form_id': 41}])
# (apply, revert) pairs: `apply` runs on upgrade (a callable, SQL string, or
# list of SQL strings); `revert` runs on downgrade.
all_commands = [
    (_insert_operation_platform,
     'DELETE FROM operation_platform WHERE operation_id = 40 AND platform_id = 4'),
    (_insert_operation_operation_form,
     'DELETE FROM operation_operation_form WHERE operation_id = 40 AND operation_form_id = 41'),
]
def upgrade():
    """Run the apply half of every command in `all_commands`.

    Everything executes in one session with MySQL foreign-key checks
    disabled, and is rolled back if any command fails.
    """
    ctx = context.get_context()
    session = sessionmaker(bind=ctx.bind)()
    connection = session.connection()
    try:
        connection.execute('SET FOREIGN_KEY_CHECKS=0;')
        for forward, _backward in all_commands:
            if isinstance(forward, str):
                connection.execute(forward)
            elif isinstance(forward, list):
                for statement in forward:
                    connection.execute(statement)
            else:
                forward()
        connection.execute('SET FOREIGN_KEY_CHECKS=1;')
    except:  # intentionally broad: roll back on any failure, then re-raise
        session.rollback()
        raise
    session.commit()
def downgrade():
    """Run the revert half of each command, in reverse order."""
    ctx = context.get_context()
    session = sessionmaker(bind=ctx.bind)()
    connection = session.connection()
    try:
        # FK checks off so deletes can run regardless of constraint order.
        connection.execute('SET FOREIGN_KEY_CHECKS=0;')
        for cmd in reversed(all_commands):
            if isinstance(cmd[1], str):
                connection.execute(cmd[1])
            elif isinstance(cmd[1], list):
                for row in cmd[1]:
                    connection.execute(row)
            else:
                cmd[1]()
        connection.execute('SET FOREIGN_KEY_CHECKS=1;')
    except:
        # Intentionally broad: roll back on any failure, then re-raise.
        session.rollback()
        raise
session.commit() | {
"repo_name": "eubr-bigsea/tahiti",
"path": "migrations/versions/dbb12fc54827_add_string_indexer_scikit_learn.py",
"copies": "1",
"size": "2753",
"license": "apache-2.0",
"hash": -5674104987326354000,
"line_mean": 25.2285714286,
"line_max": 96,
"alpha_frac": 0.6087904105,
"autogenerated": false,
"ratio": 3.7920110192837466,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9884715456540664,
"avg_score": 0.003217194648616569,
"num_lines": 105
} |
"""Add student_id_confirmed to user.
Revision ID: 18485381c9be
Revises: 776b0d9121b7
Create Date: 2018-06-13 13:45:44.390612
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship
# revision identifiers, used by Alembic.
revision = '18485381c9be'
down_revision = '776b0d9121b7'
# NOTE(review): this monkey-patches the imported sqlalchemy module itself,
# aliased as `db`, with Model/relationship/session attributes — apparently a
# shim so code written against a Flask-SQLAlchemy-style `db` object runs
# inside this migration. Confirm before reusing the pattern.
Base = declarative_base()
db = sa
db.Model = Base
db.relationship = relationship


def create_session():
    # Bind a fresh ORM session to Alembic's connection and expose it
    # as db.session.
    connection = op.get_bind()
    session_maker = sa.orm.sessionmaker()
    session = session_maker(bind=connection)
    db.session = session


def upgrade():
    """Add user.student_id_confirmed; existing rows default to false ('0')."""
    create_session()
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('user', sa.Column('student_id_confirmed', sa.Boolean(), nullable=False, default=False, server_default='0'))
    # ### end Alembic commands ###


def downgrade():
    """Remove user.student_id_confirmed."""
    create_session()
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('user', 'student_id_confirmed')
    # ### end Alembic commands ###
# vim: ft=python
| {
"repo_name": "viaict/viaduct",
"path": "migrations/versions/18485381c9be_add_student_id_confirmed_to_user.py",
"copies": "1",
"size": "1153",
"license": "mit",
"hash": 9084644082484560000,
"line_mean": 23.0208333333,
"line_max": 125,
"alpha_frac": 0.7085862966,
"autogenerated": false,
"ratio": 3.493939393939394,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4702525690539394,
"avg_score": null,
"num_lines": null
} |
"""addSubMetaData
Revision ID: b8cf7fa342bb
Revises: 608afa719fb8
Create Date: 2016-04-04 09:38:39.555000
"""
# revision identifiers, used by Alembic.
revision = 'b8cf7fa342bb'
down_revision = '608afa719fb8'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    # Multi-database migration: route to upgrade_<engine_name>() below.
    globals()["upgrade_%s" % engine_name]()


def downgrade(engine_name):
    # Route to the matching downgrade_<engine_name>() below.
    globals()["downgrade_%s" % engine_name]()
def upgrade_error_data():
    ### commands auto generated by Alembic - please adjust! ###
    # No schema changes for the error_data database in this revision.
    pass
    ### end Alembic commands ###


def downgrade_error_data():
    ### commands auto generated by Alembic - please adjust! ###
    # No schema changes for the error_data database in this revision.
    pass
    ### end Alembic commands ###
def upgrade_job_tracker():
    """Add agency_name and reporting period date columns to submission."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('submission', sa.Column('agency_name', sa.Text(), nullable=True))
    op.add_column('submission', sa.Column('reporting_end_date', sa.Date(), nullable=True))
    op.add_column('submission', sa.Column('reporting_start_date', sa.Date(), nullable=True))
    ### end Alembic commands ###


def downgrade_job_tracker():
    """Remove the submission metadata columns added in upgrade_job_tracker."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('submission', 'reporting_start_date')
    op.drop_column('submission', 'reporting_end_date')
    op.drop_column('submission', 'agency_name')
    ### end Alembic commands ###
def upgrade_user_manager():
    ### commands auto generated by Alembic - please adjust! ###
    # No schema changes for the user_manager database in this revision.
    pass
    ### end Alembic commands ###


def downgrade_user_manager():
    ### commands auto generated by Alembic - please adjust! ###
    # No schema changes for the user_manager database in this revision.
    pass
### end Alembic commands ### | {
"repo_name": "fedspendingtransparency/data-act-core",
"path": "dataactcore/migrations/versions/b8cf7fa342bb_addsubmetadata.py",
"copies": "1",
"size": "1673",
"license": "cc0-1.0",
"hash": -2968031789247638500,
"line_mean": 24.3636363636,
"line_max": 92,
"alpha_frac": 0.6694560669,
"autogenerated": false,
"ratio": 3.6369565217391306,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9768478318616932,
"avg_score": 0.007586854004439749,
"num_lines": 66
} |
"""add submission_id to PublishedAFA
Revision ID: 03257ae6000f
Revises: 4d66a8d6e11b
Create Date: 2017-10-26 09:57:31.577694
"""
# revision identifiers, used by Alembic.
revision = '03257ae6000f'
down_revision = '4d66a8d6e11b'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    # Multi-database migration: route to upgrade_<engine_name>() below.
    globals()["upgrade_%s" % engine_name]()


def downgrade(engine_name):
    # Route to the matching downgrade_<engine_name>() below.
    globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
    """Add an indexed submission_id column to published_award_financial_assistance."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('published_award_financial_assistance', sa.Column('submission_id', sa.Numeric(), nullable=True))
    op.create_index(op.f('ix_published_award_financial_assistance_submission_id'), 'published_award_financial_assistance', ['submission_id'], unique=False)
    ### end Alembic commands ###


def downgrade_data_broker():
    """Drop the submission_id index, then the column itself."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_published_award_financial_assistance_submission_id'), table_name='published_award_financial_assistance')
    op.drop_column('published_award_financial_assistance', 'submission_id')
    ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/03257ae6000f_add_submission_id_to_publishedafa.py",
"copies": "1",
"size": "1202",
"license": "cc0-1.0",
"hash": 4799372395559078000,
"line_mean": 27.619047619,
"line_max": 155,
"alpha_frac": 0.7188019967,
"autogenerated": false,
"ratio": 3.2574525745257454,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.93290260365425,
"avg_score": 0.02944570693664897,
"num_lines": 42
} |
"""add submissions table
Revision ID: 3c6f2c76ca55
Revises:
Create Date: 2016-01-27 11:06:58.772187
"""
# revision identifiers, used by Alembic.
revision = '3c6f2c76ca55'
down_revision = None
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the submissions table (one row per stored submission)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('submissions',
    sa.Column('id', sa.String(), nullable=False),
    sa.Column('created_utc', sa.Float(), nullable=False),
    sa.Column('fullname', sa.String(), nullable=False),
    sa.Column('subreddit', sa.String(), nullable=False),
    sa.Column('subreddit_id', sa.String(), nullable=False),
    sa.Column('permalink', sa.String(), nullable=False),
    sa.Column('author', sa.String(), nullable=False),
    sa.Column('title', sa.Unicode(), nullable=False),
    sa.Column('selftext', sa.UnicodeText(), nullable=True),
    sa.Column('ups', sa.Integer(), nullable=False),
    sa.Column('downs', sa.Integer(), nullable=False),
    sa.Column('score', sa.Integer(), nullable=False),
    sa.Column('num_comments', sa.Integer(), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    ### end Alembic commands ###


def downgrade():
    """Drop the submissions table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('submissions')
    ### end Alembic commands ###
| {
"repo_name": "PsyBorgs/redditanalyser",
"path": "migrate/versions/3c6f2c76ca55_add_submissions_table.py",
"copies": "1",
"size": "1348",
"license": "mit",
"hash": -4081037246019232000,
"line_mean": 30.3488372093,
"line_max": 63,
"alpha_frac": 0.6698813056,
"autogenerated": false,
"ratio": 3.483204134366925,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9590337694202387,
"avg_score": 0.012549549152907803,
"num_lines": 43
} |
"""Add submission_tags for fast searching
Revision ID: 9afc9a45510c
Revises: abac1922735d
Create Date: 2016-06-13 14:40:01.782784
"""
# revision identifiers, used by Alembic.
revision = '9afc9a45510c'
down_revision = 'abac1922735d'
from alembic import op # lgtm[py/unused-import]
import sqlalchemy as sa # lgtm[py/unused-import]
from sqlalchemy.dialects import postgresql
def upgrade():
    """Create submission_tags (submitid -> tag-id array) and backfill it."""
    op.create_table('submission_tags',
    sa.Column('submitid', sa.Integer(), nullable=False),
    sa.Column('tags', postgresql.ARRAY(sa.Integer()), nullable=False),
    sa.ForeignKeyConstraint(['submitid'], ['submission.submitid'], name='submission_tags_submitid_fkey', onupdate='CASCADE', ondelete='CASCADE'),
    sa.PrimaryKeyConstraint('submitid')
    )
    # Backfill one aggregated row per submission from the existing
    # searchmapsubmit join table.
    op.execute(
        'INSERT INTO submission_tags (submitid, tags) '
        'SELECT targetid, array_agg(tagid) FROM searchmapsubmit GROUP BY targetid')
    # GIN index enables fast array-containment queries over the tags column.
    op.create_index('ind_submission_tags_tags', 'submission_tags', ['tags'], unique=False, postgresql_using='gin')


def downgrade():
    """Drop the submission_tags table and its GIN index."""
    op.drop_index('ind_submission_tags_tags', table_name='submission_tags')
    op.drop_table('submission_tags')
| {
"repo_name": "Weasyl/weasyl",
"path": "libweasyl/libweasyl/alembic/versions/9afc9a45510c_add_submission_tags_for_fast_searching.py",
"copies": "1",
"size": "1157",
"license": "apache-2.0",
"hash": 1180157578331437600,
"line_mean": 35.15625,
"line_max": 145,
"alpha_frac": 0.7208297321,
"autogenerated": false,
"ratio": 3.2869318181818183,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45077615502818186,
"avg_score": null,
"num_lines": null
} |
"""add subreddit_id field to subreddit_pages
Revision ID: 2957ac0e11b6
Revises: 66a893890997
Create Date: 2016-07-18 17:23:04.904423
"""
# revision identifiers, used by Alembic.
revision = '2957ac0e11b6'
down_revision = '66a893890997'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    # Multi-database migration: route to upgrade_<engine_name>() below.
    globals()["upgrade_%s" % engine_name]()


def downgrade(engine_name):
    # Route to the matching downgrade_<engine_name>() below.
    globals()["downgrade_%s" % engine_name]()
def _add_subreddit_id_column():
    """Shared implementation: add a nullable subreddit_id column to subreddit_pages."""
    op.add_column(
        'subreddit_pages',
        sa.Column('subreddit_id', sa.String(length=32), nullable=True),
    )


def _drop_subreddit_id_column():
    """Shared implementation: remove the subreddit_id column from subreddit_pages."""
    op.drop_column('subreddit_pages', 'subreddit_id')


def upgrade_development():
    _add_subreddit_id_column()


def downgrade_development():
    _drop_subreddit_id_column()


def upgrade_test():
    _add_subreddit_id_column()


def downgrade_test():
    _drop_subreddit_id_column()


def upgrade_production():
    _add_subreddit_id_column()


def downgrade_production():
    _drop_subreddit_id_column()
| {
"repo_name": "c4fcm/CivilServant",
"path": "alembic/versions/2957ac0e11b6_add_subreddit_id_field_to_subreddit_.py",
"copies": "1",
"size": "1688",
"license": "mit",
"hash": -5288021321925363000,
"line_mean": 25.375,
"line_max": 100,
"alpha_frac": 0.6795023697,
"autogenerated": false,
"ratio": 3.553684210526316,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4733186580226316,
"avg_score": null,
"num_lines": null
} |
"""Add summary columns
Revision ID: f2aa951ca1a7
Revises: 7e250583b9cb
Create Date: 2017-10-23 16:50:06.586388
"""
import model.utils
import sqlalchemy as sa
from alembic import op
from rdr_service.participant_enums import OrderStatus
# revision identifiers, used by Alembic.
revision = "f2aa951ca1a7"
down_revision = "7e250583b9cb"
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific handler named ``upgrade_<engine_name>``.

    Raises KeyError if no such handler is defined in this module.
    """
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific handler named ``downgrade_<engine_name>``.

    Raises KeyError if no such handler is defined in this module.
    """
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_metrics():
    """No metrics-database schema changes in this revision."""
    pass


def downgrade_metrics():
    """No metrics-database schema changes to reverse in this revision."""
    pass
def upgrade_rdr():
    """Add biospecimen, physical-measurement, and sample-order summary columns.

    All new columns are nullable; six unnamed foreign keys to site(site_id) are
    added for the site-reference columns, and physical_measurements gains a
    ``finalized`` timestamp.
    """
    table = "participant_summary"

    def site_col(name):
        # Integer FK column pointing at site.site_id (constraint added below).
        return sa.Column(name, sa.Integer(), nullable=True)

    def status_col(name):
        return sa.Column(name, model.utils.Enum(OrderStatus), nullable=True)

    def time_col(name):
        return sa.Column(name, model.utils.UTCDateTime(), nullable=True)

    op.add_column(table, site_col("biospecimen_collected_site_id"))
    op.add_column(table, site_col("biospecimen_finalized_site_id"))
    op.add_column(table, time_col("biospecimen_order_time"))
    op.add_column(table, site_col("biospecimen_processed_site_id"))
    op.add_column(table, site_col("biospecimen_source_site_id"))
    op.add_column(table, status_col("biospecimen_status"))
    op.add_column(table, site_col("physical_measurements_created_site_id"))
    op.add_column(table, site_col("physical_measurements_finalized_site_id"))
    op.add_column(table, time_col("physical_measurements_finalized_time"))

    # One status/time column pair per biobank sample test code.
    for sample in ("1ed04", "1ed10", "1hep4", "1pst8",
                   "1sal", "1sst8", "1ur10", "2ed10"):
        op.add_column(table, status_col("sample_order_status_%s" % sample))
        op.add_column(table, time_col("sample_order_status_%s_time" % sample))

    # Foreign keys are created unnamed (None), as autogenerated.
    for fk_column in (
        "biospecimen_processed_site_id",
        "physical_measurements_finalized_site_id",
        "physical_measurements_created_site_id",
        "biospecimen_collected_site_id",
        "biospecimen_source_site_id",
        "biospecimen_finalized_site_id",
    ):
        op.create_foreign_key(None, table, "site", [fk_column], ["site_id"])

    op.add_column("physical_measurements",
                  sa.Column("finalized", model.utils.UTCDateTime(), nullable=True))
def downgrade_rdr():
    """Reverse upgrade_rdr: drop the summary columns and their foreign keys."""
    table = "participant_summary"
    op.drop_column("physical_measurements", "finalized")

    # NOTE(review): these constraint names were left as ``None`` by Alembic
    # autogenerate ("please adjust"); drop_constraint normally requires a real
    # name, so confirm the generated FK names before relying on this downgrade.
    for _ in range(6):
        op.drop_constraint(None, table, type_="foreignkey")

    # Drop the sample-order column pairs in the reverse of creation order.
    for sample in ("2ed10", "1ur10", "1sst8", "1sal",
                   "1pst8", "1hep4", "1ed10", "1ed04"):
        op.drop_column(table, "sample_order_status_%s_time" % sample)
        op.drop_column(table, "sample_order_status_%s" % sample)

    for column in (
        "physical_measurements_finalized_time",
        "physical_measurements_finalized_site_id",
        "physical_measurements_created_site_id",
        "biospecimen_status",
        "biospecimen_source_site_id",
        "biospecimen_processed_site_id",
        "biospecimen_order_time",
        "biospecimen_finalized_site_id",
        "biospecimen_collected_site_id",
    ):
        op.drop_column(table, column)
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/f2aa951ca1a7_add_summary_columns.py",
"copies": "1",
"size": "7574",
"license": "bsd-3-clause",
"hash": -4937783012136233000,
"line_mean": 48.5032679739,
"line_max": 119,
"alpha_frac": 0.7010826512,
"autogenerated": false,
"ratio": 3.4163283716734325,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9607054253175571,
"avg_score": 0.0020713539395723446,
"num_lines": 153
} |
""" Add super_admin role and grant base administrator roles to it
Revision ID: 93a6e0ee63c9
Revises: 8ff0624764b7
Create Date: 2016-04-18 19:27:39.063915
"""
# revision identifiers, used by Alembic.
revision = '93a6e0ee63c9'
down_revision = '8ff0624764b7'
from alembic import op
def upgrade():
    """Create the super_admin role and re-root the role hierarchy beneath it."""
    from sqlalchemy.sql import text
    bind = op.get_bind()
    statements = (
        "INSERT INTO role (name, description) VALUES ('super_admin', 'Super Admin Role for all Organizations');",
        "UPDATE \"user\" SET login='super_admin', display = 'Super Administrator' WHERE id=1;",
        "UPDATE role SET name='organization_admin' WHERE name = 'admin'",
        "UPDATE role SET parent_id = (SELECT id FROM role WHERE name = 'super_admin') WHERE name LIKE '%organization_%';",
        "UPDATE role SET parent_id = (SELECT id FROM role WHERE name = 'organization_admin') WHERE name LIKE '%user_%';",
        "UPDATE role SET parent_id = (SELECT id FROM role WHERE name = 'organization_admin') WHERE name LIKE '%role_%';",
        "INSERT INTO roles_users (role_id, user_id) VALUES ((SELECT id FROM role WHERE name='super_admin'), 1);",
    )
    # Statement order matters: the super_admin role must exist before the
    # parent_id updates and the roles_users insert reference it.
    for statement in statements:
        bind.execute(text(statement))
def downgrade():
    """Undo the super_admin role creation and restore the flat role hierarchy."""
    from sqlalchemy.sql import text
    bind = op.get_bind()
    statements = (
        "DELETE FROM roles_users WHERE role_id = (SELECT id FROM role WHERE name='super_admin') AND user_id = 1;",
        "UPDATE \"user\" SET login='admin' WHERE id=1;",
        "UPDATE role SET parent_id = NULL WHERE name LIKE '%organization_%';",
        "UPDATE role SET parent_id = NULL WHERE name LIKE '%user_%';",
        "UPDATE role SET parent_id = NULL WHERE name LIKE '%role_%';",
        "UPDATE role SET name='admin' WHERE name = 'organization_admin'",
        "DELETE FROM role WHERE name='super_admin';",
    )
    # Statement order matters: detach children before deleting the role row.
    for statement in statements:
        bind.execute(text(statement))
| {
"repo_name": "betterlife/flask-psi",
"path": "psi/migrations/versions/19_93a6e0ee63c9_.py",
"copies": "2",
"size": "2208",
"license": "mit",
"hash": -2205408357696920800,
"line_mean": 54.2,
"line_max": 146,
"alpha_frac": 0.6870471014,
"autogenerated": false,
"ratio": 3.5384615384615383,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5225508639861538,
"avg_score": null,
"num_lines": null
} |
# add_supplement_to_poemaster.py
# This script adds files from the Graham's Magazine - Westminster Review
# supplement to the master poetry metadata file.
# At the same time it creates a list of poetry volids.
import csv
def dirty_pairtree(htid):
    """Convert a pairtree-cleaned HathiTrust volume id back to its "dirty" form.

    Pairtree cleaning encodes ':' as '+' and '/' as '=' in the part of the id
    after the institutional prefix, e.g.
    'uc1.ark+=13960=t4qj78s2k' -> 'uc1.ark:/13960/t4qj78s2k'.

    Parameters
    ----------
    htid : str
        Clean (pairtree-safe) HathiTrust volume id, normally of the form
        '<prefix>.<postfix>'.

    Returns
    -------
    str
        The id with ':' and '/' restored in the postfix. An id with no '.'
        separator is returned unchanged (the original code mis-sliced it).
    """
    prefix, sep, postfix = htid.partition('.')
    if not sep:
        # No institutional prefix separator: nothing to decode.
        return htid
    # Both replacements are no-ops when the characters are absent, so no guard
    # is needed.  (The old guard only tested for '=', which left '+' characters
    # unrestored in any id containing '+' but no '='.)
    postfix = postfix.replace('+', ':').replace('=', '/')
    return prefix + '.' + postfix
# Input: supplement metadata exported for Graham's Magazine / Westminster Review.
supplement = '/Users/tunder/Dropbox/GenreProject/python/reception/poetry/GrMWRsupplement.csv'

dirtyIDs = list()   # "dirty" HathiTrust volume ids of the poetry volumes
allrows = list()    # metadata rows to carry into the clean poetry file

with open(supplement, encoding = 'utf-8') as f:
    reader = csv.DictReader(f)
    fieldnames = reader.fieldnames
    for row in reader:
        htid = row['docid']
        genre = row['jgenre']
        if genre.startswith('po'):
            # Poetry row: record its dirty id and keep the row (minus jgenre).
            # NOTE(review): the original file's indentation of the two lines
            # below determines whether non-poetry rows are also kept; as
            # reconstructed here only poetry rows are retained, which matches
            # the poetry-only output filename — confirm against the original.
            dirtyIDs.append(dirty_pairtree(htid))
            row.pop('jgenre')
            allrows.append(row)

# jgenre has been removed from the kept rows, so drop it from the header too.
fieldnames.pop(fieldnames.index('jgenre'))
cleanpoetry = '/Users/tunder/Dropbox/GenreProject/python/reception/poetry/GrMWRpoetry.csv'

# Write the retained rows back out with the jgenre column removed.
with open(cleanpoetry, mode='w', encoding='utf-8') as outfile:
    writer = csv.DictWriter(outfile, fieldnames=fieldnames)
    writer.writeheader()
    writer.writerows(allrows)
getpoetry = '/Users/tunder/Dropbox/GenreProject/python/reception/poetry/GrMWRids.txt'

# One dirty volume id per line, for downstream retrieval scripts.
with open(getpoetry, mode='w', encoding='utf-8') as idfile:
    idfile.writelines(anid + '\n' for anid in dirtyIDs)
| {
"repo_name": "tedunderwood/GenreProject",
"path": "python/reception/poetry/add_supplement_to_poemaster.py",
"copies": "1",
"size": "1515",
"license": "mit",
"hash": 6761111309712019000,
"line_mean": 28.1346153846,
"line_max": 93,
"alpha_frac": 0.6633663366,
"autogenerated": false,
"ratio": 3.1301652892561984,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42935316258561984,
"avg_score": null,
"num_lines": null
} |
"""Add SupplierFramework.prefill_declaration_from_framework_id column and constraints
Revision ID: 830
Revises: 820
Create Date: 2017-02-02 14:18:54.512957
"""
# revision identifiers, used by Alembic.
revision = '830'
down_revision = '820'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add prefill_declaration_from_framework_id and its two foreign keys."""
    table = 'supplier_frameworks'
    column = 'prefill_declaration_from_framework_id'
    op.add_column(table, sa.Column(column, sa.Integer(), nullable=True))
    # Constraint name manually truncated here to prevent postgres from
    # truncating it automatically.
    op.create_foreign_key(
        op.f('fk_supplier_frameworks_prefill_declaration_from_framework_id'),
        table, 'frameworks', [column], ['id'])
    # Composite self-referencing FK: the (supplier, source framework) pair must
    # itself be an existing supplier_frameworks row.
    op.create_foreign_key(
        op.f('fk_supplier_frameworks_supplier_id_supplier_frameworks'),
        table, table, ['supplier_id', column], ['supplier_id', 'framework_id'])
def downgrade():
    """Drop the prefill-declaration foreign keys and column."""
    table = 'supplier_frameworks'
    for constraint in (
        'fk_supplier_frameworks_supplier_id_supplier_frameworks',
        'fk_supplier_frameworks_prefill_declaration_from_framework_id',
    ):
        op.drop_constraint(op.f(constraint), table, type_='foreignkey')
    op.drop_column(table, 'prefill_declaration_from_framework_id')
| {
"repo_name": "alphagov/digitalmarketplace-api",
"path": "migrations/versions/830_add_supplierframework_prefill_declaration.py",
"copies": "1",
"size": "1304",
"license": "mit",
"hash": 1238767962351034000,
"line_mean": 49.1538461538,
"line_max": 226,
"alpha_frac": 0.7569018405,
"autogenerated": false,
"ratio": 3.5824175824175826,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48393194229175823,
"avg_score": null,
"num_lines": null
} |
"""Add supplier_id and created/updated dates
Revision ID: c8bce0740af
Revises: 4cb7869866dc
Create Date: 2015-01-26 15:23:03.984803
"""
# revision identifiers, used by Alembic.
revision = 'c8bce0740af'
down_revision = '4cb7869866dc'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add supplier_id plus created/updated timestamps to services, with an index."""
    new_columns = (
        sa.Column('created_at', sa.DateTime(), nullable=False),
        sa.Column('supplier_id', sa.BigInteger(), nullable=False),
        sa.Column('updated_at', sa.DateTime(), nullable=False),
    )
    for column in new_columns:
        op.add_column('services', column)
    op.create_index(op.f('ix_services_supplier_id'), 'services',
                    ['supplier_id'], unique=False)
def downgrade():
    """Drop the supplier_id index and the three columns added by upgrade()."""
    op.drop_index(op.f('ix_services_supplier_id'), table_name='services')
    for column in ('updated_at', 'supplier_id', 'created_at'):
        op.drop_column('services', column)
| {
"repo_name": "alphagov/digitalmarketplace-api",
"path": "migrations/versions/c8bce0740af_add_supplier_id_and_created_updated_.py",
"copies": "3",
"size": "1081",
"license": "mit",
"hash": 3841081417121556000,
"line_mean": 32.78125,
"line_max": 95,
"alpha_frac": 0.6891766883,
"autogenerated": false,
"ratio": 3.315950920245399,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5505127608545399,
"avg_score": null,
"num_lines": null
} |
"""Add support for discovering mDNS services."""
import itertools
import logging
from typing import List # noqa: F401
from zeroconf import DNSPointer, DNSRecord
from zeroconf import Error as ZeroconfError
from zeroconf import ServiceBrowser, ServiceInfo, ServiceStateChange, Zeroconf
_LOGGER = logging.getLogger(__name__)
class FastServiceBrowser(ServiceBrowser):
    """ServiceBrowser that ignores cache updates for everything but browsed PTRs."""

    def update_record(self, zc: Zeroconf, now: float, record: DNSRecord) -> None:
        """Forward only PTR records for the browsed types to the base class."""
        if isinstance(record, DNSPointer) and record.name in self.types:
            super().update_record(zc, now, record)
class MDNS:
    """Coordinate discovery of registered mDNS services.

    A single (optionally shared) Zeroconf instance backs one
    FastServiceBrowser that watches every registered service type.
    """

    def __init__(self, zeroconf_instance=None):
        """Initialize the discovery; optionally reuse an existing Zeroconf."""
        self.zeroconf = zeroconf_instance
        self._created_zeroconf = False
        self.services = []  # type: List[ServiceInfo]
        self._browser = None  # type: ServiceBrowser

    def register_service(self, service):
        """Register a mDNS service."""
        self.services.append(service)

    def start(self):
        """Start discovery."""
        try:
            if not self.zeroconf:
                # No instance supplied: create our own and remember to close it.
                self.zeroconf = Zeroconf()
                self._created_zeroconf = True

            by_type = {}
            for service in self.services:
                by_type.setdefault(service.typ, []).append(service)

            def _on_state_change(zeroconf, service_type, name, state_change):
                if state_change == ServiceStateChange.Added:
                    for service in by_type[service_type]:
                        try:
                            service.add_service(zeroconf, service_type, name)
                        except ZeroconfError:
                            _LOGGER.exception("Failed to add service %s", name)
                elif state_change == ServiceStateChange.Removed:
                    for service in by_type[service_type]:
                        service.remove_service(zeroconf, service_type, name)

            self._browser = FastServiceBrowser(
                self.zeroconf,
                [service.typ for service in self.services],
                handlers=[_on_state_change],
            )
        except Exception:  # pylint: disable=broad-except
            # Leave no half-initialized state behind before re-raising.
            self.stop()
            raise

    def stop(self):
        """Stop discovering."""
        if self._browser:
            self._browser.cancel()
            self._browser = None
        for service in self.services:
            service.reset()
        if self._created_zeroconf:
            self.zeroconf.close()
            self.zeroconf = None

    @property
    def entries(self):
        """Return all entries in the cache."""
        cache = self.zeroconf.cache
        return [entry
                for name in cache.names()
                for entry in cache.entries_with_name(name)]
| {
"repo_name": "balloob/netdisco",
"path": "netdisco/mdns.py",
"copies": "1",
"size": "3135",
"license": "mit",
"hash": 5812231356289238000,
"line_mean": 33.0760869565,
"line_max": 81,
"alpha_frac": 0.574800638,
"autogenerated": false,
"ratio": 4.583333333333333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0001325556733828208,
"num_lines": 92
} |
"""Add support for discovering mDNS services."""
import zeroconf
class MDNS(object):
    """Base class to discover mDNS services."""

    def __init__(self):
        """Initialize the discovery with no Zeroconf instance yet."""
        self.zeroconf = None
        self.services = []
        self._browsers = []

    def register_service(self, service):
        """Register a mDNS service."""
        self.services.append(service)

    def start(self):
        """Start discovery: one ServiceBrowser per registered service."""
        self.zeroconf = zeroconf.Zeroconf()
        for service in self.services:
            browser = zeroconf.ServiceBrowser(self.zeroconf,
                                              service.typ, service)
            self._browsers.append(browser)

    def stop(self):
        """Stop discovering and release the Zeroconf instance."""
        # Cancel browsers in reverse registration order, emptying the list.
        while self._browsers:
            browser = self._browsers.pop()
            browser.cancel()
        for service in self.services:
            service.reset()
        self.zeroconf.close()
        self.zeroconf = None

    @property
    def entries(self):
        """Return all entries in the cache."""
        return self.zeroconf.cache.entries()
| {
"repo_name": "sfam/netdisco",
"path": "netdisco/mdns.py",
"copies": "3",
"size": "1034",
"license": "mit",
"hash": 8696967591023680000,
"line_mean": 24.85,
"line_max": 77,
"alpha_frac": 0.581237911,
"autogenerated": false,
"ratio": 4.38135593220339,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.646259384320339,
"avg_score": null,
"num_lines": null
} |
"""Add support for external authentication.
Revision ID: 009eafe4838f
Revises: 9715822acf6c
Create Date: 2017-07-23 02:16:39.493032
"""
# revision identifiers, used by Alembic.
revision = '009eafe4838f'
down_revision = '9715822acf6c'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Rename users.username to identifier and add external-auth profile columns."""
    op.alter_column('users', 'username', new_column_name='identifier',
                    existing_type=sa.String(length=500))
    # All new columns are nullable so existing rows remain valid.
    new_columns = (
        sa.Column('username', sa.String(length=500), nullable=True),
        sa.Column('password', sa.String(length=500), nullable=True),
        sa.Column('active', sa.Boolean(), nullable=True),
        sa.Column('name', sa.String(length=500), nullable=True),
        sa.Column('preferred_name', sa.String(length=500), nullable=True),
        sa.Column('avatar_uri', sa.Text(), nullable=True),
        sa.Column('email', sa.String(length=500), nullable=True),
        sa.Column('last_login_at', sa.DateTime(), nullable=True),
    )
    with op.batch_alter_table('users') as batch_op:
        for column in new_columns:
            batch_op.add_column(column)
def downgrade():
    """Drop the external-auth profile columns and restore the username column name."""
    dropped = (
        'username',
        'preferred_name',
        'password',
        'name',
        'last_login_at',
        'email',
        'active',
        'avatar_uri',
    )
    with op.batch_alter_table('users') as batch_op:
        for column in dropped:
            batch_op.drop_column(column)
    op.alter_column('users', 'identifier', new_column_name='username',
                    existing_type=sa.String(length=500))
| {
"repo_name": "airbnb/knowledge-repo",
"path": "knowledge_repo/app/migrations/versions/009eafe4838f_add_support_for_external_authentication.py",
"copies": "1",
"size": "1662",
"license": "apache-2.0",
"hash": -7089244010786861000,
"line_mean": 38.5714285714,
"line_max": 107,
"alpha_frac": 0.6805054152,
"autogenerated": false,
"ratio": 3.2271844660194176,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9387251525053653,
"avg_score": 0.0040876712331529745,
"num_lines": 42
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.