text stringlengths 0 1.05M | meta dict |
|---|---|
"""Adding certified_total_obligations table
Revision ID: a9bfc7a2bd16
Revises: 9e1acf1450dc
Create Date: 2021-03-18 11:04:34.942653
"""
# revision identifiers, used by Alembic.
revision = 'a9bfc7a2bd16'
down_revision = '9e1acf1450dc'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade(engine_name):
    """Dispatch to this module's engine-specific upgrade function."""
    globals()["upgrade_{0}".format(engine_name)]()


def downgrade(engine_name):
    """Dispatch to this module's engine-specific downgrade function."""
    globals()["downgrade_{0}".format(engine_name)]()
def upgrade_data_broker():
    # ### commands auto generated by Alembic - please adjust! ###
    # One row per submission holding the certified obligation totals
    # (overall, procurement, and assistance amounts).
    op.create_table('certified_total_obligations',
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('certified_total_obligations_id', sa.Integer(), nullable=False),
        sa.Column('submission_id', sa.Integer(), nullable=False),
        sa.Column('total_obligations', sa.Numeric(), nullable=True),
        sa.Column('total_proc_obligations', sa.Numeric(), nullable=True),
        sa.Column('total_asst_obligations', sa.Numeric(), nullable=True),
        # Deleting a submission cascades to its obligations row.
        sa.ForeignKeyConstraint(['submission_id'], ['submission.submission_id'], name='fk_certified_total_obligations_submission_id', ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('certified_total_obligations_id')
    )
    # unique=True enforces at most one obligations row per submission.
    op.create_index(op.f('ix_certified_total_obligations_submission_id'), 'certified_total_obligations', ['submission_id'], unique=True)
    # ### end Alembic commands ###
def downgrade_data_broker():
    """Drop the certified_total_obligations table and its submission index."""
    # ### commands auto generated by Alembic - please adjust! ###
    target = 'certified_total_obligations'
    op.drop_index(op.f('ix_certified_total_obligations_submission_id'), table_name=target)
    op.drop_table(target)
    # ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/a9bfc7a2bd16_adding_certified_total_obligations_table.py",
"copies": "1",
"size": "1850",
"license": "cc0-1.0",
"hash": 5999057334533509000,
"line_mean": 34.5769230769,
"line_max": 154,
"alpha_frac": 0.6978378378,
"autogenerated": false,
"ratio": 3.3882783882783882,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45861162260783883,
"avg_score": null,
"num_lines": null
} |
"""Adding CloudTrail table and indexes everywhere!
Revision ID: e0a6af364a3f
Revises: bfb550a500ab
Create Date: 2016-07-27 17:49:23.243518
"""
# revision identifiers, used by Alembic.
revision = 'e0a6af364a3f'
down_revision = 'bfb550a500ab'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Create the cloudtrail table and add lookup indexes across the schema."""
    ### commands auto generated by Alembic - please adjust! ###
    # cloudtrail: one row per CloudTrail event, tied to the item and the
    # item revision the event was matched against.
    op.create_table('cloudtrail',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('event_id', sa.String(length=36), nullable=True),
        sa.Column('request_id', sa.String(length=36), nullable=True),
        sa.Column('event_source', sa.String(length=64), nullable=False),
        sa.Column('event_name', sa.String(length=64), nullable=False),
        sa.Column('event_time', sa.DateTime(), nullable=False),
        sa.Column('request_parameters', postgresql.JSON(), nullable=True),
        # NOTE(review): camelCase name is inconsistent with the other
        # columns -- presumably mirrors the raw CloudTrail payload; confirm.
        sa.Column('responseElements', postgresql.JSON(), nullable=True),
        sa.Column('source_ip', sa.String(length=45), nullable=True),
        sa.Column('user_agent', sa.String(length=300), nullable=True),
        sa.Column('full_entry', postgresql.JSON(), nullable=True),
        sa.Column('user_identity', postgresql.JSON(), nullable=True),
        sa.Column('user_identity_arn', sa.String(length=300), nullable=True),
        sa.Column('revision_id', sa.Integer(), nullable=False),
        sa.Column('item_id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['item_id'], ['item.id'], ),
        sa.ForeignKeyConstraint(['revision_id'], ['itemrevision.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    # event_id is unique per CloudTrail event; the rest are plain lookups.
    op.create_index('ix_cloudtrail_event_id', 'cloudtrail', ['event_id'], unique=True)
    op.create_index('ix_cloudtrail_event_time', 'cloudtrail', ['event_time'], unique=False)
    op.create_index('ix_cloudtrail_item_id', 'cloudtrail', ['item_id'], unique=False)
    op.create_index('ix_cloudtrail_request_id', 'cloudtrail', ['request_id'], unique=False)
    op.create_index('ix_cloudtrail_revision_id', 'cloudtrail', ['revision_id'], unique=False)
    op.create_index('ix_cloudtrail_user_identity_arn', 'cloudtrail', ['user_identity_arn'], unique=False)
    # Foreign-key columns on existing tables were previously unindexed;
    # add lookup indexes "everywhere" per this revision's message.
    op.create_index('ix_auditorsettings_account_id', 'auditorsettings', ['account_id'], unique=False)
    op.create_index('ix_auditorsettings_tech_id', 'auditorsettings', ['tech_id'], unique=False)
    op.create_index('ix_ignorelist_tech_id', 'ignorelist', ['tech_id'], unique=False)
    op.create_index('ix_item_account_id', 'item', ['account_id'], unique=False)
    op.create_index('ix_item_tech_id', 'item', ['tech_id'], unique=False)
    op.create_index('ix_itemaudit_auditor_setting_id', 'itemaudit', ['auditor_setting_id'], unique=False)
    op.create_index('ix_itemaudit_item_id', 'itemaudit', ['item_id'], unique=False)
    op.create_index('ix_itemaudit_justified_user_id', 'itemaudit', ['justified_user_id'], unique=False)
    op.create_index('ix_itemcomment_item_id', 'itemcomment', ['item_id'], unique=False)
    op.create_index('ix_itemcomment_user_id', 'itemcomment', ['user_id'], unique=False)
    op.create_index('ix_itemrevision_item_id', 'itemrevision', ['item_id'], unique=False)
    op.create_index('ix_itemrevisioncomment_revision_id', 'itemrevisioncomment', ['revision_id'], unique=False)
    op.create_index('ix_itemrevisioncomment_user_id', 'itemrevisioncomment', ['user_id'], unique=False)
    ### end Alembic commands ###
def downgrade():
    """Drop all indexes added by this revision, then the cloudtrail table."""
    ### commands auto generated by Alembic - please adjust! ###
    # Indexes are removed in reverse order of creation.
    op.drop_index('ix_itemrevisioncomment_user_id', table_name='itemrevisioncomment')
    op.drop_index('ix_itemrevisioncomment_revision_id', table_name='itemrevisioncomment')
    op.drop_index('ix_itemrevision_item_id', table_name='itemrevision')
    op.drop_index('ix_itemcomment_user_id', table_name='itemcomment')
    op.drop_index('ix_itemcomment_item_id', table_name='itemcomment')
    op.drop_index('ix_itemaudit_justified_user_id', table_name='itemaudit')
    op.drop_index('ix_itemaudit_item_id', table_name='itemaudit')
    op.drop_index('ix_itemaudit_auditor_setting_id', table_name='itemaudit')
    op.drop_index('ix_item_tech_id', table_name='item')
    op.drop_index('ix_item_account_id', table_name='item')
    op.drop_index('ix_ignorelist_tech_id', table_name='ignorelist')
    op.drop_index('ix_auditorsettings_tech_id', table_name='auditorsettings')
    op.drop_index('ix_auditorsettings_account_id', table_name='auditorsettings')
    op.drop_index('ix_cloudtrail_user_identity_arn', table_name='cloudtrail')
    op.drop_index('ix_cloudtrail_revision_id', table_name='cloudtrail')
    op.drop_index('ix_cloudtrail_request_id', table_name='cloudtrail')
    op.drop_index('ix_cloudtrail_item_id', table_name='cloudtrail')
    op.drop_index('ix_cloudtrail_event_time', table_name='cloudtrail')
    op.drop_index('ix_cloudtrail_event_id', table_name='cloudtrail')
    op.drop_table('cloudtrail')
### end Alembic commands ### | {
"repo_name": "stackArmor/security_monkey",
"path": "migrations/versions/e0a6af364a3f_.py",
"copies": "4",
"size": "4986",
"license": "apache-2.0",
"hash": -8593038516456413000,
"line_mean": 57.6705882353,
"line_max": 111,
"alpha_frac": 0.6909346169,
"autogenerated": false,
"ratio": 3.2084942084942085,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006264769976961098,
"num_lines": 85
} |
"""Adding consent_for_cabor
Revision ID: 19687c270db8
Revises: d3bcbe963bf0
Create Date: 2017-05-31 10:42:22.868438
"""
import model.utils
import sqlalchemy as sa
from alembic import op
from rdr_service.participant_enums import QuestionnaireStatus
# revision identifiers, used by Alembic.
revision = "19687c270db8"
down_revision = "d3bcbe963bf0"
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function (rdr or metrics)."""
    globals()["upgrade_{0}".format(engine_name)]()


def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function (rdr or metrics)."""
    globals()["downgrade_{0}".format(engine_name)]()
def upgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    # No schema changes for the metrics database in this revision.
    pass
    # ### end Alembic commands ###


def downgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    # Nothing to undo for the metrics database.
    pass
    # ### end Alembic commands ###
def upgrade_rdr():
    # ### commands auto generated by Alembic - please adjust! ###
    # Track the participant's CABoR consent status and when it was recorded.
    op.add_column(
        "participant_summary", sa.Column("consent_for_cabor", model.utils.Enum(QuestionnaireStatus), nullable=True)
    )
    op.add_column("participant_summary", sa.Column("consent_for_cabor_time", model.utils.UTCDateTime(), nullable=True))
    # ### end Alembic commands ###
def downgrade_rdr():
    """Drop the CABoR consent columns added by this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    for column_name in ("consent_for_cabor_time", "consent_for_cabor"):
        op.drop_column("participant_summary", column_name)
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/19687c270db8_adding_consent_for_cabor.py",
"copies": "1",
"size": "1447",
"license": "bsd-3-clause",
"hash": -7402286606313712000,
"line_mean": 25.7962962963,
"line_max": 119,
"alpha_frac": 0.6862474084,
"autogenerated": false,
"ratio": 3.4534606205250595,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4639708028925059,
"avg_score": null,
"num_lines": null
} |
"""Adding Contributors and relations
Revision ID: 4e3c8db63bb7
Revises: 42f7e2ac0c98
Create Date: 2013-11-08 15:41:11.281282
"""
# revision identifiers, used by Alembic.
revision = '4e3c8db63bb7'
down_revision = '42f7e2ac0c98'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the contributor table plus one association table per taco part."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('contributor',
        sa.Column('username', sa.String(), nullable=False),
        sa.Column('gravatar', sa.String(), nullable=True),
        sa.PrimaryKeyConstraint('username')
    )
    # The tables below associate contributors with each taco component by
    # its URL.  NOTE(review): PrimaryKeyConstraint() with no columns leaves
    # these tables without a primary key -- presumably an autogeneration
    # artifact; confirm before relying on row identity.
    op.create_table('contrib_mixin',
        sa.Column('contrib_username', sa.String(), nullable=True),
        sa.Column('mixin_url', sa.String(), nullable=True),
        sa.ForeignKeyConstraint(['contrib_username'], ['contributor.username'], ),
        sa.ForeignKeyConstraint(['mixin_url'], ['mixin.url'], ),
        sa.PrimaryKeyConstraint()
    )
    op.create_table('contrib_seasoning',
        sa.Column('contrib_username', sa.String(), nullable=True),
        sa.Column('seasoning_url', sa.String(), nullable=True),
        sa.ForeignKeyConstraint(['contrib_username'], ['contributor.username'], ),
        sa.ForeignKeyConstraint(['seasoning_url'], ['seasoning.url'], ),
        sa.PrimaryKeyConstraint()
    )
    op.create_table('contrib_shell',
        sa.Column('contrib_username', sa.String(), nullable=True),
        sa.Column('shell_url', sa.String(), nullable=True),
        sa.ForeignKeyConstraint(['contrib_username'], ['contributor.username'], ),
        sa.ForeignKeyConstraint(['shell_url'], ['shell.url'], ),
        sa.PrimaryKeyConstraint()
    )
    op.create_table('contrib_baselayer',
        sa.Column('contrib_username', sa.String(), nullable=True),
        sa.Column('baselayer_url', sa.String(), nullable=True),
        sa.ForeignKeyConstraint(['baselayer_url'], ['base_layer.url'], ),
        sa.ForeignKeyConstraint(['contrib_username'], ['contributor.username'], ),
        sa.PrimaryKeyConstraint()
    )
    op.create_table('contrib_condiment',
        sa.Column('contrib_username', sa.String(), nullable=True),
        sa.Column('condiment_url', sa.String(), nullable=True),
        sa.ForeignKeyConstraint(['condiment_url'], ['condiment.url'], ),
        sa.ForeignKeyConstraint(['contrib_username'], ['contributor.username'], ),
        sa.PrimaryKeyConstraint()
    )
    op.create_table('contrib_fulltaco',
        sa.Column('contrib_username', sa.String(), nullable=True),
        sa.Column('full_taco_url', sa.String(), nullable=True),
        sa.ForeignKeyConstraint(['contrib_username'], ['contributor.username'], ),
        sa.ForeignKeyConstraint(['full_taco_url'], ['full_taco.url'], ),
        sa.PrimaryKeyConstraint()
    )
    ### end Alembic commands ###
def downgrade():
    """Drop the association tables first, then the contributor table itself."""
    ### commands auto generated by Alembic - please adjust! ###
    for table_name in ('contrib_fulltaco', 'contrib_condiment',
                       'contrib_baselayer', 'contrib_shell',
                       'contrib_seasoning', 'contrib_mixin', 'contributor'):
        op.drop_table(table_name)
    ### end Alembic commands ###
| {
"repo_name": "evz/tacofancy-api",
"path": "alembic/versions/4e3c8db63bb7_adding_contributors_.py",
"copies": "1",
"size": "3014",
"license": "mit",
"hash": 5939509692929426000,
"line_mean": 37.641025641,
"line_max": 78,
"alpha_frac": 0.6768414068,
"autogenerated": false,
"ratio": 3.6444981862152357,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9769919129919717,
"avg_score": 0.010284092619103716,
"num_lines": 78
} |
"""adding cross evaluation in sklearn
Revision ID: 21de6c275230
Revises: ea5b31a667ff
Create Date: 2021-03-09 16:47:05.549578
"""
from alembic import op
import sqlalchemy as sa
from alembic import context
from alembic import op
from sqlalchemy import String, Integer, Text
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import table, column, text
import json
# revision identifiers, used by Alembic.
revision = '21de6c275230'
down_revision = 'ea5b31a667ff'
branch_labels = None
depends_on = None
FIELD_ID = 4377
FORM_FIELD = 4046
def _insert_operation_form():
    """Register the two execution forms (classification and regression CV)."""
    operation_form_table = table(
        'operation_form',
        column('id', Integer),
        column('enabled', Integer),
        column('order', Integer),
        column('category', String), )
    rows = [
        # form for cross validation (classification)
        {'id': FORM_FIELD, 'enabled': 1, 'order': 1, 'category': 'execution'},
        # form for cross validation (regression)
        {'id': FORM_FIELD + 1, 'enabled': 1, 'order': 1,
         'category': 'execution'},
    ]
    op.bulk_insert(operation_form_table, rows)
def _insert_operation_form_translation():
    """Insert the English/Portuguese names for both execution forms."""
    tb = table(
        'operation_form_translation',
        column('id', Integer),
        column('locale', String),
        column('name', String))
    rows = [
        {'id': form_id, 'locale': locale, 'name': name}
        for form_id, locale, name in (
            (FORM_FIELD, 'en', 'Execution'),
            (FORM_FIELD, 'pt', 'Execução'),
            (FORM_FIELD + 1, 'en', 'Execution'),
            (FORM_FIELD + 1, 'pt', 'Execução'),
        )
    ]
    op.bulk_insert(tb, rows)
def _insert_operation_operation_form():
    """Link each sklearn operation to its cross-validation execution form."""
    tb = table(
        'operation_operation_form',
        column('operation_id', Integer),
        column('operation_form_id', Integer))
    columns = [c.name for c in tb.columns]
    data = [
        # classification operations -> classification CV form
        (4021, FORM_FIELD),
        (4022, FORM_FIELD),
        (4023, FORM_FIELD),
        (4024, FORM_FIELD),
        (4025, FORM_FIELD),
        (4031, FORM_FIELD),
        (4032, FORM_FIELD),
        (4034, FORM_FIELD),
        (4036, FORM_FIELD),
        # regression operations -> regression CV form
        (4026, FORM_FIELD + 1),
        (4027, FORM_FIELD + 1),
        (4028, FORM_FIELD + 1),
        (4029, FORM_FIELD + 1),
        (4030, FORM_FIELD + 1),
        (4035, FORM_FIELD + 1),
        (4038, FORM_FIELD + 1),
        (4046, FORM_FIELD + 1),
        # NOTE(review): 4021 here is a form id outside the FORM_FIELD pair
        # added by this migration, and the downgrade DELETE only targets
        # FORM_FIELD/FORM_FIELD+1 -- confirm this row is intentional.
        (4038, 4021)
    ]
    rows = [dict(list(zip(columns, row))) for row in data]
    op.bulk_insert(tb, rows)
def _insert_operation_form_field():
    """Insert the CV fields (enable checkbox, metric dropdown, fold count)
    for the classification form and the regression form."""
    tb = table(
        'operation_form_field',
        column('id', Integer),
        column('name', String),
        column('type', String),
        column('required', Integer),
        column('order', Integer),
        column('default', Text),
        column('suggested_widget', String),
        column('values_url', String),
        column('values', String),
        column('scope', String),
        column('form_id', Integer),
        column('enable_conditions', Integer),
    )
    columns = [c.name for c in tb.columns]
    data = [
        # classification form (FORM_FIELD): checkbox enabling CV
        (FIELD_ID, "apply_cross_validation", "INTEGER", 0, 20, 0,
         "checkbox", None, None, "EXECUTION", FORM_FIELD, None),
        # metric dropdown -- only enabled when the checkbox is ticked
        (FIELD_ID + 1, 'metric_cross_validation', 'INTEGER', 0, 21,
         'f1_weighted', 'dropdown', None, json.dumps([
            {'key': 'balanced_accuracy', 'value': 'Balanced accuracy'},
            {'key': 'f1_weighted', 'value': 'F1 weighted'},
            {'key': 'precision_weighted', 'value': 'Weighted Precision'},
            {'key': 'recall_weighted', 'value': 'Weighted Recall'},
            {'key': 'jaccard_weighted', 'value': 'Weighted Jaccard'},
            {'key': 'roc_auc', 'value': 'Area Under ROC (if binary)'},
         ]), 'EXECUTION',
         FORM_FIELD, "this.apply_cross_validation.internalValue === '1'"),
        (FIELD_ID+2, "folds", "INTEGER", 0, 22, 3, "integer", None, None,
         "EXECUTION", FORM_FIELD,
         "this.apply_cross_validation.internalValue === '1'"),
        # regression form (FORM_FIELD + 1): same three fields, regression metrics
        (FIELD_ID+3, "apply_cross_validation", "INTEGER", 0, 20, 0,
         "checkbox", None, None, "EXECUTION", FORM_FIELD+1, None),
        (FIELD_ID + 4, 'metric_cross_validation', 'INTEGER', 0, 21,
         'r2', 'dropdown', None, json.dumps([
            {'key': 'explained_variance', 'value': 'Explained variance'},
            {'key': 'max_error', 'value': 'Maximum residual error'},
            {'key': 'neg_mean_absolute_error', 'value': 'Mean absolute error'},
            {'key': 'neg_mean_squared_error', 'value': 'Mean squared error'},
            {'key': 'neg_root_mean_squared_error',
             'value': 'Mean root squared error'},
            {'key': 'neg_mean_squared_log_error',
             'value': 'Mean squared logarithmic error'},
            {'key': 'neg_median_absolute_error',
             'value': 'Median absolute error'},
            {'key': 'r2', 'value': 'R^2 (coefficient of determination)'},
         ]), 'EXECUTION',
         FORM_FIELD+1, "this.apply_cross_validation.internalValue === '1'"),
        (FIELD_ID + 5, "folds", "INTEGER", 0, 22, 3, "integer", None, None,
         "EXECUTION", FORM_FIELD+1,
         "this.apply_cross_validation.internalValue === '1'"),
    ]
    rows = [dict(list(zip(columns, row))) for row in data]
    op.bulk_insert(tb, rows)
def _insert_operation_form_field_translation():
    """Insert English/Portuguese labels and help text for the six CV fields."""
    tb = table(
        'operation_form_field_translation',
        column('id', Integer),
        column('locale', String),
        column('label', String),
        column('help', String), )
    columns = [c.name for c in tb.columns]
    data = [
        # classification-form fields (FIELD_ID .. FIELD_ID+2)
        (FIELD_ID, "en", "Perform cross-validation",
         "Perform cross-validation"),
        (FIELD_ID, 'pt', "Realizar a validação cruzada",
         "Realizar a validação cruzada"),
        (FIELD_ID + 1, "en", "Metric for applying cross-validation",
         "If informed, this metric will be used and cross-validation will be "
         "executed."),
        (FIELD_ID + 1, "pt", "Métrica para validação cruzada",
         "Se informada, essa métrica será usada e a validação cruzada será "
         "executada"),
        (FIELD_ID + 2, "en", "Attribute with fold number",
         "Contains the fold number for the row"),
        (FIELD_ID + 2, "pt", "Atributo com o número da partição (fold)",
         "Contém o número da partição para a linha"),
        # regression-form fields (FIELD_ID+3 .. FIELD_ID+5), same wording
        (FIELD_ID + 3, "en", "Perform cross-validation",
         "Perform cross-validation"),
        (FIELD_ID + 3, 'pt', "Realizar a validação cruzada",
         "Realizar a validação cruzada"),
        (FIELD_ID + 4, "en", "Metric for applying cross-validation",
         "If informed, this metric will be used and cross-validation will be "
         "executed."),
        (FIELD_ID + 4, "pt", "Métrica para validação cruzada",
         "Se informada, essa métrica será usada e a validação cruzada será "
         "executada"),
        (FIELD_ID + 5, "en", "Attribute with fold number",
         "Contains the fold number for the row"),
        (FIELD_ID + 5, "pt", "Atributo com o número da partição (fold)",
         "Contém o número da partição para a linha"),
    ]
    rows = [dict(list(zip(columns, row))) for row in data]
    op.bulk_insert(tb, rows)
all_commands = [
(_insert_operation_form,
"DELETE FROM operation_form WHERE id IN ({}, {})"
.format(FORM_FIELD, FORM_FIELD + 1)),
(_insert_operation_form_translation,
"DELETE FROM operation_form_translation WHERE id IN ({}, {})"
.format(FORM_FIELD, FORM_FIELD + 1)),
(_insert_operation_operation_form,
"DELETE FROM operation_operation_form "
"WHERE operation_id IN (4021, 4022, 4023, 4024, 4025, 4026, 4027, 4028, "
"4029, 4030, 4031, 4032, 4034, 4035, 4036, 4038, 4046) and "
"operation_form_id IN({}, {})"
.format(FORM_FIELD, FORM_FIELD + 1)),
(_insert_operation_form_field,
"DELETE FROM operation_form_field WHERE id BETWEEN {} AND {}"
.format(FIELD_ID, FIELD_ID + 5)),
(_insert_operation_form_field_translation,
"DELETE FROM operation_form_field_translation WHERE id BETWEEN {} AND {}"
.format(FIELD_ID, FIELD_ID + 5)),
# removing duplicated fields (feature, label, alias)
("DELETE FROM operation_form_field_translation "
"WHERE id IN (4122, 4148, 4149, 4187, 4188, 4189, 4357, 4358, 4359)",
""),
("DELETE FROM operation_form_field "
"WHERE id IN (4122, 4148, 4149, 4187, 4188, 4189,4357, 4358, 4359)", ""),
# mae critirion is deprecated
("""
UPDATE operation_form_field
SET `values` = '{}' WHERE id = 4364
""".format(json.dumps([
{'key': 'friedman_mse',
'value': 'Mean squared error with improvement score by Friedman'},
{'key': 'mse', 'value': 'Mean squared error'}]
)),
"""
UPDATE operation_form_field
SET `values` = '{}' WHERE id = 4364
""".format(json.dumps([
{'key': 'friedman_mse',
'value': 'Mean squared error with improvement score by Friedman'},
{'key': 'mse', 'value': 'Mean squared error'},
{'key': 'mae', 'value': 'Mean absolute error'}]
))),
]
def upgrade():
    """Apply every forward command in ``all_commands``.

    Each pair's first element is a SQL string, a list of SQL strings, or a
    callable performing bulk inserts.  Foreign-key checks are disabled while
    the commands run because rows reference each other across tables.
    On any failure the session is rolled back and the error re-raised.
    """
    ctx = context.get_context()
    session = sessionmaker(bind=ctx.bind)()
    connection = session.connection()
    try:
        connection.execute('SET FOREIGN_KEY_CHECKS=0;')
        for cmd in all_commands:
            forward = cmd[0]
            if isinstance(forward, str):
                if forward:  # empty string means "no forward action"
                    connection.execute(forward)
            elif isinstance(forward, list):
                for statement in forward:
                    connection.execute(statement)
            else:
                forward()
        connection.execute('SET FOREIGN_KEY_CHECKS=1;')
    # Explicit equivalent of the bare `except:` (flake8 E722): roll back on
    # *any* interruption, including KeyboardInterrupt, then re-raise.
    except BaseException:
        session.rollback()
        raise
    session.commit()
def downgrade():
    """Apply every reverse command in ``all_commands``, in reverse order.

    Mirror image of :func:`upgrade`: the second element of each pair is a
    SQL string, a list of SQL strings, or a callable.  Foreign-key checks
    are disabled for the duration; failures roll back and re-raise.
    """
    ctx = context.get_context()
    session = sessionmaker(bind=ctx.bind)()
    connection = session.connection()
    try:
        connection.execute('SET FOREIGN_KEY_CHECKS=0;')
        for cmd in reversed(all_commands):
            reverse = cmd[1]
            if isinstance(reverse, str):
                if reverse:  # empty string means "not reversible / no-op"
                    connection.execute(reverse)
            elif isinstance(reverse, list):
                for statement in reverse:
                    connection.execute(statement)
            else:
                reverse()
        connection.execute('SET FOREIGN_KEY_CHECKS=1;')
    # Explicit equivalent of the bare `except:` (flake8 E722): roll back on
    # *any* interruption, including KeyboardInterrupt, then re-raise.
    except BaseException:
        session.rollback()
        raise
    session.commit()
| {
"repo_name": "eubr-bigsea/tahiti",
"path": "migrations/versions/21de6c275230_adding_cross_evaluation_in_sklearn.py",
"copies": "1",
"size": "10611",
"license": "apache-2.0",
"hash": 4388433007190518000,
"line_mean": 32.6592356688,
"line_max": 79,
"alpha_frac": 0.5643864131,
"autogenerated": false,
"ratio": 3.5549949545913218,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9600335532796855,
"avg_score": 0.003809166978893468,
"num_lines": 314
} |
"""Adding DB functions for code_id lookup and determining participant ethnicity
Revision ID: 335d191204c1
Revises: 1e944af3ad04
Create Date: 2019-01-10 13:05:48.899901
"""
from alembic import op
from rdr_service.dao.alembic_utils import ReplaceableObject
# revision identifiers, used by Alembic.
revision = "335d191204c1"
down_revision = "1e944af3ad04"
branch_labels = None
depends_on = None
fn_get_code_id_from_key = ReplaceableObject(
"fn_get_code_id_from_key",
"""
(code_value VARCHAR(80))
RETURNS INT
READS SQL DATA
BEGIN
# Return the record code_id for the given key from the code table.
DECLARE result INT;
SET result = (SELECT code_id FROM code
WHERE `value` = code_value ORDER BY code_id DESC LIMIT 1);
RETURN result;
END
""",
)
fn_get_participant_ethnicity = ReplaceableObject(
"fn_get_participant_ethnicity",
"""
(participant INT, code_id INT)
RETURNS CHAR(1)
READS SQL DATA
BEGIN
# Determine if the participant's selected ethnicity matches the given id from the code table.
# Use fn_get_code_id_from_key() to get the code_id value from a code table key value.
# Returns: 'Y' or 'N'
DECLARE result CHAR(1);
SET result = (
SELECT
CASE
WHEN
(SELECT count(1)
FROM questionnaire_response qr
INNER JOIN questionnaire_response_answer qra
ON qra.questionnaire_response_id = qr.questionnaire_response_id
INNER JOIN questionnaire_question qq
ON qra.question_id = qq.questionnaire_question_id
WHERE qr.participant_id = participant
AND qq.code_id = fn_get_code_id_from_key('Race_WhatRaceEthnicity')
AND qra.value_code_id = code_id
AND qra.end_time IS NULL) > 0 THEN 'Y'
ELSE 'N' END
);
RETURN result;
END
""",
)
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function (rdr or metrics)."""
    globals()["upgrade_{0}".format(engine_name)]()


def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function (rdr or metrics)."""
    globals()["downgrade_{0}".format(engine_name)]()
def upgrade_rdr():
    # Install both stored functions in the rdr database.
    op.create_fn(fn_get_code_id_from_key)
    op.create_fn(fn_get_participant_ethnicity)


def downgrade_rdr():
    # Remove both stored functions from the rdr database.
    op.drop_fn(fn_get_code_id_from_key)
    op.drop_fn(fn_get_participant_ethnicity)


def upgrade_metrics():
    # No metrics-database changes in this revision.
    pass


def downgrade_metrics():
    # Nothing to undo for the metrics database.
    pass
def unittest_schemas():
    """Return DROP/CREATE statements that install both stored functions
    into a unit-test database."""
    statements = []
    for fn in (fn_get_code_id_from_key, fn_get_participant_ethnicity):
        statements.append("DROP FUNCTION IF EXISTS `{0}`".format(fn.name))
        statements.append("CREATE FUNCTION `{0}` {1}".format(fn.name, fn.sqltext))
    return statements
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/335d191204c1_adding_db_functions_for_code_id_lookup_.py",
"copies": "1",
"size": "2941",
"license": "bsd-3-clause",
"hash": 7251589098378148000,
"line_mean": 27.2788461538,
"line_max": 117,
"alpha_frac": 0.6348180891,
"autogenerated": false,
"ratio": 3.3688430698739977,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4503661158973998,
"avg_score": null,
"num_lines": null
} |
""" Adding DEFC table
Revision ID: c0be99167fbf
Revises: 7b70f7defa50
Create Date: 2020-08-20 20:14:46.265694
"""
# revision identifiers, used by Alembic.
revision = 'c0be99167fbf'
down_revision = '7b70f7defa50'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade(engine_name):
    """Dispatch to this module's engine-specific upgrade function."""
    globals()["upgrade_{0}".format(engine_name)]()


def downgrade(engine_name):
    """Dispatch to this module's engine-specific downgrade function."""
    globals()["downgrade_{0}".format(engine_name)]()
def upgrade_data_broker():
    # ### commands auto generated by Alembic - please adjust! ###
    # Lookup table of Disaster Emergency Fund Codes (DEFC); `code` is the
    # business key and gets a unique index below.
    op.create_table('defc',
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('defc_id', sa.Integer(), nullable=False),
        sa.Column('code', sa.Text(), nullable=False),
        sa.Column('group', sa.Text(), nullable=True),
        sa.PrimaryKeyConstraint('defc_id')
    )
    op.create_index(op.f('ix_defc_code'), 'defc', ['code'], unique=True)
    # ### end Alembic commands ###
def downgrade_data_broker():
    """Drop the defc table and its unique code index."""
    # ### commands auto generated by Alembic - please adjust! ###
    target = 'defc'
    op.drop_index(op.f('ix_defc_code'), table_name=target)
    op.drop_table(target)
    # ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/c0be99167fbf_adding_defc_table.py",
"copies": "1",
"size": "1287",
"license": "cc0-1.0",
"hash": -6008604919013842000,
"line_mean": 25.2653061224,
"line_max": 72,
"alpha_frac": 0.6534576535,
"autogenerated": false,
"ratio": 3.25,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44034576535000003,
"avg_score": null,
"num_lines": null
} |
"""Adding DEFC to SF133
Revision ID: 05edb849e42a
Revises: c8f39196cbba
Create Date: 2020-04-30 17:07:01.466026
"""
# revision identifiers, used by Alembic.
revision = '05edb849e42a'
down_revision = 'c8f39196cbba'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade(engine_name):
    """Dispatch to this module's engine-specific upgrade function."""
    globals()["upgrade_{0}".format(engine_name)]()


def downgrade(engine_name):
    """Dispatch to this module's engine-specific downgrade function."""
    globals()["downgrade_{0}".format(engine_name)]()
def upgrade_data_broker():
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('sf_133', sa.Column('disaster_emergency_fund_code', sa.Text(), nullable=True))
    op.create_index(op.f('ix_sf_133_disaster_emergency_fund_code'), 'sf_133', ['disaster_emergency_fund_code'], unique=False)
    # Rebuild the tas group index so the DEFC column is part of the unique key.
    op.drop_index('ix_sf_133_tas_group', table_name='sf_133')
    op.create_index('ix_sf_133_tas_group', 'sf_133', ['tas', 'fiscal_year', 'period', 'line', 'disaster_emergency_fund_code'], unique=True)
    # ### end Alembic commands ###
def downgrade_data_broker():
    # ### commands auto generated by Alembic - please adjust! ###
    # Restore the tas group unique index without the DEFC column first,
    # then drop the DEFC index and column.
    op.drop_index('ix_sf_133_tas_group', table_name='sf_133')
    op.create_index('ix_sf_133_tas_group', 'sf_133', ['tas', 'fiscal_year', 'period', 'line'], unique=True)
    op.drop_index(op.f('ix_sf_133_disaster_emergency_fund_code'), table_name='sf_133')
    op.drop_column('sf_133', 'disaster_emergency_fund_code')
    # ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/05edb849e42a_adding_defc_to_sf133.py",
"copies": "1",
"size": "1503",
"license": "cc0-1.0",
"hash": -7138202091886316000,
"line_mean": 31.6739130435,
"line_max": 139,
"alpha_frac": 0.6793080506,
"autogenerated": false,
"ratio": 2.9586614173228347,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41379694679228346,
"avg_score": null,
"num_lines": null
} |
#Adding directory to the path where Python searches for modules
import os
import sys
cmd_folder = os.path.dirname('/home/arvind/Documents/Me/My_Projects/Git/Crypto/modules/')
sys.path.insert(0, cmd_folder)
#Importing common crypto module
import common
import block
'''
- Edit the ciphertext using key, nonce, counter, offset and newtext
- Now think of an attacker who has ciphertext, can choose offset and newtext but NOT the key, nonce or counter. This attacker must be able to recover plaintext
Questions:
- What does "but hold on to it" mean for the random key I am not supposed to know?
- Internally the program should use the key for decryption, but attacker doesn't have this
- Should I be using random nonces during encryption or does it not matter, since attacker won't have it anyway.
- Doesn't matter
- If nonces are random per block, isn't that the correct way to implement CTR? Why is this breakable?
- nonces are generated per-message, not per-block. if you generate them per block you have to transmit a list of nonces that's as long as your original message
'''
if __name__ == "__main__":
    # Cryptopals Set 4, challenge 25: recover the plaintext of an editable
    # CTR ciphertext without knowing the key (Python 2 script).
    filename= '25.txt'
    content= common.openfile(filename)
    key= '71e6efcfb44e362b6e14f7abbecf5503'
    nonce = '0'*8
    enc_string= block.ctr_encrypt_string(''.join(content), key, nonce)
    plaintext= ''
    for offset in range(0, len(enc_string)):
        # Try every candidate byte at this offset; decrypt_ctr_byte is
        # presumably the "edit" oracle that returns non-None when the guess
        # matches the ciphertext byte -- TODO confirm against the block module.
        # NOTE(review): range(0,127) never tries chr(127) (DEL) -- confirm
        # the upper bound is intentional.
        for guess in range(0,127):
            t1= block.decrypt_ctr_byte(enc_string[offset], offset, chr(guess))
            if t1 is not None:
                plaintext += chr(guess)
                break
        else:
            # for/else: no guess matched for this offset; `continue` here is
            # effectively a no-op (the outer loop advances anyway).
            continue
    print plaintext
| {
"repo_name": "arvinddoraiswamy/blahblah",
"path": "cryptopals/Set4/c25.py",
"copies": "1",
"size": "1662",
"license": "mit",
"hash": -761403259822468400,
"line_mean": 36.7727272727,
"line_max": 163,
"alpha_frac": 0.6913357401,
"autogenerated": false,
"ratio": 3.7945205479452055,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9918181642240704,
"avg_score": 0.013534929160900261,
"num_lines": 44
} |
"""Adding disaster_emergency_fund_code to object_class_program_acctivity and award_financial and certified tables
Revision ID: c8f39196cbba
Revises: 229820d5d7b1
Create Date: 2020-04-23 14:50:15.504403
"""
# revision identifiers, used by Alembic.
revision = 'c8f39196cbba'
down_revision = '229820d5d7b1'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Dispatch to this module's engine-specific upgrade function."""
    globals()["upgrade_{0}".format(engine_name)]()


def downgrade(engine_name):
    """Dispatch to this module's engine-specific downgrade function."""
    globals()["downgrade_{0}".format(engine_name)]()
def upgrade_data_broker():
    """Add a nullable disaster_emergency_fund_code column to all four tables."""
    # ### commands auto generated by Alembic - please adjust! ###
    affected_tables = (
        'award_financial',
        'certified_award_financial',
        'certified_object_class_program_activity',
        'object_class_program_activity',
    )
    for table_name in affected_tables:
        op.add_column(table_name, sa.Column('disaster_emergency_fund_code', sa.Text(), nullable=True))
    # ### end Alembic commands ###
def downgrade_data_broker():
    # ### commands auto generated by Alembic - please adjust! ###
    # Drop the column in the reverse order of its addition.
    for target_table in ('object_class_program_activity', 'certified_object_class_program_activity',
                         'certified_award_financial', 'award_financial'):
        op.drop_column(target_table, 'disaster_emergency_fund_code')
    # ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/c8f39196cbba_adding_disaster_emergency_fund_code_to_.py",
"copies": "1",
"size": "1608",
"license": "cc0-1.0",
"hash": 2916748606446999600,
"line_mean": 33.9565217391,
"line_max": 129,
"alpha_frac": 0.7201492537,
"autogenerated": false,
"ratio": 3.146771037181996,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9237835807499398,
"avg_score": 0.025816896676519503,
"num_lines": 46
} |
""" Adding display_tas to staging tables and SF133 table
Revision ID: de5e3fa1d2d2
Revises: d753553fa79b
Create Date: 2019-10-29 17:43:50.519330
"""
# revision identifiers, used by Alembic.
revision = 'de5e3fa1d2d2'
down_revision = 'd753553fa79b'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade(engine_name):
    """Run the upgrade routine registered for the named engine."""
    globals()['upgrade_{}'.format(engine_name)]()
def downgrade(engine_name):
    """Run the downgrade routine registered for the named engine."""
    globals()['downgrade_{}'.format(engine_name)]()
def upgrade_data_broker():
    # ### commands auto generated by Alembic - please adjust! ###
    # The three staging tables take the column with default nullability;
    # sf_133 spells out nullable=True explicitly (same effect).
    for staging_table in ('appropriation', 'award_financial', 'object_class_program_activity'):
        op.add_column(staging_table, sa.Column('display_tas', sa.Text()))
    op.add_column('sf_133', sa.Column('display_tas', sa.Text(), nullable=True))
    # ### end Alembic commands ###
def downgrade_data_broker():
    # ### commands auto generated by Alembic - please adjust! ###
    # Remove display_tas from every table that gained it in the upgrade.
    for target_table in ('object_class_program_activity', 'award_financial',
                         'appropriation', 'sf_133'):
        op.drop_column(target_table, 'display_tas')
    # ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/de5e3fa1d2d2_adding_display_tas_to_staging_tables_sf133.py",
"copies": "1",
"size": "1322",
"license": "cc0-1.0",
"hash": -6821128002328372000,
"line_mean": 27.7391304348,
"line_max": 87,
"alpha_frac": 0.6875945537,
"autogenerated": false,
"ratio": 3.2322738386308068,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9289948506096901,
"avg_score": 0.025983977246781132,
"num_lines": 46
} |
"""Adding Email Templates
Revision ID: 3d723944025f
Revises: 148ca74a6d3
Create Date: 2015-01-23 12:28:20.581924
"""
# revision identifiers, used by Alembic.
revision = '3d723944025f'
down_revision = '148ca74a6d3'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # email_template: named, reusable message templates with audit timestamps.
    template_columns = [
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=100), nullable=False),
        sa.Column('description', sa.String(length=1024), nullable=True),
        sa.Column('subject', sa.String(length=100), nullable=True),
        sa.Column('body', sa.Text(), nullable=True),
        sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text(u'now()'), nullable=False),
        sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text(u'now()'), nullable=True),
        sa.PrimaryKeyConstraint('id'),
    ]
    op.create_table('email_template', *template_columns)
    op.create_index(u'ix_email_template_created_at', 'email_template', ['created_at'], unique=False)
    ### end Alembic commands ###
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # The index is removed before the table it belongs to.
    table_name = 'email_template'
    op.drop_index(u'ix_email_template_created_at', table_name=table_name)
    op.drop_table(table_name)
    ### end Alembic commands ###
| {
"repo_name": "Code4SA/pmg-cms-2",
"path": "migrations/versions/3d723944025f_adding_email_templates.py",
"copies": "1",
"size": "1337",
"license": "apache-2.0",
"hash": 7467999926276752000,
"line_mean": 33.2820512821,
"line_max": 106,
"alpha_frac": 0.6910994764,
"autogenerated": false,
"ratio": 3.293103448275862,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4484202924675862,
"avg_score": null,
"num_lines": null
} |
"""Adding endpoint tables
Revision ID: 29d8c8455c86
Revises: 3307381f3b88
Create Date: 2016-06-28 16:05:25.720213
"""
# revision identifiers, used by Alembic.
revision = '29d8c8455c86'
down_revision = '3307381f3b88'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # Lookup table of cipher names.
    op.create_table('ciphers',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=128), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    # Policy records; each policy groups a set of ciphers (see below).
    op.create_table('policy',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=128), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # Many-to-many association between policies and ciphers (no primary key).
    op.create_table('policies_ciphers',
    sa.Column('cipher_id', sa.Integer(), nullable=True),
    sa.Column('policy_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['cipher_id'], ['ciphers.id'], ),
    sa.ForeignKeyConstraint(['policy_id'], ['policy.id'], )
    )
    # Composite index to make association lookups cheap in both directions.
    op.create_index('policies_ciphers_ix', 'policies_ciphers', ['cipher_id', 'policy_id'], unique=False)
    # Endpoint records: a named host/port tied to a certificate and a policy.
    # References the pre-existing certificates table.
    op.create_table('endpoints',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('owner', sa.String(length=128), nullable=True),
    sa.Column('name', sa.String(length=128), nullable=True),
    sa.Column('dnsname', sa.String(length=256), nullable=True),
    sa.Column('type', sa.String(length=128), nullable=True),
    sa.Column('active', sa.Boolean(), nullable=True),
    sa.Column('port', sa.Integer(), nullable=True),
    sa.Column('date_created', sa.DateTime(), server_default=sa.text(u'now()'), nullable=False),
    sa.Column('policy_id', sa.Integer(), nullable=True),
    sa.Column('certificate_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['certificate_id'], ['certificates.id'], ),
    sa.ForeignKeyConstraint(['policy_id'], ['policy.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    ### end Alembic commands ###
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # Drop dependents first: endpoints references policy/certificates, and
    # the association table references policy and ciphers.
    op.drop_table('endpoints')
    op.drop_index('policies_ciphers_ix', table_name='policies_ciphers')
    for obsolete_table in ('policies_ciphers', 'policy', 'ciphers'):
        op.drop_table(obsolete_table)
    ### end Alembic commands ###
| {
"repo_name": "nevins-b/lemur",
"path": "lemur/migrations/versions/29d8c8455c86_.py",
"copies": "2",
"size": "2317",
"license": "apache-2.0",
"hash": -2145385822338868200,
"line_mean": 36.3709677419,
"line_max": 104,
"alpha_frac": 0.6685369012,
"autogenerated": false,
"ratio": 3.453055141579732,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5121592042779731,
"avg_score": null,
"num_lines": null
} |
"""Adding endpoint tables
Revision ID: 29d8c8455c86
Revises: 3307381f3b88
Create Date: 2016-06-28 16:05:25.720213
"""
# revision identifiers, used by Alembic.
revision = "29d8c8455c86"
down_revision = "3307381f3b88"
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # Lookup table of cipher names.
    op.create_table(
        "ciphers",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("name", sa.String(length=128), nullable=False),
        sa.PrimaryKeyConstraint("id"),
    )
    # Policy records; each policy groups a set of ciphers (see below).
    op.create_table(
        "policy",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("name", sa.String(length=128), nullable=True),
        sa.PrimaryKeyConstraint("id"),
    )
    # Many-to-many association between policies and ciphers (no primary key).
    op.create_table(
        "policies_ciphers",
        sa.Column("cipher_id", sa.Integer(), nullable=True),
        sa.Column("policy_id", sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(["cipher_id"], ["ciphers.id"]),
        sa.ForeignKeyConstraint(["policy_id"], ["policy.id"]),
    )
    # Composite index to make association lookups cheap in both directions.
    op.create_index(
        "policies_ciphers_ix",
        "policies_ciphers",
        ["cipher_id", "policy_id"],
        unique=False,
    )
    # Endpoint records: a named host/port tied to a certificate and a policy.
    # References the pre-existing certificates table.
    op.create_table(
        "endpoints",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("owner", sa.String(length=128), nullable=True),
        sa.Column("name", sa.String(length=128), nullable=True),
        sa.Column("dnsname", sa.String(length=256), nullable=True),
        sa.Column("type", sa.String(length=128), nullable=True),
        sa.Column("active", sa.Boolean(), nullable=True),
        sa.Column("port", sa.Integer(), nullable=True),
        sa.Column(
            "date_created",
            sa.DateTime(),
            server_default=sa.text(u"now()"),
            nullable=False,
        ),
        sa.Column("policy_id", sa.Integer(), nullable=True),
        sa.Column("certificate_id", sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(["certificate_id"], ["certificates.id"]),
        sa.ForeignKeyConstraint(["policy_id"], ["policy.id"]),
        sa.PrimaryKeyConstraint("id"),
    )
    ### end Alembic commands ###
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # Drop dependent tables before the lookup tables they reference.
    op.drop_table("endpoints")
    op.drop_index("policies_ciphers_ix", table_name="policies_ciphers")
    for obsolete_table in ("policies_ciphers", "policy", "ciphers"):
        op.drop_table(obsolete_table)
    ### end Alembic commands ###
| {
"repo_name": "Netflix/lemur",
"path": "lemur/migrations/versions/29d8c8455c86_.py",
"copies": "1",
"size": "2539",
"license": "apache-2.0",
"hash": -7604886591036155000,
"line_mean": 32.4078947368,
"line_max": 73,
"alpha_frac": 0.6100827097,
"autogenerated": false,
"ratio": 3.6850507982583456,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9781958718346782,
"avg_score": 0.002634957922312906,
"num_lines": 76
} |
"""Adding exec comp to DUNS
Revision ID: 5f29b283f23e
Revises: e5b90e0b2ff8
Create Date: 2019-05-10 15:04:16.159511
"""
# revision identifiers, used by Alembic.
revision = '5f29b283f23e'
down_revision = 'e5b90e0b2ff8'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade(engine_name):
    """Invoke the upgrade function for the given engine name."""
    handler = globals()['upgrade_{}'.format(engine_name)]
    handler()
def downgrade(engine_name):
    """Invoke the downgrade function for the given engine name."""
    handler = globals()['downgrade_{}'.format(engine_name)]
    handler()
def upgrade_data_broker():
    # ### commands auto generated by Alembic - please adjust! ###
    # Five (amount, name) column pairs for the top compensated officers,
    # plus a tracking date for the last executive-compensation update.
    for officer in range(1, 6):
        op.add_column('duns', sa.Column('high_comp_officer{}_amount'.format(officer), sa.Text(), nullable=True))
        op.add_column('duns', sa.Column('high_comp_officer{}_full_na'.format(officer), sa.Text(), nullable=True))
    op.add_column('duns', sa.Column('last_exec_comp_mod_date', sa.Date(), nullable=True))
    # ### end Alembic commands ###
def downgrade_data_broker():
    # ### commands auto generated by Alembic - please adjust! ###
    # Drop the officer column pairs in reverse, then the tracking date.
    for officer in range(5, 0, -1):
        op.drop_column('duns', 'high_comp_officer{}_full_na'.format(officer))
        op.drop_column('duns', 'high_comp_officer{}_amount'.format(officer))
    op.drop_column('duns', 'last_exec_comp_mod_date')
    # ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/5f29b283f23e_adding_exec_comp_to_duns.py",
"copies": "1",
"size": "2398",
"license": "cc0-1.0",
"hash": -435502377954668540,
"line_mean": 38.9666666667,
"line_max": 92,
"alpha_frac": 0.6814011676,
"autogenerated": false,
"ratio": 2.7219069239500566,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8794714616962842,
"avg_score": 0.021718694917442976,
"num_lines": 60
} |
"""Adding exec comp to historic duns
Revision ID: 0dc4a1fbb52e
Revises: 827e8db6242e
Create Date: 2019-08-16 19:06:24.888878
"""
# revision identifiers, used by Alembic.
revision = '0dc4a1fbb52e'
down_revision = '827e8db6242e'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade(engine_name):
    """Dispatch to the upgrade implementation for the named engine."""
    globals()['upgrade_{}'.format(engine_name)]()
def downgrade(engine_name):
    """Dispatch to the downgrade implementation for the named engine."""
    globals()['downgrade_{}'.format(engine_name)]()
def upgrade_data_broker():
    # ### commands auto generated by Alembic - please adjust! ###
    # Mirror the DUNS executive-compensation columns onto historic_duns
    # (no last_exec_comp_mod_date column here).
    for officer in range(1, 6):
        op.add_column('historic_duns', sa.Column('high_comp_officer{}_amount'.format(officer), sa.Text(), nullable=True))
        op.add_column('historic_duns', sa.Column('high_comp_officer{}_full_na'.format(officer), sa.Text(), nullable=True))
    # ### end Alembic commands ###
def downgrade_data_broker():
    # ### commands auto generated by Alembic - please adjust! ###
    # Remove the officer column pairs in reverse order of creation.
    for officer in range(5, 0, -1):
        op.drop_column('historic_duns', 'high_comp_officer{}_full_na'.format(officer))
        op.drop_column('historic_duns', 'high_comp_officer{}_amount'.format(officer))
    # ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/0dc4a1fbb52e_adding_exec_comp_to_historic_duns.py",
"copies": "1",
"size": "2443",
"license": "cc0-1.0",
"hash": 9155677588879569000,
"line_mean": 41.1206896552,
"line_max": 101,
"alpha_frac": 0.6987310684,
"autogenerated": false,
"ratio": 2.6994475138121548,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8787624799856424,
"avg_score": 0.02211075647114625,
"num_lines": 58
} |
'''Adding extension operators.'''
__author__ = 'Robert Smallshire'
from ._portability import function_name
def add_method(function, klass, name=None):
    '''Attach an existing function to a class as a method.

    Note: The extend decorator is usually a more readable alternative to
    calling this function directly.

    Args:
        function: The function to install on the class klass.
        klass: The class which will receive the new method.
        name: Optional name for the new method. When omitted or None the
            function's own name is used.

    Returns:
        The function argument unmodified.

    Raises:
        ValueError: If klass already has an attribute with the same name as
            the extension method.
    '''
    # Should we be using functools.update_wrapper in here?
    method_name = function_name(function) if name is None else name
    if hasattr(klass, method_name):
        message = "Cannot replace existing attribute with method " \
                  "'{name}'".format(name=method_name)
        raise ValueError(message)
    setattr(klass, method_name, function)
    return function
def extend(klass, name=None):
    '''A function decorator for extending an existing class.

    Use as a decorator for functions to add to an existing class.

    Args:
        klass: The class to be decorated.
        name: The name the new method is to be given in the klass class.

    Returns:
        A decorator function which accepts a single function as its only
        argument. The decorated function will be added to class klass.

    Raises:
        ValueError: If klass already has an attribute with the same name as
            the extension method.
    '''
    def decorator(f):
        add_method(f, klass, name)
        return f
    return decorator
| {
"repo_name": "SlamJam/asq",
"path": "asq/extension.py",
"copies": "5",
"size": "1838",
"license": "mit",
"hash": -6709805697076182000,
"line_mean": 28.6333333333,
"line_max": 79,
"alpha_frac": 0.6376496192,
"autogenerated": false,
"ratio": 4.862433862433862,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 60
} |
'''Adding extension operators.'''
__author__ = 'Sixty North'
from ._portability import function_name
def add_method(function, klass, name=None):
    '''Install an existing function on a class as a method.

    Note: Consider the extend decorator as a more readable alternative to
    using this function directly.

    Args:
        function: The function to be added to the class klass.
        klass: The class to which the new method will be added.
        name: An optional name for the new method. If omitted or None the
            original name of the function is used.

    Returns:
        The function argument unmodified.

    Raises:
        ValueError: If klass already has an attribute with the same name as
            the extension method.
    '''
    # Should we be using functools.update_wrapper in here?
    target_name = name if name is not None else function_name(function)
    if hasattr(klass, target_name):
        raise ValueError(
            "Cannot replace existing attribute with method "
            "'{name}'".format(name=target_name))
    setattr(klass, target_name, function)
    return function
def extend(klass, name=None):
    '''A function decorator for extending an existing class.

    Use as a decorator for functions to add to an existing class.

    Args:
        klass: The class to be decorated.
        name: The name the new method is to be given in the klass class.

    Returns:
        A decorator function which accepts a single function as its only
        argument. The decorated function will be added to class klass.

    Raises:
        ValueError: If klass already has an attribute with the same name as
            the extension method.
    '''
    def _install(fn):
        return add_method(fn, klass, name)
    return _install
| {
"repo_name": "rob-smallshire/asq",
"path": "asq/extension.py",
"copies": "1",
"size": "1832",
"license": "mit",
"hash": 2491054699264284000,
"line_mean": 28.5333333333,
"line_max": 79,
"alpha_frac": 0.6364628821,
"autogenerated": false,
"ratio": 4.859416445623342,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5995879327723342,
"avg_score": null,
"num_lines": null
} |
"""adding FDI tables
Revision ID: 57b0a21da161
Revises: 3ac1923eb5b3
Create Date: 2016-10-07 10:08:04.592892
"""
# revision identifiers, used by Alembic.
revision = '57b0a21da161'
down_revision = '3ac1923eb5b3'
from alembic import op
import sqlalchemy as sa
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # Each lookup table is just (id, unique indexed name); sectors allows
    # longer names than the others.
    lookup_tables = [
        ('currencies', 50),
        ('investment_locations', 50),
        ('investment_origins', 50),
        ('investment_types', 50),
        ('phases', 50),
        ('sectors', 130),
    ]
    for table_name, name_length in lookup_tables:
        op.create_table(table_name,
            sa.Column('id', sa.Integer(), nullable=False),
            sa.Column('name', sa.String(length=name_length), nullable=False),
            sa.PrimaryKeyConstraint('id')
        )
        op.create_index(op.f('ix_%s_name' % table_name), table_name, ['name'], unique=True)
    # Main investments table; rows are removed along with their parent
    # document (ON DELETE CASCADE on doc_id).
    op.create_table('investments',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('doc_id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=200), nullable=True),
        sa.Column('value', sa.Integer(), nullable=True),
        sa.Column('temp_opps', sa.Integer(), nullable=True),
        sa.Column('perm_opps', sa.Integer(), nullable=True),
        sa.Column('company', sa.String(length=1024), nullable=True),
        sa.Column('government', sa.String(length=1024), nullable=True),
        sa.Column('additional_place', sa.String(length=1024), nullable=True),
        sa.Column('investment_begin', sa.Date(), nullable=True),
        sa.Column('investment_end', sa.Date(), nullable=True),
        sa.Column('currency_id', sa.Integer(), nullable=True),
        sa.Column('phase_id', sa.Integer(), nullable=True),
        sa.Column('invest_origin_id', sa.Integer(), nullable=True),
        sa.Column('invest_loc_id', sa.Integer(), nullable=True),
        sa.Column('invest_type_id', sa.Integer(), nullable=True),
        sa.Column('sector_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['currency_id'], ['currencies.id'], ),
        sa.ForeignKeyConstraint(['doc_id'], ['documents.id'], ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['invest_loc_id'], ['investment_locations.id'], ),
        sa.ForeignKeyConstraint(['invest_origin_id'], ['investment_origins.id'], ),
        sa.ForeignKeyConstraint(['invest_type_id'], ['investment_types.id'], ),
        sa.ForeignKeyConstraint(['phase_id'], ['phases.id'], ),
        sa.ForeignKeyConstraint(['sector_id'], ['sectors.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    # Secondary indexes on every foreign key plus the queryable columns.
    for indexed_column in ('currency_id', 'doc_id', 'invest_loc_id',
                           'invest_origin_id', 'invest_type_id',
                           'investment_begin', 'investment_end',
                           'name', 'phase_id', 'sector_id'):
        op.create_index(op.f('ix_investments_' + indexed_column), 'investments',
                        [indexed_column], unique=False)
    ### end Alembic commands ###
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # Drop the fact table first, then the lookup tables it references.
    for obsolete_table in ('investments', 'sectors', 'phases',
                           'investment_types', 'investment_origins',
                           'investment_locations', 'currencies'):
        op.drop_table(obsolete_table)
    ### end Alembic commands ###
| {
"repo_name": "Code4SA/mma-dexter",
"path": "migrations/versions/57b0a21da161_adding_fdi_tables.py",
"copies": "1",
"size": "5032",
"license": "apache-2.0",
"hash": 8017069705066599000,
"line_mean": 47.3846153846,
"line_max": 111,
"alpha_frac": 0.6699125596,
"autogenerated": false,
"ratio": 3.13715710723192,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9253085468234274,
"avg_score": 0.010796839719528993,
"num_lines": 104
} |
"""adding federal_agency_id, federal_agency_name
Revision ID: 08c4db2f3007
Revises: 8d66e1b5702d
Create Date: 2019-02-28 20:50:02.184053
"""
# revision identifiers, used by Alembic.
revision = '08c4db2f3007'
down_revision = '8d66e1b5702d'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade(engine_name):
    """Look up and call the upgrade function for the named engine."""
    handler = globals()['upgrade_{}'.format(engine_name)]
    handler()
def downgrade(engine_name):
    """Look up and call the downgrade function for the named engine."""
    handler = globals()['downgrade_{}'.format(engine_name)]
    handler()
def upgrade_data_broker():
    # ### commands auto generated by Alembic - please adjust! ###
    # Both FSRS tables gain the federal agency id and name columns.
    for fsrs_table in ('fsrs_grant', 'fsrs_subgrant'):
        for new_column in ('federal_agency_id', 'federal_agency_name'):
            op.add_column(fsrs_table, sa.Column(new_column, sa.String(), nullable=True))
    # ### end Alembic commands ###
def downgrade_data_broker():
    # ### commands auto generated by Alembic - please adjust! ###
    # Drop the columns in reverse order of their addition.
    for fsrs_table in ('fsrs_subgrant', 'fsrs_grant'):
        for old_column in ('federal_agency_name', 'federal_agency_id'):
            op.drop_column(fsrs_table, old_column)
    # ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/08c4db2f3007_adding_federal_agency_id_federal_agency_.py",
"copies": "1",
"size": "1389",
"license": "cc0-1.0",
"hash": 4116311405911339500,
"line_mean": 29.1956521739,
"line_max": 96,
"alpha_frac": 0.6933045356,
"autogenerated": false,
"ratio": 2.967948717948718,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4161253253548718,
"avg_score": null,
"num_lines": null
} |
"""adding field for current ehr status
Revision ID: 6fcdd1810837
Revises: e4a837723c94
Create Date: 2020-11-02 15:00:54.373743
"""
from alembic import op
import sqlalchemy as sa
import rdr_service.model.utils
from rdr_service.participant_enums import PhysicalMeasurementsStatus, QuestionnaireStatus, OrderStatus
from rdr_service.participant_enums import WithdrawalStatus, WithdrawalReason, SuspensionStatus, QuestionnaireDefinitionStatus
from rdr_service.participant_enums import EnrollmentStatus, Race, SampleStatus, OrganizationType, BiobankOrderStatus
from rdr_service.participant_enums import OrderShipmentTrackingStatus, OrderShipmentStatus
from rdr_service.participant_enums import MetricSetType, MetricsKey, GenderIdentity
from rdr_service.model.base import add_table_history_table, drop_table_history_table
from rdr_service.model.code import CodeType
from rdr_service.model.site_enums import SiteStatus, EnrollingStatus, DigitalSchedulingStatus, ObsoleteStatus
# revision identifiers, used by Alembic.
revision = '6fcdd1810837'
down_revision = 'baa91c8a684e'
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the per-engine upgrade function (rdr or metrics)."""
    globals()['upgrade_{}'.format(engine_name)]()
def downgrade(engine_name):
    """Dispatch to the per-engine downgrade function (rdr or metrics)."""
    globals()['downgrade_{}'.format(engine_name)]()
def upgrade_rdr():
    # ### commands auto generated by Alembic - please adjust! ###
    # NOT NULL column needs a server default ('false') for existing rows.
    ehr_flag = sa.Column('is_ehr_data_available', sa.Boolean(),
                         server_default=sa.text('false'), nullable=False)
    op.add_column('participant_summary', ehr_flag)
    # ### end Alembic commands ###
def downgrade_rdr():
    # ### commands auto generated by Alembic - please adjust! ###
    # Remove the EHR-availability flag again.
    table, column = 'participant_summary', 'is_ehr_data_available'
    op.drop_column(table, column)
    # ### end Alembic commands ###
def upgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    # No schema changes for the metrics engine in this revision.
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    # No schema changes for the metrics engine in this revision.
    pass
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/6fcdd1810837_adding_field_for_current_ehr_status.py",
"copies": "1",
"size": "1986",
"license": "bsd-3-clause",
"hash": 360784103025070000,
"line_mean": 32.1,
"line_max": 139,
"alpha_frac": 0.750755287,
"autogenerated": false,
"ratio": 3.624087591240876,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4874842878240876,
"avg_score": null,
"num_lines": null
} |
"""adding file name fields to d file table
Revision ID: 1f43c2880644
Revises: 6f5c2c66b328
Create Date: 2016-08-02 19:57:57.561765
"""
# revision identifiers, used by Alembic.
revision = '1f43c2880644'
down_revision = '6f5c2c66b328'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Run the engine-specific upgrade routine."""
    handler = globals()['upgrade_{}'.format(engine_name)]
    handler()
def downgrade(engine_name):
    """Run the engine-specific downgrade routine."""
    handler = globals()['downgrade_{}'.format(engine_name)]
    handler()
def upgrade_data_broker():
    ### commands auto generated by Alembic - please adjust! ###
    # Record both the original and the uploaded file name for D files.
    for name_column in ('original_file_name', 'upload_file_name'):
        op.add_column('d_file_metadata', sa.Column(name_column, sa.Text(), nullable=True))
    ### end Alembic commands ###
def downgrade_data_broker():
    ### commands auto generated by Alembic - please adjust! ###
    # Drop the columns in reverse order of their addition.
    for name_column in ('upload_file_name', 'original_file_name'):
        op.drop_column('d_file_metadata', name_column)
    ### end Alembic commands ###
| {
"repo_name": "chambers-brian/SIG_Digital-Strategy_SI_ODP_Backend",
"path": "dataactcore/migrations/versions/1f43c2880644_adding_file_name_fields_to_d_file_table.py",
"copies": "2",
"size": "1037",
"license": "cc0-1.0",
"hash": -3583380978145670700,
"line_mean": 23.6904761905,
"line_max": 95,
"alpha_frac": 0.68756027,
"autogenerated": false,
"ratio": 3.210526315789474,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.97510517060611,
"avg_score": 0.029406975945674683,
"num_lines": 42
} |
"""adding fiscal year and period to submission
Revision ID: 449ab366f333
Revises: a0a4f1ef56ae
Create Date: 2016-08-11 13:21:49.526346
"""
# revision identifiers, used by Alembic.
revision = '449ab366f333'
down_revision = '5a9051f9bfc5'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Dispatch to this module's upgrade function for the named engine."""
    globals()['upgrade_{}'.format(engine_name)]()
def downgrade(engine_name):
    """Dispatch to this module's downgrade function for the named engine."""
    globals()['downgrade_{}'.format(engine_name)]()
def upgrade_data_broker():
    ### commands auto generated by Alembic - please adjust! ###
    # NOT NULL columns need a server default ('0') so existing rows are valid.
    for fiscal_column in ('reporting_fiscal_period', 'reporting_fiscal_year'):
        op.add_column('submission', sa.Column(fiscal_column, sa.Integer(), server_default='0', nullable=False))
    ### end Alembic commands ###
def downgrade_data_broker():
    ### commands auto generated by Alembic - please adjust! ###
    # Drop the columns in reverse order of their addition.
    for fiscal_column in ('reporting_fiscal_year', 'reporting_fiscal_period'):
        op.drop_column('submission', fiscal_column)
    ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/449ab366f333_adding_fiscal_year_and_period_to_submission.py",
"copies": "2",
"size": "1089",
"license": "cc0-1.0",
"hash": -7525969050230702000,
"line_mean": 24.9285714286,
"line_max": 119,
"alpha_frac": 0.6997245179,
"autogenerated": false,
"ratio": 3.468152866242038,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02930585559009165,
"num_lines": 42
} |
""" Adding FSRSGrant federal_agency_id
Revision ID: 4c4dd19af4ac
Revises: 81969f6e3733
Create Date: 2019-12-20 16:29:32.890994
"""
# revision identifiers, used by Alembic.
revision = '4c4dd19af4ac'
down_revision = '81969f6e3733'
branch_labels = None
depends_on = None
from alembic import op
from sqlalchemy import text
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade routine for *engine_name*."""
    globals()["upgrade_" + engine_name]()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade routine for *engine_name*."""
    globals()["downgrade_" + engine_name]()
def upgrade_data_broker():
    """Create a functional index on UPPER(federal_agency_id) for fsrs_grant."""
    index_name = op.f('ix_fsrs_grant_federal_agency_id_upper')
    op.create_index(index_name, 'fsrs_grant',
                    [text('UPPER(federal_agency_id)')], unique=False)
def downgrade_data_broker():
    """Remove the UPPER(federal_agency_id) index from fsrs_grant."""
    index_name = op.f('ix_fsrs_grant_federal_agency_id_upper')
    op.drop_index(index_name, table_name='fsrs_grant')
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/4c4dd19af4ac_adding_fsrs_upper_fai.py",
"copies": "1",
"size": "1024",
"license": "cc0-1.0",
"hash": -1770538599664266200,
"line_mean": 23.9756097561,
"line_max": 130,
"alpha_frac": 0.7001953125,
"autogenerated": false,
"ratio": 3.065868263473054,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42660635759730536,
"avg_score": null,
"num_lines": null
} |
"""adding games
Revision ID: 4ee7c51f1bbd
Revises: 58404d0b7e55
Create Date: 2015-08-05 10:28:03.266280
"""
# revision identifiers, used by Alembic.
revision = '4ee7c51f1bbd'
down_revision = '58404d0b7e55'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the games table plus indexes on its two timestamp columns."""
    game_columns = [
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('board_state', sa.String(length=128), nullable=True),
        sa.Column('player_id', sa.Integer(), nullable=True),
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('last_played', sa.DateTime(), nullable=True),
    ]
    op.create_table(
        'games',
        *game_columns,
        sa.ForeignKeyConstraint(['player_id'], ['users.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    for indexed_column in ('created_at', 'last_played'):
        op.create_index(op.f('ix_games_%s' % indexed_column), 'games',
                        [indexed_column], unique=False)
def downgrade():
    """Drop the games table and its indexes (reverse of upgrade)."""
    for indexed_column in ('last_played', 'created_at'):
        op.drop_index(op.f('ix_games_%s' % indexed_column), table_name='games')
    op.drop_table('games')
| {
"repo_name": "jeffthemaximum/crispy-hippopotamus",
"path": "migrations/versions/4ee7c51f1bbd_adding_games.py",
"copies": "1",
"size": "1251",
"license": "mit",
"hash": 734778046911595000,
"line_mean": 31.9210526316,
"line_max": 89,
"alpha_frac": 0.6666666667,
"autogenerated": false,
"ratio": 3.1275,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42941666666999995,
"avg_score": null,
"num_lines": null
} |
"""Adding geographic projection converter.
Revision ID: 29ecca388884
Revises: b4a88e0c224e
Create Date: 2020-02-20 12:48:10.392917
"""
from alembic import context
from alembic import op
from sqlalchemy import String, Integer, Text
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import table, column
# revision identifiers, used by Alembic.
revision = '29ecca388884'
down_revision = 'b4a88e0c224e'
branch_labels = None
depends_on = None
def _insert_operation():
    """Register the cartographic-projection operation (id 4045)."""
    operation_table = table('operation',
                            column("id", Integer),
                            column("slug", String),
                            column('enabled', Integer),
                            column('type', String),
                            column('icon', String),
                            )
    field_names = ['id', 'slug', 'enabled', 'type', 'icon']
    values = [
        (4045, 'cartographic-projection', 1, 'TRANSFORMATION', 'fa-globe-stand'),
    ]
    op.bulk_insert(operation_table,
                   [dict(zip(field_names, row)) for row in values])
def _insert_new_operation_platform():
    """Associate operation 4045 with platform 4."""
    assoc_table = table(
        'operation_platform',
        column('operation_id', Integer),
        column('platform_id', Integer))
    op.bulk_insert(assoc_table, [{'operation_id': 4045, 'platform_id': 4}])
def _insert_operation_category_operation():
    """Place operation 4045 into categories 4001, 41 and 42."""
    assoc_table = table(
        'operation_category_operation',
        column('operation_id', Integer),
        column('operation_category_id', Integer))
    rows = [{'operation_id': 4045, 'operation_category_id': category_id}
            for category_id in (4001, 41, 42)]
    op.bulk_insert(assoc_table, rows)
def _insert_operation_form():
    """Create form 4044 ('execution' category) for the new operation."""
    form_table = table(
        'operation_form',
        column('id', Integer),
        column('enabled', Integer),
        column('order', Integer),
        column('category', String), )
    op.bulk_insert(
        form_table,
        [{'id': 4044, 'enabled': 1, 'order': 1, 'category': 'execution'}])
def _insert_operation_form_translation():
    """Insert the en/pt localized names for form 4044."""
    translation_table = table(
        'operation_form_translation',
        column('id', Integer),
        column('locale', String),
        column('name', String))
    localized_rows = [
        {'id': 4044, 'locale': 'en', 'name': 'Execution'},
        {'id': 4044, 'locale': 'pt', 'name': 'Execução'},
    ]
    op.bulk_insert(translation_table, localized_rows)
def _insert_operation_operation_form():
    """Attach forms 41, 110 and 4044 to operation 4045."""
    link_table = table(
        'operation_operation_form',
        column('operation_id', Integer),
        column('operation_form_id', Integer))
    rows = [{'operation_id': 4045, 'operation_form_id': form_id}
            for form_id in (41, 110, 4044)]
    op.bulk_insert(link_table, rows)
def _insert_operation_translation():
    # Localized (pt/en) display name and description for operation 4045.
    tb = table(
        'operation_translation',
        column('id', Integer),
        column('locale', String),
        column('name', String),
        column('description', String), )
    columns = ('id', 'locale', 'name', 'description')
    data = [
        (4045, 'pt', 'Conversor de projeções cartográficas',
         'Converte diferentes projeções cartograficas entre si.'),
        (4045, 'en', 'Cartographic projections converter',
         'Converts different cartographic projections to each other.'),
    ]
    rows = [dict(list(zip(columns, row))) for row in data]
    op.bulk_insert(tb, rows)
def _insert_operation_port():
    """Create the input (4098) and output (4099) ports for operation 4045."""
    port_table = table(
        'operation_port',
        column('id', Integer),
        column('type', String),
        column('tags', String),
        column('operation_id', Integer),
        column('order', Integer),
        column('multiplicity', String),
        column('slug', String), )
    # Derive the field order directly from the table definition above.
    field_names = [c.name for c in port_table.columns]
    port_rows = [
        (4098, 'INPUT', None, 4045, 1, 'ONE', 'input data'),
        (4099, 'OUTPUT', None, 4045, 1, 'MANY', 'output data'),
    ]
    op.bulk_insert(port_table,
                   [dict(zip(field_names, row)) for row in port_rows])
def _insert_operation_port_translation():
    # en/pt names and descriptions for ports 4098 (input) and 4099 (output).
    tb = table(
        'operation_port_translation',
        column('id', Integer),
        column('locale', String),
        column('name', String),
        column('description', String), )
    columns = ('id', 'locale', 'name', 'description')
    data = [
        (4098, 'en', 'input data', 'Input data'),
        (4098, 'pt', 'dados de entrada', 'Dados de entrada'),
        (4099, 'en', 'output data', 'Output data'),
        (4099, 'pt', 'dados de saída', 'Dados de saída'),
    ]
    rows = [dict(list(zip(columns, row))) for row in data]
    op.bulk_insert(tb, rows)
def _insert_operation_port_interface_operation_port():
    """Bind ports 4098 and 4099 to port interface 1."""
    link_table = table(
        'operation_port_interface_operation_port',
        column('operation_port_id', Integer),
        column('operation_port_interface_id', Integer), )
    rows = [{'operation_port_id': port_id, 'operation_port_interface_id': 1}
            for port_id in (4098, 4099)]
    op.bulk_insert(link_table, rows)
def _insert_operation_form_field():
    # Fields 4344-4349 of form 4044: source/destination EPSG codes, the
    # lat/lon input columns, and optional output aliases for the converted values.
    tb = table(
        'operation_form_field',
        column('id', Integer),
        column('name', String),
        column('type', String),
        column('required', Integer),
        column('order', Integer),
        column('default', Text),
        column('suggested_widget', String),
        column('values_url', String),
        column('values', String),
        column('scope', String),
        column('form_id', Integer), )
    columns = ('id', 'name', 'type', 'required', 'order', 'default',
               'suggested_widget', 'values_url', 'values', 'scope', 'form_id')
    data = [
        (4344, 'src_projection', 'INTEGER', 1, 1, None, 'integer', None, None, 'EXECUTION', 4044),
        (4345, 'dst_projection', 'INTEGER', 1, 2, None, 'integer', None, None, 'EXECUTION', 4044),
        (4346, 'col_lat', 'TEXT', 1, 3, None, 'attribute-selector', None, '{"multiple": false}', 'EXECUTION', 4044),
        (4347, 'col_lon', 'TEXT', 1, 4, None, 'attribute-selector', None, '{"multiple": false}', 'EXECUTION', 4044),
        (4348, 'alias_lat', 'TEXT', 0, 5, None, 'text', None, None, 'EXECUTION', 4044),
        (4349, 'alias_lon', 'TEXT', 0, 6, None, 'text', None, None, 'EXECUTION', 4044),
    ]
    rows = [dict(list(zip(columns, row))) for row in data]
    op.bulk_insert(tb, rows)
def _insert_operation_form_field_translation():
    # en/pt labels and help texts for form fields 4344-4349.
    tb = table(
        'operation_form_field_translation',
        column('id', Integer),
        column('locale', String),
        column('label', String),
        column('help', String), )
    columns = ('id', 'locale', 'label', 'help')
    data = [
        (4344, 'en', 'Source projection (epsg code)', 'Source projection (epsg)'),
        (4344, 'pt', 'Projeção de origem (epsg)', 'Projeção de origem (epsg)'),
        (4345, 'en', 'Destination projection (epsg code)', 'Destination projection (epsg code).'),
        (4345, 'pt', 'Projeção de destino (epsg)', 'Projeção de destino (epsg).'),
        (4346, 'en', 'Latitude column', 'Latitude column name.'),
        (4346, 'pt', 'Coluna de Latitude', 'Nome da coluna que contem a Latitude.'),
        (4347, 'en', 'Longitude column', 'Longitude column name.'),
        (4347, 'pt', 'Coluna de Longitude', 'Nome da coluna que contem a Longitude.'),
        (4348, 'en', 'New Latitude column', 'Alias for the converted Latitude.'),
        (4348, 'pt', 'Nova coluna de Latitude', 'Novo nome da coluna para as Latitudes convertidas (se vazia, '
                                                'vai substituir a atual).'),
        (4349, 'en', 'New Longitude column', 'Alias for the converted Longitude (if empty will replace it).'),
        (4349, 'pt', 'Nova coluna de Longitude', 'Novo nome da coluna para as Longitudes convertidas (se vazia, '
                                                 'vai substituir a atual).'),
    ]
    rows = [dict(list(zip(columns, row))) for row in data]
    op.bulk_insert(tb, rows)
all_commands = [
(_insert_operation, 'DELETE FROM operation WHERE id = 4045'),
(_insert_new_operation_platform,
'DELETE FROM operation_platform WHERE operation_id = 4045 AND '
'platform_id = 4;'
),
(_insert_operation_category_operation,
'DELETE FROM operation_category_operation '
'WHERE operation_id = 4045'),
(_insert_operation_form,
'DELETE FROM operation_form WHERE id = 4044'),
(_insert_operation_form_translation,
'DELETE FROM operation_form_translation WHERE id = 4044'),
(_insert_operation_operation_form,
'DELETE FROM operation_operation_form '
'WHERE operation_id = 4045'),
(_insert_operation_translation,
'DELETE FROM operation_translation WHERE id = 4045'),
(_insert_operation_port,
'DELETE FROM operation_port WHERE id BETWEEN 4098 AND 4099'),
(_insert_operation_port_translation,
'DELETE FROM operation_port_translation WHERE id BETWEEN 4098 AND 4099'),
(_insert_operation_port_interface_operation_port,
'DELETE FROM operation_port_interface_operation_port WHERE '
'operation_port_id BETWEEN 4098 AND 4099'),
(_insert_operation_form_field,
'DELETE FROM operation_form_field WHERE id BETWEEN 4344 AND 4349;'),
(_insert_operation_form_field_translation,
'DELETE FROM operation_form_field_translation WHERE id BETWEEN 4344 AND 4349;'),
("""
DELETE FROM operation_form_field_translation where id = 235;
DELETE FROM operation_form_field where id = 235;
""",
"""
INSERT INTO operation_form_field (`id`, `name`, `type`, `required`, `order`, `default`, `suggested_widget`,
`values_url`, `values`, `scope`, `form_id`, `enable_conditions`)
VALUES (235, 'save_criteria', 'TEXT', 1, 3, 'ALL', 'dropdown', NULL,
'[{\"en\": \"Save best model\", \"key\": \"BEST\", \"value\": \"Save best model\",
\"pt\": \"Salvar o melhor modelo\"}, {\"en\": \"Save all (names will be suffixed with model rank)\",
\"key\": \"ALL\", \"value\": \"Save all (names will be suffixed with model rank)\",
\"pt\": \"Salvar todos (nomes ser\\u00e3o sufixados com o ranking do modelo)\"}]', 'EXECUTION', 100, NULL);
INSERT INTO operation_form_field_translation (id, locale, label, help) VALUES
(235, 'en', 'Which model to save? (required if many models)', 'Which model to save.'),
(235, 'pt', 'Qual modelo salvar? (obrigatório se vários modelos)', 'Qual modelo salvar.');
""")
]
def upgrade():
    """Apply every forward command in all_commands inside one transaction."""
    ctx = context.get_context()
    session = sessionmaker(bind=ctx.bind)()
    connection = session.connection()
    try:
        for forward, _rollback in all_commands:
            if isinstance(forward, str):
                # Raw SQL batch: run each ';'-separated statement on its own.
                for statement in forward.split(';'):
                    if statement.strip():
                        connection.execute(statement)
            elif isinstance(forward, list):
                for statement in forward:
                    connection.execute(statement)
            else:
                forward()
    except:  # noqa: E722 -- roll back on *any* failure, then re-raise.
        session.rollback()
        raise
    session.commit()
def downgrade():
    """Run the rollback half of all_commands, newest first, in one transaction."""
    ctx = context.get_context()
    session = sessionmaker(bind=ctx.bind)()
    connection = session.connection()
    try:
        for _forward, rollback in reversed(all_commands):
            if isinstance(rollback, str):
                # Raw SQL batch: run each ';'-separated statement on its own.
                for statement in rollback.split(';'):
                    if statement.strip():
                        connection.execute(statement)
            elif isinstance(rollback, list):
                for statement in rollback:
                    connection.execute(statement)
            else:
                rollback()
    except:  # noqa: E722 -- roll back on *any* failure, then re-raise.
        session.rollback()
        raise
    session.commit()
| {
"repo_name": "eubr-bigsea/tahiti",
"path": "migrations/versions/29ecca388884_adding_geographic_projection_converter.py",
"copies": "1",
"size": "12035",
"license": "apache-2.0",
"hash": -3683108800045878300,
"line_mean": 32.5642458101,
"line_max": 116,
"alpha_frac": 0.5771471372,
"autogenerated": false,
"ratio": 3.614921780986763,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.968254201091103,
"avg_score": 0.0019053814551463842,
"num_lines": 358
} |
## Adding ./getresults to the Python path so that the modules in folder can be imported
import sys
sys.path.insert(0, './getresults')
import datetime
from flightsearch import flightsearch, flightresult
import os
import uuid
import time
from pprint import pprint
def main():
    """Search flights YYZ -> LHR on fixed dates and pretty-print every result."""
    # Hard-coded criteria; the original interactive prompts are kept for reference.
    departure = 'YYZ'  # input("Enter departure city or airport code, e.g. Toronto or YYZ:\n")
    departure_date = '2017-04-26'  # input("Enter departure date and time, e.g. 2017-03-31 12:00:\n")
    arrival = 'LHR'  # input("Enter arrival city or airport code, e.g. London or LHR:\n")
    arrival_date = '2017-05-26'  # input("Enter arrival date and time, e.g. 2017-03-31 20:00:\n")

    search = flightsearch(searchuuid=uuid.uuid4(), searchbegintime=time.time(),
                          flyfrom=departure, datefrom=departure_date,
                          flyto=arrival, dateto=arrival_date)
    results = aggregatedflights(search)
    search.searchendtime = time.time()

    for provider_results in results.values():
        for flight in provider_results:
            pprint(vars(flight))
def aggregatedflights(flightsearch):
    """Run getresult() from every ./getresults/get*.py module and collect the output.

    Returns a dict keyed by module name, passed through sortbyprice().
    """
    plugin_dir = './getresults'
    collected = {}
    for entry in os.listdir(plugin_dir):
        if not (entry.startswith("get") and entry.endswith(".py")):
            continue
        module_name = entry.split('.')[0]
        plugin = __import__(module_name)
        collected[module_name] = plugin.getresult(flightsearch)
    return sortbyprice(collected)
def sortbyprice(flightresult):
    ## Coming soon
    # TODO: sort aggregated results by price; currently an identity pass-through.
    return flightresult
# Script entry point.
if __name__ == '__main__':
    main()
| {
"repo_name": "brahul90/cheapflights",
"path": "cheapflights/main.py",
"copies": "1",
"size": "1721",
"license": "apache-2.0",
"hash": -392951166487776300,
"line_mean": 32.7450980392,
"line_max": 104,
"alpha_frac": 0.6647298083,
"autogenerated": false,
"ratio": 3.6931330472103006,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.972672674741716,
"avg_score": 0.02622722161862816,
"num_lines": 51
} |
"""Adding groups for devices
Revision ID: 38f3c80e9932
Revises: 8d64bce23c6b
Create Date: 2017-06-08 10:18:53.432112
"""
import textwrap
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '38f3c80e9932'
down_revision = '8d64bce23c6b'
branch_labels = None
depends_on = None
def upgrade():
    """
    Create the ``group`` table (group_name -> device_list) and the
    ``public.upsert_group`` plpgsql helper that inserts or updates a row
    and returns the number of rows affected.
    :return:
    """
    op.create_table('group',
                    sa.Column('group_name', sa.String(length=128), nullable=False),
                    sa.Column('device_list', sa.String(length=1024), nullable=False),
                    sa.PrimaryKeyConstraint('group_name', name=op.f('group_pkey'))
                    )
    # Insert-or-update helper using PostgreSQL's ON CONFLICT upsert.
    op.execute(textwrap.dedent("""
        CREATE OR REPLACE FUNCTION public.upsert_group(p_group_name character varying, p_device_list character varying)
          RETURNS integer AS
        $BODY$
        DECLARE num_rows integer;
        BEGIN
            INSERT INTO public.group AS gro (group_name, device_list)
            VALUES (p_group_name, p_device_list)
            ON CONFLICT (group_name) DO UPDATE
            SET
              device_list = p_device_list
            WHERE gro.group_name = p_group_name;
            GET DIAGNOSTICS num_rows = ROW_COUNT;
            RETURN num_rows;
        END;
        $BODY$
          LANGUAGE plpgsql VOLATILE
          COST 100;"""))
def downgrade():
    """
    Drop the upsert_group function and the group table (reverse of upgrade).
    :return:
    """
    op.execute(textwrap.dedent("""DROP FUNCTION public.upsert_group(character varying, character varying);"""))
    op.drop_table('group')
| {
"repo_name": "intel-ctrlsys/actsys",
"path": "datastore/datastore/database_schema/schema_migration/versions/38f3c80e9932_add_groups.py",
"copies": "1",
"size": "1613",
"license": "apache-2.0",
"hash": -7233913711876846000,
"line_mean": 27.298245614,
"line_max": 119,
"alpha_frac": 0.6057036578,
"autogenerated": false,
"ratio": 3.5844444444444443,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46901481022444447,
"avg_score": null,
"num_lines": null
} |
"""Adding header column
Revision ID: ddcb08ebb9e1
Revises: 1b4006c4f120
Create Date: 2016-11-24 07:26:09.796411
"""
# revision identifiers, used by Alembic.
revision = 'ddcb08ebb9e1'
down_revision = '1b4006c4f120'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy_utils.types import TSVectorType
from sqlalchemy_searchable import make_searchable
import sqlalchemy_utils
# Patch in knowledge of the citext type, so it reflects properly.
from sqlalchemy.dialects.postgresql.base import ischema_names
import citext
import queue
import datetime
from sqlalchemy.dialects.postgresql import ENUM
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.dialects.postgresql import TSVECTOR
ischema_names['citext'] = citext.CIText
from sqlalchemy.dialects import postgresql
def upgrade():
    """Add the nullable resolved_title text column to nu_resolved_outbound."""
    title_column = sa.Column('resolved_title', sa.Text(), nullable=True)
    op.add_column('nu_resolved_outbound', title_column)
def downgrade():
    """Remove the resolved_title column added by upgrade()."""
    op.drop_column('nu_resolved_outbound', 'resolved_title')
| {
"repo_name": "fake-name/ReadableWebProxy",
"path": "alembic/versions/00022_ddcb08ebb9e1_adding_header_column.py",
"copies": "1",
"size": "1218",
"license": "bsd-3-clause",
"hash": 9065852317941188000,
"line_mean": 27.3255813953,
"line_max": 96,
"alpha_frac": 0.7660098522,
"autogenerated": false,
"ratio": 3.5510204081632653,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48170302603632653,
"avg_score": null,
"num_lines": null
} |
"""Adding Hints and Unlocks tables
Revision ID: c7225db614c1
Revises: d6514ec92738
Create Date: 2017-03-23 01:31:43.940187
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c7225db614c1'
down_revision = 'd6514ec92738'
branch_labels = None
depends_on = None
def upgrade():
    """Create the hints and unlocks tables."""
    hint_columns = [
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('type', sa.Integer(), nullable=True),
        sa.Column('chal', sa.Integer(), nullable=True),
        sa.Column('hint', sa.Text(), nullable=True),
        sa.Column('cost', sa.Integer(), nullable=True),
    ]
    op.create_table(
        'hints',
        *hint_columns,
        sa.ForeignKeyConstraint(['chal'], ['challenges.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    unlock_columns = [
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('teamid', sa.Integer(), nullable=True),
        sa.Column('date', sa.DateTime(), nullable=True),
        sa.Column('itemid', sa.Integer(), nullable=True),
        sa.Column('model', sa.String(length=32), nullable=True),
    ]
    op.create_table(
        'unlocks',
        *unlock_columns,
        sa.ForeignKeyConstraint(['teamid'], ['teams.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
def downgrade():
    """Drop unlocks then hints (reverse of creation order)."""
    for table_name in ('unlocks', 'hints'):
        op.drop_table(table_name)
| {
"repo_name": "semprix/CTFIgniter",
"path": "payload/CTFd/migrations/versions/c7225db614c1_adding_hints_and_unlocks_tables.py",
"copies": "2",
"size": "1395",
"license": "apache-2.0",
"hash": 5785097236911143000,
"line_mean": 29.3260869565,
"line_max": 65,
"alpha_frac": 0.6559139785,
"autogenerated": false,
"ratio": 3.3859223300970873,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5041836308597087,
"avg_score": null,
"num_lines": null
} |
"""Adding HPO columns
Revision ID: 3d4a6433e26d
Revises: ae11bf43cab2
Create Date: 2017-04-28 09:55:09.728226
"""
import model.utils
import sqlalchemy as sa
from alembic import op
from rdr_service.participant_enums import OrganizationType
# revision identifiers, used by Alembic.
revision = "3d4a6433e26d"
down_revision = "ae11bf43cab2"
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade routine for *engine_name*."""
    globals()["upgrade_" + engine_name]()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade routine for *engine_name*."""
    globals()["downgrade_" + engine_name]()
def upgrade_metrics():
    """No metrics-database changes in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No metrics-database changes in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def upgrade_rdr():
    """Add display_name and organization_type columns to the hpo table."""
    new_columns = (
        sa.Column("display_name", sa.String(length=255), nullable=True),
        sa.Column("organization_type", model.utils.Enum(OrganizationType), nullable=True),
    )
    for new_column in new_columns:
        op.add_column("hpo", new_column)
def downgrade_rdr():
    """Drop the hpo columns added by upgrade_rdr()."""
    for column_name in ("organization_type", "display_name"):
        op.drop_column("hpo", column_name)
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/3d4a6433e26d_adding_hpo_columns.py",
"copies": "1",
"size": "1333",
"license": "bsd-3-clause",
"hash": -3842122934584527400,
"line_mean": 24.6346153846,
"line_max": 107,
"alpha_frac": 0.6781695424,
"autogenerated": false,
"ratio": 3.4444444444444446,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9620655297385755,
"avg_score": 0.0003917378917378918,
"num_lines": 52
} |
"""Adding in a suspicious crash table
Revision ID: 2c03d8ea0a50
Revises: 49bf379b5a8
Create Date: 2013-08-09 18:46:42.618063
"""
# revision identifiers, used by Alembic.
revision = '2c03d8ea0a50'
down_revision = '49bf379b5a8'
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
from sqlalchemy import types
from sqlalchemy.sql import table, column
class CITEXT(types.UserDefinedType):
    """SQLAlchemy shim for the PostgreSQL CITEXT type; values pass through unchanged."""

    name = 'citext'

    def get_col_spec(self):
        return 'CITEXT'

    def bind_processor(self, dialect):
        def passthrough(value):
            return value
        return passthrough

    def result_processor(self, dialect, coltype):
        def passthrough(value):
            return value
        return passthrough

    def __repr__(self):
        return "citext"
class JSON(types.UserDefinedType):
    """SQLAlchemy shim for the PostgreSQL JSON type; values pass through unchanged."""

    name = 'json'

    def get_col_spec(self):
        return 'JSON'

    def bind_processor(self, dialect):
        def passthrough(value):
            return value
        return passthrough

    def result_processor(self, dialect, coltype):
        def passthrough(value):
            return value
        return passthrough

    def __repr__(self):
        return "json"
def upgrade():
    """Create the suspicious_crash_signatures table."""
    signature_columns = [
        sa.Column(u'suspicious_crash_signature_id', sa.INTEGER()),
        sa.Column(u'signature_id', sa.INTEGER()),
        sa.Column(u'report_date', sa.TIMESTAMP(timezone=True)),
    ]
    op.create_table(u'suspicious_crash_signatures', *signature_columns)
def downgrade():
    """Drop the suspicious_crash_signatures table."""
    op.drop_table(u'suspicious_crash_signatures')
| {
"repo_name": "rhelmer/socorro-lib",
"path": "alembic/versions/2c03d8ea0a50_adding_in_a_suspicio.py",
"copies": "16",
"size": "1349",
"license": "mpl-2.0",
"hash": -8538420468336132000,
"line_mean": 21.8644067797,
"line_max": 66,
"alpha_frac": 0.6790214974,
"autogenerated": false,
"ratio": 3.503896103896104,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""adding index for questionnaire response
Revision ID: 625fa91e6bac
Revises: 88ad431e793f, 241803b2c2d2
Create Date: 2021-05-21 09:14:49.739249
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.engine import Connection
import rdr_service.model.utils
from rdr_service.participant_enums import PhysicalMeasurementsStatus, QuestionnaireStatus, OrderStatus
from rdr_service.participant_enums import WithdrawalStatus, WithdrawalReason, SuspensionStatus, QuestionnaireDefinitionStatus
from rdr_service.participant_enums import EnrollmentStatus, Race, SampleStatus, OrganizationType, BiobankOrderStatus
from rdr_service.participant_enums import OrderShipmentTrackingStatus, OrderShipmentStatus
from rdr_service.participant_enums import MetricSetType, MetricsKey, GenderIdentity
from rdr_service.model.base import add_table_history_table, drop_table_history_table
from rdr_service.model.code import CodeType
from rdr_service.model.site_enums import SiteStatus, EnrollingStatus, DigitalSchedulingStatus, ObsoleteStatus
# revision identifiers, used by Alembic.
revision = '625fa91e6bac'
down_revision = ('88ad431e793f', '241803b2c2d2')
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade routine for *engine_name*."""
    globals()["upgrade_" + engine_name]()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade routine for *engine_name*."""
    globals()["downgrade_" + engine_name]()
def upgrade_rdr():
    """Create idx_created_q_id unless it already exists (it was made by hand in prod)."""
    conn: Connection = op.get_bind()
    already_present = conn.scalar(
        "show index from questionnaire_response where key_name = 'idx_created_q_id'")
    if not already_present:
        op.create_index('idx_created_q_id', 'questionnaire_response',
                        ['questionnaire_id', 'created'], unique=False)
def downgrade_rdr():
    """Drop the idx_created_q_id index from questionnaire_response."""
    op.drop_index('idx_created_q_id', table_name='questionnaire_response')
def upgrade_metrics():
    """No metrics-database changes in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No metrics-database changes in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/625fa91e6bac_adding_index_for_questionnaire_response.py",
"copies": "1",
"size": "2130",
"license": "bsd-3-clause",
"hash": -8380484888443779000,
"line_mean": 34.5,
"line_max": 125,
"alpha_frac": 0.765258216,
"autogenerated": false,
"ratio": 3.5738255033557045,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4839083719355704,
"avg_score": null,
"num_lines": null
} |
"""Adding indexing to zips in detached_award_financial_assistance table
Revision ID: 9589687eea88
Revises: 97bf80bdd459
Create Date: 2017-09-12 13:45:54.398890
"""
# revision identifiers, used by Alembic.
revision = '9589687eea88'
down_revision = '97bf80bdd459'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade routine for *engine_name*."""
    globals()["upgrade_" + engine_name]()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade routine for *engine_name*."""
    globals()["downgrade_" + engine_name]()
def upgrade_data_broker():
    """Index the two legal-entity zip columns on detached_award_financial_assistance."""
    for zip_column in ('legal_entity_zip5', 'legal_entity_zip_last4'):
        op.create_index(op.f('ix_detached_award_financial_assistance_%s' % zip_column),
                        'detached_award_financial_assistance', [zip_column], unique=False)
def downgrade_data_broker():
    """Drop the zip-code indexes added by upgrade_data_broker()."""
    for zip_column in ('legal_entity_zip_last4', 'legal_entity_zip5'):
        op.drop_index(op.f('ix_detached_award_financial_assistance_%s' % zip_column),
                      table_name='detached_award_financial_assistance')
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/9589687eea88_adding_indexing_to_zips_in_detached_.py",
"copies": "1",
"size": "1365",
"license": "cc0-1.0",
"hash": -5734497318855159000,
"line_mean": 31.5,
"line_max": 171,
"alpha_frac": 0.7296703297,
"autogenerated": false,
"ratio": 3.1307339449541285,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4360404274654128,
"avg_score": null,
"num_lines": null
} |
"""Adding index to created_date column for the vulnerability table and pubished_date for the nvd_jsons table.
Revision ID: 5dd1d09f15fe
Revises: 32ded3390554
Create Date: 2019-07-03 18:21:52.819263
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "5dd1d09f15fe"
down_revision = "32ded3390554"
branch_labels = None
depends_on = None
def upgrade():
    """Index vulnerability.date_created and cve.nvd_jsons.published_date."""
    op.create_index(op.f("ix_vulnerability_date_created"), "vulnerability",
                    ["date_created"], unique=False)
    op.create_index("idx_nvd_jsons_published_date", "nvd_jsons",
                    ["published_date"], unique=False, schema="cve")
def downgrade():
    """Drop both indexes created by upgrade(), newest first."""
    op.drop_index("idx_nvd_jsons_published_date", table_name="nvd_jsons", schema="cve")
    op.drop_index(op.f("ix_vulnerability_date_created"), table_name="vulnerability")
| {
"repo_name": "google/vulncode-db",
"path": "migrations/versions/5dd1d09f15fe_adding_index_to_created_date_column_for_.py",
"copies": "1",
"size": "1108",
"license": "apache-2.0",
"hash": -7754917636383371000,
"line_mean": 26.0243902439,
"line_max": 109,
"alpha_frac": 0.6525270758,
"autogenerated": false,
"ratio": 3.3373493975903616,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4489876473390362,
"avg_score": null,
"num_lines": null
} |
"""Adding index to region. Dropping item.cloud.
Revision ID: 2ce75615b24d
Revises: d08d0b37788a
Create Date: 2016-11-15 20:31:27.393066
"""
# revision identifiers, used by Alembic.
revision = '2ce75615b24d'
down_revision = 'd08d0b37788a'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Recreate the exceptions FKs with ON DELETE CASCADE, index item.region, drop item.cloud."""
    for constraint_name in (u'exceptions_tech_id_fkey', u'exceptions_item_id_fkey',
                            u'exceptions_account_id_fkey'):
        op.drop_constraint(constraint_name, 'exceptions', type_='foreignkey')
    op.create_foreign_key(None, 'exceptions', 'account', ['account_id'], ['id'], ondelete='CASCADE')
    op.create_foreign_key(None, 'exceptions', 'item', ['item_id'], ['id'], ondelete='CASCADE')
    op.create_foreign_key(None, 'exceptions', 'technology', ['tech_id'], ['id'], ondelete='CASCADE')
    op.create_index('ix_item_region', 'item', ['region'], unique=False)
    op.drop_column('item', 'cloud')
def downgrade():
    """Restore item.cloud, drop the region index, and restore the non-cascading FKs."""
    op.add_column('item', sa.Column('cloud', sa.VARCHAR(length=32), autoincrement=False, nullable=True))
    op.drop_index('ix_item_region', table_name='item')
    for constraint_name in (u'exceptions_tech_id_fkey', u'exceptions_item_id_fkey',
                            u'exceptions_account_id_fkey'):
        op.drop_constraint(constraint_name, 'exceptions', type_='foreignkey')
    op.create_foreign_key(u'exceptions_account_id_fkey', 'exceptions', 'account', ['account_id'], ['id'])
    op.create_foreign_key(u'exceptions_item_id_fkey', 'exceptions', 'item', ['item_id'], ['id'])
    op.create_foreign_key(u'exceptions_tech_id_fkey', 'exceptions', 'technology', ['tech_id'], ['id'])
### end Alembic commands ### | {
"repo_name": "stackArmor/security_monkey",
"path": "migrations/versions/2ce75615b24d_.py",
"copies": "3",
"size": "1905",
"license": "apache-2.0",
"hash": -6272917109000821000,
"line_mean": 46.65,
"line_max": 105,
"alpha_frac": 0.6829396325,
"autogenerated": false,
"ratio": 3.261986301369863,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5444925933869863,
"avg_score": null,
"num_lines": null
} |
"""Adding initial tables
Revision ID: 4f310004f218
Revises:
Create Date: 2015-10-23 16:08:21.396455
"""
# revision identifiers, used by Alembic.
revision = '4f310004f218'
down_revision = None
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the initial ``reservations`` and ``servers`` tables."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('reservations',
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('id', sa.Integer(), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('servers',
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('uuid', sa.String(length=36), nullable=False),
    sa.Column('name', sa.String(), nullable=True),
    sa.Column('cpu_count', sa.Integer(), nullable=True),
    sa.Column('local_drive_capacity', sa.Integer(), nullable=True),
    sa.Column('psu_capacity', sa.Integer(), nullable=True),
    sa.Column('psu_size', sa.String(length=36), nullable=True),
    sa.Column('memory_mb', sa.Integer(), nullable=True),
    sa.Column('cpu_architecture', sa.String(), nullable=True),
    sa.Column('driver_name', sa.String(), nullable=True),
    sa.Column('deploy_kernel', sa.String(), nullable=True),
    sa.Column('deploy_ramdisk', sa.String(), nullable=True),
    sa.Column('ipmi_address', sa.String(), nullable=True),
    sa.Column('ipmi_password', sa.String(), nullable=True),
    # NOTE(review): 'impi_username' / 'impi_priv_level' look like typos of
    # 'ipmi_...'. Left unchanged: a shipped migration must match the model
    # and existing databases; rename, if desired, in a follow-up revision.
    sa.Column('impi_username', sa.String(), nullable=True),
    sa.Column('impi_priv_level', sa.String(), nullable=True),
    sa.Column('ipmi_mac_address', sa.String(), nullable=True),
    sa.Column('reservation_id', sa.Integer(), nullable=True),
    sa.Column('deployed', sa.Boolean(), nullable=True),
    sa.ForeignKeyConstraint(['reservation_id'], ['reservations.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('ipmi_mac_address', name='uniq_servers0impmimacaddress'),
    sa.UniqueConstraint('name', name='uniq_servers0name'),
    sa.UniqueConstraint('uuid', name='uniq_servers0uuid')
    )
    ### end Alembic commands ###
def downgrade():
    """Drop both tables; 'servers' first since it references 'reservations'."""
    ### commands auto generated by Alembic - please adjust! ###
    for table_name in ('servers', 'reservations'):
        op.drop_table(table_name)
    ### end Alembic commands ###
| {
"repo_name": "softlayer/ironic-inventory-integrator",
"path": "ironic_inventory/db/sqlalchemy/alembic/versions/4f310004f218_adding_initial_tables.py",
"copies": "1",
"size": "2415",
"license": "apache-2.0",
"hash": -9049334719141657000,
"line_mean": 37.9516129032,
"line_max": 81,
"alpha_frac": 0.6732919255,
"autogenerated": false,
"ratio": 3.4598853868194843,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46331773123194847,
"avg_score": null,
"num_lines": null
} |
"""Adding in models for certificate sources
Revision ID: 1ff763f5b80b
Revises: 4dc5ddd111b8
Create Date: 2015-08-01 15:24:20.412725
"""
# revision identifiers, used by Alembic.
revision = '1ff763f5b80b'
down_revision = '4dc5ddd111b8'
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
def upgrade():
    """Create the ``sources`` table and its certificate association table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('sources',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('label', sa.String(length=32), nullable=True),
    sa.Column('options', sqlalchemy_utils.types.json.JSONType(), nullable=True),
    sa.Column('description', sa.Text(), nullable=True),
    sa.Column('plugin_name', sa.String(length=32), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('certificate_source_associations',
    sa.Column('source_id', sa.Integer(), nullable=True),
    sa.Column('certificate_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['certificate_id'], ['certificates.id'], ondelete='cascade'),
    # FIX: this constraint previously referenced 'destinations.id' -- an
    # apparent copy/paste from the destinations migration. source_id rows
    # belong to the 'sources' table created above. Databases that already
    # ran the old revision need a follow-up migration to repair the FK.
    sa.ForeignKeyConstraint(['source_id'], ['sources.id'], ondelete='cascade')
    )
    ### end Alembic commands ###
def downgrade():
    """Drop the association table, then the sources table it references."""
    ### commands auto generated by Alembic - please adjust! ###
    for table_name in ('certificate_source_associations', 'sources'):
        op.drop_table(table_name)
    ### end Alembic commands ###
| {
"repo_name": "rhoml/lemur",
"path": "lemur/migrations/versions/1ff763f5b80b_.py",
"copies": "9",
"size": "1363",
"license": "apache-2.0",
"hash": 3409049383786499600,
"line_mean": 31.4523809524,
"line_max": 89,
"alpha_frac": 0.693323551,
"autogenerated": false,
"ratio": 3.57742782152231,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.010029981362622457,
"num_lines": 42
} |
"""adding january cope fields
Revision ID: ebb897e84b1b
Revises: cab7fdee2895
Create Date: 2020-12-07 13:07:17.786985
"""
from alembic import op
import sqlalchemy as sa
import rdr_service.model.utils
from rdr_service.participant_enums import PhysicalMeasurementsStatus, QuestionnaireStatus, OrderStatus
from rdr_service.participant_enums import WithdrawalStatus, WithdrawalReason, SuspensionStatus, QuestionnaireDefinitionStatus
from rdr_service.participant_enums import EnrollmentStatus, Race, SampleStatus, OrganizationType, BiobankOrderStatus
from rdr_service.participant_enums import OrderShipmentTrackingStatus, OrderShipmentStatus
from rdr_service.participant_enums import MetricSetType, MetricsKey, GenderIdentity
from rdr_service.model.base import add_table_history_table, drop_table_history_table
from rdr_service.model.code import CodeType
from rdr_service.model.site_enums import SiteStatus, EnrollingStatus, DigitalSchedulingStatus, ObsoleteStatus
# revision identifiers, used by Alembic.
revision = 'ebb897e84b1b'
down_revision = 'cab7fdee2895'
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific routine, e.g. ``upgrade_rdr``."""
    globals()["upgrade_{}".format(engine_name)]()
def downgrade(engine_name):
    """Dispatch to the engine-specific routine, e.g. ``downgrade_rdr``."""
    globals()["downgrade_{}".format(engine_name)]()
def upgrade_rdr():
    # ### commands auto generated by Alembic - please adjust! ###
    # Three new COPE-January columns on participant_summary, all nullable.
    new_columns = (
        ('questionnaire_on_cope_jan',
         rdr_service.model.utils.Enum(QuestionnaireStatus)),
        ('questionnaire_on_cope_jan_authored',
         rdr_service.model.utils.UTCDateTime()),
        ('questionnaire_on_cope_jan_time',
         rdr_service.model.utils.UTCDateTime()),
    )
    for column_name, column_type in new_columns:
        op.add_column('participant_summary',
                      sa.Column(column_name, column_type, nullable=True))
    # ### end Alembic commands ###
def downgrade_rdr():
    # ### commands auto generated by Alembic - please adjust! ###
    # Drop the COPE-January columns in reverse order of creation.
    for column_name in ('questionnaire_on_cope_jan_time',
                        'questionnaire_on_cope_jan_authored',
                        'questionnaire_on_cope_jan'):
        op.drop_column('participant_summary', column_name)
    # ### end Alembic commands ###
def upgrade_metrics():
    """No metrics-database changes in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No metrics-database changes in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/ebb897e84b1b_adding_january_cope_fields.py",
"copies": "1",
"size": "2428",
"license": "bsd-3-clause",
"hash": 5715112283738173000,
"line_mean": 36.9375,
"line_max": 146,
"alpha_frac": 0.7549423394,
"autogenerated": false,
"ratio": 3.523947750362845,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47788900897628445,
"avg_score": null,
"num_lines": null
} |
"""Adding jira ticket metadata to a user inputted ticket"""
from dateutil import parser
from jira import JIRA
import os
import re
JIRA_USER = os.environ['JIRA_USER']
JIRA_AUTH = os.environ['JIRA_AUTH']
OPTIONS = {
'server': 'https://trifacta.atlassian.net'
}
DISPLAY_KEYS = ['summary', 'description', 'status', 'priority',
'assignee', 'reporter', 'created_at', 'updated']
def extract_repeated_field(array, field_name, merge_fields=True):
    """The structure of JIRA's issues contains arrays of dictionaries. Here
    we have a simple helper to extract a field from each and send back the
    values, either joined with commas or as a list.

    :param list array: list of dicts (or None)
    :param string field_name: key to pull from each dict
    :param bool merge_fields: join values with ',' when True
    :returns: None if *array* is None; a comma-joined string when
        *merge_fields* is True; otherwise a list of values
    """
    if array is None:
        return None
    # FIX: the old map/filter version returned a lazy filter object on
    # Python 3 when merge_fields was False (a list on Python 2). The
    # comprehension always yields a concrete list, matching the Py2 intent.
    values = [entry[field_name] for entry in array
              if entry.get(field_name) is not None]
    if merge_fields:
        return ','.join(values)
    return values
def preprocess(issue):
    """Takes a single issue pulled from a JIRA query and
    extracts the relevant information. Organizing it into a python
    dict
    :param jira.resource.Issue issue:
    :returns: (*dict*)
    """
    raw = issue.raw['fields']
    # customfield_11204 carries the bug cause (optional; only its 'value'
    # sub-key is kept).
    if raw.get('customfield_11204', None) is not None:
        bug_cause = raw['customfield_11204']['value']
    else:
        bug_cause = None
    return {
        'id': issue.key,
        'summary': raw['summary'],
        'description': raw['description'],
        # NOTE(review): assignee/priority/status/reporter are indexed without
        # None checks -- an unassigned issue would raise TypeError here.
        'assignee': raw['assignee']['displayName'],
        'components': extract_repeated_field(raw['components'], 'name'),
        'affected_versions': extract_repeated_field(raw['versions'], 'name'),
        'fix_versions': extract_repeated_field(raw['fixVersions'], 'name'),
        'priority': raw['priority']['name'],
        'status': raw['status']['name'],
        'resolution': raw['resolution'],
        # Timestamps are normalized to YYYY-MM-DD strings.
        'created_at': parser.parse(raw['created']).strftime('%Y-%m-%d'),
        'reporter': raw['reporter']['displayName'],
        'issue_type': raw['issuetype']['name'],
        # customfield_11203: architecture component(s); merge_fields=False so
        # a list of values is returned rather than a joined string.
        'architecture_component': extract_repeated_field(raw['customfield_11203'], 'value', False),
        'bug_cause': bug_cause,
        'updated': parser.parse(raw['updated']).strftime('%Y-%m-%d')
    }
def short_preprocess(issue):
    """Condense a JIRA issue into the few fields used for compact posting
    by rosencrantz.

    :param jira.resource.Issue issue:
    :returns: (*dict*) with 'summary', 'assignee' and 'status' keys
    """
    fields = issue.raw['fields']
    return {
        'summary': fields['summary'],
        'assignee': fields['assignee']['displayName'],
        'status': fields['status']['name'],
    }
def run_short_pull(ticket, conn):
    """Return a one-line summary string for *ticket* fetched via *conn*.

    :param str ticket: issue key, e.g. 'ABC-123'
    :param jira.JIRA conn: authenticated JIRA client
    :returns: (*str*) '<ticket>: <summary>, <status>, <assignee>, <url>'
    """
    issue = short_preprocess(conn.issue('{0}'.format(ticket)))
    for key in issue.keys():
        # NOTE(review): str.encode returns bytes on Python 3; this module
        # appears to target Python 2, where the result stays a byte string.
        issue[key] = issue[key].encode('ascii', 'ignore')
    issue['url'] = 'https://trifacta.atlassian.net/browse/{0}'.format(ticket)
    issue['ticket'] = ticket
    return'{ticket}: {summary}, {status}, {assignee}, {url}'.format(**issue)
def run_long_pull(ticket, conn):
    """Build a multi-line detail string for *ticket* (one line per
    DISPLAY_KEYS entry, plus the browse URL).

    :param str ticket: issue key, e.g. 'ABC-123'
    :param jira.JIRA conn: authenticated JIRA client
    :returns: (*str*) newline-joined lines
    """
    issue = preprocess(conn.issue('{0}'.format(ticket)))
    lines = [ticket]
    for key in DISPLAY_KEYS:
        # NOTE(review): .encode() assumes Python 2 str semantics; on Python 3
        # a bytes repr would be embedded. Also raises if a field is None.
        issue_data = '{0}: {1}'.format(key, issue.get(key, '').encode('ascii', 'ignore'))
        # Keep at most the first three '\n\r'-separated segments of the value.
        new_line = ' '.join(issue_data.split('\n\r')[0:3])
        lines.append(new_line)
    lines.append('url: https://trifacta.atlassian.net/browse/{0}'.format(ticket))
    return '\n'.join(lines)
def query_ticket(searchterm):
    """Fetch a ticket summary; the 'rs' prefix selects the long format.

    :param searchterm: (context, ticket) pair from on_message
    :returns: (*str*) formatted ticket line(s)
    """
    context, ticket = searchterm
    conn = JIRA(OPTIONS, basic_auth=(JIRA_USER, JIRA_AUTH))
    if context.replace(' ', '') == 'rs':
        return run_long_pull(ticket, conn)
    return run_short_pull(ticket, conn)
def on_message(msg, server):
    """Slack hook: when the message text contains a JIRA issue key
    (e.g. 'ABC-123'), reply with its summary; an 'rs ' prefix before the
    key requests the long format (see query_ticket).
    """
    text = msg.get("text", "")
    # Group 1: optional word prefix (e.g. 'rs '); group 2: PROJECT-123 key.
    match = re.findall(r"(\w+\s+|\b)([A-Z]+-\d+)", text)
    if not match:
        return
    searchterm = match[0]
    # NOTE(review): Python-2-era encode of each captured group; on Python 3
    # this would hand bytes objects to query_ticket.
    return query_ticket(map(lambda term: term.encode('utf8'), searchterm))
| {
"repo_name": "michaelMinar/limbo",
"path": "limbo/plugins/jira_tickets.py",
"copies": "1",
"size": "4002",
"license": "mit",
"hash": 3648066231693054000,
"line_mean": 32.0743801653,
"line_max": 99,
"alpha_frac": 0.6169415292,
"autogenerated": false,
"ratio": 3.5604982206405693,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46774397498405695,
"avg_score": null,
"num_lines": null
} |
# adding keyword into index
def add_to_index(index, keyword, url):
    """Record *url* under *keyword* in *index* (a list of [keyword, urls]
    pairs), creating the entry on first sight and avoiding duplicate urls.
    """
    for word, urls in index:
        if word == keyword:
            if url not in urls:
                urls.append(url)
            return
    index.append([keyword, [url]])
# find keyword on the index
def lookup(index, keyword):
    """Return the url list stored for *keyword*, or [] when absent."""
    return next((urls for word, urls in index if word == keyword), [])
# adding all the keyword from a webpage to index
def add_page_to_index(index, url, content):
    """Index every whitespace-separated word of *content* under *url*."""
    for token in content.split():
        add_to_index(index, token, url)
# split content into what split_items character contains
def split_string(source, split_items):
    """Split *source* on any character found in *split_items*, discarding
    empty pieces, and return the pieces as a list.
    """
    pieces = []
    current = ''
    for ch in source:
        if ch in split_items:
            if current:
                pieces.append(current)
            current = ''
        else:
            current += ch
    if current:
        pieces.append(current)
    return pieces
return output | {
"repo_name": "ulmalana/ubsearch",
"path": "src/indexing.py",
"copies": "2",
"size": "1097",
"license": "unlicense",
"hash": 7799228222930132000,
"line_mean": 28.6756756757,
"line_max": 56,
"alpha_frac": 0.5952597995,
"autogenerated": false,
"ratio": 3.8090277777777777,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5404287577277778,
"avg_score": null,
"num_lines": null
} |
"""Adding Kmodes in Spark platform
Revision ID: 86699b2e6672
Revises: b7442131c810
Create Date: 2020-09-09 14:44:51.915594
"""
from alembic import context
from alembic import op
from sqlalchemy import String, Integer, Text
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import table, column
import json
# revision identifiers, used by Alembic.
from migrations.utils import TablesV1 as T1
# revision identifiers, used by Alembic.
revision = '86699b2e6672'
down_revision = 'b7442131c810'
branch_labels = None
depends_on = None
# Fixed primary-key offsets for the rows this migration inserts; the
# matching DELETE statements in all_commands reuse the same values.
OFFSET_OP = 138
OFFSET_FORM = 152
OFFSET_PORT = 322
OFFSET_FIELD = 575
def _insert_operation():
    """Insert the K-Modes operation row via the shared TablesV1 helper."""
    # T1.operation(...) builds an operation row from (id, slug, enabled?,
    # type, icon) -- exact column semantics live in migrations.utils;
    # confirm there before relying on them.
    T1.execute(T1.OPERATION, rows=[
        T1.operation(OFFSET_OP, 'k-modes-clustering-model', 1, 'TRANSFORMATION',
                     'fa-braille')
    ])
def _insert_operation_translation():
    """Insert EN/PT name and description rows for the new operation."""
    translation_tb = table(
        'operation_translation',
        column('id', Integer),
        column('locale', String),
        column('name', String),
        column('description', String), )
    raw_rows = [
        (OFFSET_OP, 'en', 'K-Modes Clustering',
         'Uses a distributed version of K-Modes algorithm '
         '(Ensemble-based incremental distributed K-Modes) for clustering'),
        (OFFSET_OP, 'pt', 'Agrupamento por K-Modes',
         'Usa uma versão distribuída do algoritmo K-Modes '
         '(Ensemble-based incremental distributed K-Modes) para agrupamento'),
    ]
    op.bulk_insert(
        translation_tb,
        [{col.name: value for col, value in zip(translation_tb.columns, row)}
         for row in raw_rows])
def _insert_operation_platform():
    """Attach the operation to platform id 1."""
    platform_tb = table(
        'operation_platform',
        column('operation_id', Integer),
        column('platform_id', Integer))
    op.bulk_insert(
        platform_tb,
        [{'operation_id': OFFSET_OP, 'platform_id': 1}])
def _insert_operation_port():
    """Create one input port (training data) and two outputs (model, data)."""
    port_tb = table(
        'operation_port',
        column('id', Integer),
        column('type', String),
        column('tags', String),
        column('operation_id', Integer),
        column('order', Integer),
        column('multiplicity', String),
        column('slug', String))
    raw_rows = [
        (OFFSET_PORT, 'INPUT', None, OFFSET_OP, 1, 'ONE', 'train input data'),
        (OFFSET_PORT + 1, 'OUTPUT', None, OFFSET_OP, 2, 'MANY', 'model'),
        (OFFSET_PORT + 2, 'OUTPUT', None, OFFSET_OP, 1, 'MANY', 'output data'),
    ]
    op.bulk_insert(
        port_tb,
        [{col.name: value for col, value in zip(port_tb.columns, row)}
         for row in raw_rows])
def _insert_operation_port_translation():
    """Insert EN/PT captions for the three ports."""
    tr_tb = table(
        'operation_port_translation',
        column('id', Integer),
        column('locale', String),
        column('name', String),
        column('description', String), )
    raw_rows = [
        (OFFSET_PORT, 'en', 'train input data', 'Train input data'),
        (OFFSET_PORT, 'pt', 'entrada do treino', 'Train input data'),
        (OFFSET_PORT + 1, 'en', 'model', 'Output model'),
        (OFFSET_PORT + 1, 'pt', 'modelo', 'Output model'),
        (OFFSET_PORT + 2, 'en', 'output data', 'Output data'),
        (OFFSET_PORT + 2, 'pt', 'dados de saída', 'Dados de saída'),
    ]
    op.bulk_insert(
        tr_tb,
        [{col.name: value for col, value in zip(tr_tb.columns, row)}
         for row in raw_rows])
def _insert_operation_port_interface_operation_port():
    """Bind each port to an interface (ids 1 and 2; semantics defined in
    the seeded operation_port_interface table elsewhere)."""
    iface_tb = table(
        'operation_port_interface_operation_port',
        column('operation_port_id', Integer),
        column('operation_port_interface_id', Integer))
    raw_rows = [
        (OFFSET_PORT, 1),
        (OFFSET_PORT + 1, 2),
        (OFFSET_PORT + 2, 1),
    ]
    op.bulk_insert(
        iface_tb,
        [{col.name: value for col, value in zip(iface_tb.columns, row)}
         for row in raw_rows])
def _insert_operation_category_operation():
    """Tag the operation with category ids 1, 19 and 8."""
    cat_tb = table(
        'operation_category_operation',
        column('operation_id', Integer),
        column('operation_category_id', Integer))
    op.bulk_insert(
        cat_tb,
        [{'operation_id': OFFSET_OP, 'operation_category_id': category_id}
         for category_id in (1, 19, 8)])
def _insert_operation_form():
    """Create the single enabled 'execution' form for the operation."""
    form_tb = table(
        'operation_form',
        column('id', Integer),
        column('enabled', Integer),
        column('order', Integer),
        column('category', String), )
    raw_rows = [(OFFSET_FORM, 1, 1, 'execution')]
    op.bulk_insert(
        form_tb,
        [{col.name: value for col, value in zip(form_tb.columns, row)}
         for row in raw_rows])
def _insert_operation_operation_form():
    """Link the operation to shared forms 10, 41 and 110 plus its own form."""
    link_tb = table(
        'operation_operation_form',
        column('operation_id', Integer),
        column('operation_form_id', Integer))
    op.bulk_insert(
        link_tb,
        [{'operation_id': OFFSET_OP, 'operation_form_id': form_id}
         for form_id in (10, 41, 110, OFFSET_FORM)])
def _insert_operation_form_translation():
    """Insert EN/PT captions for the execution form."""
    tr_tb = table(
        'operation_form_translation',
        column('id', Integer),
        column('locale', String),
        column('name', String)
    )
    raw_rows = [
        (OFFSET_FORM, 'en', 'Execution'),
        (OFFSET_FORM, 'pt', 'Execução'),
    ]
    op.bulk_insert(
        tr_tb,
        [{col.name: value for col, value in zip(tr_tb.columns, row)}
         for row in raw_rows])
def _insert_operation_form_field():
    """Insert the four execution-form fields: number of clusters, max
    iterations, and two dissimilarity-function dropdowns.
    """
    tb = table(
        'operation_form_field',
        column('id', Integer),
        column('name', String),
        column('type', String),
        column('required', Integer),
        column('order', Integer),
        column('default', Text),
        column('suggested_widget', String),
        column('values_url', String),
        column('values', String),
        column('scope', String),
        column('form_id', Integer), )
    # Both dissimilarity dropdowns share the same option list.
    dissimilarity_options = json.dumps([
        {'key': 'frequency', 'value': 'Frequency-based dissimilarity'},
        {'key': 'hamming', 'value': 'Hamming distance'},
        {'key': 'all_frequency', 'value':
            'All Frequency-based dissimilarity for modes.'},
    ])
    # FIX: the last two rows previously carried a 12th value (a stray
    # trailing None) against an 11-column table; zip() silently dropped it.
    # Row order matches tb's columns exactly.
    data = [
        [OFFSET_FIELD, 'number_of_clusters', "INTEGER", 1, 3, None,
         "integer", None, None, "EXECUTION", OFFSET_FORM],
        [OFFSET_FIELD + 1, 'max_iterations', 'INTEGER', 1, 4, 10, 'integer',
         None, None, 'EXECUTION', OFFSET_FORM],
        [OFFSET_FIELD + 2, "similarity", "TEXT", 0, 5, "hamming", "dropdown",
         None, dissimilarity_options, "EXECUTION", OFFSET_FORM],
        [OFFSET_FIELD + 3, "metamodessimilarity", "TEXT", 0, 6, "hamming",
         "dropdown", None, dissimilarity_options, "EXECUTION", OFFSET_FORM],
    ]
    columns = [c.name for c in tb.columns]
    rows = [dict(list(zip(columns, row))) for row in data]
    op.bulk_insert(tb, rows)
def _insert_operation_form_field_translation():
    """Insert EN/PT labels and help texts for the four form fields."""
    tr_tb = table(
        'operation_form_field_translation',
        column('id', Integer),
        column('locale', String),
        column('label', String),
        column('help', String), )
    raw_rows = [
        [OFFSET_FIELD, "en", "Number of clusters (K)",
         "Number of clusters (K)"],
        [OFFSET_FIELD, "pt",
         "Quantidade de agrupamentos (K)", "Quantidade de agrupamentos (K)"],
        [OFFSET_FIELD + 1, "en", "Max iterations", "Max iterations"],
        [OFFSET_FIELD + 1, "pt", "Número máx. de iterações",
         "Número máx. de iterações"],
        [OFFSET_FIELD + 2, "en", "Dissimilarity function", "Distance function"],
        [OFFSET_FIELD + 2, "pt", "Função de dissimilaridade",
         "Função de dissimilaridade"],
        [OFFSET_FIELD + 3, "en", "Dissimilarity function for Metamodes",
         "Distance function for Metamodes"],
        [OFFSET_FIELD + 3, "pt", "Função de dissimilaridade para os Metamodes",
         "Função de dissimilaridade para os metamodes"],
    ]
    op.bulk_insert(
        tr_tb,
        [{col.name: value for col, value in zip(tr_tb.columns, row)}
         for row in raw_rows])
all_commands = [
(_insert_operation,
'DELETE FROM operation WHERE id BETWEEN {s} AND {s}'.format(s=OFFSET_OP)),
(_insert_operation_translation,
'DELETE FROM operation_translation WHERE id BETWEEN {s} AND {s}'.format(
s=OFFSET_OP)),
(_insert_operation_port,
'DELETE FROM operation_port '
'WHERE (operation_id BETWEEN {s} AND {s})'.format(s=OFFSET_OP)),
(_insert_operation_port_translation,
'DELETE FROM operation_port_translation WHERE id IN '
'(SELECT id FROM operation_port '
' WHERE (operation_id BETWEEN {s} AND {s}))'.format(s=OFFSET_OP)),
(_insert_operation_port_interface_operation_port,
'DELETE FROM operation_port_interface_operation_port '
'WHERE operation_port_id IN (SELECT id FROM operation_port '
'WHERE operation_id BETWEEN {s} AND {s})'.format(s=OFFSET_OP)),
(_insert_operation_category_operation,
'DELETE FROM operation_category_operation '
'WHERE operation_id BETWEEN {s} AND {s}'.format(s=OFFSET_OP)),
(_insert_operation_platform,
'DELETE FROM operation_platform WHERE operation_id BETWEEN {s} AND {s}'
.format(s=OFFSET_OP)),
(_insert_operation_form,
'DELETE FROM operation_form WHERE id BETWEEN {f} AND {f}'
.format(f=OFFSET_FORM)),
(_insert_operation_operation_form, 'DELETE FROM operation_operation_form '
'WHERE operation_id BETWEEN {s} AND {s}'
.format(s=OFFSET_OP)),
(_insert_operation_form_translation,
'DELETE FROM operation_form_translation WHERE id BETWEEN {f} AND {f}'
.format(f=OFFSET_FORM)),
(_insert_operation_form_field, """DELETE FROM operation_form_field
WHERE id BETWEEN {} AND {}""".format(OFFSET_FIELD, OFFSET_FIELD+3)),
(_insert_operation_form_field_translation,
'DELETE FROM operation_form_field_translation WHERE id BETWEEN {} AND {}'
.format(OFFSET_FIELD, OFFSET_FIELD+3)),
]
def upgrade():
    """Apply every insert command in order inside one session.

    Each entry of ``all_commands`` is an (apply, revert) pair; the apply
    element may be a SQL string, a list of SQL strings, or a callable.
    On any failure the session is rolled back and the error re-raised.
    """
    ctx = context.get_context()
    session = sessionmaker(bind=ctx.bind)()
    connection = session.connection()
    try:
        for cmd in all_commands:
            if isinstance(cmd[0], str):
                connection.execute(cmd[0])
            elif isinstance(cmd[0], list):
                for row in cmd[0]:
                    connection.execute(row)
            else:
                cmd[0]()
    except BaseException:  # was a bare 'except:'; keep rollback-on-anything
        session.rollback()
        raise
    session.commit()
def downgrade():
    """Run the revert SQL of every command, newest first, with MySQL
    foreign-key checks disabled for the duration.

    On any failure the session is rolled back and the error re-raised.
    """
    ctx = context.get_context()
    session = sessionmaker(bind=ctx.bind)()
    connection = session.connection()
    try:
        connection.execute('SET FOREIGN_KEY_CHECKS=0;')
        for cmd in reversed(all_commands):
            if isinstance(cmd[1], str):
                connection.execute(cmd[1])
            elif isinstance(cmd[1], list):
                for row in cmd[1]:
                    connection.execute(row)
            else:
                cmd[1]()
        connection.execute('SET FOREIGN_KEY_CHECKS=1;')
    except BaseException:  # was a bare 'except:'; keep rollback-on-anything
        # NOTE(review): FOREIGN_KEY_CHECKS is not re-enabled on this path;
        # tolerable only because the session/connection is then discarded.
        session.rollback()
        raise
    session.commit()
| {
"repo_name": "eubr-bigsea/tahiti",
"path": "migrations/versions/86699b2e6672_adding_kmodes_in_spark_platform.py",
"copies": "1",
"size": "11370",
"license": "apache-2.0",
"hash": -4853850015995537000,
"line_mean": 30.4349030471,
"line_max": 80,
"alpha_frac": 0.5890905886,
"autogenerated": false,
"ratio": 3.600253807106599,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9685863471173042,
"avg_score": 0.0006961849067112224,
"num_lines": 361
} |
"""Adding legal entity, place of performance state name columns to all FSRS tables
Revision ID: c75d57250419
Revises: 3ff0ad501645
Create Date: 2018-03-22 12:52:12.495263
"""
# revision identifiers, used by Alembic.
revision = 'c75d57250419'
down_revision = '3ff0ad501645'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Dispatch to the engine-specific routine, e.g. ``upgrade_data_broker``."""
    globals()["upgrade_{}".format(engine_name)]()
def downgrade(engine_name):
    """Dispatch to the engine-specific routine, e.g. ``downgrade_data_broker``."""
    globals()["downgrade_{}".format(engine_name)]()
def upgrade_data_broker():
    # Every FSRS table gains two nullable state-name columns: one for the
    # entity's own address and one for its principal place of performance.
    new_columns = (
        ('fsrs_grant', 'awardee_address_state_name'),
        ('fsrs_grant', 'principle_place_state_name'),
        ('fsrs_procurement', 'company_address_state_name'),
        ('fsrs_procurement', 'principle_place_state_name'),
        ('fsrs_subcontract', 'company_address_state_name'),
        ('fsrs_subcontract', 'principle_place_state_name'),
        ('fsrs_subgrant', 'awardee_address_state_name'),
        ('fsrs_subgrant', 'principle_place_state_name'),
    )
    for table_name, column_name in new_columns:
        op.add_column(table_name, sa.Column(column_name, sa.String(), nullable=True))
    ### end Alembic commands ###
def downgrade_data_broker():
    # Drop the state-name columns added by upgrade_data_broker, in the
    # reverse order of their creation.
    dropped_columns = (
        ('fsrs_subgrant', 'principle_place_state_name'),
        ('fsrs_subgrant', 'awardee_address_state_name'),
        ('fsrs_subcontract', 'principle_place_state_name'),
        ('fsrs_subcontract', 'company_address_state_name'),
        ('fsrs_procurement', 'principle_place_state_name'),
        ('fsrs_procurement', 'company_address_state_name'),
        ('fsrs_grant', 'principle_place_state_name'),
        ('fsrs_grant', 'awardee_address_state_name'),
    )
    for table_name, column_name in dropped_columns:
        op.drop_column(table_name, column_name)
    ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/c75d57250419_FSRS_add_state_name.py",
"copies": "1",
"size": "2010",
"license": "cc0-1.0",
"hash": -1079617505647484700,
"line_mean": 40.0204081633,
"line_max": 106,
"alpha_frac": 0.7129353234,
"autogenerated": false,
"ratio": 3.0828220858895707,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42957574092895706,
"avg_score": null,
"num_lines": null
} |
# Accepted spellings for each of the four arithmetic operations.
ADDING_LIST = ("+", "plus", "add")
SUBTRACTING_LIST = ("-", "minus", "subtract")
MULTIPLYING_LIST = ("*", "multiply", "times")
DIVIDING_LIST = ("/", "//", "divide")
def question_handler():
    """Prompt the user for two integers and an operation keyword.

    Results are communicated through the module globals ``num1``, ``num2``
    and ``decision`` rather than return values; typing "quit" exits.
    """
    global num1, num2, decision
    # while blocks that check if the input is of valid type
    while True:
        try:
            num1 = int(input("Enter the first number you want to work with: "))
        # if the input cannot be parsed as an integer this happens
        except ValueError:
            print("Why are you playing around?")
            continue
        else:
            break
    while True:
        # NOTE(review): rebuilt on every loop iteration; could live at module
        # level next to ADDING_LIST etc.
        ALL_OPERATIONS = ("+", "plus", "add", "-", "minus", "subtract", "*",
                          "multiply", "times", "/", "//", "divide")
        decision = input("Enter desired operation: ")
        if(decision == "quit"):
            quit()
        elif(decision in ALL_OPERATIONS):
            break
        else:
            print('This is not a valid operation here, choose one: ', end="")
            print(ALL_OPERATIONS, end="")
            print(" or type in \"quit\" to exit")
    while True:
        try:
            num2 = int(input("Enter the second number: "))
        except ValueError:
            print("Hey stop that!")
            continue
        else:
            break
# making use of classes here just for fun
class MathFunctions:
    """Namespace of arithmetic operations; each prints "Result: <value>".

    FIX: the methods took no ``self`` and were only usable as class-level
    calls (``MathFunctions.adding(a, b)``); calling through an instance
    silently passed the instance as the first operand. ``@staticmethod``
    makes both call styles work while keeping existing callers unchanged.
    """

    @staticmethod
    def adding(num1, num2):
        """Print the sum of num1 and num2."""
        result = num1 + num2
        print("Result: " + format(result))

    @staticmethod
    def subtracting(num1, num2):
        """Print num1 minus num2."""
        result = num1 - num2
        print("Result: " + format(result))

    @staticmethod
    def multiplying(num1, num2):
        """Print the product of num1 and num2."""
        result = num1 * num2
        print("Result: " + format(result))

    @staticmethod
    def dividing(num1, num2):
        """Print num1 / num2; exits the whole program on division by zero."""
        try:
            result = num1 / num2
        except ZeroDivisionError:
            # NOTE(review): quit() aborts the interpreter from library code;
            # raising would be cleaner, but main()'s flow relies on the exit.
            print(r"Life tip nr 42: 'never, ever, neeeever divide by zero.'")
            quit()
        print("Result: " + format(result))
def main():
    """Gather inputs via question_handler, then route the module-global
    ``decision``/``num1``/``num2`` to the matching MathFunctions operation."""
    question_handler()
    dispatch = (
        (ADDING_LIST, MathFunctions.adding),
        (SUBTRACTING_LIST, MathFunctions.subtracting),
        (MULTIPLYING_LIST, MathFunctions.multiplying),
        (DIVIDING_LIST, MathFunctions.dividing),
    )
    for keywords, operation in dispatch:
        if decision in keywords:
            operation(num1, num2)
            return
    print("Unexpected situation, calling mentors...")
if __name__ == '__main__':
main()
| {
"repo_name": "Mdlkxzmcp/various_python",
"path": "random_old_stuff/Calculator/calcm.py",
"copies": "1",
"size": "2573",
"license": "mit",
"hash": 304124944774029600,
"line_mean": 30.3780487805,
"line_max": 79,
"alpha_frac": 0.5635445006,
"autogenerated": false,
"ratio": 4.039246467817897,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00045167118337850043,
"num_lines": 82
} |
"""Adding logging database tables.
Revision ID: e3691fc396e9
Revises: 932525b82f1a
Create Date: 2016-11-28 13:15:46.995219
"""
# revision identifiers, used by Alembic.
revision = 'e3691fc396e9'
down_revision = '932525b82f1a'
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
def upgrade():
    """Create the 'logs' audit table (who viewed which certificate key, when)."""
    ### commands auto generated by Alembic - please adjust! ###
    schema = [
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('certificate_id', sa.Integer(), nullable=True),
        sa.Column('log_type', sa.Enum('key_view', name='log_type'), nullable=False),
        sa.Column('logged_at', sqlalchemy_utils.types.arrow.ArrowType(), server_default=sa.text('now()'), nullable=False),
        sa.Column('user_id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['certificate_id'], ['certificates.id'], ),
        sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
        sa.PrimaryKeyConstraint('id'),
    ]
    op.create_table('logs', *schema)
    ### end Alembic commands ###
def downgrade():
    """Remove the 'logs' audit table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('logs')
    ### end Alembic commands ###
| {
"repo_name": "nevins-b/lemur",
"path": "lemur/migrations/versions/e3691fc396e9_.py",
"copies": "2",
"size": "1122",
"license": "apache-2.0",
"hash": -5728768157761082000,
"line_mean": 31.0571428571,
"line_max": 118,
"alpha_frac": 0.6800356506,
"autogenerated": false,
"ratio": 3.4207317073170733,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5100767357917074,
"avg_score": null,
"num_lines": null
} |
"""Adding logging database tables.
Revision ID: e3691fc396e9
Revises: 932525b82f1a
Create Date: 2016-11-28 13:15:46.995219
"""
# revision identifiers, used by Alembic.
revision = "e3691fc396e9"
down_revision = "932525b82f1a"
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
def upgrade():
    """Create the 'logs' audit table (who viewed which certificate key, when)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "logs",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("certificate_id", sa.Integer(), nullable=True),
        sa.Column("log_type", sa.Enum("key_view", name="log_type"), nullable=False),
        # logged_at defaults to the database's now() at insert time.
        sa.Column(
            "logged_at",
            sqlalchemy_utils.types.arrow.ArrowType(),
            server_default=sa.text("now()"),
            nullable=False,
        ),
        sa.Column("user_id", sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(["certificate_id"], ["certificates.id"]),
        sa.ForeignKeyConstraint(["user_id"], ["users.id"]),
        sa.PrimaryKeyConstraint("id"),
    )
    ### end Alembic commands ###
def downgrade():
    """Remove the 'logs' audit table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table("logs")
    ### end Alembic commands ###
| {
"repo_name": "Netflix/lemur",
"path": "lemur/migrations/versions/e3691fc396e9_.py",
"copies": "1",
"size": "1220",
"license": "apache-2.0",
"hash": -3285387512164763600,
"line_mean": 28.0476190476,
"line_max": 84,
"alpha_frac": 0.6254098361,
"autogenerated": false,
"ratio": 3.6417910447761193,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9739768456984297,
"avg_score": 0.005486484778364574,
"num_lines": 42
} |
"""Adding LoginHistory table
Revision ID: 15ba1cfb5eef
Revises: 585fd56a9833
Create Date: 2015-05-31 19:52:02.437000
"""
# revision identifiers, used by Alembic.
revision = '15ba1cfb5eef'
down_revision = '585fd56a9833'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the login_history table recording every login attempt."""
    ### commands auto generated by Alembic - please adjust! ###
    schema = [
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('login_date', sa.DateTime(), nullable=False),
        sa.Column('ip', sa.Unicode(length=45), nullable=False),
        sa.Column('username', sa.Unicode(length=50), nullable=False),
        sa.Column('successful', sa.Boolean(), nullable=False),
        sa.Column('failure_reason', sa.Unicode(length=20), nullable=True),
        sa.PrimaryKeyConstraint('id'),
    ]
    op.create_table('login_history', *schema)
    ### end Alembic commands ###
def downgrade():
    """Remove the login_history table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('login_history')
    ### end Alembic commands ###
| {
"repo_name": "AllieDeford/radremedy",
"path": "remedy/rad/migrations/versions/15ba1cfb5eef_adding_loginhistory_table.py",
"copies": "3",
"size": "1035",
"license": "bsd-3-clause",
"hash": -440864203176410200,
"line_mean": 16.875,
"line_max": 70,
"alpha_frac": 0.6483091787,
"autogenerated": false,
"ratio": 3.306709265175719,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5455018443875719,
"avg_score": null,
"num_lines": null
} |
"""Adding measurement
Revision ID: 0ffdadea0b92
Revises: ffcd82a35890
Create Date: 2017-08-30 17:33:54.104062
"""
import model.utils
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "0ffdadea0b92"
down_revision = "ffcd82a35890"
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the per-engine upgrade function (multi-database layout)."""
    target = "upgrade_%s" % engine_name
    globals()[target]()
def downgrade(engine_name):
    """Dispatch to the per-engine downgrade function (multi-database layout)."""
    target = "downgrade_%s" % engine_name
    globals()[target]()
def upgrade_metrics():
    """No-op: this revision makes no changes to the metrics database."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No-op: nothing to revert in the metrics database for this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def upgrade_rdr():
    """Create the ``measurement`` and ``measurement_to_qualifier`` tables.

    ``measurement`` is self-referential: both ``parent_id`` and
    ``qualifier_id`` point back at ``measurement.measurement_id``.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "measurement",
        # ID is assigned by the application, not the DB (autoincrement=False).
        sa.Column("measurement_id", sa.BIGINT(), autoincrement=False, nullable=False),
        sa.Column("physical_measurements_id", sa.Integer(), nullable=False),
        sa.Column("code_system", sa.String(length=255), nullable=False),
        sa.Column("code_value", sa.String(length=255), nullable=False),
        sa.Column("measurement_time", model.utils.UTCDateTime(), nullable=False),
        sa.Column("body_site_code_system", sa.String(length=255), nullable=True),
        sa.Column("body_site_code_value", sa.String(length=255), nullable=True),
        sa.Column("value_string", sa.String(length=1024), nullable=True),
        sa.Column("value_decimal", sa.Float(), nullable=True),
        sa.Column("value_unit", sa.String(length=255), nullable=True),
        sa.Column("value_code_system", sa.String(length=255), nullable=True),
        sa.Column("value_code_value", sa.String(length=255), nullable=True),
        sa.Column("value_datetime", model.utils.UTCDateTime(), nullable=True),
        sa.Column("parent_id", sa.BIGINT(), nullable=True),
        sa.Column("qualifier_id", sa.BIGINT(), nullable=True),
        sa.ForeignKeyConstraint(["parent_id"], ["measurement.measurement_id"]),
        sa.ForeignKeyConstraint(["physical_measurements_id"], ["physical_measurements.physical_measurements_id"]),
        sa.ForeignKeyConstraint(["qualifier_id"], ["measurement.measurement_id"]),
        sa.PrimaryKeyConstraint("measurement_id"),
    )
    # Association table linking a measurement to its qualifier measurements.
    op.create_table(
        "measurement_to_qualifier",
        sa.Column("measurement_id", sa.BIGINT(), nullable=False),
        sa.Column("qualifier_id", sa.BIGINT(), nullable=False),
        sa.ForeignKeyConstraint(["measurement_id"], ["measurement.measurement_id"]),
        sa.ForeignKeyConstraint(["qualifier_id"], ["measurement.measurement_id"]),
        sa.PrimaryKeyConstraint("measurement_id", "qualifier_id"),
    )
    # ### end Alembic commands ###
def downgrade_rdr():
    """Drop the measurement tables; association table first to satisfy FKs."""
    # ### commands auto generated by Alembic - please adjust! ###
    for table_name in ("measurement_to_qualifier", "measurement"):
        op.drop_table(table_name)
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/0ffdadea0b92_adding_measurement.py",
"copies": "1",
"size": "2991",
"license": "bsd-3-clause",
"hash": -1138750319665422800,
"line_mean": 37.3461538462,
"line_max": 114,
"alpha_frac": 0.6629889669,
"autogenerated": false,
"ratio": 3.7670025188916876,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49299914857916877,
"avg_score": null,
"num_lines": null
} |
"""Adding model for aggregate review scores.
Revision ID: 51589067470d
Revises: 5736a0d64e8b
Create Date: 2015-08-06 22:53:47.325000
"""
# revision identifiers, used by Alembic.
revision = '51589067470d'
down_revision = '5736a0d64e8b'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create ``resource_review_score``: per-(resource, population) rating aggregates."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('resource_review_score',
        # Composite key columns; ids come from the source rows, so no autoincrement.
        sa.Column('resource_id', sa.Integer(), nullable=False, autoincrement=False),
        sa.Column('population_id', sa.Integer(), nullable=False, autoincrement=False),
        sa.Column('num_ratings', sa.Integer(), nullable=False),
        sa.Column('first_reviewed', sa.DateTime(), nullable=False),
        sa.Column('last_reviewed', sa.DateTime(), nullable=False),
        # Averages are nullable: a group may have no ratings of a given kind.
        sa.Column('rating_avg', sa.Float(), nullable=True),
        sa.Column('staff_rating_avg', sa.Float(), nullable=True),
        sa.Column('intake_rating_avg', sa.Float(), nullable=True),
        sa.ForeignKeyConstraint(['resource_id'], ['resource.id'], ),
        sa.PrimaryKeyConstraint('resource_id', 'population_id')
    )
    ### end Alembic commands ###
def downgrade():
    """Remove the aggregate review score table."""
    score_table = 'resource_review_score'
    op.drop_table(score_table)
| {
"repo_name": "radremedy/radremedy",
"path": "remedy/rad/migrations/versions/51589067470d_adding_model_for_aggregate_review_scores.py",
"copies": "2",
"size": "1348",
"license": "mpl-2.0",
"hash": -5929218986774691000,
"line_mean": 20.8474576271,
"line_max": 82,
"alpha_frac": 0.6491097923,
"autogenerated": false,
"ratio": 3.5567282321899736,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5205838024489974,
"avg_score": null,
"num_lines": null
} |
"""Adding model for news posts.
Revision ID: 1726eb3f34a5
Revises: 58620e8a20f9
Create Date: 2016-02-29 23:06:23.028000
"""
# revision identifiers, used by Alembic.
revision = '1726eb3f34a5'
down_revision = '58620e8a20f9'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the ``news`` table for site news posts."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('news',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('subject', sa.Unicode(length=500), nullable=False),
        sa.Column('author', sa.Unicode(length=500), nullable=False),
        sa.Column('summary', sa.UnicodeText(), nullable=False),
        sa.Column('body', sa.UnicodeText(), nullable=False),
        sa.Column('date_created', sa.DateTime(), nullable=False),
        # Allows drafting/hiding a post without deleting it.
        sa.Column('visible', sa.Boolean(), nullable=False),
        sa.PrimaryKeyConstraint('id')
    )
    ### end Alembic commands ###
def downgrade():
    """Drop the news posts table, reverting this revision."""
    news_table = 'news'
    op.drop_table(news_table)
| {
"repo_name": "radioprotector/radremedy",
"path": "remedy/rad/migrations/versions/1726eb3f34a5_adding_model_for_news_posts.py",
"copies": "2",
"size": "1071",
"license": "mpl-2.0",
"hash": 6004445564891134000,
"line_mean": 17.1754385965,
"line_max": 65,
"alpha_frac": 0.6423902894,
"autogenerated": false,
"ratio": 3.2652439024390243,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4907634191839024,
"avg_score": null,
"num_lines": null
} |
"""adding_more_profile_customization
Revision ID: 50bb91d1615
Revises: 13234475ad5
Create Date: 2015-05-11 21:04:17.237732
"""
# revision identifiers, used by Alembic.
revision = '50bb91d1615'
down_revision = '13234475ad5'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add four optional social-profile URL columns to the ``user`` table."""
    for column_name in ('github_url', 'gplus_url', 'linkedin_url', 'twitter_url'):
        op.add_column('user', sa.Column(column_name, sa.String(length=200), nullable=True))
def downgrade():
    """Remove the social-profile URL columns (reverse order of creation)."""
    for column_name in ('twitter_url', 'linkedin_url', 'gplus_url', 'github_url'):
        op.drop_column('user', column_name)
| {
"repo_name": "Depado/MarkDownBlog",
"path": "migrations/versions/50bb91d1615_adding_more_profile_customization.py",
"copies": "1",
"size": "1028",
"license": "mit",
"hash": -8544914740630118000,
"line_mean": 31.125,
"line_max": 90,
"alpha_frac": 0.6828793774,
"autogenerated": false,
"ratio": 3.1925465838509317,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9340785226616208,
"avg_score": 0.00692814692694471,
"num_lines": 32
} |
"""Adding new columns to DetachedAwardProcurement table
Revision ID: cbe6511d50ef
Revises: c048f1dcdfa2
Create Date: 2017-08-14 09:35:03.931423
"""
# revision identifiers, used by Alembic.
revision = 'cbe6511d50ef'
down_revision = 'c048f1dcdfa2'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Look up and run the upgrade routine for the given engine."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Look up and run the downgrade routine for the given engine."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_data_broker():
    """Add ten nullable Text vendor/detail columns to detached_award_procurement."""
    new_columns = (
        'annual_revenue',
        'division_name',
        'division_number_or_office',
        'number_of_employees',
        'vendor_alternate_name',
        'vendor_alternate_site_code',
        'vendor_enabled',
        'vendor_legal_org_name',
        'vendor_location_disabled_f',
        'vendor_site_code',
    )
    for name in new_columns:
        op.add_column('detached_award_procurement', sa.Column(name, sa.Text(), nullable=True))
def downgrade_data_broker():
    """Drop the vendor/detail columns added by this revision (reverse order)."""
    for name in (
        'vendor_site_code',
        'vendor_location_disabled_f',
        'vendor_legal_org_name',
        'vendor_enabled',
        'vendor_alternate_site_code',
        'vendor_alternate_name',
        'number_of_employees',
        'division_number_or_office',
        'division_name',
        'annual_revenue',
    ):
        op.drop_column('detached_award_procurement', name)
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/cbe6511d50ef_adding_new_columns_to_DetachedAwardProcurement.py",
"copies": "1",
"size": "2552",
"license": "cc0-1.0",
"hash": 2425821080323646000,
"line_mean": 43,
"line_max": 114,
"alpha_frac": 0.7194357367,
"autogenerated": false,
"ratio": 3.267605633802817,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44870413705028167,
"avg_score": null,
"num_lines": null
} |
"""Adding new fields and keys / indices to the vulnerability table to support states.
Revision ID: 611733367157
Revises: 9a935d8fb960
Create Date: 2020-04-25 11:52:22.369350
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = "611733367157"
down_revision = "9a935d8fb960"
branch_labels = None
depends_on = None
def upgrade():
    """Introduce vulnerability versioning and review state.

    Rebuilds the ``vulnerability`` primary key as ``(id, version)``, adds
    reviewer/state fields, and re-creates dependent foreign keys against the
    new composite key. Statement order is significant: FKs must be dropped
    before the PK can be changed, and re-created only afterwards.
    """
    # Drop all foreign keys on vulnerability.id as we intend to update it.
    op.drop_constraint(
        "vulnerability_git_commits_ibfk_1",
        "vulnerability_git_commits",
        type_="foreignkey",
    )
    # To make things simpler we will sever the complete link to vulnerability resources for now.
    op.drop_constraint(
        "vulnerability_resources_ibfk_1", "vulnerability_resources", type_="foreignkey"
    )
    op.drop_column("vulnerability_resources", "vulnerability_details_id")
    # ----------------------------------------------------------------------------------------------------
    # Add new columns to the vulnerability table.
    op.add_column(
        "vulnerability", sa.Column("review_feedback", sa.Text(), nullable=True)
    )
    op.add_column(
        "vulnerability", sa.Column("reviewer_id", sa.Integer(), nullable=True)
    )
    op.create_foreign_key(
        "fk_reviewer_id", "vulnerability", "user", ["reviewer_id"], ["id"]
    )
    # Workflow state for the review process.
    op.add_column(
        "vulnerability",
        sa.Column(
            "state",
            sa.Enum(
                "NEW",
                "READY",
                "IN_REVIEW",
                "REVIEWED",
                "PUBLISHED",
                "ARCHIVED",
                name="vulnerabilitystate",
            ),
            nullable=False,
        ),
    )
    op.add_column("vulnerability", sa.Column("version", sa.Integer(), nullable=False))
    # Update the vulnerability primary key.
    # Remove autoincrement from the PK as there can only be one auto key and it has to be the PK.
    op.alter_column(
        "vulnerability",
        "id",
        existing_type=sa.Integer(),
        autoincrement=False,
        nullable=False,
    )
    op.drop_constraint("id", "vulnerability", type_="primary")
    # Now we can define a new primary key.
    op.create_primary_key("pk", "vulnerability", ["id", "version"])
    # Re-enable auto incrementing for the id column, too.
    op.alter_column(
        "vulnerability",
        "id",
        existing_type=sa.Integer(),
        autoincrement=True,
        nullable=False,
    )
    # ---------------------------------------------------------------------------------------------------
    # A CVE ID can appear multiple times across different versions so we need to remove it's unique constraint.
    op.drop_index("cve_id", table_name="vulnerability")
    op.create_unique_constraint("uk_ver_cve_id", "vulnerability", ["version", "cve_id"])
    op.create_index(
        op.f("ix_vulnerability_cve_id"), "vulnerability", ["cve_id"], unique=False
    )
    # ----------------------------------------------------------------------------------------------------
    # Now that the vulnerability multi column primary key is intact, create the foreign keys again.
    op.add_column(
        "vulnerability_git_commits", sa.Column("version", sa.Integer(), nullable=False)
    )
    op.alter_column(
        "vulnerability_git_commits",
        "vulnerability_details_id",
        existing_type=mysql.INTEGER(display_width=11),
        nullable=False,
    )
    op.create_foreign_key(
        "fk_vuln",
        "vulnerability_git_commits",
        "vulnerability",
        ["vulnerability_details_id", "version"],
        ["id", "version"],
    )
def downgrade():
    """Revert the versioning changes in the exact reverse order of upgrade().

    Restores the single-column ``id`` primary key, the unique ``cve_id``
    index, and the original single-column foreign keys.
    """
    op.drop_constraint("fk_vuln", "vulnerability_git_commits", type_="foreignkey")
    op.alter_column(
        "vulnerability_git_commits",
        "vulnerability_details_id",
        existing_type=mysql.INTEGER(display_width=11),
        nullable=True,
    )
    op.drop_column("vulnerability_git_commits", "version")
    op.drop_index(op.f("ix_vulnerability_cve_id"), table_name="vulnerability")
    op.drop_constraint("uk_ver_cve_id", "vulnerability", type_="unique")
    op.create_index("cve_id", "vulnerability", ["cve_id"], unique=True)
    # Remove autoincrement from the PK as there can only be one auto key and it has to be the PK.
    op.alter_column(
        "vulnerability",
        "id",
        existing_type=sa.Integer(),
        autoincrement=False,
        nullable=False,
    )
    op.drop_constraint("pk", "vulnerability", type_="primary")
    op.create_primary_key("id", "vulnerability", ["id"])
    op.alter_column(
        "vulnerability",
        "id",
        existing_type=sa.Integer(),
        autoincrement=True,
        nullable=False,
    )
    op.drop_column("vulnerability", "version")
    op.drop_column("vulnerability", "state")
    op.drop_constraint("fk_reviewer_id", "vulnerability", type_="foreignkey")
    op.drop_column("vulnerability", "reviewer_id")
    op.drop_column("vulnerability", "review_feedback")
    # Restore the severed vulnerability_resources link.
    op.add_column(
        "vulnerability_resources",
        sa.Column(
            "vulnerability_details_id",
            mysql.INTEGER(display_width=11),
            autoincrement=False,
            nullable=True,
        ),
    )
    op.create_foreign_key(
        "vulnerability_resources_ibfk_1",
        "vulnerability_resources",
        "vulnerability",
        ["vulnerability_details_id"],
        ["id"],
    )
    op.create_foreign_key(
        "vulnerability_git_commits_ibfk_1",
        "vulnerability_git_commits",
        "vulnerability",
        ["vulnerability_details_id"],
        ["id"],
    )
| {
"repo_name": "google/vulncode-db",
"path": "migrations/versions/611733367157_adding_new_fields_and_keys_indices_to_.py",
"copies": "1",
"size": "5685",
"license": "apache-2.0",
"hash": -8386810815507400000,
"line_mean": 33.4545454545,
"line_max": 111,
"alpha_frac": 0.5787159191,
"autogenerated": false,
"ratio": 3.917987594762233,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.999273903549192,
"avg_score": 0.0007928956740625552,
"num_lines": 165
} |
"""Adding new fields to the User model for SECURITY_TRACKABLE
Revision ID: 61a6fd4b4500
Revises: 538eeb160af6
Create Date: 2016-04-23 18:17:47.216434
"""
# revision identifiers, used by Alembic.
revision = '61a6fd4b4500'
down_revision = '538eeb160af6'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add the Flask-Security SECURITY_TRACKABLE columns to the user table."""
    trackable_columns = [
        sa.Column('current_login_at', sa.DateTime(), nullable=True),
        sa.Column('current_login_ip', sa.String(length=45), nullable=True),
        sa.Column('last_login_at', sa.DateTime(), nullable=True),
        sa.Column('last_login_ip', sa.String(length=45), nullable=True),
        sa.Column('login_count', sa.Integer(), nullable=True),
    ]
    for column in trackable_columns:
        op.add_column('user', column)
def downgrade():
    """Drop the SECURITY_TRACKABLE columns (reverse order of addition)."""
    for column_name in ('login_count', 'last_login_ip', 'last_login_at',
                        'current_login_ip', 'current_login_at'):
        op.drop_column('user', column_name)
| {
"repo_name": "stackArmor/security_monkey",
"path": "migrations/versions/61a6fd4b4500_.py",
"copies": "4",
"size": "1195",
"license": "apache-2.0",
"hash": -3193357499461199400,
"line_mean": 33.1428571429,
"line_max": 93,
"alpha_frac": 0.680334728,
"autogenerated": false,
"ratio": 3.153034300791557,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5833369028791557,
"avg_score": null,
"num_lines": null
} |
"""adding new kmeans parameters
Revision ID: 5bf9db6d7909
Revises: a13c4b5cc25f
Create Date: 2020-10-15 10:01:50.734058
"""
from alembic import context, op
from sqlalchemy import String, Integer, Text
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import table, column
import json
# revision identifiers, used by Alembic.
revision = '5bf9db6d7909'
down_revision = 'a13c4b5cc25f'
branch_labels = None
depends_on = None
OFFSET_FIELD = 582
FORM_KMEANS = 27
FORM_KMODES = 152
def _insert_operation_form_field():
    """Insert the two new form fields: k-means 'distance' and k-modes 'fragmentation'."""
    tb = table(
        'operation_form_field',
        column('id', Integer),
        column('name', String),
        column('type', String),
        column('required', Integer),
        column('order', Integer),
        column('default', Text),
        column('suggested_widget', String),
        column('values_url', String),
        column('values', String),
        column('scope', String),
        column('form_id', Integer),
        column('enable_conditions', String),
    )
    # Row values follow the column order declared above.
    data = [
        [OFFSET_FIELD, 'distance', 'TEXT', 0, 9, 'euclidean',
         "dropdown", None, json.dumps([
             {'key': 'euclidean', 'value': 'Euclidean'},
             {'key': 'cosine', 'value': 'Cosine'},
         ]), "EXECUTION", FORM_KMEANS, None],
        [OFFSET_FIELD+1, 'fragmentation', 'INTEGER', 0, 9, None, 'checkbox',
         None, None, 'EXECUTION', FORM_KMODES, None],
    ]
    columns = [c.name for c in tb.columns]
    rows = [dict(list(zip(columns, row))) for row in data]
    op.bulk_insert(tb, rows)
def _insert_operation_form_field_translation():
    """Insert en/pt label+help translations for the two new form fields."""
    tb = table(
        'operation_form_field_translation',
        column('id', Integer),
        column('locale', String),
        column('label', String),
        column('help', String), )
    columns = [c.name for c in tb.columns]
    # One (id, locale) row per field per language.
    data = [
        [OFFSET_FIELD, "en", "Distance Measure", "The distance measure"],
        [OFFSET_FIELD, "pt", "Medida de distância", "A medida de distância"],
        [OFFSET_FIELD+1, "en", "Reduce fragmentation",
         "If enabled, it will reduce the parallelization in favor of the "
         "ability to handle small databases."],
        [OFFSET_FIELD+1, "pt", "Reduzir a fragmentação",
         "Se ativado, irá reduzir a paralelização em favor da capacidade de "
         "lidar com pequenas bases"],
    ]
    rows = [dict(list(zip(columns, row))) for row in data]
    op.bulk_insert(tb, rows)
all_commands = [
(_insert_operation_form_field, """DELETE FROM operation_form_field
WHERE id BETWEEN {} AND {}""".format(OFFSET_FIELD, OFFSET_FIELD+1)),
(_insert_operation_form_field_translation,
'DELETE FROM operation_form_field_translation WHERE id BETWEEN {} AND {}'
.format(OFFSET_FIELD, OFFSET_FIELD+1)),
]
def upgrade():
    """Run every forward command from ``all_commands`` in one session."""
    ctx = context.get_context()
    session = sessionmaker(bind=ctx.bind)()
    connection = session.connection()
    try:
        for cmd in all_commands:
            # Forward element may be a SQL string, a list of SQL strings, or a callable.
            if isinstance(cmd[0], str):
                connection.execute(cmd[0])
            elif isinstance(cmd[0], list):
                for row in cmd[0]:
                    connection.execute(row)
            else:
                cmd[0]()
    except:  # bare except is deliberate: roll back on *any* failure, then re-raise
        session.rollback()
        raise
    session.commit()
def downgrade():
    """Run every revert command from ``all_commands`` in reverse order.

    FK checks are disabled around the deletes so rows can be removed
    regardless of reference order.
    """
    ctx = context.get_context()
    session = sessionmaker(bind=ctx.bind)()
    connection = session.connection()
    try:
        connection.execute('SET FOREIGN_KEY_CHECKS=0;')
        for cmd in reversed(all_commands):
            # Revert element may be a SQL string, a list of SQL strings, or a callable.
            if isinstance(cmd[1], str):
                connection.execute(cmd[1])
            elif isinstance(cmd[1], list):
                for row in cmd[1]:
                    connection.execute(row)
            else:
                cmd[1]()
        connection.execute('SET FOREIGN_KEY_CHECKS=1;')
    except:  # bare except is deliberate: roll back on *any* failure, then re-raise
        session.rollback()
        raise
    session.commit()
| {
"repo_name": "eubr-bigsea/tahiti",
"path": "migrations/versions/5bf9db6d7909_adding_new_kmeans_parameters.py",
"copies": "1",
"size": "3982",
"license": "apache-2.0",
"hash": 716830247547702100,
"line_mean": 29.5769230769,
"line_max": 78,
"alpha_frac": 0.5808805031,
"autogenerated": false,
"ratio": 3.757088846880907,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48379693499809073,
"avg_score": null,
"num_lines": null
} |
"""Adding new regression metrics
Revision ID: 0206bdda81bc
Revises: 2b4ce751c4de
Create Date: 2020-07-27 17:06:02.862772
"""
import json
import pymysql
from alembic import context
from alembic import op
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import table, column
# revision identifiers, used by Alembic.
revision = '0206bdda81bc'
down_revision = '2b4ce751c4de'
branch_labels = None
depends_on = None
# Option lists serialized to JSON and stored in operation_form_field.`values`.
# Each option dict carries a machine-readable "key" plus display labels:
# "value" (default), "en" and "pt".
metrics_evaluation_new = [
    {
        "key": "areaUnderROC",
        "value": "Area under ROC curve (binary classification)",
        "en": "Area under ROC curve(binary classification)",
        "pt": "Área sob a curva ROC (classificação binária)"
    },
    {
        "key": "areaUnderPR",
        "value": "Area under precision-recall curve (binary classification)",
        "en": "Area under precision-recall curve (binary classification)",
        "pt": "Área sob a curva precisão-revocação"
    },
    {
        "key": "f1", "value": "F1 score (multiclass classification)",
        "en": "F1 score (multiclass classification)",
        "pt": "F1"
    },
    {
        "key": "weightedPrecision",
        "value": "Weighted precision (multiclass classification)",
        "en": "Weighted precision (multiclass classification)",
        "pt": "Precisão ponderada"
    },
    {
        "key": "weightedRecall",
        "value": "Weighted recall (multiclass classification)",
        "en": "Weighted recall (multiclass classification)",
        "pt": "Revocação ponderada"
    },
    {
        "key": "accuracy", "value": "Accuracy (multiclass classification)",
        "en": "Accuracy (multiclass classification)",
        "pt": "Acurácia"
    },
    {
        "key": "rmse", "value": "Root mean squared error (regression)",
        "en": "Root mean squared error (regression)",
        "pt": "Raíz do erro quadrático médio"
    },
    {
        # Bug fix: this entry previously read '"mse": "mse"', leaving the
        # option without any "key" and thus unusable in the dropdown.
        "key": "mse", "value": "Mean squared error (regression)",
        "en": "Mean squared error (regression)",
        "pt": "Erro quadrático médio"
    },
    {
        "key": "mae", "value": "Mean absolute error (regression)",
        "en": "Mean absolute error (regression)",
        "pt": "Erro absoluto médio"
    },
    {
        "key": "mape", "value": "Mean absolute percentage error (regression)",
        "en": "Mean absolute percentage error (regression)",
        "pt": "Média Percentual Absoluta do Erro"
    },
    {
        "key": "r2", "value": "Coefficient of determination R2 (regression)",
        "en": "Coefficient of determination R2 (regression)",
        "pt": "Coeficiente de determinação (R2)"
    }
]
# Same option set for the cross-validation operation (form field 487);
# it does not include "mape".
metrics_cross_new = [
    {
        "key": "areaUnderROC",
        "value": "Area under ROC curve (binary classification)",
        "en": "Area under ROC curve(binary classification)",
        "pt": "Área sob a curva ROC (classificação binária)"
    },
    {
        "key": "areaUnderPR",
        "value": "Area under precision-recall curve (binary classification)",
        "en": "Area under precision-recall curve (binary classification)",
        "pt": "Área sob a curva precisão-revocação"
    },
    {
        "key": "f1", "value": "F1 score (multiclass classification)",
        "en": "F1 score (multiclass classification)",
        "pt": "F1"
    },
    {
        "key": "weightedPrecision",
        "value": "Weighted precision (multiclass classification)",
        "en": "Weighted precision (multiclass classification)",
        "pt": "Precisão ponderada"
    },
    {
        "key": "weightedRecall",
        "value": "Weighted recall (multiclass classification)",
        "en": "Weighted recall (multiclass classification)",
        "pt": "Revocação ponderada"
    },
    {
        "key": "accuracy", "value": "Accuracy (multiclass classification)",
        "en": "Accuracy (multiclass classification)",
        "pt": "Acurácia"
    },
    {
        "key": "rmse", "value": "Root mean squared error (regression)",
        "en": "Root mean squared error (regression)",
        "pt": "Raíz do erro quadrático médio"
    },
    {
        # Bug fix: same '"mse": "mse"' typo as above — corrected to "key".
        "key": "mse", "value": "Mean squared error (regression)",
        "en": "Mean squared error (regression)",
        "pt": "Erro quadrático médio"
    },
    {
        "key": "mae", "value": "Mean absolute error (regression)",
        "en": "Mean absolute error (regression)",
        "pt": "Erro absoluto médio"
    },
    {
        "key": "r2", "value": "Coefficient of determination R2 (regression)",
        "en": "Coefficient of determination R2 (regression)",
        "pt": "Coeficiente de determinação (R2)"
    },
]
# Rollback snapshot used by downgrade(). NOTE(review): this deliberately
# preserves the historical '"mse": "mse"' typo so the downgrade restores
# the stored value exactly as it was before this revision — confirm against
# the previous revision before "fixing" it here.
metrics_old = [
    {
        "key": "areaUnderROC",
        "value": "Area under ROC curve (binary classification)",
        "en": "Area under ROC curve(binary classification)",
        "pt": "Área sob a curva ROC (classificação binária)"
    },
    {
        "key": "areaUnderPR",
        "value": "Area under precision-recall curve (binary classification)",
        "en": "Area under precision-recall curve (binary classification)",
        "pt": "Área sob a curva precisão-revocação"
    },
    {
        "key": "f1", "value": "F1 score (multiclass classification)",
        "en": "F1 score (multiclass classification)",
        "pt": "F1"
    },
    {
        "key": "weightedPrecision",
        "value": "Weighted precision (multiclass classification)",
        "en": "Weighted precision (multiclass classification)",
        "pt": "Precisão ponderada"
    },
    {
        "key": "weightedRecall",
        "value": "Weighted recall (multiclass classification)",
        "en": "Weighted recall (multiclass classification)",
        "pt": "Revocação ponderada"
    },
    {
        "key": "accuracy", "value": "Accuracy (multiclass classification)",
        "en": "Accuracy (multiclass classification)",
        "pt": "Acurácia"
    },
    {
        "key": "rmse", "value": "Root mean squared error (regression)",
        "en": "Root mean squared error (regression)",
        "pt": "Raíz do erro quadrático médio"
    },
    {
        "mse": "mse", "value": "Mean squared error (regression)",
        "en": "Mean squared error (regression)",
        "pt": "Erro quadrático médio"
    },
    {
        "key": "mae", "value": "Mean absolute error (regression)",
        "en": "Mean absolute error (regression)",
        "pt": "Erro absoluto médio"
    }
]
all_commands = [
(
"""UPDATE operation_form_field SET `values` = '{}' WHERE `id` = 101
""".format(pymysql.escape_string(json.dumps(metrics_evaluation_new))),
"""UPDATE operation_form_field SET `values` = '{}' WHERE `id` = 101
""".format(pymysql.escape_string(json.dumps(metrics_old)))
),
(
"""UPDATE operation_form_field SET `values` = '{}' WHERE `id` = 487
""".format(pymysql.escape_string(json.dumps(metrics_cross_new))),
"""UPDATE operation_form_field SET `values` = '{}' WHERE `id` = 487
""".format(pymysql.escape_string(json.dumps(metrics_old)))
),
]
def upgrade():
    """Apply every forward command from ``all_commands`` in one session."""
    ctx = context.get_context()
    session = sessionmaker(bind=ctx.bind)()
    connection = session.connection()
    try:
        for cmd in all_commands:
            # Forward element may be a SQL string, a list of SQL strings, or a callable.
            if isinstance(cmd[0], str):
                if cmd[0] != '':
                    connection.execute(cmd[0])
            elif isinstance(cmd[0], list):
                for row in cmd[0]:
                    connection.execute(row)
            else:
                cmd[0]()
    except:  # bare except is deliberate: roll back on *any* failure, then re-raise
        session.rollback()
        raise
    session.commit()
def downgrade():
    """Apply every revert command from ``all_commands`` in reverse order."""
    ctx = context.get_context()
    session = sessionmaker(bind=ctx.bind)()
    connection = session.connection()
    try:
        for cmd in reversed(all_commands):
            # Revert element may be a SQL string, a list of SQL strings, or a callable.
            if isinstance(cmd[1], str):
                if cmd[1] != '':
                    connection.execute(cmd[1])
            elif isinstance(cmd[1], list):
                for row in cmd[1]:
                    connection.execute(row)
            else:
                cmd[1]()
    except:  # bare except is deliberate: roll back on *any* failure, then re-raise
        session.rollback()
        raise
    session.commit()
| {
"repo_name": "eubr-bigsea/tahiti",
"path": "migrations/versions/0206bdda81bc_adding_new_regression_metrics.py",
"copies": "1",
"size": "8113",
"license": "apache-2.0",
"hash": 1328287540297398300,
"line_mean": 31.3453815261,
"line_max": 78,
"alpha_frac": 0.5660541346,
"autogenerated": false,
"ratio": 3.7081031307550645,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9770810545140874,
"avg_score": 0.0006693440428380187,
"num_lines": 249
} |
"""Adding notifications
Revision ID: 4c8915e461b3
Revises: 3b718f59b8ce
Create Date: 2015-07-24 14:34:57.316273
"""
# revision identifiers, used by Alembic.
revision = '4c8915e461b3'
down_revision = '3b718f59b8ce'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
import sqlalchemy_utils
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('notifications',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('label', sa.String(length=128), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('options', sqlalchemy_utils.types.json.JSONType(), nullable=True),
sa.Column('active', sa.Boolean(), nullable=True),
sa.Column('plugin_name', sa.String(length=32), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.drop_column(u'certificates', 'challenge')
op.drop_column(u'certificates', 'csr_config')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column(u'certificates', sa.Column('csr_config', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column(u'certificates', sa.Column('challenge', postgresql.BYTEA(), autoincrement=False, nullable=True))
op.drop_table('notifications')
### end Alembic commands ###
| {
"repo_name": "rpicard/lemur",
"path": "lemur/migrations/versions/4c8915e461b3_.py",
"copies": "9",
"size": "1367",
"license": "apache-2.0",
"hash": -3516471134331367000,
"line_mean": 32.3414634146,
"line_max": 114,
"alpha_frac": 0.7037307974,
"autogenerated": false,
"ratio": 3.49616368286445,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.869989448026445,
"avg_score": null,
"num_lines": null
} |
"""Adding Operator Role
Revision ID: 4a666113c7bb
Revises: 1274ed462010
Create Date: 2018-08-30 13:28:06.836208
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4a666113c7bb'
down_revision = '1274ed462010'
branch_labels = None
depends_on = None
def update_data():
    """Insert the three new settings rows and the Operator role (data-only)."""
    setting_table = sa.sql.table('setting',
        sa.sql.column('id', sa.Integer),
        sa.sql.column('name', sa.String),
        sa.sql.column('value', sa.String),
        sa.sql.column('view', sa.String)
    )
    # add new settings
    op.bulk_insert(setting_table,
        [
            {'id': 44, 'name': 'ldap_operator_group', 'value': '', 'view': 'authentication'},
            {'id': 45, 'name': 'allow_user_create_domain', 'value': 'False', 'view': 'basic'},
            {'id': 46, 'name': 'record_quick_edit', 'value': 'True', 'view': 'basic'},
        ]
    )
    role_table = sa.sql.table('role',
        sa.sql.column('id', sa.Integer),
        sa.sql.column('name', sa.String),
        sa.sql.column('description', sa.String)
    )
    # add new role
    op.bulk_insert(role_table,
        [
            {'id': 3, 'name': 'Operator', 'description': 'Operator'}
        ]
    )
def upgrade():
    """Apply the data-only changes (new settings rows and Operator role)."""
    update_data()
def downgrade():
    """Demote Operator users back to role 2, then delete the role and settings."""
    statements = (
        "UPDATE user SET role_id = 2 WHERE role_id=3",
        "DELETE FROM role WHERE name = 'Operator'",
        "DELETE FROM setting WHERE name = 'ldap_operator_group'",
        "DELETE FROM setting WHERE name = 'allow_user_create_domain'",
    )
    for sql in statements:
        op.execute(sql)
| {
"repo_name": "ngoduykhanh/PowerDNS-Admin",
"path": "migrations/versions/4a666113c7bb_add_operator_role.py",
"copies": "1",
"size": "1605",
"license": "mit",
"hash": 6469592363586452000,
"line_mean": 25.3114754098,
"line_max": 94,
"alpha_frac": 0.599376947,
"autogenerated": false,
"ratio": 3.3718487394957983,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44712256864957983,
"avg_score": null,
"num_lines": null
} |
"""Adding parent duns number and name to (Published)AwardFinancialAssistance
Revision ID: 1fabe0bdd48c
Revises: 6973101b6853
Create Date: 2018-03-27 15:07:45.721751
"""
# revision identifiers, used by Alembic.
revision = '1fabe0bdd48c'
down_revision = '6973101b6853'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Resolve and invoke the engine-specific upgrade function."""
    func_name = "upgrade_%s" % engine_name
    globals()[func_name]()
def downgrade(engine_name):
    """Resolve and invoke the engine-specific downgrade function."""
    func_name = "downgrade_%s" % engine_name
    globals()[func_name]()
def upgrade_data_broker():
    """Add the parent DUNS name/number columns to both FABS tables."""
    # Same column order as the auto-generated commands: legal-entity name
    # first, then unique identifier, per table.
    for table_name in ('award_financial_assistance', 'published_award_financial_assistance'):
        for column_name in ('ultimate_parent_legal_enti', 'ultimate_parent_unique_ide'):
            op.add_column(table_name, sa.Column(column_name, sa.Text(), nullable=True))
def downgrade_data_broker():
    """Drop the parent DUNS columns added by this revision (reverse order)."""
    for table_name in ('published_award_financial_assistance', 'award_financial_assistance'):
        for column_name in ('ultimate_parent_unique_ide', 'ultimate_parent_legal_enti'):
            op.drop_column(table_name, column_name)
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/1fabe0bdd48c_adding_parent_duns_number_and_name_to_.py",
"copies": "1",
"size": "1579",
"license": "cc0-1.0",
"hash": 64505197080038950,
"line_mean": 33.3260869565,
"line_max": 124,
"alpha_frac": 0.7238758708,
"autogenerated": false,
"ratio": 3.2158859470468433,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9301729074407765,
"avg_score": 0.02760654868781555,
"num_lines": 46
} |
"""adding parent id index
Revision ID: f6a9c7e6694b
Revises: d10d998b796b
Create Date: 2018-03-29 09:25:30.500405
"""
# revision identifiers, used by Alembic.
revision = 'f6a9c7e6694b'
down_revision = 'd10d998b796b'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Route to this module's upgrade_<engine_name>() implementation."""
    globals()["upgrade_" + engine_name]()
def downgrade(engine_name):
    """Route to this module's downgrade_<engine_name>() implementation."""
    globals()["downgrade_" + engine_name]()
def upgrade_data_broker():
    """Create non-unique parent_id indexes on the FSRS subaward tables."""
    for table_name in ('fsrs_subcontract', 'fsrs_subgrant'):
        op.create_index(op.f('ix_%s_parent_id' % table_name), table_name,
                        ['parent_id'], unique=False)
def downgrade_data_broker():
    """Drop the parent_id indexes created by this revision (reverse order)."""
    for table_name in ('fsrs_subgrant', 'fsrs_subcontract'):
        op.drop_index(op.f('ix_%s_parent_id' % table_name), table_name=table_name)
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/f6a9c7e6694b_adding_parent_id_index.py",
"copies": "1",
"size": "1092",
"license": "cc0-1.0",
"hash": -2634925203582335000,
"line_mean": 25,
"line_max": 107,
"alpha_frac": 0.6904761905,
"autogenerated": false,
"ratio": 3.016574585635359,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4207050776135359,
"avg_score": null,
"num_lines": null
} |
"""Adding place_of_performance_scope to (certified_)award_financial_assistance table
Revision ID: 229820d5d7b1
Revises: 3fd9a578c9c5
Create Date: 2020-04-23 13:21:38.651516
"""
# revision identifiers, used by Alembic.
revision = '229820d5d7b1'
down_revision = '3fd9a578c9c5'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Route to this module's upgrade_<engine_name>() implementation."""
    globals()["upgrade_" + engine_name]()
def downgrade(engine_name):
    """Route to this module's downgrade_<engine_name>() implementation."""
    globals()["downgrade_" + engine_name]()
def upgrade_data_broker():
    """Add place_of_performance_scope to the staging and certified FABS tables."""
    for table_name in ('award_financial_assistance', 'certified_award_financial_assistance'):
        op.add_column(table_name,
                      sa.Column('place_of_performance_scope', sa.Text(), nullable=True))
def downgrade_data_broker():
    """Drop place_of_performance_scope from both tables (reverse order)."""
    for table_name in ('certified_award_financial_assistance', 'award_financial_assistance'):
        op.drop_column(table_name, 'place_of_performance_scope')
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/229820d5d7b1_adding_place_of_performance_scope_to_.py",
"copies": "1",
"size": "1187",
"license": "cc0-1.0",
"hash": -3119694691399127000,
"line_mean": 27.2619047619,
"line_max": 124,
"alpha_frac": 0.7110362258,
"autogenerated": false,
"ratio": 3.1994609164420487,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9272178299660416,
"avg_score": 0.02766376851632654,
"num_lines": 42
} |
"""Adding population join tables and backrefs.
Revision ID: 5736a0d64e8b
Revises: 4a09ad931d71
Create Date: 2015-07-30 22:35:57.458000
"""
# revision identifiers, used by Alembic.
revision = '5736a0d64e8b'
down_revision = '4a09ad931d71'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the user<->population and resource<->population join tables."""
    # (table name, owning-side column, owning-side FK target)
    join_specs = (
        ('userpopulation', 'user_id', 'user.id'),
        ('resourcepopulation', 'resource_id', 'resource.id'),
    )
    for table_name, owner_col, owner_target in join_specs:
        op.create_table(table_name,
            sa.Column(owner_col, sa.Integer(), nullable=False),
            sa.Column('population_id', sa.Integer(), nullable=False),
            sa.ForeignKeyConstraint(['population_id'], ['population.id'], ),
            sa.ForeignKeyConstraint([owner_col], [owner_target], ),
            sa.PrimaryKeyConstraint(owner_col, 'population_id')
        )
def downgrade():
    """Drop the population join tables, reverse of creation order."""
    for table_name in ('resourcepopulation', 'userpopulation'):
        op.drop_table(table_name)
| {
"repo_name": "radremedy/radremedy",
"path": "remedy/rad/migrations/versions/5736a0d64e8b_adding_population_join_tables_and_.py",
"copies": "2",
"size": "1364",
"license": "mpl-2.0",
"hash": 5014245012483744000,
"line_mean": 20.3548387097,
"line_max": 68,
"alpha_frac": 0.6517595308,
"autogenerated": false,
"ratio": 3.479591836734694,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5131351367534694,
"avg_score": null,
"num_lines": null
} |
"""Adding principle place street to subaward
Revision ID: 4be5e411246b
Revises: 87d7a9b0ea7b
Create Date: 2019-08-07 15:13:50.092991
"""
# revision identifiers, used by Alembic.
revision = '4be5e411246b'
down_revision = '87d7a9b0ea7b'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade(engine_name):
    """Route to this module's upgrade_<engine_name>() implementation."""
    globals()["upgrade_" + engine_name]()
def downgrade(engine_name):
    """Route to this module's downgrade_<engine_name>() implementation."""
    globals()["downgrade_" + engine_name]()
def upgrade_data_broker():
    """Add the prime and sub place-of-performance street columns to subaward."""
    for column_name in ('place_of_perform_street', 'sub_place_of_perform_street'):
        op.add_column('subaward', sa.Column(column_name, sa.Text(), nullable=True))
def downgrade_data_broker():
    """Drop the place-of-performance street columns (reverse order)."""
    for column_name in ('sub_place_of_perform_street', 'place_of_perform_street'):
        op.drop_column('subaward', column_name)
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/4be5e411246b_adding_principle_place_street_to_.py",
"copies": "1",
"size": "1093",
"license": "cc0-1.0",
"hash": -1122300397415694800,
"line_mean": 25.0238095238,
"line_max": 97,
"alpha_frac": 0.6962488564,
"autogenerated": false,
"ratio": 3.158959537572254,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4355208393972254,
"avg_score": null,
"num_lines": null
} |
"""Adding published_submission_ids and certified flag to submission table
Revision ID: 78a36e024274
Revises: 24331fcfcd00
Create Date: 2020-06-04 20:01:06.814406
"""
# revision identifiers, used by Alembic.
revision = '78a36e024274'
down_revision = '24331fcfcd00'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade(engine_name):
    """Route to this module's upgrade_<engine_name>() implementation."""
    globals()["upgrade_" + engine_name]()
def downgrade(engine_name):
    """Route to this module's downgrade_<engine_name>() implementation."""
    globals()["downgrade_" + engine_name]()
def upgrade_data_broker():
    """Add publication-history and certification-state columns to submission."""
    # Empty-array server default so existing rows get [] rather than NULL.
    published_ids_col = sa.Column('published_submission_ids', sa.ARRAY(sa.Integer()),
                                  nullable=True, server_default="{}")
    certified_col = sa.Column('certified', sa.Boolean(), server_default='False',
                              nullable=False)
    op.add_column('submission', published_ids_col)
    op.add_column('submission', certified_col)
def downgrade_data_broker():
    """Drop the columns added by this revision from submission."""
    for column_name in ('published_submission_ids', 'certified'):
        op.drop_column('submission', column_name)
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/78a36e024274_adding_published_submission_ids_to_.py",
"copies": "1",
"size": "1200",
"license": "cc0-1.0",
"hash": 143722891493556260,
"line_mean": 26.9069767442,
"line_max": 109,
"alpha_frac": 0.6791666667,
"autogenerated": false,
"ratio": 3.658536585365854,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9697916677879131,
"avg_score": 0.027957314837344427,
"num_lines": 43
} |
"""adding qm tables
Revision ID: e66c069eb92b
Revises: 3e86ac6195d8
Create Date: 2017-08-04 23:04:23.097038+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e66c069eb92b'
down_revision = '3e86ac6195d8'
branch_labels = None
depends_on = None
def upgrade():
    """Create the quality-metric tables: metric_list, ant_metrics, array_metrics."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Lookup table enumerating the known metric names and descriptions;
    # the other two tables reference it by FK, so it must exist first.
    op.create_table('metric_list',
    sa.Column('metric', sa.String(), nullable=False),
    sa.Column('desc', sa.String(), nullable=False),
    sa.PrimaryKeyConstraint('metric')
    )
    # Per-antenna, per-polarization metric values, keyed to an observation.
    op.create_table('ant_metrics',
    sa.Column('obsid', sa.BigInteger(), nullable=False),
    sa.Column('ant', sa.Integer(), nullable=False),
    sa.Column('pol', sa.String(), nullable=False),
    sa.Column('metric', sa.String(), nullable=False),
    sa.Column('mc_time', sa.BigInteger(), nullable=False),
    sa.Column('val', sa.Float(), nullable=False),
    sa.ForeignKeyConstraint(['metric'], ['metric_list.metric'], ),
    sa.ForeignKeyConstraint(['obsid'], ['hera_obs.obsid'], ),
    sa.PrimaryKeyConstraint('obsid', 'ant', 'pol', 'metric')
    )
    # Array-wide metric values, one row per (observation, metric).
    op.create_table('array_metrics',
    sa.Column('obsid', sa.BigInteger(), nullable=False),
    sa.Column('metric', sa.String(), nullable=False),
    sa.Column('mc_time', sa.BigInteger(), nullable=False),
    sa.Column('val', sa.Float(), nullable=False),
    sa.ForeignKeyConstraint(['metric'], ['metric_list.metric'], ),
    sa.ForeignKeyConstraint(['obsid'], ['hera_obs.obsid'], ),
    sa.PrimaryKeyConstraint('obsid', 'metric')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the quality-metric tables; FK children before metric_list."""
    for table_name in ('array_metrics', 'ant_metrics', 'metric_list'):
        op.drop_table(table_name)
| {
"repo_name": "HERA-Team/hera_mc",
"path": "alembic/versions/e66c069eb92b_adding_qm_tables.py",
"copies": "2",
"size": "1823",
"license": "bsd-2-clause",
"hash": -6684565080715553000,
"line_mean": 32.7592592593,
"line_max": 66,
"alpha_frac": 0.6560614372,
"autogenerated": false,
"ratio": 3.3759259259259258,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5031987363125925,
"avg_score": null,
"num_lines": null
} |
"""Adding resource flags and hospital affiliation.
Revision ID: 3548e1ad8a3c
Revises: 157debc89661
Create Date: 2015-09-01 00:43:10.831000
"""
# revision identifiers, used by Alembic.
revision = '3548e1ad8a3c'
down_revision = '157debc89661'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add resource service flags and the hospital affiliation column."""
    new_columns = (
        sa.Column('has_sliding_scale', sa.Boolean(), nullable=True),
        sa.Column('hospital_affiliation', sa.UnicodeText(), nullable=True),
        sa.Column('is_accessible', sa.Boolean(), nullable=True),
        sa.Column('is_icath', sa.Boolean(), nullable=True),
        sa.Column('is_wpath', sa.Boolean(), nullable=True),
    )
    for column in new_columns:
        op.add_column('resource', column)
def downgrade():
    """Drop the resource flags and hospital affiliation (reverse order)."""
    for column_name in ('is_wpath', 'is_icath', 'is_accessible',
                        'hospital_affiliation', 'has_sliding_scale'):
        op.drop_column('resource', column_name)
| {
"repo_name": "radremedy/radremedy",
"path": "remedy/rad/migrations/versions/3548e1ad8a3c_adding_resource_flags_and_hospital_.py",
"copies": "2",
"size": "1259",
"license": "mpl-2.0",
"hash": -7288100786638314000,
"line_mean": 20.875,
"line_max": 97,
"alpha_frac": 0.6616362192,
"autogenerated": false,
"ratio": 3.187341772151899,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48489779913518993,
"avg_score": null,
"num_lines": null
} |
"""Adding Resource model.
Revision ID: 4a09ad931d71
Revises: 3ec157e32f35
Create Date: 2015-07-30 22:33:07.145000
"""
# revision identifiers, used by Alembic.
revision = '4a09ad931d71'
down_revision = '3ec157e32f35'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the population table with a unique, required name."""
    schema_items = [
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.Unicode(length=100), nullable=False),
        sa.Column('description', sa.UnicodeText(), nullable=True),
        sa.Column('keywords', sa.UnicodeText(), nullable=True),
        sa.Column('visible', sa.Boolean(), nullable=False),
        sa.Column('date_created', sa.DateTime(), nullable=False),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name'),
    ]
    op.create_table('population', *schema_items)
def downgrade():
    """Remove the population table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('population')
    ### end Alembic commands ###
| {
"repo_name": "radioprotector/radremedy",
"path": "remedy/rad/migrations/versions/4a09ad931d71_adding_resource_model.py",
"copies": "2",
"size": "1048",
"license": "mpl-2.0",
"hash": 3755315540935201300,
"line_mean": 16.7719298246,
"line_max": 63,
"alpha_frac": 0.6450381679,
"autogenerated": false,
"ratio": 3.358974358974359,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9928891909755343,
"avg_score": 0.015024123423803314,
"num_lines": 35
} |
"""Adding resource submission fields.
Revision ID: 3ec157e32f35
Revises: b511e51f3a0
Create Date: 2015-07-30 22:24:47.518000
"""
# revision identifiers, used by Alembic.
revision = '3ec157e32f35'
down_revision = 'b511e51f3a0'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add submission-tracking fields to the resource table."""
    # Existing rows default to approved ('1'); the rest start NULL.
    submission_columns = (
        sa.Column('is_approved', sa.Boolean(), server_default='1', nullable=False),
        sa.Column('submitted_date', sa.DateTime(), nullable=True),
        sa.Column('submitted_ip', sa.Unicode(length=45), nullable=True),
        sa.Column('submitted_user_id', sa.Integer(), nullable=True),
    )
    for column in submission_columns:
        op.add_column('resource', column)
def downgrade():
    """Drop the submission-tracking fields (reverse order)."""
    for column_name in ('submitted_user_id', 'submitted_ip',
                        'submitted_date', 'is_approved'):
        op.drop_column('resource', column_name)
| {
"repo_name": "radremedy/radremedy",
"path": "remedy/rad/migrations/versions/3ec157e32f35_adding_resource_submission_fields.py",
"copies": "2",
"size": "1136",
"license": "mpl-2.0",
"hash": 5747739085902364000,
"line_mean": 19.4444444444,
"line_max": 105,
"alpha_frac": 0.6602112676,
"autogenerated": false,
"ratio": 3.2550143266475646,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9856076403339673,
"avg_score": 0.011829838181578374,
"num_lines": 32
} |
"""Adding role column to user table
Revision ID: 538eeb160af6
Revises: 6b9d673d8e30
Create Date: 2015-09-17 04:22:21.262285
"""
# revision identifiers, used by Alembic.
revision = '538eeb160af6'
down_revision = '6b9d673d8e30'
from alembic import op
from flask_sqlalchemy import _SessionSignalEvents
import sqlalchemy as sa
from sqlalchemy import event
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, Session as BaseSession, relationship
# Stand-alone session factory used for the data backfill in upgrade().
Session = sessionmaker()
# Detach Flask-SQLAlchemy's session signal handlers so sessions created
# here don't fire them.
# NOTE(review): presumably needed because this migration runs outside a
# Flask app context -- confirm against the Flask-SQLAlchemy version in use.
event.remove(BaseSession, 'before_commit', _SessionSignalEvents.session_signal_before_commit)
event.remove(BaseSession, 'after_commit', _SessionSignalEvents.session_signal_after_commit)
event.remove(BaseSession, 'after_rollback', _SessionSignalEvents.session_signal_after_rollback)
Base = declarative_base()
class User(Base):
    # Minimal mapping of the 'user' table -- only the columns this
    # migration reads (id, active) or writes (role).
    __tablename__ = 'user'
    id = sa.Column(sa.Integer, primary_key=True)
    active = sa.Column(sa.Boolean())
    role = sa.Column(sa.String(30), default="View")
def upgrade():
    """Add a nullable 'role' column to user and backfill active users with 'View'."""
    ### commands auto generated by Alembic - please adjust! ###
    ### end Alembic commands ###
    bind = op.get_bind()
    session = Session(bind=bind)
    op.add_column('user', sa.Column('role', sa.String(length=30), nullable=True))
    # Backfill: every currently-active user gets the 'View' role.
    # NOTE(review): inactive users are left with a NULL role -- confirm intended.
    for user in session.query(User):
        if user.active:
            user.role = 'View'
    session.commit()
def downgrade():
    """Drop the 'role' column from the user table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('user', 'role')
    ### end Alembic commands ###
| {
"repo_name": "bunjiboys/security_monkey",
"path": "migrations/versions/538eeb160af6_.py",
"copies": "3",
"size": "1530",
"license": "apache-2.0",
"hash": 7861026154885357000,
"line_mean": 28.4230769231,
"line_max": 95,
"alpha_frac": 0.7183006536,
"autogenerated": false,
"ratio": 3.5011441647597255,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5719444818359726,
"avg_score": null,
"num_lines": null
} |
"""Adding section to settings
Revision ID: 19c8a92553d6
Revises: 508568dd8957
Create Date: 2014-06-08 17:49:17.714667
"""
# revision identifiers, used by Alembic.
revision = '19c8a92553d6'
down_revision = '508568dd8957'
from alembic import op
import sqlalchemy as sa
# from impression.models import Setting
def upgrade():
    """Add a 'section' column (server default 'main') to the settings table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('settings', sa.Column('section', sa.VARCHAR(length=128), server_default='main', nullable=False))
    ### end Alembic commands ###
    # The seed-data calls below appear deliberately disabled; kept for reference.
    # Setting(name='email-to', section='contact-form', vartype='str', system=True).insert()
    # Setting(name='email-from', section='contact-form', vartype='str', system=True).insert()
    # Setting(name='email-subject', section='contact-form', vartype='str', system=True).insert()
    # Setting(name='email-cc', section='contact-form', vartype='str', system=True).insert()
    # Setting(name='email-bcc', section='contact-form', vartype='str', system=True).insert()
    # Setting(name='smtp-server', section='contact-form', vartype='str', system=True).insert()
    # Setting(name='smtp-port', section='contact-form', vartype='str', system=True).insert()
    # Setting(name='smtp-ssl', section='contact-form', vartype='str', system=True).insert()
def downgrade():
    """Drop the 'section' column from settings."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('settings', 'section')
    ### end Alembic commands ###
| {
"repo_name": "smeggingsmegger/impression",
"path": "alembic/versions/19c8a92553d6_adding_section_to_settings.py",
"copies": "1",
"size": "1446",
"license": "bsd-3-clause",
"hash": -6356647212990124000,
"line_mean": 40.3142857143,
"line_max": 114,
"alpha_frac": 0.6922544952,
"autogenerated": false,
"ratio": 3.4346793349168645,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4626933830116865,
"avg_score": null,
"num_lines": null
} |
"""Adding slug field
Revision ID: 42f7e2ac0c98
Revises: None
Create Date: 2013-11-06 20:07:37.909974
"""
# revision identifiers, used by Alembic.
revision = '42f7e2ac0c98'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add a nullable slug column to every taco content table."""
    for table_name in ('base_layer', 'condiment', 'full_taco',
                       'mixin', 'seasoning', 'shell'):
        op.add_column(table_name, sa.Column('slug', sa.String(), nullable=True))
def downgrade():
    """Drop the slug column from every taco content table (reverse order)."""
    for table_name in ('shell', 'seasoning', 'mixin',
                       'full_taco', 'condiment', 'base_layer'):
        op.drop_column(table_name, 'slug')
| {
"repo_name": "evz/tacofancy-api",
"path": "alembic/versions/42f7e2ac0c98_adding_slug_field.py",
"copies": "1",
"size": "1168",
"license": "mit",
"hash": 8500408181364876000,
"line_mean": 31.4444444444,
"line_max": 78,
"alpha_frac": 0.6601027397,
"autogenerated": false,
"ratio": 3.2,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43601027397000003,
"avg_score": null,
"num_lines": null
} |
"""adding solicidation date fpds
Revision ID: 94efce2e7882
Revises: ba7e8e488c36
Create Date: 2019-03-15 18:44:14.362923
"""
# revision identifiers, used by Alembic.
revision = '94efce2e7882'
down_revision = 'ba7e8e488c36'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade(engine_name):
    """Route to this module's upgrade_<engine_name>() implementation."""
    globals()["upgrade_" + engine_name]()
def downgrade(engine_name):
    """Route to this module's downgrade_<engine_name>() implementation."""
    globals()["downgrade_" + engine_name]()
def upgrade_data_broker():
    """Add solicitation_date to both procurement tables."""
    for table_name in ('award_procurement', 'detached_award_procurement'):
        op.add_column(table_name, sa.Column('solicitation_date', sa.Text(), nullable=True))
def downgrade_data_broker():
    """Drop solicitation_date from both procurement tables (reverse order)."""
    for table_name in ('detached_award_procurement', 'award_procurement'):
        op.drop_column(table_name, 'solicitation_date')
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/94efce2e7882_adding_solicidation_date_fpds.py",
"copies": "1",
"size": "1103",
"license": "cc0-1.0",
"hash": 2520967526931580400,
"line_mean": 25.2619047619,
"line_max": 105,
"alpha_frac": 0.7044424297,
"autogenerated": false,
"ratio": 3.292537313432836,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4496979743132836,
"avg_score": null,
"num_lines": null
} |
"""adding streetAddress2 field
Revision ID: e3272c2dbf9a
Revises: 58e8df756d74
Create Date: 2019-01-30 12:39:41.452800
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "e3272c2dbf9a"
down_revision = "58e8df756d74"
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Route to this module's upgrade_<engine_name>() implementation."""
    globals()["upgrade_" + engine_name]()
def downgrade(engine_name):
    """Route to this module's downgrade_<engine_name>() implementation."""
    globals()["downgrade_" + engine_name]()
def upgrade_rdr():
    """Add the optional street_address2 column to participant_summary."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("participant_summary", sa.Column("street_address2", sa.String(length=255), nullable=True))
    # ### end Alembic commands ###
def downgrade_rdr():
    """Drop the street_address2 column from participant_summary."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column("participant_summary", "street_address2")
    # ### end Alembic commands ###
def upgrade_metrics():
    """No schema changes for the metrics database in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No schema changes for the metrics database in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/e3272c2dbf9a_adding_streetaddress2_field.py",
"copies": "1",
"size": "1146",
"license": "bsd-3-clause",
"hash": -176638839389731460,
"line_mean": 23.3829787234,
"line_max": 108,
"alpha_frac": 0.6719022688,
"autogenerated": false,
"ratio": 3.4832826747720365,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4655184943572036,
"avg_score": null,
"num_lines": null
} |
"""Adding submission comment
Revision ID: 8b8e9fe94b4a
Revises: c0be99167fbf
Create Date: 2021-01-13 19:26:51.112780
"""
# revision identifiers, used by Alembic.
revision = '8b8e9fe94b4a'
down_revision = 'c0be99167fbf'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade(engine_name):
    """Route to this module's upgrade_<engine_name>() implementation."""
    globals()["upgrade_" + engine_name]()
def downgrade(engine_name):
    """Route to this module's downgrade_<engine_name>() implementation."""
    globals()["downgrade_" + engine_name]()
def upgrade_data_broker():
    """Relax file_type_id to nullable on both comment tables."""
    for table_name in ('certified_comment', 'comment'):
        op.alter_column(table_name, 'file_type_id', existing_type=sa.INTEGER(), nullable=True)
def downgrade_data_broker():
    """Restore file_type_id to NOT NULL on both comment tables."""
    for table_name in ('comment', 'certified_comment'):
        op.alter_column(table_name, 'file_type_id', existing_type=sa.INTEGER(), nullable=False)
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/8b8e9fe94b4a_adding_submission_comment.py",
"copies": "1",
"size": "1147",
"license": "cc0-1.0",
"hash": -7233858463846657000,
"line_mean": 26.3095238095,
"line_max": 100,
"alpha_frac": 0.7009590235,
"autogenerated": false,
"ratio": 3.286532951289398,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4487491974789398,
"avg_score": null,
"num_lines": null
} |
"""Adding subreddit page and post tables
Revision ID: edc5377c32c7
Revises: 4d46b88366fc
Create Date: 2016-06-15 10:26:46.224539
"""
# revision identifiers, used by Alembic.
revision = 'edc5377c32c7'
down_revision = '4d46b88366fc'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade(engine_name):
    """Route to this module's upgrade_<engine_name>() implementation."""
    globals()["upgrade_" + engine_name]()
def downgrade(engine_name):
    """Route to this module's downgrade_<engine_name>() implementation."""
    globals()["downgrade_" + engine_name]()
def _create_subreddit_schema():
    """Shared DDL used by every environment: create the subreddit_pages,
    subreddits and posts tables and add front_pages.page_type.

    The original revision repeated this body verbatim for development,
    test and production; it is factored out here so the three engines
    cannot drift apart.
    """
    # Snapshot storage for rendered subreddit pages.
    op.create_table('subreddit_pages',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('page_type', sa.Integer(), nullable=True),
    sa.Column('page_data', mysql.MEDIUMTEXT(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # Subreddits must exist before posts, which reference them by FK.
    op.create_table('subreddits',
    sa.Column('id', sa.String(length=32), autoincrement=False, nullable=False),
    sa.Column('name', sa.String(length=32), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('id')
    )
    op.create_table('posts',
    sa.Column('id', sa.String(length=32), autoincrement=False, nullable=False),
    sa.Column('subreddit_id', sa.String(length=32), nullable=True),
    sa.Column('created', sa.DateTime(), nullable=True),
    sa.Column('post_data', mysql.MEDIUMTEXT(), nullable=True),
    sa.ForeignKeyConstraint(['subreddit_id'], ['subreddits.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('id')
    )
    op.add_column('front_pages', sa.Column('page_type', sa.Integer(), nullable=True))


def _drop_subreddit_schema():
    """Shared teardown: reverse of _create_subreddit_schema (posts first,
    since it references subreddits)."""
    op.drop_column('front_pages', 'page_type')
    op.drop_table('posts')
    op.drop_table('subreddits')
    op.drop_table('subreddit_pages')


def upgrade_development():
    """Apply the schema change to the development database."""
    _create_subreddit_schema()


def downgrade_development():
    """Revert the schema change on the development database."""
    _drop_subreddit_schema()


def upgrade_test():
    """Apply the schema change to the test database."""
    _create_subreddit_schema()


def downgrade_test():
    """Revert the schema change on the test database."""
    _drop_subreddit_schema()


def upgrade_production():
    """Apply the schema change to the production database."""
    _create_subreddit_schema()


def downgrade_production():
    """Revert the schema change on the production database."""
    _drop_subreddit_schema()
| {
"repo_name": "c4fcm/CivilServant",
"path": "alembic/versions/edc5377c32c7_adding_subreddit_page_and_post_tables.py",
"copies": "1",
"size": "4910",
"license": "mit",
"hash": 6762393291041179000,
"line_mean": 34.3237410072,
"line_max": 85,
"alpha_frac": 0.666191446,
"autogenerated": false,
"ratio": 3.4748761500353855,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9551807795865725,
"avg_score": 0.017851960033931996,
"num_lines": 139
} |
'''Adding support to termcolor.
Failures and errors are red and successful steps are green.
>>> SupportToTermcolor.run()
False
>>> colored_output2 = """Story: Support to termcolor
... As a pyhistorian commiter
... I want to have support to colored output
... So that the output becomes more readable
...
... Scenario 1: Green color
... """+green_output+"""
... Scenario 2: Red color
... """+red_output+"""
... Scenario 3: Green and red colors
... """+green_and_red_output+"""
... Ran 3 scenarios with 3 failures, 0 errors and 0 pending steps
... """
>>> checker.check_output(colored_output2, output.getvalue(), doctest.ELLIPSIS)
True
'''
from pyhistorian import *
from pyhistorian.output import colored
from should_dsl import *
from cStringIO import StringIO
import os
import doctest
# Path cited by doctest failure tracebacks in the expected output below.
HERE = os.path.dirname(__file__) + '/colors.py'
# Checker used by the module doctest to compare colored output with ELLIPSIS.
checker = doctest.OutputChecker()
class GreenColor(Scenario):
    # Scenario whose steps all pass, so its report should render green.
    @Given('I want my output colored and it pass')
    def nothing(self):
        pass
    @Then('I have green messages')
    def nothing2(self):
        pass
class RedColor(Scenario):
    # Scenario whose steps all fail (deliberate should_dsl mismatches),
    # so its report should render red.
    @Given('I want my output colored and it fails')
    def fail1(self):
        # Intentional failure: produces the red FAIL line and traceback.
        'this scenario' |should_be| 'red colored'
    @Then('I have red messages')
    def fail2(self):
        # Intentional failure, same as above.
        'this fail color' |should_be| 'red'
class GreenAndRedColors(Scenario):
    # Mixed scenario: two passing steps (green) and one failing step (red).
    @Given('I want my output colored (green and red)')
    def nothing(self):
        pass
    @Then('I have green message')
    def green_message(self):
        pass
    @Then('I have red message')
    def red_message(self):
        # Intentional should_dsl failure so this step renders red.
        'this step' |should_be| 'red'
def red_colored(text):
    """Return *text* wrapped in red terminal color codes via pyhistorian's colored()."""
    return colored(text, color='red')


def green_colored(text):
    """Return *text* wrapped in green terminal color codes via pyhistorian's colored()."""
    return colored(text, color='green')
# In-memory buffer that captures the story's colored report for the doctest.
output = StringIO()
class SupportToTermcolor(Story):
    """As a pyhistorian commiter
    I want to have support to colored output
    So that the output becomes more readable"""
    # NOTE(review): the docstring above is printed verbatim in the story
    # report and matched by the module doctest -- do not edit it (including
    # the 'commiter' spelling) without updating the doctest too.
    output = output
    colored = True
    # Scenarios run in this order; the doctest expects green, red, mixed.
    scenarios = (GreenColor, RedColor, GreenAndRedColors)
# Expected report fragments, built with the same color helpers the story
# runner uses so the doctest comparison is byte-for-byte identical.
green_output = green_colored('\
Given I want my output colored and it pass ... OK\n')+ \
green_colored('\
Then I have green messages ... OK\n')
# Red fragment: two FAIL lines plus the two should_dsl failure tracebacks
# (ELLIPSIS in the doctest absorbs the variable line numbers).
red_output = red_colored('\
Given I want my output colored and it fails ... FAIL\n')+ \
red_colored('\
Then I have red messages ... FAIL\n') +\
red_colored('\n Failures:\n')+\
red_colored(""" File "%(here)s", line ..., in fail1
'this scenario' |should_be| 'red colored'
...
ShouldNotSatisfied: 'this scenario' is not 'red colored'
""" % {'here': HERE})+\
red_colored(""" File "%(here)s", line ..., in fail2
'this fail color' |should_be| 'red'
...
ShouldNotSatisfied: 'this fail color' is not 'red'
""" % {'here': HERE})
# Mixed fragment: green OKs followed by a red FAIL and its traceback.
green_and_red_output = green_colored('\
Given I want my output colored (green and red) ... OK\n')+\
green_colored('\
Then I have green message ... OK\n') + \
red_colored('\
And I have red message ... FAIL\n') +\
red_colored('\n Failures:\n') + \
red_colored(""" File "%(here)s", line ..., in red_message
'this step' |should_be| 'red'
...
ShouldNotSatisfied: 'this step' is not 'red'
""" % {'here': HERE})
| {
"repo_name": "hltbra/pyhistorian",
"path": "specs/colors.py",
"copies": "1",
"size": "3210",
"license": "mit",
"hash": -3660818479475298300,
"line_mean": 26.4358974359,
"line_max": 78,
"alpha_frac": 0.631152648,
"autogenerated": false,
"ratio": 3.422174840085288,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4553327488085288,
"avg_score": null,
"num_lines": null
} |
"""Adding table for tracking when versioning items were flattened
Revision ID: b21ea54b22a2
Revises: 49308bd51717
Create Date: 2018-07-07 07:44:47.984194
"""
# revision identifiers, used by Alembic.
revision = 'b21ea54b22a2'
down_revision = '49308bd51717'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy_utils.types import TSVectorType
from sqlalchemy_searchable import make_searchable
import sqlalchemy_utils
# Patch in knowledge of the citext type, so it reflects properly.
from sqlalchemy.dialects.postgresql.base import ischema_names
import citext
import queue
import datetime
from sqlalchemy.dialects.postgresql import ENUM
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.dialects.postgresql import TSVECTOR
ischema_names['citext'] = citext.CIText
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    """Create the table that records when each URL's versions were flattened."""
    table_name = 'version_checked_table'
    op.create_table(
        table_name,
        sa.Column('url', sa.Text(), nullable=False),
        sa.Column('checked', sa.DateTime(), nullable=True),
        sa.PrimaryKeyConstraint('url')
    )
    # 'checked' is queried by time ranges; 'url' is unique per row.
    op.create_index(op.f('ix_version_checked_table_checked'), table_name, ['checked'], unique=False)
    op.create_index(op.f('ix_version_checked_table_url'), table_name, ['url'], unique=True)
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    """Drop the flattening-tracker table and its indexes."""
    table_name = 'version_checked_table'
    # Indexes go first, then the table itself.
    op.drop_index(op.f('ix_version_checked_table_url'), table_name=table_name)
    op.drop_index(op.f('ix_version_checked_table_checked'), table_name=table_name)
    op.drop_table(table_name)
    # ### end Alembic commands ###
| {
"repo_name": "fake-name/ReadableWebProxy",
"path": "alembic/versions/00043_b21ea54b22a2_adding_table_for_tracking_when_.py",
"copies": "1",
"size": "1758",
"license": "bsd-3-clause",
"hash": 7068797002788783000,
"line_mean": 32.8076923077,
"line_max": 113,
"alpha_frac": 0.7428896473,
"autogenerated": false,
"ratio": 3.5301204819277108,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4773010129227711,
"avg_score": null,
"num_lines": null
} |
"""Adding tables with Backref
Revision ID: bbafc5e64b7
Revises: 5979861082d5
Create Date: 2013-05-07 22:29:17.287823
"""
# revision identifiers, used by Alembic.
revision = 'bbafc5e64b7'
down_revision = '5979861082d5'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # Organisation profile table: contact details, addresses and a set of
    # boolean capability/interest flags.
    op.create_table('organisation',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=255), nullable=False),
        # NOTE(review): 'yer_es' looks like a typo (year established?) --
        # the column name is part of the schema, so it is left untouched.
        sa.Column('yer_es', sa.Integer(), nullable=False),
        sa.Column('about', sa.String(length=255), nullable=False),
        sa.Column('reg_adr', sa.String(length=255), nullable=False),
        sa.Column('cor_adr', sa.String(length=255), nullable=False),
        sa.Column('contact_name', sa.String(length=255), nullable=False),
        sa.Column('contact_des', sa.String(length=255), nullable=False),
        sa.Column('icare_phone', sa.Integer(), nullable=False),
        sa.Column('icare_mobile', sa.Integer(), nullable=False),
        sa.Column('icare_fax', sa.Integer(), nullable=False),
        sa.Column('website', sa.String(length=255), nullable=False),
        sa.Column('icare_email', sa.String(length=255), nullable=False),
        sa.Column('social', sa.Boolean(), nullable=True),
        sa.Column('skills_based', sa.Boolean(), nullable=True),
        sa.Column('micro', sa.Boolean(), nullable=True),
        sa.Column('areaofwork', sa.String(length=255), nullable=True),
        sa.Column('community', sa.Boolean(), nullable=True),
        sa.Column('intl_camp', sa.Boolean(), nullable=True),
        sa.Column('dis_hr', sa.Boolean(), nullable=True),
        sa.Column('dis_stationary', sa.Boolean(), nullable=True),
        sa.Column('dis_software', sa.Boolean(), nullable=True),
        sa.Column('dis_hardware', sa.Boolean(), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
    # Lookup table of focus areas / interests.
    op.create_table('focus',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('interest_name', sa.Integer(), nullable=False),
        sa.PrimaryKeyConstraint('id')
    )
    # Association table: organisation <-> focus (many-to-many with backref).
    op.create_table('org_focus',
        sa.Column('org_id', sa.Integer(), nullable=True),
        sa.Column('focus_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['focus_id'], ['focus.id'], ),
        sa.ForeignKeyConstraint(['org_id'], ['organisation.id'], ),
        # NOTE(review): empty PrimaryKeyConstraint -- the table is created
        # without a primary key; confirm this is intended.
        sa.PrimaryKeyConstraint()
    )
    # Association table: user <-> focus (many-to-many with backref).
    op.create_table('user_focus',
        sa.Column('user_id', sa.Integer(), nullable=True),
        sa.Column('focus_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['focus_id'], ['focus.id'], ),
        sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
        sa.PrimaryKeyConstraint()
    )
    # Expand the user profile: replace the single 'name' column (dropped at
    # the end) with first/last names plus contact and preference columns.
    op.add_column(u'user', sa.Column('college', sa.String(length=255), nullable=False))
    op.add_column(u'user', sa.Column('first_name', sa.String(length=255), nullable=False))
    op.add_column(u'user', sa.Column('last_name', sa.String(length=255), nullable=False))
    op.add_column(u'user', sa.Column('date_of_birth', sa.Integer(), nullable=True))
    op.add_column(u'user', sa.Column('disaster', sa.Boolean(), nullable=True))
    op.add_column(u'user', sa.Column('organ', sa.Boolean(), nullable=True))
    op.add_column(u'user', sa.Column('blood_group', sa.String(length=5), nullable=False))
    op.add_column(u'user', sa.Column('twitter', sa.Integer(), nullable=True))
    op.add_column(u'user', sa.Column('linkedin', sa.String(length=255), nullable=True))
    op.add_column(u'user', sa.Column('branch', sa.String(length=60), nullable=False))
    op.add_column(u'user', sa.Column('phone', sa.Integer(), nullable=False))
    op.add_column(u'user', sa.Column('google', sa.Integer(), nullable=True))
    op.add_column(u'user', sa.Column('facebook', sa.String(length=255), nullable=True))
    op.add_column(u'user', sa.Column('blood', sa.Boolean(), nullable=True))
    op.add_column(u'user', sa.Column('year_of_study', sa.Integer(), nullable=False))
    op.add_column(u'user', sa.Column('address', sa.String(length=255), nullable=False))
    # NOTE(review): 'pintrest' is presumably 'pinterest' -- schema name, left as is.
    op.add_column(u'user', sa.Column('pintrest', sa.String(length=255), nullable=True))
    op.add_column(u'user', sa.Column('news', sa.Boolean(), nullable=True))
    op.add_column(u'user', sa.Column('role', sa.String(length=255), nullable=True))
    op.add_column(u'user', sa.Column('email', sa.String(length=255), nullable=True))
    op.drop_column(u'user', u'name')
    ### end Alembic commands ###
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    """Revert the profile expansion and drop the focus/association tables."""
    # Restore the original single 'name' column first.
    op.add_column(u'user', sa.Column(u'name', mysql.VARCHAR(length=255), nullable=False))
    # Remove the profile columns in the reverse order of the upgrade.
    user_columns = (
        'email', 'role', 'news', 'pintrest', 'address', 'year_of_study',
        'blood', 'facebook', 'google', 'phone', 'branch', 'linkedin',
        'twitter', 'blood_group', 'organ', 'disaster', 'date_of_birth',
        'last_name', 'first_name', 'college',
    )
    for column_name in user_columns:
        op.drop_column(u'user', column_name)
    # Drop association tables before the tables they reference.
    for table_name in ('user_focus', 'org_focus', 'focus', 'organisation'):
        op.drop_table(table_name)
    ### end Alembic commands ###
| {
"repo_name": "pesos/ngo-portal",
"path": "src/core/migrations/versions/bbafc5e64b7_adding_tables_with_b.py",
"copies": "1",
"size": "5503",
"license": "mit",
"hash": -9172307893433015000,
"line_mean": 46.852173913,
"line_max": 90,
"alpha_frac": 0.6610939488,
"autogenerated": false,
"ratio": 3.09679234665166,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9209522598773903,
"avg_score": 0.009672739335551439,
"num_lines": 115
} |
"""Adding table that keeps track of open source products.
Revision ID: 32ded3390554
Revises: 82f8f48c6ee6
Create Date: 2019-06-16 12:13:28.964876
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "32ded3390554"
down_revision = "82f8f48c6ee6"
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    """Create the oss_products table tracking known open-source products."""
    table_name = "oss_products"
    op.create_table(
        table_name,
        sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
        sa.Column("date_created", sa.DateTime(), nullable=True),
        sa.Column("date_modified", sa.DateTime(), nullable=True),
        sa.Column("vendor", sa.String(length=255), nullable=False),
        sa.Column("product", sa.String(length=255), nullable=False),
        sa.PrimaryKeyConstraint("id"),
        # A (vendor, product) pair identifies a product exactly once.
        sa.UniqueConstraint("vendor", "product"),
    )
    op.create_index("idx_oss_products_main", table_name, ["vendor", "product"], unique=True)
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    """Drop the oss_products table and its lookup index."""
    table_name = "oss_products"
    op.drop_index("idx_oss_products_main", table_name=table_name)
    op.drop_table(table_name)
    # ### end Alembic commands ###
| {
"repo_name": "google/vulncode-db",
"path": "migrations/versions/32ded3390554_adding_table_that_keeps_track_of_open_.py",
"copies": "1",
"size": "1274",
"license": "apache-2.0",
"hash": -7854141982977339000,
"line_mean": 30.0731707317,
"line_max": 83,
"alpha_frac": 0.6577708006,
"autogenerated": false,
"ratio": 3.5193370165745854,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4677107817174585,
"avg_score": null,
"num_lines": null
} |
"""adding tas columns
Revision ID: 089e94d3dc03
Revises: b35d25a57731
Create Date: 2018-08-07 16:23:36.485535
"""
# revision identifiers, used by Alembic.
revision = '089e94d3dc03'
down_revision = 'b35d25a57731'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Look up and invoke the engine-specific upgrade function in this module."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()


def downgrade(engine_name):
    """Look up and invoke the engine-specific downgrade function in this module."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_data_broker():
    # ### commands auto generated by Alembic - please adjust! ###
    # All new tas_lookup columns are optional free-text fields.
    new_columns = (
        'account_title',
        'budget_bureau_code',
        'budget_bureau_name',
        'budget_function_code',
        'budget_function_title',
        'budget_subfunction_code',
        'budget_subfunction_title',
        'reporting_agency_aid',
        'reporting_agency_name',
    )
    for column_name in new_columns:
        op.add_column('tas_lookup', sa.Column(column_name, sa.Text(), nullable=True))
    # ### end Alembic commands ###
def downgrade_data_broker():
    # ### commands auto generated by Alembic - please adjust! ###
    # Drop the TAS columns in the reverse order of the upgrade.
    removed_columns = (
        'reporting_agency_name',
        'reporting_agency_aid',
        'budget_subfunction_title',
        'budget_subfunction_code',
        'budget_function_title',
        'budget_function_code',
        'budget_bureau_name',
        'budget_bureau_code',
        'account_title',
    )
    for column_name in removed_columns:
        op.drop_column('tas_lookup', column_name)
    # ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/089e94d3dc03_adding_tas_columns.py",
"copies": "1",
"size": "2062",
"license": "cc0-1.0",
"hash": 7159851039734590000,
"line_mean": 35.8214285714,
"line_max": 96,
"alpha_frac": 0.684772066,
"autogenerated": false,
"ratio": 3.1577335375191424,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4342505603519142,
"avg_score": null,
"num_lines": null
} |
"""adding territories or freely associated states flag to country code
Revision ID: 1fb19acc7d64
Revises: 0cf297fa927c
Create Date: 2019-04-16 14:32:40.284194
"""
# revision identifiers, used by Alembic.
revision = '1fb19acc7d64'
down_revision = '0cf297fa927c'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade(engine_name):
    """Look up and invoke the engine-specific upgrade function in this module."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()


def downgrade(engine_name):
    """Look up and invoke the engine-specific downgrade function in this module."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_data_broker():
    # ### commands auto generated by Alembic - please adjust! ###
    # New non-null flag; server_default='False' backfills existing rows.
    territory_flag = sa.Column('territory_free_state', sa.Boolean(),
                               server_default='False', nullable=False)
    op.add_column('country_code', territory_flag)
    # ### end Alembic commands ###
def downgrade_data_broker():
    # ### commands auto generated by Alembic - please adjust! ###
    # Remove the territory / freely-associated-state flag again.
    op.drop_column('country_code', 'territory_free_state')
    # ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/1fb19acc7d64_adding_territories_or_freely_associated_.py",
"copies": "1",
"size": "1033",
"license": "cc0-1.0",
"hash": 949512146299677800,
"line_mean": 24.1951219512,
"line_max": 105,
"alpha_frac": 0.6737657309,
"autogenerated": false,
"ratio": 3.5993031358885017,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4773068866788502,
"avg_score": null,
"num_lines": null
} |
"""Adding the ability to specify certificate replacements
Revision ID: 33de094da890
Revises: ed422fc58ba
Create Date: 2015-11-30 15:40:19.827272
"""
# revision identifiers, used by Alembic.
revision = "33de094da890"
down_revision = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    """Create the self-referential association recording certificate replacements."""
    op.create_table(
        "certificate_replacement_associations",
        sa.Column("replaced_certificate_id", sa.Integer(), nullable=True),
        sa.Column("certificate_id", sa.Integer(), nullable=True),
        # Cascade so association rows disappear with either certificate.
        sa.ForeignKeyConstraint(["certificate_id"], ["certificates.id"],
                                ondelete="cascade"),
        sa.ForeignKeyConstraint(["replaced_certificate_id"], ["certificates.id"],
                                ondelete="cascade"),
    )
    ### end Alembic commands ###
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    """Drop the replacement-association table (all recorded links are lost)."""
    op.drop_table("certificate_replacement_associations")
    ### end Alembic commands ###
| {
"repo_name": "Netflix/lemur",
"path": "lemur/migrations/versions/33de094da890_.py",
"copies": "1",
"size": "1076",
"license": "apache-2.0",
"hash": -1754181247824798000,
"line_mean": 28.0810810811,
"line_max": 80,
"alpha_frac": 0.6737918216,
"autogenerated": false,
"ratio": 4.138461538461539,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5312253360061538,
"avg_score": null,
"num_lines": null
} |
# adding this to suppress sklearn DeprecationWarnings...
import numpy
def warn(*args, **kwargs):
    """Swallow any warning call; installed over warnings.warn to silence sklearn DeprecationWarnings."""
import warnings
# Monkey-patch warnings.warn globally with the no-op above so sklearn's
# DeprecationWarnings don't clutter the tuning output.
warnings.warn = warn
import time
from sklearn.model_selection import train_test_split
from model.MLPRegressionModel import MLPRegressionModel
from util.config import Config
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from model.overall_runner import corpora_to_day_features, \
init_corpora, combine_day_ranges, match_features_to_labels
current_milli_time = lambda: int(round(time.time() * 1000))
if __name__ == '__main__':
    # Grid-search over MLP hidden-layer width and alpha (L2 regularization),
    # recording validation MSE for each combination and plotting the surface.
    hidden_layer_sizes = range(50, 200, 10)
    alpha_sizes = numpy.arange(0.00001, 0.001, 0.00010)
    # Parallel 2-D lists (rows per hidden size) feeding the wireframe plot.
    plot_x = []
    plot_y = []
    plot_z = []
    approval_ratings, political_article_corpora = init_corpora()
    for index, hidden_layer_size in enumerate(hidden_layer_sizes):
        plot_x.append([])
        plot_y.append([])
        plot_z.append([])
        for alpha in alpha_sizes:
            print("Testing with hidden layer size {} and alpha {}".format(hidden_layer_size, alpha))
            plot_x[index].append(hidden_layer_size)
            plot_y[index].append(alpha)
            # print("Prediction delay is {}, day_range is {}".format(delay, day_range))
            # NOTE(review): the feature pipeline below does not depend on the
            # hyperparameters and could be hoisted out of both loops.
            features_by_day = corpora_to_day_features(political_article_corpora)
            #print("Number of days of data: " + str(len(features_by_day.items())))
            features_by_range = combine_day_ranges(features_by_day)
            X, Y = match_features_to_labels(features_by_range, approval_ratings)
            #print("Number of feature vectors (ideally this is # days - moving_range_size + 1): " + str(len(X)))
            # NOTE(review): Config.TRAINING_PARTITION is passed as test_size --
            # confirm the constant actually denotes the held-out fraction.
            # Fixed random_state keeps splits identical across grid points.
            X_train_and_val, X_test, Y_train_and_val, Y_test = \
                train_test_split(X, Y, test_size=Config.TRAINING_PARTITION, random_state=2)
            X_train, X_val, Y_train, Y_val = \
                train_test_split(X_train_and_val, Y_train_and_val, test_size=0.125, random_state=2)
            model = MLPRegressionModel([X_train, Y_train], hidden_layer_sizes=(hidden_layer_size,), alpha=alpha)
            model.train()
            # Score on the validation split; lower MSE is better.
            mse = model.evaluate(X_val, Y_val)
            print("MSE is {}".format(mse))
            plot_z[index].append(mse)
    # Render validation MSE as a 3-D wireframe over the hyperparameter grid.
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    surf = ax.plot_wireframe(plot_x, plot_y, plot_z, cmap=cm.coolwarm, antialiased=True)
    plt.savefig('mlp_hyperparam_tuning.png')
| {
"repo_name": "ASethi77/StateOfTheMedia",
"path": "src/model/tune_mlp_hyperparams.py",
"copies": "1",
"size": "2527",
"license": "apache-2.0",
"hash": 8926406520310566000,
"line_mean": 39.1111111111,
"line_max": 112,
"alpha_frac": 0.6323703997,
"autogenerated": false,
"ratio": 3.3693333333333335,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45017037330333337,
"avg_score": null,
"num_lines": null
} |
"""adding timestamps to all tables
Revision ID: 2701ef6ccb69
Revises: 19c18c1545c3
Create Date: 2016-04-20 14:19:25.366380
"""
# revision identifiers, used by Alembic.
revision = '2701ef6ccb69'
down_revision = '19c18c1545c3'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Look up and invoke the engine-specific upgrade function in this module."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()


def downgrade(engine_name):
    """Look up and invoke the engine-specific downgrade function in this module."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_error_data():
    ### commands auto generated by Alembic - please adjust! ###
    # Stamp every error-database table with audit timestamp columns.
    for table in ('error_data', 'error_type', 'file_status', 'status'):
        for column_name in ('created_at', 'updated_at'):
            op.add_column(table, sa.Column(column_name, sa.DateTime(), nullable=True))
    ### end Alembic commands ###
def downgrade_error_data():
    ### commands auto generated by Alembic - please adjust! ###
    # Remove the audit columns, tables in reverse order of the upgrade.
    for table in ('status', 'file_status', 'error_type', 'error_data'):
        for column_name in ('updated_at', 'created_at'):
            op.drop_column(table, column_name)
    ### end Alembic commands ###
def upgrade_job_tracker():
    ### commands auto generated by Alembic - please adjust! ###
    # Stamp every job-tracker table with audit timestamp columns.
    tables = ('file_type', 'job_dependency', 'job_status', 'status',
              'submission', 'type')
    for table in tables:
        for column_name in ('created_at', 'updated_at'):
            op.add_column(table, sa.Column(column_name, sa.DateTime(), nullable=True))
    ### end Alembic commands ###
def downgrade_job_tracker():
    ### commands auto generated by Alembic - please adjust! ###
    # Remove the audit columns, tables in reverse order of the upgrade.
    tables = ('type', 'submission', 'status', 'job_status',
              'job_dependency', 'file_type')
    for table in tables:
        for column_name in ('updated_at', 'created_at'):
            op.drop_column(table, column_name)
    ### end Alembic commands ###
def upgrade_user_manager():
    ### commands auto generated by Alembic - please adjust! ###
    # Stamp every user-manager table with audit timestamp columns.
    tables = ('email_template', 'email_template_type', 'email_token',
              'permission_type', 'user_status', 'users')
    for table in tables:
        for column_name in ('created_at', 'updated_at'):
            op.add_column(table, sa.Column(column_name, sa.DateTime(), nullable=True))
    ### end Alembic commands ###
def downgrade_user_manager():
    ### commands auto generated by Alembic - please adjust! ###
    # Remove the audit columns, tables in reverse order of the upgrade.
    tables = ('users', 'user_status', 'permission_type', 'email_token',
              'email_template_type', 'email_template')
    for table in tables:
        for column_name in ('updated_at', 'created_at'):
            op.drop_column(table, column_name)
    ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-core",
"path": "dataactcore/migrations/versions/2701ef6ccb69_adding_timestamps_to_all_tables.py",
"copies": "1",
"size": "5537",
"license": "cc0-1.0",
"hash": -2670376556632250400,
"line_mean": 44.3852459016,
"line_max": 95,
"alpha_frac": 0.6693155138,
"autogenerated": false,
"ratio": 3.309623430962343,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9406546765103516,
"avg_score": 0.014478435931765508,
"num_lines": 122
} |
"""adding timestamps to all tables
Revision ID: c0a714ade734
Revises: 1a886e694fca
Create Date: 2016-04-20 14:46:06.407765
"""
# revision identifiers, used by Alembic.
revision = 'c0a714ade734'
down_revision = '1a886e694fca'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Look up and invoke the engine-specific upgrade function in this module."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()


def downgrade(engine_name):
    """Look up and invoke the engine-specific downgrade function in this module."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_validation():
    ### commands auto generated by Alembic - please adjust! ###
    # Stamp every validation-database table with audit timestamp columns.
    tables = ('field_type', 'file_columns', 'file_type', 'multi_field_rule',
              'multi_field_rule_type', 'rule', 'rule_timing', 'rule_type',
              'tas_lookup')
    for table in tables:
        for column_name in ('created_at', 'updated_at'):
            op.add_column(table, sa.Column(column_name, sa.DateTime(), nullable=True))
    ### end Alembic commands ###
def downgrade_validation():
    ### commands auto generated by Alembic - please adjust! ###
    # Remove the audit columns, tables in reverse order of the upgrade.
    tables = ('tas_lookup', 'rule_type', 'rule_timing', 'rule',
              'multi_field_rule_type', 'multi_field_rule', 'file_type',
              'file_columns', 'field_type')
    for table in tables:
        for column_name in ('updated_at', 'created_at'):
            op.drop_column(table, column_name)
    ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-validator",
"path": "dataactvalidator/migrations/versions/c0a714ade734_adding_timestamps_to_all_tables.py",
"copies": "1",
"size": "3174",
"license": "cc0-1.0",
"hash": -8599767726455748000,
"line_mean": 45,
"line_max": 97,
"alpha_frac": 0.6742281033,
"autogenerated": false,
"ratio": 3.1803607214428857,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9324171777896597,
"avg_score": 0.0060834093692576285,
"num_lines": 69
} |
""" adding total_obligations table
Revision ID: 9e1acf1450dc
Revises: 8b8e9fe94b4a
Create Date: 2021-02-03 18:24:27.124242
"""
# revision identifiers, used by Alembic.
revision = '9e1acf1450dc'
down_revision = '8b8e9fe94b4a'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade(engine_name):
    """Look up and invoke the engine-specific upgrade function in this module."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()


def downgrade(engine_name):
    """Look up and invoke the engine-specific downgrade function in this module."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_data_broker():
    # ### commands auto generated by Alembic - please adjust! ###
    """Create total_obligations: one row of obligation totals per submission."""
    table_name = 'total_obligations'
    op.create_table(
        table_name,
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('total_obligations_id', sa.Integer(), nullable=False),
        sa.Column('submission_id', sa.Integer(), nullable=False),
        sa.Column('total_obligations', sa.Numeric(), nullable=True),
        sa.Column('total_proc_obligations', sa.Numeric(), nullable=True),
        sa.Column('total_asst_obligations', sa.Numeric(), nullable=True),
        # Cascade: totals rows disappear with their submission.
        sa.ForeignKeyConstraint(['submission_id'], ['submission.submission_id'],
                                name='fk_total_obligations_submission_id',
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('total_obligations_id')
    )
    # Unique index: at most one totals row per submission.
    op.create_index(op.f('ix_total_obligations_submission_id'), table_name,
                    ['submission_id'], unique=True)
    # ### end Alembic commands ###
def downgrade_data_broker():
    # ### commands auto generated by Alembic - please adjust! ###
    """Drop the total_obligations table and its submission index."""
    table_name = 'total_obligations'
    op.drop_index(op.f('ix_total_obligations_submission_id'), table_name=table_name)
    op.drop_table(table_name)
    # ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/9e1acf1450dc_adding_total_obligations_table.py",
"copies": "1",
"size": "1715",
"license": "cc0-1.0",
"hash": -3974760464003821600,
"line_mean": 31.9807692308,
"line_max": 140,
"alpha_frac": 0.7002915452,
"autogenerated": false,
"ratio": 3.3172147001934236,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9388181637246926,
"avg_score": 0.02586492162929956,
"num_lines": 52
} |
"""Adding UEI elements to detached_award_procurement table
Revision ID: 77bd1ef8c53b
Revises: e72f2699bcaa
Create Date: 2021-05-11 13:24:50.207342
"""
# revision identifiers, used by Alembic.
revision = '77bd1ef8c53b'
down_revision = 'e72f2699bcaa'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade(engine_name):
    """Look up and invoke the engine-specific upgrade function in this module."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()


def downgrade(engine_name):
    """Look up and invoke the engine-specific downgrade function in this module."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_data_broker():
    """Add the four UEI columns to detached_award_procurement and index the awardee UEI."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('detached_award_procurement', sa.Column('awardee_or_recipient_uei', sa.Text(), nullable=True))
    # NOTE(review): '_uei_n' looks like a truncated 'name' suffix (compare
    # 'ultimate_parent_uei_name' below) -- confirm the column name is intentional.
    op.add_column('detached_award_procurement', sa.Column('awardee_or_recipient_uei_n', sa.Text(), nullable=True))
    op.add_column('detached_award_procurement', sa.Column('ultimate_parent_uei', sa.Text(), nullable=True))
    op.add_column('detached_award_procurement', sa.Column('ultimate_parent_uei_name', sa.Text(), nullable=True))
    op.create_index(op.f('ix_detached_award_procurement_awardee_or_recipient_uei'), 'detached_award_procurement', ['awardee_or_recipient_uei'], unique=False)
    # ### end Alembic commands ###
def downgrade_data_broker():
    """Reverse upgrade_data_broker: drop the index, then the four UEI columns."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_detached_award_procurement_awardee_or_recipient_uei'), table_name='detached_award_procurement')
    op.drop_column('detached_award_procurement', 'ultimate_parent_uei_name')
    op.drop_column('detached_award_procurement', 'ultimate_parent_uei')
    op.drop_column('detached_award_procurement', 'awardee_or_recipient_uei_n')
    op.drop_column('detached_award_procurement', 'awardee_or_recipient_uei')
    # ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/77bd1ef8c53b_adding_uei_elements_to_detached_award_.py",
"copies": "1",
"size": "1830",
"license": "cc0-1.0",
"hash": 4906742183393795000,
"line_mean": 37.125,
"line_max": 157,
"alpha_frac": 0.7213114754,
"autogenerated": false,
"ratio": 3.0860033726812817,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4307314848081282,
"avg_score": null,
"num_lines": null
} |
"""Adding UEI values to DUNS tables
Revision ID: e72f2699bcaa
Revises: 24e0fa72827a
Create Date: 2021-02-26 20:40:28.254073
"""
# revision identifiers, used by Alembic.
revision = 'e72f2699bcaa'
down_revision = '24e0fa72827a'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade routine (e.g. ``upgrade_data_broker``)."""
    globals()["upgrade_" + engine_name]()


def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade routine (e.g. ``downgrade_data_broker``)."""
    globals()["downgrade_" + engine_name]()
def upgrade_data_broker():
    """Add UEI columns to duns/historic_duns (plus entity_structure) and index the uei columns."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('duns', sa.Column('uei', sa.Text(), nullable=True))
    op.add_column('duns', sa.Column('ultimate_parent_uei', sa.Text(), nullable=True))
    op.add_column('historic_duns', sa.Column('uei', sa.Text(), nullable=True))
    op.add_column('historic_duns', sa.Column('ultimate_parent_uei', sa.Text(), nullable=True))
    op.add_column('historic_duns', sa.Column('entity_structure', sa.Text(), nullable=True))
    op.create_index(op.f('ix_duns_uei'), 'duns', ['uei'], unique=False)
    op.create_index(op.f('ix_historic_duns_uei'), 'historic_duns', ['uei'], unique=False)
    # ### end Alembic commands ###
def downgrade_data_broker():
    """Reverse upgrade_data_broker: drop the indexes and columns, historic_duns first."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_historic_duns_uei'), table_name='historic_duns')
    op.drop_column('historic_duns', 'entity_structure')
    op.drop_column('historic_duns', 'ultimate_parent_uei')
    op.drop_column('historic_duns', 'uei')
    op.drop_index(op.f('ix_duns_uei'), table_name='duns')
    op.drop_column('duns', 'ultimate_parent_uei')
    op.drop_column('duns', 'uei')
    # ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/e72f2699bcaa_adding_uei_values_to_duns.py",
"copies": "1",
"size": "1732",
"license": "cc0-1.0",
"hash": -188499842054542600,
"line_mean": 32.3076923077,
"line_max": 94,
"alpha_frac": 0.6749422633,
"autogenerated": false,
"ratio": 2.793548387096774,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39684906503967743,
"avg_score": null,
"num_lines": null
} |
"""Adding UniqueConstraint to table nicety, deleting duplicate rows
Revision ID: e88e0781857f
Revises: 490ca3791aac
Create Date: 2021-05-10 16:20:13.973239
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
# revision identifiers, used by Alembic.
revision = 'e88e0781857f'
down_revision = '490ca3791aac'
branch_labels = None
depends_on = None
def upgrade():
    """De-duplicate the nicety table, then add a uniqueness constraint.

    NOTE(review): duplicates are detected on (author_id, target_id, text) but
    the new unique constraint covers (author_id, target_id, end_date) --
    confirm this mismatch is intentional.
    """
    nicety = table('nicety', column('id', sa.Integer))
    connection = op.get_bind()
    # Self-join finds duplicate rows; A.id > B.id means the lowest id of each
    # duplicate group is kept and the higher ids (A.id, returned as row[0]) are deleted.
    result = connection.execute('''select A.id, B.id from nicety as A
    join nicety as B
    on A.author_id = B.author_id
    and A.target_id = B.target_id
    and A.text = B.text
    and A.id > B.id;''')
    for row in result:
        print("Deleting duplicate nicety with ID {}".format(row[0]))
        # One option for deletion
        op.execute(sa.delete(nicety).where(nicety.c.id == row[0]))
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_unique_constraint(None, 'nicety', ['author_id', 'target_id', 'end_date'])
    # ### end Alembic commands ###
def downgrade():
    """No-op: deleted duplicates cannot be restored (the unique constraint is also left in place)."""
    pass
| {
"repo_name": "mjec/rc-niceties",
"path": "migrations/versions/e88e0781857f_.py",
"copies": "1",
"size": "1108",
"license": "mit",
"hash": -7603760668032071000,
"line_mean": 28.1578947368,
"line_max": 87,
"alpha_frac": 0.6714801444,
"autogenerated": false,
"ratio": 3.2684365781710913,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4439916722571091,
"avg_score": null,
"num_lines": null
} |
"""Adding unique PK to fpds_update
Revision ID: 5f1470603fa0
Revises: 7597deb348fb
Create Date: 2018-03-05 13:25:41.189603
"""
# revision identifiers, used by Alembic.
revision = '5f1470603fa0'
down_revision = '7597deb348fb'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade routine (e.g. ``upgrade_data_broker``)."""
    globals()["upgrade_" + engine_name]()


def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade routine (e.g. ``downgrade_data_broker``)."""
    globals()["downgrade_" + engine_name]()
def upgrade_data_broker():
    """Give fpds_update a surrogate integer primary key and make update_date nullable."""
    ### commands auto generated by Alembic - please adjust! ###
    # server_default='1' allows the NOT NULL column to be added to existing rows.
    # NOTE(review): every existing row gets id 1, so the PRIMARY KEY below would
    # fail if the table held more than one row -- presumably it holds at most one.
    op.add_column('fpds_update', sa.Column('fpds_update_id', sa.Integer(), server_default='1', nullable=False))
    # Swap the primary key from update_date to the new surrogate id via raw SQL.
    op.execute("ALTER TABLE fpds_update DROP CONSTRAINT fpds_update_pkey")
    op.execute("ALTER TABLE fpds_update ADD CONSTRAINT fpds_update_pkey PRIMARY KEY (fpds_update_id)")
    op.alter_column('fpds_update', 'update_date',
    existing_type=sa.DATE(),
    nullable=True)
    ### end Alembic commands ###
def downgrade_data_broker():
    """Reverse upgrade_data_broker: restore update_date as the primary key and drop the surrogate id."""
    ### commands auto generated by Alembic - please adjust! ###
    op.execute("ALTER TABLE fpds_update DROP CONSTRAINT fpds_update_pkey")
    op.execute("ALTER TABLE fpds_update ADD CONSTRAINT fpds_update_pkey PRIMARY KEY (update_date)")
    op.alter_column('fpds_update', 'update_date',
    existing_type=sa.DATE(),
    nullable=False)
    op.drop_column('fpds_update', 'fpds_update_id')
    ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/5f1470603fa0_adding_unique_pk_to_fpds_update.py",
"copies": "1",
"size": "1479",
"license": "cc0-1.0",
"hash": -3225232814426291000,
"line_mean": 28.58,
"line_max": 111,
"alpha_frac": 0.6781609195,
"autogenerated": false,
"ratio": 3.4637002341920375,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46418611536920373,
"avg_score": null,
"num_lines": null
} |
"""Adding unique transaction keys to staging models
Revision ID: 0cf297fa927c
Revises: 94efce2e7882
Create Date: 2019-03-21 17:14:34.938006
"""
# revision identifiers, used by Alembic.
revision = '0cf297fa927c'
down_revision = '94efce2e7882'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade routine (e.g. ``upgrade_data_broker``)."""
    globals()["upgrade_" + engine_name]()


def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade routine (e.g. ``downgrade_data_broker``)."""
    globals()["downgrade_" + engine_name]()
def upgrade_data_broker():
    """Add the unique-transaction-key columns to the staging award tables."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('award_financial_assistance', sa.Column('afa_generated_unique', sa.Text(), nullable=True))
    op.add_column('award_procurement', sa.Column('detached_award_proc_unique', sa.Text(), nullable=True))
    # ### end Alembic commands ###
def downgrade_data_broker():
    """Drop the unique-transaction-key columns added by upgrade_data_broker."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('award_procurement', 'detached_award_proc_unique')
    op.drop_column('award_financial_assistance', 'afa_generated_unique')
    # ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/0cf297fa927c_adding_unique_transaction_keys_to_.py",
"copies": "1",
"size": "1146",
"license": "cc0-1.0",
"hash": 720550773957362400,
"line_mean": 26.2857142857,
"line_max": 108,
"alpha_frac": 0.7076788831,
"autogenerated": false,
"ratio": 3.3508771929824563,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45585560760824567,
"avg_score": null,
"num_lines": null
} |
"""Adding upper fabs awarding subtier code index
Revision ID: b998d20b46e6
Revises: d7e1a2433dca
Create Date: 2019-09-06 15:51:01.640884
"""
# revision identifiers, used by Alembic.
revision = 'b998d20b46e6'
down_revision = 'd7e1a2433dca'
branch_labels = None
depends_on = None
from alembic import op
from sqlalchemy import text
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade routine (e.g. ``upgrade_data_broker``)."""
    globals()["upgrade_" + engine_name]()


def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade routine (e.g. ``downgrade_data_broker``)."""
    globals()["downgrade_" + engine_name]()
def upgrade_data_broker():
    """Add a functional index on UPPER(awarding_sub_tier_agency_c) for queries comparing the upper-cased value."""
    # ### commands auto generated by Alembic - please adjust! ###
    # text() is required because the index expression is raw SQL, not a plain column name.
    op.create_index('ix_pafa_awarding_subtier_c_upper', 'published_award_financial_assistance',
    [text('UPPER(awarding_sub_tier_agency_c)')], unique=False)
    # ### end Alembic commands ###
def downgrade_data_broker():
    """Drop the functional index added by upgrade_data_broker."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index('ix_pafa_awarding_subtier_c_upper', table_name='published_award_financial_assistance')
    # ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/b998d20b46e6_adding_upper_fabs_awarding_subtier_code_.py",
"copies": "1",
"size": "1026",
"license": "cc0-1.0",
"hash": -6001739497240760000,
"line_mean": 24.65,
"line_max": 104,
"alpha_frac": 0.6910331384,
"autogenerated": false,
"ratio": 3.1569230769230767,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43479562153230766,
"avg_score": null,
"num_lines": null
} |
"""Adding user columns for email confirmation, password resets, and a display name.
Revision ID: 1fa61500b159
Revises: cecb3808cb0
Create Date: 2014-11-09 17:07:00.503000
"""
# revision identifiers, used by Alembic.
revision = '1fa61500b159'
down_revision = 'cecb3808cb0'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add display_name, email-confirmation, and password-reset columns to the user table."""
    ### commands auto generated by Alembic - please adjust! ###
    # server_default values let the NOT NULL columns be added to existing rows;
    # server_default='1' marks pre-existing accounts as already email-activated.
    op.add_column('user', sa.Column('display_name', sa.Unicode(length=100), server_default='', nullable=False))
    op.add_column('user', sa.Column('email_activated', sa.Boolean(), server_default='1', nullable=False))
    op.add_column('user', sa.Column('email_code', sa.Unicode(length=36), nullable=True))
    op.add_column('user', sa.Column('reset_pass_date', sa.DateTime(), nullable=True))
    ### end Alembic commands ###
def downgrade():
    """Drop the four columns added by upgrade(), in reverse order."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('user', 'reset_pass_date')
    op.drop_column('user', 'email_code')
    op.drop_column('user', 'email_activated')
    op.drop_column('user', 'display_name')
    ### end Alembic commands ###
| {
"repo_name": "radremedy/radremedy",
"path": "remedy/rad/migrations/versions/1fa61500b159_user_management_columns.py",
"copies": "3",
"size": "1176",
"license": "mpl-2.0",
"hash": 432706949865116740,
"line_mean": 20.1851851852,
"line_max": 111,
"alpha_frac": 0.6573129252,
"autogenerated": false,
"ratio": 3.248618784530387,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5405931709730387,
"avg_score": null,
"num_lines": null
} |
"""Adding username information to praw_keys
Revision ID: 022d8114fe2a
Revises: 2957ac0e11b6
Create Date: 2016-07-23 12:31:14.863110
"""
# revision identifiers, used by Alembic.
revision = '022d8114fe2a'
down_revision = '2957ac0e11b6'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Dispatch to the environment-specific upgrade routine (e.g. ``upgrade_development``)."""
    globals()["upgrade_" + engine_name]()


def downgrade(engine_name):
    """Dispatch to the environment-specific downgrade routine (e.g. ``downgrade_development``)."""
    globals()["downgrade_" + engine_name]()
def _upgrade_praw_keys():
    """Shared upgrade body: add the authorized user id/name columns to praw_keys and index both."""
    op.add_column('praw_keys', sa.Column('authorized_user_id', sa.String(length=256), nullable=True))
    op.add_column('praw_keys', sa.Column('authorized_username', sa.String(length=256), nullable=True))
    op.create_index(op.f('ix_praw_keys_authorized_user_id'), 'praw_keys', ['authorized_user_id'], unique=False)
    op.create_index(op.f('ix_praw_keys_authorized_username'), 'praw_keys', ['authorized_username'], unique=False)


def _downgrade_praw_keys():
    """Shared downgrade body: drop the indexes, then the columns, reversing _upgrade_praw_keys."""
    op.drop_index(op.f('ix_praw_keys_authorized_username'), table_name='praw_keys')
    op.drop_index(op.f('ix_praw_keys_authorized_user_id'), table_name='praw_keys')
    op.drop_column('praw_keys', 'authorized_username')
    op.drop_column('praw_keys', 'authorized_user_id')


# The schema change is identical in every environment; the per-environment
# entry points below (called via globals() dispatch in upgrade()/downgrade())
# all delegate to the shared helpers instead of triplicating the op calls.

def upgrade_development():
    """Apply the praw_keys changes to the development database."""
    _upgrade_praw_keys()


def downgrade_development():
    """Revert the praw_keys changes on the development database."""
    _downgrade_praw_keys()


def upgrade_test():
    """Apply the praw_keys changes to the test database."""
    _upgrade_praw_keys()


def downgrade_test():
    """Revert the praw_keys changes on the test database."""
    _downgrade_praw_keys()


def upgrade_production():
    """Apply the praw_keys changes to the production database."""
    _upgrade_praw_keys()


def downgrade_production():
    """Revert the praw_keys changes on the production database."""
    _downgrade_praw_keys()
| {
"repo_name": "c4fcm/CivilServant",
"path": "alembic/versions/022d8114fe2a_adding_username_information_to_praw_keys.py",
"copies": "1",
"size": "3343",
"license": "mit",
"hash": 4154764770236267500,
"line_mean": 39.7682926829,
"line_max": 113,
"alpha_frac": 0.6886030512,
"autogenerated": false,
"ratio": 3.165719696969697,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43543227481696967,
"avg_score": null,
"num_lines": null
} |
"""adding user table
Revision ID: 407e74de5553
Revises: 3a5eba38e4e8
Create Date: 2015-03-31 07:53:28.662125
"""
# revision identifiers, used by Alembic.
revision = '407e74de5553'
down_revision = '3a5eba38e4e8'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the users table with a unique index on email_address."""
    op.create_table('users',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(), nullable=False),
    sa.Column('email_address', sa.String(), nullable=False),
    sa.Column('password', sa.String(), nullable=False),
    sa.Column('active', sa.Boolean(), nullable=False),
    sa.Column('locked', sa.Boolean(), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=False),
    sa.Column('updated_at', sa.DateTime(), nullable=False),
    sa.Column('password_changed_at', sa.DateTime(), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    # unique=True enforces one account per email address.
    op.create_index(op.f('ix_users_email_address'), 'users', ['email_address'], unique=True)
    ### end Alembic commands ###
def downgrade():
    """Drop the users table and its email index."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_users_email_address'), table_name='users')
    op.drop_table('users')
    ### end Alembic commands ###
| {
"repo_name": "mtekel/digitalmarketplace-api",
"path": "migrations/versions/407e74de5553_adding_user_table.py",
"copies": "3",
"size": "1206",
"license": "mit",
"hash": -5085148392684234000,
"line_mean": 30.7368421053,
"line_max": 92,
"alpha_frac": 0.6724709784,
"autogenerated": false,
"ratio": 3.268292682926829,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.544076366132683,
"avg_score": null,
"num_lines": null
} |
"""Adding valueUri to QRA
Revision ID: 4e8daad637a6
Revises: 19687c270db8
Create Date: 2017-06-15 12:28:16.260816
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "4e8daad637a6"
down_revision = "19687c270db8"
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade routine (e.g. ``upgrade_rdr``)."""
    globals()["upgrade_" + engine_name]()


def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade routine (e.g. ``downgrade_rdr``)."""
    globals()["downgrade_" + engine_name]()
def upgrade_metrics():
    """No schema changes for the metrics database in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No schema changes for the metrics database in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def upgrade_rdr():
    """Add the value_uri column to questionnaire_response_answer in the RDR database."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("questionnaire_response_answer", sa.Column("value_uri", sa.String(length=1024), nullable=True))
    # ### end Alembic commands ###
def downgrade_rdr():
    """Drop the value_uri column added by upgrade_rdr."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column("questionnaire_response_answer", "value_uri")
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/4e8daad637a6_adding_valueuri_to_qra.py",
"copies": "1",
"size": "1150",
"license": "bsd-3-clause",
"hash": 6824534255842645000,
"line_mean": 23.4680851064,
"line_max": 113,
"alpha_frac": 0.6704347826,
"autogenerated": false,
"ratio": 3.4328358208955225,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46032706034955223,
"avg_score": null,
"num_lines": null
} |
"""adding vcf tbi files to genomic gc metrics validation
Revision ID: c68d427d67b3
Revises: f80b79cff59e
Create Date: 2020-08-12 15:51:23.285273
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c68d427d67b3'
down_revision = 'f80b79cff59e'
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade routine (e.g. ``upgrade_rdr``)."""
    globals()["upgrade_" + engine_name]()


def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade routine (e.g. ``downgrade_rdr``)."""
    globals()["downgrade_" + engine_name]()
def upgrade_rdr():
    """Add VCF index (.tbi) path/received columns to genomic_gc_validation_metrics."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('genomic_gc_validation_metrics', sa.Column('vcf_tbi_path', sa.String(length=255), nullable=True))
    # NOTE(review): NOT NULL without a server_default will fail if the table
    # already contains rows -- confirm the table is empty at migration time.
    op.add_column('genomic_gc_validation_metrics', sa.Column('vcf_tbi_received', sa.SmallInteger(), nullable=False))
    # ### end Alembic commands ###
def downgrade_rdr():
    """Drop the vcf_tbi_* columns added by upgrade_rdr."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('genomic_gc_validation_metrics', 'vcf_tbi_received')
    op.drop_column('genomic_gc_validation_metrics', 'vcf_tbi_path')
    # ### end Alembic commands ###
def upgrade_metrics():
    """No schema changes for the metrics database in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No schema changes for the metrics database in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/c68d427d67b3_adding_vcf_tbi_files_to_genomic_gc_.py",
"copies": "1",
"size": "1376",
"license": "bsd-3-clause",
"hash": -6642959563185807000,
"line_mean": 26.52,
"line_max": 116,
"alpha_frac": 0.6802325581,
"autogenerated": false,
"ratio": 3.331719128329298,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4511951686429298,
"avg_score": null,
"num_lines": null
} |
"""Adding wl masking.
"""
import json
import os
from jsmin import jsmin
import simulators
def get_maskinfo(star, obsnum, chip):
    """Fetch the list of wavelength-mask limits for one star/observation/chip.

    The masks live in detector_masks.json under the spectra path; jsmin strips
    comments before JSON parsing.  Returns an empty list when the file or the
    (star, obsnum, chip) entry is missing, after printing a diagnostic.
    """
    path = os.path.join(simulators.paths["spectra"], "detector_masks.json")
    try:
        with open(path, "r") as fh:
            all_masks = json.loads(jsmin(fh.read()))
        return all_masks[str(star)][str(obsnum)][str(chip)]
    except (KeyError, FileNotFoundError) as err:
        print(err)
        print("No Masking file/data present for {0}-{1}_{2}".format(star, obsnum, chip))
        return []
def spectrum_masking(spec, star, obsnum, chip):
    """Apply the detector wavelength masks to ``spec`` and return it.

    Parameters
    ----------
    spec: spectrum object exposing ``xaxis`` and ``wav_select`` -- TODO confirm type.
    star, obsnum, chip: identifiers used to look up masks in detector_masks.json.

    Raises
    ------
    ValueError: if a mask entry in the mask file is not a [lower, upper] pair.
    """
    chip_masks = get_maskinfo(star, obsnum, chip)
    if int(chip) == 4:
        # Ignore first 50 pixels of detector 4
        dw = 0.0000001  # small offset to make the mask inclusive
        spec.wav_select(spec.xaxis[50] - dw, spec.xaxis[-1] + dw)
    for mask_limits in chip_masks:
        # Empty mask lists never reach this loop body.
        # BUG FIX: was `len(mask_limits) is not 2` -- identity comparison against
        # an int literal relies on CPython small-int caching and raises a
        # SyntaxWarning on Python 3.8+; use a value comparison instead.
        if len(mask_limits) != 2:
            raise ValueError("Mask limits in mask file are incorrect for {0}-{1}_{2}".format(star, obsnum, chip))
        spec.wav_select(*mask_limits)  # Wavelengths to include
    return spec
| {
"repo_name": "jason-neal/companion_simulations",
"path": "mingle/utilities/masking.py",
"copies": "1",
"size": "1178",
"license": "mit",
"hash": -4975188879926966000,
"line_mean": 31.7222222222,
"line_max": 113,
"alpha_frac": 0.6256366723,
"autogenerated": false,
"ratio": 3.3183098591549296,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9439452980914222,
"avg_score": 0.0008987101081413015,
"num_lines": 36
} |
"""Add in Hue bridge info
Revision ID: 258d289a169a
Revises: b6637f24d0e0
Create Date: 2016-12-04 10:25:10.087095
"""
# revision identifiers, used by Alembic.
revision = '258d289a169a'
down_revision = 'b6637f24d0e0'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
    """Create the HueBridges table tying a Hue bridge (name, address, API user) to a Users row.

    The 'user' column is unique; presumably it is the Hue bridge API username -- confirm.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('HueBridges',
    sa.Column('bridge_id', mysql.INTEGER(unsigned=True), nullable=False),
    sa.Column('user_id', mysql.INTEGER(unsigned=True), nullable=False),
    sa.Column('name', sa.VARCHAR(length=150), nullable=False),
    sa.Column('address', sa.VARCHAR(length=45), nullable=False),
    sa.Column('user', sa.VARCHAR(length=40), nullable=False),
    sa.ForeignKeyConstraint(['user_id'], ['Users.user_id'], ),
    sa.PrimaryKeyConstraint('bridge_id'),
    sa.UniqueConstraint('user')
    )
    ### end Alembic commands ###
def downgrade():
    """Drop the HueBridges table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('HueBridges')
    ### end Alembic commands ###
| {
"repo_name": "geudrik/homestack-db-library",
"path": "alembic/versions/258d289a169a_add_in_hue_bridge_info.py",
"copies": "1",
"size": "1118",
"license": "mit",
"hash": 7242570643224328000,
"line_mean": 29.2162162162,
"line_max": 73,
"alpha_frac": 0.6949910555,
"autogenerated": false,
"ratio": 3.3175074183976263,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4512498473897626,
"avg_score": null,
"num_lines": null
} |
"""Add initial groups features.
Revision ID: 36218ef15edc
Revises: c0732331a3c8
Create Date: 2016-10-13 13:53:30.935455
"""
# revision identifiers, used by Alembic.
revision = '36218ef15edc'
down_revision = 'c0732331a3c8'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the groups feature tables and re-point blueprints at blueprint_templates.

    Blueprints previously referenced a plugin directly; after this migration the
    plugin reference lives on blueprint_templates and blueprints reference a
    template (and optionally a group) instead.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('groups',
    sa.Column('id', sa.String(length=32), nullable=False),
    sa.Column('name', sa.String(length=32), nullable=True),
    sa.Column('_join_code', sa.String(length=64), nullable=True),
    sa.Column('description', sa.Text(), nullable=True),
    sa.Column('user_config', sa.Text(), nullable=True),
    sa.PrimaryKeyConstraint('id', name=op.f('pk_groups'))
    )
    op.create_table('blueprint_templates',
    sa.Column('id', sa.String(length=32), nullable=False),
    sa.Column('name', sa.String(length=128), nullable=True),
    sa.Column('config', sa.Text(), nullable=True),
    sa.Column('is_enabled', sa.Boolean(), nullable=True),
    sa.Column('plugin', sa.String(length=32), nullable=True),
    sa.Column('blueprint_schema', sa.Text(), nullable=True),
    sa.Column('blueprint_form', sa.Text(), nullable=True),
    sa.Column('blueprint_model', sa.Text(), nullable=True),
    sa.Column('allowed_attrs', sa.Text(), nullable=True),
    sa.ForeignKeyConstraint(['plugin'], ['plugins.id'], name=op.f('fk_blueprint_templates_plugin_plugins')),
    sa.PrimaryKeyConstraint('id', name=op.f('pk_blueprint_templates'))
    )
    # Association tables: banned members, owners, and members of a group.
    op.create_table('groups_banned_users',
    sa.Column('group_id', sa.String(length=32), nullable=False),
    sa.Column('user_id', sa.String(length=32), nullable=False),
    sa.ForeignKeyConstraint(['group_id'], ['groups.id'], name=op.f('fk_groups_banned_users_group_id_groups')),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], name=op.f('fk_groups_banned_users_user_id_users')),
    sa.PrimaryKeyConstraint('group_id', 'user_id', name=op.f('pk_groups_banned_users'))
    )
    op.create_table('groups_owners',
    sa.Column('group_id', sa.String(length=32), nullable=False),
    sa.Column('owner_id', sa.String(length=32), nullable=False),
    sa.ForeignKeyConstraint(['group_id'], ['groups.id'], name=op.f('fk_groups_owners_group_id_groups')),
    sa.ForeignKeyConstraint(['owner_id'], ['users.id'], name=op.f('fk_groups_owners_owner_id_users')),
    sa.PrimaryKeyConstraint('group_id', 'owner_id', name=op.f('pk_groups_owners'))
    )
    op.create_table('groups_users',
    sa.Column('group_id', sa.String(length=32), nullable=False),
    sa.Column('user_id', sa.String(length=32), nullable=False),
    sa.ForeignKeyConstraint(['group_id'], ['groups.id'], name=op.f('fk_groups_users_group_id_groups')),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], name=op.f('fk_groups_users_user_id_users')),
    sa.PrimaryKeyConstraint('group_id', 'user_id', name=op.f('pk_groups_users'))
    )
    # Replace blueprints.plugin with template_id/group_id references.
    op.add_column(u'blueprints', sa.Column('group_id', sa.String(length=32), nullable=True))
    op.add_column(u'blueprints', sa.Column('template_id', sa.String(length=32), nullable=True))
    op.drop_constraint(u'fk_blueprints_plugin_plugins', 'blueprints', type_='foreignkey')
    op.create_foreign_key(op.f('fk_blueprints_template_id_blueprint_templates'), 'blueprints', 'blueprint_templates', ['template_id'], ['id'])
    op.create_foreign_key(op.f('fk_blueprints_group_id_groups'), 'blueprints', 'groups', ['group_id'], ['id'])
    op.drop_column(u'blueprints', 'plugin')
    op.create_unique_constraint(op.f('uq_locks_lock_id'), 'locks', ['lock_id'])
    op.add_column(u'users', sa.Column('is_group_owner', sa.Boolean(), nullable=True))
    ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): restore blueprints.plugin and drop all groups tables.

    NOTE(review): data in blueprints.plugin dropped by upgrade() is not
    restored -- the recreated column starts out NULL.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column(u'users', 'is_group_owner')
    op.drop_constraint(op.f('uq_locks_lock_id'), 'locks', type_='unique')
    op.add_column(u'blueprints', sa.Column('plugin', sa.VARCHAR(length=32), autoincrement=False, nullable=True))
    op.drop_constraint(op.f('fk_blueprints_group_id_groups'), 'blueprints', type_='foreignkey')
    op.drop_constraint(op.f('fk_blueprints_template_id_blueprint_templates'), 'blueprints', type_='foreignkey')
    op.create_foreign_key(u'fk_blueprints_plugin_plugins', 'blueprints', 'plugins', ['plugin'], ['id'])
    op.drop_column(u'blueprints', 'template_id')
    op.drop_column(u'blueprints', 'group_id')
    op.drop_table('groups_users')
    op.drop_table('groups_owners')
    op.drop_table('groups_banned_users')
    op.drop_table('blueprint_templates')
    op.drop_table('groups')
    ### end Alembic commands ###
| {
"repo_name": "CSC-IT-Center-for-Science/pouta-blueprints",
"path": "migrations/versions/36218ef15edc_.py",
"copies": "1",
"size": "4619",
"license": "mit",
"hash": 7668719135898193000,
"line_mean": 52.091954023,
"line_max": 142,
"alpha_frac": 0.6763368695,
"autogenerated": false,
"ratio": 3.1702127659574466,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4346549635457446,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.