text stringlengths 0 1.05M | meta dict |
|---|---|
"""add fixtures
Revision ID: 0c6de0437b27
Revises: 75b78a5e83a9
Create Date: 2016-03-03 06:45:43.814497
"""
# revision identifiers, used by Alembic.
revision = '0c6de0437b27'
down_revision = '75b78a5e83a9'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Load trait and person fixture rows from the bundled CSV file."""
    from sqlalchemy.sql import text
    from assignment.fixtures import read_csv

    rows = read_csv('./assignment/fixtures/dataset.csv')
    connection = op.get_bind()
    trait_ids_by_person = {}

    insert_trait = text('INSERT INTO traits (name) VALUES (:trait)')
    for trait_id, trait, matched in rows:
        # Not optimal, but ok for the purpose of the assignment
        connection.execute(insert_trait, trait=trait)
        for person_name in matched:
            trait_ids_by_person.setdefault(person_name, []).append(trait_id)

    insert_person = text('INSERT INTO people (name, traits) VALUES (:name, :trait_ids)')
    for person_name, ids in trait_ids_by_person.items():
        connection.execute(insert_person, name=person_name, trait_ids=ids)
def downgrade():
    """No-op: fixture rows inserted by upgrade() are not removed."""
| {
"repo_name": "avanov/assignment",
"path": ".alembic/versions/0c6de0437b27_add_fixtures.py",
"copies": "1",
"size": "1210",
"license": "mit",
"hash": 1828435711850721000,
"line_mean": 27.8095238095,
"line_max": 90,
"alpha_frac": 0.6520661157,
"autogenerated": false,
"ratio": 3.723076923076923,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9852763276975578,
"avg_score": 0.00447595236026913,
"num_lines": 42
} |
"""add flag for ghost id
Revision ID: 93d831aa6fb4
Revises: e3272c2dbf9a
Create Date: 2019-01-30 14:55:59.604938
"""
import model.utils
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "93d831aa6fb4"
down_revision = "e3272c2dbf9a"
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific ``upgrade_<engine_name>()`` routine."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific ``downgrade_<engine_name>()`` routine."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_rdr():
    """Add ghost-id tracking columns to participant and participant_history."""
    for table in ("participant", "participant_history"):
        op.add_column(table, sa.Column("date_added_ghost", model.utils.UTCDateTime(), nullable=True))
        op.add_column(table, sa.Column("is_ghost_id", sa.Boolean(), nullable=True))
def downgrade_rdr():
    """Remove the ghost-id tracking columns added by upgrade_rdr()."""
    for table in ("participant_history", "participant"):
        op.drop_column(table, "is_ghost_id")
        op.drop_column(table, "date_added_ghost")
def upgrade_metrics():
    """No schema changes for the metrics database in this revision."""
def downgrade_metrics():
    """No schema changes to revert for the metrics database."""
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/93d831aa6fb4_add_flag_for_ghost_id.py",
"copies": "1",
"size": "1615",
"license": "bsd-3-clause",
"hash": -5851108034972173000,
"line_mean": 28.9074074074,
"line_max": 113,
"alpha_frac": 0.6773993808,
"autogenerated": false,
"ratio": 3.3575883575883574,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9531285312914375,
"avg_score": 0.000740485094796581,
"num_lines": 54
} |
# add Flask-related imports before this point
from flask.ext.login import LoginManager, login_user, UserMixin, \
login_required, logout_user, current_user
from wtforms import Form, TextField, PasswordField, validators
# simpleldap is way more convenient than python-ldap
import simpleldap
# initialize the Flask app
app = Flask(__name__)
# initialize the login manager
login_manager = LoginManager()
login_manager.init_app(app)
ldapsrv = '173.36.129.204'
basedn = 'ou=active,ou=employees,ou=people,o=cisco.com'
def ldap_fetch(uid=None, name=None, passwd=None):
    """Look up a user in the LDAP directory.

    When both ``name`` and ``passwd`` are given, bind as that user (which
    also verifies the password); otherwise do an anonymous search by
    ``uid`` (matched against ``uidNumber``).

    Returns a dict with ``name``, ``id`` and ``gid`` keys on success, or
    None when the bind, the search, or the result parsing fails.
    """
    try:
        if name is not None and passwd is not None:
            conn = simpleldap.Connection(
                ldapsrv,
                dn='uid={0},{1}'.format(name, basedn), password=passwd)
            result = conn.search('uid={0}'.format(name), base_dn=basedn)
        else:
            conn = simpleldap.Connection(ldapsrv)
            result = conn.search('uidNumber={0}'.format(uid), base_dn=basedn)
        return {
            'name': result[0]['uid'][0],
            'id': unicode(result[0]['uidNumber'][0]),
            'gid': int(result[0]['gidNumber'][0])
        }
    except Exception:
        # Bad credentials, unreachable server or a missing/short result all
        # surface as "no user".  A bare `except:` here would also have
        # swallowed KeyboardInterrupt/SystemExit, so catch Exception only.
        return None
class User(UserMixin):
    """Flask-Login user object backed by an LDAP directory entry."""

    def __init__(self, uid=None, name=None, passwd=None):
        self.active = False
        record = ldap_fetch(uid=uid, name=name, passwd=passwd)
        if record is None:
            return
        self.name = record['name']
        self.id = record['id']
        self.gid = record['gid']
        # assume that a disabled user belongs to group 404
        if record['gid'] != 404:
            self.active = True

    def is_active(self):
        return self.active

    def get_id(self):
        return self.id
@login_manager.user_loader
def load_user(userid):
    # Flask-Login session callback: rebuild the User from the stored id.
    # Note this triggers an LDAP lookup (via User.__init__) on each call.
    return User(uid=userid)
class LoginForm(Form):
    # Username/password login form; length and presence checks are enforced
    # by the WTForms validators attached to each field.
    username = TextField("Username", [validators.Length(min=2, max=25)])
    password = PasswordField('Password', [validators.Required()])
@app.route("/", methods=["GET", "POST"])
def login():
form = LoginForm(request.form)
if request.method == 'POST' and form.validate():
user = User(name=form.username.data, passwd=form.password.data)
if user.active is not False:
login_user(user)
flash("Logged in successfully.")
return redirect(url_for("some_secret_page"))
return render_template("login.html", form=form)
@app.route("/logout", methods=["GET", "POST"])
@login_required
def logout():
logout_user()
return redirect(url_for("login"))
| {
"repo_name": "karthik339/Agni",
"path": "MainDemo/venv/ldap.py",
"copies": "1",
"size": "2510",
"license": "apache-2.0",
"hash": 912021227792200600,
"line_mean": 28.1860465116,
"line_max": 72,
"alpha_frac": 0.6155378486,
"autogenerated": false,
"ratio": 3.6324167872648334,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47479546358648334,
"avg_score": null,
"num_lines": null
} |
"""Add flat position column
Revision ID: d21933db9ad8
Revises: 8155b83242eb
Create Date: 2021-02-08 16:26:37.190842
"""
from alembic import op
import sqlalchemy as sa
import sqlalchemy.types as types
import json
class StringyJSON(types.TypeDecorator):
    """
    Stores and retrieves JSON as TEXT for SQLite.
    From
    https://avacariu.me/articles/2016/compiling-json-as-text-for-sqlite-with-sqlalchemy.
    .. note ::
        The associated field is immutable. That is, changes to the data
        (typically, changing the value of a dict field) will not trigger an
        update on the SQL side upon ``commit`` as the reference to the object
        will not have been updated. One should force the update by forcing an
        update of the reference (by performing a ``copy`` operation on the dict
        for instance).
    """
    impl = types.TEXT
    # Serialization depends only on the bound value, so the type is safe to
    # use in SQLAlchemy's statement cache (avoids a SAWarning on SQLA >= 1.4;
    # harmlessly ignored by older versions).
    cache_ok = True

    def process_bind_param(self, value, dialect):
        """
        Process the bound param, serialize the object to JSON before saving
        into database.
        """
        if value is not None:
            value = json.dumps(value)
        return value

    def process_result_value(self, value, dialect):
        """
        Process the value fetched from the database, deserialize the JSON
        string before returning the object.
        """
        if value is not None:
            value = json.loads(value)
        return value
# TypeEngine.with_variant says "use StringyJSON instead when
# connecting to 'sqlite'"
# pylint: disable=locally-disabled,invalid-name
MagicJSON = types.JSON().with_variant(StringyJSON, "sqlite")
# revision identifiers, used by Alembic.
revision = "d21933db9ad8"
down_revision = "8155b83242eb"
branch_labels = None
depends_on = None
def upgrade():
    """Add the JSON ``flatisfy_position`` column to the flats table."""
    position_column = sa.Column("flatisfy_position", MagicJSON, default=False)
    op.add_column("flats", position_column)
def downgrade():
    """Drop the ``flatisfy_position`` column added by upgrade()."""
    table, column = "flats", "flatisfy_position"
    op.drop_column(table, column)
| {
"repo_name": "Phyks/Flatisfy",
"path": "migrations/versions/d21933db9ad8_add_flat_position_column.py",
"copies": "1",
"size": "1921",
"license": "mit",
"hash": 280822919124026430,
"line_mean": 26.8405797101,
"line_max": 88,
"alpha_frac": 0.6767308693,
"autogenerated": false,
"ratio": 3.888663967611336,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5065394836911336,
"avg_score": null,
"num_lines": null
} |
"""add-flexfield-table
Revision ID: c239f4080b5c
Revises: a97dabbd44f4
Create Date: 2016-11-15 11:40:19.663751
"""
# revision identifiers, used by Alembic.
revision = 'c239f4080b5c'
down_revision = 'a97dabbd44f4'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Run the upgrade routine registered for the given engine name."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Run the downgrade routine registered for the given engine name."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_data_broker():
    """Create the flex_field table with its job/submission lookup indexes."""
    op.create_table(
        'flex_field',
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('flex_field_id', sa.Integer(), nullable=False),
        sa.Column('submission_id', sa.Integer(), nullable=False),
        sa.Column('job_id', sa.Integer(), nullable=False),
        sa.Column('row_number', sa.Integer(), nullable=False),
        sa.Column('header', sa.Text(), nullable=True),
        sa.Column('cell', sa.Text(), nullable=True),
        sa.PrimaryKeyConstraint('flex_field_id'),
    )
    for indexed_col in ('job_id', 'submission_id'):
        op.create_index(op.f('ix_flex_field_%s' % indexed_col), 'flex_field',
                        [indexed_col], unique=False)
def downgrade_data_broker():
    """Reverse upgrade_data_broker: drop the indexes, then the table."""
    for indexed_col in ('submission_id', 'job_id'):
        op.drop_index(op.f('ix_flex_field_%s' % indexed_col),
                      table_name='flex_field')
    op.drop_table('flex_field')
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/c239f4080b5c_add_flexfield_table.py",
"copies": "1",
"size": "1629",
"license": "cc0-1.0",
"hash": 7003241656159850000,
"line_mean": 29.1666666667,
"line_max": 103,
"alpha_frac": 0.6697360344,
"autogenerated": false,
"ratio": 3.150870406189555,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.919113513597329,
"avg_score": 0.025894260923253226,
"num_lines": 54
} |
"""add flow archive flag
Revision ID: 2cc5046d4fae
Revises: 36a730c5724c
Create Date: 2015-11-03 11:14:36.852502
"""
# revision identifiers, used by Alembic.
revision = '2cc5046d4fae'
down_revision = '36a730c5724c'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Recreate the accepted_domains user FKs and add flow.is_archived."""
    for user_col in ('updated_by_id', 'created_by_id'):
        op.create_foreign_key('%s_fkey' % user_col, 'accepted_domains',
                              'users', [user_col], ['id'])
    # Existing rows default to not-archived via the server-side default.
    op.add_column('flow', sa.Column('is_archived', sa.Boolean(),
                                    nullable=False,
                                    server_default=sa.text("false")))
def downgrade():
    """Drop the is_archived flag and the accepted_domains foreign keys."""
    op.drop_column('flow', 'is_archived')
    for user_col in ('created_by_id', 'updated_by_id'):
        op.drop_constraint('%s_fkey' % user_col, 'accepted_domains',
                           type_='foreignkey')
| {
"repo_name": "codeforamerica/pittsburgh-purchasing-suite",
"path": "migrations/versions/2cc5046d4fae_add_flow_archive_flag.py",
"copies": "2",
"size": "1031",
"license": "bsd-3-clause",
"hash": 2380703107156381000,
"line_mean": 33.3666666667,
"line_max": 114,
"alpha_frac": 0.6818622696,
"autogenerated": false,
"ratio": 3.1625766871165646,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9806362819831285,
"avg_score": 0.007615227377056023,
"num_lines": 30
} |
"""add follow relation
Revision ID: ed1f2e57555c
Revises: cd35d0723824
Create Date: 2016-08-20 12:33:36.891000
"""
# revision identifiers, used by Alembic.
revision = 'ed1f2e57555c'
down_revision = 'cd35d0723824'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
    """Create the follows association table; widen users.activated."""
    op.create_table(
        'follows',
        sa.Column('follower_id', sa.Integer(), nullable=False),
        sa.Column('followed_id', sa.Integer(), nullable=False),
        sa.Column('timestamp', sa.DateTime(), nullable=True),
        sa.ForeignKeyConstraint(['followed_id'], ['users.id']),
        sa.ForeignKeyConstraint(['follower_id'], ['users.id']),
        sa.PrimaryKeyConstraint('follower_id', 'followed_id'),
    )
    # MySQL represents Boolean as TINYINT(1); record the conversion.
    op.alter_column(u'users', 'activated',
                    existing_type=mysql.TINYINT(display_width=1),
                    type_=sa.Boolean(),
                    existing_nullable=True)
def downgrade():
    """Revert users.activated to TINYINT(1) and drop the follows table."""
    op.alter_column(u'users', 'activated',
                    existing_type=sa.Boolean(),
                    type_=mysql.TINYINT(display_width=1),
                    existing_nullable=True)
    op.drop_table('follows')
| {
"repo_name": "zlasd/flaskr_exercise",
"path": "migrations/versions/ed1f2e57555c_add_follow_relation.py",
"copies": "1",
"size": "1313",
"license": "mit",
"hash": -6200963548993626000,
"line_mean": 31.0243902439,
"line_max": 63,
"alpha_frac": 0.6481340442,
"autogenerated": false,
"ratio": 3.5972602739726027,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47453943181726027,
"avg_score": null,
"num_lines": null
} |
"""Add foreign key constraints
Revision ID: 457bbf802239
Revises: e912ea8b3cb1
Create Date: 2016-01-25 23:04:37.492418
"""
# revision identifiers, used by Alembic.
revision = '457bbf802239'
down_revision = 'e912ea8b3cb1'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add ON DELETE CASCADE foreign keys from the report detail tables."""
    # (constraint name, source table, referent table, local column);
    # every key references the referent's 'id' column.
    fkeys = (
        ('audio_reports_report_id_fkey',
         'audio_reports', 'reports', 'report_id'),
        ('location_reports_report_id_fkey',
         'location_reports', 'reports', 'report_id'),
        ('placemark_reports_location_report_id_fkey',
         'placemark_reports', 'location_reports', 'location_report_id'),
        ('weather_reports_report_id_fkey',
         'weather_reports', 'reports', 'report_id'),
        ('responses_report_id_fkey',
         'responses', 'reports', 'report_id'),
    )
    for constraint, source, referent, local_col in fkeys:
        op.create_foreign_key(
            constraint_name=constraint,
            source_table=source, referent_table=referent,
            local_cols=[local_col], remote_cols=['id'], ondelete='CASCADE')
def downgrade():
    """Drop the cascade foreign keys, using batch mode per table."""
    constraints = (
        ('audio_reports', 'audio_reports_report_id_fkey'),
        ('location_reports', 'location_reports_report_id_fkey'),
        ('placemark_reports', 'placemark_reports_location_report_id_fkey'),
        ('weather_reports', 'weather_reports_report_id_fkey'),
        ('responses', 'responses_report_id_fkey'),
    )
    for table, constraint in constraints:
        with op.batch_alter_table(table) as batch_op:
            batch_op.drop_constraint(constraint)
| {
"repo_name": "thejunglejane/datums",
"path": "datums/migrations/versions/457bbf802239_add_foreign_key_constraints.py",
"copies": "1",
"size": "2149",
"license": "mit",
"hash": -412883407662309000,
"line_mean": 34.2295081967,
"line_max": 77,
"alpha_frac": 0.6826430898,
"autogenerated": false,
"ratio": 3.4439102564102564,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46265533462102565,
"avg_score": null,
"num_lines": null
} |
"""add foreign keys
Revision ID: 4ec1ca506d1f
Revises: 1eb29cd1981
Create Date: 2015-03-19 11:34:16.695193
"""
# revision identifiers, used by Alembic.
revision = '4ec1ca506d1f'
down_revision = '1eb29cd1981'
from alembic import op
def upgrade():
    """Link the log-step tables to their parents with cascading FKs."""
    fkeys = (
        ("fk_vmmaster_log_step_to_session",
         "vmmaster_log_steps", "sessions", "session_id"),
        ("fk_session_log_step_to_vmmaster_log_step",
         "session_log_steps", "vmmaster_log_steps", "vmmaster_log_step_id"),
    )
    for fk_name, source, referent, local_col in fkeys:
        op.create_foreign_key(name=fk_name,
                              source=source,
                              referent=referent,
                              local_cols=[local_col],
                              remote_cols=["id"],
                              ondelete='CASCADE')
def downgrade():
    """Remove the log-step foreign keys added by upgrade()."""
    for fk_name, table in (
            ("fk_vmmaster_log_step_to_session", "vmmaster_log_steps"),
            ("fk_session_log_step_to_vmmaster_log_step", "session_log_steps")):
        op.drop_constraint(name=fk_name, table_name=table, type_="foreignkey")
| {
"repo_name": "2gis/vmmaster",
"path": "migrations/alembic/versions/4ec1ca506d1f_add_foreign_keys.py",
"copies": "1",
"size": "1257",
"license": "mit",
"hash": -2065848418342795800,
"line_mean": 32.972972973,
"line_max": 74,
"alpha_frac": 0.5250596659,
"autogenerated": false,
"ratio": 3.774774774774775,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4799834440674775,
"avg_score": null,
"num_lines": null
} |
"""Add formation
Revision ID: 3bc5fc4758
Revises: 17034142d9b
Create Date: 2015-07-20 22:57:54.221307
"""
# revision identifiers, used by Alembic.
revision = '3bc5fc4758'
down_revision = '17034142d9b'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the formation table and the soldier_formation join table."""
    op.create_table(
        'formation',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(), nullable=False),
        sa.Column('office_code', sa.String(), nullable=False),
        sa.Column('uic', sa.String(), nullable=False),
        sa.PrimaryKeyConstraint('id'),
    )
    # Join table linking soldiers to formations over a date range.
    op.create_table(
        'soldier_formation',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('formation_id', sa.Integer(), nullable=False),
        sa.Column('soldier_id', sa.Integer(), nullable=False),
        sa.Column('start_date', sa.Date(), nullable=True),
        sa.Column('end_date', sa.Date(), nullable=True),
        sa.ForeignKeyConstraint(['formation_id'], ['formation.id']),
        sa.ForeignKeyConstraint(['soldier_id'], ['soldier.id']),
        sa.PrimaryKeyConstraint('id'),
    )
def downgrade():
    """Drop the join table first, then formation itself."""
    for table in ('soldier_formation', 'formation'):
        op.drop_table(table)
| {
"repo_name": "jschaf/ibolc",
"path": "migrations/versions/3bc5fc4758_add_formation.py",
"copies": "2",
"size": "1328",
"license": "bsd-3-clause",
"hash": -4771614496317590000,
"line_mean": 29.8837209302,
"line_max": 66,
"alpha_frac": 0.6626506024,
"autogenerated": false,
"ratio": 3.3877551020408165,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5050405704440817,
"avg_score": null,
"num_lines": null
} |
"""Add framework and framework status fields
Revision ID: 70_add_framework_status
Revises: 60_archive_current_services
Create Date: 2015-06-16 09:18:05.389816
"""
# revision identifiers, used by Alembic.
revision = '70_add_framework_status'
down_revision = '60_archive_current_services'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add 'framework' and 'status' enum columns to the frameworks table."""
    # framework column: temporary server default lets existing rows satisfy
    # NOT NULL, then the default is removed.
    framework_enum = sa.Enum('gcloud', name='framework_enum')
    framework_enum.create(op.get_bind())
    op.add_column('frameworks',
                  sa.Column('framework', framework_enum, nullable=False,
                            index=True, server_default='gcloud'))
    op.alter_column('frameworks', 'framework', server_default=None)

    # status column, backfilled from the legacy boolean 'expired' flag.
    status_enum = sa.Enum('pending', 'live', 'expired',
                          name='framework_status_enum')
    status_enum.create(op.get_bind())
    op.add_column('frameworks',
                  sa.Column('status', status_enum, nullable=False,
                            index=True, server_default='pending'))
    op.execute("UPDATE frameworks SET status = 'expired' WHERE expired = TRUE;")
    op.execute("UPDATE frameworks SET status = 'live' WHERE expired = FALSE;")
def downgrade():
    """Remove the framework/status columns and their enum types.

    The original downgrade dropped only the columns, leaving
    ``framework_enum`` and ``framework_status_enum`` behind; on PostgreSQL
    a subsequent upgrade() then fails because the types already exist.
    """
    op.drop_column('frameworks', 'framework')
    op.drop_column('frameworks', 'status')
    bind = op.get_bind()
    sa.Enum(name='framework_enum').drop(bind, checkfirst=True)
    sa.Enum(name='framework_status_enum').drop(bind, checkfirst=True)
| {
"repo_name": "alphagov/digitalmarketplace-api",
"path": "migrations/versions/70_add_framework_and_status.py",
"copies": "3",
"size": "1399",
"license": "mit",
"hash": -5308162077476205000,
"line_mean": 33.1219512195,
"line_max": 85,
"alpha_frac": 0.6740528949,
"autogenerated": false,
"ratio": 3.801630434782609,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0029517872056117962,
"num_lines": 41
} |
"""add framework lifecycle dates
Revision ID: 1140
Revises: 1130
Create Date: 2018-04-24 16:12:57.035498
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1140'
down_revision = '1130'
def upgrade():
    """Add the six framework lifecycle timestamp columns (all UTC)."""
    lifecycle_columns = (
        'applications_close_at_utc',
        'clarifications_close_at_utc',
        'clarifications_publish_at_utc',
        'framework_expires_at_utc',
        'framework_live_at_utc',
        'intention_to_award_at_utc',
    )
    for column_name in lifecycle_columns:
        op.add_column('frameworks',
                      sa.Column(column_name, sa.DateTime(), nullable=True))
def downgrade():
    """Drop the lifecycle timestamp columns (reverse order of upgrade)."""
    for column_name in (
            'intention_to_award_at_utc',
            'framework_live_at_utc',
            'framework_expires_at_utc',
            'clarifications_publish_at_utc',
            'clarifications_close_at_utc',
            'applications_close_at_utc'):
        op.drop_column('frameworks', column_name)
| {
"repo_name": "alphagov/digitalmarketplace-api",
"path": "migrations/versions/1140_add_framework_lifecycle_dates.py",
"copies": "1",
"size": "1468",
"license": "mit",
"hash": -6641100721981109000,
"line_mean": 37.6315789474,
"line_max": 105,
"alpha_frac": 0.6975476839,
"autogenerated": false,
"ratio": 3.3747126436781607,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9429740261045148,
"avg_score": 0.028504013306602433,
"num_lines": 38
} |
"""Add framework lot constraint to services
Revision ID: 540
Revises: 530
Create Date: 2016-01-26 14:22:07.624062
"""
# revision identifiers, used by Alembic.
revision = '540'
down_revision = '530'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add composite (framework_id, lot_id) FKs onto framework_lots."""
    for table in ('archived_services', 'draft_services', 'services'):
        op.create_foreign_key('%s_framework_id_fkey1' % table, table,
                              'framework_lots',
                              ['framework_id', 'lot_id'],
                              ['framework_id', 'lot_id'])
def downgrade():
    """Drop the composite framework/lot foreign keys."""
    for table in ('services', 'draft_services', 'archived_services'):
        op.drop_constraint('%s_framework_id_fkey1' % table, table,
                           type_='foreignkey')
| {
"repo_name": "alphagov/digitalmarketplace-api",
"path": "migrations/versions/540_add_framework_lot_constraint_to_services.py",
"copies": "1",
"size": "1032",
"license": "mit",
"hash": -9182355531036815000,
"line_mean": 38.6923076923,
"line_max": 160,
"alpha_frac": 0.7073643411,
"autogenerated": false,
"ratio": 3.2049689440993787,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4412333285199379,
"avg_score": null,
"num_lines": null
} |
"""add frec_code to submission model
Revision ID: c42d328ef2fa
Revises: 4d8408c33fee
Create Date: 2017-07-10 13:16:56.855163
"""
# revision identifiers, used by Alembic.
revision = 'c42d328ef2fa'
down_revision = '4d8408c33fee'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Invoke the upgrade function for the named database engine."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Invoke the downgrade function for the named database engine."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_data_broker():
    """Add frec_code to submission and cgac_code to frec."""
    for table, column in (('submission', 'frec_code'), ('frec', 'cgac_code')):
        op.add_column(table, sa.Column(column, sa.Text(), nullable=True))
def downgrade_data_broker():
    """Drop the FREC columns, purging rows the old schema cannot hold."""
    # Submissions without a cgac_code could not exist before this revision.
    op.execute("DELETE FROM submission "
               "WHERE cgac_code IS NULL")
    op.drop_column('frec', 'cgac_code')
    op.drop_column('submission', 'frec_code')
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/c42d328ef2fa_add_frec_code_to_submission_model.py",
"copies": "1",
"size": "1050",
"license": "cc0-1.0",
"hash": -1435241034188922600,
"line_mean": 22.8636363636,
"line_max": 81,
"alpha_frac": 0.6714285714,
"autogenerated": false,
"ratio": 3.29153605015674,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9323619945746451,
"avg_score": 0.02786893516205793,
"num_lines": 44
} |
"""add FREC to user model
Revision ID: 4d8408c33fee
Revises: da2e50d423ff
Create Date: 2017-07-06 13:19:01.155328
"""
# revision identifiers, used by Alembic.
revision = '4d8408c33fee'
down_revision = 'da2e50d423ff'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Look up and run the per-engine upgrade implementation."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Look up and run the per-engine downgrade implementation."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_data_broker():
    """Add FREC affiliations: surrogate PK, frec_id FK and lookup indexes."""
    op.add_column('user_affiliation',
                  sa.Column('frec_id', sa.Integer(), nullable=True))
    op.add_column('user_affiliation',
                  sa.Column('user_affiliation_id', sa.Integer(),
                            nullable=False, primary_key=True))
    for indexed_col in ('cgac_id', 'frec_id', 'user_id'):
        op.create_index(op.f('ix_user_affiliation_%s' % indexed_col),
                        'user_affiliation', [indexed_col], unique=False)
    op.create_foreign_key('user_affiliation_frec_fk', 'user_affiliation',
                          'frec', ['frec_id'], ['frec_id'],
                          ondelete='CASCADE')
    # Replace the composite (user_id, cgac_id) PK with the surrogate id.
    op.drop_constraint('user_affiliation_pkey', 'user_affiliation',
                       type_='primary')
    op.create_primary_key('user_affiliation_pkey', 'user_affiliation',
                          ['user_affiliation_id'])
    op.alter_column('user_affiliation', 'cgac_id',
                    existing_type=sa.INTEGER(),
                    nullable=True)
def downgrade_data_broker():
    """Restore the composite PK and drop the FREC columns and indexes."""
    # Rows without cgac_id cannot satisfy the restored NOT NULL constraint.
    op.execute("DELETE FROM user_affiliation "
               "WHERE cgac_id IS NULL")
    op.alter_column('user_affiliation', 'cgac_id',
                    existing_type=sa.INTEGER(),
                    nullable=False)
    op.drop_constraint('user_affiliation_pkey', 'user_affiliation',
                       type_='primary')
    op.create_primary_key('user_affiliation_pkey', 'user_affiliation',
                          ['user_id', 'cgac_id'])
    op.drop_constraint('user_affiliation_frec_fk', 'user_affiliation',
                       type_='foreignkey')
    for indexed_col in ('user_id', 'frec_id', 'cgac_id'):
        op.drop_index(op.f('ix_user_affiliation_%s' % indexed_col),
                      table_name='user_affiliation')
    op.drop_column('user_affiliation', 'user_affiliation_id')
    op.drop_column('user_affiliation', 'frec_id')
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/4d8408c33fee_add_frec_to_user_model.py",
"copies": "1",
"size": "2519",
"license": "cc0-1.0",
"hash": -880519483324606100,
"line_mean": 39.6290322581,
"line_max": 127,
"alpha_frac": 0.6756649464,
"autogenerated": false,
"ratio": 3.1606022584692597,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9217499937182847,
"avg_score": 0.023753453537282667,
"num_lines": 62
} |
"""add friend invites table
Revision ID: 553bdd8a749e
Revises: 6f74c797dbd0
Create Date: 2017-10-20 10:06:36.730512
"""
# revision identifiers, used by Alembic.
revision = '553bdd8a749e'
down_revision = '6f74c797dbd0'
branch_labels = None
depends_on = None
import datetime
from alembic import op
import sqlalchemy as sa
utc_now = sa.text("(now() at time zone 'utc')")
def upgrade(engine_name):
    """Create ck_friend_invites plus its backing id sequence and grants."""
    print("Upgrading {}".format(engine_name))
    op.execute(sa.schema.CreateSequence(sa.Sequence('ck_friend_invites_id_seq')))
    op.create_table(
        'ck_friend_invites',
        sa.Column('id', sa.BigInteger, sa.Sequence('ck_friend_invites_id_seq'),
                  primary_key=True,
                  server_default=sa.text("nextval('ck_friend_invites_id_seq'::regclass)")),
        sa.Column('issued_by_player_id', sa.Integer,
                  sa.ForeignKey('ck_players.player_id'),
                  nullable=False, index=True),
        sa.Column('token', sa.String(50), nullable=False, index=True),
        sa.Column('expiry_date', sa.DateTime, nullable=False),
        sa.Column('deleted', sa.Boolean, nullable=True, default=False),
        sa.Column('create_date', sa.DateTime, nullable=False,
                  server_default=utc_now),
        sa.Column('modify_date', sa.DateTime, nullable=False,
                  server_default=utc_now, onupdate=datetime.datetime.utcnow),
    )
    op.execute("GRANT INSERT, SELECT, UPDATE, DELETE ON TABLE ck_friend_invites to zzp_user;")
    op.execute("GRANT ALL ON SEQUENCE ck_friend_invites_id_seq TO zzp_user;")
def downgrade(engine_name):
    """Drop ck_friend_invites and its id sequence."""
    print("Downgrading {}".format(engine_name))
    op.drop_table('ck_friend_invites')
    op.execute(sa.schema.DropSequence(sa.Sequence('ck_friend_invites_id_seq')))
| {
"repo_name": "dgnorth/drift-base",
"path": "alembic/versions/553bdd8a749e_add_friendship_tokens_table.py",
"copies": "1",
"size": "1767",
"license": "mit",
"hash": -8503179800919376000,
"line_mean": 35.8125,
"line_max": 171,
"alpha_frac": 0.6977928693,
"autogenerated": false,
"ratio": 3.1109154929577465,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9287370300275772,
"avg_score": 0.004267612396394984,
"num_lines": 48
} |
"""add friendships table
Revision ID: 553bdd8a749f
Revises: 553bdd8a749e
Create Date: 2018-04-19 14:24:33.050913
"""
# revision identifiers, used by Alembic.
revision = '553bdd8a749f'
down_revision = '553bdd8a749e'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
utc_now = sa.text("(now() at time zone 'utc')")
def upgrade(engine_name):
    """Add lookup indexes on the ck_clients columns.

    Fix: the original used Python 2 ``print`` statements, which are a
    SyntaxError under Python 3; the repo's later migrations already use
    the ``print()`` function, which also works on Python 2 for a single
    argument.
    """
    print("Upgrading {}".format(engine_name))
    for col in ('ip_address', 'user_id', 'player_id', 'build', 'identity_id'):
        op.create_index('ix_ck_clients_%s' % col, 'ck_clients', [col])
def downgrade(engine_name):
    """Drop the ck_clients indexes added by upgrade().

    Fix: converted Python 2 ``print`` statements to the ``print()``
    function for Python 3 compatibility.
    """
    print("Downgrading {}".format(engine_name))
    for col in ('ip_address', 'user_id', 'player_id', 'build', 'identity_id'):
        op.drop_index('ix_ck_clients_%s' % col)
| {
"repo_name": "dgnorth/drift-base",
"path": "alembic/versions/553bdd8a749f_add_clients_indices.py",
"copies": "1",
"size": "1137",
"license": "mit",
"hash": -3139868574635064300,
"line_mean": 28.9210526316,
"line_max": 79,
"alpha_frac": 0.6860158311,
"autogenerated": false,
"ratio": 2.9005102040816326,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40865260351816324,
"avg_score": null,
"num_lines": null
} |
"""add friendships table
Revision ID: 6f74c797dbd0
Revises: b43f41a4f76
Create Date: 2017-10-16 14:24:33.050913
"""
# revision identifiers, used by Alembic.
revision = '6f74c797dbd0'
down_revision = 'b43f41a4f76'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
import datetime
# Server-side SQL expression yielding the current timestamp in UTC;
# used as the server_default for create_date/modify_date below.
utc_now = sa.text("(now() at time zone 'utc')")
def upgrade(engine_name):
    # Creates the ck_friendships table plus its id sequence, and grants the
    # application role (zzp_user) access.  PostgreSQL-specific SQL.
    print "Upgrading {}".format(engine_name)
    # your upgrade script goes here
    # The sequence is created explicitly so it can be granted to zzp_user below.
    op.execute(sa.schema.CreateSequence(sa.Sequence('ck_friendships_id_seq')))
    op.create_table(
        'ck_friendships',
        # id is drawn from the sequence created above.
        sa.Column('id', sa.BigInteger, sa.Sequence('ck_friendships_id_seq'), primary_key=True, server_default=sa.text("nextval('ck_friendships_id_seq'::regclass)")),
        sa.Column('player1_id', sa.Integer, sa.ForeignKey('ck_players.player_id'), nullable=False, index=True),
        sa.Column('player2_id', sa.Integer, sa.ForeignKey('ck_players.player_id'), nullable=False, index=True),
        sa.Column('create_date', sa.DateTime, nullable=False, server_default=utc_now),
        # modify_date is refreshed by the ORM (onupdate), not by a DB trigger.
        sa.Column('modify_date', sa.DateTime, nullable=False, server_default=utc_now, onupdate=datetime.datetime.utcnow),
        sa.Column('status', sa.String(20), nullable=False, server_default="active"),
        # Canonical ordering of the pair prevents duplicate (a,b)/(b,a) rows.
        sa.CheckConstraint('player1_id < player2_id'),
    )
    sql = "GRANT INSERT, SELECT, UPDATE, DELETE ON TABLE ck_friendships to zzp_user;"
    op.execute(sql)
    sql = "GRANT ALL ON SEQUENCE ck_friendships_id_seq TO zzp_user;"
    op.execute(sql)
def downgrade(engine_name):
    # Drop the table first, then the sequence it depends on.
    print "Downgrading {}".format(engine_name)
    # your downgrade script goes here
    op.drop_table('ck_friendships')
    op.execute(sa.schema.DropSequence(sa.Sequence('ck_friendships_id_seq')))
| {
"repo_name": "dgnorth/drift-base",
"path": "alembic/versions/6f74c797dbd0_add_friendships_table.py",
"copies": "1",
"size": "1772",
"license": "mit",
"hash": 6065413897428494000,
"line_mean": 36.7021276596,
"line_max": 165,
"alpha_frac": 0.6997742664,
"autogenerated": false,
"ratio": 3.1307420494699647,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43305163158699644,
"avg_score": null,
"num_lines": null
} |
"""Add from_developer also to the ActiveMessageTranslation
Revision ID: 2943b8eaa095
Revises: 435d360d3398
Create Date: 2015-05-04 18:59:24.113378
"""
# revision identifiers, used by Alembic.
revision = '2943b8eaa095'
down_revision = '435d360d3398'
from alembic import op
import sqlalchemy as sa
import sqlalchemy.sql as sql
from appcomposer.db import db
from appcomposer.application import app
# Lightweight table reflections used for the raw data backfill in upgrade().
# Only the columns actually read or written below are declared.
metadata = db.MetaData()
active_translation = db.Table('ActiveTranslationMessages', metadata,
    db.Column('id', db.Integer()),
    db.Column('bundle_id', db.Integer),
    db.Column('taken_from_default', db.Boolean),
    db.Column('from_developer', db.Boolean),
    db.Column('key', db.Unicode(255)),
    db.Column('value', db.UnicodeText()),
)
bundle = db.Table('TranslationBundles', metadata,
    db.Column('id', db.Integer()),
    db.Column('from_developer', db.Boolean),
    db.Column('translation_url_id', db.Integer),
    db.Column('language', db.Unicode(20)),
    db.Column('target', db.Unicode(20)),
)
def upgrade():
    # Adds ActiveTranslationMessages.from_developer and backfills it for all
    # existing rows by comparing each message against the English (en_ALL)
    # bundle that shares the same translation URL.
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('ActiveTranslationMessages', sa.Column('from_developer', sa.Boolean(), nullable=True))
    op.create_index(u'ix_ActiveTranslationMessages_from_developer', 'ActiveTranslationMessages', ['from_developer'], unique=False)
    ### end Alembic commands ###
    # Normalize NULL values to empty strings before the comparison pass.
    update_stmt = active_translation.update().where(active_translation.c.value == None).values(value = "")
    with app.app_context():
        db.session.execute(update_stmt)
    active_translations = {
        # bundle_id : {
        #     key : value,
        # }
    }
    # 15256
    with app.app_context():
        # Cache every message keyed by bundle so English defaults can be
        # looked up in memory instead of with extra queries.
        for key, value, bundle_id in db.session.execute(sql.select([active_translation.c.key, active_translation.c.value, active_translation.c.bundle_id])):
            if bundle_id not in active_translations:
                active_translations[bundle_id] = {}
            active_translations[bundle_id][key] = value
        default_bundle_ids = {
            # translation_url_id : bundle_id # pointing to English
        }
        for bundle_id, translation_url_id, target in db.session.execute(sql.select([bundle.c.id, bundle.c.translation_url_id, bundle.c.target], bundle.c.language == 'en_ALL')):
            default_bundle_ids[translation_url_id] = bundle_id
        active_translation_query = sql.select([active_translation.c.id, active_translation.c.taken_from_default, bundle.c.from_developer, bundle.c.translation_url_id, bundle.c.id, active_translation.c.key, active_translation.c.value], active_translation.c.bundle_id == bundle.c.id, use_labels = True)
        for row in db.session.execute(active_translation_query):
            active_msg_id = row[active_translation.c.id]
            from_default = row[active_translation.c.taken_from_default]
            bundle_from_developer = row[bundle.c.from_developer]
            bundle_id = row[bundle.c.id]
            translation_url_id = row[bundle.c.translation_url_id]
            current_value = row[active_translation.c.value]
            current_key = row[active_translation.c.key]
            default_bundle_id = default_bundle_ids.get(translation_url_id)
            default_value = active_translations.get(default_bundle_id, {}).get(current_key)
            if bundle_id == default_bundle_id:
                # Messages in the English bundle itself come from the developer.
                from_developer = True
            else:
                if bundle_from_developer and not from_default:
                    # Developer-supplied translated bundle: only count a message
                    # as developer-provided when it is non-empty and actually
                    # differs from the English default.
                    if default_value is not None and current_value != default_value and current_value:
                        from_developer = True
                    else:
                        from_developer = False
                        from_default = True
                else:
                    from_developer = False
            update_stmt = active_translation.update().where(active_translation.c.id == active_msg_id).values(from_developer = from_developer, taken_from_default = from_default)
            db.session.execute(update_stmt)
        db.session.commit()
def downgrade():
    # Reverses upgrade(): removes the index and the from_developer column.
    # The taken_from_default values changed by the backfill are not restored.
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(u'ix_ActiveTranslationMessages_from_developer', table_name='ActiveTranslationMessages')
    op.drop_column('ActiveTranslationMessages', 'from_developer')
    ### end Alembic commands ###
| {
"repo_name": "go-lab/appcomposer",
"path": "alembic/versions/2943b8eaa095_add_from_developer_also_to_the_.py",
"copies": "3",
"size": "4346",
"license": "bsd-2-clause",
"hash": 5796492947327480000,
"line_mean": 41.1941747573,
"line_max": 300,
"alpha_frac": 0.6502531063,
"autogenerated": false,
"ratio": 3.7857142857142856,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5935967392014285,
"avg_score": null,
"num_lines": null
} |
"""add_fsrs_indices
Revision ID: dcbb5afa125e
Revises: 1ae491ca0925
Create Date: 2016-09-08 14:43:04.547230
"""
# revision identifiers, used by Alembic.
revision = 'dcbb5afa125e'
down_revision = '1ae491ca0925'
branch_labels = None
depends_on = None
from alembic import op
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade routine defined in this module."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade routine defined in this module."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_data_broker():
    ### commands auto generated by Alembic - please adjust! ###
    # Speed up award lookups by fain (grants) and contract_number (procurements).
    op.create_index(op.f('ix_fsrs_grant_fain'), 'fsrs_grant', ['fain'], unique=False)
    op.create_index(op.f('ix_fsrs_procurement_contract_number'), 'fsrs_procurement', ['contract_number'], unique=False)
    ### end Alembic commands ###
def downgrade_data_broker():
    ### commands auto generated by Alembic - please adjust! ###
    # Drop the two indices added in upgrade_data_broker(), reverse order.
    op.drop_index(op.f('ix_fsrs_procurement_contract_number'), table_name='fsrs_procurement')
    op.drop_index(op.f('ix_fsrs_grant_fain'), table_name='fsrs_grant')
    ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/dcbb5afa125e_add_fsrs_indices.py",
"copies": "2",
"size": "1053",
"license": "cc0-1.0",
"hash": -2052657953942973400,
"line_mean": 24.6829268293,
"line_max": 119,
"alpha_frac": 0.6866096866,
"autogenerated": false,
"ratio": 3.0879765395894427,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4774586226189443,
"avg_score": null,
"num_lines": null
} |
"""add_fsrs_tables
Revision ID: caa6895e7450
Revises: b0445ef35b9a
Create Date: 2016-08-26 00:44:49.773600
"""
# revision identifiers, used by Alembic.
revision = 'caa6895e7450'
down_revision = 'b0445ef35b9a'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Invoke the upgrade function named for *engine_name*."""
    globals()["upgrade_{0}".format(engine_name)]()
def downgrade(engine_name):
    """Invoke the downgrade function named for *engine_name*."""
    globals()["downgrade_{0}".format(engine_name)]()
def upgrade_data_broker():
    """Create the four FSRS award tables: grants, procurements and their
    sub-awards (subgrant/subcontract).

    The tables share large groups of identical columns, so the column lists
    are assembled from small factory helpers instead of being spelled out
    four times.  Each helper returns *fresh* Column objects on every call,
    because a SQLAlchemy Column may only be bound to a single Table.
    Column names and order match the original auto-generated migration.
    """
    ### commands originally auto generated by Alembic ###

    def _str(name):
        # Shorthand for the nullable String columns that dominate these tables.
        return sa.Column(name, sa.String(), nullable=True)

    def _bool(name):
        # Shorthand for nullable Boolean flag columns.
        return sa.Column(name, sa.Boolean(), nullable=True)

    def _common_columns():
        # Columns shared by all four FSRS tables: bookkeeping timestamps,
        # primary key, DUNS identification, place of performance, funding
        # agency and the five top-paid executives.
        cols = [
            sa.Column('created_at', sa.DateTime(), nullable=True),
            sa.Column('updated_at', sa.DateTime(), nullable=True),
            sa.Column('id', sa.Integer(), nullable=False),
            _str('duns'),
            _str('dba_name'),
            _str('principle_place_city'),
            _str('principle_place_street'),
            _str('principle_place_state'),
            _str('principle_place_country'),
            _str('principle_place_zip'),
            _str('principle_place_district'),
            _str('parent_duns'),
            _str('funding_agency_id'),
            _str('funding_agency_name'),
        ]
        for i in range(1, 6):
            cols.append(_str('top_paid_fullname_%s' % i))
            cols.append(_str('top_paid_amount_%s' % i))
        return cols

    def _awardee_columns():
        # Awardee block shared by fsrs_grant and fsrs_subgrant.
        return [
            _str('dunsplus4'),
            _str('awardee_name'),
            _str('awardee_address_city'),
            _str('awardee_address_street'),
            _str('awardee_address_state'),
            _str('awardee_address_country'),
            _str('awardee_address_zip'),
            _str('awardee_address_district'),
            _str('cfda_numbers'),
            _str('project_description'),
            _bool('compensation_q1'),
            _bool('compensation_q2'),
        ]

    def _company_columns():
        # Company block shared by fsrs_procurement and fsrs_subcontract.
        return [
            _str('company_name'),
            _str('bus_types'),
            _str('company_address_city'),
            _str('company_address_street'),
            _str('company_address_state'),
            _str('company_address_country'),
            _str('company_address_zip'),
            _str('company_address_district'),
            _str('parent_company_name'),
            _str('naics'),
            _str('funding_office_id'),
            _str('funding_office_name'),
            _bool('recovery_model_q1'),
            _bool('recovery_model_q2'),
        ]

    def _report_columns():
        # Reporting metadata present only on the two prime-award tables.
        return [
            _str('internal_id'),
            sa.Column('date_submitted', sa.DateTime(), nullable=True),
            _str('report_period_mon'),
            _str('report_period_year'),
        ]

    op.create_table(
        'fsrs_grant',
        *(_common_columns() + _awardee_columns() + _report_columns() + [
            _str('fain'),
            _str('total_fed_funding_amount'),
            sa.Column('obligation_date', sa.Date(), nullable=True),
            sa.PrimaryKeyConstraint('id'),
        ])
    )
    op.create_table(
        'fsrs_procurement',
        *(_common_columns() + _company_columns() + _report_columns() + [
            _str('contract_number'),
            _str('idv_reference_number'),
            _str('report_type'),
            _str('contract_agency_code'),
            _str('contract_idv_agency_code'),
            _str('contracting_office_aid'),
            _str('contracting_office_aname'),
            _str('contracting_office_id'),
            _str('contracting_office_name'),
            _str('treasury_symbol'),
            _str('dollar_obligated'),
            sa.Column('date_signed', sa.Date(), nullable=True),
            _str('transaction_type'),
            _str('program_title'),
            sa.PrimaryKeyConstraint('id'),
        ])
    )
    # Sub-award tables reference their prime award and cascade on delete.
    op.create_table(
        'fsrs_subcontract',
        *(_common_columns() + _company_columns() + [
            sa.Column('parent_id', sa.Integer(), nullable=True),
            _str('subcontract_amount'),
            sa.Column('subcontract_date', sa.Date(), nullable=True),
            _str('subcontract_num'),
            sa.Column('overall_description', sa.Text(), nullable=True),
            _str('recovery_subcontract_amt'),
            sa.ForeignKeyConstraint(['parent_id'], ['fsrs_procurement.id'], ondelete='CASCADE'),
            sa.PrimaryKeyConstraint('id'),
        ])
    )
    op.create_table(
        'fsrs_subgrant',
        *(_common_columns() + _awardee_columns() + [
            sa.Column('parent_id', sa.Integer(), nullable=True),
            _str('subaward_amount'),
            sa.Column('subaward_date', sa.Date(), nullable=True),
            _str('subaward_num'),
            sa.ForeignKeyConstraint(['parent_id'], ['fsrs_grant.id'], ondelete='CASCADE'),
            sa.PrimaryKeyConstraint('id'),
        ])
    )
    ### end Alembic commands ###
def downgrade_data_broker():
    ### commands auto generated by Alembic - please adjust! ###
    # Drop child tables before their FK targets
    # (subgrant -> grant, subcontract -> procurement).
    op.drop_table('fsrs_subgrant')
    op.drop_table('fsrs_subcontract')
    op.drop_table('fsrs_procurement')
    op.drop_table('fsrs_grant')
    ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/caa6895e7450_add_fsrs_tables.py",
"copies": "2",
"size": "12994",
"license": "cc0-1.0",
"hash": -3911777001794261000,
"line_mean": 53.3682008368,
"line_max": 88,
"alpha_frac": 0.673464676,
"autogenerated": false,
"ratio": 3.268930817610063,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9853067388611877,
"avg_score": 0.017865620999637143,
"num_lines": 239
} |
"""Add full menu to gallery
Limitations:
- Currently only supports sorting albums by name in normal order (can not be reversed).
"""
import operator
import os
from sigal import signals
def full_tree(gallery):
    """Attach a nested album tree to the gallery as ``gallery.full_tree``.

    Albums are visited in name order; each node maps an album name to a
    dict with the album itself (``'self'``) and its children
    (``'subalbums'``).  The gallery root ('.') is excluded.
    """
    gallery.full_tree = {}
    for path, album in sorted(gallery.albums.items()):
        if path == '.':
            continue
        # Walk down the already-built ancestor chain (parents sort first).
        node = gallery.full_tree
        for part in album.path.split('/')[:-1]:
            node = node[part]['subalbums']
        node[album.name] = {'self': album, 'subalbums': {}}
def path_to_root(album):
    """Attach the relative URL back to the gallery root as ``album.path_to_root``."""
    rel = os.path.relpath('.', album.path)
    # The root album itself gets an empty prefix; everything else gets a
    # trailing slash so it can be prepended to URLs directly.
    album.path_to_root = '' if rel == '.' else rel + '/'
def path_from_root(album):
    """url from gallery root"""
    # The album's relative path already is the path from the gallery root;
    # expose it under the name the theme templates use.
    album.path_from_root = album.path
def register(settings):
    # Sigal plugin hook: wire the helpers into gallery/album initialization
    # signals.  `settings` is required by the plugin API but unused here.
    signals.gallery_initialized.connect(full_tree)
    signals.album_initialized.connect(path_to_root)
    signals.album_initialized.connect(path_from_root)
| {
"repo_name": "153957/153957-theme",
"path": "153957_theme/full_menu.py",
"copies": "1",
"size": "1274",
"license": "mit",
"hash": 3894610338065509400,
"line_mean": 22.1636363636,
"line_max": 87,
"alpha_frac": 0.6248037677,
"autogenerated": false,
"ratio": 3.692753623188406,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9816524333037167,
"avg_score": 0.00020661157024793388,
"num_lines": 55
} |
"""Add Full Text Search
Revision ID: 488e3dae5a17
Revises: 34fa673d7905
Create Date: 2015-12-07 10:49:44.124657
"""
# revision identifiers, used by Alembic.
revision = '488e3dae5a17'
down_revision = '34fa673d7905'
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
from sqlalchemy_searchable import sync_trigger
def upgrade():
    # Adds PostgreSQL full-text search on notes (tsvector column + GIN index)
    # and removes the unused sections table in the same migration.
    ### commands auto generated by Alembic - please adjust! ###
    conn = op.get_bind()
    op.drop_table('sections')
    op.add_column('notes', sa.Column('search_vector', sqlalchemy_utils.types.ts_vector.TSVectorType(), nullable=True))
    # GIN is the index type that makes tsvector queries fast.
    op.create_index('ix_notes_search_vector', 'notes', ['search_vector'], unique=False, postgresql_using='gin')
    ### end Alembic commands ###
    # Backfill search_vector for existing rows and install the trigger that
    # keeps it in sync with title/body.
    sync_trigger(conn, 'notes', 'search_vector', ['title', 'body'])
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # Remove the full-text search artifacts from notes.
    # NOTE(review): the trigger/function installed by sync_trigger are not
    # dropped here — confirm whether downgrade should remove them too.
    op.drop_index('ix_notes_search_vector', table_name='notes')
    op.drop_column('notes', 'search_vector')
    # Restore the sections table dropped in upgrade().
    op.create_table('sections',
    sa.Column('id', sa.INTEGER(), nullable=False),
    sa.Column('title', sa.VARCHAR(length=200), autoincrement=False, nullable=True),
    sa.Column('notebook_id', sa.INTEGER(), autoincrement=False, nullable=True),
    sa.ForeignKeyConstraint(['notebook_id'], [u'notebooks.id'], name=u'sections_notebook_id_fkey'),
    sa.PrimaryKeyConstraint('id', name=u'sections_pkey')
    )
    ### end Alembic commands ###
| {
"repo_name": "iamgroot42/braindump",
"path": "migrations/versions/488e3dae5a17_add_full_text_search.py",
"copies": "1",
"size": "1437",
"license": "mit",
"hash": -157090821099469920,
"line_mean": 36.8157894737,
"line_max": 118,
"alpha_frac": 0.6972860125,
"autogenerated": false,
"ratio": 3.3811764705882354,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45784624830882353,
"avg_score": null,
"num_lines": null
} |
"""add fulltext view
Revision ID: 21c0f59b5214
Revises: 37b62c1b2866
Create Date: 2015-06-09 14:29:23.154330
"""
# revision identifiers, used by Alembic.
revision = '21c0f59b5214'
down_revision = '37b62c1b2866'
from alembic import op
import sqlalchemy as sa
from purchasing.data.models import TRIGGER_TUPLES
# tsvector columns of the search_view materialized view that receive a
# GIN index in upgrade().
index_set = [
    'tsv_contract_description',
    'tsv_company_name',
    'tsv_detail_value',
    'tsv_line_item_description'
]
def upgrade():
    # Builds the search_view materialized view (contract/company/line-item
    # text with precomputed tsvectors), indexes it, and installs statement-
    # level triggers that refresh it whenever a source table changes.
    # grab a connection to the database
    conn = op.get_bind()
    # create the materialized view
    conn.execute(sa.sql.text('''
        CREATE MATERIALIZED VIEW search_view AS (
            SELECT
                c.id::VARCHAR || contract_property.id::VARCHAR || line_item.id::VARCHAR || company.id::VARCHAR AS id,
                c.id AS contract_id,
                company.id AS company_id,
                c.expiration_date, c.financial_id,
                c.description AS contract_description,
                to_tsvector(c.description) AS tsv_contract_description,
                company.company_name AS company_name,
                to_tsvector(company.company_name) AS tsv_company_name,
                contract_property.key AS detail_key,
                contract_property.value AS detail_value,
                to_tsvector(contract_property.value) AS tsv_detail_value,
                line_item.description AS line_item_description,
                to_tsvector(line_item.description) AS tsv_line_item_description
            FROM contract c
            LEFT OUTER JOIN contract_property ON c.id = contract_property.contract_id
            LEFT OUTER JOIN line_item ON c.id = line_item.contract_id
            LEFT OUTER JOIN company_contract_association ON c.id = company_contract_association.contract_id
            LEFT OUTER JOIN company ON company.id = company_contract_association.company_id
        )
    '''))
    # create unique index on ids
    # (a unique index is required for REFRESH ... CONCURRENTLY below)
    op.create_index(op.f('ix_search_view_id'), 'search_view', ['id'], unique=True)
    # create remaining indices on the tsv columns
    for index in index_set:
        op.create_index(op.f(
            'ix_tsv_{}'.format(index)), 'search_view', [index], postgresql_using='gin'
        )
    # for triggers, we need to build a new function which runs our refresh materialized view
    conn.execute(sa.sql.text('''
        CREATE OR REPLACE FUNCTION trig_refresh_search_view() RETURNS trigger AS
        $$
        BEGIN
            REFRESH MATERIALIZED VIEW CONCURRENTLY search_view;
            RETURN NULL;
        END;
        $$
        LANGUAGE plpgsql ;
    '''))
    # Re-create the per-table refresh triggers idempotently.
    for table, column, _ in TRIGGER_TUPLES:
        conn.execute(sa.sql.text('''
            DROP TRIGGER IF EXISTS tsv_{table}_{column}_trigger ON {table}
        '''.format(table=table, column=column)))
        conn.execute(sa.sql.text('''
            CREATE TRIGGER tsv_{table}_{column}_trigger AFTER TRUNCATE OR INSERT OR DELETE OR UPDATE OF {column}
            ON {table} FOR EACH STATEMENT
            EXECUTE PROCEDURE trig_refresh_search_view()
        '''.format(table=table, column=column)))
    ### end Alembic commands ###
def downgrade():
    # grab a connection to the database
    conn = op.get_bind()
    # drop the materialized view
    conn.execute(sa.sql.text('''
        DROP MATERIALIZED VIEW search_view
    '''))
    # Remove the refresh triggers; trig_refresh_search_view() itself is left
    # in place (harmless once no triggers reference it).
    for table, column, _ in TRIGGER_TUPLES:
        conn.execute(sa.sql.text('''
            DROP TRIGGER IF EXISTS tsv_{table}_{column}_trigger ON {table}
        '''.format(table=table, column=column)))
    ### end Alembic commands ###
| {
"repo_name": "CityofPittsburgh/pittsburgh-purchasing-suite",
"path": "migrations/versions/21c0f59b5214_add_fulltext_view.py",
"copies": "3",
"size": "3574",
"license": "bsd-3-clause",
"hash": 6128369421355660000,
"line_mean": 36.6210526316,
"line_max": 117,
"alpha_frac": 0.6253497482,
"autogenerated": false,
"ratio": 3.770042194092827,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0034540982408915845,
"num_lines": 95
} |
"""Add functions in this module usually provide no setter for the lamp's 3D
rotation, because one usually implicitly sets the rotation by pointing the
light to an object (and specifying an up vector), by using
:func:`point_light_to`.
"""
from os.path import basename
import numpy as np
from ..imprt import preset_import
bpy = preset_import('bpy')
Vector = preset_import('Vector')
from ..log import get_logger
logger = get_logger()
def point_light_to(light, target):
    """Rotates a light so that it shines at a target location.

    Args:
        light (bpy_types.Object): Light object.
        target (tuple(float)): Target location to which light rays point.
    """
    target = Vector(target)
    aim = target - light.location
    # A lamp emits along its local -Z axis.  to_track_quat() picks, among the
    # many rotations that align -Z with ``aim``, the one whose local Y axis
    # points up.
    light.rotation_euler = aim.to_track_quat('-Z', 'Y').to_euler()
    logger.info("Lamp '%s' points to %s now", light.name, target)
def add_light_sun(xyz=(0, 0, 0), rot_vec_rad=(0, 0, 0), name=None,
                  energy=1, size=0.1):
    """Adds a sun lamp that emits parallel light rays.

    Args:
        xyz (tuple(float), optional): Location only used to compute light ray
            direction.
        rot_vec_rad (tuple(float), optional): Rotations in radians around x,
            y and z.
        name (str, optional): Light name.
        energy (float, optional): Light intensity.
        size (float, optional): Light size for ray shadow tracing. Use larger
            for softer shadows.

    Returns:
        bpy_types.Object: Light added.
    """
    bpy.ops.object.lamp_add(
        type='SUN', location=xyz, rotation=rot_vec_rad)
    lamp = bpy.context.active_object
    if name is not None:
        lamp.name = name
    # Larger soft size -> softer ray-traced shadows.
    lamp.data.shadow_soft_size = size
    # Intensity: only the Cycles node setup is supported.
    engine = bpy.context.scene.render.engine
    if engine != 'CYCLES':
        raise NotImplementedError(engine)
    emission = lamp.data.node_tree.nodes['Emission']
    emission.inputs['Strength'].default_value = energy
    logger.info("Sun lamp (parallel light) added")
    return lamp
def add_light_area(xyz=(0, 0, 0), rot_vec_rad=(0, 0, 0), name=None,
                   energy=100, size=0.1):
    """Adds an area light that emits light rays the lambertian way.

    Args:
        xyz (tuple(float), optional): Location.
        rot_vec_rad (tuple(float), optional): Rotations in radians around x,
            y and z.
        name (str, optional): Light name.
        energy (float, optional): Light intensity.
        size (float, optional): Light size for ray shadow tracing.
            Use larger values for softer shadows.

    Returns:
        bpy_types.Object: Light added.
    """
    # Sanity-check that rotations look like radians, not degrees.
    if (np.abs(rot_vec_rad) > 2 * np.pi).any():
        logger.warning(
            ("Some input value falls outside [-2pi, 2pi]. "
             "Sure inputs are in radians?"))
    bpy.ops.object.lamp_add(type='AREA', location=xyz, rotation=rot_vec_rad)
    lamp = bpy.context.active_object
    if name is not None:
        lamp.name = name
    # Larger size -> softer shadows.
    lamp.data.size = size
    # Intensity: only the Cycles node setup is supported.
    engine = bpy.context.scene.render.engine
    if engine != 'CYCLES':
        raise NotImplementedError(engine)
    emission = lamp.data.node_tree.nodes['Emission']
    emission.inputs['Strength'].default_value = energy
    logger.info("Area light added")
    return lamp
def add_light_point(
        xyz=(0, 0, 0), name=None, size=0, color=(1, 1, 1), energy=100):
    """Adds an omnidirectional point lamp.

    Args:
        xyz (tuple(float), optional): Location.
        name (str, optional): Light name.
        size (float, optional): Light size; the larger the softer shadows are.
        color (tuple(float), optional): Color of the light.
        energy (float, optional): Light intensity.

    Returns:
        bpy_types.Object: Light added.
    """
    bpy.ops.object.lamp_add(type='POINT', location=xyz)
    lamp = bpy.context.active_object
    if name is not None:
        lamp.name = name
    if len(color) == 3:
        # Treat RGB as fully opaque RGBA.
        color += (1.,)
    lamp.data.shadow_soft_size = size
    # Intensity and color: only the Cycles node setup is supported.
    engine = bpy.context.scene.render.engine
    if engine != 'CYCLES':
        raise NotImplementedError(engine)
    emission = lamp.data.node_tree.nodes['Emission']
    emission.inputs['Strength'].default_value = energy
    emission.inputs['Color'].default_value = color
    logger.info("Omnidirectional point light added")
    return lamp
def add_light_spot(xyz=(0, 0, 0), name=None, energy=100, shadow_soft_size=0.1,
                   spot_size=0.785, spot_blend=0.15):
    """Adds a spotlight lamp.

    Args:
        xyz (tuple(float), optional): Location.
        name (str, optional): Light name.
        energy (float, optional): Light intensity.
        shadow_soft_size (float, optional): Light size for raytracing the
            shadow.
        spot_size (float, optional): Angle, in radians, of the spotlight beam.
        spot_blend (float, optional): Softness of the spotlight edge.

    Returns:
        bpy_types.Object: Light added.
    """
    bpy.ops.object.lamp_add(type='SPOT', location=xyz)
    lamp = bpy.context.active_object
    if name is not None:
        lamp.name = name
    # Intensity: only the Cycles node setup is supported.
    engine = bpy.context.scene.render.engine
    if engine != 'CYCLES':
        raise NotImplementedError(engine)
    emission = lamp.data.node_tree.nodes['Emission']
    emission.inputs['Strength'].default_value = energy
    lamp.data.shadow_soft_size = shadow_soft_size
    # Beam shape.
    lamp.data.spot_size = spot_size
    lamp.data.spot_blend = spot_blend
    logger.info("Spotlight lamp added")
    return lamp
def add_light_env(env=(1, 1, 1, 1), strength=1, rot_vec_rad=(0, 0, 0),
                  scale=(1, 1, 1)):
    r"""Adds environment lighting.

    Args:
        env (tuple(float) or str, optional): Environment map. If tuple,
            it's RGB or RGBA, each element of which :math:`\in [0,1]`.
            Otherwise, it's the path to an image.
        strength (float, optional): Light intensity.
        rot_vec_rad (tuple(float), optional): Rotations in radians around x,
            y and z.
        scale (tuple(float), optional): If all changed simultaneously,
            then no effects.
    """
    # Only Cycles is supported: the world lighting is built as a node graph.
    engine = bpy.context.scene.render.engine
    assert engine == 'CYCLES', "Rendering engine is not Cycles"
    if isinstance(env, str):
        # Path case: load (or reuse) the image and swap `env` for the image
        # datablock, looked up by file name.
        # NOTE(review): `basename` is presumably os.path.basename imported at
        # module top — confirm; collisions between same-named files would
        # return the wrong image.
        bpy.data.images.load(env, check_existing=True)
        env = bpy.data.images[basename(env)]
    else:
        # Tuple case: pad RGB to RGBA with full alpha.
        # NOTE(review): assumes `env` is a tuple, not a list — a 3-element
        # list would be extended in place and then fall through to the
        # environment-map branch below; confirm callers always pass tuples.
        if len(env) == 3:
            env += (1,)
        assert len(env) == 4, "If tuple, env must be of length 3 or 4"
    # Build the world shading graph: Background -> World Output surface.
    world = bpy.context.scene.world
    world.use_nodes = True
    node_tree = world.node_tree
    nodes = node_tree.nodes
    links = node_tree.links
    bg_node = nodes.new('ShaderNodeBackground')
    links.new(bg_node.outputs['Background'],
              nodes['World Output'].inputs['Surface'])
    if isinstance(env, tuple):
        # Color
        bg_node.inputs['Color'].default_value = env
        logger.warning(("Environment is pure color, "
                        "so rotation and scale have no effect"))
    else:
        # Environment map: TexCoord -> Mapping (rotation/scale) -> EnvTex ->
        # Background color input.
        texcoord_node = nodes.new('ShaderNodeTexCoord')
        env_node = nodes.new('ShaderNodeTexEnvironment')
        env_node.image = env
        mapping_node = nodes.new('ShaderNodeMapping')
        mapping_node.rotation = rot_vec_rad
        mapping_node.scale = scale
        links.new(texcoord_node.outputs['Generated'],
                  mapping_node.inputs['Vector'])
        links.new(mapping_node.outputs['Vector'], env_node.inputs['Vector'])
        links.new(env_node.outputs['Color'], bg_node.inputs['Color'])
    # Overall intensity applies to both the color and the map case.
    bg_node.inputs['Strength'].default_value = strength
    logger.info("Environment light added")
| {
"repo_name": "google/neural-light-transport",
"path": "third_party/xiuminglib/xiuminglib/blender/light.py",
"copies": "1",
"size": "8194",
"license": "apache-2.0",
"hash": 5003744135845470000,
"line_mean": 31.0078125,
"line_max": 78,
"alpha_frac": 0.6206980718,
"autogenerated": false,
"ratio": 3.7466849565615,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48673830283615,
"avg_score": null,
"num_lines": null
} |
"""add gain table
Revision ID: 40dae38fbfe8
Revises: 37edda381f28
Create Date: 2014-11-01 20:09:35.925118
"""
# revision identifiers, used by Alembic.
revision = '40dae38fbfe8'
down_revision = '37edda381f28'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the gain history/value tables and link analyses to them."""
    # Parent table: one row per gain snapshot, stamped with creator and time.
    op.create_table(
        'meas_GainHistoryTable',
        sa.Column('id', sa.INTEGER, primary_key=True),
        sa.Column('hash', sa.String(32)),
        sa.Column('create_date', sa.DateTime),
        sa.Column('user_id', sa.INTEGER, sa.ForeignKey('gen_UserTable.id')),
        mysql_engine='InnoDB')
    # Child table: one gain value per detector within a snapshot.
    op.create_table(
        'meas_GainTable',
        sa.Column('id', sa.INTEGER, primary_key=True),
        sa.Column('value', sa.Float(32)),
        sa.Column('history_id', sa.INTEGER,
                  sa.ForeignKey('meas_GainHistoryTable.id')),
        sa.Column('detector_id', sa.INTEGER,
                  sa.ForeignKey('gen_DetectorTable.id')),
        mysql_engine='InnoDB')
    # Point each analysis at the gain snapshot in effect when it was run.
    op.add_column(
        'meas_AnalysisTable',
        sa.Column('gain_history_id', sa.INTEGER,
                  sa.ForeignKey('meas_GainHistoryTable.id')))
def downgrade():
    """Reverse upgrade(): drop the FK column and both gain tables."""
    try:
        # The FK constraint name is MySQL-generated and may differ (or be
        # absent) across deployments, so dropping it is best-effort.
        op.drop_constraint('meas_analysistable_ibfk_9',
                           'meas_AnalysisTable', 'foreignkey')
    except Exception:
        # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
        # are no longer swallowed while keeping the best-effort behavior.
        pass
    op.drop_column('meas_AnalysisTable', 'gain_history_id')
    op.drop_table('meas_GainTable')
    op.drop_table('meas_GainHistoryTable')
| {
"repo_name": "USGSDenverPychron/pychron",
"path": "migration/versions/40dae38fbfe8_add_gain_table.py",
"copies": "1",
"size": "1595",
"license": "apache-2.0",
"hash": -224191695119438370,
"line_mean": 31.5510204082,
"line_max": 88,
"alpha_frac": 0.5542319749,
"autogenerated": false,
"ratio": 3.8433734939759034,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48976054688759035,
"avg_score": null,
"num_lines": null
} |
"""Add game and player context to events
Usage:
munerator [options] context
Options:
-v --verbose Verbose logging
--events-socket url ZMQ socket for raw events [default: tcp://127.0.0.1:9001]
--context-socket url ZMQ socket for context events [default: tcp://0.0.0.0:9002]
--rcon-socket url ZMQ socket for rcon commands [default: tcp://127.0.0.1:9005]
"""
from docopt import docopt
import zmq
import logging
import collections
import json
import time
import dateutil.parser
log = logging.getLogger(__name__)
# Index = Quake3 team id reported in events; index 0 is unused (empty team).
team_id_map = ['', 'blue', 'red', 'spectator']
# Ring buffer of the last few raw events, used to drop exact duplicates.
deduplicate = collections.deque(maxlen=5)
def get_dict_value_from_key_if_key_value(data, value_key, query_key, query_value):
    """Scan the values of *data* (a dict of dicts) for the first entry whose
    *query_key* equals *query_value*; return that entry's *value_key* value,
    or None when nothing matches (or the matching entry lacks *value_key*)."""
    for entry in data.values():
        if entry.get(query_key) == query_value:
            return entry.get(value_key)
    return None
class GameContext(object):
    """Enriches raw Quake3 server events with current game and player state.

    Consumes raw events from `in_socket`, maintains per-game (`gameinfo`)
    and per-client (`clients`) state, and publishes each event annotated
    with that context on `out_socket`. `rcon_socket` is used to request
    additional server info (status/dumpuser) when needed.
    """
    def __init__(self, in_socket, out_socket, rcon_socket):
        # Current game's metadata; rebuilt on initgame/getstatus events.
        self.gameinfo = {}
        # client_id -> dict of player info for the current game.
        self.clients = {}
        self.in_socket = in_socket
        self.out_socket = out_socket
        self.rcon_socket = rcon_socket
    def eventstream(self):
        """Blocking loop: receive, deduplicate, contextualize, publish."""
        # prime commands for current game info
        self.rcon_socket.send_string('status')
        self.rcon_socket.send_string('getstatus')
        while True:
            data = self.in_socket.recv_json()
            log.debug(' in: %s' % data)
            # Drop exact duplicates of recently seen events.
            if str(data) in deduplicate:
                log.debug('skip')
                continue
            deduplicate.append(str(data))
            contexted_data = self.handle_event(data)
            # Publish as "<kind> <json>" so subscribers can filter by kind.
            self.out_socket.send_string(
                "%s %s" % (contexted_data.get('kind'), json.dumps(contexted_data)))
    def handle_event(self, data):
        """Update internal state from one event and return it with
        `game_info` and `client_info` attached.

        Branch order matters: context is attached *before* disconnect /
        shutdown cleanup so the outgoing event still carries the state,
        and `data['game_info']` aliases `self.gameinfo`, so the
        shutdowngame mutations below are visible in the emitted event.
        """
        kind = data.get('kind')
        client_id = data.get('client_id')
        # for some events we need to translate player_name into client_id
        if not client_id and kind in ['say', 'killer', 'killed']:
            # '<world>' kills carry the victim's name instead.
            if data.get('player_name') == '<world>':
                player_name = data.get('killed_name')
            else:
                player_name = data.get('player_name')
            client_id = get_dict_value_from_key_if_key_value(
                self.clients, 'client_id', 'name', player_name)
            data['client_id'] = client_id
        # Ensure a record exists for any client we are about to update.
        if client_id and client_id not in self.clients:
            self.clients[client_id] = {}
        if kind in ['initgame', 'getstatus']:
            # New (or re-queried) game: rebuild game metadata from scratch.
            self.gameinfo = {
                'mapname': data.get('mapname'),
                'gametype': data.get('gametype'),
                'timestamp': data.get('timestamp'),
                'fraglimit': data.get('fraglimit'),
                # Epoch seconds derived from the event timestamp.
                'start': dateutil.parser.parse(data.get('timestamp')).strftime('%s'),
                'current': True
            }
            if kind == 'initgame':
                # Only a real game start resets counters and the roster.
                self.gameinfo.update({
                    'num_players': 0,
                    'stop': ''
                })
                self.clients = {}
        elif kind in ['clientuserinfochanged', 'clientstatus']:
            log.debug('setting client info: %s' % client_id)
            self.clients[client_id].update({
                'name': data.get('player_name'),
                'id': data.get('guid'),
                'guid': data.get('guid'),
                'client_id': client_id,
                'team_id': data.get('team_id'),
                'skill': data.get('skill'),
                'team': team_id_map[int(data.get('team_id', 0))],
                'score': 0,
                'online': True,
                'bot': False,
            })
            # Bots report a skill value or the literal address 'bot'.
            if data.get('skill') or data.get('address') == 'bot':
                self.clients[client_id]['bot'] = True
            self.gameinfo['num_players'] = len(self.clients)
        elif kind == 'dumpuser':
            # Merge the full userinfo dump into the client record.
            self.clients[client_id].update(data)
        elif kind == 'playerscore':
            log.debug('setting client score: %s' % client_id)
            self.clients[client_id]['score'] = data.get('score')
        elif kind == 'clientdisconnect':
            # Mark offline now; the record itself is deleted further below,
            # after context has been attached to the outgoing event.
            if client_id in self.clients:
                self.clients[client_id]['online'] = False
        if kind in ['clientstatus', 'clientconnect']:
            # on clientstatus also request dumpuser
            self.rcon_socket.send_string('dumpuser %s' % client_id)
        data['game_info'] = self.gameinfo
        data['client_info'] = self.clients.get(client_id, {})
        if kind == 'clientdisconnect':
            try:
                del self.clients[client_id]
            except KeyError:
                pass
        elif kind == 'shutdowngame':
            # add stop time (mutates the dict already attached to `data`)
            self.gameinfo['stop'] = str(int(time.time()))
            self.gameinfo['current'] = False
            # reset current context
            self.gameinfo = {}
            self.clients = {}
        log.debug(' out: %s' % data)
        return data
def main(argv):
    """Parse CLI options, wire up the three ZMQ sockets and run the
    context-enrichment loop forever."""
    args = docopt(__doc__, argv=argv)
    zmq_ctx = zmq.Context()

    # Raw events come in over PULL.
    pull_socket = zmq_ctx.socket(zmq.PULL)
    pull_socket.bind(args['--events-socket'])
    # Contextualized events go out over PUB.
    pub_socket = zmq_ctx.socket(zmq.PUB)
    pub_socket.bind(args['--context-socket'])
    # Rcon commands are pushed to the game-server bridge.
    push_socket = zmq_ctx.socket(zmq.PUSH)
    push_socket.connect(args['--rcon-socket'])

    game_context = GameContext(pull_socket, pub_socket, push_socket)
    game_context.eventstream()
| {
"repo_name": "aequitas/munerator",
"path": "munerator/context.py",
"copies": "1",
"size": "5383",
"license": "mit",
"hash": -1535824262468400600,
"line_mean": 31.6242424242,
"line_max": 85,
"alpha_frac": 0.5422626788,
"autogenerated": false,
"ratio": 3.7643356643356642,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9804433329457152,
"avg_score": 0.0004330027357025745,
"num_lines": 165
} |
def add_game_times(count):
    """Return an upsert statement recording *count* (author_id, name,
    duration) rows; on conflict the duration is accumulated.

    Was a lambda assigned to a name (PEP 8 E731); converted to a def with
    the same name and behavior.
    """
    return '''
    insert into game_played_time (author_id, name, duration)
    values {}
    on conflict (author_id, name)
    do update set duration = game_played_time.duration + EXCLUDED.duration
    where game_played_time.author_id = EXCLUDED.author_id
      and game_played_time.name = EXCLUDED.name
'''.format(','.join(['%s'] * count))

# All games for one author, longest-played first.
author_games = '''
    select id, author_id, name, duration, created_at
    from game_played_time
    where author_id = %(author_id)s
    order by duration desc
'''

# Aggregate totals per game across a set of authors.
top_games = '''
    select name, sum(duration), count(author_id)
    from game_played_time
    where author_id in %(authors_id)s
    group by name
    order by sum(duration) desc
    limit %(limit)s
'''

# Top players of a single game by time played.
top_users = '''
    select author_id, duration
    from game_played_time
    where name = %(name)s
    order by duration desc
    limit %(limit)s
'''
| {
"repo_name": "best-coloc-ever/globibot",
"path": "bot/plugins/stats/queries.py",
"copies": "1",
"size": "1060",
"license": "mit",
"hash": 2606553420464164000,
"line_mean": 32.125,
"line_max": 76,
"alpha_frac": 0.5396226415,
"autogenerated": false,
"ratio": 3.7062937062937062,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.9741452062079421,
"avg_score": 0.0008928571428571428,
"num_lines": 32
} |
"""add gateways table
Revision ID: 33f748cdc2f
Revises: 956b1aac1
Create Date: 2014-03-22 23:28:27.605724
"""
# revision identifiers, used by Alembic.
revision = '33f748cdc2f'
down_revision = '956b1aac1'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the gateways table (one row per VPN gateway)."""
    schema = [
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=32), nullable=False),
        sa.Column('isp_name', sa.String(length=32), nullable=False),
        sa.Column('isp_url', sa.String(), nullable=False),
        sa.Column('country', sa.String(length=2), nullable=False),
        sa.Column('token', sa.String(length=32), nullable=False),
        sa.Column('ipv4', sa.String(), nullable=True),
        sa.Column('ipv6', sa.String(), nullable=True),
        sa.Column('bps', sa.BigInteger(), nullable=True),
        # Gateways default to enabled at the database level.
        sa.Column('enabled', sa.Boolean(), server_default='true', nullable=False),
        sa.PrimaryKeyConstraint('id'),
    ]
    op.create_table('gateways', *schema)
def downgrade():
    """Drop the gateways table created by upgrade()."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('gateways')
    ### end Alembic commands ###
| {
"repo_name": "CCrypto/ccvpn",
"path": "alembic/versions/33f748cdc2f_add_gateways_table.py",
"copies": "2",
"size": "1179",
"license": "mit",
"hash": 2198781639251720200,
"line_mean": 30.0263157895,
"line_max": 78,
"alpha_frac": 0.6700593723,
"autogenerated": false,
"ratio": 3.3211267605633803,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9941399954844036,
"avg_score": 0.009957235603869054,
"num_lines": 38
} |
import sys,os
import numpy as np
#
# Import the module with the I/O scaffolding of the External Attribute
#
sys.path.insert(0, os.path.join(sys.path[0], '..'))
import extattrib as xa
#
# The attribute parameters
#
# Attribute parameter spec consumed by the External Attribute scaffolding/UI.
xa.params = {
    'Inputs': ['Input'],
    # Vertical sample window; Hidden/Symmetric control the UI presentation.
    'ZSampMargin' : {'Value': [-5,5], 'Hidden': True, 'Symmetric': True},
    # User-visible signal-to-noise control read by doCompute.
    'Par_0' : {'Name': 'S/N Ratio', 'Value': 1},
    'Parallel' : True,
    'Help' : 'http://waynegm.github.io/OpendTect-Plugin-Docs/external_attributes/Add_Noise.html'
}
#
# Define the compute function
#
def doCompute():
    """Trace-processing loop: add Gaussian white noise to each input trace
    at the requested signal-to-noise ratio, then emit the result.

    Runs forever; xa.doInput()/xa.doOutput() handle the framework I/O.
    """
    # Hoisted out of the loop: the S/N parameter is fixed for the run.
    # (The previously computed, unused ZSampMargin window width was removed.)
    snr = xa.params['Par_0']['Value']
    #
    # This is the trace processing loop
    #
    while True:
        xa.doInput()
        data = xa.Input['Input'][0,0,:]
        #
        # Compute noise scaled so that var(data) / (var(noise) * snr) = scale
        # NOTE(review): `scale` multiplies noise *amplitudes* but is a ratio
        # of *variances* without a sqrt — confirm the intended S/N definition.
        #
        vardata = np.var(data)
        noise = np.random.randn(data.shape[-1])
        varnoise = np.var(noise)
        scale = vardata / (varnoise * snr)
        #
        # Output
        #
        xa.Output = data + scale * noise
        xa.doOutput()
#
# Register the compute function with the External Attribute scaffolding
#
xa.doCompute = doCompute
#
# Start the processing loop with any command-line arguments from OpendTect
#
xa.run(sys.argv[1:])
| {
"repo_name": "waynegm/OpendTect-External-Attributes",
"path": "Python_3/Miscellaneous/ex_addnoise.py",
"copies": "2",
"size": "1603",
"license": "mit",
"hash": 899343510564055700,
"line_mean": 22.9253731343,
"line_max": 99,
"alpha_frac": 0.6874610106,
"autogenerated": false,
"ratio": 2.8024475524475525,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4489908563047552,
"avg_score": null,
"num_lines": null
} |
"""Add gem date of import.
Revision ID: 848be944cda0
Revises: 0190195e45e0
Create Date: 2020-08-28 09:01:29.442539
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '848be944cda0'
down_revision = '0190195e45e0'
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade (e.g. upgrade_rdr)."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade (e.g. downgrade_rdr)."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_rdr():
    """Add the nullable gem_date_of_import datetime column to the member
    table and its history table."""
    for table in ('genomic_set_member', 'genomic_set_member_history'):
        op.add_column(table, sa.Column('gem_date_of_import', sa.DateTime(), nullable=True))
def downgrade_rdr():
    """Drop gem_date_of_import from the member table and its history table."""
    for table in ('genomic_set_member', 'genomic_set_member_history'):
        op.drop_column(table, 'gem_date_of_import')
def upgrade_metrics():
    """No metrics-database changes in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No metrics-database changes to revert in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/848be944cda0_add_gem_date_of_import.py",
"copies": "1",
"size": "1321",
"license": "bsd-3-clause",
"hash": 9047747878295444000,
"line_mean": 25.42,
"line_max": 110,
"alpha_frac": 0.6684330053,
"autogenerated": false,
"ratio": 3.2617283950617284,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44301614003617285,
"avg_score": null,
"num_lines": null
} |
"""Add Gem Pass to genomic set member
Revision ID: 67710db9e2e1
Revises: df221cca2299
Create Date: 2020-03-06 11:19:17.165251
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '67710db9e2e1'
down_revision = 'df221cca2299'
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade (e.g. upgrade_rdr)."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade (e.g. downgrade_rdr)."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_rdr():
    """Add the nullable gem_pass string column to the member table and its
    history table."""
    for table in ('genomic_set_member', 'genomic_set_member_history'):
        op.add_column(table, sa.Column('gem_pass', sa.String(length=10), nullable=True))
def downgrade_rdr():
    """Drop gem_pass from the member table and its history table."""
    for table in ('genomic_set_member', 'genomic_set_member_history'):
        op.drop_column(table, 'gem_pass')
def upgrade_metrics():
    """No metrics-database changes in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No metrics-database changes to revert in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/67710db9e2e1_add_gem_pass_to_genomic_set_member.py",
"copies": "1",
"size": "1307",
"license": "bsd-3-clause",
"hash": -2146856419958424800,
"line_mean": 24.6274509804,
"line_max": 107,
"alpha_frac": 0.6679418516,
"autogenerated": false,
"ratio": 3.2839195979899496,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9346774112510868,
"avg_score": 0.02101746741581623,
"num_lines": 51
} |
"""add gem ptsc run id to genomic_set_member
Revision ID: 8cda4ff4eba7
Revises: 67710db9e2e1
Create Date: 2020-03-11 08:49:20.970167
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '8cda4ff4eba7'
down_revision = '67710db9e2e1'
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade (e.g. upgrade_rdr)."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade (e.g. downgrade_rdr)."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_rdr():
    """Add gem_ptsc_sent_job_run_id (FK to genomic_job_run) to the member
    table, and the same plain column to its history table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('genomic_set_member', sa.Column('gem_ptsc_sent_job_run_id', sa.Integer(), nullable=True))
    # NOTE(review): passing None lets the dialect auto-generate the FK name,
    # which makes the constraint hard to reference (and drop) later.
    op.create_foreign_key(None, 'genomic_set_member', 'genomic_job_run', ['gem_ptsc_sent_job_run_id'], ['id'])
    op.add_column('genomic_set_member_history', sa.Column('gem_ptsc_sent_job_run_id', sa.Integer(), nullable=True))
    # ### end Alembic commands ###
def downgrade_rdr():
    """Drop the gem_ptsc_sent_job_run_id column (and its FK) again."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): drop_constraint(None, ...) cannot work as written —
    # Alembic requires a constraint name; the FK was auto-named on create,
    # so the actual generated name (e.g. 'genomic_set_member_ibfk_N') must
    # be supplied here for this downgrade to succeed.
    op.drop_constraint(None, 'genomic_set_member', type_='foreignkey')
    op.drop_column('genomic_set_member', 'gem_ptsc_sent_job_run_id')
    op.drop_column('genomic_set_member_history', 'gem_ptsc_sent_job_run_id')
    # ### end Alembic commands ###
def upgrade_metrics():
    """No metrics-database changes in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No metrics-database changes to revert in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/8cda4ff4eba7_add_gem_ptsc_run_id_to_genomic_set_.py",
"copies": "1",
"size": "1543",
"license": "bsd-3-clause",
"hash": 4676863545415982000,
"line_mean": 28.6730769231,
"line_max": 115,
"alpha_frac": 0.6701231367,
"autogenerated": false,
"ratio": 3.031434184675835,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9102817998498581,
"avg_score": 0.01974786457545078,
"num_lines": 52
} |
"""add gender_identity to view
Revision ID: 629f994cc809
Revises: fdc0fb9ca67a
Create Date: 2019-06-06 13:13:57.319280
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = "629f994cc809"
down_revision = "fdc0fb9ca67a"
branch_labels = None
depends_on = None
from rdr_service.dao.alembic_utils import ReplaceableObject
PARTICIPANT_VIEW = ReplaceableObject(
"participant_view",
"""
SELECT
p.participant_id,
p.sign_up_time,
p.withdrawal_status,
p.withdrawal_time,
p.suspension_status,
p.suspension_time,
hpo.name hpo,
ps.zip_code,
state_code.value state,
recontact_method_code.value recontact_method,
language_code.value language,
TIMESTAMPDIFF(YEAR, ps.date_of_birth, CURDATE()) age_years,
gender_code.value gender,
ps.gender_identity,
sex_code.value sex,
sexual_orientation_code.value sexual_orientation,
education_code.value education,
income_code.value income,
ps.enrollment_status,
ps.race,
ps.physical_measurements_status,
ps.physical_measurements_finalized_time,
ps.physical_measurements_time,
ps.physical_measurements_created_site_id,
ps.physical_measurements_finalized_site_id,
ps.consent_for_study_enrollment,
ps.consent_for_study_enrollment_time,
ps.consent_for_electronic_health_records,
ps.consent_for_electronic_health_records_time,
ps.questionnaire_on_overall_health,
ps.questionnaire_on_overall_health_time,
ps.questionnaire_on_lifestyle,
ps.questionnaire_on_lifestyle_time,
ps.questionnaire_on_the_basics,
ps.questionnaire_on_the_basics_time,
ps.questionnaire_on_healthcare_access,
ps.questionnaire_on_healthcare_access_time,
ps.questionnaire_on_medical_history,
ps.questionnaire_on_medical_history_time,
ps.questionnaire_on_medications,
ps.questionnaire_on_medications_time,
ps.questionnaire_on_family_health,
ps.questionnaire_on_family_health_time,
ps.biospecimen_status,
ps.biospecimen_order_time,
ps.biospecimen_source_site_id,
ps.biospecimen_collected_site_id,
ps.biospecimen_processed_site_id,
ps.biospecimen_finalized_site_id,
ps.sample_order_status_1sst8,
ps.sample_order_status_1sst8_time,
ps.sample_order_status_1pst8,
ps.sample_order_status_1pst8_time,
ps.sample_order_status_1hep4,
ps.sample_order_status_1hep4_time,
ps.sample_order_status_1ed04,
ps.sample_order_status_1ed04_time,
ps.sample_order_status_1ed10,
ps.sample_order_status_1ed10_time,
ps.sample_order_status_2ed10,
ps.sample_order_status_2ed10_time,
ps.sample_order_status_1ur10,
ps.sample_order_status_1ur10_time,
ps.sample_order_status_1sal,
ps.sample_order_status_1sal_time,
ps.sample_order_status_1sal2,
ps.sample_order_status_1sal2_time,
ps.sample_order_status_1cfd9,
ps.sample_order_status_1cfd9_time,
ps.sample_order_status_1pxr2,
ps.sample_order_status_1pxr2_time,
ps.sample_status_1sst8,
ps.sample_status_1sst8_time,
ps.sample_status_1pst8,
ps.sample_status_1pst8_time,
ps.sample_status_1hep4,
ps.sample_status_1hep4_time,
ps.sample_status_1ed04,
ps.sample_status_1ed04_time,
ps.sample_status_1ed10,
ps.sample_status_1ed10_time,
ps.sample_status_2ed10,
ps.sample_status_2ed10_time,
ps.sample_status_1ur10,
ps.sample_status_1ur10_time,
ps.sample_status_1sal,
ps.sample_status_1sal_time,
ps.sample_status_1sal2,
ps.sample_status_1sal2_time,
ps.sample_status_1cfd9,
ps.sample_status_1cfd9_time,
ps.sample_status_1pxr2,
ps.sample_status_1pxr2_time,
ps.num_completed_baseline_ppi_modules,
ps.num_completed_ppi_modules,
ps.num_baseline_samples_arrived,
ps.samples_to_isolate_dna,
ps.consent_for_cabor,
ps.consent_for_cabor_time,
(SELECT IFNULL(GROUP_CONCAT(
IF(ac.value = 'WhatRaceEthnicity_RaceEthnicityNoneOfThese',
'NoneOfThese',
TRIM(LEADING 'WhatRaceEthnicity_' FROM
TRIM(LEADING 'PMI_' FROM ac.value)))),
'None')
FROM questionnaire_response qr, questionnaire_response_answer qra,
questionnaire_question qq, code c, code ac
WHERE qra.end_time IS NULL AND
qr.questionnaire_response_id = qra.questionnaire_response_id AND
qra.question_id = qq.questionnaire_question_id AND
qq.code_id = c.code_id AND c.value = 'Race_WhatRaceEthnicity' AND
qr.participant_id = p.participant_id AND
qra.value_code_id = ac.code_id AND
ac.value != 'WhatRaceEthnicity_Hispanic'
) race_codes,
(SELECT COUNT(ac.value)
FROM questionnaire_response qr, questionnaire_response_answer qra,
questionnaire_question qq, code c, code ac
WHERE qra.end_time IS NULL AND
qr.questionnaire_response_id = qra.questionnaire_response_id AND
qra.question_id = qq.questionnaire_question_id AND
qq.code_id = c.code_id AND c.value = 'Race_WhatRaceEthnicity' AND
qr.participant_id = p.participant_id AND
qra.value_code_id = ac.code_id AND
ac.value = 'WhatRaceEthnicity_Hispanic'
) hispanic
FROM
participant p
LEFT OUTER JOIN hpo ON p.hpo_id = hpo.hpo_id
LEFT OUTER JOIN participant_summary ps ON p.participant_id = ps.participant_id
LEFT OUTER JOIN code state_code ON ps.state_id = state_code.code_id
LEFT OUTER JOIN code recontact_method_code ON ps.recontact_method_id = recontact_method_code.code_id
LEFT OUTER JOIN code language_code ON ps.language_id = language_code.code_id
LEFT OUTER JOIN code gender_code ON ps.gender_identity_id = gender_code.code_id
LEFT OUTER JOIN code sex_code ON ps.sex_id = sex_code.code_id
LEFT OUTER JOIN code sexual_orientation_code ON ps.sexual_orientation_id = sexual_orientation_code.code_id
LEFT OUTER JOIN code education_code ON ps.education_id = education_code.code_id
LEFT OUTER JOIN code income_code ON ps.income_id = income_code.code_id
WHERE (ps.email IS NULL OR ps.email NOT LIKE '%@example.com') AND
(hpo.name IS NULL OR hpo.name != 'TEST')
AND p.is_ghost_id IS NOT TRUE
""",
)
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade (e.g. upgrade_rdr)."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade (e.g. downgrade_rdr)."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_rdr():
    """Install the updated participant_view (adds ps.gender_identity)."""
    # ### commands auto generated by Alembic - please adjust! ###
    # replace_view is a custom op provided via ReplaceableObject utilities;
    # 'replaces' names the prior revision's view definition.
    op.replace_view(PARTICIPANT_VIEW, replaces="fdc0fb9ca67a.PARTICIPANT_VIEW")
    # ### end Alembic commands ###
def downgrade_rdr():
    """Restore the prior revision's participant_view definition."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.replace_view(PARTICIPANT_VIEW, replace_with="fdc0fb9ca67a.PARTICIPANT_VIEW")
    # ### end Alembic commands ###
def upgrade_metrics():
    """No metrics-database changes in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No metrics-database changes to revert in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/629f994cc809_add_gender_identity_to_view.py",
"copies": "1",
"size": "6897",
"license": "bsd-3-clause",
"hash": -281458404064564300,
"line_mean": 34.0101522843,
"line_max": 111,
"alpha_frac": 0.7107438017,
"autogenerated": false,
"ratio": 2.9336452573373033,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41443890590373034,
"avg_score": null,
"num_lines": null
} |
"""Add general_ledger_post_date to award_financial tables
Revision ID: e5b90e0b2ff8
Revises: 7dd3f4b007e5
Create Date: 2019-05-09 13:16:09.898165
"""
# revision identifiers, used by Alembic.
revision = 'e5b90e0b2ff8'
down_revision = '7dd3f4b007e5'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade (e.g. upgrade_data_broker)."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade (e.g. downgrade_data_broker)."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_data_broker():
    """Add the nullable general_ledger_post_date date column to both the
    live and certified award_financial tables."""
    for table in ('award_financial', 'certified_award_financial'):
        op.add_column(table, sa.Column('general_ledger_post_date', sa.Date(), nullable=True))
def downgrade_data_broker():
    """Drop general_ledger_post_date from both award_financial tables."""
    for table in ('certified_award_financial', 'award_financial'):
        op.drop_column(table, 'general_ledger_post_date')
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/e5b90e0b2ff8_add_general_ledger_post_date_to_award_.py",
"copies": "1",
"size": "1108",
"license": "cc0-1.0",
"hash": 8334897830454335000,
"line_mean": 25.380952381,
"line_max": 111,
"alpha_frac": 0.6976534296,
"autogenerated": false,
"ratio": 3.138810198300283,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9200640478189513,
"avg_score": 0.027164629942153993,
"num_lines": 42
} |
"""Add genomes to an existing clinical report.
Example usages:
* Adding male proband genome (id = 1) to a solo or panel report (clinical_report_id = 100)
python add_genome_to_cr.py --p 1 --sex m 100
* Adding female proband (id =1), mother (id = 2) and father genome (id = 3) to a family or panel
trio report (clinical_report_id = 100)
python add_genome_to_cr.py --p 1 --sex f --f 2 --m 3 1000
If sibling genome is specified, then sibling sex and affected status must also be specified.
Sample console output:
Adding genome(s) to report...
Clinical Report Info:
id: 3340
test_type: panel
accession_id: PGX15-010311
created_on: 2015-11-18T17:02:39
created_by: 185
status: WAITING
filter_id: None
panel_id: 1987
filter_name: None
workspace_id: 233
sample_collected_date: None
sample_received_date: None
include_cosmic: False
vaast_report_id: None
mother_genome_id: None
father_genome_id: None
genome_id: 206164
version: 4
"""
import os
import json
import requests
from requests.auth import HTTPBasicAuth
import sys
import argparse
# Load API credentials from the environment; fail fast at import time if
# they are missing so the script never issues unauthenticated requests.
if "FABRIC_API_PASSWORD" not in os.environ:
    sys.exit("FABRIC_API_PASSWORD environment variable missing")
if "FABRIC_API_LOGIN" not in os.environ:
    sys.exit("FABRIC_API_LOGIN environment variable missing")
FABRIC_API_LOGIN = os.environ['FABRIC_API_LOGIN']
FABRIC_API_PASSWORD = os.environ['FABRIC_API_PASSWORD']
# API base URL is overridable (e.g. for staging); defaults to production.
FABRIC_API_URL = os.environ.get('FABRIC_API_URL', 'https://api.fabricgenomics.com')
# Basic-auth object reused by every request in this script.
auth = HTTPBasicAuth(FABRIC_API_LOGIN, FABRIC_API_PASSWORD)
def add_genome_to_clinical_report(clinical_report_id,
                                  proband_genome_id=None,
                                  proband_sex=None,
                                  mother_genome_id=None,
                                  father_genome_id=None,
                                  sibling_genome_id=None,
                                  sibling_affected=None,
                                  sibling_sex=None,
                                  duo_relation_genome_id=None,
                                  duo_relation_affected=None,
                                  vaast_report_id=None,
                                  filter_id=None):
    """Use the Omicia API to add genome(s) to a clinical report
    """
    url = "{}/reports/{}".format(FABRIC_API_URL, clinical_report_id)
    payload = {
        'proband_genome_id': proband_genome_id,
        'proband_sex': proband_sex,
        'mother_genome_id': mother_genome_id,
        'father_genome_id': father_genome_id,
        'sibling_genome_id': sibling_genome_id,
        'sibling_affected': sibling_affected,
        'sibling_sex': sibling_sex,
        'duo_relation_genome_id': duo_relation_genome_id,
        'duo_relation_affected': duo_relation_affected,
        'vaast_report_id': vaast_report_id,
    }
    # filter_id semantics: omitted when None; -1 is the sentinel meaning
    # "explicitly no filter" and is sent as a null value.
    if filter_id is not None:
        payload['filter_id'] = None if filter_id == -1 else filter_id

    sys.stdout.write("Adding genome(s) to report...")
    sys.stdout.write("\n\n")
    sys.stdout.flush()

    response = requests.put(url, auth=auth, data=json.dumps(payload))
    return response.json()
def main():
    """Main function. Add genomes and metadata to an existing clinical report.

    Parses CLI arguments, validates sibling-genome requirements, calls the
    API, and prints a summary of the resulting clinical report. Exits with
    a message on bad argument combinations or a malformed API response.
    """
    parser = argparse.ArgumentParser(description='Add genome ids or vaast report ids to existing clinical reports.')
    parser.add_argument('c', metavar='clinical_report_id', type=int)
    parser.add_argument('p', metavar='proband_genome_id', type=int)
    parser.add_argument('--m', metavar='mother_genome_id', type=int)
    parser.add_argument('--f', metavar='father_genome_id', type=int)
    parser.add_argument('--s', metavar='sibling_genome_id', type=int)
    parser.add_argument('--sibling_affected', metavar='sibling_affected', type=str, choices=['true', 'false'])
    parser.add_argument('--sibling_sex', metavar='sibling_sex', type=str, choices=['m', 'f'])
    parser.add_argument('--d', metavar='duo_relation_genome_id', type=int)
    parser.add_argument('--duo_affected', metavar='duo_relation_affected', type=str, choices=['true', 'false'])
    parser.add_argument('--v', metavar='vaast_report_id', type=int)
    parser.add_argument('--filter_id', metavar='filter_id', type=int)
    parser.add_argument('sex', metavar='sex', type=str, choices=['f', 'm', 'u'])
    args = parser.parse_args()

    cr_id = args.c
    proband_genome_id = args.p
    mother_genome_id = args.m
    father_genome_id = args.f
    sibling_genome_id = args.s
    sibling_affected = args.sibling_affected
    sibling_sex = args.sibling_sex
    duo_relation_genome_id = args.d
    duo_relation_affected = args.duo_affected
    vaast_report_id = args.v
    proband_sex = args.sex
    filter_id = args.filter_id

    # A sibling genome is only meaningful with its sex and affected status.
    if sibling_genome_id is not None:
        if sibling_sex is None:
            sys.exit("Sibling sex must be specified as m (male) or f (female) if "
                     "sibling genome is specified.")
        if sibling_affected is None:
            sys.exit("Sibling affected status must be true or false "
                     "if sibling genome is specified.")

    json_response = add_genome_to_clinical_report(cr_id,
                                                  proband_genome_id=proband_genome_id,
                                                  proband_sex=proband_sex,
                                                  mother_genome_id=mother_genome_id,
                                                  father_genome_id=father_genome_id,
                                                  sibling_genome_id=sibling_genome_id,
                                                  sibling_affected=sibling_affected,
                                                  sibling_sex=sibling_sex,
                                                  duo_relation_genome_id=duo_relation_genome_id,
                                                  duo_relation_affected=duo_relation_affected,
                                                  vaast_report_id=vaast_report_id,
                                                  filter_id=filter_id)
    if "clinical_report" not in json_response:
        # BUG FIX: sys.stderr is a file object, not a callable — the original
        # `sys.stderr(json_response)` raised TypeError and hid the API error.
        sys.stderr.write("{}\n".format(json_response))
        sys.exit("Failed to launch. Check report parameters for correctness.")
    clinical_report = json_response['clinical_report']
    sys.stdout.write('Clinical Report Info:\n'
                     'id: {}\n'
                     'test_type: {}\n'
                     'accession_id: {}\n'
                     'created_on: {}\n'
                     'created_by: {}\n'
                     'status: {}\n'
                     'filter_id: {}\n'
                     'panel_id: {}\n'
                     'filter_name: {}\n'
                     'workspace_id: {}\n'
                     'sample_collected_date: {}\n'
                     'sample_received_date: {}\n'
                     'include_cosmic: {}\n'
                     'vaast_report_id: {}\n'
                     'mother_genome_id: {}\n'
                     'father_genome_id: {}\n'
                     'genome_id: {}\n'
                     'version: {}\n'
                     .format(clinical_report.get('id', 'Missing'),
                             clinical_report.get('test_type', 'Missing'),
                             clinical_report.get('accession_id', 'Missing'),
                             clinical_report.get('created_on', 'Missing'),
                             clinical_report.get('created_by', 'Missing'),
                             clinical_report.get('status', 'Missing'),
                             clinical_report.get('filter_id', 'Missing'),
                             clinical_report.get('panel_id', 'Missing'),
                             clinical_report.get('filter_name', 'Missing'),
                             clinical_report.get('workspace_id', 'Missing'),
                             clinical_report.get('sample_collected_date', 'Missing'),
                             clinical_report.get('sample_received_date', 'Missing'),
                             clinical_report.get('include_cosmic', 'Missing'),
                             clinical_report.get('vaast_report_id', 'Missing'),
                             clinical_report.get('mother_genome_id', 'Missing'),
                             clinical_report.get('father_genome_id', 'Missing'),
                             clinical_report.get('genome_id', 'Missing'),
                             clinical_report.get('version', 'Missing')))
# Script entry point: only run when executed directly, not when imported.
if __name__ == "__main__":
    main()
| {
"repo_name": "Omicia/omicia_api_examples",
"path": "python/ClinicalReportLaunchers/add_genome_to_cr.py",
"copies": "1",
"size": "8794",
"license": "mit",
"hash": 2100031464069810400,
"line_mean": 44.8020833333,
"line_max": 116,
"alpha_frac": 0.5402547191,
"autogenerated": false,
"ratio": 3.9084444444444446,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.99372315743412,
"avg_score": 0.00229351784064892,
"num_lines": 192
} |
"""add genomic_set_member.aw2f_manifest_job_run_id
Revision ID: 355854d777d3
Revises: a2d41894a561
Create Date: 2021-06-22 11:08:51.988993
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '355854d777d3'
down_revision = 'a2d41894a561'
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the per-database upgrade function, e.g. ``upgrade_rdr``."""
    globals()["upgrade_{}".format(engine_name)]()
def downgrade(engine_name):
    """Dispatch to the per-database downgrade function, e.g. ``downgrade_rdr``."""
    globals()["downgrade_{}".format(engine_name)]()
def upgrade_rdr():
    # ### commands auto generated by Alembic - please adjust! ###
    # Replace the AW2F file-processed reference with a job-run reference.
    op.add_column('genomic_set_member', sa.Column('aw2f_manifest_job_run_id', sa.Integer(), nullable=True))
    op.drop_constraint('genomic_set_member_ibfk_27', 'genomic_set_member', type_='foreignkey')
    # NOTE(review): name=None lets Alembic auto-name the new FK; the downgrade
    # then cannot reference it by name -- confirm the naming convention used.
    op.create_foreign_key(None, 'genomic_set_member', 'genomic_job_run', ['aw2f_manifest_job_run_id'], ['id'])
    op.drop_column('genomic_set_member', 'aw2f_file_processed_id')
    # ### end Alembic commands ###
def downgrade_rdr():
    # ### commands auto generated by Alembic - please adjust! ###
    # Restore the AW2F file-processed column and its original foreign key.
    op.add_column('genomic_set_member', sa.Column('aw2f_file_processed_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))
    # NOTE(review): drop_constraint(None, ...) is an autogenerate artifact --
    # Alembic requires the real constraint name here; this will fail if run.
    op.drop_constraint(None, 'genomic_set_member', type_='foreignkey')
    op.create_foreign_key('genomic_set_member_ibfk_27', 'genomic_set_member', 'genomic_file_processed', ['aw2f_file_processed_id'], ['id'])
    op.drop_column('genomic_set_member', 'aw2f_manifest_job_run_id')
    # ### end Alembic commands ###
def upgrade_metrics():
    """No changes to the metrics database in this revision."""
def downgrade_metrics():
    """No changes to the metrics database in this revision."""
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/355854d777d3_add_genomic_set_member_aw2f_manifest_.py",
"copies": "1",
"size": "1842",
"license": "bsd-3-clause",
"hash": 531629186283137340,
"line_mean": 32.4909090909,
"line_max": 145,
"alpha_frac": 0.6851248643,
"autogenerated": false,
"ratio": 3.106239460370995,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9196565528646532,
"avg_score": 0.018959759204892585,
"num_lines": 55
} |
"""add_genomic_set_member columns_for_aw3_aw4
Revision ID: 2c3a71f9fc04
Revises: c069abb92cc0
Create Date: 2020-08-25 08:57:17.987756
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '2c3a71f9fc04'
down_revision = 'c069abb92cc0'
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the per-database upgrade function, e.g. ``upgrade_rdr``."""
    globals()["upgrade_{}".format(engine_name)]()
def downgrade(engine_name):
    """Dispatch to the per-database downgrade function, e.g. ``downgrade_rdr``."""
    globals()["downgrade_{}".format(engine_name)]()
def upgrade_rdr():
    # ### commands auto generated by Alembic - please adjust! ###
    # Replace the array/WGS-specific AW3 job-run columns with a single AW3
    # column, and add an AW4 column, on both the live and history tables.
    op.add_column('genomic_set_member', sa.Column('aw3_manifest_job_run_id', sa.Integer(), nullable=True))
    op.add_column('genomic_set_member_history', sa.Column('aw3_manifest_job_run_id', sa.Integer(), nullable=True))
    op.add_column('genomic_set_member', sa.Column('aw4_manifest_job_run_id', sa.Integer(), nullable=True))
    op.add_column('genomic_set_member_history', sa.Column('aw4_manifest_job_run_id', sa.Integer(), nullable=True))
    op.drop_constraint('genomic_set_member_ibfk_23', 'genomic_set_member', type_='foreignkey')
    op.drop_constraint('genomic_set_member_ibfk_24', 'genomic_set_member', type_='foreignkey')
    # NOTE(review): name=None auto-names these FKs; the downgrade below cannot
    # reference them by name -- confirm the constraint naming convention.
    op.create_foreign_key(None, 'genomic_set_member', 'genomic_job_run', ['aw3_manifest_job_run_id'], ['id'])
    op.create_foreign_key(None, 'genomic_set_member', 'genomic_job_run', ['aw4_manifest_job_run_id'], ['id'])
    op.drop_column('genomic_set_member', 'wgs_aw3_manifest_job_run_id')
    op.drop_column('genomic_set_member_history', 'wgs_aw3_manifest_job_run_id')
    op.drop_column('genomic_set_member', 'arr_aw3_manifest_job_run_id')
    op.drop_column('genomic_set_member_history', 'arr_aw3_manifest_job_run_id')
    # ### end Alembic commands ###
def downgrade_rdr():
    # ### commands auto generated by Alembic - please adjust! ###
    # Restore the split array/WGS AW3 columns and their original foreign keys.
    op.add_column('genomic_set_member', sa.Column('arr_aw3_manifest_job_run_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))
    op.add_column('genomic_set_member_history', sa.Column('arr_aw3_manifest_job_run_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))
    op.add_column('genomic_set_member', sa.Column('wgs_aw3_manifest_job_run_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))
    op.add_column('genomic_set_member_history', sa.Column('wgs_aw3_manifest_job_run_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))
    # NOTE(review): drop_constraint(None, ...) twice is an autogenerate
    # artifact -- Alembic needs the real names of the two FKs created by
    # upgrade_rdr(); as written this will fail if run.
    op.drop_constraint(None, 'genomic_set_member', type_='foreignkey')
    op.drop_constraint(None, 'genomic_set_member', type_='foreignkey')
    op.create_foreign_key('genomic_set_member_ibfk_24', 'genomic_set_member', 'genomic_job_run', ['wgs_aw3_manifest_job_run_id'], ['id'])
    op.create_foreign_key('genomic_set_member_ibfk_23', 'genomic_set_member', 'genomic_job_run', ['arr_aw3_manifest_job_run_id'], ['id'])
    op.drop_column('genomic_set_member', 'aw4_manifest_job_run_id')
    op.drop_column('genomic_set_member_history', 'aw4_manifest_job_run_id')
    op.drop_column('genomic_set_member', 'aw3_manifest_job_run_id')
    op.drop_column('genomic_set_member_history', 'aw3_manifest_job_run_id')
    # ### end Alembic commands ###
def upgrade_metrics():
    """No changes to the metrics database in this revision."""
def downgrade_metrics():
    """No changes to the metrics database in this revision."""
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/2c3a71f9fc04_add_genomic_set_member_columns_for_aw3_.py",
"copies": "1",
"size": "3524",
"license": "bsd-3-clause",
"hash": 6632828655767912000,
"line_mean": 42.5061728395,
"line_max": 158,
"alpha_frac": 0.6955164586,
"autogenerated": false,
"ratio": 2.914805624483044,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4110322083083044,
"avg_score": null,
"num_lines": null
} |
"""add geoloc tables
Revision ID: 2b35f2f2adcb
Revises: 29474f196c96
Create Date: 2015-11-09 00:12:02.604229
"""
# revision identifiers, used by Alembic.
revision = '2b35f2f2adcb'
down_revision = '29474f196c96'
branch_labels = None
depends_on = None
import os
import json
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the geolocation lookup tables and seed each one from the
    bundled JSON fixture files in the project's data/ directory.

    Tables created: rb_countries, rb_subregions (integer ids) and
    country_codes (ISO 3166-1 alpha-2 string ids).
    """
    # Project root is three levels above this migration file.
    data_dir = os.path.join(
        os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))),
        'data')

    def _create_and_seed(table_name, id_column, filename):
        # Create a two-column lookup table and bulk-load its fixture rows.
        table = op.create_table(
            table_name,
            id_column,
            sa.Column('name', sa.Unicode(255)),
        )
        with open(os.path.join(data_dir, filename), 'r') as fp:
            op.bulk_insert(table, json.load(fp))

    _create_and_seed('rb_countries',
                     sa.Column('id', sa.Integer, primary_key=True),
                     'ratebeer_countries.json')
    _create_and_seed('rb_subregions',
                     sa.Column('id', sa.Integer, primary_key=True),
                     'ratebeer_subregions.json')
    # Country codes key on the two-letter ISO code itself.
    _create_and_seed('country_codes',
                     sa.Column('id', sa.Unicode(2), primary_key=True),
                     '3166-1alpha-2.json')
def downgrade():
    """Remove the geolocation lookup tables created by this revision."""
    for table_name in ('rb_countries', 'rb_subregions', 'country_codes'):
        op.drop_table(table_name)
| {
"repo_name": "atlefren/beerdatabase",
"path": "alembic/versions/2b35f2f2adcb_add_geoloc_tables.py",
"copies": "1",
"size": "1700",
"license": "mit",
"hash": -3715522950239192000,
"line_mean": 27.813559322,
"line_max": 88,
"alpha_frac": 0.6641176471,
"autogenerated": false,
"ratio": 3.1135531135531136,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9252919922099038,
"avg_score": 0.004950167710815096,
"num_lines": 59
} |
"""Add github model
Revision ID: 22d18de9decd
Revises: 2d33b5d9977a
Create Date: 2016-01-25 17:05:44.456193
"""
# revision identifiers, used by Alembic.
revision = '22d18de9decd'
down_revision = '2d33b5d9977a'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add Github repository statistic columns to the github table."""
    ### commands auto generated by Alembic - please adjust! ###
    stat_columns = (
        sa.Column('contributors', sa.Integer(), nullable=True),
        sa.Column('development_activity', sa.String(length=20), nullable=True),
        sa.Column('first_commit', sa.DateTime(), nullable=True),
        sa.Column('forks', sa.Integer(), nullable=True),
        sa.Column('issues', sa.Integer(), nullable=True),
        sa.Column('last_commit', sa.DateTime(), nullable=True),
        sa.Column('pull_requests', sa.Integer(), nullable=True),
        sa.Column('watchers', sa.Integer(), nullable=True),
    )
    for column in stat_columns:
        op.add_column('github', column)
    ### end Alembic commands ###
def downgrade():
    """Drop the Github statistic columns, in the reverse order of upgrade()."""
    ### commands auto generated by Alembic - please adjust! ###
    for column_name in ('watchers', 'pull_requests', 'last_commit', 'issues',
                        'forks', 'first_commit', 'development_activity',
                        'contributors'):
        op.drop_column('github', column_name)
    ### end Alembic commands ###
| {
"repo_name": "lord63/flask_toolbox",
"path": "migrations/versions/22d18de9decd_add_github_model.py",
"copies": "1",
"size": "1515",
"license": "mit",
"hash": 7080131101656141000,
"line_mean": 36.875,
"line_max": 99,
"alpha_frac": 0.6792079208,
"autogenerated": false,
"ratio": 3.3223684210526314,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9472283851388066,
"avg_score": 0.005858498092913054,
"num_lines": 40
} |
"""Add github roles to sphinx docs.
Based entirely on Doug Hellmann's bitbucket version, but
adapted for Github.
(https://bitbucket.org/dhellmann/sphinxcontrib-bitbucket/)
"""
from urlparse import urljoin
from docutils import nodes, utils
from docutils.parsers.rst.roles import set_classes
def make_node(rawtext, app, type_, slug, options):
    """Build a docutils reference node pointing at a Github commit or issue.

    Raises ValueError when the ``github_project_url`` config value is unset.
    """
    project_url = app.config.github_project_url
    if project_url is None:
        raise ValueError(
            "Configuration value for 'github_project_url' is not set.")
    full_ref = urljoin(project_url, '%s/%s' % (type_, slug))
    set_classes(options)
    # Display "issue" (singular) even though the URL path segment is "issues".
    label = ('issue' if type_ == 'issues' else type_) + ' ' + utils.unescape(slug)
    return nodes.reference(rawtext, label, refuri=full_ref, **options)
def github_sha(name, rawtext, text, lineno, inliner,
               options=None, content=None):
    """Sphinx role linking a commit SHA to the Github project.

    ``options``/``content`` now default to fresh objects per call: the old
    mutable default arguments were shared across invocations, and
    ``set_classes`` (called in make_node) mutates ``options`` in place, so
    state could leak between role uses.
    """
    if options is None:
        options = {}
    if content is None:
        content = []
    app = inliner.document.settings.env.app
    node = make_node(rawtext, app, 'commit', text, options)
    return [node], []
def github_issue(name, rawtext, text, lineno, inliner,
                 options=None, content=None):
    """Sphinx role linking an issue number to the Github project.

    Reports a parse error (instead of raising) when *text* is not an
    integer, per the docutils role contract.
    """
    # Fresh default objects per call; the old mutable defaults ({} / [])
    # were shared across invocations and ``set_classes`` mutates options.
    if options is None:
        options = {}
    if content is None:
        content = []
    try:
        issue = int(text)
    except ValueError:
        msg = inliner.reporter.error(
            "Invalid Github Issue '%s', must be an integer" % text,
            line=lineno)
        problem = inliner.problematic(rawtext, rawtext, msg)
        return [problem], [msg]
    app = inliner.document.settings.env.app
    node = make_node(rawtext, app, 'issues', str(issue), options)
    return [node], []
def setup(app):
    """Register the Github link roles and their configuration value."""
    app.info('Adding github link roles')
    for role_name, role_fn in (('sha', github_sha), ('issue', github_issue)):
        app.add_role(role_name, role_fn)
    app.add_config_value('github_project_url', None, 'env')
| {
"repo_name": "todaychi/hue",
"path": "desktop/core/ext-py/boto-2.46.1/docs/source/extensions/githublinks/__init__.py",
"copies": "102",
"size": "1745",
"license": "apache-2.0",
"hash": -2095034592550934300,
"line_mean": 30.7272727273,
"line_max": 71,
"alpha_frac": 0.6303724928,
"autogenerated": false,
"ratio": 3.605371900826446,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
def addglobals(x):
    """Merge dict *x* into this module's globals.

    NOTE(review): presumably the PX8 runtime calls this to inject its API
    (random, flr, sqrt, ...) used below without imports -- confirm.
    Was a lambda assignment (PEP 8 E731); behaviour is unchanged.
    """
    globals().update(x)
def frange(start, stop, step):
    """Float version of range(): start, start+step, ... through stop.

    When (stop - start) and step have opposite signs the index range is
    empty, so an empty list is returned.
    """
    ratio = (stop - start) / step
    # +0.5001 makes the rounded count include the endpoint.
    count = round(abs(ratio) + 0.5001)
    direction = -1 if ratio < 0 else 1
    return [index * step + start for index in range(0, count, direction)]
def myrange(x):
    """Random integer between the floored endpoints of the pair *x*.

    NOTE(review): ``random`` and ``flr`` are not imported in this module;
    presumably the PX8 runtime injects them via addglobals -- confirm.
    """
    low = flr(x[0])
    high = flr(x[1])
    return random.randint(low, high)
def myrange_f(x):
    """Uniform random float drawn from the interval given by the pair *x*."""
    low = x[0]
    high = x[1]
    return random.uniform(low, high)
def lerp(f, to, t):
    """Linear interpolation from *f* toward *to* by fraction *t*."""
    delta = to - f
    return f + t * delta
def ease(t):
    """Cubic ease-in / ease-out curve for t in [0, 1]."""
    if t < 0.5:
        # Ease-in half: 4t^3.
        return 4 * t * t * t
    # Ease-out half, continuous with the first at t = 0.5.
    u = 2 * t - 2
    return (t - 1) * u * u + 1
class Vec2(object):
    """Mutable 2D vector used for positions and directions.

    ``sqrt`` is expected as a module global at runtime (injected by the
    engine); methods prefixed with ``_`` mutate in place, the rest return
    new vectors.
    """

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def len2(self):
        """Squared magnitude (avoids the square root)."""
        return self.x * self.x + self.y * self.y

    def len(self):
        """Euclidean magnitude."""
        return sqrt(self.x * self.x + self.y * self.y)

    def _add(self, b):
        """In-place componentwise addition."""
        self.x += b.x
        self.y += b.y

    def _mul(self, s):
        """In-place scalar multiplication."""
        self.x *= s
        self.y *= s

    def div(self, s):
        """Return self / s as a new vector; dividing by zero yields (0, 0)."""
        if s == 0:
            return Vec2(0, 0)
        return Vec2(self.x / s, self.y / s)

    def mul(self, s):
        return Vec2(s * self.x, s * self.y)

    def sub(self, b):
        return Vec2(self.x - b.x, self.y - b.y)

    def add(self, b):
        return Vec2(b.x + self.x, b.y + self.y)

    def lerp(self, b, t):
        """Componentwise linear interpolation toward *b* by fraction *t*."""
        return Vec2(self.x + t * (b.x - self.x),
                    self.y + t * (b.y - self.y))

    def normalize(self):
        """Unit-length copy; the zero vector normalizes to (0, 0) via div()."""
        return self.div(self.len())

    def dist(self, b):
        """Euclidean distance to *b*."""
        return self.sub(b).len()
"repo_name": "trentspi/PX8",
"path": "games/BR/utils.py",
"copies": "1",
"size": "1399",
"license": "mit",
"hash": -3019427250104677000,
"line_mean": 21.9508196721,
"line_max": 79,
"alpha_frac": 0.507505361,
"autogenerated": false,
"ratio": 2.634651600753296,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3642156961753296,
"avg_score": null,
"num_lines": null
} |
def addglobals(x):
    """Merge dict *x* into this module's globals.

    NOTE(review): presumably the PX8 runtime calls this to inject its API
    (camera, rectfill, ...) used below without imports -- confirm.
    Was a lambda assignment (PEP 8 E731); behaviour is unchanged.
    """
    globals().update(x)
from utils import Vec2, frange
class Building(object):
    """One rectangular building inside a map cell."""

    def __init__(self, w, h, pos, height, color):
        # Footprint half-extents and cell-local position.
        self.pos = pos
        self.w = w
        self.h = h
        # Vertical size and fill colour used when the walls/roof are drawn.
        self.color = color
        self.height = height
        # Screen-space roof offset; Buildings.update refreshes it every frame.
        self.s = Vec2(0, 0)
class Buildings(object):
    """Updates and draws the buildings stored on map cells.

    Relies on engine globals (camera, rectfill) injected at runtime.
    NOTE(review): indentation was reconstructed from a whitespace-mangled
    dump; the placement of the end-cap blob (p2) after the spacing loop is
    the most plausible reading -- confirm against the original file.
    """

    def __init__(self, config):
        self.config = config
        self.cell_size = self.config.cell_size

    def update(self, x, y, cell, cam, cells, blobs):
        """Recompute the building's screen offset and register collision blobs."""
        building = cell.building
        if building:
            # Camera position relative to this cell, in cell-local pixels.
            cellp = Vec2(
                cam.pos.x%self.cell_size-x*self.cell_size,
                cam.pos.y%self.cell_size-y*self.cell_size
            )
            building.s = building.pos.sub(cellp.add(self.config.perspective_offset))
            # Long/short footprint extents; blobs are spaced along the long axis.
            s1=max(building.w,building.h)
            s2=min(building.w,building.h)
            for i in frange(-s1+s2/2,s1-s2/2,s2):
                # World-space blob position offset along the long axis by i.
                p1 = Vec2((cells.pos.x+x)*self.cell_size, (cells.pos.y+y)*self.cell_size).add(building.pos)
                if s1 == building.w:
                    p1.x += i
                else:
                    p1.y += i
                blobs.add_blob(
                    p1,
                    s2
                )
            # End-cap blob at the far end, only when the last spaced blob
            # left a gap of more than 2 pixels.
            p2 = Vec2((cells.pos.x+x)*self.cell_size, (cells.pos.y+y)*self.cell_size).add(building.pos)
            if s1 == building.w:
                p2.x += s1-s2/2
            else:
                p2.y += s1-s2/2
            if p2.dist(p1) > 2:
                blobs.add_blob(
                    p2,
                    s2
                )

    def draw(self, a, b, cell, cam, shadow):
        """Draw the building (or its shadow) for the cell at grid offset (a, b)."""
        building = cell.building
        if building:
            camera(
                cam.c.x-a*self.cell_size,
                cam.c.y-b*self.cell_size
            )
            if shadow:
                # Shadow pass: lower half of the extruded column in colour 5.
                for i in frange(0,building.height/2,4):
                    t = Vec2(building.s.x,building.s.y)
                    t._mul(i*0.015)
                    t._add(building.pos)
                    rectfill(t.x-building.w, t.y-building.h, t.x+building.w, t.y+building.h, 5)
            else:
                # Wall pass: upper half of the extruded column...
                for i in frange(building.height/2,building.height-1,4):
                    t = Vec2(building.s.x,building.s.y)
                    t._mul(i*0.015)
                    t._add(building.pos)
                    rectfill(t.x-building.w, t.y-building.h, t.x+building.w, t.y+building.h, 5)
                # ...then the roof slab in the building's own colour.
                s = building.s.mul(building.height*0.015)
                s._add(building.pos)
                rectfill(s.x-building.w, s.y-building.h, s.x+building.w, s.y+building.h, building.color)
| {
"repo_name": "trentspi/PX8",
"path": "games/BR/buildings.py",
"copies": "1",
"size": "2688",
"license": "mit",
"hash": 1416428345949004500,
"line_mean": 33.4615384615,
"line_max": 108,
"alpha_frac": 0.4601934524,
"autogenerated": false,
"ratio": 3.347447073474471,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4307640525874471,
"avg_score": null,
"num_lines": null
} |
def addglobals(x):
    """Merge dict *x* into this module's globals.

    NOTE(review): presumably the PX8 runtime calls this to inject its API
    (camera, circfill, line, ...) used below without imports -- confirm.
    Was a lambda assignment (PEP 8 E731); behaviour is unchanged.
    """
    globals().update(x)
from utils import Vec2
class Tree(object):
    """A single tree: trunk position/height, girth and leaf-cluster points."""

    def __init__(self, pos, height, girth, leaves):
        self.pos = pos
        self.girth = girth
        self.height = height
        self.leaves = leaves
        # Screen-space top of the trunk; starts at the base position and is
        # recomputed each frame by Trees.update.
        self.s = Vec2(pos.x, pos.y)
class Trees(object):
    """Updates and draws the trees stored on map cells.

    Relies on engine globals (camera, circfill, line) injected at runtime.
    NOTE(review): indentation reconstructed from a whitespace-mangled dump --
    confirm loop nesting against the original file.
    """

    def __init__(self, config):
        self.trees = {}
        self.config = config
        self.cell_size = self.config.cell_size

    def update(self, x, y, cell, cam, cells, blobs):
        """Recompute each tree's screen-space top and leaf clusters, and
        register a collision blob at its base."""
        trees = cell.trees
        # Camera position relative to this cell, in cell-local pixels.
        cellp = Vec2(
            cam.pos.x%self.cell_size-x*self.cell_size,
            cam.pos.y%self.cell_size-y*self.cell_size
        )
        for tree in trees:
            # Perspective-project the trunk top away from the camera.
            tree.s = tree.pos.sub(cellp.add(self.config.perspective_offset))
            tree.s._mul(tree.height*0.015)
            tree.s._add(tree.pos)
            # Three leaf clusters at 50%, 75% and 100% up the trunk.
            leaves_0 = tree.pos.lerp(tree.s,0.5)
            leaves_1 = tree.pos.lerp(tree.s,0.75)
            leaves_2 = tree.s
            tree.leaves[0] = [leaves_0.x, leaves_0.y]
            tree.leaves[1] = [leaves_1.x, leaves_1.y]
            tree.leaves[2] = [leaves_2.x, leaves_2.y]
            # Collision blob at the trunk base, in world coordinates.
            blobs.add_blob(Vec2((cells.pos.x+x) * self.cell_size, (cells.pos.y+y)*self.cell_size).add(tree.pos), tree.girth)

    def draw(self, a, b, cell, cam, shadow):
        """Draw trees (or their shadows) for the cell at grid offset (a, b)."""
        camera(
            cam.c.x-a*self.cell_size,
            cam.c.y-b*self.cell_size
        )
        if cell.trees:
            if shadow:
                for tree in cell.trees:
                    circfill(
                        tree.pos.x+self.config.shadow_offset.x*tree.height/2,
                        tree.pos.y+self.config.shadow_offset.y*tree.height/2,
                        tree.girth,
                        5)
            else:
                # Trunk: a thick plus-shaped bundle of lines from base to top.
                for tree in cell.trees:
                    for x in range(-1,2):
                        for y in range(-1,2):
                            if abs(x)+abs(y)!=2:
                                line(tree.pos.x+x, tree.pos.y+y, tree.s.x, tree.s.y, 4)
                # Leaf clusters: (colour, radius scale) pairs, bottom to top.
                c=[[3,1],[11,0.7],[7,0.4]]
                for i in range(0, 3):
                    for tree in cell.trees:
                        circfill(tree.leaves[i][0], tree.leaves[i][1], tree.girth*c[i][1], c[i][0])
| {
"repo_name": "trentspi/PX8",
"path": "games/BR/trees.py",
"copies": "1",
"size": "2251",
"license": "mit",
"hash": -4201222314047814700,
"line_mean": 34.7301587302,
"line_max": 124,
"alpha_frac": 0.4837849845,
"autogenerated": false,
"ratio": 3.2111269614835947,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4194911945983595,
"avg_score": null,
"num_lines": null
} |
"""Add GoLabOAuthUser
Revision ID: e220a74734b
Revises: 16ac195d729e
Create Date: 2015-04-09 20:45:48.373302
"""
# revision identifiers, used by Alembic.
revision = 'e220a74734b'
down_revision = '16ac195d729e'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the GoLabOAuthUsers table and its indexes, add a
    from_developer flag to TranslationBundles, and repoint the
    TranslationMessageHistory user FK from Users to GoLabOAuthUsers.

    The FK swap is skipped on SQLite, which cannot alter constraints.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('GoLabOAuthUsers',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('display_name', sa.Unicode(length=255), nullable=False),
        sa.Column('email', sa.Unicode(length=255), nullable=False),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(u'ix_GoLabOAuthUsers_display_name', 'GoLabOAuthUsers', ['display_name'], unique=False)
    op.create_index(u'ix_GoLabOAuthUsers_email', 'GoLabOAuthUsers', ['email'], unique=True)
    op.add_column(u'TranslationBundles', sa.Column('from_developer', sa.Boolean(), nullable=True))
    op.create_index(u'ix_TranslationBundles_from_developer', 'TranslationBundles', ['from_developer'], unique=False)
    try:
        op.drop_constraint(u'TranslationMessageHistory_ibfk_2', 'TranslationMessageHistory', type_='foreignkey')
        op.create_foreign_key(None, 'TranslationMessageHistory', 'GoLabOAuthUsers', ['user_id'], ['id'])
    except Exception:
        # Was a bare "except:"; narrowed so SystemExit/KeyboardInterrupt
        # still propagate. Parenthesized print works on Python 2 and 3.
        print("drop_constraint and create_foreign_key not supported in SQLite")
    ### end Alembic commands ###
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # Restore the original Users FK, drop the developer flag and the OAuth table.
    # NOTE(review): drop_constraint(None, ...) cannot work -- Alembic needs the
    # auto-generated constraint name created in upgrade(); confirm and fill in.
    op.drop_constraint(None, 'TranslationMessageHistory', type_='foreignkey')
    op.create_foreign_key(u'TranslationMessageHistory_ibfk_2', u'TranslationMessageHistory', u'Users', [u'user_id'], [u'id'])
    op.drop_index(u'ix_TranslationBundles_from_developer', table_name='TranslationBundles')
    op.drop_column(u'TranslationBundles', 'from_developer')
    op.drop_index(u'ix_GoLabOAuthUsers_email', table_name='GoLabOAuthUsers')
    op.drop_index(u'ix_GoLabOAuthUsers_display_name', table_name='GoLabOAuthUsers')
    op.drop_table('GoLabOAuthUsers')
    ### end Alembic commands ###
| {
"repo_name": "morelab/appcomposer",
"path": "alembic/versions/e220a74734b_add_golaboauthuser.py",
"copies": "3",
"size": "2043",
"license": "bsd-2-clause",
"hash": 6444252288711774000,
"line_mean": 43.4130434783,
"line_max": 125,
"alpha_frac": 0.7165932452,
"autogenerated": false,
"ratio": 3.3768595041322316,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00928284818964227,
"num_lines": 46
} |
"""Add google analytics to sphinx documentation
imported from sphinxcontrib google analytics package
https://bitbucket.org/birkenfeld/sphinx-contrib/src/e758073384efd1ed5ed1e6286301b7bef71b27cf/googleanalytics/
"""
from sphinx.errors import ExtensionError
def add_ga_javascript(app, pagename, templatename, context, doctree):
    """Append the Google Analytics async tracking snippet to the page
    metatags, unless tracking is disabled via googleanalytics_enabled."""
    if not app.config.googleanalytics_enabled:
        return
    tracker_id = app.config.googleanalytics_id
    snippet = """<script type="text/javascript">
var _gaq = _gaq || [];
_gaq.push(['_setAccount', '%s']);
_gaq.push(['_trackPageview']);
(function() {
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
})();
</script>""" % tracker_id
    context['metatags'] = context.get('metatags', '') + snippet
def check_config(app):
    """Abort the build early when no tracking id has been configured."""
    if app.config.googleanalytics_id:
        return
    raise ExtensionError("'googleanalytics_id' config value must be set "
                         "for ga statistics to function properly.")
def setup(app):
    """Wire the extension into Sphinx: config values plus event handlers."""
    for config_name, default in (('googleanalytics_id', ''),
                                 ('googleanalytics_enabled', True)):
        app.add_config_value(config_name, default, 'html')
    app.connect('html-page-context', add_ga_javascript)
    app.connect('builder-inited', check_config)
    return {'version': '0.1'}
| {
"repo_name": "Dekken/tick",
"path": "doc/sphinxext/google_analytics.py",
"copies": "2",
"size": "1518",
"license": "bsd-3-clause",
"hash": 1482822960633527300,
"line_mean": 36.0243902439,
"line_max": 121,
"alpha_frac": 0.6627140975,
"autogenerated": false,
"ratio": 3.4578587699316627,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007108365252275927,
"num_lines": 41
} |
"""add Google user model
Revision ID: ad5ccc47d004
Revises: fe028940e19b
Create Date: 2017-02-01 08:38:14.233344
"""
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils.types.url
# revision identifiers, used by Alembic.
revision = 'ad5ccc47d004'
down_revision = 'fe028940e19b'
branch_labels = None
depends_on = None
def upgrade():
    """Create the google_users table holding Google OAuth profile data,
    linked to users with a cascading delete."""
    # ### commands auto generated by Alembic - please adjust! ###
    profile_columns = [
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('unique_id', sa.Unicode(), nullable=False),
        sa.Column('email', sa.Unicode(), nullable=False),
        sa.Column('email_verified', sa.Boolean(), nullable=False),
        sa.Column('name', sa.Unicode(), nullable=False),
        sa.Column('picture_url', sqlalchemy_utils.types.url.URLType(), nullable=False),
        sa.Column('given_name', sa.Unicode(), nullable=False),
        sa.Column('family_name', sa.Unicode(), nullable=False),
        sa.Column('locale', sa.Unicode(), nullable=False),
        sa.Column('user_id', sa.Integer(), nullable=False),
    ]
    op.create_table(
        'google_users',
        *profile_columns,
        sa.ForeignKeyConstraint(['user_id'], ['users.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('unique_id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the google_users table, reversing this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('google_users')
    # ### end Alembic commands ###
| {
"repo_name": "rjw57/bitsbox",
"path": "migrations/versions/ad5ccc47d004_add_google_user_model.py",
"copies": "1",
"size": "1394",
"license": "mit",
"hash": -7780925809125393000,
"line_mean": 31.4186046512,
"line_max": 83,
"alpha_frac": 0.6771879484,
"autogenerated": false,
"ratio": 3.4763092269326683,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9624156554895429,
"avg_score": 0.005868124087447862,
"num_lines": 43
} |
"""Add GradientAccumulator to checkpoint file."""
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
FLAGS = flags.FLAGS

# Command-line flags: path of the source checkpoint and path for the
# augmented copy written by main().
flags.DEFINE_string(
    "old",
    "//je-d/home/staging-brain-gpu-dedicated/bert/pretrained_model/converted",
    "old checkpoint file")
flags.DEFINE_string("new",
                    "//je-d/home/tpu-perf-team/shibow/bert_add_gradacc",
                    "new checkpoint file")
def main(unused_argv):
    """Copy every variable from the old checkpoint into a new one, adding a
    zero-initialized "<name>/GradientAccumulator" variable of the same shape
    and dtype alongside each original variable."""
    reader = tf.train.NewCheckpointReader(FLAGS.old)
    shapes = reader.get_variable_to_shape_map()
    dtypes = reader.get_variable_to_dtype_map()
    tf.reset_default_graph()
    with tf.Session() as sess:
        for n in shapes:
            logging.info(n)
            logging.info(shapes[n])
            logging.info(dtypes[n])
            # Recreate the original variable and load its checkpointed value.
            tf.keras.backend.set_value(
                tf.get_variable(n, shapes[n], dtypes[n]),
                np.array(reader.get_tensor(n)))
            # Add the zero-filled accumulator slot next to it.
            tf.keras.backend.set_value(
                tf.get_variable(n + "/GradientAccumulator", shapes[n], dtypes[n]),
                np.zeros(shapes[n]))
        # Persist all variables (originals + accumulators) to the new path.
        tf.train.Saver().save(sess, FLAGS.new)
# Script entry point: absl parses flags, then calls main().
if __name__ == "__main__":
    app.run(main)
| {
"repo_name": "mlcommons/training",
"path": "language_model/tensorflow/bert/checkpoint_add_gradacc.py",
"copies": "1",
"size": "1162",
"license": "apache-2.0",
"hash": -6056771784004940000,
"line_mean": 27.3414634146,
"line_max": 78,
"alpha_frac": 0.6445783133,
"autogenerated": false,
"ratio": 3.3011363636363638,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44457146769363637,
"avg_score": null,
"num_lines": null
} |
"""Add GradientAccumulator to checkpoint file."""
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow.google.compat.v1 as tf
FLAGS = flags.FLAGS

# Command-line flags: path of the source checkpoint and path for the
# augmented copy written by main().
flags.DEFINE_string(
    "old",
    "//je-d/home/staging-brain-gpu-dedicated/bert/pretrained_model/converted",
    "old checkpoint file")
flags.DEFINE_string("new",
                    "//je-d/home/tpu-perf-team/shibow/bert_add_gradacc",
                    "new checkpoint file")
def main(unused_argv):
    """Copy every variable from the old checkpoint into a new one, adding a
    zero-initialized "<name>/GradientAccumulator" variable of the same shape
    and dtype alongside each original variable."""
    reader = tf.train.NewCheckpointReader(FLAGS.old)
    shapes = reader.get_variable_to_shape_map()
    dtypes = reader.get_variable_to_dtype_map()
    tf.reset_default_graph()
    with tf.Session() as sess:
        for n in shapes:
            logging.info(n)
            logging.info(shapes[n])
            logging.info(dtypes[n])
            # Recreate the original variable and load its checkpointed value.
            tf.keras.backend.set_value(
                tf.get_variable(n, shapes[n], dtypes[n]),
                np.array(reader.get_tensor(n)))
            # Add the zero-filled accumulator slot next to it.
            tf.keras.backend.set_value(
                tf.get_variable(n + "/GradientAccumulator", shapes[n], dtypes[n]),
                np.zeros(shapes[n]))
        # Persist all variables (originals + accumulators) to the new path.
        tf.train.Saver().save(sess, FLAGS.new)
# Script entry point: absl parses flags, then calls main().
if __name__ == "__main__":
    app.run(main)
| {
"repo_name": "mlperf/training_results_v0.7",
"path": "Google/benchmarks/bert/implementations/bert-research-TF-tpu-v3-8192/checkpoint_add_gradacc.py",
"copies": "1",
"size": "1168",
"license": "apache-2.0",
"hash": 6911735071488794000,
"line_mean": 28.2,
"line_max": 78,
"alpha_frac": 0.6464041096,
"autogenerated": false,
"ratio": 3.2994350282485874,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44458391378485873,
"avg_score": null,
"num_lines": null
} |
"""Add Graph and GraphCache models
Revision ID: 654121a84a33
Revises: fc7bc5c66c63
Create Date: 2020-11-16 21:02:36.249989
"""
# revision identifiers, used by Alembic.
revision = '654121a84a33'
down_revision = 'fc7bc5c66c63'
from alembic import op
import sqlalchemy as sa
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Saved graphs: user-created, named graphs attached to a sketch.
    op.create_table('graph',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.Column('sketch_id', sa.Integer(), nullable=True),
    sa.Column('name', sa.UnicodeText(), nullable=True),
    sa.Column('description', sa.UnicodeText(), nullable=True),
    sa.Column('graph_config', sa.UnicodeText(), nullable=True),
    sa.Column('graph_elements', sa.UnicodeText(), nullable=True),
    sa.Column('graph_thumbnail', sa.UnicodeText(), nullable=True),
    sa.Column('num_nodes', sa.Integer(), nullable=True),
    sa.Column('num_edges', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['sketch_id'], ['sketch.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # Per-sketch cache of graph-plugin output (no user/name columns).
    op.create_table('graphcache',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('sketch_id', sa.Integer(), nullable=True),
    sa.Column('graph_plugin', sa.UnicodeText(), nullable=True),
    sa.Column('graph_config', sa.UnicodeText(), nullable=True),
    sa.Column('graph_elements', sa.UnicodeText(), nullable=True),
    sa.Column('num_nodes', sa.Integer(), nullable=True),
    sa.Column('num_edges', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['sketch_id'], ['sketch.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # User comments attached to a saved graph.
    op.create_table('graph_comment',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('comment', sa.UnicodeText(), nullable=True),
    sa.Column('parent_id', sa.Integer(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['parent_id'], ['graph.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # Short labels attached to a saved graph.
    op.create_table('graph_label',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('label', sa.Unicode(length=255), nullable=True),
    sa.Column('parent_id', sa.Integer(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['parent_id'], ['graph.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the graph tables; children first so FK references never dangle."""
    # ### commands auto generated by Alembic - please adjust! ###
    for table_name in ('graph_label', 'graph_comment', 'graphcache', 'graph'):
        op.drop_table(table_name)
    # ### end Alembic commands ###
| {
"repo_name": "google/timesketch",
"path": "timesketch/migrations/versions/654121a84a33_.py",
"copies": "1",
"size": "3278",
"license": "apache-2.0",
"hash": 8384118425084914000,
"line_mean": 39.975,
"line_max": 66,
"alpha_frac": 0.6555826724,
"autogenerated": false,
"ratio": 3.432460732984293,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45880434053842933,
"avg_score": null,
"num_lines": null
} |
"""Add grating_couplers to a component."""
from typing import Callable, List, Tuple
from phidl.device_layout import Label
import pp
from pp.cell import cell
from pp.component import Component
from pp.components.grating_coupler.elliptical_trenches import (
grating_coupler_te,
grating_coupler_tm,
)
from pp.routing.get_input_labels import get_input_labels
from pp.types import ComponentFactory
@cell
def add_grating_couplers(
    component: Component,
    grating_coupler: ComponentFactory = grating_coupler_te,
    layer_label: Tuple[int, int] = pp.LAYER.LABEL,
    gc_port_name: str = "W0",
    get_input_labels_function: Callable[..., List[Label]] = get_input_labels,
) -> Component:
    """Returns a new component with a grating coupler connected to every
    optical port, plus input labels.

    Args:
        component: component to add grating couplers to.
        grating_coupler: grating coupler component or factory.
        layer_label: layer for the generated labels.
        gc_port_name: name of the grating-coupler port connected to each
            optical port of the component.
        get_input_labels_function: function that builds the label list from
            the coupler references and the component's ports.
    """
    cnew = Component()
    cnew.add_ref(component)
    # Accept either a ready-made component or a factory producing one.
    grating_coupler = pp.call_if_func(grating_coupler)

    io_gratings = []
    optical_ports = component.get_ports_list(port_type="optical")
    # Attach one coupler reference per optical port.
    for port in optical_ports:
        gc_ref = grating_coupler.ref()
        gc_port = gc_ref.ports[gc_port_name]
        gc_ref.connect(gc_port, port)
        io_gratings.append(gc_ref)
        cnew.add(gc_ref)

    labels = get_input_labels_function(
        io_gratings,
        list(component.ports.values()),
        component_name=component.name,
        layer_label=layer_label,
        gc_port_name=gc_port_name,
    )
    cnew.add(labels)

    return cnew
def add_te(*args, **kwargs):
    """Add TE grating couplers (thin wrapper; uses the default TE factory)."""
    return add_grating_couplers(*args, **kwargs)
def add_tm(*args, grating_coupler=grating_coupler_tm, **kwargs):
    """Add TM grating couplers to a component."""
    component_with_gratings = add_grating_couplers(
        *args, grating_coupler=grating_coupler, **kwargs
    )
    return component_with_gratings
if __name__ == "__main__":
    # Manual smoke test: build a demo component, attach TM grating couplers,
    # and open it in the layout viewer.
    # from pp.add_labels import get_optical_text
    # c = pp.components.grating_coupler_elliptical_te()
    # print(c.wavelength)
    # print(c.get_property('wavelength'))

    c = pp.components.straight(width=2)  # NOTE(review): immediately overwritten below
    c = pp.components.mzi2x2(with_elec_connections=True)
    # cc = add_grating_couplers(c)
    cc = add_tm(c)
    print(cc)
    cc.show()
| {
"repo_name": "gdsfactory/gdsfactory",
"path": "pp/add_grating_couplers.py",
"copies": "1",
"size": "2276",
"license": "mit",
"hash": -7820169289808934000,
"line_mean": 27.45,
"line_max": 81,
"alpha_frac": 0.6691564148,
"autogenerated": false,
"ratio": 3.2056338028169016,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4374790217616902,
"avg_score": null,
"num_lines": null
} |
"""add gror columns to summary
Revision ID: 6f26f7c49be7
Revises: 64e68e221460
Create Date: 2020-02-17 14:42:24.710777
"""
from alembic import op
import sqlalchemy as sa
import model.utils
from rdr_service.participant_enums import PhysicalMeasurementsStatus, QuestionnaireStatus, OrderStatus
from rdr_service.participant_enums import WithdrawalStatus, WithdrawalReason, SuspensionStatus, QuestionnaireDefinitionStatus
from rdr_service.participant_enums import EnrollmentStatus, Race, SampleStatus, OrganizationType, BiobankOrderStatus
from rdr_service.participant_enums import OrderShipmentTrackingStatus, OrderShipmentStatus
from rdr_service.participant_enums import MetricSetType, MetricsKey, GenderIdentity
from rdr_service.model.base import add_table_history_table, drop_table_history_table
from rdr_service.model.code import CodeType
from rdr_service.model.site_enums import SiteStatus, EnrollingStatus, DigitalSchedulingStatus, ObsoleteStatus
# revision identifiers, used by Alembic.
revision = '6f26f7c49be7'
down_revision = '64e68e221460'
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the per-database upgrade function, e.g. upgrade_rdr."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the per-database downgrade function, e.g. downgrade_rdr."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_rdr():
    """Add GROR consent columns and primary-consent version to participant_summary."""
    # ### commands auto generated by Alembic - please adjust! ###
    new_columns = (
        sa.Column('consent_for_genomics_ror', model.utils.Enum(QuestionnaireStatus), nullable=True),
        sa.Column('consent_for_genomics_ror_time', model.utils.UTCDateTime(), nullable=True),
        sa.Column('consent_for_genomics_ror_authored', model.utils.UTCDateTime(), nullable=True),
        sa.Column('semantic_version_for_primary_consent', sa.String(length=100), nullable=True),
    )
    for column in new_columns:
        op.add_column('participant_summary', column)
    # ### end Alembic commands ###
def downgrade_rdr():
    """Drop the columns added by upgrade_rdr()."""
    # ### commands auto generated by Alembic - please adjust! ###
    for column_name in (
        'semantic_version_for_primary_consent',
        'consent_for_genomics_ror_time',
        'consent_for_genomics_ror_authored',
        'consent_for_genomics_ror',
    ):
        op.drop_column('participant_summary', column_name)
    # ### end Alembic commands ###
def upgrade_metrics():
    """No schema changes for the metrics database in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    # ### end Alembic commands ###
def downgrade_metrics():
    """No schema changes for the metrics database in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/6f26f7c49be7_add_gror_columns_to_summary.py",
"copies": "1",
"size": "2587",
"license": "bsd-3-clause",
"hash": 3599250495784827400,
"line_mean": 38.196969697,
"line_max": 133,
"alpha_frac": 0.7526091998,
"autogenerated": false,
"ratio": 3.500676589986468,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9664995566990484,
"avg_score": 0.017658044559196666,
"num_lines": 66
} |
""" add (groupable) categories
Revision ID: 419965394d38
Revises: 2fa92ef6570b
Create Date: 2014-09-22 16:33:46.225247
"""
# revision identifiers, used by Alembic.
revision = '419965394d38'
down_revision = '2fa92ef6570b'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the category tables: categorygroup, category (optionally in a
    group) and the categories association table linking content to category."""
    op.create_table('categorygroup',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('title', sa.Unicode(), nullable=False),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_table('category',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('title', sa.Unicode(), nullable=False),
        sa.Column('group_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['group_id'], [u'categorygroup.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_category_title'), 'category', ['title'], unique=False)
    # Many-to-many association: content <-> category.
    op.create_table('categories',
        sa.Column('category_id', sa.Integer(), nullable=False),
        sa.Column('content_id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['category_id'], [u'category.id'], ),
        sa.ForeignKeyConstraint(['content_id'], [u'content.id'], ),
        sa.PrimaryKeyConstraint('category_id', 'content_id')
    )
def downgrade():
    """Drop the category tables created by upgrade(), children first."""
    op.drop_table('categories')  # association table references the other two
    op.drop_index(op.f('ix_category_title'), table_name='category')
    for table_name in ('category', 'categorygroup'):
        op.drop_table(table_name)
| {
"repo_name": "pyfidelity/rest-seed",
"path": "backend/backrest/migrations/versions/419965394d38_add_categories.py",
"copies": "1",
"size": "1438",
"license": "bsd-2-clause",
"hash": 2074898051449491500,
"line_mean": 31.6818181818,
"line_max": 83,
"alpha_frac": 0.6481223922,
"autogenerated": false,
"ratio": 3.4401913875598087,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.957137760637292,
"avg_score": 0.0033872346773779582,
"num_lines": 44
} |
"""Add group_permission_id
Revision ID: f1ce7950ae8
Revises: 3614197cb5da
Create Date: 2014-01-13 16:54:33.181578
"""
# revision identifiers, used by Alembic.
revision = 'f1ce7950ae8'
down_revision = '3614197cb5da'
import time
import datetime
from alembic import op
import sqlalchemy as sa
import sqlalchemy.sql as sql
import os, sys
sys.path.append(os.path.join('..','..','..','..'))
import weblab.permissions as permissions
metadata = sa.MetaData()

# Lightweight table constructs (not full ORM models) mirroring only the
# columns this data migration needs to read and update.

uue = sa.Table('UserUsedExperiment', metadata,
    sa.Column('id', sa.Integer()),
    sa.Column('start_date', sa.DateTime()),
    sa.Column('user_permission_id', sa.Integer()),
    sa.Column('group_permission_id', sa.Integer()),
    sa.Column('role_permission_id', sa.Integer()),
    sa.Column('user_id', sa.Integer()),
    sa.Column('experiment_id', sa.Integer()),
)

user = sa.Table('User', metadata,
    sa.Column('id', sa.Integer()),
    sa.Column('role_id', sa.Integer()),
    sa.Column('login', sa.String(32)),
)

group = sa.Table('Group', metadata,
    sa.Column('id', sa.Integer()),
    sa.Column('parent_id', sa.Integer()),  # groups form a hierarchy
)

user_is_member_of = sa.Table('UserIsMemberOf', metadata,
    sa.Column('user_id', sa.Integer()),
    sa.Column('group_id', sa.Integer()),
)

exp = sa.Table('Experiment', metadata,
    sa.Column('id', sa.Integer()),
    sa.Column('category_id', sa.Integer()),
    sa.Column('name', sa.String(255)),
)

cat = sa.Table('ExperimentCategory', metadata,
    sa.Column('id', sa.Integer()),
    sa.Column('name', sa.String(255)),
)

# Permission tables for each scope (user / group / role), plus their
# key-value parameter tables.

user_permission = sa.Table('UserPermission', metadata,
    sa.Column('id', sa.Integer()),
    sa.Column('user_id', sa.Integer()),
    sa.Column('permission_type', sa.String(255)),
    sa.Column('date', sa.DateTime),
)

group_permission = sa.Table('GroupPermission', metadata,
    sa.Column('id', sa.Integer()),
    sa.Column('group_id', sa.Integer()),
    sa.Column('permission_type', sa.String(255)),
    sa.Column('date', sa.DateTime),
)

role_permission = sa.Table('RolePermission', metadata,
    sa.Column('id', sa.Integer()),
    sa.Column('role_id', sa.Integer()),
    sa.Column('permission_type', sa.String(255)),
    sa.Column('date', sa.DateTime),
)

user_permission_parameter = sa.Table('UserPermissionParameter', metadata,
    sa.Column('id', sa.Integer()),
    sa.Column('permission_id', sa.Integer()),
    sa.Column('permission_type_parameter', sa.String(255)),
    sa.Column('value', sa.Text()),
)

group_permission_parameter = sa.Table('GroupPermissionParameter', metadata,
    sa.Column('id', sa.Integer()),
    sa.Column('permission_id', sa.Integer()),
    sa.Column('permission_type_parameter', sa.String(255)),
    sa.Column('value', sa.Text()),
)

role_permission_parameter = sa.Table('RolePermissionParameter', metadata,
    sa.Column('id', sa.Integer()),
    sa.Column('permission_id', sa.Integer()),
    sa.Column('permission_type_parameter', sa.String(255)),
    sa.Column('value', sa.Text()),
)

# Memoization caches for permission lookups while backfilling.
GROUP_CACHE = {
    # (group_id, exp_name, cat_name) : [ permission1, permission2 ]
}
ROLE_CACHE = {
    # (role_id, exp_name, cat_name) : [ permission1, permission2 ]
}
USER_CACHE = {
    # (user_id, exp_name, cat_name) : [ permission1, permission2 ]
}
def _get_group_permissions_recursive(connection, group_id, parent_id, group_ids, exp_name, cat_name):
    """Collect 'experiment_allowed' permissions of *group_id* and of all its
    ancestors, memoized per (group_id, exp_name, cat_name) in GROUP_CACHE.

    *group_ids* accumulates the ids already visited to break cycles in the
    group hierarchy.
    """
    cache_key = group_id, exp_name, cat_name
    if cache_key in GROUP_CACHE:
        return GROUP_CACHE[cache_key]

    if group_id in group_ids:
        # Cycle in the hierarchy: stop here without caching a partial result.
        return []
    group_ids.append(group_id)

    group_permissions = []

    current = _get_permissions_by_condition(connection, 'group', group_permission.c.group_id == group_id, exp_name, cat_name)
    group_permissions.extend(current)

    if parent_id is not None:
        # BUGFIX: look up the parent of *parent_id* (i.e. the grandparent).
        # The previous query filtered on group_id, which just returned the
        # already-known parent_id; the recursion then revisited the parent,
        # hit the cycle guard and never collected permissions above the
        # direct parent.
        grandparent_query = sql.select([group.c.parent_id], group.c.id == parent_id).limit(1)
        grandparent_id = None
        for row in connection.execute(grandparent_query):
            grandparent_id = row[group.c.parent_id]
        parent_permissions = _get_group_permissions_recursive(connection, parent_id, grandparent_id, group_ids, exp_name, cat_name)
        group_permissions.extend(parent_permissions)

    GROUP_CACHE[cache_key] = group_permissions
    return group_permissions
def _get_group_permissions(connection, user_id, exp_name, cat_name):
    """Return the group-scoped permissions of *user_id* for the given
    experiment/category, walking up the hierarchy of every group the user
    is a member of."""
    group_permissions = []
    s = sql.select([ user_is_member_of.c.group_id, group.c.parent_id ], sql.and_(user_is_member_of.c.user_id == user_id, user_is_member_of.c.group_id == group.c.id) )
    for row in connection.execute(s):
        group_id = row[user_is_member_of.c.group_id]
        parent_id = row[group.c.parent_id]
        # Fresh visited-list per membership; cycles are broken inside.
        current = _get_group_permissions_recursive(connection, group_id, parent_id, [], exp_name, cat_name)
        group_permissions.extend(current)
    return group_permissions
def _get_experiment_id(connection, table, permission_id):
    """Return (experiment name, category name, time allowed) for one
    'experiment_allowed' permission, read from its parameter table.

    Any parameter not present in the database comes back as None.
    """
    wanted_parameters = sql.or_(
        table.c.permission_type_parameter == permissions.EXPERIMENT_PERMANENT_ID,
        table.c.permission_type_parameter == permissions.EXPERIMENT_CATEGORY_ID,
        table.c.permission_type_parameter == permissions.TIME_ALLOWED,
    )
    query = sql.select([table.c.permission_type_parameter, table.c.value],
                       sql.and_(table.c.permission_id == permission_id, wanted_parameters))

    # Last row wins per parameter type, like the original sequential scan.
    values = {}
    for row in connection.execute(query):
        values[row[table.c.permission_type_parameter]] = row[table.c.value]

    exp_name = values.get(permissions.EXPERIMENT_PERMANENT_ID)
    cat_name = values.get(permissions.EXPERIMENT_CATEGORY_ID)
    time_allowed = values.get(permissions.TIME_ALLOWED)
    return exp_name, cat_name, time_allowed
def _get_permissions_by_condition(connection, scope, condition, exp_name, cat_name):
    """Return the 'experiment_allowed' permissions of the given scope
    ('user', 'group' or 'role') matching *condition* and granting access to
    (exp_name, cat_name), as dicts with id/scope/time_allowed/start_date."""
    # Pick the right pair of tables by naming convention, e.g.
    # scope 'user' -> user_permission / user_permission_parameter.
    table = globals()['%s_permission' % scope]
    parameter_table = globals()['%s_permission_parameter' % scope]
    current_permissions = []
    s = sql.select([table.c.id, table.c.date],
        sql.and_(condition, table.c.permission_type == permissions.EXPERIMENT_ALLOWED))
    for row in connection.execute(s):
        exp_retrieved, cat_retrieved, time_allowed = _get_experiment_id(connection, parameter_table, row[table.c.id])
        # Keep only permissions that target exactly this experiment.
        if exp_retrieved == exp_name and cat_retrieved == cat_name:
            current_permissions.append({
                'id' : row[table.c.id],
                'scope' : scope,
                'time_allowed' : int(time_allowed),
                'start_date' : row[table.c.date],
            })
    return current_permissions
def _get_permissions(connection, user_id, user_role_id, exp_name, cat_name):
    """Return every user-, group- and role-scoped permission that grants
    (exp_name, cat_name) to this user, memoized per user and per role."""
    user_cache_key = user_id, exp_name, cat_name
    if user_cache_key in USER_CACHE:
        return USER_CACHE[user_cache_key]

    current_permissions = [
        # {
        #     'id' : 5,
        #     'scope' : 'group', # or user or role,
        #     'time_allowed' : 100,
        #     'start_date' : datetime.datetime
        # }
    ]

    user_permissions = _get_permissions_by_condition(connection, 'user', user_permission.c.user_id == user_id, exp_name, cat_name)
    current_permissions.extend(user_permissions)

    group_permissions = _get_group_permissions(connection, user_id, exp_name, cat_name)
    current_permissions.extend(group_permissions)

    # Role permissions are shared by many users, so they get their own cache.
    role_cache_key = user_role_id, exp_name, cat_name
    if role_cache_key in ROLE_CACHE:
        current_permissions.extend(ROLE_CACHE[role_cache_key])
    else:
        role_permissions = _get_permissions_by_condition(connection, 'role', role_permission.c.role_id == user_role_id, exp_name, cat_name)
        current_permissions.extend(role_permissions)
        ROLE_CACHE[role_cache_key] = role_permissions

    USER_CACHE[user_cache_key] = current_permissions
    return current_permissions
def upgrade():
    """Add user/group/role permission FK columns to UserUsedExperiment and
    backfill them for existing uses by inferring which permission most
    likely authorized each use (Python 2 code)."""
    ### commands auto generated by Alembic - please adjust! ###
    engine = op.get_bind()
    if engine.dialect.name == 'sqlite':
        # Same but without Foreign keys
        op.add_column(u'UserUsedExperiment', sa.Column('group_permission_id', sa.Integer(), nullable=True))
        op.add_column(u'UserUsedExperiment', sa.Column('user_permission_id', sa.Integer(), nullable=True))
        op.add_column(u'UserUsedExperiment', sa.Column('role_permission_id', sa.Integer(), nullable=True))
    else:
        op.add_column(u'UserUsedExperiment', sa.Column('group_permission_id', sa.Integer(), sa.ForeignKey('GroupPermission.id'), nullable=True))
        op.add_column(u'UserUsedExperiment', sa.Column('user_permission_id', sa.Integer(), sa.ForeignKey('UserPermission.id'), nullable=True))
        op.add_column(u'UserUsedExperiment', sa.Column('role_permission_id', sa.Integer(), sa.ForeignKey('RolePermission.id'), nullable=True))
    ### end Alembic commands ###

    # All uses joined with their user, experiment and category.
    s = sql.select([
            uue.c.id, uue.c.start_date,
            uue.c.user_permission_id, uue.c.group_permission_id, uue.c.role_permission_id,
            user.c.id, user.c.login, user.c.role_id,
            exp.c.name, cat.c.name,
        ], sql.and_(uue.c.user_id == user.c.id, uue.c.experiment_id == exp.c.id, exp.c.category_id == cat.c.id,
        ),
        use_labels = True
    ).order_by(uue.c.id)

    skipped = []

    total_uses_count = op.get_bind().execute(sql.select([sa.func.count(uue.c.id)]))
    total_uses = [ x[0] for x in total_uses_count ][0]
    if total_uses:
        print "Converting %s uses" % total_uses
        last_time = time.time()
        operations = []
        counter = 0
        for use in op.get_bind().execute(s):
            group_permission_id = use[uue.c.group_permission_id]
            user_permission_id = use[uue.c.user_permission_id]
            role_permission_id = use[uue.c.role_permission_id]

            counter += 1
            if counter % 1000 == 0:
                # Progress report every 1000 rows read.
                new_last_time = time.time()
                timespan = new_last_time - last_time
                speed = 1000.0 / timespan
                cur_time = time.asctime()
                print "%s Reading %s out of %s (%.2f%%). 1000 uses processed in %.2f seconds (%.2f uses / second)." % (cur_time, counter, total_uses, 100.0 * counter / total_uses, timespan, speed)
                last_time = new_last_time

            # Only backfill rows with no permission assigned yet.
            if not user_permission_id and not group_permission_id and not role_permission_id:
                use_id = use[uue.c.id]
                user_id = use[user.c.id]
                exp_name = use[exp.c.name]
                cat_name = use[cat.c.name]
                use_date = use[uue.c.start_date]
                user_role_id = use[user.c.role_id]
                current_permissions = _get_permissions(op.get_bind(), user_id, user_role_id, exp_name, cat_name)
                potential_permissions = []
                # Discard permissions assigned AFTER the use (this can happen when somebody repeats)
                for current_permission in current_permissions:
                    # We give a 24 hour margin due to UTC issues
                    if (current_permission['start_date'] - datetime.timedelta(hours = 24) ) <= use_date:
                        potential_permissions.append(current_permission)

                if not potential_permissions:
                    if not current_permissions:
                        # No permission at all could explain this use: report it at the end.
                        skipped.append( (use_id, unicode(use[user.c.login]), exp_name, cat_name) )
                        continue
                    else:
                        # If there was not permission before that
                        potential_permissions = current_permissions

                # Sort permissions by time_allowed
                potential_permissions.sort(lambda p1, p2: cmp(p2['time_allowed'], p1['time_allowed']))
                # Break by wherever the time_allowed is lower, and discard them
                break_point = len(potential_permissions)
                for position, potential_permission in enumerate(potential_permissions):
                    if potential_permission['time_allowed'] < potential_permissions[0]['time_allowed']:
                        break_point = position
                        break
                potential_permissions = potential_permissions[:break_point]
                # Sort by date: most recent - oldest.
                potential_permissions.sort(lambda p1, p2: cmp(p2['start_date'], p1['start_date']))
                # Best guess: the most recent permission with the largest time_allowed.
                assigned_permission = potential_permissions[0]
                kwargs = {}
                if assigned_permission['scope'] == 'group':
                    kwargs = dict(group_permission_id = assigned_permission['id'])
                elif assigned_permission['scope'] == 'user':
                    kwargs = dict(user_permission_id = assigned_permission['id'])
                elif assigned_permission['scope'] == 'role':
                    kwargs = dict(role_permission_id = assigned_permission['id'])
                if kwargs:
                    update_stmt = uue.update().where(uue.c.id == use_id).values(**kwargs)
                    operations.append(update_stmt)

        if total_uses > 1000:
            # Final progress line for the trailing partial batch.
            new_last_time = time.time()
            timespan = new_last_time - last_time
            remainder = total_uses % 1000
            speed = 1.0 * remainder / timespan
            cur_time = time.asctime()
            print "%s Found %s out of %s (%.2f%%). %s uses processed in %.2f seconds (%.2f uses / second)." % (cur_time, counter, total_uses, 100.0 * counter / total_uses, remainder, timespan, speed)
            last_time = new_last_time

        print "Executing %s operations..." % len(operations)
        for pos, operation in enumerate(operations):
            if pos % 1000 == 0 and pos > 0:
                new_last_time = time.time()
                timespan = new_last_time - last_time
                speed = 1000.0 / timespan
                cur_time = time.asctime()
                print "%s Processing %s out of %s (%.2f%%). 1000 uses processed in %.2f seconds (%.2f uses / second)." % (cur_time, pos, total_uses, 100.0 * pos / total_uses, timespan, speed)
                last_time = new_last_time
            op.execute(operation)
        print "Finished"

    if skipped:
        print "Warning. The following usages (total = %s) did not have any permission assigned and have been skipped:" % len(skipped)
        for use_id, login, exp_name, cat_name in skipped:
            print "\tUsage id=%s by %s on %s@%s" % (use_id, login, exp_name, cat_name)
        print "You can see those by searching uses with user_permission_id, group_permission_id and role_permission_id set all to NULL"
        print
def downgrade():
    """Drop the permission FK columns added by upgrade()."""
    ### commands auto generated by Alembic - please adjust! ###
    for column_name in ('role_permission_id', 'user_permission_id', 'group_permission_id'):
        op.drop_column(u'UserUsedExperiment', column_name)
    ### end Alembic commands ###
| {
"repo_name": "zstars/weblabdeusto",
"path": "server/src/weblab/db/upgrade/regular/versions/f1ce7950ae8_add_group_permission.py",
"copies": "1",
"size": "14772",
"license": "bsd-2-clause",
"hash": -4965578458558324000,
"line_mean": 39.694214876,
"line_max": 195,
"alpha_frac": 0.6216490658,
"autogenerated": false,
"ratio": 3.5518153402260157,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46734644060260155,
"avg_score": null,
"num_lines": null
} |
"""add group_provision table
Revision ID: 1badb81476e
Revises: 220610eeac6
Create Date: 2015-03-24 18:01:10.425116
"""
# revision identifiers, used by Alembic.
revision = '1badb81476e'
down_revision = '220610eeac6'
branch_labels = None
depends_on = None
import sqlalchemy as sa
from alembic import op
from sqlalchemy_utils.types.uuid import UUIDType
def upgrade():
    """Create the group_provision table (quantity + price per magazine) with
    a server-generated UUID primary key."""
    op.create_table(
        'group_provision',
        sa.Column(
            'id',
            UUIDType(binary=False),
            # Generated server-side; requires the uuid-ossp extension.
            server_default=sa.text('uuid_generate_v4()'),
            nullable=False
        ),
        sa.Column('quantity', sa.Integer(), nullable=False),
        sa.Column(
            'price_per_magazine',
            sa.Numeric(precision=3, scale=2),
            nullable=False
        ),
        sa.PrimaryKeyConstraint('id'),
        # NOTE(review): deferrable is passed as the string 'True', not the
        # boolean — truthy, so it works, but likely unintended.
        sa.UniqueConstraint(
            'quantity',
            deferrable='True',
            initially='DEFERRED',
            name='uq_group_provision_quantity'
        )
    )
def downgrade():
    """Remove the group_provision table created by upgrade()."""
    table_name = 'group_provision'
    op.drop_table(table_name)
| {
"repo_name": "wappulehti-apy/diilikone-api",
"path": "diilikone/migrations/versions/1badb81476e_add_group_provision_table.py",
"copies": "1",
"size": "1062",
"license": "mit",
"hash": -8870890568830623000,
"line_mean": 22.0869565217,
"line_max": 60,
"alpha_frac": 0.5960451977,
"autogenerated": false,
"ratio": 3.6747404844290656,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9759312520085419,
"avg_score": 0.0022946324087290866,
"num_lines": 46
} |
"""Add GroupRequest table.
Revision ID: 39a5823a808
Revises: a364e6e9c14
Create Date: 2013-09-21 15:40:31.274287
"""
# revision identifiers, used by Alembic.
revision = '39a5823a808'
down_revision = 'a364e6e9c14'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create grouprequest: a pending invitation from one user to another to
    join a project group, unique per (from_user, project)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('grouprequest',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('created_at', sa.DateTime(timezone=True), nullable=False),
    sa.Column('from_user_id', sa.Integer(), nullable=True),
    sa.Column('project_id', sa.Integer(), nullable=True),
    sa.Column('to_user_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['from_user_id'], [u'user.id'], ),
    sa.ForeignKeyConstraint(['project_id'], [u'project.id'], ),
    sa.ForeignKeyConstraint(['to_user_id'], [u'user.id'], ),
    sa.PrimaryKeyConstraint('id'),
    # One outstanding request per sender per project.
    sa.UniqueConstraint('from_user_id','project_id')
    )
    ### end Alembic commands ###
def downgrade():
    """Drop the grouprequest table created by upgrade()."""
    ### commands auto generated by Alembic - please adjust! ###
    table_name = 'grouprequest'
    op.drop_table(table_name)
    ### end Alembic commands ###
| {
"repo_name": "ucsb-cs/submit",
"path": "submit/migrations/versions/39a5823a808_add_grouprequest_tab.py",
"copies": "1",
"size": "1146",
"license": "bsd-2-clause",
"hash": 4346621596143835000,
"line_mean": 29.972972973,
"line_max": 72,
"alpha_frac": 0.6701570681,
"autogenerated": false,
"ratio": 3.3217391304347825,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4491896198534783,
"avg_score": null,
"num_lines": null
} |
"""add groups
Revision ID: f2e13b567540
Revises: 6ff1fdc328a5
Create Date: 2018-08-28 13:37:47.945953
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f2e13b567540'
down_revision = '6ff1fdc328a5'
branch_labels = None
depends_on = None
def upgrade():
    """Create groups/memberships and repoint accounts, categories and
    transactions from a user FK ('id') to a group FK ('group_id')."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('groups',
    sa.Column('group_id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=64), nullable=True),
    sa.PrimaryKeyConstraint('group_id')
    )
    op.create_table('memberships',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('group_id', sa.Integer(), nullable=False),
    sa.Column('active', sa.Boolean(), nullable=True),
    sa.ForeignKeyConstraint(['group_id'], ['groups.group_id'], ),
    sa.ForeignKeyConstraint(['id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id', 'group_id')
    )
    # NOTE(review): the new FKs below are created with name=None, so the DB
    # autogenerates their names; downgrade() then has no name to drop them
    # by — confirm a naming_convention is configured.
    op.add_column('accounts', sa.Column('group_id', sa.Integer(), nullable=True))
    op.drop_constraint('accounts_id_fkey', 'accounts', type_='foreignkey')
    op.create_foreign_key(None, 'accounts', 'groups', ['group_id'], ['group_id'])
    op.drop_column('accounts', 'id')
    op.add_column('categories', sa.Column('group_id', sa.Integer(), nullable=True))
    op.drop_constraint('categories_id_fkey', 'categories', type_='foreignkey')
    op.create_foreign_key(None, 'categories', 'groups', ['group_id'], ['group_id'])
    op.drop_column('categories', 'id')
    op.add_column('transactions', sa.Column('group_id', sa.Integer(), nullable=True))
    op.drop_constraint('transactions_id_fkey', 'transactions', type_='foreignkey')
    op.create_foreign_key(None, 'transactions', 'groups', ['group_id'], ['group_id'])
    op.drop_column('transactions', 'id')
    # ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): restore the user FK columns and drop group tables."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): each op.drop_constraint(None, ...) below is invalid —
    # Alembic requires the constraint name and raises ValueError on None.
    # The autogenerated FK names from upgrade() must be filled in before
    # this downgrade can actually run.
    op.add_column('transactions', sa.Column('id', sa.INTEGER(), autoincrement=False, nullable=True))
    op.drop_constraint(None, 'transactions', type_='foreignkey')
    op.create_foreign_key('transactions_id_fkey', 'transactions', 'users', ['id'], ['id'])
    op.drop_column('transactions', 'group_id')
    op.add_column('categories', sa.Column('id', sa.INTEGER(), autoincrement=False, nullable=True))
    op.drop_constraint(None, 'categories', type_='foreignkey')
    op.create_foreign_key('categories_id_fkey', 'categories', 'users', ['id'], ['id'])
    op.drop_column('categories', 'group_id')
    op.add_column('accounts', sa.Column('id', sa.INTEGER(), autoincrement=False, nullable=True))
    op.drop_constraint(None, 'accounts', type_='foreignkey')
    op.create_foreign_key('accounts_id_fkey', 'accounts', 'users', ['id'], ['id'])
    op.drop_column('accounts', 'group_id')
    op.drop_table('memberships')
    op.drop_table('groups')
    # ### end Alembic commands ###
| {
"repo_name": "gregcowell/BAM",
"path": "migrations/versions/f2e13b567540_add_groups.py",
"copies": "2",
"size": "2894",
"license": "apache-2.0",
"hash": -4233407464158037000,
"line_mean": 43.5230769231,
"line_max": 100,
"alpha_frac": 0.6624049758,
"autogenerated": false,
"ratio": 3.392731535756155,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004907474267015614,
"num_lines": 65
} |
"""Add groups + permissions to posts
Revision ID: 36baffc5df12
Revises: 1b385158fd32
Create Date: 2016-11-02 15:37:12.808143
"""
# revision identifiers, used by Alembic.
revision = '36baffc5df12'
down_revision = '1b385158fd32'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create group tables and a per-post 'private' flag.

    groups holds named user groups; assoc_group_user links users to groups;
    assoc_post_group links posts to groups (presumably the groups allowed
    to see a private post — confirm against application code).
    """
    op.create_table('groups',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=128), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('name')
    )
    op.create_table('assoc_group_user',
    sa.Column('group_id', sa.Integer(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['group_id'], ['groups.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], )
    )
    op.create_table('assoc_post_group',
    sa.Column('post_id', sa.Integer(), nullable=True),
    sa.Column('group_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['group_id'], ['groups.id'], ),
    sa.ForeignKeyConstraint(['post_id'], ['posts.id'], )
    )
    # batch_alter_table so the ALTER also works on SQLite.
    with op.batch_alter_table('posts') as batch_op:
        batch_op.add_column(sa.Column('private', sa.Integer(), nullable=True))
def downgrade():
    """Reverse upgrade(): drop the 'private' flag and the group tables.

    Bugfix: the original called ``op.drop_column('private')`` inside the
    batch context; ``op.drop_column`` takes (table_name, column_name) and
    would raise a TypeError — the ``batch_op`` handle must be used instead.
    """
    with op.batch_alter_table('posts') as batch_op:
        batch_op.drop_column('private')
    op.drop_table('assoc_post_group')
    op.drop_table('assoc_group_user')
    op.drop_table('groups')
| {
"repo_name": "airbnb/knowledge-repo",
"path": "knowledge_repo/app/migrations/versions/36baffc5df12_add_groups_permissions_to_posts.py",
"copies": "1",
"size": "1400",
"license": "apache-2.0",
"hash": -7581097927115234000,
"line_mean": 28.7872340426,
"line_max": 78,
"alpha_frac": 0.6578571429,
"autogenerated": false,
"ratio": 3.2482598607888633,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44061170036888636,
"avg_score": null,
"num_lines": null
} |
"""Add hail_endpoint to user
Revision ID: 411fcaee167b
Revises: 26311efc301f
Create Date: 2015-04-25 18:12:55.584619
"""
# revision identifiers, used by Alembic.
revision = '411fcaee167b'
down_revision = '26311efc301f'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# Values of the hail_status enum before this revision.
old_options = ('emitted', 'received',
               'sent_to_operator', 'received_by_operator',
               'received_by_taxi', 'accepted_by_taxi',
               'declined_by_taxi', 'incident_customer',
               'incident_taxi', 'timeout_customer', 'timeout_taxi',
               'outdated_customer', 'outdated_taxi')
# This revision adds the 'failure' value.
new_options = sorted(old_options + ('failure',))

old_type = sa.Enum(*old_options, name='hail_status')
new_type = sa.Enum(*new_options, name='hail_status')
# Scratch type used while swapping the column between enum definitions.
tmp_type = sa.Enum(*new_options, name='_status')

# Lightweight table construct for UPDATE statements against "hail".
tcr = sa.sql.table('hail',
                   sa.Column('status', new_type, nullable=False))
def upgrade():
    """Add the 'failure' value to the hail_status enum.

    Postgres enums cannot simply gain values in a transaction-safe way here,
    so: create a temporary type, move the column onto it, drop and recreate
    hail_status with the new values, move the column back, drop the temp.
    """
    tmp_type.create(op.get_bind(), checkfirst=False)
    op.execute('ALTER TABLE hail ALTER COLUMN status TYPE _status'
               ' USING status::text::_status');
    old_type.drop(op.get_bind(), checkfirst=False)
    new_type.create(op.get_bind(), checkfirst=False)
    op.execute('ALTER TABLE hail ALTER COLUMN status TYPE hail_status'
               ' USING status::text::hail_status');
    tmp_type.drop(op.get_bind(), checkfirst=False)
def downgrade():
    """Remove the 'failure' value from the hail_status enum.

    Bugfixes vs. the original:
    - the UPDATE referenced an undefined name ``hail``; the table construct
      defined above is ``tcr`` (NameError at runtime otherwise);
    - the intermediate ALTERs referenced a type ``hail__status`` that is
      never created; the temporary type is named ``_status`` (see tmp_type
      and upgrade()).
    """
    # Remap rows holding the soon-to-be-removed value first, otherwise the
    # cast back to the old enum would fail.
    op.execute(tcr.update().where(tcr.c.status == 'failure')
               .values(status='outdated_taxi'))
    tmp_type.create(op.get_bind(), checkfirst=False)
    op.execute('ALTER TABLE hail ALTER COLUMN status TYPE _status'
               ' USING status::text::_status')
    new_type.drop(op.get_bind(), checkfirst=False)
    old_type.create(op.get_bind(), checkfirst=False)
    op.execute('ALTER TABLE hail ALTER COLUMN status TYPE hail_status'
               ' USING status::text::hail_status')
    tmp_type.drop(op.get_bind(), checkfirst=False)
| {
"repo_name": "openmaraude/APITaxi_models",
"path": "APITaxi_models2/migrations/versions/20150425_18:12:55_411fcaee167b_add_hail_endpoint_to_user.py.py",
"copies": "2",
"size": "1934",
"license": "mit",
"hash": -1789294889502129400,
"line_mean": 35.4905660377,
"line_max": 71,
"alpha_frac": 0.6726990693,
"autogenerated": false,
"ratio": 3.186161449752883,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9823369372921944,
"avg_score": 0.007098229226187752,
"num_lines": 53
} |
"""Add handshake support and logging."""
import atexit, os, subprocess, zmq
from client_1 import MAYAEXE, kill
from client_2 import SETCMD
from client_6 import sendrecv
COMMAND = ('python("import mayaserver.server;'
'mayaserver.server.runserver(%s)");') #(1)
ORIGCOMMAND = COMMAND
COMMAND = SETCMD('_handshake', COMMAND)
def start_process():
    """Launch a Maya subprocess, handshake with it over ZeroMQ, and return
    the port the in-Maya server is listening on."""
    handshakesock = zmq.Context().socket(zmq.REP) #(2)
    handshakeport = handshakesock.bind_to_random_port(
        'tcp://127.0.0.1')
    # Tell Maya which port to report back to. #(3)
    command = COMMAND % handshakeport #(3)
    process = subprocess.Popen(
        [MAYAEXE, '-command', command]) #(4)
    # Make sure the Maya process dies when this process exits.
    atexit.register(kill, process)
    # Block until Maya's server sends its own application port. #(5)
    appport = int(handshakesock.recv()) #(5)
    handshakesock.send('')
    handshakesock.close() #(6)
    return appport #(7)
def create_client(port):  #(8)
    """Return a REQ socket connected to the server on localhost:*port*."""
    context = zmq.Context()
    client_socket = context.socket(zmq.REQ)
    client_socket.connect('tcp://127.0.0.1:%s' % port)
    return client_socket
if __name__ == '__main__':
    # Demo (Python 2): start two independent Maya servers and query each
    # for its process id over the request socket.
    def start_and_get_pid():
        appport = start_process()
        sock = create_client(appport)
        sendrecv(sock, ('exec', 'import os'))
        return sendrecv(sock, ('eval', 'os.getpid()'))
    srv1Pid = start_and_get_pid()
    srv2Pid = start_and_get_pid()
    print 'Client proc %s started Maya procs: %s, %s' % (
        os.getpid(), srv1Pid, srv2Pid)
| {
"repo_name": "rgalanakis/practicalmayapython",
"path": "src/chapter6/mayaserver/client_7.py",
"copies": "1",
"size": "1312",
"license": "mit",
"hash": 7914667642525311000,
"line_mean": 30.2380952381,
"line_max": 57,
"alpha_frac": 0.6295731707,
"autogenerated": false,
"ratio": 3.2235872235872236,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4353160394287224,
"avg_score": null,
"num_lines": null
} |
"""Add has function and dependent pgcrpyto
Revision ID: 5552dfae2cb0
Revises: c225ea8fbf5e
Create Date: 2019-09-14 06:19:36.520447
"""
# revision identifiers, used by Alembic.
revision = '5552dfae2cb0'
down_revision = 'c225ea8fbf5e'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy_utils.types import TSVectorType
from sqlalchemy_searchable import make_searchable
import sqlalchemy_utils
# Patch in knowledge of the citext type, so it reflects properly.
from sqlalchemy.dialects.postgresql.base import ischema_names
import citext
import queue
import datetime
from sqlalchemy.dialects.postgresql import ENUM
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.dialects.postgresql import TSVECTOR
ischema_names['citext'] = citext.CIText
# We use a UUID column because it's a performant in-table 8-byte storage mechanism with nice printing facilities.
# SHA-1 has a 160 bit output, so we need to truncate the input
# (the first 32 hex chars = 128 bits are kept so the digest fits a UUID).
SQL_FUNC = '''
CREATE OR REPLACE FUNCTION sha_row_hash(text) returns uuid AS $$
SELECT substring(encode(digest($1, 'sha1'), 'hex') from 0 for 33)::uuid;
$$ LANGUAGE SQL STRICT IMMUTABLE;
'''
def upgrade():
    """Enable pgcrypto (provides digest()) and install the sha_row_hash helper."""
    op.execute("CREATE EXTENSION IF NOT EXISTS pgcrypto")
    op.execute(SQL_FUNC)
def downgrade():
    """Remove the sha_row_hash() helper installed by upgrade().

    The pgcrypto extension is deliberately left in place, since other
    database objects may depend on it.
    """
    # Bug fix: the original executed ``DROP FUNCTION sha1;`` but upgrade()
    # creates a function named sha_row_hash(text), so the downgrade always
    # failed.  PostgreSQL also needs the argument signature to identify the
    # function, and IF EXISTS makes the downgrade idempotent.
    op.execute("""DROP FUNCTION IF EXISTS sha_row_hash(text);""")
| {
"repo_name": "fake-name/ReadableWebProxy",
"path": "alembic/versions/2019-09-14_5552dfae2cb0_add_hash_function_and_dependent_pgcrpyto.py",
"copies": "1",
"size": "1402",
"license": "bsd-3-clause",
"hash": 4563255934391378000,
"line_mean": 27.612244898,
"line_max": 113,
"alpha_frac": 0.7703281027,
"autogenerated": false,
"ratio": 3.444717444717445,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47150455474174446,
"avg_score": null,
"num_lines": null
} |
"""Add hash and parent hash columns
Revision ID: c225ea8fbf5e
Revises: ea8987f915b8
Create Date: 2019-09-08 16:33:03.743328
"""
# revision identifiers, used by Alembic.
revision = 'c225ea8fbf5e'
down_revision = 'ea8987f915b8'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy_utils.types import TSVectorType
from sqlalchemy_searchable import make_searchable
import sqlalchemy_utils
# Patch in knowledge of the citext type, so it reflects properly.
from sqlalchemy.dialects.postgresql.base import ischema_names
import citext
import queue
import datetime
from sqlalchemy.dialects.postgresql import ENUM
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.dialects.postgresql import TSVECTOR
ischema_names['citext'] = citext.CIText
from sqlalchemy.dialects import postgresql
# Version tables converted from delta-based tracking to hash-chain tracking,
# in the order the original autogenerated migration processed them.
_HASHED_VERSION_TABLES = [
    'rss_parser_feed_name_lut_version',
    'rss_parser_funcs_version',
    'web_pages_version',
    'raw_web_pages_version',
]


def _add_hash_tracking(table):
    """Add data_hash/parent_hash columns to *table* and drop its is_delta flag.

    parent_hash points at another row's data_hash in the same table, forming
    a hash-chain of row versions (self-referential foreign key).
    """
    # Progress messages use the table name without the '_version' suffix,
    # matching the original migration's output exactly.
    base = table[:-len('_version')]
    print("Adding to %s 1" % base)
    op.add_column(table, sa.Column('data_hash', postgresql.UUID(), nullable=True, unique=True))
    print("Adding to %s 2" % base)
    op.add_column(table, sa.Column('parent_hash', postgresql.UUID(), nullable=True))
    print("Adding to %s (foreign key)" % base)
    op.create_foreign_key(None, table, table, ['parent_hash'], ['data_hash'])
    print("Dropping is_delta column on %s" % base)
    op.drop_column(table, 'is_delta')


def upgrade():
    """Replace is_delta flags with data_hash/parent_hash chains on all version tables.

    The original body copy-pasted the same four operations for each table;
    this performs the identical statements via one helper per table.
    """
    # These ALTER TABLEs can run for a long time on large tables; raise the
    # per-statement timeout (milliseconds).
    op.execute("SET statement_timeout TO 144000000;")
    for table in _HASHED_VERSION_TABLES:
        _add_hash_tracking(table)
    print("Done!")
def downgrade():
    """Restore the is_delta column and remove the hash-chain columns.

    Reverse of upgrade(); the table order follows the original
    autogenerated script.
    """
    for table in ('web_pages_version', 'rss_parser_funcs_version',
                  'rss_parser_feed_name_lut_version', 'raw_web_pages_version'):
        op.add_column(table, sa.Column('is_delta', sa.BOOLEAN(), autoincrement=False, nullable=True))
        # Bug fix: the autogenerated script passed None as the constraint
        # name, which op.drop_constraint() rejects at runtime.  upgrade()
        # created the FK unnamed, so PostgreSQL assigned its default name
        # <table>_<column>_fkey.
        # NOTE(review): confirm the actual constraint name in the target
        # database before relying on this downgrade.
        op.drop_constraint('%s_parent_hash_fkey' % table, table, type_='foreignkey')
        op.drop_column(table, 'parent_hash')
        op.drop_column(table, 'data_hash')
| {
"repo_name": "fake-name/ReadableWebProxy",
"path": "alembic/versions/2019-09-08_c225ea8fbf5e_add_hash_and_parent_hash_columns.py",
"copies": "1",
"size": "4843",
"license": "bsd-3-clause",
"hash": -8766424176848119000,
"line_mean": 49.9789473684,
"line_max": 135,
"alpha_frac": 0.7125748503,
"autogenerated": false,
"ratio": 3.321673525377229,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4534248375677229,
"avg_score": null,
"num_lines": null
} |
# AddHeaderForEdgeR/AddHeaderForEdgeR.py - a self annotated version of rgToolFactory.py generated by running rgToolFactory.py
# to make a new Galaxy tool called AddHeaderForEdgeR
# User mika.yoshimura@riken.jp at 30/01/2015 16:38:14
# rgToolFactory.py
# see https://bitbucket.org/fubar/galaxytoolfactory/wiki/Home
#
# copyright ross lazarus (ross stop lazarus at gmail stop com) May 2012
#
# all rights reserved
# Licensed under the LGPL
# suggestions for improvement and bug fixes welcome at https://bitbucket.org/fubar/galaxytoolfactory/wiki/Home
#
# August 2014
# merged John Chilton's citation addition and ideas from Marius van den Beek to enable arbitrary
# data types for input and output - thanks!
#
# march 2014
# had to remove dependencies because cross toolshed dependencies are not possible - can't pre-specify a toolshed url for graphicsmagick and ghostscript
# grrrrr - night before a demo
# added dependencies to a tool_dependencies.xml if html page generated so generated tool is properly portable
#
# added ghostscript and graphicsmagick as dependencies
# fixed a weird problem where gs was trying to use the new_files_path from universe (database/tmp) as ./database/tmp
# errors ensued
#
# august 2013
# found a problem with GS if $TMP or $TEMP missing - now inject /tmp and warn
#
# july 2013
# added ability to combine images and individual log files into html output
# just make sure there's a log file foo.log and it will be output
# together with all images named like "foo_*.pdf
# otherwise old format for html
#
# January 2013
# problem pointed out by Carlos Borroto
# added escaping for <>$ - thought I did that ages ago...
#
# August 11 2012
# changed to use shell=False and cl as a sequence
# This is a Galaxy tool factory for simple scripts in python, R or whatever ails ye.
# It also serves as the wrapper for the new tool.
#
# you paste and run your script
# Only works for simple scripts that read one input from the history.
# Optionally can write one new history dataset,
# and optionally collect any number of outputs into links on an autogenerated HTML page.
# DO NOT install on a public or important site - please.
# installed generated tools are fine if the script is safe.
# They just run normally and their user cannot do anything unusually insecure
# but please, practice safe toolshed.
# Read the fucking code before you install any tool
# especially this one
# After you get the script working on some test data, you can
# optionally generate a toolshed compatible gzip file
# containing your script safely wrapped as an ordinary Galaxy script in your local toolshed for
# safe and largely automated installation in a production Galaxy.
# If you opt for an HTML output, you get all the script outputs arranged
# as a single Html history item - all output files are linked, thumbnails for all the pdfs.
# Ugly but really inexpensive.
#
# Patches appreciated please.
#
#
# long route to June 2012 product
# Behold the awesome power of Galaxy and the toolshed with the tool factory to bind them
# derived from an integrated script model
# called rgBaseScriptWrapper.py
# Note to the unwary:
# This tool allows arbitrary scripting on your Galaxy as the Galaxy user
# There is nothing stopping a malicious user doing whatever they choose
# Extremely dangerous!!
# Totally insecure. So, trusted users only
#
# preferred model is a developer using their throw away workstation instance - ie a private site.
# no real risk. The universe_wsgi.ini admin_users string is checked - only admin users are permitted to run this tool.
#
import sys
import shutil
import subprocess
import os
import time
import tempfile
import optparse
import tarfile
import re
# NOTE(review): shutil is imported twice (also above); harmless but redundant.
import shutil
import math

# Script identity and version strings used in generated help/attribution text.
progname = os.path.split(sys.argv[0])[1]
myversion = 'V001.1 March 2014'
verbose = False
debug = False
toolFactoryURL = 'https://bitbucket.org/fubar/galaxytoolfactory'

# if we do html we need these dependencies specified in a tool_dependencies.xml file and referred to in the generated
# tool xml
# %s is filled with the tool's help text (see ScriptRunner.makeTooltar).
toolhtmldepskel = """<?xml version="1.0"?>
<tool_dependency>
<package name="ghostscript" version="9.10">
<repository name="package_ghostscript_9_10" owner="devteam" prior_installation_required="True" />
</package>
<package name="graphicsmagick" version="1.3.18">
<repository name="package_graphicsmagick_1_3" owner="iuc" prior_installation_required="True" />
</package>
<readme>
%s
</readme>
</tool_dependency>
"""

# Requirements fragment injected into the generated tool XML when HTML output
# (and hence ghostscript/graphicsmagick post-processing) is enabled.
protorequirements = """<requirements>
<requirement type="package" version="9.10">ghostscript</requirement>
<requirement type="package" version="1.3.18">graphicsmagick</requirement>
</requirements>"""
def timenow():
    """Return the current local time formatted as dd/mm/YYYY HH:MM:SS."""
    stamp_format = '%d/%m/%Y %H:%M:%S'
    return time.strftime(stamp_format, time.localtime(time.time()))
# Per-character substitutions applied before embedding script text in the
# generated tool XML; $ gets a backslash so Mako does not interpret it.
html_escape_table = {
    "&": "&",
    ">": ">",
    "<": "<",
    "$": r"\$",
}


def html_escape(text):
    """Return *text* with each character mapped through html_escape_table."""
    escaped_chars = [html_escape_table.get(ch, ch) for ch in text]
    return "".join(escaped_chars)
def cmd_exists(cmd):
    """Return True when *cmd* resolves to a runnable command in the shell.

    Uses the shell builtin ``type``; output is discarded.
    """
    probe = "type " + cmd
    status = subprocess.call(probe, shell=True,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    return status == 0
def parse_citations(citations_text):
    """Split **ENTRY**-delimited citation text into (type, content) tuples.

    Entries starting with "doi" yield ("doi", rest); every other entry is
    treated as bibtex and has the literal prefix "bibtex" stripped.
    """
    entries = [entry for entry in citations_text.split("**ENTRY**") if entry.strip()]
    parsed = []
    for entry in entries:
        if entry.startswith("doi"):
            parsed.append(("doi", entry[len("doi"):].strip()))
        else:
            parsed.append(("bibtex", entry[len("bibtex"):].strip()))
    return parsed
class ScriptRunner:
    """class is a wrapper for an arbitrary script

    Runs the user-supplied script via subprocess, optionally collects the
    outputs into an HTML page, and can package the whole thing as a
    toolshed-ready gzipped tarball (makeTooltar).
    """

    def __init__(self,opts=None,treatbashSpecial=True):
        """
        cleanup inputs, setup some outputs

        Side effects: chdir()s into opts.output_dir, writes the script to a
        tempfile (self.sfile) and, when output_dir is set, to an artifact
        copy named <toolname>.<interpreter>.
        """
        self.useGM = cmd_exists('gm')
        self.useIM = cmd_exists('convert')
        self.useGS = cmd_exists('gs')
        self.temp_warned = False # we want only one warning if $TMP not set
        self.treatbashSpecial = treatbashSpecial
        if opts.output_dir: # simplify for the tool tarball
            os.chdir(opts.output_dir)
        self.thumbformat = 'png'
        self.opts = opts
        self.toolname = re.sub('[^a-zA-Z0-9_]+', '', opts.tool_name) # a sanitizer now does this but..
        self.toolid = self.toolname
        self.myname = sys.argv[0] # get our name because we write ourselves out as a tool later
        self.pyfile = self.myname # crude but efficient - the cruft won't hurt much
        self.xmlfile = '%s.xml' % self.toolname
        s = open(self.opts.script_path,'r').readlines()
        s = [x.rstrip() for x in s] # remove pesky dos line endings if needed
        self.script = '\n'.join(s)
        fhandle,self.sfile = tempfile.mkstemp(prefix=self.toolname,suffix=".%s" % (opts.interpreter))
        tscript = open(self.sfile,'w') # use self.sfile as script source for Popen
        tscript.write(self.script)
        tscript.close()
        self.indentedScript = '\n'.join([' %s' % html_escape(x) for x in s]) # for restructured text in help
        self.escapedScript = '\n'.join([html_escape(x) for x in s])
        self.elog = os.path.join(self.opts.output_dir,"%s_error.log" % self.toolname)
        if opts.output_dir: # may not want these complexities
            self.tlog = os.path.join(self.opts.output_dir,"%s_runner.log" % self.toolname)
            art = '%s.%s' % (self.toolname,opts.interpreter)
            artpath = os.path.join(self.opts.output_dir,art) # need full path
            artifact = open(artpath,'w') # use self.sfile as script source for Popen
            artifact.write(self.script)
            artifact.close()
        # Build the command line: interpreter, then script source (file for
        # bash/sh, '-' for stdin otherwise), then the positional file args.
        self.cl = []
        self.html = []
        a = self.cl.append
        a(opts.interpreter)
        if self.treatbashSpecial and opts.interpreter in ['bash','sh']:
            a(self.sfile)
        else:
            a('-') # stdin
        a(opts.input_tab)
        a(opts.input_int)
        a(opts.output_tab)
        self.outputFormat = self.opts.output_format
        self.inputFormats = self.opts.input_formats
        self.test1Input = '%s_test1_input.xls' % self.toolname
        self.test1Output = '%s_test1_output.xls' % self.toolname
        self.test1HTML = '%s_test1_output.html' % self.toolname

    def makeXML(self):
        """
        Create a Galaxy xml tool wrapper for the new script as a string to write out
        fixme - use templating or something less fugly than this example of what we produce
        <tool id="reverse" name="reverse" version="0.01">
        <description>a tabular file</description>
        <command interpreter="python">
        reverse.py --script_path "$runMe" --interpreter "python"
        --tool_name "reverse" --input_tab "$input1" --output_tab "$tab_file"
        </command>
        <inputs>
        <param name="input1" type="data" format="tabular" label="Select a suitable input file from your history"/><param name="job_name" type="text" label="Supply a name for the outputs to remind you what they contain" value="reverse"/>
        </inputs>
        <outputs>
        <data format="tabular" name="tab_file" label="${job_name}"/>
        </outputs>
        <help>
        **What it Does**
        Reverse the columns in a tabular file
        </help>
        <configfiles>
        <configfile name="runMe">
        # reverse order of columns in a tabular file
        import sys
        inp = sys.argv[1]
        outp = sys.argv[2]
        i = open(inp,'r')
        o = open(outp,'w')
        for row in i:
        rs = row.rstrip().split('\t')
        rs.reverse()
        o.write('\t'.join(rs))
        o.write('\n')
        i.close()
        o.close()
        </configfile>
        </configfiles>
        </tool>
        """
        # Template for the generated tool XML; filled from xdict below.
        newXML="""<tool id="%(toolid)s" name="%(toolname)s" version="%(tool_version)s">
%(tooldesc)s
%(requirements)s
<command interpreter="python">
%(command)s
</command>
<inputs>
%(inputs)s
</inputs>
<outputs>
%(outputs)s
</outputs>
<configfiles>
<configfile name="runMe">
%(script)s
</configfile>
</configfiles>
%(tooltests)s
<help>
%(help)s
</help>
<citations>
%(citations)s
<citation type="doi">10.1093/bioinformatics/bts573</citation>
</citations>
</tool>""" # needs a dict with toolname, toolid, interpreter, scriptname, command, inputs as a multi line string ready to write, outputs ditto, help ditto

        newCommand="""
%(toolname)s.py --script_path "$runMe" --interpreter "%(interpreter)s"
--tool_name "%(toolname)s" %(command_inputs)s %(command_outputs)s """
        # may NOT be an input or htmlout - appended later
        tooltestsTabOnly = """
<tests>
<test>
<param name="input1" value="%(test1Input)s" ftype="%(inputFormats)s"/>
<param name="job_name" value="test1"/>
<param name="runMe" value="$runMe"/>
<output name="tab_file" file="%(test1Output)s" ftype="%(outputFormat)s"/>
</test>
</tests>
"""
        tooltestsHTMLOnly = """
<tests>
<test>
<param name="input1" value="%(test1Input)s" ftype="%(inputFormats)s"/>
<param name="job_name" value="test1"/>
<param name="runMe" value="$runMe"/>
<output name="html_file" file="%(test1HTML)s" ftype="html" lines_diff="5"/>
</test>
</tests>
"""
        tooltestsBoth = """<tests>
<test>
<param name="input1" value="%(test1Input)s" ftype="%(inputFormats)s"/>
<param name="job_name" value="test1"/>
<param name="runMe" value="$runMe"/>
<output name="tab_file" file="%(test1Output)s" ftype="%(outputFormat)s" />
<output name="html_file" file="%(test1HTML)s" ftype="html" lines_diff="10"/>
</test>
</tests>
"""
        # Assemble the substitution dict for the templates above.
        xdict = {}
        xdict['outputFormat'] = self.outputFormat
        xdict['inputFormats'] = self.inputFormats
        xdict['requirements'] = ''
        if self.opts.make_HTML:
            if self.opts.include_dependencies == "yes":
                xdict['requirements'] = protorequirements
        xdict['tool_version'] = self.opts.tool_version
        xdict['test1Input'] = self.test1Input
        xdict['test1HTML'] = self.test1HTML
        xdict['test1Output'] = self.test1Output
        # Pick the test stanza matching which outputs the tool produces.
        if self.opts.make_HTML and self.opts.output_tab <> 'None':
            xdict['tooltests'] = tooltestsBoth % xdict
        elif self.opts.make_HTML:
            xdict['tooltests'] = tooltestsHTMLOnly % xdict
        else:
            xdict['tooltests'] = tooltestsTabOnly % xdict
        xdict['script'] = self.escapedScript
        # configfile is least painful way to embed script to avoid external dependencies
        # but requires escaping of <, > and $ to avoid Mako parsing
        if self.opts.help_text:
            helptext = open(self.opts.help_text,'r').readlines()
            helptext = [html_escape(x) for x in helptext] # must html escape here too - thanks to Marius van den Beek
            xdict['help'] = ''.join([x for x in helptext])
        else:
            xdict['help'] = 'Please ask the tool author (%s) for help as none was supplied at tool generation\n' % (self.opts.user_email)
        if self.opts.citations:
            citationstext = open(self.opts.citations,'r').read()
            citation_tuples = parse_citations(citationstext)
            citations_xml = ""
            for citation_type, citation_content in citation_tuples:
                citation_xml = """<citation type="%s">%s</citation>""" % (citation_type, html_escape(citation_content))
                citations_xml += citation_xml
            xdict['citations'] = citations_xml
        else:
            xdict['citations'] = ""
        # Append the script source and attribution to the help text.
        coda = ['**Script**','Pressing execute will run the following code over your input file and generate some outputs in your history::']
        coda.append('\n')
        coda.append(self.indentedScript)
        coda.append('\n**Attribution**\nThis Galaxy tool was created by %s at %s\nusing the Galaxy Tool Factory.\n' % (self.opts.user_email,timenow()))
        coda.append('See %s for details of that project' % (toolFactoryURL))
        coda.append('Please cite: Creating re-usable tools from scripts: The Galaxy Tool Factory. Ross Lazarus; Antony Kaspi; Mark Ziemann; The Galaxy Team. ')
        coda.append('Bioinformatics 2012; doi: 10.1093/bioinformatics/bts573\n')
        xdict['help'] = '%s\n%s' % (xdict['help'],'\n'.join(coda))
        if self.opts.tool_desc:
            xdict['tooldesc'] = '<description>%s</description>' % self.opts.tool_desc
        else:
            xdict['tooldesc'] = ''
        xdict['command_outputs'] = ''
        xdict['outputs'] = ''
        if self.opts.input_tab <> 'None':
            xdict['command_inputs'] = '--input_tab "$input1" ' # the space may matter a lot if we append something
            xdict['inputs'] = '<param name="input1" type="data" format="%s" label="Select a suitable input file from your history"/> \n' % self.inputFormats
        else:
            xdict['command_inputs'] = '' # assume no input - eg a random data generator
            xdict['inputs'] = ''
        xdict['inputs'] += '<param name="job_name" type="text" label="Supply a name for the outputs to remind you what they contain" value="%s"/> \n' % self.toolname
        xdict['toolname'] = self.toolname
        xdict['toolid'] = self.toolid
        xdict['interpreter'] = self.opts.interpreter
        xdict['scriptname'] = self.sfile
        if self.opts.make_HTML:
            xdict['command_outputs'] += ' --output_dir "$html_file.files_path" --output_html "$html_file" --make_HTML "yes"'
            xdict['outputs'] += ' <data format="html" name="html_file" label="${job_name}.html"/>\n'
        else:
            xdict['command_outputs'] += ' --output_dir "./"'
        if self.opts.output_tab <> 'None':
            xdict['command_outputs'] += ' --output_tab "$tab_file"'
            xdict['outputs'] += ' <data format="%s" name="tab_file" label="${job_name}"/>\n' % self.outputFormat
        xdict['command'] = newCommand % xdict
        xmls = newXML % xdict
        xf = open(self.xmlfile,'w')
        xf.write(xmls)
        xf.write('\n')
        xf.close()
        # ready for the tarball

    def makeTooltar(self):
        """
        a tool is a gz tarball with eg
        /toolname/tool.xml /toolname/tool.py /toolname/test-data/test1_in.foo ...

        Runs the script first; on success packages XML, script, test data
        and a self-annotated copy of this wrapper into <toolname>.gz and
        copies it to opts.new_tool.
        """
        retval = self.run()
        if retval:
            print >> sys.stderr,'## Run failed. Cannot build yet. Please fix and retry'
            sys.exit(1)
        tdir = self.toolname
        os.mkdir(tdir)
        self.makeXML()
        if self.opts.make_HTML:
            if self.opts.help_text:
                hlp = open(self.opts.help_text,'r').read()
            else:
                hlp = 'Please ask the tool author for help as none was supplied at tool generation\n'
            if self.opts.include_dependencies:
                tooldepcontent = toolhtmldepskel % hlp
                depf = open(os.path.join(tdir,'tool_dependencies.xml'),'w')
                depf.write(tooldepcontent)
                depf.write('\n')
                depf.close()
        if self.opts.input_tab <> 'None': # no reproducible test otherwise? TODO: maybe..
            testdir = os.path.join(tdir,'test-data')
            os.mkdir(testdir) # make tests directory
            shutil.copyfile(self.opts.input_tab,os.path.join(testdir,self.test1Input))
            if self.opts.output_tab <> 'None':
                shutil.copyfile(self.opts.output_tab,os.path.join(testdir,self.test1Output))
            if self.opts.make_HTML:
                shutil.copyfile(self.opts.output_html,os.path.join(testdir,self.test1HTML))
            if self.opts.output_dir:
                shutil.copyfile(self.tlog,os.path.join(testdir,'test1_out.log'))
        # Write a self-annotated copy of this wrapper as the new tool's script.
        outpif = '%s.py' % self.toolname # new name
        outpiname = os.path.join(tdir,outpif) # path for the tool tarball
        pyin = os.path.basename(self.pyfile) # our name - we rewrite ourselves (TM)
        notes = ['# %s - a self annotated version of %s generated by running %s\n' % (outpiname,pyin,pyin),]
        notes.append('# to make a new Galaxy tool called %s\n' % self.toolname)
        notes.append('# User %s at %s\n' % (self.opts.user_email,timenow()))
        pi = open(self.pyfile,'r').readlines() # our code becomes new tool wrapper (!) - first Galaxy worm
        notes += pi
        outpi = open(outpiname,'w')
        outpi.write(''.join(notes))
        outpi.write('\n')
        outpi.close()
        stname = os.path.join(tdir,self.sfile)
        if not os.path.exists(stname):
            shutil.copyfile(self.sfile, stname)
        xtname = os.path.join(tdir,self.xmlfile)
        if not os.path.exists(xtname):
            shutil.copyfile(self.xmlfile,xtname)
        tarpath = "%s.gz" % self.toolname
        tar = tarfile.open(tarpath, "w:gz")
        tar.add(tdir,arcname=self.toolname)
        tar.close()
        shutil.copyfile(tarpath,self.opts.new_tool)
        shutil.rmtree(tdir)
        ## TODO: replace with optional direct upload to local toolshed?
        return retval

    def compressPDF(self,inpdf=None,thumbformat='png'):
        """need absolute path to pdf
        note that GS gets confoozled if no $TMP or $TEMP
        so we set it

        Compresses inpdf in place via ghostscript, then renders a thumbnail
        with graphicsmagick (or imagemagick).  Returns nonzero if either
        step failed.
        """
        assert os.path.isfile(inpdf), "## Input %s supplied to %s compressPDF not found" % (inpdf,self.myName)
        hlog = os.path.join(self.opts.output_dir,"compress_%s.txt" % os.path.basename(inpdf))
        sto = open(hlog,'a')
        our_env = os.environ.copy()
        our_tmp = our_env.get('TMP',None)
        if not our_tmp:
            our_tmp = our_env.get('TEMP',None)
        if not (our_tmp and os.path.exists(our_tmp)):
            # ghostscript needs a writable temp dir; inject one under output_dir
            newtmp = os.path.join(self.opts.output_dir,'tmp')
            try:
                os.mkdir(newtmp)
            except:
                sto.write('## WARNING - cannot make %s - it may exist or permissions need fixing\n' % newtmp)
            our_env['TEMP'] = newtmp
            if not self.temp_warned:
                sto.write('## WARNING - no $TMP or $TEMP!!! Please fix - using %s temporarily\n' % newtmp)
                self.temp_warned = True
        outpdf = '%s_compressed' % inpdf
        cl = ["gs", "-sDEVICE=pdfwrite", "-dNOPAUSE", "-dUseCIEColor", "-dBATCH","-dPDFSETTINGS=/printer", "-sOutputFile=%s" % outpdf,inpdf]
        x = subprocess.Popen(cl,stdout=sto,stderr=sto,cwd=self.opts.output_dir,env=our_env)
        retval1 = x.wait()
        sto.close()
        if retval1 == 0:
            # compression succeeded: replace the original and drop its log
            os.unlink(inpdf)
            shutil.move(outpdf,inpdf)
            os.unlink(hlog)
        hlog = os.path.join(self.opts.output_dir,"thumbnail_%s.txt" % os.path.basename(inpdf))
        sto = open(hlog,'w')
        outpng = '%s.%s' % (os.path.splitext(inpdf)[0],thumbformat)
        if self.useGM:
            cl2 = ['gm', 'convert', inpdf, outpng]
        else: # assume imagemagick
            cl2 = ['convert', inpdf, outpng]
        x = subprocess.Popen(cl2,stdout=sto,stderr=sto,cwd=self.opts.output_dir,env=our_env)
        retval2 = x.wait()
        sto.close()
        if retval2 == 0:
            os.unlink(hlog)
        retval = retval1 or retval2
        return retval

    def getfSize(self,fpath,outpath):
        """
        format a nice file size string

        Returns '' when the file does not exist; otherwise a human-readable
        size such as '1.2 MB', '3.4 KB' or '12 B'.
        """
        size = ''
        fp = os.path.join(outpath,fpath)
        if os.path.isfile(fp):
            size = '0 B'
            n = float(os.path.getsize(fp))
            if n > 2**20:
                size = '%1.1f MB' % (n/2**20)
            elif n > 2**10:
                size = '%1.1f KB' % (n/2**10)
            elif n > 0:
                size = '%d B' % (int(n))
        return size

    def makeHtml(self):
        """ Create an HTML file content to list all the artifacts found in the output_dir

        PDFs get compressed and thumbnailed; *.log files partition the page
        into sections; everything else is listed in a download table.
        Writes opts.output_html and stores the lines on self.html.
        """
        galhtmlprefix = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta name="generator" content="Galaxy %s tool output - see http://g2.trac.bx.psu.edu/" />
<title></title>
<link rel="stylesheet" href="/static/style/base.css" type="text/css" />
</head>
<body>
<div class="toolFormBody">
"""
        galhtmlattr = """<hr/><div class="infomessage">This tool (%s) was generated by the <a href="https://bitbucket.org/fubar/galaxytoolfactory/overview">Galaxy Tool Factory</a></div><br/>"""
        galhtmlpostfix = """</div></body></html>\n"""

        flist = os.listdir(self.opts.output_dir)
        flist = [x for x in flist if x <> 'Rplots.pdf']
        flist.sort()
        html = []
        html.append(galhtmlprefix % progname)
        html.append('<div class="infomessage">Galaxy Tool "%s" run at %s</div><br/>' % (self.toolname,timenow()))
        fhtml = []
        if len(flist) > 0:
            logfiles = [x for x in flist if x.lower().endswith('.log')] # log file names determine sections
            logfiles.sort()
            logfiles = [x for x in logfiles if os.path.abspath(x) <> os.path.abspath(self.tlog)]
            logfiles.append(os.path.abspath(self.tlog)) # make it the last one
            pdflist = []
            npdf = len([x for x in flist if os.path.splitext(x)[-1].lower() == '.pdf'])
            for rownum,fname in enumerate(flist):
                dname,e = os.path.splitext(fname)
                sfsize = self.getfSize(fname,self.opts.output_dir)
                if e.lower() == '.pdf' : # compress and make a thumbnail
                    thumb = '%s.%s' % (dname,self.thumbformat)
                    pdff = os.path.join(self.opts.output_dir,fname)
                    retval = self.compressPDF(inpdf=pdff,thumbformat=self.thumbformat)
                    if retval == 0:
                        pdflist.append((fname,thumb))
                    else:
                        pdflist.append((fname,fname))
                # alternate row shading in the downloads table
                if (rownum+1) % 2 == 0:
                    fhtml.append('<tr class="odd_row"><td><a href="%s">%s</a></td><td>%s</td></tr>' % (fname,fname,sfsize))
                else:
                    fhtml.append('<tr><td><a href="%s">%s</a></td><td>%s</td></tr>' % (fname,fname,sfsize))
            for logfname in logfiles: # expect at least tlog - if more
                if os.path.abspath(logfname) == os.path.abspath(self.tlog): # handled later
                    sectionname = 'All tool run'
                    if (len(logfiles) > 1):
                        sectionname = 'Other'
                    ourpdfs = pdflist
                else:
                    realname = os.path.basename(logfname)
                    sectionname = os.path.splitext(realname)[0].split('_')[0] # break in case _ added to log
                    ourpdfs = [x for x in pdflist if os.path.basename(x[0]).split('_')[0] == sectionname]
                    pdflist = [x for x in pdflist if os.path.basename(x[0]).split('_')[0] <> sectionname] # remove
                nacross = 1
                npdf = len(ourpdfs)
                # NOTE(review): block structure below reconstructed from a
                # whitespace-stripped capture; thumbnail grid emission is
                # nested under npdf > 0, matching the upstream tool factory.
                if npdf > 0:
                    nacross = math.sqrt(npdf) ## int(round(math.log(npdf,2)))
                    if int(nacross)**2 != npdf:
                        nacross += 1
                    nacross = int(nacross)
                    width = min(400,int(1200/nacross))
                    html.append('<div class="toolFormTitle">%s images and outputs</div>' % sectionname)
                    html.append('(Click on a thumbnail image to download the corresponding original PDF image)<br/>')
                    ntogo = nacross # counter for table row padding with empty cells
                    html.append('<div><table class="simple" cellpadding="2" cellspacing="2">\n<tr>')
                    for i,paths in enumerate(ourpdfs):
                        fname,thumb = paths
                        s= """<td><a href="%s"><img src="%s" title="Click to download a PDF of %s" hspace="5" width="%d"
alt="Image called %s"/></a></td>\n""" % (fname,thumb,fname,width,fname)
                        if ((i+1) % nacross == 0):
                            s += '</tr>\n'
                            ntogo = 0
                            if i < (npdf - 1): # more to come
                                s += '<tr>'
                                ntogo = nacross
                        else:
                            ntogo -= 1
                        html.append(s)
                    if html[-1].strip().endswith('</tr>'):
                        html.append('</table></div>\n')
                    else:
                        if ntogo > 0: # pad
                            html.append('<td> </td>'*ntogo)
                        html.append('</tr></table></div>\n')
                logt = open(logfname,'r').readlines()
                logtext = [x for x in logt if x.strip() > '']
                html.append('<div class="toolFormTitle">%s log output</div>' % sectionname)
                if len(logtext) > 1:
                    html.append('\n<pre>\n')
                    html += logtext
                    html.append('\n</pre>\n')
                else:
                    html.append('%s is empty<br/>' % logfname)
            if len(fhtml) > 0:
                fhtml.insert(0,'<div><table class="colored" cellpadding="3" cellspacing="3"><tr><th>Output File Name (click to view)</th><th>Size</th></tr>\n')
                fhtml.append('</table></div><br/>')
                html.append('<div class="toolFormTitle">All output files available for downloading</div>\n')
                html += fhtml # add all non-pdf files to the end of the display
        else:
            html.append('<div class="warningmessagelarge">### Error - %s returned no files - please confirm that parameters are sane</div>' % self.opts.interpreter)
        html.append(galhtmlpostfix)
        htmlf = file(self.opts.output_html,'w')
        htmlf.write('\n'.join(html))
        htmlf.write('\n')
        htmlf.close()
        self.html = html

    def run(self):
        """
        scripts must be small enough not to fill the pipe!

        Feeds the script to the interpreter on stdin (except bash/sh, which
        run from a file via runBash) and returns the process return code.
        """
        if self.treatbashSpecial and self.opts.interpreter in ['bash','sh']:
            retval = self.runBash()
        else:
            if self.opts.output_dir:
                ste = open(self.elog,'w')
                sto = open(self.tlog,'w')
                sto.write('## Toolfactory generated command line = %s\n' % ' '.join(self.cl))
                sto.flush()
                #p = subprocess.Popen(self.cl,shell=False,stdout=sto,stderr=ste,stdin=subprocess.PIPE,cwd=self.opts.output_dir)
                p = subprocess.Popen(self.cl,shell=False,stdout=subprocess.PIPE,stderr=subprocess.PIPE,stdin=subprocess.PIPE,cwd=self.opts.output_dir)
            else:
                p = subprocess.Popen(self.cl,shell=False,stdin=subprocess.PIPE)
            p.stdin.write(self.script)
            stdout_data, stderr_data = p.communicate()
            p.stdin.close()
            retval = p.returncode
            #retval = p.wait()
            # NOTE(review): indentation reconstructed — err is only bound in
            # the output_dir branch, so the stderr echo must live inside it.
            if self.opts.output_dir:
                sto.close()
                ste.close()
                err = stderr_data
                #err = open(self.elog,'r').readlines()
                print >> sys.stdout,stdout_data
                if retval <> 0 and err: # problem
                    print >> sys.stderr,err
            if self.opts.make_HTML:
                self.makeHtml()
        return retval

    def runBash(self):
        """
        cannot use - for bash so use self.sfile

        Same contract as run(), but the script is executed from its tempfile
        rather than piped to stdin.
        """
        if self.opts.output_dir:
            s = '## Toolfactory generated command line = %s\n' % ' '.join(self.cl)
            sto = open(self.tlog,'w')
            sto.write(s)
            sto.flush()
            p = subprocess.Popen(self.cl,shell=False,stdout=sto,stderr=sto,cwd=self.opts.output_dir)
        else:
            p = subprocess.Popen(self.cl,shell=False)
        retval = p.wait()
        if self.opts.output_dir:
            sto.close()
        if self.opts.make_HTML:
            self.makeHtml()
        return retval
def main():
    """
    Command line entry point for the Galaxy tool wrapper.

    Parses the wrapper options, runs the supplied script via ScriptRunner
    (or packages it as a new tool when --make_Tool is set), then exits with
    the script's return code so the job runner can detect failure.
    """
    u = """
    This is a Galaxy wrapper. It expects to be called by a special purpose tool.xml as:
    <command interpreter="python">rgBaseScriptWrapper.py --script_path "$scriptPath" --tool_name "foo" --interpreter "Rscript"
    </command>
    """
    # u was previously an unused local; wire it up as the --help usage text.
    op = optparse.OptionParser(usage=u)
    a = op.add_option
    a('--script_path',default=None)
    a('--tool_name',default=None)
    a('--interpreter',default=None)
    a('--output_dir',default='./')
    a('--output_html',default=None)
    a('--input_tab',default="None")
    a('--input_int',default="None")
    a('--input_formats',default="tabular,text")
    a('--output_tab',default="None")
    a('--output_format',default="tabular")
    a('--user_email',default='Unknown')
    a('--bad_user',default=None)
    a('--make_Tool',default=None)
    a('--make_HTML',default=None)
    a('--help_text',default=None)
    a('--citations',default=None)
    a('--tool_desc',default=None)
    a('--new_tool',default=None)
    a('--tool_version',default=None)
    a('--include_dependencies',default=None)
    opts, args = op.parse_args()
    # NOTE: asserts are stripped under "python -O"; kept as-is for backward
    # compatibility with the original error reporting.
    assert not opts.bad_user,'UNAUTHORISED: %s is NOT authorized to use this tool until Galaxy admin adds %s to admin_users in universe_wsgi.ini' % (opts.bad_user,opts.bad_user)
    assert opts.tool_name,'## Tool Factory expects a tool name - eg --tool_name=DESeq'
    assert opts.interpreter,'## Tool Factory wrapper expects an interpreter - eg --interpreter=Rscript'
    assert os.path.isfile(opts.script_path),'## Tool Factory wrapper expects a script path - eg --script_path=foo.R'
    if opts.output_dir:
        try:
            os.makedirs(opts.output_dir)
        except OSError:
            # Directory already exists (or is unwritable); was a bare except
            # that could mask unrelated errors such as KeyboardInterrupt.
            pass
    r = ScriptRunner(opts)
    if opts.make_Tool:
        retcode = r.makeTooltar()
    else:
        retcode = r.run()
    os.unlink(r.sfile)
    if retcode:
        sys.exit(retcode) # indicate failure to job runner
if __name__ == "__main__":
    main()
| {
"repo_name": "myoshimura080822/tools_of_rnaseq_on_docker_galaxy",
"path": "AddGroupIdForDEGAnalysis/AddGroupIdForDEGAnalysis.py",
"copies": "1",
"size": "32429",
"license": "mit",
"hash": 4437572277015331300,
"line_mean": 42.2963951936,
"line_max": 241,
"alpha_frac": 0.584261001,
"autogenerated": false,
"ratio": 3.618904140162928,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9635919705215444,
"avg_score": 0.0134490871894967,
"num_lines": 749
} |
"""addHeadersDuplicated
Revision ID: 608afa719fb8
Revises: cdb714f6f374
Create Date: 2016-03-23 10:13:52.614000
"""
# revision identifiers, used by Alembic.
revision = '608afa719fb8'
down_revision = 'cdb714f6f374'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function (e.g. upgrade_error_data)."""
    target = "upgrade_%s" % engine_name
    globals()[target]()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function."""
    target = "downgrade_%s" % engine_name
    globals()[target]()
def upgrade_error_data():
    """Add the headers_duplicated column to file_status."""
    duplicated_headers = sa.Column('headers_duplicated', sa.Text(), nullable=True)
    op.add_column('file_status', duplicated_headers)
def downgrade_error_data():
    """Remove the headers_duplicated column from file_status."""
    op.drop_column('file_status', 'headers_duplicated')
def upgrade_job_tracker():
    ### commands auto generated by Alembic - please adjust! ###
    # No job_tracker schema changes in this revision.
    pass
    ### end Alembic commands ###
def downgrade_job_tracker():
    ### commands auto generated by Alembic - please adjust! ###
    # No job_tracker schema changes to revert.
    pass
    ### end Alembic commands ###
def upgrade_user_manager():
    ### commands auto generated by Alembic - please adjust! ###
    # No user_manager schema changes in this revision.
    pass
    ### end Alembic commands ###
def downgrade_user_manager():
    ### commands auto generated by Alembic - please adjust! ###
    # No user_manager schema changes to revert.
    pass
    ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-core",
"path": "dataactcore/migrations/versions/608afa719fb8_addheadersduplicated.py",
"copies": "1",
"size": "1402",
"license": "cc0-1.0",
"hash": 3371912650279566300,
"line_mean": 20.90625,
"line_max": 91,
"alpha_frac": 0.6661911555,
"autogenerated": false,
"ratio": 3.699208443271768,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9755239894628538,
"avg_score": 0.02203194082864599,
"num_lines": 64
} |
"""Add high comp officer fields to detached_award_procurement table
Revision ID: 3f24399ddd1b
Revises: ad3dd1c0cf20
Create Date: 2019-05-24 09:31:12.678128
"""
# revision identifiers, used by Alembic.
revision = '3f24399ddd1b'
down_revision = 'ad3dd1c0cf20'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function (e.g. upgrade_data_broker)."""
    target = "upgrade_%s" % engine_name
    globals()[target]()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function."""
    target = "downgrade_%s" % engine_name
    globals()[target]()
def upgrade_data_broker():
    """Add high_comp_officer{1..5}_{amount,full_na} columns to detached_award_procurement."""
    for officer in range(1, 6):
        for suffix in ('amount', 'full_na'):
            column_name = 'high_comp_officer%d_%s' % (officer, suffix)
            op.add_column('detached_award_procurement', sa.Column(column_name, sa.Text(), nullable=True))
def downgrade_data_broker():
    """Drop the high compensation officer columns, in reverse creation order."""
    for officer in range(5, 0, -1):
        for suffix in ('full_na', 'amount'):
            column_name = 'high_comp_officer%d_%s' % (officer, suffix)
            op.drop_column('detached_award_procurement', column_name)
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/3f24399ddd1b_add_high_comp_officer_fields_to_.py",
"copies": "1",
"size": "2692",
"license": "cc0-1.0",
"hash": 147442505260819140,
"line_mean": 45.4137931034,
"line_max": 114,
"alpha_frac": 0.7191679049,
"autogenerated": false,
"ratio": 2.9944382647385983,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9109337214934663,
"avg_score": 0.020853790940786914,
"num_lines": 58
} |
"""Add high comp officer fields to published_award_financial_assistance table
Revision ID: ad3dd1c0cf20
Revises: 5f29b283f23e
Create Date: 2019-05-23 08:31:35.225654
"""
# revision identifiers, used by Alembic.
revision = 'ad3dd1c0cf20'
down_revision = '5f29b283f23e'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function (e.g. upgrade_data_broker)."""
    target = "upgrade_%s" % engine_name
    globals()[target]()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function."""
    target = "downgrade_%s" % engine_name
    globals()[target]()
def upgrade_data_broker():
    """Add high_comp_officer{1..5}_{amount,full_na} columns to published_award_financial_assistance."""
    for officer in range(1, 6):
        for suffix in ('amount', 'full_na'):
            column_name = 'high_comp_officer%d_%s' % (officer, suffix)
            op.add_column('published_award_financial_assistance', sa.Column(column_name, sa.Text(), nullable=True))
def downgrade_data_broker():
    """Drop the high compensation officer columns, in reverse creation order."""
    for officer in range(5, 0, -1):
        for suffix in ('full_na', 'amount'):
            column_name = 'high_comp_officer%d_%s' % (officer, suffix)
            op.drop_column('published_award_financial_assistance', column_name)
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/ad3dd1c0cf20_add_high_comp_officer_fields_to_.py",
"copies": "1",
"size": "2902",
"license": "cc0-1.0",
"hash": -5788116176003698000,
"line_mean": 49.0344827586,
"line_max": 124,
"alpha_frac": 0.7322536182,
"autogenerated": false,
"ratio": 3.0041407867494825,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4236394404949482,
"avg_score": null,
"num_lines": null
} |
"""add-hikes
Revision ID: f7888bd46c75
Revises: 820bb005f2c5
Create Date: 2017-02-16 07:36:06.108806
"""
# revision identifiers, used by Alembic.
revision = 'f7888bd46c75'
down_revision = 'fc92ba2ffd7f'
from alembic import op
import sqlalchemy as sa
import geoalchemy2
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # Destinations that can be reached by a hike; high_point_coord is a
    # PostGIS point (geoalchemy2).
    op.create_table('hike_destination',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=100), nullable=False),
    sa.Column('altitude', sa.Integer(), nullable=True),
    sa.Column('high_point_coord', geoalchemy2.types.Geometry(geometry_type='POINT'), nullable=False),
    sa.Column('is_summit', sa.Boolean(), server_default='t', nullable=False),
    sa.Column('created_at', sa.DateTime(), server_default=sa.text(u'now()'), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    # Individual hikes; method is constrained to a fixed set of values and
    # each hike references a destination.
    op.create_table('hike',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('destination_id', sa.Integer(), nullable=False),
    sa.Column('datetime', sa.DateTime(), server_default=sa.text(u'now()'), nullable=False),
    sa.Column('method', sa.String(length=30), nullable=False),
    sa.Column('notes', sa.Text(), server_default='', nullable=False),
    sa.CheckConstraint(u"method in ('ski', 'foot', 'crampons', 'climb', 'via ferrata')"),
    sa.ForeignKeyConstraint(['destination_id'], [u'hike_destination.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    ### end Alembic commands ###
def downgrade():
    """Drop the hike tables; 'hike' first since it has a FK to hike_destination."""
    for table_name in ('hike', 'hike_destination'):
        op.drop_table(table_name)
| {
"repo_name": "thusoy/blag",
"path": "blag/migrations/versions/f7888bd46c75_add_hikes.py",
"copies": "1",
"size": "1664",
"license": "mit",
"hash": -8943776809270185000,
"line_mean": 35.1739130435,
"line_max": 101,
"alpha_frac": 0.6748798077,
"autogenerated": false,
"ratio": 3.314741035856574,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4489620843556574,
"avg_score": null,
"num_lines": null
} |
"""Add history fields
Revision ID: a25a6c551233
Revises: a04da4b32a36
Create Date: 2016-01-29 16:46:54.522253
"""
# revision identifiers, used by Alembic.
revision = 'a25a6c551233'
down_revision = 'a04da4b32a36'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Add one DateTime column per hail status transition."""
    status_changes = (
        'change_to_accepted_by_customer',
        'change_to_accepted_by_taxi',
        'change_to_declined_by_customer',
        'change_to_declined_by_taxi',
        'change_to_failure',
        'change_to_incident_customer',
        'change_to_incident_taxi',
        'change_to_received_by_operator',
        'change_to_received_by_taxi',
        'change_to_sent_to_operator',
        'change_to_timeout_customer',
        'change_to_timeout_taxi',
    )
    for column_name in status_changes:
        op.add_column('hail', sa.Column(column_name, sa.DateTime(), nullable=True))
def downgrade():
    """Drop the per-status timestamp columns, in reverse creation order."""
    status_changes = (
        'change_to_accepted_by_customer',
        'change_to_accepted_by_taxi',
        'change_to_declined_by_customer',
        'change_to_declined_by_taxi',
        'change_to_failure',
        'change_to_incident_customer',
        'change_to_incident_taxi',
        'change_to_received_by_operator',
        'change_to_received_by_taxi',
        'change_to_sent_to_operator',
        'change_to_timeout_customer',
        'change_to_timeout_taxi',
    )
    for column_name in reversed(status_changes):
        op.drop_column('hail', column_name)
| {
"repo_name": "openmaraude/APITaxi_models",
"path": "APITaxi_models2/migrations/versions/20160129_16:46:54_a25a6c551233_add_history_fields.py.py",
"copies": "3",
"size": "2183",
"license": "mit",
"hash": -5199625217772000000,
"line_mean": 48.6136363636,
"line_max": 100,
"alpha_frac": 0.6999541915,
"autogenerated": false,
"ratio": 2.9262734584450403,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006801715323787882,
"num_lines": 44
} |
"""Add history to hail
Revision ID: 7d563dd0c13
Revises: 3b8033532af1
Create Date: 2015-04-22 12:12:16.658415
"""
# revision identifiers, used by Alembic.
revision = '7d563dd0c13'
down_revision = '3b8033532af1'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
sources_enum = sa.Enum('form', 'api', name='sources')
def upgrade():
    """Add provenance columns (added_at/by/via, last_update_at, source) to customer and hail."""
    sources_enum.create(op.get_bind(), checkfirst=True)
    op.add_column('customer', sa.Column('added_at', sa.DateTime(), nullable=True))
    op.add_column('customer', sa.Column('added_by', sa.Integer(), nullable=True))
    op.add_column('customer', sa.Column('added_via', sources_enum, nullable=True))
    op.add_column('customer', sa.Column('last_update_at', sa.DateTime(), nullable=True))
    op.add_column('customer', sa.Column('source', sa.String(length=255),
                                        nullable=True))
    op.create_foreign_key(None, 'customer', 'user', ['added_by'], ['id'])
    op.add_column('hail', sa.Column('added_at', sa.DateTime(), nullable=True))
    op.add_column('hail', sa.Column('added_by', sa.Integer(), nullable=True))
    op.add_column('hail', sa.Column('added_via',
                                    sa.Enum('form', 'api', name='sources'), nullable=True))
    op.add_column('hail', sa.Column('last_update_at', sa.DateTime(), nullable=True))
    op.add_column('hail', sa.Column('source', sa.String(length=255),
                                    nullable=True))
    op.create_foreign_key(None, 'hail', 'user', ['added_by'], ['id'])
    # Backfill existing rows before tightening the columns to NOT NULL below.
    op.execute("update customer set source = ''")
    op.execute("update customer set added_via = 'api'")
    op.execute("update hail set source = ''")
    op.execute("update hail set added_via = 'api'")
    # Removed two unused locals (hail_table / customer_table): they were never
    # referenced, and they misused sa.sql.column('added_via', 'source') by
    # passing 'source' as the column *type_* argument.
    op.alter_column('customer', 'added_via', nullable=False)
    op.alter_column('customer', 'source', nullable=False)
    op.alter_column('hail', 'added_via', nullable=False)
    op.alter_column('hail', 'source', nullable=False)
def downgrade():
    # Revert the provenance columns added by upgrade(), hail first, then customer.
    # NOTE(review): drop_constraint is called with a None constraint name; this
    # relies on the backend resolving the auto-generated FK name — verify it
    # works on the target database.
    op.drop_constraint(None, 'hail', type_='foreignkey')
    op.drop_column('hail', 'source')
    op.drop_column('hail', 'last_update_at')
    op.drop_column('hail', 'added_via')
    op.drop_column('hail', 'added_by')
    op.drop_column('hail', 'added_at')
    op.drop_constraint(None, 'customer', type_='foreignkey')
    op.drop_column('customer', 'source')
    op.drop_column('customer', 'last_update_at')
    op.drop_column('customer', 'added_via')
    op.drop_column('customer', 'added_by')
    op.drop_column('customer', 'added_at')
    # Finally remove the enum type itself (created in upgrade with checkfirst).
    sources_enum.drop(op.get_bind(), checkfirst=True)
| {
"repo_name": "openmaraude/APITaxi_models",
"path": "APITaxi_models2/migrations/versions/20150422_12:12:16_7d563dd0c13_add_history_to_hail.py.py",
"copies": "4",
"size": "2670",
"license": "mit",
"hash": -72732749841591380,
"line_mean": 43.5,
"line_max": 88,
"alpha_frac": 0.6599250936,
"autogenerated": false,
"ratio": 3.115519253208868,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006446458877806205,
"num_lines": 60
} |
"""Add ifttt integration so users can call their own ifttt hooks."""
import json
import re
from json import JSONDecodeError
import CommonMark
from plumeria.command import commands, CommandError
from plumeria.core.user_prefs import prefs_manager
from plumeria.message import Message
from plumeria.perms import direct_only
from plumeria.util import http
from plumeria.util.message import strip_html
from plumeria.util.ratelimit import rate_limit
EVENT_NAME_RE = re.compile("^[A-Za-z0-9_\\- ]{1,40}$")
VALID_MAKER_KEY_RE = re.compile("^[A-Za-z0-9_\\- ]{1,70}$")
def valid_maker_key(s):
    """Validate an ifttt Maker key, returning it unchanged or raising ValueError."""
    if VALID_MAKER_KEY_RE.search(s) is None:
        raise ValueError("Invalid ifttt Maker key")
    return s
maker_key = prefs_manager.create("ifttt_maker_key", type=valid_maker_key, fallback=None, comment="Your ifttt Maker key",
private=True)
@commands.create("ifttt", ".", category="Utility")
@rate_limit()
@direct_only
async def ifttt_maker(message: Message):
    """
    Fire a ifttt Maker event.

    You can trigger ifttt recipes using this command. Create recipes first
    using the `Maker Channel <https://ifttt.com/maker>`_ and then add your Maker
    key using the :code:`/pset ifttt_maker_key your_key` command.

    Here's how you could trigger an email event::

        /ifttt email

    Regarding variables: Value1 refers to the raw input data, Value2 refers to an HTML
    version, and Value3 is a Markdown and HTML free version.
    """
    # First token is the Maker event name; the rest (optional) is the payload.
    parts = message.content.strip().split(" ", 1)
    if len(parts) == 1:
        event_name, data = parts[0], ""
    else:
        event_name, data = parts
    if not EVENT_NAME_RE.search(event_name):
        raise CommandError("Invalid event name. Only alphanumeric, dash, space, and underscore letters are allowed.")
    try:
        key = await prefs_manager.get(maker_key, message.author)
    except KeyError:
        raise CommandError("Set your Maker key with /pset ifttt_maker_key your_key first.")
    # Render the payload three ways: raw Markdown (value1), HTML (value2),
    # and markup-free plain text (value3).
    html = CommonMark.commonmark(data)
    text = strip_html(html)
    r = await http.post("https://maker.ifttt.com/trigger/{}/with/key/{}".format(event_name, key), data=json.dumps({
        "value1": data,
        "value2": html,
        "value3": text,
    }), headers=(('Content-Type', 'application/json'),), require_success=False)
    # On failure Maker returns JSON with an 'errors' list; on success the body
    # may be plain text, so a JSON decode failure is not itself an error.
    try:
        response = r.json()
        if 'errors' in response:
            raise CommandError("ifttt says: " + "\n".join([error['message'] for error in response['errors']]))
    except JSONDecodeError:
        pass
    return r.text()
def setup():
    """Register the ifttt_maker_key preference and the /ifttt command with plumeria."""
    prefs_manager.add(maker_key)
    commands.add(ifttt_maker)
| {
"repo_name": "sk89q/Plumeria",
"path": "orchard/ifttt.py",
"copies": "1",
"size": "2645",
"license": "mit",
"hash": 1943384562390999800,
"line_mean": 30.4880952381,
"line_max": 120,
"alpha_frac": 0.6676748582,
"autogenerated": false,
"ratio": 3.5079575596816976,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46756324178816977,
"avg_score": null,
"num_lines": null
} |
"""Add ignore_flag column to genomic_manifest_file and feedback
Revision ID: 99fb6b79b5f7
Revises: 50d9eeb498c3
Create Date: 2021-01-04 15:40:41.792027
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '99fb6b79b5f7'
down_revision = '50d9eeb498c3'
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function (e.g. upgrade_rdr)."""
    target = "upgrade_%s" % engine_name
    globals()[target]()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function."""
    target = "downgrade_%s" % engine_name
    globals()[target]()
def upgrade_rdr():
    """Add the ignore_flag column to both genomic manifest tables."""
    for table_name in ('genomic_manifest_feedback', 'genomic_manifest_file'):
        op.add_column(table_name, sa.Column('ignore_flag', sa.SmallInteger(), nullable=False))
def downgrade_rdr():
    """Drop the ignore_flag column, in reverse creation order."""
    for table_name in ('genomic_manifest_file', 'genomic_manifest_feedback'):
        op.drop_column(table_name, 'ignore_flag')
def upgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    # No metrics schema changes in this revision.
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    # No metrics schema changes to revert.
    pass
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/99fb6b79b5f7_add_ignore_flag_column_to_genomic_.py",
"copies": "1",
"size": "1344",
"license": "bsd-3-clause",
"hash": -1411925241348412000,
"line_mean": 25.88,
"line_max": 107,
"alpha_frac": 0.6785714286,
"autogenerated": false,
"ratio": 3.3768844221105527,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9453568386323088,
"avg_score": 0.020377492877492877,
"num_lines": 50
} |
""" Add image model table definition and relationship to product
Revision ID: 0051eed6ee5d
Revises: cad65db44e62
Create Date: 2016-07-16 00:57:34.439371
"""
# revision identifiers, used by Alembic.
revision = '0051eed6ee5d'
down_revision = '4d2c8abdff95'
from alembic import op
import sqlalchemy as sa
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # Image storage metadata (path on disk, optional CDN public_id, alt text).
    op.create_table('image',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('alt', sa.Unicode(length=256), nullable=True),
    sa.Column('path', sa.String(length=128), nullable=False),
    sa.Column('public_id', sa.String(length=128), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # Association table linking products to images (many-to-many).
    op.create_table('product_image',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('product_id', sa.Integer(), nullable=False),
    sa.Column('image_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['image_id'], ['image.id'], ),
    sa.ForeignKeyConstraint(['product_id'], ['product.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    ### end Alembic commands ###
def downgrade():
    """Revert the image tables created by upgrade()."""
    # Bug fix: upgrade() creates 'product_image' (singular); the original
    # downgrade tried to drop 'product_images', which does not exist.
    op.drop_table('product_image')
    op.drop_table('image')
| {
"repo_name": "betterlife/flask-psi",
"path": "psi/migrations/versions/26_0051eed6ee5d_.py",
"copies": "2",
"size": "1274",
"license": "mit",
"hash": -534509956291055200,
"line_mean": 30.0731707317,
"line_max": 65,
"alpha_frac": 0.6711145997,
"autogenerated": false,
"ratio": 3.424731182795699,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.009512911572389803,
"num_lines": 41
} |
"""add indexes
Revision ID: 4328f2c08f05
Revises: eb61567ea005
Create Date: 2019-02-05 19:23:02.744161
"""
from alembic import op
import sqlalchemy as sa
from docassemble.webapp.database import dbtableprefix
# revision identifiers, used by Alembic.
revision = '4328f2c08f05'
down_revision = 'eb61567ea005'
branch_labels = None
depends_on = None
def upgrade():
    """Create lookup indexes on the docassemble tables (prefixed by dbtableprefix)."""
    index_specs = [
        ('ix_attachments_filename', 'attachments', ['filename']),
        ('ix_attachments_key', 'attachments', ['key']),
        ('ix_chatlog_filename', 'chatlog', ['filename']),
        ('ix_chatlog_key', 'chatlog', ['key']),
        ('ix_machinelearning_key', 'machinelearning', ['key']),
        ('ix_objectstorage_key', 'objectstorage', ['key']),
        ('ix_role_name', 'role', ['name']),
        ('ix_shortener_filename', 'shortener', ['filename']),
        ('ix_shortener_key', 'shortener', ['key']),
        ('ix_speaklist_filename', 'speaklist', ['filename']),
        ('ix_speaklist_key', 'speaklist', ['key']),
        ('ix_uploads_filename', 'uploads', ['filename']),
        ('ix_uploads_key', 'uploads', ['key']),
        ('ix_user_auth_user_id', 'user_auth', ['user_id']),
        ('ix_user_email', 'user', ['email']),
        ('ix_user_roles_role_id', 'user_roles', ['role_id']),
        ('ix_user_roles_user_id', 'user_roles', ['user_id']),
        ('ix_userdict_filename', 'userdict', ['filename']),
        ('ix_userdict_key', 'userdict', ['key']),
        ('ix_userdictkeys_filename', 'userdictkeys', ['filename']),
        ('ix_userdictkeys_key', 'userdictkeys', ['key']),
        ('ix_userdictkeys_temp_user_id', 'userdictkeys', ['temp_user_id']),
        ('ix_userdictkeys_user_id', 'userdictkeys', ['user_id']),
        ('ix_userdict_key_filename', 'userdict', ['key', 'filename']),
        ('ix_userdictkeys_key_filename', 'userdictkeys', ['key', 'filename']),
    ]
    for index_name, table_name, columns in index_specs:
        op.create_index(dbtableprefix + index_name, table_name, columns)
def downgrade():
    """Drop the indexes created by upgrade(), in the same order."""
    index_specs = [
        ('ix_attachments_filename', 'attachments'),
        ('ix_attachments_key', 'attachments'),
        ('ix_chatlog_filename', 'chatlog'),
        ('ix_chatlog_key', 'chatlog'),
        ('ix_machinelearning_key', 'machinelearning'),
        ('ix_objectstorage_key', 'objectstorage'),
        ('ix_role_name', 'role'),
        ('ix_shortener_filename', 'shortener'),
        ('ix_shortener_key', 'shortener'),
        ('ix_speaklist_filename', 'speaklist'),
        ('ix_speaklist_key', 'speaklist'),
        ('ix_uploads_filename', 'uploads'),
        ('ix_uploads_key', 'uploads'),
        ('ix_user_auth_user_id', 'user_auth'),
        ('ix_user_email', 'user'),
        ('ix_user_roles_role_id', 'user_roles'),
        ('ix_user_roles_user_id', 'user_roles'),
        ('ix_userdict_filename', 'userdict'),
        ('ix_userdict_key', 'userdict'),
        ('ix_userdictkeys_filename', 'userdictkeys'),
        ('ix_userdictkeys_key', 'userdictkeys'),
        ('ix_userdictkeys_temp_user_id', 'userdictkeys'),
        ('ix_userdictkeys_user_id', 'userdictkeys'),
        ('ix_userdict_key_filename', 'userdict'),
        ('ix_userdictkeys_key_filename', 'userdictkeys'),
    ]
    for index_name, table_name in index_specs:
        op.drop_index(dbtableprefix + index_name, table_name=table_name)
| {
"repo_name": "jhpyle/docassemble",
"path": "docassemble_webapp/docassemble/webapp/alembic/versions/4328f2c08f05_add_indexes.py",
"copies": "1",
"size": "4573",
"license": "mit",
"hash": 3217402970439389000,
"line_mean": 62.5138888889,
"line_max": 104,
"alpha_frac": 0.7089438006,
"autogenerated": false,
"ratio": 3.0773889636608343,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4286332764260834,
"avg_score": null,
"num_lines": null
} |
"""Add indexes to improve various sql query performance
Revision ID: c107fa0468ff
Revises: 236318ee3d3e
Create Date: 2017-08-22 15:33:13.706301
"""
# revision identifiers, used by Alembic.
revision = 'c107fa0468ff'
down_revision = '236318ee3d3e'
from alembic import op
import sqlalchemy as sa
import doorman.database
def upgrade():
    """Create indexes that speed up common doorman queries."""
    # Composite index used when polling distributed query tasks per node.
    op.create_index('idx_distributed_query_task_node_id_status', 'distributed_query_task',
                    ['node_id', 'status'], unique=False)
    # Indexes on the tag association tables.
    for assoc_table, fk_column in (('file_path_tags', 'file_path.id'),
                                   ('node_tags', 'node.id'),
                                   ('pack_tags', 'pack.id'),
                                   ('query_tags', 'query.id')):
        op.create_index(op.f('ix_%s_%s' % (assoc_table, fk_column)), assoc_table,
                        [fk_column], unique=False)
    # Descending-timestamp indexes for "latest logs per node" queries.
    op.create_index('idx_result_log_node_id_timestamp_desc', 'result_log',
                    ['node_id', sa.text(u'timestamp DESC')], unique=False)
    op.create_index('idx_status_log_node_id_created_desc', 'status_log',
                    ['node_id', sa.text(u'created DESC')], unique=False)
def downgrade():
    """Drop the indexes created by upgrade(), in reverse order."""
    op.drop_index('idx_status_log_node_id_created_desc', table_name='status_log')
    op.drop_index('idx_result_log_node_id_timestamp_desc', table_name='result_log')
    for assoc_table, fk_column in (('query_tags', 'query.id'),
                                   ('pack_tags', 'pack.id'),
                                   ('node_tags', 'node.id'),
                                   ('file_path_tags', 'file_path.id')):
        op.drop_index(op.f('ix_%s_%s' % (assoc_table, fk_column)), table_name=assoc_table)
    op.drop_index('idx_distributed_query_task_node_id_status', table_name='distributed_query_task')
| {
"repo_name": "mwielgoszewski/doorman",
"path": "migrations/versions/c107fa0468ff_add_indexes_to_improve_various_sql_.py",
"copies": "1",
"size": "1891",
"license": "mit",
"hash": -6286771938265299000,
"line_mean": 47.4871794872,
"line_max": 129,
"alpha_frac": 0.6901110524,
"autogenerated": false,
"ratio": 2.9227202472952087,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4112831299695209,
"avg_score": null,
"num_lines": null
} |
"""add index for geo DB
Revision ID: 84fc7bc201e6
Revises: 2159f60b94ab
Create Date: 2017-08-20 12:42:40.461404
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = '84fc7bc201e6'
down_revision = '2159f60b94ab'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_index('district_index', 'district', ['name'], unique=False)
op.create_index('province_index', 'province', ['name'], unique=False)
op.drop_index('ix_user_fullname', table_name='user')
op.create_index(op.f('ix_user_fullname'), 'user',
['fullname'], unique=True)
op.create_index('ward_index', 'ward', ['name'], unique=False)
# ### end Alembic commands ###
def downgrade():
    """Revert upgrade(): drop geo indexes, restore the non-unique fullname index."""
    for geo_table in ('ward', 'province', 'district'):
        op.drop_index('{0}_index'.format(geo_table), table_name=geo_table)
    # Put back the original non-unique fullname index.
    op.drop_index(op.f('ix_user_fullname'), table_name='user')
    op.create_index('ix_user_fullname', 'user', ['fullname'], unique=False)
| {
"repo_name": "HaManhDong/pgscm",
"path": "migrations/versions/84fc7bc201e6_add_index_for_geo_db.py",
"copies": "2",
"size": "1159",
"license": "apache-2.0",
"hash": -7125211678547969000,
"line_mean": 33.0882352941,
"line_max": 75,
"alpha_frac": 0.6548748921,
"autogenerated": false,
"ratio": 3.210526315789474,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4865401207889474,
"avg_score": null,
"num_lines": null
} |
"""add indexing to DUNS table
Revision ID: d35ecdfc1da7
Revises: 4b1ee78268fb
Create Date: 2017-08-31 12:03:16.395760
"""
# revision identifiers, used by Alembic.
revision = 'd35ecdfc1da7'
down_revision = '4b1ee78268fb'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade, e.g. upgrade_data_broker()."""
    globals()["upgrade_{0}".format(engine_name)]()


def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade, e.g. downgrade_data_broker()."""
    globals()["downgrade_{0}".format(engine_name)]()
def upgrade_data_broker():
    """Index the three duns date columns used for range filtering."""
    for date_column in ('activation_date', 'deactivation_date', 'expiration_date'):
        op.create_index(op.f('ix_duns_' + date_column), 'duns', [date_column],
                        unique=False)
def downgrade_data_broker():
    """Drop the duns date indexes added by upgrade_data_broker()."""
    for date_column in ('expiration_date', 'deactivation_date', 'activation_date'):
        op.drop_index(op.f('ix_duns_' + date_column), table_name='duns')
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/d35ecdfc1da7_add_indexing_to_duns_table.py",
"copies": "1",
"size": "1220",
"license": "cc0-1.0",
"hash": 4112935135808350000,
"line_mean": 26.7272727273,
"line_max": 99,
"alpha_frac": 0.6844262295,
"autogenerated": false,
"ratio": 2.953995157384988,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8996958732508549,
"avg_score": 0.028292530875287793,
"num_lines": 44
} |
"""add index
Revision ID: 52a2df18b1d4
Revises: 2bca06a823a0
Create Date: 2018-12-14 17:39:48.110670
"""
from alembic import op
revision = '52a2df18b1d4'
down_revision = '2bca06a823a0'
branch_labels = None
depends_on = None
def upgrade():
    """Create lookup indexes for the walle tables (only idx_username is UNIQUE)."""
    index_specs = (
        ('idx_spaceId', 'environments', ['space_id'], False),
        ('idx_spaceId', 'projects', ['space_id', 'name'], False),
        ('idx_taskId', 'records', ['task_id'], False),
        ('idx_name', 'servers', ['name'], False),
        ('idx_username', 'users', ['username'], True),
        ('idx_projectId', 'tasks', ['project_id', 'user_id'], False),
        ('idx_userId', 'tasks', ['user_id', 'project_id'], False),
        ('idx_name', 'tasks', ['name'], False),
        ('idx_name', 'users', ['username', 'email'], False),
        ('idx_user_source', 'members', ['source_type', 'source_id', 'access_level'], False),
    )
    for index_name, table, columns, is_unique in index_specs:
        op.create_index(index_name, table, columns, unique=is_unique)
def downgrade():
    """Drop every index created by upgrade(), in the same order."""
    for index_name, table in (
        ('idx_spaceId', 'environments'),
        ('idx_spaceId', 'projects'),
        ('idx_taskId', 'records'),
        ('idx_name', 'servers'),
        ('idx_username', 'users'),
        ('idx_projectId', 'tasks'),
        ('idx_userId', 'tasks'),
        ('idx_name', 'tasks'),
        ('idx_name', 'users'),
        ('idx_user_source', 'members'),
    ):
        op.drop_index(index_name, table_name=table)
| {
"repo_name": "meolu/walle-web",
"path": "migrations/versions/52a2df18b1d4_02_add_index.py",
"copies": "1",
"size": "1613",
"license": "apache-2.0",
"hash": -3157693497198885400,
"line_mean": 31.26,
"line_max": 109,
"alpha_frac": 0.6608803472,
"autogenerated": false,
"ratio": 2.970534069981584,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9126960605517552,
"avg_score": 0.000890762332806371,
"num_lines": 50
} |
"""Add indexs and adjust forum accounts
Revision ID: 20025e7424d
Revises: 59abfc7b45d
Create Date: 2014-06-10 19:05:40.599138
"""
# revision identifiers, used by Alembic.
revision = '20025e7424d'
down_revision = '59abfc7b45d'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Index mod/user search columns and add the forum-account link column."""
    # Non-unique search indexes on the mod table.
    for mod_column in ('description', 'name'):
        op.create_index(op.f('ix_mod_' + mod_column), 'mod', [mod_column],
                        unique=False)
    # Nullable FK-style link from a site account to its forum account.
    op.add_column('user', sa.Column('forumId', sa.Integer(), nullable=True))
    for user_column in ('email', 'username'):
        op.create_index(op.f('ix_user_' + user_column), 'user', [user_column],
                        unique=False)
def downgrade():
    """Revert upgrade(): drop the new indexes and the forumId column."""
    for user_column in ('username', 'email'):
        op.drop_index(op.f('ix_user_' + user_column), table_name='user')
    op.drop_column('user', 'forumId')
    for mod_column in ('name', 'description'):
        op.drop_index(op.f('ix_mod_' + mod_column), table_name='mod')
| {
"repo_name": "ModulousSmash/Modulous",
"path": "alembic/versions/20025e7424d_add_indexs_and_adjust_forum_accounts.py",
"copies": "5",
"size": "1182",
"license": "mit",
"hash": 5431469936181214000,
"line_mean": 33.7647058824,
"line_max": 85,
"alpha_frac": 0.6641285956,
"autogenerated": false,
"ratio": 3.054263565891473,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6218392161491473,
"avg_score": null,
"num_lines": null
} |
"""Add indexs and adjust forum accounts
Revision ID: 20025e7424d
Revises: 59abfc7b45d
Create Date: 2014-06-10 19:05:40.599138
"""
# revision identifiers, used by Alembic.
revision = '20025e7424d'
down_revision = '59abfc7b45d'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Index mod and user search columns; add the user.forumId link column."""
    mod_indexes = ('description', 'name')
    for column in mod_indexes:
        op.create_index(op.f('ix_mod_{0}'.format(column)), 'mod', [column],
                        unique=False)
    # Nullable integer linking a site account to its forum account.
    op.add_column('user', sa.Column('forumId', sa.Integer(), nullable=True))
    op.create_index(op.f('ix_user_email'), 'user', ['email'], unique=False)
    op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=False)
def downgrade():
    """Revert upgrade(): remove the indexes and the forumId column."""
    op.drop_index(op.f('ix_user_username'), table_name='user')
    op.drop_index(op.f('ix_user_email'), table_name='user')
    op.drop_column('user', 'forumId')
    for column in ('name', 'description'):
        op.drop_index(op.f('ix_mod_{0}'.format(column)), table_name='mod')
| {
"repo_name": "EIREXE/SpaceDock",
"path": "alembic/versions/20025e7424d_add_indexs_and_adjust_forum_accounts.py",
"copies": "2",
"size": "1216",
"license": "mit",
"hash": 4542202109131910000,
"line_mean": 33.7647058824,
"line_max": 85,
"alpha_frac": 0.6455592105,
"autogenerated": false,
"ratio": 3.1020408163265305,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4747600026826531,
"avg_score": null,
"num_lines": null
} |
"""add indexs for participant and ps
Revision ID: 58e8df756d74
Revises: edb6d45e5e45
Create Date: 2019-01-29 14:55:01.135611
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = "58e8df756d74"
down_revision = "edb6d45e5e45"
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the per-engine upgrade function (upgrade_<engine_name>)."""
    globals()["upgrade_{0}".format(engine_name)]()


def downgrade(engine_name):
    """Dispatch to the per-engine downgrade function (downgrade_<engine_name>)."""
    globals()["downgrade_{0}".format(engine_name)]()
def upgrade_rdr():
    """Add sign-up and enrollment-milestone time indexes for participant queries."""
    op.create_index("participant_sign_up_time", "participant", ["sign_up_time"], unique=False)
    # Enrollment-status milestone timestamps on participant_summary.
    summary_indexes = (
        ("participant_summary_core_ordered_time", "enrollment_status_core_ordered_sample_time"),
        ("participant_summary_core_stored_time", "enrollment_status_core_stored_sample_time"),
        ("participant_summary_member_time", "enrollment_status_member_time"),
        ("participant_summary_sign_up_time", "sign_up_time"),
    )
    for index_name, column in summary_indexes:
        op.create_index(index_name, "participant_summary", [column], unique=False)
def downgrade_rdr():
    """Drop every index created by upgrade_rdr()."""
    for index_name in (
        "participant_summary_sign_up_time",
        "participant_summary_member_time",
        "participant_summary_core_stored_time",
        "participant_summary_core_ordered_time",
    ):
        op.drop_index(index_name, table_name="participant_summary")
    op.drop_index("participant_sign_up_time", table_name="participant")
def upgrade_metrics():
    """No metrics-database schema changes in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No metrics-database schema changes to revert in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/58e8df756d74_add_indexs_for_participant_and_ps.py",
"copies": "1",
"size": "2105",
"license": "bsd-3-clause",
"hash": -4231493008693136000,
"line_mean": 30.8939393939,
"line_max": 113,
"alpha_frac": 0.6736342043,
"autogenerated": false,
"ratio": 3.58603066439523,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.475966486869523,
"avg_score": null,
"num_lines": null
} |
"""Add index to biobank_order_identifier table
Revision ID: 641372364227
Revises: 8ab7f6708ea3
Create Date: 2020-10-22 10:54:56.188687
"""
from alembic import op
import sqlalchemy as sa
import rdr_service.model.utils
from rdr_service.participant_enums import PhysicalMeasurementsStatus, QuestionnaireStatus, OrderStatus
from rdr_service.participant_enums import WithdrawalStatus, WithdrawalReason, SuspensionStatus, QuestionnaireDefinitionStatus
from rdr_service.participant_enums import EnrollmentStatus, Race, SampleStatus, OrganizationType, BiobankOrderStatus
from rdr_service.participant_enums import OrderShipmentTrackingStatus, OrderShipmentStatus
from rdr_service.participant_enums import MetricSetType, MetricsKey, GenderIdentity
from rdr_service.model.base import add_table_history_table, drop_table_history_table
from rdr_service.model.code import CodeType
from rdr_service.model.site_enums import SiteStatus, EnrollingStatus, DigitalSchedulingStatus, ObsoleteStatus
# revision identifiers, used by Alembic.
revision = '641372364227'
down_revision = '8ab7f6708ea3'
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the per-engine upgrade function (upgrade_<engine_name>)."""
    globals()["upgrade_{0}".format(engine_name)]()


def downgrade(engine_name):
    """Dispatch to the per-engine downgrade function (downgrade_<engine_name>)."""
    globals()["downgrade_{0}".format(engine_name)]()
def upgrade_rdr():
    """Index the identifier value column on both live and history tables."""
    for table in ('biobank_order_identifier', 'biobank_order_identifier_history'):
        op.create_index(op.f('ix_{0}_value'.format(table)), table, ['value'],
                        unique=False)
def downgrade_rdr():
    """Drop the identifier value indexes added by upgrade_rdr()."""
    for table in ('biobank_order_identifier_history', 'biobank_order_identifier'):
        op.drop_index(op.f('ix_{0}_value'.format(table)), table_name=table)
def upgrade_metrics():
    """No metrics-database schema changes in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No metrics-database schema changes to revert in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/641372364227_add_index_to_biobank_order_identifier_.py",
"copies": "1",
"size": "2252",
"license": "bsd-3-clause",
"hash": 6210660020559056000,
"line_mean": 35.3225806452,
"line_max": 131,
"alpha_frac": 0.7508880995,
"autogenerated": false,
"ratio": 3.546456692913386,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9700500548952283,
"avg_score": 0.019368848692220407,
"num_lines": 62
} |
"""add index to PS
Revision ID: f17f0686ea6b
Revises: adb4ea532f1a
Create Date: 2018-03-19 17:17:59.110590
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = "f17f0686ea6b"
down_revision = "adb4ea532f1a"
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the per-engine upgrade function (upgrade_<engine_name>)."""
    globals()["upgrade_{0}".format(engine_name)]()


def downgrade(engine_name):
    """Dispatch to the per-engine downgrade function (downgrade_<engine_name>)."""
    globals()["downgrade_{0}".format(engine_name)]()
def upgrade_rdr():
    """Add a composite (hpo_id, last_modified) index to participant_summary."""
    op.create_index(
        "participant_summary_last_modified",
        "participant_summary",
        ["hpo_id", "last_modified"],
        unique=False,
    )
def downgrade_rdr():
    """Drop the (hpo_id, last_modified) index added by upgrade_rdr()."""
    op.drop_index(
        "participant_summary_last_modified",
        table_name="participant_summary",
    )
def upgrade_metrics():
    """No metrics-database schema changes in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No metrics-database schema changes to revert in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/f17f0686ea6b_add_index_to_ps.py",
"copies": "1",
"size": "1166",
"license": "bsd-3-clause",
"hash": -5781132419942818000,
"line_mean": 23.2916666667,
"line_max": 109,
"alpha_frac": 0.6603773585,
"autogenerated": false,
"ratio": 3.5015015015015014,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46618788600015015,
"avg_score": null,
"num_lines": null
} |
"""Add index to user email.
Revision ID: 1db076d08d78
Revises: 4b73b02d9536
Create Date: 2015-01-19 17:34:30.214618
"""
# revision identifiers, used by Alembic.
revision = '1db076d08d78'
down_revision = '4b73b02d9536'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Drop obsolete legacy tables and add a unique index on user.email."""
    # Drop order matters: join tables go before the tables they reference.
    # NOTE: u'book' is intentionally NOT dropped (it was commented out in
    # the original auto-generated migration).
    obsolete_tables = (
        u'hansard',
        u'briefing_file_join',
        u'featured_committee_meeting_join',
        u'committee_info',
        u'tabled_committee_report_committee_join',
        u'book_file_join',
        u'briefing',
        u'featured_committee_meeting_report_join',
        u'committee_meeting_report',
    )
    for table in obsolete_tables:
        op.drop_table(table)
    op.create_index(u'ix_user_email', 'user', ['email'], unique=True)
def downgrade():
    """Revert upgrade(): drop the unique email index and recreate the legacy tables.

    Tables are recreated parents-first so the join tables' FK constraints
    can be re-established.

    NOTE(review): upgrade() leaves u'book' in place (its drop is commented
    out), yet this downgrade recreates u'book' — confirm that is intended
    before running a downgrade against a live database.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(u'ix_user_email', table_name='user')
    op.create_table(u'committee_meeting_report',
    sa.Column(u'id', sa.INTEGER(), server_default=sa.text(u"nextval('committee_meeting_report_id_seq'::regclass)"), nullable=False),
    sa.Column(u'body', sa.TEXT(), autoincrement=False, nullable=True),
    sa.Column(u'summary', sa.TEXT(), autoincrement=False, nullable=True),
    sa.Column(u'event_id', sa.INTEGER(), autoincrement=False, nullable=True),
    sa.PrimaryKeyConstraint(u'id', name=u'committee_meeting_report_pkey')
    )
    op.create_table(u'featured_committee_meeting_report_join',
    sa.Column(u'featured_id', sa.INTEGER(), autoincrement=False, nullable=True),
    sa.Column(u'committee_meeting_report_id', sa.INTEGER(), autoincrement=False, nullable=True),
    sa.ForeignKeyConstraint([u'committee_meeting_report_id'], [u'committee_meeting_report.id'], name=u'featured_committee_meeting_rep_committee_meeting_report_id_fkey')
    )
    op.create_table(u'briefing',
    sa.Column(u'id', sa.INTEGER(), server_default=sa.text(u"nextval('briefing_id_seq'::regclass)"), nullable=False),
    sa.Column(u'title', sa.VARCHAR(length=255), autoincrement=False, nullable=False),
    sa.Column(u'briefing_date', sa.DATE(), autoincrement=False, nullable=True),
    sa.Column(u'summary', sa.TEXT(), autoincrement=False, nullable=True),
    sa.Column(u'minutes', sa.TEXT(), autoincrement=False, nullable=True),
    sa.Column(u'presentation', sa.TEXT(), autoincrement=False, nullable=True),
    sa.Column(u'start_date', sa.DATE(), autoincrement=False, nullable=True),
    sa.PrimaryKeyConstraint(u'id', name=u'briefing_pkey')
    )
    # book_file_join references book.id; book itself is recreated below.
    op.create_table(u'book_file_join',
    sa.Column(u'book_id', sa.INTEGER(), autoincrement=False, nullable=True),
    sa.Column(u'file_id', sa.INTEGER(), autoincrement=False, nullable=True),
    sa.ForeignKeyConstraint([u'book_id'], [u'book.id'], name=u'book_file_join_book_id_fkey')
    )
    op.create_table(u'tabled_committee_report_committee_join',
    sa.Column(u'tabled_committee_report_id', sa.INTEGER(), autoincrement=False, nullable=True),
    sa.Column(u'committee_id', sa.INTEGER(), autoincrement=False, nullable=True)
    )
    op.create_table(u'committee_info',
    sa.Column(u'id', sa.INTEGER(), nullable=False),
    sa.Column(u'about', sa.VARCHAR(length=1500), autoincrement=False, nullable=True),
    sa.Column(u'contact_details', sa.VARCHAR(length=1500), autoincrement=False, nullable=True),
    sa.Column(u'organization_id', sa.INTEGER(), autoincrement=False, nullable=False),
    sa.PrimaryKeyConstraint(u'id', name=u'committee_info_pkey')
    )
    op.create_table(u'featured_committee_meeting_join',
    sa.Column(u'featured_id', sa.INTEGER(), autoincrement=False, nullable=True),
    sa.Column(u'committee_meeting_id', sa.INTEGER(), autoincrement=False, nullable=True)
    )
    op.create_table(u'briefing_file_join',
    sa.Column(u'briefing_id', sa.INTEGER(), autoincrement=False, nullable=True),
    sa.Column(u'file_id', sa.INTEGER(), autoincrement=False, nullable=True),
    sa.ForeignKeyConstraint([u'briefing_id'], [u'briefing.id'], name=u'briefing_file_join_briefing_id_fkey')
    )
    op.create_table(u'book',
    sa.Column(u'id', sa.INTEGER(), nullable=False),
    sa.Column(u'title', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
    sa.Column(u'summary', sa.TEXT(), autoincrement=False, nullable=True),
    sa.Column(u'body', sa.TEXT(), autoincrement=False, nullable=True),
    sa.Column(u'start_date', sa.DATE(), autoincrement=False, nullable=True),
    sa.Column(u'nid', sa.INTEGER(), autoincrement=False, nullable=True),
    sa.PrimaryKeyConstraint(u'id', name=u'book_pkey')
    )
    op.create_table(u'hansard',
    sa.Column(u'id', sa.INTEGER(), nullable=False),
    sa.Column(u'title', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
    sa.Column(u'meeting_date', sa.DATE(), autoincrement=False, nullable=True),
    sa.Column(u'start_date', sa.DATE(), autoincrement=False, nullable=True),
    sa.Column(u'body', sa.TEXT(), autoincrement=False, nullable=True),
    sa.PrimaryKeyConstraint(u'id', name=u'hansard_pkey')
    )
    ### end Alembic commands ###
| {
"repo_name": "Code4SA/pmg-cms-2",
"path": "migrations/versions/1db076d08d78_add_index_to_user_email.py",
"copies": "1",
"size": "5171",
"license": "apache-2.0",
"hash": 683200029542673900,
"line_mean": 49.6960784314,
"line_max": 168,
"alpha_frac": 0.7014117192,
"autogenerated": false,
"ratio": 3.126360338573156,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43277720577731565,
"avg_score": null,
"num_lines": null
} |
"""add indices for all tables
Revision ID: 3aba96e0a6ab
Revises: 1abce90e550b
Create Date: 2015-05-18 23:40:57.850712
"""
# revision identifiers, used by Alembic.
revision = '3aba96e0a6ab'
down_revision = '1abce90e550b'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Swap unique constraints for unique indexes and index common lookup columns."""
    # company: replace the unique constraint with a unique index.
    op.create_index(op.f('ix_company_company_name'), 'company', ['company_name'], unique=True)
    op.create_index(op.f('ix_company_id'), 'company', ['id'], unique=False)
    op.drop_constraint(u'company_company_name_key', 'company', type_='unique')
    # Plain lookup indexes, one per (table, column).
    for table, column in (
        ('company_contact', 'id'),
        ('company_contract_association', 'company_id'),
        ('company_contract_association', 'contract_id'),
        ('contract', 'description'),
        ('contract_property', 'id'),
        ('contract_user_association', 'contract_id'),
        ('contract_user_association', 'user_id'),
        ('flow', 'id'),
        ('line_item', 'description'),
        ('line_item', 'id'),
        ('stage', 'id'),
        ('stage_property', 'id'),
    ):
        op.create_index(op.f('ix_{0}_{1}'.format(table, column)), table, [column], unique=False)
    # users: replace the unique constraint with a unique index.
    op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
    op.drop_constraint(u'users_email_key', 'users', type_='unique')
def downgrade():
    """Restore the unique constraints and drop every index added by upgrade()."""
    op.create_unique_constraint(u'users_email_key', 'users', ['email'])
    op.drop_index(op.f('ix_users_email'), table_name='users')
    for table, column in (
        ('stage_property', 'id'),
        ('stage', 'id'),
        ('line_item', 'id'),
        ('line_item', 'description'),
        ('flow', 'id'),
        ('contract_user_association', 'user_id'),
        ('contract_user_association', 'contract_id'),
        ('contract_property', 'id'),
        ('contract', 'description'),
        ('company_contract_association', 'contract_id'),
        ('company_contract_association', 'company_id'),
        ('company_contact', 'id'),
    ):
        op.drop_index(op.f('ix_{0}_{1}'.format(table, column)), table_name=table)
    op.create_unique_constraint(u'company_company_name_key', 'company', ['company_name'])
    op.drop_index(op.f('ix_company_id'), table_name='company')
    op.drop_index(op.f('ix_company_company_name'), table_name='company')
| {
"repo_name": "codeforamerica/pittsburgh-purchasing-suite",
"path": "migrations/versions/3aba96e0a6ab_add_indices_for_all_tables.py",
"copies": "3",
"size": "3483",
"license": "bsd-3-clause",
"hash": -3674779484113913000,
"line_mean": 59.0517241379,
"line_max": 135,
"alpha_frac": 0.6899224806,
"autogenerated": false,
"ratio": 3.0741394527802295,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00570526701857237,
"num_lines": 58
} |
"""adding 1sal2 collection method
Revision ID: e4a837723c94
Revises: 86880fe5065e
Create Date: 2020-10-30 13:34:08.237109
"""
from alembic import op
import sqlalchemy as sa
import rdr_service.model.utils
from sqlalchemy.dialects import mysql
from rdr_service.participant_enums import PhysicalMeasurementsStatus, QuestionnaireStatus, OrderStatus
from rdr_service.participant_enums import WithdrawalStatus, WithdrawalReason, SuspensionStatus, QuestionnaireDefinitionStatus
from rdr_service.participant_enums import EnrollmentStatus, Race, SampleStatus, OrganizationType, BiobankOrderStatus
from rdr_service.participant_enums import OrderShipmentTrackingStatus, OrderShipmentStatus
from rdr_service.participant_enums import MetricSetType, MetricsKey, GenderIdentity, SampleCollectionMethod
from rdr_service.model.base import add_table_history_table, drop_table_history_table
from rdr_service.model.code import CodeType
from rdr_service.model.site_enums import SiteStatus, EnrollingStatus, DigitalSchedulingStatus, ObsoleteStatus
# revision identifiers, used by Alembic.
revision = 'e4a837723c94'
down_revision = '86880fe5065e'
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the per-engine upgrade function (upgrade_<engine_name>)."""
    globals()["upgrade_{0}".format(engine_name)]()


def downgrade(engine_name):
    """Dispatch to the per-engine downgrade function (downgrade_<engine_name>)."""
    globals()["downgrade_{0}".format(engine_name)]()
def upgrade_rdr():
    """Relax mail-kit timestamps to nullable; add the 1SAL2 collection-method column."""
    for ts_column in ('created', 'modified'):
        op.alter_column('biobank_mail_kit_order', ts_column,
                        existing_type=mysql.DATETIME(fsp=6),
                        nullable=True)
    op.add_column('participant_summary',
                  sa.Column('sample_1sal2_collection_method',
                            rdr_service.model.utils.Enum(SampleCollectionMethod),
                            nullable=True))
def downgrade_rdr():
    """Revert upgrade_rdr(): drop the new column, restore NOT NULL timestamps."""
    op.drop_column('participant_summary', 'sample_1sal2_collection_method')
    for ts_column in ('modified', 'created'):
        op.alter_column('biobank_mail_kit_order', ts_column,
                        existing_type=mysql.DATETIME(fsp=6),
                        nullable=False)
def upgrade_metrics():
    """No metrics-database schema changes in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No metrics-database schema changes to revert in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/e4a837723c94_adding_1sal2_collection_method.py",
"copies": "1",
"size": "2626",
"license": "bsd-3-clause",
"hash": 4430201402964534300,
"line_mean": 35.4722222222,
"line_max": 154,
"alpha_frac": 0.7223914699,
"autogenerated": false,
"ratio": 3.6830294530154277,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4905420922915428,
"avg_score": null,
"num_lines": null
} |
"""Adding accountpatternauditscore table
Revision ID: 6d2354fb841c
Revises: 67ea2aac5ea0
Create Date: 2016-06-21 19:58:12.949279
"""
# revision identifiers, used by Alembic.
revision = '6d2354fb841c'
down_revision = '67ea2aac5ea0'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the accountpatternauditscore table.

    Columns: an account_type, an account_field, an account_pattern, an
    integer score, and a required FK (itemauditscores_id) into
    itemauditscores. 'id' is the primary key.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('accountpatternauditscore',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('account_type', sa.String(length=80), nullable=False),
    sa.Column('account_field', sa.String(length=128), nullable=False),
    sa.Column('account_pattern', sa.String(length=128), nullable=False),
    sa.Column('score', sa.Integer(), nullable=False),
    sa.Column('itemauditscores_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['itemauditscores_id'], ['itemauditscores.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    ### end Alembic commands ###
def downgrade():
    """Drop the accountpatternauditscore table created by upgrade()."""
    op.drop_table('accountpatternauditscore')
| {
"repo_name": "stackArmor/security_monkey",
"path": "migrations/versions/6d2354fb841c_.py",
"copies": "3",
"size": "1110",
"license": "apache-2.0",
"hash": -6553932882600029000,
"line_mean": 30.7142857143,
"line_max": 78,
"alpha_frac": 0.7018018018,
"autogenerated": false,
"ratio": 3.4049079754601226,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5606709777260122,
"avg_score": null,
"num_lines": null
} |
"""Adding additional_reporting to (certified_)award_procurement table
Revision ID: 737cae4d1c76
Revises: 3fd9a578c9c5
Create Date: 2020-05-21 10:12:14.687816
"""
# revision identifiers, used by Alembic.
revision = '737cae4d1c76'
down_revision = '3fd9a578c9c5'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade, e.g. upgrade_data_broker()."""
    globals()["upgrade_{0}".format(engine_name)]()


def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade, e.g. downgrade_data_broker()."""
    globals()["downgrade_{0}".format(engine_name)]()
def upgrade_data_broker():
    """Add the nullable additional_reporting text column to both procurement tables."""
    for table in ('award_procurement', 'certified_award_procurement'):
        op.add_column(table, sa.Column('additional_reporting', sa.Text(), nullable=True))
def downgrade_data_broker():
    """Drop the additional_reporting column from both procurement tables."""
    for table in ('certified_award_procurement', 'award_procurement'):
        op.drop_column(table, 'additional_reporting')
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/737cae4d1c76_adding_additional_reporting_to_award_.py",
"copies": "1",
"size": "1112",
"license": "cc0-1.0",
"hash": -5588142659321194000,
"line_mean": 25.4761904762,
"line_max": 109,
"alpha_frac": 0.7050359712,
"autogenerated": false,
"ratio": 3.3696969696969696,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45747329408969695,
"avg_score": null,
"num_lines": null
} |
"""Adding ADU by build ID.
Revision ID: 58d0dc2f6aa4
Revises: 3a5471a358bf
Create Date: 2013-12-02 10:41:26.866644
"""
# revision identifiers, used by Alembic.
revision = '58d0dc2f6aa4'
down_revision = '46c7fb8a8671'
from alembic import op
from socorro.lib import citexttype, jsontype
from socorro.lib.migrations import fix_permissions, load_stored_proc
import sqlalchemy as sa
from sqlalchemy import types
from sqlalchemy.dialects import postgresql
from sqlalchemy.sql import table, column
def upgrade():
    """Create the crash_adu_by_build_signature table and (re)load its stored procedures."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table(u'crash_adu_by_build_signature',
    sa.Column(u'crash_adu_by_build_signature_id', sa.INTEGER(), nullable=False),
    sa.Column(u'signature_id', sa.INTEGER(), nullable=False),
    sa.Column(u'signature', citexttype.CitextType(), nullable=False),
    sa.Column(u'adu_date', sa.DATE(), nullable=False),
    sa.Column(u'build_date', sa.DATE(), nullable=False),
    sa.Column(u'buildid', sa.NUMERIC(), server_default='0', nullable=False),
    sa.Column(u'crash_count', sa.INTEGER(), server_default='0', nullable=False),
    sa.Column(u'adu_count', sa.INTEGER(), server_default='0', nullable=False),
    sa.Column(u'os_name', citexttype.CitextType(), nullable=False),
    sa.Column(u'channel', citexttype.CitextType(), nullable=False),
    sa.PrimaryKeyConstraint(u'crash_adu_by_build_signature_id')
    )
    ### end Alembic commands ###
    # Reload the backfill/update procs that reference the new table.
    load_stored_proc(op, ['backfill_crash_adu_by_build_signature.sql',
                          'backfill_matviews.sql',
                          'update_crash_adu_by_build_signature.sql'])
    fix_permissions(op, 'crash_adu_by_build_signature')
def downgrade():
    """Revert this migration by dropping the table upgrade() created."""
    ### commands auto generated by Alembic - please adjust! ###
    # Bug fix: upgrade() creates u'crash_adu_by_build_signature', but this
    # previously dropped the stale name u'adu_by_build', so the downgrade
    # failed and left the new table behind. Drop the table actually created.
    op.drop_table(u'crash_adu_by_build_signature')
    ### end Alembic commands ###
load_stored_proc(op, ['backfill_crash_adu_by_build_signature.sql',
'backfill_matviews.sql',
'update_crash_adu_by_build_signature.sql']) | {
"repo_name": "twobraids/socorro",
"path": "alembic/versions/58d0dc2f6aa4_adding_adu_by_build_.py",
"copies": "11",
"size": "2023",
"license": "mpl-2.0",
"hash": 1935244946528874500,
"line_mean": 37.1886792453,
"line_max": 80,
"alpha_frac": 0.6786950074,
"autogenerated": false,
"ratio": 3.2368,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.010626591447533477,
"num_lines": 53
} |
"""Adding AgencyUsers
Revision ID: 52a4062502bb
Revises: 971f341c0204
Create Date: 2017-05-25 19:28:31.144382
"""
# revision identifiers, used by Alembic.
revision = "52a4062502bb"
down_revision = "971f341c0204"
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the agency_users association table linking users to agencies."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "agency_users",
        sa.Column("user_guid", sa.String(length=64), nullable=False),
        # Closed set of supported authentication providers.
        sa.Column(
            "auth_user_type",
            sa.Enum(
                "Saml2In:NYC Employees",
                "LDAP:NYC Employees",
                "FacebookSSO",
                "MSLiveSSO",
                "YahooSSO",
                "LinkedInSSO",
                "GoogleSSO",
                "EDIRSSO",
                "AnonymousUser",
                name="auth_user_type",
            ),
            nullable=False,
        ),
        sa.Column("agency_ein", sa.String(length=4), nullable=False),
        sa.Column("is_agency_active", sa.Boolean(), nullable=False),
        sa.Column("is_agency_admin", sa.Boolean(), nullable=False),
        sa.Column("is_primary_agency", sa.Boolean(), nullable=False),
        sa.ForeignKeyConstraint(["agency_ein"], ["agencies.ein"]),
        # Composite FK back to users; guid/type changes cascade into this table.
        sa.ForeignKeyConstraint(
            ["user_guid", "auth_user_type"],
            ["users.guid", "users.auth_user_type"],
            onupdate="CASCADE",
        ),
        sa.PrimaryKeyConstraint("user_guid", "auth_user_type", "agency_ein"),
    )
    ### end Alembic commands ###
def downgrade():
    """Reverse of upgrade(): remove the agency_users association table."""
    ### commands auto generated by Alembic - please adjust! ###
    table_name = "agency_users"
    op.drop_table(table_name)
    ### end Alembic commands ###
| {
"repo_name": "CityOfNewYork/NYCOpenRecords",
"path": "migrations/versions/52a4062502bb_adding_agencyusers.py",
"copies": "1",
"size": "1693",
"license": "apache-2.0",
"hash": 9168606210826346000,
"line_mean": 29.2321428571,
"line_max": 77,
"alpha_frac": 0.5593620791,
"autogenerated": false,
"ratio": 3.633047210300429,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9676605629744868,
"avg_score": 0.0031607319311123656,
"num_lines": 56
} |
"""adding album trigger
Revision ID: 003
Revises: 002
Create Date: 2015-11-06 07:56:12.062064
"""
# revision identifiers, used by Alembic.
revision = '003'
down_revision = '002'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Install an AFTER UPDATE trigger on song that rolls play/rating
    aggregates up into the parent album row."""
    conn = op.get_bind()
    conn.execute(sa.sql.text('''
    CREATE OR REPLACE FUNCTION update_album ()
        RETURNS trigger
    AS
    $BODY$
    DECLARE
        v_count_songs integer;
        v_count_played integer;
        v_max_played timestamp;
        v_count_rated integer;
        v_max_rated timestamp;
        v_avg_rating float;
    BEGIN
        -- count_songs
        SELECT COUNT(*) INTO STRICT v_count_songs FROM song WHERE album_id = NEW.album_id;
        -- count_played
        SELECT SUM(count_played) INTO STRICT v_count_played FROM song WHERE album_id = NEW.album_id;
        -- played_at
        SELECT MAX(played_at) INTO STRICT v_max_played FROM song WHERE album_id = NEW.album_id;
        -- count_rated
        SELECT SUM(count_rated) INTO STRICT v_count_rated FROM song WHERE album_id = NEW.album_id;
        -- rated_at
        SELECT MAX(rated_at) INTO STRICT v_max_rated FROM song WHERE album_id = NEW.album_id;
        -- rating
        SELECT AVG(rating) INTO STRICT v_avg_rating FROM song WHERE album_id = NEW.album_id;

        UPDATE album SET
            count_songs = v_count_songs,
            count_played = v_count_played,
            played_at = v_max_played,
            count_rated = v_count_rated,
            rated_at = v_max_rated,
            rating = v_avg_rating
        WHERE id = new.album_id;

        RETURN NEW;
    END;
    $BODY$
    LANGUAGE plpgsql VOLATILE;

    CREATE TRIGGER update_album_trigger
        AFTER UPDATE ON song
        FOR EACH ROW
        EXECUTE PROCEDURE update_album();
    '''))
def downgrade():
    """Reverse of upgrade(): drop the trigger, then its function."""
    bind = op.get_bind()
    bind.execute(sa.sql.text('''
    DROP TRIGGER update_album_trigger ON song;
    DROP FUNCTION update_album();
    '''))
| {
"repo_name": "Tjorriemorrie/speler",
"path": "migrations/versions/003.py",
"copies": "1",
"size": "1805",
"license": "mit",
"hash": -1845261852061724000,
"line_mean": 22.141025641,
"line_max": 96,
"alpha_frac": 0.6703601108,
"autogenerated": false,
"ratio": 3.1778169014084505,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43481770122084507,
"avg_score": null,
"num_lines": null
} |
"""Adding a migration for the exploitability report.
Revision ID: 3a5471a358bf
Revises: 191d0453cc07
Create Date: 2013-10-25 07:07:33.968691
"""
# revision identifiers, used by Alembic.
revision = '3a5471a358bf'
down_revision = '4aacaea3eb48'
from alembic import op
from socorro.lib import citexttype, jsontype
from socorro.lib.migrations import load_stored_proc
import sqlalchemy as sa
from sqlalchemy import types
from sqlalchemy.dialects import postgresql
from sqlalchemy.sql import table, column
def upgrade():
    """Rebuild exploitability_reports with per-product-version columns."""
    ### commands auto generated by Alembic - please adjust! ###
    # Truncate first: the new NOT NULL product_version_id column cannot be
    # backfilled for existing rows, so the report data is regenerated below.
    op.execute('TRUNCATE exploitability_reports CASCADE');
    op.add_column(u'exploitability_reports', sa.Column(u'version_string', sa.TEXT(), nullable=True))
    op.add_column(u'exploitability_reports', sa.Column(u'product_name', sa.TEXT(), nullable=True))
    op.add_column(u'exploitability_reports', sa.Column(u'product_version_id', sa.INTEGER(), nullable=False))
    ### end Alembic commands ###
    load_stored_proc(op, ['update_exploitability.sql'])

    # Regenerate the report one day at a time for 2013-11-15 .. 2013-11-29.
    for i in range(15, 30):
        backfill_date = '2013-11-%s' % i
        op.execute("""
            SELECT backfill_exploitability('%s')
        """ % backfill_date)
    op.execute(""" COMMIT """)
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # Remove the three columns added by upgrade(), then restore the proc.
    for column_name in (u'product_version_id', u'product_name', u'version_string'):
        op.drop_column(u'exploitability_reports', column_name)
    load_stored_proc(op, ['update_exploitability.sql'])
    ### end Alembic commands ###
| {
"repo_name": "adngdb/socorro",
"path": "alembic/versions/3a5471a358bf_adding_a_migration_f.py",
"copies": "3",
"size": "1624",
"license": "mpl-2.0",
"hash": -7502828997213379000,
"line_mean": 32.8333333333,
"line_max": 108,
"alpha_frac": 0.7025862069,
"autogenerated": false,
"ratio": 3.3692946058091287,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.010926792469139107,
"num_lines": 48
} |
# Adding Annotations
import urllib
import urllib.request

import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import numpy as np
from matplotlib import style
from matplotlib.finance import candlestick_ohlc
def convert_date(date_format, encoding='utf-8'):
    """Build a numpy.loadtxt converter: decode bytes, then parse the date
    string into a matplotlib date number via mdates.strpdate2num."""
    parse = mdates.strpdate2num(date_format)

    def bytes_converter(raw):
        return parse(raw.decode(encoding))

    return bytes_converter
def graph_data(stock):
    """Fetch one month of quote data for *stock* from the Yahoo chart API and
    render it as an annotated candlestick chart.

    Side effects only (network fetch + plt.show()); returns None.
    Raises ValueError if the API returns no parseable rows.
    """
    ax1 = plt.subplot2grid((1, 1), (0, 0))

    stock_price_url = 'http://chartapi.finance.yahoo.com/instrument/1.0/' + stock + '/chartdata;type=quote;range=1m/csv'
    # Fix: `import urllib` alone does not provide the `request` submodule on
    # Python 3; `import urllib.request` is added at the top of the file.
    source_code = urllib.request.urlopen(stock_price_url).read().decode()

    # Keep only data rows: 6 comma-separated fields and no header metadata.
    stock_data = [line for line in source_code.split('\n')
                  if len(line.split(',')) == 6 and 'values' not in line]
    if not stock_data:
        raise ValueError('no quote data returned for {!r}'.format(stock))

    # API CSV column order (per the unpack below): date, close, high, low,
    # open, volume -- TODO confirm against a live response.
    date, close_price, high_price, low_price, open_price, stock_volume = np.loadtxt(
        stock_data,
        delimiter=',',
        unpack=True,
        converters={0: convert_date('%Y%m%d')})

    # Fix: candlestick_ohlc expects (time, open, high, low, close, volume);
    # the original appended (time, close, high, low, open, volume), which
    # swapped every candle's open and close.
    ohlc = [(date[i], open_price[i], high_price[i], low_price[i],
             close_price[i], stock_volume[i])
            for i in range(len(date))]

    candlestick_ohlc(ax1, ohlc, width=0.4, colorup='#77D879', colordown='#DB3F3F')

    for label in ax1.xaxis.get_ticklabels():
        label.set_rotation(45)
    for label in ax1.yaxis.get_ticklabels():
        label.set_rotation(45)
    ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
    ax1.xaxis.set_major_locator(mticker.MaxNLocator(10))
    ax1.grid(True)

    # NOTE(review): the annotation anchors x to date[8] but y to
    # high_price[11]; looks intentional for the tutorial -- confirm.
    ax1.annotate('Big News!', (date[8], high_price[11]), xytext=(0.8, 0.9),
                 textcoords='axes fraction',
                 arrowprops=dict(facecolor='grey', color='grey'))

    font_dict = {'family': 'serif', 'color': 'darkred', 'size': 15}
    ax1.text(date[10], close_price[1], 'Stock Prices', fontdict=font_dict)

    plt.xlabel('Date')
    plt.ylabel('Price')
    plt.title(stock)
    plt.legend()
    plt.subplots_adjust(left=0.09, bottom=0.2, right=0.94, top=0.9, wspace=0.2, hspace=0)
    plt.show()
# Apply the FiveThirtyEight look to all figures, then plot a user-chosen ticker.
style.use('fivethirtyeight')
name = input('Enter the name of stock\n')
graph_data(name)
| {
"repo_name": "mayankdcoder/Matplotlib",
"path": "20 - Adding Annotations.py",
"copies": "1",
"size": "2654",
"license": "mit",
"hash": 167636133290178600,
"line_mean": 38.6119402985,
"line_max": 120,
"alpha_frac": 0.5655614167,
"autogenerated": false,
"ratio": 3.496706192358366,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9554745513951547,
"avg_score": 0.0015044190213638437,
"num_lines": 67
} |
"""Adding ARN column and latest_revision_complete_hash and latest_revision_durable_hash to item table
Revision ID: bfb550a500ab
Revises: ae5c0a6aebb3
Create Date: 2016-06-23 21:16:35.951815
"""
# revision identifiers, used by Alembic.
revision = 'bfb550a500ab'
down_revision = 'ae5c0a6aebb3'
from alembic import op
import sqlalchemy as sa
import datetime
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, Session as BaseSession, relationship, deferred
import hashlib
import json
from copy import deepcopy
import dpath.util
from dpath.exceptions import PathNotFound
from six import text_type
# Minimal standalone ORM setup so this migration can query through its own
# model classes instead of depending on the application's models.
Session = sessionmaker()
Base = declarative_base()
class Technology(Base):
    """Lookup table of monitored technology types (e.g. 'redshift', 'iamuser')."""
    __tablename__ = 'technology'
    id = sa.Column(sa.Integer, primary_key=True)
    name = sa.Column(sa.String(32))
    items = relationship("Item", backref="technology")
class Item(Base):
    """A tracked resource; carries the ARN and latest-revision hashes that
    this migration's upgrade() backfills."""
    __tablename__ = 'item'
    id = sa.Column(sa.Integer, primary_key=True)
    # Amazon Resource Name; unique when present, nullable for items without one.
    arn = sa.Column(sa.Text(), nullable=True, index=True, unique=True)
    latest_revision_id = sa.Column(sa.Integer, nullable=True)
    # md5 of the full config, and of the config minus ephemeral paths.
    latest_revision_complete_hash = sa.Column(sa.String(32), index=True)
    latest_revision_durable_hash = sa.Column(sa.String(32), index=True)
    tech_id = sa.Column(sa.Integer, sa.ForeignKey("technology.id"), nullable=False)
    # Ordered newest-first, so revisions.first() yields the latest revision.
    revisions = relationship("ItemRevision", backref="item", cascade="all, delete, delete-orphan", order_by="desc(ItemRevision.date_created)", lazy="dynamic")
class ItemRevision(Base):
    """
    Every new configuration for an item is saved in a new ItemRevision.
    """
    __tablename__ = "itemrevision"
    id = sa.Column(sa.Integer, primary_key=True)
    # Whether this revision is the item's current configuration.
    active = sa.Column(sa.Boolean())
    # Full JSON configuration; deferred so it loads only when accessed.
    config = deferred(sa.Column(JSON))
    item_id = sa.Column(sa.Integer, sa.ForeignKey("item.id"), nullable=False)
    date_created = sa.Column(sa.DateTime(), default=datetime.datetime.utcnow, nullable=False, index=True)
prims = [int, str, text_type, bool, float, type(None)]
def sub_list(l):
    """Recursively walk a datastructure, sorting any lists along the way.

    :param l: list
    :return: sorted list, where any child lists are also sorted.
    """
    normalized = []
    for element in l:
        element_type = type(element)
        if element_type in prims:
            normalized.append(element)
        elif element_type is list:
            normalized.append(sub_list(element))
        elif element_type is dict:
            normalized.append(sub_dict(element))
        else:
            # Unknown types are reported and omitted from the result.
            print("Unknown Type: {}".format(element_type))
    return sorted(normalized)
def sub_dict(d):
    """Recursively walk a datastructure, sorting any lists along the way.

    :param d: dict
    :return: dict where any lists, even deeply nested ones, have been sorted.
    """
    normalized = {}
    for key, value in d.items():
        value_type = type(value)
        if value_type in prims:
            normalized[key] = value
        elif value_type is list:
            normalized[key] = sub_list(value)
        elif value_type is dict:
            normalized[key] = sub_dict(value)
        else:
            # Unknown types are reported and their keys dropped.
            print("Unknown Type: {}".format(value_type))
    return normalized
def retrieve_arn(config):
    """
    See issue #374. SM does not currently store ARNs in a consistent place.

    Checks each known location in order and returns the first truthy ARN
    found, or None.

    :param config: itemrevision config dict
    :return: ARN, if we can find it
    """
    # Top-level keys, in priority order.
    for key in ('arn', 'Arn', 'CertificateArn'):
        candidate = config.get(key)
        if candidate:
            return candidate
    # Nested one level under these keys, in priority order.
    for parent in ('group', 'role', 'user'):
        candidate = config.get(parent, {}).get('arn')
        if candidate:
            return candidate
    return None
def hash_item(item, ephemeral_paths):
    """
    Finds both hashes of a dict.

    :param item: dictionary, representing an item tracked in security_monkey
    :param ephemeral_paths: dpath patterns to exclude from the durable hash
    :return: (complete_hash, durable_hash) tuple
    """
    return hash_config(item), durable_hash(item, ephemeral_paths)
def durable_hash(item, ephemeral_paths):
    """
    Remove all ephemeral paths from the item and return the hash of the
    remaining structure.

    :param item: dictionary, representing an item tracked in security_monkey
    :return: hash of the sorted json dump of the item with all ephemeral
        paths removed.
    """
    stripped = deepcopy(item)
    for pattern in ephemeral_paths:
        try:
            dpath.util.delete(stripped, pattern, separator='$')
        except PathNotFound:
            # Path absent from this item's config; nothing to remove.
            pass
    return hash_config(stripped)
def hash_config(config):
    """Return the md5 hex digest of the canonicalized (list-sorted) config.

    Fix: hashlib.md5 requires bytes on Python 3, so the JSON dump is encoded
    to UTF-8 before hashing. On Python 2 (this file imports six.text_type,
    so both interpreters are intended) the encode is a no-op for the ASCII
    str that json.dumps produces, leaving existing hashes unchanged.
    """
    canonical = sub_dict(config)
    canonical_json = json.dumps(canonical, sort_keys=True)
    digest = hashlib.md5(canonical_json.encode('utf-8'))  # nosec: not used for security
    return digest.hexdigest()
def ephemeral_paths_for_item(item):
    """Return the dpath patterns ('$'-separated) of config fields that churn
    between scans for this item's technology; unknown technologies get []."""
    ephemeral_map = {
        'redshift': [
            "RestoreStatus",
            "ClusterStatus",
            "ClusterParameterGroups$ParameterApplyStatus",
            "ClusterParameterGroups$ClusterParameterStatusList$ParameterApplyErrorDescription",
            "ClusterParameterGroups$ClusterParameterStatusList$ParameterApplyStatus",
            "ClusterRevisionNumber"
        ],
        'securitygroup': ["assigned_to"],
        'iamuser': [
            "user$password_last_used",
            "accesskeys$*$LastUsedDate",
            "accesskeys$*$Region",
            "accesskeys$*$ServiceName"
        ]
    }
    return ephemeral_map.get(item.technology.name, [])
def upgrade():
    """Add arn and revision-hash columns/indexes to item, then backfill them
    from each item's active latest revision."""
    bind = op.get_bind()
    session = Session(bind=bind)
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('item', sa.Column('arn', sa.Text(), nullable=True))
    op.add_column('item', sa.Column('latest_revision_complete_hash', sa.String(32), nullable=True))
    op.add_column('item', sa.Column('latest_revision_durable_hash', sa.String(32), nullable=True))
    op.create_index('ix_item_arn', 'item', ['arn'], unique=True)
    # NOTE(review): 'name' is not declared on the Item model above --
    # presumably the real table has it; confirm.
    op.create_index('ix_item_name', 'item', ['name'], unique=False)
    op.create_index('ix_item_latest_revision_complete_hash', 'item', ['latest_revision_complete_hash'], unique=False)
    op.create_index('ix_item_latest_revision_durable_hash', 'item', ['latest_revision_durable_hash'], unique=False)
    ### end Alembic commands ###

    # Backfill: only items whose latest revision is active.
    query = session.query(Item) \
        .join((ItemRevision, Item.latest_revision_id == ItemRevision.id)) \
        .filter(ItemRevision.active == True)

    for item in query.all():
        # revisions is ordered newest-first, so this is the latest revision.
        revision = item.revisions.first()
        arn = retrieve_arn(revision.config)
        # Skip AWS-managed policy ARNs: they are shared across items and
        # would violate the unique ix_item_arn index.
        if arn and u'arn:aws:iam::aws:policy' not in arn:
            item.arn = arn

        ephemeral_paths = ephemeral_paths_for_item(item)
        complete_hash, durable_hash = hash_item(revision.config, ephemeral_paths)
        item.latest_revision_complete_hash = complete_hash
        item.latest_revision_durable_hash = durable_hash

    session.commit()
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # Drop the new indexes first, then the columns they cover.
    for index_name in ('ix_item_name',
                       'ix_item_arn',
                       'ix_item_latest_revision_durable_hash',
                       'ix_item_latest_revision_complete_hash'):
        op.drop_index(index_name, table_name='item')
    for column_name in ('arn',
                        'latest_revision_complete_hash',
                        'latest_revision_durable_hash'):
        op.drop_column('item', column_name)
    ### end Alembic commands ###
| {
"repo_name": "Netflix/security_monkey",
"path": "migrations/versions/bfb550a500ab_.py",
"copies": "1",
"size": "7562",
"license": "apache-2.0",
"hash": -6472899544180492000,
"line_mean": 31.3162393162,
"line_max": 158,
"alpha_frac": 0.6481089659,
"autogenerated": false,
"ratio": 3.5872865275142316,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.971733759787469,
"avg_score": 0.003611579107908348,
"num_lines": 234
} |
"""adding artist trigger
Revision ID: 004
Revises: 003
Create Date: 2015-11-06 08:00:50.481484
"""
# revision identifiers, used by Alembic.
revision = '004'
down_revision = '003'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Install an AFTER UPDATE trigger on album that rolls song/album counts
    and rating aggregates up into the parent artist row."""
    conn = op.get_bind()
    conn.execute(sa.sql.text('''
    CREATE OR REPLACE FUNCTION update_artist ()
        RETURNS trigger
    AS
    $BODY$
    DECLARE
        v_count_songs integer;
        v_count_albums integer;
        v_count_played integer;
        v_max_played timestamp;
        v_count_rated integer;
        v_max_rated timestamp;
        v_avg_rating float;
    BEGIN
        -- count_songs
        SELECT SUM(count_songs) INTO STRICT v_count_songs FROM album WHERE artist_id = NEW.artist_id;
        -- count_albums
        SELECT COUNT(id) INTO STRICT v_count_albums FROM album WHERE artist_id = NEW.artist_id;
        -- count_played
        SELECT SUM(count_played) INTO STRICT v_count_played FROM album WHERE artist_id = NEW.artist_id;
        -- played_at
        SELECT MAX(played_at) INTO STRICT v_max_played FROM album WHERE artist_id = NEW.artist_id;
        -- count_rated
        SELECT SUM(count_rated) INTO STRICT v_count_rated FROM album WHERE artist_id = NEW.artist_id;
        -- rated_at
        SELECT MAX(rated_at) INTO STRICT v_max_rated FROM album WHERE artist_id = NEW.artist_id;
        -- rating
        SELECT AVG(rating) INTO STRICT v_avg_rating FROM album WHERE artist_id = NEW.artist_id;

        UPDATE artist SET
            count_songs = v_count_songs,
            count_albums = v_count_albums,
            count_played = v_count_played,
            played_at = v_max_played,
            count_rated = v_count_rated,
            rated_at = v_max_rated,
            rating = v_avg_rating
        WHERE id = NEW.artist_id;

        RETURN NEW;
    END;
    $BODY$
    LANGUAGE plpgsql VOLATILE;

    CREATE TRIGGER update_artist_trigger
        AFTER UPDATE ON album
        FOR EACH ROW
        EXECUTE PROCEDURE update_artist();
    '''))
def downgrade():
    """Reverse of upgrade(): drop the trigger, then its function."""
    bind = op.get_bind()
    bind.execute(sa.sql.text('''
    DROP TRIGGER update_artist_trigger ON album;
    DROP FUNCTION update_artist();
    '''))
| {
"repo_name": "Tjorriemorrie/speler",
"path": "migrations/versions/004.py",
"copies": "1",
"size": "2021",
"license": "mit",
"hash": 1418983158014537500,
"line_mean": 23.3493975904,
"line_max": 99,
"alpha_frac": 0.6763978229,
"autogenerated": false,
"ratio": 3.218152866242038,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9384998205696384,
"avg_score": 0.0019104966891305701,
"num_lines": 83
} |
""" Adding banner header and type to submission window
Revision ID: 505f4b28d33d
Revises: de5e3fa1d2d2
Create Date: 2019-12-05 13:38:19.627635
"""
# revision identifiers, used by Alembic.
revision = '505f4b28d33d'
down_revision = 'de5e3fa1d2d2'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade(engine_name):
    # Dispatch to the engine-specific upgrade, e.g. upgrade_data_broker().
    globals()["upgrade_%s" % engine_name]()


def downgrade(engine_name):
    # Dispatch to the engine-specific downgrade.
    globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
    """Add banner_type (required) and header (optional) to submission_window."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('submission_window', sa.Column('banner_type', sa.Text(), nullable=True))
    op.add_column('submission_window', sa.Column('header', sa.Text(), nullable=True))
    # Backfill existing rows before tightening the constraint: the column is
    # added nullable, every row set to 'warning', then NOT NULL is applied.
    op.execute("""
        UPDATE submission_window
        SET banner_type = 'warning'
    """)
    op.alter_column('submission_window', 'banner_type', nullable=False)
    # ### end Alembic commands ###
def downgrade_data_broker():
    # ### commands auto generated by Alembic - please adjust! ###
    # Reverse of upgrade: remove both columns from submission_window.
    for column_name in ('header', 'banner_type'):
        op.drop_column('submission_window', column_name)
    # ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/505f4b28d33d_adding_banner_header_and_type_to_.py",
"copies": "1",
"size": "1266",
"license": "cc0-1.0",
"hash": 5117512835296025000,
"line_mean": 25.9361702128,
"line_max": 90,
"alpha_frac": 0.6706161137,
"autogenerated": false,
"ratio": 3.5069252077562325,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4677541321456232,
"avg_score": null,
"num_lines": null
} |
""" Adding business_types for duns
Revision ID: c3db0b20bdef
Revises: 6a7dfeb64b27
Create Date: 2020-01-22 18:58:55.987646
"""
# revision identifiers, used by Alembic.
revision = 'c3db0b20bdef'
down_revision = '6a7dfeb64b27'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade(engine_name):
    # Dispatch to the engine-specific upgrade, e.g. upgrade_data_broker().
    globals()["upgrade_%s" % engine_name]()


def downgrade(engine_name):
    # Dispatch to the engine-specific downgrade.
    globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
    # ### commands auto generated by Alembic - please adjust! ###
    # Add the same nullable text-array column to both duns tables.
    for table_name in ('duns', 'historic_duns'):
        op.add_column(table_name, sa.Column('business_types', sa.ARRAY(sa.Text()), nullable=True))
    # ### end Alembic commands ###
def downgrade_data_broker():
    # ### commands auto generated by Alembic - please adjust! ###
    # Reverse of upgrade: remove business_types from both duns tables.
    for table_name in ('duns', 'historic_duns'):
        op.drop_column(table_name, 'business_types')
    # ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/c3db0b20bdef_adding_business_types_for_duns.py",
"copies": "1",
"size": "1061",
"license": "cc0-1.0",
"hash": 6138519799474753000,
"line_mean": 24.2619047619,
"line_max": 99,
"alpha_frac": 0.6889726673,
"autogenerated": false,
"ratio": 3.1766467065868262,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43656193738868265,
"avg_score": null,
"num_lines": null
} |
"""Adding cascade to foreign keys in RuleSetting
Revision ID: 2592f3bdae72
Revises: 42e11ab5cea3
Create Date: 2020-02-11 17:02:00.651727
"""
# revision identifiers, used by Alembic.
revision = '2592f3bdae72'
down_revision = '42e11ab5cea3'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade(engine_name):
    # Dispatch to the engine-specific upgrade, e.g. upgrade_data_broker().
    globals()["upgrade_%s" % engine_name]()


def downgrade(engine_name):
    # Dispatch to the engine-specific downgrade.
    globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
    # ### commands auto generated by Alembic - please adjust! ###
    # Recreate both rule_settings foreign keys with ON DELETE CASCADE.
    for constraint_name in ('fk_rule', 'fk_impact'):
        op.drop_constraint(constraint_name, 'rule_settings', type_='foreignkey')
    op.create_foreign_key('fk_impact', 'rule_settings', 'rule_impact', ['impact_id'], ['rule_impact_id'], ondelete='CASCADE')
    op.create_foreign_key('fk_rule', 'rule_settings', 'rule_sql', ['rule_id'], ['rule_sql_id'], ondelete='CASCADE')
    # ### end Alembic commands ###
def downgrade_data_broker():
    # ### commands auto generated by Alembic - please adjust! ###
    # Recreate both rule_settings foreign keys without the cascade behavior.
    for constraint_name in ('fk_rule', 'fk_impact'):
        op.drop_constraint(constraint_name, 'rule_settings', type_='foreignkey')
    op.create_foreign_key('fk_impact', 'rule_settings', 'rule_impact', ['impact_id'], ['rule_impact_id'])
    op.create_foreign_key('fk_rule', 'rule_settings', 'rule_sql', ['rule_id'], ['rule_sql_id'])
    # ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/2592f3bdae72_adding_cascade_to_foreign_keys_in_.py",
"copies": "1",
"size": "1517",
"license": "cc0-1.0",
"hash": 866388217622406100,
"line_mean": 31.9782608696,
"line_max": 125,
"alpha_frac": 0.6763348715,
"autogenerated": false,
"ratio": 3.3267543859649122,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9370447176648522,
"avg_score": 0.026528416163278123,
"num_lines": 46
} |
"""Adding category and population groups.
Revision ID: 157debc89661
Revises: 51589067470d
Create Date: 2015-08-25 22:19:38.050000
"""
# revision identifiers, used by Alembic.
revision = '157debc89661'
down_revision = '51589067470d'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create category_group and population_group tables and link category
    and population rows to them via a new grouping_id column."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('category_group',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.Unicode(length=100), nullable=False),
        sa.Column('description', sa.UnicodeText(), nullable=True),
        # Float sort key so groups can be reordered without renumbering.
        sa.Column('grouporder', sa.Float(), nullable=False),
        sa.Column('date_created', sa.DateTime(), nullable=False),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name')
    )
    op.create_table('population_group',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.Unicode(length=100), nullable=False),
        sa.Column('description', sa.UnicodeText(), nullable=True),
        sa.Column('grouporder', sa.Float(), nullable=False),
        sa.Column('date_created', sa.DateTime(), nullable=False),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name')
    )
    # Nullable: existing rows have no group until assigned.
    op.add_column(u'category', sa.Column('grouping_id', sa.Integer(), nullable=True))
    op.add_column(u'population', sa.Column('grouping_id', sa.Integer(), nullable=True))
    ### end Alembic commands ###
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # Remove the grouping columns first, then the group tables themselves.
    for table_name in (u'population', u'category'):
        op.drop_column(table_name, 'grouping_id')
    for table_name in ('population_group', 'category_group'):
        op.drop_table(table_name)
    ### end Alembic commands ###
| {
"repo_name": "radioprotector/radremedy",
"path": "remedy/rad/migrations/versions/157debc89661_adding_category_and_population_groups.py",
"copies": "2",
"size": "1765",
"license": "mpl-2.0",
"hash": -4492683694251681300,
"line_mean": 23.2142857143,
"line_max": 87,
"alpha_frac": 0.645325779,
"autogenerated": false,
"ratio": 3.594704684317719,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5240030463317719,
"avg_score": null,
"num_lines": null
} |
"""adding cdr version to workbench workspaces
Revision ID: 66765ee98a07
Revises: 942d61446bfa
Create Date: 2021-01-26 16:38:11.859021
"""
from alembic import op
import sqlalchemy as sa
import rdr_service.model.utils
from rdr_service.participant_enums import PhysicalMeasurementsStatus, QuestionnaireStatus, OrderStatus
from rdr_service.participant_enums import WithdrawalStatus, WithdrawalReason, SuspensionStatus, QuestionnaireDefinitionStatus
from rdr_service.participant_enums import EnrollmentStatus, Race, SampleStatus, OrganizationType, BiobankOrderStatus
from rdr_service.participant_enums import OrderShipmentTrackingStatus, OrderShipmentStatus
from rdr_service.participant_enums import MetricSetType, MetricsKey, GenderIdentity
from rdr_service.model.base import add_table_history_table, drop_table_history_table
from rdr_service.model.code import CodeType
from rdr_service.model.site_enums import SiteStatus, EnrollingStatus, DigitalSchedulingStatus, ObsoleteStatus
# revision identifiers, used by Alembic.
revision = '66765ee98a07'
down_revision = '942d61446bfa'
branch_labels = None
depends_on = None
def upgrade(engine_name):
    # Dispatch to the engine-specific upgrade, e.g. upgrade_rdr().
    globals()["upgrade_%s" % engine_name]()


def downgrade(engine_name):
    # Dispatch to the engine-specific downgrade.
    globals()["downgrade_%s" % engine_name]()
def upgrade_rdr():
    # ### commands auto generated by Alembic - please adjust! ###
    # Add a nullable cdr_version string to both workbench workspace tables.
    for table_name in ('workbench_workspace_approved', 'workbench_workspace_snapshot'):
        op.add_column(table_name, sa.Column('cdr_version', sa.String(length=200), nullable=True))
    # ### end Alembic commands ###
def downgrade_rdr():
    # ### commands auto generated by Alembic - please adjust! ###
    # Reverse of upgrade: remove cdr_version from both workspace tables.
    for table_name in ('workbench_workspace_snapshot', 'workbench_workspace_approved'):
        op.drop_column(table_name, 'cdr_version')
    # ### end Alembic commands ###
def upgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    # No schema changes for the metrics database in this revision.
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    # No schema changes for the metrics database in this revision.
    pass
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/66765ee98a07_adding_cdr_version_to_workbench_.py",
"copies": "1",
"size": "2146",
"license": "bsd-3-clause",
"hash": 938757293726289400,
"line_mean": 33.6129032258,
"line_max": 125,
"alpha_frac": 0.7516309413,
"autogenerated": false,
"ratio": 3.59463986599665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.484627080729665,
"avg_score": null,
"num_lines": null
} |
"""Adding census_year to state_congressional_table
Revision ID: 5456e2207d32
Revises: 17105e26eef4
Create Date: 2018-05-03 12:20:05.945295
"""
# revision identifiers, used by Alembic.
revision = '5456e2207d32'
down_revision = '17105e26eef4'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    # Dispatch to the engine-specific upgrade, e.g. upgrade_data_broker().
    globals()["upgrade_%s" % engine_name]()


def downgrade(engine_name):
    # Dispatch to the engine-specific downgrade.
    globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
    # ### commands auto generated by Alembic - please adjust! ###
    # Add the nullable census_year column, then index it for lookups.
    index_name = op.f('ix_state_congressional_census_year')
    op.add_column('state_congressional', sa.Column('census_year', sa.Integer(), nullable=True))
    op.create_index(index_name, 'state_congressional', ['census_year'], unique=False)
    # ### end Alembic commands ###
def downgrade_data_broker():
    # ### commands auto generated by Alembic - please adjust! ###
    # Drop the index before the column it covers.
    index_name = op.f('ix_state_congressional_census_year')
    op.drop_index(index_name, table_name='state_congressional')
    op.drop_column('state_congressional', 'census_year')
    # ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/5456e2207d32_adding_census_year_to_state_.py",
"copies": "1",
"size": "1112",
"license": "cc0-1.0",
"hash": -8666728030887368000,
"line_mean": 25.4761904762,
"line_max": 117,
"alpha_frac": 0.6978417266,
"autogenerated": false,
"ratio": 3.106145251396648,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.916690485011834,
"avg_score": 0.027416425575661634,
"num_lines": 42
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.