text stringlengths 0 1.05M | meta dict |
|---|---|
"""Add LocationCache
Revision ID: 292d93c545f2
Revises: 17d2927a8743
Create Date: 2017-06-09 18:40:25.576169
"""
# revision identifiers, used by Alembic.
revision = '292d93c545f2'
down_revision = '17d2927a8743'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the LocationCache table (with per-column lookup indexes) and
    add geolocation columns (city, country, hostname) to UseLogs."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('LocationCache',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('pack', sa.Unicode(length=255), nullable=False),
        sa.Column('lookup_time', sa.DateTime(), nullable=False),
        sa.Column('hostname', sa.Unicode(length=255), nullable=True),
        sa.Column('city', sa.Unicode(length=255), nullable=True),
        sa.Column('country', sa.Unicode(length=255), nullable=True),
        sa.Column('most_specific_subdivision', sa.Unicode(length=255), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
    # Non-unique indexes on every queryable LocationCache column.
    op.create_index(u'ix_LocationCache_city', 'LocationCache', ['city'], unique=False)
    op.create_index(u'ix_LocationCache_country', 'LocationCache', ['country'], unique=False)
    op.create_index(u'ix_LocationCache_hostname', 'LocationCache', ['hostname'], unique=False)
    op.create_index(u'ix_LocationCache_lookup_time', 'LocationCache', ['lookup_time'], unique=False)
    op.create_index(u'ix_LocationCache_most_specific_subdivision', 'LocationCache', ['most_specific_subdivision'], unique=False)
    op.create_index(u'ix_LocationCache_pack', 'LocationCache', ['pack'], unique=False)
    # Denormalised geolocation columns on UseLogs, plus their indexes.
    op.add_column(u'UseLogs', sa.Column('city', sa.Unicode(length=255), nullable=True))
    op.add_column(u'UseLogs', sa.Column('country', sa.Unicode(length=255), nullable=True))
    op.add_column(u'UseLogs', sa.Column('hostname', sa.Unicode(length=255), nullable=True))
    op.create_index(u'ix_UseLogs_city', 'UseLogs', ['city'], unique=False)
    op.create_index(u'ix_UseLogs_country', 'UseLogs', ['country'], unique=False)
    op.create_index(u'ix_UseLogs_hostname', 'UseLogs', ['hostname'], unique=False)
    ### end Alembic commands ###
def downgrade():
    """Reverse of upgrade(): drop the UseLogs geolocation columns/indexes
    and the LocationCache table (indexes dropped before their table)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(u'ix_UseLogs_hostname', table_name='UseLogs')
    op.drop_index(u'ix_UseLogs_country', table_name='UseLogs')
    op.drop_index(u'ix_UseLogs_city', table_name='UseLogs')
    op.drop_column(u'UseLogs', 'hostname')
    op.drop_column(u'UseLogs', 'country')
    op.drop_column(u'UseLogs', 'city')
    op.drop_index(u'ix_LocationCache_pack', table_name='LocationCache')
    op.drop_index(u'ix_LocationCache_most_specific_subdivision', table_name='LocationCache')
    op.drop_index(u'ix_LocationCache_lookup_time', table_name='LocationCache')
    op.drop_index(u'ix_LocationCache_hostname', table_name='LocationCache')
    op.drop_index(u'ix_LocationCache_country', table_name='LocationCache')
    op.drop_index(u'ix_LocationCache_city', table_name='LocationCache')
    op.drop_table('LocationCache')
    ### end Alembic commands ###
| {
"repo_name": "gateway4labs/labmanager",
"path": "alembic/versions/292d93c545f2_add_locationcache.py",
"copies": "5",
"size": "2935",
"license": "bsd-2-clause",
"hash": 2948110497520516600,
"line_mean": 48.7457627119,
"line_max": 128,
"alpha_frac": 0.7045996593,
"autogenerated": false,
"ratio": 3.2006543075245366,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6405253966824537,
"avg_score": null,
"num_lines": null
} |
""" Add logging capabilities to DSS
"""
from __future__ import absolute_import
import os
import os.path
import datetime
import makeobj
from ..config import config
from . import thread
logdir = config['log']['dir']
class Levels(makeobj.Obj):
    """Log severity levels, ordered by increasing severity.

    ``max`` is a sentinel above every real level. Built on ``makeobj.Obj``;
    ``Levels(name)`` presumably resolves a level by name -- TODO confirm.
    """
    debug = 0
    info = 1
    warn = 2
    error = 3
    max = 99
class Writer(object):
    """Appends formatted log lines to a single file under ``logdir``.

    The file is opened lazily on the first write and then kept open;
    a lock serialises concurrent writes from multiple threads.
    """

    def __init__(self, filename):
        self.filename = filename  # name of the log file, relative to logdir
        self._opened = False      # True once self.file has been opened
        self.file = None
        self.lock = thread.Lock()
        self._format = '[{date}] [{owner}] {level.name}: {message}'

    def format(self, owner, level, message):
        """Return one newline-terminated log line for the given entry."""
        now = datetime.datetime.now()
        return self._format.format(
            date=now,
            message=message,
            owner=owner or '',
            level=level,
        ) + '\n'

    def add(self, *args):
        """Thread-safe entry point: format and write one (owner, level, message)."""
        # TODO Add to queue and write on another thread.
        with self.lock:
            self._write(*args)

    def _write(self, owner, level, message):
        # Open the file in append mode on first use, then write and flush
        # immediately so lines are visible even if the process dies.
        text = self.format(owner, level, message)
        if not self._opened:
            self.file = open(os.path.join(logdir, self.filename), 'a')
            self._opened = True
        self.file.write(text)
        self.file.flush()

    def __del__(self):
        # Best-effort close; __del__ is not guaranteed to run at interpreter
        # shutdown, but every write is flushed so nothing is lost.
        if self._opened:
            self.file.close()
class Log(object):
    """Per-owner logging facade; Writer instances are shared per filename."""

    # Class-level cache mapping filename -> shared Writer, so several Log
    # objects targeting the same file reuse one file handle and lock.
    _writers = {}

    def __init__(self, owner, filename):
        self.owner = owner
        self.writer = self._writers.get(filename)
        if self.writer is None:
            self.writer = Writer(filename)
            self._writers[filename] = self.writer

    def log(self, message, level=Levels.info):
        """Write ``message`` at ``level`` tagged with this log's owner."""
        self.writer.add(self.owner, level, message)

    def __getattr__(self, name):
        # Enables e.g. ``log.debug('msg')``: any unknown attribute is
        # treated as a level name. Assumes Levels(name) resolves a level
        # by name -- TODO confirm makeobj lookup semantics.
        level = Levels(name)
        def call(message):
            return self.log(message, level=level)
        return call
| {
"repo_name": "terabit-software/dynamic-stream-server",
"path": "dss/tools/log.py",
"copies": "2",
"size": "1852",
"license": "bsd-3-clause",
"hash": -5530340332969568000,
"line_mean": 22.4430379747,
"line_max": 70,
"alpha_frac": 0.56587473,
"autogenerated": false,
"ratio": 3.8989473684210525,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5464822098421053,
"avg_score": null,
"num_lines": null
} |
"""add login_phone_number column
Revision ID: 380d2c363481
Revises: e6605d4b0dba
Create Date: 2018-10-03 11:42:59.993435
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = "380d2c363481"
down_revision = "e6605d4b0dba"
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function for *engine_name*."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function for *engine_name*."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_rdr():
    """RDR database: add login_phone_number and make email nullable
    (a participant may now be identified by phone instead of email)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("participant_summary", sa.Column("login_phone_number", sa.String(length=80), nullable=True))
    op.alter_column("participant_summary", "email", existing_type=mysql.VARCHAR(length=255), nullable=True)
    # ### end Alembic commands ###
def downgrade_rdr():
    """RDR database: restore email as NOT NULL and drop login_phone_number.

    NOTE(review): the downgrade will fail if any row has a NULL email.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column("participant_summary", "email", existing_type=mysql.VARCHAR(length=255), nullable=False)
    op.drop_column("participant_summary", "login_phone_number")
    # ### end Alembic commands ###
def upgrade_metrics():
    """No schema changes for the metrics database in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No schema changes for the metrics database in this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/380d2c363481_add_login_phone_number_column.py",
"copies": "1",
"size": "1408",
"license": "bsd-3-clause",
"hash": 1273473990162187800,
"line_mean": 27.16,
"line_max": 110,
"alpha_frac": 0.6882102273,
"autogenerated": false,
"ratio": 3.511221945137157,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4699432172437157,
"avg_score": null,
"num_lines": null
} |
"""Add LogSource and LogChunk
Revision ID: 393be9b08e4c
Revises: 4901f27ade8e
Create Date: 2013-11-12 11:05:50.757171
"""
# revision identifiers, used by Alembic.
revision = '393be9b08e4c'
down_revision = '4901f27ade8e'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the logsource and logchunk tables plus their lookup indexes.

    logsource names a build's log stream; logchunk stores its text in
    offset/size pieces referencing a logsource row.
    """
    op.create_table(
        'logsource',
        sa.Column('id', sa.GUID(), nullable=False),
        sa.Column('build_id', sa.GUID(), nullable=False),
        sa.Column('project_id', sa.GUID(), nullable=False),
        sa.Column('name', sa.String(length=64), nullable=False),
        sa.Column('date_created', sa.DateTime(), nullable=True),
        sa.ForeignKeyConstraint(['build_id'], ['build.id'], ),
        sa.ForeignKeyConstraint(['project_id'], ['project.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index('idx_logsource_project_id', 'logsource', ['project_id'])
    op.create_index('idx_logsource_build_id', 'logsource', ['build_id'])
    op.create_table(
        'logchunk',
        sa.Column('id', sa.GUID(), nullable=False),
        sa.Column('build_id', sa.GUID(), nullable=False),
        sa.Column('project_id', sa.GUID(), nullable=False),
        sa.Column('source_id', sa.GUID(), nullable=False),
        sa.Column('offset', sa.Integer(), nullable=False),
        sa.Column('size', sa.Integer(), nullable=False),
        sa.Column('text', sa.Text(), nullable=False),
        sa.Column('date_created', sa.DateTime(), nullable=True),
        sa.ForeignKeyConstraint(['build_id'], ['build.id'], ),
        sa.ForeignKeyConstraint(['project_id'], ['project.id'], ),
        sa.ForeignKeyConstraint(['source_id'], ['logsource.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index('idx_logchunk_project_id', 'logchunk', ['project_id'])
    op.create_index('idx_logchunk_build_id', 'logchunk', ['build_id'])
    op.create_index('idx_logchunk_source_id', 'logchunk', ['source_id'])
def downgrade():
    """Drop both tables; logchunk first since it references logsource."""
    op.drop_table('logchunk')
    op.drop_table('logsource')
| {
"repo_name": "dropbox/changes",
"path": "migrations/versions/393be9b08e4c_add_logsource_and_lo.py",
"copies": "4",
"size": "2003",
"license": "apache-2.0",
"hash": -6167412444864061000,
"line_mean": 34.7678571429,
"line_max": 76,
"alpha_frac": 0.6265601598,
"autogenerated": false,
"ratio": 3.355108877721943,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0015204451345755693,
"num_lines": 56
} |
"""Add LogSource unique constraint on name
Revision ID: 2dfac13a4c78
Revises: 5896e31725d
Create Date: 2013-12-06 10:56:15.727933
"""
from __future__ import absolute_import, print_function
# revision identifiers, used by Alembic.
revision = '2dfac13a4c78'
down_revision = '5896e31725d'
from alembic import op
from sqlalchemy.sql import table, select
import sqlalchemy as sa
def upgrade():
    """De-duplicate logsource rows per (build_id, name), then enforce
    uniqueness with a constraint (which also replaces the old build_id
    index, since the unique constraint's index covers build_id lookups).
    """
    connection = op.get_bind()
    # Lightweight table constructs for issuing Core statements directly.
    logsources_table = table(
        'logsource',
        sa.Column('id', sa.GUID(), nullable=False),
        sa.Column('build_id', sa.GUID(), nullable=False),
        sa.Column('name', sa.String(64), nullable=True),
    )
    logchunks_table = table(
        'logchunk',
        sa.Column('source_id', sa.GUID(), nullable=False),
    )
    done = set()
    for logsource in connection.execute(logsources_table.select()):
        # migrate group to suite
        # Process each (build_id, name) pair once; the first row seen is
        # kept and all other rows with the same key are deleted.
        key = (logsource.build_id, logsource.name)
        if key in done:
            continue
        print("Checking LogSource %s - %s" % (
            logsource.build_id, logsource.name))
        # Delete chunks belonging to the duplicate logsource rows.
        # NOTE(review): the IN subquery selects logchunk columns but
        # filters only on logsource columns (no join condition between
        # the two tables) -- verify this produces the intended rows.
        query = logchunks_table.delete().where(
            logchunks_table.c.source_id.in_(select([logchunks_table]).where(
                sa.and_(
                    logsources_table.c.build_id == logsource.build_id,
                    logsources_table.c.name == logsource.name,
                    logsources_table.c.id != logsource.id,
                ),
            ))
        )
        connection.execute(query)
        # Delete the duplicate logsource rows themselves (all rows with
        # the same key except the one currently being kept).
        query = logsources_table.delete().where(
            sa.and_(
                logsources_table.c.build_id == logsource.build_id,
                logsources_table.c.name == logsource.name,
                logsources_table.c.id != logsource.id,
            )
        )
        connection.execute(query)
        done.add(key)
    op.create_unique_constraint(
        'unq_logsource_key', 'logsource', ['build_id', 'name'])
    op.drop_index('idx_logsource_build_id', 'logsource')
def downgrade():
    """Drop the unique constraint and restore the plain build_id index.
    Deleted duplicate rows are not (and cannot be) restored."""
    op.drop_constraint('unq_logsource_key', 'logsource')
    op.create_index('idx_logsource_build_id', 'logsource', ['build_id'])
| {
"repo_name": "bowlofstew/changes",
"path": "migrations/versions/2dfac13a4c78_add_logsource_unique.py",
"copies": "4",
"size": "2138",
"license": "apache-2.0",
"hash": 5574094116646417000,
"line_mean": 27.8918918919,
"line_max": 76,
"alpha_frac": 0.5935453695,
"autogenerated": false,
"ratio": 3.5106732348111658,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6104218604311166,
"avg_score": null,
"num_lines": null
} |
"""Add Log Table for User Actions
Revision ID: 117567def844
Revises: 2c3193839c9d
Create Date: 2017-06-19 00:39:17.408944
"""
# revision identifiers, used by Alembic.
revision = '117567def844'
down_revision = '2c3193839c9d'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Create the user_log audit table recording who did what, when,
    from where (INET source address, HTTP method, blueprint and path)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('user_log',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('ipaddr', postgresql.INET(), nullable=False),
        sa.Column('timestamp', sa.DateTime(), nullable=False),
        sa.Column('uid', sa.String(length=32), nullable=False),
        sa.Column('method', sa.Enum('GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'CONNECT', 'OPTIONS', 'TRACE', 'PATCH', name='http_enum'), nullable=True),
        sa.Column('blueprint', sa.String(length=32), nullable=False),
        sa.Column('path', sa.String(length=128), nullable=False),
        sa.Column('description', sa.String(length=128), nullable=False),
        sa.PrimaryKeyConstraint('id')
    )
    ### end Alembic commands ###
def downgrade():
    """Drop the user_log table, discarding the audit trail."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('user_log')
    ### end Alembic commands ###
| {
"repo_name": "RamZallan/conditional",
"path": "migrations/versions/117567def844_.py",
"copies": "2",
"size": "1223",
"license": "mit",
"hash": -1038309435042204300,
"line_mean": 32.9722222222,
"line_max": 146,
"alpha_frac": 0.6794766966,
"autogenerated": false,
"ratio": 3.4353932584269664,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5114869955026966,
"avg_score": null,
"num_lines": null
} |
"""Add longer ip address called pack
Revision ID: 585d74c833a6
Revises: 42bd0c8938f9
Create Date: 2016-11-23 17:43:03.266314
"""
# revision identifiers, used by Alembic.
revision = '585d74c833a6'
down_revision = '42bd0c8938f9'
from alembic import op
import sqlalchemy as sa
import sqlalchemy.sql as sql
# Minimal table stub used only to issue a DELETE against LocationCache in
# upgrade(): cached rows keyed by the old `ip` column would be stale once
# `pack` replaces it, so the cache is emptied first.
metadata = sa.MetaData()
loc = sa.Table('LocationCache', metadata,
    sa.Column('id', sa.Integer()),
)
def upgrade():
    """Replace LocationCache's `ip` column with a longer `pack` column.

    The table is emptied first so the NOT NULL `pack` column can be added
    without backfilling values for existing rows.
    """
    op.execute(loc.delete())
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column(u'LocationCache', sa.Column('pack', sa.Unicode(length=255), nullable=False))
    op.create_index(u'ix_LocationCache_pack', u'LocationCache', ['pack'], unique=False)
    op.drop_index(u'ix_LocationCache_ip', table_name=u'LocationCache')
    op.drop_column(u'LocationCache', u'ip')
    ### end Alembic commands ###
def downgrade():
    """Restore the `ip` column and drop `pack`.

    NOTE(review): `ip` is re-added NOT NULL without clearing the table,
    so this will fail if any rows remain -- confirm the table is empty.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column(u'LocationCache', sa.Column(u'ip', sa.Unicode(length=255), nullable=False))
    op.create_index(u'ix_LocationCache_ip', u'LocationCache', [u'ip'], unique=False)
    op.drop_index(u'ix_LocationCache_pack', table_name=u'LocationCache')
    op.drop_column(u'LocationCache', 'pack')
    ### end Alembic commands ###
| {
"repo_name": "porduna/weblabdeusto",
"path": "server/src/weblab/db/upgrade/regular/versions/585d74c833a6_add_longer_ip_address_called_pack.py",
"copies": "3",
"size": "1265",
"license": "bsd-2-clause",
"hash": 1021066919290035800,
"line_mean": 32.2894736842,
"line_max": 94,
"alpha_frac": 0.7011857708,
"autogenerated": false,
"ratio": 3.1157635467980294,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.009237231523917916,
"num_lines": 38
} |
"""Add lots of models
Revision ID: 7cb3cc8cc18b
Revises: 1da99da59d35
Create Date: 2017-02-04 20:46:41.924579
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy import BigInteger
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '7cb3cc8cc18b'
down_revision = '1da99da59d35'
branch_labels = None
depends_on = None
def upgrade():
    """Create event_setting and guild tables, then point the existing
    rolestate and setting tables' guild_id at guild.id."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('event_setting',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('enabled', sa.Boolean(), nullable=False),
        sa.Column('event', sa.String(), nullable=True),
        sa.Column('message', sa.String(), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_table('guild',
        # Discord snowflake IDs are assigned externally, hence no autoincrement.
        sa.Column('id', sa.BigInteger(), autoincrement=False, nullable=False),
        sa.Column('roleme_roles', postgresql.ARRAY(BigInteger()), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('id')
    )
    # Auto-named FKs (None lets the naming convention pick the name).
    op.create_foreign_key(None, 'rolestate', 'guild', ['guild_id'], ['id'])
    op.create_foreign_key(None, 'setting', 'guild', ['guild_id'], ['id'])
    # ### end Alembic commands ###
def downgrade():
    """Drop the guild FKs, then the guild and event_setting tables.

    NOTE(review): drop_constraint with a None name relies on the naming
    convention resolving the auto-generated FK names -- verify it does.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint(None, 'setting', type_='foreignkey')
    op.drop_constraint(None, 'rolestate', type_='foreignkey')
    op.drop_table('guild')
    op.drop_table('event_setting')
    # ### end Alembic commands ###
| {
"repo_name": "MJB47/Jokusoramame",
"path": "migrations/versions/7cb3cc8cc18b_add_lots_of_models.py",
"copies": "1",
"size": "1493",
"license": "mit",
"hash": -7294423259116138000,
"line_mean": 31.4565217391,
"line_max": 77,
"alpha_frac": 0.6751507033,
"autogenerated": false,
"ratio": 3.448036951501155,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4623187654801155,
"avg_score": null,
"num_lines": null
} |
"""Add lot table and relationships
Revision ID: 330
Revises: 320_drop_selection_answers
Create Date: 2015-10-14 10:48:29.311523
"""
# revision identifiers, used by Alembic.
revision = '330'
down_revision = '320_drop_selection_answers'
import itertools
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
def upgrade():
    """Create lots and framework_lots, seed the four G-Cloud lots, and add
    a nullable lot_id FK to services, draft_services and archived_services.
    """
    # Create Lots table and Lot to Framework relationship
    op.create_table(
        'lots',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('slug', sa.String(), nullable=False),
        sa.Column('name', sa.String(), nullable=False),
        sa.Column('one_service_limit', sa.Boolean(), nullable=False),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_lots_slug'), 'lots', ['slug'], unique=False)
    # Association table linking frameworks to their lots (no surrogate PK).
    op.create_table(
        'framework_lots',
        sa.Column('framework_id', sa.Integer(), nullable=False),
        sa.Column('lot_id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['framework_id'], ['frameworks.id'], ),
        sa.ForeignKeyConstraint(['lot_id'], ['lots.id'], )
    )
    # Insert G-Cloud lot records
    lot_table = table(
        'lots',
        column('name', sa.String),
        column('slug', sa.String),
        column('one_service_limit', sa.Boolean)
    )
    op.bulk_insert(lot_table, [
        {'name': 'Software as a Service', 'slug': 'saas', 'one_service_limit': False},
        {'name': 'Platform as a Service', 'slug': 'paas', 'one_service_limit': False},
        {'name': 'Infrastructure as a Service', 'slug': 'iaas', 'one_service_limit': False},
        {'name': 'Specialist Cloud Services', 'slug': 'scs', 'one_service_limit': False},
    ])
    framework_lots_table = table(
        'framework_lots',
        column('framework_id', sa.Integer),
        column('lot_id', sa.Integer)
    )
    # Add 4 lots (ids 1-4) to all G-Cloud frameworks (ids 1-4)
    # itertools.product yields all 16 (framework_id, lot_id) pairs.
    op.bulk_insert(framework_lots_table, [
        {'framework_id': framework_id, 'lot_id': lot_id}
        for framework_id, lot_id in itertools.product(range(1, 5), range(1, 5))
    ])
    # Nullable lot_id FK (plus index) on each of the three service tables.
    op.add_column(u'archived_services', sa.Column('lot_id', sa.BigInteger(), nullable=True))
    op.create_index(op.f('ix_archived_services_lot_id'), 'archived_services', ['lot_id'], unique=False)
    op.create_foreign_key(None, 'archived_services', 'lots', ['lot_id'], ['id'])
    op.add_column(u'draft_services', sa.Column('lot_id', sa.BigInteger(), nullable=True))
    op.create_index(op.f('ix_draft_services_lot_id'), 'draft_services', ['lot_id'], unique=False)
    op.create_foreign_key(None, 'draft_services', 'lots', ['lot_id'], ['id'])
    op.add_column(u'services', sa.Column('lot_id', sa.BigInteger(), nullable=True))
    op.create_index(op.f('ix_services_lot_id'), 'services', ['lot_id'], unique=False)
    op.create_foreign_key(None, 'services', 'lots', ['lot_id'], ['id'])
def downgrade():
    """Remove the lot_id columns/indexes/FKs and drop both lot tables.

    FK names are spelled out explicitly because upgrade() created them
    with auto-generated (convention-derived) names.
    """
    op.drop_constraint('services_lot_id_fkey', 'services', type_='foreignkey')
    op.drop_index(op.f('ix_services_lot_id'), table_name='services')
    op.drop_column(u'services', 'lot_id')
    op.drop_constraint('draft_services_lot_id_fkey', 'draft_services', type_='foreignkey')
    op.drop_index(op.f('ix_draft_services_lot_id'), table_name='draft_services')
    op.drop_column(u'draft_services', 'lot_id')
    op.drop_constraint('archived_services_lot_id_fkey', 'archived_services', type_='foreignkey')
    op.drop_index(op.f('ix_archived_services_lot_id'), table_name='archived_services')
    op.drop_column(u'archived_services', 'lot_id')
    op.drop_table('framework_lots')
    op.drop_index(op.f('ix_lots_slug'), table_name='lots')
    op.drop_table('lots')
| {
"repo_name": "alphagov/digitalmarketplace-api",
"path": "migrations/versions/330_add_lot_table_and_relationships.py",
"copies": "1",
"size": "3693",
"license": "mit",
"hash": -8155260936596065000,
"line_mean": 37.0721649485,
"line_max": 103,
"alpha_frac": 0.6344435418,
"autogenerated": false,
"ratio": 3.2140992167101827,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43485427585101827,
"avg_score": null,
"num_lines": null
} |
"""Add lowercase index for certificate name and cn and also for domain name
Revision ID: 8323a5ea723a
Revises: b33c838cb669
Create Date: 2020-01-10 10:51:44.776052
"""
# revision identifiers, used by Alembic.
revision = '8323a5ea723a'
down_revision = 'b33c838cb669'
from alembic import op
from sqlalchemy import text
import sqlalchemy as sa
def upgrade():
    """Add lowercase trigram GIN indexes on certificates.cn,
    certificates.name and domains.name for case-insensitive substring
    search (requires the pg_trgm extension's gin_trgm_ops class)."""
    op.create_index(
        "ix_certificates_cn_lower",
        "certificates",
        [text("lower(cn)")],
        unique=False,
        postgresql_ops={"lower(cn)": "gin_trgm_ops"},
        postgresql_using="gin",
    )
    op.create_index(
        "ix_certificates_name_lower",
        "certificates",
        [text("lower(name)")],
        unique=False,
        postgresql_ops={"lower(name)": "gin_trgm_ops"},
        postgresql_using="gin",
    )
    op.create_index(
        "ix_domains_name_lower",
        "domains",
        [text("lower(name)")],
        unique=False,
        postgresql_ops={"lower(name)": "gin_trgm_ops"},
        postgresql_using="gin",
    )
def downgrade():
    """Drop the three lowercase trigram indexes added by upgrade()."""
    op.drop_index("ix_certificates_cn_lower", table_name="certificates")
    op.drop_index("ix_certificates_name_lower", table_name="certificates")
    op.drop_index("ix_domains_name_lower", table_name="domains")
| {
"repo_name": "Netflix/lemur",
"path": "lemur/migrations/versions/8323a5ea723a_.py",
"copies": "1",
"size": "1270",
"license": "apache-2.0",
"hash": -3557942594798545400,
"line_mean": 24.4,
"line_max": 75,
"alpha_frac": 0.625984252,
"autogenerated": false,
"ratio": 3.3866666666666667,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45126509186666663,
"avg_score": null,
"num_lines": null
} |
"""Add `lower` field to all tags so that Tags can be queried efficiently"""
import sys
import logging
from website.models import Tag
from website.app import init_app
from scripts import utils as script_utils
from framework.transactions.context import TokuTransaction
logger = logging.getLogger(__name__)
def do_migration():
    """Backfill the ``lower`` field on every Tag from its ``_id``."""
    for t in Tag.find():
        logger.info('Migrating tag {!r}'.format(t))
        t.lower = t._id.lower()
        # force=True presumably bypasses dirty-checking so unchanged-looking
        # docs still persist -- TODO confirm against the model layer.
        t.save(force=True)
def main(dry=True):
    """Run the tag migration inside a transaction.

    When ``dry`` is true, an exception is raised at the end so the
    surrounding transaction rolls back and no changes are persisted.
    """
    init_app(set_backends=True, routes=False)  # Sets the storage backends on all models
    # Start a transaction that will be rolled back if any exceptions are unhandled
    with TokuTransaction():
        do_migration()
        if dry:
            # When running in dry mode force the transaction to rollback
            raise Exception('Abort Transaction - Dry Run')
if __name__ == '__main__':
    # Live run only when 'dry' is absent from the command line.
    dry = 'dry' in sys.argv
    if not dry:
        # If we're not running in dry mode log everything to a file
        script_utils.add_file_logger(logger, __file__)
    # Allow setting the log level just by appending the level to the command
    if 'debug' in sys.argv:
        logger.setLevel(logging.DEBUG)
    elif 'warning' in sys.argv:
        logger.setLevel(logging.WARNING)
    elif 'info' in sys.argv:
        logger.setLevel(logging.INFO)
    elif 'error' in sys.argv:
        logger.setLevel(logging.ERROR)
    # Finally run the migration
    main(dry=dry)
| {
"repo_name": "brandonPurvis/osf.io",
"path": "scripts/migration/migrate_tags.py",
"copies": "31",
"size": "1452",
"license": "apache-2.0",
"hash": 6736631998450641000,
"line_mean": 29.25,
"line_max": 88,
"alpha_frac": 0.6639118457,
"autogenerated": false,
"ratio": 3.934959349593496,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""add lower() indexes to pg
Revision ID: 439766f6104d
Revises: 20671b28c538
Create Date: 2012-07-09 21:33:28.404627
"""
from __future__ import unicode_literals
from alembic import op
from alembic.context import get_context
from sqlalchemy.dialects.postgresql.base import PGDialect
# revision identifiers, used by Alembic.
revision = "439766f6104d"
down_revision = "20671b28c538"
def upgrade():
    """PostgreSQL only: add lower() uniqueness indexes on group names and
    user emails/usernames, and CHECK constraints forcing permission names
    to be stored lowercase. No-op on other dialects."""
    c = get_context()
    # Raw SQL below is Postgres-specific (functional indexes, ::text casts).
    if isinstance(c.connection.engine.dialect, PGDialect):
        op.execute(
            """
    CREATE UNIQUE INDEX groups_unique_group_name_key
    ON groups
    USING btree
    (lower(group_name::text));
    """
        )
        op.execute(
            """
    ALTER TABLE groups_permissions
    ADD CONSTRAINT groups_permissions_perm_name_check CHECK (perm_name::text = lower(perm_name::text));
    """
        )  # noqa
        op.execute(
            """
    ALTER TABLE groups_resources_permissions
    ADD CONSTRAINT groups_resources_permissions_perm_name_check CHECK (perm_name::text = lower(perm_name::text));
    """
        )  # noqa
        op.execute(
            """
    ALTER TABLE users_permissions
    ADD CONSTRAINT user_permissions_perm_name_check CHECK (perm_name::text = lower(perm_name::text));
    """
        )  # noqa
        op.execute(
            """
    ALTER TABLE users_resources_permissions
    ADD CONSTRAINT users_resources_permissions_perm_name_check CHECK (perm_name::text = lower(perm_name::text));
    """
        )  # noqa
        op.execute(
            """
    CREATE UNIQUE INDEX users_email_key2 ON users (lower(email::text));
    """
        )
        op.execute(
            """
    CREATE INDEX users_username_uq2 ON users (lower(user_name::text));
    """
        )
def downgrade():
    """Intentionally a no-op: the lowercase indexes and CHECK constraints
    added by upgrade() are not removed."""
    pass
| {
"repo_name": "ergo/ziggurat_foundations",
"path": "ziggurat_foundations/migrations/versions/439766f6104d_add_lower_indexes_to.py",
"copies": "1",
"size": "1880",
"license": "bsd-3-clause",
"hash": -1146781044199118800,
"line_mean": 24.7534246575,
"line_max": 123,
"alpha_frac": 0.5792553191,
"autogenerated": false,
"ratio": 3.991507430997877,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5070762750097877,
"avg_score": null,
"num_lines": null
} |
"""Add luxembourg tables
Revision ID: 1b9b2d6fb6e
Revises: None
Create Date: 2014-11-24 15:45:03.798619
"""
# revision identifiers, used by Alembic.
revision = '1b9b2d6fb6e'
down_revision = None
from alembic import op, context
import sqlalchemy as sa
def downgrade():
    """Drop both Luxembourg layer tables from the configured schema."""
    schema = context.get_context().config.get_main_option('schema')
    op.drop_table('lux_layer_external_wms', schema=schema)
    op.drop_table('lux_layer_internal_wms', schema=schema)
def upgrade():
    """Create Luxembourg-specific extension tables for internal/external
    WMS layers; each shares its PK with (and cascades from) the base
    layer_internal_wms / layer_external_wms table in the same schema."""
    schema = context.get_context().config.get_main_option('schema')
    op.create_table(
        'lux_layer_internal_wms',
        sa.Column('id', sa.INTEGER(), autoincrement=False, nullable=False),
        sa.Column(
            'url', sa.VARCHAR(length=255), autoincrement=False,
            nullable=True),
        sa.Column(
            'layers', sa.VARCHAR(length=1000), autoincrement=False,
            nullable=True),
        sa.Column('is_poi', sa.BOOLEAN(), autoincrement=False, nullable=True),
        sa.Column(
            'collection_id', sa.INTEGER(), autoincrement=False, nullable=True),
        # id is both PK and FK to the base table (joined-table inheritance).
        sa.ForeignKeyConstraint(['id'], [schema + '.layer_internal_wms.id'],
                                name=u'lux_layer_internal_wms_fk1',
                                onupdate=u'CASCADE',
                                ondelete=u'CASCADE'),
        sa.PrimaryKeyConstraint('id', name=u'lux_layer_internal_wms_pkey'),
        schema=schema
    )
    op.create_table(
        'lux_layer_external_wms',
        sa.Column('id', sa.INTEGER(), autoincrement=False, nullable=False),
        sa.Column(
            'category_id', sa.INTEGER(), autoincrement=False, nullable=True),
        sa.ForeignKeyConstraint(['id'], [schema + '.layer_external_wms.id'],
                                name=u'lux_layer_external_wms_fk1',
                                onupdate=u'CASCADE', ondelete=u'CASCADE'),
        sa.PrimaryKeyConstraint('id', name=u'lux_layer_external_wms_pkey'),
        schema=schema
    )
| {
"repo_name": "Geoportail-Luxembourg/geoportailv3",
"path": "geoportal/LUX_alembic/versions/1b9b2d6fb6e_add_luxembourg_tables.py",
"copies": "2",
"size": "1991",
"license": "mit",
"hash": 3998540763927278600,
"line_mean": 34.5535714286,
"line_max": 79,
"alpha_frac": 0.5986941236,
"autogenerated": false,
"ratio": 3.680221811460259,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5278915935060259,
"avg_score": null,
"num_lines": null
} |
"""Add ManualApps and AllowedHost
Revision ID: 76668d91d087
Revises: 2b963e6ca2a2
Create Date: 2018-02-19 18:03:28.009222
"""
# revision identifiers, used by Alembic.
revision = '76668d91d087'
down_revision = '2b963e6ca2a2'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create AllowedHosts and ManualApplications tables with their
    indexes; ManualApplications rows belong to a GoLabOAuthUsers owner
    and carry a unique 36-char identifier (UUID-sized)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('AllowedHosts',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('url', sa.Unicode(length=100), nullable=False),
        sa.Column('creation', sa.DateTime(), nullable=False),
        sa.Column('last_update', sa.DateTime(), nullable=False),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(u'ix_AllowedHosts_creation', 'AllowedHosts', ['creation'], unique=False)
    op.create_index(u'ix_AllowedHosts_last_update', 'AllowedHosts', ['last_update'], unique=False)
    op.create_index(u'ix_AllowedHosts_url', 'AllowedHosts', ['url'], unique=False)
    op.create_table('ManualApplications',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.Unicode(length=100), nullable=False),
        sa.Column('owner_id', sa.Integer(), nullable=True),
        sa.Column('identifier', sa.Unicode(length=36), nullable=False),
        sa.Column('creation', sa.DateTime(), nullable=False),
        sa.Column('last_update', sa.DateTime(), nullable=False),
        sa.ForeignKeyConstraint(['owner_id'], ['GoLabOAuthUsers.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(u'ix_ManualApplications_creation', 'ManualApplications', ['creation'], unique=False)
    # identifier is the only unique index (lookup key for an application).
    op.create_index(u'ix_ManualApplications_identifier', 'ManualApplications', ['identifier'], unique=True)
    op.create_index(u'ix_ManualApplications_last_update', 'ManualApplications', ['last_update'], unique=False)
    op.create_index(u'ix_ManualApplications_name', 'ManualApplications', ['name'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Reverse of upgrade(): drop indexes before their tables,
    ManualApplications first (it references GoLabOAuthUsers)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(u'ix_ManualApplications_name', table_name='ManualApplications')
    op.drop_index(u'ix_ManualApplications_last_update', table_name='ManualApplications')
    op.drop_index(u'ix_ManualApplications_identifier', table_name='ManualApplications')
    op.drop_index(u'ix_ManualApplications_creation', table_name='ManualApplications')
    op.drop_table('ManualApplications')
    op.drop_index(u'ix_AllowedHosts_url', table_name='AllowedHosts')
    op.drop_index(u'ix_AllowedHosts_last_update', table_name='AllowedHosts')
    op.drop_index(u'ix_AllowedHosts_creation', table_name='AllowedHosts')
    op.drop_table('AllowedHosts')
    # ### end Alembic commands ###
| {
"repo_name": "go-lab/labmanager",
"path": "alembic/versions/76668d91d087_add_manualapps_and_allowedhost.py",
"copies": "5",
"size": "2664",
"license": "bsd-2-clause",
"hash": 6143350842212751000,
"line_mean": 45.7368421053,
"line_max": 110,
"alpha_frac": 0.7094594595,
"autogenerated": false,
"ratio": 3.46875,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007841008448911599,
"num_lines": 57
} |
"""Add mapping table
Revision ID: 0cf18be7ade1
Revises:
Create Date: 2021-03-30 17:42:59.493830
"""
from enum import Enum
import sqlalchemy as sa
from alembic import op
from sqlalchemy.sql.ddl import CreateSchema, DropSchema
from indico.core.db.sqlalchemy import PyIntEnum
# revision identifiers, used by Alembic.
revision = '0cf18be7ade1'
down_revision = None
branch_labels = None
depends_on = None
class _EntryType(int, Enum):
    """Kinds of Indico objects mapped to Citadel records.

    Values are stored in the id_map.entry_type column and referenced by
    number in the CHECK constraints below -- do not renumber.
    """
    event = 1
    contribution = 2
    subcontribution = 3
    attachment = 4
    note = 5
def upgrade():
    """Create the plugin_citadel schema and its id_map table.

    id_map links a Citadel-side record id to exactly one Indico object;
    per-entry-type CHECK constraints ensure that precisely the FK column
    matching entry_type is set and all the others are NULL.
    """
    op.execute(CreateSchema('plugin_citadel'))
    op.create_table(
        'id_map',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('citadel_id', sa.Integer(), nullable=False, index=True, unique=True),
        sa.Column('entry_type', PyIntEnum(_EntryType), nullable=False),
        sa.Column('event_id', sa.Integer(), nullable=True, index=True, unique=True),
        sa.Column('contrib_id', sa.Integer(), nullable=True, index=True, unique=True),
        sa.Column('subcontrib_id', sa.Integer(), nullable=True, index=True, unique=True),
        sa.Column('attachment_id', sa.Integer(), nullable=True, index=True, unique=True),
        sa.Column('note_id', sa.Integer(), nullable=True, index=True, unique=True),
        sa.Column('attachment_file_id', sa.Integer(), nullable=True, index=True, unique=True),
        # One CHECK per _EntryType value (1=event .. 5=note).
        sa.CheckConstraint('entry_type != 1 OR (event_id IS NOT NULL AND attachment_id IS NULL AND contrib_id IS NULL AND note_id IS NULL AND subcontrib_id IS NULL)', name='valid_event_entry'),
        sa.CheckConstraint('entry_type != 2 OR (contrib_id IS NOT NULL AND attachment_id IS NULL AND event_id IS NULL AND note_id IS NULL AND subcontrib_id IS NULL)', name='valid_contribution_entry'),
        sa.CheckConstraint('entry_type != 3 OR (subcontrib_id IS NOT NULL AND attachment_id IS NULL AND contrib_id IS NULL AND event_id IS NULL AND note_id IS NULL)', name='valid_subcontribution_entry'),
        sa.CheckConstraint('entry_type != 4 OR (attachment_id IS NOT NULL AND contrib_id IS NULL AND event_id IS NULL AND note_id IS NULL AND subcontrib_id IS NULL)', name='valid_attachment_entry'),
        sa.CheckConstraint('entry_type != 5 OR (note_id IS NOT NULL AND attachment_id IS NULL AND contrib_id IS NULL AND event_id IS NULL AND subcontrib_id IS NULL)', name='valid_note_entry'),
        sa.ForeignKeyConstraint(['attachment_id'], ['attachments.attachments.id']),
        sa.ForeignKeyConstraint(['contrib_id'], ['events.contributions.id']),
        sa.ForeignKeyConstraint(['event_id'], ['events.events.id']),
        sa.ForeignKeyConstraint(['note_id'], ['events.notes.id']),
        sa.ForeignKeyConstraint(['subcontrib_id'], ['events.subcontributions.id']),
        sa.ForeignKeyConstraint(['attachment_file_id'], ['attachments.files.id']),
        sa.PrimaryKeyConstraint('id'),
        schema='plugin_citadel'
    )
def downgrade():
    """Revert the citadel mapping-table migration."""
    # Drop the table before the schema: the schema must be empty to be dropped.
    op.drop_table('id_map', schema='plugin_citadel')
    op.execute(DropSchema('plugin_citadel'))
| {
"repo_name": "indico/indico-plugins",
"path": "citadel/indico_citadel/migrations/20210330_1742_0cf18be7ade1_add_mapping_table.py",
"copies": "1",
"size": "3027",
"license": "mit",
"hash": 4566765336188599000,
"line_mean": 47.0476190476,
"line_max": 203,
"alpha_frac": 0.6888007929,
"autogenerated": false,
"ratio": 3.5864928909952605,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.976415306415457,
"avg_score": 0.002228123948138082,
"num_lines": 63
} |
""" add markers to Devices:
- pins
- outline
"""
import numpy as np
from pp.layers import LAYER, port_type2layer
from pp.port import read_port_markers
import pp
def _rotate(v, m):
return np.dot(m, v)
def add_pin_triangle(component, port, layer=LAYER.PORT, label_layer=LAYER.TEXT):
    """Draw a right-triangle pin marker on *port* and add a text label.

    The face opposite the right angle spans the port width; the tip points
    along the port orientation (into the component).
    """
    angle_rad = port.orientation * np.pi / 180
    cos_a = np.cos(angle_rad)
    sin_a = np.sin(angle_rad)
    rotation = np.array([[cos_a, -sin_a], [sin_a, cos_a]])
    half_width = port.width / 2
    # Triangle corners in the port's local frame, rotated into place.
    base_bottom = port.position + _rotate(np.array([0, -half_width]), rotation)
    base_top = port.position + _rotate(np.array([0, half_width]), rotation)
    tip = port.position + _rotate(np.array([half_width, 0]), rotation)
    component.add_label(
        text=port.name, position=port.midpoint, layer=label_layer,
    )
    component.add_polygon([base_bottom, base_top, tip], layer=layer)
def add_pin_square_inside(
    component, port, port_length=0.1, layer=LAYER.PORT, label_layer=LAYER.TEXT
):
    """Draw a rectangular pin marker just inside the port edge.

    The rectangle spans the port width and extends *port_length* toward the
    interior of the component. No label is added; *label_layer* is accepted
    for signature compatibility with the other pin-marker functions.
    """
    angle_rad = port.orientation * np.pi / 180
    cos_a = np.cos(angle_rad)
    sin_a = np.sin(angle_rad)
    rotation = np.array([[cos_a, -sin_a], [sin_a, cos_a]])
    half_width = port.width / 2
    # Rectangle corners in the port's local frame (x points outward).
    corners = [
        np.array([0, -half_width]),
        np.array([0, half_width]),
        np.array([-port_length, half_width]),
        np.array([-port_length, -half_width]),
    ]
    polygon = [port.position + _rotate(corner, rotation) for corner in corners]
    component.add_polygon(polygon, layer=layer)
def add_pin_square(
    component, port, port_length=0.1, layer=LAYER.PORT, label_layer=LAYER.PORT
):
    """Draw a rectangular pin marker straddling the port edge (half in, half out).

    The rectangle spans the port width and extends *port_length* / 2 on each
    side of the port plane; a text label with the port name is also added.
    """
    angle_rad = port.orientation * np.pi / 180
    cos_a = np.cos(angle_rad)
    sin_a = np.sin(angle_rad)
    rotation = np.array([[cos_a, -sin_a], [sin_a, cos_a]])
    half_width = port.width / 2
    half_len = port_length / 2
    # Rectangle corners in the port's local frame (x points outward).
    corners = [
        np.array([half_len, -half_width]),
        np.array([half_len, half_width]),
        np.array([-half_len, half_width]),
        np.array([-half_len, -half_width]),
    ]
    polygon = [port.position + _rotate(corner, rotation) for corner in corners]
    component.add_polygon(polygon, layer=layer)
    component.add_label(
        text=str(port.name), position=port.midpoint, layer=label_layer,
    )
def add_outline(component, layer=LAYER.DEVREC):
    """Add the device bounding-box outline polygon to *component* on *layer*."""
    # Corners in counter-clockwise order from the bottom-left.
    bbox_corners = [
        [component.xmin, component.ymin],
        [component.xmax, component.ymin],
        [component.xmax, component.ymax],
        [component.xmin, component.ymax],
    ]
    component.add_polygon(bbox_corners, layer=layer)
def add_pins(
    component,
    add_port_marker_function=add_pin_square,
    port_type2layer=port_type2layer,
    **kwargs,
):
    """Draw a marker on every port of *component*.

    Args:
        component: component whose ports get markers
        add_port_marker_function: function used to draw a single marker
        port_type2layer: dict mapping port types to marker layers
        **kwargs: forwarded to *add_port_marker_function*
    """
    ports = getattr(component, "ports", None)
    if not ports:
        # Nothing to mark: no ports attribute or an empty port dict.
        return
    for port in ports.values():
        marker_layer = port_type2layer[port.port_type]
        add_port_marker_function(
            component=component,
            port=port,
            layer=marker_layer,
            label_layer=marker_layer,
            **kwargs,
        )
def add_pins_and_outline(
    component, pins_function=add_pins, add_outline_function=add_outline
):
    """Add the device-recognition outline, then the port pin markers."""
    add_outline_function(component)
    pins_function(component)
def add_pins_triangle(component, add_port_marker_function=add_pin_triangle, **kwargs):
    """Convenience wrapper: add pins to *component* using triangle markers."""
    return add_pins(
        component=component, add_port_marker_function=add_port_marker_function, **kwargs
    )
def test_add_pins():
    """Check that add_pins draws one marker polygon per port on each layer."""
    # mzi2x2 with electrical connections exposes 4 optical and 3 DC ports.
    component = pp.c.mzi2x2(with_elec_connections=True)
    add_pins(component)
    port_layer = port_type2layer["optical"]
    port_markers = read_port_markers(component, [port_layer])
    assert len(port_markers.polygons) == 4
    port_layer = port_type2layer["dc"]
    port_markers = read_port_markers(component, [port_layer])
    assert len(port_markers.polygons) == 3
    # for port_layer, port_type in port_layer2type.items():
    #     port_markers = read_port_markers(component, [port_layer])
    #     print(len(port_markers.polygons))
if __name__ == "__main__":
    # Run the self-test when executed directly; the snippets below are
    # alternative manual-inspection examples kept for development.
    test_add_pins()
    # from pp.components import mmi1x2
    # from pp.components import bend_circular
    # from pp.add_grating_couplers import add_grating_couplers
    # c = mmi1x2(width_mmi=5)
    # c = bend_circular()
    # cc = add_grating_couplers(c, layer_label=pp.LAYER.LABEL)
    # c = pp.c.waveguide()
    # c = pp.c.crossing(pins=True)
    # add_pins(c)
    # pp.show(c)
| {
"repo_name": "psiq/gdsfactory",
"path": "pp/add_pins.py",
"copies": "1",
"size": "5421",
"license": "mit",
"hash": 1819663384694387200,
"line_mean": 24.8142857143,
"line_max": 88,
"alpha_frac": 0.548607268,
"autogenerated": false,
"ratio": 3.144431554524362,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4193038822524362,
"avg_score": null,
"num_lines": null
} |
'''add_master_protein - assign a unique master to each peptide across
all samples using a maximum parsimony approach
======================================================================
:Author: Tom Smith, Manasa Ramakrishna
:Release: $Id$
:Date: |today|
:Tags: Python RNP Proteomics
Purpose
-------
This script takes the xlsx output from a set of input files
(*.txt/*.xlsx) and annotates the table with unique protein information
for downstream analyses.
The following columns are added:
- master_protein: The master protein(s) for the peptide. See below
for how this is derived
- master_uniprot_id: The uniprot id(s) for the master protein(s)
- protein_description: Description(s) for the master protein(s)
- protein_length: The length(s) of the master protein(s)
- crap_protein: Is the protein in the cRAP database of common
proteomics proteins, e.g keratin
- crap_associated_protein: does the protein share peptides with a
protein in the cRAP database of common proteomics proteins
If a log file is requested (--log), basic statistics are collected and
written to the log file
Fasta description format
------------------------
The source of the protein (SwissProt or TrEMBL) is derived from the
protein fasta description, with SwissProt proteins starting 'sp' and
TrEMBL 'tr'. Furthermore, the description column is derived from the
fasta description too. For this reason the fasta databases must be
correctly formatted as in the examples below. This is the standard
format for fasta files from uniprot.
format:
Three-level identifier followed by protein description:
>[sp|tr]|[Uniprot id]|[Protein name] [Description]
examples:
>sp|P27361|MK03_HUMAN Mitogen-activated protein kinase 3 OS=Homo sapiens GN=MAPK3 PE=1 SV=4
>tr|F8W1T5|F8W1T5_HUMAN GTPase RhebL1 (Fragment) OS=Homo sapiens GN=RHEBL1 PE=4 SV=1
Deriving master proteins
----------------------------
Matching peptides to their source proteins (protein inference) is a
common task in proteomics and there are many possible
approaches. Ultimately, the aim is usually to identify the most likely
source protein since taking all possible sources makes downstream
analyses very complex. Here we use the parsimonious approach to
identify a minimal set of proteins which explains all peptides
observed. In essence, the approach is as follows:
- start with list of all peptides
- sort proteins by the number of peptides observed
- take the protein(s) with the most peptides and remove these from the peptides list
- continue through the sorted proteins, removing peptides, until the
peptides list is empty
Additionally, we prioritise matches to SwissProt proteins over TrEMBL
proteins. SwissProt proteins have been manually curated and should not
contain any redundant proteins, truncated sequences, mis-annotations
etc. On the other hand, the set of TrEMBL proteins will certainly
contain proteins which are redundant with respect to the SwissProt
proteins as well as truncated and just plain wrong(!) proteins. It is
useful to include the TrEMBL proteins to catch peptides which are from
a protein or isoform which has not been curated into SwissProt
yet. However, where a SwissProt match is found, we believe it is
preferable to ignore any TrEMBL match. Here, for all peptides with
matched to both SwissProt and TrEMBL proteins, we remove all the
TrEMBL matches.
In some instances, it is not possible to assign a single protein to a
peptide. In these cases, the proteins names, uniprot ids, descriptions
and lengths are ';' separated in the outfile.
In addition to the conditions above, In some cases we are looking for
master proteins that are consistent across a set of samples. This is
to ensure that for a given set of peptides, the same master protein is
assigned to all samples.
Usage
-----
By default, the outfile will be created in the same directory with the
suffix annotated.xlsx. You can change the outfile name by specifying
the option --outfile
python add_master_protein.py --infile=RNP.xlsx --fasta-db=Human_201701.fasta
--fasta-crap-db=cRAP_FullIdentifiers.fasta --outfile=master_prot_annotated.txt
--logfile=master_prot_annot.log
Command line options
--------------------
'''
import argparse
import collections
import copy
import os
import re
import sys
import io
import gzip
import math
import pandas as pd
import numpy as np
import proteomics.fasta as fasta
from time import gmtime, strftime
def writeSectionHeader(logfile, section_header):
    """Write *section_header* to *logfile* framed for readability.

    Writes a blank line, a 78-character '=' blocker line, the header and an
    80-character '-' underline. Returns the blocker string so the caller can
    close the section with it later.
    """
    section_blocker = "=" * 78
    underliner = "-" * 80
    logfile.write("\n%s\n%s\n" % (section_blocker, section_header))
    logfile.write("%s\n" % underliner)
    return section_blocker
def main(argv=sys.argv):
    """Assign a master protein to every peptide across all input files.

    Parses the command line, builds peptide<->protein maps from the fasta
    databases, selects a minimal (parsimonious) set of master proteins with
    SwissProt prioritised over TrEMBL, then annotates and writes out each
    input table.
    """
    # NOTE(review): argv is passed as ArgumentParser's first positional
    # parameter (prog); parse_args() below still reads sys.argv directly.
    parser = argparse.ArgumentParser(
        argv, usage=__doc__)
    optional = parser.add_argument_group('optional arguments')
    required = parser.add_argument_group('required arguments')
    required.add_argument('-i', '--infile', dest="infile",
                          required=True, nargs='+',
                          help=("Provide a single file or folder with "
                                "multiple files for processing"))
    required.add_argument('-f', '--fasta-db', dest="fasta_db",
                          required=True,
                          help=("Input a fasta file for all proteins in "
                                "the species of interest"))
    required.add_argument('-fc', '--fasta-crap-db', dest="fasta_crap_db",
                          required=True,
                          help=("Input a fasta file for all proteins that "
                                "are common contaminants in a mass-spec "
                                "experiment"))
    required.add_argument('--peptide-column', dest="pep_column",
                          required=True,
                          help=("What's the name of the column with the "
                                "peptide sequence?"))
    optional.add_argument('--matches-column', dest="matches_column",
                          default=None,
                          help=("Column with the matches already identified "
                                "for the peptide"))
    optional.add_argument('--check-crap', dest="check_crap",
                          default=False, action='store_true',
                          help=("Check each peptide against the cRAP fasta"))
    optional.add_argument('--only-swissprot', dest="strict_sw",
                          default=False, action='store_true',
                          help=("Ignore matches to non-swissprot proteins"))
    optional.add_argument('--matches-separator', dest="matches_sep",
                          default=",",
                          help=("Separator for the matches column"))
    optional.add_argument('-o', '--outfile', dest="outfile",
                          default=None,
                          help=("Enter a file name for your output"))
    optional.add_argument('-os', '--outfile-suffix', dest="outfile_suffix",
                          default=None,
                          help=("Enter a suffix to add to the output files"))
    optional.add_argument('-l', '--logfile', dest="logfile",
                          default=os.devnull,
                          help=("Enter a file name for logging program "
                                "output. Else, nothing will be printed"))
    args = vars(parser.parse_args())
    if not args['outfile'] and not args['outfile_suffix']:
        raise ValueError("must supply either --outfile or "
                         "--outfile-suffix option")
    logfile = open(args['logfile'], 'w')
    logfile.write("Logfile for annotate_rnp.py %s\n\n" % (
        strftime("%Y-%m-%d %H:%M:%S", gmtime())))
    section_blocker = writeSectionHeader(logfile, "Script arguments:")
    for key, value in args.items():
        logfile.write("%s: %s\n" % (key, value))
    logfile.write("%s\n\n" % section_blocker)
    #(1) Get the mappings between peptide and proteins
    # (1.1) Build dictionaries using the fasta database to map from:
    # 1. protein accession: protein
    # 2. protein accession: sequence
    # 3. protein accession: description e.g >sp|O43707|ACTN4_HUMAN
    # 3. protein accession: long description e.g >sp|O43707|ACTN4_HUMAN|Alpha-actinin-4
    protein2description = {}
    protein2longdescription = {}
    protein2seq = {}
    tr_proteins = set()
    sp_proteins = set()
    for fa_infile in (args['fasta_db'], args['fasta_crap_db']):
        if fa_infile.endswith(".gz"):
            fa_iterator = fasta.FastaIterator(
                io.TextIOWrapper(gzip.open(fa_infile)))
        else:
            fa_iterator = fasta.FastaIterator(open(fa_infile))
        for entry in fa_iterator:
            accession = entry.title.split(" ")[0].split("|")[1]
            # I and L are indistinguishable by mass spec, so sequences are
            # normalised to L before any substring matching.
            protein2seq[accession] = entry.sequence.upper().replace("I", "L")
            protein2description[accession] = entry.title.split(" ")[0]
            protein2longdescription[accession] = "|".join(entry.title.split(" ")[0:2])
            if entry.title.split(" ")[0].split("|")[0] == "sp":
                sp_proteins.add(accession)
            elif entry.title.split(" ")[0].split("|")[0] == "tr":
                tr_proteins.add(accession)
            else:
                raise ValueError("Protein does not appear to be either"
                                 "SwissProt(sp) or TrEMBL(tr)")
    crap_proteins = set()
    associated_crap_proteins = set()
    if args['fasta_crap_db'].endswith(".gz"):
        fa_iterator = fasta.FastaIterator(
            io.TextIOWrapper(gzip.open(args['fasta_crap_db'])))
    else:
        fa_iterator = fasta.FastaIterator(open(args['fasta_crap_db']))
    for entry in fa_iterator:
        accession = entry.title.split(" ")[0].split("|")[1]
        crap_proteins.add(accession)
    # (1.2) Parse the infiles to obtain maps of peptides to proteins and vice versa
    pep2pro = collections.defaultdict(lambda: collections.defaultdict(set))
    pep2allpro = collections.defaultdict(set)
    pro2pep = collections.defaultdict(set)
    top_level_proteins = set()
    initial_proteins = set()
    if not args['matches_column']:
        peptides = set()
    for infile in args['infile']:
        # read the data into a dataframe
        infile_split = infile.split(".")
        if infile_split[-1] == "xlsx":
            peptide_df = pd.read_excel(infile)
        elif infile_split[-1] in ["text", "txt", "tsv"]:
            peptide_df = pd.read_table(infile, sep='\t', comment=None)
        elif infile_split[-1] == "csv":
            peptide_df = pd.read_table(infile, sep=',', comment=None)
        else:
            raise ValueError("File type must one of .xlsx, "
                             ".txt, .text, .tsv, .csv")
        # add some basic annotations
        #rnp_df['tr_only'] = [x.count("sp|") == 0 for x in rnp_df['Proteins']]
        #rnp_df['matches'] = [len(x.split(",")) for x in rnp_df['Proteins']]
        # if matches have already been made, use these
        # (1.1) extract the initial mappings between proteins and peptides
        if args['matches_column']:
            for row_ix, row_values in peptide_df[
                [args['matches_column'], args['pep_column']]].iterrows():
                # if empty match, will be converted to NaN (type=float)
                if type(row_values[args['matches_column']]) is float:
                    # could manually search against all proteins in database?
                    continue
                proteins = row_values[args['matches_column']].split(
                    args['matches_sep'])
                # rather annoyingly, PD adds "#CONTAM#" to the crap protein ids
                proteins = [x.replace("#CONTAM#", "") for x in proteins]
                peptide = row_values[args['pep_column']]
                if args['check_crap']:
                    add_crap_proteins = []
                    for prot in crap_proteins:
                        if peptide.replace("I", "L") in protein2seq[prot]:
                            add_crap_proteins.append(prot)
                    proteins.extend(add_crap_proteins)
                '''if peptide == "RTPPAGVFYQGWSATPIANGSLGHDIHHPR":
                    add_all_proteins = []
                    print(proteins)
                    for prot in protein2seq:
                        if peptide.replace("I", "L") in protein2seq[prot]:
                            add_all_proteins.append(prot)
                    proteins.extend(add_all_proteins)
                    print(proteins)
                    raise ValueError()'''
                for protein in proteins:
                    if protein in crap_proteins:
                        # all co-matching proteins are tainted by the cRAP hit
                        associated_crap_proteins.update(proteins)
                    if protein not in protein2seq:
                        logfile.write(
                            "protein %s matches peptide %s but is not found "
                            "in fasta database\n" % (protein, peptide))
                # remove proteins not in fasta database
                proteins = set([prot for prot in proteins if prot in protein2seq])
                if peptide in pep2pro:
                    if not pep2allpro[peptide] == proteins:
                        current_protein_matches = ", ".join(pep2allpro[peptide])
                        new_protein_matches = ", ".join(proteins)
                        logfile.write(
                            ("The same peptide is observed more than once with "
                             "different proteins! : peptide = %(peptide)s, "
                             "matching proteins = %(current_protein_matches)s "
                             "or %(new_protein_matches)s\n" % locals()))
                    pep2allpro[peptide].update(proteins)
                else:
                    pep2allpro[peptide] = proteins
                for protein in proteins:
                    initial_proteins.add(protein)
                    pro2pep[protein].add(peptide)
                    protein_description = protein2description[protein]
                    # level 1 = SwissProt (preferred), level 2 = TrEMBL
                    if protein in sp_proteins:
                        protein_level = 1
                        top_level_proteins.add(protein)
                    elif protein in tr_proteins:
                        protein_level = 2
                    else:
                        raise ValueError("Protein does not appear to be either"
                                         "SwissProt(sp) or TrEMBL(tr)")
                    pep2pro[peptide][protein_level].add(protein)
        else:  # if we don't have a column of matches, get the set of all peptides
            peptides.update(peptide_df[args['pep_column']])
    if not args['matches_column']:
        # search against all proteins in the provided databases
        n = 0
        for peptide in peptides:
            n += 1
            if n % 1000 == 0:
                logfile.write("searched %i peptides against database %s\n" % (
                    n, strftime("%Y-%m-%d %H:%M:%S", gmtime())))
            proteins = [prot for prot in protein2seq if
                        peptide in protein2seq[prot]]
            for protein in proteins:
                initial_proteins.add(protein)
                pro2pep[protein].add(peptide)
                protein_description = protein2description[protein]
                if protein in sp_proteins:
                    protein_level = 1
                    top_level_proteins.add(protein)
                elif protein in tr_proteins:
                    protein_level = 2
                else:
                    raise ValueError("Protein does not appear to be either"
                                     "SwissProt(sp) or TrEMBL(tr)")
                pep2pro[peptide][protein_level].add(protein)
    section_blocker = writeSectionHeader(logfile, "Initial file(s) stats")
    logfile.write("# initial peptides: %i\n" % len(pep2pro))
    logfile.write("# initial proteins: %i\n" % len(pro2pep))
    logfile.write("# initial SwissProt proteins: %i\n" % len(top_level_proteins))
    logfile.write("# initial TrEMBL proteins: %i\n" % (
        len(pro2pep)-len(top_level_proteins)))
    logfile.write("%s\n\n" % section_blocker)
    if not args['strict_sw']:
        section_blocker = writeSectionHeader(
            logfile, "Deciding which TrEMBL proteins to retain:")
        # (1.2) find the peptides with only TrEMBL protein matches and
        # 'upgrade' these TrEMBL proteins to being equivalent to SwissProt
        # across all peptides where these TrEMBL proteins match
        tr_only_peptides = set([x for x in pep2pro.keys() if len(pep2pro[x][1]) == 0])
        logfile.write("# peptides with only TrEMBL matches: %i\n" % (
            len(tr_only_peptides)))
        set_upgraded = set()
        for peptide in tr_only_peptides:
            upgraded = pep2pro[peptide][2]
            set_upgraded.update(upgraded)
            top_level_proteins.update(upgraded)
        logfile.write("# TrEMBL proteins retained as no SwissProt matches for "
                      "peptide: %i\n" % (len(set_upgraded)))
        # 'upgrade' the selected TrEMBL proteins
        for peptide in pep2pro:
            pep2pro[peptide][2] = pep2pro[peptide][2].difference(set_upgraded)
            pep2pro[peptide][1] = pep2pro[peptide][1].union(set_upgraded)
        logfile.write("%s\n\n" % section_blocker)
    # (1.3) Use a parsimonious approach to identify the minimum number
    # of proteins required to cover all the peptides:
    # Start from the protein(s) with the most peptides and mark these as covered.
    # Continue with remaining proteins in order of peptides per protein
    # until all peptides are covered
    section_blocker = writeSectionHeader(
        logfile, ("Parsimonious method to identify minimal set of proteins"
                  " to account for all peptides %s" % (
                      strftime("%Y-%m-%d %H:%M:%S", gmtime()))))
    retained_proteins = []
    peptides = copy.deepcopy(set(pep2pro.keys()))
    # NOTE(review): peptide_counts is never used below
    peptide_counts = {}
    tmppro2pep = copy.deepcopy(pro2pep)
    new_top_level_proteins = copy.deepcopy(top_level_proteins)
    new_pep2pro = collections.defaultdict(set)
    peptide_count = max(map(len, tmppro2pep.values()))
    while True:
        # (1.3.1) If all peptides covered or the maximum peptides per
        # protein = 0, break.
        if len(peptides) == 0 or peptide_count == 0:
            logfile.write("All peptides are now accounted for %s\n" % (
                strftime("%Y-%m-%d %H:%M:%S", gmtime())))
            break
        peptide_count -= 1
        top_proteins = set()
        top_score = 0
        #(1.3.2) Find the proteins with the highest number of peptide matches
        for protein in new_top_level_proteins:
            if len(tmppro2pep[protein]) == top_score:
                top_proteins.add(protein)
            elif len(tmppro2pep[protein]) > top_score:
                top_score = len(tmppro2pep[protein])
                top_proteins = set((protein,))
        logfile.write("%i protein(s) with %i peptides\n" % (
            len(top_proteins), top_score))
        # (1.3.3) Remove the top proteins and the associated peptides
        for top_protein in top_proteins:
            new_top_level_proteins.remove(top_protein)
            retained_proteins.append(top_protein)
            for peptide in pro2pep[top_protein]:
                new_pep2pro[peptide].add(top_protein)
                if peptide in peptides:
                    peptides.remove(peptide)
                for protein in pep2pro[peptide][1]:
                    if protein == top_protein:
                        continue
                    if peptide in tmppro2pep[protein]:
                        tmppro2pep[protein].remove(peptide)
    logfile.write("\n%i proteins retained\n" % len(retained_proteins))
    logfile.write("%i SwissProt proteins retained\n" % len(
        set(retained_proteins).intersection(sp_proteins)))
    logfile.write("%i TrEMBL proteins retained\n" % len(
        set(retained_proteins).intersection(tr_proteins)))
    logfile.write("\nNote: If not all SwissProt proteins were retained, this means\n"
                  "these proteins only included peptides which were observed\n"
                  "in other proteins which had a greater number of peptides\n")
    logfile.write("%s\n\n" % section_blocker)
    section_blocker = writeSectionHeader(logfile, "proteins per peptide:")
    counts = collections.Counter([len(x) for x in new_pep2pro.values()])
    sum_counts = sum(counts.values())
    for k, v in counts.items():
        logfile.write("%i peptide(s) (%.2f %%) have %i master protein(s)\n" % (
            v, (100 * v)/sum_counts, k))
    logfile.write("%s\n\n" % section_blocker)
    # Check all the peptides are covered
    if not args['strict_sw']:
        assert set(pep2pro.keys()).difference(set(new_pep2pro.keys())) == set()
    else:
        missing_peptides = set(pep2pro.keys()).difference(set(new_pep2pro.keys()))
        logfile.write("%i peptide(s) (%.2f %%) have no master protein(s)\n" % (
            len(missing_peptides), (100 * len(missing_peptides))/sum_counts))
    if args['outfile']:
        # single outfile: each input table is appended to it below
        outfile = open(args['outfile'], "w")
    for infile in args['infile']:
        # read the data into a dataframe
        infile_split = infile.split(".")
        if infile_split[-1] == "xlsx":
            peptide_df = pd.read_excel(infile)
        elif infile_split[-1] in ["text", "txt", "tsv"]:
            peptide_df = pd.read_table(infile, sep='\t', comment=None)
        elif infile_split[-1] == "csv":
            peptide_df = pd.read_table(infile, sep=',', comment=None)
        else:
            raise ValueError("File type must one of .xlsx, "
                             ".txt, .text, .tsv, .csv")
        # add the top protein and uniprot id annotations
        peptide_df['master_protein'] = [
            ";".join(new_pep2pro[protein]) for protein in peptide_df[args['pep_column']]]
        # (1.5) derive further annotations
        protein_lengths = []
        protein_descriptions = []
        crap_protein = []
        associated_crap_protein = []
        peptide_start = []
        peptide_end = []
        for ix, row in peptide_df.iterrows():
            proteins = row['master_protein'].split(";")
            pep_sequence = row['Sequence'].upper().replace("I", "L").replace("X", "L")
            if proteins == [""]:
                # no master protein assigned: blank out all annotations
                protein_lengths.append("")
                protein_descriptions.append("")
                crap_protein.append("")
                associated_crap_protein.append("")
                peptide_start.append("")
                peptide_end.append("")
            else:
                protein_lengths.append(
                    ";".join(map(str, [len(protein2seq[x]) for x in proteins])))
                protein_descriptions.append(
                    ";".join([protein2description[x] for x in proteins]))
                # (1.5.1) does peptide match a cRAP protein?
                crap = 0
                for protein in proteins:
                    if protein in crap_proteins:
                        crap = 1
                        break
                crap_protein.append(crap)
                # (1.5.2) does peptide match a protein associated with a cRAP protein?
                associated_crap = 0
                for protein in proteins:
                    if protein in associated_crap_proteins:
                        associated_crap = 1
                        break
                associated_crap_protein.append(associated_crap)
                starts = []
                ends = []
                for protein in proteins:
                    protein_sequence = protein2seq[protein]
                    all_matches = re.findall(pep_sequence, protein_sequence)
                    if len(all_matches) > 1:
                        logfile.write(
                            "peptide: %s is observed more than once in protein: %s\n" % (
                                pep_sequence, protein))
                        starts.append("NA")
                        ends.append("NA")
                    elif len(all_matches) == 0:
                        logfile.write(
                            "peptide: %s is not observed in protein: %s\n" % (
                                pep_sequence, protein))
                        starts.append("NA")
                        ends.append("NA")
                    else:
                        peptide_match = re.search(pep_sequence, protein_sequence)
                        starts.append(peptide_match.start())
                        ends.append(peptide_match.end())
                try:
                    peptide_start.append(";".join(map(str, starts)))
                    peptide_end.append(";".join(map(str, ends)))
                except:
                    print(starts)
                    print(ends)
                    raise ValueError()
        peptide_df['protein_length'] = protein_lengths
        peptide_df['protein_description'] = protein_descriptions
        peptide_df['peptide_start'] = peptide_start
        peptide_df['peptide_end'] = peptide_end
        peptide_df['crap_protein'] = crap_protein
        peptide_df['associated_crap_protein'] = associated_crap_protein
        peptide_df['unique'] = [1 if len(x.split(";"))==1 else 0
                                for x in peptide_df['master_protein']]
        if args['outfile']:
            peptide_df['filename'] = infile
            peptide_df.to_csv(outfile, index=False, sep="\t", mode="a")
            os.chmod(args['outfile'], 0o666)
        else:
            outfile = ".".join(infile.split(".")[:-1]) + args['outfile_suffix']
            peptide_df.to_csv(outfile, index=False, sep="\t")
            os.chmod(outfile, 0o666)
    if args['outfile']:
        outfile.close()
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit status.
    sys.exit(main(sys.argv))
| {
"repo_name": "TomSmithCGAT/CamProt",
"path": "camprot/scripts/add_master_protein.py",
"copies": "1",
"size": "26322",
"license": "mit",
"hash": -7171799152851391000,
"line_mean": 40.780952381,
"line_max": 91,
"alpha_frac": 0.5673201125,
"autogenerated": false,
"ratio": 3.816441931274467,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48837620437744667,
"avg_score": null,
"num_lines": null
} |
"""add mastodon_instances
Revision ID: f8a153bc809b
Revises: 5fec5f5e8a5e
Create Date: 2017-08-23 11:27:19.223721
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f8a153bc809b'
down_revision = '5fec5f5e8a5e'
branch_labels = None
depends_on = None
def upgrade():
    """Create the mastodon_instances table and seed it with known instances."""
    # popularity (higher = preferred) has a server-side default of 10 so
    # newly discovered instances start below the seeded well-known ones.
    op.create_table('mastodon_instances',
    sa.Column('instance', sa.String(), nullable=False),
    sa.Column('popularity', sa.Float(), server_default='10', nullable=False),
    sa.PrimaryKeyConstraint('instance', name=op.f('pk_mastodon_instances'))
    )
    op.execute("""
        INSERT INTO mastodon_instances (instance, popularity) VALUES
        ('mastodon.social', 100),
        ('mastodon.cloud', 90),
        ('social.tchncs.de', 80),
        ('mastodon.xyz', 70),
        ('mstdn.io', 60),
        ('awoo.space', 50),
        ('cybre.space', 40),
        ('mastodon.art', 30)
        ;
    """)
def downgrade():
    """Drop the mastodon_instances table (removes the seed data with it)."""
    op.drop_table('mastodon_instances')
| {
"repo_name": "codl/forget",
"path": "migrations/versions/f8a153bc809b_add_mastodon_instances.py",
"copies": "1",
"size": "1025",
"license": "isc",
"hash": -5219611567132404000,
"line_mean": 24.625,
"line_max": 77,
"alpha_frac": 0.6117073171,
"autogenerated": false,
"ratio": 3.115501519756839,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9221729393055027,
"avg_score": 0.0010958887603624446,
"num_lines": 40
} |
"""add measuredpositiontable
Revision ID: 146580790cd
Revises: 2902af6c9ef9
Create Date: 2015-09-25 11:25:11.921242
"""
# revision identifiers, used by Alembic.
revision = '146580790cd'
down_revision = '2902af6c9ef9'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the LoadPositionTbl and MeasuredPositionTbl tables."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('LoadPositionTbl',
    sa.Column('idloadpositionTbl', sa.Integer(), nullable=False),
    sa.Column('identifier', sa.String(length=80), nullable=True),
    sa.Column('position', sa.Integer(), nullable=True),
    sa.Column('loadName', sa.String(length=45), nullable=True),
    sa.Column('weight', sa.Float(), nullable=True),
    sa.Column('note', sa.BLOB(), nullable=True),
    sa.ForeignKeyConstraint(['identifier'], ['IrradiationPositionTbl.identifier'], ),
    sa.ForeignKeyConstraint(['loadName'], ['LoadTbl.name'], ),
    sa.PrimaryKeyConstraint('idloadpositionTbl')
    )
    # x/y/z record the stage coordinates at measurement time; is_degas flags
    # degassing runs.
    op.create_table('MeasuredPositionTbl',
    sa.Column('idmeasuredpositionTbl', sa.Integer(), nullable=False),
    sa.Column('position', sa.Integer(), nullable=True),
    sa.Column('x', sa.Float(), nullable=True),
    sa.Column('y', sa.Float(), nullable=True),
    sa.Column('z', sa.Float(), nullable=True),
    sa.Column('is_degas', sa.Boolean(), nullable=True),
    sa.Column('analysisID', sa.Integer(), nullable=True),
    sa.Column('loadName', sa.String(length=45), nullable=True),
    sa.ForeignKeyConstraint(['analysisID'], ['AnalysisTbl.idanalysisTbl'], ),
    sa.ForeignKeyConstraint(['loadName'], ['LoadTbl.name'], ),
    sa.PrimaryKeyConstraint('idmeasuredpositionTbl')
    )
    ### end Alembic commands ###
def downgrade():
    """Drop the tables created by this revision."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('MeasuredPositionTbl')
    op.drop_table('LoadPositionTbl')
    ### end Alembic commands ###
| {
"repo_name": "NMGRL/pychron",
"path": "alembic_dvc/versions/146580790cd_add_measuredposition.py",
"copies": "3",
"size": "2212",
"license": "apache-2.0",
"hash": -3477187240576465000,
"line_mean": 43.24,
"line_max": 101,
"alpha_frac": 0.5777576854,
"autogenerated": false,
"ratio": 4.103896103896104,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6181653789296104,
"avg_score": null,
"num_lines": null
} |
"""add meta.created
Revision ID: d2176bc400c8
Revises: 661fce6a2f64
Create Date: 2019-12-02 07:14:01.145411
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd2176bc400c8'
down_revision = '661fce6a2f64'
branch_labels = None
depends_on = None
def upgrade(engine_name):
    # Multi-database setup: dispatch to the upgrade_<engine_name>() function.
    globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
    # Multi-database setup: dispatch to the downgrade_<engine_name>() function.
    globals()["downgrade_%s" % engine_name]()
def upgrade_app():
    # No app-database changes in this revision.
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_app():
    # No app-database changes in this revision.
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def upgrade_ingest():
    # No ingest-database changes in this revision.
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_ingest():
    # No ingest-database changes in this revision.
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def upgrade_fingerprint():
    # Nullable so existing rows need no backfill; timezone-aware timestamp.
    op.add_column(u'meta', sa.Column('created', sa.DateTime(timezone=True), nullable=True))
def downgrade_fingerprint():
    # Revert the fingerprint-database change: drop meta.created.
    op.drop_column(u'meta', 'created')
| {
"repo_name": "lalinsky/acoustid-server",
"path": "alembic/versions/d2176bc400c8_add_meta_created.py",
"copies": "1",
"size": "1173",
"license": "mit",
"hash": 6112639460864543000,
"line_mean": 19.9464285714,
"line_max": 91,
"alpha_frac": 0.6598465473,
"autogenerated": false,
"ratio": 3.45,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.960887605040559,
"avg_score": 0.00019409937888198756,
"num_lines": 56
} |
"""Add metadata for faster downloads
Revision ID: 490693c8dfe0
Revises: 4b75b53bdb19
Create Date: 2017-07-19 16:48:32.029613
"""
# revision identifiers, used by Alembic.
revision = '490693c8dfe0'
down_revision = '4b75b53bdb19'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add download/processing bookkeeping columns and their indexes to
    RepositoryApp2languages and RepositoryApps."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('RepositoryApp2languages', sa.Column('downloaded_hash', sa.Unicode(length=255), nullable=True))
    op.add_column('RepositoryApp2languages', sa.Column('error', sa.Boolean(), nullable=True))
    # NOTE(review): adding 'id' with nullable=False and no server_default can
    # fail on a non-empty table, and add_column alone does not make it a
    # primary key -- confirm this matched the deployment's state at the time.
    op.add_column('RepositoryApp2languages', sa.Column('id', sa.Integer(), nullable=False))
    op.add_column('RepositoryApp2languages', sa.Column('last_processed_hash', sa.Unicode(length=255), nullable=True))
    op.add_column('RepositoryApp2languages', sa.Column('last_processed_time', sa.DateTime(), nullable=True))
    op.create_index(u'ix_RepositoryApp2languages_downloaded_hash', 'RepositoryApp2languages', ['downloaded_hash'], unique=False)
    op.create_index(u'ix_RepositoryApp2languages_error', 'RepositoryApp2languages', ['error'], unique=False)
    op.create_index(u'ix_RepositoryApp2languages_last_processed_hash', 'RepositoryApp2languages', ['last_processed_hash'], unique=False)
    op.create_index(u'ix_RepositoryApp2languages_last_processed_time', 'RepositoryApp2languages', ['last_processed_time'], unique=False)
    op.add_column('RepositoryApps', sa.Column('downloaded_hash', sa.Unicode(length=255), nullable=True))
    op.add_column('RepositoryApps', sa.Column('last_processed_hash', sa.Unicode(length=255), nullable=True))
    op.add_column('RepositoryApps', sa.Column('last_processed_time', sa.DateTime(), nullable=True))
    op.create_index(u'ix_RepositoryApps_downloaded_hash', 'RepositoryApps', ['downloaded_hash'], unique=False)
    op.create_index(u'ix_RepositoryApps_last_processed_hash', 'RepositoryApps', ['last_processed_hash'], unique=False)
    op.create_index(u'ix_RepositoryApps_last_processed_time', 'RepositoryApps', ['last_processed_time'], unique=False)
    ### end Alembic commands ###
def downgrade():
    """Remove the bookkeeping columns/indexes added by upgrade()
    (indexes first, then columns, to satisfy dependencies)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(u'ix_RepositoryApps_last_processed_time', table_name='RepositoryApps')
    op.drop_index(u'ix_RepositoryApps_last_processed_hash', table_name='RepositoryApps')
    op.drop_index(u'ix_RepositoryApps_downloaded_hash', table_name='RepositoryApps')
    op.drop_column('RepositoryApps', 'last_processed_time')
    op.drop_column('RepositoryApps', 'last_processed_hash')
    op.drop_column('RepositoryApps', 'downloaded_hash')
    op.drop_index(u'ix_RepositoryApp2languages_last_processed_time', table_name='RepositoryApp2languages')
    op.drop_index(u'ix_RepositoryApp2languages_last_processed_hash', table_name='RepositoryApp2languages')
    op.drop_index(u'ix_RepositoryApp2languages_error', table_name='RepositoryApp2languages')
    op.drop_index(u'ix_RepositoryApp2languages_downloaded_hash', table_name='RepositoryApp2languages')
    op.drop_column('RepositoryApp2languages', 'last_processed_time')
    op.drop_column('RepositoryApp2languages', 'last_processed_hash')
    op.drop_column('RepositoryApp2languages', 'id')
    op.drop_column('RepositoryApp2languages', 'error')
    op.drop_column('RepositoryApp2languages', 'downloaded_hash')
    ### end Alembic commands ###
| {
"repo_name": "morelab/appcomposer",
"path": "alembic/versions/490693c8dfe0_add_metadata_for_faster_downloads.py",
"copies": "3",
"size": "3369",
"license": "bsd-2-clause",
"hash": -3408767698649153000,
"line_mean": 61.3888888889,
"line_max": 136,
"alpha_frac": 0.7482932621,
"autogenerated": false,
"ratio": 3.4203045685279188,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5668597830627919,
"avg_score": null,
"num_lines": null
} |
"""Add metadata to FastCache
Revision ID: 3d2668a96dce
Revises: 4e20dff98010
Create Date: 2015-05-11 14:03:10.890043
"""
# revision identifiers, used by Alembic.
revision = '3d2668a96dce'
down_revision = '4e20dff98010'
from alembic import op
import sqlalchemy as sa
import sqlalchemy.sql as sql
from appcomposer.db import db
from appcomposer.application import app
# Minimal table stub for issuing a raw DELETE below; only the id column is
# declared because the migration never reads any other column.
metadata = db.MetaData()
translation_fast_caches = db.Table('TranslationFastCaches', metadata,
    db.Column('id', db.Integer()),
)
def upgrade():
    """Add the app_metadata column, then invalidate the whole fast cache so
    entries are regenerated with the new column populated."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('TranslationFastCaches', sa.Column('app_metadata', sa.UnicodeText(), nullable=True))
    ### end Alembic commands ###
    with app.app_context():
        delete_stmt = translation_fast_caches.delete()
        db.session.execute(delete_stmt)
        db.session.commit()
def downgrade():
    """Drop the app_metadata column added by upgrade(). The deleted cache
    rows are not restored; the application repopulates them on demand."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('TranslationFastCaches', 'app_metadata')
    ### end Alembic commands ###
| {
"repo_name": "go-lab/appcomposer",
"path": "alembic/versions/3d2668a96dce_add_metadata_to_fastcache.py",
"copies": "3",
"size": "1048",
"license": "bsd-2-clause",
"hash": 7781458506732205000,
"line_mean": 25.2,
"line_max": 102,
"alpha_frac": 0.7089694656,
"autogenerated": false,
"ratio": 3.4473684210526314,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5656337886652631,
"avg_score": null,
"num_lines": null
} |
"""Add MFA
Revision ID: 84a8fa98bdf2
Revises: afde33bde2e0
Create Date: 2021-01-15 14:19:41.812334
"""
# revision identifiers, used by Alembic.
revision = '84a8fa98bdf2'
down_revision = 'afde33bde2e0'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the mfa table holding per-user MFA device secrets, keyed to
    users.guid with ON UPDATE CASCADE."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('mfa',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('user_guid', sa.String(length=64), nullable=True),
        sa.Column('secret', sa.LargeBinary(), nullable=False),
        sa.Column('device_name', sa.String(length=32), nullable=False),
        sa.Column('is_valid', sa.Boolean(), nullable=False),
        sa.ForeignKeyConstraint(['user_guid'], ['users.guid'], onupdate='CASCADE'),
        sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the mfa table (all stored MFA secrets are lost)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('mfa')
    # ### end Alembic commands ###
| {
"repo_name": "CityOfNewYork/NYCOpenRecords",
"path": "migrations/versions/84a8fa98bdf2_add_mfa.py",
"copies": "1",
"size": "1090",
"license": "apache-2.0",
"hash": 7559564775479928000,
"line_mean": 31.0588235294,
"line_max": 95,
"alpha_frac": 0.5889908257,
"autogenerated": false,
"ratio": 3.6577181208053693,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4746708946505369,
"avg_score": null,
"num_lines": null
} |
"""Add minutes FK constraint to ALV.
Revision ID: f05f7c8518a3
Revises: 22710e6fd2b1
Create Date: 2018-03-20 13:04:10.320984
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship
# revision identifiers, used by Alembic.
revision = 'f05f7c8518a3'
down_revision = '22710e6fd2b1'
def upgrade():
    """Add an FK from alv.minutes_file_id to file.id, first NULLing any
    dangling references so constraint creation cannot fail."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Set any invalid minutes_file_id to NULL first
    op.execute('''UPDATE alv
                  LEFT JOIN file f ON f.id = alv.minutes_file_id
                  SET minutes_file_id = NULL
                  WHERE f.id IS NULL''')
    op.create_foreign_key(op.f('fk_alv_minutes_file_id_file'), 'alv', 'file', ['minutes_file_id'], ['id'])
    # ### end Alembic commands ###
def downgrade():
    """Drop the alv->file foreign key. The NULLed-out dangling references
    from upgrade() are not restored."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint(op.f('fk_alv_minutes_file_id_file'), 'alv', type_='foreignkey')
    # ### end Alembic commands ###
# vim: ft=python
| {
"repo_name": "viaict/viaduct",
"path": "migrations/versions/2018_03_20_f05f7c8518a3_add_minutes_fk_constraint_to_alv.py",
"copies": "1",
"size": "1057",
"license": "mit",
"hash": 3016760429691256300,
"line_mean": 26.1025641026,
"line_max": 106,
"alpha_frac": 0.7010406812,
"autogenerated": false,
"ratio": 3.1646706586826348,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9363039538951085,
"avg_score": 0.0005343601863099124,
"num_lines": 39
} |
"""Add missing blocking principal constraints
Revision ID: 252c0015c9a0
Revises: 4f98f2f979c7
Create Date: 2018-12-13 10:49:51.279917
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '252c0015c9a0'
down_revision = '4f98f2f979c7'
branch_labels = None
depends_on = None
def upgrade():
    """Add lookup/uniqueness indexes and per-type validity check constraints
    for roombooking.blocking_principals."""
    # Plain lookup index on the multipass group pair (name auto-generated).
    op.create_index(None, 'blocking_principals', ['mp_group_provider', 'mp_group_name'], schema='roombooking')
    # Partial unique indexes: at most one row per (identity, blocking),
    # each restricted to its principal type discriminator value.
    op.create_index('ix_uq_blocking_principals_local_group',
                    'blocking_principals',
                    ['local_group_id', 'blocking_id'],
                    unique=True, schema='roombooking', postgresql_where=sa.text('type = 2'))
    op.create_index('ix_uq_blocking_principals_mp_group',
                    'blocking_principals',
                    ['mp_group_provider', 'mp_group_name', 'blocking_id'],
                    unique=True, schema='roombooking', postgresql_where=sa.text('type = 3'))
    op.create_index('ix_uq_blocking_principals_user',
                    'blocking_principals',
                    ['user_id', 'blocking_id'],
                    unique=True, schema='roombooking', postgresql_where=sa.text('type = 1'))
    # Check constraints: for each principal type, exactly the matching
    # identity column(s) must be set and all others must be NULL.
    op.create_check_constraint('valid_user', 'blocking_principals',
                               '((type != 1) OR ((local_group_id IS NULL) AND (mp_group_name IS NULL) AND '
                               '(mp_group_provider IS NULL) AND (user_id IS NOT NULL)))', schema='roombooking')
    op.create_check_constraint('valid_local_group', 'blocking_principals',
                               '((type != 2) OR ((mp_group_name IS NULL) AND (mp_group_provider IS NULL) AND '
                               '(user_id IS NULL) AND (local_group_id IS NOT NULL)))', schema='roombooking')
    op.create_check_constraint('valid_multipass_group', 'blocking_principals',
                               '((type <> 3) OR ((local_group_id IS NULL) AND (user_id IS NULL) AND '
                               '(mp_group_name IS NOT NULL) AND (mp_group_provider IS NOT NULL)))',
                               schema='roombooking')
def downgrade():
    """Drop the indexes and check constraints added by upgrade(). The
    auto-named index is dropped by its conventional generated name."""
    op.drop_index('ix_uq_blocking_principals_user', table_name='blocking_principals', schema='roombooking')
    op.drop_index('ix_uq_blocking_principals_mp_group', table_name='blocking_principals', schema='roombooking')
    op.drop_index('ix_uq_blocking_principals_local_group', table_name='blocking_principals', schema='roombooking')
    op.drop_index('ix_blocking_principals_mp_group_provider_mp_group_name', table_name='blocking_principals',
                  schema='roombooking')
    op.drop_constraint('ck_blocking_principals_valid_user', 'blocking_principals', schema='roombooking')
    op.drop_constraint('ck_blocking_principals_valid_local_group', 'blocking_principals', schema='roombooking')
    op.drop_constraint('ck_blocking_principals_valid_multipass_group', 'blocking_principals', schema='roombooking')
| {
"repo_name": "indico/indico",
"path": "indico/migrations/versions/20181213_1049_252c0015c9a0_add_missing_blocking_principal.py",
"copies": "1",
"size": "2967",
"license": "mit",
"hash": 5132448581061188000,
"line_mean": 54.9811320755,
"line_max": 115,
"alpha_frac": 0.6255476913,
"autogenerated": false,
"ratio": 3.482394366197183,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4607942057497183,
"avg_score": null,
"num_lines": null
} |
"""Add missing constraints
Revision ID: 3a872ea246b9
Revises: 4690204e5a62
Create Date: 2015-10-28 19:00:20.176000
"""
# revision identifiers, used by Alembic.
revision = '3a872ea246b9'
down_revision = '4690204e5a62'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
    """Fix event.visible's column type and add sourcequeue.target with a
    uniqueness guarantee per (user, target) and an FK to player.id."""
    ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('event', 'visible',
                    existing_type=mysql.TINYINT(display_width=1),
                    type_=sa.Boolean(),
                    existing_nullable=True)
    op.add_column('sourcequeue', sa.Column('target', sa.Integer(), nullable=True))
    op.create_unique_constraint('_user_target_uc', 'sourcequeue', ['user', 'target'])
    # NOTE(review): the FK is created without an explicit name, so the DB
    # assigns one; downgrade() then cannot address it by name (see below).
    op.create_foreign_key(None, 'sourcequeue', 'player', ['target'], ['id'])
    ### end Alembic commands ###
def downgrade():
    """Revert the sourcequeue.target addition and event.visible type change."""
    ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): drop_constraint(None, ...) raises in Alembic - a
    # constraint name is required. The FK was created unnamed in upgrade(),
    # so the DB-generated name must be looked up (or the FK named explicitly
    # in upgrade) before this downgrade can actually run.
    op.drop_constraint(None, 'sourcequeue', type_='foreignkey')
    op.drop_constraint('_user_target_uc', 'sourcequeue', type_='unique')
    op.drop_column('sourcequeue', 'target')
    op.alter_column('event', 'visible',
                    existing_type=sa.Boolean(),
                    type_=mysql.TINYINT(display_width=1),
                    existing_nullable=True)
    ### end Alembic commands ###
| {
"repo_name": "katajakasa/utuputki2",
"path": "alembic/versions/3a872ea246b9_add_missing_constraints.py",
"copies": "1",
"size": "1351",
"license": "mit",
"hash": -2130447596318423800,
"line_mean": 32.775,
"line_max": 85,
"alpha_frac": 0.6550703183,
"autogenerated": false,
"ratio": 3.583554376657825,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9684620919334238,
"avg_score": 0.01080075512471732,
"num_lines": 40
} |
"""Add missing field in Historic
Revision ID: 24b650b60652
Revises: bfa1f94763c
Create Date: 2016-02-04 09:57:51.655017
"""
# revision identifiers, used by Alembic.
revision = '24b650b60652'
down_revision = 'bfa1f94763c'
from alembic import op
import sqlalchemy as sa
import sqlalchemy.sql as sql
from appcomposer.db import db
from appcomposer.application import app
# Ad-hoc table stubs declaring only the columns this migration touches, so
# data can be copied with raw SQLAlchemy Core statements and the migration
# stays independent of the (possibly newer) application models.
metadata = db.MetaData()
active_translation = db.Table('ActiveTranslationMessages', metadata,
    sa.Column('history_id', sa.Integer()),
    sa.Column('category', sa.Unicode(length=255), nullable=True),
    sa.Column('fmt', sa.Unicode(length=255), nullable=True),
    sa.Column('from_developer', sa.Boolean(), nullable=True),
    sa.Column('namespace', sa.Unicode(length=255), nullable=True),
    sa.Column('position', sa.Integer(), nullable=True),
    sa.Column('same_tool', sa.Boolean(), nullable=True),
    sa.Column('tool_id', sa.Unicode(length=255), nullable=True),
)
translation_history = db.Table('TranslationMessageHistory', metadata,
    sa.Column('id', sa.Integer()),
    sa.Column('parent_translation_id', sa.Integer()),
    sa.Column('category', sa.Unicode(length=255), nullable=True),
    sa.Column('fmt', sa.Unicode(length=255), nullable=True),
    sa.Column('from_developer', sa.Boolean(), nullable=True),
    sa.Column('namespace', sa.Unicode(length=255), nullable=True),
    sa.Column('position', sa.Integer(), nullable=True),
    sa.Column('same_tool', sa.Boolean(), nullable=True),
    sa.Column('tool_id', sa.Unicode(length=255), nullable=True),
)
def upgrade():
    """Add denormalized message fields (plus indexes) to the history table,
    then backfill them from each active message by walking its parent chain
    and stamping the same values on every ancestor row."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('TranslationMessageHistory', sa.Column('category', sa.Unicode(length=255), nullable=True))
    op.add_column('TranslationMessageHistory', sa.Column('fmt', sa.Unicode(length=255), nullable=True))
    op.add_column('TranslationMessageHistory', sa.Column('from_developer', sa.Boolean(), nullable=True))
    op.add_column('TranslationMessageHistory', sa.Column('namespace', sa.Unicode(length=255), nullable=True))
    op.add_column('TranslationMessageHistory', sa.Column('position', sa.Integer(), nullable=True))
    op.add_column('TranslationMessageHistory', sa.Column('same_tool', sa.Boolean(), nullable=True))
    op.add_column('TranslationMessageHistory', sa.Column('tool_id', sa.Unicode(length=255), nullable=True))
    op.create_index(u'ix_TranslationMessageHistory_category', 'TranslationMessageHistory', ['category'], unique=False)
    op.create_index(u'ix_TranslationMessageHistory_fmt', 'TranslationMessageHistory', ['fmt'], unique=False)
    op.create_index(u'ix_TranslationMessageHistory_from_developer', 'TranslationMessageHistory', ['from_developer'], unique=False)
    op.create_index(u'ix_TranslationMessageHistory_namespace', 'TranslationMessageHistory', ['namespace'], unique=False)
    op.create_index(u'ix_TranslationMessageHistory_position', 'TranslationMessageHistory', ['position'], unique=False)
    op.create_index(u'ix_TranslationMessageHistory_same_tool', 'TranslationMessageHistory', ['same_tool'], unique=False)
    op.create_index(u'ix_TranslationMessageHistory_tool_id', 'TranslationMessageHistory', ['tool_id'], unique=False)
    ### end Alembic commands ###
    with app.app_context():
        # Create a list of fields such as active_translation.c.history_id, ...
        FIELD_NAMES = 'category', 'fmt', 'from_developer', 'namespace', 'position', 'same_tool', 'tool_id'
        fields = []
        for field in ('history_id',) + FIELD_NAMES:
            fields.append(getattr(active_translation.c, field))
        for active_message in db.session.execute(sql.select(fields)):
            parent_id = active_message[0]
            # field_values = { 'category' : (category), 'fmt' : (fmt), ... }
            field_values = dict(zip(FIELD_NAMES, active_message[1:]))
            # Walk up the history chain: update this row, then follow its
            # parent_translation_id until the chain ends.
            while parent_id is not None:
                update_stmt = translation_history.update().where(translation_history.c.id == parent_id).values(**field_values)
                db.session.execute(update_stmt)
                parent_ids = db.session.execute(sql.select([ translation_history.c.parent_translation_id ], translation_history.c.id == parent_id))
                parent_ids = list(parent_ids)
                if len(parent_ids) == 0 or len(parent_ids[0]) == 0:
                    break
                parent_id = parent_ids[0][0]
        # Single commit after the whole backfill, so it applies atomically.
        db.session.commit()
def downgrade():
    """Drop the denormalized columns and their indexes (indexes first).
    The backfilled data is discarded."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(u'ix_TranslationMessageHistory_tool_id', table_name='TranslationMessageHistory')
    op.drop_index(u'ix_TranslationMessageHistory_same_tool', table_name='TranslationMessageHistory')
    op.drop_index(u'ix_TranslationMessageHistory_position', table_name='TranslationMessageHistory')
    op.drop_index(u'ix_TranslationMessageHistory_namespace', table_name='TranslationMessageHistory')
    op.drop_index(u'ix_TranslationMessageHistory_from_developer', table_name='TranslationMessageHistory')
    op.drop_index(u'ix_TranslationMessageHistory_fmt', table_name='TranslationMessageHistory')
    op.drop_index(u'ix_TranslationMessageHistory_category', table_name='TranslationMessageHistory')
    op.drop_column('TranslationMessageHistory', 'tool_id')
    op.drop_column('TranslationMessageHistory', 'same_tool')
    op.drop_column('TranslationMessageHistory', 'position')
    op.drop_column('TranslationMessageHistory', 'namespace')
    op.drop_column('TranslationMessageHistory', 'from_developer')
    op.drop_column('TranslationMessageHistory', 'fmt')
    op.drop_column('TranslationMessageHistory', 'category')
    ### end Alembic commands ###
| {
"repo_name": "go-lab/appcomposer",
"path": "alembic/versions/24b650b60652_add_missing_field_in_historic.py",
"copies": "3",
"size": "5674",
"license": "bsd-2-clause",
"hash": -8745628749135998000,
"line_mean": 54.6274509804,
"line_max": 147,
"alpha_frac": 0.7040888262,
"autogenerated": false,
"ratio": 3.732894736842105,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007242810176085001,
"num_lines": 102
} |
"""addMissingHeaders
Revision ID: 37bf5b71b83f
Revises: 6015ec0f59c9
Create Date: 2016-03-22 12:24:54.141000
"""
# revision identifiers, used by Alembic.
revision = '37bf5b71b83f'
down_revision = '6015ec0f59c9'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Run the upgrade handler registered for *engine_name*.

    Raises KeyError when no ``upgrade_<engine_name>`` function is defined.
    """
    handler = globals()["upgrade_{0}".format(engine_name)]
    handler()
def downgrade(engine_name):
    """Run the downgrade handler registered for *engine_name*.

    Raises KeyError when no ``downgrade_<engine_name>`` function is defined.
    """
    handler = globals()["downgrade_{0}".format(engine_name)]
    handler()
def upgrade_error_data():
    """error_data DB: add a nullable headers_missing text column to file_status."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('file_status', sa.Column('headers_missing', sa.Text(), nullable=True))
    ### end Alembic commands ###
def downgrade_error_data():
    """error_data DB: drop the headers_missing column added by upgrade."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('file_status', 'headers_missing')
    ### end Alembic commands ###
def upgrade_job_tracker():
    """No schema changes for the 'job_tracker' database in this revision."""
    ### commands auto generated by Alembic - please adjust! ###
    pass
    ### end Alembic commands ###
def downgrade_job_tracker():
    """No schema changes to revert for the 'job_tracker' database."""
    ### commands auto generated by Alembic - please adjust! ###
    pass
    ### end Alembic commands ###
def upgrade_user_manager():
    """No schema changes for the 'user_manager' database in this revision."""
    ### commands auto generated by Alembic - please adjust! ###
    pass
    ### end Alembic commands ###
def downgrade_user_manager():
    """No schema changes to revert for the 'user_manager' database."""
    ### commands auto generated by Alembic - please adjust! ###
    pass
    ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-core",
"path": "dataactcore/migrations/versions/37bf5b71b83f_addmissingheaders.py",
"copies": "1",
"size": "1393",
"license": "cc0-1.0",
"hash": 8131530340776197000,
"line_mean": 20.765625,
"line_max": 88,
"alpha_frac": 0.664034458,
"autogenerated": false,
"ratio": 3.6465968586387434,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9700442988289357,
"avg_score": 0.02203766566987706,
"num_lines": 64
} |
"""Add missing indexes.
Revision ID: 32e8060589b8
Revises: a3fe8c8a344
Create Date: 2014-02-11 17:21:00.718449
"""
# revision identifiers, used by Alembic.
revision = '32e8060589b8'
down_revision = 'a3fe8c8a344'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add non-unique lookup indexes on frequently filtered columns."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_index('ix_group_created_at', 'group', ['created_at'], unique=False)
    op.create_index('ix_grouprequest_created_at', 'grouprequest', ['created_at'], unique=False)
    op.create_index('ix_grouprequest_from_user_id', 'grouprequest', ['from_user_id'], unique=False)
    op.create_index('ix_grouprequest_project_id', 'grouprequest', ['project_id'], unique=False)
    op.create_index('ix_grouprequest_to_user_id', 'grouprequest', ['to_user_id'], unique=False)
    op.create_index('ix_testableresult_created_at', 'testableresult', ['created_at'], unique=False)
    op.create_index('ix_user_to_group_group_id', 'user_to_group', ['group_id'], unique=False)
    ### end Alembic commands ###
def downgrade():
    """Drop the lookup indexes added by upgrade(), in reverse order."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index('ix_user_to_group_group_id', table_name='user_to_group')
    op.drop_index('ix_testableresult_created_at', table_name='testableresult')
    op.drop_index('ix_grouprequest_to_user_id', table_name='grouprequest')
    op.drop_index('ix_grouprequest_project_id', table_name='grouprequest')
    op.drop_index('ix_grouprequest_from_user_id', table_name='grouprequest')
    op.drop_index('ix_grouprequest_created_at', table_name='grouprequest')
    op.drop_index('ix_group_created_at', table_name='group')
    ### end Alembic commands ###
| {
"repo_name": "ucsb-cs/submit",
"path": "submit/migrations/versions/32e8060589b8_add_missing_indexes.py",
"copies": "1",
"size": "1674",
"license": "bsd-2-clause",
"hash": 8611515270830671000,
"line_mean": 43.0526315789,
"line_max": 99,
"alpha_frac": 0.7013142174,
"autogenerated": false,
"ratio": 3.1057513914656774,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9274028186764908,
"avg_score": 0.006607484420154011,
"num_lines": 38
} |
"""Add missing indexes
Revision ID: 4bc4c1ae0f38
Revises: 105c1c44ff70
Create Date: 2014-04-29 21:28:23.149125
"""
# revision identifiers, used by Alembic.
revision = '4bc4c1ae0f38'
down_revision = '105c1c44ff70'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add non-unique lookup indexes across labmanager tables."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_index('ix_laboratories_available', 'laboratories', ['available'], unique=False)
    op.create_index('ix_laboratories_publicly_available', 'laboratories', ['publicly_available'], unique=False)
    op.create_index('ix_laboratories_visibility', 'laboratories', ['visibility'], unique=False)
    op.create_index('ix_learning_tools_full_name', 'learning_tools', ['full_name'], unique=False)
    op.create_index('ix_learning_tools_name', 'learning_tools', ['name'], unique=False)
    op.create_index('ix_lt_users_login', 'lt_users', ['login'], unique=False)
    op.create_index('ix_permissions2lt_accessible', 'permissions2lt', ['accessible'], unique=False)
    op.create_index('ix_permissions2lt_local_identifier', 'permissions2lt', ['local_identifier'], unique=False)
    op.create_index('ix_permissions2ltuser_lt_user_id', 'permissions2ltuser', ['lt_user_id'], unique=False)
    op.create_index('ix_permissions2ltuser_permission_to_lt_id', 'permissions2ltuser', ['permission_to_lt_id'], unique=False)
    op.create_index('ix_request_permissions_lt_accessible', 'request_permissions_lt', ['accessible'], unique=False)
    op.create_index('ix_request_permissions_lt_local_identifier', 'request_permissions_lt', ['local_identifier'], unique=False)
    ### end Alembic commands ###
def downgrade():
    """Drop the lookup indexes added by upgrade(), in reverse order."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index('ix_request_permissions_lt_local_identifier', table_name='request_permissions_lt')
    op.drop_index('ix_request_permissions_lt_accessible', table_name='request_permissions_lt')
    op.drop_index('ix_permissions2ltuser_permission_to_lt_id', table_name='permissions2ltuser')
    op.drop_index('ix_permissions2ltuser_lt_user_id', table_name='permissions2ltuser')
    op.drop_index('ix_permissions2lt_local_identifier', table_name='permissions2lt')
    op.drop_index('ix_permissions2lt_accessible', table_name='permissions2lt')
    op.drop_index('ix_lt_users_login', table_name='lt_users')
    op.drop_index('ix_learning_tools_name', table_name='learning_tools')
    op.drop_index('ix_learning_tools_full_name', table_name='learning_tools')
    op.drop_index('ix_laboratories_visibility', table_name='laboratories')
    op.drop_index('ix_laboratories_publicly_available', table_name='laboratories')
    op.drop_index('ix_laboratories_available', table_name='laboratories')
    ### end Alembic commands ###
| {
"repo_name": "porduna/labmanager",
"path": "alembic/versions/4bc4c1ae0f38_add_missing_indexes.py",
"copies": "5",
"size": "2738",
"license": "bsd-2-clause",
"hash": 4423779502197167600,
"line_mean": 56.0416666667,
"line_max": 127,
"alpha_frac": 0.7282688093,
"autogenerated": false,
"ratio": 3.3107617896009676,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6539030598900968,
"avg_score": null,
"num_lines": null
} |
"""Add missing mapping permissions.
Revision ID: 758b4012b5f
Revises: 3be0d9d2c9f8
Create Date: 2013-10-08 04:43:08.967577
"""
# revision identifiers, used by Alembic.
revision = '758b4012b5f'
down_revision = '3be0d9d2c9f8'
import json
import sqlalchemy as sa
from alembic import op
from datetime import datetime
from sqlalchemy.sql import table, column
# Lightweight Core table stub for issuing raw UPDATEs against `roles`
# without importing the application models.
roles_table = table('roles',
    column('id', sa.Integer),
    column('name', sa.String),
    column('permissions_json', sa.Text),
    column('description', sa.Text),
    column('modified_by_id', sa.Integer),
    column('created_at', sa.DateTime),
    column('updated_at', sa.DateTime),
    column('context_id', sa.Integer),
)
def _full_crud_permissions(objects):
    """Permission dict granting create/read/update/delete on *objects*."""
    return {
        'create': objects,
        'read': objects,
        'update': objects,
        'delete': objects,
    }


def _update_role(role_name, permissions, updated_at):
    """UPDATE roles.permissions_json (and updated_at) for the role *role_name*."""
    op.execute(roles_table.update()
               .values(permissions_json=json.dumps(permissions),
                       updated_at=updated_at)
               .where(roles_table.c.name == role_name))


def set_permissions(program_editor_objects):
    """Reset the permissions of the three Program* roles.

    ProgramOwner gets full CRUD on *program_editor_objects* plus UserRole;
    ProgramEditor gets full CRUD on *program_editor_objects* only;
    ProgramReader gets read-only access. All three rows share one
    updated_at timestamp.
    """
    program_owner_objects = list(program_editor_objects)
    program_owner_objects.append('UserRole')
    # NOTE(review): naive local time, consistent with existing rows.
    current_datetime = datetime.now()
    _update_role('ProgramOwner',
                 _full_crud_permissions(program_owner_objects),
                 current_datetime)
    _update_role('ProgramEditor',
                 _full_crud_permissions(program_editor_objects),
                 current_datetime)
    _update_role('ProgramReader',
                 {
                     'create': [],
                     'read': program_editor_objects,
                     'update': [],
                     'delete': [],
                 },
                 current_datetime)
def upgrade():
    """Grant the Program* roles permissions over the full set of mapping
    object types (adds the object types missing before this revision)."""
    set_permissions([
        'Cycle',
        'ObjectDocument',
        'ObjectObjective',
        'ObjectPerson',
        'ObjectSection',
        'Program',
        'ProgramControl',
        'ProgramDirective',
        'Relationship',
    ])
def downgrade():
    """Restore the pre-revision (smaller) set of permitted object types."""
    set_permissions([
        'Cycle',
        'ObjectDocument',
        'ObjectPerson',
        'Program',
        'ProgramDirective',
        'Relationship',
    ])
| {
"repo_name": "uskudnik/ggrc-core",
"path": "src/ggrc_basic_permissions/migrations/versions/20131008044308_758b4012b5f_add_missing_mapping_.py",
"copies": "2",
"size": "2372",
"license": "apache-2.0",
"hash": 7994997664952841000,
"line_mean": 25.6516853933,
"line_max": 54,
"alpha_frac": 0.6087689713,
"autogenerated": false,
"ratio": 3.7472353870458135,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5356004358345814,
"avg_score": null,
"num_lines": null
} |
"""Add missing `NOT NULL`s
Revision ID: 0958abda66cf
Revises: 3f18ecfaf637
Create Date: 2017-05-08 11:01:05.744154
"""
# revision identifiers, used by Alembic.
revision = '0958abda66cf'
down_revision = '3f18ecfaf637'
from alembic import op # lgtm[py/unused-import]
import sqlalchemy as sa # lgtm[py/unused-import]
def upgrade():
    """Tighten user/target id columns to NOT NULL across content tables."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Same alteration for every (table, column) pair; order matches the
    # original autogenerated script.
    not_null_columns = (
        ('character', 'userid'),
        ('charcomment', 'targetid'),
        ('charcomment', 'userid'),
        ('folder', 'userid'),
        ('forgotpassword', 'userid'),
        ('journal', 'userid'),
        ('journalcomment', 'targetid'),
        ('journalcomment', 'userid'),
        ('message', 'otherid'),
        ('message', 'userid'),
        ('siteupdate', 'userid'),
        ('submission', 'userid'),
    )
    for table_name, column_name in not_null_columns:
        op.alter_column(table_name, column_name,
                        existing_type=sa.INTEGER(),
                        nullable=False)
    # ### end Alembic commands ###
def downgrade():
    """Relax the NOT NULL constraints added by upgrade(), in reverse order."""
    # ### commands auto generated by Alembic - please adjust! ###
    nullable_columns = (
        ('submission', 'userid'),
        ('siteupdate', 'userid'),
        ('message', 'userid'),
        ('message', 'otherid'),
        ('journalcomment', 'userid'),
        ('journalcomment', 'targetid'),
        ('journal', 'userid'),
        ('forgotpassword', 'userid'),
        ('folder', 'userid'),
        ('charcomment', 'userid'),
        ('charcomment', 'targetid'),
        ('character', 'userid'),
    )
    for table_name, column_name in nullable_columns:
        op.alter_column(table_name, column_name,
                        existing_type=sa.INTEGER(),
                        nullable=True)
    # ### end Alembic commands ###
| {
"repo_name": "Weasyl/weasyl",
"path": "libweasyl/libweasyl/alembic/versions/0958abda66cf_add_missing_not_null_s.py",
"copies": "1",
"size": "3389",
"license": "apache-2.0",
"hash": -4517465691475755500,
"line_mean": 34.3020833333,
"line_max": 65,
"alpha_frac": 0.5582767778,
"autogenerated": false,
"ratio": 4.102905569007264,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5161182346807264,
"avg_score": null,
"num_lines": null
} |
"""Add missing references section to pages with refs, comes from wikidata."""
import re
import pywikibot
# Runtime configuration for the Russian Wikipedia bot run. These values are
# wiki page titles / edit-summary text and must remain in Russian.
CATEGORY = "Категория:Википедия:Статьи с источниками из Викиданных"  # tracking category to scan
TEMPLATE = "Шаблон:Примечания"  # the references ({{примечания}}) template page
COMMENT = "Исправление отсутствующей секции примечаний."  # edit summary for fixes
IGNORE_FILTER = re.compile(r"""(
<!--.*?-->|
<nowiki>.*?</nowiki>|
<nowiki\s*/>|
<math>.*?</math>|
<hiero>.*?</hiero>|
<tt>.*?</tt>|
<code>.*?</code>|
<pre>.*?</pre>|
<source[^>]*>.*?</source>|
<syntaxhighlight[^>]*>.*?</syntaxhighlight>|
<templatedata>.*?</templatedata>|
<imagemap>.*?</imagemap>
)""", re.I | re.DOTALL | re.VERBOSE)
LABEL_PREFIX = "\x01"
LABEL_SUFFIX = "\x02"
def ignore(text, ignore_filter=IGNORE_FILTER):
"""
Replace all text matches regexp with special label.
Parameters:
text - text to be processed;
ignore_filter - compiled regular expression or string with regexp.
Return (new_text, deleted_text_list) tuple.
"""
if isinstance(ignore_filter, str):
ignore_filter = re.compile(ignore_filter, flags=re.I | re.DOTALL)
ignored = []
count = 0
def _ignore_line(match_obj):
"""Replace founded text with special label."""
#pylint: disable=undefined-variable
nonlocal ignored
ignored.append(match_obj.group(0))
nonlocal count
old_count = count
count += 1
return LABEL_PREFIX + str(old_count) + LABEL_SUFFIX
text = re.sub(LABEL_PREFIX + r"(\d+)" + LABEL_SUFFIX, _ignore_line, text)
text = ignore_filter.sub(_ignore_line, text)
return (text, ignored)
def deignore(text, ignored):
    """
    Restore the text returned by the ignore() function.

    Parameters:
        text - text to be processed;
        ignored - deleted_text_list, returned by the ignore() function.

    Return string.
    """
    def _restore(match_obj):
        """Swap a placeholder label back for its stored text."""
        return ignored[int(match_obj.group(1))]

    label_pattern = LABEL_PREFIX + r"(\d+)" + LABEL_SUFFIX
    return re.sub(label_pattern, _restore, text)
def insert_references(text, last_ref=0):
    """
    Insert a references section into the page per the local manual of style.

    Parameters:
        text - wikitext to be processed;
        last_ref - position of the last reference in the text; the section
            is never inserted before this offset. If last_ref equals -1,
            no references section will be added.

    The module-level FIX_UNSAFE_MISSING_REFERENCES flag (absent/False by
    default) opts in to the less safe insertion strategies.

    Return (new_text, is_inserted) tuple, where is_inserted is 0 or 1.
    """
    # Bug fix: the module never defines FIX_UNSAFE_MISSING_REFERENCES, so a
    # bare reference raised NameError on ordinary pages; read it defensively.
    unsafe = globals().get("FIX_UNSAFE_MISSING_REFERENCES", False)
    if last_ref == -1:
        # no references in page
        return (text, 0)
    if (re.search(r"{{\s*(?:примечания|список[_ ]примечаний|reflist\+?)", text, flags=re.I) or
            re.search(r"<\s*references", text, flags=re.I)):
        # references are already here
        return (text, 0)
    if "noinclude" in text or "includeonly" in text or "onlyinclude" in text:
        # page is included somewhere - dangerous to fix, we don't know how
        # the insertion would affect the pages that transclude it
        return (text, 0)
    # try to place references into the corresponding section
    section = re.search(r"^==[ ]*Примечани[ея][ ]*==$", text, flags=re.M)
    if section:
        pos = section.end(0)
        if pos < last_ref:
            # that's not a solution: a reference would follow the section
            return (text, 0)
        if re.match(r"\s*($|\n==|\[\[Категория:)", text[pos:]) is None:
            if not (unsafe and
                    re.match(r"({{[^:{}][^{}]*}}|\[\[Категория:[^\[\]]+\]\]|\s)*$", text[pos:])):
                # section isn't empty
                return (text, 0)
        text = text[:pos] + "\n{{примечания}}" + text[pos:]
        return (text, 1)
    # try to place references before special sections
    section = re.search(r"^==[ ]*(Литература|Ссылки|Источники)[ ]*==$", text, flags=re.M | re.I)
    if section:
        start = section.start(0)
        end = section.end(0)
        if start < last_ref:
            return (text, 0)
        if re.match(r"\s*($|\[\[Категория:)", text[end:]):
            # section is empty - replace its header with ours
            text = text[:start] + "== Примечания ==\n{{примечания}}" + text[end:]
        else:
            text = text[:start] + "== Примечания ==\n{{примечания}}\n\n" + text[start:]
        return (text, 1)
    # place references at the end of the article, just before trailing
    # categories and templates (opt-in: this position is a guess)
    if unsafe:
        section = re.search(r"\n({{[^:{}][^{}]*}}|\[\[Категория:[^\[\]]+\]\]|\s)*$", text)
        # Bug fix: a page without any newline yields no match; append at the
        # very end instead of crashing on section.start(0).
        pos = section.start(0) if section else len(text)
        if pos < last_ref:
            return (text, 0)
        text = text[:pos] + "\n\n== Примечания ==\n{{примечания}}" + text[pos:]
        return (text, 1)
    return (text, 0)
def main():
    """Main script function.

    Collects article titles from the tracking category, removes those that
    already transclude the references template, and inserts the missing
    references section into the rest.
    """
    site = pywikibot.Site()
    category = pywikibot.Category(site, CATEGORY)
    refs = [page.title() for page in category.articles(namespaces=[0])]
    template = pywikibot.Page(site, TEMPLATE)
    references = {page.title() for page in template.embeddedin(namespaces=[0])}
    # converting to titles and back is needed for saving memory
    pages = [pywikibot.Page(site, title) for title in refs if title not in references]
    for page in pages:
        (text, ignored) = ignore(page.text)
        # Bug fix: insert_references is defined in this module; the original
        # called checkwiki.insert_references but never imported checkwiki.
        (text, flag) = insert_references(text)
        text = deignore(text, ignored)
        if flag:
            page.text = text
            page.save(COMMENT)
if __name__ == "__main__":
main()
| {
"repo_name": "Facenapalm/NapalmBot",
"path": "scripts/wikidatarefs.py",
"copies": "1",
"size": "5794",
"license": "mit",
"hash": 7357969087732842000,
"line_mean": 33.7295597484,
"line_max": 102,
"alpha_frac": 0.5932633104,
"autogenerated": false,
"ratio": 3.1937536148062464,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4287016925206246,
"avg_score": null,
"num_lines": null
} |
"""Add mnemonic for supplier, product and customer, make mobile_phone unique in customer.
Revision ID: 3c66f436a5be
Revises: 3f2bc9bc2775
Create Date: 2017-04-30 22:55:30.543871
"""
# revision identifiers, used by Alembic.
revision = '3c66f436a5be'
down_revision = '3f2bc9bc2775'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add a mnemonic column to customer/product/supplier and make the
    customer mobile phone unique (see revision docstring)."""
    # ### commands auto generated by Alembic - please adjust! ###
    # 'mnemonic' is presumably a short lookup code — confirm against models.
    op.add_column('customer', sa.Column('mnemonic', sa.String(length=128), nullable=True))
    # NOTE(review): fails if existing customer rows share a mobile_phone.
    op.create_unique_constraint('customer_mobile_phone_key', 'customer', ['mobile_phone'])
    op.add_column('product', sa.Column('mnemonic', sa.String(length=256), nullable=True))
    op.add_column('supplier', sa.Column('mnemonic', sa.String(length=256), nullable=True))
def downgrade():
    """Reverse upgrade(): drop the mnemonic columns and the unique constraint."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('supplier', 'mnemonic')
    op.drop_column('product', 'mnemonic')
    op.drop_constraint('customer_mobile_phone_key', 'customer', type_='unique')
    op.drop_column('customer', 'mnemonic')
    # ### end Alembic commands ###
| {
"repo_name": "betterlife/psi",
"path": "psi/migrations/versions/38_3c66f436a5be_.py",
"copies": "2",
"size": "1105",
"license": "mit",
"hash": -1861999726722648300,
"line_mean": 34.6451612903,
"line_max": 90,
"alpha_frac": 0.7022624434,
"autogenerated": false,
"ratio": 3.308383233532934,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9978523465297894,
"avg_score": 0.006424442327008121,
"num_lines": 31
} |
"""Add ModAction model
Revision ID: 4430bb0ac79d
Revises: 2e09867c0c38
Create Date: 2016-07-03 11:05:53.359444
"""
# revision identifiers, used by Alembic.
revision = '4430bb0ac79d'
down_revision = '2e09867c0c38'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade routine, e.g. upgrade_development."""
    handler = globals()["upgrade_" + engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade routine, e.g. downgrade_development."""
    handler = globals()["downgrade_" + engine_name]
    handler()
def upgrade_development():
    """Create the mod_actions table and its lookup indexes (development DB)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('mod_actions',
    # Non-autoincrementing string PK — presumably the reddit action id; confirm with model.
    sa.Column('id', sa.String(length=256), autoincrement=False, nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('created_utc', sa.DateTime(), nullable=True),
    sa.Column('subreddit_id', sa.String(length=32), nullable=True),
    sa.Column('mod', sa.String(length=64), nullable=True),
    sa.Column('target_author', sa.String(length=64), nullable=True),
    sa.Column('action', sa.String(length=256), nullable=True),
    sa.Column('target_fullname', sa.String(length=256), nullable=True),
    # MEDIUMTEXT is MySQL-specific: this migration assumes a MySQL backend.
    sa.Column('action_data', mysql.MEDIUMTEXT(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    # NOTE(review): redundant with the primary key; kept as auto-generated.
    sa.UniqueConstraint('id')
    )
    op.create_index(op.f('ix_mod_actions_subreddit_id'), 'mod_actions', ['subreddit_id'], unique=False)
    op.create_index(op.f('ix_mod_actions_target_author'), 'mod_actions', ['target_author'], unique=False)
    ### end Alembic commands ###
def downgrade_development():
    """Drop the mod_actions table and its indexes (development DB)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_mod_actions_target_author'), table_name='mod_actions')
    op.drop_index(op.f('ix_mod_actions_subreddit_id'), table_name='mod_actions')
    op.drop_table('mod_actions')
    ### end Alembic commands ###
def upgrade_test():
    """Identical to upgrade_development(), run against the test engine."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('mod_actions',
    sa.Column('id', sa.String(length=256), autoincrement=False, nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('created_utc', sa.DateTime(), nullable=True),
    sa.Column('subreddit_id', sa.String(length=32), nullable=True),
    sa.Column('mod', sa.String(length=64), nullable=True),
    sa.Column('target_author', sa.String(length=64), nullable=True),
    sa.Column('action', sa.String(length=256), nullable=True),
    sa.Column('target_fullname', sa.String(length=256), nullable=True),
    # MySQL-specific type; see upgrade_development.
    sa.Column('action_data', mysql.MEDIUMTEXT(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('id')
    )
    op.create_index(op.f('ix_mod_actions_subreddit_id'), 'mod_actions', ['subreddit_id'], unique=False)
    op.create_index(op.f('ix_mod_actions_target_author'), 'mod_actions', ['target_author'], unique=False)
    ### end Alembic commands ###
def downgrade_test():
    """Drop the mod_actions table and its indexes (test DB)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_mod_actions_target_author'), table_name='mod_actions')
    op.drop_index(op.f('ix_mod_actions_subreddit_id'), table_name='mod_actions')
    op.drop_table('mod_actions')
    ### end Alembic commands ###
def upgrade_production():
    """Identical to upgrade_development(), run against the production engine."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('mod_actions',
    sa.Column('id', sa.String(length=256), autoincrement=False, nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('created_utc', sa.DateTime(), nullable=True),
    sa.Column('subreddit_id', sa.String(length=32), nullable=True),
    sa.Column('mod', sa.String(length=64), nullable=True),
    sa.Column('target_author', sa.String(length=64), nullable=True),
    sa.Column('action', sa.String(length=256), nullable=True),
    sa.Column('target_fullname', sa.String(length=256), nullable=True),
    # MySQL-specific type; see upgrade_development.
    sa.Column('action_data', mysql.MEDIUMTEXT(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('id')
    )
    op.create_index(op.f('ix_mod_actions_subreddit_id'), 'mod_actions', ['subreddit_id'], unique=False)
    op.create_index(op.f('ix_mod_actions_target_author'), 'mod_actions', ['target_author'], unique=False)
    ### end Alembic commands ###
def downgrade_production():
    """Drop the mod_actions table and its indexes (production DB)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_mod_actions_target_author'), table_name='mod_actions')
    op.drop_index(op.f('ix_mod_actions_subreddit_id'), table_name='mod_actions')
    op.drop_table('mod_actions')
    ### end Alembic commands ###
| {
"repo_name": "c4fcm/CivilServant",
"path": "alembic/versions/4430bb0ac79d_add_modaction_model.py",
"copies": "1",
"size": "4558",
"license": "mit",
"hash": -7095921864053196000,
"line_mean": 41.2037037037,
"line_max": 105,
"alpha_frac": 0.6801228609,
"autogenerated": false,
"ratio": 3.3197378004369993,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4499860661336999,
"avg_score": null,
"num_lines": null
} |
"""Add model for snapshots of ExperimentThings
Revision ID: a0f4fda7588f
Revises: ae72c799d321
Create Date: 2016-11-26 22:05:26.728790
"""
# revision identifiers, used by Alembic.
revision = 'a0f4fda7588f'
down_revision = 'ae72c799d321'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade routine, e.g. upgrade_development."""
    handler = globals()["upgrade_" + engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade routine, e.g. downgrade_development."""
    handler = globals()["downgrade_" + engine_name]
    handler()
def upgrade_development():
    """Create the experiment_thing_snapshots table and its indexes (development DB)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('experiment_thing_snapshots',
    sa.Column('id', sa.Integer(), nullable=False),
    # References ExperimentThing.id — no FK constraint is declared here.
    sa.Column('experiment_thing_id', sa.String(length=256), nullable=True),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('object_type', sa.Integer(), nullable=True),
    sa.Column('experiment_id', sa.Integer(), nullable=True),
    # MEDIUMTEXT is MySQL-specific: this migration assumes a MySQL backend.
    sa.Column('metadata_json', mysql.MEDIUMTEXT(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_experiment_thing_snapshots_experiment_id'), 'experiment_thing_snapshots', ['experiment_id'], unique=False)
    op.create_index(op.f('ix_experiment_thing_snapshots_experiment_thing_id'), 'experiment_thing_snapshots', ['experiment_thing_id'], unique=False)
    ### end Alembic commands ###
def downgrade_development():
    """Drop the experiment_thing_snapshots table and its indexes (development DB)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_experiment_thing_snapshots_experiment_thing_id'), table_name='experiment_thing_snapshots')
    op.drop_index(op.f('ix_experiment_thing_snapshots_experiment_id'), table_name='experiment_thing_snapshots')
    op.drop_table('experiment_thing_snapshots')
    ### end Alembic commands ###
def upgrade_test():
    """Identical to upgrade_development(), run against the test engine."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('experiment_thing_snapshots',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('experiment_thing_id', sa.String(length=256), nullable=True),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('object_type', sa.Integer(), nullable=True),
    sa.Column('experiment_id', sa.Integer(), nullable=True),
    # MySQL-specific type; see upgrade_development.
    sa.Column('metadata_json', mysql.MEDIUMTEXT(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_experiment_thing_snapshots_experiment_id'), 'experiment_thing_snapshots', ['experiment_id'], unique=False)
    op.create_index(op.f('ix_experiment_thing_snapshots_experiment_thing_id'), 'experiment_thing_snapshots', ['experiment_thing_id'], unique=False)
    ### end Alembic commands ###
def downgrade_test():
    """Drop the experiment_thing_snapshots table and its indexes (test DB)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_experiment_thing_snapshots_experiment_thing_id'), table_name='experiment_thing_snapshots')
    op.drop_index(op.f('ix_experiment_thing_snapshots_experiment_id'), table_name='experiment_thing_snapshots')
    op.drop_table('experiment_thing_snapshots')
    ### end Alembic commands ###
def upgrade_production():
    """Identical to upgrade_development(), run against the production engine."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('experiment_thing_snapshots',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('experiment_thing_id', sa.String(length=256), nullable=True),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('object_type', sa.Integer(), nullable=True),
    sa.Column('experiment_id', sa.Integer(), nullable=True),
    # MySQL-specific type; see upgrade_development.
    sa.Column('metadata_json', mysql.MEDIUMTEXT(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_experiment_thing_snapshots_experiment_id'), 'experiment_thing_snapshots', ['experiment_id'], unique=False)
    op.create_index(op.f('ix_experiment_thing_snapshots_experiment_thing_id'), 'experiment_thing_snapshots', ['experiment_thing_id'], unique=False)
    ### end Alembic commands ###
def downgrade_production():
    """Drop the experiment_thing_snapshots table and its indexes (production DB)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_experiment_thing_snapshots_experiment_thing_id'), table_name='experiment_thing_snapshots')
    op.drop_index(op.f('ix_experiment_thing_snapshots_experiment_id'), table_name='experiment_thing_snapshots')
    op.drop_table('experiment_thing_snapshots')
    ### end Alembic commands ###
| {
"repo_name": "c4fcm/CivilServant",
"path": "alembic/versions/a0f4fda7588f_add_model_for_snapshots_of_.py",
"copies": "1",
"size": "4337",
"license": "mit",
"hash": -4106635405305571000,
"line_mean": 42.37,
"line_max": 147,
"alpha_frac": 0.7057874107,
"autogenerated": false,
"ratio": 3.575432811211871,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4781220221911871,
"avg_score": null,
"num_lines": null
} |
"""Add moderator log table
Revision ID: ac1680bc48
Revises: b6880387f89d
Create Date: 2016-05-08 19:50:03.469322
"""
# revision identifiers, used by Alembic.
revision = 'ac1680bc48'
down_revision = 'b6880387f89d'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the moderatorlog table plus indexes on its queried columns."""
    op.create_table('moderatorlog',
    sa.Column('id', sa.Integer(), nullable=False),
    # BigInteger timestamp — presumably epoch-based; confirm against model.
    sa.Column('date', sa.BigInteger(), nullable=False),
    sa.Column('moderator_id', sa.Integer(), nullable=True),
    sa.Column('board_id', sa.Integer(), nullable=True),
    sa.Column('type', sa.Integer(), nullable=False),
    sa.Column('text', sa.String(), nullable=False),
    sa.ForeignKeyConstraint(['board_id'], ['board.id'], ),
    sa.ForeignKeyConstraint(['moderator_id'], ['moderator.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_moderatorlog_board_id'), 'moderatorlog', ['board_id'], unique=False)
    op.create_index(op.f('ix_moderatorlog_date'), 'moderatorlog', ['date'], unique=False)
    op.create_index(op.f('ix_moderatorlog_moderator_id'), 'moderatorlog', ['moderator_id'], unique=False)
    op.create_index(op.f('ix_moderatorlog_type'), 'moderatorlog', ['type'], unique=False)
def downgrade():
    """Drop the moderatorlog table and its indexes (reverse of upgrade)."""
    op.drop_index(op.f('ix_moderatorlog_type'), table_name='moderatorlog')
    op.drop_index(op.f('ix_moderatorlog_moderator_id'), table_name='moderatorlog')
    op.drop_index(op.f('ix_moderatorlog_date'), table_name='moderatorlog')
    op.drop_index(op.f('ix_moderatorlog_board_id'), table_name='moderatorlog')
    op.drop_table('moderatorlog')
| {
"repo_name": "Floens/uchan",
"path": "migrations/versions/ac1680bc48_add_moderator_log_table.py",
"copies": "1",
"size": "1759",
"license": "mit",
"hash": -4668925968336703000,
"line_mean": 40.880952381,
"line_max": 105,
"alpha_frac": 0.62706083,
"autogenerated": false,
"ratio": 3.497017892644135,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46240787226441354,
"avg_score": null,
"num_lines": null
} |
"""Add mod list objects
Revision ID: 566d08c75c6
Revises: 18af22fa9e4
Create Date: 2014-09-25 11:19:16.916489
"""
# revision identifiers, used by Alembic.
revision = '566d08c75c6'
down_revision = '18af22fa9e4'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add mod-list metadata columns and re-link modlistitem to its list."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('modlist', sa.Column('background', sa.String(length=32), nullable=True))
    op.add_column('modlist', sa.Column('bgOffsetY', sa.Integer(), nullable=True))
    op.add_column('modlist', sa.Column('created', sa.DateTime(), nullable=True))
    op.add_column('modlist', sa.Column('description', sa.Unicode(length=100000), nullable=True))
    op.add_column('modlist', sa.Column('name', sa.Unicode(length=1024), nullable=True))
    op.add_column('modlist', sa.Column('short_description', sa.Unicode(length=1000), nullable=True))
    op.add_column('modlistitem', sa.Column('mod_list_id', sa.Integer(), nullable=True))
    op.add_column('modlistitem', sa.Column('sort_index', sa.Integer(), nullable=True))
    # NOTE(review): list_id values are dropped, not copied into mod_list_id.
    op.drop_column('modlistitem', 'list_id')
    ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): restore list_id and drop the added columns."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('modlistitem', sa.Column('list_id', sa.INTEGER(), autoincrement=False, nullable=True))
    op.drop_column('modlistitem', 'sort_index')
    op.drop_column('modlistitem', 'mod_list_id')
    op.drop_column('modlist', 'short_description')
    op.drop_column('modlist', 'name')
    op.drop_column('modlist', 'description')
    op.drop_column('modlist', 'created')
    op.drop_column('modlist', 'bgOffsetY')
    op.drop_column('modlist', 'background')
    ### end Alembic commands ###
| {
"repo_name": "EIREXE/Olddulous",
"path": "alembic/versions/566d08c75c6_add_mod_list_objects.py",
"copies": "5",
"size": "1715",
"license": "mit",
"hash": -6916345028550936000,
"line_mean": 39.8333333333,
"line_max": 104,
"alpha_frac": 0.6874635569,
"autogenerated": false,
"ratio": 3.2116104868913857,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006583009611167608,
"num_lines": 42
} |
"""Add mod list objects
Revision ID: 566d08c75c6
Revises: 18af22fa9e4
Create Date: 2014-09-25 11:19:16.916489
"""
# revision identifiers, used by Alembic.
revision = '566d08c75c6'
down_revision = '18af22fa9e4'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add mod-list metadata columns and re-link modlistitem to its list."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('modlist', sa.Column('background', sa.String(length=32), nullable=True))
    op.add_column('modlist', sa.Column('bgOffsetY', sa.Integer(), nullable=True))
    op.add_column('modlist', sa.Column('created', sa.DateTime(), nullable=True))
    op.add_column('modlist', sa.Column('description', sa.Unicode(length=100000), nullable=True))
    op.add_column('modlist', sa.Column('name', sa.Unicode(length=1024), nullable=True))
    op.add_column('modlist', sa.Column('short_description', sa.Unicode(length=1000), nullable=True))
    op.add_column('modlistitem', sa.Column('mod_list_id', sa.Integer(), nullable=True))
    op.add_column('modlistitem', sa.Column('sort_index', sa.Integer(), nullable=True))
    # NOTE(review): list_id values are dropped, not copied into mod_list_id.
    op.drop_column('modlistitem', 'list_id')
    ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): restore list_id and drop the added columns."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('modlistitem', sa.Column('list_id', sa.INTEGER(), autoincrement=False, nullable=True))
    op.drop_column('modlistitem', 'sort_index')
    op.drop_column('modlistitem', 'mod_list_id')
    op.drop_column('modlist', 'short_description')
    op.drop_column('modlist', 'name')
    op.drop_column('modlist', 'description')
    op.drop_column('modlist', 'created')
    op.drop_column('modlist', 'bgOffsetY')
    op.drop_column('modlist', 'background')
    ### end Alembic commands ###
| {
"repo_name": "EIREXE/SpaceDock",
"path": "alembic/versions/566d08c75c6_add_mod_list_objects.py",
"copies": "2",
"size": "1757",
"license": "mit",
"hash": 5717819534314636000,
"line_mean": 39.8333333333,
"line_max": 104,
"alpha_frac": 0.6710301651,
"autogenerated": false,
"ratio": 3.25974025974026,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9898669255943976,
"avg_score": 0.006420233779256777,
"num_lines": 42
} |
"""Add mods and media
Revision ID: 2ef141f5132
Revises: 1fb18596264
Create Date: 2014-06-06 00:00:28.128296
"""
# revision identifiers, used by Alembic.
revision = '2ef141f5132'
down_revision = '1fb18596264'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Extend the mod table with publishing metadata and link media to mods."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('media', sa.Column('mod_id', sa.Integer(), nullable=True))
    op.add_column('mod', sa.Column('approved', sa.Boolean(), nullable=True))
    op.add_column('mod', sa.Column('created', sa.DateTime(), nullable=True))
    op.add_column('mod', sa.Column('description', sa.Unicode(length=100000), nullable=True))
    op.add_column('mod', sa.Column('donation_link', sa.String(length=128), nullable=True))
    op.add_column('mod', sa.Column('external_link', sa.String(length=128), nullable=True))
    op.add_column('mod', sa.Column('installation', sa.Unicode(length=100000), nullable=True))
    op.add_column('mod', sa.Column('keywords', sa.String(length=256), nullable=True))
    op.add_column('mod', sa.Column('license', sa.String(length=128), nullable=True))
    op.add_column('mod', sa.Column('published', sa.Boolean(), nullable=True))
    op.add_column('mod', sa.Column('user_id', sa.Integer(), nullable=True))
    op.add_column('mod', sa.Column('votes', sa.Integer(), nullable=True))
    ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): drop all added columns (reverse order of addition)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('mod', 'votes')
    op.drop_column('mod', 'user_id')
    op.drop_column('mod', 'published')
    op.drop_column('mod', 'license')
    op.drop_column('mod', 'keywords')
    op.drop_column('mod', 'installation')
    op.drop_column('mod', 'external_link')
    op.drop_column('mod', 'donation_link')
    op.drop_column('mod', 'description')
    op.drop_column('mod', 'created')
    op.drop_column('mod', 'approved')
    op.drop_column('media', 'mod_id')
    ### end Alembic commands ###
| {
"repo_name": "EIREXE/Olddulous",
"path": "alembic/versions/2ef141f5132_add_mods_and_media.py",
"copies": "5",
"size": "1956",
"license": "mit",
"hash": 6654335684167178000,
"line_mean": 39.75,
"line_max": 93,
"alpha_frac": 0.6646216769,
"autogenerated": false,
"ratio": 3.2224052718286655,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6387026948728666,
"avg_score": null,
"num_lines": null
} |
"""Add mods and media
Revision ID: 2ef141f5132
Revises: 1fb18596264
Create Date: 2014-06-06 00:00:28.128296
"""
# revision identifiers, used by Alembic.
revision = '2ef141f5132'
down_revision = '1fb18596264'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Extend the mod table with publishing metadata and link media to mods."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('media', sa.Column('mod_id', sa.Integer(), nullable=True))
    op.add_column('mod', sa.Column('approved', sa.Boolean(), nullable=True))
    op.add_column('mod', sa.Column('created', sa.DateTime(), nullable=True))
    op.add_column('mod', sa.Column('description', sa.Unicode(length=100000), nullable=True))
    op.add_column('mod', sa.Column('donation_link', sa.String(length=128), nullable=True))
    op.add_column('mod', sa.Column('external_link', sa.String(length=128), nullable=True))
    op.add_column('mod', sa.Column('installation', sa.Unicode(length=100000), nullable=True))
    op.add_column('mod', sa.Column('keywords', sa.String(length=256), nullable=True))
    op.add_column('mod', sa.Column('license', sa.String(length=128), nullable=True))
    op.add_column('mod', sa.Column('published', sa.Boolean(), nullable=True))
    op.add_column('mod', sa.Column('user_id', sa.Integer(), nullable=True))
    op.add_column('mod', sa.Column('votes', sa.Integer(), nullable=True))
    ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): drop all added columns (reverse order of addition)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('mod', 'votes')
    op.drop_column('mod', 'user_id')
    op.drop_column('mod', 'published')
    op.drop_column('mod', 'license')
    op.drop_column('mod', 'keywords')
    op.drop_column('mod', 'installation')
    op.drop_column('mod', 'external_link')
    op.drop_column('mod', 'donation_link')
    op.drop_column('mod', 'description')
    op.drop_column('mod', 'created')
    op.drop_column('mod', 'approved')
    op.drop_column('media', 'mod_id')
    ### end Alembic commands ###
| {
"repo_name": "EIREXE/SpaceDock",
"path": "alembic/versions/2ef141f5132_add_mods_and_media.py",
"copies": "2",
"size": "2004",
"license": "mit",
"hash": -1594080463561725400,
"line_mean": 39.75,
"line_max": 93,
"alpha_frac": 0.6487025948,
"autogenerated": false,
"ratio": 3.2745098039215685,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4923212398721569,
"avg_score": null,
"num_lines": null
} |
"""Add more FTS indexes
Revision ID: 79e770865675
Revises: fda76e047e87
Create Date: 2021-06-15 16:27:35.922384
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '79e770865675'
down_revision = 'fda76e047e87'
branch_labels = None
depends_on = None
def upgrade():
    """Add non-empty-title check constraints and GIN full-text-search indexes.

    PostgreSQL-specific: the indexes use to_tsvector('simple', ...) with the
    GIN access method.
    """
    # Constraint names are auto-prefixed by the naming convention: 'valid_title'
    # becomes e.g. 'ck_contributions_valid_title' (cf. downgrade below).
    op.create_check_constraint('valid_title', 'contributions', "title != ''", schema='events')
    op.create_check_constraint('valid_title', 'subcontributions', "title != ''", schema='events')
    op.create_index('ix_contributions_title_fts', 'contributions', [sa.text("to_tsvector('simple', title)")],
                    schema='events', postgresql_using='gin')
    op.create_index('ix_subcontributions_title_fts', 'subcontributions', [sa.text("to_tsvector('simple', title)")],
                    schema='events', postgresql_using='gin')
    op.create_index('ix_attachments_title_fts', 'attachments', [sa.text("to_tsvector('simple', title)")],
                    schema='attachments', postgresql_using='gin')
    op.create_index('ix_contributions_description_fts', 'contributions',
                    [sa.text("to_tsvector('simple', description)")],
                    schema='events', postgresql_using='gin')
    op.create_index('ix_subcontributions_description_fts', 'subcontributions',
                    [sa.text("to_tsvector('simple', description)")],
                    schema='events', postgresql_using='gin')
    op.create_index('ix_notes_html_fts', 'notes', [sa.text("to_tsvector('simple', html)")],
                    schema='events', postgresql_using='gin')
def downgrade():
    """Drop the check constraints (by their convention-generated names) and the FTS indexes."""
    op.drop_constraint('ck_contributions_valid_title', 'contributions', schema='events')
    op.drop_constraint('ck_subcontributions_valid_title', 'subcontributions', schema='events')
    op.drop_index('ix_contributions_title_fts', table_name='contributions', schema='events')
    op.drop_index('ix_subcontributions_title_fts', table_name='subcontributions', schema='events')
    op.drop_index('ix_attachments_title_fts', table_name='attachments', schema='attachments')
    op.drop_index('ix_contributions_description_fts', table_name='contributions', schema='events')
    op.drop_index('ix_subcontributions_description_fts', table_name='subcontributions', schema='events')
    op.drop_index('ix_notes_html_fts', table_name='notes', schema='events')
| {
"repo_name": "indico/indico",
"path": "indico/migrations/versions/20210615_1627_79e770865675_add_more_fts_indexes.py",
"copies": "1",
"size": "2368",
"license": "mit",
"hash": -3956013261716409000,
"line_mean": 50.4782608696,
"line_max": 115,
"alpha_frac": 0.6722972973,
"autogenerated": false,
"ratio": 3.5081481481481482,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9666138422593189,
"avg_score": 0.0028614045709916764,
"num_lines": 46
} |
"""add more indexes
Revision ID: b262149925a7
Revises: 92235b77ea53
Create Date: 2017-10-14 15:37:56.451984
"""
# revision identifiers, used by Alembic.
revision = 'b262149925a7'
down_revision = '92235b77ea53'
from collections import defaultdict
from alembic import op
import sqlalchemy as sa
from sqlalchemy import func
from appcomposer.db import db
from appcomposer.application import app
# Minimal reflection of the ActiveTranslationMessages table: only the columns
# this migration reads or filters on are declared.
metadata = db.MetaData()
ActiveTranslationMessage = db.Table('ActiveTranslationMessages', metadata,
    sa.Column('id', sa.Integer, nullable=True),
    sa.Column('key', sa.Unicode(length=255), nullable=True),
    sa.Column('bundle_id', sa.Integer, nullable=False),
)
def upgrade():
    """Deduplicate (bundle_id, key) rows, then add the unique constraints.

    The cleanup must run first or constraint creation would fail on the
    existing duplicates.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    with app.app_context():
        # All (key, bundle_id) pairs that occur more than once.
        duplicated_active_messages = list(db.session.query(ActiveTranslationMessage.c.key, ActiveTranslationMessage.c.bundle_id).group_by(ActiveTranslationMessage.c.bundle_id, ActiveTranslationMessage.c.key).having(func.count(ActiveTranslationMessage.c.id) > 1).all())
        keys = [ k for k, b in duplicated_active_messages ]
        bundle_ids = [ b for k, b in duplicated_active_messages ]
        all_results = defaultdict(list)
        # NOTE(review): key IN keys AND bundle_id IN bundle_ids is a superset
        # of the duplicated pairs, but only rows for listed pairs are deleted.
        for atm in db.session.query(ActiveTranslationMessage).filter(ActiveTranslationMessage.c.key.in_(keys), ActiveTranslationMessage.c.bundle_id.in_(bundle_ids)).all():
            all_results[atm.key, atm.bundle_id].append(atm)
        all_ids = []
        for key, bundle_id in duplicated_active_messages:
            # Keep the first fetched row of each pair (DB-dependent order),
            # delete the rest.
            for atm in all_results[key, bundle_id][1:]:
                all_ids.append(atm.id)
        delete_stmt = ActiveTranslationMessage.delete(ActiveTranslationMessage.c.id.in_(all_ids))
        connection = op.get_bind()
        connection.execute(delete_stmt)
    # None lets the configured naming convention generate the constraint names.
    op.create_unique_constraint(None, 'ActiveTranslationMessages', ['bundle_id', 'key'])
    op.create_unique_constraint(None, 'RepositoryApp2languages', ['repository_app_id', 'language_id'])
    # ### end Alembic commands ###
def downgrade():
    """Drop the unique constraints added by upgrade(). The deleted duplicate
    rows are not restored."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): dropping with name=None relies on the naming convention
    # to resolve the generated name — verify this works on the target DB.
    op.drop_constraint(None, 'RepositoryApp2languages', type_='unique')
    op.drop_constraint(None, 'ActiveTranslationMessages', type_='unique')
    # ### end Alembic commands ###
| {
"repo_name": "morelab/appcomposer",
"path": "alembic/versions/b262149925a7_add_more_indexes.py",
"copies": "3",
"size": "2277",
"license": "bsd-2-clause",
"hash": 5862836485731627000,
"line_mean": 36.3278688525,
"line_max": 268,
"alpha_frac": 0.7101449275,
"autogenerated": false,
"ratio": 3.5971563981042656,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5807301325604266,
"avg_score": null,
"num_lines": null
} |
"""Add More Indicies To Tables
Revision ID: 7b6a65c708b9
Revises: 7e1d5c529924
Create Date: 2016-10-25 20:58:47.659852
"""
# revision identifiers, used by Alembic.
revision = '7b6a65c708b9'
down_revision = '7e1d5c529924'
from alembic import op
import sqlalchemy as sa
import server
def upgrade():
    """Add single-column indexes on frequently filtered backup/enrollment/score columns."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_index(op.f('ix_backup_flagged'), 'backup', ['flagged'], unique=False)
    op.create_index(op.f('ix_backup_submit'), 'backup', ['submit'], unique=False)
    op.create_index(op.f('ix_backup_created'), 'backup', ['created'], unique=False)
    op.create_index(op.f('ix_enrollment_role'), 'enrollment', ['role'], unique=False)
    op.create_index(op.f('ix_score_archived'), 'score', ['archived'], unique=False)
    op.create_index(op.f('ix_score_kind'), 'score', ['kind'], unique=False)
    ### end Alembic commands ###
def downgrade():
    """Drop every index created by upgrade()."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_score_kind'), table_name='score')
    op.drop_index(op.f('ix_score_archived'), table_name='score')
    op.drop_index(op.f('ix_enrollment_role'), table_name='enrollment')
    op.drop_index(op.f('ix_backup_submit'), table_name='backup')
    op.drop_index(op.f('ix_backup_flagged'), table_name='backup')
    op.drop_index(op.f('ix_backup_created'), table_name='backup')
    ### end Alembic commands ###
| {
"repo_name": "Cal-CS-61A-Staff/ok",
"path": "migrations/versions/7b6a65c708b9_add_more_indicies_to_tables.py",
"copies": "1",
"size": "1406",
"license": "apache-2.0",
"hash": 188726225492086460,
"line_mean": 37,
"line_max": 85,
"alpha_frac": 0.673541963,
"autogenerated": false,
"ratio": 2.9788135593220337,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4152355522322034,
"avg_score": null,
"num_lines": null
} |
"""Add more info
Revision ID: 2639af76b6d1
Revises: 74932447fbf8
Create Date: 2018-02-19 22:55:52.627273
"""
# revision identifiers, used by Alembic.
revision = '2639af76b6d1'
down_revision = '74932447fbf8'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add an indexed nullable boolean `proxy_image_works` to both repository tables."""
    # ### commands auto generated by Alembic - please adjust! ###
    for table in ('RepositoryAppCheckUrls', 'RepositoryApps'):
        op.add_column(table, sa.Column('proxy_image_works', sa.Boolean(), nullable=True))
        op.create_index(op.f('ix_%s_proxy_image_works' % table), table,
                        ['proxy_image_works'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Remove the `proxy_image_works` column and its index from both tables."""
    # ### commands auto generated by Alembic - please adjust! ###
    for table in ('RepositoryApps', 'RepositoryAppCheckUrls'):
        op.drop_index(op.f('ix_%s_proxy_image_works' % table), table_name=table)
        op.drop_column(table, 'proxy_image_works')
    # ### end Alembic commands ###
| {
"repo_name": "porduna/appcomposer",
"path": "alembic/versions/2639af76b6d1_add_more_info.py",
"copies": "3",
"size": "1278",
"license": "bsd-2-clause",
"hash": -7797583732599806000,
"line_mean": 38.9375,
"line_max": 135,
"alpha_frac": 0.7175273865,
"autogenerated": false,
"ratio": 3.2110552763819094,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5428582662881909,
"avg_score": null,
"num_lines": null
} |
"""Add more locals
Revision ID: 3aac2c8dbcb6
Revises: 56d38b1172f9
Create Date: 2017-06-09 19:35:21.477009
"""
# revision identifiers, used by Alembic.
revision = '3aac2c8dbcb6'
down_revision = '56d38b1172f9'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add indexed local_datetime / local_timezone columns to UseLogs.

    NOTE(review): both columns are nullable=False with no server_default;
    this will fail if the UseLogs table already holds rows — confirm the
    table was empty when this migration shipped.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('UseLogs', sa.Column('local_datetime', sa.DateTime(), nullable=False))
    op.add_column('UseLogs', sa.Column('local_timezone', sa.Integer(), nullable=False))
    for column in ('local_datetime', 'local_timezone'):
        op.create_index(u'ix_UseLogs_%s' % column, 'UseLogs', [column], unique=False)
    ### end Alembic commands ###
def downgrade():
    """Drop the local_* indexes first, then the columns themselves."""
    ### commands auto generated by Alembic - please adjust! ###
    for column in ('local_timezone', 'local_datetime'):
        op.drop_index(u'ix_UseLogs_%s' % column, table_name='UseLogs')
    for column in ('local_timezone', 'local_datetime'):
        op.drop_column('UseLogs', column)
    ### end Alembic commands ###
| {
"repo_name": "go-lab/labmanager",
"path": "alembic/versions/3aac2c8dbcb6_add_more_locals.py",
"copies": "5",
"size": "1093",
"license": "bsd-2-clause",
"hash": 3222636577251648500,
"line_mean": 33.15625,
"line_max": 94,
"alpha_frac": 0.6971637694,
"autogenerated": false,
"ratio": 3.158959537572254,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6356123306972254,
"avg_score": null,
"num_lines": null
} |
# Add more specific tests to check the schema created for the table
'''Tests for file_to_db'''
from mysite import settings
from Testing import testhelpers as th
from Database.db_access import DBAccess
from scisheets.models import UploadedFiles
from mysite.helpers.file_to_db import (FileTable, SQLType)
import unittest
#import file_to_db as f2d
import sqlite3
import os
class TestFunctions(unittest.TestCase):
  """Unit tests for the module-level SQLType helper."""

  def testSQLType(self):
    # NOTE(review): the bare return disables this test; the assertions
    # below are unreachable until it is removed.
    return
    self.assertEqual(SQLType('aa'), 'TEXT')
    self.assertEqual(SQLType(33), 'INTEGER')
    self.assertEqual(SQLType(3.3), 'REAL')
class TestFileTable(unittest.TestCase):
  """Integration tests for FileTable against a throwaway copy of the DB.

  NOTE(review): setUp, tearDown and every test begin with a bare
  ``return``, so the whole suite is currently disabled; the bodies below
  are unreachable until those returns are removed.
  """

  def setUp(self):
    return
    # Use a copy of the real DB for these tests
    self.assertTrue(th.SetupTestDB())

  def tearDown(self):
    return
    # Make sure that the test DB is eliminated
    th.TearDownTestDB()

  def testDataTableList(self):
    """DataTableList should list a table after CreateAndPopulateTable, not before."""
    return
    dba = DBAccess(db_path=th.TEST_DB_PATH)
    ft = FileTable(th.FILE_PATH, db=th.TEST_DB_PATH)
    # Delete any stale registry row so the "before" snapshot cannot
    # already contain this table.
    sql_str = "DELETE FROM %s WHERE table_name='%s'" % (
        settings.UPLOAD_FILE_TABLE, ft._table_name)
    try:
      dba.ExecuteQuery(sql_str, commit=True)
    except:
      pass
    old_table_set = set(FileTable.DataTableList(db=th.TEST_DB_PATH))
    ft.CreateAndPopulateTable()
    new_table_set = set(FileTable.DataTableList(db=th.TEST_DB_PATH))
    new_row_set = set([ft._table_name])
    self.assertTrue(new_table_set.issuperset(new_row_set))
    self.assertFalse(old_table_set.issuperset(new_row_set))
    ft._ExecuteSQL('', CloseDB=True)

  def testRemoveUploadedFile(self):
    """RemoveUploadedFile should drop the table from the data-table list."""
    return
    dba = DBAccess(db_path=th.TEST_DB_PATH)
    ft = FileTable(th.FILE_PATH, db=th.TEST_DB_PATH)
    ft.CreateAndPopulateTable()
    table_set = set(FileTable.DataTableList(db=th.TEST_DB_PATH))
    self.assertTrue(table_set.issuperset(set([ft._table_name])))
    FileTable.RemoveUploadedFile(ft._filename, db=th.TEST_DB_PATH)
    remove_table_set = set(FileTable.DataTableList(db=th.TEST_DB_PATH))
    self.assertFalse(remove_table_set.issuperset(set([ft._table_name])))

  def testConstructor(self):
    """Constructor should derive filename and table name from the path."""
    return
    ft = FileTable(th.FILE_PATH, db=th.TEST_DB_PATH)
    self.assertEqual(ft._filename, th.FILE_NAME)
    self.assertEqual(ft._table_name, th.FILE_NAME)

  def test_AddFileToTable(self):
    """_AddFileToTable should register the file in the upload-file table."""
    return
    ft = FileTable(th.FILE_PATH, db=th.TEST_DB_PATH)
    ft._AddFileToTable()
    dba = DBAccess(db_path=th.TEST_DB_PATH)
    sql_str = "SELECT * FROM %s WHERE file_name='%s'" % (
        settings.UPLOAD_FILE_TABLE, th.FILE_NAME)
    rows, _ = dba.ExecuteQuery(sql_str)
    file_name = rows[0][0]
    self.assertEqual(file_name, th.FILE_NAME)

  def test_RemoveFileFromTable(self):
    """_RemoveFileFromTable should leave no model row for the file."""
    return
    ft = FileTable(th.FILE_PATH, db=th.TEST_DB_PATH)
    ft._RemoveFileFromTable()
    try:
      entry = UploadedFiles.objects.get(file_name=th.FILE_NAME)
      # Fixed: original called bare assertTrue(True), a NameError inside
      # unittest.TestCase methods (it is an instance method).
      self.assertTrue(True)  # This should succeed
    except:
      pass

  def testCreateAndPopulateTable(self):
    """CreateAndPopulateTable should load all three rows from the file."""
    return
    ft = FileTable(th.FILE_PATH, db=th.TEST_DB_PATH)
    ft.CreateAndPopulateTable()
    conn = sqlite3.connect(th.TEST_DB_PATH)
    sql_str = "SELECT * FROM %s" % ft._table_name
    rows = th.ExecuteSQL(conn, sql_str)
    self.assertEqual(len(rows), 3)
    row = rows[1]
    self.assertEqual(row[0], 2)
# Run the (currently return-disabled) suite when executed directly.
if __name__ == '__main__':
  unittest.main()
| {
"repo_name": "ScienceStacks/SciSheets",
"path": "mysite/mysite/helpers/test_file_to_db.py",
"copies": "2",
"size": "3334",
"license": "apache-2.0",
"hash": 6550488658828849000,
"line_mean": 31.0576923077,
"line_max": 72,
"alpha_frac": 0.6835632873,
"autogenerated": false,
"ratio": 3.1393596986817327,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9744904076283,
"avg_score": 0.01560378193974667,
"num_lines": 104
} |
# Add mouse controls
# add half size paddle after hitting back wall
# Breakout clone driven by a fuzzy-logic agent (NOTE: Python 2 module —
# see the print statement in collide_paddle below).
import math, pygame, sys, shutil, getpass
from pygame.locals import *
from fuzzy_agent import FuzzyAgent
from fuzzy_trajectory import FuzzyTrajectory
pygame.init()
fpsClock = pygame.time.Clock()
screen = pygame.display.set_mode((640, 480))  # create screen - 640 pix by 480 pix
pygame.display.set_caption('Breakout')  # set title bar
icon = pygame.image.load('breakout.png')
pygame.display.set_icon(icon)
# add the font; use PressStart2P, but otherwise default if not available
try:
    fontObj = pygame.font.Font('PressStart2P.ttf', 36)
except:
    fontObj = pygame.font.Font('freesansbold.ttf', 36)
# generic colors-------------------------------
red = pygame.Color(255, 0, 0)
green = pygame.Color(0, 255, 0)
blue = pygame.Color(0, 0, 255)
white = pygame.Color(255, 255, 255)
grey = pygame.Color(142, 142, 142)
black = pygame.Color(0, 0, 0)
# row colors (top row r1 ... bottom row r6)----
r1 = pygame.Color(200, 72, 72)
r2 = pygame.Color(198, 108, 58)
r3 = pygame.Color(180, 122, 48)
r4 = pygame.Color(162, 162, 42)
r5 = pygame.Color(72, 160, 72)
r6 = pygame.Color(67, 73, 202)
colors = [r1, r2, r3, r4, r5, r6]
# variables------------------------------------
controls = 'keys'  # control method
mousex, mousey = 0, 0  # mouse position
dx, dy = 18, 6  # dimensions of board (columns x rows of bricks)
bx, by = 50, 150  # board position (top-left pixel of the brick field)
score = 0  # score
wall1 = pygame.Rect(20, 100, 30, 380)  # walls of the game (left)
wall2 = pygame.Rect(590, 100, 30, 380)  # right wall
wall3 = pygame.Rect(20, 80, 600, 30)  # top wall
def new_board():
    """Return a fresh dx-by-dy brick grid with every brick intact (1)."""
    return [[1 for _ in range(dy)] for _ in range(dx)]
# Classes defined------------------------------
class Paddle:  # class for paddle vars
    """Class-level state for the player's paddle (used like a namespace)."""
    x = 320  # paddle centre x
    y = 450  # paddle top y
    size = 2  # 2 is normal size, 1 is half-size
    direction = 'none'  # NOTE(review): not read in this fuzzy variant — confirm
class Ball:  # class for ball vars
    """Per-ball state: position, per-frame step vector, lives and speed."""

    def __init__(self):
        self.x = 0
        self.y = 0
        self.remaining = 3  # lives left
        self.xPos = 1  # per-frame x step; rescaled by adjust() to match speed
        self.yPos = 1  # per-frame y step
        self.adjusted = False  # True once the step vector has been rescaled
        self.speed = 5
        self.collisions = 0
        self.alive = False
        self.moving = False

    def rect(self):
        """Bounding box of the 6x6 ball centred on (x, y)."""
        return pygame.Rect(self.x - 3, self.y - 3, 6, 6)

    def adjust(self):
        """Rescale (xPos, yPos) so the step vector's length equals speed."""
        magnitude = math.sqrt(self.xPos ** 2 + self.yPos ** 2)
        scale = self.speed / magnitude
        self.xPos *= scale
        self.yPos *= scale
        self.adjusted = True
# Functions defined----------------------------
def print_board(board, colors):  # prints the board
    """Draw every surviving brick (value 1) in its row colour."""
    for col in range(dx):
        for row in range(dy):
            if board[col][row] != 1:
                continue
            pygame.draw.rect(screen, colors[row],
                             ((col * 30) + bx, (row * 12) + by, 30, 12))
def print_paddle(paddle):  # prints the paddle
    """Draw the paddle as a 40x5 red bar centred on paddle.x (normal size only)."""
    if paddle.size != 2:
        return
    pygame.draw.rect(screen, red, (paddle.x - 20, paddle.y, 40, 5))
def collide_paddle(paddle, ball):  # recalculates the trajectory for the ball after collision with the paddle
    """Recompute the ball's step vector after a paddle hit.

    The fuzzy controller maps the horizontal offset between ball and
    paddle centre to a new x velocity; yPos is set upward (-1).
    Returns (adjusted, xPos, yPos) so the caller can unpack them.
    """
    ball.adjusted = False
    if ball.x - paddle.x != 0:
        difference=float(ball.x) - float(paddle.x)
        trajectory=FuzzyTrajectory().compute(difference)
        ball.xPos = float(trajectory)
        ball.yPos = -1
    else:
        # Dead-centre hit. NOTE(review): yPos = 1 sends the ball back down,
        # unlike the -1 above — confirm this is intended. Python 2 print.
        print "ball position ", ball.x, " paddle position ", ball.yPos
        ball.xPos = 0
        ball.yPos = 1
    return ball.adjusted, float(ball.xPos), float(ball.yPos)
def write(x, y, color, msg):  # prints onto the screen in selected font
    """Render msg at (x, y) in the configured font and blit it to the screen."""
    surface = fontObj.render(msg, False, color)
    box = surface.get_rect()
    box.topleft = (x, y)
    screen.blit(surface, box)
def game(score, paddle, ball, board, wall1, agent):  # The game itself
    """Play one life: step the ball, handle collisions and scoring.

    Mutates ball, paddle and board in place. The loop ends when the ball
    is lost (y > 460) or score reaches 432; returns the updated score.
    """
    # starting variables
    running = True
    ball.alive = True
    ball.moving = False
    ball.x = 53
    ball.y = 300
    ball.collisions, ball.speed = 0, 5
    colO = False  # check collision with the orange row, for speed purposes
    colR = False  # same but for red row
    ball.speed = 5
    ball.xPos = 1
    ball.yPos = 1
    ball.adjusted = False
    while running:
        # Draw all the things------------------------------
        screen.fill(black)
        pygame.draw.rect(screen, grey, wall1)
        pygame.draw.rect(screen, grey, wall2)
        pygame.draw.rect(screen, grey, wall3)
        pygame.draw.rect(screen, red, (ball.x - 3, ball.y - 3, 6, 6))  # drawing ball
        print_board(board, colors)
        print_paddle(paddle)
        write(20, 20, grey, str(score))  # score
        temp = 0
        for life in range(ball.remaining):  # drawing life rectangles on the right side
            if life != 0:
                pygame.draw.rect(screen, red, (600, 400 - temp, 10, 10))
                temp += 15
        # check all the collisions-------------------------
        if ball.moving:
            if not ball.adjusted:
                ball.adjust()
            ball.x += ball.xPos
            ball.y += ball.yPos
            # Paddle collision window: ball near paddle height and within its span.
            if ball.y < 455 and ball.y > 445:
                if ball.x > paddle.x - 20 and ball.x < paddle.x + 20:
                    ball.adjusted, ball.xPos, ball.yPos = collide_paddle(paddle, ball)  # paddle collide
                    ball.collisions += 1
                    # increase ball speeds at 4 hits on paddle, 12 hits, orange row, red row
                    if ball.collisions % 4 == 0:
                        ball.speed += 0.24
            # check wall collide----------------------------
            if wall1.colliderect(ball.rect()) or wall2.colliderect(ball.rect()):
                ball.xPos = -(ball.xPos)
            if wall3.colliderect(ball.rect()):
                ball.yPos = -(ball.yPos)
            # check collision with bricks-------------------
            Break = False  # set when a brick is destroyed; at most one per frame
            for x in range(dx):
                for y in range(dy):
                    if board[x][y] == 1:
                        block = pygame.Rect(30 * x + bx - 1, 12 * y + by - 1, 32, 14)
                        if block.collidepoint(ball.x, ball.y):
                            board[x][y] = 0
                            ball.yPos = -ball.yPos
                            # Scoring by row: bottom rows 1pt, middle 4pts, top 7pts.
                            if y == 4 or y == 5:
                                score += 1
                            elif y == 2 or y == 3:
                                score += 4
                                if colO == False:
                                    colO = True
                                    ball.speed += 0.25
                            else:
                                score += 7
                                if colR == False:
                                    colR = True
                                    ball.speed += 0.5
                            Break = True
                            # ball.speed += 1
                    if Break:
                        break
                if Break:
                    break
            if ball.y > 460:
                ball.alive = False
            if score == 432:
                running = False
        # check if ball was lost
        if not ball.alive:
            running = False
            ball.remaining -= 1
            agent_move(ball, paddle, agent)
        # get user input
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
            elif event.type == KEYDOWN:
                if event.key == K_SPACE:
                    if not ball.moving:
                        ball.moving = True
        # update display
        pygame.display.update()
        fpsClock.tick(120)
    return score
def agent_move(ball, paddle, agent):
    """Shift the paddle by the fuzzy agent's response to the paddle-ball gap."""
    gap = paddle.x - ball.x
    paddle.x = paddle.x + agent.compute(gap)
# -----------------------------------------------------
if __name__ == '__main__':
    # First pass through the loop only arms `replay`; later passes play.
    replay = False
    loop = 0
    while True:
        screen.fill(black)
        if replay:
            board = new_board()
            score = 0
            paddle = Paddle()
            ball = Ball()
            agent = FuzzyAgent()
            while ball.remaining > 0:
                score = game(score, paddle, ball, board, wall1, agent)
                if score == 432:
                    replay = True
                    break
            if ball.remaining == 0:
                # Wipe the screen tile by tile once all lives are gone.
                for x in range(16):
                    for y in range(12):
                        pygame.draw.rect(screen, black, (x * 40, y * 40, 40, 40))
                        pygame.display.update()
                        pygame.time.wait(10)
            # Count the surviving bricks; 0 means the board was cleared.
            boardcheck = 0
            for x in range(len(board)):
                for y in range(len(board[x])):
                    boardcheck += board[x][y]
            if boardcheck == 0:
                # Board cleared: reset and play a second board with the same score.
                paddle = Paddle()
                ball = Ball()
                board = new_board()
                while ball.remaining > 0:
                    score = game(score, paddle, ball, board, wall1, agent)
                    if score == 432:
                        for x in range(16):
                            for y in range(12):
                                pygame.draw.rect(screen, black, (x * 40, y * 40, 40, 40))
                                pygame.display.update()
                                pygame.time.wait(10)
                if ball.remaining == 0:
                    for x in range(16):
                        for y in range(12):
                            pygame.draw.rect(screen, black, (x * 40, y * 40, 40, 40))
                            pygame.display.update()
                            pygame.time.wait(10)
                    replay = False
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
        replay = True
        loop += 1
        pygame.display.update()
| {
"repo_name": "ElchinValiyev/GameAI",
"path": "Project_3/breakout/breakout.py",
"copies": "1",
"size": "10308",
"license": "mit",
"hash": 4095935882233494500,
"line_mean": 34.7916666667,
"line_max": 109,
"alpha_frac": 0.4779782693,
"autogenerated": false,
"ratio": 3.914925939992404,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4892904209292404,
"avg_score": null,
"num_lines": null
} |
# Add mouse controls
# add half size paddle after hitting back wall
# Breakout clone with a trajectory-predicting paddle agent.
import math, pygame, sys, shutil, getpass
from pygame.locals import *
pygame.init()
fpsClock = pygame.time.Clock()
screen = pygame.display.set_mode((640, 480))  # create screen - 640 pix by 480 pix
pygame.display.set_caption('Breakout')  # set title bar
# add the font; use PressStart2P, but otherwise default if not available
try:
    fontObj = pygame.font.Font('PressStart2P.ttf', 36)
except:
    fontObj = pygame.font.Font('freesansbold.ttf', 36)
# generic colors-------------------------------
red = pygame.Color(255, 0, 0)
green = pygame.Color(0, 255, 0)
blue = pygame.Color(0, 0, 255)
white = pygame.Color(255, 255, 255)
grey = pygame.Color(142, 142, 142)
black = pygame.Color(0, 0, 0)
# row colors (top row r1 ... bottom row r6)----
r1 = pygame.Color(200, 72, 72)
r2 = pygame.Color(198, 108, 58)
r3 = pygame.Color(180, 122, 48)
r4 = pygame.Color(162, 162, 42)
r5 = pygame.Color(72, 160, 72)
r6 = pygame.Color(67, 73, 202)
colors = [r1, r2, r3, r4, r5, r6]
# variables------------------------------------
controls = 'keys'  # control method
mousex, mousey = 0, 0  # mouse position
dx, dy = 18, 6  # dimensions of board (columns x rows of bricks)
bx, by = 50, 150  # board position (top-left pixel of the brick field)
score = 0  # score
wall1 = pygame.Rect(20, 100, 30, 380)  # walls of the game (left)
wall2 = pygame.Rect(590, 100, 30, 380)  # right wall
wall3 = pygame.Rect(20, 80, 600, 30)  # top wall
def new_board():
    """Return a fresh dx-by-dy brick grid with every brick intact (1)."""
    return [[1 for _ in range(dy)] for _ in range(dx)]
# Classes defined------------------------------
class Paddle:  # class for paddle vars
    """Class-level state for the player's paddle (used like a namespace)."""
    x = 320  # paddle centre x
    y = 450  # paddle top y
    size = 2  # 2 is normal size, 1 is half-size
    direction = 'none'  # current movement: 'left', 'right' or 'none'
class Ball:  # class for ball vars
    """Per-ball state: position, per-frame step vector, lives and speed."""

    def __init__(self):
        self.x = 0
        self.y = 0
        self.remaining = 3  # lives left
        self.xPos = 1  # per-frame x step; rescaled by adjust() to match speed
        self.yPos = 1  # per-frame y step
        self.adjusted = False  # True once the step vector has been rescaled
        self.speed = 5
        self.collisions = 0
        self.alive = False
        self.moving = False

    def rect(self):
        """Bounding box of the 6x6 ball centred on (x, y)."""
        return pygame.Rect(self.x - 3, self.y - 3, 6, 6)

    def adjust(self):
        """Rescale (xPos, yPos) so the step vector's length equals speed."""
        magnitude = math.sqrt(self.xPos ** 2 + self.yPos ** 2)
        scale = self.speed / magnitude
        self.xPos = scale * self.xPos
        self.yPos = scale * self.yPos
        self.adjusted = True
# Functions defined----------------------------
def print_board(board, colors):  # prints the board
    """Draw every surviving brick (value 1) in its row colour."""
    for col in range(dx):
        for row in range(dy):
            if board[col][row] != 1:
                continue
            pygame.draw.rect(screen, colors[row],
                             ((col * 30) + bx, (row * 12) + by, 30, 12))
def print_paddle(paddle):  # prints the paddle
    """Draw the paddle as a 40x5 red bar centred on paddle.x (normal size only)."""
    if paddle.size != 2:
        return
    pygame.draw.rect(screen, red, (paddle.x - 20, paddle.y, 40, 5))
def collide_paddle(paddle, ball):  # recalculates the trajectory for the ball after collision with the paddle
    """Recompute the ball's step vector after a paddle hit.

    The farther from the paddle centre the ball lands, the steeper the
    outgoing x component (offset / 8); yPos is set upward (-1). Returns
    (adjusted, xPos, yPos) so the caller can unpack them.
    """
    ball.adjusted = False
    offset = ball.x - paddle.x
    if offset != 0:
        ball.xPos = offset / 8
        ball.yPos = -1
    else:
        # Dead-centre hit: straight vertical bounce.
        ball.xPos = 0
        ball.yPos = 1
    return ball.adjusted, float(ball.xPos), float(ball.yPos)
def write(x, y, color, msg):  # prints onto the screen in selected font
    """Render msg at (x, y) in the configured font and blit it to the screen."""
    surface = fontObj.render(msg, False, color)
    box = surface.get_rect()
    box.topleft = (x, y)
    screen.blit(surface, box)
def game(score, paddle, ball, board, wall1):  # The game itself
    """Play one life: step the ball, move the agent's paddle, score bricks.

    Mutates ball, paddle and board in place; the loop ends when the ball
    falls past y 460. Returns the updated score.
    """
    # starting variables
    running = True
    ball.alive = True
    ball.moving = False
    ball.x = 53
    ball.y = 300
    ball.collisions, ball.speed = 0, 5
    colO = False  # check collision with the orange row, for speed purposes
    colR = False  # same but for red row
    ball.speed = 5
    ball.xPos = 1
    ball.yPos = 1
    ball.adjusted = False
    while running:
        # Draw all the things------------------------------
        screen.fill(black)
        pygame.draw.rect(screen, grey, wall1)
        pygame.draw.rect(screen, grey, wall2)
        pygame.draw.rect(screen, grey, wall3)
        pygame.draw.rect(screen, red, (ball.x - 3, ball.y - 3, 6, 6))
        print_board(board, colors)
        print_paddle(paddle)
        write(20, 20, grey, str(score))
        temp = 0
        for life in range(ball.remaining):
            if life != 0:
                pygame.draw.rect(screen, red, (600, 400 - temp, 10, 10))
                temp += 15
        # check all the collisions-------------------------
        if ball.moving:
            if ball.adjusted == False:
                ball.adjust()
            ball.x += ball.xPos
            ball.y += ball.yPos
            # Paddle collision window: ball near paddle height and within its span.
            if ball.y < 455 and ball.y > 445:
                if ball.x > paddle.x - 20 and ball.x < paddle.x + 20:
                    ball.adjusted, ball.xPos, ball.yPos = collide_paddle(paddle, ball)  # paddle collide
                    ball.collisions += 1
                    # increase ball speeds at 4 hits on paddle, 12 hits, orange row, red row
                    if ball.collisions % 4 == 0:
                        ball.speed += 1
            # check wall collide----------------------------
            if wall1.colliderect(ball.rect()) or wall2.colliderect(ball.rect()):
                ball.xPos = -(ball.xPos)
            if wall3.colliderect(ball.rect()):
                ball.yPos = -(ball.yPos)
            # check collision with bricks-------------------
            Break = False  # set when a brick is destroyed; at most one per frame
            for x in range(dx):
                for y in range(dy):
                    if board[x][y] == 1:
                        block = pygame.Rect(30 * x + bx - 1, 12 * y + by - 1, 32, 14)
                        if block.collidepoint(ball.x, ball.y):
                            board[x][y] = 0
                            ball.yPos = -ball.yPos
                            # Scoring by row: bottom rows 1pt, middle 4pts, top 7pts.
                            if y == 4 or y == 5:
                                score += 1
                            elif y == 2 or y == 3:
                                score += 4
                                if colO == False:
                                    colO = True
                                    ball.speed += 1
                            else:
                                score += 7
                                if colR == False:
                                    colR = True
                                    ball.speed += 2
                            Break = True
                            # ball.speed += 1
                    if Break:
                        break
                if Break:
                    break
            if ball.y > 460:
                ball.alive = False
        # check if ball was lost
        if not ball.alive:
            running = False
            ball.remaining -= 1
            agent_move(ball, paddle)
        # move paddle
        if paddle.direction == 'right':
            if paddle.x <= 561:
                paddle.x += 8
        elif paddle.direction == 'left':
            if paddle.x >= 79:
                paddle.x -= 8
        # get user input
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
            elif event.type == KEYDOWN:
                if event.key == K_SPACE:
                    if not ball.moving:
                        ball.moving = True
        # update display
        pygame.display.update()
        fpsClock.tick(90)
    return score
def agent_move(ball, paddle):
    """Steer the paddle toward the ball's predicted landing x.

    While the ball descends, project where it will cross the paddle's
    height (reflecting off the side walls at x = 50 and x = 590);
    otherwise just track the ball's current x. Sets paddle.direction
    with an 8-pixel dead zone around the target.
    """
    if ball.yPos > 0:  # if ball goes down: calculate falling point
        steps = (paddle.y - ball.y) / ball.yPos
        target = ball.x + steps * ball.xPos
        if target > 590:
            target = 590 - (target % 590)
        if target < 50:
            target = math.fabs(50 - target) + 50
    else:  # if ball goes up: try to follow
        target = ball.x
    if target > paddle.x + 8:
        paddle.direction = 'right'
    elif target < paddle.x - 8:
        paddle.direction = 'left'
    else:
        paddle.direction = 'none'
# -----------------------------------------------------
if __name__ == '__main__':
    # First pass through the loop only arms `replay`; later passes play.
    replay = False
    loop = 0
    while True:
        screen.fill(black)
        if replay:
            board = new_board()
            score = 0
            paddle = Paddle()
            ball = Ball()
            while ball.remaining > 0:
                score = game(score, paddle, ball, board, wall1)
            if ball.remaining == 0:
                # Wipe the screen tile by tile once all lives are gone.
                for x in range(16):
                    for y in range(12):
                        pygame.draw.rect(screen, black, (x * 40, y * 40, 40, 40))
                        pygame.display.update()
                        pygame.time.wait(10)
            # Count the surviving bricks; 0 means the board was cleared.
            boardcheck = 0
            for x in range(len(board)):
                for y in range(len(board[x])):
                    boardcheck += board[x][y]
            if boardcheck == 0:
                # Board cleared: reset and play a second board with the same score.
                paddle = Paddle()
                ball = Ball()
                board = new_board()
                while ball.remaining > 0:
                    score = game(score, paddle, ball, board, wall1)
                if ball.remaining == 0:
                    for x in range(16):
                        for y in range(12):
                            pygame.draw.rect(screen, black, (x * 40, y * 40, 40, 40))
                            pygame.display.update()
                            pygame.time.wait(10)
                    replay = False
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
        replay = True
        loop += 1
        pygame.display.update()
| {
"repo_name": "ElchinValiyev/GameAI",
"path": "Project_2/bricka/breakout.py",
"copies": "1",
"size": "10079",
"license": "mit",
"hash": -6303919351263107000,
"line_mean": 33.3993174061,
"line_max": 109,
"alpha_frac": 0.4779243973,
"autogenerated": false,
"ratio": 3.8120272314674737,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47899516287674737,
"avg_score": null,
"num_lines": null
} |
"""Add movie metadata (imdb rating, number of votes, metascore) and relevancy
Revision ID: 2c240cb3edd1
Revises: 588336e02ca
Create Date: 2014-02-09 13:46:18.630000
"""
# revision identifiers, used by Alembic.
revision = '2c240cb3edd1'
down_revision = '588336e02ca'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add IMDB metadata and relevancy columns to movie, defaulting to 0."""
    ### commands auto generated by Alembic - please adjust! ###
    new_columns = (
        sa.Column('imdb_rating', sa.Float(), nullable=False, server_default='0'),
        sa.Column('metascore', sa.Integer(), nullable=False, server_default='0'),
        sa.Column('number_of_imdb_votes', sa.Integer(), nullable=False, server_default='0'),
        sa.Column('relevancy', sa.Float(), nullable=False, server_default='0'),
    )
    for column in new_columns:
        op.add_column('movie', column)
    ### end Alembic commands ###
def downgrade():
    """Drop the columns added by upgrade(), in reverse order."""
    ### commands auto generated by Alembic - please adjust! ###
    for name in ('relevancy', 'number_of_imdb_votes', 'metascore', 'imdb_rating'):
        op.drop_column('movie', name)
    ### end Alembic commands ###
| {
"repo_name": "streamr/marvin",
"path": "migrations/versions/2c240cb3edd1_.py",
"copies": "1",
"size": "1137",
"license": "mit",
"hash": -735231393272295800,
"line_mean": 34.53125,
"line_max": 111,
"alpha_frac": 0.6860158311,
"autogenerated": false,
"ratio": 3.2485714285714287,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44345872596714286,
"avg_score": null,
"num_lines": null
} |
"""Add multi-column indexes to appropriation and object class program activity table
Revision ID: 51b1bbc0bfde
Revises: 4ebc7a781b31
Create Date: 2019-07-26 12:03:39.154057
"""
# revision identifiers, used by Alembic.
revision = '51b1bbc0bfde'
down_revision = '4ebc7a781b31'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function (e.g. data_broker)."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_data_broker():
    """Add composite (tas_id, submission_id) indexes to two tables."""
    # ### commands auto generated by Alembic - please adjust! ###
    for name, table in (
        ('ix_appropriation_tas_id_submission_id', 'appropriation'),
        ('ix_oc_pa_tas_id_submission_id', 'object_class_program_activity'),
    ):
        op.create_index(name, table, ['tas_id', 'submission_id'], unique=False)
    # ### end Alembic commands ###
def downgrade_data_broker():
    """Drop the composite indexes created by upgrade_data_broker()."""
    # ### commands auto generated by Alembic - please adjust! ###
    for name, table in (
        ('ix_oc_pa_tas_id_submission_id', 'object_class_program_activity'),
        ('ix_appropriation_tas_id_submission_id', 'appropriation'),
    ):
        op.drop_index(name, table_name=table)
    # ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/51b1bbc0bfde_add_multi_column_indexes_to_.py",
"copies": "1",
"size": "1253",
"license": "cc0-1.0",
"hash": 321826603692573060,
"line_mean": 28.8333333333,
"line_max": 128,
"alpha_frac": 0.7094972067,
"autogenerated": false,
"ratio": 3.2801047120418847,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44896019187418845,
"avg_score": null,
"num_lines": null
} |
"""Add multicolumn indexes to published_award_financial_assistance
Revision ID: ae35bd44ec9a
Revises: 66ce64f4c1da
Create Date: 2018-09-19 09:15:27.852093
"""
# revision identifiers, used by Alembic.
revision = 'ae35bd44ec9a'
down_revision = '66ce64f4c1da'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function (e.g. data_broker)."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_data_broker():
    """Add composite lookup indexes (fain/uri + sub-tier + is_active) to PAFA."""
    # ### commands auto generated by Alembic - please adjust! ###
    for name, leading_col in (
        ('ix_pafa_fain_awarding_sub_tier_is_active', 'fain'),
        ('ix_pafa_uri_awarding_sub_tier_is_active', 'uri'),
    ):
        op.create_index(name, 'published_award_financial_assistance',
                        [leading_col, 'awarding_sub_tier_agency_c', 'is_active'], unique=False)
    # ### end Alembic commands ###
def downgrade_data_broker():
    """Drop the composite indexes created by upgrade_data_broker()."""
    # ### commands auto generated by Alembic - please adjust! ###
    for name in (
        'ix_pafa_uri_awarding_sub_tier_is_active',
        'ix_pafa_fain_awarding_sub_tier_is_active',
    ):
        op.drop_index(name, table_name='published_award_financial_assistance')
    # ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/ae35bd44ec9a_add_multicolumn_indexes_to_published_.py",
"copies": "1",
"size": "1326",
"license": "cc0-1.0",
"hash": 1468482552120254200,
"line_mean": 30.5714285714,
"line_max": 170,
"alpha_frac": 0.7119155354,
"autogenerated": false,
"ratio": 2.9932279909706545,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42051435263706544,
"avg_score": null,
"num_lines": null
} |
"""Add multicolumn upper indexes to published_award_financial_assistance
Revision ID: be4dcb9eede6
Revises: 6e3be68b87ae
Create Date: 2020-07-30 14:41:38.848315
"""
# revision identifiers, used by Alembic.
revision = 'be4dcb9eede6'
down_revision = '6e3be68b87ae'
branch_labels = None
depends_on = None
from alembic import op
from sqlalchemy import text
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function (e.g. data_broker)."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_data_broker():
    """Create functional UPPER() indexes via raw SQL (not expressible with op.create_index here)."""
    # ### commands auto generated by Alembic - please adjust! ###
    statements = (
        'CREATE INDEX ix_pafa_fain_awarding_subtier_upper ON published_award_financial_assistance (UPPER(fain), UPPER(awarding_sub_tier_agency_c));',
        'CREATE INDEX ix_pafa_uri_awarding_subtier_upper ON published_award_financial_assistance (UPPER(uri), UPPER(awarding_sub_tier_agency_c));',
    )
    for sql in statements:
        op.execute(sql)
    # ### end Alembic commands ###
def downgrade_data_broker():
    """Drop the functional UPPER() indexes created by upgrade_data_broker()."""
    # ### commands auto generated by Alembic - please adjust! ###
    for name in (
        'ix_pafa_uri_awarding_subtier_upper',
        'ix_pafa_fain_awarding_subtier_upper',
    ):
        op.drop_index(name, table_name='published_award_financial_assistance')
    # ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/be4dcb9eede6_add_multicolumn_upper_indexes_to_.py",
"copies": "1",
"size": "1298",
"license": "cc0-1.0",
"hash": 8853717881610677000,
"line_mean": 29.9047619048,
"line_max": 156,
"alpha_frac": 0.7249614792,
"autogenerated": false,
"ratio": 3.075829383886256,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9164165187261555,
"avg_score": 0.02732513516494021,
"num_lines": 42
} |
"""add multigame tables
Revision ID: 4e0500347ce7
Revises: 29344aa34d9
Create Date: 2016-04-05 23:51:58.647657
"""
# revision identifiers, used by Alembic.
revision = '4e0500347ce7'
down_revision = '29344aa34d9'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Give gameversion, mod and modlist a nullable game_id FK to game.id."""
    ### commands auto generated by Alembic - please adjust! ###
    for table in ('gameversion', 'mod', 'modlist'):
        op.add_column(table, sa.Column('game_id', sa.Integer(), nullable=True))
        op.create_foreign_key('%s_game_id_fkey' % table, table, 'game', ['game_id'], ['id'])
    ### end Alembic commands ###
def downgrade():
    """Remove the game_id FK constraints and columns, in reverse order."""
    ### commands auto generated by Alembic - please adjust! ###
    for table in ('modlist', 'mod', 'gameversion'):
        op.drop_constraint('%s_game_id_fkey' % table, table, type_='foreignkey')
        op.drop_column(table, 'game_id')
    ### end Alembic commands ###
| {
"repo_name": "EIREXE/Olddulous",
"path": "alembic/versions/4e0500347ce7_add_multigame_tables.py",
"copies": "1",
"size": "1360",
"license": "mit",
"hash": 5725122559588898000,
"line_mean": 36.7777777778,
"line_max": 97,
"alpha_frac": 0.6632352941,
"autogenerated": false,
"ratio": 3.076923076923077,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9207652263839914,
"avg_score": 0.006501221436632841,
"num_lines": 36
} |
"""add multigame tables
Revision ID: 4e0500347ce7
Revises: 29344aa34d9
Create Date: 2016-04-05 23:51:58.647657
"""
# revision identifiers, used by Alembic.
revision = '4e0500347ce7'
down_revision = '29344aa34d9'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Give gameversion, mod and modlist a nullable game_id FK to game.id."""
    ### commands auto generated by Alembic - please adjust! ###
    for table in ('gameversion', 'mod', 'modlist'):
        op.add_column(table, sa.Column('game_id', sa.Integer(), nullable=True))
        op.create_foreign_key('%s_game_id_fkey' % table, table, 'game', ['game_id'], ['id'])
    ### end Alembic commands ###
def downgrade():
    """Reverse the multigame upgrade: drop each table's game FK, then its column."""
    # Reverse creation order; the constraint must go before the column it covers.
    for tbl in ('modlist', 'mod', 'gameversion'):
        op.drop_constraint('%s_game_id_fkey' % tbl, tbl, type_='foreignkey')
        op.drop_column(tbl, 'game_id')
| {
"repo_name": "EIREXE/SpaceDock",
"path": "alembic/versions/4e0500347ce7_add_multigame_tables.py",
"copies": "2",
"size": "1396",
"license": "mit",
"hash": 1701918314813213400,
"line_mean": 36.7777777778,
"line_max": 97,
"alpha_frac": 0.6461318052,
"autogenerated": false,
"ratio": 3.1230425055928412,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4769174310792841,
"avg_score": null,
"num_lines": null
} |
"""Add multi-label support"""
# revision identifiers, used by Alembic.
revision = '0fd16cdac8ca'
down_revision = 'a1343ebd31c7'
import sqlalchemy as sa
import sqlalchemy_utils
from alembic import op
from sqlalchemy.dialects import postgresql
def upgrade():
    """Migrate from one label per problem to many labels per problem.

    Order matters: new tables/columns are created nullable, existing data is
    backfilled with raw SQL, and only then are NOT NULL constraints applied
    and the old single-label columns dropped.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('problem_label',
        sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(), server_default=sa.text('uuid_generate_v4()'), nullable=False),
        sa.Column('problem_id', sqlalchemy_utils.types.uuid.UUIDType(), nullable=False),
        sa.Column('label', sa.Unicode(length=255), nullable=False),
        sa.ForeignKeyConstraint(['problem_id'], ['problem.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    # Per-label probabilities; rows disappear with their dataset row or label
    # (ON DELETE CASCADE on both foreign keys).
    op.create_table('dataset_label_probability',
        sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(), server_default=sa.text('uuid_generate_v4()'), nullable=False),
        sa.Column('data_id', sqlalchemy_utils.types.uuid.UUIDType(), nullable=False),
        sa.Column('label_id', sqlalchemy_utils.types.uuid.UUIDType(), nullable=False),
        sa.Column('probability', sa.Float(), nullable=True),
        sa.CheckConstraint('probability >= 0 AND probability <= 1', name='chk_dataset_probability'),
        sa.ForeignKeyConstraint(['data_id'], ['dataset.id'], ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['label_id'], ['problem_label.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id')
    )
    # Nullable for now so the backfill below can run against existing rows.
    op.add_column('label_event', sa.Column('label_id', sqlalchemy_utils.types.uuid.UUIDType(), nullable=True))
    op.add_column('problem', sa.Column('name', sa.Unicode(length=255), nullable=True))
    # One problem_label row per existing problem, copying its old label text.
    op.execute('''
        INSERT INTO problem_label(problem_id, label)
        SELECT id, label FROM problem
    ''')
    op.execute('''
        INSERT INTO dataset_label_probability(data_id, label_id, probability)
        SELECT id, (SELECT id FROM problem_label WHERE problem_id = dataset.problem_id), probability FROM dataset
    ''')
    # NOTE(review): this matches on label text alone, not scoped by problem —
    # assumes labels were globally unique before this migration; verify.
    op.execute('''
        UPDATE label_event SET
            label_id = (SELECT id FROM problem_label WHERE label = label_event.label)
    ''')
    op.execute('''
        UPDATE problem SET
            name = (SELECT label FROM problem_label WHERE problem_id = problem.id)
    ''')
    # Backfill complete: enforce NOT NULL and drop the legacy columns.
    op.alter_column('label_event', 'label_id', nullable=False)
    op.alter_column('problem', 'name', nullable=False)
    op.create_index(op.f('ix_dataset_label_probability_data_id'), 'dataset_label_probability', ['data_id'], unique=False)
    op.create_index(op.f('ix_dataset_label_probability_label_id'), 'dataset_label_probability', ['label_id'], unique=False)
    op.drop_column('dataset', 'probability')
    op.drop_column('problem', 'label')
    op.create_index(op.f('ix_label_event_label_id'), 'label_event', ['label_id'], unique=False)
    # FK name is None: generated from the metadata naming convention.
    op.create_foreign_key(None, 'label_event', 'problem_label', ['label_id'], ['id'], ondelete='CASCADE')
    op.drop_column('label_event', 'label')
    # ### end Alembic commands ###
def downgrade():
    """Restore single-label columns from the multi-label tables, then drop them.

    Lossy when a problem has more than one label: each correlated subquery
    keeps only one value per row.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('label_event', sa.Column('label', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
    op.add_column('dataset', sa.Column('probability', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True))
    op.add_column('problem', sa.Column('label', sa.Unicode(length=255), nullable=True))
    # Backfill the restored columns from the multi-label tables.
    op.execute('''
        UPDATE label_event SET label = (SELECT label FROM problem_label WHERE problem_label.id = label_event.label_id)
    ''')
    op.execute('''
        UPDATE dataset SET probability = (SELECT probability FROM dataset_label_probability WHERE dataset_label_probability.data_id = dataset.id)
    ''')
    op.execute('''
        UPDATE problem SET label = (SELECT label FROM problem_label WHERE problem_label.problem_id = problem.id)
    ''')
    # Backfill done: re-apply NOT NULL, then remove the multi-label schema.
    op.alter_column('label_event', 'label', nullable=False)
    op.alter_column('problem', 'label', nullable=False)
    op.drop_column('problem', 'name')
    op.drop_index(op.f('ix_label_event_label_id'), table_name='label_event')
    op.drop_column('label_event', 'label_id')
    op.drop_index(op.f('ix_dataset_label_probability_label_id'), table_name='dataset_label_probability')
    op.drop_index(op.f('ix_dataset_label_probability_data_id'), table_name='dataset_label_probability')
    op.drop_table('dataset_label_probability')
    op.drop_table('problem_label')
    # ### end Alembic commands ###
| {
"repo_name": "planbrothers/ml-annotate",
"path": "annotator/migrations/versions/0fd16cdac8ca_add_multi_label_support.py",
"copies": "1",
"size": "4518",
"license": "mit",
"hash": 1875779414728077300,
"line_mean": 47.5806451613,
"line_max": 145,
"alpha_frac": 0.6739707835,
"autogenerated": false,
"ratio": 3.667207792207792,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4841178575707792,
"avg_score": null,
"num_lines": null
} |
"""Add multiple organization support and add seed organization
Revision ID: b19224a5c1dd
Revises: a1ed2f75cb13
Create Date: 2016-03-18 20:41:33.024198
"""
# revision identifiers, used by Alembic.
revision = 'b19224a5c1dd'
down_revision = 'a1ed2f75cb13'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the organization table, seed organization 1, attach every
    tenant-scoped table to it, and add the organization_* roles.
    """
    ### commands auto generated by Alembic - please adjust! ###
    org_table = op.create_table('organization',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=80), nullable=True),
        sa.Column('description', sa.String(length=255), nullable=True),
        sa.Column('parent_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['parent_id'], ['organization.id'], ),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name')
    )
    # Seed organization that all pre-existing rows will belong to.
    op.bulk_insert(org_table, [
        {'id': 1, 'name': 'betterlife', 'description': u'Betterlife'},
    ])
    tenant_tables = (
        'user', 'customer', 'expense', 'incoming', 'inventory_transaction',
        'preference', 'product', 'product_category', 'purchase_order',
        'receiving', 'sales_order', 'shipping', 'supplier',
    )
    # Nullable FK column on every tenant-scoped table (FK name auto-generated).
    for tbl in tenant_tables:
        op.add_column(tbl, sa.Column('organization_id', sa.Integer(), nullable=True))
        op.create_foreign_key(None, tbl, 'organization', ['organization_id'], ['id'])
    from sqlalchemy.sql import text
    # Point all existing rows at the seed organization. "user" is a reserved
    # word in PostgreSQL and must be quoted.
    for tbl in tenant_tables:
        quoted = '"user"' if tbl == 'user' else tbl
        op.get_bind().execute(text('UPDATE %s SET organization_id = 1;' % quoted))
    op.get_bind().execute(text("ALTER SEQUENCE organization_id_seq RESTART WITH 2;"))
    role_table = sa.table('role',
        sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
        sa.Column('name', sa.String(length=80), nullable=True),
        sa.Column('description', sa.String(length=255), nullable=True),
    )
    # Next free role id; max(id)+1 is NULL on an empty table, so keep the
    # 49 fallback in that case.
    res = op.get_bind().execute('SELECT max(id)+1 FROM role')
    rm = 49
    for r in res.fetchall():
        # BUG FIX: previously `rm = r[0]` unconditionally, so a NULL result
        # clobbered the fallback and `rm + 1` below raised TypeError.
        if r[0] is not None:
            rm = r[0]
    # NOTE(review): descriptions mention "customers" for organization roles —
    # looks copy-pasted; left unchanged to avoid altering seeded data.
    op.bulk_insert(role_table, [
        {'id': rm, 'name': 'organization_view', 'description': 'View customers'},
        {'id': rm + 1, 'name': 'organization_create', 'description': 'Create customers'},
        {'id': rm + 2, 'name': 'organization_edit', 'description': 'Edit customers'},
        {'id': rm + 3, 'name': 'organization_delete', 'description': 'Delete customers'},
    ], multiinsert=False)
    op.get_bind().execute(text("ALTER SEQUENCE role_id_seq RESTART WITH " + str(rm + 4) + ";"))
    ### end Alembic commands ###
def downgrade():
    """Remove the organization roles, every tenant FK/column, and the table."""
    ### commands auto generated by Alembic - please adjust! ###
    from sqlalchemy.sql import text
    op.get_bind().execute(text("DELETE FROM role WHERE name IN ('organization_view', 'organization_create', 'organization_edit','organization_delete')"))
    # Reverse order of creation; constraint must be dropped before its column.
    for tbl in ('supplier', 'shipping', 'sales_order', 'receiving',
                'purchase_order', 'product_category', 'product', 'preference',
                'inventory_transaction', 'incoming', 'expense', 'customer',
                'user'):
        op.drop_constraint('%s_organization_id_fkey' % tbl, tbl, type_='foreignkey')
        op.drop_column(tbl, 'organization_id')
    op.drop_table('organization')
    ### end Alembic commands ###
| {
"repo_name": "betterlife/psi",
"path": "psi/migrations/versions/17_b19224a5c1dd_.py",
"copies": "2",
"size": "7937",
"license": "mit",
"hash": -3413085169860915000,
"line_mean": 62.496,
"line_max": 153,
"alpha_frac": 0.6545294192,
"autogenerated": false,
"ratio": 3.7350588235294118,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5389588242729412,
"avg_score": null,
"num_lines": null
} |
"""add name,location,about_me,member_since,last_seen to User model
Revision ID: 52e7af0253c6
Revises: 577579399e83
Create Date: 2017-09-26 01:46:51.978788
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '52e7af0253c6'
down_revision = '577579399e83'
branch_labels = None
depends_on = None
def upgrade():
    """Add the user-profile columns (about_me, last_seen, location,
    member_since, name) to the users table, all nullable."""
    profile_columns = (
        sa.Column('about_me', sa.Text(), nullable=True),
        sa.Column('last_seen', sa.DateTime(), nullable=True),
        sa.Column('location', sa.String(length=64), nullable=True),
        sa.Column('member_since', sa.DateTime(), nullable=True),
        sa.Column('name', sa.String(length=64), nullable=True),
    )
    for column in profile_columns:
        op.add_column('users', column)
def downgrade():
    """Drop the profile columns added by this revision, in reverse order."""
    for column_name in ('name', 'member_since', 'location', 'last_seen', 'about_me'):
        op.drop_column('users', column_name)
| {
"repo_name": "mikkylok/mikky.lu",
"path": "migrations/versions/52e7af0253c6_add_name_location_about_me_member_since_.py",
"copies": "1",
"size": "1197",
"license": "mit",
"hash": -6265436489120376000,
"line_mean": 32.25,
"line_max": 86,
"alpha_frac": 0.6750208855,
"autogenerated": false,
"ratio": 3.1835106382978724,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9351893622886518,
"avg_score": 0.001327580182270976,
"num_lines": 36
} |
"""Add name to RLMS
Revision ID: 298c12227419
Revises: 229fd8d6f159
Create Date: 2015-05-08 13:50:33.747142
"""
# revision identifiers, used by Alembic.
revision = '298c12227419'
down_revision = '229fd8d6f159'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
    """Make laboratories.laboratory_id/name NOT NULL and add rlmss.name."""
    for column_name in ('laboratory_id', 'name'):
        op.alter_column('laboratories', column_name,
                        existing_type=mysql.VARCHAR(length=255),
                        nullable=False)
    op.add_column('rlmss', sa.Column('name', sa.Unicode(length=50), nullable=True))
def downgrade():
    """Drop rlmss.name and relax the laboratories NOT NULL constraints."""
    op.drop_column('rlmss', 'name')
    for column_name in ('name', 'laboratory_id'):
        op.alter_column('laboratories', column_name,
                        existing_type=mysql.VARCHAR(length=255),
                        nullable=True)
| {
"repo_name": "go-lab/labmanager",
"path": "alembic/versions/298c12227419_add_name_to_rlms.py",
"copies": "5",
"size": "1188",
"license": "bsd-2-clause",
"hash": 9035770104710729000,
"line_mean": 30.2631578947,
"line_max": 83,
"alpha_frac": 0.6447811448,
"autogenerated": false,
"ratio": 3.6,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.67447811448,
"avg_score": null,
"num_lines": null
} |
"""Add narrative table
Revision ID: e2aef4260e1f
Revises: a57a6a486f83
Create Date: 2016-11-17 22:24:36.546851
"""
# revision identifiers, used by Alembic.
revision = 'e2aef4260e1f'
down_revision = 'a57a6a486f83'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Multi-database setup: dispatch to the upgrade_<engine_name>() below."""
    globals()['upgrade_%s' % engine_name]()
def downgrade(engine_name):
    """Multi-database setup: dispatch to the downgrade_<engine_name>() below."""
    globals()['downgrade_%s' % engine_name]()
def upgrade_data_broker():
    """Create submission_narrative: one free-text narrative per
    (submission, file type) pair, enforced by a unique constraint."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('submission_narrative',
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('submission_narrative_id', sa.Integer(), nullable=False),
        sa.Column('submission_id', sa.Integer(), nullable=False),
        sa.Column('file_type_id', sa.Integer(), nullable=False),
        sa.Column('narrative', sa.Text(), nullable=False),
        sa.ForeignKeyConstraint(['file_type_id'], ['file_type.file_type_id'], name='fk_file_type'),
        sa.ForeignKeyConstraint(['submission_id'], ['submission.submission_id'], name='fk_submission'),
        sa.PrimaryKeyConstraint('submission_narrative_id'),
        # At most one narrative per submission/file-type combination.
        sa.UniqueConstraint('submission_id', 'file_type_id', name='uniq_submission_file_type')
    )
    ### end Alembic commands ###
def downgrade_data_broker():
    """Drop the submission_narrative table (reverse of upgrade_data_broker)."""
    op.drop_table('submission_narrative')
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/e2aef4260e1f_add_narrative_table.py",
"copies": "1",
"size": "1512",
"license": "cc0-1.0",
"hash": 1674208704169947100,
"line_mean": 28.6470588235,
"line_max": 99,
"alpha_frac": 0.6891534392,
"autogenerated": false,
"ratio": 3.352549889135255,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.94051152881015,
"avg_score": 0.0273176080467509,
"num_lines": 51
} |
"""add_new_biobank_codes
Revision ID: 9b93d00a35cc
Revises: 574efa4de1ba
Create Date: 2018-01-26 07:59:58.808302
"""
import model.utils
import sqlalchemy as sa
from alembic import op
from rdr_service.participant_enums import OrderStatus, SampleStatus
# revision identifiers, used by Alembic.
revision = "9b93d00a35cc"
down_revision = "574efa4de1ba"
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Multi-database setup: dispatch to the upgrade_<engine_name>() below."""
    globals()['upgrade_%s' % engine_name]()
def downgrade(engine_name):
    """Multi-database setup: dispatch to the downgrade_<engine_name>() below."""
    globals()['downgrade_%s' % engine_name]()
def upgrade_rdr():
    """Add order-status and sample-status columns (plus *_time companions)
    for biobank codes 1PS08 and 1SS08 to participant_summary."""
    for base, enum_cls in (("sample_order_status", OrderStatus),
                           ("sample_status", SampleStatus)):
        for code in ("1ps08", "1ss08"):
            op.add_column(
                "participant_summary",
                sa.Column("%s_%s" % (base, code), model.utils.Enum(enum_cls), nullable=True),
            )
            op.add_column(
                "participant_summary",
                sa.Column("%s_%s_time" % (base, code), model.utils.UTCDateTime(), nullable=True),
            )
def downgrade_rdr():
    """Drop the 1PS08/1SS08 columns added by upgrade_rdr, in reverse order."""
    for base in ("sample_status", "sample_order_status"):
        for code in ("1ss08", "1ps08"):
            op.drop_column("participant_summary", "%s_%s_time" % (base, code))
            op.drop_column("participant_summary", "%s_%s" % (base, code))
def upgrade_metrics():
    """No schema changes for the metrics database in this revision."""
def downgrade_metrics():
    """No schema changes for the metrics database in this revision."""
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/9b93d00a35cc_add_new_biobank_codes.py",
"copies": "1",
"size": "2732",
"license": "bsd-3-clause",
"hash": -5080634067696032000,
"line_mean": 33.15,
"line_max": 116,
"alpha_frac": 0.6866764275,
"autogenerated": false,
"ratio": 3.389578163771712,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45762545912717123,
"avg_score": null,
"num_lines": null
} |
"""add new biosample codes
Revision ID: 3da004006210
Revises: c2dd2332a63f
Create Date: 2018-03-01 09:20:45.647001
"""
import model.utils
import sqlalchemy as sa
from alembic import op
from rdr_service.participant_enums import OrderStatus, SampleStatus
# revision identifiers, used by Alembic.
revision = "3da004006210"
down_revision = "c2dd2332a63f"
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Multi-database setup: dispatch to the upgrade_<engine_name>() below."""
    globals()['upgrade_%s' % engine_name]()
def downgrade(engine_name):
    """Multi-database setup: dispatch to the downgrade_<engine_name>() below."""
    globals()['downgrade_%s' % engine_name]()
def upgrade_rdr():
    """Add order-status and sample-status columns (plus *_time companions)
    for biosample codes 2PST8 and 2SST8 to participant_summary."""
    for base, enum_cls in (("sample_order_status", OrderStatus),
                           ("sample_status", SampleStatus)):
        for code in ("2pst8", "2sst8"):
            op.add_column(
                "participant_summary",
                sa.Column("%s_%s" % (base, code), model.utils.Enum(enum_cls), nullable=True),
            )
            op.add_column(
                "participant_summary",
                sa.Column("%s_%s_time" % (base, code), model.utils.UTCDateTime(), nullable=True),
            )
def downgrade_rdr():
    """Drop the 2PST8/2SST8 columns added by upgrade_rdr, in reverse order."""
    for base in ("sample_status", "sample_order_status"):
        for code in ("2sst8", "2pst8"):
            op.drop_column("participant_summary", "%s_%s_time" % (base, code))
            op.drop_column("participant_summary", "%s_%s" % (base, code))
def upgrade_metrics():
    """No schema changes for the metrics database in this revision."""
def downgrade_metrics():
    """No schema changes for the metrics database in this revision."""
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/3da004006210_add_new_biosample_codes.py",
"copies": "1",
"size": "2734",
"license": "bsd-3-clause",
"hash": 882917413750195200,
"line_mean": 33.175,
"line_max": 116,
"alpha_frac": 0.6869056328,
"autogenerated": false,
"ratio": 3.3382173382173383,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45251229710173385,
"avg_score": null,
"num_lines": null
} |
"""Add new categories and add order column
Revision ID: 1145cc4ac43b
Revises: 2a1c7cc1743b
Create Date: 2015-01-26 20:39:32.921622
"""
# revision identifiers, used by Alembic.
revision = '1145cc4ac43b'
down_revision = '2a1c7cc1743b'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table
def upgrade():
    """Add an 'order' column to shopping_category, insert the new categories,
    re-rank/re-limit the surviving ones, and delete the retired ones."""
    op.add_column('shopping_category',
        sa.Column('order', sa.Integer))
    # Lightweight table construct for the data manipulation below (the schema
    # change above is the only DDL in this migration).
    shopping_categories = table(
        'shopping_category',
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('name', sa.Unicode(75), nullable=False),
        sa.Column('daily_limit', sa.Integer, nullable=True),
        sa.Column('monthly_limit', sa.Integer, nullable=False),
        sa.Column('family_wide', sa.Boolean, nullable=False),
        sa.Column('order', sa.Integer, nullable=False)
    )
    # New categories with explicit ids and display order.
    op.bulk_insert(
        shopping_categories,
        [
            {'id': 7, 'name': 'Accessories',
             'daily_limit': 2, 'monthly_limit': 8,
             'family_wide': False,
             'order': 3},
            {'id': 8, 'name': 'Socks/Underwear',
             'daily_limit': 1, 'monthly_limit': 4,
             'family_wide': False,
             'order': 5},
            {'id': 9, 'name': 'Toys',
             'daily_limit': 1, 'monthly_limit': 4,
             'family_wide': False,
             'order': 6},
            {'id': 10, 'name': 'Books',
             'daily_limit': 2, 'monthly_limit': 8,
             'family_wide': False,
             'order': 7},
            {'id': 11, 'name': 'Seasonal',
             'daily_limit': 1, 'monthly_limit': 4,
             'family_wide': False,
             'order': 9},
        ])
    # Assign display order (and tightened limits) to pre-existing categories.
    # NOTE(review): numeric values are passed as inline string literals ('1',
    # '2', ...) into integer columns — relies on DB-side coercion; verify.
    op.execute(
        shopping_categories.update().
        where(shopping_categories.c.name == op.inline_literal('Clothing')).
        values({'order': op.inline_literal('1')})
    )
    op.execute(
        shopping_categories.update().
        where(shopping_categories.c.name == op.inline_literal('Household')).
        values({'order': op.inline_literal('2'),
                'daily_limit': op.inline_literal('2'),
                'monthly_limit': op.inline_literal('2')})
    )
    op.execute(
        shopping_categories.update().
        where(shopping_categories.c.name == op.inline_literal('Shoes')).
        values({'order': op.inline_literal('4'),
                'daily_limit': op.inline_literal('1'),
                'monthly_limit': op.inline_literal('4')})
    )
    op.execute(
        shopping_categories.update().
        where(shopping_categories.c.name == op.inline_literal('Baby')).
        values({'order': op.inline_literal('8'),
                'daily_limit': op.inline_literal('1'),
                'monthly_limit': op.inline_literal('4')})
    )
    # Retired categories are removed outright.
    op.execute(
        shopping_categories.delete().
        where(shopping_categories.c.name == op.inline_literal('Coats'))
    )
    op.execute(
        shopping_categories.delete().
        where(shopping_categories.c.name == op.inline_literal('Other'))
    )
def downgrade():
    """Intentionally a no-op: the category reshuffle above is not reversed."""
| {
"repo_name": "jlutz777/FreeStore",
"path": "alembic/versions/1145cc4ac43b_add_new_categories_and_add_order_column.py",
"copies": "1",
"size": "3135",
"license": "mit",
"hash": -4949983003625736000,
"line_mean": 30.6666666667,
"line_max": 76,
"alpha_frac": 0.536523126,
"autogenerated": false,
"ratio": 3.723277909738717,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47598010357387166,
"avg_score": null,
"num_lines": null
} |
"""add new column member_count to cert table
Revision ID: de313b629296
Revises: 32c1cbb13ec2
Create Date: 2017-07-20 08:55:47.250651
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = 'de313b629296'
down_revision = '32c1cbb13ec2'
def upgrade():
    """Add certificate.member_count and make farmer.gender/name mandatory."""
    # NOTE(review): NOT NULL without a server default can fail on a
    # non-empty MySQL table in strict mode — confirm certificate is empty.
    op.add_column('certificate',
                  sa.Column('member_count', sa.Integer(), nullable=False))
    for column_name, column_type in (('gender', mysql.ENUM('male', 'female')),
                                     ('name', mysql.VARCHAR(length=80))):
        op.alter_column('farmer', column_name,
                        existing_type=column_type,
                        nullable=False)
def downgrade():
    """Relax the farmer constraints and drop certificate.member_count."""
    for column_name, column_type in (('name', mysql.VARCHAR(length=80)),
                                     ('gender', mysql.ENUM('male', 'female'))):
        op.alter_column('farmer', column_name,
                        existing_type=column_type,
                        nullable=True)
    op.drop_column('certificate', 'member_count')
| {
"repo_name": "hieulq/pgscm",
"path": "migrations/versions/de313b629296_add_member_count_cert_table.py",
"copies": "2",
"size": "1250",
"license": "apache-2.0",
"hash": -1074896123597830500,
"line_mean": 31.0512820513,
"line_max": 72,
"alpha_frac": 0.6112,
"autogenerated": false,
"ratio": 3.787878787878788,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5399078787878787,
"avg_score": null,
"num_lines": null
} |
"""Add new enum value
Revision ID: 5a974b8e4966
Revises: b88ba80940cb
Create Date: 2018-03-23 08:47:40.976539
"""
# revision identifiers, used by Alembic.
revision = '5a974b8e4966'
down_revision = 'b88ba80940cb'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import text
from sqlalchemy_utils.types import TSVectorType
from sqlalchemy_searchable import make_searchable
import sqlalchemy_utils
# Patch in knowledge of the citext type, so it reflects properly.
from sqlalchemy.dialects.postgresql.base import ischema_names
import citext
import queue
import datetime
from sqlalchemy.dialects.postgresql import ENUM
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.dialects.postgresql import TSVECTOR
ischema_names['citext'] = citext.CIText
from sqlalchemy.dialects import postgresql
def upgrade():
    """Append 'specialty_blocked' to the PostgreSQL dlstate_enum type."""
    # ALTER TYPE ... ADD VALUE cannot run inside a transaction block, so end
    # Alembic's implicit transaction before issuing it.
    op.execute("COMMIT")
    op.execute("ALTER TYPE dlstate_enum ADD VALUE 'specialty_blocked';")
def downgrade():
    """Irreversible: PostgreSQL provides no way to drop an enum value."""
pass | {
"repo_name": "fake-name/ReadableWebProxy",
"path": "alembic/versions/00041_5a974b8e4966_add_new_enum_value.py",
"copies": "1",
"size": "1195",
"license": "bsd-3-clause",
"hash": 1472732596268778800,
"line_mean": 26.1818181818,
"line_max": 72,
"alpha_frac": 0.7682008368,
"autogenerated": false,
"ratio": 3.6769230769230767,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9854947597215167,
"avg_score": 0.01803526330158173,
"num_lines": 44
} |
"""Add new field 'is_restricted' to SqlMetric and DruidMetric
Revision ID: d8bc074f7aad
Revises: 1226819ee0e3
Create Date: 2016-06-07 12:33:25.756640
"""
# revision identifiers, used by Alembic.
revision = 'd8bc074f7aad'
down_revision = '1226819ee0e3'
from alembic import op
import sqlalchemy as sa
from caravel import db
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import (
Column, Integer, Boolean)
Base = declarative_base()
class DruidMetric(Base):
    """Minimal mapped copy of the metrics table, used only for the backfill
    query in upgrade(); kept independent of the application models, whose
    schema may not match this migration's point in history."""
    __tablename__ = 'metrics'
    id = Column(Integer, primary_key=True)
    # Flag introduced by this migration; backfilled to False in upgrade().
    is_restricted = Column(Boolean, default=False, nullable=True)
class SqlMetric(Base):
    """Minimal mapped copy of the sql_metrics table, used only for the
    backfill query in upgrade(); kept independent of the application models."""
    __tablename__ = 'sql_metrics'
    id = Column(Integer, primary_key=True)
    # Flag introduced by this migration; backfilled to False in upgrade().
    is_restricted = Column(Boolean, default=False, nullable=True)
def upgrade():
    """Add is_restricted to metrics and sql_metrics, backfilled to False."""
    op.add_column('metrics', sa.Column('is_restricted', sa.Boolean(), nullable=True))
    op.add_column('sql_metrics', sa.Column('is_restricted', sa.Boolean(), nullable=True))
    session = db.Session(bind=op.get_bind())
    # Backfill through the file-local declarative classes above, not the
    # application models, whose context may differ from this migration's.
    for model_cls in (DruidMetric, SqlMetric):
        for row in session.query(model_cls).all():
            row.is_restricted = False
    session.commit()
    session.close()
def downgrade():
    """Drop `is_restricted` from both tables.

    batch_alter_table is used so the drop also works on SQLite, which
    cannot ALTER COLUMN in place.
    """
    with op.batch_alter_table('sql_metrics', schema=None) as batch_op:
        batch_op.drop_column('is_restricted')
    with op.batch_alter_table('metrics', schema=None) as batch_op:
        batch_op.drop_column('is_restricted')
| {
"repo_name": "georgeke/caravel",
"path": "caravel/migrations/versions/d8bc074f7aad_add_new_field_is_restricted_to_.py",
"copies": "8",
"size": "1732",
"license": "apache-2.0",
"hash": 3997271819468351000,
"line_mean": 28.8620689655,
"line_max": 89,
"alpha_frac": 0.6997690531,
"autogenerated": false,
"ratio": 3.4570858283433132,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8156854881443313,
"avg_score": null,
"num_lines": null
} |
"""Add new Indexes for faster searching
Revision ID: 3097d57f3f0b
Revises: 4fe230f7a26e
Create Date: 2021-06-19 20:18:55.332165
"""
# revision identifiers, used by Alembic.
revision = '3097d57f3f0b'
down_revision = '4fe230f7a26e'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create three indexes to speed up certificate searches.

    Bug fix: the original called ``sqlalchemy.text(...)`` but the module
    is only imported as ``sa``, so this revision raised NameError when
    run. Uses ``sa.text`` instead.
    """
    # Partial index (PostgreSQL only): index just the rows that actually
    # have a root authority.
    op.create_index(
        'ix_root_authority_id',
        'certificates',
        ['root_authority_id'],
        unique=False,
        postgresql_where=sa.text("root_authority_id IS NOT NULL"))
    op.create_index(
        'certificate_associations_certificate_id_idx',
        'certificate_associations',
        ['certificate_id'],
        unique=False)
    op.create_index(
        'ix_certificates_serial',
        'certificates',
        ['serial'],
        unique=False)
def downgrade():
    """Drop the three search indexes created by upgrade()."""
    op.drop_index(
        'ix_root_authority_id',
        table_name='certificates')
    op.drop_index(
        'certificate_associations_certificate_id_idx',
        table_name='certificate_associations')
    op.drop_index(
        'ix_certificates_serial',
        table_name='certificates')
| {
"repo_name": "Netflix/lemur",
"path": "lemur/migrations/versions/3097d57f3f0b_.py",
"copies": "1",
"size": "1099",
"license": "apache-2.0",
"hash": -6762456171640848000,
"line_mean": 23.9772727273,
"line_max": 74,
"alpha_frac": 0.6405823476,
"autogenerated": false,
"ratio": 3.55663430420712,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9680856599881211,
"avg_score": 0.0032720103851817408,
"num_lines": 44
} |
"""Add new member to Google Sheet."""
import httplib2
import logging
import re
from apiclient import discovery
from oauth2client.service_account import ServiceAccountCredentials
from Config import get_config, get_credentials
def get_service():
    """Build an authorized Google Sheets v4 API client.

    Uses service-account credentials from the app config; the returned
    object exposes the spreadsheets() resource used by the other helpers.
    """
    creds = get_credentials()
    scopes = ['https://www.googleapis.com/auth/spreadsheets']
    credentials = ServiceAccountCredentials.from_json_keyfile_dict(
        keyfile_dict=creds.to_dict(),
        scopes=scopes
    )
    http = credentials.authorize(httplib2.Http())
    discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'
                    'version=v4')
    return discovery.build('sheets', 'v4', http=http,
                           discoveryServiceUrl=discoveryUrl)
def add_member(row):
    """Append one member row to the membership sheet's 'SquareTemp' range.

    See:
    - https://developers.google.com/sheets/api/quickstart/python (API)
    - http://gspread.readthedocs.io/en/latest/oauth2.html (ServiceAccountCredentials)

    `row` is expected to carry member attributes plus a Square `order`
    whose first line item holds the purchase — TODO confirm against caller.
    """
    service = get_service()
    sheet = get_config().membership_sheet_id
    rangeName = 'SquareTemp'
    values = [
        [row.first_name,
         row.last_name,
         row.email,
         row.phone,
         row.address1,
         row.address2,
         row.towncity,
         row.state,
         row.zip,
         # Square amounts are in cents; render as "$D.CC".
         "${:0.2f}".format(row.order.line_items[0].base_price_money.amount / 100),
         row.order.line_items[0].name,
         "Online purchase",
         str(row.created_at),
         row.square_id,
         row.transactionId,
         "",
         "",
         ", ".join(row.interests)]
    ]
    result = service.spreadsheets().values().append(  # pylint: disable=no-member
        spreadsheetId=sheet,
        range=rangeName,
        valueInputOption="USER_ENTERED",
        body={'values': values}).execute()
    logging.info("Got from Sheets: {}".format(result))
def format_result(row):
    """Map a raw sheet row onto a dict keyed by member-field names.

    Pairs each field name with the corresponding cell; a short row simply
    yields fewer keys, and extra cells beyond the known fields are dropped.
    """
    field_names = [
        'first_name', 'last_name', 'email', 'phone', 'address1', 'address2',
        'towncity', 'state', 'zip', 'amount',
        'level', 'payment_type', 'created_at', 'square_id', 'transactionId',
        'ad', 'ed', 'interests',
    ]
    return dict(zip(field_names, row))
def get_member(email=None, phone=None):
    """Look up a member row in the sheet by email and/or phone.

    The phone argument is normalized to "(DDD) DDD-DDDD" before matching,
    because that is how numbers are stored in the sheet — TODO confirm.
    Returns the first matching row as a dict (see format_result), or None.
    """
    if phone:
        # Extract the 3-3-4 digit groups regardless of input punctuation.
        m = re.search(r'(\d{3}).*(\d{3}).*(\d{4})', phone)
        if m:
            formatted_phone = "({}) {}-{}".format(m.group(1), m.group(2), m.group(3))
        else:
            formatted_phone = phone
    else:
        formatted_phone = None
    service = get_service()
    sheet = get_config().membership_sheet_id
    rangeName = 'Sheet1'
    result = service.spreadsheets().values().get(  # pylint: disable=no-member
        spreadsheetId=sheet,
        range=rangeName,
        majorDimension="ROWS",
        valueRenderOption="UNFORMATTED_VALUE",
        dateTimeRenderOption="SERIAL_NUMBER").execute()
    values = result.get('values', [])
    # Columns: row[2] is email, row[3] is phone (matches format_result order).
    for row in values:
        if email and formatted_phone:
            # Both email and phone provided
            if (row[2] == email) and (row[3] == formatted_phone):
                return format_result(row)
        elif email:
            if row[2] == email:
                return format_result(row)
        elif formatted_phone:
            if row[3] == formatted_phone:
                return format_result(row)
def add_purchase(row):
    """Append one shop purchase to the shop sheet's 'Sheet1' range.

    Mirrors add_member() but targets the shop sheet and includes
    employer/occupation, quantity, and order total columns.
    """
    service = get_service()
    sheet = get_config().shop_sheet_id
    rangeName = 'Sheet1'
    values = [
        [row.first_name,
         row.last_name,
         row.email,
         row.phone,
         row.employer,
         row.occupation,
         row.address1,
         row.address2,
         row.towncity,
         row.state,
         row.zip,
         row.order.line_items[0].name,
         # Square amounts are in cents; render as "$D.CC".
         "${:0.2f}".format(row.order.line_items[0].base_price_money.amount / 100),
         row.order.line_items[0].quantity,
         "${:0.2f}".format(row.order.total_money.amount / 100),
         "Online purchase",
         str(row.created_at),
         row.square_id,
         row.transactionId,
         "",
         "",
         ", ".join(row.interests)]
    ]
    result = service.spreadsheets().values().append(  # pylint: disable=no-member
        spreadsheetId=sheet,
        range=rangeName,
        valueInputOption="USER_ENTERED",
        body={'values': values}).execute()
    logging.info("Got from Sheets: {}".format(result))
| {
"repo_name": "hkdems/membership-server",
"path": "GoogleSheets.py",
"copies": "1",
"size": "4423",
"license": "apache-2.0",
"hash": 6778664610903114000,
"line_mean": 30.8201438849,
"line_max": 85,
"alpha_frac": 0.5735925842,
"autogenerated": false,
"ratio": 3.75785896346644,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.483145154766644,
"avg_score": null,
"num_lines": null
} |
"""add new netloc tracking table
Revision ID: 669e9df34ea7
Revises: 5552dfae2cb0
Create Date: 2020-01-20 01:36:51.862767
"""
# revision identifiers, used by Alembic.
revision = '669e9df34ea7'
down_revision = '5552dfae2cb0'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy_utils.types import TSVectorType
from sqlalchemy_searchable import make_searchable
import sqlalchemy_utils
# Patch in knowledge of the citext type, so it reflects properly.
from sqlalchemy.dialects.postgresql.base import ischema_names
import citext
import queue
import datetime
from sqlalchemy.dialects.postgresql import ENUM
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.dialects.postgresql import TSVECTOR
ischema_names['citext'] = citext.CIText
from sqlalchemy.dialects import postgresql
def upgrade():
    """Create the seen_netloc_tracker table with a unique netloc index.

    netloc uses citext so lookups are case-insensitive.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('seen_netloc_tracker',
        sa.Column('id', sa.BigInteger(), nullable=False),
        sa.Column('netloc', citext.CIText(), nullable=False),
        sa.Column('ignore', sa.Boolean(), nullable=True),
        sa.Column('have', sa.Boolean(), nullable=True),
        sa.Column('extra', postgresql.JSONB(astext_type=sa.Text()), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_seen_netloc_tracker_netloc'), 'seen_netloc_tracker', ['netloc'], unique=True)
    # ### end Alembic commands ###
def downgrade():
    """Drop the seen_netloc_tracker table and its index."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_seen_netloc_tracker_netloc'), table_name='seen_netloc_tracker')
    op.drop_table('seen_netloc_tracker')
    # ### end Alembic commands ###
| {
"repo_name": "fake-name/ReadableWebProxy",
"path": "alembic/versions/2020-01-20_669e9df34ea7_add_new_netloc_tracking_table.py",
"copies": "1",
"size": "1746",
"license": "bsd-3-clause",
"hash": -2469419829929895400,
"line_mean": 31.9433962264,
"line_max": 106,
"alpha_frac": 0.7394043528,
"autogenerated": false,
"ratio": 3.5060240963855422,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4745428449185542,
"avg_score": null,
"num_lines": null
} |
"""Add new rating_ride_reason
Revision ID: bb73d477a1c4
Revises: 986de51c9c9f
Create Date: 2016-10-25 11:13:30.478311
"""
# revision identifiers, used by Alembic.
revision = 'bb73d477a1c4'
down_revision = '986de51c9c9f'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# Values of the PostgreSQL enum before and after this revision; the new
# set only appends 'automatic_rating'.
old_options = ['ko', 'payment', 'courtesy', 'route', 'cleanliness',
               'late', 'no_credit_card', 'bad_itinerary', 'dirty_taxi']
new_options = old_options + ['automatic_rating']
old_name = 'rating_ride_reason_enum'
# Temporary enum name used while swapping the column's type, since
# PostgreSQL cannot alter an enum's values in place (pre-9.1 style swap).
tmp_name = '_' + old_name
column_name = 'rating_ride_reason'
old_type = sa.Enum(*old_options, name=old_name)
new_type = sa.Enum(*new_options, name=old_name)
tmp_type = sa.Enum(*new_options, name=tmp_name)
# Lightweight table object used by downgrade() to rewrite rows that hold
# the value being removed.
hail = sa.sql.table('hail',
    sa.Column(column_name, new_type, nullable=False))
def upgrade():
    """Add 'automatic_rating' to the rating_ride_reason enum.

    PostgreSQL enums cannot be extended in place here, so the column is
    moved to a temporary enum carrying the new values, the old type is
    dropped and recreated with the new values, and the column is moved
    back. Statement order is critical.
    """
    tmp_type.create(op.get_bind(), checkfirst=False)
    op.execute('ALTER TABLE hail ALTER COLUMN {column_name} TYPE {tmp_name}'
               ' USING rating_ride_reason::text::{tmp_name}'.format(
                   tmp_name=tmp_name, column_name=column_name));
    old_type.drop(op.get_bind(), checkfirst=False)
    new_type.create(op.get_bind(), checkfirst=False)
    op.execute('ALTER TABLE hail ALTER COLUMN {column_name} TYPE {old_name}'
               ' USING rating_ride_reason::text::{old_name}'.format(
                   old_name=old_name, column_name=column_name));
    tmp_type.drop(op.get_bind(), checkfirst=False)
def downgrade():
    """Remove 'automatic_rating' from the enum.

    Rows holding the removed value are first rewritten to 'ko' so the
    column can be cast back to the old enum; then the same type-swap
    dance as upgrade() runs in reverse.
    """
    op.execute(hail.update().where(hail.c.rating_ride_reason=='automatic_rating')
               .values(rating_ride_reason='ko'))
    tmp_type.create(op.get_bind(), checkfirst=False)
    op.execute('ALTER TABLE hail ALTER COLUMN {column_name} TYPE {tmp_name}'
               ' USING rating_ride_reason::text::{tmp_name}'.format(
                   column_name=column_name, tmp_name=tmp_name));
    new_type.drop(op.get_bind(), checkfirst=False)
    old_type.create(op.get_bind(), checkfirst=False)
    op.execute('ALTER TABLE hail ALTER COLUMN {column_name} TYPE {old_name}'
               ' USING rating_ride_reason::text::{old_name}'.format(
                   column_name=column_name, old_name=old_name))
    tmp_type.drop(op.get_bind(), checkfirst=False)
| {
"repo_name": "openmaraude/APITaxi_models",
"path": "APITaxi_models2/migrations/versions/20161025_11:13:30_bb73d477a1c4_add_new_rating_ride_reason.py.py",
"copies": "2",
"size": "2244",
"license": "mit",
"hash": -8113902328441162000,
"line_mean": 39.0714285714,
"line_max": 81,
"alpha_frac": 0.6631016043,
"autogenerated": false,
"ratio": 3.078189300411523,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9707764599776534,
"avg_score": 0.006705260986997972,
"num_lines": 56
} |
"""Add new rule to copy Plugin url and comment
Revision ID: e4d30f140ed
Revises: 22e4e60e03f
Create Date: 2013-05-22 08:01:54.873655
"""
# revision identifiers, used by Alembic.
revision = 'e4d30f140ed'
down_revision = '22e4e60e03f'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
import sqlalchemy.types as types
from sqlalchemy.sql import table, column
class CITEXT(types.UserDefinedType):
    """Minimal SQLAlchemy mapping for PostgreSQL's CITEXT column type."""

    name = 'citext'

    def get_col_spec(self):
        # Literal type name emitted into DDL.
        return 'CITEXT'

    def bind_processor(self, dialect):
        # Values go to the driver untouched.
        return lambda value: value

    def result_processor(self, dialect, coltype):
        # Values come back from the driver untouched.
        return lambda value: value

    def __repr__(self):
        return "citext"
def upgrade():
    """Insert two processor transform rules that copy Plugin fields.

    Rule 5 copies PluginContentURL into URL; rule 6 copies
    PluginUserComment into Comments. Both fire only when the source key
    is non-null.
    """
    # Ad-hoc table object so bulk_insert works without the ORM model.
    transform_rule = table('transform_rules',
                           column(u'transform_rule_id', sa.INTEGER()),
                           column(u'category', CITEXT()),
                           column(u'rule_order', sa.INTEGER()),
                           column(u'action', sa.TEXT()),
                           column(u'action_args', sa.TEXT()),
                           column(u'action_kwargs', sa.TEXT()),
                           column(u'predicate', sa.TEXT()),
                           column(u'predicate_args', sa.TEXT()),
                           column(u'predicate_kwargs', sa.TEXT()))
    # Indexes
    op.bulk_insert(transform_rule, [{
        "category": 'processor.json_rewrite'
        , "predicate": 'socorro.lib.transform_rules.is_not_null_predicate'
        , "predicate_args": ''
        , "predicate_kwargs": 'key="PluginContentURL"'
        , "action": 'socorro.processor.processor.json_reformat_action'
        , "action_args": ''
        , "action_kwargs": 'key="URL", format_str="%(PluginContentURL)s"'
        , "rule_order": '5'
    }, {
        "category": 'processor.json_rewrite'
        , "predicate": 'socorro.lib.transform_rules.is_not_null_predicate'
        , "predicate_args": ''
        , "predicate_kwargs": 'key="PluginUserComment"'
        , "action": 'socorro.processor.processor.json_reformat_action'
        , "action_args": ''
        , "action_kwargs": 'key="Comments", format_str="%(PluginUserComment)s"'
        , "rule_order": '6'
    }])
def downgrade():
    """Delete the two rules inserted by upgrade(), matched by their
    distinctive action_kwargs values."""
    op.execute("""
        DELETE from transform_rules
        where action_kwargs IN
            ('key="Comments", format_str="%(PluginUserComment)s"'
             , 'key="URL", format_str="%(PluginContentURL)s"');
    """)
| {
"repo_name": "cliqz/socorro",
"path": "alembic/versions/e4d30f140ed_add_new_rule_to_copy.py",
"copies": "14",
"size": "2393",
"license": "mpl-2.0",
"hash": -1233249629522882000,
"line_mean": 29.6794871795,
"line_max": 79,
"alpha_frac": 0.6117843711,
"autogenerated": false,
"ratio": 3.5610119047619047,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""add new samples
Revision ID: 2bd074e60e19
Revises: f098d2c51614
Create Date: 2018-04-11 13:20:43.492666
"""
import model.utils
import sqlalchemy as sa
from alembic import op
from rdr_service.participant_enums import OrderStatus, SampleStatus
# revision identifiers, used by Alembic.
revision = "2bd074e60e19"
down_revision = "f098d2c51614"
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to upgrade_<engine_name>() (multi-database Alembic setup)."""
    globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
    """Dispatch to downgrade_<engine_name>() (multi-database Alembic setup)."""
    globals()["downgrade_%s" % engine_name]()
def upgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column(
"participant_summary", sa.Column("sample_order_status_1cfd9", model.utils.Enum(OrderStatus), nullable=True)
)
op.add_column(
"participant_summary", sa.Column("sample_order_status_1cfd9_time", model.utils.UTCDateTime(), nullable=True)
)
op.add_column(
"participant_summary", sa.Column("sample_order_status_1ed02", model.utils.Enum(OrderStatus), nullable=True)
)
op.add_column(
"participant_summary", sa.Column("sample_order_status_1ed02_time", model.utils.UTCDateTime(), nullable=True)
)
op.add_column(
"participant_summary", sa.Column("sample_order_status_1pxr2", model.utils.Enum(OrderStatus), nullable=True)
)
op.add_column(
"participant_summary", sa.Column("sample_order_status_1pxr2_time", model.utils.UTCDateTime(), nullable=True)
)
op.add_column(
"participant_summary", sa.Column("sample_order_status_1ur90", model.utils.Enum(OrderStatus), nullable=True)
)
op.add_column(
"participant_summary", sa.Column("sample_order_status_1ur90_time", model.utils.UTCDateTime(), nullable=True)
)
op.add_column(
"participant_summary", sa.Column("sample_status_1cfd9", model.utils.Enum(SampleStatus), nullable=True)
)
op.add_column(
"participant_summary", sa.Column("sample_status_1cfd9_time", model.utils.UTCDateTime(), nullable=True)
)
op.add_column(
"participant_summary", sa.Column("sample_status_1ed02", model.utils.Enum(SampleStatus), nullable=True)
)
op.add_column(
"participant_summary", sa.Column("sample_status_1ed02_time", model.utils.UTCDateTime(), nullable=True)
)
op.add_column(
"participant_summary", sa.Column("sample_status_1pxr2", model.utils.Enum(SampleStatus), nullable=True)
)
op.add_column(
"participant_summary", sa.Column("sample_status_1pxr2_time", model.utils.UTCDateTime(), nullable=True)
)
op.add_column(
"participant_summary", sa.Column("sample_status_1ur90", model.utils.Enum(SampleStatus), nullable=True)
)
op.add_column(
"participant_summary", sa.Column("sample_status_1ur90_time", model.utils.UTCDateTime(), nullable=True)
)
# ### end Alembic commands ###
def downgrade_rdr():
    """Drop the per-sample-code columns added by upgrade_rdr(), in the
    reverse of the order they were created."""
    # ### commands auto generated by Alembic - please adjust! ###
    sample_codes = ("1cfd9", "1ed02", "1pxr2", "1ur90")
    for code in reversed(sample_codes):
        op.drop_column("participant_summary", "sample_status_%s_time" % code)
        op.drop_column("participant_summary", "sample_status_%s" % code)
    for code in reversed(sample_codes):
        op.drop_column("participant_summary", "sample_order_status_%s_time" % code)
        op.drop_column("participant_summary", "sample_order_status_%s" % code)
    # ### end Alembic commands ###
def upgrade_metrics():
    """No-op: this revision does not touch the metrics database."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No-op: this revision does not touch the metrics database."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/2bd074e60e19_add_new_samples.py",
"copies": "1",
"size": "4400",
"license": "bsd-3-clause",
"hash": 8844297910204567000,
"line_mean": 38.2857142857,
"line_max": 116,
"alpha_frac": 0.6893181818,
"autogenerated": false,
"ratio": 3.351104341203351,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9534139359392602,
"avg_score": 0.0012566327221499637,
"num_lines": 112
} |
"""Add newsletter models.
Revision ID: d3e9983f8366
Revises: 8fc09a727a5c
Create Date: 2017-06-15 22:44:02.778366
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd3e9983f8366'
down_revision = '8fc09a727a5c'
def upgrade():
    """Create the newsletter table plus its two association tables
    linking newsletters to news items and activities."""
    op.create_table(
        'newsletter',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('created', sa.DateTime(), nullable=True),
        sa.Column('modified', sa.DateTime(), nullable=True),
        sa.PrimaryKeyConstraint('id', name=op.f('pk_newsletter')),
        # Keep ids monotonically increasing on SQLite.
        sqlite_autoincrement=True
    )
    op.create_table(
        'newsletter_news',
        sa.Column('newsletter_id', sa.Integer(), nullable=True),
        sa.Column('news_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(
            ['news_id'], ['news.id'],
            name=op.f('fk_newsletter_news_news_id_news')),
        sa.ForeignKeyConstraint(
            ['newsletter_id'], ['newsletter.id'],
            name=op.f('fk_newsletter_news_newsletter_id_newsletter'))
    )
    op.create_table(
        'newsletter_activities',
        sa.Column('newsletter_id', sa.Integer(), nullable=True),
        sa.Column('activity_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(
            ['activity_id'], ['activity.id'],
            name=op.f('fk_newsletter_activities_activity_id_activity')),
        sa.ForeignKeyConstraint(
            ['newsletter_id'], ['newsletter.id'],
            name=op.f('fk_newsletter_activities_newsletter_id_newsletter'))
    )
def downgrade():
    """Drop the newsletter tables; association tables go first so their
    foreign keys never dangle."""
    for table_name in ('newsletter_activities', 'newsletter_news', 'newsletter'):
        op.drop_table(table_name)
| {
"repo_name": "viaict/viaduct",
"path": "migrations/versions/2017_06_15_d3e9983f8366_add_newsletter_models.py",
"copies": "1",
"size": "1701",
"license": "mit",
"hash": 4926626687541002000,
"line_mean": 31.0943396226,
"line_max": 75,
"alpha_frac": 0.6231628454,
"autogenerated": false,
"ratio": 3.4714285714285715,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4594591416828571,
"avg_score": null,
"num_lines": null
} |
"""Add new table for RLMS-level cache
Revision ID: 229fd8d6f159
Revises: 1a3e5ec9c2fd
Create Date: 2015-05-07 09:29:51.133081
"""
# revision identifiers, used by Alembic.
revision = '229fd8d6f159'
down_revision = '1a3e5ec9c2fd'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the per-RLMS cache table and its lookup indexes.

    NOTE(review): the last index targets the pre-existing
    'rlmstype_cache' table, not the new one — looks intentional but
    verify before reuse.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('rlms_caches',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('rlms_id', sa.Integer(), nullable=False),
        sa.Column('key', sa.Unicode(length=255), nullable=True),
        sa.Column('value', sa.UnicodeText(), nullable=True),
        sa.Column('datetime', sa.DateTime(), nullable=True),
        sa.ForeignKeyConstraint(['rlms_id'], ['rlmss.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(u'ix_rlms_caches_datetime', 'rlms_caches', ['datetime'], unique=False)
    op.create_index(u'ix_rlms_caches_key', 'rlms_caches', ['key'], unique=False)
    op.create_index(u'ix_rlmstype_cache_datetime', 'rlmstype_cache', ['datetime'], unique=False)
    ### end Alembic commands ###
def downgrade():
    """Drop the indexes then the rlms_caches table (reverse of upgrade)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(u'ix_rlmstype_cache_datetime', table_name='rlmstype_cache')
    op.drop_index(u'ix_rlms_caches_key', table_name='rlms_caches')
    op.drop_index(u'ix_rlms_caches_datetime', table_name='rlms_caches')
    op.drop_table('rlms_caches')
    ### end Alembic commands ###
| {
"repo_name": "morelab/labmanager",
"path": "alembic/versions/229fd8d6f159_add_new_table_for_rlms_level_cache.py",
"copies": "5",
"size": "1443",
"license": "bsd-2-clause",
"hash": -2600196039979929000,
"line_mean": 35.075,
"line_max": 96,
"alpha_frac": 0.6812196812,
"autogenerated": false,
"ratio": 3.0315126050420167,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6212732286242018,
"avg_score": null,
"num_lines": null
} |
"""add new tables
Revision ID: d46a231df696
Revises: 415d32f993b8
Create Date: 2018-08-26 18:14:48.922463
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd46a231df696'
down_revision = '415d32f993b8'
branch_labels = None
depends_on = None
def upgrade():
    """Create budget/paymenttype/payment tables and repoint the
    transaction table at them, replacing its free-text 'type' column."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('budget',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=300), nullable=False),
        sa.Column('color', sa.String(length=6), nullable=False),
        sa.Column('show_details_in_report', sa.Boolean(), nullable=False),
        sa.Column('show_count_in_report', sa.Boolean(), nullable=False),
        sa.PrimaryKeyConstraint('id')
    )
    with op.batch_alter_table('budget', schema=None) as batch_op:
        batch_op.create_index(batch_op.f('ix_budget_name'), ['name'], unique=False)
    op.create_table('paymenttype',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=300), nullable=False),
        sa.Column('color', sa.String(length=6), nullable=False),
        sa.Column('has_members', sa.Boolean(), nullable=False),
        sa.Column('show_details_in_report', sa.Boolean(), nullable=False),
        sa.Column('show_count_in_report', sa.Boolean(), nullable=False),
        sa.PrimaryKeyConstraint('id')
    )
    with op.batch_alter_table('paymenttype', schema=None) as batch_op:
        batch_op.create_index(batch_op.f('ix_paymenttype_name'), ['name'], unique=False)
    # payment references all three of the tables above plus member.
    op.create_table('payment',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('transaction_id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=300), nullable=False),
        sa.Column('payment_type_id', sa.Integer(), nullable=False),
        sa.Column('budget_id', sa.Integer(), nullable=False),
        sa.Column('member_id', sa.Integer(), nullable=True),
        sa.Column('date', sa.Date(), nullable=False),
        sa.Column('cost', sa.Numeric(10, 2), nullable=False),
        sa.ForeignKeyConstraint(['budget_id'], ['budget.id'], ),
        sa.ForeignKeyConstraint(['member_id'], ['member.id'], ),
        sa.ForeignKeyConstraint(['payment_type_id'], ['paymenttype.id'], ),
        sa.ForeignKeyConstraint(['transaction_id'], ['transaction.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    with op.batch_alter_table('payment', schema=None) as batch_op:
        batch_op.create_index(batch_op.f('ix_payment_name'), ['name'], unique=False)
    with op.batch_alter_table(u'transaction', schema=None) as batch_op:
        batch_op.add_column(sa.Column('proposed_budget_id', sa.Integer(), nullable=True))
        batch_op.add_column(sa.Column('proposed_type_id', sa.Integer(), nullable=True))
        # NOTE(review): unnamed foreign keys (name=None) — Alembic will
        # generate names; downgrade relies on the same behavior.
        batch_op.create_foreign_key(None, 'paymenttype', ['proposed_type_id'], ['id'])
        batch_op.create_foreign_key(None, 'budget', ['proposed_budget_id'], ['id'])
        batch_op.drop_column('type')
    # ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): restore transaction.type, drop the new FKs and
    the payment/paymenttype/budget tables.

    NOTE(review): drop_constraint(None, ...) depends on Alembic resolving
    the autogenerated constraint names — may fail on some backends;
    confirm before relying on this downgrade.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table(u'transaction', schema=None) as batch_op:
        batch_op.add_column(sa.Column('type', sa.VARCHAR(length=20), nullable=True))
        batch_op.drop_constraint(None, type_='foreignkey')
        batch_op.drop_constraint(None, type_='foreignkey')
        batch_op.drop_column('proposed_type_id')
        batch_op.drop_column('proposed_budget_id')
    with op.batch_alter_table('payment', schema=None) as batch_op:
        batch_op.drop_index(batch_op.f('ix_payment_name'))
    op.drop_table('payment')
    with op.batch_alter_table('paymenttype', schema=None) as batch_op:
        batch_op.drop_index(batch_op.f('ix_paymenttype_name'))
    op.drop_table('paymenttype')
    with op.batch_alter_table('budget', schema=None) as batch_op:
        batch_op.drop_index(batch_op.f('ix_budget_name'))
    op.drop_table('budget')
    # ### end Alembic commands ###
| {
"repo_name": "hackerspace-silesia/cebulany-manager",
"path": "migrations/versions/d46a231df696_add_new_tables.py",
"copies": "1",
"size": "3945",
"license": "mit",
"hash": 6679619780934775000,
"line_mean": 41.4193548387,
"line_max": 89,
"alpha_frac": 0.6626108999,
"autogenerated": false,
"ratio": 3.3151260504201683,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9447689487962876,
"avg_score": 0.006009492471458602,
"num_lines": 93
} |
"""Add
Add an excentury configuration file to excentury.
"""
import textwrap
import os.path as pth
from excentury.command import error, trace
from excentury.command.config import _read_config
DESC = """
Adds an excentury configuration file to the list of the users
projects. A valid excentury project must have two files:
xcpp.config
.xcpprc
Furthermore, the `xcpp.config` file must have the key
xcpp.name
"""
def add_parser(subp, raw):
    """Add a parser to the main subparser.

    `subp` is the argparse subparsers object; `raw` is the
    formatter class used to preserve DESC's layout.
    """
    tmpp = subp.add_parser('add', help='add an excentury project',
                           formatter_class=raw,
                           description=textwrap.dedent(DESC))
    tmpp.add_argument('path', type=str,
                      help='project path')
def get_entries():
    """Read the projects file.

    Each line has the form "name: path"; returns a list of
    (name, path) tuples. A missing or unreadable file yields [].
    """
    pfile = pth.expandvars('$HOME/.excentury/projects')
    try:
        content = list()
        with open(pfile, 'r') as epf:
            for line in epf:
                # Skip blank lines between entries.
                if line.strip() == '':
                    continue
                key, val = line.split(':')
                content.append((key.strip(), val.strip()))
        return content
    except IOError:
        return list()
def set_entries(data):
    """Write the projects file, one "name: path" line per entry."""
    pfile = pth.expandvars('$HOME/.excentury/projects')
    lines = ['%s: %s\n' % (name, path) for name, path in data]
    with open(pfile, 'w') as epf:
        epf.writelines(lines)
def append_entry(data, name, path):
    """Append (name, path) to *data* unless *path* is already listed.

    Returns True if an entry was appended, False otherwise. Matching is
    by path only — the name is ignored for duplicate detection.
    """
    if any(path == existing_path for _, existing_path in data):
        return False
    data.append((name, path))
    return True
def update_entries(path):
    """Validate a project at *path* and register it in the projects file.

    Requires `xcpp.config` (with an `xcpp.name` key) and `.xcpprc` to
    exist under *path*. NOTE(review): execution appears to continue after
    error() — presumably error() aborts the process; confirm in
    excentury.command.
    """
    data = get_entries()
    path = pth.abspath(path)
    try:
        config = _read_config('%s/xcpp.config' % path)
    except IOError:
        error("ERROR: `xcpp.config` not found in %r\n" % path)
    if not pth.exists('%s/.xcpprc' % path):
        error("ERROR: `xcpprc` not found in %r\n" % path)
    if 'xcpp' not in config:
        error("ERROR: Missing `xcpp` section in `xcpp.config`\n")
    if 'name' not in config['xcpp']:
        error("ERROR: Missing `xcpp.name` value in `xcpp.config`\n")
    if not append_entry(data, config['xcpp']['name'], path):
        trace("%r has been previously added\n" % path)
    set_entries(data)
def run(arg):
    """Run the command: register the project at arg.path."""
    update_entries(arg.path)
| {
"repo_name": "jmlopez-rod/excentury",
"path": "excentury/command/add.py",
"copies": "1",
"size": "2649",
"license": "bsd-2-clause",
"hash": 4029268600738679300,
"line_mean": 26.8842105263,
"line_max": 68,
"alpha_frac": 0.5877689694,
"autogenerated": false,
"ratio": 3.594301221166893,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46820701905668927,
"avg_score": null,
"num_lines": null
} |
"""add
Revision ID: d81ae8ae32ea
Revises: fc1eb86aa8f4, 355854d777d3
Create Date: 2021-06-25 11:54:54.415110
"""
from alembic import op
import sqlalchemy as sa
import rdr_service.model.utils
from rdr_service.participant_enums import PhysicalMeasurementsStatus, QuestionnaireStatus, OrderStatus
from rdr_service.participant_enums import WithdrawalStatus, WithdrawalReason, SuspensionStatus, QuestionnaireDefinitionStatus
from rdr_service.participant_enums import EnrollmentStatus, Race, SampleStatus, OrganizationType, BiobankOrderStatus
from rdr_service.participant_enums import OrderShipmentTrackingStatus, OrderShipmentStatus
from rdr_service.participant_enums import MetricSetType, MetricsKey, GenderIdentity
from rdr_service.model.base import add_table_history_table, drop_table_history_table
from rdr_service.model.code import CodeType
from rdr_service.model.site_enums import SiteStatus, EnrollingStatus, DigitalSchedulingStatus, ObsoleteStatus
# revision identifiers, used by Alembic.
revision = 'd81ae8ae32ea'
down_revision = ('fc1eb86aa8f4', '355854d777d3')
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to upgrade_<engine_name>() (multi-database Alembic setup)."""
    globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
    """Dispatch to downgrade_<engine_name>() (multi-database Alembic setup)."""
    globals()["downgrade_%s" % engine_name]()
def upgrade_rdr():
    """Create the genomic_informing_loop event table (one row per
    informing-loop message, keyed to a participant)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('genomic_informing_loop',
        sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
        sa.Column('message_record_id', sa.Integer(), nullable=False),
        sa.Column('participant_id', sa.Integer(), nullable=False),
        sa.Column('event_type', sa.String(length=256), nullable=False),
        # Microsecond-precision UTC timestamp.
        sa.Column('event_authored_time', rdr_service.model.utils.UTCDateTime6(fsp=6), nullable=True),
        sa.Column('module_type', sa.String(length=128), nullable=True),
        sa.Column('decision_value', sa.String(length=128), nullable=True),
        sa.ForeignKeyConstraint(['participant_id'], ['participant.participant_id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade_rdr():
    """Drop the genomic_informing_loop table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('genomic_informing_loop')
    # ### end Alembic commands ###
def upgrade_metrics():
    """No-op: this revision does not touch the metrics database."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No-op: this revision does not touch the metrics database."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/d81ae8ae32ea_add.py",
"copies": "1",
"size": "2497",
"license": "bsd-3-clause",
"hash": 3973806663957601000,
"line_mean": 34.6714285714,
"line_max": 125,
"alpha_frac": 0.7368842611,
"autogenerated": false,
"ratio": 3.511954992967651,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47488392540676505,
"avg_score": null,
"num_lines": null
} |
"""add_nodes
Revision ID: a3d144cdc527
Revises: e33c1d5684cf
Create Date: 2018-08-08 23:49:22.439468+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a3d144cdc527'
down_revision = 'e33c1d5684cf'
branch_labels = None
depends_on = None
def upgrade():
    """Create node telemetry tables, both keyed by (time, node)."""
    op.create_table('node_power_status',
        sa.Column('time', sa.BigInteger(), nullable=False),
        sa.Column('node', sa.Integer(), nullable=False),
        sa.Column('snap_relay_powered', sa.Boolean(), nullable=False),
        sa.Column('snap0_powered', sa.Boolean(), nullable=False),
        sa.Column('snap1_powered', sa.Boolean(), nullable=False),
        sa.Column('snap2_powered', sa.Boolean(), nullable=False),
        sa.Column('snap3_powered', sa.Boolean(), nullable=False),
        sa.Column('fem_powered', sa.Boolean(), nullable=False),
        sa.Column('pam_powered', sa.Boolean(), nullable=False),
        sa.PrimaryKeyConstraint('time', 'node')
    )
    # Sensor readings are nullable: a node may report only some sensors.
    op.create_table('node_sensor',
        sa.Column('time', sa.BigInteger(), nullable=False),
        sa.Column('node', sa.Integer(), nullable=False),
        sa.Column('top_sensor_temp', sa.Float(), nullable=True),
        sa.Column('middle_sensor_temp', sa.Float(), nullable=True),
        sa.Column('bottom_sensor_temp', sa.Float(), nullable=True),
        sa.Column('humidity_sensor_temp', sa.Float(), nullable=True),
        sa.Column('humidity', sa.Float(), nullable=True),
        sa.PrimaryKeyConstraint('time', 'node')
    )
def downgrade():
    """Drop the tables created by upgrade(), most recently created first."""
    for table_name in ('node_sensor', 'node_power_status'):
        op.drop_table(table_name)
| {
"repo_name": "HERA-Team/Monitor_and_Control",
"path": "alembic/versions/a3d144cdc527_add_nodes.py",
"copies": "2",
"size": "1865",
"license": "bsd-2-clause",
"hash": 4296327656973007400,
"line_mean": 39.5434782609,
"line_max": 82,
"alpha_frac": 0.5624664879,
"autogenerated": false,
"ratio": 3.853305785123967,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5415772273023967,
"avg_score": null,
"num_lines": null
} |
"""Add node with given data to linked list at a desired position."""
class Node(object):
"""Implement a node class."""
def __init__(self, data, next_node=None):
"""Give node data and next attributes on initialization."""
self.data = data
self.next = next_node
class LinkedList(object):
"""Building LinkedList class."""
def __init__(self, data=None):
"""Init singly linked list, set head to None and iterate through data
if provided as an argument."""
self.head = None
if data is not None:
try:
for item in data:
self.push(item)
except TypeError:
raise TypeError('Please enter an object that is iterable.')
def push(self, data):
"""Insert data at the head of the list."""
new_node = Node(data)
new_node.next_node = self.head
self.head = new_node
def insert_nth(head, data, position):
    """Insert a node with data at a given position, return head.

    Position 0 (or an empty list) makes the new node the head; a position
    past the end of the list appends after the last node.
    """
    fresh = Node(data)
    if head is None or position == 0:
        fresh.next = head
        return fresh
    steps = 0
    walker = head
    while steps != position - 1:
        # Stop advancing at the tail so out-of-range positions append.
        if walker.next:
            walker = walker.next
        steps += 1
    fresh.next = walker.next
    walker.next = fresh
    return head
| {
"repo_name": "vbenavente/coffee_katas",
"path": "src/insertnth.py",
"copies": "1",
"size": "1413",
"license": "mit",
"hash": 2831508043448088000,
"line_mean": 28.4375,
"line_max": 77,
"alpha_frac": 0.5576786978,
"autogenerated": false,
"ratio": 4.256024096385542,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5313702794185542,
"avg_score": null,
"num_lines": null
} |
"""Add non-nullable to uploads
Revision ID: 4c34ca870e0
Revises: 470b162dcd9
Create Date: 2015-06-14 15:05:37.769670
"""
# revision identifiers, used by Alembic.
revision = '4c34ca870e0'
down_revision = '470b162dcd9'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Make the upload hash, path and shorthash columns NOT NULL."""
    for column_name in ('hash', 'path', 'shorthash'):
        op.alter_column('upload', column_name,
                        existing_type=sa.VARCHAR(),
                        nullable=False)
def downgrade():
    """Allow NULLs again, relaxing the columns in reverse upgrade order."""
    for column_name in ('shorthash', 'path', 'hash'):
        op.alter_column('upload', column_name,
                        existing_type=sa.VARCHAR(),
                        nullable=True)
| {
"repo_name": "Ninja3047/sr.ht",
"path": "alembic/versions/4c34ca870e0_add_non_nullable_to_uploads.py",
"copies": "2",
"size": "1177",
"license": "mit",
"hash": 6176811433880075000,
"line_mean": 27.0238095238,
"line_max": 63,
"alpha_frac": 0.5913338997,
"autogenerated": false,
"ratio": 3.796774193548387,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01222166822408595,
"num_lines": 42
} |
"""Add non-NULL conditions to data columns
Revision ID: 1ab13f7b5ba
Revises: 7dc1a66bb8
Create Date: 2014-09-25 10:52:05.700132
"""
# revision identifiers, used by Alembic.
revision = '1ab13f7b5ba'
down_revision = '7dc1a66bb8'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Add NOT NULL constraints to all observations data columns."""
    constrained = (
        ('link_id', sa.INTEGER()),
        ('observed_at', postgresql.TIMESTAMP()),
        ('type', postgresql.ENUM('SPEED', 'FLOW', 'OCCUPANCY',
                                 name='observation_types')),
        ('value', postgresql.DOUBLE_PRECISION(precision=53)),
    )
    for column_name, column_type in constrained:
        op.alter_column('observations', column_name,
                        existing_type=column_type,
                        nullable=False)
def downgrade():
    """Drop the NOT NULL constraints again, in reverse upgrade order."""
    relaxed = (
        ('value', postgresql.DOUBLE_PRECISION(precision=53)),
        ('type', postgresql.ENUM('SPEED', 'FLOW', 'OCCUPANCY',
                                 name='observation_types')),
        ('observed_at', postgresql.TIMESTAMP()),
        ('link_id', sa.INTEGER()),
    )
    for column_name, column_type in relaxed:
        op.alter_column('observations', column_name,
                        existing_type=column_type,
                        nullable=True)
| {
"repo_name": "rjw57/trafficdb",
"path": "migrations/versions/1ab13f7b5ba_non_null_constraints.py",
"copies": "1",
"size": "1708",
"license": "mit",
"hash": -7853433007266526000,
"line_mean": 34.5833333333,
"line_max": 101,
"alpha_frac": 0.6270491803,
"autogenerated": false,
"ratio": 3.8995433789954337,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.995570401750625,
"avg_score": 0.014177708357836771,
"num_lines": 48
} |
"""Add note_id and update constraints
Revision ID: 6ef9616e57cb
Revises: aa0dbc6c14aa
Create Date: 2020-10-23 12:24:51.648130
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '6ef9616e57cb'
down_revision = 'aa0dbc6c14aa'
branch_labels = None
depends_on = None
def upgrade():
    """Add the note_id column and rebuild the per-type CHECK constraints.

    Queue entries of type 6 (note) are introduced, so every existing
    ``ck_queues_valid_*_entry`` constraint must be recreated with a
    ``note_id IS NULL`` clause, plus a new constraint for note entries.
    """
    op.add_column('queues', sa.Column('note_id', sa.Integer(), nullable=True, index=True), schema='plugin_livesync')
    op.create_foreign_key(None, 'queues', 'notes', ['note_id'], ['id'], source_schema='plugin_livesync',
                          referent_schema='events')
    # Drop the old constraints; they do not account for note_id yet.
    op.drop_constraint('ck_queues_valid_enum_type', 'queues', schema='plugin_livesync')
    op.drop_constraint('ck_queues_valid_category_entry', 'queues', schema='plugin_livesync')
    op.drop_constraint('ck_queues_valid_event_entry', 'queues', schema='plugin_livesync')
    op.drop_constraint('ck_queues_valid_contribution_entry', 'queues', schema='plugin_livesync')
    op.drop_constraint('ck_queues_valid_subcontribution_entry', 'queues', schema='plugin_livesync')
    op.drop_constraint('ck_queues_valid_session_entry', 'queues', schema='plugin_livesync')
    # Recreate them with note_id included and allow type 6 (note) entries.
    op.execute('''
        ALTER TABLE plugin_livesync.queues ADD CONSTRAINT ck_queues_valid_enum_type CHECK ((type = ANY (ARRAY[1, 2, 3, 4, 5, 6])));
        ALTER TABLE plugin_livesync.queues ADD CONSTRAINT ck_queues_valid_category_entry CHECK (((type <> 1) OR ((contribution_id IS NULL) AND (event_id IS NULL) AND (note_id IS NULL) AND (session_id IS NULL) AND (subcontribution_id IS NULL) AND (category_id IS NOT NULL))));
        ALTER TABLE plugin_livesync.queues ADD CONSTRAINT ck_queues_valid_event_entry CHECK (((type <> 2) OR ((category_id IS NULL) AND (contribution_id IS NULL) AND (note_id IS NULL) AND (session_id IS NULL) AND (subcontribution_id IS NULL) AND (event_id IS NOT NULL))));
        ALTER TABLE plugin_livesync.queues ADD CONSTRAINT ck_queues_valid_contribution_entry CHECK (((type <> 3) OR ((category_id IS NULL) AND (event_id IS NULL) AND (note_id IS NULL) AND (session_id IS NULL) AND (subcontribution_id IS NULL) AND (contribution_id IS NOT NULL))));
        ALTER TABLE plugin_livesync.queues ADD CONSTRAINT ck_queues_valid_subcontribution_entry CHECK (((type <> 4) OR ((category_id IS NULL) AND (contribution_id IS NULL) AND (event_id IS NULL) AND (note_id IS NULL) AND (session_id IS NULL) AND (subcontribution_id IS NOT NULL))));
        ALTER TABLE plugin_livesync.queues ADD CONSTRAINT ck_queues_valid_session_entry CHECK (((type <> 5) OR ((category_id IS NULL) AND (contribution_id IS NULL) AND (event_id IS NULL) AND (note_id IS NULL) AND (subcontribution_id IS NULL) AND (session_id IS NOT NULL))));
        ALTER TABLE plugin_livesync.queues ADD CONSTRAINT ck_queues_valid_note_entry CHECK (((type <> 6) OR ((category_id IS NULL) AND (contribution_id IS NULL) AND (event_id IS NULL) AND (session_id IS NULL) AND (subcontribution_id IS NULL) AND (note_id IS NOT NULL))));
    ''')
def downgrade():
    """Remove note support: delete note entries, drop note_id, restore constraints."""
    # Note entries cannot be represented without the note_id column.
    op.execute('DELETE FROM plugin_livesync.queues WHERE type = 6')
    op.drop_constraint('ck_queues_valid_enum_type', 'queues', schema='plugin_livesync')
    op.drop_constraint('ck_queues_valid_category_entry', 'queues', schema='plugin_livesync')
    op.drop_constraint('ck_queues_valid_event_entry', 'queues', schema='plugin_livesync')
    op.drop_constraint('ck_queues_valid_contribution_entry', 'queues', schema='plugin_livesync')
    op.drop_constraint('ck_queues_valid_subcontribution_entry', 'queues', schema='plugin_livesync')
    op.drop_constraint('ck_queues_valid_session_entry', 'queues', schema='plugin_livesync')
    op.drop_constraint('ck_queues_valid_note_entry', 'queues', schema='plugin_livesync')
    op.drop_column('queues', 'note_id', schema='plugin_livesync')
    # Recreate the pre-note constraints (types 1-5, no note_id clauses).
    op.execute('''
        ALTER TABLE plugin_livesync.queues ADD CONSTRAINT ck_queues_valid_enum_type CHECK ((type = ANY (ARRAY[1, 2, 3, 4, 5])));
        ALTER TABLE plugin_livesync.queues ADD CONSTRAINT ck_queues_valid_category_entry CHECK (((type <> 1) OR ((contribution_id IS NULL) AND (event_id IS NULL) AND (session_id IS NULL) AND (subcontribution_id IS NULL) AND (category_id IS NOT NULL))));
        ALTER TABLE plugin_livesync.queues ADD CONSTRAINT ck_queues_valid_event_entry CHECK (((type <> 2) OR ((category_id IS NULL) AND (contribution_id IS NULL) AND (session_id IS NULL) AND (subcontribution_id IS NULL) AND (event_id IS NOT NULL))));
        ALTER TABLE plugin_livesync.queues ADD CONSTRAINT ck_queues_valid_contribution_entry CHECK (((type <> 3) OR ((category_id IS NULL) AND (event_id IS NULL) AND (session_id IS NULL) AND (subcontribution_id IS NULL) AND (contribution_id IS NOT NULL))));
        ALTER TABLE plugin_livesync.queues ADD CONSTRAINT ck_queues_valid_subcontribution_entry CHECK (((type <> 4) OR ((category_id IS NULL) AND (contribution_id IS NULL) AND (event_id IS NULL) AND (session_id IS NULL) AND (subcontribution_id IS NOT NULL))));
        ALTER TABLE plugin_livesync.queues ADD CONSTRAINT ck_queues_valid_session_entry CHECK (((type <> 5) OR ((category_id IS NULL) AND (contribution_id IS NULL) AND (event_id IS NULL) AND (subcontribution_id IS NULL) AND (session_id IS NOT NULL))));
    ''')
| {
"repo_name": "indico/indico-plugins",
"path": "livesync/indico_livesync/migrations/20201023_1224_6ef9616e57cb_add_note_id_and_update_constraints.py",
"copies": "1",
"size": "5227",
"license": "mit",
"hash": 3827817236268665000,
"line_mean": 86.1166666667,
"line_max": 282,
"alpha_frac": 0.7107327339,
"autogenerated": false,
"ratio": 3.329299363057325,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4540032096957325,
"avg_score": null,
"num_lines": null
} |
"""Add notification mechanism to the database
Revision ID: 100cfe53a84
Revises: 3d2668a96dce
Create Date: 2015-05-11 15:51:04.211519
"""
# revision identifiers, used by Alembic.
revision = '100cfe53a84'
down_revision = '3d2668a96dce'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the translation-notification recipient and subscription tables."""
    ### commands auto generated by Alembic - please adjust! ###
    # Recipients are unique by email address (unique index below).
    op.create_table('TranslationNotificationRecipients',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('email', sa.Unicode(length=255), nullable=False),
    sa.Column('created', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(u'ix_TranslationNotificationRecipients_created', 'TranslationNotificationRecipients', ['created'], unique=False)
    op.create_index(u'ix_TranslationNotificationRecipients_email', 'TranslationNotificationRecipients', ['email'], unique=True)
    # Links a recipient to a translation URL with a delivery mechanism.
    op.create_table('TranslationSubscriptions',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('translation_url_id', sa.Integer(), nullable=True),
    sa.Column('recipient_id', sa.Integer(), nullable=True),
    sa.Column('mechanism', sa.Unicode(length=255), nullable=False),
    sa.ForeignKeyConstraint(['recipient_id'], ['TranslationNotificationRecipients.id'], ),
    sa.ForeignKeyConstraint(['translation_url_id'], ['TranslationUrls.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(u'ix_TranslationSubscriptions_mechanism', 'TranslationSubscriptions', ['mechanism'], unique=False)
    ### end Alembic commands ###
def downgrade():
    """Drop the subscription tables and their indexes (reverse of upgrade)."""
    tables = (
        ('TranslationSubscriptions',
         [u'ix_TranslationSubscriptions_mechanism']),
        ('TranslationNotificationRecipients',
         [u'ix_TranslationNotificationRecipients_email',
          u'ix_TranslationNotificationRecipients_created']),
    )
    for table_name, index_names in tables:
        for index_name in index_names:
            op.drop_index(index_name, table_name=table_name)
        op.drop_table(table_name)
| {
"repo_name": "morelab/appcomposer",
"path": "alembic/versions/100cfe53a84_add_notification_mechanism_to_the_.py",
"copies": "3",
"size": "2094",
"license": "bsd-2-clause",
"hash": -2669636946784884000,
"line_mean": 43.5531914894,
"line_max": 132,
"alpha_frac": 0.7382999045,
"autogenerated": false,
"ratio": 3.807272727272727,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00938433061312813,
"num_lines": 47
} |
"""Add notifications table
Revision ID: d3915e15063e
Revises: 52d93745fa71
Create Date: 2020-12-13 14:51:46.447103
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd3915e15063e'
down_revision = '52d93745fa71'
branch_labels = None
depends_on = None
def upgrade():
    """Create the notifications table keyed by (guild_id, kind)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('notifications',
    sa.Column('guild_id', sa.BigInteger(), autoincrement=False, nullable=False),
    sa.Column('kind', sa.Enum('JOIN', 'LEAVE', 'BAN', 'UNBAN', name='notification_kind'), autoincrement=False, nullable=False),
    sa.Column('channel_id', sa.BigInteger(), nullable=False),
    sa.Column('template', sa.UnicodeText(), nullable=False),
    sa.PrimaryKeyConstraint('guild_id', 'kind', name=op.f('pk_notifications'))
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the notifications table created in upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): the 'notification_kind' enum type is not dropped here;
    # on backends with real enum types (e.g. PostgreSQL) it will linger —
    # confirm whether that is intended.
    op.drop_table('notifications')
    # ### end Alembic commands ###
| {
"repo_name": "FallenWarrior2k/cardinal.py",
"path": "src/cardinal/db/migrations/versions/d3915e15063e_add_notifications_table.py",
"copies": "1",
"size": "1034",
"license": "mit",
"hash": 8577304885299716000,
"line_mean": 29.4117647059,
"line_max": 127,
"alpha_frac": 0.6876208897,
"autogenerated": false,
"ratio": 3.4466666666666668,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4634287556366667,
"avg_score": null,
"num_lines": null
} |
"""Add not null constraints everywhere
Revision ID: ba6fefa33e22
Revises: 2cb2db7089f4
Create Date: 2016-09-12 23:50:53.526022
"""
# revision identifiers, used by Alembic.
revision = 'ba6fefa33e22'
down_revision = '2cb2db7089f4'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Add a NOT NULL constraint to every required column.

    The (table, column, existing type) triples below cover the game,
    go_server, myuser, player, rating and role tables.
    """
    columns = [
        ('game', 'black_id', sa.INTEGER()),
        ('game', 'date_played', postgresql.TIMESTAMP()),
        ('game', 'date_reported', postgresql.TIMESTAMP()),
        ('game', 'handicap', sa.INTEGER()),
        ('game', 'komi', postgresql.DOUBLE_PRECISION(precision=53)),
        ('game', 'rated', sa.BOOLEAN()),
        ('game', 'result', sa.VARCHAR(length=10)),
        ('game', 'server_id', sa.INTEGER()),
        ('game', 'white_id', sa.INTEGER()),
        ('go_server', 'name', sa.VARCHAR(length=80)),
        ('go_server', 'token', sa.TEXT()),
        ('go_server', 'url', sa.VARCHAR(length=180)),
        ('myuser', 'active', sa.BOOLEAN()),
        ('myuser', 'claimed', sa.BOOLEAN()),
        ('myuser', 'email', sa.VARCHAR(length=255)),
        ('myuser', 'login_count', sa.INTEGER()),
        ('player', 'name', sa.VARCHAR(length=20)),
        ('player', 'server_id', sa.INTEGER()),
        ('player', 'token', sa.TEXT()),
        ('player', 'user_id', sa.INTEGER()),
        ('rating', 'rating', postgresql.DOUBLE_PRECISION(precision=53)),
        ('rating', 'sigma', postgresql.DOUBLE_PRECISION(precision=53)),
        ('rating', 'user_id', sa.INTEGER()),
        ('role', 'description', sa.VARCHAR(length=255)),
        ('role', 'name', sa.VARCHAR(length=80)),
    ]
    for table_name, column_name, column_type in columns:
        op.alter_column(table_name, column_name,
                        existing_type=column_type,
                        nullable=False)
def downgrade():
    """Make every column nullable again, in the reverse order of upgrade()."""
    columns = [
        ('game', 'black_id', sa.INTEGER()),
        ('game', 'date_played', postgresql.TIMESTAMP()),
        ('game', 'date_reported', postgresql.TIMESTAMP()),
        ('game', 'handicap', sa.INTEGER()),
        ('game', 'komi', postgresql.DOUBLE_PRECISION(precision=53)),
        ('game', 'rated', sa.BOOLEAN()),
        ('game', 'result', sa.VARCHAR(length=10)),
        ('game', 'server_id', sa.INTEGER()),
        ('game', 'white_id', sa.INTEGER()),
        ('go_server', 'name', sa.VARCHAR(length=80)),
        ('go_server', 'token', sa.TEXT()),
        ('go_server', 'url', sa.VARCHAR(length=180)),
        ('myuser', 'active', sa.BOOLEAN()),
        ('myuser', 'claimed', sa.BOOLEAN()),
        ('myuser', 'email', sa.VARCHAR(length=255)),
        ('myuser', 'login_count', sa.INTEGER()),
        ('player', 'name', sa.VARCHAR(length=20)),
        ('player', 'server_id', sa.INTEGER()),
        ('player', 'token', sa.TEXT()),
        ('player', 'user_id', sa.INTEGER()),
        ('rating', 'rating', postgresql.DOUBLE_PRECISION(precision=53)),
        ('rating', 'sigma', postgresql.DOUBLE_PRECISION(precision=53)),
        ('rating', 'user_id', sa.INTEGER()),
        ('role', 'description', sa.VARCHAR(length=255)),
        ('role', 'name', sa.VARCHAR(length=80)),
    ]
    # Iterate in reverse so constraints are relaxed in the opposite order
    # to the one in which upgrade() added them.
    for table_name, column_name, column_type in reversed(columns):
        op.alter_column(table_name, column_name,
                        existing_type=column_type,
                        nullable=True)
| {
"repo_name": "usgo/online-ratings",
"path": "web/migrations/versions/ba6fefa33e22_add_not_null_constraints_everywhere.py",
"copies": "2",
"size": "6577",
"license": "mit",
"hash": -4564594505896265000,
"line_mean": 36.7988505747,
"line_max": 71,
"alpha_frac": 0.5507070093,
"autogenerated": false,
"ratio": 4.04240934234788,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.016920661232403777,
"num_lines": 174
} |
"""Add not-null constraints on membership table.
Revision ID: 33ad6ad8f955
Revises: 4c3554286996
Create Date: 2015-01-30 19:17:13.409798
"""
# revision identifiers, used by Alembic.
revision = '33ad6ad8f955'
down_revision = '4c3554286996'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Require both foreign keys of the committee membership link table."""
    for column_name in ('committee_id', 'member_id'):
        op.alter_column('committee_members', column_name,
                        existing_type=sa.INTEGER(),
                        nullable=False)
def downgrade():
    """Make both membership columns nullable again, in reverse order."""
    for column_name in ('member_id', 'committee_id'):
        op.alter_column('committee_members', column_name,
                        existing_type=sa.INTEGER(),
                        nullable=True)
| {
"repo_name": "Code4SA/pmg-cms-2",
"path": "migrations/versions/33ad6ad8f955_add_not_null_constraints_on_membership_.py",
"copies": "1",
"size": "1075",
"license": "apache-2.0",
"hash": 3476737568343178000,
"line_mean": 27.2894736842,
"line_max": 63,
"alpha_frac": 0.6353488372,
"autogenerated": false,
"ratio": 3.706896551724138,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.978945498896951,
"avg_score": 0.010558079990925544,
"num_lines": 38
} |
"""Add object_events table
Revision ID: 420f0f384465
Revises: 126a93430a9e
Create Date: 2013-11-21 01:57:45.035753
"""
# revision identifiers, used by Alembic.
revision = '420f0f384465'
down_revision = '126a93430a9e'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the object_events table (polymorphic via eventable_id/type)."""
    # NOTE(review): default/onupdate below are client-side SQLAlchemy
    # defaults; in a migration's create_table they emit no DDL, so the
    # database itself will not fill these timestamps — confirm whether
    # server_default was intended.
    op.create_table('object_events',
        sa.Column('id', sa.Integer(), nullable=False, primary_key=True),
        sa.Column('permissions_json', sa.Text(), nullable=False),
        sa.Column('modified_by_id', sa.Integer()),
        sa.Column(
            'created_at', sa.DateTime(), default=sa.text('current_timestamp')),
        sa.Column(
            'updated_at',
            sa.DateTime(),
            default=sa.text('current_timestamp'),
            onupdate=sa.text('current_timestamp')),
        sa.Column('context_id', sa.Integer()),
        sa.Column('calendar_id', sa.String(length=250), nullable=True),
        sa.Column('event_id', sa.String(length=250), nullable=False),
        sa.Column('eventable_id', sa.Integer(), nullable=False),
        sa.Column('eventable_type', sa.String(length=250), nullable=False),
    )
def downgrade():
    """Drop the object_events table created in upgrade()."""
    op.drop_table('object_events')
| {
"repo_name": "vladan-m/ggrc-core",
"path": "src/ggrc_gdrive_integration/migrations/versions/20131121015745_420f0f384465_add_object_events_ta.py",
"copies": "2",
"size": "1101",
"license": "apache-2.0",
"hash": -8274449319511900000,
"line_mean": 27.9736842105,
"line_max": 73,
"alpha_frac": 0.676657584,
"autogenerated": false,
"ratio": 3.1913043478260867,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9847105071295313,
"avg_score": 0.004171372106154714,
"num_lines": 38
} |
"""add obsolete columns
Revision ID: 4a9ceee64d0b
Revises: 69453413dfc3
Create Date: 2018-07-13 09:52:49.048544
"""
import model.utils
import sqlalchemy as sa
from alembic import op
from rdr_service.model.site_enums import ObsoleteStatus
# revision identifiers, used by Alembic.
revision = "4a9ceee64d0b"
down_revision = "69453413dfc3"
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade_<engine_name> function."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade_<engine_name> function."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_rdr():
    """Add a nullable is_obsolete enum column to hpo, organization and site."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("hpo", sa.Column("is_obsolete", model.utils.Enum(ObsoleteStatus), nullable=True))
    op.add_column("organization", sa.Column("is_obsolete", model.utils.Enum(ObsoleteStatus), nullable=True))
    op.add_column("site", sa.Column("is_obsolete", model.utils.Enum(ObsoleteStatus), nullable=True))
    # ### end Alembic commands ###
def downgrade_rdr():
    """Drop the is_obsolete column again, reversing upgrade_rdr()'s order."""
    for table_name in ("site", "organization", "hpo"):
        op.drop_column(table_name, "is_obsolete")
def upgrade_metrics():
    """No-op: this revision makes no changes to the metrics database."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No-op: this revision makes no changes to the metrics database."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/4a9ceee64d0b_add_obsolete_columns.py",
"copies": "1",
"size": "1488",
"license": "bsd-3-clause",
"hash": 7865203316313358000,
"line_mean": 26.5555555556,
"line_max": 108,
"alpha_frac": 0.6814516129,
"autogenerated": false,
"ratio": 3.405034324942792,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45864859378427913,
"avg_score": null,
"num_lines": null
} |
"""add officer age & years of service to SRPD incidents
Revision ID: 5cb851130ebb
Revises: c5e276c0d67f
Create Date: 2017-02-02 16:31:22.915785
"""
# revision identifiers, used by Alembic.
revision = '5cb851130ebb'
down_revision = 'c5e276c0d67f'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add officer_age and officer_years_of_service to all four SRPD tables."""
    tables = (
        'citizen_complaints_srpd',
        'officer_involved_shootings_srpd',
        'pursuits_srpd',
        'use_of_force_incidents_srpd',
    )
    for table_name in tables:
        for column_name in ('officer_age', 'officer_years_of_service'):
            op.add_column(table_name, sa.Column(column_name, sa.String(length=255), nullable=True))
def downgrade():
    """Drop the officer columns again, in the reverse order of upgrade()."""
    tables = (
        'use_of_force_incidents_srpd',
        'pursuits_srpd',
        'officer_involved_shootings_srpd',
        'citizen_complaints_srpd',
    )
    for table_name in tables:
        for column_name in ('officer_years_of_service', 'officer_age'):
            op.drop_column(table_name, column_name)
| {
"repo_name": "codeforamerica/comport",
"path": "migrations/versions/5cb851130ebb_.py",
"copies": "1",
"size": "1804",
"license": "bsd-3-clause",
"hash": 3548411128261506600,
"line_mean": 50.5428571429,
"line_max": 129,
"alpha_frac": 0.7283813747,
"autogenerated": false,
"ratio": 2.668639053254438,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3897020427954438,
"avg_score": null,
"num_lines": null
} |
"""Add ondelete cascade to foreign keys
Revision ID: b295b033364d
Revises: b5551cd26764
Create Date: 2019-05-03 19:26:57.746887
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = "b295b033364d"
down_revision = "b5551cd26764"
branch_labels = None
depends_on = None
def upgrade():
    """Recreate every foreign key with an explicit ON DELETE action.

    Child rows (awards, files, flags, hints, tags, tracking, unlocks) are
    deleted when their parent row is deleted; a team's captain_id is set to
    NULL instead.  MySQL and PostgreSQL auto-generate different constraint
    names, so each dialect gets its own branch; any other dialect (e.g.
    SQLite) matches neither prefix and is left unchanged.
    """
    bind = op.get_bind()
    url = str(bind.engine.url)
    if url.startswith("mysql"):
        op.drop_constraint("awards_ibfk_1", "awards", type_="foreignkey")
        op.drop_constraint("awards_ibfk_2", "awards", type_="foreignkey")
        op.create_foreign_key(
            "awards_ibfk_1", "awards", "teams", ["team_id"], ["id"], ondelete="CASCADE"
        )
        op.create_foreign_key(
            "awards_ibfk_2", "awards", "users", ["user_id"], ["id"], ondelete="CASCADE"
        )
        op.drop_constraint("files_ibfk_1", "files", type_="foreignkey")
        op.create_foreign_key(
            "files_ibfk_1",
            "files",
            "challenges",
            ["challenge_id"],
            ["id"],
            ondelete="CASCADE",
        )
        op.drop_constraint("flags_ibfk_1", "flags", type_="foreignkey")
        op.create_foreign_key(
            "flags_ibfk_1",
            "flags",
            "challenges",
            ["challenge_id"],
            ["id"],
            ondelete="CASCADE",
        )
        op.drop_constraint("hints_ibfk_1", "hints", type_="foreignkey")
        op.create_foreign_key(
            "hints_ibfk_1",
            "hints",
            "challenges",
            ["challenge_id"],
            ["id"],
            ondelete="CASCADE",
        )
        op.drop_constraint("tags_ibfk_1", "tags", type_="foreignkey")
        op.create_foreign_key(
            "tags_ibfk_1",
            "tags",
            "challenges",
            ["challenge_id"],
            ["id"],
            ondelete="CASCADE",
        )
        # Captains are detached rather than deleted with their user row.
        op.drop_constraint("team_captain_id", "teams", type_="foreignkey")
        op.create_foreign_key(
            "team_captain_id",
            "teams",
            "users",
            ["captain_id"],
            ["id"],
            ondelete="SET NULL",
        )
        op.drop_constraint("tracking_ibfk_1", "tracking", type_="foreignkey")
        op.create_foreign_key(
            "tracking_ibfk_1",
            "tracking",
            "users",
            ["user_id"],
            ["id"],
            ondelete="CASCADE",
        )
        op.drop_constraint("unlocks_ibfk_1", "unlocks", type_="foreignkey")
        op.drop_constraint("unlocks_ibfk_2", "unlocks", type_="foreignkey")
        op.create_foreign_key(
            "unlocks_ibfk_1",
            "unlocks",
            "teams",
            ["team_id"],
            ["id"],
            ondelete="CASCADE",
        )
        op.create_foreign_key(
            "unlocks_ibfk_2",
            "unlocks",
            "users",
            ["user_id"],
            ["id"],
            ondelete="CASCADE",
        )
    elif url.startswith("postgres"):
        op.drop_constraint("awards_team_id_fkey", "awards", type_="foreignkey")
        op.drop_constraint("awards_user_id_fkey", "awards", type_="foreignkey")
        op.create_foreign_key(
            "awards_team_id_fkey",
            "awards",
            "teams",
            ["team_id"],
            ["id"],
            ondelete="CASCADE",
        )
        op.create_foreign_key(
            "awards_user_id_fkey",
            "awards",
            "users",
            ["user_id"],
            ["id"],
            ondelete="CASCADE",
        )
        op.drop_constraint("files_challenge_id_fkey", "files", type_="foreignkey")
        op.create_foreign_key(
            "files_challenge_id_fkey",
            "files",
            "challenges",
            ["challenge_id"],
            ["id"],
            ondelete="CASCADE",
        )
        op.drop_constraint("flags_challenge_id_fkey", "flags", type_="foreignkey")
        op.create_foreign_key(
            "flags_challenge_id_fkey",
            "flags",
            "challenges",
            ["challenge_id"],
            ["id"],
            ondelete="CASCADE",
        )
        op.drop_constraint("hints_challenge_id_fkey", "hints", type_="foreignkey")
        op.create_foreign_key(
            "hints_challenge_id_fkey",
            "hints",
            "challenges",
            ["challenge_id"],
            ["id"],
            ondelete="CASCADE",
        )
        op.drop_constraint("tags_challenge_id_fkey", "tags", type_="foreignkey")
        op.create_foreign_key(
            "tags_challenge_id_fkey",
            "tags",
            "challenges",
            ["challenge_id"],
            ["id"],
            ondelete="CASCADE",
        )
        # Captains are detached rather than deleted with their user row.
        op.drop_constraint("team_captain_id", "teams", type_="foreignkey")
        op.create_foreign_key(
            "team_captain_id",
            "teams",
            "users",
            ["captain_id"],
            ["id"],
            ondelete="SET NULL",
        )
        op.drop_constraint("tracking_user_id_fkey", "tracking", type_="foreignkey")
        op.create_foreign_key(
            "tracking_user_id_fkey",
            "tracking",
            "users",
            ["user_id"],
            ["id"],
            ondelete="CASCADE",
        )
        op.drop_constraint("unlocks_team_id_fkey", "unlocks", type_="foreignkey")
        op.drop_constraint("unlocks_user_id_fkey", "unlocks", type_="foreignkey")
        op.create_foreign_key(
            "unlocks_team_id_fkey",
            "unlocks",
            "teams",
            ["team_id"],
            ["id"],
            ondelete="CASCADE",
        )
        op.create_foreign_key(
            "unlocks_user_id_fkey",
            "unlocks",
            "users",
            ["user_id"],
            ["id"],
            ondelete="CASCADE",
        )
def downgrade():
    """Recreate the original foreign keys without any ON DELETE action.

    MySQL and PostgreSQL auto-name constraints differently (``*_ibfk_N`` vs
    ``*_fkey``), so each dialect gets its own branch; within a branch the
    constraints are restored in the same order the upgrade replaced them.
    """
    bind = op.get_bind()
    url = str(bind.engine.url)
    if url.startswith("mysql"):
        # unlocks: both constraints are dropped before either is recreated.
        for name in ("unlocks_ibfk_1", "unlocks_ibfk_2"):
            op.drop_constraint(name, "unlocks", type_="foreignkey")
        op.create_foreign_key("unlocks_ibfk_1", "unlocks", "teams", ["team_id"], ["id"])
        op.create_foreign_key("unlocks_ibfk_2", "unlocks", "users", ["user_id"], ["id"])
        # Tables with a single constraint: drop and recreate one at a time.
        simple = (
            ("tracking_ibfk_1", "tracking", "users", "user_id"),
            ("team_captain_id", "teams", "users", "captain_id"),
            ("tags_ibfk_1", "tags", "challenges", "challenge_id"),
            ("hints_ibfk_1", "hints", "challenges", "challenge_id"),
            ("flags_ibfk_1", "flags", "challenges", "challenge_id"),
            ("files_ibfk_1", "files", "challenges", "challenge_id"),
        )
        for name, table, referent, column in simple:
            op.drop_constraint(name, table, type_="foreignkey")
            op.create_foreign_key(name, table, referent, [column], ["id"])
        # awards: both constraints are dropped before either is recreated.
        for name in ("awards_ibfk_1", "awards_ibfk_2"):
            op.drop_constraint(name, "awards", type_="foreignkey")
        op.create_foreign_key("awards_ibfk_1", "awards", "teams", ["team_id"], ["id"])
        op.create_foreign_key("awards_ibfk_2", "awards", "users", ["user_id"], ["id"])
    elif url.startswith("postgres"):
        for name in ("unlocks_team_id_fkey", "unlocks_user_id_fkey"):
            op.drop_constraint(name, "unlocks", type_="foreignkey")
        op.create_foreign_key("unlocks_team_id_fkey", "unlocks", "teams", ["team_id"], ["id"])
        op.create_foreign_key("unlocks_user_id_fkey", "unlocks", "users", ["user_id"], ["id"])
        simple = (
            ("tracking_user_id_fkey", "tracking", "users", "user_id"),
            ("team_captain_id", "teams", "users", "captain_id"),
            ("tags_challenge_id_fkey", "tags", "challenges", "challenge_id"),
            ("hints_challenge_id_fkey", "hints", "challenges", "challenge_id"),
            ("flags_challenge_id_fkey", "flags", "challenges", "challenge_id"),
            ("files_challenge_id_fkey", "files", "challenges", "challenge_id"),
        )
        for name, table, referent, column in simple:
            op.drop_constraint(name, table, type_="foreignkey")
            op.create_foreign_key(name, table, referent, [column], ["id"])
        for name in ("awards_team_id_fkey", "awards_user_id_fkey"):
            op.drop_constraint(name, "awards", type_="foreignkey")
        op.create_foreign_key("awards_team_id_fkey", "awards", "teams", ["team_id"], ["id"])
        op.create_foreign_key("awards_user_id_fkey", "awards", "users", ["user_id"], ["id"])
| {
"repo_name": "CTFd/CTFd",
"path": "migrations/versions/b295b033364d_add_ondelete_cascade_to_foreign_keys.py",
"copies": "4",
"size": "9801",
"license": "apache-2.0",
"hash": 8286545861502038000,
"line_mean": 31.889261745,
"line_max": 88,
"alpha_frac": 0.5026017753,
"autogenerated": false,
"ratio": 3.623290203327172,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6125891978627171,
"avg_score": null,
"num_lines": null
} |
"""add ondelete cascade to user_committee_alerts
Revision ID: 29978548fcb6
Revises: 1cde07889540
Create Date: 2015-02-10 09:53:41.732800
"""
# revision identifiers, used by Alembic.
revision = '29978548fcb6'
down_revision = '1cde07889540'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Rename the user/committee FKs on user_committee_alerts and add
    ON DELETE CASCADE so alerts disappear with their owner rows."""
    for old_name, new_name, referent, column in (
        ('user_committee_user_id_fkey', 'user_committee_alerts_user_id_fkey', 'user', 'user_id'),
        ('user_committee_committee_id_fkey', 'user_committee_alerts_committee_id_fkey', 'committee', 'committee_id'),
    ):
        op.drop_constraint(old_name, 'user_committee_alerts')
        op.create_foreign_key(new_name, 'user_committee_alerts', referent, [column], ['id'], ondelete='CASCADE')
def downgrade():
    """Restore the original constraint names without ON DELETE CASCADE."""
    for cascade_name, plain_name, referent, column in (
        ('user_committee_alerts_user_id_fkey', 'user_committee_user_id_fkey', 'user', 'user_id'),
        ('user_committee_alerts_committee_id_fkey', 'user_committee_committee_id_fkey', 'committee', 'committee_id'),
    ):
        op.drop_constraint(cascade_name, 'user_committee_alerts')
        op.create_foreign_key(plain_name, 'user_committee_alerts', referent, [column], ['id'])
| {
"repo_name": "Code4SA/pmg-cms-2",
"path": "migrations/versions/29978548fcb6_add_ondelete_cascade_to_user_committee_.py",
"copies": "1",
"size": "1235",
"license": "apache-2.0",
"hash": 8690881897060263000,
"line_mean": 37.59375,
"line_max": 152,
"alpha_frac": 0.7206477733,
"autogenerated": false,
"ratio": 2.990314769975787,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9187463410638869,
"avg_score": 0.0046998265273837,
"num_lines": 32
} |
"""add ondelete set null to question_repyl, tabled_committee_report and call_for_comment tables
Revision ID: 122a38429a58
Revises: 29cf770ce19b
Create Date: 2015-02-10 10:13:58.437446
"""
# revision identifiers, used by Alembic.
revision = '122a38429a58'
down_revision = '29cf770ce19b'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Recreate the committee FK on each dependent table with
    ON DELETE SET NULL, so deleting a committee nulls the references."""
    for table in ('question_reply', 'tabled_committee_report', 'call_for_comment'):
        name = table + '_committee_id_fkey'
        op.drop_constraint(name, table)
        op.create_foreign_key(name, table, 'committee', ['committee_id'], ['id'], ondelete='SET NULL')
def downgrade():
    # Intentionally a no-op: the original constraints (without
    # ON DELETE SET NULL) are not restored, making this migration
    # effectively irreversible.
    pass
| {
"repo_name": "Code4SA/pmg-cms-2",
"path": "migrations/versions/122a38429a58_add_ondelete_set_null_to_question_repyl_.py",
"copies": "1",
"size": "1118",
"license": "apache-2.0",
"hash": -6448761011559110000,
"line_mean": 35.064516129,
"line_max": 157,
"alpha_frac": 0.7271914132,
"autogenerated": false,
"ratio": 3.029810298102981,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.923460654928426,
"avg_score": 0.004479032403744198,
"num_lines": 31
} |
#Add-on packages
import matplotlib
matplotlib.use('Agg')
import numpy as np
import pandas as pd
from matplotlib.backends.backend_pdf import PdfPages
# Plotting packages
from secimtools.visualManager import module_box as box
from secimtools.visualManager import module_hist as hist
from secimtools.visualManager import module_lines as lines
from secimtools.visualManager import module_scatter as scatter
from secimtools.visualManager.manager_color import colorHandler
from secimtools.visualManager.manager_figure import figureHandler
def volcano(combo, results, oname, cutoff=2):
    """
    Plot volcano plots.

    Creates one volcano plot per pairwise comparison and writes them all
    to a single PDF.

    :Arguments:
        :type combo: dictionary
        :param combo: A dictionary of dictionaries with all possible pairwise
            combinations. Accepted for interface compatibility; not
            referenced by the plotting code itself.

        :type results: pandas DataFrame
        :param results: Table containing "diff_of..." and "-log10_p-value_..."
            columns for every pairwise comparison.

        :type oname: string
        :param oname: Name of the output file in pdf format.

        :type cutoff: int
        :param cutoff: The cutoff value for significance.

    :Returns:
        :rtype: PD
        :returns: Outputs a pdf file containing all plots.
    """
    # Split the relevant columns into -log10(p-value) and difference series,
    # keyed by the comparison label (last "_"-separated token of the column).
    lpvals = {}
    difs = {}
    for col in results.columns.tolist():
        label = col.split("_")[-1]
        if col.startswith("-log10_p-value_"):
            lpvals[label] = results[col]
        elif col.startswith("diff_of"):
            difs[label] = results[col]
    # One volcano plot per comparison, all appended to the same PDF.
    with PdfPages(oname) as pdf:
        for key in sorted(difs.keys()):
            fh = figureHandler(proj="2d")
            # Plot every point in blue first ...
            scatter.scatter2D(x=list(difs[key]), y=list(lpvals[key]),
                              colorList=list('b'), ax=fh.ax[0])
            # ... then overplot points above the significance threshold in red.
            cutLpvals = lpvals[key][lpvals[key] > cutoff]
            if not cutLpvals.empty:
                cutDiff = difs[key][cutLpvals.index]
                scatter.scatter2D(x=list(cutDiff), y=list(cutLpvals),
                                  colorList=list('r'), ax=fh.ax[0])
            # Horizontal significance cutoff line.
            lines.drawCutoffHoriz(y=cutoff, ax=fh.ax[0])
            fh.formatAxis(axTitle=key, grid=False,
                          yTitle="-log10(p-value) for Diff of treatment = {0}".format(key),
                          xTitle="Diff of treatment = {0}".format(key))
            fh.addToPdf(pdfPages=pdf)
| {
"repo_name": "secimTools/SECIMTools",
"path": "src/secimtools/anovaModules/volcano.py",
"copies": "2",
"size": "2710",
"license": "mit",
"hash": -9210571469516348000,
"line_mean": 34.1948051948,
"line_max": 83,
"alpha_frac": 0.632103321,
"autogenerated": false,
"ratio": 3.8549075391180656,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.010101981723933128,
"num_lines": 77
} |
#Add-on packages
import matplotlib
matplotlib.use('Agg')
import numpy as np
import pandas as pd
import statsmodels.api as sm
from matplotlib.backends.backend_pdf import PdfPages
# Plotting packages
from secimtools.visualManager import module_box as box
from secimtools.visualManager import module_hist as hist
from secimtools.visualManager import module_lines as lines
from secimtools.visualManager import module_scatter as scatter
from secimtools.visualManager.manager_color import colorHandler
from secimtools.visualManager.manager_figure import figureHandler
def qqPlot(tresid, tfit, oname):
    """
    Plot the residual diagnostic plots by sample.

    Output q-q plot, boxplots and distributions of the residuals. These plots
    will be used to diagnose whether residuals are approximately normal.

    :Arguments:
        :type tresid: pandas.Series
        :param tresid: Pearson normalized residuals. (transposed)
            (residuals / sqrt(MSE))

        :type tfit: pandas DataFrame
        :param tfit: output of the ANOVA (transposed)

        :type oname: string
        :param oname: Name of the output file in pdf format.

    :Returns:
        :rtype: PDF
        :returns: Outputs a pdf file containing all plots.
    """
    # Open pdf
    with PdfPages(oname) as pdf:
        # Axis layout: three small axes on the first row, one wide on the second.
        axisLayout = [(0,0,1,1),(0,1,1,1),(0,2,1,1),(1,0,3,1)]
        # One page of diagnostics per sample/column.
        for col in tresid.columns:
            # Creating figure
            fig = figureHandler(proj='2d',numAx=4,numRow=2,numCol=3,
                                arrangement=axisLayout)
            data = tresid[col].values.ravel()
            # NOTE: the histogram frame is built before NaN removal, so it is
            # based on the raw (possibly NaN-containing) residual values.
            df_data = pd.DataFrame(data)
            # Removing missing so that the remaining plots render correctly.
            mask_nan_data = np.isnan(data)
            data = data[~mask_nan_data]
            # Plot qqplot on axis 0
            sm.graphics.qqplot(data,fit=True,line='r',ax=fig.ax[0])
            # Plot boxplot on axis 1
            box.boxSeries(ser=data,ax=fig.ax[1])
            # Plot histogram on axis 2
            hist.quickHist(ax=fig.ax[2],dat=df_data,orientation='horizontal')
            # Plot scatterplot on axis 3
            scatter.scatter2D(ax=fig.ax[3],x=tfit[col], y=tresid[col],
                              colorList=list('b'))
            # Draw cutoff line for scatterplot on axis 3
            lines.drawCutoffHoriz(ax=fig.ax[3],y=0)
            # Format axis 0
            fig.formatAxis(figTitle=col,axnum=0,grid=False,showX=True,
                           yTitle="Sample Quantiles", xTitle=" ")
            # Format axis 1
            fig.formatAxis(axnum=1,axTitle="Standardized Residuals",
                           grid=False,showX=False,showY=True, xTitle=" ")
            # Format axis 2
            fig.formatAxis(axnum=2,grid=False,showX=True,showY=True,
                           axTitle=" ",xTitle=" ")
            # Format axis 3
            fig.formatAxis(axnum=3,axTitle="Predicted Values vs Residual Values",
                           xTitle="Predicted Values",yTitle="Residual Values",
                           grid=False)
            # Add figure to pdf
            fig.addToPdf(pdfPages=pdf)
| {
"repo_name": "secimTools/SECIMTools",
"path": "src/scripts/secimtools/anovaModules/qqPlot.py",
"copies": "2",
"size": "3288",
"license": "mit",
"hash": 377979980900468740,
"line_mean": 30.3142857143,
"line_max": 81,
"alpha_frac": 0.6070559611,
"autogenerated": false,
"ratio": 3.774971297359357,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5382027258459358,
"avg_score": null,
"num_lines": null
} |
# Kodi background service: clears stale playback state on startup, refreshes
# reminders, then periodically reloads the XMLTV channel data.
ADDON = "plugin.video.tfixtures.listings"
import main
from datetime import datetime, timedelta
import time
import xbmcaddon
import xbmc

# Clear any "now playing" state left over from a previous Kodi session.
xbmcaddon.Addon(ADDON).setSetting('playing_channel', '')
xbmcaddon.Addon(ADDON).setSetting('playing_title', '')
xbmcaddon.Addon(ADDON).setSetting('playing_start', '')
main.refresh_reminders()
monitor = xbmc.Monitor()
while not monitor.abortRequested():
    try:
        reload_enabled = xbmcaddon.Addon(ADDON).getSetting('xml_reload_timer')
    except Exception:
        reload_enabled = 'false'
    if reload_enabled == 'true':
        dt = datetime.now()
        try:
            xmltv_timer_last = int(xbmcaddon.Addon(ADDON).getSetting('xmltv_timer_last'))
        except Exception:
            xmltv_timer_last = 0
        now_seconds = int(time.mktime(dt.timetuple()))
        timeout = False
        if xmltv_timer_last + 25 * 3600 < now_seconds:
            # Hard fallback: always reload if more than ~25 hours have passed.
            timeout = True
        else:
            # BUGFIX: getSetting() returns a string, but the original compared
            # it directly to the integer dt.hour, which is always False, so the
            # scheduled reload at the configured hour never fired.
            try:
                hour = int(xbmcaddon.Addon(ADDON).getSetting('xml_reload_hour'))
            except (TypeError, ValueError):
                hour = -1  # invalid/unset hour: rely on the 25h fallback only
            if xmltv_timer_last + 1 * 3600 < now_seconds:  # only once a day
                if dt.hour == hour:
                    timeout = True
        if timeout:
            xbmcaddon.Addon(ADDON).setSetting('xml_reload', 'true')
            main.xml_channels()
            xbmcaddon.Addon(ADDON).setSetting('ini_reload', 'true')
            main.store_channels()
            now = int(time.mktime(dt.timetuple()))
            xbmcaddon.Addon(ADDON).setSetting("xmltv_timer_last", str(now))
    wait_time = 60
    if monitor.waitForAbort(wait_time):
        break
| {
"repo_name": "TheWardoctor/Wardoctors-repo",
"path": "plugin.video.tfixtures.listings/service.py",
"copies": "1",
"size": "1519",
"license": "apache-2.0",
"hash": -1544844105536007200,
"line_mean": 35.1666666667,
"line_max": 89,
"alpha_frac": 0.6142198815,
"autogenerated": false,
"ratio": 3.7786069651741294,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4892826846674129,
"avg_score": null,
"num_lines": null
} |
# Addons: "DamageLog"
# ktulho <https://kr.cm/f/p/17624/>
import copy
import BigWorld
import ResMgr
import nations
from Avatar import PlayerAvatar
from Vehicle import Vehicle
from VehicleEffects import DamageFromShotDecoder
from vehicle_systems.tankStructure import TankPartIndexes
from constants import ITEM_DEFS_PATH, DAMAGE_INFO_CODES, VEHICLE_CLASSES
from gui.Scaleform.daapi.view.battle.shared.damage_log_panel import DamageLogPanel
from gui.Scaleform.daapi.view.meta.DamagePanelMeta import DamagePanelMeta
from gui.shared.utils.TimeInterval import TimeInterval
from items import vehicles, _xml
from helpers import dependency
from skeletons.gui.battle_session import IBattleSessionProvider
from skeletons.gui.game_control import IBootcampController
from xfw.events import registerEvent, overrideMethod
from xfw_actionscript.python import *
from xvm_main.python.logger import *
from xvm_main.python.stats import _stat
import xvm_main.python.config as config
import xvm_main.python.userprefs as userprefs
import xvm_battle.python.battle as battle
import parser_addon
# --- Mutable module state ----------------------------------------------------
on_fire = 0             # current fire indicator value
isDownAlt = False       # True while the Alt key is held (alternate log view)
damageLogConfig = {}    # cached per-section config (see readyConfig)
macros = None           # lazily built macro dictionary for the formatters
chooseRating = None     # selected rating key from RATINGS
isImpact = False
isShowDamageLog = True

# --- Attack reason ids as reported by the game, mapped to macro names --------
ATTACK_REASONS = {
    0: 'shot',
    1: 'fire',
    2: 'ramming',
    3: 'world_collision',
    4: 'death_zone',
    5: 'drowning',
    6: 'gas_attack',
    7: 'overturn',
    8: 'manual',
    9: 'artillery_protection',
    10: 'artillery_sector',
    11: 'bombers',
    12: 'recovery',
    13: 'artillery_eq',
    14: 'bomber_eq',
    15: 'none',
    24: 'art_attack',
    25: 'air_strike'
}

# --- Vehicle class tags -> short labels --------------------------------------
VEHICLE_CLASSES_SHORT = {
    'mediumTank': 'mt',
    'lightTank': 'lt',
    'heavyTank': 'ht',
    'AT-SPG': 'td',
    'SPG': 'spg',
    'not_vehicle': 'not_vehicle'
}

# --- Hit effect codes -> macro names -----------------------------------------
HIT_EFFECT_CODES = {
    None: 'unknown',
    0: 'intermediate_ricochet',
    1: 'final_ricochet',
    2: 'armor_not_pierced',
    3: 'armor_pierced_no_damage',
    4: 'armor_pierced',
    5: 'critical_hit'
}

# --- Selectable player ratings: macro name and display width -----------------
RATINGS = {
    'xvm_wgr': {'name': 'xwgr', 'size': 2},
    'xvm_wtr': {'name': 'xwtr', 'size': 2},
    'xvm_wn8': {'name': 'xwn8', 'size': 2},
    'xvm_eff': {'name': 'xeff', 'size': 2},
    'xvm_xte': {'name': 'xte', 'size': 2},
    'basic_wgr': {'name': 'wgr', 'size': 5},
    'basic_wtr': {'name': 'wtr', 'size': 4},
    'basic_wn8': {'name': 'wn8', 'size': 4},
    'basic_eff': {'name': 'eff', 'size': 4},
    'basic_xte': {'name': 'xte', 'size': 2}
}

# --- Module/crew health keys -> crit macro names; the "_destr" variants ------
# --- are looked up when the device is destroyed rather than just damaged -----
DEVICES_TANKMAN = {
    'engineHealth': 'engine_crit',
    'ammoBayHealth': 'ammo_bay_crit',
    'fuelTankHealth': 'fuel_tank_crit',
    'radioHealth': 'radio_crit',
    'leftTrackHealth': 'left_track_crit',
    'rightTrackHealth': 'right_track_crit',
    'gunHealth': 'gun_crit',
    'turretRotatorHealth': 'turret_rotator_crit',
    'surveyingDeviceHealth': 'surveying_device_crit',
    'commanderHealth': 'commander',
    'driverHealth': 'driver',
    'radioman1Health': 'radioman',
    'radioman2Health': 'radioman',
    'gunner1Health': 'gunner',
    'gunner2Health': 'gunner',
    'loader1Health': 'loader',
    'loader2Health': 'loader',
    'engineHealth_destr': 'engine_destr',
    'ammoBayHealth_destr': 'ammo_bay_destr',
    'fuelTankHealth_destr': 'fuel_tank_destr',
    'radioHealth_destr': 'radio_destr',
    'leftTrackHealth_destr': 'left_track_destr',
    'rightTrackHealth_destr': 'right_track_destr',
    'gunHealth_destr': 'gun_destr',
    'turretRotatorHealth_destr': 'turret_rotator_destr',
    'surveyingDeviceHealth_destr': 'surveying_device_destr'
}

# --- Config path fragments ---------------------------------------------------
ADD_LINE = -1
FORMAT_HISTORY = 'formatHistory'
ENABLED = 'enabled'
SHADOW = 'shadow/'
COLOR_RATING = 'colors'
COLOR_RATING_X = 'colors/x'
FORMAT_LAST_HIT = 'formatLastHit'
SHOW_HIT_NO_DAMAGE = 'showHitNoDamage'
MOVE_IN_BATTLE = 'moveInBattle'
TIME_DISPLAY_LAST_HIT = 'timeDisplayLastHit'
DAMAGE_LOG = 'damageLog/'
DAMAGE_LOG_ENABLED = DAMAGE_LOG + ENABLED
DAMAGE_LOG_DISABLED_DETAIL_STATS = DAMAGE_LOG + 'disabledDetailStats'
DAMAGE_LOG_DISABLED_SUMMARY_STATS = DAMAGE_LOG + 'disabledSummaryStats'

# --- Damage info codes grouped by outcome ------------------------------------
damageInfoCriticals = (
    'DEVICE_CRITICAL',
    'DEVICE_CRITICAL_AT_SHOT',
    'DEVICE_CRITICAL_AT_RAMMING',
    'DEVICE_CRITICAL_AT_FIRE',
    'DEVICE_CRITICAL_AT_WORLD_COLLISION',
    'DEVICE_CRITICAL_AT_DROWNING',
    'ENGINE_CRITICAL_AT_UNLIMITED_RPM'
)
damageInfoDestructions = (
    'DEVICE_DESTROYED',
    'DEVICE_DESTROYED_AT_SHOT',
    'DEVICE_DESTROYED_AT_RAMMING',
    'DEVICE_DESTROYED_AT_FIRE',
    'DEVICE_DESTROYED_AT_WORLD_COLLISION',
    'DEVICE_DESTROYED_AT_DROWNING',
    'ENGINE_DESTROYED_AT_UNLIMITED_RPM',
    'DEATH_FROM_DEVICE_EXPLOSION_AT_SHOT'
)
damageInfoTANKMAN = (
    'TANKMAN_HIT',
    'TANKMAN_HIT_AT_SHOT',
    'TANKMAN_HIT_AT_WORLD_COLLISION',
    'TANKMAN_HIT_AT_DROWNING'
)
class GROUP_DAMAGE(object):
    """Config keys controlling how repeated damage events are grouped in the log."""
    RAMMING_COLLISION = 'groupDamagesFromRamming_WorldCollision'
    FIRE = 'groupDamagesFromFire'
    ART_AND_AIRSTRIKE = 'groupDamageFromArtAndAirstrike'
class EVENTS_NAMES(object):
    """Names of the damage-log events raised towards the flash side."""
    ON_HIT = 'ON_HIT'
    ON_LAST_HIT = 'ON_LAST_HIT'
    ON_FIRE = 'ON_FIRE'
    ON_IMPACT = 'ON_IMPACT'
class SHADOW_OPTIONS(object):
    """Sub-keys of the 'shadow/' config section (text drop-shadow settings)."""
    DISTANCE = 'distance'
    ANGLE = 'angle'
    ALPHA = 'alpha'
    BLUR = 'blur'
    STRENGTH = 'strength'
    COLOR = 'color'
    HIDE_OBJECT = 'hideobject'
    INNER = 'inner'
    KNOCKOUT = 'knockout'
    QUALITY = 'quality'
class DAMAGE_LOG_SECTIONS(object):
    """Config section prefixes for each damage-log panel variant."""
    LOG = DAMAGE_LOG + 'log/'
    LOG_ALT = DAMAGE_LOG + 'logAlt/'
    LOG_BACKGROUND = DAMAGE_LOG + 'logBackground/'
    LOG_ALT_BACKGROUND = DAMAGE_LOG + 'logAltBackground/'
    LAST_HIT = DAMAGE_LOG + 'lastHit/'
    # All panel sections, iterated when applying shared settings.
    SECTIONS = (LOG, LOG_ALT, LOG_BACKGROUND, LOG_ALT_BACKGROUND, LAST_HIT)
def keyLower(_dict):
    """Return a copy of *_dict* with all keys lower-cased, or None.

    The original used dict.iterkeys(), which only exists on Python 2;
    iterating the dict directly is identical there and also works on
    Python 3.
    """
    if _dict is None:
        return None
    return {key.lower(): _dict[key] for key in _dict}
def parser(strHTML):
    """Expand XVM macros in *strHTML* using the current macro dictionary."""
    return parser_addon.parser_addon(strHTML, macros)
class ConfigCache(object):
    """Memoizing wrapper around config.get.

    When config autoreload is enabled, every lookup goes straight to the
    live config; otherwise the first result per key is cached for the
    session.
    """
    def __init__(self):
        self.__configCache = {}

    def get(self, key, default=None):
        """Return the config value for *key*, caching unless autoreload is on.

        BUGFIX: the previous setdefault(key, config.get(...)) form evaluated
        config.get on every call — even on cache hits — defeating the cache.
        """
        if config.config_autoreload:
            return config.get(key, default)
        cache = self.__configCache
        if key not in cache:
            cache[key] = config.get(key, default)
        return cache[key]
_config = ConfigCache()
def readyConfig(section):
    """Return the parsed damage-log configuration for *section*.

    With config autoreload enabled the values are re-read on every call;
    otherwise a previously cached entry from damageLogConfig is reused
    when present.
    """
    if config.config_autoreload or (section not in damageLogConfig):
        # (result key, config sub-key) pairs read from the section.
        mapping = (('vehicleClass', 'vtype'),
                   ('c_Shell', 'c:costShell'),
                   ('costShell', 'costShell'),
                   ('c_typeHit', 'c:dmg-kind'),
                   ('c_VehicleClass', 'c:vtype'),
                   ('typeHit', 'dmg-kind'),
                   ('c_teamDmg', 'c:team-dmg'),
                   ('teamDmg', 'team-dmg'),
                   ('compNames', 'comp-name'),
                   ('splashHit', 'splash-hit'),
                   ('criticalHit', 'critical-hit'),
                   ('hitEffect', 'hit-effects'),
                   ('c_hitEffect', 'c:hit-effects'),
                   ('typeShell', 'type-shell'),
                   ('c_typeShell', 'c:type-shell'),
                   ('critDevice', 'crit-device'))
        return {name: keyLower(_config.get(section + key)) for name, key in mapping}
    return damageLogConfig[section]
class Data(object):
    """Collects everything known about the most recent hit on the player's
    vehicle (attacker, shell, damage, crits, attacker ratings) into
    self.data and pushes it to the damage-log panels."""
    sessionProvider = dependency.descriptor(IBattleSessionProvider)
    bootcampController = dependency.descriptor(IBootcampController)

    def __init__(self):
        """Reset per-hit state and index premium / stun shells per nation."""
        self.reset()
        xmlPath = ''
        self.shells = {}           # nation -> ids of 'improved' (premium) shells
        self.shells_stunning = {}  # nation -> ids of shells with a stunDuration
        for nation in nations.NAMES:
            xmlPath = '%s%s%s%s' % (ITEM_DEFS_PATH, 'vehicles/', nation, '/components/shells.xml')
            xmlCtx_s = (((None, '{}/{}'.format(xmlPath, n)), s) for n, s in ResMgr.openSection(xmlPath).items() if (n != 'icons') and (n != 'xmlns:xmlref'))
            # BUGFIX: this used to be a generator expression; the first list
            # comprehension below exhausted it, so shells_stunning was always
            # empty. Materializing it lets both lists be built.
            id_xmlCtx_s = [(_xml.readInt(xmlCtx, s, 'id', 0, 65535), xmlCtx, s) for xmlCtx, s in xmlCtx_s]
            self.shells[nation] = [i for i, xmlCtx, s in id_xmlCtx_s if s.readBool('improved', False)]
            self.shells_stunning[nation] = [i for i, xmlCtx, s in id_xmlCtx_s if _xml.readStringOrNone(xmlCtx, s, 'stunDuration')]
            ResMgr.purge(xmlPath, True)

    def reset(self):
        """Reset every per-hit macro value to its default."""
        self.data = {
            'isAlive': True,
            'isDamage': False,
            'attackReasonID': 0,
            'attackerID': 0,
            'compName': 'unknown',
            'splashHit': 'no-splash',
            'criticalHit': False,
            'hitEffect': 'unknown',
            'damage': 0,
            'dmgRatio': 0,
            'oldHealth': 0,
            'maxHealth': 0,
            'costShell': 'unknown',
            'shellKind': 'not_shell',
            'teamDmg': 'unknown',
            'attackerVehicleType': 'not_vehicle',
            'shortUserString': '',
            'name': '',
            'clanAbbrev': '',
            'level': 1,
            'clanicon': '',
            'squadnum': 0,
            'number': None,
            'reloadGun': 0.0,
            'caliber': None,
            'fireDuration': None,
            'diff-masses': None,
            'nation': None,
            'blownup': False,
            'stun-duration': None,
            'shells_stunning': False,
            'critDevice': 'no-critical',
            'hitTime': 0,
            'attackerVehicleName': ''
        }

    def updateData(self):
        """Resolve attacker-dependent values (team relation, vehicle info,
        attacker ratings) and refresh the log panels."""
        if self.bootcampController.isInBootcamp():
            return
        player = BigWorld.player()
        self.data['dmgRatio'] = self.data['damage'] * 100 // self.data['maxHealth']
        attackerID = self.data['attackerID']
        # Battle time remaining at the moment of the hit, formatted mm:ss.
        minutes, seconds = divmod(int(self.sessionProvider.shared.arenaPeriod.getEndTime() - BigWorld.serverTime()), 60)
        self.data['hitTime'] = '{:02d}:{:02d}'.format(minutes, seconds)
        if attackerID:
            self.data['teamDmg'] = 'unknown'
            attacker = player.arena.vehicles.get(attackerID)
            if attacker is not None:
                if attacker['team'] != player.team:
                    self.data['teamDmg'] = 'enemy-dmg'
                elif attacker['name'] == player.name:
                    self.data['teamDmg'] = 'player'
                else:
                    self.data['teamDmg'] = 'ally-dmg'
                vehicleType = attacker['vehicleType']
                if vehicleType:
                    _type = vehicleType.type
                    self.data['attackerVehicleName'] = vehicleType.name.replace(':', '-', 1) if vehicleType.name else ''
                    self.data['attackerVehicleType'] = list(_type.tags.intersection(VEHICLE_CLASSES))[0]
                    self.data['shortUserString'] = _type.shortUserString
                    self.data['level'] = vehicleType.level
                    self.data['nation'] = nations.NAMES[_type.customizationNationID]
                    if self.data['attackReasonID'] == 2:
                        # Ramming: mass difference (in tons) between the vehicles.
                        self.data['diff-masses'] = (player.vehicleTypeDescriptor.physics['weight'] - vehicleType.physics['weight']) / 1000.0
                    elif self.data['diff-masses'] is not None:
                        self.data['diff-masses'] = None
                else:
                    self.data['attackerVehicleType'] = 'not_vehicle'
                    self.data['attackerVehicleName'] = ''
                    self.data['shortUserString'] = None
                    self.data['level'] = None
                    self.data['nation'] = None
                    self.data['diff-masses'] = None
                self.data['name'] = attacker['name']
                if (_stat.resp is not None) and (attacker['name'] in _stat.resp['players']):
                    stats = _stat.resp['players'][attacker['name']]
                    self.data['wn8'] = stats.get('wn8', None)
                    self.data['xwn8'] = stats.get('xwn8', None)
                    self.data['wtr'] = stats.get('wtr', None)
                    self.data['xwtr'] = stats.get('xwtr', None)
                    self.data['eff'] = stats.get('eff', None)
                    self.data['xeff'] = stats.get('xeff', None)
                    self.data['wgr'] = stats.get('wgr', None)
                    self.data['xwgr'] = stats.get('xwgr', None)
                    # BUGFIX: stats.get('v') could be None/absent, which made
                    # the chained .get raise AttributeError.
                    self.data['xte'] = (stats.get('v') or {}).get('xte', None)
                else:
                    self.data['wn8'] = None
                    self.data['xwn8'] = None
                    self.data['wtr'] = None
                    self.data['xwtr'] = None
                    self.data['eff'] = None
                    self.data['xeff'] = None
                    self.data['wgr'] = None
                    self.data['xwgr'] = None
                    self.data['xte'] = None
                self.data['clanAbbrev'] = attacker['clanAbbrev']
                self.data['clanicon'] = _stat.getClanIcon(attackerID)
                self.data['squadnum'] = None
                arenaDP = self.sessionProvider.getArenaDP()
                if arenaDP is not None:
                    vInfo = arenaDP.getVehicleInfo(vID=attackerID)
                    self.data['squadnum'] = vInfo.squadIndex if vInfo.squadIndex != 0 else None
        else:
            self.data['teamDmg'] = 'unknown'
            self.data['attackerVehicleType'] = 'not_vehicle'
            self.data['attackerVehicleName'] = ''
            self.data['shortUserString'] = ''
            self.data['name'] = ''
            self.data['clanAbbrev'] = ''
            self.data['level'] = None
            self.data['clanicon'] = None
            self.data['squadnum'] = None
        self.updateLabels()

    def typeShell(self, effectsIndex):
        """Classify the attacker's shell (kind, caliber, cost, stun) by
        matching *effectsIndex* against the attacker gun's shots."""
        self.data['costShell'] = 'unknown'
        self.data['shellKind'] = 'not_shell'
        # Only direct shots carry shell info (attackReasonID 0).
        if (self.data['attackerID'] == 0) or (self.data['attackReasonID'] != 0):
            return
        player = BigWorld.player()
        attacker = player.arena.vehicles.get(self.data['attackerID'])
        if (attacker is None) or not attacker['vehicleType']:
            self.data['shellKind'] = 'not_shell'
            self.data['caliber'] = None
            self.data['costShell'] = 'unknown'
            return
        for shot in attacker['vehicleType'].gun.shots:
            _shell = shot.shell
            if effectsIndex == _shell.effectsIndex:
                self.data['shellKind'] = str(_shell.kind).lower()
                self.data['caliber'] = _shell.caliber
                _id = _shell.id
                nation = nations.NAMES[_id[0]]
                self.data['costShell'] = 'gold-shell' if _id[1] in self.shells[nation] else 'silver-shell'
                self.data['shells_stunning'] = _id[1] in self.shells_stunning[nation]
                break

    def timeReload(self, attackerID):
        """Return the attacker gun's approximate reload time in seconds,
        applying crude crew (x0.94) and rammer factors; 0.0 when unknown."""
        player = BigWorld.player()
        attacker = player.arena.vehicles.get(attackerID)
        if attacker is not None:
            vehicleType = attacker['vehicleType']
            if vehicleType:
                reload_orig = vehicleType.gun.reloadTime
                _miscAttrs = vehicleType.miscAttrs
                # Assume a trained crew whenever any crew bonus is present.
                crew = 0.94 if _miscAttrs['crewLevelIncrease'] != 0 else 1.0
                # Rammer only applies to single-shot (non-magazine) guns.
                if (vehicleType.gun.clip[0] == 1) and (_miscAttrs['gunReloadTimeFactor'] != 0.0):
                    rammer = _miscAttrs['gunReloadTimeFactor']
                else:
                    rammer = 1
                return reload_orig * crew * rammer
        return 0.0

    def hitShell(self, attackerID, effectsIndex, damageFactor):
        """Register a shell (or artillery/air-strike) hit; the damage value,
        if any, arrives later through onHealthChanged."""
        self.data['stun-duration'] = None
        self.data['attackerID'] = attackerID
        self.data['attackReasonID'] = effectsIndex if effectsIndex in [24, 25] else 0
        self.data['reloadGun'] = self.timeReload(attackerID)
        self.typeShell(effectsIndex)
        self.data['damage'] = 0
        if self.data['isDamage']:
            self.data['hitEffect'] = HIT_EFFECT_CODES[4]
        elif self.data['shells_stunning']:
            # Stun shells: wait for updateStunInfo() before publishing.
            pass
        else:
            self.updateData()

    def updateLabels(self):
        """Push the collected data to all log panels, then clear one-shot flags."""
        global macros
        macros = None
        _log.callEvent = _logBackground.callEvent = not isDownAlt
        _logAlt.callEvent = _logAltBackground.callEvent = isDownAlt
        _logAlt.output()
        _log.output()
        _lastHit.output()
        _logBackground.output()
        _logAltBackground.output()
        self.data['critDevice'] = 'no-critical'
        self.data['criticalHit'] = False
        self.data['isDamage'] = False
        self.data['hitEffect'] = 'unknown'
        self.data['splashHit'] = 'no-splash'

    def showDamageFromShot(self, vehicle, attackerID, points, effectsIndex, damageFactor):
        """Handle a direct shell hit: decode the hit component and effect."""
        if not vehicle.isStarted:
            return
        maxComponentIdx = TankPartIndexes.ALL[-1]
        wheelsConfig = vehicle.appearance.typeDescriptor.chassis.generalWheelsAnimatorConfig
        if wheelsConfig:
            maxComponentIdx += wheelsConfig.getWheelsCount()
        maxHitEffectCode, decodedPoints, maxDamagedComponent = DamageFromShotDecoder.decodeHitPoints(points, vehicle.appearance.collisions, maxComponentIdx)
        if decodedPoints:
            compName = decodedPoints[0].componentName
            # Wheel components are named 'W...'; collapse them to 'wheel'.
            self.data['compName'] = compName if compName[0] != 'W' else 'wheel'
        else:
            self.data['compName'] = 'unknown'
        # self.data['criticalHit'] = (maxHitEffectCode == 5)
        if not self.data['isDamage']:
            # NOTE(review): maxHitEffectCode may be None here; min(3, None)
            # yields None on Python 2, which maps to 'unknown' — confirm.
            self.data['hitEffect'] = HIT_EFFECT_CODES[min(3, maxHitEffectCode)]
        self.data['isAlive'] = bool(vehicle.isCrewActive)
        self.hitShell(attackerID, effectsIndex, damageFactor)

    def showDamageFromExplosion(self, vehicle, attackerID, center, effectsIndex, damageFactor):
        """Handle splash damage from a nearby explosion."""
        self.data['splashHit'] = 'splash'
        # self.data['criticalHit'] = False
        if not self.data['isDamage']:
            self.data['hitEffect'] = HIT_EFFECT_CODES[3]
        self.data['isAlive'] = bool(vehicle.isCrewActive)
        self.hitShell(attackerID, effectsIndex, damageFactor)

    def updateStunInfo(self, vehicle, stunDuration):
        """Record stun duration; publish a no-damage stun hit if pending."""
        self.data['stun-duration'] = stunDuration
        if (not self.data['isDamage']) and (self.data['hitEffect'] in ('armor_pierced_no_damage', 'critical_hit')):
            self.updateData()

    def showVehicleDamageInfo(self, player, vehicleID, damageIndex, extraIndex, entityID, equipmentID):
        """Handle module/crew damage notifications (crits, destructions)."""
        dataUpdate = {
            'attackerID': entityID,
            'costShell': 'unknown',
            'hitEffect': 'unknown',
            'damage': 0,
            'shellKind': 'not_shell',
            'splashHit': 'no-splash',
            'reloadGun': 0.0,
            'stun-duration': None,
            'compName': 'unknown'
        }
        damageCode = DAMAGE_INFO_CODES[damageIndex]
        extra = player.vehicleTypeDescriptor.extras[extraIndex]
        if damageCode in ('DEVICE_CRITICAL_AT_RAMMING', 'DEVICE_DESTROYED_AT_RAMMING'):
            self.data['criticalHit'] = True
            if extra.name in DEVICES_TANKMAN:
                self.data['critDevice'] = DEVICES_TANKMAN[extra.name] if damageCode == 'DEVICE_CRITICAL_AT_RAMMING' else DEVICES_TANKMAN[extra.name + '_destr']
            vehicle = BigWorld.entities.get(player.playerVehicleID)
            # Crit without HP change: publish immediately (no health event follows).
            if self.data['oldHealth'] == vehicle.health:
                self.data.update(dataUpdate)
                self.data['attackReasonID'] = 2
                self.updateData()
        elif damageCode in ('DEVICE_CRITICAL_AT_WORLD_COLLISION', 'DEVICE_DESTROYED_AT_WORLD_COLLISION', 'TANKMAN_HIT_AT_WORLD_COLLISION'):
            self.data['criticalHit'] = True
            if extra.name in DEVICES_TANKMAN:
                self.data['critDevice'] = DEVICES_TANKMAN[extra.name + '_destr'] if damageCode == 'DEVICE_DESTROYED_AT_WORLD_COLLISION' else DEVICES_TANKMAN[extra.name]
            vehicle = BigWorld.entities.get(player.playerVehicleID)
            if self.data['oldHealth'] == vehicle.health:
                self.data.update(dataUpdate)
                self.data['attackReasonID'] = 3
                self.updateData()
        elif damageCode == 'DEATH_FROM_DROWNING':
            self.data.update(dataUpdate)
            self.data['attackReasonID'] = 5
            self.data['isAlive'] = False
            self.data['criticalHit'] = False
            self.updateData()
        elif (damageCode in damageInfoCriticals) or (damageCode in damageInfoDestructions) or (damageCode in damageInfoTANKMAN):
            if extra.name in DEVICES_TANKMAN:
                self.data['critDevice'] = DEVICES_TANKMAN[extra.name + '_destr'] if damageCode in damageInfoDestructions else DEVICES_TANKMAN[extra.name]
            self.data['criticalHit'] = True

    def onHealthChanged(self, vehicle, newHealth, attackerID, attackReasonID):
        """Handle an HP change: compute damage and publish the hit."""
        # Large negative health means the vehicle was blown up (ammo rack).
        self.data['blownup'] = (newHealth <= -5)
        newHealth = max(0, newHealth)
        self.data['damage'] = self.data['oldHealth'] - newHealth
        self.data['oldHealth'] = newHealth
        if self.data['damage'] < 0:
            return
        # Normalize artillery/airstrike variants onto two canonical ids.
        if (attackReasonID < 8) or (attackReasonID == 12):
            self.data['attackReasonID'] = attackReasonID
        elif attackReasonID in [9, 10, 13, 24]:
            self.data['attackReasonID'] = 24
        elif attackReasonID in [11, 14, 25]:
            self.data['attackReasonID'] = 25
        self.data['isDamage'] = (self.data['damage'] > 0)
        self.data['isAlive'] = vehicle.isAlive()
        self.data['hitEffect'] = HIT_EFFECT_CODES[4]
        if self.data['attackReasonID'] != 0:
            # Non-shot damage carries no shell information.
            self.data['costShell'] = 'unknown'
            # self.data['criticalHit'] = False
            self.data['shellKind'] = 'not_shell'
            self.data['splashHit'] = 'no-splash'
            self.data['reloadGun'] = 0.0
            self.data['stun-duration'] = None
        else:
            self.data['reloadGun'] = self.timeReload(attackerID)
        self.data['attackerID'] = attackerID
        self.updateData()
data = Data()
# Shared base for the damage-log panels (DamageLog, LastHit): resolves the
# per-section config keys, stores the panel position / drag state, and
# expands the text macros used by the configured line formats.
class _Base(object):
def __init__(self, section):
# Pre-compute this panel's config paths so later lookups are plain
# _config.get(<key>) calls.
self.S_MOVE_IN_BATTLE = section + MOVE_IN_BATTLE
self.S_SHOW_HIT_NO_DAMAGE = section + SHOW_HIT_NO_DAMAGE
self.S_GROUP_DAMAGE_RAMMING_COLLISION = section + GROUP_DAMAGE.RAMMING_COLLISION
self.S_GROUP_DAMAGE_FIRE = section + GROUP_DAMAGE.FIRE
self.S_GROUP_DAMAGE_ART_AND_AIRSTRIKE = section + GROUP_DAMAGE.ART_AND_AIRSTRIKE
self.S_SHADOW = section + SHADOW
self.SHADOW_DISTANCE = self.S_SHADOW + SHADOW_OPTIONS.DISTANCE
self.SHADOW_ANGLE = self.S_SHADOW + SHADOW_OPTIONS.ANGLE
self.SHADOW_ALPHA = self.S_SHADOW + SHADOW_OPTIONS.ALPHA
self.SHADOW_BLUR = self.S_SHADOW + SHADOW_OPTIONS.BLUR
self.SHADOW_STRENGTH = self.S_SHADOW + SHADOW_OPTIONS.STRENGTH
self.SHADOW_COLOR = self.S_SHADOW + SHADOW_OPTIONS.COLOR
self.SHADOW_HIDE_OBJECT = self.S_SHADOW + SHADOW_OPTIONS.HIDE_OBJECT
self.SHADOW_INNER = self.S_SHADOW + SHADOW_OPTIONS.INNER
self.SHADOW_KNOCKOUT = self.S_SHADOW + SHADOW_OPTIONS.KNOCKOUT
self.SHADOW_QUALITY = self.S_SHADOW + SHADOW_OPTIONS.QUALITY
self.S_X = section + 'x'
self.S_Y = section + 'y'
self.section = section
# dictVehicle: per-attacker, per-attack-reason grouping state.
self.dictVehicle = {}
self.shadow = {}
# _data: last mouse-down payload while the panel is being dragged.
self._data = None
def reset(self):
# Clear per-battle state (grouping info and shadow settings).
self.dictVehicle = {}
self.shadow = {}
def mouse_down(self, _data):
# Start dragging on left button (buttonIdx 0).
if _data['buttonIdx'] == 0:
self._data = _data
def mouse_up(self, _data):
# Stop dragging on left button release.
if _data['buttonIdx'] == 0:
self._data = None
def _mouse_move(self, _data, nameEvent):
# Shift the panel by the mouse delta and notify the flash side.
if self._data:
self.x += (_data['x'] - self._data['x'])
self.y += (_data['y'] - self._data['y'])
as_event(nameEvent)
def updateValueMacros(self, value):
# Rebuild the module-level `macros` dict from the hit data in `value`.
# The static per-hit entries are only built when `macros` is None (it
# is reset elsewhere per hit); the trailing update() refreshes the
# entries that depend on this panel's section config.
global macros
def readColor(sec, m, xm=None):
# Return the first configured color whose threshold exceeds the
# metric `m`; fall back to the normalized-scale value `xm` using the
# COLOR_RATING_X color table. '0x'-prefixed colors become '#RRGGBB'.
colors = _config.get('colors/' + sec)
if m is not None and colors is not None:
for val in colors:
if val['value'] > m:
return '#' + val['color'][2:] if val['color'][:2] == '0x' else val['color']
elif xm is not None:
colors_x = _config.get(COLOR_RATING_X)
for val in colors_x:
if val['value'] > xm:
return '#' + val['color'][2:] if val['color'][:2] == '0x' else val['color']
conf = readyConfig(self.section)
if macros is None:
xwn8 = value.get('xwn8', None)
xwtr = value.get('xwtr', None)
xeff = value.get('xeff', None)
xwgr = value.get('xwgr', None)
# Attacker identity, ratings and their colors.
macros = {'vehicle': value['shortUserString'],
'name': value['name'],
'clannb': value['clanAbbrev'],
'clan': ''.join(['[', value['clanAbbrev'], ']']) if value['clanAbbrev'] else '',
'level': value['level'],
'clanicon': value['clanicon'],
'squad-num': value['squadnum'],
'reloadGun': value['reloadGun'],
'my-alive': 'al' if value['isAlive'] else None,
'gun-caliber': value['caliber'],
'wn8': value.get('wn8', None),
'xwn8': value.get('xwn8', None),
'wtr': value.get('wtr', None),
'xwtr': value.get('xwtr', None),
'eff': value.get('eff', None),
'xeff': value.get('xeff', None),
'wgr': value.get('wgr', None),
'xwgr': value.get('xwgr', None),
'xte': value.get('xte', None),
'r': '{{%s}}' % chooseRating,
'xr': '{{%s}}' % chooseRating if chooseRating[0] == 'x' else '{{x%s}}' % chooseRating,
'c:r': '{{c:%s}}' % chooseRating,
'c:xr': '{{c:%s}}' % chooseRating if chooseRating[0] == 'x' else '{{c:x%s}}' % chooseRating,
'c:wn8': readColor('wn8', value.get('wn8', None), xwn8),
'c:xwn8': readColor('x', xwn8),
'c:wtr': readColor('wtr', value.get('wtr', None), xwtr),
'c:xwtr': readColor('x', xwtr),
'c:eff': readColor('eff', value.get('eff', None), xeff),
'c:xeff': readColor('x', xeff),
'c:wgr': readColor('wgr', value.get('wgr', None), xwgr),
'c:xwgr': readColor('x', xwgr),
'c:xte': readColor('x', value.get('xte', None)),
'diff-masses': value.get('diff-masses', None),
'nation': value.get('nation', None),
'my-blownup': 'blownup' if value['blownup'] else None,
'type-shell-key': value['shellKind'],
'stun-duration': value.get('stun-duration', None),
'vehiclename': value.get('attackerVehicleName', None)
}
# Section-dependent entries (strings/colors looked up in this panel's
# prepared config tables).
macros.update({'c:team-dmg': conf['c_teamDmg'][value['teamDmg']],
'team-dmg': conf['teamDmg'].get(value['teamDmg'], ''),
'vtype': conf['vehicleClass'].get(VEHICLE_CLASSES_SHORT[value['attackerVehicleType']], ''),
'c:costShell': conf['c_Shell'][value['costShell']],
'costShell': conf['costShell'].get(value['costShell'], 'unknown'),
'c:dmg-kind': conf['c_typeHit'][ATTACK_REASONS[value['attackReasonID']]],
'dmg-kind': conf['typeHit'].get(ATTACK_REASONS[value['attackReasonID']], 'reason: %s' % value['attackReasonID']),
'c:vtype': conf['c_VehicleClass'].get(VEHICLE_CLASSES_SHORT[value['attackerVehicleType']], '#CCCCCC'),
'comp-name': conf['compNames'].get(value['compName'], 'unknown'),
'splash-hit': conf['splashHit'].get(value['splashHit'], 'unknown'),
'critical-hit': conf['criticalHit'].get('critical') if value['criticalHit'] else conf['criticalHit'].get('no-critical'),
'type-shell': conf['typeShell'][value['shellKind']],
'c:type-shell': conf['c_typeShell'][value['shellKind']],
'c:hit-effects': conf['c_hitEffect'].get(value['hitEffect'], 'unknown'),
'hit-effects': conf['hitEffect'].get(value['hitEffect'], 'unknown'),
'crit-device': conf['critDevice'].get(value.get('critDevice', '')),
'number': value['number'],
'dmg': value['damage'],
'dmg-ratio': value['dmgRatio'],
'fire-duration': value.get('fireDuration', None),
'hitTime': value['hitTime']
})
def getShadow(self):
# Read every shadow option of this section from config; parser()
# expands any macros embedded in the configured values.
return {SHADOW_OPTIONS.DISTANCE: parser(_config.get(self.SHADOW_DISTANCE)),
SHADOW_OPTIONS.ANGLE: parser(_config.get(self.SHADOW_ANGLE)),
SHADOW_OPTIONS.ALPHA: parser(_config.get(self.SHADOW_ALPHA)),
SHADOW_OPTIONS.BLUR: parser(_config.get(self.SHADOW_BLUR)),
SHADOW_OPTIONS.STRENGTH: parser(_config.get(self.SHADOW_STRENGTH)),
SHADOW_OPTIONS.COLOR: parser(_config.get(self.SHADOW_COLOR)),
SHADOW_OPTIONS.HIDE_OBJECT: parser(_config.get(self.SHADOW_HIDE_OBJECT)),
SHADOW_OPTIONS.INNER: parser(_config.get(self.SHADOW_INNER)),
SHADOW_OPTIONS.KNOCKOUT: parser(_config.get(self.SHADOW_KNOCKOUT)),
SHADOW_OPTIONS.QUALITY: parser(_config.get(self.SHADOW_QUALITY))
}
# Scrollable multi-line damage log panel. Consecutive hits from the same
# attacker and attack reason can be merged into one line ("grouping").
class DamageLog(_Base):
def __init__(self, section):
_Base.__init__(self, section)
self.S_FORMAT_HISTORY = section + FORMAT_HISTORY
# listLog: formatted lines currently shown; scrollList: lines scrolled
# out of view; dataLog: macro values of the line being built.
self.listLog = []
self.dataLog = {}
self.scrollList = []
if _config.get(self.S_MOVE_IN_BATTLE):
# Movable panel: restore the saved position, fall back to config.
_data = userprefs.get(DAMAGE_LOG_SECTIONS.LOG, {'x': _config.get(self.S_X), 'y': _config.get(self.S_Y)})
# Drag callbacks are wired once, for the main LOG section only.
if section == DAMAGE_LOG_SECTIONS.LOG:
as_callback("damageLog_mouseDown", self.mouse_down)
as_callback("damageLog_mouseUp", self.mouse_up)
as_callback("damageLog_mouseMove", self.mouse_move)
else:
_data = {'x': _config.get(self.S_X), 'y': _config.get(self.S_Y)}
# NOTE(review): indentation was lost in this dump; whether the wheel
# callback is registered per-section or only for LOG should be
# confirmed against the upstream source.
as_callback("damageLog_mouseWheel", self.mouse_wheel)
self.x = _data['x']
self.y = _data['y']
self.section = section
self.callEvent = True
def reset(self, section):
# Per-battle reset; also persists the dragged position when movable.
super(DamageLog, self).reset()
self.listLog = []
self.scrollList = []
self.section = section
self.dataLog = {}
self.callEvent = True
self.dictVehicle.clear()
if (None not in [self.x, self.y]) and _config.get(self.S_MOVE_IN_BATTLE) and section == DAMAGE_LOG_SECTIONS.LOG:
userprefs.set(DAMAGE_LOG_SECTIONS.LOG, {'x': self.x, 'y': self.y})
def mouse_move(self, _data):
self._mouse_move(_data, EVENTS_NAMES.ON_HIT)
def mouse_wheel(self, _data):
# Wheel down hides the newest line, wheel up restores it.
if _data['delta'] < 0:
if self.listLog:
self.scrollList.append(self.listLog.pop(0))
as_event(EVENTS_NAMES.ON_HIT)
else:
if self.scrollList:
self.listLog.insert(0, self.scrollList.pop())
as_event(EVENTS_NAMES.ON_HIT)
def setOutParameters(self, numberLine):
# Render dataLog through the configured line format, either prepended
# as a new line (ADD_LINE) or replacing an existing grouped line.
self.updateValueMacros(self.dataLog)
if numberLine == ADD_LINE:
self.listLog = [parser(_config.get(self.S_FORMAT_HISTORY))] + self.listLog
else:
self.listLog[numberLine] = parser(_config.get(self.S_FORMAT_HISTORY))
if (self.section == DAMAGE_LOG_SECTIONS.LOG) or (self.section == DAMAGE_LOG_SECTIONS.LOG_ALT):
if not _config.get(self.S_MOVE_IN_BATTLE):
self.x = parser(_config.get(self.S_X))
self.y = parser(_config.get(self.S_Y))
self.shadow = self.getShadow()
def updateNumberLine(self, attackerID, attackReasonID):
# A new line was prepended: shift the remembered line index of every
# other (attacker, reason) group down by one.
for attacker in self.dictVehicle:
dictAttacker = self.dictVehicle[attacker]
for attack in dictAttacker:
if (attacker != attackerID) or (attack != attackReasonID):
dictAttacker[attack]['numberLine'] += 1
def addLine(self, attackerID=None, attackReasonID=None):
# Append a fresh log line; when attacker/reason are known, remember
# the grouping state so follow-up hits can be merged into this line.
if not (attackerID is None or attackReasonID is None):
self.dictVehicle[attackerID][attackReasonID] = {'time': BigWorld.serverTime(),
'damage': self.dataLog['damage'],
'criticalHit': self.dataLog['criticalHit'],
'numberLine': 0,
'startAction': BigWorld.time() if attackReasonID == 1 else None,
'hitTime': self.dataLog['hitTime']
}
self.dataLog['number'] = len(self.listLog) + 1
# attackReasonID 1 is fire: track how long the fire has burned.
self.dataLog['fireDuration'] = BigWorld.time() - self.dictVehicle[attackerID][attackReasonID]['startAction'] if attackReasonID == 1 else None
self.setOutParameters(ADD_LINE)
self.updateNumberLine(attackerID, attackReasonID)
def reset_scrolling(self):
# New hit arrived: unwind any scrolling so the newest lines show.
if self.scrollList:
self.scrollList.extend(self.listLog)
self.listLog = self.scrollList
self.scrollList = []
def updateGroupedValues(self, parametersDmg):
# Merge the current hit into an existing grouped line, then re-render.
parametersDmg['time'] = BigWorld.serverTime()
parametersDmg['damage'] += self.dataLog['damage']
parametersDmg['criticalHit'] = (parametersDmg['criticalHit'] or self.dataLog['criticalHit'])
if parametersDmg['damage'] > 0:
self.dataLog['hitEffect'] = 'armor_pierced'
self.dataLog['criticalHit'] = parametersDmg['criticalHit']
self.dataLog['damage'] = parametersDmg['damage']
self.dataLog['dmgRatio'] = self.dataLog['damage'] * 100 // self.dataLog['maxHealth']
self.dataLog['number'] = len(self.listLog) - parametersDmg['numberLine']
self.dataLog['fireDuration'] = BigWorld.time() - parametersDmg['startAction'] if (self.dataLog['attackReasonID'] == 1) and (parametersDmg['startAction'] is not None) else None
self.dataLog['hitTime'] = parametersDmg['hitTime']
self.setOutParameters(parametersDmg['numberLine'])
def groupDmg(self):
# Either merge into a recent (<1s) group for the same attacker/reason
# or start a new line (and a fresh group).
self.dataLog = data.data.copy()
attackerID = self.dataLog['attackerID']
attackReasonID = self.dataLog['attackReasonID']
if attackerID in self.dictVehicle:
if attackReasonID in self.dictVehicle[attackerID]:
parametersDmg = self.dictVehicle[attackerID][attackReasonID]
if (BigWorld.serverTime() - parametersDmg['time']) < 1.0:
self.updateGroupedValues(parametersDmg)
return
else:
# Stale group: drop it and fall through to a new line.
del self.dictVehicle[attackerID][attackReasonID]
else:
self.dictVehicle[attackerID] = {}
self.addLine(attackerID, attackReasonID)
def isGroupDmg(self):
# Grouping applies to ramming/collision (2, 3), fire (1) and
# artillery/airstrike (24, 25), each behind its own config switch.
attackReasonID = data.data['attackReasonID']
isGroupRamming_WorldCollision = (attackReasonID in [2, 3]) and _config.get(self.S_GROUP_DAMAGE_RAMMING_COLLISION)
isGroupFire = (attackReasonID == 1) and _config.get(self.S_GROUP_DAMAGE_FIRE)
isGroupArtAndAirstrike = (attackReasonID in [24, 25]) and _config.get(self.S_GROUP_DAMAGE_ART_AND_AIRSTRIKE)
return isGroupRamming_WorldCollision or isGroupFire or isGroupArtAndAirstrike
def output(self):
# Per-hit entry point: render the hit (honoring the show-no-damage
# switch) and notify the flash side.
if _config.get(self.S_SHOW_HIT_NO_DAMAGE) or data.data['isDamage']:
self.reset_scrolling()
if self.isGroupDmg():
self.groupDmg()
else:
self.dataLog = data.data
self.addLine()
if self.callEvent:
as_event(EVENTS_NAMES.ON_HIT)
# Single-line "last hit" indicator; the text auto-hides after a configured
# display time.
class LastHit(_Base):
def __init__(self, section):
_Base.__init__(self, section)
self.strLastHit = ''
self.S_FORMAT_LAST_HIT = section + FORMAT_LAST_HIT
self.S_TIME_DISPLAY_LAST_HIT = section + TIME_DISPLAY_LAST_HIT
if _config.get(self.S_MOVE_IN_BATTLE):
# Movable panel: restore the saved position and wire drag callbacks.
_data = userprefs.get(DAMAGE_LOG_SECTIONS.LAST_HIT, {'x': _config.get(self.S_X), 'y': _config.get(self.S_Y)})
as_callback("lastHit_mouseDown", self.mouse_down)
as_callback("lastHit_mouseUp", self.mouse_up)
as_callback("lastHit_mouseMove", self.mouse_move)
else:
_data = {'x': _config.get(self.S_X), 'y': _config.get(self.S_Y)}
self.x = _data['x']
self.y = _data['y']
# Timer that clears the indicator after the configured display time.
self.timerLastHit = None
def reset(self):
super(LastHit, self).reset()
self.strLastHit = ''
if (self.timerLastHit is not None) and self.timerLastHit.isStarted:
self.timerLastHit.stop()
# Persist the dragged position for the next battle.
if (None not in [self.x, self.y]) and _config.get(self.S_MOVE_IN_BATTLE):
userprefs.set(DAMAGE_LOG_SECTIONS.LAST_HIT, {'x': self.x, 'y': self.y})
def mouse_move(self, _data):
self._mouse_move(_data, EVENTS_NAMES.ON_LAST_HIT)
def hideLastHit(self):
# Timer callback: clear the text and notify the flash side.
self.strLastHit = ''
if (self.timerLastHit is not None) and self.timerLastHit.isStarted:
self.timerLastHit.stop()
as_event(EVENTS_NAMES.ON_LAST_HIT)
def setOutParameters(self, dataLog):
# Render the current hit through the configured last-hit format.
self.updateValueMacros(dataLog)
self.strLastHit = parser(_config.get(self.S_FORMAT_LAST_HIT))
if not _config.get(self.S_MOVE_IN_BATTLE):
self.x = parser(_config.get(self.S_X))
self.y = parser(_config.get(self.S_Y))
self.shadow = self.getShadow()
def initGroupedValues(self, dmg, hitTime, attackReasonID):
# Fresh grouping record; startAction is only tracked for fire (1).
return {'time': BigWorld.serverTime(),
'damage': dmg,
'startAction': BigWorld.time() if attackReasonID == 1 else None,
'hitTime': hitTime}
def groupDmg(self):
# Accumulate damage of rapid (<1s) repeat hits from the same attacker
# and reason into one displayed value.
dataLog = data.data.copy()
attackerID = dataLog['attackerID']
attackReasonID = dataLog['attackReasonID']
if attackerID in self.dictVehicle:
if attackReasonID in self.dictVehicle[attackerID]:
key = self.dictVehicle[attackerID][attackReasonID]
if ('time' in key) and ('damage' in key) and ((BigWorld.serverTime() - key['time']) < 1):
key['time'] = BigWorld.serverTime()
key['damage'] += dataLog['damage']
dataLog['damage'] = key['damage']
dataLog['dmgRatio'] = key['damage'] * 100 // dataLog['maxHealth']
dataLog['fireDuration'] = BigWorld.time() - key['startAction'] if (attackReasonID == 1) and (key['startAction'] is not None) else None
dataLog['hitTime'] = key['hitTime']
else:
self.dictVehicle[attackerID][attackReasonID] = self.initGroupedValues(dataLog['damage'], dataLog['hitTime'], attackReasonID)
dataLog['fireDuration'] = 0 if attackReasonID == 1 else None
else:
self.dictVehicle[attackerID] = {}
self.dictVehicle[attackerID][attackReasonID] = self.initGroupedValues(dataLog['damage'], dataLog['hitTime'], attackReasonID)
dataLog['fireDuration'] = 0 if attackReasonID == 1 else None
return dataLog
def isGroupDmg(self):
# Same grouping switches as DamageLog.isGroupDmg.
attackReasonID = data.data['attackReasonID']
isGroupRamming_WorldCollision = (attackReasonID in [2, 3]) and _config.get(self.S_GROUP_DAMAGE_RAMMING_COLLISION)
isGroupFire = (attackReasonID == 1) and _config.get(self.S_GROUP_DAMAGE_FIRE)
isGroupArtAndAirstrike = (attackReasonID in [24, 25]) and _config.get(self.S_GROUP_DAMAGE_ART_AND_AIRSTRIKE)
return isGroupRamming_WorldCollision or isGroupFire or isGroupArtAndAirstrike
def output(self):
# Per-hit entry point: update the indicator text and (re)start the
# auto-hide timer.
if _config.get(self.S_SHOW_HIT_NO_DAMAGE) or data.data['isDamage']:
if self.isGroupDmg():
self.setOutParameters(self.groupDmg())
else:
self.setOutParameters(data.data)
if self.strLastHit:
if (self.timerLastHit is not None) and self.timerLastHit.isStarted:
self.timerLastHit.stop()
timeDisplayLastHit = float(parser(_config.get(self.S_TIME_DISPLAY_LAST_HIT)))
self.timerLastHit = TimeInterval(timeDisplayLastHit, self, 'hideLastHit')
self.timerLastHit.start()
as_event(EVENTS_NAMES.ON_LAST_HIT)
return
# One panel object per config section: main log, alt-mode log, their
# background layers, and the single-line "last hit" indicator.
_log = DamageLog(DAMAGE_LOG_SECTIONS.LOG)
_logAlt = DamageLog(DAMAGE_LOG_SECTIONS.LOG_ALT)
_logBackground = DamageLog(DAMAGE_LOG_SECTIONS.LOG_BACKGROUND)
_logAltBackground = DamageLog(DAMAGE_LOG_SECTIONS.LOG_ALT_BACKGROUND)
_lastHit = LastHit(DAMAGE_LOG_SECTIONS.LAST_HIT)
# Re-evaluate on each battle entry whether the custom damage log is active
# (enabled in config and the battle type is supported).
@registerEvent(PlayerAvatar, 'onBecomePlayer')
def _PlayerAvatar_onBecomePlayer(self):
global isShowDamageLog
isShowDamageLog = _config.get(DAMAGE_LOG_ENABLED) and battle.isBattleTypeSupported
@overrideMethod(DamageLogPanel, '_addToTopLog')
def DamageLogPanel_addToTopLog(base, self, value, actionTypeImg, vehicleTypeImg, vehicleName, shellTypeStr, shellTypeBG):
    """Swallow the vanilla top log line while the XVM log replaces it."""
    if _config.get(DAMAGE_LOG_DISABLED_DETAIL_STATS) and isShowDamageLog:
        return None
    return base(self, value, actionTypeImg, vehicleTypeImg, vehicleName, shellTypeStr, shellTypeBG)

@overrideMethod(DamageLogPanel, '_addToBottomLog')
def DamageLogPanel_addToBottomLog(base, self, value, actionTypeImg, vehicleTypeImg, vehicleName, shellTypeStr, shellTypeBG):
    """Swallow the vanilla bottom log line while the XVM log replaces it."""
    if _config.get(DAMAGE_LOG_DISABLED_DETAIL_STATS) and isShowDamageLog:
        return None
    return base(self, value, actionTypeImg, vehicleTypeImg, vehicleName, shellTypeStr, shellTypeBG)

@overrideMethod(DamageLogPanel, 'as_summaryStatsS')
def DamageLogPanel_as_summaryStatsS(base, self, damage, blocked, assist, stun):
    """Suppress the vanilla summary stats panel when disabled via config."""
    if _config.get(DAMAGE_LOG_DISABLED_SUMMARY_STATS) and isShowDamageLog:
        return None
    return base(self, damage, blocked, assist, stun)

@overrideMethod(DamageLogPanel, 'as_updateSummaryDamageValueS')
def as_updateSummaryDamageValueS(base, self, value):
    """Suppress vanilla summary damage updates when disabled via config."""
    if _config.get(DAMAGE_LOG_DISABLED_SUMMARY_STATS) and isShowDamageLog:
        return None
    return base(self, value)

@overrideMethod(DamageLogPanel, 'as_updateSummaryBlockedValueS')
def as_updateSummaryBlockedValueS(base, self, value):
    """Suppress vanilla summary blocked-damage updates when disabled."""
    if _config.get(DAMAGE_LOG_DISABLED_SUMMARY_STATS) and isShowDamageLog:
        return None
    return base(self, value)

@overrideMethod(DamageLogPanel, 'as_updateSummaryAssistValueS')
def as_updateSummaryAssistValueS(base, self, value):
    """Suppress vanilla summary assist updates when disabled."""
    if _config.get(DAMAGE_LOG_DISABLED_SUMMARY_STATS) and isShowDamageLog:
        return None
    return base(self, value)

@overrideMethod(DamageLogPanel, 'as_updateSummaryStunValueS')
def as_updateSummaryStunValueS(base, self, value):
    """Suppress vanilla summary stun updates when disabled."""
    if _config.get(DAMAGE_LOG_DISABLED_SUMMARY_STATS) and isShowDamageLog:
        return None
    return base(self, value)
@registerEvent(Vehicle, 'onHealthChanged')
def Vehicle_onHealthChanged(self, newHealth, attackerID, attackReasonID):
global on_fire, isImpact
# First hit on the player's vehicle raises the flash "impact" event once.
if not isImpact and self.isPlayerVehicle:
isImpact = True
as_event(EVENTS_NAMES.ON_IMPACT)
if isShowDamageLog:
if self.isPlayerVehicle and data.data['isAlive']:
data.onHealthChanged(self, newHealth, attackerID, attackReasonID)
if newHealth <= 0:
# Own vehicle destroyed: clear the fire indicator.
on_fire = 0
as_event(EVENTS_NAMES.ON_FIRE)
else:
# Postmortem/spectated case: if the currently observed vehicle
# died, clear the fire indicator as well.
v = vId = getattr(BigWorld.player().inputHandler.ctrl, 'curVehicleID', None)
if isinstance(vId, int):
v = BigWorld.entity(vId)
if isinstance(v, Vehicle) and (self.id == v.id) and not v.isAlive():
on_fire = 0
as_event(EVENTS_NAMES.ON_FIRE)
elif not isinstance(v, Vehicle) and v is not None:
# Unexpected curVehicleID type - log for diagnostics.
log('[DamageLog] Type(BigWorld.player().inputHandler.ctrl.curVehicleID) = %s' % v)
@registerEvent(PlayerAvatar, 'showVehicleDamageInfo')
def PlayerAvatar_showVehicleDamageInfo(self, vehicleID, damageIndex, extraIndex, entityID, equipmentID):
global isImpact
if self.playerVehicleID == vehicleID:
if not isImpact:
damageCode = DAMAGE_INFO_CODES[damageIndex]
# Repairs/restores do not count as impacts; anything else does.
isImpact = damageCode not in ['DEVICE_REPAIRED_TO_CRITICAL', 'DEVICE_REPAIRED', 'TANKMAN_RESTORED', 'FIRE_STOPPED']
if isImpact:
as_event(EVENTS_NAMES.ON_IMPACT)
if isShowDamageLog:
# Forward the raw damage-info event to the shared data model.
data.showVehicleDamageInfo(self, vehicleID, damageIndex, extraIndex, entityID, equipmentID)
@registerEvent(PlayerAvatar, 'updateVehicleHealth')
def updateVehicleHealth(self, vehicleID, health, deathReasonID, isCrewActive, isRespawn):
    """Flag whether the latest server health value differs from our record."""
    if vehicleID != self.playerVehicleID or not isShowDamageLog:
        return
    clamped = max(0, health)
    data.data['isDamage'] = (clamped != data.data['oldHealth'])
# Battle start for the player's own vehicle: choose the rating macro, build
# the per-section config cache and seed the baseline health values.
@registerEvent(Vehicle, 'onEnterWorld')
def Vehicle_onEnterWorld(self, prereqs):
if self.isPlayerVehicle:
global isShowDamageLog
isShowDamageLog = _config.get(DAMAGE_LOG_ENABLED) and battle.isBattleTypeSupported
if isShowDamageLog:
global on_fire, damageLogConfig, chooseRating
scale = config.networkServicesSettings.scale
name = config.networkServicesSettings.rating
r = '{}_{}'.format(scale, name)
if r in RATINGS:
chooseRating = RATINGS[r]['name']
else:
# Fallback rating when the configured scale/name pair is unknown.
chooseRating = 'xwgr' if scale == 'xvm' else 'wgr'
# Build the per-section config cache once, unless autoreload is on.
if not (config.config_autoreload or damageLogConfig):
damageLogConfig = {section: readyConfig(section) for section in DAMAGE_LOG_SECTIONS.SECTIONS}
on_fire = 0
# Health at spawn is used as both the current and the max baseline.
data.data['oldHealth'] = self.health
data.data['maxHealth'] = self.health
data.data['isAlive'] = self.isAlive()
@registerEvent(Vehicle, 'showDamageFromShot')
def Vehicle_showDamageFromShot(self, attackerID, points, effectsIndex, damageFactor):
    """Forward shot-damage events on the player's vehicle to the data model."""
    global isImpact
    if self.isPlayerVehicle:
        if not isImpact:
            # First impact of the battle: raise the flash event once.
            isImpact = True
            as_event(EVENTS_NAMES.ON_IMPACT)
        if data.data['isAlive'] and isShowDamageLog:
            data.showDamageFromShot(self, attackerID, points, effectsIndex, damageFactor)
@registerEvent(Vehicle, 'showDamageFromExplosion')
def Vehicle_showDamageFromExplosion(self, attackerID, center, effectsIndex, damageFactor):
    """Forward splash-damage events on the player's vehicle to the data model."""
    global isImpact
    if self.isPlayerVehicle:
        if not isImpact:
            # First impact of the battle: raise the flash event once.
            isImpact = True
            as_event(EVENTS_NAMES.ON_IMPACT)
        if data.data['isAlive'] and isShowDamageLog:
            data.showDamageFromExplosion(self, attackerID, center, effectsIndex, damageFactor)
@registerEvent(Vehicle, 'updateStunInfo')
def Vehicle_updateStunInfo(self):
    """Push the remaining stun time of the player's vehicle into the log data."""
    if not (self.isPlayerVehicle and isShowDamageLog):
        return
    if not self.stunInfo:
        return
    # stunInfo holds the server-time at which the stun ends.
    remaining = self.stunInfo - BigWorld.serverTime()
    data.updateStunInfo(self, remaining)
@registerEvent(DamagePanelMeta, 'as_setFireInVehicleS')
def DamagePanelMeta_as_setFireInVehicleS(self, isInFire):
    """Mirror the vehicle's on-fire state into the flash fire indicator."""
    global on_fire
    if not isShowDamageLog:
        return
    if isInFire:
        on_fire = 100
    else:
        on_fire = 0
    as_event(EVENTS_NAMES.ON_FIRE)
@registerEvent(PlayerAvatar, '_PlayerAvatar__destroyGUI')
def PlayerAvatar__destroyGUI(self):
    """Battle UI teardown: clear all per-battle state of the damage log."""
    global on_fire, isImpact
    isImpact = False
    on_fire = 0
    data.reset()
    # Each log panel is reset with its own current section.
    for panel in (_log, _logAlt, _logBackground, _logAltBackground):
        panel.reset(panel.section)
    _lastHit.reset()
# Alt-mode hotkey: switches the damage log between its normal and alternative
# layouts, either while held ("onHold") or as a toggle per key press.
@registerEvent(PlayerAvatar, 'handleKey')
def PlayerAvatar_handleKey(self, isDown, key, mods):
global isDownAlt
if isShowDamageLog:
hotkey = _config.get('hotkeys/damageLogAltMode')
if hotkey[ENABLED] and (key == hotkey['keyCode']):
if isDown:
if hotkey['onHold']:
# Hold mode: alt view is active only while the key is down.
if not isDownAlt:
isDownAlt = True
as_event(EVENTS_NAMES.ON_HIT)
else:
# Toggle mode: each press flips the alt view.
isDownAlt = not isDownAlt
as_event(EVENTS_NAMES.ON_HIT)
else:
if hotkey['onHold']:
# Key released in hold mode: drop back to the normal view.
if isDownAlt:
isDownAlt = False
as_event(EVENTS_NAMES.ON_HIT)
def _activeLog():
    # Pick the alt log while the alt-mode hotkey is engaged.
    return _logAlt if isDownAlt else _log

def _activeBackground():
    # Background counterpart of _activeLog().
    return _logAltBackground if isDownAlt else _logBackground

def dLog():
    """Text of the visible damage log, one formatted line per hit."""
    return '\n'.join(_activeLog().listLog)

def dLog_bg():
    """Text of the background damage log layer."""
    return '\n'.join(_activeBackground().listLog)

def dLog_shadow(setting):
    """Shadow option of the visible log (None when unset)."""
    return _activeLog().shadow.get(setting, None)

def dLog_x():
    """Panel x position of the main log."""
    return _log.x

def dLog_y():
    """Panel y position of the main log."""
    return _log.y

def lastHit():
    """Current text of the last-hit indicator."""
    return _lastHit.strLastHit

def lastHit_shadow(setting):
    """Shadow option of the last-hit indicator (None when unset)."""
    return _lastHit.shadow.get(setting, None)

def lastHit_x():
    """Panel x position of the last-hit indicator."""
    return _lastHit.x

def lastHit_y():
    """Panel y position of the last-hit indicator."""
    return _lastHit.y

def fire():
    """Fire indicator level (100 while burning, 0 otherwise)."""
    return on_fire
| {
"repo_name": "KnechtRootrechtCH/Mimir",
"path": "content/Mods/XVM/XVM_Base/res_mods_content/configs/xvm/py_macro/xvm/damageLog.py",
"copies": "1",
"size": "51007",
"license": "mit",
"hash": -7552477134396769000,
"line_mean": 43.3153779322,
"line_max": 183,
"alpha_frac": 0.5775089694,
"autogenerated": false,
"ratio": 3.5150575425539246,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9586684306592429,
"avg_score": 0.0011764410722990709,
"num_lines": 1151
} |
"""add opportunity types
Revision ID: 2303f398cc0e
Revises: 409abb6abb99
Create Date: 2015-09-04 16:40:59.787504
"""
# revision identifiers, used by Alembic.
revision = '2303f398cc0e'
down_revision = '409abb6abb99'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
# Opportunity types: contract types can opt in to opportunities and carry
# response instructions; opportunities reference their contract type.
op.add_column('contract_type', sa.Column('allow_opportunities', sa.Boolean(), nullable=True))
op.add_column('contract_type', sa.Column('opportunity_response_instructions', sa.Text(), nullable=True))
# Backfill missing FK constraints on the job_status audit columns.
op.create_foreign_key('created_by_id_fkey', 'job_status', 'users', ['created_by_id'], ['id'])
op.create_foreign_key('updated_by_id_fkey', 'job_status', 'users', ['updated_by_id'], ['id'])
op.add_column('opportunity', sa.Column('opportunity_type_id', sa.Integer(), nullable=True))
op.create_foreign_key(
'opportunity_type_id_contract_type_id_fkey', 'opportunity', 'contract_type',
['opportunity_type_id'], ['id']
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
# Reverse of upgrade(): drop constraints before the columns they reference.
op.drop_constraint('opportunity_type_id_contract_type_id_fkey', 'opportunity', type_='foreignkey')
op.drop_column('opportunity', 'opportunity_type_id')
op.drop_constraint('updated_by_id_fkey', 'job_status', type_='foreignkey')
op.drop_constraint('created_by_id_fkey', 'job_status', type_='foreignkey')
op.drop_column('contract_type', 'allow_opportunities')
op.drop_column('contract_type', 'opportunity_response_instructions')
### end Alembic commands ###
| {
"repo_name": "CityofPittsburgh/pittsburgh-purchasing-suite",
"path": "migrations/versions/2303f398cc0e_add_opportunity_types.py",
"copies": "3",
"size": "1603",
"license": "bsd-3-clause",
"hash": 3956785438096025000,
"line_mean": 40.1025641026,
"line_max": 108,
"alpha_frac": 0.6868371803,
"autogenerated": false,
"ratio": 3.339583333333333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5526420513633332,
"avg_score": null,
"num_lines": null
} |
"""Add OrderBarbicanMetadata table
Revision ID: 3d36a26b88af
Revises: 443d6f4a69ac
Create Date: 2015-02-20 12:27:08.155647
"""
# revision identifiers, used by Alembic.
revision = '3d36a26b88af'
down_revision = '443d6f4a69ac'
from alembic import op
import sqlalchemy as sa
def upgrade():
# Idempotency guard: only create the table if it does not already exist
# in the target database.
ctx = op.get_context()
con = op.get_bind()
table_exists = ctx.dialect.has_table(con.engine, 'order_barbican_metadata')
if not table_exists:
# Key/value metadata rows attached to an order.
op.create_table(
'order_barbican_metadata',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=False),
sa.Column('deleted_at', sa.DateTime(), nullable=True),
sa.Column('deleted', sa.Boolean(), nullable=False),
sa.Column('status', sa.String(length=20), nullable=False),
sa.Column('order_id', sa.String(length=36), nullable=False),
sa.Column('key', sa.String(length=255), nullable=False),
sa.Column('value', sa.String(length=255), nullable=False),
sa.ForeignKeyConstraint(['order_id'], ['orders.id'], ),
sa.PrimaryKeyConstraint('id')
)
def downgrade():
# Reverse of upgrade(): drop the metadata table unconditionally.
op.drop_table('order_barbican_metadata')
| {
"repo_name": "MCDong/barbican",
"path": "barbican/model/migration/alembic_migrations/versions/3d36a26b88af_add_order_barbican_metadata_table.py",
"copies": "2",
"size": "1315",
"license": "apache-2.0",
"hash": -7682187338654804000,
"line_mean": 32.7179487179,
"line_max": 79,
"alpha_frac": 0.6326996198,
"autogenerated": false,
"ratio": 3.3891752577319587,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5021874877531959,
"avg_score": null,
"num_lines": null
} |
"""Add order fields for DV
Revision ID: 80d36c1e37e2
Revises: 77422a4d8e14
Create Date: 2019-03-05 14:16:11.937122
"""
import model.utils
import sqlalchemy as sa
from alembic import op
from rdr_service.participant_enums import OrderStatus
# revision identifiers, used by Alembic.
revision = "80d36c1e37e2"
down_revision = "77422a4d8e14"
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific routine (e.g. ``upgrade_rdr``)."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific routine (e.g. ``downgrade_rdr``)."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
# Track the DV 1SAL2 sample order status and its timestamp on the
# participant summary.
op.add_column(
"participant_summary", sa.Column("sample_order_status_dv_1sal2", model.utils.Enum(OrderStatus), nullable=True)
)
op.add_column(
"participant_summary", sa.Column("sample_order_status_dv_1sal2_time", model.utils.UTCDateTime(), nullable=True)
)
# ### end Alembic commands ###
def downgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
# Reverse of upgrade_rdr(): drop both DV 1SAL2 columns.
op.drop_column("participant_summary", "sample_order_status_dv_1sal2_time")
op.drop_column("participant_summary", "sample_order_status_dv_1sal2")
# ### end Alembic commands ###
def upgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
# No schema changes for the metrics database in this revision.
pass
# ### end Alembic commands ###
def downgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
# No schema changes for the metrics database in this revision.
pass
# ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/80d36c1e37e2_add_order_fields_for_dv.py",
"copies": "1",
"size": "1488",
"license": "bsd-3-clause",
"hash": 8917728906388231000,
"line_mean": 25.5714285714,
"line_max": 119,
"alpha_frac": 0.6794354839,
"autogenerated": false,
"ratio": 3.381818181818182,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9559759317979086,
"avg_score": 0.0002988695478191276,
"num_lines": 56
} |
"""Add order finalized time
Revision ID: 806f7c2031a5
Revises: 5ec19e6b8726
Create Date: 2019-10-23 13:19:45.264759
"""
from alembic import op
import sqlalchemy as sa
import rdr_service.model.utils
from rdr_service.participant_enums import PhysicalMeasurementsStatus, QuestionnaireStatus, OrderStatus
from rdr_service.participant_enums import WithdrawalStatus, WithdrawalReason, SuspensionStatus, QuestionnaireDefinitionStatus
from rdr_service.participant_enums import EnrollmentStatus, Race, SampleStatus, OrganizationType, BiobankOrderStatus
from rdr_service.participant_enums import OrderShipmentTrackingStatus, OrderShipmentStatus
from rdr_service.participant_enums import MetricSetType, MetricsKey, GenderIdentity
from rdr_service.model.base import add_table_history_table, drop_table_history_table
from rdr_service.model.code import CodeType
from rdr_service.model.site_enums import SiteStatus, EnrollingStatus, DigitalSchedulingStatus, ObsoleteStatus
# revision identifiers, used by Alembic.
revision = '806f7c2031a5'
down_revision = '5ec19e6b8726'
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific routine (e.g. ``upgrade_rdr``)."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific routine (e.g. ``downgrade_rdr``)."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
# Record when a biobank order was finalized, on both the live table and
# its history table.
op.add_column('biobank_history', sa.Column('finalized_time', rdr_service.model.utils.UTCDateTime(), nullable=True))
op.add_column('biobank_order', sa.Column('finalized_time', rdr_service.model.utils.UTCDateTime(), nullable=True))
# ### end Alembic commands ###
def downgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
# Reverse of upgrade_rdr(): drop finalized_time from both tables.
op.drop_column('biobank_order', 'finalized_time')
op.drop_column('biobank_history', 'finalized_time')
# ### end Alembic commands ###
def upgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
# No schema changes for the metrics database in this revision.
pass
# ### end Alembic commands ###
def downgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
# No schema changes for the metrics database in this revision.
pass
# ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/806f7c2031a5_add_order_finalized_time.py",
"copies": "1",
"size": "2116",
"license": "bsd-3-clause",
"hash": -999502112653915300,
"line_mean": 33.1290322581,
"line_max": 125,
"alpha_frac": 0.7490548204,
"autogenerated": false,
"ratio": 3.5622895622895623,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48113443826895624,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.