text stringlengths 0 1.05M | meta dict |
|---|---|
"""Add timeouts."""
import json, zmq, mayaserver
from client_4 import create_client, start_process, SETCMD
import time #(1)
def sendrecv(socket, data, timeoutSecs=10.0): #(2)
    """Send *data* (JSON-encoded) on *socket* and wait for the reply.

    Polls the socket in non-blocking mode, sleeping briefly between
    attempts, until a reply arrives or *timeoutSecs* elapses — in which
    case the pending ``zmq.Again`` is re-raised to the caller.
    Raises ``RuntimeError`` for any non-success response code.
    """
    socket.send(json.dumps(data))
    deadline = time.time() + timeoutSecs #(3)
    recved = None
    while recved is None: #(4)
        try:
            recved = socket.recv(zmq.NOBLOCK) #(5), #(6)
        except zmq.Again: #(7)
            # Nothing available yet; give up once the deadline passes. #(8)
            if time.time() > deadline:
                raise
            time.sleep(0.1) #(9)
    code, response = json.loads(recved) #(10)
    # ...same code as before...
    if code == mayaserver.SUCCESS:
        return response
    if code == mayaserver.UNHANDLED_ERROR:
        raise RuntimeError(response)
    if code == mayaserver.INVALID_METHOD:
        raise RuntimeError('Sent invalid method: %s' % response)
    raise RuntimeError('Unhandled response: %s, %s' % (
        code, response))
if __name__ == '__main__':
    # Smoke-test the timeout behaviour against a live server process.
    SETCMD('_exceptions')
    start_process()
    sock = create_client()
    sendrecv(sock, ('exec', 'import time')) #(1)
    try:
        # A 0.1s timeout against a 5s server-side sleep must raise zmq.Again.
        sendrecv(sock, ('exec', 'time.sleep(5)'), .1) #(2)
    except zmq.Again:
        print 'Timed out successfully!'
    # Presumably the old socket is unusable after the abandoned request,
    # so a fresh client is made before talking to the server again —
    # TODO confirm against create_client in client_4.
    sock = create_client() #(3)
    sendrecv(sock, ('eval', '1 + 1')) #(4)
    print 'And recovered successfully!'
| {
"repo_name": "rgalanakis/practicalmayapython",
"path": "src/chapter6/mayaserver/client_6.py",
"copies": "1",
"size": "1304",
"license": "mit",
"hash": -3893924714870706700,
"line_mean": 29.3255813953,
"line_max": 64,
"alpha_frac": 0.5759202454,
"autogenerated": false,
"ratio": 3.5148247978436657,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4590745043243666,
"avg_score": null,
"num_lines": null
} |
"""add timezone awareness for datetime objects
Revision ID: d9530a529b3f
Revises: 221d918aa9f0
Create Date: 2016-06-21 09:39:38.348519
"""
# revision identifiers, used by Alembic.
revision = 'd9530a529b3f'
down_revision = '221d918aa9f0'
from alembic import op
import sqlalchemy as sa
import flaskbb
def upgrade():
    """Convert all naive DateTime columns to timezone-aware UTCDateTime."""
    connection = op.get_bind()
    # Having a hard time with ALTER TABLE/COLUMN stuff in SQLite.. and
    # because DateTime objects are stored as strings anyway, we can simply
    # skip those migrations for SQLite
    if connection.engine.dialect.name != "sqlite":
        # (table, column) pairs, grouped by the model module they belong to.
        # The original spelled each alter_column out by hand with a mix of
        # sa.DateTime() and sa.DateTime(timezone=False) (equivalent); one
        # data-driven loop keeps the calls consistent.
        columns = [
            # user/models.py
            ('users', 'date_joined'),
            ('users', 'lastseen'),
            ('users', 'birthday'),
            ('users', 'last_failed_login'),
            # message/models.py
            ('conversations', 'date_created'),
            ('messages', 'date_created'),
            # forum/models.py
            ('topicsread', 'last_read'),
            ('forumsread', 'last_read'),
            ('forumsread', 'cleared'),
            ('reports', 'reported'),
            ('reports', 'zapped'),
            ('posts', 'date_created'),
            ('posts', 'date_modified'),
            ('topics', 'date_created'),
            ('topics', 'last_updated'),
            ('forums', 'last_post_created'),
        ]
        for table, column in columns:
            op.alter_column(
                table, column,
                existing_type=sa.DateTime(timezone=False),
                type_=flaskbb.utils.database.UTCDateTime(timezone=True),
                existing_nullable=True)
def downgrade():
    """Revert UTCDateTime columns back to plain naive DateTime."""
    connection = op.get_bind()
    # SQLite stores datetimes as strings, so the type change is skipped
    # there (mirrors upgrade()).
    if connection.engine.dialect.name != "sqlite":
        # Same (table, column) list as upgrade(), migrated in reverse
        # direction via one consistent loop instead of 16 hand-written calls.
        columns = [
            # user/models.py
            ('users', 'date_joined'),
            ('users', 'lastseen'),
            ('users', 'birthday'),
            ('users', 'last_failed_login'),
            # message/models.py
            ('conversations', 'date_created'),
            ('messages', 'date_created'),
            # forum/models.py
            ('topicsread', 'last_read'),
            ('forumsread', 'last_read'),
            ('forumsread', 'cleared'),
            ('reports', 'reported'),
            ('reports', 'zapped'),
            ('posts', 'date_created'),
            ('posts', 'date_modified'),
            ('topics', 'date_created'),
            ('topics', 'last_updated'),
            ('forums', 'last_post_created'),
        ]
        for table, column in columns:
            op.alter_column(
                table, column,
                type_=sa.DateTime(timezone=False),
                existing_type=flaskbb.utils.database.UTCDateTime(timezone=True),
                existing_nullable=True)
| {
"repo_name": "realityone/flaskbb",
"path": "migrations/versions/d9530a529b3f_add_timezone_awareness_for_datetime.py",
"copies": "2",
"size": "6312",
"license": "bsd-3-clause",
"hash": -8684962285936870000,
"line_mean": 87.9014084507,
"line_max": 180,
"alpha_frac": 0.7474651458,
"autogenerated": false,
"ratio": 3.580260918888259,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004893954962175796,
"num_lines": 71
} |
"""Add timezone information to datetime.
Revision ID: 133e975c3ad7
Revises: 2afd7cb1c8b7
Create Date: 2013-01-29 16:23:10.656380
"""
# revision identifiers, used by Alembic.
revision = '133e975c3ad7'
down_revision = '2afd7cb1c8b7'
from alembic import op
import sqlalchemy as sa
# Tables whose created_at column is converted by upgrade()/downgrade().
TABLES = ('buildfile', 'class', 'executionfile', 'file', 'fileverifier',
          'project', 'submission', 'testable', 'testcase', 'user')
def upgrade():
    """Make every tracked timestamp column timezone-aware."""
    targets = [(table, u'created_at') for table in TABLES]
    targets.append(('submission', u'made_at'))
    targets.append(('submission', u'verified_at'))
    for table, column in targets:
        op.alter_column(table, column, type_=sa.DateTime(timezone=True))
def downgrade():
    """Revert every tracked timestamp column to naive (no timezone)."""
    targets = [(table, u'created_at') for table in TABLES]
    targets.append(('submission', u'made_at'))
    targets.append(('submission', u'verified_at'))
    for table, column in targets:
        op.alter_column(table, column, type_=sa.DateTime(timezone=False))
| {
"repo_name": "ucsb-cs/submit",
"path": "submit/migrations/versions/133e975c3ad7_add_timezone_informa.py",
"copies": "1",
"size": "1083",
"license": "bsd-2-clause",
"hash": -6948421012625551000,
"line_mean": 30.8529411765,
"line_max": 79,
"alpha_frac": 0.6472760849,
"autogenerated": false,
"ratio": 3.301829268292683,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4449105353192683,
"avg_score": null,
"num_lines": null
} |
"""add time zone to site
Revision ID: fddd3f850e2c
Revises: 060dba019a3a
Create Date: 2018-01-05 13:32:29.608220
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "fddd3f850e2c"
down_revision = "060dba019a3a"
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade routine (upgrade_<name>)."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade routine (downgrade_<name>)."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_rdr():
    """Add the nullable site.time_zone_id column to the RDR schema."""
    # ### commands auto generated by Alembic - please adjust! ###
    column = sa.Column("time_zone_id", sa.String(length=1024), nullable=True)
    op.add_column("site", column)
    # ### end Alembic commands ###
def downgrade_rdr():
    """Drop site.time_zone_id from the RDR schema."""
    # ### commands auto generated by Alembic - please adjust! ###
    table, column = "site", "time_zone_id"
    op.drop_column(table, column)
    # ### end Alembic commands ###
def upgrade_metrics():
    """No-op: this revision does not touch the metrics schema."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    """No-op: this revision does not touch the metrics schema."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/fddd3f850e2c_add_time_zone_to_site.py",
"copies": "1",
"size": "1105",
"license": "bsd-3-clause",
"hash": -6720035336463531000,
"line_mean": 22.5106382979,
"line_max": 91,
"alpha_frac": 0.657918552,
"autogenerated": false,
"ratio": 3.4104938271604937,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45684123791604936,
"avg_score": null,
"num_lines": null
} |
"""! \addtogroup mbtbLocalOption
This documentation is build from the python example Tests/SliderCrank, it contains an example of local options.
This file is optional, if it doesn't exit, the default voptions will be use.
@{
"""
import array
import numpy
#bodies view Param
## To define if a body is drew.
bodyDraw=array.array('I',[1,1,1,0])
## Ti define the transparency level of the body.
bodyTrans=array.array('d',[
2.5,
0.7,
0.5])
# contacts view parameters
ContactArtefactLength=0.1
ArtefactThershold=1e-12
## To define if the fisrt surface of the contact is drew.
contactDraw1=array.array('I',[
1,
1,
1])
## To define if the second surface of the contact is drew.
contactDraw2=array.array('I',[
0,
0,
1])
## To define the transparency level of the first contact surface.
contactTrans1=array.array('d',[
2.,
2.,
2.])
## To define the transparency level of the second contact surface.
contactTrans2=array.array('d',[
0.7,
0.7,
2.])
#3D parameters
## It must be set to 1 to run in a 3D view.
with3D=1
## 3D viewer update frequency and output frequency.
freqOutput=10
freqUpdate=10
apple=0
#Simulation parameters
## Simulation parameters time step size.
stepSize=1e-4
## Simulation parameters number of steps. Useful if with3D=0.
stepNumber=2000
TSTheta=0.5
TSGamma=0.5
TSNewtonTolerance=1e-10
TSNewtonMaxIteration=15
TSdeactivateYPosThreshold=1e-5
TSdeactivateYVelThreshold=0.0
TSactivateYPosThreshold=1.e-7
TSactivateYVelThreshold=100
TSProjectionMaxIteration=100
TSConstraintTol=1e-8
TSConstraintTolUnilateral=1e-7
TSLevelOfProjection=0
#solver parameters
## To activate the projection algorithm.
withProj=2
## Solver option
withReduced=2
## Solver option
solverTol=1e-10
## Solver option
solverIt=1000
gotoPos=0
## The number of artefacts
NBARTEFACTS=1
## CAD file of the artefacts
Artefactfile=[
'./CAD/artefact2.step']
## transparency of the artefacts
ArtefactTrans=array.array('d',[
0.8])
##! @}
| {
"repo_name": "fperignon/siconos",
"path": "mechanisms/swig/tests/slider_crank/mbtbLocalOptions.py",
"copies": "1",
"size": "2062",
"license": "apache-2.0",
"hash": -4779555214912163000,
"line_mean": 19.2156862745,
"line_max": 111,
"alpha_frac": 0.7090203686,
"autogenerated": false,
"ratio": 3.086826347305389,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.9144762327260043,
"avg_score": 0.030216877729069346,
"num_lines": 102
} |
# add to heapq things like removing any item and changing key value
# implementation of priority queue to support contains, change_task_priority
# and remove_task in log time
from heapq import *
class PriorityQueue(object):
    """Priority queue supporting contains / change-priority / remove in
    O(log n), built on heapq with lazy deletion.

    Each entry is a mutable list ``[signed_priority, removed, task]``;
    ``entry_finder`` maps every live task to its entry object so it can
    be invalidated in place without searching the heap.  Max-heap mode is
    implemented by negating priorities (``self.mul == -1``).
    """

    def __init__(self, is_min_heap):
        self.pq = []
        self.entry_finder = {}
        # Negate stored priorities for a max-heap so heapq's min-heap
        # ordering yields the largest user priority first.
        if is_min_heap is True:
            self.mul = 1
        else:
            self.mul = -1

    def contains_task(self, task):
        """Return True if *task* is currently in the queue."""
        return task in self.entry_finder

    def get_task_priority(self, task):
        """Return the priority *task* was added with.

        Raises ValueError if the task is not present.
        """
        if task in self.entry_finder:
            # Bug fix: undo the sign applied in add_task, so max-heap
            # callers get back the priority they supplied rather than
            # its negation.
            return self.mul * self.entry_finder[task][0]
        raise ValueError("task does not exist")

    def add_task(self, priority, task):
        """Add *task* with *priority*; raises KeyError on duplicates."""
        if task in self.entry_finder:
            raise KeyError("Key already exists")
        entry = [self.mul * priority, False, task]
        self.entry_finder[task] = entry
        heappush(self.pq, entry)

    def change_task_priority(self, priority, task):
        """Re-queue *task* under a new *priority* (lazy-delete + re-add)."""
        if task not in self.entry_finder:
            raise KeyError("Task not found")
        self.remove_task(task)
        entry = [self.mul * priority, False, task]
        self.entry_finder[task] = entry
        heappush(self.pq, entry)

    def remove_task(self, task):
        """Mark *task* removed; the stale heap entry is skipped later."""
        entry = self.entry_finder.pop(task)
        entry[1] = True

    def pop_task(self):
        """Remove and return the highest-priority live task.

        Raises KeyError if the queue is empty.
        """
        while self.pq:
            priority, removed, task = heappop(self.pq)
            if removed is False:
                del self.entry_finder[task]
                return task
        raise KeyError("pop from an empty priority queue")

    def peek_task(self):
        """Return (without removing) the highest-priority live task.

        Raises KeyError if the queue is empty.

        Bug fix: the original popped the live head and pushed back a NEW
        list, while entry_finder still referenced the old entry object —
        after a peek, remove_task/change_task_priority no longer affected
        the heap and pop_task could raise KeyError.  We now discard stale
        entries from the top and return the live head in place.
        """
        while self.pq:
            if self.pq[0][1]:
                heappop(self.pq)  # drop a lazily-removed entry
                continue
            return self.pq[0][2]
        raise KeyError("pop from an empty priority queue")

    def is_empty(self):
        """Return True when no live task remains."""
        try:
            self.peek_task()
            return False
        except KeyError:
            return True

    def __str__(self):
        return str(self.entry_finder) + " " + str(self.pq)
if __name__ == '__main__':
    # Demonstrate min-heap and max-heap behaviour with string tasks.
    task1 = "Tushar"
    task2 = "Roy"
    task3 = "is"
    task4 = "coder"
    jobs = [(1, task1), (3, task2), (6, task3), (7, task4)]

    min_pq = PriorityQueue(True)
    for priority, task in jobs:
        min_pq.add_task(priority, task)
    print(min_pq.contains_task(task3))
    print(min_pq.get_task_priority(task3))
    print(min_pq)
    while not min_pq.is_empty():
        print(min_pq.pop_task())

    max_pq = PriorityQueue(False)
    for priority, task in jobs:
        max_pq.add_task(priority, task)
    while not max_pq.is_empty():
        print(max_pq.pop_task())
| {
"repo_name": "rtkasodariya/interview",
"path": "python/graph/priorityqueue.py",
"copies": "2",
"size": "2850",
"license": "apache-2.0",
"hash": -4742196052146358000,
"line_mean": 27.7878787879,
"line_max": 76,
"alpha_frac": 0.5670175439,
"autogenerated": false,
"ratio": 3.566958698372966,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5133976242272966,
"avg_score": null,
"num_lines": null
} |
"""add token time
Revision ID: 2625f2bf32bf
Revises: c19852e4dcda
Create Date: 2020-08-09 20:32:02.324936
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2625f2bf32bf'
down_revision = 'c19852e4dcda'
branch_labels = None
depends_on = None
def upgrade():
    """Add OTP secret, API token and token timestamp columns to user."""
    # ### commands auto generated by Alembic - please adjust! ###
    new_columns = [
        sa.Column('otp_secret', sa.String(length=16), nullable=True),
        sa.Column('token', sa.String(length=32), nullable=True),
        sa.Column('token_time', sa.DateTime(), nullable=True),
    ]
    with op.batch_alter_table('user', schema=None) as batch_op:
        for column in new_columns:
            batch_op.add_column(column)
    # ### end Alembic commands ###
def downgrade():
    """Drop the columns added by upgrade(), in reverse order."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('user', schema=None) as batch_op:
        for name in ('token_time', 'token', 'otp_secret'):
            batch_op.drop_column(name)
    # ### end Alembic commands ###
| {
"repo_name": "hackerspace-silesia/cebulany-manager",
"path": "migrations/versions/2625f2bf32bf_add_token_time.py",
"copies": "1",
"size": "1048",
"license": "mit",
"hash": 6511214562482271000,
"line_mean": 28.1111111111,
"line_max": 89,
"alpha_frac": 0.6717557252,
"autogenerated": false,
"ratio": 3.214723926380368,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9381629094668625,
"avg_score": 0.0009701113823485664,
"num_lines": 36
} |
"""Add tournament status
Revision ID: ee46a4ecb7e3
Revises: 1ce0eb6f5f54
Create Date: 2019-01-03 18:59:43.630787
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'ee46a4ecb7e3'
down_revision = '1ce0eb6f5f54'
branch_labels = None
depends_on = None
def upgrade():
    """Add the nullable tournaments.status enum column."""
    if op.get_bind().engine.name == 'postgresql':
        # PostgreSQL needs the ENUM type created explicitly before a
        # column can reference it.
        enum_type = postgresql.ENUM('created', 'spawned', 'spawning', name='tournamentstatus')
        enum_type.create(op.get_bind())
    # ### commands auto generated by Alembic - please adjust! ###
    status_column = sa.Column(
        'status',
        sa.Enum('created', 'spawned', 'spawning', name='tournamentstatus'),
        nullable=True)
    op.add_column('tournaments', status_column)
    # ### end Alembic commands ###
def downgrade():
    """Drop tournaments.status and its backing ENUM type."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('tournaments', 'status')
    # ### end Alembic commands ###
    if op.get_bind().engine.name == 'postgresql':
        # Bug fix: upgrade() creates the ENUM type 'tournamentstatus', but
        # the original dropped 'banner_status' (copy-paste from another
        # migration) — leaving the real type behind and erroring if
        # banner_status does not exist.
        op.execute("DROP TYPE tournamentstatus;")
| {
"repo_name": "mitpokerbots/scrimmage",
"path": "migrations/versions/ee46a4ecb7e3_.py",
"copies": "1",
"size": "1065",
"license": "mit",
"hash": -7056337834203869000,
"line_mean": 30.3235294118,
"line_max": 136,
"alpha_frac": 0.6882629108,
"autogenerated": false,
"ratio": 3.3702531645569622,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4558516075356962,
"avg_score": null,
"num_lines": null
} |
"""Add track_groups table
Revision ID: eefba82b42c5
Revises: 620b312814f3
Create Date: 2019-07-31 15:15:48.350924
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = 'eefba82b42c5'
down_revision = '620b312814f3'
branch_labels = None
depends_on = None
def upgrade():
    """Create events.track_groups and give tracks an optional group link."""
    group_columns = [
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('title', sa.String(), nullable=False),
        sa.Column('position', sa.Integer(), nullable=False),
        sa.Column('description', sa.Text(), nullable=False),
        sa.Column('event_id', sa.Integer(), nullable=False, index=True),
    ]
    op.create_table(
        'track_groups',
        *group_columns,
        sa.PrimaryKeyConstraint('id'),
        sa.ForeignKeyConstraint(['event_id'], ['events.events.id']),
        schema='events'
    )
    op.add_column('tracks',
                  sa.Column('track_group_id', sa.Integer(), nullable=True, index=True),
                  schema='events')
    # ondelete='SET NULL': deleting a group only detaches its tracks.
    op.create_foreign_key(None, 'tracks', 'track_groups',
                          ['track_group_id'], ['id'],
                          source_schema='events', referent_schema='events',
                          ondelete='SET NULL')
def downgrade():
    """Undo upgrade(): detach tracks from groups, then drop the table."""
    schema = 'events'
    op.drop_column('tracks', 'track_group_id', schema=schema)
    op.drop_table('track_groups', schema=schema)
| {
"repo_name": "indico/indico",
"path": "indico/migrations/versions/20190821_1515_eefba82b42c5_add_track_groups_table.py",
"copies": "7",
"size": "1245",
"license": "mit",
"hash": 8234200143640068000,
"line_mean": 31.7631578947,
"line_max": 114,
"alpha_frac": 0.6489959839,
"autogenerated": false,
"ratio": 3.364864864864865,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004680674017058457,
"num_lines": 38
} |
"""Add track principal
Revision ID: 2496c4adc7e9
Revises: 4e459d27adab
Create Date: 2019-10-02 18:20:33.866458
"""
import bisect
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
from indico.core.db.sqlalchemy import PyIntEnum
from indico.core.db.sqlalchemy.principals import PrincipalType
# revision identifiers, used by Alembic.
revision = '2496c4adc7e9'
down_revision = '4e459d27adab'
branch_labels = None
depends_on = None
def _update_event_acl_entry(conn, user_id, event_id, permissions):
    """Overwrite the permissions array of an existing event ACL entry."""
    # NOTE(review): parameters are wrapped in a one-element list here
    # (executemany-style) while the sibling helpers pass a plain tuple —
    # inconsistent but executes the same single UPDATE; confirm intent.
    conn.execute('''
        UPDATE events.principals
        SET permissions = %s
        WHERE user_id = %s AND event_id = %s
    ''', [(permissions, user_id, event_id)])
def _create_track_acl_entry(conn, user_id, track_id, permissions):
    """Insert a user-type ACL row for *track_id* granting *permissions*.

    read_access/full_access are always false — track ACLs only carry
    specific permissions (matches the table's check constraints).
    """
    conn.execute('''
        INSERT INTO events.track_principals
        (track_id, user_id, type, read_access, full_access, permissions) VALUES
        (%s, %s, %s, false, false, %s)
    ''', (track_id, user_id, PrincipalType.user.value, permissions))
def _upgrade_permissions():
    """Migrate legacy reviewer/convener rows into the new permission model.

    Track-scoped rows become events.track_principals entries ('review'
    and/or 'convene'); event-wide rows (track_id IS NULL) become extra
    permissions on the existing events.principals entry.
    """
    conn = op.get_bind()
    # Create track acl entries
    # Users who are both reviewer and convener on the same track get a
    # single entry carrying both permissions.
    track_reviewers_conveners_stmt = '''
        SELECT user_id, track_id
        FROM events.track_abstract_reviewers r
        WHERE EXISTS (
            SELECT 1
            FROM events.track_conveners c
            WHERE r.user_id = c.user_id AND r.track_id = c.track_id
        )
    '''
    for user_id, track_id in conn.execute(track_reviewers_conveners_stmt):
        _create_track_acl_entry(conn, user_id, track_id, ['convene', 'review'])
    # Reviewers only (no matching convener row for the same track).
    track_reviewers_stmt = '''
        SELECT user_id, track_id
        FROM events.track_abstract_reviewers r
        WHERE (
            track_id IS NOT NULL AND NOT EXISTS (
                SELECT 1 FROM events.track_conveners c
                WHERE r.user_id = c.user_id AND r.track_id = c.track_id
            )
        )
    '''
    for user_id, track_id in conn.execute(track_reviewers_stmt):
        _create_track_acl_entry(conn, user_id, track_id, ['review'])
    # Conveners only (no matching reviewer row for the same track).
    track_conveners_stmt = '''
        SELECT user_id, track_id
        FROM events.track_conveners c
        WHERE (
            track_id IS NOT NULL AND NOT EXISTS (
                SELECT 1 FROM events.track_abstract_reviewers r
                WHERE r.user_id = c.user_id AND r.track_id = c.track_id
            )
        )
    '''
    for user_id, track_id in conn.execute(track_conveners_stmt):
        _create_track_acl_entry(conn, user_id, track_id, ['convene'])
    # Update event acl entries
    # Event-wide reviewer rows (track_id IS NULL) map to the
    # 'review_all_abstracts' permission on the event ACL entry.
    event_reviewers_stmt = '''
        SELECT r.user_id, r.event_id, p.permissions
        FROM events.track_abstract_reviewers r
        JOIN events.principals p ON r.user_id = p.user_id AND r.event_id = p.event_id
        WHERE r.track_id IS NULL
    '''
    for user_id, event_id, permissions in conn.execute(event_reviewers_stmt):
        # insort keeps the permissions array sorted while inserting.
        bisect.insort(permissions, 'review_all_abstracts')
        _update_event_acl_entry(conn, user_id, event_id, permissions)
    # Event-wide convener rows map to 'convene_all_abstracts'.
    event_conveners_stmt = '''
        SELECT c.user_id, c.event_id, p.permissions
        FROM events.track_conveners c
        JOIN events.principals p ON c.user_id = p.user_id AND c.event_id = p.event_id
        WHERE c.track_id IS NULL
    '''
    for user_id, event_id, permissions in conn.execute(event_conveners_stmt):
        bisect.insort(permissions, 'convene_all_abstracts')
        _update_event_acl_entry(conn, user_id, event_id, permissions)
def upgrade():
    """Create events.track_principals, migrate legacy reviewer/convener
    data into it, then drop the legacy tables."""
    op.create_table(
        'track_principals',
        sa.Column('read_access', sa.Boolean(), nullable=False),
        sa.Column('full_access', sa.Boolean(), nullable=False),
        sa.Column('permissions', postgresql.ARRAY(sa.String()), nullable=False),
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('track_id', sa.Integer(), nullable=False, index=True),
        sa.Column('local_group_id', sa.Integer(), nullable=True, index=True),
        sa.Column('mp_group_provider', sa.String(), nullable=True),
        sa.Column('event_role_id', sa.Integer(), nullable=True, index=True),
        sa.Column('mp_group_name', sa.String(), nullable=True),
        sa.Column('user_id', sa.Integer(), nullable=True, index=True),
        sa.Column(
            'type',
            # Network principals are not valid for track ACLs.
            PyIntEnum(PrincipalType, exclude_values={PrincipalType.network}),
            nullable=False,
        ),
        sa.Column('email', sa.String(), nullable=True, index=True),
        # Track ACLs never grant blanket read/full access — only specific
        # permissions (so has_privs effectively requires non-empty permissions).
        sa.CheckConstraint('NOT full_access', name='no_full_access'),
        sa.CheckConstraint('NOT read_access', name='no_read_access'),
        sa.CheckConstraint('email IS NULL OR email = lower(email)', name='lowercase_email'),
        sa.CheckConstraint('read_access OR full_access OR array_length(permissions, 1) IS NOT NULL', name='has_privs'),
        # One valid_* constraint per principal type: exactly the identity
        # column matching that type may be non-NULL.
        sa.CheckConstraint(
            'type != 1 OR (email IS NULL AND event_role_id IS NULL AND local_group_id IS NULL AND '
            'mp_group_name IS NULL AND mp_group_provider IS NULL AND user_id IS NOT NULL)', name='valid_user'),
        sa.CheckConstraint(
            'type != 2 OR (email IS NULL AND event_role_id IS NULL AND mp_group_name IS NULL AND '
            'mp_group_provider IS NULL AND user_id IS NULL AND local_group_id IS NOT NULL)', name='valid_local_group'),
        sa.CheckConstraint(
            'type != 3 OR (email IS NULL AND event_role_id IS NULL AND local_group_id IS NULL AND '
            'user_id IS NULL AND mp_group_name IS NOT NULL AND mp_group_provider IS NOT NULL)',
            name='valid_multipass_group'
        ),
        sa.CheckConstraint(
            'type != 4 OR (event_role_id IS NULL AND local_group_id IS NULL AND mp_group_name IS NULL AND '
            'mp_group_provider IS NULL AND user_id IS NULL AND email IS NOT NULL)', name='valid_email'),
        sa.CheckConstraint(
            'type != 6 OR (email IS NULL AND local_group_id IS NULL AND mp_group_name IS NULL AND '
            'mp_group_provider IS NULL AND user_id IS NULL AND event_role_id IS NOT NULL)', name='valid_event_role'),
        sa.ForeignKeyConstraint(['event_role_id'], ['events.roles.id']),
        sa.ForeignKeyConstraint(['local_group_id'], ['users.groups.id']),
        sa.ForeignKeyConstraint(['track_id'], ['events.tracks.id']),
        sa.ForeignKeyConstraint(['user_id'], ['users.users.id']),
        sa.PrimaryKeyConstraint('id'),
        schema='events',
    )
    op.create_index(
        None,
        'track_principals',
        ['mp_group_provider', 'mp_group_name'],
        unique=False,
        schema='events',
    )
    # Partial unique indexes: one ACL entry per (identity, track) for each
    # principal type.
    op.create_index(
        'ix_uq_track_principals_email',
        'track_principals',
        ['email', 'track_id'],
        unique=True,
        schema='events',
        postgresql_where=sa.text('type = 4'),
    )
    op.create_index(
        'ix_uq_track_principals_local_group',
        'track_principals',
        ['local_group_id', 'track_id'],
        unique=True,
        schema='events',
        postgresql_where=sa.text('type = 2'),
    )
    op.create_index(
        'ix_uq_track_principals_mp_group',
        'track_principals',
        ['mp_group_provider', 'mp_group_name', 'track_id'],
        unique=True,
        schema='events',
        postgresql_where=sa.text('type = 3'),
    )
    op.create_index(
        'ix_uq_track_principals_user',
        'track_principals',
        ['user_id', 'track_id'],
        unique=True,
        schema='events',
        postgresql_where=sa.text('type = 1'),
    )
    # Data migration must run after the new table exists and before the
    # legacy source tables are dropped.
    _upgrade_permissions()
    op.drop_table('track_conveners', schema='events')
    op.drop_table('track_abstract_reviewers', schema='events')
def _create_abstract_reviewers_entry(conn, user_id, track_id=None, event_id=None):
    """Insert a legacy track_abstract_reviewers row.

    Callers pass exactly one of *track_id*/*event_id* (the recreated table's
    track_xor_event_id_null constraint enforces this).
    """
    conn.execute('''
        INSERT INTO events.track_abstract_reviewers
        (user_id, track_id, event_id) VALUES
        (%s, %s, %s)
    ''', (user_id, track_id, event_id))
def _create_conveners_entry(conn, user_id, track_id=None, event_id=None):
    """Insert a legacy track_conveners row.

    Callers pass exactly one of *track_id*/*event_id* (the recreated table's
    track_xor_event_id_null constraint enforces this).
    """
    conn.execute('''
        INSERT INTO events.track_conveners
        (user_id, track_id, event_id) VALUES
        (%s, %s, %s)
    ''', (user_id, track_id, event_id))
def _downgrade_permissions():
    """Re-create legacy reviewer/convener rows from the permission model.

    Track-level 'review'/'convene' permissions become track-scoped rows;
    event-level '*_all_abstracts' permissions become event-scoped rows and
    are stripped from events.principals again.
    """
    conn = op.get_bind()
    query_track_permissions = 'SELECT user_id, track_id, permissions FROM events.track_principals'
    for user_id, track_id, permissions in conn.execute(query_track_permissions):
        if 'review' in permissions:
            _create_abstract_reviewers_entry(conn, user_id, track_id=track_id)
        if 'convene' in permissions:
            _create_conveners_entry(conn, user_id, track_id=track_id)
    query_event_permissions = '''
        SELECT user_id, event_id, permissions FROM events.principals
        WHERE permissions && ARRAY['review_all_abstracts', 'convene_all_abstracts']::character varying[];
    '''
    for user_id, event_id, permissions in conn.execute(query_event_permissions):
        if 'review_all_abstracts' in permissions:
            _create_abstract_reviewers_entry(conn, user_id, event_id=event_id)
        if 'convene_all_abstracts' in permissions:
            _create_conveners_entry(conn, user_id, event_id=event_id)
        # Remove the migrated permissions from the event ACL entry.
        updated_permissions = [
            str(permission)
            for permission in permissions
            if permission not in ('review_all_abstracts', 'convene_all_abstracts')
        ]
        _update_event_acl_entry(conn, user_id, event_id, updated_permissions)
def downgrade():
    """Re-create the legacy reviewer/convener tables, migrate permission
    data back into them, then drop events.track_principals."""
    op.create_table(
        'track_abstract_reviewers',
        sa.Column('id', sa.Integer(), nullable=False, autoincrement=True, index=True),
        sa.Column('user_id', sa.Integer(), nullable=False, autoincrement=False, index=True),
        sa.Column('event_id', sa.Integer(), nullable=True, autoincrement=False, index=True),
        sa.Column('track_id', sa.Integer(), nullable=True, autoincrement=False, index=True),
        sa.ForeignKeyConstraint(['track_id'], ['events.tracks.id']),
        sa.ForeignKeyConstraint(['event_id'], ['events.events.id']),
        sa.ForeignKeyConstraint(['user_id'], ['users.users.id']),
        sa.PrimaryKeyConstraint('id'),
        # A row is scoped to exactly one of track or event.
        sa.CheckConstraint('(track_id IS NULL) != (event_id IS NULL)', name='track_xor_event_id_null'),
        schema='events'
    )
    op.create_table(
        'track_conveners',
        sa.Column('id', sa.Integer(), nullable=False, autoincrement=True, index=True),
        sa.Column('user_id', sa.Integer(), nullable=False, autoincrement=False, index=True),
        sa.Column('event_id', sa.Integer(), nullable=True, autoincrement=False, index=True),
        sa.Column('track_id', sa.Integer(), nullable=True, autoincrement=False, index=True),
        sa.ForeignKeyConstraint(['track_id'], ['events.tracks.id']),
        sa.ForeignKeyConstraint(['event_id'], ['events.events.id']),
        sa.ForeignKeyConstraint(['user_id'], ['users.users.id']),
        sa.PrimaryKeyConstraint('id'),
        sa.CheckConstraint('(track_id IS NULL) != (event_id IS NULL)', name='track_xor_event_id_null'),
        schema='events'
    )
    # Data migration must run after the legacy tables exist and before the
    # source table is dropped.
    _downgrade_permissions()
    op.drop_table('track_principals', schema='events')
| {
"repo_name": "indico/indico",
"path": "indico/migrations/versions/20191111_1820_2496c4adc7e9_add_track_principal.py",
"copies": "5",
"size": "11173",
"license": "mit",
"hash": -1815636628946074000,
"line_mean": 40.2287822878,
"line_max": 119,
"alpha_frac": 0.6198872281,
"autogenerated": false,
"ratio": 3.523494165878272,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6643381393978272,
"avg_score": null,
"num_lines": null
} |
"""add track storage
Revision ID: 2b0f6152e206
Revises: efdd5be8b4f8
Create Date: 2017-09-22 21:33:19.183845
"""
# revision identifiers, used by Alembic.
revision = '2b0f6152e206'
down_revision = 'efdd5be8b4f8'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Create the track_storage table and link track_location to it."""
    op.create_table('track_storage',
        # NOTE(review): gen_random_uuid() requires the pgcrypto extension
        # (or PostgreSQL 13+) — presumably enabled elsewhere; confirm.
        sa.Column('uuid', postgresql.UUID(as_uuid=True), server_default=sa.text('gen_random_uuid()'), nullable=False),
        sa.Column('created', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
        sa.Column('updated', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
        sa.Column('deleted', sa.DateTime(), nullable=True),
        sa.Column('name', sa.String(length=1024), nullable=False),
        sa.PrimaryKeyConstraint('uuid', name=op.f('pk_track_storage'))
    )
    # Nullable: existing track_location rows have no store assigned yet.
    op.add_column('track_location', sa.Column('store', postgresql.UUID(as_uuid=True), nullable=True))
    op.create_foreign_key(op.f('fk_track_location_track_storage_store'), 'track_location', 'track_storage', ['store'], ['uuid'])
def downgrade():
    """Undo upgrade(): drop the FK and column first, then the table."""
    op.drop_constraint(op.f('fk_track_location_track_storage_store'), 'track_location', type_='foreignkey')
    op.drop_column('track_location', 'store')
    op.drop_table('track_storage')
| {
"repo_name": "EliRibble/minstrel",
"path": "alembic/versions/2b0f6152e206_add_track_storage.py",
"copies": "1",
"size": "1346",
"license": "mit",
"hash": -5475268663394175000,
"line_mean": 38.5882352941,
"line_max": 128,
"alpha_frac": 0.6939078752,
"autogenerated": false,
"ratio": 3.235576923076923,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44294847982769225,
"avg_score": null,
"num_lines": null
} |
"""add track.updated
Revision ID: b1a71d1125c2
Revises: af7eba7cd108
Create Date: 2019-12-02 07:45:26.771874
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b1a71d1125c2'
down_revision = 'af7eba7cd108'
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Look up and run the upgrade routine for the named engine."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()


def downgrade(engine_name):
    """Look up and run the downgrade routine for the named engine."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()


def upgrade_app():
    """No schema changes for the 'app' engine in this revision."""


def downgrade_app():
    """Nothing to revert for the 'app' engine."""


def upgrade_ingest():
    """No schema changes for the 'ingest' engine in this revision."""


def downgrade_ingest():
    """Nothing to revert for the 'ingest' engine."""
def upgrade_fingerprint():
    """Add the nullable, timezone-aware 'updated' column to track."""
    op.add_column('track', sa.Column('updated', sa.DateTime(timezone=True), nullable=True))
def downgrade_fingerprint():
    """Drop the 'updated' column added by this revision."""
    op.drop_column('track', 'updated')
| {
"repo_name": "lalinsky/acoustid-server",
"path": "alembic/versions/b1a71d1125c2_add_track_updated.py",
"copies": "1",
"size": "1378",
"license": "mit",
"hash": 3250520064355549700,
"line_mean": 21.9666666667,
"line_max": 92,
"alpha_frac": 0.6509433962,
"autogenerated": false,
"ratio": 3.5792207792207793,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4730164175420779,
"avg_score": null,
"num_lines": null
} |
"""Add transactions table
Revision ID: 392c0a07a3c
Revises: 4b7ccf8ac448
Create Date: 2015-12-14 20:13:06.833508
"""
# revision identifiers, used by Alembic.
revision = '392c0a07a3c'
down_revision = '4b7ccf8ac448'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Create the transactions table (one row per on-chain transaction)."""
    op.create_table('transactions',
    sa.Column('id', postgresql.UUID(), autoincrement=False, nullable=False),
    sa.Column('payment_id', postgresql.UUID(), autoincrement=False, nullable=True),
    sa.Column('transaction_id', sa.VARCHAR(length=128), autoincrement=False, nullable=False),
    # create_type=False reuses the existing currency_types enum instead of re-creating it.
    sa.Column('currency', postgresql.ENUM(u'BTC', u'LTC', u'DOGE', name='currency_types', create_type=False), autoincrement=False, nullable=False),
    # NUMERIC(16, 8) gives 8 decimal places, matching satoshi-level precision.
    sa.Column('amount', sa.NUMERIC(precision=16, scale=8), autoincrement=False, nullable=False),
    sa.Column('fee', sa.NUMERIC(precision=16, scale=8), autoincrement=False, nullable=False),
    sa.Column('confirmations', sa.INTEGER(), autoincrement=False, nullable=False),
    sa.Column('category', sa.VARCHAR(length=128), autoincrement=False, nullable=False),
    sa.Column('created_at', postgresql.TIMESTAMP(), server_default=sa.text(u'now()'), autoincrement=False, nullable=True),
    sa.Column('updated_at', postgresql.TIMESTAMP(), server_default=sa.text(u'now()'), autoincrement=False, nullable=True),
    sa.ForeignKeyConstraint(['payment_id'], [u'payments.id'], name=u'transactions_payment_id_fkey'),
    sa.PrimaryKeyConstraint('id', name=u'transactions_pkey'),
    sa.UniqueConstraint('transaction_id', name=u'transactions_transaction_id_key')
    )
def downgrade():
    """Drop the transactions table (the shared currency_types enum is left in place)."""
    op.drop_table('transactions')
| {
"repo_name": "smilledge/transient",
"path": "transient/migrations/versions/392c0a07a3c_add_transactions_table.py",
"copies": "1",
"size": "1769",
"license": "mit",
"hash": -2926122879764818000,
"line_mean": 43.225,
"line_max": 151,
"alpha_frac": 0.710570944,
"autogenerated": false,
"ratio": 3.5593561368209254,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47699270808209254,
"avg_score": null,
"num_lines": null
} |
"""Add translated_strings
Revision ID: 0ab02337f372
Revises:
Create Date: 2017-03-08 14:50:37.058424
"""
# revision identifiers, used by Alembic.
revision = '0ab02337f372'
down_revision = None
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Create the translated_string table."""
    # ### commands auto generated by Alembic - please adjust! ###
    # uuid-ossp must be installed before uuid_generate_v4() can be a column default.
    op.execute('CREATE EXTENSION IF NOT EXISTS "uuid-ossp";')
    op.create_table('translated_string',
    sa.Column('id', postgresql.UUID(as_uuid=True), server_default=sa.text('uuid_generate_v4()'), nullable=False),
    sa.Column('base_string', sa.Text(), nullable=True),
    sa.Column('translation', sa.Text(), nullable=True),
    sa.Column('comment', sa.Text(), nullable=True),
    sa.Column('translator_comment', sa.Text(), nullable=True),
    sa.Column('context', sa.Text(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the translated_string table (the uuid-ossp extension is kept)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('translated_string')
    # ### end Alembic commands ###
| {
"repo_name": "socialwifi/dila",
"path": "dila/data/alembic/versions/0ab02337f372_add_translated_strings.py",
"copies": "1",
"size": "1138",
"license": "bsd-3-clause",
"hash": 945368375239269000,
"line_mean": 29.7567567568,
"line_max": 113,
"alpha_frac": 0.6854130053,
"autogenerated": false,
"ratio": 3.523219814241486,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9666460944871185,
"avg_score": 0.008434374934060327,
"num_lines": 37
} |
"""Add TranslationExternalSuggestions
Revision ID: 2ef3688b5383
Revises: 20860ffde766
Create Date: 2015-04-19 12:43:34.752894
"""
# revision identifiers, used by Alembic.
revision = '2ef3688b5383'
down_revision = '20860ffde766'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create TranslationExternalSuggestions with per-column lookup indexes."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('TranslationExternalSuggestions',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('engine', sa.Unicode(length=20), nullable=True),
    sa.Column('human_key', sa.Unicode(length=255), nullable=True),
    sa.Column('language', sa.Unicode(length=255), nullable=True),
    sa.Column('value', sa.UnicodeText(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    # One suggestion per (engine, human_key, language) triple.
    sa.UniqueConstraint('engine', 'human_key', 'language')
    )
    op.create_index(u'ix_TranslationExternalSuggestions_engine', 'TranslationExternalSuggestions', ['engine'], unique=False)
    op.create_index(u'ix_TranslationExternalSuggestions_human_key', 'TranslationExternalSuggestions', ['human_key'], unique=False)
    op.create_index(u'ix_TranslationExternalSuggestions_language', 'TranslationExternalSuggestions', ['language'], unique=False)
    ### end Alembic commands ###
def downgrade():
    """Drop the indexes first, then the TranslationExternalSuggestions table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(u'ix_TranslationExternalSuggestions_language', table_name='TranslationExternalSuggestions')
    op.drop_index(u'ix_TranslationExternalSuggestions_human_key', table_name='TranslationExternalSuggestions')
    op.drop_index(u'ix_TranslationExternalSuggestions_engine', table_name='TranslationExternalSuggestions')
    op.drop_table('TranslationExternalSuggestions')
    ### end Alembic commands ###
| {
"repo_name": "go-lab/appcomposer",
"path": "alembic/versions/2ef3688b5383_add_translationexternalsuggestions.py",
"copies": "3",
"size": "1733",
"license": "bsd-2-clause",
"hash": -9220331112230542000,
"line_mean": 42.325,
"line_max": 130,
"alpha_frac": 0.7443739181,
"autogenerated": false,
"ratio": 3.602910602910603,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0089091969832816,
"num_lines": 40
} |
"""Add TranslationSyncLogs
Revision ID: 21e927fdf78c
Revises: 44d704928d8c
Create Date: 2015-04-20 23:34:51.724151
"""
# revision identifiers, used by Alembic.
revision = '21e927fdf78c'
down_revision = '44d704928d8c'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create TranslationSyncLogs with indexes on both timestamp columns."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('TranslationSyncLogs',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('start_datetime', sa.DateTime(), nullable=True),
    sa.Column('end_datetime', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(u'ix_TranslationSyncLogs_end_datetime', 'TranslationSyncLogs', ['end_datetime'], unique=False)
    op.create_index(u'ix_TranslationSyncLogs_start_datetime', 'TranslationSyncLogs', ['start_datetime'], unique=False)
    ### end Alembic commands ###
def downgrade():
    """Drop the indexes first, then the TranslationSyncLogs table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(u'ix_TranslationSyncLogs_start_datetime', table_name='TranslationSyncLogs')
    op.drop_index(u'ix_TranslationSyncLogs_end_datetime', table_name='TranslationSyncLogs')
    op.drop_table('TranslationSyncLogs')
    ### end Alembic commands ###
| {
"repo_name": "morelab/appcomposer",
"path": "alembic/versions/21e927fdf78c_add_translationsynclogs.py",
"copies": "3",
"size": "1217",
"license": "bsd-2-clause",
"hash": -1001517064193068800,
"line_mean": 33.7714285714,
"line_max": 118,
"alpha_frac": 0.7198027938,
"autogenerated": false,
"ratio": 3.4771428571428573,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.008482683745416544,
"num_lines": 35
} |
"""Add triggers for cleanup of address orphans
Revision ID: 28e56bf6f62c
Revises: 5905825242ff
Create Date: 2021-02-12 02:07:00.403096
"""
from alembic import op
import sqlalchemy as sa
import pycroft
# revision identifiers, used by Alembic.
revision = '28e56bf6f62c'
down_revision = '5905825242ff'
branch_labels = None
depends_on = None
def upgrade():
    """Install triggers that delete address rows no room or user references.

    The trigger function must exist before the two triggers that call it.
    """
    op.execute("""
    CREATE FUNCTION address_remove_orphans() RETURNS trigger VOLATILE STRICT LANGUAGE plpgsql AS $$
    BEGIN
        delete from address
        where not exists (select 1 from room where room.address_id = address.id)
        and not exists (select 1 from "user" where "user".address_id = address.id);
        RETURN NULL;
    END;
    $$""")
    op.execute("""
    CREATE TRIGGER user_address_cleanup_trigger AFTER UPDATE OR DELETE ON "user"
    FOR EACH ROW EXECUTE PROCEDURE address_remove_orphans()
    """)
    op.execute("""
    CREATE TRIGGER room_address_cleanup_trigger AFTER UPDATE OR DELETE ON room
    FOR EACH ROW EXECUTE PROCEDURE address_remove_orphans()
    """)
def downgrade():
    """Drop both triggers before the function they reference."""
    op.execute("DROP TRIGGER IF EXISTS room_address_cleanup_trigger ON room")
    op.execute("DROP TRIGGER IF EXISTS user_address_cleanup_trigger ON \"user\"")
    op.execute("DROP FUNCTION IF EXISTS address_remove_orphans()")
| {
"repo_name": "agdsn/pycroft",
"path": "pycroft/model/alembic/versions/28e56bf6f62c_add_triggers_for_cleanup_of_address_.py",
"copies": "1",
"size": "1300",
"license": "apache-2.0",
"hash": 8035011293553502000,
"line_mean": 29.2325581395,
"line_max": 99,
"alpha_frac": 0.71,
"autogenerated": false,
"ratio": 3.485254691689008,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4695254691689008,
"avg_score": null,
"num_lines": null
} |
"""add Trope
Revision ID: 6ea60f0db6
Revises: 5401daf82c9
Create Date: 2014-11-13 01:35:15.619898
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6ea60f0db6'
down_revision = '5401daf82c9'
def upgrade():
    """Create tropes, the work<->trope association table, and works.media_type."""
    op.create_table(
        'tropes',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(), nullable=False),
        sa.PrimaryKeyConstraint('id')
    )
    # Association table comes second: it references both tropes and works.
    op.create_table(
        'work_tropes',
        sa.Column('work_id', sa.Integer(), nullable=False),
        sa.Column('trope_id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['trope_id'], ['tropes.id'], ),
        sa.ForeignKeyConstraint(['work_id'], ['works.id'], ),
        sa.PrimaryKeyConstraint('work_id', 'trope_id')
    )
    # NOTE(review): adding a NOT NULL column with no server default fails on a
    # non-empty 'works' table -- confirm 'works' is empty at this revision.
    op.add_column('works',
                  sa.Column('media_type', sa.String(), nullable=False))
def downgrade():
    """Reverse upgrade(): drop the column, then the association table, then tropes."""
    op.drop_column('works', 'media_type')
    op.drop_table('work_tropes')
    op.drop_table('tropes')
| {
"repo_name": "clicheio/cliche",
"path": "cliche/migrations/versions/6ea60f0db6_add_trope.py",
"copies": "2",
"size": "1032",
"license": "mit",
"hash": -7125334453475938000,
"line_mean": 25.4615384615,
"line_max": 71,
"alpha_frac": 0.6191860465,
"autogenerated": false,
"ratio": 3.2149532710280373,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48341393175280367,
"avg_score": null,
"num_lines": null
} |
"""Add tv and animation table and media table add bilibili_id column
Revision ID: 1b2d2f1a1d56
Revises: cc825dd0b55a
Create Date: 2016-02-21 22:24:43.509991
"""
# revision identifiers, used by Alembic.
revision = '1b2d2f1a1d56'
down_revision = 'cc825dd0b55a'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create tv/animation subtype tables, their genre links, and media.bilibili_id."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('animation_genres',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('name')
    )
    # 'animations' and 'tvs' share their primary key with 'videos' (joined-table subtypes).
    op.create_table('animations',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['id'], ['videos.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('tvs',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['id'], ['videos.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # Association tables last: they reference the tables created above.
    op.create_table('animations_genres_association',
    sa.Column('animation_id', sa.Integer(), nullable=True),
    sa.Column('animation_genre_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['animation_genre_id'], ['animation_genres.id'], ),
    sa.ForeignKeyConstraint(['animation_id'], ['animations.id'], )
    )
    op.create_table('tvs_genres_association',
    sa.Column('tv_id', sa.Integer(), nullable=True),
    sa.Column('tv_genre_id', sa.Integer(), nullable=True),
    # NOTE(review): 'tv_genres' is not created in this revision -- presumably
    # created by an earlier migration; verify.
    sa.ForeignKeyConstraint(['tv_genre_id'], ['tv_genres.id'], ),
    sa.ForeignKeyConstraint(['tv_id'], ['tvs.id'], )
    )
    # Batch mode so the ALTER also works on backends with limited ALTER support.
    with op.batch_alter_table('media', schema=None) as batch_op:
        batch_op.add_column(sa.Column('bilibili_id', sa.String(), nullable=True))
    ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): drop the column, then children before the tables they reference."""
    ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('media', schema=None) as batch_op:
        batch_op.drop_column('bilibili_id')
    op.drop_table('tvs_genres_association')
    op.drop_table('animations_genres_association')
    op.drop_table('tvs')
    op.drop_table('animations')
    op.drop_table('animation_genres')
    ### end Alembic commands ###
| {
"repo_name": "billvsme/videoSpider",
"path": "alembic/versions/1b2d2f1a1d56_add_tv_and_animation_table_and_media_.py",
"copies": "1",
"size": "2208",
"license": "mit",
"hash": -743054426668906900,
"line_mean": 32.9692307692,
"line_max": 81,
"alpha_frac": 0.6639492754,
"autogenerated": false,
"ratio": 3.2857142857142856,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44496635611142854,
"avg_score": null,
"num_lines": null
} |
"""Add tvtropes Redirections
Revision ID: 7f6fc70526
Revises: 3ae4102055a
Create Date: 2014-08-19 05:03:49.018915
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7f6fc70526'
down_revision = '3ae4102055a'
def upgrade():
    """Create tvtropes_redirections, mapping alias pages to their original pages."""
    op.create_table(
        'tvtropes_redirections',
        sa.Column('alias_namespace', sa.String(), nullable=False),
        sa.Column('alias_name', sa.String(), nullable=False),
        sa.Column('original_namespace', sa.String(), nullable=False),
        sa.Column('original_name', sa.String(), nullable=False),
        # The original must be an existing tvtropes_entities row (composite FK).
        sa.ForeignKeyConstraint(['original_namespace', 'original_name'],
                                ['tvtropes_entities.namespace',
                                 'tvtropes_entities.name'],
                                ),
        sa.PrimaryKeyConstraint('alias_namespace', 'alias_name',
                                'original_namespace', 'original_name')
    )
def downgrade():
    """Drop the redirection table."""
    op.drop_table('tvtropes_redirections')
| {
"repo_name": "clicheio/cliche",
"path": "cliche/migrations/versions/7f6fc70526_add_tvtropes_redirections.py",
"copies": "2",
"size": "1035",
"license": "mit",
"hash": -6745523977760766000,
"line_mean": 29.4411764706,
"line_max": 72,
"alpha_frac": 0.6057971014,
"autogenerated": false,
"ratio": 3.8051470588235294,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5410944160223529,
"avg_score": null,
"num_lines": null
} |
# add two numbers a and b, and return a+b
# para:
# a - integer number
# b - integer number
# return: a number which is sum of a and b
#
from fileinput import input
def add(a, b):
    """Return the sum of a and b."""
    total = a + b
    return total
# here im going to make a subtracting function
# input:
# a - integer
# b - integer
# return: a-b
def subtract(a, b):
    """Return the difference a - b."""
    difference = a - b
    return difference
# here im going to make a multiplying function
# input:
# a - integer
# b - integer
# return: a*b
def multiply(a, b):
    """Return the product of a and b."""
    product = a * b
    return product
# x=(add(5,3))
#
# y=multiply(x,2)
#
# z=subtract(y,12)
#
# print(z)
#
# p=subtract(multiply(add(5,3),2), 12)
# print(p)
# im going to sort all my favorite songs from this file into alphabetical order, heres how:
# 1. open the file
# 2. split each line (each song)
# 3. put the lines into an array
# 4. sort the songs in alphabetical order
# 5. use for loop to print each item of "songs"
# songs=[]
# songs_file_handle=open("song-list")
# for each_line in songs_file_handle:
# each_line=each_line.rstrip("\n")
# songs.append((each_line))
# songs_file_handle.close()
# songs.sort()
# # print(songs)
# print("\n".join(songs))
# here is a function to open the song-list file and create the array:
def array(filename="song-list"):
    """Read song names from *filename* and return them as a list.

    Each line of the file is one song; the trailing newline is stripped.

    Fixes over the previous version: the file handle was never closed
    (resource leak) and the collected list was never returned; the filename
    is now a parameter with the old value as a backward-compatible default.
    """
    songs = []
    with open(filename) as songs_file_handle:  # 'with' guarantees the file is closed
        for each_line in songs_file_handle:
            songs.append(each_line.rstrip("\n"))
    return songs
| {
"repo_name": "vollov/python-test",
"path": "dudu/2021_01_05.py",
"copies": "1",
"size": "1389",
"license": "mit",
"hash": -5390406615219619000,
"line_mean": 20.703125,
"line_max": 91,
"alpha_frac": 0.6364290857,
"autogenerated": false,
"ratio": 2.783567134268537,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3919996219968537,
"avg_score": null,
"num_lines": null
} |
"""Add Two Numbers
You are given two linked lists representing two non-negative numbers.
The digits are stored in reverse order and each of their nodes contain
a single digit. Add the two numbers and return it as a linked list.
Input: (2 -> 4 -> 3) + (5 -> 6 -> 4)
Output: 7 -> 0 -> 8
Refer https://leetcode.com/problems/add-two-numbers/description/
"""
class ListNode(object):
    """Singly linked list node holding one decimal digit (least significant first)."""

    def __init__(self, x):
        self.val = x
        self.next = None

    @classmethod
    def create_from_list(cls, l):
        """Build a node chain from a Python list; the first element becomes the head."""
        head = cls(l[0])
        tail = head
        for value in l[1:]:
            tail.next = cls(value)
            tail = tail.next
        return head

    def list(self):
        """Return the values from this node onward as a Python list."""
        values = []
        node = self
        while node is not None:
            values.append(node.val)
            node = node.next
        return values

    def integer(self):
        """Interpret the chain (digits stored in reverse order) as an integer."""
        return int(''.join(str(d) for d in reversed(self.list())))

    def __str__(self):
        return ''.join(str(d) for d in self.list())
class Solution(object):
    """Digit-by-digit addition with carry, using a dummy head node.

    Time Complexity: O(max(m, n))
    Space Complexity: O(max(m, n))
    """

    def addTwoNumbers(self, l1, l2):
        dummy = ListNode(0)   # result hangs off dummy.next
        tail = dummy
        a, b = l1, l2
        carry = 0
        while a is not None or b is not None:
            total = carry
            if a is not None:
                total += a.val
                a = a.next
            if b is not None:
                total += b.val
                b = b.next
            carry, digit = divmod(total, 10)
            tail.next = ListNode(digit)
            tail = tail.next
        if carry:  # leftover carry becomes a final high-order digit
            tail.next = ListNode(carry)
        return dummy.next
class SolutionComplex(object):
    """In-place addition: reuses l1's nodes (and splices in l2's tail if longer).

    NOTE(review): this mutates both input lists, so callers must not reuse
    l1 or l2 after the call.
    """

    def addTwoNumbers(self, l1, l2):
        head = l1
        tail = head  # last node written so far
        i = 0        # carry digit
        # Walk both lists in lockstep, accumulating sums into l1's nodes.
        while True:
            l1.val = l1.val + l2.val + i
            i = 0
            if l1.val >= 10:
                i = l1.val // 10
                l1.val = l1.val % 10
            if l1.next and l2.next:
                l1 = l1.next
                l2 = l2.next
                tail = l1
            elif l1.next is None or l2.next is None:
                break
        # If l2 is longer, splice its remaining nodes onto l1.
        if l1.next is None and l2.next is not None:
            l1.next = l2.next
            l2.next = None
        # Propagate any remaining carry through the (possibly spliced) tail.
        if l2.next is None:
            while l1.next is not None:
                l1 = l1.next
                tail = l1
                l1.val += i
                i = 0
                if l1.val >= 10:
                    i = l1.val // 10
                    l1.val = l1.val % 10
        if i > 0:
            tail.next = ListNode(i)
        return head
class SolutionConvert(object):
    """Solve by converting list -> int, adding, and converting back."""

    def list_to_integer(self, head):
        """Collapse a reversed-digit chain into the integer it represents."""
        digits = []
        node = head
        while node is not None:
            digits.append(str(node.val))
            node = node.next
        return int(''.join(reversed(digits)))

    def integer_to_list(self, i):
        """Expand an integer into a reversed-digit chain."""
        head = None
        tail = None
        while i >= 10:
            i, digit = divmod(i, 10)
            if head is None:
                head = tail = ListNode(digit)
            else:
                tail.next = ListNode(digit)
                tail = tail.next
        # The remaining value (< 10) is the most significant digit.
        if tail is None:
            return ListNode(i)
        tail.next = ListNode(i)
        return head

    def addTwoNumbers(self, l1, l2):
        return self.integer_to_list(self.list_to_integer(l1) + self.list_to_integer(l2))
if __name__ == '__main__':
    # ((input digits 1, input digits 2), expected output digits) pairs.
    cases = [(([5, 2, 5], [7, 1, 6]), [2, 4, 1, 1]), (([2, 4, 3], [5, 6, 4]), [7, 0, 8])]
    for (digits1, digits2), expected in cases:
        for s in (Solution(), SolutionComplex(), SolutionConvert()):
            # Build fresh lists for every solution: SolutionComplex mutates its
            # inputs in place, so sharing l1/l2 across solutions (as the old
            # code did) feeds corrupted input to later runs.
            l1 = ListNode.create_from_list(digits1)
            l2 = ListNode.create_from_list(digits2)
            # Actually assert the result; the old code computed the comparison
            # expression and silently discarded it.
            assert s.addTwoNumbers(l1, l2).list() == expected, type(s).__name__
| {
"repo_name": "aiden0z/snippets",
"path": "leetcode/002_add_two_numbers.py",
"copies": "1",
"size": "3946",
"license": "mit",
"hash": -137725167606608020,
"line_mean": 24.4580645161,
"line_max": 89,
"alpha_frac": 0.4630005068,
"autogenerated": false,
"ratio": 3.5485611510791366,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4511561657879136,
"avg_score": null,
"num_lines": null
} |
"""Add type field to NetworkingAction
Revision ID: 3b2dab2e0d7d
Revises: 89630e3872ec
Create Date: 2016-11-14 14:57:19.247255
"""
from alembic import op
import sqlalchemy as sa
from hil.model import NetworkingAction
# revision identifiers, used by Alembic.
revision = '3b2dab2e0d7d'
down_revision = '57f4c30b0ad4'
branch_labels = None
# pylint: disable=missing-docstring
def upgrade():
    """Add the NOT NULL 'type' column to networking_action in three safe steps."""
    # We first introduce the table with null 'type' fields allowed.
    # Any existing actions will have null type fields, so we then
    # update them to 'modify_port', which was previously the only
    # possible action. Then, we add the NOT NULL constraint once
    # we know it won't run afoul of any existing rows.
    # NOTE(review): using the live model (NetworkingAction) inside a migration
    # couples it to the current code; a lightweight table construct would be
    # more robust if the model changes later.
    op.add_column('networking_action',
                  sa.Column('type', sa.String(),
                            nullable=True))
    op.execute(sa.update(NetworkingAction).values({'type': 'modify_port'}))
    op.alter_column('networking_action', 'type', nullable=False)
def downgrade():
    """Remove the 'type' column again."""
    op.drop_column('networking_action', 'type')
| {
"repo_name": "CCI-MOC/haas",
"path": "hil/migrations/versions/3b2dab2e0d7d_add_type_field_to_networkingaction.py",
"copies": "4",
"size": "1054",
"license": "apache-2.0",
"hash": 4433189799139824600,
"line_mean": 28.2777777778,
"line_max": 75,
"alpha_frac": 0.6973434535,
"autogenerated": false,
"ratio": 3.4444444444444446,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 36
} |
"""Add unique_award_key to AwardProcurement, AwardFinancialAssistance, CertifiedAwardProcurement, and CertifiedAwardFinancialAssistance
Revision ID: 3fd9a578c9c5
Revises: 0225d53e520a
Create Date: 2020-01-23 14:39:59.527010
"""
# revision identifiers, used by Alembic.
revision = '3fd9a578c9c5'
down_revision = '0225d53e520a'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Look up and run the upgrade routine for the named engine."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()


def downgrade(engine_name):
    """Look up and run the downgrade routine for the named engine."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_data_broker():
    """Add unique_award_key to four award tables and two UPPER() expression indexes."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('award_financial_assistance', sa.Column('unique_award_key', sa.Text(), nullable=True))
    op.add_column('award_procurement', sa.Column('unique_award_key', sa.Text(), nullable=True))
    op.add_column('certified_award_financial_assistance', sa.Column('unique_award_key', sa.Text(), nullable=True))
    op.add_column('certified_award_procurement', sa.Column('unique_award_key', sa.Text(), nullable=True))
    # Expression indexes for case-insensitive program_activity_name lookups.
    op.create_index('ix_af_pan_upper', 'award_financial', [sa.text('UPPER(program_activity_name)')], unique=False)
    op.create_index('ix_oc_pa_pan_upper', 'object_class_program_activity', [sa.text('UPPER(program_activity_name)')], unique=False)
    # ### end Alembic commands ###
def downgrade_data_broker():
    """Drop the expression indexes and unique_award_key columns added in upgrade."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index('ix_oc_pa_pan_upper', table_name='object_class_program_activity')
    op.drop_index('ix_af_pan_upper', table_name='award_financial')
    op.drop_column('certified_award_procurement', 'unique_award_key')
    op.drop_column('certified_award_financial_assistance', 'unique_award_key')
    op.drop_column('award_procurement', 'unique_award_key')
    op.drop_column('award_financial_assistance', 'unique_award_key')
    # ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/3fd9a578c9c5_add_unique_award_key_to_.py",
"copies": "1",
"size": "1928",
"license": "cc0-1.0",
"hash": 627406173009764600,
"line_mean": 37.56,
"line_max": 135,
"alpha_frac": 0.7131742739,
"autogenerated": false,
"ratio": 3.0798722044728435,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42930464783728434,
"avg_score": null,
"num_lines": null
} |
"""add unique_award_key to transaction tables
Revision ID: 4bbc47f2b48d
Revises: ab4eac43f605
Create Date: 2018-10-10 17:17:34.091022
"""
# revision identifiers, used by Alembic.
revision = '4bbc47f2b48d'
down_revision = 'ab4eac43f605'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Look up and run the upgrade routine for the named engine."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()


def downgrade(engine_name):
    """Look up and run the downgrade routine for the named engine."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_data_broker():
    """Add an indexed unique_award_key column to the three transaction tables."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('detached_award_procurement', sa.Column('unique_award_key', sa.Text(), nullable=True))
    op.create_index(op.f('ix_detached_award_procurement_unique_award_key'), 'detached_award_procurement', ['unique_award_key'], unique=False)
    op.add_column('detached_award_financial_assistance', sa.Column('unique_award_key', sa.Text(), nullable=True))
    op.create_index(op.f('ix_detached_award_financial_assistance_unique_award_key'), 'detached_award_financial_assistance', ['unique_award_key'], unique=False)
    op.add_column('published_award_financial_assistance', sa.Column('unique_award_key', sa.Text(), nullable=True))
    op.create_index(op.f('ix_published_award_financial_assistance_unique_award_key'), 'published_award_financial_assistance', ['unique_award_key'], unique=False)
    # ### end Alembic commands ###
def downgrade_data_broker():
    """Drop each index and its unique_award_key column, in reverse order."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_published_award_financial_assistance_unique_award_key'), table_name='published_award_financial_assistance')
    op.drop_column('published_award_financial_assistance', 'unique_award_key')
    op.drop_index(op.f('ix_detached_award_financial_assistance_unique_award_key'), table_name='detached_award_financial_assistance')
    op.drop_column('detached_award_financial_assistance', 'unique_award_key')
    op.drop_index(op.f('ix_detached_award_procurement_unique_award_key'), table_name='detached_award_procurement')
    op.drop_column('detached_award_procurement', 'unique_award_key')
    # ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/4bbc47f2b48d_add_unique_award_key_to_transaction_.py",
"copies": "1",
"size": "2147",
"license": "cc0-1.0",
"hash": -8486461423501704000,
"line_mean": 41.94,
"line_max": 161,
"alpha_frac": 0.7265952492,
"autogenerated": false,
"ratio": 3.054054054054054,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9161423272684388,
"avg_score": 0.023845206113933082,
"num_lines": 50
} |
"""Add unique constraints
Revision ID: 0868747e62ff
Revises: e60a77e44da8
Create Date: 2017-04-11 16:10:42.109777
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0868747e62ff'
down_revision = 'e60a77e44da8'
branch_labels = ()
depends_on = None
def upgrade():
    """Add a UNIQUE constraint on 'id' to each table listed below.

    NOTE(review): the constraint name is passed as None, which relies on a
    naming_convention on the target MetaData for alembic to derive names --
    confirm one is configured, otherwise the constraints get backend-generated
    names that downgrade() cannot resolve.
    """
    op.create_unique_constraint(None, 'package', ['id'])
    op.create_unique_constraint(None, 'package_result', ['id'])
    op.create_unique_constraint(None, 'platform', ['id'])
    op.create_unique_constraint(None, 'release', ['id'])
    op.create_unique_constraint(None, 'release_metadata', ['id'])
    op.create_unique_constraint(None, 'release_note', ['id'])
def downgrade():
    """Drop the UNIQUE constraints added in upgrade(), in reverse order.

    NOTE(review): op.drop_constraint(None, ...) requires a naming_convention
    to compute the constraint name; without one it raises at runtime --
    verify this downgrade actually executes.
    """
    op.drop_constraint(None, 'release_note', type_='unique')
    op.drop_constraint(None, 'release_metadata', type_='unique')
    op.drop_constraint(None, 'release', type_='unique')
    op.drop_constraint(None, 'platform', type_='unique')
    op.drop_constraint(None, 'package_result', type_='unique')
    op.drop_constraint(None, 'package', type_='unique')
| {
"repo_name": "al4/orlo",
"path": "orlo/migrations/0868747e62ff_add_unique_constraints.py",
"copies": "4",
"size": "1061",
"license": "mit",
"hash": 287590846351461020,
"line_mean": 31.1515151515,
"line_max": 65,
"alpha_frac": 0.6927426956,
"autogenerated": false,
"ratio": 3.2249240121580547,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5917666707758055,
"avg_score": null,
"num_lines": null
} |
"""Add unique constraint to certificate_notification_associations table.
Revision ID: 449c3d5c7299
Revises: 5770674184de
Create Date: 2018-02-24 22:51:35.369229
"""
# revision identifiers, used by Alembic.
revision = "449c3d5c7299"
down_revision = "5770674184de"
from alembic import op
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
CONSTRAINT_NAME = "uq_dest_not_ids"
TABLE = "certificate_notification_associations"
COLUMNS = ["notification_id", "certificate_id"]
def upgrade():
    """De-duplicate (notification_id, certificate_id) rows, then enforce uniqueness.

    Duplicates must be removed before the UNIQUE constraint can be created;
    ctid (PostgreSQL's physical row id) distinguishes otherwise-identical rows.
    """
    connection = op.get_bind()
    # Delete duplicate entries
    connection.execute(
        """\
DELETE FROM certificate_notification_associations WHERE ctid NOT IN (
   -- Select the first tuple ID for each (notification_id, certificate_id) combination and keep that
   SELECT min(ctid) FROM certificate_notification_associations GROUP BY notification_id, certificate_id
)
"""
    )
    op.create_unique_constraint(CONSTRAINT_NAME, TABLE, COLUMNS)
def downgrade():
    """Drop the uniqueness constraint (deleted duplicate rows are not restored)."""
    op.drop_constraint(CONSTRAINT_NAME, TABLE)
| {
"repo_name": "Netflix/lemur",
"path": "lemur/migrations/versions/449c3d5c7299_.py",
"copies": "1",
"size": "1054",
"license": "apache-2.0",
"hash": 6040258194363445000,
"line_mean": 26.7368421053,
"line_max": 112,
"alpha_frac": 0.7220113852,
"autogenerated": false,
"ratio": 3.7777777777777777,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49997891629777774,
"avg_score": null,
"num_lines": null
} |
"""Add unique constraint to program_number in cfda_program table
Revision ID: 67feaf4d50b8
Revises: a9b778fd5181
Create Date: 2018-08-30 09:59:18.532654
"""
# revision identifiers, used by Alembic.
revision = '67feaf4d50b8'
down_revision = 'a9b778fd5181'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Look up and run the upgrade routine for the named engine."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()


def downgrade(engine_name):
    """Look up and run the downgrade routine for the named engine."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_data_broker():
    """Recreate the cfda_program.program_number index as UNIQUE."""
    # ### commands auto generated by Alembic - please adjust! ###
    # The non-unique index must be dropped first because the name is reused.
    op.drop_index('ix_cfda_program_program_number', table_name='cfda_program')
    op.create_index(op.f('ix_cfda_program_program_number'), 'cfda_program', ['program_number'], unique=True)
    # ### end Alembic commands ###
def downgrade_data_broker():
    """Restore the non-unique program_number index."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_cfda_program_program_number'), table_name='cfda_program')
    op.create_index('ix_cfda_program_program_number', 'cfda_program', ['program_number'], unique=False)
    # ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/67feaf4d50b8_add_unique_constraint_to_program_number_.py",
"copies": "1",
"size": "1136",
"license": "cc0-1.0",
"hash": 3311508355739880000,
"line_mean": 26.0476190476,
"line_max": 108,
"alpha_frac": 0.6954225352,
"autogenerated": false,
"ratio": 3.191011235955056,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43864337711550555,
"avg_score": null,
"num_lines": null
} |
"""Add unique constraint to the AccountTypeCustomValues table and remove duplicates
Revision ID: 11f081cf54e2
Revises: a9fe9c93ed75
Create Date: 2018-04-06 17:28:33.431400
"""
# revision identifiers, used by Alembic.
revision = '11f081cf54e2'
down_revision = 'a9fe9c93ed75'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
Session = sessionmaker()
Base = declarative_base()
class AccountType(Base):
    """
    Defines the type of account based on where the data lives, e.g. AWS.

    Minimal mapping declared locally so this migration does not depend on
    the application's live models.
    """
    __tablename__ = "account_type"
    # Surrogate primary key.
    id = sa.Column(sa.Integer, primary_key=True)
    # Human-readable type name, unique across account types.
    name = sa.Column(sa.String(80), unique=True)
class Account(Base):
    """
    Meant to model AWS accounts.

    Partial, migration-local mapping: only the columns this migration's
    relationships reference are declared.
    """
    __tablename__ = "account"
    id = sa.Column(sa.Integer, primary_key=True)
    active = sa.Column(sa.Boolean())
    third_party = sa.Column(sa.Boolean())
    name = sa.Column(sa.String(50), index=True, unique=True)
    notes = sa.Column(sa.String(256))
    identifier = sa.Column(sa.String(256), unique=True)  # Unique id of the account, the number for AWS.
    account_type_id = sa.Column(sa.Integer, sa.ForeignKey("account_type.id"), nullable=False)
class AccountTypeCustomValues(Base):
    """
    Defines the values for custom fields defined in AccountTypeCustomFields.

    This is the table being deduplicated and constrained by this migration:
    after upgrade(), (account_id, name) is unique.
    """
    __tablename__ = "account_type_values"
    id = sa.Column(sa.Integer, primary_key=True)
    # Custom field name; one value per (account, name) after this migration.
    name = sa.Column(sa.String(64))
    value = sa.Column(sa.String(256))
    account_id = sa.Column(sa.Integer, sa.ForeignKey("account.id"), nullable=False)
def upgrade():
    """Delete duplicate (account_id, name) custom-value rows, preferring rows
    with a non-NULL value (latest wins), then add the unique constraint."""
    bind = op.get_bind()
    session = Session(bind=bind)

    def make_key(row):
        # Identity of a custom value: one row per (account_id, name).
        return "{}-{}".format(row.account_id, row.name)

    rows_to_delete = []
    kept = {}
    for row in session.query(AccountTypeCustomValues).all():
        key = make_key(row)
        existing = kept.get(key)
        if existing:
            if row.value is None:
                # A NULL-valued duplicate never replaces what we already kept.
                print("[+] Marking duplicate account custom field for account with ID: {},"
                      " field name: {}, field Value: NULL for deletion".format(row.account_id, row.name))
                rows_to_delete.append(row)
            else:
                # Newer non-NULL value wins; discard the previously kept row.
                print("[+] Replacing OLD duplicate account custom field for account with ID: {},"
                      " field name: {}, old field Value: {}, "
                      "with new field value: {}".format(row.account_id, row.name,
                                                       existing.value,
                                                       row.value))
                rows_to_delete.append(existing)
                kept[key] = row
        else:
            kept[key] = row

    if rows_to_delete:
        print("[-->] Deleting duplicate account custom fields... This may take a while...")
        for duplicate in rows_to_delete:
            session.delete(duplicate)
        session.commit()
        session.flush()
        print("[@] Deleted all duplicate account custom fields.")
    else:
        print("[@] No duplicates found so nothing to delete!")

    print("[-->] Adding proper unique constraint to the `account_type_values` table...")
    op.create_unique_constraint("uq_account_id_name", "account_type_values", ["account_id", "name"])
    print("[@] Completed adding proper unique constraint to the `account_type_values` table.")
def downgrade():
    """Remove the composite unique constraint added by this revision."""
    constraint_name = "uq_account_id_name"
    op.drop_constraint(constraint_name, "account_type_values")
| {
"repo_name": "Netflix/security_monkey",
"path": "migrations/versions/11f081cf54e2_.py",
"copies": "1",
"size": "3769",
"license": "apache-2.0",
"hash": 4660798176035427000,
"line_mean": 35.5922330097,
"line_max": 115,
"alpha_frac": 0.6200583709,
"autogenerated": false,
"ratio": 3.8537832310838445,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.995986470154375,
"avg_score": 0.002795380088018709,
"num_lines": 103
} |
"""Add unique fields and link between history and active translations
Revision ID: 19128bb7efe0
Revises: 4280e1ce31d9
Create Date: 2015-03-31 12:55:22.925413
"""
# revision identifiers, used by Alembic.
revision = '19128bb7efe0'
down_revision = '4280e1ce31d9'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
    """Link ActiveTranslationMessages to TranslationMessageHistory via
    history_id and drop the now-redundant user_id/datetime columns.

    The ALTERs inside the try block are not supported by SQLite, in which
    case they are skipped with a notice.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('ActiveTranslationMessages', sa.Column('history_id', sa.Integer(), nullable=True))
    op.drop_index(u'ix_ActiveTranslationMessages_datetime', table_name='ActiveTranslationMessages')
    try:
        op.drop_constraint(u'ActiveTranslationMessages_ibfk_2', 'ActiveTranslationMessages', type_='foreignkey')
        op.create_foreign_key(None, 'ActiveTranslationMessages', 'TranslationMessageHistory', ['history_id'], ['id'])
        op.drop_column('ActiveTranslationMessages', u'user_id')
        op.drop_column('ActiveTranslationMessages', u'datetime')
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed SystemExit /
        # KeyboardInterrupt); print() form works on both Python 2 and 3.
        print("drop_constraint, create_foreign_key and drop_column not supported in sqlite")
    ### end Alembic commands ###
def downgrade():
    """Restore user_id/datetime (plus their FK and index) and drop history_id."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('ActiveTranslationMessages', sa.Column(u'datetime', sa.DATETIME(), nullable=True))
    op.add_column('ActiveTranslationMessages', sa.Column(u'user_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))
    # NOTE(review): constraint name None relies on the naming convention to
    # locate the FK created in upgrade() -- confirm it drops the right one.
    op.drop_constraint(None, 'ActiveTranslationMessages', type_='foreignkey')
    op.create_foreign_key(u'ActiveTranslationMessages_ibfk_2', 'ActiveTranslationMessages', u'Users', [u'user_id'], [u'id'])
    op.create_index(u'ix_ActiveTranslationMessages_datetime', 'ActiveTranslationMessages', [u'datetime'], unique=False)
    op.drop_column('ActiveTranslationMessages', 'history_id')
    ### end Alembic commands ###
| {
"repo_name": "go-lab/appcomposer",
"path": "alembic/versions/19128bb7efe0_add_unique_fields_and_link_between_.py",
"copies": "3",
"size": "1877",
"license": "bsd-2-clause",
"hash": -4328621045721123300,
"line_mean": 47.1282051282,
"line_max": 138,
"alpha_frac": 0.7352157698,
"autogenerated": false,
"ratio": 3.6948818897637796,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.593009765956378,
"avg_score": null,
"num_lines": null
} |
"""Add unique files constraint.
Revision ID: 1217e5fbdbd9
Revises: 32518998055d
Create Date: 2015-03-23 14:49:33.176862
"""
from textwrap import dedent
# revision identifiers, used by Alembic.
revision = '1217e5fbdbd9'
down_revision = '32518998055d'
branch_labels = None
depends_on = None
from alembic import op
def upgrade():
    """Deduplicate pgcontents.files on (user_id, parent_name, name) -- keeping
    the oldest row of each group (DISTINCT ON + ORDER BY created_at) -- then
    add the unique constraint that enforces it going forward."""
    select_oldest_into_temp = dedent(
        """
        SELECT DISTINCT ON
            (f.user_id, f.parent_name, f.name)
            id, name, user_id, parent_name, content, created_at
        INTO TEMP TABLE migrate_temp
        FROM
            pgcontents.files AS f
        ORDER BY
            f.user_id, f.parent_name, f.name, f.created_at
        """
    )
    clear_files = "TRUNCATE TABLE pgcontents.files"
    restore_from_temp = dedent(
        """
        INSERT INTO pgcontents.files
        SELECT id, name, user_id, parent_name, content, created_at
        FROM migrate_temp
        """
    )
    cleanup_temp = "DROP TABLE migrate_temp"

    # Execute the dedup pipeline strictly in order.
    for statement in (select_oldest_into_temp, clear_files,
                      restore_from_temp, cleanup_temp):
        op.execute(statement)

    op.create_unique_constraint(
        u'uix_filepath_username',
        'files',
        ['user_id', 'parent_name', 'name'],
        schema='pgcontents',
    )
def downgrade():
    """Drop the per-user file-path uniqueness constraint."""
    constraint_name = u'uix_filepath_username'
    op.drop_constraint(
        constraint_name,
        'files',
        schema='pgcontents',
        type_='unique'
    )
| {
"repo_name": "quantopian/pgcontents",
"path": "pgcontents/alembic/versions/1217e5fbdbd9_.py",
"copies": "1",
"size": "1440",
"license": "apache-2.0",
"hash": -1669297761076910600,
"line_mean": 21.8571428571,
"line_max": 66,
"alpha_frac": 0.6055555556,
"autogenerated": false,
"ratio": 3.4698795180722892,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45754350736722893,
"avg_score": null,
"num_lines": null
} |
"""add unique in bundles
Revision ID: d20615919d80
Revises: b262149925a7
Create Date: 2017-10-14 17:04:29.450667
"""
# revision identifiers, used by Alembic.
revision = 'd20615919d80'
down_revision = 'b262149925a7'
from collections import defaultdict
from alembic import op
import sqlalchemy as sa
from sqlalchemy import func
from appcomposer.db import db
from appcomposer.application import app
# Lightweight Core Table objects mirroring only the columns this migration
# needs, so it does not import the application's full ORM models.
metadata = db.MetaData()
ActiveTranslationMessage = db.Table('ActiveTranslationMessages', metadata,
    sa.Column('id', sa.Integer, nullable=True),
    sa.Column('bundle_id', sa.Integer, nullable=False),
)
TranslationMessageHistory = db.Table('TranslationMessageHistory', metadata,
    sa.Column('id', sa.Integer, nullable=True),
    sa.Column('bundle_id', sa.Integer, nullable=False),
)
# A bundle is identified by (translation_url_id, language, target); the
# migration makes that combination unique.
TranslationBundle = db.Table('TranslationBundles', metadata,
    sa.Column('id', sa.Integer, nullable=True),
    sa.Column('translation_url_id', sa.Integer, nullable=False),
    sa.Column('language', sa.Unicode(20), nullable=False),
    sa.Column('target', sa.Unicode(20), nullable=False),
)
def upgrade():
    """Delete duplicate (translation_url_id, language, target) bundles --
    keeping the first of each group -- along with their messages and history,
    then enforce uniqueness with a constraint.
    """
    with app.app_context():
        # Keys that occur more than once.
        duplicated_bundles = list(db.session.query(TranslationBundle.c.translation_url_id, TranslationBundle.c.language, TranslationBundle.c.target).group_by(TranslationBundle.c.translation_url_id, TranslationBundle.c.language, TranslationBundle.c.target).having(func.count(TranslationBundle.c.id) > 1).all())
        translation_url_ids = [ tr_id for tr_id, language, target in duplicated_bundles ]
        languages = [ language for tr_id, language, target in duplicated_bundles ]
        targets = [ target for tr_id, language, target in duplicated_bundles ]
        all_results = defaultdict(list)
        # NOTE(review): the three independent IN filters can also match rows
        # whose key mixes values from different duplicated groups; harmless
        # here because only keys present in duplicated_bundles are read below.
        for bundle in db.session.query(TranslationBundle).filter(TranslationBundle.c.translation_url_id.in_(translation_url_ids), TranslationBundle.c.language.in_(languages), TranslationBundle.c.target.in_(targets)).all():
            all_results[bundle.translation_url_id, bundle.language, bundle.target].append(bundle)
        all_bundle_ids = []
        for key in duplicated_bundles:
            # Keep the first bundle of each duplicated group; delete the rest.
            for bundle in all_results[key][1:]:
                all_bundle_ids.append(bundle.id)
        # Remove dependents first (messages, history), then the bundles.
        delete_msg_stmt = ActiveTranslationMessage.delete(ActiveTranslationMessage.c.bundle_id.in_(all_bundle_ids))
        delete_hist_stmt = TranslationMessageHistory.delete(TranslationMessageHistory.c.bundle_id.in_(all_bundle_ids))
        delete_bundle_stmt = TranslationBundle.delete(TranslationBundle.c.id.in_(all_bundle_ids))
        connection = op.get_bind()
        connection.execute(delete_msg_stmt)
        connection.execute(delete_hist_stmt)
        connection.execute(delete_bundle_stmt)
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_unique_constraint(None, 'TranslationBundles', ['translation_url_id', 'language', 'target'])
    # ### end Alembic commands ###
def downgrade():
    """Drop the bundle uniqueness constraint (deleted duplicates are not restored)."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): constraint name None relies on the naming convention to
    # resolve the constraint created in upgrade().
    op.drop_constraint(None, 'TranslationBundles', type_='unique')
    # ### end Alembic commands ###
| {
"repo_name": "go-lab/appcomposer",
"path": "alembic/versions/d20615919d80_add_unique_in_bundles.py",
"copies": "3",
"size": "3091",
"license": "bsd-2-clause",
"hash": -5627055794465592000,
"line_mean": 41.3424657534,
"line_max": 309,
"alpha_frac": 0.7243610482,
"autogenerated": false,
"ratio": 3.671021377672209,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006664959385302212,
"num_lines": 73
} |
"""add uniqueness constraints
Revision ID: e59c406395d2
Revises: ad5ccc47d004
Create Date: 2017-02-02 16:00:24.237353
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e59c406395d2'
down_revision = 'ad5ccc47d004'
branch_labels = None
depends_on = None
def upgrade():
    """Add composite unique constraints via batch mode (SQLite-compatible)."""
    # ### commands auto generated by Alembic - please adjust! ###
    constraints = (
        ('layout_items', 'uq_layout_items_layout_spec_item_path', ['layout_id', 'spec_item_path']),
        ('locations', 'uq_locations_cabinet_layout_item', ['cabinet_id', 'layout_item_id']),
        ('resource_links', 'uq_resource_links_name_collection', ['name', 'collection_id']),
    )
    for table_name, constraint_name, columns in constraints:
        with op.batch_alter_table(table_name, schema=None) as batch_op:
            batch_op.create_unique_constraint(constraint_name, columns)
    # ### end Alembic commands ###
def downgrade():
    """Drop the composite unique constraints, in reverse creation order."""
    # ### commands auto generated by Alembic - please adjust! ###
    constraints = (
        ('resource_links', 'uq_resource_links_name_collection'),
        ('locations', 'uq_locations_cabinet_layout_item'),
        ('layout_items', 'uq_layout_items_layout_spec_item_path'),
    )
    for table_name, constraint_name in constraints:
        with op.batch_alter_table(table_name, schema=None) as batch_op:
            batch_op.drop_constraint(constraint_name, type_='unique')
    # ### end Alembic commands ###
| {
"repo_name": "rjw57/bitsbox",
"path": "migrations/versions/e59c406395d2_add_uniqueness_constraints.py",
"copies": "1",
"size": "1579",
"license": "mit",
"hash": 6540235696542949000,
"line_mean": 34.8863636364,
"line_max": 115,
"alpha_frac": 0.6966434452,
"autogenerated": false,
"ratio": 3.310272536687631,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45069159818876314,
"avg_score": null,
"num_lines": null
} |
"""Add unsupported https url list
Revision ID: 58abc4899824
Revises: 3812c222917d
Create Date: 2019-02-24 18:35:34.484088
"""
# revision identifiers, used by Alembic.
revision = '58abc4899824'
down_revision = '3812c222917d'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the HttpsUnsupportedUrls table plus its three non-unique indexes."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('HttpsUnsupportedUrls',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('url', sa.Unicode(length=100), nullable=False),
        sa.Column('creation', sa.DateTime(), nullable=False),
        sa.Column('last_update', sa.DateTime(), nullable=False),
        sa.PrimaryKeyConstraint('id'),
        mysql_engine='InnoDB',
        mysql_row_format='DYNAMIC'
    )
    for column in ('creation', 'last_update', 'url'):
        op.create_index(u'ix_HttpsUnsupportedUrls_%s' % column,
                        'HttpsUnsupportedUrls', [column], unique=False)
    ### end Alembic commands ###
def downgrade():
    """Drop the HttpsUnsupportedUrls indexes (reverse order) and the table."""
    ### commands auto generated by Alembic - please adjust! ###
    for column in ('url', 'last_update', 'creation'):
        op.drop_index(u'ix_HttpsUnsupportedUrls_%s' % column,
                      table_name='HttpsUnsupportedUrls')
    op.drop_table('HttpsUnsupportedUrls')
    ### end Alembic commands ###
| {
"repo_name": "gateway4labs/labmanager",
"path": "alembic/versions/58abc4899824_add_unsupported_https_url_list.py",
"copies": "4",
"size": "1513",
"license": "bsd-2-clause",
"hash": 2349206488728119000,
"line_mean": 36.825,
"line_max": 114,
"alpha_frac": 0.7184401851,
"autogenerated": false,
"ratio": 3.2962962962962963,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6014736481396297,
"avg_score": null,
"num_lines": null
} |
"""Add 'until approved' regform modification mode
Revision ID: e4fb983dc64c
Revises: 8d614ef75968
Create Date: 2020-12-09 20:10:12.155982
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = 'e4fb983dc64c'
down_revision = '8d614ef75968'
branch_labels = None
depends_on = None
def upgrade():
    """Allow modification_mode value 4 ('until approved') on registration forms."""
    widen_constraint = '''
        ALTER TABLE "event_registration"."forms" DROP CONSTRAINT "ck_forms_valid_enum_modification_mode";
        ALTER TABLE "event_registration"."forms" ADD CONSTRAINT "ck_forms_valid_enum_modification_mode" CHECK ((modification_mode = ANY (ARRAY[1, 2, 3, 4])));
    '''
    op.execute(widen_constraint)
def downgrade():
    """Fold mode-4 rows back to mode 3, then restore the original CHECK."""
    restore_constraint = '''
        UPDATE "event_registration"."forms" SET modification_mode = 3 WHERE modification_mode = 4;
        ALTER TABLE "event_registration"."forms" DROP CONSTRAINT "ck_forms_valid_enum_modification_mode";
        ALTER TABLE "event_registration"."forms" ADD CONSTRAINT "ck_forms_valid_enum_modification_mode" CHECK ((modification_mode = ANY (ARRAY[1, 2, 3])));
    '''
    op.execute(restore_constraint)
| {
"repo_name": "ThiefMaster/indico",
"path": "indico/migrations/versions/20201209_2010_e4fb983dc64c_add_until_approved_regform_modification_mode.py",
"copies": "4",
"size": "1024",
"license": "mit",
"hash": 6551486202729118000,
"line_mean": 33.1333333333,
"line_max": 158,
"alpha_frac": 0.7021484375,
"autogenerated": false,
"ratio": 3.4478114478114477,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0013889499738556342,
"num_lines": 30
} |
"""add updated columns
Revision ID: af7eba7cd108
Revises: d2176bc400c8
Create Date: 2019-12-02 07:44:04.212957
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'af7eba7cd108'
down_revision = 'd2176bc400c8'
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the per-engine upgrade routine (app/ingest/fingerprint)."""
    globals()["upgrade_{0}".format(engine_name)]()
def downgrade(engine_name):
    """Dispatch to the per-engine downgrade routine (app/ingest/fingerprint)."""
    globals()["downgrade_{0}".format(engine_name)]()
def upgrade_app():
    # No schema changes for the "app" engine in this revision.
    pass
def downgrade_app():
    # Nothing to revert for the "app" engine.
    pass
def upgrade_ingest():
    # No schema changes for the "ingest" engine in this revision.
    pass
def downgrade_ingest():
    # Nothing to revert for the "ingest" engine.
    pass
def upgrade_fingerprint():
    """Add a timezone-aware, nullable 'updated' timestamp to each fingerprint table."""
    tables = (u'fingerprint', u'track_foreignid', u'track_mbid',
              u'track_meta', u'track_puid')
    for table_name in tables:
        op.add_column(table_name,
                      sa.Column('updated', sa.DateTime(timezone=True), nullable=True))
def downgrade_fingerprint():
    """Remove the 'updated' column from each table, in reverse creation order."""
    tables = (u'track_puid', u'track_meta', u'track_mbid',
              u'track_foreignid', u'fingerprint')
    for table_name in tables:
        op.drop_column(table_name, 'updated')
| {
"repo_name": "lalinsky/acoustid-server",
"path": "alembic/versions/af7eba7cd108_add_updated_columns.py",
"copies": "1",
"size": "1368",
"license": "mit",
"hash": -5838381274657133000,
"line_mean": 23.4285714286,
"line_max": 102,
"alpha_frac": 0.6995614035,
"autogenerated": false,
"ratio": 3.0950226244343892,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42945840279343894,
"avg_score": null,
"num_lines": null
} |
"""Add/Update tables for pjsip
Revision ID: 2fc7930b41b3
Revises: 581a4264e537
Create Date: 2014-01-14 09:23:53.923454
"""
# revision identifiers, used by Alembic.
revision = '2fc7930b41b3'
down_revision = '581a4264e537'
from alembic import op
from alembic import context
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import ENUM
YESNO_NAME = 'yesno_values'
YESNO_VALUES = ['yes', 'no']
PJSIP_REDIRECT_METHOD_NAME = 'pjsip_redirect_method_values'
PJSIP_REDIRECT_METHOD_VALUES = ['user', 'uri_core', 'uri_pjsip']
PJSIP_TRANSPORT_METHOD_NAME = 'pjsip_transport_method_values'
PJSIP_TRANSPORT_METHOD_VALUES = ['default', 'unspecified', 'tlsv1', 'sslv2',
'sslv3', 'sslv23']
PJSIP_TRANSPORT_PROTOCOL_NAME = 'pjsip_transport_protocol_values'
PJSIP_TRANSPORT_PROTOCOL_VALUES = ['udp', 'tcp', 'tls', 'ws', 'wss']
def upgrade():
    """Create the ps_systems/ps_globals/ps_transports/ps_registrations tables
    and extend ps_endpoints/ps_contacts/ps_aors with new PJSIP options,
    creating the required enum types first.
    """
    ############################# Enums ##############################
    # yesno_values have already been created, so use postgres enum object
    # type to get around "already created" issue - works okay with mysql
    yesno_values = ENUM(*YESNO_VALUES, name=YESNO_NAME, create_type=False)
    # for some reason when using 'add_column' if you don't create the enum
    # first it will think it already exists and fail
    pjsip_redirect_method_values = sa.Enum(
        *PJSIP_REDIRECT_METHOD_VALUES, name=PJSIP_REDIRECT_METHOD_NAME)
    # Skip the existence check when emitting offline (SQL-script) mode.
    check = False if context.is_offline_mode() else True
    pjsip_redirect_method_values.create(op.get_bind(), checkfirst=check)
    pjsip_transport_method_values = sa.Enum(
        *PJSIP_TRANSPORT_METHOD_VALUES, name=PJSIP_TRANSPORT_METHOD_NAME)
    pjsip_transport_protocol_values = sa.Enum(
        *PJSIP_TRANSPORT_PROTOCOL_VALUES, name=PJSIP_TRANSPORT_PROTOCOL_NAME)
    ######################### create tables ##########################
    op.create_table(
        'ps_systems',
        sa.Column('id', sa.String(40), nullable=False, unique=True),
        sa.Column('timer_t1', sa.Integer),
        sa.Column('timer_b', sa.Integer),
        sa.Column('compact_headers', yesno_values),
        sa.Column('threadpool_initial_size', sa.Integer),
        sa.Column('threadpool_auto_increment', sa.Integer),
        sa.Column('threadpool_idle_timeout', sa.Integer),
        sa.Column('threadpool_max_size', sa.Integer),
    )
    op.create_index('ps_systems_id', 'ps_systems', ['id'])
    op.create_table(
        'ps_globals',
        sa.Column('id', sa.String(40), nullable=False, unique=True),
        sa.Column('max_forwards', sa.Integer),
        sa.Column('user_agent', sa.String(40)),
        sa.Column('default_outbound_endpoint', sa.String(40)),
    )
    op.create_index('ps_globals_id', 'ps_globals', ['id'])
    op.create_table(
        'ps_transports',
        sa.Column('id', sa.String(40), nullable=False, unique=True),
        sa.Column('async_operations', sa.Integer),
        sa.Column('bind', sa.String(40)),
        sa.Column('ca_list_file', sa.String(200)),
        sa.Column('cert_file', sa.String(200)),
        sa.Column('cipher', sa.String(200)),
        sa.Column('domain', sa.String(40)),
        sa.Column('external_media_address', sa.String(40)),
        sa.Column('external_signaling_address', sa.String(40)),
        sa.Column('external_signaling_port', sa.Integer),
        sa.Column('method', pjsip_transport_method_values),
        sa.Column('local_net', sa.String(40)),
        sa.Column('password', sa.String(40)),
        sa.Column('priv_key_file', sa.String(200)),
        sa.Column('protocol', pjsip_transport_protocol_values),
        sa.Column('require_client_cert', yesno_values),
        sa.Column('verify_client', yesno_values),
        # NOTE(review): 'verifiy_server' spelling matches the deployed
        # schema -- do not "fix" the column name here.
        sa.Column('verifiy_server', yesno_values),
        sa.Column('tos', yesno_values),
        sa.Column('cos', yesno_values),
    )
    op.create_index('ps_transports_id', 'ps_transports', ['id'])
    op.create_table(
        'ps_registrations',
        sa.Column('id', sa.String(40), nullable=False, unique=True),
        sa.Column('auth_rejection_permanent', yesno_values),
        sa.Column('client_uri', sa.String(40)),
        sa.Column('contact_user', sa.String(40)),
        sa.Column('expiration', sa.Integer),
        sa.Column('max_retries', sa.Integer),
        sa.Column('outbound_auth', sa.String(40)),
        sa.Column('outbound_proxy', sa.String(40)),
        sa.Column('retry_interval', sa.Integer),
        sa.Column('forbidden_retry_interval', sa.Integer),
        sa.Column('server_uri', sa.String(40)),
        sa.Column('transport', sa.String(40)),
        sa.Column('support_path', yesno_values),
    )
    op.create_index('ps_registrations_id', 'ps_registrations', ['id'])
    ########################## add columns ###########################
    # new columns for endpoints
    op.add_column('ps_endpoints', sa.Column('media_address', sa.String(40)))
    op.add_column('ps_endpoints', sa.Column('redirect_method',
                                            pjsip_redirect_method_values))
    op.add_column('ps_endpoints', sa.Column('set_var', sa.Text()))
    # rename mwi_fromuser to mwi_from_user
    op.alter_column('ps_endpoints', 'mwi_fromuser',
                    new_column_name='mwi_from_user',
                    existing_type=sa.String(40))
    # new columns for contacts
    op.add_column('ps_contacts', sa.Column('outbound_proxy', sa.String(40)))
    op.add_column('ps_contacts', sa.Column('path', sa.Text()))
    # new columns for aors
    op.add_column('ps_aors', sa.Column('maximum_expiration', sa.Integer))
    op.add_column('ps_aors', sa.Column('outbound_proxy', sa.String(40)))
    op.add_column('ps_aors', sa.Column('support_path', yesno_values))
def downgrade():
    """Reverse upgrade(): drop the added columns, undo the rename, drop the
    four new tables, and finally drop the enum types created here.
    """
    ########################## drop columns ##########################
    op.drop_column('ps_aors', 'support_path')
    op.drop_column('ps_aors', 'outbound_proxy')
    op.drop_column('ps_aors', 'maximum_expiration')
    op.drop_column('ps_contacts', 'path')
    op.drop_column('ps_contacts', 'outbound_proxy')
    # Restore the pre-migration column name.
    op.alter_column('ps_endpoints', 'mwi_from_user',
                    new_column_name='mwi_fromuser',
                    existing_type=sa.String(40))
    op.drop_column('ps_endpoints', 'set_var')
    op.drop_column('ps_endpoints', 'redirect_method')
    op.drop_column('ps_endpoints', 'media_address')
    ########################## drop tables ###########################
    op.drop_table('ps_registrations')
    op.drop_table('ps_transports')
    op.drop_table('ps_globals')
    op.drop_table('ps_systems')
    ########################## drop enums ############################
    # Enums must go after the tables/columns that referenced them.
    sa.Enum(name=PJSIP_TRANSPORT_PROTOCOL_NAME).drop(
        op.get_bind(), checkfirst=False)
    sa.Enum(name=PJSIP_TRANSPORT_METHOD_NAME).drop(
        op.get_bind(), checkfirst=False)
    sa.Enum(name=PJSIP_REDIRECT_METHOD_NAME).drop(
        op.get_bind(), checkfirst=False)
| {
"repo_name": "TheSeanBrady/crtc.bcs.versa",
"path": "asteriskSource/contrib/ast-db-manage/config/versions/2fc7930b41b3_add_pjsip_endpoint_options_for_12_1.py",
"copies": "7",
"size": "6909",
"license": "unlicense",
"hash": 5148876456277858000,
"line_mean": 38.2556818182,
"line_max": 77,
"alpha_frac": 0.6066000868,
"autogenerated": false,
"ratio": 3.388425698871996,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0019982374142943375,
"num_lines": 176
} |
"""add upload_date to genomic_file_processed
Revision ID: 4d7e99e7b5b4
Revises: 139de0f907dc
Create Date: 2020-10-14 16:59:03.404271
"""
from alembic import op
import sqlalchemy as sa
import rdr_service.model.utils
# revision identifiers, used by Alembic.
revision = '4d7e99e7b5b4'
down_revision = '139de0f907dc'
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade routine (rdr/metrics)."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade routine (rdr/metrics)."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_rdr():
    """Add genomic_file_processed.upload_date (nullable UTC timestamp)."""
    # ### commands auto generated by Alembic - please adjust! ###
    upload_date = sa.Column('upload_date', rdr_service.model.utils.UTCDateTime(), nullable=True)
    op.add_column('genomic_file_processed', upload_date)
    # ### end Alembic commands ###
def downgrade_rdr():
    """Remove the upload_date column from genomic_file_processed."""
    # ### commands auto generated by Alembic - please adjust! ###
    table_name = 'genomic_file_processed'
    op.drop_column(table_name, 'upload_date')
    # ### end Alembic commands ###
def upgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    # No schema changes for the "metrics" engine in this revision.
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    # Nothing to revert for the "metrics" engine.
    pass
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/4d7e99e7b5b4_add_upload_date_to_genomic_file_.py",
"copies": "1",
"size": "1206",
"license": "bsd-3-clause",
"hash": 4153109163886470000,
"line_mean": 23.612244898,
"line_max": 123,
"alpha_frac": 0.6766169154,
"autogenerated": false,
"ratio": 3.368715083798883,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9442468273063267,
"avg_score": 0.02057274522712311,
"num_lines": 49
} |
"""Add URL column (Derp!)
Revision ID: 2d10f39bfbfd
Revises: 669e9df34ea7
Create Date: 2020-01-20 05:40:48.520465
"""
# revision identifiers, used by Alembic.
revision = '2d10f39bfbfd'
down_revision = '669e9df34ea7'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy_utils.types import TSVectorType
from sqlalchemy_searchable import make_searchable
import sqlalchemy_utils
# Patch in knowledge of the citext type, so it reflects properly.
from sqlalchemy.dialects.postgresql.base import ischema_names
import citext
import queue
import datetime
from sqlalchemy.dialects.postgresql import ENUM
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.dialects.postgresql import TSVECTOR
ischema_names['citext'] = citext.CIText
def upgrade():
    """Add the case-insensitive example_url column to seen_netloc_tracker."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): NOT NULL with no server default -- this fails on a
    # non-empty table; presumably the table was empty when this ran.
    op.add_column('seen_netloc_tracker', sa.Column('example_url', citext.CIText(), nullable=False))
    # ### end Alembic commands ###
def downgrade():
    """Drop example_url (and its index) from seen_netloc_tracker."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): upgrade() never creates ix_seen_netloc_tracker_example_url,
    # so this drop assumes the index exists from elsewhere -- confirm.
    op.drop_index(op.f('ix_seen_netloc_tracker_example_url'), table_name='seen_netloc_tracker')
    op.drop_column('seen_netloc_tracker', 'example_url')
    # ### end Alembic commands ###
| {
"repo_name": "fake-name/ReadableWebProxy",
"path": "alembic/versions/2020-01-20_2d10f39bfbfd_add_url_column_derp.py",
"copies": "1",
"size": "1330",
"license": "bsd-3-clause",
"hash": 1334492894382890000,
"line_mean": 28.5555555556,
"line_max": 99,
"alpha_frac": 0.754887218,
"autogenerated": false,
"ratio": 3.4725848563968666,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4727472074396866,
"avg_score": null,
"num_lines": null
} |
# add urls for the CashCity app here
from django.conf.urls import patterns, include, url
from django.conf import settings
# override django-registration registration form
from CashCity.forms import ExRegistrationForm
from registration.backends.default.views import RegistrationView
from CashCity import views
urlpatterns = patterns('',
#map urls
url(r'^$', views.index, name='index'),
#map bookmarks
url(r'^mapsnaps/(?P<id>\d+)/$', views.mapSnaps, name='mapSnaps'),
# map filters
url(r'^filterIndexImage/$', views.filterIndexImage, name='filterIndexImage'),
url(r'^filterIndexAudio/$', views.filterIndexAudio, name='filterIndexAudio'),
url(r'^filterIndexNote/$', views.filterIndexNote, name='filterIndexNote'),
url(r'^filterIndexInterview/$', views.filterIndexInterview, name='filterIndexInterview'),
# media form urls
url(r'^media/form/image/', views.mediaFormImage, name='mediaFormImage'),
url(r'^media/form/audio/', views.mediaFormAudio, name='mediaFormAudio'),
url(r'^media/form/note/', views.mediaFormNote, name='mediaFormNote'),
url(r'^media/form/interview/', views.mediaFormInterview, name='mediaFormInterview'),
# media edit forms
url(r'^media/form/edit/image/(?P<id>\d+)/$', views.mediaFormImage, name = 'mediaFormImageEdit'),
url(r'^media/form/edit/audio/(?P<id>\d+)/$', views.mediaFormAudio, name = 'mediaFormAudioEdit'),
url(r'^media/form/edit/note/(?P<id>\d+)/$', views.mediaFormNote, name = 'mediaFormNoteEdit'),
url(r'^media/form/edit/interview/(?P<id>\d+)/$', views.mediaFormInterview, name = 'mediaFormInterviewEdit'),
#media remove forms
url(r'^media/form/remove/image/(?P<id>\d+)/$', views.mediaFormImageRemove, name = 'mediaFormImageRemove'),
url(r'^media/form/remove/audio/(?P<id>\d+)/$', views.mediaFormAudioRemove, name = 'mediaFormAudioRemove'),
url(r'^media/form/remove/note/(?P<id>\d+)/$', views.mediaFormNoteRemove, name = 'mediaFormNoteRemove'),
url(r'^media/form/remove/interview/(?P<id>\d+)/$', views.mediaFormInterviewRemove, name = 'mediaFormInterviewRemove'),
#media saveDraft
url(r'^media/saveDraft/image/(?P<id>\d+)/$', views.mediaImageSaveDraft, name = 'mediaImageSaveDraft'),
url(r'^media/saveDraft/audio/(?P<id>\d+)/$', views.mediaAudioSaveDraft, name = 'mediaAudioSaveDraft'),
url(r'^media/saveDraft/note/(?P<id>\d+)/$', views.mediaNoteSaveDraft, name = 'mediaNoteSaveDraft'),
url(r'^media/saveDraft/interview/(?P<id>\d+)/$', views.mediaInterviewSaveDraft, name = 'mediaInterviewSaveDraft'),
#media publish
url(r'^media/publish/image/(?P<id>\d+)/$', views.mediaImagePublish, name = 'mediaImagePublish'),
url(r'^media/publish/audio/(?P<id>\d+)/$', views.mediaAudioPublish, name = 'mediaAudioPublish'),
url(r'^media/publish/note/(?P<id>\d+)/$', views.mediaNotePublish, name = 'mediaNotePublish'),
url(r'^media/publish/interview/(?P<id>\d+)/$', views.mediaInterviewPublish, name = 'mediaInterviewPublish'),
#media share
url(r'^media/share/image/(?P<id>\d+)/$', views.mediaImageShare, name = 'mediaImageShare'),
url(r'^media/share/audio/(?P<id>\d+)/$', views.mediaAudioShare, name = 'mediaAudioShare'),
url(r'^media/share/note/(?P<id>\d+)/$', views.mediaNoteShare, name = 'mediaNoteShare'),
url(r'^media/share/interview/(?P<id>\d+)/$', views.mediaInterviewShare, name = 'mediaInterviewShare'),
#media un-share
url(r'^media/unshare/image/(?P<id>\d+)/$', views.mediaImageUnshare, name = 'mediaImageUnshare'),
url(r'^media/unshare/audio/(?P<id>\d+)/$', views.mediaAudioUnshare, name = 'mediaAudioUnshare'),
url(r'^media/unshare/note/(?P<id>\d+)/$', views.mediaNoteUnshare, name = 'mediaNoteUnshare'),
url(r'^media/unshare/interview/(?P<id>\d+)/$', views.mediaInterviewUnshare, name = 'mediaInterviewUnshare'),
# media single pages
url(r'^media/image/(?P<id>\d+)/$', views.mediaPageImage, name='mediaPageImage'),
url(r'^media/audio/(?P<id>\d+)/$', views.mediaPageAudio, name='mediaPageAudio'),
url(r'^media/note/(?P<id>\d+)/$', views.mediaPageNote, name='mediaPageNote'),
url(r'^media/interview/(?P<id>\d+)/$', views.mediaPageInterview, name='mediaPageInterview'),
#media remove comments
url(r'^media/form/remove/comment/image/(?P<id>\d+)/$', views.mediaFormCommentImageRemove, name = 'mediaFormCommentImageRemove'),
url(r'^media/form/remove/comment/audio/(?P<id>\d+)/$', views.mediaFormCommentAudioRemove, name = 'mediaFormCommentAudioRemove'),
url(r'^media/form/remove/comment/note/(?P<id>\d+)/$', views.mediaFormCommentNoteRemove, name = 'mediaFormCommentNoteRemove'),
url(r'^media/form/remove/comment/interview/(?P<id>\d+)/$', views.mediaFormCommentInterviewRemove, name = 'mediaFormCommentInterviewRemove'),
# media filter
url(r'^media/filter/$', views.filterMedia, name='filterMedia'),
url(r'^media/', views.media, name='media'),
# opinion remove comment
url(r'^opinion/form/remove/comment/(?P<id>\d+)/$', views.opinionFormCommentRemove, name = 'opinionFormCommentRemove'),
# opinion edit form
url(r'^opinion/form/edit/(?P<id>\d+)/$', views.opinionForm, name = 'opinionFormEdit'),
# opinion remove form
url(r'^opinion/form/remove/(?P<id>\d+)/$', views.opinionFormRemove, name = 'opinionFormRemove'),
# opinion saveDraft
url(r'^opinion/saveDraft/(?P<id>\d+)/$', views.opinionSaveDraft, name = 'opinionSaveDraft'),
# opinion publish
url(r'^opinion/publish/(?P<id>\d+)/$', views.opinionPublish, name = 'opinionPublish'),
# opinion form url
url(r'^opinion/form/', views.opinionForm, name='opinionForm'),
# opinion single page
url(r'^opinion/(?P<id>\d+)/$', views.opinionPage, name='opinionPage'),
# opinion filter
url(r'^opinion/filter/$', views.filterOpinions, name='filterOpinions'),
url(r'^opinion/', views.opinion, name='opinion'),
# save map snaps
url(r'^savemap/$',views.SaveMap, name='savemap'),
# remove map snaps
url(r'^removemap/$',views.RemoveMap, name='removemap'),
#accounts section - includes Django registration and adding teams, etc that are related
url(r'accounts/register/$',
RegistrationView.as_view(form_class = ExRegistrationForm),
name = 'registration_register'),
# add new team
url(r'accounts/register/team/$', views.createTeam, name = 'createTeam'),
# edit team
url(r'accounts/edit/team/(?P<id>\d+)/$', views.createTeam, name = 'editTeam'),
# remove team
url(r'accounts/remove/team/(?P<id>\d+)/$', views.removeTeam, name = 'removeTeam'),
# registration urls
url(r'^accounts/', include('registration.backends.default.urls')),
# teacher account profile url
url(r'^accounts/profile/$', views.accountProfile, name='accountProfile'),
# teams in teacher profile url
url(r'^accounts/profile/teams/$', views.teams, name='teams'),
# media in teacher profile filter
url(r'^accounts/profile/media/filter/$', views.accountFilterMedia, name='accountFilterMedia'),
# media in teacher profile url
url(r'^accounts/profile/media/', views.accountMedia, name='accountMedia'),
# opinions in teacher profile filter
#url(r'^accounts/profile/opinion/filter/$', views.accountFilterOpinion, name='accountFilterOpinion'),
# opinions in teacher profile url
url(r'^accounts/profile/opinion/$', views.accountOpinion, name='accountOpinion'),
# media in student profile filter
url(r'^accounts/profile/student/media/filter/$', views.studentFilterMedia, name='studentFilterMedia'),
# student account media profile urls
url(r'^accounts/profile/student/media/$', views.studentProfileMedia, name='studentProfileMedia'),
# opinion in student profile filter
#url(r'^accounts/profile/student/opinion/filter/$', views.studentFilterOpinion, name='studentFilterOpinion'),
# student account opinion profile urls
url(r'^accounts/profile/student/opinion/$', views.studentProfileOpinion, name='studentProfileOpinion'),
# about page
url(r'^about/$', views.about, name='about')
)
| {
"repo_name": "NiJeLorg/follow-the-money",
"path": "CashCity/urls.py",
"copies": "1",
"size": "8101",
"license": "mit",
"hash": -5902080826280988000,
"line_mean": 63.2936507937,
"line_max": 144,
"alpha_frac": 0.6907789162,
"autogenerated": false,
"ratio": 3.3740108288213246,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45647897450213243,
"avg_score": null,
"num_lines": null
} |
"""Add urls to asana, with title suggestions!"""
import logging
import os
import urlparse
import asana
from flask import Flask, request, session, redirect, render_template_string, render_template, jsonify, url_for
import flask_wtf
from wtforms import SelectField, SubmitField, StringField, BooleanField
from wtforms.validators import DataRequired
from reddit.utils import get_title
log = logging.getLogger(__name__)
CLIENT_ID = os.environ['ASANA_CLIENT_ID']
CLIENT_SECRET = os.environ['ASANA_CLIENT_SECRET']
def token_updater(token):
    """Persist a refreshed OAuth token in the Flask session.

    Passed to the asana client as ``token_updater`` so tokens refreshed
    automatically mid-request are kept for subsequent requests.
    """
    session["token"] = token
# Convenience factory for an auto-refreshing asana client with app credentials.
def Client(**kwargs):
    """Build an OAuth asana client that refreshes its token automatically.

    Extra keyword arguments (typically ``token=...``) are forwarded to
    ``asana.Client.oauth``.  Refreshed tokens are stored back into the
    session via ``token_updater``.
    """
    return asana.Client.oauth(
        client_id=CLIENT_ID,
        client_secret=CLIENT_SECRET,
        redirect_uri='http://asana.mikejperrone.com/auth/asana/callback',
        auto_refresh_url=asana.session.AsanaOAuth2Session.token_url,
        auto_refresh_kwargs={"client_id": CLIENT_ID, "client_secret": CLIENT_SECRET},
        token_updater=token_updater,
        **kwargs
    )
app = Flask(__name__)
class WorkspaceSelectionForm(flask_wtf.Form):
    """Form for picking one of the user's Asana workspaces."""
    # Choices are populated per-request from the workspaces cached in the session.
    workspace = SelectField("workspace", validators=[DataRequired()])
    submit = SubmitField("Submit")
@app.route('/workspace', methods=["GET", "POST"])
def set_workspace():
    """Let the user choose an Asana workspace; store the choice in the session.

    GET fetches the user's workspaces from Asana and renders the picker;
    POST resolves the submitted id against the cached list and moves on to
    project selection.
    """
    token = session.get('token', False)
    if not token:
        # Not authenticated yet; add_task starts the OAuth flow.
        return redirect(url_for("add_task"))
    if request.method == "POST":
        form = WorkspaceSelectionForm()
        # Choices must be re-populated before reading the submitted value.
        # NOTE(review): form.validate() is never called, and next() raises
        # StopIteration if the submitted id is not in the cached list — confirm.
        form.workspace.choices = [(ws["id"], ws["name"]) for ws in session["workspaces"]]
        session["workspace"] = next(x for x in session["workspaces"] if int(x["id"]) == int(form.data["workspace"]))
        return redirect(url_for('set_project'))
    else:
        client = Client(token=token)
        me = client.users.me()
        form = WorkspaceSelectionForm()
        # Cache the workspace list so the POST handler can resolve the selection.
        session["workspaces"] = me["workspaces"]
        form.workspace.choices = [(ws["id"], ws["name"]) for ws in me["workspaces"]]
        return render_template("select_workspace.html", name=me['name'], form=form)
class ProjectSelectionForm(flask_wtf.Form):
    """Form for picking a project inside the selected workspace."""
    # Choices are populated per-request from the projects cached in the session.
    project = SelectField("project", validators=[DataRequired()])
    submit = SubmitField("Submit")
@app.route('/project', methods=["GET", "POST"])
def set_project():
    """Let the user choose a project in the selected workspace.

    GET lists up to 25 projects from Asana; POST records the selection and
    returns to the task form.
    """
    token = session.get('token', False)
    if not token:
        return redirect(url_for("add_task"))
    # NOTE(review): uses session["workspace"] (KeyError if the key was never
    # set) rather than session.get — confirm the flow guarantees it exists.
    if not session["workspace"]:
        return redirect(url_for("set_workspace"))
    if request.method == "POST":
        form = ProjectSelectionForm()
        # Choices must be re-populated before reading the submitted value.
        form.project.choices = [(p["id"], p["name"]) for p in session["projects"]]
        session["project"] = next(x for x in session["projects"] if int(x["id"]) == int(form.data["project"]))
        return redirect(url_for('add_task'))
    else: # GET
        client = Client(token=token)
        # limit=25: only the first page of projects is offered.
        projects = list(client.projects.find_all(workspace=session["workspace"]["id"], limit=25))
        form = ProjectSelectionForm()
        session["projects"] = projects
        form.project.choices = [(p["id"], p["name"]) for p in projects]
        return render_template("select_project.html", form=form, workspace=session["workspace"]["name"])
class TaskForm(flask_wtf.Form):
    """Form for creating a new Asana task from a title and URL."""
    title = StringField("Title:")
    url = StringField("URL:")
    submit = SubmitField("Submit")
    assign_to_me = BooleanField("Assign To Me")
@app.route("/", methods=["GET", "POST"])
def add_task():
    """Main page: authenticate, ensure a workspace/project is chosen, add tasks.

    GET renders the task form (or redirects into the OAuth / selection
    steps); POST creates the task in Asana and re-renders the form showing
    the created task's name.
    """
    token = session.get('token', False)
    if not token:
        # Start the OAuth dance: render a "connect with Asana" button.
        (auth_url, state) = Client().session.authorization_url()
        session['state'] = state
        return render_template_string(
            '''
            <p><a href="{{ auth_url }}"><img src="https://luna1.co/7df202.png"></a></p>''',
            auth_url=auth_url
        )
    if not session.get("user"):
        # Cache minimal user info; the id is needed for self-assignment below.
        client = Client(token=token)
        me = client.users.me()
        session["user"] = {"name": me["name"], "id": me["id"], "photo": me.get("photo", {})}
    if not session.get("workspace"):
        return redirect(url_for("set_workspace"))
    if not session.get("project"):
        return redirect(url_for("set_project"))
    if request.method == "GET":
        # Pre-check "assign to me" based on the previous submission, if any.
        form = TaskForm(assign_to_me=session.get("assign_to_me", False))
        return render_template("add_task.html", form=form, workspace=session["workspace"]["name"])
    else: # POST
        client = Client(token=token)
        form = TaskForm()
        session["assign_to_me"] = form.data["assign_to_me"]
        # The URL goes into the task notes; the title becomes the task name.
        task = client.tasks.create({
            "workspace": session["workspace"]["id"],
            "name": form.data["title"],
            "notes": form.data["url"],
            "assignee": session["user"]["id"] if form.data["assign_to_me"] else None,
            "projects": [session["project"]["id"]],
        })
        return render_template(
            "add_task.html",
            form=form,
            workspace=session["workspace"]["name"],
            project_id=session["project"]["id"],
            project=session["project"]["name"],
            task_title=task["name"],
        )
@app.route("/logout")
def logout():
    """Drop the OAuth token, forcing re-authentication on the next visit.

    Uses ``pop`` with a default so hitting /logout with no active token
    (fresh session, or logging out twice) does not raise KeyError and 500;
    the original ``del session['token']`` did exactly that.
    """
    session.pop('token', None)
    return redirect(url_for('add_task'))
@app.route("/auth/asana/callback")
def auth_callback():
    """OAuth redirect target: exchange the authorization code for a token.

    The ``state`` round-trip guards the callback against CSRF; the one-time
    value is deleted as soon as it matches.
    """
    if request.args.get('state') == session['state']:
        del session['state']
        session['token'] = Client().session.fetch_token(code=request.args.get('code'))
        return redirect(url_for('add_task'))
    else:
        return "state doesn't match!"
@app.route('/suggest_title')
def suggest_title():
    """Suggest a task title for a URL.

    Prefers the fetched page title; otherwise synthesizes one from the
    hostname and the file name portion of the URL path.
    """
    target = request.args.get("url")
    if not target:
        return jsonify({"error": "must provide url"})
    title = get_title(target)
    if title:
        return jsonify({"title": title})
    # No fetched title available — build a fallback from the URL itself.
    parts = urlparse.urlparse(target)
    basename = os.path.split(os.path.splitext(parts.path)[0])[1]
    title = "{} | {}".format(parts.netloc, basename)
    return jsonify({"title": title})
@app.route('/health')
def health():
    """Liveness endpoint for load balancers / monitoring."""
    return "Healthy!"
# FLASK_DEBUG arrives from the environment as a *string*, so the original
# `os.environ.get("FLASK_DEBUG", False)` treated any non-empty value —
# including "0" or "false" — as debug mode, which also switched the secret
# key to the hard-coded "debug" value.  Parse it explicitly instead.
app.debug = os.environ.get("FLASK_DEBUG", "").lower() in ("1", "true", "yes", "on")
if app.debug:
    app.secret_key = "debug"
else:
    # Production must supply a real key; fail loudly if it is missing.
    app.secret_key = os.environ["FLASK_SECRET_KEY"]
if __name__ == "__main__":
    app.run()
| {
"repo_name": "mjperrone/asana-media",
"path": "asana-media/asana-media.py",
"copies": "1",
"size": "6219",
"license": "mit",
"hash": 7859525303501011000,
"line_mean": 32.256684492,
"line_max": 116,
"alpha_frac": 0.6136034732,
"autogenerated": false,
"ratio": 3.7128358208955223,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9821626374764678,
"avg_score": 0.0009625838661689924,
"num_lines": 187
} |
"""Add Use log
Revision ID: 17d2927a8743
Revises: 392aa82bba8f
Create Date: 2017-06-09 18:34:36.239300
"""
# revision identifiers, used by Alembic.
revision = '17d2927a8743'
down_revision = '392aa82bba8f'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the ``UseLogs`` access-log table and an index per lookup column."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('UseLogs',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('datetime', sa.DateTime(), nullable=False),
        sa.Column('date', sa.Date(), nullable=False),
        sa.Column('day_of_week', sa.Integer(), nullable=False),
        sa.Column('hour_of_day', sa.Integer(), nullable=False),
        sa.Column('year', sa.Integer(), nullable=False),
        sa.Column('month', sa.Integer(), nullable=False),
        sa.Column('url', sa.Unicode(length=255), nullable=True),
        sa.Column('ip_address', sa.Unicode(length=100), nullable=True),
        sa.Column('web_browser', sa.Unicode(length=255), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
    # Non-unique index on every queryable column, same order as the
    # auto-generated migration.
    for column_name in ('date', 'datetime', 'day_of_week', 'hour_of_day',
                        'ip_address', 'month', 'url', 'web_browser', 'year'):
        op.create_index(u'ix_UseLogs_%s' % column_name, 'UseLogs',
                        [column_name], unique=False)
    ### end Alembic commands ###
def downgrade():
    """Drop the ``UseLogs`` indexes (reverse creation order) and the table."""
    ### commands auto generated by Alembic - please adjust! ###
    for column_name in ('year', 'web_browser', 'url', 'month', 'ip_address',
                        'hour_of_day', 'day_of_week', 'datetime', 'date'):
        op.drop_index(u'ix_UseLogs_%s' % column_name, table_name='UseLogs')
    op.drop_table('UseLogs')
    ### end Alembic commands ###
| {
"repo_name": "porduna/labmanager",
"path": "alembic/versions/17d2927a8743_add_use_log.py",
"copies": "5",
"size": "2478",
"license": "bsd-2-clause",
"hash": 5896405310329563000,
"line_mean": 43.25,
"line_max": 88,
"alpha_frac": 0.6735270379,
"autogenerated": false,
"ratio": 2.960573476702509,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007800431090860636,
"num_lines": 56
} |
"""Add User and UserProblem"""
import sqlalchemy as sa
# revision identifiers, used by Alembic.
import sqlalchemy_utils
from alembic import op
revision = '186247cd152e'
down_revision = None
def upgrade():
    """Create the ``user`` and ``user_problem`` tables with their indexes."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Account table; UUID primary keys are generated server-side
    # (uuid_generate_v4() requires the uuid-ossp Postgres extension).
    op.create_table('user',
    sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(), server_default=sa.text('uuid_generate_v4()'), nullable=False),
    sa.Column('username', sa.Unicode(length=255), nullable=False),
    sa.Column('password', sqlalchemy_utils.types.password.PasswordType(), nullable=False),
    sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False),
    sa.Column('is_superuser', sa.Boolean(), server_default='FALSE', nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    # Usernames must be unique; the index doubles as the lookup path.
    op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=True)
    # Join table linking users to problems.
    op.create_table('user_problem',
    sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(), server_default=sa.text('uuid_generate_v4()'), nullable=False),
    sa.Column('problem_id', sqlalchemy_utils.types.uuid.UUIDType(), nullable=False),
    sa.Column('user_id', sqlalchemy_utils.types.uuid.UUIDType(), nullable=False),
    sa.ForeignKeyConstraint(['problem_id'], ['problem.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_user_problem_user_id'), 'user_problem', ['user_id'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Drop tables and indexes in reverse creation order (FK table first)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_user_problem_user_id'), table_name='user_problem')
    op.drop_table('user_problem')
    op.drop_index(op.f('ix_user_username'), table_name='user')
    op.drop_table('user')
    # ### end Alembic commands ###
| {
"repo_name": "planbrothers/ml-annotate",
"path": "annotator/migrations/versions/186247cd152e_add_user_and_userproblem.py",
"copies": "1",
"size": "1823",
"license": "mit",
"hash": -6439986540380544000,
"line_mean": 41.3953488372,
"line_max": 122,
"alpha_frac": 0.6769061986,
"autogenerated": false,
"ratio": 3.4723809523809526,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.960928161826338,
"avg_score": 0.008001106543514455,
"num_lines": 43
} |
"""Add UserColour table
Revision ID: b3a672162cc0
Revises: 9da95f53e8a1
Create Date: 2017-02-05 00:05:23.241442
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b3a672162cc0'
down_revision = '9da95f53e8a1'
branch_labels = None
depends_on = None
def upgrade():
    """Create ``user_colour`` linking a user and guild to a colour role."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('user_colour',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('user_id', sa.BigInteger(), nullable=False),
    sa.Column('guild_id', sa.BigInteger(), nullable=False),
    sa.Column('role_id', sa.BigInteger(), nullable=False),
    sa.ForeignKeyConstraint(['guild_id'], ['guild.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('id'),
    # A role id can back at most one user_colour row.
    sa.UniqueConstraint('role_id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Remove the ``user_colour`` table (reverse of upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('user_colour')
    # ### end Alembic commands ###
| {
"repo_name": "MJB47/Jokusoramame",
"path": "migrations/versions/b3a672162cc0_add_usercolour_table.py",
"copies": "1",
"size": "1031",
"license": "mit",
"hash": 6798211973430759000,
"line_mean": 26.8648648649,
"line_max": 65,
"alpha_frac": 0.666343356,
"autogenerated": false,
"ratio": 3.28343949044586,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.444978284644586,
"avg_score": null,
"num_lines": null
} |
"""Add user created_by and modified_by foreign key refs to any model automatically.
Almost entirely taken from https://github.com/Atomidata/django-audit-log/blob/master/audit_log/middleware.py"""
from django.db.models import signals
from django.utils.functional import curry
class WhodidMiddleware(object):
    """Stamp ``created_by``/``modified_by`` on models saved during a request.

    Connects a pre_save listener (keyed per-request via dispatch_uid) for
    the duration of each mutating request and disconnects it in the
    response phase.  Almost entirely taken from
    https://github.com/Atomidata/django-audit-log/blob/master/audit_log/middleware.py
    """
    def process_request(self, request):
        # Only mutating requests should stamp authorship.
        if not request.method in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):
            if hasattr(request, 'user') and request.user.is_authenticated():
                user = request.user
            else:
                user = None
            # Bug fix: removed leftover debug print statements that wrote
            # gibberish ("sfsfsfsfsdfdsf middleware", the partial object) to
            # stdout on every mutating request.
            mark_whodid = curry(self.mark_whodid, user)
            signals.pre_save.connect(mark_whodid, dispatch_uid = (self.__class__, request,), weak = False)

    def process_response(self, request, response):
        # dispatch_uid matches the one used in process_request, so only this
        # request's listener is removed.
        signals.pre_save.disconnect(dispatch_uid = (self.__class__, request,))
        return response

    def mark_whodid(self, user, sender, instance, **kwargs):
        # Set created_by only on first save (created_by_id still empty);
        # always refresh modified_by when the model has that field.
        if not getattr(instance, 'created_by_id', None):
            instance.created_by = user
        if hasattr(instance, 'modified_by_id'):
            instance.modified_by = user
"repo_name": "JTarball/docker-django-polymer-starter-kit",
"path": "docker/app/app/backend/apps/_archive/blog_codewheel/middleware.py",
"copies": "4",
"size": "1274",
"license": "isc",
"hash": -1574932406086539300,
"line_mean": 46.2222222222,
"line_max": 114,
"alpha_frac": 0.6428571429,
"autogenerated": false,
"ratio": 3.98125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007290742446162667,
"num_lines": 27
} |
"""Add user created_by and modified_by foreign key refs to any model automatically.
Almost entirely taken from https://github.com/Atomidata/django-audit-log/blob/master/audit_log/middleware.py
Modified from http://mindlace.wordpress.com/2012/10/19/automatically-associating-users-with-django-models-on-save/"""
from django.db.models import signals
from django.utils.functional import curry
class WhoMiddleware(object):
    """Record the requesting user on models saved during the request.

    A pre_save listener, keyed to this request via dispatch_uid, is attached
    on the way in and detached on the way out; it stamps
    ``last_user_modified`` on any model exposing that field.
    """
    def process_request(self, request):
        user = None
        if hasattr(request, 'user') and request.user.is_authenticated():
            user = request.user
        # Bind the resolved user into the listener for this request only.
        listener = curry(self.mark_whodid, user)
        signals.pre_save.connect(listener, dispatch_uid = (self.__class__, request,), weak = False)

    def process_response(self, request, response):
        # Remove exactly the listener registered for this request.
        signals.pre_save.disconnect(dispatch_uid = (self.__class__, request,))
        return response

    def mark_whodid(self, user, sender, instance, **kwargs):
        has_field = hasattr(instance, 'last_user_modified_id')
        if has_field:
            instance.last_user_modified = user
| {
"repo_name": "dparizek/newecosystems",
"path": "newecosystems/apps/core/middleware.py",
"copies": "1",
"size": "1255",
"license": "mit",
"hash": -4474586920558227500,
"line_mean": 49.2,
"line_max": 120,
"alpha_frac": 0.6749003984,
"autogenerated": false,
"ratio": 3.7574850299401197,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.493238542834012,
"avg_score": null,
"num_lines": null
} |
"""Add user created_by and modified_by foreign key refs to any model automatically.
Almost entirely taken from https://github.com/Atomidata/django-audit-log/blob/master/audit_log/middleware.py"""
try:
from django.utils.deprecation import MiddlewareMixin
object = MiddlewareMixin
except:
pass
from django.db.models import signals
from functools import partial
from django import http
from django.contrib import auth
from django.core import exceptions
class WhodidMiddleware(MiddlewareMixin):
    """Stamp ``modified_by`` on models saved during an authenticated request.

    Attaches a per-request pre_save listener on the way in and detaches it
    on the way out (keyed by dispatch_uid).
    """
    def process_request(self, request):
        # Only mutating requests should stamp authorship.
        if not request.method in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):
            if hasattr(request, 'user') and request.user.is_authenticated:
                user = request.user
            else:
                user = None
            mark_whodid = partial(self.mark_whodid, user)
            signals.pre_save.connect(mark_whodid, dispatch_uid = (self.__class__, request,), weak = False)

    def process_response(self, request, response):
        signals.pre_save.disconnect(dispatch_uid = (self.__class__, request,))
        return response

    def mark_whodid(self, user, sender, instance, **kwargs):
        # Bug fix: the condition was inverted ("if not hasattr"), so models
        # *with* a modified_by field were never stamped while unrelated models
        # grew a stray attribute.  Matches the upstream django-audit-log
        # behavior and the sibling implementations of this middleware.
        if hasattr(instance, 'modified_by_id'):
            instance.modified_by = user
class TokenMiddleware:
    """
    Middleware that authenticates against a token in the http authorization
    header.
    """
    get_response = None

    def __init__(self, get_response=None):
        self.get_response = get_response

    def __call__(self, request):
        if not self.get_response:
            # Bug fix: the original *returned* the exception instance as if it
            # were an HTTP response; a configuration error must be raised.
            raise exceptions.ImproperlyConfigured(
                'Middleware called without proper initialization')
        self.process_request(request)
        return self.get_response(request)

    def process_request(self, request):
        """Authenticate an ``Authorization: token <value>`` header.

        Returns None (continue processing) when the scheme is absent or the
        credentials do not resolve to a user; returns a 400 response for a
        bare ``token`` scheme with no value; sets ``request.user`` on success.
        """
        auth_header = str(request.META.get('HTTP_AUTHORIZATION', '')).partition(' ')
        if auth_header[0].lower() != 'token':
            return None
        # If they specified an invalid token, let them know.
        if not auth_header[2]:
            return http.HttpResponseBadRequest("Improperly formatted token")
        user = auth.authenticate(token=auth_header[2])
        if user:
            request.user = user
| {
"repo_name": "BanzaiTokyo/askapp",
"path": "askapp/middleware.py",
"copies": "1",
"size": "2232",
"license": "apache-2.0",
"hash": -4985649499008399000,
"line_mean": 32.3134328358,
"line_max": 114,
"alpha_frac": 0.6527777778,
"autogenerated": false,
"ratio": 4.275862068965517,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5428639846765517,
"avg_score": null,
"num_lines": null
} |
"""add_user_fk_to_submission
Revision ID: 885280875a1c
Revises: c9e8302571cd
Create Date: 2016-10-23 15:41:42.023560
"""
# revision identifiers, used by Alembic.
revision = '885280875a1c'
down_revision = 'c9e8302571cd'
branch_labels = None
depends_on = None
from alembic import op
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function by name."""
    handler = globals()["upgrade_" + engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function by name."""
    handler = globals()["downgrade_" + engine_name]
    handler()
def upgrade_data_broker():
    """Add FK submission.user_id -> users.user_id, nulling orphans first."""
    # unset existing users' ids
    # Rows referencing users that no longer exist would violate the new
    # constraint, so they are NULLed before the FK is created.
    op.execute("""
        UPDATE submission
        SET user_id = NULL
        WHERE user_id NOT IN (SELECT user_id FROM users)
    """)

    ### commands auto generated by Alembic - please adjust! ###
    op.create_foreign_key('fk_submission_user', 'submission', 'users', ['user_id'], ['user_id'], ondelete='SET NULL')
    ### end Alembic commands ###
def downgrade_data_broker():
    """Drop the submission -> users foreign key (reverse of upgrade)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint('fk_submission_user', 'submission', type_='foreignkey')
    ### end Alembic commands ###
| {
"repo_name": "fedspendingtransparency/data-act-broker-backend",
"path": "dataactcore/migrations/versions/885280875a1c_add_user_fk_to_submission.py",
"copies": "2",
"size": "1058",
"license": "cc0-1.0",
"hash": -5346962463101193000,
"line_mean": 22.5111111111,
"line_max": 117,
"alpha_frac": 0.6616257089,
"autogenerated": false,
"ratio": 3.412903225806452,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.994332383084158,
"avg_score": 0.02624102077297443,
"num_lines": 45
} |
"""add user information
Revision ID: 0b9d29ccc6fb
Revises: 8cf7e695b34e
Create Date: 2016-04-06 18:56:29.499633
"""
# revision identifiers, used by Alembic.
revision = '0b9d29ccc6fb'
down_revision = '8cf7e695b34e'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add profile columns to ``users``; all nullable so existing rows pass."""
    ### commands auto generated by Alembic - please adjust! ###
    profile_columns = (
        sa.Column('about_me', sa.Text(), nullable=True),
        sa.Column('last_seen', sa.DateTime(), nullable=True),
        sa.Column('location', sa.String(length=64), nullable=True),
        sa.Column('member_since', sa.DateTime(), nullable=True),
        sa.Column('name', sa.String(length=64), nullable=True),
    )
    for column in profile_columns:
        op.add_column('users', column)
    ### end Alembic commands ###
def downgrade():
    """Drop the profile columns added by upgrade(), in reverse order."""
    ### commands auto generated by Alembic - please adjust! ###
    for column_name in ('name', 'member_since', 'location', 'last_seen', 'about_me'):
        op.drop_column('users', column_name)
    ### end Alembic commands ###
| {
"repo_name": "KotiyaSenya/FlaskLearn",
"path": "migrations/versions/0b9d29ccc6fb_add_user_information.py",
"copies": "1",
"size": "1107",
"license": "mit",
"hash": 5371744475263835000,
"line_mean": 31.5588235294,
"line_max": 86,
"alpha_frac": 0.6729900632,
"autogenerated": false,
"ratio": 3.1810344827586206,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9320966623208026,
"avg_score": 0.006611584550119048,
"num_lines": 34
} |
"""Add User Language
Revision ID: d4a70083f72e
Revises: 3512efb5496d
Create Date: 2016-07-22 14:04:43.900826
"""
# revision identifiers, used by Alembic.
revision = 'd4a70083f72e'
down_revision = '3512efb5496d'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add per-user language support: name indexes plus users.language_id FK."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_index(op.f('ix_languages_name'), 'languages', ['name'], unique=False)
    op.create_index(op.f('ix_translationvariables_name'), 'translationvariables', ['name'], unique=False)
    # Nullable: existing users simply have no language until one is chosen.
    op.add_column('users', sa.Column('language_id', sa.Integer(), nullable=True))
    op.create_foreign_key(op.f('fk_users_language_id_languages'), 'users', 'languages', ['language_id'], ['id'])
    ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): drop the FK/column first, then the indexes."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint(op.f('fk_users_language_id_languages'), 'users', type_='foreignkey')
    op.drop_column('users', 'language_id')
    op.drop_index(op.f('ix_translationvariables_name'), table_name='translationvariables')
    op.drop_index(op.f('ix_languages_name'), table_name='languages')
    ### end Alembic commands ###
| {
"repo_name": "ActiDoo/gamification-engine",
"path": "gengine/app/alembic/versions/d4a70083f72e_add_user_language.py",
"copies": "1",
"size": "1211",
"license": "mit",
"hash": 3671964924535227000,
"line_mean": 34.6176470588,
"line_max": 112,
"alpha_frac": 0.6928158547,
"autogenerated": false,
"ratio": 3.2641509433962264,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44569667980962263,
"avg_score": null,
"num_lines": null
} |
"""Add user model
Revision ID: 3d8f34e83d23
Revises: None
Create Date: 2015-01-01 13:03:45.048687
"""
# revision identifiers, used by Alembic.
revision = '3d8f34e83d23'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the ``user`` table (name and email both unique)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('user',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=50), nullable=True),
    sa.Column('email', sa.String(length=50), nullable=True),
    sa.Column('avatar', sa.String(length=200), nullable=True),
    sa.Column('password', sa.String(length=200), nullable=True),
    sa.Column('is_admin', sa.Boolean(), nullable=True),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('email'),
    sa.UniqueConstraint('name')
    )
    ### end Alembic commands ###
def downgrade():
    """Drop the ``user`` table (reverse of upgrade)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('user')
    ### end Alembic commands ###
| {
"repo_name": "Akagi201/learning-python",
"path": "flask/flask-boost/test/migrations/versions/20150101130345_3d8f34e83d23_add_user_model.py",
"copies": "4",
"size": "1047",
"license": "mit",
"hash": 8099459492158509000,
"line_mean": 27.2972972973,
"line_max": 64,
"alpha_frac": 0.6695319962,
"autogenerated": false,
"ratio": 3.3993506493506493,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.010404488121719535,
"num_lines": 37
} |
"""Add user model
Revision ID: 4d4c8a2e1646
Revises: None
Create Date: 2015-02-06 23:09:10.871000
"""
# revision identifiers, used by Alembic.
revision = '4d4c8a2e1646'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the ``auth_user`` account table."""
    ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): email is NOT NULL but carries no unique constraint,
    # unlike username — confirm that is intentional.
    op.create_table('auth_user',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('date_created', sa.DateTime(), nullable=True),
    sa.Column('date_modified', sa.DateTime(), nullable=True),
    sa.Column('name', sa.String(length=128), nullable=False),
    sa.Column('username', sa.String(length=128), nullable=False),
    sa.Column('email', sa.String(length=128), nullable=False),
    sa.Column('password', sa.String(length=192), nullable=False),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('username')
    )
    ### end Alembic commands ###
def downgrade():
    """Drop the ``auth_user`` table (reverse of upgrade)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('auth_user')
    ### end Alembic commands ###
| {
"repo_name": "awm/minventory",
"path": "migrations/versions/4d4c8a2e1646_add_user_model.py",
"copies": "1",
"size": "1043",
"license": "bsd-3-clause",
"hash": 7015323049312152000,
"line_mean": 27.9722222222,
"line_max": 65,
"alpha_frac": 0.6768935762,
"autogenerated": false,
"ratio": 3.419672131147541,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4596565707347541,
"avg_score": null,
"num_lines": null
} |
"""add username and comments
Revision ID: 38f44d362e21
Revises: e9e90674174
Create Date: 2016-08-17 02:01:28.472471
"""
# revision identifiers, used by Alembic.
revision = '38f44d362e21'
down_revision = 'e9e90674174'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add user.alias, entry.author_id, and the ``comment`` table."""
    # NOTE(review): adding ``alias`` as NOT NULL with no server default will
    # fail on a database that already contains user rows — confirm.
    op.add_column('user', sa.Column('alias', sa.String(length=255), nullable=False, unique=True))
    # Nullable: existing entries keep no author until backfilled.
    op.add_column('entry', sa.Column('author_id', sa.Integer(), sa.ForeignKey('user.id'), nullable=True))
    op.create_table('comment',
        sa.Column('id', sa.Integer(), primary_key=True),
        sa.Column('user_id', sa.Integer(), sa.ForeignKey('user.id'), nullable=False),
        sa.Column('post_id', sa.Integer(), sa.ForeignKey('entry.id'), nullable=False),
        sa.Column('text', sa.Text(), nullable=True),
        sa.Column('timestamp', sa.DateTime(), nullable=False),
    )
def downgrade():
    """Reverse upgrade(): remove the added columns and the comment table."""
    op.drop_column('user', 'alias')
    op.drop_column('entry', 'author_id')
    op.drop_table('comment')
| {
"repo_name": "pbecotte/devblog",
"path": "backend/alembic/versions/38f44d362e21_add_username_and_comments.py",
"copies": "1",
"size": "1036",
"license": "mit",
"hash": -7552749779611819000,
"line_mean": 29.4705882353,
"line_max": 105,
"alpha_frac": 0.667953668,
"autogenerated": false,
"ratio": 3.168195718654434,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43361493866544343,
"avg_score": null,
"num_lines": null
} |
"""Add username history
Revision ID: 4b6fd0d48a2b
Revises: c1f8375b5805
Create Date: 2020-02-19 18:51:51.714126
"""
# revision identifiers, used by Alembic.
revision = '4b6fd0d48a2b'
down_revision = 'c1f8375b5805'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Create ``username_history`` tracking past usernames per account.

    Check constraints enforce: login_name is the lowercased alphanumeric
    form of username; username is printable ASCII without ';'; a row cannot
    be both cosmetic and active; and the deactivation columns are set
    exactly when a row is neither active nor cosmetic.
    """
    op.create_table('username_history',
        sa.Column('historyid', sa.Integer(), nullable=False),
        sa.Column('userid', sa.Integer(), nullable=False),
        sa.Column('username', sa.String(length=25), nullable=False),
        sa.Column('login_name', sa.String(length=25), nullable=False),
        sa.Column('replaced_at', postgresql.TIMESTAMP(timezone=True), nullable=False),
        sa.Column('replaced_by', sa.Integer(), nullable=False),
        sa.Column('active', sa.Boolean(), nullable=False),
        sa.Column('deactivated_at', postgresql.TIMESTAMP(timezone=True), nullable=True),
        sa.Column('deactivated_by', sa.Integer(), nullable=True),
        sa.Column('cosmetic', sa.Boolean(), nullable=False),
        sa.CheckConstraint(u"login_name = lower(regexp_replace(username, '[^0-9A-Za-z]', '', 'g'))", name='username_history_login_name_check'),
        sa.CheckConstraint(u"username !~ '[^ -~]' AND username !~ ';'", name='username_history_username_check'),
        sa.CheckConstraint(u'NOT (cosmetic AND active)', name='username_history_cosmetic_inactive_check'),
        sa.CheckConstraint(u'(active OR cosmetic) = (deactivated_at IS NULL) AND (active OR cosmetic) = (deactivated_by IS NULL)', name='username_history_active_check'),
        sa.ForeignKeyConstraint(['deactivated_by'], ['login.userid'], ),
        sa.ForeignKeyConstraint(['replaced_by'], ['login.userid'], ),
        sa.ForeignKeyConstraint(['userid'], ['login.userid'], ),
        sa.PrimaryKeyConstraint('historyid')
    )
    # Partial unique indexes: at most one *active* row per login_name and
    # per userid; the third index orders non-cosmetic history per user.
    op.create_index('ind_username_history_login_name', 'username_history', ['login_name'], unique=True, postgresql_where=sa.text('active'))
    op.create_index('ind_username_history_userid', 'username_history', ['userid'], unique=True, postgresql_where=sa.text('active'))
    op.create_index('ind_username_history_userid_historyid', 'username_history', ['userid', 'historyid'], unique=True, postgresql_where=sa.text(u'NOT cosmetic'))
def downgrade():
    """Remove the ``username_history`` table and its partial indexes."""
    # Indexes are dropped in reverse order of creation, then the table itself.
    for index_name in (
        'ind_username_history_userid_historyid',
        'ind_username_history_userid',
        'ind_username_history_login_name',
    ):
        op.drop_index(index_name, table_name='username_history')
    op.drop_table('username_history')
| {
"repo_name": "Weasyl/weasyl",
"path": "libweasyl/libweasyl/alembic/versions/4b6fd0d48a2b_add_username_history.py",
"copies": "1",
"size": "2517",
"license": "apache-2.0",
"hash": -936030117446526300,
"line_mean": 52.5531914894,
"line_max": 165,
"alpha_frac": 0.7111640842,
"autogenerated": false,
"ratio": 3.401351351351351,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9555559791146142,
"avg_score": 0.011391128881041688,
"num_lines": 47
} |
"""Add user_passwordhistory, anchore_config tables, and user_password_change_insert triggers
Revision ID: 3828e380de20
Revises: ea2739ecd874
Create Date: 2018-08-28 16:31:32.570755
"""
# revision identifiers, used by Alembic.
revision = '3828e380de20'
down_revision = 'ea2739ecd874'
from alembic import op
import sqlalchemy as sa
# Postgres triggers for maintaining user password history
# plpgsql function: on an UPDATE that changes "user".password, archive the
# OLD password into user_passwordhistory; on INSERT, archive the initial
# password.  Installed and invoked by the two triggers below.
fn_userpw_hist = sa.DDL("""
create or replace function user_password_change_insert()
returns trigger
language plpgsql
as $$
BEGIN
IF
TG_OP = 'UPDATE'
AND OLD."password" <> NEW."password" THEN
INSERT INTO user_passwordhistory ( user_id, "password" )
VALUES
( OLD.ID, OLD."password" );
ELSIF ( TG_OP = 'INSERT' ) THEN
INSERT INTO user_passwordhistory ( user_id, "password" )
VALUES
( NEW.ID, NEW."password" );
END IF;
RETURN NEW;
END;
$$;
""")
# Fires after each INSERT into "user" so the first password is recorded.
trig_userpw_hist_insert = sa.DDL("""
create trigger user_password_insert_trigger
after insert
on "user"
for each row
execute procedure user_password_change_insert()
;
""")
# Fires before each UPDATE of "user"; the function itself checks whether
# the password actually changed before writing history.
trig_userpw_hist_change = sa.DDL("""
create trigger user_password_change_insert_trigger
before update
on "user"
for each row
execute procedure user_password_change_insert()
;
""")
def upgrade():
    """Create anchore_config and user_passwordhistory, install the
    password-history triggers (Postgres only), and backfill history rows
    for all existing users."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('anchore_config',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=64), nullable=False),
        sa.Column('username', sa.String(length=64), nullable=True),
        sa.Column('password', sa.String(length=64), nullable=True),
        sa.Column('url', sa.String(length=1024), nullable=True),
        sa.Column('ssl_verify', sa.Boolean(), nullable=True),
        sa.PrimaryKeyConstraint('id', 'name')
    )
    op.create_table('user_passwordhistory',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('user_id', sa.Integer(), nullable=False),
        sa.Column('password', sa.String(length=255), nullable=True),
        # Timestamp defaults to the DB clock at insert time.
        sa.Column('changed_at', sa.DateTime(), server_default=sa.func.current_timestamp()),
        sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index('ix_user_passwordhistory_user_id', 'user_passwordhistory', ['user_id'], unique=False)
    # ### end Alembic commands ###
    # Create User Password History related triggers
    # execute_if(dialect="postgresql") makes each DDL a no-op on any other
    # database backend.
    ddl_callable_fn = fn_userpw_hist.execute_if(dialect="postgresql")
    ddl_callable_fn(target=None, bind=op.get_context().bind)
    ddl_callable_trig_ins = trig_userpw_hist_insert.execute_if(dialect="postgresql")
    ddl_callable_trig_ins(target=None, bind=op.get_context().bind)
    ddl_callable_trig_chg = trig_userpw_hist_change.execute_if(dialect="postgresql")
    ddl_callable_trig_chg(target=None, bind=op.get_context().bind)
    # Build Password History record for current users
    # changed_at is backdated 61 days — presumably so existing passwords
    # immediately count as expired under a 60-day aging policy; confirm
    # against the application's password rules.
    conn = op.get_bind()
    conn.execute(sa.sql.text('''
INSERT INTO user_passwordhistory(user_id, password, changed_at) select "user".id as user_id, password, current_timestamp - interval '61 days' from "user";
'''))
def downgrade():
    """Remove the triggers/function, then drop the tables added in upgrade()."""
    # Raw SQL runs unconditionally (unlike the dialect-guarded DDL in
    # upgrade); IF EXISTS avoids errors when the objects are absent.
    conn = op.get_bind()
    conn.execute(sa.sql.text('''
DROP TRIGGER IF EXISTS user_password_insert_trigger ON "user";
DROP TRIGGER IF EXISTS user_password_change_insert_trigger ON "user";
DROP FUNCTION IF EXISTS user_password_change_insert();
'''))
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index('ix_user_passwordhistory_user_id', table_name='user_passwordhistory')
    op.drop_table('user_passwordhistory')
    op.drop_table('anchore_config')
    # ### end Alembic commands ###
| {
"repo_name": "stackArmor/security_monkey",
"path": "migrations/versions/3828e380de20_add_user_passwordhistory_anchore_config_.py",
"copies": "1",
"size": "3618",
"license": "apache-2.0",
"hash": 3639237251088616400,
"line_mean": 31.5945945946,
"line_max": 163,
"alpha_frac": 0.6951354339,
"autogenerated": false,
"ratio": 3.283121597096189,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4478257030996189,
"avg_score": null,
"num_lines": null
} |
"""Add UserPreferences
Revision ID: 20cb28a89cc6
Revises: 498a6d3a1e0f
Create Date: 2015-10-17 18:20:11.865650
"""
# revision identifiers, used by Alembic.
revision = '20cb28a89cc6'
down_revision = u'498a6d3a1e0f'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the ``UserPreferences`` table (at most one row per user)."""
    columns = [
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('user_id', sa.Integer(), nullable=False),
        sa.Column('labs_sort_method', sa.Unicode(length=32), nullable=False),
    ]
    constraints = [
        sa.ForeignKeyConstraint(['user_id'], [u'User.id'], ),
        sa.PrimaryKeyConstraint('id'),
    ]
    op.create_table(u'UserPreferences', *(columns + constraints),
                    mysql_engine=u'InnoDB')
    # Unique index enforces one preferences row per user.
    op.create_index(u'ix_UserPreferences_user_id', u'UserPreferences',
                    ['user_id'], unique=True)
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # The index is dropped before the table that owns it.
    op.drop_index(u'ix_UserPreferences_user_id', table_name=u'UserPreferences')
    op.drop_table(u'UserPreferences')
    ### end Alembic commands ###
| {
"repo_name": "porduna/weblabdeusto",
"path": "server/src/weblab/db/upgrade/regular/versions/20cb28a89cc6_add_userpreferences.py",
"copies": "3",
"size": "1057",
"license": "bsd-2-clause",
"hash": -4268819403407898600,
"line_mean": 29.2,
"line_max": 96,
"alpha_frac": 0.6896877956,
"autogenerated": false,
"ratio": 3.1552238805970148,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.009175388883415134,
"num_lines": 35
} |
"""add user preferences
Revision ID: 2438f7d83478
Revises: 40f5c56adfee
Create Date: 2016-03-08 21:20:42.234353
"""
# revision identifiers, used by Alembic.
revision = '2438f7d83478'
down_revision = '40f5c56adfee'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the per-user preference tables."""
    # One row of general preferences per user.
    op.create_table(
        'general_preferences',
        sa.Column('user_id', sa.Integer, sa.ForeignKey('users.id'),
                  primary_key=True),
        sa.Column('minimum_displayed_severity', sa.Integer, nullable=True),
        sa.Column('resource_type', sa.String(64), nullable=True),
    )
    # Several report-preference lines per user; (user_id, line_number) is
    # the composite primary key.
    report_columns = [
        sa.Column('user_id', sa.Integer, sa.ForeignKey('users.id'),
                  primary_key=True),
        sa.Column('line_number', sa.Integer, primary_key=True),
        sa.Column('historical_report', sa.Boolean, default=False),
        sa.Column('how_many_months_back', sa.Integer, nullable=True),
        sa.Column('latitude', sa.Float),
        sa.Column('longitude', sa.Float),
        sa.Column('radius', sa.Float),
        sa.Column('minimum_severity', sa.Integer),
    ]
    op.create_table('report_preferences', *report_columns)
def downgrade():
    # Drop in reverse creation order; both tables reference users.id
    # independently, so either order would work, but this mirrors upgrade().
    op.drop_table('report_preferences')
    op.drop_table('general_preferences')
| {
"repo_name": "boazin/anyway",
"path": "alembic/versions/2438f7d83478_add_user_preferences.py",
"copies": "2",
"size": "1231",
"license": "bsd-3-clause",
"hash": -5514986369411539000,
"line_mean": 27.6279069767,
"line_max": 86,
"alpha_frac": 0.6596263201,
"autogenerated": false,
"ratio": 3.3360433604336044,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9614740543773082,
"avg_score": 0.07618582735210468,
"num_lines": 43
} |
"""add user profile column
Revision ID: 423fc08700e
Revises: 1ccc670e299
Create Date: 2015-08-11 17:11:29.685590
"""
# revision identifiers, used by Alembic.
revision = '423fc08700e'
down_revision = '1ccc670e299'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add the profile columns to ``users``.

    All columns are nullable so existing rows remain valid.
    """
    profile_columns = (
        sa.Column('about_me', sa.Text(), nullable=True),
        sa.Column('last_seen', sa.DateTime(), nullable=True),
        sa.Column('location', sa.String(length=64), nullable=True),
        sa.Column('member_since', sa.DateTime(), nullable=True),
        sa.Column('name', sa.String(length=64), nullable=True),
    )
    for column in profile_columns:
        op.add_column('users', column)
def downgrade():
    """Remove the profile columns (reverse of the upgrade order)."""
    for column_name in ('name', 'member_since', 'location',
                        'last_seen', 'about_me'):
        op.drop_column('users', column_name)
| {
"repo_name": "hanks-zyh/Flask-blog",
"path": "migrations/versions/423fc08700e_add_user_profile_column.py",
"copies": "2",
"size": "1106",
"license": "apache-2.0",
"hash": 3220304221149540000,
"line_mean": 31.5294117647,
"line_max": 86,
"alpha_frac": 0.6717902351,
"autogenerated": false,
"ratio": 3.272189349112426,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9910921661461831,
"avg_score": 0.006611584550119048,
"num_lines": 34
} |
"""Add users/roles tables, staff, admin, superadmin roles.
Revision ID: 2d4ba8baa06c
Revises: 22427b19886b
Create Date: 2015-04-27 14:45:55.566462
"""
# revision identifiers, used by Alembic.
revision = '2d4ba8baa06c'
down_revision = '22427b19886b'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create roles and users tables, make company names unique, and seed
    the superadmin/admin/staff roles."""
    ### commands auto generated by Alembic - please adjust! ###
    # The sequence is created explicitly and wired up as the id
    # server_default — presumably so rows added via bulk_insert below get
    # generated ids; confirm.
    op.execute(sa.schema.CreateSequence(sa.schema.Sequence("roles_id_seq")))
    roles = op.create_table('roles',
        sa.Column('id', sa.Integer(), nullable=False, server_default=sa.text("nextval('roles_id_seq'::regclass)")),
        sa.Column('name', sa.String(length=80), nullable=False),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name')
    )
    op.create_table('users',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('email', sa.String(length=80), nullable=False),
        sa.Column('created_at', sa.DateTime(), nullable=False),
        sa.Column('first_name', sa.String(length=30), nullable=True),
        sa.Column('last_name', sa.String(length=30), nullable=True),
        sa.Column('active', sa.Boolean(), nullable=True),
        sa.Column('role_id', sa.Integer(), nullable=True),
        # Deleting a role leaves its users role-less instead of cascading.
        sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ondelete='SET NULL'),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('email')
    )
    op.create_unique_constraint('company_company_name_key', 'company', ['company_name'])
    # Seed the three built-in roles using the table object returned above.
    op.bulk_insert(roles, [
        {'name': 'superadmin'},
        {'name': 'admin'},
        {'name': 'staff'}
    ])
    ### end Alembic commands ###
def downgrade():
    """Tear down users/roles, the company constraint, and the id sequence."""
    op.drop_constraint('company_company_name_key', 'company', type_='unique')
    # users is dropped first because it references roles.
    for table_name in ('users', 'roles'):
        op.drop_table(table_name)
    op.execute(sa.schema.DropSequence(sa.schema.Sequence("roles_id_seq")))
| {
"repo_name": "ajb/pittsburgh-purchasing-suite",
"path": "migrations/versions/2d4ba8baa06c_.py",
"copies": "3",
"size": "1898",
"license": "bsd-3-clause",
"hash": -742739070365679900,
"line_mean": 35.5,
"line_max": 111,
"alpha_frac": 0.6617492097,
"autogenerated": false,
"ratio": 3.353356890459364,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.010242960919647777,
"num_lines": 52
} |
"""Add users table
Revision ID: 4b806c48d18d
Revises: 9cd42e48cd23
Create Date: 2017-02-27 21:07:31.949415
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4b806c48d18d'
down_revision = '9cd42e48cd23'
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``users`` table."""
    op.create_table('users',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('username', sa.String(length=50), nullable=False),
        # Empty-string server defaults keep these NOT NULL columns
        # insertable before the application supplies values.
        sa.Column('password', sa.String(length=255), server_default='', nullable=False),
        sa.Column('email', sa.String(length=255), nullable=False),
        sa.Column('confirmed_at', sa.DateTime(), nullable=True),
        # Accounts default to inactive ('0').
        sa.Column('is_active', sa.Boolean(), server_default='0', nullable=False),
        sa.Column('first_name', sa.String(length=100), server_default='', nullable=False),
        sa.Column('last_name', sa.String(length=100), server_default='', nullable=False),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('email'),
        sa.UniqueConstraint('username')
    )
def downgrade():
    # Reverse of upgrade(): remove the users table entirely.
    op.drop_table('users')
| {
"repo_name": "vinntreus/training_stats",
"path": "migrations/versions/4b806c48d18d_.py",
"copies": "1",
"size": "1078",
"license": "apache-2.0",
"hash": -8934413613448252000,
"line_mean": 28.9444444444,
"line_max": 86,
"alpha_frac": 0.693877551,
"autogenerated": false,
"ratio": 3.256797583081571,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4450675134081571,
"avg_score": null,
"num_lines": null
} |
"""add user table
Revision ID: 1478867a872a
Revises: 657669eb5fcb
Create Date: 2020-08-06 00:35:31.088631
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1478867a872a'
down_revision = '657669eb5fcb'
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('user',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('username', sa.String(length=64), nullable=True),
        sa.Column('password_hash', sa.String(length=128), nullable=True),
        # op.f() applies the project's constraint-naming convention.
        sa.PrimaryKeyConstraint('id', name=op.f('pk_user'))
    )
    # batch_alter_table is typically used for SQLite compatibility, which
    # lacks full ALTER TABLE support.
    with op.batch_alter_table('user', schema=None) as batch_op:
        batch_op.create_index(batch_op.f('ix_user_username'), ['username'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Drop the index (via batch mode, mirroring upgrade) before the table.
    with op.batch_alter_table('user', schema=None) as batch_op:
        batch_op.drop_index(batch_op.f('ix_user_username'))
    op.drop_table('user')
    # ### end Alembic commands ###
| {
"repo_name": "hackerspace-silesia/cebulany-manager",
"path": "migrations/versions/1478867a872a_add_user_table.py",
"copies": "1",
"size": "1117",
"license": "mit",
"hash": 4489398136423430700,
"line_mean": 27.641025641,
"line_max": 89,
"alpha_frac": 0.6696508505,
"autogenerated": false,
"ratio": 3.2376811594202897,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44073320099202895,
"avg_score": null,
"num_lines": null
} |
"""Add `user` table.
Revision ID: 1f75e6d37e
Revises: 3a550b296ce
Create Date: 2015-02-07 00:53:31.665860
"""
# revision identifiers, used by Alembic.
revision = '1f75e6d37e'
down_revision = '3a550b296ce'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy_utils.types.email import EmailType
from sqlalchemy_utils.types.uuid import UUIDType
def upgrade():
    """Create the ``user`` table with a server-generated UUID primary key."""
    op.create_table(
        'user',
        sa.Column(
            'id',
            UUIDType(binary=False),
            # uuid_generate_v4() requires the Postgres uuid-ossp extension
            # to be installed — TODO confirm it is enabled.
            server_default=sa.text('uuid_generate_v4()'),
            nullable=False
        ),
        sa.Column('email', EmailType(length=255), nullable=False),
        sa.Column('first_name', sa.Unicode(length=255), nullable=False),
        sa.Column('last_name', sa.Unicode(length=255), nullable=False),
        sa.Column('guild', sa.Unicode(length=100), nullable=True),
        sa.Column('class_year', sa.Unicode(length=1), nullable=True),
        sa.Column('phone_number', sa.Unicode(length=20), nullable=True),
        sa.Column('signed_at', sa.DateTime(), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('email')
    )
def downgrade():
    # Reverse of upgrade(): drop the table.
    op.drop_table('user')
| {
"repo_name": "wappulehti-apy/diilikone-api",
"path": "diilikone/migrations/versions/1f75e6d37e_add_user_table.py",
"copies": "1",
"size": "1229",
"license": "mit",
"hash": 1969103066276876800,
"line_mean": 27.5813953488,
"line_max": 72,
"alpha_frac": 0.6476810415,
"autogenerated": false,
"ratio": 3.3950276243093924,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9528155068839765,
"avg_score": 0.0029107193939254166,
"num_lines": 43
} |
"""add user table
Revision ID: 3311cbf4b2bd
Revises: b1c09a577a3d
Create Date: 2016-11-29 06:27:37.700999
"""
# revision identifiers, used by Alembic.
revision = "3311cbf4b2bd"
down_revision = "b1c09a577a3d"
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the ``user`` table keyed to a Google account."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "user",
        sa.Column("id", sa.Integer(), nullable=False),
        # google_id and email are each unique: one row per Google identity.
        sa.Column("google_id", sa.String(length=128), nullable=True),
        sa.Column("email", sa.String(length=128), nullable=True),
        sa.Column("name", sa.String(length=128), nullable=True),
        sa.Column("avatar", sa.Text(), nullable=True),
        sa.PrimaryKeyConstraint("id"),
        sa.UniqueConstraint("email"),
        sa.UniqueConstraint("google_id"),
    )
    ### end Alembic commands ###
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # Reverse of upgrade().
    op.drop_table("user")
    ### end Alembic commands ###
| {
"repo_name": "Clinical-Genomics/taboo",
"path": "alembic/versions/3311cbf4b2bd_add_user_table.py",
"copies": "1",
"size": "1027",
"license": "mit",
"hash": 3881777610896475600,
"line_mean": 26.0263157895,
"line_max": 69,
"alpha_frac": 0.6484907498,
"autogenerated": false,
"ratio": 3.3894389438943895,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9514640089991455,
"avg_score": 0.004657920740586644,
"num_lines": 38
} |
"""add user table
Revision ID: 66a893890997
Revises: e06fa0a86214
Create Date: 2016-06-28 14:52:01.114605
"""
# revision identifiers, used by Alembic.
revision = '66a893890997'
down_revision = 'e06fa0a86214'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade (development/test/production)."""
    globals()["upgrade_%s" % engine_name]()


def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade."""
    globals()["downgrade_%s" % engine_name]()


def _upgrade():
    """Shared upgrade body — the DDL is identical for every engine.

    Previously this body was copy-pasted verbatim into the three
    per-engine functions; it is now defined once.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('users',
        sa.Column('name', sa.String(length=32), autoincrement=False, nullable=False),
        sa.Column('id', sa.String(length=32), nullable=True),
        sa.Column('created', sa.DateTime(), nullable=True),
        sa.Column('first_seen', sa.DateTime(), nullable=True),
        sa.Column('last_seen', sa.DateTime(), nullable=True),
        sa.Column('user_data', mysql.MEDIUMTEXT(), nullable=True),
        sa.PrimaryKeyConstraint('name'),
        sa.UniqueConstraint('name')
    )
    # The 'posts' changes that used to live here were removed earlier to fix
    # a merge conflict; only subreddits gains a created_at timestamp.
    op.add_column('subreddits', sa.Column('created_at', sa.DateTime(), nullable=True))
    ### end Alembic commands ###


def _downgrade():
    """Shared downgrade body — exact reverse of :func:`_upgrade`."""
    op.drop_column('subreddits', 'created_at')
    op.drop_table('users')


# Per-engine entry points looked up by name in upgrade()/downgrade().
# All engines run the same DDL.
def upgrade_development():
    _upgrade()


def downgrade_development():
    _downgrade()


def upgrade_test():
    _upgrade()


def downgrade_test():
    _downgrade()


def upgrade_production():
    _upgrade()


def downgrade_production():
    _downgrade()
| {
"repo_name": "c4fcm/CivilServant",
"path": "alembic/versions/66a893890997_add_user_table.py",
"copies": "1",
"size": "4149",
"license": "mit",
"hash": 8519844441935635000,
"line_mean": 35.0782608696,
"line_max": 86,
"alpha_frac": 0.6649795131,
"autogenerated": false,
"ratio": 3.4431535269709546,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9514978481205634,
"avg_score": 0.018630911773064317,
"num_lines": 115
} |
"""add user table
Revision ID: bea3ba88c73
Revises: 382f554a8fe8
Create Date: 2015-04-07 13:45:22.348137
"""
# revision identifiers, used by Alembic.
revision = 'bea3ba88c73'
down_revision = '382f554a8fe8'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import text
def upgrade():
    """Create user_groups/users, link sessions to users, and seed defaults."""
    op.create_table(
        'user_groups',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=20), nullable=False),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('name')
    )
    op.create_table(
        'users',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('username', sa.String(length=30), nullable=False),
        sa.Column('password', sa.String(length=128), nullable=True),
        sa.Column('salt', sa.String(length=16), nullable=True),
        # -1 is used for the anonymous user below — presumably meaning
        # "unlimited"; confirm against application logic.
        sa.Column('allowed_machines', sa.Integer(), nullable=True),
        sa.Column('group_id', sa.Integer(), nullable=True),
        sa.Column('is_active', sa.Boolean(), nullable=True),
        sa.Column('date_joined', sa.DateTime(), nullable=True),
        sa.Column('last_login', sa.DateTime(), nullable=True),
        sa.Column('token', sa.String(length=50), nullable=True),
        sa.ForeignKeyConstraint(['group_id'], ['user_groups.id'], ondelete='SET DEFAULT'),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('salt'),
        sa.UniqueConstraint('username')
    )
    op.add_column('sessions', sa.Column('user_id', sa.Integer(), nullable=True))
    # Create default groups
    op.get_bind().execute(text("INSERT INTO user_groups (name) VALUES ('nogroup')"))
    op.get_bind().execute(text("INSERT INTO user_groups (name) VALUES ('admin')"))
    # Create default user
    # The anonymous user joins the group with the smallest id — the first
    # one inserted above ('nogroup'), assuming sequential id assignment.
    op.get_bind().execute(
        text("INSERT INTO users (group_id, is_active, username, allowed_machines) "
             "VALUES ((SELECT min(id) from user_groups), True, 'anonymous', -1)")
    )
def downgrade():
    """Unlink sessions from users and drop the auth tables."""
    op.drop_column('sessions', 'user_id')
    # users is dropped first because it references user_groups.
    for table_name in ('users', 'user_groups'):
        op.drop_table(table_name)
"repo_name": "2gis/vmmaster",
"path": "migrations/alembic/versions/bea3ba88c73_add_user_table.py",
"copies": "1",
"size": "2062",
"license": "mit",
"hash": -4853482215089434000,
"line_mean": 35.1929824561,
"line_max": 90,
"alpha_frac": 0.639185257,
"autogenerated": false,
"ratio": 3.512776831345826,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4651962088345826,
"avg_score": null,
"num_lines": null
} |
"""Add user table to support doorman and oauth authentication.
Revision ID: 0bc0a93ac867
Revises: fd28e46e46a6
Create Date: 2016-05-11 11:01:40.472139
"""
# revision identifiers, used by Alembic.
revision = '0bc0a93ac867'
down_revision = 'fd28e46e46a6'
from alembic import op
import sqlalchemy as sa
import doorman.database
def upgrade():
    """Create the ``user`` table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('user',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('username', sa.String(length=80), nullable=False),
        # email/password nullable — presumably because OAuth-only accounts
        # carry their identity in social_id instead; confirm.
        sa.Column('email', sa.String(), nullable=True),
        sa.Column('password', sa.String(), nullable=True),
        sa.Column('created_at', sa.DateTime(), nullable=False),
        sa.Column('social_id', sa.String(), nullable=True),
        sa.Column('first_name', sa.String(), nullable=True),
        sa.Column('last_name', sa.String(), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('username')
    )
    ### end Alembic commands ###
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # Reverse of upgrade().
    op.drop_table('user')
    ### end Alembic commands ###
| {
"repo_name": "mwielgoszewski/doorman",
"path": "migrations/versions/0bc0a93ac867_.py",
"copies": "1",
"size": "1141",
"license": "mit",
"hash": 4412238547602834000,
"line_mean": 29.0263157895,
"line_max": 64,
"alpha_frac": 0.6801051709,
"autogenerated": false,
"ratio": 3.4161676646706587,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45962728355706584,
"avg_score": null,
"num_lines": null
} |
"""Add user to comments table
Revision ID: 9bf5bf5ab706
Revises: 4d52d5579ec7
Create Date: 2016-01-06 11:46:54.929171
"""
# revision identifiers, used by Alembic.
revision = '9bf5bf5ab706'
down_revision = '4d52d5579ec7'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Attach a username to each comment and make core user columns NOT NULL."""
    op.add_column('comments', sa.Column('username', sa.String(length=20), nullable=False))
    # Tighten existing users columns from nullable to required.
    for column_name, length in (('email', 30), ('password', 180), ('username', 20)):
        op.alter_column('users', column_name,
                        existing_type=sa.VARCHAR(length=length),
                        nullable=False)
def downgrade():
    """Revert: users columns back to nullable, drop comments.username."""
    for column_name, length in (('username', 20), ('password', 180), ('email', 30)):
        op.alter_column('users', column_name,
                        existing_type=sa.VARCHAR(length=length),
                        nullable=True)
    op.drop_column('comments', 'username')
| {
"repo_name": "ma3lstrom/manga-cork",
"path": "mangacork/migrations/versions/9bf5bf5ab706_add_user_to_comments_table.py",
"copies": "1",
"size": "1372",
"license": "mit",
"hash": 7776079476446952000,
"line_mean": 30.1818181818,
"line_max": 90,
"alpha_frac": 0.6144314869,
"autogenerated": false,
"ratio": 3.69811320754717,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.481254469444717,
"avg_score": null,
"num_lines": null
} |
"""Add user to requests_log
Revision ID: 8edcc232c0f8
Revises: 93f2bab71572
Create Date: 2019-10-25 13:37:39.770010
"""
from alembic import op
import sqlalchemy as sa
import rdr_service.model.utils
from rdr_service.participant_enums import PhysicalMeasurementsStatus, QuestionnaireStatus, OrderStatus
from rdr_service.participant_enums import WithdrawalStatus, WithdrawalReason, SuspensionStatus, QuestionnaireDefinitionStatus
from rdr_service.participant_enums import EnrollmentStatus, Race, SampleStatus, OrganizationType, BiobankOrderStatus
from rdr_service.participant_enums import OrderShipmentTrackingStatus, OrderShipmentStatus
from rdr_service.participant_enums import MetricSetType, MetricsKey, GenderIdentity
from rdr_service.model.base import add_table_history_table, drop_table_history_table
from rdr_service.model.code import CodeType
from rdr_service.model.site_enums import SiteStatus, EnrollingStatus, DigitalSchedulingStatus, ObsoleteStatus
# revision identifiers, used by Alembic.
revision = '8edcc232c0f8'
down_revision = '93f2bab71572'
branch_labels = None
depends_on = None
def upgrade(engine_name):
    # Dispatch to the engine-specific upgrade function, e.g. upgrade_rdr().
    globals()["upgrade_%s" % engine_name]()


def downgrade(engine_name):
    # Dispatch to the engine-specific downgrade function.
    globals()["downgrade_%s" % engine_name]()


def upgrade_rdr():
    # ### commands auto generated by Alembic - please adjust! ###
    # Record a user string per logged request; nullable so existing rows
    # (and requests without one) remain valid.
    op.add_column('requests_log', sa.Column('user', sa.String(length=255), nullable=True))
    # ### end Alembic commands ###


def downgrade_rdr():
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('requests_log', 'user')
    # ### end Alembic commands ###


def upgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    # No metrics-database changes in this revision.
    pass
    # ### end Alembic commands ###


def downgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/8edcc232c0f8_add_user_to_requests_log.py",
"copies": "1",
"size": "1899",
"license": "bsd-3-clause",
"hash": -2578064286742979600,
"line_mean": 32.3157894737,
"line_max": 125,
"alpha_frac": 0.7488151659,
"autogenerated": false,
"ratio": 3.6240458015267176,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48728609674267176,
"avg_score": null,
"num_lines": null
} |
"""Add uses_proxy
Revision ID: 3812c222917d
Revises: 76668d91d087
Create Date: 2019-02-19 12:30:01.251441
"""
# revision identifiers, used by Alembic.
revision = '3812c222917d'
down_revision = '76668d91d087'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    # This revision bundles two changes: dropping the SAML users table
    # (presumably no longer used — confirm) and adding the proxy flag.
    op.drop_table(u'SiWaySAMLUsers')
    op.add_column('EmbedApplications', sa.Column('uses_proxy', sa.Boolean(), nullable=True))
    ### end Alembic commands ###
def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('EmbedApplications', 'uses_proxy')
    # Recreate SiWaySAMLUsers with its pre-revision schema (MySQL-specific:
    # latin1 charset, InnoDB engine).  Row data is not restored.
    op.create_table(u'SiWaySAMLUsers',
        sa.Column(u'id', mysql.INTEGER(display_width=11), nullable=False),
        sa.Column(u'email', mysql.VARCHAR(length=255), nullable=False),
        sa.Column(u'uid', mysql.INTEGER(display_width=11), autoincrement=False, nullable=False),
        sa.Column(u'employee_type', mysql.VARCHAR(length=255), nullable=False),
        sa.Column(u'full_name', mysql.VARCHAR(length=255), nullable=False),
        sa.Column(u'short_name', mysql.VARCHAR(length=255), nullable=False),
        sa.Column(u'school_name', mysql.VARCHAR(length=255), nullable=False),
        sa.Column(u'group', mysql.VARCHAR(length=255), nullable=False),
        sa.PrimaryKeyConstraint(u'id'),
        mysql_default_charset=u'latin1',
        mysql_engine=u'InnoDB'
    )
    ### end Alembic commands ###
| {
"repo_name": "morelab/labmanager",
"path": "alembic/versions/3812c222917d_add_uses_proxy.py",
"copies": "4",
"size": "1450",
"license": "bsd-2-clause",
"hash": 1187793352552614400,
"line_mean": 35.25,
"line_max": 92,
"alpha_frac": 0.7075862069,
"autogenerated": false,
"ratio": 3.265765765765766,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5973351972665766,
"avg_score": null,
"num_lines": null
} |
"""Add "Use this" buttons into files that contain Git merge conflicts."""
from __future__ import annotations
import itertools
import tkinter
from typing import Any, cast
from porcupine import get_tab_manager, tabs, utils
from porcupine.plugins.linenumbers import LineNumbers
setup_after = ["linenumbers"]
def find_merge_conflicts(textwidget: tkinter.Text) -> list[list[int]]:
    """Scan *textwidget* for git merge conflict markers.

    Returns one ``[start, middle, end]`` triple of line numbers per
    conflict, pointing at the ``<<<<<<<``, ``=======`` and ``>>>>>>>``
    lines.  If the markers don't form a consistent sequence, an empty
    list is returned so the file isn't treated as a merge conflict.
    """
    # For each marker kind: (state required before it, state after it).
    transitions = {
        "start": ("outside", "first"),
        "middle": ("first", "second"),
        "end": ("second", "outside"),
    }
    conflicts: list[list[int]] = []
    state = "outside"
    whole_text = textwidget.get("1.0", "end - 1 char")
    for lineno, line in enumerate(whole_text.split("\n"), start=1):
        # Line can have basically anything after '<<<<<<< ', even whitespace characters
        if line.startswith("<<<<<<< "):
            marker = "start"
        elif line == "=======":
            marker = "middle"
        elif line.startswith(">>>>>>> "):
            marker = "end"
        else:
            int("123")  # needed for coverage to notice that the continue runs
            continue
        required, after = transitions[marker]
        if state != required:
            # Something is funny. Maybe the file contains some things that make
            # it look like git merge conflict, but it really isn't that.
            return []
        state = after
        if marker == "start":
            conflicts.append([lineno])
        else:
            conflicts[-1].append(lineno)
    return conflicts if state == "outside" else []
tag_counter = itertools.count()
class ConflictDisplayer:
    """Highlights one git merge conflict and embeds "Use this" /
    "Edit manually" buttons on its marker lines."""

    # line numbers not stored to self because they may change as text is edited
    def __init__(
        self, textwidget: tkinter.Text, start_lineno: int, middle_lineno: int, end_lineno: int
    ) -> None:
        self.textwidget = textwidget
        n = next(tag_counter)
        # Unique tag names so several conflicts can coexist in one widget.
        self.part1_tag = f"merge_conflict_{n}_part1"
        self.middle_tag = f"merge_conflict_{n}_middle"
        self.part2_tag = f"merge_conflict_{n}_part2"
        part1_color = utils.mix_colors(self.textwidget["bg"], "magenta", 0.8)
        manual_color = utils.mix_colors(self.textwidget["bg"], "tomato", 0.8)
        part2_color = utils.mix_colors(self.textwidget["bg"], "cyan", 0.8)
        # TODO: also specify fg color
        self.part1_button = self.make_button(
            start_lineno, part1_color, text="Use this", command=self.use_part1
        )
        self.manual_button = self.make_button(
            middle_lineno, manual_color, text="Edit manually", command=self.stop_displaying
        )
        self.part2_button = self.make_button(
            end_lineno, part2_color, text="Use this", command=self.use_part2
        )
        textwidget.tag_config(self.part1_tag, background=part1_color)
        textwidget.tag_config(self.middle_tag, background=manual_color)
        textwidget.tag_config(self.part2_tag, background=part2_color)
        # Keep the selection highlight drawn on top of the conflict colors.
        textwidget.tag_lower(self.part1_tag, "sel")
        textwidget.tag_lower(self.middle_tag, "sel")
        textwidget.tag_lower(self.part2_tag, "sel")
        textwidget.tag_add(self.part1_tag, f"{start_lineno}.0", f"{middle_lineno}.0")
        textwidget.tag_add(self.middle_tag, f"{middle_lineno}.0", f"{middle_lineno + 1}.0")
        textwidget.tag_add(self.part2_tag, f"{middle_lineno + 1}.0", f"{end_lineno + 1}.0")
        self._stopped = False  # guards against running cleanup twice

    def make_button(self, lineno: int, bg_color: str, **options: Any) -> tkinter.Button:
        """Create a button embedded at the end of the given line."""
        # tkinter.Button to use custom color, that's more difficult with ttk
        button = tkinter.Button(
            self.textwidget, bg=bg_color, fg=utils.invert_color(bg_color), cursor="arrow", **options
        )

        def on_destroy(event: tkinter.Event[tkinter.Misc]) -> None:
            # after_idle needed to prevent segfault
            # https://core.tcl-lang.org/tk/tktview/54fe7a5e718423d16f4a11f9d672cd7bae7da39f
            self.textwidget.after_idle(self.stop_displaying)

        button.bind("<Destroy>", on_destroy, add=True)
        self.textwidget.window_create(f"{lineno}.0 lineend", window=button)  # type: ignore[no-untyped-call]
        return button

    # may get called multiple times
    def stop_displaying(self) -> None:
        """Destroy the buttons and delete the highlight tags; idempotent."""
        if self._stopped:
            return
        self._stopped = True
        self.part1_button.destroy()
        self.manual_button.destroy()
        self.part2_button.destroy()
        self.textwidget.tag_delete(self.part1_tag)
        self.textwidget.tag_delete(self.middle_tag)
        self.textwidget.tag_delete(self.part2_tag)

    def use_part1(self) -> None:
        """Keep the first version: delete from ``=======`` through
        ``>>>>>>>``, then the ``<<<<<<<`` line the button sits on."""
        self.textwidget.delete(f"{self.middle_tag}.first", f"{self.part2_tag}.last")
        self.textwidget.delete(
            f"{self.part1_button} linestart", f"{self.part1_button} linestart + 1 line"
        )
        self.stop_displaying()

    def use_part2(self) -> None:
        """Keep the second version: delete the ``>>>>>>>`` line, then
        everything from ``<<<<<<<`` through ``=======``."""
        self.textwidget.delete(
            f"{self.part2_button} linestart", f"{self.part2_button} linestart + 1 line"
        )
        self.textwidget.delete(f"{self.part1_tag}.first", f"{self.middle_tag}.last")
        self.stop_displaying()
def update_displayers(tab: tabs.FileTab, displayers: list[ConflictDisplayer]) -> None:
    """Throw away the tab's old conflict displayers and build fresh ones
    from the conflicts currently present in its text widget."""
    for old_displayer in displayers:
        old_displayer.stop_displaying()
    del displayers[:]
    for conflict_lines in find_merge_conflicts(tab.textwidget):
        displayers.append(ConflictDisplayer(tab.textwidget, *conflict_lines))
def on_new_filetab(tab: tabs.FileTab) -> None:
    """Set up merge conflict displaying for a newly created file tab.

    Creates displayers immediately, recreates them whenever the file is
    reloaded, and nudges the LineNumbers widget when the mouse enters the
    text widget so deleted conflict lines don't leave stale line numbers.
    """
    displayers: list[ConflictDisplayer] = []
    update_displayers(tab, displayers)
    tab.bind("<<Reloaded>>", (lambda event: update_displayers(tab, displayers)), add=True)
    for child in tab.left_frame.winfo_children():
        if isinstance(child, LineNumbers):
            tab.textwidget.bind(
                "<Enter>",
                (
                    # This runs after clicking "Use this" button, mouse <Enter>s text widget
                    # Don't know why this needs a small timeout instead of after_idle
                    # https://github.com/python/mypy/issues/9658
                    #
                    # Bug fix: bind `child` as a default argument. A plain
                    # closure late-binds the loop variable, so if several
                    # LineNumbers widgets ever existed, every callback would
                    # update only the last one found by this loop.
                    lambda event, child=child: tab.after(50, cast(Any, child).do_update)
                ),
                add=True,
            )
def setup() -> None:
    """Plugin entry point: run on_new_filetab for every new file tab."""
    get_tab_manager().add_filetab_callback(on_new_filetab)
| {
"repo_name": "Akuli/porcupine",
"path": "porcupine/plugins/mergeconflict.py",
"copies": "1",
"size": "6315",
"license": "mit",
"hash": -3236950126123988500,
"line_mean": 37.0421686747,
"line_max": 108,
"alpha_frac": 0.6183689628,
"autogenerated": false,
"ratio": 3.6230636833046472,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4741432646104647,
"avg_score": null,
"num_lines": null
} |
"""Add "Use this" buttons into files that contain Git merge conflicts."""
from __future__ import annotations
import itertools
import tkinter
import weakref
from typing import Any, List, cast
from porcupine import get_tab_manager, tabs, utils
def find_merge_conflicts(textwidget: tkinter.Text) -> List[List[int]]:
    """Scan *textwidget* for git merge conflict markers.

    Returns one ``[start, middle, end]`` triple of line numbers for each
    conflict (its ``<<<<<<<``, ``=======`` and ``>>>>>>>`` lines), or an
    empty list when the markers don't form a consistent sequence.
    """
    conflicts: List[List[int]] = []
    state = 'outside'
    last_lineno = int(textwidget.index('end - 1 char').split('.')[0])
    for lineno in range(1, last_lineno + 1):
        line = textwidget.get(f'{lineno}.0', f'{lineno}.0 lineend')
        # Line might contain whitespace characters after '<<<<<<< '
        if line.startswith('<<<<<<< '):
            required, after = 'outside', 'first'
        elif line == '=======':
            required, after = 'first', 'second'
        elif line.startswith('>>>>>>> '):
            required, after = 'second', 'outside'
        else:
            int("123")  # needed for coverage to notice that the continue runs
            continue
        if state != required:
            # Something is funny. Maybe the file contains some things that make
            # it look like git merge conflict, but it really isn't that.
            return []
        state = after
        if state == 'first':
            conflicts.append([lineno])
        else:
            conflicts[-1].append(lineno)
    return conflicts if state == 'outside' else []
tag_counter = itertools.count()
class ConflictDisplayer:
    """Highlights one git merge conflict and embeds "Use this" /
    "Edit manually" buttons on its marker lines."""

    # line numbers not stored to self because they may change as text is edited
    def __init__(self, textwidget: tkinter.Text, start_lineno: int, middle_lineno: int, end_lineno: int) -> None:
        self.textwidget = textwidget
        n = next(tag_counter)
        # Unique tag names so several conflicts can coexist in one widget.
        self.part1_tag = f'merge_conflict_{n}_part1'
        self.middle_tag = f'merge_conflict_{n}_middle'
        self.part2_tag = f'merge_conflict_{n}_part2'
        part1_color = utils.mix_colors(self.textwidget['bg'], 'magenta', 0.8)
        manual_color = utils.mix_colors(self.textwidget['bg'], 'tomato', 0.8)
        part2_color = utils.mix_colors(self.textwidget['bg'], 'cyan', 0.8)
        # TODO: also specify fg color
        self.part1_button = self.make_button(
            start_lineno, part1_color,
            text="Use this",
            command=self.use_part1,
        )
        self.manual_button = self.make_button(
            middle_lineno, manual_color,
            text="Edit manually",
            command=self.stop_displaying,
        )
        self.part2_button = self.make_button(
            end_lineno, part2_color,
            text="Use this",
            command=self.use_part2,
        )
        textwidget.tag_config(self.part1_tag, background=part1_color)
        textwidget.tag_config(self.middle_tag, background=manual_color)
        textwidget.tag_config(self.part2_tag, background=part2_color)
        # Keep the selection highlight drawn on top of the conflict colors.
        textwidget.tag_lower(self.part1_tag, 'sel')
        textwidget.tag_lower(self.middle_tag, 'sel')
        textwidget.tag_lower(self.part2_tag, 'sel')
        textwidget.tag_add(self.part1_tag, f'{start_lineno}.0', f'{middle_lineno}.0')
        textwidget.tag_add(self.middle_tag, f'{middle_lineno}.0', f'{middle_lineno + 1}.0')
        textwidget.tag_add(self.part2_tag, f'{middle_lineno + 1}.0', f'{end_lineno + 1}.0')
        self._stopped = False  # guards against running cleanup twice

    def make_button(self, lineno: int, bg_color: str, **options: Any) -> tkinter.Button:
        """Create a button embedded at the end of the given line."""
        # tkinter.Button to use custom color, that's more difficult with ttk
        button = tkinter.Button(
            self.textwidget,
            bg=bg_color,
            fg=utils.invert_color(bg_color),
            cursor='arrow',
            **options
        )

        def on_destroy(event: tkinter.Event) -> None:
            # after_idle needed to prevent segfault
            # https://core.tcl-lang.org/tk/tktview/54fe7a5e718423d16f4a11f9d672cd7bae7da39f
            self.textwidget.after_idle(self.stop_displaying)

        button.bind('<Destroy>', on_destroy, add=True)
        self.textwidget.window_create(f'{lineno}.0 lineend', window=button)
        return button

    # may get called multiple times
    def stop_displaying(self) -> None:
        """Destroy the buttons and delete the highlight tags; idempotent."""
        if self._stopped:
            return
        self._stopped = True
        self.part1_button.destroy()
        self.manual_button.destroy()
        self.part2_button.destroy()
        self.textwidget.tag_delete(self.part1_tag)
        self.textwidget.tag_delete(self.middle_tag)
        self.textwidget.tag_delete(self.part2_tag)

    def use_part1(self) -> None:
        """Keep the first version: delete from ``=======`` through
        ``>>>>>>>``, then the ``<<<<<<<`` line the button sits on."""
        self.textwidget.delete(f'{self.middle_tag}.first', f'{self.part2_tag}.last')
        self.textwidget.delete(f'{self.part1_button} linestart', f'{self.part1_button} linestart + 1 line')
        self.stop_displaying()

    def use_part2(self) -> None:
        """Keep the second version: delete the ``>>>>>>>`` line, then
        everything from ``<<<<<<<`` through ``=======``."""
        self.textwidget.delete(f'{self.part2_button} linestart', f'{self.part2_button} linestart + 1 line')
        self.textwidget.delete(f'{self.part1_tag}.first', f'{self.middle_tag}.last')
        self.stop_displaying()
conflict_displayers: weakref.WeakKeyDictionary[tabs.FileTab, List[ConflictDisplayer]] = weakref.WeakKeyDictionary()
def setup_displayers(tab: tabs.FileTab) -> None:
    """Recreate this tab's conflict displayers from the current text."""
    existing = conflict_displayers.setdefault(tab, [])
    for old_displayer in existing:
        old_displayer.stop_displaying()
    del existing[:]
    for conflict_lines in find_merge_conflicts(tab.textwidget):
        existing.append(ConflictDisplayer(tab.textwidget, *conflict_lines))
def on_new_tab(tab: tabs.Tab) -> None:
    """Attach merge conflict displaying to every new file tab.

    Displayers are rebuilt on file reload, and line numbers are refreshed
    when the mouse enters the text widget (e.g. after clicking a button).
    """
    if isinstance(tab, tabs.FileTab):
        setup_displayers(tab)
        # https://github.com/python/mypy/issues/9658
        tab.bind('<<Reloaded>>', (lambda event: setup_displayers(cast(tabs.FileTab, tab))), add=True)
        tab.textwidget.bind('<Enter>', (
            # This runs after clicking "Use this" button, mouse <Enter>s text widget
            # Don't know why this needs a small timeout instead of after_idle
            lambda event: tab.after(50, tab.textwidget.event_generate, '<<UpdateLineNumbers>>')  # type: ignore
        ), add=True)
def setup() -> None:
    """Plugin entry point: run on_new_tab for every new tab."""
    get_tab_manager().add_tab_callback(on_new_tab)
| {
"repo_name": "Akuli/editor",
"path": "porcupine/plugins/mergeconflict.py",
"copies": "1",
"size": "6280",
"license": "mit",
"hash": 1668105044968537900,
"line_mean": 36.6047904192,
"line_max": 115,
"alpha_frac": 0.6202229299,
"autogenerated": false,
"ratio": 3.5906232132647227,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47108461431647225,
"avg_score": null,
"num_lines": null
} |
"""add uuid and status to networkingaction
Revision ID: 89ff8a6d72b2
Revises: 7acb050f783c
Create Date: 2018-01-18 14:04:09.553012
"""
from alembic import op
from sqlalchemy.orm import Session
import sqlalchemy as sa
import uuid
from hil import model
# revision identifiers, used by Alembic.
revision = '89ff8a6d72b2'       # this migration's id
down_revision = '7acb050f783c'  # migration applied immediately before this one
branch_labels = None
# pylint: disable=missing-docstring
def upgrade():
    """Add uuid/status columns to networking_action and backfill rows."""
    # Add as nullable first so existing rows don't violate constraints.
    op.add_column('networking_action', sa.Column('status', sa.String(),
                                                 nullable=True))
    op.add_column('networking_action', sa.Column('uuid', sa.String(),
                                                 nullable=True))
    op.create_index(op.f('ix_networking_action_uuid'), 'networking_action',
                    ['uuid'], unique=False)
    conn = op.get_bind()
    session = Session(bind=conn)
    # Backfill: every pre-existing action gets a fresh uuid and PENDING status.
    for item in session.query(model.NetworkingAction):
        item.uuid = str(uuid.uuid4())
        item.status = 'PENDING'
    session.commit()
    session.close()
    # Every row now has values, so NOT NULL can be enforced.
    op.alter_column('networking_action', 'status', nullable=False)
    op.alter_column('networking_action', 'uuid', nullable=False)
def downgrade():
    """Drop the uuid/status columns; completed/errored rows are deleted
    first since the old schema cannot represent their state."""
    op.execute("DELETE from networking_action "
               "WHERE status = 'DONE' or status = 'ERROR'")
    op.drop_index(op.f('ix_networking_action_uuid'),
                  table_name='networking_action')
    op.drop_column('networking_action', 'uuid')
    op.drop_column('networking_action', 'status')
| {
"repo_name": "SahilTikale/haas",
"path": "hil/migrations/versions/89ff8a6d72b2_add_uuid_and_status_to_networkingaction.py",
"copies": "4",
"size": "1444",
"license": "apache-2.0",
"hash": 7273220522063113000,
"line_mean": 28.4693877551,
"line_max": 75,
"alpha_frac": 0.6627423823,
"autogenerated": false,
"ratio": 3.4299287410926365,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6092671123392637,
"avg_score": null,
"num_lines": null
} |
"""Add validation bool
Revision ID: e9210385ec47
Revises: 8af5f324577a
Create Date: 2016-07-04 04:07:20.909282
"""
# revision identifiers, used by Alembic.
revision = 'e9210385ec47'       # this migration's id
down_revision = '8af5f324577a'  # migration applied immediately before this one
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy_utils.types import TSVectorType
from sqlalchemy_searchable import make_searchable
import sqlalchemy_utils
# Patch in knowledge of the citext type, so it reflects properly.
from sqlalchemy.dialects.postgresql.base import ischema_names
import citext
import queue
import datetime
from sqlalchemy.dialects.postgresql import ENUM
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.dialects.postgresql import TSVECTOR
ischema_names['citext'] = citext.CIText
def upgrade():
    """Add a nullable ``validated`` flag to nu_outbound_wrappers."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('nu_outbound_wrappers', sa.Column('validated', sa.Boolean(), nullable=True))
    ### end Alembic commands ###
def downgrade():
    """Drop the ``validated`` flag again."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('nu_outbound_wrappers', 'validated')
    ### end Alembic commands ###
| {
"repo_name": "fake-name/ReadableWebProxy",
"path": "alembic/versions/00010_e9210385ec47_add_validation_bool.py",
"copies": "1",
"size": "1168",
"license": "bsd-3-clause",
"hash": -6680131066190072000,
"line_mean": 26.1627906977,
"line_max": 94,
"alpha_frac": 0.761130137,
"autogenerated": false,
"ratio": 3.5938461538461537,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48549762908461536,
"avg_score": null,
"num_lines": null
} |
"""Add vector style table
Revision ID: 19e97a222003
Revises: 1cb8168b89d8
Create Date: 2020-01-09 09:37:25.499564
"""
from alembic import op, context
from sqlalchemy import Column
from sqlalchemy.types import INTEGER, Unicode, VARCHAR
# revision identifiers, used by Alembic.
revision = '19e97a222003'       # this migration's id
down_revision = '1cb8168b89d8'  # migration applied immediately before this one
branch_labels = None
depends_on = None
def upgrade():
    """Create the lux_userconfig table and the daily-restarting casipo
    sequence machinery (plpgsql)."""
    schema = context.get_context().config.get_main_option("schema")
    op.create_table(
        "lux_userconfig",
        Column("id", INTEGER, primary_key=True),
        Column("key", Unicode, nullable=False),
        Column("style", VARCHAR, nullable=False),
        Column("user_login", Unicode, nullable=False),
        schema=schema,
    )
    # casipo_seq_day records the day (as days since 1900-01-01) on which the
    # main sequence was last restarted; nextval_casipo_daily() restarts the
    # main sequence on the first call of each new day, then returns nextval.
    op.execute(
        "do language plpgsql $$ "
        "begin "
        "execute 'create sequence casipo_seq_day start with ' || (current_date - '1900-01-01')::varchar; "
        "end; $$; "
        "select nextval('casipo_seq_day'); "
        "create sequence casipo_seq; "
        "create or replace function nextval_casipo_daily(in p_seq varchar) returns bigint as $$ "
        "declare "
        "dd bigint; "
        "lv bigint; "
        "begin "
        "select current_date - '1900-01-01'::date into dd; "
        "execute 'select last_value from '||p_seq||'_day' into lv; "
        "if dd - lv > 0 then "
        "execute 'alter sequence '||p_seq||' restart'; "
        "execute 'alter sequence '||p_seq||'_day restart with '||dd::varchar; "
        "execute 'select nextval('''||p_seq||'_day'')' into lv; "
        "end if; "
        "return nextval(p_seq); "
        "end; $$ language plpgsql;")
def downgrade():
    """Drop the lux_userconfig table.  NOTE(review): the casipo sequences
    and function created by upgrade() are not dropped here."""
    schema = context.get_context().config.get_main_option("schema")
    op.drop_table("lux_userconfig", schema=schema)
| {
"repo_name": "Geoportail-Luxembourg/geoportailv3",
"path": "geoportal/LUX_alembic/versions/19e97a222003_add_vector_style_table.py",
"copies": "1",
"size": "1786",
"license": "mit",
"hash": -6291938271962434000,
"line_mean": 32.0740740741,
"line_max": 106,
"alpha_frac": 0.6114221725,
"autogenerated": false,
"ratio": 3.3074074074074074,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44188295799074073,
"avg_score": null,
"num_lines": null
} |
"""add vendor, opportunity, category models
Revision ID: 29562eda8fbc
Revises: 3473ff14af7e
Create Date: 2015-05-28 02:31:47.039725
"""
# revision identifiers, used by Alembic.
revision = '29562eda8fbc'       # this migration's id
down_revision = '3473ff14af7e'  # migration applied immediately before this one
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create category, vendor and opportunity tables plus the
    category/vendor association table, and add columns to app_status
    and line_item."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('category',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('nigp_code', sa.Integer(), nullable=True),
        sa.Column('category', sa.String(length=255), nullable=True),
        sa.Column('subcategory', sa.String(length=255), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_category_id'), 'category', ['id'], unique=False)
    op.create_table('vendor',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('business_name', sa.String(length=255), nullable=False),
        sa.Column('email', sa.String(length=80), nullable=False),
        sa.Column('created_at', sa.DateTime(), nullable=False),
        sa.Column('first_name', sa.String(length=30), nullable=True),
        sa.Column('last_name', sa.String(length=30), nullable=True),
        sa.Column('phone_number', sa.String(length=20), nullable=True),
        sa.Column('fax_number', sa.String(length=20), nullable=True),
        sa.Column('minority_owned', sa.Boolean(), nullable=True),
        sa.Column('veteran_owned', sa.Boolean(), nullable=True),
        sa.Column('woman_owned', sa.Boolean(), nullable=True),
        sa.Column('disadvantaged_owned', sa.Boolean(), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('email')
    )
    op.create_index(op.f('ix_vendor_id'), 'vendor', ['id'], unique=False)
    # Many-to-many link between categories and vendors.
    op.create_table('category_vendor_association',
        sa.Column('category_id', sa.Integer(), nullable=True),
        sa.Column('vendor_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['category_id'], ['category.id'], ondelete='SET NULL'),
        sa.ForeignKeyConstraint(['vendor_id'], ['vendor.id'], ondelete='SET NULL')
    )
    op.create_index(op.f('ix_category_vendor_association_category_id'), 'category_vendor_association', ['category_id'], unique=False)
    op.create_index(op.f('ix_category_vendor_association_vendor_id'), 'category_vendor_association', ['vendor_id'], unique=False)
    op.create_table('opportunity',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('contract_id', sa.Integer(), nullable=False),
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('title', sa.String(length=255), nullable=True),
        sa.Column('department', sa.String(length=255), nullable=True),
        sa.Column('description', sa.Text(), nullable=True),
        sa.Column('category_id', sa.Integer(), nullable=False),
        sa.Column('bid_open', sa.DateTime(), nullable=True),
        sa.ForeignKeyConstraint(['category_id'], ['category.id'], ondelete='SET NULL'),
        sa.ForeignKeyConstraint(['contract_id'], ['contract.id'], ondelete='cascade'),
        sa.PrimaryKeyConstraint('id')
    )
    op.add_column('app_status', sa.Column('county_max_deadline', sa.DateTime(), nullable=True))
    op.add_column('line_item', sa.Column('percentage', sa.Boolean(), nullable=True))
    ### end Alembic commands ###
def downgrade():
    """Drop the tables/columns added by upgrade(), dependents first."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('opportunity')
    op.drop_index(op.f('ix_category_vendor_association_vendor_id'), table_name='category_vendor_association')
    op.drop_index(op.f('ix_category_vendor_association_category_id'), table_name='category_vendor_association')
    op.drop_table('category_vendor_association')
    op.drop_index(op.f('ix_vendor_id'), table_name='vendor')
    op.drop_table('vendor')
    op.drop_index(op.f('ix_category_id'), table_name='category')
    op.drop_table('category')
    op.drop_column('line_item', 'percentage')
    op.drop_column('app_status', 'county_max_deadline')
    ### end Alembic commands ###
| {
"repo_name": "ajb/pittsburgh-purchasing-suite",
"path": "migrations/versions/29562eda8fbc_add_vendor_opportunity_category_models.py",
"copies": "3",
"size": "3927",
"license": "bsd-3-clause",
"hash": 1356632472473640000,
"line_mean": 46.8902439024,
"line_max": 133,
"alpha_frac": 0.6809269162,
"autogenerated": false,
"ratio": 3.3911917098445596,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.557211862604456,
"avg_score": null,
"num_lines": null
} |
"""Add verbose_name column
Revision ID: f7ab9ee32bdf
Revises: 55893713f6b7
Create Date: 2018-10-23 11:45:07.880418
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = 'f7ab9ee32bdf'       # this migration's id
down_revision = '55893713f6b7'  # migration applied immediately before this one
branch_labels = None
depends_on = None
def upgrade():
    """Replace rooms.name with an optional verbose_name, populated only
    where the old name differed from the Building-Floor-Number form."""
    op.add_column('rooms', sa.Column('verbose_name', sa.String(), nullable=True), schema='roombooking')
    # set verbose_name only when the name is non-standard (B-F-N)
    op.execute('UPDATE roombooking.rooms SET verbose_name = name '
               "WHERE name != format('%s-%s-%s', building, floor, number)")
    op.create_check_constraint('verbose_name_not_empty', 'rooms', "verbose_name != ''", schema='roombooking')
    op.drop_column('rooms', 'name', schema='roombooking')
def downgrade():
    """Restore rooms.name: synthesize Building-Floor-Number names where no
    verbose_name was set, otherwise copy verbose_name back."""
    op.add_column('rooms',
                  sa.Column('name', sa.String(), nullable=True),
                  schema='roombooking')
    op.execute("UPDATE roombooking.rooms SET name = format('%s-%s-%s', building, floor, number) "
               'WHERE verbose_name IS NULL')
    op.execute('UPDATE roombooking.rooms SET name = verbose_name '
               'WHERE verbose_name IS NOT NULL')
    # Every row now has a name, so NOT NULL can be enforced.
    op.alter_column('rooms', 'name', nullable=False, schema='roombooking')
    op.drop_column('rooms', 'verbose_name', schema='roombooking')
| {
"repo_name": "indico/indico",
"path": "indico/migrations/versions/20181023_1103_f7ab9ee32bdf_add_verbose_name_column.py",
"copies": "1",
"size": "1356",
"license": "mit",
"hash": 7482217238138145000,
"line_mean": 35.6486486486,
"line_max": 109,
"alpha_frac": 0.6615044248,
"autogenerated": false,
"ratio": 3.348148148148148,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4509652572948148,
"avg_score": null,
"num_lines": null
} |
"""Add verification table
Revision ID: 90ac01a2df
Revises: df61cfff356e
Create Date: 2016-04-16 17:28:20.778467
"""
# revision identifiers, used by Alembic.
revision = '90ac01a2df'         # this migration's id
down_revision = 'df61cfff356e'  # migration applied immediately before this one
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Create the verification table with indexes on expires and ip4."""
    op.create_table('verification',
        sa.Column('verification_id', sa.String(length=32), nullable=False),
        sa.Column('ip4', sa.BigInteger(), nullable=False),
        sa.Column('expires', sa.BigInteger(), nullable=False),
        sa.Column('data', postgresql.JSON(), nullable=False),
        sa.PrimaryKeyConstraint('verification_id')
    )
    op.create_index(op.f('ix_verification_expires'), 'verification', ['expires'], unique=False)
    op.create_index(op.f('ix_verification_ip4'), 'verification', ['ip4'], unique=False)
def downgrade():
    """Drop the verification table and its indexes."""
    op.drop_index(op.f('ix_verification_ip4'), table_name='verification')
    op.drop_index(op.f('ix_verification_expires'), table_name='verification')
    op.drop_table('verification')
| {
"repo_name": "Floens/uchan",
"path": "migrations/versions/90ac01a2df_add_verification_table.py",
"copies": "1",
"size": "1083",
"license": "mit",
"hash": 6111046986593570000,
"line_mean": 30.8529411765,
"line_max": 95,
"alpha_frac": 0.7156048015,
"autogenerated": false,
"ratio": 3.311926605504587,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4527531407004587,
"avg_score": null,
"num_lines": null
} |
"""add Vietnam Geo DB
Revision ID: 32c1cbb13ec2
Revises: b91f6260d520
Create Date: 2017-07-19 11:31:03.582990
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '32c1cbb13ec2'       # this migration's id
down_revision = 'b91f6260d520'  # migration applied immediately before this one
def upgrade():
    """Replace the flat region table with province/district/ward geo
    tables and repoint associate_group, group and user at them."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('province',
                    sa.Column('province_id', sa.String(length=64),
                              nullable=False),
                    sa.Column('name', sa.String(length=100), nullable=False),
                    sa.Column('type', sa.String(length=30), nullable=False),
                    sa.PrimaryKeyConstraint('province_id'),
                    mysql_default_charset='utf8'
                    )
    op.create_index(op.f('ix_province_name'), 'province', ['name'],
                    unique=False)
    op.create_table('district',
                    sa.Column('district_id', sa.String(length=64),
                              nullable=False),
                    sa.Column('name', sa.String(length=100), nullable=False),
                    sa.Column('type', sa.String(length=30), nullable=False),
                    sa.Column('location', sa.String(length=30),
                              nullable=False),
                    sa.Column('province_id', sa.String(length=64),
                              nullable=False),
                    sa.ForeignKeyConstraint(['province_id'],
                                            ['province.province_id'], ),
                    sa.PrimaryKeyConstraint('district_id'),
                    mysql_default_charset='utf8'
                    )
    op.create_index(op.f('ix_district_name'), 'district', ['name'],
                    unique=False)
    op.create_index(op.f('ix_district_province_id'), 'district',
                    ['province_id'], unique=False)
    op.create_table('ward',
                    sa.Column('ward_id', sa.String(length=64), nullable=False),
                    sa.Column('name', sa.String(length=100), nullable=False),
                    sa.Column('type', sa.String(length=100), nullable=False),
                    sa.Column('location', sa.String(length=100),
                              nullable=False),
                    sa.Column('district_id', sa.String(length=100),
                              nullable=False),
                    sa.ForeignKeyConstraint(['district_id'],
                                            ['district.district_id'], ),
                    sa.PrimaryKeyConstraint('ward_id'),
                    mysql_default_charset='utf8'
                    )
    op.create_index(op.f('ix_ward_district_id'), 'ward', ['district_id'],
                    unique=False)
    op.create_index(op.f('ix_ward_name'), 'ward', ['name'], unique=False)
    # Switch associate_group from region to province.
    op.add_column('associate_group',
                  sa.Column('province_id', sa.String(length=64),
                            nullable=True))
    op.drop_constraint('associate_group_ibfk_1', 'associate_group',
                       type_='foreignkey')
    op.create_foreign_key(None, 'associate_group', 'province', ['province_id'],
                          ['province_id'])
    op.drop_column('associate_group', 'region_id')
    # Switch group from region + free-text fields to geo foreign keys.
    op.add_column('group',
                  sa.Column('district_id', sa.String(length=64),
                            nullable=True))
    op.add_column('group',
                  sa.Column('province_id', sa.String(length=64),
                            nullable=True))
    op.add_column('group',
                  sa.Column('ward_id', sa.String(length=64), nullable=True))
    op.create_index(op.f('ix_group_district_id'), 'group', ['district_id'],
                    unique=False)
    op.create_index(op.f('ix_group_province_id'), 'group', ['province_id'],
                    unique=False)
    op.create_index(op.f('ix_group_ward_id'), 'group', ['ward_id'],
                    unique=False)
    op.drop_constraint('group_ibfk_2', 'group', type_='foreignkey')
    op.create_foreign_key(None, 'group', 'district', ['district_id'],
                          ['district_id'])
    op.create_foreign_key(None, 'group', 'province', ['province_id'],
                          ['province_id'])
    op.create_foreign_key(None, 'group', 'ward', ['ward_id'], ['ward_id'])
    op.drop_column('group', 'province')
    op.drop_column('group', 'region_id')
    op.drop_column('group', 'ward')
    op.drop_column('group', 'district')
    # Switch user from region to province.
    op.add_column('user',
                  sa.Column('province_id', sa.String(length=64),
                            nullable=True))
    op.drop_constraint('user_ibfk_1', 'user', type_='foreignkey')
    op.create_foreign_key(None, 'user', 'province', ['province_id'],
                          ['province_id'])
    op.drop_column('user', 'region_id')
    op.drop_table('region')
    # ### end Alembic commands ###
def downgrade():
    """Recreate the region table and region foreign keys, then drop the
    province/district/ward tables (data is not restored)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('user', sa.Column('region_id', mysql.VARCHAR(length=64),
                                    nullable=True))
    op.drop_constraint(None, 'user', type_='foreignkey')
    op.create_foreign_key('user_ibfk_1', 'user', 'region', ['region_id'],
                          ['id'])
    op.drop_column('user', 'province_id')
    op.add_column('group', sa.Column('district', mysql.VARCHAR(length=255),
                                     nullable=True))
    op.add_column('group',
                  sa.Column('ward', mysql.VARCHAR(length=255), nullable=True))
    op.add_column('group', sa.Column('region_id', mysql.VARCHAR(length=64),
                                     nullable=True))
    op.add_column('group', sa.Column('province', mysql.VARCHAR(length=255),
                                     nullable=True))
    op.drop_constraint(None, 'group', type_='foreignkey')
    op.drop_constraint(None, 'group', type_='foreignkey')
    op.drop_constraint(None, 'group', type_='foreignkey')
    op.create_foreign_key('group_ibfk_2', 'group', 'region', ['region_id'],
                          ['id'])
    op.drop_index(op.f('ix_group_ward_id'), table_name='group')
    op.drop_index(op.f('ix_group_province_id'), table_name='group')
    op.drop_index(op.f('ix_group_district_id'), table_name='group')
    op.drop_column('group', 'ward_id')
    op.drop_column('group', 'province_id')
    op.drop_column('group', 'district_id')
    op.add_column('associate_group',
                  sa.Column('region_id', mysql.VARCHAR(length=64),
                            nullable=True))
    op.drop_constraint(None, 'associate_group', type_='foreignkey')
    op.create_foreign_key('associate_group_ibfk_1', 'associate_group',
                          'region',
                          ['region_id'], ['id'])
    op.drop_column('associate_group', 'province_id')
    op.create_table('region',
                    sa.Column('id', mysql.VARCHAR(length=64), nullable=False),
                    sa.Column('region_code', mysql.VARCHAR(length=64),
                              nullable=True),
                    sa.Column('name', mysql.VARCHAR(length=64), nullable=True),
                    sa.Column('description', mysql.VARCHAR(length=255),
                              nullable=True),
                    sa.PrimaryKeyConstraint('id'),
                    mysql_default_charset='utf8',
                    mysql_engine='InnoDB'
                    )
    op.drop_index(op.f('ix_ward_name'), table_name='ward')
    op.drop_index(op.f('ix_ward_district_id'), table_name='ward')
    op.drop_table('ward')
    op.drop_index(op.f('ix_district_province_id'), table_name='district')
    op.drop_index(op.f('ix_district_name'), table_name='district')
    op.drop_table('district')
    op.drop_index(op.f('ix_province_name'), table_name='province')
    op.drop_table('province')
    # ### end Alembic commands ###
| {
"repo_name": "hieulq/pgscm",
"path": "migrations/versions/32c1cbb13ec2_add_geo_db.py",
"copies": "2",
"size": "7921",
"license": "apache-2.0",
"hash": 4931574541742558000,
"line_mean": 48.198757764,
"line_max": 79,
"alpha_frac": 0.5290998611,
"autogenerated": false,
"ratio": 3.8488824101068997,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 161
} |
"""Add view column to setting table
Revision ID: 59729e468045
Revises: 787bdba9e147
Create Date: 2018-08-17 16:17:31.058782
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '59729e468045'       # this migration's id
down_revision = '787bdba9e147'  # migration applied immediately before this one
branch_labels = None
depends_on = None
def update_data():
    """Assign the 'basic' view to the pre-existing settings (ids 1-7)
    and insert the new pdns/authentication settings (ids 8-41)."""
    # Lightweight table definition for data-only operations.
    setting_table = sa.sql.table('setting',
                                 sa.sql.column('id', sa.Integer),
                                 sa.sql.column('name', sa.String),
                                 sa.sql.column('value', sa.String),
                                 sa.sql.column('view', sa.String)
                                 )
    # just update previous records which have id <= 7
    op.execute(
        setting_table.update().where(setting_table.c.id <= 7).values({'view': 'basic'})
    )
    # add more new settings
    op.bulk_insert(setting_table,
                   [
                       {'id': 8, 'name': 'pretty_ipv6_ptr', 'value': 'False', 'view': 'basic'},
                       {'id': 9, 'name': 'dnssec_admins_only', 'value': 'False', 'view': 'basic'},
                       {'id': 10, 'name': 'bg_domain_updates', 'value': 'False', 'view': 'basic'},
                       {'id': 11, 'name': 'site_name', 'value': 'PowerDNS-Admin', 'view': 'basic'},
                       {'id': 12, 'name': 'pdns_api_url', 'value': '', 'view': 'pdns'},
                       {'id': 13, 'name': 'pdns_api_key', 'value': '', 'view': 'pdns'},
                       {'id': 14, 'name': 'pdns_version', 'value': '4.1.1', 'view': 'pdns'},
                       {'id': 15, 'name': 'local_db_enabled', 'value': 'True', 'view': 'authentication'},
                       {'id': 16, 'name': 'signup_enabled', 'value': 'True', 'view': 'authentication'},
                       {'id': 17, 'name': 'ldap_enabled', 'value': 'False', 'view': 'authentication'},
                       {'id': 18, 'name': 'ldap_type', 'value': 'ldap', 'view': 'authentication'},
                       {'id': 19, 'name': 'ldap_uri', 'value': '', 'view': 'authentication'},
                       {'id': 20, 'name': 'ldap_base_dn', 'value': '', 'view': 'authentication'},
                       {'id': 21, 'name': 'ldap_admin_username', 'value': '', 'view': 'authentication'},
                       {'id': 22, 'name': 'ldap_admin_password', 'value': '', 'view': 'authentication'},
                       {'id': 23, 'name': 'ldap_filter_basic', 'value': '', 'view': 'authentication'},
                       {'id': 24, 'name': 'ldap_filter_username', 'value': '', 'view': 'authentication'},
                       {'id': 25, 'name': 'ldap_sg_enabled', 'value': 'False', 'view': 'authentication'},
                       {'id': 26, 'name': 'ldap_admin_group', 'value': '', 'view': 'authentication'},
                       {'id': 27, 'name': 'ldap_user_group', 'value': '', 'view': 'authentication'},
                       {'id': 28, 'name': 'github_oauth_enabled', 'value': 'False', 'view': 'authentication'},
                       {'id': 29, 'name': 'github_oauth_key', 'value': '', 'view': 'authentication'},
                       {'id': 30, 'name': 'github_oauth_secret', 'value': '', 'view': 'authentication'},
                       {'id': 31, 'name': 'github_oauth_scope', 'value': 'email', 'view': 'authentication'},
                       {'id': 32, 'name': 'github_oauth_api_url', 'value': 'https://api.github.com/user', 'view': 'authentication'},
                       {'id': 33, 'name': 'github_oauth_token_url', 'value': 'https://github.com/login/oauth/access_token', 'view': 'authentication'},
                       {'id': 34, 'name': 'github_oauth_authorize_url', 'value': 'https://github.com/login/oauth/authorize', 'view': 'authentication'},
                       {'id': 35, 'name': 'google_oauth_enabled', 'value': 'False', 'view': 'authentication'},
                       {'id': 36, 'name': 'google_oauth_client_id', 'value': '', 'view': 'authentication'},
                       {'id': 37, 'name': 'google_oauth_client_secret', 'value': '', 'view': 'authentication'},
                       {'id': 38, 'name': 'google_token_url', 'value': 'https://accounts.google.com/o/oauth2/token', 'view': 'authentication'},
                       {'id': 39, 'name': 'google_token_params', 'value': "{'scope': 'email profile'}", 'view': 'authentication'},
                       {'id': 40, 'name': 'google_authorize_url', 'value': 'https://accounts.google.com/o/oauth2/auth', 'view': 'authentication'},
                       {'id': 41, 'name': 'google_base_url', 'value': 'https://www.googleapis.com/oauth2/v1/', 'view': 'authentication'},
                   ]
                   )
def upgrade():
    """Apply the migration: add the nullable ``view`` column to ``setting``.

    The column groups settings into admin-UI views/tabs. After the schema
    change, ``update_data`` (presumably defined earlier in this migration)
    back-fills the new column and inserts the new setting rows.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('setting', sa.Column('view', sa.String(length=64), nullable=True))
    # ### end Alembic commands ###
    # update data for new schema
    update_data()
def downgrade():
    """Revert the migration: drop ``setting.view`` and the rows added here.

    Rows with id > 7 are the settings inserted by this revision's
    ``update_data``; ids 1-7 predate it and are kept.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    ## NOTE:
    ##  - Drop action does not work on sqlite3
    ##  - This action touches the `setting` table which loaded in views.py
    ##    during app initlization, so the downgrade function won't work
    ##    unless we temporary remove importing `views` from `app/__init__.py`
    op.drop_column('setting', 'view')
    # delete added records in previous version
    op.execute("DELETE FROM setting WHERE id > 7")
    # ### end Alembic commands ###
| {
"repo_name": "ngoduykhanh/PowerDNS-Admin",
"path": "migrations/versions/59729e468045_add_view_column_to_setting_table.py",
"copies": "1",
"size": "4979",
"license": "mit",
"hash": 4413281786779395600,
"line_mean": 53.1195652174,
"line_max": 140,
"alpha_frac": 0.5591484234,
"autogenerated": false,
"ratio": 3.419642857142857,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9441929497651516,
"avg_score": 0.007372356578268322,
"num_lines": 92
} |
#%% addViscous.py
"""
Viscous correction code to amend Cart3D solutions based on streamline running
length.
Alexander Ward
April 2017
"""
from math import exp, log, asin
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy.optimize import brentq
import time as time
#%% Inputs
"""
Here are the majority of the user defined parameters. They are grouped into:
    Simulation - Parameters on the simulation itself
    Freestream - Freestream gas properties
    Vehicle - Reference area, wall temperature
    Post Processing - What properties if any do you want to save or report?

Note: the {bool} flags below are stored as 0/1 integers and used in truth
tests throughout the code.
"""
""" -- SIMULATION -- """
#------------
# Cart3D solution filename {str}
cart3DFilename = 'Components.i.triq'
#------------
# Streamline filename {str}
streamlineFilename = '4.csv'
#------------
# Number of Tecplot generated streamlines {int}
nStreamlines = 68
#------------
# maximum number of steps in the the Tecplot streamlines {int}
maxSteps = 19999
#------------
# If you don't have streamlines available, set to 0 to guess the development
# length based on flow direction (positive x) NOTE: If the vehicle starts in
# negative x, you must set an offset to translate the nose to x = 0.
streamlineFlag = 0 # {bool}
LEoffset = 0. # {float}
#------------
# If you want lots of info to print out, set this to true, {bool}
verbose = 0
#------------
# If you just want to debug, tell the code to only iterate over part of the
# .triq file (a random sample of sampleAmount triangles). Make sure to set
# the flag as well.
sampleAmount = 10000 # {int}
sampleFlag = 0 # {bool}
""" -- FREESTREAM -- """
#------------
# Freestream temperature, [K], {float}
T_inf = 300
#------------
# Freestream pressure, [Pa], {float}
p_inf = 1013.0
#------------
# Freestream density, [kg/m^3], {float}
rho_inf = 0.039466
#------------
# Freestream Mach number, {float}
M_inf = 4.
#------------
# Ratio of specific heats, [Pa] (constant), {float}
g_inf = 1.4
#------------
# Angle of attack [rad]
alpha = 0. * np.pi/180
#------------
# Sideslip angle [rad]
beta = 0. * np.pi/180
#------------
""" -- VEHICLE -- """
#------------
# Wall temperature of the vehicle surface [K], {float}
T_wall = 400.
#------------
# Reference area for aerodynamic force coefficients
S_ref = 3.
#------------
# Reference length for aerodynamic moment coefficients (measured from nose)
L_ref = 1.5
#------------
""" -- POST PROCESSING -- """
#------------
# If you DON'T want to save a .csv file of local properties, set flag to 0
writeDataFlag = 1 # {bool}
#------------
# Additional properties to save to a .csv file (called flowData.csv) {List}
# You will always get ['x', 'y', 'z', 'cf', 'ch', 'Velocity', 'T_aw']
# Choose from ['M', 'rho', 'p', 'cp', 'T_e', 'U', 'V', 'W', 'BL Regime',
#              'Dev Length', 'Re']
additionalProperties = ['Dev Length', 'Re', 'BL Regime'] # {list of str}
#------------
# Write data to this filename
outputFilename = 'flowData' # {str}
#------------
# Save data to VTK format for nice viewing in ParaView
vtkFlag = 0
#------------
# Save data to csv
csvFlag = 0
#------------
###############################################################################
""" You probably (hopefully) won't have to change anything below this line. """
###############################################################################
#%% The code
class Fluid(object):
""" Contains all the constant fluid properties
"""
def __init__(self):
# Sutherland law reference conditions and constants
self.mu_0 = 1.716e-5
self.T_0 = 273
self.sutherland_S = 111
# Gas constants
self.R_universal = 8.314510
self.R_air = 287.058
self.molarMass_air = 28.964923942499997
# Freestream conditions
self.T_inf = T_inf
self.p_inf = p_inf
self.g_inf = g_inf
self.a_inf = (self.g_inf * self.R_air * self.T_inf) **0.5
# Constant specific heat value {float}
self.constantCp = 1.015
def calculate_cp(self, T):
"""
This function calculates the specific heat capacity and ratio fo specific heats
at a temeprature T [K]. It uses the polynomial curve fit taken from NASA's CEA code
McBride, B. Zehe, M. Gordon, S. (2002)
"NASA Glenn Coefficients for Calculating Thermodynamic Properties of Individual Species"
molar mass air = 28.964923942499997
Specific heat capacities at 298.15 K:
Cp{N2, O2, Ar, CO2} = 29.124, 29.378, 20.786, 37.135
"""
verbose = 0
if T < 200.:
T = 200.
if verbose:
print "Problem, Temp < 200 K, I'll set it to 200 K"
elif T > 6000.:
T = 6000.
if 1:
print "Problem, Temp > 6000 K, I'll set it to 6000 K"
if T < 1000.:
N2 = [2.210371497E+04, -3.818461820E+02, 6.082738360E+00, -8.530914410E-03, 1.384646189E-05, -9.625793620E-09, 2.519705809E-12]
O2 = [-3.425563420E+04, 4.847000970E+02, 1.119010961E+00, 4.293889240E-03, -6.836300520E-07,-2.023372700E-09, 1.039040018E-12]
Ar = [0., 0., 2.5, 0., 0., 0., 0.]
CO2 = [4.943650540E+04, -6.264116010E+02, 5.301725240E+00, 2.503813816E-03, -2.127308728E-07, -7.689988780E-10, 2.849677801E-13]
else:
N2 = [5.877124060E+05, -2.239249073E+03, 6.066949220E+00, -6.139685500E-04, 1.491806679E-07, -1.923105485E-11, 1.061954386E-15]
O2 = [-1.037939022E+06, 2.344830282E+03, 1.819732036E+00, 1.267847582E-03,-2.188067988E-07, 2.053719572E-11, -8.193467050E-16]
Ar = [2.010538475E+01, -5.992661070E-02, 2.500069401E+00, -3.992141160E-08, 1.205272140E-11, -1.819015576E-15, 1.078576636E-19]
CO2 = [1.176962419E+05, -1.788791477E+03, 8.291523190E+00, -9.223156780E-05, 4.863676880E-09, -1.891053312E-12, 6.330036590E-16]
coefficients = 8.314510 * np.array([N2, O2, Ar, CO2])
temperatureVector = np.array([T**-2, T**-1, 1., T, T**2, T**3, T**4])
cp_species = np.dot(coefficients, temperatureVector) / np.array([28.01340, 31.99880, 39.94800, 44.00950])
cp_air = np.sum(cp_species * np.array([.78084, .20947, .009365, .000319]))
gamma = cp_air*self.molarMass_air/(cp_air*self.molarMass_air - self.R_universal)
return cp_air
#%%
class Streamline(Fluid):
""" This class takes care of the streamlines. It imports them, calculates
the running length and then sorts them to efficiently calculate the local
running length in each cell.
"""
def __init__(self):
""" """
def importer(self, filename, FieldFlag=0):
""" This function imports the streamline data by iterating over the
Tecplot generated file. Since we are iterating over it anyway, we
calculate the length of the streamline for every point as well, saving
it in self.streamlineLengths.
No flow properties along the streamline are saved - only the length,
and x, y, z coordinates. The flow properties for each area element are
imported form the actual solution (.triq file).
"""
if not streamlineFlag:
print "No streamlines - we'll use a coordinate based running length"
return (None, None, None)
print 'Importing streamline data...'
self.filename = filename
with open(self.filename, 'r') as dataFile:
data = dataFile.read().split('Streamtrace')
# Iterate through and construct the streamline dataframes
Streamline.streamlines = 1000.*np.ones((maxSteps, nStreamlines, 3))
Streamline.streamlineLengths = 1000.*np.ones([maxSteps, nStreamlines])
count = 0 # if FieldFlag else 1
length = 0 # Initialise maxLength counter to be small
streamlineStepLengths = []
streamlineTotalLengths = []
for zone in data:
# This is iterating over the file, selecting the streamlines
#print 'Zone', count
zone = zone.strip().split('\n')
streamlineStepLength = len(zone)
streamlineStepLengths.append(int(streamlineStepLength))
# if count == 0:
# # Delete the surface label
# del zone[1]; del zone[0]
# if FieldFlag == False:
# # We don't want to read and store the velocity field data
# count += 1; continue
L = 0.
rowCount = 0
coords = []
streamlineLength = []
for row in zone:
# This is iterating over the individual streamlines
row = row.split(',')
if rowCount == 0:
x, y, z = (float(row[0]), float(row[1]), float(row[2]))
else:
xNew, yNew, zNew = (float(row[0]), float(row[1]), float(row[2]))
L += ((x-xNew)**2 + (y-yNew)**2 + (z-zNew)**2)**0.5
x, y, z = xNew, yNew, zNew
if xNew < x:
print "Warning: a streamline may be recirculating"
names = ['Streamline', 'x', 'y', 'z', 'Length']
properties = [count, xNew, yNew, zNew, L]
for c1, c2 in zip(names, properties):
print "%-10s %s" % (c1, c2)
coords.append((x, y, z))
streamlineLength.append(L)
rowCount += 1
# if count == 0 and FieldFlag:
# # We're constructing the surface
# self.field = zone
# else:
# # We're constructing a streamline
# #print np.shape(coords)
# streamlines[:len(zone), 3*(count):3*count+3] = np.array(coords)
# streamlineLengths[:len(zone), count] = np.array(streamlineLength)
Streamline.streamlines[:streamlineStepLength, count:count+1, :] = np.array(coords).reshape(streamlineStepLength, 1, 3)
Streamline.streamlineLengths[:streamlineStepLength, count:count+1] = np.array(streamlineLength).reshape(streamlineStepLength, 1)
streamlineTotalLengths.append(streamlineLength[-1])
count += 1
Streamline.maxStreamlineSteps = max(streamlineStepLengths)
Streamline.streamlineLengths = Streamline.streamlineLengths[:Streamline.maxStreamlineSteps,:,]
sortedLengthIndices = np.argsort(streamlineStepLengths)[::-1]
# Sort the streamlines into order of increasing length
Streamline.streamlines = Streamline.streamlines[:, sortedLengthIndices, :]
sortedStepLengths = np.array(streamlineStepLengths)[sortedLengthIndices]
Streamline.maxStepLength = sortedStepLengths[0]
# Tolerance on positioning the streamlines
tol = 0.001
# The first (longest and hopefully ~first in x)
Streamline.firstStagnationPoint = Streamline.streamlines[:, 0, 0]
for n in xrange(nStreamlines-1):
# Iterate through the streamlines and adjust the starting position.
# Skip the first (longest) streamline - we're adjusting all the others
# relative to it.
n += 1
# Get length of current streamline
length = sortedStepLengths[n]
# Get current streamline
streamline = Streamline.streamlines[:length, n:n+1, :]
# Get the running length vector
streamlineLength = Streamline.streamlineLengths[:length, n:n+1]
# Get the starting x position of the current streamline
xStartCurrent = streamline[0, 0, 0]
try:
# Try to find a position based on a small tolerance
newRow = max((np.where(abs(Streamline.firstStagnationPoint - xStartCurrent) < tol)[0][0], 0))
except:
# If not increase the tolerance a lot to ensure we find a spot.
if verbose:
print "Streamline adjustment failed (tol =", tol, "), increasing tolerance by 10."
newRow = max((np.where(abs(Streamline.firstStagnationPoint - xStartCurrent) < tol*10)[0][0], 0))
# move the streamline to new starting location
if newRow + length >= Streamline.maxStepLength:
# We are moving the streamline further back
# Physically this means this streamline finishes behind the longest
# Perhaps a swept wing where the trailing edge is behind the fuselage
print "Warning: Attempted to push streamline outside the streamlines array!"
Streamline.streamlines[newRow:newRow+length, n:n+1, :] = streamline
Streamline.streamlineLengths[newRow:newRow+length, n:n+1] = streamlineLength
else:
Streamline.streamlines[newRow:newRow+length, n:n+1, :] = streamline
Streamline.streamlineLengths[newRow:newRow+length, n:n+1] = streamlineLength
# Overwrite old area
Streamline.streamlines[:newRow, n:n+1, :] = 1000.*np.ones((newRow, 1, 3))
# Adjust streamlines to the actual maximum streamline step length not
# the maximum possible tecplot streamline steps
Streamline.streamlines = Streamline.streamlines[:Streamline.maxStepLength, :, :]
# Finished importing and calculating lengths
print 'Streamline import and length calculation complete.', count-1, 'streamlines imported with max', self.maxStreamlineSteps, 'steps.', '\n'
if verbose:
print 'Maximum calculated streamline length is', max(Streamline.streamlineLengths), 'units.'
# Check to ensure the longest streamline is weirdly longer than the next
perCentLonger = (sortedStepLengths[0] - sortedStepLengths[1])/sortedStepLengths[1]
if perCentLonger >= 10.:
print "Warning: The longest streamline is", perCentLonger, "% longer than the next longest"
"""
Include some checks here. Is max length >> length of vehicle?
Any streamline in the opposite direction to the flow (recirculation)
Any streamline >> longer than all the others/neighbours?
"""
return Streamline.streamlines, streamlineTotalLengths, Streamline.maxStepLength
#%%
class Data(Streamline):
def __init__(self, filename):
"""
"""
Streamline.__init__(self)
self.filename = filename
self.flowData = pd.DataFrame()
def triqImporter(self, FieldFlag=1):
""" Imports the area data from the Tecplot field data.
"""
lineNumber = 0
print 'Importing flow data...'
with open(self.filename, 'r') as dataFile:
for line in dataFile:
if lineNumber == 0:
# We're on the first line
#print line
self.nVertices, self.nTriangles, nScalars = (int(x) for x in line.split())
lineNumber += 1
# Read in the vertex information
self.vertices = pd.read_csv(self.filename, delim_whitespace=1,
names=['x', 'y', 'z'],
skiprows=1, nrows=self.nVertices, memory_map=1)
# Read in the triangle information
self.triangles = pd.read_csv(self.filename, delim_whitespace=1,
names=['v1', 'v2', 'v3'],
skiprows=self.nVertices+1, nrows=self.nTriangles,
memory_map=1)
if sampleFlag:
self.triangles = self.triangles.sample(sampleAmount).transpose()
else:
self.triangles = self.triangles.transpose()
# Read in the flow information
temp = pd.read_csv(self.filename, delim_whitespace=1,
names=["rho","U","V","W","P"],
skiprows=self.nVertices+2*self.nTriangles+1, nrows=2*self.nVertices,
memory_map=1)
self.flow = temp.iloc[1::2, :].reset_index()
if sampleFlag:
self.nTriangles = sampleAmount
print "Field import complete", self.nTriangles, 'elements,', self.nVertices, 'vertices.', '\n'
return self.vertices, self.triangles, self.flow
def getProperties(self):
"""
Calculate all the flow properties of the triangles and add it all to the
flowData dataframe. This function just applies the master function to
each row of the triangles DataFrame (list of vertices).
It calculates the centroid and averages the data to the centroid.
"""
print 'Running main code now...'
# print self.nTriangles, 'elements,', self.nVertices, 'vertices.', '\n'
self.count = 1
self.startTime = time.time()
self.percentComplete = 0
"""----- Some flags which won't be changed much -------"""
self.naturalTransitionFlag = 1
self.roughnessInducedFlag = 0
self.userDefinedFlag = 0
self.coneCorrectionFlag = 0
self.laminarOnlyFlag = 1
self.turbulentOnlyFlag = 0
self.immediateTransitionFlag = 0
"""------------"""
self.badMachCount = 0
self.badCfCount = 0
self.badTempCount = 0
self.badVelCount = 0
self.flowData = pd.DataFrame(self.triangles.apply(self.master, axis=0))
timeElapsed = time.time() - self.startTime
m, s = divmod(timeElapsed, 60); h, m = divmod(m, 60)
print 'Viscous correction code complete.', '\n'
print 'Time elapsed', "%d:%02d:%02d" % (h, m, s)
print 'Average time per loop', timeElapsed/self.count
print "Bad cell counts (Total %d):" % self.nTriangles
names = ['Mach', 'cf', 'T', 'Vel']
if sampleFlag:
amounts = [(self.badMachCount/float(sampleAmount) * 100., self.badMachCount),
(self.badCfCount/float(sampleAmount) * 100., self.badCfCount),
(self.badTempCount/float(sampleAmount) * 100., self.badTempCount),
(self.badVelCount/float(sampleAmount) * 100., self.badVelCount)]
else:
amounts = [(self.badMachCount/self.nTriangles * 100., self.badMachCount),
(self.badCfCount/self.nTriangles * 100., self.badCfCount),
(self.badTempCount/self.nTriangles * 100., self.badTempCount),
(self.badVelCount/self.nTriangles * 100., self.badVelCount)]
for c1, c2 in zip(names, amounts):
print "%-10s %s" % (c1, c2)
print '\n'
# print 'Bad cell count (Mach)', self.badMachCount/float(sampleAmount) * 100., '%', self.badMachCount, 'total.'
# print 'Bad cell count (Cf)', self.badCfCount/float(sampleAmount) * 100., '%', self.badCfCount, 'total.'
# print 'Bad cell count (T)', self.badTempCount/float(sampleAmount) * 100., '%', self.badTempCount, 'total.'
# print 'Bad cell count (V)', self.badTempCount/float(sampleAmount) * 100., '%', self.badTempCount, 'total.'
# else:
# print 'Bad cell count (Mach)', self.badMachCount/self.nTriangles * 100., '%', self.badMachCount, 'total.'
# print 'Bad cell count (Cf)', self.badCfCount/self.nTriangles * 100., '%', self.badCfCount, 'total.'
# print 'Bad cell count (T)', self.badTempCount/self.nTriangles * 100., '%', self.badTempCount, 'total.'
return self.flowData.transpose()
def master(self, row):
""" This function iterates over the list of triangle vertices. To only
iterate over a potentially very long list, all calculations are done
at once - looping only once. Unfortuantely to avoid the overhead
associated with calling functions in python, most calculations are
done inside master() - this makes it long and difficult to read.
The properties calculated include:
A - The area of the triangle calculated with the cross product of
the vectors.
n - The normal of the triangle, again from the cross product. By
convention normal is point OUTWARDS, into the flow.
Cx, Cy, Cz - The coordinates of the centroid of each triangle.
Re - The local Reynolds number calculated form an interpolated
guess at the local running length based on the two closest
streamlines (either side of the point).
Cf - The local skin friction coefficient. Check associated docs.
Ch - The local heat transfer coefficient (Stanton number). Check
associated docs.
The following properties are taken from the Cart3D solution file and
(currently) linearly interpolated to the centroid. Note Cart3D
normalises its data against the freestream value and gamma (=1.4).
rho - density [kg/m^3]
U - x velocity [m/s]
V - y velocity [m/s]
W - z velocity [m/s]
p - pressure [Pa]
Currently takes a simple average of the properties - should implement
a weighted average based on distance from centroid to vertex when areas
get bigger. Depending on computational cost, set up a tolerance.
"""
"""
if row some multiple of total number of triangles:
print out a status update and estimate of time
"""
#print "count", self.count
if verbose:
reportingInterval = 1
else:
reportingInterval = 5
timeElapsed = time.time() - self.startTime
if self.count%(reportingInterval * self.nTriangles/100) == 0 or self.count == 1000:
m, s = divmod(timeElapsed, 60); h, m = divmod(m, 60)
print self.percentComplete, '% of elements completed so far. Wall clock time', "%d:%02d:%02d" % (h, m, s)
printFlag = 0
if self.percentComplete > 0:
timeRemaining = timeElapsed *(100 - self.percentComplete)/self.percentComplete
mRemaining, sRemaining = divmod(timeRemaining, 60)
hRemaining, mRemaining = divmod(mRemaining, 60)
print "Approximately", "%d:%02d:%02d" % (hRemaining, mRemaining, sRemaining), "remaining."
printFlag = 1
if self.count == 1000 and not printFlag:
timeRemaining = timeElapsed/1000. * self.nTriangles
mRemaining, sRemaining = divmod(timeRemaining, 60)
hRemaining, mRemaining = divmod(mRemaining, 60)
print "Rough initial estimate:", "%d:%02d:%02d" % (hRemaining, mRemaining, sRemaining), "remaining."
self.percentComplete += reportingInterval; #self.count += 1
# These are the vertices of the specific triangle - they correspond to
# indices in the vertices AND flow DataFrames
# Note STL is indexed from 1, hence we need to minus one to get the
# dataframe index.
v1i, v2i, v3i = row[0] - 1, row[1] - 1, row[2] - 1
if v1i > self.nVertices or v2i > self.nVertices or v3i > self.nVertices:
print 'Vertex indexing has died - max > than number of vertices.'
# These are the (x, y, z) indices of each vertex
v1 = np.array(self.vertices.iloc[v1i])
v2 = np.array(self.vertices.iloc[v2i])
v3 = np.array(self.vertices.iloc[v3i])
# Form two vectors forming the triangle
v1v2 = v2 - v1
v1v3 = v3 - v1
# Calculate area and normal from cross product given the above vectors.
area = 0.5 * np.linalg.norm(np.cross(v1v2, v1v3))
normal = tuple(np.cross(v1v2, v1v3)/area)
# Calculate the centroid coodinates.
centroidx = np.mean([v1[0], v2[0], v3[0]])
centroidy = np.mean([v1[1], v2[1], v3[1]])
centroidz = np.mean([v1[2], v2[2], v3[2]])
centroid = (centroidx, centroidy, centroidz)
# Calculate the mean surface flow properties at the centroid of each triangle.
# Order: Cp,Rho,U,V,W,Pressure
properties = np.mean([self.flow.iloc[v1i], self.flow.iloc[v2i], self.flow.iloc[v3i]], axis=0)
self.rho, U, V, W, self.p = properties[1], properties[2], properties[3], properties[4], properties[5]
# Undo the normalisation Cart3D uses for some currently unknown reason
self.rho *= rho_inf; U *= Fluid.a_inf; V *= Fluid.a_inf; W *= Fluid.a_inf; self.p *= rho_inf*Fluid.a_inf**2.
# print 'rho', self.rho, 'U', U, 'V', V, 'W', W, 'p', self.p
# Need to catch the problematic data Cart3D sometimes produces -
# generally degenerencies in small cut cells. Known issue.
if self.p < 1e-1:
self.p = 1e-1
if verbose:
print "Warning: Pressure < 1e-1 at", v1, v2, v3, "setting to 1e-1 Pa."
if self.rho < 1e-6:
self.rho = 1e-6
if verbose:
print "Warning: Density < 1e-6 at", v1, v2, v3, "setting to 1e-6 kg/m^3."
# Calculate local velocity vector
self.velocityMagnitude = (U**2. + V**2. + W**2.)**0.5
velocityDirection = np.array([U, V, W], dtype='float64') / self.velocityMagnitude
#print 'velocity', velocityMagnitude
if self.velocityMagnitude > 1.5*M_inf*Fluid.a_inf:
self.badVelCount += 1
if verbose:
print "Warning: velocity > 1.5x freestrem at", v1, v2, v3
# Calculate the temperature based on ideal gas law
self.T = self.p / (self.rho * Fluid.R_air)
#print 'T', self.T
if self.T > 800.:
#print "Warning: High edge temperature, constant Cp assumption might be in trouble - consider variable Cp."
self.badTempCount += 1
# Calculate local Mach number
try:
self.M = self.velocityMagnitude/((g_inf*Fluid.R_air*self.T)**0.5)
if self.M > 1.5 * M_inf:
# print "Warning high Mach number,", self.M, "Temperature is", self.T
self.badMachCount += 1
# print "x coordinate is", centroid[0], "Are you in the wake?"
self.M = M_inf
except:
print 'Check local sound speed at', v1, v2, v3
# Calculate local dynamic viscosity using Sutherland's law
self.mu = Fluid.mu_0 * (self.T/Fluid.T_0)**(3./2) * ((Fluid.T_0 + Fluid.sutherland_S) / (self.T + Fluid.sutherland_S))
self.mu_wall = Fluid.mu_0 * (T_wall/Fluid.T_0)**(3./2) * ((Fluid.T_0 + Fluid.sutherland_S) / (T_wall + Fluid.sutherland_S))
#print 'mu/mu_wall', self.mu/self.mu_wall
# Calculate the local streamline based running length
self.calculate_runningLength(centroid)
# Calculate the local Reynolds number
self.localReynolds = self.rho * self.velocityMagnitude * self.localRunningLength / self.mu
#print 'localReynolds', self.localReynolds
# We always assume a laminar boundary layer
self.laminar = 1; self.transitional = 0; self.turbulent = 0
if self.laminar:
self.calculate_laminarCf()
if self.transitional:
self.calculate_transitionalCf()
if self.turbulent:
self.calculate_turbulentCf()
# The above computed skin friction coefficients should be corrected for
# thickness with form factors. Unique factors are implmented here
# for wings and bodies
wallShearMagnitude = self.cf * 0.5 * self.rho * self.velocityMagnitude**2.
wallShearVector = wallShearMagnitude * velocityDirection
viscousForce = wallShearVector*area
# Calculate Reynold's analogy factor
Pr_T = 0.86; Pr_L = 0.71
bracket = 1./(5.*0.4) * (1. - Pr_T) * ((np.pi**2.)/6. + 1.5*(1. - Pr_T)) + (Pr_L/Pr_T - 1.) + log(1. + (5./6.)*(Pr_L/Pr_T - 1.))
s = Pr_T * (1. + 5.*(self.cf/2.)**0.5 * bracket)
# Calculate Stanton number from modified Reynold's analogy
ch = (1./s) * self.cf/2.
# Calculate heat transfer coefficient
h = ch*self.rho*self.velocityMagnitude*Fluid.calculate_cp(self.T)
# Calculate heat transfer into the wall
bracket = self.T * (1. + self.r * (g_inf - 1.)/2. * self.M**2.) - T_wall
q_wall = ch * self.rho*self.velocityMagnitude * Fluid.calculate_cp(self.T) * bracket
if verbose:
print 'Local properties...'
names = ['area', 'centroidx', 'centroidy', 'centroidz', 'normal', 'rho', 'U', 'V', 'W', 'p', 'cf', 'Ff']
properties = [area, centroidx, centroidy, centroidz, normal, self.rho, U, V, W, self.p, self.cf, viscousForce]
for c1, c2 in zip(names, properties):
print "%-10s %s" % (c1, c2)
print '\n'
# Increment the element counter
self.count += 1
return pd.Series({'A': area,
'x': centroid[0],
'y': centroid[1],
'z': centroid[2],
'n': normal,
'rho': self.rho,
'U': U,
'V': V,
'W': W,
'Velocity': self.velocityMagnitude,
'M': self.M,
'p': self.p,
'cf': self.cf,
'ch': ch,
#'cp': self.cp,
'BL Regime': self.BLregime,
'Dev Length': self.localRunningLength,
'Re': self.localReynolds,
'Ff': viscousForce,
'T_e': self.T,
'T_aw': self.T_adiabaticWall,
'q_wall': q_wall})
    def calculate_runningLength(self, centroid, searchBracket=200):
        """ This function calculates the local streamline running length given
        a location.

        Sets self.localRunningLength (never below 0.005) by linearly
        interpolating between the two streamlines nearest to ``centroid``,
        searching only a window of ``searchBracket`` streamline steps
        around the matching x station.
        """
        # Firstly check we actually want the streamline running length
        if not streamlineFlag:
            # Coordinate-based fallback: distance from the (offset) nose.
            # MAKE SURE THIS ISN'T NEGATIVE
            self.localRunningLength = centroid[0] + LEoffset
            if self.localRunningLength <= 0.005:
                # Need to include blunted leading edge effects here but for the moment
                # we'll just set it to 0.005
                self.localRunningLength = 0.005
            return
        # Populate an array repeating the current location so the distance to
        # every streamline point in the window is computed in one vector op.
        self.Location = np.broadcast_to(centroid, (searchBracket, nStreamlines, 3))
        currentX = centroid[0]
        # Tolerance on finding the position on the streamlines
        tol = 0.001
        try:
            # Try to find a position based on a small tolerance
            rowPosition = max((np.where(abs(Streamline.firstStagnationPoint - currentX) < tol)[0][0], 0))
        except:
            # If not increase the tolerance a lot to ensure we find a spot.
            if verbose:
                print "Row position adjustment failed (tol =", tol, "), increasing tolerance by 10."
            rowPosition = max((np.where(abs(Streamline.firstStagnationPoint - currentX) < tol*10)[0][0], 0))
        if rowPosition <= searchBracket/2:
            # We are at the top of the array
            self.streamlineSection = self.streamlines[:searchBracket, :, :]
        elif rowPosition >= Streamline.maxStepLength - searchBracket/2:
            # We are at the bottom of the array
            # NOTE(review): this slices from index searchBracket to the END,
            # not the last searchBracket rows ([-searchBracket:]), and its
            # length differs from self.Location's - confirm intent.
            self.streamlineSection = self.streamlines[searchBracket:, :, :]
        else:
            # We are in the middle
            self.streamlineSection = self.streamlines[rowPosition-searchBracket/2:rowPosition+searchBracket/2, :, :]
        # delta x, delta y, delta z from location to every point
        self.deltas = self.Location - self.streamlineSection
        # Square the distances
        self.deltas = np.square(self.deltas)
        # Separate dx, dy and dz to sum together
        dx = self.deltas[:, :, 0]
        dy = self.deltas[:, :, 1]
        dz = self.deltas[:, :, 2]
        # Take the square root to find the Euclidean distance
        self.distances = np.sqrt(dx + dy + dz)
        # NOTE(review): no gradient/side check - streamlines on the far side
        # of the body can win the nearest-distance search.
        # Column indices of the two closest streamlines
        neighbouringStreamlineIndices = self.distances.min(axis=0).argsort(kind='mergesort')[:2] # index
        # Row (step) index of the closest point on each of those streamlines
        neighbouringStreamlineStepIndices = self.distances.argsort(axis=0, kind='mergesort')[0, neighbouringStreamlineIndices]
        # Distances to two closest streamlines
        neighbouringStreamlines_distances = self.distances[neighbouringStreamlineStepIndices, neighbouringStreamlineIndices] # value
        if np.max(abs(neighbouringStreamlines_distances)) > 1.:
            print "WARNING: Closest streamline seems to be far away at", np.max(neighbouringStreamlines_distances), "m."
            print 'Current centroid', centroid
        # Running length at the two neighbouring streamline points.
        # NOTE(review): this REPLACES the step indices with
        # (column indices + rowPosition) rather than offsetting the step
        # indices found above by the window start - looks like a bug; confirm.
        neighbouringStreamlineStepIndices = neighbouringStreamlineIndices + rowPosition
        neighbouringStreamlines_lengths = Streamline.streamlineLengths[neighbouringStreamlineStepIndices, neighbouringStreamlineIndices]
        # Linearly interpolate between two neighbouring streamlines
        self.localRunningLength = float(neighbouringStreamlines_lengths[0] + neighbouringStreamlines_distances[0]*np.diff(neighbouringStreamlines_lengths)/np.sum(neighbouringStreamlines_distances))
        if self.localRunningLength <= 0.005:
            # Need to include blunted leading edge effects here but for the moment
            # we'll just set it to 0.005
            self.localRunningLength = 0.005
def calculate_laminarCf(self, checkFlag=1):
# Check to ensure flow isn't transitional
if checkFlag:
if not self.laminarOnlyFlag:
if self.turbulentOnlyFlag:
self.laminar = 0; self.transitional = 0; self.turbulent = 1
elif self.naturalTransitionFlag:
# Natural transition criterion
self.Re_critical = 6.421* exp(1.209e-4 * self.M**2.641)
"""
NEED TO INCLUDE THE WING SWEEP STUFF HERE
Re_critical = Re_critical*(0.787 * cos(wingLEsweep)**4.346 - 0.7221*exp(-0.0991*wingLEsweep) + 0.9464)
"""
if self.localReynolds >= self.Re_critical:
# The flow is transitional, break out of the laminar analysis
# self.criticalRunningLength_start = self.localRunningLength
self.criticalRunningLength_start = (6.421*self.mu*exp(1.209e-4 * self.M**2.641)) / (self.rho * self.velocityMagnitude)
self.laminar = 0; self.transitional = 1; self.turbulent = 0
return
elif self.roughnessInducedFlag:
# Roughness induced transition condition then return
pass
elif self.userDefinedFlag:
# User defined transition location then return
pass
# The above transition checks all showed that it was laminar flow,
# continue laminar analysis:
"""
# Calculate Reference Temperature
T_ref = T_e_ave*(1.0 + 0.032*Ma_e_ave**2 + 0.58*(T_w/T_e_ave - 1.0))
#print(T_ref)
# Calculate Reference density
rho_ref = P_e_ave/(R_a*T_ref)
#print(rho_ref)
# Calculate Reference viscosity, Sutherlands law, valid for moderate temperatures and pressures
mu_ref = (1.458*10**(-6) * (T_ref**1.5))/(T_ref + 110.4)
"""
# Calculate the laminar skin friction coefficient
# Set recovery factor
self.r = 0.85 # van Driest says 0.85 to 0.89 for lam to turbs
# Calculate the adiabatic wall temperature
self.T_adiabaticWall = (1 + self.r*((g_inf - 1)/2.) * self.T * self.M**2.)
T_awOnT = self.T_adiabaticWall/self.T
# Reference temperature
# T_reference = self.T*(0.45 * 0.55 * T_awOnT + 0.16*self.r*(g_inf - 1)/2. * self.M**2.)
T_reference = self.T*(1. + 0.032*self.M**2. + 0.58 * (T_wall/self.T - 1.))
# Reference density
rho_reference = self.p/(Fluid.R_air * T_reference)
# Reference viscosity
mu_reference = 1.458e-6 * ((T_reference)**1.5) / (T_reference + 110.4)
# Reference Reynolds
# Re_reference = self.M * (g_inf*Fluid.R_air*T_reference)**0.5 * rho_reference * self.localRunningLength / mu_reference
Re_reference = self.velocityMagnitude * rho_reference * self.localRunningLength / mu_reference
try:
cf = 0.664 / (Re_reference)**0.5
except:
print 'Calculation of laminar flow Cf failed'
cf = 0.
if self.coneCorrectionFlag:
# Flow is 3D, apply cone rule correction
cf *= 1.73
self.cf = cf
# This is to show lam (0 = BLregime) vs transitional (1 < BLregime < 0)
# vs turb flow (BLregime = 0)
self.BLregime = 0
return self.cf
def calculate_transitionalCf(self):
# Set recovery factor
self.r = 0.87 # van Driest says 0.85 to 0.89 for lam to turbs
# Check we aren't turbulent
criticalRunningLength_end = self.criticalRunningLength_start * (1. + self.Re_critical**(-0.2))
#print 'end of laminar', self.criticalRunningLength_start
#print 'current', self.localRunningLength
#print 'end of transitional', criticalRunningLength_end, '\n'
if self.localRunningLength >= criticalRunningLength_end:
# Flow is now fully turbulent
self.laminar = 0; self.transitional = 0; self.turbulent = 1
self.T_adiabaticWall = (1 + self.r*((g_inf - 1)/2.) * self.T * self.M**2.)
return
else:
#print "Transitional flow"
cf_laminar = self.calculate_laminarCf(checkFlag=0)
try:
cf_turbulent = self.calculate_turbulentCf(r=0.87)
except:
print "Calculation of transitional flow turbulent Cf failed"
names = ['Local Re', 'mu', 'mu_wall', 'T_aw', 'T_edge', 'P', 'rho']
properties = [self.localReynolds, self.mu, self.mu_wall, self.T_adiabaticWall, self.T, self.p, self.rho]
for c1, c2 in zip(names, properties):
print "%-10s %s" % (c1, c2)
print '\n'
# Set up the variables to vary between laminar and turbulent skin friction coefficients.
exponent = -3. *(exp(log(2)/(5.*self.criticalRunningLength_start) * self.Re_critical**(-0.2)*(self.localRunningLength - self.criticalRunningLength_start)) - 1.)**2.
epsilon = 1 - exp(exponent)
try:
cf = (1-epsilon)*cf_laminar + epsilon*cf_turbulent
except:
print "Calculation of transitional flow Cf failed"
if self.coneCorrectionFlag:
# Flow is 3D, apply cone rule correction
cf *= (1-epsilon)*1.15 + epsilon*1.73
self.cf = cf
# This is to plot lam (BLregime = 0) vs transitional (0 < BLregime < 1)
# vs turb flow (BLregime = 1)
self.BLregime = 0.5
return self.cf
    def calculate_turbulentCf(self, r=0.89):
        """Calculate the turbulent skin friction coefficient using the
        Van Driest II compressible transformation.

        The implicit Van Driest equation is solved for cf with Brent's
        method on the bracket [1e-15, 0.1]; if the solve fails, cf falls
        back to 0. and the bad-cell counter is incremented.

        Parameters:
            r -- recovery factor (van Driest: 0.85 to 0.89).
        Returns:
            self.cf -- turbulent skin friction coefficient.
        """
        #print "Turbulent flow"
        # Calculate the turbulent skin friction coefficient
        # van Driest says r = 0.85 to 0.89 for lam to turbs
        self.r = r
        # Set up the variables/coefficients for the Van Driest estimate.
        # aSquared is the a^2 term; b the wall-temperature-ratio term.
        aSquared = (g_inf - 1)/2. * self.r * self.M**2. * self.T/T_wall
        b = self.T_adiabaticWall/T_wall - 1
        denominator = (b**2. + 4.*aSquared)**0.5
        # A and B are clamped to [-1, 1] to keep asin() in its domain
        A = self.clean_A(aSquared, b, denominator)
        B = self.clean_B(aSquared, b, denominator)
        # Solve the implicit equation for skin friction
        #cf_guess = 1.
        cf_func = lambda cf_turbulent: 4.15*log(self.localReynolds*cf_turbulent*self.mu/self.mu_wall) + 1.7 - (asin(A) + asin(B)) / (cf_turbulent*(self.T_adiabaticWall/self.T - 1.))**0.5
        try:
            # Bracketed root find; raises ValueError if cf_func has the
            # same sign at both ends of the bracket.
            cf = brentq(cf_func, 1e-15, 0.1)
            # names = ['Local Re', 'length', 'mu', 'mu_wall', 'T_aw', 'T_edge', 'T_wall', 'p', 'rho', 'velocity', 'Mach']
            # properties = [float(self.localReynolds), float(self.localRunningLength), self.mu, self.mu_wall, self.T_adiabaticWall, self.T, T_wall, self.p, self.rho, self.velocityMagnitude, self.M]
            # for c1, c2 in zip(names, properties):
            # print "%-10s %s" % (c1, c2)
            # print '\n'
        except:
            # Best-effort fallback: record the failure and use zero cf so
            # the sweep over cells can continue.
            # print "Calculation of turbulent Cf failed, Flow properties at culprit cell:"
            # print "Wake? Running length is", self.localRunningLength
            # names = ['Local Re', 'length', 'mu', 'mu_wall', 'T_aw', 'T_edge', 'T_wall', 'p', 'rho', 'velocity', 'Mach']
            # properties = [float(self.localReynolds), float(self.localRunningLength), self.mu, self.mu_wall, self.T_adiabaticWall, self.T, T_wall, self.p, self.rho, self.velocityMagnitude, self.M]
            # for c1, c2 in zip(names, properties):
            # print "%-10s %s" % (c1, c2)
            # print '\n'
            cf = 0.
            self.badCfCount += 1
        if self.coneCorrectionFlag:
            # Flow is 3D, apply cone rule correction
            cf *= 1.15
        self.cf = cf
        # This is to plot lam (BLregime = 0) vs transitional (0 < BLregime < 1)
        # vs turb flow (BLregime = 1)
        self.BLregime = 1
        return self.cf
def clean_A(self, a, b, denominator):
"""
This function is required to avoid math domain errors in an arcsin
calculation in the Van Driest calculation.
"""
A = ( 2.*a**2. - b ) / denominator
if A < -1.:
return -1.
elif A > 1.:
return 1.
else:
return A
def clean_B(self, a, b, denominator):
"""
This function is required to avoid math domain errors in an arcsin
calculation in the Van Driest calculation.
"""
B = ( b ) / denominator
if B < -1.:
return -1.
elif B > 1.:
return 1.
else:
return B
#%%
class postProcessor(Data):
def __init__(self, Field, flowData):
self.flowData = flowData
self.propertiesToSave = ['cf', 'ch', 'Velocity', 'T_aw'] + additionalProperties
def viscousForceCoefficients(self):
""" This function will calculate and return the viscous force
coefficients. The forces are calculated and stored here.
Sign convention
x - positive toward tail (flow in the direction of positive x)
y - positive upwards
z - positive left spanwise sitting in cockpit facing forwards
"""
# Visous forces in body axes
viscousForces_body = sum(self.flowData.loc['Ff'])
# Transform to wind axes
viscousForces = viscousForces_body
# Calculate velocity
u_inf = M_inf*Fluid.a_inf
cl_viscous = viscousForces[0]/(0.5*S_ref*rho_inf*u_inf**2.)
cd_viscous = viscousForces[1]/(0.5*S_ref*rho_inf*u_inf**2.)
return cl_viscous, cd_viscous
def viscousMomentCoefficients(self):
""" Similar to the above this function will calculate the viscous
pitching moment coefficients.
Sign convention
Directions same as above
Positive rotations defined by RH rule
"""
cm_viscous = 5
return cm_viscous
def saveAs_CSV(self, outputFilename=outputFilename, properties=['x', 'y', 'z', 'cf', 'ch', 'Velocity', 'T_aw']):
""" This function will write the flow data to file so we can view it
in Paraview.
"""
outputFilename += '.csv'
if additionalProperties != []:
for i in additionalProperties:
properties.append(i)
# self.flowData = self.flowData.round(decimals=5)
self.flowData.to_csv(outputFilename, sep=',', columns=properties, index=0, index_label=0, float_format='%.3f')
print "output file saved as", outputFilename
def saveSlice_CSV(self, outputFilename=outputFilename, xSlice=[], ySlice=[], zSlice=[]):
""" Take a slice and save it to csv """
outputFilename += '_slice.csv'
# # This defines how 'narrow' slice we want. Why am I writing this if ParaView will do it fark
# tol = 1e-2
#
# # Pre allocate empty DF here?
# slicedData = pd.DataFrame()
#
# if not xSlice:
# # We have some slices along x to make
# for point in xSlice:
# # we want to slice at all of these points
# > xSlice[point] - tol
# self.flowData.transpose().loc[(self.flowData.transpose()["x"] > 0.599 & self.flowData.transpose()["x"] < 0.601 & self.flowData.transpose()["z"] == 0), "cf"]
# elif not ySlice:
# # Slices along y to take
# elif not zSlice:
# # And slices aong z
flowData = self.flowData.apply(pd.to_numeric, errors='ignore')
slicedData_indices = (flowData["y"] > 0.598) & (flowData["y"] < 0.602) & (flowData["z"] == 0)
slicedData = flowData.loc[slicedData_indices]
slicedData.to_csv(outputFilename, sep=',', index=0, index_label=0)
print "Slices saved in", outputFilename
def saveAs_VTK(self, outputFilename):
"""
Write the flow data as a VTK unstructured grid - STILL NOT SURE WHY???
"""
outputFilename += '.vtu'
vtuFile = open(outputFilename, "w")
NumberOfPoints = Field.nVertices
NumberOfTriangles = Field.nTriangles
# Write the header
vtuFile.write("<VTKFile type=\"UnstructuredGrid\" byte_order=\"BigEndian\">\n")
vtuFile.write("<UnstructuredGrid>")
vtuFile.write("<Piece NumberOfPoints=\"%d\" NumberOfCells=\"%d\">\n" %
(NumberOfPoints, NumberOfTriangles))
# Write the point coordinates
vtuFile.write("<Points>\n")
vtuFile.write(" <DataArray type=\"Float32\" NumberOfComponents=\"3\"")
vtuFile.write(" format=\"ascii\">\n")
for index in range(NumberOfPoints-500000):
x, y, z = Field.vertices.iloc[index]
vtuFile.write(" %e %e %e\n" % (x, y, z))
vtuFile.write(" </DataArray>\n")
vtuFile.write("</Points>\n")
vtuFile.write("<Cells>\n")
# Write the connectivity
vtuFile.write(" <DataArray type=\"Int32\" Name=\"connectivity\"")
vtuFile.write(" format=\"ascii\">\n")
temp = Field.triangles.transpose()
for index in range(NumberOfTriangles):
v1, v2, v3 = temp.iloc[index]
vtuFile.write(" %d %d %d\n" % (v1, v2, v3))
vtuFile.write(" </DataArray>\n")
# Write the offsets
# vtuFile.write(" <DataArray type=\"Int32\" Name=\"offsets\"")
# vtuFile.write(" format=\"ascii\">\n")
# # Since all of the point-lists are concatenated, these offsets into the connectivity
# # array specify the end of each cell.
# for point in range(NumberOfTriangles):
# if two_D:
# conn_offset = 4*(1+i+j*nic)
# else:
# conn_offset = 8*(1+i+j*nic+k*(nic*njc))
# vtuFile.write(" %d\n" % conn_offset)
# vtuFile.write(" </DataArray>\n")
# Write the types
vtuFile.write(" <DataArray type=\"UInt8\" Name=\"types\"")
vtuFile.write(" format=\"ascii\">\n")
VTKtype = 5 # VTK_TRIANGLE
for point in range(NumberOfTriangles):
vtuFile.write(" %d\n" % VTKtype)
vtuFile.write(" </DataArray>\n")
vtuFile.write("</Cells>\n")
# Write the flow variables
vtuFile.write("<CellData>\n")
# Write variables from the dictionary.
for variable in self.propertiesToSave:
vtuFile.write(" <DataArray Name=\"%s\" type=\"Float32\" NumberOfComponents=\"1\"" % (variable))
vtuFile.write(" format=\"ascii\">\n")
for index in range(NumberOfTriangles):
vtuFile.write(" %e\n" % Field.flowData.transpose()[variable].iloc[index])
vtuFile.write(" </DataArray>\n")
# Write the velocity vector - have to do this separately because it's a vector
vtuFile.write(" <DataArray Name=\"Velocity vector\" type=\"Float32\" NumberOfComponents=\"3\"")
vtuFile.write(" format=\"ascii\">\n")
for index in NumberOfTriangles:
U, V, W = (Field.flowData.transpose()['U'].iloc[index],
Field.flowData.transpose()['V'].iloc[index],
Field.flowData.transpose()['W'].iloc[index])
vtuFile.write(" %e %e %e\n" % (U, V, W))
vtuFile.write(" </DataArray>\n")
# Write footers and close file
vtuFile.write("</CellData>\n")
vtuFile.write("</Piece>\n")
vtuFile.write("</UnstructuredGrid>\n")
vtuFile.write("</VTKFile>\n")
vtuFile.close()
return
#%% ----- Run the program
if __name__ == '__main__':
    # Report the wall-clock start time.
    print time.strftime("%H:%M:%S"), 'Starting....'
    # Run Tecplot in batch mode to generate and save the streamline data
    # try:
    # call(['tec360', '-b', 'Components.i.plt', 'retrieveStreamlines.mcr'])
    # except:
    # print 'Import of Tecplot streamline data failed'
    # Initialise Fluid class - sets up basic fluid and freestream properties
    # NOTE(review): this rebinds the *class* name Fluid to an instance, so
    # later references such as Fluid.R_air resolve against the instance and
    # no further Fluid() objects can be created after this point.
    Fluid = Fluid()
    # Initialise streamline class and import the Tecplot streamlines
    # (returns (None, None, None) when streamlineFlag is off).
    Streamlines = Streamline()
    streamlineCoordinates, streamlineLengths, maxSteplength = Streamlines.importer(streamlineFilename)
    # Class attribute dict, kept for interactive inspection/debugging.
    StreamlinesDict = Streamline.__dict__
    # Initialise a data class, this contains all the Field (Cart3D) data.
    Field = Data(cart3DFilename)
    # Import Cart3D data
    vertices, triangles, flow = Field.triqImporter()
    # Run the actual code - calculate viscous forces per surface triangle
    flowData = Field.getProperties()
    flowData = flowData.round(decimals=5)
    # Export the results to CSV (full data plus a hard-coded slice).
    post = postProcessor(Field, flowData)
    post.saveAs_CSV()
    post.saveSlice_CSV()
    # Instance dict, kept for interactive inspection/debugging.
    DataDict = Field.__dict__
    #Field.plotter()
| {
"repo_name": "AlexanderWard1/VC3D",
"path": "Flat Plate/addViscous.py",
"copies": "1",
"size": "55506",
"license": "mit",
"hash": 5288873927299967000,
"line_mean": 42.1224165342,
"line_max": 197,
"alpha_frac": 0.5503008684,
"autogenerated": false,
"ratio": 3.7955415754923414,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48458424438923414,
"avg_score": null,
"num_lines": null
} |
#%% addViscous.py
"""
Viscous correction code to augment Cart3D solutions.
Alexander Ward
April 2017
"""
#http://www.dtic.mil/dtic/tr/fulltext/u2/a045367.pdf
from math import exp, log, asin, sin
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy.optimize import brentq
import time as time
#%% Inputs
"""
Here are the majority of the user defined parameters. They are grouped into:
Simulation - Parameters on the simulation itself
Freestream - Freestream gas properties
Transition - Parameters controlling how you want to treat transition
Vehicle - Reference area and length, wall temperature
Post Processing - Properties to save/report, slices to take
"""
""" -- SIMULATION -- """
#------------
# Cart3D solution filename {str}
cart3DFilename = 'Components.i.triq'
#------------
# Streamline filename {str}
streamlineFilename = '4.csv'
#------------
# Number of Tecplot generated streamlines {int}
nStreamlines = 68
#------------
# maximum number of steps in the the Tecplot streamlines {int}
maxSteps = 19999
#------------
# If you don't have streamlines available, set to 0 to guess the development
# length based on flow direction (positive x) NOTE: If the vehicle starts in
# negative x, you must set an offset to translate the nose to x = 0.
streamlineFlag = 0 # {bool}
LEoffset = 0. # {float}
#------------
# If you want lots of info to print out, set this to true, {bool}
verbose = 0
#------------
# If you just want to debug, tell the code to only iterate over part of the
# .triq file. Make sure to set the flag as well.
sampleAmount = 5000 # {int}
sampleFlag = 1 # {bool}
""" -- FREESTREAM -- """
#------------
# Freestream temperature, [K], {float}
T_inf = 62.157
#------------
# Freestream pressure, [Pa], {float}
p_inf = 3163.4
#------------
# Freestream density, [kg/m^3], {float}
rho_inf = 0.177268215
#------------
# Freestream Mach number, {float}
M_inf = 4.5099
#------------
# Ratio of specific heats, [Pa] (constant), {float}
g_inf = 1.4
#------------
# Angle of attack
alpha = 0. * np.pi/180
#------------
# Sideslip angle
beta = 0. * np.pi/180
#------------
""" -- TRANSITION -- """
#------------
# Set the point of transition (Re or streamline based length) OR set to False
# to use correlation.
criticalLocation = 0
#------------
# Set to 'laminar' or 'turbulent' if you want to assume and ignore transition
regime = 'turbulent'
#------------
# Roughness induced transition location (streamline coordinate)
roughnessCoordinate = 0
#------------
# Set true to have no transitional region (i.e. fully turbulent when tripped).
immediateTransitionFlag = 1
#------------
""" -- VEHICLE -- """
#------------
# Wall temperature of the vehicle surface [K], {float}
T_wall = 316.2
#------------
# Reference area for aerodynamic force coefficients
S_ref = 3.
#------------
# Reference length for aerodynamic moment coefficients (measured from nose)
L_ref = 1.5
#------------
""" -- POST PROCESSING -- """
#------------
# If you DON'T want to save a .csv file of local properties, set flag to 0
writeDataFlag = 1 # {bool}
#------------
# Additional properties to save to a .csv file (called flowData.csv) {List}
# You will always get ['x', 'y', 'z', 'cf', 'ch', 'Velocity', 'T_aw']
# Choose from ['M', 'rho', 'p', 'cp', 'T_e', 'U', 'V', 'W', 'BL Regime',
# 'Dev Length', 'Re']
additionalProperties = ['Dev Length', 'Re', 'BL Regime'] # {list of str}
#------------
# Write data to this filename
outputFilename = 'flowData' # {str}
#------------
# Save data to VTK format for nice viewing in ParaView
vtkFlag = 0
#------------
# Save data to csv
csvFlag = 0
#------------
###############################################################################
""" You probably (hopefully) won't have to change anything below this line. """
###############################################################################
#%% The code
class Fluid(object):
    """ Contains all the constant fluid properties and the freestream
    state, plus a curve-fit specific heat calculation for air.
    """
    def __init__(self):
        # Sutherland law reference conditions and constants
        self.mu_0 = 1.716e-5
        self.T_0 = 273
        self.sutherland_S = 111
        # Gas constants
        self.R_universal = 8.314510
        self.R_air = 287.058
        self.molarMass_air = 28.964923942499997
        # Freestream conditions (taken from the module-level inputs)
        self.T_inf = T_inf
        self.p_inf = p_inf
        self.g_inf = g_inf
        # Freestream speed of sound [m/s]
        self.a_inf = (self.g_inf * self.R_air * self.T_inf) **0.5
        # Constant specific heat value {float}
        self.constantCp = 1.015
    def calculate_cp(self, T):
        """
        Calculate the specific heat capacity of air at temperature T [K]
        using the polynomial curve fit taken from NASA's CEA code:
        McBride, B. Zehe, M. Gordon, S. (2002)
        "NASA Glenn Coefficients for Calculating Thermodynamic Properties of Individual Species"
        molar mass air = 28.964923942499997
        Specific heat capacities at 298.15 K:
        Cp{N2, O2, Ar, CO2} = 29.124, 29.378, 20.786, 37.135

        T is clamped to the fits' validity range [200 K, 6000 K].
        Returns the mole-fraction weighted mixture cp.
        """
        verbose = 0
        if T < 200.:
            T = 200.
            if verbose:
                # print() form is identical for a single argument in py2/py3
                print("Problem, Temp < 200 K, I'll set it to 200 K")
        elif T > 6000.:
            T = 6000.
            # BUGFIX: this warning was previously behind "if 1:" (debug
            # leftover) and printed unconditionally, unlike the low-T branch.
            if verbose:
                print("Problem, Temp > 6000 K, I'll set it to 6000 K")
        # Two coefficient sets: low range (200-1000 K) and high (1000-6000 K)
        if T < 1000.:
            N2 = [2.210371497E+04, -3.818461820E+02, 6.082738360E+00, -8.530914410E-03, 1.384646189E-05, -9.625793620E-09, 2.519705809E-12]
            O2 = [-3.425563420E+04, 4.847000970E+02, 1.119010961E+00, 4.293889240E-03, -6.836300520E-07,-2.023372700E-09, 1.039040018E-12]
            Ar = [0., 0., 2.5, 0., 0., 0., 0.]
            CO2 = [4.943650540E+04, -6.264116010E+02, 5.301725240E+00, 2.503813816E-03, -2.127308728E-07, -7.689988780E-10, 2.849677801E-13]
        else:
            N2 = [5.877124060E+05, -2.239249073E+03, 6.066949220E+00, -6.139685500E-04, 1.491806679E-07, -1.923105485E-11, 1.061954386E-15]
            O2 = [-1.037939022E+06, 2.344830282E+03, 1.819732036E+00, 1.267847582E-03,-2.188067988E-07, 2.053719572E-11, -8.193467050E-16]
            Ar = [2.010538475E+01, -5.992661070E-02, 2.500069401E+00, -3.992141160E-08, 1.205272140E-11, -1.819015576E-15, 1.078576636E-19]
            CO2 = [1.176962419E+05, -1.788791477E+03, 8.291523190E+00, -9.223156780E-05, 4.863676880E-09, -1.891053312E-12, 6.330036590E-16]
        # Molar cp [J/(mol.K)] = R * polynomial(T)
        coefficients = 8.314510 * np.array([N2, O2, Ar, CO2])
        temperatureVector = np.array([T**-2, T**-1, 1., T, T**2, T**3, T**4])
        # Per-species specific cp (divide by molar mass [g/mol])
        cp_species = np.dot(coefficients, temperatureVector) / np.array([28.01340, 31.99880, 39.94800, 44.00950])
        # Mole-fraction weighted mixture cp for standard air
        # (the unused local gamma computation was removed)
        cp_air = np.sum(cp_species * np.array([.78084, .20947, .009365, .000319]))
        return cp_air
#%%
class Streamline(Fluid):
    """ This class takes care of the streamlines. It imports them, calculates
    the running length and then sorts them to efficiently calculate the local
    running length in each cell.
    """
    def __init__(self):
        """Nothing to initialise; all state is created by importer()."""
    def importer(self, filename, FieldFlag=0):
        """ This function imports the streamline data by iterating over the
        Tecplot generated file. Since we are iterating over it anyway, we
        calculate the length of the streamline for every point as well, saving
        it in self.streamlineLengths.
        No flow properties along the streamline are saved - only the length,
        and x, y, z coordinates. The flow properties for each area element are
        imported from the actual solution (.triq file).
        Returns (streamlines, total lengths, max step count), or
        (None, None, None) when streamlineFlag is off.
        """
        if not streamlineFlag:
            print "No streamlines - we'll use a coordinate based running length"
            return (None, None, None)
        print 'Importing streamline data...'
        self.filename = filename
        with open(self.filename, 'r') as dataFile:
            # Tecplot writes one 'Streamtrace' zone per streamline
            data = dataFile.read().split('Streamtrace')
        # Iterate through and construct the streamline dataframes.
        # 1000. acts as a sentinel value for "no data" entries.
        Streamline.streamlines = 1000.*np.ones((maxSteps, nStreamlines, 3))
        Streamline.streamlineLengths = 1000.*np.ones([maxSteps, nStreamlines])
        count = 0 # if FieldFlag else 1
        length = 0 # Initialise maxLength counter to be small
        streamlineStepLengths = []
        streamlineTotalLengths = []
        for zone in data:
            # This is iterating over the file, selecting the streamlines
            #print 'Zone', count
            zone = zone.strip().split('\n')
            streamlineStepLength = len(zone)
            streamlineStepLengths.append(int(streamlineStepLength))
            # if count == 0:
            # # Delete the surface label
            # del zone[1]; del zone[0]
            # if FieldFlag == False:
            # # We don't want to read and store the velocity field data
            # count += 1; continue
            L = 0.
            rowCount = 0
            coords = []
            streamlineLength = []
            for row in zone:
                # This is iterating over the individual streamlines
                row = row.split(',')
                if rowCount == 0:
                    x, y, z = (float(row[0]), float(row[1]), float(row[2]))
                else:
                    xNew, yNew, zNew = (float(row[0]), float(row[1]), float(row[2]))
                    # Accumulate the running arc length along the streamline
                    L += ((x-xNew)**2 + (y-yNew)**2 + (z-zNew)**2)**0.5
                    x, y, z = xNew, yNew, zNew
                    # NOTE(review): x has just been set to xNew on the line
                    # above, so this recirculation check can never fire - it
                    # should probably compare against the previous x before
                    # the reassignment.
                    if xNew < x:
                        print "Warning: a streamline may be recirculating"
                        names = ['Streamline', 'x', 'y', 'z', 'Length']
                        properties = [count, xNew, yNew, zNew, L]
                        for c1, c2 in zip(names, properties):
                            print "%-10s %s" % (c1, c2)
                coords.append((x, y, z))
                streamlineLength.append(L)
                rowCount += 1
            # if count == 0 and FieldFlag:
            # # We're constructing the surface
            # self.field = zone
            # else:
            # # We're constructing a streamline
            # #print np.shape(coords)
            # streamlines[:len(zone), 3*(count):3*count+3] = np.array(coords)
            # streamlineLengths[:len(zone), count] = np.array(streamlineLength)
            # Store this streamline as one column of the 3D arrays
            Streamline.streamlines[:streamlineStepLength, count:count+1, :] = np.array(coords).reshape(streamlineStepLength, 1, 3)
            Streamline.streamlineLengths[:streamlineStepLength, count:count+1] = np.array(streamlineLength).reshape(streamlineStepLength, 1)
            streamlineTotalLengths.append(streamlineLength[-1])
            count += 1
        Streamline.maxStreamlineSteps = max(streamlineStepLengths)
        Streamline.streamlineLengths = Streamline.streamlineLengths[:Streamline.maxStreamlineSteps,:,]
        sortedLengthIndices = np.argsort(streamlineStepLengths)[::-1]
        # Sort the streamlines into order of decreasing step count
        Streamline.streamlines = Streamline.streamlines[:, sortedLengthIndices, :]
        sortedStepLengths = np.array(streamlineStepLengths)[sortedLengthIndices]
        Streamline.maxStepLength = sortedStepLengths[0]
        # Tolerance on positioning the streamlines
        tol = 0.001
        # The first (longest and hopefully ~first in x): its x-coordinates
        # are used as the reference when aligning the other streamlines.
        Streamline.firstStagnationPoint = Streamline.streamlines[:, 0, 0]
        for n in xrange(nStreamlines-1):
            # Iterate through the streamlines and adjust the starting position.
            # Skip the first (longest) streamline - we're adjusting all the others
            # relative to it.
            n += 1
            # Get length of current streamline
            length = sortedStepLengths[n]
            # Get current streamline
            streamline = Streamline.streamlines[:length, n:n+1, :]
            # Get the running length vector
            streamlineLength = Streamline.streamlineLengths[:length, n:n+1]
            # Get the starting x position of the current streamline
            xStartCurrent = streamline[0, 0, 0]
            try:
                # Try to find a position based on a small tolerance
                newRow = max((np.where(abs(Streamline.firstStagnationPoint - xStartCurrent) < tol)[0][0], 0))
            except:
                # If not increase the tolerance a lot to ensure we find a spot.
                if verbose:
                    print "Streamline adjustment failed (tol =", tol, "), increasing tolerance by 10."
                newRow = max((np.where(abs(Streamline.firstStagnationPoint - xStartCurrent) < tol*10)[0][0], 0))
            # move the streamline to new starting location
            if newRow + length >= Streamline.maxStepLength:
                # We are moving the streamline further back
                # Physically this means this streamline finishes behind the longest
                # Perhaps a swept wing where the trailing edge is behind the fuselage
                print "Warning: Attempted to push streamline outside the streamlines array!"
                Streamline.streamlines[newRow:newRow+length, n:n+1, :] = streamline
                Streamline.streamlineLengths[newRow:newRow+length, n:n+1] = streamlineLength
            else:
                Streamline.streamlines[newRow:newRow+length, n:n+1, :] = streamline
                Streamline.streamlineLengths[newRow:newRow+length, n:n+1] = streamlineLength
                # Overwrite old area with the sentinel value
                Streamline.streamlines[:newRow, n:n+1, :] = 1000.*np.ones((newRow, 1, 3))
        # Adjust streamlines to the actual maximum streamline step length not
        # the maximum possible tecplot streamline steps
        Streamline.streamlines = Streamline.streamlines[:Streamline.maxStepLength, :, :]
        # Finished importing and calculating lengths
        print 'Streamline import and length calculation complete.', count-1, 'streamlines imported with max', self.maxStreamlineSteps, 'steps.', '\n'
        if verbose:
            print 'Maximum calculated streamline length is', max(Streamline.streamlineLengths), 'units.'
        # Check to ensure the longest streamline isn't weirdly longer than the next
        perCentLonger = (sortedStepLengths[0] - sortedStepLengths[1])/sortedStepLengths[1]
        if perCentLonger >= 10.:
            print "Warning: The longest streamline is", perCentLonger, "% longer than the next longest"
        """
        Include some checks here. Is max length >> length of vehicle?
        Any streamline in the opposite direction to the flow (recirculation)
        Any streamline >> longer than all the others/neighbours?
        """
        return Streamline.streamlines, streamlineTotalLengths, Streamline.maxStepLength
#%%
class Data(Streamline):
    def __init__(self, filename):
        """Set up the flow-data container and decode the module-level
        transition settings into boolean flags on the instance.
        """
        Streamline.__init__(self)
        self.filename = filename
        self.flowData = pd.DataFrame()
        # Set the defaults for transition
        self.naturalTransitionFlag = 0
        self.ReynoldsTransitionFlag = 0
        self.coordinateTransitionFlag = 0
        self.roughnessInducedFlag = 0
        self.laminarOnlyFlag = 0
        self.turbulentOnlyFlag = 0
        self.immediateTransitionFlag = 0
        self.coneCorrectionFlag = 1
        # Work out what we want to do with transition.
        # NOTE(review): because the first three branches cover ==0, >1000 and
        # <1000, the roughnessCoordinate branch can only be reached when
        # criticalLocation is exactly 1000 - confirm this chain is intended.
        if criticalLocation == 0:
            self.naturalTransitionFlag = 1
        elif criticalLocation > 1000.:
            self.ReynoldsTransitionFlag = 1
            if verbose:
                print "Using a Reynolds number based transition criterion."
        elif criticalLocation < 1000.:
            self.coordinateTransitionFlag = 1
            if verbose:
                print "Using a Streamline coordinate length based transition criterion."
        elif roughnessCoordinate != 0:
            self.roughnessInducedFlag = 1
            if verbose:
                print "Using a roughness location based transition criterion."
        if regime == 'laminar':
            self.laminarOnlyFlag = 1
            if verbose:
                print "Laminar only simulation."
        elif regime == 'turbulent':
            self.turbulentOnlyFlag = 1
            if verbose:
                print "Turbulent only simulation."
        if immediateTransitionFlag:
            self.immediateTransitionFlag = 1
            if verbose:
                print "Immediate transition - no transitional flow region."
        else:
            # NOTE(review): this else is bound to "if immediateTransitionFlag",
            # so the message prints whenever immediate transition is off; it
            # was probably meant as the fallback of the criticalLocation chain.
            print "Sorry, not sure what you want to do about transition."
def triqImporter(self, FieldFlag=1):
""" Imports the area data from the Tecplot field data.
"""
lineNumber = 0
print 'Importing flow data...'
with open(self.filename, 'r') as dataFile:
for line in dataFile:
if lineNumber == 0:
# We're on the first line
#print line
self.nVertices, self.nTriangles, nScalars = (int(x) for x in line.split())
lineNumber += 1
# Read in the vertex information
self.vertices = pd.read_csv(self.filename, delim_whitespace=1,
names=['x', 'y', 'z'],
skiprows=1, nrows=self.nVertices, memory_map=1)
# Read in the triangle information
self.triangles = pd.read_csv(self.filename, delim_whitespace=1,
names=['v1', 'v2', 'v3'],
skiprows=self.nVertices+1, nrows=self.nTriangles,
memory_map=1)
if sampleFlag:
self.triangles = self.triangles.sample(sampleAmount).transpose()
else:
self.triangles = self.triangles.transpose()
# Read in the flow information
temp = pd.read_csv(self.filename, delim_whitespace=1,
names=["rho","U","V","W","P"],
skiprows=self.nVertices+2*self.nTriangles+1, nrows=2*self.nVertices,
memory_map=1)
self.flow = temp.iloc[1::2, :].reset_index()
if sampleFlag:
self.nTriangles = sampleAmount
print "Field import complete", self.nTriangles, 'elements,', self.nVertices, 'vertices.', '\n'
return self.vertices, self.triangles, self.flow
def getProperties(self):
"""
Calculate all the flow properties of the triangles and add it all to the
flowData dataframe. This function just applies the master function to
each row of the triangles DataFrame (list of vertices).
It calculates the centroid and averages the data to the centroid.
"""
print 'Running main code now...'
# print self.nTriangles, 'elements,', self.nVertices, 'vertices.', '\n'
self.count = 1
self.startTime = time.time()
self.percentComplete = 0
# Set up counters to keep track of problematic cells.
self.badMachCount = 0
self.badCfCount = 0
self.badTempCount = 0
self.badVelCount = 0
# Watch for very cold wall - Switch to Spalding & Chi
self.spaldingFlag = 0
# Watch for high Mach numbers - Switch to Coles
self.colesFlag = 0
# Run the main calculation
self.flowData = pd.DataFrame(self.triangles.apply(self.master, axis=0))
timeElapsed = time.time() - self.startTime
m, s = divmod(timeElapsed, 60); h, m = divmod(m, 60)
print 'Viscous correction code complete.', '\n'
print 'Time elapsed', "%d:%02d:%02d" % (h, m, s)
print 'Average time per loop', timeElapsed/self.count
if self.spaldingFlag:
print "Warning: T_aw/T_w < 0.2 was encountered - Spalding & Chi method was employed but be careful of results."
if self.colesFlag:
print "Warning: M > 10 was encountered, the van Driest is known to be inaccurate - Coles' method (1964) might be better here."
print "Bad cell counts (Total %d):" % self.nTriangles
names = ['Mach', 'cf', 'T', 'Vel']
if sampleFlag:
amounts = [(self.badMachCount/float(sampleAmount) * 100., self.badMachCount),
(self.badCfCount/float(sampleAmount) * 100., self.badCfCount),
(self.badTempCount/float(sampleAmount) * 100., self.badTempCount),
(self.badVelCount/float(sampleAmount) * 100., self.badVelCount)]
else:
amounts = [(self.badMachCount/float(self.nTriangles) * 100., self.badMachCount),
(self.badCfCount/float(self.nTriangles) * 100., self.badCfCount),
(self.badTempCount/float(self.nTriangles) * 100., self.badTempCount),
(self.badVelCount/float(self.nTriangles) * 100., self.badVelCount)]
for c1, c2 in zip(names, amounts):
print "%-10s %s" % (c1, c2)
print '\n'
return self.flowData.transpose()
    def master(self, row):
        """Compute all per-triangle geometric and viscous quantities.

        Called once per row of the triangle table (``row`` holds the three
        1-based vertex indices of one STL/triq triangle). To avoid Python
        call overhead over a potentially very long list, almost everything
        is computed inline here, which makes the method long.

        Properties calculated per triangle:
            A          - area from the cross product of two edge vectors.
            n          - outward-facing normal from the same cross product.
            Cx, Cy, Cz - centroid coordinates.
            Re         - local Reynolds number from an interpolated running
                         length based on the two nearest streamlines.
            Cf         - local skin-friction coefficient.
            Ch         - local heat-transfer coefficient (Stanton number).

        Flow properties (rho, U, V, W, p) are taken from the Cart3D solution
        and averaged over the three vertices (Cart3D normalises against
        freestream values and gamma = 1.4, so they are re-dimensionalised
        below). A distance-weighted average would be better for large
        triangles.

        Returns a pandas Series of the computed quantities, keyed by name.
        """
        """
        if row some multiple of total number of triangles:
            print out a status update and estimate of time
        """
        # --- Progress reporting: print percent complete and an ETA every
        # reportingInterval percent (every 1% when verbose, else every 5%),
        # plus a rough initial estimate at element 1000.
        #print "count", self.count
        if verbose:
            reportingInterval = 1
        else:
            reportingInterval = 5
        timeElapsed = time.time() - self.startTime
        if self.count%(reportingInterval * self.nTriangles/100) == 0 or self.count == 1000:
            m, s = divmod(timeElapsed, 60); h, m = divmod(m, 60)
            print self.percentComplete, '% of elements completed so far. Wall clock time', "%d:%02d:%02d" % (h, m, s)
            printFlag = 0
            if self.percentComplete > 0:
                # ETA by linear extrapolation of elapsed time vs percent done.
                timeRemaining = timeElapsed *(100 - self.percentComplete)/self.percentComplete
                mRemaining, sRemaining = divmod(timeRemaining, 60)
                hRemaining, mRemaining = divmod(mRemaining, 60)
                print "Approximately", "%d:%02d:%02d" % (hRemaining, mRemaining, sRemaining), "remaining."
                printFlag = 1
            if self.count == 1000 and not printFlag:
                # First rough estimate: scale per-element time to all triangles.
                timeRemaining = timeElapsed/1000. * self.nTriangles
                mRemaining, sRemaining = divmod(timeRemaining, 60)
                hRemaining, mRemaining = divmod(mRemaining, 60)
                print "Rough initial estimate:", "%d:%02d:%02d" % (hRemaining, mRemaining, sRemaining), "remaining."
            self.percentComplete += reportingInterval; #self.count += 1
        # --- Geometry ---
        # Triangle vertex indices: the STL/triq file is 1-indexed, the
        # vertices/flow DataFrames are 0-indexed, hence the -1.
        v1i, v2i, v3i = row[0] - 1, row[1] - 1, row[2] - 1
        # NOTE(review): a 0-based index is invalid when it *equals*
        # nVertices, so this check probably wants >= rather than > — confirm.
        if v1i > self.nVertices or v2i > self.nVertices or v3i > self.nVertices:
            print 'Vertex indexing has died - max > than number of vertices.'
        # (x, y, z) coordinates of each vertex.
        v1 = np.array(self.vertices.iloc[v1i])
        v2 = np.array(self.vertices.iloc[v2i])
        v3 = np.array(self.vertices.iloc[v3i])
        # Two edge vectors spanning the triangle.
        v1v2 = v2 - v1
        v1v3 = v3 - v1
        # Area and normal from the cross product of the edge vectors.
        area = 0.5 * np.linalg.norm(np.cross(v1v2, v1v3))
        # NOTE(review): |cross| == 2*area, so cross/area has magnitude 2 —
        # this "normal" is not unit length; confirm downstream usage.
        normal = tuple(np.cross(v1v2, v1v3)/area)
        # Centroid coordinates (mean of the three vertices per axis).
        centroidx = np.mean([v1[0], v2[0], v3[0]])
        centroidy = np.mean([v1[1], v2[1], v3[1]])
        centroidz = np.mean([v1[2], v2[2], v3[2]])
        centroid = (centroidx, centroidy, centroidz)
        # --- Flow properties at the centroid ---
        # Simple (unweighted) vertex average. Column order: Cp,Rho,U,V,W,Pressure.
        properties = np.mean([self.flow.iloc[v1i], self.flow.iloc[v2i], self.flow.iloc[v3i]], axis=0)
        self.rho, U, V, W, self.p = properties[1], properties[2], properties[3], properties[4], properties[5]
        # Re-dimensionalise: Cart3D normalises rho by rho_inf, velocities by
        # a_inf, and pressure by rho_inf*a_inf^2.
        self.rho *= rho_inf; U *= Fluid.a_inf; V *= Fluid.a_inf; W *= Fluid.a_inf; self.p *= rho_inf*Fluid.a_inf**2.
        # print 'rho', self.rho, 'U', U, 'V', V, 'W', W, 'p', self.p
        # Clamp degenerate Cart3D data (known issue with small cut cells):
        # pressure and density are floored to small positive values.
        if self.p < 1e-1:
            self.p = 1e-1
            if verbose:
                print "Warning: Pressure < 1e-1 at", v1, v2, v3, "setting to 1e-1 Pa."
        if self.rho < 1e-6:
            self.rho = 1e-6
            if verbose:
                print "Warning: Density < 1e-6 at", v1, v2, v3, "setting to 1e-6 kg/m^3."
        # Local velocity magnitude and unit direction vector.
        self.velocityMagnitude = (U**2. + V**2. + W**2.)**0.5
        velocityDirection = np.array([U, V, W], dtype='float64') / self.velocityMagnitude
        #print 'velocity', velocityMagnitude
        # Count (and optionally report) implausibly high velocities.
        if self.velocityMagnitude > 1.5*M_inf*Fluid.a_inf:
            self.badVelCount += 1
            if verbose:
                print "Warning: velocity > 1.5x freestrem at", v1, v2, v3
        # Edge temperature from the ideal gas law.
        self.T = self.p / (self.rho * Fluid.R_air)
        #print 'T', self.T
        if self.T > 800.:
            # High edge temperature: constant-Cp assumption becomes doubtful.
            self.badTempCount += 1
        # Local Mach number; suspiciously high values are reset to freestream
        # (typically wake/degenerate cells) and counted.
        try:
            self.M = self.velocityMagnitude/((g_inf*Fluid.R_air*self.T)**0.5)
            if self.M > 1.5 * M_inf:
                self.badMachCount += 1
                self.M = M_inf
            if self.M > 10.:
                self.colesFlag = 1
        except:
            print 'Check local sound speed at', v1, v2, v3
        # Dynamic viscosity at the edge and wall temperatures:
        # Keye's law below 95 K, Sutherland's law otherwise.
        if self.T < 95.:
            self.mu = (1.488 * 10**-6.) * self.T**0.5 / (1. + 122.1*(10.**(-5/self.T))/self.T)
            self.mu_wall = (1.488 * 10**-6.) * T_wall**0.5 / (1. + 122.1*(10.**(-5/T_wall))/T_wall)
        else:
            self.mu = Fluid.mu_0 * (self.T/Fluid.T_0)**(3./2) * ((Fluid.T_0 + Fluid.sutherland_S) / (self.T + Fluid.sutherland_S))
            self.mu_wall = Fluid.mu_0 * (T_wall/Fluid.T_0)**(3./2) * ((Fluid.T_0 + Fluid.sutherland_S) / (T_wall + Fluid.sutherland_S))
        #print 'mu/mu_wall', self.mu/self.mu_wall
        # Streamline-based running length (sets self.localRunningLength).
        self.calculate_runningLength(centroid)
        # Local Reynolds number based on that running length.
        self.localReynolds = self.rho * self.velocityMagnitude * self.localRunningLength / self.mu
        #print 'localReynolds', self.localReynolds
        # Start every cell as laminar; the Cf routines may hand over to the
        # transitional/turbulent routines by flipping these flags.
        self.laminar = 1; self.transitional = 0; self.turbulent = 0
        if self.laminar:
            self.calculate_laminarCf()
        if self.transitional:
            self.calculate_transitionalCf()
        if self.turbulent:
            self.calculate_turbulentCf()
        # Wall shear from the dynamic pressure and cf, directed along the
        # local velocity; force = shear * area. (Thickness form factors for
        # wings/bodies are not yet applied.)
        wallShearMagnitude = self.cf * 0.5 * self.rho * self.velocityMagnitude**2.
        wallShearVector = wallShearMagnitude * velocityDirection
        viscousForce = wallShearVector*area
        # Modified Reynold's analogy factor s (turbulent/laminar Prandtl numbers).
        Pr_T = 0.86; Pr_L = 0.71
        bracket = 1./(5.*0.4) * (1. - Pr_T) * ((np.pi**2.)/6. + 1.5*(1. - Pr_T)) + (Pr_L/Pr_T - 1.) + log(1. + (5./6.)*(Pr_L/Pr_T - 1.))
        s = Pr_T * (1. + 5.*(self.cf/2.)**0.5 * bracket)
        # Stanton number from the modified Reynold's analogy.
        ch = (1./s) * self.cf/2.
        # Heat transfer coefficient.
        h = ch*self.rho*self.velocityMagnitude*Fluid.calculate_cp(self.T)
        # Heat flux into the wall, driven by (recovery temperature - T_wall).
        bracket = self.T * (1. + self.r * (g_inf - 1.)/2. * self.M**2.) - T_wall
        q_wall = ch * self.rho*self.velocityMagnitude * Fluid.calculate_cp(self.T) * bracket
        if verbose:
            print 'Local properties...'
            names = ['area', 'centroidx', 'centroidy', 'centroidz', 'normal', 'rho', 'U', 'V', 'W', 'p', 'cf', 'Ff']
            properties = [area, centroidx, centroidy, centroidz, normal, self.rho, U, V, W, self.p, self.cf, viscousForce]
            for c1, c2 in zip(names, properties):
                print "%-10s %s" % (c1, c2)
            print '\n'
        # Increment the element counter
        self.count += 1
        return pd.Series({'A': area,
                          'x': centroid[0],
                          'y': centroid[1],
                          'z': centroid[2],
                          'n': normal,
                          'rho': self.rho,
                          'U': U,
                          'V': V,
                          'W': W,
                          'Velocity': self.velocityMagnitude,
                          'M': self.M,
                          'p': self.p,
                          'cf': self.cf,
                          'ch': ch,
                          #'cp': self.cp,
                          'BL Regime': self.BLregime,
                          'Dev Length': self.localRunningLength,
                          'Re': self.localReynolds,
                          'Ff': viscousForce,
                          'T_e': self.T,
                          'T_aw': self.T_adiabaticWall,
                          'q_wall': q_wall})
    def calculate_runningLength(self, centroid, searchBracket=200):
        """Set ``self.localRunningLength`` for the point ``centroid``.

        If streamline data is available (``streamlineFlag``), the running
        length is linearly interpolated between the two nearest streamlines;
        otherwise a simple cartesian running length (x coordinate plus
        ``LEoffset``) is used. ``searchBracket`` is the number of streamline
        steps in the search window centred on the estimated x position.
        The result is floored at 0.005 m in lieu of proper blunt
        leading-edge treatment.
        """
        # Firstly check we actually want the streamline running length
        if not streamlineFlag:
            # MAKE SURE THIS ISN'T NEGATIVE
            self.localRunningLength = centroid[0] + LEoffset
            if self.localRunningLength <= 0.005:
                # Need to include blunted leading edge effects here but for the moment
                # we'll just set it to 0.005
                self.localRunningLength = 0.005
            return
        # Broadcast the query point over a (searchBracket, nStreamlines, 3)
        # window so distances to every streamline sample can be vectorised.
        # self.Location = np.tile(centroid, (self.maxStreamlineSteps, nStreamlines))
        self.Location = np.broadcast_to(centroid, (searchBracket, nStreamlines, 3))
        currentX = centroid[0]
        # print 'Current centroid', centroid
        # Tolerance on finding the position on the streamlines
        tol = 0.001
        try:
            # Try to find a position based on a small tolerance
            rowPosition = max((np.where(abs(Streamline.firstStagnationPoint - currentX) < tol)[0][0], 0))
        except:
            # If not increase the tolerance a lot to ensure we find a spot.
            if verbose:
                print "Row position adjustment failed (tol =", tol, "), increasing tolerance by 10."
            rowPosition = max((np.where(abs(Streamline.firstStagnationPoint - currentX) < tol*10)[0][0], 0))
        # Select the step window around rowPosition, clamped at both ends.
        if rowPosition <= searchBracket/2:
            # We are at the top of the array
            self.streamlineSection = self.streamlines[:searchBracket, :, :]
        elif rowPosition >= Streamline.maxStepLength - searchBracket/2:
            # We are at the bottom of the array
            # NOTE(review): [searchBracket:] keeps everything AFTER the first
            # searchBracket rows, not the LAST searchBracket rows, so the
            # window size no longer matches self.Location's first axis —
            # probably meant [-searchBracket:]; confirm.
            self.streamlineSection = self.streamlines[searchBracket:, :, :]
        else:
            # We are in the middle
            self.streamlineSection = self.streamlines[rowPosition-searchBracket/2:rowPosition+searchBracket/2, :, :]
        # print "Streamline section goes between", self.streamlineSection[0, 0, 0], self.streamlineSection[-1, 0, 0]
        # delta x, delta y, delta z from location to every point
        self.deltas = self.Location - self.streamlineSection
        # Square the distances
        self.deltas = np.square(self.deltas)
        # Separate dx, dy and dz to sum together
        dx = self.deltas[:, :, 0]
        dy = self.deltas[:, :, 1]
        dz = self.deltas[:, :, 2]
        # Take the square root to find the Euclidean distance
        self.distances = np.sqrt(dx + dy + dz)
        """
        POTENTIAL SPEED IMPROVEMENT
        # possibly need to have:
        # temp = np.asfortranarray(self.distances)
        # streamlineMinimumsIndices = temp.argmin(axis=0)
        """
        """
        NEED TO INCLUDE GRAD CHECK HERE TO ENSURE STREAMLINES ARE ON THE CORRECT SIDE OF THE OBJECT
        """
        # Indices of two closest streamlines (column indices)
        # Column index of two closest streamline points
        neighbouringStreamlineIndices = self.distances.min(axis=0).argsort(kind='mergesort')[:2] # index
        # print 'neighbouringStreamlineIndices', neighbouringStreamlineIndices
        # Indices of the step number to the minimum distances
        # Row index of two closest streamline points
        neighbouringStreamlineStepIndices = self.distances.argsort(axis=0, kind='mergesort')[0, neighbouringStreamlineIndices]
        # print 'neighbouringStreamlineStepIndices', neighbouringStreamlineStepIndices
        # Distances to two closest streamlines
        neighbouringStreamlines_distances = self.distances[neighbouringStreamlineStepIndices, neighbouringStreamlineIndices] # value
        # print "neighbouringStreamline_distances", neighbouringStreamlines_distances
        if np.max(abs(neighbouringStreamlines_distances)) > 1.:
            print "WARNING: Closest streamline seems to be far away at", np.max(neighbouringStreamlines_distances), "m."
            print 'Current centroid', centroid
        # Running length at the two neighbouring streamline points
        # Need to correct the indexing because we only look at a window above
        # NOTE(review): this overwrites the step (row) indices with the
        # COLUMN indices plus rowPosition, discarding the argsort result
        # computed above — looks like a typo for
        # "neighbouringStreamlineStepIndices + rowPosition"; confirm.
        neighbouringStreamlineStepIndices = neighbouringStreamlineIndices + rowPosition
        neighbouringStreamlines_lengths = Streamline.streamlineLengths[neighbouringStreamlineStepIndices, neighbouringStreamlineIndices]
        # print 'neighbouringStreamlines_lengths', neighbouringStreamlines_lengths
        # Linearly interpolate between two neighbouring streamlines
        self.localRunningLength = float(neighbouringStreamlines_lengths[0] + neighbouringStreamlines_distances[0]*np.diff(neighbouringStreamlines_lengths)/np.sum(neighbouringStreamlines_distances))
        # print 'localRunningLength', self.localRunningLength
        if self.localRunningLength <= 0.005:
            # Need to include blunted leading edge effects here but for the moment
            # we'll just set it to 0.005
            self.localRunningLength = 0.005
    def calculate_laminarCf(self, checkFlag=1):
        """Laminar skin-friction coefficient via the reference-temperature
        (Eckert) method; sets ``self.cf``, ``self.r``, ``self.T_adiabaticWall``
        and ``self.BLregime`` and returns ``self.cf``.

        When ``checkFlag`` is true the configured transition criterion is
        evaluated first; if the cell is deemed transitional/turbulent the
        laminar/transitional/turbulent flags are flipped and the method
        returns early (returning None) so master() can dispatch to the
        appropriate routine.
        """
        # Check to ensure flow isn't transitional
        if checkFlag:
            if not self.laminarOnlyFlag:
                # Not doing a laminar only analysis
                if self.turbulentOnlyFlag:
                    # Running turbulent only analysis
                    self.laminar = 0; self.transitional = 0; self.turbulent = 1
                    return
                elif self.naturalTransitionFlag:
                    # Natural transition criterion (Mach-dependent critical Re).
                    self.Re_critical = 10.**(6.421 * exp((1.209e-4) * self.M**2.641))
                    """
                    NEED TO INCLUDE THE WING SWEEP STUFF HERE
                    Re_critical = Re_critical*(0.787 * cos(wingLEsweep)**4.346 - 0.7221*exp(-0.0991*wingLEsweep) + 0.9464)
                    """
                    if self.localReynolds >= self.Re_critical:
                        # The flow is transitional, break out of the laminar analysis
                        self.laminar = 0; self.transitional = 1; self.turbulent = 0
                        return
                elif self.roughnessInducedFlag:
                    # Roughness induced transition condition (not implemented).
                    pass
                elif self.ReynoldsTransitionFlag:
                    # Critical Reynolds criterion.
                    # NOTE(review): this trips transition when localReynolds
                    # is BELOW the critical value, the opposite sense of the
                    # natural-transition test above — confirm the comparison
                    # direction (same applies to the coordinate test below).
                    if criticalLocation >= self.localReynolds:
                        self.laminar = 0; self.transitional = 1; self.turbulent = 0
                        return
                elif self.coordinateTransitionFlag:
                    if criticalLocation >= self.localRunningLength:
                        self.laminar = 0; self.transitional = 1; self.turbulent = 0
                        return
        # The above transition checks all showed that it was laminar flow,
        # continue laminar analysis:
        # Calculate the laminar skin friction coefficient
        # Set recovery factor
        self.r = 0.85 # van Driest says 0.85 to 0.89 for lam to turbs
        # Calculate the adiabatic wall temperature
        self.T_adiabaticWall = (1. + self.r*((g_inf - 1)/2.) * self.M**2.) * self.T
        # T_awOnT = self.T_adiabaticWall/self.T
        # Reference temperature (Meador-Smart style correlation).
        # T_reference = self.T*(0.45 * 0.55 * T_awOnT + 0.16*self.r*(g_inf - 1)/2. * self.M**2.)
        T_reference = self.T*(1. + 0.032*self.M**2. + 0.58 * (T_wall/self.T - 1.))
        # Reference density (ideal gas at edge pressure, reference temperature).
        rho_reference = self.p/(Fluid.R_air * T_reference)
        # Reference viscosity (Sutherland's law).
        mu_reference = 1.458e-6 * ((T_reference)**1.5) / (T_reference + 110.4)
        # Reference Reynolds number.
        # Re_reference = self.M * (g_inf*Fluid.R_air*T_reference)**0.5 * rho_reference * self.localRunningLength / mu_reference
        Re_reference = self.velocityMagnitude * rho_reference * self.localRunningLength / mu_reference
        # Blasius flat-plate laminar cf at reference conditions.
        try:
            cf = 0.664 / (Re_reference)**0.5
        except:
            print 'Calculation of laminar flow Cf failed'
            cf = 0.
        if self.coneCorrectionFlag:
            # Flow is 3D, apply cone rule correction
            cf *= 1.73
        self.cf = cf
        # BLregime: 0 = laminar, 0.5 = transitional, 1 = turbulent (for plotting).
        self.BLregime = 0
        return self.cf
    def calculate_transitionalCf(self):
        """Transitional skin-friction coefficient: a smooth blend between the
        laminar and turbulent values, weighted by an intermittency factor
        ``epsilon``. Sets ``self.cf`` and ``self.BLregime`` and returns
        ``self.cf`` (or None when ``immediateTransitionFlag`` hands straight
        over to the turbulent routine).
        """
        # Set recovery factor
        self.r = 0.87 # van Driest says 0.85 to 0.89 for lam to turbs
        # self.criticalRunningLength_start = (6.421*self.mu*exp(1.209e-4 * self.M**2.641)) / (self.rho * self.velocityMagnitude)
        # criticalRunningLength_end = self.criticalRunningLength_start * (1. + self.Re_critical**(-0.2))
        # Check we aren't turbulent
        if self.immediateTransitionFlag:
            # Ignoring transitional region
            self.laminar = 0; self.transitional = 0; self.turbulent = 1
            return
        # elif self.localRunningLength >= criticalRunningLength_end:
            # Flow is now fully turbulent
            # self.laminar = 0; self.transitional = 0; self.turbulent = 1
            # return
        else:
            # The above checks all showed we are still in a transitional region
            cf_laminar = self.calculate_laminarCf(checkFlag=0)
            try:
                cf_turbulent = self.calculate_turbulentCf(r=0.87)
            except:
                # Diagnostic dump; note cf_turbulent stays unbound here, so
                # the blend below will also fail and be caught.
                print "Calculation of transitional flow turbulent Cf failed"
                names = ['Local Re', 'mu', 'mu_wall', 'T_aw', 'T_edge', 'P', 'rho']
                properties = [self.localReynolds, self.mu, self.mu_wall, self.T_adiabaticWall, self.T, self.p, self.rho]
                for c1, c2 in zip(names, properties):
                    print "%-10s %s" % (c1, c2)
                print '\n'
            # Intermittency weighting between laminar and turbulent cf.
            # NOTE(review): self.criticalRunningLength_start is only assigned
            # in the commented-out line above — as written this raises
            # AttributeError unless it is set elsewhere; confirm.
            exponent = -3. *(exp(log(2)/(5.*self.criticalRunningLength_start) * self.Re_critical**(-0.2)*(self.localRunningLength - self.criticalRunningLength_start)) - 1.)**2.
            epsilon = 1 - exp(exponent)
            try:
                cf = (1-epsilon)*cf_laminar + epsilon*cf_turbulent
            except:
                # NOTE(review): on failure cf is left unbound, so the lines
                # below raise NameError — confirm intended fallback.
                print "Calculation of transitional flow Cf failed"
            if self.coneCorrectionFlag:
                # Flow is 3D, apply cone rule correction, blended the same way.
                cf *= (1-epsilon)*1.15 + epsilon*1.73
            self.cf = cf
            # This is to plot lam (BLregime = 0) vs transitional (0 < BLregime < 1)
            # vs turb flow (BLregime = 1)
            self.BLregime = 0.5
            return self.cf
    def calculate_turbulentCf(self, r=0.89):
        """Turbulent skin-friction coefficient via Van Driest II (implicit
        equation solved with brentq), falling back to Spalding & Chi for
        cold walls. Sets ``self.cf``, ``self.r``, ``self.T_adiabaticWall``
        and ``self.BLregime`` and returns ``self.cf``.

        r -- recovery factor (van Driest suggests 0.85 to 0.89 for laminar
             to turbulent boundary layers).
        """
        #print "Turbulent flow"
        self.r = r
        self.T_adiabaticWall = (1. + self.r*((g_inf - 1.)/2.) * self.M**2.) * self.T
        # Cold-wall check: Spalding & Chi is more accurate than Van Driest
        # when T_wall/T_aw < 0.2.
        if T_wall/self.T_adiabaticWall < 0.2:
            self.spaldingFlag = 1
            cf = self.calculate_turbulentCf_spaldingChi()
        else:
            # Set up the variables/coefficients for the Van Driest estimate
            aSquared = self.r * (g_inf - 1.)/2. * self.M**2. * self.T/T_wall
            b = self.T_adiabaticWall/T_wall - 1.
            denominator = (b**2. + 4.*aSquared)**0.5
            # A and B are clamped to [-1, 1] to keep arcsin in-domain.
            A = self.clean_A(aSquared, b, denominator)
            B = self.clean_B(aSquared, b, denominator)
            # Solve the implicit equation for skin friction
            cf_func = lambda cf: 4.15*log(self.localReynolds*cf*self.mu/self.mu_wall, 10) + 1.7 - ((np.arcsin(A) + np.arcsin(B)) / ((cf*(self.T_adiabaticWall - self.T)/self.T)**0.5))
            try:
                cf = brentq(cf_func, 1e-15, 0.1)
                # NOTE(review): this Spalding & Chi call discards its return
                # value and the Van Driest cf above is kept — confirm whether
                # it is a leftover comparison/debug call.
                self.calculate_turbulentCf_spaldingChi()
            except:
                if verbose:
                    print "Calculation of turbulent Cf failed, Flow properties at culprit cell below."
                    print "Am I in the Wake? Running length is", self.localRunningLength, "Set cf to zero."
                    names = ['Local Re', 'length', 'mu', 'mu_wall', 'T_aw', 'T_edge', 'T_wall', 'p', 'rho', 'velocity', 'Mach']
                    properties = [float(self.localReynolds), float(self.localRunningLength), self.mu, self.mu_wall, self.T_adiabaticWall, self.T, T_wall, self.p, self.rho, self.velocityMagnitude, self.M]
                    for c1, c2 in zip(names, properties):
                        print "%-10s %s" % (c1, c2)
                    print '\n'
                cf = 0.
                # USE THE SMART MEADER CORRELATION IF VAN DRIEST FAILS
                # Reference temperature
                # T_reference = self.T*(1. + 0.032*self.M**2. + 0.58 * (T_wall/self.T - 1.))
                #
                # # Reference density
                # rho_reference = self.p/(Fluid.R_air * T_reference)
                #
                # # Reference viscosity
                # mu_reference = 1.458e-6 * ((T_reference)**1.5) / (T_reference + 110.4)
                #
                # # Reference Reynolds
                # Re_reference = self.velocityMagnitude * rho_reference * self.localRunningLength / mu_reference
                #
                # cf = 0.02296/(Re_reference**0.139) * (rho_reference/self.rho)**0.861 * (mu_reference/self.mu)**0.139
                self.badCfCount += 1
        if self.coneCorrectionFlag:
            # Flow is 3D, apply cone rule correction
            cf *= 1.15
        # End cf (van driest or Spalding & Chi) estimate
        self.cf = cf
        # This is to plot lam (BLregime = 0) vs transitional (0 < BLregime < 1)
        # vs turb flow (BLregime = 1)
        self.BLregime = 1
        return self.cf
def clean_A(self, a, b, denominator):
"""
This function is required to avoid math domain errors in an arcsin
calculation in the Van Driest calculation.
"""
A = ( 2.*a - b ) / denominator
if A < -1.:
return -1.
elif A > 1.:
return 1.
else:
return A
def clean_B(self, a, b, denominator):
"""
This function is required to avoid math domain errors in an arcsin
calculation in the Van Driest calculation.
"""
B = ( b ) / denominator
if B < -1.:
return -1.
elif B > 1.:
return 1.
else:
return B
def calculate_turbulentCf_spaldingChi(self, r=0.89):
# Calculate the turbulent skin fricton coefficient using the Spalding Chi method
# This is more accurate than Van driest for T_wall/self.T_adiabaticWall < 0.2
# van Driest says r = 0.85 to 0.89 for lam to turbs
self.r = 0.89
# Set up the variables/coefficients for the estimate
# Various wall temperature ratios
TawOnT = self.T_adiabaticWall/self.T
TwOnT = T_wall/self.T
denominator = ( (TawOnT + TwOnT)**2. - 4.*TwOnT )**0.5
alpha = (TawOnT + TwOnT - 2.) / denominator
beta = (TawOnT - TwOnT) / denominator
F_c = (TawOnT - 1.) / (np.arcsin(alpha) + np.arcsin(beta))**2.
# Solve the implicit equation for the incompressible skin friction
LHS = self.localReynolds / (F_c*(TawOnT**0.772 * TwOnT**-1.474))
K = 0.4
E = 12.
kappa = lambda cf: K * (2./cf)**0.5
# bracket = (2. + (2. - kappa)**2.)*exp(kappa) - 6. - 2.*kappa - (1./12)*kappa**4. - (1./20)*kappa**5. - (1./60)*kappa**6. - (1./256)*kappa**7.
bracket = lambda cf: (2. + (2. - kappa(cf))**2.)*exp(kappa(cf)) - 6. - 2.*kappa(cf) - (1./12)*kappa(cf)**4. - (1./20)*kappa(cf)**5. - (1./60)*kappa(cf)**6. - (1./256)*kappa(cf)**7.
cf_inc_func = lambda cf: (1./12)*(2./cf)**2. + (1./(E*K**3.)) * bracket(cf) - LHS
try:
cf_inc = brentq(cf_inc_func, 5e-6, 0.1)
cf = (1./F_c) * cf_inc
except:
# print "Calculation of turbulent Cf failed, Flow properties at culprit cell below."
# print "Am I in the Wake? Running length is", self.localRunningLength, "Set cf to zero."
# names = ['Local Re', 'length', 'mu', 'mu_wall', 'T_aw', 'T_edge', 'T_wall', 'p', 'rho', 'velocity', 'Mach']
# properties = [float(self.localReynolds), float(self.localRunningLength), self.mu, self.mu_wall, self.T_adiabaticWall, self.T, T_wall, self.p, self.rho, self.velocityMagnitude, self.M]
# for c1, c2 in zip(names, properties):
# print "%-10s %s" % (c1, c2)
# print '\n'
cf = 0.
self.badCfCount += 1
if self.coneCorrectionFlag:
# Flow is 3D, apply cone rule correction
cf *= 1.15
return cf
#%%
class postProcessor(Data):
def __init__(self, Field, flowData):
self.flowData = flowData
self.propertiesToSave = ['cf', 'ch', 'Velocity', 'T_aw'] + additionalProperties
def viscousForceCoefficients(self):
""" This function will calculate and return the viscous force
coefficients. The forces are calculated and stored here.
Sign convention
x - positive toward tail (flow in the direction of positive x)
y - positive upwards
z - positive left spanwise sitting in cockpit facing forwards
"""
# Visous forces in body axes
viscousForces_body = sum(self.flowData.loc['Ff'])
# Transform to wind axes
viscousForces = viscousForces_body
# Calculate velocity
u_inf = M_inf*Fluid.a_inf
cl_viscous = viscousForces[0]/(0.5*S_ref*rho_inf*u_inf**2.)
cd_viscous = viscousForces[1]/(0.5*S_ref*rho_inf*u_inf**2.)
return cl_viscous, cd_viscous
def viscousMomentCoefficients(self):
""" Similar to the above this function will calculate the viscous
pitching moment coefficients.
Sign convention
Directions same as above
Positive rotations defined by RH rule
"""
cm_viscous = 5
return cm_viscous
def saveAs_CSV(self, outputFilename=outputFilename, properties=['x', 'y', 'z', 'cf', 'ch', 'Velocity', 'T_aw']):
""" This function will write the flow data to file so we can view it
in Paraview.
"""
outputFilename += '.csv'
if additionalProperties != []:
for i in additionalProperties:
properties.append(i)
# self.flowData = self.flowData.round(decimals=5)
self.flowData.to_csv(outputFilename, sep=',', columns=properties, index=0, index_label=0, float_format='%.3f')
print "output file saved as", outputFilename
def saveSlice_CSV(self, outputFilename=outputFilename, xSlice=[], ySlice=[], zSlice=[]):
""" Take a slice and save it to csv """
outputFilename += '_slice.csv'
# # This defines how 'narrow' slice we want. Why am I writing this if ParaView will do it fark
# tol = 1e-2
#
# # Pre allocate empty DF here?
# slicedData = pd.DataFrame()
#
# if not xSlice:
# # We have some slices along x to make
# for point in xSlice:
# # we want to slice at all of these points
# > xSlice[point] - tol
# self.flowData.transpose().loc[(self.flowData.transpose()["x"] > 0.599 & self.flowData.transpose()["x"] < 0.601 & self.flowData.transpose()["z"] == 0), "cf"]
# elif not ySlice:
# # Slices along y to take
# elif not zSlice:
# # And slices aong z
flowData = self.flowData.apply(pd.to_numeric, errors='ignore')
slicedData_indices = (flowData["z"] > -0.01) & (flowData["z"] < 0.01)
slicedData = flowData.loc[slicedData_indices]
slicedData.to_csv(outputFilename, sep=',', index=0, index_label=0)
print "Slices saved in", outputFilename
def saveAs_VTK(self, outputFilename):
"""
Write the flow data as a VTK unstructured grid - STILL NOT SURE WHY???
"""
outputFilename += '.vtu'
vtuFile = open(outputFilename, "w")
NumberOfPoints = Field.nVertices
NumberOfTriangles = Field.nTriangles
# Write the header
vtuFile.write("<VTKFile type=\"UnstructuredGrid\" byte_order=\"BigEndian\">\n")
vtuFile.write("<UnstructuredGrid>")
vtuFile.write("<Piece NumberOfPoints=\"%d\" NumberOfCells=\"%d\">\n" %
(NumberOfPoints, NumberOfTriangles))
# Write the point coordinates
vtuFile.write("<Points>\n")
vtuFile.write(" <DataArray type=\"Float32\" NumberOfComponents=\"3\"")
vtuFile.write(" format=\"ascii\">\n")
for index in range(NumberOfPoints-500000):
x, y, z = Field.vertices.iloc[index]
vtuFile.write(" %e %e %e\n" % (x, y, z))
vtuFile.write(" </DataArray>\n")
vtuFile.write("</Points>\n")
vtuFile.write("<Cells>\n")
# Write the connectivity
vtuFile.write(" <DataArray type=\"Int32\" Name=\"connectivity\"")
vtuFile.write(" format=\"ascii\">\n")
temp = Field.triangles.transpose()
for index in range(NumberOfTriangles):
v1, v2, v3 = temp.iloc[index]
vtuFile.write(" %d %d %d\n" % (v1, v2, v3))
vtuFile.write(" </DataArray>\n")
# Write the offsets
# vtuFile.write(" <DataArray type=\"Int32\" Name=\"offsets\"")
# vtuFile.write(" format=\"ascii\">\n")
# # Since all of the point-lists are concatenated, these offsets into the connectivity
# # array specify the end of each cell.
# for point in range(NumberOfTriangles):
# if two_D:
# conn_offset = 4*(1+i+j*nic)
# else:
# conn_offset = 8*(1+i+j*nic+k*(nic*njc))
# vtuFile.write(" %d\n" % conn_offset)
# vtuFile.write(" </DataArray>\n")
# Write the types
vtuFile.write(" <DataArray type=\"UInt8\" Name=\"types\"")
vtuFile.write(" format=\"ascii\">\n")
VTKtype = 5 # VTK_TRIANGLE
for point in range(NumberOfTriangles):
vtuFile.write(" %d\n" % VTKtype)
vtuFile.write(" </DataArray>\n")
vtuFile.write("</Cells>\n")
# Write the flow variables
vtuFile.write("<CellData>\n")
# Write variables from the dictionary.
for variable in self.propertiesToSave:
vtuFile.write(" <DataArray Name=\"%s\" type=\"Float32\" NumberOfComponents=\"1\"" % (variable))
vtuFile.write(" format=\"ascii\">\n")
for index in range(NumberOfTriangles):
vtuFile.write(" %e\n" % Field.flowData.transpose()[variable].iloc[index])
vtuFile.write(" </DataArray>\n")
# Write the velocity vector - have to do this separately because it's a vector
vtuFile.write(" <DataArray Name=\"Velocity vector\" type=\"Float32\" NumberOfComponents=\"3\"")
vtuFile.write(" format=\"ascii\">\n")
for index in NumberOfTriangles:
U, V, W = (Field.flowData.transpose()['U'].iloc[index],
Field.flowData.transpose()['V'].iloc[index],
Field.flowData.transpose()['W'].iloc[index])
vtuFile.write(" %e %e %e\n" % (U, V, W))
vtuFile.write(" </DataArray>\n")
# Write footers and close file
vtuFile.write("</CellData>\n")
vtuFile.write("</Piece>\n")
vtuFile.write("</UnstructuredGrid>\n")
vtuFile.write("</VTKFile>\n")
vtuFile.close()
return
#%% ----- Run the program
if __name__ == '__main__':
print time.strftime("%H:%M:%S"), 'Starting....'
# Run Tecplot in batch mode to generate and save the streamline data
# try:
# call(['tec360', '-b', 'Components.i.plt', 'retrieveStreamlines.mcr'])
# except:
# print 'Import of Tecplot streamline data failed'
# Initialise Fluid class - sets up basic fluid and freestream properties
Fluid = Fluid()
# Initialise streamline class
Streamlines = Streamline()
streamlineCoordinates, streamlineLengths, maxSteplength = Streamlines.importer(streamlineFilename)
StreamlinesDict = Streamline.__dict__
# Initialise a data class, this contains all the Field (Cart3D) data.
Field = Data(cart3DFilename)
# Import Cart3D data
vertices, triangles, flow = Field.triqImporter()
# Run the actual code - calculate viscous forces
flowData = Field.getProperties()
flowData = flowData.round(decimals=5)
post = postProcessor(Field, flowData)
if csvFlag:
post.saveAs_CSV()
if vtkFlag:
post.saveAs_CSV()
post.saveSlice_CSV()
DataDict = Field.__dict__
#Field.plotter()
| {
"repo_name": "AlexanderWard1/VC3D",
"path": "Integral Methods/addViscous_Bowcutt.py",
"copies": "1",
"size": "61090",
"license": "mit",
"hash": -7859063281651208000,
"line_mean": 41.7921146953,
"line_max": 203,
"alpha_frac": 0.5467834343,
"autogenerated": false,
"ratio": 3.8069421075590455,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4853725541859045,
"avg_score": null,
"num_lines": null
} |
"""Add visibility options to contribution fields
Revision ID: 093533d27a96
Revises: 9c4418d7a6aa
Create Date: 2017-11-30 17:15:07.141552
"""
import sqlalchemy as sa
from alembic import op
from indico.core.db.sqlalchemy import PyIntEnum
from indico.modules.events.contributions.models.fields import ContributionFieldVisibility
# Revision identifiers, used by Alembic to order migrations.
revision = '093533d27a96'        # this migration's id
down_revision = '9c4418d7a6aa'   # migration this one applies on top of
branch_labels = None
depends_on = None
def upgrade():
    """Add is_user_editable and visibility to events.contribution_fields."""
    # Add each column with a server default so existing rows are backfilled,
    # then drop the default so the application must always supply a value.
    new_columns = (
        sa.Column('is_user_editable', sa.Boolean(), nullable=False, server_default='true'),
        sa.Column('visibility', PyIntEnum(ContributionFieldVisibility),
                  nullable=False, server_default='1'),
    )
    for column in new_columns:
        op.add_column('contribution_fields', column, schema='events')
    for name in ('is_user_editable', 'visibility'):
        op.alter_column('contribution_fields', name, server_default=None, schema='events')
def downgrade():
    """Drop the columns added by upgrade(), in reverse order of creation."""
    for name in ('visibility', 'is_user_editable'):
        op.drop_column('contribution_fields', name, schema='events')
| {
"repo_name": "OmeGak/indico",
"path": "indico/migrations/versions/20180126_1130_093533d27a96_add_visibility_options_to_contribution.py",
"copies": "7",
"size": "1253",
"license": "mit",
"hash": -2915901649111873500,
"line_mean": 33.8055555556,
"line_max": 101,
"alpha_frac": 0.6983240223,
"autogenerated": false,
"ratio": 3.6424418604651163,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0014871516810956017,
"num_lines": 36
} |
"""add vpnsessions table
Revision ID: 12e8cae9c38
Revises: 25883baca06
Create Date: 2014-03-23 02:11:37.838134
"""
# Revision identifiers, used by Alembic to order migrations.
revision = '12e8cae9c38'       # this migration's id
down_revision = '25883baca06'  # migration this one applies on top of
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the vpnsessions table (one row per VPN connection)."""
    columns = [
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('gateway_id', sa.Integer(), nullable=False),
        sa.Column('gateway_version', sa.Integer(), nullable=False),
        sa.Column('user_id', sa.Integer(), nullable=False),
        sa.Column('profile_id', sa.Integer(), nullable=True),
        sa.Column('connect_date', sa.DateTime(), nullable=False),
        sa.Column('disconnect_date', sa.DateTime(), nullable=True),
        sa.Column('remote_addr', sa.String(), nullable=False),
        sa.Column('bytes_up', sa.Integer(), nullable=True),
        sa.Column('bytes_down', sa.Integer(), nullable=True),
    ]
    constraints = [
        sa.ForeignKeyConstraint(['gateway_id'], ['gateways.id'], ),
        sa.ForeignKeyConstraint(['profile_id'], ['profiles.id'], ),
        sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
        sa.PrimaryKeyConstraint('id'),
    ]
    op.create_table('vpnsessions', *(columns + constraints))
def downgrade():
    """Remove the vpnsessions table (discards all recorded session history)."""
    op.drop_table('vpnsessions')
| {
"repo_name": "bspavel/ccvpn",
"path": "alembic/versions/12e8cae9c38_add_vpnsessions_tabl.py",
"copies": "2",
"size": "1373",
"license": "mit",
"hash": 1457273431983146800,
"line_mean": 32.487804878,
"line_max": 63,
"alpha_frac": 0.6686088857,
"autogenerated": false,
"ratio": 3.4759493670886075,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5144558252788607,
"avg_score": null,
"num_lines": null
} |
"""Add WCF
Revision ID: a1dbd23261c3
Revises: 69e765223549
Create Date: 2017-10-11 22:28:26.411628
"""
from alembic import op
import sqlalchemy as sa
from pathlib import Path
from json import load
from collections import OrderedDict
# Revision identifiers, used by Alembic to order migrations.
revision = 'a1dbd23261c3'
down_revision = '69e765223549'
branch_labels = None
depends_on = None

# Load the confession text shipped alongside this migration file. The
# OrderedDict hook preserves chapter/paragraph document order on Python
# versions where plain dicts are unordered.
# Idiom fix: pass OrderedDict directly as the hook instead of wrapping it
# in an equivalent lambda.
with (Path(__file__).resolve().parent / f'{revision}_wcf.json').open() as f:
    wcf_data = load(f, object_pairs_hook=OrderedDict)

# Lightweight table definitions mirroring the live schema, so the data
# migration below can run without importing application models.
metadata = sa.MetaData()

confessions = sa.Table('confessions', metadata,
                       sa.Column('id', sa.Integer, primary_key=True),
                       sa.Column('command', sa.String, unique=True),
                       sa.Column('name', sa.String))

confession_chapters = sa.Table('confession_chapters', metadata,
                               sa.Column('id', sa.Integer, primary_key=True),
                               sa.Column('confession_id', sa.Integer, sa.ForeignKey('confessions.id')),
                               sa.Column('chapter_number', sa.Integer),
                               sa.Column('title', sa.String))

confession_paragraphs = sa.Table('confession_paragraphs', metadata,
                                 sa.Column('id', sa.Integer, primary_key=True),
                                 sa.Column('confession_id', sa.Integer, sa.ForeignKey('confessions.id')),
                                 sa.Column('chapter_number', sa.Integer),
                                 sa.Column('paragraph_number', sa.Integer),
                                 sa.Column('text', sa.Text))
def upgrade():
    """Insert the Westminster Confession of Faith into the confession tables."""
    bind = op.get_bind()
    inserted = bind.execute(
        confessions.insert(),
        dict(command='wcf', name='The Westminster Confession of Faith'))
    confession_id = inserted.inserted_primary_key[0]
    for chapter_key, chapter_data in wcf_data['chapters'].items():
        number = int(chapter_key)
        bind.execute(
            confession_chapters.insert(),
            dict(confession_id=confession_id, chapter_number=number,
                 title=chapter_data['title']))
        # One executemany() per chapter, exactly like the original migration.
        paragraph_rows = [
            dict(confession_id=confession_id, chapter_number=number,
                 paragraph_number=int(para_key), text=para_text)
            for para_key, para_text in chapter_data['paragraphs'].items()
        ]
        bind.execute(confession_paragraphs.insert(), *paragraph_rows)
def downgrade():
    """Delete the WCF confession plus all of its chapters and paragraphs."""
    bind = op.get_bind()
    lookup = bind.execute(confessions.select().where(confessions.c.command == 'wcf'))
    wcf_row = lookup.fetchone()
    confession_id = wcf_row['id']
    # Children first, then the parent row.
    bind.execute(confession_paragraphs.delete().where(
        confession_paragraphs.c.confession_id == confession_id))
    bind.execute(confession_chapters.delete().where(
        confession_chapters.c.confession_id == confession_id))
    bind.execute(confessions.delete().where(confessions.c.id == confession_id))
    lookup.close()
| {
"repo_name": "bryanforbes/Erasmus",
"path": "alembic/versions/a1dbd23261c3_add_wcf.py",
"copies": "1",
"size": "2904",
"license": "bsd-3-clause",
"hash": 8415405919201777000,
"line_mean": 37.72,
"line_max": 106,
"alpha_frac": 0.6022727273,
"autogenerated": false,
"ratio": 3.8260869565217392,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4928359683821739,
"avg_score": null,
"num_lines": null
} |
"""add_webpack_stats
Revision ID: 89a8aa1c611c
Revises: 87cbddd5b946
Create Date: 2018-01-04 14:16:23.592897
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
import zeus
# revision identifiers, used by Alembic.
revision = "89a8aa1c611c"
down_revision = "87cbddd5b946"
branch_labels = ()
depends_on = None
def upgrade():
    """Create the webpack_asset and webpack_entrypoint tables.

    Both tables reference job and repository rows with ON DELETE CASCADE and
    enforce uniqueness per job (filename for assets, name for entrypoints).
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "webpack_asset",
        sa.Column("job_id", zeus.db.types.guid.GUID(), nullable=False),
        sa.Column("filename", sa.Text(), nullable=False),
        sa.Column("size", sa.Integer(), nullable=True),
        sa.Column("chunk_names", postgresql.ARRAY(sa.String()), nullable=True),
        sa.Column("repository_id", zeus.db.types.guid.GUID(), nullable=False),
        sa.Column("id", zeus.db.types.guid.GUID(), nullable=False),
        sa.Column(
            "date_created",
            sa.TIMESTAMP(timezone=True),
            server_default=sa.text("now()"),
            nullable=False,
        ),
        sa.ForeignKeyConstraint(["job_id"], ["job.id"], ondelete="CASCADE"),
        sa.ForeignKeyConstraint(
            ["repository_id"], ["repository.id"], ondelete="CASCADE"
        ),
        sa.PrimaryKeyConstraint("id"),
        sa.UniqueConstraint("job_id", "filename", name="unq_webpack_asset"),
    )
    op.create_index(
        op.f("ix_webpack_asset_repository_id"),
        "webpack_asset",
        ["repository_id"],
        unique=False,
    )
    op.create_table(
        "webpack_entrypoint",
        sa.Column("job_id", zeus.db.types.guid.GUID(), nullable=False),
        sa.Column("name", sa.Text(), nullable=False),
        sa.Column("asset_names", postgresql.ARRAY(sa.String()), nullable=True),
        sa.Column("repository_id", zeus.db.types.guid.GUID(), nullable=False),
        sa.Column("id", zeus.db.types.guid.GUID(), nullable=False),
        sa.Column(
            "date_created",
            sa.TIMESTAMP(timezone=True),
            server_default=sa.text("now()"),
            nullable=False,
        ),
        sa.ForeignKeyConstraint(["job_id"], ["job.id"], ondelete="CASCADE"),
        sa.ForeignKeyConstraint(
            ["repository_id"], ["repository.id"], ondelete="CASCADE"
        ),
        sa.PrimaryKeyConstraint("id"),
        sa.UniqueConstraint("job_id", "name", name="unq_webpack_entrypoint"),
    )
    op.create_index(
        op.f("ix_webpack_entrypoint_repository_id"),
        "webpack_entrypoint",
        ["repository_id"],
        unique=False,
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the webpack tables; each table's index is removed first."""
    for table_name in ("webpack_entrypoint", "webpack_asset"):
        op.drop_index(
            op.f("ix_%s_repository_id" % table_name), table_name=table_name
        )
        op.drop_table(table_name)
| {
"repo_name": "getsentry/zeus",
"path": "zeus/migrations/89a8aa1c611c_add_webpack_stats.py",
"copies": "1",
"size": "2998",
"license": "apache-2.0",
"hash": 4497802606304673000,
"line_mean": 32.3111111111,
"line_max": 85,
"alpha_frac": 0.6070713809,
"autogenerated": false,
"ratio": 3.5563463819691576,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9662118173265875,
"avg_score": 0.0002599179206566347,
"num_lines": 90
} |
"""add white rabbit status
Revision ID: 63b625cf7b06
Revises: e83aa47e530b
Create Date: 2019-12-06 02:45:01.418693+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '63b625cf7b06'
down_revision = 'e83aa47e530b'
branch_labels = None
depends_on = None
def _port_columns(prefix):
    """Return a fresh list of the per-port White Rabbit status columns.

    The original migration spelled these out twice (port0/port1); generating
    them keeps the two ports identical by construction.
    """
    return [
        sa.Column(prefix + '_ad', sa.Integer(), nullable=True),
        sa.Column(prefix + '_link_asymmetry_ps', sa.Integer(), nullable=True),
        sa.Column(prefix + '_manual_phase_ps', sa.Integer(), nullable=True),
        sa.Column(prefix + '_clock_offset_ps', sa.Integer(), nullable=True),
        sa.Column(prefix + '_cable_rt_delay_ps', sa.Integer(), nullable=True),
        sa.Column(prefix + '_master_slave_delay_ps', sa.Integer(), nullable=True),
        sa.Column(prefix + '_master_rx_phy_delay_ps', sa.Integer(), nullable=True),
        sa.Column(prefix + '_slave_rx_phy_delay_ps', sa.Integer(), nullable=True),
        sa.Column(prefix + '_master_tx_phy_delay_ps', sa.Integer(), nullable=True),
        sa.Column(prefix + '_slave_tx_phy_delay_ps', sa.Integer(), nullable=True),
        sa.Column(prefix + '_hd', sa.Integer(), nullable=True),
        sa.Column(prefix + '_link', sa.Boolean(), nullable=True),
        sa.Column(prefix + '_lock', sa.Boolean(), nullable=True),
        sa.Column(prefix + '_md', sa.Integer(), nullable=True),
        sa.Column(prefix + '_rt_time_ps', sa.Integer(), nullable=True),
        sa.Column(prefix + '_nsec', sa.Integer(), nullable=True),
        sa.Column(prefix + '_packets_received', sa.Integer(), nullable=True),
        sa.Column(prefix + '_phase_setpoint_ps', sa.Integer(), nullable=True),
        sa.Column(prefix + '_servo_state', sa.String(), nullable=True),
        sa.Column(prefix + '_sv', sa.Integer(), nullable=True),
        sa.Column(prefix + '_sync_source', sa.String(), nullable=True),
        sa.Column(prefix + '_packets_sent', sa.Integer(), nullable=True),
        sa.Column(prefix + '_update_counter', sa.Integer(), nullable=True),
        sa.Column(prefix + '_time', sa.BigInteger(), nullable=True),
    ]


def upgrade():
    """Create the node_white_rabbit_status table.

    One row per (node_time, node): board/firmware/manufacture info followed by
    the same block of status columns repeated for each of the two ports.
    """
    op.create_table(
        'node_white_rabbit_status',
        sa.Column('node_time', sa.BigInteger(), nullable=False),
        sa.Column('node', sa.Integer(), nullable=False),
        sa.Column('board_info_str', sa.String(), nullable=True),
        sa.Column('aliases', sa.String(), nullable=True),
        sa.Column('ip', sa.String(), nullable=True),
        sa.Column('mode', sa.String(), nullable=True),
        sa.Column('serial', sa.String(), nullable=True),
        sa.Column('temperature', sa.Float(), nullable=True),
        sa.Column('build_date', sa.BigInteger(), nullable=True),
        sa.Column('gw_date', sa.BigInteger(), nullable=True),
        sa.Column('gw_version', sa.String(), nullable=True),
        sa.Column('gw_id', sa.String(), nullable=True),
        sa.Column('build_hash', sa.String(), nullable=True),
        sa.Column('manufacture_tag', sa.String(), nullable=True),
        sa.Column('manufacture_device', sa.String(), nullable=True),
        sa.Column('manufacture_date', sa.BigInteger(), nullable=True),
        sa.Column('manufacture_partnum', sa.String(), nullable=True),
        sa.Column('manufacture_serial', sa.String(), nullable=True),
        sa.Column('manufacture_vendor', sa.String(), nullable=True),
        *(_port_columns('port0') + _port_columns('port1') +
          [sa.PrimaryKeyConstraint('node_time', 'node')])
    )
def downgrade():
    # Reverse of upgrade(): drop the table (all stored status rows are lost).
    op.drop_table('node_white_rabbit_status')
| {
"repo_name": "HERA-Team/Monitor_and_Control",
"path": "alembic/versions/63b625cf7b06_add_white_rabbit_status.py",
"copies": "2",
"size": "4778",
"license": "bsd-2-clause",
"hash": -7190889092605620000,
"line_mean": 50.376344086,
"line_max": 75,
"alpha_frac": 0.6764336542,
"autogenerated": false,
"ratio": 3.143421052631579,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48198547068315795,
"avg_score": null,
"num_lines": null
} |
"""add wifi shards
Revision ID: 4860cb8e54f5
Revises: None
Create Date: 2015-08-02 17:24:14.293552
"""
import logging
from alembic import op
import sqlalchemy as sa
log = logging.getLogger('alembic.migration')
revision = '4860cb8e54f5'
down_revision = None
def upgrade():
    """Create the 16 wifi_shard_* tables, one per hex digit."""
    log.info('Create wifi_shard_* tables')
    # Raw DDL template; {id} is substituted with the shard's hex digit.
    stmt = """\
CREATE TABLE `wifi_shard_{id}` (
`mac` binary(6) NOT NULL,
`lat` double DEFAULT NULL,
`lon` double DEFAULT NULL,
`radius` int(10) unsigned DEFAULT NULL,
`max_lat` double DEFAULT NULL,
`min_lat` double DEFAULT NULL,
`max_lon` double DEFAULT NULL,
`min_lon` double DEFAULT NULL,
`country` varchar(2) DEFAULT NULL,
`samples` int(10) unsigned DEFAULT NULL,
`source` tinyint(4) DEFAULT NULL,
`created` datetime DEFAULT NULL,
`modified` datetime DEFAULT NULL,
`block_first` date DEFAULT NULL,
`block_last` date DEFAULT NULL,
`block_count` tinyint(3) unsigned DEFAULT NULL,
PRIMARY KEY (`mac`),
KEY `wifi_shard_{id}_country_idx` (`country`),
KEY `wifi_shard_{id}_created_idx` (`created`),
KEY `wifi_shard_{id}_modified_idx` (`modified`),
KEY `wifi_shard_{id}_latlon_idx` (`lat`, `lon`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8
"""
    for shard_id in '0123456789abcdef':
        op.execute(sa.text(stmt.format(id=shard_id)))
def downgrade():
    # No-op: the shard tables are not dropped when rolling back this revision.
    pass
| {
"repo_name": "therewillbecode/ichnaea",
"path": "alembic/versions/4860cb8e54f5_add_wifi_shards.py",
"copies": "1",
"size": "1391",
"license": "apache-2.0",
"hash": 3283938116329958000,
"line_mean": 24.2909090909,
"line_max": 53,
"alpha_frac": 0.6642703091,
"autogenerated": false,
"ratio": 2.940803382663848,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4105073691763848,
"avg_score": null,
"num_lines": null
} |
"""add winter seasonal category
Revision ID: 2eeb0ddd8f9
Revises: 14b52358eb42
Create Date: 2015-10-16 00:50:15.154646
"""
# revision identifiers, used by Alembic.
revision = '2eeb0ddd8f9'
down_revision = '14b52358eb42'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table
def upgrade():
    """Insert the 'Winter Seasonal' category and disable the old 'Seasonal' one."""
    # Lightweight table definition for the data changes below.
    category_table = table(
        'shopping_category',
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('name', sa.Unicode(75), nullable=False),
        sa.Column('daily_limit', sa.Integer, nullable=True),
        sa.Column('monthly_limit', sa.Integer, nullable=False),
        sa.Column('family_wide', sa.Boolean, nullable=False),
        sa.Column('order', sa.Integer, nullable=False),
        sa.Column('min_age', sa.Integer, nullable=True),
        sa.Column('max_age', sa.Integer, nullable=True),
        sa.Column('disabled', sa.Boolean, nullable=False)
    )
    winter_row = {
        'id': 12,
        'name': 'Winter Seasonal',
        'daily_limit': 1,
        'monthly_limit': 4,
        'family_wide': False,
        'order': 9,
        'disabled': False,
    }
    op.bulk_insert(category_table, [winter_row])
    op.execute(
        category_table.update()
        .where(category_table.c.name == op.inline_literal('Seasonal'))
        .values({'disabled': op.inline_literal(True)})
    )
def downgrade():
    # No-op: the inserted 'Winter Seasonal' row and the disabled flag on
    # 'Seasonal' are left in place on rollback.
    pass
| {
"repo_name": "jlutz777/FreeStore",
"path": "alembic/versions/2eeb0ddd8f9_add_winter_seasonal_category.py",
"copies": "1",
"size": "1405",
"license": "mit",
"hash": 7815905309395029000,
"line_mean": 27.6734693878,
"line_max": 75,
"alpha_frac": 0.6135231317,
"autogenerated": false,
"ratio": 3.5125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9593834564573545,
"avg_score": 0.006437713425291065,
"num_lines": 49
} |
"""add wishlists
Revision ID: 3519117eb69
Revises: 3d943ee3b4e
Create Date: 2013-11-03 14:10:07.861337
"""
# revision identifiers, used by Alembic.
revision = '3519117eb69'
down_revision = '3d943ee3b4e'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the event_items table (wishlist entries tied to event recipients)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('event_items',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=255), nullable=True),
    sa.Column('description', sa.Text(), nullable=True),
    sa.Column('cost', sa.Float(), nullable=True),
    sa.Column('quantity', sa.Integer(), nullable=True),
    sa.Column('active', sa.Boolean(), nullable=True),
    sa.Column('url', sa.String(length=255), nullable=True),
    sa.Column('event_id', sa.Integer(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    # use_alter=True emits the FK in a separate ALTER TABLE after creation --
    # SQLAlchemy's mechanism for table dependency cycles.
    sa.ForeignKeyConstraint(['event_id', 'user_id'], ['event_recipients.event_id', 'event_recipients.recipient_id'], name='fk_wishlist', use_alter=True),
    sa.PrimaryKeyConstraint('id')
    )
    ### end Alembic commands ###
def downgrade():
    # Reverse of upgrade(): drop the event_items table.
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('event_items')
    ### end Alembic commands ###
| {
"repo_name": "dirn/Secret-Santa",
"path": "migrations/versions/3519117eb69_add_wishlists.py",
"copies": "1",
"size": "1245",
"license": "bsd-3-clause",
"hash": -6247044707921538000,
"line_mean": 31.7631578947,
"line_max": 153,
"alpha_frac": 0.6722891566,
"autogenerated": false,
"ratio": 3.346774193548387,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4519063350148387,
"avg_score": null,
"num_lines": null
} |
"""add withdrawal reason
Revision ID: 378ee128c23f
Revises: 380d2c363481
Create Date: 2018-10-11 11:07:27.080104
"""
import model.utils
import sqlalchemy as sa
from alembic import op
from rdr_service.participant_enums import WithdrawalReason
# revision identifiers, used by Alembic.
revision = "378ee128c23f"
down_revision = "380d2c363481"
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade, e.g. upgrade_rdr for 'rdr'."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade, e.g. downgrade_rdr for 'rdr'."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_rdr():
    """Add withdrawal_reason and its free-text justification to every
    participant table (live, history, and summary).

    The original spelled out six near-identical add_column calls; the loop
    preserves the exact same DDL order (both columns per table, tables in
    the original sequence).
    """
    for table in ("participant", "participant_history", "participant_summary"):
        op.add_column(
            table, sa.Column("withdrawal_reason", model.utils.Enum(WithdrawalReason), nullable=True)
        )
        op.add_column(
            table, sa.Column("withdrawal_reason_justification", sa.UnicodeText(), nullable=True)
        )
def downgrade_rdr():
    """Drop the columns added by upgrade_rdr, in the original (reverse) order."""
    for table in ("participant_summary", "participant_history", "participant"):
        op.drop_column(table, "withdrawal_reason_justification")
        op.drop_column(table, "withdrawal_reason")
def upgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    # No metrics-database changes in this revision.
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    # No metrics-database changes in this revision.
    pass
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/378ee128c23f_add_withdrawal_reason.py",
"copies": "1",
"size": "2197",
"license": "bsd-3-clause",
"hash": 7220451943836488000,
"line_mean": 33.328125,
"line_max": 119,
"alpha_frac": 0.7068730086,
"autogenerated": false,
"ratio": 3.532154340836013,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9734971486717681,
"avg_score": 0.0008111725436665358,
"num_lines": 64
} |
"""Add withdrawals table
Revision ID: 305794ad46c5
Revises: 392c0a07a3c
Create Date: 2015-12-19 21:22:36.390203
"""
# revision identifiers, used by Alembic.
revision = '305794ad46c5'
down_revision = '392c0a07a3c'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Create the withdrawals table (unique per transaction_id, FK to payments)."""
    op.create_table('withdrawals',
    sa.Column('id', postgresql.UUID(), autoincrement=False, nullable=False),
    sa.Column('payment_id', postgresql.UUID(), autoincrement=False, nullable=True),
    sa.Column('transaction_id', sa.VARCHAR(length=128), autoincrement=False, nullable=False),
    # create_type=False: the currency_types ENUM is assumed to already exist,
    # so it is referenced rather than re-created.
    sa.Column('currency', postgresql.ENUM(u'BTC', u'LTC', u'DOGE', name='currency_types', create_type=False), autoincrement=False, nullable=False),
    sa.Column('created_at', postgresql.TIMESTAMP(), server_default=sa.text(u'now()'), autoincrement=False, nullable=True),
    sa.Column('updated_at', postgresql.TIMESTAMP(), server_default=sa.text(u'now()'), autoincrement=False, nullable=True),
    sa.ForeignKeyConstraint(['payment_id'], [u'payments.id'], name=u'withdrawals_payment_id_fkey'),
    sa.PrimaryKeyConstraint('id', name=u'withdrawals_pkey'),
    sa.UniqueConstraint('transaction_id', name=u'withdrawals_transaction_id_key')
    )
def downgrade():
    # Reverse of upgrade(): drop the withdrawals table.
    op.drop_table('withdrawals')
| {
"repo_name": "smilledge/transient",
"path": "transient/migrations/versions/305794ad46c5_add_withdrawals_table.py",
"copies": "1",
"size": "1385",
"license": "mit",
"hash": -7920482508932324000,
"line_mean": 37.4722222222,
"line_max": 151,
"alpha_frac": 0.7119133574,
"autogenerated": false,
"ratio": 3.3780487804878048,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4589962137887804,
"avg_score": null,
"num_lines": null
} |
"""add word location
Revision ID: 532b69010c42
Revises: 0f6fa482b10c
Create Date: 2018-04-27 18:41:43.570647
"""
# revision identifiers, used by Alembic.
revision = '532b69010c42'
down_revision = '0f6fa482b10c'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the words table and the wordlocation table linking words to articles."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('words',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('word', sa.String(length=64), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # Unique index: each word string is stored at most once.
    op.create_index(op.f('ix_words_word'), 'words', ['word'], unique=True)
    # 'location' is presumably the word's position within the article --
    # confirm against the application model.
    op.create_table('wordlocation',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('location', sa.Integer(), nullable=True),
    sa.Column('word_id', sa.Integer(), nullable=True),
    sa.Column('article_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['article_id'], ['articles.id'], ),
    sa.ForeignKeyConstraint(['word_id'], ['words.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop wordlocation first (it references words), then the words table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('wordlocation')
    op.drop_index(op.f('ix_words_word'), table_name='words')
    op.drop_table('words')
    # ### end Alembic commands ###
| {
"repo_name": "mapan1984/Hidden-Island",
"path": "migrations/versions/532b69010c42_add_word_location.py",
"copies": "1",
"size": "1295",
"license": "mit",
"hash": 196193291476874560,
"line_mean": 29.8333333333,
"line_max": 74,
"alpha_frac": 0.6548262548,
"autogenerated": false,
"ratio": 3.312020460358056,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4466846715158056,
"avg_score": null,
"num_lines": null
} |
"""add workbench tables
Revision ID: f38451075acc
Revises: 2681d14b61d8
Create Date: 2019-12-04 13:44:24.237092
"""
from alembic import op
import sqlalchemy as sa
from rdr_service.model.field_types import BlobUTF8
from rdr_service.model.utils import Enum
from rdr_service.participant_enums import WorkbenchWorkspaceStatus, WorkbenchWorkspaceUserRole
# revision identifiers, used by Alembic.
revision = 'f38451075acc'
down_revision = '2681d14b61d8'
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade, e.g. upgrade_rdr for 'rdr'."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade, e.g. downgrade_rdr for 'rdr'."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def _audit_columns():
    """Fresh surrogate-key + bookkeeping columns shared by every workbench table."""
    return [
        sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
        sa.Column('created', sa.DateTime(), nullable=True),
        sa.Column('modified', sa.DateTime(), nullable=True),
    ]


def _researcher_columns():
    """Fresh copies of the columns shared by workbench_researcher and its history twin."""
    return [
        sa.Column('user_source_id', sa.Integer(), nullable=False),
        sa.Column('creation_time', sa.DateTime(), nullable=True),
        sa.Column('modified_time', sa.DateTime(), nullable=True),
        sa.Column('given_name', sa.String(length=100), nullable=True),
        sa.Column('family_name', sa.String(length=100), nullable=True),
        sa.Column('street_address1', sa.String(length=250), nullable=True),
        sa.Column('street_address2', sa.String(length=250), nullable=True),
        sa.Column('city', sa.String(length=80), nullable=True),
        sa.Column('state', sa.String(length=80), nullable=True),
        sa.Column('zip_code', sa.String(length=80), nullable=True),
        sa.Column('country', sa.String(length=80), nullable=True),
        sa.Column('ethnicity', sa.String(length=80), nullable=True),
        sa.Column('gender', sa.String(length=80), nullable=True),
        sa.Column('race', sa.String(length=80), nullable=True),
        sa.Column('resource', BlobUTF8(), nullable=False),
    ] + _audit_columns()


def _workspace_columns():
    """Fresh copies of the columns shared by workbench_workspace and its history twin."""
    return [
        sa.Column('workspace_source_id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=250), nullable=False),
        sa.Column('creation_time', sa.DateTime(), nullable=True),
        sa.Column('modified_time', sa.DateTime(), nullable=True),
        sa.Column('status', Enum(WorkbenchWorkspaceStatus), nullable=True),
        sa.Column('exclude_from_public_directory', sa.Boolean(), nullable=True),
        sa.Column('disease_focused_research', sa.Boolean(), nullable=True),
        sa.Column('disease_focused_research_name', sa.String(length=250), nullable=True),
        sa.Column('other_purpose_details', sa.String(length=250), nullable=True),
        sa.Column('methods_development', sa.Boolean(), nullable=True),
        sa.Column('control_set', sa.Boolean(), nullable=True),
        sa.Column('ancestry', sa.Boolean(), nullable=True),
        sa.Column('social_behavioral', sa.Boolean(), nullable=True),
        sa.Column('population_health', sa.Boolean(), nullable=True),
        sa.Column('drug_development', sa.Boolean(), nullable=True),
        sa.Column('commercial_purpose', sa.Boolean(), nullable=True),
        sa.Column('educational', sa.Boolean(), nullable=True),
        sa.Column('other_purpose', sa.Boolean(), nullable=True),
        sa.Column('resource', BlobUTF8(), nullable=False),
    ] + _audit_columns()


def _create_affiliations_table(table_name, researcher_table):
    """Create an institutional-affiliations table whose FK targets researcher_table."""
    op.create_table(table_name,
        *(_audit_columns() + [
            sa.Column('researcher_id', sa.Integer(), nullable=False),
            sa.Column('institution', sa.String(length=250), nullable=True),
            sa.Column('role', sa.String(length=80), nullable=True),
            sa.Column('non_academic_affiliation', sa.Boolean(), nullable=True),
            sa.ForeignKeyConstraint(['researcher_id'], [researcher_table + '.id'], ),
            sa.PrimaryKeyConstraint('id'),
        ])
    )


def _create_workspace_user_table(table_name, workspace_table, researcher_table):
    """Create a workspace-membership table targeting the given workspace/researcher tables."""
    op.create_table(table_name,
        *(_audit_columns() + [
            sa.Column('workspace_id', sa.Integer(), nullable=False),
            # NOTE: capital 'I' in researcher_Id is kept as-is -- it is the
            # column name the rest of the codebase expects.
            sa.Column('researcher_Id', sa.Integer(), nullable=False),
            sa.Column('user_id', sa.Integer(), nullable=False),
            sa.Column('role', Enum(WorkbenchWorkspaceUserRole), nullable=True),
            sa.Column('status', Enum(WorkbenchWorkspaceStatus), nullable=True),
            sa.ForeignKeyConstraint(['researcher_Id'], [researcher_table + '.id'], ),
            sa.ForeignKeyConstraint(['workspace_id'], [workspace_table + '.id'], ),
            sa.PrimaryKeyConstraint('id'),
        ])
    )


def upgrade_rdr():
    """Create the workbench researcher/workspace tables and their *_history twins.

    Live and history tables share identical column sets (generated by the
    helpers above so they cannot drift); only constraints differ.  Table
    creation order and every constraint name (including the existing
    'uniqe_*' spellings) match the auto-generated original.
    """
    op.create_table('workbench_researcher',
        *(_researcher_columns() + [
            sa.PrimaryKeyConstraint('id'),
            sa.UniqueConstraint('user_source_id', name='uniqe_user_source_id'),
        ])
    )
    op.create_table('workbench_researcher_history',
        *(_researcher_columns() + [sa.PrimaryKeyConstraint('id')])
    )
    op.create_table('workbench_workspace',
        *(_workspace_columns() + [
            sa.PrimaryKeyConstraint('id'),
            sa.UniqueConstraint('workspace_source_id', name='uniqe_workspace_source_id'),
        ])
    )
    op.create_table('workbench_workspace_history',
        *(_workspace_columns() + [sa.PrimaryKeyConstraint('id')])
    )
    _create_affiliations_table(
        'workbench_institutional_affiliations', 'workbench_researcher')
    _create_affiliations_table(
        'workbench_institutional_affiliations_history', 'workbench_researcher_history')
    _create_workspace_user_table(
        'workbench_workspace_user', 'workbench_workspace', 'workbench_researcher')
    _create_workspace_user_table(
        'workbench_workspace_user_history', 'workbench_workspace_history',
        'workbench_researcher_history')
def downgrade_rdr():
    """Drop all workbench tables, referencing tables before their targets."""
    for table_name in (
            'workbench_workspace_user_history',
            'workbench_workspace_user',
            'workbench_institutional_affiliations_history',
            'workbench_institutional_affiliations',
            'workbench_workspace_history',
            'workbench_workspace',
            'workbench_researcher_history',
            'workbench_researcher',
    ):
        op.drop_table(table_name)
def upgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    # No metrics-database changes in this revision.
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    # No metrics-database changes in this revision.
    pass
    # ### end Alembic commands ###
| {
"repo_name": "all-of-us/raw-data-repository",
"path": "rdr_service/alembic/versions/f38451075acc_add_workbench_tables.py",
"copies": "1",
"size": "10019",
"license": "bsd-3-clause",
"hash": 3321153244435922400,
"line_mean": 49.3467336683,
"line_max": 94,
"alpha_frac": 0.6917856073,
"autogenerated": false,
"ratio": 3.5365337098482175,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9647999480554534,
"avg_score": 0.016063967318736473,
"num_lines": 199
} |
"""add work
Revision ID: 57fc5e85328
Revises: 1ed82ef0071
Create Date: 2014-08-08 00:16:41.428095
"""
from alembic import context, op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '57fc5e85328'
down_revision = '1ed82ef0071'
# NOTE(review): evaluated at import time -- context.get_bind() needs a live
# migration context, so importing this module outside an online alembic run
# (e.g. offline/--sql mode) will fail here.  Consider moving the dialect
# check into downgrade() instead.
driver_name = context.get_bind().dialect.name
def upgrade():
    """Create the works table plus a non-unique index on its title."""
    columns = [
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('title', sa.String(), nullable=False),
        sa.Column('dop', sa.Date(), nullable=True),
        sa.Column('team_id', sa.Integer(), nullable=True),
        sa.Column('created_at', sa.DateTime(timezone=True), nullable=False),
        sa.ForeignKeyConstraint(['team_id'], ['teams.id']),
        sa.PrimaryKeyConstraint('id'),
    ]
    op.create_table('works', *columns)
    op.create_index(op.f('ix_works_title'), 'works', ['title'], unique=False)
def downgrade():
    """Drop the works index and table; on PostgreSQL also drop the id sequence."""
    op.drop_index(op.f('ix_works_title'), table_name='works')
    op.drop_table('works')
    # Read the dialect at execution time rather than via the module-level
    # driver_name, which is resolved at import time and requires a live
    # migration context (it breaks offline/--sql runs).
    if op.get_bind().dialect.name == 'postgresql':
        op.execute('DROP SEQUENCE works_id_seq')
| {
"repo_name": "clicheio/cliche",
"path": "cliche/migrations/versions/57fc5e85328_add_work.py",
"copies": "2",
"size": "1035",
"license": "mit",
"hash": -3725044222001228300,
"line_mean": 26.972972973,
"line_max": 77,
"alpha_frac": 0.6473429952,
"autogenerated": false,
"ratio": 3.2242990654205608,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.487164206062056,
"avg_score": null,
"num_lines": null
} |
#add your custom command output handler class to this module
#the default handler , does nothing , just passes the raw output directly to STDOUT
class DefaultCommandOutputHandler:
    # Default handler: forwards the raw command output to STDOUT, wrapped in
    # the Splunk <stream> XML envelope by print_xml_stream().
    def __init__(self,**args):
        # Accepts arbitrary keyword settings for interface parity with the
        # other handlers; none are used.
        pass
    def __call__(self, raw_cmd_output):
        print_xml_stream(raw_cmd_output)
class GoGenHandler:
    """Wrap command output in a full event envelope that carries the
    index, source, sourcetype and host configured for this input."""

    def __init__(self, **args):
        # All four routing fields are required configuration keys.
        self.index = args['index']
        self.source = args['source']
        self.sourcetype = args['sourcetype']
        self.host = args['host']

    def __call__(self, raw_cmd_output):
        event = ("<stream><event><data>%s</data><source>%s</source>"
                 "<sourcetype>%s</sourcetype><index>%s</index>"
                 "<host>%s</host></event></stream>"
                 % (encodeXMLText(raw_cmd_output), self.source,
                    self.sourcetype, self.index, self.host))
        print(event)
class MyCommandOutputHandler:
    """Example handler: ignores the command output and emits a fixed event."""

    def __init__(self, **args):
        pass

    def __call__(self, raw_cmd_output):
        # The actual output is discarded; a constant payload is streamed.
        print_xml_stream("foobar")
#HELPER FUNCTIONS
# prints XML stream
def print_xml_stream(s):
    """Emit *s* (XML-escaped) as a single event in Splunk's streaming format."""
    print("<stream><event><data>%s</data></event></stream>" % encodeXMLText(s))
def encodeXMLText(text):
    """Escape the five XML special characters in *text*.

    Newlines are preserved by this variant.
    """
    # '&' must be handled first so later entities are not double-escaped.
    for raw, entity in (("&", "&amp;"), ("\"", "&quot;"),
                        ("'", "&apos;"), ("<", "&lt;"), (">", "&gt;")):
        text = text.replace(raw, entity)
    return text
| {
"repo_name": "damiendallimore/SplunkModularInputsPythonFramework",
"path": "implementations/command/bin/outputhandlers.py",
"copies": "1",
"size": "1399",
"license": "apache-2.0",
"hash": 3664474160835140600,
"line_mean": 30.0888888889,
"line_max": 231,
"alpha_frac": 0.583273767,
"autogenerated": false,
"ratio": 3.77088948787062,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.485416325487062,
"avg_score": null,
"num_lines": null
} |
#add your custom response handler class to this module
import json
import datetime
import time
#the default handler , does nothing , just passes the raw output directly to STDOUT
class DefaultResponseHandler:
    """Default handler: forwards the raw response body to Splunk unchanged."""

    def __init__(self, **args):
        pass

    def __call__(self, response_object, raw_response_output, response_type, req_args, endpoint):
        # No parsing; the payload is streamed as-is.
        print_xml_stream(raw_response_output)
class MyCustomTeslaHandler:
    """Example handler: plants a fixed 'data' request argument for the next
    poll, then forwards the raw response unchanged."""

    def __init__(self, **args):
        pass

    def __call__(self, response_object, raw_response_output, response_type, req_args, endpoint):
        # Mutates req_args in place; the framework reuses it on the next request.
        req_args["data"] = 'What does the fox say'
        print_xml_stream(raw_response_output)
#HELPER FUNCTIONS
# prints XML stream
def print_xml_stream(s):
    """Emit *s* (XML-escaped) as one unbroken Splunk streaming event."""
    print('<stream><event unbroken="1"><data>%s</data><done/></event></stream>' % encodeXMLText(s))
def encodeXMLText(text):
    """Escape XML special characters in *text* and strip newlines."""
    # '&' first, so entities produced below are not re-escaped.
    replacements = (("&", "&amp;"), ("\"", "&quot;"), ("'", "&apos;"),
                    ("<", "&lt;"), (">", "&gt;"), ("\n", ""))
    for raw, entity in replacements:
        text = text.replace(raw, entity)
    return text
| {
"repo_name": "damiendallimore/SplunkModularInputsPythonFramework",
"path": "implementations/tesla/bin/responsehandlers.py",
"copies": "1",
"size": "1317",
"license": "apache-2.0",
"hash": -946670394053362300,
"line_mean": 31.1219512195,
"line_max": 104,
"alpha_frac": 0.5391040243,
"autogenerated": false,
"ratio": 4.248387096774193,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.031166410763940618,
"num_lines": 41
} |
#add your custom response handler class to this module
import json
import datetime
#the default handler , does nothing , just passes the raw output directly to STDOUT
class DefaultResponseHandler:
    """Default REST handler: persists any response cookies into the next
    request's arguments, then forwards the raw payload unchanged."""

    def __init__(self, **args):
        pass

    def __call__(self, response_object, raw_response_output, response_type, req_args, endpoint):
        jar = response_object.cookies
        if jar:
            # Carry session cookies forward so subsequent polls stay authenticated.
            req_args["cookies"] = jar
        print_xml_stream(raw_response_output)
class MyResponseHandler:
    """Example handler: ignores the response and emits a constant event."""

    def __init__(self, **args):
        pass

    def __call__(self, response_object, raw_response_output, response_type, req_args, endpoint):
        print_xml_stream("foobar")
class BoxEventHandler:
    """Split a Box events API JSON response into one Splunk event per entry
    and checkpoint the returned stream position for the next poll."""

    def __init__(self, **args):
        pass

    def __call__(self, response_object, raw_response_output, response_type, req_args, endpoint):
        if response_type != "json":
            # Unknown format: forward verbatim.
            print_xml_stream(raw_response_output)
            return
        output = json.loads(raw_response_output)
        params = req_args.setdefault("params", {})
        if "next_stream_position" in output:
            # Checkpoint so the next request resumes where this one ended.
            params["stream_position"] = output["next_stream_position"]
        for entry in output["entries"]:
            print_xml_stream(json.dumps(entry))
class QualysGuardActivityLog:
    '''Response handler for QualysGuard activity log.

    Advances the ``date_from`` request parameter to one minute ago before
    forwarding the raw response, so each poll covers a sliding window.
    '''

    def __init__(self, **args):
        pass

    def __call__(self, response_object, raw_response_output, response_type, req_args, endpoint):
        one_minute_ago = datetime.datetime.now() - datetime.timedelta(minutes=1)
        # NOTE(review): naive local time is formatted with a trailing "Z"
        # (UTC marker) — confirm the API actually expects local time.
        req_args.setdefault("params", {})["date_from"] = one_minute_ago.strftime("%Y-%m-%dT%H:%M:%SZ")
        print_xml_stream(raw_response_output)
class FourSquareCheckinsEventHandler:
    """Emit one Splunk event per Foursquare check-in item from a JSON
    response; forward any other format verbatim."""

    def __init__(self, **args):
        pass

    def __call__(self, response_object, raw_response_output, response_type, req_args, endpoint):
        if response_type != "json":
            print_xml_stream(raw_response_output)
            return
        items = json.loads(raw_response_output)["response"]["checkins"]["items"]
        for checkin in items:
            print_xml_stream(json.dumps(checkin))
class BugsenseErrorsEventHandler:
    """Emit one Splunk event per error record in a Bugsense JSON response;
    forward any other format verbatim."""

    def __init__(self, **args):
        pass

    def __call__(self, response_object, raw_response_output, response_type, req_args, endpoint):
        if response_type != "json":
            print_xml_stream(raw_response_output)
            return
        for error in json.loads(raw_response_output)["data"]:
            print_xml_stream(json.dumps(error))
class MyCustomHandler:
    """Example handler: plants a fixed 'data' request argument for the next
    poll, then forwards the raw response unchanged."""

    def __init__(self, **args):
        pass

    def __call__(self, response_object, raw_response_output, response_type, req_args, endpoint):
        # Mutates req_args in place; the framework reuses it on the next request.
        req_args["data"] = 'What does the fox say'
        print_xml_stream(raw_response_output)
class TwitterEventHandler:
    """Stream each tweet from a Twitter JSON response as its own Splunk event
    and checkpoint the highest tweet id as the ``since_id`` request parameter
    so the next poll only returns newer tweets.

    Non-JSON responses are forwarded verbatim.
    """

    def __init__(self, **args):
        pass

    def __call__(self, response_object, raw_response_output, response_type, req_args, endpoint):
        if response_type == "json":
            output = json.loads(raw_response_output)
            last_tweet_indexed_id = 0
            for twitter_event in output["statuses"]:
                print_xml_stream(json.dumps(twitter_event))
                if "id_str" in twitter_event:
                    # Compare numerically. The original code compared the raw
                    # id strings, which is lexicographic (e.g. "99" > "100")
                    # and could checkpoint the wrong since_id.
                    tweet_id = int(twitter_event["id_str"])
                    if tweet_id > last_tweet_indexed_id:
                        last_tweet_indexed_id = tweet_id
            if not "params" in req_args:
                req_args["params"] = {}
            # Only advance since_id when a tweet was actually seen; writing 0
            # on an empty response would reset the checkpoint to the beginning.
            if last_tweet_indexed_id:
                req_args["params"]["since_id"] = str(last_tweet_indexed_id)
        else:
            print_xml_stream(raw_response_output)
#HELPER FUNCTIONS
# prints XML stream
def print_xml_stream(s):
    """Emit *s* (XML-escaped) as one unbroken Splunk streaming event."""
    print('<stream><event unbroken="1"><data>%s</data><done/></event></stream>' % encodeXMLText(s))
def encodeXMLText(text):
    """Escape XML special characters in *text* and strip newlines."""
    # '&' first, so entities produced below are not re-escaped.
    replacements = (("&", "&amp;"), ("\"", "&quot;"), ("'", "&apos;"),
                    ("<", "&lt;"), (">", "&gt;"), ("\n", ""))
    for raw, entity in replacements:
        text = text.replace(raw, entity)
    return text
"repo_name": "d4rt/SplunkModularInputsPythonFramework",
"path": "implementations/rest/bin/responsehandlers.py",
"copies": "1",
"size": "4680",
"license": "apache-2.0",
"hash": 9037620378930432000,
"line_mean": 32.9202898551,
"line_max": 108,
"alpha_frac": 0.5414529915,
"autogenerated": false,
"ratio": 4.189794091316025,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0331167810472567,
"num_lines": 138
} |
# add your custom response handler class to this module
import json
import datetime
# the default handler , does nothing , just passes the raw output directly to STDOUT
class DefaultResponseHandler:
    """Default handler: forwards the raw response body to Splunk unchanged."""

    def __init__(self, **args):
        pass

    def __call__(self, response_object, raw_response_output, response_type, req_args, endpoint):
        print_xml_stream(raw_response_output)
class OpenDNSErrorsEventHandler:
    """Emit one Splunk event per element of an OpenDNS JSON response;
    forward any other format verbatim."""

    def __init__(self, **args):
        pass

    def __call__(self, response_object, raw_response_output, response_type, req_args, endpoint):
        if response_type != "json":
            print_xml_stream(raw_response_output)
            return
        for line in json.loads(raw_response_output):
            print_xml_stream(json.dumps(line))
class OpenDNSHandler:
    """Emit one Splunk event per element of an OpenDNS JSON response;
    forward any other format verbatim.

    NOTE(review): identical behavior to OpenDNSErrorsEventHandler in this
    file — the duplication looks intentional (separate config names).
    """

    def __init__(self, **args):
        pass

    def __call__(self, response_object, raw_response_output, response_type, req_args, endpoint):
        if response_type != "json":
            print_xml_stream(raw_response_output)
            return
        for line in json.loads(raw_response_output):
            print_xml_stream(json.dumps(line))
# prints XML stream
def print_xml_stream(s):
    """Emit *s* (XML-escaped) as one unbroken Splunk streaming event."""
    print("<stream><event unbroken=\"1\"><data>%s</data><done/></event></stream>" % encodeXMLText(s))
def encodeXMLText(text):
    """Escape XML special characters in *text* and strip newlines."""
    # '&' first, so entities produced below are not re-escaped.
    for raw, entity in (('&', '&amp;'), ('"', '&quot;'), ("'", '&apos;'),
                        ('<', '&lt;'), ('>', '&gt;'), ("\n", '')):
        text = text.replace(raw, entity)
    return text
| {
"repo_name": "nnam/SplunkModularInputsPythonFramework",
"path": "implementations/opendns/bin/responsehandlers.py",
"copies": "1",
"size": "1736",
"license": "apache-2.0",
"hash": -7125338420206024000,
"line_mean": 30,
"line_max": 98,
"alpha_frac": 0.6209677419,
"autogenerated": false,
"ratio": 3.9365079365079363,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007834251980362038,
"num_lines": 56
} |
#add your custom response handler class to this module
import json
#the default handler , does nothing , just passes the raw output directly to STDOUT
class DefaultResponseHandler:
    """Default handler: forwards the raw response body to Splunk unchanged."""

    def __init__(self, **args):
        pass

    def __call__(self, response_object, raw_response_output, response_type, req_args, endpoint):
        print_xml_stream(raw_response_output)
class MyResponseHandler:
    """Example handler: ignores the response and emits a constant event."""

    def __init__(self, **args):
        pass

    def __call__(self, response_object, raw_response_output, response_type, req_args, endpoint):
        print_xml_stream("foobar")
class BoxEventHandler:
    """Split a Box events API JSON response into one Splunk event per entry
    and checkpoint the returned stream position for the next poll."""

    def __init__(self, **args):
        pass

    def __call__(self, response_object, raw_response_output, response_type, req_args, endpoint):
        if response_type != "json":
            print_xml_stream(raw_response_output)
            return
        output = json.loads(raw_response_output)
        params = req_args.setdefault("params", {})
        if "next_stream_position" in output:
            # Checkpoint so the next request resumes where this one ended.
            params["stream_position"] = output["next_stream_position"]
        for entry in output["entries"]:
            print_xml_stream(json.dumps(entry))
class FourSquareCheckinsEventHandler:
    """Emit one Splunk event per Foursquare check-in item from a JSON
    response; forward any other format verbatim."""

    def __init__(self, **args):
        pass

    def __call__(self, response_object, raw_response_output, response_type, req_args, endpoint):
        if response_type != "json":
            print_xml_stream(raw_response_output)
            return
        items = json.loads(raw_response_output)["response"]["checkins"]["items"]
        for checkin in items:
            print_xml_stream(json.dumps(checkin))
class TwitterEventHandler:
    """Emit one Splunk event per tweet in a Twitter JSON response; forward
    any other format verbatim. (No since_id checkpointing in this variant.)"""

    def __init__(self, **args):
        pass

    def __call__(self, response_object, raw_response_output, response_type, req_args, endpoint):
        if response_type != "json":
            print_xml_stream(raw_response_output)
            return
        for tweet in json.loads(raw_response_output)["statuses"]:
            print_xml_stream(json.dumps(tweet))
#HELPER FUNCTIONS
# prints XML stream
def print_xml_stream(s):
    """Emit *s* (XML-escaped) as one unbroken Splunk streaming event."""
    print('<stream><event unbroken="1"><data>%s</data><done/></event></stream>' % encodeXMLText(s))
def encodeXMLText(text):
    """Escape XML special characters in *text* and strip newlines."""
    # '&' first, so entities produced below are not re-escaped.
    replacements = (("&", "&amp;"), ("\"", "&quot;"), ("'", "&apos;"),
                    ("<", "&lt;"), (">", "&gt;"), ("\n", ""))
    for raw, entity in replacements:
        text = text.replace(raw, entity)
    return text
"repo_name": "Jaykul/SplunkModularInputsPythonFramework",
"path": "implementations/rest/bin/responsehandlers.py",
"copies": "1",
"size": "2720",
"license": "apache-2.0",
"hash": -4090032459048250000,
"line_mean": 33.0125,
"line_max": 100,
"alpha_frac": 0.5610294118,
"autogenerated": false,
"ratio": 4.121212121212121,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5182241533012121,
"avg_score": null,
"num_lines": null
} |
#add your custom response handler class to this module
import sys,json,csv,io,logging
from pysnmp.entity.rfc3413 import mibvar
# Initialize the root logger with a StreamHandler and a format message:
logging.basicConfig(level=logging.ERROR, format='%(levelname)s %(message)s')
#the default handler , does nothing , just passes the raw output directly to STDOUT
class DefaultResponseHandler:
    """Default SNMP response handler.

    Formats a response as one space-separated string of ``name = value``
    pairs and streams it to Splunk as a single event; table walks with
    ``split_bulk_output`` emit one event per varbind instead.
    """
    def __init__(self,**args):
        pass
    def __call__(self, response_object,destination,table=False,from_trap=False,trap_metadata=None,split_bulk_output=False,mibView=None):
        # Accumulator for the event text built from the varbinds.
        splunkevent =""
        #handle traps
        if from_trap:
            for oid, val in response_object:
                try:
                    # Resolve the raw OID to MODULE::symbol.indices via the MIB view.
                    (symName, modName), indices = mibvar.oidToMibName(mibView, oid)
                    splunkevent +='%s::%s.%s = ' % (modName, symName,'.'.join([ v.prettyPrint() for v in indices]))
                except: # catch *all* exceptions
                    e = sys.exc_info()[1]
                    logging.error("Exception resolving MIB name in the caught trap: %s" % str(e))
                    # Fall back to the numeric OID.
                    splunkevent +='%s = ' % (oid)
                try:
                    # NOTE(review): if the name lookup above failed, modName/symName
                    # are unbound (first varbind) or stale from a previous varbind;
                    # the resulting error is swallowed by the bare except below and
                    # the raw value is printed instead.
                    decodedVal = mibvar.cloneFromMibValue(mibView,modName,symName,val)
                    splunkevent +='%s ' % (decodedVal.prettyPrint())
                except: # catch *all* exceptions
                    e = sys.exc_info()[1]
                    logging.error("Exception resolving MIB value in the caught trap: %s" % str(e))
                    splunkevent +='%s ' % (val.prettyPrint())
            # Prefix caller-supplied trap metadata onto the assembled event.
            splunkevent = trap_metadata + splunkevent
            print_xml_single_instance_mode(destination, splunkevent)
        #handle tables
        elif table:
            for varBindTableRow in response_object:
                for name, val in varBindTableRow:
                    output_element = '%s = "%s" ' % (name.prettyPrint(), val.prettyPrint())
                    if split_bulk_output:
                        # One Splunk event per varbind.
                        print_xml_single_instance_mode(destination, output_element)
                    else:
                        splunkevent += output_element
            # NOTE(review): indentation was lost in this copy; this final emit is
            # reconstructed as sitting after the row loop, so with
            # split_bulk_output=True it would also emit a trailing empty event —
            # confirm against the original source.
            print_xml_single_instance_mode(destination, splunkevent)
        #handle scalars
        else:
            for name, val in response_object:
                splunkevent += '%s = "%s" ' % (name.prettyPrint(), val.prettyPrint())
            print_xml_single_instance_mode(destination, splunkevent)
#Like DefaultResponseHandler, but splits multiple OIDs pulled from a GET request (instead of GETBULK) into separate events.
class SplitNonBulkResponseHandler:
    """Like DefaultResponseHandler, but the scalar (GET) branch also honors
    ``split_bulk_output``, emitting one event per OID instead of one
    aggregated event. Trap handling is identical except that resolution
    failures are silent (no logging).
    """
    def __init__(self,**args):
        pass
    def __call__(self, response_object,destination,table=False,from_trap=False,trap_metadata=None,split_bulk_output=False,mibView=None):
        splunkevent =""
        #handle traps
        if from_trap:
            for oid, val in response_object:
                try:
                    (symName, modName), indices = mibvar.oidToMibName(mibView, oid)
                    splunkevent +='%s::%s.%s = ' % (modName, symName,'.'.join([ v.prettyPrint() for v in indices]))
                except: # catch *all* exceptions
                    # 'e' is captured but never used in this variant.
                    e = sys.exc_info()[1]
                    splunkevent +='%s = ' % (oid)
                try:
                    # NOTE(review): modName/symName may be unbound or stale here if
                    # the lookup above failed; the bare except hides that.
                    decodedVal = mibvar.cloneFromMibValue(mibView,modName,symName,val)
                    splunkevent +='%s ' % (decodedVal.prettyPrint())
                except: # catch *all* exceptions
                    e = sys.exc_info()[1]
                    splunkevent +='%s ' % (val.prettyPrint())
            splunkevent = trap_metadata + splunkevent
            print_xml_single_instance_mode(destination, splunkevent)
        #handle tables
        elif table:
            for varBindTableRow in response_object:
                for name, val in varBindTableRow:
                    output_element = '%s = "%s" ' % (name.prettyPrint(), val.prettyPrint())
                    if split_bulk_output:
                        print_xml_single_instance_mode(destination, output_element)
                    else:
                        splunkevent += output_element
            # NOTE(review): reconstructed as after the loops (indentation lost in
            # this copy) — confirm against the original source.
            print_xml_single_instance_mode(destination, splunkevent)
        #handle scalars
        else:
            for name, val in response_object:
                output_element = '%s = "%s" ' % (name.prettyPrint(), val.prettyPrint())
                if split_bulk_output:
                    # One event per scalar varbind.
                    print_xml_single_instance_mode(destination, output_element)
                else:
                    splunkevent += output_element
            print_xml_single_instance_mode(destination, splunkevent)
class JSONFormatterResponseHandler:
    """Serialise SNMP responses to JSON before streaming them to Splunk:
    a list of row dicts for table walks, a single dict for scalars."""

    def __init__(self, **args):
        pass

    def __call__(self, response_object, destination, table=False, from_trap=False,
                 trap_metadata=None, split_bulk_output=False, mibView=None):
        if table:
            rows = []
            for varBindTableRow in response_object:
                rows.append(dict((name.prettyPrint(), val.prettyPrint())
                                 for name, val in varBindTableRow))
            print_xml_single_instance_mode(destination, json.dumps(rows))
        else:
            scalars = dict((name.prettyPrint(), val.prettyPrint())
                           for name, val in response_object)
            print_xml_single_instance_mode(destination, json.dumps(scalars))
# prints XML stream
def print_xml_single_instance_mode(server, event):
    """Stream one event to Splunk, tagging it with *server* as the host."""
    payload = "<stream><event><data>%s</data><host>%s</host></event></stream>" % (
        encodeXMLText(event), server)
    print(payload)
# prints XML stream
def print_xml_multi_instance_mode(server, event, stanza):
    """Stream one event to Splunk tagged with its originating stanza and host.

    Fix: the original adjacent string literals (``stanza=""%s""``) collapsed
    at compile time to an UNQUOTED ``stanza=%s`` attribute, which is not
    well-formed XML; the attribute value is now properly quoted.
    """
    print("<stream><event stanza=\"%s\"><data>%s</data><host>%s</host></event></stream>" % (
        stanza, encodeXMLText(event), server))
# prints simple stream
def print_simple(s):
    """Write *s* followed by a blank line (print itself adds a newline)."""
    print("%s\n" % s)
#HELPER FUNCTIONS
# prints XML stream
def print_xml_stream(s):
    """Emit *s* (XML-escaped) as one unbroken Splunk streaming event."""
    print('<stream><event unbroken="1"><data>%s</data><done/></event></stream>' % encodeXMLText(s))
def encodeXMLText(text):
    """Escape XML special characters in *text* and strip newlines."""
    # '&' first, so entities produced below are not re-escaped.
    replacements = (("&", "&amp;"), ("\"", "&quot;"), ("'", "&apos;"),
                    ("<", "&lt;"), (">", "&gt;"), ("\n", ""))
    for raw, entity in replacements:
        text = text.replace(raw, entity)
    return text
"repo_name": "damiendallimore/SplunkModularInputsPythonFramework",
"path": "implementations/snmp/bin/responsehandlers.py",
"copies": "1",
"size": "6702",
"license": "apache-2.0",
"hash": 1168701453793013500,
"line_mean": 42.2451612903,
"line_max": 144,
"alpha_frac": 0.5459564309,
"autogenerated": false,
"ratio": 4.086585365853659,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9954030677547809,
"avg_score": 0.03570222384116982,
"num_lines": 155
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.