function stringlengths 11 56k | repo_name stringlengths 5 60 | features list |
|---|---|---|
def add_collaborator_to_license_view(
        self, repo, collaborator, view, db_privileges=None):
    """Grant privileges on repo.view to a collaborator, now and by default.

    Runs a GRANT for the existing view plus ALTER DEFAULT PRIVILEGES so
    objects created later in the schema receive the same grants.
    Returns the status string of the executed transaction.
    """
    # Bug fix: the default was a mutable list ([]); a shared list default
    # persists mutations across calls. None is the safe sentinel.
    if db_privileges is None:
        db_privileges = []
    # check that all repo names, usernames, and privileges passed aren't
    # sql injections
    self._check_for_injections(repo)
    self._check_for_injections(collaborator)
    for privilege in db_privileges:
        self._check_for_injections(privilege)
    query = ('BEGIN;'
             'GRANT %s ON %s.%s TO %s;'
             'ALTER DEFAULT PRIVILEGES IN SCHEMA %s '
             'GRANT %s ON %s.%s TO %s;'
             'COMMIT;'
             )
    privileges_str = ', '.join(db_privileges)
    params = [
        privileges_str, repo, view,
        collaborator, repo, privileges_str,
        repo, view, collaborator]
    # AsIs: these are identifiers/keyword lists, not literals, so they
    # must not be quoted; the injection checks above make this safe.
    params = tuple(AsIs(x) for x in params)
    res = self.execute_sql(query, params)
    return res['status']
210,
60,
210,
43,
1380229308
] |
def create_license_view(self, repo_base, repo,
                        table, view_sql, license_id):
    """Create the license-specific view for a table.

    The view is named ``<table>_license_view_<license_id>``.
    ``repo_base`` is accepted for interface parity but unused here.
    """
    name = '{0}_license_view_{1}'.format(table.lower(), license_id)
    return self.create_view(repo, name, view_sql)
210,
60,
210,
43,
1380229308
] |
def get_view_sql(self, repo_base, repo, table, view_params, license_id):
    """Build the SELECT statement backing a license view of repo.table.

    For licenses with pii_removed set, returns a query selecting every
    column of the table except those in view_params['removed-columns'].
    Returns None for licenses that do not remove PII (original behavior).
    """
    # create view based on license
    lic = LicenseManager.find_license_by_id(license_id)
    if lic.pii_removed:
        # Bug fix: the original concatenated string was missing spaces
        # ("...= %sAND table_name..."), producing invalid SQL.
        query = ('SELECT column_name FROM information_schema.columns '
                 'WHERE table_schema = %s '
                 'AND table_name = %s'
                 )
        params = (repo, table)
        res = self.execute_sql(query, params)
        all_columns = set(t[0] for t in res['tuples'])
        removed_columns = set(view_params['removed-columns'])
        columns_to_show = list(all_columns - removed_columns)
        # TODO(review): if every column is removed this yields
        # "SELECT  FROM ..." which is invalid SQL; consider raising.
        return 'SELECT {0} FROM {1}.{2}'.format(
            ','.join(columns_to_show), repo, table)
210,
60,
210,
43,
1380229308
] |
def list_tables(self, repo):
    """Return the names of all base tables in the given repo (schema)."""
    self._check_for_injections(repo)
    if repo not in self.list_repos():
        raise LookupError('Invalid repository name: %s' % (repo))
    query = ('SELECT table_name FROM information_schema.tables '
             'WHERE table_schema = %s AND table_type = \'BASE TABLE\';'
             )
    res = self.execute_sql(query, (repo,))
    return [row[0] for row in res['tuples']]
210,
60,
210,
43,
1380229308
] |
def list_table_permissions(self, repo, table):
    """Return the privilege types the current user holds on repo.table."""
    query = ("select privilege_type from "
             "information_schema.role_table_grants where table_schema=%s "
             "and table_name=%s and grantee=%s")
    result = self.execute_sql(query, (repo, table, self.user))
    return result['tuples']
210,
60,
210,
43,
1380229308
] |
def list_views(self, repo):
    """Return the names of all views in the given repo (schema)."""
    self._check_for_injections(repo)
    if repo not in self.list_repos():
        raise LookupError('Invalid repository name: %s' % (repo))
    query = ('SELECT table_name FROM information_schema.tables '
             'WHERE table_schema = %s '
             'AND table_type = \'VIEW\';')
    res = self.execute_sql(query, (repo,))
    return [row[0] for row in res['tuples']]
210,
60,
210,
43,
1380229308
] |
def describe_view(self, repo, view, detail=False):
    """Describe a view's columns.

    Returns (column_name, data_type) rows, or every information_schema
    column when detail is True.
    """
    query = ("SELECT %s "
             "FROM information_schema.columns "
             "WHERE table_schema = %s and table_name = %s;")
    # AsIs: a column list is an identifier fragment, not a literal
    columns = '*' if detail else 'column_name, data_type'
    res = self.execute_sql(query, (AsIs(columns), repo, view))
    return res['tuples']
210,
60,
210,
43,
1380229308
] |
def clone_table(self, repo, table, new_table):
    """Copy repo.table (data only, no constraints) into repo.new_table."""
    self._validate_table_name(table)
    self._validate_table_name(new_table)
    query = 'CREATE TABLE %s.%s AS SELECT * FROM %s.%s'
    identifiers = (AsIs(repo), AsIs(new_table), AsIs(repo), AsIs(table))
    return self.execute_sql(query, identifiers)['status']
210,
60,
210,
43,
1380229308
] |
def explain_query(self, query):
    """
    returns the number of rows, the cost (in time) to execute,
    and the width (bytes) of rows outputted
    """
    # if it's a select query, return a different set of defaults
    select_query = (query.split()[0]).lower() == 'select'
    if not select_query:
        return {'num_rows': 1, 'time_cost': 0, 'byte_width': 0}
    res = self.execute_sql('EXPLAIN %s' % (query))
    plan = res['tuples'][0][0]
    num_rows = re.match(r'.*rows=(\d+).*', plan).group(1)
    byte_width = re.match(r'.*width=(\d+).*', plan).group(1)
    # Bug fix: the original pattern used unescaped dots
    # (cost=(\d+.\d+)..(\d+.\d+)*), which match any character; the
    # costs are literal "start..total" floats in EXPLAIN output.
    time_cost_re = re.match(r'.*cost=(\d+\.\d+)\.\.(\d+\.\d+).*', plan)
    time_cost = (float(time_cost_re.group(1)),
                 float(time_cost_re.group(2)))
    return {'num_rows': int(num_rows),
            'time_cost': time_cost,
            'byte_width': int(byte_width)
            }
210,
60,
210,
43,
1380229308
] |
def select_table_query(self, repo_base, repo, table):
    """Return a SELECT * statement for the fully qualified table name."""
    dh_table = '.'.join((repo_base, repo, table))
    return 'SELECT * FROM %s;' % (dh_table)
210,
60,
210,
43,
1380229308
] |
def user_exists(self, username):
    """Return True when a postgres role with this name exists."""
    result = self.execute_sql(
        "SELECT 1 FROM pg_roles WHERE rolname=%s", (username,))
    return result['row_count'] > 0
210,
60,
210,
43,
1380229308
] |
def create_user(self, username, password, create_db=True):
    """Create a login role, grant it the public role, optionally make a db.

    Returns the result of create_user_database when create_db is True,
    otherwise None.
    """
    self._check_for_injections(username)
    query = ('CREATE ROLE %s WITH LOGIN '
             'NOCREATEDB NOCREATEROLE NOCREATEUSER PASSWORD %s')
    self.execute_sql(query, (AsIs(username), password))
    # Don't do this in the case of the public user.
    if username != settings.PUBLIC_ROLE:
        self.execute_sql(
            ('GRANT %s to %s'),
            (AsIs(settings.PUBLIC_ROLE), AsIs(username)))
    if create_db:
        return self.create_user_database(username)
210,
60,
210,
43,
1380229308
] |
def remove_user(self, username):
    """Drop the postgres role for this user and return the query result."""
    self._check_for_injections(username)
    return self.execute_sql('DROP ROLE %s;', (AsIs(username),))
210,
60,
210,
43,
1380229308
] |
def list_all_users(self):
    """Return the names of all postgres users except the current one."""
    query = 'SELECT usename FROM pg_catalog.pg_user WHERE usename != %s'
    params = (self.user,)
    res = self.execute_sql(query, params)
    # comprehension replaces the original manual append loop
    return [row[0] for row in res['tuples']]
210,
60,
210,
43,
1380229308
] |
def remove_database(self, database, revoke_collaborators=True):
    """Drop a database, optionally revoking collaborator grants first."""
    self._check_for_injections(database)
    # remove collaborator access to the database
    if revoke_collaborators:
        revoke = "REVOKE ALL ON DATABASE %s FROM %s;"
        for user in self.list_all_users():
            self.execute_sql(revoke, (AsIs(database), AsIs(user)))
    # Make sure to close all extant connections to this database or the
    # drop will fail.
    _close_all_connections(database)
    # drop database
    try:
        return self.execute_sql('DROP DATABASE %s;', (AsIs(database),))
    except psycopg2.ProgrammingError as e:
        # best effort: report and swallow, matching prior behavior
        print(e)
        print('this probably happened because the postgres role'
              'exists, but a database of the same name does not.')
210,
60,
210,
43,
1380229308
] |
def list_collaborators(self, repo):
    """Return [{'username': ..., 'db_permissions': ...}] for a repo's ACL.

    Postgres ACL entries look like 'username=UC/repo_base'. The letters
    between '=' and '/' encode privileges:
    r SELECT, w UPDATE, a INSERT, d DELETE, D TRUNCATE, x REFERENCES,
    t TRIGGER, X EXECUTE, U USAGE, C CREATE, c CONNECT, T TEMPORARY;
    arwdDxt is ALL PRIVILEGES for tables, '*' marks a grant option and
    '/yyyy' names the granting role.
    """
    query = 'SELECT unnest(nspacl) FROM pg_namespace WHERE nspname=%s;'
    res = self.execute_sql(query, (repo, ))
    collaborators = []
    for row in res['tuples']:
        # for reference, rows look like this:
        # ('username=UC/repo_base',)
        acl = row[0]
        collaborators.append({
            'username': acl.split('=')[0].strip(),
            'db_permissions': acl.split('=')[1].split('/')[0],
        })
    return collaborators
210,
60,
210,
43,
1380229308
] |
def has_repo_db_privilege(self, login, repo, privilege):
    """
    returns True or False for whether the user has privileges for the
    repo (schema)
    """
    res = self.execute_sql(
        'SELECT has_schema_privilege(%s, %s, %s);',
        (login, repo, privilege))
    return res['tuples'][0][0]
210,
60,
210,
43,
1380229308
] |
def has_column_privilege(self, login, table, column, privilege):
    """Return whether login holds the given privilege on table.column."""
    res = self.execute_sql(
        'SELECT has_column_privilege(%s, %s, %s, %s);',
        (login, table, column, privilege))
    return res['tuples'][0][0]
210,
60,
210,
43,
1380229308
] |
def export_view(self, view_name, file_path, file_format='CSV',
                delimiter=',', header=True):
    """Export a (possibly schema-qualified) view's rows to a file."""
    # validate every dotted component; the last one is a table-like name
    parts = view_name.split('.')
    for part in parts[:-1]:
        self._check_for_injections(part)
    self._validate_table_name(parts[-1])
    self._check_for_injections(file_format)
    self.export_query('SELECT * FROM %s' % view_name,
                      file_path,
                      file_format=file_format,
                      delimiter=delimiter,
                      header=header)
210,
60,
210,
43,
1380229308
] |
def import_file(self, table_name, file_path, file_format='CSV',
                delimiter=',', header=True, encoding='ISO-8859-1',
                quote_character='"'):
    """COPY a file into table_name, dropping the table if the copy fails."""
    parts = table_name.split('.')
    for part in parts[:-1]:
        self._check_for_injections(part)
    self._validate_table_name(parts[-1])
    self._check_for_injections(file_format)
    query = 'COPY %s FROM %s WITH %s %s DELIMITER %s ENCODING %s QUOTE %s;'
    params = (AsIs(table_name), file_path, AsIs(file_format),
              AsIs('HEADER' if header else ''), delimiter, encoding,
              quote_character)
    try:
        self.execute_sql(query, params)
    except Exception as e:
        # clean up the possibly partially-created table, then re-raise
        self.execute_sql('DROP TABLE IF EXISTS %s', (AsIs(table_name),))
        raise ImportError(e)
    # Try importing using dbtruck. Was never enabled by anant.
    # RogerTangos 2015-12-09
    # return self.import_file_w_dbtruck(table_name, file_path)
210,
60,
210,
43,
1380229308
] |
def import_file_w_dbtruck(self, table_name, file_path):
    """Import a data file into table_name via the dbtruck library.

    Always creates a new table; connection settings are taken from this
    connection's attributes.
    """
    # dbtruck is not tested for safety. At all. It's currently disabled
    # in the project RogerTangos 2015-12-09
    from dbtruck.dbtruck import import_datafiles
    # from dbtruck.util import get_logger
    from dbtruck.exporters.pg import PGMethods
    # connection parameters forwarded to dbtruck's postgres backend
    dbsettings = {
        'dbname': self.repo_base,
        'hostname': self.host,
        'username': self.user,
        'password': self.password,
        'port': self.port,
    }
    create_new = True
    errfile = None
    return import_datafiles([file_path], create_new, table_name, errfile,
                            PGMethods, **dbsettings)
210,
60,
210,
43,
1380229308
] |
def create_license_schema(self):
    """Create the license schema (if absent) owned by the public role."""
    schema = settings.LICENSE_SCHEMA
    public_role = settings.PUBLIC_ROLE
    self._check_for_injections(public_role)
    self._check_for_injections(schema)
    return self.execute_sql(
        'CREATE SCHEMA IF NOT EXISTS %s AUTHORIZATION %s',
        (AsIs(schema), AsIs(public_role)))
210,
60,
210,
43,
1380229308
] |
def create_license_link_table(self):
    """Create the license link table (if absent) and grant it to public."""
    schema = settings.LICENSE_LINK_SCHEMA
    table = settings.LICENSE_LINK_TABLE
    public_role = settings.PUBLIC_ROLE
    self._check_for_injections(schema)
    self._validate_table_name(table)
    self._check_for_injections(public_role)
    create = ('CREATE TABLE IF NOT EXISTS %s.%s '
              '(license_link_id serial primary key,'
              'repo_base VARCHAR(40) NOT NULL,'
              'repo VARCHAR(40) NOT NULL,'
              'license_id integer NOT NULL);')
    self.execute_sql(create, (AsIs(schema), AsIs(table)))
    grant = ('GRANT ALL ON %s.%s to %s;')
    return self.execute_sql(
        grant, (AsIs(schema), AsIs(table), AsIs(public_role)))
210,
60,
210,
43,
1380229308
] |
def create_license_link(self, repo_base, repo, license_id):
    '''
    Creates a new license link unless an identical one already exists.
    '''
    # check if link already exists
    query = ('SELECT license_link_id, repo_base, repo, license_id '
             'FROM %s.%s where '
             'repo_base = %s and repo = %s and license_id = %s;')
    params = (
        AsIs(settings.LICENSE_SCHEMA),
        AsIs(settings.LICENSE_LINK_TABLE),
        repo_base, repo, license_id)
    res = self.execute_sql(query, params)
    if res['tuples']:
        return res['status']
    # Consistency fix: use the configured schema/table like the lookup
    # above instead of the hardcoded 'dh_public.license_link'.
    query = (
        'INSERT INTO %s.%s '
        '(repo_base, repo, license_id) '
        'values (%s, %s, %s)')
    params = (
        AsIs(settings.LICENSE_SCHEMA),
        AsIs(settings.LICENSE_LINK_TABLE),
        repo_base, repo, license_id)
    res = self.execute_sql(query, params)
    return res['status']
210,
60,
210,
43,
1380229308
] |
def find_license_links_by_repo(self, repo_base, repo):
    """Return license link rows for (repo_base, repo); [] when none."""
    query = ('SELECT license_link_id, repo_base, repo, license_id '
             'FROM %s.%s where repo_base = %s and repo = %s;')
    params = (
        AsIs(settings.LICENSE_SCHEMA),
        AsIs(settings.LICENSE_LINK_TABLE),
        repo_base, repo)
    res = self.execute_sql(query, params)
    # normalize a falsy result to an empty list for callers
    return res['tuples'] if res['tuples'] else []
210,
60,
210,
43,
1380229308
] |
def find_license_by_id(self, license_id):
    """Return the license row for license_id, or None when absent."""
    query = (
        'SELECT license_id, license_name, pii_def, '
        'pii_anonymized, pii_removed '
        'FROM %s.%s where license_id= %s;')
    params = (
        AsIs(settings.LICENSE_SCHEMA),
        AsIs(settings.LICENSE_TABLE),
        license_id)
    rows = self.execute_sql(query, params)['tuples']
    # None when no license matched; otherwise the first (only) row
    return rows[0] if rows else None
210,
60,
210,
43,
1380229308
] |
def create_security_policy_schema(self):
    """Create the policy schema (if absent) owned by the public role."""
    schema = settings.POLICY_SCHEMA
    public_role = settings.PUBLIC_ROLE
    self._check_for_injections(public_role)
    self._check_for_injections(schema)
    return self.execute_sql(
        'CREATE SCHEMA IF NOT EXISTS %s AUTHORIZATION %s',
        (AsIs(schema), AsIs(public_role)))
210,
60,
210,
43,
1380229308
] |
def create_security_policy(self, policy, policy_type, grantee, grantor,
                           repo_base, repo, table):
    '''
    Creates a new security policy in the policy table if the policy
    does not yet exist.
    '''
    # disallow semicolons in policy. This helps prevent the policy creator
    # from shooting themself in the foot with an attempted sql injection.
    # Note that we don't actually _need_ to do this. The parameters are all
    # escaped in RLS methods executed by the superuser, so there's not a
    # really a risk of a user acquiring root access.
    if ';' in policy:
        raise ValueError("\';'s are disallowed in the policy field")
    query = ('INSERT INTO dh_public.policy (policy, policy_type, grantee, '
             'grantor, table_name, repo, repo_base) values '
             '(%s, %s, %s, %s, %s, %s, %s)')
    values = (policy, policy_type, grantee, grantor, table, repo,
              repo_base)
    return self.execute_sql(query, values)['status']
210,
60,
210,
43,
1380229308
] |
def find_security_policies(self, repo_base, repo=None, table=None,
                           policy_id=None, policy=None, policy_type=None,
                           grantee=None, grantor=None):
    '''
    Returns a list of all security polices that match the inputs
    specified by the user.
    '''
    # Bug fix: the original SELECT list was missing the comma after
    # "grantor", which made postgres treat repo_base as a column alias
    # for grantor and silently drop a result column.
    query = ('SELECT policy_id, policy, policy_type, grantee, grantor, '
             'repo_base, repo, table_name '
             'FROM %s.%s WHERE ')
    params = [AsIs(settings.POLICY_SCHEMA), AsIs(settings.POLICY_TABLE)]
    # mandatory condition
    conditions = ['repo_base = %s']
    params.append(repo_base)
    # optional conditions, appended only when the filter is provided
    optional_filters = [
        ('repo = %s', repo),
        ('table_name = %s', table),
        ('policy_id = %s', policy_id),
        ('policy = %s', policy),
        ('policy_type = %s', policy_type),
        ('grantee = %s', grantee),
        ('grantor = %s', grantor),
    ]
    for clause, value in optional_filters:
        if value:
            conditions.append(clause)
            params.append(value)
    query += ' and '.join(conditions)
    res = self.execute_sql(query, tuple(params))
    return res['tuples']
210,
60,
210,
43,
1380229308
] |
def update_security_policy(self, policy_id, new_policy, new_policy_type,
                           new_grantee):
    '''
    Updates an existing security policy based on the inputs specified
    by the user.
    '''
    query = ('UPDATE dh_public.policy '
             'SET policy = %s, policy_type = %s, '
             'grantee = %s '
             'WHERE policy_id = %s')
    values = (new_policy, new_policy_type, new_grantee, policy_id)
    return self.execute_sql(query, values)['status']
210,
60,
210,
43,
1380229308
] |
def __init__(self, id, type, file_id):
    """File-type record; auto-assigns an id from the class counter."""
    if id is not None:
        self.id = id
    else:
        # take the next id from the shared class-level counter
        self.id = DBFileType.id_counter
        DBFileType.id_counter += 1
    self.type = to_utf8(type)
    self.file_id = file_id
15,
17,
15,
14,
1286235438
] |
def __init__(self):
    # database handle; populated later (presumably by run()) — not here
    self.db = None
15,
17,
15,
14,
1286235438
] |
def __create_table(self, cnn):
    # Create this extension's table(s) on the given connection.
    # NOTE(review): this block looks truncated here — the sqlite branch
    # only imports the driver and the cursor is never used; confirm the
    # full body against the upstream source.
    cursor = cnn.cursor()
    if isinstance(self.db, SqliteDatabase):
        import sqlite3.dbapi2
15,
17,
15,
14,
1286235438
] |
def __create_indices(self, cnn):
    # Create this extension's indices on the given connection.
    # NOTE(review): this block looks truncated here — the mysql branch
    # only imports the driver and the cursor is never used; confirm the
    # full body against the upstream source.
    cursor = cnn.cursor()
    if isinstance(self.db, MysqlDatabase):
        import MySQLdb
15,
17,
15,
14,
1286235438
] |
def __get_files_for_repository(self, repo_id, cursor):
    """Return the ids of files that already have a file_types row."""
    query = "SELECT ft.file_id from file_types ft, files f " + \
        "WHERE f.id = ft.file_id and f.repository_id = ?"
    # statement() adapts the '?' placeholder to the backend's style
    cursor.execute(statement(query, self.db.place_holder), (repo_id,))
    return [row[0] for row in cursor.fetchall()]
15,
17,
15,
14,
1286235438
] |
def run(self, repo, uri, db):
    # Extension entry point: remember the db handle and resolve a local
    # working-copy path back to its repository URI when possible.
    # NOTE(review): this block appears truncated here (repo_uri is
    # computed but unused); confirm the full body upstream.
    self.db = db
    path = uri_to_filename(uri)
    if path is not None:
        repo_uri = repo.get_uri_for_path(path)
    else:
        repo_uri = uri
15,
17,
15,
14,
1286235438
] |
def backout(self, repo, uri, db):
    # Undo this extension's work: delete every file_types row that
    # belongs to a file of the repository being backed out.
    update_statement = """delete from file_types where
file_id in (select id from files f
where f.repository_id = ?)"""
    self._do_backout(repo, uri, db, update_statement)
15,
17,
15,
14,
1286235438
] |
def __init__(self, file):
    """Parse *file* as an MPEG container (TS, program stream, PES or ES).

    Tries the container variants in increasing order of scan cost and
    raises ParseError when none of them matches.
    """
    core.AVContainer.__init__(self)
    # state filled in by the is*() detectors below
    self.sequence_header_offset = 0
    self.mpeg_version = 2
    self.get_time = None
    self.audio = []
    self.video = []
    self.start = None
    self.__seek_size__ = None
    self.__sample_size__ = None
    self.__search__ = None
    self.filename = None
    self.length = None
    self.audio_ok = None
    # detect TS (fast scan)
    if not self.isTS(file):
        # detect system mpeg (many infos)
        if not self.isMPEG(file):
            # detect PES
            if not self.isPES(file):
                # Maybe it's MPEG-ES
                if self.isES(file):
                    # If isES() succeeds, we needn't do anything further.
                    return
                if file.name.lower().endswith('mpeg') or \
                        file.name.lower().endswith('mpg'):
                    # This has to be an mpeg file. It could be a bad
                    # recording from an ivtv based hardware encoder with
                    # same bytes missing at the beginning.
                    # Do some more digging...
                    if not self.isMPEG(file, force=True) or \
                            not self.video or not self.audio:
                        # does not look like an mpeg at all
                        raise ParseError()
                else:
                    # no mpeg at all
                    raise ParseError()
    self.mime = 'video/mpeg'
    if not self.video:
        self.video.append(core.VideoStream())
    if self.sequence_header_offset <= 0:
        return
    # a sequence header was found: extract the video parameters from it
    self.progressive(file)
    for vi in self.video:
        vi.width, vi.height = self.dxy(file)
        vi.fps, vi.aspect = self.framerate_aspect(file)
        vi.bitrate = self.bitrate(file)
        if self.length:
            vi.length = self.length
    if not self.type:
        self.type = 'MPEG Video'
    # set fourcc codec for video and audio
    vc, ac = 'MP2V', 'MP2A'
    if self.mpeg_version == 1:
        vc, ac = 'MPEG', 0x0050
    for v in self.video:
        v.codec = vc
    for a in self.audio:
        if not a.codec:
            a.codec = ac
574,
83,
574,
2,
1415773777
] |
def framerate_aspect(self, file):
    """
    read framerate and aspect ratio
    """
    # byte 7 of the sequence header packs the aspect-ratio code in the
    # high nibble and the frame-rate code in the low nibble
    file.seek(self.sequence_header_offset + 7, 0)
    code = struct.unpack('>B', file.read(1))[0]
    try:
        fps = FRAME_RATE[code & 0xf]
    except IndexError:
        fps = None
    if code >> 4 < len(ASPECT_RATIO):
        aspect = ASPECT_RATIO[code >> 4]
    else:
        aspect = None
    return fps, aspect
574,
83,
574,
2,
1415773777
] |
def bitrate(self, file):
    """
    read the bitrate (most of the time broken)
    """
    # 18-bit rate value spanning bytes 8-10 of the sequence header,
    # expressed in units of 400 bit/s
    file.seek(self.sequence_header_offset + 8, 0)
    hi, lo = struct.unpack('>HB', file.read(3))
    rate_units = (hi << 2) | (lo >> 6)
    return rate_units * 400
574,
83,
574,
2,
1415773777
] |
def ReadSCRMpeg2(buffer):
    """
    read SCR (timestamp) for MPEG2 at the buffer beginning (6 Bytes)

    Returns the clock value in seconds (90 kHz units / 90000), or None
    when the buffer is too short.
    """
    if len(buffer) < 6:
        return None
    # the 33-bit base is split across the first 5 bytes with marker bits
    # interleaved; highbit is bit 32, low4Bytes the remaining 32 bits
    highbit = (byte2int(buffer) & 0x20) >> 5
    low4Bytes = ((int(byte2int(buffer)) & 0x18) >> 3) << 30
    low4Bytes |= (byte2int(buffer) & 0x03) << 28
    low4Bytes |= indexbytes(buffer, 1) << 20
    low4Bytes |= (indexbytes(buffer, 2) & 0xF8) << 12
    # NOTE(review): this "& 0x03 << 13" overlaps the "<< 12" field
    # above by one bit — inherited from the original; confirm against
    # the MPEG-2 SCR bit layout before changing.
    low4Bytes |= (indexbytes(buffer, 2) & 0x03) << 13
    low4Bytes |= indexbytes(buffer, 3) << 5
    low4Bytes |= (indexbytes(buffer, 4)) >> 3
    # the 9-bit extension field is read but folded into sys_clock_ref
    sys_clock_ref = (indexbytes(buffer, 4) & 0x3) << 7
    sys_clock_ref |= (indexbytes(buffer, 5) >> 1)
    return (int(highbit * (1 << 16) * (1 << 16)) + low4Bytes) / 90000
574,
83,
574,
2,
1415773777
] |
def ReadSCRMpeg1(buffer):
    """
    read SCR (timestamp) for MPEG1 at the buffer beginning (5 Bytes)

    Returns the clock value in seconds (90 kHz units / 90000), or None
    when the buffer is too short.
    """
    if len(buffer) < 5:
        return None
    # 33-bit clock: highbit is bit 32, low4Bytes the remaining 32 bits,
    # extracted around the MPEG-1 marker bits
    highbit = (byte2int(buffer) >> 3) & 0x01
    low4Bytes = ((int(byte2int(buffer)) >> 1) & 0x03) << 30
    low4Bytes |= indexbytes(buffer, 1) << 22
    low4Bytes |= (indexbytes(buffer, 2) >> 1) << 15
    low4Bytes |= indexbytes(buffer, 3) << 7
    low4Bytes |= indexbytes(buffer, 4) >> 1
    return (int(highbit) * (1 << 16) * (1 << 16) + low4Bytes) / 90000
574,
83,
574,
2,
1415773777
] |
def ReadPTS(buffer):
    """
    read PTS (PES timestamp) at the buffer beginning (5 Bytes)

    Returns the timestamp in seconds (90 kHz units / 90000).
    """
    # 33-bit PTS split 3/15/15 across 5 bytes, skipping marker bits
    high = ((byte2int(buffer) & 0xF) >> 1)
    med = (indexbytes(buffer, 1) << 7) + (indexbytes(buffer, 2) >> 1)
    low = (indexbytes(buffer, 3) << 7) + (indexbytes(buffer, 4) >> 1)
    return ((int(high) << 30) + (med << 15) + low) / 90000
574,
83,
574,
2,
1415773777
] |
def isMPEG(self, file, force=False):
    """
    This MPEG starts with a sequence of 0x00 followed by a PACK Header
    http://dvd.sourceforge.net/dvdinfo/packhdr.html

    Returns 1 when the file parses as a system MPEG, else 0. With
    force=True, brute-force searches the first 10000 bytes for the pack
    header instead of requiring it near the start.

    NOTE(review): string comparisons against buffer contents assume a
    Python-2 str from file.read() — confirm on Python 3.
    """
    file.seek(0, 0)
    buffer = file.read(10000)
    offset = 0
    # seek until the 0 byte stop
    while offset < len(buffer) - 100 and buffer[offset] == '\0':
        offset += 1
    offset -= 2
    # test for mpeg header 0x00 0x00 0x01
    header = '\x00\x00\x01%s' % chr(PACK_PKT)
    if offset < 0 or not buffer[offset:offset + 4] == header:
        if not force:
            return 0
        # brute force and try to find the pack header in the first
        # 10000 bytes somehow
        offset = buffer.find(header)
        if offset < 0:
            return 0
    # scan the 100000 bytes of data
    buffer += file.read(100000)
    # scan first header, to get basic info about
    # how to read a timestamp
    self.ReadHeader(buffer, offset)
    # store first timestamp
    self.start = self.get_time(buffer[offset + 4:])
    while len(buffer) > offset + 1000 and \
            buffer[offset:offset + 3] == '\x00\x00\x01':
        # read the mpeg header
        new_offset = self.ReadHeader(buffer, offset)
        # header scanning detected error, this is no mpeg
        if new_offset is None:
            return 0
        if new_offset:
            # we have a new offset
            offset = new_offset
            # skip padding 0 before a new header
            while len(buffer) > offset + 10 and \
                    not indexbytes(buffer, offset + 2):
                offset += 1
        else:
            # seek to new header by brute force
            offset += buffer[offset + 4:].find('\x00\x00\x01') + 4
    # fill in values for support functions:
    self.__seek_size__ = 1000000
    self.__sample_size__ = 10000
    self.__search__ = self._find_timer_
    self.filename = file.name
    # get length of the file
    self.length = self.get_length()
    return 1
574,
83,
574,
2,
1415773777
] |
def _find_timer_(buffer):
    """
    Return position of timer in buffer or None if not found.
    This function is valid for 'normal' mpeg files
    """
    marker = '\x00\x00\x01%s' % chr(PACK_PKT)
    pos = buffer.find(marker)
    # the timestamp starts right after the 4-byte pack start code
    return None if pos == -1 else pos + 4
574,
83,
574,
2,
1415773777
] |
def ReadPESHeader(self, offset, buffer, id=0):
    """
    Parse a PES header.
    Since it starts with 0x00 0x00 0x01 like 'normal' mpegs, this
    function will return (0, None) when it is no PES header or
    (packet length, timestamp position (maybe None))
    http://dvd.sourceforge.net/dvdinfo/pes-hdr.html
    """
    if not buffer[0:3] == '\x00\x00\x01':
        return 0, None
    packet_length = (indexbytes(buffer, 4) << 8) + indexbytes(buffer, 5) + 6
    align = indexbytes(buffer, 6) & 4
    header_length = indexbytes(buffer, 8)
    # PES ID (starting with 001)
    if indexbytes(buffer, 3) & 0xE0 == 0xC0:
        # audio stream: register a new stream object for unseen ids
        id = id or indexbytes(buffer, 3) & 0x1F
        for a in self.audio:
            if a.id == id:
                break
        else:
            self.audio.append(core.AudioStream())
            self.audio[-1]._set('id', id)
    elif indexbytes(buffer, 3) & 0xF0 == 0xE0:
        # video stream: register a new stream object for unseen ids
        id = id or indexbytes(buffer, 3) & 0xF
        for v in self.video:
            if v.id == id:
                break
        else:
            self.video.append(core.VideoStream())
            self.video[-1]._set('id', id)
        # new mpeg starting
        if buffer[header_length + 9:header_length + 13] == \
                '\x00\x00\x01\xB3' and not self.sequence_header_offset:
            # yes, remember offset for later use
            self.sequence_header_offset = offset + header_length + 9
    elif indexbytes(buffer, 3) == 189 or indexbytes(buffer, 3) == 191:
        # private stream. we don't know, but maybe we can guess later
        id = id or indexbytes(buffer, 3) & 0xF
        if align and \
                buffer[header_length + 9:header_length + 11] == '\x0b\x77':
            # AC3 stream
            for a in self.audio:
                if a.id == id:
                    break
            else:
                self.audio.append(core.AudioStream())
                self.audio[-1]._set('id', id)
                self.audio[-1].codec = 0x2000  # AC3
    else:
        # unknown content
        pass
    ptsdts = indexbytes(buffer, 7) >> 6
    if ptsdts and ptsdts == indexbytes(buffer, 9) >> 4:
        # NOTE(review): this inner check can never fire — the enclosing
        # condition just established equality. Kept as-is.
        if indexbytes(buffer, 9) >> 4 != ptsdts:
            log.warning(u'WARNING: bad PTS/DTS, please contact us')
            return packet_length, None
        # timestamp = self.ReadPTS(buffer[9:14])
        # NOTE(review): high/med/low are computed but unused; the
        # caller receives the timestamp *position* (9), not its value.
        high = ((indexbytes(buffer, 9) & 0xF) >> 1)
        med = (indexbytes(buffer, 10) << 7) + (indexbytes(buffer, 11) >> 1)
        low = (indexbytes(buffer, 12) << 7) + (indexbytes(buffer, 13) >> 1)
        return packet_length, 9
    return packet_length, None
574,
83,
574,
2,
1415773777
] |
def _find_timer_PES_(self, buffer):
    """
    Return position of timer in buffer or -1 if not found.
    This function is valid for PES files

    (NOTE(review): despite the docstring, the not-found return value is
    actually None, matching _find_timer_.)
    """
    pos = buffer.find('\x00\x00\x01')
    offset = 0
    if pos == -1 or offset + 1000 >= len(buffer):
        return None
    retpos = -1
    # require several consecutive well-formed PES headers before
    # trusting the timestamp position
    ackcount = 0
    while offset + 1000 < len(buffer):
        pos, timestamp = self.ReadPESHeader(offset, buffer[offset:])
        if timestamp is not None and retpos == -1:
            retpos = offset + timestamp
        if pos == 0:
            # Oops, that was a mpeg header, no PES header
            offset += buffer[offset:].find('\x00\x00\x01')
            retpos = -1
            ackcount = 0
        else:
            offset += pos
            if retpos != -1:
                ackcount += 1
        if ackcount > 10:
            # looks ok to me
            return retpos
    return None
574,
83,
574,
2,
1415773777
] |
def isES(self, file):
    """Detect an MPEG video elementary stream and record its video info.

    Returns True when the file begins with a sequence header
    (0x000001B3), False otherwise.
    """
    file.seek(0, 0)
    try:
        header = struct.unpack('>LL', file.read(8))
    except (struct.error, IOError):
        return False
    if header[0] != 0x1B3:
        return False
    # Is an mpeg video elementary stream
    self.mime = 'video/mpeg'
    video = core.VideoStream()
    bits = header[1]
    # 12-bit width / 12-bit height packed into the second word
    video.width = bits >> 20
    video.height = (bits >> 8) & 0xfff
    if bits & 0xf < len(FRAME_RATE):
        video.fps = FRAME_RATE[bits & 0xf]
    if (bits >> 4) & 0xf < len(ASPECT_RATIO):
        # FIXME: Empirically the aspect looks like PAR rather than DAR
        video.aspect = ASPECT_RATIO[(bits >> 4) & 0xf]
    self.video.append(video)
    return True
574,
83,
574,
2,
1415773777
] |
def isTS(self, file):
    """Detect an MPEG transport stream.

    Looks for two sync bytes exactly one packet apart, then scans
    packets for PES payloads to find the first timestamp and a
    sequence header. Returns 1 on success, 0 otherwise.
    """
    file.seek(0, 0)
    buffer = file.read(TS_PACKET_LENGTH * 2)
    # find a sync byte that repeats one packet length later
    c = 0
    while c + TS_PACKET_LENGTH < len(buffer):
        if indexbytes(buffer, c) == indexbytes(buffer, c + TS_PACKET_LENGTH) == TS_SYNC:
            break
        c += 1
    else:
        return 0
    buffer += file.read(10000)
    self.type = 'MPEG-TS'
    while c + TS_PACKET_LENGTH < len(buffer):
        start = indexbytes(buffer, c + 1) & 0x40
        # maybe load more into the buffer
        if c + 2 * TS_PACKET_LENGTH > len(buffer) and c < 500000:
            buffer += file.read(10000)
        # wait until the ts payload contains a payload header
        if not start:
            c += TS_PACKET_LENGTH
            continue
        tsid = ((indexbytes(buffer, c + 1) & 0x3F) << 8) + indexbytes(buffer, c + 2)
        adapt = (indexbytes(buffer, c + 3) & 0x30) >> 4
        offset = 4
        if adapt & 0x02:
            # meta info present, skip it for now
            adapt_len = indexbytes(buffer, c + offset)
            offset += adapt_len + 1
        if not indexbytes(buffer, c + 1) & 0x40:
            # no new pes or psi in stream payload starting
            pass
        elif adapt & 0x01:
            # PES
            timestamp = self.ReadPESHeader(c + offset, buffer[c + offset:],
                                           tsid)[1]
            if timestamp is not None:
                if not hasattr(self, 'start'):
                    # first timestamp seen: remember it and the reader
                    self.get_time = self.ReadPTS
                    timestamp = c + offset + timestamp
                    self.start = self.get_time(buffer[timestamp:timestamp + 5])
                elif not hasattr(self, 'audio_ok'):
                    # sanity-check a second timestamp against the first
                    timestamp = c + offset + timestamp
                    start = self.get_time(buffer[timestamp:timestamp + 5])
                    if start is not None and self.start is not None and \
                            abs(start - self.start) < 10:
                        # looks ok
                        self.audio_ok = True
                    else:
                        # timestamp broken
                        del self.start
                        log.warning(u'Timestamp error, correcting')
        if hasattr(self, 'start') and self.start and \
                self.sequence_header_offset and self.video and self.audio:
            break
        c += TS_PACKET_LENGTH
    if not self.sequence_header_offset:
        return 0
    # fill in values for support functions:
    self.__seek_size__ = 10000000  # 10 MB
    self.__sample_size__ = 100000  # 100 k scanning
    self.__search__ = self._find_timer_TS_
    self.filename = file.name
    # get length of the file
    self.length = self.get_length()
    return 1
574,
83,
574,
2,
1415773777
] |
def get_endpos(self):
    """
    get the last timestamp of the mpeg, return -1 if this is not possible

    (In practice returns None when the file cannot be scanned.)
    """
    if not hasattr(self, 'filename') or not hasattr(self, 'start'):
        return None
    length = os.stat(self.filename)[stat.ST_SIZE]
    if length < self.__sample_size__:
        return
    # Fix: use a context manager so the handle is closed even if the
    # scan below raises (the original leaked it on exception); also
    # avoid shadowing the py2 builtin 'file'.
    with open(self.filename) as f:
        f.seek(length - self.__sample_size__)
        buffer = f.read(self.__sample_size__)
    # walk the tail buffer, keeping the last readable timestamp
    end = None
    while 1:
        pos = self.__search__(buffer)
        if pos is None:
            break
        end = self.get_time(buffer[pos:]) or end
        buffer = buffer[pos + 100:]
    return end
574,
83,
574,
2,
1415773777
] |
def seek(self, end_time):
    """
    Return the byte position in the file where the time position
    is 'pos' seconds. Return 0 if this is not possible
    """
    if not hasattr(self, 'filename') or not hasattr(self, 'start'):
        return 0
    seek_to = 0
    # Fix: context manager closes the handle even when an exception
    # escapes the scan loop (the original leaked it in that case);
    # also avoids shadowing the py2 builtin 'file'.
    with open(self.filename) as f:
        while 1:
            f.seek(self.__seek_size__, 1)
            buffer = f.read(self.__sample_size__)
            if len(buffer) < 10000:
                break
            pos = self.__search__(buffer)
            if pos is not None:
                # found something
                nt = self.get_time(buffer[pos:])
                if nt is not None and nt >= end_time:
                    # too much, break
                    break
            # that wasn't enough
            seek_to = f.tell()
    return seek_to
574,
83,
574,
2,
1415773777
] |
def detect_version():
    """Emit GIPS' software version. May be overridden for testing purposes.
    To override version.py, put a desired version string in the environment
    variable GIPS_OVERRIDE_VERSION."""
    override = os.environ.get('GIPS_OVERRIDE_VERSION')
    return override if override is not None else version.__version__
17,
5,
17,
97,
1441029399
] |
def main():
    # Build a small weighted DAG, find its longest path, color that
    # path's edges red, and render the graph to an image.
    G = nx.DiGraph()  # G is a directed graph
    # build the graph from its edge list
    G.add_weighted_edges_from([(1,2,2.0),(1,3,1.0),(2,3,3.0),(2,4,3.0),(3,5,1.0),(4,6,2.0),(5,4,2.0),(5,6,5.0)])
    for i in G.edges():
        # print i[0], i[1]
        # default every edge to black before highlighting
        G[i[0]][i[1]]["color"] = "black"
    # G[1][2]["color"] = "red"
    # longest path in the DAG (list of node ids)
    maiorCaminho = nx.dag_longest_path(G)
    print maiorCaminho
    for i in range(1, len(maiorCaminho)):
        # color each edge along the longest path red
        G[maiorCaminho[i-1]][maiorCaminho[i]]["color"] = "red"
    desenhaGrafo(G, "grafo-3.png")
9,
1,
9,
5,
1460423339
] |
def __init__(self, rpc_error):
    # Surface the JSON-RPC error object's message and code in the
    # exception text; keep the raw error dict for callers.
    super(JSONRPCException, self).__init__('msg: %r code: %r' %
                                           (rpc_error['message'], rpc_error['code']))
    self.error = rpc_error
115,
22,
115,
1,
1401794890
] |
def __init__(self, service_url=None,
             service_port=None,
             btc_conf_file=None,
             timeout=HTTP_TIMEOUT,
             _connection=None):
    """Low-level JSON-RPC proxy

    Unlike Proxy no conversion is done from the raw JSON objects.

    service_url  - full URL of the RPC endpoint; when None it is built
                   from the local bitcoin.conf credentials
    service_port - port used when bitcoin.conf has no rpcport entry;
                   defaults to the active chain's RPC_PORT
    btc_conf_file - path to bitcoin.conf; platform default when None
    timeout      - HTTP timeout in seconds
    _connection  - internal: reuse an existing HTTP(S) connection
    """
    if service_url is None:
        # No URL supplied: derive one from the local bitcoin.conf.
        # Figure out the path to the bitcoin.conf file
        if btc_conf_file is None:
            if platform.system() == 'Darwin':
                btc_conf_file = os.path.expanduser('~/Library/Application Support/Bitcoin/')
            elif platform.system() == 'Windows':
                btc_conf_file = os.path.join(os.environ['APPDATA'], 'Bitcoin')
            else:
                btc_conf_file = os.path.expanduser('~/.bitcoin')
            btc_conf_file = os.path.join(btc_conf_file, 'bitcoin.conf')
        # Extract contents of bitcoin.conf to build service_url
        with open(btc_conf_file, 'r') as fd:
            conf = {}
            for line in fd.readlines():
                # strip trailing comments; skip lines without key=value
                if '#' in line:
                    line = line[:line.index('#')]
                if '=' not in line:
                    continue
                k, v = line.split('=', 1)
                conf[k.strip()] = v.strip()
            if service_port is None:
                service_port = bitcoin.params.RPC_PORT
            conf['rpcport'] = int(conf.get('rpcport', service_port))
            conf['rpcssl'] = conf.get('rpcssl', '0')
            # normalize the rpcssl flag to a bool
            if conf['rpcssl'].lower() in ('0', 'false'):
                conf['rpcssl'] = False
            elif conf['rpcssl'].lower() in ('1', 'true'):
                conf['rpcssl'] = True
            else:
                raise ValueError('Unknown rpcssl value %r' % conf['rpcssl'])
            # NOTE(review): raises KeyError if rpcuser/rpcpassword are
            # missing from bitcoin.conf -- confirm that is intended.
            service_url = ('%s://%s:%s@localhost:%d' %
                           ('https' if conf['rpcssl'] else 'http',
                            conf['rpcuser'], conf['rpcpassword'],
                            conf['rpcport']))
    self.__service_url = service_url
    self.__url = urlparse.urlparse(service_url)
    # default to port 80 when the URL does not name one
    if self.__url.port is None:
        port = 80
    else:
        port = self.__url.port
    self.__id_count = 0
    # precompute the HTTP basic-auth header from the URL credentials
    authpair = "%s:%s" % (self.__url.username, self.__url.password)
    authpair = authpair.encode('utf8')
    self.__auth_header = b"Basic " + base64.b64encode(authpair)
    if _connection:
        # Callables re-use the connection of the original proxy
        self.__conn = _connection
    elif self.__url.scheme == 'https':
        self.__conn = httplib.HTTPSConnection(self.__url.hostname, port=port,
                                              key_file=None, cert_file=None,
                                              timeout=timeout)
    else:
        self.__conn = httplib.HTTPConnection(self.__url.hostname, port=port,
                                             timeout=timeout)
115,
22,
115,
1,
1401794890
] |
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
# Python internal stuff
raise AttributeError
# Create a callable to do the actual call
f = lambda *args: self._call(name, *args)
# Make debuggers show <function bitcoin.rpc.name> rather than <function
# bitcoin.rpc.<lambda>>
f.__name__ = name
return f | petertodd/timelock | [
115,
22,
115,
1,
1401794890
] |
def _get_response(self):
    """Read one JSON-RPC response off the connection and decode it.

    Floats are parsed as Decimal to avoid precision loss.
    """
    http_response = self.__conn.getresponse()
    if http_response is None:
        raise JSONRPCException({
            'code': -342, 'message': 'missing HTTP response from server'})
    body = http_response.read().decode('utf8')
    return json.loads(body, parse_float=decimal.Decimal)
115,
22,
115,
1,
1401794890
] |
def __init__(self, service_url=None,
             service_port=None,
             btc_conf_file=None,
             timeout=HTTP_TIMEOUT,
             **kwargs):
    """Create a proxy to a bitcoin RPC service

    Unlike RawProxy data is passed as objects, rather than JSON. (not yet
    fully implemented) Assumes Bitcoin Core version >= 0.9; older versions
    mostly work, but there are a few incompatibilities.

    If service_url is not specified the username and password are read out
    of the file btc_conf_file. If btc_conf_file is not specified
    ~/.bitcoin/bitcoin.conf or equivalent is used by default. The default
    port is set according to the chain parameters in use: mainnet, testnet,
    or regtest.

    Usually no arguments to Proxy() are needed; the local bitcoind will be
    used.

    timeout - timeout in seconds before the HTTP interface times out
    """
    # BUG FIX: the original passed timeout=HTTP_TIMEOUT to super(),
    # silently discarding any caller-supplied timeout; forward the
    # actual parameter instead.
    super(Proxy, self).__init__(service_url=service_url,
                                service_port=service_port,
                                btc_conf_file=btc_conf_file,
                                timeout=timeout,
                                **kwargs)
115,
22,
115,
1,
1401794890
] |
def getbalance(self, account='*', minconf=1):
    """Return the balance as an integer number of base units (BTC * COIN).

    account - account to query; "*" (default) means the entire wallet,
              "" the default account.
    minconf - only include transactions confirmed at least this many
              times (default 1).
    """
    balance = self._call('getbalance', account, minconf)
    # scale to integer base units
    return int(balance * COIN)
115,
22,
115,
1,
1401794890
] |
def getblockhash(self, height):
    """Return hash of block in best-block-chain at height.

    Raises IndexError if height is not valid.
    """
    try:
        blockhash = self._call('getblockhash', height)
    except JSONRPCException as ex:
        # translate the RPC failure into the documented IndexError
        raise IndexError('%s.getblockhash(): %s (%d)' %
                         (self.__class__.__name__, ex.error['message'], ex.error['code']))
    return lx(blockhash)
115,
22,
115,
1,
1401794890
] |
def getnewaddress(self, account=None):
    """Return a new Bitcoin address for receiving payments.

    If account is not None, it is added to the address book so payments
    received with the address will be credited to account.
    """
    # only pass the account argument when one was given
    if account is None:
        args = ('getnewaddress',)
    else:
        args = ('getnewaddress', account)
    return CBitcoinAddress(self._call(*args))
115,
22,
115,
1,
1401794890
] |
def getrawmempool(self, verbose=False):
    """Return the mempool.

    Non-verbose results are converted from hex txids to binary form.
    """
    if verbose:
        return self._call('getrawmempool', verbose)
    return [lx(txid) for txid in self._call('getrawmempool')]
115,
22,
115,
1,
1401794890
] |
def gettransaction(self, txid):
    """Get detailed information about in-wallet transaction txid

    Raises IndexError if transaction not found in the wallet.

    FIXME: Returned data types are not yet converted.
    """
    try:
        r = self._call('gettransaction', b2lx(txid))
    except JSONRPCException as ex:
        # BUG FIX: the error message previously named the wrong method
        # ('getrawtransaction()'); report the actual method name.
        raise IndexError('%s.gettransaction(): %s (%d)' %
                         (self.__class__.__name__, ex.error['message'], ex.error['code']))
    return r
115,
22,
115,
1,
1401794890
] |
def listunspent(self, minconf=0, maxconf=9999999, addrs=None):
    """Return unspent transaction outputs in wallet

    Outputs will have between minconf and maxconf (inclusive)
    confirmations, optionally filtered to only include txouts paid to
    addresses in addrs.
    """
    if addrs is None:
        raw = self._call('listunspent', minconf, maxconf)
    else:
        raw = self._call('listunspent', minconf, maxconf,
                         [str(addr) for addr in addrs])
    unspents = []
    for entry in raw:
        # collapse the txid/vout pair into a single COutPoint and
        # convert the remaining fields to their rich types
        entry['outpoint'] = COutPoint(lx(entry['txid']), entry['vout'])
        del entry['txid']
        del entry['vout']
        entry['address'] = CBitcoinAddress(entry['address'])
        entry['scriptPubKey'] = CScript(unhexlify(entry['scriptPubKey']))
        entry['amount'] = int(entry['amount'] * COIN)
        unspents.append(entry)
    return unspents
115,
22,
115,
1,
1401794890
] |
def sendrawtransaction(self, tx):
    """Submit transaction to local node and network."""
    serialized = hexlify(tx.serialize())
    txid = self._call('sendrawtransaction', serialized)
    return lx(txid)
115,
22,
115,
1,
1401794890
] |
def signrawtransaction(self, tx, *args):
    """Sign inputs for transaction

    FIXME: implement options
    """
    result = self._call('signrawtransaction', hexlify(tx.serialize()), *args)
    # replace the raw hex with a deserialized transaction object
    result['tx'] = CTransaction.deserialize(unhexlify(result.pop('hex')))
    return result
115,
22,
115,
1,
1401794890
] |
def test_bigrams_should_return_correct_score(self):
    """Bigrams worker should store the expected chi_sq score."""
    # The corpus words must be materialized as a plain list so the
    # document is json serializable when stored in mongo; a list is
    # also what will be available to the worker in real situations.
    tokens = list(nltk.corpus.genesis.words('english-web.txt'))
    doc_id = self.collection.insert({'tokens': tokens}, w=1)
    Bigrams().delay(doc_id)
    refreshed_document = self.collection.find_one({'_id': doc_id})
    # chi_sq measure of the top-ranked bigram collocation
    expected_chi_sq = 95.59393417173634
    self.assertEqual(refreshed_document['bigram_rank'][0][1][0],
                     expected_chi_sq)
63,
17,
63,
7,
1334771826
] |
def bits_set(x):
    """Return the number of 1-bits among the low 8 bits of x.

    Only bits 0-7 are counted, matching the original loop's behaviour
    (the function operates on byte values); higher bits are ignored.
    """
    return bin(x & 0xff).count('1')
36,
12,
36,
18,
1494320572
] |
def __init__(self, request, paths, session, target, overwrite, **kwargs):
    """Initialize a copy task with its source paths, session, target
    directory and overwrite flag."""
    super(CopyFiles, self).__init__(request=request, **kwargs)
    self.paths, self.session = paths, session
    self.target, self.overwrite = target, overwrite
463,
85,
463,
54,
1450265808
] |
def __init__(self, **kwargs):
    """Initialize."""
    # NOTE(review): body appears truncated in this extract; confirm
    # against the upstream source.
574,
83,
574,
2,
1415773777
] |
def __base__(cls):
    """Get base class."""
    # NOTE(review): takes `cls` -- presumably a classmethod, but the
    # decorator and body are truncated in this extract; confirm upstream.
574,
83,
574,
2,
1415773777
] |
def __eq__(self, other):
    """Compare for equality with *other*."""
    # NOTE(review): body appears truncated in this extract; confirm
    # against the upstream source.
574,
83,
574,
2,
1415773777
] |
def __ne__(self, other):
    """Not equal."""
    # NOTE(review): docstring previously said "Equal." -- corrected.
    # Body appears truncated in this extract; confirm upstream.
574,
83,
574,
2,
1415773777
] |
def __hash__(self):
    """Hash."""
    # NOTE(review): body appears truncated in this extract; confirm
    # against the upstream source.
574,
83,
574,
2,
1415773777
] |
def __setattr__(self, name, value):
    """Prevent mutability."""
    # NOTE(review): body appears truncated in this extract; confirm
    # against the upstream source.
574,
83,
574,
2,
1415773777
] |
def __repr__(self):  # pragma: no cover
    """Representation."""
    # NOTE(review): body appears truncated in this extract; confirm
    # against the upstream source.
574,
83,
574,
2,
1415773777
] |
def __init__(self, *args, **kwargs):
    """Initialize."""
    # NOTE(review): body appears truncated in this extract; confirm
    # against the upstream source.
574,
83,
574,
2,
1415773777
] |
def __iter__(self):
    """Iterator."""
    # NOTE(review): body appears truncated in this extract; confirm
    # against the upstream source.
574,
83,
574,
2,
1415773777
] |
def __len__(self):
    """Length."""
    # NOTE(review): body appears truncated in this extract; confirm
    # against the upstream source.
574,
83,
574,
2,
1415773777
] |
def __getitem__(self, key):
    """Get item: `namespace['key']`.

    Delegates directly to the wrapped mapping, so a missing key raises
    the mapping's usual KeyError.
    """
    value = self._d[key]
    return value
574,
83,
574,
2,
1415773777
] |
def __hash__(self):
    """Hash."""
    # NOTE(review): body appears truncated in this extract; confirm
    # against the upstream source.
574,
83,
574,
2,
1415773777
] |
def __repr__(self):  # pragma: no cover
    """Representation."""
    # NOTE(review): body appears truncated in this extract; confirm
    # against the upstream source.
574,
83,
574,
2,
1415773777
] |
def __init__(self, *args, **kwargs):
    """Initialize."""
    # NOTE(review): body appears truncated in this extract; confirm
    # against the upstream source.
574,
83,
574,
2,
1415773777
] |
def __init__(self, *args, **kwargs):
    """Initialize."""
    # NOTE(review): body appears truncated in this extract; confirm
    # against the upstream source.
574,
83,
574,
2,
1415773777
] |
def __init__(
self, tag, ids, classes, attributes, nth, selectors,
relation, rel_type, contains, lang, flags | SickGear/SickGear | [
574,
83,
574,
2,
1415773777
] |
def __init__(self):
    """Initialize."""
    # NOTE(review): body appears truncated in this extract; confirm
    # against the upstream source.
574,
83,
574,
2,
1415773777
] |
def __init__(self, name, prefix):
    """Initialize with a name and namespace prefix."""
    # NOTE(review): body appears truncated in this extract; confirm
    # against the upstream source.
574,
83,
574,
2,
1415773777
] |
def __init__(self, attribute, prefix, pattern, xml_type_pattern):
    """Initialize with an attribute selector's parts."""
    # NOTE(review): body appears truncated in this extract; confirm
    # against the upstream source.
574,
83,
574,
2,
1415773777
] |
def __init__(self, text):
    """Initialize with the text to match."""
    # NOTE(review): body appears truncated in this extract; confirm
    # against the upstream source.
574,
83,
574,
2,
1415773777
] |
def __init__(self, a, n, b, of_type, last, selectors):
    """Initialize an nth-child style selector component."""
    # NOTE(review): body appears truncated in this extract; confirm
    # against the upstream source.
574,
83,
574,
2,
1415773777
] |
def __init__(self, languages):
    """Initialize with the list of languages to match."""
    # NOTE(review): body appears truncated in this extract; confirm
    # against the upstream source.
574,
83,
574,
2,
1415773777
] |
def __iter__(self):
    """Iterator."""
    # NOTE(review): body appears truncated in this extract; confirm
    # against the upstream source.
574,
83,
574,
2,
1415773777
] |
def __len__(self):  # pragma: no cover
    """Length."""
    # NOTE(review): body appears truncated in this extract; confirm
    # against the upstream source.
574,
83,
574,
2,
1415773777
] |
def __getitem__(self, index):  # pragma: no cover
    """Get item."""
    # NOTE(review): body appears truncated in this extract; confirm
    # against the upstream source.
574,
83,
574,
2,
1415773777
] |
def __init__(self, selectors=tuple(), is_not=False, is_html=False):
    """Initialize a selector list wrapper."""
    # NOTE(review): body appears truncated in this extract; confirm
    # against the upstream source.
574,
83,
574,
2,
1415773777
] |
def __iter__(self):
    """Iterator."""
    # NOTE(review): body appears truncated in this extract; confirm
    # against the upstream source.
574,
83,
574,
2,
1415773777
] |
def __len__(self):
    """Length."""
    # NOTE(review): body appears truncated in this extract; confirm
    # against the upstream source.
574,
83,
574,
2,
1415773777
] |
def __getitem__(self, index):
    """Get item."""
    # NOTE(review): body appears truncated in this extract; confirm
    # against the upstream source.
574,
83,
574,
2,
1415773777
] |
def _pickle(p):
    """Return the pickle reduction for *p*: its base class plus the
    values of every slot except the last."""
    slot_values = [getattr(p, slot) for slot in p.__slots__[:-1]]
    return p.__base__(), tuple(slot_values)
574,
83,
574,
2,
1415773777
] |
def pickle_register(obj):
    """Allow object to be pickled."""
    # NOTE(review): body appears truncated in this extract; confirm
    # against the upstream source.
574,
83,
574,
2,
1415773777
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.