body_hash stringlengths 64 64 | body stringlengths 23 109k | docstring stringlengths 1 57k | path stringlengths 4 198 | name stringlengths 1 115 | repository_name stringlengths 7 111 | repository_stars float64 0 191k | lang stringclasses 1 value | body_without_docstring stringlengths 14 108k | unified stringlengths 45 133k |
|---|---|---|---|---|---|---|---|---|---|
2307722c2dc83f68a0900a5282a7733977da07c16e059fcf13cd7d415d5eb7bc | def scale_scores(raw_path, upper_limit=15.0):
"\n scale all essays' total_score to a preset value\n "
features = []
with codecs.open(raw_path, 'r') as raw_file:
reader = csv.DictReader(raw_file)
for line in reader:
total_score = float(line['total_score'])
score = float(line['score'])
if (total_score != upper_limit):
scale_ratio = (upper_limit / total_score)
new_score = round((score * scale_ratio))
line['total_score'] = upper_limit
line['score'] = new_score
features.append(line)
return features | scale all essays' total_score to a preset value | utils/preprocess.py | scale_scores | yanshengjia/hierarchical-classifier | 3 | python | def scale_scores(raw_path, upper_limit=15.0):
"\n \n "
features = []
with codecs.open(raw_path, 'r') as raw_file:
reader = csv.DictReader(raw_file)
for line in reader:
total_score = float(line['total_score'])
score = float(line['score'])
if (total_score != upper_limit):
scale_ratio = (upper_limit / total_score)
new_score = round((score * scale_ratio))
line['total_score'] = upper_limit
line['score'] = new_score
features.append(line)
return features | def scale_scores(raw_path, upper_limit=15.0):
"\n \n "
features = []
with codecs.open(raw_path, 'r') as raw_file:
reader = csv.DictReader(raw_file)
for line in reader:
total_score = float(line['total_score'])
score = float(line['score'])
if (total_score != upper_limit):
scale_ratio = (upper_limit / total_score)
new_score = round((score * scale_ratio))
line['total_score'] = upper_limit
line['score'] = new_score
features.append(line)
return features<|docstring|>scale all essays' total_score to a preset value<|endoftext|> |
9dbe48a47c4a2e8bd9a4186f9066abf0bac68f1c57651003a6dd6d925cce449a | def create_envelope(config: Config, database: Client) -> APIRouter:
'Create a envelope router & model with access to the given database.'
model = Model(database)
balance_model = BalanceModel(database)
auth_user = create_auth_dep(database, config.jwt_key)
envelope = APIRouter(prefix='/envelope', tags=['Envelope'])
@envelope.post('', status_code=201, response_model=EnvelopeOut, summary='Create new Envelope.')
async def post_root(envelope: EnvelopeIn, user_id: UUID=Depends(auth_user)) -> EnvelopeOut:
return (await model.create.new(EnvelopeNew(**envelope.dict(), user_id=user_id)))
@envelope.get('', response_model=List[EnvelopeOut], summary='Get all Envelopes for current user.')
async def get_root(user_id: UUID=Depends(auth_user)) -> List[EnvelopeOut]:
return (await model.read.many_by_user(user_id))
@envelope.get('/{envelope_id}', response_model=EnvelopeOut, summary='Get requested Envelope for current user.')
async def get_id(envelope_id: UUID, user_id: UUID=Depends(auth_user)) -> EnvelopeOut:
return (await model.read.one(envelope_id, user_id))
@envelope.put('/{envelope_id}', response_model=EnvelopeOut, summary='Update the given Envelope.')
async def put_id(envelope_id: UUID, changes: EnvelopeChanges, user_id: UUID=Depends(auth_user)) -> EnvelopeOut:
return (await model.update.changes(envelope_id, user_id, changes))
default_other = Query(None, description='Where take funds from; default: Available Balance.')
@envelope.put('/{envelope_id}/funds/{funds}', response_model=EnvelopeOut, summary='Add given funds to Envelope.')
async def put_funds(envelope_id: UUID, funds: Amount, other: Optional[UUID]=default_other, user_id: UUID=Depends(auth_user)) -> EnvelopeOut:
'\n Add funds to (or remove funds from, if given as a negative) envelope.\n\n Optionally, include source/target envelope for funds to be taken\n from/sent to. Defaults to Available Balance if not given.\n '
envelope_bal = (await balance_model.read.one_by_collection(envelope_id, user_id))
other_bal = ((await balance_model.read.one_by_collection(other, user_id)) if other else (await balance_model.read.all_minus_allocated(user_id)))
source_balance = (envelope_bal if (funds < 0) else other_bal)
if ((source_balance.amount - funds) < 0):
raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail='Not enough funds available in source.')
if other:
(await model.update.sum_funds((0 - funds), other, user_id))
return (await model.update.sum_funds(funds, envelope_id, user_id))
return envelope | Create a envelope router & model with access to the given database. | src/routers/envelope.py | create_envelope | andrew-chang-dewitt/hoops-api | 0 | python | def create_envelope(config: Config, database: Client) -> APIRouter:
model = Model(database)
balance_model = BalanceModel(database)
auth_user = create_auth_dep(database, config.jwt_key)
envelope = APIRouter(prefix='/envelope', tags=['Envelope'])
@envelope.post(, status_code=201, response_model=EnvelopeOut, summary='Create new Envelope.')
async def post_root(envelope: EnvelopeIn, user_id: UUID=Depends(auth_user)) -> EnvelopeOut:
return (await model.create.new(EnvelopeNew(**envelope.dict(), user_id=user_id)))
@envelope.get(, response_model=List[EnvelopeOut], summary='Get all Envelopes for current user.')
async def get_root(user_id: UUID=Depends(auth_user)) -> List[EnvelopeOut]:
return (await model.read.many_by_user(user_id))
@envelope.get('/{envelope_id}', response_model=EnvelopeOut, summary='Get requested Envelope for current user.')
async def get_id(envelope_id: UUID, user_id: UUID=Depends(auth_user)) -> EnvelopeOut:
return (await model.read.one(envelope_id, user_id))
@envelope.put('/{envelope_id}', response_model=EnvelopeOut, summary='Update the given Envelope.')
async def put_id(envelope_id: UUID, changes: EnvelopeChanges, user_id: UUID=Depends(auth_user)) -> EnvelopeOut:
return (await model.update.changes(envelope_id, user_id, changes))
default_other = Query(None, description='Where take funds from; default: Available Balance.')
@envelope.put('/{envelope_id}/funds/{funds}', response_model=EnvelopeOut, summary='Add given funds to Envelope.')
async def put_funds(envelope_id: UUID, funds: Amount, other: Optional[UUID]=default_other, user_id: UUID=Depends(auth_user)) -> EnvelopeOut:
'\n Add funds to (or remove funds from, if given as a negative) envelope.\n\n Optionally, include source/target envelope for funds to be taken\n from/sent to. Defaults to Available Balance if not given.\n '
envelope_bal = (await balance_model.read.one_by_collection(envelope_id, user_id))
other_bal = ((await balance_model.read.one_by_collection(other, user_id)) if other else (await balance_model.read.all_minus_allocated(user_id)))
source_balance = (envelope_bal if (funds < 0) else other_bal)
if ((source_balance.amount - funds) < 0):
raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail='Not enough funds available in source.')
if other:
(await model.update.sum_funds((0 - funds), other, user_id))
return (await model.update.sum_funds(funds, envelope_id, user_id))
return envelope | def create_envelope(config: Config, database: Client) -> APIRouter:
model = Model(database)
balance_model = BalanceModel(database)
auth_user = create_auth_dep(database, config.jwt_key)
envelope = APIRouter(prefix='/envelope', tags=['Envelope'])
@envelope.post(, status_code=201, response_model=EnvelopeOut, summary='Create new Envelope.')
async def post_root(envelope: EnvelopeIn, user_id: UUID=Depends(auth_user)) -> EnvelopeOut:
return (await model.create.new(EnvelopeNew(**envelope.dict(), user_id=user_id)))
@envelope.get(, response_model=List[EnvelopeOut], summary='Get all Envelopes for current user.')
async def get_root(user_id: UUID=Depends(auth_user)) -> List[EnvelopeOut]:
return (await model.read.many_by_user(user_id))
@envelope.get('/{envelope_id}', response_model=EnvelopeOut, summary='Get requested Envelope for current user.')
async def get_id(envelope_id: UUID, user_id: UUID=Depends(auth_user)) -> EnvelopeOut:
return (await model.read.one(envelope_id, user_id))
@envelope.put('/{envelope_id}', response_model=EnvelopeOut, summary='Update the given Envelope.')
async def put_id(envelope_id: UUID, changes: EnvelopeChanges, user_id: UUID=Depends(auth_user)) -> EnvelopeOut:
return (await model.update.changes(envelope_id, user_id, changes))
default_other = Query(None, description='Where take funds from; default: Available Balance.')
@envelope.put('/{envelope_id}/funds/{funds}', response_model=EnvelopeOut, summary='Add given funds to Envelope.')
async def put_funds(envelope_id: UUID, funds: Amount, other: Optional[UUID]=default_other, user_id: UUID=Depends(auth_user)) -> EnvelopeOut:
'\n Add funds to (or remove funds from, if given as a negative) envelope.\n\n Optionally, include source/target envelope for funds to be taken\n from/sent to. Defaults to Available Balance if not given.\n '
envelope_bal = (await balance_model.read.one_by_collection(envelope_id, user_id))
other_bal = ((await balance_model.read.one_by_collection(other, user_id)) if other else (await balance_model.read.all_minus_allocated(user_id)))
source_balance = (envelope_bal if (funds < 0) else other_bal)
if ((source_balance.amount - funds) < 0):
raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail='Not enough funds available in source.')
if other:
(await model.update.sum_funds((0 - funds), other, user_id))
return (await model.update.sum_funds(funds, envelope_id, user_id))
return envelope<|docstring|>Create a envelope router & model with access to the given database.<|endoftext|> |
2033cf5795976fe05821298b09d8dc8781c1700d634535e0ce7bed771df5df63 | @envelope.put('/{envelope_id}/funds/{funds}', response_model=EnvelopeOut, summary='Add given funds to Envelope.')
async def put_funds(envelope_id: UUID, funds: Amount, other: Optional[UUID]=default_other, user_id: UUID=Depends(auth_user)) -> EnvelopeOut:
'\n Add funds to (or remove funds from, if given as a negative) envelope.\n\n Optionally, include source/target envelope for funds to be taken\n from/sent to. Defaults to Available Balance if not given.\n '
envelope_bal = (await balance_model.read.one_by_collection(envelope_id, user_id))
other_bal = ((await balance_model.read.one_by_collection(other, user_id)) if other else (await balance_model.read.all_minus_allocated(user_id)))
source_balance = (envelope_bal if (funds < 0) else other_bal)
if ((source_balance.amount - funds) < 0):
raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail='Not enough funds available in source.')
if other:
(await model.update.sum_funds((0 - funds), other, user_id))
return (await model.update.sum_funds(funds, envelope_id, user_id)) | Add funds to (or remove funds from, if given as a negative) envelope.
Optionally, include source/target envelope for funds to be taken
from/sent to. Defaults to Available Balance if not given. | src/routers/envelope.py | put_funds | andrew-chang-dewitt/hoops-api | 0 | python | @envelope.put('/{envelope_id}/funds/{funds}', response_model=EnvelopeOut, summary='Add given funds to Envelope.')
async def put_funds(envelope_id: UUID, funds: Amount, other: Optional[UUID]=default_other, user_id: UUID=Depends(auth_user)) -> EnvelopeOut:
'\n Add funds to (or remove funds from, if given as a negative) envelope.\n\n Optionally, include source/target envelope for funds to be taken\n from/sent to. Defaults to Available Balance if not given.\n '
envelope_bal = (await balance_model.read.one_by_collection(envelope_id, user_id))
other_bal = ((await balance_model.read.one_by_collection(other, user_id)) if other else (await balance_model.read.all_minus_allocated(user_id)))
source_balance = (envelope_bal if (funds < 0) else other_bal)
if ((source_balance.amount - funds) < 0):
raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail='Not enough funds available in source.')
if other:
(await model.update.sum_funds((0 - funds), other, user_id))
return (await model.update.sum_funds(funds, envelope_id, user_id)) | @envelope.put('/{envelope_id}/funds/{funds}', response_model=EnvelopeOut, summary='Add given funds to Envelope.')
async def put_funds(envelope_id: UUID, funds: Amount, other: Optional[UUID]=default_other, user_id: UUID=Depends(auth_user)) -> EnvelopeOut:
'\n Add funds to (or remove funds from, if given as a negative) envelope.\n\n Optionally, include source/target envelope for funds to be taken\n from/sent to. Defaults to Available Balance if not given.\n '
envelope_bal = (await balance_model.read.one_by_collection(envelope_id, user_id))
other_bal = ((await balance_model.read.one_by_collection(other, user_id)) if other else (await balance_model.read.all_minus_allocated(user_id)))
source_balance = (envelope_bal if (funds < 0) else other_bal)
if ((source_balance.amount - funds) < 0):
raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail='Not enough funds available in source.')
if other:
(await model.update.sum_funds((0 - funds), other, user_id))
return (await model.update.sum_funds(funds, envelope_id, user_id))<|docstring|>Add funds to (or remove funds from, if given as a negative) envelope.
Optionally, include source/target envelope for funds to be taken
from/sent to. Defaults to Available Balance if not given.<|endoftext|> |
621db6faab0fe53d8a5bb21591ef662b54ece8346a020bf7bbcf1bd398e82ac0 | @classmethod
def unquote(cls, value, plus_as_space=False):
'\n Python 2 and 3 compat layer for utf-8 unquoting\n '
if six.PY2:
if plus_as_space:
return unquote_plus(value).decode('utf8')
else:
return unquote(value).decode('utf8')
elif plus_as_space:
return unquote_plus(value.decode('ascii'))
else:
return unquote(value.decode('ascii')) | Python 2 and 3 compat layer for utf-8 unquoting | env/lib/python3.6/site-packages/daphne/http_protocol.py | unquote | CanOzcan93/TriviaServer | 0 | python | @classmethod
def unquote(cls, value, plus_as_space=False):
'\n \n '
if six.PY2:
if plus_as_space:
return unquote_plus(value).decode('utf8')
else:
return unquote(value).decode('utf8')
elif plus_as_space:
return unquote_plus(value.decode('ascii'))
else:
return unquote(value.decode('ascii')) | @classmethod
def unquote(cls, value, plus_as_space=False):
'\n \n '
if six.PY2:
if plus_as_space:
return unquote_plus(value).decode('utf8')
else:
return unquote(value).decode('utf8')
elif plus_as_space:
return unquote_plus(value.decode('ascii'))
else:
return unquote(value.decode('ascii'))<|docstring|>Python 2 and 3 compat layer for utf-8 unquoting<|endoftext|> |
b4c83bf6989086d6af24406adaf8a6872d89604874c37dcaad42859324c4bc89 | def send_disconnect(self):
'\n Sends a disconnect message on the http.disconnect channel.\n Useful only really for long-polling.\n '
try:
self.factory.channel_layer.send('http.disconnect', {'reply_channel': self.reply_channel, 'path': self.unquote(self.path)})
except self.factory.channel_layer.ChannelFull:
pass | Sends a disconnect message on the http.disconnect channel.
Useful only really for long-polling. | env/lib/python3.6/site-packages/daphne/http_protocol.py | send_disconnect | CanOzcan93/TriviaServer | 0 | python | def send_disconnect(self):
'\n Sends a disconnect message on the http.disconnect channel.\n Useful only really for long-polling.\n '
try:
self.factory.channel_layer.send('http.disconnect', {'reply_channel': self.reply_channel, 'path': self.unquote(self.path)})
except self.factory.channel_layer.ChannelFull:
pass | def send_disconnect(self):
'\n Sends a disconnect message on the http.disconnect channel.\n Useful only really for long-polling.\n '
try:
self.factory.channel_layer.send('http.disconnect', {'reply_channel': self.reply_channel, 'path': self.unquote(self.path)})
except self.factory.channel_layer.ChannelFull:
pass<|docstring|>Sends a disconnect message on the http.disconnect channel.
Useful only really for long-polling.<|endoftext|> |
99c3e102f558a75036ee977d2eef774b7c4e498eb32694dfe2be85c429f2a7b2 | def connectionLost(self, reason):
'\n Cleans up reply channel on close.\n '
if (self.reply_channel and (self.reply_channel in self.channel.factory.reply_protocols)):
self.send_disconnect()
del self.channel.factory.reply_protocols[self.reply_channel]
logger.debug('HTTP disconnect for %s', self.reply_channel)
http.Request.connectionLost(self, reason) | Cleans up reply channel on close. | env/lib/python3.6/site-packages/daphne/http_protocol.py | connectionLost | CanOzcan93/TriviaServer | 0 | python | def connectionLost(self, reason):
'\n \n '
if (self.reply_channel and (self.reply_channel in self.channel.factory.reply_protocols)):
self.send_disconnect()
del self.channel.factory.reply_protocols[self.reply_channel]
logger.debug('HTTP disconnect for %s', self.reply_channel)
http.Request.connectionLost(self, reason) | def connectionLost(self, reason):
'\n \n '
if (self.reply_channel and (self.reply_channel in self.channel.factory.reply_protocols)):
self.send_disconnect()
del self.channel.factory.reply_protocols[self.reply_channel]
logger.debug('HTTP disconnect for %s', self.reply_channel)
http.Request.connectionLost(self, reason)<|docstring|>Cleans up reply channel on close.<|endoftext|> |
be9d066abb1b811f3c5346d6f172a3d74bf8486f367ae7ed24f4eaf4b773ecc9 | def finish(self):
'\n Cleans up reply channel on close.\n '
if (self.reply_channel and (self.reply_channel in self.channel.factory.reply_protocols)):
self.send_disconnect()
del self.channel.factory.reply_protocols[self.reply_channel]
logger.debug('HTTP close for %s', self.reply_channel)
http.Request.finish(self) | Cleans up reply channel on close. | env/lib/python3.6/site-packages/daphne/http_protocol.py | finish | CanOzcan93/TriviaServer | 0 | python | def finish(self):
'\n \n '
if (self.reply_channel and (self.reply_channel in self.channel.factory.reply_protocols)):
self.send_disconnect()
del self.channel.factory.reply_protocols[self.reply_channel]
logger.debug('HTTP close for %s', self.reply_channel)
http.Request.finish(self) | def finish(self):
'\n \n '
if (self.reply_channel and (self.reply_channel in self.channel.factory.reply_protocols)):
self.send_disconnect()
del self.channel.factory.reply_protocols[self.reply_channel]
logger.debug('HTTP close for %s', self.reply_channel)
http.Request.finish(self)<|docstring|>Cleans up reply channel on close.<|endoftext|> |
c2cc5e56654d8f83295b1456dabf4e57b9b84c3d01be4e119d86d800c45914fb | def serverResponse(self, message):
'\n Writes a received HTTP response back out to the transport.\n '
if ('status' in message):
if self._got_response_start:
raise ValueError(('Got multiple Response messages for %s!' % self.reply_channel))
self._got_response_start = True
self.setResponseCode(message['status'])
for (header, value) in message.get('headers', {}):
if isinstance(header, six.text_type):
header = header.encode('latin1')
self.responseHeaders.addRawHeader(header, value)
logger.debug('HTTP %s response started for %s', message['status'], self.reply_channel)
if ('content' in message):
http.Request.write(self, message['content'])
if (not message.get('more_content', False)):
self.finish()
logger.debug('HTTP response complete for %s', self.reply_channel)
try:
self.factory.log_action('http', 'complete', {'path': self.uri.decode('ascii'), 'status': self.code, 'method': self.method.decode('ascii'), 'client': (('%s:%s' % tuple(self.client_addr)) if self.client_addr else None), 'time_taken': self.duration(), 'size': self.sentLength})
except Exception as e:
logging.error(traceback.format_exc())
else:
logger.debug('HTTP response chunk for %s', self.reply_channel) | Writes a received HTTP response back out to the transport. | env/lib/python3.6/site-packages/daphne/http_protocol.py | serverResponse | CanOzcan93/TriviaServer | 0 | python | def serverResponse(self, message):
'\n \n '
if ('status' in message):
if self._got_response_start:
raise ValueError(('Got multiple Response messages for %s!' % self.reply_channel))
self._got_response_start = True
self.setResponseCode(message['status'])
for (header, value) in message.get('headers', {}):
if isinstance(header, six.text_type):
header = header.encode('latin1')
self.responseHeaders.addRawHeader(header, value)
logger.debug('HTTP %s response started for %s', message['status'], self.reply_channel)
if ('content' in message):
http.Request.write(self, message['content'])
if (not message.get('more_content', False)):
self.finish()
logger.debug('HTTP response complete for %s', self.reply_channel)
try:
self.factory.log_action('http', 'complete', {'path': self.uri.decode('ascii'), 'status': self.code, 'method': self.method.decode('ascii'), 'client': (('%s:%s' % tuple(self.client_addr)) if self.client_addr else None), 'time_taken': self.duration(), 'size': self.sentLength})
except Exception as e:
logging.error(traceback.format_exc())
else:
logger.debug('HTTP response chunk for %s', self.reply_channel) | def serverResponse(self, message):
'\n \n '
if ('status' in message):
if self._got_response_start:
raise ValueError(('Got multiple Response messages for %s!' % self.reply_channel))
self._got_response_start = True
self.setResponseCode(message['status'])
for (header, value) in message.get('headers', {}):
if isinstance(header, six.text_type):
header = header.encode('latin1')
self.responseHeaders.addRawHeader(header, value)
logger.debug('HTTP %s response started for %s', message['status'], self.reply_channel)
if ('content' in message):
http.Request.write(self, message['content'])
if (not message.get('more_content', False)):
self.finish()
logger.debug('HTTP response complete for %s', self.reply_channel)
try:
self.factory.log_action('http', 'complete', {'path': self.uri.decode('ascii'), 'status': self.code, 'method': self.method.decode('ascii'), 'client': (('%s:%s' % tuple(self.client_addr)) if self.client_addr else None), 'time_taken': self.duration(), 'size': self.sentLength})
except Exception as e:
logging.error(traceback.format_exc())
else:
logger.debug('HTTP response chunk for %s', self.reply_channel)<|docstring|>Writes a received HTTP response back out to the transport.<|endoftext|> |
f4fb4d770aa4ad28ebf343c8af64336110e35195d6652832d8a176a7dfb3581a | def duration(self):
'\n Returns the time since the start of the request.\n '
if (not hasattr(self, 'request_start')):
return 0
return (time.time() - self.request_start) | Returns the time since the start of the request. | env/lib/python3.6/site-packages/daphne/http_protocol.py | duration | CanOzcan93/TriviaServer | 0 | python | def duration(self):
'\n \n '
if (not hasattr(self, 'request_start')):
return 0
return (time.time() - self.request_start) | def duration(self):
'\n \n '
if (not hasattr(self, 'request_start')):
return 0
return (time.time() - self.request_start)<|docstring|>Returns the time since the start of the request.<|endoftext|> |
be78a3d062fb1dea5708f343056697766198cc2fc297d485856569035429a7fb | def basic_error(self, status, status_text, body):
'\n Responds with a server-level error page (very basic)\n '
self.serverResponse({'status': status, 'status_text': status_text, 'headers': [(b'Content-Type', b'text/html; charset=utf-8')], 'content': (self.error_template % {'title': ((str(status) + ' ') + status_text.decode('ascii')), 'body': body}).encode('utf8')}) | Responds with a server-level error page (very basic) | env/lib/python3.6/site-packages/daphne/http_protocol.py | basic_error | CanOzcan93/TriviaServer | 0 | python | def basic_error(self, status, status_text, body):
'\n \n '
self.serverResponse({'status': status, 'status_text': status_text, 'headers': [(b'Content-Type', b'text/html; charset=utf-8')], 'content': (self.error_template % {'title': ((str(status) + ' ') + status_text.decode('ascii')), 'body': body}).encode('utf8')}) | def basic_error(self, status, status_text, body):
'\n \n '
self.serverResponse({'status': status, 'status_text': status_text, 'headers': [(b'Content-Type', b'text/html; charset=utf-8')], 'content': (self.error_template % {'title': ((str(status) + ' ') + status_text.decode('ascii')), 'body': body}).encode('utf8')})<|docstring|>Responds with a server-level error page (very basic)<|endoftext|> |
358e84ea678ddd1a75e3344b0585ef6a348236672653f6417e0ddf14098c0b3b | def log_action(self, protocol, action, details):
'\n Dispatches to any registered action logger, if there is one.\n '
if self.action_logger:
self.action_logger(protocol, action, details) | Dispatches to any registered action logger, if there is one. | env/lib/python3.6/site-packages/daphne/http_protocol.py | log_action | CanOzcan93/TriviaServer | 0 | python | def log_action(self, protocol, action, details):
'\n \n '
if self.action_logger:
self.action_logger(protocol, action, details) | def log_action(self, protocol, action, details):
'\n \n '
if self.action_logger:
self.action_logger(protocol, action, details)<|docstring|>Dispatches to any registered action logger, if there is one.<|endoftext|> |
8d65d8b9685bf5d1a744b5f1286c8787501de1e2e8a4ce38176ef388c62c4e7e | def check_timeouts(self):
"\n Runs through all HTTP protocol instances and times them out if they've\n taken too long (and so their message is probably expired)\n "
for protocol in list(self.reply_protocols.values()):
if (isinstance(protocol, WebRequest) and (protocol.duration() > self.timeout)):
protocol.basic_error(503, b'Service Unavailable', 'Worker server failed to respond within time limit.')
elif isinstance(protocol, WebSocketProtocol):
if (protocol.duration() > self.websocket_timeout):
protocol.serverClose()
else:
protocol.check_ping() | Runs through all HTTP protocol instances and times them out if they've
taken too long (and so their message is probably expired) | env/lib/python3.6/site-packages/daphne/http_protocol.py | check_timeouts | CanOzcan93/TriviaServer | 0 | python | def check_timeouts(self):
"\n Runs through all HTTP protocol instances and times them out if they've\n taken too long (and so their message is probably expired)\n "
for protocol in list(self.reply_protocols.values()):
if (isinstance(protocol, WebRequest) and (protocol.duration() > self.timeout)):
protocol.basic_error(503, b'Service Unavailable', 'Worker server failed to respond within time limit.')
elif isinstance(protocol, WebSocketProtocol):
if (protocol.duration() > self.websocket_timeout):
protocol.serverClose()
else:
protocol.check_ping() | def check_timeouts(self):
"\n Runs through all HTTP protocol instances and times them out if they've\n taken too long (and so their message is probably expired)\n "
for protocol in list(self.reply_protocols.values()):
if (isinstance(protocol, WebRequest) and (protocol.duration() > self.timeout)):
protocol.basic_error(503, b'Service Unavailable', 'Worker server failed to respond within time limit.')
elif isinstance(protocol, WebSocketProtocol):
if (protocol.duration() > self.websocket_timeout):
protocol.serverClose()
else:
protocol.check_ping()<|docstring|>Runs through all HTTP protocol instances and times them out if they've
taken too long (and so their message is probably expired)<|endoftext|> |
b2c4d7f2de76c6b7b700729e1f527c2f9eb42b2b22d60aba58f2fa1af70ef50b | @pytest.mark.parametrize(argnames=['interface'], argvalues=[pytest.param(constants.CEPHBLOCKPOOL, marks=pytest.mark.polarion_id('OCS-612')), pytest.param(constants.CEPHFILESYSTEM, marks=pytest.mark.polarion_id('OCS-612'))])
def test_install_amq_cephfs(self, interface, test_fixture_amq):
'\n Create amq cluster and run open messages on it\n\n '
sc = default_storage_class(interface_type=interface)
test_fixture_amq.setup_amq_cluster(sc.name)
test_fixture_amq.create_messaging_on_amq()
waiting_time = 60
log.info(f'Waiting for {waiting_time}sec to generate msg')
time.sleep(waiting_time)
threads = test_fixture_amq.run_in_bg()
for t in threads:
t.join() | Create amq cluster and run open messages on it | tests/e2e/workloads/amq/test_amq_streamer_creation.py | test_install_amq_cephfs | gabriellasroman/ocs-ci | 0 | python | @pytest.mark.parametrize(argnames=['interface'], argvalues=[pytest.param(constants.CEPHBLOCKPOOL, marks=pytest.mark.polarion_id('OCS-612')), pytest.param(constants.CEPHFILESYSTEM, marks=pytest.mark.polarion_id('OCS-612'))])
def test_install_amq_cephfs(self, interface, test_fixture_amq):
'\n \n\n '
sc = default_storage_class(interface_type=interface)
test_fixture_amq.setup_amq_cluster(sc.name)
test_fixture_amq.create_messaging_on_amq()
waiting_time = 60
log.info(f'Waiting for {waiting_time}sec to generate msg')
time.sleep(waiting_time)
threads = test_fixture_amq.run_in_bg()
for t in threads:
t.join() | @pytest.mark.parametrize(argnames=['interface'], argvalues=[pytest.param(constants.CEPHBLOCKPOOL, marks=pytest.mark.polarion_id('OCS-612')), pytest.param(constants.CEPHFILESYSTEM, marks=pytest.mark.polarion_id('OCS-612'))])
def test_install_amq_cephfs(self, interface, test_fixture_amq):
'\n \n\n '
sc = default_storage_class(interface_type=interface)
test_fixture_amq.setup_amq_cluster(sc.name)
test_fixture_amq.create_messaging_on_amq()
waiting_time = 60
log.info(f'Waiting for {waiting_time}sec to generate msg')
time.sleep(waiting_time)
threads = test_fixture_amq.run_in_bg()
for t in threads:
t.join()<|docstring|>Create amq cluster and run open messages on it<|endoftext|> |
d5ff5e49e224f1b0cee69f8a55265b5f368d5c37d3165760754e7d45759522f9 | def get_external_container_database(external_container_database_id: Optional[str]=None, opts: Optional[pulumi.InvokeOptions]=None) -> AwaitableGetExternalContainerDatabaseResult:
'\n This data source provides details about a specific External Container Database resource in Oracle Cloud Infrastructure Database service.\n\n Gets information about the specified external container database.\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_oci as oci\n\n test_external_container_database = oci.database.get_external_container_database(external_container_database_id=oci_database_external_container_database["test_external_container_database"]["id"])\n ```\n\n\n :param str external_container_database_id: The ExternalContainerDatabase [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).\n '
__args__ = dict()
__args__['externalContainerDatabaseId'] = external_container_database_id
if (opts is None):
opts = pulumi.InvokeOptions()
if (opts.version is None):
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('oci:database/getExternalContainerDatabase:getExternalContainerDatabase', __args__, opts=opts, typ=GetExternalContainerDatabaseResult).value
return AwaitableGetExternalContainerDatabaseResult(character_set=__ret__.character_set, compartment_id=__ret__.compartment_id, database_configuration=__ret__.database_configuration, database_edition=__ret__.database_edition, database_management_config=__ret__.database_management_config, database_version=__ret__.database_version, db_id=__ret__.db_id, db_packs=__ret__.db_packs, db_unique_name=__ret__.db_unique_name, defined_tags=__ret__.defined_tags, display_name=__ret__.display_name, external_container_database_id=__ret__.external_container_database_id, freeform_tags=__ret__.freeform_tags, id=__ret__.id, lifecycle_details=__ret__.lifecycle_details, ncharacter_set=__ret__.ncharacter_set, state=__ret__.state, time_created=__ret__.time_created, time_zone=__ret__.time_zone) | This data source provides details about a specific External Container Database resource in Oracle Cloud Infrastructure Database service.
Gets information about the specified external container database.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_external_container_database = oci.database.get_external_container_database(external_container_database_id=oci_database_external_container_database["test_external_container_database"]["id"])
```
:param str external_container_database_id: The ExternalContainerDatabase [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm). | sdk/python/pulumi_oci/database/get_external_container_database.py | get_external_container_database | pellizzetti/pulumi-oci-dev | 5 | python | def get_external_container_database(external_container_database_id: Optional[str]=None, opts: Optional[pulumi.InvokeOptions]=None) -> AwaitableGetExternalContainerDatabaseResult:
'\n This data source provides details about a specific External Container Database resource in Oracle Cloud Infrastructure Database service.\n\n Gets information about the specified external container database.\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_oci as oci\n\n test_external_container_database = oci.database.get_external_container_database(external_container_database_id=oci_database_external_container_database["test_external_container_database"]["id"])\n ```\n\n\n :param str external_container_database_id: The ExternalContainerDatabase [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).\n '
__args__ = dict()
__args__['externalContainerDatabaseId'] = external_container_database_id
if (opts is None):
opts = pulumi.InvokeOptions()
if (opts.version is None):
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('oci:database/getExternalContainerDatabase:getExternalContainerDatabase', __args__, opts=opts, typ=GetExternalContainerDatabaseResult).value
return AwaitableGetExternalContainerDatabaseResult(character_set=__ret__.character_set, compartment_id=__ret__.compartment_id, database_configuration=__ret__.database_configuration, database_edition=__ret__.database_edition, database_management_config=__ret__.database_management_config, database_version=__ret__.database_version, db_id=__ret__.db_id, db_packs=__ret__.db_packs, db_unique_name=__ret__.db_unique_name, defined_tags=__ret__.defined_tags, display_name=__ret__.display_name, external_container_database_id=__ret__.external_container_database_id, freeform_tags=__ret__.freeform_tags, id=__ret__.id, lifecycle_details=__ret__.lifecycle_details, ncharacter_set=__ret__.ncharacter_set, state=__ret__.state, time_created=__ret__.time_created, time_zone=__ret__.time_zone) | def get_external_container_database(external_container_database_id: Optional[str]=None, opts: Optional[pulumi.InvokeOptions]=None) -> AwaitableGetExternalContainerDatabaseResult:
'\n This data source provides details about a specific External Container Database resource in Oracle Cloud Infrastructure Database service.\n\n Gets information about the specified external container database.\n\n ## Example Usage\n\n ```python\n import pulumi\n import pulumi_oci as oci\n\n test_external_container_database = oci.database.get_external_container_database(external_container_database_id=oci_database_external_container_database["test_external_container_database"]["id"])\n ```\n\n\n :param str external_container_database_id: The ExternalContainerDatabase [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).\n '
__args__ = dict()
__args__['externalContainerDatabaseId'] = external_container_database_id
if (opts is None):
opts = pulumi.InvokeOptions()
if (opts.version is None):
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('oci:database/getExternalContainerDatabase:getExternalContainerDatabase', __args__, opts=opts, typ=GetExternalContainerDatabaseResult).value
return AwaitableGetExternalContainerDatabaseResult(character_set=__ret__.character_set, compartment_id=__ret__.compartment_id, database_configuration=__ret__.database_configuration, database_edition=__ret__.database_edition, database_management_config=__ret__.database_management_config, database_version=__ret__.database_version, db_id=__ret__.db_id, db_packs=__ret__.db_packs, db_unique_name=__ret__.db_unique_name, defined_tags=__ret__.defined_tags, display_name=__ret__.display_name, external_container_database_id=__ret__.external_container_database_id, freeform_tags=__ret__.freeform_tags, id=__ret__.id, lifecycle_details=__ret__.lifecycle_details, ncharacter_set=__ret__.ncharacter_set, state=__ret__.state, time_created=__ret__.time_created, time_zone=__ret__.time_zone)<|docstring|>This data source provides details about a specific External Container Database resource in Oracle Cloud Infrastructure Database service.
Gets information about the specified external container database.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_external_container_database = oci.database.get_external_container_database(external_container_database_id=oci_database_external_container_database["test_external_container_database"]["id"])
```
:param str external_container_database_id: The ExternalContainerDatabase [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).<|endoftext|> |
7850f854ef3abaaa34a78ff966f0d13614297f7eacc477c962da4f902d35f1dd | @property
@pulumi.getter(name='characterSet')
def character_set(self) -> str:
'\n The character set of the external database.\n '
return pulumi.get(self, 'character_set') | The character set of the external database. | sdk/python/pulumi_oci/database/get_external_container_database.py | character_set | pellizzetti/pulumi-oci-dev | 5 | python | @property
@pulumi.getter(name='characterSet')
def character_set(self) -> str:
'\n \n '
return pulumi.get(self, 'character_set') | @property
@pulumi.getter(name='characterSet')
def character_set(self) -> str:
'\n \n '
return pulumi.get(self, 'character_set')<|docstring|>The character set of the external database.<|endoftext|> |
435e6f5bea6f397044193c5ff94e8db20ed16d1fe90ff4ce9c97c2f87dc5a4d7 | @property
@pulumi.getter(name='compartmentId')
def compartment_id(self) -> str:
'\n The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment.\n '
return pulumi.get(self, 'compartment_id') | The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment. | sdk/python/pulumi_oci/database/get_external_container_database.py | compartment_id | pellizzetti/pulumi-oci-dev | 5 | python | @property
@pulumi.getter(name='compartmentId')
def compartment_id(self) -> str:
'\n \n '
return pulumi.get(self, 'compartment_id') | @property
@pulumi.getter(name='compartmentId')
def compartment_id(self) -> str:
'\n \n '
return pulumi.get(self, 'compartment_id')<|docstring|>The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment.<|endoftext|> |
710271db9519042801342470efe2f68f3e197c6225da117b5ffb7142ba8638e6 | @property
@pulumi.getter(name='databaseConfiguration')
def database_configuration(self) -> str:
'\n The Oracle Database configuration\n '
return pulumi.get(self, 'database_configuration') | The Oracle Database configuration | sdk/python/pulumi_oci/database/get_external_container_database.py | database_configuration | pellizzetti/pulumi-oci-dev | 5 | python | @property
@pulumi.getter(name='databaseConfiguration')
def database_configuration(self) -> str:
'\n \n '
return pulumi.get(self, 'database_configuration') | @property
@pulumi.getter(name='databaseConfiguration')
def database_configuration(self) -> str:
'\n \n '
return pulumi.get(self, 'database_configuration')<|docstring|>The Oracle Database configuration<|endoftext|> |
08d78ff5780adff11d33cb504a96a489abf5cf077bd97356dcd484613cbffd3f | @property
@pulumi.getter(name='databaseEdition')
def database_edition(self) -> str:
'\n The Oracle Database edition.\n '
return pulumi.get(self, 'database_edition') | The Oracle Database edition. | sdk/python/pulumi_oci/database/get_external_container_database.py | database_edition | pellizzetti/pulumi-oci-dev | 5 | python | @property
@pulumi.getter(name='databaseEdition')
def database_edition(self) -> str:
'\n \n '
return pulumi.get(self, 'database_edition') | @property
@pulumi.getter(name='databaseEdition')
def database_edition(self) -> str:
'\n \n '
return pulumi.get(self, 'database_edition')<|docstring|>The Oracle Database edition.<|endoftext|> |
d04f2638868eacc3b5adf627dc7ffb3b53d08750066da0f2d17742b3b3d04e0f | @property
@pulumi.getter(name='databaseManagementConfig')
def database_management_config(self) -> 'outputs.GetExternalContainerDatabaseDatabaseManagementConfigResult':
'\n The configuration of the Database Management service.\n '
return pulumi.get(self, 'database_management_config') | The configuration of the Database Management service. | sdk/python/pulumi_oci/database/get_external_container_database.py | database_management_config | pellizzetti/pulumi-oci-dev | 5 | python | @property
@pulumi.getter(name='databaseManagementConfig')
def database_management_config(self) -> 'outputs.GetExternalContainerDatabaseDatabaseManagementConfigResult':
'\n \n '
return pulumi.get(self, 'database_management_config') | @property
@pulumi.getter(name='databaseManagementConfig')
def database_management_config(self) -> 'outputs.GetExternalContainerDatabaseDatabaseManagementConfigResult':
'\n \n '
return pulumi.get(self, 'database_management_config')<|docstring|>The configuration of the Database Management service.<|endoftext|> |
2ddebee886751fabc33d581136c4792c4798dec5df7ef715baa3223eaa100674 | @property
@pulumi.getter(name='databaseVersion')
def database_version(self) -> str:
'\n The Oracle Database version.\n '
return pulumi.get(self, 'database_version') | The Oracle Database version. | sdk/python/pulumi_oci/database/get_external_container_database.py | database_version | pellizzetti/pulumi-oci-dev | 5 | python | @property
@pulumi.getter(name='databaseVersion')
def database_version(self) -> str:
'\n \n '
return pulumi.get(self, 'database_version') | @property
@pulumi.getter(name='databaseVersion')
def database_version(self) -> str:
'\n \n '
return pulumi.get(self, 'database_version')<|docstring|>The Oracle Database version.<|endoftext|> |
01f4a9446c0ebebd8736d8fb9179fd9aca32fcd8d55cef0306f621d7c941d7fa | @property
@pulumi.getter(name='dbId')
def db_id(self) -> str:
'\n The Oracle Database ID, which identifies an Oracle Database located outside of Oracle Cloud.\n '
return pulumi.get(self, 'db_id') | The Oracle Database ID, which identifies an Oracle Database located outside of Oracle Cloud. | sdk/python/pulumi_oci/database/get_external_container_database.py | db_id | pellizzetti/pulumi-oci-dev | 5 | python | @property
@pulumi.getter(name='dbId')
def db_id(self) -> str:
'\n \n '
return pulumi.get(self, 'db_id') | @property
@pulumi.getter(name='dbId')
def db_id(self) -> str:
'\n \n '
return pulumi.get(self, 'db_id')<|docstring|>The Oracle Database ID, which identifies an Oracle Database located outside of Oracle Cloud.<|endoftext|> |
47152758ea3d2cf604033f6ce90efb90b22c384b55a9fd8318fd0ebc378262f3 | @property
@pulumi.getter(name='dbPacks')
def db_packs(self) -> str:
'\n The database packs licensed for the external Oracle Database.\n '
return pulumi.get(self, 'db_packs') | The database packs licensed for the external Oracle Database. | sdk/python/pulumi_oci/database/get_external_container_database.py | db_packs | pellizzetti/pulumi-oci-dev | 5 | python | @property
@pulumi.getter(name='dbPacks')
def db_packs(self) -> str:
'\n \n '
return pulumi.get(self, 'db_packs') | @property
@pulumi.getter(name='dbPacks')
def db_packs(self) -> str:
'\n \n '
return pulumi.get(self, 'db_packs')<|docstring|>The database packs licensed for the external Oracle Database.<|endoftext|> |
f5a789415f8f662c83c43d5654b8c5f3c2942b3bc582be0d62876f70400cbff3 | @property
@pulumi.getter(name='dbUniqueName')
def db_unique_name(self) -> str:
'\n The `DB_UNIQUE_NAME` of the external database.\n '
return pulumi.get(self, 'db_unique_name') | The `DB_UNIQUE_NAME` of the external database. | sdk/python/pulumi_oci/database/get_external_container_database.py | db_unique_name | pellizzetti/pulumi-oci-dev | 5 | python | @property
@pulumi.getter(name='dbUniqueName')
def db_unique_name(self) -> str:
'\n \n '
return pulumi.get(self, 'db_unique_name') | @property
@pulumi.getter(name='dbUniqueName')
def db_unique_name(self) -> str:
'\n \n '
return pulumi.get(self, 'db_unique_name')<|docstring|>The `DB_UNIQUE_NAME` of the external database.<|endoftext|> |
e07f63f490e4409f2a7ff7ffe42b86524f7e3432d61c83fd8d97fa0170ff3637 | @property
@pulumi.getter(name='definedTags')
def defined_tags(self) -> Mapping[(str, Any)]:
'\n Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).\n '
return pulumi.get(self, 'defined_tags') | Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). | sdk/python/pulumi_oci/database/get_external_container_database.py | defined_tags | pellizzetti/pulumi-oci-dev | 5 | python | @property
@pulumi.getter(name='definedTags')
def defined_tags(self) -> Mapping[(str, Any)]:
'\n \n '
return pulumi.get(self, 'defined_tags') | @property
@pulumi.getter(name='definedTags')
def defined_tags(self) -> Mapping[(str, Any)]:
'\n \n '
return pulumi.get(self, 'defined_tags')<|docstring|>Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).<|endoftext|> |
c9073e87f04d0568d844d1188c93732613d2f717a41de62d8d003389d3f6a48c | @property
@pulumi.getter(name='displayName')
def display_name(self) -> str:
'\n The user-friendly name for the external database. The name does not have to be unique.\n '
return pulumi.get(self, 'display_name') | The user-friendly name for the external database. The name does not have to be unique. | sdk/python/pulumi_oci/database/get_external_container_database.py | display_name | pellizzetti/pulumi-oci-dev | 5 | python | @property
@pulumi.getter(name='displayName')
def display_name(self) -> str:
'\n \n '
return pulumi.get(self, 'display_name') | @property
@pulumi.getter(name='displayName')
def display_name(self) -> str:
'\n \n '
return pulumi.get(self, 'display_name')<|docstring|>The user-friendly name for the external database. The name does not have to be unique.<|endoftext|> |
cd58c56bd9944f8fd05cd5519380119f889b1615db377698f4cc2c1c76bc877e | @property
@pulumi.getter(name='freeformTags')
def freeform_tags(self) -> Mapping[(str, Any)]:
'\n Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}`\n '
return pulumi.get(self, 'freeform_tags') | Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}` | sdk/python/pulumi_oci/database/get_external_container_database.py | freeform_tags | pellizzetti/pulumi-oci-dev | 5 | python | @property
@pulumi.getter(name='freeformTags')
def freeform_tags(self) -> Mapping[(str, Any)]:
'\n \n '
return pulumi.get(self, 'freeform_tags') | @property
@pulumi.getter(name='freeformTags')
def freeform_tags(self) -> Mapping[(str, Any)]:
'\n \n '
return pulumi.get(self, 'freeform_tags')<|docstring|>Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}`<|endoftext|> |
8126733690e812d943dafe26da9b8cad5f8683a6df96e3cfbc5dcc992f98be55 | @property
@pulumi.getter
def id(self) -> str:
'\n The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the Oracle Cloud Infrastructure external database resource.\n '
return pulumi.get(self, 'id') | The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the Oracle Cloud Infrastructure external database resource. | sdk/python/pulumi_oci/database/get_external_container_database.py | id | pellizzetti/pulumi-oci-dev | 5 | python | @property
@pulumi.getter
def id(self) -> str:
'\n \n '
return pulumi.get(self, 'id') | @property
@pulumi.getter
def id(self) -> str:
'\n \n '
return pulumi.get(self, 'id')<|docstring|>The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the Oracle Cloud Infrastructure external database resource.<|endoftext|> |
8504a77402bf608c61171f28ebec87c34ab0aaaf58f99a563ff889e965cec39f | @property
@pulumi.getter(name='lifecycleDetails')
def lifecycle_details(self) -> str:
'\n Additional information about the current lifecycle state.\n '
return pulumi.get(self, 'lifecycle_details') | Additional information about the current lifecycle state. | sdk/python/pulumi_oci/database/get_external_container_database.py | lifecycle_details | pellizzetti/pulumi-oci-dev | 5 | python | @property
@pulumi.getter(name='lifecycleDetails')
def lifecycle_details(self) -> str:
'\n \n '
return pulumi.get(self, 'lifecycle_details') | @property
@pulumi.getter(name='lifecycleDetails')
def lifecycle_details(self) -> str:
'\n \n '
return pulumi.get(self, 'lifecycle_details')<|docstring|>Additional information about the current lifecycle state.<|endoftext|> |
2d944371f620fbc13944f4ce999b1d3339c94af103bafde4fbd92abfaf7666e3 | @property
@pulumi.getter(name='ncharacterSet')
def ncharacter_set(self) -> str:
'\n The national character of the external database.\n '
return pulumi.get(self, 'ncharacter_set') | The national character of the external database. | sdk/python/pulumi_oci/database/get_external_container_database.py | ncharacter_set | pellizzetti/pulumi-oci-dev | 5 | python | @property
@pulumi.getter(name='ncharacterSet')
def ncharacter_set(self) -> str:
'\n \n '
return pulumi.get(self, 'ncharacter_set') | @property
@pulumi.getter(name='ncharacterSet')
def ncharacter_set(self) -> str:
'\n \n '
return pulumi.get(self, 'ncharacter_set')<|docstring|>The national character of the external database.<|endoftext|> |
b54f748c9b3c1d5311e86ebfa51e92194bde5f534c3105bfbc4005a0270bf359 | @property
@pulumi.getter
def state(self) -> str:
'\n The current state of the Oracle Cloud Infrastructure external database resource.\n '
return pulumi.get(self, 'state') | The current state of the Oracle Cloud Infrastructure external database resource. | sdk/python/pulumi_oci/database/get_external_container_database.py | state | pellizzetti/pulumi-oci-dev | 5 | python | @property
@pulumi.getter
def state(self) -> str:
'\n \n '
return pulumi.get(self, 'state') | @property
@pulumi.getter
def state(self) -> str:
'\n \n '
return pulumi.get(self, 'state')<|docstring|>The current state of the Oracle Cloud Infrastructure external database resource.<|endoftext|> |
a1891b4bb99437f8fad7a7356708971d31391124aee15b4a9c03e713ee5f18c9 | @property
@pulumi.getter(name='timeCreated')
def time_created(self) -> str:
'\n The date and time the database was created.\n '
return pulumi.get(self, 'time_created') | The date and time the database was created. | sdk/python/pulumi_oci/database/get_external_container_database.py | time_created | pellizzetti/pulumi-oci-dev | 5 | python | @property
@pulumi.getter(name='timeCreated')
def time_created(self) -> str:
'\n \n '
return pulumi.get(self, 'time_created') | @property
@pulumi.getter(name='timeCreated')
def time_created(self) -> str:
'\n \n '
return pulumi.get(self, 'time_created')<|docstring|>The date and time the database was created.<|endoftext|> |
c3ef27f37f52867c211b2a8f5ce484e7b4dcd37317bd8ed7e7fa4332970e8270 | @property
@pulumi.getter(name='timeZone')
def time_zone(self) -> str:
"\n The time zone of the external database. It is a time zone offset (a character type in the format '[+|-]TZH:TZM') or a time zone region name, depending on how the time zone value was specified when the database was created / last altered.\n "
return pulumi.get(self, 'time_zone') | The time zone of the external database. It is a time zone offset (a character type in the format '[+|-]TZH:TZM') or a time zone region name, depending on how the time zone value was specified when the database was created / last altered. | sdk/python/pulumi_oci/database/get_external_container_database.py | time_zone | pellizzetti/pulumi-oci-dev | 5 | python | @property
@pulumi.getter(name='timeZone')
def time_zone(self) -> str:
"\n \n "
return pulumi.get(self, 'time_zone') | @property
@pulumi.getter(name='timeZone')
def time_zone(self) -> str:
"\n \n "
return pulumi.get(self, 'time_zone')<|docstring|>The time zone of the external database. It is a time zone offset (a character type in the format '[+|-]TZH:TZM') or a time zone region name, depending on how the time zone value was specified when the database was created / last altered.<|endoftext|> |
12f1d586dce13a72466f07d955257af828bcb45325403d4ba63ba38b04853433 | def nodeSet(node):
'Return a nodeset containing a single node'
return [node] | Return a nodeset containing a single node | igor/xmlDatabase.py | nodeSet | cwi-dis/igor | 5 | python | def nodeSet(node):
return [node] | def nodeSet(node):
return [node]<|docstring|>Return a nodeset containing a single node<|endoftext|> |
d04bdeabf0b564748f4e287d5246b2f5b30faf4d0fcc6d721b48fbc2345e43bd | def recursiveNodeSet(node):
'Return a nodeset containing a node and all its descendents'
rv = [node]
child = node.firstChild
while child:
if (child.nodeType == child.ELEMENT_NODE):
rv += recursiveNodeSet(child)
child = child.nextSibling
return rv | Return a nodeset containing a node and all its descendents | igor/xmlDatabase.py | recursiveNodeSet | cwi-dis/igor | 5 | python | def recursiveNodeSet(node):
rv = [node]
child = node.firstChild
while child:
if (child.nodeType == child.ELEMENT_NODE):
rv += recursiveNodeSet(child)
child = child.nextSibling
return rv | def recursiveNodeSet(node):
rv = [node]
child = node.firstChild
while child:
if (child.nodeType == child.ELEMENT_NODE):
rv += recursiveNodeSet(child)
child = child.nextSibling
return rv<|docstring|>Return a nodeset containing a node and all its descendents<|endoftext|> |
558561298779eb679044a4a22e224f55c8b7bdb073e8ba214817fe659267b723 | def _signalNodelist(self, nodelist):
'Wake up clients waiting for the given nodes and return dictionary with callbacks. Must be called while holding lock.'
assert self.writelock().locked()
if DEBUG:
print(('_signalNodelist(%s)' % repr(nodelist)))
tocallback = {}
for (location, callback) in self._callbacks:
waitnodelist = xpath.find(location, self._doc.documentElement)
for wn in waitnodelist:
if (wn in nodelist):
if (callback in tocallback):
tocallback[callback].append(wn)
else:
tocallback[callback] = [wn]
return tocallback | Wake up clients waiting for the given nodes and return dictionary with callbacks. Must be called while holding lock. | igor/xmlDatabase.py | _signalNodelist | cwi-dis/igor | 5 | python | def _signalNodelist(self, nodelist):
assert self.writelock().locked()
if DEBUG:
print(('_signalNodelist(%s)' % repr(nodelist)))
tocallback = {}
for (location, callback) in self._callbacks:
waitnodelist = xpath.find(location, self._doc.documentElement)
for wn in waitnodelist:
if (wn in nodelist):
if (callback in tocallback):
tocallback[callback].append(wn)
else:
tocallback[callback] = [wn]
return tocallback | def _signalNodelist(self, nodelist):
assert self.writelock().locked()
if DEBUG:
print(('_signalNodelist(%s)' % repr(nodelist)))
tocallback = {}
for (location, callback) in self._callbacks:
waitnodelist = xpath.find(location, self._doc.documentElement)
for wn in waitnodelist:
if (wn in nodelist):
if (callback in tocallback):
tocallback[callback].append(wn)
else:
tocallback[callback] = [wn]
return tocallback<|docstring|>Wake up clients waiting for the given nodes and return dictionary with callbacks. Must be called while holding lock.<|endoftext|> |
ac6e1bbfa23fea3268e4e43400017718c04d6f5e19d2140a2609ad69e3dc3790 | def _runSignalCallbacks(self, callbacks):
'Second part of signalling: call callbacks. Must be called without holding lock'
for (callback, waitnodes) in list(callbacks.items()):
if DEBUG:
print(('_runSignalCallbacks calling %s(%s)' % (callback, waitnodes)))
callback(*waitnodes) | Second part of signalling: call callbacks. Must be called without holding lock | igor/xmlDatabase.py | _runSignalCallbacks | cwi-dis/igor | 5 | python | def _runSignalCallbacks(self, callbacks):
for (callback, waitnodes) in list(callbacks.items()):
if DEBUG:
print(('_runSignalCallbacks calling %s(%s)' % (callback, waitnodes)))
callback(*waitnodes) | def _runSignalCallbacks(self, callbacks):
for (callback, waitnodes) in list(callbacks.items()):
if DEBUG:
print(('_runSignalCallbacks calling %s(%s)' % (callback, waitnodes)))
callback(*waitnodes)<|docstring|>Second part of signalling: call callbacks. Must be called without holding lock<|endoftext|> |
39e9921c7eea781ab0dba415447913b21a291f1edfe67cd95847bb91c4f6d896 | def setChanged(self):
'Called by external modules when they have changed the database behind our back.'
self.currentGeneration += 1 | Called by external modules when they have changed the database behind our back. | igor/xmlDatabase.py | setChanged | cwi-dis/igor | 5 | python | def setChanged(self):
self.currentGeneration += 1 | def setChanged(self):
self.currentGeneration += 1<|docstring|>Called by external modules when they have changed the database behind our back.<|endoftext|> |
a688277ca2f4ead8c78882572127f5e61f76ac2da2661851b65bd9ecd235d253 | def initialize(self, xmlstring=None, filename=None):
'Reset the document to a known value (passed as an XML string'
if filename:
newDoc = xml.dom.minidom.parse(filename)
elif xmlstring:
newDoc = xml.dom.minidom.parseString(xmlstring)
else:
newDoc = self._domimpl.createDocument('', 'root', None)
self._removeBlanks(newDoc)
with self.writelock():
self.currentGeneration += 1
self._doc = newDoc | Reset the document to a known value (passed as an XML string | igor/xmlDatabase.py | initialize | cwi-dis/igor | 5 | python | def initialize(self, xmlstring=None, filename=None):
if filename:
newDoc = xml.dom.minidom.parse(filename)
elif xmlstring:
newDoc = xml.dom.minidom.parseString(xmlstring)
else:
newDoc = self._domimpl.createDocument(, 'root', None)
self._removeBlanks(newDoc)
with self.writelock():
self.currentGeneration += 1
self._doc = newDoc | def initialize(self, xmlstring=None, filename=None):
if filename:
newDoc = xml.dom.minidom.parse(filename)
elif xmlstring:
newDoc = xml.dom.minidom.parseString(xmlstring)
else:
newDoc = self._domimpl.createDocument(, 'root', None)
self._removeBlanks(newDoc)
with self.writelock():
self.currentGeneration += 1
self._doc = newDoc<|docstring|>Reset the document to a known value (passed as an XML string<|endoftext|> |
9786f38c0608174d13fefe989252d9085c1e1cb7330f142ca73ea417a68ae2ee | def getDocument(self, token):
'Return the whole document (as a DOM element)'
with self.readlock():
self._checkAccess('get', self._doc.documentElement, token)
return self._doc.documentElement | Return the whole document (as a DOM element) | igor/xmlDatabase.py | getDocument | cwi-dis/igor | 5 | python | def getDocument(self, token):
with self.readlock():
self._checkAccess('get', self._doc.documentElement, token)
return self._doc.documentElement | def getDocument(self, token):
with self.readlock():
self._checkAccess('get', self._doc.documentElement, token)
return self._doc.documentElement<|docstring|>Return the whole document (as a DOM element)<|endoftext|> |
ce8be46d959902e8cc3a9df487d5057ed4a7bc3cdc3168c9ee2dd36ed628be93 | def xmlFromElement(self, element, stripHidden=False):
'Return XML representation, possibly after stripping namespaced elements and attributes'
with self.readlock():
if (not stripHidden):
return element.toxml()
if element.namespaceURI:
return ''
def _hasNS(e):
'Helper method to check whether anything is namespaced in the subtree'
if e.namespaceURI:
return True
if e.attributes:
for a in e.attributes.values():
if a.namespaceURI:
return True
c = e.firstChild
while c:
if c.namespaceURI:
return True
if _hasNS(c):
return True
c = c.nextSibling
if (not _hasNS(element)):
return element.toxml()
copied = element.cloneNode(True)
def _stripNS(e):
'Helper method to strip all namespaced items from a subtree'
assert (not e.namespaceURI)
if e.attributes:
toRemoveAttrs = []
for av in e.attributes.values():
if av.namespaceURI:
toRemoveAttrs.append(av)
for av in toRemoveAttrs:
e.removeAttributeNode(av)
toRemove = []
for c in e.childNodes:
if c.namespaceURI:
toRemove.append(c)
for c in toRemove:
e.removeChild(c)
for c in e.childNodes:
_stripNS(c)
_stripNS(copied)
rv = copied.toxml()
copied.unlink()
return rv | Return XML representation, possibly after stripping namespaced elements and attributes | igor/xmlDatabase.py | xmlFromElement | cwi-dis/igor | 5 | python | def xmlFromElement(self, element, stripHidden=False):
with self.readlock():
if (not stripHidden):
return element.toxml()
if element.namespaceURI:
return
def _hasNS(e):
'Helper method to check whether anything is namespaced in the subtree'
if e.namespaceURI:
return True
if e.attributes:
for a in e.attributes.values():
if a.namespaceURI:
return True
c = e.firstChild
while c:
if c.namespaceURI:
return True
if _hasNS(c):
return True
c = c.nextSibling
if (not _hasNS(element)):
return element.toxml()
copied = element.cloneNode(True)
def _stripNS(e):
'Helper method to strip all namespaced items from a subtree'
assert (not e.namespaceURI)
if e.attributes:
toRemoveAttrs = []
for av in e.attributes.values():
if av.namespaceURI:
toRemoveAttrs.append(av)
for av in toRemoveAttrs:
e.removeAttributeNode(av)
toRemove = []
for c in e.childNodes:
if c.namespaceURI:
toRemove.append(c)
for c in toRemove:
e.removeChild(c)
for c in e.childNodes:
_stripNS(c)
_stripNS(copied)
rv = copied.toxml()
copied.unlink()
return rv | def xmlFromElement(self, element, stripHidden=False):
with self.readlock():
if (not stripHidden):
return element.toxml()
if element.namespaceURI:
return
def _hasNS(e):
'Helper method to check whether anything is namespaced in the subtree'
if e.namespaceURI:
return True
if e.attributes:
for a in e.attributes.values():
if a.namespaceURI:
return True
c = e.firstChild
while c:
if c.namespaceURI:
return True
if _hasNS(c):
return True
c = c.nextSibling
if (not _hasNS(element)):
return element.toxml()
copied = element.cloneNode(True)
def _stripNS(e):
'Helper method to strip all namespaced items from a subtree'
assert (not e.namespaceURI)
if e.attributes:
toRemoveAttrs = []
for av in e.attributes.values():
if av.namespaceURI:
toRemoveAttrs.append(av)
for av in toRemoveAttrs:
e.removeAttributeNode(av)
toRemove = []
for c in e.childNodes:
if c.namespaceURI:
toRemove.append(c)
for c in toRemove:
e.removeChild(c)
for c in e.childNodes:
_stripNS(c)
_stripNS(copied)
rv = copied.toxml()
copied.unlink()
return rv<|docstring|>Return XML representation, possibly after stripping namespaced elements and attributes<|endoftext|> |
10075e473fd888a3a133ce5e26217ef499c7c706e249aaaca12c223f5d7ca5e0 | def delValues(self, location, token, context=None, namespaces=NAMESPACES):
'Remove a (possibly empty) set of nodes from the document'
with self.writelock():
self.currentGeneration += 1
if (context == None):
context = self._doc.documentElement
nodeList = xpath.find(location, context, namespaces=namespaces)
for n in nodeList:
self._checkAccess('delete', n, token)
parentList = []
for node in nodeList:
parentNode = node.parentNode
parentNode.removeChild(node)
if (not (parentNode in parentList)):
parentList += nodeSet(parentNode)
callbacks = self._signalNodelist(parentList)
self._runSignalCallbacks(callbacks) | Remove a (possibly empty) set of nodes from the document | igor/xmlDatabase.py | delValues | cwi-dis/igor | 5 | python | def delValues(self, location, token, context=None, namespaces=NAMESPACES):
with self.writelock():
self.currentGeneration += 1
if (context == None):
context = self._doc.documentElement
nodeList = xpath.find(location, context, namespaces=namespaces)
for n in nodeList:
self._checkAccess('delete', n, token)
parentList = []
for node in nodeList:
parentNode = node.parentNode
parentNode.removeChild(node)
if (not (parentNode in parentList)):
parentList += nodeSet(parentNode)
callbacks = self._signalNodelist(parentList)
self._runSignalCallbacks(callbacks) | def delValues(self, location, token, context=None, namespaces=NAMESPACES):
with self.writelock():
self.currentGeneration += 1
if (context == None):
context = self._doc.documentElement
nodeList = xpath.find(location, context, namespaces=namespaces)
for n in nodeList:
self._checkAccess('delete', n, token)
parentList = []
for node in nodeList:
parentNode = node.parentNode
parentNode.removeChild(node)
if (not (parentNode in parentList)):
parentList += nodeSet(parentNode)
callbacks = self._signalNodelist(parentList)
self._runSignalCallbacks(callbacks)<|docstring|>Remove a (possibly empty) set of nodes from the document<|endoftext|> |
a0bdcb7c1fb7d9962af45b25248fa789b71fbe8ce05974d0b015c4ff698030ae | def getValue(self, location, token, context=None, namespaces=NAMESPACES):
'Return a single value from the document (as string)'
with self.readlock():
if (context is None):
context = self._doc.documentElement
result = xpath.find(location, context, originalContext=[context], namespaces=namespaces)
if xpath.expr.nodesetp(result):
for n in result:
self._checkAccess('get', n, token)
return xpath.expr.string(result)
return result | Return a single value from the document (as string) | igor/xmlDatabase.py | getValue | cwi-dis/igor | 5 | python | def getValue(self, location, token, context=None, namespaces=NAMESPACES):
with self.readlock():
if (context is None):
context = self._doc.documentElement
result = xpath.find(location, context, originalContext=[context], namespaces=namespaces)
if xpath.expr.nodesetp(result):
for n in result:
self._checkAccess('get', n, token)
return xpath.expr.string(result)
return result | def getValue(self, location, token, context=None, namespaces=NAMESPACES):
with self.readlock():
if (context is None):
context = self._doc.documentElement
result = xpath.find(location, context, originalContext=[context], namespaces=namespaces)
if xpath.expr.nodesetp(result):
for n in result:
self._checkAccess('get', n, token)
return xpath.expr.string(result)
return result<|docstring|>Return a single value from the document (as string)<|endoftext|> |
03b628819ce0340d1efbab062f48a6f3cedaffe38a45defd55bac082a768e674 | def getValues(self, location, token, context=None, namespaces=NAMESPACES):
'Return a list of node values from the document (as names and strings)'
with self.readlock():
if (context is None):
context = self._doc.documentElement
nodeList = xpath.find(location, context, originalContext=[context], namespaces=namespaces)
for n in nodeList:
self._checkAccess('get', n, token)
return self._getValueList(nodeList) | Return a list of node values from the document (as names and strings) | igor/xmlDatabase.py | getValues | cwi-dis/igor | 5 | python | def getValues(self, location, token, context=None, namespaces=NAMESPACES):
with self.readlock():
if (context is None):
context = self._doc.documentElement
nodeList = xpath.find(location, context, originalContext=[context], namespaces=namespaces)
for n in nodeList:
self._checkAccess('get', n, token)
return self._getValueList(nodeList) | def getValues(self, location, token, context=None, namespaces=NAMESPACES):
with self.readlock():
if (context is None):
context = self._doc.documentElement
nodeList = xpath.find(location, context, originalContext=[context], namespaces=namespaces)
for n in nodeList:
self._checkAccess('get', n, token)
return self._getValueList(nodeList)<|docstring|>Return a list of node values from the document (as names and strings)<|endoftext|> |
c065ad29efc49db5fa4f184e6bb6ac6422c744619692a8fffab56818850b35a9 | def getElements(self, location, operation, token, context=None, namespaces=NAMESPACES, postChild=None):
'Return a list of DOM nodes (elements only, for now) that match the location'
with self.readlock():
if (context is None):
context = self._doc.documentElement
nodeList = xpath.find(location, context, originalContext=[context], namespaces=namespaces)
for n in nodeList:
self._checkAccess(operation, n, token, postChild)
return nodeList | Return a list of DOM nodes (elements only, for now) that match the location | igor/xmlDatabase.py | getElements | cwi-dis/igor | 5 | python | def getElements(self, location, operation, token, context=None, namespaces=NAMESPACES, postChild=None):
with self.readlock():
if (context is None):
context = self._doc.documentElement
nodeList = xpath.find(location, context, originalContext=[context], namespaces=namespaces)
for n in nodeList:
self._checkAccess(operation, n, token, postChild)
return nodeList | def getElements(self, location, operation, token, context=None, namespaces=NAMESPACES, postChild=None):
with self.readlock():
if (context is None):
context = self._doc.documentElement
nodeList = xpath.find(location, context, originalContext=[context], namespaces=namespaces)
for n in nodeList:
self._checkAccess(operation, n, token, postChild)
return nodeList<|docstring|>Return a list of DOM nodes (elements only, for now) that match the location<|endoftext|> |
f327f7e208c5376b652037fb31f941c3bcb41a4148607a7615310e4897358d4f | def addElement(self, parentPath, tag, element, token, context=None, namespaces=NAMESPACES):
'Add a new element to the database.'
callbacks = None
with self.writelock():
self.currentGeneration += 1
if (context is None):
context = self._doc.documentElement
parentElements = xpath.find(parentPath, context, namespaces=namespaces)
if (len(parentElements) == 1):
parent = parentElements[0]
elif (len(parentElements) > 1):
raise DBParamError(('Multiple parents: %s' % parentPath))
elif True:
parent = self._ensureElement(parentPath, token, context=context, namespaces=namespaces)
else:
raise DBParamError(('Parent not found: %s' % parentPath))
self._checkAccess('post', parent, token, tag)
parent.appendChild(element)
nodesToSignal = recursiveNodeSet(element)
nodesToSignal += nodeSet(parent)
callbacks = self._signalNodelist(nodesToSignal)
if callbacks:
self._runSignalCallbacks(callbacks) | Add a new element to the database. | igor/xmlDatabase.py | addElement | cwi-dis/igor | 5 | python | def addElement(self, parentPath, tag, element, token, context=None, namespaces=NAMESPACES):
callbacks = None
with self.writelock():
self.currentGeneration += 1
if (context is None):
context = self._doc.documentElement
parentElements = xpath.find(parentPath, context, namespaces=namespaces)
if (len(parentElements) == 1):
parent = parentElements[0]
elif (len(parentElements) > 1):
raise DBParamError(('Multiple parents: %s' % parentPath))
elif True:
parent = self._ensureElement(parentPath, token, context=context, namespaces=namespaces)
else:
raise DBParamError(('Parent not found: %s' % parentPath))
self._checkAccess('post', parent, token, tag)
parent.appendChild(element)
nodesToSignal = recursiveNodeSet(element)
nodesToSignal += nodeSet(parent)
callbacks = self._signalNodelist(nodesToSignal)
if callbacks:
self._runSignalCallbacks(callbacks) | def addElement(self, parentPath, tag, element, token, context=None, namespaces=NAMESPACES):
callbacks = None
with self.writelock():
self.currentGeneration += 1
if (context is None):
context = self._doc.documentElement
parentElements = xpath.find(parentPath, context, namespaces=namespaces)
if (len(parentElements) == 1):
parent = parentElements[0]
elif (len(parentElements) > 1):
raise DBParamError(('Multiple parents: %s' % parentPath))
elif True:
parent = self._ensureElement(parentPath, token, context=context, namespaces=namespaces)
else:
raise DBParamError(('Parent not found: %s' % parentPath))
self._checkAccess('post', parent, token, tag)
parent.appendChild(element)
nodesToSignal = recursiveNodeSet(element)
nodesToSignal += nodeSet(parent)
callbacks = self._signalNodelist(nodesToSignal)
if callbacks:
self._runSignalCallbacks(callbacks)<|docstring|>Add a new element to the database.<|endoftext|> |
a9a8dd4474a5b7a9df71fd2d92aeca62d273157021040fa68341035ced69542f | def _ensureElement(self, path, token, context=None, namespaces=NAMESPACES):
"Create an element, if it doesn't exist yet"
elements = xpath.find(path, context, namespaces=namespaces)
if (len(elements) == 1):
return elements[0]
if (len(elements) > 1):
raise DBParamError('Multiple patches for path: {path}')
(parentPath, tag) = self.splitXPath(path, stripPredicate=True)
parentElement = self._ensureElement(parentPath, token, context=context, namespaces=namespaces)
element = self._elementFromTagAndData(tag, None)
parentElement.appendChild(element)
return element | Create an element, if it doesn't exist yet | igor/xmlDatabase.py | _ensureElement | cwi-dis/igor | 5 | python | def _ensureElement(self, path, token, context=None, namespaces=NAMESPACES):
elements = xpath.find(path, context, namespaces=namespaces)
if (len(elements) == 1):
return elements[0]
if (len(elements) > 1):
raise DBParamError('Multiple patches for path: {path}')
(parentPath, tag) = self.splitXPath(path, stripPredicate=True)
parentElement = self._ensureElement(parentPath, token, context=context, namespaces=namespaces)
element = self._elementFromTagAndData(tag, None)
parentElement.appendChild(element)
return element | def _ensureElement(self, path, token, context=None, namespaces=NAMESPACES):
elements = xpath.find(path, context, namespaces=namespaces)
if (len(elements) == 1):
return elements[0]
if (len(elements) > 1):
raise DBParamError('Multiple patches for path: {path}')
(parentPath, tag) = self.splitXPath(path, stripPredicate=True)
parentElement = self._ensureElement(parentPath, token, context=context, namespaces=namespaces)
element = self._elementFromTagAndData(tag, None)
parentElement.appendChild(element)
return element<|docstring|>Create an element, if it doesn't exist yet<|endoftext|> |
85d806688ba7f5597346df4c143e037dfc2af4e2f5d0e02e5d766414472102a8 | def replaceElement(self, oldElement, newElement, token, context=None, namespaces=NAMESPACES):
'Replace an existing element in the database. Returns True if nothing changed'
callbacks = None
with self.writelock():
self.currentGeneration += 1
self._checkAccess('put', oldElement, token)
if self._identicalSubTrees(oldElement, newElement):
return True
parent = oldElement.parentNode
parent.replaceChild(newElement, oldElement)
nodesToSignal = recursiveNodeSet(newElement)
callbacks = self._signalNodelist(nodesToSignal)
if callbacks:
self._runSignalCallbacks(callbacks)
return False | Replace an existing element in the database. Returns True if nothing changed | igor/xmlDatabase.py | replaceElement | cwi-dis/igor | 5 | python | def replaceElement(self, oldElement, newElement, token, context=None, namespaces=NAMESPACES):
callbacks = None
with self.writelock():
self.currentGeneration += 1
self._checkAccess('put', oldElement, token)
if self._identicalSubTrees(oldElement, newElement):
return True
parent = oldElement.parentNode
parent.replaceChild(newElement, oldElement)
nodesToSignal = recursiveNodeSet(newElement)
callbacks = self._signalNodelist(nodesToSignal)
if callbacks:
self._runSignalCallbacks(callbacks)
return False | def replaceElement(self, oldElement, newElement, token, context=None, namespaces=NAMESPACES):
callbacks = None
with self.writelock():
self.currentGeneration += 1
self._checkAccess('put', oldElement, token)
if self._identicalSubTrees(oldElement, newElement):
return True
parent = oldElement.parentNode
parent.replaceChild(newElement, oldElement)
nodesToSignal = recursiveNodeSet(newElement)
callbacks = self._signalNodelist(nodesToSignal)
if callbacks:
self._runSignalCallbacks(callbacks)
return False<|docstring|>Replace an existing element in the database. Returns True if nothing changed<|endoftext|> |
ad4856206e042d609237a10bf0d17ddbaf8a39810fae6d1732eba529833f89ff | def _hasNS(e):
'Helper method to check whether anything is namespaced in the subtree'
if e.namespaceURI:
return True
if e.attributes:
for a in e.attributes.values():
if a.namespaceURI:
return True
c = e.firstChild
while c:
if c.namespaceURI:
return True
if _hasNS(c):
return True
c = c.nextSibling | Helper method to check whether anything is namespaced in the subtree | igor/xmlDatabase.py | _hasNS | cwi-dis/igor | 5 | python | def _hasNS(e):
if e.namespaceURI:
return True
if e.attributes:
for a in e.attributes.values():
if a.namespaceURI:
return True
c = e.firstChild
while c:
if c.namespaceURI:
return True
if _hasNS(c):
return True
c = c.nextSibling | def _hasNS(e):
if e.namespaceURI:
return True
if e.attributes:
for a in e.attributes.values():
if a.namespaceURI:
return True
c = e.firstChild
while c:
if c.namespaceURI:
return True
if _hasNS(c):
return True
c = c.nextSibling<|docstring|>Helper method to check whether anything is namespaced in the subtree<|endoftext|> |
aa329a2ca361302e8307dcf1b9e22cdb73b616da2fc3ba58bdf48bcb01649577 | def _stripNS(e):
'Helper method to strip all namespaced items from a subtree'
assert (not e.namespaceURI)
if e.attributes:
toRemoveAttrs = []
for av in e.attributes.values():
if av.namespaceURI:
toRemoveAttrs.append(av)
for av in toRemoveAttrs:
e.removeAttributeNode(av)
toRemove = []
for c in e.childNodes:
if c.namespaceURI:
toRemove.append(c)
for c in toRemove:
e.removeChild(c)
for c in e.childNodes:
_stripNS(c) | Helper method to strip all namespaced items from a subtree | igor/xmlDatabase.py | _stripNS | cwi-dis/igor | 5 | python | def _stripNS(e):
assert (not e.namespaceURI)
if e.attributes:
toRemoveAttrs = []
for av in e.attributes.values():
if av.namespaceURI:
toRemoveAttrs.append(av)
for av in toRemoveAttrs:
e.removeAttributeNode(av)
toRemove = []
for c in e.childNodes:
if c.namespaceURI:
toRemove.append(c)
for c in toRemove:
e.removeChild(c)
for c in e.childNodes:
_stripNS(c) | def _stripNS(e):
assert (not e.namespaceURI)
if e.attributes:
toRemoveAttrs = []
for av in e.attributes.values():
if av.namespaceURI:
toRemoveAttrs.append(av)
for av in toRemoveAttrs:
e.removeAttributeNode(av)
toRemove = []
for c in e.childNodes:
if c.namespaceURI:
toRemove.append(c)
for c in toRemove:
e.removeChild(c)
for c in e.childNodes:
_stripNS(c)<|docstring|>Helper method to strip all namespaced items from a subtree<|endoftext|> |
d739303c1d211d9bd40d1250e2db49a130a2a32dcaee52469047f81373d6ea41 | def parse_summary(self, fn):
'\n @param fn: whatif pdbout.txt file to parse\n @type fn: str\n\n @return: A dict containing some of the WhatCheck results\n @rtype: a dict\n '
f_handler = open(os.path.expanduser(fn))
text = f_handler.read()
info = dict()
re_ramachandran = re.compile('Ramachandran\\s*Z-score\\s*:\\s*([0-9.Ee-]+)')
re_1st = re.compile('1st\\s*generation\\s*packing\\s*quality\\s*:\\s*([0-9.Ee-]+)')
re_2nd = re.compile('2nd\\s*generation\\s*packing\\s*quality\\s*:\\s*([0-9.Ee-]+)')
re_backbone = re.compile('Backbone\\s*conformation\\s*Z-score\\s*:\\s*([0-9.Ee-]+)')
re_rotamer = re.compile('chi-1\\S*chi-2\\s*rotamer\\s*normality\\s*:\\s*([0-9.Ee-]+)')
info['rama_z_score'] = float(re_ramachandran.search(text).groups(0)[0])
info['bb_z_score'] = float(re_backbone.search(text).groups(0)[0])
info['1st_packing_z_score'] = float(re_1st.search(text).groups(0)[0])
info['2nd_packing_z_score'] = float(re_2nd.search(text).groups(0)[0])
info['rotamer_score'] = float(re_rotamer.search(text).groups(0)[0])
f_handler.close()
return info | @param fn: whatif pdbout.txt file to parse
@type fn: str
@return: A dict containing some of the WhatCheck results
@rtype: a dict | csb/bio/io/whatif.py | parse_summary | santosh653/CSB | 10 | python | def parse_summary(self, fn):
'\n @param fn: whatif pdbout.txt file to parse\n @type fn: str\n\n @return: A dict containing some of the WhatCheck results\n @rtype: a dict\n '
f_handler = open(os.path.expanduser(fn))
text = f_handler.read()
info = dict()
re_ramachandran = re.compile('Ramachandran\\s*Z-score\\s*:\\s*([0-9.Ee-]+)')
re_1st = re.compile('1st\\s*generation\\s*packing\\s*quality\\s*:\\s*([0-9.Ee-]+)')
re_2nd = re.compile('2nd\\s*generation\\s*packing\\s*quality\\s*:\\s*([0-9.Ee-]+)')
re_backbone = re.compile('Backbone\\s*conformation\\s*Z-score\\s*:\\s*([0-9.Ee-]+)')
re_rotamer = re.compile('chi-1\\S*chi-2\\s*rotamer\\s*normality\\s*:\\s*([0-9.Ee-]+)')
info['rama_z_score'] = float(re_ramachandran.search(text).groups(0)[0])
info['bb_z_score'] = float(re_backbone.search(text).groups(0)[0])
info['1st_packing_z_score'] = float(re_1st.search(text).groups(0)[0])
info['2nd_packing_z_score'] = float(re_2nd.search(text).groups(0)[0])
info['rotamer_score'] = float(re_rotamer.search(text).groups(0)[0])
f_handler.close()
return info | def parse_summary(self, fn):
'\n @param fn: whatif pdbout.txt file to parse\n @type fn: str\n\n @return: A dict containing some of the WhatCheck results\n @rtype: a dict\n '
f_handler = open(os.path.expanduser(fn))
text = f_handler.read()
info = dict()
re_ramachandran = re.compile('Ramachandran\\s*Z-score\\s*:\\s*([0-9.Ee-]+)')
re_1st = re.compile('1st\\s*generation\\s*packing\\s*quality\\s*:\\s*([0-9.Ee-]+)')
re_2nd = re.compile('2nd\\s*generation\\s*packing\\s*quality\\s*:\\s*([0-9.Ee-]+)')
re_backbone = re.compile('Backbone\\s*conformation\\s*Z-score\\s*:\\s*([0-9.Ee-]+)')
re_rotamer = re.compile('chi-1\\S*chi-2\\s*rotamer\\s*normality\\s*:\\s*([0-9.Ee-]+)')
info['rama_z_score'] = float(re_ramachandran.search(text).groups(0)[0])
info['bb_z_score'] = float(re_backbone.search(text).groups(0)[0])
info['1st_packing_z_score'] = float(re_1st.search(text).groups(0)[0])
info['2nd_packing_z_score'] = float(re_2nd.search(text).groups(0)[0])
info['rotamer_score'] = float(re_rotamer.search(text).groups(0)[0])
f_handler.close()
return info<|docstring|>@param fn: whatif pdbout.txt file to parse
@type fn: str
@return: A dict containing some of the WhatCheck results
@rtype: a dict<|endoftext|> |
b7a1bd87a98cd7539f84d761cab3e7a8775e2601485f97d197f5fa980a349b95 | def run(self, pdb_file):
'\n Runs WhatCheck for the given pdbfile and parses the output.\n Will fail if the WhatCheck binary is not in the path.\n \n @param pdb_file: file to parse\n @return: dict of parsed values\n '
wd = os.getcwd()
base = os.path.basename(pdb_file)
with TempFolder() as tmp:
shutil.copy(os.path.expanduser(pdb_file), tmp.name)
os.chdir(tmp.name)
Shell.run('{0} {1}'.format(self.binary, os.path.join(tmp.name, base)))
out = self.parse_summary(os.path.join(tmp.name, 'pdbout.txt'))
os.chdir(wd)
return out | Runs WhatCheck for the given pdbfile and parses the output.
Will fail if the WhatCheck binary is not in the path.
@param pdb_file: file to parse
@return: dict of parsed values | csb/bio/io/whatif.py | run | santosh653/CSB | 10 | python | def run(self, pdb_file):
'\n Runs WhatCheck for the given pdbfile and parses the output.\n Will fail if the WhatCheck binary is not in the path.\n \n @param pdb_file: file to parse\n @return: dict of parsed values\n '
wd = os.getcwd()
base = os.path.basename(pdb_file)
with TempFolder() as tmp:
shutil.copy(os.path.expanduser(pdb_file), tmp.name)
os.chdir(tmp.name)
Shell.run('{0} {1}'.format(self.binary, os.path.join(tmp.name, base)))
out = self.parse_summary(os.path.join(tmp.name, 'pdbout.txt'))
os.chdir(wd)
return out | def run(self, pdb_file):
'\n Runs WhatCheck for the given pdbfile and parses the output.\n Will fail if the WhatCheck binary is not in the path.\n \n @param pdb_file: file to parse\n @return: dict of parsed values\n '
wd = os.getcwd()
base = os.path.basename(pdb_file)
with TempFolder() as tmp:
shutil.copy(os.path.expanduser(pdb_file), tmp.name)
os.chdir(tmp.name)
Shell.run('{0} {1}'.format(self.binary, os.path.join(tmp.name, base)))
out = self.parse_summary(os.path.join(tmp.name, 'pdbout.txt'))
os.chdir(wd)
return out<|docstring|>Runs WhatCheck for the given pdbfile and parses the output.
Will fail if the WhatCheck binary is not in the path.
@param pdb_file: file to parse
@return: dict of parsed values<|endoftext|> |
0ea56c6125df82dd0e7b2eb4015fd116d660c5d213d9e66443ea32349eaeaef1 | def save(tensor, name='noise.png'):
'\n Save an image Tensor to a file.\n\n :param Tensor tensor: Image tensor\n :param str name: Filename, ending with .png or .jpg\n :return: None\n '
tensor = tf.image.convert_image_dtype(tensor, tf.uint8, saturate=True)
if name.lower().endswith('.png'):
data = tf.image.encode_png(tensor).numpy()
elif name.lower().endswith(('.jpg', '.jpeg')):
data = tf.image.encode_jpeg(tensor).numpy()
else:
raise ValueError('Filename should end with .png or .jpg')
with open(name, 'wb') as fh:
fh.write(data) | Save an image Tensor to a file.
:param Tensor tensor: Image tensor
:param str name: Filename, ending with .png or .jpg
:return: None | noisemaker/util.py | save | BumpierZulu9930/py-noisemaker | 0 | python | def save(tensor, name='noise.png'):
'\n Save an image Tensor to a file.\n\n :param Tensor tensor: Image tensor\n :param str name: Filename, ending with .png or .jpg\n :return: None\n '
tensor = tf.image.convert_image_dtype(tensor, tf.uint8, saturate=True)
if name.lower().endswith('.png'):
data = tf.image.encode_png(tensor).numpy()
elif name.lower().endswith(('.jpg', '.jpeg')):
data = tf.image.encode_jpeg(tensor).numpy()
else:
raise ValueError('Filename should end with .png or .jpg')
with open(name, 'wb') as fh:
fh.write(data) | def save(tensor, name='noise.png'):
'\n Save an image Tensor to a file.\n\n :param Tensor tensor: Image tensor\n :param str name: Filename, ending with .png or .jpg\n :return: None\n '
tensor = tf.image.convert_image_dtype(tensor, tf.uint8, saturate=True)
if name.lower().endswith('.png'):
data = tf.image.encode_png(tensor).numpy()
elif name.lower().endswith(('.jpg', '.jpeg')):
data = tf.image.encode_jpeg(tensor).numpy()
else:
raise ValueError('Filename should end with .png or .jpg')
with open(name, 'wb') as fh:
fh.write(data)<|docstring|>Save an image Tensor to a file.
:param Tensor tensor: Image tensor
:param str name: Filename, ending with .png or .jpg
:return: None<|endoftext|> |
ec7b00080f2620972e1d9089db26d348382dfe7b75e2e87cbf6f6bc5bea2642c | def load(filename, channels=None):
'\n Load a .png or .jpg by filename.\n\n :param str filename:\n :return: Tensor\n '
with open(filename, 'rb') as fh:
if filename.lower().endswith('.png'):
return tf.image.decode_png(fh.read(), channels=channels)
elif filename.lower().endswith(('.jpg', '.jpeg')):
return tf.image.decode_jpeg(fh.read(), channels=channels) | Load a .png or .jpg by filename.
:param str filename:
:return: Tensor | noisemaker/util.py | load | BumpierZulu9930/py-noisemaker | 0 | python | def load(filename, channels=None):
'\n Load a .png or .jpg by filename.\n\n :param str filename:\n :return: Tensor\n '
with open(filename, 'rb') as fh:
if filename.lower().endswith('.png'):
return tf.image.decode_png(fh.read(), channels=channels)
elif filename.lower().endswith(('.jpg', '.jpeg')):
return tf.image.decode_jpeg(fh.read(), channels=channels) | def load(filename, channels=None):
'\n Load a .png or .jpg by filename.\n\n :param str filename:\n :return: Tensor\n '
with open(filename, 'rb') as fh:
if filename.lower().endswith('.png'):
return tf.image.decode_png(fh.read(), channels=channels)
elif filename.lower().endswith(('.jpg', '.jpeg')):
return tf.image.decode_jpeg(fh.read(), channels=channels)<|docstring|>Load a .png or .jpg by filename.
:param str filename:
:return: Tensor<|endoftext|> |
a9a554c485dce4ebc3cba7c6d20d3d35b46201701b0f4fbf053fcb09e358ad74 | def sitemap_urls_from_robots(robots_text):
'Return an iterator over all sitemap urls contained in the given\n robots.txt file\n '
for line in robots_text.splitlines():
if line.lstrip().startswith('Sitemap:'):
(yield line.split(':', 1)[1].strip()) | Return an iterator over all sitemap urls contained in the given
robots.txt file | scrapy/utils/sitemap.py | sitemap_urls_from_robots | emschorsch/scrapy | 1 | python | def sitemap_urls_from_robots(robots_text):
'Return an iterator over all sitemap urls contained in the given\n robots.txt file\n '
for line in robots_text.splitlines():
if line.lstrip().startswith('Sitemap:'):
(yield line.split(':', 1)[1].strip()) | def sitemap_urls_from_robots(robots_text):
'Return an iterator over all sitemap urls contained in the given\n robots.txt file\n '
for line in robots_text.splitlines():
if line.lstrip().startswith('Sitemap:'):
(yield line.split(':', 1)[1].strip())<|docstring|>Return an iterator over all sitemap urls contained in the given
robots.txt file<|endoftext|> |
1fb64c686ba1e164bbaf31ffc886303d615202d8679150a77161b04f3adba789 | def count_multiclass_num(class_list):
'Count the number of different types of elements'
assert isinstance(class_list, list)
dict_class = dict(Counter(class_list))
multiclass_num = list(OrderedDict(sorted(dict_class.items())).values())
return multiclass_num | Count the number of different types of elements | utils.py | count_multiclass_num | JACKYLUO1991/FaceAttribute | 1 | python | def count_multiclass_num(class_list):
assert isinstance(class_list, list)
dict_class = dict(Counter(class_list))
multiclass_num = list(OrderedDict(sorted(dict_class.items())).values())
return multiclass_num | def count_multiclass_num(class_list):
assert isinstance(class_list, list)
dict_class = dict(Counter(class_list))
multiclass_num = list(OrderedDict(sorted(dict_class.items())).values())
return multiclass_num<|docstring|>Count the number of different types of elements<|endoftext|> |
0861c25a41b3e46a2277b21a9d60b92753b3ee48189fc06f5eaf5809b3c06533 | def circle_touching_line(center, radius, start, end):
' Return true if the given circle intersects the given segment. Note \n that this checks for intersection with a line segment, and not an actual \n line.\n\n :param center: Center of the circle.\n :type center: Vector\n :param radius: Radius of the circle.\n :type radius: float\n :param start: The first end of the line segment.\n :type start: Vector\n :param end: The second end of the line segment.\n :type end: Vector\n '
(C, R) = (center, radius)
(A, B) = (start, end)
a = (((B.x - A.x) ** 2) + ((B.y - A.y) ** 2))
b = (((2 * (B.x - A.x)) * (A.x - C.x)) + ((2 * (B.y - A.y)) * (A.y - C.y)))
c = ((((((C.x ** 2) + (C.y ** 2)) + (A.x ** 2)) + (A.y ** 2)) - (2 * ((C.x * A.x) + (C.y * A.y)))) - (R ** 2))
discriminant = ((b ** 2) - ((4 * a) * c))
if (discriminant < 0):
return False
elif (discriminant == 0):
u = v = ((- b) / float((2 * a)))
else:
u = (((- b) + math.sqrt(discriminant)) / float((2 * a)))
v = (((- b) - math.sqrt(discriminant)) / float((2 * a)))
if ((u < 0) and (v < 0)):
return False
if ((u > 1) and (v > 1)):
return False
return True | Return true if the given circle intersects the given segment. Note
that this checks for intersection with a line segment, and not an actual
line.
:param center: Center of the circle.
:type center: Vector
:param radius: Radius of the circle.
:type radius: float
:param start: The first end of the line segment.
:type start: Vector
:param end: The second end of the line segment.
:type end: Vector | vecrec/collisions.py | circle_touching_line | kxgames/vecrec | 0 | python | def circle_touching_line(center, radius, start, end):
' Return true if the given circle intersects the given segment. Note \n that this checks for intersection with a line segment, and not an actual \n line.\n\n :param center: Center of the circle.\n :type center: Vector\n :param radius: Radius of the circle.\n :type radius: float\n :param start: The first end of the line segment.\n :type start: Vector\n :param end: The second end of the line segment.\n :type end: Vector\n '
(C, R) = (center, radius)
(A, B) = (start, end)
a = (((B.x - A.x) ** 2) + ((B.y - A.y) ** 2))
b = (((2 * (B.x - A.x)) * (A.x - C.x)) + ((2 * (B.y - A.y)) * (A.y - C.y)))
c = ((((((C.x ** 2) + (C.y ** 2)) + (A.x ** 2)) + (A.y ** 2)) - (2 * ((C.x * A.x) + (C.y * A.y)))) - (R ** 2))
discriminant = ((b ** 2) - ((4 * a) * c))
if (discriminant < 0):
return False
elif (discriminant == 0):
u = v = ((- b) / float((2 * a)))
else:
u = (((- b) + math.sqrt(discriminant)) / float((2 * a)))
v = (((- b) - math.sqrt(discriminant)) / float((2 * a)))
if ((u < 0) and (v < 0)):
return False
if ((u > 1) and (v > 1)):
return False
return True | def circle_touching_line(center, radius, start, end):
' Return true if the given circle intersects the given segment. Note \n that this checks for intersection with a line segment, and not an actual \n line.\n\n :param center: Center of the circle.\n :type center: Vector\n :param radius: Radius of the circle.\n :type radius: float\n :param start: The first end of the line segment.\n :type start: Vector\n :param end: The second end of the line segment.\n :type end: Vector\n '
(C, R) = (center, radius)
(A, B) = (start, end)
a = (((B.x - A.x) ** 2) + ((B.y - A.y) ** 2))
b = (((2 * (B.x - A.x)) * (A.x - C.x)) + ((2 * (B.y - A.y)) * (A.y - C.y)))
c = ((((((C.x ** 2) + (C.y ** 2)) + (A.x ** 2)) + (A.y ** 2)) - (2 * ((C.x * A.x) + (C.y * A.y)))) - (R ** 2))
discriminant = ((b ** 2) - ((4 * a) * c))
if (discriminant < 0):
return False
elif (discriminant == 0):
u = v = ((- b) / float((2 * a)))
else:
u = (((- b) + math.sqrt(discriminant)) / float((2 * a)))
v = (((- b) - math.sqrt(discriminant)) / float((2 * a)))
if ((u < 0) and (v < 0)):
return False
if ((u > 1) and (v > 1)):
return False
return True<|docstring|>Return true if the given circle intersects the given segment. Note
that this checks for intersection with a line segment, and not an actual
line.
:param center: Center of the circle.
:type center: Vector
:param radius: Radius of the circle.
:type radius: float
:param start: The first end of the line segment.
:type start: Vector
:param end: The second end of the line segment.
:type end: Vector<|endoftext|> |
def write_table(self):
    """Write the table to the output stream in Markdown format.

    Raises:
        pytablewriter.EmptyHeaderError: If the |headers| is empty.

    :Example:
        :ref:`example-markdown-table-writer`

    .. note::
        - |None| values are written as an empty string
        - Vertical bar characters (``'|'``) in table items are escaped
    """
    with self._logger:
        self._verify_property()
        # Name-mangled private helper of the writer class; emits the
        # optional chapter heading before the table body.
        self.__write_chapter()
        self._write_table()
        if self.is_write_null_line_after_table:
            self.write_null_line()
def gmm_cov(mom_cond, mom_cond_jacob, weighting_matrix):
    """Compute the variance-covariance matrix of a GMM estimator.

    Args:
        mom_cond (np.array): 2d array matrix of the moment conditions of
            dimension (nobs, nmoms).
        mom_cond_jacob (np.array): 3d array of the moment condition
            derivatives w.r.t. the parameters of dimension
            (nobs, nmoms, nparams).
        weighting_matrix (np.array): 2d array weighting matrix for the
            moments of dimension (nmoms, nmoms).

    Returns:
        np.array: 2d array variance-covariance matrix of the GMM estimator
        of dimension (nparams, nparams).
    """
    nobs = mom_cond.shape[0]
    # Omega: covariance of the moment conditions across observations.
    omega = _covariance_moments(mom_cond)
    # q_hat: average Jacobian of the moment conditions.
    q_hat = np.mean(mom_cond_jacob, axis=0)
    return sandwich_cov(q_hat, weighting_matrix, omega, nobs)
def _covariance_moments(mom_cond):
    """Calculate the standard covariance matrix Omega of the moments.

    Args:
        mom_cond (np.array): 2d array matrix of the moment conditions of
            dimension (nobs, nmoms).

    Returns:
        np.array: 2d array covariance matrix of the moments (nmoms, nmoms).
    """
    # Center each moment condition at its sample mean, then take the
    # population covariance (normalized by nobs, not nobs - 1).
    centered = mom_cond - mom_cond.mean(axis=0)
    return (centered.T @ centered) / len(mom_cond)
9bd60f3e5cf409c04e1ba7413ff2570e363e167050c72350e23ec861562fa0de | def test_parabolic_shape_force(self):
' test whether the Elastic energy is a quadratic function of the\n applied force'
hs = PeriodicFFTElasticHalfSpace(self.res, self.young, self.physical_sizes)
force = random(self.res)
force -= force.mean()
nb_tests = 4
El = np.zeros(nb_tests)
for i in range(nb_tests):
disp = hs.evaluate_disp((i * force))
El[i] = hs.evaluate_elastic_energy((i * force), disp)
tol = 1e-10
error = norm(((El / El[1]) - (np.arange(nb_tests) ** 2)))
self.assertTrue((error < tol)) | test whether the Elastic energy is a quadratic function of the
applied force | test/test_fft_elastic_half_space.py | test_parabolic_shape_force | ComputationalMechanics/ContactMechanics | 7 | python | def test_parabolic_shape_force(self):
' test whether the Elastic energy is a quadratic function of the\n applied force'
hs = PeriodicFFTElasticHalfSpace(self.res, self.young, self.physical_sizes)
force = random(self.res)
force -= force.mean()
nb_tests = 4
El = np.zeros(nb_tests)
for i in range(nb_tests):
disp = hs.evaluate_disp((i * force))
El[i] = hs.evaluate_elastic_energy((i * force), disp)
tol = 1e-10
error = norm(((El / El[1]) - (np.arange(nb_tests) ** 2)))
self.assertTrue((error < tol)) | def test_parabolic_shape_force(self):
' test whether the Elastic energy is a quadratic function of the\n applied force'
hs = PeriodicFFTElasticHalfSpace(self.res, self.young, self.physical_sizes)
force = random(self.res)
force -= force.mean()
nb_tests = 4
El = np.zeros(nb_tests)
for i in range(nb_tests):
disp = hs.evaluate_disp((i * force))
El[i] = hs.evaluate_elastic_energy((i * force), disp)
tol = 1e-10
error = norm(((El / El[1]) - (np.arange(nb_tests) ** 2)))
self.assertTrue((error < tol))<|docstring|>test whether the Elastic energy is a quadratic function of the
applied force<|endoftext|> |
def test_parabolic_shape_disp(self):
    """Check that the elastic energy is a quadratic function of the
    magnitude of an applied (zero-mean) displacement field."""
    substrate = PeriodicFFTElasticHalfSpace(
        self.res, self.young, self.physical_sizes)
    disp = random(self.res)
    # Remove the mean so the displacement is compatible with periodicity.
    disp -= disp.mean()
    nb_scales = 4
    energies = np.zeros(nb_scales)
    for scale in range(nb_scales):
        force = substrate.evaluate_force(scale * disp)
        energies[scale] = substrate.evaluate_elastic_energy(
            scale * force, disp)
    # Linear elasticity implies E(scale * u) = scale**2 * E(u).
    tol = 1e-10
    deviation = norm(energies / energies[1] - np.arange(nb_scales) ** 2)
    self.assertTrue(deviation < tol)
4887ebe375c82ffaa3792cd15f78a1c925d38064d943f2754ac38d1685017f9f | def test_same_energy(self):
'\n Asserts that the energies computed in the real space and in the fourier\n space are the same\n '
for res in [(16, 16), (16, 15), (15, 16), (15, 9)]:
disp = np.random.normal(size=res)
hs = PeriodicFFTElasticHalfSpace(res, self.young, self.physical_sizes)
np.testing.assert_allclose(hs.evaluate(disp, pot=True, forces=True)[0], hs.evaluate(disp, pot=True, forces=False)[0]) | Asserts that the energies computed in the real space and in the fourier
space are the same | test/test_fft_elastic_half_space.py | test_same_energy | ComputationalMechanics/ContactMechanics | 7 | python | def test_same_energy(self):
'\n Asserts that the energies computed in the real space and in the fourier\n space are the same\n '
for res in [(16, 16), (16, 15), (15, 16), (15, 9)]:
disp = np.random.normal(size=res)
hs = PeriodicFFTElasticHalfSpace(res, self.young, self.physical_sizes)
np.testing.assert_allclose(hs.evaluate(disp, pot=True, forces=True)[0], hs.evaluate(disp, pot=True, forces=False)[0]) | def test_same_energy(self):
'\n Asserts that the energies computed in the real space and in the fourier\n space are the same\n '
for res in [(16, 16), (16, 15), (15, 16), (15, 9)]:
disp = np.random.normal(size=res)
hs = PeriodicFFTElasticHalfSpace(res, self.young, self.physical_sizes)
np.testing.assert_allclose(hs.evaluate(disp, pot=True, forces=True)[0], hs.evaluate(disp, pot=True, forces=False)[0])<|docstring|>Asserts that the energies computed in the real space and in the fourier
space are the same<|endoftext|> |
ccff38ab5a9b5c4a70ca9c7a9612dbeb0dfe2c60510e04bbe0039199d8a6e73e | def test_uniform_displacement(self):
' test whether uniform displacement returns stiffness_q0'
sq0 = 1.43
hs = PeriodicFFTElasticHalfSpace(self.res, self.young, self.physical_sizes, stiffness_q0=sq0)
force = hs.evaluate_force((- np.ones(self.res)))
self.assertAlmostEqual((force.sum() / np.prod(self.physical_sizes)), sq0) | test whether uniform displacement returns stiffness_q0 | test/test_fft_elastic_half_space.py | test_uniform_displacement | ComputationalMechanics/ContactMechanics | 7 | python | def test_uniform_displacement(self):
' '
sq0 = 1.43
hs = PeriodicFFTElasticHalfSpace(self.res, self.young, self.physical_sizes, stiffness_q0=sq0)
force = hs.evaluate_force((- np.ones(self.res)))
self.assertAlmostEqual((force.sum() / np.prod(self.physical_sizes)), sq0) | def test_uniform_displacement(self):
' '
sq0 = 1.43
hs = PeriodicFFTElasticHalfSpace(self.res, self.young, self.physical_sizes, stiffness_q0=sq0)
force = hs.evaluate_force((- np.ones(self.res)))
self.assertAlmostEqual((force.sum() / np.prod(self.physical_sizes)), sq0)<|docstring|>test whether uniform displacement returns stiffness_q0<|endoftext|> |
43ac18a8e40550486bf7bd06fa84a335ff51b0cd2c38a45435dc385dddcf2c0f | def test_uniform_displacement_finite_height(self):
' test whether uniform displacement returns stiffness_q0'
h0 = 3.45
hs = PeriodicFFTElasticHalfSpace(self.res, self.young, self.physical_sizes, thickness=h0, poisson=self.poisson)
force = hs.evaluate_force((- np.ones(self.res)))
M = (((1 - self.poisson) / ((1 - (2 * self.poisson)) * (1 + self.poisson))) * self.young)
self.assertAlmostEqual((force.sum() / np.prod(self.physical_sizes)), (M / h0)) | test whether uniform displacement returns stiffness_q0 | test/test_fft_elastic_half_space.py | test_uniform_displacement_finite_height | ComputationalMechanics/ContactMechanics | 7 | python | def test_uniform_displacement_finite_height(self):
' '
h0 = 3.45
hs = PeriodicFFTElasticHalfSpace(self.res, self.young, self.physical_sizes, thickness=h0, poisson=self.poisson)
force = hs.evaluate_force((- np.ones(self.res)))
M = (((1 - self.poisson) / ((1 - (2 * self.poisson)) * (1 + self.poisson))) * self.young)
self.assertAlmostEqual((force.sum() / np.prod(self.physical_sizes)), (M / h0)) | def test_uniform_displacement_finite_height(self):
' '
h0 = 3.45
hs = PeriodicFFTElasticHalfSpace(self.res, self.young, self.physical_sizes, thickness=h0, poisson=self.poisson)
force = hs.evaluate_force((- np.ones(self.res)))
M = (((1 - self.poisson) / ((1 - (2 * self.poisson)) * (1 + self.poisson))) * self.young)
self.assertAlmostEqual((force.sum() / np.prod(self.physical_sizes)), (M / h0))<|docstring|>test whether uniform displacement returns stiffness_q0<|endoftext|> |
def save_original_and_compressed_versions_of_image(
        filename, entity_type, entity_id, original_image_content,
        filename_prefix, image_is_compressible):
    """Saves the original, compressed and micro versions of an image file.

    Args:
        filename: str. The name of the image file.
        entity_type: str. The type of the entity.
        entity_id: str. The id of the entity.
        original_image_content: str. The content of the original image.
        filename_prefix: str. The string to prefix to the filename.
        image_is_compressible: bool. Whether the image can be compressed
            or not.
    """
    # Split name and extension on the last dot, e.g. 'a.b.png' ->
    # ('a.b', 'png').
    dot_index = filename.rfind('.')
    base_name = filename[:dot_index]
    extension = filename[dot_index + 1:]

    fs = GcsFileSystem(entity_type, entity_id)

    if image_is_compressible:
        compressed_image_content = image_services.compress_image(
            original_image_content, 0.8)
        micro_image_content = image_services.compress_image(
            original_image_content, 0.7)
    else:
        # SVGs (and other non-compressible formats) are stored as-is.
        compressed_image_content = original_image_content
        micro_image_content = original_image_content

    mimetype = (
        'image/svg+xml' if extension == 'svg' else 'image/%s' % extension)

    versions = [
        (filename, original_image_content),
        ('%s_compressed.%s' % (base_name, extension),
         compressed_image_content),
        ('%s_micro.%s' % (base_name, extension), micro_image_content),
    ]
    for version_filename, content in versions:
        version_filepath = '%s/%s' % (filename_prefix, version_filename)
        # Never overwrite an already-saved version.
        if not fs.isfile(version_filepath):
            fs.commit(version_filepath, content, mimetype=mimetype)
def save_classifier_data(exp_id, job_id, classifier_data_proto):
    """Store serialized classifier model data in a file.

    Args:
        exp_id: str. The id of the exploration.
        job_id: str. The id of the classifier training job model.
        classifier_data_proto: Object. Protobuf object of the classifier
            data to be stored.
    """
    # NOTE(review): the filename suffix is '.xz' but the payload is
    # zlib-compressed (compress_to_zlib) -- confirm readers expect zlib.
    filepath = '%s-classifier-data.pb.xz' % job_id
    content = utils.compress_to_zlib(
        classifier_data_proto.SerializeToString())
    fs = GcsFileSystem(feconf.ENTITY_TYPE_EXPLORATION, exp_id)
    fs.commit(filepath, content, mimetype='application/octet-stream')
def delete_classifier_data(exp_id, job_id):
    """Delete the stored classifier data for a training job, if present.

    Args:
        exp_id: str. The id of the exploration.
        job_id: str. The id of the classifier training job model.
    """
    fs = GcsFileSystem(feconf.ENTITY_TYPE_EXPLORATION, exp_id)
    filepath = '%s-classifier-data.pb.xz' % job_id
    # Deleting a non-existent file would raise, so check first.
    if fs.isfile(filepath):
        fs.delete(filepath)
def copy_images(
        source_entity_type, source_entity_id, destination_entity_type,
        destination_entity_id, filenames):
    """Copy images (and their derived versions) from source to destination.

    For every filename, the original image plus its ``_compressed`` and
    ``_micro`` variants are copied from the source entity's assets to the
    destination entity's assets.

    Args:
        source_entity_type: str. The entity type of the source.
        source_entity_id: str. The type of the source entity.
        destination_entity_id: str. The id of the destination entity.
        destination_entity_type: str. The entity type of the destination.
        filenames: list(str). The list of filenames to copy.
    """
    source_fs = GcsFileSystem(source_entity_type, source_entity_id)
    destination_fs = GcsFileSystem(
        destination_entity_type, destination_entity_id)
    for filename in filenames:
        dot_index = filename.rfind('.')
        base_name = filename[:dot_index]
        extension = filename[dot_index + 1:]
        for variant in (
                filename,
                '%s_compressed.%s' % (base_name, extension),
                '%s_micro.%s' % (base_name, extension)):
            destination_fs.copy(
                source_fs.assets_path, 'image/%s' % variant)
def __init__(self, entity_name, entity_id):
    """Constructs a GeneralFileSystem object.

    Args:
        entity_name: str. The name of the entity
            (eg: exploration, topic etc).
        entity_id: str. The ID of the corresponding entity.
    """
    self._validate_entity_parameters(entity_name, entity_id)
    # All assets of an entity live under '<entity_name>/<entity_id>/assets'.
    self._assets_path = '%s/%s/assets' % (entity_name, entity_id)
def _validate_entity_parameters(self, entity_name, entity_id):
    """Checks whether the entity_id and entity_name passed in are valid.

    Args:
        entity_name: str. The name of the entity
            (eg: exploration, topic etc).
        entity_id: str. The ID of the corresponding entity.

    Raises:
        ValidationError. When parameters passed in are invalid.
    """
    name_is_allowed = (
        entity_name in ALLOWED_ENTITY_NAMES
        or entity_name in ALLOWED_SUGGESTION_IMAGE_CONTEXTS)
    if not name_is_allowed:
        raise utils.ValidationError(
            'Invalid entity_name received: %s.' % entity_name)
    if not isinstance(entity_id, str):
        raise utils.ValidationError(
            'Invalid entity_id received: %s' % entity_id)
    if entity_id == '':
        raise utils.ValidationError('Entity id cannot be empty')
@property
def assets_path(self):
    """The path of the parent folder of the entity's assets.

    Returns:
        str. The path.
    """
    return self._assets_path
203b1ff49917f172d50f23cd3e90011bf9916a7694c04bc0da842b69a7c487ea | def _get_gcs_file_url(self, filepath):
"Returns the constructed GCS file URL.\n\n Args:\n filepath: str. The path to the relevant file within the entity's\n assets folder.\n\n Returns:\n str. The GCS file URL.\n "
gcs_file_url = ('%s/%s' % (self._assets_path, filepath))
return gcs_file_url | Returns the constructed GCS file URL.
Args:
filepath: str. The path to the relevant file within the entity's
assets folder.
Returns:
str. The GCS file URL. | core/domain/fs_services.py | _get_gcs_file_url | AyushJ7/oppia | 1 | python | def _get_gcs_file_url(self, filepath):
"Returns the constructed GCS file URL.\n\n Args:\n filepath: str. The path to the relevant file within the entity's\n assets folder.\n\n Returns:\n str. The GCS file URL.\n "
gcs_file_url = ('%s/%s' % (self._assets_path, filepath))
return gcs_file_url | def _get_gcs_file_url(self, filepath):
"Returns the constructed GCS file URL.\n\n Args:\n filepath: str. The path to the relevant file within the entity's\n assets folder.\n\n Returns:\n str. The GCS file URL.\n "
gcs_file_url = ('%s/%s' % (self._assets_path, filepath))
return gcs_file_url<|docstring|>Returns the constructed GCS file URL.
Args:
filepath: str. The path to the relevant file within the entity's
assets folder.
Returns:
str. The GCS file URL.<|endoftext|> |
142a8b8f3165b05e0d0ec344d4f9850594b0353cd59534f93508f6783b4c877f | def _check_filepath(self, filepath):
"Raises an error if a filepath is invalid.\n\n Args:\n filepath: str. The path to the relevant file within the entity's\n assets folder.\n\n Raises:\n OSError. Invalid filepath.\n "
base_dir = utils.vfs_construct_path('/', self.assets_path, 'assets')
absolute_path = utils.vfs_construct_path(base_dir, filepath)
normalized_path = utils.vfs_normpath(absolute_path)
if (not normalized_path.startswith(base_dir)):
raise IOError(('Invalid filepath: %s' % filepath)) | Raises an error if a filepath is invalid.
Args:
filepath: str. The path to the relevant file within the entity's
assets folder.
Raises:
OSError. Invalid filepath. | core/domain/fs_services.py | _check_filepath | AyushJ7/oppia | 1 | python | def _check_filepath(self, filepath):
"Raises an error if a filepath is invalid.\n\n Args:\n filepath: str. The path to the relevant file within the entity's\n assets folder.\n\n Raises:\n OSError. Invalid filepath.\n "
base_dir = utils.vfs_construct_path('/', self.assets_path, 'assets')
absolute_path = utils.vfs_construct_path(base_dir, filepath)
normalized_path = utils.vfs_normpath(absolute_path)
if (not normalized_path.startswith(base_dir)):
raise IOError(('Invalid filepath: %s' % filepath)) | def _check_filepath(self, filepath):
"Raises an error if a filepath is invalid.\n\n Args:\n filepath: str. The path to the relevant file within the entity's\n assets folder.\n\n Raises:\n OSError. Invalid filepath.\n "
base_dir = utils.vfs_construct_path('/', self.assets_path, 'assets')
absolute_path = utils.vfs_construct_path(base_dir, filepath)
normalized_path = utils.vfs_normpath(absolute_path)
if (not normalized_path.startswith(base_dir)):
raise IOError(('Invalid filepath: %s' % filepath))<|docstring|>Raises an error if a filepath is invalid.
Args:
filepath: str. The path to the relevant file within the entity's
assets folder.
Raises:
OSError. Invalid filepath.<|endoftext|> |
2d0f27fa2365de6c64a0fdb90f4eca920a4f2f5da28578920323be397755dd29 | def isfile(self, filepath):
"Checks if the file with the given filepath exists in the GCS.\n\n Args:\n filepath: str. The path to the relevant file within the entity's\n assets folder.\n\n Returns:\n bool. Whether the file exists in GCS.\n "
self._check_filepath(filepath)
return storage_services.isfile(self._bucket_name, self._get_gcs_file_url(filepath)) | Checks if the file with the given filepath exists in the GCS.
Args:
filepath: str. The path to the relevant file within the entity's
assets folder.
Returns:
bool. Whether the file exists in GCS. | core/domain/fs_services.py | isfile | AyushJ7/oppia | 1 | python | def isfile(self, filepath):
"Checks if the file with the given filepath exists in the GCS.\n\n Args:\n filepath: str. The path to the relevant file within the entity's\n assets folder.\n\n Returns:\n bool. Whether the file exists in GCS.\n "
self._check_filepath(filepath)
return storage_services.isfile(self._bucket_name, self._get_gcs_file_url(filepath)) | def isfile(self, filepath):
"Checks if the file with the given filepath exists in the GCS.\n\n Args:\n filepath: str. The path to the relevant file within the entity's\n assets folder.\n\n Returns:\n bool. Whether the file exists in GCS.\n "
self._check_filepath(filepath)
return storage_services.isfile(self._bucket_name, self._get_gcs_file_url(filepath))<|docstring|>Checks if the file with the given filepath exists in the GCS.
Args:
filepath: str. The path to the relevant file within the entity's
assets folder.
Returns:
bool. Whether the file exists in GCS.<|endoftext|> |
85437c5da6d198bd3036dfbc1feb18273caeafd44e663fef61cbc923d166a6c9 | def get(self, filepath):
"Gets a file as an unencoded stream of raw bytes.\n\n Args:\n filepath: str. The path to the relevant file within the entity's\n assets folder.\n\n Returns:\n bytes. A stream of raw bytes if the file exists.\n\n Raises:\n OSError. Given file does not exist.\n "
if self.isfile(filepath):
return storage_services.get(self._bucket_name, self._get_gcs_file_url(filepath))
else:
raise IOError(('File %s not found.' % filepath)) | Gets a file as an unencoded stream of raw bytes.
Args:
filepath: str. The path to the relevant file within the entity's
assets folder.
Returns:
bytes. A stream of raw bytes if the file exists.
Raises:
OSError. Given file does not exist. | core/domain/fs_services.py | get | AyushJ7/oppia | 1 | python | def get(self, filepath):
"Gets a file as an unencoded stream of raw bytes.\n\n Args:\n filepath: str. The path to the relevant file within the entity's\n assets folder.\n\n Returns:\n bytes. A stream of raw bytes if the file exists.\n\n Raises:\n OSError. Given file does not exist.\n "
if self.isfile(filepath):
return storage_services.get(self._bucket_name, self._get_gcs_file_url(filepath))
else:
raise IOError(('File %s not found.' % filepath)) | def get(self, filepath):
"Gets a file as an unencoded stream of raw bytes.\n\n Args:\n filepath: str. The path to the relevant file within the entity's\n assets folder.\n\n Returns:\n bytes. A stream of raw bytes if the file exists.\n\n Raises:\n OSError. Given file does not exist.\n "
if self.isfile(filepath):
return storage_services.get(self._bucket_name, self._get_gcs_file_url(filepath))
else:
raise IOError(('File %s not found.' % filepath))<|docstring|>Gets a file as an unencoded stream of raw bytes.
Args:
filepath: str. The path to the relevant file within the entity's
assets folder.
Returns:
bytes. A stream of raw bytes if the file exists.
Raises:
OSError. Given file does not exist.<|endoftext|> |
1bee6cb2870720ea19bb5a9d32a17163a9fca3753979a6d703a8144751c81771 | def commit(self, filepath, raw_bytes, mimetype=None):
"Commit raw_bytes to the relevant file in the entity's assets folder.\n\n Args:\n filepath: str. The path to the relevant file within the entity's\n assets folder.\n raw_bytes: str. The content to be stored in the file.\n mimetype: str. The content-type of the cloud file.\n "
if isinstance(raw_bytes, str):
raw_bytes = raw_bytes.encode('utf-8')
self._check_filepath(filepath)
storage_services.commit(self._bucket_name, self._get_gcs_file_url(filepath), raw_bytes, mimetype) | Commit raw_bytes to the relevant file in the entity's assets folder.
Args:
filepath: str. The path to the relevant file within the entity's
assets folder.
raw_bytes: str. The content to be stored in the file.
mimetype: str. The content-type of the cloud file. | core/domain/fs_services.py | commit | AyushJ7/oppia | 1 | python | def commit(self, filepath, raw_bytes, mimetype=None):
"Commit raw_bytes to the relevant file in the entity's assets folder.\n\n Args:\n filepath: str. The path to the relevant file within the entity's\n assets folder.\n raw_bytes: str. The content to be stored in the file.\n mimetype: str. The content-type of the cloud file.\n "
if isinstance(raw_bytes, str):
raw_bytes = raw_bytes.encode('utf-8')
self._check_filepath(filepath)
storage_services.commit(self._bucket_name, self._get_gcs_file_url(filepath), raw_bytes, mimetype) | def commit(self, filepath, raw_bytes, mimetype=None):
"Commit raw_bytes to the relevant file in the entity's assets folder.\n\n Args:\n filepath: str. The path to the relevant file within the entity's\n assets folder.\n raw_bytes: str. The content to be stored in the file.\n mimetype: str. The content-type of the cloud file.\n "
if isinstance(raw_bytes, str):
raw_bytes = raw_bytes.encode('utf-8')
self._check_filepath(filepath)
storage_services.commit(self._bucket_name, self._get_gcs_file_url(filepath), raw_bytes, mimetype)<|docstring|>Commit raw_bytes to the relevant file in the entity's assets folder.
Args:
filepath: str. The path to the relevant file within the entity's
assets folder.
raw_bytes: str. The content to be stored in the file.
mimetype: str. The content-type of the cloud file.<|endoftext|> |
2351d47a988f22fc85fbf3a2c240f2782ab36b221f3e71b0471a6a8ff4880017 | def delete(self, filepath):
"Deletes a file and the metadata associated with it.\n\n Args:\n filepath: str. The path to the relevant file within the entity's\n assets folder.\n\n Raises:\n OSError. Given file does not exist.\n "
if self.isfile(filepath):
storage_services.delete(self._bucket_name, self._get_gcs_file_url(filepath))
else:
raise IOError(('File does not exist: %s' % filepath)) | Deletes a file and the metadata associated with it.
Args:
filepath: str. The path to the relevant file within the entity's
assets folder.
Raises:
OSError. Given file does not exist. | core/domain/fs_services.py | delete | AyushJ7/oppia | 1 | python | def delete(self, filepath):
"Deletes a file and the metadata associated with it.\n\n Args:\n filepath: str. The path to the relevant file within the entity's\n assets folder.\n\n Raises:\n OSError. Given file does not exist.\n "
if self.isfile(filepath):
storage_services.delete(self._bucket_name, self._get_gcs_file_url(filepath))
else:
raise IOError(('File does not exist: %s' % filepath)) | def delete(self, filepath):
"Deletes a file and the metadata associated with it.\n\n Args:\n filepath: str. The path to the relevant file within the entity's\n assets folder.\n\n Raises:\n OSError. Given file does not exist.\n "
if self.isfile(filepath):
storage_services.delete(self._bucket_name, self._get_gcs_file_url(filepath))
else:
raise IOError(('File does not exist: %s' % filepath))<|docstring|>Deletes a file and the metadata associated with it.
Args:
filepath: str. The path to the relevant file within the entity's
assets folder.
Raises:
OSError. Given file does not exist.<|endoftext|> |
235087a0c2024e5167d8d0f7ca2c3e81679ed43122afeb40a0d459798b959c5f | def copy(self, source_assets_path, filepath):
"Copy images from source_path.\n\n Args:\n source_assets_path: str. The path to the source entity's assets\n folder.\n filepath: str. The path to the relevant file within the entity's\n assets folder.\n "
source_file_url = ('%s/%s' % (source_assets_path, filepath))
storage_services.copy(self._bucket_name, source_file_url, self._get_gcs_file_url(filepath)) | Copy images from source_path.
Args:
source_assets_path: str. The path to the source entity's assets
folder.
filepath: str. The path to the relevant file within the entity's
assets folder. | core/domain/fs_services.py | copy | AyushJ7/oppia | 1 | python | def copy(self, source_assets_path, filepath):
"Copy images from source_path.\n\n Args:\n source_assets_path: str. The path to the source entity's assets\n folder.\n filepath: str. The path to the relevant file within the entity's\n assets folder.\n "
source_file_url = ('%s/%s' % (source_assets_path, filepath))
storage_services.copy(self._bucket_name, source_file_url, self._get_gcs_file_url(filepath)) | def copy(self, source_assets_path, filepath):
"Copy images from source_path.\n\n Args:\n source_assets_path: str. The path to the source entity's assets\n folder.\n filepath: str. The path to the relevant file within the entity's\n assets folder.\n "
source_file_url = ('%s/%s' % (source_assets_path, filepath))
storage_services.copy(self._bucket_name, source_file_url, self._get_gcs_file_url(filepath))<|docstring|>Copy images from source_path.
Args:
source_assets_path: str. The path to the source entity's assets
folder.
filepath: str. The path to the relevant file within the entity's
assets folder.<|endoftext|> |
994598434cc9fe4fc147c52c50f3acc4d31e14aad4f3a7a75d095485b73be4b6 | def listdir(self, dir_name):
"Lists all files in a directory.\n\n Args:\n dir_name: str. The directory whose files should be listed. This\n should not start with '/' or end with '/'.\n\n Returns:\n list(str). A lexicographically-sorted list of filenames.\n\n Raises:\n OSError. The directory name starts or ends with '/'.\n "
self._check_filepath(dir_name)
if (dir_name.startswith('/') or dir_name.endswith('/')):
raise IOError(('The dir_name should not start with / or end with / : %s' % dir_name))
if (dir_name and (not dir_name.endswith('/'))):
dir_name += '/'
assets_path = ('%s/' % self._assets_path)
prefix = utils.vfs_construct_path(self._assets_path, dir_name)
blobs_in_dir = storage_services.listdir(self._bucket_name, prefix)
return [blob.name.replace(assets_path, '') for blob in blobs_in_dir] | Lists all files in a directory.
Args:
dir_name: str. The directory whose files should be listed. This
should not start with '/' or end with '/'.
Returns:
list(str). A lexicographically-sorted list of filenames.
Raises:
OSError. The directory name starts or ends with '/'. | core/domain/fs_services.py | listdir | AyushJ7/oppia | 1 | python | def listdir(self, dir_name):
"Lists all files in a directory.\n\n Args:\n dir_name: str. The directory whose files should be listed. This\n should not start with '/' or end with '/'.\n\n Returns:\n list(str). A lexicographically-sorted list of filenames.\n\n Raises:\n OSError. The directory name starts or ends with '/'.\n "
self._check_filepath(dir_name)
if (dir_name.startswith('/') or dir_name.endswith('/')):
raise IOError(('The dir_name should not start with / or end with / : %s' % dir_name))
if (dir_name and (not dir_name.endswith('/'))):
dir_name += '/'
assets_path = ('%s/' % self._assets_path)
prefix = utils.vfs_construct_path(self._assets_path, dir_name)
blobs_in_dir = storage_services.listdir(self._bucket_name, prefix)
return [blob.name.replace(assets_path, ) for blob in blobs_in_dir] | def listdir(self, dir_name):
"Lists all files in a directory.\n\n Args:\n dir_name: str. The directory whose files should be listed. This\n should not start with '/' or end with '/'.\n\n Returns:\n list(str). A lexicographically-sorted list of filenames.\n\n Raises:\n OSError. The directory name starts or ends with '/'.\n "
self._check_filepath(dir_name)
if (dir_name.startswith('/') or dir_name.endswith('/')):
raise IOError(('The dir_name should not start with / or end with / : %s' % dir_name))
if (dir_name and (not dir_name.endswith('/'))):
dir_name += '/'
assets_path = ('%s/' % self._assets_path)
prefix = utils.vfs_construct_path(self._assets_path, dir_name)
blobs_in_dir = storage_services.listdir(self._bucket_name, prefix)
return [blob.name.replace(assets_path, ) for blob in blobs_in_dir]<|docstring|>Lists all files in a directory.
Args:
dir_name: str. The directory whose files should be listed. This
should not start with '/' or end with '/'.
Returns:
list(str). A lexicographically-sorted list of filenames.
Raises:
OSError. The directory name starts or ends with '/'.<|endoftext|> |
c538c904300cd8e9bd35d3068360c0562e5fcbb6f631ec36bf86e26406741341 | def _swap_default_io_man(resources: Dict[(str, ResourceDefinition)], job: PipelineDefinition):
'\n Used to create the user facing experience of the default io_manager\n switching to in-memory when using execute_in_process.\n '
from dagster.core.storage.mem_io_manager import mem_io_manager
from .graph_definition import default_job_io_manager
if ((resources.get('io_manager') in [default_job_io_manager, fs_asset_io_manager]) and (job.version_strategy is None)):
updated_resources = dict(resources)
updated_resources['io_manager'] = mem_io_manager
return updated_resources
return resources | Used to create the user facing experience of the default io_manager
switching to in-memory when using execute_in_process. | python_modules/dagster/dagster/core/definitions/job_definition.py | _swap_default_io_man | asamoal/dagster | 0 | python | def _swap_default_io_man(resources: Dict[(str, ResourceDefinition)], job: PipelineDefinition):
'\n Used to create the user facing experience of the default io_manager\n switching to in-memory when using execute_in_process.\n '
from dagster.core.storage.mem_io_manager import mem_io_manager
from .graph_definition import default_job_io_manager
if ((resources.get('io_manager') in [default_job_io_manager, fs_asset_io_manager]) and (job.version_strategy is None)):
updated_resources = dict(resources)
updated_resources['io_manager'] = mem_io_manager
return updated_resources
return resources | def _swap_default_io_man(resources: Dict[(str, ResourceDefinition)], job: PipelineDefinition):
'\n Used to create the user facing experience of the default io_manager\n switching to in-memory when using execute_in_process.\n '
from dagster.core.storage.mem_io_manager import mem_io_manager
from .graph_definition import default_job_io_manager
if ((resources.get('io_manager') in [default_job_io_manager, fs_asset_io_manager]) and (job.version_strategy is None)):
updated_resources = dict(resources)
updated_resources['io_manager'] = mem_io_manager
return updated_resources
return resources<|docstring|>Used to create the user facing experience of the default io_manager
switching to in-memory when using execute_in_process.<|endoftext|> |
43074f19a161126b6b56d1491b74aecb3f14b82dc2486021bbda900947b69739 | def execute_in_process(self, run_config: Optional[Dict[(str, Any)]]=None, instance: Optional['DagsterInstance']=None, partition_key: Optional[str]=None, raise_on_error: bool=True, op_selection: Optional[List[str]]=None, asset_selection: Optional[List[AssetKey]]=None, run_id: Optional[str]=None, input_values: Optional[Mapping[(str, object)]]=None) -> 'ExecuteInProcessResult':
"\n Execute the Job in-process, gathering results in-memory.\n\n The `executor_def` on the Job will be ignored, and replaced with the in-process executor.\n If using the default `io_manager`, it will switch from filesystem to in-memory.\n\n\n Args:\n run_config (Optional[Dict[str, Any]]:\n The configuration for the run\n instance (Optional[DagsterInstance]):\n The instance to execute against, an ephemeral one will be used if none provided.\n partition_key: (Optional[str])\n The string partition key that specifies the run config to execute. Can only be used\n to select run config for jobs with partitioned config.\n raise_on_error (Optional[bool]): Whether or not to raise exceptions when they occur.\n Defaults to ``True``.\n op_selection (Optional[List[str]]): A list of op selection queries (including single op\n names) to execute. For example:\n * ``['some_op']``: selects ``some_op`` itself.\n * ``['*some_op']``: select ``some_op`` and all its ancestors (upstream dependencies).\n * ``['*some_op+++']``: select ``some_op``, all its ancestors, and its descendants\n (downstream dependencies) within 3 levels down.\n * ``['*some_op', 'other_op_a', 'other_op_b+']``: select ``some_op`` and all its\n ancestors, ``other_op_a`` itself, and ``other_op_b`` and its direct child ops.\n input_values (Optional[Mapping[str, Any]]):\n A dictionary that maps python objects to the top-level inputs of the job. Input values provided here will override input values that have been provided to the job directly.\n Returns:\n :py:class:`~dagster.ExecuteInProcessResult`\n\n "
from dagster.core.definitions.executor_definition import execute_in_process_executor
from dagster.core.execution.execute_in_process import core_execute_in_process
run_config = check.opt_dict_param(run_config, 'run_config')
op_selection = check.opt_list_param(op_selection, 'op_selection', str)
asset_selection = check.opt_list_param(asset_selection, 'asset_selection', AssetKey)
check.invariant((not (op_selection and asset_selection)), 'op_selection and asset_selection cannot both be provided as args to execute_in_process')
partition_key = check.opt_str_param(partition_key, 'partition_key')
input_values = check.opt_mapping_param(input_values, 'input_values')
input_values = merge_dicts(self._input_values, input_values)
resource_defs = dict(self.resource_defs)
logger_defs = dict(self.loggers)
ephemeral_job = JobDefinition(name=self._name, graph_def=self._graph_def, resource_defs=_swap_default_io_man(resource_defs, self), executor_def=execute_in_process_executor, logger_defs=logger_defs, hook_defs=self.hook_defs, config_mapping=self.config_mapping, partitioned_config=self.partitioned_config, tags=self.tags, op_retry_policy=self._solid_retry_policy, version_strategy=self.version_strategy, asset_layer=self.asset_layer, _input_values=input_values)
ephemeral_job = ephemeral_job.get_job_def_for_subset_selection(op_selection, (frozenset(asset_selection) if asset_selection else None))
tags = None
if partition_key:
if (not self.partitioned_config):
check.failed(f'Provided partition key `{partition_key}` for job `{self._name}` without a partitioned config')
check.invariant((not run_config), 'Cannot provide both run_config and partition_key arguments to `execute_in_process`')
partition_set = self.get_partition_set_def()
if (not partition_set):
check.failed(f'Provided partition key `{partition_key}` for job `{self._name}` without a partitioned config')
partition = partition_set.get_partition(partition_key)
run_config = partition_set.run_config_for_partition(partition)
tags = partition_set.tags_for_partition(partition)
return core_execute_in_process(node=self._graph_def, ephemeral_pipeline=ephemeral_job, run_config=run_config, instance=instance, output_capturing_enabled=True, raise_on_error=raise_on_error, run_tags=tags, run_id=run_id, asset_selection=frozenset(asset_selection)) | Execute the Job in-process, gathering results in-memory.
The `executor_def` on the Job will be ignored, and replaced with the in-process executor.
If using the default `io_manager`, it will switch from filesystem to in-memory.
Args:
run_config (Optional[Dict[str, Any]]:
The configuration for the run
instance (Optional[DagsterInstance]):
The instance to execute against, an ephemeral one will be used if none provided.
partition_key: (Optional[str])
The string partition key that specifies the run config to execute. Can only be used
to select run config for jobs with partitioned config.
raise_on_error (Optional[bool]): Whether or not to raise exceptions when they occur.
Defaults to ``True``.
op_selection (Optional[List[str]]): A list of op selection queries (including single op
names) to execute. For example:
* ``['some_op']``: selects ``some_op`` itself.
* ``['*some_op']``: select ``some_op`` and all its ancestors (upstream dependencies).
* ``['*some_op+++']``: select ``some_op``, all its ancestors, and its descendants
(downstream dependencies) within 3 levels down.
* ``['*some_op', 'other_op_a', 'other_op_b+']``: select ``some_op`` and all its
ancestors, ``other_op_a`` itself, and ``other_op_b`` and its direct child ops.
input_values (Optional[Mapping[str, Any]]):
A dictionary that maps python objects to the top-level inputs of the job. Input values provided here will override input values that have been provided to the job directly.
Returns:
:py:class:`~dagster.ExecuteInProcessResult` | python_modules/dagster/dagster/core/definitions/job_definition.py | execute_in_process | asamoal/dagster | 0 | python | def execute_in_process(self, run_config: Optional[Dict[(str, Any)]]=None, instance: Optional['DagsterInstance']=None, partition_key: Optional[str]=None, raise_on_error: bool=True, op_selection: Optional[List[str]]=None, asset_selection: Optional[List[AssetKey]]=None, run_id: Optional[str]=None, input_values: Optional[Mapping[(str, object)]]=None) -> 'ExecuteInProcessResult':
"\n Execute the Job in-process, gathering results in-memory.\n\n The `executor_def` on the Job will be ignored, and replaced with the in-process executor.\n If using the default `io_manager`, it will switch from filesystem to in-memory.\n\n\n Args:\n run_config (Optional[Dict[str, Any]]:\n The configuration for the run\n instance (Optional[DagsterInstance]):\n The instance to execute against, an ephemeral one will be used if none provided.\n partition_key: (Optional[str])\n The string partition key that specifies the run config to execute. Can only be used\n to select run config for jobs with partitioned config.\n raise_on_error (Optional[bool]): Whether or not to raise exceptions when they occur.\n Defaults to ``True``.\n op_selection (Optional[List[str]]): A list of op selection queries (including single op\n names) to execute. For example:\n * ``['some_op']``: selects ``some_op`` itself.\n * ``['*some_op']``: select ``some_op`` and all its ancestors (upstream dependencies).\n * ``['*some_op+++']``: select ``some_op``, all its ancestors, and its descendants\n (downstream dependencies) within 3 levels down.\n * ``['*some_op', 'other_op_a', 'other_op_b+']``: select ``some_op`` and all its\n ancestors, ``other_op_a`` itself, and ``other_op_b`` and its direct child ops.\n input_values (Optional[Mapping[str, Any]]):\n A dictionary that maps python objects to the top-level inputs of the job. Input values provided here will override input values that have been provided to the job directly.\n Returns:\n :py:class:`~dagster.ExecuteInProcessResult`\n\n "
from dagster.core.definitions.executor_definition import execute_in_process_executor
from dagster.core.execution.execute_in_process import core_execute_in_process
run_config = check.opt_dict_param(run_config, 'run_config')
op_selection = check.opt_list_param(op_selection, 'op_selection', str)
asset_selection = check.opt_list_param(asset_selection, 'asset_selection', AssetKey)
check.invariant((not (op_selection and asset_selection)), 'op_selection and asset_selection cannot both be provided as args to execute_in_process')
partition_key = check.opt_str_param(partition_key, 'partition_key')
input_values = check.opt_mapping_param(input_values, 'input_values')
input_values = merge_dicts(self._input_values, input_values)
resource_defs = dict(self.resource_defs)
logger_defs = dict(self.loggers)
ephemeral_job = JobDefinition(name=self._name, graph_def=self._graph_def, resource_defs=_swap_default_io_man(resource_defs, self), executor_def=execute_in_process_executor, logger_defs=logger_defs, hook_defs=self.hook_defs, config_mapping=self.config_mapping, partitioned_config=self.partitioned_config, tags=self.tags, op_retry_policy=self._solid_retry_policy, version_strategy=self.version_strategy, asset_layer=self.asset_layer, _input_values=input_values)
ephemeral_job = ephemeral_job.get_job_def_for_subset_selection(op_selection, (frozenset(asset_selection) if asset_selection else None))
tags = None
if partition_key:
if (not self.partitioned_config):
check.failed(f'Provided partition key `{partition_key}` for job `{self._name}` without a partitioned config')
check.invariant((not run_config), 'Cannot provide both run_config and partition_key arguments to `execute_in_process`')
partition_set = self.get_partition_set_def()
if (not partition_set):
check.failed(f'Provided partition key `{partition_key}` for job `{self._name}` without a partitioned config')
partition = partition_set.get_partition(partition_key)
run_config = partition_set.run_config_for_partition(partition)
tags = partition_set.tags_for_partition(partition)
return core_execute_in_process(node=self._graph_def, ephemeral_pipeline=ephemeral_job, run_config=run_config, instance=instance, output_capturing_enabled=True, raise_on_error=raise_on_error, run_tags=tags, run_id=run_id, asset_selection=frozenset(asset_selection)) | def execute_in_process(self, run_config: Optional[Dict[(str, Any)]]=None, instance: Optional['DagsterInstance']=None, partition_key: Optional[str]=None, raise_on_error: bool=True, op_selection: Optional[List[str]]=None, asset_selection: Optional[List[AssetKey]]=None, run_id: Optional[str]=None, input_values: Optional[Mapping[(str, object)]]=None) -> 'ExecuteInProcessResult':
"\n Execute the Job in-process, gathering results in-memory.\n\n The `executor_def` on the Job will be ignored, and replaced with the in-process executor.\n If using the default `io_manager`, it will switch from filesystem to in-memory.\n\n\n Args:\n run_config (Optional[Dict[str, Any]]:\n The configuration for the run\n instance (Optional[DagsterInstance]):\n The instance to execute against, an ephemeral one will be used if none provided.\n partition_key: (Optional[str])\n The string partition key that specifies the run config to execute. Can only be used\n to select run config for jobs with partitioned config.\n raise_on_error (Optional[bool]): Whether or not to raise exceptions when they occur.\n Defaults to ``True``.\n op_selection (Optional[List[str]]): A list of op selection queries (including single op\n names) to execute. For example:\n * ``['some_op']``: selects ``some_op`` itself.\n * ``['*some_op']``: select ``some_op`` and all its ancestors (upstream dependencies).\n * ``['*some_op+++']``: select ``some_op``, all its ancestors, and its descendants\n (downstream dependencies) within 3 levels down.\n * ``['*some_op', 'other_op_a', 'other_op_b+']``: select ``some_op`` and all its\n ancestors, ``other_op_a`` itself, and ``other_op_b`` and its direct child ops.\n input_values (Optional[Mapping[str, Any]]):\n A dictionary that maps python objects to the top-level inputs of the job. Input values provided here will override input values that have been provided to the job directly.\n Returns:\n :py:class:`~dagster.ExecuteInProcessResult`\n\n "
from dagster.core.definitions.executor_definition import execute_in_process_executor
from dagster.core.execution.execute_in_process import core_execute_in_process
run_config = check.opt_dict_param(run_config, 'run_config')
op_selection = check.opt_list_param(op_selection, 'op_selection', str)
asset_selection = check.opt_list_param(asset_selection, 'asset_selection', AssetKey)
check.invariant((not (op_selection and asset_selection)), 'op_selection and asset_selection cannot both be provided as args to execute_in_process')
partition_key = check.opt_str_param(partition_key, 'partition_key')
input_values = check.opt_mapping_param(input_values, 'input_values')
input_values = merge_dicts(self._input_values, input_values)
resource_defs = dict(self.resource_defs)
logger_defs = dict(self.loggers)
ephemeral_job = JobDefinition(name=self._name, graph_def=self._graph_def, resource_defs=_swap_default_io_man(resource_defs, self), executor_def=execute_in_process_executor, logger_defs=logger_defs, hook_defs=self.hook_defs, config_mapping=self.config_mapping, partitioned_config=self.partitioned_config, tags=self.tags, op_retry_policy=self._solid_retry_policy, version_strategy=self.version_strategy, asset_layer=self.asset_layer, _input_values=input_values)
ephemeral_job = ephemeral_job.get_job_def_for_subset_selection(op_selection, (frozenset(asset_selection) if asset_selection else None))
tags = None
if partition_key:
if (not self.partitioned_config):
check.failed(f'Provided partition key `{partition_key}` for job `{self._name}` without a partitioned config')
check.invariant((not run_config), 'Cannot provide both run_config and partition_key arguments to `execute_in_process`')
partition_set = self.get_partition_set_def()
if (not partition_set):
check.failed(f'Provided partition key `{partition_key}` for job `{self._name}` without a partitioned config')
partition = partition_set.get_partition(partition_key)
run_config = partition_set.run_config_for_partition(partition)
tags = partition_set.tags_for_partition(partition)
return core_execute_in_process(node=self._graph_def, ephemeral_pipeline=ephemeral_job, run_config=run_config, instance=instance, output_capturing_enabled=True, raise_on_error=raise_on_error, run_tags=tags, run_id=run_id, asset_selection=frozenset(asset_selection))<|docstring|>Execute the Job in-process, gathering results in-memory.
The `executor_def` on the Job will be ignored, and replaced with the in-process executor.
If using the default `io_manager`, it will switch from filesystem to in-memory.
Args:
run_config (Optional[Dict[str, Any]]:
The configuration for the run
instance (Optional[DagsterInstance]):
The instance to execute against, an ephemeral one will be used if none provided.
partition_key: (Optional[str])
The string partition key that specifies the run config to execute. Can only be used
to select run config for jobs with partitioned config.
raise_on_error (Optional[bool]): Whether or not to raise exceptions when they occur.
Defaults to ``True``.
op_selection (Optional[List[str]]): A list of op selection queries (including single op
names) to execute. For example:
* ``['some_op']``: selects ``some_op`` itself.
* ``['*some_op']``: select ``some_op`` and all its ancestors (upstream dependencies).
* ``['*some_op+++']``: select ``some_op``, all its ancestors, and its descendants
(downstream dependencies) within 3 levels down.
* ``['*some_op', 'other_op_a', 'other_op_b+']``: select ``some_op`` and all its
ancestors, ``other_op_a`` itself, and ``other_op_b`` and its direct child ops.
input_values (Optional[Mapping[str, Any]]):
A dictionary that maps python objects to the top-level inputs of the job. Input values provided here will override input values that have been provided to the job directly.
Returns:
:py:class:`~dagster.ExecuteInProcessResult`<|endoftext|> |
cd084ef5c785dedf20f4e3a941da1b55e0138652338106bd878b3e550f014c7e | def with_hooks(self, hook_defs: AbstractSet[HookDefinition]) -> 'JobDefinition':
'Apply a set of hooks to all op instances within the job.'
hook_defs = check.set_param(hook_defs, 'hook_defs', of_type=HookDefinition)
job_def = JobDefinition(name=self.name, graph_def=self._graph_def, resource_defs=dict(self.resource_defs), logger_defs=dict(self.loggers), executor_def=self.executor_def, partitioned_config=self.partitioned_config, config_mapping=self.config_mapping, preset_defs=self.preset_defs, tags=self.tags, hook_defs=(hook_defs | self.hook_defs), description=self._description, op_retry_policy=self._solid_retry_policy, asset_layer=self.asset_layer, _subset_selection_data=self._subset_selection_data)
update_wrapper(job_def, self, updated=())
return job_def | Apply a set of hooks to all op instances within the job. | python_modules/dagster/dagster/core/definitions/job_definition.py | with_hooks | asamoal/dagster | 0 | python | def with_hooks(self, hook_defs: AbstractSet[HookDefinition]) -> 'JobDefinition':
hook_defs = check.set_param(hook_defs, 'hook_defs', of_type=HookDefinition)
job_def = JobDefinition(name=self.name, graph_def=self._graph_def, resource_defs=dict(self.resource_defs), logger_defs=dict(self.loggers), executor_def=self.executor_def, partitioned_config=self.partitioned_config, config_mapping=self.config_mapping, preset_defs=self.preset_defs, tags=self.tags, hook_defs=(hook_defs | self.hook_defs), description=self._description, op_retry_policy=self._solid_retry_policy, asset_layer=self.asset_layer, _subset_selection_data=self._subset_selection_data)
update_wrapper(job_def, self, updated=())
return job_def | def with_hooks(self, hook_defs: AbstractSet[HookDefinition]) -> 'JobDefinition':
hook_defs = check.set_param(hook_defs, 'hook_defs', of_type=HookDefinition)
job_def = JobDefinition(name=self.name, graph_def=self._graph_def, resource_defs=dict(self.resource_defs), logger_defs=dict(self.loggers), executor_def=self.executor_def, partitioned_config=self.partitioned_config, config_mapping=self.config_mapping, preset_defs=self.preset_defs, tags=self.tags, hook_defs=(hook_defs | self.hook_defs), description=self._description, op_retry_policy=self._solid_retry_policy, asset_layer=self.asset_layer, _subset_selection_data=self._subset_selection_data)
update_wrapper(job_def, self, updated=())
return job_def<|docstring|>Apply a set of hooks to all op instances within the job.<|endoftext|> |
a21e76148229c4c942a910847ceae1574d1bc9c1f12d52bd0b558224489ba507 | def create_lookup_module(path, Generators):
'Create a Python module of look-up tables.'
required_imports = reduce(op.add, reduce((lambda gen1, gen2: (gen1[0].imports | gen2[0].imports)), Generators))
with open(path, 'w') as file:
file.write(required_imports)
for (Generator, *args) in Generators:
gen = Generator(*args)
file.write('\n')
file.write(gen.as_code_str()) | Create a Python module of look-up tables. | frame_2D_alg/LUT_generators.py | create_lookup_module | bayshore-intelligence-solutions/CogAlg | 1 | python | def create_lookup_module(path, Generators):
required_imports = reduce(op.add, reduce((lambda gen1, gen2: (gen1[0].imports | gen2[0].imports)), Generators))
with open(path, 'w') as file:
file.write(required_imports)
for (Generator, *args) in Generators:
gen = Generator(*args)
file.write('\n')
file.write(gen.as_code_str()) | def create_lookup_module(path, Generators):
required_imports = reduce(op.add, reduce((lambda gen1, gen2: (gen1[0].imports | gen2[0].imports)), Generators))
with open(path, 'w') as file:
file.write(required_imports)
for (Generator, *args) in Generators:
gen = Generator(*args)
file.write('\n')
file.write(gen.as_code_str())<|docstring|>Create a Python module of look-up tables.<|endoftext|> |
3296edbd19445526fe55b0ece54cb1de5a5ab4c3054fcc45395d357d60463a9b | def __init__(self, *args, **kwargs):
'Meta __init__ method.'
self._generate_table(*args, **kwargs) | Meta __init__ method. | frame_2D_alg/LUT_generators.py | __init__ | bayshore-intelligence-solutions/CogAlg | 1 | python | def __init__(self, *args, **kwargs):
self._generate_table(*args, **kwargs) | def __init__(self, *args, **kwargs):
self._generate_table(*args, **kwargs)<|docstring|>Meta __init__ method.<|endoftext|> |
d295fc2c1e3a447b7ab40abb3c28c030f298ad693f4a5e66580b0dc8d2111a24 | def _generate_table(self, *args, **kwargs):
'Meta-method for generating look-up table.'
return self | Meta-method for generating look-up table. | frame_2D_alg/LUT_generators.py | _generate_table | bayshore-intelligence-solutions/CogAlg | 1 | python | def _generate_table(self, *args, **kwargs):
return self | def _generate_table(self, *args, **kwargs):
return self<|docstring|>Meta-method for generating look-up table.<|endoftext|> |
7a5ee9c862fb82dacf4306c1492e34072b2f8fdf78027ad16929e98640e09c72 | def as_code_str(self):
'\n Meta-method for generating python code string declaring\n look-up table in Python code.\n '
return '' | Meta-method for generating python code string declaring
look-up table in Python code. | frame_2D_alg/LUT_generators.py | as_code_str | bayshore-intelligence-solutions/CogAlg | 1 | python | def as_code_str(self):
'\n Meta-method for generating python code string declaring\n look-up table in Python code.\n '
return | def as_code_str(self):
'\n Meta-method for generating python code string declaring\n look-up table in Python code.\n '
return <|docstring|>Meta-method for generating python code string declaring
look-up table in Python code.<|endoftext|> |
54e9cfed980695731902074d4e89c8848462c59b584d7eb2049c76d73f099710 | def to_file(self, path):
'\n Meta-method for outputting loop-up table to Python script.\n '
with open(path, 'w') as file:
file.write(self.as_code_str())
return self | Meta-method for outputting loop-up table to Python script. | frame_2D_alg/LUT_generators.py | to_file | bayshore-intelligence-solutions/CogAlg | 1 | python | def to_file(self, path):
'\n \n '
with open(path, 'w') as file:
file.write(self.as_code_str())
return self | def to_file(self, path):
'\n \n '
with open(path, 'w') as file:
file.write(self.as_code_str())
return self<|docstring|>Meta-method for outputting loop-up table to Python script.<|endoftext|> |
a0e63a5f16a15165cf32f9033cced814b68dfc149bad04d7613753bac8c7dd78 | def __init__(self, max_rng=3):
'\n Instantiate a GenCoeffs object.\n '
MTLookupTable.__init__(self, max_rng=max_rng) | Instantiate a GenCoeffs object. | frame_2D_alg/LUT_generators.py | __init__ | bayshore-intelligence-solutions/CogAlg | 1 | python | def __init__(self, max_rng=3):
'\n \n '
MTLookupTable.__init__(self, max_rng=max_rng) | def __init__(self, max_rng=3):
'\n \n '
MTLookupTable.__init__(self, max_rng=max_rng)<|docstring|>Instantiate a GenCoeffs object.<|endoftext|> |
1a69fac9291b4266b5583ba1cd5a113be39448843a03cb0e0be8e1153fbef3e4 | @classmethod
def flattened_rim(cls, a, arranged_d=0):
"\n Acquire and flatten the outer-pad of an array's\n last/first two dimensions.\n Parameters\n ----------\n arranged_d : int\n Operate on last two dimensions if = 0, else\n operate on first two dimensions.\n Examples\n --------\n >>> a = np.arange(9).reshape(3, 3)\n >>> GenCoeffs.flattened_rim(a)\n array([0, 1, 2, 5, 8, 7, 6, 3])\n "
return np.concatenate(tuple(map((lambda slices: a[slices]), cls.rim_slices[arranged_d])), axis=(arranged_d - 1)) | Acquire and flatten the outer-pad of an array's
last/first two dimensions.
Parameters
----------
arranged_d : int
Operate on last two dimensions if = 0, else
operate on first two dimensions.
Examples
--------
>>> a = np.arange(9).reshape(3, 3)
>>> GenCoeffs.flattened_rim(a)
array([0, 1, 2, 5, 8, 7, 6, 3]) | frame_2D_alg/LUT_generators.py | flattened_rim | bayshore-intelligence-solutions/CogAlg | 1 | python | @classmethod
def flattened_rim(cls, a, arranged_d=0):
"\n Acquire and flatten the outer-pad of an array's\n last/first two dimensions.\n Parameters\n ----------\n arranged_d : int\n Operate on last two dimensions if = 0, else\n operate on first two dimensions.\n Examples\n --------\n >>> a = np.arange(9).reshape(3, 3)\n >>> GenCoeffs.flattened_rim(a)\n array([0, 1, 2, 5, 8, 7, 6, 3])\n "
return np.concatenate(tuple(map((lambda slices: a[slices]), cls.rim_slices[arranged_d])), axis=(arranged_d - 1)) | @classmethod
def flattened_rim(cls, a, arranged_d=0):
"\n Acquire and flatten the outer-pad of an array's\n last/first two dimensions.\n Parameters\n ----------\n arranged_d : int\n Operate on last two dimensions if = 0, else\n operate on first two dimensions.\n Examples\n --------\n >>> a = np.arange(9).reshape(3, 3)\n >>> GenCoeffs.flattened_rim(a)\n array([0, 1, 2, 5, 8, 7, 6, 3])\n "
return np.concatenate(tuple(map((lambda slices: a[slices]), cls.rim_slices[arranged_d])), axis=(arranged_d - 1))<|docstring|>Acquire and flatten the outer-pad of an array's
last/first two dimensions.
Parameters
----------
arranged_d : int
Operate on last two dimensions if = 0, else
operate on first two dimensions.
Examples
--------
>>> a = np.arange(9).reshape(3, 3)
>>> GenCoeffs.flattened_rim(a)
array([0, 1, 2, 5, 8, 7, 6, 3])<|endoftext|> |
2425bd875ff4ba75a416db10a4a45b264ae22a33729946acfbbd35cdbc598ede | def _generate_table(self, max_rng):
'\n Workhorse of GenCoeffs class, compute kernel\n and separate into rng specific coefficients.\n '
kers = kernel(max_rng)
self._coeffs = [*reversed([*map(GenCoeffs.flattened_rim, map((lambda slices: kers[slices]), zip(repeat(...), *tee(chain((slice(None, None),), map((lambda i: slice(i, (- i))), range(1, max_rng)))))))])]
self._g_scalers = [*map((lambda coeffs: (255.9 / (255 * np.hypot(*coeffs)))), accumulate(map((lambda coeffs: np.maximum(coeffs, 0).sum(axis=1)), self._coeffs)))]
return self | Workhorse of GenCoeffs class, compute kernel
and separate into rng specific coefficients. | frame_2D_alg/LUT_generators.py | _generate_table | bayshore-intelligence-solutions/CogAlg | 1 | python | def _generate_table(self, max_rng):
'\n Workhorse of GenCoeffs class, compute kernel\n and separate into rng specific coefficients.\n '
kers = kernel(max_rng)
self._coeffs = [*reversed([*map(GenCoeffs.flattened_rim, map((lambda slices: kers[slices]), zip(repeat(...), *tee(chain((slice(None, None),), map((lambda i: slice(i, (- i))), range(1, max_rng)))))))])]
self._g_scalers = [*map((lambda coeffs: (255.9 / (255 * np.hypot(*coeffs)))), accumulate(map((lambda coeffs: np.maximum(coeffs, 0).sum(axis=1)), self._coeffs)))]
return self | def _generate_table(self, max_rng):
'\n Workhorse of GenCoeffs class, compute kernel\n and separate into rng specific coefficients.\n '
kers = kernel(max_rng)
self._coeffs = [*reversed([*map(GenCoeffs.flattened_rim, map((lambda slices: kers[slices]), zip(repeat(...), *tee(chain((slice(None, None),), map((lambda i: slice(i, (- i))), range(1, max_rng)))))))])]
self._g_scalers = [*map((lambda coeffs: (255.9 / (255 * np.hypot(*coeffs)))), accumulate(map((lambda coeffs: np.maximum(coeffs, 0).sum(axis=1)), self._coeffs)))]
return self<|docstring|>Workhorse of GenCoeffs class, compute kernel
and separate into rng specific coefficients.<|endoftext|> |
a1e69045aaefd326444c8176fbec7f2b1d32ac4b7c3943e01d6fdfdeaea4ec89 | def as_code_str(self):
'\n s = ("\nSCALER_g = {\n"\n + reduce(op.add,\n (" %d:%0.15f,\n" % (i, scaler)\n for i, scaler in\n enumerate(self._g_scalers, start=1)))\n + "}\n")\n '
(ycoeffslist, xcoeffslist) = zip(*self._coeffs)
s = (((('\nY_COEFFS = {\n' + reduce(op.add, ((((' %d:np.' % i) + repr(ycoeffs)) + ',\n') for (i, ycoeffs) in enumerate(ycoeffslist, start=1)))) + '}\n\nX_COEFFS = {\n') + reduce(op.add, ((((' %d:np.' % i) + repr(xcoeffs)) + ',\n') for (i, xcoeffs) in enumerate(xcoeffslist, start=1)))) + '}\n')
return s | s = ("
SCALER_g = {
"
+ reduce(op.add,
(" %d:%0.15f,
" % (i, scaler)
for i, scaler in
enumerate(self._g_scalers, start=1)))
+ "}
") | frame_2D_alg/LUT_generators.py | as_code_str | bayshore-intelligence-solutions/CogAlg | 1 | python | def as_code_str(self):
'\n s = ("\nSCALER_g = {\n"\n + reduce(op.add,\n (" %d:%0.15f,\n" % (i, scaler)\n for i, scaler in\n enumerate(self._g_scalers, start=1)))\n + "}\n")\n '
(ycoeffslist, xcoeffslist) = zip(*self._coeffs)
s = (((('\nY_COEFFS = {\n' + reduce(op.add, ((((' %d:np.' % i) + repr(ycoeffs)) + ',\n') for (i, ycoeffs) in enumerate(ycoeffslist, start=1)))) + '}\n\nX_COEFFS = {\n') + reduce(op.add, ((((' %d:np.' % i) + repr(xcoeffs)) + ',\n') for (i, xcoeffs) in enumerate(xcoeffslist, start=1)))) + '}\n')
return s | def as_code_str(self):
'\n s = ("\nSCALER_g = {\n"\n + reduce(op.add,\n (" %d:%0.15f,\n" % (i, scaler)\n for i, scaler in\n enumerate(self._g_scalers, start=1)))\n + "}\n")\n '
(ycoeffslist, xcoeffslist) = zip(*self._coeffs)
s = (((('\nY_COEFFS = {\n' + reduce(op.add, ((((' %d:np.' % i) + repr(ycoeffs)) + ',\n') for (i, ycoeffs) in enumerate(ycoeffslist, start=1)))) + '}\n\nX_COEFFS = {\n') + reduce(op.add, ((((' %d:np.' % i) + repr(xcoeffs)) + ',\n') for (i, xcoeffs) in enumerate(xcoeffslist, start=1)))) + '}\n')
return s<|docstring|>s = ("
SCALER_g = {
"
+ reduce(op.add,
(" %d:%0.15f,
" % (i, scaler)
for i, scaler in
enumerate(self._g_scalers, start=1)))
+ "}
")<|endoftext|> |
c7c2ff1d4af42d5154fb670f80201523a29a66a3e7391d03588dcc6041dc05bf | def __init__(self, max_rng=3):
'\n Instantiate a GenTransSlice object.\n '
MTLookupTable.__init__(self, max_rng=max_rng) | Instantiate a GenTransSlice object. | frame_2D_alg/LUT_generators.py | __init__ | bayshore-intelligence-solutions/CogAlg | 1 | python | def __init__(self, max_rng=3):
'\n \n '
MTLookupTable.__init__(self, max_rng=max_rng) | def __init__(self, max_rng=3):
'\n \n '
MTLookupTable.__init__(self, max_rng=max_rng)<|docstring|>Instantiate a GenTransSlice object.<|endoftext|> |
d90fd6f54c448414bf47e7ed81d49a50bac138c1b7ef19200b1c3b489065fcfe | def _generate_table(self, max_rng):
'Generate target slices for comparison function.'
self._slices = []
slice_inds = [*chain((None,), range(1, ((max_rng * 2) + 1)))]
for r in range(3, ((max_rng * 2) + 2), 2):
slices = [*starmap(slice, bipolar(slice_inds[:r]))]
slices = [*chain(slices, repeat(slices[(- 1)], (r - 2)), reversed(slices), repeat(slices[0], (r - 2)))]
slices = [*zip(repeat(...), (slices[((- r) + 1):] + slices[:((- r) + 1)]), slices)]
self._slices.append(slices)
return self | Generate target slices for comparison function. | frame_2D_alg/LUT_generators.py | _generate_table | bayshore-intelligence-solutions/CogAlg | 1 | python | def _generate_table(self, max_rng):
self._slices = []
slice_inds = [*chain((None,), range(1, ((max_rng * 2) + 1)))]
for r in range(3, ((max_rng * 2) + 2), 2):
slices = [*starmap(slice, bipolar(slice_inds[:r]))]
slices = [*chain(slices, repeat(slices[(- 1)], (r - 2)), reversed(slices), repeat(slices[0], (r - 2)))]
slices = [*zip(repeat(...), (slices[((- r) + 1):] + slices[:((- r) + 1)]), slices)]
self._slices.append(slices)
return self | def _generate_table(self, max_rng):
self._slices = []
slice_inds = [*chain((None,), range(1, ((max_rng * 2) + 1)))]
for r in range(3, ((max_rng * 2) + 2), 2):
slices = [*starmap(slice, bipolar(slice_inds[:r]))]
slices = [*chain(slices, repeat(slices[(- 1)], (r - 2)), reversed(slices), repeat(slices[0], (r - 2)))]
slices = [*zip(repeat(...), (slices[((- r) + 1):] + slices[:((- r) + 1)]), slices)]
self._slices.append(slices)
return self<|docstring|>Generate target slices for comparison function.<|endoftext|> |
ee336e87d3f8a325fe1aa2b47b2c8c786a8585b9e9b12931f8900dbd85f4e16a | def forwards(self, orm):
'"Removes \'no\' instances and moves \'nb-NO\' instances to \'no\'.'
orm.Image.objects.filter(locale='no').delete()
orm.Image.objects.filter(locale='nb-NO').update(locale='no')
orm.Video.objects.filter(locale='no').delete()
orm.Video.objects.filter(locale='nb-NO').update(locale='no') | "Removes 'no' instances and moves 'nb-NO' instances to 'no'. | kitsune/gallery/migrations/0002_move_nbNO_to_no_locale.py | forwards | Dinodog/kitsune | 1 | python | def forwards(self, orm):
'"Removes \'no\' instances and moves \'nb-NO\' instances to \'no\'.'
orm.Image.objects.filter(locale='no').delete()
orm.Image.objects.filter(locale='nb-NO').update(locale='no')
orm.Video.objects.filter(locale='no').delete()
orm.Video.objects.filter(locale='nb-NO').update(locale='no') | def forwards(self, orm):
'"Removes \'no\' instances and moves \'nb-NO\' instances to \'no\'.'
orm.Image.objects.filter(locale='no').delete()
orm.Image.objects.filter(locale='nb-NO').update(locale='no')
orm.Video.objects.filter(locale='no').delete()
orm.Video.objects.filter(locale='nb-NO').update(locale='no')<|docstring|>"Removes 'no' instances and moves 'nb-NO' instances to 'no'.<|endoftext|> |
8c3c2a035e199151ee4d004e63558972ce8caa55ae169d01d82a837d8ced15ad | def backwards(self, orm):
'"Moves \'no\' instances to \'nb-NO\'.'
orm.Image.objects.filter(locale='no').update(locale='nb-NO')
orm.Video.objects.filter(locale='no').update(locale='nb-NO') | "Moves 'no' instances to 'nb-NO'. | kitsune/gallery/migrations/0002_move_nbNO_to_no_locale.py | backwards | Dinodog/kitsune | 1 | python | def backwards(self, orm):
'"Moves \'no\' instances to \'nb-NO\'.'
orm.Image.objects.filter(locale='no').update(locale='nb-NO')
orm.Video.objects.filter(locale='no').update(locale='nb-NO') | def backwards(self, orm):
'"Moves \'no\' instances to \'nb-NO\'.'
orm.Image.objects.filter(locale='no').update(locale='nb-NO')
orm.Video.objects.filter(locale='no').update(locale='nb-NO')<|docstring|>"Moves 'no' instances to 'nb-NO'.<|endoftext|> |
8ecb4e571298f632e6124a3399f2664311a33993e54c572dc538415e9b2870f5 | async def test_recent_items_intent(opp, sl_setup):
'Test recent items.'
(await intent.async_handle(opp, 'test', 'OppShoppingListAddItem', {'item': {'value': 'beer'}}))
(await intent.async_handle(opp, 'test', 'OppShoppingListAddItem', {'item': {'value': 'wine'}}))
(await intent.async_handle(opp, 'test', 'OppShoppingListAddItem', {'item': {'value': 'soda'}}))
response = (await intent.async_handle(opp, 'test', 'OppShoppingListLastItems'))
assert (response.speech['plain']['speech'] == 'These are the top 3 items on your shopping list: soda, wine, beer') | Test recent items. | tests/components/shopping_list/test_intent.py | test_recent_items_intent | pcaston/core | 1 | python | async def test_recent_items_intent(opp, sl_setup):
(await intent.async_handle(opp, 'test', 'OppShoppingListAddItem', {'item': {'value': 'beer'}}))
(await intent.async_handle(opp, 'test', 'OppShoppingListAddItem', {'item': {'value': 'wine'}}))
(await intent.async_handle(opp, 'test', 'OppShoppingListAddItem', {'item': {'value': 'soda'}}))
response = (await intent.async_handle(opp, 'test', 'OppShoppingListLastItems'))
assert (response.speech['plain']['speech'] == 'These are the top 3 items on your shopping list: soda, wine, beer') | async def test_recent_items_intent(opp, sl_setup):
(await intent.async_handle(opp, 'test', 'OppShoppingListAddItem', {'item': {'value': 'beer'}}))
(await intent.async_handle(opp, 'test', 'OppShoppingListAddItem', {'item': {'value': 'wine'}}))
(await intent.async_handle(opp, 'test', 'OppShoppingListAddItem', {'item': {'value': 'soda'}}))
response = (await intent.async_handle(opp, 'test', 'OppShoppingListLastItems'))
assert (response.speech['plain']['speech'] == 'These are the top 3 items on your shopping list: soda, wine, beer')<|docstring|>Test recent items.<|endoftext|> |
96edca57769aa5e6e34a3b2af37fdb14f3d1fa4459eddf769e75ad097664d2ca | @property
def input_shape(self):
'\n Input tensor shape of current node.\n\n Returns:\n tuple, tensor shape of input.\n '
return self._op_params['input_shape'] | Input tensor shape of current node.
Returns:
tuple, tensor shape of input. | ecosystem_tools/mindconverter/mindconverter/graph_based_converter/third_party_graph/input_node.py | input_shape | mindspore-ai/mindinsight | 216 | python | @property
def input_shape(self):
'\n Input tensor shape of current node.\n\n Returns:\n tuple, tensor shape of input.\n '
return self._op_params['input_shape'] | @property
def input_shape(self):
'\n Input tensor shape of current node.\n\n Returns:\n tuple, tensor shape of input.\n '
return self._op_params['input_shape']<|docstring|>Input tensor shape of current node.
Returns:
tuple, tensor shape of input.<|endoftext|> |
c11d254c3875a26dd1a8cae96eb8c483b4f782117b40b7ea9008a19742e8965b | @property
def output_shape(self):
'\n Output tensor shape.\n\n Returns:\n tuple, output tensor shape.\n '
return self._op_params['output_shape'] | Output tensor shape.
Returns:
tuple, output tensor shape. | ecosystem_tools/mindconverter/mindconverter/graph_based_converter/third_party_graph/input_node.py | output_shape | mindspore-ai/mindinsight | 216 | python | @property
def output_shape(self):
'\n Output tensor shape.\n\n Returns:\n tuple, output tensor shape.\n '
return self._op_params['output_shape'] | @property
def output_shape(self):
'\n Output tensor shape.\n\n Returns:\n tuple, output tensor shape.\n '
return self._op_params['output_shape']<|docstring|>Output tensor shape.
Returns:
tuple, output tensor shape.<|endoftext|> |
f890d0ac5cd6ffd604c45c06a4d3b69e1e1d4f957d661b985eaca8db780dbb85 | def set_scope_name(self, original_input_scope_name):
'\n Set scope name.\n Args:\n original_input_scope_name: Original input scope name needed to be linked.\n '
prefix_name = original_input_scope_name.split(SEPARATOR_IN_SCOPE)[0]
node_name = ''.join((self.node_type, '[input]'))
self._scope_name = os.path.join(prefix_name, node_name) | Set scope name.
Args:
original_input_scope_name: Original input scope name needed to be linked. | ecosystem_tools/mindconverter/mindconverter/graph_based_converter/third_party_graph/input_node.py | set_scope_name | mindspore-ai/mindinsight | 216 | python | def set_scope_name(self, original_input_scope_name):
'\n Set scope name.\n Args:\n original_input_scope_name: Original input scope name needed to be linked.\n '
prefix_name = original_input_scope_name.split(SEPARATOR_IN_SCOPE)[0]
node_name = .join((self.node_type, '[input]'))
self._scope_name = os.path.join(prefix_name, node_name) | def set_scope_name(self, original_input_scope_name):
'\n Set scope name.\n Args:\n original_input_scope_name: Original input scope name needed to be linked.\n '
prefix_name = original_input_scope_name.split(SEPARATOR_IN_SCOPE)[0]
node_name = .join((self.node_type, '[input]'))
self._scope_name = os.path.join(prefix_name, node_name)<|docstring|>Set scope name.
Args:
original_input_scope_name: Original input scope name needed to be linked.<|endoftext|> |
bf1b1299e96ba21d2f53715f69438445250792e2bbaa77333c1872782592c733 | def set_successor_nodes(self, original_input_scope_names):
'\n Set successor nodes.\n Args:\n original_input_scope_names: Original input scope names needed to be linked.\n '
if isinstance(original_input_scope_names, list):
self.successor_nodes = original_input_scope_names
elif isinstance(original_input_scope_names, str):
self.successor_nodes.append(original_input_scope_names)
else:
raise ValueError | Set successor nodes.
Args:
original_input_scope_names: Original input scope names needed to be linked. | ecosystem_tools/mindconverter/mindconverter/graph_based_converter/third_party_graph/input_node.py | set_successor_nodes | mindspore-ai/mindinsight | 216 | python | def set_successor_nodes(self, original_input_scope_names):
'\n Set successor nodes.\n Args:\n original_input_scope_names: Original input scope names needed to be linked.\n '
if isinstance(original_input_scope_names, list):
self.successor_nodes = original_input_scope_names
elif isinstance(original_input_scope_names, str):
self.successor_nodes.append(original_input_scope_names)
else:
raise ValueError | def set_successor_nodes(self, original_input_scope_names):
'\n Set successor nodes.\n Args:\n original_input_scope_names: Original input scope names needed to be linked.\n '
if isinstance(original_input_scope_names, list):
self.successor_nodes = original_input_scope_names
elif isinstance(original_input_scope_names, str):
self.successor_nodes.append(original_input_scope_names)
else:
raise ValueError<|docstring|>Set successor nodes.
Args:
original_input_scope_names: Original input scope names needed to be linked.<|endoftext|> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.