| code (string, lengths 22–1.05M) | apis (list, lengths 1–3.31k) | extract_api (string, lengths 75–3.25M) |
|---|---|---|
import sys
import logging
def get_logger(name):
logger = logging.getLogger(name)
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s - %(threadName)s - %(levelname)s: %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
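# --- Hedged usage sketch (not part of the original snippet) ---
# Assumes the get_logger() helper above; the logger name "worker" is arbitrary.
# Note that calling get_logger() twice with the same name attaches a second
# StreamHandler and duplicates output, so callers typically cache the result.
if __name__ == "__main__":
    log = get_logger("worker")
    log.info("processing started")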
|
[
"logging.Formatter",
"logging.StreamHandler",
"logging.getLogger"
] |
[((64, 87), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (81, 87), False, 'import logging\n'), ((137, 170), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (158, 170), False, 'import logging\n'), ((222, 300), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(threadName)s - %(levelname)s: %(message)s"""'], {}), "('%(asctime)s - %(threadName)s - %(levelname)s: %(message)s')\n", (239, 300), False, 'import logging\n')]
|
"""Run tests on documentation tests(doctest).
Currently set up this way because pytest does not support the "load_tests" protocol.
"""
import unittest
import doctest
import mochart.utils
def load_tests(loader, tests, ignore):
"""Load doctests as unit test suite."""
tests.addTests(doctest.DocTestSuite(mochart.utils))
return tests
if __name__ == "__main__":
unittest.main()
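# --- Hedged illustration (not part of the original snippet) ---
# DocTestSuite(mochart.utils) collects docstring examples such as the
# hypothetical helper below; the function is illustrative, not from mochart.
def add(a, b):
    """Return the sum of two numbers.

    >>> add(2, 3)
    5
    """
    return a + b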
|
[
"unittest.main",
"doctest.DocTestSuite"
] |
[((372, 387), 'unittest.main', 'unittest.main', ([], {}), '()\n', (385, 387), False, 'import unittest\n'), ((285, 320), 'doctest.DocTestSuite', 'doctest.DocTestSuite', (['mochart.utils'], {}), '(mochart.utils)\n', (305, 320), False, 'import doctest\n')]
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains Google DisplayVideo operators.
"""
import csv
import json
import shutil
import tempfile
import urllib.request
from typing import Any, Dict, List, Optional, Sequence, Union
from urllib.parse import urlparse
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.providers.google.marketing_platform.hooks.display_video import GoogleDisplayVideo360Hook
from airflow.utils.decorators import apply_defaults
class GoogleDisplayVideo360CreateReportOperator(BaseOperator):
"""
Creates a query.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleDisplayVideo360CreateReportOperator`
.. seealso::
Check also the official API docs:
`https://developers.google.com/bid-manager/v1/queries/createquery`
:param body: Report object passed to the request's body as described here:
https://developers.google.com/bid-manager/v1/queries#resource
:type body: Dict[str, Any]
    :param api_version: The version of the API that will be requested, for example 'v3'.
:type api_version: str
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields = ("body", "impersonation_chain",)
template_ext = (".json",)
@apply_defaults
def __init__(
self, *,
body: Dict[str, Any],
api_version: str = "v1",
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.body = body
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.impersonation_chain = impersonation_chain
def prepare_template(self) -> None:
# If .json is passed then we have to read the file
if isinstance(self.body, str) and self.body.endswith('.json'):
with open(self.body, 'r') as file:
self.body = json.load(file)
def execute(self, context: Dict):
hook = GoogleDisplayVideo360Hook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Creating Display & Video 360 report.")
response = hook.create_query(query=self.body)
report_id = response["queryId"]
self.xcom_push(context, key="report_id", value=report_id)
self.log.info("Created report with ID: %s", report_id)
return response
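# --- Hedged usage sketch (not part of the original file) ---
# Minimal instantiation inside a DAG; the task_id and report body below are
# illustrative assumptions, not taken from the source.
# create_report = GoogleDisplayVideo360CreateReportOperator(
#     task_id="create_report",
#     body={"kind": "doubleclickbidmanager#query", "metadata": {"title": "Example report"}},
# )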
class GoogleDisplayVideo360DeleteReportOperator(BaseOperator):
"""
Deletes a stored query as well as the associated stored reports.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleDisplayVideo360DeleteReportOperator`
.. seealso::
Check also the official API docs:
`https://developers.google.com/bid-manager/v1/queries/deletequery`
:param report_id: Report ID to delete.
:type report_id: str
:param report_name: Name of the report to delete.
:type report_name: str
    :param api_version: The version of the API that will be requested, for example 'v3'.
:type api_version: str
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields = ("report_id", "impersonation_chain",)
@apply_defaults
def __init__(
self, *,
report_id: Optional[str] = None,
report_name: Optional[str] = None,
api_version: str = "v1",
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.report_id = report_id
self.report_name = report_name
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.impersonation_chain = impersonation_chain
if report_name and report_id:
raise AirflowException("Use only one value - `report_name` or `report_id`.")
if not (report_name or report_id):
raise AirflowException(
"Provide one of the values: `report_name` or `report_id`."
)
def execute(self, context: Dict):
hook = GoogleDisplayVideo360Hook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
if self.report_id:
reports_ids_to_delete = [self.report_id]
else:
reports = hook.list_queries()
reports_ids_to_delete = [
report["queryId"]
for report in reports
if report["metadata"]["title"] == self.report_name
]
for report_id in reports_ids_to_delete:
self.log.info("Deleting report with id: %s", report_id)
hook.delete_query(query_id=report_id)
self.log.info("Report deleted.")
class GoogleDisplayVideo360DownloadReportOperator(BaseOperator):
"""
Retrieves a stored query.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleDisplayVideo360DownloadReportOperator`
.. seealso::
Check also the official API docs:
`https://developers.google.com/bid-manager/v1/queries/getquery`
:param report_id: Report ID to retrieve.
:type report_id: str
:param bucket_name: The bucket to upload to.
:type bucket_name: str
:param report_name: The report name to set when uploading the local file.
:type report_name: str
:param chunk_size: File will be downloaded in chunks of this many bytes.
:type chunk_size: int
:param gzip: Option to compress local file or file data for upload
:type gzip: bool
    :param api_version: The version of the API that will be requested, for example 'v3'.
:type api_version: str
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields = ("report_id", "bucket_name", "report_name", "impersonation_chain",)
@apply_defaults
def __init__(
self, *,
report_id: str,
bucket_name: str,
report_name: Optional[str] = None,
gzip: bool = True,
chunk_size: int = 10 * 1024 * 1024,
api_version: str = "v1",
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.report_id = report_id
self.chunk_size = chunk_size
self.gzip = gzip
self.bucket_name = self._set_bucket_name(bucket_name)
self.report_name = report_name
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.impersonation_chain = impersonation_chain
def _resolve_file_name(self, name: str) -> str:
new_name = name if name.endswith(".csv") else f"{name}.csv"
new_name = f"{new_name}.gz" if self.gzip else new_name
return new_name
@staticmethod
def _set_bucket_name(name: str) -> str:
bucket = name if not name.startswith("gs://") else name[5:]
return bucket.strip("/")
def execute(self, context: Dict):
hook = GoogleDisplayVideo360Hook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
gcs_hook = GCSHook(
google_cloud_storage_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
impersonation_chain=self.impersonation_chain,
)
resource = hook.get_query(query_id=self.report_id)
# Check if report is ready
if resource["metadata"]["running"]:
raise AirflowException(f"Report {self.report_id} is still running")
# If no custom report_name provided, use DV360 name
file_url = resource["metadata"]["googleCloudStoragePathForLatestReport"]
report_name = self.report_name or urlparse(file_url).path.split("/")[-1]
report_name = self._resolve_file_name(report_name)
# Download the report
self.log.info("Starting downloading report %s", self.report_id)
with tempfile.NamedTemporaryFile(delete=False) as temp_file:
with urllib.request.urlopen(file_url) as response:
shutil.copyfileobj(response, temp_file, length=self.chunk_size)
temp_file.flush()
# Upload the local file to bucket
gcs_hook.upload(
bucket_name=self.bucket_name,
object_name=report_name,
gzip=self.gzip,
filename=temp_file.name,
mime_type="text/csv",
)
self.log.info(
"Report %s was saved in bucket %s as %s.",
self.report_id,
self.bucket_name,
report_name,
)
self.xcom_push(context, key="report_name", value=report_name)
class GoogleDisplayVideo360RunReportOperator(BaseOperator):
"""
Runs a stored query to generate a report.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleDisplayVideo360RunReportOperator`
.. seealso::
Check also the official API docs:
`https://developers.google.com/bid-manager/v1/queries/runquery`
:param report_id: Report ID to run.
:type report_id: str
:param params: Parameters for running a report as described here:
https://developers.google.com/bid-manager/v1/queries/runquery
:type params: Dict[str, Any]
    :param api_version: The version of the API that will be requested, for example 'v3'.
:type api_version: str
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields = ("report_id", "params", "impersonation_chain",)
@apply_defaults
def __init__(
self, *,
report_id: str,
params: Dict[str, Any],
api_version: str = "v1",
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.report_id = report_id
self.params = params
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.impersonation_chain = impersonation_chain
def execute(self, context: Dict):
hook = GoogleDisplayVideo360Hook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self.log.info(
"Running report %s with the following params:\n %s",
self.report_id,
self.params,
)
hook.run_query(query_id=self.report_id, params=self.params)
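# --- Hedged usage sketch (not part of the original file) ---
# The params dict follows the queries.runquery request body; the date range
# shown is an illustrative assumption.
# run_report = GoogleDisplayVideo360RunReportOperator(
#     task_id="run_report",
#     report_id="{{ task_instance.xcom_pull('create_report', key='report_id') }}",
#     params={"dataRange": "LAST_14_DAYS"},
# )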
class GoogleDisplayVideo360DownloadLineItemsOperator(BaseOperator):
"""
Retrieves line items in CSV format.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleDisplayVideo360DownloadLineItemsOperator`
.. seealso::
Check also the official API docs:
`https://developers.google.com/bid-manager/v1.1/lineitems/downloadlineitems`
    :param request_body: Dictionary with parameters that should be passed into the request body.
More information about it can be found here:
https://developers.google.com/bid-manager/v1.1/lineitems/downloadlineitems
    :type request_body: Dict[str, Any]
"""
template_fields = ("request_body", "bucket_name", "object_name", "impersonation_chain",)
@apply_defaults
def __init__(
self, *,
request_body: Dict[str, Any],
bucket_name: str,
object_name: str,
gzip: bool = False,
api_version: str = "v1.1",
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs
) -> None:
super().__init__(**kwargs)
self.request_body = request_body
self.object_name = object_name
self.bucket_name = bucket_name
self.gzip = gzip
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.impersonation_chain = impersonation_chain
def execute(self, context: Dict) -> str:
gcs_hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
impersonation_chain=self.impersonation_chain,
)
hook = GoogleDisplayVideo360Hook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
delegate_to=self.delegate_to,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Retrieving report...")
content: List[str] = hook.download_line_items(request_body=self.request_body)
with tempfile.NamedTemporaryFile("w+") as temp_file:
writer = csv.writer(temp_file)
writer.writerows(content)
temp_file.flush()
gcs_hook.upload(
bucket_name=self.bucket_name,
object_name=self.object_name,
filename=temp_file.name,
mime_type="text/csv",
gzip=self.gzip,
)
return f"{self.bucket_name}/{self.object_name}"
class GoogleDisplayVideo360UploadLineItemsOperator(BaseOperator):
"""
Uploads line items in CSV format.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleDisplayVideo360UploadLineItemsOperator`
.. seealso::
Check also the official API docs:
`https://developers.google.com/bid-manager/v1.1/lineitems/uploadlineitems`
:param request_body: request to upload line items.
:type request_body: Dict[str, Any]
    :param bucket_name: The bucket from which the data is downloaded.
:type bucket_name: str
:param object_name: The object to fetch.
    :type object_name: str
    :param filename: The filename to fetch.
    :type filename: str
    :param dry_run: Upload status without actually persisting the line items.
    :type dry_run: bool
"""
template_fields = (
"bucket_name",
"object_name",
"impersonation_chain",
)
@apply_defaults
def __init__(
self, *,
bucket_name: str,
object_name: str,
api_version: str = "v1.1",
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.bucket_name = bucket_name
self.object_name = object_name
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.impersonation_chain = impersonation_chain
def execute(self, context: Dict):
gcs_hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
impersonation_chain=self.impersonation_chain,
)
hook = GoogleDisplayVideo360Hook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Uploading file %s...")
# Saving file in the temporary directory,
# downloaded file from the GCS could be a 1GB size or even more
with tempfile.NamedTemporaryFile("w+") as f:
line_items = gcs_hook.download(
bucket_name=self.bucket_name,
object_name=self.object_name,
filename=f.name,
)
f.flush()
hook.upload_line_items(line_items=line_items)
class GoogleDisplayVideo360CreateSDFDownloadTaskOperator(BaseOperator):
"""
Creates SDF operation task.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleDisplayVideo360CreateSDFDownloadTaskOperator`
.. seealso::
Check also the official API docs:
`https://developers.google.com/display-video/api/reference/rest`
    :param version: The SDF version of the downloaded file.
:type version: str
:param partner_id: The ID of the partner to download SDF for.
:type partner_id: str
:param advertiser_id: The ID of the advertiser to download SDF for.
:type advertiser_id: str
:param parent_entity_filter: Filters on selected file types.
:type parent_entity_filter: Dict[str, Any]
:param id_filter: Filters on entities by their entity IDs.
:type id_filter: Dict[str, Any]
:param inventory_source_filter: Filters on Inventory Sources by their IDs.
:type inventory_source_filter: Dict[str, Any]
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields = ("body_request", "impersonation_chain",)
@apply_defaults
def __init__(
self, *,
body_request: Dict[str, Any],
api_version: str = "v1",
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.body_request = body_request
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.impersonation_chain = impersonation_chain
def execute(self, context: Dict):
hook = GoogleDisplayVideo360Hook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Creating operation for SDF download task...")
operation = hook.create_sdf_download_operation(
body_request=self.body_request
)
return operation
class GoogleDisplayVideo360SDFtoGCSOperator(BaseOperator):
"""
Download SDF media and save it in the Google Cloud Storage.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleDisplayVideo360SDFtoGCSOperator`
.. seealso::
Check also the official API docs:
`https://developers.google.com/display-video/api/reference/rest`
    :param version: The SDF version of the downloaded file.
:type version: str
:param partner_id: The ID of the partner to download SDF for.
:type partner_id: str
:param advertiser_id: The ID of the advertiser to download SDF for.
:type advertiser_id: str
:param parent_entity_filter: Filters on selected file types.
:type parent_entity_filter: Dict[str, Any]
:param id_filter: Filters on entities by their entity IDs.
:type id_filter: Dict[str, Any]
:param inventory_source_filter: Filters on Inventory Sources by their IDs.
:type inventory_source_filter: Dict[str, Any]
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields = ("operation_name", "bucket_name", "object_name", "impersonation_chain",)
@apply_defaults
def __init__(
self, *,
operation_name: str,
bucket_name: str,
object_name: str,
gzip: bool = False,
api_version: str = "v1",
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.operation_name = operation_name
self.bucket_name = bucket_name
self.object_name = object_name
self.gzip = gzip
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.impersonation_chain = impersonation_chain
def execute(self, context: Dict):
hook = GoogleDisplayVideo360Hook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
gcs_hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Retrieving operation...")
operation = hook.get_sdf_download_operation(operation_name=self.operation_name)
self.log.info("Creating file for upload...")
media = hook.download_media(resource_name=operation)
self.log.info("Sending file to the Google Cloud Storage...")
with tempfile.NamedTemporaryFile() as temp_file:
hook.download_content_from_request(
temp_file, media, chunk_size=1024 * 1024
)
temp_file.flush()
gcs_hook.upload(
bucket_name=self.bucket_name,
object_name=self.object_name,
filename=temp_file.name,
gzip=self.gzip,
)
return f"{self.bucket_name}/{self.object_name}"
|
[
"tempfile.NamedTemporaryFile",
"airflow.providers.google.marketing_platform.hooks.display_video.GoogleDisplayVideo360Hook",
"json.load",
"csv.writer",
"airflow.providers.google.cloud.hooks.gcs.GCSHook",
"shutil.copyfileobj",
"airflow.exceptions.AirflowException",
"urllib.parse.urlparse"
] |
[((4025, 4196), 'airflow.providers.google.marketing_platform.hooks.display_video.GoogleDisplayVideo360Hook', 'GoogleDisplayVideo360Hook', ([], {'gcp_conn_id': 'self.gcp_conn_id', 'delegate_to': 'self.delegate_to', 'api_version': 'self.api_version', 'impersonation_chain': 'self.impersonation_chain'}), '(gcp_conn_id=self.gcp_conn_id, delegate_to=self.\n delegate_to, api_version=self.api_version, impersonation_chain=self.\n impersonation_chain)\n', (4050, 4196), False, 'from airflow.providers.google.marketing_platform.hooks.display_video import GoogleDisplayVideo360Hook\n'), ((7388, 7559), 'airflow.providers.google.marketing_platform.hooks.display_video.GoogleDisplayVideo360Hook', 'GoogleDisplayVideo360Hook', ([], {'gcp_conn_id': 'self.gcp_conn_id', 'delegate_to': 'self.delegate_to', 'api_version': 'self.api_version', 'impersonation_chain': 'self.impersonation_chain'}), '(gcp_conn_id=self.gcp_conn_id, delegate_to=self.\n delegate_to, api_version=self.api_version, impersonation_chain=self.\n impersonation_chain)\n', (7413, 7559), False, 'from airflow.providers.google.marketing_platform.hooks.display_video import GoogleDisplayVideo360Hook\n'), ((11549, 11720), 'airflow.providers.google.marketing_platform.hooks.display_video.GoogleDisplayVideo360Hook', 'GoogleDisplayVideo360Hook', ([], {'gcp_conn_id': 'self.gcp_conn_id', 'delegate_to': 'self.delegate_to', 'api_version': 'self.api_version', 'impersonation_chain': 'self.impersonation_chain'}), '(gcp_conn_id=self.gcp_conn_id, delegate_to=self.\n delegate_to, api_version=self.api_version, impersonation_chain=self.\n impersonation_chain)\n', (11574, 11720), False, 'from airflow.providers.google.marketing_platform.hooks.display_video import GoogleDisplayVideo360Hook\n'), ((11789, 11924), 'airflow.providers.google.cloud.hooks.gcs.GCSHook', 'GCSHook', ([], {'google_cloud_storage_conn_id': 'self.gcp_conn_id', 'delegate_to': 'self.delegate_to', 'impersonation_chain': 'self.impersonation_chain'}), '(google_cloud_storage_conn_id=self.gcp_conn_id, delegate_to=self.\n delegate_to, impersonation_chain=self.impersonation_chain)\n', (11796, 11924), False, 'from airflow.providers.google.cloud.hooks.gcs import GCSHook\n'), ((15907, 16078), 'airflow.providers.google.marketing_platform.hooks.display_video.GoogleDisplayVideo360Hook', 'GoogleDisplayVideo360Hook', ([], {'gcp_conn_id': 'self.gcp_conn_id', 'delegate_to': 'self.delegate_to', 'api_version': 'self.api_version', 'impersonation_chain': 'self.impersonation_chain'}), '(gcp_conn_id=self.gcp_conn_id, delegate_to=self.\n delegate_to, api_version=self.api_version, impersonation_chain=self.\n impersonation_chain)\n', (15932, 16078), False, 'from airflow.providers.google.marketing_platform.hooks.display_video import GoogleDisplayVideo360Hook\n'), ((17980, 18097), 'airflow.providers.google.cloud.hooks.gcs.GCSHook', 'GCSHook', ([], {'gcp_conn_id': 'self.gcp_conn_id', 'delegate_to': 'self.delegate_to', 'impersonation_chain': 'self.impersonation_chain'}), '(gcp_conn_id=self.gcp_conn_id, delegate_to=self.delegate_to,\n impersonation_chain=self.impersonation_chain)\n', (17987, 18097), False, 'from airflow.providers.google.cloud.hooks.gcs import GCSHook\n'), ((18156, 18327), 'airflow.providers.google.marketing_platform.hooks.display_video.GoogleDisplayVideo360Hook', 'GoogleDisplayVideo360Hook', ([], {'gcp_conn_id': 'self.gcp_conn_id', 'api_version': 'self.api_version', 'delegate_to': 'self.delegate_to', 'impersonation_chain': 'self.impersonation_chain'}), '(gcp_conn_id=self.gcp_conn_id, api_version=self.\n api_version, 
delegate_to=self.delegate_to, impersonation_chain=self.\n impersonation_chain)\n', (18181, 18327), False, 'from airflow.providers.google.marketing_platform.hooks.display_video import GoogleDisplayVideo360Hook\n'), ((20648, 20765), 'airflow.providers.google.cloud.hooks.gcs.GCSHook', 'GCSHook', ([], {'gcp_conn_id': 'self.gcp_conn_id', 'delegate_to': 'self.delegate_to', 'impersonation_chain': 'self.impersonation_chain'}), '(gcp_conn_id=self.gcp_conn_id, delegate_to=self.delegate_to,\n impersonation_chain=self.impersonation_chain)\n', (20655, 20765), False, 'from airflow.providers.google.cloud.hooks.gcs import GCSHook\n'), ((20824, 20995), 'airflow.providers.google.marketing_platform.hooks.display_video.GoogleDisplayVideo360Hook', 'GoogleDisplayVideo360Hook', ([], {'gcp_conn_id': 'self.gcp_conn_id', 'delegate_to': 'self.delegate_to', 'api_version': 'self.api_version', 'impersonation_chain': 'self.impersonation_chain'}), '(gcp_conn_id=self.gcp_conn_id, delegate_to=self.\n delegate_to, api_version=self.api_version, impersonation_chain=self.\n impersonation_chain)\n', (20849, 20995), False, 'from airflow.providers.google.marketing_platform.hooks.display_video import GoogleDisplayVideo360Hook\n'), ((24322, 24493), 'airflow.providers.google.marketing_platform.hooks.display_video.GoogleDisplayVideo360Hook', 'GoogleDisplayVideo360Hook', ([], {'gcp_conn_id': 'self.gcp_conn_id', 'delegate_to': 'self.delegate_to', 'api_version': 'self.api_version', 'impersonation_chain': 'self.impersonation_chain'}), '(gcp_conn_id=self.gcp_conn_id, delegate_to=self.\n delegate_to, api_version=self.api_version, impersonation_chain=self.\n impersonation_chain)\n', (24347, 24493), False, 'from airflow.providers.google.marketing_platform.hooks.display_video import GoogleDisplayVideo360Hook\n'), ((27756, 27927), 'airflow.providers.google.marketing_platform.hooks.display_video.GoogleDisplayVideo360Hook', 'GoogleDisplayVideo360Hook', ([], {'gcp_conn_id': 'self.gcp_conn_id', 'delegate_to': 'self.delegate_to', 'api_version': 'self.api_version', 'impersonation_chain': 'self.impersonation_chain'}), '(gcp_conn_id=self.gcp_conn_id, delegate_to=self.\n delegate_to, api_version=self.api_version, impersonation_chain=self.\n impersonation_chain)\n', (27781, 27927), False, 'from airflow.providers.google.marketing_platform.hooks.display_video import GoogleDisplayVideo360Hook\n'), ((27996, 28113), 'airflow.providers.google.cloud.hooks.gcs.GCSHook', 'GCSHook', ([], {'gcp_conn_id': 'self.gcp_conn_id', 'delegate_to': 'self.delegate_to', 'impersonation_chain': 'self.impersonation_chain'}), '(gcp_conn_id=self.gcp_conn_id, delegate_to=self.delegate_to,\n impersonation_chain=self.impersonation_chain)\n', (28003, 28113), False, 'from airflow.providers.google.cloud.hooks.gcs import GCSHook\n'), ((7094, 7164), 'airflow.exceptions.AirflowException', 'AirflowException', (['"""Use only one value - `report_name` or `report_id`."""'], {}), "('Use only one value - `report_name` or `report_id`.')\n", (7110, 7164), False, 'from airflow.exceptions import AirflowException\n'), ((7227, 7303), 'airflow.exceptions.AirflowException', 'AirflowException', (['"""Provide one of the values: `report_name` or `report_id`."""'], {}), "('Provide one of the values: `report_name` or `report_id`.')\n", (7243, 7303), False, 'from airflow.exceptions import AirflowException\n'), ((12124, 12185), 'airflow.exceptions.AirflowException', 'AirflowException', (['f"""Report {self.report_id} is still running"""'], {}), "(f'Report {self.report_id} is still running')\n", (12140, 12185), 
False, 'from airflow.exceptions import AirflowException\n'), ((12584, 12625), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (12611, 12625), False, 'import tempfile\n'), ((18524, 18557), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', (['"""w+"""'], {}), "('w+')\n", (18551, 18557), False, 'import tempfile\n'), ((18593, 18614), 'csv.writer', 'csv.writer', (['temp_file'], {}), '(temp_file)\n', (18603, 18614), False, 'import csv\n'), ((21227, 21260), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', (['"""w+"""'], {}), "('w+')\n", (21254, 21260), False, 'import tempfile\n'), ((28493, 28522), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (28520, 28522), False, 'import tempfile\n'), ((3955, 3970), 'json.load', 'json.load', (['file'], {}), '(file)\n', (3964, 3970), False, 'import json\n'), ((12719, 12782), 'shutil.copyfileobj', 'shutil.copyfileobj', (['response', 'temp_file'], {'length': 'self.chunk_size'}), '(response, temp_file, length=self.chunk_size)\n', (12737, 12782), False, 'import shutil\n'), ((12370, 12388), 'urllib.parse.urlparse', 'urlparse', (['file_url'], {}), '(file_url)\n', (12378, 12388), False, 'from urllib.parse import urlparse\n')]
|
# Copyright 2018-2019, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch.autograd import Function
from torch.nn import Module
from warprnnt_numba.rnnt_loss.rnnt_atomic_locks import rnnt_atomic_locks
from warprnnt_numba.rnnt_loss.utils.cpu_utils import cpu_rnnt
class _RNNTNumbaAtomicLock(Function):
@staticmethod
def forward(ctx, acts, labels, act_lens, label_lens, blank, reduction, fastemit_lambda, clamp):
"""
log_probs: Tensor of (batch x seqLength x labelLength x outputDim) containing output from network
        labels: 2 dimensional Tensor containing all the targets of the batch, zero padded
act_lens: Tensor of size (batch) containing size of each output sequence from the network
label_lens: Tensor of (batch) containing label length of each example
fastemit_lambda: Float scaling factor for FastEmit regularization. Refer to
FastEmit: Low-latency Streaming ASR with Sequence-level Emission Regularization.
"""
is_cuda = acts.is_cuda
certify_inputs(acts, labels, act_lens, label_lens)
if clamp < 0:
raise ValueError("`clamp` must be 0.0 or positive float value.")
loss_func = rnnt_atomic_locks.rnnt_loss_gpu if is_cuda else rnnt_atomic_locks.rnnt_loss_cpu
grads = torch.zeros_like(acts) if acts.requires_grad else None
minibatch_size = acts.size(0)
costs = torch.zeros(minibatch_size, device=acts.device, dtype=acts.dtype)
loss_func(
acts,
labels=labels,
input_lengths=act_lens,
label_lengths=label_lens,
costs=costs,
grads=grads,
blank_label=blank,
fastemit_lambda=fastemit_lambda,
clamp=clamp,
num_threads=0,
)
if reduction in ['sum', 'mean']:
costs = costs.sum().unsqueeze_(-1)
if reduction == 'mean':
costs /= minibatch_size
if grads is not None:
grads /= minibatch_size
ctx.grads = grads
return costs
@staticmethod
def backward(ctx, grad_output):
if grad_output is not None and ctx.grads is not None:
grad_output = grad_output.view(-1, 1, 1, 1).to(ctx.grads)
return ctx.grads.mul(grad_output), None, None, None, None, None, None, None
class RNNTLossNumbaAtomicLock(Module):
"""
Parameters:
blank (int, optional): blank label. Default: 0.
reduction (string, optional): Specifies the reduction to apply to the output:
'none' | 'mean' | 'sum'. 'none': no reduction will be applied,
'mean': the output losses will be divided by the target lengths and
then the mean over the batch is taken. Default: 'mean'
fastemit_lambda: Float scaling factor for FastEmit regularization. Refer to
FastEmit: Low-latency Streaming ASR with Sequence-level Emission Regularization.
clamp: Float value. When set to value >= 0.0, will clamp the gradient to [-clamp, clamp].
"""
def __init__(self, blank=0, reduction='mean', fastemit_lambda: float = 0.0, clamp: float = -1):
super().__init__()
self.blank = blank
self.fastemit_lambda = fastemit_lambda
self.clamp = float(clamp) if clamp > 0 else 0.0
self.reduction = reduction
self.loss = _RNNTNumbaAtomicLock.apply
def forward(self, acts, labels, act_lens, label_lens):
"""
log_probs: Tensor of (batch x seqLength x labelLength x outputDim) containing output from network
        labels: 2 dimensional Tensor containing all the targets of the batch, zero padded
act_lens: Tensor of size (batch) containing size of each output sequence from the network
label_lens: Tensor of (batch) containing label length of each example
"""
if not acts.is_cuda:
# Since CPU requires log_softmax to be computed explicitly, we need to perform grad clipping
# *after* we have obtained the gradients of loss(logsoftmax()).
# This is highly wasteful since it requires a copy of the entire joint tensor which is expensive.
# CUDA version is much more efficient since it performs an inplace logsoftmax, and therefore
# can inplace clamp the gradient.
if self.clamp > 0.0:
acts = cpu_rnnt.LogSoftmaxGradModification.apply(acts, self.clamp)
# NOTE manually done log_softmax for CPU version,
# log_softmax is computed within GPU version.
acts = torch.nn.functional.log_softmax(acts, -1)
return self.loss(
acts, labels, act_lens, label_lens, self.blank, self.reduction, self.fastemit_lambda, self.clamp
)
def check_type(var, t, name):
if var.dtype is not t:
raise TypeError("{} must be {}".format(name, t))
def check_contiguous(var, name):
if not var.is_contiguous():
raise ValueError("{} must be contiguous".format(name))
def check_dim(var, dim, name):
if len(var.shape) != dim:
raise ValueError("{} must be {}D".format(name, dim))
def certify_inputs(log_probs, labels, lengths, label_lengths):
# check_type(log_probs, torch.float32, "log_probs")
check_type(labels, torch.int32, "labels")
check_type(label_lengths, torch.int32, "label_lengths")
check_type(lengths, torch.int32, "lengths")
check_contiguous(log_probs, "log_probs")
check_contiguous(labels, "labels")
check_contiguous(label_lengths, "label_lengths")
check_contiguous(lengths, "lengths")
if lengths.shape[0] != log_probs.shape[0]:
raise ValueError(
f"Must have a length per example. "
f"Given lengths dim: {lengths.shape[0]}, "
f"Log probs dim : {log_probs.shape[0]}"
)
if label_lengths.shape[0] != log_probs.shape[0]:
raise ValueError(
"Must have a label length per example. "
f"Given label lengths dim : {label_lengths.shape[0]}, "
f"Log probs dim : {log_probs.shape[0]}"
)
check_dim(log_probs, 4, "log_probs")
check_dim(labels, 2, "labels")
check_dim(lengths, 1, "lenghts")
check_dim(label_lengths, 1, "label_lenghts")
max_T = torch.max(lengths)
max_U = torch.max(label_lengths)
T, U = log_probs.shape[1:3]
if T != max_T:
raise ValueError(f"Input length mismatch! Given T: {T}, Expected max T from input lengths: {max_T}")
if U != max_U + 1:
raise ValueError(f"Output length mismatch! Given U: {U}, Expected max U from target lengths: {max_U} + 1")
|
[
"torch.zeros_like",
"torch.max",
"torch.nn.functional.log_softmax",
"torch.zeros",
"warprnnt_numba.rnnt_loss.utils.cpu_utils.cpu_rnnt.LogSoftmaxGradModification.apply"
] |
[((6836, 6854), 'torch.max', 'torch.max', (['lengths'], {}), '(lengths)\n', (6845, 6854), False, 'import torch\n'), ((6867, 6891), 'torch.max', 'torch.max', (['label_lengths'], {}), '(label_lengths)\n', (6876, 6891), False, 'import torch\n'), ((1941, 2006), 'torch.zeros', 'torch.zeros', (['minibatch_size'], {'device': 'acts.device', 'dtype': 'acts.dtype'}), '(minibatch_size, device=acts.device, dtype=acts.dtype)\n', (1952, 2006), False, 'import torch\n'), ((1832, 1854), 'torch.zeros_like', 'torch.zeros_like', (['acts'], {}), '(acts)\n', (1848, 1854), False, 'import torch\n'), ((5149, 5190), 'torch.nn.functional.log_softmax', 'torch.nn.functional.log_softmax', (['acts', '(-1)'], {}), '(acts, -1)\n', (5180, 5190), False, 'import torch\n'), ((4949, 5008), 'warprnnt_numba.rnnt_loss.utils.cpu_utils.cpu_rnnt.LogSoftmaxGradModification.apply', 'cpu_rnnt.LogSoftmaxGradModification.apply', (['acts', 'self.clamp'], {}), '(acts, self.clamp)\n', (4990, 5008), False, 'from warprnnt_numba.rnnt_loss.utils.cpu_utils import cpu_rnnt\n')]
|
from singleton_decorator import singleton
import re
from .Cardinal import Cardinal
from .Ordinal import Ordinal
@singleton
class Date:
"""
Steps:
- 1 Preprocess token
- 1.1 Remove dots from token
- 1.2 Remove "th", "nd", etc. from "5th July" while preserving "Thursday"
- 1.3 Check for a day prefix, eg "Thursday 5 may"
- 1.4 Check for "the" prefix, eg "the 5 july"
- 2 Match "DD Month" or "Month DD"
- 3 Match "MM-DD-YY(YY)", "YY(YY)-MM-DD", "DD-Month-YY(YY)", "YY(YY)-Month-DD" or "Month-DD-YY(YY)"
- 4 Match "DD Month YYYY", "Month YYYY", "YYYY", "YYYYs" or "Month DD, YYYY"
Edge cases:
"Thursday 5th of May" -> "thursday fifth of may"
"90s" -> "nineties"
"December 2010s" -> "december twenty tens"
"13 AD" -> "thirteen a d"
Note:
    This converter essentially uses regular expressions only. The regular expressions could be used to classify the data as well.
"""
def __init__(self):
super().__init__()
# Regex to remove dots
self.filter_regex = re.compile(r"[,']")
# Regex to check for a prefixed day
self.day_regex = re.compile(r"^(?P<prefix>monday|tuesday|wednesday|thursday|friday|saturday|sunday|mon|tue|wed|thu|fri|sat|sun)\.?", flags=re.I)
# Regex to check for yyyy-mm-dd date
self.dash_date_ymd_regex = re.compile(r"^(?P<year>\d{2,5}) *(?:-|\.|/) *(?P<month>\d{1,2}) *(?:-|\.|/) *(?P<day>\d{1,2})$", flags=re.I)
# Regex to check for mm-dd-yyyy date
self.dash_date_mdy_regex = re.compile(r"^(?P<month>\d{1,2}) *(?:-|\.|/) *(?P<day>\d{1,2}) *(?:-|\.|/) *(?P<year>\d{2,5})$", flags=re.I)
# Regex to check for YYYY-Month-DD
self.text_ymd_regex = re.compile(r"^(?P<year>\d{2,5}) *(?:-|\.|/) *(?P<month>january|february|march|april|may|june|july|august|september|october|november|december|sept|jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec) *(?:-|\.|/) *(?P<day>\d{1,2})$", flags=re.I)
# Regex to check for DD-Month-YYYY
self.text_dmy_regex = re.compile(r"^(?P<day>\d{1,2}) *(?:-|\.|/) *(?P<month>january|february|march|april|may|june|july|august|september|october|november|december|sept|jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec) *(?:-|\.|/) *(?P<year>\d{2,5})$", flags=re.I)
# Regex to check for Month-DD-YYYY
self.text_mdy_regex = re.compile(r"^(?P<month>january|february|march|april|may|june|july|august|september|october|november|december|sept|jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec) *(?:-|\.|/) *(?P<day>\d{1,2}) *(?:-|\.|/) *(?P<year>\d{2,5})$", flags=re.I)
# Regex to check for DD Month YYYY, Month YYYY, YYYY or YYYYs
self.dmy_regex = re.compile(r"^(?:(?:(?P<day>\d{1,2}) +(of +)?)?(?P<month>january|february|march|april|may|june|july|august|september|october|november|december|sept|jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec)\.? +)?(?P<year>\d{1,5})(?P<suffix>s?)\/?(?: *(?P<bcsuffix>[A-Z\.]+)?)$", flags=re.I)
# Regex to check for Month DD, YYYY
self.mdy_regex = re.compile(r"^(?P<month>january|february|march|april|may|june|july|august|september|october|november|december|sept|jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec)?\.? *(?P<day>\d{1,2})? +(?P<year>\d{1,5})(?P<suffix>s?)\/?(?: *(?P<bcsuffix>[A-Z\.]+)?)$", flags=re.I)
# Regex to check for DD Month
self.dm_regex = re.compile(r"^(?P<day>\d{1,2}) +(of +)?(?P<month>january|february|march|april|may|june|july|august|september|october|november|december|sept|jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec)\.?(?: *(?P<bcsuffix>[A-Z\.]+)?)$", flags=re.I)
# Regex to check for Month DD
self.md_regex = re.compile(r"^(?P<month>january|february|march|april|may|june|july|august|september|october|november|december|sept|jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec)\.? +(?P<day>\d{1,2})(?: *(?P<bcsuffix>[A-Z\.]+)?)$", flags=re.I)
# Regex to find "th" in "5th", "nd" in "22nd", "rd" in "3rd", without matching "thursday", "monday", etc.
self.th_regex = re.compile(r"(?:(?<=\d)|(?<=\d ))(?:th|nd|rd|st)", flags=re.I)
# Translation dict to convert potential months to the correct format
self.trans_month_dict = {
"jan": "january",
"feb": "february",
"mar": "march",
"apr": "april",
#"may": "may",
"jun": "june",
"jul": "july",
"aug": "august",
"sep": "september",
"oct": "october",
"nov": "november",
"dec": "december",
"sept": "september",
"01": "january",
"02": "february",
"03": "march",
"04": "april",
"05": "may",
"06": "june",
"07": "july",
"08": "august",
"09": "september",
"10": "october",
"11": "november",
"12": "december",
"1": "january",
"2": "february",
"3": "march",
"4": "april",
"5": "may",
"6": "june",
"7": "july",
"8": "august",
"9": "september",
}
# Translation dict to convert days to the correct format
self.trans_day_dict = {
"mon": "monday",
"tue": "tuesday",
"wed": "wednesday",
"thu": "thursday",
"fri": "friday",
"sat": "saturday",
"sun": "sunday",
}
# Cardinal and Ordinal conversion
self.cardinal = Cardinal()
self.ordinal = Ordinal()
def convert(self, token: str) -> str:
# dmy to true means the format is "the day of month year"
dmy = True
# Prefix could be "Thursday", while Suffix might be "B.C."
prefix = None
day = None
month = None
year = None
suffix = None
# 1.1 Remove dots from token
token = self.filter_regex.sub("", token).strip()
# 1.2 Remove "th" from "5th" while preserving "thursday"
match = self.th_regex.search(token)
if match:
token = token[:match.span()[0]] + token[match.span()[1]:]
# 1.3 Check for a day prefix, eg "Thursday 14 May 2009"
match = self.day_regex.match(token)
if match:
prefix = self.get_prefix(match.group("prefix"))
token = token[match.span()[1]:].strip()
# 1.4 Remove "the " if the token starts with it
if token.lower().startswith("the "):
token = token[4:]
def construct_output():
result_list = []
result_list.append(prefix)
# If we want the "the D of M Y" format
if dmy:
if day:
result_list.append("the")
result_list.append(day)
result_list.append("of")
result_list.append(month)
else:
# Otherwise use "M D Y" format
result_list.append(month)
result_list.append(day)
result_list.append(year)
result_list.append(suffix)
# Pad non-empty elements of list with spaces
return " ".join([result for result in result_list if result])
# 2 Match "DD Month" or "Month DD"
match = self.dm_regex.match(token)
if not match:
match = self.md_regex.match(token)
# If the second option is matched, we want to use the "M D Y" output format
if match:
dmy = False
if match:
# Extract the day, month and optionally the suffix from the match
day = self.ordinal.convert(match.group("day"))
month = self.get_month(match.group("month"))
try:
suffix = " ".join([c for c in match.group("bcsuffix").lower() if c not in (" ", ".")])
except (IndexError, AttributeError):
pass
return construct_output()
# 3 Match "MM-DD-YY(YY)", "YY(YY)-MM-DD", "DD-Month-YY(YY)", "YY(YY)-Month-DD" or "Month-DD-YY(YY)"
match = self.dash_date_mdy_regex.match(token) or self.dash_date_ymd_regex.match(token) or self.text_dmy_regex.match(token) or self.text_ymd_regex.match(token) or self.text_mdy_regex.match(token)
if match:
# Extract day, month and year from the match
day, month, year = match.group("day"), match.group("month"), match.group("year")
try:
# If the format is mm-dd-yyyy, and the "day" > 12, we don't use the dmy output format
if match.group(0).startswith(month) and int(day) > 12 or prefix and match.group(0).endswith(year) and int(month) <= 12:
dmy = False
# Swap the day and month if it's clear that a different format was used
if int(month) > 12:
month, day = day, month
except ValueError:
# Get here if month is textual instead of numeric
pass
# Convert month, year and day to the correct format
month, year = self.get_month(month), self.convert_year(year)
if day:
day = self.ordinal.convert(day)
return construct_output()
# 4 Match "DD Month YYYY", "Month YYYY", "YYYY", "YYYYs" or "Month DD, YYYY"
match = self.dmy_regex.match(token)
if not match:
match = self.mdy_regex.match(token)
# If the second option is matched, we want to use the "M D Y" output format
if match:
dmy = False
if match:
# Get and convert day, month, year and optionally suffix. Note that year may be converted using ordinal
# conversion if there was a suffix to year, eg: "2000s" -> "two thousands"
if match.group("day"):
day = self.ordinal.convert(match.group("day"))
month = self.get_month(match.group("month"))
if match.group("suffix"):
year = self.convert_year(match.group("year"), cardinal=False)
else:
year = self.convert_year(match.group("year"))
try:
suffix = " ".join([c for c in match.group("bcsuffix").lower() if c not in (" ", ".")])
except (IndexError, AttributeError):
pass
return construct_output()
return token
def get_prefix(self, prefix):
if prefix is None:
return prefix
if prefix.lower() in self.trans_day_dict:
return self.trans_day_dict[prefix.lower()]
return prefix.lower()
def convert_year(self, token: str, cardinal:bool = True) -> str:
# Check for edge case: "00" -> "o o"
if token == "00":
return "o o"
# If the token is of the form "...x00x", then we use cardinal conversion
# eg 2001 -> "two thousand one"
if token[-3:-1] == "00":
result = self.cardinal.convert(token)
# Convert to ordinal if needed. Add "s" or "es" depending on what the cardinal ends with
if not cardinal:
if result[-1] == "x":
result += "e"
result += "s"
return result
result_list = []
# Get the value from the third and fourth characters from the right
if token[-4:-2]:
result_list.append(self.cardinal.convert(token[-4:-2]))
# If the last two values are 00, add "hundred" or "hundreds"
if token[-2:] == "00":
result_list.append("hundred" if cardinal else "hundreds")
return " ".join(result_list)
# If the second character from the right is a 0, add "o", eg: "nineteen o six",
# But only if what's before is larger than 10. eg: "201" -> "two hundred one"
if token[-2:-1] == "0":
if len(token) == 3:
result_list.append("hundred")
else:
result_list.append("o")
# Get the text for the right two values
year_text = self.cardinal.convert(token[-2:])
# If the value should not simply be a cardinal, replace "y" with "ies", and otherwise add "s".
# eg "nineteen thirty" -> "nineteen thirties"
if not cardinal:
if year_text.endswith("y"):
year_text = year_text[:-1] + "ies"
else:
year_text += "s" if year_text[-1] != "x" else "es"
result_list.append(year_text)
return " ".join(result_list)
def get_month(self, token: str) -> str:
if not token:
return token
if token.lower() in self.trans_month_dict:
return self.trans_month_dict[token.lower()]
return token.lower()
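# --- Hedged usage sketch (not part of the original file) ---
# Expected outputs assume the accompanying Cardinal/Ordinal converters behave
# as their names suggest; the second line restates an edge case documented above.
# date = Date()
# date.convert("14 Feb 2019")   # -> "the fourteenth of february twenty nineteen"
# date.convert("90s")           # -> "nineties"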
|
[
"re.compile"
] |
[((1096, 1114), 're.compile', 're.compile', (['"""[,\']"""'], {}), '("[,\']")\n', (1106, 1114), False, 'import re\n'), ((1185, 1322), 're.compile', 're.compile', (['"""^(?P<prefix>monday|tuesday|wednesday|thursday|friday|saturday|sunday|mon|tue|wed|thu|fri|sat|sun)\\\\.?"""'], {'flags': 're.I'}), "(\n '^(?P<prefix>monday|tuesday|wednesday|thursday|friday|saturday|sunday|mon|tue|wed|thu|fri|sat|sun)\\\\.?'\n , flags=re.I)\n", (1195, 1322), False, 'import re\n'), ((1394, 1516), 're.compile', 're.compile', (['"""^(?P<year>\\\\d{2,5}) *(?:-|\\\\.|/) *(?P<month>\\\\d{1,2}) *(?:-|\\\\.|/) *(?P<day>\\\\d{1,2})$"""'], {'flags': 're.I'}), "(\n '^(?P<year>\\\\d{2,5}) *(?:-|\\\\.|/) *(?P<month>\\\\d{1,2}) *(?:-|\\\\.|/) *(?P<day>\\\\d{1,2})$'\n , flags=re.I)\n", (1404, 1516), False, 'import re\n'), ((1583, 1705), 're.compile', 're.compile', (['"""^(?P<month>\\\\d{1,2}) *(?:-|\\\\.|/) *(?P<day>\\\\d{1,2}) *(?:-|\\\\.|/) *(?P<year>\\\\d{2,5})$"""'], {'flags': 're.I'}), "(\n '^(?P<month>\\\\d{1,2}) *(?:-|\\\\.|/) *(?P<day>\\\\d{1,2}) *(?:-|\\\\.|/) *(?P<year>\\\\d{2,5})$'\n , flags=re.I)\n", (1593, 1705), False, 'import re\n'), ((1766, 2018), 're.compile', 're.compile', (['"""^(?P<year>\\\\d{2,5}) *(?:-|\\\\.|/) *(?P<month>january|february|march|april|may|june|july|august|september|october|november|december|sept|jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec) *(?:-|\\\\.|/) *(?P<day>\\\\d{1,2})$"""'], {'flags': 're.I'}), "(\n '^(?P<year>\\\\d{2,5}) *(?:-|\\\\.|/) *(?P<month>january|february|march|april|may|june|july|august|september|october|november|december|sept|jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec) *(?:-|\\\\.|/) *(?P<day>\\\\d{1,2})$'\n , flags=re.I)\n", (1776, 2018), False, 'import re\n'), ((2079, 2331), 're.compile', 're.compile', (['"""^(?P<day>\\\\d{1,2}) *(?:-|\\\\.|/) *(?P<month>january|february|march|april|may|june|july|august|september|october|november|december|sept|jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec) *(?:-|\\\\.|/) *(?P<year>\\\\d{2,5})$"""'], {'flags': 're.I'}), "(\n '^(?P<day>\\\\d{1,2}) *(?:-|\\\\.|/) *(?P<month>january|february|march|april|may|june|july|august|september|october|november|december|sept|jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec) *(?:-|\\\\.|/) *(?P<year>\\\\d{2,5})$'\n , flags=re.I)\n", (2089, 2331), False, 'import re\n'), ((2392, 2644), 're.compile', 're.compile', (['"""^(?P<month>january|february|march|april|may|june|july|august|september|october|november|december|sept|jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec) *(?:-|\\\\.|/) *(?P<day>\\\\d{1,2}) *(?:-|\\\\.|/) *(?P<year>\\\\d{2,5})$"""'], {'flags': 're.I'}), "(\n '^(?P<month>january|february|march|april|may|june|july|august|september|october|november|december|sept|jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec) *(?:-|\\\\.|/) *(?P<day>\\\\d{1,2}) *(?:-|\\\\.|/) *(?P<year>\\\\d{2,5})$'\n , flags=re.I)\n", (2402, 2644), False, 'import re\n'), ((2728, 3023), 're.compile', 're.compile', (['"""^(?:(?:(?P<day>\\\\d{1,2}) +(of +)?)?(?P<month>january|february|march|april|may|june|july|august|september|october|november|december|sept|jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec)\\\\.? +)?(?P<year>\\\\d{1,5})(?P<suffix>s?)\\\\/?(?: *(?P<bcsuffix>[A-Z\\\\.]+)?)$"""'], {'flags': 're.I'}), "(\n '^(?:(?:(?P<day>\\\\d{1,2}) +(of +)?)?(?P<month>january|february|march|april|may|june|july|august|september|october|november|december|sept|jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec)\\\\.? 
+)?(?P<year>\\\\d{1,5})(?P<suffix>s?)\\\\/?(?: *(?P<bcsuffix>[A-Z\\\\.]+)?)$'\n , flags=re.I)\n", (2738, 3023), False, 'import re\n'), ((3079, 3359), 're.compile', 're.compile', (['"""^(?P<month>january|february|march|april|may|june|july|august|september|october|november|december|sept|jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec)?\\\\.? *(?P<day>\\\\d{1,2})? +(?P<year>\\\\d{1,5})(?P<suffix>s?)\\\\/?(?: *(?P<bcsuffix>[A-Z\\\\.]+)?)$"""'], {'flags': 're.I'}), "(\n '^(?P<month>january|february|march|april|may|june|july|august|september|october|november|december|sept|jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec)?\\\\.? *(?P<day>\\\\d{1,2})? +(?P<year>\\\\d{1,5})(?P<suffix>s?)\\\\/?(?: *(?P<bcsuffix>[A-Z\\\\.]+)?)$'\n , flags=re.I)\n", (3089, 3359), False, 'import re\n'), ((3409, 3656), 're.compile', 're.compile', (['"""^(?P<day>\\\\d{1,2}) +(of +)?(?P<month>january|february|march|april|may|june|july|august|september|october|november|december|sept|jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec)\\\\.?(?: *(?P<bcsuffix>[A-Z\\\\.]+)?)$"""'], {'flags': 're.I'}), "(\n '^(?P<day>\\\\d{1,2}) +(of +)?(?P<month>january|february|march|april|may|june|july|august|september|october|november|december|sept|jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec)\\\\.?(?: *(?P<bcsuffix>[A-Z\\\\.]+)?)$'\n , flags=re.I)\n", (3419, 3656), False, 'import re\n'), ((3707, 3947), 're.compile', 're.compile', (['"""^(?P<month>january|february|march|april|may|june|july|august|september|october|november|december|sept|jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec)\\\\.? +(?P<day>\\\\d{1,2})(?: *(?P<bcsuffix>[A-Z\\\\.]+)?)$"""'], {'flags': 're.I'}), "(\n '^(?P<month>january|february|march|april|may|june|july|august|september|october|november|december|sept|jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec)\\\\.? +(?P<day>\\\\d{1,2})(?: *(?P<bcsuffix>[A-Z\\\\.]+)?)$'\n , flags=re.I)\n", (3717, 3947), False, 'import re\n'), ((4075, 4138), 're.compile', 're.compile', (['"""(?:(?<=\\\\d)|(?<=\\\\d ))(?:th|nd|rd|st)"""'], {'flags': 're.I'}), "('(?:(?<=\\\\d)|(?<=\\\\d ))(?:th|nd|rd|st)', flags=re.I)\n", (4085, 4138), False, 'import re\n')]
|
import sys
from cx_Freeze import setup, Executable
import requests
import os
from multiprocessing import Queue
build_exe_options = {
"includes":
[
'os',
'requests',
'json',
'queue'
]
}
base = None
executable = Executable(r"auto_checkin.py", base=base, icon = "auto_checkin.ico")
setup(
name = "auto_checkin",
version = "0.1",
description = "blue.fun Check in",
options = {'build_exe': build_exe_options},
executables = [executable],
author = "kevtyle",
)
|
[
"cx_Freeze.Executable",
"cx_Freeze.setup"
] |
[((270, 335), 'cx_Freeze.Executable', 'Executable', (['"""auto_checkin.py"""'], {'base': 'base', 'icon': '"""auto_checkin.ico"""'}), "('auto_checkin.py', base=base, icon='auto_checkin.ico')\n", (280, 335), False, 'from cx_Freeze import setup, Executable\n'), ((340, 508), 'cx_Freeze.setup', 'setup', ([], {'name': '"""auto_checkin"""', 'version': '"""0.1"""', 'description': '"""blue.fun Check in"""', 'options': "{'build_exe': build_exe_options}", 'executables': '[executable]', 'author': '"""kevtyle"""'}), "(name='auto_checkin', version='0.1', description='blue.fun Check in',\n options={'build_exe': build_exe_options}, executables=[executable],\n author='kevtyle')\n", (345, 508), False, 'from cx_Freeze import setup, Executable\n')]
|
"""
Author:<NAME>
Version: 0.1
This script is created to make life easier for riggers who do a lot of connections in the Graph Editor.
It is designed to be easily extended to encompass more node types by simply adding entries to the typeDict dictionary
in the form: {NODETYPE: (INPUTPLUG, OUTPUTPLUG)}
"""
import sys
if not "C:/tools/mayaTools" in sys.path:
sys.path.append("C:/tools/mayaTools")
import maya.api.OpenMaya as om2
from connectByType.constants import CONSTANTS as CONST
reload(CONST)
dgMod = om2.MDGModifier()
def getInputOutput(srcMobj, trgMobj):
"""
    Get the source output plugs and target input plugs for the selected MObjects
    :param srcMobj: MObject
    :param trgMobj: MObject
    :return: tuple of (source output plug names, target input plug names)
"""
srcMFn = om2.MFnDependencyNode(srcMobj)
trgMFn = om2.MFnDependencyNode(trgMobj)
srcType = srcMFn.typeName
trgType = trgMFn.typeName
srcInputPlugs, srcOutputPlugs = CONST.typeDict[srcType]
trgInputPlugs, trgOutputPlugs = CONST.typeDict[trgType]
ioPlugsList = [srcOutputPlugs, trgInputPlugs]
return tuple(ioPlugsList)
def getMPlugs(mFn, mPlugList):
"""
get MPlugs from a list
:param mFn: mFn
:param mPlugList: list
:return: list
"""
if type(mPlugList) == list:
returnList = list()
for plug in mPlugList:
mPlug = mFn.findPlug(plug, False)
if mPlug.isArray or plug == "worldMatrix":
returnList.append(mPlug.elementByLogicalIndex(0))
else:
returnList.append(mPlug)
return returnList
elif type(mPlugList) == tuple:
returnTuple = list()
for inputList in mPlugList:
returnTuple.append([mFn.findPlug(mPlug, False) for mPlug in inputList])
return tuple(returnTuple)
def connectNodes(srcMobjHandle, trgMobjHandle):
"""
connect nodes
:param srcMobjHandle: MObjectHandle
:param trgMobjHandle: MObjectHandle
:return: None
"""
assert srcMobjHandle.isValid() == True, "src MObject not valid"
assert trgMobjHandle.isValid() == True, "trg MObject not valid"
srcMobj = srcMobjHandle.object()
trgMobj = trgMobjHandle.object()
srcMFn = om2.MFnDependencyNode(srcMobj)
trgMFn = om2.MFnDependencyNode(trgMobj)
srcOutputPlugList, trgInputPlugList = getInputOutput(srcMobj, trgMobj)
srcOutputPlugs = getMPlugs(srcMFn, srcOutputPlugList)
trgInputPlugs = getMPlugs(trgMFn, trgInputPlugList)
for srcPlug, trgPlug in zip(srcOutputPlugs, trgInputPlugs):
dgMod.connect(srcPlug, trgPlug)
selList = om2.MGlobal.getActiveSelectionList()
mobjs = [selList.getDependNode(idx) for idx in range(selList.length())]
srcMFn = om2.MFnDependencyNode(mobjs[0])
srcMobjHandle = om2.MObjectHandle(mobjs[0])
srcType = srcMFn.typeName
for mobj in mobjs[1:]:
trgMobjHandle = om2.MObjectHandle(mobj)
connectNodes(srcMobjHandle, trgMobjHandle)
dgMod.doIt()
|
[
"maya.api.OpenMaya.MDGModifier",
"sys.path.append",
"maya.api.OpenMaya.MFnDependencyNode",
"maya.api.OpenMaya.MObjectHandle",
"maya.api.OpenMaya.MGlobal.getActiveSelectionList"
] |
[((507, 524), 'maya.api.OpenMaya.MDGModifier', 'om2.MDGModifier', ([], {}), '()\n', (522, 524), True, 'import maya.api.OpenMaya as om2\n'), ((2538, 2574), 'maya.api.OpenMaya.MGlobal.getActiveSelectionList', 'om2.MGlobal.getActiveSelectionList', ([], {}), '()\n', (2572, 2574), True, 'import maya.api.OpenMaya as om2\n'), ((2656, 2687), 'maya.api.OpenMaya.MFnDependencyNode', 'om2.MFnDependencyNode', (['mobjs[0]'], {}), '(mobjs[0])\n', (2677, 2687), True, 'import maya.api.OpenMaya as om2\n'), ((2704, 2731), 'maya.api.OpenMaya.MObjectHandle', 'om2.MObjectHandle', (['mobjs[0]'], {}), '(mobjs[0])\n', (2721, 2731), True, 'import maya.api.OpenMaya as om2\n'), ((358, 395), 'sys.path.append', 'sys.path.append', (['"""C:/tools/mayaTools"""'], {}), "('C:/tools/mayaTools')\n", (373, 395), False, 'import sys\n'), ((708, 738), 'maya.api.OpenMaya.MFnDependencyNode', 'om2.MFnDependencyNode', (['srcMobj'], {}), '(srcMobj)\n', (729, 738), True, 'import maya.api.OpenMaya as om2\n'), ((752, 782), 'maya.api.OpenMaya.MFnDependencyNode', 'om2.MFnDependencyNode', (['trgMobj'], {}), '(trgMobj)\n', (773, 782), True, 'import maya.api.OpenMaya as om2\n'), ((2155, 2185), 'maya.api.OpenMaya.MFnDependencyNode', 'om2.MFnDependencyNode', (['srcMobj'], {}), '(srcMobj)\n', (2176, 2185), True, 'import maya.api.OpenMaya as om2\n'), ((2199, 2229), 'maya.api.OpenMaya.MFnDependencyNode', 'om2.MFnDependencyNode', (['trgMobj'], {}), '(trgMobj)\n', (2220, 2229), True, 'import maya.api.OpenMaya as om2\n'), ((2802, 2825), 'maya.api.OpenMaya.MObjectHandle', 'om2.MObjectHandle', (['mobj'], {}), '(mobj)\n', (2819, 2825), True, 'import maya.api.OpenMaya as om2\n')]
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tools for protein features.
"""
from collections import OrderedDict
from enum import Enum
class ProteinTokenizer(object):
"""
Protein Tokenizer.
"""
padding_token = '<pad>'
mask_token = '<mask>'
start_token = class_token = '<cls>'
end_token = seperate_token = '<sep>'
unknown_token = '<unk>'
padding_token_id = 0
mask_token_id = 1
start_token_id = class_token_id = 2
end_token_id = seperate_token_id = 3
unknown_token_id = 4
special_token_ids = [padding_token_id, mask_token_id, start_token_id, end_token_id, unknown_token_id]
vocab = OrderedDict([
(padding_token, 0),
(mask_token, 1),
(class_token, 2),
(seperate_token, 3),
(unknown_token, 4),
('A', 5),
('B', 6),
('C', 7),
('D', 8),
('E', 9),
('F', 10),
('G', 11),
('H', 12),
('I', 13),
('K', 14),
('L', 15),
('M', 16),
('N', 17),
('O', 18),
('P', 19),
('Q', 20),
('R', 21),
('S', 22),
('T', 23),
('U', 24),
('V', 25),
('W', 26),
('X', 27),
('Y', 28),
('Z', 29)])
def tokenize(self, sequence):
"""
Split the sequence into token list.
Args:
sequence: The sequence to be tokenized.
Returns:
tokens: The token lists.
"""
return [x for x in sequence]
def convert_token_to_id(self, token):
"""
Converts a token to an id.
Args:
token: Token.
Returns:
id: The id of the input token.
"""
if token not in self.vocab:
return ProteinTokenizer.unknown_token_id
else:
return ProteinTokenizer.vocab[token]
def convert_tokens_to_ids(self, tokens):
"""
Convert multiple tokens to ids.
Args:
tokens: The list of tokens.
Returns:
ids: The id list of the input tokens.
"""
return [self.convert_token_to_id(token) for token in tokens]
def gen_token_ids(self, sequence):
"""
        Generate the list of token ids according to the input sequence.
Args:
sequence: Sequence to be tokenized.
Returns:
token_ids: The list of token ids.
"""
tokens = []
tokens.append(ProteinTokenizer.start_token)
tokens.extend(self.tokenize(sequence))
tokens.append(ProteinTokenizer.end_token)
token_ids = self.convert_tokens_to_ids(tokens)
return token_ids
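# Worked example (for illustration): ProteinTokenizer().gen_token_ids('AC')
# returns [2, 5, 7, 3], i.e. <cls>, 'A', 'C', <sep>.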
|
[
"collections.OrderedDict"
] |
[((1217, 1621), 'collections.OrderedDict', 'OrderedDict', (["[(padding_token, 0), (mask_token, 1), (class_token, 2), (seperate_token, 3),\n (unknown_token, 4), ('A', 5), ('B', 6), ('C', 7), ('D', 8), ('E', 9), (\n 'F', 10), ('G', 11), ('H', 12), ('I', 13), ('K', 14), ('L', 15), ('M', \n 16), ('N', 17), ('O', 18), ('P', 19), ('Q', 20), ('R', 21), ('S', 22),\n ('T', 23), ('U', 24), ('V', 25), ('W', 26), ('X', 27), ('Y', 28), ('Z', 29)\n ]"], {}), "([(padding_token, 0), (mask_token, 1), (class_token, 2), (\n seperate_token, 3), (unknown_token, 4), ('A', 5), ('B', 6), ('C', 7), (\n 'D', 8), ('E', 9), ('F', 10), ('G', 11), ('H', 12), ('I', 13), ('K', 14\n ), ('L', 15), ('M', 16), ('N', 17), ('O', 18), ('P', 19), ('Q', 20), (\n 'R', 21), ('S', 22), ('T', 23), ('U', 24), ('V', 25), ('W', 26), ('X', \n 27), ('Y', 28), ('Z', 29)])\n", (1228, 1621), False, 'from collections import OrderedDict\n')]
|
import numpy as np
import cv2
import Person
import time
# Entry and exit counters
cnt_up = 0
cnt_down = 0
# Video source
#cap = cv2.VideoCapture(0)
cap = cv2.VideoCapture('peopleCounter.avi')
# Video properties
##cap.set(3, 160) #Width
##cap.set(4, 120) #Height
# Print the capture properties to the console
for i in range(19):
print(i, cap.get(i))
w = cap.get(3)
h = cap.get(4)
frameArea = h*w
#areaTH = frameArea/250 # Area threshold above which a blob is counted as a person
areaTH = 1500
print('Area Threshold', areaTH)
# Entry/exit lines
line_up = int(2*(h/5))
line_down = int(3*(h/5))
up_limit = int(1*(h/5))
down_limit = int(4*(h/5))
print("Red line y:", str(line_down))
print("Blue line y:", str(line_up))
line_down_color = (255, 0, 0)
line_up_color = (0, 0, 255)
pt1 = [0, line_down];
pt2 = [w, line_down];
pts_L1 = np.array([pt1, pt2], np.int32) # Lower decision boundary
pts_L1 = pts_L1.reshape((-1, 1, 2))
pt3 = [0, line_up];
pt4 = [w, line_up];
pts_L2 = np.array([pt3, pt4], np.int32) # Upper decision boundary
pts_L2 = pts_L2.reshape((-1, 1, 2))
pt5 = [0, up_limit];
pt6 = [w, up_limit];
pts_L3 = np.array([pt5, pt6], np.int32)
pts_L3 = pts_L3.reshape((-1, 1, 2))
pt7 = [0, down_limit];
pt8 = [w, down_limit];
pts_L4 = np.array([pt7,pt8], np.int32)
pts_L4 = pts_L4.reshape((-1, 1, 2))
# Background subtractor
fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows = True)
# Structuring elements for the morphological filters
kernelOp = np.ones((3, 3), np.uint8)
kernelOp2 = np.ones((5, 5), np.uint8)
kernelCl = np.ones((11, 11), np.uint8)
# Variables
font = cv2.FONT_HERSHEY_SIMPLEX
persons = []
max_p_age = 5
pid = 1
while(cap.isOpened()):
##for image in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    # Read one frame from the video source
ret, frame = cap.read()
## frame = image.array
for i in persons:
i.age_one() #age every person one frame
#########################
    #    PRE-PROCESSING     #
#########################
    # Apply background subtraction
fgmask = fgbg.apply(frame)
fgmask2 = fgbg.apply(frame)
try:
        # Binarization to remove shadows (gray pixels)
ret, imBin= cv2.threshold(fgmask, 200, 255, cv2.THRESH_BINARY)
ret, imBin2 = cv2.threshold(fgmask2, 200, 255, cv2.THRESH_BINARY)
        # Opening (erode->dilate) to remove noise
mask = cv2.morphologyEx(imBin, cv2.MORPH_OPEN, kernelOp)
mask2 = cv2.morphologyEx(imBin2, cv2.MORPH_OPEN, kernelOp)
        # Closing (dilate -> erode) to join white regions
mask = cv2.morphologyEx(mask , cv2.MORPH_CLOSE, kernelCl)
mask2 = cv2.morphologyEx(mask2, cv2.MORPH_CLOSE, kernelCl)
except:
print('EOF')
print('UP:', cnt_up)
print('DOWN:', cnt_down)
break
#################
    #   CONTOURS    #
#################
# RETR_EXTERNAL returns only extreme outer flags. All child contours are left behind.
_, contours0, hierarchy = cv2.findContours(mask2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours0:
area = cv2.contourArea(cnt)
if area > areaTH:
#################
# TRACKING #
#################
            # TODO: add conditions for multiple people and for objects entering/leaving the frame.
M = cv2.moments(cnt)
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
x, y, w, h = cv2.boundingRect(cnt)
new = True
if cy in range(up_limit, down_limit):
for i in persons:
if abs(cx-i.getX()) <= w and abs(cy-i.getY()) <= h:
                        # The object is close to one already detected: treat it as the same person
                        new = False
                        i.updateCoords(cx, cy)   # Update the object's coordinates and reset its age
if i.going_UP(line_down, line_up) == True:
cnt_up += 1;
print("ID:", i.getId(), 'crossed going up at', time.strftime("%c"))
elif i.going_DOWN(line_down, line_up) == True:
cnt_down += 1;
print("ID:", i.getId(), 'crossed going down at', time.strftime("%c"))
break
if i.getState() == '1':
if i.getDir() == 'down' and i.getY() > down_limit:
i.setDone()
elif i.getDir() == 'up' and i.getY() < up_limit:
i.setDone()
if i.timedOut():
                        # Remove i from the persons list
index = persons.index(i)
persons.pop(index)
                        del i  # free the memory used by i
if new == True:
p = Person.MyPerson(pid, cx, cy, max_p_age)
persons.append(p)
pid += 1
#################
            #   DRAWING   #
#################
cv2.circle(frame, (cx, cy), 5, (0, 0, 255), -1)
img = cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
#cv2.drawContours(frame, cnt, -1, (0,255,0), 3)
#END for cnt in contours0
#########################
    #   DRAW TRAJECTORIES   #
#########################
for i in persons:
## if len(i.getTracks()) >= 2:
## pts = np.array(i.getTracks(), np.int32)
## pts = pts.reshape((-1,1,2))
## frame = cv2.polylines(frame,[pts],False,i.getRGB())
## if i.getId() == 9:
## print str(i.getX()), ',', str(i.getY())
cv2.putText(frame, str(i.getId()), (i.getX(), i.getY()), font, 0.3, i.getRGB(), 1, cv2.LINE_AA)
#################
    #    IMAGES     #
#################
str_up = 'UP: ' + str(cnt_up)
str_down = 'DOWN: ' + str(cnt_down)
frame = cv2.polylines(frame, [pts_L1], False, line_down_color, thickness=2)
frame = cv2.polylines(frame, [pts_L2], False, line_up_color, thickness=2)
frame = cv2.polylines(frame, [pts_L3], False, (255,255,255), thickness=1)
frame = cv2.polylines(frame, [pts_L4], False, (255,255,255), thickness=1)
cv2.putText(frame, str_up, (10, 40), font, 0.5, (255, 255, 255), 2, cv2.LINE_AA)
cv2.putText(frame, str_up, (10, 40), font, 0.5, (0, 0, 255), 1, cv2.LINE_AA)
cv2.putText(frame, str_down, (10, 90), font, 0.5, (255, 255, 255), 2, cv2.LINE_AA)
cv2.putText(frame, str_down, (10, 90), font, 0.5, (255, 0, 0), 1, cv2.LINE_AA)
cv2.imshow('Frame', frame)
#cv2.imshow('Mask',mask)
    # Press ESC to exit
k = cv2.waitKey(30) & 0xff
if k == 27:
break
#END while(cap.isOpened())
#################
#    CLEANUP    #
#################
cap.release()
cv2.destroyAllWindows()
|
[
"numpy.ones",
"time.strftime",
"cv2.rectangle",
"cv2.imshow",
"cv2.contourArea",
"cv2.boundingRect",
"cv2.destroyAllWindows",
"cv2.circle",
"cv2.waitKey",
"cv2.morphologyEx",
"Person.MyPerson",
"cv2.createBackgroundSubtractorMOG2",
"cv2.putText",
"cv2.polylines",
"cv2.threshold",
"cv2.moments",
"cv2.VideoCapture",
"numpy.array",
"cv2.findContours"
] |
[((164, 201), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""peopleCounter.avi"""'], {}), "('peopleCounter.avi')\n", (180, 201), False, 'import cv2\n'), ((852, 882), 'numpy.array', 'np.array', (['[pt1, pt2]', 'np.int32'], {}), '([pt1, pt2], np.int32)\n', (860, 882), True, 'import numpy as np\n'), ((998, 1028), 'numpy.array', 'np.array', (['[pt3, pt4]', 'np.int32'], {}), '([pt3, pt4], np.int32)\n', (1006, 1028), True, 'import numpy as np\n'), ((1147, 1177), 'numpy.array', 'np.array', (['[pt5, pt6]', 'np.int32'], {}), '([pt5, pt6], np.int32)\n', (1155, 1177), True, 'import numpy as np\n'), ((1269, 1299), 'numpy.array', 'np.array', (['[pt7, pt8]', 'np.int32'], {}), '([pt7, pt8], np.int32)\n', (1277, 1299), True, 'import numpy as np\n'), ((1365, 1419), 'cv2.createBackgroundSubtractorMOG2', 'cv2.createBackgroundSubtractorMOG2', ([], {'detectShadows': '(True)'}), '(detectShadows=True)\n', (1399, 1419), False, 'import cv2\n'), ((1486, 1511), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.uint8'], {}), '((3, 3), np.uint8)\n', (1493, 1511), True, 'import numpy as np\n'), ((1524, 1549), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.uint8'], {}), '((5, 5), np.uint8)\n', (1531, 1549), True, 'import numpy as np\n'), ((1561, 1588), 'numpy.ones', 'np.ones', (['(11, 11)', 'np.uint8'], {}), '((11, 11), np.uint8)\n', (1568, 1588), True, 'import numpy as np\n'), ((7001, 7024), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (7022, 7024), False, 'import cv2\n'), ((3037, 3104), 'cv2.findContours', 'cv2.findContours', (['mask2', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(mask2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (3053, 3104), False, 'import cv2\n'), ((6102, 6169), 'cv2.polylines', 'cv2.polylines', (['frame', '[pts_L1]', '(False)', 'line_down_color'], {'thickness': '(2)'}), '(frame, [pts_L1], False, line_down_color, thickness=2)\n', (6115, 6169), False, 'import cv2\n'), ((6182, 6247), 'cv2.polylines', 'cv2.polylines', (['frame', '[pts_L2]', '(False)', 'line_up_color'], {'thickness': '(2)'}), '(frame, [pts_L2], False, line_up_color, thickness=2)\n', (6195, 6247), False, 'import cv2\n'), ((6260, 6327), 'cv2.polylines', 'cv2.polylines', (['frame', '[pts_L3]', '(False)', '(255, 255, 255)'], {'thickness': '(1)'}), '(frame, [pts_L3], False, (255, 255, 255), thickness=1)\n', (6273, 6327), False, 'import cv2\n'), ((6338, 6405), 'cv2.polylines', 'cv2.polylines', (['frame', '[pts_L4]', '(False)', '(255, 255, 255)'], {'thickness': '(1)'}), '(frame, [pts_L4], False, (255, 255, 255), thickness=1)\n', (6351, 6405), False, 'import cv2\n'), ((6408, 6493), 'cv2.putText', 'cv2.putText', (['frame', 'str_up', '(10, 40)', 'font', '(0.5)', '(255, 255, 255)', '(2)', 'cv2.LINE_AA'], {}), '(frame, str_up, (10, 40), font, 0.5, (255, 255, 255), 2, cv2.LINE_AA\n )\n', (6419, 6493), False, 'import cv2\n'), ((6493, 6569), 'cv2.putText', 'cv2.putText', (['frame', 'str_up', '(10, 40)', 'font', '(0.5)', '(0, 0, 255)', '(1)', 'cv2.LINE_AA'], {}), '(frame, str_up, (10, 40), font, 0.5, (0, 0, 255), 1, cv2.LINE_AA)\n', (6504, 6569), False, 'import cv2\n'), ((6574, 6661), 'cv2.putText', 'cv2.putText', (['frame', 'str_down', '(10, 90)', 'font', '(0.5)', '(255, 255, 255)', '(2)', 'cv2.LINE_AA'], {}), '(frame, str_down, (10, 90), font, 0.5, (255, 255, 255), 2, cv2.\n LINE_AA)\n', (6585, 6661), False, 'import cv2\n'), ((6661, 6739), 'cv2.putText', 'cv2.putText', (['frame', 'str_down', '(10, 90)', 'font', '(0.5)', '(255, 0, 0)', '(1)', 'cv2.LINE_AA'], {}), '(frame, str_down, (10, 90), font, 0.5, (255, 0, 0), 1, 
cv2.LINE_AA)\n', (6672, 6739), False, 'import cv2\n'), ((6745, 6771), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'frame'], {}), "('Frame', frame)\n", (6755, 6771), False, 'import cv2\n'), ((2229, 2279), 'cv2.threshold', 'cv2.threshold', (['fgmask', '(200)', '(255)', 'cv2.THRESH_BINARY'], {}), '(fgmask, 200, 255, cv2.THRESH_BINARY)\n', (2242, 2279), False, 'import cv2\n'), ((2302, 2353), 'cv2.threshold', 'cv2.threshold', (['fgmask2', '(200)', '(255)', 'cv2.THRESH_BINARY'], {}), '(fgmask2, 200, 255, cv2.THRESH_BINARY)\n', (2315, 2353), False, 'import cv2\n'), ((2421, 2470), 'cv2.morphologyEx', 'cv2.morphologyEx', (['imBin', 'cv2.MORPH_OPEN', 'kernelOp'], {}), '(imBin, cv2.MORPH_OPEN, kernelOp)\n', (2437, 2470), False, 'import cv2\n'), ((2487, 2537), 'cv2.morphologyEx', 'cv2.morphologyEx', (['imBin2', 'cv2.MORPH_OPEN', 'kernelOp'], {}), '(imBin2, cv2.MORPH_OPEN, kernelOp)\n', (2503, 2537), False, 'import cv2\n'), ((2619, 2668), 'cv2.morphologyEx', 'cv2.morphologyEx', (['mask', 'cv2.MORPH_CLOSE', 'kernelCl'], {}), '(mask, cv2.MORPH_CLOSE, kernelCl)\n', (2635, 2668), False, 'import cv2\n'), ((2686, 2736), 'cv2.morphologyEx', 'cv2.morphologyEx', (['mask2', 'cv2.MORPH_CLOSE', 'kernelCl'], {}), '(mask2, cv2.MORPH_CLOSE, kernelCl)\n', (2702, 2736), False, 'import cv2\n'), ((3146, 3166), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (3161, 3166), False, 'import cv2\n'), ((6848, 6863), 'cv2.waitKey', 'cv2.waitKey', (['(30)'], {}), '(30)\n', (6859, 6863), False, 'import cv2\n'), ((3416, 3432), 'cv2.moments', 'cv2.moments', (['cnt'], {}), '(cnt)\n', (3427, 3432), False, 'import cv2\n'), ((3538, 3559), 'cv2.boundingRect', 'cv2.boundingRect', (['cnt'], {}), '(cnt)\n', (3554, 3559), False, 'import cv2\n'), ((5191, 5238), 'cv2.circle', 'cv2.circle', (['frame', '(cx, cy)', '(5)', '(0, 0, 255)', '(-1)'], {}), '(frame, (cx, cy), 5, (0, 0, 255), -1)\n', (5201, 5238), False, 'import cv2\n'), ((5257, 5317), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', '(2)'], {}), '(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\n', (5270, 5317), False, 'import cv2\n'), ((4982, 5021), 'Person.MyPerson', 'Person.MyPerson', (['pid', 'cx', 'cy', 'max_p_age'], {}), '(pid, cx, cy, max_p_age)\n', (4997, 5021), False, 'import Person\n'), ((4149, 4168), 'time.strftime', 'time.strftime', (['"""%c"""'], {}), "('%c')\n", (4162, 4168), False, 'import time\n'), ((4361, 4380), 'time.strftime', 'time.strftime', (['"""%c"""'], {}), "('%c')\n", (4374, 4380), False, 'import time\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-13 22:51
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Ingredient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, verbose_name='Name')),
],
),
migrations.CreateModel(
name='Recipe',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, verbose_name='Name')),
('author', models.CharField(blank=True, max_length=100, null=True, verbose_name='Author')),
('preparation_time', models.TimeField(blank=True, null=True, verbose_name='Time required in active preparation')),
('idle_time', models.TimeField(blank=True, help_text='e.g. marinating time, standing time', null=True, verbose_name='Waiting time required')),
('cook_time', models.TimeField(blank=True, null=True, verbose_name='Time spent actively cooking/baking/grilling')),
('outdoor_cooking_friendly', models.BooleanField(default=False, verbose_name='Whether the meal is suitable for outdoor cooking')),
('instructions', models.TextField(verbose_name='Instructions')),
],
options={
'ordering': ('name', 'author'),
},
),
migrations.CreateModel(
name='RecipeIngredient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='Amount used')),
('unit_of_measure', models.CharField(blank=True, help_text='pounds, ounces, cups, kilograms, ...', max_length=50, null=True, verbose_name='Unit of measure')),
('preparation_method', models.CharField(blank=True, help_text='chopped, diced, julienned, etc.', max_length=50, null=True, verbose_name='Means of preparation')),
('ingredient', models.ForeignKey(help_text='Ingredient', on_delete=django.db.models.deletion.CASCADE, to='cookery.Ingredient')),
('recipe', models.ForeignKey(help_text='Ingredient', on_delete=django.db.models.deletion.CASCADE, to='cookery.Recipe')),
],
),
migrations.CreateModel(
name='RecipeTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, verbose_name='Name')),
],
),
migrations.AddField(
model_name='recipe',
name='ingredients',
field=models.ManyToManyField(help_text='Ingredients', through='cookery.RecipeIngredient', to='cookery.Ingredient'),
),
migrations.AddField(
model_name='recipe',
name='tags',
field=models.ManyToManyField(help_text='Tags', to='cookery.RecipeTag'),
),
]
|
[
"django.db.models.TextField",
"django.db.models.ManyToManyField",
"django.db.models.TimeField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"django.db.models.AutoField",
"django.db.models.PositiveSmallIntegerField"
] |
[((3145, 3258), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'help_text': '"""Ingredients"""', 'through': '"""cookery.RecipeIngredient"""', 'to': '"""cookery.Ingredient"""'}), "(help_text='Ingredients', through=\n 'cookery.RecipeIngredient', to='cookery.Ingredient')\n", (3167, 3258), False, 'from django.db import migrations, models\n'), ((3371, 3435), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'help_text': '"""Tags"""', 'to': '"""cookery.RecipeTag"""'}), "(help_text='Tags', to='cookery.RecipeTag')\n", (3393, 3435), False, 'from django.db import migrations, models\n'), ((403, 496), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (419, 496), False, 'from django.db import migrations, models\n'), ((520, 573), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'verbose_name': '"""Name"""'}), "(max_length=100, verbose_name='Name')\n", (536, 573), False, 'from django.db import migrations, models\n'), ((705, 798), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (721, 798), False, 'from django.db import migrations, models\n'), ((822, 875), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'verbose_name': '"""Name"""'}), "(max_length=100, verbose_name='Name')\n", (838, 875), False, 'from django.db import migrations, models\n'), ((905, 983), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(100)', 'null': '(True)', 'verbose_name': '"""Author"""'}), "(blank=True, max_length=100, null=True, verbose_name='Author')\n", (921, 983), False, 'from django.db import migrations, models\n'), ((1023, 1119), 'django.db.models.TimeField', 'models.TimeField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""Time required in active preparation"""'}), "(blank=True, null=True, verbose_name=\n 'Time required in active preparation')\n", (1039, 1119), False, 'from django.db import migrations, models\n'), ((1147, 1283), 'django.db.models.TimeField', 'models.TimeField', ([], {'blank': '(True)', 'help_text': '"""e.g. marinating time, standing time"""', 'null': '(True)', 'verbose_name': '"""Waiting time required"""'}), "(blank=True, help_text=\n 'e.g. 
marinating time, standing time', null=True, verbose_name=\n 'Waiting time required')\n", (1163, 1283), False, 'from django.db import migrations, models\n'), ((1306, 1410), 'django.db.models.TimeField', 'models.TimeField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""Time spent actively cooking/baking/grilling"""'}), "(blank=True, null=True, verbose_name=\n 'Time spent actively cooking/baking/grilling')\n", (1322, 1410), False, 'from django.db import migrations, models\n'), ((1453, 1557), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'verbose_name': '"""Whether the meal is suitable for outdoor cooking"""'}), "(default=False, verbose_name=\n 'Whether the meal is suitable for outdoor cooking')\n", (1472, 1557), False, 'from django.db import migrations, models\n'), ((1588, 1633), 'django.db.models.TextField', 'models.TextField', ([], {'verbose_name': '"""Instructions"""'}), "(verbose_name='Instructions')\n", (1604, 1633), False, 'from django.db import migrations, models\n'), ((1860, 1953), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1876, 1953), False, 'from django.db import migrations, models\n'), ((1981, 2069), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""Amount used"""'}), "(blank=True, null=True, verbose_name=\n 'Amount used')\n", (2013, 2069), False, 'from django.db import migrations, models\n'), ((2103, 2248), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""pounds, ounces, cups, kilograms, ..."""', 'max_length': '(50)', 'null': '(True)', 'verbose_name': '"""Unit of measure"""'}), "(blank=True, help_text=\n 'pounds, ounces, cups, kilograms, ...', max_length=50, null=True,\n verbose_name='Unit of measure')\n", (2119, 2248), False, 'from django.db import migrations, models\n'), ((2281, 2421), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""chopped, diced, julienned, etc."""', 'max_length': '(50)', 'null': '(True)', 'verbose_name': '"""Means of preparation"""'}), "(blank=True, help_text='chopped, diced, julienned, etc.',\n max_length=50, null=True, verbose_name='Means of preparation')\n", (2297, 2421), False, 'from django.db import migrations, models\n'), ((2451, 2567), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'help_text': '"""Ingredient"""', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""cookery.Ingredient"""'}), "(help_text='Ingredient', on_delete=django.db.models.\n deletion.CASCADE, to='cookery.Ingredient')\n", (2468, 2567), False, 'from django.db import migrations, models\n'), ((2592, 2704), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'help_text': '"""Ingredient"""', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""cookery.Recipe"""'}), "(help_text='Ingredient', on_delete=django.db.models.\n deletion.CASCADE, to='cookery.Recipe')\n", (2609, 2704), False, 'from django.db import migrations, models\n'), ((2834, 2927), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2850, 2927), False, 'from django.db import 
migrations, models\n'), ((2951, 3004), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'verbose_name': '"""Name"""'}), "(max_length=100, verbose_name='Name')\n", (2967, 3004), False, 'from django.db import migrations, models\n')]
|
# encoding: utf-8
"""
Chart builder and related objects.
"""
from __future__ import absolute_import, print_function, unicode_literals
from contextlib import contextmanager
from xlsxwriter import Workbook
from StringIO import StringIO
class _BaseWorkbookWriter(object):
"""
Base class for workbook writers, providing shared members.
"""
def __init__(self, chart_data):
super(_BaseWorkbookWriter, self).__init__()
self._chart_data = chart_data
@property
def xlsx_blob(self):
"""
Return the byte stream of an Excel file formatted as chart data for
the category chart specified in the chart data object.
"""
xlsx_file = StringIO()
with self._open_worksheet(xlsx_file) as (workbook, worksheet):
self._populate_worksheet(workbook, worksheet)
return xlsx_file.getvalue()
@contextmanager
def _open_worksheet(self, xlsx_file):
"""
Enable XlsxWriter Worksheet object to be opened, operated on, and
then automatically closed within a `with` statement. A filename or
stream object (such as a ``StringIO`` instance) is expected as
*xlsx_file*.
"""
workbook = Workbook(xlsx_file, {"in_memory": True})
worksheet = workbook.add_worksheet()
yield workbook, worksheet
workbook.close()
def _populate_worksheet(self, workbook, worksheet):
"""
Must be overridden by each subclass to provide the particulars of
writing the spreadsheet data.
"""
raise NotImplementedError("must be provided by each subclass")
class CategoryWorkbookWriter(_BaseWorkbookWriter):
"""
Determines Excel worksheet layout and can write an Excel workbook from
a CategoryChartData object. Serves as the authority for Excel worksheet
ranges.
"""
@property
def categories_ref(self):
"""
The Excel worksheet reference to the categories for this chart (not
including the column heading).
"""
categories = self._chart_data.categories
if categories.depth == 0:
raise ValueError("chart data contains no categories")
right_col = chr(ord("A") + categories.depth - 1)
bottom_row = categories.leaf_count + 1
return "Sheet1!$A$2:$%s$%d" % (right_col, bottom_row)
def series_name_ref(self, series):
"""
Return the Excel worksheet reference to the cell containing the name
for *series*. This also serves as the column heading for the series
values.
"""
return "Sheet1!$%s$1" % self._series_col_letter(series)
def values_ref(self, series):
"""
The Excel worksheet reference to the values for this series (not
including the column heading).
"""
return "Sheet1!${col_letter}$2:${col_letter}${bottom_row}".format(
**{
"col_letter": self._series_col_letter(series),
"bottom_row": len(series) + 1,
}
)
@staticmethod
def _column_reference(column_number):
"""Return str Excel column reference like 'BQ' for *column_number*.
*column_number* is an int in the range 1-16384 inclusive, where
1 maps to column 'A'.
"""
if column_number < 1 or column_number > 16384:
raise ValueError("column_number must be in range 1-16384")
# ---Work right-to-left, one order of magnitude at a time. Note there
# is no zero representation in Excel address scheme, so this is
# not just a conversion to base-26---
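        # ---Worked examples (added for illustration): 1 -> 'A', 26 -> 'Z',
        #    27 -> 'AA', 28 -> 'AB', 702 -> 'ZZ', 703 -> 'AAA'---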
col_ref = ""
while column_number:
remainder = column_number % 26
if remainder == 0:
remainder = 26
col_letter = chr(ord("A") + remainder - 1)
col_ref = col_letter + col_ref
# ---Advance to next order of magnitude or terminate loop. The
            # minus-one in this expression reflects the fact that the next lower
            # order of magnitude has a minimum value of 1 (not zero). This is
            # essentially the complement to the "if it's 0 make it 26" step
# above.---
column_number = (column_number - 1) // 26
return col_ref
def _populate_worksheet(self, workbook, worksheet):
"""
Write the chart data contents to *worksheet* in category chart
layout. Write categories starting in the first column starting in
the second row, and proceeding one column per category level (for
charts having multi-level categories). Write series as columns
starting in the next following column, placing the series title in
the first cell.
"""
self._write_categories(workbook, worksheet)
self._write_series(workbook, worksheet)
def _series_col_letter(self, series):
"""
The letter of the Excel worksheet column in which the data for a
series appears.
"""
column_number = 1 + series.categories.depth + series.index
return self._column_reference(column_number)
def _write_categories(self, workbook, worksheet):
"""
Write the categories column(s) to *worksheet*. Categories start in
the first column starting in the second row, and proceeding one
column per category level (for charts having multi-level categories).
A date category is formatted as a date. All others are formatted
`General`.
"""
categories = self._chart_data.categories
num_format = workbook.add_format({"num_format": categories.number_format})
depth = categories.depth
for idx, level in enumerate(categories.levels):
col = depth - idx - 1
self._write_cat_column(worksheet, col, level, num_format)
def _write_cat_column(self, worksheet, col, level, num_format):
"""
Write a category column defined by *level* to *worksheet* at offset
*col* and formatted with *num_format*.
"""
worksheet.set_column(col, col, 10) # wide enough for a date
for off, name in level:
row = off + 1
worksheet.write(row, col, name, num_format)
def _write_series(self, workbook, worksheet):
"""
Write the series column(s) to *worksheet*. Series start in the column
following the last categories column, placing the series title in the
first cell.
"""
col_offset = self._chart_data.categories.depth
for idx, series in enumerate(self._chart_data):
num_format = workbook.add_format({"num_format": series.number_format})
series_col = idx + col_offset
worksheet.write(0, series_col, series.name)
worksheet.write_column(1, series_col, series.values, num_format)
class XyWorkbookWriter(_BaseWorkbookWriter):
"""
Determines Excel worksheet layout and can write an Excel workbook from XY
chart data. Serves as the authority for Excel worksheet ranges.
"""
def series_name_ref(self, series):
"""
Return the Excel worksheet reference to the cell containing the name
for *series*. This also serves as the column heading for the series
Y values.
"""
row = self.series_table_row_offset(series) + 1
return "Sheet1!$B$%d" % row
def series_table_row_offset(self, series):
"""
Return the number of rows preceding the data table for *series* in
the Excel worksheet.
"""
title_and_spacer_rows = series.index * 2
data_point_rows = series.data_point_offset
return title_and_spacer_rows + data_point_rows
def x_values_ref(self, series):
"""
The Excel worksheet reference to the X values for this chart (not
including the column label).
"""
top_row = self.series_table_row_offset(series) + 2
bottom_row = top_row + len(series) - 1
return "Sheet1!$A$%d:$A$%d" % (top_row, bottom_row)
def y_values_ref(self, series):
"""
The Excel worksheet reference to the Y values for this chart (not
including the column label).
"""
top_row = self.series_table_row_offset(series) + 2
bottom_row = top_row + len(series) - 1
return "Sheet1!$B$%d:$B$%d" % (top_row, bottom_row)
def _populate_worksheet(self, workbook, worksheet):
"""
Write chart data contents to *worksheet* in the standard XY chart
layout. Write the data for each series to a separate two-column
table, X values in column A and Y values in column B. Place the
series label in the first (heading) cell of the column.
"""
chart_num_format = workbook.add_format(
{"num_format": self._chart_data.number_format}
)
for series in self._chart_data:
series_num_format = workbook.add_format(
{"num_format": series.number_format}
)
offset = self.series_table_row_offset(series)
# write X values
worksheet.write_column(offset + 1, 0, series.x_values, chart_num_format)
# write Y values
worksheet.write(offset, 1, series.name)
worksheet.write_column(offset + 1, 1, series.y_values, series_num_format)
class BubbleWorkbookWriter(XyWorkbookWriter):
"""
Service object that knows how to write an Excel workbook from bubble
chart data.
"""
def bubble_sizes_ref(self, series):
"""
The Excel worksheet reference to the range containing the bubble
sizes for *series* (not including the column heading cell).
"""
top_row = self.series_table_row_offset(series) + 2
bottom_row = top_row + len(series) - 1
return "Sheet1!$C$%d:$C$%d" % (top_row, bottom_row)
def _populate_worksheet(self, workbook, worksheet):
"""
Write chart data contents to *worksheet* in the bubble chart layout.
Write the data for each series to a separate three-column table with
X values in column A, Y values in column B, and bubble sizes in
column C. Place the series label in the first (heading) cell of the
values column.
"""
chart_num_format = workbook.add_format(
{"num_format": self._chart_data.number_format}
)
for series in self._chart_data:
series_num_format = workbook.add_format(
{"num_format": series.number_format}
)
offset = self.series_table_row_offset(series)
# write X values
worksheet.write_column(offset + 1, 0, series.x_values, chart_num_format)
# write Y values
worksheet.write(offset, 1, series.name)
worksheet.write_column(offset + 1, 1, series.y_values, series_num_format)
# write bubble sizes
worksheet.write(offset, 2, "Size")
worksheet.write_column(offset + 1, 2, series.bubble_sizes, chart_num_format)
|
[
"xlsxwriter.Workbook",
"StringIO.StringIO"
] |
[((705, 715), 'StringIO.StringIO', 'StringIO', ([], {}), '()\n', (713, 715), False, 'from StringIO import StringIO\n'), ((1228, 1268), 'xlsxwriter.Workbook', 'Workbook', (['xlsx_file', "{'in_memory': True}"], {}), "(xlsx_file, {'in_memory': True})\n", (1236, 1268), False, 'from xlsxwriter import Workbook\n')]
|
import argparse
import os
import re
parser = argparse.ArgumentParser('Visualizing Training sample, top200 pairs from randomly top 2000 pairs')
parser.add_argument(
'--outHtml', type=str, help='output html file')
parser.add_argument(
'--imgDir', type=str, help='image directory')
args = parser.parse_args()
### Writing the table format###
f = open(args.outHtml, 'w')
f.write('<html>\n')
f.write('<head>\n')
f.write('\t<title></title>\n')
f.write('\t<meta name=\"keywords\" content= \"Visual Result\" /> <meta charset=\"utf-8\" />\n')
f.write('\t<meta name=\"robots\" content=\"index, follow\" />\n')
f.write('\t<meta http-equiv=\"Content-Script-Type\" content=\"text/javascript\" />\n')
f.write('\t<meta http-equiv=\"expires\" content=\"0\" />\n')
f.write('\t<meta name=\"description\" content= \"Project page of style.css\" />\n')
f.write('\t<link rel=\"stylesheet\" type=\"text/css\" href=\"style.css\" media=\"screen\" />\n')
f.write('\t<link rel=\"shortcut icon\" href=\"favicon.ico\" />\n')
f.write('</head>\n')
f.write('<body>\n')
f.write('<div id="website">\n')
f.write('<center>\n')
f.write('\t<div class=\"blank\"></div>\n')
f.write('\t<h1>\n')
f.write('\t\tVisualize Training Sample\n')
f.write('\t</h1>\n')
f.write('</center>\n')
f.write('<div class=\"blank\"></div>\n')
f.write('<center>\n')
f.write('<div>\n')
f.write('</div>\n')
### ---HTML Table--- ###
f.write('<table>\n')
f.write('\t<tr>\n')
f.write('\t\t<th># Rank</th>\n')
f.write('\t\t<th>Img 1 </th>\n')
f.write('\t\t<th>Img 2 </th>\n')
f.write('\t</tr>\n')
nbPair = len(os.listdir(args.imgDir)) // 2 ## Nb of rows (integer division so range() below accepts it)
for j in range(nbPair) :
f.write('\t<tr >\n')
msg = '\t\t<th>{:d}</th>\n'.format(j + 1)
f.write(msg)## Rank
img1 = os.path.join(args.imgDir, 'Rank{:d}_1.jpg'.format(j))
msg = '\t\t<td><a download=\"{}\" href=\"{}\" title="ImageName"> <img src=\"{}\" /></a> </td>\n'.format(img1, img1, img1)
f.write(msg)## Img 1
img2 = os.path.join(args.imgDir, 'Rank{:d}_2.jpg'.format(j))
msg = '\t\t<td><a download=\"{}\" href=\"{}\" title="ImageName"> <img src=\"{}\" /></a> </td>\n'.format(img2, img2, img2)
f.write(msg)## Img 2
f.write('\t</tr>\n')
f.write('</table>\n')
f.write('</center>\n</div>\n </body>\n</html>\n')
f.close()
|
[
"os.listdir",
"argparse.ArgumentParser"
] |
[((47, 149), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Visualizing Training sample, top200 pairs from randomly top 2000 pairs"""'], {}), "(\n 'Visualizing Training sample, top200 pairs from randomly top 2000 pairs')\n", (70, 149), False, 'import argparse\n'), ((1559, 1582), 'os.listdir', 'os.listdir', (['args.imgDir'], {}), '(args.imgDir)\n', (1569, 1582), False, 'import os\n')]
|
import random
import time
from actors import Wizard, Creature, SmallAnimal, Dragon
def print_header():
print('--------------------------------')
print(' WIZARD GAME APP')
print('--------------------------------\n')
def game_loop():
creatures = [
SmallAnimal('Toad', 1),
Creature('Tiger', 12),
SmallAnimal('Bat', 3),
Dragon('Dragon', 50, 75, True),
Wizard('Evil Wizard', 1000)
]
hero = Wizard('Gandolf', 75)
while True:
active_creature = random.choice(creatures)
print("A {} of level {} has appeared from a dark and foggy forest...\n"
.format(active_creature.name, active_creature.level))
cmd = input('Do you [a]ttack, [r]unaway or [l]ook around? \n').lower().strip()
print()
if cmd == 'a':
# print('attack')
if hero.attack(active_creature):
creatures.remove(active_creature)
else:
print('The wizard runs and hides taking time to recover...')
for sec in range(1, 6):
time.sleep(1)
print('{}... '.format(sec))
print('The wizard returns revitalized!')
elif cmd == 'r':
print('The wizard has become unsure of his powers and flees!!!')
elif cmd == 'l':
print('The wizard {} takes in the surroundings and sees:'.format(hero.name))
for c in creatures:
print(' * A {} of level {}'.format(c.name, c.level))
else:
print('Ok, exiting game... bye!')
break
if not creatures:
print('You defeated all the creatures, well done!')
break
print()
def main():
print_header()
game_loop()
if __name__ == '__main__':
main()
|
[
"actors.Wizard",
"random.choice",
"time.sleep",
"actors.SmallAnimal",
"actors.Creature",
"actors.Dragon"
] |
[((461, 482), 'actors.Wizard', 'Wizard', (['"""Gandolf"""', '(75)'], {}), "('Gandolf', 75)\n", (467, 482), False, 'from actors import Wizard, Creature, SmallAnimal, Dragon\n'), ((281, 303), 'actors.SmallAnimal', 'SmallAnimal', (['"""Toad"""', '(1)'], {}), "('Toad', 1)\n", (292, 303), False, 'from actors import Wizard, Creature, SmallAnimal, Dragon\n'), ((313, 334), 'actors.Creature', 'Creature', (['"""Tiger"""', '(12)'], {}), "('Tiger', 12)\n", (321, 334), False, 'from actors import Wizard, Creature, SmallAnimal, Dragon\n'), ((344, 365), 'actors.SmallAnimal', 'SmallAnimal', (['"""Bat"""', '(3)'], {}), "('Bat', 3)\n", (355, 365), False, 'from actors import Wizard, Creature, SmallAnimal, Dragon\n'), ((375, 405), 'actors.Dragon', 'Dragon', (['"""Dragon"""', '(50)', '(75)', '(True)'], {}), "('Dragon', 50, 75, True)\n", (381, 405), False, 'from actors import Wizard, Creature, SmallAnimal, Dragon\n'), ((415, 442), 'actors.Wizard', 'Wizard', (['"""Evil Wizard"""', '(1000)'], {}), "('Evil Wizard', 1000)\n", (421, 442), False, 'from actors import Wizard, Creature, SmallAnimal, Dragon\n'), ((527, 551), 'random.choice', 'random.choice', (['creatures'], {}), '(creatures)\n', (540, 551), False, 'import random\n'), ((1108, 1121), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1118, 1121), False, 'import time\n')]
|
import json, subprocess
from ... pyaz_utils import get_cli_name, get_params
def create(account_name, resource_group, name, start_timestamp=None, end_timestamp=None, presentation_window_duration=None, live_backoff_duration=None, timescale=None, force_end_timestamp=None, bitrate=None, first_quality=None, tracks=None):
params = get_params(locals())
command = "az ams account-filter create " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def show(resource_group, account_name, name):
params = get_params(locals())
command = "az ams account-filter show " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def list(resource_group, account_name):
params = get_params(locals())
command = "az ams account-filter list " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def delete(resource_group, account_name, name):
params = get_params(locals())
command = "az ams account-filter delete " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def update(resource_group, account_name, name, start_timestamp=None, end_timestamp=None, presentation_window_duration=None, live_backoff_duration=None, timescale=None, bitrate=None, first_quality=None, tracks=None, force_end_timestamp=None, set=None, add=None, remove=None, force_string=None):
params = get_params(locals())
command = "az ams account-filter update " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
|
[
"subprocess.run",
"json.loads"
] |
[((444, 532), 'subprocess.run', 'subprocess.run', (['command'], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(command, shell=True, stdout=subprocess.PIPE, stderr=\n subprocess.PIPE)\n', (458, 532), False, 'import json, subprocess\n'), ((921, 1009), 'subprocess.run', 'subprocess.run', (['command'], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(command, shell=True, stdout=subprocess.PIPE, stderr=\n subprocess.PIPE)\n', (935, 1009), False, 'import json, subprocess\n'), ((1392, 1480), 'subprocess.run', 'subprocess.run', (['command'], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(command, shell=True, stdout=subprocess.PIPE, stderr=\n subprocess.PIPE)\n', (1406, 1480), False, 'import json, subprocess\n'), ((1873, 1961), 'subprocess.run', 'subprocess.run', (['command'], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(command, shell=True, stdout=subprocess.PIPE, stderr=\n subprocess.PIPE)\n', (1887, 1961), False, 'import json, subprocess\n'), ((2600, 2688), 'subprocess.run', 'subprocess.run', (['command'], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(command, shell=True, stdout=subprocess.PIPE, stderr=\n subprocess.PIPE)\n', (2614, 2688), False, 'import json, subprocess\n'), ((645, 663), 'json.loads', 'json.loads', (['stdout'], {}), '(stdout)\n', (655, 663), False, 'import json, subprocess\n'), ((1122, 1140), 'json.loads', 'json.loads', (['stdout'], {}), '(stdout)\n', (1132, 1140), False, 'import json, subprocess\n'), ((1593, 1611), 'json.loads', 'json.loads', (['stdout'], {}), '(stdout)\n', (1603, 1611), False, 'import json, subprocess\n'), ((2074, 2092), 'json.loads', 'json.loads', (['stdout'], {}), '(stdout)\n', (2084, 2092), False, 'import json, subprocess\n'), ((2801, 2819), 'json.loads', 'json.loads', (['stdout'], {}), '(stdout)\n', (2811, 2819), False, 'import json, subprocess\n')]
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for working with TensorFlow."""
import ast
class HParams(object):
"""Creates an object for passing around hyperparameter values.
Use the parse method to overwrite the default hyperparameters with values
passed in as a string representation of a Python dictionary mapping
hyperparameters to values.
Ex.
hparams = magenta.common.HParams(batch_size=128, hidden_size=256)
hparams.parse('{"hidden_size":512}')
assert hparams.batch_size == 128
assert hparams.hidden_size == 512
"""
def __init__(self, **init_hparams):
object.__setattr__(self, 'keyvals', init_hparams)
def __getattr__(self, key):
"""Returns value of the given hyperameter, or None if does not exist."""
return self.keyvals.get(key)
def __setattr__(self, key, value):
"""Sets value for the hyperameter."""
self.keyvals[key] = value
def parse(self, string):
"""Merges in new hyperparameters, replacing existing with same key."""
self.keyvals.update(ast.literal_eval(string))
def values(self):
"""Return the hyperparameter values as a Python dictionary."""
return self.keyvals
|
[
"ast.literal_eval"
] |
[((1588, 1612), 'ast.literal_eval', 'ast.literal_eval', (['string'], {}), '(string)\n', (1604, 1612), False, 'import ast\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 23 19:42:57 2019
@author: jasoncasey
"""
from io import BytesIO, StringIO
from zipfile import ZipFile
from urllib.request import urlopen
import pandas as pd
import pickle
# item_recode maps labels on to coded columns
def item_recode(col, codings):
# df.replace({colname: codings})
answer = col.map(codings, na_action = "ignore")
return(answer)
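# NOTE: item_recode is redefined further below with an optional default_value
# parameter; that later definition is the one in effect after import.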
def fix_cols(data):
data.columns = [colname.lower() for colname in list(data.columns.values)]
return(data)
def fix_number(col):
try:
col = col.str.replace("^\\.$", "")
answer = pd.to_numeric(col.str.replace("[^0-9\\.\\-]", ""), errors = "coerce")
# answer = pd.to_numeric(col, errors = "coerce")
except Exception as e:
print(str(e))
return(str(e))
return(answer)
def make_proportion(col):
""" turn an integer column into a decimal proportion. """
answer = col / 100
return(answer)
def get_filename(file_list):
""" find csv file from IPEDS download. If a revised file exists ("_rv"), return
that, otherwise, return the csv."""
match = [s for s in file_list if "_rv" in s]
answer = file_list[0]
if len(match) > 0:
answer = match[0]
return(answer)
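# Illustrative example (file names are hypothetical):
#   get_filename(["hd2019.csv", "hd2019_rv.csv"]) returns "hd2019_rv.csv"
#   get_filename(["hd2019.csv"]) returns "hd2019.csv"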
def net_load_info(url):
""" return the file list from a zip file downloaded from a URL. """
resp = urlopen(url)
zipfile = ZipFile(BytesIO(resp.read()))
files = zipfile.namelist()
return(files)
def net_load_data(url, types = "object"):
""" load a csv from an IPEDS zip at the specified URL. use get_filename() to
get the most recent revision. Returns a pandas DataFrame. """
with urlopen(url) as resp:
zipfile = ZipFile(BytesIO(resp.read()))
file_name = get_filename(zipfile.namelist())
with zipfile.open(file_name) as data_file:
answer = pd.read_csv(data_file,
dtype = types,
na_values = '.',
index_col = False,
low_memory = False,
encoding = "iso-8859-1")
return(answer)
def item_recode(col, codings, default_value = None):
""" recode values in column col using codings and default_value for unmatched codings """
if default_value == None:
answer = col.map(codings, na_action = 'ignore')
else:
answer = col.map(codings, na_action = 'ignore').fillna(default_value)
return(answer)
def read_pickle(filespec):
""" read pickle file at filespec """
try:
with open(filespec, 'rb') as f:
answer = pickle.load(f)
except Exception as e:
print('File not loaded properly.\n\n{}'.format(str(e)))
raise
return answer
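# A minimal usage sketch. The URL below is illustrative; any IPEDS data-center
# zip archive containing a CSV should work the same way:
#   df = fix_cols(net_load_data("https://nces.ed.gov/ipeds/datacenter/data/HD2019.zip"))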
|
[
"pandas.read_csv",
"pickle.load",
"urllib.request.urlopen"
] |
[((1408, 1420), 'urllib.request.urlopen', 'urlopen', (['url'], {}), '(url)\n', (1415, 1420), False, 'from urllib.request import urlopen\n'), ((1719, 1731), 'urllib.request.urlopen', 'urlopen', (['url'], {}), '(url)\n', (1726, 1731), False, 'from urllib.request import urlopen\n'), ((1914, 2026), 'pandas.read_csv', 'pd.read_csv', (['data_file'], {'dtype': 'types', 'na_values': '"""."""', 'index_col': '(False)', 'low_memory': '(False)', 'encoding': '"""iso-8859-1"""'}), "(data_file, dtype=types, na_values='.', index_col=False,\n low_memory=False, encoding='iso-8859-1')\n", (1925, 2026), True, 'import pandas as pd\n'), ((2702, 2716), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2713, 2716), False, 'import pickle\n')]
|
from get_api_key import api_key
import argparse
import os
from datetime import datetime
youtube_instance = api_key()
youtube_instance.get_api_key()
youtube = youtube_instance.get_youtube()
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,\
description='Explore the oldest videos on a Topic',\
epilog='''Examples \n .\oldest_videos.py tesla \n .\oldest_videos.py "game of thrones" -n 15 -s 2012''')
parser.add_argument("topic", help='Enter the topic')
group2 = parser.add_argument_group()
group2.add_argument('-n','--max_results',type=int, metavar='', default=5, help='The script will display "n" results')
group2.add_argument('-s','--start_year',type=int, metavar='', default=2005, help='By default, it will search from 2005')
group2.add_argument('-e','--end_year',type=int, metavar='', default=2010, help='By default, it will search till 2010')
parser.add_argument('-o','--output', action='store_true', help='output to a File')
args = parser.parse_args()
def oldest_videos_on_a_topic(topic,Max_limit,start_yr,end_yr):
if args.output:
f = open("old_videos.txt",'w',encoding = 'utf-8')
f.close()
else:
print('\n')
print('Video ID','\t','Upload Date/Time','\t','Video Title')
print('--------','\t','----------------','\t','-----------')
limit = 0
global youtube
    # Build the search window from the start/end years supplied on the command line
    start_time = datetime(year=start_yr, month=4, day=1).strftime('%Y-%m-%dT%H:%M:%SZ')
    end_time = datetime(year=end_yr, month=1, day=1).strftime('%Y-%m-%dT%H:%M:%SZ')
res = youtube.search().list(part='snippet',
q=topic,
type='video',
publishedAfter=start_time,
publishedBefore=end_time,
maxResults=50).execute()
for item in sorted(res['items'], key=lambda x:x['snippet']['publishedAt']):
        title = str(item['snippet']['title']).replace('&#39;',"'").replace('&quot;','"')
if topic.lower() in title.lower():
limit += 1
date_format = "%Y-%m-%dT%H:%M:%SZ"
publishedAt = datetime.strptime(item['snippet']['publishedAt'], date_format)
if args.output:
f = open("old_videos.txt",'a',encoding = 'utf-8')
f.write(item['id']['videoId']+'\t\t'+str(publishedAt)+'\t\t'+ title )
f.write('\n')
f.close()
else:
print(item['id']['videoId'],'\t',publishedAt,'\t', title )
if limit == Max_limit:
break
else:
continue
if args.output:
print('\nDone! Check the file old_video.txt\n')
else:
print('\n')
if __name__ == "__main__":
key = input("Enter key\n")
youtube = get_api_key(key)
oldest_videos_on_a_topic(args.topic,args.max_results,args.start_year,args.end_year)
|
[
"get_api_key.api_key",
"datetime.datetime.strptime",
"argparse.ArgumentParser",
"datetime.datetime"
] |
[((109, 118), 'get_api_key.api_key', 'api_key', ([], {}), '()\n', (116, 118), False, 'from get_api_key import api_key\n'), ((201, 455), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.RawDescriptionHelpFormatter', 'description': '"""Explore the oldest videos on a Topic"""', 'epilog': '"""Examples \n .\\\\oldest_videos.py tesla \n .\\\\oldest_videos.py "game of thrones" -n 15 -s 2012"""'}), '(formatter_class=argparse.\n RawDescriptionHelpFormatter, description=\n \'Explore the oldest videos on a Topic\', epilog=\n """Examples \n .\\\\oldest_videos.py tesla \n .\\\\oldest_videos.py "game of thrones" -n 15 -s 2012"""\n )\n', (224, 455), False, 'import argparse\n'), ((1443, 1478), 'datetime.datetime', 'datetime', ([], {'year': '(2005)', 'month': '(4)', 'day': '(1)'}), '(year=2005, month=4, day=1)\n', (1451, 1478), False, 'from datetime import datetime\n'), ((1525, 1560), 'datetime.datetime', 'datetime', ([], {'year': '(2010)', 'month': '(1)', 'day': '(1)'}), '(year=2010, month=1, day=1)\n', (1533, 1560), False, 'from datetime import datetime\n'), ((2191, 2253), 'datetime.datetime.strptime', 'datetime.strptime', (["item['snippet']['publishedAt']", 'date_format'], {}), "(item['snippet']['publishedAt'], date_format)\n", (2208, 2253), False, 'from datetime import datetime\n')]
|
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
# Contains general matrix utilities. Some, but not all, of these tools are specific to
# matrices over the ints modulo 2.
import numpy as _np
def dotmod2(m1, m2):
"""
    Returns the product over the integers modulo 2 of
two matrices.
"""
return _np.dot(m1, m2) % 2
def multidotmod2(mlist):
"""
    Returns the product over the integers modulo 2 of
a list of matrices.
"""
return _np.linalg.multi_dot(mlist) % 2
def detmod2(m):
"""
    Returns the determinant of a matrix over the integers
modulo 2 (GL(n,2)).
"""
return _np.round(_np.linalg.det(m)) % 2
# A utility function used by the random symplectic matrix sampler.
def matrix_directsum(m1, m2):
"""
Returns the direct sum of two square matrices of integers.
"""
n1 = len(m1[0, :])
n2 = len(m2[0, :])
output = _np.zeros((n1 + n2, n1 + n2), dtype='int8')
output[0:n1, 0:n1] = m1
output[n1:n1 + n2, n1:n1 + n2] = m2
return output
def inv_mod2(m):
"""
Finds the inverse of a matrix over GL(n,2)
"""
t = len(m)
c = _np.append(m, _np.eye(t), 1)
return _np.array(gaussian_elimination_mod2(c)[:, t:])
def Axb_mod2(A, b):
"""
Solves Ax = b over GF(2)
"""
b = _np.array([b]).T
C = _np.append(A, b, 1)
return _np.array([gaussian_elimination_mod2(C)[:, -1]]).T
def gaussian_elimination_mod2(A):
"""
Gaussian elimination mod2 of A.
"""
A = _np.array(A, dtype='int')
m, n = A.shape
i, j = 0, 0
while (i < m) and (j < n):
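        # pivot search: take the first row at or below i with a 1 in column j and swap it into row i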
k = A[i:m, j].argmax() + i
A[_np.array([i, k]), :] = A[_np.array([k, i]), :]
aijn = _np.array([A[i, j:]])
col = _np.array([A[:, j]]).T
col[i] = 0
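        # eliminate: XOR the pivot row into every other row that has a 1 in column j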
flip = _np.dot(col, aijn)
A[:, j:] = _np.bitwise_xor(A[:, j:], flip)
i += 1
j += 1
return A
def diagonal_as_vec(m):
"""
Returns a 1D array containing the diagonal of the input square 2D array m.
"""
l = _np.shape(m)[0]
vec = _np.zeros(l, int)
for i in range(0, l):
vec[i] = m[i, i]
return vec
def strictly_upper_triangle(m):
"""
Returns a matrix containing the strictly upper triangle of m and zeros elsewhere.
"""
l = _np.shape(m)[0]
out = m.copy()
for i in range(0, l):
for j in range(0, i + 1):
out[i, j] = 0
return out
def diagonal_as_matrix(m):
"""
Returns a diagonal matrix containing the diagonal of m.
"""
l = _np.shape(m)[0]
out = _np.zeros((l, l), int)
for i in range(0, l):
out[i, i] = m[i, i]
return out
# Code for factorizing a symmetric invertible matrix A over GL(n,2) into
# the form A = F F.T. The algorithm mostly follows the proof in *Orthogonal Matrices
# Over Finite Fields* by <NAME> in The American Mathematical Monthly,
# Vol. 76, No. 2 (Feb., 1969), pp. 152-164
def albert_factor(D, failcount=0):
"""
Returns a matrix M such that D = M M.T for symmetric D, where D and M are
matrices over [0,1] mod 2. The algorithm mostly follows the proof in "Orthogonal Matrices
Over Finite Fields" by <NAME> in The American Mathematical Monthly, Vol. 76, No. 2
(Feb., 1969), pp. 152-164
    There is generally not a unique Albert factorization, and this algorithm is randomized. It will
    generally return different factorizations from multiple calls.
"""
D = _np.array(D, dtype='int')
proper = False
while not proper:
N = onesify(D)
aa = multidotmod2([N, D, N.T])
P = proper_permutation(aa)
A = multidotmod2([P, aa, P.T])
proper = check_proper_permutation(A)
t = len(A)
# Start in lower right
L = _np.array([[1]])
for ind in range(t - 2, -1, -1):
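        # build L upward from the lower-right corner: solve B n = z over GF(2) for the trailing block, then extend L by one row and column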
block = A[ind:, ind:].copy()
z = block[0, 1:]
B = block[1:, 1:]
n = Axb_mod2(B, z).T
x = _np.array(_np.dot(n, L), dtype='int')
zer = _np.zeros([t - ind - 1, 1])
L = _np.array(_np.bmat([[_np.eye(1), x], [zer, L]]), dtype='int')
Qinv = inv_mod2(dotmod2(P, N))
L = dotmod2(_np.array(Qinv), L)
return L
def random_bitstring(n, p, failcount=0):
"""
Constructs a random bitstring of length n with parity p
"""
bitstring = _np.random.randint(0, 2, size=n)
if _np.mod(sum(bitstring), 2) == p:
return bitstring
elif failcount < 100:
return _np.array(random_bitstring(n, p, failcount + 1), dtype='int')
def random_invertable_matrix(n, failcount=0):
"""
    Finds a random invertible matrix M over GL(n,2)
"""
M = _np.array([random_bitstring(n, _np.random.randint(0, 2)) for x in range(n)])
if detmod2(M) == 0:
if failcount < 100:
return random_invertable_matrix(n, failcount + 1)
else:
return M
def random_symmetric_invertable_matrix(n):
"""
Creates a random, symmetric, invertible matrix from GL(n,2)
"""
M = random_invertable_matrix(n)
return dotmod2(M, M.T)
def onesify(A, failcount=0, maxfailcount=100):
"""
Returns M such that M A M.T has ones along the main diagonal
"""
assert(failcount < maxfailcount), "The function has failed too many times! Perhaps the input is invalid."
# This is probably the slowest function since it just tries things
t = len(A)
count = 0
test_string = _np.diag(A)
M = []
while (len(M) < t) and (count < 40):
bitstr = random_bitstring(t, _np.random.randint(0, 2))
if dotmod2(bitstr, test_string) == 1:
if not _np.any([_np.array_equal(bitstr, m) for m in M]):
M += [bitstr]
else:
count += 1
if len(M) < t:
return onesify(A, failcount + 1)
M = _np.array(M, dtype='int')
if _np.array_equal(dotmod2(M, inv_mod2(M)), _np.identity(t, int)):
return _np.array(M)
else:
return onesify(A, failcount + 1, maxfailcount=maxfailcount)
def permute_top(A, i):
"""
Permutes the first row & col with the i'th row & col
"""
t = len(A)
P = _np.eye(t)
P[0, 0] = 0
P[i, i] = 0
P[0, i] = 1
P[i, 0] = 1
return multidotmod2([P, A, P]), P
def fix_top(A):
"""
Takes a symmetric binary matrix with ones along the diagonal
and returns the permutation matrix P such that the [1:t,1:t]
submatrix of P A P is invertible
"""
if A.shape == (1, 1):
return _np.eye(1, dtype='int')
t = len(A)
found_B = False
for ind in range(t):
aa, P = permute_top(A, ind)
B = _np.round_(aa[1:, 1:])
if detmod2(B) == 0:
continue
else:
found_B = True
break
# Todo : put a more meaningful fail message here #
assert(found_B), "Algorithm failed!"
return P
def proper_permutation(A):
"""
Takes a symmetric binary matrix with ones along the diagonal
and returns the permutation matrix P such that all [n:t,n:t]
submatrices of P A P are invertible.
"""
t = len(A)
Ps = [] # permutation matrices
for ind in range(t):
perm = fix_top(A[ind:, ind:])
zer = _np.zeros([ind, t - ind])
full_perm = _np.array(_np.bmat([[_np.eye(ind), zer], [zer.T, perm]]))
A = multidotmod2([full_perm, A, full_perm.T])
Ps += [full_perm]
# return Ps
return multidotmod2(list(reversed(Ps)))
#return _np.linalg.multi_dot(list(reversed(Ps))) # Should this not be multidot_mod2 ?
def check_proper_permutation(A):
"""
Check to see if the matrix has been properly permuted
    This should be redundant to what is already built into
'fix_top'.
"""
t = len(A)
for ind in range(0, t):
b = A[ind:, ind:]
if detmod2(b) == 0:
return False
return True
|
[
"numpy.array_equal",
"numpy.eye",
"numpy.bitwise_xor",
"numpy.zeros",
"numpy.identity",
"numpy.shape",
"numpy.append",
"numpy.random.randint",
"numpy.array",
"numpy.round_",
"numpy.linalg.det",
"numpy.dot",
"numpy.diag",
"numpy.linalg.multi_dot"
] |
[((1528, 1571), 'numpy.zeros', '_np.zeros', (['(n1 + n2, n1 + n2)'], {'dtype': '"""int8"""'}), "((n1 + n2, n1 + n2), dtype='int8')\n", (1537, 1571), True, 'import numpy as _np\n'), ((1952, 1971), 'numpy.append', '_np.append', (['A', 'b', '(1)'], {}), '(A, b, 1)\n', (1962, 1971), True, 'import numpy as _np\n'), ((2132, 2157), 'numpy.array', '_np.array', (['A'], {'dtype': '"""int"""'}), "(A, dtype='int')\n", (2141, 2157), True, 'import numpy as _np\n'), ((2695, 2712), 'numpy.zeros', '_np.zeros', (['l', 'int'], {}), '(l, int)\n', (2704, 2712), True, 'import numpy as _np\n'), ((3202, 3224), 'numpy.zeros', '_np.zeros', (['(l, l)', 'int'], {}), '((l, l), int)\n', (3211, 3224), True, 'import numpy as _np\n'), ((4092, 4117), 'numpy.array', '_np.array', (['D'], {'dtype': '"""int"""'}), "(D, dtype='int')\n", (4101, 4117), True, 'import numpy as _np\n'), ((4393, 4409), 'numpy.array', '_np.array', (['[[1]]'], {}), '([[1]])\n', (4402, 4409), True, 'import numpy as _np\n'), ((4952, 4984), 'numpy.random.randint', '_np.random.randint', (['(0)', '(2)'], {'size': 'n'}), '(0, 2, size=n)\n', (4970, 4984), True, 'import numpy as _np\n'), ((6042, 6053), 'numpy.diag', '_np.diag', (['A'], {}), '(A)\n', (6050, 6053), True, 'import numpy as _np\n'), ((6430, 6455), 'numpy.array', '_np.array', (['M'], {'dtype': '"""int"""'}), "(M, dtype='int')\n", (6439, 6455), True, 'import numpy as _np\n'), ((6756, 6766), 'numpy.eye', '_np.eye', (['t'], {}), '(t)\n', (6763, 6766), True, 'import numpy as _np\n'), ((948, 963), 'numpy.dot', '_np.dot', (['m1', 'm2'], {}), '(m1, m2)\n', (955, 963), True, 'import numpy as _np\n'), ((1099, 1126), 'numpy.linalg.multi_dot', '_np.linalg.multi_dot', (['mlist'], {}), '(mlist)\n', (1119, 1126), True, 'import numpy as _np\n'), ((1778, 1788), 'numpy.eye', '_np.eye', (['t'], {}), '(t)\n', (1785, 1788), True, 'import numpy as _np\n'), ((1927, 1941), 'numpy.array', '_np.array', (['[b]'], {}), '([b])\n', (1936, 1941), True, 'import numpy as _np\n'), ((2333, 2354), 'numpy.array', '_np.array', (['[A[i, j:]]'], {}), '([A[i, j:]])\n', (2342, 2354), True, 'import numpy as _np\n'), ((2426, 2444), 'numpy.dot', '_np.dot', (['col', 'aijn'], {}), '(col, aijn)\n', (2433, 2444), True, 'import numpy as _np\n'), ((2464, 2495), 'numpy.bitwise_xor', '_np.bitwise_xor', (['A[:, j:]', 'flip'], {}), '(A[:, j:], flip)\n', (2479, 2495), True, 'import numpy as _np\n'), ((2669, 2681), 'numpy.shape', '_np.shape', (['m'], {}), '(m)\n', (2678, 2681), True, 'import numpy as _np\n'), ((2924, 2936), 'numpy.shape', '_np.shape', (['m'], {}), '(m)\n', (2933, 2936), True, 'import numpy as _np\n'), ((3176, 3188), 'numpy.shape', '_np.shape', (['m'], {}), '(m)\n', (3185, 3188), True, 'import numpy as _np\n'), ((4629, 4656), 'numpy.zeros', '_np.zeros', (['[t - ind - 1, 1]'], {}), '([t - ind - 1, 1])\n', (4638, 4656), True, 'import numpy as _np\n'), ((4783, 4798), 'numpy.array', '_np.array', (['Qinv'], {}), '(Qinv)\n', (4792, 4798), True, 'import numpy as _np\n'), ((6505, 6525), 'numpy.identity', '_np.identity', (['t', 'int'], {}), '(t, int)\n', (6517, 6525), True, 'import numpy as _np\n'), ((6543, 6555), 'numpy.array', '_np.array', (['M'], {}), '(M)\n', (6552, 6555), True, 'import numpy as _np\n'), ((7112, 7135), 'numpy.eye', '_np.eye', (['(1)'], {'dtype': '"""int"""'}), "(1, dtype='int')\n", (7119, 7135), True, 'import numpy as _np\n'), ((7246, 7268), 'numpy.round_', '_np.round_', (['aa[1:, 1:]'], {}), '(aa[1:, 1:])\n', (7256, 7268), True, 'import numpy as _np\n'), ((7834, 7859), 'numpy.zeros', '_np.zeros', (['[ind, t - ind]'], {}), 
'([ind, t - ind])\n', (7843, 7859), True, 'import numpy as _np\n'), ((1267, 1284), 'numpy.linalg.det', '_np.linalg.det', (['m'], {}), '(m)\n', (1281, 1284), True, 'import numpy as _np\n'), ((2369, 2389), 'numpy.array', '_np.array', (['[A[:, j]]'], {}), '([A[:, j]])\n', (2378, 2389), True, 'import numpy as _np\n'), ((4587, 4600), 'numpy.dot', '_np.dot', (['n', 'L'], {}), '(n, L)\n', (4594, 4600), True, 'import numpy as _np\n'), ((6144, 6168), 'numpy.random.randint', '_np.random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (6162, 6168), True, 'import numpy as _np\n'), ((2270, 2287), 'numpy.array', '_np.array', (['[i, k]'], {}), '([i, k])\n', (2279, 2287), True, 'import numpy as _np\n'), ((2296, 2313), 'numpy.array', '_np.array', (['[k, i]'], {}), '([k, i])\n', (2305, 2313), True, 'import numpy as _np\n'), ((5308, 5332), 'numpy.random.randint', '_np.random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (5326, 5332), True, 'import numpy as _np\n'), ((4690, 4700), 'numpy.eye', '_np.eye', (['(1)'], {}), '(1)\n', (4697, 4700), True, 'import numpy as _np\n'), ((6244, 6270), 'numpy.array_equal', '_np.array_equal', (['bitstr', 'm'], {}), '(bitstr, m)\n', (6259, 6270), True, 'import numpy as _np\n'), ((7901, 7913), 'numpy.eye', '_np.eye', (['ind'], {}), '(ind)\n', (7908, 7913), True, 'import numpy as _np\n')]
|
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# Implements weight equalization as per https://arxiv.org/abs/1906.04721
import logging
import math
from copy import copy
import numpy as np
from graph.types import (ActivationParameters, FilterParameters,
ConvFusionParameters)
from stats.ranges import Ranges
from stats.scales import Scales
LOG = logging.getLogger('nntool.'+__name__)
def process_node(node, last_neuron, group, groups, neurons):
if not node.can_equalize:
group = add_group(group, groups, neurons)
return True, None, group
if isinstance(node, FilterParameters):
last_neuron = add_neuron(node.name, node, last_neuron, neurons, group)
return True, last_neuron, group
if isinstance(node, ActivationParameters) and\
last_neuron is not None and node.activation == 'relu':
assert 'activation' not in last_neuron, "weird 2 activations after conv"
last_neuron['activation'] = node
return True, last_neuron, group
return False, last_neuron, group
def discover_groups(G):
groups = []
group = []
neurons = []
last_neuron = None
for step in G.graph_state.steps:
node = step['node']
# nodes cannot have multiple outputs
if len(G.successors(node.name)) != 1 or len(G.successors(node.name)[0]) != 1:
last_neuron = None
group = add_group(group, groups, neurons)
continue
should_continue, last_neuron, group = process_node(node, last_neuron, group,
groups, neurons)
if should_continue:
continue
if isinstance(node, ConvFusionParameters):
for fnode in node.contained_nodes():
_, last_neuron, group = process_node(fnode, last_neuron, group,
groups, neurons)
if group:
add_group(group, groups, neurons)
return groups, neurons
def add_group(group, groups, neurons):
if group:
LOG.info("Adding group with %d neuron pairs", len(group))
groups.append(group)
neurons.append(group[-1][1])
group = []
return group
def add_neuron(node_name, node, last_neuron, neurons, group):
new_neuron = {'name': node_name, 'node': node,
'weights': None, 'biases': None}
if last_neuron is not None:
neurons.append(last_neuron)
LOG.info("Discovered neuron pair %s -> %s", last_neuron['name'], new_neuron['name'])
group.append((last_neuron, new_neuron))
last_neuron = new_neuron
return last_neuron
def calculate_s(range_1, range_2):
assert len(range_1) == len(range_2)
# note: the paper is wrong. It should be 1/range2 not 1/range1
return [(1/range_2[i]) * math.sqrt(range_1[i] * range_2[i]) for i in range(len(range_1))]
class QuantizationError(Exception):
pass
def calculate_precisions(step):
nn_0 = step[0]
nn_1 = step[1]
ranges_0, max_0 = Ranges.range_output(nn_0['node'], weights=nn_0['weights'])
ranges_1, max_1 = Ranges.range_input(nn_1['node'], weights=nn_1['weights'])
prec_0 = ranges_0/max_0
prec_1 = ranges_1/max_1
return prec_0, prec_1
def process_group(group, threshold):
total_precision = 0
cycles = 0
# Keep going until we converge
while True:
precisions = []
cycles += 1
if cycles > 50:
raise QuantizationError("Weight scaling has failed to converge")
for step in group:
prec_0, prec_1 = calculate_precisions(step)
precisions.append(np.sum(prec_0 * prec_1))
new_total_precision = sum(precisions)
# end when the precision change drops below threshold
if abs(new_total_precision - total_precision) < threshold:
LOG.info("group has converged under %f after %d cycles", threshold, cycles)
break
total_precision = new_total_precision
# note: traversing in reverse order. Not sure that it makes any difference.
for step in reversed(group):
nn_0 = step[0]
nn_1 = step[1]
# get the ranges of the output channels of layer 0 and input channels of layer 2
ranges_0, _ = Ranges.range_output(nn_0['node'], weights=nn_0['weights'])
ranges_1, _ = Ranges.range_input(nn_1['node'], weights=nn_1['weights'])
scale = calculate_s(ranges_0, ranges_1)
# now apply the scale to the output and input channels
nn_0['weights'], nn_0['biases'] =\
Scales.scale_output(nn_0['node'], scale, nn_0['weights'], nn_0['biases'])
nn_1['weights'] = Scales.scale_input(nn_1['node'], scale, nn_1['weights'])
def process_groups(groups, threshold=0.01):
for group in groups:
LOG.info("processing group")
process_group(group, float(threshold))
def update_parameters(neurons):
for neuron in neurons:
params = neuron['node']
params.weights = neuron['weights']
if neuron['biases'] is not None:
params.biases = neuron['biases']
def weight_equalization(G, threshold=0.01):
LOG.info("discovering groups")
groups, neurons = discover_groups(G)
if groups and neurons:
LOG.info("found %d groups and %d neurons", len(groups), len(neurons))
process_groups(groups, threshold)
update_parameters(neurons)
G.graph_identity.set_equalized(threshold)
else:
LOG.warning("no groups to equalize found")
def adjust_biases(G, stats):
for nid, stat in stats.items():
node = nid.get_node(G)
if isinstance(node, FilterParameters):
chan_err = np.array(stat['chan_err'], dtype=np.float32)
if node.has_bias:
node.biases = node.biases - chan_err
else:
node.has_bias = True
node.biases = chan_err * -1
# TODO - set quantization of biases
|
[
"stats.ranges.Ranges.range_output",
"numpy.sum",
"math.sqrt",
"stats.scales.Scales.scale_output",
"numpy.array",
"stats.scales.Scales.scale_input",
"stats.ranges.Ranges.range_input",
"logging.getLogger"
] |
[((1034, 1073), 'logging.getLogger', 'logging.getLogger', (["('nntool.' + __name__)"], {}), "('nntool.' + __name__)\n", (1051, 1073), False, 'import logging\n'), ((3704, 3762), 'stats.ranges.Ranges.range_output', 'Ranges.range_output', (["nn_0['node']"], {'weights': "nn_0['weights']"}), "(nn_0['node'], weights=nn_0['weights'])\n", (3723, 3762), False, 'from stats.ranges import Ranges\n'), ((3785, 3842), 'stats.ranges.Ranges.range_input', 'Ranges.range_input', (["nn_1['node']"], {'weights': "nn_1['weights']"}), "(nn_1['node'], weights=nn_1['weights'])\n", (3803, 3842), False, 'from stats.ranges import Ranges\n'), ((3498, 3532), 'math.sqrt', 'math.sqrt', (['(range_1[i] * range_2[i])'], {}), '(range_1[i] * range_2[i])\n', (3507, 3532), False, 'import math\n'), ((4960, 5018), 'stats.ranges.Ranges.range_output', 'Ranges.range_output', (["nn_0['node']"], {'weights': "nn_0['weights']"}), "(nn_0['node'], weights=nn_0['weights'])\n", (4979, 5018), False, 'from stats.ranges import Ranges\n'), ((5045, 5102), 'stats.ranges.Ranges.range_input', 'Ranges.range_input', (["nn_1['node']"], {'weights': "nn_1['weights']"}), "(nn_1['node'], weights=nn_1['weights'])\n", (5063, 5102), False, 'from stats.ranges import Ranges\n'), ((5285, 5358), 'stats.scales.Scales.scale_output', 'Scales.scale_output', (["nn_0['node']", 'scale', "nn_0['weights']", "nn_0['biases']"], {}), "(nn_0['node'], scale, nn_0['weights'], nn_0['biases'])\n", (5304, 5358), False, 'from stats.scales import Scales\n'), ((5389, 5445), 'stats.scales.Scales.scale_input', 'Scales.scale_input', (["nn_1['node']", 'scale', "nn_1['weights']"], {}), "(nn_1['node'], scale, nn_1['weights'])\n", (5407, 5445), False, 'from stats.scales import Scales\n'), ((6405, 6449), 'numpy.array', 'np.array', (["stat['chan_err']"], {'dtype': 'np.float32'}), "(stat['chan_err'], dtype=np.float32)\n", (6413, 6449), True, 'import numpy as np\n'), ((4312, 4335), 'numpy.sum', 'np.sum', (['(prec_0 * prec_1)'], {}), '(prec_0 * prec_1)\n', (4318, 4335), True, 'import numpy as np\n')]
|
import copy
import os
import ntpath
from pandas import json_normalize
from app.utility.base_svc import BaseService
class DataService(BaseService):
adversary_path = os.path.abspath('data/evaluations/')
procedures_path = os.path.abspath('data/procedures/')
apt29_categories = ['None', 'Telemetry', 'MSSP', 'General', 'Tactic', 'Technique', 'N/A']
apt29_modifiers = {"None", "Alert", "Correlated", "Delayed (Manual)", "Delayed (Processing)", "Host Interrogation",
"Residual Artifact",
"Configuration Change (Detections)",
"Configuration Change (UX)", "Innovative"}
mod_organized = {'datasets': {"None": [], "Alert": [], "Correlated": [],
"Delayed (Manual)": [], "Delayed (Processing)": [], "Host Interrogation": [],
"Residual Artifact": [], "Configuration Change (Detections)": [],
"Configuration Change (UX)": [], "Innovative": []}, 'labels': []}
organized = {'datasets': {'None': [], 'Telemetry': [], 'General': [], 'Tactic': [], 'Technique': [], 'MSSP': [], 'N/A': []},
'labels': []}
def __init__(self):
self.log = self.add_service('data_svc', self)
self.schema = dict(procedures=[], evaluations={})
self.ram = copy.deepcopy(self.schema)
async def load_evaluations(self):
evaluations = await self.get_service('file_svc').get_json_files(self.adversary_path)
for evaluation in evaluations:
results = await self.get_service('file_svc').load_json_file(evaluation)
name = ntpath.basename(evaluation).rstrip('.json')
data = await self.analyze_evaluations(results, name)
self.ram['evaluations'].update({name: {'data': data, 'results': results}})
async def load_procedures(self):
procedures = await self.get_service('file_svc').get_json_files(self.procedures_path)
for procedure in procedures:
data = await self.get_service('file_svc').load_json_file(procedure)
name = ntpath.basename(procedure)
self.ram['procedures'].append({name: data})
async def get_evaluations(self):
return self.ram.get('evaluations')
async def get_procedures(self):
return self.ram.get('procedures')
async def get_evaluations(self, criteria):
evaluations = list(self.ram['evaluations'].keys())
evaluations = [(eval_id, eval_id.split('.')[0]) for eval_id in evaluations if criteria['round'] in eval_id]
return sorted(evaluations)
async def analyze_evaluations(self, results, eval_name):
detections = 'DetectionCategories' if 'apt3' in eval_name else 'Detections'
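        # flatten every step's detection records into rows, carrying technique/tactic/sub-step metadata alongside each detection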
tmp = json_normalize(data=results['Techniques'],
record_path=['Steps', detections],
meta=['TechniqueId', 'TechniqueName', 'Tactics', ['Steps', 'SubStep']])
tmp['Tactic'] = tmp.apply(lambda r: r.Tactics[0]['TacticName'], axis=1)
mod_df = tmp.explode('Modifiers')
mod_df['Step'] = mod_df.apply(lambda row: row['Steps.SubStep'].split('.', 1)[0], axis=1)
modifier_detections = mod_df.groupby(['DetectionType', 'Modifiers']).size().to_dict()
data = dict(modifier_detections=await self.consolidate(modifier_detections))
data['technique'] = await self.consolidate(mod_df.groupby(['TechniqueName', 'DetectionType']).size().to_dict())
data['total'] = tmp.groupby('DetectionType').count()['DetectionNote'].to_dict()
data['technique_mod'] = await self.consolidate(mod_df.groupby(['TechniqueName', 'Modifiers']).size().to_dict())
data['substep'] = await self.consolidate(mod_df.groupby(['Steps.SubStep', 'DetectionType']).size().to_dict())
data['step'] = await self.consolidate(mod_df.groupby(['Step', 'DetectionType']).size().to_dict())
data['tactic'] = await self.consolidate(mod_df.groupby(['Tactic', 'DetectionType']).size().to_dict())
data['tactic_steps'] = await self.consolidate(mod_df.groupby(['Tactic', 'Step', 'DetectionType']).size().to_dict())
data['step_modifiers'] = await self.consolidate(
mod_df.groupby(['Step', 'Modifiers']).size().to_dict())
return data
async def get_data(self, criteria):
eval_name = criteria['eval']
if 'category' in criteria.keys():
return self.ram['evaluations'][eval_name]['data'][criteria['data']][criteria['category']]
else:
return self.ram['evaluations'][eval_name]['data'][criteria['data']]
@staticmethod
async def consolidate(expanded_data):
data = {}
for tp, val in expanded_data.items():
if tp[0] in data.keys():
data[tp[0]].update({tp[1]: val})
else:
data.update({tp[0]: {tp[1]: val}})
return data
async def step_data(self, criteria):
eval_name = criteria['eval']
data = self.ram['evaluations'][eval_name]['data'][criteria['data']]
tmp_org = copy.deepcopy(self.organized)
for key in range(1, 21):
key = str(key)
tmp_org['labels'].append(key)
for cat in self.apt29_categories:
if cat not in data[key].keys():
tmp_org['datasets'][cat].append(0)
else:
tmp_org['datasets'][cat].append(data[key][cat])
return tmp_org
async def substep_data(self, criteria):
eval_name = criteria['eval']
data = self.ram['evaluations'][eval_name]['data'][criteria['data']]
tmp_org = copy.deepcopy(self.organized)
tmp = sorted(data.items(), key=lambda k: int(k[0].split('.')[0]))
for key in tmp:
tmp_org['labels'].append(key[0])
for cat in self.apt29_categories:
if cat not in data[key[0]].keys():
tmp_org['datasets'][cat].append(0)
else:
tmp_org['datasets'][cat].append(data[key[0]][cat])
return tmp_org
async def modifier_data(self, criteria):
eval_name = criteria['eval']
data = self.ram['evaluations'][eval_name]['data'][criteria['data']]
tmp_org = copy.deepcopy(self.organized)
del tmp_org['datasets']['N/A']
apt29_cat = copy.deepcopy(self.apt29_categories)
for key in apt29_cat[:6]:
if key in data.keys():
for mod in self.apt29_modifiers:
if mod not in data[key].keys():
tmp_org['datasets'][key].append(0)
else:
tmp_org['datasets'][key].append(data[key][mod])
else:
for mod in self.apt29_modifiers:
tmp_org['datasets'][key].append(0)
return tmp_org
async def tactic_data(self, criteria):
eval_name = criteria['eval']
data = self.ram['evaluations'][eval_name]['data'][criteria['data']]
return data
|
[
"copy.deepcopy",
"os.path.abspath",
"ntpath.basename",
"pandas.json_normalize"
] |
[((170, 206), 'os.path.abspath', 'os.path.abspath', (['"""data/evaluations/"""'], {}), "('data/evaluations/')\n", (185, 206), False, 'import os\n'), ((229, 264), 'os.path.abspath', 'os.path.abspath', (['"""data/procedures/"""'], {}), "('data/procedures/')\n", (244, 264), False, 'import os\n'), ((1342, 1368), 'copy.deepcopy', 'copy.deepcopy', (['self.schema'], {}), '(self.schema)\n', (1355, 1368), False, 'import copy\n'), ((2767, 2921), 'pandas.json_normalize', 'json_normalize', ([], {'data': "results['Techniques']", 'record_path': "['Steps', detections]", 'meta': "['TechniqueId', 'TechniqueName', 'Tactics', ['Steps', 'SubStep']]"}), "(data=results['Techniques'], record_path=['Steps', detections\n ], meta=['TechniqueId', 'TechniqueName', 'Tactics', ['Steps', 'SubStep']])\n", (2781, 2921), False, 'from pandas import json_normalize\n'), ((5094, 5123), 'copy.deepcopy', 'copy.deepcopy', (['self.organized'], {}), '(self.organized)\n', (5107, 5123), False, 'import copy\n'), ((5664, 5693), 'copy.deepcopy', 'copy.deepcopy', (['self.organized'], {}), '(self.organized)\n', (5677, 5693), False, 'import copy\n'), ((6282, 6311), 'copy.deepcopy', 'copy.deepcopy', (['self.organized'], {}), '(self.organized)\n', (6295, 6311), False, 'import copy\n'), ((6371, 6407), 'copy.deepcopy', 'copy.deepcopy', (['self.apt29_categories'], {}), '(self.apt29_categories)\n', (6384, 6407), False, 'import copy\n'), ((2106, 2132), 'ntpath.basename', 'ntpath.basename', (['procedure'], {}), '(procedure)\n', (2121, 2132), False, 'import ntpath\n'), ((1643, 1670), 'ntpath.basename', 'ntpath.basename', (['evaluation'], {}), '(evaluation)\n', (1658, 1670), False, 'import ntpath\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 23 12:00:26 2015
@author: pre
"""
import mapapi.MapClasses as MapHierarchy
import warnings
class Boiler(MapHierarchy.MapComponent):
"""Representation of AixLib.Fluid.HeatExchangers.Boiler
"""
def init_me(self):
self.fluid_two_port()
self.T_set = self.add_connector(name="T_set", type="Real", \
dimension=1)
return True
def mapp_me(self):
self.target_name += "boiler"
try:
self.target_location = self.mapped_component.getTargetLocation()
prop_list = self.mapped_component.getMappedPropertyList()
self.arrange_parameters(prop_list)
except RuntimeError:
raise("could not apply mapping")
try:
boil_child = self.hierarchy_node.getChildList()
for a in range(boil_child.size()):
if boil_child[a].ClassType() == "SimController_SupplyWater_Temperature":
self.add_constant_flow()
except Exception:
warnings.warn("Could not apply controller to boiler", self)
def add_constant_flow(self):
        '''adds a constant flow temperature for the hot water loop'''
map_sim = self.hierarchy_node.getMappedComponents()
for i in range(map_sim.size()):
if map_sim[i].getTargetLocation() == \
'Modelica.Blocks.Sources.Constant':
map_const = map_sim[i]
from mapapi.molibs.MSL.Blocks.Sources.Constant import Constant
const = Constant(self.project,
self.hierarchy_node,
self)
const.mapped_component = map_const
const.init_me()
const.mapp_me()
const.target_name = "setTemp" + "_" + self.target_name
self.project.mod_components.append(const)
self.add_connection(self.T_set, const.y)
|
[
"warnings.warn",
"mapapi.molibs.MSL.Blocks.Sources.Constant.Constant"
] |
[((1582, 1631), 'mapapi.molibs.MSL.Blocks.Sources.Constant.Constant', 'Constant', (['self.project', 'self.hierarchy_node', 'self'], {}), '(self.project, self.hierarchy_node, self)\n', (1590, 1631), False, 'from mapapi.molibs.MSL.Blocks.Sources.Constant import Constant\n'), ((1082, 1141), 'warnings.warn', 'warnings.warn', (['"""Could not apply controller to boiler"""', 'self'], {}), "('Could not apply controller to boiler', self)\n", (1095, 1141), False, 'import warnings\n')]
|
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Compass test which requires operator place the DUT heading north and south.
"""
import math
from cros.factory.device import device_utils
from cros.factory.test.i18n import _
from cros.factory.test import test_case
from cros.factory.utils.arg_utils import Arg
from cros.factory.utils import sync_utils
from cros.factory.utils import type_utils
_TEST_ITEMS = [(_('north'), (0, 1)), (_('south'), (0, -1))]
_FLASH_STATUS_TIME = 1
class CompassTest(test_case.TestCase):
ARGS = [
Arg('tolerance', int, 'The tolerance in degree.',
default=5),
Arg('location', type_utils.Enum(['base', 'lid']),
'Where the compass is located.',
default='base')
]
def setUp(self):
self.dut = device_utils.CreateDUTInterface()
self.controller = self.dut.magnetometer.GetController(
location=self.args.location)
def runTest(self):
for direction_label, direction in _TEST_ITEMS:
self.ui.SetView('main')
self.ui.SetInstruction(_(
'Put the DUT towards {direction}', direction=direction_label))
sync_utils.PollForCondition(
poll_method=type_utils.BindFunction(self._CheckDirection, direction),
timeout_secs=1000,
poll_interval_secs=0.1)
self.ui.SetView('success')
self.Sleep(_FLASH_STATUS_TIME)
def _CalculateDirection(self, x, y):
"""Calculate the absolute direction of the compass in degree.
Args:
x: X-axis component of the direction. X-axis points towards east.
y: Y-axis component of the direction. Y-axis points towards north.
Returns:
Directed angle relative to north (0, 1), in degree, clockwise.
For example:
North = 0
East = 90
South = 180 = -180
West = -90
"""
rad = math.atan2(x, y)
return rad / math.pi * 180
def _CalculateAngle(self, x1, y1, x2, y2):
"""Calculate the angle between two vectors (x1, y1) and (x2, y2)."""
rad = math.acos(
(x1 * x2 + y1 * y2) / math.hypot(x1, y1) / math.hypot(x2, y2))
return rad / math.pi * 180
def _CheckDirection(self, expected_direction):
values = self.controller.GetData(capture_count=1)
x, y = values['in_magn_x'], values['in_magn_y']
if x == 0 and y == 0:
# atan2(0, 0) returns 0, we need to avoid this case.
self.FailTask('Sensor outputs (0, 0), possibly not working.')
degree = self._CalculateDirection(x, y)
self._UpdateUI(degree=degree, **values)
return (
self._CalculateAngle(x, y, *expected_direction) < self.args.tolerance)
def _UpdateUI(self, degree, in_magn_x, in_magn_y, in_magn_z):
self.ui.SetHTML('%.2f' % degree, id='degree')
self.ui.SetHTML(in_magn_x, id='in-magn-x')
self.ui.SetHTML(in_magn_y, id='in-magn-y')
self.ui.SetHTML(in_magn_z, id='in-magn-z')
self.ui.RunJS('document.getElementById("compass").style.transform = '
'"rotate(%ddeg)";' % degree)
|
[
"cros.factory.utils.arg_utils.Arg",
"math.hypot",
"math.atan2",
"cros.factory.test.i18n._",
"cros.factory.utils.type_utils.Enum",
"cros.factory.utils.type_utils.BindFunction",
"cros.factory.device.device_utils.CreateDUTInterface"
] |
[((532, 542), 'cros.factory.test.i18n._', '_', (['"""north"""'], {}), "('north')\n", (533, 542), False, 'from cros.factory.test.i18n import _\n'), ((554, 564), 'cros.factory.test.i18n._', '_', (['"""south"""'], {}), "('south')\n", (555, 564), False, 'from cros.factory.test.i18n import _\n'), ((658, 718), 'cros.factory.utils.arg_utils.Arg', 'Arg', (['"""tolerance"""', 'int', '"""The tolerance in degree."""'], {'default': '(5)'}), "('tolerance', int, 'The tolerance in degree.', default=5)\n", (661, 718), False, 'from cros.factory.utils.arg_utils import Arg\n'), ((894, 927), 'cros.factory.device.device_utils.CreateDUTInterface', 'device_utils.CreateDUTInterface', ([], {}), '()\n', (925, 927), False, 'from cros.factory.device import device_utils\n'), ((1953, 1969), 'math.atan2', 'math.atan2', (['x', 'y'], {}), '(x, y)\n', (1963, 1969), False, 'import math\n'), ((752, 784), 'cros.factory.utils.type_utils.Enum', 'type_utils.Enum', (["['base', 'lid']"], {}), "(['base', 'lid'])\n", (767, 784), False, 'from cros.factory.utils import type_utils\n'), ((1156, 1219), 'cros.factory.test.i18n._', '_', (['"""Put the DUT towards {direction}"""'], {'direction': 'direction_label'}), "('Put the DUT towards {direction}', direction=direction_label)\n", (1157, 1219), False, 'from cros.factory.test.i18n import _\n'), ((2192, 2210), 'math.hypot', 'math.hypot', (['x2', 'y2'], {}), '(x2, y2)\n', (2202, 2210), False, 'import math\n'), ((1289, 1345), 'cros.factory.utils.type_utils.BindFunction', 'type_utils.BindFunction', (['self._CheckDirection', 'direction'], {}), '(self._CheckDirection, direction)\n', (1312, 1345), False, 'from cros.factory.utils import type_utils\n'), ((2171, 2189), 'math.hypot', 'math.hypot', (['x1', 'y1'], {}), '(x1, y1)\n', (2181, 2189), False, 'import math\n')]
|
# -*- coding: utf-8 -*-
"""
An extension of the pystruct OneSlackSSVM module to have a fit_with_valid
method on it
Copyright Xerox(C) 2016 <NAME>
Developed for the EU project READ. The READ project has received funding
from the European Union's Horizon 2020 research and innovation programme
under grant agreement No 674943.
"""
from time import time
import numpy as np
import cvxopt.solvers
from pystruct.learners import OneSlackSSVM as Pystruct_OneSlackSSVM
from pystruct.learners.one_slack_ssvm import NoConstraint
class OneSlackSSVM(Pystruct_OneSlackSSVM):
"""
Same as its parent with an additional method: fit_with_valid
"""
def __init__(self, model, max_iter=10000, C=1.0, check_constraints=False,
verbose=0, negativity_constraint=None, n_jobs=1,
break_on_bad=False, show_loss_every=0, tol=1e-3,
inference_cache=0, inactive_threshold=1e-5,
inactive_window=50, logger=None, cache_tol='auto',
switch_to=None):
Pystruct_OneSlackSSVM.__init__(self, model, max_iter=max_iter, C=C, check_constraints=check_constraints,
verbose=verbose, negativity_constraint=negativity_constraint, n_jobs=n_jobs,
break_on_bad=break_on_bad, show_loss_every=show_loss_every, tol=tol,
inference_cache=inference_cache, inactive_threshold=inactive_threshold,
inactive_window=inactive_window, logger=logger, cache_tol=cache_tol,
switch_to=switch_to)
def fit_with_valid(self, X, Y, lX_vld, lY_vld, constraints=None
, warm_start=False, initialize=True
, valid_every=50):
"""Learn parameters using cutting plane method.
Parameters
----------
X : iterable
            Training instances. Contains the structured input objects.
No requirement on the particular form of entries of X is made.
Y : iterable
            Training labels. Contains the structured labels for inputs in X.
Needs to have the same length as X.
lX_vld, lY_vld : iterable X and Y validation set
        constraints : ignored
warm_start : bool, default=False
Whether we are warmstarting from a previous fit.
initialize : boolean, default=True
Whether to initialize the model for the data.
Leave this true except if you really know what you are doing.
valid_every : integer. Periodic check with validation set to get best model
"""
best_iteration = -1
try:
self._fit_valid_best_score
print("score of best model: %.6f"%self._fit_valid_best_score)
except:
self._fit_valid_best_score = -99999
if self.verbose:
print("Training 1-slack dual structural SVM")
cvxopt.solvers.options['show_progress'] = self.verbose > 3
if initialize:
self.model.initialize(X, Y)
# parse cache_tol parameter
if self.cache_tol is None or self.cache_tol == 'auto':
self.cache_tol_ = self.tol
else:
self.cache_tol_ = self.cache_tol
if not warm_start:
self.w = np.zeros(self.model.size_joint_feature)
constraints = []
self.objective_curve_, self.primal_objective_curve_ = [], []
self.cached_constraint_ = []
self.alphas = [] # dual solutions
# append constraint given by ground truth to make our life easier
constraints.append((np.zeros(self.model.size_joint_feature), 0))
self.alphas.append([self.C])
self.inference_cache_ = None
self.timestamps_ = [time()]
elif warm_start == "soft":
self.w = np.zeros(self.model.size_joint_feature)
constraints = []
self.alphas = [] # dual solutions
# append constraint given by ground truth to make our life easier
constraints.append((np.zeros(self.model.size_joint_feature), 0))
self.alphas.append([self.C])
else:
constraints = self.constraints_
self.last_slack_ = -1
# get the joint_feature of the ground truth
if getattr(self.model, 'rescale_C', False):
joint_feature_gt = self.model.batch_joint_feature(X, Y, Y)
else:
joint_feature_gt = self.model.batch_joint_feature(X, Y)
try:
# catch ctrl+c to stop training
for iteration in range(self.max_iter):
# main loop
cached_constraint = False
if self.verbose > 0:
print("----- %d -----"%iteration)
if self.verbose > 2:
print(self)
try:
Y_hat, djoint_feature, loss_mean = self._constraint_from_cache(
X, Y, joint_feature_gt, constraints)
cached_constraint = True
except NoConstraint:
try:
Y_hat, djoint_feature, loss_mean = self._find_new_constraint(
X, Y, joint_feature_gt, constraints)
self._update_cache(X, Y, Y_hat)
except NoConstraint:
if self.verbose:
print("no additional constraints")
if (self.switch_to is not None
and self.model.inference_method !=
self.switch_to):
if self.verbose:
print(("Switching to %s inference" %
str(self.switch_to)))
self.model.inference_method_ = \
self.model.inference_method
self.model.inference_method = self.switch_to
continue
else:
break
self.timestamps_.append(time() - self.timestamps_[0])
self._compute_training_loss(X, Y, iteration)
constraints.append((djoint_feature, loss_mean))
# compute primal objective
last_slack = -np.dot(self.w, djoint_feature) + loss_mean
primal_objective = (self.C * len(X)
* max(last_slack, 0)
+ np.sum(self.w ** 2) / 2)
self.primal_objective_curve_.append(primal_objective)
self.cached_constraint_.append(cached_constraint)
objective = self._solve_1_slack_qp(constraints,
n_samples=len(X))
# update cache tolerance if cache_tol is auto:
if self.cache_tol == "auto" and not cached_constraint:
self.cache_tol_ = (primal_objective - objective) / 4
self.last_slack_ = np.max([(-np.dot(self.w, djoint_feature) + loss_mean)
for djoint_feature, loss_mean in constraints])
self.last_slack_ = max(self.last_slack_, 0)
if self.verbose > 0:
# the cutting plane objective can also be computed as
# self.C * len(X) * self.last_slack_ + np.sum(self.w**2)/2
print(("cutting plane objective: %f, primal objective %f"
% (objective, primal_objective)))
# we only do this here because we didn't add the gt to the
# constraints, which makes the dual behave a bit oddly
self.objective_curve_.append(objective)
self.constraints_ = constraints
if self.logger is not None:
if iteration % valid_every == 0:
cur_score = self.score(lX_vld, lY_vld)
#print(self._fit_valid_best_score, cur_score)
if cur_score > self._fit_valid_best_score:
best_iteration = iteration
self._fit_valid_best_score = cur_score
self.logger(self, 'final')
if self.verbose > 0: print("Current model is best with validation score=%.6f" % self._fit_valid_best_score)
else:
# we save the last model, even if it is not the best, in case of warm start
self.logger.save(self, self.logger.file_name + "._last_")
print("Current validation score=%.6f (best=%.6f at iteration %d)" % (cur_score, self._fit_valid_best_score, best_iteration))
if self.verbose > 5:
print((self.w))
except KeyboardInterrupt:
pass
if self.verbose and self.n_jobs == 1:
print(("calls to inference: %d" % self.model.inference_calls))
# compute final objective:
self.timestamps_.append(time() - self.timestamps_[0])
primal_objective = self._objective(X, Y)
self.primal_objective_curve_.append(primal_objective)
self.objective_curve_.append(objective)
self.cached_constraint_.append(False)
if self.logger is not None:
cur_score = self.score(lX_vld, lY_vld)
# print("finished ", self._fit_valid_best_score, cur_score)
if cur_score > self._fit_valid_best_score:
self._fit_valid_best_score = cur_score
best_iteration = iteration
self.logger(self, 'final')
if self.verbose > 0: print("Best model saved at iteration %d: validation score=%.6f" % (best_iteration, self._fit_valid_best_score))
if self.verbose > 0:
print(("final primal objective: %f gap: %f (validation score: %.6f)"
% (primal_objective, primal_objective - objective, cur_score)))
return self
|
[
"numpy.sum",
"numpy.zeros",
"time.time",
"pystruct.learners.OneSlackSSVM.__init__",
"numpy.dot"
] |
[((1079, 1515), 'pystruct.learners.OneSlackSSVM.__init__', 'Pystruct_OneSlackSSVM.__init__', (['self', 'model'], {'max_iter': 'max_iter', 'C': 'C', 'check_constraints': 'check_constraints', 'verbose': 'verbose', 'negativity_constraint': 'negativity_constraint', 'n_jobs': 'n_jobs', 'break_on_bad': 'break_on_bad', 'show_loss_every': 'show_loss_every', 'tol': 'tol', 'inference_cache': 'inference_cache', 'inactive_threshold': 'inactive_threshold', 'inactive_window': 'inactive_window', 'logger': 'logger', 'cache_tol': 'cache_tol', 'switch_to': 'switch_to'}), '(self, model, max_iter=max_iter, C=C,\n check_constraints=check_constraints, verbose=verbose,\n negativity_constraint=negativity_constraint, n_jobs=n_jobs,\n break_on_bad=break_on_bad, show_loss_every=show_loss_every, tol=tol,\n inference_cache=inference_cache, inactive_threshold=inactive_threshold,\n inactive_window=inactive_window, logger=logger, cache_tol=cache_tol,\n switch_to=switch_to)\n', (1109, 1515), True, 'from pystruct.learners import OneSlackSSVM as Pystruct_OneSlackSSVM\n'), ((3373, 3412), 'numpy.zeros', 'np.zeros', (['self.model.size_joint_feature'], {}), '(self.model.size_joint_feature)\n', (3381, 3412), True, 'import numpy as np\n'), ((3872, 3878), 'time.time', 'time', ([], {}), '()\n', (3876, 3878), False, 'from time import time\n'), ((3936, 3975), 'numpy.zeros', 'np.zeros', (['self.model.size_joint_feature'], {}), '(self.model.size_joint_feature)\n', (3944, 3975), True, 'import numpy as np\n'), ((9334, 9340), 'time.time', 'time', ([], {}), '()\n', (9338, 9340), False, 'from time import time\n'), ((3713, 3752), 'numpy.zeros', 'np.zeros', (['self.model.size_joint_feature'], {}), '(self.model.size_joint_feature)\n', (3721, 3752), True, 'import numpy as np\n'), ((4162, 4201), 'numpy.zeros', 'np.zeros', (['self.model.size_joint_feature'], {}), '(self.model.size_joint_feature)\n', (4170, 4201), True, 'import numpy as np\n'), ((6241, 6247), 'time.time', 'time', ([], {}), '()\n', (6245, 6247), False, 'from time import time\n'), ((6470, 6500), 'numpy.dot', 'np.dot', (['self.w', 'djoint_feature'], {}), '(self.w, djoint_feature)\n', (6476, 6500), True, 'import numpy as np\n'), ((6660, 6679), 'numpy.sum', 'np.sum', (['(self.w ** 2)'], {}), '(self.w ** 2)\n', (6666, 6679), True, 'import numpy as np\n'), ((7209, 7239), 'numpy.dot', 'np.dot', (['self.w', 'djoint_feature'], {}), '(self.w, djoint_feature)\n', (7215, 7239), True, 'import numpy as np\n')]
|
__copyright__ = "Copyright (C) 2020 <NAME>"
__license__ = """
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import numpy as np
import sys
import mlir.run as mlirrun
import pytest
from pytools.prefork import ExecError
def is_mlir_opt_present():
try:
mlirrun.get_mlir_opt_version()
return True
except ExecError:
return False
@pytest.mark.skipif(not is_mlir_opt_present(), reason="mlir-opt not found")
def test_add():
source = """
#identity = affine_map<(i,j) -> (i,j)>
#attrs = {
indexing_maps = [#identity, #identity, #identity],
iterator_types = ["parallel", "parallel"]
}
func @example(%A: memref<?x?xf64>, %B: memref<?x?xf64>, %C: memref<?x?xf64>) {
linalg.generic #attrs ins(%A, %B: memref<?x?xf64>, memref<?x?xf64>) outs(%C: memref<?x?xf64>) {
^bb0(%a: f64, %b: f64, %c: f64):
%d = addf %a, %b : f64
linalg.yield %d : f64
}
return
}"""
source = mlirrun.mlir_opt(source, ["-convert-linalg-to-loops",
"-convert-scf-to-std"])
a = np.random.rand(10, 10)
b = np.random.rand(10, 10)
c = np.empty_like(a)
mlirrun.call_function(source, "example", [a, b, c])
np.testing.assert_allclose(c, a+b)
@pytest.mark.skipif(not is_mlir_opt_present(), reason="mlir-opt not found")
def test_axpy():
source = """
func @saxpy(%a : f32, %x : memref<?xf32>, %y : memref<?xf32>) {
%c0 = constant 0: index
%n = dim %x, %c0 : memref<?xf32>
affine.for %i = 0 to %n {
%xi = affine.load %x[%i] : memref<?xf32>
%axi = mulf %a, %xi : f32
%yi = affine.load %y[%i] : memref<?xf32>
%axpyi = addf %yi, %axi : f32
affine.store %axpyi, %y[%i] : memref<?xf32>
}
return
}"""
source = mlirrun.mlir_opt(source, ["-lower-affine",
"-convert-scf-to-std"])
alpha = np.float32(np.random.rand())
x_in = np.random.rand(10).astype(np.float32)
y_in = np.random.rand(10).astype(np.float32)
y_out = y_in.copy()
mlirrun.call_function(source, "saxpy", [alpha, x_in, y_out])
np.testing.assert_allclose(y_out, alpha*x_in+y_in)
if __name__ == "__main__":
if len(sys.argv) > 1:
exec(sys.argv[1])
else:
from pytest import main
main([__file__])
|
[
"mlir.run.call_function",
"numpy.empty_like",
"mlir.run.mlir_opt",
"pytest.main",
"mlir.run.get_mlir_opt_version",
"numpy.random.rand",
"numpy.testing.assert_allclose"
] |
[((2362, 2439), 'mlir.run.mlir_opt', 'mlirrun.mlir_opt', (['source', "['-convert-linalg-to-loops', '-convert-scf-to-std']"], {}), "(source, ['-convert-linalg-to-loops', '-convert-scf-to-std'])\n", (2378, 2439), True, 'import mlir.run as mlirrun\n'), ((2487, 2509), 'numpy.random.rand', 'np.random.rand', (['(10)', '(10)'], {}), '(10, 10)\n', (2501, 2509), True, 'import numpy as np\n'), ((2518, 2540), 'numpy.random.rand', 'np.random.rand', (['(10)', '(10)'], {}), '(10, 10)\n', (2532, 2540), True, 'import numpy as np\n'), ((2549, 2565), 'numpy.empty_like', 'np.empty_like', (['a'], {}), '(a)\n', (2562, 2565), True, 'import numpy as np\n'), ((2571, 2622), 'mlir.run.call_function', 'mlirrun.call_function', (['source', '"""example"""', '[a, b, c]'], {}), "(source, 'example', [a, b, c])\n", (2592, 2622), True, 'import mlir.run as mlirrun\n'), ((2628, 2664), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['c', '(a + b)'], {}), '(c, a + b)\n', (2654, 2664), True, 'import numpy as np\n'), ((3164, 3230), 'mlir.run.mlir_opt', 'mlirrun.mlir_opt', (['source', "['-lower-affine', '-convert-scf-to-std']"], {}), "(source, ['-lower-affine', '-convert-scf-to-std'])\n", (3180, 3230), True, 'import mlir.run as mlirrun\n'), ((3438, 3498), 'mlir.run.call_function', 'mlirrun.call_function', (['source', '"""saxpy"""', '[alpha, x_in, y_out]'], {}), "(source, 'saxpy', [alpha, x_in, y_out])\n", (3459, 3498), True, 'import mlir.run as mlirrun\n'), ((3504, 3558), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['y_out', '(alpha * x_in + y_in)'], {}), '(y_out, alpha * x_in + y_in)\n', (3530, 3558), True, 'import numpy as np\n'), ((1659, 1689), 'mlir.run.get_mlir_opt_version', 'mlirrun.get_mlir_opt_version', ([], {}), '()\n', (1687, 1689), True, 'import mlir.run as mlirrun\n'), ((3293, 3309), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3307, 3309), True, 'import numpy as np\n'), ((3686, 3702), 'pytest.main', 'main', (['[__file__]'], {}), '([__file__])\n', (3690, 3702), False, 'from pytest import main\n'), ((3322, 3340), 'numpy.random.rand', 'np.random.rand', (['(10)'], {}), '(10)\n', (3336, 3340), True, 'import numpy as np\n'), ((3371, 3389), 'numpy.random.rand', 'np.random.rand', (['(10)'], {}), '(10)\n', (3385, 3389), True, 'import numpy as np\n')]
|
# generated by shell2pipe on 2016-03-15
import os
import plugin
plugin_class='mkdir1'
class mkdir1(plugin.AriadneOp):
name='mkdir1'
def run(self, args):
os.system('mkdir tmp')
|
[
"os.system"
] |
[((174, 196), 'os.system', 'os.system', (['"""mkdir tmp"""'], {}), "('mkdir tmp')\n", (183, 196), False, 'import os\n')]
|
"""Tests of the atom behaviour."""
# -*- coding: UTF-8 -*-
import base64
import datetime
import textwrap
import unittest
import pytest
import flask.ext.webtest
import mock
import webtest.app
import dnstwister
from dnstwister import tools
import patches
@mock.patch('dnstwister.repository.db', patches.SimpleKVDatabase())
def test_unicode_atom(webapp):
"""Unicode should just work too, this is just a sanity check."""
unicode_domain = 'xn--plnt-1na.com'.decode('idna') # 'plànt.com'
get_path = tools.encode_domain(unicode_domain)
with pytest.raises(webtest.app.AppError) as err:
webapp.get('/atom/{}'.format(get_path))
assert '404 NOT FOUND' in err.value.message
assert 'New RSS feed generation currently disabled.' in err.value.message
def test_atom_feeds_validate_domain(webapp):
"""Test that the validation checks for valid domains before creating
feeds.
"""
with pytest.raises(webtest.app.AppError) as err:
webapp.get('/atom/324u82938798swefsdf')
assert '400 BAD REQUEST' in err.value.message
# TODO: Update to pytest-style.
class TestAtom(unittest.TestCase):
"""Tests of the atom feed behaviour."""
def setUp(self):
"""Set up the app for testing."""
# Create a webtest Test App for use
self.app = flask.ext.webtest.TestApp(dnstwister.app)
# Clear the webapp cache
dnstwister.cache.clear()
@mock.patch('dnstwister.repository.db', patches.SimpleKVDatabase())
def test_new_feed(self):
"""Tests the registration of a new feed - currently disabled."""
repository = dnstwister.repository
# We need a domain to get the feed for.
domain = 'www.example.com'
# A feed is registered by trying to load it (and it not already being
# registered).
with pytest.raises(webtest.app.AppError) as err:
res = self.app.get('/atom/{}'.format(base64.b64encode(domain))).follow()
assert '404 NOT FOUND' in err.value.message
assert 'New RSS feed generation currently disabled.' in err.value.message
@mock.patch('dnstwister.repository.db', patches.SimpleKVDatabase())
def test_deleted_items_appear_in_rss(self):
"""Tests that deleted items in delta reports appear in the RSS.
"""
repository = dnstwister.repository
# We need a domain to get the feed for.
domain = 'www.example.com'
repository.register_domain(domain)
# We can calculate a delta though.
update_date = datetime.datetime(2016, 2, 28, 11, 10, 34)
repository.update_delta_report(
domain, {
'new': [('www.examp1e.com', '127.0.0.1')],
'updated': [('wwwexa.mple.com', '127.0.0.1', '127.0.0.2')],
'deleted': ['www.eeexample.com', 'www2.example.com.au'],
},
update_date
)
# Clear the webapp cache
dnstwister.cache.clear()
res = self.app.get('/atom/{}'.format(base64.b64encode(domain))).follow()
assert str(res) == textwrap.dedent("""
Response: 200 OK
Content-Type: application/atom+xml; charset=utf-8
<?xml version="1.0" encoding="utf-8"?>
<feed xmlns="http://www.w3.org/2005/Atom">
<title type="text">dnstwister report for www.example.com</title>
<id>http://localhost:80/atom/7777772e6578616d706c652e636f6d</id>
<updated>2016-02-28T11:10:34Z</updated>
<link href="http://localhost:80/search/7777772e6578616d706c652e636f6d" />
<link href="http://localhost:80/atom/7777772e6578616d706c652e636f6d" rel="self" />
<generator>Werkzeug</generator>
<entry xml:base="http://localhost:80/atom/7777772e6578616d706c652e636f6d">
<title type="text">NEW: www.examp1e.com</title>
<id>new:www.examp1e.com:127.0.0.1:1456657834.0</id>
<updated>2016-02-28T11:10:34Z</updated>
<published>2016-02-28T11:10:34Z</published>
<link href="http://localhost:80/search/7777772e6578616d706c652e636f6d" />
<author>
<name>dnstwister</name>
</author>
<content type="html"><h1>IP: 127.0.0.1</h1>
<a href="https://dnstwister.report/analyse/7777772e6578616d7031652e636f6d">analyse</a></content>
</entry>
<entry xml:base="http://localhost:80/atom/7777772e6578616d706c652e636f6d">
<title type="text">UPDATED: wwwexa.mple.com</title>
<id>updated:wwwexa.mple.com:127.0.0.1:127.0.0.2:1456657834.0</id>
<updated>2016-02-28T11:10:34Z</updated>
<published>2016-02-28T11:10:34Z</published>
<link href="http://localhost:80/search/7777772e6578616d706c652e636f6d" />
<author>
<name>dnstwister</name>
</author>
<content type="html"><h1>IP: 127.0.0.1 &gt; 127.0.0.2</h1>
<a href="https://dnstwister.report/analyse/7777776578612e6d706c652e636f6d">analyse</a></content>
</entry>
<entry xml:base="http://localhost:80/atom/7777772e6578616d706c652e636f6d">
<title type="text">DELETED: www.eeexample.com</title>
<id>deleted:www.eeexample.com:1456657834.0</id>
<updated>2016-02-28T11:10:34Z</updated>
<published>2016-02-28T11:10:34Z</published>
<link href="http://localhost:80/search/7777772e6578616d706c652e636f6d" />
<author>
<name>dnstwister</name>
</author>
</entry>
<entry xml:base="http://localhost:80/atom/7777772e6578616d706c652e636f6d">
<title type="text">DELETED: www2.example.com.au</title>
<id>deleted:www2.example.com.au:1456657834.0</id>
<updated>2016-02-28T11:10:34Z</updated>
<published>2016-02-28T11:10:34Z</published>
<link href="http://localhost:80/search/7777772e6578616d706c652e636f6d" />
<author>
<name>dnstwister</name>
</author>
</entry>
</feed>
""").strip()
@mock.patch('dnstwister.repository.db', patches.SimpleKVDatabase())
def test_feed_reading_is_tracked(self):
"""Tests that reading a feed is logged."""
repository = dnstwister.repository
domain = 'www.example.com'
b64domain = base64.b64encode(domain)
# Read dates are None by default
read_date = repository.delta_report_last_read(domain)
assert read_date is None
# Registering a feed will update the read date
repository.register_domain(domain)
self.app.get('/atom/{}'.format(b64domain)).follow()
read_date = repository.delta_report_last_read(domain)
assert type(read_date) is datetime.datetime
# Manually set the date to an older date so we don't have to 'sleep'
# in the test.
repository.mark_delta_report_as_read(
domain, datetime.datetime(2000, 1, 1, 0, 0, 0)
)
# Clear the webapp cache
dnstwister.cache.clear()
# Reading a feed will update the read date
read_date = repository.delta_report_last_read(domain)
self.app.get('/atom/{}'.format(b64domain)).follow()
read_date2 = repository.delta_report_last_read(domain)
assert read_date2 > read_date
@mock.patch('dnstwister.repository.db', patches.SimpleKVDatabase())
def test_unregister_tidies_database(self):
"""Tests that you can unregister domains."""
repository = dnstwister.repository
domain = 'www.example.com'
b64domain = base64.b64encode(domain)
assert not repository.is_domain_registered(domain)
assert repository.db.data == {}
repository.register_domain(domain)
self.app.get('/atom/{}'.format(b64domain)).follow()
repository.update_delta_report(
domain, {
'new': [('www.examp1e.com', '127.0.0.1')],
'updated': [],
'deleted': [],
},
)
assert repository.is_domain_registered(domain)
assert repository.db.data != {}
repository.unregister_domain(domain)
assert not repository.is_domain_registered(domain)
assert repository.db.data == {}
# TODO: Update to pytest-style.
class TestAtomUnicode(unittest.TestCase):
"""Tests of the atom feed behaviour, with a Unicode domain."""
def setUp(self):
"""Set up the app for testing."""
# Create a webtest Test App for use
self.app = flask.ext.webtest.TestApp(dnstwister.app)
# Clear the webapp cache
dnstwister.cache.clear()
@mock.patch('dnstwister.repository.db', patches.SimpleKVDatabase())
def test_new_feed(self):
"""Tests the registration of a new feed - currently disabled."""
repository = dnstwister.repository
# We need a domain to get the feed for.
domain = u'www.\u0454xample.com'
# A feed is registered by trying to load it (and it not already being
# registered).
with pytest.raises(webtest.app.AppError) as err:
res = self.app.get('/atom/{}'.format(tools.encode_domain(domain)))
assert '404 NOT FOUND' in err.value.message
assert 'New RSS feed generation currently disabled.' in err.value.message
@mock.patch('dnstwister.repository.db', patches.SimpleKVDatabase())
def test_updated_and_deleted_items_appear_in_rss(self):
"""Tests that updated and deleted items in delta reports appear in the
RSS.
"""
repository = dnstwister.repository
# We need a domain to get the feed for.
domain = u'www.\u0454xample.com'
# We can calculate a delta though.
update_date = datetime.datetime(2016, 2, 28, 11, 10, 34)
repository.update_delta_report(
domain, {
'new': [('www.examp1e.com', '127.0.0.1')],
'updated': [(u'www\u0454xa.mple.com', '127.0.0.1', '127.0.0.2')],
'deleted': [u'www.\u0454xampl\u0454.com', 'www2.example.com.au'],
},
update_date
)
# Clear the webapp cache
dnstwister.cache.clear()
repository.register_domain(domain)
res = self.app.get('/atom/{}'.format(tools.encode_domain(domain)))
assert str(res) == textwrap.dedent("""
Response: 200 OK
Content-Type: application/atom+xml; charset=utf-8
<?xml version="1.0" encoding="utf-8"?>
<feed xmlns="http://www.w3.org/2005/Atom">
<title type="text">dnstwister report for www.\xd1\x94xample.com (www.xn--xample-9uf.com)</title>
<id>http://localhost:80/atom/7777772e786e2d2d78616d706c652d3975662e636f6d</id>
<updated>2016-02-28T11:10:34Z</updated>
<link href="http://localhost:80/search/7777772e786e2d2d78616d706c652d3975662e636f6d" />
<link href="http://localhost:80/atom/7777772e786e2d2d78616d706c652d3975662e636f6d" rel="self" />
<generator>Werkzeug</generator>
<entry xml:base="http://localhost:80/atom/7777772e786e2d2d78616d706c652d3975662e636f6d">
<title type="text">NEW: www.examp1e.com</title>
<id>new:www.examp1e.com:127.0.0.1:1456657834.0</id>
<updated>2016-02-28T11:10:34Z</updated>
<published>2016-02-28T11:10:34Z</published>
<link href="http://localhost:80/search/7777772e786e2d2d78616d706c652d3975662e636f6d" />
<author>
<name>dnstwister</name>
</author>
<content type="html"><h1>IP: 127.0.0.1</h1>
<a href="https://dnstwister.report/analyse/7777772e6578616d7031652e636f6d">analyse</a></content>
</entry>
<entry xml:base="http://localhost:80/atom/7777772e786e2d2d78616d706c652d3975662e636f6d">
<title type="text">UPDATED: www\xd1\x94xa.mple.com (xn--wwwxa-d2e.mple.com)</title>
<id>updated:xn--wwwxa-d2e.mple.com:127.0.0.1:127.0.0.2:1456657834.0</id>
<updated>2016-02-28T11:10:34Z</updated>
<published>2016-02-28T11:10:34Z</published>
<link href="http://localhost:80/search/7777772e786e2d2d78616d706c652d3975662e636f6d" />
<author>
<name>dnstwister</name>
</author>
<content type="html"><h1>IP: 127.0.0.1 &gt; 127.0.0.2</h1>
<a href="https://dnstwister.report/analyse/786e2d2d77777778612d6432652e6d706c652e636f6d">analyse</a></content>
</entry>
<entry xml:base="http://localhost:80/atom/7777772e786e2d2d78616d706c652d3975662e636f6d">
<title type="text">DELETED: www.\xd1\x94xampl\xd1\x94.com (www.xn--xampl-91ef.com)</title>
<id>deleted:www.xn--xampl-91ef.com:1456657834.0</id>
<updated>2016-02-28T11:10:34Z</updated>
<published>2016-02-28T11:10:34Z</published>
<link href="http://localhost:80/search/7777772e786e2d2d78616d706c652d3975662e636f6d" />
<author>
<name>dnstwister</name>
</author>
</entry>
<entry xml:base="http://localhost:80/atom/7777772e786e2d2d78616d706c652d3975662e636f6d">
<title type="text">DELETED: www2.example.com.au</title>
<id>deleted:www2.example.com.au:1456657834.0</id>
<updated>2016-02-28T11:10:34Z</updated>
<published>2016-02-28T11:10:34Z</published>
<link href="http://localhost:80/search/7777772e786e2d2d78616d706c652d3975662e636f6d" />
<author>
<name>dnstwister</name>
</author>
</entry>
</feed>
""").strip()
@mock.patch('dnstwister.repository.db', patches.SimpleKVDatabase())
def test_feed_reading_is_tracked(self):
"""Tests that reading a feed is logged."""
repository = dnstwister.repository
domain = u'www.\u0454xample.com'
get_param = tools.encode_domain(domain)
# Read dates are None by default
read_date = repository.delta_report_last_read(domain)
assert read_date is None
# Registering a feed will update the read date
repository.register_domain(domain)
self.app.get('/atom/{}'.format(get_param))
read_date = repository.delta_report_last_read(domain)
assert type(read_date) is datetime.datetime
# Manually set the date to an older date so we don't have to 'sleep'
# in the test.
repository.mark_delta_report_as_read(
domain, datetime.datetime(2000, 1, 1, 0, 0, 0)
)
# Clear the webapp cache
dnstwister.cache.clear()
# Reading a feed will update the read date
read_date = repository.delta_report_last_read(domain)
self.app.get('/atom/{}'.format(get_param))
read_date2 = repository.delta_report_last_read(domain)
assert read_date2 > read_date
@mock.patch('dnstwister.repository.db', patches.SimpleKVDatabase())
def test_unregister_tidies_database(self):
"""Tests that you can unregister domains."""
repository = dnstwister.repository
domain = u'www.\u0454xample.com'
get_param = tools.encode_domain(domain)
assert not repository.is_domain_registered(domain)
assert repository.db.data == {}
repository.register_domain(domain)
repository.update_delta_report(
domain, {
'new': [('www.examp1e.com', '127.0.0.1')],
'updated': [],
'deleted': [],
},
)
assert repository.is_domain_registered(domain)
assert repository.db.data != {}
repository.unregister_domain(domain)
assert not repository.is_domain_registered(domain)
assert repository.db.data == {}
|
[
"textwrap.dedent",
"patches.SimpleKVDatabase",
"dnstwister.cache.clear",
"datetime.datetime",
"dnstwister.tools.encode_domain",
"pytest.raises",
"base64.b64encode"
] |
[((532, 567), 'dnstwister.tools.encode_domain', 'tools.encode_domain', (['unicode_domain'], {}), '(unicode_domain)\n', (551, 567), False, 'from dnstwister import tools\n'), ((314, 340), 'patches.SimpleKVDatabase', 'patches.SimpleKVDatabase', ([], {}), '()\n', (338, 340), False, 'import patches\n'), ((578, 613), 'pytest.raises', 'pytest.raises', (['webtest.app.AppError'], {}), '(webtest.app.AppError)\n', (591, 613), False, 'import pytest\n'), ((954, 989), 'pytest.raises', 'pytest.raises', (['webtest.app.AppError'], {}), '(webtest.app.AppError)\n', (967, 989), False, 'import pytest\n'), ((1435, 1459), 'dnstwister.cache.clear', 'dnstwister.cache.clear', ([], {}), '()\n', (1457, 1459), False, 'import dnstwister\n'), ((1507, 1533), 'patches.SimpleKVDatabase', 'patches.SimpleKVDatabase', ([], {}), '()\n', (1531, 1533), False, 'import patches\n'), ((2609, 2651), 'datetime.datetime', 'datetime.datetime', (['(2016)', '(2)', '(28)', '(11)', '(10)', '(34)'], {}), '(2016, 2, 28, 11, 10, 34)\n', (2626, 2651), False, 'import datetime\n'), ((3024, 3048), 'dnstwister.cache.clear', 'dnstwister.cache.clear', ([], {}), '()\n', (3046, 3048), False, 'import dnstwister\n'), ((2202, 2228), 'patches.SimpleKVDatabase', 'patches.SimpleKVDatabase', ([], {}), '()\n', (2226, 2228), False, 'import patches\n'), ((6816, 6840), 'base64.b64encode', 'base64.b64encode', (['domain'], {}), '(domain)\n', (6832, 6840), False, 'import base64\n'), ((7528, 7552), 'dnstwister.cache.clear', 'dnstwister.cache.clear', ([], {}), '()\n', (7550, 7552), False, 'import dnstwister\n'), ((6588, 6614), 'patches.SimpleKVDatabase', 'patches.SimpleKVDatabase', ([], {}), '()\n', (6612, 6614), False, 'import patches\n'), ((8116, 8140), 'base64.b64encode', 'base64.b64encode', (['domain'], {}), '(domain)\n', (8132, 8140), False, 'import base64\n'), ((7883, 7909), 'patches.SimpleKVDatabase', 'patches.SimpleKVDatabase', ([], {}), '()\n', (7907, 7909), False, 'import patches\n'), ((9183, 9207), 'dnstwister.cache.clear', 'dnstwister.cache.clear', ([], {}), '()\n', (9205, 9207), False, 'import dnstwister\n'), ((9255, 9281), 'patches.SimpleKVDatabase', 'patches.SimpleKVDatabase', ([], {}), '()\n', (9279, 9281), False, 'import patches\n'), ((10352, 10394), 'datetime.datetime', 'datetime.datetime', (['(2016)', '(2)', '(28)', '(11)', '(10)', '(34)'], {}), '(2016, 2, 28, 11, 10, 34)\n', (10369, 10394), False, 'import datetime\n'), ((10782, 10806), 'dnstwister.cache.clear', 'dnstwister.cache.clear', ([], {}), '()\n', (10804, 10806), False, 'import dnstwister\n'), ((9950, 9976), 'patches.SimpleKVDatabase', 'patches.SimpleKVDatabase', ([], {}), '()\n', (9974, 9976), False, 'import patches\n'), ((14899, 14926), 'dnstwister.tools.encode_domain', 'tools.encode_domain', (['domain'], {}), '(domain)\n', (14918, 14926), False, 'from dnstwister import tools\n'), ((15605, 15629), 'dnstwister.cache.clear', 'dnstwister.cache.clear', ([], {}), '()\n', (15627, 15629), False, 'import dnstwister\n'), ((14665, 14691), 'patches.SimpleKVDatabase', 'patches.SimpleKVDatabase', ([], {}), '()\n', (14689, 14691), False, 'import patches\n'), ((16190, 16217), 'dnstwister.tools.encode_domain', 'tools.encode_domain', (['domain'], {}), '(domain)\n', (16209, 16217), False, 'from dnstwister import tools\n'), ((15951, 15977), 'patches.SimpleKVDatabase', 'patches.SimpleKVDatabase', ([], {}), '()\n', (15975, 15977), False, 'import patches\n'), ((1889, 1924), 'pytest.raises', 'pytest.raises', (['webtest.app.AppError'], {}), '(webtest.app.AppError)\n', (1902, 1924), False, 'import pytest\n'), 
((7433, 7471), 'datetime.datetime', 'datetime.datetime', (['(2000)', '(1)', '(1)', '(0)', '(0)', '(0)'], {}), '(2000, 1, 1, 0, 0, 0)\n', (7450, 7471), False, 'import datetime\n'), ((9643, 9678), 'pytest.raises', 'pytest.raises', (['webtest.app.AppError'], {}), '(webtest.app.AppError)\n', (9656, 9678), False, 'import pytest\n'), ((15510, 15548), 'datetime.datetime', 'datetime.datetime', (['(2000)', '(1)', '(1)', '(0)', '(0)', '(0)'], {}), '(2000, 1, 1, 0, 0, 0)\n', (15527, 15548), False, 'import datetime\n'), ((10899, 10926), 'dnstwister.tools.encode_domain', 'tools.encode_domain', (['domain'], {}), '(domain)\n', (10918, 10926), False, 'from dnstwister import tools\n'), ((3161, 6486), 'textwrap.dedent', 'textwrap.dedent', (['"""\n Response: 200 OK\n Content-Type: application/atom+xml; charset=utf-8\n <?xml version="1.0" encoding="utf-8"?>\n <feed xmlns="http://www.w3.org/2005/Atom">\n <title type="text">dnstwister report for www.example.com</title>\n <id>http://localhost:80/atom/7777772e6578616d706c652e636f6d</id>\n <updated>2016-02-28T11:10:34Z</updated>\n <link href="http://localhost:80/search/7777772e6578616d706c652e636f6d" />\n <link href="http://localhost:80/atom/7777772e6578616d706c652e636f6d" rel="self" />\n <generator>Werkzeug</generator>\n <entry xml:base="http://localhost:80/atom/7777772e6578616d706c652e636f6d">\n <title type="text">NEW: www.examp1e.com</title>\n <id>new:www.examp1e.com:127.0.0.1:1456657834.0</id>\n <updated>2016-02-28T11:10:34Z</updated>\n <published>2016-02-28T11:10:34Z</published>\n <link href="http://localhost:80/search/7777772e6578616d706c652e636f6d" />\n <author>\n <name>dnstwister</name>\n </author>\n <content type="html"><h1>IP: 127.0.0.1</h1>\n <a href="https://dnstwister.report/analyse/7777772e6578616d7031652e636f6d">analyse</a></content>\n </entry>\n <entry xml:base="http://localhost:80/atom/7777772e6578616d706c652e636f6d">\n <title type="text">UPDATED: wwwexa.mple.com</title>\n <id>updated:wwwexa.mple.com:127.0.0.1:127.0.0.2:1456657834.0</id>\n <updated>2016-02-28T11:10:34Z</updated>\n <published>2016-02-28T11:10:34Z</published>\n <link href="http://localhost:80/search/7777772e6578616d706c652e636f6d" />\n <author>\n <name>dnstwister</name>\n </author>\n <content type="html"><h1>IP: 127.0.0.1 &gt; 127.0.0.2</h1>\n <a href="https://dnstwister.report/analyse/7777776578612e6d706c652e636f6d">analyse</a></content>\n </entry>\n <entry xml:base="http://localhost:80/atom/7777772e6578616d706c652e636f6d">\n <title type="text">DELETED: www.eeexample.com</title>\n <id>deleted:www.eeexample.com:1456657834.0</id>\n <updated>2016-02-28T11:10:34Z</updated>\n <published>2016-02-28T11:10:34Z</published>\n <link href="http://localhost:80/search/7777772e6578616d706c652e636f6d" />\n <author>\n <name>dnstwister</name>\n </author>\n </entry>\n <entry xml:base="http://localhost:80/atom/7777772e6578616d706c652e636f6d">\n <title type="text">DELETED: www2.example.com.au</title>\n <id>deleted:www2.example.com.au:1456657834.0</id>\n <updated>2016-02-28T11:10:34Z</updated>\n <published>2016-02-28T11:10:34Z</published>\n <link href="http://localhost:80/search/7777772e6578616d706c652e636f6d" />\n <author>\n <name>dnstwister</name>\n </author>\n </entry>\n </feed>\n """'], {}), '(\n """\n Response: 200 OK\n Content-Type: application/atom+xml; charset=utf-8\n <?xml version="1.0" encoding="utf-8"?>\n <feed xmlns="http://www.w3.org/2005/Atom">\n <title type="text">dnstwister report for www.example.com</title>\n <id>http://localhost:80/atom/7777772e6578616d706c652e636f6d</id>\n 
<updated>2016-02-28T11:10:34Z</updated>\n <link href="http://localhost:80/search/7777772e6578616d706c652e636f6d" />\n <link href="http://localhost:80/atom/7777772e6578616d706c652e636f6d" rel="self" />\n <generator>Werkzeug</generator>\n <entry xml:base="http://localhost:80/atom/7777772e6578616d706c652e636f6d">\n <title type="text">NEW: www.examp1e.com</title>\n <id>new:www.examp1e.com:127.0.0.1:1456657834.0</id>\n <updated>2016-02-28T11:10:34Z</updated>\n <published>2016-02-28T11:10:34Z</published>\n <link href="http://localhost:80/search/7777772e6578616d706c652e636f6d" />\n <author>\n <name>dnstwister</name>\n </author>\n <content type="html"><h1>IP: 127.0.0.1</h1>\n <a href="https://dnstwister.report/analyse/7777772e6578616d7031652e636f6d">analyse</a></content>\n </entry>\n <entry xml:base="http://localhost:80/atom/7777772e6578616d706c652e636f6d">\n <title type="text">UPDATED: wwwexa.mple.com</title>\n <id>updated:wwwexa.mple.com:127.0.0.1:127.0.0.2:1456657834.0</id>\n <updated>2016-02-28T11:10:34Z</updated>\n <published>2016-02-28T11:10:34Z</published>\n <link href="http://localhost:80/search/7777772e6578616d706c652e636f6d" />\n <author>\n <name>dnstwister</name>\n </author>\n <content type="html"><h1>IP: 127.0.0.1 &gt; 127.0.0.2</h1>\n <a href="https://dnstwister.report/analyse/7777776578612e6d706c652e636f6d">analyse</a></content>\n </entry>\n <entry xml:base="http://localhost:80/atom/7777772e6578616d706c652e636f6d">\n <title type="text">DELETED: www.eeexample.com</title>\n <id>deleted:www.eeexample.com:1456657834.0</id>\n <updated>2016-02-28T11:10:34Z</updated>\n <published>2016-02-28T11:10:34Z</published>\n <link href="http://localhost:80/search/7777772e6578616d706c652e636f6d" />\n <author>\n <name>dnstwister</name>\n </author>\n </entry>\n <entry xml:base="http://localhost:80/atom/7777772e6578616d706c652e636f6d">\n <title type="text">DELETED: www2.example.com.au</title>\n <id>deleted:www2.example.com.au:1456657834.0</id>\n <updated>2016-02-28T11:10:34Z</updated>\n <published>2016-02-28T11:10:34Z</published>\n <link href="http://localhost:80/search/7777772e6578616d706c652e636f6d" />\n <author>\n <name>dnstwister</name>\n </author>\n </entry>\n </feed>\n """\n )\n', (3176, 6486), False, 'import textwrap\n'), ((9737, 9764), 'dnstwister.tools.encode_domain', 'tools.encode_domain', (['domain'], {}), '(domain)\n', (9756, 9764), False, 'from dnstwister import tools\n'), ((10957, 14539), 'textwrap.dedent', 'textwrap.dedent', (['"""\n Response: 200 OK\n Content-Type: application/atom+xml; charset=utf-8\n <?xml version="1.0" encoding="utf-8"?>\n <feed xmlns="http://www.w3.org/2005/Atom">\n <title type="text">dnstwister report for www.Ñ\x94xample.com (www.xn--xample-9uf.com)</title>\n <id>http://localhost:80/atom/7777772e786e2d2d78616d706c652d3975662e636f6d</id>\n <updated>2016-02-28T11:10:34Z</updated>\n <link href="http://localhost:80/search/7777772e786e2d2d78616d706c652d3975662e636f6d" />\n <link href="http://localhost:80/atom/7777772e786e2d2d78616d706c652d3975662e636f6d" rel="self" />\n <generator>Werkzeug</generator>\n <entry xml:base="http://localhost:80/atom/7777772e786e2d2d78616d706c652d3975662e636f6d">\n <title type="text">NEW: www.examp1e.com</title>\n <id>new:www.examp1e.com:127.0.0.1:1456657834.0</id>\n <updated>2016-02-28T11:10:34Z</updated>\n <published>2016-02-28T11:10:34Z</published>\n <link href="http://localhost:80/search/7777772e786e2d2d78616d706c652d3975662e636f6d" />\n <author>\n <name>dnstwister</name>\n </author>\n <content type="html"><h1>IP: 127.0.0.1</h1>\n <a 
href="https://dnstwister.report/analyse/7777772e6578616d7031652e636f6d">analyse</a></content>\n </entry>\n <entry xml:base="http://localhost:80/atom/7777772e786e2d2d78616d706c652d3975662e636f6d">\n <title type="text">UPDATED: wwwÑ\x94xa.mple.com (xn--wwwxa-d2e.mple.com)</title>\n <id>updated:xn--wwwxa-d2e.mple.com:127.0.0.1:127.0.0.2:1456657834.0</id>\n <updated>2016-02-28T11:10:34Z</updated>\n <published>2016-02-28T11:10:34Z</published>\n <link href="http://localhost:80/search/7777772e786e2d2d78616d706c652d3975662e636f6d" />\n <author>\n <name>dnstwister</name>\n </author>\n <content type="html"><h1>IP: 127.0.0.1 &gt; 127.0.0.2</h1>\n <a href="https://dnstwister.report/analyse/786e2d2d77777778612d6432652e6d706c652e636f6d">analyse</a></content>\n </entry>\n <entry xml:base="http://localhost:80/atom/7777772e786e2d2d78616d706c652d3975662e636f6d">\n <title type="text">DELETED: www.Ñ\x94xamplÑ\x94.com (www.xn--xampl-91ef.com)</title>\n <id>deleted:www.xn--xampl-91ef.com:1456657834.0</id>\n <updated>2016-02-28T11:10:34Z</updated>\n <published>2016-02-28T11:10:34Z</published>\n <link href="http://localhost:80/search/7777772e786e2d2d78616d706c652d3975662e636f6d" />\n <author>\n <name>dnstwister</name>\n </author>\n </entry>\n <entry xml:base="http://localhost:80/atom/7777772e786e2d2d78616d706c652d3975662e636f6d">\n <title type="text">DELETED: www2.example.com.au</title>\n <id>deleted:www2.example.com.au:1456657834.0</id>\n <updated>2016-02-28T11:10:34Z</updated>\n <published>2016-02-28T11:10:34Z</published>\n <link href="http://localhost:80/search/7777772e786e2d2d78616d706c652d3975662e636f6d" />\n <author>\n <name>dnstwister</name>\n </author>\n </entry>\n </feed>\n """'], {}), '(\n """\n Response: 200 OK\n Content-Type: application/atom+xml; charset=utf-8\n <?xml version="1.0" encoding="utf-8"?>\n <feed xmlns="http://www.w3.org/2005/Atom">\n <title type="text">dnstwister report for www.Ñ\x94xample.com (www.xn--xample-9uf.com)</title>\n <id>http://localhost:80/atom/7777772e786e2d2d78616d706c652d3975662e636f6d</id>\n <updated>2016-02-28T11:10:34Z</updated>\n <link href="http://localhost:80/search/7777772e786e2d2d78616d706c652d3975662e636f6d" />\n <link href="http://localhost:80/atom/7777772e786e2d2d78616d706c652d3975662e636f6d" rel="self" />\n <generator>Werkzeug</generator>\n <entry xml:base="http://localhost:80/atom/7777772e786e2d2d78616d706c652d3975662e636f6d">\n <title type="text">NEW: www.examp1e.com</title>\n <id>new:www.examp1e.com:127.0.0.1:1456657834.0</id>\n <updated>2016-02-28T11:10:34Z</updated>\n <published>2016-02-28T11:10:34Z</published>\n <link href="http://localhost:80/search/7777772e786e2d2d78616d706c652d3975662e636f6d" />\n <author>\n <name>dnstwister</name>\n </author>\n <content type="html"><h1>IP: 127.0.0.1</h1>\n <a href="https://dnstwister.report/analyse/7777772e6578616d7031652e636f6d">analyse</a></content>\n </entry>\n <entry xml:base="http://localhost:80/atom/7777772e786e2d2d78616d706c652d3975662e636f6d">\n <title type="text">UPDATED: wwwÑ\x94xa.mple.com (xn--wwwxa-d2e.mple.com)</title>\n <id>updated:xn--wwwxa-d2e.mple.com:127.0.0.1:127.0.0.2:1456657834.0</id>\n <updated>2016-02-28T11:10:34Z</updated>\n <published>2016-02-28T11:10:34Z</published>\n <link href="http://localhost:80/search/7777772e786e2d2d78616d706c652d3975662e636f6d" />\n <author>\n <name>dnstwister</name>\n </author>\n <content type="html"><h1>IP: 127.0.0.1 &gt; 127.0.0.2</h1>\n <a href="https://dnstwister.report/analyse/786e2d2d77777778612d6432652e6d706c652e636f6d">analyse</a></content>\n </entry>\n 
<entry xml:base="http://localhost:80/atom/7777772e786e2d2d78616d706c652d3975662e636f6d">\n <title type="text">DELETED: www.Ñ\x94xamplÑ\x94.com (www.xn--xampl-91ef.com)</title>\n <id>deleted:www.xn--xampl-91ef.com:1456657834.0</id>\n <updated>2016-02-28T11:10:34Z</updated>\n <published>2016-02-28T11:10:34Z</published>\n <link href="http://localhost:80/search/7777772e786e2d2d78616d706c652d3975662e636f6d" />\n <author>\n <name>dnstwister</name>\n </author>\n </entry>\n <entry xml:base="http://localhost:80/atom/7777772e786e2d2d78616d706c652d3975662e636f6d">\n <title type="text">DELETED: www2.example.com.au</title>\n <id>deleted:www2.example.com.au:1456657834.0</id>\n <updated>2016-02-28T11:10:34Z</updated>\n <published>2016-02-28T11:10:34Z</published>\n <link href="http://localhost:80/search/7777772e786e2d2d78616d706c652d3975662e636f6d" />\n <author>\n <name>dnstwister</name>\n </author>\n </entry>\n </feed>\n """\n )\n', (10972, 14539), False, 'import textwrap\n'), ((3097, 3121), 'base64.b64encode', 'base64.b64encode', (['domain'], {}), '(domain)\n', (3113, 3121), False, 'import base64\n'), ((1983, 2007), 'base64.b64encode', 'base64.b64encode', (['domain'], {}), '(domain)\n', (1999, 2007), False, 'import base64\n')]
|
import random
import numpy as np
import scipy.stats as sps
import torch
import torch.utils.data as tud
import torch.nn.utils as tnnu
import models.dataset as md
import utils.tensorboard as utb
import utils.scaffold as usc
class Action:
def __init__(self, logger=None):
"""
(Abstract) Initializes an action.
:param logger: An optional logger instance.
"""
self.logger = logger
def _log(self, level, msg, *args):
"""
Logs a message with the class logger.
:param level: Log level.
:param msg: Message to log.
        :param *args: Arguments to interpolate into the message.
:return:
"""
if self.logger:
getattr(self.logger, level)(msg, *args)
class TrainModelPostEpochHook(Action):
def __init__(self, logger=None):
"""
Initializes a training hook that runs after every epoch.
        This hook makes it possible to save the model, change the LR, etc. during training.
:return:
"""
Action.__init__(self, logger)
def run(self, model, training_set, epoch): # pylint: disable=unused-argument
"""
        Performs the post-epoch hook. Note that the model should be modified in-place.
:param model: Model instance trained up to that epoch.
:param training_set: List of SMILES used as the training set.
:param epoch: Epoch number (for logging purposes).
:return: Boolean that indicates whether the training should continue or not.
"""
return True # simply does nothing...
class TrainModel(Action):
def __init__(self, model, optimizer, training_sets, batch_size, clip_gradient,
epochs, post_epoch_hook=None, logger=None):
"""
        Initializes the training of the model over the given number of epochs.
        :param model: A model instance, not loaded in sampling mode.
        :param optimizer: The optimizer instance, already initialized on the model.
        :param training_sets: An iterator over the training sets of (scaffold, decoration) pairs.
        :param batch_size: Batch size to use.
        :param clip_gradient: Clip the gradients after each backpropagation.
        :return:
"""
Action.__init__(self, logger)
self.model = model
self.optimizer = optimizer
self.training_sets = training_sets
self.batch_size = batch_size
self.epochs = epochs
self.clip_gradient = clip_gradient
if not post_epoch_hook:
self.post_epoch_hook = TrainModelPostEpochHook(logger=self.logger)
else:
self.post_epoch_hook = post_epoch_hook
def run(self):
"""
        Performs the training epochs with the parameters given to the constructor.
        :return: An iterator of (total_batches, epoch_iterator), where the epoch iterator
                 yields the loss of each batch in the epoch.
"""
for epoch, training_set in zip(range(1, self.epochs + 1), self.training_sets):
dataloader = self._initialize_dataloader(training_set)
epoch_iterator = self._epoch_iterator(dataloader)
yield len(dataloader), epoch_iterator
self.model.set_mode("eval")
post_epoch_status = self.post_epoch_hook.run(self.model, training_set, epoch)
self.model.set_mode("train")
if not post_epoch_status:
break
def _epoch_iterator(self, dataloader):
for scaffold_batch, decorator_batch in dataloader:
loss = self.model.likelihood(*scaffold_batch, *decorator_batch).mean()
self.optimizer.zero_grad()
loss.backward()
if self.clip_gradient > 0:
tnnu.clip_grad_norm_(self.model.network.parameters(), self.clip_gradient)
self.optimizer.step()
yield loss
def _initialize_dataloader(self, training_set):
dataset = md.DecoratorDataset(training_set, vocabulary=self.model.vocabulary)
return tud.DataLoader(dataset, batch_size=self.batch_size, shuffle=True,
collate_fn=md.DecoratorDataset.collate_fn, drop_last=True)
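# Illustrative sketch (hypothetical helper, not used elsewhere in this module): shows one
# way to consume the nested iterator returned by TrainModel.run(). The `model`, `optimizer`
# and `training_sets` arguments are assumed to be built by the caller.
def _example_train_loop(model, optimizer, training_sets, epochs=10):
    trainer = TrainModel(model=model, optimizer=optimizer, training_sets=training_sets,
                       batch_size=128, clip_gradient=1.0, epochs=epochs)
    for total_batches, epoch_iterator in trainer.run():
        for step, loss in enumerate(epoch_iterator):
            # each yielded value is the mean NLL tensor of one batch
            if step % 100 == 0:
                print("batch %d/%d loss: %.4f" % (step, total_batches, loss.item()))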
class CollectStatsFromModel(Action):
"""Collects stats from an existing RNN model."""
def __init__(self, model, epoch, training_set, validation_set, writer, sample_size,
decoration_type="single", with_weights=False, other_values=None, logger=None):
"""
Creates an instance of CollectStatsFromModel.
        :param model: A model instance initialized in sampling mode.
        :param epoch: Epoch number to be sampled (informative purposes).
        :param training_set: Iterator with the training set.
        :param validation_set: Iterator with the validation set.
        :param writer: Writer object (TensorBoard writer).
        :param other_values: Other values to save for the epoch.
        :param sample_size: Number of molecules to sample from the training / validation / sample set.
        :param decoration_type: Kind of decorations ("single" or "all").
        :param with_weights: Whether or not to calculate the weight stats.
        :return:
"""
Action.__init__(self, logger)
self.model = model
self.epoch = epoch
self.sample_size = sample_size
self.training_set = training_set
self.validation_set = validation_set
self.writer = writer
self.other_values = other_values
self.decoration_type = decoration_type
self.with_weights = with_weights
self.sample_size = max(sample_size, 1)
self.data = {}
self._calc_nlls_action = CalculateNLLsFromModel(self.model, 128, self.logger)
self._sample_model_action = SampleModel(self.model, 128, self.logger)
@torch.no_grad()
def run(self):
"""
Collects stats for a specific model object, epoch, validation set, training set and writer object.
: return: A dictionary with all the data saved for that given epoch.
"""
self._log("info", "Collecting data for epoch %s", self.epoch)
self.data = {}
self._log("debug", "Slicing training and validation sets")
sliced_training_set = list(random.sample(self.training_set, self.sample_size))
sliced_validation_set = list(random.sample(self.validation_set, self.sample_size))
self._log("debug", "Sampling decorations for both sets")
sampled_training_mols, sampled_training_nlls = self._sample_decorations(next(zip(*sliced_training_set)))
sampled_validation_mols, sampled_validation_nlls = self._sample_decorations(next(zip(*sliced_validation_set)))
self._log("debug", "Calculating NLLs for the validation and training sets")
training_nlls = np.array(list(self._calc_nlls_action.run(sliced_training_set)))
validation_nlls = np.array(list(self._calc_nlls_action.run(sliced_validation_set)))
if self.with_weights:
self._log("debug", "Calculating weight stats")
self._weight_stats()
self._log("debug", "Calculating nll stats")
self._nll_stats(sampled_training_nlls, sampled_validation_nlls, training_nlls, validation_nlls)
self._log("debug", "Calculating validity stats")
self._valid_stats(sampled_training_mols, "training")
self._valid_stats(sampled_validation_mols, "validation")
self._log("debug", "Drawing some molecules")
self._draw_mols(sampled_training_mols, "training")
self._draw_mols(sampled_validation_mols, "validation")
if self.other_values:
self._log("debug", "Adding other values")
for name, val in self.other_values.items():
self._add_scalar(name, val)
return self.data
def _sample_decorations(self, scaffold_list):
mols = []
nlls = []
for scaff, decoration, nll in self._sample_model_action.run(scaffold_list):
if self.decoration_type == "single":
mol = usc.join_first_attachment(scaff, decoration)
elif self.decoration_type == "all":
mol = usc.join_joined_attachments(scaff, decoration)
if mol:
mols.append(mol)
nlls.append(nll)
return (mols, np.array(nlls))
def _valid_stats(self, mols, name):
self._add_scalar("valid_{}".format(name), 100.0*len(mols)/self.sample_size)
def _weight_stats(self):
for name, weights in self.model.network.named_parameters():
self._add_histogram("weights/{}".format(name), weights.clone().cpu().data.numpy())
def _nll_stats(self, sampled_training_nlls, sampled_validation_nlls, training_nlls, validation_nlls):
self._add_histogram("nll_plot/sampled_training", sampled_training_nlls)
self._add_histogram("nll_plot/sampled_validation", sampled_validation_nlls)
self._add_histogram("nll_plot/validation", validation_nlls)
self._add_histogram("nll_plot/training", training_nlls)
self._add_scalars("nll/avg", {
"sampled_training": sampled_training_nlls.mean(),
"sampled_validation": sampled_validation_nlls.mean(),
"validation": validation_nlls.mean(),
"training": training_nlls.mean()
})
self._add_scalars("nll/var", {
"sampled_training": sampled_training_nlls.var(),
"sampled_validation": sampled_validation_nlls.var(),
"validation": validation_nlls.var(),
"training": training_nlls.var()
})
def bin_dist(dist, bins=1000, dist_range=(0, 100)):
bins = np.histogram(dist, bins=bins, range=dist_range, density=False)[0]
bins[bins == 0] = 1
return bins / bins.sum()
        def jsd(dists, binned=False):  # note: the dists may or may not already be binned
# get the min size of each dist
min_size = min(len(dist) for dist in dists)
dists = [dist[:min_size] for dist in dists]
if binned:
dists = [bin_dist(dist) for dist in dists]
num_dists = len(dists)
avg_dist = np.sum(dists, axis=0) / num_dists
return np.sum([sps.entropy(dist, avg_dist) for dist in dists]) / num_dists
self._add_scalar("nll_plot/jsd_joined_bins",
jsd([sampled_training_nlls, sampled_validation_nlls,
training_nlls, validation_nlls], binned=True))
self._add_scalar("nll_plot/jsd_joined_no_bins",
jsd([sampled_training_nlls, sampled_validation_nlls,
training_nlls, validation_nlls]))
def _draw_mols(self, mols, name):
try:
utb.add_mols(self.writer, "molecules_{}".format(name), random.sample(
mols, 16), mols_per_row=4, global_step=self.epoch)
except ValueError:
pass
def _add_scalar(self, key, val):
self.data[key] = val
self.writer.add_scalar(key, val, self.epoch)
def _add_scalars(self, key, dict_vals):
for k, val in dict_vals.items():
self.data["{}.{}".format(key, k)] = val
self.writer.add_scalars(key, dict_vals, self.epoch)
def _add_histogram(self, key, vals):
self.data[key] = vals
self.writer.add_histogram(key, vals, self.epoch)
class SampleModel(Action):
def __init__(self, model, batch_size, logger=None):
"""
Creates an instance of SampleModel.
        :param model: A model instance (preferably in sampling mode).
        :param batch_size: Batch size to use.
:return:
"""
Action.__init__(self, logger)
self.model = model
self.batch_size = batch_size
def run(self, scaffold_list):
"""
        Samples decorations from the model for the given scaffold SMILES.
        :param scaffold_list: A list of scaffold SMILES.
:return: An iterator with each of the batches sampled in (scaffold, decoration, nll) triplets.
"""
dataset = md.Dataset(scaffold_list, self.model.vocabulary.scaffold_vocabulary,
self.model.vocabulary.scaffold_tokenizer)
dataloader = tud.DataLoader(dataset, batch_size=self.batch_size,
shuffle=False, collate_fn=md.Dataset.collate_fn)
for batch in dataloader:
for scaff, dec, nll in self.model.sample_decorations(*batch):
yield scaff, dec, nll
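# Illustrative sketch (hypothetical helper, not used elsewhere in this module): one way to
# drive SampleModel, which streams (scaffold, decoration, NLL) triplets. The `model`
# argument is assumed to already be loaded in sampling mode.
def _example_sample_decorations(model, scaffold_smiles_list):
    sampler = SampleModel(model, batch_size=128)
    return [(scaffold, decoration, float(nll))
            for scaffold, decoration, nll in sampler.run(scaffold_smiles_list)]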
class CalculateNLLsFromModel(Action):
def __init__(self, model, batch_size, logger=None):
"""
Creates an instance of CalculateNLLsFromModel.
:param model: A model instance.
:param batch_size: Batch size to use.
:return:
"""
Action.__init__(self, logger)
self.model = model
self.batch_size = batch_size
def run(self, scaffold_decoration_list):
"""
        Calculates the NLLs for a set of (scaffold, decoration) SMILES pairs.
        :param scaffold_decoration_list: List with pairs of (scaffold, decoration) SMILES.
        :return: An iterator with the NLLs in the same order as the list.
"""
dataset = md.DecoratorDataset(scaffold_decoration_list, self.model.vocabulary)
dataloader = tud.DataLoader(dataset, batch_size=self.batch_size, collate_fn=md.DecoratorDataset.collate_fn,
shuffle=False)
for scaffold_batch, decorator_batch in dataloader:
for nll in self.model.likelihood(*scaffold_batch, *decorator_batch).data.cpu().numpy():
yield nll
|
[
"numpy.sum",
"torch.utils.data.DataLoader",
"models.dataset.Dataset",
"random.sample",
"utils.scaffold.join_joined_attachments",
"scipy.stats.entropy",
"numpy.histogram",
"models.dataset.DecoratorDataset",
"numpy.array",
"utils.scaffold.join_first_attachment",
"torch.no_grad"
] |
[((5789, 5804), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5802, 5804), False, 'import torch\n'), ((3927, 3994), 'models.dataset.DecoratorDataset', 'md.DecoratorDataset', (['training_set'], {'vocabulary': 'self.model.vocabulary'}), '(training_set, vocabulary=self.model.vocabulary)\n', (3946, 3994), True, 'import models.dataset as md\n'), ((4010, 4138), 'torch.utils.data.DataLoader', 'tud.DataLoader', (['dataset'], {'batch_size': 'self.batch_size', 'shuffle': '(True)', 'collate_fn': 'md.DecoratorDataset.collate_fn', 'drop_last': '(True)'}), '(dataset, batch_size=self.batch_size, shuffle=True,\n collate_fn=md.DecoratorDataset.collate_fn, drop_last=True)\n', (4024, 4138), True, 'import torch.utils.data as tud\n'), ((12077, 12192), 'models.dataset.Dataset', 'md.Dataset', (['scaffold_list', 'self.model.vocabulary.scaffold_vocabulary', 'self.model.vocabulary.scaffold_tokenizer'], {}), '(scaffold_list, self.model.vocabulary.scaffold_vocabulary, self.\n model.vocabulary.scaffold_tokenizer)\n', (12087, 12192), True, 'import models.dataset as md\n'), ((12238, 12342), 'torch.utils.data.DataLoader', 'tud.DataLoader', (['dataset'], {'batch_size': 'self.batch_size', 'shuffle': '(False)', 'collate_fn': 'md.Dataset.collate_fn'}), '(dataset, batch_size=self.batch_size, shuffle=False,\n collate_fn=md.Dataset.collate_fn)\n', (12252, 12342), True, 'import torch.utils.data as tud\n'), ((13211, 13279), 'models.dataset.DecoratorDataset', 'md.DecoratorDataset', (['scaffold_decoration_list', 'self.model.vocabulary'], {}), '(scaffold_decoration_list, self.model.vocabulary)\n', (13230, 13279), True, 'import models.dataset as md\n'), ((13301, 13415), 'torch.utils.data.DataLoader', 'tud.DataLoader', (['dataset'], {'batch_size': 'self.batch_size', 'collate_fn': 'md.DecoratorDataset.collate_fn', 'shuffle': '(False)'}), '(dataset, batch_size=self.batch_size, collate_fn=md.\n DecoratorDataset.collate_fn, shuffle=False)\n', (13315, 13415), True, 'import torch.utils.data as tud\n'), ((6228, 6278), 'random.sample', 'random.sample', (['self.training_set', 'self.sample_size'], {}), '(self.training_set, self.sample_size)\n', (6241, 6278), False, 'import random\n'), ((6317, 6369), 'random.sample', 'random.sample', (['self.validation_set', 'self.sample_size'], {}), '(self.validation_set, self.sample_size)\n', (6330, 6369), False, 'import random\n'), ((8293, 8307), 'numpy.array', 'np.array', (['nlls'], {}), '(nlls)\n', (8301, 8307), True, 'import numpy as np\n'), ((8027, 8071), 'utils.scaffold.join_first_attachment', 'usc.join_first_attachment', (['scaff', 'decoration'], {}), '(scaff, decoration)\n', (8052, 8071), True, 'import utils.scaffold as usc\n'), ((9654, 9716), 'numpy.histogram', 'np.histogram', (['dist'], {'bins': 'bins', 'range': 'dist_range', 'density': '(False)'}), '(dist, bins=bins, range=dist_range, density=False)\n', (9666, 9716), True, 'import numpy as np\n'), ((10173, 10194), 'numpy.sum', 'np.sum', (['dists'], {'axis': '(0)'}), '(dists, axis=0)\n', (10179, 10194), True, 'import numpy as np\n'), ((10821, 10844), 'random.sample', 'random.sample', (['mols', '(16)'], {}), '(mols, 16)\n', (10834, 10844), False, 'import random\n'), ((8142, 8188), 'utils.scaffold.join_joined_attachments', 'usc.join_joined_attachments', (['scaff', 'decoration'], {}), '(scaff, decoration)\n', (8169, 8188), True, 'import utils.scaffold as usc\n'), ((10234, 10261), 'scipy.stats.entropy', 'sps.entropy', (['dist', 'avg_dist'], {}), '(dist, avg_dist)\n', (10245, 10261), True, 'import scipy.stats as sps\n')]
|
from allauth.socialaccount.providers.oauth2.urls import default_urlpatterns
from .provider import YandexProvider
urlpatterns = default_urlpatterns(YandexProvider)
|
[
"allauth.socialaccount.providers.oauth2.urls.default_urlpatterns"
] |
[((129, 164), 'allauth.socialaccount.providers.oauth2.urls.default_urlpatterns', 'default_urlpatterns', (['YandexProvider'], {}), '(YandexProvider)\n', (148, 164), False, 'from allauth.socialaccount.providers.oauth2.urls import default_urlpatterns\n')]
|
import os
import sys
from collections import namedtuple, defaultdict
import json
import pickle
from glob import glob
from datetime import datetime, timedelta
import subprocess as sp
import logging
import traceback
import numpy as np  # np.nan is used in Extractor.apply
PARTICIPANT_NAME = 'aaalgo'
CMS_HOME = os.path.abspath(os.path.dirname(__file__))
CORE_LIB_PATHS = [os.path.join(CMS_HOME, 'build/lib.linux-x86_64-' + sys.version[:3])]
HAS_CORE = False
for CORE_LIB_PATH in CORE_LIB_PATHS:
if os.path.exists(CORE_LIB_PATH):
sys.path.append(CORE_LIB_PATH)
HAS_CORE = True
break
if HAS_CORE:
from cms_core import *
#os.nice(20)
pass
logging.basicConfig(level=logging.INFO,
format='%(levelname)s %(asctime)s %(message)s')
# META_VERSION = '0ceb39a6c4254ac7c9a0a79ed517621477a5389b' # meta 1
META_VERSION = '3081ff0822843629b56b4884032ec872b68d8834' # meta 2
ROOT = os.path.abspath(os.path.dirname(__file__))
RAW_DATA_ROOT = os.environ.get("CMS_RAW_DATA_ROOT", "/shared/data/CMS/AI_Challenge_Stage2/data")
def string_to_day (text):
return int(text)
def load_dt (dt):
day = dt % 100
dt = dt // 100
month = dt % 100
year = dt // 100
return datetime(year, month, day)
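# Example: the raw data stores dates as CCYYMMDD integers, so
# load_dt(20160228) gives datetime(2016, 2, 28).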
def register_namedtuple (name, fields):
cls = namedtuple(name, fields)
setattr(sys.modules[__name__], name, cls)
return cls
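# Note: register_namedtuple both returns the new namedtuple class and attaches it to this
# module, so the dynamically created record types (Field, RawCase, Case, ...) are
# resolvable by name at module level, which is also what pickling them requires.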
DATA_ROOT = 'data'
class LineFormat:
def __init__ (self, line):
fs = line.strip().split(' ')
self.index = []
off = 0
for f in fs:
l = len(f)
self.index.append((off, l))
off += l + 1
pass
fs = ''.join(self.split(line))
for c in fs:
assert c == '-'
pass
def split (self, line):
fs = []
for off, l in self.index:
fs.append(line[off:(off + l)].strip())
pass
return fs
def check_fts_line (self, line):
min_length = self.index[-2][0]
if len(line) < min_length:
return False
return True
pass
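# Note: LineFormat (above) infers fixed-width column boundaries from the dashed ruler line
# that follows the header of an .fts file; split() then slices every subsequent data line
# at those (offset, length) positions and strips the padding.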
Field = register_namedtuple('Field', ['no', 'long_name', 'short_name', 'type', 'line'])
def load_columns (fts):
    # Returns the column names listed in the .fts file as
    # [field1, field2, ...]
    # where each item is a Field namedtuple.
with open(fts, 'r') as f:
index = None
while True:
l = next(f)
if l[:2] == '--':
index = LineFormat(l)
break
pass
last = 0
fields = []
min_line_length = index.index[-1][0] # offset of last field
while True:
try:
l = next(f)
except StopIteration:
break
if not index.check_fts_line (l):
strip = l.strip()
if len(strip) > 0 and strip != '-- End of Document --' and strip != '- End of Document' and strip != 'Note: All DATE fields are written out in CCYYMMDD format.':
print("SKIP %s: %s" % (fts, strip))
continue
fs = index.split(l)
try:
# infer column type
ctype = None
c3 = fs[3]
if c3[0] == '$' or c3 == 'CHAR' or fs[1] == 'DESY_SORT_KEY':
ctype = TYPE_STR
elif c3.isnumeric() or c3 == 'NUM':
# is data
ctype = TYPE_INT
c4 = fs[4].strip()
if len(c4) == 0:
pass
elif c4 == 'YYMMDDN8.':
pass
elif float(c4) != int(float(c4)):
ctype= TYPE_FLOAT
pass
pass
elif c3 == 'DATE':
                    # in the old data, the _DT columns are all stored as int
ctype = TYPE_STR_TO_DAY
else:
assert False
pass
fs = fs[:3]
fs.append(ctype)
fs.append(l.strip())
fs[0] = int(fs[0])
num = fs[0]
if num != last + 1:
assert False
except:
traceback.print_exc(1000, sys.stderr)
sys.stderr.write('%s: %s\n' % (fts, l))
sys.stderr.write(str(index.split(l)))
break
# fs[0]: colum number
# fs[1]: long name
# fs[2]: short name
# fs[3]: type
# fs[4]: the full line describing the column
field = Field(*fs)
fields.append(field)
last = num
pass
pass
return fields
def load_columns_bname (bname):
fts_bname = bname
if not bname[-1].isnumeric(): # 2010 car
fts_bname = bname[:-1]
pass
return load_columns('data/%s.fts' % fts_bname)
CLAIM_TYPES = ['car', 'dme', 'hha', 'hosp', 'inp', 'out', 'snf']
def deep_glob (pattern):
o = sp.check_output("find %s/ -name '%s'" % (DATA_ROOT, pattern), shell=True).decode('ascii')
return list(filter(lambda x: x != '', o.strip().split('\n')))
class Records:
def __init__ (self, cols):
col_lookup = {}
for i, col in enumerate(cols):
col_lookup[col.long_name] = i
pass
self.cols = cols
self.col_lookup = col_lookup
self.claim_no_col = col_lookup.get('CLAIM_NO', None)
pass
def get (self, name, row):
#print(name)
#print(self.col_lookup[name])
#print(len(row))
return row[self.col_lookup[name]]
pass
class SimpleRecords(Records):
def __init__ (self, cols):
super().__init__(cols)
self.rows = []
pass
def add (self, row):
self.rows.append(row)
pass
pass
class GroupedRecords(Records):
def __init__ (self, cols):
super().__init__(cols)
self.rows = defaultdict(list)
assert not self.claim_no_col is None
pass
def add (self, row):
key = row[self.claim_no_col]
self.rows[key].append(row)
pass
pass
DEFAULT_META_JSON = os.path.join(ROOT, 'meta2.json')
TYPE_STR = 1
TYPE_INT = 2
TYPE_FLOAT = 3
TYPE_STR_TO_DAY = 4
def assert_false (text):
assert False
return None
TYPE_CTORS = [assert_false, str, int, float, string_to_day]
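# Note: TYPE_CTORS is indexed by the TYPE_* constants above --
# 0 -> assert_false (invalid), TYPE_STR -> str, TYPE_INT -> int,
# TYPE_FLOAT -> float, TYPE_STR_TO_DAY -> string_to_day.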
class RawCaseLoader:
def __init__ (self, meta_path=DEFAULT_META_JSON, include=None):
with open(meta_path, 'r') as f:
version, formats, lookup = json.load(f)
assert version == META_VERSION, "meta.pkl is incompatible, please report"
pass
table_lookup = {}
self.formats = [] # add this format to table
self.table_ctors = []
names = []
self.names = names
for ind, cols in formats:
year, ctype, sub, key = ind
name = None
cols_new = [Field(*col) for col in cols]
if ctype == 'den':
name = 'den'
elif sub is None:
name = '%s' % ctype
else:
name = '%s_%s' % (ctype, sub)
pass
if name in table_lookup:
tid = table_lookup[name]
else:
tid = len(self.table_ctors)
table_lookup[name] = tid
names.append(name)
if sub is None or sub == 'claimsj' or sub == 'claimsk':
self.table_ctors.append((SimpleRecords, cols_new))
else:
self.table_ctors.append((GroupedRecords, cols_new))
pass
use = True
if not include is None:
use = False
for x in include:
if x in name:
use = True
break
self.formats.append((tid, year, ctype, sub, key, cols_new, use))
pass
self.raw_case_ctor = register_namedtuple('RawCase', ['pid'] + names)
pass
def load (self, row):
#print(row)
pid = None
tables = [m(c) for m, c in self.table_ctors]
for one in row.split('|'):
raw_fs = one.split(',')
fid = int(raw_fs[0])
raw_fs = raw_fs[1:]
tid, year, ctype, sub, key, cols, use = self.formats[fid]
if not use:
continue
assert len(cols) <= len(raw_fs), 'not %d <= %d: %d-%s-%s %s' % (len(cols), len(raw_fs), year, ctype, sub, one)
fs = []
for col, f in zip(cols, raw_fs):
if len(f) == 0:
fs.append(None)
else:
try:
fs.append(TYPE_CTORS[col.type](f))
except:
                        if (col.type == TYPE_INT) and (not '.' in f):
fs.append(int(float(f)))
else:
raise
pass
pass
if pid is None:
pid = fs[key]
else:
assert pid == fs[key]
pass
tables[tid].add(fs)
pass
return self.raw_case_ctor(int(pid), *tables)
# RawCase format:
# case.{pid, den, car, ... car_claimsj, ...}
# The RawCase needs to be normalized into a Case with a uniform format:
#
# case.{pid, den, car, dme, ...}
#
# What needs to be done:
# 1. Unify the field naming between the old and new formats.
# 2. For fields that the new format already splits out into separate tables, load them
#    from the corresponding table; for the old format, extract them from the
#    corresponding columns instead.
# 3. Remaining fields that were never split out are also extracted from the
#    corresponding columns.
# 4. Sort the tables and their sub-tables.
# spec: name, names
DEFAULT_MAPPING_JSON = os.path.join(ROOT, 'mapping2.json')
class CaseNormalizer:
"""Case normalizer.
This class loads raw case data, which might contain multiple versions of
data of the same claim type, into a unified format. The output normalized
case contains the follow fields:
pid
den
car
dme
hha
hosp
inp
out
snf
This loader operates upon a pre-defined ETL spec, which specifies for each
ctype, which raw tables must be scanned, which output fields should be
extracted and which input fields from the raw data should be used for these
output fields.
self.specs: [ctype, [input_table_spec], [output_field_spec], ctor]
Each ctype has a corresponding row in self.specs, in unspecified order.
    Each input_table_spec listed must be scanned to generate the output for this
    ctype.
    input_table_spec = [input_table_name]:
        lists all raw/input tables that must be scanned.
For example, for ctype dme, there are two versions: dme and dme_claimsj.
output_field_spec = [field_name, subfields, [input_spec_v1, input_spec_v2, ...], ctor]
Each entry defines an output field. There are two kinds of fields:
- Atomic field: field_name must be all capitalized, like CLAIM_NO.
subfields is None.
        - Table field: the field is a sub-table, in which case the name is in lower
case, like "lines". Subfields lists the fields of the sub-table.
The input_specs then specify how from each version these fields
are to be loaded. Version specs are ordered and can be indexed by vid.
input_spec_v? = [input_field_name, input_subfield_names]
input_field_name input_subfield_names comments
---------------- -------------------- --------
str None Atomic field, get input_field_name from input_table
str list of str Sub-table. Input field name specifies the raw table
to load these fields from and input_field_names
specified the raw fields to load (new version).
None list of str Sub-table. Load these columns from the raw table
directly (old version).
None None This version does not contain data.
"""
def __init__ (self, mapping_path = DEFAULT_MAPPING_JSON):
with open(mapping_path, 'r') as f:
self.specs = json.load(f)
self.case_ctor = register_namedtuple('Case', ['pid'] + [spec[0] for spec in self.specs])
for spec in self.specs:
ctype, _, output_field_specs = spec
spec.append(register_namedtuple('Rec_%s' % ctype, [s[0] for s in output_field_specs]))
for field in output_field_specs:
field_name, sub_field_names, _, _ = field
if sub_field_names is None:
field.append(None)
else:
field.append(register_namedtuple('Field_%s_%s' % (ctype, field_name), sub_field_names))
pass
pass
pass
pass
def load_column_groups (self, table, row, input_field_names, ctor):
rows = []
for i in range(1, 1000):
fs = []
good = False
for fname in input_field_names:
try:
v = table.get("%s%d" % (fname, i), row)
if not v is None:
good = True
fs.append(v)
except:
break
pass
if len(fs) == 0:
break
assert len(fs) == len(input_field_names)
if good:
rows.append(ctor(*fs))
pass
return rows
def load_sub_table (self, table, claim_no, input_field_names, ctor):
rows = []
assert isinstance(table, GroupedRecords)
for row in table.rows.get(claim_no, []):
fs = []
for fname in input_field_names:
fs.append(table.get(fname, row))
pass
rows.append(ctor(*fs))
pass
return rows
def apply (self, raw):
tup = [raw.pid]
for _, input_table_specs, output_field_specs, ctor in self.specs:
recs = []
# vid: version_id
for vid, input_table_name in enumerate(input_table_specs):
input_table = raw.__getattribute__(input_table_name)
assert isinstance(input_table, SimpleRecords)
for row in input_table.rows:
fs = []
# process each output field
for _, subfields, input_field_spec_by_version, _, field_ctor in output_field_specs:
input_field_name, input_subfield_names = input_field_spec_by_version[vid]
if field_ctor is None:
# atomic output field
assert not input_field_name is None
assert input_subfield_names is None
fs.append(input_table.get(input_field_name, row))
else:
if input_field_name is None:
if input_subfield_names is None:
fs.append([])
else:
# load from input table directly
fs.append(self.load_column_groups(input_table, row, input_subfield_names, field_ctor))
else:
# new version
assert type(input_subfield_names) is list
claim_no = input_table.get("CLAIM_NO", row)
fs.append(self.load_sub_table(raw.__getattribute__(input_field_name), claim_no, input_subfield_names, field_ctor))
pass
pass
recs.append(ctor(*fs))
pass
pass
tup.append(recs)
return self.case_ctor(*tup)
pass
DEFAULT_ICD9_CODEBOOK_PKL = os.path.join(ROOT, 'icd9_codebook.pkl')
class CaseLoader:
def __init__ (self):
self.loader = RawCaseLoader()
self.normer = CaseNormalizer()
if os.path.exists(DEFAULT_ICD9_CODEBOOK_PKL):
with open(DEFAULT_ICD9_CODEBOOK_PKL, 'rb') as f:
self.icd9 = pickle.load(f)
pass
def load (self, v):
case = self.loader.load(v)
return self.normer.apply(case)
pass
loader = None
if os.path.exists(DEFAULT_META_JSON) and os.path.exists(DEFAULT_MAPPING_JSON):
loader = CaseLoader()
DEFAULT_EXTRACTOR_PKL = os.path.join(ROOT, 'extractor.pkl')
MAX_DGNS = 25
MAX_PRCDR = 25
SUBTABLE_DUPLICITY = {
'buyin': 0,
'hmoind': 0,
'dgns': MAX_DGNS,
'linej': 0,
'dgns_e': MAX_DGNS,
'prcdr': MAX_PRCDR,
'revenuej': 0,
'instval': 0,
'instoccr': 0,
'instcond': 0
}
EXTRACTOR_SKIP = {
'DSYSRTKY': 1,
'CLAIMNO': 1,
'PRCDR_DT': 1
}
class Claim:
def __init__ (self, features, thru_dt, admsn_dt = None):
self.features = features
self.thru_dt = thru_dt
self.admsn_dt = admsn_dt
pass
# TODO: 2008, 2009 data ignored
class Extractor:
def __init__ (self, mapping_path = DEFAULT_MAPPING_JSON, extractor_path = DEFAULT_EXTRACTOR_PKL):
with open(mapping_path, 'r') as f:
self.specs = json.load(f)
pass
with open(extractor_path, 'rb') as f:
xtors = pickle.load(f)
pass
self.case_ctor = register_namedtuple('ExtractedCase', ['pid'] + [spec[0] for spec in self.specs])
for spec in self.specs:
ctype, _, fields = spec
# fields: [[field, subfields]]
xtor = xtors[ctype] # {field: [n, lookup]}
# or {field: {subfield: [n, lookup]}}
off = 0
fts = []
for field in fields:
field_name, subfields, _, _ = field
field.clear()
# field = [field_name, use, offset, is_numeric, exhorstive, lookup, sub_xtor, sub_duplicity]
# field_name
# use: use this field
# offset:
# is_numeric:
                # exhorstive:
# lookup:
# sub_xtor:
# sub_duplicity:
# subfield = [subfield_name, use, snumeric, exhorstive, lookup]
#
if subfields is None:
if field_name in EXTRACTOR_SKIP:
# skip this field while extraction
field.extend([field_name, False, None, None, None, None, None, None])
elif field_name in xtor:
fv = xtor[field_name]
assert type(fv) is list # use, exhorstive, lookup
U, E, lookup = fv
if not U:
field.extend([field_name, False, None, None, None, None, None, None])
else:
field.extend([field_name, True, off, False, E, lookup, None, None])
fts.append((field_name, True))
off += 1
else:
# col is numeric
field.extend([field_name, True, off, True, None, None, None, None])
fts.append((field_name, False))
off += 1
pass
pass
else:
duplicity = SUBTABLE_DUPLICITY[field_name]
sub_xtor = []
sub_off = 0
sub_fts = []
fv = xtor.get(field_name, {})
assert type(fv) is dict
for subfield in subfields:
if subfield in EXTRACTOR_SKIP:
sub_xtor.append([subfield, False, None, None, None])
elif subfield in fv:
sfv = fv[subfield]
assert type(sfv) is list
U, E, lookup = sfv
if not U:
# subfield, use, numeric, exhorstive, lookup
sub_xtor.append([subfield, False, None, None, None])
else:
sub_xtor.append([subfield, True, False, E, lookup])
sub_fts.append((subfield, True))
sub_off += 1
else:
# col is numeric
sub_xtor.append([subfield, True, True, None, None])
sub_fts.append((subfield, False))
sub_off += 1
pass
pass
field.extend([field_name, True, off, None, None, None, sub_xtor, duplicity])
for i in range(duplicity):
for sf, is_cat in sub_fts:
fts.append(("%s%d" % (sf, i), is_cat))
pass
pass
assert len(sub_xtor) >= sub_off
                    # TODO: this should be sub_off * duplicity
off += sub_off * duplicity
pass
pass
pass
spec.clear()
spec.extend([ctype, fields, fts])
pass
def apply (self, case):
xxx = [case.pid]
for ctype, fields, ft_names in self.specs:
rows = getattr(case, ctype)
fts = []
for row in rows:
thru_dt = getattr(row, "THRU_DT", None)
admsn_dt = getattr(row, "ADMSN_DT", None)
if not thru_dt is None:
if thru_dt //10000 < 2010: # TODO!!!
continue
thru_dt = load_dt(thru_dt)
if not admsn_dt is None:
admsn_dt = load_dt(admsn_dt)
ft = []
try:
for col, field in zip(row, fields):
field_name, use, off, is_numeric, exhorstive, lookup, sub_xtor, sub_duplicity = field
if not use:
continue
if sub_xtor is None:
if is_numeric:
if type(col) is str:
col = float(col)
ft.append(col)
else: # category
ft.append(lookup.get(col, len(lookup)))
else:
for sub in col[:sub_duplicity]:
assert len(sub) == len(sub_xtor)
for c, f in zip(sub, sub_xtor):
sf, suse, snumeric, _, slookup = f
if not suse:
continue
if snumeric:
if type(c) is str:
try:
c = float(c)
except:
print(ctype, field_name, sf, suse)
c = np.nan
ft.append(c)
else:
ft.append(slookup.get(c, len(slookup)))
pass
pass
for _ in range(len(col), sub_duplicity):
for _, suse, _, _, _ in sub_xtor:
if suse:
ft.append(None)
pass
pass
assert len(ft) == len(ft_names)
fts.append(Claim(ft, thru_dt, admsn_dt))
except Exception as e:
print(e)
pass
if ctype != 'den':
fts.sort(key = lambda x: x.thru_dt)
xxx.append(fts)
pass
return self.case_ctor(*xxx)
pass
#class ICD:
# def __init__ (self):
# self.lookup = {}
# with open(os.path.join(ROOT, 'CMS32_DESC_LONG_DX.txt'), 'rb') as f:
# for l in f:
# l = l.decode('iso-8859-1')
# k, v = l.split(' ', 1)
# v = v.strip()
# self.lookup[k] = v
# pass
# pass
# pass
#
# def explain (self, code):
# return self.lookup.get(code, 'CODE %s' % code)
# pass
def load_gs (gs_path):
# train_gs.dat*
# test_gs
with open(gs_path, 'r') as f:
for l in f:
death365, pid, observe, cutoff = l.strip().split('\t')[:4]
yield int(death365), int(pid), int(observe), int(cutoff)
pass
pass
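# Note: each gold-standard line is expected to carry at least four tab-separated
# fields -- death365, pid, observe, cutoff -- all parsed as integers.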
def iter_lines (*patterns):
for pattern in patterns:
for path in glob(pattern):
with open(path, 'rb') as f:
for l in f:
yield l
pass
def get_claim_icd9_nodes (claim, lookup=None):
standalone = False
if lookup is None:
lookup = defaultdict(lambda:[False])
standalone = True
pass
for code, version, source in claim.get_icd_codes():
nodes, exact = loader.icd9.lookup(code, version)
for node in nodes:
item = lookup[node]
item[0] = item[0] or exact
pass
pass
if standalone:
return [[k, v[0]] for k, v in lookup.items()]
pass
def get_case_icd9_nodes (case):
lookup = defaultdict(lambda:[False])
for claim in case.claims():
get_claim_icd9_nodes(claim, lookup)
pass
return [[k, v[0]] for k, v in lookup.items()]
|
[
"sys.path.append",
"json.load",
"traceback.print_exc",
"logging.basicConfig",
"os.path.dirname",
"subprocess.check_output",
"os.path.exists",
"datetime.datetime",
"os.environ.get",
"collections.defaultdict",
"pickle.load",
"collections.namedtuple",
"glob.glob",
"sys.stderr.write",
"os.path.join"
] |
[((624, 716), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(levelname)s %(asctime)s %(message)s"""'}), "(level=logging.INFO, format=\n    '%(levelname)s %(asctime)s %(message)s')\n", (643, 716), False, 'import logging\n'), ((922, 1007), 'os.environ.get', 'os.environ.get', (['"""CMS_RAW_DATA_ROOT"""', '"""/shared/data/CMS/AI_Challenge_Stage2/data"""'], {}), "('CMS_RAW_DATA_ROOT', '/shared/data/CMS/AI_Challenge_Stage2/data'\n    )\n", (936, 1007), False, 'import os\n'), ((6112, 6144), 'os.path.join', 'os.path.join', (['ROOT', '"""meta2.json"""'], {}), "(ROOT, 'meta2.json')\n", (6124, 6144), False, 'import os\n'), ((9563, 9598), 'os.path.join', 'os.path.join', (['ROOT', '"""mapping2.json"""'], {}), "(ROOT, 'mapping2.json')\n", (9575, 9598), False, 'import os\n'), ((16113, 16152), 'os.path.join', 'os.path.join', (['ROOT', '"""icd9_codebook.pkl"""'], {}), "(ROOT, 'icd9_codebook.pkl')\n", (16125, 16152), False, 'import os\n'), ((16698, 16733), 'os.path.join', 'os.path.join', (['ROOT', '"""extractor.pkl"""'], {}), "(ROOT, 'extractor.pkl')\n", (16710, 16733), False, 'import os\n'), ((271, 296), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (286, 296), False, 'import os\n'), ((317, 384), 'os.path.join', 'os.path.join', (['CMS_HOME', "('build/lib.linux-x86_64-' + sys.version[:3])"], {}), "(CMS_HOME, 'build/lib.linux-x86_64-' + sys.version[:3])\n", (329, 384), False, 'import os\n'), ((448, 477), 'os.path.exists', 'os.path.exists', (['CORE_LIB_PATH'], {}), '(CORE_LIB_PATH)\n', (462, 477), False, 'import os\n'), ((879, 904), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (894, 904), False, 'import os\n'), ((1165, 1191), 'datetime.datetime', 'datetime', (['year', 'month', 'day'], {}), '(year, month, day)\n', (1173, 1191), False, 'from datetime import datetime, timedelta\n'), ((1243, 1267), 'collections.namedtuple', 'namedtuple', (['name', 'fields'], {}), '(name, fields)\n', (1253, 1267), False, 'from collections import namedtuple, defaultdict\n'), ((16571, 16604), 'os.path.exists', 'os.path.exists', (['DEFAULT_META_JSON'], {}), '(DEFAULT_META_JSON)\n', (16585, 16604), False, 'import os\n'), ((16609, 16645), 'os.path.exists', 'os.path.exists', (['DEFAULT_MAPPING_JSON'], {}), '(DEFAULT_MAPPING_JSON)\n', (16623, 16645), False, 'import os\n'), ((26175, 26204), 'collections.defaultdict', 'defaultdict', (['(lambda : [False])'], {}), '(lambda : [False])\n', (26186, 26204), False, 'from collections import namedtuple, defaultdict\n'), ((487, 517), 'sys.path.append', 'sys.path.append', (['CORE_LIB_PATH'], {}), '(CORE_LIB_PATH)\n', (502, 517), False, 'import sys\n'), ((5895, 5912), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5906, 5912), False, 'from collections import namedtuple, defaultdict\n'), ((16285, 16326), 'os.path.exists', 'os.path.exists', (['DEFAULT_ICD9_CODEBOOK_PKL'], {}), '(DEFAULT_ICD9_CODEBOOK_PKL)\n', (16299, 16326), False, 'import os\n'), ((25507, 25520), 'glob.glob', 'glob', (['pattern'], {}), '(pattern)\n', (25511, 25520), False, 'from glob import glob\n'), ((25739, 25768), 'collections.defaultdict', 'defaultdict', (['(lambda : [False])'], {}), '(lambda : [False])\n', (25750, 25768), False, 'from collections import namedtuple, defaultdict\n'), ((4946, 5019), 'subprocess.check_output', 'sp.check_output', (['("find %s/ -name \'%s\'" % (DATA_ROOT, pattern))'], {'shell': '(True)'}), '("find %s/ -name \'%s\'" % (DATA_ROOT, pattern), shell=True)\n', (4961, 5019), True, 'import subprocess as sp\n'), ((6497, 6509), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6506, 6509), False, 'import json\n'), ((12292, 12304), 'json.load', 'json.load', (['f'], {}), '(f)\n', (12301, 12304), False, 'import json\n'), ((17478, 17490), 'json.load', 'json.load', (['f'], {}), '(f)\n', (17487, 17490), False, 'import json\n'), ((17575, 17589), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (17586, 17589), False, 'import pickle\n'), ((16417, 16431), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (16428, 16431), False, 'import pickle\n'), ((4163, 4200), 'traceback.print_exc', 'traceback.print_exc', (['(1000)', 'sys.stderr'], {}), '(1000, sys.stderr)\n', (4182, 4200), False, 'import traceback\n'), ((4217, 4256), 'sys.stderr.write', 'sys.stderr.write', (["('%s: %s\\n' % (fts, l))"], {}), "('%s: %s\\n' % (fts, l))\n", (4233, 4256), False, 'import sys\n')]
|
#!/usr/bin/python
'''
(C) Copyright 2019 Intel Corporation.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE
The Governments rights to use, modify, reproduce, release, perform, display,
or disclose this software are subject to the terms of the Apache License as
provided in Contract No. B609815.
Any reproduction of computer software, computer software documentation, or
portions thereof marked with this legend must also reproduce the markings.
'''
from __future__ import print_function
import os
import subprocess
from env_modules import load_mpi
from command_utils import EnvironmentVariables
class MpioFailed(Exception):
"""Raise if MPIO failed"""
class MpioUtils():
"""MpioUtils Class"""
def __init__(self):
self.mpichinstall = None
def mpich_installed(self, hostlist):
"""Check if mpich is installed"""
load_mpi('mpich')
try:
# checking mpich install
self.mpichinstall = subprocess.check_output(
["ssh", hostlist[0],
"command -v mpichversion"]).rstrip()[:-len('bin/mpichversion')]
return True
except subprocess.CalledProcessError as excep:
print("Mpich not installed \n {}".format(excep))
return False
# pylint: disable=R0913
def run_mpiio_tests(self, hostfile, pool_uuid, svcl, test_repo,
test_name, client_processes, cont_uuid):
"""
Running LLNL, MPI4PY and HDF5 testsuites
Function Arguments:
hostfile --client hostfile
pool_uuid --Pool UUID
svcl --Pool SVCL
test_repo --test repo location
test_name --name of test to be tested
"""
print("self.mpichinstall: {}".format(self.mpichinstall))
# environment variables only to be set on client node
env = EnvironmentVariables()
env["DAOS_POOL"] = "{}".format(pool_uuid)
env["DAOS_SVCL"] = "{}".format(":".join([str(item) for item in svcl]))
env["DAOS_CONT"] = "{}".format(cont_uuid)
mpirun = os.path.join(self.mpichinstall, "bin", "mpirun")
if test_name == "romio" and os.path.isfile(
os.path.join(test_repo, "runtests")):
test_cmd = [env.get_export_str(),
os.path.join(test_repo, 'runtests'),
'-fname=daos:test1',
'-subset']
cmd = " ".join(test_cmd)
elif test_name == "llnl" and os.path.isfile(
os.path.join(test_repo, "testmpio_daos")):
env["MPIO_USER_PATH"] = "daos:"
test_cmd = [env.get_export_str(),
mpirun,
'-np',
str(client_processes),
'--hostfile',
hostfile,
os.path.join(test_repo, 'testmpio_daos'),
'1']
cmd = " ".join(test_cmd)
elif test_name == "mpi4py" and \
os.path.isfile(os.path.join(test_repo, "test_io_daos.py")):
test_cmd = [env.get_export_str(),
mpirun,
'-np',
str(client_processes),
'--hostfile',
hostfile,
'python',
os.path.join(test_repo, 'test_io_daos.py')]
cmd = " ".join(test_cmd)
elif test_name == "hdf5" and \
(os.path.isfile(os.path.join(test_repo, "testphdf5")) and
os.path.isfile(os.path.join(test_repo, "t_shapesame"))):
env["HDF5_PARAPREFIX"] = "daos:"
cmd = ''
for test in ["testphdf5", "t_shapesame"]:
fqtp = os.path.join(test_repo, test)
test_cmd = [env.get_export_str(),
'echo ***Running {}*** ;'.format(fqtp),
mpirun,
'-np',
str(client_processes),
'--hostfile',
hostfile,
fqtp + ';']
cmd += " ".join(test_cmd)
else:
raise MpioFailed("Wrong test name ({}) or test repo location ({}) "
"specified".format(test_name, test_repo))
print("run command: {}".format(cmd))
try:
process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=True)
while True:
output = process.stdout.readline()
if output == '' and process.poll() is not None:
break
if output:
print(output.strip())
if process.poll() != 0:
raise MpioFailed("{} Run process".format(test_name)
+ " Failed with non zero exit"
+ " code:{}".format(process.poll()))
except (ValueError, OSError) as excep:
raise MpioFailed("<Test FAILED> \nException occurred: {}"\
.format(str(excep)))
|
[
"subprocess.Popen",
"env_modules.load_mpi",
"subprocess.check_output",
"command_utils.EnvironmentVariables",
"os.path.join"
] |
[((1412, 1429), 'env_modules.load_mpi', 'load_mpi', (['"""mpich"""'], {}), "('mpich')\n", (1420, 1429), False, 'from env_modules import load_mpi\n'), ((2496, 2518), 'command_utils.EnvironmentVariables', 'EnvironmentVariables', ([], {}), '()\n', (2516, 2518), False, 'from command_utils import EnvironmentVariables\n'), ((2715, 2763), 'os.path.join', 'os.path.join', (['self.mpichinstall', '"""bin"""', '"""mpirun"""'], {}), "(self.mpichinstall, 'bin', 'mpirun')\n", (2727, 2763), False, 'import os\n'), ((5095, 5182), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT', 'shell': '(True)'}), '(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,\n shell=True)\n', (5111, 5182), False, 'import subprocess\n'), ((2833, 2868), 'os.path.join', 'os.path.join', (['test_repo', '"""runtests"""'], {}), "(test_repo, 'runtests')\n", (2845, 2868), False, 'import os\n'), ((2941, 2976), 'os.path.join', 'os.path.join', (['test_repo', '"""runtests"""'], {}), "(test_repo, 'runtests')\n", (2953, 2976), False, 'import os\n'), ((3164, 3204), 'os.path.join', 'os.path.join', (['test_repo', '"""testmpio_daos"""'], {}), "(test_repo, 'testmpio_daos')\n", (3176, 3204), False, 'import os\n'), ((3503, 3543), 'os.path.join', 'os.path.join', (['test_repo', '"""testmpio_daos"""'], {}), "(test_repo, 'testmpio_daos')\n", (3515, 3543), False, 'import os\n'), ((1513, 1585), 'subprocess.check_output', 'subprocess.check_output', (["['ssh', hostlist[0], 'command -v mpichversion']"], {}), "(['ssh', hostlist[0], 'command -v mpichversion'])\n", (1536, 1585), False, 'import subprocess\n'), ((3680, 3722), 'os.path.join', 'os.path.join', (['test_repo', '"""test_io_daos.py"""'], {}), "(test_repo, 'test_io_daos.py')\n", (3692, 3722), False, 'import os\n'), ((4011, 4053), 'os.path.join', 'os.path.join', (['test_repo', '"""test_io_daos.py"""'], {}), "(test_repo, 'test_io_daos.py')\n", (4023, 4053), False, 'import os\n'), ((4416, 4445), 'os.path.join', 'os.path.join', (['test_repo', 'test'], {}), '(test_repo, test)\n', (4428, 4445), False, 'import os\n'), ((4160, 4196), 'os.path.join', 'os.path.join', (['test_repo', '"""testphdf5"""'], {}), "(test_repo, 'testphdf5')\n", (4172, 4196), False, 'import os\n'), ((4231, 4269), 'os.path.join', 'os.path.join', (['test_repo', '"""t_shapesame"""'], {}), "(test_repo, 't_shapesame')\n", (4243, 4269), False, 'import os\n')]
|
#!/usr/bin/env python3
# This Python file uses the following encoding: utf-8
from pathlib import Path
from pprint import pprint, pformat
import fitz, os, queue, sys
from .fitzcli import main as fitzGetText
from PySide6.QtWidgets import QApplication, QFileDialog, QWidget, QRadioButton, QTextEdit
from PySide6.QtGui import QImage, QPixmap
from PySide6.QtCore import QFile, Qt, QCoreApplication, QEvent
from PySide6.QtUiTools import QUiLoader
class MetadataEditor(QWidget):
def __init__(self):
super(MetadataEditor, self).__init__()
self.docsList = []
self.metadata = {}
self.load_ui()
# style sheets give another way of signalling UI state - If I can ever figure out such good way.
#self.setStyleSheet("QLabel { color: rgb(50, 50, 50); font-size: 11px; background-color: rgba(188, 188, 188, 50); border: 1px solid rgba(188, 188, 188, 250); } QSpinBox { color: rgb(50, 50, 50); font-size: 11px; background-color: rgba(255, 188, 20, 50); }"
#self.ui.pdf_bmp_label = QImage(531, 666, QImage.Format_ARGB32) #.setPixmap(100, 100)
def load_ui(self):
loader = QUiLoader()
path = os.fspath(Path(__file__).resolve().parent / "metadata.ui")
if not path:
return None
ui_file = QFile(path)
ui_file.open(QFile.ReadOnly)
self.ui = None
self.ui = loader.load(ui_file, self)
ui_file.close()
# connect the page navigation push buttons
self.ui.commit_pushButton.clicked.connect(self.commit_check)
self.ui.back_pushButton.clicked.connect(self.go_back)
self.ui.next_pushButton.clicked.connect(self.go_next)
self.ui.quit_pushButton.clicked.connect(self.quit)
# connect the fitzcli gettext radio buttons
for w in self.findChildren(QRadioButton):
w.clicked.connect(self.BtnSelected)
# connect the textedit input fields to a common function
for w in self.findChildren(QTextEdit):
if w.objectName().endswith("Edit"):
w.textChanged.connect(self.EditSelected)
w.installEventFilter(self)
w.get = w.toPlainText
# qline edit is slightly different, sigh
# we are setting "get" to "text"
self.ui.rename_lineEdit.textChanged.connect(self.EditSelected)
self.ui.rename_lineEdit.installEventFilter(self)
self.ui.rename_lineEdit.get = self.ui.rename_lineEdit.text
# date is basically read only so no event filter
self.ui.date_lineEdit.textChanged.connect(self.EditSelected)
self.ui.date_lineEdit.get = self.ui.date_lineEdit.text
self.initialize_editButtons()
self.newname = False
def checkNewFileName(self, fn):
newname = fn+'.pdf'
r, f = self.docsList[self.docsList_current]
trial = not os.path.isfile(os.path.join(r, newname))
return trial, newname
def eventFilter(self, obj, event):
""" Catch the keyboard Enter key event and use it to
Commit the Title - for further processing in the
pdf file rename function.
Commit the rename - this actually just prepares, the
actual rename happens after the Pdf is saved.
"""
if event.type() == QEvent.KeyPress and obj.hasFocus() :
# print(event.type())
weAre = obj.objectName()
key = weAre[0:weAre.find('_')]
# get() resolves to the right text retrieve function
data = obj.get()
#if key == 'title':
# print(key)
#data = obj.toPlainText()
# print(data)
# print(self.metadata)
#self.metadata[key] = data
# print(f'Enter {key} pressed')
if event.key() == Qt.Key_F3 and key == 'title':
data = data.title().replace(' ', '').replace('/', '-')
self.ui.rename_lineEdit.setText(data)
return True
elif event.key() == Qt.Key_Return and (key == 'title' or key == 'author'):
data = data.title()
obj.setPlainText(data)
return True
elif event.key() == Qt.Key_Return and key == 'rename' :
status, newname = self.checkNewFileName(data)
if status :
# proposed file name is ok, save for later
self.newname = newname
else:
# same named file already exists, clear to signal restart naming needed
self.ui.rename_lineEdit.clear()
return True
return super().eventFilter(obj, event)
def initialize_editButtons(self):
# reset the edit fields back to their placeholder strings.
self.ui.author_textEdit.clear()
self.ui.title_textEdit.clear()
self.ui.keywords_textEdit.clear()
self.ui.date_lineEdit.clear()
self.ui.commit_pushButton.setEnabled(False)
def go_back(self) :
self.docsList_current = self.docsList_current -1
self.processPdf()
def go_next(self) :
self.docsList_current = self.docsList_current +1
self.processPdf()
def commit_check(self):
# missing some sanity checking here!
self.update_metadata()
# print("back from check")
def BtnSelected(self, event, ) :
# Change the call to fitzcli requesting different layout processing.
try:
genAllRadioButtons = self.findChildren(QRadioButton)
mode = ([rb.text() for rb in genAllRadioButtons if rb.isChecked()][0]).lower()
# set some command line arguments for the call to fitzcli
fo = "cause I can't make it use a string.io object"
sys.argv = [sys.argv[0], 'gettext', "-mode", mode, "-pages", "1", "-output",fo, self.fn]
fitzGetText() # calls the main() function with above args.
# paste the output into a browser window widget
self.ui.textBrowser.setText(open(fo, "r").read())
except Exception as e:
print(e)
def EditSelected(self) :
try:
x = self.sender()
widgetKey = x.objectName()
#ourKey = self.sender().objectName()
metaKey = widgetKey[:widgetKey.find("_")]
data = x.get()
# print("meta was ", self.metadata)
if metaKey == 'date':
metaKey = 'creationDate'
if metaKey != 'rename':
self.metadata[metaKey] = data
# print(f' New .{metaKey}. is .{data}.')
self.ui.commit_pushButton.setEnabled(True)
except Exception as e:
print(e)
def update_metadata(self):
try:
# deug log to terminal
pprint(self.metadata)
# do an incremental update, no error checks!
self.doc.set_metadata(self.metadata)
if (self.doc.can_save_incrementally()):
self.doc.saveIncr()
else:
dst = self.fn + ".tmp"
self.doc.save(dst)
os.rename(self.fn, self.fn + ".original")
os.rename(dst, self.fn)
self.doc.close()
if self.newname != False:
# now we need to actually rename it. Should be ok now that
# fitz is closed.
dir, fn = self.docsList[self.docsList_current]
os.rename(self.fn, os.path.join(dir, self.newname))
x = self.docsList[self.docsList_current]
self.docsList[self.docsList_current] = [ x[0], self.newname ]
# print(x)
except Exception as e:
print(e)
def setEditData(self, metadata):
# get the metadata found on reading the pdf and insert it into
# the edit fields.
# uninterpreted label shows what we came in with.
self.ui.metadata_label.setText(pformat(metadata)[1:-1])
# initialize some flags
self.date_set = self.title_set = self.author_set = self.keywords_set = False
# parse out the data and put into the widget entry fields
for key in metadata:
k = key.lower()
if k == "author":
self.ui.author_textEdit.setPlainText(metadata[key])
elif k == "keywords":
self.ui.keywords_textEdit.setPlainText(metadata[key])
elif k == "subject":
self.ui.subject_textEdit.setPlainText(metadata[key])
elif k == "creationdate":
self.ui.date_lineEdit.setText(metadata[key])
elif k == "title":
self.ui.title_textEdit.setPlainText(metadata[key])
def processPdf(self) :
self.initialize_editButtons() # clear the entry fields to their placeholder strings.
# get a path to a pdf to work on
dir, fn = self.docsList[self.docsList_current]
path = self.fn = os.path.join(dir, fn)
# display the pdf file name
self.ui.path_label.setText(f"{fn}")
# preset the rename file widget to the original file name.
self.ui.rename_lineEdit.setText(f"{fn[:-4]}")
# set up the file list navigation buttons
# this logic allows up or down moves through the list,
# but no wrap around modulo stuff.
if self.docsList_top > self.docsList_current +1 :
self.ui.next_pushButton.setEnabled(True)
else:
self.ui.next_pushButton.setEnabled(False)
if self.docsList_current > 0 :
self.ui.back_pushButton.setEnabled(True)
else:
self.ui.back_pushButton.setEnabled(False)
# make a file progress label
pos = f"{(self.docsList_current +1)} / {len(self.docsList)} "
self.ui.filePosition.setText(pos)
# At last we can start processing the pdf!
try:
#fin = '/mnt/Marcs80GB/Reiserfs1/Kubo/3DCS \udca5ѥ\udcf3\udca5\udcd5ura.pdf'
# turns out that
#stream_buffer = open(path, "rb").read()
#doc = fitz.open("pdf", stream_buffer)
#path = bytes(Path(path))
# Actually, let PyMuPDF call on MuPDF to do the work!
# building on the stout shoulders of our betters.
self.doc = doc = fitz.open(path)
page = doc.loadPage(0)
# get the metadata - our whole purpose is to update it!
self.metadata = doc.metadata
self.setEditData(self.metadata)
# display page 0 in text strings
text = page.getText() # output = plain text by default
self.ui.textBrowser.setText(text)
# now get a properly rendered bitmap of the pdf document, page 0 only.
pno = 0
pix = doc.get_page_pixmap(pno)
# adjust image size to fit the GUI
width, height = pix.w, pix.h
w_vue, h_vue = self.ui.pdf_bmp_label.width(), self.ui.pdf_bmp_label.height()
png = pix.tobytes(output="png")
qimage = QImage()
qimage.loadFromData(png, "PNG")
self.ui.pdf_bmp_label.setPixmap(QPixmap.fromImage(qimage))
# Commit was enabled at each of the above metadata value settings.
# want to wait for changes from now on.
self.ui.commit_pushButton.setEnabled(False)
except Exception as e :
print(e)
def quit(self):
""" Quit button handler. """
self.close()
def keyPressEvent(self, e):
""" Escape key is a quick bail-out. """
if e.key() == Qt.Key.Key_Escape:
self.close()
def EditorMain():
# attempt to quiet some error messages.
QCoreApplication.setAttribute(Qt.AA_ShareOpenGLContexts)
app = QApplication([])
widget = MetadataEditor()
# just sets the window title, the expected qt designer ui ways didn't work.
widget.setWindowTitle('Quick and Dirty PDF Metadata Editor')
'''
Options for single argument:
1. relative or absolute path to a single pdf file - just process it
2. path is a directory, do a filesystem walk and collect .pdf files found into a work list
3. no arguments - open a qt directory chooser and then process as in 2.
'''
if len(sys.argv) == 1 : # no calling arguments, open a directory chooser
path = QFileDialog.getExistingDirectory(widget, ("Open Top Level PDF Directory"),None,
#"/home",
QFileDialog.ShowDirsOnly
| QFileDialog.DontResolveSymlinks)
if len(sys.argv) == 2:
path = os.fspath(Path(__file__).resolve().parent / sys.argv[1])
if os.path.isfile(path) : # case of only single file to process
dir, fn = os.path.split(path)
widget.docsList.append([dir, fn])
widget.docsList_top = 1
widget.docsList_current = 0
if len(widget.docsList) == 0:
for root, dirs, files in os.walk(path) :
for fn in files:
if fn.lower().endswith("pdf"):
widget.docsList.append([root, fn])
widget.docsList_top = len(widget.docsList)
widget.docsList_current = 0
print( "This is what we'll look at first: ", widget.docsList[0])
widget.show()
widget.processPdf()
sys.exit(app.exec())
if __name__ == "__main__":
EditorMain()
|
[
"pprint.pformat",
"PySide6.QtWidgets.QFileDialog.getExistingDirectory",
"os.walk",
"os.rename",
"PySide6.QtCore.QCoreApplication.setAttribute",
"os.path.isfile",
"pathlib.Path",
"PySide6.QtGui.QImage",
"PySide6.QtWidgets.QApplication",
"pprint.pprint",
"PySide6.QtCore.QFile",
"fitz.open",
"PySide6.QtGui.QPixmap.fromImage",
"os.path.split",
"os.path.join",
"PySide6.QtUiTools.QUiLoader"
] |
[((12164, 12220), 'PySide6.QtCore.QCoreApplication.setAttribute', 'QCoreApplication.setAttribute', (['Qt.AA_ShareOpenGLContexts'], {}), '(Qt.AA_ShareOpenGLContexts)\n', (12193, 12220), False, 'from PySide6.QtCore import QFile, Qt, QCoreApplication, QEvent\n'), ((12232, 12248), 'PySide6.QtWidgets.QApplication', 'QApplication', (['[]'], {}), '([])\n', (12244, 12248), False, 'from PySide6.QtWidgets import QApplication, QFileDialog, QWidget, QRadioButton, QTextEdit\n'), ((1148, 1159), 'PySide6.QtUiTools.QUiLoader', 'QUiLoader', ([], {}), '()\n', (1157, 1159), False, 'from PySide6.QtUiTools import QUiLoader\n'), ((1297, 1308), 'PySide6.QtCore.QFile', 'QFile', (['path'], {}), '(path)\n', (1302, 1308), False, 'from PySide6.QtCore import QFile, Qt, QCoreApplication, QEvent\n'), ((9347, 9368), 'os.path.join', 'os.path.join', (['dir', 'fn'], {}), '(dir, fn)\n', (9359, 9368), False, 'import fitz, os, queue, sys\n'), ((12832, 12974), 'PySide6.QtWidgets.QFileDialog.getExistingDirectory', 'QFileDialog.getExistingDirectory', (['widget', '"""Open Top Level PDF Directory"""', 'None', '(QFileDialog.ShowDirsOnly | QFileDialog.DontResolveSymlinks)'], {}), "(widget, 'Open Top Level PDF Directory',\n None, QFileDialog.ShowDirsOnly | QFileDialog.DontResolveSymlinks)\n", (12864, 12974), False, 'from PySide6.QtWidgets import QApplication, QFileDialog, QWidget, QRadioButton, QTextEdit\n'), ((13201, 13221), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (13215, 13221), False, 'import fitz, os, queue, sys\n'), ((13504, 13517), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (13511, 13517), False, 'import fitz, os, queue, sys\n'), ((7133, 7154), 'pprint.pprint', 'pprint', (['self.metadata'], {}), '(self.metadata)\n', (7139, 7154), False, 'from pprint import pprint, pformat\n'), ((10715, 10730), 'fitz.open', 'fitz.open', (['path'], {}), '(path)\n', (10724, 10730), False, 'import fitz, os, queue, sys\n'), ((11492, 11500), 'PySide6.QtGui.QImage', 'QImage', ([], {}), '()\n', (11498, 11500), False, 'from PySide6.QtGui import QImage, QPixmap\n'), ((13294, 13313), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (13307, 13313), False, 'import fitz, os, queue, sys\n'), ((2891, 2915), 'os.path.join', 'os.path.join', (['r', 'newname'], {}), '(r, newname)\n', (2903, 2915), False, 'import fitz, os, queue, sys\n'), ((7458, 7499), 'os.rename', 'os.rename', (['self.fn', "(self.fn + '.original')"], {}), "(self.fn, self.fn + '.original')\n", (7467, 7499), False, 'import fitz, os, queue, sys\n'), ((7516, 7539), 'os.rename', 'os.rename', (['dst', 'self.fn'], {}), '(dst, self.fn)\n', (7525, 7539), False, 'import fitz, os, queue, sys\n'), ((8301, 8318), 'pprint.pformat', 'pformat', (['metadata'], {}), '(metadata)\n', (8308, 8318), False, 'from pprint import pprint, pformat\n'), ((11589, 11614), 'PySide6.QtGui.QPixmap.fromImage', 'QPixmap.fromImage', (['qimage'], {}), '(qimage)\n', (11606, 11614), False, 'from PySide6.QtGui import QImage, QPixmap\n'), ((7819, 7850), 'os.path.join', 'os.path.join', (['dir', 'self.newname'], {}), '(dir, self.newname)\n', (7831, 7850), False, 'import fitz, os, queue, sys\n'), ((1185, 1199), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1189, 1199), False, 'from pathlib import Path\n'), ((13143, 13157), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (13147, 13157), False, 'from pathlib import Path\n')]
|
import random
import functools as fcn
from itertools import islice, chain, combinations
def get_twoopt_mutation(adjacency_matrix, mutation_probability):
return fcn.partial(__twoopt_mutation, adjacency_matrix,
mutation_probability)
def __twoopt_mutation(adjacency_matrix, mutation_probility, genotype):
'''
After each valid exchange_cost(i, j), tsp_cycle:
city_0, city_1, city_2, ..., city_i-1, city_i, ... city_j-1, city_j, ...,
city_n-1, city_0
becomes
city_0, city_1, city_2, ..., city_i-1, city_j, city_j-1, ..., city_i,
city_j+1..., city_n-1, city_0
'''
if not random.random() < mutation_probility:
return genotype
tsp_cycle = list(chain([0], genotype, [0]))
n = len(tsp_cycle)
distance = lambda idx1, idx2: adjacency_matrix[tsp_cycle[idx1]][tsp_cycle[idx2]]
def exchange_cost(i, j):
removed_cost = distance(i - 1, i) + distance(j - 1, j)
added_cost = distance(i - 1, j - 1) + distance(i, j)
return added_cost - removed_cost
exchange = True
idxs = range(1, n - 1)
while exchange:
exchange = False
for i, j in combinations(idxs, 2):
if j > i + 1 and exchange_cost(i, j) < 0:
tsp_cycle = list(chain(islice(tsp_cycle, 0, i),
reversed(tsp_cycle[i:j]),
islice(tsp_cycle, j, n)))
exchange = True
new_genotype = tuple(islice(tsp_cycle, 1, n - 1))
return new_genotype
|
[
"functools.partial",
"itertools.combinations",
"random.random",
"itertools.islice",
"itertools.chain"
] |
[((166, 236), 'functools.partial', 'fcn.partial', (['__twoopt_mutation', 'adjacency_matrix', 'mutation_probability'], {}), '(__twoopt_mutation, adjacency_matrix, mutation_probability)\n', (177, 236), True, 'import functools as fcn\n'), ((720, 745), 'itertools.chain', 'chain', (['[0]', 'genotype', '[0]'], {}), '([0], genotype, [0])\n', (725, 745), False, 'from itertools import islice, chain, combinations\n'), ((1166, 1187), 'itertools.combinations', 'combinations', (['idxs', '(2)'], {}), '(idxs, 2)\n', (1178, 1187), False, 'from itertools import islice, chain, combinations\n'), ((1496, 1523), 'itertools.islice', 'islice', (['tsp_cycle', '(1)', '(n - 1)'], {}), '(tsp_cycle, 1, n - 1)\n', (1502, 1523), False, 'from itertools import islice, chain, combinations\n'), ((636, 651), 'random.random', 'random.random', ([], {}), '()\n', (649, 651), False, 'import random\n'), ((1283, 1306), 'itertools.islice', 'islice', (['tsp_cycle', '(0)', 'i'], {}), '(tsp_cycle, 0, i)\n', (1289, 1306), False, 'from itertools import islice, chain, combinations\n'), ((1412, 1435), 'itertools.islice', 'islice', (['tsp_cycle', 'j', 'n'], {}), '(tsp_cycle, j, n)\n', (1418, 1435), False, 'from itertools import islice, chain, combinations\n')]
|
"""Publish documentation for a project on the server.
Usage:
client.py <host> <project> <directory>
client.py -h | --help
Options:
-h --help Display help message and exit.
"""
# pylint: disable=no-value-for-parameter
from __future__ import print_function, absolute_import
import os
import shutil
import zipfile
import tempfile
import click
import requests
from requests_toolbelt.multipart.encoder import MultipartEncoder
def get_zipped_content(file_or_directory):
"""Return zipped content of given file or directory.
Args:
file_or_directory (str): path for the filesystem file or directory to
be zipped.
Returns:
str: bytes of the zip file containing required files.
"""
if os.path.splitext(file_or_directory)[1] == ".zip":
click.secho("File iz already zipped... ", nl=False)
click.secho("SKIP!", bold=True, fg="yellow")
with open(file_or_directory, "rb") as zipped_file:
return zipped_file.read()
click.secho("Zipping content of '{}'... ".format(file_or_directory),
nl=False)
temp_dir = None
try:
temp_dir = tempfile.mkdtemp()
zip_path = os.path.join(temp_dir, "content.zip")
if os.path.isfile(file_or_directory):
with zipfile.ZipFile(zip_path, mode="w") as zipped_file:
zipped_file.write(file_or_directory)
else:
# make_archive likes getting filename without the extension
shutil.make_archive(zip_path[:-len(".zip")], "zip",
file_or_directory)
click.secho("DONE!", bold=True, fg="green")
with open(zip_path, "rb") as zipped_file:
return zipped_file.read()
finally:
if temp_dir is not None:
shutil.rmtree(temp_dir)
def deploy_documentation(content, host, project):
"""Send documentation content to the server.
Args:
content (str): the zipped documentation content.
host (str): URL for reaching the server, e.g.: "docs:5000/".
project (str): the project's name.
"""
click.secho("Publishing content for project '{}'... ".format(project),
nl=False)
filename = "{}.zip".format(project)
multipart = MultipartEncoder(
fields={"files": (filename, content, "text/plain")}
)
response = requests.put(
"http://{}/api/upload_docs".format(host),
data=multipart,
headers={"Content-Type": multipart.content_type})
response.raise_for_status()
click.secho("DONE!", bold=True, fg="green")
def publish(host, project, file_or_directory):
"""Deploy documentation on the server.
Args:
host (str): URL for reaching the server, e.g.: "docs:5000/".
project (str): the project's name.
file_or_directory (str): path to the root directory or file containing
the documentation, e.g.: "index.html", "path/to/root/dir".
"""
content = get_zipped_content(file_or_directory)
deploy_documentation(content, host, project)
@click.command("publish",
short_help="Send documentation to the server.",
context_settings=dict(help_option_names=['-h', '--help']))
@click.argument("host", metavar="<host>")
@click.argument("project", metavar="<project>")
@click.argument("file_or_directory", metavar="<file_or_directory>",
type=click.Path(exists=True))
def publish_cli(host, project, file_or_directory):
"""Publish documentation for a project on the server."""
publish(host=host,
project=project,
file_or_directory=file_or_directory)
if __name__ == "__main__":
publish_cli()
|
[
"zipfile.ZipFile",
"click.argument",
"shutil.rmtree",
"os.path.isfile",
"tempfile.mkdtemp",
"os.path.splitext",
"click.Path",
"requests_toolbelt.multipart.encoder.MultipartEncoder",
"click.secho",
"os.path.join"
] |
[((3245, 3285), 'click.argument', 'click.argument', (['"""host"""'], {'metavar': '"""<host>"""'}), "('host', metavar='<host>')\n", (3259, 3285), False, 'import click\n'), ((3287, 3333), 'click.argument', 'click.argument', (['"""project"""'], {'metavar': '"""<project>"""'}), "('project', metavar='<project>')\n", (3301, 3333), False, 'import click\n'), ((2279, 2348), 'requests_toolbelt.multipart.encoder.MultipartEncoder', 'MultipartEncoder', ([], {'fields': "{'files': (filename, content, 'text/plain')}"}), "(fields={'files': (filename, content, 'text/plain')})\n", (2295, 2348), False, 'from requests_toolbelt.multipart.encoder import MultipartEncoder\n'), ((2561, 2604), 'click.secho', 'click.secho', (['"""DONE!"""'], {'bold': '(True)', 'fg': '"""green"""'}), "('DONE!', bold=True, fg='green')\n", (2572, 2604), False, 'import click\n'), ((810, 861), 'click.secho', 'click.secho', (['"""File iz already zipped... """'], {'nl': '(False)'}), "('File iz already zipped... ', nl=False)\n", (821, 861), False, 'import click\n'), ((870, 914), 'click.secho', 'click.secho', (['"""SKIP!"""'], {'bold': '(True)', 'fg': '"""yellow"""'}), "('SKIP!', bold=True, fg='yellow')\n", (881, 914), False, 'import click\n'), ((1161, 1179), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (1177, 1179), False, 'import tempfile\n'), ((1200, 1237), 'os.path.join', 'os.path.join', (['temp_dir', '"""content.zip"""'], {}), "(temp_dir, 'content.zip')\n", (1212, 1237), False, 'import os\n'), ((1249, 1282), 'os.path.isfile', 'os.path.isfile', (['file_or_directory'], {}), '(file_or_directory)\n', (1263, 1282), False, 'import os\n'), ((1617, 1660), 'click.secho', 'click.secho', (['"""DONE!"""'], {'bold': '(True)', 'fg': '"""green"""'}), "('DONE!', bold=True, fg='green')\n", (1628, 1660), False, 'import click\n'), ((3423, 3446), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (3433, 3446), False, 'import click\n'), ((752, 787), 'os.path.splitext', 'os.path.splitext', (['file_or_directory'], {}), '(file_or_directory)\n', (768, 787), False, 'import os\n'), ((1808, 1831), 'shutil.rmtree', 'shutil.rmtree', (['temp_dir'], {}), '(temp_dir)\n', (1821, 1831), False, 'import shutil\n'), ((1301, 1336), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zip_path'], {'mode': '"""w"""'}), "(zip_path, mode='w')\n", (1316, 1336), False, 'import zipfile\n')]
|
#!/usr/bin/env python
# Import modules
import time
import math
# Import files
import src.py3_pi_markov as spy3
# Constant
print("Disks \t Points\t Total Point \t Pi \t Error \t Time")
# Number of point
for nPoint in [10, 100, 1000, 10**4, 10**5, 10**6, 10**7]:
# Number of disk
for nDisk in [10, 100, 1000, 10**4, 10**5, 10**6, 10**7]:
# Time
aTime = time.clock()
# Call nDisk with nPoint
pPidisk = spy3.fpimarkov(nDisk, nPoint)
# mean of all pi
aPi = sum(pPidisk)/nDisk
# Error
aError = math.fabs(math.pi - aPi)
# Time
aTime = time.clock() - aTime
print(nDisk, '\t', nPoint, '\t', nDisk*nPoint, '\t', aPi, '\t', aError, '\t', aTime)
|
[
"math.fabs",
"time.clock",
"src.py3_pi_markov.fpimarkov"
] |
[((380, 392), 'time.clock', 'time.clock', ([], {}), '()\n', (390, 392), False, 'import time\n'), ((445, 474), 'src.py3_pi_markov.fpimarkov', 'spy3.fpimarkov', (['nDisk', 'nPoint'], {}), '(nDisk, nPoint)\n', (459, 474), True, 'import src.py3_pi_markov as spy3\n'), ((575, 599), 'math.fabs', 'math.fabs', (['(math.pi - aPi)'], {}), '(math.pi - aPi)\n', (584, 599), False, 'import math\n'), ((631, 643), 'time.clock', 'time.clock', ([], {}), '()\n', (641, 643), False, 'import time\n')]
|
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
import torch
import pandas as pd
import os
from learning_models.sidarthe import Sidarthe
from torch_euler import Heun, euler
from matplotlib import pyplot as plt
# %%
params = {
"alpha": [0.570] * 4 + [0.422] * 18 + [0.360] * 6 + [0.210] * 11,
"beta": [0.011] * 4 + [0.0057] * 18 + [0.005] * 17,
"gamma": [0.456] * 4 + [0.285] * 18 + [0.2] * 6 + [0.11] * 11,
"delta": [0.011] * 4 + [0.0057] * 18 + [0.005] * 17,
"epsilon": [0.171] * 12 + [0.143] * 26 + [0.2],
"theta": [0.371] * 39,
"zeta": [0.125] * 22 + [0.034] * 16 + [0.025],
"eta": [0.125] * 22 + [0.034] * 16 + [0.025],
"mu": [0.017] * 22 + [0.008] * 17,
"nu": [0.027] * 22 + [0.015] * 17,
"tau": [0.01] * 39,
"lambda": [0.034] * 22 + [0.08] * 17,
"kappa": [0.017] * 22 + [0.017] * 16 + [0.02],
"xi": [0.017] * 22 + [0.017] * 16 + [0.02],
"rho": [0.034] * 22 + [0.017] * 16 + [0.02],
"sigma": [0.017] * 22 + [0.017] * 16 + [0.01]
}
i = 200./60e6
d = 20./60e6
a = 1./60e6
r = 2./60e6
t, h, e = 0., 0., 0.
s = 1. - (i + d + a + r + t + h + e)
initial_values = [
s,
i,
d,
a,
r,
t,
e,
h,
]
a_len = len(params["alpha"])
for key, value in params.items():
assert len(value) == a_len, f"{key} has wrong size"
t_inc = 0.01
sidarthe = Sidarthe(params, 1, initial_values, euler, 0.01,
d_weight=0.,
r_weight=0.,
t_weight=0.,
h_weight=0.,
e_weight=0.,
der_1st_reg=0.,
bound_reg=0.,
verbose=False,
loss_type="rmse"
)
# %%
size = 197
t_grid = torch.linspace(0, size, int(size/t_inc) + 1)
t_slice = slice(0, int(size/t_inc) + 1, int(1/t_inc))
# %%
with torch.no_grad():
inference = sidarthe.inference(t_grid)
# %%
new_inference = {}
for k,v in inference.items():
new_inference[k] = v[t_slice]
# %%
print(inference["r0"][700])
# %%
#sol = inference["sol"]
# plot state evolution
base_path = os.path.join(os.getcwd(), "plots")
if not os.path.exists(base_path):
os.mkdir(base_path)
def build_plot(t_grid, keys_string, inference, base_path):
plt.figure()
for key in keys_string:
plt.xlabel("number of days")
plt.ylabel(key)
plt.plot(t_grid[t_slice].numpy(), inference[key], label=key)
plt.legend()
plot_filename = os.path.join(base_path, str(keys_string) + '.png')
plt.show()
plt.savefig(plot_filename)
build_plot(t_grid, 'idarthe', new_inference, base_path)
build_plot(t_grid, 's', new_inference, base_path)
build_plot(t_grid, ['r0'], new_inference, base_path)
# store parameters and state in a pandas dataframe results
params_df = pd.DataFrame.from_dict(params)
state_df = pd.DataFrame.from_dict(new_inference)
state_df = state_df.drop(columns='sol')
extend = state_df.shape[0]-params_df.shape[0]
params_df = params_df.append(params_df.iloc[[-1]*extend])
params_df.index = range(params_df.shape[0])
results = pd.concat([params_df, state_df], axis=1, sort=False)
# save results to csv
base_path = os.path.join(os.getcwd(), "regioni")
if not os.path.exists(base_path):
os.mkdir(base_path)
results.to_csv(os.path.join(base_path,'sidarthe_results_new.csv'))
# %%
|
[
"os.mkdir",
"matplotlib.pyplot.show",
"pandas.DataFrame.from_dict",
"os.getcwd",
"matplotlib.pyplot.legend",
"os.path.exists",
"matplotlib.pyplot.figure",
"learning_models.sidarthe.Sidarthe",
"matplotlib.pyplot.ylabel",
"torch.no_grad",
"os.path.join",
"pandas.concat",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlabel"
] |
[((1412, 1604), 'learning_models.sidarthe.Sidarthe', 'Sidarthe', (['params', '(1)', 'initial_values', 'euler', '(0.01)'], {'d_weight': '(0.0)', 'r_weight': '(0.0)', 't_weight': '(0.0)', 'h_weight': '(0.0)', 'e_weight': '(0.0)', 'der_1st_reg': '(0.0)', 'bound_reg': '(0.0)', 'verbose': '(False)', 'loss_type': '"""rmse"""'}), "(params, 1, initial_values, euler, 0.01, d_weight=0.0, r_weight=0.0,\n t_weight=0.0, h_weight=0.0, e_weight=0.0, der_1st_reg=0.0, bound_reg=\n 0.0, verbose=False, loss_type='rmse')\n", (1420, 1604), False, 'from learning_models.sidarthe import Sidarthe\n'), ((2878, 2908), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['params'], {}), '(params)\n', (2900, 2908), True, 'import pandas as pd\n'), ((2920, 2957), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['new_inference'], {}), '(new_inference)\n', (2942, 2957), True, 'import pandas as pd\n'), ((3156, 3208), 'pandas.concat', 'pd.concat', (['[params_df, state_df]'], {'axis': '(1)', 'sort': '(False)'}), '([params_df, state_df], axis=1, sort=False)\n', (3165, 3208), True, 'import pandas as pd\n'), ((1928, 1943), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1941, 1943), False, 'import torch\n'), ((2193, 2204), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2202, 2204), False, 'import os\n'), ((2222, 2247), 'os.path.exists', 'os.path.exists', (['base_path'], {}), '(base_path)\n', (2236, 2247), False, 'import os\n'), ((2253, 2272), 'os.mkdir', 'os.mkdir', (['base_path'], {}), '(base_path)\n', (2261, 2272), False, 'import os\n'), ((2337, 2349), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2347, 2349), True, 'from matplotlib import pyplot as plt\n'), ((2604, 2614), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2612, 2614), True, 'from matplotlib import pyplot as plt\n'), ((2619, 2645), 'matplotlib.pyplot.savefig', 'plt.savefig', (['plot_filename'], {}), '(plot_filename)\n', (2630, 2645), True, 'from matplotlib import pyplot as plt\n'), ((3257, 3268), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3266, 3268), False, 'import os\n'), ((3288, 3313), 'os.path.exists', 'os.path.exists', (['base_path'], {}), '(base_path)\n', (3302, 3313), False, 'import os\n'), ((3319, 3338), 'os.mkdir', 'os.mkdir', (['base_path'], {}), '(base_path)\n', (3327, 3338), False, 'import os\n'), ((3354, 3405), 'os.path.join', 'os.path.join', (['base_path', '"""sidarthe_results_new.csv"""'], {}), "(base_path, 'sidarthe_results_new.csv')\n", (3366, 3405), False, 'import os\n'), ((2386, 2414), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""number of days"""'], {}), "('number of days')\n", (2396, 2414), True, 'from matplotlib import pyplot as plt\n'), ((2423, 2438), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['key'], {}), '(key)\n', (2433, 2438), True, 'from matplotlib import pyplot as plt\n'), ((2516, 2528), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2526, 2528), True, 'from matplotlib import pyplot as plt\n')]
|
"""
This Module contains Training Files for the CNN extractor
"""
# system util imports
import os
import numpy as np
# custom dataset imports
from data_p1 import DATA_P1
from models_p1 import CNN_P1
from models_p1 import CNN_P1_UPPER
import parser
from visualizations import plot_p1_train_info, plot_embedding
# system related
import random
import os
# torch related
import torch
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torch.nn as nn
# torch dataset related
from torchvision import datasets
from torchvision import transforms
from sklearn.manifold import TSNE
def extract_features():
"""
Extracts CNN features using resnet50
"""
# Load the training dataset
train = DATA_P1(mode='train')
# Load the validation dataset
val = DATA_P1(mode='valid')
output_size = 2048 * 9 * 7
cnn = CNN_P1(57600)
cnn.cuda()
cnn.eval()
resnet50_out_train = []
with torch.no_grad():
for i in range(len(train)):
frames, _ = train[i]
frames = torch.stack(frames)
cnn_out = cnn.resnet50(frames.cuda()).cpu().view(-1, output_size)
resnet50_out_train.append(torch.mean(cnn_out ,0))
print("Training dataset processed...")
resnet50_out_valid = []
with torch.no_grad():
for i in range(len(val)):
frames, _ = val[i]
frames = torch.stack(frames)
cnn_out = cnn.resnet50(frames.cuda()).cpu().view(-1, output_size)
resnet50_out_valid.append(torch.mean(cnn_out,0))
print("Validation dataset processed...")
return resnet50_out_train, resnet50_out_valid, train, val
def process_dataset(resnet50_out_train, resnet50_out_valid, train, val):
training_y = []
for i in range(len(train)):
training_y.append(train[i][1])
val_y = []
for i in range(len(val)):
val_y.append(val[i][1])
t_x = torch.stack(resnet50_out_train)
t_y = torch.LongTensor(training_y)
v_x = torch.stack(resnet50_out_valid)
v_y = torch.LongTensor(val_y)
return t_x, t_y, v_x, v_y
if __name__=='__main__':
"""
Configuration parameters for arguments
args.mode: train
args.batch_size: 64
args.num_epochs: 50
args.lr: 0.0001
args.random_seed = 999
"""
# If you want to visualize the t-sne
plot_t_sne = False
plot_train_loss = False
# parse the arguments
args = parser.arg_parse()
# Set up a Random Seed
manual_seed = args.random_seed #999
random.seed(manual_seed)
torch.manual_seed(manual_seed)
""" Parse Arguments"""
epochs = args.num_epochs
num_batches = args.batch_size
lr = args.lr
""" Set up Training Variables """
output_size = 2048 * 9 * 7
acc_max = 0.0
total_loss_t = []
total_val_acc = []
# Set the Model
cnn = CNN_P1_UPPER(output_size).cuda()
cnn.train()
# Set the optimizer
adam = torch.optim.Adam(cnn.parameters(), lr=lr)
# set the loss
cross_entropy = nn.CrossEntropyLoss()
# retrieve dataset
resnet50_out_train, resnet50_out_valid, train, val = extract_features()
t_x, t_y, v_x, v_y = process_dataset(resnet50_out_train, resnet50_out_valid, train, val)
len_t_x = len(t_x)
for epoch in range(1, epochs + 1):
print("CURRENT EPOCH =====>>>> ====>>>> ", epoch)
epoch_loss = 0.0
# Get a random item from the dataset
rnd_idx = torch.randperm(len_t_x)
x = t_x[rnd_idx]
y = t_y[rnd_idx]
# Get a batch of sample
for idx in range(0, len_t_x, num_batches):
if (idx + num_batches) > len_t_x:
print("Index out of boundary...")
break
adam.zero_grad()
batch_x = x[idx:idx+num_batches].cuda()
batch_y = y[idx:idx+num_batches].cuda()
# flow gradients
cnn_out = cnn(batch_x)
cross_entropy = cross_entropy(cnn_out, batch_y)
cross_entropy.backward()
adam.step()
epoch_loss += np.numpy(cross_entropy.cpu().data)
print("== TRANING LOSS ==> ==> ==> ", epoch_loss)
total_loss_t .append(epoch_loss)
# Check the validation loss
with torch.no_grad():
cnn.eval()
v_x = v_x.cuda()
out_cnn = cnn(v_x)
pred_cnn = torch.argmax(out_cnn,1).cpu().data
acc = np.mean((pred_cnn == v_y).numpy())
print("== VALIDATION SET ACCURACY ==> ==> ==> ", acc)
total_val_acc.append(acc)
if acc > acc_max:
torch.save(cnn.state_dict(), "./models/cnn_model.pkt")
acc_max = acc
cnn.train()
print('Training Finished...')
if plot_t_sne == True:
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=3000)
print(t_x.detach().cpu().numpy().shape)
feature_np = t_x.detach().cpu().numpy()
feature_np = feature_np.reshape(feature_np.shape[0], feature_np.shape[1] )
print(feature_np.shape)
#dann_tsne = tsne.fit_transform(combined_feature.detach().cpu().numpy())
tsne = tsne.fit_transform(feature_np)
label_y = t_y.detach().cpu().numpy()
labels = []
for i in label_y:
labels.append(int(i))
plot_embedding(tsne, labels, 'train', 't-sne')
if plot_train_loss == True:
plot_p1_train_info(total_loss_t, total_val_acc, save_dir = "./saved_plot/problem1_loss_acc.png")
print('Plots Finished...')
|
[
"visualizations.plot_embedding",
"torch.mean",
"data_p1.DATA_P1",
"torch.stack",
"sklearn.manifold.TSNE",
"torch.LongTensor",
"torch.manual_seed",
"torch.argmax",
"torch.nn.CrossEntropyLoss",
"models_p1.CNN_P1",
"random.seed",
"torch.randperm",
"parser.arg_parse",
"visualizations.plot_p1_train_info",
"torch.no_grad",
"models_p1.CNN_P1_UPPER"
] |
[((757, 778), 'data_p1.DATA_P1', 'DATA_P1', ([], {'mode': '"""train"""'}), "(mode='train')\n", (764, 778), False, 'from data_p1 import DATA_P1\n'), ((824, 845), 'data_p1.DATA_P1', 'DATA_P1', ([], {'mode': '"""valid"""'}), "(mode='valid')\n", (831, 845), False, 'from data_p1 import DATA_P1\n'), ((888, 901), 'models_p1.CNN_P1', 'CNN_P1', (['(57600)'], {}), '(57600)\n', (894, 901), False, 'from models_p1 import CNN_P1\n'), ((1959, 1990), 'torch.stack', 'torch.stack', (['resnet50_out_train'], {}), '(resnet50_out_train)\n', (1970, 1990), False, 'import torch\n'), ((2001, 2029), 'torch.LongTensor', 'torch.LongTensor', (['training_y'], {}), '(training_y)\n', (2017, 2029), False, 'import torch\n'), ((2041, 2072), 'torch.stack', 'torch.stack', (['resnet50_out_valid'], {}), '(resnet50_out_valid)\n', (2052, 2072), False, 'import torch\n'), ((2083, 2106), 'torch.LongTensor', 'torch.LongTensor', (['val_y'], {}), '(val_y)\n', (2099, 2106), False, 'import torch\n'), ((2472, 2490), 'parser.arg_parse', 'parser.arg_parse', ([], {}), '()\n', (2488, 2490), False, 'import parser\n'), ((2563, 2587), 'random.seed', 'random.seed', (['manual_seed'], {}), '(manual_seed)\n', (2574, 2587), False, 'import random\n'), ((2592, 2622), 'torch.manual_seed', 'torch.manual_seed', (['manual_seed'], {}), '(manual_seed)\n', (2609, 2622), False, 'import torch\n'), ((3059, 3080), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (3078, 3080), True, 'import torch.nn as nn\n'), ((974, 989), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (987, 989), False, 'import torch\n'), ((1333, 1348), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1346, 1348), False, 'import torch\n'), ((3493, 3516), 'torch.randperm', 'torch.randperm', (['len_t_x'], {}), '(len_t_x)\n', (3507, 3516), False, 'import torch\n'), ((4842, 4902), 'sklearn.manifold.TSNE', 'TSNE', ([], {'perplexity': '(30)', 'n_components': '(2)', 'init': '"""pca"""', 'n_iter': '(3000)'}), "(perplexity=30, n_components=2, init='pca', n_iter=3000)\n", (4846, 4902), False, 'from sklearn.manifold import TSNE\n'), ((5374, 5420), 'visualizations.plot_embedding', 'plot_embedding', (['tsne', 'labels', '"""train"""', '"""t-sne"""'], {}), "(tsne, labels, 'train', 't-sne')\n", (5388, 5420), False, 'from visualizations import plot_p1_train_info, plot_embedding\n'), ((5461, 5560), 'visualizations.plot_p1_train_info', 'plot_p1_train_info', (['total_loss_t', 'total_val_acc'], {'save_dir': '"""./saved_plot/problem1_loss_acc.png"""'}), "(total_loss_t, total_val_acc, save_dir=\n    './saved_plot/problem1_loss_acc.png')\n", (5479, 5560), False, 'from visualizations import plot_p1_train_info, plot_embedding\n'), ((1082, 1101), 'torch.stack', 'torch.stack', (['frames'], {}), '(frames)\n', (1093, 1101), False, 'import torch\n'), ((1437, 1456), 'torch.stack', 'torch.stack', (['frames'], {}), '(frames)\n', (1448, 1456), False, 'import torch\n'), ((2894, 2919), 'models_p1.CNN_P1_UPPER', 'CNN_P1_UPPER', (['output_size'], {}), '(output_size)\n', (2906, 2919), False, 'from models_p1 import CNN_P1_UPPER\n'), ((4307, 4322), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4320, 4322), False, 'import torch\n'), ((1218, 1240), 'torch.mean', 'torch.mean', (['cnn_out', '(0)'], {}), '(cnn_out, 0)\n', (1228, 1240), False, 'import torch\n'), ((1573, 1595), 'torch.mean', 'torch.mean', (['cnn_out', '(0)'], {}), '(cnn_out, 0)\n', (1583, 1595), False, 'import torch\n'), ((4430, 4454), 'torch.argmax', 'torch.argmax', (['out_cnn', '(1)'], {}), '(out_cnn, 1)\n', (4442, 4454), False, 'import torch\n')]
|
"""Define a dynamical system for a 3D quadrotor"""
from typing import Tuple, List, Optional
import torch
import numpy as np
from .control_affine_system import ControlAffineSystem
from .utils import grav, Scenario
class Quad3D(ControlAffineSystem):
"""
Represents a planar quadrotor.
The system has state
x = [px, py, pz, vx, vy, vz, phi, theta, psi]
representing the position, orientation, and velocities of the quadrotor, and it
has control inputs
u = [f, phi_dot, theta_dot, psi_dot]
The system is parameterized by
m: mass
NOTE: Z is defined as positive downwards
"""
# Number of states and controls
N_DIMS = 9
N_CONTROLS = 4
# State indices
PX = 0
PY = 1
PZ = 2
VX = 3
VY = 4
VZ = 5
PHI = 6
THETA = 7
PSI = 8
# Control indices
F = 0
PHI_DOT = 1
THETA_DOT = 2
PSI_DOT = 3
def __init__(
self,
nominal_params: Scenario,
dt: float = 0.01,
controller_dt: Optional[float] = None,
):
"""
Initialize the quadrotor.
args:
nominal_params: a dictionary giving the parameter values for the system.
Requires keys ["m"]
dt: the timestep to use for the simulation
controller_dt: the timestep for the LQR discretization. Defaults to dt
raises:
ValueError if nominal_params are not valid for this system
"""
super().__init__(nominal_params, dt, controller_dt)
def validate_params(self, params: Scenario) -> bool:
"""Check if a given set of parameters is valid
args:
params: a dictionary giving the parameter values for the system.
Requires keys ["m"]
returns:
True if parameters are valid, False otherwise
"""
valid = True
# Make sure all needed parameters were provided
valid = valid and "m" in params
# Make sure all parameters are physically valid
valid = valid and params["m"] > 0
return valid
@property
def n_dims(self) -> int:
return Quad3D.N_DIMS
@property
def angle_dims(self) -> List[int]:
return [Quad3D.PHI, Quad3D.THETA, Quad3D.PSI]
@property
def n_controls(self) -> int:
return Quad3D.N_CONTROLS
@property
def state_limits(self) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Return a tuple (upper, lower) describing the expected range of states for this
system
"""
# define upper and lower limits based around the nominal equilibrium input
upper_limit = torch.ones(self.n_dims)
upper_limit[Quad3D.PX] = 4.0
upper_limit[Quad3D.PY] = 4.0
upper_limit[Quad3D.PZ] = 4.0
upper_limit[Quad3D.VX] = 8.0
upper_limit[Quad3D.VY] = 8.0
upper_limit[Quad3D.VZ] = 8.0
upper_limit[Quad3D.PHI] = np.pi / 2.0
upper_limit[Quad3D.THETA] = np.pi / 2.0
upper_limit[Quad3D.PSI] = np.pi / 2.0
lower_limit = -1.0 * upper_limit
return (upper_limit, lower_limit)
@property
def control_limits(self) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Return a tuple (upper, lower) describing the range of allowable control
limits for this system
"""
# define upper and lower limits based around the nominal equilibrium input
upper_limit = torch.tensor([100, 50, 50, 50])
lower_limit = -1.0 * upper_limit
return (upper_limit, lower_limit)
def safe_mask(self, x):
"""Return the mask of x indicating safe regions for the obstacle task
args:
x: a tensor of points in the state space
"""
safe_mask = torch.ones_like(x[:, 0], dtype=torch.bool)
# We have a floor that we need to avoid and a radius we need to stay inside of
safe_z = 0.0
safe_radius = 3
safe_mask = torch.logical_and(
x[:, Quad3D.PZ] <= safe_z, x.norm(dim=-1) <= safe_radius
)
return safe_mask
def unsafe_mask(self, x):
"""Return the mask of x indicating unsafe regions for the obstacle task
args:
x: a tensor of points in the state space
"""
unsafe_mask = torch.zeros_like(x[:, 0], dtype=torch.bool)
# We have a floor that we need to avoid and a radius we need to stay inside of
unsafe_z = 0.3
unsafe_radius = 3.5
unsafe_mask = torch.logical_or(
x[:, Quad3D.PZ] >= unsafe_z, x.norm(dim=-1) >= unsafe_radius
)
return unsafe_mask
def goal_mask(self, x):
"""Return the mask of x indicating points in the goal set (within 0.2 m of the
goal).
args:
x: a tensor of points in the state space
"""
goal_mask = torch.ones_like(x[:, 0], dtype=torch.bool)
# Define the goal region as being near the goal
near_goal = x.norm(dim=-1) <= 0.3
goal_mask.logical_and_(near_goal)
# The goal set has to be a subset of the safe set
goal_mask.logical_and_(self.safe_mask(x))
return goal_mask
def _f(self, x: torch.Tensor, params: Scenario):
"""
Return the control-independent part of the control-affine dynamics.
args:
x: bs x self.n_dims tensor of state
params: a dictionary giving the parameter values for the system. If None,
default to the nominal parameters used at initialization
returns:
f: bs x self.n_dims x 1 tensor
"""
# Extract batch size and set up a tensor for holding the result
batch_size = x.shape[0]
f = torch.zeros((batch_size, self.n_dims, 1))
f = f.type_as(x)
# Derivatives of positions are just velocities
f[:, Quad3D.PX] = x[:, Quad3D.VX] # x
f[:, Quad3D.PY] = x[:, Quad3D.VY] # y
f[:, Quad3D.PZ] = x[:, Quad3D.VZ] # z
# Constant acceleration in z due to gravity
f[:, Quad3D.VZ] = grav
# Orientation velocities are directly actuated
return f
def _g(self, x: torch.Tensor, params: Scenario):
"""
Return the control-independent part of the control-affine dynamics.
args:
x: bs x self.n_dims tensor of state
params: a dictionary giving the parameter values for the system. If None,
default to the nominal parameters used at initialization
returns:
g: bs x self.n_dims x self.n_controls tensor
"""
# Extract batch size and set up a tensor for holding the result
batch_size = x.shape[0]
g = torch.zeros((batch_size, self.n_dims, self.n_controls))
g = g.type_as(x)
# Extract the needed parameters
m = params["m"]
# Derivatives of linear velocities depend on thrust f
s_theta = torch.sin(x[:, Quad3D.THETA])
c_theta = torch.cos(x[:, Quad3D.THETA])
s_phi = torch.sin(x[:, Quad3D.PHI])
c_phi = torch.cos(x[:, Quad3D.PHI])
g[:, Quad3D.VX, Quad3D.F] = -s_theta / m
g[:, Quad3D.VY, Quad3D.F] = c_theta * s_phi / m
g[:, Quad3D.VZ, Quad3D.F] = -c_theta * c_phi / m
# Derivatives of all orientations are control variables
g[:, Quad3D.PHI :, Quad3D.PHI_DOT :] = torch.eye(self.n_controls - 1)
return g
@property
def u_eq(self):
u_eq = torch.zeros((1, self.n_controls))
u_eq[0, Quad3D.F] = self.nominal_params["m"] * grav
return u_eq
|
[
"torch.ones_like",
"torch.ones",
"torch.eye",
"torch.zeros_like",
"torch.cos",
"torch.zeros",
"torch.sin",
"torch.tensor"
] |
[((2698, 2721), 'torch.ones', 'torch.ones', (['self.n_dims'], {}), '(self.n_dims)\n', (2708, 2721), False, 'import torch\n'), ((3491, 3522), 'torch.tensor', 'torch.tensor', (['[100, 50, 50, 50]'], {}), '([100, 50, 50, 50])\n', (3503, 3522), False, 'import torch\n'), ((3814, 3856), 'torch.ones_like', 'torch.ones_like', (['x[:, 0]'], {'dtype': 'torch.bool'}), '(x[:, 0], dtype=torch.bool)\n', (3829, 3856), False, 'import torch\n'), ((4347, 4390), 'torch.zeros_like', 'torch.zeros_like', (['x[:, 0]'], {'dtype': 'torch.bool'}), '(x[:, 0], dtype=torch.bool)\n', (4363, 4390), False, 'import torch\n'), ((4912, 4954), 'torch.ones_like', 'torch.ones_like', (['x[:, 0]'], {'dtype': 'torch.bool'}), '(x[:, 0], dtype=torch.bool)\n', (4927, 4954), False, 'import torch\n'), ((5787, 5828), 'torch.zeros', 'torch.zeros', (['(batch_size, self.n_dims, 1)'], {}), '((batch_size, self.n_dims, 1))\n', (5798, 5828), False, 'import torch\n'), ((6779, 6834), 'torch.zeros', 'torch.zeros', (['(batch_size, self.n_dims, self.n_controls)'], {}), '((batch_size, self.n_dims, self.n_controls))\n', (6790, 6834), False, 'import torch\n'), ((7006, 7035), 'torch.sin', 'torch.sin', (['x[:, Quad3D.THETA]'], {}), '(x[:, Quad3D.THETA])\n', (7015, 7035), False, 'import torch\n'), ((7054, 7083), 'torch.cos', 'torch.cos', (['x[:, Quad3D.THETA]'], {}), '(x[:, Quad3D.THETA])\n', (7063, 7083), False, 'import torch\n'), ((7100, 7127), 'torch.sin', 'torch.sin', (['x[:, Quad3D.PHI]'], {}), '(x[:, Quad3D.PHI])\n', (7109, 7127), False, 'import torch\n'), ((7144, 7171), 'torch.cos', 'torch.cos', (['x[:, Quad3D.PHI]'], {}), '(x[:, Quad3D.PHI])\n', (7153, 7171), False, 'import torch\n'), ((7446, 7476), 'torch.eye', 'torch.eye', (['(self.n_controls - 1)'], {}), '(self.n_controls - 1)\n', (7455, 7476), False, 'import torch\n'), ((7545, 7578), 'torch.zeros', 'torch.zeros', (['(1, self.n_controls)'], {}), '((1, self.n_controls))\n', (7556, 7578), False, 'import torch\n')]
|
import json
import os
import cv2
from car_seal.bounding_box import BoundingBox
PREDICTION_THRESHOLD = 50
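# Detections with confidence below this percentage are discarded when parsing darknet output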
def normalize_bounding_box(img_name, left, top, width, height):
def limit(desimal):
if desimal < 0:
return 0
elif desimal > 1:
return 1
return desimal
image_path = os.path.join(
os.path.dirname(__file__), "../../../dataset/images/" + img_name + ".JPG"
)
image = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)
img_height, img_width, _ = image.shape
left = limit(left / img_width)
top = limit(top / img_height)
width = limit(width / img_width)
height = limit(height / img_height)
return left, top, width, height
def parse_txt(file_path):
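    # Parse darknet detection output: lines containing an image path start a new entry; each "Car Seal" line above the threshold adds one normalized bounding box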
results = {}
current_img = None
with open(file_path, "r") as f:
for line in f:
if "/home" in line:
img_name = line.split("obj/")[1].split(".JPG")[0]
current_img = img_name
results[current_img] = []
elif "Car Seal" in line:
percentage = line.split("%")[0].split(" ")[-1]
if int(percentage) < PREDICTION_THRESHOLD:
continue
box = line.split("(")[1].split(")")[0].split(" ")
_, left, _, top, _, width, _, height = [word for word in box if word]
if "Green" in line:
label = "green_car_seal"
elif "Red" in line:
label = "red_car_seal"
left, top, width, height = normalize_bounding_box(
img_name=current_img,
left=int(left),
top=int(top),
width=int(width),
height=int(height),
)
box = BoundingBox(
label=label, left=left, top=top, width=width, height=height
)
results[current_img].append(box)
return results
if __name__ == "__main__":
file_path = os.path.join(os.path.dirname(__file__), "results/darknet_results.txt")
results = parse_txt(file_path=file_path)
file_path = os.path.join(
os.path.dirname(__file__),
f"results/darknet_results_{PREDICTION_THRESHOLD}.json",
)
with open(file_path, "w") as f:
json.dump(results, f)
|
[
"json.dump",
"cv2.imread",
"os.path.dirname",
"car_seal.bounding_box.BoundingBox"
] |
[((444, 488), 'cv2.imread', 'cv2.imread', (['image_path', 'cv2.IMREAD_UNCHANGED'], {}), '(image_path, cv2.IMREAD_UNCHANGED)\n', (454, 488), False, 'import cv2\n'), ((352, 377), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (367, 377), False, 'import os\n'), ((2055, 2080), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2070, 2080), False, 'import os\n'), ((2196, 2221), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2211, 2221), False, 'import os\n'), ((2337, 2358), 'json.dump', 'json.dump', (['results', 'f'], {}), '(results, f)\n', (2346, 2358), False, 'import json\n'), ((1817, 1889), 'car_seal.bounding_box.BoundingBox', 'BoundingBox', ([], {'label': 'label', 'left': 'left', 'top': 'top', 'width': 'width', 'height': 'height'}), '(label=label, left=left, top=top, width=width, height=height)\n', (1828, 1889), False, 'from car_seal.bounding_box import BoundingBox\n')]
|
import onnx
from onnx import numpy_helper
import numpy as np
# Filter
sobel = {
3: np.array([[1, 0, -1],
[2, 0, -2],
[1, 0, -1]], dtype='float32'),
5: np.array([[2, 1, 0, -1, -2],
[3, 2, 0, -2, -3],
[4, 3, 0, -3, -4],
[3, 2, 0, -2, -3],
[2, 1, 0, -1, -2]], dtype='float32'),
7: np.array([[3, 2, 1, 0, -1, -2, -3],
[4, 3, 2, 0, -2, -3, -4],
[5, 4, 3, 0, -3, -4, -5],
[6, 5, 4, 0, -4, -5, -6],
[5, 4, 3, 0, -3, -4, -5],
[4, 3, 2, 0, -2, -3, -4],
[3, 2, 1, 0, -1, -2, -3]], dtype='float32'),
9: np.array([[4, 3, 2, 1, 0, -1, -2, -3, -4],
[5, 4, 3, 2, 0, -2, -3, -4, -5],
[6, 5, 4, 3, 0, -3, -4, -5, -6],
[7, 6, 5, 4, 0, -4, -5, -6, -7],
[8, 7, 6, 5, 0, -5, -6, -7, -8],
[7, 6, 5, 4, 0, -4, -5, -6, -7],
[6, 5, 4, 3, 0, -3, -4, -5, -6],
[5, 4, 3, 2, 0, -2, -3, -4, -5],
[4, 3, 2, 1, 0, -1, -2, -3, -4]], dtype='float32')
}
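# Spatial output size of a 'valid' convolution on a 2048x2048 input: 2048 - (kernel_size - 1) per side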
def get_output_shape(i):
if i == 3:
return [1, 1, 2046, 2046]
elif i == 5:
return [1, 1, 2044, 2044]
elif i == 7:
return [1, 1, 2042, 2042]
elif i == 9:
return [1, 1, 2040, 2040]
def main():
for i in range(3, 10, 2):
# Filter
w = sobel[i].reshape((1, 1, i, i))
# Input
x = np.random.rand(1, 1, 2048, 2048).astype('float32')
# Initializer of the weight
initializer_w = numpy_helper.from_array(w, 'w')
tensor_w = onnx.helper.make_tensor_value_info('w', onnx.TensorProto.FLOAT, [1, 1, i, i])
tensor_x = onnx.helper.make_tensor_value_info('x', onnx.TensorProto.FLOAT, [1, 1, 2048, 2048])
tensor_y = onnx.helper.make_tensor_value_info('y', onnx.TensorProto.FLOAT, get_output_shape(i))
# Create a node
node_def = onnx.helper.make_node(
'Conv',
inputs=['x', 'w'],
outputs=['y'],
kernel_shape=[i, i]
)
# Create the graph
graph_def = onnx.helper.make_graph(
[node_def],
f'conv_{i}x{i}',
[tensor_x],
[tensor_y],
[initializer_w]
)
# Create the model
model_def = onnx.helper.make_model(graph_def,
producer_name='python_script',
ir_version=6
)
model_def.opset_import[0].version = 10
# Check the model
onnx.checker.check_model(model_def)
# Save the model
onnx.save(model_def, f'conv_{i}x{i}.onnx')
if __name__ == "__main__":
main()
|
[
"onnx.helper.make_node",
"onnx.numpy_helper.from_array",
"onnx.save",
"onnx.helper.make_model",
"onnx.helper.make_tensor_value_info",
"numpy.array",
"numpy.random.rand",
"onnx.checker.check_model",
"onnx.helper.make_graph"
] |
[((86, 149), 'numpy.array', 'np.array', (['[[1, 0, -1], [2, 0, -2], [1, 0, -1]]'], {'dtype': '"""float32"""'}), "([[1, 0, -1], [2, 0, -2], [1, 0, -1]], dtype='float32')\n", (94, 149), True, 'import numpy as np\n'), ((164, 290), 'numpy.array', 'np.array', (['[[2, 1, 0, -1, -2], [3, 2, 0, -2, -3], [4, 3, 0, -3, -4], [3, 2, 0, -2, -3],\n [2, 1, 0, -1, -2]]'], {'dtype': '"""float32"""'}), "([[2, 1, 0, -1, -2], [3, 2, 0, -2, -3], [4, 3, 0, -3, -4], [3, 2, 0,\n -2, -3], [2, 1, 0, -1, -2]], dtype='float32')\n", (172, 290), True, 'import numpy as np\n'), ((309, 527), 'numpy.array', 'np.array', (['[[3, 2, 1, 0, -1, -2, -3], [4, 3, 2, 0, -2, -3, -4], [5, 4, 3, 0, -3, -4, -\n 5], [6, 5, 4, 0, -4, -5, -6], [5, 4, 3, 0, -3, -4, -5], [4, 3, 2, 0, -2,\n -3, -4], [3, 2, 1, 0, -1, -2, -3]]'], {'dtype': '"""float32"""'}), "([[3, 2, 1, 0, -1, -2, -3], [4, 3, 2, 0, -2, -3, -4], [5, 4, 3, 0, \n -3, -4, -5], [6, 5, 4, 0, -4, -5, -6], [5, 4, 3, 0, -3, -4, -5], [4, 3,\n 2, 0, -2, -3, -4], [3, 2, 1, 0, -1, -2, -3]], dtype='float32')\n", (317, 527), True, 'import numpy as np\n'), ((549, 889), 'numpy.array', 'np.array', (['[[4, 3, 2, 1, 0, -1, -2, -3, -4], [5, 4, 3, 2, 0, -2, -3, -4, -5], [6, 5, 4,\n 3, 0, -3, -4, -5, -6], [7, 6, 5, 4, 0, -4, -5, -6, -7], [8, 7, 6, 5, 0,\n -5, -6, -7, -8], [7, 6, 5, 4, 0, -4, -5, -6, -7], [6, 5, 4, 3, 0, -3, -\n 4, -5, -6], [5, 4, 3, 2, 0, -2, -3, -4, -5], [4, 3, 2, 1, 0, -1, -2, -3,\n -4]]'], {'dtype': '"""float32"""'}), "([[4, 3, 2, 1, 0, -1, -2, -3, -4], [5, 4, 3, 2, 0, -2, -3, -4, -5],\n [6, 5, 4, 3, 0, -3, -4, -5, -6], [7, 6, 5, 4, 0, -4, -5, -6, -7], [8, 7,\n 6, 5, 0, -5, -6, -7, -8], [7, 6, 5, 4, 0, -4, -5, -6, -7], [6, 5, 4, 3,\n 0, -3, -4, -5, -6], [5, 4, 3, 2, 0, -2, -3, -4, -5], [4, 3, 2, 1, 0, -1,\n -2, -3, -4]], dtype='float32')\n", (557, 889), True, 'import numpy as np\n'), ((1330, 1361), 'onnx.numpy_helper.from_array', 'numpy_helper.from_array', (['w', '"""w"""'], {}), "(w, 'w')\n", (1353, 1361), False, 'from onnx import numpy_helper\n'), ((1378, 1455), 'onnx.helper.make_tensor_value_info', 'onnx.helper.make_tensor_value_info', (['"""w"""', 'onnx.TensorProto.FLOAT', '[1, 1, i, i]'], {}), "('w', onnx.TensorProto.FLOAT, [1, 1, i, i])\n", (1412, 1455), False, 'import onnx\n'), ((1471, 1558), 'onnx.helper.make_tensor_value_info', 'onnx.helper.make_tensor_value_info', (['"""x"""', 'onnx.TensorProto.FLOAT', '[1, 1, 2048, 2048]'], {}), "('x', onnx.TensorProto.FLOAT, [1, 1, 2048,\n 2048])\n", (1505, 1558), False, 'import onnx\n'), ((1691, 1779), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Conv"""'], {'inputs': "['x', 'w']", 'outputs': "['y']", 'kernel_shape': '[i, i]'}), "('Conv', inputs=['x', 'w'], outputs=['y'],\n kernel_shape=[i, i])\n", (1712, 1779), False, 'import onnx\n'), ((1846, 1942), 'onnx.helper.make_graph', 'onnx.helper.make_graph', (['[node_def]', 'f"""conv_{i}x{i}"""', '[tensor_x]', '[tensor_y]', '[initializer_w]'], {}), "([node_def], f'conv_{i}x{i}', [tensor_x], [tensor_y],\n [initializer_w])\n", (1868, 1942), False, 'import onnx\n'), ((2015, 2093), 'onnx.helper.make_model', 'onnx.helper.make_model', (['graph_def'], {'producer_name': '"""python_script"""', 'ir_version': '(6)'}), "(graph_def, producer_name='python_script', ir_version=6)\n", (2037, 2093), False, 'import onnx\n'), ((2181, 2216), 'onnx.checker.check_model', 'onnx.checker.check_model', (['model_def'], {}), '(model_def)\n', (2205, 2216), False, 'import onnx\n'), ((2243, 2285), 'onnx.save', 'onnx.save', (['model_def', 'f"""conv_{i}x{i}.onnx"""'], {}), "(model_def, f'conv_{i}x{i}.onnx')\n", 
(2252, 2285), False, 'import onnx\n'), ((1226, 1258), 'numpy.random.rand', 'np.random.rand', (['(1)', '(1)', '(2048)', '(2048)'], {}), '(1, 1, 2048, 2048)\n', (1240, 1258), True, 'import numpy as np\n')]
|
from django.urls import path, include
from . import views
from about_info import views as about_views
urlpatterns = [
path('', views.home, name = 'Home-Landing'),
path(r'about-me/', include('about_info.urls')),
path('admin/', views.admin404, name = 'Admin404'),
path('access-denied/', views.access_denied, name = 'AccessDenied'),
path('edit/into-sec/', views.Edit_Intro, name = 'Intro-Edit'),
path('edit/explore-cards/<int:pk>/', views.Edit_cards.as_view(), name = 'Card-Edit'),
path('explore-cards/add-new/card/', views.Add_card.as_view(), name = 'Card-Add'),
path('edit/general-status/', views.Edit_general_status, name = 'State-Edit'),
path('edit/single-skill/<int:pk>/', views.Edit_skills.as_view(), name = 'Skills-Edit'),
path('skill-set/add-new/skill/', views.Add_skills.as_view(), name = 'Skills-Add'),
]
|
[
"django.urls.path",
"django.urls.include"
] |
[((127, 168), 'django.urls.path', 'path', (['""""""', 'views.home'], {'name': '"""Home-Landing"""'}), "('', views.home, name='Home-Landing')\n", (131, 168), False, 'from django.urls import path, include\n'), ((233, 280), 'django.urls.path', 'path', (['"""admin/"""', 'views.admin404'], {'name': '"""Admin404"""'}), "('admin/', views.admin404, name='Admin404')\n", (237, 280), False, 'from django.urls import path, include\n'), ((288, 352), 'django.urls.path', 'path', (['"""access-denied/"""', 'views.access_denied'], {'name': '"""AccessDenied"""'}), "('access-denied/', views.access_denied, name='AccessDenied')\n", (292, 352), False, 'from django.urls import path, include\n'), ((361, 420), 'django.urls.path', 'path', (['"""edit/into-sec/"""', 'views.Edit_Intro'], {'name': '"""Intro-Edit"""'}), "('edit/into-sec/', views.Edit_Intro, name='Intro-Edit')\n", (365, 420), False, 'from django.urls import path, include\n'), ((606, 680), 'django.urls.path', 'path', (['"""edit/general-status/"""', 'views.Edit_general_status'], {'name': '"""State-Edit"""'}), "('edit/general-status/', views.Edit_general_status, name='State-Edit')\n", (610, 680), False, 'from django.urls import path, include\n'), ((195, 221), 'django.urls.include', 'include', (['"""about_info.urls"""'], {}), "('about_info.urls')\n", (202, 221), False, 'from django.urls import path, include\n')]
|
import torch
import cv2
import os
import glob
from torch.utils.data import Dataset
import random
class ISBI_Loader(Dataset):
def __init__(self, data_path):
        # Initialization: collect all image paths under data_path
self.data_path = data_path
self.imgs_path = glob.glob(os.path.join(data_path, 'image/*.png'))
def augment(self, image, flipCode):
        # Data augmentation with cv2.flip: flipCode 1 = horizontal, 0 = vertical, -1 = horizontal + vertical
flip = cv2.flip(image, flipCode)
return flip
def __getitem__(self, index):
        # Look up the image path for this index
        image_path = self.imgs_path[index]
        # Derive the label path from the image path
        label_path = image_path.replace('image', 'label').split('.')[0]+"_mask.png"
        # Read the training image and its label as grayscale
        image = cv2.imread(image_path, 0)
        label = cv2.imread(label_path, 0)
        # Convert the data to single-channel images (add a channel dimension: (H, W) -> (1, H, W))
# image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# label = cv2.cvtColor(label, cv2.COLOR_BGR2GRAY)
image = image.reshape(1, image.shape[0], image.shape[1])
label = label.reshape(1, label.shape[0], label.shape[1])
        # Normalize the label: map pixel value 255 to 1
if label.max() > 1:
label = label / 255
        # Randomly apply augmentation; flipCode 2 means no flip
flipCode = random.choice([-1, 0, 1, 2])
if flipCode != 2:
image = self.augment(image, flipCode)
label = self.augment(label, flipCode)
return image, label
def __len__(self):
        # Return the size of the training set
return len(self.imgs_path)
# import torch.utils.data as data
# import PIL.Image as Image
# from sklearn.model_selection import train_test_split
# import os
# import random
# import numpy as np
# from skimage.io import imread
# import cv2
# from glob import glob
# import imageio
# import torch
# from torchvision.transforms import transforms
# class esophagusDataset(data.Dataset):
# def __init__(self, state, transform=None, target_transform=None):
# self.state = state
# self.train_root = "/data/wangkun/data_sta_all/train_data"
# self.val_root = "/data/wangkun/data_sta_all/test_data"
# self.test_root = self.val_root
# self.pics,self.masks = self.getDataPath()
# self.transform = transform
# self.target_transform = target_transform
# def getDataPath(self):
# assert self.state =='train' or self.state == 'val' or self.state == 'test'
# if self.state == 'train':
# root = self.train_root
# if self.state == 'val':
# root = self.val_root
# if self.state == 'test':
# root = self.test_root
# pics = []
# masks = []
#         n = len(os.listdir(root)) // 2 # divide by 2: each sample in the dataset consists of a training image and a mask image
# for i in range(n):
# img = os.path.join(root, "%05d.png" % i) # liver is %03d
# mask = os.path.join(root, "%05d_mask.png" % i)
# pics.append(img)
# masks.append(mask)
# #imgs.append((img, mask))
# return pics,masks
# def __getitem__(self, index):
# #x_path, y_path = self.imgs[index]
# x_path = self.pics[index]
# y_path = self.masks[index]
# # origin_x = Image.open(x_path)
# # origin_y = Image.open(y_path)
# origin_x = cv2.imread(x_path)
# origin_y = cv2.imread(y_path,cv2.COLOR_BGR2GRAY)
# if self.transform is not None:
# img_x = self.transform(origin_x)
# if self.target_transform is not None:
# img_y = self.target_transform(origin_y)
# return img_x, img_y,x_path,y_path
# def __len__(self):
# return len(self.pics)
if __name__ == "__main__":
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '3'
# x_transforms = transforms.Compose([
# transforms.ToTensor(), # -> [0,1]
# transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]) # ->[-1,1]
# ])
#     # the mask only needs to be converted to a tensor
# y_transforms = transforms.ToTensor()
# train_dataset = esophagusDataset( r'train', transform=x_transforms, target_transform=y_transforms)
# print("数据个数:", len(train_dataset))
# train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
# batch_size=4,
# shuffle=True)
if __name__ == "__main__":
isbi_dataset = ISBI_Loader("/data/wangkun/dataset_96/train_96")
print("数据个数:", len(isbi_dataset))
train_loader = torch.utils.data.DataLoader(dataset=isbi_dataset,
batch_size=4,
shuffle=True)
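    # Iterate the loader once and print the batch shapes as a sanity check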
for image, label in train_loader:
print(image.shape)
|
[
"torch.utils.data.DataLoader",
"random.choice",
"cv2.imread",
"cv2.flip",
"os.path.join"
] |
[((4466, 4543), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'isbi_dataset', 'batch_size': '(4)', 'shuffle': '(True)'}), '(dataset=isbi_dataset, batch_size=4, shuffle=True)\n', (4493, 4543), False, 'import torch\n'), ((419, 444), 'cv2.flip', 'cv2.flip', (['image', 'flipCode'], {}), '(image, flipCode)\n', (427, 444), False, 'import cv2\n'), ((739, 764), 'cv2.imread', 'cv2.imread', (['image_path', '(0)'], {}), '(image_path, 0)\n', (749, 764), False, 'import cv2\n'), ((781, 806), 'cv2.imread', 'cv2.imread', (['label_path', '(0)'], {}), '(label_path, 0)\n', (791, 806), False, 'import cv2\n'), ((1216, 1244), 'random.choice', 'random.choice', (['[-1, 0, 1, 2]'], {}), '([-1, 0, 1, 2])\n', (1229, 1244), False, 'import random\n'), ((265, 303), 'os.path.join', 'os.path.join', (['data_path', '"""image/*.png"""'], {}), "(data_path, 'image/*.png')\n", (277, 303), False, 'import os\n')]
|
#!/usr/bin/env python3
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import glob
import json
from multiprocessing import Pool
import os
import os.path
import shutil
import subprocess
import sys
import time
from build.chronometer import Chronometer
CLEAN_SLATE = "-keep-out" not in sys.argv
BUILD_CORE = "-keep-kernel" not in sys.argv
BUILD_USERSPACE = "-keep-user" not in sys.argv
BUILD_NEWLIB = "-build-newlib" in sys.argv
APPS_TO_REBUILD = []
for arg in sys.argv:
if arg.startswith("-rebuild-app="):
APPS_TO_REBUILD.append(arg.replace("-rebuild-app=", ""))
APPS_TO_SKIP = []
for arg in sys.argv:
if arg.startswith("-skip-app="):
APPS_TO_SKIP.append(arg.replace("-skip-app=", ""))
if len(APPS_TO_REBUILD) > 0:
CLEAN_SLATE = False
BUILD_CORE = False
if (not BUILD_USERSPACE) or (not BUILD_CORE):
CLEAN_SLATE = False
MYPATH = os.path.abspath(os.getcwd())
if BUILD_NEWLIB and not BUILD_CORE:
# TODO: separate building the kernel and the core libraries
print("Rebuilding core system components because newlib is being built")
BUILD_CORE = True
if BUILD_CORE and BUILD_USERSPACE:
print("Build type: Full; OS path: %s" % MYPATH)
elif BUILD_CORE:
print("Build type: Core; OS path: %s" % MYPATH)
elif BUILD_USERSPACE:
print("Build type: Userland; OS path: %s" % MYPATH)
def shell(command, shell=True, stdin=None, printout=False, onerrignore=False, curdir=None):
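    # Run a shell command, echoing it and its output when verbose; on failure either warn (onerrignore=True) or abort the build via error()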
if printout or VERBOSE: print("$ %s" % command)
try:
stdout = subprocess.check_output(command, stdin=stdin, stderr=subprocess.STDOUT, shell=shell, cwd=curdir)
o = stdout.decode('utf-8')
if printout or VERBOSE: print(o)
return o
except subprocess.CalledProcessError as e:
print("$ %s" % command)
print(e.output.decode('utf-8'))
if onerrignore:
print("shell command failed")
else:
error("shell command failed")
VERBOSE = "-v" in sys.argv
BUILD_START = time.time()
def buildSignature():
if not hasattr(buildSignature, 'signature'):
H = shell('git rev-parse HEAD')
B = shell('git rev-parse --abbrev-ref HEAD')
H = H.replace('\n', '')
B = B.replace('\n', '')
buildSignature.signature = '%s/%s' % (B,H)
return buildSignature.signature
def buildVersion():
if not hasattr(buildVersion, 'version'):
H = shell('git rev-parse --short HEAD')
H = '0x%s' % H.replace('\n', '')
buildVersion.version = H
return buildVersion.version
C_OPTIONS = [
'-D__build_signature_=\"\\"%s\\"\"' % (buildSignature()),
'-D__build_version_=0x%sLLU' % (buildVersion()),
'-D__puppy__',
'-fdiagnostics-color=always',
'-ffreestanding',
'-fno-builtin',
'-fno-exceptions',
'-fno-omit-frame-pointer',
'-fno-stack-protector',
'-funsigned-char',
'-m32',
'-march=i686',
'-nostdlib',
'-O2',
'-Wall',
'-Wextra']
BASIC_CFLAGS = [
'-masm=intel',
'-Werror',
'-Wno-error=format',
'-Wno-missing-field-initializers',
'-nodefaultlibs',
'-nostartfiles',
'-c'] + C_OPTIONS
BASIC_CPPFLAGS = [
'-fno-exceptions',
'-fno-rtti',
'-std=c++14']
BASIC_ASFLAGS = ["-f elf"]
BASIC_LDFLAGS = ["-ffreestanding", "-nostdlib"]
USERSPACE_CFLAGS = ["-c"]
USERSPACE_CPPFLAGS = [""]
USERSPACE_ASFLAGS = ["-f elf"]
USERSPACE_LDFLAGS = [""]
def forEachFile(path, f):
for fld,x,lst in os.walk(path):
for nm in lst:
rp = os.path.join(fld,nm)
f(rp)
def calculateSize(path):
totalSize = 0
def callback(flpt):
nonlocal totalSize
totalSize = totalSize + os.stat(flpt).st_size
forEachFile(path, callback)
return totalSize
def findSubdirectories(dir, self=True):
if self:
yield dir
for subdir in os.listdir(dir):
candidate = os.path.join(dir, subdir)
if os.path.isdir(candidate):
yield candidate
def error(msg):
print("error: %s" % msg)
raise SystemError # force the subprocesses to exit as brutally as possible
def findAll(base, extension):
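    # Recursively collect every file under base (including subdirectories) that ends with the given extension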
def _find(dir, extension=None):
if extension:
if extension[0] != '.':
extension = '.%s' % extension
for root, dirs, files in os.walk(dir):
for file in files:
if extension is None or file.endswith(extension):
yield os.path.join(root, file)
L = set()
for subdir in findSubdirectories(base):
for f in _find(subdir, extension):
L.add(f)
return L
def makeOutputFilename(src, prefix=None):
if prefix is not None: src = "%s_%s" % (prefix, src)
return os.path.join("out", src.replace("src/","").replace("/","_").replace(".","_")) + ".o"
def copy(src, dst):
cmdline = 'cp "%s" "%s"' % (src, dst)
shell(cmdline)
return dst
def xcopy(src, dst):
cmdline = 'cp %s "%s"' % (src, dst)
shell(cmdline)
return dst
def rcopy(src, dst):
cmdline = 'cp -Lr "%s" "%s"' % (src, dst)
shell(cmdline)
return dst
def read(src):
with open(src, 'r') as f:
return f.read()
def write(dst, txt):
with open(dst, 'w') as f:
f.write(txt)
def buildAsm(inp, flags=BASIC_ASFLAGS, assembler='nasm'):
out = makeOutputFilename(inp)
cmdline = '%s %s %s -o %s' % (assembler, flags, inp, out)
shell(cmdline)
return out
def buildCpp(inp, flags=BASIC_CPPFLAGS, compiler="i686-elf-gcc"):
out = makeOutputFilename(inp)
cmdline = "%s %s %s -o %s" % (compiler, flags, inp, out)
shell(cmdline)
return out
def buildC(inp, flags=BASIC_CFLAGS, compiler="i686-elf-gcc"):
out = makeOutputFilename(inp)
cmdline = "%s %s %s -o %s" % (compiler, flags, inp, out)
shell(cmdline)
return out
def linkGcc(files, out, flags=BASIC_LDFLAGS, linker="i686-elf-gcc"):
CMDLINE = "%s %s %s -o %s -lgcc" % (linker, flags, ' '.join(files), out)
shell(CMDLINE)
return out
def clearDir(path):
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
def makeDir(path):
if not os.path.isdir(path):
cmdline = 'mkdir "%s"' % path
shell(cmdline)
return path
class _BuildC(object):
def __init__(self, gcc, flags):
self.gcc = gcc
self.flags = flags
def __call__(self, x):
return buildC(x, compiler=self.gcc, flags=self.flags)
class _BuildCpp(object):
def __init__(self, gcc, flags):
self.gcc = gcc
self.flags = flags
def __call__(self, x):
return buildCpp(x, compiler=self.gcc, flags=self.flags)
# do not move the definition of the THE_POOL above here; because of how multiprocessing works
# all the things that we want the pooled processes to see and use must be defined before the
# pool itself is defined..
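# THE_POOL is shared by every Project instance; buildCFiles/buildCPPFiles map compile jobs onto its five workers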
THE_POOL = Pool(5)
class Project(object):
def __init__(self, name, srcdir, cflags=None, cppflags=None, asmflags=None, ldflags=None, ipaths=None, assembler="nasm", linkerdeps=None, outwhere="out", gcc="i686-elf-gcc", announce=True):
self.name = name
self.srcdir = srcdir
self.cflags = ' '.join(cflags if cflags else BASIC_CFLAGS)
self.cppflags = ' '.join(cppflags if cppflags else (BASIC_CFLAGS + BASIC_CPPFLAGS))
self.asmflags = ' '.join(asmflags if asmflags else BASIC_ASFLAGS)
self.ldflags = ' '.join(ldflags if ldflags else BASIC_LDFLAGS)
self.assembler = assembler
self.ipaths = ipaths if ipaths is not None else ["include"]
self.linkerdeps = linkerdeps if linkerdeps else []
self.outwhere = outwhere
self.announce = announce
self.cflags = self.cflags + " %s " % ' '.join([" -I%s " % x for x in self.ipaths])
self.cppflags = self.cppflags + " %s " % ' '.join([" -I%s " % x for x in self.ipaths])
self.gcc = gcc if gcc else "i686-elf-gcc"
def findCFiles(self):
return findAll(self.srcdir, "c")
def findCPPFiles(self):
return findAll(self.srcdir, "cpp")
def findSFiles(self):
return findAll(self.srcdir, "s")
def buildCFiles(self):
return THE_POOL.map(_BuildC(self.gcc, self.cflags), self.findCFiles())
def buildCPPFiles(self):
return THE_POOL.map(_BuildCpp(self.gcc, self.cppflags), self.findCPPFiles())
def buildSFiles(self):
S_OUTPUT = []
for inp in self.findSFiles():
S_OUTPUT.append(buildAsm(inp, flags=self.asmflags, assembler=self.assembler))
return S_OUTPUT
def compile(self):
return self.buildCFiles() + self.buildCPPFiles() + self.buildSFiles()
def linkAr(self, out):
destfile = "%s/lib%s.a" % (self.outwhere, self.name.lower())
shell("i686-elf-ar rcs %s %s" % (destfile, ' '.join(out)))
return destfile
def linkGcc(self, out):
destfile = "%s/%s" % (self.outwhere, self.name.lower())
out = out + self.linkerdeps
linkGcc(linker=self.gcc, files=out, flags=self.ldflags, out=destfile)
return destfile
def linkDylib(self, out):
destfile = "%s/%s" % (self.outwhere, self.name.lower())
shell("i686-elf-ld -shared %s -o %s" % (' '.join(out), destfile))
return destfile
def linkCopy(self, out):
if len(out) != 1:
raise ValueError("linkCopy can only handle a single file: %s" % out)
out = out[0]
destfile = "%s/%s" % (self.outwhere, self.name.lower())
copy(out, destfile)
return destfile
def linkScript(self, out):
destfile = self.name.lower()
shell("%s %s %s" % (self.linker, destfile, ' '.join(out)))
return destfile
def link(self, out):
pass
def hasMakefile(self):
return os.path.exists(os.path.join(self.srcdir, "Makefile"))
def getMakefileEnvironment(self):
env = {
"V" : "1",
"PUPPY_ROOT" : MYPATH,
"OUTWHERE" : self.outwhere,
"CC" : MY_CC_PATH,
"CXX" : MY_CXX_PATH,
"TGTNAME" : self.name
}
return env
def build(self):
with Chronometer("Compiling %s" % self.name if self.announce else None):
if not self.hasMakefile():
return self.link(self.compile())
else:
guessname = os.path.basename(self.srcdir)
env = self.getMakefileEnvironment()
env_string = ' '.join(['%s=%s' % (a,b) for (a,b) in env.items()])
shell("make %s -j" % (env_string), curdir=self.srcdir)
return os.path.join(self.outwhere, guessname)
class UserspaceTool(Project):
def linkAndStrip(self, out):
target = self.linkGcc(out)
CMDLINE="strip %s" % target
shell(CMDLINE)
return target
def __init__(self, name, srcdir, cflags=None, cppflags=None, outwhere="out/apps", linkerdeps=[], announce=False):
cflags = USERSPACE_CFLAGS + (cflags if cflags else [])
cppflags = USERSPACE_CFLAGS + USERSPACE_CPPFLAGS + (cppflags if cppflags else [])
ipaths=[]
ldflags = USERSPACE_LDFLAGS
ldeps=[""]
gcc="build/gcc.sh"
ldeps = linkerdeps + ldeps
Project.__init__(self,
name=name,
srcdir=srcdir,
cflags=cflags,
cppflags=cppflags,
ldflags=ldflags,
ipaths=ipaths,
assembler="nasm",
linkerdeps=ldeps,
outwhere=outwhere,
gcc=gcc,
announce=announce)
self.link = self.linkAndStrip
def parseSymbolTable(symf):
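    # Parse "<hex address> <symbol>" lines (as produced by the nm pipeline below) into a symbol-name -> address dict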
symtab = {}
with open(symf, "r") as f:
while True:
ln = f.readline()
if ln is None or len(ln) == 0: break
parts = ln[:-1].split(' ')
key = parts[1]
val = int('0x' + parts[0], 16)
symtab[key] = val
return symtab
def writeSpecsFile(outfile):
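    # Emit a GCC specs file injecting the Puppy include paths, the newlib crt0/libraries, and the userspace linker script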
with open(outfile, "w") as f:
print("*cpp_unique_options:", file=f)
specs_include_paths = [os.path.abspath(x) for x in SPECS_INCLUDE_PATHS]
specs_include_paths = ["-I%s" % x for x in specs_include_paths]
specs_include_paths = ' '.join(specs_include_paths)
print("+ -D__puppy__ %s" % specs_include_paths, file=f)
print("", file=f)
print("*cc1plus:", file=f)
cc1plus_options = ' '.join(C_OPTIONS) + ' ' + ' '.join(BASIC_CPPFLAGS)
print(" %s" % cc1plus_options, file=f)
print("", file=f)
print("*cc1_options:", file=f)
print(" %s" % specs_include_paths, file=f)
print("", file=f)
print("*startfile:", file=f)
print(" %s" % os.path.abspath(NEWLIB_CRT0), file=f)
print("", file=f)
print("*lib:", file=f)
libs = [os.path.abspath(x) for x in NEWLIB_ARS]
libs = ' '.join(libs)
print(" %s" % libs, file=f)
print("", file=f)
print("*link:", file=f)
print(" -T %s -e_start -L %s" % (os.path.abspath(USERSPACE_LD_SCRIPT),
os.path.join(MYPATH, "out", "mnt", "libs")), file=f)
def prepareDiskImages(file, sysPartitionMBs = 48,
userPartitionMBs = 64,
reservedSectors = 2047):
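    # Build a 1 MiB boot area plus FAT32 system and user partition images; build/concatimg.sh later concatenates them into the final disk image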
rootFile = file
sysPartitionFile = "%s.sys" % file
userPartitionFile = "%s.usr" % file
headerFile = "%s.boot" % file
CMDLINE="dd if=/dev/zero of=%s bs=%s count=%s" % (headerFile, 1024*1024, 1)
shell(CMDLINE)
CMDLINE="dd if=/dev/zero of=%s bs=%s count=%s" % (sysPartitionFile, 1024*1024, sysPartitionMBs)
shell(CMDLINE)
CMDLINE="mkfs.fat -F32 %s -i 55AABB66" % (sysPartitionFile)
shell(CMDLINE)
CMDLINE="dd if=/dev/zero of=%s bs=%s count=%s" % (userPartitionFile, 1024*1024, userPartitionMBs)
shell(CMDLINE)
CMDLINE="mkfs.fat -F32 %s -i A0B0C0D0" % (userPartitionFile)
shell(CMDLINE)
partitions = [
{'bootable' : 'yes',
'lba' : reservedSectors + 1,
'type' : 0xc,
'size' : sysPartitionMBs * 1024*1024},
{'type' : 0xc,
'size' : userPartitionMBs * 1024*1024}
]
with open("out/systemdsk.json", "w") as f:
json.dump(partitions, f)
CMDLINE="build/imgptable.py out/systemdsk.json out/bootsect.0"
shell(CMDLINE)
# copy the full boot sector from the partition table tool, but then overwrite the rest of it
CMDLINE="dd if=out/bootsect.0 conv=notrunc bs=1 count=512 of=%s" % (headerFile)
shell(CMDLINE)
CMDLINE="dd if=build/bootsect.0 conv=notrunc bs=1 count=446 of=%s" % (headerFile)
shell(CMDLINE)
CMDLINE="dd if=build/bootsect.0 conv=notrunc ibs=1 obs=1 seek=510 skip=510 count=2 of=%s" % (headerFile)
shell(CMDLINE)
CMDLINE="grub-mkimage -c build/earlygrub.cfg -O i386-pc -o out/boot.ldr -p /boot/grub part_msdos biosdisk fat multiboot configfile"
shell(CMDLINE)
CMDLINE="dd if=out/boot.ldr bs=512 seek=1 of=%s conv=notrunc" % (headerFile)
shell(CMDLINE)
return (rootFile, headerFile, sysPartitionFile, userPartitionFile)
def buildUserlandComponent(name, sourceDir, outWhere, beforeBuild=None, afterBuild=None, cflags=None, cppflags=None, linkerdeps=[]):
component = UserspaceTool(name = name,
srcdir = sourceDir,
outwhere = outWhere,
cflags=cflags,
cppflags=cppflags,
linkerdeps=linkerdeps)
if beforeBuild: beforeBuild(component)
print(component.name, end='', flush=True)
cout = component.build()
if afterBuild: afterBuild(component, cout)
print(' ', end='', flush=True)
def expandNewlibDeps(deps):
out = []
for dep in deps:
if dep == "${NEWLIB}":
out += NEWLIB_DEPS
else:
out += [dep]
return out
def expandNewlibIncludes(ipaths):
out = []
for ipath in ipaths:
if ipath == "${NEWLIB}":
out += ["include", "include/newlib", "include/stl"]
else:
out += [ipath]
return out
NEWLIB_CRT0 = "out/mnt/libs/crt0.o"
NEWLIB_ARS = ["out/mnt/libs/libeastl.a",
"out/mnt/libs/libcxxsupport.a",
"out/mnt/libs/libpcre2-posix.a",
"out/mnt/libs/libpcre2-8.a",
"out/mnt/libs/libm.a",
"out/mnt/libs/libc.a"]
NEWLIB_DEPS = [NEWLIB_CRT0] + NEWLIB_ARS
SPECS_INCLUDE_PATHS = ["out/mnt/include", "out/mnt/include/newlib", "out/mnt/include/stl"]
USERSPACE_LD_SCRIPT = "out/mnt/libs/app.ld"
GCC_SPECS_PATH = "out/mnt/libs/gcc.specs"
MY_CC_PATH = os.path.join(MYPATH, "build", "gcc.sh")
MY_CXX_PATH = os.path.join(MYPATH, "build", "g++.sh")
LIBGCC_FILE = shell("i686-elf-gcc -print-libgcc-file-name").rstrip()
IMG_FILE = "out/os.img"
if BUILD_NEWLIB:
with Chronometer("Building Newlib"):
CMDLINE="build/makenewlib.sh"
shell(CMDLINE)
if CLEAN_SLATE:
clearDir("out")
clearDir("out/mnt")
if BUILD_CORE:
ROOT_DISK, BOOT_DISK, SYS_DISK, USER_DISK = prepareDiskImages(IMG_FILE)
print("OS disk image parts: %s %s %s, which will be combined to produce %s" % (BOOT_DISK, SYS_DISK, USER_DISK, ROOT_DISK))
else:
ROOT_DISK = IMG_FILE
BOOT_DISK = "%s.boot" % IMG_FILE
SYS_DISK = "%s.sys" % IMG_FILE
USER_DISK = "%s.usr" % IMG_FILE
if BUILD_CORE:
makeDir("out/mnt/apps")
makeDir("out/mnt/libs")
makeDir("out/mnt/tests")
makeDir("out/mnt/include")
makeDir("out/mnt/boot")
makeDir("out/mnt/config")
CORE_PROJECTS = []
for core_project in json.loads(open("build/core.json", "r").read()):
assembler = core_project.get('assembler', 'nasm')
cflags = core_project.get('cflags', BASIC_CFLAGS)
cflags = cflags + core_project.get('cflagsAgument', [])
cppflags = core_project.get('cppflags', BASIC_CFLAGS + BASIC_CPPFLAGS)
cppflags = cppflags + core_project.get('cppflagsAgument', [])
asmflags = core_project.get('asmflags', BASIC_ASFLAGS)
asmflags = asmflags + core_project.get('asmflagsAugment', [])
ldflags = core_project.get('ldflags', BASIC_LDFLAGS)
ldflags = ldflags + core_project.get('ldflagsAugment', [])
ipaths = core_project.get('includePaths', ["include"])
ipaths = expandNewlibIncludes(ipaths)
linkerdeps = core_project.get('linkerDependencies', [])
linkerdeps = expandNewlibDeps(linkerdeps)
project = Project(name = core_project.get('name'),
srcdir = core_project.get('src'),
assembler = assembler,
cflags = cflags,
cppflags = cppflags,
asmflags = asmflags,
ldflags = ldflags,
ipaths = ipaths,
linkerdeps = linkerdeps)
linklogic = core_project.get('linkLogic', 'ar')
if linklogic == 'ar':
project.link = project.linkAr
elif linklogic == 'copy':
project.link = project.linkCopy
elif linklogic == 'gcc':
project.link = project.linkGcc
elif linklogic == 'script':
project.link = project.linkScript
project.linker = "%s/%s" % (project.srcdir, "linker.py")
CORE_PROJECTS.append(project)
for project in CORE_PROJECTS:
project.build()
with Chronometer("Copyings headers and core libraries"):
rcopy("include", "out/mnt")
xcopy("third_party/pcre2-10.32/libs/lib*.a", "out/mnt/libs")
xcopy("out/lib*.a", "out/mnt/libs")
copy("newlib/lib/libm.a", "out/mnt/libs")
copy("newlib/lib/libg.a", "out/mnt/libs")
copy("newlib/lib/libnosys.a", "out/mnt/libs")
rcopy("python", "out/mnt/libs")
copy(LIBGCC_FILE, "out/mnt/libs")
copy("out/newlibcrt0", NEWLIB_CRT0)
copy("build/app.ld", "out/mnt/libs")
print("newlib dependency list: %s" % ', '.join(NEWLIB_DEPS))
with Chronometer("Generating GCC specs"):
writeSpecsFile(GCC_SPECS_PATH)
with Chronometer("Generating kernel symbol table"):
CMDLINE = "nm out/kernel | grep -e ' [BbDdGgSsTtRr] ' | awk '{ print $1 \" \" $3 }' > out/kernel.sym"
shell(CMDLINE)
symtab = parseSymbolTable("out/kernel.sym")
kernel_end = symtab["__kernel_end"]
kernel_start = symtab["__kernel_start"]
print("Kernel runtime size: %u bytes" % (kernel_end - kernel_start))
with Chronometer("Copying configuration data"):
rcopy("config", "out/mnt")
copy("LICENSE", "out/mnt/config/LICENSE")
anydiff = "0" != shell('git diff HEAD | wc -c | sed "s/ //g"').replace('\n', '')
sysinfo = read('config/sysinfo')
sysinfo = sysinfo.replace("${NOW}", datetime.now().__str__())
sysinfo = sysinfo.replace("${GIT-HASH}", shell("git rev-parse HEAD").replace('\n', ''))
sysinfo = sysinfo.replace("${ANY-DIFF}", "Local diff applied" if anydiff else "No diff applied")
sysinfo = sysinfo.replace("${GCC-VERSION}", shell("i686-elf-gcc --version").replace('\n', ''))
sysinfo = sysinfo.replace("${NASM-VERSION}", shell("nasm -v").replace('\n', ''))
sysinfo = sysinfo.replace("${OS-VERSION}", str(int(buildVersion(), 16)))
sysinfo = sysinfo.replace("${OS-SIGNATURE}", buildSignature())
write("out/mnt/config/sysinfo", sysinfo)
if anydiff:
diff_text = shell("git diff")
write("out/mnt/config/local.diff", diff_text)
sig_info = {
"name" : "Puppy",
"version" : int(buildVersion(), 16),
"signature" : buildSignature()
}
with open("out/mnt/config/signature.json", "w") as f:
json.dump(sig_info, f)
# apps can end up in /initrd and/or /apps in the main filesystem
# this table allows one to configure which apps land where (the default
# being /apps in the main filesystem and not /initrd)
APPS_CONFIG = {
"init" : {"initrd": True},
"mount" : {"initrd": True},
"ls" : {"initrd": True},
"halt" : {"initrd": True},
"reboot" : {"initrd": True},
}
INITRD_REFS = [] # apps for initrd
if BUILD_USERSPACE:
with Chronometer("Building apps and tests"):
SLIBS_PRINT_PREFIX="Building static libraries: "
print(SLIBS_PRINT_PREFIX, end='', flush=True)
SLIB_DIRS = findSubdirectories("slibs", self=False)
def markAsStatic(lib):
lib.link = lib.linkAr
for lib in SLIB_DIRS:
lib_name = os.path.basename(lib)
lib_include = "out/mnt/include/lib%s" % lib_name
makeDir(lib_include)
xcopy("%s/include/*" % lib, lib_include)
buildUserlandComponent(lib_name,
lib,
"out/mnt/libs",
beforeBuild = markAsStatic)
print('')
DYLIBS_PRINT_PREFIX="Building dynamic libraries: "
print(DYLIBS_PRINT_PREFIX, end='', flush=True)
DYLIB_DIRS = findSubdirectories("dylibs", self=False)
def markAsDynamic(lib):
lib.link = lib.linkDylib
for lib in DYLIB_DIRS:
buildUserlandComponent(os.path.basename(lib),
lib,
"out/mnt/libs",
beforeBuild = markAsDynamic)
print('')
APPS_PRINT_PREFIX="Building apps: "
print(APPS_PRINT_PREFIX, end='', flush=True)
APP_DIRS = findSubdirectories("apps", self=False)
def needsInitrd(app, app_out):
config = APPS_CONFIG.get(app.name, {"initrd": False})
if config["initrd"]: INITRD_REFS.append(app_out)
for app in APP_DIRS:
bn = os.path.basename(app)
if bn in APPS_TO_SKIP: continue
if len(APPS_TO_REBUILD) == 0 or bn in APPS_TO_REBUILD or bn in APPS_CONFIG:
buildUserlandComponent(bn,
app,
"out/mnt/apps",
afterBuild=needsInitrd)
print('')
TEST_PLAN = []
TEST_PRINT_PREFIX="Building tests: "
print(TEST_PRINT_PREFIX, end='', flush=True)
TEST_DIRS = findSubdirectories("tests", self=False)
def pushToTestPlan(test, test_out):
test_ref = "/system/%s" % (test_out.replace("out/mnt/", ""))
TEST_PLAN.append({
"path" : test_ref,
"id" : test.name,
"wait" : "20" # TODO: allow individual tests to edit this value
})
for test in TEST_DIRS:
test_name = os.path.basename(test)
test_name_define = ' -DTEST_NAME=\\"%s\\" ' % test_name
if test_name in APPS_TO_SKIP: continue
if len(APPS_TO_REBUILD) == 0 or test_name in APPS_TO_REBUILD:
buildUserlandComponent(test_name,
test,
"out/mnt/tests",
afterBuild=pushToTestPlan,
cflags = [test_name_define],
cppflags = [test_name_define],
linkerdeps = ["out/mnt/libs/libcheckup.a"])
# provide a consistent sort order for test execution regardless of underlying FS
TEST_PLAN.sort(key=lambda test: test["id"])
with open("out/testplan.json", "w") as f:
json.dump(TEST_PLAN, f)
with open("out/mnt/tests/runall.sh", "w") as testScript:
print("#!/system/apps/shell", file=testScript)
for test in TEST_PLAN:
print("%s" % test['path'], file=testScript)
print('')
with Chronometer("Configuring bootloader"):
MENU_MODULE_REFS = ["module /boot/initrd.img /initrd"] # add kernel modules here, should any exist
# won't have a new initrd without building userland
if BUILD_USERSPACE:
INITRD_ARGS = ["--file " + x for x in INITRD_REFS]
shell("initrd/gen.py --dest out/mnt/boot/initrd.img %s" % ' '.join(INITRD_ARGS))
print("Size of initrd image: %d bytes" % os.stat("out/mnt/boot/initrd.img").st_size)
rcopy("build/grub", "out/mnt/boot")
copy("out/kernel", "out/mnt/boot/puppy")
menulst = read('build/grub.cfg')
menulst = menulst.replace("${MODULES}", '\n'.join(MENU_MODULE_REFS))
write("out/mnt/boot/grub/grub.cfg", menulst)
with Chronometer("Building final disk image"):
CMDLINE="mcopy -D overwrite -s -i %s out/mnt/* ::/" % (SYS_DISK)
shell(CMDLINE)
PART_USAGE = calculateSize("out/mnt")
CMDLINE="build/concatimg.sh %s %s %s %s" % (ROOT_DISK, BOOT_DISK, SYS_DISK, USER_DISK)
shell(CMDLINE)
print("Size of OS disk image: %10d bytes\n %10d bytes used" % (os.stat(ROOT_DISK).st_size, PART_USAGE))
BUILD_END = time.time()
print("Build took %s seconds" % int(BUILD_END - BUILD_START))
|
[
"json.dump",
"os.path.abspath",
"os.makedirs",
"os.stat",
"os.path.basename",
"os.getcwd",
"os.path.isdir",
"subprocess.check_output",
"os.walk",
"os.path.exists",
"datetime.datetime.now",
"time.time",
"build.chronometer.Chronometer",
"multiprocessing.Pool",
"shutil.rmtree",
"os.path.join",
"os.listdir"
] |
[((2546, 2557), 'time.time', 'time.time', ([], {}), '()\n', (2555, 2557), False, 'import time\n'), ((7400, 7407), 'multiprocessing.Pool', 'Pool', (['(5)'], {}), '(5)\n', (7404, 7407), False, 'from multiprocessing import Pool\n'), ((17383, 17422), 'os.path.join', 'os.path.join', (['MYPATH', '"""build"""', '"""gcc.sh"""'], {}), "(MYPATH, 'build', 'gcc.sh')\n", (17395, 17422), False, 'import os\n'), ((17437, 17476), 'os.path.join', 'os.path.join', (['MYPATH', '"""build"""', '"""g++.sh"""'], {}), "(MYPATH, 'build', 'g++.sh')\n", (17449, 17476), False, 'import os\n'), ((27507, 27518), 'time.time', 'time.time', ([], {}), '()\n', (27516, 27518), False, 'import time\n'), ((1445, 1456), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1454, 1456), False, 'import os\n'), ((4000, 4013), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (4007, 4013), False, 'import os\n'), ((4386, 4401), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (4396, 4401), False, 'import os\n'), ((6561, 6581), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (6575, 6581), False, 'import os\n'), ((6615, 6632), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (6626, 6632), False, 'import os\n'), ((20088, 20138), 'build.chronometer.Chronometer', 'Chronometer', (['"""Copyings headers and core libraries"""'], {}), "('Copyings headers and core libraries')\n", (20099, 20138), False, 'from build.chronometer import Chronometer\n'), ((20642, 20677), 'build.chronometer.Chronometer', 'Chronometer', (['"""Generating GCC specs"""'], {}), "('Generating GCC specs')\n", (20653, 20677), False, 'from build.chronometer import Chronometer\n'), ((20720, 20765), 'build.chronometer.Chronometer', 'Chronometer', (['"""Generating kernel symbol table"""'], {}), "('Generating kernel symbol table')\n", (20731, 20765), False, 'from build.chronometer import Chronometer\n'), ((21103, 21144), 'build.chronometer.Chronometer', 'Chronometer', (['"""Copying configuration data"""'], {}), "('Copying configuration data')\n", (21114, 21144), False, 'from build.chronometer import Chronometer\n'), ((26363, 26400), 'build.chronometer.Chronometer', 'Chronometer', (['"""Configuring bootloader"""'], {}), "('Configuring bootloader')\n", (26374, 26400), False, 'from build.chronometer import Chronometer\n'), ((27079, 27119), 'build.chronometer.Chronometer', 'Chronometer', (['"""Building final disk image"""'], {}), "('Building final disk image')\n", (27090, 27119), False, 'from build.chronometer import Chronometer\n'), ((2064, 2164), 'subprocess.check_output', 'subprocess.check_output', (['command'], {'stdin': 'stdin', 'stderr': 'subprocess.STDOUT', 'shell': 'shell', 'cwd': 'curdir'}), '(command, stdin=stdin, stderr=subprocess.STDOUT,\n shell=shell, cwd=curdir)\n', (2087, 2164), False, 'import subprocess\n'), ((4423, 4448), 'os.path.join', 'os.path.join', (['dir', 'subdir'], {}), '(dir, subdir)\n', (4435, 4448), False, 'import os\n'), ((4460, 4484), 'os.path.isdir', 'os.path.isdir', (['candidate'], {}), '(candidate)\n', (4473, 4484), False, 'import os\n'), ((4843, 4855), 'os.walk', 'os.walk', (['dir'], {}), '(dir)\n', (4850, 4855), False, 'import os\n'), ((6591, 6610), 'shutil.rmtree', 'shutil.rmtree', (['path'], {}), '(path)\n', (6604, 6610), False, 'import shutil\n'), ((6664, 6683), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (6677, 6683), False, 'import os\n'), ((14950, 14974), 'json.dump', 'json.dump', (['partitions', 'f'], {}), '(partitions, f)\n', (14959, 14974), False, 'import json\n'), ((17599, 17629), 
'build.chronometer.Chronometer', 'Chronometer', (['"""Building Newlib"""'], {}), "('Building Newlib')\n", (17610, 17629), False, 'from build.chronometer import Chronometer\n'), ((22290, 22312), 'json.dump', 'json.dump', (['sig_info', 'f'], {}), '(sig_info, f)\n', (22299, 22312), False, 'import json\n'), ((22754, 22792), 'build.chronometer.Chronometer', 'Chronometer', (['"""Building apps and tests"""'], {}), "('Building apps and tests')\n", (22765, 22792), False, 'from build.chronometer import Chronometer\n'), ((4055, 4076), 'os.path.join', 'os.path.join', (['fld', 'nm'], {}), '(fld, nm)\n', (4067, 4076), False, 'import os\n'), ((10325, 10362), 'os.path.join', 'os.path.join', (['self.srcdir', '"""Makefile"""'], {}), "(self.srcdir, 'Makefile')\n", (10337, 10362), False, 'import os\n'), ((10679, 10745), 'build.chronometer.Chronometer', 'Chronometer', (["('Compiling %s' % self.name if self.announce else None)"], {}), "('Compiling %s' % self.name if self.announce else None)\n", (10690, 10745), False, 'from build.chronometer import Chronometer\n'), ((12759, 12777), 'os.path.abspath', 'os.path.abspath', (['x'], {}), '(x)\n', (12774, 12777), False, 'import os\n'), ((13514, 13532), 'os.path.abspath', 'os.path.abspath', (['x'], {}), '(x)\n', (13529, 13532), False, 'import os\n'), ((23084, 23105), 'os.path.basename', 'os.path.basename', (['lib'], {}), '(lib)\n', (23100, 23105), False, 'import os\n'), ((24330, 24351), 'os.path.basename', 'os.path.basename', (['app'], {}), '(app)\n', (24346, 24351), False, 'import os\n'), ((25249, 25271), 'os.path.basename', 'os.path.basename', (['test'], {}), '(test)\n', (25265, 25271), False, 'import os\n'), ((26090, 26113), 'json.dump', 'json.dump', (['TEST_PLAN', 'f'], {}), '(TEST_PLAN, f)\n', (26099, 26113), False, 'import json\n'), ((4221, 4234), 'os.stat', 'os.stat', (['flpt'], {}), '(flpt)\n', (4228, 4234), False, 'import os\n'), ((10881, 10910), 'os.path.basename', 'os.path.basename', (['self.srcdir'], {}), '(self.srcdir)\n', (10897, 10910), False, 'import os\n'), ((11139, 11177), 'os.path.join', 'os.path.join', (['self.outwhere', 'guessname'], {}), '(self.outwhere, guessname)\n', (11151, 11177), False, 'import os\n'), ((13403, 13431), 'os.path.abspath', 'os.path.abspath', (['NEWLIB_CRT0'], {}), '(NEWLIB_CRT0)\n', (13418, 13431), False, 'import os\n'), ((21386, 21400), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (21398, 21400), False, 'from datetime import datetime\n'), ((23774, 23795), 'os.path.basename', 'os.path.basename', (['lib'], {}), '(lib)\n', (23790, 23795), False, 'import os\n'), ((13725, 13761), 'os.path.abspath', 'os.path.abspath', (['USERSPACE_LD_SCRIPT'], {}), '(USERSPACE_LD_SCRIPT)\n', (13740, 13761), False, 'import os\n'), ((13807, 13849), 'os.path.join', 'os.path.join', (['MYPATH', '"""out"""', '"""mnt"""', '"""libs"""'], {}), "(MYPATH, 'out', 'mnt', 'libs')\n", (13819, 13849), False, 'import os\n'), ((26783, 26817), 'os.stat', 'os.stat', (['"""out/mnt/boot/initrd.img"""'], {}), "('out/mnt/boot/initrd.img')\n", (26790, 26817), False, 'import os\n'), ((27453, 27471), 'os.stat', 'os.stat', (['ROOT_DISK'], {}), '(ROOT_DISK)\n', (27460, 27471), False, 'import os\n'), ((4980, 5004), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (4992, 5004), False, 'import os\n')]
|
import vim
import re
from typing import Union
class PopupOptDict(object):
pass
class PopupPos:
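    # Position-related subset of the vim popup options (line, col, pos, posinvert), validated in the property setters below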
# __valid_keys = ('line', 'col', 'pos', 'posinvert')
def __init__(self,
line : Union[int, str, None] = None,
col : Union[int, str, None] = None,
pos : Union[str, None] = None,
posinvert : Union[bool, None] = None,
):
self.line = line
self.col = col
self.pos = pos
self.posinvert = posinvert
def __str__(self):
return str(dict(filter(lambda elem: elem[1] is not None, self.opt_dict.items())))
@property
def opt_dict(self) -> dict:
return {
"line": self.line,
"col": self.col,
"pos": self.pos,
"posinvert": self.posinvert,
}
def _check_set(self, value: Union[int, str, None], opt: str):
        # A string value is only valid if it is "cursor" or of the form "cursor+N"/"cursor-N"
        if isinstance(value, str) and (
                value != "cursor" and
                re.match(r'cursor[+-]\d+$', value) is None):
            raise ValueError(f"""value of popup option "{opt}" should be "cursor", or something like "cursor+1" and "cursor-1" """)
        elif isinstance(value, (int, str, type(None))):
if opt == "line":
self.__line = value
elif opt == "col":
self.__col = value
else:
raise ValueError(f"""value of popup option "{opt}" of type `{type(value)}` should have type of `int` or `str`""")
@property
def line(self):
return self.__line
@line.setter
def line(self, val: Union[int, str, None]):
self._check_set(val, "line")
@property
def col(self):
return self.__col
@col.setter
def col(self, val: Union[int, str, None]):
self._check_set(val, "col")
@property
def pos(self):
return self.__pos
@pos.setter
def pos(self, val: Union[str, None]):
        if not isinstance(val, (str, type(None))):
raise ValueError(f"""value of popup option "pos" of type `{type(val)}` should have type of `str` or `None`""")
elif val is None or val in ["topleft", "topright", "botleft", "botright", "center"]:
self.__pos = val
else:
raise ValueError(f"""invalid value {val} of popup option `pos`""")
@property
def posinvert(self):
return self.__posinvert
    @posinvert.setter
def posinvert(self, val: Union[bool, None]):
if not isinstance(val, (bool, type(None))):
raise ValueError(f"""value of popup option "posinvert" of type `{type(val)}` should have type of `bool` or `None`""")
else:
self.__posinvert = val
class PopupOpt(object):
"""popup options"""
def __init__(self,
line : Union[int, str, None] = None,
col : Union[int, str, None] = None,
pos : Union[str, None] = None,
posinvert : Union[bool, None] = None,
textprop=None,
textpropwin=None,
textpropid=None,
fixed=None,
flip=None,
maxheight=None,
minheight=None,
maxwidth=None,
minwidth=None,
firstline=None,
hidden=None,
tabpage=None,
title=None,
wrap=None,
drag=None,
resize=None,
close=None,
highlight=None,
padding=None,
border=None,
borderhighlight=None,
borderchars=None,
scrollbar=None,
scrollbarhighlight=None,
thumbhighlight=None,
zindex=None,
mask=None,
time=None,
moved=None,
mousemoved=None,
cursorline=None,
filter=None,
mapping=None,
filtermode=None,
callback=None,
):
self.__line = line
self.__col = col
self.__pos = pos
self.__posinvert = posinvert
self.__textprop = textprop
self.__textpropwin = textpropwin
self.__textpropid = textpropid
self.__fixed = fixed
self.__flip = flip
self.__maxheight = maxheight
self.__minheight = minheight
self.__maxwidth = maxwidth
self.__minwidth = minwidth
self.__firstline = firstline
self.__hidden = hidden
self.__tabpage = tabpage
self.__title = title
self.__wrap = wrap
self.__drag = drag
self.__resize = resize
self.__close = close
self.__highlight = highlight
self.__padding = padding
self.__border = border
self.__borderhighlight = borderhighlight
self.__borderchars = borderchars
self.__scrollbar = scrollbar
self.__scrollbarhighlight = scrollbarhighlight
self.__thumbhighlight = thumbhighlight
self.__zindex = zindex
self.__mask = mask
self.__time = time
self.__moved = moved
self.__mousemoved = mousemoved
self.__cursorline = cursorline
self.__filter = filter
self.__mapping = mapping
self.__filtermode = filtermode
self.__callback = callback
self.__opt_dict = {
"line": line,
"col": col,
"pos": pos,
"posinvert": posinvert,
"textprop": textprop,
"textpropwin": textpropwin,
"textpropid": textpropid,
"fixed": fixed,
"flip": flip,
"maxheight": maxheight,
"minheight": minheight,
"maxwidth": maxwidth,
"minwidth": minwidth,
"firstline": firstline,
"hidden": hidden,
"tabpage": tabpage,
"title": title,
"wrap": wrap,
"drag": drag,
"resize": resize,
"close": close,
"highlight": highlight,
"padding": padding,
"border": border,
"borderhighlight": borderhighlight,
"borderchars": borderchars,
"scrollbar": scrollbar,
"scrollbarhighlight": scrollbarhighlight,
"thumbhighlight": thumbhighlight,
"zindex": zindex,
"mask": mask,
"time": time,
"moved": moved,
"mousemoved": mousemoved,
"cursorline": cursorline,
"filter": filter,
"mapping": mapping,
"filtermode": filtermode,
"callback": callback,
}
@property
def line(self):
return self.__line
@line.setter
def line(self, x):
if isinstance(x, int) and x < 0:
raise ValueError("""value of popup option "line" should not be negative""")
        elif isinstance(x, str) and (
                x != "cursor" and
                re.match(r'cursor[+-]\d+$', x) is None):
            raise ValueError("""value of popup option "line" should be "cursor", or something like "cursor+1" and "cursor-1" """)
        elif x is not None and not isinstance(x, (int, str)):
            raise ValueError("""value of popup option "line" should have type `int` or `str`""")
        else:
            self.__line = x
def get_option(self, opt):
return self.__opt_dict[opt]
|
[
"re.match"
] |
[((1058, 1092), 're.match', 're.match', (['"""cursor[+-]\\\\d+$"""', 'value'], {}), "('cursor[+-]\\\\d+$', value)\n", (1066, 1092), False, 'import re\n'), ((7148, 7178), 're.match', 're.match', (['"""cursor[+-]\\\\d+$"""', 'x'], {}), "('cursor[+-]\\\\d+$', x)\n", (7156, 7178), False, 'import re\n')]
|
import os
import shutil
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import BaseCommand
class Command(BaseCommand):
def handle(self, *args, **options):
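        # Load the bundled initial_data fixture and copy its images into MEDIA_ROOT/original_images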
fixtures_dir = os.path.join(settings.PROJECT_ROOT, settings.SITE_NAME, 'core', 'fixtures')
fixture_file = os.path.join(fixtures_dir, 'initial_data.json')
image_src_dir = os.path.join(fixtures_dir, 'images')
image_dest_dir = os.path.join(settings.MEDIA_ROOT, 'original_images')
call_command('loaddata', fixture_file, verbosity=3)
if not os.path.isdir(image_dest_dir):
os.makedirs(image_dest_dir)
for filename in os.listdir(image_src_dir):
shutil.copy(os.path.join(image_src_dir, filename), image_dest_dir)
|
[
"os.makedirs",
"os.path.isdir",
"django.core.management.call_command",
"os.path.join",
"os.listdir"
] |
[((251, 326), 'os.path.join', 'os.path.join', (['settings.PROJECT_ROOT', 'settings.SITE_NAME', '"""core"""', '"""fixtures"""'], {}), "(settings.PROJECT_ROOT, settings.SITE_NAME, 'core', 'fixtures')\n", (263, 326), False, 'import os\n'), ((350, 397), 'os.path.join', 'os.path.join', (['fixtures_dir', '"""initial_data.json"""'], {}), "(fixtures_dir, 'initial_data.json')\n", (362, 397), False, 'import os\n'), ((422, 458), 'os.path.join', 'os.path.join', (['fixtures_dir', '"""images"""'], {}), "(fixtures_dir, 'images')\n", (434, 458), False, 'import os\n'), ((484, 536), 'os.path.join', 'os.path.join', (['settings.MEDIA_ROOT', '"""original_images"""'], {}), "(settings.MEDIA_ROOT, 'original_images')\n", (496, 536), False, 'import os\n'), ((546, 597), 'django.core.management.call_command', 'call_command', (['"""loaddata"""', 'fixture_file'], {'verbosity': '(3)'}), "('loaddata', fixture_file, verbosity=3)\n", (558, 597), False, 'from django.core.management import call_command\n'), ((710, 735), 'os.listdir', 'os.listdir', (['image_src_dir'], {}), '(image_src_dir)\n', (720, 735), False, 'import os\n'), ((614, 643), 'os.path.isdir', 'os.path.isdir', (['image_dest_dir'], {}), '(image_dest_dir)\n', (627, 643), False, 'import os\n'), ((657, 684), 'os.makedirs', 'os.makedirs', (['image_dest_dir'], {}), '(image_dest_dir)\n', (668, 684), False, 'import os\n'), ((761, 798), 'os.path.join', 'os.path.join', (['image_src_dir', 'filename'], {}), '(image_src_dir, filename)\n', (773, 798), False, 'import os\n')]
|
import argparse
import logging
import os
from gfootball.env.config import Config
from gfootball.common.args import bool_arg
from gfootball.common.history import History, HistoryItem
from gfootball.env import football_env
from gfootball.env.football_action_set import DEFAULT_ACTION_SET, ActionSetType
from gfootball.policies.base_policy import PolicyConfig, PolicyType
def parse_args():
parser = argparse.ArgumentParser(description='Train')
# 'keyboard:left_players=1'
parser.add_argument(
'--players', type=str, default='bot_1v1:left_players=1',
help='Semicolon separated list of players, single keyboard player on the left by default')
parser.add_argument('--level', type=str, default='1_vs_1_easy', help='Level to play')
# parser.add_argument('--action_set', type=str, default='default', help='default or full')
# parser.add_argument('--real_time', type=bool_arg, default=True,
# help='If true, environment will slow down so humans can play.')
parser.add_argument('--render', type=bool_arg, default=True, help='Whether to do game rendering.')
parser.add_argument('--warmstart', type=bool_arg, default=False,
help='Whether to warmstart using the handmade agent.')
parser.add_argument('--verbose', type=bool_arg, default=True)
parser.add_argument('--pitch_scale', type=float, default=0.5, help='Pitch scale. Can be 1.0 or 0.5 for now.')
parser.add_argument('--checkpoint', type=str, default=None, help='Pickle file of Q')
parser.add_argument('--random_frac', type=float, default=0.1, help='')
parser.add_argument('--video', type=str, default='', help='')
parser.add_argument('--num_games', type=int, default=1000000000, help='')
parser.add_argument('--lr', type=float, default=1e-4, help='')
parser.add_argument('--policy_type', type=PolicyType, default=PolicyType.Q_LEARNING, help='')
parser.add_argument('--n_steps', type=int, default=50, help='')
parser.add_argument('--discount', type=float, default=0.999, help='')
args = parser.parse_args()
return args
def main():
args = parse_args()
players = args.players.split(';')
config = Config({
'action_set': ActionSetType.DEFAULT,
'dump_full_episodes': False,
'players': players,
# 'real_time': args.real_time and args.render,
'real_time': args.render and (not args.video),
'pitch_scale': args.pitch_scale,
})
base_player_config = {
'policy_config': PolicyConfig(
policy_type=args.policy_type,
checkpoint=args.checkpoint,
random_frac=args.random_frac,
action_set=DEFAULT_ACTION_SET,
lr=args.lr,
discount=args.discount,
n_steps=args.n_steps,
verbose=args.verbose,
),
'warmstart': args.warmstart,
'verbose': args.verbose,
'video': args.video,
}
if args.level:
config['level'] = args.level
checkpoint = 'agents/' + args.policy_type.value.lower() + '/agent.npz'
assert not os.system('mkdir -p %s' % os.path.dirname(checkpoint))
env = football_env.FootballEnv(config=config, base_player_config=base_player_config)
if args.render:
env.render()
obs_history = [
env.reset(), # Need this to know the initial state
]
# self_play_history = History(max_size=int(1e7))
running_score_update = 0.999
running_score = [0, 0, 0]
record = [0, 0, 0]
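    # running_score: exponentially-weighted [left goals, right goals, goal difference]; record: [wins, draws, losses] from the left team's perspective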
try:
game_num = 0
epoch_history = []
# cnts_by_mode = defaultdict(int)
while True:
obs, reward, done, info = env.step()
# _, old_relative_obs = env.get_players_and_relative_obs_pairs(obs=obs_history[-1])
# _, new_relative_obs = env.get_players_and_relative_obs_pairs(obs=obs)
if env._agent.num_controlled_right_players() > 0:
reward *= -1
item = HistoryItem(
old_state=obs_history[-1],
action=info['agent_action'],
new_state=obs,
reward=reward.item(),
)
epoch_history.append(item)
# env._agent.give_reward(item=item)
# self_play_history.add(item=item)
# cnts_by_mode[(obs[0]['game_mode'], obs[0]['ball_owned_team'])] += 1
obs_history.append(obs)
if args.verbose:
print(reward, done, info)
if done:
# defaultdict(<class 'int'>, {(0, -1): 36256, (0, 0): 12701, (0, 1): 55352, (2, -1): 1871, (3, -1): 2146, (5, 1): 140, (5, 0): 19269, (4, -1): 1119, (5, -1): 1, (6, -1): 145})
# print(cnts_by_mode)
game_num += 1
score = obs[0]['score']
running_score[0] = running_score_update * running_score[0] + (1.0 - running_score_update) * score[0]
running_score[1] = running_score_update * running_score[1] + (1.0 - running_score_update) * score[1]
running_score[2] = running_score[0] - running_score[1]
if score[0] > score[1]:
record[0] += 1
elif score[0] < score[1]:
record[2] += 1
else:
record[1] += 1
# mean_reward = self_play_history.mean_reward()
print(
'Final Score:', score,
'Running score: [%.3f, %.3f, %.3f]' % tuple(
[x / (1 - running_score_update ** game_num) for x in running_score]),
'Record:', record,
# 'Mean Reward in history:', mean_reward,
)
# for item in self_play_history.sample(n=int(1e3)):
# env._agent.give_reward(item=item) # ._replace(reward=item.reward - mean_reward))
env._agent.process_epoch(items=epoch_history)
env._agent.reset()
obs_history.append(env.reset())
epoch_history = []
if (not args.render) and (game_num % 25 == 0):
env._agent.save(checkpoint=checkpoint)
if game_num == args.num_games:
break
except KeyboardInterrupt:
logging.warning('Game stopped, writing dump...')
if (not args.render):
env._agent.save(checkpoint='agent.pkl')
# env.write_dump('shutdown')
# return env._agent
print(checkpoint)
exit(1)
if __name__ == '__main__':
# app.run(main)
main()
# update_states()
|
[
"gfootball.policies.base_policy.PolicyConfig",
"gfootball.env.football_env.FootballEnv",
"argparse.ArgumentParser",
"gfootball.env.config.Config",
"logging.warning",
"os.path.dirname"
] |
[((405, 449), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train"""'}), "(description='Train')\n", (428, 449), False, 'import argparse\n'), ((2165, 2345), 'gfootball.env.config.Config', 'Config', (["{'action_set': ActionSetType.DEFAULT, 'dump_full_episodes': False,\n 'players': players, 'real_time': args.render and not args.video,\n 'pitch_scale': args.pitch_scale}"], {}), "({'action_set': ActionSetType.DEFAULT, 'dump_full_episodes': False,\n 'players': players, 'real_time': args.render and not args.video,\n 'pitch_scale': args.pitch_scale})\n", (2171, 2345), False, 'from gfootball.env.config import Config\n'), ((3130, 3208), 'gfootball.env.football_env.FootballEnv', 'football_env.FootballEnv', ([], {'config': 'config', 'base_player_config': 'base_player_config'}), '(config=config, base_player_config=base_player_config)\n', (3154, 3208), False, 'from gfootball.env import football_env\n'), ((2494, 2713), 'gfootball.policies.base_policy.PolicyConfig', 'PolicyConfig', ([], {'policy_type': 'args.policy_type', 'checkpoint': 'args.checkpoint', 'random_frac': 'args.random_frac', 'action_set': 'DEFAULT_ACTION_SET', 'lr': 'args.lr', 'discount': 'args.discount', 'n_steps': 'args.n_steps', 'verbose': 'args.verbose'}), '(policy_type=args.policy_type, checkpoint=args.checkpoint,\n random_frac=args.random_frac, action_set=DEFAULT_ACTION_SET, lr=args.lr,\n discount=args.discount, n_steps=args.n_steps, verbose=args.verbose)\n', (2506, 2713), False, 'from gfootball.policies.base_policy import PolicyConfig, PolicyType\n'), ((6267, 6315), 'logging.warning', 'logging.warning', (['"""Game stopped, writing dump..."""'], {}), "('Game stopped, writing dump...')\n", (6282, 6315), False, 'import logging\n'), ((3091, 3118), 'os.path.dirname', 'os.path.dirname', (['checkpoint'], {}), '(checkpoint)\n', (3106, 3118), False, 'import os\n')]
|
import sys
sys.path.append("../ern/")
sys.path.append("../dies/")
import copy
import torch
import numpy as np
import pandas as pd
from dies.utils import listify
from sklearn.metrics import mean_squared_error as mse
from torch.utils.data.dataloader import DataLoader
from fastai.basic_data import DataBunch
from fastai.basic_data import DatasetType
import glob
def to_short_name(file):
return (
file.split("/")[-1]
.replace(".h5", "")
.replace(".csv", "")
.replace(".pkl", "")
.replace(".pth", "")
.replace("_config", "")
)
def create_databunch(
train_ds, val_ds, test_ds, batch_size, device,
):
train_ds.to_device(device)
tr = DataLoader(
train_ds,
batch_size,
drop_last=True,
shuffle=True,
# num_workers=6,
pin_memory=False,
)
val_ds.to_device(device)
val = DataLoader(val_ds, batch_size, pin_memory=False)
if test_ds is not None:
test_ds.to_device(device)
test = DataLoader(test_ds, batch_size, pin_memory=False)
else:
test = None
data_bunch = DataBunch(tr, val, test_dl=test)
return data_bunch
def get_config(file, include_rmse=False):
df = pd.read_csv(file, sep=",")
min_rmse_idx = df.root_mean_squared_error.idxmin()
relevant_cols = [c for c in df.columns if "config" in c]
rename_cols = {c: c.replace("config/", "") for c in relevant_cols}
if include_rmse:
relevant_cols += ["root_mean_squared_error"]
df = df[relevant_cols].loc[min_rmse_idx]
df = df.rename(rename_cols)
return df
def match_file_names(file_name, file_names):
res = None
file_name = to_short_name(file_name)
for f in file_names:
if file_name == to_short_name(f):
res = f
break
return res
def get_preds(learn, data_type=DatasetType.Test):
y_hats, y = learn.get_preds(data_type)
y_hats = np.clip(y_hats, 0, 1.05)
return y, y_hats
def get_rmse(learn, data_type=DatasetType.Test):
y, y_hats = get_preds(learn, data_type=data_type)
y_hats = np.clip(y_hats, 0, 1.05)
e = mse(y, y_hats) ** 0.5
return e
def get_ds_from_type(data_bunch, data_type):
if data_type == DatasetType.Train:
return data_bunch.train_ds
elif data_type == DatasetType.Valid:
return data_bunch.valid_ds
elif data_type == DatasetType.Test:
return data_bunch.test_ds
def create_rmse_df_lstm(y, y_hat, file, data_bunch, data_type=DatasetType.Test):
res_rmses, park_ids = [], []
pdfs = []
ds = get_ds_from_type(data_bunch, data_type)
y, y_hat = y.ravel(), y_hat.ravel()
res_rmse = mse(y, y_hat) ** 0.5
res_rmses.append(res_rmse)
park_ids.append(file)
df_f = pd.DataFrame({"Y": y, "Yhat": y_hat, "Time": ds.index})
df_f["ParkId"] = to_short_name(file)
pdfs.append(df_f)
df_res = pd.DataFrame({"RMSE": res_rmses, "ParkId": park_ids})
pdfs = pd.concat(pdfs, axis=0)
return df_res, pdfs
def create_rmse_df_mtl(y, y_hat, files, data_bunch, data_type=DatasetType.Test):
res_rmses, park_ids = [], []
pdfs = []
ds = get_ds_from_type(data_bunch, data_type)
for i in range(y.shape[1]):
res_rmse = mse(y[:, i], y_hat[:, i]) ** 0.5
res_rmses.append(res_rmse)
park_ids.append(files[i])
df_f = pd.DataFrame({"Y": y[:, i], "Yhat": y_hat[:, i], "Time": ds.index})
df_f["ParkId"] = to_short_name(data_bunch.files[i])
pdfs.append(df_f)
df_res = pd.DataFrame({"RMSE": res_rmses, "ParkId": park_ids})
pdfs = pd.concat(pdfs, axis=0)
return df_res, pdfs
def create_rmse_df_mlp(y, y_hat, park_ids, data_bunch, data_type=DatasetType.Test):
cat_park_ids = park_ids.ravel()
unique_park_ids = np.unique(park_ids)
ds = get_ds_from_type(data_bunch, data_type)
res_rmses, park_ids = [], []
dfs = []
for cur_park_id in unique_park_ids:
mask = cat_park_ids == cur_park_id
cy = y[mask]
cyh = y_hat[mask]
cid = ds.index[mask]
df_f = pd.DataFrame({"Y": cy.ravel(), "Yhat": cyh.ravel(), "Time": cid})
df_f["ParkId"] = to_short_name(data_bunch.files[cur_park_id])
dfs.append(df_f)
res_rmse = mse(cy, cyh) ** 0.5
res_rmses.append(res_rmse)
park_ids.append(cur_park_id)
dfs = pd.concat(dfs, axis=0)
df_res = pd.DataFrame({"RMSE": res_rmses, "ParkId": park_ids})
return df_res, dfs
def get_test_results(test_folder):
files = glob.glob(test_folder + f"/*.csv")
dfs = []
for f in files:
dfs.append(pd.read_csv(f, sep=";"))
df = pd.concat(dfs, axis=0)
return df
def get_eval_results(base_folder, data_type):
forecast_folder = f"{base_folder}/mtl/"
files = glob.glob(forecast_folder + f"/{data_type}*error.csv")
forecast_folder = f"{base_folder}/lstm/"
files = files + glob.glob(forecast_folder + f"/{data_type}*error.csv")
forecast_folder = f"{base_folder}/mlp/"
files = files + glob.glob(forecast_folder + f"/{data_type}*error.csv")
dfs = []
for f in files:
dfs.append(pd.read_csv(f, sep=","))
df = pd.concat(dfs, axis=0)
return df
|
[
"sys.path.append",
"pandas.DataFrame",
"sklearn.metrics.mean_squared_error",
"pandas.read_csv",
"numpy.clip",
"glob.glob",
"torch.utils.data.dataloader.DataLoader",
"pandas.concat",
"numpy.unique",
"fastai.basic_data.DataBunch"
] |
[((12, 38), 'sys.path.append', 'sys.path.append', (['"""../ern/"""'], {}), "('../ern/')\n", (27, 38), False, 'import sys\n'), ((39, 66), 'sys.path.append', 'sys.path.append', (['"""../dies/"""'], {}), "('../dies/')\n", (54, 66), False, 'import sys\n'), ((701, 786), 'torch.utils.data.dataloader.DataLoader', 'DataLoader', (['train_ds', 'batch_size'], {'drop_last': '(True)', 'shuffle': '(True)', 'pin_memory': '(False)'}), '(train_ds, batch_size, drop_last=True, shuffle=True, pin_memory=False\n )\n', (711, 786), False, 'from torch.utils.data.dataloader import DataLoader\n'), ((896, 944), 'torch.utils.data.dataloader.DataLoader', 'DataLoader', (['val_ds', 'batch_size'], {'pin_memory': '(False)'}), '(val_ds, batch_size, pin_memory=False)\n', (906, 944), False, 'from torch.utils.data.dataloader import DataLoader\n'), ((1121, 1153), 'fastai.basic_data.DataBunch', 'DataBunch', (['tr', 'val'], {'test_dl': 'test'}), '(tr, val, test_dl=test)\n', (1130, 1153), False, 'from fastai.basic_data import DataBunch\n'), ((1230, 1256), 'pandas.read_csv', 'pd.read_csv', (['file'], {'sep': '""","""'}), "(file, sep=',')\n", (1241, 1256), True, 'import pandas as pd\n'), ((1945, 1969), 'numpy.clip', 'np.clip', (['y_hats', '(0)', '(1.05)'], {}), '(y_hats, 0, 1.05)\n', (1952, 1969), True, 'import numpy as np\n'), ((2109, 2133), 'numpy.clip', 'np.clip', (['y_hats', '(0)', '(1.05)'], {}), '(y_hats, 0, 1.05)\n', (2116, 2133), True, 'import numpy as np\n'), ((2775, 2830), 'pandas.DataFrame', 'pd.DataFrame', (["{'Y': y, 'Yhat': y_hat, 'Time': ds.index}"], {}), "({'Y': y, 'Yhat': y_hat, 'Time': ds.index})\n", (2787, 2830), True, 'import pandas as pd\n'), ((2908, 2961), 'pandas.DataFrame', 'pd.DataFrame', (["{'RMSE': res_rmses, 'ParkId': park_ids}"], {}), "({'RMSE': res_rmses, 'ParkId': park_ids})\n", (2920, 2961), True, 'import pandas as pd\n'), ((2973, 2996), 'pandas.concat', 'pd.concat', (['pdfs'], {'axis': '(0)'}), '(pdfs, axis=0)\n', (2982, 2996), True, 'import pandas as pd\n'), ((3539, 3592), 'pandas.DataFrame', 'pd.DataFrame', (["{'RMSE': res_rmses, 'ParkId': park_ids}"], {}), "({'RMSE': res_rmses, 'ParkId': park_ids})\n", (3551, 3592), True, 'import pandas as pd\n'), ((3604, 3627), 'pandas.concat', 'pd.concat', (['pdfs'], {'axis': '(0)'}), '(pdfs, axis=0)\n', (3613, 3627), True, 'import pandas as pd\n'), ((3797, 3816), 'numpy.unique', 'np.unique', (['park_ids'], {}), '(park_ids)\n', (3806, 3816), True, 'import numpy as np\n'), ((4372, 4394), 'pandas.concat', 'pd.concat', (['dfs'], {'axis': '(0)'}), '(dfs, axis=0)\n', (4381, 4394), True, 'import pandas as pd\n'), ((4408, 4461), 'pandas.DataFrame', 'pd.DataFrame', (["{'RMSE': res_rmses, 'ParkId': park_ids}"], {}), "({'RMSE': res_rmses, 'ParkId': park_ids})\n", (4420, 4461), True, 'import pandas as pd\n'), ((4534, 4568), 'glob.glob', 'glob.glob', (["(test_folder + f'/*.csv')"], {}), "(test_folder + f'/*.csv')\n", (4543, 4568), False, 'import glob\n'), ((4656, 4678), 'pandas.concat', 'pd.concat', (['dfs'], {'axis': '(0)'}), '(dfs, axis=0)\n', (4665, 4678), True, 'import pandas as pd\n'), ((4799, 4853), 'glob.glob', 'glob.glob', (["(forecast_folder + f'/{data_type}*error.csv')"], {}), "(forecast_folder + f'/{data_type}*error.csv')\n", (4808, 4853), False, 'import glob\n'), ((5182, 5204), 'pandas.concat', 'pd.concat', (['dfs'], {'axis': '(0)'}), '(dfs, axis=0)\n', (5191, 5204), True, 'import pandas as pd\n'), ((1023, 1072), 'torch.utils.data.dataloader.DataLoader', 'DataLoader', (['test_ds', 'batch_size'], {'pin_memory': '(False)'}), '(test_ds, batch_size, 
pin_memory=False)\n', (1033, 1072), False, 'from torch.utils.data.dataloader import DataLoader\n'), ((2142, 2156), 'sklearn.metrics.mean_squared_error', 'mse', (['y', 'y_hats'], {}), '(y, y_hats)\n', (2145, 2156), True, 'from sklearn.metrics import mean_squared_error as mse\n'), ((2685, 2698), 'sklearn.metrics.mean_squared_error', 'mse', (['y', 'y_hat'], {}), '(y, y_hat)\n', (2688, 2698), True, 'from sklearn.metrics import mean_squared_error as mse\n'), ((3371, 3438), 'pandas.DataFrame', 'pd.DataFrame', (["{'Y': y[:, i], 'Yhat': y_hat[:, i], 'Time': ds.index}"], {}), "({'Y': y[:, i], 'Yhat': y_hat[:, i], 'Time': ds.index})\n", (3383, 3438), True, 'import pandas as pd\n'), ((4920, 4974), 'glob.glob', 'glob.glob', (["(forecast_folder + f'/{data_type}*error.csv')"], {}), "(forecast_folder + f'/{data_type}*error.csv')\n", (4929, 4974), False, 'import glob\n'), ((5040, 5094), 'glob.glob', 'glob.glob', (["(forecast_folder + f'/{data_type}*error.csv')"], {}), "(forecast_folder + f'/{data_type}*error.csv')\n", (5049, 5094), False, 'import glob\n'), ((3253, 3278), 'sklearn.metrics.mean_squared_error', 'mse', (['y[:, i]', 'y_hat[:, i]'], {}), '(y[:, i], y_hat[:, i])\n', (3256, 3278), True, 'from sklearn.metrics import mean_squared_error as mse\n'), ((4270, 4282), 'sklearn.metrics.mean_squared_error', 'mse', (['cy', 'cyh'], {}), '(cy, cyh)\n', (4273, 4282), True, 'from sklearn.metrics import mean_squared_error as mse\n'), ((4622, 4645), 'pandas.read_csv', 'pd.read_csv', (['f'], {'sep': '""";"""'}), "(f, sep=';')\n", (4633, 4645), True, 'import pandas as pd\n'), ((5148, 5171), 'pandas.read_csv', 'pd.read_csv', (['f'], {'sep': '""","""'}), "(f, sep=',')\n", (5159, 5171), True, 'import pandas as pd\n')]
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Recipe module for Skia Swarming perf.
import calendar
import json
import os
DEPS = [
'env',
'flavor',
'recipe_engine/file',
'recipe_engine/json',
'recipe_engine/path',
'recipe_engine/platform',
'recipe_engine/properties',
'recipe_engine/raw_io',
'recipe_engine/step',
'recipe_engine/time',
'run',
'vars',
]
def upload_perf_results(buildername):
if 'Release' not in buildername:
return False
skip_upload_bots = [
'ASAN',
'Coverage',
'MSAN',
'TSAN',
'Valgrind',
]
for s in skip_upload_bots:
if s in buildername:
return False
return True
def perf_steps(api):
"""Run Skia benchmarks."""
b = api.properties['buildername']
if upload_perf_results(b):
api.flavor.create_clean_device_dir(
api.flavor.device_dirs.perf_data_dir)
# Find nanobench flags.
args = json.loads(api.properties['nanobench_flags'])
props = json.loads(api.properties['nanobench_properties'])
swarming_bot_id = api.vars.swarming_bot_id
swarming_task_id = api.vars.swarming_task_id
if upload_perf_results(b):
args.append('--properties')
# Map iteration order is arbitrary; in order to maintain a consistent step
# ordering, sort by key.
for k in sorted(props.keys()):
v = props[k]
if v == '${SWARMING_BOT_ID}':
v = swarming_bot_id
elif v == '${SWARMING_TASK_ID}':
v = swarming_task_id
if v != '':
args.extend([k, v])
# Paths to required resources.
args.extend(['-i', api.flavor.device_dirs.resource_dir])
if 'iOS' not in b:
    args.extend(['--skps', api.flavor.device_dirs.skp_dir])
if 'GPU' not in b:
args.extend(['--images', api.flavor.device_path_join(
api.flavor.device_dirs.images_dir, 'nanobench')])
if api.vars.builder_cfg.get('cpu_or_gpu') == 'CPU' and 'Android' in b:
assert api.flavor.device_dirs.texttraces_dir
args.extend(['--texttraces', api.flavor.device_dirs.texttraces_dir])
# Do not run svgs on Valgrind.
if 'Valgrind' not in b:
args.extend(['--svgs', api.flavor.device_dirs.svg_dir])
if upload_perf_results(b):
now = api.time.utcnow()
ts = int(calendar.timegm(now.utctimetuple()))
json_path = api.flavor.device_path_join(
api.flavor.device_dirs.perf_data_dir,
'nanobench_%s_%d.json' % (api.properties['revision'], ts))
args.extend(['--outResultsFile', json_path])
api.run(api.flavor.step, 'nanobench', cmd=args,
abort_on_failure=False)
# Copy results to swarming out dir.
if upload_perf_results(b):
api.file.ensure_directory(
'makedirs perf_dir',
api.flavor.host_dirs.perf_data_dir)
api.flavor.copy_directory_contents_to_host(
api.flavor.device_dirs.perf_data_dir,
api.flavor.host_dirs.perf_data_dir)
def RunSteps(api):
api.vars.setup()
api.file.ensure_directory('makedirs tmp_dir', api.vars.tmp_dir)
api.flavor.setup('nanobench')
try:
if all(v in api.vars.builder_name for v in ['Android', 'CPU']):
api.flavor.install(skps=True, images=True, svgs=True, resources=True,
texttraces=True)
else:
api.flavor.install(skps=True, images=True, svgs=True, resources=True)
perf_steps(api)
finally:
api.flavor.cleanup_steps()
api.run.check_failure()
TEST_BUILDERS = [
'Perf-Android-Clang-Nexus7-CPU-Tegra3-arm-Debug-All-Android',
('Perf-Ubuntu18-Clang-Golo-GPU-QuadroP400-x86_64-Release-All'
'-Valgrind_SK_CPU_LIMIT_SSE41'),
'Perf-Win10-Clang-Golo-GPU-QuadroP400-x86_64-Release-All-ANGLE',
]
def GenTests(api):
for builder in TEST_BUILDERS:
test = (
api.test(builder) +
api.properties(buildername=builder,
nanobench_flags='["nanobench","--dummy","--flags"]',
nanobench_properties=('{"key1":"value1","key2":"",'
'"bot":"${SWARMING_BOT_ID}",'
'"task":"${SWARMING_TASK_ID}"}'),
revision='abc123',
path_config='kitchen',
swarm_out_dir='[SWARM_OUT_DIR]') +
api.path.exists(
api.path['start_dir'].join('skia'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'skimage', 'VERSION'),
api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
'skp', 'VERSION'),
api.path['start_dir'].join('tmp', 'uninteresting_hashes.txt')
) +
api.step_data('get swarming bot id',
stdout=api.raw_io.output('skia-bot-123')) +
api.step_data('get swarming task id',
stdout=api.raw_io.output('123456'))
)
if 'Win' in builder:
test += api.platform('win', 64)
yield test
|
[
"json.loads"
] |
[((1014, 1059), 'json.loads', 'json.loads', (["api.properties['nanobench_flags']"], {}), "(api.properties['nanobench_flags'])\n", (1024, 1059), False, 'import json\n'), ((1070, 1120), 'json.loads', 'json.loads', (["api.properties['nanobench_properties']"], {}), "(api.properties['nanobench_properties'])\n", (1080, 1120), False, 'import json\n')]
|
import pytest
import numpy as np
from quantum_systems import BasisSet
def test_add_spin_spf():
spf = (np.arange(15) + 1).reshape(3, 5).T
n = 3
n_a = 2
n_b = n - n_a
l = 2 * spf.shape[0]
assert l == 10
m_a = l // 2 - n_a
assert m_a == 3
m_b = l // 2 - n_b
assert m_b == 4
new_spf = BasisSet.add_spin_spf(spf, np)
# Occupied spin-up
np.testing.assert_allclose(spf[0], new_spf[0])
np.testing.assert_allclose(spf[1], new_spf[2])
# Occupied spin-down
np.testing.assert_allclose(spf[0], new_spf[1])
# Virtual spin-up
np.testing.assert_allclose(spf[2], new_spf[4])
np.testing.assert_allclose(spf[3], new_spf[6])
np.testing.assert_allclose(spf[4], new_spf[8])
# Virtual spin-down
np.testing.assert_allclose(spf[1], new_spf[3])
np.testing.assert_allclose(spf[2], new_spf[5])
np.testing.assert_allclose(spf[3], new_spf[7])
np.testing.assert_allclose(spf[4], new_spf[9])
|
[
"numpy.testing.assert_allclose",
"numpy.arange",
"quantum_systems.BasisSet.add_spin_spf"
] |
[((333, 363), 'quantum_systems.BasisSet.add_spin_spf', 'BasisSet.add_spin_spf', (['spf', 'np'], {}), '(spf, np)\n', (354, 363), False, 'from quantum_systems import BasisSet\n'), ((392, 438), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['spf[0]', 'new_spf[0]'], {}), '(spf[0], new_spf[0])\n', (418, 438), True, 'import numpy as np\n'), ((444, 490), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['spf[1]', 'new_spf[2]'], {}), '(spf[1], new_spf[2])\n', (470, 490), True, 'import numpy as np\n'), ((521, 567), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['spf[0]', 'new_spf[1]'], {}), '(spf[0], new_spf[1])\n', (547, 567), True, 'import numpy as np\n'), ((595, 641), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['spf[2]', 'new_spf[4]'], {}), '(spf[2], new_spf[4])\n', (621, 641), True, 'import numpy as np\n'), ((647, 693), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['spf[3]', 'new_spf[6]'], {}), '(spf[3], new_spf[6])\n', (673, 693), True, 'import numpy as np\n'), ((699, 745), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['spf[4]', 'new_spf[8]'], {}), '(spf[4], new_spf[8])\n', (725, 745), True, 'import numpy as np\n'), ((775, 821), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['spf[1]', 'new_spf[3]'], {}), '(spf[1], new_spf[3])\n', (801, 821), True, 'import numpy as np\n'), ((827, 873), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['spf[2]', 'new_spf[5]'], {}), '(spf[2], new_spf[5])\n', (853, 873), True, 'import numpy as np\n'), ((879, 925), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['spf[3]', 'new_spf[7]'], {}), '(spf[3], new_spf[7])\n', (905, 925), True, 'import numpy as np\n'), ((931, 977), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['spf[4]', 'new_spf[9]'], {}), '(spf[4], new_spf[9])\n', (957, 977), True, 'import numpy as np\n'), ((109, 122), 'numpy.arange', 'np.arange', (['(15)'], {}), '(15)\n', (118, 122), True, 'import numpy as np\n')]
|
from django.contrib.auth import get_user_model
from rest_framework import mixins
from rest_framework.viewsets import GenericViewSet
from drive.users.permissions import IsAuthenticatedOrCreate
from drive.users.serializers import UserSerializer
User = get_user_model()
class UsersViewSet(
GenericViewSet,
mixins.CreateModelMixin):
"""
Api endpoint for signup a user.
"""
queryset = User.objects.all()
serializer_class = UserSerializer
permission_classes = [IsAuthenticatedOrCreate]
|
[
"django.contrib.auth.get_user_model"
] |
[((253, 269), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (267, 269), False, 'from django.contrib.auth import get_user_model\n')]
|
from django.db import models
from users.models import Profile
from django.urls import reverse
class Module(models.Model):
#Django cannot have composite primary keys, thus, using auto increment for pri key
moduleID = models.AutoField(primary_key = True)
moduleCode = models.CharField(max_length=20, unique=True)
moduleName = models.CharField(max_length=200)
def __str__(self):
return f"{self.moduleCode}: {self.moduleName}"
class Listing(models.Model):
listingID = models.AutoField(primary_key=True)
title = models.CharField(max_length=50)
description = models.TextField()
# Will automatically fill in date when listing created
datePosted = models.DateTimeField(auto_now_add=True)
# If module code does not exist, do not delete listing, but set the code to null
module = models.ForeignKey(Module, on_delete=models.SET_NULL, null=True)
# Get user from profile, if profile is deleted, the listings will also be deleted
user = models.ForeignKey(Profile, on_delete=models.CASCADE)
listingChoices = (
('Providing', 'Providing'),
('Requesting', 'Requesting')
)
# Either Providing or Requesting
typeOfListing = models.CharField(max_length=10, verbose_name=('Providing/Requesting'), choices=listingChoices)
# Once tutor wants to stop teaching, can close tuition listing
closed = models.BooleanField(default=False)
def __str__(self):
return f"{self.listingID}: {self.title}"
def get_absolute_url(self):
return reverse('listing-detail', kwargs={'pk': self.pk})
class TuitionSession(models.Model):
tuitionSessionID = models.AutoField(primary_key = True)
tutor = models.ForeignKey(Profile, on_delete=models.SET_NULL, related_name="tutor", null=True)
learner = models.ForeignKey(Profile, on_delete=models.SET_NULL, related_name="learner", null=True)
# When listing deleted, user can still leave review on the tutor, thus dont delete session
listing = models.ForeignKey(Listing, on_delete=models.SET_NULL, null=True)
#Initiated offer
#initiatedOffer = models.ForeignKey(Profile, on_delete=models.SET_NULL, null = True)
# 1 for send offer, 2 for accept offer, 3 for finalized offer, 0 is default
offer = models.IntegerField(default=0)
# Once complete
completed = models.BooleanField(default=False)
def __str__(self):
return f"{self.tuitionSessionID}"
|
[
"django.db.models.TextField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"django.db.models.AutoField",
"django.urls.reverse",
"django.db.models.IntegerField",
"django.db.models.DateTimeField"
] |
[((225, 259), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (241, 259), False, 'from django.db import models\n'), ((280, 324), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'unique': '(True)'}), '(max_length=20, unique=True)\n', (296, 324), False, 'from django.db import models\n'), ((342, 374), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (358, 374), False, 'from django.db import models\n'), ((500, 534), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (516, 534), False, 'from django.db import models\n'), ((547, 578), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (563, 578), False, 'from django.db import models\n'), ((597, 615), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (613, 615), False, 'from django.db import models\n'), ((693, 732), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (713, 732), False, 'from django.db import models\n'), ((832, 895), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Module'], {'on_delete': 'models.SET_NULL', 'null': '(True)'}), '(Module, on_delete=models.SET_NULL, null=True)\n', (849, 895), False, 'from django.db import models\n'), ((994, 1046), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Profile'], {'on_delete': 'models.CASCADE'}), '(Profile, on_delete=models.CASCADE)\n', (1011, 1046), False, 'from django.db import models\n'), ((1211, 1307), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'verbose_name': '"""Providing/Requesting"""', 'choices': 'listingChoices'}), "(max_length=10, verbose_name='Providing/Requesting',\n choices=listingChoices)\n", (1227, 1307), False, 'from django.db import models\n'), ((1387, 1421), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1406, 1421), False, 'from django.db import models\n'), ((1654, 1688), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (1670, 1688), False, 'from django.db import models\n'), ((1703, 1793), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Profile'], {'on_delete': 'models.SET_NULL', 'related_name': '"""tutor"""', 'null': '(True)'}), "(Profile, on_delete=models.SET_NULL, related_name='tutor',\n null=True)\n", (1720, 1793), False, 'from django.db import models\n'), ((1804, 1897), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Profile'], {'on_delete': 'models.SET_NULL', 'related_name': '"""learner"""', 'null': '(True)'}), "(Profile, on_delete=models.SET_NULL, related_name=\n 'learner', null=True)\n", (1821, 1897), False, 'from django.db import models\n'), ((2003, 2067), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Listing'], {'on_delete': 'models.SET_NULL', 'null': '(True)'}), '(Listing, on_delete=models.SET_NULL, null=True)\n', (2020, 2067), False, 'from django.db import models\n'), ((2273, 2303), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (2292, 2303), False, 'from django.db import models\n'), ((2340, 2374), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (2359, 2374), False, 'from django.db import models\n'), ((1543, 1592), 'django.urls.reverse', 
'reverse', (['"""listing-detail"""'], {'kwargs': "{'pk': self.pk}"}), "('listing-detail', kwargs={'pk': self.pk})\n", (1550, 1592), False, 'from django.urls import reverse\n')]
|
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import scrapy
from scrapy.pipelines.images import ImagesPipeline
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
# class ImgsproPipeline:
# def process_item(self, item, spider):
# return item
# For the image-crawling feature, override the get_media_requests, file_path and item_completed methods
class imgspipeline(ImagesPipeline):
def get_media_requests(self, item, info):
        # Request the image URL and download the image
yield scrapy.Request(item['img_url'])
def file_path(self, request, response=None, info=None, *, item=None):
        # Get the image file name
imgName=request.url.split('/')[-1]
return imgName
def item_completed(self, results, item, info):
        # Return the execution result
return item
|
[
"scrapy.Request"
] |
[((605, 636), 'scrapy.Request', 'scrapy.Request', (["item['img_url']"], {}), "(item['img_url'])\n", (619, 636), False, 'import scrapy\n')]
|
"""
Train a spiking Bayesian WTA network and plot weight changes, spike trains and log-likelihood live.
MIT License
Copyright (c) 2019 <NAME>, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import utility as ut
import network as nt
from tqdm import tqdm as tqdm
from plot import WeightPCAPlotter, WeightPlotter, CurvePlotter, SpiketrainPlotter
from collections import deque
from copy import deepcopy
from data_generator import DataGenerator
delta_T = 1e-3
# parameters
spiking_input = False
labels = [0, 1, 2, 3]
n_outputs = 12
W, H = 24, 24
r_net = 50.0
t_max = 1000
n_inputs = W*H
m_k = 1.0/n_outputs
# load data
x, y = ut.load_mnist(h=H, w=W, labels=labels, train=False, frequencies=spiking_input)
net = nt.EventBasedBinaryWTANetwork(n_inputs=n_inputs, n_outputs=n_outputs,
r_net=r_net, m_k=m_k, eta_v=1e-2, eta_b=1e+0, max_trace_length=1000)
# train
pca_plotter = WeightPCAPlotter(x, y, n_outputs, labels)
weights_plotter = WeightPlotter(ut.sigmoid(net._V).reshape((-1, W, H)))
likelihood_plotter = CurvePlotter(x_label="Time [s]", y_label="$log[p(y)]$")
output_spiketrains = SpiketrainPlotter(n_outputs, 100)
likelihoods = []
def estimate_likelihood(estimation_duration=5.0):
log_likelihoods = deque([])
estimation_net = deepcopy(net)
estimation_net._current_time = 0
estimation_net._trace = deque([])
while estimation_net._current_time < estimation_duration:
estimation_net.step(lambda t: data_generator[t], update_weights=False)
pbar.n = int(net._current_time * 1000) / 1000
pbar.update(0)
# log likelihood
sample = estimation_net._trace[-1][1].reshape((1, -1))
pi = ut.sigmoid(net._V)
log_likelihoods.append(
np.log(1.0 / n_outputs) + np.log(np.sum(np.prod(sample * pi + (1 - sample) * (1 - pi), axis=-1))))
return np.mean(log_likelihoods), np.std(log_likelihoods)
data_generator = DataGenerator(x, 10000, t_image=0.250, delta_T=delta_T, spiking=spiking_input)
pbar = tqdm(total=t_max, unit='Time [s]')
while net._current_time < t_max:
z = net.step(lambda t: data_generator[t])
if output_spiketrains is not None and net._current_time > 100:
output_spiketrains.update([z], [net._current_time])
pbar.n = int(net._current_time * 1000) / 1000
pbar.update(0)
# update plots
if int(pbar.n) > len(likelihoods):
likelihoods.append(estimate_likelihood())
weights_plotter.update(ut.sigmoid(net._V))
pca_plotter.update(ut.sigmoid(net._V))
likelihood_plotter.update(likelihoods)
likelihood = likelihoods[-1][0] if len(likelihoods) > 0 else np.nan
pbar.set_description(
f'<sigma(V)> = {np.mean(ut.sigmoid(net._V)):.4f}, <b> = {np.mean(net._b):.4f}, <L(y)> = {likelihood:.4f}')
pbar.close()
|
[
"utility.sigmoid",
"tqdm.tqdm",
"copy.deepcopy",
"numpy.log",
"utility.load_mnist",
"plot.SpiketrainPlotter",
"plot.WeightPCAPlotter",
"data_generator.DataGenerator",
"numpy.std",
"numpy.prod",
"numpy.mean",
"plot.CurvePlotter",
"network.EventBasedBinaryWTANetwork",
"collections.deque"
] |
[((1636, 1714), 'utility.load_mnist', 'ut.load_mnist', ([], {'h': 'H', 'w': 'W', 'labels': 'labels', 'train': '(False)', 'frequencies': 'spiking_input'}), '(h=H, w=W, labels=labels, train=False, frequencies=spiking_input)\n', (1649, 1714), True, 'import utility as ut\n'), ((1723, 1865), 'network.EventBasedBinaryWTANetwork', 'nt.EventBasedBinaryWTANetwork', ([], {'n_inputs': 'n_inputs', 'n_outputs': 'n_outputs', 'r_net': 'r_net', 'm_k': 'm_k', 'eta_v': '(0.01)', 'eta_b': '(1.0)', 'max_trace_length': '(1000)'}), '(n_inputs=n_inputs, n_outputs=n_outputs, r_net\n =r_net, m_k=m_k, eta_v=0.01, eta_b=1.0, max_trace_length=1000)\n', (1752, 1865), True, 'import network as nt\n'), ((1921, 1962), 'plot.WeightPCAPlotter', 'WeightPCAPlotter', (['x', 'y', 'n_outputs', 'labels'], {}), '(x, y, n_outputs, labels)\n', (1937, 1962), False, 'from plot import WeightPCAPlotter, WeightPlotter, CurvePlotter, SpiketrainPlotter\n'), ((2056, 2111), 'plot.CurvePlotter', 'CurvePlotter', ([], {'x_label': '"""Time [s]"""', 'y_label': '"""$log[p(y)]$"""'}), "(x_label='Time [s]', y_label='$log[p(y)]$')\n", (2068, 2111), False, 'from plot import WeightPCAPlotter, WeightPlotter, CurvePlotter, SpiketrainPlotter\n'), ((2133, 2166), 'plot.SpiketrainPlotter', 'SpiketrainPlotter', (['n_outputs', '(100)'], {}), '(n_outputs, 100)\n', (2150, 2166), False, 'from plot import WeightPCAPlotter, WeightPlotter, CurvePlotter, SpiketrainPlotter\n'), ((2947, 3024), 'data_generator.DataGenerator', 'DataGenerator', (['X', '(10000)'], {'t_image': '(0.25)', 'delta_T': 'delta_T', 'spiking': 'spiking_input'}), '(X, 10000, t_image=0.25, delta_T=delta_T, spiking=spiking_input)\n', (2960, 3024), False, 'from data_generator import DataGenerator\n'), ((3033, 3067), 'tqdm.tqdm', 'tqdm', ([], {'total': 't_max', 'unit': '"""Time [s]"""'}), "(total=t_max, unit='Time [s]')\n", (3037, 3067), True, 'from tqdm import tqdm as tqdm\n'), ((2259, 2268), 'collections.deque', 'deque', (['[]'], {}), '([])\n', (2264, 2268), False, 'from collections import deque\n'), ((2291, 2304), 'copy.deepcopy', 'deepcopy', (['net'], {}), '(net)\n', (2299, 2304), False, 'from copy import deepcopy\n'), ((2370, 2379), 'collections.deque', 'deque', (['[]'], {}), '([])\n', (2375, 2379), False, 'from collections import deque\n'), ((2704, 2722), 'utility.sigmoid', 'ut.sigmoid', (['net._V'], {}), '(net._V)\n', (2714, 2722), True, 'import utility as ut\n'), ((2878, 2902), 'numpy.mean', 'np.mean', (['log_likelihoods'], {}), '(log_likelihoods)\n', (2885, 2902), True, 'import numpy as np\n'), ((2904, 2927), 'numpy.std', 'np.std', (['log_likelihoods'], {}), '(log_likelihoods)\n', (2910, 2927), True, 'import numpy as np\n'), ((1995, 2013), 'utility.sigmoid', 'ut.sigmoid', (['net._V'], {}), '(net._V)\n', (2005, 2013), True, 'import utility as ut\n'), ((3486, 3504), 'utility.sigmoid', 'ut.sigmoid', (['net._V'], {}), '(net._V)\n', (3496, 3504), True, 'import utility as ut\n'), ((3533, 3551), 'utility.sigmoid', 'ut.sigmoid', (['net._V'], {}), '(net._V)\n', (3543, 3551), True, 'import utility as ut\n'), ((2767, 2790), 'numpy.log', 'np.log', (['(1.0 / n_outputs)'], {}), '(1.0 / n_outputs)\n', (2773, 2790), True, 'import numpy as np\n'), ((3764, 3779), 'numpy.mean', 'np.mean', (['net._b'], {}), '(net._b)\n', (3771, 3779), True, 'import numpy as np\n'), ((3731, 3749), 'utility.sigmoid', 'ut.sigmoid', (['net._V'], {}), '(net._V)\n', (3741, 3749), True, 'import utility as ut\n'), ((2807, 2862), 'numpy.prod', 'np.prod', (['(sample * pi + (1 - sample) * (1 - pi))'], {'axis': '(-1)'}), '(sample * pi + (1 - 
sample) * (1 - pi), axis=-1)\n', (2814, 2862), True, 'import numpy as np\n')]
|
import socket
class SocketServer(object):
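    """Blocking TCP server: binds to (host, port), listens, and accepts a single client connection."""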
def __init__(self, host, port):
self._host = host
self._port = port
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._socket.bind((self._host, self._port))
self._socket.listen(1)
self._conn, self._addr = self._socket.accept()
print('Connected by: {}'.format(self._addr))
def sendall(self, buf):
self._conn.sendall(buf)
def recv(self, size=1024):
data = self._conn.recv(size)
return data
def __del__(self):
self._conn.close()
|
[
"socket.socket"
] |
[((156, 205), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (169, 205), False, 'import socket\n')]
|
from django.urls import include, path, reverse
from django.contrib import admin
from django.http import HttpResponse
from django.contrib.sitemaps.views import sitemap
from comic.sitemaps import ComicSitemap
from blog.sitemaps import BlogSitemap
from .sitemaps import StaticSitemap
from django.conf.urls import handler404, handler500, handler403, handler400
from info.views import about, about_edit, info_edit, custom_404, custom_500, custom_403, custom_400
sitemaps = {
'static': StaticSitemap(),
'comic': ComicSitemap(),
'blog': BlogSitemap(),
}
urlpatterns = [
path('', include('comic.urls')),
path('about', about, name="about"),
path('about/edit', about_edit, name="about-edit"),
path('info/edit', info_edit, name="info-edit"),
path('blog/', include('blog.urls')),
path('access-portal/', include('admin.urls')),
path('robots.txt', lambda r: HttpResponse('Sitemap: ' + reverse('django.contrib.sitemaps.views.sitemap') + "\nUser-Agent: *\nDisallow:", content_type="text/plain"), name="robots_txt"),
path('sitemap.xml', sitemap, {'sitemaps': sitemaps}, name='django.contrib.sitemaps.views.sitemap'),
]
handler404 = custom_404
handler500 = custom_500
handler403 = custom_403
handler400 = custom_400
|
[
"django.urls.path",
"comic.sitemaps.ComicSitemap",
"blog.sitemaps.BlogSitemap",
"django.urls.reverse",
"django.urls.include"
] |
[((527, 541), 'comic.sitemaps.ComicSitemap', 'ComicSitemap', ([], {}), '()\n', (539, 541), False, 'from comic.sitemaps import ComicSitemap\n'), ((556, 569), 'blog.sitemaps.BlogSitemap', 'BlogSitemap', ([], {}), '()\n', (567, 569), False, 'from blog.sitemaps import BlogSitemap\n'), ((636, 670), 'django.urls.path', 'path', (['"""about"""', 'about'], {'name': '"""about"""'}), "('about', about, name='about')\n", (640, 670), False, 'from django.urls import include, path, reverse\n'), ((677, 726), 'django.urls.path', 'path', (['"""about/edit"""', 'about_edit'], {'name': '"""about-edit"""'}), "('about/edit', about_edit, name='about-edit')\n", (681, 726), False, 'from django.urls import include, path, reverse\n'), ((733, 779), 'django.urls.path', 'path', (['"""info/edit"""', 'info_edit'], {'name': '"""info-edit"""'}), "('info/edit', info_edit, name='info-edit')\n", (737, 779), False, 'from django.urls import include, path, reverse\n'), ((1070, 1173), 'django.urls.path', 'path', (['"""sitemap.xml"""', 'sitemap', "{'sitemaps': sitemaps}"], {'name': '"""django.contrib.sitemaps.views.sitemap"""'}), "('sitemap.xml', sitemap, {'sitemaps': sitemaps}, name=\n 'django.contrib.sitemaps.views.sitemap')\n", (1074, 1173), False, 'from django.urls import include, path, reverse\n'), ((607, 628), 'django.urls.include', 'include', (['"""comic.urls"""'], {}), "('comic.urls')\n", (614, 628), False, 'from django.urls import include, path, reverse\n'), ((800, 820), 'django.urls.include', 'include', (['"""blog.urls"""'], {}), "('blog.urls')\n", (807, 820), False, 'from django.urls import include, path, reverse\n'), ((851, 872), 'django.urls.include', 'include', (['"""admin.urls"""'], {}), "('admin.urls')\n", (858, 872), False, 'from django.urls import include, path, reverse\n'), ((936, 984), 'django.urls.reverse', 'reverse', (['"""django.contrib.sitemaps.views.sitemap"""'], {}), "('django.contrib.sitemaps.views.sitemap')\n", (943, 984), False, 'from django.urls import include, path, reverse\n')]
|
# Copyright (c) 2020 BlenderNPR and contributors. MIT license.
import math
import ctypes
import pyrr
from Malt.GL.GL import *
from Malt.GL.Shader import UBO
from Malt.GL.Texture import TextureArray, CubeMapArray
from Malt.GL.RenderTarget import ArrayLayerTarget, RenderTarget
from Malt import Pipeline
_LIGHTS_BUFFER = None
def get_lights_buffer():
if Pipeline.MAIN_CONTEXT:
global _LIGHTS_BUFFER
if _LIGHTS_BUFFER is None: _LIGHTS_BUFFER = LightsBuffer()
return _LIGHTS_BUFFER
else:
return LightsBuffer()
_SHADOWMAPS = None
def get_shadow_maps():
if Pipeline.MAIN_CONTEXT:
global _SHADOWMAPS
if _SHADOWMAPS is None: _SHADOWMAPS = ShadowMaps()
return _SHADOWMAPS
else:
return ShadowMaps()
LIGHT_SUN = 1
LIGHT_POINT = 2
LIGHT_SPOT = 3
class C_Light(ctypes.Structure):
_fields_ = [
('color', ctypes.c_float*3),
('type', ctypes.c_int32),
('position', ctypes.c_float*3),
('radius', ctypes.c_float),
('direction', ctypes.c_float*3),
('spot_angle', ctypes.c_float),
('spot_blend', ctypes.c_float),
('type_index', ctypes.c_int32),
('__padding', ctypes.c_int32*2),
]
MAX_SPOTS = 64
MAX_SUNS = 64
MAX_LIGHTS = 128
class C_LightsBuffer(ctypes.Structure):
_fields_ = [
('lights', C_Light*MAX_LIGHTS),
('lights_count', ctypes.c_int),
('cascades_count', ctypes.c_int),
('__padding', ctypes.c_int32*2),
('spot_matrices', ctypes.c_float*16*MAX_SPOTS),
('sun_matrices', ctypes.c_float*16*MAX_SUNS),
]
class ShadowMaps(object):
def __init__(self):
self.max_spots = 1
self.spot_resolution = 2048
self.spot_depth_t = None
self.spot_fbos = []
self.max_suns = 1
self.sun_resolution = 2048
self.sun_depth_t = None
self.sun_fbos = []
self.max_points = 1
self.point_resolution = 512
self.point_depth_t = None
self.point_fbos = []
self.initialized = False
def load(self, scene, spot_resolution, sun_resolution, point_resolution, sun_cascades):
needs_setup = self.initialized is False
self.initialized = True
new_settings = (spot_resolution, sun_resolution, point_resolution)
current_settings = (self.spot_resolution, self.sun_resolution, self.point_resolution)
if new_settings != current_settings:
self.spot_resolution = spot_resolution
self.sun_resolution = sun_resolution
self.point_resolution = point_resolution
needs_setup = True
spot_count = len([l for l in scene.lights if l.type == LIGHT_SPOT])
if spot_count > self.max_spots:
self.max_spots = spot_count
needs_setup = True
sun_count = len([l for l in scene.lights if l.type == LIGHT_SUN])
sun_count = sun_count * sun_cascades
if sun_count > self.max_suns:
self.max_suns = sun_count
needs_setup = True
point_count = len([l for l in scene.lights if l.type == LIGHT_POINT])
if point_count > self.max_points:
self.max_points = point_count
needs_setup = True
if needs_setup:
self.setup()
self.clear(spot_count, sun_count, point_count)
def setup(self, create_fbos=True):
self.spot_depth_t = TextureArray((self.spot_resolution, self.spot_resolution), self.max_spots, GL_DEPTH_COMPONENT32F)
self.sun_depth_t = TextureArray((self.sun_resolution, self.sun_resolution), self.max_suns, GL_DEPTH_COMPONENT32F)
self.point_depth_t = CubeMapArray((self.point_resolution, self.point_resolution), self.max_points, GL_DEPTH_COMPONENT32F)
if create_fbos:
self.spot_fbos = []
for i in range(self.spot_depth_t.length):
self.spot_fbos.append(RenderTarget([], ArrayLayerTarget(self.spot_depth_t, i)))
self.sun_fbos = []
for i in range(self.sun_depth_t.length):
self.sun_fbos.append(RenderTarget([], ArrayLayerTarget(self.sun_depth_t, i)))
self.point_fbos = []
for i in range(self.point_depth_t.length*6):
self.point_fbos.append(RenderTarget([], ArrayLayerTarget(self.point_depth_t, i)))
def clear(self, spot_count, sun_count, point_count):
for i in range(spot_count):
self.spot_fbos[i].clear(depth=1)
for i in range(sun_count):
self.sun_fbos[i].clear(depth=1)
for i in range(point_count*6):
self.point_fbos[i].clear(depth=1)
def shader_callback(self, shader):
shader.textures['SHADOWMAPS_DEPTH_SPOT'] = self.spot_depth_t
shader.textures['SHADOWMAPS_DEPTH_SUN'] = self.sun_depth_t
shader.textures['SHADOWMAPS_DEPTH_POINT'] = self.point_depth_t
class LightsBuffer(object):
def __init__(self):
self.data = C_LightsBuffer()
self.UBO = UBO()
self.spots = None
self.suns = None
self.points = None
def load(self, scene, cascades_count, cascades_distribution_scalar, cascades_max_distance=1.0):
#TODO: Automatic distribution exponent basedd on FOV
spot_count=0
sun_count=0
point_count=0
from collections import OrderedDict
self.spots = OrderedDict()
self.suns = OrderedDict()
self.points = OrderedDict()
for i, light in enumerate(scene.lights):
self.data.lights[i].color = light.color
self.data.lights[i].type = light.type
self.data.lights[i].position = light.position
self.data.lights[i].radius = light.radius
self.data.lights[i].direction = light.direction
self.data.lights[i].spot_angle = light.spot_angle
self.data.lights[i].spot_blend = light.spot_blend
if light.type == LIGHT_SPOT:
self.data.lights[i].type_index = spot_count
projection_matrix = make_projection_matrix(light.spot_angle,1,0.01,light.radius)
spot_matrix = projection_matrix * pyrr.Matrix44(light.matrix)
self.data.spot_matrices[spot_count] = flatten_matrix(spot_matrix)
self.spots[light] = [(light.matrix, flatten_matrix(projection_matrix))]
spot_count+=1
if light.type == LIGHT_SUN:
self.data.lights[i].type_index = sun_count
sun_matrix = pyrr.Matrix44(light.matrix)
projection_matrix = pyrr.Matrix44(scene.camera.projection_matrix)
view_matrix = projection_matrix * pyrr.Matrix44(scene.camera.camera_matrix)
cascades_matrices = get_sun_cascades(sun_matrix, projection_matrix, view_matrix, cascades_count, cascades_distribution_scalar, cascades_max_distance)
self.suns[light] = []
for i, cascade in enumerate(cascades_matrices):
cascade = flatten_matrix(cascade)
self.data.sun_matrices[sun_count * cascades_count + i] = cascade
self.suns[light].append((cascade, flatten_matrix(pyrr.Matrix44.identity())))
sun_count+=1
if light.type == LIGHT_POINT:
self.data.lights[i].type_index = point_count
cube_map_axes = [
(( 1, 0, 0),( 0,-1, 0)),
((-1, 0, 0),( 0,-1, 0)),
(( 0, 1, 0),( 0, 0, 1)),
(( 0,-1, 0),( 0, 0,-1)),
(( 0, 0, 1),( 0,-1, 0)),
(( 0, 0,-1),( 0,-1, 0))
]
matrices = []
for axes in cube_map_axes:
position = pyrr.Vector3(light.position)
front = pyrr.Vector3(axes[0])
up = pyrr.Vector3(axes[1])
matrices.append(pyrr.Matrix44.look_at(position, position + front, up))
projection_matrix = make_projection_matrix(math.pi / 2.0, 1.0, 0.01, light.radius)
self.points[light] = []
for i in range(6):
self.points[light].append((flatten_matrix(matrices[i]), flatten_matrix(projection_matrix)))
point_count+=1
self.data.lights_count = len(scene.lights)
self.data.cascades_count = cascades_count
self.UBO.load_data(self.data)
def bind(self, location):
self.UBO.bind(location)
def flatten_matrix(matrix):
return (ctypes.c_float * 16)(*[e for v in matrix for e in v])
#TODO: Hard-coded for Blender conventions for now
def make_projection_matrix(fov, aspect_ratio, near, far):
x_scale = 1.0 / math.tan(fov / 2.0)
y_scale = x_scale * aspect_ratio
return pyrr.Matrix44([
x_scale, 0, 0, 0,
0, y_scale, 0, 0,
0, 0, (-(far + near)) / (far - near), -1,
0, 0, (-2.0 * far * near) / (far - near), 0
])
def get_sun_cascades(sun_from_world_matrix, projection_matrix, view_from_world_matrix, cascades_count, cascades_distribution_scalar, cascades_max_distance):
cascades = []
splits = []
n,f = 0,0
if projection_matrix[3][3] == 1.0:
# ortho
n = cascades_max_distance / 2
f = -cascades_max_distance / 2
else:
# perspective
clip_start = projection_matrix.inverse * pyrr.Vector4([0,0,-1,1])
clip_start /= clip_start.w
n = clip_start.z
f = -cascades_max_distance
def lerp(a,b,f):
f = max(0,min(f,1))
return a * (1.0 - f) + b * f
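    # Interpolate between uniform and logarithmic split schemes to place the cascade boundaries.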
for i in range(cascades_count+1):
split_log = n * pow(f/n, i/cascades_count)
split_uniform = n + (f-n) * (i/cascades_count)
split = lerp(split_uniform, split_log, cascades_distribution_scalar)
projected = projection_matrix * pyrr.Vector4([0,0,split,1])
projected = (projected / projected.w) * (1.0 if projected.w >= 0 else -1.0)
splits.append(projected.z)
for i in range(1, len(splits)):
near = splits[i-1]
far = splits[i]
cascades.append(sun_shadowmap_matrix(sun_from_world_matrix, view_from_world_matrix, near, far))
return cascades
def frustum_corners(view_from_world_matrix, near, far):
m = view_from_world_matrix.inverse
corners = []
for x in (-1, 1):
for y in (-1, 1):
for z in (near, far):
v = pyrr.Vector4([x, y, z, 1])
v = m * v
v /= v.w
corners.append(v)
return corners
def sun_shadowmap_matrix(sun_from_world_matrix, view_from_world_matrix, near, far):
INFINITY = float('inf')
aabb = {
'min': pyrr.Vector3([ INFINITY, INFINITY, INFINITY]),
'max': pyrr.Vector3([-INFINITY, -INFINITY, -INFINITY])
}
for corner in frustum_corners(view_from_world_matrix, near, far):
corner = sun_from_world_matrix * corner
aabb['min'].x = min(aabb['min'].x, corner.x)
aabb['min'].y = min(aabb['min'].y, corner.y)
aabb['min'].z = min(aabb['min'].z, corner.z)
aabb['max'].x = max(aabb['max'].x, corner.x)
aabb['max'].y = max(aabb['max'].y, corner.y)
aabb['max'].z = max(aabb['max'].z, corner.z)
world_from_light_space = sun_from_world_matrix.inverse
size = aabb['max'] - aabb['min']
aabb['min'] = world_from_light_space * pyrr.Vector4([*aabb['min'].tolist(), 1.0])
aabb['max'] = world_from_light_space * pyrr.Vector4([*aabb['max'].tolist(), 1.0])
center = (aabb['min'] + aabb['max']) / 2.0
center = pyrr.Vector3(center.tolist()[:3])
scale = pyrr.Matrix44.from_scale(size)
translate = pyrr.Matrix44.from_translation(center)
matrix = translate * world_from_light_space * scale
screen = pyrr.Matrix44([
1, 0, 0, 0,
0, 1, 0, 0,
0, 0,-1, 0,
0, 0, 0, 1
])
return screen * matrix.inverse
|
[
"pyrr.Matrix44",
"pyrr.Vector3",
"pyrr.Matrix44.look_at",
"pyrr.Matrix44.identity",
"math.tan",
"Malt.GL.Shader.UBO",
"Malt.GL.Texture.TextureArray",
"Malt.GL.RenderTarget.ArrayLayerTarget",
"pyrr.Vector4",
"pyrr.Matrix44.from_translation",
"collections.OrderedDict",
"pyrr.Matrix44.from_scale",
"Malt.GL.Texture.CubeMapArray"
] |
[((9068, 9207), 'pyrr.Matrix44', 'pyrr.Matrix44', (['[x_scale, 0, 0, 0, 0, y_scale, 0, 0, 0, 0, -(far + near) / (far - near), -1,\n 0, 0, -2.0 * far * near / (far - near), 0]'], {}), '([x_scale, 0, 0, 0, 0, y_scale, 0, 0, 0, 0, -(far + near) / (\n far - near), -1, 0, 0, -2.0 * far * near / (far - near), 0])\n', (9081, 9207), False, 'import pyrr\n'), ((11943, 11973), 'pyrr.Matrix44.from_scale', 'pyrr.Matrix44.from_scale', (['size'], {}), '(size)\n', (11967, 11973), False, 'import pyrr\n'), ((11990, 12028), 'pyrr.Matrix44.from_translation', 'pyrr.Matrix44.from_translation', (['center'], {}), '(center)\n', (12020, 12028), False, 'import pyrr\n'), ((12104, 12168), 'pyrr.Matrix44', 'pyrr.Matrix44', (['[1, 0, 0, 0, 0, 1, 0, 0, 0, 0, -1, 0, 0, 0, 0, 1]'], {}), '([1, 0, 0, 0, 0, 1, 0, 0, 0, 0, -1, 0, 0, 0, 0, 1])\n', (12117, 12168), False, 'import pyrr\n'), ((3490, 3591), 'Malt.GL.Texture.TextureArray', 'TextureArray', (['(self.spot_resolution, self.spot_resolution)', 'self.max_spots', 'GL_DEPTH_COMPONENT32F'], {}), '((self.spot_resolution, self.spot_resolution), self.max_spots,\n GL_DEPTH_COMPONENT32F)\n', (3502, 3591), False, 'from Malt.GL.Texture import TextureArray, CubeMapArray\n'), ((3615, 3713), 'Malt.GL.Texture.TextureArray', 'TextureArray', (['(self.sun_resolution, self.sun_resolution)', 'self.max_suns', 'GL_DEPTH_COMPONENT32F'], {}), '((self.sun_resolution, self.sun_resolution), self.max_suns,\n GL_DEPTH_COMPONENT32F)\n', (3627, 3713), False, 'from Malt.GL.Texture import TextureArray, CubeMapArray\n'), ((3739, 3844), 'Malt.GL.Texture.CubeMapArray', 'CubeMapArray', (['(self.point_resolution, self.point_resolution)', 'self.max_points', 'GL_DEPTH_COMPONENT32F'], {}), '((self.point_resolution, self.point_resolution), self.\n max_points, GL_DEPTH_COMPONENT32F)\n', (3751, 3844), False, 'from Malt.GL.Texture import TextureArray, CubeMapArray\n'), ((5124, 5129), 'Malt.GL.Shader.UBO', 'UBO', ([], {}), '()\n', (5127, 5129), False, 'from Malt.GL.Shader import UBO\n'), ((5505, 5518), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5516, 5518), False, 'from collections import OrderedDict\n'), ((5539, 5552), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5550, 5552), False, 'from collections import OrderedDict\n'), ((5575, 5588), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5586, 5588), False, 'from collections import OrderedDict\n'), ((9000, 9019), 'math.tan', 'math.tan', (['(fov / 2.0)'], {}), '(fov / 2.0)\n', (9008, 9019), False, 'import math\n'), ((11007, 11051), 'pyrr.Vector3', 'pyrr.Vector3', (['[INFINITY, INFINITY, INFINITY]'], {}), '([INFINITY, INFINITY, INFINITY])\n', (11019, 11051), False, 'import pyrr\n'), ((11071, 11118), 'pyrr.Vector3', 'pyrr.Vector3', (['[-INFINITY, -INFINITY, -INFINITY]'], {}), '([-INFINITY, -INFINITY, -INFINITY])\n', (11083, 11118), False, 'import pyrr\n'), ((9670, 9697), 'pyrr.Vector4', 'pyrr.Vector4', (['[0, 0, -1, 1]'], {}), '([0, 0, -1, 1])\n', (9682, 9697), False, 'import pyrr\n'), ((10140, 10170), 'pyrr.Vector4', 'pyrr.Vector4', (['[0, 0, split, 1]'], {}), '([0, 0, split, 1])\n', (10152, 10170), False, 'import pyrr\n'), ((6676, 6703), 'pyrr.Matrix44', 'pyrr.Matrix44', (['light.matrix'], {}), '(light.matrix)\n', (6689, 6703), False, 'import pyrr\n'), ((6740, 6785), 'pyrr.Matrix44', 'pyrr.Matrix44', (['scene.camera.projection_matrix'], {}), '(scene.camera.projection_matrix)\n', (6753, 6785), False, 'import pyrr\n'), ((10729, 10755), 'pyrr.Vector4', 'pyrr.Vector4', (['[x, y, z, 1]'], {}), '([x, y, z, 1])\n', (10741, 10755), 
False, 'import pyrr\n'), ((6287, 6314), 'pyrr.Matrix44', 'pyrr.Matrix44', (['light.matrix'], {}), '(light.matrix)\n', (6300, 6314), False, 'import pyrr\n'), ((6836, 6877), 'pyrr.Matrix44', 'pyrr.Matrix44', (['scene.camera.camera_matrix'], {}), '(scene.camera.camera_matrix)\n', (6849, 6877), False, 'import pyrr\n'), ((7993, 8021), 'pyrr.Vector3', 'pyrr.Vector3', (['light.position'], {}), '(light.position)\n', (8005, 8021), False, 'import pyrr\n'), ((8050, 8071), 'pyrr.Vector3', 'pyrr.Vector3', (['axes[0]'], {}), '(axes[0])\n', (8062, 8071), False, 'import pyrr\n'), ((8097, 8118), 'pyrr.Vector3', 'pyrr.Vector3', (['axes[1]'], {}), '(axes[1])\n', (8109, 8118), False, 'import pyrr\n'), ((4006, 4044), 'Malt.GL.RenderTarget.ArrayLayerTarget', 'ArrayLayerTarget', (['self.spot_depth_t', 'i'], {}), '(self.spot_depth_t, i)\n', (4022, 4044), False, 'from Malt.GL.RenderTarget import ArrayLayerTarget, RenderTarget\n'), ((4198, 4235), 'Malt.GL.RenderTarget.ArrayLayerTarget', 'ArrayLayerTarget', (['self.sun_depth_t', 'i'], {}), '(self.sun_depth_t, i)\n', (4214, 4235), False, 'from Malt.GL.RenderTarget import ArrayLayerTarget, RenderTarget\n'), ((4405, 4444), 'Malt.GL.RenderTarget.ArrayLayerTarget', 'ArrayLayerTarget', (['self.point_depth_t', 'i'], {}), '(self.point_depth_t, i)\n', (4421, 4444), False, 'from Malt.GL.RenderTarget import ArrayLayerTarget, RenderTarget\n'), ((8155, 8208), 'pyrr.Matrix44.look_at', 'pyrr.Matrix44.look_at', (['position', '(position + front)', 'up'], {}), '(position, position + front, up)\n', (8176, 8208), False, 'import pyrr\n'), ((7393, 7417), 'pyrr.Matrix44.identity', 'pyrr.Matrix44.identity', ([], {}), '()\n', (7415, 7417), False, 'import pyrr\n')]
|
from random import choice
from dataclasses import dataclass
import core.constants as constants
from core.state import State
from pazaak.player import PazaakPlayer, Card
@dataclass
class PazaakState(State):
def get_all_states(self, player: PazaakPlayer):
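        # Enumerate successor states: one per playable side-deck card, plus standing and ending the turn unchanged.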
position = self.board.empty_positions(player)
all_states = []
for card in player.side_deck:
new_state = PazaakState(
board=self.board.move_new(position, player, card=card),
player=player,
players=self.players,
player_index=player.player - 1
)
board_size = len(new_state.board.board)
player_score = self.board._calculate_score(0, board_size // 2)
opponent_score = self.board._calculate_score(board_size // 2, board_size)
if player.player == constants.PLAYER and player_score == constants.END_SCORE:
new_state.player = PazaakPlayer(player=player.player, stand=True, side_deck=player.side_deck)
new_state.players = [new_state.player, self.players[1]]
if player.player == constants.OPPONENT and opponent_score == constants.END_SCORE:
new_state.player = PazaakPlayer(player=player.player, stand=True, side_deck=player.side_deck)
new_state.players = [self.players[0], new_state.player]
all_states.append(new_state)
# stand
new_players = []
new_player = PazaakPlayer(player=player.player, stand=True, side_deck=player.side_deck)
if self.player_index == 0:
new_players = [self.players[0], new_player]
else:
new_players = [new_player, self.players[1]]
all_states.append(
PazaakState(
board=self.board,
player=new_player,
players=new_players,
player_index=player.player - 1
)
)
# end turn without doing anything
all_states.append(
PazaakState(
board=self.board,
player=player,
players=self.players,
player_index=player.player - 1
)
)
return all_states
def random_card(self):
card = Card(score=choice(range(1, 11)))
position = self.board.empty_positions(self.player)
return PazaakState(
board=self.board.move_new(position, self.player, card=card),
players=self.players,
player=self.player,
player_index=self.player_index
)
def random_play(self):
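        # a standing player just passes the turn; otherwise play a random card, then pick a random successor state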
if self.player.stand:
self.player_index = self.player.player - 1
return self
self = self.random_card()
return choice(self.get_all_states(self.player))
|
[
"pazaak.player.PazaakPlayer"
] |
[((1487, 1561), 'pazaak.player.PazaakPlayer', 'PazaakPlayer', ([], {'player': 'player.player', 'stand': '(True)', 'side_deck': 'player.side_deck'}), '(player=player.player, stand=True, side_deck=player.side_deck)\n', (1499, 1561), False, 'from pazaak.player import PazaakPlayer, Card\n'), ((958, 1032), 'pazaak.player.PazaakPlayer', 'PazaakPlayer', ([], {'player': 'player.player', 'stand': '(True)', 'side_deck': 'player.side_deck'}), '(player=player.player, stand=True, side_deck=player.side_deck)\n', (970, 1032), False, 'from pazaak.player import PazaakPlayer, Card\n'), ((1235, 1309), 'pazaak.player.PazaakPlayer', 'PazaakPlayer', ([], {'player': 'player.player', 'stand': '(True)', 'side_deck': 'player.side_deck'}), '(player=player.player, stand=True, side_deck=player.side_deck)\n', (1247, 1309), False, 'from pazaak.player import PazaakPlayer, Card\n')]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Feb 26 15:15:37 2018
@author: <NAME>, <NAME>
"""
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from fnmatch import fnmatch
import sys
import os
import matplotlib.image as mpimg
import scipy
# Make sure that caffe is on the python path:
#set the caffe_FAST path
CAFFE_ROOT = '../path_to/caffe_FAST/'
sys.path.insert(0, CAFFE_ROOT + 'python')
import caffe
caffe.set_mode_gpu()
caffe.set_device(0)
#Set the path of deploy and trained model in the following line:
net = caffe.Net('/path_to/models_pose/model2_MPII_JHMDB/fcn-8s-pascal-deploy_300.prototxt', '/path_to/models_pose/model2_MPII_JHMDB/FCN_8S_snapshot_iter_300000.caffemodel', caffe.TEST)
def get_files_in_dir(DIR, pattern = None):
all_files = []
if os.path.isdir(DIR):
for path, subdirs, files in os.walk(DIR):
for name in files:
if pattern is not None:
if fnmatch(name, pattern):
all_files.append(os.path.join(path, name))
else:
all_files.append(os.path.join(path, name))
else:
print("{} DOES NOT EXIST!!".format(DIR))
return all_files
#
# redo later to properly do using mem_data_layer
#
def segment(in_file,path):
# load image, switch to BGR, subtract mean
im = Image.open(in_file)
#im = im.resize(( 240,320),Image.ANTIALIAS)
in_ = np.array(im, dtype=np.float32)
in_ = in_[:,:,::-1]
#in_ -= np.array((126.8420, 134.2887, 123.7515)) #NTU mean BGR values
in_ -= np.array((103.28, 105.99, 92.54)) #JHMDB mean BGR values
in_ = in_.transpose((2,0,1))
# load net
#net = caffe.Net('fcn-2s-pascal-DEPLOY.prototxt', 'fcn_2s_snapshot_iter_1400000.caffemodel', caffe.TEST)
#NEW NET ARCH
#net = caffe.Net('fcn-16s-pascal-deploy_300.prototxt', 'FCN_16S_snapshot_iter_300000.caffemodel', caffe.TEST)
# shape for input (data blob is N x C x H x W), set data
net.blobs['data'].reshape(1, 3, 240, 320)
net.blobs['data'].data[...] = in_
# run net and take argmax for prediction
net.forward()
out = net.blobs['upscore'].data[0].argmax(axis=0)
    # map each predicted class index (0-15) to an RGB colour and recolour the frame in place
    class_colors = {
        0: (0, 0, 0),
        1: (219, 112, 147),
        2: (32, 178, 170),
        3: (255, 182, 193),
        4: (148, 0, 211),
        5: (139, 0, 139),
        6: (46, 139, 87),
        7: (60, 179, 113),
        8: (218, 112, 214),
        9: (186, 85, 211),
        10: (50, 205, 50),
        11: (127, 255, 0),
        12: (255, 69, 0),
        13: (255, 127, 80),
        14: (255, 215, 0),
        15: (218, 165, 32),
    }
    for i in range(0, 240):
        for j in range(0, 320):
            label = out[i, j]
            if label in class_colors:
                im.putpixel((j, i), class_colors[label])
SEGMENTED_DIR = path
#Set the path for saving results
ddBase='/path_to_save/test_d/'
#SEGMENTED_DIR=SEGMENTED_DIR
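    # rebuild the output directory from the last three folder names of the input frame path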
FILE = (in_file.rsplit('/', 1))[1]
FILE = FILE.replace(" ", "")
FILE = (FILE.rsplit(".",1))[0]
FILE1 = (in_file.rsplit('/', 2))[1]
FILE1 = FILE1.replace(" ", "")
FILE2 = (in_file.rsplit('/', 3))[1]
FILE2 = FILE2.replace(" ", "")
FILE3 = (in_file.rsplit('/', 4))[1]
FILE3 = FILE3.replace(" ", "")
Seg_save_Dir=ddBase + "/" + FILE3 + "/" + FILE2+ "/" + FILE1
if not os.path.exists(Seg_save_Dir):
os.makedirs(Seg_save_Dir)
save_file = Seg_save_Dir+ "/" + FILE + ".jpg"
#print "path %s." % (save_file)
#save_file = SEGMENTED_DIR + "/" + FILE + "_seg.png"
#fig = plt.figure()
#a=fig.add_subplot(121,aspect='equal')
#plt.axis('off')
##img = mpimg.imread(im)
#imgplot = plt.imshow(im)
#a=fig.add_subplot(122,aspect='equal')
#plt.axis('off')
#imgplot = plt.imshow(out)
#fig.savefig(save_file)
#plt.close(fig)
#Uncertainty
#scipy.misc.imsave(save_file, out)
scipy.misc.imsave(save_file, im)
#im = im.resize((1242,375),Image.ANTIALIAS)
#save_file = SEGMENTED_DIR + "/" + FILE + "_seg2.png"
#scipy.misc.imsave(save_file, out2)
if __name__ == '__main__':
tt=1
#read lines and remove \n
#lines = [line.rstrip('\r') for line in open('ntu_videos_crossSubject_woMissedSamples_train.txt')]
#print lines
# Open the file for reading.
#set the path of data list. each line in the list is the location of video frames.
with open('/path_to_list/data_list.txt', 'r') as infile:
data = infile.read() # Read the contents of the file into memory.
# Return a list of the lines, breaking at line boundaries.
my_list = data.splitlines()
for path in my_list:
#print path
DATASET_DIR = path
in_files = get_files_in_dir(DATASET_DIR, "*.jpg")
print(tt,"/",len(my_list))
tt=tt+1
#print in_files
for in_f in range(len(in_files)):
segment(in_files[in_f],path)
|
[
"caffe.set_mode_gpu",
"os.makedirs",
"os.path.isdir",
"os.walk",
"os.path.exists",
"sys.path.insert",
"PIL.Image.open",
"caffe.set_device",
"numpy.array",
"scipy.misc.imsave",
"caffe.Net",
"os.path.join",
"fnmatch.fnmatch"
] |
[((386, 427), 'sys.path.insert', 'sys.path.insert', (['(0)', "(CAFFE_ROOT + 'python')"], {}), "(0, CAFFE_ROOT + 'python')\n", (401, 427), False, 'import sys\n'), ((443, 463), 'caffe.set_mode_gpu', 'caffe.set_mode_gpu', ([], {}), '()\n', (461, 463), False, 'import caffe\n'), ((464, 483), 'caffe.set_device', 'caffe.set_device', (['(0)'], {}), '(0)\n', (480, 483), False, 'import caffe\n'), ((555, 747), 'caffe.Net', 'caffe.Net', (['"""/path_to/models_pose/model2_MPII_JHMDB/fcn-8s-pascal-deploy_300.prototxt"""', '"""/path_to/models_pose/model2_MPII_JHMDB/FCN_8S_snapshot_iter_300000.caffemodel"""', 'caffe.TEST'], {}), "(\n '/path_to/models_pose/model2_MPII_JHMDB/fcn-8s-pascal-deploy_300.prototxt',\n '/path_to/models_pose/model2_MPII_JHMDB/FCN_8S_snapshot_iter_300000.caffemodel'\n , caffe.TEST)\n", (564, 747), False, 'import caffe\n'), ((809, 827), 'os.path.isdir', 'os.path.isdir', (['DIR'], {}), '(DIR)\n', (822, 827), False, 'import os\n'), ((1372, 1391), 'PIL.Image.open', 'Image.open', (['in_file'], {}), '(in_file)\n', (1382, 1391), False, 'from PIL import Image\n'), ((1450, 1480), 'numpy.array', 'np.array', (['im'], {'dtype': 'np.float32'}), '(im, dtype=np.float32)\n', (1458, 1480), True, 'import numpy as np\n'), ((1595, 1628), 'numpy.array', 'np.array', (['(103.28, 105.99, 92.54)'], {}), '((103.28, 105.99, 92.54))\n', (1603, 1628), True, 'import numpy as np\n'), ((7885, 7917), 'scipy.misc.imsave', 'scipy.misc.imsave', (['save_file', 'im'], {}), '(save_file, im)\n', (7902, 7917), False, 'import scipy\n'), ((865, 877), 'os.walk', 'os.walk', (['DIR'], {}), '(DIR)\n', (872, 877), False, 'import os\n'), ((7299, 7327), 'os.path.exists', 'os.path.exists', (['Seg_save_Dir'], {}), '(Seg_save_Dir)\n', (7313, 7327), False, 'import os\n'), ((7337, 7362), 'os.makedirs', 'os.makedirs', (['Seg_save_Dir'], {}), '(Seg_save_Dir)\n', (7348, 7362), False, 'import os\n'), ((973, 995), 'fnmatch.fnmatch', 'fnmatch', (['name', 'pattern'], {}), '(name, pattern)\n', (980, 995), False, 'from fnmatch import fnmatch\n'), ((1123, 1147), 'os.path.join', 'os.path.join', (['path', 'name'], {}), '(path, name)\n', (1135, 1147), False, 'import os\n'), ((1038, 1062), 'os.path.join', 'os.path.join', (['path', 'name'], {}), '(path, name)\n', (1050, 1062), False, 'import os\n')]
|
import telebot
from knapsack import knapsack
token = "YOUR_TELEGRAM_TOKEN_HERE"
bot = telebot.TeleBot(token)
W, val, itens, wt = 0, [], [], []
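# conversation flow: /knapsack -> item names -> item values -> item weights -> max capacity -> answer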
@bot.message_handler(commands=["knapsack"]) # /knapsack
def ask_itens(message):
global W, val, itens, wt
W, val, itens, wt = 0, [], [], []
chat_id = message.chat.id
bot.send_message(chat_id, "Digite o nome dos itens separados por vírgula")
bot.register_next_step_handler(message, ask_values)
def ask_values(message):
global itens
chat_id = message.chat.id
itens = message.text.split(",")
bot.send_message(chat_id, "Digite os valores dos itens separados por vírgula")
bot.register_next_step_handler(message, ask_weights)
def ask_weights(message):
global val
chat_id = message.chat.id
val = list(map(int, message.text.split(",")))
bot.send_message(chat_id, "Digite os pesos dos itens separados por vírgula.")
bot.register_next_step_handler(message, ask_max_weight)
def ask_max_weight(message):
global wt
chat_id = message.chat.id
wt = list(map(int, message.text.split(",")))
bot.send_message(chat_id, "Digite o peso máximo suportado pela mochila")
bot.register_next_step_handler(message, show_answer)
def show_answer(message):
global W, val, itens, wt
chat_id = message.chat.id
W = int(message.text)
answer = knapsack(W, wt, val, itens, len(val))
backpack = " ".join(answer[0])
total_value = answer[1]
bot.send_message(chat_id, f"Leve na mochilas os seguites itens: {backpack}")
bot.send_message(chat_id, f"Sua mochila terá o valor máximo de {total_value}")
bot.infinity_polling()
|
[
"telebot.TeleBot"
] |
[((91, 113), 'telebot.TeleBot', 'telebot.TeleBot', (['token'], {}), '(token)\n', (106, 113), False, 'import telebot\n')]
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetClustersResult',
'AwaitableGetClustersResult',
'get_clusters',
]
@pulumi.output_type
class GetClustersResult:
"""
A collection of values returned by getClusters.
"""
def __init__(__self__, id=None, names=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if names and not isinstance(names, list):
raise TypeError("Expected argument 'names' to be a list")
pulumi.set(__self__, "names", names)
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def names(self) -> Sequence[str]:
"""
Set of EKS clusters names
"""
return pulumi.get(self, "names")
class AwaitableGetClustersResult(GetClustersResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetClustersResult(
id=self.id,
names=self.names)
def get_clusters(opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetClustersResult:
"""
Retrieve EKS Clusters list
"""
__args__ = dict()
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:eks/getClusters:getClusters', __args__, opts=opts, typ=GetClustersResult).value
return AwaitableGetClustersResult(
id=__ret__.id,
names=__ret__.names)
|
[
"pulumi.get",
"pulumi.runtime.invoke",
"pulumi.set",
"pulumi.InvokeOptions"
] |
[((706, 736), 'pulumi.set', 'pulumi.set', (['__self__', '"""id"""', 'id'], {}), "(__self__, 'id', id)\n", (716, 736), False, 'import pulumi\n'), ((865, 901), 'pulumi.set', 'pulumi.set', (['__self__', '"""names"""', 'names'], {}), "(__self__, 'names', names)\n", (875, 901), False, 'import pulumi\n'), ((1067, 1089), 'pulumi.get', 'pulumi.get', (['self', '"""id"""'], {}), "(self, 'id')\n", (1077, 1089), False, 'import pulumi\n'), ((1235, 1260), 'pulumi.get', 'pulumi.get', (['self', '"""names"""'], {}), "(self, 'names')\n", (1245, 1260), False, 'import pulumi\n'), ((1712, 1734), 'pulumi.InvokeOptions', 'pulumi.InvokeOptions', ([], {}), '()\n', (1732, 1734), False, 'import pulumi\n'), ((1826, 1931), 'pulumi.runtime.invoke', 'pulumi.runtime.invoke', (['"""aws:eks/getClusters:getClusters"""', '__args__'], {'opts': 'opts', 'typ': 'GetClustersResult'}), "('aws:eks/getClusters:getClusters', __args__, opts=\n opts, typ=GetClustersResult)\n", (1847, 1931), False, 'import pulumi\n')]
|
import numpy as np
from py_vbc.constants import *
from py_vbc.interpolations import interpolate_tf
def sigma(k, tf_spline, R=8.0/hconst):
"""Integrand to calculate the mass fluctuations in a sphere of radius
R, up to some constant of proportionality C, using transfer
functions. Uses the fact that
sigma^2 = int dk/k Delta^2(k) w(k)
and
Delta^2(k) = C k^(3+ns) T^2(k)
w(x) is the window function, defined as the Fourier transform of a
real-space top hat function.
:param k: should be in units of Mpc^-1
:param Tk: value of total matter transfer function at k and at z=0
:param R: radius of sphere to calculate fluctuations, for sigma_8 this is
8 h^-1 Mpc
:returns: dsigma^2/C where C is a normalization constant
:rtype: float
"""
def w(x):
return (3/x**3)*(np.sin(x) - x*np.cos(x))
x = k*R
Tk = tf_spline(k)
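    # integrand of sigma^2/C: (dk/k) * k^(3+ns) * T(k)^2 * w(kR)^2 = k^(2+ns) * T(k)^2 * w(kR)^2 dk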
return k**(2+ns) * Tk**2 * w(x)**2
def calc_norm():
"""This calculates the value of the normalization constant, with
respect to sigma_8. The idea is that we already calculated
sigma_8/C from the transfer functions, so by dividing the
(specified) value of sigma_8 (at z=0) by our calculated sigma_8/C
we get sqrt(C), which we can use to go from transfer functions to
power spectra.
:returns: normalisation constant
:rtype: float
"""
from scipy.integrate import quad
tf0_spline = interpolate_tf(flag='t', z=0)
# Need to check limits on the spline
kmin = np.min(tf0_spline.x)
kmax = np.max(tf0_spline.x)
# Sigma is highly oscillatory above k ~ 5 Mpc^-1, so best to split
# the integral into two parts to improve the convergence --
# ideally kmid would be dynamically defined but 10 Mpc^-1 seems to
# work
kmid = 10.0
# Arguments for quad
epsrel = 1.0e-6
limit = int(1e6)
sigma_8c1 = np.sqrt(quad(lambda k: sigma(k, tf0_spline), kmin, kmid, limit=limit, epsrel=epsrel)[0])
sigma_8c2 = np.sqrt(quad(lambda k: sigma(k, tf0_spline), kmid, kmax, limit=limit, epsrel=epsrel)[0])
sigma_8c = sigma_8c1 + sigma_8c2
return sigma_8/sigma_8c
def calc_power_spec(k, g, zstart):
"""Calculates the power spectra at z=zinit. First evolves the z=1000
transfer functions forward using the linear growth factors
calculated earlier, then converts to power spectra by using the
normalization constant and
P(k) propto T(k)^2 k^ns
where ns is the tilt of the power spectrum.
:param k: (array) k-values for which the growth factors were calculated
:param g: (array) growth factors as produced by calc_derivs(), where the
first column is for CDM perturbations and the third column
is the baryon perturbations
:returns: the CDM and baryon power spectra
:rtype: arrays
"""
tf_b_spline = interpolate_tf('b', zstart)
tf_c_spline = interpolate_tf('c', zstart)
tf_b = tf_b_spline(k)
tf_c = tf_c_spline(k)
# In CICsASS the CDM TFs are used to calculate all of the power
# spectra -- I'm reproducing that here, but I'm not entirely
# convinced that's correct...
tf_spline = interpolate_tf('t', zstart)
tf = tf_spline(k)
# tf_c = tf
tf_b = tf_c
norm = calc_norm()
p_c = 2*np.pi**2 * norm**2 * g[:, 0]**2 * tf_c**2 * k**ns
p_b = 2*np.pi**2 * norm**2 * g[:, 2]**2 * tf_b**2 * k**ns
return p_c, p_b
def calc_delta(k, p):
"""Calculates the dimensionless power spectrum Delta^2 given a power
spectrum P(k)
:param k: k values of P(k)
:param p: power spectrum
:returns: Delta^2(k)
:rtype: array
"""
return p*k**3/(2*np.pi**2)
|
[
"py_vbc.interpolations.interpolate_tf",
"numpy.max",
"numpy.sin",
"numpy.min",
"numpy.cos"
] |
[((1468, 1497), 'py_vbc.interpolations.interpolate_tf', 'interpolate_tf', ([], {'flag': '"""t"""', 'z': '(0)'}), "(flag='t', z=0)\n", (1482, 1497), False, 'from py_vbc.interpolations import interpolate_tf\n'), ((1551, 1571), 'numpy.min', 'np.min', (['tf0_spline.x'], {}), '(tf0_spline.x)\n', (1557, 1571), True, 'import numpy as np\n'), ((1583, 1603), 'numpy.max', 'np.max', (['tf0_spline.x'], {}), '(tf0_spline.x)\n', (1589, 1603), True, 'import numpy as np\n'), ((2925, 2952), 'py_vbc.interpolations.interpolate_tf', 'interpolate_tf', (['"""b"""', 'zstart'], {}), "('b', zstart)\n", (2939, 2952), False, 'from py_vbc.interpolations import interpolate_tf\n'), ((2971, 2998), 'py_vbc.interpolations.interpolate_tf', 'interpolate_tf', (['"""c"""', 'zstart'], {}), "('c', zstart)\n", (2985, 2998), False, 'from py_vbc.interpolations import interpolate_tf\n'), ((3236, 3263), 'py_vbc.interpolations.interpolate_tf', 'interpolate_tf', (['"""t"""', 'zstart'], {}), "('t', zstart)\n", (3250, 3263), False, 'from py_vbc.interpolations import interpolate_tf\n'), ((868, 877), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (874, 877), True, 'import numpy as np\n'), ((882, 891), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (888, 891), True, 'import numpy as np\n')]
|
# Generated by Django 3.2.5 on 2021-07-19 07:29
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('projects', '0003_alter_post_url'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='pub_date',
),
]
|
[
"django.db.migrations.RemoveField"
] |
[((224, 282), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""post"""', 'name': '"""pub_date"""'}), "(model_name='post', name='pub_date')\n", (246, 282), False, 'from django.db import migrations\n')]
|
from django.core.management.base import BaseCommand, CommandError
from grade.views import gerar_grade
class Command(BaseCommand):
args = 'no args can be provided'
help = 'Generate grids from data dump'
def handle(self, *args, **options):
self.stdout.write('Started generating grids.')
gerar_grade({})
self.stdout.write('Successfully generated grids.')
|
[
"grade.views.gerar_grade"
] |
[((315, 330), 'grade.views.gerar_grade', 'gerar_grade', (['{}'], {}), '({})\n', (326, 330), False, 'from grade.views import gerar_grade\n')]
|
"""REST decorators"""
import logging
from decorator import decorator
from pylons.controllers.util import abort
from pylons.decorators.util import get_pylons
__all__ = ['dispatch_on', 'restrict']
log = logging.getLogger(__name__)
def restrict(*methods):
"""Restricts access to the function depending on HTTP method
Example:
.. code-block:: python
from pylons.decorators import rest
class SomeController(BaseController):
@rest.restrict('GET')
def comment(self, id):
"""
def check_methods(func, *args, **kwargs):
"""Wrapper for restrict"""
if get_pylons(args).request.method not in methods:
log.debug("Method not allowed by restrict")
abort(405, headers=[('Allow', ','.join(methods))])
return func(*args, **kwargs)
return decorator(check_methods)
def dispatch_on(**method_map):
"""Dispatches to alternate controller methods based on HTTP method
Multiple keyword arguments should be passed, with the keyword
corresponding to the HTTP method to dispatch on (DELETE, POST, GET,
etc.) and the value being the function to call. The value should be
a string indicating the name of the function to dispatch to.
Example:
.. code-block:: python
from pylons.decorators import rest
class SomeController(BaseController):
@rest.dispatch_on(POST='create_comment')
def comment(self):
# Do something with the comment
def create_comment(self, id):
# Do something if its a post to comment
"""
def dispatcher(func, self, *args, **kwargs):
"""Wrapper for dispatch_on"""
alt_method = method_map.get(get_pylons(args).request.method)
if alt_method:
alt_method = getattr(self, alt_method)
log.debug("Dispatching to %s instead", alt_method)
return self._inspect_call(alt_method, **kwargs)
return func(self, *args, **kwargs)
return decorator(dispatcher)
|
[
"decorator.decorator",
"pylons.decorators.util.get_pylons",
"logging.getLogger"
] |
[((205, 232), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (222, 232), False, 'import logging\n'), ((843, 867), 'decorator.decorator', 'decorator', (['check_methods'], {}), '(check_methods)\n', (852, 867), False, 'from decorator import decorator\n'), ((2029, 2050), 'decorator.decorator', 'decorator', (['dispatcher'], {}), '(dispatcher)\n', (2038, 2050), False, 'from decorator import decorator\n'), ((628, 644), 'pylons.decorators.util.get_pylons', 'get_pylons', (['args'], {}), '(args)\n', (638, 644), False, 'from pylons.decorators.util import get_pylons\n'), ((1745, 1761), 'pylons.decorators.util.get_pylons', 'get_pylons', (['args'], {}), '(args)\n', (1755, 1761), False, 'from pylons.decorators.util import get_pylons\n')]
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
__all__ = [
'SecretRotationRotationRulesArgs',
'SecretRotationRulesArgs',
]
@pulumi.input_type
class SecretRotationRotationRulesArgs:
def __init__(__self__, *,
automatically_after_days: pulumi.Input[float]):
"""
:param pulumi.Input[float] automatically_after_days: Specifies the number of days between automatic scheduled rotations of the secret.
"""
pulumi.set(__self__, "automatically_after_days", automatically_after_days)
@property
@pulumi.getter(name="automaticallyAfterDays")
def automatically_after_days(self) -> pulumi.Input[float]:
"""
Specifies the number of days between automatic scheduled rotations of the secret.
"""
return pulumi.get(self, "automatically_after_days")
@automatically_after_days.setter
def automatically_after_days(self, value: pulumi.Input[float]):
pulumi.set(self, "automatically_after_days", value)
@pulumi.input_type
class SecretRotationRulesArgs:
def __init__(__self__, *,
automatically_after_days: pulumi.Input[float]):
"""
:param pulumi.Input[float] automatically_after_days: Specifies the number of days between automatic scheduled rotations of the secret.
"""
pulumi.set(__self__, "automatically_after_days", automatically_after_days)
@property
@pulumi.getter(name="automaticallyAfterDays")
def automatically_after_days(self) -> pulumi.Input[float]:
"""
Specifies the number of days between automatic scheduled rotations of the secret.
"""
return pulumi.get(self, "automatically_after_days")
@automatically_after_days.setter
def automatically_after_days(self, value: pulumi.Input[float]):
pulumi.set(self, "automatically_after_days", value)
|
[
"pulumi.get",
"pulumi.getter",
"pulumi.set"
] |
[((849, 893), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""automaticallyAfterDays"""'}), "(name='automaticallyAfterDays')\n", (862, 893), False, 'import pulumi\n'), ((1714, 1758), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""automaticallyAfterDays"""'}), "(name='automaticallyAfterDays')\n", (1727, 1758), False, 'import pulumi\n'), ((754, 828), 'pulumi.set', 'pulumi.set', (['__self__', '"""automatically_after_days"""', 'automatically_after_days'], {}), "(__self__, 'automatically_after_days', automatically_after_days)\n", (764, 828), False, 'import pulumi\n'), ((1086, 1130), 'pulumi.get', 'pulumi.get', (['self', '"""automatically_after_days"""'], {}), "(self, 'automatically_after_days')\n", (1096, 1130), False, 'import pulumi\n'), ((1245, 1296), 'pulumi.set', 'pulumi.set', (['self', '"""automatically_after_days"""', 'value'], {}), "(self, 'automatically_after_days', value)\n", (1255, 1296), False, 'import pulumi\n'), ((1619, 1693), 'pulumi.set', 'pulumi.set', (['__self__', '"""automatically_after_days"""', 'automatically_after_days'], {}), "(__self__, 'automatically_after_days', automatically_after_days)\n", (1629, 1693), False, 'import pulumi\n'), ((1951, 1995), 'pulumi.get', 'pulumi.get', (['self', '"""automatically_after_days"""'], {}), "(self, 'automatically_after_days')\n", (1961, 1995), False, 'import pulumi\n'), ((2110, 2161), 'pulumi.set', 'pulumi.set', (['self', '"""automatically_after_days"""', 'value'], {}), "(self, 'automatically_after_days', value)\n", (2120, 2161), False, 'import pulumi\n')]
|
import os
import torch
import torch.nn.functional as F
import numpy as np
from collections import namedtuple
import time
import matplotlib.pyplot as plt
# decide whether to use the GPU or the CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def LpNormalize_cnn(input, p=2, cp=1, eps=1e-6):
r'''Calculate the unit vector on Lp sphere
:param input: tensor of weight, dims should be >= 2
:param p: the Lp parameter of weight
:param cp: the p power of current input, that means input = c*w^cp
:param eps:
:return: output = input/norm_d, norm_d = norm(input, p/cp)
'''
dim = input.dim()
norm_d = LpNorm_cnn(input, p, cp, eps)
inv_norm_d = 1 / norm_d
if dim == 2:
output = input.mul(inv_norm_d.view(input.size(0), 1))
elif dim == 3:
output = input.mul(inv_norm_d.view(input.size(0), 1, 1))
elif dim == 4:
output = input.mul(inv_norm_d.view(input.size(0), 1, 1, 1))
else:
raise ValueError('Expected input 2 <= dims <=4, got {}'.format(dim))
return output, norm_d
def LpNorm_cnn(input, p=2, cp=1, eps=1e-6):
r'''Calculate the Lp norm of weights
:param input: tensor of weight, dims should be >= 2, and the dim 0 is channels
:param p: the Lp parameter of weight
:param cp: the p power of current input, that means input = c*w^cp
:param eps:
:return: output = input/norm_d, norm_d = norm(input, p/cp)
'''
dim = input.dim()
if dim == 2:
norm_d = input.abs().pow(p / cp).sum(1).pow(cp / p).add(eps)
elif dim == 3:
norm_d = input.abs().pow(p / cp).sum(2).sum(1).pow(cp / p).add(eps)
elif dim == 4:
norm_d = input.abs().pow(p / cp).sum(3).sum(2).sum(1).pow(cp / p).add(eps)
else:
raise ValueError('Expected input 2 <= dims <=4, got {}'.format(dim))
return norm_d
def LpNormalize_layer(input, p=2, cp=1, eps=1e-6):
r'''Calculate the unit vector on Lp sphere (layer as a single vector)
:param input: tensor of weight, dims should be >= 2
:param p: the Lp parameter of weight
:param cp: the p power of current input, that means input = c*w^cp
:param eps:
:return: output = input/norm_d, norm_d = norm(input, p/cp)
'''
dim = input.dim()
if dim >= 2 and dim <= 4:
norm_d = input.abs().pow(p/cp).sum().pow(cp/p).add(eps)
inv_norm_d = 1/norm_d
output = input.mul(inv_norm_d)
else:
raise ValueError('Expected input 2 <= dims <=4, got {}'.format(dim))
return output, inv_norm_d
def Hoyer_layer_sparsity(input, eps=1e-8):
# Hoyer’s sparsity of a layer's weight
# the average sparsity of weight in a layer
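    # Hoyer sparsity of a d-dim vector w: (sqrt(d) - |w|_1/|w|_2) / (sqrt(d) - 1), i.e. 0 for a fully dense vector and 1 for a 1-sparse one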
dim = input.dim()
abs_in = input.abs()
d = np.prod(input.size()[1:])
sqrt_d = np.sqrt(d)
if dim == 2:
output = abs_in.sum(1).div(abs_in.pow(2).sum(1).pow(0.5).add(eps)).sub(sqrt_d).div(1-sqrt_d).mean(0)
elif dim == 3:
output = abs_in.sum(2).sum(1).div(abs_in.pow(2).sum(2).sum(1).pow(0.5).add(eps)).sub(sqrt_d).div(1-sqrt_d).mean(0)
elif dim == 4:
output = abs_in.sum(3).sum(2).sum(1).div(abs_in.pow(2).sum(3).sum(2).sum(1).pow(0.5).add(eps)).sub(sqrt_d).div(1-sqrt_d).mean(0)
else:
raise ValueError('Expected input 2 <= dims <=4, got {}'.format(dim))
return output
def Hoyer_layervec_sparsity(input, eps=1e-8):
    # Hoyer’s sparsity of a layer's weight
    # treating the whole layer as a single flattened vector
dim = input.dim()
abs_in = input.abs()
d = np.prod(input.size())
sqrt_d = np.sqrt(d)
if dim >= 2 and dim <= 4:
        output = abs_in.sum().div(abs_in.pow(2).sum().pow(0.5).add(eps)).sub(sqrt_d).div(1-sqrt_d)
else:
raise ValueError('Expected input 2 <= dims <=4, got {}'.format(dim))
return output
def Hoyer_net_sparsity(model):
# Hoyer’s sparsity of whole network
# the average sparsity of weight in whole network
weight_list = get_weight(model)
w_sparsity = []
num_w = [] # number of weight in each layer
for name, weight in weight_list:
if weight.dim() < 2:
continue
# sparsity
c_sparse = Hoyer_layer_sparsity(weight.data).item()
w_sparsity.append(c_sparse)
num_w.append(np.prod(weight.data.size()))
return np.average(w_sparsity, weights=num_w)
def Hoyer_net_ll_sparsity(model):
# Hoyer’s sparsity of whole network
# the average sparsity of weight in whole network
weight_list = get_weight(model)
w_sparsity = []
num_w = [] # number of weight in each layer
for name, weight in weight_list:
if weight.dim() < 2:
continue
# sparsity
c_sparse = Hoyer_layervec_sparsity(weight.data).item()
w_sparsity.append(c_sparse)
num_w.append(np.prod(weight.data.size()))
return np.average(w_sparsity, weights=num_w)
def Hoyer_activation_sparsity(input):
# Hoyer’s sparsity of a layer's activation
# the average sparsity of activation in a layer
return Hoyer_layer_sparsity(input)
def sparsify_weight(w, mask, h=0.1, eps=1e-8):
'''Weight sparsification by setting the small element to zero
Args:
w (torch.tensor): the weight for sparsification
mask (torch.tensor): the mask of no activated weight
h (torch.tensor/float, optional): the weight for sparsification (default: 0.1)
:return: w (sparse), mask
'''
wa = w.abs()
nmask_f = (~mask).float()
dim = wa.dim()
if dim == 2:
hh = wa.mul(nmask_f).sum(1).div(nmask_f.sum(1).add(eps)).mul(h)
mask = wa < hh.view(wa.size(0), 1)
w.masked_fill_(mask, 0)
elif dim == 3:
hh = wa.mul(nmask_f).sum(2).sum(1).div(nmask_f.sum(2).sum(1).add(eps)).mul(h)
mask = wa < hh.view(wa.size(0), 1, 1)
w.masked_fill_(mask, 0)
elif dim == 4:
hh = wa.mul(nmask_f).sum(3).sum(2).sum(1).div(nmask_f.sum(3).sum(2).sum(1).add(eps)).mul(h)
mask = wa < hh.view(wa.size(0), 1, 1, 1)
w.masked_fill_(mask, 0)
else:
raise ValueError('Expected dimension of input 2 <= dims <=4, got {}'.format(dim))
return w, mask
def sparsify_weight_ll(w, h=0.1):
'''Weight sparsification by setting the small element to zero
Args:
w (torch.tensor): the weight for sparsification
h (torch.tensor/float, optional): the weight for sparsification (default: 0.1)
    :return: w (sparse), mask
'''
wa = w.abs()
ws = wa.mul(wa)
dim = ws.dim()
if dim >= 2 and dim <= 4:
hh = ws.mean().sqrt().mul(h)
mask = wa < hh
w.masked_fill_(mask, 0)
else:
raise ValueError('Expected input 2 <= dims <=4, got {}'.format(dim))
return w, mask
def sparsify_grad(g, mask, h=0.1, eps=1e-10):
'''grow connection by activate large gradient
Args:
g (torch.tensor): the gradient of weight
mask (torch.tensor): the mask of no activated weight
h (torch.tensor/float, optional): the weight for sparsification (default: 0.1)
:return: mask
'''
ga = g.abs()
nmask_f = (~mask).float()
dim = ga.dim()
if dim == 2:
hh = ga.mul(nmask_f).sum(1).div(nmask_f.sum(1).add(eps)).mul(h)
mask = (ga < hh.view(ga.size(0), 1)) & mask
elif dim == 3:
hh = ga.mul(nmask_f).sum(2).sum(1).div(nmask_f.sum(2).sum(1).add(eps)).mul(h)
mask = (ga < hh.view(ga.size(0), 1, 1)) & mask
elif dim == 4:
hh = ga.mul(nmask_f).sum(3).sum(2).sum(1).div(nmask_f.sum(3).sum(2).sum(1).add(eps)).mul(h)
mask = (ga < hh.view(ga.size(0), 1, 1, 1)) & mask
else:
raise ValueError('Expected dimension of input 2 <= dims <=4, got {}'.format(dim))
return mask
def sparsify_grad_ll(g, mask, h=0.1, eps=1e-8):
'''grow connection by activate large gradient
Args:
g (torch.tensor): the gradient of weight
mask (torch.tensor): the mask of no activated weight
h (torch.tensor/float, optional): the weight for sparsification (default: 0.1)
    :return: mask
'''
ga = g.abs()
mask_f = mask.float()
nmask_f = 1 - mask_f
dim = ga.dim()
if dim >= 2 and dim <= 4:
hh = ga.mul(nmask_f).sum().div(nmask_f.sum().add(eps)).mul(h)
mask = (ga < hh) & mask
else:
raise ValueError('Expected input 2 <= dims <=4, got {}'.format(dim))
return mask
def orthogonalProjection(w, x):
# the projection of x orthogonal to the w
size_w = w.size()
size_x = x.size()
if size_w != size_x:
raise ValueError('Expected size of x should be same as w {}, got {}'.format(size_w, size_x))
dim = w.dim()
    if dim == 2:
        r = w.mul(x).sum(1)
        p = x.sub(w.mul(r.view(size_w[0], 1)))
    elif dim == 3:
        r = w.mul(x).sum(2).sum(1)
        p = x.sub(w.mul(r.view(size_w[0], 1, 1)))
    elif dim == 4:
        r = w.mul(x).sum(3).sum(2).sum(1)
        p = x.sub(w.mul(r.view(size_w[0], 1, 1, 1)))
    else:
        raise ValueError('Expected input 2 <= dims <=4, got {}'.format(dim))
    return p
def get_weight(model):
'''
    Get the list of the model's weight parameters.
    :param model: torch model
    :return: list of (name, parameter) tuples for parameters whose name contains 'weight'
'''
weight_list = []
for name, param in model.named_parameters():
if 'weight' in name:
weight = (name, param)
weight_list.append(weight)
return weight_list
def saveLists(lists, textname):
# save the lists to certain text
file = open(textname,'w')
for data in lists:
m = len(data)
for i, p in enumerate(data):
file.write(str(p))
if i<m-1:
file.write(',')
file.write('\n')
file.close()
|
[
"torch.cuda.is_available",
"numpy.average",
"numpy.sqrt"
] |
[((2774, 2784), 'numpy.sqrt', 'np.sqrt', (['d'], {}), '(d)\n', (2781, 2784), True, 'import numpy as np\n'), ((3545, 3555), 'numpy.sqrt', 'np.sqrt', (['d'], {}), '(d)\n', (3552, 3555), True, 'import numpy as np\n'), ((4278, 4315), 'numpy.average', 'np.average', (['w_sparsity'], {'weights': 'num_w'}), '(w_sparsity, weights=num_w)\n', (4288, 4315), True, 'import numpy as np\n'), ((4820, 4857), 'numpy.average', 'np.average', (['w_sparsity'], {'weights': 'num_w'}), '(w_sparsity, weights=num_w)\n', (4830, 4857), True, 'import numpy as np\n'), ((200, 225), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (223, 225), False, 'import torch\n')]
|
import pytest
from tests.utils import assert_pytest_passes
@pytest.fixture
def basic_case_dir(testdir):
case_dir = testdir.mkdir('case_dir')
case_dir.join('snapshot1.txt').write_text('the valuÉ of snapshot1.txt', 'utf-8')
return case_dir
def test_assert_match_with_external_snapshot_path(testdir, basic_case_dir):
testdir.makepyfile("""
from pathlib import Path
def test_sth(snapshot):
snapshot.snapshot_dir = 'case_dir'
snapshot.assert_match('the value of snapshot1.txt', Path('not_case_dir/snapshot1.txt').absolute())
""")
result = testdir.runpytest('-v')
result.stdout.fnmatch_lines([
'*::test_sth FAILED*',
"E* AssertionError: Snapshot path not_case_dir?snapshot1.txt is not in case_dir",
])
assert result.ret == 1
def test_assert_match_success(testdir, basic_case_dir):
testdir.makepyfile("""
def test_sth(snapshot):
snapshot.snapshot_dir = 'case_dir'
snapshot.assert_match('the valuÉ of snapshot1.txt', 'snapshot1.txt')
""")
assert_pytest_passes(testdir)
def test_assert_match_failure(testdir, basic_case_dir):
testdir.makepyfile("""
def test_sth(snapshot):
snapshot.snapshot_dir = 'case_dir'
snapshot.assert_match('the INCORRECT value of snapshot1.txt', 'snapshot1.txt')
""")
result = testdir.runpytest('-v')
result.stdout.fnmatch_lines([
'*::test_sth FAILED*',
">* raise AssertionError(snapshot_diff_msg)",
'E* AssertionError: value does not match the expected value in snapshot case_dir?snapshot1.txt',
"E* assert * == *",
"E* - the valuÉ of snapshot1.txt",
"E* ? ^",
"E* + the INCORRECT value of snapshot1.txt",
"E* ? ++++++++++ ^",
])
assert result.ret == 1
def test_assert_match_invalid_type(testdir, basic_case_dir):
testdir.makepyfile("""
def test_sth(snapshot):
snapshot.snapshot_dir = 'case_dir'
snapshot.assert_match(b'incorrect typed obj', 'snapshot1.txt')
""")
result = testdir.runpytest('-v')
result.stdout.fnmatch_lines([
'*::test_sth FAILED*',
'E* TypeError: value must be str',
])
assert result.ret == 1
def test_assert_match_missing_snapshot(testdir, basic_case_dir):
testdir.makepyfile("""
def test_sth(snapshot):
snapshot.snapshot_dir = 'case_dir'
snapshot.assert_match('something', 'snapshot_that_doesnt_exist.txt')
""")
result = testdir.runpytest('-v')
result.stdout.fnmatch_lines([
'*::test_sth FAILED*',
"E* snapshot case_dir?snapshot_that_doesnt_exist.txt doesn't exist. "
"(run pytest with --snapshot-update to create it)",
])
assert result.ret == 1
def test_assert_match_update_existing_snapshot_no_change(testdir, basic_case_dir):
testdir.makepyfile("""
def test_sth(snapshot):
snapshot.snapshot_dir = 'case_dir'
snapshot.assert_match('the valuÉ of snapshot1.txt', 'snapshot1.txt')
""")
result = testdir.runpytest('-v', '--snapshot-update')
result.stdout.fnmatch_lines([
'*::test_sth PASSED*',
])
assert result.ret == 0
assert_pytest_passes(testdir) # assert that snapshot update worked
@pytest.mark.parametrize('case_dir_repr',
["'case_dir'",
"str(Path('case_dir').absolute())",
"Path('case_dir')",
"Path('case_dir').absolute()"],
ids=['relative_string_case_dir',
'abs_string_case_dir',
'relative_path_case_dir',
'abs_path_case_dir'])
@pytest.mark.parametrize('snapshot_name_repr',
["'snapshot1.txt'",
"str(Path('case_dir/snapshot1.txt').absolute())",
"Path('case_dir/snapshot1.txt')", # TODO: support this or "Path('snapshot1.txt')"?
"Path('case_dir/snapshot1.txt').absolute()"],
ids=['relative_string_snapshot_name',
'abs_string_snapshot_name',
'relative_path_snapshot_name',
'abs_path_snapshot_name'])
def test_assert_match_update_existing_snapshot(testdir, basic_case_dir, case_dir_repr, snapshot_name_repr):
"""
Tests that `Snapshot.assert_match` works when updating an existing snapshot.
Also tests that `Snapshot` supports absolute/relative str/Path snapshot directories and snapshot paths.
"""
testdir.makepyfile("""
from pathlib import Path
def test_sth(snapshot):
snapshot.snapshot_dir = {case_dir_repr}
snapshot.assert_match('the NEW value of snapshot1.txt', {snapshot_name_repr})
""".format(case_dir_repr=case_dir_repr, snapshot_name_repr=snapshot_name_repr))
result = testdir.runpytest('-v', '--snapshot-update')
result.stdout.fnmatch_lines([
'*::test_sth PASSED*',
'*::test_sth ERROR*',
"E* AssertionError: Snapshot directory was modified: case_dir",
'E* Updated snapshots:',
'E* snapshot1.txt',
])
assert result.ret == 1
assert_pytest_passes(testdir) # assert that snapshot update worked
def test_assert_match_update_existing_snapshot_and_exception_in_test(testdir, basic_case_dir):
"""
Tests that `Snapshot.assert_match` works when updating an existing snapshot and then the test function fails.
In this case, both the snapshot update error and the test function error are printed out.
"""
testdir.makepyfile("""
from pathlib import Path
def test_sth(snapshot):
snapshot.snapshot_dir = 'case_dir'
snapshot.assert_match('the NEW value of snapshot1.txt', 'snapshot1.txt')
assert False
""")
result = testdir.runpytest('-v', '--snapshot-update')
result.stdout.fnmatch_lines([
'*::test_sth FAILED*',
'*::test_sth ERROR*',
"E* AssertionError: Snapshot directory was modified: case_dir",
'E* Updated snapshots:',
'E* snapshot1.txt',
'E* assert False',
])
assert result.ret == 1
def test_assert_match_create_new_snapshot(testdir, basic_case_dir):
testdir.makepyfile("""
def test_sth(snapshot):
snapshot.snapshot_dir = 'case_dir'
snapshot.assert_match('the NEW value of new_snapshot1.txt', 'sub_dir/new_snapshot1.txt')
""")
result = testdir.runpytest('-v', '--snapshot-update')
result.stdout.fnmatch_lines([
'*::test_sth PASSED*',
'*::test_sth ERROR*',
"E* Snapshot directory was modified: case_dir",
'E* Created snapshots:',
'E* sub_dir?new_snapshot1.txt',
])
assert result.ret == 1
assert_pytest_passes(testdir) # assert that snapshot update worked
def test_assert_match_create_new_snapshot_in_default_dir(testdir):
testdir.makepyfile("""
def test_sth(snapshot):
snapshot.assert_match('the value of new_snapshot1.txt', 'sub_dir/new_snapshot1.txt')
""")
result = testdir.runpytest('-v', '--snapshot-update')
result.stdout.fnmatch_lines([
'*::test_sth PASSED*',
'*::test_sth ERROR*',
"E* Snapshot directory was modified: snapshots?test_assert_match_create_new_snapshot_in_default_dir?test_sth",
'E* Created snapshots:',
'E* sub_dir?new_snapshot1.txt',
])
assert result.ret == 1
assert testdir.tmpdir.join(
'snapshots/test_assert_match_create_new_snapshot_in_default_dir/test_sth/sub_dir/new_snapshot1.txt'
).read_text('utf-8') == 'the value of new_snapshot1.txt'
assert_pytest_passes(testdir) # assert that snapshot update worked
def test_assert_match_existing_snapshot_is_not_file(testdir, basic_case_dir):
basic_case_dir.mkdir('directory1')
testdir.makepyfile("""
def test_sth(snapshot):
snapshot.snapshot_dir = 'case_dir'
snapshot.assert_match('something', 'directory1')
""")
result = testdir.runpytest('-v', '--snapshot-update')
result.stdout.fnmatch_lines([
'*::test_sth FAILED*',
"E* AssertionError: snapshot exists but is not a file: case_dir?directory1",
])
assert result.ret == 1
|
[
"pytest.mark.parametrize",
"tests.utils.assert_pytest_passes"
] |
[((3333, 3592), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""case_dir_repr"""', '["\'case_dir\'", "str(Path(\'case_dir\').absolute())", "Path(\'case_dir\')",\n "Path(\'case_dir\').absolute()"]'], {'ids': "['relative_string_case_dir', 'abs_string_case_dir',\n 'relative_path_case_dir', 'abs_path_case_dir']"}), '(\'case_dir_repr\', ["\'case_dir\'",\n "str(Path(\'case_dir\').absolute())", "Path(\'case_dir\')",\n "Path(\'case_dir\').absolute()"], ids=[\'relative_string_case_dir\',\n \'abs_string_case_dir\', \'relative_path_case_dir\', \'abs_path_case_dir\'])\n', (3356, 3592), False, 'import pytest\n'), ((3800, 4140), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""snapshot_name_repr"""', '["\'snapshot1.txt\'", "str(Path(\'case_dir/snapshot1.txt\').absolute())",\n "Path(\'case_dir/snapshot1.txt\')",\n "Path(\'case_dir/snapshot1.txt\').absolute()"]'], {'ids': "['relative_string_snapshot_name', 'abs_string_snapshot_name',\n 'relative_path_snapshot_name', 'abs_path_snapshot_name']"}), '(\'snapshot_name_repr\', ["\'snapshot1.txt\'",\n "str(Path(\'case_dir/snapshot1.txt\').absolute())",\n "Path(\'case_dir/snapshot1.txt\')",\n "Path(\'case_dir/snapshot1.txt\').absolute()"], ids=[\n \'relative_string_snapshot_name\', \'abs_string_snapshot_name\',\n \'relative_path_snapshot_name\', \'abs_path_snapshot_name\'])\n', (3823, 4140), False, 'import pytest\n'), ((1075, 1104), 'tests.utils.assert_pytest_passes', 'assert_pytest_passes', (['testdir'], {}), '(testdir)\n', (1095, 1104), False, 'from tests.utils import assert_pytest_passes\n'), ((3262, 3291), 'tests.utils.assert_pytest_passes', 'assert_pytest_passes', (['testdir'], {}), '(testdir)\n', (3282, 3291), False, 'from tests.utils import assert_pytest_passes\n'), ((5352, 5381), 'tests.utils.assert_pytest_passes', 'assert_pytest_passes', (['testdir'], {}), '(testdir)\n', (5372, 5381), False, 'from tests.utils import assert_pytest_passes\n'), ((6966, 6995), 'tests.utils.assert_pytest_passes', 'assert_pytest_passes', (['testdir'], {}), '(testdir)\n', (6986, 6995), False, 'from tests.utils import assert_pytest_passes\n'), ((7859, 7888), 'tests.utils.assert_pytest_passes', 'assert_pytest_passes', (['testdir'], {}), '(testdir)\n', (7879, 7888), False, 'from tests.utils import assert_pytest_passes\n')]
|
from django.contrib import messages
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.shortcuts import redirect, render, reverse
from django.views.generic import View
from .filters import PeerRecordFilterSet
from .forms import PeerRecordBulkEditForm
from .http import PeeringDB
from .models import Contact, Network, NetworkIXLAN, PeerRecord
from .tables import PeerRecordTable
from utils.views import BulkEditView
class CacheManagementView(View):
def get(self, request):
if not request.user.is_staff and not request.user.is_superuser:
messages.error(request, "You do not have the rights to index peer records.")
return redirect(reverse("home"))
last_synchronization = PeeringDB().get_last_synchronization()
sync_time = last_synchronization.time if last_synchronization else 0
context = {
"last_sync_time": sync_time,
"peeringdb_contact_count": Contact.objects.count(),
"peeringdb_network_count": Network.objects.count(),
"peeringdb_networkixlan_count": NetworkIXLAN.objects.count(),
"peer_record_count": PeerRecord.objects.count(),
}
return render(request, "peeringdb/cache.html", context)
class PeerRecordBulkEdit(PermissionRequiredMixin, BulkEditView):
permission_required = "peeringdb.change_peerrecord"
queryset = PeerRecord.objects.all()
filter = PeerRecordFilterSet
table = PeerRecordTable
form = PeerRecordBulkEditForm
|
[
"django.shortcuts.render",
"django.contrib.messages.error",
"django.shortcuts.reverse"
] |
[((1207, 1255), 'django.shortcuts.render', 'render', (['request', '"""peeringdb/cache.html"""', 'context'], {}), "(request, 'peeringdb/cache.html', context)\n", (1213, 1255), False, 'from django.shortcuts import redirect, render, reverse\n'), ((587, 663), 'django.contrib.messages.error', 'messages.error', (['request', '"""You do not have the rights to index peer records."""'], {}), "(request, 'You do not have the rights to index peer records.')\n", (601, 663), False, 'from django.contrib import messages\n'), ((692, 707), 'django.shortcuts.reverse', 'reverse', (['"""home"""'], {}), "('home')\n", (699, 707), False, 'from django.shortcuts import redirect, render, reverse\n')]
|
"""Plot 1d ovservables"""
from gna.ui import basecmd, append_typed, qualified
import matplotlib
from matplotlib import pyplot as plt
from matplotlib.ticker import AutoMinorLocator
from mpl_tools.helpers import savefig
import numpy as np
from gna.bindings import common
from gna.env import PartNotFoundError, env
class cmd(basecmd):
def __init__(self, *args, **kwargs):
basecmd.__init__(self, *args, **kwargs)
@classmethod
def initparser(cls, parser, env):
def observable(path):
try:
return env.ns('').getobservable(path)
except KeyError:
raise PartNotFoundError("observable", path)
parser.add_argument('data', metavar=('DATA',), type=observable, help='observable to store')
parser.add_argument('output', help='filename')
parser.add_argument('--header', default='', help='Header')
def run(self):
data = self.opts.data.data()
dt = self.opts.data.datatype()
header = self.opts.header
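        # histograms are dumped as (bin_left, bin_right, value) columns, point-like data as a single column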
if dt.isHist():
if dt.shape.size()!=1:
raise Exception('2d histograms not yet implemented')
edges = np.array(dt.edges)
edges_left, edges_right = edges[:-1], edges[1:]
dump = edges_left, edges_right, data
if not header:
header = 'bin_left bin_right data'
elif dt.isPoints():
dump = data,
if not header:
header = 'data'
else:
raise Exception('DataType is undefined')
dump = np.array(dump).T
try:
np.savetxt(self.opts.output, dump, header=header)
except:
raise Exception('Unable to write data to: '+self.opts.output)
print(('Dump data to: '+self.opts.output))
|
[
"gna.ui.basecmd.__init__",
"numpy.savetxt",
"gna.env.PartNotFoundError",
"numpy.array",
"gna.env.env.ns"
] |
[((383, 422), 'gna.ui.basecmd.__init__', 'basecmd.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (399, 422), False, 'from gna.ui import basecmd, append_typed, qualified\n'), ((1174, 1192), 'numpy.array', 'np.array', (['dt.edges'], {}), '(dt.edges)\n', (1182, 1192), True, 'import numpy as np\n'), ((1577, 1591), 'numpy.array', 'np.array', (['dump'], {}), '(dump)\n', (1585, 1591), True, 'import numpy as np\n'), ((1620, 1669), 'numpy.savetxt', 'np.savetxt', (['self.opts.output', 'dump'], {'header': 'header'}), '(self.opts.output, dump, header=header)\n', (1630, 1669), True, 'import numpy as np\n'), ((631, 668), 'gna.env.PartNotFoundError', 'PartNotFoundError', (['"""observable"""', 'path'], {}), "('observable', path)\n", (648, 668), False, 'from gna.env import PartNotFoundError, env\n'), ((549, 559), 'gna.env.env.ns', 'env.ns', (['""""""'], {}), "('')\n", (555, 559), False, 'from gna.env import PartNotFoundError, env\n')]
|
import turtle
# set the screen height and width to 100%
# of our screen height and width
turtle.Screen().setup(width=1.0, height=1.0)
# Write hello world using turtle
turtle.write(
"<NAME>",
font=('Verdana', 16, 'italic'),
align='center'
)
# Hide turtle
turtle.hideturtle()
# to keep the screen on in vscode
turtle.Screen().mainloop()
|
[
"turtle.write",
"turtle.hideturtle",
"turtle.Screen"
] |
[((170, 240), 'turtle.write', 'turtle.write', (['"""<NAME>"""'], {'font': "('Verdana', 16, 'italic')", 'align': '"""center"""'}), "('<NAME>', font=('Verdana', 16, 'italic'), align='center')\n", (182, 240), False, 'import turtle\n'), ((272, 291), 'turtle.hideturtle', 'turtle.hideturtle', ([], {}), '()\n', (289, 291), False, 'import turtle\n'), ((91, 106), 'turtle.Screen', 'turtle.Screen', ([], {}), '()\n', (104, 106), False, 'import turtle\n'), ((327, 342), 'turtle.Screen', 'turtle.Screen', ([], {}), '()\n', (340, 342), False, 'import turtle\n')]
|
import torch
def get_mean_and_std(dataset):
'''Compute the mean and std value of dataset.'''
dataloader = torch.utils.data.DataLoader(
dataset, batch_size=1, shuffle=True, num_workers=2)
mean = torch.zeros(3)
std = torch.zeros(3)
print('==> Computing mean and std..')
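    # accumulate per-channel mean/std one sample at a time, then average over the dataset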
for inputs, targets in dataloader:
for i in range(3):
mean[i] += inputs[:, i, :, :].mean()
std[i] += inputs[:, i, :, :].std()
mean.div_(len(dataset))
std.div_(len(dataset))
return mean, std
|
[
"torch.zeros",
"torch.utils.data.DataLoader"
] |
[((116, 195), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': '(1)', 'shuffle': '(True)', 'num_workers': '(2)'}), '(dataset, batch_size=1, shuffle=True, num_workers=2)\n', (143, 195), False, 'import torch\n'), ((216, 230), 'torch.zeros', 'torch.zeros', (['(3)'], {}), '(3)\n', (227, 230), False, 'import torch\n'), ((241, 255), 'torch.zeros', 'torch.zeros', (['(3)'], {}), '(3)\n', (252, 255), False, 'import torch\n')]
|
import numpy as np
import pandas as pd
from tensorflow.keras import Input
from keras.layers.core import Dropout, Dense
from keras.layers import LSTM, Bidirectional, Concatenate
from keras.layers.embeddings import Embedding
from keras.models import Model
from tensorflow.keras.preprocessing.text import Tokenizer
from src.utils import *
from model import (do_padding,get_extra,preprocess_text,convert_cities,convert_countries)
train = pd.read_csv("data/train.csv")
test = pd.read_csv("data/test.csv")
# Processing extra vars
keyword_bins = pd.read_csv("data/keyword_bins.csv", dtype={"keyword":str,
"keyword_bin":str})
locations = pd.read_csv("data/locations.csv")
def process_extra_vars(df, keyword_bins=keyword_bins, locations=locations):
df_plus = df.merge(keyword_bins, how="left", on = "keyword"). \
merge(locations, how="left", on="location")
df_plus.loc[df_plus["keyword_bin"].isna(), "keyword_bin"] = "missing"
df_plus = convert_cities(df_plus)
df_plus = convert_countries(df_plus)
df_plus = get_extra(df_plus)
dummies = pd.get_dummies(df_plus[["mention", "link", "hashtag",
"city", "country", "keyword_bin"]])
dummy_cols = dummies.columns
return dummies, dummy_cols
train_dummies, train_dummy_cols = process_extra_vars(train)
test_dummies, test_dummy_cols = process_extra_vars(test)
train_dummy_cols.difference(test_dummy_cols)
# Given that these countries don't exist in test, and we're building a new
# model, I'm going to drop these
train_dummies.drop(["country_south africa","country_spain"],axis=1,inplace=True)
# ensuring the same order
test_dummies = test_dummies[train_dummies.columns]
# Processing text
vocab_size = 8000
max_len = 25
"""
Preprocessing Text
"""
# Text
text_train = preprocess_text(train["text"])
text_test = preprocess_text(test["text"])
tokenizer = Tokenizer(num_words = vocab_size, oov_token = "<oov>")
tokenizer.fit_on_texts(text_train)
padded_train, _ = do_padding(text_train, tokenizer, max_len, "post", "post")
padded_test, _ = do_padding(text_test, tokenizer, max_len, "post", "post")
"""
Model
Concatenated tensorflow model
"""
dropout_rate = 0.5
input_1 = Input(shape=(max_len,))
input_2 = Input(shape=(len(train_dummies.columns),))
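# text branch: embedding over the padded token ids, followed by three stacked bidirectional LSTMs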
embedding_layer = Embedding(vocab_size, 36)(input_1)
lstm_1 = Bidirectional(LSTM(16, return_sequences=True, dropout=dropout_rate))(embedding_layer)
lstm_2 = Bidirectional(LSTM(16, return_sequences=True, dropout=dropout_rate))(lstm_1)
lstm_3 = Bidirectional(LSTM(16, dropout=dropout_rate))(lstm_2)
dense_1 = Dense(8, activation="relu")(lstm_3)
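# tabular branch: the dummy-encoded extra features (mention/link/hashtag/city/country/keyword bin) through dense + dropout layers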
dense_2 = Dense(64, activation="relu")(input_2)
dropout_1 = Dropout(dropout_rate)(dense_2)
dense_3 = Dense(32, activation="relu")(dropout_1)
dropout_2 = Dropout(dropout_rate)(dense_3)
dense_4 = Dense(8, activation="relu")(dropout_2)
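# merge both branches and classify with a sigmoid output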
concat_layer = Concatenate()([dense_1, dense_4])
dropout_3 = Dropout(dropout_rate)(concat_layer)
dense_4 = Dense(20, activation="relu")(dropout_3)
dropout_6 = Dropout(dropout_rate)(dense_4)
output = Dense(1, activation='sigmoid')(dropout_6)
model = Model(inputs=[input_1, input_2], outputs=output)
model.compile(loss='binary_crossentropy',
# optimizer=tf.keras.optimizers.Adam(learning_rate=0.01),
optimizer="adam",
metrics=['accuracy'])
model.summary()
history = model.fit(x=[padded_train, train_dummies], y=train["target"],
epochs=5, verbose=1)
preds = model.predict([padded_test, test_dummies])
preds_target = np.where(preds>0.5, 1, 0).reshape(-1)
submission = pd.DataFrame({"id":test["id"],
"target":preds_target})
submission.to_csv("submission.csv", index=False)
|
[
"pandas.DataFrame",
"model.convert_cities",
"tensorflow.keras.preprocessing.text.Tokenizer",
"keras.layers.embeddings.Embedding",
"keras.layers.core.Dense",
"model.convert_countries",
"pandas.read_csv",
"pandas.get_dummies",
"tensorflow.keras.Input",
"keras.layers.LSTM",
"keras.models.Model",
"keras.layers.Concatenate",
"numpy.where",
"model.preprocess_text",
"keras.layers.core.Dropout",
"model.get_extra",
"model.do_padding"
] |
[((440, 469), 'pandas.read_csv', 'pd.read_csv', (['"""data/train.csv"""'], {}), "('data/train.csv')\n", (451, 469), True, 'import pandas as pd\n'), ((477, 505), 'pandas.read_csv', 'pd.read_csv', (['"""data/test.csv"""'], {}), "('data/test.csv')\n", (488, 505), True, 'import pandas as pd\n'), ((546, 631), 'pandas.read_csv', 'pd.read_csv', (['"""data/keyword_bins.csv"""'], {'dtype': "{'keyword': str, 'keyword_bin': str}"}), "('data/keyword_bins.csv', dtype={'keyword': str, 'keyword_bin': str}\n )\n", (557, 631), True, 'import pandas as pd\n'), ((697, 730), 'pandas.read_csv', 'pd.read_csv', (['"""data/locations.csv"""'], {}), "('data/locations.csv')\n", (708, 730), True, 'import pandas as pd\n'), ((1877, 1907), 'model.preprocess_text', 'preprocess_text', (["train['text']"], {}), "(train['text'])\n", (1892, 1907), False, 'from model import do_padding, get_extra, preprocess_text, convert_cities, convert_countries\n'), ((1920, 1949), 'model.preprocess_text', 'preprocess_text', (["test['text']"], {}), "(test['text'])\n", (1935, 1949), False, 'from model import do_padding, get_extra, preprocess_text, convert_cities, convert_countries\n'), ((1963, 2013), 'tensorflow.keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {'num_words': 'vocab_size', 'oov_token': '"""<oov>"""'}), "(num_words=vocab_size, oov_token='<oov>')\n", (1972, 2013), False, 'from tensorflow.keras.preprocessing.text import Tokenizer\n'), ((2072, 2130), 'model.do_padding', 'do_padding', (['text_train', 'tokenizer', 'max_len', '"""post"""', '"""post"""'], {}), "(text_train, tokenizer, max_len, 'post', 'post')\n", (2082, 2130), False, 'from model import do_padding, get_extra, preprocess_text, convert_cities, convert_countries\n'), ((2148, 2205), 'model.do_padding', 'do_padding', (['text_test', 'tokenizer', 'max_len', '"""post"""', '"""post"""'], {}), "(text_test, tokenizer, max_len, 'post', 'post')\n", (2158, 2205), False, 'from model import do_padding, get_extra, preprocess_text, convert_cities, convert_countries\n'), ((2282, 2305), 'tensorflow.keras.Input', 'Input', ([], {'shape': '(max_len,)'}), '(shape=(max_len,))\n', (2287, 2305), False, 'from tensorflow.keras import Input\n'), ((3188, 3236), 'keras.models.Model', 'Model', ([], {'inputs': '[input_1, input_2]', 'outputs': 'output'}), '(inputs=[input_1, input_2], outputs=output)\n', (3193, 3236), False, 'from keras.models import Model\n'), ((3670, 3726), 'pandas.DataFrame', 'pd.DataFrame', (["{'id': test['id'], 'target': preds_target}"], {}), "({'id': test['id'], 'target': preds_target})\n", (3682, 3726), True, 'import pandas as pd\n'), ((1026, 1049), 'model.convert_cities', 'convert_cities', (['df_plus'], {}), '(df_plus)\n', (1040, 1049), False, 'from model import do_padding, get_extra, preprocess_text, convert_cities, convert_countries\n'), ((1064, 1090), 'model.convert_countries', 'convert_countries', (['df_plus'], {}), '(df_plus)\n', (1081, 1090), False, 'from model import do_padding, get_extra, preprocess_text, convert_cities, convert_countries\n'), ((1105, 1123), 'model.get_extra', 'get_extra', (['df_plus'], {}), '(df_plus)\n', (1114, 1123), False, 'from model import do_padding, get_extra, preprocess_text, convert_cities, convert_countries\n'), ((1143, 1236), 'pandas.get_dummies', 'pd.get_dummies', (["df_plus[['mention', 'link', 'hashtag', 'city', 'country', 'keyword_bin']]"], {}), "(df_plus[['mention', 'link', 'hashtag', 'city', 'country',\n 'keyword_bin']])\n", (1157, 1236), True, 'import pandas as pd\n'), ((2378, 2403), 'keras.layers.embeddings.Embedding', 'Embedding', 
(['vocab_size', '(36)'], {}), '(vocab_size, 36)\n', (2387, 2403), False, 'from keras.layers.embeddings import Embedding\n'), ((2667, 2694), 'keras.layers.core.Dense', 'Dense', (['(8)'], {'activation': '"""relu"""'}), "(8, activation='relu')\n", (2672, 2694), False, 'from keras.layers.core import Dropout, Dense\n'), ((2714, 2742), 'keras.layers.core.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""'}), "(64, activation='relu')\n", (2719, 2742), False, 'from keras.layers.core import Dropout, Dense\n'), ((2764, 2785), 'keras.layers.core.Dropout', 'Dropout', (['dropout_rate'], {}), '(dropout_rate)\n', (2771, 2785), False, 'from keras.layers.core import Dropout, Dense\n'), ((2805, 2833), 'keras.layers.core.Dense', 'Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (2810, 2833), False, 'from keras.layers.core import Dropout, Dense\n'), ((2857, 2878), 'keras.layers.core.Dropout', 'Dropout', (['dropout_rate'], {}), '(dropout_rate)\n', (2864, 2878), False, 'from keras.layers.core import Dropout, Dense\n'), ((2898, 2925), 'keras.layers.core.Dense', 'Dense', (['(8)'], {'activation': '"""relu"""'}), "(8, activation='relu')\n", (2903, 2925), False, 'from keras.layers.core import Dropout, Dense\n'), ((2953, 2966), 'keras.layers.Concatenate', 'Concatenate', ([], {}), '()\n', (2964, 2966), False, 'from keras.layers import LSTM, Bidirectional, Concatenate\n'), ((2999, 3020), 'keras.layers.core.Dropout', 'Dropout', (['dropout_rate'], {}), '(dropout_rate)\n', (3006, 3020), False, 'from keras.layers.core import Dropout, Dense\n'), ((3045, 3073), 'keras.layers.core.Dense', 'Dense', (['(20)'], {'activation': '"""relu"""'}), "(20, activation='relu')\n", (3050, 3073), False, 'from keras.layers.core import Dropout, Dense\n'), ((3097, 3118), 'keras.layers.core.Dropout', 'Dropout', (['dropout_rate'], {}), '(dropout_rate)\n', (3104, 3118), False, 'from keras.layers.core import Dropout, Dense\n'), ((3137, 3167), 'keras.layers.core.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (3142, 3167), False, 'from keras.layers.core import Dropout, Dense\n'), ((2436, 2489), 'keras.layers.LSTM', 'LSTM', (['(16)'], {'return_sequences': '(True)', 'dropout': 'dropout_rate'}), '(16, return_sequences=True, dropout=dropout_rate)\n', (2440, 2489), False, 'from keras.layers import LSTM, Bidirectional, Concatenate\n'), ((2531, 2584), 'keras.layers.LSTM', 'LSTM', (['(16)'], {'return_sequences': '(True)', 'dropout': 'dropout_rate'}), '(16, return_sequences=True, dropout=dropout_rate)\n', (2535, 2584), False, 'from keras.layers import LSTM, Bidirectional, Concatenate\n'), ((2617, 2647), 'keras.layers.LSTM', 'LSTM', (['(16)'], {'dropout': 'dropout_rate'}), '(16, dropout=dropout_rate)\n', (2621, 2647), False, 'from keras.layers import LSTM, Bidirectional, Concatenate\n'), ((3618, 3645), 'numpy.where', 'np.where', (['(preds > 0.5)', '(1)', '(0)'], {}), '(preds > 0.5, 1, 0)\n', (3626, 3645), True, 'import numpy as np\n')]
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import importlib
from ._format import (
OrdinationFormat, OrdinationDirectoryFormat,
ProcrustesStatisticsFmt, ProcrustesStatisticsDirFmt,
)
from ._type import PCoAResults, ProcrustesStatistics
__all__ = ['OrdinationFormat', 'OrdinationDirectoryFormat',
'ProcrustesStatisticsFmt', 'ProcrustesStatisticsDirFmt',
'PCoAResults', 'ProcrustesStatistics']
importlib.import_module('q2_types.ordination._transformer')
|
[
"importlib.import_module"
] |
[((734, 793), 'importlib.import_module', 'importlib.import_module', (['"""q2_types.ordination._transformer"""'], {}), "('q2_types.ordination._transformer')\n", (757, 793), False, 'import importlib\n')]
|
# -----------------------------------------------------------------------------
# Copyright (c) 2009-2016 <NAME>. All rights reserved.
# Distributed under the (new) BSD License.
# -----------------------------------------------------------------------------
"""
"""
import os
import re
import sys
import logging
import importlib
import numpy as np
from glumpy import gl
from glumpy.log import log
from glumpy.ext.inputhook import inputhook_manager, stdin_ready
from glumpy.app.window import backends
from . import parser
from . import configuration
from . import clock as _clock
from . clock import Clock
from . console import Console
from . viewport import Viewport
from . window import EventDispatcher
# Default clock
__clock__ = None
# Active windows
__windows__ = []
# Current backend
__backend__ = None
__running__ = False
# --------------------------------------------------------------------- fps ---
def fps():
"""
Get FPS from the default clock.
"""
return __clock__.get_fps()
# --------------------------------------------------------------------- use ---
def use(backend, api=None, major=None, minor=None, profile=None):
""" Select a specific backend
Parameters
----------
backend : ['osxglut', 'freeglut', 'pyglet', 'glfw', 'sdl', 'sdl2', 'pyside']
Graphical toolkit to use
api : ['GL'|'ES']
OpenGL API to use
major : int
OpenGL major version to use
minor : int
OpenGL minor version to use
profile : ['compatibility'|'core']
OpenGL profile to use
Note
----
A shortened version is available with the following syntax:
use("backend (api major.minor profile)")
For example, `use("glfw (GL 3.3 core)")`
"""
global __backend__
config = configuration.get_default()
# Parse options (in backend name, see note above)
exp = """(?P<backend>\w+)?
(.*\(
(.*(?P<api>GL|ES))?
(.*(?P<major>[1234])\.(?P<minor>[012345]))?
(.*(?P<profile>compatibility|core))?.*\))?"""
r = re.search(exp, backend, re.IGNORECASE | re.VERBOSE)
_backend = r.group('backend') or "glfw"
_api = r.group('api') or "GL"
_major = int(r.group('major') or str(config.major_version))
_minor = int(r.group('minor') or str(config.minor_version))
_profile = r.group('profile') or ""
# Arguments take precedence over shortened options
backend = _backend
api = api or _api
major = major or _major
minor = minor or _minor
profile = profile or _profile
config.api = api
config.major_version = major
config.minor_version = minor
config.profile = profile
if backend not in backends.__backends__:
log.critical("Unknown backend (%s)" % backend)
log.critical("Available backends are: %s", str(backends.__backends__))
sys.exit(0)
# BUG: For some reason, the import module changes the working directory
# We save it beforehand and restore it just after
workdir = os.getcwd()
name = "glumpy.app.window.backends.backend_" + backend
importlib.import_module(name)
backend = sys.modules[name]
os.chdir(workdir)
# Check availability
if backend.available():
__backend__ = backend
return backend
else:
log.warning("Backend (%s) not available" % backend)
return None
# ----------------------------------------------------------------- Window ---
class Window(object):
"""
Abstract Window
This class is responsible for finding a suitable backend and parsing
arguments.
"""
def __new__(cls, *args, **kwargs):
global __backend__
all = list(backends.__backends__)
options = parser.get_options()
# No backend was specified
# Check for command line argument then pick a default one if possible
if __backend__ is None:
if options.backend != all[0]:
all = [options.backend,] + all
for name in all:
backend = use(name)
if backend and backend.available():
__backend__ = backend
break
# No backend available, there's nothing we can do
if __backend__ is None:
log.critical("No suitable backend found")
raise NotImplementedError
config = configuration.get_default()
if "config" not in kwargs.keys():
kwargs['config'] = config
if 'vsync' not in kwargs.keys():
kwargs['vsync'] = options.vsync
# Get command line size
# if options.size:
# size = options.size.split(",")
# kwargs['width'] = int(size[0])
# kwargs['height'] = int(size[1])
# else:
# kwargs['width'] = kwargs.get('width', 512)
# kwargs['height'] = kwargs.get('height', 512)
# Get command line position
# if options.position:
# position = options.position.split(",")
# #kwargs['x'] = kwargs.get('x', int(position[0]))
# #kwargs['y'] = kwargs.get('y', int(position[1]))
# else:
# pass
# #kwargs['x'] = kwargs.get('x', 0)
# #kwargs['y'] = kwargs.get('y', 0)
# Create the backend window
window = __backend__.Window(*args, **kwargs)
window._backend = __backend__
window._config = config
log.info("Using %s (%s %d.%d)" %
(__backend__.name(), config.api,
config.major_version, config.minor_version))
if config.samples > 0:
log.info("Using multisampling with %d samples" %
(config.samples))
# Display fps options
if options.display_fps:
@window.timer(1.0)
def timer(elapsed):
print("Estimated FPS: %f"% fps())
return window
# --------------------------------------------------------------- __init__ ---
def __init__(clock=None, framerate=None, backend=None):
""" Initialize the main loop
Parameters
----------
clock : Clock
clock to use to run the app (gives the elementary tick)
framerate : int
frames per second
backend : python module
Backend module
"""
global __clock__
options = parser.get_options()
if options.debug:
log.setLevel(logging.DEBUG)
if framerate is None:
framerate = options.framerate
if framerate > 0:
log.info("Running at %d frames/second" % framerate)
else:
log.info("Running at full speed")
if clock is None:
__clock__ = _clock.get_default()
else:
__clock__ = clock
__clock__.set_fps_limit(framerate)
# OpenGL Initialization
for window in backend.windows():
window.activate()
gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 1)
gl.glPixelStorei(gl.GL_PACK_ALIGNMENT, 1)
gl.glEnable(gl.GL_VERTEX_PROGRAM_POINT_SIZE)
try: # This has been removed in 3.2 (it's now on by default)
gl.glEnable(gl.GL_POINT_SPRITE)
except:
pass
gl.glEnable(gl.GL_BLEND)
gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
# Initialize timers for all windows
for window in backend.windows():
window._clock = __clock__
# Start timers
for i in range(len(window._timer_stack)):
handler, interval = window._timer_stack[i]
__clock__.schedule_interval(handler, interval)
# Activate window
window.activate()
# Dispatch init event
window.dispatch_event('on_init')
# Dispatch an initial resize event
window.dispatch_event('on_resize', window._width, window._height)
return __clock__
# -------------------------------------------------------------------- quit ---
def quit():
global __running__
__running__ = False
# count = len(__backend__.windows())
# while count:
# dt = clock.tick()
# window = __backend__.windows()[-1]
# window.close()
# count = __backend__.process(dt)
# --------------------------------------------------------------------- run ---
def run(clock=None, framerate=None, interactive=None,
duration = sys.maxsize, framecount = sys.maxsize):
""" Run the main loop
Parameters
----------
clock : Clock
clock to use to run the app (gives the elementary tick)
framerate : int
frames per second
duration : float
Duration after which the app will be stopped
framecount : int
        Number of frames to display before stopping.
"""
global __running__
clock = __init__(clock=clock, framerate=framerate, backend=__backend__)
options = parser.get_options()
if interactive is None:
interactive = options.interactive
if interactive:
# Set interactive python session
os.environ['PYTHONINSPECT'] = '1'
import readline
readline.parse_and_bind("tab: complete")
def run():
while not stdin_ready():
__backend__.process(clock.tick())
return 0
inputhook_manager.set_inputhook(run)
else:
__running__ = True
def run(duration, framecount):
count = len(__backend__.windows())
while count and duration > 0 and framecount > 0 and __running__:
dt = clock.tick()
duration -= dt
framecount -= 1
count = __backend__.process(dt)
if options.record:
from .movie import record
try:
# Check if output file name given
name = sys.argv[2]
except:
# Obtain the name of the script that is being run
name = os.path.basename(sys.argv[0])
# Replace .py extension with .mp4
filename=re.sub('.py$', '.mp4', name)
log.info("Recording movie in '%s'" % filename)
with record(window=__backend__.windows()[0],
filename=filename,
fps=60):
run(duration, framecount)
else:
run(duration, framecount)
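# Illustrative usage sketch (added for clarity, not part of the original module):
# a minimal caller of this API, assuming the glumpy package is importable and a
# GL backend is available; the 512x512 size and 60 fps are arbitrary examples.
#
#   from glumpy import app
#
#   app.use("glfw")                        # pick a backend (see use() above)
#   window = app.Window(width=512, height=512)
#
#   @window.event
#   def on_draw(dt):
#       window.clear()
#
#   app.run(framerate=60)                  # enters the loop defined in run()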
|
[
"readline.parse_and_bind",
"glumpy.gl.glEnable",
"glumpy.log.log.setLevel",
"importlib.import_module",
"glumpy.gl.glBlendFunc",
"os.getcwd",
"glumpy.ext.inputhook.inputhook_manager.set_inputhook",
"glumpy.ext.inputhook.stdin_ready",
"os.path.basename",
"glumpy.log.log.critical",
"glumpy.gl.glPixelStorei",
"re.sub",
"glumpy.log.log.warning",
"re.search",
"os.chdir",
"sys.exit",
"glumpy.log.log.info"
] |
[((2079, 2130), 're.search', 're.search', (['exp', 'backend', '(re.IGNORECASE | re.VERBOSE)'], {}), '(exp, backend, re.IGNORECASE | re.VERBOSE)\n', (2088, 2130), False, 'import re\n'), ((3051, 3062), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3060, 3062), False, 'import os\n'), ((3126, 3155), 'importlib.import_module', 'importlib.import_module', (['name'], {}), '(name)\n', (3149, 3155), False, 'import importlib\n'), ((3192, 3209), 'os.chdir', 'os.chdir', (['workdir'], {}), '(workdir)\n', (3200, 3209), False, 'import os\n'), ((2755, 2801), 'glumpy.log.log.critical', 'log.critical', (["('Unknown backend (%s)' % backend)"], {}), "('Unknown backend (%s)' % backend)\n", (2767, 2801), False, 'from glumpy.log import log\n'), ((2889, 2900), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2897, 2900), False, 'import sys\n'), ((3335, 3386), 'glumpy.log.log.warning', 'log.warning', (["('Backend (%s) not available' % backend)"], {}), "('Backend (%s) not available' % backend)\n", (3346, 3386), False, 'from glumpy.log import log\n'), ((6448, 6475), 'glumpy.log.log.setLevel', 'log.setLevel', (['logging.DEBUG'], {}), '(logging.DEBUG)\n', (6460, 6475), False, 'from glumpy.log import log\n'), ((6571, 6622), 'glumpy.log.log.info', 'log.info', (["('Running at %d frames/second' % framerate)"], {}), "('Running at %d frames/second' % framerate)\n", (6579, 6622), False, 'from glumpy.log import log\n'), ((6641, 6674), 'glumpy.log.log.info', 'log.info', (['"""Running at full speed"""'], {}), "('Running at full speed')\n", (6649, 6674), False, 'from glumpy.log import log\n'), ((6915, 6958), 'glumpy.gl.glPixelStorei', 'gl.glPixelStorei', (['gl.GL_UNPACK_ALIGNMENT', '(1)'], {}), '(gl.GL_UNPACK_ALIGNMENT, 1)\n', (6931, 6958), False, 'from glumpy import gl\n'), ((6967, 7008), 'glumpy.gl.glPixelStorei', 'gl.glPixelStorei', (['gl.GL_PACK_ALIGNMENT', '(1)'], {}), '(gl.GL_PACK_ALIGNMENT, 1)\n', (6983, 7008), False, 'from glumpy import gl\n'), ((7017, 7061), 'glumpy.gl.glEnable', 'gl.glEnable', (['gl.GL_VERTEX_PROGRAM_POINT_SIZE'], {}), '(gl.GL_VERTEX_PROGRAM_POINT_SIZE)\n', (7028, 7061), False, 'from glumpy import gl\n'), ((7216, 7240), 'glumpy.gl.glEnable', 'gl.glEnable', (['gl.GL_BLEND'], {}), '(gl.GL_BLEND)\n', (7227, 7240), False, 'from glumpy import gl\n'), ((7249, 7307), 'glumpy.gl.glBlendFunc', 'gl.glBlendFunc', (['gl.GL_SRC_ALPHA', 'gl.GL_ONE_MINUS_SRC_ALPHA'], {}), '(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)\n', (7263, 7307), False, 'from glumpy import gl\n'), ((9097, 9137), 'readline.parse_and_bind', 'readline.parse_and_bind', (['"""tab: complete"""'], {}), "('tab: complete')\n", (9120, 9137), False, 'import readline\n'), ((9274, 9310), 'glumpy.ext.inputhook.inputhook_manager.set_inputhook', 'inputhook_manager.set_inputhook', (['run'], {}), '(run)\n', (9305, 9310), False, 'from glumpy.ext.inputhook import inputhook_manager, stdin_ready\n'), ((5681, 5745), 'glumpy.log.log.info', 'log.info', (["('Using multisampling with %d samples' % config.samples)"], {}), "('Using multisampling with %d samples' % config.samples)\n", (5689, 5745), False, 'from glumpy.log import log\n'), ((7143, 7174), 'glumpy.gl.glEnable', 'gl.glEnable', (['gl.GL_POINT_SPRITE'], {}), '(gl.GL_POINT_SPRITE)\n', (7154, 7174), False, 'from glumpy import gl\n'), ((10032, 10060), 're.sub', 're.sub', (['""".py$"""', '""".mp4"""', 'name'], {}), "('.py$', '.mp4', name)\n", (10038, 10060), False, 'import re\n'), ((10073, 10119), 'glumpy.log.log.info', 'log.info', (['("Recording movie in \'%s\'" % filename)'], {}), '("Recording movie in \'%s\'" % 
filename)\n', (10081, 10119), False, 'from glumpy.log import log\n'), ((4318, 4359), 'glumpy.log.log.critical', 'log.critical', (['"""No suitable backend found"""'], {}), "('No suitable backend found')\n", (4330, 4359), False, 'from glumpy.log import log\n'), ((9180, 9193), 'glumpy.ext.inputhook.stdin_ready', 'stdin_ready', ([], {}), '()\n', (9191, 9193), False, 'from glumpy.ext.inputhook import inputhook_manager, stdin_ready\n'), ((9935, 9964), 'os.path.basename', 'os.path.basename', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (9951, 9964), False, 'import os\n')]
|
#!/usr/bin/env python3
# vim:set fenc=utf-8:
import tkinter as tk
import subprocess
def run_cmd(cmd):
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
p.wait()
retval = p.returncode
if retval != 0:
print("An error occured when executing `{}`:".format(cmd))
print(err.decode("utf-8"))
return (False, None)
return (True, out.decode("utf-8").splitlines())
# Find STYLUS device TODO add cli option for this
success, lines = run_cmd("xsetwacom list")
if not success:
print("Failed to call xsetwacom!")
exit(1)
dev_id = -1
for l in lines:
li = l.split()
if li[-1] == 'STYLUS':
dev_id = int(li[-3])
print("Found STYLUS device with id {}".format(dev_id))
if dev_id == -1:
print("Failed to find device!")
exit(1)
print("Using STYLUS device with id {}".format(dev_id))
if not run_cmd("xsetwacom set {} ResetArea".format(dev_id))[0]:
print("Failed to reset area!")
exit(1)
success, lines = run_cmd("xsetwacom get {} Area".format(dev_id))
if not success or len(lines) != 1:
print("Failed to get area!")
exit(1)
l = lines[0].split()
if len(l) != 4:
print("Failed to get area!")
exit(1)
dev_width = int(l[2])
dev_height = int(l[3])
print("Reset STYLUS to native device area of {}x{}".format(dev_width, dev_height))
root = tk.Tk()
root.overrideredirect(True)
root.wait_visibility(root)
root.wm_attributes("-alpha", 0.5)
root.wm_attributes("-topmost", True)
root.title("wacom-size")
root.configure(background='red')
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
run_cmd("xsetwacom set {} MapToOutput {}x{}+{}+{}".format(dev_id, screen_width, screen_height, 0, 0))
win_width = 400
win_height = 300
win_xpos = 0
win_ypos = 0
ratio_min = 10
ratio_max = 100
ratio_step = 2
ratio = ratio_max
def updateWinSize():
global win_width
global win_height
global ratio
if ratio < ratio_min:
ratio = ratio_min
if ratio > ratio_max:
ratio = ratio_max
win_width = int((ratio / 100) * screen_height * (dev_width / dev_height))
win_height = int((ratio / 100) * screen_height)
updateWinSize()
def getPosStr():
xpos = win_xpos - win_width//2
ypos = win_ypos - win_height//2
if xpos + win_width > screen_width:
xpos = screen_width - win_width
if xpos < 0:
xpos = 0
if ypos + win_height > screen_height:
ypos = screen_height - win_height
if ypos < 0:
ypos = 0
return "{width}x{height}+{xpos}+{ypos}".format(width=win_width, height=win_height, xpos=xpos, ypos=ypos)
def setPos():
global win_xpos
global win_ypos
win_xpos, win_ypos = root.winfo_pointerxy()
root.geometry(getPosStr())
setPos()
def motion(event):
setPos()
def click(event):
pos_str = root.winfo_geometry()
if not run_cmd("xsetwacom set {} MapToOutput {}".format(dev_id, pos_str))[0]:
print("Failed to map to output!")
exit(1)
print("Mapped STYLUS to area {}".format(pos_str))
exit(0)
def mouse_wheel(event):
global ratio
if event.num == 5:
ratio -= ratio_step
if event.num == 4:
ratio += ratio_step
updateWinSize()
setPos()
prev_y = None
def mouse_hold_motion(event):
global prev_y
global ratio
setPos()
curr_y = win_ypos
if prev_y == None:
prev_y = curr_y
return
if curr_y > prev_y:
ratio -= ratio_step
elif curr_y < prev_y:
ratio += ratio_step
prev_y = curr_y
updateWinSize()
setPos()
def mouse_hold_release(event):
global prev_y
prev_y = None
root.bind('<Motion>', motion)
root.bind('<Button-1>', click)
root.bind("<Button-4>", mouse_wheel)
root.bind("<Button-5>", mouse_wheel)
root.bind("<B3-Motion>", mouse_hold_motion)
root.bind("<ButtonRelease-3>", mouse_hold_release)
print("Usage:")
print(" Move mouse to move mapped area.")
print(" Scroll or move mouse with right button pressed to adjust the size of the mapped area.")
print(" Click to set size of the mapped area and exit.")
root.mainloop()
|
[
"subprocess.Popen",
"tkinter.Tk"
] |
[((1379, 1386), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (1384, 1386), True, 'import tkinter as tk\n'), ((114, 171), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'shell': '(True)', 'stdout': 'subprocess.PIPE'}), '(cmd, shell=True, stdout=subprocess.PIPE)\n', (130, 171), False, 'import subprocess\n')]
|
#!/usr/bin/env python
from PIL import Image
import cv2
from Crypto.Cipher import AES
import hashlib
import getpass
import sys
def decrypt():
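    # Note added for clarity (scheme inferred from the code below): the payload
    # length is stored as the sum of the six channel values of the last two
    # pixels of the stego image; that many least-significant bits are then read
    # from the pixel channels in raster order, grouped into 8-bit characters,
    # hex-decoded, and finally AES-ECB decrypted with an MD5-derived key.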
print("Decrypting")
path = raw_input("Enter full path of image : ")
path = str(path)
img = cv2.imread(path)
binary = ""
list = []
lenght = 0
lenght = int(img[-1][-1][0])
lenght += int(img[-1][-1][1])
lenght += int(img[-1][-1][2])
lenght += int(img[-1][-2][0])
lenght += int(img[-1][-2][1])
lenght += int(img[-1][-2][2])
#print(lenght)
count = 0
for i in range(len(img)):
for j in range(len(img[i])):
for x in range(len(img[i][j])):
if count == lenght:
break
count += 1
if img[i][j][x] % 2 == 0:
binary = binary+"0"
elif img[i][j][x] % 2 != 0:
binary = binary+"1"
#print(binary)
a = 8
b = 0
for i in range(len(binary) / 8):
list.append(binary[b:a])
b = a
a += 8
liste = []
for i in range(len(list)):
a = str(list[i])
doc = int(a, 2)
char = chr(doc)
liste.append(char)
word = ""
word = ''.join(liste)
password = str("'-<PASSWORD>)-'")
deneme = word.decode('hex')
key = hashlib.md5(password)
k = key.hexdigest()
cipher = AES.new(k,AES.MODE_ECB) # AES MODE
dencrypted_data = cipher.decrypt(deneme)
print(dencrypted_data)
decrypt()
|
[
"cv2.imread",
"hashlib.md5",
"Crypto.Cipher.AES.new"
] |
[((267, 283), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (277, 283), False, 'import cv2\n'), ((1525, 1546), 'hashlib.md5', 'hashlib.md5', (['password'], {}), '(password)\n', (1536, 1546), False, 'import hashlib\n'), ((1592, 1616), 'Crypto.Cipher.AES.new', 'AES.new', (['k', 'AES.MODE_ECB'], {}), '(k, AES.MODE_ECB)\n', (1599, 1616), False, 'from Crypto.Cipher import AES\n')]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf.urls import url
from django.utils import timezone
from clock.shifts.views import ShiftManualCreate, \
ShiftManualEdit, ShiftManualDelete
from clock.shifts.views import ShiftMonthContractView, ShiftWeekView, ShiftYearView, ShiftDayView, \
shift_action
urlpatterns = [
# Shift URLs
# Display the ShiftMonthView as default with the current year-month
url(r'^$', ShiftMonthContractView.as_view(month_format='%m',
year=timezone.now().strftime("%Y"),
month=timezone.now().strftime("%m")),
name="list"
),
# View to handle all the quick-actions from the dashboard
url(r'^quick_action/$', shift_action,
name='quick_action'),
# CreateView to add a new shift
url(r'^new/$',
ShiftManualCreate.as_view(),
name='new'
),
# UpdateView to update an existing shift
url(r'^(?P<pk>\d+)/edit/$',
ShiftManualEdit.as_view(),
name='edit'
),
# DeleteView to delete an existing shift
url(r'^(?P<pk>\d+)/delete/$',
ShiftManualDelete.as_view(),
name='delete'
),
# Shift Archive URLs
url(r'^(?P<year>[0-9]{4})/(?P<month>[0-9]+)/(?P<day>[0-9]+)/$',
ShiftDayView.as_view(),
name="archive_day"),
url(r'^(?P<year>[0-9]{4})/week/(?P<week>[0-9]+)/$',
ShiftWeekView.as_view(),
name="archive_week"),
url(r'^(?P<year>[0-9]{4})/(?P<month>[0-9]+)/$',
ShiftMonthContractView.as_view(month_format='%m'),
name="archive_month_numeric"),
url(r'^(?P<year>[0-9]{4})/(?P<month>[0-9]+)/contract/(?P<contract>\d+)/$',
ShiftMonthContractView.as_view(month_format='%m'),
name="archive_month_contract_numeric"),
url(r'^(?P<year>[0-9]{4})/$',
ShiftYearView.as_view(),
name="article_year_archive"),
]
|
[
"clock.shifts.views.ShiftManualDelete.as_view",
"clock.shifts.views.ShiftWeekView.as_view",
"clock.shifts.views.ShiftDayView.as_view",
"clock.shifts.views.ShiftYearView.as_view",
"django.utils.timezone.now",
"clock.shifts.views.ShiftManualCreate.as_view",
"django.conf.urls.url",
"clock.shifts.views.ShiftManualEdit.as_view",
"clock.shifts.views.ShiftMonthContractView.as_view"
] |
[((776, 833), 'django.conf.urls.url', 'url', (['"""^quick_action/$"""', 'shift_action'], {'name': '"""quick_action"""'}), "('^quick_action/$', shift_action, name='quick_action')\n", (779, 833), False, 'from django.conf.urls import url\n'), ((907, 934), 'clock.shifts.views.ShiftManualCreate.as_view', 'ShiftManualCreate.as_view', ([], {}), '()\n', (932, 934), False, 'from clock.shifts.views import ShiftManualCreate, ShiftManualEdit, ShiftManualDelete\n'), ((1051, 1076), 'clock.shifts.views.ShiftManualEdit.as_view', 'ShiftManualEdit.as_view', ([], {}), '()\n', (1074, 1076), False, 'from clock.shifts.views import ShiftManualCreate, ShiftManualEdit, ShiftManualDelete\n'), ((1196, 1223), 'clock.shifts.views.ShiftManualDelete.as_view', 'ShiftManualDelete.as_view', ([], {}), '()\n', (1221, 1223), False, 'from clock.shifts.views import ShiftManualCreate, ShiftManualEdit, ShiftManualDelete\n'), ((1360, 1382), 'clock.shifts.views.ShiftDayView.as_view', 'ShiftDayView.as_view', ([], {}), '()\n', (1380, 1382), False, 'from clock.shifts.views import ShiftMonthContractView, ShiftWeekView, ShiftYearView, ShiftDayView, shift_action\n'), ((1477, 1500), 'clock.shifts.views.ShiftWeekView.as_view', 'ShiftWeekView.as_view', ([], {}), '()\n', (1498, 1500), False, 'from clock.shifts.views import ShiftMonthContractView, ShiftWeekView, ShiftYearView, ShiftDayView, shift_action\n'), ((1592, 1641), 'clock.shifts.views.ShiftMonthContractView.as_view', 'ShiftMonthContractView.as_view', ([], {'month_format': '"""%m"""'}), "(month_format='%m')\n", (1622, 1641), False, 'from clock.shifts.views import ShiftMonthContractView, ShiftWeekView, ShiftYearView, ShiftDayView, shift_action\n'), ((1769, 1818), 'clock.shifts.views.ShiftMonthContractView.as_view', 'ShiftMonthContractView.as_view', ([], {'month_format': '"""%m"""'}), "(month_format='%m')\n", (1799, 1818), False, 'from clock.shifts.views import ShiftMonthContractView, ShiftWeekView, ShiftYearView, ShiftDayView, shift_action\n'), ((1910, 1933), 'clock.shifts.views.ShiftYearView.as_view', 'ShiftYearView.as_view', ([], {}), '()\n', (1931, 1933), False, 'from clock.shifts.views import ShiftMonthContractView, ShiftWeekView, ShiftYearView, ShiftDayView, shift_action\n'), ((564, 578), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (576, 578), False, 'from django.utils import timezone\n'), ((647, 661), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (659, 661), False, 'from django.utils import timezone\n')]
|
"""
Module for reading temperature from the Raspberry Pi 1-wire interface.
"""
import os
import glob
import time
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
BASE_DIR = '/sys/bus/w1/devices/'
DEVICE_FOLDER = glob.glob(BASE_DIR + '28*')[0]
DEVICE_FILE = DEVICE_FOLDER + '/w1_slave'
def read_temp_raw():
"""Read the raw data."""
file_name = open(DEVICE_FILE, 'r')
lines = file_name.readlines()
file_name.close()
return lines
def read_temp():
"""Convert the raw data into a temperature output."""
lines = read_temp_raw()
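    # The w1_slave file of a 28*-prefixed (DS18B20-family) sensor holds two
    # lines: the first ends in 'YES' once the CRC check has passed, and the
    # second reports the raw reading as 't=<value>' in thousandths of a degree
    # Celsius, hence the division by 1000 below.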
while lines[0].strip()[-3:] != 'YES':
time.sleep(0.2)
lines = read_temp_raw()
equals_pos = lines[1].find('t=')
if equals_pos != -1:
temp_string = lines[1][equals_pos+2:]
temp_c = float(temp_string) / 1000.0
return temp_c
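# Usage sketch (added for illustration, not part of the original module):
# poll the sensor once per second; assumes a single 28*-prefixed device is
# present on the 1-wire bus, as required by the glob above.
#
#   if __name__ == '__main__':
#       while True:
#           print(read_temp())
#           time.sleep(1)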
|
[
"time.sleep",
"os.system",
"glob.glob"
] |
[((113, 142), 'os.system', 'os.system', (['"""modprobe w1-gpio"""'], {}), "('modprobe w1-gpio')\n", (122, 142), False, 'import os\n'), ((143, 173), 'os.system', 'os.system', (['"""modprobe w1-therm"""'], {}), "('modprobe w1-therm')\n", (152, 173), False, 'import os\n'), ((225, 252), 'glob.glob', 'glob.glob', (["(BASE_DIR + '28*')"], {}), "(BASE_DIR + '28*')\n", (234, 252), False, 'import glob\n'), ((615, 630), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (625, 630), False, 'import time\n')]
|
import torch.nn as nn
import torch.nn.functional as F
from .loss_blocks import SSIM, smooth_grad_1st, smooth_grad_2nd, TernaryLoss
from utils.warp_utils import flow_warp
from utils.warp_utils import get_occu_mask_bidirection, get_occu_mask_backward
class unFlowLoss(nn.modules.Module):
def __init__(self, cfg):
super(unFlowLoss, self).__init__()
self.cfg = cfg
def loss_photomatric(self, im1_scaled, im1_recons, occu_mask1):
loss = []
if self.cfg.w_l1 > 0:
loss += [self.cfg.w_l1 * (im1_scaled - im1_recons).abs() * occu_mask1]
if self.cfg.w_ssim > 0:
loss += [self.cfg.w_ssim * SSIM(im1_recons * occu_mask1,
im1_scaled * occu_mask1)]
if self.cfg.w_ternary > 0:
loss += [self.cfg.w_ternary * TernaryLoss(im1_recons * occu_mask1,
im1_scaled * occu_mask1)]
return sum([l.mean() for l in loss]) / occu_mask1.mean()
def loss_smooth(self, flow, im1_scaled):
if 'smooth_2nd' in self.cfg and self.cfg.smooth_2nd:
func_smooth = smooth_grad_2nd
else:
func_smooth = smooth_grad_1st
loss = []
loss += [func_smooth(flow, im1_scaled, self.cfg.alpha)]
return sum([l.mean() for l in loss])
def forward(self, output, target):
"""
:param output: Multi-scale forward/backward flows n * [B x 4 x h x w]
:param target: image pairs Nx6xHxW
:return:
"""
pyramid_flows = output
im1_origin = target[:, :3]
im2_origin = target[:, 3:]
pyramid_smooth_losses = []
pyramid_warp_losses = []
self.pyramid_occu_mask1 = []
self.pyramid_occu_mask2 = []
s = 1.
for i, flow in enumerate(pyramid_flows):
if self.cfg.w_scales[i] == 0:
pyramid_warp_losses.append(0)
pyramid_smooth_losses.append(0)
continue
b, _, h, w = flow.size()
# resize images to match the size of layer
im1_scaled = F.interpolate(im1_origin, (h, w), mode='area')
im2_scaled = F.interpolate(im2_origin, (h, w), mode='area')
im1_recons = flow_warp(im2_scaled, flow[:, :2], pad=self.cfg.warp_pad)
im2_recons = flow_warp(im1_scaled, flow[:, 2:], pad=self.cfg.warp_pad)
if i == 0:
if self.cfg.occ_from_back:
occu_mask1 = 1 - get_occu_mask_backward(flow[:, 2:], th=0.2)
occu_mask2 = 1 - get_occu_mask_backward(flow[:, :2], th=0.2)
else:
occu_mask1 = 1 - get_occu_mask_bidirection(flow[:, :2], flow[:, 2:])
occu_mask2 = 1 - get_occu_mask_bidirection(flow[:, 2:], flow[:, :2])
else:
occu_mask1 = F.interpolate(self.pyramid_occu_mask1[0],
(h, w), mode='nearest')
occu_mask2 = F.interpolate(self.pyramid_occu_mask2[0],
(h, w), mode='nearest')
self.pyramid_occu_mask1.append(occu_mask1)
self.pyramid_occu_mask2.append(occu_mask2)
loss_warp = self.loss_photomatric(im1_scaled, im1_recons, occu_mask1)
if i == 0:
s = min(h, w)
loss_smooth = self.loss_smooth(flow[:, :2] / s, im1_scaled)
if self.cfg.with_bk:
loss_warp += self.loss_photomatric(im2_scaled, im2_recons,
occu_mask2)
loss_smooth += self.loss_smooth(flow[:, 2:] / s, im2_scaled)
loss_warp /= 2.
loss_smooth /= 2.
pyramid_warp_losses.append(loss_warp)
pyramid_smooth_losses.append(loss_smooth)
pyramid_warp_losses = [l * w for l, w in
zip(pyramid_warp_losses, self.cfg.w_scales)]
pyramid_smooth_losses = [l * w for l, w in
zip(pyramid_smooth_losses, self.cfg.w_sm_scales)]
warp_loss = sum(pyramid_warp_losses)
smooth_loss = self.cfg.w_smooth * sum(pyramid_smooth_losses)
total_loss = warp_loss + smooth_loss
return total_loss, warp_loss, smooth_loss, pyramid_flows[0].abs().mean()
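# Usage sketch (illustrative only; the cfg fields come from the project config
# and the tensors below are assumptions): the loss consumes the pyramid of
# predicted forward/backward flows plus the concatenated image pair.
#
#   criterion = unFlowLoss(cfg)
#   target = torch.cat([im1, im2], dim=1)          # N x 6 x H x W
#   total, l_photo, l_smooth, mean_flow = criterion(pyramid_flows, target)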
|
[
"torch.nn.functional.interpolate",
"utils.warp_utils.get_occu_mask_bidirection",
"utils.warp_utils.get_occu_mask_backward",
"utils.warp_utils.flow_warp"
] |
[((2142, 2188), 'torch.nn.functional.interpolate', 'F.interpolate', (['im1_origin', '(h, w)'], {'mode': '"""area"""'}), "(im1_origin, (h, w), mode='area')\n", (2155, 2188), True, 'import torch.nn.functional as F\n'), ((2214, 2260), 'torch.nn.functional.interpolate', 'F.interpolate', (['im2_origin', '(h, w)'], {'mode': '"""area"""'}), "(im2_origin, (h, w), mode='area')\n", (2227, 2260), True, 'import torch.nn.functional as F\n'), ((2287, 2344), 'utils.warp_utils.flow_warp', 'flow_warp', (['im2_scaled', 'flow[:, :2]'], {'pad': 'self.cfg.warp_pad'}), '(im2_scaled, flow[:, :2], pad=self.cfg.warp_pad)\n', (2296, 2344), False, 'from utils.warp_utils import flow_warp\n'), ((2370, 2427), 'utils.warp_utils.flow_warp', 'flow_warp', (['im1_scaled', 'flow[:, 2:]'], {'pad': 'self.cfg.warp_pad'}), '(im1_scaled, flow[:, 2:], pad=self.cfg.warp_pad)\n', (2379, 2427), False, 'from utils.warp_utils import flow_warp\n'), ((2904, 2969), 'torch.nn.functional.interpolate', 'F.interpolate', (['self.pyramid_occu_mask1[0]', '(h, w)'], {'mode': '"""nearest"""'}), "(self.pyramid_occu_mask1[0], (h, w), mode='nearest')\n", (2917, 2969), True, 'import torch.nn.functional as F\n'), ((3042, 3107), 'torch.nn.functional.interpolate', 'F.interpolate', (['self.pyramid_occu_mask2[0]', '(h, w)'], {'mode': '"""nearest"""'}), "(self.pyramid_occu_mask2[0], (h, w), mode='nearest')\n", (3055, 3107), True, 'import torch.nn.functional as F\n'), ((2532, 2575), 'utils.warp_utils.get_occu_mask_backward', 'get_occu_mask_backward', (['flow[:, 2:]'], {'th': '(0.2)'}), '(flow[:, 2:], th=0.2)\n', (2554, 2575), False, 'from utils.warp_utils import get_occu_mask_bidirection, get_occu_mask_backward\n'), ((2613, 2656), 'utils.warp_utils.get_occu_mask_backward', 'get_occu_mask_backward', (['flow[:, :2]'], {'th': '(0.2)'}), '(flow[:, :2], th=0.2)\n', (2635, 2656), False, 'from utils.warp_utils import get_occu_mask_bidirection, get_occu_mask_backward\n'), ((2716, 2767), 'utils.warp_utils.get_occu_mask_bidirection', 'get_occu_mask_bidirection', (['flow[:, :2]', 'flow[:, 2:]'], {}), '(flow[:, :2], flow[:, 2:])\n', (2741, 2767), False, 'from utils.warp_utils import get_occu_mask_bidirection, get_occu_mask_backward\n'), ((2805, 2856), 'utils.warp_utils.get_occu_mask_bidirection', 'get_occu_mask_bidirection', (['flow[:, 2:]', 'flow[:, :2]'], {}), '(flow[:, 2:], flow[:, :2])\n', (2830, 2856), False, 'from utils.warp_utils import get_occu_mask_bidirection, get_occu_mask_backward\n')]
|
import datetime
import json
import os
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, tsv, timeline, is_platform_windows
def get_playStoreDevices(files_found, report_folder, seeker, wrap_text):
for file_found in files_found:
file_found = str(file_found)
        if not os.path.basename(file_found) == 'Devices.json': # skip any other files
continue
with open(file_found, encoding = 'utf-8', mode = 'r') as f:
data = json.loads(f.read())
data_list = []
for x in range(0, len(data)):
carrierName = data[x]['device']['mostRecentData']['carrierName']
manufacturer = data[x]['device']['mostRecentData']['manufacturer']
modelName = data[x]['device']['mostRecentData']['modelName']
deviceName = data[x]['device']['mostRecentData']['deviceName']
totalMemoryBytes = str(round(int(data[x]['device']['mostRecentData']['totalMemoryBytes'])/1000000000,2))
deviceIpCountry = data[x]['device']['mostRecentData']['deviceIpCountry']
androidSdkVersion = data[x]['device']['mostRecentData']['androidSdkVersion']
deviceRegistrationTime = data[x]['device']['deviceRegistrationTime']
deviceRegistrationTime = deviceRegistrationTime.replace('T', ' ').replace('Z', '')
userAddedOnDeviceTime = data[x]['device']['userAddedOnDeviceTime']
userAddedOnDeviceTime = userAddedOnDeviceTime.replace('T', ' ').replace('Z', '')
lastTimeDeviceActive = data[x]['device']['lastTimeDeviceActive']
lastTimeDeviceActive = lastTimeDeviceActive.replace('T', ' ').replace('Z', '')
data_list.append((deviceRegistrationTime, userAddedOnDeviceTime, lastTimeDeviceActive, manufacturer, modelName, totalMemoryBytes, carrierName, deviceIpCountry, deviceName, androidSdkVersion))
num_entries = len(data_list)
if num_entries > 0:
description = 'Metadata about your devices that have accessed the Google Play Store.'
report = ArtifactHtmlReport('Google Play Store Devices')
report.start_artifact_report(report_folder, 'Google Play Store Devices',description)
report.add_script()
data_headers = ('Device Registration Timestamp','User Added Timestamp','Last Device Active Timestamp','Device Manufacturer','Device Model','Device RAM (GBs)','Carrier','Device IP Country','Device Name','SDK Version ')
report.write_artifact_data_table(data_headers, data_list, file_found)
report.end_artifact_report()
tsvname = f'Google Play Store Devices'
tsv(report_folder, data_headers, data_list, tsvname)
tlactivity = f'Google Play Store Devices'
timeline(report_folder, tlactivity, data_list, data_headers)
else:
logfunc('No Google Play Store Devices data available')
|
[
"os.path.basename",
"scripts.ilapfuncs.tsv",
"scripts.ilapfuncs.timeline",
"scripts.ilapfuncs.logfunc",
"scripts.artifact_report.ArtifactHtmlReport"
] |
[((2134, 2181), 'scripts.artifact_report.ArtifactHtmlReport', 'ArtifactHtmlReport', (['"""Google Play Store Devices"""'], {}), "('Google Play Store Devices')\n", (2152, 2181), False, 'from scripts.artifact_report import ArtifactHtmlReport\n'), ((2742, 2794), 'scripts.ilapfuncs.tsv', 'tsv', (['report_folder', 'data_headers', 'data_list', 'tsvname'], {}), '(report_folder, data_headers, data_list, tsvname)\n', (2745, 2794), False, 'from scripts.ilapfuncs import logfunc, tsv, timeline, is_platform_windows\n'), ((2874, 2934), 'scripts.ilapfuncs.timeline', 'timeline', (['report_folder', 'tlactivity', 'data_list', 'data_headers'], {}), '(report_folder, tlactivity, data_list, data_headers)\n', (2882, 2934), False, 'from scripts.ilapfuncs import logfunc, tsv, timeline, is_platform_windows\n'), ((2961, 3015), 'scripts.ilapfuncs.logfunc', 'logfunc', (['"""No Google Play Store Devices data available"""'], {}), "('No Google Play Store Devices data available')\n", (2968, 3015), False, 'from scripts.ilapfuncs import logfunc, tsv, timeline, is_platform_windows\n'), ((334, 362), 'os.path.basename', 'os.path.basename', (['file_found'], {}), '(file_found)\n', (350, 362), False, 'import os\n')]
|
import numpy as np
from PIL import Image
def save_image_array(img_array, fname, batch_size=100, class_num=10):
channels = img_array.shape[1]
resolution = img_array.shape[-1]
img_rows = 10
img_cols = batch_size//class_num
img = np.full([channels, resolution * img_rows, resolution * img_cols], 0.0)
for r in range(img_rows):
for c in range(img_cols):
img[:,
(resolution * r): (resolution * (r + 1)),
(resolution * (c % img_cols)): (resolution * ((c % img_cols) + 1))
] = img_array[c+(r*img_cols)]
img = (img * 255 + 0.5).clip(0, 255).astype(np.uint8)
if (img.shape[0] == 1):
img = img[0]
else:
img = np.rollaxis(img, 0, 3)
Image.fromarray(img).save(fname)
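# Usage sketch (illustrative, not part of the original module): tile a batch of
# 100 single-channel 28x28 images, with values assumed to lie in [0, 1], into a
# 10x10 grid and write it to disk.
#
#   imgs = np.random.rand(100, 1, 28, 28)
#   save_image_array(imgs, 'grid.png', batch_size=100, class_num=10)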
|
[
"numpy.full",
"PIL.Image.fromarray",
"numpy.rollaxis"
] |
[((249, 319), 'numpy.full', 'np.full', (['[channels, resolution * img_rows, resolution * img_cols]', '(0.0)'], {}), '([channels, resolution * img_rows, resolution * img_cols], 0.0)\n', (256, 319), True, 'import numpy as np\n'), ((710, 732), 'numpy.rollaxis', 'np.rollaxis', (['img', '(0)', '(3)'], {}), '(img, 0, 3)\n', (721, 732), True, 'import numpy as np\n'), ((738, 758), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (753, 758), False, 'from PIL import Image\n')]
|
#!/usr/bin/env python3
#Use these commands in Kali to install required software:
# sudo apt install python3-pip
# pip install python-nmap
# Import nmap so we can use it for the scan
import nmap
# We import the ipaddress module. We want to use the ipaddress.ip_address(address)
# method to see if we can instantiate a valid ip address to test.
import ipaddress
# We need to create regular expressions to ensure that the input is correctly formatted.
import re
# Regular Expression Pattern to extract the number of ports you want to scan.
# You have to specify <lowest_port_number>-<highest_port_number> (ex 10-100)
port_range_pattern = re.compile("([0-9]+)-([0-9]+)")
# Initialising the port numbers, will be using the variables later on.
port_min = 0
port_max = 65535
# This port scanner uses the Python nmap module.
# You'll need to install the following to get it work on Linux:
# Step 1: sudo apt install python3-pip
# Step 2: pip install python-nmap
# Basic user interface header
print(r"""______ _ _ ______ _ _
| _ \ (_) | | | ___ \ | | | |
| | | |__ ___ ___ __| | | |_/ / ___ _ __ ___ | |__ __ _| |
| | | / _` \ \ / / |/ _` | | ___ \/ _ \| '_ ` _ \| '_ \ / _` | |
| |/ / (_| |\ V /| | (_| | | |_/ / (_) | | | | | | |_) | (_| | |
|___/ \__,_| \_/ |_|\__,_| \____/ \___/|_| |_| |_|_.__/ \__,_|_|""")
print("\n****************************************************************")
print("\n* Copyright of <NAME>, 2021 *")
print("\n* https://www.davidbombal.com *")
print("\n* https://www.youtube.com/davidbombal *")
print("\n****************************************************************")
# Ask user to input the ip address they want to scan.
while True:
ip_add_entered = input("\nPlease enter the ip address that you want to scan: ")
# If we enter an invalid ip address the try except block will go to the except block and say you entered an invalid ip address.
try:
ip_address_obj = ipaddress.ip_address(ip_add_entered)
# The following line will only execute if the ip is valid.
print("You entered a valid ip address.")
break
except:
print("You entered an invalid ip address")
while True:
# You can scan 0-65535 ports. This scanner is basic and doesn't use multithreading so scanning all the ports is not advised.
print("Please enter the range of ports you want to scan in format: <int>-<int> (ex would be 60-120)")
port_range = input("Enter port range: ")
# We pass the port numbers in by removing extra spaces that people sometimes enter. So if you enter 80 - 90 instead of 80-90 the program will still work.
port_range_valid = port_range_pattern.search(port_range.replace(" ",""))
if port_range_valid:
# We're extracting the low end of the port scanner range the user want to scan.
port_min = int(port_range_valid.group(1))
# We're extracting the upper end of the port scanner range the user want to scan.
port_max = int(port_range_valid.group(2))
break
nm = nmap.PortScanner()
# We're looping over all of the ports in the specified range.
for port in range(port_min, port_max + 1):
try:
# The result is quite interesting to look at. You may want to inspect the dictionary it returns.
# It contains what was sent to the command line in addition to the port status we're after.
        # For example, for port 80 and ip 10.0.0.2 you'd run: nmap -oX - -p 80 -sV 10.0.0.2
result = nm.scan(ip_add_entered, str(port))
# Uncomment following line and look at dictionary
# print(result)
# We extract the port status from the returned object
port_status = (result['scan'][ip_add_entered]['tcp'][port]['state'])
print(f"Port {port} is {port_status}")
except:
# We cannot scan some ports and this ensures the program doesn't crash when we try to scan them.
print(f"Cannot scan port {port}.")
|
[
"ipaddress.ip_address",
"nmap.PortScanner",
"re.compile"
] |
[((640, 671), 're.compile', 're.compile', (['"""([0-9]+)-([0-9]+)"""'], {}), "('([0-9]+)-([0-9]+)')\n", (650, 671), False, 'import re\n'), ((3169, 3187), 'nmap.PortScanner', 'nmap.PortScanner', ([], {}), '()\n', (3185, 3187), False, 'import nmap\n'), ((2087, 2123), 'ipaddress.ip_address', 'ipaddress.ip_address', (['ip_add_entered'], {}), '(ip_add_entered)\n', (2107, 2123), False, 'import ipaddress\n')]
|
#!/usr/bin/env python3
import logging
import torch
import numpy as np
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence
from pytorch_translate import rnn # noqa
logger = logging.getLogger(__name__)
def add_args(parser):
parser.add_argument(
"--char-rnn",
action="store_true",
default=False,
help="Assumes input is delimiter-separated character tokens "
"(configurable with --word-delimiter option). Encodes word "
"representations using bi-LSTM over char inputs.",
)
parser.add_argument(
"--char-rnn-units",
type=int,
default=256,
metavar="N",
help=("Number of units for Character LSTM."),
)
parser.add_argument(
"--char-rnn-layers",
type=int,
default=2,
metavar="N",
help=("Number of Character LSTM layers."),
)
parser.add_argument(
"--word-delimiter", type=str, default="@SPACE", help=("Token separating words.")
)
def set_arg_defaults(args):
if hasattr(args, "char_rnn_params"):
return args.char_rnn_params
args.char_rnn_params = None
char_rnn = getattr(args, "char_rnn", False)
if char_rnn:
args.char_rnn_params = {
"char_rnn_units": args.char_rnn_units,
"char_rnn_layers": args.char_rnn_layers,
"word_delimiter": args.word_delimiter,
}
class DelimiterSplit(nn.Module):
"""
nn.Module which takes batched sequence input where the tokens are assumed
to represent characters with a specified delimiter separating words, and
returns the same indices split into words.
Inputs:
src_tokens (batch_size, max_length): character indices
src_lengths (batch_size): lengths in total characters including delimiters
Outputs:
padded_char_inds (max_word_length, total_words)
        word_lengths (total_words,)
words_per_sent (batch_size,)
"""
def __init__(self, dictionary, word_delimiter="@SPACE"):
super().__init__()
self.dictionary = dictionary
self.padding_idx = dictionary.pad()
self.word_delim_index = self.dictionary.index(word_delimiter)
if self.word_delim_index == self.dictionary.unk():
raise RuntimeError(
f"Word delimiter {word_delimiter} not in source dictionary!"
)
def forward(self, src_tokens, src_lengths):
words = []
word_lengths = []
words_per_sent = []
src_tokens_numpy = src_tokens.cpu().numpy()
for sentence_array in src_tokens_numpy:
chars = []
words_in_sentence = 0
for idx in sentence_array:
if idx == self.dictionary.pad():
continue
elif idx == self.word_delim_index:
if len(chars) > 0:
word = torch.LongTensor(np.array(chars, dtype=np.int64))
words.append(word)
word_lengths.append(len(chars))
words_in_sentence += 1
chars = []
continue
else:
chars.append(idx)
if len(chars) > 0:
word = torch.LongTensor(np.array(chars, dtype=np.int64))
words.append(word)
word_lengths.append(len(chars))
words_in_sentence += 1
chars = []
words_per_sent.append(words_in_sentence)
max_word_length = max(word_lengths)
padded_char_inds = torch.LongTensor(max_word_length, len(words)).fill_(
self.padding_idx
)
for idx, length in enumerate(word_lengths):
padded_char_inds[:length, idx] = words[idx]
# copy to GPU if necessary
padded_char_inds = padded_char_inds.type_as(src_tokens)
word_lengths = torch.LongTensor(word_lengths).type_as(src_lengths)
words_per_sent = torch.LongTensor(words_per_sent).type_as(src_lengths)
return padded_char_inds, word_lengths, words_per_sent
class CharRNN(nn.Module):
"""
nn.Module to encode character sequences (with word delimiters) into
    vectors representing each word with bi-directional RNNs.
"""
def __init__(
self,
dictionary,
embed_dim,
hidden_dim,
num_layers,
bidirectional=True,
word_delimiter="@SPACE",
):
super().__init__()
self.word_split = DelimiterSplit(dictionary, word_delimiter)
self.dictionary = dictionary
num_embeddings = len(dictionary)
self.padding_idx = dictionary.pad()
self.embed_chars = rnn.Embedding(
num_embeddings=num_embeddings,
embedding_dim=embed_dim,
padding_idx=self.padding_idx,
freeze_embed=False,
)
self.bidirectional = bidirectional
if self.bidirectional:
assert hidden_dim % 2 == 0
self.lstm_encoder = rnn.LSTMSequenceEncoder.LSTM(
embed_dim,
hidden_dim // 2 if bidirectional else hidden_dim,
num_layers=num_layers,
bidirectional=bidirectional,
)
def forward(self, src_tokens, src_lengths):
padded_char_inds, word_lengths, words_per_sent = self.word_split(
src_tokens, src_lengths
)
# inputs to RNN must be in descending order of length
sorted_word_lengths, word_length_order = torch.sort(
word_lengths, descending=True
)
# shape: (max_word_len, total_words, embed_dim)
char_rnn_input = self.embed_chars(padded_char_inds[:, word_length_order])
packed_char_input = pack_padded_sequence(char_rnn_input, sorted_word_lengths)
_, (h_last, _) = self.lstm_encoder(packed_char_input)
# take last-layer output only (shape: (total_words, hidden_dim))
if self.bidirectional:
rnn_output = torch.cat((h_last[-2, :, :], h_last[-1, :, :]), dim=1)
else:
rnn_output = h_last[-1, :, :]
# "unsort"
_, inverted_word_length_order = torch.sort(word_length_order)
unsorted_rnn_output = rnn_output[inverted_word_length_order, :]
# (max_words_per_sent, batch_size, word_rep_dim)
output = torch.FloatTensor(
int(words_per_sent.max()), words_per_sent.shape[0], rnn_output.size(1)
).type_as(
rnn_output
)
sent_end_indices = words_per_sent.cumsum(0)
for sent_index in range(words_per_sent.shape[0]):
start = 0 if sent_index == 0 else sent_end_indices[sent_index - 1]
end = sent_end_indices[sent_index]
output[: words_per_sent[sent_index], sent_index, :] = unsorted_rnn_output[
start:end, :
]
return output, words_per_sent
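# Usage sketch (illustrative; the dimensions are assumptions): given a source
# dictionary of character tokens that includes the word delimiter, the module
# maps batched character sequences to one vector per word.
#
#   char_rnn = CharRNN(dictionary, embed_dim=32, hidden_dim=256, num_layers=2)
#   word_reps, words_per_sent = char_rnn(src_tokens, src_lengths)
#   # word_reps has shape (max_words_per_sent, batch_size, hidden_dim)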
|
[
"pytorch_translate.rnn.Embedding",
"torch.LongTensor",
"torch.cat",
"pytorch_translate.rnn.LSTMSequenceEncoder.LSTM",
"numpy.array",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.sort",
"logging.getLogger"
] |
[((198, 225), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (215, 225), False, 'import logging\n'), ((4715, 4838), 'pytorch_translate.rnn.Embedding', 'rnn.Embedding', ([], {'num_embeddings': 'num_embeddings', 'embedding_dim': 'embed_dim', 'padding_idx': 'self.padding_idx', 'freeze_embed': '(False)'}), '(num_embeddings=num_embeddings, embedding_dim=embed_dim,\n padding_idx=self.padding_idx, freeze_embed=False)\n', (4728, 4838), False, 'from pytorch_translate import rnn\n'), ((5036, 5181), 'pytorch_translate.rnn.LSTMSequenceEncoder.LSTM', 'rnn.LSTMSequenceEncoder.LSTM', (['embed_dim', '(hidden_dim // 2 if bidirectional else hidden_dim)'], {'num_layers': 'num_layers', 'bidirectional': 'bidirectional'}), '(embed_dim, hidden_dim // 2 if bidirectional else\n hidden_dim, num_layers=num_layers, bidirectional=bidirectional)\n', (5064, 5181), False, 'from pytorch_translate import rnn\n'), ((5518, 5559), 'torch.sort', 'torch.sort', (['word_lengths'], {'descending': '(True)'}), '(word_lengths, descending=True)\n', (5528, 5559), False, 'import torch\n'), ((5750, 5807), 'torch.nn.utils.rnn.pack_padded_sequence', 'pack_padded_sequence', (['char_rnn_input', 'sorted_word_lengths'], {}), '(char_rnn_input, sorted_word_lengths)\n', (5770, 5807), False, 'from torch.nn.utils.rnn import pack_padded_sequence\n'), ((6172, 6201), 'torch.sort', 'torch.sort', (['word_length_order'], {}), '(word_length_order)\n', (6182, 6201), False, 'import torch\n'), ((6001, 6055), 'torch.cat', 'torch.cat', (['(h_last[-2, :, :], h_last[-1, :, :])'], {'dim': '(1)'}), '((h_last[-2, :, :], h_last[-1, :, :]), dim=1)\n', (6010, 6055), False, 'import torch\n'), ((3917, 3947), 'torch.LongTensor', 'torch.LongTensor', (['word_lengths'], {}), '(word_lengths)\n', (3933, 3947), False, 'import torch\n'), ((3994, 4026), 'torch.LongTensor', 'torch.LongTensor', (['words_per_sent'], {}), '(words_per_sent)\n', (4010, 4026), False, 'import torch\n'), ((3287, 3318), 'numpy.array', 'np.array', (['chars'], {'dtype': 'np.int64'}), '(chars, dtype=np.int64)\n', (3295, 3318), True, 'import numpy as np\n'), ((2909, 2940), 'numpy.array', 'np.array', (['chars'], {'dtype': 'np.int64'}), '(chars, dtype=np.int64)\n', (2917, 2940), True, 'import numpy as np\n')]
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT next sentence prediction / binary coherence finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
from absl import app
from absl import flags
from bert import modeling
from bert import optimization
from bert import tokenization
from language.conpono.cpc import model_builder
from language.conpono.reconstruct import preprocess as ip
import tensorflow.compat.v1 as tf
from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver
from tensorflow.contrib import data as contrib_data
from tensorflow.contrib import lookup as contrib_lookup
from tensorflow.contrib import tpu as contrib_tpu
from tensorflow.contrib import training as contrib_training
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"eval_file", None,
"The input data. Should be in tfrecord format ready to input to BERT.")
flags.DEFINE_string(
"train_file", None,
"The input data. Should be in tfrecord format ready to input to BERT.")
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_bool("include_mlm", True, "Whether to include MLM loss/objective")
flags.DEFINE_integer("num_choices", 32, "Number of negative samples + 1")
flags.DEFINE_integer("data_window_size", 5, "Number of documents to draw"
"negative samples from.")
flags.DEFINE_integer("data_window_shift", 2, "Shift windows by this many for"
"negative samples.")
flags.DEFINE_integer("max_sent_length", 70, "Number of tokens per sentence.")
flags.DEFINE_integer("max_para_length", 30, "Number of sentences per paragraph")
flags.DEFINE_integer("context_size", 4, "Number of sentences in the context")
flags.DEFINE_integer("margin", 1, "Eta value for margin.")
flags.DEFINE_float("mask_rate", 0.1, "Rate of masking for mlm.")
flags.DEFINE_bool("add_lv2loss", False, "Whether to use the level 2 loss.")
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_float(
"dataset_one_weight", 0.5, "Weight of first dataset."
"Weight of second dataset will be 1-x")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 32, "Total batch size for eval.")
flags.DEFINE_integer("train_data_size", 10000, "The number of examples in the"
"training data")
flags.DEFINE_integer("eval_data_size", -1, "The number of examples in the"
"validation data")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 10000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
_SEP_TOKEN = "[SEP]"
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
input_ids,
input_mask,
segment_ids,
label_id,
is_real_example=True):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.is_real_example = is_real_example
# pylint: disable=invalid-name
Outputs_And_Context = collections.namedtuple(
"Outputs_And_Context",
["input_ids", "input_mask", "segment_ids", "label_types", "context"])
# pylint: enable=invalid-name
def pad_and_cut(tensor, max_len_scalar):
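  # Zero-pads `tensor` at the end and then keeps only the first
  # `max_len_scalar` elements, so the result always has length
  # `max_len_scalar`: short inputs are padded, long inputs are truncated.
  # E.g. [5, 6, 7] with max_len_scalar=5 becomes [5, 6, 7, 0, 0].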
end_padding = tf.constant([[0, max_len_scalar]])
return tf.pad(tensor, end_padding)[:max_len_scalar]
def build_distractors(distractor_examples, context):
"""Create inputs with distractors."""
CLS_ID = tf.constant([101], dtype=tf.int64) # pylint: disable=invalid-name
SEP_ID = tf.constant([102], dtype=tf.int64) # pylint: disable=invalid-name
bert_inputs = []
input_masks = []
segment_ids = []
# for each distractor
sample_size = int((FLAGS.num_choices - 4) / (FLAGS.data_window_size - 1))
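  # With the default flags this is (32 - 4) / (5 - 1) = 7 sentences drawn from
  # each of the (data_window_size - 1) neighbouring documents; together with
  # the 4 true targets from build_bert_inputs this gives num_choices candidates.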
for example in distractor_examples:
# randomly sample 7
intermediate_examples_tensor = tf.reduce_sum(tf.abs(example), 1)
examples_zero_vector = tf.zeros(shape=(1, 1), dtype=tf.int64)
examples_bool_mask = tf.squeeze(
tf.not_equal(intermediate_examples_tensor, examples_zero_vector))
paragraph_len = tf.reduce_sum(tf.cast(examples_bool_mask, tf.int32))
indices = tf.range(0, limit=paragraph_len, dtype=tf.int32)
shuffled_indices = tf.random.shuffle(indices)[:sample_size]
# extend examples / targets
distractor_cand = example
distractor_cand_plus_one = distractor_cand[1:]
distractor_cand_plus_two = distractor_cand[2:]
# pad extensions
paddings_one = tf.constant([[0, 1], [0, 0]])
distractor_cand_plus_one = tf.pad(distractor_cand_plus_one, paddings_one)
paddings_two = tf.constant([[0, 2], [0, 0]])
distractor_cand_plus_two = tf.pad(distractor_cand_plus_two, paddings_two)
distractor_cand_ext = tf.concat(
[distractor_cand, distractor_cand_plus_one, distractor_cand_plus_two],
axis=1)
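    # Row i of distractor_cand_ext is sentence i concatenated with the two
    # following sentences (zero-padded past the end of the paragraph), so each
    # sampled distractor spans up to 3 consecutive sentences.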
distractors = tf.gather(distractor_cand_ext, shuffled_indices)
for i in range(sample_size):
distractors_non_zero = tf.where(
tf.not_equal(distractors[i], tf.zeros_like(distractors[i])))
distractors_stripped = tf.gather_nd(distractors[i], distractors_non_zero)
segment_id = tf.concat([
tf.zeros_like(CLS_ID, dtype=tf.int64),
tf.zeros_like(context),
tf.zeros_like(SEP_ID, dtype=tf.int64),
tf.ones_like(distractors_stripped),
tf.ones_like(SEP_ID, dtype=tf.int64)
],
axis=0)
segment_id = pad_and_cut(segment_id, FLAGS.max_seq_length)
segment_ids.append(segment_id)
new_input = tf.concat(
[CLS_ID, context, SEP_ID, distractors_stripped, SEP_ID], axis=0)
input_mask = tf.ones_like(new_input)
input_mask = pad_and_cut(input_mask, FLAGS.max_seq_length)
input_masks.append(input_mask)
padded_new_input = pad_and_cut(new_input, FLAGS.max_seq_length)
bert_inputs.append(padded_new_input)
bert_inputs = tf.stack(bert_inputs, axis=0)
input_masks = tf.stack(input_masks, axis=0)
segment_ids = tf.stack(segment_ids, axis=0)
out = Outputs_And_Context(bert_inputs, input_masks, segment_ids, None, None)
return out
def build_bert_inputs(example):
"""Convert example <Tensor [30, 70]> into bert inputs."""
CLS_ID = tf.constant([101], dtype=tf.int64) # pylint: disable=invalid-name
SEP_ID = tf.constant([102], dtype=tf.int64) # pylint: disable=invalid-name
max_len = tf.constant([FLAGS.max_para_length])
context_size = tf.constant([FLAGS.context_size])
intermediate_examples_tensor = tf.reduce_sum(tf.abs(example), 1)
examples_zero_vector = tf.zeros(shape=(1, 1), dtype=tf.int64)
examples_bool_mask = tf.squeeze(
tf.not_equal(intermediate_examples_tensor, examples_zero_vector))
paragraph_len = tf.reduce_sum(tf.cast(examples_bool_mask, tf.int32))
start = tf.random.uniform([1],
0,
tf.reshape(paragraph_len, []) -
tf.reshape(context_size, []) + 1,
dtype=tf.int32)
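  # `start` is sampled from [0, paragraph_len - context_size], so the context
  # window of FLAGS.context_size sentences always lies inside the unpadded
  # part of the paragraph.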
# Slice the document into the before, after and context.
# Discard the zero padding.
sizes = tf.squeeze(
tf.concat([[
start, context_size, paragraph_len - context_size - start,
max_len - paragraph_len
]], 0))
before, context, after, _ = tf.split(example, sizes, axis=0)
# Gather the context removing zero padding at end of sentences.
non_zeros = tf.where(tf.not_equal(context, tf.zeros_like(context)))
context_gathered = tf.gather_nd(context, non_zeros)
# Flip before so we select the 4 sentences closest to target
before = tf.reverse(before, axis=[0])
# pad both to longer than needed
paddings = tf.constant([[0, 8], [0, 0]])
before = tf.pad(before, paddings)
after = tf.pad(after, paddings)
# Extend targets to 3 sentences
# pad both
before_minus_one = before[1:][:4]
before_minus_two = before[2:][:4]
after_plus_one = after[1:][:4]
after_plus_two = after[2:][:4]
before = before[:4]
after = after[:4]
before = tf.concat([before_minus_two, before_minus_one, before], axis=1)
after = tf.concat([after, after_plus_one, after_plus_two], axis=1)
############################################################################
# before = before[:4]
# after = after[:4]
# These 8 sentences are the 8 surrounding targets. Some are padding.
targets = tf.concat([before, after], axis=0)
  # Remove the padding from the surrounding sentences
  # E.g. if context starts at beginning of paragraph, before is all padding
intermediate_tensor = tf.reduce_sum(tf.abs(targets), 1)
zero_vector = tf.zeros(shape=(1, 1), dtype=tf.int64)
bool_mask = tf.squeeze(tf.not_equal(intermediate_tensor, zero_vector))
bool_mask.set_shape([None])
targets = tf.boolean_mask(targets, bool_mask)
# Randomly select 4 targets
# We will also select the label_types for each selected target
indices = tf.range(0, limit=tf.shape(targets)[0], dtype=tf.int32)
shuffled_indices = tf.random.shuffle(indices)[:4]
targets = tf.gather(targets, shuffled_indices)
full_labels = tf.concat([tf.range(3, -1, -1), tf.range(4, 8)], axis=0)
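  # full_labels is [3, 2, 1, 0, 4, 5, 6, 7]: because `before` was reversed, the
  # sentence immediately preceding the context gets label 3 (the furthest gets
  # 0), and the sentences following the context get labels 4-7 by distance.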
label_types = tf.boolean_mask(full_labels, bool_mask)
label_types = tf.gather(label_types, shuffled_indices)
# create inputs
bert_inputs = []
input_masks = []
segment_ids = []
for i in range(4):
target_non_zero = tf.where(
tf.not_equal(targets[i], tf.zeros_like(targets[i])))
targets_stripped = tf.gather_nd(targets[i], target_non_zero)
segment_id = tf.concat([
tf.zeros_like(CLS_ID, dtype=tf.int64),
tf.zeros_like(context_gathered),
tf.zeros_like(SEP_ID, dtype=tf.int64),
tf.ones_like(targets_stripped),
tf.ones_like(SEP_ID, dtype=tf.int64)
],
axis=0)
segment_id = pad_and_cut(segment_id, FLAGS.max_seq_length)
segment_ids.append(segment_id)
new_input = tf.concat(
[CLS_ID, context_gathered, SEP_ID, targets_stripped, SEP_ID], axis=0)
input_mask = tf.ones_like(new_input)
input_mask = pad_and_cut(input_mask, FLAGS.max_seq_length)
input_masks.append(input_mask)
padded_new_input = pad_and_cut(new_input, FLAGS.max_seq_length)
bert_inputs.append(padded_new_input)
bert_inputs = tf.stack(bert_inputs, axis=0)
input_masks = tf.stack(input_masks, axis=0)
segment_ids = tf.stack(segment_ids, axis=0)
out = Outputs_And_Context(bert_inputs, input_masks, segment_ids, label_types,
context_gathered)
return out
def file_based_input_fn_builder(input_file, is_training, drop_remainder,
add_masking):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
input_file = input_file.split(",")
expanded_files = []
for infile in input_file:
try:
sharded_files = tf.io.gfile.glob(infile)
expanded_files.append(sharded_files)
except tf.errors.OpError:
expanded_files.append(infile)
name_to_features = {
"sents":
tf.FixedLenFeature([FLAGS.max_para_length * FLAGS.max_sent_length],
tf.int64)
}
def _decode_record(record, name_to_features, vocab_table):
"""Decodes a record to a TensorFlow example."""
target_example = tf.parse_single_example(record[0], name_to_features)
target_example = tf.reshape(target_example["sents"],
[FLAGS.max_para_length, FLAGS.max_sent_length])
# distractor_examples = []
# for rec in record[1:]:
# distractor_examples.append(
# tf.reshape(
# tf.parse_single_example(rec, name_to_features)["sents"],
# [FLAGS.max_para_length, FLAGS.max_sent_length]))
# This is an unfortunate hack but is necessary to get around a TF error.
dist0 = tf.reshape(
tf.parse_single_example(record[1], name_to_features)["sents"],
[FLAGS.max_para_length, FLAGS.max_sent_length])
dist1 = tf.reshape(
tf.parse_single_example(record[2], name_to_features)["sents"],
[FLAGS.max_para_length, FLAGS.max_sent_length])
dist2 = tf.reshape(
tf.parse_single_example(record[3], name_to_features)["sents"],
[FLAGS.max_para_length, FLAGS.max_sent_length])
dist3 = tf.reshape(
tf.parse_single_example(record[4], name_to_features)["sents"],
[FLAGS.max_para_length, FLAGS.max_sent_length])
inputs_obj = build_bert_inputs(target_example)
distractor_obj = build_distractors([dist0, dist1, dist2, dist3],
inputs_obj.context)
example = {}
example["input_ids"] = tf.concat(
[inputs_obj.input_ids, distractor_obj.input_ids], axis=0)
example["input_mask"] = tf.concat(
[inputs_obj.input_mask, distractor_obj.input_mask], axis=0)
example["segment_ids"] = tf.concat(
[inputs_obj.segment_ids, distractor_obj.segment_ids], axis=0)
example["label_types"] = inputs_obj.label_types
# Add masking:
if add_masking:
mask_rate = FLAGS.mask_rate
max_predictions_per_seq = int(math.ceil(FLAGS.max_seq_length * mask_rate))
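      # With the defaults (max_seq_length=128, mask_rate=0.1) this allows up to
      # ceil(12.8) = 13 masked positions per sequence.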
cls_token = "[CLS]"
sep_token = "[SEP]"
mask_token = "[MASK]"
# pad_token = "[PAD]"
mask_blacklist = tf.constant([cls_token, sep_token]) # , pad_token])
mask_blacklist_ids = tf.to_int32(vocab_table.lookup(mask_blacklist))
mask_token_id = tf.to_int32(vocab_table.lookup(tf.constant(mask_token)))
input_ids = tf.to_int32(example["input_ids"])
def call_sample_mask_indices(x):
return ip.sample_mask_indices(x, mask_rate, mask_blacklist_ids,
max_predictions_per_seq)
mask_indices = tf.map_fn(
call_sample_mask_indices, input_ids, dtype=tf.int32)
def call_get_target_tokens(x):
input_len = tf.shape(input_ids)[-1]
x_input_id = x[:input_len]
x_mask_indices = x[input_len:]
return ip.get_target_tokens_for_apply(x_input_id, x_mask_indices)
map_input = tf.concat([input_ids, mask_indices], -1)
target_token_ids = tf.map_fn(call_get_target_tokens, map_input)
def call_apply_masking(x):
input_len = tf.shape(input_ids)[-1]
mask_idx_len = tf.shape(mask_indices)[-1]
x_input_id = x[:input_len]
x_mask_indices = x[input_len:input_len + mask_idx_len]
x_target_token_ids = x[input_len + mask_idx_len:]
return ip.apply_masking(x_input_id, x_target_token_ids, x_mask_indices,
mask_token_id, 1000)
map_input2 = tf.concat([input_ids, mask_indices, target_token_ids], -1)
token_ids_masked = tf.map_fn(call_apply_masking, tf.to_int64(map_input2))
target_token_weights = tf.ones_like(target_token_ids, dtype=tf.float32)
pad_targets = tf.where(
tf.equal(target_token_ids, 0),
tf.ones_like(target_token_ids, dtype=tf.float32),
tf.zeros_like(target_token_ids, dtype=tf.float32))
target_token_weights = target_token_weights - pad_targets
example["target_token_weights"] = target_token_weights
example["target_token_ids"] = target_token_ids
example["input_ids"] = token_ids_masked
example["mask_indices"] = mask_indices
# Set shape explicitly for TPU
example["target_token_weights"].set_shape(
[FLAGS.num_choices, max_predictions_per_seq])
example["target_token_ids"].set_shape(
[FLAGS.num_choices, max_predictions_per_seq])
example["mask_indices"].set_shape(
[FLAGS.num_choices, max_predictions_per_seq])
# Set shape explicitly for TPU
example["input_ids"].set_shape([FLAGS.num_choices, FLAGS.max_seq_length])
example["input_mask"].set_shape([FLAGS.num_choices, FLAGS.max_seq_length])
example["segment_ids"].set_shape([FLAGS.num_choices, FLAGS.max_seq_length])
example["label_types"].set_shape([4])
example["label_ids"] = tf.scatter_nd(
tf.reshape(example["label_types"], [4, 1]), tf.range(4), [8])
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()): # pylint: disable=g-builtin-op
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
vocab_table = contrib_lookup.index_table_from_file(FLAGS.vocab_file)
if len(expanded_files) == 1:
d = tf.data.TFRecordDataset(expanded_files[0])
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=256)
else:
dataset_list = [
tf.data.TFRecordDataset(expanded_files[i])
for i in range(len(expanded_files))
]
if is_training:
dataset_list = [d.repeat() for d in dataset_list]
dset_weights = [FLAGS.dataset_one_weight, 1 - FLAGS.dataset_one_weight]
d = tf.data.experimental.sample_from_datasets(dataset_list, dset_weights)
# Note that sample_from_datasets() inserts randomness into the training
# An alternative would be to use choose_from_datasets() but then the
      # order must be stated explicitly which is less intuitive for unbalanced
# datasets. Example below:
#
# choice_dataset = tf.data.Dataset.range(len(dataset_list)).repeat()
# d = tf.data.experimental.choose_from_datasets(dataset_list,
# choice_dataset)
if is_training:
d = d.shuffle(buffer_size=256)
# The window size will be for selecting negative samples
# It equals the number of documents to sample from -1
d = d.apply(
contrib_data.sliding_window_batch(
window_size=FLAGS.data_window_size,
window_shift=FLAGS.data_window_shift))
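    # Each element of the windowed dataset is a tuple of FLAGS.data_window_size
    # consecutive records; _decode_record treats record[0] as the target
    # paragraph and record[1:] as the documents distractors are drawn from.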
d = d.apply(
tf.data.experimental.map_and_batch(
lambda record: _decode_record(record, name_to_features, vocab_table
),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
def model_fn_builder(bert_config, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings, num_choices, add_masking):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = tf.reshape(features["input_ids"], [-1, FLAGS.max_seq_length])
input_mask = tf.reshape(features["input_mask"], [-1, FLAGS.max_seq_length])
segment_ids = tf.reshape(features["segment_ids"],
[-1, FLAGS.max_seq_length])
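    # Features arrive as [batch, num_choices, max_seq_length]; flattening to
    # [batch * num_choices, max_seq_length] lets BERT encode each candidate
    # pair as an independent sequence.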
label_types = features["label_types"]
label_ids = features["label_ids"]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
is_real_example = tf.reduce_sum(tf.one_hot(label_types, 8), axis=1)
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
(cpc_loss, _, logits, probabilities) = model_builder.create_model(
model,
label_ids,
label_types,
FLAGS.train_batch_size if is_training else FLAGS.eval_batch_size,
num_choices,
use_tpu,
FLAGS.add_lv2loss,
margin=float(FLAGS.margin))
if add_masking:
mask_rate = FLAGS.mask_rate # search alternatives?
max_predictions_per_seq = int(math.ceil(FLAGS.max_seq_length * mask_rate))
masked_lm_positions = tf.reshape(features["mask_indices"],
[-1, max_predictions_per_seq])
masked_lm_ids = tf.reshape(features["target_token_ids"],
[-1, max_predictions_per_seq])
masked_lm_weights = tf.reshape(features["target_token_weights"],
[-1, max_predictions_per_seq])
(masked_lm_loss, _,
_) = model_builder.get_masked_lm_output(bert_config,
model.get_sequence_output(),
model.get_embedding_table(),
masked_lm_positions,
masked_lm_ids, masked_lm_weights)
total_loss = cpc_loss + masked_lm_loss
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(total_loss, learning_rate,
num_train_steps,
num_warmup_steps, use_tpu)
output_spec = contrib_tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(cpc_loss, mlm_loss, label_ids, logits, is_real_example):
"""Collect metrics for function."""
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions, weights=is_real_example)
cpc_loss_metric = tf.metrics.mean(values=cpc_loss)
mlm_loss_metric = tf.metrics.mean(values=mlm_loss)
metric_dict = {
"eval_accuracy": accuracy,
"eval_cpc_loss": cpc_loss_metric,
"eval_mlm_loss": mlm_loss_metric
}
for i in range(8):
metric_dict["acc" + str(i)] = tf.metrics.accuracy(
labels=label_ids[:, i],
predictions=predictions[:, i],
weights=is_real_example[:, i])
return metric_dict
eval_metrics = (metric_fn, [
cpc_loss, masked_lm_loss, label_ids, logits, is_real_example
])
output_spec = contrib_tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
output_spec = contrib_tpu.TPUEstimatorSpec(
mode=mode,
predictions={"probabilities": probabilities},
scaffold_fn=scaffold_fn)
return output_spec
return model_fn
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
FLAGS.init_checkpoint)
if not FLAGS.do_train and not FLAGS.do_eval:
raise ValueError("At least one of `do_train`, `do_eval` must be True.")
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
tf.gfile.MakeDirs(FLAGS.output_dir)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = contrib_tpu.InputPipelineConfig.PER_HOST_V2
run_config = contrib_tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=contrib_tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
num_train_steps = None
num_warmup_steps = None
if FLAGS.do_train:
num_train_steps = int(FLAGS.train_data_size / FLAGS.train_batch_size)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
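    # With the default flags this is int(10000 / 32) = 312 training steps and
    # int(312 * 0.1) = 31 warmup steps.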
model_fn = model_fn_builder(
bert_config=bert_config,
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu,
num_choices=FLAGS.num_choices,
add_masking=FLAGS.include_mlm)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = contrib_tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
predict_batch_size=FLAGS.predict_batch_size)
if FLAGS.do_train:
tf.logging.info("***** Running training *****")
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = file_based_input_fn_builder(
input_file=FLAGS.train_file,
is_training=True,
drop_remainder=True,
add_masking=FLAGS.include_mlm)
estimator.train(input_fn=train_input_fn, steps=num_train_steps)
if FLAGS.do_eval:
# This tells the estimator to run through the entire set.
if FLAGS.eval_data_size < 0:
eval_steps = None
else:
eval_steps = int(FLAGS.eval_data_size / FLAGS.eval_batch_size)
eval_drop_remainder = True if FLAGS.use_tpu else False
# Note that we are masking inputs for eval as well as training and this will
# decrease eval performance
eval_input_fn = file_based_input_fn_builder(
input_file=FLAGS.eval_file,
is_training=False,
drop_remainder=eval_drop_remainder,
add_masking=FLAGS.include_mlm)
# checkpoints_iterator blocks until a new checkpoint appears.
for ckpt in contrib_training.checkpoints_iterator(estimator.model_dir):
try:
result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
tf.logging.info("********** Eval results:*******\n")
for key in sorted(result.keys()):
tf.logging.info("%s = %s" % (key, str(result[key])))
except tf.errors.NotFoundError:
tf.logging.error("Checkpoint path '%s' no longer exists.", ckpt)
if __name__ == "__main__":
flags.mark_flag_as_required("eval_file")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
app.run(main)
|
[
"tensorflow.compat.v1.stack",
"tensorflow.compat.v1.io.gfile.glob",
"tensorflow.compat.v1.zeros",
"bert.modeling.get_assignment_map_from_checkpoint",
"tensorflow.compat.v1.equal",
"tensorflow.compat.v1.reverse",
"tensorflow.contrib.tpu.TPUConfig",
"tensorflow.compat.v1.gather",
"tensorflow.compat.v1.train.init_from_checkpoint",
"tensorflow.compat.v1.random.shuffle",
"tensorflow.compat.v1.gather_nd",
"tensorflow.contrib.cluster_resolver.TPUClusterResolver",
"tensorflow.compat.v1.one_hot",
"tensorflow.compat.v1.data.experimental.sample_from_datasets",
"bert.tokenization.validate_case_matches_checkpoint",
"tensorflow.compat.v1.split",
"tensorflow.contrib.lookup.index_table_from_file",
"absl.flags.DEFINE_bool",
"tensorflow.compat.v1.constant",
"tensorflow.compat.v1.to_int32",
"absl.flags.mark_flag_as_required",
"tensorflow.compat.v1.logging.info",
"tensorflow.compat.v1.logging.error",
"absl.flags.DEFINE_integer",
"absl.flags.DEFINE_float",
"bert.modeling.BertModel",
"tensorflow.contrib.training.checkpoints_iterator",
"tensorflow.compat.v1.map_fn",
"tensorflow.compat.v1.metrics.mean",
"language.conpono.reconstruct.preprocess.apply_masking",
"tensorflow.compat.v1.metrics.accuracy",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.parse_single_example",
"math.ceil",
"tensorflow.contrib.data.sliding_window_batch",
"tensorflow.contrib.tpu.TPUEstimatorSpec",
"tensorflow.compat.v1.shape",
"tensorflow.compat.v1.data.TFRecordDataset",
"language.conpono.reconstruct.preprocess.get_target_tokens_for_apply",
"tensorflow.compat.v1.train.Scaffold",
"tensorflow.compat.v1.boolean_mask",
"tensorflow.compat.v1.reshape",
"tensorflow.contrib.tpu.TPUEstimator",
"tensorflow.compat.v1.range",
"tensorflow.compat.v1.FixedLenFeature",
"tensorflow.compat.v1.trainable_variables",
"tensorflow.compat.v1.zeros_like",
"tensorflow.compat.v1.pad",
"bert.modeling.BertConfig.from_json_file",
"tensorflow.compat.v1.gfile.MakeDirs",
"tensorflow.compat.v1.to_int64",
"tensorflow.compat.v1.concat",
"tensorflow.compat.v1.ones_like",
"bert.optimization.create_optimizer",
"tensorflow.compat.v1.argmax",
"absl.flags.DEFINE_string",
"absl.app.run",
"tensorflow.compat.v1.logging.set_verbosity",
"collections.namedtuple",
"tensorflow.compat.v1.abs",
"tensorflow.compat.v1.not_equal",
"language.conpono.reconstruct.preprocess.sample_mask_indices"
] |
[((1441, 1555), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""eval_file"""', 'None', '"""The input data. Should be in tfrecord format ready to input to BERT."""'], {}), "('eval_file', None,\n 'The input data. Should be in tfrecord format ready to input to BERT.')\n", (1460, 1555), False, 'from absl import flags\n'), ((1562, 1677), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""train_file"""', 'None', '"""The input data. Should be in tfrecord format ready to input to BERT."""'], {}), "('train_file', None,\n 'The input data. Should be in tfrecord format ready to input to BERT.')\n", (1581, 1677), False, 'from absl import flags\n'), ((1684, 1846), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""bert_config_file"""', 'None', '"""The config json file corresponding to the pre-trained BERT model. This specifies the model architecture."""'], {}), "('bert_config_file', None,\n 'The config json file corresponding to the pre-trained BERT model. This specifies the model architecture.'\n )\n", (1703, 1846), False, 'from absl import flags\n'), ((1855, 1957), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""vocab_file"""', 'None', '"""The vocabulary file that the BERT model was trained on."""'], {}), "('vocab_file', None,\n 'The vocabulary file that the BERT model was trained on.')\n", (1874, 1957), False, 'from absl import flags\n'), ((1975, 2087), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""output_dir"""', 'None', '"""The output directory where the model checkpoints will be written."""'], {}), "('output_dir', None,\n 'The output directory where the model checkpoints will be written.')\n", (1994, 2087), False, 'from absl import flags\n'), ((2115, 2194), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""include_mlm"""', '(True)', '"""Whether to include MLM loss/objective"""'], {}), "('include_mlm', True, 'Whether to include MLM loss/objective')\n", (2132, 2194), False, 'from absl import flags\n'), ((2196, 2269), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_choices"""', '(32)', '"""Number of negative samples + 1"""'], {}), "('num_choices', 32, 'Number of negative samples + 1')\n", (2216, 2269), False, 'from absl import flags\n'), ((2271, 2371), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""data_window_size"""', '(5)', '"""Number of documents to drawnegative samples from."""'], {}), "('data_window_size', 5,\n 'Number of documents to drawnegative samples from.')\n", (2291, 2371), False, 'from absl import flags\n'), ((2393, 2492), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""data_window_shift"""', '(2)', '"""Shift windows by this many fornegative samples."""'], {}), "('data_window_shift', 2,\n 'Shift windows by this many fornegative samples.')\n", (2413, 2492), False, 'from absl import flags\n'), ((2514, 2591), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""max_sent_length"""', '(70)', '"""Number of tokens per sentence."""'], {}), "('max_sent_length', 70, 'Number of tokens per sentence.')\n", (2534, 2591), False, 'from absl import flags\n'), ((2593, 2678), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""max_para_length"""', '(30)', '"""Number of sentences per paragraph"""'], {}), "('max_para_length', 30, 'Number of sentences per paragraph'\n )\n", (2613, 2678), False, 'from absl import flags\n'), ((2675, 2752), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""context_size"""', '(4)', '"""Number of sentences in the context"""'], {}), "('context_size', 4, 'Number of sentences in 
the context')\n", (2695, 2752), False, 'from absl import flags\n'), ((2754, 2812), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""margin"""', '(1)', '"""Eta value for margin."""'], {}), "('margin', 1, 'Eta value for margin.')\n", (2774, 2812), False, 'from absl import flags\n'), ((2814, 2878), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""mask_rate"""', '(0.1)', '"""Rate of masking for mlm."""'], {}), "('mask_rate', 0.1, 'Rate of masking for mlm.')\n", (2832, 2878), False, 'from absl import flags\n'), ((2880, 2955), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""add_lv2loss"""', '(False)', '"""Whether to use the level 2 loss."""'], {}), "('add_lv2loss', False, 'Whether to use the level 2 loss.')\n", (2897, 2955), False, 'from absl import flags\n'), ((2957, 3068), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""init_checkpoint"""', 'None', '"""Initial checkpoint (usually from a pre-trained BERT model)."""'], {}), "('init_checkpoint', None,\n 'Initial checkpoint (usually from a pre-trained BERT model).')\n", (2976, 3068), False, 'from absl import flags\n'), ((3075, 3227), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""do_lower_case"""', '(True)', '"""Whether to lower case the input text. Should be True for uncased models and False for cased models."""'], {}), "('do_lower_case', True,\n 'Whether to lower case the input text. Should be True for uncased models and False for cased models.'\n )\n", (3092, 3227), False, 'from absl import flags\n'), ((3236, 3455), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""max_seq_length"""', '(128)', '"""The maximum total input sequence length after WordPiece tokenization. Sequences longer than this will be truncated, and sequences shorter than this will be padded."""'], {}), "('max_seq_length', 128,\n 'The maximum total input sequence length after WordPiece tokenization. 
Sequences longer than this will be truncated, and sequences shorter than this will be padded.'\n )\n", (3256, 3455), False, 'from absl import flags\n'), ((3471, 3584), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""dataset_one_weight"""', '(0.5)', '"""Weight of first dataset.Weight of second dataset will be 1-x"""'], {}), "('dataset_one_weight', 0.5,\n 'Weight of first dataset.Weight of second dataset will be 1-x')\n", (3489, 3584), False, 'from absl import flags\n'), ((3594, 3658), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""do_train"""', '(False)', '"""Whether to run training."""'], {}), "('do_train', False, 'Whether to run training.')\n", (3611, 3658), False, 'from absl import flags\n'), ((3660, 3734), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""do_eval"""', '(False)', '"""Whether to run eval on the dev set."""'], {}), "('do_eval', False, 'Whether to run eval on the dev set.')\n", (3677, 3734), False, 'from absl import flags\n'), ((3736, 3814), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""train_batch_size"""', '(32)', '"""Total batch size for training."""'], {}), "('train_batch_size', 32, 'Total batch size for training.')\n", (3756, 3814), False, 'from absl import flags\n'), ((3816, 3889), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""eval_batch_size"""', '(32)', '"""Total batch size for eval."""'], {}), "('eval_batch_size', 32, 'Total batch size for eval.')\n", (3836, 3889), False, 'from absl import flags\n'), ((3891, 3987), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""train_data_size"""', '(10000)', '"""The number of examples in thetraining data"""'], {}), "('train_data_size', 10000,\n 'The number of examples in thetraining data')\n", (3911, 3987), False, 'from absl import flags\n'), ((4009, 4103), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""eval_data_size"""', '(-1)', '"""The number of examples in thevalidation data"""'], {}), "('eval_data_size', -1,\n 'The number of examples in thevalidation data')\n", (4029, 4103), False, 'from absl import flags\n'), ((4125, 4203), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""predict_batch_size"""', '(8)', '"""Total batch size for predict."""'], {}), "('predict_batch_size', 8, 'Total batch size for predict.')\n", (4145, 4203), False, 'from absl import flags\n'), ((4205, 4290), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""learning_rate"""', '(5e-05)', '"""The initial learning rate for Adam."""'], {}), "('learning_rate', 5e-05,\n 'The initial learning rate for Adam.')\n", (4223, 4290), False, 'from absl import flags\n'), ((4287, 4439), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""warmup_proportion"""', '(0.1)', '"""Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10% of training."""'], {}), "('warmup_proportion', 0.1,\n 'Proportion of training to perform linear learning rate warmup for. 
E.g., 0.1 = 10% of training.'\n )\n", (4305, 4439), False, 'from absl import flags\n'), ((4448, 4548), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""save_checkpoints_steps"""', '(10000)', '"""How often to save the model checkpoint."""'], {}), "('save_checkpoints_steps', 10000,\n 'How often to save the model checkpoint.')\n", (4468, 4548), False, 'from absl import flags\n'), ((4567, 4670), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""iterations_per_loop"""', '(1000)', '"""How many steps to make in each estimator call."""'], {}), "('iterations_per_loop', 1000,\n 'How many steps to make in each estimator call.')\n", (4587, 4670), False, 'from absl import flags\n'), ((4689, 4758), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""use_tpu"""', '(False)', '"""Whether to use TPU or GPU/CPU."""'], {}), "('use_tpu', False, 'Whether to use TPU or GPU/CPU.')\n", (4706, 4758), False, 'from absl import flags\n'), ((4760, 4949), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""tpu_name"""', 'None', '"""The Cloud TPU to use for training. This should be either the name used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url."""'], {}), "('tpu_name', None,\n 'The Cloud TPU to use for training. This should be either the name used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url.'\n )\n", (4779, 4949), False, 'from absl import flags\n'), ((4965, 5158), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""tpu_zone"""', 'None', '"""[Optional] GCE zone where the Cloud TPU is located in. If not specified, we will attempt to automatically detect the GCE project from metadata."""'], {}), "('tpu_zone', None,\n '[Optional] GCE zone where the Cloud TPU is located in. If not specified, we will attempt to automatically detect the GCE project from metadata.'\n )\n", (4984, 5158), False, 'from absl import flags\n'), ((5174, 5374), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""gcp_project"""', 'None', '"""[Optional] Project name for the Cloud TPU-enabled project. If not specified, we will attempt to automatically detect the GCE project from metadata."""'], {}), "('gcp_project', None,\n '[Optional] Project name for the Cloud TPU-enabled project. If not specified, we will attempt to automatically detect the GCE project from metadata.'\n )\n", (5193, 5374), False, 'from absl import flags\n'), ((5390, 5462), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""master"""', 'None', '"""[Optional] TensorFlow master URL."""'], {}), "('master', None, '[Optional] TensorFlow master URL.')\n", (5409, 5462), False, 'from absl import flags\n'), ((5464, 5577), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_tpu_cores"""', '(8)', '"""Only used if `use_tpu` is True. Total number of TPU cores to use."""'], {}), "('num_tpu_cores', 8,\n 'Only used if `use_tpu` is True. 
Total number of TPU cores to use.')\n", (5484, 5577), False, 'from absl import flags\n'), ((6070, 6189), 'collections.namedtuple', 'collections.namedtuple', (['"""Outputs_And_Context"""', "['input_ids', 'input_mask', 'segment_ids', 'label_types', 'context']"], {}), "('Outputs_And_Context', ['input_ids', 'input_mask',\n 'segment_ids', 'label_types', 'context'])\n", (6092, 6189), False, 'import collections\n'), ((6284, 6318), 'tensorflow.compat.v1.constant', 'tf.constant', (['[[0, max_len_scalar]]'], {}), '([[0, max_len_scalar]])\n', (6295, 6318), True, 'import tensorflow.compat.v1 as tf\n'), ((6480, 6514), 'tensorflow.compat.v1.constant', 'tf.constant', (['[101]'], {'dtype': 'tf.int64'}), '([101], dtype=tf.int64)\n', (6491, 6514), True, 'import tensorflow.compat.v1 as tf\n'), ((6558, 6592), 'tensorflow.compat.v1.constant', 'tf.constant', (['[102]'], {'dtype': 'tf.int64'}), '([102], dtype=tf.int64)\n', (6569, 6592), True, 'import tensorflow.compat.v1 as tf\n'), ((8941, 8970), 'tensorflow.compat.v1.stack', 'tf.stack', (['bert_inputs'], {'axis': '(0)'}), '(bert_inputs, axis=0)\n', (8949, 8970), True, 'import tensorflow.compat.v1 as tf\n'), ((8987, 9016), 'tensorflow.compat.v1.stack', 'tf.stack', (['input_masks'], {'axis': '(0)'}), '(input_masks, axis=0)\n', (8995, 9016), True, 'import tensorflow.compat.v1 as tf\n'), ((9033, 9062), 'tensorflow.compat.v1.stack', 'tf.stack', (['segment_ids'], {'axis': '(0)'}), '(segment_ids, axis=0)\n', (9041, 9062), True, 'import tensorflow.compat.v1 as tf\n'), ((9261, 9295), 'tensorflow.compat.v1.constant', 'tf.constant', (['[101]'], {'dtype': 'tf.int64'}), '([101], dtype=tf.int64)\n', (9272, 9295), True, 'import tensorflow.compat.v1 as tf\n'), ((9339, 9373), 'tensorflow.compat.v1.constant', 'tf.constant', (['[102]'], {'dtype': 'tf.int64'}), '([102], dtype=tf.int64)\n', (9350, 9373), True, 'import tensorflow.compat.v1 as tf\n'), ((9418, 9454), 'tensorflow.compat.v1.constant', 'tf.constant', (['[FLAGS.max_para_length]'], {}), '([FLAGS.max_para_length])\n', (9429, 9454), True, 'import tensorflow.compat.v1 as tf\n'), ((9472, 9505), 'tensorflow.compat.v1.constant', 'tf.constant', (['[FLAGS.context_size]'], {}), '([FLAGS.context_size])\n', (9483, 9505), True, 'import tensorflow.compat.v1 as tf\n'), ((9599, 9637), 'tensorflow.compat.v1.zeros', 'tf.zeros', ([], {'shape': '(1, 1)', 'dtype': 'tf.int64'}), '(shape=(1, 1), dtype=tf.int64)\n', (9607, 9637), True, 'import tensorflow.compat.v1 as tf\n'), ((10325, 10357), 'tensorflow.compat.v1.split', 'tf.split', (['example', 'sizes'], {'axis': '(0)'}), '(example, sizes, axis=0)\n', (10333, 10357), True, 'import tensorflow.compat.v1 as tf\n'), ((10516, 10548), 'tensorflow.compat.v1.gather_nd', 'tf.gather_nd', (['context', 'non_zeros'], {}), '(context, non_zeros)\n', (10528, 10548), True, 'import tensorflow.compat.v1 as tf\n'), ((10624, 10652), 'tensorflow.compat.v1.reverse', 'tf.reverse', (['before'], {'axis': '[0]'}), '(before, axis=[0])\n', (10634, 10652), True, 'import tensorflow.compat.v1 as tf\n'), ((10702, 10731), 'tensorflow.compat.v1.constant', 'tf.constant', (['[[0, 8], [0, 0]]'], {}), '([[0, 8], [0, 0]])\n', (10713, 10731), True, 'import tensorflow.compat.v1 as tf\n'), ((10743, 10767), 'tensorflow.compat.v1.pad', 'tf.pad', (['before', 'paddings'], {}), '(before, paddings)\n', (10749, 10767), True, 'import tensorflow.compat.v1 as tf\n'), ((10778, 10801), 'tensorflow.compat.v1.pad', 'tf.pad', (['after', 'paddings'], {}), '(after, paddings)\n', (10784, 10801), True, 'import tensorflow.compat.v1 as tf\n'), ((11042, 
11105), 'tensorflow.compat.v1.concat', 'tf.concat', (['[before_minus_two, before_minus_one, before]'], {'axis': '(1)'}), '([before_minus_two, before_minus_one, before], axis=1)\n', (11051, 11105), True, 'import tensorflow.compat.v1 as tf\n'), ((11116, 11174), 'tensorflow.compat.v1.concat', 'tf.concat', (['[after, after_plus_one, after_plus_two]'], {'axis': '(1)'}), '([after, after_plus_one, after_plus_two], axis=1)\n', (11125, 11174), True, 'import tensorflow.compat.v1 as tf\n'), ((11385, 11419), 'tensorflow.compat.v1.concat', 'tf.concat', (['[before, after]'], {'axis': '(0)'}), '([before, after], axis=0)\n', (11394, 11419), True, 'import tensorflow.compat.v1 as tf\n'), ((11625, 11663), 'tensorflow.compat.v1.zeros', 'tf.zeros', ([], {'shape': '(1, 1)', 'dtype': 'tf.int64'}), '(shape=(1, 1), dtype=tf.int64)\n', (11633, 11663), True, 'import tensorflow.compat.v1 as tf\n'), ((11779, 11814), 'tensorflow.compat.v1.boolean_mask', 'tf.boolean_mask', (['targets', 'bool_mask'], {}), '(targets, bool_mask)\n', (11794, 11814), True, 'import tensorflow.compat.v1 as tf\n'), ((12044, 12080), 'tensorflow.compat.v1.gather', 'tf.gather', (['targets', 'shuffled_indices'], {}), '(targets, shuffled_indices)\n', (12053, 12080), True, 'import tensorflow.compat.v1 as tf\n'), ((12170, 12209), 'tensorflow.compat.v1.boolean_mask', 'tf.boolean_mask', (['full_labels', 'bool_mask'], {}), '(full_labels, bool_mask)\n', (12185, 12209), True, 'import tensorflow.compat.v1 as tf\n'), ((12226, 12266), 'tensorflow.compat.v1.gather', 'tf.gather', (['label_types', 'shuffled_indices'], {}), '(label_types, shuffled_indices)\n', (12235, 12266), True, 'import tensorflow.compat.v1 as tf\n'), ((13281, 13310), 'tensorflow.compat.v1.stack', 'tf.stack', (['bert_inputs'], {'axis': '(0)'}), '(bert_inputs, axis=0)\n', (13289, 13310), True, 'import tensorflow.compat.v1 as tf\n'), ((13327, 13356), 'tensorflow.compat.v1.stack', 'tf.stack', (['input_masks'], {'axis': '(0)'}), '(input_masks, axis=0)\n', (13335, 13356), True, 'import tensorflow.compat.v1 as tf\n'), ((13373, 13402), 'tensorflow.compat.v1.stack', 'tf.stack', (['segment_ids'], {'axis': '(0)'}), '(segment_ids, axis=0)\n', (13381, 13402), True, 'import tensorflow.compat.v1 as tf\n'), ((26434, 26475), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (26458, 26475), True, 'import tensorflow.compat.v1 as tf\n'), ((26479, 26573), 'bert.tokenization.validate_case_matches_checkpoint', 'tokenization.validate_case_matches_checkpoint', (['FLAGS.do_lower_case', 'FLAGS.init_checkpoint'], {}), '(FLAGS.do_lower_case, FLAGS.\n init_checkpoint)\n', (26524, 26573), False, 'from bert import tokenization\n'), ((26758, 26816), 'bert.modeling.BertConfig.from_json_file', 'modeling.BertConfig.from_json_file', (['FLAGS.bert_config_file'], {}), '(FLAGS.bert_config_file)\n', (26792, 26816), False, 'from bert import modeling\n'), ((27095, 27130), 'tensorflow.compat.v1.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['FLAGS.output_dir'], {}), '(FLAGS.output_dir)\n', (27112, 27130), True, 'import tensorflow.compat.v1 as tf\n'), ((28487, 28707), 'tensorflow.contrib.tpu.TPUEstimator', 'contrib_tpu.TPUEstimator', ([], {'use_tpu': 'FLAGS.use_tpu', 'model_fn': 'model_fn', 'config': 'run_config', 'train_batch_size': 'FLAGS.train_batch_size', 'eval_batch_size': 'FLAGS.eval_batch_size', 'predict_batch_size': 'FLAGS.predict_batch_size'}), '(use_tpu=FLAGS.use_tpu, model_fn=model_fn, config=\n run_config, train_batch_size=FLAGS.train_batch_size, 
eval_batch_size=\n FLAGS.eval_batch_size, predict_batch_size=FLAGS.predict_batch_size)\n', (28511, 28707), True, 'from tensorflow.contrib import tpu as contrib_tpu\n'), ((30307, 30347), 'absl.flags.mark_flag_as_required', 'flags.mark_flag_as_required', (['"""eval_file"""'], {}), "('eval_file')\n", (30334, 30347), False, 'from absl import flags\n'), ((30350, 30391), 'absl.flags.mark_flag_as_required', 'flags.mark_flag_as_required', (['"""vocab_file"""'], {}), "('vocab_file')\n", (30377, 30391), False, 'from absl import flags\n'), ((30394, 30441), 'absl.flags.mark_flag_as_required', 'flags.mark_flag_as_required', (['"""bert_config_file"""'], {}), "('bert_config_file')\n", (30421, 30441), False, 'from absl import flags\n'), ((30444, 30485), 'absl.flags.mark_flag_as_required', 'flags.mark_flag_as_required', (['"""output_dir"""'], {}), "('output_dir')\n", (30471, 30485), False, 'from absl import flags\n'), ((30488, 30501), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (30495, 30501), False, 'from absl import app\n'), ((6328, 6355), 'tensorflow.compat.v1.pad', 'tf.pad', (['tensor', 'end_padding'], {}), '(tensor, end_padding)\n', (6334, 6355), True, 'import tensorflow.compat.v1 as tf\n'), ((6941, 6979), 'tensorflow.compat.v1.zeros', 'tf.zeros', ([], {'shape': '(1, 1)', 'dtype': 'tf.int64'}), '(shape=(1, 1), dtype=tf.int64)\n', (6949, 6979), True, 'import tensorflow.compat.v1 as tf\n'), ((7178, 7226), 'tensorflow.compat.v1.range', 'tf.range', (['(0)'], {'limit': 'paragraph_len', 'dtype': 'tf.int32'}), '(0, limit=paragraph_len, dtype=tf.int32)\n', (7186, 7226), True, 'import tensorflow.compat.v1 as tf\n'), ((7497, 7526), 'tensorflow.compat.v1.constant', 'tf.constant', (['[[0, 1], [0, 0]]'], {}), '([[0, 1], [0, 0]])\n', (7508, 7526), True, 'import tensorflow.compat.v1 as tf\n'), ((7558, 7604), 'tensorflow.compat.v1.pad', 'tf.pad', (['distractor_cand_plus_one', 'paddings_one'], {}), '(distractor_cand_plus_one, paddings_one)\n', (7564, 7604), True, 'import tensorflow.compat.v1 as tf\n'), ((7625, 7654), 'tensorflow.compat.v1.constant', 'tf.constant', (['[[0, 2], [0, 0]]'], {}), '([[0, 2], [0, 0]])\n', (7636, 7654), True, 'import tensorflow.compat.v1 as tf\n'), ((7686, 7732), 'tensorflow.compat.v1.pad', 'tf.pad', (['distractor_cand_plus_two', 'paddings_two'], {}), '(distractor_cand_plus_two, paddings_two)\n', (7692, 7732), True, 'import tensorflow.compat.v1 as tf\n'), ((7760, 7852), 'tensorflow.compat.v1.concat', 'tf.concat', (['[distractor_cand, distractor_cand_plus_one, distractor_cand_plus_two]'], {'axis': '(1)'}), '([distractor_cand, distractor_cand_plus_one,\n distractor_cand_plus_two], axis=1)\n', (7769, 7852), True, 'import tensorflow.compat.v1 as tf\n'), ((7885, 7933), 'tensorflow.compat.v1.gather', 'tf.gather', (['distractor_cand_ext', 'shuffled_indices'], {}), '(distractor_cand_ext, shuffled_indices)\n', (7894, 7933), True, 'import tensorflow.compat.v1 as tf\n'), ((9554, 9569), 'tensorflow.compat.v1.abs', 'tf.abs', (['example'], {}), '(example)\n', (9560, 9569), True, 'import tensorflow.compat.v1 as tf\n'), ((9679, 9743), 'tensorflow.compat.v1.not_equal', 'tf.not_equal', (['intermediate_examples_tensor', 'examples_zero_vector'], {}), '(intermediate_examples_tensor, examples_zero_vector)\n', (9691, 9743), True, 'import tensorflow.compat.v1 as tf\n'), ((9777, 9814), 'tensorflow.compat.v1.cast', 'tf.cast', (['examples_bool_mask', 'tf.int32'], {}), '(examples_bool_mask, tf.int32)\n', (9784, 9814), True, 'import tensorflow.compat.v1 as tf\n'), ((10165, 10270), 
'tensorflow.compat.v1.concat', 'tf.concat', (['[[start, context_size, paragraph_len - context_size - start, max_len -\n paragraph_len]]', '(0)'], {}), '([[start, context_size, paragraph_len - context_size - start, \n max_len - paragraph_len]], 0)\n', (10174, 10270), True, 'import tensorflow.compat.v1 as tf\n'), ((11589, 11604), 'tensorflow.compat.v1.abs', 'tf.abs', (['targets'], {}), '(targets)\n', (11595, 11604), True, 'import tensorflow.compat.v1 as tf\n'), ((11689, 11735), 'tensorflow.compat.v1.not_equal', 'tf.not_equal', (['intermediate_tensor', 'zero_vector'], {}), '(intermediate_tensor, zero_vector)\n', (11701, 11735), True, 'import tensorflow.compat.v1 as tf\n'), ((12000, 12026), 'tensorflow.compat.v1.random.shuffle', 'tf.random.shuffle', (['indices'], {}), '(indices)\n', (12017, 12026), True, 'import tensorflow.compat.v1 as tf\n'), ((12480, 12521), 'tensorflow.compat.v1.gather_nd', 'tf.gather_nd', (['targets[i]', 'target_non_zero'], {}), '(targets[i], target_non_zero)\n', (12492, 12521), True, 'import tensorflow.compat.v1 as tf\n'), ((12927, 13006), 'tensorflow.compat.v1.concat', 'tf.concat', (['[CLS_ID, context_gathered, SEP_ID, targets_stripped, SEP_ID]'], {'axis': '(0)'}), '([CLS_ID, context_gathered, SEP_ID, targets_stripped, SEP_ID], axis=0)\n', (12936, 13006), True, 'import tensorflow.compat.v1 as tf\n'), ((13034, 13057), 'tensorflow.compat.v1.ones_like', 'tf.ones_like', (['new_input'], {}), '(new_input)\n', (13046, 13057), True, 'import tensorflow.compat.v1 as tf\n'), ((14036, 14113), 'tensorflow.compat.v1.FixedLenFeature', 'tf.FixedLenFeature', (['[FLAGS.max_para_length * FLAGS.max_sent_length]', 'tf.int64'], {}), '([FLAGS.max_para_length * FLAGS.max_sent_length], tf.int64)\n', (14054, 14113), True, 'import tensorflow.compat.v1 as tf\n'), ((14282, 14334), 'tensorflow.compat.v1.parse_single_example', 'tf.parse_single_example', (['record[0]', 'name_to_features'], {}), '(record[0], name_to_features)\n', (14305, 14334), True, 'import tensorflow.compat.v1 as tf\n'), ((14356, 14444), 'tensorflow.compat.v1.reshape', 'tf.reshape', (["target_example['sents']", '[FLAGS.max_para_length, FLAGS.max_sent_length]'], {}), "(target_example['sents'], [FLAGS.max_para_length, FLAGS.\n max_sent_length])\n", (14366, 14444), True, 'import tensorflow.compat.v1 as tf\n'), ((15637, 15704), 'tensorflow.compat.v1.concat', 'tf.concat', (['[inputs_obj.input_ids, distractor_obj.input_ids]'], {'axis': '(0)'}), '([inputs_obj.input_ids, distractor_obj.input_ids], axis=0)\n', (15646, 15704), True, 'import tensorflow.compat.v1 as tf\n'), ((15742, 15811), 'tensorflow.compat.v1.concat', 'tf.concat', (['[inputs_obj.input_mask, distractor_obj.input_mask]'], {'axis': '(0)'}), '([inputs_obj.input_mask, distractor_obj.input_mask], axis=0)\n', (15751, 15811), True, 'import tensorflow.compat.v1 as tf\n'), ((15850, 15921), 'tensorflow.compat.v1.concat', 'tf.concat', (['[inputs_obj.segment_ids, distractor_obj.segment_ids]'], {'axis': '(0)'}), '([inputs_obj.segment_ids, distractor_obj.segment_ids], axis=0)\n', (15859, 15921), True, 'import tensorflow.compat.v1 as tf\n'), ((19469, 19523), 'tensorflow.contrib.lookup.index_table_from_file', 'contrib_lookup.index_table_from_file', (['FLAGS.vocab_file'], {}), '(FLAGS.vocab_file)\n', (19505, 19523), True, 'from tensorflow.contrib import lookup as contrib_lookup\n'), ((21587, 21622), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""*** Features ***"""'], {}), "('*** Features ***')\n", (21602, 21622), True, 'import tensorflow.compat.v1 as tf\n'), ((21761, 21822), 
'tensorflow.compat.v1.reshape', 'tf.reshape', (["features['input_ids']", '[-1, FLAGS.max_seq_length]'], {}), "(features['input_ids'], [-1, FLAGS.max_seq_length])\n", (21771, 21822), True, 'import tensorflow.compat.v1 as tf\n'), ((21840, 21902), 'tensorflow.compat.v1.reshape', 'tf.reshape', (["features['input_mask']", '[-1, FLAGS.max_seq_length]'], {}), "(features['input_mask'], [-1, FLAGS.max_seq_length])\n", (21850, 21902), True, 'import tensorflow.compat.v1 as tf\n'), ((21921, 21984), 'tensorflow.compat.v1.reshape', 'tf.reshape', (["features['segment_ids']", '[-1, FLAGS.max_seq_length]'], {}), "(features['segment_ids'], [-1, FLAGS.max_seq_length])\n", (21931, 21984), True, 'import tensorflow.compat.v1 as tf\n'), ((22237, 22428), 'bert.modeling.BertModel', 'modeling.BertModel', ([], {'config': 'bert_config', 'is_training': 'is_training', 'input_ids': 'input_ids', 'input_mask': 'input_mask', 'token_type_ids': 'segment_ids', 'use_one_hot_embeddings': 'use_one_hot_embeddings'}), '(config=bert_config, is_training=is_training, input_ids=\n input_ids, input_mask=input_mask, token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n', (22255, 22428), False, 'from bert import modeling\n'), ((23777, 23801), 'tensorflow.compat.v1.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (23799, 23801), True, 'import tensorflow.compat.v1 as tf\n'), ((24295, 24343), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""**** Trainable Variables ****"""'], {}), "('**** Trainable Variables ****')\n", (24310, 24343), True, 'import tensorflow.compat.v1 as tf\n'), ((27228, 27340), 'tensorflow.contrib.cluster_resolver.TPUClusterResolver', 'contrib_cluster_resolver.TPUClusterResolver', (['FLAGS.tpu_name'], {'zone': 'FLAGS.tpu_zone', 'project': 'FLAGS.gcp_project'}), '(FLAGS.tpu_name, zone=FLAGS.\n tpu_zone, project=FLAGS.gcp_project)\n', (27271, 27340), True, 'from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver\n'), ((28761, 28808), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""***** Running training *****"""'], {}), "('***** Running training *****')\n", (28776, 28808), True, 'import tensorflow.compat.v1 as tf\n'), ((28813, 28873), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['""" Batch size = %d"""', 'FLAGS.train_batch_size'], {}), "(' Batch size = %d', FLAGS.train_batch_size)\n", (28828, 28873), True, 'import tensorflow.compat.v1 as tf\n'), ((28878, 28930), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['""" Num steps = %d"""', 'num_train_steps'], {}), "(' Num steps = %d', num_train_steps)\n", (28893, 28930), True, 'import tensorflow.compat.v1 as tf\n'), ((29850, 29908), 'tensorflow.contrib.training.checkpoints_iterator', 'contrib_training.checkpoints_iterator', (['estimator.model_dir'], {}), '(estimator.model_dir)\n', (29887, 29908), True, 'from tensorflow.contrib import training as contrib_training\n'), ((6894, 6909), 'tensorflow.compat.v1.abs', 'tf.abs', (['example'], {}), '(example)\n', (6900, 6909), True, 'import tensorflow.compat.v1 as tf\n'), ((7025, 7089), 'tensorflow.compat.v1.not_equal', 'tf.not_equal', (['intermediate_examples_tensor', 'examples_zero_vector'], {}), '(intermediate_examples_tensor, examples_zero_vector)\n', (7037, 7089), True, 'import tensorflow.compat.v1 as tf\n'), ((7125, 7162), 'tensorflow.compat.v1.cast', 'tf.cast', (['examples_bool_mask', 'tf.int32'], {}), '(examples_bool_mask, tf.int32)\n', (7132, 7162), True, 'import tensorflow.compat.v1 as tf\n'), ((7250, 7276), 
'tensorflow.compat.v1.random.shuffle', 'tf.random.shuffle', (['indices'], {}), '(indices)\n', (7267, 7276), True, 'import tensorflow.compat.v1 as tf\n'), ((8106, 8156), 'tensorflow.compat.v1.gather_nd', 'tf.gather_nd', (['distractors[i]', 'distractors_non_zero'], {}), '(distractors[i], distractors_non_zero)\n', (8118, 8156), True, 'import tensorflow.compat.v1 as tf\n'), ((8579, 8653), 'tensorflow.compat.v1.concat', 'tf.concat', (['[CLS_ID, context, SEP_ID, distractors_stripped, SEP_ID]'], {'axis': '(0)'}), '([CLS_ID, context, SEP_ID, distractors_stripped, SEP_ID], axis=0)\n', (8588, 8653), True, 'import tensorflow.compat.v1 as tf\n'), ((8685, 8708), 'tensorflow.compat.v1.ones_like', 'tf.ones_like', (['new_input'], {}), '(new_input)\n', (8697, 8708), True, 'import tensorflow.compat.v1 as tf\n'), ((10470, 10492), 'tensorflow.compat.v1.zeros_like', 'tf.zeros_like', (['context'], {}), '(context)\n', (10483, 10492), True, 'import tensorflow.compat.v1 as tf\n'), ((12108, 12127), 'tensorflow.compat.v1.range', 'tf.range', (['(3)', '(-1)', '(-1)'], {}), '(3, -1, -1)\n', (12116, 12127), True, 'import tensorflow.compat.v1 as tf\n'), ((12129, 12143), 'tensorflow.compat.v1.range', 'tf.range', (['(4)', '(8)'], {}), '(4, 8)\n', (12137, 12143), True, 'import tensorflow.compat.v1 as tf\n'), ((13853, 13877), 'tensorflow.compat.v1.io.gfile.glob', 'tf.io.gfile.glob', (['infile'], {}), '(infile)\n', (13869, 13877), True, 'import tensorflow.compat.v1 as tf\n'), ((16269, 16304), 'tensorflow.compat.v1.constant', 'tf.constant', (['[cls_token, sep_token]'], {}), '([cls_token, sep_token])\n', (16280, 16304), True, 'import tensorflow.compat.v1 as tf\n'), ((16494, 16527), 'tensorflow.compat.v1.to_int32', 'tf.to_int32', (["example['input_ids']"], {}), "(example['input_ids'])\n", (16505, 16527), True, 'import tensorflow.compat.v1 as tf\n'), ((16725, 16787), 'tensorflow.compat.v1.map_fn', 'tf.map_fn', (['call_sample_mask_indices', 'input_ids'], {'dtype': 'tf.int32'}), '(call_sample_mask_indices, input_ids, dtype=tf.int32)\n', (16734, 16787), True, 'import tensorflow.compat.v1 as tf\n'), ((17048, 17088), 'tensorflow.compat.v1.concat', 'tf.concat', (['[input_ids, mask_indices]', '(-1)'], {}), '([input_ids, mask_indices], -1)\n', (17057, 17088), True, 'import tensorflow.compat.v1 as tf\n'), ((17114, 17158), 'tensorflow.compat.v1.map_fn', 'tf.map_fn', (['call_get_target_tokens', 'map_input'], {}), '(call_get_target_tokens, map_input)\n', (17123, 17158), True, 'import tensorflow.compat.v1 as tf\n'), ((17596, 17654), 'tensorflow.compat.v1.concat', 'tf.concat', (['[input_ids, mask_indices, target_token_ids]', '(-1)'], {}), '([input_ids, mask_indices, target_token_ids], -1)\n', (17605, 17654), True, 'import tensorflow.compat.v1 as tf\n'), ((17764, 17812), 'tensorflow.compat.v1.ones_like', 'tf.ones_like', (['target_token_ids'], {'dtype': 'tf.float32'}), '(target_token_ids, dtype=tf.float32)\n', (17776, 17812), True, 'import tensorflow.compat.v1 as tf\n'), ((18981, 19023), 'tensorflow.compat.v1.reshape', 'tf.reshape', (["example['label_types']", '[4, 1]'], {}), "(example['label_types'], [4, 1])\n", (18991, 19023), True, 'import tensorflow.compat.v1 as tf\n'), ((19025, 19036), 'tensorflow.compat.v1.range', 'tf.range', (['(4)'], {}), '(4)\n', (19033, 19036), True, 'import tensorflow.compat.v1 as tf\n'), ((19568, 19610), 'tensorflow.compat.v1.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['expanded_files[0]'], {}), '(expanded_files[0])\n', (19591, 19610), True, 'import tensorflow.compat.v1 as tf\n'), ((20003, 20072), 
'tensorflow.compat.v1.data.experimental.sample_from_datasets', 'tf.data.experimental.sample_from_datasets', (['dataset_list', 'dset_weights'], {}), '(dataset_list, dset_weights)\n', (20044, 20072), True, 'import tensorflow.compat.v1 as tf\n'), ((20766, 20877), 'tensorflow.contrib.data.sliding_window_batch', 'contrib_data.sliding_window_batch', ([], {'window_size': 'FLAGS.data_window_size', 'window_shift': 'FLAGS.data_window_shift'}), '(window_size=FLAGS.data_window_size,\n window_shift=FLAGS.data_window_shift)\n', (20799, 20877), True, 'from tensorflow.contrib import data as contrib_data\n'), ((21670, 21743), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (["(' name = %s, shape = %s' % (name, features[name].shape))"], {}), "(' name = %s, shape = %s' % (name, features[name].shape))\n", (21685, 21743), True, 'import tensorflow.compat.v1 as tf\n'), ((22188, 22214), 'tensorflow.compat.v1.one_hot', 'tf.one_hot', (['label_types', '(8)'], {}), '(label_types, 8)\n', (22198, 22214), True, 'import tensorflow.compat.v1 as tf\n'), ((22959, 23026), 'tensorflow.compat.v1.reshape', 'tf.reshape', (["features['mask_indices']", '[-1, max_predictions_per_seq]'], {}), "(features['mask_indices'], [-1, max_predictions_per_seq])\n", (22969, 23026), True, 'import tensorflow.compat.v1 as tf\n'), ((23088, 23159), 'tensorflow.compat.v1.reshape', 'tf.reshape', (["features['target_token_ids']", '[-1, max_predictions_per_seq]'], {}), "(features['target_token_ids'], [-1, max_predictions_per_seq])\n", (23098, 23159), True, 'import tensorflow.compat.v1 as tf\n'), ((23219, 23294), 'tensorflow.compat.v1.reshape', 'tf.reshape', (["features['target_token_weights']", '[-1, max_predictions_per_seq]'], {}), "(features['target_token_weights'], [-1, max_predictions_per_seq])\n", (23229, 23294), True, 'import tensorflow.compat.v1 as tf\n'), ((23945, 24012), 'bert.modeling.get_assignment_map_from_checkpoint', 'modeling.get_assignment_map_from_checkpoint', (['tvars', 'init_checkpoint'], {}), '(tvars, init_checkpoint)\n', (23988, 24012), False, 'from bert import modeling\n'), ((24487, 24565), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['""" name = %s, shape = %s%s"""', 'var.name', 'var.shape', 'init_string'], {}), "(' name = %s, shape = %s%s', var.name, var.shape, init_string)\n", (24502, 24565), True, 'import tensorflow.compat.v1 as tf\n'), ((24674, 24778), 'bert.optimization.create_optimizer', 'optimization.create_optimizer', (['total_loss', 'learning_rate', 'num_train_steps', 'num_warmup_steps', 'use_tpu'], {}), '(total_loss, learning_rate, num_train_steps,\n num_warmup_steps, use_tpu)\n', (24703, 24778), False, 'from bert import optimization\n'), ((24890, 24994), 'tensorflow.contrib.tpu.TPUEstimatorSpec', 'contrib_tpu.TPUEstimatorSpec', ([], {'mode': 'mode', 'loss': 'total_loss', 'train_op': 'train_op', 'scaffold_fn': 'scaffold_fn'}), '(mode=mode, loss=total_loss, train_op=train_op,\n scaffold_fn=scaffold_fn)\n', (24918, 24994), True, 'from tensorflow.contrib import tpu as contrib_tpu\n'), ((27617, 27762), 'tensorflow.contrib.tpu.TPUConfig', 'contrib_tpu.TPUConfig', ([], {'iterations_per_loop': 'FLAGS.iterations_per_loop', 'num_shards': 'FLAGS.num_tpu_cores', 'per_host_input_for_training': 'is_per_host'}), '(iterations_per_loop=FLAGS.iterations_per_loop,\n num_shards=FLAGS.num_tpu_cores, per_host_input_for_training=is_per_host)\n', (27638, 27762), True, 'from tensorflow.contrib import tpu as contrib_tpu\n'), ((9909, 9938), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['paragraph_len', '[]'], {}), 
'(paragraph_len, [])\n', (9919, 9938), True, 'import tensorflow.compat.v1 as tf\n'), ((9969, 9997), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['context_size', '[]'], {}), '(context_size, [])\n', (9979, 9997), True, 'import tensorflow.compat.v1 as tf\n'), ((11941, 11958), 'tensorflow.compat.v1.shape', 'tf.shape', (['targets'], {}), '(targets)\n', (11949, 11958), True, 'import tensorflow.compat.v1 as tf\n'), ((12429, 12454), 'tensorflow.compat.v1.zeros_like', 'tf.zeros_like', (['targets[i]'], {}), '(targets[i])\n', (12442, 12454), True, 'import tensorflow.compat.v1 as tf\n'), ((12559, 12596), 'tensorflow.compat.v1.zeros_like', 'tf.zeros_like', (['CLS_ID'], {'dtype': 'tf.int64'}), '(CLS_ID, dtype=tf.int64)\n', (12572, 12596), True, 'import tensorflow.compat.v1 as tf\n'), ((12606, 12637), 'tensorflow.compat.v1.zeros_like', 'tf.zeros_like', (['context_gathered'], {}), '(context_gathered)\n', (12619, 12637), True, 'import tensorflow.compat.v1 as tf\n'), ((12647, 12684), 'tensorflow.compat.v1.zeros_like', 'tf.zeros_like', (['SEP_ID'], {'dtype': 'tf.int64'}), '(SEP_ID, dtype=tf.int64)\n', (12660, 12684), True, 'import tensorflow.compat.v1 as tf\n'), ((12694, 12724), 'tensorflow.compat.v1.ones_like', 'tf.ones_like', (['targets_stripped'], {}), '(targets_stripped)\n', (12706, 12724), True, 'import tensorflow.compat.v1 as tf\n'), ((12734, 12770), 'tensorflow.compat.v1.ones_like', 'tf.ones_like', (['SEP_ID'], {'dtype': 'tf.int64'}), '(SEP_ID, dtype=tf.int64)\n', (12746, 12770), True, 'import tensorflow.compat.v1 as tf\n'), ((14839, 14891), 'tensorflow.compat.v1.parse_single_example', 'tf.parse_single_example', (['record[1]', 'name_to_features'], {}), '(record[1], name_to_features)\n', (14862, 14891), True, 'import tensorflow.compat.v1 as tf\n'), ((14990, 15042), 'tensorflow.compat.v1.parse_single_example', 'tf.parse_single_example', (['record[2]', 'name_to_features'], {}), '(record[2], name_to_features)\n', (15013, 15042), True, 'import tensorflow.compat.v1 as tf\n'), ((15141, 15193), 'tensorflow.compat.v1.parse_single_example', 'tf.parse_single_example', (['record[3]', 'name_to_features'], {}), '(record[3], name_to_features)\n', (15164, 15193), True, 'import tensorflow.compat.v1 as tf\n'), ((15292, 15344), 'tensorflow.compat.v1.parse_single_example', 'tf.parse_single_example', (['record[4]', 'name_to_features'], {}), '(record[4], name_to_features)\n', (15315, 15344), True, 'import tensorflow.compat.v1 as tf\n'), ((16093, 16136), 'math.ceil', 'math.ceil', (['(FLAGS.max_seq_length * mask_rate)'], {}), '(FLAGS.max_seq_length * mask_rate)\n', (16102, 16136), False, 'import math\n'), ((16583, 16668), 'language.conpono.reconstruct.preprocess.sample_mask_indices', 'ip.sample_mask_indices', (['x', 'mask_rate', 'mask_blacklist_ids', 'max_predictions_per_seq'], {}), '(x, mask_rate, mask_blacklist_ids,\n max_predictions_per_seq)\n', (16605, 16668), True, 'from language.conpono.reconstruct import preprocess as ip\n'), ((16970, 17028), 'language.conpono.reconstruct.preprocess.get_target_tokens_for_apply', 'ip.get_target_tokens_for_apply', (['x_input_id', 'x_mask_indices'], {}), '(x_input_id, x_mask_indices)\n', (17000, 17028), True, 'from language.conpono.reconstruct import preprocess as ip\n'), ((17458, 17547), 'language.conpono.reconstruct.preprocess.apply_masking', 'ip.apply_masking', (['x_input_id', 'x_target_token_ids', 'x_mask_indices', 'mask_token_id', '(1000)'], {}), '(x_input_id, x_target_token_ids, x_mask_indices,\n mask_token_id, 1000)\n', (17474, 17547), True, 'from language.conpono.reconstruct 
import preprocess as ip\n'), ((17710, 17733), 'tensorflow.compat.v1.to_int64', 'tf.to_int64', (['map_input2'], {}), '(map_input2)\n', (17721, 17733), True, 'import tensorflow.compat.v1 as tf\n'), ((17853, 17882), 'tensorflow.compat.v1.equal', 'tf.equal', (['target_token_ids', '(0)'], {}), '(target_token_ids, 0)\n', (17861, 17882), True, 'import tensorflow.compat.v1 as tf\n'), ((17894, 17942), 'tensorflow.compat.v1.ones_like', 'tf.ones_like', (['target_token_ids'], {'dtype': 'tf.float32'}), '(target_token_ids, dtype=tf.float32)\n', (17906, 17942), True, 'import tensorflow.compat.v1 as tf\n'), ((17954, 18003), 'tensorflow.compat.v1.zeros_like', 'tf.zeros_like', (['target_token_ids'], {'dtype': 'tf.float32'}), '(target_token_ids, dtype=tf.float32)\n', (17967, 18003), True, 'import tensorflow.compat.v1 as tf\n'), ((19291, 19305), 'tensorflow.compat.v1.to_int32', 'tf.to_int32', (['t'], {}), '(t)\n', (19302, 19305), True, 'import tensorflow.compat.v1 as tf\n'), ((19738, 19780), 'tensorflow.compat.v1.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['expanded_files[i]'], {}), '(expanded_files[i])\n', (19761, 19780), True, 'import tensorflow.compat.v1 as tf\n'), ((22886, 22929), 'math.ceil', 'math.ceil', (['(FLAGS.max_seq_length * mask_rate)'], {}), '(FLAGS.max_seq_length * mask_rate)\n', (22895, 22929), False, 'import math\n'), ((24227, 24289), 'tensorflow.compat.v1.train.init_from_checkpoint', 'tf.train.init_from_checkpoint', (['init_checkpoint', 'assignment_map'], {}), '(init_checkpoint, assignment_map)\n', (24256, 24289), True, 'import tensorflow.compat.v1 as tf\n'), ((26053, 26166), 'tensorflow.contrib.tpu.TPUEstimatorSpec', 'contrib_tpu.TPUEstimatorSpec', ([], {'mode': 'mode', 'loss': 'total_loss', 'eval_metrics': 'eval_metrics', 'scaffold_fn': 'scaffold_fn'}), '(mode=mode, loss=total_loss, eval_metrics=\n eval_metrics, scaffold_fn=scaffold_fn)\n', (26081, 26166), True, 'from tensorflow.contrib import tpu as contrib_tpu\n'), ((26233, 26347), 'tensorflow.contrib.tpu.TPUEstimatorSpec', 'contrib_tpu.TPUEstimatorSpec', ([], {'mode': 'mode', 'predictions': "{'probabilities': probabilities}", 'scaffold_fn': 'scaffold_fn'}), "(mode=mode, predictions={'probabilities':\n probabilities}, scaffold_fn=scaffold_fn)\n", (26261, 26347), True, 'from tensorflow.contrib import tpu as contrib_tpu\n'), ((30007, 30059), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""********** Eval results:*******\n"""'], {}), "('********** Eval results:*******\\n')\n", (30022, 30059), True, 'import tensorflow.compat.v1 as tf\n'), ((8045, 8074), 'tensorflow.compat.v1.zeros_like', 'tf.zeros_like', (['distractors[i]'], {}), '(distractors[i])\n', (8058, 8074), True, 'import tensorflow.compat.v1 as tf\n'), ((8198, 8235), 'tensorflow.compat.v1.zeros_like', 'tf.zeros_like', (['CLS_ID'], {'dtype': 'tf.int64'}), '(CLS_ID, dtype=tf.int64)\n', (8211, 8235), True, 'import tensorflow.compat.v1 as tf\n'), ((8247, 8269), 'tensorflow.compat.v1.zeros_like', 'tf.zeros_like', (['context'], {}), '(context)\n', (8260, 8269), True, 'import tensorflow.compat.v1 as tf\n'), ((8281, 8318), 'tensorflow.compat.v1.zeros_like', 'tf.zeros_like', (['SEP_ID'], {'dtype': 'tf.int64'}), '(SEP_ID, dtype=tf.int64)\n', (8294, 8318), True, 'import tensorflow.compat.v1 as tf\n'), ((8330, 8364), 'tensorflow.compat.v1.ones_like', 'tf.ones_like', (['distractors_stripped'], {}), '(distractors_stripped)\n', (8342, 8364), True, 'import tensorflow.compat.v1 as tf\n'), ((8376, 8412), 'tensorflow.compat.v1.ones_like', 'tf.ones_like', (['SEP_ID'], {'dtype': 
'tf.int64'}), '(SEP_ID, dtype=tf.int64)\n', (8388, 8412), True, 'import tensorflow.compat.v1 as tf\n'), ((16450, 16473), 'tensorflow.compat.v1.constant', 'tf.constant', (['mask_token'], {}), '(mask_token)\n', (16461, 16473), True, 'import tensorflow.compat.v1 as tf\n'), ((16857, 16876), 'tensorflow.compat.v1.shape', 'tf.shape', (['input_ids'], {}), '(input_ids)\n', (16865, 16876), True, 'import tensorflow.compat.v1 as tf\n'), ((17213, 17232), 'tensorflow.compat.v1.shape', 'tf.shape', (['input_ids'], {}), '(input_ids)\n', (17221, 17232), True, 'import tensorflow.compat.v1 as tf\n'), ((17260, 17282), 'tensorflow.compat.v1.shape', 'tf.shape', (['mask_indices'], {}), '(mask_indices)\n', (17268, 17282), True, 'import tensorflow.compat.v1 as tf\n'), ((24071, 24133), 'tensorflow.compat.v1.train.init_from_checkpoint', 'tf.train.init_from_checkpoint', (['init_checkpoint', 'assignment_map'], {}), '(init_checkpoint, assignment_map)\n', (24100, 24133), True, 'import tensorflow.compat.v1 as tf\n'), ((24151, 24170), 'tensorflow.compat.v1.train.Scaffold', 'tf.train.Scaffold', ([], {}), '()\n', (24168, 24170), True, 'import tensorflow.compat.v1 as tf\n'), ((25223, 25271), 'tensorflow.compat.v1.argmax', 'tf.argmax', (['logits'], {'axis': '(-1)', 'output_type': 'tf.int32'}), '(logits, axis=-1, output_type=tf.int32)\n', (25232, 25271), True, 'import tensorflow.compat.v1 as tf\n'), ((25291, 25383), 'tensorflow.compat.v1.metrics.accuracy', 'tf.metrics.accuracy', ([], {'labels': 'label_ids', 'predictions': 'predictions', 'weights': 'is_real_example'}), '(labels=label_ids, predictions=predictions, weights=\n is_real_example)\n', (25310, 25383), True, 'import tensorflow.compat.v1 as tf\n'), ((25418, 25450), 'tensorflow.compat.v1.metrics.mean', 'tf.metrics.mean', ([], {'values': 'cpc_loss'}), '(values=cpc_loss)\n', (25433, 25450), True, 'import tensorflow.compat.v1 as tf\n'), ((25477, 25509), 'tensorflow.compat.v1.metrics.mean', 'tf.metrics.mean', ([], {'values': 'mlm_loss'}), '(values=mlm_loss)\n', (25492, 25509), True, 'import tensorflow.compat.v1 as tf\n'), ((30211, 30275), 'tensorflow.compat.v1.logging.error', 'tf.logging.error', (['"""Checkpoint path \'%s\' no longer exists."""', 'ckpt'], {}), '("Checkpoint path \'%s\' no longer exists.", ckpt)\n', (30227, 30275), True, 'import tensorflow.compat.v1 as tf\n'), ((25741, 25850), 'tensorflow.compat.v1.metrics.accuracy', 'tf.metrics.accuracy', ([], {'labels': 'label_ids[:, i]', 'predictions': 'predictions[:, i]', 'weights': 'is_real_example[:, i]'}), '(labels=label_ids[:, i], predictions=predictions[:, i],\n weights=is_real_example[:, i])\n', (25760, 25850), True, 'import tensorflow.compat.v1 as tf\n')]
|
# This file is part of pybootchartgui.
# pybootchartgui is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pybootchartgui is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pybootchartgui. If not, see <http://www.gnu.org/licenses/>.
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk as gtk
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GObject as gobject
from gi.repository import GObject
from . import draw
from .draw import RenderOptions
class PyBootchartWidget(gtk.DrawingArea, gtk.Scrollable):
__gsignals__ = {
'clicked' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_STRING, Gdk.Event)),
'position-changed' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_INT, gobject.TYPE_INT)),
'set-scroll-adjustments' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gtk.Adjustment, gtk.Adjustment))
}
hadjustment = GObject.property(type=Gtk.Adjustment,
default=Gtk.Adjustment(),
flags=GObject.PARAM_READWRITE)
hscroll_policy = GObject.property(type=Gtk.ScrollablePolicy,
default=Gtk.ScrollablePolicy.MINIMUM,
flags=GObject.PARAM_READWRITE)
vadjustment = GObject.property(type=Gtk.Adjustment,
default=Gtk.Adjustment(),
flags=GObject.PARAM_READWRITE)
vscroll_policy = GObject.property(type=Gtk.ScrollablePolicy,
default=Gtk.ScrollablePolicy.MINIMUM,
flags=GObject.PARAM_READWRITE)
def __init__(self, trace, options, xscale):
gtk.DrawingArea.__init__(self)
self.trace = trace
self.options = options
self.set_can_focus(True)
self.add_events(Gdk.EventMask.BUTTON_PRESS_MASK | Gdk.EventMask.BUTTON_RELEASE_MASK)
self.connect("button-press-event", self.on_area_button_press)
self.connect("button-release-event", self.on_area_button_release)
self.add_events(Gdk.EventMask.POINTER_MOTION_MASK | Gdk.EventMask.POINTER_MOTION_HINT_MASK | Gdk.EventMask.BUTTON_RELEASE_MASK)
self.connect("motion-notify-event", self.on_area_motion_notify)
self.connect("scroll-event", self.on_area_scroll_event)
self.connect('key-press-event', self.on_key_press_event)
self.connect("size-allocate", self.on_allocation_size_changed)
self.connect("position-changed", self.on_position_changed)
self.connect("draw", self.on_draw)
self.zoom_ratio = 1.0
self.xscale = xscale
self.x, self.y = 0.0, 0.0
self.chart_width, self.chart_height = draw.extents(self.options, self.xscale, self.trace)
self.our_width, self.our_height = self.chart_width, self.chart_height
self.hadj = gtk.Adjustment(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
self.vadj = gtk.Adjustment(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
self.vadj.connect('value-changed', self.on_adjustments_changed)
self.hadj.connect('value-changed', self.on_adjustments_changed)
def bound_vals(self):
self.x = max(0, self.x)
self.y = max(0, self.y)
self.x = min(self.chart_width - self.our_width, self.x)
self.y = min(self.chart_height - self.our_height, self.y)
def on_draw(self, darea, cr):
# set a clip region
#cr.rectangle(
# self.x, self.y,
# self.chart_width, self.chart_height
#)
#cr.clip()
cr.set_source_rgba(1.0, 1.0, 1.0, 1.0)
cr.paint()
cr.scale(self.zoom_ratio, self.zoom_ratio)
cr.translate(-self.x, -self.y)
draw.render(cr, self.options, self.xscale, self.trace)
def position_changed(self):
self.emit("position-changed", self.x, self.y)
ZOOM_INCREMENT = 1.25
def zoom_image (self, zoom_ratio):
self.zoom_ratio = zoom_ratio
self._set_scroll_adjustments()
self.queue_draw()
def zoom_to_rect (self, rect):
zoom_ratio = float(rect.width)/float(self.chart_width)
self.zoom_image(zoom_ratio)
self.x = 0
self.position_changed()
def set_xscale(self, xscale):
old_mid_x = self.x + self.hadj.page_size / 2
self.xscale = xscale
self.chart_width, self.chart_height = draw.extents(self.options, self.xscale, self.trace)
new_x = old_mid_x
self.zoom_image (self.zoom_ratio)
def on_expand(self, action):
self.set_xscale (int(self.xscale * 1.5 + 0.5))
def on_contract(self, action):
self.set_xscale (max(int(self.xscale / 1.5), 1))
def on_zoom_in(self, action):
self.zoom_image(self.zoom_ratio * self.ZOOM_INCREMENT)
def on_zoom_out(self, action):
self.zoom_image(self.zoom_ratio / self.ZOOM_INCREMENT)
def on_zoom_fit(self, action):
self.zoom_to_rect(self.get_allocation())
def on_zoom_100(self, action):
self.zoom_image(1.0)
self.set_xscale(1.0)
def show_toggled(self, button):
self.options.app_options.show_all = button.get_property ('active')
self.chart_width, self.chart_height = draw.extents(self.options, self.xscale, self.trace)
self._set_scroll_adjustments()
self.queue_draw()
POS_INCREMENT = 100
def on_key_press_event(self, widget, event):
if event.keyval == Gdk.keyval_from_name("Left"):
self.x -= self.POS_INCREMENT/self.zoom_ratio
elif event.keyval == Gdk.keyval_from_name("Right"):
self.x += self.POS_INCREMENT/self.zoom_ratio
elif event.keyval == Gdk.keyval_from_name("Up"):
self.y -= self.POS_INCREMENT/self.zoom_ratio
elif event.keyval == Gdk.keyval_from_name("Down"):
self.y += self.POS_INCREMENT/self.zoom_ratio
else:
return False
self.bound_vals()
self.queue_draw()
self.position_changed()
return True
def on_area_button_press(self, area, event):
if event.button == 2 or event.button == 1:
window = self.get_window()
window.set_cursor(Gdk.Cursor(Gdk.CursorType.FLEUR))
self.prevmousex = event.x
self.prevmousey = event.y
if event.type not in (Gdk.EventType.BUTTON_PRESS, Gdk.EventType.BUTTON_RELEASE):
return False
return False
def on_area_button_release(self, area, event):
if event.button == 2 or event.button == 1:
window = self.get_window()
window.set_cursor(Gdk.Cursor(Gdk.CursorType.ARROW))
self.prevmousex = None
self.prevmousey = None
return True
return False
def on_area_scroll_event(self, area, event):
if event.state & Gdk.CONTROL_MASK:
if event.direction == Gdk.SCROLL_UP:
self.zoom_image(self.zoom_ratio * self.ZOOM_INCREMENT)
return True
if event.direction == Gdk.SCROLL_DOWN:
self.zoom_image(self.zoom_ratio / self.ZOOM_INCREMENT)
return True
return False
def on_area_motion_notify(self, area, event):
state = event.state
if state & Gdk.ModifierType.BUTTON2_MASK or state & Gdk.ModifierType.BUTTON1_MASK:
x, y = int(event.x), int(event.y)
# pan the image
self.x += (self.prevmousex - x)/self.zoom_ratio
self.y += (self.prevmousey - y)/self.zoom_ratio
self.bound_vals()
self.queue_draw()
self.prevmousex = x
self.prevmousey = y
self.position_changed()
return True
def on_allocation_size_changed(self, widget, allocation):
self.hadj.page_size = allocation.width
self.hadj.page_increment = allocation.width * 0.9
self.vadj.page_size = allocation.height
self.vadj.page_increment = allocation.height * 0.9
self.our_width = allocation.width
if self.chart_width < self.our_width:
self.our_width = self.chart_width
self.our_height = allocation.height
if self.chart_height < self.our_height:
self.our_height = self.chart_height
self._set_scroll_adjustments()
def _set_adj_upper(self, adj, upper):
if adj.get_upper() != upper:
adj.set_upper(upper)
def _set_scroll_adjustments(self):
self._set_adj_upper (self.hadj, self.zoom_ratio * (self.chart_width - self.our_width))
self._set_adj_upper (self.vadj, self.zoom_ratio * (self.chart_height - self.our_height))
def on_adjustments_changed(self, adj):
self.x = self.hadj.get_value() / self.zoom_ratio
self.y = self.vadj.get_value() / self.zoom_ratio
self.queue_draw()
def on_position_changed(self, widget, x, y):
self.hadj.set_value(x * self.zoom_ratio)
#self.hadj.value_changed()
self.vadj.set_value(y * self.zoom_ratio)
class PyBootchartShell(gtk.VBox):
ui = '''
<ui>
<toolbar name="ToolBar">
<toolitem action="Expand"/>
<toolitem action="Contract"/>
<separator/>
<toolitem action="ZoomIn"/>
<toolitem action="ZoomOut"/>
<toolitem action="ZoomFit"/>
<toolitem action="Zoom100"/>
</toolbar>
</ui>
'''
def __init__(self, window, trace, options, xscale):
gtk.VBox.__init__(self)
self.widget2 = PyBootchartWidget(trace, options, xscale)
# Create a UIManager instance
uimanager = self.uimanager = gtk.UIManager()
# Add the accelerator group to the toplevel window
accelgroup = uimanager.get_accel_group()
window.add_accel_group(accelgroup)
# Create an ActionGroup
actiongroup = gtk.ActionGroup('Actions')
self.actiongroup = actiongroup
# Create actions
actiongroup.add_actions((
('Expand', gtk.STOCK_ADD, None, None, None, self.widget2.on_expand),
('Contract', gtk.STOCK_REMOVE, None, None, None, self.widget2.on_contract),
('ZoomIn', gtk.STOCK_ZOOM_IN, None, None, None, self.widget2.on_zoom_in),
('ZoomOut', gtk.STOCK_ZOOM_OUT, None, None, None, self.widget2.on_zoom_out),
('ZoomFit', gtk.STOCK_ZOOM_FIT, 'Fit Width', None, None, self.widget2.on_zoom_fit),
('Zoom100', gtk.STOCK_ZOOM_100, None, None, None, self.widget2.on_zoom_100),
))
# Add the actiongroup to the uimanager
uimanager.insert_action_group(actiongroup, 0)
# Add a UI description
uimanager.add_ui_from_string(self.ui)
# Scrolled window
scrolled = gtk.ScrolledWindow(self.widget2.hadj, self.widget2.vadj)
scrolled.add(self.widget2)
#scrolled.set_hadjustment()
#scrolled.set_vadjustment(self.widget2.vadj)
scrolled.set_policy(gtk.PolicyType.ALWAYS, gtk.PolicyType.ALWAYS)
# toolbar / h-box
hbox = gtk.HBox(False, 8)
# Create a Toolbar
toolbar = uimanager.get_widget('/ToolBar')
hbox.pack_start(toolbar, True, True, 0)
if not options.kernel_only:
# Misc. options
button = gtk.CheckButton("Show more")
button.connect ('toggled', self.widget2.show_toggled)
button.set_active(options.app_options.show_all)
hbox.pack_start (button, False, True, 0)
self.pack_start(hbox, False, True, 0)
self.pack_start(scrolled, True, True, 0)
self.show_all()
def grab_focus(self, window):
window.set_focus(self.widget2)
class PyBootchartWindow(gtk.Window):
def __init__(self, trace, app_options):
gtk.Window.__init__(self)
window = self
window.set_title("Bootchart %s" % trace.filename)
window.set_default_size(750, 550)
tab_page = gtk.Notebook()
tab_page.show()
window.add(tab_page)
full_opts = RenderOptions(app_options)
full_tree = PyBootchartShell(window, trace, full_opts, 1.0)
tab_page.append_page (full_tree, gtk.Label("Full tree"))
if trace.kernel is not None and len (trace.kernel) > 2:
kernel_opts = RenderOptions(app_options)
kernel_opts.cumulative = False
kernel_opts.charts = False
kernel_opts.kernel_only = True
kernel_tree = PyBootchartShell(window, trace, kernel_opts, 5.0)
tab_page.append_page (kernel_tree, gtk.Label("Kernel boot"))
full_tree.grab_focus(self)
self.show()
def show(trace, options):
win = PyBootchartWindow(trace, options)
win.connect('destroy', gtk.main_quit)
gtk.main()
|
[
"gi.repository.Gtk.CheckButton",
"gi.repository.Gdk.Cursor",
"gi.require_version",
"gi.repository.Gtk.VBox.__init__",
"gi.repository.Gtk.main",
"gi.repository.Gtk.UIManager",
"gi.repository.Gtk.Adjustment",
"gi.repository.Gtk.Window.__init__",
"gi.repository.Gtk.Notebook",
"gi.repository.Gdk.keyval_from_name",
"gi.repository.Gtk.DrawingArea.__init__",
"gi.repository.Gtk.HBox",
"gi.repository.GObject.property",
"gi.repository.Gtk.ScrolledWindow",
"gi.repository.Gtk.Label",
"gi.repository.Gtk.ActionGroup"
] |
[((699, 731), 'gi.require_version', 'gi.require_version', (['"""Gtk"""', '"""3.0"""'], {}), "('Gtk', '3.0')\n", (717, 731), False, 'import gi\n'), ((1591, 1708), 'gi.repository.GObject.property', 'GObject.property', ([], {'type': 'Gtk.ScrollablePolicy', 'default': 'Gtk.ScrollablePolicy.MINIMUM', 'flags': 'GObject.PARAM_READWRITE'}), '(type=Gtk.ScrollablePolicy, default=Gtk.ScrollablePolicy.\n MINIMUM, flags=GObject.PARAM_READWRITE)\n', (1607, 1708), False, 'from gi.repository import GObject\n'), ((1984, 2101), 'gi.repository.GObject.property', 'GObject.property', ([], {'type': 'Gtk.ScrollablePolicy', 'default': 'Gtk.ScrollablePolicy.MINIMUM', 'flags': 'GObject.PARAM_READWRITE'}), '(type=Gtk.ScrollablePolicy, default=Gtk.ScrollablePolicy.\n MINIMUM, flags=GObject.PARAM_READWRITE)\n', (2000, 2101), False, 'from gi.repository import GObject\n'), ((13372, 13382), 'gi.repository.Gtk.main', 'gtk.main', ([], {}), '()\n', (13380, 13382), True, 'from gi.repository import Gtk as gtk\n'), ((2230, 2260), 'gi.repository.Gtk.DrawingArea.__init__', 'gtk.DrawingArea.__init__', (['self'], {}), '(self)\n', (2254, 2260), True, 'from gi.repository import Gtk as gtk\n'), ((3404, 3448), 'gi.repository.Gtk.Adjustment', 'gtk.Adjustment', (['(0.0)', '(0.0)', '(0.0)', '(0.0)', '(0.0)', '(0.0)'], {}), '(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)\n', (3418, 3448), True, 'from gi.repository import Gtk as gtk\n'), ((3469, 3513), 'gi.repository.Gtk.Adjustment', 'gtk.Adjustment', (['(0.0)', '(0.0)', '(0.0)', '(0.0)', '(0.0)', '(0.0)'], {}), '(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)\n', (3483, 3513), True, 'from gi.repository import Gtk as gtk\n'), ((10059, 10082), 'gi.repository.Gtk.VBox.__init__', 'gtk.VBox.__init__', (['self'], {}), '(self)\n', (10076, 10082), True, 'from gi.repository import Gtk as gtk\n'), ((10225, 10240), 'gi.repository.Gtk.UIManager', 'gtk.UIManager', ([], {}), '()\n', (10238, 10240), True, 'from gi.repository import Gtk as gtk\n'), ((10448, 10474), 'gi.repository.Gtk.ActionGroup', 'gtk.ActionGroup', (['"""Actions"""'], {}), "('Actions')\n", (10463, 10474), True, 'from gi.repository import Gtk as gtk\n'), ((11364, 11420), 'gi.repository.Gtk.ScrolledWindow', 'gtk.ScrolledWindow', (['self.widget2.hadj', 'self.widget2.vadj'], {}), '(self.widget2.hadj, self.widget2.vadj)\n', (11382, 11420), True, 'from gi.repository import Gtk as gtk\n'), ((11662, 11680), 'gi.repository.Gtk.HBox', 'gtk.HBox', (['(False)', '(8)'], {}), '(False, 8)\n', (11670, 11680), True, 'from gi.repository import Gtk as gtk\n'), ((12388, 12413), 'gi.repository.Gtk.Window.__init__', 'gtk.Window.__init__', (['self'], {}), '(self)\n', (12407, 12413), True, 'from gi.repository import Gtk as gtk\n'), ((12557, 12571), 'gi.repository.Gtk.Notebook', 'gtk.Notebook', ([], {}), '()\n', (12569, 12571), True, 'from gi.repository import Gtk as gtk\n'), ((1486, 1502), 'gi.repository.Gtk.Adjustment', 'Gtk.Adjustment', ([], {}), '()\n', (1500, 1502), False, 'from gi.repository import Gtk\n'), ((1879, 1895), 'gi.repository.Gtk.Adjustment', 'Gtk.Adjustment', ([], {}), '()\n', (1893, 1895), False, 'from gi.repository import Gtk\n'), ((5960, 5988), 'gi.repository.Gdk.keyval_from_name', 'Gdk.keyval_from_name', (['"""Left"""'], {}), "('Left')\n", (5980, 5988), False, 'from gi.repository import Gdk\n'), ((11894, 11922), 'gi.repository.Gtk.CheckButton', 'gtk.CheckButton', (['"""Show more"""'], {}), "('Show more')\n", (11909, 11922), True, 'from gi.repository import Gtk as gtk\n'), ((12782, 12804), 'gi.repository.Gtk.Label', 'gtk.Label', (['"""Full tree"""'], {}), "('Full 
tree')\n", (12791, 12804), True, 'from gi.repository import Gtk as gtk\n'), ((6076, 6105), 'gi.repository.Gdk.keyval_from_name', 'Gdk.keyval_from_name', (['"""Right"""'], {}), "('Right')\n", (6096, 6105), False, 'from gi.repository import Gdk\n'), ((6707, 6739), 'gi.repository.Gdk.Cursor', 'Gdk.Cursor', (['Gdk.CursorType.FLEUR'], {}), '(Gdk.CursorType.FLEUR)\n', (6717, 6739), False, 'from gi.repository import Gdk\n'), ((7124, 7156), 'gi.repository.Gdk.Cursor', 'Gdk.Cursor', (['Gdk.CursorType.ARROW'], {}), '(Gdk.CursorType.ARROW)\n', (7134, 7156), False, 'from gi.repository import Gdk\n'), ((13172, 13196), 'gi.repository.Gtk.Label', 'gtk.Label', (['"""Kernel boot"""'], {}), "('Kernel boot')\n", (13181, 13196), True, 'from gi.repository import Gtk as gtk\n'), ((6193, 6219), 'gi.repository.Gdk.keyval_from_name', 'Gdk.keyval_from_name', (['"""Up"""'], {}), "('Up')\n", (6213, 6219), False, 'from gi.repository import Gdk\n'), ((6307, 6335), 'gi.repository.Gdk.keyval_from_name', 'Gdk.keyval_from_name', (['"""Down"""'], {}), "('Down')\n", (6327, 6335), False, 'from gi.repository import Gdk\n')]
|
import networkx
import judo
from judo.tests.test_tree import TestNetworkxTree, to_node_id
import pytest
from fragile.core.tree import HistoryTree
def random_powerlaw():
g = networkx.DiGraph()
t = networkx.random_powerlaw_tree(500, gamma=3, tries=1000, seed=160290)
graph = networkx.compose(g, t)
mapping = {n: to_node_id(n) for n in graph.nodes}
return networkx.relabel_nodes(graph, mapping)
def small_tree():
node_data = {"a": judo.arange(10), "b": judo.zeros(10)}
edge_data = {"c": judo.ones(10)}
g = networkx.DiGraph()
for i in range(8):
g.add_node(to_node_id(i), **node_data)
pairs = [(0, 1), (1, 2), (2, 3), (2, 4), (2, 5), (3, 6), (3, 7)]
for a, b in pairs:
g.add_edge(to_node_id(a), to_node_id(b), **edge_data)
return g
@pytest.fixture(params=[random_powerlaw, small_tree], scope="function")
def tree(request):
tree = HistoryTree()
tree.data = request.param()
return tree
|
[
"fragile.core.tree.HistoryTree",
"judo.tests.test_tree.to_node_id",
"judo.ones",
"pytest.fixture",
"networkx.relabel_nodes",
"networkx.random_powerlaw_tree",
"networkx.compose",
"judo.arange",
"networkx.DiGraph",
"judo.zeros"
] |
[((799, 869), 'pytest.fixture', 'pytest.fixture', ([], {'params': '[random_powerlaw, small_tree]', 'scope': '"""function"""'}), "(params=[random_powerlaw, small_tree], scope='function')\n", (813, 869), False, 'import pytest\n'), ((180, 198), 'networkx.DiGraph', 'networkx.DiGraph', ([], {}), '()\n', (196, 198), False, 'import networkx\n'), ((207, 275), 'networkx.random_powerlaw_tree', 'networkx.random_powerlaw_tree', (['(500)'], {'gamma': '(3)', 'tries': '(1000)', 'seed': '(160290)'}), '(500, gamma=3, tries=1000, seed=160290)\n', (236, 275), False, 'import networkx\n'), ((288, 310), 'networkx.compose', 'networkx.compose', (['g', 't'], {}), '(g, t)\n', (304, 310), False, 'import networkx\n'), ((376, 414), 'networkx.relabel_nodes', 'networkx.relabel_nodes', (['graph', 'mapping'], {}), '(graph, mapping)\n', (398, 414), False, 'import networkx\n'), ((540, 558), 'networkx.DiGraph', 'networkx.DiGraph', ([], {}), '()\n', (556, 558), False, 'import networkx\n'), ((900, 913), 'fragile.core.tree.HistoryTree', 'HistoryTree', ([], {}), '()\n', (911, 913), False, 'from fragile.core.tree import HistoryTree\n'), ((329, 342), 'judo.tests.test_tree.to_node_id', 'to_node_id', (['n'], {}), '(n)\n', (339, 342), False, 'from judo.tests.test_tree import TestNetworkxTree, to_node_id\n'), ((457, 472), 'judo.arange', 'judo.arange', (['(10)'], {}), '(10)\n', (468, 472), False, 'import judo\n'), ((479, 493), 'judo.zeros', 'judo.zeros', (['(10)'], {}), '(10)\n', (489, 493), False, 'import judo\n'), ((517, 530), 'judo.ones', 'judo.ones', (['(10)'], {}), '(10)\n', (526, 530), False, 'import judo\n'), ((601, 614), 'judo.tests.test_tree.to_node_id', 'to_node_id', (['i'], {}), '(i)\n', (611, 614), False, 'from judo.tests.test_tree import TestNetworkxTree, to_node_id\n'), ((740, 753), 'judo.tests.test_tree.to_node_id', 'to_node_id', (['a'], {}), '(a)\n', (750, 753), False, 'from judo.tests.test_tree import TestNetworkxTree, to_node_id\n'), ((755, 768), 'judo.tests.test_tree.to_node_id', 'to_node_id', (['b'], {}), '(b)\n', (765, 768), False, 'from judo.tests.test_tree import TestNetworkxTree, to_node_id\n')]
|
"""Multiview Random Gaussian Projection"""
# Authors: <NAME>
#
# License: MIT
import numpy as np
from sklearn.base import TransformerMixin
from sklearn.utils.validation import check_is_fitted
from sklearn.random_projection import GaussianRandomProjection
from .utils import check_n_views
class RandomGaussianProjection(TransformerMixin):
"""
Random Gaussian Projection method for constructing multiple views.
Each view is constructed using sklearn's random Gaussian projection.
Parameters
----------
n_views : int
Number of views to construct
n_components: int or 'auto', optional (default "auto")
Dimensionality of target projection space, see
sklearn.random_projection.GaussianRandomProjection for details.
eps: float, optional (default 0.1)
Parameter for controlling quality of embedding when
n_components = "auto" according to the Johnson-Lindenstrauss lemma
        A smaller value leads to a better embedding (see sklearn for details).
random_state : int or RandomState instance, optional (default None)
Controls the random sampling of Gaussian projections. Set for
reproducible results.
Attributes
----------
GaussianRandomProjections_ : list, length n_views
List of GaussianRandomProjection instances fitted to construct each
view.
Notes
-----
From an implementation perspective, this wraps GaussianRandomProjection
from `sklearn.random_projection <https://scikit-learn.org/stable/modules/
classes.html#module-sklearn.random_projection>`_ and creates multiple
projections.
Examples
--------
>>> from mvlearn.compose import RandomGaussianProjection
>>> import numpy as np
>>> X = np.random.rand(1000, 50)
>>> rgp = RandomGaussianProjection(n_views=3, n_components=10)
>>> Xs = rgp.fit_transform(X)
>>> print(len(Xs))
3
>>> print(Xs[0].shape)
(1000, 10)
"""
def __init__(self, n_views, n_components="auto", eps=0.1,
random_state=None):
check_n_views(n_views)
self.n_views = n_views
self.n_components = n_components
self.eps = eps
self.random_state = random_state
def fit(self, X, y=None):
r"""
Fit to the singleview data.
Parameters
----------
X : array of shape (n_samples, n_total_features)
Input dataset
y : Ignored
Returns
-------
self : object
The Transformer instance
"""
# set function level random state
np.random.seed(self.random_state)
self.GaussianRandomProjections_ = [
GaussianRandomProjection(
n_components=self.n_components, eps=self.eps).fit(X)
for _ in range(self.n_views)
]
return self
def transform(self, X):
r"""
Transforms the singleview dataset and into a multiview dataset.
Parameters
----------
X : array of shape (n_samples, n_features)
Input dataset
Returns
-------
Xs : list of array-likes or numpy.ndarray
- Xs length: n_views
- Xs[i] shape: (n_samples, n_components)
"""
check_is_fitted(self)
Xs = [grp.transform(X) for grp in self.GaussianRandomProjections_]
return Xs
|
[
"numpy.random.seed",
"sklearn.random_projection.GaussianRandomProjection",
"sklearn.utils.validation.check_is_fitted"
] |
[((2611, 2644), 'numpy.random.seed', 'np.random.seed', (['self.random_state'], {}), '(self.random_state)\n', (2625, 2644), True, 'import numpy as np\n'), ((3287, 3308), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self'], {}), '(self)\n', (3302, 3308), False, 'from sklearn.utils.validation import check_is_fitted\n'), ((2701, 2771), 'sklearn.random_projection.GaussianRandomProjection', 'GaussianRandomProjection', ([], {'n_components': 'self.n_components', 'eps': 'self.eps'}), '(n_components=self.n_components, eps=self.eps)\n', (2725, 2771), False, 'from sklearn.random_projection import GaussianRandomProjection\n')]
|
import argparse
import numpy as np
import torch
from torch import optim
from torchvision import utils
from tqdm import tqdm
from model import Glow
from samplers import memory_mnist, memory_fashion
from utils import (
net_args,
calc_z_shapes,
calc_loss,
string_args,
create_deltas_sequence,
)
parser = net_args(argparse.ArgumentParser(description="Glow trainer"))
def train(args, model, optimizer):
if args.dataset == "mnist":
dataset_f = memory_mnist
elif args.dataset == "fashion_mnist":
dataset_f = memory_fashion
repr_args = string_args(args)
n_bins = 2.0 ** args.n_bits
z_sample = []
z_shapes = calc_z_shapes(args.n_channels, args.img_size, args.n_flow, args.n_block)
for z in z_shapes:
z_new = torch.randn(args.n_sample, *z) * args.temp
z_sample.append(z_new.to(device))
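    # per-epoch noise schedule: deltas[i] is assigned to args.delta at epoch i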
deltas = create_deltas_sequence(0.1, 0.005)
args.delta = deltas[0]
epoch_losses = []
f_train_loss = open(f"losses/seq_losses_train_{repr_args}_.txt", "w", buffering=1)
f_test_loss = open(f"losses/seq_losses_test_{repr_args}_.txt", "w", buffering=1)
with tqdm(range(200)) as pbar:
for i in pbar:
args.delta = deltas[i]
repr_args = string_args(args)
train_loader, val_loader, train_val_loader = dataset_f(
args.batch, args.img_size, args.n_channels
)
train_losses = []
for image in train_loader:
optimizer.zero_grad()
image = image.to(device)
                noisy_image = image
                if args.tr_dq:
                    noisy_image += torch.rand_like(image) / n_bins
                noisy_image += torch.randn_like(image) * args.delta
log_p, logdet, _ = model(noisy_image)
logdet = logdet.mean()
loss, log_p, log_det = calc_loss(
log_p, logdet, args.img_size, n_bins, args.n_channels
)
loss.backward()
optimizer.step()
train_losses.append(loss.item())
current_train_loss = np.mean(train_losses)
print(f"{current_train_loss},{args.delta},{i + 1}", file=f_train_loss)
with torch.no_grad():
utils.save_image(
model.reverse(z_sample).cpu().data,
f"sample/seq_sample_{repr_args}_{str(i + 1).zfill(6)}.png",
normalize=True,
nrow=10,
range=(-0.5, 0.5),
)
losses = []
logdets = []
logps = []
for image in val_loader:
image = image.to(device)
noisy_image = image
if args.te_dq:
noisy_image += torch.rand_like(image) / n_bins
if args.te_noise:
noisy_image += torch.randn_like(image) * args.delta
log_p, logdet, _ = model(noisy_image)
logdet = logdet.mean()
loss, log_p, log_det = calc_loss(
log_p, logdet, args.img_size, n_bins, args.n_channels
)
losses.append(loss.item())
logdets.append(log_det.item())
logps.append(log_p.item())
pbar.set_description(
f"Loss: {np.mean(losses):.5f}; logP: {np.mean(logps):.5f}; logdet: {np.mean(logdets):.5f}; delta: {args.delta:.5f}"
)
current_loss = np.mean(losses)
print(f"{current_loss},{args.delta},{i + 1}", file=f_test_loss)
epoch_losses.append(current_loss)
if (i + 1) % 10 == 0:
torch.save(
model.state_dict(),
f"checkpoint/seq_model_{repr_args}_{i + 1}_.pt",
)
f_ll = open(f"ll/seq_ll_{repr_args}_{i + 1}.txt", "w")
train_loader, val_loader, train_val_loader = dataset_f(
args.batch, args.img_size, args.n_channels
)
train_val_loader = iter(train_val_loader)
for image_val in val_loader:
image = image_val
                image = image.to(device)
                noisy_image = image
if args.te_dq:
noisy_image += torch.rand_like(image) / n_bins
if args.te_noise:
noisy_image += torch.randn_like(image) * args.delta
log_p_val, logdet_val, _ = model(noisy_image)
image = next(train_val_loader)
                image = image.to(device)
                noisy_image = image
if args.te_dq:
noisy_image += torch.rand_like(image) / n_bins
if args.te_noise:
noisy_image += torch.randn_like(image) * args.delta
log_p_train_val, logdet_train_val, _ = model(noisy_image)
for (
lpv,
ldv,
lptv,
ldtv,
) in zip(log_p_val, logdet_val, log_p_train_val, logdet_train_val):
print(
args.delta,
lpv.item(),
ldv.item(),
lptv.item(),
ldtv.item(),
file=f_ll,
)
f_ll.close()
f_train_loss.close()
f_test_loss.close()
if __name__ == "__main__":
args = parser.parse_args()
print(string_args(args))
device = args.device
model = Glow(
args.n_channels,
args.n_flow,
args.n_block,
affine=args.affine,
conv_lu=not args.no_lu,
)
model = model.to(device)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
train(args, model, optimizer)
|
[
"argparse.ArgumentParser",
"torch.randn_like",
"utils.create_deltas_sequence",
"utils.calc_z_shapes",
"model.Glow",
"torch.randn",
"torch.rand_like",
"numpy.mean",
"utils.calc_loss",
"utils.string_args",
"torch.no_grad"
] |
[((333, 384), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Glow trainer"""'}), "(description='Glow trainer')\n", (356, 384), False, 'import argparse\n'), ((582, 599), 'utils.string_args', 'string_args', (['args'], {}), '(args)\n', (593, 599), False, 'from utils import net_args, calc_z_shapes, calc_loss, string_args, create_deltas_sequence\n'), ((666, 738), 'utils.calc_z_shapes', 'calc_z_shapes', (['args.n_channels', 'args.img_size', 'args.n_flow', 'args.n_block'], {}), '(args.n_channels, args.img_size, args.n_flow, args.n_block)\n', (679, 738), False, 'from utils import net_args, calc_z_shapes, calc_loss, string_args, create_deltas_sequence\n'), ((877, 911), 'utils.create_deltas_sequence', 'create_deltas_sequence', (['(0.1)', '(0.005)'], {}), '(0.1, 0.005)\n', (899, 911), False, 'from utils import net_args, calc_z_shapes, calc_loss, string_args, create_deltas_sequence\n'), ((5772, 5868), 'model.Glow', 'Glow', (['args.n_channels', 'args.n_flow', 'args.n_block'], {'affine': 'args.affine', 'conv_lu': '(not args.no_lu)'}), '(args.n_channels, args.n_flow, args.n_block, affine=args.affine,\n conv_lu=not args.no_lu)\n', (5776, 5868), False, 'from model import Glow\n'), ((5716, 5733), 'utils.string_args', 'string_args', (['args'], {}), '(args)\n', (5727, 5733), False, 'from utils import net_args, calc_z_shapes, calc_loss, string_args, create_deltas_sequence\n'), ((778, 808), 'torch.randn', 'torch.randn', (['args.n_sample', '*z'], {}), '(args.n_sample, *z)\n', (789, 808), False, 'import torch\n'), ((1252, 1269), 'utils.string_args', 'string_args', (['args'], {}), '(args)\n', (1263, 1269), False, 'from utils import net_args, calc_z_shapes, calc_loss, string_args, create_deltas_sequence\n'), ((2107, 2128), 'numpy.mean', 'np.mean', (['train_losses'], {}), '(train_losses)\n', (2114, 2128), True, 'import numpy as np\n'), ((1857, 1921), 'utils.calc_loss', 'calc_loss', (['log_p', 'logdet', 'args.img_size', 'n_bins', 'args.n_channels'], {}), '(log_p, logdet, args.img_size, n_bins, args.n_channels)\n', (1866, 1921), False, 'from utils import net_args, calc_z_shapes, calc_loss, string_args, create_deltas_sequence\n'), ((2229, 2244), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2242, 2244), False, 'import torch\n'), ((3591, 3606), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (3598, 3606), True, 'import numpy as np\n'), ((1688, 1711), 'torch.randn_like', 'torch.randn_like', (['image'], {}), '(image)\n', (1704, 1711), False, 'import torch\n'), ((3112, 3176), 'utils.calc_loss', 'calc_loss', (['log_p', 'logdet', 'args.img_size', 'n_bins', 'args.n_channels'], {}), '(log_p, logdet, args.img_size, n_bins, args.n_channels)\n', (3121, 3176), False, 'from utils import net_args, calc_z_shapes, calc_loss, string_args, create_deltas_sequence\n'), ((1625, 1647), 'torch.rand_like', 'torch.rand_like', (['image'], {}), '(image)\n', (1640, 1647), False, 'import torch\n'), ((2822, 2844), 'torch.rand_like', 'torch.rand_like', (['image'], {}), '(image)\n', (2837, 2844), False, 'import torch\n'), ((2931, 2954), 'torch.randn_like', 'torch.randn_like', (['image'], {}), '(image)\n', (2947, 2954), False, 'import torch\n'), ((3435, 3450), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (3442, 3450), True, 'import numpy as np\n'), ((3464, 3478), 'numpy.mean', 'np.mean', (['logps'], {}), '(logps)\n', (3471, 3478), True, 'import numpy as np\n'), ((3494, 3510), 'numpy.mean', 'np.mean', (['logdets'], {}), '(logdets)\n', (3501, 3510), True, 'import numpy as np\n'), ((4431, 4453), 
'torch.rand_like', 'torch.rand_like', (['image'], {}), '(image)\n', (4446, 4453), False, 'import torch\n'), ((4540, 4563), 'torch.randn_like', 'torch.randn_like', (['image'], {}), '(image)\n', (4556, 4563), False, 'import torch\n'), ((4814, 4836), 'torch.rand_like', 'torch.rand_like', (['image'], {}), '(image)\n', (4829, 4836), False, 'import torch\n'), ((4923, 4946), 'torch.randn_like', 'torch.randn_like', (['image'], {}), '(image)\n', (4939, 4946), False, 'import torch\n')]
|
#!/usr/bin/env python3
print("Importing common model setup...")
import sys
sys.path.append("..")
from common_model_setup import *
SOLVED = False
print("Using {} structures for RNN".format("solved" if SOLVED else "AF2"))
device = "cuda" if torch.cuda.is_available() else "cpu"
print("The device in use is", device)
BATCH_SIZE = 8
MAX_EPOCHS = 200
LR = 1e-3
LR_DECAY = 1e-5
EARLY_STOPPING = True
training_set, val_set, test_set = create_sequential_datasets(
*create_embedding_dicts(SOLVED), device
)
class SimpleRNN(torch.nn.Module):
def __init__(self, input_size):
super(SimpleRNN, self).__init__()
#self.bn_input = nn.BatchNorm1d(927)
self.n_layers = 1
self.hidden_dim = 32
#self.rnn = nn.RNN(input_size, self.hidden_dim, self.n_layers, batch_first=True)
self.rnn = nn.RNN(input_size, self.hidden_dim, self.n_layers, batch_first=True, bidirectional=True)
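        # bidirectional RNN concatenates forward and backward states, so each step yields 2*hidden_dim features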
self.dense = nn.Sequential(
nn.Linear(in_features=2*self.hidden_dim, out_features=1)
# nn.Linear(in_features=2*self.hidden_dim, out_features=self.hidden_dim),
# nn.ReLU(),
# nn.Linear(in_features=self.hidden_dim, out_features=int(self.hidden_dim/2)),
# nn.ReLU(),
# nn.Linear(in_features=int(self.hidden_dim/2), out_features=int(self.hidden_dim/4)),
# nn.ReLU(),
# nn.Linear(in_features=int(self.hidden_dim/4), out_features=1),
)
# Dense out
#self.act = nn.ReLU()
self.dropout = nn.Dropout(0.1)
self.out = nn.Sigmoid()
def forward(self, x):
#x = self.bn_input(x)
# Initializing hidden state for first input using method defined below
hidden = self.init_hidden(4)
        # Recurrent module
x, hidden = self.rnn(x)
#x = self.act(x)
x = self.dropout(x)
# Output MLP
x = self.dense(x)
# Output sigmoid
out = self.out(x)
return out, hidden
def init_hidden(self, batch_size):
# This method generates the first hidden state of zeros which we'll use in the forward pass
# We'll send the tensor holding the hidden state to the device we specified earlier as well
hidden = torch.zeros(self.n_layers, batch_size, self.hidden_dim)
return hidden
net = SimpleRNN(128)
net.to(device)
trainloader = torch.utils.data.DataLoader(training_set, batch_size=BATCH_SIZE)
################
### TRAINING ###
################
print("Starting training...")
train_X, train_y, train_mask = training_set[:]
test_X, test_y, test_mask = test_set[:]
criterion= nn.BCELoss(reduction='none')
optimizer = optim.Adam(net.parameters(), lr=LR, weight_decay=LR_DECAY)
def weight_reset(m):
if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):
m.reset_parameters()
net.apply(weight_reset)
losses=[]
training_plot=[]
test_plot=[]
auc_train_plot=[]
auc_test_plot=[]
mcc_train_plot=[]
mcc_test_plot=[]
last_score=np.inf
max_es_rounds = 5
es_rounds = max_es_rounds
best_epoch = 0
for epoch in range(MAX_EPOCHS): # loop over the dataset multiple times
net.train()
for i, data in enumerate(trainloader,0):
# get the inputs; data is a list of [inputs, labels]
inputs, labels, mask = data
inputs = inputs.to(device)
# zero the parameter gradients
optimizer.zero_grad()
#scheduler.step()
# forward + backward + optimize
outputs, hidden = net(inputs)
loss = criterion(torch.squeeze(outputs), labels)
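        # zero out the loss at padded positions, then average over the valid (unmasked) entries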
loss=loss*mask
loss=torch.sum(loss)/torch.sum(mask)
loss.backward()
optimizer.step()
# print statistics
with torch.no_grad():
test_loss=0
train_loss=0
net.eval()
inputs, labels, mask = training_set[:]
outputs, hidden = net(inputs)
loss = criterion(torch.squeeze(outputs), labels)
loss=loss*mask
loss=torch.sum(loss)/torch.sum(mask)
training_plot.append(loss.cpu().numpy())
auc_train_plot.append(roc_auc_score(labels.cpu()[mask.cpu()>0], outputs.cpu().squeeze()[mask.cpu()>0]))
mcc_train_plot.append(mcc(labels.cpu()[mask.cpu()>0], outputs.cpu().squeeze()[mask.cpu()>0]>.1))
inputs, labels, mask = test_set[:]
outputs, hidden = net(inputs)
loss = criterion(torch.squeeze(outputs), labels)
loss=loss*mask
loss=torch.sum(loss)/torch.sum(mask)
test_plot.append(loss.cpu().numpy())
fpr, tpr, _ = roc_curve(labels.cpu()[mask.cpu()>0], outputs.cpu()[mask.cpu()>0].squeeze())
#labels, outputs= get_labels_preds_and_posprob_without_padding( outputs.flatten(),labels.flatten() )
auc_test_plot.append(roc_auc_score(labels.cpu()[mask.cpu()>0], outputs.cpu()[mask.cpu()>0].squeeze()))
mcc_test_plot.append(mcc(labels.cpu()[mask.cpu()>0], outputs.cpu().squeeze()[mask.cpu()>0]>.1))
inputs, labels, mask = val_set[:]
outputs, hidden = net(inputs)
valloss = criterion(torch.squeeze(outputs), labels)
valloss=valloss*mask
valloss=torch.sum(valloss)/torch.sum(mask)
print("Epoch {}, training loss {}, test loss {}, validation loss {}".format(epoch, training_plot[-1], test_plot[-1], valloss))
print("Epoch {}, training AUC {}, test AUC {}".format(epoch, auc_train_plot[-1], auc_test_plot[-1]))
print("Epoch {}, training MCC {}, test MCC {}".format(epoch, mcc_train_plot[-1], mcc_test_plot[-1]))
if EARLY_STOPPING:
if last_score > valloss:
last_score = valloss
best_epoch = epoch
es_rounds = max_es_rounds
best_model = copy.deepcopy(net)
rnn_fpr, rnn_tpr = fpr, tpr
else:
if es_rounds > 0:
es_rounds -=1
else:
print('EARLY-STOPPING !')
print('Best epoch found: nº {}'.format(best_epoch))
print('Exiting. . .')
break
print("Finished training.")
|
[
"sys.path.append"
] |
[((76, 97), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (91, 97), False, 'import sys\n')]
|
""" Game fix for Sonic the Hedgehog 4: Episode II
"""
#pylint: disable=C0103
from protonfixes import util
def main():
""" lock to 60 fps
"""
util.set_environment('DXVK_FRAME_RATE', '60')
|
[
"protonfixes.util.set_environment"
] |
[((156, 201), 'protonfixes.util.set_environment', 'util.set_environment', (['"""DXVK_FRAME_RATE"""', '"""60"""'], {}), "('DXVK_FRAME_RATE', '60')\n", (176, 201), False, 'from protonfixes import util\n')]
|
import logging
from functools import wraps
from datetime import datetime
"""Decorator for logging the duration of a method call"""
def timer(func):
@wraps(func)
def wrapper(*args, **kwargs):
start = datetime.now()
result = func(*args, **kwargs)
end = datetime.now()
duration = end - start
# TODO(df): Reuse logger / level from calling module
logger = logging.getLogger('pdf_extractor')
logger.setLevel(logging.INFO)
logger.info(f'Duration for {func.__name__}: {duration}')
return result
return wrapper
|
[
"logging.getLogger",
"datetime.datetime.now",
"functools.wraps"
] |
[((156, 167), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (161, 167), False, 'from functools import wraps\n'), ((218, 232), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (230, 232), False, 'from datetime import datetime\n'), ((286, 300), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (298, 300), False, 'from datetime import datetime\n'), ((410, 444), 'logging.getLogger', 'logging.getLogger', (['"""pdf_extractor"""'], {}), "('pdf_extractor')\n", (427, 444), False, 'import logging\n')]
|
#!/usr/bin/env python3
import numpy as np
####################
# generate_stimuli #
####################
def generate_stimuli(arg, env):
"""
Function to generate the stimuli
Arguments
---------
arg: Argument for which to generate stimuli (either Argument or ArrayArgument)
env: Dict mapping the variable (SweepVariable or DynamicVariable) names to their value.
"""
# name = arg.name
# if name == "srcA":
# # generate and return stimuli for srcA
# if name == "srcB":
# # generate and return stimuli for srcB
# ...
##################
# compute_result #
##################
def compute_result(result_parameter, inputs, env, fix_point):
"""
    Function to generate the expected result of the testcase.
Arguments
---------
result_parameter: Either OutputArgument or ReturnValue (see pulp_dsp_test.py)
inputs: Dict mapping name to the Argument, with arg.value, arg.ctype (and arg.length)
env: Dict mapping the variable (SweepVariable or DynamicVariable) names to their value.
fix_point: None (if no fixpoint is used) or decimal point
"""
ctype = inputs['value'].ctype;
if ctype == 'int32_t':
my_type = np.int32
my_bits=32
fracBits=31
elif ctype == 'int16_t':
my_type = np.int16
my_bits=16
fracBits=15
    elif ctype == 'int8_t':
        my_type = np.int8
        my_bits=8
        fracBits=7
elif ctype == 'float' or ctype == 'float32_t':
my_type = np.float32
my_bits = 0;
else:
raise RuntimeError("Unrecognized result type: %s" % ctype)
if my_bits != 0:
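        # fixed-point path: interpret the Q(my_bits-1) input as a fraction of a full 2*pi turn,
        # then quantize sin() back to Q(my_bits-1) and saturate to my_bits bits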
input_number = inputs['value'].value
in_rad = 2*np.pi*float(input_number)/2**(my_bits-1)
return q_sat(int(2**(my_bits-1)*np.sin(in_rad)), my_bits)
    elif ctype == 'float' or ctype == 'float32_t':
return np.sin(inputs['value'].value).astype(my_type)
######################
# Fixpoint Functions #
######################
def q_sat(x, bits=32):
if x > 2**(bits-1) - 1:
return x - 2**bits
elif x < -2**(bits-1):
return x + 2**bits
else:
return x
def q_add(a, b):
return q_sat(a + b)
def q_sub(a, b):
return q_sat(a - b)
def q_mul(a, b, p):
return q_roundnorm(a * b, p)
def q_roundnorm(a, p):
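    # round to nearest: add half of the final LSB before the arithmetic right shift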
rounding = 1 << (p - 1)
return q_sat((a + rounding) >> p)
|
[
"numpy.sin"
] |
[((1759, 1773), 'numpy.sin', 'np.sin', (['in_rad'], {}), '(in_rad)\n', (1765, 1773), True, 'import numpy as np\n'), ((1827, 1856), 'numpy.sin', 'np.sin', (["inputs['value'].value"], {}), "(inputs['value'].value)\n", (1833, 1856), True, 'import numpy as np\n')]
|
#! /usr/bin/env python
"""
Launches a ParaView visualization of a simulation.
User provides saved ParaView state file and output directory.
Usage:
.. code-block:: bash
visualize-output outputs my_visu_state.pvsm
Opens paraview visualization for state ``my_visu_state.pvsm`` where all pvd
files are read from ``outputs`` directory.
.. code-block:: bash
visualize-output -r outputs my_visu_state.pvsm
As above but first regenerates all ``*.pvd`` files that contain ``*.vtu`` files
for time indices 0..100. Useful in cases where a shorter pvd file has been
created by another simulation run.
.. code-block:: bash
visualize-output -r -f 20 -l 200 outputs my_visu_state.pvsm
As above but generates ``*.pvd`` files for time indices 20..200.
.. code-block:: bash
    visualize-output -r -p outputs my_visu_state.pvsm
As above but first regenerates all ``*.pvd`` files for a parallel run, i.e. it lists
``*.pvtu`` files instead of ``*.vtu`` files.
"""
import argparse
import glob
import os
import subprocess
import tempfile
TMP_DIR = tempfile.gettempdir()
def generate_pvd_file(outdir, fieldname, timesteps, usepvtu=False):
"""
    Generates ParaView PVD XML file fieldName.pvd that contains vtu or pvtu files for
the given time steps range.
:arg str outdir: directory where pvd files are stored
:arg str fieldname: name of the field that appears in vtu/pvtu file names
:arg timesteps: list of time indices of vtu files to include in the pvd file
:type timesteps: list of int
"""
template_header = """<?xml version="1.0" ?>\n"""
template_openblock = """<VTKFile type="Collection" version="0.1" byte_order="LittleEndian">\n<Collection>\n"""
template_closeblock = """</Collection>\n</VTKFile>\n"""
template_entry = """<DataSet timestep="{i}" file="{name}_{i}.{ext}" />"""
extension = 'pvtu' if usepvtu else 'vtu'
content = template_header
content += template_openblock
for i in timesteps:
content += template_entry.format(i=i, name=fieldname, ext=extension)
content += template_closeblock
filename = os.path.join(outdir, fieldname+'.pvd')
print('generating {:}'.format(filename))
f = open(filename, 'w')
f.write(content)
f.close()
def replace_path_in_xml(filename, outputfile, new_path):
"""
Replaces all paths in paraview xml file PVDReader entries.
:arg str filename: XML file to process
:arg str outputfile: file where updated XML file is saved
:arg new_path: a new path for all pvd files
All PVDReader entries of the form
<Proxy group="sources" type="PVDReader" ...>
<Property name="FileName" ...>
<Element value="some/path/to/a_file.pvd" .../>
...
</Property>
...
</Proxy>
    will be replaced by
<Proxy group="sources" type="PVDReader" ...>
<Property name="FileName" ...>
<Element value="new_path/a_file.pvd" .../>
...
</Property>
...
</Proxy>
"""
import xml.etree.ElementTree as ET
tree = ET.parse(filename)
root = tree.getroot()
readers = root[0].findall("Proxy[@type='PVDReader']")
for reader in readers:
fnameprop = reader.findall("Property[@name='FileName']/Element")[0]
old_fname = fnameprop.attrib['value']
path, file = os.path.split(old_fname)
field, ext = os.path.splitext(file)
new_fname = os.path.join(new_path, field, file)
fnameprop.attrib['value'] = new_fname
tree.write(outputfile)
def process_args(outputdir, state_file, regenerate_pvd=True, timesteps=None,
parallel_vtu=True):
"""
Processes command line arguments
"""
temp_state_file = os.path.join(TMP_DIR, 'tmp.pvsm')
paraview_bin = 'paraview'
pv_log_file = os.path.join(TMP_DIR, 'log_pvoutput.txt')
static_pvd_files = ['bath'] # outputs that are not time dependent
# regenerate all existing PVD files
if regenerate_pvd:
pvd_files = glob.glob(os.path.join(outputdir, '*/*.pvd'))
for f in pvd_files:
path, fname = os.path.split(f)
fieldName, extension = os.path.splitext(fname)
if fieldName not in static_pvd_files:
generate_pvd_file(path, fieldName, timesteps, usepvtu=parallel_vtu)
# read state file, replace directory with new one
replace_path_in_xml(state_file, temp_state_file, outputdir)
    # launch paraview as an independent subprocess
log_file = open(pv_log_file, 'w')
cmd = ' '.join([paraview_bin, '--state={:}'.format(temp_state_file), '>', pv_log_file])
subprocess.Popen(cmd, shell=True, stdout=log_file, stderr=subprocess.STDOUT)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Launch ParaView visualization',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('outputdir', type=str,
help='Directory where .pvd files are stored')
parser.add_argument('statefile', type=str,
help='ParaView *.pvsm state file')
parser.add_argument('-r', action='store_true', dest='regenerate_pvd',
help='regenerate PVD files')
parser.add_argument('-p', action='store_true', dest='parallel_vtu',
help='regenerate PVD files for parallel outputs')
parser.add_argument('-f', '--first-time-step', type=int, default=0,
help='first time step to be included in regenerated PVD file')
parser.add_argument('-l', '--last-time-step', type=int, default=100,
help='last time step to be included in regenerated PVD file')
args = parser.parse_args()
timesteps = range(args.first_time_step, args.last_time_step + 1)
process_args(args.outputdir, args.statefile, regenerate_pvd=args.regenerate_pvd,
timesteps=timesteps, parallel_vtu=args.parallel_vtu)
|
[
"xml.etree.ElementTree.parse",
"subprocess.Popen",
"argparse.ArgumentParser",
"tempfile.gettempdir",
"os.path.splitext",
"os.path.split",
"os.path.join"
] |
[((1042, 1063), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (1061, 1063), False, 'import tempfile\n'), ((2088, 2128), 'os.path.join', 'os.path.join', (['outdir', "(fieldname + '.pvd')"], {}), "(outdir, fieldname + '.pvd')\n", (2100, 2128), False, 'import os\n'), ((3027, 3045), 'xml.etree.ElementTree.parse', 'ET.parse', (['filename'], {}), '(filename)\n', (3035, 3045), True, 'import xml.etree.ElementTree as ET\n'), ((3689, 3722), 'os.path.join', 'os.path.join', (['TMP_DIR', '"""tmp.pvsm"""'], {}), "(TMP_DIR, 'tmp.pvsm')\n", (3701, 3722), False, 'import os\n'), ((3771, 3812), 'os.path.join', 'os.path.join', (['TMP_DIR', '"""log_pvoutput.txt"""'], {}), "(TMP_DIR, 'log_pvoutput.txt')\n", (3783, 3812), False, 'import os\n'), ((4578, 4654), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'shell': '(True)', 'stdout': 'log_file', 'stderr': 'subprocess.STDOUT'}), '(cmd, shell=True, stdout=log_file, stderr=subprocess.STDOUT)\n', (4594, 4654), False, 'import subprocess\n'), ((4697, 4825), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Launch ParaView visualization"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Launch ParaView visualization',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (4720, 4825), False, 'import argparse\n'), ((3300, 3324), 'os.path.split', 'os.path.split', (['old_fname'], {}), '(old_fname)\n', (3313, 3324), False, 'import os\n'), ((3346, 3368), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (3362, 3368), False, 'import os\n'), ((3389, 3424), 'os.path.join', 'os.path.join', (['new_path', 'field', 'file'], {}), '(new_path, field, file)\n', (3401, 3424), False, 'import os\n'), ((3977, 4011), 'os.path.join', 'os.path.join', (['outputdir', '"""*/*.pvd"""'], {}), "(outputdir, '*/*.pvd')\n", (3989, 4011), False, 'import os\n'), ((4067, 4083), 'os.path.split', 'os.path.split', (['f'], {}), '(f)\n', (4080, 4083), False, 'import os\n'), ((4119, 4142), 'os.path.splitext', 'os.path.splitext', (['fname'], {}), '(fname)\n', (4135, 4142), False, 'import os\n')]
|
import pygame
import logging
class Player():
"""
Player
"""
def __init__(self, name, avatar="assets/laughing.png", offsetX=0, offsetY=0):
self.name = name
self.logger = logging.getLogger(self.name)
logging.basicConfig(
format='%(asctime)s - %(name)s: %(levelname)s - %(message)s', level=logging.INFO)
self.avatar = avatar
self.x = 400
self.y = 400
self.offsetX = offsetX
self.offsetY = offsetY
self.pies = []
def draw(self, screen):
"""
Draw the player on the board
"""
img = pygame.image.load(self.avatar)
img = pygame.transform.scale(img, (50, 50))
screen.blit(img, (self.x + self.offsetX, self.y + self.offsetY))
def draw_statistics(self, screen, x, y):
"""
Draw the statistics (The big head)
"""
img = pygame.image.load(self.avatar)
img = pygame.transform.scale(img, (150, 150))
screen.blit(img, (x + 75, y))
self.font = pygame.font.SysFont("Arial", 12)
self.name_text = self.font.render(
self.name, True, pygame.Color('white'))
self.name_rect = self.name_text.get_rect(
center=(x + 150, y + 175))
screen.blit(self.name_text, self.name_rect)
self.draw_pies(screen, x, y + 200)
def draw_face(self, screen, x, y, width, height):
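        # draw the avatar scaled to (width, height) at position (x, y)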
img = pygame.image.load(self.avatar)
img = pygame.transform.scale(img, (width, height))
screen.blit(img, (x, y))
def draw_pies(self, screen, x, y):
"""
Draw the pies
"""
x += 12.5
for pie in self.pies:
pygame.draw.rect(screen, pygame.Color(pie), (x, y, 50, 50))
x += 75
def add_pie(self, color):
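        # record each pie colour at most once per player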
if color not in self.pies:
self.logger.info("Getting a piece of cake!")
self.pies.append(color)
|
[
"logging.basicConfig",
"pygame.font.SysFont",
"pygame.Color",
"pygame.transform.scale",
"pygame.image.load",
"logging.getLogger"
] |
[((204, 232), 'logging.getLogger', 'logging.getLogger', (['self.name'], {}), '(self.name)\n', (221, 232), False, 'import logging\n'), ((241, 347), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(name)s: %(levelname)s - %(message)s"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s - %(name)s: %(levelname)s - %(message)s', level=logging.INFO)\n", (260, 347), False, 'import logging\n'), ((616, 646), 'pygame.image.load', 'pygame.image.load', (['self.avatar'], {}), '(self.avatar)\n', (633, 646), False, 'import pygame\n'), ((661, 698), 'pygame.transform.scale', 'pygame.transform.scale', (['img', '(50, 50)'], {}), '(img, (50, 50))\n', (683, 698), False, 'import pygame\n'), ((899, 929), 'pygame.image.load', 'pygame.image.load', (['self.avatar'], {}), '(self.avatar)\n', (916, 929), False, 'import pygame\n'), ((944, 983), 'pygame.transform.scale', 'pygame.transform.scale', (['img', '(150, 150)'], {}), '(img, (150, 150))\n', (966, 983), False, 'import pygame\n'), ((1042, 1074), 'pygame.font.SysFont', 'pygame.font.SysFont', (['"""Arial"""', '(12)'], {}), "('Arial', 12)\n", (1061, 1074), False, 'import pygame\n'), ((1423, 1453), 'pygame.image.load', 'pygame.image.load', (['self.avatar'], {}), '(self.avatar)\n', (1440, 1453), False, 'import pygame\n'), ((1468, 1512), 'pygame.transform.scale', 'pygame.transform.scale', (['img', '(width, height)'], {}), '(img, (width, height))\n', (1490, 1512), False, 'import pygame\n'), ((1147, 1168), 'pygame.Color', 'pygame.Color', (['"""white"""'], {}), "('white')\n", (1159, 1168), False, 'import pygame\n'), ((1717, 1734), 'pygame.Color', 'pygame.Color', (['pie'], {}), '(pie)\n', (1729, 1734), False, 'import pygame\n')]
|
from Instrucciones.TablaSimbolos.Instruccion import Instruccion
from storageManager.jsonMode import *
class UpdateTable(Instruccion):
def __init__(self, id, tipo, lCol, insWhere, linea, columna):
Instruccion.__init__(self,tipo,linea,columna)
self.identificador = id
self.listaDeColumnas = lCol
self.insWhere = insWhere
def ejecutar(self, tabla, arbol):
super().ejecutar(tabla,arbol)
print("UPDATE_SET_TABLA")
if(self.identificador != None):
if(self.listaDeColumnas != None):
if(self.insWhere != None):
update(arbol.database())
'''
def update(database: str, table: str, register: dict, columns: list) -> int:
'''
'''
instruccion = UpdateTable("hola mundo",None, 1,2)
instruccion.ejecutar(None,None)
'''
|
[
"Instrucciones.TablaSimbolos.Instruccion.Instruccion.__init__"
] |
[((208, 256), 'Instrucciones.TablaSimbolos.Instruccion.Instruccion.__init__', 'Instruccion.__init__', (['self', 'tipo', 'linea', 'columna'], {}), '(self, tipo, linea, columna)\n', (228, 256), False, 'from Instrucciones.TablaSimbolos.Instruccion import Instruccion\n')]
|
import math
import TextPrep as prep
"""
Gets cosine similarity of two documents
"""
def GetCosineSimilarity(doc1, doc2):
# wrapper function to clean documents and return cosine similarity
cleanDoc1 = prep.CleanDocument(doc1)
cleanDoc2 = prep.CleanDocument(doc2)
docVec1, docVec2 = __bagOfWords__(cleanDoc1, cleanDoc2)
return __cosineSimilarity__(docVec1, docVec2)
"""
Calculates cosine similarity of two L2 normed, equal length document vectors
"""
def __cosineSimilarity__(vec1, vec2):
# just a wrapper for naming purposes because in the case where vectors are L2 normed, cosine similarity is just
# their dot product, according to:
# https://stackoverflow.com/questions/51290969/is-there-any-reason-to-not-l2-normalize-vectors-before-using-cosine-similarity
return __dotProduct__(vec1, vec2)
"""
Uses vanilla Python to get the dot product of two vectors
"""
def __dotProduct__(vec1, vec2):
if len(vec1) != len(vec2):
raise Exception("Vectors must be equal length to take dot product")
return sum(i * j for i, j in zip(vec1, vec2))
"""
Uses Bag of Words to turn word vectors into numeric vectors. Note that we use L2 to normalize the vectors before
returning them
"""
def __bagOfWords__(doc1, doc2):
# unique words across both docs
voc = set(doc1).union(set(doc2))
# word count vectors
bagVector1 = __countWords__(doc1, voc)
bagVector2 = __countWords__(doc2, voc)
    # normalize the vectors so that big differences in length between documents do not dominate the similarity score
normVector1 = __L2__(bagVector1)
normVector2 = __L2__(bagVector2)
return normVector1, normVector2
"""
Takes vector and returns an L2 normalized copy
"""
def __L2__(vec):
    # Given a vector, the L2 norm of that vector is: sqrt(sum of the squared elements)
    # to normalize the vector, we calculate: vector / L2norm
    sqrt = math.sqrt(sum(i * i for i in vec))
return [i / sqrt for i in vec]
"""
Counting the number of times a word in a given corpus appears in a document
"""
def __countWords__(doc, voc):
count = dict.fromkeys(voc, 0)
for word in doc:
count[word] += 1
return count.values()
|
[
"TextPrep.CleanDocument"
] |
[((215, 239), 'TextPrep.CleanDocument', 'prep.CleanDocument', (['doc1'], {}), '(doc1)\n', (233, 239), True, 'import TextPrep as prep\n'), ((256, 280), 'TextPrep.CleanDocument', 'prep.CleanDocument', (['doc2'], {}), '(doc2)\n', (274, 280), True, 'import TextPrep as prep\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 31 22:33:41 2018
@author: Yulab
"""
import tensorflow as tf
import numpy as np
import math
#%%
def conv(layer_name, x, out_channels, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=True, seed=1):
'''Convolution op wrapper, use RELU activation after convolution
Args:
layer_name: e.g. conv1, pool1...
x: input tensor, [batch_size, height, width, channels]
        out_channels: number of output channels (or convolutional kernels)
kernel_size: the size of convolutional kernel, VGG paper used: [3,3]
stride: A list of ints. 1-D of length 4. VGG paper used: [1, 1, 1, 1]
        is_pretrain: if loading pretrained parameters, freeze all conv layers.
            Depending on the situation, you can set just part of the conv layers to be frozen;
            the parameters of frozen layers will not change during training.
Returns:
4D tensor
'''
in_channels = x.get_shape()[-1]
with tf.variable_scope(layer_name):
w = tf.get_variable(name='weights',
trainable=is_pretrain,
shape=[kernel_size[0], kernel_size[1], in_channels, out_channels],
initializer=tf.contrib.layers.xavier_initializer(seed=seed)) # default is uniform distribution initialization
b = tf.get_variable(name='biases',
trainable=is_pretrain,
shape=[out_channels],
initializer=tf.constant_initializer(0.0))
x = tf.nn.conv2d(x, w, stride, padding='SAME', name='conv')
x = tf.nn.bias_add(x, b, name='bias_add')
x = tf.nn.relu(x, name='relu')
return x
#%%
def pool(layer_name, x, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True):
'''Pooling op
Args:
x: input tensor
kernel: pooling kernel, VGG paper used [1,2,2,1], the size of kernel is 2X2
stride: stride size, VGG paper used [1,2,2,1]
padding:
        is_max_pool: boolean
if True: use max pooling
else: use avg pooling
'''
if is_max_pool:
x = tf.nn.max_pool(x, kernel, strides=stride, padding='SAME', name=layer_name)
else:
x = tf.nn.avg_pool(x, kernel, strides=stride, padding='SAME', name=layer_name)
return x
#%%
def FC_layer(layer_name, x, out_nodes, keep_prob=0.5, seed=1):
'''Wrapper for fully connected layers with RELU activation as default
Args:
layer_name: e.g. 'FC1', 'FC2'
x: input feature map
out_nodes: number of neurons for current FC layer
'''
shape = x.get_shape()
if len(shape) == 4:
size = shape[1].value * shape[2].value * shape[3].value
else:
size = shape[-1].value
with tf.variable_scope(layer_name):
w = tf.get_variable('weights',
shape=[size, out_nodes],
initializer=tf.contrib.layers.xavier_initializer(seed=seed))
b = tf.get_variable('biases',
shape=[out_nodes],
initializer=tf.constant_initializer(0.0))
flat_x = tf.reshape(x, [-1, size]) # flatten into 1D
flat_x = tf.nn.dropout(flat_x, keep_prob, seed=seed)
x = tf.nn.bias_add(tf.matmul(flat_x, w), b)
x = tf.nn.relu(x)
return x
#%%
def loss(logits, labels):
'''Compute loss
Args:
logits: logits tensor, [batch_size, n_classes]
labels: one-hot labels
'''
with tf.name_scope('loss') as scope:
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=labels,name='cross-entropy')
loss = tf.reduce_mean(cross_entropy, name='loss')
tf.summary.scalar(scope+'/loss', loss)
return loss
#%%
def accuracy(logits, labels):
"""Evaluate the quality of the logits at predicting the label.
Args:
logits: Logits tensor, float - [batch_size, NUM_CLASSES].
labels: Labels tensor,
"""
with tf.name_scope('accuracy') as scope:
correct = tf.equal(tf.arg_max(logits, 1), tf.argmax(labels, 1))
correct = tf.cast(correct, tf.float32)
accuracy = tf.reduce_mean(correct)*100.0
tf.summary.scalar(scope+'/accuracy', accuracy)
return accuracy
#%%
def num_correct_prediction(logits, labels):
"""Evaluate the quality of the logits at predicting the label.
Return:
the number of correct predictions
"""
correct = tf.equal(tf.arg_max(logits, 1), tf.arg_max(labels, 1))
correct = tf.cast(correct, tf.int32)
n_correct = tf.reduce_sum(correct)
return n_correct
#%%
def optimize(loss, learning_rate, global_step):
'''optimization, use Gradient Descent as default
'''
with tf.name_scope('optimizer'):
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
#optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss, global_step=global_step)
return train_op
#%%
def random_mini_batches(X, Y, mini_batch_size = 64, seed = 0):
"""
Creates a list of random minibatches from (X, Y)
Arguments:
X -- input data, of shape (input size, number of examples)
Y -- true "label" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)
mini_batch_size - size of the mini-batches, integer
    seed -- this is only for the purpose of grading, so that your "random" minibatches are the same as ours.
Returns:
mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
from coursera.org
"""
m = X.shape[0] # number of training examples
mini_batches = []
np.random.seed(seed)
# Step 1: Shuffle (X, Y)
permutation = list(np.random.permutation(m))
shuffled_X = X[permutation, :]
shuffled_Y = Y[permutation, :]
# Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.
    num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini batches of size mini_batch_size in your partitioning
for k in range(0, num_complete_minibatches):
mini_batch_X = shuffled_X[k * mini_batch_size : k * mini_batch_size + mini_batch_size, :]
mini_batch_Y = shuffled_Y[k * mini_batch_size : k * mini_batch_size + mini_batch_size, :]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
# Handling the end case (last mini-batch < mini_batch_size)
if m % mini_batch_size != 0:
mini_batch_X = shuffled_X[num_complete_minibatches * mini_batch_size : m, :]
mini_batch_Y = shuffled_Y[num_complete_minibatches * mini_batch_size : m, :]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
return mini_batches
|
[
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.reduce_sum",
"numpy.random.seed",
"tensorflow.constant_initializer",
"tensorflow.reshape",
"tensorflow.matmul",
"tensorflow.nn.conv2d",
"tensorflow.nn.relu",
"tensorflow.variable_scope",
"tensorflow.nn.softmax_cross_entropy_with_logits_v2",
"tensorflow.cast",
"tensorflow.name_scope",
"tensorflow.nn.bias_add",
"tensorflow.summary.scalar",
"tensorflow.reduce_mean",
"tensorflow.arg_max",
"tensorflow.nn.max_pool",
"numpy.random.permutation",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.argmax",
"math.floor",
"tensorflow.nn.avg_pool",
"tensorflow.nn.dropout"
] |
[((4569, 4595), 'tensorflow.cast', 'tf.cast', (['correct', 'tf.int32'], {}), '(correct, tf.int32)\n', (4576, 4595), True, 'import tensorflow as tf\n'), ((4610, 4632), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['correct'], {}), '(correct)\n', (4623, 4632), True, 'import tensorflow as tf\n'), ((5753, 5773), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (5767, 5773), True, 'import numpy as np\n'), ((6024, 6055), 'math.floor', 'math.floor', (['(m / mini_batch_size)'], {}), '(m / mini_batch_size)\n', (6034, 6055), False, 'import math\n'), ((989, 1018), 'tensorflow.variable_scope', 'tf.variable_scope', (['layer_name'], {}), '(layer_name)\n', (1006, 1018), True, 'import tensorflow as tf\n'), ((1574, 1629), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'w', 'stride'], {'padding': '"""SAME"""', 'name': '"""conv"""'}), "(x, w, stride, padding='SAME', name='conv')\n", (1586, 1629), True, 'import tensorflow as tf\n'), ((1642, 1679), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['x', 'b'], {'name': '"""bias_add"""'}), "(x, b, name='bias_add')\n", (1656, 1679), True, 'import tensorflow as tf\n'), ((1692, 1718), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {'name': '"""relu"""'}), "(x, name='relu')\n", (1702, 1718), True, 'import tensorflow as tf\n'), ((2182, 2256), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['x', 'kernel'], {'strides': 'stride', 'padding': '"""SAME"""', 'name': 'layer_name'}), "(x, kernel, strides=stride, padding='SAME', name=layer_name)\n", (2196, 2256), True, 'import tensorflow as tf\n'), ((2279, 2353), 'tensorflow.nn.avg_pool', 'tf.nn.avg_pool', (['x', 'kernel'], {'strides': 'stride', 'padding': '"""SAME"""', 'name': 'layer_name'}), "(x, kernel, strides=stride, padding='SAME', name=layer_name)\n", (2293, 2353), True, 'import tensorflow as tf\n'), ((2817, 2846), 'tensorflow.variable_scope', 'tf.variable_scope', (['layer_name'], {}), '(layer_name)\n', (2834, 2846), True, 'import tensorflow as tf\n'), ((3201, 3226), 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, size]'], {}), '(x, [-1, size])\n', (3211, 3226), True, 'import tensorflow as tf\n'), ((3262, 3305), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['flat_x', 'keep_prob'], {'seed': 'seed'}), '(flat_x, keep_prob, seed=seed)\n', (3275, 3305), True, 'import tensorflow as tf\n'), ((3370, 3383), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (3380, 3383), True, 'import tensorflow as tf\n'), ((3565, 3586), 'tensorflow.name_scope', 'tf.name_scope', (['"""loss"""'], {}), "('loss')\n", (3578, 3586), True, 'import tensorflow as tf\n'), ((3621, 3719), 'tensorflow.nn.softmax_cross_entropy_with_logits_v2', 'tf.nn.softmax_cross_entropy_with_logits_v2', ([], {'logits': 'logits', 'labels': 'labels', 'name': '"""cross-entropy"""'}), "(logits=logits, labels=labels,\n name='cross-entropy')\n", (3663, 3719), True, 'import tensorflow as tf\n'), ((3730, 3772), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['cross_entropy'], {'name': '"""loss"""'}), "(cross_entropy, name='loss')\n", (3744, 3772), True, 'import tensorflow as tf\n'), ((3781, 3821), 'tensorflow.summary.scalar', 'tf.summary.scalar', (["(scope + '/loss')", 'loss'], {}), "(scope + '/loss', loss)\n", (3798, 3821), True, 'import tensorflow as tf\n'), ((4051, 4076), 'tensorflow.name_scope', 'tf.name_scope', (['"""accuracy"""'], {}), "('accuracy')\n", (4064, 4076), True, 'import tensorflow as tf\n'), ((4173, 4201), 'tensorflow.cast', 'tf.cast', (['correct', 'tf.float32'], {}), '(correct, tf.float32)\n', (4180, 4201), True, 'import tensorflow as tf\n'), ((4255, 
4303), 'tensorflow.summary.scalar', 'tf.summary.scalar', (["(scope + '/accuracy')", 'accuracy'], {}), "(scope + '/accuracy', accuracy)\n", (4272, 4303), True, 'import tensorflow as tf\n'), ((4511, 4532), 'tensorflow.arg_max', 'tf.arg_max', (['logits', '(1)'], {}), '(logits, 1)\n', (4521, 4532), True, 'import tensorflow as tf\n'), ((4534, 4555), 'tensorflow.arg_max', 'tf.arg_max', (['labels', '(1)'], {}), '(labels, 1)\n', (4544, 4555), True, 'import tensorflow as tf\n'), ((4775, 4801), 'tensorflow.name_scope', 'tf.name_scope', (['"""optimizer"""'], {}), "('optimizer')\n", (4788, 4801), True, 'import tensorflow as tf\n'), ((4823, 4885), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (4856, 4885), True, 'import tensorflow as tf\n'), ((5826, 5850), 'numpy.random.permutation', 'np.random.permutation', (['m'], {}), '(m)\n', (5847, 5850), True, 'import numpy as np\n'), ((3333, 3353), 'tensorflow.matmul', 'tf.matmul', (['flat_x', 'w'], {}), '(flat_x, w)\n', (3342, 3353), True, 'import tensorflow as tf\n'), ((4112, 4133), 'tensorflow.arg_max', 'tf.arg_max', (['logits', '(1)'], {}), '(logits, 1)\n', (4122, 4133), True, 'import tensorflow as tf\n'), ((4135, 4155), 'tensorflow.argmax', 'tf.argmax', (['labels', '(1)'], {}), '(labels, 1)\n', (4144, 4155), True, 'import tensorflow as tf\n'), ((4219, 4242), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['correct'], {}), '(correct)\n', (4233, 4242), True, 'import tensorflow as tf\n'), ((1250, 1297), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {'seed': 'seed'}), '(seed=seed)\n', (1286, 1297), True, 'import tensorflow as tf\n'), ((1532, 1560), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (1555, 1560), True, 'import tensorflow as tf\n'), ((2980, 3027), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {'seed': 'seed'}), '(seed=seed)\n', (3016, 3027), True, 'import tensorflow as tf\n'), ((3154, 3182), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (3177, 3182), True, 'import tensorflow as tf\n')]
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
LONG_DESCRIPTION = fh.read()
setup(
name='pysubmarine',
version='0.1',
scripts=['pysubmarine'],
description="A SDK for submarine",
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
url="https://github.com/hadoopsubmarine/submarine",
packages=find_packages(exclude=['tests', 'tests.*']),
install_requires=[
'six>=1.10.0',
'numpy',
'pandas',
'sqlalchemy',
'sqlparse',
],
classifiers=[
'Intended Audience :: Developers',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
],
)
|
[
"setuptools.find_packages"
] |
[((1173, 1216), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['tests', 'tests.*']"}), "(exclude=['tests', 'tests.*'])\n", (1186, 1216), False, 'from setuptools import setup, find_packages\n')]
|
from hoomd_periodic import simulate
from md_nnps_periodic import MDNNPSSolverPeriodic
import numpy as np
import matplotlib.pyplot as plt
def run_simulations(num_particles, tf, dt):
# run hoomd simulation
simulate(num_particles, dt, tf, log=True)
# run compyle simulation
solver = MDNNPSSolverPeriodic(num_particles)
solver.solve(tf, dt, log_output=True)
solver.write_log('compyle-output.log')
def plot_props(hoomd_fname, comp_fname):
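    # each log file is expected to contain columns: timestep, potential energy, kinetic energy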
data_hoomd = np.genfromtxt(fname=hoomd_fname, skip_header=True)
data_compyle = np.genfromtxt(fname=comp_fname)
plt.plot(data_hoomd[:,0], data_hoomd[:,1], label="HooMD")
plt.plot(data_hoomd[:,0], data_compyle[:,1], label="Compyle")
plt.xlabel("Timestep")
plt.ylabel("Potential Energy")
plt.legend()
plt.savefig("hoomd_pe.png", dpi=300)
plt.clf()
plt.plot(data_hoomd[:,0], data_hoomd[:,2], label="HooMD")
plt.plot(data_hoomd[:,0], data_compyle[:,2], label="Compyle")
plt.xlabel("Timestep")
plt.ylabel("Kinetic Energy")
plt.legend()
plt.savefig("hoomd_ke.png", dpi=300)
if __name__ == '__main__':
run_simulations(2000, 200, 0.02)
plot_props('hoomd-output.log', 'compyle-output.log')
|
[
"hoomd_periodic.simulate",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.legend",
"numpy.genfromtxt",
"matplotlib.pyplot.ylabel",
"md_nnps_periodic.MDNNPSSolverPeriodic",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlabel"
] |
[((213, 254), 'hoomd_periodic.simulate', 'simulate', (['num_particles', 'dt', 'tf'], {'log': '(True)'}), '(num_particles, dt, tf, log=True)\n', (221, 254), False, 'from hoomd_periodic import simulate\n'), ((298, 333), 'md_nnps_periodic.MDNNPSSolverPeriodic', 'MDNNPSSolverPeriodic', (['num_particles'], {}), '(num_particles)\n', (318, 333), False, 'from md_nnps_periodic import MDNNPSSolverPeriodic\n'), ((479, 529), 'numpy.genfromtxt', 'np.genfromtxt', ([], {'fname': 'hoomd_fname', 'skip_header': '(True)'}), '(fname=hoomd_fname, skip_header=True)\n', (492, 529), True, 'import numpy as np\n'), ((549, 580), 'numpy.genfromtxt', 'np.genfromtxt', ([], {'fname': 'comp_fname'}), '(fname=comp_fname)\n', (562, 580), True, 'import numpy as np\n'), ((587, 646), 'matplotlib.pyplot.plot', 'plt.plot', (['data_hoomd[:, 0]', 'data_hoomd[:, 1]'], {'label': '"""HooMD"""'}), "(data_hoomd[:, 0], data_hoomd[:, 1], label='HooMD')\n", (595, 646), True, 'import matplotlib.pyplot as plt\n'), ((649, 712), 'matplotlib.pyplot.plot', 'plt.plot', (['data_hoomd[:, 0]', 'data_compyle[:, 1]'], {'label': '"""Compyle"""'}), "(data_hoomd[:, 0], data_compyle[:, 1], label='Compyle')\n", (657, 712), True, 'import matplotlib.pyplot as plt\n'), ((715, 737), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Timestep"""'], {}), "('Timestep')\n", (725, 737), True, 'import matplotlib.pyplot as plt\n'), ((742, 772), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Potential Energy"""'], {}), "('Potential Energy')\n", (752, 772), True, 'import matplotlib.pyplot as plt\n'), ((777, 789), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (787, 789), True, 'import matplotlib.pyplot as plt\n'), ((794, 830), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""hoomd_pe.png"""'], {'dpi': '(300)'}), "('hoomd_pe.png', dpi=300)\n", (805, 830), True, 'import matplotlib.pyplot as plt\n'), ((836, 845), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (843, 845), True, 'import matplotlib.pyplot as plt\n'), ((851, 910), 'matplotlib.pyplot.plot', 'plt.plot', (['data_hoomd[:, 0]', 'data_hoomd[:, 2]'], {'label': '"""HooMD"""'}), "(data_hoomd[:, 0], data_hoomd[:, 2], label='HooMD')\n", (859, 910), True, 'import matplotlib.pyplot as plt\n'), ((913, 976), 'matplotlib.pyplot.plot', 'plt.plot', (['data_hoomd[:, 0]', 'data_compyle[:, 2]'], {'label': '"""Compyle"""'}), "(data_hoomd[:, 0], data_compyle[:, 2], label='Compyle')\n", (921, 976), True, 'import matplotlib.pyplot as plt\n'), ((979, 1001), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Timestep"""'], {}), "('Timestep')\n", (989, 1001), True, 'import matplotlib.pyplot as plt\n'), ((1006, 1034), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Kinetic Energy"""'], {}), "('Kinetic Energy')\n", (1016, 1034), True, 'import matplotlib.pyplot as plt\n'), ((1039, 1051), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1049, 1051), True, 'import matplotlib.pyplot as plt\n'), ((1056, 1092), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""hoomd_ke.png"""'], {'dpi': '(300)'}), "('hoomd_ke.png', dpi=300)\n", (1067, 1092), True, 'import matplotlib.pyplot as plt\n')]
|
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Generalized doubles factorization
This will go into OpenFermion. Putting here until I write something up
or decide to just publish the code.
"""
from typing import List, Tuple
from itertools import product, groupby
import numpy as np
import scipy as sp
from scipy.linalg import block_diag, sqrtm, polar, schur
import openfermion as of
from fqe.algorithm.brillouin_calculator import get_fermion_op
def doubles_factorization_svd(generator_tensor: np.ndarray, eig_cutoff=None):
"""
    Given an antisymmetric antihermitian tensor, perform a double factorized
low-rank decomposition.
Given:
A = sum_{pqrs}A^{pq}_{sr}p^ q^ r s
with A^{pq}_{sr} = -A^{qp}_{sr} = -A^{pq}_{rs} = -A^{sr}_{pq}
Rewrite A as a sum-of squares s.t
A = sum_{l}Y_{l}^2
    where Y_{l} are normal one-body operators such that the spectral
theorem holds and we can use the double factorization to implement an
approximate evolution.
"""
if not np.allclose(generator_tensor.imag, 0):
raise TypeError("generator_tensor must be a real matrix")
if eig_cutoff is not None:
if eig_cutoff % 2 != 0:
raise ValueError("eig_cutoff must be an even number")
nso = generator_tensor.shape[0]
generator_tensor = generator_tensor.real
generator_mat = np.zeros((nso**2, nso**2))
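    # pack A^{pq}_{sr} into an (nso^2 x nso^2) matrix whose rows are indexed by (p, s) and columns by (q, r)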
for row_gem, col_gem in product(range(nso**2), repeat=2):
p, s = row_gem // nso, row_gem % nso
q, r = col_gem // nso, col_gem % nso
generator_mat[row_gem, col_gem] = generator_tensor[p, q, r, s]
test_generator_mat = np.reshape(
np.transpose(generator_tensor, [0, 3, 1, 2]),
(nso**2, nso**2)).astype(np.float)
assert np.allclose(test_generator_mat, generator_mat)
if not np.allclose(generator_mat, generator_mat.T):
raise ValueError("generator tensor does not correspond to four-fold"
" antisymmetry")
one_body_residual = -np.einsum('pqrq->pr', generator_tensor)
u, sigma, vh = np.linalg.svd(generator_mat)
ul = []
ul_ops = []
vl = []
vl_ops = []
if eig_cutoff is None:
max_sigma = len(sigma)
else:
max_sigma = eig_cutoff
for ll in range(max_sigma):
ul.append(np.sqrt(sigma[ll]) * u[:, ll].reshape((nso, nso)))
ul_ops.append(
get_fermion_op(np.sqrt(sigma[ll]) * u[:, ll].reshape((nso, nso))))
vl.append(np.sqrt(sigma[ll]) * vh[ll, :].reshape((nso, nso)))
vl_ops.append(
get_fermion_op(np.sqrt(sigma[ll]) * vh[ll, :].reshape((nso, nso))))
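        # combine the factors into S = u_l + v_l and D = u_l - v_l, then check that S ± 1j*S^dag and D ± 1j*D^dag are normal operators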
S = ul_ops[ll] + vl_ops[ll]
D = ul_ops[ll] - vl_ops[ll]
op1 = S + 1j * of.hermitian_conjugated(S)
op2 = S - 1j * of.hermitian_conjugated(S)
op3 = D + 1j * of.hermitian_conjugated(D)
op4 = D - 1j * of.hermitian_conjugated(D)
assert np.isclose(
of.normal_ordered(of.commutator(
op1, of.hermitian_conjugated(op1))).induced_norm(), 0)
assert np.isclose(
of.normal_ordered(of.commutator(
op2, of.hermitian_conjugated(op2))).induced_norm(), 0)
assert np.isclose(
of.normal_ordered(of.commutator(
op3, of.hermitian_conjugated(op3))).induced_norm(), 0)
assert np.isclose(
of.normal_ordered(of.commutator(
op4, of.hermitian_conjugated(op4))).induced_norm(), 0)
one_body_op = of.FermionOperator()
for p, q in product(range(nso), repeat=2):
tfop = ((p, 1), (q, 0))
one_body_op += of.FermionOperator(tfop,
coefficient=one_body_residual[p, q])
return ul, vl, one_body_residual, ul_ops, vl_ops, one_body_op
def takagi(N, tol=1e-13, rounding=13):
r"""Autonne-Takagi decomposition of a complex symmetric (not Hermitian!) matrix.
Note that singular values of N are considered equal if they are equal after np.round(values, tol).
Taken from Strawberry Fields
[https://github.com/XanaduAI/strawberryfields/blob/master/strawberryfields/decompositions.py#L28]
Args:
N (array[complex]): square, symmetric matrix N
rounding (int): the number of decimal places to use when rounding the singular values of N
tol (float): the tolerance used when checking if the input matrix is symmetric: :math:`|N-N^T| <` tol
Returns:
tuple[array, array]: (rl, U), where rl are the (rounded) singular values,
and U is the Takagi unitary, such that :math:`N = U \diag(rl) U^T`.
"""
(n, m) = N.shape
if n != m:
raise ValueError("The input matrix must be square")
if np.linalg.norm(N - np.transpose(N)) >= tol:
raise ValueError("The input matrix is not symmetric")
N = np.real_if_close(N)
if np.allclose(N, 0):
return np.zeros(n), np.eye(n)
if np.isrealobj(N):
# If the matrix N is real one can be more clever and use its eigendecomposition
l, U = np.linalg.eigh(N)
vals = np.abs(l) # These are the Takagi eigenvalues
phases = np.sqrt(np.complex128([1 if i > 0 else -1 for i in l]))
Uc = U @ np.diag(phases) # One needs to readjust the phases
list_vals = [(vals[i], i) for i in range(len(vals))]
list_vals.sort(reverse=True)
sorted_l, permutation = zip(*list_vals)
permutation = np.array(permutation)
Uc = Uc[:, permutation]
# And also rearrange the unitary and values so that they are decreasingly ordered
return np.array(sorted_l), Uc
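    # general complex case: use the SVD and fix phases block-by-block over degenerate singular values to build the Takagi unitary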
v, l, ws = np.linalg.svd(N)
w = np.transpose(np.conjugate(ws))
rl = np.round(l, rounding)
    # Generate list with degeneracies
result = []
for k, g in groupby(rl):
result.append(list(g))
    # Generate lists containing the columns that correspond to degeneracies
kk = 0
for k in result:
for ind, j in enumerate(k): # pylint: disable=unused-variable
k[ind] = kk
kk = kk + 1
# Generate the lists with the degenerate column subspaces
vas = []
was = []
for i in result:
vas.append(v[:, i])
was.append(w[:, i])
# Generate the matrices qs of the degenerate subspaces
qs = []
for i in range(len(result)):
qs.append(sqrtm(np.transpose(vas[i]) @ was[i]))
# Construct the Takagi unitary
qb = block_diag(*qs)
U = v @ np.conj(qb)
return rl, U
def doubles_factorization_takagi(generator_tensor: np.ndarray, eig_cutoff=None):
"""
    Given an antisymmetric antihermitian tensor, perform a double factorized
    low-rank decomposition. This uses the Takagi decomposition of a complex
    symmetric matrix, which reduces the number of tensors from 4 to 2 when
    compared against the SVD approach.
Given:
A = sum_{pqrs}A^{pq}_{sr}p^ q^ r s
with A^{pq}_{sr} = -A^{qp}_{sr} = -A^{pq}_{rs} = -A^{sr}_{pq}
Rewrite A as a sum-of squares s.t
A = sum_{l}Y_{l}^2
    where Y_{l} are normal one-body operators such that the spectral
theorem holds and we can use the double factorization to implement an
approximate evolution.
"""
if eig_cutoff is not None:
if eig_cutoff % 2 != 0:
raise ValueError("eig_cutoff must be an even number")
nso = generator_tensor.shape[0]
generator_mat = np.reshape(np.transpose(generator_tensor, [0, 3, 1, 2]),
(nso**2, nso**2))
assert np.allclose(generator_mat, generator_mat.T)
one_body_residual = -np.einsum('pqrq->pr', generator_tensor)
# complex symmetric matrices give Q S Q^T with S diagonal and real
# and Q is unitary.
T, Z = takagi(generator_mat)
nonzero_idx = np.where(T > 1.0E-12)[0]
if eig_cutoff is None:
max_sigma = len(nonzero_idx)
else:
max_sigma = eig_cutoff
Zl = []
Zlp = []
Zlm = []
for idx in nonzero_idx[:max_sigma]:
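        # reshape each column of the Takagi unitary into an nso x nso matrix Z_l (scaled by sqrt(T)) and form the normal combinations Z_l ± 1j*Z_l^dag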
Zl.append(np.sqrt(T[idx]) * Z[:, idx].reshape((nso, nso)))
Zlp.append(Zl[-1] + 1j * Zl[-1].conj().T)
Zlm.append(Zl[-1] - 1j * Zl[-1].conj().T)
return Zlp, Zlm, Zl, one_body_residual
|
[
"numpy.abs",
"numpy.allclose",
"numpy.einsum",
"numpy.linalg.svd",
"numpy.diag",
"numpy.conjugate",
"numpy.round",
"openfermion.hermitian_conjugated",
"numpy.transpose",
"numpy.isrealobj",
"openfermion.FermionOperator",
"numpy.conj",
"numpy.complex128",
"scipy.linalg.block_diag",
"numpy.real_if_close",
"itertools.groupby",
"numpy.zeros",
"numpy.linalg.eigh",
"numpy.where",
"numpy.array",
"numpy.eye",
"numpy.sqrt"
] |
[((1906, 1936), 'numpy.zeros', 'np.zeros', (['(nso ** 2, nso ** 2)'], {}), '((nso ** 2, nso ** 2))\n', (1914, 1936), True, 'import numpy as np\n'), ((2302, 2348), 'numpy.allclose', 'np.allclose', (['test_generator_mat', 'generator_mat'], {}), '(test_generator_mat, generator_mat)\n', (2313, 2348), True, 'import numpy as np\n'), ((2610, 2638), 'numpy.linalg.svd', 'np.linalg.svd', (['generator_mat'], {}), '(generator_mat)\n', (2623, 2638), True, 'import numpy as np\n'), ((4035, 4055), 'openfermion.FermionOperator', 'of.FermionOperator', ([], {}), '()\n', (4053, 4055), True, 'import openfermion as of\n'), ((5372, 5391), 'numpy.real_if_close', 'np.real_if_close', (['N'], {}), '(N)\n', (5388, 5391), True, 'import numpy as np\n'), ((5400, 5417), 'numpy.allclose', 'np.allclose', (['N', '(0)'], {}), '(N, 0)\n', (5411, 5417), True, 'import numpy as np\n'), ((5465, 5480), 'numpy.isrealobj', 'np.isrealobj', (['N'], {}), '(N)\n', (5477, 5480), True, 'import numpy as np\n'), ((6172, 6188), 'numpy.linalg.svd', 'np.linalg.svd', (['N'], {}), '(N)\n', (6185, 6188), True, 'import numpy as np\n'), ((6237, 6258), 'numpy.round', 'np.round', (['l', 'rounding'], {}), '(l, rounding)\n', (6245, 6258), True, 'import numpy as np\n'), ((6331, 6342), 'itertools.groupby', 'groupby', (['rl'], {}), '(rl)\n', (6338, 6342), False, 'from itertools import product, groupby\n'), ((6976, 6991), 'scipy.linalg.block_diag', 'block_diag', (['*qs'], {}), '(*qs)\n', (6986, 6991), False, 'from scipy.linalg import block_diag, sqrtm, polar, schur\n'), ((8064, 8107), 'numpy.allclose', 'np.allclose', (['generator_mat', 'generator_mat.T'], {}), '(generator_mat, generator_mat.T)\n', (8075, 8107), True, 'import numpy as np\n'), ((1569, 1606), 'numpy.allclose', 'np.allclose', (['generator_tensor.imag', '(0)'], {}), '(generator_tensor.imag, 0)\n', (1580, 1606), True, 'import numpy as np\n'), ((2361, 2404), 'numpy.allclose', 'np.allclose', (['generator_mat', 'generator_mat.T'], {}), '(generator_mat, generator_mat.T)\n', (2372, 2404), True, 'import numpy as np\n'), ((2551, 2590), 'numpy.einsum', 'np.einsum', (['"""pqrq->pr"""', 'generator_tensor'], {}), "('pqrq->pr', generator_tensor)\n", (2560, 2590), True, 'import numpy as np\n'), ((4158, 4219), 'openfermion.FermionOperator', 'of.FermionOperator', (['tfop'], {'coefficient': 'one_body_residual[p, q]'}), '(tfop, coefficient=one_body_residual[p, q])\n', (4176, 4219), True, 'import openfermion as of\n'), ((5585, 5602), 'numpy.linalg.eigh', 'np.linalg.eigh', (['N'], {}), '(N)\n', (5599, 5602), True, 'import numpy as np\n'), ((5618, 5627), 'numpy.abs', 'np.abs', (['l'], {}), '(l)\n', (5624, 5627), True, 'import numpy as np\n'), ((5974, 5995), 'numpy.array', 'np.array', (['permutation'], {}), '(permutation)\n', (5982, 5995), True, 'import numpy as np\n'), ((6210, 6226), 'numpy.conjugate', 'np.conjugate', (['ws'], {}), '(ws)\n', (6222, 6226), True, 'import numpy as np\n'), ((7005, 7016), 'numpy.conj', 'np.conj', (['qb'], {}), '(qb)\n', (7012, 7016), True, 'import numpy as np\n'), ((7958, 8002), 'numpy.transpose', 'np.transpose', (['generator_tensor', '[0, 3, 1, 2]'], {}), '(generator_tensor, [0, 3, 1, 2])\n', (7970, 8002), True, 'import numpy as np\n'), ((8134, 8173), 'numpy.einsum', 'np.einsum', (['"""pqrq->pr"""', 'generator_tensor'], {}), "('pqrq->pr', generator_tensor)\n", (8143, 8173), True, 'import numpy as np\n'), ((8322, 8341), 'numpy.where', 'np.where', (['(T > 1e-12)'], {}), '(T > 1e-12)\n', (8330, 8341), True, 'import numpy as np\n'), ((5434, 5445), 'numpy.zeros', 'np.zeros', (['n'], {}), 
'(n)\n', (5442, 5445), True, 'import numpy as np\n'), ((5447, 5456), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (5453, 5456), True, 'import numpy as np\n'), ((5689, 5737), 'numpy.complex128', 'np.complex128', (['[(1 if i > 0 else -1) for i in l]'], {}), '([(1 if i > 0 else -1) for i in l])\n', (5702, 5737), True, 'import numpy as np\n'), ((5754, 5769), 'numpy.diag', 'np.diag', (['phases'], {}), '(phases)\n', (5761, 5769), True, 'import numpy as np\n'), ((6133, 6151), 'numpy.array', 'np.array', (['sorted_l'], {}), '(sorted_l)\n', (6141, 6151), True, 'import numpy as np\n'), ((2201, 2245), 'numpy.transpose', 'np.transpose', (['generator_tensor', '[0, 3, 1, 2]'], {}), '(generator_tensor, [0, 3, 1, 2])\n', (2213, 2245), True, 'import numpy as np\n'), ((2846, 2864), 'numpy.sqrt', 'np.sqrt', (['sigma[ll]'], {}), '(sigma[ll])\n', (2853, 2864), True, 'import numpy as np\n'), ((3017, 3035), 'numpy.sqrt', 'np.sqrt', (['sigma[ll]'], {}), '(sigma[ll])\n', (3024, 3035), True, 'import numpy as np\n'), ((3267, 3293), 'openfermion.hermitian_conjugated', 'of.hermitian_conjugated', (['S'], {}), '(S)\n', (3290, 3293), True, 'import openfermion as of\n'), ((3317, 3343), 'openfermion.hermitian_conjugated', 'of.hermitian_conjugated', (['S'], {}), '(S)\n', (3340, 3343), True, 'import openfermion as of\n'), ((3367, 3393), 'openfermion.hermitian_conjugated', 'of.hermitian_conjugated', (['D'], {}), '(D)\n', (3390, 3393), True, 'import openfermion as of\n'), ((3417, 3443), 'openfermion.hermitian_conjugated', 'of.hermitian_conjugated', (['D'], {}), '(D)\n', (3440, 3443), True, 'import openfermion as of\n'), ((5276, 5291), 'numpy.transpose', 'np.transpose', (['N'], {}), '(N)\n', (5288, 5291), True, 'import numpy as np\n'), ((8549, 8564), 'numpy.sqrt', 'np.sqrt', (['T[idx]'], {}), '(T[idx])\n', (8556, 8564), True, 'import numpy as np\n'), ((2947, 2965), 'numpy.sqrt', 'np.sqrt', (['sigma[ll]'], {}), '(sigma[ll])\n', (2954, 2965), True, 'import numpy as np\n'), ((3119, 3137), 'numpy.sqrt', 'np.sqrt', (['sigma[ll]'], {}), '(sigma[ll])\n', (3126, 3137), True, 'import numpy as np\n'), ((6899, 6919), 'numpy.transpose', 'np.transpose', (['vas[i]'], {}), '(vas[i])\n', (6911, 6919), True, 'import numpy as np\n'), ((3537, 3565), 'openfermion.hermitian_conjugated', 'of.hermitian_conjugated', (['op1'], {}), '(op1)\n', (3560, 3565), True, 'import openfermion as of\n'), ((3680, 3708), 'openfermion.hermitian_conjugated', 'of.hermitian_conjugated', (['op2'], {}), '(op2)\n', (3703, 3708), True, 'import openfermion as of\n'), ((3823, 3851), 'openfermion.hermitian_conjugated', 'of.hermitian_conjugated', (['op3'], {}), '(op3)\n', (3846, 3851), True, 'import openfermion as of\n'), ((3966, 3994), 'openfermion.hermitian_conjugated', 'of.hermitian_conjugated', (['op4'], {}), '(op4)\n', (3989, 3994), True, 'import openfermion as of\n')]
|
# coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from docusign_esign.client.configuration import Configuration
class BillingPlanPreview(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'currency_code': 'str',
'invoice': 'BillingInvoice',
'is_prorated': 'str',
'subtotal_amount': 'str',
'tax_amount': 'str',
'total_amount': 'str'
}
attribute_map = {
'currency_code': 'currencyCode',
'invoice': 'invoice',
'is_prorated': 'isProrated',
'subtotal_amount': 'subtotalAmount',
'tax_amount': 'taxAmount',
'total_amount': 'totalAmount'
}
def __init__(self, _configuration=None, **kwargs): # noqa: E501
"""BillingPlanPreview - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._currency_code = None
self._invoice = None
self._is_prorated = None
self._subtotal_amount = None
self._tax_amount = None
self._total_amount = None
self.discriminator = None
setattr(self, "_{}".format('currency_code'), kwargs.get('currency_code', None))
setattr(self, "_{}".format('invoice'), kwargs.get('invoice', None))
setattr(self, "_{}".format('is_prorated'), kwargs.get('is_prorated', None))
setattr(self, "_{}".format('subtotal_amount'), kwargs.get('subtotal_amount', None))
setattr(self, "_{}".format('tax_amount'), kwargs.get('tax_amount', None))
setattr(self, "_{}".format('total_amount'), kwargs.get('total_amount', None))
@property
def currency_code(self):
"""Gets the currency_code of this BillingPlanPreview. # noqa: E501
Specifies the ISO currency code for the account. # noqa: E501
:return: The currency_code of this BillingPlanPreview. # noqa: E501
:rtype: str
"""
return self._currency_code
@currency_code.setter
def currency_code(self, currency_code):
"""Sets the currency_code of this BillingPlanPreview.
Specifies the ISO currency code for the account. # noqa: E501
:param currency_code: The currency_code of this BillingPlanPreview. # noqa: E501
:type: str
"""
self._currency_code = currency_code
@property
def invoice(self):
"""Gets the invoice of this BillingPlanPreview. # noqa: E501
:return: The invoice of this BillingPlanPreview. # noqa: E501
:rtype: BillingInvoice
"""
return self._invoice
@invoice.setter
def invoice(self, invoice):
"""Sets the invoice of this BillingPlanPreview.
:param invoice: The invoice of this BillingPlanPreview. # noqa: E501
:type: BillingInvoice
"""
self._invoice = invoice
@property
def is_prorated(self):
"""Gets the is_prorated of this BillingPlanPreview. # noqa: E501
# noqa: E501
:return: The is_prorated of this BillingPlanPreview. # noqa: E501
:rtype: str
"""
return self._is_prorated
@is_prorated.setter
def is_prorated(self, is_prorated):
"""Sets the is_prorated of this BillingPlanPreview.
# noqa: E501
:param is_prorated: The is_prorated of this BillingPlanPreview. # noqa: E501
:type: str
"""
self._is_prorated = is_prorated
@property
def subtotal_amount(self):
"""Gets the subtotal_amount of this BillingPlanPreview. # noqa: E501
# noqa: E501
:return: The subtotal_amount of this BillingPlanPreview. # noqa: E501
:rtype: str
"""
return self._subtotal_amount
@subtotal_amount.setter
def subtotal_amount(self, subtotal_amount):
"""Sets the subtotal_amount of this BillingPlanPreview.
# noqa: E501
:param subtotal_amount: The subtotal_amount of this BillingPlanPreview. # noqa: E501
:type: str
"""
self._subtotal_amount = subtotal_amount
@property
def tax_amount(self):
"""Gets the tax_amount of this BillingPlanPreview. # noqa: E501
# noqa: E501
:return: The tax_amount of this BillingPlanPreview. # noqa: E501
:rtype: str
"""
return self._tax_amount
@tax_amount.setter
def tax_amount(self, tax_amount):
"""Sets the tax_amount of this BillingPlanPreview.
# noqa: E501
:param tax_amount: The tax_amount of this BillingPlanPreview. # noqa: E501
:type: str
"""
self._tax_amount = tax_amount
@property
def total_amount(self):
"""Gets the total_amount of this BillingPlanPreview. # noqa: E501
# noqa: E501
:return: The total_amount of this BillingPlanPreview. # noqa: E501
:rtype: str
"""
return self._total_amount
@total_amount.setter
def total_amount(self, total_amount):
"""Sets the total_amount of this BillingPlanPreview.
# noqa: E501
:param total_amount: The total_amount of this BillingPlanPreview. # noqa: E501
:type: str
"""
self._total_amount = total_amount
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(BillingPlanPreview, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BillingPlanPreview):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, BillingPlanPreview):
return True
return self.to_dict() != other.to_dict()
|
[
"six.iteritems",
"docusign_esign.client.configuration.Configuration"
] |
[((6100, 6133), 'six.iteritems', 'six.iteritems', (['self.swagger_types'], {}), '(self.swagger_types)\n', (6113, 6133), False, 'import six\n'), ((1544, 1559), 'docusign_esign.client.configuration.Configuration', 'Configuration', ([], {}), '()\n', (1557, 1559), False, 'from docusign_esign.client.configuration import Configuration\n')]
|